1 /*	$NetBSD: radeon_cypress_dpm.c,v 1.3 2022/07/15 06:42:08 mrg Exp $	*/
2 
3 /*
4  * Copyright 2011 Advanced Micro Devices, Inc.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Alex Deucher
25  */
26 
27 #include <sys/cdefs.h>
28 __KERNEL_RCSID(0, "$NetBSD: radeon_cypress_dpm.c,v 1.3 2022/07/15 06:42:08 mrg Exp $");
29 
30 #include <linux/pci.h>
31 
32 #include "atom.h"
33 #include "cypress_dpm.h"
34 #include "evergreend.h"
35 #include "r600_dpm.h"
36 #include "radeon.h"
37 #include "radeon_asic.h"
38 
39 #define SMC_RAM_END 0x8000
40 
41 #define MC_CG_ARB_FREQ_F0           0x0a
42 #define MC_CG_ARB_FREQ_F1           0x0b
43 #define MC_CG_ARB_FREQ_F2           0x0c
44 #define MC_CG_ARB_FREQ_F3           0x0d
45 
46 #define MC_CG_SEQ_DRAMCONF_S0       0x05
47 #define MC_CG_SEQ_DRAMCONF_S1       0x06
48 #define MC_CG_SEQ_YCLK_SUSPEND      0x04
49 #define MC_CG_SEQ_YCLK_RESUME       0x0a
50 
51 struct rv7xx_ps *rv770_get_ps(struct radeon_ps *rps);
52 struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev);
53 struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev);
54 
55 static void cypress_enable_bif_dynamic_pcie_gen2(struct radeon_device *rdev,
56 						 bool enable)
57 {
58 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
59 	u32 tmp, bif;
60 
61 	tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
62 	if (enable) {
63 		if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
64 		    (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
65 			if (!pi->boot_in_gen2) {
66 				bif = RREG32(CG_BIF_REQ_AND_RSP) & ~CG_CLIENT_REQ_MASK;
67 				bif |= CG_CLIENT_REQ(0xd);
68 				WREG32(CG_BIF_REQ_AND_RSP, bif);
69 
70 				tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
71 				tmp |= LC_HW_VOLTAGE_IF_CONTROL(1);
72 				tmp |= LC_GEN2_EN_STRAP;
73 
74 				tmp |= LC_CLR_FAILED_SPD_CHANGE_CNT;
75 				WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
76 				udelay(10);
77 				tmp &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
78 				WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
79 			}
80 		}
81 	} else {
82 		if (!pi->boot_in_gen2) {
83 			tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
84 			tmp &= ~LC_GEN2_EN_STRAP;
85 		}
86 		if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
87 		    (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2))
88 			WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
89 	}
90 }
91 
92 static void cypress_enable_dynamic_pcie_gen2(struct radeon_device *rdev,
93 					     bool enable)
94 {
95 	cypress_enable_bif_dynamic_pcie_gen2(rdev, enable);
96 
97 	if (enable)
98 		WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE);
99 	else
100 		WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
101 }
102 
103 #if 0
104 static int cypress_enter_ulp_state(struct radeon_device *rdev)
105 {
106 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
107 
108 	if (pi->gfx_clock_gating) {
109 		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
110 		WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON);
111 		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON);
112 
113 		RREG32(GB_ADDR_CONFIG);
114 	}
115 
116 	WREG32_P(SMC_MSG, HOST_SMC_MSG(PPSMC_MSG_SwitchToMinimumPower),
117 		 ~HOST_SMC_MSG_MASK);
118 
119 	udelay(7000);
120 
121 	return 0;
122 }
123 #endif
124 
125 static void cypress_gfx_clock_gating_enable(struct radeon_device *rdev,
126 					    bool enable)
127 {
128 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
129 
130 	if (enable) {
131 		if (eg_pi->light_sleep) {
132 			WREG32(GRBM_GFX_INDEX, 0xC0000000);
133 
134 			WREG32_CG(CG_CGLS_TILE_0, 0xFFFFFFFF);
135 			WREG32_CG(CG_CGLS_TILE_1, 0xFFFFFFFF);
136 			WREG32_CG(CG_CGLS_TILE_2, 0xFFFFFFFF);
137 			WREG32_CG(CG_CGLS_TILE_3, 0xFFFFFFFF);
138 			WREG32_CG(CG_CGLS_TILE_4, 0xFFFFFFFF);
139 			WREG32_CG(CG_CGLS_TILE_5, 0xFFFFFFFF);
140 			WREG32_CG(CG_CGLS_TILE_6, 0xFFFFFFFF);
141 			WREG32_CG(CG_CGLS_TILE_7, 0xFFFFFFFF);
142 			WREG32_CG(CG_CGLS_TILE_8, 0xFFFFFFFF);
143 			WREG32_CG(CG_CGLS_TILE_9, 0xFFFFFFFF);
144 			WREG32_CG(CG_CGLS_TILE_10, 0xFFFFFFFF);
145 			WREG32_CG(CG_CGLS_TILE_11, 0xFFFFFFFF);
146 
147 			WREG32_P(SCLK_PWRMGT_CNTL, DYN_LIGHT_SLEEP_EN, ~DYN_LIGHT_SLEEP_EN);
148 		}
149 		WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
150 	} else {
151 		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
152 		WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON);
153 		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON);
154 		RREG32(GB_ADDR_CONFIG);
155 
156 		if (eg_pi->light_sleep) {
157 			WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_LIGHT_SLEEP_EN);
158 
159 			WREG32(GRBM_GFX_INDEX, 0xC0000000);
160 
161 			WREG32_CG(CG_CGLS_TILE_0, 0);
162 			WREG32_CG(CG_CGLS_TILE_1, 0);
163 			WREG32_CG(CG_CGLS_TILE_2, 0);
164 			WREG32_CG(CG_CGLS_TILE_3, 0);
165 			WREG32_CG(CG_CGLS_TILE_4, 0);
166 			WREG32_CG(CG_CGLS_TILE_5, 0);
167 			WREG32_CG(CG_CGLS_TILE_6, 0);
168 			WREG32_CG(CG_CGLS_TILE_7, 0);
169 			WREG32_CG(CG_CGLS_TILE_8, 0);
170 			WREG32_CG(CG_CGLS_TILE_9, 0);
171 			WREG32_CG(CG_CGLS_TILE_10, 0);
172 			WREG32_CG(CG_CGLS_TILE_11, 0);
173 		}
174 	}
175 }
176 
177 static void cypress_mg_clock_gating_enable(struct radeon_device *rdev,
178 					   bool enable)
179 {
180 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
181 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
182 
183 	if (enable) {
184 		u32 cgts_sm_ctrl_reg;
185 
186 		if (rdev->family == CHIP_CEDAR)
187 			cgts_sm_ctrl_reg = CEDAR_MGCGCGTSSMCTRL_DFLT;
188 		else if (rdev->family == CHIP_REDWOOD)
189 			cgts_sm_ctrl_reg = REDWOOD_MGCGCGTSSMCTRL_DFLT;
190 		else
191 			cgts_sm_ctrl_reg = CYPRESS_MGCGCGTSSMCTRL_DFLT;
192 
193 		WREG32(GRBM_GFX_INDEX, 0xC0000000);
194 
195 		WREG32_CG(CG_CGTT_LOCAL_0, CYPRESS_MGCGTTLOCAL0_DFLT);
196 		WREG32_CG(CG_CGTT_LOCAL_1, CYPRESS_MGCGTTLOCAL1_DFLT & 0xFFFFCFFF);
197 		WREG32_CG(CG_CGTT_LOCAL_2, CYPRESS_MGCGTTLOCAL2_DFLT);
198 		WREG32_CG(CG_CGTT_LOCAL_3, CYPRESS_MGCGTTLOCAL3_DFLT);
199 
200 		if (pi->mgcgtssm)
201 			WREG32(CGTS_SM_CTRL_REG, cgts_sm_ctrl_reg);
202 
203 		if (eg_pi->mcls) {
204 			WREG32_P(MC_CITF_MISC_RD_CG, MEM_LS_ENABLE, ~MEM_LS_ENABLE);
205 			WREG32_P(MC_CITF_MISC_WR_CG, MEM_LS_ENABLE, ~MEM_LS_ENABLE);
206 			WREG32_P(MC_CITF_MISC_VM_CG, MEM_LS_ENABLE, ~MEM_LS_ENABLE);
207 			WREG32_P(MC_HUB_MISC_HUB_CG, MEM_LS_ENABLE, ~MEM_LS_ENABLE);
208 			WREG32_P(MC_HUB_MISC_VM_CG, MEM_LS_ENABLE, ~MEM_LS_ENABLE);
209 			WREG32_P(MC_HUB_MISC_SIP_CG, MEM_LS_ENABLE, ~MEM_LS_ENABLE);
210 			WREG32_P(MC_XPB_CLK_GAT, MEM_LS_ENABLE, ~MEM_LS_ENABLE);
211 			WREG32_P(VM_L2_CG, MEM_LS_ENABLE, ~MEM_LS_ENABLE);
212 		}
213 	} else {
214 		WREG32(GRBM_GFX_INDEX, 0xC0000000);
215 
216 		WREG32_CG(CG_CGTT_LOCAL_0, 0xFFFFFFFF);
217 		WREG32_CG(CG_CGTT_LOCAL_1, 0xFFFFFFFF);
218 		WREG32_CG(CG_CGTT_LOCAL_2, 0xFFFFFFFF);
219 		WREG32_CG(CG_CGTT_LOCAL_3, 0xFFFFFFFF);
220 
221 		if (pi->mgcgtssm)
222 			WREG32(CGTS_SM_CTRL_REG, 0x81f44bc0);
223 	}
224 }
225 
226 void cypress_enable_spread_spectrum(struct radeon_device *rdev,
227 				    bool enable)
228 {
229 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
230 
231 	if (enable) {
232 		if (pi->sclk_ss)
233 			WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN);
234 
235 		if (pi->mclk_ss)
236 			WREG32_P(MPLL_CNTL_MODE, SS_SSEN, ~SS_SSEN);
237 	} else {
238 		WREG32_P(CG_SPLL_SPREAD_SPECTRUM, 0, ~SSEN);
239 		WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN);
240 		WREG32_P(MPLL_CNTL_MODE, 0, ~SS_SSEN);
241 		WREG32_P(MPLL_CNTL_MODE, 0, ~SS_DSMODE_EN);
242 	}
243 }
244 
245 void cypress_start_dpm(struct radeon_device *rdev)
246 {
247 	WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
248 }
249 
250 void cypress_enable_sclk_control(struct radeon_device *rdev,
251 				 bool enable)
252 {
253 	if (enable)
254 		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
255 	else
256 		WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
257 }
258 
259 void cypress_enable_mclk_control(struct radeon_device *rdev,
260 				 bool enable)
261 {
262 	if (enable)
263 		WREG32_P(MCLK_PWRMGT_CNTL, 0, ~MPLL_PWRMGT_OFF);
264 	else
265 		WREG32_P(MCLK_PWRMGT_CNTL, MPLL_PWRMGT_OFF, ~MPLL_PWRMGT_OFF);
266 }
267 
268 int cypress_notify_smc_display_change(struct radeon_device *rdev,
269 				      bool has_display)
270 {
271 	PPSMC_Msg msg = has_display ?
272 		(PPSMC_Msg)PPSMC_MSG_HasDisplay : (PPSMC_Msg)PPSMC_MSG_NoDisplay;
273 
274 	if (rv770_send_msg_to_smc(rdev, msg) != PPSMC_Result_OK)
275 		return -EINVAL;
276 
277 	return 0;
278 }
279 
280 void cypress_program_response_times(struct radeon_device *rdev)
281 {
282 	u32 reference_clock;
283 	u32 mclk_switch_limit;
284 
285 	reference_clock = radeon_get_xclk(rdev);
286 	mclk_switch_limit = (460 * reference_clock) / 100;
287 
288 	rv770_write_smc_soft_register(rdev,
289 				      RV770_SMC_SOFT_REGISTER_mclk_switch_lim,
290 				      mclk_switch_limit);
291 
292 	rv770_write_smc_soft_register(rdev,
293 				      RV770_SMC_SOFT_REGISTER_mvdd_chg_time, 1);
294 
295 	rv770_write_smc_soft_register(rdev,
296 				      RV770_SMC_SOFT_REGISTER_mc_block_delay, 0xAA);
297 
298 	rv770_program_response_times(rdev);
299 
300 	if (ASIC_IS_LOMBOK(rdev))
301 		rv770_write_smc_soft_register(rdev,
302 					      RV770_SMC_SOFT_REGISTER_is_asic_lombok, 1);
303 
304 }
305 
306 static int cypress_pcie_performance_request(struct radeon_device *rdev,
307 					    u8 perf_req, bool advertise)
308 {
309 #if defined(CONFIG_ACPI)
310 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
311 #endif
312 	u32 tmp;
313 
314 	udelay(10);
315 	tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
316 	if ((perf_req == PCIE_PERF_REQ_PECI_GEN1) && (tmp & LC_CURRENT_DATA_RATE))
317 		return 0;
318 
319 #if defined(CONFIG_ACPI)
320 	if ((perf_req == PCIE_PERF_REQ_PECI_GEN1) ||
321 	    (perf_req == PCIE_PERF_REQ_PECI_GEN2)) {
322 		eg_pi->pcie_performance_request_registered = true;
323 		return radeon_acpi_pcie_performance_request(rdev, perf_req, advertise);
324 	} else if ((perf_req == PCIE_PERF_REQ_REMOVE_REGISTRY) &&
325 		   eg_pi->pcie_performance_request_registered) {
326 		eg_pi->pcie_performance_request_registered = false;
327 		return radeon_acpi_pcie_performance_request(rdev, perf_req, advertise);
328 	}
329 #endif
330 
331 	return 0;
332 }
333 
334 void cypress_advertise_gen2_capability(struct radeon_device *rdev)
335 {
336 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
337 	u32 tmp;
338 
339 #if defined(CONFIG_ACPI)
340 	radeon_acpi_pcie_notify_device_ready(rdev);
341 #endif
342 
343 	tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
344 
345 	if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
346 	    (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2))
347 		pi->pcie_gen2 = true;
348 	else
349 		pi->pcie_gen2 = false;
350 
351 	if (!pi->pcie_gen2)
352 		cypress_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, true);
353 
354 }
355 
356 static enum radeon_pcie_gen cypress_get_maximum_link_speed(struct radeon_ps *radeon_state)
357 {
358 	struct rv7xx_ps *state = rv770_get_ps(radeon_state);
359 
360 	if (state->high.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2)
361 		return 1;
362 	return 0;
363 }
364 
365 void cypress_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
366 							 struct radeon_ps *radeon_new_state,
367 							 struct radeon_ps *radeon_current_state)
368 {
369 	enum radeon_pcie_gen pcie_link_speed_target =
370 		cypress_get_maximum_link_speed(radeon_new_state);
371 	enum radeon_pcie_gen pcie_link_speed_current =
372 		cypress_get_maximum_link_speed(radeon_current_state);
373 	u8 request;
374 
375 	if (pcie_link_speed_target < pcie_link_speed_current) {
376 		if (pcie_link_speed_target == RADEON_PCIE_GEN1)
377 			request = PCIE_PERF_REQ_PECI_GEN1;
378 		else if (pcie_link_speed_target == RADEON_PCIE_GEN2)
379 			request = PCIE_PERF_REQ_PECI_GEN2;
380 		else
381 			request = PCIE_PERF_REQ_PECI_GEN3;
382 
383 		cypress_pcie_performance_request(rdev, request, false);
384 	}
385 }
386 
387 void cypress_notify_link_speed_change_before_state_change(struct radeon_device *rdev,
388 							  struct radeon_ps *radeon_new_state,
389 							  struct radeon_ps *radeon_current_state)
390 {
391 	enum radeon_pcie_gen pcie_link_speed_target =
392 		cypress_get_maximum_link_speed(radeon_new_state);
393 	enum radeon_pcie_gen pcie_link_speed_current =
394 		cypress_get_maximum_link_speed(radeon_current_state);
395 	u8 request;
396 
397 	if (pcie_link_speed_target > pcie_link_speed_current) {
398 		if (pcie_link_speed_target == RADEON_PCIE_GEN1)
399 			request = PCIE_PERF_REQ_PECI_GEN1;
400 		else if (pcie_link_speed_target == RADEON_PCIE_GEN2)
401 			request = PCIE_PERF_REQ_PECI_GEN2;
402 		else
403 			request = PCIE_PERF_REQ_PECI_GEN3;
404 
405 		cypress_pcie_performance_request(rdev, request, false);
406 	}
407 }
408 
409 static int cypress_populate_voltage_value(struct radeon_device *rdev,
410 					  struct atom_voltage_table *table,
411 					  u16 value, RV770_SMC_VOLTAGE_VALUE *voltage)
412 {
413 	unsigned int i;
414 
415 	for (i = 0; i < table->count; i++) {
416 		if (value <= table->entries[i].value) {
417 			voltage->index = (u8)i;
418 			voltage->value = cpu_to_be16(table->entries[i].value);
419 			break;
420 		}
421 	}
422 
423 	if (i == table->count)
424 		return -EINVAL;
425 
426 	return 0;
427 }
428 
429 u8 cypress_get_strobe_mode_settings(struct radeon_device *rdev, u32 mclk)
430 {
431 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
432 	u8 result = 0;
433 	bool strobe_mode = false;
434 
435 	if (pi->mem_gddr5) {
436 		if (mclk <= pi->mclk_strobe_mode_threshold)
437 			strobe_mode = true;
438 		result = cypress_get_mclk_frequency_ratio(rdev, mclk, strobe_mode);
439 
440 		if (strobe_mode)
441 			result |= SMC_STROBE_ENABLE;
442 	}
443 
444 	return result;
445 }
446 
447 u32 cypress_map_clkf_to_ibias(struct radeon_device *rdev, u32 clkf)
448 {
449 	u32 ref_clk = rdev->clock.mpll.reference_freq;
450 	u32 vco = clkf * ref_clk;
451 
452 	/* 100 MHz ref clk */
453 	if (ref_clk == 10000) {
454 		if (vco > 500000)
455 			return 0xC6;
456 		if (vco > 400000)
457 			return 0x9D;
458 		if (vco > 330000)
459 			return 0x6C;
460 		if (vco > 250000)
461 			return 0x2B;
462 		if (vco > 160000)
463 			return 0x5B;
464 		if (vco > 120000)
465 			return 0x0A;
466 		return 0x4B;
467 	}
468 
469 	/* 27 MHz ref clk */
470 	if (vco > 250000)
471 		return 0x8B;
472 	if (vco > 200000)
473 		return 0xCC;
474 	if (vco > 150000)
475 		return 0x9B;
476 	return 0x6B;
477 }
478 
479 static int cypress_populate_mclk_value(struct radeon_device *rdev,
480 				       u32 engine_clock, u32 memory_clock,
481 				       RV7XX_SMC_MCLK_VALUE *mclk,
482 				       bool strobe_mode, bool dll_state_on)
483 {
484 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
485 
486 	u32 mpll_ad_func_cntl =
487 		pi->clk_regs.rv770.mpll_ad_func_cntl;
488 	u32 mpll_ad_func_cntl_2 =
489 		pi->clk_regs.rv770.mpll_ad_func_cntl_2;
490 	u32 mpll_dq_func_cntl =
491 		pi->clk_regs.rv770.mpll_dq_func_cntl;
492 	u32 mpll_dq_func_cntl_2 =
493 		pi->clk_regs.rv770.mpll_dq_func_cntl_2;
494 	u32 mclk_pwrmgt_cntl =
495 		pi->clk_regs.rv770.mclk_pwrmgt_cntl;
496 	u32 dll_cntl =
497 		pi->clk_regs.rv770.dll_cntl;
498 	u32 mpll_ss1 = pi->clk_regs.rv770.mpll_ss1;
499 	u32 mpll_ss2 = pi->clk_regs.rv770.mpll_ss2;
500 	struct atom_clock_dividers dividers;
501 	u32 ibias;
502 	u32 dll_speed;
503 	int ret;
504 	u32 mc_seq_misc7;
505 
506 	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_MEMORY_PLL_PARAM,
507 					     memory_clock, strobe_mode, &dividers);
508 	if (ret)
509 		return ret;
510 
511 	if (!strobe_mode) {
512 		mc_seq_misc7 = RREG32(MC_SEQ_MISC7);
513 
514 		if (mc_seq_misc7 & 0x8000000)
515 			dividers.post_div = 1;
516 	}
517 
518 	ibias = cypress_map_clkf_to_ibias(rdev, dividers.whole_fb_div);
519 
520 	mpll_ad_func_cntl &= ~(CLKR_MASK |
521 			       YCLK_POST_DIV_MASK |
522 			       CLKF_MASK |
523 			       CLKFRAC_MASK |
524 			       IBIAS_MASK);
525 	mpll_ad_func_cntl |= CLKR(dividers.ref_div);
526 	mpll_ad_func_cntl |= YCLK_POST_DIV(dividers.post_div);
527 	mpll_ad_func_cntl |= CLKF(dividers.whole_fb_div);
528 	mpll_ad_func_cntl |= CLKFRAC(dividers.frac_fb_div);
529 	mpll_ad_func_cntl |= IBIAS(ibias);
530 
531 	if (dividers.vco_mode)
532 		mpll_ad_func_cntl_2 |= VCO_MODE;
533 	else
534 		mpll_ad_func_cntl_2 &= ~VCO_MODE;
535 
536 	if (pi->mem_gddr5) {
537 		mpll_dq_func_cntl &= ~(CLKR_MASK |
538 				       YCLK_POST_DIV_MASK |
539 				       CLKF_MASK |
540 				       CLKFRAC_MASK |
541 				       IBIAS_MASK);
542 		mpll_dq_func_cntl |= CLKR(dividers.ref_div);
543 		mpll_dq_func_cntl |= YCLK_POST_DIV(dividers.post_div);
544 		mpll_dq_func_cntl |= CLKF(dividers.whole_fb_div);
545 		mpll_dq_func_cntl |= CLKFRAC(dividers.frac_fb_div);
546 		mpll_dq_func_cntl |= IBIAS(ibias);
547 
548 		if (strobe_mode)
549 			mpll_dq_func_cntl &= ~PDNB;
550 		else
551 			mpll_dq_func_cntl |= PDNB;
552 
553 		if (dividers.vco_mode)
554 			mpll_dq_func_cntl_2 |= VCO_MODE;
555 		else
556 			mpll_dq_func_cntl_2 &= ~VCO_MODE;
557 	}
558 
559 	if (pi->mclk_ss) {
560 		struct radeon_atom_ss ss;
561 		u32 vco_freq = memory_clock * dividers.post_div;
562 
563 		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
564 						     ASIC_INTERNAL_MEMORY_SS, vco_freq)) {
565 			u32 reference_clock = rdev->clock.mpll.reference_freq;
566 			u32 decoded_ref = rv740_get_decoded_reference_divider(dividers.ref_div);
567 			u32 clk_s = reference_clock * 5 / (decoded_ref * ss.rate);
568 			u32 clk_v = ss.percentage *
569 				(0x4000 * dividers.whole_fb_div + 0x800 * dividers.frac_fb_div) / (clk_s * 625);
570 
571 			mpll_ss1 &= ~CLKV_MASK;
572 			mpll_ss1 |= CLKV(clk_v);
573 
574 			mpll_ss2 &= ~CLKS_MASK;
575 			mpll_ss2 |= CLKS(clk_s);
576 		}
577 	}
578 
579 	dll_speed = rv740_get_dll_speed(pi->mem_gddr5,
580 					memory_clock);
581 
582 	mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
583 	mclk_pwrmgt_cntl |= DLL_SPEED(dll_speed);
584 	if (dll_state_on)
585 		mclk_pwrmgt_cntl |= (MRDCKA0_PDNB |
586 				     MRDCKA1_PDNB |
587 				     MRDCKB0_PDNB |
588 				     MRDCKB1_PDNB |
589 				     MRDCKC0_PDNB |
590 				     MRDCKC1_PDNB |
591 				     MRDCKD0_PDNB |
592 				     MRDCKD1_PDNB);
593 	else
594 		mclk_pwrmgt_cntl &= ~(MRDCKA0_PDNB |
595 				      MRDCKA1_PDNB |
596 				      MRDCKB0_PDNB |
597 				      MRDCKB1_PDNB |
598 				      MRDCKC0_PDNB |
599 				      MRDCKC1_PDNB |
600 				      MRDCKD0_PDNB |
601 				      MRDCKD1_PDNB);
602 
603 	mclk->mclk770.mclk_value = cpu_to_be32(memory_clock);
604 	mclk->mclk770.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
605 	mclk->mclk770.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
606 	mclk->mclk770.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
607 	mclk->mclk770.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
608 	mclk->mclk770.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
609 	mclk->mclk770.vDLL_CNTL = cpu_to_be32(dll_cntl);
610 	mclk->mclk770.vMPLL_SS = cpu_to_be32(mpll_ss1);
611 	mclk->mclk770.vMPLL_SS2 = cpu_to_be32(mpll_ss2);
612 
613 	return 0;
614 }
615 
616 u8 cypress_get_mclk_frequency_ratio(struct radeon_device *rdev,
617 				    u32 memory_clock, bool strobe_mode)
618 {
619 	u8 mc_para_index;
620 
621 	if (rdev->family >= CHIP_BARTS) {
622 		if (strobe_mode) {
623 			if (memory_clock < 10000)
624 				mc_para_index = 0x00;
625 			else if (memory_clock > 47500)
626 				mc_para_index = 0x0f;
627 			else
628 				mc_para_index = (u8)((memory_clock - 10000) / 2500);
629 		} else {
630 			if (memory_clock < 65000)
631 				mc_para_index = 0x00;
632 			else if (memory_clock > 135000)
633 				mc_para_index = 0x0f;
634 			else
635 				mc_para_index = (u8)((memory_clock - 60000) / 5000);
636 		}
637 	} else {
638 		if (strobe_mode) {
639 			if (memory_clock < 10000)
640 				mc_para_index = 0x00;
641 			else if (memory_clock > 47500)
642 				mc_para_index = 0x0f;
643 			else
644 				mc_para_index = (u8)((memory_clock - 10000) / 2500);
645 		} else {
646 			if (memory_clock < 40000)
647 				mc_para_index = 0x00;
648 			else if (memory_clock > 115000)
649 				mc_para_index = 0x0f;
650 			else
651 				mc_para_index = (u8)((memory_clock - 40000) / 5000);
652 		}
653 	}
654 	return mc_para_index;
655 }
656 
657 static int cypress_populate_mvdd_value(struct radeon_device *rdev,
658 				       u32 mclk,
659 				       RV770_SMC_VOLTAGE_VALUE *voltage)
660 {
661 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
662 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
663 
664 	if (!pi->mvdd_control) {
665 		voltage->index = eg_pi->mvdd_high_index;
666 		voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
667 		return 0;
668 	}
669 
670 	if (mclk <= pi->mvdd_split_frequency) {
671 		voltage->index = eg_pi->mvdd_low_index;
672 		voltage->value = cpu_to_be16(MVDD_LOW_VALUE);
673 	} else {
674 		voltage->index = eg_pi->mvdd_high_index;
675 		voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
676 	}
677 
678 	return 0;
679 }
680 
681 int cypress_convert_power_level_to_smc(struct radeon_device *rdev,
682 				       struct rv7xx_pl *pl,
683 				       RV770_SMC_HW_PERFORMANCE_LEVEL *level,
684 				       u8 watermark_level)
685 {
686 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
687 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
688 	int ret;
689 	bool dll_state_on;
690 
691 	level->gen2PCIE = pi->pcie_gen2 ?
692 		((pl->flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? 1 : 0) : 0;
693 	level->gen2XSP  = (pl->flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? 1 : 0;
694 	level->backbias = (pl->flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) ? 1 : 0;
695 	level->displayWatermark = watermark_level;
696 
697 	ret = rv740_populate_sclk_value(rdev, pl->sclk, &level->sclk);
698 	if (ret)
699 		return ret;
700 
701 	level->mcFlags =  0;
702 	if (pi->mclk_stutter_mode_threshold &&
703 	    (pl->mclk <= pi->mclk_stutter_mode_threshold) &&
704 	    !eg_pi->uvd_enabled) {
705 		level->mcFlags |= SMC_MC_STUTTER_EN;
706 		if (eg_pi->sclk_deep_sleep)
707 			level->stateFlags |= PPSMC_STATEFLAG_AUTO_PULSE_SKIP;
708 		else
709 			level->stateFlags &= ~PPSMC_STATEFLAG_AUTO_PULSE_SKIP;
710 	}
711 
712 	if (pi->mem_gddr5) {
713 		if (pl->mclk > pi->mclk_edc_enable_threshold)
714 			level->mcFlags |= SMC_MC_EDC_RD_FLAG;
715 
716 		if (pl->mclk > eg_pi->mclk_edc_wr_enable_threshold)
717 			level->mcFlags |= SMC_MC_EDC_WR_FLAG;
718 
719 		level->strobeMode = cypress_get_strobe_mode_settings(rdev, pl->mclk);
720 
721 		if (level->strobeMode & SMC_STROBE_ENABLE) {
722 			if (cypress_get_mclk_frequency_ratio(rdev, pl->mclk, true) >=
723 			    ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
724 				dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
725 			else
726 				dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
727 		} else
728 			dll_state_on = eg_pi->dll_default_on;
729 
730 		ret = cypress_populate_mclk_value(rdev,
731 						  pl->sclk,
732 						  pl->mclk,
733 						  &level->mclk,
734 						  (level->strobeMode & SMC_STROBE_ENABLE) != 0,
735 						  dll_state_on);
736 	} else {
737 		ret = cypress_populate_mclk_value(rdev,
738 						  pl->sclk,
739 						  pl->mclk,
740 						  &level->mclk,
741 						  true,
742 						  true);
743 	}
744 	if (ret)
745 		return ret;
746 
747 	ret = cypress_populate_voltage_value(rdev,
748 					     &eg_pi->vddc_voltage_table,
749 					     pl->vddc,
750 					     &level->vddc);
751 	if (ret)
752 		return ret;
753 
754 	if (eg_pi->vddci_control) {
755 		ret = cypress_populate_voltage_value(rdev,
756 						     &eg_pi->vddci_voltage_table,
757 						     pl->vddci,
758 						     &level->vddci);
759 		if (ret)
760 			return ret;
761 	}
762 
763 	ret = cypress_populate_mvdd_value(rdev, pl->mclk, &level->mvdd);
764 
765 	return ret;
766 }
767 
768 static int cypress_convert_power_state_to_smc(struct radeon_device *rdev,
769 					      struct radeon_ps *radeon_state,
770 					      RV770_SMC_SWSTATE *smc_state)
771 {
772 	struct rv7xx_ps *state = rv770_get_ps(radeon_state);
773 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
774 	int ret;
775 
776 	if (!(radeon_state->caps & ATOM_PPLIB_DISALLOW_ON_DC))
777 		smc_state->flags |= PPSMC_SWSTATE_FLAG_DC;
778 
779 	ret = cypress_convert_power_level_to_smc(rdev,
780 						 &state->low,
781 						 &smc_state->levels[0],
782 						 PPSMC_DISPLAY_WATERMARK_LOW);
783 	if (ret)
784 		return ret;
785 
786 	ret = cypress_convert_power_level_to_smc(rdev,
787 						 &state->medium,
788 						 &smc_state->levels[1],
789 						 PPSMC_DISPLAY_WATERMARK_LOW);
790 	if (ret)
791 		return ret;
792 
793 	ret = cypress_convert_power_level_to_smc(rdev,
794 						 &state->high,
795 						 &smc_state->levels[2],
796 						 PPSMC_DISPLAY_WATERMARK_HIGH);
797 	if (ret)
798 		return ret;
799 
800 	smc_state->levels[0].arbValue = MC_CG_ARB_FREQ_F1;
801 	smc_state->levels[1].arbValue = MC_CG_ARB_FREQ_F2;
802 	smc_state->levels[2].arbValue = MC_CG_ARB_FREQ_F3;
803 
804 	if (eg_pi->dynamic_ac_timing) {
805 		smc_state->levels[0].ACIndex = 2;
806 		smc_state->levels[1].ACIndex = 3;
807 		smc_state->levels[2].ACIndex = 4;
808 	} else {
809 		smc_state->levels[0].ACIndex = 0;
810 		smc_state->levels[1].ACIndex = 0;
811 		smc_state->levels[2].ACIndex = 0;
812 	}
813 
814 	rv770_populate_smc_sp(rdev, radeon_state, smc_state);
815 
816 	return rv770_populate_smc_t(rdev, radeon_state, smc_state);
817 }
818 
819 static void cypress_convert_mc_registers(struct evergreen_mc_reg_entry *entry,
820 					 SMC_Evergreen_MCRegisterSet *data,
821 					 u32 num_entries, u32 valid_flag)
822 {
823 	u32 i, j;
824 
825 	for (i = 0, j = 0; j < num_entries; j++) {
826 		if (valid_flag & (1 << j)) {
827 			data->value[i] = cpu_to_be32(entry->mc_data[j]);
828 			i++;
829 		}
830 	}
831 }
832 
833 static void cypress_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
834 						      struct rv7xx_pl *pl,
835 						      SMC_Evergreen_MCRegisterSet *mc_reg_table_data)
836 {
837 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
838 	u32 i = 0;
839 
840 	for (i = 0; i < eg_pi->mc_reg_table.num_entries; i++) {
841 		if (pl->mclk <=
842 		    eg_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
843 			break;
844 	}
845 
846 	if ((i == eg_pi->mc_reg_table.num_entries) && (i > 0))
847 		--i;
848 
849 	cypress_convert_mc_registers(&eg_pi->mc_reg_table.mc_reg_table_entry[i],
850 				     mc_reg_table_data,
851 				     eg_pi->mc_reg_table.last,
852 				     eg_pi->mc_reg_table.valid_flag);
853 }
854 
855 static void cypress_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
856 						struct radeon_ps *radeon_state,
857 						SMC_Evergreen_MCRegisters *mc_reg_table)
858 {
859 	struct rv7xx_ps *state = rv770_get_ps(radeon_state);
860 
861 	cypress_convert_mc_reg_table_entry_to_smc(rdev,
862 						  &state->low,
863 						  &mc_reg_table->data[2]);
864 	cypress_convert_mc_reg_table_entry_to_smc(rdev,
865 						  &state->medium,
866 						  &mc_reg_table->data[3]);
867 	cypress_convert_mc_reg_table_entry_to_smc(rdev,
868 						  &state->high,
869 						  &mc_reg_table->data[4]);
870 }
871 
872 int cypress_upload_sw_state(struct radeon_device *rdev,
873 			    struct radeon_ps *radeon_new_state)
874 {
875 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
876 	u16 address = pi->state_table_start +
877 		offsetof(RV770_SMC_STATETABLE, driverState);
878 	RV770_SMC_SWSTATE state = { 0 };
879 	int ret;
880 
881 	ret = cypress_convert_power_state_to_smc(rdev, radeon_new_state, &state);
882 	if (ret)
883 		return ret;
884 
885 	return rv770_copy_bytes_to_smc(rdev, address, (u8 *)&state,
886 				    sizeof(RV770_SMC_SWSTATE),
887 				    pi->sram_end);
888 }
889 
890 int cypress_upload_mc_reg_table(struct radeon_device *rdev,
891 				struct radeon_ps *radeon_new_state)
892 {
893 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
894 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
895 	SMC_Evergreen_MCRegisters mc_reg_table = { 0 };
896 	u16 address;
897 
898 	cypress_convert_mc_reg_table_to_smc(rdev, radeon_new_state, &mc_reg_table);
899 
900 	address = eg_pi->mc_reg_table_start +
901 		(u16)offsetof(SMC_Evergreen_MCRegisters, data[2]);
902 
903 	return rv770_copy_bytes_to_smc(rdev, address,
904 				       (u8 *)&mc_reg_table.data[2],
905 				       sizeof(SMC_Evergreen_MCRegisterSet) * 3,
906 				       pi->sram_end);
907 }
908 
909 u32 cypress_calculate_burst_time(struct radeon_device *rdev,
910 				 u32 engine_clock, u32 memory_clock)
911 {
912 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
913 	u32 multiplier = pi->mem_gddr5 ? 1 : 2;
914 	u32 result = (4 * multiplier * engine_clock) / (memory_clock / 2);
915 	u32 burst_time;
916 
917 	if (result <= 4)
918 		burst_time = 0;
919 	else if (result < 8)
920 		burst_time = result - 4;
921 	else {
922 		burst_time = result / 2;
923 		if (burst_time > 18)
924 			burst_time = 18;
925 	}
926 
927 	return burst_time;
928 }
929 
930 void cypress_program_memory_timing_parameters(struct radeon_device *rdev,
931 					      struct radeon_ps *radeon_new_state)
932 {
933 	struct rv7xx_ps *new_state = rv770_get_ps(radeon_new_state);
934 	u32 mc_arb_burst_time = RREG32(MC_ARB_BURST_TIME);
935 
936 	mc_arb_burst_time &= ~(STATE1_MASK | STATE2_MASK | STATE3_MASK);
937 
938 	mc_arb_burst_time |= STATE1(cypress_calculate_burst_time(rdev,
939 								 new_state->low.sclk,
940 								 new_state->low.mclk));
941 	mc_arb_burst_time |= STATE2(cypress_calculate_burst_time(rdev,
942 								 new_state->medium.sclk,
943 								 new_state->medium.mclk));
944 	mc_arb_burst_time |= STATE3(cypress_calculate_burst_time(rdev,
945 								 new_state->high.sclk,
946 								 new_state->high.mclk));
947 
948 	rv730_program_memory_timing_parameters(rdev, radeon_new_state);
949 
950 	WREG32(MC_ARB_BURST_TIME, mc_arb_burst_time);
951 }
952 
953 static void cypress_populate_mc_reg_addresses(struct radeon_device *rdev,
954 					      SMC_Evergreen_MCRegisters *mc_reg_table)
955 {
956 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
957 	u32 i, j;
958 
959 	for (i = 0, j = 0; j < eg_pi->mc_reg_table.last; j++) {
960 		if (eg_pi->mc_reg_table.valid_flag & (1 << j)) {
961 			mc_reg_table->address[i].s0 =
962 				cpu_to_be16(eg_pi->mc_reg_table.mc_reg_address[j].s0);
963 			mc_reg_table->address[i].s1 =
964 				cpu_to_be16(eg_pi->mc_reg_table.mc_reg_address[j].s1);
965 			i++;
966 		}
967 	}
968 
969 	mc_reg_table->last = (u8)i;
970 }
971 
972 static void cypress_set_mc_reg_address_table(struct radeon_device *rdev)
973 {
974 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
975 	u32 i = 0;
976 
977 	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_RAS_TIMING_LP >> 2;
978 	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_RAS_TIMING >> 2;
979 	i++;
980 
981 	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_CAS_TIMING_LP >> 2;
982 	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_CAS_TIMING >> 2;
983 	i++;
984 
985 	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_MISC_TIMING_LP >> 2;
986 	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_MISC_TIMING >> 2;
987 	i++;
988 
989 	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_MISC_TIMING2_LP >> 2;
990 	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_MISC_TIMING2 >> 2;
991 	i++;
992 
993 	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_RD_CTL_D0_LP >> 2;
994 	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_RD_CTL_D0 >> 2;
995 	i++;
996 
997 	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_RD_CTL_D1_LP >> 2;
998 	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_RD_CTL_D1 >> 2;
999 	i++;
1000 
1001 	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_WR_CTL_D0_LP >> 2;
1002 	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_WR_CTL_D0 >> 2;
1003 	i++;
1004 
1005 	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_WR_CTL_D1_LP >> 2;
1006 	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_WR_CTL_D1 >> 2;
1007 	i++;
1008 
1009 	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
1010 	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_PMG_CMD_EMRS >> 2;
1011 	i++;
1012 
1013 	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
1014 	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_PMG_CMD_MRS >> 2;
1015 	i++;
1016 
1017 	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
1018 	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_PMG_CMD_MRS1 >> 2;
1019 	i++;
1020 
1021 	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_MISC1 >> 2;
1022 	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_MISC1 >> 2;
1023 	i++;
1024 
1025 	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_RESERVE_M >> 2;
1026 	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_RESERVE_M >> 2;
1027 	i++;
1028 
1029 	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_MISC3 >> 2;
1030 	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_MISC3 >> 2;
1031 	i++;
1032 
1033 	eg_pi->mc_reg_table.last = (u8)i;
1034 }
1035 
1036 static void cypress_retrieve_ac_timing_for_one_entry(struct radeon_device *rdev,
1037 						     struct evergreen_mc_reg_entry *entry)
1038 {
1039 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1040 	u32 i;
1041 
1042 	for (i = 0; i < eg_pi->mc_reg_table.last; i++)
1043 		entry->mc_data[i] =
1044 			RREG32(eg_pi->mc_reg_table.mc_reg_address[i].s1 << 2);
1045 
1046 }
1047 
1048 static void cypress_retrieve_ac_timing_for_all_ranges(struct radeon_device *rdev,
1049 						      struct atom_memory_clock_range_table *range_table)
1050 {
1051 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1052 	u32 i, j;
1053 
1054 	for (i = 0; i < range_table->num_entries; i++) {
1055 		eg_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max =
1056 			range_table->mclk[i];
1057 		radeon_atom_set_ac_timing(rdev, range_table->mclk[i]);
1058 		cypress_retrieve_ac_timing_for_one_entry(rdev,
1059 							 &eg_pi->mc_reg_table.mc_reg_table_entry[i]);
1060 	}
1061 
1062 	eg_pi->mc_reg_table.num_entries = range_table->num_entries;
1063 	eg_pi->mc_reg_table.valid_flag = 0;
1064 
1065 	for (i = 0; i < eg_pi->mc_reg_table.last; i++) {
1066 		for (j = 1; j < range_table->num_entries; j++) {
1067 			if (eg_pi->mc_reg_table.mc_reg_table_entry[j-1].mc_data[i] !=
1068 			    eg_pi->mc_reg_table.mc_reg_table_entry[j].mc_data[i]) {
1069 				eg_pi->mc_reg_table.valid_flag |= (1 << i);
1070 				break;
1071 			}
1072 		}
1073 	}
1074 }
1075 
1076 static int cypress_initialize_mc_reg_table(struct radeon_device *rdev)
1077 {
1078 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1079 	u8 module_index = rv770_get_memory_module_index(rdev);
1080 	struct atom_memory_clock_range_table range_table = { 0 };
1081 	int ret;
1082 
1083 	ret = radeon_atom_get_mclk_range_table(rdev,
1084 					       pi->mem_gddr5,
1085 					       module_index, &range_table);
1086 	if (ret)
1087 		return ret;
1088 
1089 	cypress_retrieve_ac_timing_for_all_ranges(rdev, &range_table);
1090 
1091 	return 0;
1092 }
1093 
1094 static void cypress_wait_for_mc_sequencer(struct radeon_device *rdev, u8 value)
1095 {
1096 	u32 i, j;
1097 	u32 channels = 2;
1098 
1099 	if ((rdev->family == CHIP_CYPRESS) ||
1100 	    (rdev->family == CHIP_HEMLOCK))
1101 		channels = 4;
1102 	else if (rdev->family == CHIP_CEDAR)
1103 		channels = 1;
1104 
1105 	for (i = 0; i < channels; i++) {
1106 		if ((rdev->family == CHIP_CYPRESS) ||
1107 		    (rdev->family == CHIP_HEMLOCK)) {
1108 			WREG32_P(MC_CONFIG_MCD, MC_RD_ENABLE_MCD(i), ~MC_RD_ENABLE_MCD_MASK);
1109 			WREG32_P(MC_CG_CONFIG_MCD, MC_RD_ENABLE_MCD(i), ~MC_RD_ENABLE_MCD_MASK);
1110 		} else {
1111 			WREG32_P(MC_CONFIG, MC_RD_ENABLE(i), ~MC_RD_ENABLE_MASK);
1112 			WREG32_P(MC_CG_CONFIG, MC_RD_ENABLE(i), ~MC_RD_ENABLE_MASK);
1113 		}
1114 		for (j = 0; j < rdev->usec_timeout; j++) {
1115 			if (((RREG32(MC_SEQ_CG) & CG_SEQ_RESP_MASK) >> CG_SEQ_RESP_SHIFT) == value)
1116 				break;
1117 			udelay(1);
1118 		}
1119 	}
1120 }
1121 
1122 static void cypress_force_mc_use_s1(struct radeon_device *rdev,
1123 				    struct radeon_ps *radeon_boot_state)
1124 {
1125 	struct rv7xx_ps *boot_state = rv770_get_ps(radeon_boot_state);
1126 	u32 strobe_mode;
1127 	u32 mc_seq_cg;
1128 	int i;
1129 
1130 	if (RREG32(MC_SEQ_STATUS_M) & PMG_PWRSTATE)
1131 		return;
1132 
1133 	radeon_atom_set_ac_timing(rdev, boot_state->low.mclk);
1134 	radeon_mc_wait_for_idle(rdev);
1135 
1136 	if ((rdev->family == CHIP_CYPRESS) ||
1137 	    (rdev->family == CHIP_HEMLOCK)) {
1138 		WREG32(MC_CONFIG_MCD, 0xf);
1139 		WREG32(MC_CG_CONFIG_MCD, 0xf);
1140 	} else {
1141 		WREG32(MC_CONFIG, 0xf);
1142 		WREG32(MC_CG_CONFIG, 0xf);
1143 	}
1144 
1145 	for (i = 0; i < rdev->num_crtc; i++)
1146 		radeon_wait_for_vblank(rdev, i);
1147 
1148 	WREG32(MC_SEQ_CG, MC_CG_SEQ_YCLK_SUSPEND);
1149 	cypress_wait_for_mc_sequencer(rdev, MC_CG_SEQ_YCLK_SUSPEND);
1150 
1151 	strobe_mode = cypress_get_strobe_mode_settings(rdev,
1152 						       boot_state->low.mclk);
1153 
1154 	mc_seq_cg = CG_SEQ_REQ(MC_CG_SEQ_DRAMCONF_S1);
1155 	mc_seq_cg |= SEQ_CG_RESP(strobe_mode);
1156 	WREG32(MC_SEQ_CG, mc_seq_cg);
1157 
1158 	for (i = 0; i < rdev->usec_timeout; i++) {
1159 		if (RREG32(MC_SEQ_STATUS_M) & PMG_PWRSTATE)
1160 			break;
1161 		udelay(1);
1162 	}
1163 
1164 	mc_seq_cg &= ~CG_SEQ_REQ_MASK;
1165 	mc_seq_cg |= CG_SEQ_REQ(MC_CG_SEQ_YCLK_RESUME);
1166 	WREG32(MC_SEQ_CG, mc_seq_cg);
1167 
1168 	cypress_wait_for_mc_sequencer(rdev, MC_CG_SEQ_YCLK_RESUME);
1169 }
1170 
1171 static void cypress_copy_ac_timing_from_s1_to_s0(struct radeon_device *rdev)
1172 {
1173 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1174 	u32 value;
1175 	u32 i;
1176 
1177 	for (i = 0; i < eg_pi->mc_reg_table.last; i++) {
1178 		value = RREG32(eg_pi->mc_reg_table.mc_reg_address[i].s1 << 2);
1179 		WREG32(eg_pi->mc_reg_table.mc_reg_address[i].s0 << 2, value);
1180 	}
1181 }
1182 
1183 static void cypress_force_mc_use_s0(struct radeon_device *rdev,
1184 				    struct radeon_ps *radeon_boot_state)
1185 {
1186 	struct rv7xx_ps *boot_state = rv770_get_ps(radeon_boot_state);
1187 	u32 strobe_mode;
1188 	u32 mc_seq_cg;
1189 	int i;
1190 
1191 	cypress_copy_ac_timing_from_s1_to_s0(rdev);
1192 	radeon_mc_wait_for_idle(rdev);
1193 
1194 	if ((rdev->family == CHIP_CYPRESS) ||
1195 	    (rdev->family == CHIP_HEMLOCK)) {
1196 		WREG32(MC_CONFIG_MCD, 0xf);
1197 		WREG32(MC_CG_CONFIG_MCD, 0xf);
1198 	} else {
1199 		WREG32(MC_CONFIG, 0xf);
1200 		WREG32(MC_CG_CONFIG, 0xf);
1201 	}
1202 
1203 	for (i = 0; i < rdev->num_crtc; i++)
1204 		radeon_wait_for_vblank(rdev, i);
1205 
1206 	WREG32(MC_SEQ_CG, MC_CG_SEQ_YCLK_SUSPEND);
1207 	cypress_wait_for_mc_sequencer(rdev, MC_CG_SEQ_YCLK_SUSPEND);
1208 
1209 	strobe_mode = cypress_get_strobe_mode_settings(rdev,
1210 						       boot_state->low.mclk);
1211 
1212 	mc_seq_cg = CG_SEQ_REQ(MC_CG_SEQ_DRAMCONF_S0);
1213 	mc_seq_cg |= SEQ_CG_RESP(strobe_mode);
1214 	WREG32(MC_SEQ_CG, mc_seq_cg);
1215 
1216 	for (i = 0; i < rdev->usec_timeout; i++) {
1217 		if (!(RREG32(MC_SEQ_STATUS_M) & PMG_PWRSTATE))
1218 			break;
1219 		udelay(1);
1220 	}
1221 
1222 	mc_seq_cg &= ~CG_SEQ_REQ_MASK;
1223 	mc_seq_cg |= CG_SEQ_REQ(MC_CG_SEQ_YCLK_RESUME);
1224 	WREG32(MC_SEQ_CG, mc_seq_cg);
1225 
1226 	cypress_wait_for_mc_sequencer(rdev, MC_CG_SEQ_YCLK_RESUME);
1227 }
1228 
1229 static int cypress_populate_initial_mvdd_value(struct radeon_device *rdev,
1230 					       RV770_SMC_VOLTAGE_VALUE *voltage)
1231 {
1232 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1233 
1234 	voltage->index = eg_pi->mvdd_high_index;
1235 	voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
1236 
1237 	return 0;
1238 }
1239 
1240 int cypress_populate_smc_initial_state(struct radeon_device *rdev,
1241 				       struct radeon_ps *radeon_initial_state,
1242 				       RV770_SMC_STATETABLE *table)
1243 {
1244 	struct rv7xx_ps *initial_state = rv770_get_ps(radeon_initial_state);
1245 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1246 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1247 	u32 a_t;
1248 
1249 	table->initialState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL =
1250 		cpu_to_be32(pi->clk_regs.rv770.mpll_ad_func_cntl);
1251 	table->initialState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL_2 =
1252 		cpu_to_be32(pi->clk_regs.rv770.mpll_ad_func_cntl_2);
1253 	table->initialState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL =
1254 		cpu_to_be32(pi->clk_regs.rv770.mpll_dq_func_cntl);
1255 	table->initialState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL_2 =
1256 		cpu_to_be32(pi->clk_regs.rv770.mpll_dq_func_cntl_2);
1257 	table->initialState.levels[0].mclk.mclk770.vMCLK_PWRMGT_CNTL =
1258 		cpu_to_be32(pi->clk_regs.rv770.mclk_pwrmgt_cntl);
1259 	table->initialState.levels[0].mclk.mclk770.vDLL_CNTL =
1260 		cpu_to_be32(pi->clk_regs.rv770.dll_cntl);
1261 
1262 	table->initialState.levels[0].mclk.mclk770.vMPLL_SS =
1263 		cpu_to_be32(pi->clk_regs.rv770.mpll_ss1);
1264 	table->initialState.levels[0].mclk.mclk770.vMPLL_SS2 =
1265 		cpu_to_be32(pi->clk_regs.rv770.mpll_ss2);
1266 
1267 	table->initialState.levels[0].mclk.mclk770.mclk_value =
1268 		cpu_to_be32(initial_state->low.mclk);
1269 
1270 	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
1271 		cpu_to_be32(pi->clk_regs.rv770.cg_spll_func_cntl);
1272 	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
1273 		cpu_to_be32(pi->clk_regs.rv770.cg_spll_func_cntl_2);
1274 	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
1275 		cpu_to_be32(pi->clk_regs.rv770.cg_spll_func_cntl_3);
1276 	table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
1277 		cpu_to_be32(pi->clk_regs.rv770.cg_spll_spread_spectrum);
1278 	table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
1279 		cpu_to_be32(pi->clk_regs.rv770.cg_spll_spread_spectrum_2);
1280 
1281 	table->initialState.levels[0].sclk.sclk_value =
1282 		cpu_to_be32(initial_state->low.sclk);
1283 
1284 	table->initialState.levels[0].arbValue = MC_CG_ARB_FREQ_F0;
1285 
1286 	table->initialState.levels[0].ACIndex = 0;
1287 
1288 	cypress_populate_voltage_value(rdev,
1289 				       &eg_pi->vddc_voltage_table,
1290 				       initial_state->low.vddc,
1291 				       &table->initialState.levels[0].vddc);
1292 
1293 	if (eg_pi->vddci_control)
1294 		cypress_populate_voltage_value(rdev,
1295 					       &eg_pi->vddci_voltage_table,
1296 					       initial_state->low.vddci,
1297 					       &table->initialState.levels[0].vddci);
1298 
1299 	cypress_populate_initial_mvdd_value(rdev,
1300 					    &table->initialState.levels[0].mvdd);
1301 
1302 	a_t = CG_R(0xffff) | CG_L(0);
1303 	table->initialState.levels[0].aT = cpu_to_be32(a_t);
1304 
1305 	table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
1306 
1307 
1308 	if (pi->boot_in_gen2)
1309 		table->initialState.levels[0].gen2PCIE = 1;
1310 	else
1311 		table->initialState.levels[0].gen2PCIE = 0;
1312 	if (initial_state->low.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2)
1313 		table->initialState.levels[0].gen2XSP = 1;
1314 	else
1315 		table->initialState.levels[0].gen2XSP = 0;
1316 
1317 	if (pi->mem_gddr5) {
1318 		table->initialState.levels[0].strobeMode =
1319 			cypress_get_strobe_mode_settings(rdev,
1320 							 initial_state->low.mclk);
1321 
1322 		if (initial_state->low.mclk > pi->mclk_edc_enable_threshold)
1323 			table->initialState.levels[0].mcFlags = SMC_MC_EDC_RD_FLAG | SMC_MC_EDC_WR_FLAG;
1324 		else
1325 			table->initialState.levels[0].mcFlags =  0;
1326 	}
1327 
1328 	table->initialState.levels[1] = table->initialState.levels[0];
1329 	table->initialState.levels[2] = table->initialState.levels[0];
1330 
1331 	table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
1332 
1333 	return 0;
1334 }
1335 
1336 int cypress_populate_smc_acpi_state(struct radeon_device *rdev,
1337 				    RV770_SMC_STATETABLE *table)
1338 {
1339 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1340 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1341 	u32 mpll_ad_func_cntl =
1342 		pi->clk_regs.rv770.mpll_ad_func_cntl;
1343 	u32 mpll_ad_func_cntl_2 =
1344 		pi->clk_regs.rv770.mpll_ad_func_cntl_2;
1345 	u32 mpll_dq_func_cntl =
1346 		pi->clk_regs.rv770.mpll_dq_func_cntl;
1347 	u32 mpll_dq_func_cntl_2 =
1348 		pi->clk_regs.rv770.mpll_dq_func_cntl_2;
1349 	u32 spll_func_cntl =
1350 		pi->clk_regs.rv770.cg_spll_func_cntl;
1351 	u32 spll_func_cntl_2 =
1352 		pi->clk_regs.rv770.cg_spll_func_cntl_2;
1353 	u32 spll_func_cntl_3 =
1354 		pi->clk_regs.rv770.cg_spll_func_cntl_3;
1355 	u32 mclk_pwrmgt_cntl =
1356 		pi->clk_regs.rv770.mclk_pwrmgt_cntl;
1357 	u32 dll_cntl =
1358 		pi->clk_regs.rv770.dll_cntl;
1359 
1360 	table->ACPIState = table->initialState;
1361 
1362 	table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;
1363 
1364 	if (pi->acpi_vddc) {
1365 		cypress_populate_voltage_value(rdev,
1366 					       &eg_pi->vddc_voltage_table,
1367 					       pi->acpi_vddc,
1368 					       &table->ACPIState.levels[0].vddc);
1369 		if (pi->pcie_gen2) {
1370 			if (pi->acpi_pcie_gen2)
1371 				table->ACPIState.levels[0].gen2PCIE = 1;
1372 			else
1373 				table->ACPIState.levels[0].gen2PCIE = 0;
1374 		} else
1375 			table->ACPIState.levels[0].gen2PCIE = 0;
1376 		if (pi->acpi_pcie_gen2)
1377 			table->ACPIState.levels[0].gen2XSP = 1;
1378 		else
1379 			table->ACPIState.levels[0].gen2XSP = 0;
1380 	} else {
1381 		cypress_populate_voltage_value(rdev,
1382 					       &eg_pi->vddc_voltage_table,
1383 					       pi->min_vddc_in_table,
1384 					       &table->ACPIState.levels[0].vddc);
1385 		table->ACPIState.levels[0].gen2PCIE = 0;
1386 	}
1387 
1388 	if (eg_pi->acpi_vddci) {
1389 		if (eg_pi->vddci_control) {
1390 			cypress_populate_voltage_value(rdev,
1391 						       &eg_pi->vddci_voltage_table,
1392 						       eg_pi->acpi_vddci,
1393 						       &table->ACPIState.levels[0].vddci);
1394 		}
1395 	}
1396 
1397 	mpll_ad_func_cntl &= ~PDNB;
1398 
1399 	mpll_ad_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN;
1400 
1401 	if (pi->mem_gddr5)
1402 		mpll_dq_func_cntl &= ~PDNB;
1403 	mpll_dq_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN | BYPASS;
1404 
1405 	mclk_pwrmgt_cntl |= (MRDCKA0_RESET |
1406 			     MRDCKA1_RESET |
1407 			     MRDCKB0_RESET |
1408 			     MRDCKB1_RESET |
1409 			     MRDCKC0_RESET |
1410 			     MRDCKC1_RESET |
1411 			     MRDCKD0_RESET |
1412 			     MRDCKD1_RESET);
1413 
1414 	mclk_pwrmgt_cntl &= ~(MRDCKA0_PDNB |
1415 			      MRDCKA1_PDNB |
1416 			      MRDCKB0_PDNB |
1417 			      MRDCKB1_PDNB |
1418 			      MRDCKC0_PDNB |
1419 			      MRDCKC1_PDNB |
1420 			      MRDCKD0_PDNB |
1421 			      MRDCKD1_PDNB);
1422 
1423 	dll_cntl |= (MRDCKA0_BYPASS |
1424 		     MRDCKA1_BYPASS |
1425 		     MRDCKB0_BYPASS |
1426 		     MRDCKB1_BYPASS |
1427 		     MRDCKC0_BYPASS |
1428 		     MRDCKC1_BYPASS |
1429 		     MRDCKD0_BYPASS |
1430 		     MRDCKD1_BYPASS);
1431 
1432 	/* evergreen only */
1433 	if (rdev->family <= CHIP_HEMLOCK)
1434 		spll_func_cntl |= SPLL_RESET | SPLL_SLEEP | SPLL_BYPASS_EN;
1435 
1436 	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
1437 	spll_func_cntl_2 |= SCLK_MUX_SEL(4);
1438 
1439 	table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL =
1440 		cpu_to_be32(mpll_ad_func_cntl);
1441 	table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL_2 =
1442 		cpu_to_be32(mpll_ad_func_cntl_2);
1443 	table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL =
1444 		cpu_to_be32(mpll_dq_func_cntl);
1445 	table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL_2 =
1446 		cpu_to_be32(mpll_dq_func_cntl_2);
1447 	table->ACPIState.levels[0].mclk.mclk770.vMCLK_PWRMGT_CNTL =
1448 		cpu_to_be32(mclk_pwrmgt_cntl);
1449 	table->ACPIState.levels[0].mclk.mclk770.vDLL_CNTL = cpu_to_be32(dll_cntl);
1450 
1451 	table->ACPIState.levels[0].mclk.mclk770.mclk_value = 0;
1452 
1453 	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
1454 		cpu_to_be32(spll_func_cntl);
1455 	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
1456 		cpu_to_be32(spll_func_cntl_2);
1457 	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
1458 		cpu_to_be32(spll_func_cntl_3);
1459 
1460 	table->ACPIState.levels[0].sclk.sclk_value = 0;
1461 
1462 	cypress_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd);
1463 
1464 	if (eg_pi->dynamic_ac_timing)
1465 		table->ACPIState.levels[0].ACIndex = 1;
1466 
1467 	table->ACPIState.levels[1] = table->ACPIState.levels[0];
1468 	table->ACPIState.levels[2] = table->ACPIState.levels[0];
1469 
1470 	return 0;
1471 }
1472 
1473 static void cypress_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
1474 							  struct atom_voltage_table *voltage_table)
1475 {
1476 	unsigned int i, diff;
1477 
1478 	if (voltage_table->count <= MAX_NO_VREG_STEPS)
1479 		return;
1480 
1481 	diff = voltage_table->count - MAX_NO_VREG_STEPS;
1482 
1483 	for (i = 0; i < MAX_NO_VREG_STEPS; i++)
1484 		voltage_table->entries[i] = voltage_table->entries[i + diff];
1485 
1486 	voltage_table->count = MAX_NO_VREG_STEPS;
1487 }
1488 
1489 int cypress_construct_voltage_tables(struct radeon_device *rdev)
1490 {
1491 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1492 	int ret;
1493 
1494 	ret = radeon_atom_get_voltage_table(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, 0,
1495 					    &eg_pi->vddc_voltage_table);
1496 	if (ret)
1497 		return ret;
1498 
1499 	if (eg_pi->vddc_voltage_table.count > MAX_NO_VREG_STEPS)
1500 		cypress_trim_voltage_table_to_fit_state_table(rdev,
1501 							      &eg_pi->vddc_voltage_table);
1502 
1503 	if (eg_pi->vddci_control) {
1504 		ret = radeon_atom_get_voltage_table(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0,
1505 						    &eg_pi->vddci_voltage_table);
1506 		if (ret)
1507 			return ret;
1508 
1509 		if (eg_pi->vddci_voltage_table.count > MAX_NO_VREG_STEPS)
1510 			cypress_trim_voltage_table_to_fit_state_table(rdev,
1511 								      &eg_pi->vddci_voltage_table);
1512 	}
1513 
1514 	return 0;
1515 }
1516 
1517 static void cypress_populate_smc_voltage_table(struct radeon_device *rdev,
1518 					       struct atom_voltage_table *voltage_table,
1519 					       RV770_SMC_STATETABLE *table)
1520 {
1521 	unsigned int i;
1522 
1523 	for (i = 0; i < voltage_table->count; i++) {
1524 		table->highSMIO[i] = 0;
1525 		table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low);
1526 	}
1527 }
1528 
1529 int cypress_populate_smc_voltage_tables(struct radeon_device *rdev,
1530 					RV770_SMC_STATETABLE *table)
1531 {
1532 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1533 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1534 	unsigned char i;
1535 
1536 	if (eg_pi->vddc_voltage_table.count) {
1537 		cypress_populate_smc_voltage_table(rdev,
1538 						   &eg_pi->vddc_voltage_table,
1539 						   table);
1540 
1541 		table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_VDDC] = 0;
1542 		table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_VDDC] =
1543 			cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
1544 
1545 		for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) {
1546 			if (pi->max_vddc_in_table <=
1547 			    eg_pi->vddc_voltage_table.entries[i].value) {
1548 				table->maxVDDCIndexInPPTable = i;
1549 				break;
1550 			}
1551 		}
1552 	}
1553 
1554 	if (eg_pi->vddci_voltage_table.count) {
1555 		cypress_populate_smc_voltage_table(rdev,
1556 						   &eg_pi->vddci_voltage_table,
1557 						   table);
1558 
1559 		table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_VDDCI] = 0;
1560 		table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_VDDCI] =
1561 			cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
1562 	}
1563 
1564 	return 0;
1565 }
1566 
1567 static u32 cypress_get_mclk_split_point(struct atom_memory_info *memory_info)
1568 {
1569 	if ((memory_info->mem_type == MEM_TYPE_GDDR3) ||
1570 	    (memory_info->mem_type == MEM_TYPE_DDR3))
1571 		return 30000;
1572 
1573 	return 0;
1574 }
1575 
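/*
 * Derive the MVDD high/low SMIO indices from the backbias pad state and
 * look up the memory module to pick the MVDD split frequency; MVDD
 * control is disabled when there is no backbias pad, no memory info or
 * no split point.
 */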
int cypress_get_mvdd_configuration(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	u8 module_index;
	struct atom_memory_info memory_info;
	u32 tmp = RREG32(GENERAL_PWRMGT);

	if (!(tmp & BACKBIAS_PAD_EN)) {
		eg_pi->mvdd_high_index = 0;
		eg_pi->mvdd_low_index = 1;
		pi->mvdd_control = false;
		return 0;
	}

	if (tmp & BACKBIAS_VALUE)
		eg_pi->mvdd_high_index = 1;
	else
		eg_pi->mvdd_high_index = 0;

	eg_pi->mvdd_low_index =
		(eg_pi->mvdd_high_index == 0) ? 1 : 0;

	module_index = rv770_get_memory_module_index(rdev);

	if (radeon_atom_get_memory_info(rdev, module_index, &memory_info)) {
		pi->mvdd_control = false;
		return 0;
	}

	pi->mvdd_split_frequency =
		cypress_get_mclk_split_point(&memory_info);

	if (pi->mvdd_split_frequency == 0) {
		pi->mvdd_control = false;
		return 0;
	}

	return 0;
}

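/*
 * Build the initial SMC state table: voltage tables, thermal protection
 * type, platform system flags, the initial (boot) state and the ACPI
 * state, then upload it to SMC SRAM at state_table_start.
 */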
static int cypress_init_smc_table(struct radeon_device *rdev,
				  struct radeon_ps *radeon_boot_state)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	RV770_SMC_STATETABLE *table = &pi->smc_statetable;
	int ret;

	memset(table, 0, sizeof(RV770_SMC_STATETABLE));

	cypress_populate_smc_voltage_tables(rdev, table);

	switch (rdev->pm.int_thermal_type) {
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
		break;
	case THERMAL_TYPE_NONE:
		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
		break;
	default:
		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
		break;
	}

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
		table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
		table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
		table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;

	if (pi->mem_gddr5)
		table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;

	ret = cypress_populate_smc_initial_state(rdev, radeon_boot_state, table);
	if (ret)
		return ret;

	ret = cypress_populate_smc_acpi_state(rdev, table);
	if (ret)
		return ret;

	table->driverState = table->initialState;

	return rv770_copy_bytes_to_smc(rdev,
				       pi->state_table_start,
				       (u8 *)table, sizeof(RV770_SMC_STATETABLE),
				       pi->sram_end);
}

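/*
 * Build the MC register table for the SMC: entry 0 is generated from the
 * boot state's low performance level, the remaining entries from the
 * driver's MC register table, and the result is copied to SMC SRAM at
 * mc_reg_table_start.
 */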
int cypress_populate_mc_reg_table(struct radeon_device *rdev,
				  struct radeon_ps *radeon_boot_state)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct rv7xx_ps *boot_state = rv770_get_ps(radeon_boot_state);
	SMC_Evergreen_MCRegisters mc_reg_table = { 0 };

	rv770_write_smc_soft_register(rdev,
				      RV770_SMC_SOFT_REGISTER_seq_index, 1);

	cypress_populate_mc_reg_addresses(rdev, &mc_reg_table);

	cypress_convert_mc_reg_table_entry_to_smc(rdev,
						  &boot_state->low,
						  &mc_reg_table.data[0]);

	cypress_convert_mc_registers(&eg_pi->mc_reg_table.mc_reg_table_entry[0],
				     &mc_reg_table.data[1], eg_pi->mc_reg_table.last,
				     eg_pi->mc_reg_table.valid_flag);

	cypress_convert_mc_reg_table_to_smc(rdev, radeon_boot_state, &mc_reg_table);

	return rv770_copy_bytes_to_smc(rdev, eg_pi->mc_reg_table_start,
				       (u8 *)&mc_reg_table, sizeof(SMC_Evergreen_MCRegisters),
				       pi->sram_end);
}

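/*
 * Read the state table, soft register and MC register table offsets from
 * the SMC firmware header in SRAM.
 */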
int cypress_get_table_locations(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	u32 tmp;
	int ret;

	ret = rv770_read_smc_sram_dword(rdev,
					EVERGREEN_SMC_FIRMWARE_HEADER_LOCATION +
					EVERGREEN_SMC_FIRMWARE_HEADER_stateTable,
					&tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->state_table_start = (u16)tmp;

	ret = rv770_read_smc_sram_dword(rdev,
					EVERGREEN_SMC_FIRMWARE_HEADER_LOCATION +
					EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters,
					&tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->soft_regs_start = (u16)tmp;

	ret = rv770_read_smc_sram_dword(rdev,
					EVERGREEN_SMC_FIRMWARE_HEADER_LOCATION +
					EVERGREEN_SMC_FIRMWARE_HEADER_mcRegisterTable,
					&tmp, pi->sram_end);
	if (ret)
		return ret;

	eg_pi->mc_reg_table_start = (u16)tmp;

	return 0;
}

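/*
 * Set both display gaps to "ignore" and the memory-change gap to wait
 * for DISP1 vblank (DISP2 memory changes remain ignored).
 */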
void cypress_enable_display_gap(struct radeon_device *rdev)
{
	u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);

	tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
	tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
		DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE));

	tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
	tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) |
		DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE));
	WREG32(CG_DISPLAY_GAP_CNTL, tmp);
}

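/*
 * Pick the display gap behaviour from the number of active CRTCs, point
 * the DISP1 "slow select" at an active CRTC if the currently selected
 * one is idle, and tell the SMC whether any display is active.
 */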
static void cypress_program_display_gap(struct radeon_device *rdev)
{
	u32 tmp, pipe;
	int i;

	tmp = RREG32(CG_DISPLAY_GAP_CNTL) & ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
	if (rdev->pm.dpm.new_active_crtc_count > 0)
		tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
	else
		tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE);

	if (rdev->pm.dpm.new_active_crtc_count > 1)
		tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
	else
		tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE);

	WREG32(CG_DISPLAY_GAP_CNTL, tmp);

	tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG);
	pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT;

	if ((rdev->pm.dpm.new_active_crtc_count > 0) &&
	    (!(rdev->pm.dpm.new_active_crtcs & (1 << pipe)))) {
		/* find the first active crtc */
		for (i = 0; i < rdev->num_crtc; i++) {
			if (rdev->pm.dpm.new_active_crtcs & (1 << i))
				break;
		}
		if (i == rdev->num_crtc)
			pipe = 0;
		else
			pipe = i;

		tmp &= ~DCCG_DISP1_SLOW_SELECT_MASK;
		tmp |= DCCG_DISP1_SLOW_SELECT(pipe);
		WREG32(DCCG_DISP_SLOW_SELECT_REG, tmp);
	}

	cypress_notify_smc_display_change(rdev, rdev->pm.dpm.new_active_crtc_count > 0);
}

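/*
 * Per-ASIC setup for DPM: cache clock and SMIO voltage registers, query
 * max VDDC and memory type, reset the PCIe performance request
 * bookkeeping, advertise gen2 capability when supported and enable ACPI
 * power management.
 */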
void cypress_dpm_setup_asic(struct radeon_device *rdev)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);

	rv740_read_clock_registers(rdev);
	rv770_read_voltage_smio_registers(rdev);
	rv770_get_max_vddc(rdev);
	rv770_get_memory_type(rdev);

	if (eg_pi->pcie_performance_request)
		eg_pi->pcie_performance_request_registered = false;

	if (eg_pi->pcie_performance_request)
		cypress_advertise_gen2_capability(rdev);

	rv770_get_pcie_gen2_status(rdev);

	rv770_enable_acpi_pm(rdev);
}

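/*
 * Full DPM bring-up: build the voltage and MVDD configuration and
 * (optionally) the dynamic MC register table, program the bus/engine
 * speed parameters, upload the SMC firmware and tables, start the SMC,
 * then enable sclk/mclk control, DPM, clock gating and the thermal
 * auto-throttle source.
 */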
int cypress_dpm_enable(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
	int ret;

	if (pi->gfx_clock_gating)
		rv770_restore_cgcg(rdev);

	if (rv770_dpm_enabled(rdev))
		return -EINVAL;

	if (pi->voltage_control) {
		rv770_enable_voltage_control(rdev, true);
		ret = cypress_construct_voltage_tables(rdev);
		if (ret) {
			DRM_ERROR("cypress_construct_voltage_tables failed\n");
			return ret;
		}
	}

	if (pi->mvdd_control) {
		ret = cypress_get_mvdd_configuration(rdev);
		if (ret) {
			DRM_ERROR("cypress_get_mvdd_configuration failed\n");
			return ret;
		}
	}

	if (eg_pi->dynamic_ac_timing) {
		cypress_set_mc_reg_address_table(rdev);
		cypress_force_mc_use_s0(rdev, boot_ps);
		ret = cypress_initialize_mc_reg_table(rdev);
		if (ret)
			eg_pi->dynamic_ac_timing = false;
		cypress_force_mc_use_s1(rdev, boot_ps);
	}

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_BACKBIAS)
		rv770_enable_backbias(rdev, true);

	if (pi->dynamic_ss)
		cypress_enable_spread_spectrum(rdev, true);

	if (pi->thermal_protection)
		rv770_enable_thermal_protection(rdev, true);

	rv770_setup_bsp(rdev);
	rv770_program_git(rdev);
	rv770_program_tp(rdev);
	rv770_program_tpp(rdev);
	rv770_program_sstp(rdev);
	rv770_program_engine_speed_parameters(rdev);
	cypress_enable_display_gap(rdev);
	rv770_program_vc(rdev);

	if (pi->dynamic_pcie_gen2)
		cypress_enable_dynamic_pcie_gen2(rdev, true);

	ret = rv770_upload_firmware(rdev);
	if (ret) {
		DRM_ERROR("rv770_upload_firmware failed\n");
		return ret;
	}

	ret = cypress_get_table_locations(rdev);
	if (ret) {
		DRM_ERROR("cypress_get_table_locations failed\n");
		return ret;
	}
	ret = cypress_init_smc_table(rdev, boot_ps);
	if (ret) {
		DRM_ERROR("cypress_init_smc_table failed\n");
		return ret;
	}
	if (eg_pi->dynamic_ac_timing) {
		ret = cypress_populate_mc_reg_table(rdev, boot_ps);
		if (ret) {
			DRM_ERROR("cypress_populate_mc_reg_table failed\n");
			return ret;
		}
	}

	cypress_program_response_times(rdev);

	r7xx_start_smc(rdev);

	ret = cypress_notify_smc_display_change(rdev, false);
	if (ret) {
		DRM_ERROR("cypress_notify_smc_display_change failed\n");
		return ret;
	}
	cypress_enable_sclk_control(rdev, true);

	if (eg_pi->memory_transition)
		cypress_enable_mclk_control(rdev, true);

	cypress_start_dpm(rdev);

	if (pi->gfx_clock_gating)
		cypress_gfx_clock_gating_enable(rdev, true);

	if (pi->mg_clock_gating)
		cypress_mg_clock_gating_enable(rdev, true);

	rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);

	return 0;
}

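/*
 * Tear down DPM in roughly the reverse order of cypress_dpm_enable():
 * disable gen2/thermal/clock gating, stop DPM and the SMC, turn off
 * spread spectrum and switch the memory controller back to register
 * set 1 for the boot state.
 */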
void cypress_dpm_disable(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;

	if (!rv770_dpm_enabled(rdev))
		return;

	rv770_clear_vc(rdev);

	if (pi->thermal_protection)
		rv770_enable_thermal_protection(rdev, false);

	if (pi->dynamic_pcie_gen2)
		cypress_enable_dynamic_pcie_gen2(rdev, false);

	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		rdev->irq.dpm_thermal = false;
		radeon_irq_set(rdev);
	}

	if (pi->gfx_clock_gating)
		cypress_gfx_clock_gating_enable(rdev, false);

	if (pi->mg_clock_gating)
		cypress_mg_clock_gating_enable(rdev, false);

	rv770_stop_dpm(rdev);
	r7xx_stop_smc(rdev);

	cypress_enable_spread_spectrum(rdev, false);

	if (eg_pi->dynamic_ac_timing)
		cypress_force_mc_use_s1(rdev, boot_ps);

	rv770_reset_smio_status(rdev);
}

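/*
 * Switch to the requested power state: halt the SMC, upload the new
 * software state (and MC register table when dynamic AC timing is on),
 * reprogram memory timing, then resume the SMC and commit the new state.
 */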
int cypress_dpm_set_power_state(struct radeon_device *rdev)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct radeon_ps *new_ps = rdev->pm.dpm.requested_ps;
	struct radeon_ps *old_ps = rdev->pm.dpm.current_ps;
	int ret;

	ret = rv770_restrict_performance_levels_before_switch(rdev);
	if (ret) {
		DRM_ERROR("rv770_restrict_performance_levels_before_switch failed: %d\n", ret);
		return ret;
	}
	if (eg_pi->pcie_performance_request)
		cypress_notify_link_speed_change_before_state_change(rdev, new_ps, old_ps);

	rv770_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
	ret = rv770_halt_smc(rdev);
	if (ret) {
		DRM_ERROR("rv770_halt_smc failed\n");
		return ret;
	}
	ret = cypress_upload_sw_state(rdev, new_ps);
	if (ret) {
		DRM_ERROR("cypress_upload_sw_state failed\n");
		return ret;
	}
	if (eg_pi->dynamic_ac_timing) {
		ret = cypress_upload_mc_reg_table(rdev, new_ps);
		if (ret) {
			DRM_ERROR("cypress_upload_mc_reg_table failed\n");
			return ret;
		}
	}

	cypress_program_memory_timing_parameters(rdev, new_ps);

	ret = rv770_resume_smc(rdev);
	if (ret) {
		DRM_ERROR("rv770_resume_smc failed\n");
		return ret;
	}
	ret = rv770_set_sw_state(rdev);
	if (ret) {
		DRM_ERROR("rv770_set_sw_state failed\n");
		return ret;
	}
	rv770_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);

	if (eg_pi->pcie_performance_request)
		cypress_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);

	return 0;
}

#if 0
void cypress_dpm_reset_asic(struct radeon_device *rdev)
{
	rv770_restrict_performance_levels_before_switch(rdev);
	rv770_set_boot_state(rdev);
}
#endif

void cypress_dpm_display_configuration_changed(struct radeon_device *rdev)
{
	cypress_program_display_gap(rdev);
}

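/*
 * Allocate and initialize the cypress power info from the platform caps
 * and ATOM power-play table, and pick the default feature set (clock
 * gating, dynamic PCIe gen2, thermal protection, dynamic AC timing, etc.).
 */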
int cypress_dpm_init(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi;
	struct evergreen_power_info *eg_pi;
	struct atom_clock_dividers dividers;
	int ret;

	eg_pi = kzalloc(sizeof(struct evergreen_power_info), GFP_KERNEL);
	if (eg_pi == NULL)
		return -ENOMEM;
	rdev->pm.dpm.priv = eg_pi;
	pi = &eg_pi->rv7xx;

	rv770_get_max_vddc(rdev);

	eg_pi->ulv.supported = false;
	pi->acpi_vddc = 0;
	eg_pi->acpi_vddci = 0;
	pi->min_vddc_in_table = 0;
	pi->max_vddc_in_table = 0;

	ret = r600_get_platform_caps(rdev);
	if (ret)
		return ret;

	ret = rv7xx_parse_power_table(rdev);
	if (ret)
		return ret;

	if (rdev->pm.dpm.voltage_response_time == 0)
		rdev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT;
	if (rdev->pm.dpm.backbias_response_time == 0)
		rdev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT;

	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					     0, false, &dividers);
	if (ret)
		pi->ref_div = dividers.ref_div + 1;
	else
		pi->ref_div = R600_REFERENCEDIVIDER_DFLT;

	pi->mclk_strobe_mode_threshold = 40000;
	pi->mclk_edc_enable_threshold = 40000;
	eg_pi->mclk_edc_wr_enable_threshold = 40000;

	pi->rlp = RV770_RLP_DFLT;
	pi->rmp = RV770_RMP_DFLT;
	pi->lhp = RV770_LHP_DFLT;
	pi->lmp = RV770_LMP_DFLT;

	pi->voltage_control =
		radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, 0);

	pi->mvdd_control =
		radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0);

	eg_pi->vddci_control =
		radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0);

	rv770_get_engine_memory_ss(rdev);

	pi->asi = RV770_ASI_DFLT;
	pi->pasi = CYPRESS_HASI_DFLT;
	pi->vrc = CYPRESS_VRC_DFLT;

	pi->power_gating = false;

	if ((rdev->family == CHIP_CYPRESS) ||
	    (rdev->family == CHIP_HEMLOCK))
		pi->gfx_clock_gating = false;
	else
		pi->gfx_clock_gating = true;

	pi->mg_clock_gating = true;
	pi->mgcgtssm = true;
	eg_pi->ls_clock_gating = false;
	eg_pi->sclk_deep_sleep = false;

	pi->dynamic_pcie_gen2 = true;

	if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
		pi->thermal_protection = true;
	else
		pi->thermal_protection = false;

	pi->display_gap = true;

	if (rdev->flags & RADEON_IS_MOBILITY)
		pi->dcodt = true;
	else
		pi->dcodt = false;

	pi->ulps = true;

	eg_pi->dynamic_ac_timing = true;
	eg_pi->abm = true;
	eg_pi->mcls = true;
	eg_pi->light_sleep = true;
	eg_pi->memory_transition = true;
#if defined(CONFIG_ACPI)
	eg_pi->pcie_performance_request =
		radeon_acpi_is_pcie_performance_request_supported(rdev);
#else
	eg_pi->pcie_performance_request = false;
#endif

	if ((rdev->family == CHIP_CYPRESS) ||
	    (rdev->family == CHIP_HEMLOCK) ||
	    (rdev->family == CHIP_JUNIPER))
		eg_pi->dll_default_on = true;
	else
		eg_pi->dll_default_on = false;

	eg_pi->sclk_deep_sleep = false;
	pi->mclk_stutter_mode_threshold = 0;

	pi->sram_end = SMC_RAM_END;

	return 0;
}

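/* Free the per-state private data and the power state and priv arrays. */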
void cypress_dpm_fini(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
		kfree(rdev->pm.dpm.ps[i].ps_priv);
	}
	kfree(rdev->pm.dpm.ps);
	kfree(rdev->pm.dpm.priv);
}

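/*
 * A memory clock switch is only safe during vblank; with GDDR5 the
 * vblank period must be at least 450 (presumably microseconds, the
 * unit returned by r600_dpm_get_vblank_time()).
 */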
bool cypress_dpm_vblank_too_short(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	u32 vblank_time = r600_dpm_get_vblank_time(rdev);
	/* we never hit the non-gddr5 limit so disable it */
	u32 switch_limit = pi->mem_gddr5 ? 450 : 0;

	if (vblank_time < switch_limit)
		return true;
	else
		return false;
}