/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/seq_file.h>

#include "atom.h"
#include "ci_dpm.h"
#include "cikd.h"
#include "r600_dpm.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_ucode.h"

#define MC_CG_ARB_FREQ_F0           0x0a
#define MC_CG_ARB_FREQ_F1           0x0b
#define MC_CG_ARB_FREQ_F2           0x0c
#define MC_CG_ARB_FREQ_F3           0x0d

#define SMC_RAM_END 0x40000

#define VOLTAGE_SCALE               4
#define VOLTAGE_VID_OFFSET_SCALE1    625
#define VOLTAGE_VID_OFFSET_SCALE2    100

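/*
 * Per-ASIC PowerTune defaults.  Going by how the fields are consumed
 * below, the initializer order appears to be: svi_load_line_en,
 * svi_load_line_vddc, tdc_vddc_throttle_release_limit_perc, tdc_mawt,
 * tdc_waterfall_ctl, dte_ambient_temp_base, display_cac,
 * bapm_temp_gradient, followed by the bapmti_r[] and bapmti_rc[]
 * thermal-coupling tables (15 entries each, i.e.
 * SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS).
 */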
static const struct ci_pt_defaults defaults_hawaii_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
	{ 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_hawaii_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
	{ 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_bonaire_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
	{ 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};

static const struct ci_pt_defaults defaults_bonaire_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
	{ 0x8C,  0x23F, 0x244, 0xA6,  0x83,  0x85,  0x86,  0x86,  0x83,  0xDB,  0xDB,  0xDA,  0x67,  0x60,  0x5F  },
	{ 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
};

static const struct ci_pt_defaults defaults_saturn_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
	{ 0x8C,  0x247, 0x249, 0xA6,  0x80,  0x81,  0x8B,  0x89,  0x86,  0xC9,  0xCA,  0xC9,  0x4D,  0x4D,  0x4D  },
	{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_saturn_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
	{ 0x96,  0x21D, 0x23B, 0xA1,  0x85,  0x87,  0x83,  0x84,  0x81,  0xE6,  0xE6,  0xE6,  0x71,  0x6A,  0x6A  },
	{ 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
};

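/*
 * DIDT (di/dt) configuration table.  Each entry is
 * { offset, mask, shift, value, type }; the type selects the access
 * method in ci_program_pt_config_registers() and the table is
 * terminated by the 0xFFFFFFFF sentinel offset.  Judging by the DIDT
 * register offsets used in ci_do_enable_didt(), the four groups below
 * program the SQ (0x00-0x12), DB (0x20-0x32), TD (0x40-0x52) and
 * TCP (0x60-0x72) DIDT blocks respectively.
 */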
static const struct ci_pt_config_reg didt_config_ci[] =
{
	{ 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};

extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
				       u32 arb_freq_src, u32 arb_freq_dest);
extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
extern u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode);
extern void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
						     u32 max_voltage_steps,
						     struct atom_voltage_table *voltage_table);
extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
extern int ci_mc_load_microcode(struct radeon_device *rdev);
extern void cik_update_cg(struct radeon_device *rdev,
			  u32 block, bool enable);

static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
					 struct atom_voltage_table_entry *voltage_table,
					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
static int ci_set_power_limit(struct radeon_device *rdev, u32 n);
static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
				       u32 target_tdp);
static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);

static PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg);
static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
						      PPSMC_Msg msg, u32 parameter);

static void ci_thermal_start_smc_fan_control(struct radeon_device *rdev);
static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev);

static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
{
	struct ci_power_info *pi = rdev->pm.dpm.priv;

	return pi;
}

static struct ci_ps *ci_get_ps(struct radeon_ps *rps)
{
	struct ci_ps *ps = rps->ps_priv;

	return ps;
}

static void ci_initialize_powertune_defaults(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	switch (rdev->pdev->device) {
	case 0x6649:
	case 0x6650:
	case 0x6651:
	case 0x6658:
	case 0x665C:
	case 0x665D:
	default:
		pi->powertune_defaults = &defaults_bonaire_xt;
		break;
	case 0x6640:
	case 0x6641:
	case 0x6646:
	case 0x6647:
		pi->powertune_defaults = &defaults_saturn_xt;
		break;
	case 0x67B8:
	case 0x67B0:
		pi->powertune_defaults = &defaults_hawaii_xt;
		break;
	case 0x67BA:
	case 0x67B1:
		pi->powertune_defaults = &defaults_hawaii_pro;
		break;
	case 0x67A0:
	case 0x67A1:
	case 0x67A2:
	case 0x67A8:
	case 0x67A9:
	case 0x67AA:
	case 0x67B9:
	case 0x67BE:
		pi->powertune_defaults = &defaults_bonaire_xt;
		break;
	}

	pi->dte_tj_offset = 0;

	pi->caps_power_containment = true;
	pi->caps_cac = false;
	pi->caps_sq_ramping = false;
	pi->caps_db_ramping = false;
	pi->caps_td_ramping = false;
	pi->caps_tcp_ramping = false;

	if (pi->caps_power_containment) {
		pi->caps_cac = true;
		if (rdev->family == CHIP_HAWAII)
			pi->enable_bapm_feature = false;
		else
			pi->enable_bapm_feature = true;
		pi->enable_tdc_limit_feature = true;
		pi->enable_pkg_pwr_tracking_feature = true;
	}
}

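/*
 * Convert a voltage in mV to an SVI2-style VID.  VID 0 corresponds to
 * 1.55 V and each step is 6.25 mV, so
 * VID = (1550 - vddc) / 6.25 = (6200 - vddc * VOLTAGE_SCALE) / 25.
 * E.g. 1100 mV -> (6200 - 4400) / 25 = 72 (0x48).
 */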
static u8 ci_convert_to_vid(u16 vddc)
{
	return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
}

static int ci_populate_bapm_vddc_vid_sidd(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
	u32 i;

	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
		return -EINVAL;
	if (rdev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
		return -EINVAL;
	if (rdev->pm.dpm.dyn_state.cac_leakage_table.count !=
	    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
		return -EINVAL;

	for (i = 0; i < rdev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
		if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
			lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
			hi_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
			hi2_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
		} else {
			lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
			hi_vid[i] = ci_convert_to_vid((u16)rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
		}
	}
	return 0;
}

static int ci_populate_vddc_vid(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *vid = pi->smc_powertune_table.VddCVid;
	u32 i;

	if (pi->vddc_voltage_table.count > 8)
		return -EINVAL;

	for (i = 0; i < pi->vddc_voltage_table.count; i++)
		vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);

	return 0;
}

static int ci_populate_svi_load_line(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;

	pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
	pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
	pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
	pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;

	return 0;
}

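/*
 * Note on units: power/current limits exchanged with the SMC (TDC, TDP,
 * package power) are in 8.8 fixed point, hence the "* 256" scaling
 * throughout, and are stored big-endian via cpu_to_be16()/cpu_to_be32().
 */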
static int ci_populate_tdc_limit(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	u16 tdc_limit;

	tdc_limit = rdev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
	pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
	pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
		pt_defaults->tdc_vddc_throttle_release_limit_perc;
	pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;

	return 0;
}

static int ci_populate_dw8(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	int ret;

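	/*
	 * Note: the value read from SMC SRAM below is immediately
	 * overwritten with the per-ASIC default, so the read effectively
	 * only verifies that the PmFuses table is reachable in SMC RAM.
	 */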
	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, PmFuseTable) +
				     offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
				     (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
				     pi->sram_end);
	if (ret)
		return -EINVAL;
	else
		pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;

	return 0;
}

static int ci_populate_fuzzy_fan(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if ((rdev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
	    (rdev->pm.dpm.fan.fan_output_sensitivity == 0))
		rdev->pm.dpm.fan.fan_output_sensitivity =
			rdev->pm.dpm.fan.default_fan_output_sensitivity;

	pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
		cpu_to_be16(rdev->pm.dpm.fan.fan_output_sensitivity);

	return 0;
}

static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	int i, min, max;

	min = max = hi_vid[0];
	for (i = 0; i < 8; i++) {
		if (0 != hi_vid[i]) {
			if (min > hi_vid[i])
				min = hi_vid[i];
			if (max < hi_vid[i])
				max = hi_vid[i];
		}

		if (0 != lo_vid[i]) {
			if (min > lo_vid[i])
				min = lo_vid[i];
			if (max < lo_vid[i])
				max = lo_vid[i];
		}
	}

	if ((min == 0) || (max == 0))
		return -EINVAL;
	pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
	pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;

	return 0;
}

static int ci_populate_bapm_vddc_base_leakage_sidd(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
	u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;

	hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
	lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;

	pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
	pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);

	return 0;
}

static int ci_populate_bapm_parameters_in_dpm_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	SMU7_Discrete_DpmTable  *dpm_table = &pi->smc_state_table;
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	struct radeon_ppm_table *ppm = rdev->pm.dpm.dyn_state.ppm_table;
	int i, j, k;
	const u16 *def1;
	const u16 *def2;

	dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
	dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;

	dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
	dpm_table->GpuTjMax =
		(u8)(pi->thermal_temp_setting.temperature_high / 1000);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;

	if (ppm) {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
	} else {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
	}

	dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
	def1 = pt_defaults->bapmti_r;
	def2 = pt_defaults->bapmti_rc;

	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
			for (k = 0; k < SMU7_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
				dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
				def1++;
				def2++;
			}
		}
	}

	return 0;
}

static int ci_populate_pm_base(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 pm_fuse_table_offset;
	int ret;

	if (pi->caps_power_containment) {
		ret = ci_read_smc_sram_dword(rdev,
					     SMU7_FIRMWARE_HEADER_LOCATION +
					     offsetof(SMU7_Firmware_Header, PmFuseTable),
					     &pm_fuse_table_offset, pi->sram_end);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_vid_sidd(rdev);
		if (ret)
			return ret;
		ret = ci_populate_vddc_vid(rdev);
		if (ret)
			return ret;
		ret = ci_populate_svi_load_line(rdev);
		if (ret)
			return ret;
		ret = ci_populate_tdc_limit(rdev);
		if (ret)
			return ret;
		ret = ci_populate_dw8(rdev);
		if (ret)
			return ret;
		ret = ci_populate_fuzzy_fan(rdev);
		if (ret)
			return ret;
		ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_base_leakage_sidd(rdev);
		if (ret)
			return ret;
		ret = ci_copy_bytes_to_smc(rdev, pm_fuse_table_offset,
					   (u8 *)&pi->smc_powertune_table,
					   sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
		if (ret)
			return ret;
	}

	return 0;
}

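/*
 * Toggle the per-block DIDT enable bits.  Callers are expected to hold
 * RLC safe mode around this; see ci_enable_didt() below.
 */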
static void ci_do_enable_didt(struct radeon_device *rdev, const bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 data;

	if (pi->caps_sq_ramping) {
		data = RREG32_DIDT(DIDT_SQ_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_SQ_CTRL0, data);
	}

	if (pi->caps_db_ramping) {
		data = RREG32_DIDT(DIDT_DB_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_DB_CTRL0, data);
	}

	if (pi->caps_td_ramping) {
		data = RREG32_DIDT(DIDT_TD_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TD_CTRL0, data);
	}

	if (pi->caps_tcp_ramping) {
		data = RREG32_DIDT(DIDT_TCP_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TCP_CTRL0, data);
	}
}

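/*
 * Walk a ci_pt_config_reg table.  CISLANDS_CONFIGREG_CACHE entries only
 * accumulate field values into 'cache'; the next non-cache entry ORs the
 * accumulated bits into its read-modify-write and then resets the cache.
 * Plain (non-indirect) offsets are dword indices, hence the "<< 2"
 * byte-address conversion.
 */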
static int ci_program_pt_config_registers(struct radeon_device *rdev,
					  const struct ci_pt_config_reg *cac_config_regs)
{
	const struct ci_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset << 2);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;

			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset << 2, data);
				break;
			}
			cache = 0;
		}
		config_regs++;
	}
	return 0;
}

static int ci_enable_didt(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;

	if (pi->caps_sq_ramping || pi->caps_db_ramping ||
	    pi->caps_td_ramping || pi->caps_tcp_ramping) {
		cik_enter_rlc_safe_mode(rdev);

		if (enable) {
			ret = ci_program_pt_config_registers(rdev, didt_config_ci);
			if (ret) {
				cik_exit_rlc_safe_mode(rdev);
				return ret;
			}
		}

		ci_do_enable_didt(rdev, enable);

		cik_exit_rlc_safe_mode(rdev);
	}

	return 0;
}

static int ci_enable_power_containment(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (enable) {
		pi->power_containment_features = 0;
		if (pi->caps_power_containment) {
			if (pi->enable_bapm_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableDTE);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
			}

			if (pi->enable_tdc_limit_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitEnable);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
			}

			if (pi->enable_pkg_pwr_tracking_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitEnable);
				if (smc_result != PPSMC_Result_OK) {
					ret = -EINVAL;
				} else {
					struct radeon_cac_tdp_table *cac_tdp_table =
						rdev->pm.dpm.dyn_state.cac_tdp_table;
					u32 default_pwr_limit =
						(u32)(cac_tdp_table->maximum_power_delivery_limit * 256);

					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;

					ci_set_power_limit(rdev, default_pwr_limit);
				}
			}
		}
	} else {
		if (pi->caps_power_containment && pi->power_containment_features) {
			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitDisable);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableDTE);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitDisable);
			pi->power_containment_features = 0;
		}
	}

	return ret;
}

static int ci_enable_smc_cac(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (pi->caps_cac) {
		if (enable) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac);
			if (smc_result != PPSMC_Result_OK) {
				ret = -EINVAL;
				pi->cac_enabled = false;
			} else {
				pi->cac_enabled = true;
			}
		} else if (pi->cac_enabled) {
			ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);
			pi->cac_enabled = false;
		}
	}

	return ret;
}

static int ci_enable_thermal_based_sclk_dpm(struct radeon_device *rdev,
					    bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result = PPSMC_Result_OK;

	if (pi->thermal_sclk_dpm_enabled) {
		if (enable)
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_ENABLE_THERMAL_DPM);
		else
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DISABLE_THERMAL_DPM);
	}

	if (smc_result == PPSMC_Result_OK)
		return 0;
	else
		return -EINVAL;
}

static int ci_power_control_set_level(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	s32 adjust_percent;
	s32 target_tdp;
	int ret = 0;
	bool adjust_polarity = false; /* ??? */

	if (pi->caps_power_containment) {
		adjust_percent = adjust_polarity ?
			rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment);
		target_tdp = ((100 + adjust_percent) *
			      (s32)cac_tdp_table->configurable_tdp) / 100;

		ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp);
	}

	return ret;
}

void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (pi->uvd_power_gated == gate)
		return;

	pi->uvd_power_gated = gate;

	ci_update_uvd_dpm(rdev, gate);
}

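/*
 * Memory reclocking is only safe while the displays are blanked.  The
 * vblank time from r600_dpm_get_vblank_time() is in microseconds;
 * GDDR5 needs a longer switch window (450 us vs 300 us).
 */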
bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 vblank_time = r600_dpm_get_vblank_time(rdev);
	u32 switch_limit = pi->mem_gddr5 ? 450 : 300;

	/* disable mclk switching if the refresh is >120Hz, even if the
	 * blanking period would allow it
	 */
	if (r600_dpm_get_vrefresh(rdev) > 120)
		return true;

	if (vblank_time < switch_limit)
		return true;
	else
		return false;
}

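/*
 * Clamp the requested power state to the current operating limits
 * (AC vs DC power source, VCE clock requirements, mclk switching
 * constraints) before it is programmed.
 */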
static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
					struct radeon_ps *rps)
{
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_clock_and_voltage_limits *max_limits;
	bool disable_mclk_switching;
	u32 sclk, mclk;
	int i;

	if (rps->vce_active) {
		rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
		rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk;
	} else {
		rps->evclk = 0;
		rps->ecclk = 0;
	}

	if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
	    ci_dpm_vblank_too_short(rdev))
		disable_mclk_switching = true;
	else
		disable_mclk_switching = false;

	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (rdev->pm.dpm.ac_power == false) {
		for (i = 0; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk > max_limits->mclk)
				ps->performance_levels[i].mclk = max_limits->mclk;
			if (ps->performance_levels[i].sclk > max_limits->sclk)
				ps->performance_levels[i].sclk = max_limits->sclk;
		}
	}

	/* XXX validate the min clocks required for display */

	if (disable_mclk_switching) {
		mclk  = ps->performance_levels[ps->performance_level_count - 1].mclk;
		sclk = ps->performance_levels[0].sclk;
	} else {
		mclk = ps->performance_levels[0].mclk;
		sclk = ps->performance_levels[0].sclk;
	}

	if (rps->vce_active) {
		if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk)
			sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk;
		if (mclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk)
			mclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk;
	}

	ps->performance_levels[0].sclk = sclk;
	ps->performance_levels[0].mclk = mclk;

	if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
		ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;

	if (disable_mclk_switching) {
		if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
			ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
	} else {
		if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
			ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
	}
}

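/*
 * min_temp/max_temp are in millidegrees C; the interrupt thresholds are
 * programmed in whole degrees, hence the / 1000 below.
 */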
static int ci_thermal_set_temperature_range(struct radeon_device *rdev,
					    int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;
	u32 tmp;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	tmp = RREG32_SMC(CG_THERMAL_INT);
	tmp &= ~(CI_DIG_THERM_INTH_MASK | CI_DIG_THERM_INTL_MASK);
	tmp |= CI_DIG_THERM_INTH(high_temp / 1000) |
		CI_DIG_THERM_INTL(low_temp / 1000);
	WREG32_SMC(CG_THERMAL_INT, tmp);

#if 0
	/* XXX: need to figure out how to handle this properly */
	tmp = RREG32_SMC(CG_THERMAL_CTRL);
	tmp &= ~DIG_THERM_DPM_MASK;
	tmp |= DIG_THERM_DPM(high_temp / 1000);
	WREG32_SMC(CG_THERMAL_CTRL, tmp);
#endif

	rdev->pm.dpm.thermal.min_temp = low_temp;
	rdev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}

static int ci_thermal_enable_alert(struct radeon_device *rdev,
				   bool enable)
{
	u32 thermal_int = RREG32_SMC(CG_THERMAL_INT);
	PPSMC_Result result;

	if (enable) {
		thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
		WREG32_SMC(CG_THERMAL_INT, thermal_int);
		rdev->irq.dpm_thermal = false;
		result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Enable);
		if (result != PPSMC_Result_OK) {
			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
			return -EINVAL;
		}
	} else {
		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
		WREG32_SMC(CG_THERMAL_INT, thermal_int);
		rdev->irq.dpm_thermal = true;
		result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Disable);
		if (result != PPSMC_Result_OK) {
			DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
			return -EINVAL;
		}
	}

	return 0;
}

static void ci_fan_ctrl_set_static_mode(struct radeon_device *rdev, u32 mode)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;

	if (pi->fan_ctrl_is_in_default_mode) {
		tmp = (RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
		pi->fan_ctrl_default_mode = tmp;
		tmp = (RREG32_SMC(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
		pi->t_min = tmp;
		pi->fan_ctrl_is_in_default_mode = false;
	}

	tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
	tmp |= TMIN(0);
	WREG32_SMC(CG_FDO_CTRL2, tmp);

	tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
	tmp |= FDO_PWM_MODE(mode);
	WREG32_SMC(CG_FDO_CTRL2, tmp);
}

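/*
 * Build the SMC fan table.  The fan trip temperatures are apparently
 * kept in 0.01 degree units, hence the "(50 + x) / 100" round-to-nearest
 * conversions below; the slopes express PWM change per degree in the
 * fixed-point format the SMC fan controller expects.
 */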
static int ci_thermal_setup_fan_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
	u32 duty100;
	u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
	u16 fdo_min, slope1, slope2;
	u32 reference_clock, tmp;
	int ret;
	u64 tmp64;

	if (!pi->fan_table_start) {
		rdev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;

	if (duty100 == 0) {
		rdev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	tmp64 = (u64)rdev->pm.dpm.fan.pwm_min * duty100;
	do_div(tmp64, 10000);
	fdo_min = (u16)tmp64;

	t_diff1 = rdev->pm.dpm.fan.t_med - rdev->pm.dpm.fan.t_min;
	t_diff2 = rdev->pm.dpm.fan.t_high - rdev->pm.dpm.fan.t_med;

	pwm_diff1 = rdev->pm.dpm.fan.pwm_med - rdev->pm.dpm.fan.pwm_min;
	pwm_diff2 = rdev->pm.dpm.fan.pwm_high - rdev->pm.dpm.fan.pwm_med;

	slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
	slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);

	fan_table.TempMin = cpu_to_be16((50 + rdev->pm.dpm.fan.t_min) / 100);
	fan_table.TempMed = cpu_to_be16((50 + rdev->pm.dpm.fan.t_med) / 100);
	fan_table.TempMax = cpu_to_be16((50 + rdev->pm.dpm.fan.t_max) / 100);

	fan_table.Slope1 = cpu_to_be16(slope1);
	fan_table.Slope2 = cpu_to_be16(slope2);

	fan_table.FdoMin = cpu_to_be16(fdo_min);

	fan_table.HystDown = cpu_to_be16(rdev->pm.dpm.fan.t_hyst);

	fan_table.HystUp = cpu_to_be16(1);

	fan_table.HystSlope = cpu_to_be16(1);

	fan_table.TempRespLim = cpu_to_be16(5);

	reference_clock = radeon_get_xclk(rdev);

	fan_table.RefreshPeriod = cpu_to_be32((rdev->pm.dpm.fan.cycle_delay *
					       reference_clock) / 1600);

	fan_table.FdoMax = cpu_to_be16((u16)duty100);

	tmp = (RREG32_SMC(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
	fan_table.TempSrc = (uint8_t)tmp;

	ret = ci_copy_bytes_to_smc(rdev,
				   pi->fan_table_start,
				   (u8 *)(&fan_table),
				   sizeof(fan_table),
				   pi->sram_end);

	if (ret) {
		DRM_ERROR("Failed to load fan table to the SMC.\n");
		rdev->pm.dpm.fan.ucode_fan_control = false;
	}

	return 0;
}

static int ci_fan_ctrl_start_smc_fan_control(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result ret;

	if (pi->caps_od_fuzzy_fan_control_support) {
		ret = ci_send_msg_to_smc_with_parameter(rdev,
							PPSMC_StartFanControl,
							FAN_CONTROL_FUZZY);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
		ret = ci_send_msg_to_smc_with_parameter(rdev,
							PPSMC_MSG_SetFanPwmMax,
							rdev->pm.dpm.fan.default_max_fan_pwm);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
	} else {
		ret = ci_send_msg_to_smc_with_parameter(rdev,
							PPSMC_StartFanControl,
							FAN_CONTROL_TABLE);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
	}

	pi->fan_is_controlled_by_smc = true;
	return 0;
}

static int ci_fan_ctrl_stop_smc_fan_control(struct radeon_device *rdev)
{
	PPSMC_Result ret;
	struct ci_power_info *pi = ci_get_pi(rdev);

	ret = ci_send_msg_to_smc(rdev, PPSMC_StopFanControl);
	if (ret == PPSMC_Result_OK) {
		pi->fan_is_controlled_by_smc = false;
		return 0;
	} else
		return -EINVAL;
}

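/*
 * Fan speed here is a percentage of the maximum duty cycle (duty100);
 * the 64-bit intermediates avoid overflow when scaling.
 */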
int ci_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
					     u32 *speed)
{
	u32 duty, duty100;
	u64 tmp64;

	if (rdev->pm.no_fan)
		return -ENOENT;

	duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
	duty = (RREG32_SMC(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;

	if (duty100 == 0)
		return -EINVAL;

	tmp64 = (u64)duty * 100;
	do_div(tmp64, duty100);
	*speed = (u32)tmp64;

	if (*speed > 100)
		*speed = 100;

	return 0;
}

int ci_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
					     u32 speed)
{
	u32 tmp;
	u32 duty, duty100;
	u64 tmp64;
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (rdev->pm.no_fan)
		return -ENOENT;

	if (pi->fan_is_controlled_by_smc)
		return -EINVAL;

	if (speed > 100)
		return -EINVAL;

	duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;

	if (duty100 == 0)
		return -EINVAL;

	tmp64 = (u64)speed * duty100;
	do_div(tmp64, 100);
	duty = (u32)tmp64;

	tmp = RREG32_SMC(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
	tmp |= FDO_STATIC_DUTY(duty);
	WREG32_SMC(CG_FDO_CTRL0, tmp);

	return 0;
}

void ci_fan_ctrl_set_mode(struct radeon_device *rdev, u32 mode)
{
	if (mode) {
		/* stop auto-manage */
		if (rdev->pm.dpm.fan.ucode_fan_control)
			ci_fan_ctrl_stop_smc_fan_control(rdev);
		ci_fan_ctrl_set_static_mode(rdev, mode);
	} else {
		/* restart auto-manage */
		if (rdev->pm.dpm.fan.ucode_fan_control)
			ci_thermal_start_smc_fan_control(rdev);
		else
			ci_fan_ctrl_set_default_mode(rdev);
	}
}

u32 ci_fan_ctrl_get_mode(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;

	if (pi->fan_is_controlled_by_smc)
		return 0;

	tmp = RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
	return (tmp >> FDO_PWM_MODE_SHIFT);
}

#if 0
static int ci_fan_ctrl_get_fan_speed_rpm(struct radeon_device *rdev,
					 u32 *speed)
{
	u32 tach_period;
	u32 xclk = radeon_get_xclk(rdev);

	if (rdev->pm.no_fan)
		return -ENOENT;

	if (rdev->pm.fan_pulses_per_revolution == 0)
		return -ENOENT;

	tach_period = (RREG32_SMC(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
	if (tach_period == 0)
		return -ENOENT;

	*speed = 60 * xclk * 10000 / tach_period;

	return 0;
}

static int ci_fan_ctrl_set_fan_speed_rpm(struct radeon_device *rdev,
					 u32 speed)
{
	u32 tach_period, tmp;
	u32 xclk = radeon_get_xclk(rdev);

	if (rdev->pm.no_fan)
		return -ENOENT;

	if (rdev->pm.fan_pulses_per_revolution == 0)
		return -ENOENT;

	if ((speed < rdev->pm.fan_min_rpm) ||
	    (speed > rdev->pm.fan_max_rpm))
		return -EINVAL;

	if (rdev->pm.dpm.fan.ucode_fan_control)
		ci_fan_ctrl_stop_smc_fan_control(rdev);

	tach_period = 60 * xclk * 10000 / (8 * speed);
	tmp = RREG32_SMC(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
	tmp |= TARGET_PERIOD(tach_period);
	WREG32_SMC(CG_TACH_CTRL, tmp);

	ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC_RPM);

	return 0;
}
#endif

static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;

	if (!pi->fan_ctrl_is_in_default_mode) {
		tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
		tmp |= FDO_PWM_MODE(pi->fan_ctrl_default_mode);
		WREG32_SMC(CG_FDO_CTRL2, tmp);

		tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
		tmp |= TMIN(pi->t_min);
		WREG32_SMC(CG_FDO_CTRL2, tmp);
		pi->fan_ctrl_is_in_default_mode = true;
	}
}

static void ci_thermal_start_smc_fan_control(struct radeon_device *rdev)
{
	if (rdev->pm.dpm.fan.ucode_fan_control) {
		ci_fan_ctrl_start_smc_fan_control(rdev);
		ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
	}
}

static void ci_thermal_initialize(struct radeon_device *rdev)
{
	u32 tmp;

	if (rdev->pm.fan_pulses_per_revolution) {
		tmp = RREG32_SMC(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
		tmp |= EDGE_PER_REV(rdev->pm.fan_pulses_per_revolution - 1);
		WREG32_SMC(CG_TACH_CTRL, tmp);
	}

	tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
	tmp |= TACH_PWM_RESP_RATE(0x28);
	WREG32_SMC(CG_FDO_CTRL2, tmp);
}

static int ci_thermal_start_thermal_controller(struct radeon_device *rdev)
{
	int ret;

	ci_thermal_initialize(rdev);
	ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
	if (ret)
		return ret;
	ret = ci_thermal_enable_alert(rdev, true);
	if (ret)
		return ret;
	if (rdev->pm.dpm.fan.ucode_fan_control) {
		ret = ci_thermal_setup_fan_table(rdev);
		if (ret)
			return ret;
		ci_thermal_start_smc_fan_control(rdev);
	}

	return 0;
}

static void ci_thermal_stop_thermal_controller(struct radeon_device *rdev)
{
	if (!rdev->pm.no_fan)
		ci_fan_ctrl_set_default_mode(rdev);
}

#if 0
static int ci_read_smc_soft_register(struct radeon_device *rdev,
				     u16 reg_offset, u32 *value)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	return ci_read_smc_sram_dword(rdev,
				      pi->soft_regs_start + reg_offset,
				      value, pi->sram_end);
}
#endif

static int ci_write_smc_soft_register(struct radeon_device *rdev,
				      u16 reg_offset, u32 value)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	return ci_write_smc_sram_dword(rdev,
				       pi->soft_regs_start + reg_offset,
				       value, pi->sram_end);
}

static void ci_init_fps_limits(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;

	if (pi->caps_fps) {
		u16 tmp;

		tmp = 45;
		table->FpsHighT = cpu_to_be16(tmp);

		tmp = 30;
		table->FpsLowT = cpu_to_be16(tmp);
	}
}

static int ci_update_sclk_t(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret = 0;
	u32 low_sclk_interrupt_t = 0;

	if (pi->caps_sclk_throttle_low_notification) {
		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);

		ret = ci_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
					   (u8 *)&low_sclk_interrupt_t,
					   sizeof(u32), pi->sram_end);

	}

	return ret;
}

static void ci_get_leakage_voltages(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u16 leakage_id, virtual_voltage_id;
	u16 vddc, vddci;
	int i;

	pi->vddc_leakage.count = 0;
	pi->vddci_leakage.count = 0;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (radeon_atom_get_voltage_evv(rdev, virtual_voltage_id, &vddc) != 0)
				continue;
			if (vddc != 0 && vddc != virtual_voltage_id) {
				pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
				pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
				pi->vddc_leakage.count++;
			}
		}
	} else if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci,
										 virtual_voltage_id,
										 leakage_id) == 0) {
				if (vddc != 0 && vddc != virtual_voltage_id) {
					pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
					pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
					pi->vddc_leakage.count++;
				}
				if (vddci != 0 && vddci != virtual_voltage_id) {
					pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
					pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
					pi->vddci_leakage.count++;
				}
			}
		}
	}
}

static void ci_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	bool want_thermal_protection;
	enum radeon_dpm_event_src dpm_event_src;
	u32 tmp;

	switch (sources) {
	case 0:
	default:
		want_thermal_protection = false;
		break;
	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
		break;
	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
		break;
	case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
	      (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
		break;
	}

	if (want_thermal_protection) {
#if 0
		/* XXX: need to figure out how to handle this properly */
		tmp = RREG32_SMC(CG_THERMAL_CTRL);
		tmp &= ~DPM_EVENT_SRC_MASK;
		tmp |= DPM_EVENT_SRC(dpm_event_src);
		WREG32_SMC(CG_THERMAL_CTRL, tmp);
#endif

		tmp = RREG32_SMC(GENERAL_PWRMGT);
		if (pi->thermal_protection)
			tmp &= ~THERMAL_PROTECTION_DIS;
		else
			tmp |= THERMAL_PROTECTION_DIS;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	} else {
		tmp = RREG32_SMC(GENERAL_PWRMGT);
		tmp |= THERMAL_PROTECTION_DIS;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	}
}

static void ci_enable_auto_throttle_source(struct radeon_device *rdev,
					   enum radeon_dpm_auto_throttle_src source,
					   bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (enable) {
		if (!(pi->active_auto_throttle_sources & (1 << source))) {
			pi->active_auto_throttle_sources |= 1 << source;
			ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
		}
	} else {
		if (pi->active_auto_throttle_sources & (1 << source)) {
			pi->active_auto_throttle_sources &= ~(1 << source);
			ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
		}
	}
}

static void ci_enable_vr_hot_gpio_interrupt(struct radeon_device *rdev)
{
	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
		ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
}

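/*
 * Pairs with ci_freeze_sclk_mclk_dpm() below: DPM levels are frozen
 * while the sclk/mclk tables are rewritten and unfrozen once the update
 * is done, at which point need_update_smu7_dpm_table is cleared.
 */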
static int ci_unfreeze_sclk_mclk_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if ((!pi->sclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	if ((!pi->mclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	pi->need_update_smu7_dpm_table = 0;
	return 0;
}

static int ci_enable_sclk_mclk_dpm(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (enable) {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;

			WREG32_P(MC_SEQ_CNTL_3, CAC_EN, ~CAC_EN);

			WREG32_SMC(LCAC_MC0_CNTL, 0x05);
			WREG32_SMC(LCAC_MC1_CNTL, 0x05);
			WREG32_SMC(LCAC_CPL_CNTL, 0x100005);

			udelay(10);

			WREG32_SMC(LCAC_MC0_CNTL, 0x400005);
			WREG32_SMC(LCAC_MC1_CNTL, 0x400005);
			WREG32_SMC(LCAC_CPL_CNTL, 0x500005);
		}
	} else {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	return 0;
}

static int ci_start_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(GENERAL_PWRMGT);
	tmp |= GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp |= DYNAMIC_PM_EN;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);

	WREG32_P(BIF_LNCNT_RESET, 0, ~RESET_LNCNT_EN);

	smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Enable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	ret = ci_enable_sclk_mclk_dpm(rdev, true);
	if (ret)
		return ret;

	if (!pi->pcie_dpm_key_disabled) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Enable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_freeze_sclk_mclk_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if ((!pi->sclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_FreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	if ((!pi->mclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_FreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_stop_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(GENERAL_PWRMGT);
	tmp &= ~GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp &= ~DYNAMIC_PM_EN;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	if (!pi->pcie_dpm_key_disabled) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Disable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	ret = ci_enable_sclk_mclk_dpm(rdev, false);
	if (ret)
		return ret;

	smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Disable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	return 0;
}

static void ci_enable_sclk_control(struct radeon_device *rdev, bool enable)
{
	u32 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);

	if (enable)
		tmp &= ~SCLK_PWRMGT_OFF;
	else
		tmp |= SCLK_PWRMGT_OFF;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
}

#if 0
static int ci_notify_hw_of_power_source(struct radeon_device *rdev,
					bool ac_power)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	u32 power_limit;

	if (ac_power)
		power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
	else
		power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);

	ci_set_power_limit(rdev, power_limit);

	if (pi->caps_automatic_dc_transition) {
		if (ac_power)
			ci_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC);
		else
			ci_send_msg_to_smc(rdev, PPSMC_MSG_Remove_DC_Clamp);
	}

	return 0;
}
#endif

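/*
 * SMC mailbox protocol: write the message id to SMC_MESSAGE_0, then poll
 * SMC_RESP_0 until it goes non-zero; the final value is the PPSMC_Result
 * code.  Message parameters travel in SMC_MSG_ARG_0 (see the helpers
 * below).
 */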
static PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg)
{
	u32 tmp;
	int i;

	if (!ci_is_smc_running(rdev))
		return PPSMC_Result_Failed;

	WREG32(SMC_MESSAGE_0, msg);

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(SMC_RESP_0);
		if (tmp != 0)
			break;
		udelay(1);
	}
	tmp = RREG32(SMC_RESP_0);

	return (PPSMC_Result)tmp;
}

static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
						      PPSMC_Msg msg, u32 parameter)
{
	WREG32(SMC_MSG_ARG_0, parameter);
	return ci_send_msg_to_smc(rdev, msg);
}

static PPSMC_Result ci_send_msg_to_smc_return_parameter(struct radeon_device *rdev,
							PPSMC_Msg msg, u32 *parameter)
{
	PPSMC_Result smc_result;

	smc_result = ci_send_msg_to_smc(rdev, msg);

	if ((smc_result == PPSMC_Result_OK) && parameter)
		*parameter = RREG32(SMC_MSG_ARG_0);

	return smc_result;
}

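/*
 * The *_SetEnabledMask messages take a bitmask of allowed DPM levels,
 * so "1 << n" restricts the SMC to exactly level n; PCIe instead uses a
 * plain ForceLevel index.
 */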
1697 static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n)
1698 {
1699 	struct ci_power_info *pi = ci_get_pi(rdev);
1700 
1701 	if (!pi->sclk_dpm_key_disabled) {
1702 		PPSMC_Result smc_result =
1703 			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
1704 		if (smc_result != PPSMC_Result_OK)
1705 			return -EINVAL;
1706 	}
1707 
1708 	return 0;
1709 }
1710 
1711 static int ci_dpm_force_state_mclk(struct radeon_device *rdev, u32 n)
1712 {
1713 	struct ci_power_info *pi = ci_get_pi(rdev);
1714 
1715 	if (!pi->mclk_dpm_key_disabled) {
1716 		PPSMC_Result smc_result =
1717 			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
1718 		if (smc_result != PPSMC_Result_OK)
1719 			return -EINVAL;
1720 	}
1721 
1722 	return 0;
1723 }
1724 
1725 static int ci_dpm_force_state_pcie(struct radeon_device *rdev, u32 n)
1726 {
1727 	struct ci_power_info *pi = ci_get_pi(rdev);
1728 
1729 	if (!pi->pcie_dpm_key_disabled) {
1730 		PPSMC_Result smc_result =
1731 			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
1732 		if (smc_result != PPSMC_Result_OK)
1733 			return -EINVAL;
1734 	}
1735 
1736 	return 0;
1737 }
1738 
1739 static int ci_set_power_limit(struct radeon_device *rdev, u32 n)
1740 {
1741 	struct ci_power_info *pi = ci_get_pi(rdev);
1742 
1743 	if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
1744 		PPSMC_Result smc_result =
1745 			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PkgPwrSetLimit, n);
1746 		if (smc_result != PPSMC_Result_OK)
1747 			return -EINVAL;
1748 	}
1749 
1750 	return 0;
1751 }
1752 
1753 static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
1754 				       u32 target_tdp)
1755 {
1756 	PPSMC_Result smc_result =
1757 		ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
1758 	if (smc_result != PPSMC_Result_OK)
1759 		return -EINVAL;
1760 	return 0;
1761 }
1762 
1763 #if 0
1764 static int ci_set_boot_state(struct radeon_device *rdev)
1765 {
1766 	return ci_enable_sclk_mclk_dpm(rdev, false);
1767 }
1768 #endif
1769 
1770 static u32 ci_get_average_sclk_freq(struct radeon_device *rdev)
1771 {
1772 	u32 sclk_freq;
1773 	PPSMC_Result smc_result =
1774 		ci_send_msg_to_smc_return_parameter(rdev,
1775 						    PPSMC_MSG_API_GetSclkFrequency,
1776 						    &sclk_freq);
1777 	if (smc_result != PPSMC_Result_OK)
1778 		sclk_freq = 0;
1779 
1780 	return sclk_freq;
1781 }
1782 
1783 static u32 ci_get_average_mclk_freq(struct radeon_device *rdev)
1784 {
1785 	u32 mclk_freq;
1786 	PPSMC_Result smc_result =
1787 		ci_send_msg_to_smc_return_parameter(rdev,
1788 						    PPSMC_MSG_API_GetMclkFrequency,
1789 						    &mclk_freq);
1790 	if (smc_result != PPSMC_Result_OK)
1791 		mclk_freq = 0;
1792 
1793 	return mclk_freq;
1794 }
1795 
1796 static void ci_dpm_start_smc(struct radeon_device *rdev)
1797 {
1798 	int i;
1799 
1800 	ci_program_jump_on_start(rdev);
1801 	ci_start_smc_clock(rdev);
1802 	ci_start_smc(rdev);
1803 	for (i = 0; i < rdev->usec_timeout; i++) {
1804 		if (RREG32_SMC(FIRMWARE_FLAGS) & INTERRUPTS_ENABLED)
1805 			break;
1806 	}
1807 }
1808 
1809 static void ci_dpm_stop_smc(struct radeon_device *rdev)
1810 {
1811 	ci_reset_smc(rdev);
1812 	ci_stop_smc_clock(rdev);
1813 }
1814 
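/*
 * Pull the offsets of the DPM, soft-register, MC register, fan and
 * MC ARB tables out of the SMU7 firmware header in SMC SRAM, so the
 * driver knows where to upload each table later.
 */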
1815 static int ci_process_firmware_header(struct radeon_device *rdev)
1816 {
1817 	struct ci_power_info *pi = ci_get_pi(rdev);
1818 	u32 tmp;
1819 	int ret;
1820 
1821 	ret = ci_read_smc_sram_dword(rdev,
1822 				     SMU7_FIRMWARE_HEADER_LOCATION +
1823 				     offsetof(SMU7_Firmware_Header, DpmTable),
1824 				     &tmp, pi->sram_end);
1825 	if (ret)
1826 		return ret;
1827 
1828 	pi->dpm_table_start = tmp;
1829 
1830 	ret = ci_read_smc_sram_dword(rdev,
1831 				     SMU7_FIRMWARE_HEADER_LOCATION +
1832 				     offsetof(SMU7_Firmware_Header, SoftRegisters),
1833 				     &tmp, pi->sram_end);
1834 	if (ret)
1835 		return ret;
1836 
1837 	pi->soft_regs_start = tmp;
1838 
1839 	ret = ci_read_smc_sram_dword(rdev,
1840 				     SMU7_FIRMWARE_HEADER_LOCATION +
1841 				     offsetof(SMU7_Firmware_Header, mcRegisterTable),
1842 				     &tmp, pi->sram_end);
1843 	if (ret)
1844 		return ret;
1845 
1846 	pi->mc_reg_table_start = tmp;
1847 
1848 	ret = ci_read_smc_sram_dword(rdev,
1849 				     SMU7_FIRMWARE_HEADER_LOCATION +
1850 				     offsetof(SMU7_Firmware_Header, FanTable),
1851 				     &tmp, pi->sram_end);
1852 	if (ret)
1853 		return ret;
1854 
1855 	pi->fan_table_start = tmp;
1856 
1857 	ret = ci_read_smc_sram_dword(rdev,
1858 				     SMU7_FIRMWARE_HEADER_LOCATION +
1859 				     offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
1860 				     &tmp, pi->sram_end);
1861 	if (ret)
1862 		return ret;
1863 
1864 	pi->arb_table_start = tmp;
1865 
1866 	return 0;
1867 }
1868 
1869 static void ci_read_clock_registers(struct radeon_device *rdev)
1870 {
1871 	struct ci_power_info *pi = ci_get_pi(rdev);
1872 
1873 	pi->clock_registers.cg_spll_func_cntl =
1874 		RREG32_SMC(CG_SPLL_FUNC_CNTL);
1875 	pi->clock_registers.cg_spll_func_cntl_2 =
1876 		RREG32_SMC(CG_SPLL_FUNC_CNTL_2);
1877 	pi->clock_registers.cg_spll_func_cntl_3 =
1878 		RREG32_SMC(CG_SPLL_FUNC_CNTL_3);
1879 	pi->clock_registers.cg_spll_func_cntl_4 =
1880 		RREG32_SMC(CG_SPLL_FUNC_CNTL_4);
1881 	pi->clock_registers.cg_spll_spread_spectrum =
1882 		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
1883 	pi->clock_registers.cg_spll_spread_spectrum_2 =
1884 		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM_2);
1885 	pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
1886 	pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
1887 	pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
1888 	pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
1889 	pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
1890 	pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
1891 	pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
1892 	pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
1893 	pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
1894 }
1895 
1896 static void ci_init_sclk_t(struct radeon_device *rdev)
1897 {
1898 	struct ci_power_info *pi = ci_get_pi(rdev);
1899 
1900 	pi->low_sclk_interrupt_t = 0;
1901 }
1902 
1903 static void ci_enable_thermal_protection(struct radeon_device *rdev,
1904 					 bool enable)
1905 {
1906 	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
1907 
1908 	if (enable)
1909 		tmp &= ~THERMAL_PROTECTION_DIS;
1910 	else
1911 		tmp |= THERMAL_PROTECTION_DIS;
1912 	WREG32_SMC(GENERAL_PWRMGT, tmp);
1913 }
1914 
1915 static void ci_enable_acpi_power_management(struct radeon_device *rdev)
1916 {
1917 	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
1918 
1919 	tmp |= STATIC_PM_EN;
1920 
1921 	WREG32_SMC(GENERAL_PWRMGT, tmp);
1922 }
1923 
1924 #if 0
1925 static int ci_enter_ulp_state(struct radeon_device *rdev)
1926 {
1928 	WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
1929 
1930 	mdelay(25);
1931 
1932 	return 0;
1933 }
1934 
1935 static int ci_exit_ulp_state(struct radeon_device *rdev)
1936 {
1937 	int i;
1938 
1939 	WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
1940 
1941 	mdelay(7);
1942 
1943 	for (i = 0; i < rdev->usec_timeout; i++) {
1944 		if (RREG32(SMC_RESP_0) == 1)
1945 			break;
1946 		mdelay(1);
1947 	}
1948 
1949 	return 0;
1950 }
1951 #endif
1952 
1953 static int ci_notify_smc_display_change(struct radeon_device *rdev,
1954 					bool has_display)
1955 {
1956 	PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
1957 
1958 	return (ci_send_msg_to_smc(rdev, msg) == PPSMC_Result_OK) ?  0 : -EINVAL;
1959 }
1960 
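/*
 * Master deep-sleep switch: when enabling, DeepSleep_ON is selected only
 * if the sclk deep-sleep cap is set (otherwise it is explicitly forced
 * OFF); when disabling, OFF only needs to be sent if the cap is set.
 */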
1961 static int ci_enable_ds_master_switch(struct radeon_device *rdev,
1962 				      bool enable)
1963 {
1964 	struct ci_power_info *pi = ci_get_pi(rdev);
1965 
1966 	if (enable) {
1967 		if (pi->caps_sclk_ds) {
1968 			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
1969 				return -EINVAL;
1970 		} else {
1971 			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
1972 				return -EINVAL;
1973 		}
1974 	} else {
1975 		if (pi->caps_sclk_ds) {
1976 			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
1977 				return -EINVAL;
1978 		}
1979 	}
1980 
1981 	return 0;
1982 }
1983 
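/*
 * Program the display gap heuristics.  frame_time_in_us is derived from
 * the refresh rate and, after subtracting the vblank time and a 200 us
 * guard band, converted to reference-clock cycles for
 * CG_DISPLAY_GAP_CNTL2 (this assumes spll.reference_freq is in the usual
 * 10 kHz units, so ref_clock / 100 is cycles per microsecond).
 */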
1984 static void ci_program_display_gap(struct radeon_device *rdev)
1985 {
1986 	u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
1987 	u32 pre_vbi_time_in_us;
1988 	u32 frame_time_in_us;
1989 	u32 ref_clock = rdev->clock.spll.reference_freq;
1990 	u32 refresh_rate = r600_dpm_get_vrefresh(rdev);
1991 	u32 vblank_time = r600_dpm_get_vblank_time(rdev);
1992 
1993 	tmp &= ~DISP_GAP_MASK;
1994 	if (rdev->pm.dpm.new_active_crtc_count > 0)
1995 		tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
1996 	else
1997 		tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE);
1998 	WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
1999 
2000 	if (refresh_rate == 0)
2001 		refresh_rate = 60;
2002 	if (vblank_time == 0xffffffff)
2003 		vblank_time = 500;
2004 	frame_time_in_us = 1000000 / refresh_rate;
2005 	pre_vbi_time_in_us =
2006 		frame_time_in_us - 200 - vblank_time;
2007 	tmp = pre_vbi_time_in_us * (ref_clock / 100);
2008 
2009 	WREG32_SMC(CG_DISPLAY_GAP_CNTL2, tmp);
2010 	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
2011 	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
2012 
2014 	ci_notify_smc_display_change(rdev, (rdev->pm.dpm.new_active_crtc_count == 1));
2016 }
2017 
2018 static void ci_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
2019 {
2020 	struct ci_power_info *pi = ci_get_pi(rdev);
2021 	u32 tmp;
2022 
2023 	if (enable) {
2024 		if (pi->caps_sclk_ss_support) {
2025 			tmp = RREG32_SMC(GENERAL_PWRMGT);
2026 			tmp |= DYN_SPREAD_SPECTRUM_EN;
2027 			WREG32_SMC(GENERAL_PWRMGT, tmp);
2028 		}
2029 	} else {
2030 		tmp = RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
2031 		tmp &= ~SSEN;
2032 		WREG32_SMC(CG_SPLL_SPREAD_SPECTRUM, tmp);
2033 
2034 		tmp = RREG32_SMC(GENERAL_PWRMGT);
2035 		tmp &= ~DYN_SPREAD_SPECTRUM_EN;
2036 		WREG32_SMC(GENERAL_PWRMGT, tmp);
2037 	}
2038 }
2039 
2040 static void ci_program_sstp(struct radeon_device *rdev)
2041 {
2042 	WREG32_SMC(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
2043 }
2044 
2045 static void ci_enable_display_gap(struct radeon_device *rdev)
2046 {
2047 	u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
2048 
2049 	tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK);
2050 	tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
2051 		DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK));
2052 
2053 	WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
2054 }
2055 
2056 static void ci_program_vc(struct radeon_device *rdev)
2057 {
2058 	u32 tmp;
2059 
2060 	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
2061 	tmp &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
2062 	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
2063 
2064 	WREG32_SMC(CG_FTV_0, CISLANDS_VRC_DFLT0);
2065 	WREG32_SMC(CG_FTV_1, CISLANDS_VRC_DFLT1);
2066 	WREG32_SMC(CG_FTV_2, CISLANDS_VRC_DFLT2);
2067 	WREG32_SMC(CG_FTV_3, CISLANDS_VRC_DFLT3);
2068 	WREG32_SMC(CG_FTV_4, CISLANDS_VRC_DFLT4);
2069 	WREG32_SMC(CG_FTV_5, CISLANDS_VRC_DFLT5);
2070 	WREG32_SMC(CG_FTV_6, CISLANDS_VRC_DFLT6);
2071 	WREG32_SMC(CG_FTV_7, CISLANDS_VRC_DFLT7);
2072 }
2073 
2074 static void ci_clear_vc(struct radeon_device *rdev)
2075 {
2076 	u32 tmp;
2077 
2078 	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
2079 	tmp |= (RESET_SCLK_CNT | RESET_BUSY_CNT);
2080 	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
2081 
2082 	WREG32_SMC(CG_FTV_0, 0);
2083 	WREG32_SMC(CG_FTV_1, 0);
2084 	WREG32_SMC(CG_FTV_2, 0);
2085 	WREG32_SMC(CG_FTV_3, 0);
2086 	WREG32_SMC(CG_FTV_4, 0);
2087 	WREG32_SMC(CG_FTV_5, 0);
2088 	WREG32_SMC(CG_FTV_6, 0);
2089 	WREG32_SMC(CG_FTV_7, 0);
2090 }
2091 
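/*
 * Wait for the SMC boot sequence to complete (BOOT_SEQ_DONE), then stop
 * the SMC clock, hold the SMC in reset and upload the SMC microcode
 * image into its SRAM.
 */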
2092 static int ci_upload_firmware(struct radeon_device *rdev)
2093 {
2094 	struct ci_power_info *pi = ci_get_pi(rdev);
2095 	int i;
2096 
2097 	for (i = 0; i < rdev->usec_timeout; i++) {
2098 		if (RREG32_SMC(RCU_UC_EVENTS) & BOOT_SEQ_DONE)
2099 			break;
2100 	}
2101 	WREG32_SMC(SMC_SYSCON_MISC_CNTL, 1);
2102 
2103 	ci_stop_smc_clock(rdev);
2104 	ci_reset_smc(rdev);
2105 
2106 	return ci_load_smc_ucode(rdev, pi->sram_end);
2110 }
2111 
2112 static int ci_get_svi2_voltage_table(struct radeon_device *rdev,
2113 				     struct radeon_clock_voltage_dependency_table *voltage_dependency_table,
2114 				     struct atom_voltage_table *voltage_table)
2115 {
2116 	u32 i;
2117 
2118 	if (voltage_dependency_table == NULL)
2119 		return -EINVAL;
2120 
2121 	voltage_table->mask_low = 0;
2122 	voltage_table->phase_delay = 0;
2123 
2124 	voltage_table->count = voltage_dependency_table->count;
2125 	for (i = 0; i < voltage_table->count; i++) {
2126 		voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
2127 		voltage_table->entries[i].smio_low = 0;
2128 	}
2129 
2130 	return 0;
2131 }
2132 
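/*
 * Build the VDDC/VDDCI/MVDD voltage tables, either from the GPIO LUT in
 * the vbios or from the SVI2 clock-voltage dependency tables, and trim
 * each one down to the number of levels the SMC state table can hold.
 */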
2133 static int ci_construct_voltage_tables(struct radeon_device *rdev)
2134 {
2135 	struct ci_power_info *pi = ci_get_pi(rdev);
2136 	int ret;
2137 
2138 	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2139 		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
2140 						    VOLTAGE_OBJ_GPIO_LUT,
2141 						    &pi->vddc_voltage_table);
2142 		if (ret)
2143 			return ret;
2144 	} else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2145 		ret = ci_get_svi2_voltage_table(rdev,
2146 						&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2147 						&pi->vddc_voltage_table);
2148 		if (ret)
2149 			return ret;
2150 	}
2151 
2152 	if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
2153 		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDC,
2154 							 &pi->vddc_voltage_table);
2155 
2156 	if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2157 		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
2158 						    VOLTAGE_OBJ_GPIO_LUT,
2159 						    &pi->vddci_voltage_table);
2160 		if (ret)
2161 			return ret;
2162 	} else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2163 		ret = ci_get_svi2_voltage_table(rdev,
2164 						&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2165 						&pi->vddci_voltage_table);
2166 		if (ret)
2167 			return ret;
2168 	}
2169 
2170 	if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
2171 		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDCI,
2172 							 &pi->vddci_voltage_table);
2173 
2174 	if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2175 		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC,
2176 						    VOLTAGE_OBJ_GPIO_LUT,
2177 						    &pi->mvdd_voltage_table);
2178 		if (ret)
2179 			return ret;
2180 	} else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2181 		ret = ci_get_svi2_voltage_table(rdev,
2182 						&rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
2183 						&pi->mvdd_voltage_table);
2184 		if (ret)
2185 			return ret;
2186 	}
2187 
2188 	if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
2189 		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_MVDD,
2190 							 &pi->mvdd_voltage_table);
2191 
2192 	return 0;
2193 }
2194 
2195 static void ci_populate_smc_voltage_table(struct radeon_device *rdev,
2196 					  struct atom_voltage_table_entry *voltage_table,
2197 					  SMU7_Discrete_VoltageLevel *smc_voltage_table)
2198 {
2199 	int ret;
2200 
2201 	ret = ci_get_std_voltage_value_sidd(rdev, voltage_table,
2202 					    &smc_voltage_table->StdVoltageHiSidd,
2203 					    &smc_voltage_table->StdVoltageLoSidd);
2204 
2205 	if (ret) {
2206 		smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
2207 		smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
2208 	}
2209 
2210 	smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
2211 	smc_voltage_table->StdVoltageHiSidd =
2212 		cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
2213 	smc_voltage_table->StdVoltageLoSidd =
2214 		cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
2215 }
2216 
2217 static int ci_populate_smc_vddc_table(struct radeon_device *rdev,
2218 				      SMU7_Discrete_DpmTable *table)
2219 {
2220 	struct ci_power_info *pi = ci_get_pi(rdev);
2221 	unsigned int count;
2222 
2223 	table->VddcLevelCount = pi->vddc_voltage_table.count;
2224 	for (count = 0; count < table->VddcLevelCount; count++) {
2225 		ci_populate_smc_voltage_table(rdev,
2226 					      &pi->vddc_voltage_table.entries[count],
2227 					      &table->VddcLevel[count]);
2228 
2229 		if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2230 			table->VddcLevel[count].Smio |=
2231 				pi->vddc_voltage_table.entries[count].smio_low;
2232 		else
2233 			table->VddcLevel[count].Smio = 0;
2234 	}
2235 	table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
2236 
2237 	return 0;
2238 }
2239 
2240 static int ci_populate_smc_vddci_table(struct radeon_device *rdev,
2241 				       SMU7_Discrete_DpmTable *table)
2242 {
2243 	unsigned int count;
2244 	struct ci_power_info *pi = ci_get_pi(rdev);
2245 
2246 	table->VddciLevelCount = pi->vddci_voltage_table.count;
2247 	for (count = 0; count < table->VddciLevelCount; count++) {
2248 		ci_populate_smc_voltage_table(rdev,
2249 					      &pi->vddci_voltage_table.entries[count],
2250 					      &table->VddciLevel[count]);
2251 
2252 		if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2253 			table->VddciLevel[count].Smio |=
2254 				pi->vddci_voltage_table.entries[count].smio_low;
2255 		else
2256 			table->VddciLevel[count].Smio = 0;
2257 	}
2258 	table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
2259 
2260 	return 0;
2261 }
2262 
2263 static int ci_populate_smc_mvdd_table(struct radeon_device *rdev,
2264 				      SMU7_Discrete_DpmTable *table)
2265 {
2266 	struct ci_power_info *pi = ci_get_pi(rdev);
2267 	unsigned int count;
2268 
2269 	table->MvddLevelCount = pi->mvdd_voltage_table.count;
2270 	for (count = 0; count < table->MvddLevelCount; count++) {
2271 		ci_populate_smc_voltage_table(rdev,
2272 					      &pi->mvdd_voltage_table.entries[count],
2273 					      &table->MvddLevel[count]);
2274 
2275 		if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2276 			table->MvddLevel[count].Smio |=
2277 				pi->mvdd_voltage_table.entries[count].smio_low;
2278 		else
2279 			table->MvddLevel[count].Smio = 0;
2280 	}
2281 	table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
2282 
2283 	return 0;
2284 }
2285 
2286 static int ci_populate_smc_voltage_tables(struct radeon_device *rdev,
2287 					  SMU7_Discrete_DpmTable *table)
2288 {
2289 	int ret;
2290 
2291 	ret = ci_populate_smc_vddc_table(rdev, table);
2292 	if (ret)
2293 		return ret;
2294 
2295 	ret = ci_populate_smc_vddci_table(rdev, table);
2296 	if (ret)
2297 		return ret;
2298 
2299 	ret = ci_populate_smc_mvdd_table(rdev, table);
2300 	if (ret)
2301 		return ret;
2302 
2303 	return 0;
2304 }
2305 
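/*
 * Look up the MVDD level for a given mclk: the first dependency-table
 * entry whose clock is >= mclk selects the voltage.  Returns 0 with
 * *voltage filled in on success, -EINVAL if MVDD control is disabled or
 * no entry covers the requested clock.
 */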
2306 static int ci_populate_mvdd_value(struct radeon_device *rdev, u32 mclk,
2307 				  SMU7_Discrete_VoltageLevel *voltage)
2308 {
2309 	struct ci_power_info *pi = ci_get_pi(rdev);
2310 	u32 i = 0;
2311 
2312 	if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
2313 		for (i = 0; i < rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
2314 			if (mclk <= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
2315 				voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
2316 				break;
2317 			}
2318 		}
2319 
2320 		if (i < rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
2321 			return 0;
2322 	}
2323 
2324 	return -EINVAL;
2325 }
2326 
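/*
 * Derive the "standard" hi/lo SIDD voltages from the CAC leakage table:
 * first try an exact match against the vddc-on-sclk entries, then fall
 * back to the first entry with a voltage >= the requested one.  If no
 * leakage data exists, both outputs keep the value * VOLTAGE_SCALE
 * defaults set on entry.
 */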
2327 static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
2328 					 struct atom_voltage_table_entry *voltage_table,
2329 					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
2330 {
2331 	u16 v_index, idx;
2332 	bool voltage_found = false;
2333 	*std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
2334 	*std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
2335 
2336 	if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
2337 		return -EINVAL;
2338 
2339 	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
2340 		for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2341 			if (voltage_table->value ==
2342 			    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2343 				voltage_found = true;
2344 				if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
2345 					idx = v_index;
2346 				else
2347 					idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2348 				*std_voltage_lo_sidd =
2349 					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2350 				*std_voltage_hi_sidd =
2351 					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2352 				break;
2353 			}
2354 		}
2355 
2356 		if (!voltage_found) {
2357 			for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2358 				if (voltage_table->value <=
2359 				    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2360 					voltage_found = true;
2361 					if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
2362 						idx = v_index;
2363 					else
2364 						idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2365 					*std_voltage_lo_sidd =
2366 						rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2367 					*std_voltage_hi_sidd =
2368 						rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2369 					break;
2370 				}
2371 			}
2372 		}
2373 	}
2374 
2375 	return 0;
2376 }
2377 
2378 static void ci_populate_phase_value_based_on_sclk(struct radeon_device *rdev,
2379 						  const struct radeon_phase_shedding_limits_table *limits,
2380 						  u32 sclk,
2381 						  u32 *phase_shedding)
2382 {
2383 	unsigned int i;
2384 
2385 	*phase_shedding = 1;
2386 
2387 	for (i = 0; i < limits->count; i++) {
2388 		if (sclk < limits->entries[i].sclk) {
2389 			*phase_shedding = i;
2390 			break;
2391 		}
2392 	}
2393 }
2394 
2395 static void ci_populate_phase_value_based_on_mclk(struct radeon_device *rdev,
2396 						  const struct radeon_phase_shedding_limits_table *limits,
2397 						  u32 mclk,
2398 						  u32 *phase_shedding)
2399 {
2400 	unsigned int i;
2401 
2402 	*phase_shedding = 1;
2403 
2404 	for (i = 0; i < limits->count; i++) {
2405 		if (mclk < limits->entries[i].mclk) {
2406 			*phase_shedding = i;
2407 			break;
2408 		}
2409 	}
2410 }
2411 
2412 static int ci_init_arb_table_index(struct radeon_device *rdev)
2413 {
2414 	struct ci_power_info *pi = ci_get_pi(rdev);
2415 	u32 tmp;
2416 	int ret;
2417 
2418 	ret = ci_read_smc_sram_dword(rdev, pi->arb_table_start,
2419 				     &tmp, pi->sram_end);
2420 	if (ret)
2421 		return ret;
2422 
2423 	tmp &= 0x00FFFFFF;
2424 	tmp |= MC_CG_ARB_FREQ_F1 << 24;
2425 
2426 	return ci_write_smc_sram_dword(rdev, pi->arb_table_start,
2427 				       tmp, pi->sram_end);
2428 }
2429 
2430 static int ci_get_dependency_volt_by_clk(struct radeon_device *rdev,
2431 					 struct radeon_clock_voltage_dependency_table *allowed_clock_voltage_table,
2432 					 u32 clock, u32 *voltage)
2433 {
2434 	u32 i = 0;
2435 
2436 	if (allowed_clock_voltage_table->count == 0)
2437 		return -EINVAL;
2438 
2439 	for (i = 0; i < allowed_clock_voltage_table->count; i++) {
2440 		if (allowed_clock_voltage_table->entries[i].clk >= clock) {
2441 			*voltage = allowed_clock_voltage_table->entries[i].v;
2442 			return 0;
2443 		}
2444 	}
2445 
2446 	*voltage = allowed_clock_voltage_table->entries[i-1].v;
2447 
2448 	return 0;
2449 }
2450 
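/*
 * Pick the largest deep-sleep divider ID such that sclk / (1 << id)
 * stays at or above the minimum engine clock (or the caller's
 * min_sclk_in_sr, whichever is higher).  Returns 0 if sclk is already
 * below that floor.
 */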
2451 static u8 ci_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
2452 					     u32 sclk, u32 min_sclk_in_sr)
2453 {
2454 	u32 i;
2455 	u32 tmp;
2456 	u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ?
2457 		min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK;
2458 
2459 	if (sclk < min)
2460 		return 0;
2461 
2462 	for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
2463 		tmp = sclk / (1 << i);
2464 		if (tmp >= min || i == 0)
2465 			break;
2466 	}
2467 
2468 	return (u8)i;
2469 }
2470 
2471 static int ci_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev)
2472 {
2473 	return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
2474 }
2475 
2476 static int ci_reset_to_default(struct radeon_device *rdev)
2477 {
2478 	return (ci_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
2479 		0 : -EINVAL;
2480 }
2481 
2482 static int ci_force_switch_to_arb_f0(struct radeon_device *rdev)
2483 {
2484 	u32 tmp;
2485 
2486 	tmp = (RREG32_SMC(SMC_SCRATCH9) & 0x0000ff00) >> 8;
2487 
2488 	if (tmp == MC_CG_ARB_FREQ_F0)
2489 		return 0;
2490 
2491 	return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
2492 }
2493 
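/*
 * Apparently a board-specific workaround: on certain Hawaii parts
 * (device IDs 0x67B0/0x67B1, when MC_SEQ_MISC0 indicates the affected
 * memory type) the DRAM timing2 activity field is recomputed from the
 * engine clock for two memory-clock bands (assuming the usual 10 kHz
 * clock units, roughly 1.0-1.25 GHz and 1.25-1.375 GHz).
 */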
2494 static void ci_register_patching_mc_arb(struct radeon_device *rdev,
2495 					const u32 engine_clock,
2496 					const u32 memory_clock,
2497 					u32 *dram_timimg2)
2498 {
2499 	bool patch;
2500 	u32 tmp, tmp2;
2501 
2502 	tmp = RREG32(MC_SEQ_MISC0);
2503 	patch = (tmp & 0x00000f00) == 0x300;
2504 
2505 	if (patch &&
2506 	    ((rdev->pdev->device == 0x67B0) ||
2507 	     (rdev->pdev->device == 0x67B1))) {
2508 		if ((memory_clock > 100000) && (memory_clock <= 125000)) {
2509 			tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
2510 			*dram_timimg2 &= ~0x00ff0000;
2511 			*dram_timimg2 |= tmp2 << 16;
2512 		} else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
2513 			tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
2514 			*dram_timimg2 &= ~0x00ff0000;
2515 			*dram_timimg2 |= tmp2 << 16;
2516 		}
2517 	}
2518 }
2519 
2521 static int ci_populate_memory_timing_parameters(struct radeon_device *rdev,
2522 						u32 sclk,
2523 						u32 mclk,
2524 						SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
2525 {
2526 	u32 dram_timing;
2527 	u32 dram_timing2;
2528 	u32 burst_time;
2529 
2530 	radeon_atom_set_engine_dram_timings(rdev, sclk, mclk);
2531 
2532 	dram_timing  = RREG32(MC_ARB_DRAM_TIMING);
2533 	dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
2534 	burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
2535 
2536 	ci_register_patching_mc_arb(rdev, sclk, mclk, &dram_timing2);
2537 
2538 	arb_regs->McArbDramTiming  = cpu_to_be32(dram_timing);
2539 	arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
2540 	arb_regs->McArbBurstTime = (u8)burst_time;
2541 
2542 	return 0;
2543 }
2544 
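/*
 * Fill one MC ARB DRAM timing entry for every (sclk, mclk) pair in the
 * DPM tables and copy the whole table into SMC SRAM at arb_table_start.
 */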
2545 static int ci_do_program_memory_timing_parameters(struct radeon_device *rdev)
2546 {
2547 	struct ci_power_info *pi = ci_get_pi(rdev);
2548 	SMU7_Discrete_MCArbDramTimingTable arb_regs;
2549 	u32 i, j;
2550 	int ret =  0;
2551 
2552 	memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
2553 
2554 	for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
2555 		for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
2556 			ret = ci_populate_memory_timing_parameters(rdev,
2557 								   pi->dpm_table.sclk_table.dpm_levels[i].value,
2558 								   pi->dpm_table.mclk_table.dpm_levels[j].value,
2559 								   &arb_regs.entries[i][j]);
2560 			if (ret)
2561 				break;
2562 		}
2563 	}
2564 
2565 	if (ret == 0)
2566 		ret = ci_copy_bytes_to_smc(rdev,
2567 					   pi->arb_table_start,
2568 					   (u8 *)&arb_regs,
2569 					   sizeof(SMU7_Discrete_MCArbDramTimingTable),
2570 					   pi->sram_end);
2571 
2572 	return ret;
2573 }
2574 
2575 static int ci_program_memory_timing_parameters(struct radeon_device *rdev)
2576 {
2577 	struct ci_power_info *pi = ci_get_pi(rdev);
2578 
2579 	if (pi->need_update_smu7_dpm_table == 0)
2580 		return 0;
2581 
2582 	return ci_do_program_memory_timing_parameters(rdev);
2583 }
2584 
2585 static void ci_populate_smc_initial_state(struct radeon_device *rdev,
2586 					  struct radeon_ps *radeon_boot_state)
2587 {
2588 	struct ci_ps *boot_state = ci_get_ps(radeon_boot_state);
2589 	struct ci_power_info *pi = ci_get_pi(rdev);
2590 	u32 level = 0;
2591 
2592 	for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
2593 		if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
2594 		    boot_state->performance_levels[0].sclk) {
2595 			pi->smc_state_table.GraphicsBootLevel = level;
2596 			break;
2597 		}
2598 	}
2599 
2600 	for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
2601 		if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
2602 		    boot_state->performance_levels[0].mclk) {
2603 			pi->smc_state_table.MemoryBootLevel = level;
2604 			break;
2605 		}
2606 	}
2607 }
2608 
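/*
 * Collapse the per-level enabled flags into a bitmask for the SMC,
 * with bit 0 corresponding to dpm_levels[0].
 */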
2609 static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
2610 {
2611 	u32 i;
2612 	u32 mask_value = 0;
2613 
2614 	for (i = dpm_table->count; i > 0; i--) {
2615 		mask_value = mask_value << 1;
2616 		if (dpm_table->dpm_levels[i-1].enabled)
2617 			mask_value |= 0x1;
2618 		else
2619 			mask_value &= 0xFFFFFFFE;
2620 	}
2621 
2622 	return mask_value;
2623 }
2624 
2625 static void ci_populate_smc_link_level(struct radeon_device *rdev,
2626 				       SMU7_Discrete_DpmTable *table)
2627 {
2628 	struct ci_power_info *pi = ci_get_pi(rdev);
2629 	struct ci_dpm_table *dpm_table = &pi->dpm_table;
2630 	u32 i;
2631 
2632 	for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
2633 		table->LinkLevel[i].PcieGenSpeed =
2634 			(u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
2635 		table->LinkLevel[i].PcieLaneCount =
2636 			r600_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
2637 		table->LinkLevel[i].EnabledForActivity = 1;
2638 		table->LinkLevel[i].DownT = cpu_to_be32(5);
2639 		table->LinkLevel[i].UpT = cpu_to_be32(30);
2640 	}
2641 
2642 	pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
2643 	pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
2644 		ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
2645 }
2646 
2647 static int ci_populate_smc_uvd_level(struct radeon_device *rdev,
2648 				     SMU7_Discrete_DpmTable *table)
2649 {
2650 	u32 count;
2651 	struct atom_clock_dividers dividers;
2652 	int ret = -EINVAL;
2653 
2654 	table->UvdLevelCount =
2655 		rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
2656 
2657 	for (count = 0; count < table->UvdLevelCount; count++) {
2658 		table->UvdLevel[count].VclkFrequency =
2659 			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
2660 		table->UvdLevel[count].DclkFrequency =
2661 			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
2662 		table->UvdLevel[count].MinVddc =
2663 			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2664 		table->UvdLevel[count].MinVddcPhases = 1;
2665 
2666 		ret = radeon_atom_get_clock_dividers(rdev,
2667 						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2668 						     table->UvdLevel[count].VclkFrequency, false, &dividers);
2669 		if (ret)
2670 			return ret;
2671 
2672 		table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
2673 
2674 		ret = radeon_atom_get_clock_dividers(rdev,
2675 						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2676 						     table->UvdLevel[count].DclkFrequency, false, &dividers);
2677 		if (ret)
2678 			return ret;
2679 
2680 		table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
2681 
2682 		table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
2683 		table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
2684 		table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
2685 	}
2686 
2687 	return ret;
2688 }
2689 
2690 static int ci_populate_smc_vce_level(struct radeon_device *rdev,
2691 				     SMU7_Discrete_DpmTable *table)
2692 {
2693 	u32 count;
2694 	struct atom_clock_dividers dividers;
2695 	int ret = -EINVAL;
2696 
2697 	table->VceLevelCount =
2698 		rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
2699 
2700 	for (count = 0; count < table->VceLevelCount; count++) {
2701 		table->VceLevel[count].Frequency =
2702 			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
2703 		table->VceLevel[count].MinVoltage =
2704 			(u16)rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2705 		table->VceLevel[count].MinPhases = 1;
2706 
2707 		ret = radeon_atom_get_clock_dividers(rdev,
2708 						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2709 						     table->VceLevel[count].Frequency, false, &dividers);
2710 		if (ret)
2711 			return ret;
2712 
2713 		table->VceLevel[count].Divider = (u8)dividers.post_divider;
2714 
2715 		table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
2716 		table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
2717 	}
2718 
2719 	return ret;
2720 	return ret;
2722 }
2723 static int ci_populate_smc_acp_level(struct radeon_device *rdev,
2724 				     SMU7_Discrete_DpmTable *table)
2725 {
2726 	u32 count;
2727 	struct atom_clock_dividers dividers;
2728 	int ret = -EINVAL;
2729 
2730 	table->AcpLevelCount = (u8)
2731 		(rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
2732 
2733 	for (count = 0; count < table->AcpLevelCount; count++) {
2734 		table->AcpLevel[count].Frequency =
2735 			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
2736 		table->AcpLevel[count].MinVoltage =
2737 			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2738 		table->AcpLevel[count].MinPhases = 1;
2739 
2740 		ret = radeon_atom_get_clock_dividers(rdev,
2741 						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2742 						     table->AcpLevel[count].Frequency, false, &dividers);
2743 		if (ret)
2744 			return ret;
2745 
2746 		table->AcpLevel[count].Divider = (u8)dividers.post_divider;
2747 
2748 		table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
2749 		table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
2750 	}
2751 
2752 	return ret;
2753 }
2754 
2755 static int ci_populate_smc_samu_level(struct radeon_device *rdev,
2756 				      SMU7_Discrete_DpmTable *table)
2757 {
2758 	u32 count;
2759 	struct atom_clock_dividers dividers;
2760 	int ret = -EINVAL;
2761 
2762 	table->SamuLevelCount =
2763 		rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
2764 
2765 	for (count = 0; count < table->SamuLevelCount; count++) {
2766 		table->SamuLevel[count].Frequency =
2767 			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
2768 		table->SamuLevel[count].MinVoltage =
2769 			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2770 		table->SamuLevel[count].MinPhases = 1;
2771 
2772 		ret = radeon_atom_get_clock_dividers(rdev,
2773 						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2774 						     table->SamuLevel[count].Frequency, false, &dividers);
2775 		if (ret)
2776 			return ret;
2777 
2778 		table->SamuLevel[count].Divider = (u8)dividers.post_divider;
2779 
2780 		table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
2781 		table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
2782 	}
2783 
2784 	return ret;
2785 }
2786 
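/*
 * Compute the MPLL register set for a memory clock: feedback/post
 * dividers come from the atom tables, optional memory spread spectrum
 * is scaled to the nominal VCO frequency, and the DLL speed and DLL
 * power-down bits are set according to dll_state_on.
 */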
2787 static int ci_calculate_mclk_params(struct radeon_device *rdev,
2788 				    u32 memory_clock,
2789 				    SMU7_Discrete_MemoryLevel *mclk,
2790 				    bool strobe_mode,
2791 				    bool dll_state_on)
2792 {
2793 	struct ci_power_info *pi = ci_get_pi(rdev);
2794 	u32  dll_cntl = pi->clock_registers.dll_cntl;
2795 	u32  mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2796 	u32  mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
2797 	u32  mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
2798 	u32  mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
2799 	u32  mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
2800 	u32  mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
2801 	u32  mpll_ss1 = pi->clock_registers.mpll_ss1;
2802 	u32  mpll_ss2 = pi->clock_registers.mpll_ss2;
2803 	struct atom_mpll_param mpll_param;
2804 	int ret;
2805 
2806 	ret = radeon_atom_get_memory_pll_dividers(rdev, memory_clock, strobe_mode, &mpll_param);
2807 	if (ret)
2808 		return ret;
2809 
2810 	mpll_func_cntl &= ~BWCTRL_MASK;
2811 	mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);
2812 
2813 	mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
2814 	mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
2815 		CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);
2816 
2817 	mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
2818 	mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);
2819 
2820 	if (pi->mem_gddr5) {
2821 		mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
2822 		mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
2823 			YCLK_POST_DIV(mpll_param.post_div);
2824 	}
2825 
2826 	if (pi->caps_mclk_ss_support) {
2827 		struct radeon_atom_ss ss;
2828 		u32 freq_nom;
2829 		u32 tmp;
2830 		u32 reference_clock = rdev->clock.mpll.reference_freq;
2831 
2832 		if (mpll_param.qdr == 1)
2833 			freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
2834 		else
2835 			freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);
2836 
2837 		tmp = (freq_nom / reference_clock);
2838 		tmp = tmp * tmp;
2839 		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
2840 						     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
2841 			u32 clks = reference_clock * 5 / ss.rate;
2842 			u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
2843 
2844 			mpll_ss1 &= ~CLKV_MASK;
2845 			mpll_ss1 |= CLKV(clkv);
2846 
2847 			mpll_ss2 &= ~CLKS_MASK;
2848 			mpll_ss2 |= CLKS(clks);
2849 		}
2850 	}
2851 
2852 	mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
2853 	mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);
2854 
2855 	if (dll_state_on)
2856 		mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
2857 	else
2858 		mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
2859 
2860 	mclk->MclkFrequency = memory_clock;
2861 	mclk->MpllFuncCntl = mpll_func_cntl;
2862 	mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
2863 	mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
2864 	mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
2865 	mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
2866 	mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
2867 	mclk->DllCntl = dll_cntl;
2868 	mclk->MpllSs1 = mpll_ss1;
2869 	mclk->MpllSs2 = mpll_ss2;
2870 
2871 	return 0;
2872 }
2873 
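/*
 * Build a single SMC memory level: minimum VDDC/VDDCI/MVDD are looked
 * up from the clock dependency tables, stutter mode is only enabled
 * below the stutter threshold with UVD idle and at most two active
 * crtcs, and strobe/EDC modes depend on the GDDR5 thresholds.  All
 * multi-byte fields are byte-swapped for the big-endian SMC at the end.
 */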
2874 static int ci_populate_single_memory_level(struct radeon_device *rdev,
2875 					   u32 memory_clock,
2876 					   SMU7_Discrete_MemoryLevel *memory_level)
2877 {
2878 	struct ci_power_info *pi = ci_get_pi(rdev);
2879 	int ret;
2880 	bool dll_state_on;
2881 
2882 	if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
2883 		ret = ci_get_dependency_volt_by_clk(rdev,
2884 						    &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2885 						    memory_clock, &memory_level->MinVddc);
2886 		if (ret)
2887 			return ret;
2888 	}
2889 
2890 	if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
2891 		ret = ci_get_dependency_volt_by_clk(rdev,
2892 						    &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2893 						    memory_clock, &memory_level->MinVddci);
2894 		if (ret)
2895 			return ret;
2896 	}
2897 
2898 	if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
2899 		ret = ci_get_dependency_volt_by_clk(rdev,
2900 						    &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
2901 						    memory_clock, &memory_level->MinMvdd);
2902 		if (ret)
2903 			return ret;
2904 	}
2905 
2906 	memory_level->MinVddcPhases = 1;
2907 
2908 	if (pi->vddc_phase_shed_control)
2909 		ci_populate_phase_value_based_on_mclk(rdev,
2910 						      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
2911 						      memory_clock,
2912 						      &memory_level->MinVddcPhases);
2913 
2914 	memory_level->EnabledForThrottle = 1;
2915 	memory_level->UpH = 0;
2916 	memory_level->DownH = 100;
2917 	memory_level->VoltageDownH = 0;
2918 	memory_level->ActivityLevel = (u16)pi->mclk_activity_target;
2919 
2920 	memory_level->StutterEnable = false;
2921 	memory_level->StrobeEnable = false;
2922 	memory_level->EdcReadEnable = false;
2923 	memory_level->EdcWriteEnable = false;
2924 	memory_level->RttEnable = false;
2925 
2926 	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2927 
2928 	if (pi->mclk_stutter_mode_threshold &&
2929 	    (memory_clock <= pi->mclk_stutter_mode_threshold) &&
2930 	    !pi->uvd_enabled &&
2931 	    (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
2932 	    (rdev->pm.dpm.new_active_crtc_count <= 2))
2933 		memory_level->StutterEnable = true;
2934 
2935 	if (pi->mclk_strobe_mode_threshold &&
2936 	    (memory_clock <= pi->mclk_strobe_mode_threshold))
2937 		memory_level->StrobeEnable = true;
2938 
2939 	if (pi->mem_gddr5) {
2940 		memory_level->StrobeRatio =
2941 			si_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
2942 		if (pi->mclk_edc_enable_threshold &&
2943 		    (memory_clock > pi->mclk_edc_enable_threshold))
2944 			memory_level->EdcReadEnable = true;
2945 
2946 		if (pi->mclk_edc_wr_enable_threshold &&
2947 		    (memory_clock > pi->mclk_edc_wr_enable_threshold))
2948 			memory_level->EdcWriteEnable = true;
2949 
2950 		if (memory_level->StrobeEnable) {
2951 			if (si_get_mclk_frequency_ratio(memory_clock, true) >=
2952 			    ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
2953 				dll_state_on = (RREG32(MC_SEQ_MISC5) >> 1) & 0x1;
2954 			else
2955 				dll_state_on = (RREG32(MC_SEQ_MISC6) >> 1) & 0x1;
2956 		} else {
2957 			dll_state_on = pi->dll_default_on;
2958 		}
2959 	} else {
2960 		memory_level->StrobeRatio = si_get_ddr3_mclk_frequency_ratio(memory_clock);
2961 		dll_state_on = (RREG32(MC_SEQ_MISC5) >> 1) & 0x1;
2962 	}
2963 
2964 	ret = ci_calculate_mclk_params(rdev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
2965 	if (ret)
2966 		return ret;
2967 
2968 	memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
2969 	memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
2970 	memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
2971 	memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
2972 
2973 	memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
2974 	memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
2975 	memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
2976 	memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
2977 	memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
2978 	memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
2979 	memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
2980 	memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
2981 	memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
2982 	memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
2983 	memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);
2984 
2985 	return 0;
2986 }
2987 
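/*
 * Populate the ACPI (lowest power) levels: SCLK is parked on the
 * reference clock with the SPLL powered down and held in reset, and the
 * memory level gets the DLLs reset/powered down with the MRDCK bypass
 * bits cleared.  Voltages fall back to the power-table minimums when no
 * explicit ACPI values exist.
 */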
2988 static int ci_populate_smc_acpi_level(struct radeon_device *rdev,
2989 				      SMU7_Discrete_DpmTable *table)
2990 {
2991 	struct ci_power_info *pi = ci_get_pi(rdev);
2992 	struct atom_clock_dividers dividers;
2993 	SMU7_Discrete_VoltageLevel voltage_level;
2994 	u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
2995 	u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
2996 	u32 dll_cntl = pi->clock_registers.dll_cntl;
2997 	u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2998 	int ret;
2999 
3000 	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
3001 
3002 	if (pi->acpi_vddc)
3003 		table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
3004 	else
3005 		table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);
3006 
3007 	table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;
3008 
3009 	table->ACPILevel.SclkFrequency = rdev->clock.spll.reference_freq;
3010 
3011 	ret = radeon_atom_get_clock_dividers(rdev,
3012 					     COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
3013 					     table->ACPILevel.SclkFrequency, false, &dividers);
3014 	if (ret)
3015 		return ret;
3016 
3017 	table->ACPILevel.SclkDid = (u8)dividers.post_divider;
3018 	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3019 	table->ACPILevel.DeepSleepDivId = 0;
3020 
3021 	spll_func_cntl &= ~SPLL_PWRON;
3022 	spll_func_cntl |= SPLL_RESET;
3023 
3024 	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
3025 	spll_func_cntl_2 |= SCLK_MUX_SEL(4);
3026 
3027 	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
3028 	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
3029 	table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
3030 	table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
3031 	table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
3032 	table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
3033 	table->ACPILevel.CcPwrDynRm = 0;
3034 	table->ACPILevel.CcPwrDynRm1 = 0;
3035 
3036 	table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
3037 	table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
3038 	table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
3039 	table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
3040 	table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
3041 	table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
3042 	table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
3043 	table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
3044 	table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
3045 	table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
3046 	table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);
3047 
3048 	table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
3049 	table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
3050 
3051 	if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
3052 		if (pi->acpi_vddci)
3053 			table->MemoryACPILevel.MinVddci =
3054 				cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
3055 		else
3056 			table->MemoryACPILevel.MinVddci =
3057 				cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
3058 	}
3059 
3060 	if (ci_populate_mvdd_value(rdev, 0, &voltage_level))
3061 		table->MemoryACPILevel.MinMvdd = 0;
3062 	else
3063 		table->MemoryACPILevel.MinMvdd =
3064 			cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);
3065 
3066 	mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
3067 	mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
3068 
3069 	dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
3070 
3071 	table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
3072 	table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
3073 	table->MemoryACPILevel.MpllAdFuncCntl =
3074 		cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
3075 	table->MemoryACPILevel.MpllDqFuncCntl =
3076 		cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
3077 	table->MemoryACPILevel.MpllFuncCntl =
3078 		cpu_to_be32(pi->clock_registers.mpll_func_cntl);
3079 	table->MemoryACPILevel.MpllFuncCntl_1 =
3080 		cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
3081 	table->MemoryACPILevel.MpllFuncCntl_2 =
3082 		cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
3083 	table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
3084 	table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);
3085 
3086 	table->MemoryACPILevel.EnabledForThrottle = 0;
3087 	table->MemoryACPILevel.EnabledForActivity = 0;
3088 	table->MemoryACPILevel.UpH = 0;
3089 	table->MemoryACPILevel.DownH = 100;
3090 	table->MemoryACPILevel.VoltageDownH = 0;
3091 	table->MemoryACPILevel.ActivityLevel =
3092 		cpu_to_be16((u16)pi->mclk_activity_target);
3093 
3094 	table->MemoryACPILevel.StutterEnable = false;
3095 	table->MemoryACPILevel.StrobeEnable = false;
3096 	table->MemoryACPILevel.EdcReadEnable = false;
3097 	table->MemoryACPILevel.EdcWriteEnable = false;
3098 	table->MemoryACPILevel.RttEnable = false;
3099 
3100 	return 0;
3101 }
3102 
3104 static int ci_enable_ulv(struct radeon_device *rdev, bool enable)
3105 {
3106 	struct ci_power_info *pi = ci_get_pi(rdev);
3107 	struct ci_ulv_parm *ulv = &pi->ulv;
3108 
3109 	if (ulv->supported) {
3110 		if (enable)
3111 			return (ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
3112 				0 : -EINVAL;
3113 		else
3114 			return (ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
3115 				0 : -EINVAL;
3116 	}
3117 
3118 	return 0;
3119 }
3120 
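/*
 * Fill in the ULV state.  The ULV target voltage appears to be stashed
 * in backbias_response_time (reused storage, presumably in mV); the SMC
 * is given the offset below the lowest vddc-on-sclk entry, either as a
 * raw offset (GPIO) or as a VID step count (SVI2, 6.25 mV per step).
 */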
3121 static int ci_populate_ulv_level(struct radeon_device *rdev,
3122 				 SMU7_Discrete_Ulv *state)
3123 {
3124 	struct ci_power_info *pi = ci_get_pi(rdev);
3125 	u16 ulv_voltage = rdev->pm.dpm.backbias_response_time;
3126 
3127 	state->CcPwrDynRm = 0;
3128 	state->CcPwrDynRm1 = 0;
3129 
3130 	if (ulv_voltage == 0) {
3131 		pi->ulv.supported = false;
3132 		return 0;
3133 	}
3134 
3135 	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
3136 		if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3137 			state->VddcOffset = 0;
3138 		else
3139 			state->VddcOffset =
3140 				rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
3141 	} else {
3142 		if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3143 			state->VddcOffsetVid = 0;
3144 		else
3145 			state->VddcOffsetVid = (u8)
3146 				((rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
3147 				 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
3148 	}
3149 	state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
3150 
3151 	state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
3152 	state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
3153 	state->VddcOffset = cpu_to_be16(state->VddcOffset);
3154 
3155 	return 0;
3156 }
3157 
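/*
 * Compute the SPLL register values for an engine clock: the fractional
 * feedback divider comes from the atom clock dividers, and engine
 * spread spectrum (when supported and described by the vbios) sets the
 * CLK_S/CLK_V pair scaled by the reference and feedback dividers.
 */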
3158 static int ci_calculate_sclk_params(struct radeon_device *rdev,
3159 				    u32 engine_clock,
3160 				    SMU7_Discrete_GraphicsLevel *sclk)
3161 {
3162 	struct ci_power_info *pi = ci_get_pi(rdev);
3163 	struct atom_clock_dividers dividers;
3164 	u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
3165 	u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
3166 	u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
3167 	u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
3168 	u32 reference_clock = rdev->clock.spll.reference_freq;
3169 	u32 reference_divider;
3170 	u32 fbdiv;
3171 	int ret;
3172 
3173 	ret = radeon_atom_get_clock_dividers(rdev,
3174 					     COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
3175 					     engine_clock, false, &dividers);
3176 	if (ret)
3177 		return ret;
3178 
3179 	reference_divider = 1 + dividers.ref_div;
3180 	fbdiv = dividers.fb_div & 0x3FFFFFF;
3181 
3182 	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
3183 	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
3184 	spll_func_cntl_3 |= SPLL_DITHEN;
3185 
3186 	if (pi->caps_sclk_ss_support) {
3187 		struct radeon_atom_ss ss;
3188 		u32 vco_freq = engine_clock * dividers.post_div;
3189 
3190 		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
3191 						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
3192 			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
3193 			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
3194 
3195 			cg_spll_spread_spectrum &= ~CLK_S_MASK;
3196 			cg_spll_spread_spectrum |= CLK_S(clk_s);
3197 			cg_spll_spread_spectrum |= SSEN;
3198 
3199 			cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
3200 			cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
3201 		}
3202 	}
3203 
3204 	sclk->SclkFrequency = engine_clock;
3205 	sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
3206 	sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
3207 	sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
3208 	sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
3209 	sclk->SclkDid = (u8)dividers.post_divider;
3210 
3211 	return 0;
3212 }
3213 
3214 static int ci_populate_single_graphic_level(struct radeon_device *rdev,
3215 					    u32 engine_clock,
3216 					    u16 sclk_activity_level_t,
3217 					    SMU7_Discrete_GraphicsLevel *graphic_level)
3218 {
3219 	struct ci_power_info *pi = ci_get_pi(rdev);
3220 	int ret;
3221 
3222 	ret = ci_calculate_sclk_params(rdev, engine_clock, graphic_level);
3223 	if (ret)
3224 		return ret;
3225 
3226 	ret = ci_get_dependency_volt_by_clk(rdev,
3227 					    &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
3228 					    engine_clock, &graphic_level->MinVddc);
3229 	if (ret)
3230 		return ret;
3231 
3232 	graphic_level->SclkFrequency = engine_clock;
3233 
3234 	graphic_level->Flags =  0;
3235 	graphic_level->MinVddcPhases = 1;
3236 
3237 	if (pi->vddc_phase_shed_control)
3238 		ci_populate_phase_value_based_on_sclk(rdev,
3239 						      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
3240 						      engine_clock,
3241 						      &graphic_level->MinVddcPhases);
3242 
3243 	graphic_level->ActivityLevel = sclk_activity_level_t;
3244 
3245 	graphic_level->CcPwrDynRm = 0;
3246 	graphic_level->CcPwrDynRm1 = 0;
3247 	graphic_level->EnabledForThrottle = 1;
3248 	graphic_level->UpH = 0;
3249 	graphic_level->DownH = 0;
3250 	graphic_level->VoltageDownH = 0;
3251 	graphic_level->PowerThrottle = 0;
3252 
3253 	if (pi->caps_sclk_ds)
3254 		graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(rdev,
3255 										   engine_clock,
3256 										   CISLAND_MINIMUM_ENGINE_CLOCK);
3257 
3258 	graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3259 
3260 	graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
3261 	graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
3262 	graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
3263 	graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
3264 	graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
3265 	graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
3266 	graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
3267 	graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
3268 	graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
3269 	graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
3270 	graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
3271 
3272 	return 0;
3273 }
3274 
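/*
 * Populate every graphics level from the sclk DPM table and upload the
 * whole array into SMC SRAM.  Deep sleep is only left enabled on the
 * two lowest levels, and the top level uses the high display watermark.
 */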
3275 static int ci_populate_all_graphic_levels(struct radeon_device *rdev)
3276 {
3277 	struct ci_power_info *pi = ci_get_pi(rdev);
3278 	struct ci_dpm_table *dpm_table = &pi->dpm_table;
3279 	u32 level_array_address = pi->dpm_table_start +
3280 		offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
3281 	u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
3282 		SMU7_MAX_LEVELS_GRAPHICS;
3283 	SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
3284 	u32 i, ret;
3285 
3286 	memset(levels, 0, level_array_size);
3287 
3288 	for (i = 0; i < dpm_table->sclk_table.count; i++) {
3289 		ret = ci_populate_single_graphic_level(rdev,
3290 						       dpm_table->sclk_table.dpm_levels[i].value,
3291 						       (u16)pi->activity_target[i],
3292 						       &pi->smc_state_table.GraphicsLevel[i]);
3293 		if (ret)
3294 			return ret;
3295 		if (i > 1)
3296 			pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
3297 		if (i == (dpm_table->sclk_table.count - 1))
3298 			pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
3299 				PPSMC_DISPLAY_WATERMARK_HIGH;
3300 	}
3301 	pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
3302 
3303 	pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
3304 	pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
3305 		ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
3306 
3307 	ret = ci_copy_bytes_to_smc(rdev, level_array_address,
3308 				   (u8 *)levels, level_array_size,
3309 				   pi->sram_end);
3310 	if (ret)
3311 		return ret;
3312 
3313 	return 0;
3314 }
3315 
3316 static int ci_populate_ulv_state(struct radeon_device *rdev,
3317 				 SMU7_Discrete_Ulv *ulv_level)
3318 {
3319 	return ci_populate_ulv_level(rdev, ulv_level);
3320 }
3321 
3322 static int ci_populate_all_memory_levels(struct radeon_device *rdev)
3323 {
3324 	struct ci_power_info *pi = ci_get_pi(rdev);
3325 	struct ci_dpm_table *dpm_table = &pi->dpm_table;
3326 	u32 level_array_address = pi->dpm_table_start +
3327 		offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
3328 	u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
3329 		SMU7_MAX_LEVELS_MEMORY;
3330 	SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
3331 	u32 i, ret;
3332 
3333 	memset(levels, 0, level_array_size);
3334 
3335 	for (i = 0; i < dpm_table->mclk_table.count; i++) {
3336 		if (dpm_table->mclk_table.dpm_levels[i].value == 0)
3337 			return -EINVAL;
3338 		ret = ci_populate_single_memory_level(rdev,
3339 						      dpm_table->mclk_table.dpm_levels[i].value,
3340 						      &pi->smc_state_table.MemoryLevel[i]);
3341 		if (ret)
3342 			return ret;
3343 	}
3344 
3345 	pi->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
3346 
3347 	if ((dpm_table->mclk_table.count >= 2) &&
3348 	    ((rdev->pdev->device == 0x67B0) || (rdev->pdev->device == 0x67B1))) {
3349 		pi->smc_state_table.MemoryLevel[1].MinVddc =
3350 			pi->smc_state_table.MemoryLevel[0].MinVddc;
3351 		pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
3352 			pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
3353 	}
3354 
3355 	pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
3356 
3357 	pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
3358 	pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
3359 		ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
3360 
3361 	pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
3362 		PPSMC_DISPLAY_WATERMARK_HIGH;
3363 
3364 	ret = ci_copy_bytes_to_smc(rdev, level_array_address,
3365 				   (u8 *)levels, level_array_size,
3366 				   pi->sram_end);
3367 	if (ret)
3368 		return ret;
3369 
3370 	return 0;
3371 }
3372 
3373 static void ci_reset_single_dpm_table(struct radeon_device *rdev,
3374 				      struct ci_single_dpm_table* dpm_table,
3375 				      u32 count)
3376 {
3377 	u32 i;
3378 
3379 	dpm_table->count = count;
3380 	for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
3381 		dpm_table->dpm_levels[i].enabled = false;
3382 }
3383 
3384 static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table,
3385 				      u32 index, u32 pcie_gen, u32 pcie_lanes)
3386 {
3387 	dpm_table->dpm_levels[index].value = pcie_gen;
3388 	dpm_table->dpm_levels[index].param1 = pcie_lanes;
3389 	dpm_table->dpm_levels[index].enabled = true;
3390 }
3391 
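/*
 * Build the fixed 6-entry PCIe DPM table from the power-saving and
 * performance gen/lane limits; if only one set of levels is valid it
 * is mirrored into the other before the entries are filled in.
 */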
3392 static int ci_setup_default_pcie_tables(struct radeon_device *rdev)
3393 {
3394 	struct ci_power_info *pi = ci_get_pi(rdev);
3395 
3396 	if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
3397 		return -EINVAL;
3398 
3399 	if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
3400 		pi->pcie_gen_powersaving = pi->pcie_gen_performance;
3401 		pi->pcie_lane_powersaving = pi->pcie_lane_performance;
3402 	} else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
3403 		pi->pcie_gen_performance = pi->pcie_gen_powersaving;
3404 		pi->pcie_lane_performance = pi->pcie_lane_powersaving;
3405 	}
3406 
3407 	ci_reset_single_dpm_table(rdev,
3408 				  &pi->dpm_table.pcie_speed_table,
3409 				  SMU7_MAX_LEVELS_LINK);
3410 
3411 	if (rdev->family == CHIP_BONAIRE)
3412 		ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3413 					  pi->pcie_gen_powersaving.min,
3414 					  pi->pcie_lane_powersaving.max);
3415 	else
3416 		ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3417 					  pi->pcie_gen_powersaving.min,
3418 					  pi->pcie_lane_powersaving.min);
3419 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
3420 				  pi->pcie_gen_performance.min,
3421 				  pi->pcie_lane_performance.min);
3422 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
3423 				  pi->pcie_gen_powersaving.min,
3424 				  pi->pcie_lane_powersaving.max);
3425 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
3426 				  pi->pcie_gen_performance.min,
3427 				  pi->pcie_lane_performance.max);
3428 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
3429 				  pi->pcie_gen_powersaving.max,
3430 				  pi->pcie_lane_powersaving.max);
3431 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
3432 				  pi->pcie_gen_performance.max,
3433 				  pi->pcie_lane_performance.max);
3434 
3435 	pi->dpm_table.pcie_speed_table.count = 6;
3436 
3437 	return 0;
3438 }
3439 
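/*
 * Rebuild the driver DPM tables (sclk, mclk, vddc, vddci, mvdd, pcie)
 * from the clock/voltage dependency tables parsed from the vbios,
 * skipping consecutive duplicate clock entries.
 */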
3440 static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
3441 {
3442 	struct ci_power_info *pi = ci_get_pi(rdev);
3443 	struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
3444 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3445 	struct radeon_clock_voltage_dependency_table *allowed_mclk_table =
3446 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
3447 	struct radeon_cac_leakage_table *std_voltage_table =
3448 		&rdev->pm.dpm.dyn_state.cac_leakage_table;
3449 	u32 i;
3450 
3451 	if (allowed_sclk_vddc_table == NULL)
3452 		return -EINVAL;
3453 	if (allowed_sclk_vddc_table->count < 1)
3454 		return -EINVAL;
3455 	if (allowed_mclk_table == NULL)
3456 		return -EINVAL;
3457 	if (allowed_mclk_table->count < 1)
3458 		return -EINVAL;
3459 
3460 	memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
3461 
3462 	ci_reset_single_dpm_table(rdev,
3463 				  &pi->dpm_table.sclk_table,
3464 				  SMU7_MAX_LEVELS_GRAPHICS);
3465 	ci_reset_single_dpm_table(rdev,
3466 				  &pi->dpm_table.mclk_table,
3467 				  SMU7_MAX_LEVELS_MEMORY);
3468 	ci_reset_single_dpm_table(rdev,
3469 				  &pi->dpm_table.vddc_table,
3470 				  SMU7_MAX_LEVELS_VDDC);
3471 	ci_reset_single_dpm_table(rdev,
3472 				  &pi->dpm_table.vddci_table,
3473 				  SMU7_MAX_LEVELS_VDDCI);
3474 	ci_reset_single_dpm_table(rdev,
3475 				  &pi->dpm_table.mvdd_table,
3476 				  SMU7_MAX_LEVELS_MVDD);
3477 
3478 	pi->dpm_table.sclk_table.count = 0;
3479 	for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3480 		if ((i == 0) ||
3481 		    (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
3482 		     allowed_sclk_vddc_table->entries[i].clk)) {
3483 			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
3484 				allowed_sclk_vddc_table->entries[i].clk;
3485 			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
3486 				(i == 0);
3487 			pi->dpm_table.sclk_table.count++;
3488 		}
3489 	}
3490 
3491 	pi->dpm_table.mclk_table.count = 0;
3492 	for (i = 0; i < allowed_mclk_table->count; i++) {
3493 		if ((i == 0) ||
3494 		    (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
3495 		     allowed_mclk_table->entries[i].clk)) {
3496 			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
3497 				allowed_mclk_table->entries[i].clk;
3498 			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
3499 				(i == 0);
3500 			pi->dpm_table.mclk_table.count++;
3501 		}
3502 	}
3503 
3504 	for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3505 		pi->dpm_table.vddc_table.dpm_levels[i].value =
3506 			allowed_sclk_vddc_table->entries[i].v;
3507 		pi->dpm_table.vddc_table.dpm_levels[i].param1 =
3508 			std_voltage_table->entries[i].leakage;
3509 		pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
3510 	}
3511 	pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
3512 
3513 	allowed_mclk_table = &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
3514 	if (allowed_mclk_table) {
3515 		for (i = 0; i < allowed_mclk_table->count; i++) {
3516 			pi->dpm_table.vddci_table.dpm_levels[i].value =
3517 				allowed_mclk_table->entries[i].v;
3518 			pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
3519 		}
3520 		pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
3521 	}
3522 
3523 	allowed_mclk_table = &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
3524 	if (allowed_mclk_table) {
3525 		for (i = 0; i < allowed_mclk_table->count; i++) {
3526 			pi->dpm_table.mvdd_table.dpm_levels[i].value =
3527 				allowed_mclk_table->entries[i].v;
3528 			pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
3529 		}
3530 		pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
3531 	}
3532 
3533 	ci_setup_default_pcie_tables(rdev);
3534 
3535 	return 0;
3536 }
3537 
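/* Find the DPM level whose clock matches the vbios boot-up value. */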
3538 static int ci_find_boot_level(struct ci_single_dpm_table *table,
3539 			      u32 value, u32 *boot_level)
3540 {
3541 	u32 i;
3542 	int ret = -EINVAL;
3543 
3544 	for(i = 0; i < table->count; i++) {
3545 		if (value == table->dpm_levels[i].value) {
3546 			*boot_level = i;
3547 			ret = 0;
3548 		}
3549 	}
3550 
3551 	return ret;
3552 }
3553 
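/*
 * Initialize the SMC DPM state table: set up the default DPM tables,
 * populate every level type (graphics, memory, link, ACPI, VCE, ACP,
 * SAMU, UVD), fill in boot values and sampling intervals, byte-swap
 * the multi-byte fields for the big-endian SMC, and upload the result
 * to SMC RAM.
 */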
3554 static int ci_init_smc_table(struct radeon_device *rdev)
3555 {
3556 	struct ci_power_info *pi = ci_get_pi(rdev);
3557 	struct ci_ulv_parm *ulv = &pi->ulv;
3558 	struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps;
3559 	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
3560 	int ret;
3561 
3562 	ret = ci_setup_default_dpm_tables(rdev);
3563 	if (ret)
3564 		return ret;
3565 
3566 	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
3567 		ci_populate_smc_voltage_tables(rdev, table);
3568 
3569 	ci_init_fps_limits(rdev);
3570 
3571 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
3572 		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
3573 
3574 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
3575 		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
3576 
3577 	if (pi->mem_gddr5)
3578 		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
3579 
3580 	if (ulv->supported) {
3581 		ret = ci_populate_ulv_state(rdev, &pi->smc_state_table.Ulv);
3582 		if (ret)
3583 			return ret;
3584 		WREG32_SMC(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
3585 	}
3586 
3587 	ret = ci_populate_all_graphic_levels(rdev);
3588 	if (ret)
3589 		return ret;
3590 
3591 	ret = ci_populate_all_memory_levels(rdev);
3592 	if (ret)
3593 		return ret;
3594 
3595 	ci_populate_smc_link_level(rdev, table);
3596 
3597 	ret = ci_populate_smc_acpi_level(rdev, table);
3598 	if (ret)
3599 		return ret;
3600 
3601 	ret = ci_populate_smc_vce_level(rdev, table);
3602 	if (ret)
3603 		return ret;
3604 
3605 	ret = ci_populate_smc_acp_level(rdev, table);
3606 	if (ret)
3607 		return ret;
3608 
3609 	ret = ci_populate_smc_samu_level(rdev, table);
3610 	if (ret)
3611 		return ret;
3612 
3613 	ret = ci_do_program_memory_timing_parameters(rdev);
3614 	if (ret)
3615 		return ret;
3616 
3617 	ret = ci_populate_smc_uvd_level(rdev, table);
3618 	if (ret)
3619 		return ret;
3620 
3621 	table->UvdBootLevel  = 0;
3622 	table->VceBootLevel  = 0;
3623 	table->AcpBootLevel  = 0;
3624 	table->SamuBootLevel  = 0;
3625 	table->GraphicsBootLevel  = 0;
3626 	table->MemoryBootLevel  = 0;
3627 
3628 	ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
3629 				 pi->vbios_boot_state.sclk_bootup_value,
3630 				 (u32 *)&pi->smc_state_table.GraphicsBootLevel);
3631 
3632 	ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
3633 				 pi->vbios_boot_state.mclk_bootup_value,
3634 				 (u32 *)&pi->smc_state_table.MemoryBootLevel);
3635 
3636 	table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
3637 	table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
3638 	table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;
3639 
3640 	ci_populate_smc_initial_state(rdev, radeon_boot_state);
3641 
3642 	ret = ci_populate_bapm_parameters_in_dpm_table(rdev);
3643 	if (ret)
3644 		return ret;
3645 
3646 	table->UVDInterval = 1;
3647 	table->VCEInterval = 1;
3648 	table->ACPInterval = 1;
3649 	table->SAMUInterval = 1;
3650 	table->GraphicsVoltageChangeEnable = 1;
3651 	table->GraphicsThermThrottleEnable = 1;
3652 	table->GraphicsInterval = 1;
3653 	table->VoltageInterval = 1;
3654 	table->ThermalInterval = 1;
3655 	table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
3656 					     CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3657 	table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
3658 					    CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3659 	table->MemoryVoltageChangeEnable = 1;
3660 	table->MemoryInterval = 1;
3661 	table->VoltageResponseTime = 0;
3662 	table->VddcVddciDelta = 4000;
3663 	table->PhaseResponseTime = 0;
3664 	table->MemoryThermThrottleEnable = 1;
3665 	table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
3666 	table->PCIeGenInterval = 1;
3667 	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
3668 		table->SVI2Enable  = 1;
3669 	else
3670 		table->SVI2Enable  = 0;
3671 
3672 	table->ThermGpio = 17;
3673 	table->SclkStepSize = 0x4000;
3674 
3675 	table->SystemFlags = cpu_to_be32(table->SystemFlags);
3676 	table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
3677 	table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
3678 	table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
3679 	table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
3680 	table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
3681 	table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
3682 	table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
3683 	table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
3684 	table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
3685 	table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
3686 	table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
3687 	table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
3688 	table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);
3689 
3690 	ret = ci_copy_bytes_to_smc(rdev,
3691 				   pi->dpm_table_start +
3692 				   offsetof(SMU7_Discrete_DpmTable, SystemFlags),
3693 				   (u8 *)&table->SystemFlags,
3694 				   sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
3695 				   pi->sram_end);
3696 	if (ret)
3697 		return ret;
3698 
3699 	return 0;
3700 }
3701 
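/* Enable only the DPM levels whose clock lies in [low_limit, high_limit]. */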
3702 static void ci_trim_single_dpm_states(struct radeon_device *rdev,
3703 				      struct ci_single_dpm_table *dpm_table,
3704 				      u32 low_limit, u32 high_limit)
3705 {
3706 	u32 i;
3707 
3708 	for (i = 0; i < dpm_table->count; i++) {
3709 		if ((dpm_table->dpm_levels[i].value < low_limit) ||
3710 		    (dpm_table->dpm_levels[i].value > high_limit))
3711 			dpm_table->dpm_levels[i].enabled = false;
3712 		else
3713 			dpm_table->dpm_levels[i].enabled = true;
3714 	}
3715 }
3716 
3717 static void ci_trim_pcie_dpm_states(struct radeon_device *rdev,
3718 				    u32 speed_low, u32 lanes_low,
3719 				    u32 speed_high, u32 lanes_high)
3720 {
3721 	struct ci_power_info *pi = ci_get_pi(rdev);
3722 	struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
3723 	u32 i, j;
3724 
3725 	for (i = 0; i < pcie_table->count; i++) {
3726 		if ((pcie_table->dpm_levels[i].value < speed_low) ||
3727 		    (pcie_table->dpm_levels[i].param1 < lanes_low) ||
3728 		    (pcie_table->dpm_levels[i].value > speed_high) ||
3729 		    (pcie_table->dpm_levels[i].param1 > lanes_high))
3730 			pcie_table->dpm_levels[i].enabled = false;
3731 		else
3732 			pcie_table->dpm_levels[i].enabled = true;
3733 	}
3734 
3735 	for (i = 0; i < pcie_table->count; i++) {
3736 		if (pcie_table->dpm_levels[i].enabled) {
3737 			for (j = i + 1; j < pcie_table->count; j++) {
3738 				if (pcie_table->dpm_levels[j].enabled) {
3739 					if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
3740 					    (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
3741 						pcie_table->dpm_levels[j].enabled = false;
3742 				}
3743 			}
3744 		}
3745 	}
3746 }
3747 
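/*
 * Trim the sclk/mclk/pcie DPM tables to the range spanned by the
 * lowest and highest performance levels of the requested state.
 */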
3748 static int ci_trim_dpm_states(struct radeon_device *rdev,
3749 			      struct radeon_ps *radeon_state)
3750 {
3751 	struct ci_ps *state = ci_get_ps(radeon_state);
3752 	struct ci_power_info *pi = ci_get_pi(rdev);
3753 	u32 high_limit_count;
3754 
3755 	if (state->performance_level_count < 1)
3756 		return -EINVAL;
3757 
3758 	if (state->performance_level_count == 1)
3759 		high_limit_count = 0;
3760 	else
3761 		high_limit_count = 1;
3762 
3763 	ci_trim_single_dpm_states(rdev,
3764 				  &pi->dpm_table.sclk_table,
3765 				  state->performance_levels[0].sclk,
3766 				  state->performance_levels[high_limit_count].sclk);
3767 
3768 	ci_trim_single_dpm_states(rdev,
3769 				  &pi->dpm_table.mclk_table,
3770 				  state->performance_levels[0].mclk,
3771 				  state->performance_levels[high_limit_count].mclk);
3772 
3773 	ci_trim_pcie_dpm_states(rdev,
3774 				state->performance_levels[0].pcie_gen,
3775 				state->performance_levels[0].pcie_lane,
3776 				state->performance_levels[high_limit_count].pcie_gen,
3777 				state->performance_levels[high_limit_count].pcie_lane);
3778 
3779 	return 0;
3780 }
3781 
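/*
 * Ask the SMC for the smallest VDDC that satisfies the current display
 * clock, rounded up to the next entry in the sclk voltage table.
 */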
3782 static int ci_apply_disp_minimum_voltage_request(struct radeon_device *rdev)
3783 {
3784 	struct radeon_clock_voltage_dependency_table *disp_voltage_table =
3785 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
3786 	struct radeon_clock_voltage_dependency_table *vddc_table =
3787 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3788 	u32 requested_voltage = 0;
3789 	u32 i;
3790 
3791 	if (disp_voltage_table == NULL)
3792 		return -EINVAL;
3793 	if (!disp_voltage_table->count)
3794 		return -EINVAL;
3795 
3796 	for (i = 0; i < disp_voltage_table->count; i++) {
3797 		if (rdev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
3798 			requested_voltage = disp_voltage_table->entries[i].v;
3799 	}
3800 
3801 	for (i = 0; i < vddc_table->count; i++) {
3802 		if (requested_voltage <= vddc_table->entries[i].v) {
3803 			requested_voltage = vddc_table->entries[i].v;
3804 			return (ci_send_msg_to_smc_with_parameter(rdev,
3805 								  PPSMC_MSG_VddC_Request,
3806 								  requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
3807 				0 : -EINVAL;
3808 		}
3809 	}
3810 
3811 	return -EINVAL;
3812 }
3813 
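/*
 * Push the current sclk/mclk level enable masks to the SMC (the PCIe
 * mask upload is compiled out), after applying the display's minimum
 * voltage request.
 */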
3814 static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
3815 {
3816 	struct ci_power_info *pi = ci_get_pi(rdev);
3817 	PPSMC_Result result;
3818 
3819 	ci_apply_disp_minimum_voltage_request(rdev);
3820 
3821 	if (!pi->sclk_dpm_key_disabled) {
3822 		if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3823 			result = ci_send_msg_to_smc_with_parameter(rdev,
3824 								   PPSMC_MSG_SCLKDPM_SetEnabledMask,
3825 								   pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3826 			if (result != PPSMC_Result_OK)
3827 				return -EINVAL;
3828 		}
3829 	}
3830 
3831 	if (!pi->mclk_dpm_key_disabled) {
3832 		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3833 			result = ci_send_msg_to_smc_with_parameter(rdev,
3834 								   PPSMC_MSG_MCLKDPM_SetEnabledMask,
3835 								   pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3836 			if (result != PPSMC_Result_OK)
3837 				return -EINVAL;
3838 		}
3839 	}
3840 #if 0
3841 	if (!pi->pcie_dpm_key_disabled) {
3842 		if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3843 			result = ci_send_msg_to_smc_with_parameter(rdev,
3844 								   PPSMC_MSG_PCIeDPM_SetEnabledMask,
3845 								   pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3846 			if (result != PPSMC_Result_OK)
3847 				return -EINVAL;
3848 		}
3849 	}
3850 #endif
3851 	return 0;
3852 }
3853 
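/*
 * Compare the new state's top sclk/mclk against the current DPM tables
 * and record what needs reprogramming: OD_UPDATE when the clock itself
 * changed, UPDATE when only dependent settings (e.g. the active crtc
 * count) changed.
 */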
3854 static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev,
3855 						   struct radeon_ps *radeon_state)
3856 {
3857 	struct ci_power_info *pi = ci_get_pi(rdev);
3858 	struct ci_ps *state = ci_get_ps(radeon_state);
3859 	struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
3860 	u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3861 	struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
3862 	u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3863 	u32 i;
3864 
3865 	pi->need_update_smu7_dpm_table = 0;
3866 
3867 	for (i = 0; i < sclk_table->count; i++) {
3868 		if (sclk == sclk_table->dpm_levels[i].value)
3869 			break;
3870 	}
3871 
3872 	if (i >= sclk_table->count) {
3873 		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3874 	} else {
3875 		/* XXX The original code always reprogrammed the sclk levels,
3876 		 * but we don't currently handle display sclk requirements,
3877 		 * so just skip it here.
3878 		 */
3879 		if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
3880 			pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
3881 	}
3882 
3883 	for (i = 0; i < mclk_table->count; i++) {
3884 		if (mclk == mclk_table->dpm_levels[i].value)
3885 			break;
3886 	}
3887 
3888 	if (i >= mclk_table->count)
3889 		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
3890 
3891 	if (rdev->pm.dpm.current_active_crtc_count !=
3892 	    rdev->pm.dpm.new_active_crtc_count)
3893 		pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
3894 }
3895 
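/*
 * Re-populate and re-upload only the level tables flagged dirty by
 * ci_find_dpm_states_clocks_in_dpm_table().
 */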
3896 static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct radeon_device *rdev,
3897 						       struct radeon_ps *radeon_state)
3898 {
3899 	struct ci_power_info *pi = ci_get_pi(rdev);
3900 	struct ci_ps *state = ci_get_ps(radeon_state);
3901 	u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3902 	u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3903 	struct ci_dpm_table *dpm_table = &pi->dpm_table;
3904 	int ret;
3905 
3906 	if (!pi->need_update_smu7_dpm_table)
3907 		return 0;
3908 
3909 	if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
3910 		dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
3911 
3912 	if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
3913 		dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
3914 
3915 	if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
3916 		ret = ci_populate_all_graphic_levels(rdev);
3917 		if (ret)
3918 			return ret;
3919 	}
3920 
3921 	if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
3922 		ret = ci_populate_all_memory_levels(rdev);
3923 		if (ret)
3924 			return ret;
3925 	}
3926 
3927 	return 0;
3928 }
3929 
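/*
 * Enable/disable UVD DPM. When enabling, the UVD level mask is built
 * from the entries whose voltage fits under the AC/DC limit (only the
 * highest such level unless UVD DPM is fully supported), and mclk DPM
 * level 0 is masked off, presumably to keep the memory clock up while
 * UVD is active.
 */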
3930 static int ci_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
3931 {
3932 	struct ci_power_info *pi = ci_get_pi(rdev);
3933 	const struct radeon_clock_and_voltage_limits *max_limits;
3934 	int i;
3935 
3936 	if (rdev->pm.dpm.ac_power)
3937 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3938 	else
3939 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3940 
3941 	if (enable) {
3942 		pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
3943 
3944 		for (i = rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3945 			if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3946 				pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
3947 
3948 				if (!pi->caps_uvd_dpm)
3949 					break;
3950 			}
3951 		}
3952 
3953 		ci_send_msg_to_smc_with_parameter(rdev,
3954 						  PPSMC_MSG_UVDDPM_SetEnabledMask,
3955 						  pi->dpm_level_enable_mask.uvd_dpm_enable_mask);
3956 
3957 		if (pi->last_mclk_dpm_enable_mask & 0x1) {
3958 			pi->uvd_enabled = true;
3959 			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
3960 			ci_send_msg_to_smc_with_parameter(rdev,
3961 							  PPSMC_MSG_MCLKDPM_SetEnabledMask,
3962 							  pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3963 		}
3964 	} else {
3965 		if (pi->last_mclk_dpm_enable_mask & 0x1) {
3966 			pi->uvd_enabled = false;
3967 			pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
3968 			ci_send_msg_to_smc_with_parameter(rdev,
3969 							  PPSMC_MSG_MCLKDPM_SetEnabledMask,
3970 							  pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3971 		}
3972 	}
3973 
3974 	return (ci_send_msg_to_smc(rdev, enable ?
3975 				   PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
3976 		0 : -EINVAL;
3977 }
3978 
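/*
 * Enable/disable VCE DPM; mirrors the UVD path when building the VCE
 * level enable mask from the voltage-dependency table.
 */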
3979 static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable)
3980 {
3981 	struct ci_power_info *pi = ci_get_pi(rdev);
3982 	const struct radeon_clock_and_voltage_limits *max_limits;
3983 	int i;
3984 
3985 	if (rdev->pm.dpm.ac_power)
3986 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3987 	else
3988 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3989 
3990 	if (enable) {
3991 		pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
3992 		for (i = rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3993 			if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3994 				pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
3995 
3996 				if (!pi->caps_vce_dpm)
3997 					break;
3998 			}
3999 		}
4000 
4001 		ci_send_msg_to_smc_with_parameter(rdev,
4002 						  PPSMC_MSG_VCEDPM_SetEnabledMask,
4003 						  pi->dpm_level_enable_mask.vce_dpm_enable_mask);
4004 	}
4005 
4006 	return (ci_send_msg_to_smc(rdev, enable ?
4007 				   PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
4008 		0 : -EINVAL;
4009 }
4010 
4011 #if 0
4012 static int ci_enable_samu_dpm(struct radeon_device *rdev, bool enable)
4013 {
4014 	struct ci_power_info *pi = ci_get_pi(rdev);
4015 	const struct radeon_clock_and_voltage_limits *max_limits;
4016 	int i;
4017 
4018 	if (rdev->pm.dpm.ac_power)
4019 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4020 	else
4021 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4022 
4023 	if (enable) {
4024 		pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
4025 		for (i = rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4026 			if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4027 				pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;
4028 
4029 				if (!pi->caps_samu_dpm)
4030 					break;
4031 			}
4032 		}
4033 
4034 		ci_send_msg_to_smc_with_parameter(rdev,
4035 						  PPSMC_MSG_SAMUDPM_SetEnabledMask,
4036 						  pi->dpm_level_enable_mask.samu_dpm_enable_mask);
4037 	}
4038 	return (ci_send_msg_to_smc(rdev, enable ?
4039 				   PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
4040 		0 : -EINVAL;
4041 }
4042 
4043 static int ci_enable_acp_dpm(struct radeon_device *rdev, bool enable)
4044 {
4045 	struct ci_power_info *pi = ci_get_pi(rdev);
4046 	const struct radeon_clock_and_voltage_limits *max_limits;
4047 	int i;
4048 
4049 	if (rdev->pm.dpm.ac_power)
4050 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4051 	else
4052 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4053 
4054 	if (enable) {
4055 		pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
4056 		for (i = rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4057 			if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4058 				pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;
4059 
4060 				if (!pi->caps_acp_dpm)
4061 					break;
4062 			}
4063 		}
4064 
4065 		ci_send_msg_to_smc_with_parameter(rdev,
4066 						  PPSMC_MSG_ACPDPM_SetEnabledMask,
4067 						  pi->dpm_level_enable_mask.acp_dpm_enable_mask);
4068 	}
4069 
4070 	return (ci_send_msg_to_smc(rdev, enable ?
4071 				   PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
4072 		0 : -EINVAL;
4073 }
4074 #endif
4075 
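/*
 * Gate/ungate UVD. On ungate, pick the UVD boot level and mirror it
 * into the SMC's DPM table register before enabling UVD DPM.
 */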
4076 static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate)
4077 {
4078 	struct ci_power_info *pi = ci_get_pi(rdev);
4079 	u32 tmp;
4080 
4081 	if (!gate) {
4082 		if (pi->caps_uvd_dpm ||
4083 		    (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
4084 			pi->smc_state_table.UvdBootLevel = 0;
4085 		else
4086 			pi->smc_state_table.UvdBootLevel =
4087 				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
4088 
4089 		tmp = RREG32_SMC(DPM_TABLE_475);
4090 		tmp &= ~UvdBootLevel_MASK;
4091 		tmp |= UvdBootLevel(pi->smc_state_table.UvdBootLevel);
4092 		WREG32_SMC(DPM_TABLE_475, tmp);
4093 	}
4094 
4095 	return ci_enable_uvd_dpm(rdev, !gate);
4096 }
4097 
4098 static u8 ci_get_vce_boot_level(struct radeon_device *rdev)
4099 {
4100 	u8 i;
4101 	u32 min_evclk = 30000; /* ??? */
4102 	struct radeon_vce_clock_voltage_dependency_table *table =
4103 		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
4104 
4105 	for (i = 0; i < table->count; i++) {
4106 		if (table->entries[i].evclk >= min_evclk)
4107 			return i;
4108 	}
4109 
4110 	return table->count - 1;
4111 }
4112 
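/*
 * On an evclk change, ungate the VCE clocks and enable VCE DPM when
 * encoding starts, or gate the clocks and disable VCE DPM when the
 * new state no longer needs VCE.
 */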
4113 static int ci_update_vce_dpm(struct radeon_device *rdev,
4114 			     struct radeon_ps *radeon_new_state,
4115 			     struct radeon_ps *radeon_current_state)
4116 {
4117 	struct ci_power_info *pi = ci_get_pi(rdev);
4118 	int ret = 0;
4119 	u32 tmp;
4120 
4121 	if (radeon_current_state->evclk != radeon_new_state->evclk) {
4122 		if (radeon_new_state->evclk) {
4123 			/* turn the clocks on when encoding */
4124 			cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false);
4125 
4126 			pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev);
4127 			tmp = RREG32_SMC(DPM_TABLE_475);
4128 			tmp &= ~VceBootLevel_MASK;
4129 			tmp |= VceBootLevel(pi->smc_state_table.VceBootLevel);
4130 			WREG32_SMC(DPM_TABLE_475, tmp);
4131 
4132 			ret = ci_enable_vce_dpm(rdev, true);
4133 		} else {
4134 			/* turn the clocks off when not encoding */
4135 			cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true);
4136 
4137 			ret = ci_enable_vce_dpm(rdev, false);
4138 		}
4139 	}
4140 	return ret;
4141 }
4142 
4143 #if 0
4144 static int ci_update_samu_dpm(struct radeon_device *rdev, bool gate)
4145 {
4146 	return ci_enable_samu_dpm(rdev, gate);
4147 }
4148 
4149 static int ci_update_acp_dpm(struct radeon_device *rdev, bool gate)
4150 {
4151 	struct ci_power_info *pi = ci_get_pi(rdev);
4152 	u32 tmp;
4153 
4154 	if (!gate) {
4155 		pi->smc_state_table.AcpBootLevel = 0;
4156 
4157 		tmp = RREG32_SMC(DPM_TABLE_475);
4158 		tmp &= ~AcpBootLevel_MASK;
4159 		tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
4160 		WREG32_SMC(DPM_TABLE_475, tmp);
4161 	}
4162 
4163 	return ci_enable_acp_dpm(rdev, !gate);
4164 }
4165 #endif
4166 
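/*
 * Trim the DPM tables to the new state and regenerate the sclk, mclk
 * and pcie level enable masks (keeping mclk level 0 masked while UVD
 * is active).
 */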
4167 static int ci_generate_dpm_level_enable_mask(struct radeon_device *rdev,
4168 					     struct radeon_ps *radeon_state)
4169 {
4170 	struct ci_power_info *pi = ci_get_pi(rdev);
4171 	int ret;
4172 
4173 	ret = ci_trim_dpm_states(rdev, radeon_state);
4174 	if (ret)
4175 		return ret;
4176 
4177 	pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
4178 		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
4179 	pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
4180 		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
4181 	pi->last_mclk_dpm_enable_mask =
4182 		pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4183 	if (pi->uvd_enabled) {
4184 		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
4185 			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4186 	}
4187 	pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
4188 		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
4189 
4190 	return 0;
4191 }
4192 
4193 static u32 ci_get_lowest_enabled_level(struct radeon_device *rdev,
4194 				       u32 level_mask)
4195 {
4196 	u32 level = 0;
4197 
4198 	while ((level_mask & (1 << level)) == 0)
4199 		level++;
4200 
4201 	return level;
4202 }
4203 
4204 
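/*
 * Force the DPM performance level: HIGH pins sclk/mclk/pcie to their
 * highest enabled level and polls the current-profile-index registers
 * until the SMC acknowledges, LOW pins them to the lowest enabled
 * level, and AUTO unforces PCIe and re-uploads the enable masks.
 */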
4205 int ci_dpm_force_performance_level(struct radeon_device *rdev,
4206 				   enum radeon_dpm_forced_level level)
4207 {
4208 	struct ci_power_info *pi = ci_get_pi(rdev);
4209 	u32 tmp, levels, i;
4210 	int ret;
4211 
4212 	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
4213 		if ((!pi->pcie_dpm_key_disabled) &&
4214 		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4215 			levels = 0;
4216 			tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
4217 			while (tmp >>= 1)
4218 				levels++;
4219 			if (levels) {
4220 				ret = ci_dpm_force_state_pcie(rdev, levels);
4221 				if (ret)
4222 					return ret;
4223 				for (i = 0; i < rdev->usec_timeout; i++) {
4224 					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
4225 					       CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
4226 					if (tmp == levels)
4227 						break;
4228 					udelay(1);
4229 				}
4230 			}
4231 		}
4232 		if ((!pi->sclk_dpm_key_disabled) &&
4233 		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4234 			levels = 0;
4235 			tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
4236 			while (tmp >>= 1)
4237 				levels++;
4238 			if (levels) {
4239 				ret = ci_dpm_force_state_sclk(rdev, levels);
4240 				if (ret)
4241 					return ret;
4242 				for (i = 0; i < rdev->usec_timeout; i++) {
4243 					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
4244 					       CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
4245 					if (tmp == levels)
4246 						break;
4247 					udelay(1);
4248 				}
4249 			}
4250 		}
4251 		if ((!pi->mclk_dpm_key_disabled) &&
4252 		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4253 			levels = 0;
4254 			tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4255 			while (tmp >>= 1)
4256 				levels++;
4257 			if (levels) {
4258 				ret = ci_dpm_force_state_mclk(rdev, levels);
4259 				if (ret)
4260 					return ret;
4261 				for (i = 0; i < rdev->usec_timeout; i++) {
4262 					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
4263 					       CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
4264 					if (tmp == levels)
4265 						break;
4266 					udelay(1);
4267 				}
4268 			}
4269 		}
4270 	} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
4271 		if ((!pi->sclk_dpm_key_disabled) &&
4272 		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4273 			levels = ci_get_lowest_enabled_level(rdev,
4274 							     pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
4275 			ret = ci_dpm_force_state_sclk(rdev, levels);
4276 			if (ret)
4277 				return ret;
4278 			for (i = 0; i < rdev->usec_timeout; i++) {
4279 				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
4280 				       CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
4281 				if (tmp == levels)
4282 					break;
4283 				udelay(1);
4284 			}
4285 		}
4286 		if ((!pi->mclk_dpm_key_disabled) &&
4287 		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4288 			levels = ci_get_lowest_enabled_level(rdev,
4289 							     pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4290 			ret = ci_dpm_force_state_mclk(rdev, levels);
4291 			if (ret)
4292 				return ret;
4293 			for (i = 0; i < rdev->usec_timeout; i++) {
4294 				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
4295 				       CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
4296 				if (tmp == levels)
4297 					break;
4298 				udelay(1);
4299 			}
4300 		}
4301 		if ((!pi->pcie_dpm_key_disabled) &&
4302 		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4303 			levels = ci_get_lowest_enabled_level(rdev,
4304 							     pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
4305 			ret = ci_dpm_force_state_pcie(rdev, levels);
4306 			if (ret)
4307 				return ret;
4308 			for (i = 0; i < rdev->usec_timeout; i++) {
4309 				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
4310 				       CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
4311 				if (tmp == levels)
4312 					break;
4313 				udelay(1);
4314 			}
4315 		}
4316 	} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
4317 		if (!pi->pcie_dpm_key_disabled) {
4318 			PPSMC_Result smc_result;
4319 
4320 			smc_result = ci_send_msg_to_smc(rdev,
4321 							PPSMC_MSG_PCIeDPM_UnForceLevel);
4322 			if (smc_result != PPSMC_Result_OK)
4323 				return -EINVAL;
4324 		}
4325 		ret = ci_upload_dpm_level_enable_mask(rdev);
4326 		if (ret)
4327 			return ret;
4328 	}
4329 
4330 	rdev->pm.dpm.forced_level = level;
4331 
4332 	return 0;
4333 }
4334 
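/*
 * Append derived command-register entries (EMRS/MRS/MRS1 and, for
 * non-GDDR5, MC_PMG_AUTO_CMD) to the MC register table, recombining
 * the live register contents with the per-strap table data.
 */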
4335 static int ci_set_mc_special_registers(struct radeon_device *rdev,
4336 				       struct ci_mc_reg_table *table)
4337 {
4338 	struct ci_power_info *pi = ci_get_pi(rdev);
4339 	u8 i, j, k;
4340 	u32 temp_reg;
4341 
4342 	for (i = 0, j = table->last; i < table->last; i++) {
4343 		if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4344 			return -EINVAL;
4345 		switch(table->mc_reg_address[i].s1 << 2) {
4346 		case MC_SEQ_MISC1:
4347 			temp_reg = RREG32(MC_PMG_CMD_EMRS);
4348 			table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
4349 			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
4350 			for (k = 0; k < table->num_entries; k++) {
4351 				table->mc_reg_table_entry[k].mc_data[j] =
4352 					((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
4353 			}
4354 			j++;
4355 			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4356 				return -EINVAL;
4357 
4358 			temp_reg = RREG32(MC_PMG_CMD_MRS);
4359 			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
4360 			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
4361 			for (k = 0; k < table->num_entries; k++) {
4362 				table->mc_reg_table_entry[k].mc_data[j] =
4363 					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4364 				if (!pi->mem_gddr5)
4365 					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
4366 			}
4367 			j++;
4368 			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4369 				return -EINVAL;
4370 
4371 			if (!pi->mem_gddr5) {
4372 				table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2;
4373 				table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2;
4374 				for (k = 0; k < table->num_entries; k++) {
4375 					table->mc_reg_table_entry[k].mc_data[j] =
4376 						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
4377 				}
4378 				j++;
4379 				if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4380 					return -EINVAL;
4381 			}
4382 			break;
4383 		case MC_SEQ_RESERVE_M:
4384 			temp_reg = RREG32(MC_PMG_CMD_MRS1);
4385 			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
4386 			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
4387 			for (k = 0; k < table->num_entries; k++) {
4388 				table->mc_reg_table_entry[k].mc_data[j] =
4389 					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4390 			}
4391 			j++;
4392 			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4393 				return -EINVAL;
4394 			break;
4395 		default:
4396 			break;
4397 		}
4398 
4399 	}
4400 
4401 	table->last = j;
4402 
4403 	return 0;
4404 }
4405 
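/* Map an MC sequencer register (dword index) to its _LP shadow, if any. */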
4406 static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
4407 {
4408 	bool result = true;
4409 
4410 	switch(in_reg) {
4411 	case MC_SEQ_RAS_TIMING >> 2:
4412 		*out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
4413 		break;
4414 	case MC_SEQ_DLL_STBY >> 2:
4415 		*out_reg = MC_SEQ_DLL_STBY_LP >> 2;
4416 		break;
4417 	case MC_SEQ_G5PDX_CMD0 >> 2:
4418 		*out_reg = MC_SEQ_G5PDX_CMD0_LP >> 2;
4419 		break;
4420 	case MC_SEQ_G5PDX_CMD1 >> 2:
4421 		*out_reg = MC_SEQ_G5PDX_CMD1_LP >> 2;
4422 		break;
4423 	case MC_SEQ_G5PDX_CTRL >> 2:
4424 		*out_reg = MC_SEQ_G5PDX_CTRL_LP >> 2;
4425 		break;
4426 	case MC_SEQ_CAS_TIMING >> 2:
4427 		*out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
4428 		break;
4429 	case MC_SEQ_MISC_TIMING >> 2:
4430 		*out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
4431 		break;
4432 	case MC_SEQ_MISC_TIMING2 >> 2:
4433 		*out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
4434 		break;
4435 	case MC_SEQ_PMG_DVS_CMD >> 2:
4436 		*out_reg = MC_SEQ_PMG_DVS_CMD_LP >> 2;
4437 		break;
4438 	case MC_SEQ_PMG_DVS_CTL >> 2:
4439 		*out_reg = MC_SEQ_PMG_DVS_CTL_LP >> 2;
4440 		break;
4441 	case MC_SEQ_RD_CTL_D0 >> 2:
4442 		*out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
4443 		break;
4444 	case MC_SEQ_RD_CTL_D1 >> 2:
4445 		*out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
4446 		break;
4447 	case MC_SEQ_WR_CTL_D0 >> 2:
4448 		*out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
4449 		break;
4450 	case MC_SEQ_WR_CTL_D1 >> 2:
4451 		*out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
4452 		break;
4453 	case MC_PMG_CMD_EMRS >> 2:
4454 		*out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
4455 		break;
4456 	case MC_PMG_CMD_MRS >> 2:
4457 		*out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
4458 		break;
4459 	case MC_PMG_CMD_MRS1 >> 2:
4460 		*out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
4461 		break;
4462 	case MC_SEQ_PMG_TIMING >> 2:
4463 		*out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
4464 		break;
4465 	case MC_PMG_CMD_MRS2 >> 2:
4466 		*out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
4467 		break;
4468 	case MC_SEQ_WR_CTL_2 >> 2:
4469 		*out_reg = MC_SEQ_WR_CTL_2_LP >> 2;
4470 		break;
4471 	default:
4472 		result = false;
4473 		break;
4474 	}
4475 
4476 	return result;
4477 }
4478 
4479 static void ci_set_valid_flag(struct ci_mc_reg_table *table)
4480 {
4481 	u8 i, j;
4482 
4483 	for (i = 0; i < table->last; i++) {
4484 		for (j = 1; j < table->num_entries; j++) {
4485 			if (table->mc_reg_table_entry[j-1].mc_data[i] !=
4486 			    table->mc_reg_table_entry[j].mc_data[i]) {
4487 				table->valid_flag |= 1 << i;
4488 				break;
4489 			}
4490 		}
4491 	}
4492 }
4493 
4494 static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
4495 {
4496 	u32 i;
4497 	u16 address;
4498 
4499 	for (i = 0; i < table->last; i++) {
4500 		table->mc_reg_address[i].s0 =
4501 			ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
4502 			address : table->mc_reg_address[i].s1;
4503 	}
4504 }
4505 
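/* Bounds-checked copy of the vbios MC register table into driver form. */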
4506 static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
4507 				      struct ci_mc_reg_table *ci_table)
4508 {
4509 	u8 i, j;
4510 
4511 	if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4512 		return -EINVAL;
4513 	if (table->num_entries > MAX_AC_TIMING_ENTRIES)
4514 		return -EINVAL;
4515 
4516 	for (i = 0; i < table->last; i++)
4517 		ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
4518 
4519 	ci_table->last = table->last;
4520 
4521 	for (i = 0; i < table->num_entries; i++) {
4522 		ci_table->mc_reg_table_entry[i].mclk_max =
4523 			table->mc_reg_table_entry[i].mclk_max;
4524 		for (j = 0; j < table->last; j++)
4525 			ci_table->mc_reg_table_entry[i].mc_data[j] =
4526 				table->mc_reg_table_entry[i].mc_data[j];
4527 	}
4528 	ci_table->num_entries = table->num_entries;
4529 
4530 	return 0;
4531 }
4532 
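/*
 * Apply Hawaii (0x67B0/0x67B1) MC sequence fix-ups to the 125000 and
 * 137500 memory strap entries (apparently 1250/1375 MHz in the
 * driver's 10 kHz clock units), then poke the MC sequencer debug
 * register to latch the change.
 */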
4533 static int ci_register_patching_mc_seq(struct radeon_device *rdev,
4534 				       struct ci_mc_reg_table *table)
4535 {
4536 	u8 i, k;
4537 	u32 tmp;
4538 	bool patch;
4539 
4540 	tmp = RREG32(MC_SEQ_MISC0);
4541 	patch = ((tmp & 0x0000f00) == 0x300);
4542 
4543 	if (patch &&
4544 	    ((rdev->pdev->device == 0x67B0) ||
4545 	     (rdev->pdev->device == 0x67B1))) {
4546 		for (i = 0; i < table->last; i++) {
4547 			if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4548 				return -EINVAL;
4549 			switch(table->mc_reg_address[i].s1 << 2) {
4550 			case MC_SEQ_MISC1:
4551 				for (k = 0; k < table->num_entries; k++) {
4552 					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4553 					    (table->mc_reg_table_entry[k].mclk_max == 137500))
4554 						table->mc_reg_table_entry[k].mc_data[i] =
4555 							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
4556 							0x00000007;
4557 				}
4558 				break;
4559 			case MC_SEQ_WR_CTL_D0:
4560 				for (k = 0; k < table->num_entries; k++) {
4561 					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4562 					    (table->mc_reg_table_entry[k].mclk_max == 137500))
4563 						table->mc_reg_table_entry[k].mc_data[i] =
4564 							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4565 							0x0000D0DD;
4566 				}
4567 				break;
4568 			case MC_SEQ_WR_CTL_D1:
4569 				for (k = 0; k < table->num_entries; k++) {
4570 					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4571 					    (table->mc_reg_table_entry[k].mclk_max == 137500))
4572 						table->mc_reg_table_entry[k].mc_data[i] =
4573 							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4574 							0x0000D0DD;
4575 				}
4576 				break;
4577 			case MC_SEQ_WR_CTL_2:
4578 				for (k = 0; k < table->num_entries; k++) {
4579 					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4580 					    (table->mc_reg_table_entry[k].mclk_max == 137500))
4581 						table->mc_reg_table_entry[k].mc_data[i] = 0;
4582 				}
4583 				break;
4584 			case MC_SEQ_CAS_TIMING:
4585 				for (k = 0; k < table->num_entries; k++) {
4586 					if (table->mc_reg_table_entry[k].mclk_max == 125000)
4587 						table->mc_reg_table_entry[k].mc_data[i] =
4588 							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4589 							0x000C0140;
4590 					else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4591 						table->mc_reg_table_entry[k].mc_data[i] =
4592 							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4593 							0x000C0150;
4594 				}
4595 				break;
4596 			case MC_SEQ_MISC_TIMING:
4597 				for (k = 0; k < table->num_entries; k++) {
4598 					if (table->mc_reg_table_entry[k].mclk_max == 125000)
4599 						table->mc_reg_table_entry[k].mc_data[i] =
4600 							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4601 							0x00000030;
4602 					else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4603 						table->mc_reg_table_entry[k].mc_data[i] =
4604 							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4605 							0x00000035;
4606 				}
4607 				break;
4608 			default:
4609 				break;
4610 			}
4611 		}
4612 
4613 		WREG32(MC_SEQ_IO_DEBUG_INDEX, 3);
4614 		tmp = RREG32(MC_SEQ_IO_DEBUG_DATA);
4615 		tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
4616 		WREG32(MC_SEQ_IO_DEBUG_INDEX, 3);
4617 		WREG32(MC_SEQ_IO_DEBUG_DATA, tmp);
4618 	}
4619 
4620 	return 0;
4621 }
4622 
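/*
 * Build the driver's MC register shadow table: copy the live MC
 * sequencer registers into their _LP shadows, read the per-strap
 * table from the vbios, patch it, append the derived command
 * registers and mark which columns actually vary between entries.
 */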
4623 static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
4624 {
4625 	struct ci_power_info *pi = ci_get_pi(rdev);
4626 	struct atom_mc_reg_table *table;
4627 	struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
4628 	u8 module_index = rv770_get_memory_module_index(rdev);
4629 	int ret;
4630 
4631 	table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
4632 	if (!table)
4633 		return -ENOMEM;
4634 
4635 	WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
4636 	WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
4637 	WREG32(MC_SEQ_DLL_STBY_LP, RREG32(MC_SEQ_DLL_STBY));
4638 	WREG32(MC_SEQ_G5PDX_CMD0_LP, RREG32(MC_SEQ_G5PDX_CMD0));
4639 	WREG32(MC_SEQ_G5PDX_CMD1_LP, RREG32(MC_SEQ_G5PDX_CMD1));
4640 	WREG32(MC_SEQ_G5PDX_CTRL_LP, RREG32(MC_SEQ_G5PDX_CTRL));
4641 	WREG32(MC_SEQ_PMG_DVS_CMD_LP, RREG32(MC_SEQ_PMG_DVS_CMD));
4642 	WREG32(MC_SEQ_PMG_DVS_CTL_LP, RREG32(MC_SEQ_PMG_DVS_CTL));
4643 	WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
4644 	WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
4645 	WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
4646 	WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
4647 	WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
4648 	WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
4649 	WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
4650 	WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
4651 	WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
4652 	WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
4653 	WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
4654 	WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
4655 
4656 	ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
4657 	if (ret)
4658 		goto init_mc_done;
4659 
4660 	ret = ci_copy_vbios_mc_reg_table(table, ci_table);
4661 	if (ret)
4662 		goto init_mc_done;
4663 
4664 	ci_set_s0_mc_reg_index(ci_table);
4665 
4666 	ret = ci_register_patching_mc_seq(rdev, ci_table);
4667 	if (ret)
4668 		goto init_mc_done;
4669 
4670 	ret = ci_set_mc_special_registers(rdev, ci_table);
4671 	if (ret)
4672 		goto init_mc_done;
4673 
4674 	ci_set_valid_flag(ci_table);
4675 
4676 init_mc_done:
4677 	kfree(table);
4678 
4679 	return ret;
4680 }
4681 
4682 static int ci_populate_mc_reg_addresses(struct radeon_device *rdev,
4683 					SMU7_Discrete_MCRegisters *mc_reg_table)
4684 {
4685 	struct ci_power_info *pi = ci_get_pi(rdev);
4686 	u32 i, j;
4687 
4688 	for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
4689 		if (pi->mc_reg_table.valid_flag & (1 << j)) {
4690 			if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4691 				return -EINVAL;
4692 			mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
4693 			mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
4694 			i++;
4695 		}
4696 	}
4697 
4698 	mc_reg_table->last = (u8)i;
4699 
4700 	return 0;
4701 }
4702 
4703 static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
4704 				    SMU7_Discrete_MCRegisterSet *data,
4705 				    u32 num_entries, u32 valid_flag)
4706 {
4707 	u32 i, j;
4708 
4709 	for (i = 0, j = 0; j < num_entries; j++) {
4710 		if (valid_flag & (1 << j)) {
4711 			data->value[i] = cpu_to_be32(entry->mc_data[j]);
4712 			i++;
4713 		}
4714 	}
4715 }
4716 
4717 static void ci_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
4718 						 const u32 memory_clock,
4719 						 SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
4720 {
4721 	struct ci_power_info *pi = ci_get_pi(rdev);
4722 	u32 i = 0;
4723 
4724 	for(i = 0; i < pi->mc_reg_table.num_entries; i++) {
4725 		if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
4726 			break;
4727 	}
4728 
4729 	if ((i == pi->mc_reg_table.num_entries) && (i > 0))
4730 		--i;
4731 
4732 	ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
4733 				mc_reg_table_data, pi->mc_reg_table.last,
4734 				pi->mc_reg_table.valid_flag);
4735 }
4736 
4737 static void ci_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
4738 					   SMU7_Discrete_MCRegisters *mc_reg_table)
4739 {
4740 	struct ci_power_info *pi = ci_get_pi(rdev);
4741 	u32 i;
4742 
4743 	for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
4744 		ci_convert_mc_reg_table_entry_to_smc(rdev,
4745 						     pi->dpm_table.mclk_table.dpm_levels[i].value,
4746 						     &mc_reg_table->data[i]);
4747 }
4748 
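/* Convert the full MC register table to SMC format and upload it. */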
4749 static int ci_populate_initial_mc_reg_table(struct radeon_device *rdev)
4750 {
4751 	struct ci_power_info *pi = ci_get_pi(rdev);
4752 	int ret;
4753 
4754 	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4755 
4756 	ret = ci_populate_mc_reg_addresses(rdev, &pi->smc_mc_reg_table);
4757 	if (ret)
4758 		return ret;
4759 	ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
4760 
4761 	return ci_copy_bytes_to_smc(rdev,
4762 				    pi->mc_reg_table_start,
4763 				    (u8 *)&pi->smc_mc_reg_table,
4764 				    sizeof(SMU7_Discrete_MCRegisters),
4765 				    pi->sram_end);
4766 }
4767 
4768 static int ci_update_and_upload_mc_reg_table(struct radeon_device *rdev)
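/*
 * On an mclk overdrive change, rebuild the MC register data and
 * upload only the per-level data sets, not the whole table.
 */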
4769 {
4770 	struct ci_power_info *pi = ci_get_pi(rdev);
4771 
4772 	if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
4773 		return 0;
4774 
4775 	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4776 
4777 	ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
4778 
4779 	return ci_copy_bytes_to_smc(rdev,
4780 				    pi->mc_reg_table_start +
4781 				    offsetof(SMU7_Discrete_MCRegisters, data[0]),
4782 				    (u8 *)&pi->smc_mc_reg_table.data[0],
4783 				    sizeof(SMU7_Discrete_MCRegisterSet) *
4784 				    pi->dpm_table.mclk_table.count,
4785 				    pi->sram_end);
4786 }
4787 
4788 static void ci_enable_voltage_control(struct radeon_device *rdev)
4789 {
4790 	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
4791 
4792 	tmp |= VOLT_PWRMGT_EN;
4793 	WREG32_SMC(GENERAL_PWRMGT, tmp);
4794 }
4795 
4796 static enum radeon_pcie_gen ci_get_maximum_link_speed(struct radeon_device *rdev,
4797 						      struct radeon_ps *radeon_state)
4798 {
4799 	struct ci_ps *state = ci_get_ps(radeon_state);
4800 	int i;
4801 	u16 pcie_speed, max_speed = 0;
4802 
4803 	for (i = 0; i < state->performance_level_count; i++) {
4804 		pcie_speed = state->performance_levels[i].pcie_gen;
4805 		if (max_speed < pcie_speed)
4806 			max_speed = pcie_speed;
4807 	}
4808 
4809 	return max_speed;
4810 }
4811 
4812 static u16 ci_get_current_pcie_speed(struct radeon_device *rdev)
4813 {
4814 	u32 speed_cntl = 0;
4815 
4816 	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
4817 	speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
4818 
4819 	return (u16)speed_cntl;
4820 }
4821 
4822 static int ci_get_current_pcie_lane_number(struct radeon_device *rdev)
4823 {
4824 	u32 link_width = 0;
4825 
4826 	link_width = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL) & LC_LINK_WIDTH_RD_MASK;
4827 	link_width >>= LC_LINK_WIDTH_RD_SHIFT;
4828 
4829 	switch (link_width) {
4830 	case RADEON_PCIE_LC_LINK_WIDTH_X1:
4831 		return 1;
4832 	case RADEON_PCIE_LC_LINK_WIDTH_X2:
4833 		return 2;
4834 	case RADEON_PCIE_LC_LINK_WIDTH_X4:
4835 		return 4;
4836 	case RADEON_PCIE_LC_LINK_WIDTH_X8:
4837 		return 8;
4838 	case RADEON_PCIE_LC_LINK_WIDTH_X12:
4839 		/* not actually supported */
4840 		return 12;
4841 	case RADEON_PCIE_LC_LINK_WIDTH_X0:
4842 	case RADEON_PCIE_LC_LINK_WIDTH_X16:
4843 	default:
4844 		return 16;
4845 	}
4846 }
4847 
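/*
 * If the new state needs a faster PCIe link, request the speed change
 * via ACPI before switching states; a downgrade is only flagged here
 * and requested after the switch completes.
 */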
4848 static void ci_request_link_speed_change_before_state_change(struct radeon_device *rdev,
4849 							     struct radeon_ps *radeon_new_state,
4850 							     struct radeon_ps *radeon_current_state)
4851 {
4852 	struct ci_power_info *pi = ci_get_pi(rdev);
4853 	enum radeon_pcie_gen target_link_speed =
4854 		ci_get_maximum_link_speed(rdev, radeon_new_state);
4855 	enum radeon_pcie_gen current_link_speed;
4856 
4857 	if (pi->force_pcie_gen == RADEON_PCIE_GEN_INVALID)
4858 		current_link_speed = ci_get_maximum_link_speed(rdev, radeon_current_state);
4859 	else
4860 		current_link_speed = pi->force_pcie_gen;
4861 
4862 	pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
4863 	pi->pspp_notify_required = false;
4864 	if (target_link_speed > current_link_speed) {
4865 		switch (target_link_speed) {
4866 #ifdef CONFIG_ACPI
4867 		case RADEON_PCIE_GEN3:
4868 			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
4869 				break;
4870 			pi->force_pcie_gen = RADEON_PCIE_GEN2;
4871 			if (current_link_speed == RADEON_PCIE_GEN2)
4872 				break;
4873 			/* fall through */
4874 		case RADEON_PCIE_GEN2:
4875 			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
4876 				break;
4877 #endif
4878 			/* fall through */
4879 		default:
4880 			pi->force_pcie_gen = ci_get_current_pcie_speed(rdev);
4881 			break;
4882 		}
4883 	} else {
4884 		if (target_link_speed < current_link_speed)
4885 			pi->pspp_notify_required = true;
4886 	}
4887 }
4888 
4889 static void ci_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
4890 							   struct radeon_ps *radeon_new_state,
4891 							   struct radeon_ps *radeon_current_state)
4892 {
4893 	struct ci_power_info *pi = ci_get_pi(rdev);
4894 	enum radeon_pcie_gen target_link_speed =
4895 		ci_get_maximum_link_speed(rdev, radeon_new_state);
4896 	u8 request;
4897 
4898 	if (pi->pspp_notify_required) {
4899 		if (target_link_speed == RADEON_PCIE_GEN3)
4900 			request = PCIE_PERF_REQ_PECI_GEN3;
4901 		else if (target_link_speed == RADEON_PCIE_GEN2)
4902 			request = PCIE_PERF_REQ_PECI_GEN2;
4903 		else
4904 			request = PCIE_PERF_REQ_PECI_GEN1;
4905 
4906 		if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
4907 		    (ci_get_current_pcie_speed(rdev) > 0))
4908 			return;
4909 
4910 #ifdef CONFIG_ACPI
4911 		radeon_acpi_pcie_performance_request(rdev, request, false);
4912 #endif
4913 	}
4914 }
4915 
4916 static int ci_set_private_data_variables_based_on_pptable(struct radeon_device *rdev)
4917 {
4918 	struct ci_power_info *pi = ci_get_pi(rdev);
4919 	struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
4920 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
4921 	struct radeon_clock_voltage_dependency_table *allowed_mclk_vddc_table =
4922 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
4923 	struct radeon_clock_voltage_dependency_table *allowed_mclk_vddci_table =
4924 		&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
4925 
4926 	if (allowed_sclk_vddc_table == NULL)
4927 		return -EINVAL;
4928 	if (allowed_sclk_vddc_table->count < 1)
4929 		return -EINVAL;
4930 	if (allowed_mclk_vddc_table == NULL)
4931 		return -EINVAL;
4932 	if (allowed_mclk_vddc_table->count < 1)
4933 		return -EINVAL;
4934 	if (allowed_mclk_vddci_table == NULL)
4935 		return -EINVAL;
4936 	if (allowed_mclk_vddci_table->count < 1)
4937 		return -EINVAL;
4938 
4939 	pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
4940 	pi->max_vddc_in_pp_table =
4941 		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4942 
4943 	pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
4944 	pi->max_vddci_in_pp_table =
4945 		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4946 
4947 	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
4948 		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
4949 	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
		allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
4951 	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
4952 		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4953 	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
4954 		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4955 
4956 	return 0;
4957 }
4958 
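/*
 * Translate a leakage-ID placeholder stored in the BIOS tables into
 * the actual voltage recorded for this part, when a match exists.
 * ci_patch_with_vddci_leakage() below does the same for VDDCI.
 */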
4959 static void ci_patch_with_vddc_leakage(struct radeon_device *rdev, u16 *vddc)
4960 {
4961 	struct ci_power_info *pi = ci_get_pi(rdev);
4962 	struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
4963 	u32 leakage_index;
4964 
4965 	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
4966 		if (leakage_table->leakage_id[leakage_index] == *vddc) {
4967 			*vddc = leakage_table->actual_voltage[leakage_index];
4968 			break;
4969 		}
4970 	}
4971 }
4972 
4973 static void ci_patch_with_vddci_leakage(struct radeon_device *rdev, u16 *vddci)
4974 {
4975 	struct ci_power_info *pi = ci_get_pi(rdev);
4976 	struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
4977 	u32 leakage_index;
4978 
4979 	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
4980 		if (leakage_table->leakage_id[leakage_index] == *vddci) {
4981 			*vddci = leakage_table->actual_voltage[leakage_index];
4982 			break;
4983 		}
4984 	}
4985 }
4986 
4987 static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4988 								      struct radeon_clock_voltage_dependency_table *table)
4989 {
4990 	u32 i;
4991 
4992 	if (table) {
4993 		for (i = 0; i < table->count; i++)
4994 			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4995 	}
4996 }
4997 
4998 static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct radeon_device *rdev,
4999 								       struct radeon_clock_voltage_dependency_table *table)
5000 {
5001 	u32 i;
5002 
5003 	if (table) {
5004 		for (i = 0; i < table->count; i++)
5005 			ci_patch_with_vddci_leakage(rdev, &table->entries[i].v);
5006 	}
5007 }
5008 
5009 static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
5010 									  struct radeon_vce_clock_voltage_dependency_table *table)
5011 {
5012 	u32 i;
5013 
5014 	if (table) {
5015 		for (i = 0; i < table->count; i++)
5016 			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
5017 	}
5018 }
5019 
5020 static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
5021 									  struct radeon_uvd_clock_voltage_dependency_table *table)
5022 {
5023 	u32 i;
5024 
5025 	if (table) {
5026 		for (i = 0; i < table->count; i++)
5027 			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
5028 	}
5029 }
5030 
5031 static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct radeon_device *rdev,
5032 								   struct radeon_phase_shedding_limits_table *table)
5033 {
5034 	u32 i;
5035 
5036 	if (table) {
5037 		for (i = 0; i < table->count; i++)
5038 			ci_patch_with_vddc_leakage(rdev, &table->entries[i].voltage);
5039 	}
5040 }
5041 
5042 static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct radeon_device *rdev,
5043 							    struct radeon_clock_and_voltage_limits *table)
5044 {
5045 	if (table) {
5046 		ci_patch_with_vddc_leakage(rdev, (u16 *)&table->vddc);
5047 		ci_patch_with_vddci_leakage(rdev, (u16 *)&table->vddci);
5048 	}
5049 }
5050 
5051 static void ci_patch_cac_leakage_table_with_vddc_leakage(struct radeon_device *rdev,
5052 							 struct radeon_cac_leakage_table *table)
5053 {
5054 	u32 i;
5055 
5056 	if (table) {
5057 		for (i = 0; i < table->count; i++)
5058 			ci_patch_with_vddc_leakage(rdev, &table->entries[i].vddc);
5059 	}
5060 }
5061 
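/*
 * Apply the leakage fixups to every clock/voltage dependency table,
 * the phase-shedding limits, the AC and DC clock/voltage limits and
 * the CAC leakage table.
 */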
5062 static void ci_patch_dependency_tables_with_leakage(struct radeon_device *rdev)
5063 {
5065 	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
5066 								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
5067 	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
5068 								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
5069 	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
5070 								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
5071 	ci_patch_clock_voltage_dependency_table_with_vddci_leakage(rdev,
5072 								   &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
5073 	ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(rdev,
5074 								      &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
5075 	ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(rdev,
5076 								      &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
5077 	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
5078 								  &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
5079 	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
5080 								  &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
5081 	ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(rdev,
5082 							       &rdev->pm.dpm.dyn_state.phase_shedding_limits_table);
5083 	ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
5084 							&rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
5085 	ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
5086 							&rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
5087 	ci_patch_cac_leakage_table_with_vddc_leakage(rdev,
5088 						     &rdev->pm.dpm.dyn_state.cac_leakage_table);
5090 }
5091 
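/* Detect GDDR5 vs. other memory types from the MC_SEQ_MISC0 strap bits. */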
5092 static void ci_get_memory_type(struct radeon_device *rdev)
5093 {
5094 	struct ci_power_info *pi = ci_get_pi(rdev);
5095 	u32 tmp;
5096 
5097 	tmp = RREG32(MC_SEQ_MISC0);
5098 
	pi->mem_gddr5 = ((tmp & MC_SEQ_MISC0_GDDR5_MASK) >>
			 MC_SEQ_MISC0_GDDR5_SHIFT) == MC_SEQ_MISC0_GDDR5_VALUE;
5105 }
5106 
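/*
 * Cache the new state (and its ci_ps payload) as the current state;
 * ps_priv is re-pointed at the private copy so it stays valid
 * independently of the source state.  ci_update_requested_ps() below
 * does the same for the requested state.
 */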
5107 static void ci_update_current_ps(struct radeon_device *rdev,
5108 				 struct radeon_ps *rps)
5109 {
5110 	struct ci_ps *new_ps = ci_get_ps(rps);
5111 	struct ci_power_info *pi = ci_get_pi(rdev);
5112 
5113 	pi->current_rps = *rps;
5114 	pi->current_ps = *new_ps;
5115 	pi->current_rps.ps_priv = &pi->current_ps;
5116 }
5117 
5118 static void ci_update_requested_ps(struct radeon_device *rdev,
5119 				   struct radeon_ps *rps)
5120 {
5121 	struct ci_ps *new_ps = ci_get_ps(rps);
5122 	struct ci_power_info *pi = ci_get_pi(rdev);
5123 
5124 	pi->requested_rps = *rps;
5125 	pi->requested_ps = *new_ps;
5126 	pi->requested_rps.ps_priv = &pi->requested_ps;
5127 }
5128 
5129 int ci_dpm_pre_set_power_state(struct radeon_device *rdev)
5130 {
5131 	struct ci_power_info *pi = ci_get_pi(rdev);
5132 	struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
5133 	struct radeon_ps *new_ps = &requested_ps;
5134 
5135 	ci_update_requested_ps(rdev, new_ps);
5136 
5137 	ci_apply_state_adjust_rules(rdev, &pi->requested_rps);
5138 
5139 	return 0;
5140 }
5141 
5142 void ci_dpm_post_set_power_state(struct radeon_device *rdev)
5143 {
5144 	struct ci_power_info *pi = ci_get_pi(rdev);
5145 	struct radeon_ps *new_ps = &pi->requested_rps;
5146 
5147 	ci_update_current_ps(rdev, new_ps);
5148 }
5149 
5151 void ci_dpm_setup_asic(struct radeon_device *rdev)
5152 {
5153 	int r;
5154 
5155 	r = ci_mc_load_microcode(rdev);
5156 	if (r)
5157 		DRM_ERROR("Failed to load MC firmware!\n");
5158 	ci_read_clock_registers(rdev);
5159 	ci_get_memory_type(rdev);
5160 	ci_enable_acpi_power_management(rdev);
5161 	ci_init_sclk_t(rdev);
5162 }
5163 
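/*
 * Full DPM bring-up: construct the voltage tables, upload the SMC
 * firmware and state tables, start the SMC, then enable the
 * individual features (ULV, deep sleep, DIDT, CAC, power containment,
 * thermal-based sclk DPM).  Bails out with -EINVAL if the SMC is
 * already running; any later failure aborts the sequence.
 */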
5164 int ci_dpm_enable(struct radeon_device *rdev)
5165 {
5166 	struct ci_power_info *pi = ci_get_pi(rdev);
5167 	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
5168 	int ret;
5169 
5170 	if (ci_is_smc_running(rdev))
5171 		return -EINVAL;
5172 	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
5173 		ci_enable_voltage_control(rdev);
5174 		ret = ci_construct_voltage_tables(rdev);
5175 		if (ret) {
5176 			DRM_ERROR("ci_construct_voltage_tables failed\n");
5177 			return ret;
5178 		}
5179 	}
5180 	if (pi->caps_dynamic_ac_timing) {
5181 		ret = ci_initialize_mc_reg_table(rdev);
5182 		if (ret)
5183 			pi->caps_dynamic_ac_timing = false;
5184 	}
5185 	if (pi->dynamic_ss)
5186 		ci_enable_spread_spectrum(rdev, true);
5187 	if (pi->thermal_protection)
5188 		ci_enable_thermal_protection(rdev, true);
5189 	ci_program_sstp(rdev);
5190 	ci_enable_display_gap(rdev);
5191 	ci_program_vc(rdev);
5192 	ret = ci_upload_firmware(rdev);
5193 	if (ret) {
5194 		DRM_ERROR("ci_upload_firmware failed\n");
5195 		return ret;
5196 	}
5197 	ret = ci_process_firmware_header(rdev);
5198 	if (ret) {
5199 		DRM_ERROR("ci_process_firmware_header failed\n");
5200 		return ret;
5201 	}
5202 	ret = ci_initial_switch_from_arb_f0_to_f1(rdev);
5203 	if (ret) {
5204 		DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
5205 		return ret;
5206 	}
5207 	ret = ci_init_smc_table(rdev);
5208 	if (ret) {
5209 		DRM_ERROR("ci_init_smc_table failed\n");
5210 		return ret;
5211 	}
5212 	ret = ci_init_arb_table_index(rdev);
5213 	if (ret) {
5214 		DRM_ERROR("ci_init_arb_table_index failed\n");
5215 		return ret;
5216 	}
5217 	if (pi->caps_dynamic_ac_timing) {
5218 		ret = ci_populate_initial_mc_reg_table(rdev);
5219 		if (ret) {
5220 			DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
5221 			return ret;
5222 		}
5223 	}
5224 	ret = ci_populate_pm_base(rdev);
5225 	if (ret) {
5226 		DRM_ERROR("ci_populate_pm_base failed\n");
5227 		return ret;
5228 	}
5229 	ci_dpm_start_smc(rdev);
5230 	ci_enable_vr_hot_gpio_interrupt(rdev);
5231 	ret = ci_notify_smc_display_change(rdev, false);
5232 	if (ret) {
5233 		DRM_ERROR("ci_notify_smc_display_change failed\n");
5234 		return ret;
5235 	}
5236 	ci_enable_sclk_control(rdev, true);
5237 	ret = ci_enable_ulv(rdev, true);
5238 	if (ret) {
5239 		DRM_ERROR("ci_enable_ulv failed\n");
5240 		return ret;
5241 	}
5242 	ret = ci_enable_ds_master_switch(rdev, true);
5243 	if (ret) {
5244 		DRM_ERROR("ci_enable_ds_master_switch failed\n");
5245 		return ret;
5246 	}
5247 	ret = ci_start_dpm(rdev);
5248 	if (ret) {
5249 		DRM_ERROR("ci_start_dpm failed\n");
5250 		return ret;
5251 	}
5252 	ret = ci_enable_didt(rdev, true);
5253 	if (ret) {
5254 		DRM_ERROR("ci_enable_didt failed\n");
5255 		return ret;
5256 	}
5257 	ret = ci_enable_smc_cac(rdev, true);
5258 	if (ret) {
5259 		DRM_ERROR("ci_enable_smc_cac failed\n");
5260 		return ret;
5261 	}
5262 	ret = ci_enable_power_containment(rdev, true);
5263 	if (ret) {
5264 		DRM_ERROR("ci_enable_power_containment failed\n");
5265 		return ret;
5266 	}
5267 
5268 	ret = ci_power_control_set_level(rdev);
5269 	if (ret) {
5270 		DRM_ERROR("ci_power_control_set_level failed\n");
5271 		return ret;
5272 	}
5273 
5274 	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
5275 
5276 	ret = ci_enable_thermal_based_sclk_dpm(rdev, true);
5277 	if (ret) {
5278 		DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n");
5279 		return ret;
5280 	}
5281 
5282 	ci_thermal_start_thermal_controller(rdev);
5283 
5284 	ci_update_current_ps(rdev, boot_ps);
5285 
5286 	return 0;
5287 }
5288 
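/*
 * Program the supported temperature range with the thermal alert
 * masked, then unmask it again.
 */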
5289 static int ci_set_temperature_range(struct radeon_device *rdev)
5290 {
5291 	int ret;
5292 
5293 	ret = ci_thermal_enable_alert(rdev, false);
5294 	if (ret)
5295 		return ret;
5296 	ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
5297 	if (ret)
5298 		return ret;
5299 	ret = ci_thermal_enable_alert(rdev, true);
5300 	if (ret)
5301 		return ret;
5302 
5303 	return ret;
5304 }
5305 
5306 int ci_dpm_late_enable(struct radeon_device *rdev)
5307 {
5308 	int ret;
5309 
5310 	ret = ci_set_temperature_range(rdev);
5311 	if (ret)
5312 		return ret;
5313 
5314 	ci_dpm_powergate_uvd(rdev, true);
5315 
5316 	return 0;
5317 }
5318 
5319 void ci_dpm_disable(struct radeon_device *rdev)
5320 {
5321 	struct ci_power_info *pi = ci_get_pi(rdev);
5322 	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
5323 
5324 	ci_dpm_powergate_uvd(rdev, false);
5325 
5326 	if (!ci_is_smc_running(rdev))
5327 		return;
5328 
5329 	ci_thermal_stop_thermal_controller(rdev);
5330 
5331 	if (pi->thermal_protection)
5332 		ci_enable_thermal_protection(rdev, false);
5333 	ci_enable_power_containment(rdev, false);
5334 	ci_enable_smc_cac(rdev, false);
5335 	ci_enable_didt(rdev, false);
5336 	ci_enable_spread_spectrum(rdev, false);
5337 	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
5338 	ci_stop_dpm(rdev);
5339 	ci_enable_ds_master_switch(rdev, false);
5340 	ci_enable_ulv(rdev, false);
5341 	ci_clear_vc(rdev);
5342 	ci_reset_to_default(rdev);
5343 	ci_dpm_stop_smc(rdev);
5344 	ci_force_switch_to_arb_f0(rdev);
5345 	ci_enable_thermal_based_sclk_dpm(rdev, false);
5346 
5347 	ci_update_current_ps(rdev, boot_ps);
5348 }
5349 
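/*
 * Switch to the requested power state: freeze SCLK/MCLK DPM while the
 * new levels and MC registers are uploaded, then unfreeze and hand
 * the SMC the updated level enable mask.  PCIe link-speed requests
 * are issued before the switch when upclocking and afterwards when
 * downclocking.
 */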
5350 int ci_dpm_set_power_state(struct radeon_device *rdev)
5351 {
5352 	struct ci_power_info *pi = ci_get_pi(rdev);
5353 	struct radeon_ps *new_ps = &pi->requested_rps;
5354 	struct radeon_ps *old_ps = &pi->current_rps;
5355 	int ret;
5356 
5357 	ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps);
5358 	if (pi->pcie_performance_request)
5359 		ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps);
5360 	ret = ci_freeze_sclk_mclk_dpm(rdev);
5361 	if (ret) {
5362 		DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
5363 		return ret;
5364 	}
5365 	ret = ci_populate_and_upload_sclk_mclk_dpm_levels(rdev, new_ps);
5366 	if (ret) {
5367 		DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
5368 		return ret;
5369 	}
5370 	ret = ci_generate_dpm_level_enable_mask(rdev, new_ps);
5371 	if (ret) {
5372 		DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
5373 		return ret;
5374 	}
5375 
5376 	ret = ci_update_vce_dpm(rdev, new_ps, old_ps);
5377 	if (ret) {
5378 		DRM_ERROR("ci_update_vce_dpm failed\n");
5379 		return ret;
5380 	}
5381 
5382 	ret = ci_update_sclk_t(rdev);
5383 	if (ret) {
5384 		DRM_ERROR("ci_update_sclk_t failed\n");
5385 		return ret;
5386 	}
5387 	if (pi->caps_dynamic_ac_timing) {
5388 		ret = ci_update_and_upload_mc_reg_table(rdev);
5389 		if (ret) {
5390 			DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
5391 			return ret;
5392 		}
5393 	}
5394 	ret = ci_program_memory_timing_parameters(rdev);
5395 	if (ret) {
5396 		DRM_ERROR("ci_program_memory_timing_parameters failed\n");
5397 		return ret;
5398 	}
5399 	ret = ci_unfreeze_sclk_mclk_dpm(rdev);
5400 	if (ret) {
5401 		DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
5402 		return ret;
5403 	}
5404 	ret = ci_upload_dpm_level_enable_mask(rdev);
5405 	if (ret) {
5406 		DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
5407 		return ret;
5408 	}
5409 	if (pi->pcie_performance_request)
5410 		ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
5411 
5412 	return 0;
5413 }
5414 
5415 #if 0
5416 void ci_dpm_reset_asic(struct radeon_device *rdev)
5417 {
5418 	ci_set_boot_state(rdev);
5419 }
5420 #endif
5421 
5422 void ci_dpm_display_configuration_changed(struct radeon_device *rdev)
5423 {
5424 	ci_program_display_gap(rdev);
5425 }
5426 
5427 union power_info {
5428 	struct _ATOM_POWERPLAY_INFO info;
5429 	struct _ATOM_POWERPLAY_INFO_V2 info_2;
5430 	struct _ATOM_POWERPLAY_INFO_V3 info_3;
5431 	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
5432 	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
5433 	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
5434 };
5435 
5436 union pplib_clock_info {
5437 	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
5438 	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
5439 	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
5440 	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
5441 	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
5442 	struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
5443 };
5444 
5445 union pplib_power_state {
5446 	struct _ATOM_PPLIB_STATE v1;
5447 	struct _ATOM_PPLIB_STATE_V2 v2;
5448 };
5449 
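/*
 * Copy the classification flags and (for newer table revisions) the
 * UVD clocks out of the non-clock info entry, and remember which
 * states are the boot and UVD states.
 */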
5450 static void ci_parse_pplib_non_clock_info(struct radeon_device *rdev,
5451 					  struct radeon_ps *rps,
5452 					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
5453 					  u8 table_rev)
5454 {
5455 	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
5456 	rps->class = le16_to_cpu(non_clock_info->usClassification);
5457 	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
5458 
5459 	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
5460 		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
5461 		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
5462 	} else {
5463 		rps->vclk = 0;
5464 		rps->dclk = 0;
5465 	}
5466 
5467 	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
5468 		rdev->pm.dpm.boot_ps = rps;
5469 	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
5470 		rdev->pm.dpm.uvd_ps = rps;
5471 }
5472 
5473 static void ci_parse_pplib_clock_info(struct radeon_device *rdev,
5474 				      struct radeon_ps *rps, int index,
5475 				      union pplib_clock_info *clock_info)
5476 {
5477 	struct ci_power_info *pi = ci_get_pi(rdev);
5478 	struct ci_ps *ps = ci_get_ps(rps);
5479 	struct ci_pl *pl = &ps->performance_levels[index];
5480 
5481 	ps->performance_level_count = index + 1;
5482 
5483 	pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5484 	pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
5485 	pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5486 	pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5487 
5488 	pl->pcie_gen = r600_get_pcie_gen_support(rdev,
5489 						 pi->sys_pcie_mask,
5490 						 pi->vbios_boot_state.pcie_gen_bootup_value,
5491 						 clock_info->ci.ucPCIEGen);
5492 	pl->pcie_lane = r600_get_pcie_lane_support(rdev,
5493 						   pi->vbios_boot_state.pcie_lane_bootup_value,
5494 						   le16_to_cpu(clock_info->ci.usPCIELane));
5495 
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
		pi->acpi_pcie_gen = pl->pcie_gen;
5499 
5500 	if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
5501 		pi->ulv.supported = true;
5502 		pi->ulv.pl = *pl;
5503 		pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
5504 	}
5505 
5506 	/* patch up boot state */
5507 	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
5508 		pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
5509 		pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
5510 		pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
5511 		pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
5512 	}
5513 
5514 	switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
5515 	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
5516 		pi->use_pcie_powersaving_levels = true;
5517 		if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
5518 			pi->pcie_gen_powersaving.max = pl->pcie_gen;
5519 		if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
5520 			pi->pcie_gen_powersaving.min = pl->pcie_gen;
5521 		if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
5522 			pi->pcie_lane_powersaving.max = pl->pcie_lane;
5523 		if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
5524 			pi->pcie_lane_powersaving.min = pl->pcie_lane;
5525 		break;
5526 	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
5527 		pi->use_pcie_performance_levels = true;
5528 		if (pi->pcie_gen_performance.max < pl->pcie_gen)
5529 			pi->pcie_gen_performance.max = pl->pcie_gen;
5530 		if (pi->pcie_gen_performance.min > pl->pcie_gen)
5531 			pi->pcie_gen_performance.min = pl->pcie_gen;
5532 		if (pi->pcie_lane_performance.max < pl->pcie_lane)
5533 			pi->pcie_lane_performance.max = pl->pcie_lane;
5534 		if (pi->pcie_lane_performance.min > pl->pcie_lane)
5535 			pi->pcie_lane_performance.min = pl->pcie_lane;
5536 		break;
5537 	default:
5538 		break;
5539 	}
5540 }
5541 
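/*
 * Walk the ATOM PPLib state array and build the driver's radeon_ps
 * array: one ci_ps per state, with up to
 * CISLANDS_MAX_HARDWARE_POWERLEVELS performance levels each, plus the
 * sclk/mclk values for the VCE states.
 */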
5542 static int ci_parse_power_table(struct radeon_device *rdev)
5543 {
5544 	struct radeon_mode_info *mode_info = &rdev->mode_info;
5545 	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
5546 	union pplib_power_state *power_state;
5547 	int i, j, k, non_clock_array_index, clock_array_index;
5548 	union pplib_clock_info *clock_info;
5549 	struct _StateArray *state_array;
5550 	struct _ClockInfoArray *clock_info_array;
5551 	struct _NonClockInfoArray *non_clock_info_array;
5552 	union power_info *power_info;
5553 	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
5554 	u16 data_offset;
5555 	u8 frev, crev;
5556 	u8 *power_state_offset;
5557 	struct ci_ps *ps;
5558 
5559 	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
5560 				   &frev, &crev, &data_offset))
5561 		return -EINVAL;
5562 	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
5563 
5564 	state_array = (struct _StateArray *)
5565 		(mode_info->atom_context->bios + data_offset +
5566 		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
5567 	clock_info_array = (struct _ClockInfoArray *)
5568 		(mode_info->atom_context->bios + data_offset +
5569 		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
5570 	non_clock_info_array = (struct _NonClockInfoArray *)
5571 		(mode_info->atom_context->bios + data_offset +
5572 		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
5573 
5574 	rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
5575 				  sizeof(struct radeon_ps),
5576 				  GFP_KERNEL);
5577 	if (!rdev->pm.dpm.ps)
5578 		return -ENOMEM;
5579 	power_state_offset = (u8 *)state_array->states;
5580 	for (i = 0; i < state_array->ucNumEntries; i++) {
5581 		u8 *idx;
5582 		power_state = (union pplib_power_state *)power_state_offset;
5583 		non_clock_array_index = power_state->v2.nonClockInfoIndex;
5584 		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
5585 			&non_clock_info_array->nonClockInfo[non_clock_array_index];
		if (!rdev->pm.power_state[i].clock_info) {
			/* don't leak the states parsed so far */
			while (i--)
				kfree(rdev->pm.dpm.ps[i].ps_priv);
			kfree(rdev->pm.dpm.ps);
			return -EINVAL;
		}
		ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
		if (ps == NULL) {
			while (i--)
				kfree(rdev->pm.dpm.ps[i].ps_priv);
			kfree(rdev->pm.dpm.ps);
			return -ENOMEM;
		}
5593 		rdev->pm.dpm.ps[i].ps_priv = ps;
5594 		ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
5595 					      non_clock_info,
5596 					      non_clock_info_array->ucEntrySize);
5597 		k = 0;
5598 		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
5599 		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
5600 			clock_array_index = idx[j];
5601 			if (clock_array_index >= clock_info_array->ucNumEntries)
5602 				continue;
5603 			if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
5604 				break;
5605 			clock_info = (union pplib_clock_info *)
5606 				((u8 *)&clock_info_array->clockInfo[0] +
5607 				 (clock_array_index * clock_info_array->ucEntrySize));
5608 			ci_parse_pplib_clock_info(rdev,
5609 						  &rdev->pm.dpm.ps[i], k,
5610 						  clock_info);
5611 			k++;
5612 		}
5613 		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
5614 	}
5615 	rdev->pm.dpm.num_ps = state_array->ucNumEntries;
5616 
5617 	/* fill in the vce power states */
5618 	for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
5619 		u32 sclk, mclk;
5620 		clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx;
5621 		clock_info = (union pplib_clock_info *)
5622 			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
5623 		sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5624 		sclk |= clock_info->ci.ucEngineClockHigh << 16;
5625 		mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5626 		mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5627 		rdev->pm.dpm.vce_states[i].sclk = sclk;
5628 		rdev->pm.dpm.vce_states[i].mclk = mclk;
5629 	}
5630 
5631 	return 0;
5632 }
5633 
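/*
 * Read the bootup clocks and voltages from the ATOM FirmwareInfo
 * table; the bootup PCIe speed and lane count are sampled from the
 * hardware instead.
 */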
5634 static int ci_get_vbios_boot_values(struct radeon_device *rdev,
5635 				    struct ci_vbios_boot_state *boot_state)
5636 {
5637 	struct radeon_mode_info *mode_info = &rdev->mode_info;
5638 	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
5639 	ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
5640 	u8 frev, crev;
5641 	u16 data_offset;
5642 
5643 	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
5644 				   &frev, &crev, &data_offset)) {
5645 		firmware_info =
5646 			(ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
5647 						    data_offset);
5648 		boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
5649 		boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
5650 		boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
5651 		boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(rdev);
5652 		boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(rdev);
5653 		boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
5654 		boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
5655 
5656 		return 0;
5657 	}
5658 	return -EINVAL;
5659 }
5660 
5661 void ci_dpm_fini(struct radeon_device *rdev)
5662 {
5663 	int i;
5664 
	for (i = 0; i < rdev->pm.dpm.num_ps; i++)
		kfree(rdev->pm.dpm.ps[i].ps_priv);
5668 	kfree(rdev->pm.dpm.ps);
5669 	kfree(rdev->pm.dpm.priv);
5670 	kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
5671 	r600_free_extended_power_table(rdev);
5672 }
5673 
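/*
 * One-time DPM initialization: allocate ci_power_info, derive the
 * PCIe speed mask from the root port's capabilities, parse the vbios
 * boot values and power tables, and set up the defaults used by the
 * rest of the driver.  Error paths clean up through ci_dpm_fini().
 */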
5674 int ci_dpm_init(struct radeon_device *rdev)
5675 {
5676 	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
5677 	SMU7_Discrete_DpmTable  *dpm_table;
5678 	struct radeon_gpio_rec gpio;
5679 	u16 data_offset, size;
5680 	u8 frev, crev;
5681 	struct ci_power_info *pi;
5682 	enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
5683 	struct pci_dev *root = rdev->pdev->bus->self;
5684 	int ret;
5685 
5686 	pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
5687 	if (pi == NULL)
5688 		return -ENOMEM;
5689 	rdev->pm.dpm.priv = pi;
5690 
5691 	if (!pci_is_root_bus(rdev->pdev->bus))
5692 		speed_cap = pcie_get_speed_cap(root);
5693 	if (speed_cap == PCI_SPEED_UNKNOWN) {
5694 		pi->sys_pcie_mask = 0;
5695 	} else {
5696 		if (speed_cap == PCIE_SPEED_8_0GT)
5697 			pi->sys_pcie_mask = RADEON_PCIE_SPEED_25 |
5698 				RADEON_PCIE_SPEED_50 |
5699 				RADEON_PCIE_SPEED_80;
5700 		else if (speed_cap == PCIE_SPEED_5_0GT)
5701 			pi->sys_pcie_mask = RADEON_PCIE_SPEED_25 |
5702 				RADEON_PCIE_SPEED_50;
5703 		else
5704 			pi->sys_pcie_mask = RADEON_PCIE_SPEED_25;
5705 	}
5706 	pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
5707 
5708 	pi->pcie_gen_performance.max = RADEON_PCIE_GEN1;
5709 	pi->pcie_gen_performance.min = RADEON_PCIE_GEN3;
5710 	pi->pcie_gen_powersaving.max = RADEON_PCIE_GEN1;
5711 	pi->pcie_gen_powersaving.min = RADEON_PCIE_GEN3;
5712 
5713 	pi->pcie_lane_performance.max = 0;
5714 	pi->pcie_lane_performance.min = 16;
5715 	pi->pcie_lane_powersaving.max = 0;
5716 	pi->pcie_lane_powersaving.min = 16;
5717 
5718 	ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state);
5719 	if (ret) {
5720 		ci_dpm_fini(rdev);
5721 		return ret;
5722 	}
5723 
5724 	ret = r600_get_platform_caps(rdev);
5725 	if (ret) {
5726 		ci_dpm_fini(rdev);
5727 		return ret;
5728 	}
5729 
5730 	ret = r600_parse_extended_power_table(rdev);
5731 	if (ret) {
5732 		ci_dpm_fini(rdev);
5733 		return ret;
5734 	}
5735 
5736 	ret = ci_parse_power_table(rdev);
5737 	if (ret) {
5738 		ci_dpm_fini(rdev);
5739 		return ret;
5740 	}
5741 
5742 	pi->dll_default_on = false;
5743 	pi->sram_end = SMC_RAM_END;
5744 
5745 	pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
5746 	pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
5747 	pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
5748 	pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
5749 	pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
5750 	pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
5751 	pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
5752 	pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;
5753 
5754 	pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
5755 
5756 	pi->sclk_dpm_key_disabled = 0;
5757 	pi->mclk_dpm_key_disabled = 0;
5758 	pi->pcie_dpm_key_disabled = 0;
5759 	pi->thermal_sclk_dpm_enabled = 0;
5760 
5761 	/* mclk dpm is unstable on some R7 260X cards with the old mc ucode */
5762 	if ((rdev->pdev->device == 0x6658) &&
5763 	    (rdev->mc_fw->size == (BONAIRE_MC_UCODE_SIZE * 4))) {
5764 		pi->mclk_dpm_key_disabled = 1;
5765 	}
5766 
5767 	pi->caps_sclk_ds = true;
5768 
5769 	pi->mclk_strobe_mode_threshold = 40000;
5770 	pi->mclk_stutter_mode_threshold = 40000;
5771 	pi->mclk_edc_enable_threshold = 40000;
5772 	pi->mclk_edc_wr_enable_threshold = 40000;
5773 
5774 	ci_initialize_powertune_defaults(rdev);
5775 
5776 	pi->caps_fps = false;
5777 
5778 	pi->caps_sclk_throttle_low_notification = false;
5779 
5780 	pi->caps_uvd_dpm = true;
5781 	pi->caps_vce_dpm = true;
5782 
5783 	ci_get_leakage_voltages(rdev);
5784 	ci_patch_dependency_tables_with_leakage(rdev);
5785 	ci_set_private_data_variables_based_on_pptable(rdev);
5786 
5787 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
5788 		kcalloc(4,
5789 			sizeof(struct radeon_clock_voltage_dependency_entry),
5790 			GFP_KERNEL);
5791 	if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
5792 		ci_dpm_fini(rdev);
5793 		return -ENOMEM;
5794 	}
5795 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
5796 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
5797 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
5798 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
5799 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
5800 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
5801 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
5802 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
5803 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
5804 
5805 	rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
5806 	rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
5807 	rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
5808 
5809 	rdev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
5810 	rdev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
5811 	rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
5812 	rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
5813 
5814 	if (rdev->family == CHIP_HAWAII) {
5815 		pi->thermal_temp_setting.temperature_low = 94500;
5816 		pi->thermal_temp_setting.temperature_high = 95000;
5817 		pi->thermal_temp_setting.temperature_shutdown = 104000;
5818 	} else {
5819 		pi->thermal_temp_setting.temperature_low = 99500;
5820 		pi->thermal_temp_setting.temperature_high = 100000;
5821 		pi->thermal_temp_setting.temperature_shutdown = 104000;
5822 	}
5823 
5824 	pi->uvd_enabled = false;
5825 
5826 	dpm_table = &pi->smc_state_table;
5827 
5828 	gpio = radeon_atombios_lookup_gpio(rdev, VDDC_VRHOT_GPIO_PINID);
5829 	if (gpio.valid) {
5830 		dpm_table->VRHotGpio = gpio.shift;
5831 		rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
5832 	} else {
5833 		dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
5834 		rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
5835 	}
5836 
5837 	gpio = radeon_atombios_lookup_gpio(rdev, PP_AC_DC_SWITCH_GPIO_PINID);
5838 	if (gpio.valid) {
5839 		dpm_table->AcDcGpio = gpio.shift;
5840 		rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
5841 	} else {
5842 		dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
5843 		rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
5844 	}
5845 
5846 	gpio = radeon_atombios_lookup_gpio(rdev, VDDC_PCC_GPIO_PINID);
5847 	if (gpio.valid) {
5848 		u32 tmp = RREG32_SMC(CNB_PWRMGT_CNTL);
5849 
5850 		switch (gpio.shift) {
5851 		case 0:
5852 			tmp &= ~GNB_SLOW_MODE_MASK;
5853 			tmp |= GNB_SLOW_MODE(1);
5854 			break;
5855 		case 1:
5856 			tmp &= ~GNB_SLOW_MODE_MASK;
5857 			tmp |= GNB_SLOW_MODE(2);
5858 			break;
5859 		case 2:
5860 			tmp |= GNB_SLOW;
5861 			break;
5862 		case 3:
5863 			tmp |= FORCE_NB_PS1;
5864 			break;
5865 		case 4:
5866 			tmp |= DPM_ENABLED;
5867 			break;
5868 		default:
5869 			DRM_DEBUG("Invalid PCC GPIO: %u!\n", gpio.shift);
5870 			break;
5871 		}
5872 		WREG32_SMC(CNB_PWRMGT_CNTL, tmp);
5873 	}
5874 
5875 	pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5876 	pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5877 	pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5878 	if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
5879 		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5880 	else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
5881 		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5882 
5883 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
5884 		if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
5885 			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5886 		else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
5887 			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5888 		else
5889 			rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
5890 	}
5891 
5892 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
5893 		if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
5894 			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5895 		else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
5896 			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5897 		else
5898 			rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
5899 	}
5900 
5901 	pi->vddc_phase_shed_control = true;
5902 
5903 #if defined(CONFIG_ACPI)
5904 	pi->pcie_performance_request =
5905 		radeon_acpi_is_pcie_performance_request_supported(rdev);
5906 #else
5907 	pi->pcie_performance_request = false;
5908 #endif
5909 
5910 	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
5911 				   &frev, &crev, &data_offset)) {
5912 		pi->caps_sclk_ss_support = true;
5913 		pi->caps_mclk_ss_support = true;
5914 		pi->dynamic_ss = true;
5915 	} else {
5916 		pi->caps_sclk_ss_support = false;
5917 		pi->caps_mclk_ss_support = false;
5918 		pi->dynamic_ss = true;
5919 	}
5920 
5921 	if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
5922 		pi->thermal_protection = true;
5923 	else
5924 		pi->thermal_protection = false;
5925 
5926 	pi->caps_dynamic_ac_timing = true;
5927 
5928 	pi->uvd_power_gated = false;
5929 
5930 	/* make sure dc limits are valid */
5931 	if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
5932 	    (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
5933 		rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
5934 			rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
5935 
5936 	pi->fan_ctrl_is_in_default_mode = true;
5937 
5938 	return 0;
5939 }
5940 
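/* debugfs helper: report UVD/VCE state and the averaged sclk/mclk. */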
5941 void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
5942 						    struct seq_file *m)
5943 {
5944 	struct ci_power_info *pi = ci_get_pi(rdev);
5945 	struct radeon_ps *rps = &pi->current_rps;
5946 	u32 sclk = ci_get_average_sclk_freq(rdev);
5947 	u32 mclk = ci_get_average_mclk_freq(rdev);
5948 
5949 	seq_printf(m, "uvd    %sabled\n", pi->uvd_enabled ? "en" : "dis");
5950 	seq_printf(m, "vce    %sabled\n", rps->vce_active ? "en" : "dis");
5951 	seq_printf(m, "power level avg    sclk: %u mclk: %u\n",
5952 		   sclk, mclk);
5953 }
5954 
5955 void ci_dpm_print_power_state(struct radeon_device *rdev,
5956 			      struct radeon_ps *rps)
5957 {
5958 	struct ci_ps *ps = ci_get_ps(rps);
5959 	struct ci_pl *pl;
5960 	int i;
5961 
5962 	r600_dpm_print_class_info(rps->class, rps->class2);
5963 	r600_dpm_print_cap_info(rps->caps);
	printk("\tuvd    vclk: %u dclk: %u\n", rps->vclk, rps->dclk);
5965 	for (i = 0; i < ps->performance_level_count; i++) {
5966 		pl = &ps->performance_levels[i];
5967 		printk("\t\tpower level %d    sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
5968 		       i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
5969 	}
5970 	r600_dpm_print_ps_status(rdev, rps);
5971 }
5972 
u32 ci_dpm_get_current_sclk(struct radeon_device *rdev)
{
	return ci_get_average_sclk_freq(rdev);
}

u32 ci_dpm_get_current_mclk(struct radeon_device *rdev)
{
	return ci_get_average_mclk_freq(rdev);
}
5986 
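/*
 * Return the lowest (low == true) or highest engine clock of the
 * requested state; ci_dpm_get_mclk() below does the same for the
 * memory clock.
 */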
5987 u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low)
5988 {
5989 	struct ci_power_info *pi = ci_get_pi(rdev);
5990 	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
5991 
5992 	if (low)
5993 		return requested_state->performance_levels[0].sclk;
5994 	else
5995 		return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
5996 }
5997 
5998 u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low)
5999 {
6000 	struct ci_power_info *pi = ci_get_pi(rdev);
6001 	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
6002 
6003 	if (low)
6004 		return requested_state->performance_levels[0].mclk;
6005 	else
6006 		return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
6007 }
6008