xref: /dragonfly/sys/dev/drm/radeon/ci_dpm.c (revision cb739d4d)
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_ucode.h"
#include "cikd.h"
#include "r600_dpm.h"
#include "ci_dpm.h"
#include "ni_dpm.h"
#include "atom.h"
#include <linux/seq_file.h>

#define MC_CG_ARB_FREQ_F0           0x0a
#define MC_CG_ARB_FREQ_F1           0x0b
#define MC_CG_ARB_FREQ_F2           0x0c
#define MC_CG_ARB_FREQ_F3           0x0d

#define SMC_RAM_END 0x40000

#define VOLTAGE_SCALE               4
#define VOLTAGE_VID_OFFSET_SCALE1    625
#define VOLTAGE_VID_OFFSET_SCALE2    100

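/*
 * Per-ASIC PowerTune defaults.  Going by the field order of struct
 * ci_pt_defaults in ci_dpm.h, the scalars are: svi_load_line_en,
 * svi_load_line_vddc, tdc_vddc_throttle_release_limit_perc, tdc_mawt,
 * tdc_waterfall_ctl, dte_ambient_temp_base, display_cac and
 * bapm_temp_gradient, followed by the BAPMTI R and RC tables.
 */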
static const struct ci_pt_defaults defaults_hawaii_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
	{ 0x84,  0x0,   0x0,   0x7F,  0x0,   0x0,   0x5A,  0x60,  0x51,  0x8E,  0x79,  0x6B,  0x5F,  0x90,  0x79  },
	{ 0x1EA, 0x1EA, 0x1EA, 0x224, 0x224, 0x224, 0x24F, 0x24F, 0x24F, 0x28E, 0x28E, 0x28E, 0x2BC, 0x2BC, 0x2BC }
};

static const struct ci_pt_defaults defaults_hawaii_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
	{ 0x93,  0x0,   0x0,   0x97,  0x0,   0x0,   0x6B,  0x60,  0x51,  0x95,  0x79,  0x6B,  0x5F,  0x90,  0x79  },
	{ 0x1EA, 0x1EA, 0x1EA, 0x224, 0x224, 0x224, 0x24F, 0x24F, 0x24F, 0x28E, 0x28E, 0x28E, 0x2BC, 0x2BC, 0x2BC }
};

static const struct ci_pt_defaults defaults_bonaire_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
	{ 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};

#if 0 /* unused */
static const struct ci_pt_defaults defaults_bonaire_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
	{ 0x8C,  0x23F, 0x244, 0xA6,  0x83,  0x85,  0x86,  0x86,  0x83,  0xDB,  0xDB,  0xDA,  0x67,  0x60,  0x5F  },
	{ 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
};
#endif

static const struct ci_pt_defaults defaults_saturn_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
	{ 0x8C,  0x247, 0x249, 0xA6,  0x80,  0x81,  0x8B,  0x89,  0x86,  0xC9,  0xCA,  0xC9,  0x4D,  0x4D,  0x4D  },
	{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};

#if 0 /* unused */
static const struct ci_pt_defaults defaults_saturn_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
	{ 0x96,  0x21D, 0x23B, 0xA1,  0x85,  0x87,  0x83,  0x84,  0x81,  0xE6,  0xE6,  0xE6,  0x71,  0x6A,  0x6A  },
	{ 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
};
#endif

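/*
 * DIDT (di/dt throttling) setup table.  Each entry is
 * { offset, mask, shift, value, type }; ci_program_pt_config_registers()
 * below read-modify-writes each register through the DIDT indirect space
 * and stops at the 0xFFFFFFFF sentinel offset.
 */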
static const struct ci_pt_config_reg didt_config_ci[] =
{
	{ 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};

static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
					 struct atom_voltage_table_entry *voltage_table,
					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
static int ci_set_power_limit(struct radeon_device *rdev, u32 n);
static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
				       u32 target_tdp);
static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);

static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
{
	struct ci_power_info *pi = rdev->pm.dpm.priv;

	return pi;
}

static struct ci_ps *ci_get_ps(struct radeon_ps *rps)
{
	struct ci_ps *ps = rps->ps_priv;

	return ps;
}

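/*
 * Select the PowerTune defaults that match the PCI device ID
 * (Bonaire XT, Saturn XT, Hawaii XT or Hawaii PRO); unknown IDs
 * fall back to the Bonaire XT values.
 */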
static void ci_initialize_powertune_defaults(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	switch (rdev->pdev->device) {
	case 0x6649:
	case 0x6650:
	case 0x6651:
	case 0x6658:
	case 0x665C:
	case 0x665D:
	default:
		pi->powertune_defaults = &defaults_bonaire_xt;
		break;
	case 0x6640:
	case 0x6641:
	case 0x6646:
	case 0x6647:
		pi->powertune_defaults = &defaults_saturn_xt;
		break;
	case 0x67B8:
	case 0x67B0:
		pi->powertune_defaults = &defaults_hawaii_xt;
		break;
	case 0x67BA:
	case 0x67B1:
		pi->powertune_defaults = &defaults_hawaii_pro;
		break;
	case 0x67A0:
	case 0x67A1:
	case 0x67A2:
	case 0x67A8:
	case 0x67A9:
	case 0x67AA:
	case 0x67B9:
	case 0x67BE:
		pi->powertune_defaults = &defaults_bonaire_xt;
		break;
	}

	pi->dte_tj_offset = 0;

	pi->caps_power_containment = true;
	pi->caps_cac = false;
	pi->caps_sq_ramping = false;
	pi->caps_db_ramping = false;
	pi->caps_td_ramping = false;
	pi->caps_tcp_ramping = false;

	if (pi->caps_power_containment) {
		pi->caps_cac = true;
		pi->enable_bapm_feature = true;
		pi->enable_tdc_limit_feature = true;
		pi->enable_pkg_pwr_tracking_feature = true;
	}
}

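/*
 * Convert a VDDC value (in mV) to an SVI2-style VID code.  With
 * VOLTAGE_SCALE = 4 this inverts vddc = 1550 - 6.25 * vid, i.e.
 * VID 0 is 1.55 V and each step drops the voltage by 6.25 mV.
 */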
static u8 ci_convert_to_vid(u16 vddc)
{
	return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
}

static int ci_populate_bapm_vddc_vid_sidd(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
	u32 i;

	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
		return -EINVAL;
	if (rdev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
		return -EINVAL;
	if (rdev->pm.dpm.dyn_state.cac_leakage_table.count !=
	    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
		return -EINVAL;

	for (i = 0; i < rdev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
		if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
			lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
			hi_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
			hi2_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
		} else {
			lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
			hi_vid[i] = ci_convert_to_vid((u16)rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
		}
	}
	return 0;
}

static int ci_populate_vddc_vid(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *vid = pi->smc_powertune_table.VddCVid;
	u32 i;

	if (pi->vddc_voltage_table.count > 8)
		return -EINVAL;

	for (i = 0; i < pi->vddc_voltage_table.count; i++)
		vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);

	return 0;
}

static int ci_populate_svi_load_line(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;

	pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
	pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
	pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
	pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;

	return 0;
}

static int ci_populate_tdc_limit(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	u16 tdc_limit;

	tdc_limit = rdev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
	pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
	pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
		pt_defaults->tdc_vddc_throttle_release_limit_perc;
	pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;

	return 0;
}

static int ci_populate_dw8(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	int ret;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, PmFuseTable) +
				     offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
				     (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
				     pi->sram_end);
	if (ret)
		return -EINVAL;
	else
		pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;

	return 0;
}

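/*
 * Scan the BapmVddC hi/lo SIDD VID arrays for the overall min/max VID,
 * skipping zero (unpopulated) entries, and store them as the GNB LPML
 * limits.
 */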
static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	int i, min, max;

	min = max = hi_vid[0];
	for (i = 0; i < 8; i++) {
		if (0 != hi_vid[i]) {
			if (min > hi_vid[i])
				min = hi_vid[i];
			if (max < hi_vid[i])
				max = hi_vid[i];
		}

		if (0 != lo_vid[i]) {
			if (min > lo_vid[i])
				min = lo_vid[i];
			if (max < lo_vid[i])
				max = lo_vid[i];
		}
	}

	if ((min == 0) || (max == 0))
		return -EINVAL;
	pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
	pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;

	return 0;
}

static int ci_populate_bapm_vddc_base_leakage_sidd(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u16 hi_sidd, lo_sidd;
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;

	hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
	lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;

	pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
	pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);

	return 0;
}

static int ci_populate_bapm_parameters_in_dpm_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	SMU7_Discrete_DpmTable  *dpm_table = &pi->smc_state_table;
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	struct radeon_ppm_table *ppm = rdev->pm.dpm.dyn_state.ppm_table;
	int i, j, k;
	const u16 *def1;
	const u16 *def2;

	dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
	dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;

	dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
	dpm_table->GpuTjMax =
		(u8)(pi->thermal_temp_setting.temperature_high / 1000);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;

	if (ppm) {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
	} else {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
	}

	dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
	def1 = pt_defaults->bapmti_r;
	def2 = pt_defaults->bapmti_rc;

	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
			for (k = 0; k < SMU7_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
				dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
				def1++;
				def2++;
			}
		}
	}

	return 0;
}

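/*
 * Populate the SMC PM fuse table: look up its offset in the firmware
 * header, fill in the VID, load-line, TDC and base-leakage fields, then
 * copy the whole SMU7_Discrete_PmFuses structure into SMC RAM.
 */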
static int ci_populate_pm_base(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 pm_fuse_table_offset;
	int ret;

	if (pi->caps_power_containment) {
		ret = ci_read_smc_sram_dword(rdev,
					     SMU7_FIRMWARE_HEADER_LOCATION +
					     offsetof(SMU7_Firmware_Header, PmFuseTable),
					     &pm_fuse_table_offset, pi->sram_end);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_vid_sidd(rdev);
		if (ret)
			return ret;
		ret = ci_populate_vddc_vid(rdev);
		if (ret)
			return ret;
		ret = ci_populate_svi_load_line(rdev);
		if (ret)
			return ret;
		ret = ci_populate_tdc_limit(rdev);
		if (ret)
			return ret;
		ret = ci_populate_dw8(rdev);
		if (ret)
			return ret;
		ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_base_leakage_sidd(rdev);
		if (ret)
			return ret;
		ret = ci_copy_bytes_to_smc(rdev, pm_fuse_table_offset,
					   (u8 *)&pi->smc_powertune_table,
					   sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
		if (ret)
			return ret;
	}

	return 0;
}

static void ci_do_enable_didt(struct radeon_device *rdev, const bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 data;

	if (pi->caps_sq_ramping) {
		data = RREG32_DIDT(DIDT_SQ_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_SQ_CTRL0, data);
	}

	if (pi->caps_db_ramping) {
		data = RREG32_DIDT(DIDT_DB_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_DB_CTRL0, data);
	}

	if (pi->caps_td_ramping) {
		data = RREG32_DIDT(DIDT_TD_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TD_CTRL0, data);
	}

	if (pi->caps_tcp_ramping) {
		data = RREG32_DIDT(DIDT_TCP_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TCP_CTRL0, data);
	}
}

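/*
 * Walk a ci_pt_config_reg list.  CISLANDS_CONFIGREG_CACHE entries only
 * accumulate bits into `cache', which is OR'd into the next real write;
 * the other types pick the register space (SMC indirect, DIDT indirect,
 * or plain MMIO, where the table holds a dword offset, hence the << 2).
 */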
static int ci_program_pt_config_registers(struct radeon_device *rdev,
					  const struct ci_pt_config_reg *cac_config_regs)
{
	const struct ci_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset << 2);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;

			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset << 2, data);
				break;
			}
			cache = 0;
		}
		config_regs++;
	}
	return 0;
}

static int ci_enable_didt(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;

	if (pi->caps_sq_ramping || pi->caps_db_ramping ||
	    pi->caps_td_ramping || pi->caps_tcp_ramping) {
		cik_enter_rlc_safe_mode(rdev);

		if (enable) {
			ret = ci_program_pt_config_registers(rdev, didt_config_ci);
			if (ret) {
				cik_exit_rlc_safe_mode(rdev);
				return ret;
			}
		}

		ci_do_enable_didt(rdev, enable);

		cik_exit_rlc_safe_mode(rdev);
	}

	return 0;
}

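/*
 * Enable or disable the SMC power-containment features (DTE/BAPM, TDC
 * limit, package power limit), tracking which ones actually came up in
 * pi->power_containment_features so that only those are torn down later.
 */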
static int ci_enable_power_containment(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (enable) {
		pi->power_containment_features = 0;
		if (pi->caps_power_containment) {
			if (pi->enable_bapm_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableDTE);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
			}

			if (pi->enable_tdc_limit_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitEnable);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
			}

			if (pi->enable_pkg_pwr_tracking_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitEnable);
				if (smc_result != PPSMC_Result_OK) {
					ret = -EINVAL;
				} else {
					struct radeon_cac_tdp_table *cac_tdp_table =
						rdev->pm.dpm.dyn_state.cac_tdp_table;
					u32 default_pwr_limit =
						(u32)(cac_tdp_table->maximum_power_delivery_limit * 256);

					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;

					ci_set_power_limit(rdev, default_pwr_limit);
				}
			}
		}
	} else {
		if (pi->caps_power_containment && pi->power_containment_features) {
			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitDisable);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableDTE);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitDisable);
			pi->power_containment_features = 0;
		}
	}

	return ret;
}

static int ci_enable_smc_cac(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (pi->caps_cac) {
		if (enable) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac);
			if (smc_result != PPSMC_Result_OK) {
				ret = -EINVAL;
				pi->cac_enabled = false;
			} else {
				pi->cac_enabled = true;
			}
		} else if (pi->cac_enabled) {
			ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);
			pi->cac_enabled = false;
		}
	}

	return ret;
}

static int ci_power_control_set_level(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	s32 adjust_percent;
	s32 target_tdp;
	int ret = 0;
	bool adjust_polarity = false; /* ??? */

	if (pi->caps_power_containment &&
	    (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)) {
		adjust_percent = adjust_polarity ?
			rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment);
		target_tdp = ((100 + adjust_percent) *
			      (s32)cac_tdp_table->configurable_tdp) / 100;
		target_tdp *= 256;

		ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp);
	}

	return ret;
}

void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (pi->uvd_power_gated == gate)
		return;

	pi->uvd_power_gated = gate;

	ci_update_uvd_dpm(rdev, gate);
}

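/*
 * mclk switches have to be hidden inside the vblank period; require a
 * vblank of at least 450 (GDDR5) or 300 units (presumably microseconds,
 * matching r600_dpm_get_vblank_time()) before allowing the switch.
 */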
bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 vblank_time = r600_dpm_get_vblank_time(rdev);
	u32 switch_limit = pi->mem_gddr5 ? 450 : 300;

	if (vblank_time < switch_limit)
		return true;
	else
		return false;
}

static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
					struct radeon_ps *rps)
{
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_clock_and_voltage_limits *max_limits;
	bool disable_mclk_switching;
	u32 sclk, mclk;
	int i;

	if (rps->vce_active) {
		rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
		rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk;
	} else {
		rps->evclk = 0;
		rps->ecclk = 0;
	}

	if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
	    ci_dpm_vblank_too_short(rdev))
		disable_mclk_switching = true;
	else
		disable_mclk_switching = false;

	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (rdev->pm.dpm.ac_power == false) {
		for (i = 0; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk > max_limits->mclk)
				ps->performance_levels[i].mclk = max_limits->mclk;
			if (ps->performance_levels[i].sclk > max_limits->sclk)
				ps->performance_levels[i].sclk = max_limits->sclk;
		}
	}

	/* XXX validate the min clocks required for display */

	if (disable_mclk_switching) {
		mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
		sclk = ps->performance_levels[0].sclk;
	} else {
		mclk = ps->performance_levels[0].mclk;
		sclk = ps->performance_levels[0].sclk;
	}

	if (rps->vce_active) {
		if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk)
			sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk;
		if (mclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk)
			mclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk;
	}

	ps->performance_levels[0].sclk = sclk;
	ps->performance_levels[0].mclk = mclk;

	if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
		ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;

	if (disable_mclk_switching) {
		if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
			ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
	} else {
		if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
			ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
	}
}

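/*
 * Clamp the requested thermal-interrupt range to 0..255 degrees C and
 * program the high/low trip points into CG_THERMAL_INT (the register
 * fields take whole degrees, hence the / 1000).
 */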
static int ci_set_thermal_temperature_range(struct radeon_device *rdev,
					    int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;
	u32 tmp;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	tmp = RREG32_SMC(CG_THERMAL_INT);
	tmp &= ~(CI_DIG_THERM_INTH_MASK | CI_DIG_THERM_INTL_MASK);
	tmp |= CI_DIG_THERM_INTH(high_temp / 1000) |
		CI_DIG_THERM_INTL(low_temp / 1000);
	WREG32_SMC(CG_THERMAL_INT, tmp);

#if 0
	/* XXX: need to figure out how to handle this properly */
	tmp = RREG32_SMC(CG_THERMAL_CTRL);
	tmp &= DIG_THERM_DPM_MASK;
	tmp |= DIG_THERM_DPM(high_temp / 1000);
	WREG32_SMC(CG_THERMAL_CTRL, tmp);
#endif

	rdev->pm.dpm.thermal.min_temp = low_temp;
	rdev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}

#if 0
static int ci_read_smc_soft_register(struct radeon_device *rdev,
				     u16 reg_offset, u32 *value)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	return ci_read_smc_sram_dword(rdev,
				      pi->soft_regs_start + reg_offset,
				      value, pi->sram_end);
}
#endif

static int ci_write_smc_soft_register(struct radeon_device *rdev,
				      u16 reg_offset, u32 value)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	return ci_write_smc_sram_dword(rdev,
				       pi->soft_regs_start + reg_offset,
				       value, pi->sram_end);
}

static void ci_init_fps_limits(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;

	if (pi->caps_fps) {
		u16 tmp;

		tmp = 45;
		table->FpsHighT = cpu_to_be16(tmp);

		tmp = 30;
		table->FpsLowT = cpu_to_be16(tmp);
	}
}

static int ci_update_sclk_t(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret = 0;
	u32 low_sclk_interrupt_t = 0;

	if (pi->caps_sclk_throttle_low_notification) {
		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);

		ret = ci_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
					   (u8 *)&low_sclk_interrupt_t,
					   sizeof(u32), pi->sram_end);
	}

	return ret;
}

static void ci_get_leakage_voltages(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u16 leakage_id, virtual_voltage_id;
	u16 vddc, vddci;
	int i;

	pi->vddc_leakage.count = 0;
	pi->vddci_leakage.count = 0;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (radeon_atom_get_voltage_evv(rdev, virtual_voltage_id, &vddc) != 0)
				continue;
			if (vddc != 0 && vddc != virtual_voltage_id) {
				pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
				pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
				pi->vddc_leakage.count++;
			}
		}
	} else if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci,
										 virtual_voltage_id,
										 leakage_id) == 0) {
				if (vddc != 0 && vddc != virtual_voltage_id) {
					pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
					pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
					pi->vddc_leakage.count++;
				}
				if (vddci != 0 && vddci != virtual_voltage_id) {
					pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
					pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
					pi->vddci_leakage.count++;
				}
			}
		}
	}
}

static void ci_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	bool want_thermal_protection;
	enum radeon_dpm_event_src dpm_event_src;
	u32 tmp;

	switch (sources) {
	case 0:
	default:
		want_thermal_protection = false;
		break;
	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
		break;
	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
		break;
	case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
	      (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
		break;
	}

	if (want_thermal_protection) {
#if 0
		/* XXX: need to figure out how to handle this properly */
		tmp = RREG32_SMC(CG_THERMAL_CTRL);
		tmp &= DPM_EVENT_SRC_MASK;
		tmp |= DPM_EVENT_SRC(dpm_event_src);
		WREG32_SMC(CG_THERMAL_CTRL, tmp);
#endif

		tmp = RREG32_SMC(GENERAL_PWRMGT);
		if (pi->thermal_protection)
			tmp &= ~THERMAL_PROTECTION_DIS;
		else
			tmp |= THERMAL_PROTECTION_DIS;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	} else {
		tmp = RREG32_SMC(GENERAL_PWRMGT);
		tmp |= THERMAL_PROTECTION_DIS;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	}
}

static void ci_enable_auto_throttle_source(struct radeon_device *rdev,
					   enum radeon_dpm_auto_throttle_src source,
					   bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (enable) {
		if (!(pi->active_auto_throttle_sources & (1 << source))) {
			pi->active_auto_throttle_sources |= 1 << source;
			ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
		}
	} else {
		if (pi->active_auto_throttle_sources & (1 << source)) {
			pi->active_auto_throttle_sources &= ~(1 << source);
			ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
		}
	}
}

static void ci_enable_vr_hot_gpio_interrupt(struct radeon_device *rdev)
{
	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
		ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
}

static int ci_unfreeze_sclk_mclk_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if ((!pi->sclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	if ((!pi->mclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	pi->need_update_smu7_dpm_table = 0;
	return 0;
}

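/*
 * Enable/disable sclk and mclk DPM in the SMC.  When mclk DPM comes up,
 * MC CAC is switched on as well and the local CAC (LCAC) controllers are
 * programmed with their init values (magic numbers carried over from the
 * vendor code).
 */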
static int ci_enable_sclk_mclk_dpm(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (enable) {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;

			WREG32_P(MC_SEQ_CNTL_3, CAC_EN, ~CAC_EN);

			WREG32_SMC(LCAC_MC0_CNTL, 0x05);
			WREG32_SMC(LCAC_MC1_CNTL, 0x05);
			WREG32_SMC(LCAC_CPL_CNTL, 0x100005);

			udelay(10);

			WREG32_SMC(LCAC_MC0_CNTL, 0x400005);
			WREG32_SMC(LCAC_MC1_CNTL, 0x400005);
			WREG32_SMC(LCAC_CPL_CNTL, 0x500005);
		}
	} else {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	return 0;
}

static int ci_start_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(GENERAL_PWRMGT);
	tmp |= GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp |= DYNAMIC_PM_EN;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);

	WREG32_P(BIF_LNCNT_RESET, 0, ~RESET_LNCNT_EN);

	smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Enable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	ret = ci_enable_sclk_mclk_dpm(rdev, true);
	if (ret)
		return ret;

	if (!pi->pcie_dpm_key_disabled) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Enable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_freeze_sclk_mclk_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if ((!pi->sclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_FreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	if ((!pi->mclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_FreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_stop_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(GENERAL_PWRMGT);
	tmp &= ~GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp &= ~DYNAMIC_PM_EN;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	if (!pi->pcie_dpm_key_disabled) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Disable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	ret = ci_enable_sclk_mclk_dpm(rdev, false);
	if (ret)
		return ret;

	smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Disable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	return 0;
}

static void ci_enable_sclk_control(struct radeon_device *rdev, bool enable)
{
	u32 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);

	if (enable)
		tmp &= ~SCLK_PWRMGT_OFF;
	else
		tmp |= SCLK_PWRMGT_OFF;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
}

#if 0
static int ci_notify_hw_of_power_source(struct radeon_device *rdev,
					bool ac_power)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	u32 power_limit;

	if (ac_power)
		power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
	else
		power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);

	ci_set_power_limit(rdev, power_limit);

	if (pi->caps_automatic_dc_transition) {
		if (ac_power)
			ci_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC);
		else
			ci_send_msg_to_smc(rdev, PPSMC_MSG_Remove_DC_Clamp);
	}

	return 0;
}
#endif

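/*
 * SMC messages that carry an argument pass it (or return it) through the
 * SMC_MSG_ARG_0 mailbox register around the message itself.
 */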
static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
						      PPSMC_Msg msg, u32 parameter)
{
	WREG32(SMC_MSG_ARG_0, parameter);
	return ci_send_msg_to_smc(rdev, msg);
}

static PPSMC_Result ci_send_msg_to_smc_return_parameter(struct radeon_device *rdev,
							PPSMC_Msg msg, u32 *parameter)
{
	PPSMC_Result smc_result;

	smc_result = ci_send_msg_to_smc(rdev, msg);

	if ((smc_result == PPSMC_Result_OK) && parameter)
		*parameter = RREG32(SMC_MSG_ARG_0);

	return smc_result;
}

static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (!pi->sclk_dpm_key_disabled) {
		PPSMC_Result smc_result =
			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_dpm_force_state_mclk(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (!pi->mclk_dpm_key_disabled) {
		PPSMC_Result smc_result =
			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_ForceState, n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_dpm_force_state_pcie(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (!pi->pcie_dpm_key_disabled) {
		PPSMC_Result smc_result =
			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_set_power_limit(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
		PPSMC_Result smc_result =
			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PkgPwrSetLimit, n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
				       u32 target_tdp)
{
	PPSMC_Result smc_result =
		ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;
	return 0;
}

#if 0
static int ci_set_boot_state(struct radeon_device *rdev)
{
	return ci_enable_sclk_mclk_dpm(rdev, false);
}
#endif

static u32 ci_get_average_sclk_freq(struct radeon_device *rdev)
{
	u32 sclk_freq;
	PPSMC_Result smc_result =
		ci_send_msg_to_smc_return_parameter(rdev,
						    PPSMC_MSG_API_GetSclkFrequency,
						    &sclk_freq);
	if (smc_result != PPSMC_Result_OK)
		sclk_freq = 0;

	return sclk_freq;
}

static u32 ci_get_average_mclk_freq(struct radeon_device *rdev)
{
	u32 mclk_freq;
	PPSMC_Result smc_result =
		ci_send_msg_to_smc_return_parameter(rdev,
						    PPSMC_MSG_API_GetMclkFrequency,
						    &mclk_freq);
	if (smc_result != PPSMC_Result_OK)
		mclk_freq = 0;

	return mclk_freq;
}

static void ci_dpm_start_smc(struct radeon_device *rdev)
{
	int i;

	ci_program_jump_on_start(rdev);
	ci_start_smc_clock(rdev);
	ci_start_smc(rdev);
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32_SMC(FIRMWARE_FLAGS) & INTERRUPTS_ENABLED)
			break;
	}
}

static void ci_dpm_stop_smc(struct radeon_device *rdev)
{
	ci_reset_smc(rdev);
	ci_stop_smc_clock(rdev);
}

static int ci_process_firmware_header(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;
	int ret;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, DpmTable),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->dpm_table_start = tmp;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, SoftRegisters),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->soft_regs_start = tmp;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, mcRegisterTable),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->mc_reg_table_start = tmp;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, FanTable),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->fan_table_start = tmp;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->arb_table_start = tmp;

	return 0;
}

static void ci_read_clock_registers(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	pi->clock_registers.cg_spll_func_cntl =
		RREG32_SMC(CG_SPLL_FUNC_CNTL);
	pi->clock_registers.cg_spll_func_cntl_2 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_2);
	pi->clock_registers.cg_spll_func_cntl_3 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_3);
	pi->clock_registers.cg_spll_func_cntl_4 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_4);
	pi->clock_registers.cg_spll_spread_spectrum =
		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
	pi->clock_registers.cg_spll_spread_spectrum_2 =
		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM_2);
	pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
	pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
	pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
	pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
	pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
	pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
	pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
	pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
	pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
}

static void ci_init_sclk_t(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	pi->low_sclk_interrupt_t = 0;
}

static void ci_enable_thermal_protection(struct radeon_device *rdev,
					 bool enable)
{
	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);

	if (enable)
		tmp &= ~THERMAL_PROTECTION_DIS;
	else
		tmp |= THERMAL_PROTECTION_DIS;
	WREG32_SMC(GENERAL_PWRMGT, tmp);
}

static void ci_enable_acpi_power_management(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);

	tmp |= STATIC_PM_EN;

	WREG32_SMC(GENERAL_PWRMGT, tmp);
}

#if 0
static int ci_enter_ulp_state(struct radeon_device *rdev)
{
	WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);

	udelay(25000);

	return 0;
}

static int ci_exit_ulp_state(struct radeon_device *rdev)
{
	int i;

	WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);

	udelay(7000);

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(SMC_RESP_0) == 1)
			break;
		udelay(1000);
	}

	return 0;
}
#endif

static int ci_notify_smc_display_change(struct radeon_device *rdev,
					bool has_display)
{
	PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;

	return (ci_send_msg_to_smc(rdev, msg) == PPSMC_Result_OK) ? 0 : -EINVAL;
}

static int ci_enable_ds_master_switch(struct radeon_device *rdev,
				      bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (enable) {
		if (pi->caps_sclk_ds) {
			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
				return -EINVAL;
		} else {
			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
				return -EINVAL;
		}
	} else {
		if (pi->caps_sclk_ds) {
			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	return 0;
}

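/*
 * Program the display-gap logic: derive the frame time from the refresh
 * rate, subtract the vblank time plus a 200 us margin to get the
 * pre-vblank interval, and convert that to ref-clock ticks for
 * CG_DISPLAY_GAP_CNTL2 (reference_freq is in 10 kHz units, so
 * ref_clock / 100 gives ticks per microsecond).
 */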
static void ci_program_display_gap(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
	u32 pre_vbi_time_in_us;
	u32 frame_time_in_us;
	u32 ref_clock = rdev->clock.spll.reference_freq;
	u32 refresh_rate = r600_dpm_get_vrefresh(rdev);
	u32 vblank_time = r600_dpm_get_vblank_time(rdev);

	tmp &= ~DISP_GAP_MASK;
	if (rdev->pm.dpm.new_active_crtc_count > 0)
		tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
	else
		tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE);
	WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);

	if (refresh_rate == 0)
		refresh_rate = 60;
	if (vblank_time == 0xffffffff)
		vblank_time = 500;
	frame_time_in_us = 1000000 / refresh_rate;
	pre_vbi_time_in_us =
		frame_time_in_us - 200 - vblank_time;
	tmp = pre_vbi_time_in_us * (ref_clock / 100);

	WREG32_SMC(CG_DISPLAY_GAP_CNTL2, tmp);
	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));

	ci_notify_smc_display_change(rdev, (rdev->pm.dpm.new_active_crtc_count == 1));
}

static void ci_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;

	if (enable) {
		if (pi->caps_sclk_ss_support) {
			tmp = RREG32_SMC(GENERAL_PWRMGT);
			tmp |= DYN_SPREAD_SPECTRUM_EN;
			WREG32_SMC(GENERAL_PWRMGT, tmp);
		}
	} else {
		tmp = RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
		tmp &= ~SSEN;
		WREG32_SMC(CG_SPLL_SPREAD_SPECTRUM, tmp);

		tmp = RREG32_SMC(GENERAL_PWRMGT);
		tmp &= ~DYN_SPREAD_SPECTRUM_EN;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	}
}

static void ci_program_sstp(struct radeon_device *rdev)
{
	WREG32_SMC(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
}

static void ci_enable_display_gap(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);

	tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK);
	tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
		DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK));

	WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
}

static void ci_program_vc(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	WREG32_SMC(CG_FTV_0, CISLANDS_VRC_DFLT0);
	WREG32_SMC(CG_FTV_1, CISLANDS_VRC_DFLT1);
	WREG32_SMC(CG_FTV_2, CISLANDS_VRC_DFLT2);
	WREG32_SMC(CG_FTV_3, CISLANDS_VRC_DFLT3);
	WREG32_SMC(CG_FTV_4, CISLANDS_VRC_DFLT4);
	WREG32_SMC(CG_FTV_5, CISLANDS_VRC_DFLT5);
	WREG32_SMC(CG_FTV_6, CISLANDS_VRC_DFLT6);
	WREG32_SMC(CG_FTV_7, CISLANDS_VRC_DFLT7);
}

static void ci_clear_vc(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp |= (RESET_SCLK_CNT | RESET_BUSY_CNT);
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	WREG32_SMC(CG_FTV_0, 0);
	WREG32_SMC(CG_FTV_1, 0);
	WREG32_SMC(CG_FTV_2, 0);
	WREG32_SMC(CG_FTV_3, 0);
	WREG32_SMC(CG_FTV_4, 0);
	WREG32_SMC(CG_FTV_5, 0);
	WREG32_SMC(CG_FTV_6, 0);
	WREG32_SMC(CG_FTV_7, 0);
}

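/*
 * Upload the SMC microcode: wait for the boot sequence to complete,
 * stop the SMC clock and hold it in reset, then load the firmware image
 * into SMC RAM.
 */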
1636 static int ci_upload_firmware(struct radeon_device *rdev)
1637 {
1638 	struct ci_power_info *pi = ci_get_pi(rdev);
1639 	int i, ret;
1640 
1641 	for (i = 0; i < rdev->usec_timeout; i++) {
1642 		if (RREG32_SMC(RCU_UC_EVENTS) & BOOT_SEQ_DONE)
1643 			break;
1644 	}
1645 	WREG32_SMC(SMC_SYSCON_MISC_CNTL, 1);
1646 
1647 	ci_stop_smc_clock(rdev);
1648 	ci_reset_smc(rdev);
1649 
1650 	ret = ci_load_smc_ucode(rdev, pi->sram_end);
1651 
1652 	return ret;
1653 
1654 }
1655 
1656 static int ci_get_svi2_voltage_table(struct radeon_device *rdev,
1657 				     struct radeon_clock_voltage_dependency_table *voltage_dependency_table,
1658 				     struct atom_voltage_table *voltage_table)
1659 {
1660 	u32 i;
1661 
1662 	if (voltage_dependency_table == NULL)
1663 		return -EINVAL;
1664 
1665 	voltage_table->mask_low = 0;
1666 	voltage_table->phase_delay = 0;
1667 
1668 	voltage_table->count = voltage_dependency_table->count;
1669 	for (i = 0; i < voltage_table->count; i++) {
1670 		voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
1671 		voltage_table->entries[i].smio_low = 0;
1672 	}
1673 
1674 	return 0;
1675 }
1676 
1677 static int ci_construct_voltage_tables(struct radeon_device *rdev)
1678 {
1679 	struct ci_power_info *pi = ci_get_pi(rdev);
1680 	int ret;
1681 
1682 	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
1683 		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
1684 						    VOLTAGE_OBJ_GPIO_LUT,
1685 						    &pi->vddc_voltage_table);
1686 		if (ret)
1687 			return ret;
1688 	} else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
1689 		ret = ci_get_svi2_voltage_table(rdev,
1690 						&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
1691 						&pi->vddc_voltage_table);
1692 		if (ret)
1693 			return ret;
1694 	}
1695 
1696 	if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
1697 		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDC,
1698 							 &pi->vddc_voltage_table);
1699 
1700 	if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
1701 		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
1702 						    VOLTAGE_OBJ_GPIO_LUT,
1703 						    &pi->vddci_voltage_table);
1704 		if (ret)
1705 			return ret;
1706 	} else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
1707 		ret = ci_get_svi2_voltage_table(rdev,
1708 						&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
1709 						&pi->vddci_voltage_table);
1710 		if (ret)
1711 			return ret;
1712 	}
1713 
1714 	if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
1715 		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDCI,
1716 							 &pi->vddci_voltage_table);
1717 
1718 	if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
1719 		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC,
1720 						    VOLTAGE_OBJ_GPIO_LUT,
1721 						    &pi->mvdd_voltage_table);
1722 		if (ret)
1723 			return ret;
1724 	} else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
1725 		ret = ci_get_svi2_voltage_table(rdev,
1726 						&rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
1727 						&pi->mvdd_voltage_table);
1728 		if (ret)
1729 			return ret;
1730 	}
1731 
1732 	if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
1733 		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_MVDD,
1734 							 &pi->mvdd_voltage_table);
1735 
1736 	return 0;
1737 }
1738 
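/*
 * Convert a single voltage table entry to SMC format: voltages are
 * scaled by VOLTAGE_SCALE into 0.25 mV units and stored big-endian for
 * the SMC.
 */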
1739 static void ci_populate_smc_voltage_table(struct radeon_device *rdev,
1740 					  struct atom_voltage_table_entry *voltage_table,
1741 					  SMU7_Discrete_VoltageLevel *smc_voltage_table)
1742 {
1743 	int ret;
1744 
1745 	ret = ci_get_std_voltage_value_sidd(rdev, voltage_table,
1746 					    &smc_voltage_table->StdVoltageHiSidd,
1747 					    &smc_voltage_table->StdVoltageLoSidd);
1748 
1749 	if (ret) {
1750 		smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
1751 		smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
1752 	}
1753 
1754 	smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
1755 	smc_voltage_table->StdVoltageHiSidd =
1756 		cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
1757 	smc_voltage_table->StdVoltageLoSidd =
1758 		cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
1759 }
1760 
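/*
 * The three helpers below convert the VDDC/VDDCI/MVDD voltage tables
 * into their SMC level arrays; the SMIO bits are only meaningful for
 * GPIO-controlled rails.
 */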
1761 static int ci_populate_smc_vddc_table(struct radeon_device *rdev,
1762 				      SMU7_Discrete_DpmTable *table)
1763 {
1764 	struct ci_power_info *pi = ci_get_pi(rdev);
1765 	unsigned int count;
1766 
1767 	table->VddcLevelCount = pi->vddc_voltage_table.count;
1768 	for (count = 0; count < table->VddcLevelCount; count++) {
1769 		ci_populate_smc_voltage_table(rdev,
1770 					      &pi->vddc_voltage_table.entries[count],
1771 					      &table->VddcLevel[count]);
1772 
1773 		if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
1774 			table->VddcLevel[count].Smio |=
1775 				pi->vddc_voltage_table.entries[count].smio_low;
1776 		else
1777 			table->VddcLevel[count].Smio = 0;
1778 	}
1779 	table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
1780 
1781 	return 0;
1782 }
1783 
1784 static int ci_populate_smc_vddci_table(struct radeon_device *rdev,
1785 				       SMU7_Discrete_DpmTable *table)
1786 {
1787 	unsigned int count;
1788 	struct ci_power_info *pi = ci_get_pi(rdev);
1789 
1790 	table->VddciLevelCount = pi->vddci_voltage_table.count;
1791 	for (count = 0; count < table->VddciLevelCount; count++) {
1792 		ci_populate_smc_voltage_table(rdev,
1793 					      &pi->vddci_voltage_table.entries[count],
1794 					      &table->VddciLevel[count]);
1795 
1796 		if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
1797 			table->VddciLevel[count].Smio |=
1798 				pi->vddci_voltage_table.entries[count].smio_low;
1799 		else
1800 			table->VddciLevel[count].Smio = 0;
1801 	}
1802 	table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
1803 
1804 	return 0;
1805 }
1806 
1807 static int ci_populate_smc_mvdd_table(struct radeon_device *rdev,
1808 				      SMU7_Discrete_DpmTable *table)
1809 {
1810 	struct ci_power_info *pi = ci_get_pi(rdev);
1811 	unsigned int count;
1812 
1813 	table->MvddLevelCount = pi->mvdd_voltage_table.count;
1814 	for (count = 0; count < table->MvddLevelCount; count++) {
1815 		ci_populate_smc_voltage_table(rdev,
1816 					      &pi->mvdd_voltage_table.entries[count],
1817 					      &table->MvddLevel[count]);
1818 
1819 		if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
1820 			table->MvddLevel[count].Smio |=
1821 				pi->mvdd_voltage_table.entries[count].smio_low;
1822 		else
1823 			table->MvddLevel[count].Smio = 0;
1824 	}
1825 	table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
1826 
1827 	return 0;
1828 }
1829 
1830 static int ci_populate_smc_voltage_tables(struct radeon_device *rdev,
1831 					  SMU7_Discrete_DpmTable *table)
1832 {
1833 	int ret;
1834 
1835 	ret = ci_populate_smc_vddc_table(rdev, table);
1836 	if (ret)
1837 		return ret;
1838 
1839 	ret = ci_populate_smc_vddci_table(rdev, table);
1840 	if (ret)
1841 		return ret;
1842 
1843 	ret = ci_populate_smc_mvdd_table(rdev, table);
1844 	if (ret)
1845 		return ret;
1846 
1847 	return 0;
1848 }
1849 
1850 static int ci_populate_mvdd_value(struct radeon_device *rdev, u32 mclk,
1851 				  SMU7_Discrete_VoltageLevel *voltage)
1852 {
1853 	struct ci_power_info *pi = ci_get_pi(rdev);
1854 	u32 i = 0;
1855 
	if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
		for (i = 0; i < rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
			if (mclk <= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
				voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
				/* found the lowest level that satisfies mclk */
				return 0;
			}
		}
	}

	/* no MVDD control, or no dependency entry covers this mclk */
	return -EINVAL;
1869 }
1870 
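/*
 * Look up the "standard" hi/lo SIDD voltages for a VDDC value in the CAC
 * leakage table, preferring an exact match in the sclk dependency table
 * and falling back to the first entry with a voltage >= the requested
 * one.  Defaults to the raw (scaled) voltage when no match is found.
 */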
1871 static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
1872 					 struct atom_voltage_table_entry *voltage_table,
1873 					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
1874 {
1875 	u16 v_index, idx;
1876 	bool voltage_found = false;
1877 	*std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
1878 	*std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
1879 
1880 	if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
1881 		return -EINVAL;
1882 
1883 	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
1884 		for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
1885 			if (voltage_table->value ==
1886 			    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
1887 				voltage_found = true;
1888 				if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
1889 					idx = v_index;
1890 				else
1891 					idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
1892 				*std_voltage_lo_sidd =
1893 					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
1894 				*std_voltage_hi_sidd =
1895 					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
1896 				break;
1897 			}
1898 		}
1899 
1900 		if (!voltage_found) {
1901 			for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
1902 				if (voltage_table->value <=
1903 				    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
1904 					voltage_found = true;
1905 					if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
1906 						idx = v_index;
1907 					else
1908 						idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
1909 					*std_voltage_lo_sidd =
1910 						rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
1911 					*std_voltage_hi_sidd =
1912 						rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
1913 					break;
1914 				}
1915 			}
1916 		}
1917 	}
1918 
1919 	return 0;
1920 }
1921 
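/*
 * The two helpers below map a clock onto an index into the phase
 * shedding limits table; the index defaults to 1 when the clock exceeds
 * every entry.
 */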
1922 static void ci_populate_phase_value_based_on_sclk(struct radeon_device *rdev,
1923 						  const struct radeon_phase_shedding_limits_table *limits,
1924 						  u32 sclk,
1925 						  u32 *phase_shedding)
1926 {
1927 	unsigned int i;
1928 
1929 	*phase_shedding = 1;
1930 
1931 	for (i = 0; i < limits->count; i++) {
1932 		if (sclk < limits->entries[i].sclk) {
1933 			*phase_shedding = i;
1934 			break;
1935 		}
1936 	}
1937 }
1938 
1939 static void ci_populate_phase_value_based_on_mclk(struct radeon_device *rdev,
1940 						  const struct radeon_phase_shedding_limits_table *limits,
1941 						  u32 mclk,
1942 						  u32 *phase_shedding)
1943 {
1944 	unsigned int i;
1945 
1946 	*phase_shedding = 1;
1947 
1948 	for (i = 0; i < limits->count; i++) {
1949 		if (mclk < limits->entries[i].mclk) {
1950 			*phase_shedding = i;
1951 			break;
1952 		}
1953 	}
1954 }
1955 
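/*
 * Point the SMC's MC ARB table at register set F1; the top byte of the
 * dword at arb_table_start selects the active set.
 */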
1956 static int ci_init_arb_table_index(struct radeon_device *rdev)
1957 {
1958 	struct ci_power_info *pi = ci_get_pi(rdev);
1959 	u32 tmp;
1960 	int ret;
1961 
1962 	ret = ci_read_smc_sram_dword(rdev, pi->arb_table_start,
1963 				     &tmp, pi->sram_end);
1964 	if (ret)
1965 		return ret;
1966 
1967 	tmp &= 0x00FFFFFF;
1968 	tmp |= MC_CG_ARB_FREQ_F1 << 24;
1969 
1970 	return ci_write_smc_sram_dword(rdev, pi->arb_table_start,
1971 				       tmp, pi->sram_end);
1972 }
1973 
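/*
 * Return the voltage required by the first dependency entry whose clock
 * is >= the requested clock, clamping to the highest entry.
 */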
1974 static int ci_get_dependency_volt_by_clk(struct radeon_device *rdev,
1975 					 struct radeon_clock_voltage_dependency_table *allowed_clock_voltage_table,
1976 					 u32 clock, u32 *voltage)
1977 {
1978 	u32 i = 0;
1979 
1980 	if (allowed_clock_voltage_table->count == 0)
1981 		return -EINVAL;
1982 
1983 	for (i = 0; i < allowed_clock_voltage_table->count; i++) {
1984 		if (allowed_clock_voltage_table->entries[i].clk >= clock) {
1985 			*voltage = allowed_clock_voltage_table->entries[i].v;
1986 			return 0;
1987 		}
1988 	}
1989 
1990 	*voltage = allowed_clock_voltage_table->entries[i-1].v;
1991 
1992 	return 0;
1993 }
1994 
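/*
 * Pick the deepest sleep divider ID that keeps the divided engine clock
 * at or above the minimum, e.g. sclk at eight times the minimum yields
 * ID 3 (sclk / 2^3 == min).  Clocks already below the minimum get ID 0.
 */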
1995 static u8 ci_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
1996 					     u32 sclk, u32 min_sclk_in_sr)
1997 {
1998 	u32 i;
1999 	u32 tmp;
2000 	u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ?
2001 		min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK;
2002 
2003 	if (sclk < min)
2004 		return 0;
2005 
	for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
2007 		tmp = sclk / (1 << i);
2008 		if (tmp >= min || i == 0)
2009 			break;
2010 	}
2011 
2012 	return (u8)i;
2013 }
2014 
2015 static int ci_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev)
2016 {
2017 	return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
2018 }
2019 
2020 static int ci_reset_to_default(struct radeon_device *rdev)
2021 {
2022 	return (ci_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
2023 		0 : -EINVAL;
2024 }
2025 
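/*
 * Bits 15:8 of SMC_SCRATCH9 hold the ARB register set currently in use;
 * switch back to F0 only if some other set is active.
 */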
2026 static int ci_force_switch_to_arb_f0(struct radeon_device *rdev)
2027 {
2028 	u32 tmp;
2029 
2030 	tmp = (RREG32_SMC(SMC_SCRATCH9) & 0x0000ff00) >> 8;
2031 
2032 	if (tmp == MC_CG_ARB_FREQ_F0)
2033 		return 0;
2034 
2035 	return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
2036 }
2037 
2038 static int ci_populate_memory_timing_parameters(struct radeon_device *rdev,
2039 						u32 sclk,
2040 						u32 mclk,
2041 						SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
2042 {
2043 	u32 dram_timing;
2044 	u32 dram_timing2;
2045 	u32 burst_time;
2046 
2047 	radeon_atom_set_engine_dram_timings(rdev, sclk, mclk);
2048 
2049 	dram_timing  = RREG32(MC_ARB_DRAM_TIMING);
2050 	dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
2051 	burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
2052 
2053 	arb_regs->McArbDramTiming  = cpu_to_be32(dram_timing);
2054 	arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
2055 	arb_regs->McArbBurstTime = (u8)burst_time;
2056 
2057 	return 0;
2058 }
2059 
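/*
 * Build the full sclk x mclk ARB DRAM timing matrix and copy it into SMC
 * SRAM.
 */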
2060 static int ci_do_program_memory_timing_parameters(struct radeon_device *rdev)
2061 {
2062 	struct ci_power_info *pi = ci_get_pi(rdev);
2063 	SMU7_Discrete_MCArbDramTimingTable arb_regs;
2064 	u32 i, j;
	int ret = 0;
2066 
2067 	memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
2068 
2069 	for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
2070 		for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
2071 			ret = ci_populate_memory_timing_parameters(rdev,
2072 								   pi->dpm_table.sclk_table.dpm_levels[i].value,
2073 								   pi->dpm_table.mclk_table.dpm_levels[j].value,
2074 								   &arb_regs.entries[i][j]);
2075 			if (ret)
2076 				break;
2077 		}
2078 	}
2079 
2080 	if (ret == 0)
2081 		ret = ci_copy_bytes_to_smc(rdev,
2082 					   pi->arb_table_start,
2083 					   (u8 *)&arb_regs,
2084 					   sizeof(SMU7_Discrete_MCArbDramTimingTable),
2085 					   pi->sram_end);
2086 
2087 	return ret;
2088 }
2089 
2090 static int ci_program_memory_timing_parameters(struct radeon_device *rdev)
2091 {
2092 	struct ci_power_info *pi = ci_get_pi(rdev);
2093 
2094 	if (pi->need_update_smu7_dpm_table == 0)
2095 		return 0;
2096 
2097 	return ci_do_program_memory_timing_parameters(rdev);
2098 }
2099 
2100 static void ci_populate_smc_initial_state(struct radeon_device *rdev,
2101 					  struct radeon_ps *radeon_boot_state)
2102 {
2103 	struct ci_ps *boot_state = ci_get_ps(radeon_boot_state);
2104 	struct ci_power_info *pi = ci_get_pi(rdev);
2105 	u32 level = 0;
2106 
2107 	for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
2108 		if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
2109 		    boot_state->performance_levels[0].sclk) {
2110 			pi->smc_state_table.GraphicsBootLevel = level;
2111 			break;
2112 		}
2113 	}
2114 
2115 	for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
2116 		if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
2117 		    boot_state->performance_levels[0].mclk) {
2118 			pi->smc_state_table.MemoryBootLevel = level;
2119 			break;
2120 		}
2121 	}
2122 }
2123 
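/*
 * Collapse a DPM table into the bitmask format the SMC expects: bit n
 * set means level n is enabled, so a table with only its bottom three
 * levels enabled yields a mask of 0x7.
 */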
2124 static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
2125 {
2126 	u32 i;
2127 	u32 mask_value = 0;
2128 
	for (i = dpm_table->count; i > 0; i--) {
		mask_value <<= 1;
		if (dpm_table->dpm_levels[i-1].enabled)
			mask_value |= 0x1;
	}
2136 
2137 	return mask_value;
2138 }
2139 
2140 static void ci_populate_smc_link_level(struct radeon_device *rdev,
2141 				       SMU7_Discrete_DpmTable *table)
2142 {
2143 	struct ci_power_info *pi = ci_get_pi(rdev);
2144 	struct ci_dpm_table *dpm_table = &pi->dpm_table;
2145 	u32 i;
2146 
2147 	for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
2148 		table->LinkLevel[i].PcieGenSpeed =
2149 			(u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
2150 		table->LinkLevel[i].PcieLaneCount =
2151 			r600_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
2152 		table->LinkLevel[i].EnabledForActivity = 1;
2153 		table->LinkLevel[i].DownT = cpu_to_be32(5);
2154 		table->LinkLevel[i].UpT = cpu_to_be32(30);
2155 	}
2156 
2157 	pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
2158 	pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
2159 		ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
2160 }
2161 
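/*
 * The UVD/VCE/ACP/SAMU level builders below all follow the same pattern:
 * copy the clock/voltage dependency table, look up the post divider
 * through ATOM, and byte-swap the result for the SMC.
 */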
2162 static int ci_populate_smc_uvd_level(struct radeon_device *rdev,
2163 				     SMU7_Discrete_DpmTable *table)
2164 {
2165 	u32 count;
2166 	struct atom_clock_dividers dividers;
2167 	int ret = -EINVAL;
2168 
2169 	table->UvdLevelCount =
2170 		rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
2171 
2172 	for (count = 0; count < table->UvdLevelCount; count++) {
2173 		table->UvdLevel[count].VclkFrequency =
2174 			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
2175 		table->UvdLevel[count].DclkFrequency =
2176 			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
2177 		table->UvdLevel[count].MinVddc =
2178 			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2179 		table->UvdLevel[count].MinVddcPhases = 1;
2180 
2181 		ret = radeon_atom_get_clock_dividers(rdev,
2182 						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2183 						     table->UvdLevel[count].VclkFrequency, false, &dividers);
2184 		if (ret)
2185 			return ret;
2186 
2187 		table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
2188 
2189 		ret = radeon_atom_get_clock_dividers(rdev,
2190 						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2191 						     table->UvdLevel[count].DclkFrequency, false, &dividers);
2192 		if (ret)
2193 			return ret;
2194 
2195 		table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
2196 
2197 		table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
2198 		table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
2199 		table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
2200 	}
2201 
2202 	return ret;
2203 }
2204 
2205 static int ci_populate_smc_vce_level(struct radeon_device *rdev,
2206 				     SMU7_Discrete_DpmTable *table)
2207 {
2208 	u32 count;
2209 	struct atom_clock_dividers dividers;
2210 	int ret = -EINVAL;
2211 
2212 	table->VceLevelCount =
2213 		rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
2214 
2215 	for (count = 0; count < table->VceLevelCount; count++) {
2216 		table->VceLevel[count].Frequency =
2217 			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
2218 		table->VceLevel[count].MinVoltage =
2219 			(u16)rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2220 		table->VceLevel[count].MinPhases = 1;
2221 
2222 		ret = radeon_atom_get_clock_dividers(rdev,
2223 						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2224 						     table->VceLevel[count].Frequency, false, &dividers);
2225 		if (ret)
2226 			return ret;
2227 
2228 		table->VceLevel[count].Divider = (u8)dividers.post_divider;
2229 
2230 		table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
2231 		table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
2232 	}
2233 
2234 	return ret;
}
2237 
2238 static int ci_populate_smc_acp_level(struct radeon_device *rdev,
2239 				     SMU7_Discrete_DpmTable *table)
2240 {
2241 	u32 count;
2242 	struct atom_clock_dividers dividers;
2243 	int ret = -EINVAL;
2244 
2245 	table->AcpLevelCount = (u8)
2246 		(rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
2247 
2248 	for (count = 0; count < table->AcpLevelCount; count++) {
2249 		table->AcpLevel[count].Frequency =
2250 			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
		table->AcpLevel[count].MinVoltage =
			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2253 		table->AcpLevel[count].MinPhases = 1;
2254 
2255 		ret = radeon_atom_get_clock_dividers(rdev,
2256 						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2257 						     table->AcpLevel[count].Frequency, false, &dividers);
2258 		if (ret)
2259 			return ret;
2260 
2261 		table->AcpLevel[count].Divider = (u8)dividers.post_divider;
2262 
2263 		table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
2264 		table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
2265 	}
2266 
2267 	return ret;
2268 }
2269 
2270 static int ci_populate_smc_samu_level(struct radeon_device *rdev,
2271 				      SMU7_Discrete_DpmTable *table)
2272 {
2273 	u32 count;
2274 	struct atom_clock_dividers dividers;
2275 	int ret = -EINVAL;
2276 
2277 	table->SamuLevelCount =
2278 		rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
2279 
2280 	for (count = 0; count < table->SamuLevelCount; count++) {
2281 		table->SamuLevel[count].Frequency =
2282 			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
2283 		table->SamuLevel[count].MinVoltage =
2284 			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2285 		table->SamuLevel[count].MinPhases = 1;
2286 
2287 		ret = radeon_atom_get_clock_dividers(rdev,
2288 						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2289 						     table->SamuLevel[count].Frequency, false, &dividers);
2290 		if (ret)
2291 			return ret;
2292 
2293 		table->SamuLevel[count].Divider = (u8)dividers.post_divider;
2294 
2295 		table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
2296 		table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
2297 	}
2298 
2299 	return ret;
2300 }
2301 
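/*
 * Derive the MPLL register image for a target memory clock from the
 * ATOM divider parameters, optionally folding in memory spread spectrum.
 * The nominal frequency used for the SS coefficients is 4x the memory
 * clock for GDDR5 and 2x otherwise.
 */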
2302 static int ci_calculate_mclk_params(struct radeon_device *rdev,
2303 				    u32 memory_clock,
2304 				    SMU7_Discrete_MemoryLevel *mclk,
2305 				    bool strobe_mode,
2306 				    bool dll_state_on)
2307 {
2308 	struct ci_power_info *pi = ci_get_pi(rdev);
2309 	u32  dll_cntl = pi->clock_registers.dll_cntl;
2310 	u32  mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2311 	u32  mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
2312 	u32  mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
2313 	u32  mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
2314 	u32  mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
2315 	u32  mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
2316 	u32  mpll_ss1 = pi->clock_registers.mpll_ss1;
2317 	u32  mpll_ss2 = pi->clock_registers.mpll_ss2;
2318 	struct atom_mpll_param mpll_param;
2319 	int ret;
2320 
2321 	ret = radeon_atom_get_memory_pll_dividers(rdev, memory_clock, strobe_mode, &mpll_param);
2322 	if (ret)
2323 		return ret;
2324 
2325 	mpll_func_cntl &= ~BWCTRL_MASK;
2326 	mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);
2327 
2328 	mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
2329 	mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
2330 		CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);
2331 
2332 	mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
2333 	mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);
2334 
2335 	if (pi->mem_gddr5) {
2336 		mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
2337 		mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
2338 			YCLK_POST_DIV(mpll_param.post_div);
2339 	}
2340 
2341 	if (pi->caps_mclk_ss_support) {
2342 		struct radeon_atom_ss ss;
2343 		u32 freq_nom;
2344 		u32 tmp;
2345 		u32 reference_clock = rdev->clock.mpll.reference_freq;
2346 
2347 		if (pi->mem_gddr5)
2348 			freq_nom = memory_clock * 4;
2349 		else
2350 			freq_nom = memory_clock * 2;
2351 
2352 		tmp = (freq_nom / reference_clock);
2353 		tmp = tmp * tmp;
2354 		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
2355 						     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
2356 			u32 clks = reference_clock * 5 / ss.rate;
2357 			u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
2358 
2359 			mpll_ss1 &= ~CLKV_MASK;
2360 			mpll_ss1 |= CLKV(clkv);
2361 
2362 			mpll_ss2 &= ~CLKS_MASK;
2363 			mpll_ss2 |= CLKS(clks);
2364 		}
2365 	}
2366 
2367 	mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
2368 	mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);
2369 
2370 	if (dll_state_on)
2371 		mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
2372 	else
2373 		mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
2374 
2375 	mclk->MclkFrequency = memory_clock;
2376 	mclk->MpllFuncCntl = mpll_func_cntl;
2377 	mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
2378 	mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
2379 	mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
2380 	mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
2381 	mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
2382 	mclk->DllCntl = dll_cntl;
2383 	mclk->MpllSs1 = mpll_ss1;
2384 	mclk->MpllSs2 = mpll_ss2;
2385 
2386 	return 0;
2387 }
2388 
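/*
 * Fill in one SMC memory level: minimum voltages come from the mclk
 * dependency tables, the stutter/strobe/EDC policy from the thresholds
 * in ci_power_info, and the PLL setup from ci_calculate_mclk_params().
 * All multi-byte fields are byte-swapped at the end for the SMC.
 */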
2389 static int ci_populate_single_memory_level(struct radeon_device *rdev,
2390 					   u32 memory_clock,
2391 					   SMU7_Discrete_MemoryLevel *memory_level)
2392 {
2393 	struct ci_power_info *pi = ci_get_pi(rdev);
2394 	int ret;
2395 	bool dll_state_on;
2396 
2397 	if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
2398 		ret = ci_get_dependency_volt_by_clk(rdev,
2399 						    &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2400 						    memory_clock, &memory_level->MinVddc);
2401 		if (ret)
2402 			return ret;
2403 	}
2404 
2405 	if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
2406 		ret = ci_get_dependency_volt_by_clk(rdev,
2407 						    &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2408 						    memory_clock, &memory_level->MinVddci);
2409 		if (ret)
2410 			return ret;
2411 	}
2412 
2413 	if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
2414 		ret = ci_get_dependency_volt_by_clk(rdev,
2415 						    &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
2416 						    memory_clock, &memory_level->MinMvdd);
2417 		if (ret)
2418 			return ret;
2419 	}
2420 
2421 	memory_level->MinVddcPhases = 1;
2422 
2423 	if (pi->vddc_phase_shed_control)
2424 		ci_populate_phase_value_based_on_mclk(rdev,
2425 						      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
2426 						      memory_clock,
2427 						      &memory_level->MinVddcPhases);
2428 
2429 	memory_level->EnabledForThrottle = 1;
2430 	memory_level->EnabledForActivity = 1;
2431 	memory_level->UpH = 0;
2432 	memory_level->DownH = 100;
2433 	memory_level->VoltageDownH = 0;
2434 	memory_level->ActivityLevel = (u16)pi->mclk_activity_target;
2435 
2436 	memory_level->StutterEnable = false;
2437 	memory_level->StrobeEnable = false;
2438 	memory_level->EdcReadEnable = false;
2439 	memory_level->EdcWriteEnable = false;
2440 	memory_level->RttEnable = false;
2441 
2442 	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2443 
	if (pi->mclk_stutter_mode_threshold &&
	    (memory_clock <= pi->mclk_stutter_mode_threshold) &&
	    !pi->uvd_enabled &&
	    (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
	    (rdev->pm.dpm.new_active_crtc_count <= 2))
		memory_level->StutterEnable = true;

	if (pi->mclk_strobe_mode_threshold &&
	    (memory_clock <= pi->mclk_strobe_mode_threshold))
		memory_level->StrobeEnable = true;
2454 
2455 	if (pi->mem_gddr5) {
2456 		memory_level->StrobeRatio =
2457 			si_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
2458 		if (pi->mclk_edc_enable_threshold &&
2459 		    (memory_clock > pi->mclk_edc_enable_threshold))
2460 			memory_level->EdcReadEnable = true;
2461 
2462 		if (pi->mclk_edc_wr_enable_threshold &&
2463 		    (memory_clock > pi->mclk_edc_wr_enable_threshold))
2464 			memory_level->EdcWriteEnable = true;
2465 
2466 		if (memory_level->StrobeEnable) {
2467 			if (si_get_mclk_frequency_ratio(memory_clock, true) >=
2468 			    ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
2469 				dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
2470 			else
2471 				dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
2472 		} else {
2473 			dll_state_on = pi->dll_default_on;
2474 		}
2475 	} else {
2476 		memory_level->StrobeRatio = si_get_ddr3_mclk_frequency_ratio(memory_clock);
2477 		dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
2478 	}
2479 
2480 	ret = ci_calculate_mclk_params(rdev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
2481 	if (ret)
2482 		return ret;
2483 
2484 	memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
2485 	memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
	memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
	memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
2488 
2489 	memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
2490 	memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
2491 	memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
2492 	memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
2493 	memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
2494 	memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
2495 	memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
2496 	memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
2497 	memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
2498 	memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
2499 	memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);
2500 
2501 	return 0;
2502 }
2503 
2504 static int ci_populate_smc_acpi_level(struct radeon_device *rdev,
2505 				      SMU7_Discrete_DpmTable *table)
2506 {
2507 	struct ci_power_info *pi = ci_get_pi(rdev);
2508 	struct atom_clock_dividers dividers;
2509 	SMU7_Discrete_VoltageLevel voltage_level;
2510 	u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
2511 	u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
2512 	u32 dll_cntl = pi->clock_registers.dll_cntl;
2513 	u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2514 	int ret;
2515 
2516 	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
2517 
2518 	if (pi->acpi_vddc)
2519 		table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
2520 	else
2521 		table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);
2522 
2523 	table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;
2524 
2525 	table->ACPILevel.SclkFrequency = rdev->clock.spll.reference_freq;
2526 
2527 	ret = radeon_atom_get_clock_dividers(rdev,
2528 					     COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
2529 					     table->ACPILevel.SclkFrequency, false, &dividers);
2530 	if (ret)
2531 		return ret;
2532 
2533 	table->ACPILevel.SclkDid = (u8)dividers.post_divider;
2534 	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2535 	table->ACPILevel.DeepSleepDivId = 0;
2536 
2537 	spll_func_cntl &= ~SPLL_PWRON;
2538 	spll_func_cntl |= SPLL_RESET;
2539 
2540 	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
2541 	spll_func_cntl_2 |= SCLK_MUX_SEL(4);
2542 
2543 	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
2544 	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
2545 	table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
2546 	table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
2547 	table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
2548 	table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
2549 	table->ACPILevel.CcPwrDynRm = 0;
2550 	table->ACPILevel.CcPwrDynRm1 = 0;
2551 
2552 	table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
2553 	table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
2554 	table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
2555 	table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
2556 	table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
2557 	table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
2558 	table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
2559 	table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
2560 	table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
2561 	table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
2562 	table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);
2563 
2564 	table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
2565 	table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
2566 
2567 	if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
2568 		if (pi->acpi_vddci)
2569 			table->MemoryACPILevel.MinVddci =
2570 				cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
2571 		else
2572 			table->MemoryACPILevel.MinVddci =
2573 				cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
2574 	}
2575 
2576 	if (ci_populate_mvdd_value(rdev, 0, &voltage_level))
2577 		table->MemoryACPILevel.MinMvdd = 0;
2578 	else
2579 		table->MemoryACPILevel.MinMvdd =
2580 			cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);
2581 
2582 	mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
2583 	mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
2584 
2585 	dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
2586 
2587 	table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
2588 	table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
2589 	table->MemoryACPILevel.MpllAdFuncCntl =
2590 		cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
2591 	table->MemoryACPILevel.MpllDqFuncCntl =
2592 		cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
2593 	table->MemoryACPILevel.MpllFuncCntl =
2594 		cpu_to_be32(pi->clock_registers.mpll_func_cntl);
2595 	table->MemoryACPILevel.MpllFuncCntl_1 =
2596 		cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
2597 	table->MemoryACPILevel.MpllFuncCntl_2 =
2598 		cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
2599 	table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
2600 	table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);
2601 
2602 	table->MemoryACPILevel.EnabledForThrottle = 0;
2603 	table->MemoryACPILevel.EnabledForActivity = 0;
2604 	table->MemoryACPILevel.UpH = 0;
2605 	table->MemoryACPILevel.DownH = 100;
2606 	table->MemoryACPILevel.VoltageDownH = 0;
2607 	table->MemoryACPILevel.ActivityLevel =
2608 		cpu_to_be16((u16)pi->mclk_activity_target);
2609 
2610 	table->MemoryACPILevel.StutterEnable = false;
2611 	table->MemoryACPILevel.StrobeEnable = false;
2612 	table->MemoryACPILevel.EdcReadEnable = false;
2613 	table->MemoryACPILevel.EdcWriteEnable = false;
2614 	table->MemoryACPILevel.RttEnable = false;
2615 
2616 	return 0;
2617 }
2618 
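/*
 * ULV (ultra low voltage) is toggled with a single SMC message; when the
 * platform doesn't support ULV this is a no-op that reports success.
 */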
2620 static int ci_enable_ulv(struct radeon_device *rdev, bool enable)
2621 {
2622 	struct ci_power_info *pi = ci_get_pi(rdev);
2623 	struct ci_ulv_parm *ulv = &pi->ulv;
2624 
2625 	if (ulv->supported) {
2626 		if (enable)
2627 			return (ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
2628 				0 : -EINVAL;
2629 		else
2630 			return (ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
2631 				0 : -EINVAL;
2632 	}
2633 
2634 	return 0;
2635 }
2636 
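/*
 * Program the ULV state from the ULV voltage (stored in
 * backbias_response_time).  For SVI2 the offset below the lowest sclk
 * dependency voltage is converted from mV to VID steps using
 * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1 = 100/625,
 * i.e. one VID step per 6.25 mV.
 */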
2637 static int ci_populate_ulv_level(struct radeon_device *rdev,
2638 				 SMU7_Discrete_Ulv *state)
2639 {
2640 	struct ci_power_info *pi = ci_get_pi(rdev);
2641 	u16 ulv_voltage = rdev->pm.dpm.backbias_response_time;
2642 
2643 	state->CcPwrDynRm = 0;
2644 	state->CcPwrDynRm1 = 0;
2645 
2646 	if (ulv_voltage == 0) {
2647 		pi->ulv.supported = false;
2648 		return 0;
2649 	}
2650 
2651 	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2652 		if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
2653 			state->VddcOffset = 0;
2654 		else
2655 			state->VddcOffset =
2656 				rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
2657 	} else {
2658 		if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
2659 			state->VddcOffsetVid = 0;
2660 		else
2661 			state->VddcOffsetVid = (u8)
2662 				((rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
2663 				 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
2664 	}
2665 	state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
2666 
2667 	state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
2668 	state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
2669 	state->VddcOffset = cpu_to_be16(state->VddcOffset);
2670 
2671 	return 0;
2672 }
2673 
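/*
 * Build the SPLL register image for an engine clock.  When engine spread
 * spectrum is enabled, CLK_S is derived from the reference clock and the
 * SS modulation rate, and CLK_V from the SS percentage and the feedback
 * divider.
 */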
2674 static int ci_calculate_sclk_params(struct radeon_device *rdev,
2675 				    u32 engine_clock,
2676 				    SMU7_Discrete_GraphicsLevel *sclk)
2677 {
2678 	struct ci_power_info *pi = ci_get_pi(rdev);
2679 	struct atom_clock_dividers dividers;
2680 	u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
2681 	u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
2682 	u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
2683 	u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
2684 	u32 reference_clock = rdev->clock.spll.reference_freq;
2685 	u32 reference_divider;
2686 	u32 fbdiv;
2687 	int ret;
2688 
2689 	ret = radeon_atom_get_clock_dividers(rdev,
2690 					     COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
2691 					     engine_clock, false, &dividers);
2692 	if (ret)
2693 		return ret;
2694 
2695 	reference_divider = 1 + dividers.ref_div;
2696 	fbdiv = dividers.fb_div & 0x3FFFFFF;
2697 
2698 	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
2699 	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
	spll_func_cntl_3 |= SPLL_DITHEN;
2701 
2702 	if (pi->caps_sclk_ss_support) {
2703 		struct radeon_atom_ss ss;
2704 		u32 vco_freq = engine_clock * dividers.post_div;
2705 
2706 		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
2707 						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
2708 			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
2709 			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
2710 
2711 			cg_spll_spread_spectrum &= ~CLK_S_MASK;
2712 			cg_spll_spread_spectrum |= CLK_S(clk_s);
2713 			cg_spll_spread_spectrum |= SSEN;
2714 
2715 			cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
2716 			cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
2717 		}
2718 	}
2719 
2720 	sclk->SclkFrequency = engine_clock;
2721 	sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
2722 	sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
2723 	sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
	sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
2725 	sclk->SclkDid = (u8)dividers.post_divider;
2726 
2727 	return 0;
2728 }
2729 
2730 static int ci_populate_single_graphic_level(struct radeon_device *rdev,
2731 					    u32 engine_clock,
2732 					    u16 sclk_activity_level_t,
2733 					    SMU7_Discrete_GraphicsLevel *graphic_level)
2734 {
2735 	struct ci_power_info *pi = ci_get_pi(rdev);
2736 	int ret;
2737 
2738 	ret = ci_calculate_sclk_params(rdev, engine_clock, graphic_level);
2739 	if (ret)
2740 		return ret;
2741 
2742 	ret = ci_get_dependency_volt_by_clk(rdev,
2743 					    &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
2744 					    engine_clock, &graphic_level->MinVddc);
2745 	if (ret)
2746 		return ret;
2747 
2748 	graphic_level->SclkFrequency = engine_clock;
2749 
	graphic_level->Flags = 0;
2751 	graphic_level->MinVddcPhases = 1;
2752 
2753 	if (pi->vddc_phase_shed_control)
2754 		ci_populate_phase_value_based_on_sclk(rdev,
2755 						      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
2756 						      engine_clock,
2757 						      &graphic_level->MinVddcPhases);
2758 
2759 	graphic_level->ActivityLevel = sclk_activity_level_t;
2760 
2761 	graphic_level->CcPwrDynRm = 0;
2762 	graphic_level->CcPwrDynRm1 = 0;
2763 	graphic_level->EnabledForActivity = 1;
2764 	graphic_level->EnabledForThrottle = 1;
2765 	graphic_level->UpH = 0;
2766 	graphic_level->DownH = 0;
2767 	graphic_level->VoltageDownH = 0;
2768 	graphic_level->PowerThrottle = 0;
2769 
2770 	if (pi->caps_sclk_ds)
2771 		graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(rdev,
2772 										   engine_clock,
2773 										   CISLAND_MINIMUM_ENGINE_CLOCK);
2774 
2775 	graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2776 
2777 	graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
	graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
2779 	graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
2780 	graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
2781 	graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
2782 	graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
2783 	graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
2784 	graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
2785 	graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
2786 	graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
2787 	graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
2788 
2789 	return 0;
2790 }
2791 
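/*
 * Populate every graphics DPM level and upload the whole array to SMC
 * SRAM in one transfer; the top level is tagged with the high display
 * watermark.
 */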
2792 static int ci_populate_all_graphic_levels(struct radeon_device *rdev)
2793 {
2794 	struct ci_power_info *pi = ci_get_pi(rdev);
2795 	struct ci_dpm_table *dpm_table = &pi->dpm_table;
2796 	u32 level_array_address = pi->dpm_table_start +
2797 		offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
2798 	u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
2799 		SMU7_MAX_LEVELS_GRAPHICS;
2800 	SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
	u32 i;
	int ret;
2802 
2803 	memset(levels, 0, level_array_size);
2804 
2805 	for (i = 0; i < dpm_table->sclk_table.count; i++) {
2806 		ret = ci_populate_single_graphic_level(rdev,
2807 						       dpm_table->sclk_table.dpm_levels[i].value,
2808 						       (u16)pi->activity_target[i],
2809 						       &pi->smc_state_table.GraphicsLevel[i]);
2810 		if (ret)
2811 			return ret;
2812 		if (i == (dpm_table->sclk_table.count - 1))
2813 			pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
2814 				PPSMC_DISPLAY_WATERMARK_HIGH;
2815 	}
2816 
2817 	pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
2818 	pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
2819 		ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
2820 
2821 	ret = ci_copy_bytes_to_smc(rdev, level_array_address,
2822 				   (u8 *)levels, level_array_size,
2823 				   pi->sram_end);
2824 	if (ret)
2825 		return ret;
2826 
2827 	return 0;
2828 }
2829 
2830 static int ci_populate_ulv_state(struct radeon_device *rdev,
2831 				 SMU7_Discrete_Ulv *ulv_level)
2832 {
2833 	return ci_populate_ulv_level(rdev, ulv_level);
2834 }
2835 
2836 static int ci_populate_all_memory_levels(struct radeon_device *rdev)
2837 {
2838 	struct ci_power_info *pi = ci_get_pi(rdev);
2839 	struct ci_dpm_table *dpm_table = &pi->dpm_table;
2840 	u32 level_array_address = pi->dpm_table_start +
2841 		offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
2842 	u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
2843 		SMU7_MAX_LEVELS_MEMORY;
2844 	SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
	u32 i;
	int ret;
2846 
2847 	memset(levels, 0, level_array_size);
2848 
2849 	for (i = 0; i < dpm_table->mclk_table.count; i++) {
2850 		if (dpm_table->mclk_table.dpm_levels[i].value == 0)
2851 			return -EINVAL;
2852 		ret = ci_populate_single_memory_level(rdev,
2853 						      dpm_table->mclk_table.dpm_levels[i].value,
2854 						      &pi->smc_state_table.MemoryLevel[i]);
2855 		if (ret)
2856 			return ret;
2857 	}
2858 
2859 	pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
2860 
2861 	pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
2862 	pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
2863 		ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
2864 
2865 	pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
2866 		PPSMC_DISPLAY_WATERMARK_HIGH;
2867 
2868 	ret = ci_copy_bytes_to_smc(rdev, level_array_address,
2869 				   (u8 *)levels, level_array_size,
2870 				   pi->sram_end);
2871 	if (ret)
2872 		return ret;
2873 
2874 	return 0;
2875 }
2876 
2877 static void ci_reset_single_dpm_table(struct radeon_device *rdev,
2878 				      struct ci_single_dpm_table* dpm_table,
2879 				      u32 count)
2880 {
2881 	u32 i;
2882 
2883 	dpm_table->count = count;
2884 	for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
2885 		dpm_table->dpm_levels[i].enabled = false;
2886 }
2887 
2888 static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table,
2889 				      u32 index, u32 pcie_gen, u32 pcie_lanes)
2890 {
2891 	dpm_table->dpm_levels[index].value = pcie_gen;
2892 	dpm_table->dpm_levels[index].param1 = pcie_lanes;
2893 	dpm_table->dpm_levels[index].enabled = true;
2894 }
2895 
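/*
 * Lay out the six default PCIe levels, alternating powersaving and
 * performance gen/lane combinations from the minimum settings up to the
 * maximums.  If only one of the two profiles is valid it is used for
 * both.
 */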
2896 static int ci_setup_default_pcie_tables(struct radeon_device *rdev)
2897 {
2898 	struct ci_power_info *pi = ci_get_pi(rdev);
2899 
2900 	if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
2901 		return -EINVAL;
2902 
2903 	if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
2904 		pi->pcie_gen_powersaving = pi->pcie_gen_performance;
2905 		pi->pcie_lane_powersaving = pi->pcie_lane_performance;
2906 	} else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
2907 		pi->pcie_gen_performance = pi->pcie_gen_powersaving;
2908 		pi->pcie_lane_performance = pi->pcie_lane_powersaving;
2909 	}
2910 
2911 	ci_reset_single_dpm_table(rdev,
2912 				  &pi->dpm_table.pcie_speed_table,
2913 				  SMU7_MAX_LEVELS_LINK);
2914 
2915 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
2916 				  pi->pcie_gen_powersaving.min,
2917 				  pi->pcie_lane_powersaving.min);
2918 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
2919 				  pi->pcie_gen_performance.min,
2920 				  pi->pcie_lane_performance.min);
2921 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
2922 				  pi->pcie_gen_powersaving.min,
2923 				  pi->pcie_lane_powersaving.max);
2924 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
2925 				  pi->pcie_gen_performance.min,
2926 				  pi->pcie_lane_performance.max);
2927 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
2928 				  pi->pcie_gen_powersaving.max,
2929 				  pi->pcie_lane_powersaving.max);
2930 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
2931 				  pi->pcie_gen_performance.max,
2932 				  pi->pcie_lane_performance.max);
2933 
2934 	pi->dpm_table.pcie_speed_table.count = 6;
2935 
2936 	return 0;
2937 }
2938 
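/*
 * Seed the driver-side DPM tables from the ATOM dependency tables,
 * de-duplicating consecutive identical clocks, and finish with the
 * default PCIe table.
 */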
2939 static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
2940 {
2941 	struct ci_power_info *pi = ci_get_pi(rdev);
2942 	struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
2943 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
2944 	struct radeon_clock_voltage_dependency_table *allowed_mclk_table =
2945 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
2946 	struct radeon_cac_leakage_table *std_voltage_table =
2947 		&rdev->pm.dpm.dyn_state.cac_leakage_table;
2948 	u32 i;
2949 
2950 	if (allowed_sclk_vddc_table == NULL)
2951 		return -EINVAL;
2952 	if (allowed_sclk_vddc_table->count < 1)
2953 		return -EINVAL;
2954 	if (allowed_mclk_table == NULL)
2955 		return -EINVAL;
2956 	if (allowed_mclk_table->count < 1)
2957 		return -EINVAL;
2958 
2959 	memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
2960 
2961 	ci_reset_single_dpm_table(rdev,
2962 				  &pi->dpm_table.sclk_table,
2963 				  SMU7_MAX_LEVELS_GRAPHICS);
2964 	ci_reset_single_dpm_table(rdev,
2965 				  &pi->dpm_table.mclk_table,
2966 				  SMU7_MAX_LEVELS_MEMORY);
2967 	ci_reset_single_dpm_table(rdev,
2968 				  &pi->dpm_table.vddc_table,
2969 				  SMU7_MAX_LEVELS_VDDC);
2970 	ci_reset_single_dpm_table(rdev,
2971 				  &pi->dpm_table.vddci_table,
2972 				  SMU7_MAX_LEVELS_VDDCI);
2973 	ci_reset_single_dpm_table(rdev,
2974 				  &pi->dpm_table.mvdd_table,
2975 				  SMU7_MAX_LEVELS_MVDD);
2976 
2977 	pi->dpm_table.sclk_table.count = 0;
2978 	for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
2979 		if ((i == 0) ||
2980 		    (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
2981 		     allowed_sclk_vddc_table->entries[i].clk)) {
2982 			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
2983 				allowed_sclk_vddc_table->entries[i].clk;
2984 			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled = true;
2985 			pi->dpm_table.sclk_table.count++;
2986 		}
2987 	}
2988 
2989 	pi->dpm_table.mclk_table.count = 0;
2990 	for (i = 0; i < allowed_mclk_table->count; i++) {
		if ((i == 0) ||
2992 		    (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
2993 		     allowed_mclk_table->entries[i].clk)) {
2994 			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
2995 				allowed_mclk_table->entries[i].clk;
2996 			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled = true;
2997 			pi->dpm_table.mclk_table.count++;
2998 		}
2999 	}
3000 
3001 	for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3002 		pi->dpm_table.vddc_table.dpm_levels[i].value =
3003 			allowed_sclk_vddc_table->entries[i].v;
3004 		pi->dpm_table.vddc_table.dpm_levels[i].param1 =
3005 			std_voltage_table->entries[i].leakage;
3006 		pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
3007 	}
3008 	pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
3009 
3010 	allowed_mclk_table = &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
3011 	if (allowed_mclk_table) {
3012 		for (i = 0; i < allowed_mclk_table->count; i++) {
3013 			pi->dpm_table.vddci_table.dpm_levels[i].value =
3014 				allowed_mclk_table->entries[i].v;
3015 			pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
3016 		}
3017 		pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
3018 	}
3019 
3020 	allowed_mclk_table = &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
3021 	if (allowed_mclk_table) {
3022 		for (i = 0; i < allowed_mclk_table->count; i++) {
3023 			pi->dpm_table.mvdd_table.dpm_levels[i].value =
3024 				allowed_mclk_table->entries[i].v;
3025 			pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
3026 		}
3027 		pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
3028 	}
3029 
3030 	ci_setup_default_pcie_tables(rdev);
3031 
3032 	return 0;
3033 }
3034 
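/* Map a VBIOS boot clock onto its DPM table index (last match wins). */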
3035 static int ci_find_boot_level(struct ci_single_dpm_table *table,
3036 			      u32 value, u32 *boot_level)
3037 {
3038 	u32 i;
3039 	int ret = -EINVAL;
3040 
	for (i = 0; i < table->count; i++) {
3042 		if (value == table->dpm_levels[i].value) {
3043 			*boot_level = i;
3044 			ret = 0;
3045 		}
3046 	}
3047 
3048 	return ret;
3049 }
3050 
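/*
 * Master setup of the SMU7 discrete DPM table: default DPM tables,
 * voltage tables, ULV, graphics/memory/link levels, the ACPI and
 * UVD/VCE/ACP/SAMU levels, the boot state and the global policy knobs,
 * finishing with a single upload of everything up to (but not including)
 * the three PID controllers at the end of the structure.
 */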
3051 static int ci_init_smc_table(struct radeon_device *rdev)
3052 {
3053 	struct ci_power_info *pi = ci_get_pi(rdev);
3054 	struct ci_ulv_parm *ulv = &pi->ulv;
3055 	struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps;
3056 	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
3057 	int ret;
3058 
3059 	ret = ci_setup_default_dpm_tables(rdev);
3060 	if (ret)
3061 		return ret;
3062 
3063 	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
3064 		ci_populate_smc_voltage_tables(rdev, table);
3065 
3066 	ci_init_fps_limits(rdev);
3067 
3068 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
3069 		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
3070 
3071 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
3072 		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
3073 
3074 	if (pi->mem_gddr5)
3075 		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
3076 
3077 	if (ulv->supported) {
3078 		ret = ci_populate_ulv_state(rdev, &pi->smc_state_table.Ulv);
3079 		if (ret)
3080 			return ret;
3081 		WREG32_SMC(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
3082 	}
3083 
3084 	ret = ci_populate_all_graphic_levels(rdev);
3085 	if (ret)
3086 		return ret;
3087 
3088 	ret = ci_populate_all_memory_levels(rdev);
3089 	if (ret)
3090 		return ret;
3091 
3092 	ci_populate_smc_link_level(rdev, table);
3093 
3094 	ret = ci_populate_smc_acpi_level(rdev, table);
3095 	if (ret)
3096 		return ret;
3097 
3098 	ret = ci_populate_smc_vce_level(rdev, table);
3099 	if (ret)
3100 		return ret;
3101 
3102 	ret = ci_populate_smc_acp_level(rdev, table);
3103 	if (ret)
3104 		return ret;
3105 
3106 	ret = ci_populate_smc_samu_level(rdev, table);
3107 	if (ret)
3108 		return ret;
3109 
3110 	ret = ci_do_program_memory_timing_parameters(rdev);
3111 	if (ret)
3112 		return ret;
3113 
3114 	ret = ci_populate_smc_uvd_level(rdev, table);
3115 	if (ret)
3116 		return ret;
3117 
	table->UvdBootLevel = 0;
	table->VceBootLevel = 0;
	table->AcpBootLevel = 0;
	table->SamuBootLevel = 0;
	table->GraphicsBootLevel = 0;
	table->MemoryBootLevel = 0;
3124 
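	/* A failed lookup is tolerated; the boot levels already default to 0. */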
3125 	ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
3126 				 pi->vbios_boot_state.sclk_bootup_value,
3127 				 (u32 *)&pi->smc_state_table.GraphicsBootLevel);
3128 
3129 	ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
3130 				 pi->vbios_boot_state.mclk_bootup_value,
3131 				 (u32 *)&pi->smc_state_table.MemoryBootLevel);
3132 
3133 	table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
3134 	table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
3135 	table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;
3136 
3137 	ci_populate_smc_initial_state(rdev, radeon_boot_state);
3138 
3139 	ret = ci_populate_bapm_parameters_in_dpm_table(rdev);
3140 	if (ret)
3141 		return ret;
3142 
3143 	table->UVDInterval = 1;
3144 	table->VCEInterval = 1;
3145 	table->ACPInterval = 1;
3146 	table->SAMUInterval = 1;
3147 	table->GraphicsVoltageChangeEnable = 1;
3148 	table->GraphicsThermThrottleEnable = 1;
3149 	table->GraphicsInterval = 1;
3150 	table->VoltageInterval = 1;
3151 	table->ThermalInterval = 1;
3152 	table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
3153 					     CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3154 	table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
3155 					    CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3156 	table->MemoryVoltageChangeEnable = 1;
3157 	table->MemoryInterval = 1;
3158 	table->VoltageResponseTime = 0;
3159 	table->VddcVddciDelta = 4000;
3160 	table->PhaseResponseTime = 0;
3161 	table->MemoryThermThrottleEnable = 1;
3162 	table->PCIeBootLinkLevel = 0;
3163 	table->PCIeGenInterval = 1;
	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
		table->SVI2Enable = 1;
	else
		table->SVI2Enable = 0;
3168 
3169 	table->ThermGpio = 17;
3170 	table->SclkStepSize = 0x4000;
3171 
3172 	table->SystemFlags = cpu_to_be32(table->SystemFlags);
3173 	table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
3174 	table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
3175 	table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
3176 	table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
3177 	table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
3178 	table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
3179 	table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
3180 	table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
3181 	table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
3182 	table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
3183 	table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
3184 	table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
3185 	table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);
3186 
3187 	ret = ci_copy_bytes_to_smc(rdev,
3188 				   pi->dpm_table_start +
3189 				   offsetof(SMU7_Discrete_DpmTable, SystemFlags),
3190 				   (u8 *)&table->SystemFlags,
3191 				   sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
3192 				   pi->sram_end);
3193 	if (ret)
3194 		return ret;
3195 
3196 	return 0;
3197 }
3198 
3199 static void ci_trim_single_dpm_states(struct radeon_device *rdev,
3200 				      struct ci_single_dpm_table *dpm_table,
3201 				      u32 low_limit, u32 high_limit)
3202 {
3203 	u32 i;
3204 
3205 	for (i = 0; i < dpm_table->count; i++) {
3206 		if ((dpm_table->dpm_levels[i].value < low_limit) ||
3207 		    (dpm_table->dpm_levels[i].value > high_limit))
3208 			dpm_table->dpm_levels[i].enabled = false;
3209 		else
3210 			dpm_table->dpm_levels[i].enabled = true;
3211 	}
3212 }
3213 
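/*
 * Disable PCIe levels outside the state's speed/lane window, then
 * de-duplicate: of any remaining levels with identical gen and lane
 * settings, only the first stays enabled.
 */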
3214 static void ci_trim_pcie_dpm_states(struct radeon_device *rdev,
3215 				    u32 speed_low, u32 lanes_low,
3216 				    u32 speed_high, u32 lanes_high)
3217 {
3218 	struct ci_power_info *pi = ci_get_pi(rdev);
3219 	struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
3220 	u32 i, j;
3221 
3222 	for (i = 0; i < pcie_table->count; i++) {
3223 		if ((pcie_table->dpm_levels[i].value < speed_low) ||
3224 		    (pcie_table->dpm_levels[i].param1 < lanes_low) ||
3225 		    (pcie_table->dpm_levels[i].value > speed_high) ||
3226 		    (pcie_table->dpm_levels[i].param1 > lanes_high))
3227 			pcie_table->dpm_levels[i].enabled = false;
3228 		else
3229 			pcie_table->dpm_levels[i].enabled = true;
3230 	}
3231 
3232 	for (i = 0; i < pcie_table->count; i++) {
3233 		if (pcie_table->dpm_levels[i].enabled) {
3234 			for (j = i + 1; j < pcie_table->count; j++) {
3235 				if (pcie_table->dpm_levels[j].enabled) {
3236 					if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
3237 					    (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
3238 						pcie_table->dpm_levels[j].enabled = false;
3239 				}
3240 			}
3241 		}
3242 	}
3243 }
3244 
3245 static int ci_trim_dpm_states(struct radeon_device *rdev,
3246 			      struct radeon_ps *radeon_state)
3247 {
3248 	struct ci_ps *state = ci_get_ps(radeon_state);
3249 	struct ci_power_info *pi = ci_get_pi(rdev);
3250 	u32 high_limit_count;
3251 
3252 	if (state->performance_level_count < 1)
3253 		return -EINVAL;
3254 
3255 	if (state->performance_level_count == 1)
3256 		high_limit_count = 0;
3257 	else
3258 		high_limit_count = 1;
3259 
3260 	ci_trim_single_dpm_states(rdev,
3261 				  &pi->dpm_table.sclk_table,
3262 				  state->performance_levels[0].sclk,
3263 				  state->performance_levels[high_limit_count].sclk);
3264 
3265 	ci_trim_single_dpm_states(rdev,
3266 				  &pi->dpm_table.mclk_table,
3267 				  state->performance_levels[0].mclk,
3268 				  state->performance_levels[high_limit_count].mclk);
3269 
3270 	ci_trim_pcie_dpm_states(rdev,
3271 				state->performance_levels[0].pcie_gen,
3272 				state->performance_levels[0].pcie_lane,
3273 				state->performance_levels[high_limit_count].pcie_gen,
3274 				state->performance_levels[high_limit_count].pcie_lane);
3275 
3276 	return 0;
3277 }
3278 
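/*
 * Find the voltage required for the current display clock and request the
 * smallest qualifying vddc entry from the SMC (scaled by VOLTAGE_SCALE).
 */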
3279 static int ci_apply_disp_minimum_voltage_request(struct radeon_device *rdev)
3280 {
3281 	struct radeon_clock_voltage_dependency_table *disp_voltage_table =
3282 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
3283 	struct radeon_clock_voltage_dependency_table *vddc_table =
3284 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3285 	u32 requested_voltage = 0;
3286 	u32 i;
3287 
3288 	if (disp_voltage_table == NULL)
3289 		return -EINVAL;
3290 	if (!disp_voltage_table->count)
3291 		return -EINVAL;
3292 
3293 	for (i = 0; i < disp_voltage_table->count; i++) {
3294 		if (rdev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
3295 			requested_voltage = disp_voltage_table->entries[i].v;
3296 	}
3297 
3298 	for (i = 0; i < vddc_table->count; i++) {
3299 		if (requested_voltage <= vddc_table->entries[i].v) {
3300 			requested_voltage = vddc_table->entries[i].v;
3301 			return (ci_send_msg_to_smc_with_parameter(rdev,
3302 								  PPSMC_MSG_VddC_Request,
3303 								  requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
3304 				0 : -EINVAL;
3305 		}
3306 	}
3307 
3308 	return -EINVAL;
3309 }
3310 
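/*
 * Push the sclk/mclk/PCIe enable masks to the SMC (skipping any block whose
 * DPM key is disabled), then forward the display's minimum-voltage request.
 */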
3311 static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
3312 {
3313 	struct ci_power_info *pi = ci_get_pi(rdev);
3314 	PPSMC_Result result;
3315 
3316 	if (!pi->sclk_dpm_key_disabled) {
3317 		if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3318 			result = ci_send_msg_to_smc_with_parameter(rdev,
3319 								   PPSMC_MSG_SCLKDPM_SetEnabledMask,
3320 								   pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3321 			if (result != PPSMC_Result_OK)
3322 				return -EINVAL;
3323 		}
3324 	}
3325 
3326 	if (!pi->mclk_dpm_key_disabled) {
3327 		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3328 			result = ci_send_msg_to_smc_with_parameter(rdev,
3329 								   PPSMC_MSG_MCLKDPM_SetEnabledMask,
3330 								   pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3331 			if (result != PPSMC_Result_OK)
3332 				return -EINVAL;
3333 		}
3334 	}
3335 
3336 	if (!pi->pcie_dpm_key_disabled) {
3337 		if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3338 			result = ci_send_msg_to_smc_with_parameter(rdev,
3339 								   PPSMC_MSG_PCIeDPM_SetEnabledMask,
3340 								   pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3341 			if (result != PPSMC_Result_OK)
3342 				return -EINVAL;
3343 		}
3344 	}
3345 
3346 	ci_apply_disp_minimum_voltage_request(rdev);
3347 
3348 	return 0;
3349 }
3350 
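/*
 * Compare the new state's top sclk/mclk against the current DPM tables and
 * record in need_update_smu7_dpm_table which tables must be repopulated; a
 * change in the active CRTC count also forces an MCLK update.
 */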
3351 static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev,
3352 						   struct radeon_ps *radeon_state)
3353 {
3354 	struct ci_power_info *pi = ci_get_pi(rdev);
3355 	struct ci_ps *state = ci_get_ps(radeon_state);
3356 	struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
3357 	u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3358 	struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
3359 	u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3360 	u32 i;
3361 
3362 	pi->need_update_smu7_dpm_table = 0;
3363 
3364 	for (i = 0; i < sclk_table->count; i++) {
3365 		if (sclk == sclk_table->dpm_levels[i].value)
3366 			break;
3367 	}
3368 
3369 	if (i >= sclk_table->count) {
3370 		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3371 	} else {
3372 		/* XXX check display min clock requirements */
3373 		if (0 != CISLAND_MINIMUM_ENGINE_CLOCK)
3374 			pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
3375 	}
3376 
3377 	for (i = 0; i < mclk_table->count; i++) {
3378 		if (mclk == mclk_table->dpm_levels[i].value)
3379 			break;
3380 	}
3381 
3382 	if (i >= mclk_table->count)
3383 		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
3384 
3385 	if (rdev->pm.dpm.current_active_crtc_count !=
3386 	    rdev->pm.dpm.new_active_crtc_count)
3387 		pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
3388 }
3389 
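/* Repopulate and upload the sclk/mclk levels flagged by the check above. */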
3390 static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct radeon_device *rdev,
3391 						       struct radeon_ps *radeon_state)
3392 {
3393 	struct ci_power_info *pi = ci_get_pi(rdev);
3394 	struct ci_ps *state = ci_get_ps(radeon_state);
3395 	u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3396 	u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3397 	struct ci_dpm_table *dpm_table = &pi->dpm_table;
3398 	int ret;
3399 
3400 	if (!pi->need_update_smu7_dpm_table)
3401 		return 0;
3402 
3403 	if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
3404 		dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
3405 
3406 	if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
3407 		dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
3408 
3409 	if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
3410 		ret = ci_populate_all_graphic_levels(rdev);
3411 		if (ret)
3412 			return ret;
3413 	}
3414 
3415 	if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
3416 		ret = ci_populate_all_memory_levels(rdev);
3417 		if (ret)
3418 			return ret;
3419 	}
3420 
3421 	return 0;
3422 }
3423 
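/*
 * Build the UVD enable mask from the clock/voltage dependency table and
 * toggle UVD DPM in the SMC.  While UVD is active, MCLK level 0 is masked
 * off, presumably to avoid memory clock switches during decode.
 */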
3424 static int ci_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
3425 {
3426 	struct ci_power_info *pi = ci_get_pi(rdev);
3427 	const struct radeon_clock_and_voltage_limits *max_limits;
3428 	int i;
3429 
3430 	if (rdev->pm.dpm.ac_power)
3431 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3432 	else
3433 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3434 
3435 	if (enable) {
3436 		pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
3437 
3438 		for (i = rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3439 			if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3440 				pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
3441 
3442 				if (!pi->caps_uvd_dpm)
3443 					break;
3444 			}
3445 		}
3446 
3447 		ci_send_msg_to_smc_with_parameter(rdev,
3448 						  PPSMC_MSG_UVDDPM_SetEnabledMask,
3449 						  pi->dpm_level_enable_mask.uvd_dpm_enable_mask);
3450 
3451 		if (pi->last_mclk_dpm_enable_mask & 0x1) {
3452 			pi->uvd_enabled = true;
3453 			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
3454 			ci_send_msg_to_smc_with_parameter(rdev,
3455 							  PPSMC_MSG_MCLKDPM_SetEnabledMask,
3456 							  pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3457 		}
3458 	} else {
3459 		if (pi->last_mclk_dpm_enable_mask & 0x1) {
3460 			pi->uvd_enabled = false;
3461 			pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
3462 			ci_send_msg_to_smc_with_parameter(rdev,
3463 							  PPSMC_MSG_MCLKDPM_SetEnabledMask,
3464 							  pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3465 		}
3466 	}
3467 
3468 	return (ci_send_msg_to_smc(rdev, enable ?
3469 				   PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
3470 		0 : -EINVAL;
3471 }
3472 
3473 static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable)
3474 {
3475 	struct ci_power_info *pi = ci_get_pi(rdev);
3476 	const struct radeon_clock_and_voltage_limits *max_limits;
3477 	int i;
3478 
3479 	if (rdev->pm.dpm.ac_power)
3480 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3481 	else
3482 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3483 
3484 	if (enable) {
3485 		pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
3486 		for (i = rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3487 			if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3488 				pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
3489 
3490 				if (!pi->caps_vce_dpm)
3491 					break;
3492 			}
3493 		}
3494 
3495 		ci_send_msg_to_smc_with_parameter(rdev,
3496 						  PPSMC_MSG_VCEDPM_SetEnabledMask,
3497 						  pi->dpm_level_enable_mask.vce_dpm_enable_mask);
3498 	}
3499 
3500 	return (ci_send_msg_to_smc(rdev, enable ?
3501 				   PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
3502 		0 : -EINVAL;
3503 }
3504 
3505 #if 0
3506 static int ci_enable_samu_dpm(struct radeon_device *rdev, bool enable)
3507 {
3508 	struct ci_power_info *pi = ci_get_pi(rdev);
3509 	const struct radeon_clock_and_voltage_limits *max_limits;
3510 	int i;
3511 
3512 	if (rdev->pm.dpm.ac_power)
3513 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3514 	else
3515 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3516 
3517 	if (enable) {
3518 		pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
3519 		for (i = rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3520 			if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3521 				pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;
3522 
3523 				if (!pi->caps_samu_dpm)
3524 					break;
3525 			}
3526 		}
3527 
3528 		ci_send_msg_to_smc_with_parameter(rdev,
3529 						  PPSMC_MSG_SAMUDPM_SetEnabledMask,
3530 						  pi->dpm_level_enable_mask.samu_dpm_enable_mask);
3531 	}
3532 	return (ci_send_msg_to_smc(rdev, enable ?
3533 				   PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
3534 		0 : -EINVAL;
3535 }
3536 
3537 static int ci_enable_acp_dpm(struct radeon_device *rdev, bool enable)
3538 {
3539 	struct ci_power_info *pi = ci_get_pi(rdev);
3540 	const struct radeon_clock_and_voltage_limits *max_limits;
3541 	int i;
3542 
3543 	if (rdev->pm.dpm.ac_power)
3544 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3545 	else
3546 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3547 
3548 	if (enable) {
3549 		pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
3550 		for (i = rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3551 			if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3552 				pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;
3553 
3554 				if (!pi->caps_acp_dpm)
3555 					break;
3556 			}
3557 		}
3558 
3559 		ci_send_msg_to_smc_with_parameter(rdev,
3560 						  PPSMC_MSG_ACPDPM_SetEnabledMask,
3561 						  pi->dpm_level_enable_mask.acp_dpm_enable_mask);
3562 	}
3563 
3564 	return (ci_send_msg_to_smc(rdev, enable ?
3565 				   PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
3566 		0 : -EINVAL;
3567 }
3568 #endif
3569 
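/* On ungate, pick the UVD boot level and write it into the SMC's DPM table before enabling UVD DPM. */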
3570 static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate)
3571 {
3572 	struct ci_power_info *pi = ci_get_pi(rdev);
3573 	u32 tmp;
3574 
3575 	if (!gate) {
3576 		if (pi->caps_uvd_dpm ||
3577 		    (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
3578 			pi->smc_state_table.UvdBootLevel = 0;
3579 		else
3580 			pi->smc_state_table.UvdBootLevel =
3581 				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
3582 
3583 		tmp = RREG32_SMC(DPM_TABLE_475);
3584 		tmp &= ~UvdBootLevel_MASK;
3585 		tmp |= UvdBootLevel(pi->smc_state_table.UvdBootLevel);
3586 		WREG32_SMC(DPM_TABLE_475, tmp);
3587 	}
3588 
3589 	return ci_enable_uvd_dpm(rdev, !gate);
3590 }
3591 
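/* Return the first VCE level that satisfies the assumed minimum evclk, falling back to the last level. */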
3592 static u8 ci_get_vce_boot_level(struct radeon_device *rdev)
3593 {
3594 	u8 i;
3595 	u32 min_evclk = 30000; /* ??? */
3596 	struct radeon_vce_clock_voltage_dependency_table *table =
3597 		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
3598 
3599 	for (i = 0; i < table->count; i++) {
3600 		if (table->entries[i].evclk >= min_evclk)
3601 			return i;
3602 	}
3603 
3604 	return table->count - 1;
3605 }
3606 
3607 static int ci_update_vce_dpm(struct radeon_device *rdev,
3608 			     struct radeon_ps *radeon_new_state,
3609 			     struct radeon_ps *radeon_current_state)
3610 {
3611 	struct ci_power_info *pi = ci_get_pi(rdev);
3612 	int ret = 0;
3613 	u32 tmp;
3614 
3615 	if (radeon_current_state->evclk != radeon_new_state->evclk) {
3616 		if (radeon_new_state->evclk) {
3617 			/* turn the clocks on when encoding */
3618 			cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false);
3619 
3620 			pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev);
3621 			tmp = RREG32_SMC(DPM_TABLE_475);
3622 			tmp &= ~VceBootLevel_MASK;
3623 			tmp |= VceBootLevel(pi->smc_state_table.VceBootLevel);
3624 			WREG32_SMC(DPM_TABLE_475, tmp);
3625 
3626 			ret = ci_enable_vce_dpm(rdev, true);
3627 		} else {
3628 			/* turn the clocks off when not encoding */
3629 			cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true);
3630 
3631 			ret = ci_enable_vce_dpm(rdev, false);
3632 		}
3633 	}
3634 	return ret;
3635 }
3636 
3637 #if 0
3638 static int ci_update_samu_dpm(struct radeon_device *rdev, bool gate)
3639 {
3640 	return ci_enable_samu_dpm(rdev, gate);
3641 }
3642 
3643 static int ci_update_acp_dpm(struct radeon_device *rdev, bool gate)
3644 {
3645 	struct ci_power_info *pi = ci_get_pi(rdev);
3646 	u32 tmp;
3647 
3648 	if (!gate) {
3649 		pi->smc_state_table.AcpBootLevel = 0;
3650 
3651 		tmp = RREG32_SMC(DPM_TABLE_475);
3652 		tmp &= ~AcpBootLevel_MASK;
3653 		tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
3654 		WREG32_SMC(DPM_TABLE_475, tmp);
3655 	}
3656 
3657 	return ci_enable_acp_dpm(rdev, !gate);
3658 }
3659 #endif
3660 
3661 static int ci_generate_dpm_level_enable_mask(struct radeon_device *rdev,
3662 					     struct radeon_ps *radeon_state)
3663 {
3664 	struct ci_power_info *pi = ci_get_pi(rdev);
3665 	int ret;
3666 
3667 	ret = ci_trim_dpm_states(rdev, radeon_state);
3668 	if (ret)
3669 		return ret;
3670 
3671 	pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
3672 		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
3673 	pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
3674 		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
3675 	pi->last_mclk_dpm_enable_mask =
3676 		pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
3677 	if (pi->uvd_enabled) {
3678 		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
3679 			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
3680 	}
3681 	pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
3682 		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
3683 
3684 	return 0;
3685 }
3686 
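/* Find the lowest set bit; callers must guarantee a non-zero level_mask. */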
3687 static u32 ci_get_lowest_enabled_level(struct radeon_device *rdev,
3688 				       u32 level_mask)
3689 {
3690 	u32 level = 0;
3691 
3692 	while ((level_mask & (1 << level)) == 0)
3693 		level++;
3694 
3695 	return level;
3696 }
3697 
3698 
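/*
 * Pin DPM at the highest or lowest enabled level (or unpin it for AUTO),
 * then poll the TARGET_AND_CURRENT_PROFILE_INDEX registers until the SMC
 * reports that the forced level has taken effect.
 */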
3699 int ci_dpm_force_performance_level(struct radeon_device *rdev,
3700 				   enum radeon_dpm_forced_level level)
3701 {
3702 	struct ci_power_info *pi = ci_get_pi(rdev);
3703 	PPSMC_Result smc_result;
3704 	u32 tmp, levels, i;
3705 	int ret;
3706 
3707 	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
3708 		if ((!pi->sclk_dpm_key_disabled) &&
3709 		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3710 			levels = 0;
3711 			tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
3712 			while (tmp >>= 1)
3713 				levels++;
3714 			if (levels) {
3715 				ret = ci_dpm_force_state_sclk(rdev, levels);
3716 				if (ret)
3717 					return ret;
3718 				for (i = 0; i < rdev->usec_timeout; i++) {
3719 					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3720 					       CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
3721 					if (tmp == levels)
3722 						break;
3723 					udelay(1);
3724 				}
3725 			}
3726 		}
3727 		if ((!pi->mclk_dpm_key_disabled) &&
3728 		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3729 			levels = 0;
3730 			tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
3731 			while (tmp >>= 1)
3732 				levels++;
3733 			if (levels) {
3734 				ret = ci_dpm_force_state_mclk(rdev, levels);
3735 				if (ret)
3736 					return ret;
3737 				for (i = 0; i < rdev->usec_timeout; i++) {
3738 					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3739 					       CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
3740 					if (tmp == levels)
3741 						break;
3742 					udelay(1);
3743 				}
3744 			}
3745 		}
3746 		if ((!pi->pcie_dpm_key_disabled) &&
3747 		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3748 			levels = 0;
3749 			tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
3750 			while (tmp >>= 1)
3751 				levels++;
3752 			if (levels) {
3753 				ret = ci_dpm_force_state_pcie(rdev, levels);
3754 				if (ret)
3755 					return ret;
3756 				for (i = 0; i < rdev->usec_timeout; i++) {
3757 					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
3758 					       CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
3759 					if (tmp == levels)
3760 						break;
3761 					udelay(1);
3762 				}
3763 			}
3764 		}
3765 	} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
3766 		if ((!pi->sclk_dpm_key_disabled) &&
3767 		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3768 			levels = ci_get_lowest_enabled_level(rdev,
3769 							     pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3770 			ret = ci_dpm_force_state_sclk(rdev, levels);
3771 			if (ret)
3772 				return ret;
3773 			for (i = 0; i < rdev->usec_timeout; i++) {
3774 				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3775 				       CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
3776 				if (tmp == levels)
3777 					break;
3778 				udelay(1);
3779 			}
3780 		}
3781 		if ((!pi->mclk_dpm_key_disabled) &&
3782 		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3783 			levels = ci_get_lowest_enabled_level(rdev,
3784 							     pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3785 			ret = ci_dpm_force_state_mclk(rdev, levels);
3786 			if (ret)
3787 				return ret;
3788 			for (i = 0; i < rdev->usec_timeout; i++) {
3789 				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3790 				       CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
3791 				if (tmp == levels)
3792 					break;
3793 				udelay(1);
3794 			}
3795 		}
3796 		if ((!pi->pcie_dpm_key_disabled) &&
3797 		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3798 			levels = ci_get_lowest_enabled_level(rdev,
3799 							     pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3800 			ret = ci_dpm_force_state_pcie(rdev, levels);
3801 			if (ret)
3802 				return ret;
3803 			for (i = 0; i < rdev->usec_timeout; i++) {
3804 				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
3805 				       CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
3806 				if (tmp == levels)
3807 					break;
3808 				udelay(1);
3809 			}
3810 		}
3811 	} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
3812 		if (!pi->sclk_dpm_key_disabled) {
3813 			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel);
3814 			if (smc_result != PPSMC_Result_OK)
3815 				return -EINVAL;
3816 		}
3817 		if (!pi->mclk_dpm_key_disabled) {
3818 			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_NoForcedLevel);
3819 			if (smc_result != PPSMC_Result_OK)
3820 				return -EINVAL;
3821 		}
3822 		if (!pi->pcie_dpm_key_disabled) {
3823 			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_UnForceLevel);
3824 			if (smc_result != PPSMC_Result_OK)
3825 				return -EINVAL;
3826 		}
3827 	}
3828 
3829 	rdev->pm.dpm.forced_level = level;
3830 
3831 	return 0;
3832 }
3833 
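/*
 * Append derived entries (EMRS/MRS/MRS1, plus MC_PMG_AUTO_CMD on non-GDDR5
 * boards) for registers keyed off MC_SEQ_MISC1 and MC_SEQ_RESERVE_M,
 * starting at table->last.
 */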
3834 static int ci_set_mc_special_registers(struct radeon_device *rdev,
3835 				       struct ci_mc_reg_table *table)
3836 {
3837 	struct ci_power_info *pi = ci_get_pi(rdev);
3838 	u8 i, j, k;
3839 	u32 temp_reg;
3840 
3841 	for (i = 0, j = table->last; i < table->last; i++) {
3842 		if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3843 			return -EINVAL;
3844 		switch (table->mc_reg_address[i].s1 << 2) {
3845 		case MC_SEQ_MISC1:
3846 			temp_reg = RREG32(MC_PMG_CMD_EMRS);
3847 			table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
3848 			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
3849 			for (k = 0; k < table->num_entries; k++) {
3850 				table->mc_reg_table_entry[k].mc_data[j] =
3851 					((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
3852 			}
3853 			j++;
3854 			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3855 				return -EINVAL;
3856 
3857 			temp_reg = RREG32(MC_PMG_CMD_MRS);
3858 			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
3859 			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
3860 			for (k = 0; k < table->num_entries; k++) {
3861 				table->mc_reg_table_entry[k].mc_data[j] =
3862 					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
3863 				if (!pi->mem_gddr5)
3864 					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
3865 			}
3866 			j++;
3867 			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3868 				return -EINVAL;
3869 
3870 			if (!pi->mem_gddr5) {
3871 				table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2;
3872 				table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2;
3873 				for (k = 0; k < table->num_entries; k++) {
3874 					table->mc_reg_table_entry[k].mc_data[j] =
3875 						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
3876 				}
3877 				j++;
3878 				if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3879 					return -EINVAL;
3880 			}
3881 			break;
3882 		case MC_SEQ_RESERVE_M:
3883 			temp_reg = RREG32(MC_PMG_CMD_MRS1);
3884 			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
3885 			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
3886 			for (k = 0; k < table->num_entries; k++) {
3887 				table->mc_reg_table_entry[k].mc_data[j] =
3888 					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
3889 			}
3890 			j++;
3891 			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3892 				return -EINVAL;
3893 			break;
3894 		default:
3895 			break;
3896 		}
3897 
3898 	}
3899 
3900 	table->last = j;
3901 
3902 	return 0;
3903 }
3904 
3905 static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
3906 {
3907 	bool result = true;
3908 
3909 	switch (in_reg) {
3910 	case MC_SEQ_RAS_TIMING >> 2:
3911 		*out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
3912 		break;
3913 	case MC_SEQ_DLL_STBY >> 2:
3914 		*out_reg = MC_SEQ_DLL_STBY_LP >> 2;
3915 		break;
3916 	case MC_SEQ_G5PDX_CMD0 >> 2:
3917 		*out_reg = MC_SEQ_G5PDX_CMD0_LP >> 2;
3918 		break;
3919 	case MC_SEQ_G5PDX_CMD1 >> 2:
3920 		*out_reg = MC_SEQ_G5PDX_CMD1_LP >> 2;
3921 		break;
3922 	case MC_SEQ_G5PDX_CTRL >> 2:
3923 		*out_reg = MC_SEQ_G5PDX_CTRL_LP >> 2;
3924 		break;
3925 	case MC_SEQ_CAS_TIMING >> 2:
3926 		*out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
3927 		break;
3928 	case MC_SEQ_MISC_TIMING >> 2:
3929 		*out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
3930 		break;
3931 	case MC_SEQ_MISC_TIMING2 >> 2:
3932 		*out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
3933 		break;
3934 	case MC_SEQ_PMG_DVS_CMD >> 2:
3935 		*out_reg = MC_SEQ_PMG_DVS_CMD_LP >> 2;
3936 		break;
3937 	case MC_SEQ_PMG_DVS_CTL >> 2:
3938 		*out_reg = MC_SEQ_PMG_DVS_CTL_LP >> 2;
3939 		break;
3940 	case MC_SEQ_RD_CTL_D0 >> 2:
3941 		*out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
3942 		break;
3943 	case MC_SEQ_RD_CTL_D1 >> 2:
3944 		*out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
3945 		break;
3946 	case MC_SEQ_WR_CTL_D0 >> 2:
3947 		*out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
3948 		break;
3949 	case MC_SEQ_WR_CTL_D1 >> 2:
3950 		*out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
3951 		break;
3952 	case MC_PMG_CMD_EMRS >> 2:
3953 		*out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
3954 		break;
3955 	case MC_PMG_CMD_MRS >> 2:
3956 		*out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
3957 		break;
3958 	case MC_PMG_CMD_MRS1 >> 2:
3959 		*out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
3960 		break;
3961 	case MC_SEQ_PMG_TIMING >> 2:
3962 		*out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
3963 		break;
3964 	case MC_PMG_CMD_MRS2 >> 2:
3965 		*out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
3966 		break;
3967 	case MC_SEQ_WR_CTL_2 >> 2:
3968 		*out_reg = MC_SEQ_WR_CTL_2_LP >> 2;
3969 		break;
3970 	default:
3971 		result = false;
3972 		break;
3973 	}
3974 
3975 	return result;
3976 }
3977 
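/* Flag each register whose value actually differs between AC timing entries. */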
3978 static void ci_set_valid_flag(struct ci_mc_reg_table *table)
3979 {
3980 	u8 i, j;
3981 
3982 	for (i = 0; i < table->last; i++) {
3983 		for (j = 1; j < table->num_entries; j++) {
3984 			if (table->mc_reg_table_entry[j-1].mc_data[i] !=
3985 			    table->mc_reg_table_entry[j].mc_data[i]) {
3986 				table->valid_flag |= 1 << i;
3987 				break;
3988 			}
3989 		}
3990 	}
3991 }
3992 
3993 static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
3994 {
3995 	u32 i;
3996 	u16 address;
3997 
3998 	for (i = 0; i < table->last; i++) {
3999 		table->mc_reg_address[i].s0 =
4000 			ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
4001 			address : table->mc_reg_address[i].s1;
4002 	}
4003 }
4004 
4005 static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
4006 				      struct ci_mc_reg_table *ci_table)
4007 {
4008 	u8 i, j;
4009 
4010 	if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4011 		return -EINVAL;
4012 	if (table->num_entries > MAX_AC_TIMING_ENTRIES)
4013 		return -EINVAL;
4014 
4015 	for (i = 0; i < table->last; i++)
4016 		ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
4017 
4018 	ci_table->last = table->last;
4019 
4020 	for (i = 0; i < table->num_entries; i++) {
4021 		ci_table->mc_reg_table_entry[i].mclk_max =
4022 			table->mc_reg_table_entry[i].mclk_max;
4023 		for (j = 0; j < table->last; j++)
4024 			ci_table->mc_reg_table_entry[i].mc_data[j] =
4025 				table->mc_reg_table_entry[i].mc_data[j];
4026 	}
4027 	ci_table->num_entries = table->num_entries;
4028 
4029 	return 0;
4030 }
4031 
4032 static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
4033 {
4034 	struct ci_power_info *pi = ci_get_pi(rdev);
4035 	struct atom_mc_reg_table *table;
4036 	struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
4037 	u8 module_index = rv770_get_memory_module_index(rdev);
4038 	int ret;
4039 
4040 	table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
4041 	if (!table)
4042 		return -ENOMEM;
4043 
4044 	WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
4045 	WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
4046 	WREG32(MC_SEQ_DLL_STBY_LP, RREG32(MC_SEQ_DLL_STBY));
4047 	WREG32(MC_SEQ_G5PDX_CMD0_LP, RREG32(MC_SEQ_G5PDX_CMD0));
4048 	WREG32(MC_SEQ_G5PDX_CMD1_LP, RREG32(MC_SEQ_G5PDX_CMD1));
4049 	WREG32(MC_SEQ_G5PDX_CTRL_LP, RREG32(MC_SEQ_G5PDX_CTRL));
4050 	WREG32(MC_SEQ_PMG_DVS_CMD_LP, RREG32(MC_SEQ_PMG_DVS_CMD));
4051 	WREG32(MC_SEQ_PMG_DVS_CTL_LP, RREG32(MC_SEQ_PMG_DVS_CTL));
4052 	WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
4053 	WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
4054 	WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
4055 	WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
4056 	WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
4057 	WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
4058 	WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
4059 	WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
4060 	WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
4061 	WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
4062 	WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
4063 	WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
4064 
4065 	ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
4066 	if (ret)
4067 		goto init_mc_done;
4068 
4069 	ret = ci_copy_vbios_mc_reg_table(table, ci_table);
4070 	if (ret)
4071 		goto init_mc_done;
4072 
4073 	ci_set_s0_mc_reg_index(ci_table);
4074 
4075 	ret = ci_set_mc_special_registers(rdev, ci_table);
4076 	if (ret)
4077 		goto init_mc_done;
4078 
4079 	ci_set_valid_flag(ci_table);
4080 
4081 init_mc_done:
4082 	kfree(table);
4083 
4084 	return ret;
4085 }
4086 
4087 static int ci_populate_mc_reg_addresses(struct radeon_device *rdev,
4088 					SMU7_Discrete_MCRegisters *mc_reg_table)
4089 {
4090 	struct ci_power_info *pi = ci_get_pi(rdev);
4091 	u32 i, j;
4092 
4093 	for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
4094 		if (pi->mc_reg_table.valid_flag & (1 << j)) {
4095 			if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4096 				return -EINVAL;
4097 			mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
4098 			mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
4099 			i++;
4100 		}
4101 	}
4102 
4103 	mc_reg_table->last = (u8)i;
4104 
4105 	return 0;
4106 }
4107 
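/* Pack only the flagged (valid) register values, byte-swapped for the SMC. */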
4108 static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
4109 				    SMU7_Discrete_MCRegisterSet *data,
4110 				    u32 num_entries, u32 valid_flag)
4111 {
4112 	u32 i, j;
4113 
4114 	for (i = 0, j = 0; j < num_entries; j++) {
4115 		if (valid_flag & (1 << j)) {
4116 			data->value[i] = cpu_to_be32(entry->mc_data[j]);
4117 			i++;
4118 		}
4119 	}
4120 }
4121 
4122 static void ci_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
4123 						 const u32 memory_clock,
4124 						 SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
4125 {
4126 	struct ci_power_info *pi = ci_get_pi(rdev);
4127 	u32 i = 0;
4128 
4129 	for (i = 0; i < pi->mc_reg_table.num_entries; i++) {
4130 		if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
4131 			break;
4132 	}
4133 
4134 	if ((i == pi->mc_reg_table.num_entries) && (i > 0))
4135 		--i;
4136 
4137 	ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
4138 				mc_reg_table_data, pi->mc_reg_table.last,
4139 				pi->mc_reg_table.valid_flag);
4140 }
4141 
4142 static void ci_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
4143 					   SMU7_Discrete_MCRegisters *mc_reg_table)
4144 {
4145 	struct ci_power_info *pi = ci_get_pi(rdev);
4146 	u32 i;
4147 
4148 	for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
4149 		ci_convert_mc_reg_table_entry_to_smc(rdev,
4150 						     pi->dpm_table.mclk_table.dpm_levels[i].value,
4151 						     &mc_reg_table->data[i]);
4152 }
4153 
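/*
 * Build the complete MC register table (addresses plus per-mclk-level
 * data) and upload it to SMC memory.
 */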
4154 static int ci_populate_initial_mc_reg_table(struct radeon_device *rdev)
4155 {
4156 	struct ci_power_info *pi = ci_get_pi(rdev);
4157 	int ret;
4158 
4159 	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4160 
4161 	ret = ci_populate_mc_reg_addresses(rdev, &pi->smc_mc_reg_table);
4162 	if (ret)
4163 		return ret;
4164 	ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
4165 
4166 	return ci_copy_bytes_to_smc(rdev,
4167 				    pi->mc_reg_table_start,
4168 				    (u8 *)&pi->smc_mc_reg_table,
4169 				    sizeof(SMU7_Discrete_MCRegisters),
4170 				    pi->sram_end);
4171 }
4172 
4173 static int ci_update_and_upload_mc_reg_table(struct radeon_device *rdev)
4174 {
4175 	struct ci_power_info *pi = ci_get_pi(rdev);
4176 
4177 	if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
4178 		return 0;
4179 
4180 	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4181 
4182 	ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
4183 
4184 	return ci_copy_bytes_to_smc(rdev,
4185 				    pi->mc_reg_table_start +
4186 				    offsetof(SMU7_Discrete_MCRegisters, data[0]),
4187 				    (u8 *)&pi->smc_mc_reg_table.data[0],
4188 				    sizeof(SMU7_Discrete_MCRegisterSet) *
4189 				    pi->dpm_table.mclk_table.count,
4190 				    pi->sram_end);
4191 }
4192 
4193 static void ci_enable_voltage_control(struct radeon_device *rdev)
4194 {
4195 	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
4196 
4197 	tmp |= VOLT_PWRMGT_EN;
4198 	WREG32_SMC(GENERAL_PWRMGT, tmp);
4199 }
4200 
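/* Return the fastest PCIe gen used by any performance level in the state. */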
4201 static enum radeon_pcie_gen ci_get_maximum_link_speed(struct radeon_device *rdev,
4202 						      struct radeon_ps *radeon_state)
4203 {
4204 	struct ci_ps *state = ci_get_ps(radeon_state);
4205 	int i;
4206 	u16 pcie_speed, max_speed = 0;
4207 
4208 	for (i = 0; i < state->performance_level_count; i++) {
4209 		pcie_speed = state->performance_levels[i].pcie_gen;
4210 		if (max_speed < pcie_speed)
4211 			max_speed = pcie_speed;
4212 	}
4213 
4214 	return max_speed;
4215 }
4216 
4217 static u16 ci_get_current_pcie_speed(struct radeon_device *rdev)
4218 {
4219 	u32 speed_cntl = 0;
4220 
4221 	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
4222 	speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
4223 
4224 	return (u16)speed_cntl;
4225 }
4226 
4227 static int ci_get_current_pcie_lane_number(struct radeon_device *rdev)
4228 {
4229 	u32 link_width = 0;
4230 
4231 	link_width = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL) & LC_LINK_WIDTH_RD_MASK;
4232 	link_width >>= LC_LINK_WIDTH_RD_SHIFT;
4233 
4234 	switch (link_width) {
4235 	case RADEON_PCIE_LC_LINK_WIDTH_X1:
4236 		return 1;
4237 	case RADEON_PCIE_LC_LINK_WIDTH_X2:
4238 		return 2;
4239 	case RADEON_PCIE_LC_LINK_WIDTH_X4:
4240 		return 4;
4241 	case RADEON_PCIE_LC_LINK_WIDTH_X8:
4242 		return 8;
4243 	case RADEON_PCIE_LC_LINK_WIDTH_X12:
4244 		/* not actually supported */
4245 		return 12;
4246 	case RADEON_PCIE_LC_LINK_WIDTH_X0:
4247 	case RADEON_PCIE_LC_LINK_WIDTH_X16:
4248 	default:
4249 		return 16;
4250 	}
4251 }
4252 
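/*
 * If the new state needs a faster link, request the upshift via ACPI now;
 * downshifts are deferred until after the state change (pspp_notify_required).
 */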
4253 static void ci_request_link_speed_change_before_state_change(struct radeon_device *rdev,
4254 							     struct radeon_ps *radeon_new_state,
4255 							     struct radeon_ps *radeon_current_state)
4256 {
4257 	struct ci_power_info *pi = ci_get_pi(rdev);
4258 	enum radeon_pcie_gen target_link_speed =
4259 		ci_get_maximum_link_speed(rdev, radeon_new_state);
4260 	enum radeon_pcie_gen current_link_speed;
4261 
4262 	if (pi->force_pcie_gen == RADEON_PCIE_GEN_INVALID)
4263 		current_link_speed = ci_get_maximum_link_speed(rdev, radeon_current_state);
4264 	else
4265 		current_link_speed = pi->force_pcie_gen;
4266 
4267 	pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
4268 	pi->pspp_notify_required = false;
4269 	if (target_link_speed > current_link_speed) {
4270 		switch (target_link_speed) {
4271 #ifdef CONFIG_ACPI
4272 		case RADEON_PCIE_GEN3:
4273 			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
4274 				break;
4275 			pi->force_pcie_gen = RADEON_PCIE_GEN2;
4276 			if (current_link_speed == RADEON_PCIE_GEN2)
4277 				break;
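			/* fall through */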
4278 		case RADEON_PCIE_GEN2:
4279 			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
4280 				break;
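			/* fall through */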
4281 #endif
4282 		default:
4283 			pi->force_pcie_gen = ci_get_current_pcie_speed(rdev);
4284 			break;
4285 		}
4286 	} else {
4287 		if (target_link_speed < current_link_speed)
4288 			pi->pspp_notify_required = true;
4289 	}
4290 }
4291 
4292 static void ci_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
4293 							   struct radeon_ps *radeon_new_state,
4294 							   struct radeon_ps *radeon_current_state)
4295 {
4296 	struct ci_power_info *pi = ci_get_pi(rdev);
4297 	enum radeon_pcie_gen target_link_speed =
4298 		ci_get_maximum_link_speed(rdev, radeon_new_state);
4299 	u8 request;
4300 
4301 	if (pi->pspp_notify_required) {
4302 		if (target_link_speed == RADEON_PCIE_GEN3)
4303 			request = PCIE_PERF_REQ_PECI_GEN3;
4304 		else if (target_link_speed == RADEON_PCIE_GEN2)
4305 			request = PCIE_PERF_REQ_PECI_GEN2;
4306 		else
4307 			request = PCIE_PERF_REQ_PECI_GEN1;
4308 
4309 		if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
4310 		    (ci_get_current_pcie_speed(rdev) > 0))
4311 			return;
4312 
4313 #ifdef CONFIG_ACPI
4314 		radeon_acpi_pcie_performance_request(rdev, request, false);
4315 #endif
4316 	}
4317 }
4318 
4319 static int ci_set_private_data_variables_based_on_pptable(struct radeon_device *rdev)
4320 {
4321 	struct ci_power_info *pi = ci_get_pi(rdev);
4322 	struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
4323 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
4324 	struct radeon_clock_voltage_dependency_table *allowed_mclk_vddc_table =
4325 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
4326 	struct radeon_clock_voltage_dependency_table *allowed_mclk_vddci_table =
4327 		&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
4328 
4329 	if (allowed_sclk_vddc_table == NULL)
4330 		return -EINVAL;
4331 	if (allowed_sclk_vddc_table->count < 1)
4332 		return -EINVAL;
4333 	if (allowed_mclk_vddc_table == NULL)
4334 		return -EINVAL;
4335 	if (allowed_mclk_vddc_table->count < 1)
4336 		return -EINVAL;
4337 	if (allowed_mclk_vddci_table == NULL)
4338 		return -EINVAL;
4339 	if (allowed_mclk_vddci_table->count < 1)
4340 		return -EINVAL;
4341 
4342 	pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
4343 	pi->max_vddc_in_pp_table =
4344 		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4345 
4346 	pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
4347 	pi->max_vddci_in_pp_table =
4348 		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4349 
4350 	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
4351 		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
4352 	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
4353 		allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
4354 	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
4355 		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4356 	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
4357 		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4358 
4359 	return 0;
4360 }
4361 
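/*
 * Replace a vddc leakage index from the vbios with the actual voltage
 * recorded in the leakage table, when a match exists.
 */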
4362 static void ci_patch_with_vddc_leakage(struct radeon_device *rdev, u16 *vddc)
4363 {
4364 	struct ci_power_info *pi = ci_get_pi(rdev);
4365 	struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
4366 	u32 leakage_index;
4367 
4368 	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
4369 		if (leakage_table->leakage_id[leakage_index] == *vddc) {
4370 			*vddc = leakage_table->actual_voltage[leakage_index];
4371 			break;
4372 		}
4373 	}
4374 }
4375 
4376 static void ci_patch_with_vddci_leakage(struct radeon_device *rdev, u16 *vddci)
4377 {
4378 	struct ci_power_info *pi = ci_get_pi(rdev);
4379 	struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
4380 	u32 leakage_index;
4381 
4382 	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
4383 		if (leakage_table->leakage_id[leakage_index] == *vddci) {
4384 			*vddci = leakage_table->actual_voltage[leakage_index];
4385 			break;
4386 		}
4387 	}
4388 }
4389 
4390 static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4391 								      struct radeon_clock_voltage_dependency_table *table)
4392 {
4393 	u32 i;
4394 
4395 	if (table) {
4396 		for (i = 0; i < table->count; i++)
4397 			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4398 	}
4399 }
4400 
4401 static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct radeon_device *rdev,
4402 								       struct radeon_clock_voltage_dependency_table *table)
4403 {
4404 	u32 i;
4405 
4406 	if (table) {
4407 		for (i = 0; i < table->count; i++)
4408 			ci_patch_with_vddci_leakage(rdev, &table->entries[i].v);
4409 	}
4410 }
4411 
4412 static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4413 									  struct radeon_vce_clock_voltage_dependency_table *table)
4414 {
4415 	u32 i;
4416 
4417 	if (table) {
4418 		for (i = 0; i < table->count; i++)
4419 			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4420 	}
4421 }
4422 
4423 static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4424 									  struct radeon_uvd_clock_voltage_dependency_table *table)
4425 {
4426 	u32 i;
4427 
4428 	if (table) {
4429 		for (i = 0; i < table->count; i++)
4430 			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4431 	}
4432 }
4433 
4434 static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct radeon_device *rdev,
4435 								   struct radeon_phase_shedding_limits_table *table)
4436 {
4437 	u32 i;
4438 
4439 	if (table) {
4440 		for (i = 0; i < table->count; i++)
4441 			ci_patch_with_vddc_leakage(rdev, &table->entries[i].voltage);
4442 	}
4443 }
4444 
4445 static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct radeon_device *rdev,
4446 							    struct radeon_clock_and_voltage_limits *table)
4447 {
4448 	if (table) {
4449 		ci_patch_with_vddc_leakage(rdev, (u16 *)&table->vddc);
4450 		ci_patch_with_vddci_leakage(rdev, (u16 *)&table->vddci);
4451 	}
4452 }
4453 
4454 static void ci_patch_cac_leakage_table_with_vddc_leakage(struct radeon_device *rdev,
4455 							 struct radeon_cac_leakage_table *table)
4456 {
4457 	u32 i;
4458 
4459 	if (table) {
4460 		for (i = 0; i < table->count; i++)
4461 			ci_patch_with_vddc_leakage(rdev, &table->entries[i].vddc);
4462 	}
4463 }
4464 
4465 static void ci_patch_dependency_tables_with_leakage(struct radeon_device *rdev)
4466 {
4467 
4468 	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4469 								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
4470 	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4471 								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
4472 	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4473 								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
4474 	ci_patch_clock_voltage_dependency_table_with_vddci_leakage(rdev,
4475 								   &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
4476 	ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4477 								      &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
4478 	ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4479 								      &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
4480 	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4481 								  &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
4482 	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4483 								  &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
4484 	ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(rdev,
4485 							       &rdev->pm.dpm.dyn_state.phase_shedding_limits_table);
4486 	ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
4487 							&rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
4488 	ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
4489 							&rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
4490 	ci_patch_cac_leakage_table_with_vddc_leakage(rdev,
4491 						     &rdev->pm.dpm.dyn_state.cac_leakage_table);
4492 
4493 }
4494 
4495 static void ci_get_memory_type(struct radeon_device *rdev)
4496 {
4497 	struct ci_power_info *pi = ci_get_pi(rdev);
4498 	u32 tmp;
4499 
4500 	tmp = RREG32(MC_SEQ_MISC0);
4501 
4502 	if (((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT) ==
4503 	    MC_SEQ_MISC0_GDDR5_VALUE)
4504 		pi->mem_gddr5 = true;
4505 	else
4506 		pi->mem_gddr5 = false;
4507 
4508 }
4509 
4510 static void ci_update_current_ps(struct radeon_device *rdev,
4511 				 struct radeon_ps *rps)
4512 {
4513 	struct ci_ps *new_ps = ci_get_ps(rps);
4514 	struct ci_power_info *pi = ci_get_pi(rdev);
4515 
4516 	pi->current_rps = *rps;
4517 	pi->current_ps = *new_ps;
4518 	pi->current_rps.ps_priv = &pi->current_ps;
4519 }
4520 
4521 static void ci_update_requested_ps(struct radeon_device *rdev,
4522 				   struct radeon_ps *rps)
4523 {
4524 	struct ci_ps *new_ps = ci_get_ps(rps);
4525 	struct ci_power_info *pi = ci_get_pi(rdev);
4526 
4527 	pi->requested_rps = *rps;
4528 	pi->requested_ps = *new_ps;
4529 	pi->requested_rps.ps_priv = &pi->requested_ps;
4530 }
4531 
4532 int ci_dpm_pre_set_power_state(struct radeon_device *rdev)
4533 {
4534 	struct ci_power_info *pi = ci_get_pi(rdev);
4535 	struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
4536 	struct radeon_ps *new_ps = &requested_ps;
4537 
4538 	ci_update_requested_ps(rdev, new_ps);
4539 
4540 	ci_apply_state_adjust_rules(rdev, &pi->requested_rps);
4541 
4542 	return 0;
4543 }
4544 
4545 void ci_dpm_post_set_power_state(struct radeon_device *rdev)
4546 {
4547 	struct ci_power_info *pi = ci_get_pi(rdev);
4548 	struct radeon_ps *new_ps = &pi->requested_rps;
4549 
4550 	ci_update_current_ps(rdev, new_ps);
4551 }
4552 
4553 
4554 void ci_dpm_setup_asic(struct radeon_device *rdev)
4555 {
4556 	int r;
4557 
4558 	r = ci_mc_load_microcode(rdev);
4559 	if (r)
4560 		DRM_ERROR("Failed to load MC firmware!\n");
4561 	ci_read_clock_registers(rdev);
4562 	ci_get_memory_type(rdev);
4563 	ci_enable_acpi_power_management(rdev);
4564 	ci_init_sclk_t(rdev);
4565 }
4566 
4567 int ci_dpm_enable(struct radeon_device *rdev)
4568 {
4569 	struct ci_power_info *pi = ci_get_pi(rdev);
4570 	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
4571 	int ret;
4572 
4573 	if (ci_is_smc_running(rdev))
4574 		return -EINVAL;
4575 	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
4576 		ci_enable_voltage_control(rdev);
4577 		ret = ci_construct_voltage_tables(rdev);
4578 		if (ret) {
4579 			DRM_ERROR("ci_construct_voltage_tables failed\n");
4580 			return ret;
4581 		}
4582 	}
4583 	if (pi->caps_dynamic_ac_timing) {
4584 		ret = ci_initialize_mc_reg_table(rdev);
4585 		if (ret)
4586 			pi->caps_dynamic_ac_timing = false;
4587 	}
4588 	if (pi->dynamic_ss)
4589 		ci_enable_spread_spectrum(rdev, true);
4590 	if (pi->thermal_protection)
4591 		ci_enable_thermal_protection(rdev, true);
4592 	ci_program_sstp(rdev);
4593 	ci_enable_display_gap(rdev);
4594 	ci_program_vc(rdev);
4595 	ret = ci_upload_firmware(rdev);
4596 	if (ret) {
4597 		DRM_ERROR("ci_upload_firmware failed\n");
4598 		return ret;
4599 	}
4600 	ret = ci_process_firmware_header(rdev);
4601 	if (ret) {
4602 		DRM_ERROR("ci_process_firmware_header failed\n");
4603 		return ret;
4604 	}
4605 	ret = ci_initial_switch_from_arb_f0_to_f1(rdev);
4606 	if (ret) {
4607 		DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
4608 		return ret;
4609 	}
4610 	ret = ci_init_smc_table(rdev);
4611 	if (ret) {
4612 		DRM_ERROR("ci_init_smc_table failed\n");
4613 		return ret;
4614 	}
4615 	ret = ci_init_arb_table_index(rdev);
4616 	if (ret) {
4617 		DRM_ERROR("ci_init_arb_table_index failed\n");
4618 		return ret;
4619 	}
4620 	if (pi->caps_dynamic_ac_timing) {
4621 		ret = ci_populate_initial_mc_reg_table(rdev);
4622 		if (ret) {
4623 			DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
4624 			return ret;
4625 		}
4626 	}
4627 	ret = ci_populate_pm_base(rdev);
4628 	if (ret) {
4629 		DRM_ERROR("ci_populate_pm_base failed\n");
4630 		return ret;
4631 	}
4632 	ci_dpm_start_smc(rdev);
4633 	ci_enable_vr_hot_gpio_interrupt(rdev);
4634 	ret = ci_notify_smc_display_change(rdev, false);
4635 	if (ret) {
4636 		DRM_ERROR("ci_notify_smc_display_change failed\n");
4637 		return ret;
4638 	}
4639 	ci_enable_sclk_control(rdev, true);
4640 	ret = ci_enable_ulv(rdev, true);
4641 	if (ret) {
4642 		DRM_ERROR("ci_enable_ulv failed\n");
4643 		return ret;
4644 	}
4645 	ret = ci_enable_ds_master_switch(rdev, true);
4646 	if (ret) {
4647 		DRM_ERROR("ci_enable_ds_master_switch failed\n");
4648 		return ret;
4649 	}
4650 	ret = ci_start_dpm(rdev);
4651 	if (ret) {
4652 		DRM_ERROR("ci_start_dpm failed\n");
4653 		return ret;
4654 	}
4655 	ret = ci_enable_didt(rdev, true);
4656 	if (ret) {
4657 		DRM_ERROR("ci_enable_didt failed\n");
4658 		return ret;
4659 	}
4660 	ret = ci_enable_smc_cac(rdev, true);
4661 	if (ret) {
4662 		DRM_ERROR("ci_enable_smc_cac failed\n");
4663 		return ret;
4664 	}
4665 	ret = ci_enable_power_containment(rdev, true);
4666 	if (ret) {
4667 		DRM_ERROR("ci_enable_power_containment failed\n");
4668 		return ret;
4669 	}
4670 
4671 	ret = ci_power_control_set_level(rdev);
4672 	if (ret) {
4673 		DRM_ERROR("ci_power_control_set_level failed\n");
4674 		return ret;
4675 	}
4676 
4677 	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
4678 
4679 	ci_update_current_ps(rdev, boot_ps);
4680 
4681 	return 0;
4682 }
4683 
4684 int ci_dpm_late_enable(struct radeon_device *rdev)
4685 {
4686 	int ret;
4687 
4688 	if (rdev->irq.installed &&
4689 	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
4690 #if 0
4691 		PPSMC_Result result;
4692 #endif
4693 		ret = ci_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
4694 		if (ret) {
4695 			DRM_ERROR("ci_set_thermal_temperature_range failed\n");
4696 			return ret;
4697 		}
4698 		rdev->irq.dpm_thermal = true;
4699 		radeon_irq_set(rdev);
4700 #if 0
4701 		result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
4702 
4703 		if (result != PPSMC_Result_OK)
4704 			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
4705 #endif
4706 	}
4707 
4708 	ci_dpm_powergate_uvd(rdev, true);
4709 
4710 	return 0;
4711 }
4712 
4713 void ci_dpm_disable(struct radeon_device *rdev)
4714 {
4715 	struct ci_power_info *pi = ci_get_pi(rdev);
4716 	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
4717 
4718 	ci_dpm_powergate_uvd(rdev, false);
4719 
4720 	if (!ci_is_smc_running(rdev))
4721 		return;
4722 
4723 	if (pi->thermal_protection)
4724 		ci_enable_thermal_protection(rdev, false);
4725 	ci_enable_power_containment(rdev, false);
4726 	ci_enable_smc_cac(rdev, false);
4727 	ci_enable_didt(rdev, false);
4728 	ci_enable_spread_spectrum(rdev, false);
4729 	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
4730 	ci_stop_dpm(rdev);
4731 	ci_enable_ds_master_switch(rdev, true);
4732 	ci_enable_ulv(rdev, false);
4733 	ci_clear_vc(rdev);
4734 	ci_reset_to_default(rdev);
4735 	ci_dpm_stop_smc(rdev);
4736 	ci_force_switch_to_arb_f0(rdev);
4737 
4738 	ci_update_current_ps(rdev, boot_ps);
4739 }
4740 
4741 int ci_dpm_set_power_state(struct radeon_device *rdev)
4742 {
4743 	struct ci_power_info *pi = ci_get_pi(rdev);
4744 	struct radeon_ps *new_ps = &pi->requested_rps;
4745 	struct radeon_ps *old_ps = &pi->current_rps;
4746 	int ret;
4747 
4748 	ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps);
4749 	if (pi->pcie_performance_request)
4750 		ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps);
4751 	ret = ci_freeze_sclk_mclk_dpm(rdev);
4752 	if (ret) {
4753 		DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
4754 		return ret;
4755 	}
4756 	ret = ci_populate_and_upload_sclk_mclk_dpm_levels(rdev, new_ps);
4757 	if (ret) {
4758 		DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
4759 		return ret;
4760 	}
4761 	ret = ci_generate_dpm_level_enable_mask(rdev, new_ps);
4762 	if (ret) {
4763 		DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
4764 		return ret;
4765 	}
4766 
4767 	ret = ci_update_vce_dpm(rdev, new_ps, old_ps);
4768 	if (ret) {
4769 		DRM_ERROR("ci_update_vce_dpm failed\n");
4770 		return ret;
4771 	}
4772 
4773 	ret = ci_update_sclk_t(rdev);
4774 	if (ret) {
4775 		DRM_ERROR("ci_update_sclk_t failed\n");
4776 		return ret;
4777 	}
4778 	if (pi->caps_dynamic_ac_timing) {
4779 		ret = ci_update_and_upload_mc_reg_table(rdev);
4780 		if (ret) {
4781 			DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
4782 			return ret;
4783 		}
4784 	}
4785 	ret = ci_program_memory_timing_parameters(rdev);
4786 	if (ret) {
4787 		DRM_ERROR("ci_program_memory_timing_parameters failed\n");
4788 		return ret;
4789 	}
4790 	ret = ci_unfreeze_sclk_mclk_dpm(rdev);
4791 	if (ret) {
4792 		DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
4793 		return ret;
4794 	}
4795 	ret = ci_upload_dpm_level_enable_mask(rdev);
4796 	if (ret) {
4797 		DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
4798 		return ret;
4799 	}
4800 	if (pi->pcie_performance_request)
4801 		ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
4802 
4803 	return 0;
4804 }
4805 
4806 #if 0
4807 void ci_dpm_reset_asic(struct radeon_device *rdev)
4808 {
4809 	ci_set_boot_state(rdev);
4810 }
4811 #endif
4812 
4813 void ci_dpm_display_configuration_changed(struct radeon_device *rdev)
4814 {
4815 	ci_program_display_gap(rdev);
4816 }
4817 
4818 union power_info {
4819 	struct _ATOM_POWERPLAY_INFO info;
4820 	struct _ATOM_POWERPLAY_INFO_V2 info_2;
4821 	struct _ATOM_POWERPLAY_INFO_V3 info_3;
4822 	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
4823 	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
4824 	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
4825 };
4826 
4827 union pplib_clock_info {
4828 	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
4829 	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
4830 	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
4831 	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
4832 	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
4833 	struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
4834 };
4835 
4836 union pplib_power_state {
4837 	struct _ATOM_PPLIB_STATE v1;
4838 	struct _ATOM_PPLIB_STATE_V2 v2;
4839 };
4840 
4841 static void ci_parse_pplib_non_clock_info(struct radeon_device *rdev,
4842 					  struct radeon_ps *rps,
4843 					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
4844 					  u8 table_rev)
4845 {
4846 	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
4847 	rps->class = le16_to_cpu(non_clock_info->usClassification);
4848 	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
4849 
4850 	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
4851 		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
4852 		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
4853 	} else {
4854 		rps->vclk = 0;
4855 		rps->dclk = 0;
4856 	}
4857 
4858 	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
4859 		rdev->pm.dpm.boot_ps = rps;
4860 	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
4861 		rdev->pm.dpm.uvd_ps = rps;
4862 }
4863 
4864 static void ci_parse_pplib_clock_info(struct radeon_device *rdev,
4865 				      struct radeon_ps *rps, int index,
4866 				      union pplib_clock_info *clock_info)
4867 {
4868 	struct ci_power_info *pi = ci_get_pi(rdev);
4869 	struct ci_ps *ps = ci_get_ps(rps);
4870 	struct ci_pl *pl = &ps->performance_levels[index];
4871 
4872 	ps->performance_level_count = index + 1;
4873 
4874 	pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
4875 	pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
4876 	pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
4877 	pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;
4878 
4879 	pl->pcie_gen = r600_get_pcie_gen_support(rdev,
4880 						 pi->sys_pcie_mask,
4881 						 pi->vbios_boot_state.pcie_gen_bootup_value,
4882 						 clock_info->ci.ucPCIEGen);
4883 	pl->pcie_lane = r600_get_pcie_lane_support(rdev,
4884 						   pi->vbios_boot_state.pcie_lane_bootup_value,
4885 						   le16_to_cpu(clock_info->ci.usPCIELane));
4886 
4887 	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
4888 		pi->acpi_pcie_gen = pl->pcie_gen;
4889 	}
4890 
4891 	if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
4892 		pi->ulv.supported = true;
4893 		pi->ulv.pl = *pl;
4894 		pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
4895 	}
4896 
4897 	/* patch up boot state */
4898 	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
4899 		pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
4900 		pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
4901 		pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
4902 		pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
4903 	}
4904 
4905 	switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
4906 	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
4907 		pi->use_pcie_powersaving_levels = true;
4908 		if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
4909 			pi->pcie_gen_powersaving.max = pl->pcie_gen;
4910 		if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
4911 			pi->pcie_gen_powersaving.min = pl->pcie_gen;
4912 		if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
4913 			pi->pcie_lane_powersaving.max = pl->pcie_lane;
4914 		if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
4915 			pi->pcie_lane_powersaving.min = pl->pcie_lane;
4916 		break;
4917 	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
4918 		pi->use_pcie_performance_levels = true;
4919 		if (pi->pcie_gen_performance.max < pl->pcie_gen)
4920 			pi->pcie_gen_performance.max = pl->pcie_gen;
4921 		if (pi->pcie_gen_performance.min > pl->pcie_gen)
4922 			pi->pcie_gen_performance.min = pl->pcie_gen;
4923 		if (pi->pcie_lane_performance.max < pl->pcie_lane)
4924 			pi->pcie_lane_performance.max = pl->pcie_lane;
4925 		if (pi->pcie_lane_performance.min > pl->pcie_lane)
4926 			pi->pcie_lane_performance.min = pl->pcie_lane;
4927 		break;
4928 	default:
4929 		break;
4930 	}
4931 }
4932 
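/*
 * Walk the ATOM PowerPlay tables and build rdev->pm.dpm.ps[].  Each
 * v2 state entry is two header bytes (ucNumDPMLevels,
 * nonClockInfoIndex) followed by ucNumDPMLevels clock-info indices,
 * which is why the offset below advances by 2 + ucNumDPMLevels per
 * state.  num_ps is updated as states are parsed so that
 * ci_dpm_fini() can free exactly what was allocated if we bail out
 * on an error path.
 */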
4933 static int ci_parse_power_table(struct radeon_device *rdev)
4934 {
4935 	struct radeon_mode_info *mode_info = &rdev->mode_info;
4936 	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
4937 	union pplib_power_state *power_state;
4938 	int i, j, k, non_clock_array_index, clock_array_index;
4939 	union pplib_clock_info *clock_info;
4940 	struct _StateArray *state_array;
4941 	struct _ClockInfoArray *clock_info_array;
4942 	struct _NonClockInfoArray *non_clock_info_array;
4943 	union power_info *power_info;
4944 	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
4946 	u8 frev, crev;
4947 	u8 *power_state_offset;
4948 	struct ci_ps *ps;
4949 
4950 	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
4951 				   &frev, &crev, &data_offset))
4952 		return -EINVAL;
4953 	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
4954 
4955 	state_array = (struct _StateArray *)
4956 		(mode_info->atom_context->bios + data_offset +
4957 		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
4958 	clock_info_array = (struct _ClockInfoArray *)
4959 		(mode_info->atom_context->bios + data_offset +
4960 		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
4961 	non_clock_info_array = (struct _NonClockInfoArray *)
4962 		(mode_info->atom_context->bios + data_offset +
4963 		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
4964 
4965 	rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
4966 				  state_array->ucNumEntries, GFP_KERNEL);
4967 	if (!rdev->pm.dpm.ps)
4968 		return -ENOMEM;
	power_state_offset = (u8 *)state_array->states;
	rdev->pm.dpm.num_ps = 0;
	for (i = 0; i < state_array->ucNumEntries; i++) {
4971 		u8 *idx;
4972 		power_state = (union pplib_power_state *)power_state_offset;
4973 		non_clock_array_index = power_state->v2.nonClockInfoIndex;
4974 		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
4975 			&non_clock_info_array->nonClockInfo[non_clock_array_index];
		if (!rdev->pm.power_state[i].clock_info)
			return -EINVAL;
		ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
		/*
		 * On failure, the states allocated so far are freed by
		 * ci_dpm_fini() in the caller; freeing rdev->pm.dpm.ps
		 * here as well would cause a double free.
		 */
		if (ps == NULL)
			return -ENOMEM;
4983 		rdev->pm.dpm.ps[i].ps_priv = ps;
4984 		ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
4985 					      non_clock_info,
4986 					      non_clock_info_array->ucEntrySize);
4987 		k = 0;
4988 		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
4989 		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
4990 			clock_array_index = idx[j];
4991 			if (clock_array_index >= clock_info_array->ucNumEntries)
4992 				continue;
4993 			if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
4994 				break;
4995 			clock_info = (union pplib_clock_info *)
4996 				((u8 *)&clock_info_array->clockInfo[0] +
4997 				 (clock_array_index * clock_info_array->ucEntrySize));
4998 			ci_parse_pplib_clock_info(rdev,
4999 						  &rdev->pm.dpm.ps[i], k,
5000 						  clock_info);
5001 			k++;
5002 		}
		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
		rdev->pm.dpm.num_ps = i + 1;
	}
5006 
5007 	/* fill in the vce power states */
5008 	for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
5009 		u32 sclk, mclk;
5010 		clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx;
5011 		clock_info = (union pplib_clock_info *)
5012 			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
5013 		sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5014 		sclk |= clock_info->ci.ucEngineClockHigh << 16;
5015 		mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5016 		mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5017 		rdev->pm.dpm.vce_states[i].sclk = sclk;
5018 		rdev->pm.dpm.vce_states[i].mclk = mclk;
5019 	}
5020 
5021 	return 0;
5022 }
5023 
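/*
 * Read the bootup voltages and default clocks from the VBIOS
 * FirmwareInfo table; the PCIe gen and lane width are sampled from
 * the current hardware state instead.
 */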
5024 static int ci_get_vbios_boot_values(struct radeon_device *rdev,
5025 				    struct ci_vbios_boot_state *boot_state)
5026 {
5027 	struct radeon_mode_info *mode_info = &rdev->mode_info;
5028 	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
5029 	ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
5030 	u8 frev, crev;
5031 	u16 data_offset;
5032 
5033 	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
5034 				   &frev, &crev, &data_offset)) {
5035 		firmware_info =
5036 			(ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
5037 						    data_offset);
5038 		boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
5039 		boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
5040 		boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
5041 		boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(rdev);
5042 		boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(rdev);
5043 		boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
5044 		boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
5045 
5046 		return 0;
5047 	}
5048 	return -EINVAL;
5049 }
5050 
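/*
 * Free everything ci_dpm_init()/ci_parse_power_table() allocated.
 * Safe to call on a partially initialized state, since num_ps tracks
 * how many ps_priv entries actually exist.
 */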
5051 void ci_dpm_fini(struct radeon_device *rdev)
5052 {
5053 	int i;
5054 
5055 	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
5056 		kfree(rdev->pm.dpm.ps[i].ps_priv);
5057 	}
5058 	kfree(rdev->pm.dpm.ps);
5059 	kfree(rdev->pm.dpm.priv);
5060 	kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
5061 	r600_free_extended_power_table(rdev);
5062 }
5063 
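/*
 * One-time DPM setup: allocate the private ci_power_info, parse the
 * VBIOS tables, and fill in the defaults and platform capabilities
 * used by the rest of the CI DPM code.  Any failure tears down with
 * ci_dpm_fini() before returning.
 */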
5064 int ci_dpm_init(struct radeon_device *rdev)
5065 {
5066 	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
5067 	u16 data_offset, size;
5068 	u8 frev, crev;
5069 	struct ci_power_info *pi;
5070 	int ret;
5071 	u32 mask;
5072 
5073 	pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
5074 	if (pi == NULL)
5075 		return -ENOMEM;
5076 	rdev->pm.dpm.priv = pi;
5077 
5078 	ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
5079 	if (ret)
5080 		pi->sys_pcie_mask = 0;
5081 	else
5082 		pi->sys_pcie_mask = mask;
5083 	pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
5084 
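	/*
	 * Seed the min/max trackers inverted (max = GEN1, min = GEN3,
	 * max lanes = 0, min lanes = 16) so that the first state parsed
	 * by ci_parse_pplib_clock_info() initializes them.
	 */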
5085 	pi->pcie_gen_performance.max = RADEON_PCIE_GEN1;
5086 	pi->pcie_gen_performance.min = RADEON_PCIE_GEN3;
5087 	pi->pcie_gen_powersaving.max = RADEON_PCIE_GEN1;
5088 	pi->pcie_gen_powersaving.min = RADEON_PCIE_GEN3;
5089 
5090 	pi->pcie_lane_performance.max = 0;
5091 	pi->pcie_lane_performance.min = 16;
5092 	pi->pcie_lane_powersaving.max = 0;
5093 	pi->pcie_lane_powersaving.min = 16;
5094 
5095 	ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state);
5096 	if (ret) {
5097 		ci_dpm_fini(rdev);
5098 		return ret;
5099 	}
5100 
5101 	ret = r600_get_platform_caps(rdev);
5102 	if (ret) {
5103 		ci_dpm_fini(rdev);
5104 		return ret;
5105 	}
5106 
5107 	ret = r600_parse_extended_power_table(rdev);
5108 	if (ret) {
5109 		ci_dpm_fini(rdev);
5110 		return ret;
5111 	}
5112 
5113 	ret = ci_parse_power_table(rdev);
5114 	if (ret) {
5115 		ci_dpm_fini(rdev);
5116 		return ret;
5117 	}
5118 
	pi->dll_default_on = false;
	pi->sram_end = SMC_RAM_END;
5121 
5122 	pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
5123 	pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
5124 	pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
5125 	pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
5126 	pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
5127 	pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
5128 	pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
5129 	pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;
5130 
5131 	pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
5132 
5133 	pi->sclk_dpm_key_disabled = 0;
5134 	pi->mclk_dpm_key_disabled = 0;
5135 	pi->pcie_dpm_key_disabled = 0;
5136 
5137 	/* mclk dpm is unstable on some R7 260X cards with the old mc ucode */
5138 	if ((rdev->pdev->device == 0x6658) &&
5139 	    (rdev->mc_fw->datasize == (BONAIRE_MC_UCODE_SIZE * 4))) {
5140 		pi->mclk_dpm_key_disabled = 1;
5141 	}
5142 
5143 	pi->caps_sclk_ds = true;
5144 
5145 	pi->mclk_strobe_mode_threshold = 40000;
5146 	pi->mclk_stutter_mode_threshold = 40000;
5147 	pi->mclk_edc_enable_threshold = 40000;
5148 	pi->mclk_edc_wr_enable_threshold = 40000;
5149 
5150 	ci_initialize_powertune_defaults(rdev);
5151 
5152 	pi->caps_fps = false;
5153 
5154 	pi->caps_sclk_throttle_low_notification = false;
5155 
5156 	pi->caps_uvd_dpm = true;
5157 	pi->caps_vce_dpm = true;
5158 
	ci_get_leakage_voltages(rdev);
	ci_patch_dependency_tables_with_leakage(rdev);
	ci_set_private_data_variables_based_on_pptable(rdev);
5162 
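	/*
	 * Hardcoded dispclk/VDDC dependency points: clocks presumably in
	 * 10 kHz units and voltages in mV (e.g. 36000/720 = 360 MHz at
	 * 0.72 V).
	 */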
5163 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
5164 		kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
5165 	if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
5166 		ci_dpm_fini(rdev);
5167 		return -ENOMEM;
5168 	}
5169 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
5170 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
5171 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
5172 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
5173 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
5174 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
5175 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
5176 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
5177 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
5178 
5179 	rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
5180 	rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
5181 	rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
5182 
5183 	rdev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
5184 	rdev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
5185 	rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
5186 	rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
5187 
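	/*
	 * Thermal trip points, presumably in millidegrees Celsius
	 * (e.g. 95000 = 95.0 C); Hawaii runs slightly tighter low/high
	 * limits than the other CI parts.
	 */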
5188 	if (rdev->family == CHIP_HAWAII) {
5189 		pi->thermal_temp_setting.temperature_low = 94500;
5190 		pi->thermal_temp_setting.temperature_high = 95000;
5191 		pi->thermal_temp_setting.temperature_shutdown = 104000;
5192 	} else {
5193 		pi->thermal_temp_setting.temperature_low = 99500;
5194 		pi->thermal_temp_setting.temperature_high = 100000;
5195 		pi->thermal_temp_setting.temperature_shutdown = 104000;
5196 	}
5197 
5198 	pi->uvd_enabled = false;
5199 
5200 	pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5201 	pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5202 	pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5203 	if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
5204 		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5205 	else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
5206 		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5207 
5208 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
5209 		if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
5210 			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5211 		else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
5212 			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5213 		else
5214 			rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
	}
5216 
5217 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
5218 		if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
5219 			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5220 		else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
5221 			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5222 		else
5223 			rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
5224 	}
5225 
5226 	pi->vddc_phase_shed_control = true;
5227 
5228 #if defined(CONFIG_ACPI)
5229 	pi->pcie_performance_request =
5230 		radeon_acpi_is_pcie_performance_request_supported(rdev);
5231 #else
5232 	pi->pcie_performance_request = false;
5233 #endif
5234 
5235 	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
				   &frev, &crev, &data_offset)) {
5237 		pi->caps_sclk_ss_support = true;
5238 		pi->caps_mclk_ss_support = true;
5239 		pi->dynamic_ss = true;
5240 	} else {
5241 		pi->caps_sclk_ss_support = false;
5242 		pi->caps_mclk_ss_support = false;
5243 		pi->dynamic_ss = true;
5244 	}
5245 
5246 	if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
5247 		pi->thermal_protection = true;
5248 	else
5249 		pi->thermal_protection = false;
5250 
5251 	pi->caps_dynamic_ac_timing = true;
5252 
5253 	pi->uvd_power_gated = false;
5254 
5255 	/* make sure dc limits are valid */
5256 	if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
5257 	    (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
5258 		rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
5259 			rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
5260 
5261 	return 0;
5262 }
5263 
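/*
 * debugfs helper: dump the UVD/VCE enable state and the averaged
 * engine/memory clocks for the current power state.
 */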
5264 void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
5265 						    struct seq_file *m)
5266 {
5267 	struct ci_power_info *pi = ci_get_pi(rdev);
5268 	struct radeon_ps *rps = &pi->current_rps;
5269 	u32 sclk = ci_get_average_sclk_freq(rdev);
5270 	u32 mclk = ci_get_average_mclk_freq(rdev);
5271 
5272 	seq_printf(m, "uvd    %sabled\n", pi->uvd_enabled ? "en" : "dis");
5273 	seq_printf(m, "vce    %sabled\n", rps->vce_active ? "en" : "dis");
5274 	seq_printf(m, "power level avg    sclk: %u mclk: %u\n",
5275 		   sclk, mclk);
5276 }
5277 
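/*
 * Log a human-readable description of a power state: class/cap
 * flags, UVD clocks, and each performance level.  pcie_gen is stored
 * zero-based, hence the "+ 1" when printing.
 */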
5278 void ci_dpm_print_power_state(struct radeon_device *rdev,
5279 			      struct radeon_ps *rps)
5280 {
5281 	struct ci_ps *ps = ci_get_ps(rps);
5282 	struct ci_pl *pl;
5283 	int i;
5284 
5285 	r600_dpm_print_class_info(rps->class, rps->class2);
5286 	r600_dpm_print_cap_info(rps->caps);
	printk("\tuvd    vclk: %u dclk: %u\n", rps->vclk, rps->dclk);
5288 	for (i = 0; i < ps->performance_level_count; i++) {
5289 		pl = &ps->performance_levels[i];
5290 		printk("\t\tpower level %d    sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
5291 		       i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
5292 	}
5293 	r600_dpm_print_ps_status(rdev, rps);
5294 }
5295 
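/*
 * Report the lowest (low = true) or highest engine clock of the
 * requested power state.
 */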
5296 u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low)
5297 {
5298 	struct ci_power_info *pi = ci_get_pi(rdev);
5299 	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
5300 
5301 	if (low)
5302 		return requested_state->performance_levels[0].sclk;
5303 	else
5304 		return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
5305 }
5306 
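/*
 * Same as ci_dpm_get_sclk(), but for the memory clock.
 */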
5307 u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low)
5308 {
5309 	struct ci_power_info *pi = ci_get_pi(rdev);
5310 	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
5311 
5312 	if (low)
5313 		return requested_state->performance_levels[0].mclk;
5314 	else
5315 		return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
5316 }
5317