/*	$NetBSD: radeon_ci_dpm.c,v 1.6 2021/12/19 12:40:43 riastradh Exp $	*/

/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: radeon_ci_dpm.c,v 1.6 2021/12/19 12:40:43 riastradh Exp $");

#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/seq_file.h>

#include "atom.h"
#include "ci_dpm.h"
#include "cikd.h"
#include "r600_dpm.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_ucode.h"

#define MC_CG_ARB_FREQ_F0           0x0a
#define MC_CG_ARB_FREQ_F1           0x0b
#define MC_CG_ARB_FREQ_F2           0x0c
#define MC_CG_ARB_FREQ_F3           0x0d

#define SMC_RAM_END 0x40000

#define VOLTAGE_SCALE               4
#define VOLTAGE_VID_OFFSET_SCALE1    625
#define VOLTAGE_VID_OFFSET_SCALE2    100

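/*
 * Per-ASIC PowerTune default tables.  The scalar fields are consumed by
 * ci_populate_svi_load_line(), ci_populate_tdc_limit() and ci_populate_dw8()
 * below; the two arrays hold the BAPMTI_R/BAPMTI_RC thermal-interface
 * coefficients copied into the SMC DPM table by
 * ci_populate_bapm_parameters_in_dpm_table().
 */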
static const struct ci_pt_defaults defaults_hawaii_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
	{ 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_hawaii_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
	{ 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_bonaire_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
	{ 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};

static const struct ci_pt_defaults defaults_bonaire_pro __unused =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
	{ 0x8C,  0x23F, 0x244, 0xA6,  0x83,  0x85,  0x86,  0x86,  0x83,  0xDB,  0xDB,  0xDA,  0x67,  0x60,  0x5F  },
	{ 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
};

static const struct ci_pt_defaults defaults_saturn_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
	{ 0x8C,  0x247, 0x249, 0xA6,  0x80,  0x81,  0x8B,  0x89,  0x86,  0xC9,  0xCA,  0xC9,  0x4D,  0x4D,  0x4D  },
	{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_saturn_pro __unused =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
	{ 0x96,  0x21D, 0x23B, 0xA1,  0x85,  0x87,  0x83,  0x84,  0x81,  0xE6,  0xE6,  0xE6,  0x71,  0x6A,  0x6A  },
	{ 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
};

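/*
 * DIDT (di/dt current limiting) register programming table.  Each entry is
 * { offset, mask, shift, value, type } and is applied by
 * ci_program_pt_config_registers(); the list is terminated by the
 * 0xFFFFFFFF offset sentinel.
 */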
static const struct ci_pt_config_reg didt_config_ci[] =
{
	{ 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};

extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
				       u32 arb_freq_src, u32 arb_freq_dest);
extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
extern u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode);
extern void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
						     u32 max_voltage_steps,
						     struct atom_voltage_table *voltage_table);
extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
extern int ci_mc_load_microcode(struct radeon_device *rdev);
extern void cik_update_cg(struct radeon_device *rdev,
			  u32 block, bool enable);

static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
					 struct atom_voltage_table_entry *voltage_table,
					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
static int ci_set_power_limit(struct radeon_device *rdev, u32 n);
static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
				       u32 target_tdp);
static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);

static PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg);
static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
						      PPSMC_Msg msg, u32 parameter);

static void ci_thermal_start_smc_fan_control(struct radeon_device *rdev);
static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev);

static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
{
	struct ci_power_info *pi = rdev->pm.dpm.priv;

	return pi;
}

static struct ci_ps *ci_get_ps(struct radeon_ps *rps)
{
	struct ci_ps *ps = rps->ps_priv;

	return ps;
}

static void ci_initialize_powertune_defaults(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	switch (rdev->pdev->device) {
	case 0x6649:
	case 0x6650:
	case 0x6651:
	case 0x6658:
	case 0x665C:
	case 0x665D:
	default:
		pi->powertune_defaults = &defaults_bonaire_xt;
		break;
	case 0x6640:
	case 0x6641:
	case 0x6646:
	case 0x6647:
		pi->powertune_defaults = &defaults_saturn_xt;
		break;
	case 0x67B8:
	case 0x67B0:
		pi->powertune_defaults = &defaults_hawaii_xt;
		break;
	case 0x67BA:
	case 0x67B1:
		pi->powertune_defaults = &defaults_hawaii_pro;
		break;
	case 0x67A0:
	case 0x67A1:
	case 0x67A2:
	case 0x67A8:
	case 0x67A9:
	case 0x67AA:
	case 0x67B9:
	case 0x67BE:
		pi->powertune_defaults = &defaults_bonaire_xt;
		break;
	}

	pi->dte_tj_offset = 0;

	pi->caps_power_containment = true;
	pi->caps_cac = false;
	pi->caps_sq_ramping = false;
	pi->caps_db_ramping = false;
	pi->caps_td_ramping = false;
	pi->caps_tcp_ramping = false;

	if (pi->caps_power_containment) {
		pi->caps_cac = true;
		if (rdev->family == CHIP_HAWAII)
			pi->enable_bapm_feature = false;
		else
			pi->enable_bapm_feature = true;
		pi->enable_tdc_limit_feature = true;
		pi->enable_pkg_pwr_tracking_feature = true;
	}
}

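/*
 * Convert a VDDC voltage in mV to a VID code.  The expression is the integer
 * form of (1550 - vddc) / 6.25, i.e. 6.25 mV per VID step down from a 1.55 V
 * base; e.g. 1100 mV -> (6200 - 4400) / 25 = VID 72.  (The base and step
 * size match the usual SVI2 encoding; noted here as an aid, not stated in
 * this file.)
 */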
static u8 ci_convert_to_vid(u16 vddc)
{
	return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
}

static int ci_populate_bapm_vddc_vid_sidd(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
	u32 i;

	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
		return -EINVAL;
	if (rdev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
		return -EINVAL;
	if (rdev->pm.dpm.dyn_state.cac_leakage_table.count !=
	    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
		return -EINVAL;

	for (i = 0; i < rdev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
		if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
			lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
			hi_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
			hi2_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
		} else {
			lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
			hi_vid[i] = ci_convert_to_vid((u16)rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
		}
	}
	return 0;
}

static int ci_populate_vddc_vid(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *vid = pi->smc_powertune_table.VddCVid;
	u32 i;

	if (pi->vddc_voltage_table.count > 8)
		return -EINVAL;

	for (i = 0; i < pi->vddc_voltage_table.count; i++)
		vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);

	return 0;
}

static int ci_populate_svi_load_line(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;

	pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
	pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
	pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
	pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;

	return 0;
}

static int ci_populate_tdc_limit(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	u16 tdc_limit;

	tdc_limit = rdev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
	pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
	pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
		pt_defaults->tdc_vddc_throttle_release_limit_perc;
	pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;

	return 0;
}

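/*
 * "dw8" is the TdcWaterfallCtl fuse dword in the SMC PmFuses table.  Note
 * that on a successful read the value is immediately overwritten with the
 * per-ASIC default, so the read effectively just validates SMC SRAM access;
 * this mirrors the upstream Linux code.
 */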
static int ci_populate_dw8(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	int ret;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, PmFuseTable) +
				     offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
				     (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
				     pi->sram_end);
	if (ret)
		return -EINVAL;
	else
		pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;

	return 0;
}

static int ci_populate_fuzzy_fan(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if ((rdev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
	    (rdev->pm.dpm.fan.fan_output_sensitivity == 0))
		rdev->pm.dpm.fan.fan_output_sensitivity =
			rdev->pm.dpm.fan.default_fan_output_sensitivity;

	pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
		cpu_to_be16(rdev->pm.dpm.fan.fan_output_sensitivity);

	return 0;
}

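/*
 * Derive the GNB low-power-mode VID bounds from the nonzero BAPM hi/lo SIDD
 * VID entries populated above; fails if no usable entries were found.
 */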
static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	int i, min, max;

	min = max = hi_vid[0];
	for (i = 0; i < 8; i++) {
		if (0 != hi_vid[i]) {
			if (min > hi_vid[i])
				min = hi_vid[i];
			if (max < hi_vid[i])
				max = hi_vid[i];
		}

		if (0 != lo_vid[i]) {
			if (min > lo_vid[i])
				min = lo_vid[i];
			if (max < lo_vid[i])
				max = lo_vid[i];
		}
	}

	if ((min == 0) || (max == 0))
		return -EINVAL;
	pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
	pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;

	return 0;
}

static int ci_populate_bapm_vddc_base_leakage_sidd(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
	u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;

	hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
	lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;

	pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
	pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);

	return 0;
}

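/*
 * Fill in the BAPM section of the SMC DPM table.  TDP and package-power
 * values are scaled by 256 into the SMC's fixed-point format; the BAPMTI_R
 * and BAPMTI_RC coefficient arrays come straight from the per-ASIC
 * PowerTune defaults selected earlier.
 */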
static int ci_populate_bapm_parameters_in_dpm_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	SMU7_Discrete_DpmTable  *dpm_table = &pi->smc_state_table;
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	struct radeon_ppm_table *ppm = rdev->pm.dpm.dyn_state.ppm_table;
	int i, j, k;
	const u16 *def1;
	const u16 *def2;

	dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
	dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;

	dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
	dpm_table->GpuTjMax =
		(u8)(pi->thermal_temp_setting.temperature_high / 1000);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;

	if (ppm) {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
	} else {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
	}

	dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
	def1 = pt_defaults->bapmti_r;
	def2 = pt_defaults->bapmti_rc;

	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
			for (k = 0; k < SMU7_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
				dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
				def1++;
				def2++;
			}
		}
	}

	return 0;
}

static int ci_populate_pm_base(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 pm_fuse_table_offset;
	int ret;

	if (pi->caps_power_containment) {
		ret = ci_read_smc_sram_dword(rdev,
					     SMU7_FIRMWARE_HEADER_LOCATION +
					     offsetof(SMU7_Firmware_Header, PmFuseTable),
					     &pm_fuse_table_offset, pi->sram_end);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_vid_sidd(rdev);
		if (ret)
			return ret;
		ret = ci_populate_vddc_vid(rdev);
		if (ret)
			return ret;
		ret = ci_populate_svi_load_line(rdev);
		if (ret)
			return ret;
		ret = ci_populate_tdc_limit(rdev);
		if (ret)
			return ret;
		ret = ci_populate_dw8(rdev);
		if (ret)
			return ret;
		ret = ci_populate_fuzzy_fan(rdev);
		if (ret)
			return ret;
		ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_base_leakage_sidd(rdev);
		if (ret)
			return ret;
		ret = ci_copy_bytes_to_smc(rdev, pm_fuse_table_offset,
					   (u8 *)&pi->smc_powertune_table,
					   sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
		if (ret)
			return ret;
	}

	return 0;
}

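/*
 * Toggle the DIDT_CTRL_EN bit in each DIDT block (SQ/DB/TD/TCP) for which
 * ramping support was enabled in ci_initialize_powertune_defaults().
 */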
static void ci_do_enable_didt(struct radeon_device *rdev, const bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 data;

	if (pi->caps_sq_ramping) {
		data = RREG32_DIDT(DIDT_SQ_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_SQ_CTRL0, data);
	}

	if (pi->caps_db_ramping) {
		data = RREG32_DIDT(DIDT_DB_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_DB_CTRL0, data);
	}

	if (pi->caps_td_ramping) {
		data = RREG32_DIDT(DIDT_TD_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TD_CTRL0, data);
	}

	if (pi->caps_tcp_ramping) {
		data = RREG32_DIDT(DIDT_TCP_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TCP_CTRL0, data);
	}
}

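/*
 * Walk a ci_pt_config_reg table and apply each entry as a read-modify-write
 * to the register space selected by its type.  CISLANDS_CONFIGREG_CACHE
 * entries only accumulate bits, which are OR'd into the next real register
 * write; the 0xFFFFFFFF offset sentinel terminates the walk.
 */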
static int ci_program_pt_config_registers(struct radeon_device *rdev,
					  const struct ci_pt_config_reg *cac_config_regs)
{
	const struct ci_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset << 2);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;

			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset << 2, data);
				break;
			}
			cache = 0;
		}
		config_regs++;
	}
	return 0;
}

static int ci_enable_didt(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;

	if (pi->caps_sq_ramping || pi->caps_db_ramping ||
	    pi->caps_td_ramping || pi->caps_tcp_ramping) {
		cik_enter_rlc_safe_mode(rdev);

		if (enable) {
			ret = ci_program_pt_config_registers(rdev, didt_config_ci);
			if (ret) {
				cik_exit_rlc_safe_mode(rdev);
				return ret;
			}
		}

		ci_do_enable_didt(rdev, enable);

		cik_exit_rlc_safe_mode(rdev);
	}

	return 0;
}

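/*
 * Enable/disable the SMC power-containment features (DTE/BAPM, TDC limit,
 * package power limit) selected in ci_initialize_powertune_defaults();
 * when enabling, the default package power limit is also programmed via
 * ci_set_power_limit().
 */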
static int ci_enable_power_containment(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (enable) {
		pi->power_containment_features = 0;
		if (pi->caps_power_containment) {
			if (pi->enable_bapm_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableDTE);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
			}

			if (pi->enable_tdc_limit_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitEnable);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
			}

			if (pi->enable_pkg_pwr_tracking_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitEnable);
				if (smc_result != PPSMC_Result_OK) {
					ret = -EINVAL;
				} else {
					struct radeon_cac_tdp_table *cac_tdp_table =
						rdev->pm.dpm.dyn_state.cac_tdp_table;
					u32 default_pwr_limit =
						(u32)(cac_tdp_table->maximum_power_delivery_limit * 256);

					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;

					ci_set_power_limit(rdev, default_pwr_limit);
				}
			}
		}
	} else {
		if (pi->caps_power_containment && pi->power_containment_features) {
			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitDisable);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableDTE);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitDisable);
			pi->power_containment_features = 0;
		}
	}

	return ret;
}

static int ci_enable_smc_cac(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (pi->caps_cac) {
		if (enable) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac);
			if (smc_result != PPSMC_Result_OK) {
				ret = -EINVAL;
				pi->cac_enabled = false;
			} else {
				pi->cac_enabled = true;
			}
		} else if (pi->cac_enabled) {
			ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);
			pi->cac_enabled = false;
		}
	}

	return ret;
}

static int ci_enable_thermal_based_sclk_dpm(struct radeon_device *rdev,
					    bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result = PPSMC_Result_OK;

	if (pi->thermal_sclk_dpm_enabled) {
		if (enable)
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_ENABLE_THERMAL_DPM);
		else
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DISABLE_THERMAL_DPM);
	}

	if (smc_result == PPSMC_Result_OK)
		return 0;
	else
		return -EINVAL;
}

static int ci_power_control_set_level(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	s32 adjust_percent;
	s32 target_tdp;
	int ret = 0;
	bool adjust_polarity = false; /* ??? */

	if (pi->caps_power_containment) {
		adjust_percent = adjust_polarity ?
			rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment);
		target_tdp = ((100 + adjust_percent) *
			      (s32)cac_tdp_table->configurable_tdp) / 100;

		ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp);
	}

	return ret;
}

void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (pi->uvd_power_gated == gate)
		return;

	pi->uvd_power_gated = gate;

	ci_update_uvd_dpm(rdev, gate);
}

bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 vblank_time = r600_dpm_get_vblank_time(rdev);
	u32 switch_limit = pi->mem_gddr5 ? 450 : 300;

	/* disable mclk switching if the refresh is >120Hz, even if the
	 * blanking period would allow it
	 */
	if (r600_dpm_get_vrefresh(rdev) > 120)
		return true;

	if (vblank_time < switch_limit)
		return true;
	else
		return false;

}

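/*
 * Adjust the requested power state before programming it: cap clocks to the
 * AC/DC limits, pick up VCE clocks when VCE is active, and force the high
 * memory clock across all levels when mclk switching must be disabled
 * (multiple active displays or a too-short vblank).
 */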
static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
					struct radeon_ps *rps)
{
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_clock_and_voltage_limits *max_limits;
	bool disable_mclk_switching;
	u32 sclk, mclk;
	int i;

	if (rps->vce_active) {
		rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
		rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk;
	} else {
		rps->evclk = 0;
		rps->ecclk = 0;
	}

	if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
	    ci_dpm_vblank_too_short(rdev))
		disable_mclk_switching = true;
	else
		disable_mclk_switching = false;

	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (rdev->pm.dpm.ac_power == false) {
		for (i = 0; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk > max_limits->mclk)
				ps->performance_levels[i].mclk = max_limits->mclk;
			if (ps->performance_levels[i].sclk > max_limits->sclk)
				ps->performance_levels[i].sclk = max_limits->sclk;
		}
	}

	/* XXX validate the min clocks required for display */

	if (disable_mclk_switching) {
		mclk  = ps->performance_levels[ps->performance_level_count - 1].mclk;
		sclk = ps->performance_levels[0].sclk;
	} else {
		mclk = ps->performance_levels[0].mclk;
		sclk = ps->performance_levels[0].sclk;
	}

	if (rps->vce_active) {
		if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk)
			sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk;
		if (mclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk)
			mclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk;
	}

	ps->performance_levels[0].sclk = sclk;
	ps->performance_levels[0].mclk = mclk;

	if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
		ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;

	if (disable_mclk_switching) {
		if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
			ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
	} else {
		if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
			ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
	}
}

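/*
 * Clamp the supported 0..255 degree C window to the requested range and
 * program the resulting high/low thermal interrupt thresholds; the
 * registers take whole degrees while the dpm fields store millidegrees.
 */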
static int ci_thermal_set_temperature_range(struct radeon_device *rdev,
					    int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;
	u32 tmp;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	tmp = RREG32_SMC(CG_THERMAL_INT);
	tmp &= ~(CI_DIG_THERM_INTH_MASK | CI_DIG_THERM_INTL_MASK);
	tmp |= CI_DIG_THERM_INTH(high_temp / 1000) |
		CI_DIG_THERM_INTL(low_temp / 1000);
	WREG32_SMC(CG_THERMAL_INT, tmp);

#if 0
	/* XXX: need to figure out how to handle this properly */
	tmp = RREG32_SMC(CG_THERMAL_CTRL);
	tmp &= DIG_THERM_DPM_MASK;
	tmp |= DIG_THERM_DPM(high_temp / 1000);
	WREG32_SMC(CG_THERMAL_CTRL, tmp);
#endif

	rdev->pm.dpm.thermal.min_temp = low_temp;
	rdev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}

static int ci_thermal_enable_alert(struct radeon_device *rdev,
				   bool enable)
{
	u32 thermal_int = RREG32_SMC(CG_THERMAL_INT);
	PPSMC_Result result;

	if (enable) {
		thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
		WREG32_SMC(CG_THERMAL_INT, thermal_int);
		rdev->irq.dpm_thermal = false;
		result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Enable);
		if (result != PPSMC_Result_OK) {
			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
			return -EINVAL;
		}
	} else {
		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
		WREG32_SMC(CG_THERMAL_INT, thermal_int);
		rdev->irq.dpm_thermal = true;
		result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Disable);
		if (result != PPSMC_Result_OK) {
			DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
			return -EINVAL;
		}
	}

	return 0;
}

static void ci_fan_ctrl_set_static_mode(struct radeon_device *rdev, u32 mode)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;

	if (pi->fan_ctrl_is_in_default_mode) {
		tmp = (RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
		pi->fan_ctrl_default_mode = tmp;
		tmp = (RREG32_SMC(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
		pi->t_min = tmp;
		pi->fan_ctrl_is_in_default_mode = false;
	}

	tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
	tmp |= TMIN(0);
	WREG32_SMC(CG_FDO_CTRL2, tmp);

	tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
	tmp |= FDO_PWM_MODE(mode);
	WREG32_SMC(CG_FDO_CTRL2, tmp);
}

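/*
 * Build the SMC fan table from the VBIOS fan profile.  The t_* trip points
 * appear to be in units of 0.01 degree C (hence the "(50 + t) / 100"
 * round-to-degrees), the PWM slopes are computed in the SMC's fixed-point
 * format, and the finished table is uploaded with ci_copy_bytes_to_smc().
 * On any failure we fall back to disabling ucode fan control.
 */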
static int ci_thermal_setup_fan_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
	u32 duty100;
	u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
	u16 fdo_min, slope1, slope2;
	u32 reference_clock, tmp;
	int ret;
	u64 tmp64;

	if (!pi->fan_table_start) {
		rdev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;

	if (duty100 == 0) {
		rdev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	tmp64 = (u64)rdev->pm.dpm.fan.pwm_min * duty100;
	do_div(tmp64, 10000);
	fdo_min = (u16)tmp64;

	t_diff1 = rdev->pm.dpm.fan.t_med - rdev->pm.dpm.fan.t_min;
	t_diff2 = rdev->pm.dpm.fan.t_high - rdev->pm.dpm.fan.t_med;

	pwm_diff1 = rdev->pm.dpm.fan.pwm_med - rdev->pm.dpm.fan.pwm_min;
	pwm_diff2 = rdev->pm.dpm.fan.pwm_high - rdev->pm.dpm.fan.pwm_med;

	slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
	slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);

	fan_table.TempMin = cpu_to_be16((50 + rdev->pm.dpm.fan.t_min) / 100);
	fan_table.TempMed = cpu_to_be16((50 + rdev->pm.dpm.fan.t_med) / 100);
	fan_table.TempMax = cpu_to_be16((50 + rdev->pm.dpm.fan.t_max) / 100);

	fan_table.Slope1 = cpu_to_be16(slope1);
	fan_table.Slope2 = cpu_to_be16(slope2);

	fan_table.FdoMin = cpu_to_be16(fdo_min);

	fan_table.HystDown = cpu_to_be16(rdev->pm.dpm.fan.t_hyst);

	fan_table.HystUp = cpu_to_be16(1);

	fan_table.HystSlope = cpu_to_be16(1);

	fan_table.TempRespLim = cpu_to_be16(5);

	reference_clock = radeon_get_xclk(rdev);

	fan_table.RefreshPeriod = cpu_to_be32((rdev->pm.dpm.fan.cycle_delay *
					       reference_clock) / 1600);

	fan_table.FdoMax = cpu_to_be16((u16)duty100);

	tmp = (RREG32_SMC(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
	fan_table.TempSrc = (uint8_t)tmp;

	ret = ci_copy_bytes_to_smc(rdev,
				   pi->fan_table_start,
				   (u8 *)(&fan_table),
				   sizeof(fan_table),
				   pi->sram_end);

	if (ret) {
		DRM_ERROR("Failed to load fan table to the SMC.");
		rdev->pm.dpm.fan.ucode_fan_control = false;
	}

	return 0;
}

static int ci_fan_ctrl_start_smc_fan_control(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result ret;

	if (pi->caps_od_fuzzy_fan_control_support) {
		ret = ci_send_msg_to_smc_with_parameter(rdev,
							PPSMC_StartFanControl,
							FAN_CONTROL_FUZZY);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
		ret = ci_send_msg_to_smc_with_parameter(rdev,
							PPSMC_MSG_SetFanPwmMax,
							rdev->pm.dpm.fan.default_max_fan_pwm);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
	} else {
		ret = ci_send_msg_to_smc_with_parameter(rdev,
							PPSMC_StartFanControl,
							FAN_CONTROL_TABLE);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
	}

	pi->fan_is_controlled_by_smc = true;
	return 0;
}

static int ci_fan_ctrl_stop_smc_fan_control(struct radeon_device *rdev)
{
	PPSMC_Result ret;
	struct ci_power_info *pi = ci_get_pi(rdev);

	ret = ci_send_msg_to_smc(rdev, PPSMC_StopFanControl);
	if (ret == PPSMC_Result_OK) {
		pi->fan_is_controlled_by_smc = false;
		return 0;
	} else
		return -EINVAL;
}

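/*
 * Report the current fan speed as a percentage: the measured FDO duty cycle
 * scaled against the 100% duty value from CG_FDO_CTRL1.
 */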
int ci_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
				      u32 *speed)
{
	u32 duty, duty100;
	u64 tmp64;

	if (rdev->pm.no_fan)
		return -ENOENT;

	duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
	duty = (RREG32_SMC(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;

	if (duty100 == 0)
		return -EINVAL;

	tmp64 = (u64)duty * 100;
	do_div(tmp64, duty100);
	*speed = (u32)tmp64;

	if (*speed > 100)
		*speed = 100;

	return 0;
}

int ci_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
				      u32 speed)
{
	u32 tmp;
	u32 duty, duty100;
	u64 tmp64;
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (rdev->pm.no_fan)
		return -ENOENT;

	if (pi->fan_is_controlled_by_smc)
		return -EINVAL;

	if (speed > 100)
		return -EINVAL;

	duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;

	if (duty100 == 0)
		return -EINVAL;

	tmp64 = (u64)speed * duty100;
	do_div(tmp64, 100);
	duty = (u32)tmp64;

	tmp = RREG32_SMC(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
	tmp |= FDO_STATIC_DUTY(duty);
	WREG32_SMC(CG_FDO_CTRL0, tmp);

	return 0;
}

void ci_fan_ctrl_set_mode(struct radeon_device *rdev, u32 mode)
{
	if (mode) {
		/* stop auto-manage */
		if (rdev->pm.dpm.fan.ucode_fan_control)
			ci_fan_ctrl_stop_smc_fan_control(rdev);
		ci_fan_ctrl_set_static_mode(rdev, mode);
	} else {
		/* restart auto-manage */
		if (rdev->pm.dpm.fan.ucode_fan_control)
			ci_thermal_start_smc_fan_control(rdev);
		else
			ci_fan_ctrl_set_default_mode(rdev);
	}
}

u32 ci_fan_ctrl_get_mode(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;

	if (pi->fan_is_controlled_by_smc)
		return 0;

	tmp = RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
	return (tmp >> FDO_PWM_MODE_SHIFT);
}

#if 0
static int ci_fan_ctrl_get_fan_speed_rpm(struct radeon_device *rdev,
					 u32 *speed)
{
	u32 tach_period;
	u32 xclk = radeon_get_xclk(rdev);

	if (rdev->pm.no_fan)
		return -ENOENT;

	if (rdev->pm.fan_pulses_per_revolution == 0)
		return -ENOENT;

	tach_period = (RREG32_SMC(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
	if (tach_period == 0)
		return -ENOENT;

	*speed = 60 * xclk * 10000 / tach_period;

	return 0;
}

static int ci_fan_ctrl_set_fan_speed_rpm(struct radeon_device *rdev,
					 u32 speed)
{
	u32 tach_period, tmp;
	u32 xclk = radeon_get_xclk(rdev);

	if (rdev->pm.no_fan)
		return -ENOENT;

	if (rdev->pm.fan_pulses_per_revolution == 0)
		return -ENOENT;

	if ((speed < rdev->pm.fan_min_rpm) ||
	    (speed > rdev->pm.fan_max_rpm))
		return -EINVAL;

	if (rdev->pm.dpm.fan.ucode_fan_control)
		ci_fan_ctrl_stop_smc_fan_control(rdev);

	tach_period = 60 * xclk * 10000 / (8 * speed);
	tmp = RREG32_SMC(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
	tmp |= TARGET_PERIOD(tach_period);
	WREG32_SMC(CG_TACH_CTRL, tmp);

	ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC_RPM);

	return 0;
}
#endif

static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;

	if (!pi->fan_ctrl_is_in_default_mode) {
		tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
		tmp |= FDO_PWM_MODE(pi->fan_ctrl_default_mode);
		WREG32_SMC(CG_FDO_CTRL2, tmp);

		tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
		tmp |= TMIN(pi->t_min);
		WREG32_SMC(CG_FDO_CTRL2, tmp);
		pi->fan_ctrl_is_in_default_mode = true;
	}
}

static void ci_thermal_start_smc_fan_control(struct radeon_device *rdev)
{
	if (rdev->pm.dpm.fan.ucode_fan_control) {
		ci_fan_ctrl_start_smc_fan_control(rdev);
		ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
	}
}

static void ci_thermal_initialize(struct radeon_device *rdev)
{
	u32 tmp;

	if (rdev->pm.fan_pulses_per_revolution) {
		tmp = RREG32_SMC(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
		tmp |= EDGE_PER_REV(rdev->pm.fan_pulses_per_revolution - 1);
		WREG32_SMC(CG_TACH_CTRL, tmp);
	}

	tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
	tmp |= TACH_PWM_RESP_RATE(0x28);
	WREG32_SMC(CG_FDO_CTRL2, tmp);
}

static int ci_thermal_start_thermal_controller(struct radeon_device *rdev)
{
	int ret;

	ci_thermal_initialize(rdev);
	ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
	if (ret)
		return ret;
	ret = ci_thermal_enable_alert(rdev, true);
	if (ret)
		return ret;
	if (rdev->pm.dpm.fan.ucode_fan_control) {
		ret = ci_thermal_setup_fan_table(rdev);
		if (ret)
			return ret;
		ci_thermal_start_smc_fan_control(rdev);
	}

	return 0;
}

static void ci_thermal_stop_thermal_controller(struct radeon_device *rdev)
{
	if (!rdev->pm.no_fan)
		ci_fan_ctrl_set_default_mode(rdev);
}

#if 0
static int ci_read_smc_soft_register(struct radeon_device *rdev,
				     u16 reg_offset, u32 *value)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	return ci_read_smc_sram_dword(rdev,
				      pi->soft_regs_start + reg_offset,
				      value, pi->sram_end);
}
#endif

static int ci_write_smc_soft_register(struct radeon_device *rdev,
				      u16 reg_offset, u32 value)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	return ci_write_smc_sram_dword(rdev,
				       pi->soft_regs_start + reg_offset,
				       value, pi->sram_end);
}

static void ci_init_fps_limits(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;

	if (pi->caps_fps) {
		u16 tmp;

		tmp = 45;
		table->FpsHighT = cpu_to_be16(tmp);

		tmp = 30;
		table->FpsLowT = cpu_to_be16(tmp);
	}
}

static int ci_update_sclk_t(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret = 0;
	u32 low_sclk_interrupt_t = 0;

	if (pi->caps_sclk_throttle_low_notification) {
		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);

		ret = ci_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
					   (u8 *)&low_sclk_interrupt_t,
					   sizeof(u32), pi->sram_end);

	}

	return ret;
}

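/*
 * Discover leakage (EVV) voltage overrides: either query the EVV entries
 * directly when the platform supports it, or translate the virtual voltage
 * IDs through the VBIOS leakage tables.  The results are recorded in
 * pi->vddc_leakage/pi->vddci_leakage for later use.
 */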
ci_get_leakage_voltages(struct radeon_device * rdev)1342 static void ci_get_leakage_voltages(struct radeon_device *rdev)
1343 {
1344 	struct ci_power_info *pi = ci_get_pi(rdev);
1345 	u16 leakage_id, virtual_voltage_id;
1346 	u16 vddc, vddci;
1347 	int i;
1348 
1349 	pi->vddc_leakage.count = 0;
1350 	pi->vddci_leakage.count = 0;
1351 
1352 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
1353 		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
1354 			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1355 			if (radeon_atom_get_voltage_evv(rdev, virtual_voltage_id, &vddc) != 0)
1356 				continue;
1357 			if (vddc != 0 && vddc != virtual_voltage_id) {
1358 				pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
1359 				pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
1360 				pi->vddc_leakage.count++;
1361 			}
1362 		}
1363 	} else if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
1364 		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
1365 			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1366 			if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci,
1367 										 virtual_voltage_id,
1368 										 leakage_id) == 0) {
1369 				if (vddc != 0 && vddc != virtual_voltage_id) {
1370 					pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
1371 					pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
1372 					pi->vddc_leakage.count++;
1373 				}
1374 				if (vddci != 0 && vddci != virtual_voltage_id) {
1375 					pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
1376 					pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
1377 					pi->vddci_leakage.count++;
1378 				}
1379 			}
1380 		}
1381 	}
1382 }
1383 
ci_set_dpm_event_sources(struct radeon_device * rdev,u32 sources)1384 static void ci_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
1385 {
1386 	struct ci_power_info *pi = ci_get_pi(rdev);
1387 	bool want_thermal_protection;
1388 	enum radeon_dpm_event_src dpm_event_src;
1389 	u32 tmp;
1390 
1391 	switch (sources) {
1392 	case 0:
1393 	default:
1394 		want_thermal_protection = false;
1395 		break;
1396 	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
1397 		want_thermal_protection = true;
1398 		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
1399 		break;
1400 	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
1401 		want_thermal_protection = true;
1402 		dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
1403 		break;
1404 	case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
1405 	      (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
1406 		want_thermal_protection = true;
1407 		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
1408 		break;
1409 	}
1410 
1411 	if (want_thermal_protection) {
1412 #if 0
1413 		/* XXX: need to figure out how to handle this properly */
1414 		tmp = RREG32_SMC(CG_THERMAL_CTRL);
1415 		tmp &= DPM_EVENT_SRC_MASK;
1416 		tmp |= DPM_EVENT_SRC(dpm_event_src);
1417 		WREG32_SMC(CG_THERMAL_CTRL, tmp);
1418 #else
1419 		(void)dpm_event_src;
1420 #endif
1421 
1422 		tmp = RREG32_SMC(GENERAL_PWRMGT);
1423 		if (pi->thermal_protection)
1424 			tmp &= ~THERMAL_PROTECTION_DIS;
1425 		else
1426 			tmp |= THERMAL_PROTECTION_DIS;
1427 		WREG32_SMC(GENERAL_PWRMGT, tmp);
1428 	} else {
1429 		tmp = RREG32_SMC(GENERAL_PWRMGT);
1430 		tmp |= THERMAL_PROTECTION_DIS;
1431 		WREG32_SMC(GENERAL_PWRMGT, tmp);
1432 	}
1433 }
1434 
ci_enable_auto_throttle_source(struct radeon_device * rdev,enum radeon_dpm_auto_throttle_src source,bool enable)1435 static void ci_enable_auto_throttle_source(struct radeon_device *rdev,
1436 					   enum radeon_dpm_auto_throttle_src source,
1437 					   bool enable)
1438 {
1439 	struct ci_power_info *pi = ci_get_pi(rdev);
1440 
1441 	if (enable) {
1442 		if (!(pi->active_auto_throttle_sources & (1 << source))) {
1443 			pi->active_auto_throttle_sources |= 1 << source;
1444 			ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
1445 		}
1446 	} else {
1447 		if (pi->active_auto_throttle_sources & (1 << source)) {
1448 			pi->active_auto_throttle_sources &= ~(1 << source);
1449 			ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
1450 		}
1451 	}
1452 }
1453 
ci_enable_vr_hot_gpio_interrupt(struct radeon_device * rdev)1454 static void ci_enable_vr_hot_gpio_interrupt(struct radeon_device *rdev)
1455 {
1456 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
1457 		ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
1458 }
1459 
ci_unfreeze_sclk_mclk_dpm(struct radeon_device * rdev)1460 static int ci_unfreeze_sclk_mclk_dpm(struct radeon_device *rdev)
1461 {
1462 	struct ci_power_info *pi = ci_get_pi(rdev);
1463 	PPSMC_Result smc_result;
1464 
1465 	if (!pi->need_update_smu7_dpm_table)
1466 		return 0;
1467 
1468 	if ((!pi->sclk_dpm_key_disabled) &&
1469 	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1470 		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
1471 		if (smc_result != PPSMC_Result_OK)
1472 			return -EINVAL;
1473 	}
1474 
1475 	if ((!pi->mclk_dpm_key_disabled) &&
1476 	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1477 		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
1478 		if (smc_result != PPSMC_Result_OK)
1479 			return -EINVAL;
1480 	}
1481 
1482 	pi->need_update_smu7_dpm_table = 0;
1483 	return 0;
1484 }
1485 
ci_enable_sclk_mclk_dpm(struct radeon_device * rdev,bool enable)1486 static int ci_enable_sclk_mclk_dpm(struct radeon_device *rdev, bool enable)
1487 {
1488 	struct ci_power_info *pi = ci_get_pi(rdev);
1489 	PPSMC_Result smc_result;
1490 
1491 	if (enable) {
1492 		if (!pi->sclk_dpm_key_disabled) {
1493 			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Enable);
1494 			if (smc_result != PPSMC_Result_OK)
1495 				return -EINVAL;
1496 		}
1497 
1498 		if (!pi->mclk_dpm_key_disabled) {
1499 			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Enable);
1500 			if (smc_result != PPSMC_Result_OK)
1501 				return -EINVAL;
1502 
1503 			WREG32_P(MC_SEQ_CNTL_3, CAC_EN, ~CAC_EN);
1504 
1505 			WREG32_SMC(LCAC_MC0_CNTL, 0x05);
1506 			WREG32_SMC(LCAC_MC1_CNTL, 0x05);
1507 			WREG32_SMC(LCAC_CPL_CNTL, 0x100005);
1508 
1509 			udelay(10);
1510 
1511 			WREG32_SMC(LCAC_MC0_CNTL, 0x400005);
1512 			WREG32_SMC(LCAC_MC1_CNTL, 0x400005);
1513 			WREG32_SMC(LCAC_CPL_CNTL, 0x500005);
1514 		}
1515 	} else {
1516 		if (!pi->sclk_dpm_key_disabled) {
1517 			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Disable);
1518 			if (smc_result != PPSMC_Result_OK)
1519 				return -EINVAL;
1520 		}
1521 
1522 		if (!pi->mclk_dpm_key_disabled) {
1523 			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Disable);
1524 			if (smc_result != PPSMC_Result_OK)
1525 				return -EINVAL;
1526 		}
1527 	}
1528 
1529 	return 0;
1530 }
1531 
ci_start_dpm(struct radeon_device * rdev)1532 static int ci_start_dpm(struct radeon_device *rdev)
1533 {
1534 	struct ci_power_info *pi = ci_get_pi(rdev);
1535 	PPSMC_Result smc_result;
1536 	int ret;
1537 	u32 tmp;
1538 
1539 	tmp = RREG32_SMC(GENERAL_PWRMGT);
1540 	tmp |= GLOBAL_PWRMGT_EN;
1541 	WREG32_SMC(GENERAL_PWRMGT, tmp);
1542 
1543 	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
1544 	tmp |= DYNAMIC_PM_EN;
1545 	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1546 
1547 	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);
1548 
1549 	WREG32_P(BIF_LNCNT_RESET, 0, ~RESET_LNCNT_EN);
1550 
1551 	smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Enable);
1552 	if (smc_result != PPSMC_Result_OK)
1553 		return -EINVAL;
1554 
1555 	ret = ci_enable_sclk_mclk_dpm(rdev, true);
1556 	if (ret)
1557 		return ret;
1558 
1559 	if (!pi->pcie_dpm_key_disabled) {
1560 		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Enable);
1561 		if (smc_result != PPSMC_Result_OK)
1562 			return -EINVAL;
1563 	}
1564 
1565 	return 0;
1566 }
1567 
ci_freeze_sclk_mclk_dpm(struct radeon_device * rdev)1568 static int ci_freeze_sclk_mclk_dpm(struct radeon_device *rdev)
1569 {
1570 	struct ci_power_info *pi = ci_get_pi(rdev);
1571 	PPSMC_Result smc_result;
1572 
1573 	if (!pi->need_update_smu7_dpm_table)
1574 		return 0;
1575 
1576 	if ((!pi->sclk_dpm_key_disabled) &&
1577 	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1578 		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_FreezeLevel);
1579 		if (smc_result != PPSMC_Result_OK)
1580 			return -EINVAL;
1581 	}
1582 
1583 	if ((!pi->mclk_dpm_key_disabled) &&
1584 	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1585 		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_FreezeLevel);
1586 		if (smc_result != PPSMC_Result_OK)
1587 			return -EINVAL;
1588 	}
1589 
1590 	return 0;
1591 }
1592 
ci_stop_dpm(struct radeon_device * rdev)1593 static int ci_stop_dpm(struct radeon_device *rdev)
1594 {
1595 	struct ci_power_info *pi = ci_get_pi(rdev);
1596 	PPSMC_Result smc_result;
1597 	int ret;
1598 	u32 tmp;
1599 
1600 	tmp = RREG32_SMC(GENERAL_PWRMGT);
1601 	tmp &= ~GLOBAL_PWRMGT_EN;
1602 	WREG32_SMC(GENERAL_PWRMGT, tmp);
1603 
1604 	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
1605 	tmp &= ~DYNAMIC_PM_EN;
1606 	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1607 
1608 	if (!pi->pcie_dpm_key_disabled) {
1609 		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Disable);
1610 		if (smc_result != PPSMC_Result_OK)
1611 			return -EINVAL;
1612 	}
1613 
1614 	ret = ci_enable_sclk_mclk_dpm(rdev, false);
1615 	if (ret)
1616 		return ret;
1617 
1618 	smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Disable);
1619 	if (smc_result != PPSMC_Result_OK)
1620 		return -EINVAL;
1621 
1622 	return 0;
1623 }
1624 
static void ci_enable_sclk_control(struct radeon_device *rdev, bool enable)
1626 {
1627 	u32 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
1628 
1629 	if (enable)
1630 		tmp &= ~SCLK_PWRMGT_OFF;
1631 	else
1632 		tmp |= SCLK_PWRMGT_OFF;
1633 	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1634 }
1635 
1636 #if 0
1637 static int ci_notify_hw_of_power_source(struct radeon_device *rdev,
1638 					bool ac_power)
1639 {
1640 	struct ci_power_info *pi = ci_get_pi(rdev);
1641 	struct radeon_cac_tdp_table *cac_tdp_table =
1642 		rdev->pm.dpm.dyn_state.cac_tdp_table;
1643 	u32 power_limit;
1644 
1645 	if (ac_power)
1646 		power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
1647 	else
1648 		power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);
1649 
1650 	ci_set_power_limit(rdev, power_limit);
1651 
1652 	if (pi->caps_automatic_dc_transition) {
1653 		if (ac_power)
1654 			ci_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC);
1655 		else
1656 			ci_send_msg_to_smc(rdev, PPSMC_MSG_Remove_DC_Clamp);
1657 	}
1658 
1659 	return 0;
1660 }
1661 #endif
1662 
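/*
 * SMC mailbox handshake: write the message ID to SMC_MESSAGE_0, then
 * poll SMC_RESP_0 (for up to rdev->usec_timeout microseconds) until the
 * firmware posts a nonzero response code.  On timeout the stale/zero
 * contents of SMC_RESP_0 are returned, which callers will see as a
 * value other than PPSMC_Result_OK.
 */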
static PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg)
1664 {
1665 	u32 tmp;
1666 	int i;
1667 
1668 	if (!ci_is_smc_running(rdev))
1669 		return PPSMC_Result_Failed;
1670 
1671 	WREG32(SMC_MESSAGE_0, msg);
1672 
1673 	for (i = 0; i < rdev->usec_timeout; i++) {
1674 		tmp = RREG32(SMC_RESP_0);
1675 		if (tmp != 0)
1676 			break;
1677 		udelay(1);
1678 	}
1679 	tmp = RREG32(SMC_RESP_0);
1680 
1681 	return (PPSMC_Result)tmp;
1682 }
1683 
static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
1685 						      PPSMC_Msg msg, u32 parameter)
1686 {
1687 	WREG32(SMC_MSG_ARG_0, parameter);
1688 	return ci_send_msg_to_smc(rdev, msg);
1689 }
1690 
static PPSMC_Result ci_send_msg_to_smc_return_parameter(struct radeon_device *rdev,
1692 							PPSMC_Msg msg, u32 *parameter)
1693 {
1694 	PPSMC_Result smc_result;
1695 
1696 	smc_result = ci_send_msg_to_smc(rdev, msg);
1697 
1698 	if ((smc_result == PPSMC_Result_OK) && parameter)
1699 		*parameter = RREG32(SMC_MSG_ARG_0);
1700 
1701 	return smc_result;
1702 }
1703 
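/*
 * The three force-state helpers below pin DPM to a single performance
 * level: the sclk and mclk variants send a one-hot enabled-level mask
 * (1 << n), while the pcie variant sends the level index itself via
 * the ForceLevel message.
 */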
static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n)
1705 {
1706 	struct ci_power_info *pi = ci_get_pi(rdev);
1707 
1708 	if (!pi->sclk_dpm_key_disabled) {
1709 		PPSMC_Result smc_result =
1710 			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
1711 		if (smc_result != PPSMC_Result_OK)
1712 			return -EINVAL;
1713 	}
1714 
1715 	return 0;
1716 }
1717 
static int ci_dpm_force_state_mclk(struct radeon_device *rdev, u32 n)
1719 {
1720 	struct ci_power_info *pi = ci_get_pi(rdev);
1721 
1722 	if (!pi->mclk_dpm_key_disabled) {
1723 		PPSMC_Result smc_result =
1724 			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
1725 		if (smc_result != PPSMC_Result_OK)
1726 			return -EINVAL;
1727 	}
1728 
1729 	return 0;
1730 }
1731 
static int ci_dpm_force_state_pcie(struct radeon_device *rdev, u32 n)
1733 {
1734 	struct ci_power_info *pi = ci_get_pi(rdev);
1735 
1736 	if (!pi->pcie_dpm_key_disabled) {
1737 		PPSMC_Result smc_result =
1738 			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
1739 		if (smc_result != PPSMC_Result_OK)
1740 			return -EINVAL;
1741 	}
1742 
1743 	return 0;
1744 }
1745 
static int ci_set_power_limit(struct radeon_device *rdev, u32 n)
1747 {
1748 	struct ci_power_info *pi = ci_get_pi(rdev);
1749 
1750 	if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
1751 		PPSMC_Result smc_result =
1752 			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PkgPwrSetLimit, n);
1753 		if (smc_result != PPSMC_Result_OK)
1754 			return -EINVAL;
1755 	}
1756 
1757 	return 0;
1758 }
1759 
static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
1761 				       u32 target_tdp)
1762 {
1763 	PPSMC_Result smc_result =
1764 		ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
1765 	if (smc_result != PPSMC_Result_OK)
1766 		return -EINVAL;
1767 	return 0;
1768 }
1769 
1770 #if 0
1771 static int ci_set_boot_state(struct radeon_device *rdev)
1772 {
1773 	return ci_enable_sclk_mclk_dpm(rdev, false);
1774 }
1775 #endif
1776 
static u32 ci_get_average_sclk_freq(struct radeon_device *rdev)
1778 {
1779 	u32 sclk_freq;
1780 	PPSMC_Result smc_result =
1781 		ci_send_msg_to_smc_return_parameter(rdev,
1782 						    PPSMC_MSG_API_GetSclkFrequency,
1783 						    &sclk_freq);
1784 	if (smc_result != PPSMC_Result_OK)
1785 		sclk_freq = 0;
1786 
1787 	return sclk_freq;
1788 }
1789 
static u32 ci_get_average_mclk_freq(struct radeon_device *rdev)
1791 {
1792 	u32 mclk_freq;
1793 	PPSMC_Result smc_result =
1794 		ci_send_msg_to_smc_return_parameter(rdev,
1795 						    PPSMC_MSG_API_GetMclkFrequency,
1796 						    &mclk_freq);
1797 	if (smc_result != PPSMC_Result_OK)
1798 		mclk_freq = 0;
1799 
1800 	return mclk_freq;
1801 }
1802 
static void ci_dpm_start_smc(struct radeon_device *rdev)
1804 {
1805 	int i;
1806 
1807 	ci_program_jump_on_start(rdev);
1808 	ci_start_smc_clock(rdev);
1809 	ci_start_smc(rdev);
1810 	for (i = 0; i < rdev->usec_timeout; i++) {
1811 		if (RREG32_SMC(FIRMWARE_FLAGS) & INTERRUPTS_ENABLED)
1812 			break;
1813 	}
1814 }
1815 
static void ci_dpm_stop_smc(struct radeon_device *rdev)
1817 {
1818 	ci_reset_smc(rdev);
1819 	ci_stop_smc_clock(rdev);
1820 }
1821 
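/*
 * The SMC firmware publishes the SRAM offsets of its internal tables in
 * a header at SMU7_FIRMWARE_HEADER_LOCATION.  Cache the offsets of the
 * DPM, soft-register, MC register, fan, and MC arbitration tables for
 * the uploads done later in this file.
 */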
static int ci_process_firmware_header(struct radeon_device *rdev)
1823 {
1824 	struct ci_power_info *pi = ci_get_pi(rdev);
1825 	u32 tmp;
1826 	int ret;
1827 
1828 	ret = ci_read_smc_sram_dword(rdev,
1829 				     SMU7_FIRMWARE_HEADER_LOCATION +
1830 				     offsetof(SMU7_Firmware_Header, DpmTable),
1831 				     &tmp, pi->sram_end);
1832 	if (ret)
1833 		return ret;
1834 
1835 	pi->dpm_table_start = tmp;
1836 
1837 	ret = ci_read_smc_sram_dword(rdev,
1838 				     SMU7_FIRMWARE_HEADER_LOCATION +
1839 				     offsetof(SMU7_Firmware_Header, SoftRegisters),
1840 				     &tmp, pi->sram_end);
1841 	if (ret)
1842 		return ret;
1843 
1844 	pi->soft_regs_start = tmp;
1845 
1846 	ret = ci_read_smc_sram_dword(rdev,
1847 				     SMU7_FIRMWARE_HEADER_LOCATION +
1848 				     offsetof(SMU7_Firmware_Header, mcRegisterTable),
1849 				     &tmp, pi->sram_end);
1850 	if (ret)
1851 		return ret;
1852 
1853 	pi->mc_reg_table_start = tmp;
1854 
1855 	ret = ci_read_smc_sram_dword(rdev,
1856 				     SMU7_FIRMWARE_HEADER_LOCATION +
1857 				     offsetof(SMU7_Firmware_Header, FanTable),
1858 				     &tmp, pi->sram_end);
1859 	if (ret)
1860 		return ret;
1861 
1862 	pi->fan_table_start = tmp;
1863 
1864 	ret = ci_read_smc_sram_dword(rdev,
1865 				     SMU7_FIRMWARE_HEADER_LOCATION +
1866 				     offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
1867 				     &tmp, pi->sram_end);
1868 	if (ret)
1869 		return ret;
1870 
1871 	pi->arb_table_start = tmp;
1872 
1873 	return 0;
1874 }
1875 
static void ci_read_clock_registers(struct radeon_device *rdev)
1877 {
1878 	struct ci_power_info *pi = ci_get_pi(rdev);
1879 
1880 	pi->clock_registers.cg_spll_func_cntl =
1881 		RREG32_SMC(CG_SPLL_FUNC_CNTL);
1882 	pi->clock_registers.cg_spll_func_cntl_2 =
1883 		RREG32_SMC(CG_SPLL_FUNC_CNTL_2);
1884 	pi->clock_registers.cg_spll_func_cntl_3 =
1885 		RREG32_SMC(CG_SPLL_FUNC_CNTL_3);
1886 	pi->clock_registers.cg_spll_func_cntl_4 =
1887 		RREG32_SMC(CG_SPLL_FUNC_CNTL_4);
1888 	pi->clock_registers.cg_spll_spread_spectrum =
1889 		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
1890 	pi->clock_registers.cg_spll_spread_spectrum_2 =
1891 		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM_2);
1892 	pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
1893 	pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
1894 	pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
1895 	pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
1896 	pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
1897 	pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
1898 	pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
1899 	pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
1900 	pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
1901 }
1902 
static void ci_init_sclk_t(struct radeon_device *rdev)
1904 {
1905 	struct ci_power_info *pi = ci_get_pi(rdev);
1906 
1907 	pi->low_sclk_interrupt_t = 0;
1908 }
1909 
static void ci_enable_thermal_protection(struct radeon_device *rdev,
1911 					 bool enable)
1912 {
1913 	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
1914 
1915 	if (enable)
1916 		tmp &= ~THERMAL_PROTECTION_DIS;
1917 	else
1918 		tmp |= THERMAL_PROTECTION_DIS;
1919 	WREG32_SMC(GENERAL_PWRMGT, tmp);
1920 }
1921 
static void ci_enable_acpi_power_management(struct radeon_device *rdev)
1923 {
1924 	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
1925 
1926 	tmp |= STATIC_PM_EN;
1927 
1928 	WREG32_SMC(GENERAL_PWRMGT, tmp);
1929 }
1930 
1931 #if 0
1932 static int ci_enter_ulp_state(struct radeon_device *rdev)
1933 {
1934 
1935 	WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
1936 
1937 	udelay(25000);
1938 
1939 	return 0;
1940 }
1941 
1942 static int ci_exit_ulp_state(struct radeon_device *rdev)
1943 {
1944 	int i;
1945 
1946 	WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
1947 
1948 	udelay(7000);
1949 
1950 	for (i = 0; i < rdev->usec_timeout; i++) {
1951 		if (RREG32(SMC_RESP_0) == 1)
1952 			break;
1953 		udelay(1000);
1954 	}
1955 
1956 	return 0;
1957 }
1958 #endif
1959 
static int ci_notify_smc_display_change(struct radeon_device *rdev,
1961 					bool has_display)
1962 {
1963 	PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
1964 
	return (ci_send_msg_to_smc(rdev, msg) == PPSMC_Result_OK) ? 0 : -EINVAL;
1966 }
1967 
static int ci_enable_ds_master_switch(struct radeon_device *rdev,
1969 				      bool enable)
1970 {
1971 	struct ci_power_info *pi = ci_get_pi(rdev);
1972 
1973 	if (enable) {
1974 		if (pi->caps_sclk_ds) {
1975 			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
1976 				return -EINVAL;
1977 		} else {
1978 			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
1979 				return -EINVAL;
1980 		}
1981 	} else {
1982 		if (pi->caps_sclk_ds) {
1983 			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
1984 				return -EINVAL;
1985 		}
1986 	}
1987 
1988 	return 0;
1989 }
1990 
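/*
 * Program the display-gap behavior (switch during vblank or on the
 * watermark when any CRTC is active) and tell the SMC how much of each
 * frame precedes vblank.  frame_time = 1000000/refresh_rate us; the
 * pre-vblank window is the frame minus a 200 us guard band and the
 * vblank itself, converted to reference-clock ticks (ref_clock is in
 * 10 kHz units, so ref_clock / 100 is ticks per microsecond).
 */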
static void ci_program_display_gap(struct radeon_device *rdev)
1992 {
1993 	u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
1994 	u32 pre_vbi_time_in_us;
1995 	u32 frame_time_in_us;
1996 	u32 ref_clock = rdev->clock.spll.reference_freq;
1997 	u32 refresh_rate = r600_dpm_get_vrefresh(rdev);
1998 	u32 vblank_time = r600_dpm_get_vblank_time(rdev);
1999 
2000 	tmp &= ~DISP_GAP_MASK;
2001 	if (rdev->pm.dpm.new_active_crtc_count > 0)
2002 		tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
2003 	else
2004 		tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE);
2005 	WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
2006 
2007 	if (refresh_rate == 0)
2008 		refresh_rate = 60;
2009 	if (vblank_time == 0xffffffff)
2010 		vblank_time = 500;
2011 	frame_time_in_us = 1000000 / refresh_rate;
2012 	pre_vbi_time_in_us =
2013 		frame_time_in_us - 200 - vblank_time;
2014 	tmp = pre_vbi_time_in_us * (ref_clock / 100);
2015 
2016 	WREG32_SMC(CG_DISPLAY_GAP_CNTL2, tmp);
2017 	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
2018 	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));

	ci_notify_smc_display_change(rdev, (rdev->pm.dpm.new_active_crtc_count == 1));
2023 }
2024 
static void ci_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
2026 {
2027 	struct ci_power_info *pi = ci_get_pi(rdev);
2028 	u32 tmp;
2029 
2030 	if (enable) {
2031 		if (pi->caps_sclk_ss_support) {
2032 			tmp = RREG32_SMC(GENERAL_PWRMGT);
2033 			tmp |= DYN_SPREAD_SPECTRUM_EN;
2034 			WREG32_SMC(GENERAL_PWRMGT, tmp);
2035 		}
2036 	} else {
2037 		tmp = RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
2038 		tmp &= ~SSEN;
2039 		WREG32_SMC(CG_SPLL_SPREAD_SPECTRUM, tmp);
2040 
2041 		tmp = RREG32_SMC(GENERAL_PWRMGT);
2042 		tmp &= ~DYN_SPREAD_SPECTRUM_EN;
2043 		WREG32_SMC(GENERAL_PWRMGT, tmp);
2044 	}
2045 }
2046 
static void ci_program_sstp(struct radeon_device *rdev)
2048 {
2049 	WREG32_SMC(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
2050 }
2051 
static void ci_enable_display_gap(struct radeon_device *rdev)
2053 {
2054 	u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
2055 
2056 	tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK);
2057 	tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
2058 		DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK));
2059 
2060 	WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
2061 }
2062 
static void ci_program_vc(struct radeon_device *rdev)
2064 {
2065 	u32 tmp;
2066 
2067 	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
2068 	tmp &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
2069 	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
2070 
2071 	WREG32_SMC(CG_FTV_0, CISLANDS_VRC_DFLT0);
2072 	WREG32_SMC(CG_FTV_1, CISLANDS_VRC_DFLT1);
2073 	WREG32_SMC(CG_FTV_2, CISLANDS_VRC_DFLT2);
2074 	WREG32_SMC(CG_FTV_3, CISLANDS_VRC_DFLT3);
2075 	WREG32_SMC(CG_FTV_4, CISLANDS_VRC_DFLT4);
2076 	WREG32_SMC(CG_FTV_5, CISLANDS_VRC_DFLT5);
2077 	WREG32_SMC(CG_FTV_6, CISLANDS_VRC_DFLT6);
2078 	WREG32_SMC(CG_FTV_7, CISLANDS_VRC_DFLT7);
2079 }
2080 
static void ci_clear_vc(struct radeon_device *rdev)
2082 {
2083 	u32 tmp;
2084 
2085 	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
2086 	tmp |= (RESET_SCLK_CNT | RESET_BUSY_CNT);
2087 	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
2088 
2089 	WREG32_SMC(CG_FTV_0, 0);
2090 	WREG32_SMC(CG_FTV_1, 0);
2091 	WREG32_SMC(CG_FTV_2, 0);
2092 	WREG32_SMC(CG_FTV_3, 0);
2093 	WREG32_SMC(CG_FTV_4, 0);
2094 	WREG32_SMC(CG_FTV_5, 0);
2095 	WREG32_SMC(CG_FTV_6, 0);
2096 	WREG32_SMC(CG_FTV_7, 0);
2097 }
2098 
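/*
 * Replace the SMC's ucode image: wait for the boot ROM sequence to
 * finish, halt the SMC clock, hold the SMC in reset, and copy the new
 * image into SMC SRAM.
 */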
static int ci_upload_firmware(struct radeon_device *rdev)
2100 {
2101 	struct ci_power_info *pi = ci_get_pi(rdev);
2102 	int i, ret;
2103 
2104 	for (i = 0; i < rdev->usec_timeout; i++) {
2105 		if (RREG32_SMC(RCU_UC_EVENTS) & BOOT_SEQ_DONE)
2106 			break;
2107 	}
2108 	WREG32_SMC(SMC_SYSCON_MISC_CNTL, 1);
2109 
2110 	ci_stop_smc_clock(rdev);
2111 	ci_reset_smc(rdev);
2112 
2113 	ret = ci_load_smc_ucode(rdev, pi->sram_end);
2114 
	return ret;
2117 }
2118 
static int ci_get_svi2_voltage_table(struct radeon_device *rdev,
2120 				     struct radeon_clock_voltage_dependency_table *voltage_dependency_table,
2121 				     struct atom_voltage_table *voltage_table)
2122 {
2123 	u32 i;
2124 
2125 	if (voltage_dependency_table == NULL)
2126 		return -EINVAL;
2127 
2128 	voltage_table->mask_low = 0;
2129 	voltage_table->phase_delay = 0;
2130 
2131 	voltage_table->count = voltage_dependency_table->count;
2132 	for (i = 0; i < voltage_table->count; i++) {
2133 		voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
2134 		voltage_table->entries[i].smio_low = 0;
2135 	}
2136 
2137 	return 0;
2138 }
2139 
static int ci_construct_voltage_tables(struct radeon_device *rdev)
2141 {
2142 	struct ci_power_info *pi = ci_get_pi(rdev);
2143 	int ret;
2144 
2145 	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2146 		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
2147 						    VOLTAGE_OBJ_GPIO_LUT,
2148 						    &pi->vddc_voltage_table);
2149 		if (ret)
2150 			return ret;
2151 	} else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2152 		ret = ci_get_svi2_voltage_table(rdev,
2153 						&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2154 						&pi->vddc_voltage_table);
2155 		if (ret)
2156 			return ret;
2157 	}
2158 
2159 	if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
2160 		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDC,
2161 							 &pi->vddc_voltage_table);
2162 
2163 	if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2164 		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
2165 						    VOLTAGE_OBJ_GPIO_LUT,
2166 						    &pi->vddci_voltage_table);
2167 		if (ret)
2168 			return ret;
2169 	} else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2170 		ret = ci_get_svi2_voltage_table(rdev,
2171 						&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2172 						&pi->vddci_voltage_table);
2173 		if (ret)
2174 			return ret;
2175 	}
2176 
2177 	if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
2178 		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDCI,
2179 							 &pi->vddci_voltage_table);
2180 
2181 	if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2182 		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC,
2183 						    VOLTAGE_OBJ_GPIO_LUT,
2184 						    &pi->mvdd_voltage_table);
2185 		if (ret)
2186 			return ret;
2187 	} else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2188 		ret = ci_get_svi2_voltage_table(rdev,
2189 						&rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
2190 						&pi->mvdd_voltage_table);
2191 		if (ret)
2192 			return ret;
2193 	}
2194 
2195 	if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
2196 		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_MVDD,
2197 							 &pi->mvdd_voltage_table);
2198 
2199 	return 0;
2200 }
2201 
static void ci_populate_smc_voltage_table(struct radeon_device *rdev,
2203 					  struct atom_voltage_table_entry *voltage_table,
2204 					  SMU7_Discrete_VoltageLevel *smc_voltage_table)
2205 {
2206 	int ret;
2207 
2208 	ret = ci_get_std_voltage_value_sidd(rdev, voltage_table,
2209 					    &smc_voltage_table->StdVoltageHiSidd,
2210 					    &smc_voltage_table->StdVoltageLoSidd);
2211 
2212 	if (ret) {
2213 		smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
2214 		smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
2215 	}
2216 
2217 	smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
2218 	smc_voltage_table->StdVoltageHiSidd =
2219 		cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
2220 	smc_voltage_table->StdVoltageLoSidd =
2221 		cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
2222 }
2223 
static int ci_populate_smc_vddc_table(struct radeon_device *rdev,
2225 				      SMU7_Discrete_DpmTable *table)
2226 {
2227 	struct ci_power_info *pi = ci_get_pi(rdev);
2228 	unsigned int count;
2229 
2230 	table->VddcLevelCount = pi->vddc_voltage_table.count;
2231 	for (count = 0; count < table->VddcLevelCount; count++) {
2232 		ci_populate_smc_voltage_table(rdev,
2233 					      &pi->vddc_voltage_table.entries[count],
2234 					      &table->VddcLevel[count]);
2235 
2236 		if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2237 			table->VddcLevel[count].Smio |=
2238 				pi->vddc_voltage_table.entries[count].smio_low;
2239 		else
2240 			table->VddcLevel[count].Smio = 0;
2241 	}
2242 	table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
2243 
2244 	return 0;
2245 }
2246 
static int ci_populate_smc_vddci_table(struct radeon_device *rdev,
2248 				       SMU7_Discrete_DpmTable *table)
2249 {
2250 	unsigned int count;
2251 	struct ci_power_info *pi = ci_get_pi(rdev);
2252 
2253 	table->VddciLevelCount = pi->vddci_voltage_table.count;
2254 	for (count = 0; count < table->VddciLevelCount; count++) {
2255 		ci_populate_smc_voltage_table(rdev,
2256 					      &pi->vddci_voltage_table.entries[count],
2257 					      &table->VddciLevel[count]);
2258 
2259 		if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2260 			table->VddciLevel[count].Smio |=
2261 				pi->vddci_voltage_table.entries[count].smio_low;
2262 		else
2263 			table->VddciLevel[count].Smio = 0;
2264 	}
2265 	table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
2266 
2267 	return 0;
2268 }
2269 
static int ci_populate_smc_mvdd_table(struct radeon_device *rdev,
2271 				      SMU7_Discrete_DpmTable *table)
2272 {
2273 	struct ci_power_info *pi = ci_get_pi(rdev);
2274 	unsigned int count;
2275 
2276 	table->MvddLevelCount = pi->mvdd_voltage_table.count;
2277 	for (count = 0; count < table->MvddLevelCount; count++) {
2278 		ci_populate_smc_voltage_table(rdev,
2279 					      &pi->mvdd_voltage_table.entries[count],
2280 					      &table->MvddLevel[count]);
2281 
2282 		if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2283 			table->MvddLevel[count].Smio |=
2284 				pi->mvdd_voltage_table.entries[count].smio_low;
2285 		else
2286 			table->MvddLevel[count].Smio = 0;
2287 	}
2288 	table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
2289 
2290 	return 0;
2291 }
2292 
static int ci_populate_smc_voltage_tables(struct radeon_device *rdev,
2294 					  SMU7_Discrete_DpmTable *table)
2295 {
2296 	int ret;
2297 
2298 	ret = ci_populate_smc_vddc_table(rdev, table);
2299 	if (ret)
2300 		return ret;
2301 
2302 	ret = ci_populate_smc_vddci_table(rdev, table);
2303 	if (ret)
2304 		return ret;
2305 
2306 	ret = ci_populate_smc_mvdd_table(rdev, table);
2307 	if (ret)
2308 		return ret;
2309 
2310 	return 0;
2311 }
2312 
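/*
 * Look up the MVDD level for a given memory clock.  Note that every
 * path here returns -EINVAL; the only caller in this file,
 * ci_populate_smc_acpi_level(), treats a nonzero return as "use
 * MinMvdd = 0".
 */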
static int ci_populate_mvdd_value(struct radeon_device *rdev, u32 mclk,
2314 				  SMU7_Discrete_VoltageLevel *voltage)
2315 {
2316 	struct ci_power_info *pi = ci_get_pi(rdev);
2317 	u32 i = 0;
2318 
2319 	if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
2320 		for (i = 0; i < rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
2321 			if (mclk <= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
2322 				voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
2323 				break;
2324 			}
2325 		}
2326 
2327 		if (i >= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
2328 			return -EINVAL;
2329 	}
2330 
2331 	return -EINVAL;
2332 }
2333 
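/*
 * Derive the "standard" hi/lo SIDD voltages for a VDDC point from the
 * CAC leakage table: look for an exact voltage match in the
 * vddc-on-sclk dependency table, then fall back to the first entry at
 * or above the requested voltage, clamping the index to the leakage
 * table.  If nothing matches, the scaled raw voltage preset above is
 * kept.
 */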
static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
2335 					 struct atom_voltage_table_entry *voltage_table,
2336 					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
2337 {
2338 	u16 v_index, idx;
2339 	bool voltage_found = false;
2340 	*std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
2341 	*std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
2342 
2343 	if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
2344 		return -EINVAL;
2345 
2346 	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
2347 		for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2348 			if (voltage_table->value ==
2349 			    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2350 				voltage_found = true;
2351 				if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
2352 					idx = v_index;
2353 				else
2354 					idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2355 				*std_voltage_lo_sidd =
2356 					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2357 				*std_voltage_hi_sidd =
2358 					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2359 				break;
2360 			}
2361 		}
2362 
2363 		if (!voltage_found) {
2364 			for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2365 				if (voltage_table->value <=
2366 				    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2367 					voltage_found = true;
2368 					if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
2369 						idx = v_index;
2370 					else
2371 						idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2372 					*std_voltage_lo_sidd =
2373 						rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2374 					*std_voltage_hi_sidd =
2375 						rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2376 					break;
2377 				}
2378 			}
2379 		}
2380 	}
2381 
2382 	return 0;
2383 }
2384 
static void ci_populate_phase_value_based_on_sclk(struct radeon_device *rdev,
2386 						  const struct radeon_phase_shedding_limits_table *limits,
2387 						  u32 sclk,
2388 						  u32 *phase_shedding)
2389 {
2390 	unsigned int i;
2391 
2392 	*phase_shedding = 1;
2393 
2394 	for (i = 0; i < limits->count; i++) {
2395 		if (sclk < limits->entries[i].sclk) {
2396 			*phase_shedding = i;
2397 			break;
2398 		}
2399 	}
2400 }
2401 
static void ci_populate_phase_value_based_on_mclk(struct radeon_device *rdev,
2403 						  const struct radeon_phase_shedding_limits_table *limits,
2404 						  u32 mclk,
2405 						  u32 *phase_shedding)
2406 {
2407 	unsigned int i;
2408 
2409 	*phase_shedding = 1;
2410 
2411 	for (i = 0; i < limits->count; i++) {
2412 		if (mclk < limits->entries[i].mclk) {
2413 			*phase_shedding = i;
2414 			break;
2415 		}
2416 	}
2417 }
2418 
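/*
 * The index of the active MC arbitration register set lives in the top
 * byte of the first dword of the SMC arb table; seed it with F1,
 * presumably so the initial switch away from the boot set (F0) has a
 * defined destination.
 */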
static int ci_init_arb_table_index(struct radeon_device *rdev)
2420 {
2421 	struct ci_power_info *pi = ci_get_pi(rdev);
2422 	u32 tmp;
2423 	int ret;
2424 
2425 	ret = ci_read_smc_sram_dword(rdev, pi->arb_table_start,
2426 				     &tmp, pi->sram_end);
2427 	if (ret)
2428 		return ret;
2429 
2430 	tmp &= 0x00FFFFFF;
2431 	tmp |= MC_CG_ARB_FREQ_F1 << 24;
2432 
2433 	return ci_write_smc_sram_dword(rdev, pi->arb_table_start,
2434 				       tmp, pi->sram_end);
2435 }
2436 
static int ci_get_dependency_volt_by_clk(struct radeon_device *rdev,
2438 					 struct radeon_clock_voltage_dependency_table *allowed_clock_voltage_table,
2439 					 u32 clock, u32 *voltage)
2440 {
2441 	u32 i = 0;
2442 
2443 	if (allowed_clock_voltage_table->count == 0)
2444 		return -EINVAL;
2445 
2446 	for (i = 0; i < allowed_clock_voltage_table->count; i++) {
2447 		if (allowed_clock_voltage_table->entries[i].clk >= clock) {
2448 			*voltage = allowed_clock_voltage_table->entries[i].v;
2449 			return 0;
2450 		}
2451 	}
2452 
2453 	*voltage = allowed_clock_voltage_table->entries[i-1].v;
2454 
2455 	return 0;
2456 }
2457 
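/*
 * Pick the deepest sleep divider that keeps the divided engine clock at
 * or above the floor: starting from the maximum divider ID, step down
 * until sclk / 2^i >= max(min_sclk_in_sr, CISLAND_MINIMUM_ENGINE_CLOCK).
 */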
static u8 ci_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
2459 					     u32 sclk, u32 min_sclk_in_sr)
2460 {
2461 	u32 i;
2462 	u32 tmp;
2463 	u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ?
2464 		min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK;
2465 
2466 	if (sclk < min)
2467 		return 0;
2468 
	for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
2470 		tmp = sclk / (1 << i);
2471 		if (tmp >= min || i == 0)
2472 			break;
2473 	}
2474 
2475 	return (u8)i;
2476 }
2477 
static int ci_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev)
2479 {
2480 	return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
2481 }
2482 
static int ci_reset_to_default(struct radeon_device *rdev)
2484 {
2485 	return (ci_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
2486 		0 : -EINVAL;
2487 }
2488 
static int ci_force_switch_to_arb_f0(struct radeon_device *rdev)
2490 {
2491 	u32 tmp;
2492 
2493 	tmp = (RREG32_SMC(SMC_SCRATCH9) & 0x0000ff00) >> 8;
2494 
2495 	if (tmp == MC_CG_ARB_FREQ_F0)
2496 		return 0;
2497 
2498 	return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
2499 }
2500 
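/*
 * Board-specific MC arbitration fixup for Hawaii (PCI device IDs 0x67B0
 * and 0x67B1) with a particular memory type (0x3 in bits 8-11 of
 * MC_SEQ_MISC0): recompute the 8-bit field in bits 16-23 of
 * DRAM_TIMING2 from the engine clock for memory clocks in the
 * 1.0-1.25 GHz and 1.25-1.375 GHz bands (clocks are in 10 kHz units).
 */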
static void ci_register_patching_mc_arb(struct radeon_device *rdev,
2502 					const u32 engine_clock,
2503 					const u32 memory_clock,
2504 					u32 *dram_timimg2)
2505 {
2506 	bool patch;
2507 	u32 tmp, tmp2;
2508 
2509 	tmp = RREG32(MC_SEQ_MISC0);
	patch = (tmp & 0x0000f00) == 0x300;
2511 
2512 	if (patch &&
2513 	    ((rdev->pdev->device == 0x67B0) ||
2514 	     (rdev->pdev->device == 0x67B1))) {
2515 		if ((memory_clock > 100000) && (memory_clock <= 125000)) {
2516 			tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
2517 			*dram_timimg2 &= ~0x00ff0000;
2518 			*dram_timimg2 |= tmp2 << 16;
2519 		} else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
2520 			tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
2521 			*dram_timimg2 &= ~0x00ff0000;
2522 			*dram_timimg2 |= tmp2 << 16;
2523 		}
2524 	}
2525 }
2526 
static int ci_populate_memory_timing_parameters(struct radeon_device *rdev,
2529 						u32 sclk,
2530 						u32 mclk,
2531 						SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
2532 {
2533 	u32 dram_timing;
2534 	u32 dram_timing2;
2535 	u32 burst_time;
2536 
2537 	radeon_atom_set_engine_dram_timings(rdev, sclk, mclk);
2538 
2539 	dram_timing  = RREG32(MC_ARB_DRAM_TIMING);
2540 	dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
2541 	burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
2542 
2543 	ci_register_patching_mc_arb(rdev, sclk, mclk, &dram_timing2);
2544 
2545 	arb_regs->McArbDramTiming  = cpu_to_be32(dram_timing);
2546 	arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
2547 	arb_regs->McArbBurstTime = (u8)burst_time;
2548 
2549 	return 0;
2550 }
2551 
static int ci_do_program_memory_timing_parameters(struct radeon_device *rdev)
2553 {
2554 	struct ci_power_info *pi = ci_get_pi(rdev);
2555 	SMU7_Discrete_MCArbDramTimingTable arb_regs;
2556 	u32 i, j;
	int ret = 0;
2558 
2559 	memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
2560 
2561 	for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
2562 		for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
2563 			ret = ci_populate_memory_timing_parameters(rdev,
2564 								   pi->dpm_table.sclk_table.dpm_levels[i].value,
2565 								   pi->dpm_table.mclk_table.dpm_levels[j].value,
2566 								   &arb_regs.entries[i][j]);
2567 			if (ret)
2568 				break;
2569 		}
2570 	}
2571 
2572 	if (ret == 0)
2573 		ret = ci_copy_bytes_to_smc(rdev,
2574 					   pi->arb_table_start,
2575 					   (u8 *)&arb_regs,
2576 					   sizeof(SMU7_Discrete_MCArbDramTimingTable),
2577 					   pi->sram_end);
2578 
2579 	return ret;
2580 }
2581 
static int ci_program_memory_timing_parameters(struct radeon_device *rdev)
2583 {
2584 	struct ci_power_info *pi = ci_get_pi(rdev);
2585 
2586 	if (pi->need_update_smu7_dpm_table == 0)
2587 		return 0;
2588 
2589 	return ci_do_program_memory_timing_parameters(rdev);
2590 }
2591 
static void ci_populate_smc_initial_state(struct radeon_device *rdev,
2593 					  struct radeon_ps *radeon_boot_state)
2594 {
2595 	struct ci_ps *boot_state = ci_get_ps(radeon_boot_state);
2596 	struct ci_power_info *pi = ci_get_pi(rdev);
2597 	u32 level = 0;
2598 
2599 	for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
2600 		if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
2601 		    boot_state->performance_levels[0].sclk) {
2602 			pi->smc_state_table.GraphicsBootLevel = level;
2603 			break;
2604 		}
2605 	}
2606 
2607 	for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
2608 		if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
2609 		    boot_state->performance_levels[0].mclk) {
2610 			pi->smc_state_table.MemoryBootLevel = level;
2611 			break;
2612 		}
2613 	}
2614 }
2615 
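/*
 * Build a bitmask of the enabled DPM levels, bit i corresponding to
 * dpm_levels[i]; the table is walked from the top, shifting the mask
 * left and or-ing in each level's enabled flag.
 */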
static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
2617 {
2618 	u32 i;
2619 	u32 mask_value = 0;
2620 
2621 	for (i = dpm_table->count; i > 0; i--) {
2622 		mask_value = mask_value << 1;
2623 		if (dpm_table->dpm_levels[i-1].enabled)
2624 			mask_value |= 0x1;
2625 		else
2626 			mask_value &= 0xFFFFFFFE;
2627 	}
2628 
2629 	return mask_value;
2630 }
2631 
static void ci_populate_smc_link_level(struct radeon_device *rdev,
2633 				       SMU7_Discrete_DpmTable *table)
2634 {
2635 	struct ci_power_info *pi = ci_get_pi(rdev);
2636 	struct ci_dpm_table *dpm_table = &pi->dpm_table;
2637 	u32 i;
2638 
2639 	for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
2640 		table->LinkLevel[i].PcieGenSpeed =
2641 			(u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
2642 		table->LinkLevel[i].PcieLaneCount =
2643 			r600_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
2644 		table->LinkLevel[i].EnabledForActivity = 1;
2645 		table->LinkLevel[i].DownT = cpu_to_be32(5);
2646 		table->LinkLevel[i].UpT = cpu_to_be32(30);
2647 	}
2648 
2649 	pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
2650 	pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
2651 		ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
2652 }
2653 
static int ci_populate_smc_uvd_level(struct radeon_device *rdev,
2655 				     SMU7_Discrete_DpmTable *table)
2656 {
2657 	u32 count;
2658 	struct atom_clock_dividers dividers;
2659 	int ret = -EINVAL;
2660 
2661 	table->UvdLevelCount =
2662 		rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
2663 
2664 	for (count = 0; count < table->UvdLevelCount; count++) {
2665 		table->UvdLevel[count].VclkFrequency =
2666 			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
2667 		table->UvdLevel[count].DclkFrequency =
2668 			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
2669 		table->UvdLevel[count].MinVddc =
2670 			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2671 		table->UvdLevel[count].MinVddcPhases = 1;
2672 
2673 		ret = radeon_atom_get_clock_dividers(rdev,
2674 						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2675 						     table->UvdLevel[count].VclkFrequency, false, &dividers);
2676 		if (ret)
2677 			return ret;
2678 
2679 		table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
2680 
2681 		ret = radeon_atom_get_clock_dividers(rdev,
2682 						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2683 						     table->UvdLevel[count].DclkFrequency, false, &dividers);
2684 		if (ret)
2685 			return ret;
2686 
2687 		table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
2688 
2689 		table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
2690 		table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
2691 		table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
2692 	}
2693 
2694 	return ret;
2695 }
2696 
static int ci_populate_smc_vce_level(struct radeon_device *rdev,
2698 				     SMU7_Discrete_DpmTable *table)
2699 {
2700 	u32 count;
2701 	struct atom_clock_dividers dividers;
2702 	int ret = -EINVAL;
2703 
2704 	table->VceLevelCount =
2705 		rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
2706 
2707 	for (count = 0; count < table->VceLevelCount; count++) {
2708 		table->VceLevel[count].Frequency =
2709 			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
2710 		table->VceLevel[count].MinVoltage =
2711 			(u16)rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2712 		table->VceLevel[count].MinPhases = 1;
2713 
2714 		ret = radeon_atom_get_clock_dividers(rdev,
2715 						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2716 						     table->VceLevel[count].Frequency, false, &dividers);
2717 		if (ret)
2718 			return ret;
2719 
2720 		table->VceLevel[count].Divider = (u8)dividers.post_divider;
2721 
2722 		table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
2723 		table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
2724 	}
2725 
	return ret;
2728 }
2729 
static int ci_populate_smc_acp_level(struct radeon_device *rdev,
2731 				     SMU7_Discrete_DpmTable *table)
2732 {
2733 	u32 count;
2734 	struct atom_clock_dividers dividers;
2735 	int ret = -EINVAL;
2736 
2737 	table->AcpLevelCount = (u8)
2738 		(rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
2739 
2740 	for (count = 0; count < table->AcpLevelCount; count++) {
2741 		table->AcpLevel[count].Frequency =
2742 			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
2743 		table->AcpLevel[count].MinVoltage =
2744 			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
2745 		table->AcpLevel[count].MinPhases = 1;
2746 
2747 		ret = radeon_atom_get_clock_dividers(rdev,
2748 						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2749 						     table->AcpLevel[count].Frequency, false, &dividers);
2750 		if (ret)
2751 			return ret;
2752 
2753 		table->AcpLevel[count].Divider = (u8)dividers.post_divider;
2754 
2755 		table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
2756 		table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
2757 	}
2758 
2759 	return ret;
2760 }
2761 
static int ci_populate_smc_samu_level(struct radeon_device *rdev,
2763 				      SMU7_Discrete_DpmTable *table)
2764 {
2765 	u32 count;
2766 	struct atom_clock_dividers dividers;
2767 	int ret = -EINVAL;
2768 
2769 	table->SamuLevelCount =
2770 		rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
2771 
2772 	for (count = 0; count < table->SamuLevelCount; count++) {
2773 		table->SamuLevel[count].Frequency =
2774 			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
2775 		table->SamuLevel[count].MinVoltage =
2776 			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2777 		table->SamuLevel[count].MinPhases = 1;
2778 
2779 		ret = radeon_atom_get_clock_dividers(rdev,
2780 						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2781 						     table->SamuLevel[count].Frequency, false, &dividers);
2782 		if (ret)
2783 			return ret;
2784 
2785 		table->SamuLevel[count].Divider = (u8)dividers.post_divider;
2786 
2787 		table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
2788 		table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
2789 	}
2790 
2791 	return ret;
2792 }
2793 
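/*
 * Translate a target memory clock into the MPLL register values the SMC
 * expects: divider fields from the AtomBIOS MPLL parameters, optional
 * memory spread spectrum (CLKS/CLKV derived from the nominal VCO
 * frequency, which is 4x the memory clock for QDR and 2x otherwise,
 * times 2^post_div), the DLL speed, and the DLL power-down bits
 * selected by dll_state_on.
 */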
static int ci_calculate_mclk_params(struct radeon_device *rdev,
2795 				    u32 memory_clock,
2796 				    SMU7_Discrete_MemoryLevel *mclk,
2797 				    bool strobe_mode,
2798 				    bool dll_state_on)
2799 {
2800 	struct ci_power_info *pi = ci_get_pi(rdev);
2801 	u32  dll_cntl = pi->clock_registers.dll_cntl;
2802 	u32  mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2803 	u32  mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
2804 	u32  mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
2805 	u32  mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
2806 	u32  mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
2807 	u32  mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
2808 	u32  mpll_ss1 = pi->clock_registers.mpll_ss1;
2809 	u32  mpll_ss2 = pi->clock_registers.mpll_ss2;
2810 	struct atom_mpll_param mpll_param;
2811 	int ret;
2812 
2813 	ret = radeon_atom_get_memory_pll_dividers(rdev, memory_clock, strobe_mode, &mpll_param);
2814 	if (ret)
2815 		return ret;
2816 
2817 	mpll_func_cntl &= ~BWCTRL_MASK;
2818 	mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);
2819 
2820 	mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
2821 	mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
2822 		CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);
2823 
2824 	mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
2825 	mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);
2826 
2827 	if (pi->mem_gddr5) {
2828 		mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
2829 		mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
2830 			YCLK_POST_DIV(mpll_param.post_div);
2831 	}
2832 
2833 	if (pi->caps_mclk_ss_support) {
2834 		struct radeon_atom_ss ss;
2835 		u32 freq_nom;
2836 		u32 tmp;
2837 		u32 reference_clock = rdev->clock.mpll.reference_freq;
2838 
2839 		if (mpll_param.qdr == 1)
2840 			freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
2841 		else
2842 			freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);
2843 
2844 		tmp = (freq_nom / reference_clock);
2845 		tmp = tmp * tmp;
2846 		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
2847 						     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
2848 			u32 clks = reference_clock * 5 / ss.rate;
2849 			u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
2850 
2851 			mpll_ss1 &= ~CLKV_MASK;
2852 			mpll_ss1 |= CLKV(clkv);
2853 
2854 			mpll_ss2 &= ~CLKS_MASK;
2855 			mpll_ss2 |= CLKS(clks);
2856 		}
2857 	}
2858 
2859 	mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
2860 	mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);
2861 
2862 	if (dll_state_on)
2863 		mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
2864 	else
2865 		mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
2866 
2867 	mclk->MclkFrequency = memory_clock;
2868 	mclk->MpllFuncCntl = mpll_func_cntl;
2869 	mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
2870 	mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
2871 	mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
2872 	mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
2873 	mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
2874 	mclk->DllCntl = dll_cntl;
2875 	mclk->MpllSs1 = mpll_ss1;
2876 	mclk->MpllSs2 = mpll_ss2;
2877 
2878 	return 0;
2879 }
2880 
static int ci_populate_single_memory_level(struct radeon_device *rdev,
2882 					   u32 memory_clock,
2883 					   SMU7_Discrete_MemoryLevel *memory_level)
2884 {
2885 	struct ci_power_info *pi = ci_get_pi(rdev);
2886 	int ret;
2887 	bool dll_state_on;
2888 
2889 	if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
2890 		ret = ci_get_dependency_volt_by_clk(rdev,
2891 						    &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2892 						    memory_clock, &memory_level->MinVddc);
2893 		if (ret)
2894 			return ret;
2895 	}
2896 
2897 	if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
2898 		ret = ci_get_dependency_volt_by_clk(rdev,
2899 						    &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2900 						    memory_clock, &memory_level->MinVddci);
2901 		if (ret)
2902 			return ret;
2903 	}
2904 
2905 	if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
2906 		ret = ci_get_dependency_volt_by_clk(rdev,
2907 						    &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
2908 						    memory_clock, &memory_level->MinMvdd);
2909 		if (ret)
2910 			return ret;
2911 	}
2912 
2913 	memory_level->MinVddcPhases = 1;
2914 
2915 	if (pi->vddc_phase_shed_control)
2916 		ci_populate_phase_value_based_on_mclk(rdev,
2917 						      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
2918 						      memory_clock,
2919 						      &memory_level->MinVddcPhases);
2920 
2921 	memory_level->EnabledForThrottle = 1;
2922 	memory_level->UpH = 0;
2923 	memory_level->DownH = 100;
2924 	memory_level->VoltageDownH = 0;
2925 	memory_level->ActivityLevel = (u16)pi->mclk_activity_target;
2926 
2927 	memory_level->StutterEnable = false;
2928 	memory_level->StrobeEnable = false;
2929 	memory_level->EdcReadEnable = false;
2930 	memory_level->EdcWriteEnable = false;
2931 	memory_level->RttEnable = false;
2932 
2933 	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2934 
2935 	if (pi->mclk_stutter_mode_threshold &&
2936 	    (memory_clock <= pi->mclk_stutter_mode_threshold) &&
	    !pi->uvd_enabled &&
2938 	    (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
2939 	    (rdev->pm.dpm.new_active_crtc_count <= 2))
2940 		memory_level->StutterEnable = true;
2941 
2942 	if (pi->mclk_strobe_mode_threshold &&
2943 	    (memory_clock <= pi->mclk_strobe_mode_threshold))
		memory_level->StrobeEnable = true;
2945 
2946 	if (pi->mem_gddr5) {
2947 		memory_level->StrobeRatio =
2948 			si_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
2949 		if (pi->mclk_edc_enable_threshold &&
2950 		    (memory_clock > pi->mclk_edc_enable_threshold))
2951 			memory_level->EdcReadEnable = true;
2952 
2953 		if (pi->mclk_edc_wr_enable_threshold &&
2954 		    (memory_clock > pi->mclk_edc_wr_enable_threshold))
2955 			memory_level->EdcWriteEnable = true;
2956 
2957 		if (memory_level->StrobeEnable) {
2958 			if (si_get_mclk_frequency_ratio(memory_clock, true) >=
2959 			    ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
				dll_state_on = (RREG32(MC_SEQ_MISC5) >> 1) & 0x1;
2961 			else
				dll_state_on = (RREG32(MC_SEQ_MISC6) >> 1) & 0x1;
2963 		} else {
2964 			dll_state_on = pi->dll_default_on;
2965 		}
2966 	} else {
2967 		memory_level->StrobeRatio = si_get_ddr3_mclk_frequency_ratio(memory_clock);
		dll_state_on = (RREG32(MC_SEQ_MISC5) >> 1) & 0x1;
2969 	}
2970 
2971 	ret = ci_calculate_mclk_params(rdev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
2972 	if (ret)
2973 		return ret;
2974 
2975 	memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
2976 	memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
2977 	memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
2978 	memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
2979 
2980 	memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
2981 	memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
2982 	memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
2983 	memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
2984 	memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
2985 	memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
2986 	memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
2987 	memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
2988 	memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
2989 	memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
2990 	memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);
2991 
2992 	return 0;
2993 }
2994 
static int ci_populate_smc_acpi_level(struct radeon_device *rdev,
2996 				      SMU7_Discrete_DpmTable *table)
2997 {
2998 	struct ci_power_info *pi = ci_get_pi(rdev);
2999 	struct atom_clock_dividers dividers;
3000 	SMU7_Discrete_VoltageLevel voltage_level;
3001 	u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
3002 	u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
3003 	u32 dll_cntl = pi->clock_registers.dll_cntl;
3004 	u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
3005 	int ret;
3006 
3007 	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
3008 
3009 	if (pi->acpi_vddc)
3010 		table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
3011 	else
3012 		table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);
3013 
3014 	table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;
3015 
3016 	table->ACPILevel.SclkFrequency = rdev->clock.spll.reference_freq;
3017 
3018 	ret = radeon_atom_get_clock_dividers(rdev,
3019 					     COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
3020 					     table->ACPILevel.SclkFrequency, false, &dividers);
3021 	if (ret)
3022 		return ret;
3023 
3024 	table->ACPILevel.SclkDid = (u8)dividers.post_divider;
3025 	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3026 	table->ACPILevel.DeepSleepDivId = 0;
3027 
3028 	spll_func_cntl &= ~SPLL_PWRON;
3029 	spll_func_cntl |= SPLL_RESET;
3030 
3031 	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
3032 	spll_func_cntl_2 |= SCLK_MUX_SEL(4);
3033 
3034 	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
3035 	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
3036 	table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
3037 	table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
3038 	table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
3039 	table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
3040 	table->ACPILevel.CcPwrDynRm = 0;
3041 	table->ACPILevel.CcPwrDynRm1 = 0;
3042 
3043 	table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
3044 	table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
3045 	table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
3046 	table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
3047 	table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
3048 	table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
3049 	table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
3050 	table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
3051 	table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
3052 	table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
3053 	table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);
3054 
3055 	table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
3056 	table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
3057 
3058 	if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
3059 		if (pi->acpi_vddci)
3060 			table->MemoryACPILevel.MinVddci =
3061 				cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
3062 		else
3063 			table->MemoryACPILevel.MinVddci =
3064 				cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
3065 	}
3066 
3067 	if (ci_populate_mvdd_value(rdev, 0, &voltage_level))
3068 		table->MemoryACPILevel.MinMvdd = 0;
3069 	else
3070 		table->MemoryACPILevel.MinMvdd =
3071 			cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);
3072 
3073 	mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
3074 	mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
3075 
3076 	dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
3077 
3078 	table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
3079 	table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
3080 	table->MemoryACPILevel.MpllAdFuncCntl =
3081 		cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
3082 	table->MemoryACPILevel.MpllDqFuncCntl =
3083 		cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
3084 	table->MemoryACPILevel.MpllFuncCntl =
3085 		cpu_to_be32(pi->clock_registers.mpll_func_cntl);
3086 	table->MemoryACPILevel.MpllFuncCntl_1 =
3087 		cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
3088 	table->MemoryACPILevel.MpllFuncCntl_2 =
3089 		cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
3090 	table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
3091 	table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);
3092 
3093 	table->MemoryACPILevel.EnabledForThrottle = 0;
3094 	table->MemoryACPILevel.EnabledForActivity = 0;
3095 	table->MemoryACPILevel.UpH = 0;
3096 	table->MemoryACPILevel.DownH = 100;
3097 	table->MemoryACPILevel.VoltageDownH = 0;
3098 	table->MemoryACPILevel.ActivityLevel =
3099 		cpu_to_be16((u16)pi->mclk_activity_target);
3100 
3101 	table->MemoryACPILevel.StutterEnable = false;
3102 	table->MemoryACPILevel.StrobeEnable = false;
3103 	table->MemoryACPILevel.EdcReadEnable = false;
3104 	table->MemoryACPILevel.EdcWriteEnable = false;
3105 	table->MemoryACPILevel.RttEnable = false;
3106 
3107 	return 0;
3108 }
3109 
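/*
 * ULV (ultra low voltage) is toggled with a pair of SMC messages; the
 * supported flag is cleared in ci_populate_ulv_level() below when no
 * ULV voltage is available, making these calls no-ops.
 */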
static int ci_enable_ulv(struct radeon_device *rdev, bool enable)
3112 {
3113 	struct ci_power_info *pi = ci_get_pi(rdev);
3114 	struct ci_ulv_parm *ulv = &pi->ulv;
3115 
3116 	if (ulv->supported) {
3117 		if (enable)
3118 			return (ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
3119 				0 : -EINVAL;
3120 		else
3121 			return (ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
3122 				0 : -EINVAL;
3123 	}
3124 
3125 	return 0;
3126 }
3127 
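/*
 * The ULV target voltage arrives in rdev->pm.dpm.backbias_response_time
 * (apparently a repurposed field).  The offset below the lowest
 * sclk-dependent VDDC entry is encoded either directly (GPIO and other
 * non-SVI2 regulators) or as SVI2 VID steps (delta * 100 / 625, i.e.
 * 6.25 mV per step).
 */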
static int ci_populate_ulv_level(struct radeon_device *rdev,
3129 				 SMU7_Discrete_Ulv *state)
3130 {
3131 	struct ci_power_info *pi = ci_get_pi(rdev);
3132 	u16 ulv_voltage = rdev->pm.dpm.backbias_response_time;
3133 
3134 	state->CcPwrDynRm = 0;
3135 	state->CcPwrDynRm1 = 0;
3136 
3137 	if (ulv_voltage == 0) {
3138 		pi->ulv.supported = false;
3139 		return 0;
3140 	}
3141 
3142 	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
3143 		if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3144 			state->VddcOffset = 0;
3145 		else
3146 			state->VddcOffset =
3147 				rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
3148 	} else {
3149 		if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3150 			state->VddcOffsetVid = 0;
3151 		else
3152 			state->VddcOffsetVid = (u8)
3153 				((rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
3154 				 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
3155 	}
3156 	state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
3157 
3158 	state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
3159 	state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
3160 	state->VddcOffset = cpu_to_be16(state->VddcOffset);
3161 
3162 	return 0;
3163 }
3164 
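/*
 * Compute the SPLL settings for an engine clock: the feedback divider
 * comes from the ATOM clock-divider tables, and the spread-spectrum
 * CLK_S/CLK_V fields are derived from the VBIOS SS info when sclk
 * spread spectrum is supported.  Fields are left in CPU byte order.
 */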
3165 static int ci_calculate_sclk_params(struct radeon_device *rdev,
3166 				    u32 engine_clock,
3167 				    SMU7_Discrete_GraphicsLevel *sclk)
3168 {
3169 	struct ci_power_info *pi = ci_get_pi(rdev);
3170 	struct atom_clock_dividers dividers;
3171 	u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
3172 	u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
3173 	u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
3174 	u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
3175 	u32 reference_clock = rdev->clock.spll.reference_freq;
3176 	u32 reference_divider;
3177 	u32 fbdiv;
3178 	int ret;
3179 
3180 	ret = radeon_atom_get_clock_dividers(rdev,
3181 					     COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
3182 					     engine_clock, false, &dividers);
3183 	if (ret)
3184 		return ret;
3185 
3186 	reference_divider = 1 + dividers.ref_div;
3187 	fbdiv = dividers.fb_div & 0x3FFFFFF;
3188 
3189 	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
3190 	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
3191 	spll_func_cntl_3 |= SPLL_DITHEN;
3192 
3193 	if (pi->caps_sclk_ss_support) {
3194 		struct radeon_atom_ss ss;
3195 		u32 vco_freq = engine_clock * dividers.post_div;
3196 
3197 		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
3198 						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
3199 			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
3200 			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
3201 
3202 			cg_spll_spread_spectrum &= ~CLK_S_MASK;
3203 			cg_spll_spread_spectrum |= CLK_S(clk_s);
3204 			cg_spll_spread_spectrum |= SSEN;
3205 
3206 			cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
3207 			cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
3208 		}
3209 	}
3210 
3211 	sclk->SclkFrequency = engine_clock;
3212 	sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
3213 	sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
3214 	sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
3215 	sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
3216 	sclk->SclkDid = (u8)dividers.post_divider;
3217 
3218 	return 0;
3219 }
3220 
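/*
 * Build a single SMU7 graphics (sclk) DPM level: clock parameters,
 * minimum VDDC from the dependency table, optional phase shedding and
 * deep-sleep divider, then swap the multi-byte fields to the SMC's
 * big-endian layout.
 */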
3221 static int ci_populate_single_graphic_level(struct radeon_device *rdev,
3222 					    u32 engine_clock,
3223 					    u16 sclk_activity_level_t,
3224 					    SMU7_Discrete_GraphicsLevel *graphic_level)
3225 {
3226 	struct ci_power_info *pi = ci_get_pi(rdev);
3227 	int ret;
3228 
3229 	ret = ci_calculate_sclk_params(rdev, engine_clock, graphic_level);
3230 	if (ret)
3231 		return ret;
3232 
3233 	ret = ci_get_dependency_volt_by_clk(rdev,
3234 					    &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
3235 					    engine_clock, &graphic_level->MinVddc);
3236 	if (ret)
3237 		return ret;
3238 
3239 	graphic_level->SclkFrequency = engine_clock;
3240 
3241 	graphic_level->Flags =  0;
3242 	graphic_level->MinVddcPhases = 1;
3243 
3244 	if (pi->vddc_phase_shed_control)
3245 		ci_populate_phase_value_based_on_sclk(rdev,
3246 						      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
3247 						      engine_clock,
3248 						      &graphic_level->MinVddcPhases);
3249 
3250 	graphic_level->ActivityLevel = sclk_activity_level_t;
3251 
3252 	graphic_level->CcPwrDynRm = 0;
3253 	graphic_level->CcPwrDynRm1 = 0;
3254 	graphic_level->EnabledForThrottle = 1;
3255 	graphic_level->UpH = 0;
3256 	graphic_level->DownH = 0;
3257 	graphic_level->VoltageDownH = 0;
3258 	graphic_level->PowerThrottle = 0;
3259 
3260 	if (pi->caps_sclk_ds)
3261 		graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(rdev,
3262 										   engine_clock,
3263 										   CISLAND_MINIMUM_ENGINE_CLOCK);
3264 
3265 	graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3266 
3267 	graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
3268 	graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
3269 	graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
3270 	graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
3271 	graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
3272 	graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
3273 	graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
3274 	graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
3275 	graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
3276 	graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
3277 	graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
3278 
3279 	return 0;
3280 }
3281 
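/*
 * Populate all graphics DPM levels from the sclk DPM table and upload
 * the level array to SMC RAM.  Level 0 is enabled for activity
 * monitoring and the last level gets the high display watermark.
 */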
3282 static int ci_populate_all_graphic_levels(struct radeon_device *rdev)
3283 {
3284 	struct ci_power_info *pi = ci_get_pi(rdev);
3285 	struct ci_dpm_table *dpm_table = &pi->dpm_table;
3286 	u32 level_array_address = pi->dpm_table_start +
3287 		offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
3288 	u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
3289 		SMU7_MAX_LEVELS_GRAPHICS;
3290 	SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
3291 	u32 i, ret;
3292 
3293 	memset(levels, 0, level_array_size);
3294 
3295 	for (i = 0; i < dpm_table->sclk_table.count; i++) {
3296 		ret = ci_populate_single_graphic_level(rdev,
3297 						       dpm_table->sclk_table.dpm_levels[i].value,
3298 						       (u16)pi->activity_target[i],
3299 						       &pi->smc_state_table.GraphicsLevel[i]);
3300 		if (ret)
3301 			return ret;
3302 		if (i > 1)
3303 			pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
3304 		if (i == (dpm_table->sclk_table.count - 1))
3305 			pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
3306 				PPSMC_DISPLAY_WATERMARK_HIGH;
3307 	}
3308 	pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
3309 
3310 	pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
3311 	pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
3312 		ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
3313 
3314 	ret = ci_copy_bytes_to_smc(rdev, level_array_address,
3315 				   (u8 *)levels, level_array_size,
3316 				   pi->sram_end);
3317 	if (ret)
3318 		return ret;
3319 
3320 	return 0;
3321 }
3322 
3323 static int ci_populate_ulv_state(struct radeon_device *rdev,
3324 				 SMU7_Discrete_Ulv *ulv_level)
3325 {
3326 	return ci_populate_ulv_level(rdev, ulv_level);
3327 }
3328 
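/*
 * Populate all memory (mclk) DPM levels and upload the array to SMC
 * RAM.  A zero mclk in the table is rejected.  On Hawaii (0x67B0/
 * 0x67B1) level 1 inherits level 0's minimum VDDC settings.
 */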
3329 static int ci_populate_all_memory_levels(struct radeon_device *rdev)
3330 {
3331 	struct ci_power_info *pi = ci_get_pi(rdev);
3332 	struct ci_dpm_table *dpm_table = &pi->dpm_table;
3333 	u32 level_array_address = pi->dpm_table_start +
3334 		offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
3335 	u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
3336 		SMU7_MAX_LEVELS_MEMORY;
3337 	SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
3338 	u32 i, ret;
3339 
3340 	memset(levels, 0, level_array_size);
3341 
3342 	for (i = 0; i < dpm_table->mclk_table.count; i++) {
3343 		if (dpm_table->mclk_table.dpm_levels[i].value == 0)
3344 			return -EINVAL;
3345 		ret = ci_populate_single_memory_level(rdev,
3346 						      dpm_table->mclk_table.dpm_levels[i].value,
3347 						      &pi->smc_state_table.MemoryLevel[i]);
3348 		if (ret)
3349 			return ret;
3350 	}
3351 
3352 	pi->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
3353 
3354 	if ((dpm_table->mclk_table.count >= 2) &&
3355 	    ((rdev->pdev->device == 0x67B0) || (rdev->pdev->device == 0x67B1))) {
3356 		pi->smc_state_table.MemoryLevel[1].MinVddc =
3357 			pi->smc_state_table.MemoryLevel[0].MinVddc;
3358 		pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
3359 			pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
3360 	}
3361 
3362 	pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
3363 
3364 	pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
3365 	pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
3366 		ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
3367 
3368 	pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
3369 		PPSMC_DISPLAY_WATERMARK_HIGH;
3370 
3371 	ret = ci_copy_bytes_to_smc(rdev, level_array_address,
3372 				   (u8 *)levels, level_array_size,
3373 				   pi->sram_end);
3374 	if (ret)
3375 		return ret;
3376 
3377 	return 0;
3378 }
3379 
3380 static void ci_reset_single_dpm_table(struct radeon_device *rdev,
3381 				      struct ci_single_dpm_table* dpm_table,
3382 				      u32 count)
3383 {
3384 	u32 i;
3385 
3386 	dpm_table->count = count;
3387 	for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
3388 		dpm_table->dpm_levels[i].enabled = false;
3389 }
3390 
3391 static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table,
3392 				      u32 index, u32 pcie_gen, u32 pcie_lanes)
3393 {
3394 	dpm_table->dpm_levels[index].value = pcie_gen;
3395 	dpm_table->dpm_levels[index].param1 = pcie_lanes;
3396 	dpm_table->dpm_levels[index].enabled = true;
3397 }
3398 
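/*
 * Build the default six-entry PCIe speed/lane table from the
 * performance and powersaving limits; if only one set of levels is in
 * use, it is cloned into the other.
 */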
3399 static int ci_setup_default_pcie_tables(struct radeon_device *rdev)
3400 {
3401 	struct ci_power_info *pi = ci_get_pi(rdev);
3402 
3403 	if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
3404 		return -EINVAL;
3405 
3406 	if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
3407 		pi->pcie_gen_powersaving = pi->pcie_gen_performance;
3408 		pi->pcie_lane_powersaving = pi->pcie_lane_performance;
3409 	} else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
3410 		pi->pcie_gen_performance = pi->pcie_gen_powersaving;
3411 		pi->pcie_lane_performance = pi->pcie_lane_powersaving;
3412 	}
3413 
3414 	ci_reset_single_dpm_table(rdev,
3415 				  &pi->dpm_table.pcie_speed_table,
3416 				  SMU7_MAX_LEVELS_LINK);
3417 
3418 	if (rdev->family == CHIP_BONAIRE)
3419 		ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3420 					  pi->pcie_gen_powersaving.min,
3421 					  pi->pcie_lane_powersaving.max);
3422 	else
3423 		ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3424 					  pi->pcie_gen_powersaving.min,
3425 					  pi->pcie_lane_powersaving.min);
3426 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
3427 				  pi->pcie_gen_performance.min,
3428 				  pi->pcie_lane_performance.min);
3429 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
3430 				  pi->pcie_gen_powersaving.min,
3431 				  pi->pcie_lane_powersaving.max);
3432 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
3433 				  pi->pcie_gen_performance.min,
3434 				  pi->pcie_lane_performance.max);
3435 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
3436 				  pi->pcie_gen_powersaving.max,
3437 				  pi->pcie_lane_powersaving.max);
3438 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
3439 				  pi->pcie_gen_performance.max,
3440 				  pi->pcie_lane_performance.max);
3441 
3442 	pi->dpm_table.pcie_speed_table.count = 6;
3443 
3444 	return 0;
3445 }
3446 
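/*
 * Derive the default sclk/mclk/vddc/vddci/mvdd DPM tables from the
 * ATOM clock-voltage dependency tables, skipping consecutive duplicate
 * clocks, then set up the PCIe table.
 */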
3447 static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
3448 {
3449 	struct ci_power_info *pi = ci_get_pi(rdev);
3450 	struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
3451 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3452 	struct radeon_clock_voltage_dependency_table *allowed_mclk_table =
3453 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
3454 	struct radeon_cac_leakage_table *std_voltage_table =
3455 		&rdev->pm.dpm.dyn_state.cac_leakage_table;
3456 	u32 i;
3457 
3458 	if (allowed_sclk_vddc_table == NULL)
3459 		return -EINVAL;
3460 	if (allowed_sclk_vddc_table->count < 1)
3461 		return -EINVAL;
3462 	if (allowed_mclk_table == NULL)
3463 		return -EINVAL;
3464 	if (allowed_mclk_table->count < 1)
3465 		return -EINVAL;
3466 
3467 	memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
3468 
3469 	ci_reset_single_dpm_table(rdev,
3470 				  &pi->dpm_table.sclk_table,
3471 				  SMU7_MAX_LEVELS_GRAPHICS);
3472 	ci_reset_single_dpm_table(rdev,
3473 				  &pi->dpm_table.mclk_table,
3474 				  SMU7_MAX_LEVELS_MEMORY);
3475 	ci_reset_single_dpm_table(rdev,
3476 				  &pi->dpm_table.vddc_table,
3477 				  SMU7_MAX_LEVELS_VDDC);
3478 	ci_reset_single_dpm_table(rdev,
3479 				  &pi->dpm_table.vddci_table,
3480 				  SMU7_MAX_LEVELS_VDDCI);
3481 	ci_reset_single_dpm_table(rdev,
3482 				  &pi->dpm_table.mvdd_table,
3483 				  SMU7_MAX_LEVELS_MVDD);
3484 
3485 	pi->dpm_table.sclk_table.count = 0;
3486 	for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3487 		if ((i == 0) ||
3488 		    (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
3489 		     allowed_sclk_vddc_table->entries[i].clk)) {
3490 			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
3491 				allowed_sclk_vddc_table->entries[i].clk;
3492 			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
3493 				(i == 0) ? true : false;
3494 			pi->dpm_table.sclk_table.count++;
3495 		}
3496 	}
3497 
3498 	pi->dpm_table.mclk_table.count = 0;
3499 	for (i = 0; i < allowed_mclk_table->count; i++) {
3500 		if ((i == 0) ||
3501 		    (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
3502 		     allowed_mclk_table->entries[i].clk)) {
3503 			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
3504 				allowed_mclk_table->entries[i].clk;
3505 			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
3506 				(i == 0) ? true : false;
3507 			pi->dpm_table.mclk_table.count++;
3508 		}
3509 	}
3510 
3511 	for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3512 		pi->dpm_table.vddc_table.dpm_levels[i].value =
3513 			allowed_sclk_vddc_table->entries[i].v;
3514 		pi->dpm_table.vddc_table.dpm_levels[i].param1 =
3515 			std_voltage_table->entries[i].leakage;
3516 		pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
3517 	}
3518 	pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
3519 
3520 	allowed_mclk_table = &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
3521 	if (allowed_mclk_table) {
3522 		for (i = 0; i < allowed_mclk_table->count; i++) {
3523 			pi->dpm_table.vddci_table.dpm_levels[i].value =
3524 				allowed_mclk_table->entries[i].v;
3525 			pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
3526 		}
3527 		pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
3528 	}
3529 
3530 	allowed_mclk_table = &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
3531 	if (allowed_mclk_table) {
3532 		for (i = 0; i < allowed_mclk_table->count; i++) {
3533 			pi->dpm_table.mvdd_table.dpm_levels[i].value =
3534 				allowed_mclk_table->entries[i].v;
3535 			pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
3536 		}
3537 		pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
3538 	}
3539 
3540 	ci_setup_default_pcie_tables(rdev);
3541 
3542 	return 0;
3543 }
3544 
3545 static int ci_find_boot_level(struct ci_single_dpm_table *table,
3546 			      u32 value, u32 *boot_level)
3547 {
3548 	u32 i;
3549 	int ret = -EINVAL;
3550 
3551 	for(i = 0; i < table->count; i++) {
3552 		if (value == table->dpm_levels[i].value) {
3553 			*boot_level = i;
3554 			ret = 0;
3555 		}
3556 	}
3557 
3558 	return ret;
3559 }
3560 
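/*
 * Build the complete SMU7_Discrete_DpmTable: default DPM tables,
 * voltage tables, ULV state, the graphics/memory/link levels, the
 * ACPI/VCE/ACP/SAMU/UVD levels, boot levels and thermal limits, then
 * upload everything except the trailing PID controllers to SMC RAM.
 */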
3561 static int ci_init_smc_table(struct radeon_device *rdev)
3562 {
3563 	struct ci_power_info *pi = ci_get_pi(rdev);
3564 	struct ci_ulv_parm *ulv = &pi->ulv;
3565 	struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps;
3566 	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
3567 	int ret;
3568 
3569 	ret = ci_setup_default_dpm_tables(rdev);
3570 	if (ret)
3571 		return ret;
3572 
3573 	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
3574 		ci_populate_smc_voltage_tables(rdev, table);
3575 
3576 	ci_init_fps_limits(rdev);
3577 
3578 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
3579 		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
3580 
3581 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
3582 		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
3583 
3584 	if (pi->mem_gddr5)
3585 		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
3586 
3587 	if (ulv->supported) {
3588 		ret = ci_populate_ulv_state(rdev, &pi->smc_state_table.Ulv);
3589 		if (ret)
3590 			return ret;
3591 		WREG32_SMC(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
3592 	}
3593 
3594 	ret = ci_populate_all_graphic_levels(rdev);
3595 	if (ret)
3596 		return ret;
3597 
3598 	ret = ci_populate_all_memory_levels(rdev);
3599 	if (ret)
3600 		return ret;
3601 
3602 	ci_populate_smc_link_level(rdev, table);
3603 
3604 	ret = ci_populate_smc_acpi_level(rdev, table);
3605 	if (ret)
3606 		return ret;
3607 
3608 	ret = ci_populate_smc_vce_level(rdev, table);
3609 	if (ret)
3610 		return ret;
3611 
3612 	ret = ci_populate_smc_acp_level(rdev, table);
3613 	if (ret)
3614 		return ret;
3615 
3616 	ret = ci_populate_smc_samu_level(rdev, table);
3617 	if (ret)
3618 		return ret;
3619 
3620 	ret = ci_do_program_memory_timing_parameters(rdev);
3621 	if (ret)
3622 		return ret;
3623 
3624 	ret = ci_populate_smc_uvd_level(rdev, table);
3625 	if (ret)
3626 		return ret;
3627 
3628 	table->UvdBootLevel  = 0;
3629 	table->VceBootLevel  = 0;
3630 	table->AcpBootLevel  = 0;
3631 	table->SamuBootLevel  = 0;
3632 	table->GraphicsBootLevel  = 0;
3633 	table->MemoryBootLevel  = 0;
3634 
3635 	ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
3636 				 pi->vbios_boot_state.sclk_bootup_value,
3637 				 (u32 *)&pi->smc_state_table.GraphicsBootLevel);
3638 
3639 	ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
3640 				 pi->vbios_boot_state.mclk_bootup_value,
3641 				 (u32 *)&pi->smc_state_table.MemoryBootLevel);
3642 
3643 	table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
3644 	table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
3645 	table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;
3646 
3647 	ci_populate_smc_initial_state(rdev, radeon_boot_state);
3648 
3649 	ret = ci_populate_bapm_parameters_in_dpm_table(rdev);
3650 	if (ret)
3651 		return ret;
3652 
3653 	table->UVDInterval = 1;
3654 	table->VCEInterval = 1;
3655 	table->ACPInterval = 1;
3656 	table->SAMUInterval = 1;
3657 	table->GraphicsVoltageChangeEnable = 1;
3658 	table->GraphicsThermThrottleEnable = 1;
3659 	table->GraphicsInterval = 1;
3660 	table->VoltageInterval = 1;
3661 	table->ThermalInterval = 1;
3662 	table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
3663 					     CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3664 	table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
3665 					    CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3666 	table->MemoryVoltageChangeEnable = 1;
3667 	table->MemoryInterval = 1;
3668 	table->VoltageResponseTime = 0;
3669 	table->VddcVddciDelta = 4000;
3670 	table->PhaseResponseTime = 0;
3671 	table->MemoryThermThrottleEnable = 1;
3672 	table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
3673 	table->PCIeGenInterval = 1;
3674 	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
3675 		table->SVI2Enable  = 1;
3676 	else
3677 		table->SVI2Enable  = 0;
3678 
3679 	table->ThermGpio = 17;
3680 	table->SclkStepSize = 0x4000;
3681 
3682 	table->SystemFlags = cpu_to_be32(table->SystemFlags);
3683 	table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
3684 	table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
3685 	table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
3686 	table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
3687 	table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
3688 	table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
3689 	table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
3690 	table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
3691 	table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
3692 	table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
3693 	table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
3694 	table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
3695 	table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);
3696 
3697 	ret = ci_copy_bytes_to_smc(rdev,
3698 				   pi->dpm_table_start +
3699 				   offsetof(SMU7_Discrete_DpmTable, SystemFlags),
3700 				   (u8 *)&table->SystemFlags,
3701 				   sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
3702 				   pi->sram_end);
3703 	if (ret)
3704 		return ret;
3705 
3706 	return 0;
3707 }
3708 
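/* Disable DPM levels outside the [low_limit, high_limit] range. */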
3709 static void ci_trim_single_dpm_states(struct radeon_device *rdev,
3710 				      struct ci_single_dpm_table *dpm_table,
3711 				      u32 low_limit, u32 high_limit)
3712 {
3713 	u32 i;
3714 
3715 	for (i = 0; i < dpm_table->count; i++) {
3716 		if ((dpm_table->dpm_levels[i].value < low_limit) ||
3717 		    (dpm_table->dpm_levels[i].value > high_limit))
3718 			dpm_table->dpm_levels[i].enabled = false;
3719 		else
3720 			dpm_table->dpm_levels[i].enabled = true;
3721 	}
3722 }
3723 
3724 static void ci_trim_pcie_dpm_states(struct radeon_device *rdev,
3725 				    u32 speed_low, u32 lanes_low,
3726 				    u32 speed_high, u32 lanes_high)
3727 {
3728 	struct ci_power_info *pi = ci_get_pi(rdev);
3729 	struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
3730 	u32 i, j;
3731 
3732 	for (i = 0; i < pcie_table->count; i++) {
3733 		if ((pcie_table->dpm_levels[i].value < speed_low) ||
3734 		    (pcie_table->dpm_levels[i].param1 < lanes_low) ||
3735 		    (pcie_table->dpm_levels[i].value > speed_high) ||
3736 		    (pcie_table->dpm_levels[i].param1 > lanes_high))
3737 			pcie_table->dpm_levels[i].enabled = false;
3738 		else
3739 			pcie_table->dpm_levels[i].enabled = true;
3740 	}
3741 
3742 	for (i = 0; i < pcie_table->count; i++) {
3743 		if (pcie_table->dpm_levels[i].enabled) {
3744 			for (j = i + 1; j < pcie_table->count; j++) {
3745 				if (pcie_table->dpm_levels[j].enabled) {
3746 					if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
3747 					    (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
3748 						pcie_table->dpm_levels[j].enabled = false;
3749 				}
3750 			}
3751 		}
3752 	}
3753 }
3754 
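/*
 * Restrict the sclk, mclk and PCIe DPM tables to the range spanned by
 * the power state's lowest and highest performance levels.
 */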
3755 static int ci_trim_dpm_states(struct radeon_device *rdev,
3756 			      struct radeon_ps *radeon_state)
3757 {
3758 	struct ci_ps *state = ci_get_ps(radeon_state);
3759 	struct ci_power_info *pi = ci_get_pi(rdev);
3760 	u32 high_limit_count;
3761 
3762 	if (state->performance_level_count < 1)
3763 		return -EINVAL;
3764 
3765 	if (state->performance_level_count == 1)
3766 		high_limit_count = 0;
3767 	else
3768 		high_limit_count = 1;
3769 
3770 	ci_trim_single_dpm_states(rdev,
3771 				  &pi->dpm_table.sclk_table,
3772 				  state->performance_levels[0].sclk,
3773 				  state->performance_levels[high_limit_count].sclk);
3774 
3775 	ci_trim_single_dpm_states(rdev,
3776 				  &pi->dpm_table.mclk_table,
3777 				  state->performance_levels[0].mclk,
3778 				  state->performance_levels[high_limit_count].mclk);
3779 
3780 	ci_trim_pcie_dpm_states(rdev,
3781 				state->performance_levels[0].pcie_gen,
3782 				state->performance_levels[0].pcie_lane,
3783 				state->performance_levels[high_limit_count].pcie_gen,
3784 				state->performance_levels[high_limit_count].pcie_lane);
3785 
3786 	return 0;
3787 }
3788 
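/*
 * Ask the SMC for the smallest VDDC that satisfies the current display
 * clock, based on the dispclk and sclk voltage dependency tables.
 */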
3789 static int ci_apply_disp_minimum_voltage_request(struct radeon_device *rdev)
3790 {
3791 	struct radeon_clock_voltage_dependency_table *disp_voltage_table =
3792 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
3793 	struct radeon_clock_voltage_dependency_table *vddc_table =
3794 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3795 	u32 requested_voltage = 0;
3796 	u32 i;
3797 
3798 	if (disp_voltage_table == NULL)
3799 		return -EINVAL;
3800 	if (!disp_voltage_table->count)
3801 		return -EINVAL;
3802 
3803 	for (i = 0; i < disp_voltage_table->count; i++) {
3804 		if (rdev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
3805 			requested_voltage = disp_voltage_table->entries[i].v;
3806 	}
3807 
3808 	for (i = 0; i < vddc_table->count; i++) {
3809 		if (requested_voltage <= vddc_table->entries[i].v) {
3810 			requested_voltage = vddc_table->entries[i].v;
3811 			return (ci_send_msg_to_smc_with_parameter(rdev,
3812 								  PPSMC_MSG_VddC_Request,
3813 								  requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
3814 				0 : -EINVAL;
3815 		}
3816 	}
3817 
3818 	return -EINVAL;
3819 }
3820 
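/*
 * Push the current sclk and mclk DPM level enable masks to the SMC;
 * the PCIe mask upload is compiled out below.
 */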
3821 static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
3822 {
3823 	struct ci_power_info *pi = ci_get_pi(rdev);
3824 	PPSMC_Result result;
3825 
3826 	ci_apply_disp_minimum_voltage_request(rdev);
3827 
3828 	if (!pi->sclk_dpm_key_disabled) {
3829 		if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3830 			result = ci_send_msg_to_smc_with_parameter(rdev,
3831 								   PPSMC_MSG_SCLKDPM_SetEnabledMask,
3832 								   pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3833 			if (result != PPSMC_Result_OK)
3834 				return -EINVAL;
3835 		}
3836 	}
3837 
3838 	if (!pi->mclk_dpm_key_disabled) {
3839 		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3840 			result = ci_send_msg_to_smc_with_parameter(rdev,
3841 								   PPSMC_MSG_MCLKDPM_SetEnabledMask,
3842 								   pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3843 			if (result != PPSMC_Result_OK)
3844 				return -EINVAL;
3845 		}
3846 	}
3847 #if 0
3848 	if (!pi->pcie_dpm_key_disabled) {
3849 		if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3850 			result = ci_send_msg_to_smc_with_parameter(rdev,
3851 								   PPSMC_MSG_PCIeDPM_SetEnabledMask,
3852 								   pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3853 			if (result != PPSMC_Result_OK)
3854 				return -EINVAL;
3855 		}
3856 	}
3857 #endif
3858 	return 0;
3859 }
3860 
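/*
 * Work out which SMU7 tables need reprogramming for the new state:
 * flag an overdrive update when the requested top sclk/mclk is not in
 * the table, and an mclk update when the active CRTC count changes.
 */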
3861 static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev,
3862 						   struct radeon_ps *radeon_state)
3863 {
3864 	struct ci_power_info *pi = ci_get_pi(rdev);
3865 	struct ci_ps *state = ci_get_ps(radeon_state);
3866 	struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
3867 	u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3868 	struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
3869 	u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3870 	u32 i;
3871 
3872 	pi->need_update_smu7_dpm_table = 0;
3873 
3874 	for (i = 0; i < sclk_table->count; i++) {
3875 		if (sclk == sclk_table->dpm_levels[i].value)
3876 			break;
3877 	}
3878 
3879 	if (i >= sclk_table->count) {
3880 		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3881 	} else {
3882 		/* XXX The current code always reprogrammed the sclk levels,
3883 		 * but we don't currently handle disp sclk requirements
3884 		 * so just skip it.
3885 		 */
3886 		if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
3887 			pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
3888 	}
3889 
3890 	for (i = 0; i < mclk_table->count; i++) {
3891 		if (mclk == mclk_table->dpm_levels[i].value)
3892 			break;
3893 	}
3894 
3895 	if (i >= mclk_table->count)
3896 		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
3897 
3898 	if (rdev->pm.dpm.current_active_crtc_count !=
3899 	    rdev->pm.dpm.new_active_crtc_count)
3900 		pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
3901 }
3902 
3903 static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct radeon_device *rdev,
3904 						       struct radeon_ps *radeon_state)
3905 {
3906 	struct ci_power_info *pi = ci_get_pi(rdev);
3907 	struct ci_ps *state = ci_get_ps(radeon_state);
3908 	u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3909 	u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3910 	struct ci_dpm_table *dpm_table = &pi->dpm_table;
3911 	int ret;
3912 
3913 	if (!pi->need_update_smu7_dpm_table)
3914 		return 0;
3915 
3916 	if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
3917 		dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
3918 
3919 	if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
3920 		dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
3921 
3922 	if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
3923 		ret = ci_populate_all_graphic_levels(rdev);
3924 		if (ret)
3925 			return ret;
3926 	}
3927 
3928 	if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
3929 		ret = ci_populate_all_memory_levels(rdev);
3930 		if (ret)
3931 			return ret;
3932 	}
3933 
3934 	return 0;
3935 }
3936 
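/*
 * Enable or disable UVD DPM.  On enable, the UVD level mask is built
 * from entries whose voltage fits the current AC/DC limit, and mclk
 * DPM level 0 is masked off while UVD runs; it is restored on disable.
 */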
3937 static int ci_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
3938 {
3939 	struct ci_power_info *pi = ci_get_pi(rdev);
3940 	const struct radeon_clock_and_voltage_limits *max_limits;
3941 	int i;
3942 
3943 	if (rdev->pm.dpm.ac_power)
3944 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3945 	else
3946 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3947 
3948 	if (enable) {
3949 		pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
3950 
3951 		for (i = rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3952 			if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3953 				pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
3954 
3955 				if (!pi->caps_uvd_dpm)
3956 					break;
3957 			}
3958 		}
3959 
3960 		ci_send_msg_to_smc_with_parameter(rdev,
3961 						  PPSMC_MSG_UVDDPM_SetEnabledMask,
3962 						  pi->dpm_level_enable_mask.uvd_dpm_enable_mask);
3963 
3964 		if (pi->last_mclk_dpm_enable_mask & 0x1) {
3965 			pi->uvd_enabled = true;
3966 			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
3967 			ci_send_msg_to_smc_with_parameter(rdev,
3968 							  PPSMC_MSG_MCLKDPM_SetEnabledMask,
3969 							  pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3970 		}
3971 	} else {
3972 		if (pi->last_mclk_dpm_enable_mask & 0x1) {
3973 			pi->uvd_enabled = false;
3974 			pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
3975 			ci_send_msg_to_smc_with_parameter(rdev,
3976 							  PPSMC_MSG_MCLKDPM_SetEnabledMask,
3977 							  pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3978 		}
3979 	}
3980 
3981 	return (ci_send_msg_to_smc(rdev, enable ?
3982 				   PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
3983 		0 : -EINVAL;
3984 }
3985 
3986 static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable)
3987 {
3988 	struct ci_power_info *pi = ci_get_pi(rdev);
3989 	const struct radeon_clock_and_voltage_limits *max_limits;
3990 	int i;
3991 
3992 	if (rdev->pm.dpm.ac_power)
3993 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3994 	else
3995 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3996 
3997 	if (enable) {
3998 		pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
3999 		for (i = rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4000 			if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4001 				pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
4002 
4003 				if (!pi->caps_vce_dpm)
4004 					break;
4005 			}
4006 		}
4007 
4008 		ci_send_msg_to_smc_with_parameter(rdev,
4009 						  PPSMC_MSG_VCEDPM_SetEnabledMask,
4010 						  pi->dpm_level_enable_mask.vce_dpm_enable_mask);
4011 	}
4012 
4013 	return (ci_send_msg_to_smc(rdev, enable ?
4014 				   PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
4015 		0 : -EINVAL;
4016 }
4017 
4018 #if 0
4019 static int ci_enable_samu_dpm(struct radeon_device *rdev, bool enable)
4020 {
4021 	struct ci_power_info *pi = ci_get_pi(rdev);
4022 	const struct radeon_clock_and_voltage_limits *max_limits;
4023 	int i;
4024 
4025 	if (rdev->pm.dpm.ac_power)
4026 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4027 	else
4028 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4029 
4030 	if (enable) {
4031 		pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
4032 		for (i = rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4033 			if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4034 				pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;
4035 
4036 				if (!pi->caps_samu_dpm)
4037 					break;
4038 			}
4039 		}
4040 
4041 		ci_send_msg_to_smc_with_parameter(rdev,
4042 						  PPSMC_MSG_SAMUDPM_SetEnabledMask,
4043 						  pi->dpm_level_enable_mask.samu_dpm_enable_mask);
4044 	}
4045 	return (ci_send_msg_to_smc(rdev, enable ?
4046 				   PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
4047 		0 : -EINVAL;
4048 }
4049 
4050 static int ci_enable_acp_dpm(struct radeon_device *rdev, bool enable)
4051 {
4052 	struct ci_power_info *pi = ci_get_pi(rdev);
4053 	const struct radeon_clock_and_voltage_limits *max_limits;
4054 	int i;
4055 
4056 	if (rdev->pm.dpm.ac_power)
4057 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4058 	else
4059 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4060 
4061 	if (enable) {
4062 		pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
4063 		for (i = rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4064 			if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4065 				pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;
4066 
4067 				if (!pi->caps_acp_dpm)
4068 					break;
4069 			}
4070 		}
4071 
4072 		ci_send_msg_to_smc_with_parameter(rdev,
4073 						  PPSMC_MSG_ACPDPM_SetEnabledMask,
4074 						  pi->dpm_level_enable_mask.acp_dpm_enable_mask);
4075 	}
4076 
4077 	return (ci_send_msg_to_smc(rdev, enable ?
4078 				   PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
4079 		0 : -EINVAL;
4080 }
4081 #endif
4082 
4083 static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate)
4084 {
4085 	struct ci_power_info *pi = ci_get_pi(rdev);
4086 	u32 tmp;
4087 
4088 	if (!gate) {
4089 		if (pi->caps_uvd_dpm ||
4090 		    (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
4091 			pi->smc_state_table.UvdBootLevel = 0;
4092 		else
4093 			pi->smc_state_table.UvdBootLevel =
4094 				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
4095 
4096 		tmp = RREG32_SMC(DPM_TABLE_475);
4097 		tmp &= ~UvdBootLevel_MASK;
4098 		tmp |= UvdBootLevel(pi->smc_state_table.UvdBootLevel);
4099 		WREG32_SMC(DPM_TABLE_475, tmp);
4100 	}
4101 
4102 	return ci_enable_uvd_dpm(rdev, !gate);
4103 }
4104 
4105 static u8 ci_get_vce_boot_level(struct radeon_device *rdev)
4106 {
4107 	u8 i;
4108 	u32 min_evclk = 30000; /* ??? */
4109 	struct radeon_vce_clock_voltage_dependency_table *table =
4110 		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
4111 
4112 	for (i = 0; i < table->count; i++) {
4113 		if (table->entries[i].evclk >= min_evclk)
4114 			return i;
4115 	}
4116 
4117 	return table->count - 1;
4118 }
4119 
4120 static int ci_update_vce_dpm(struct radeon_device *rdev,
4121 			     struct radeon_ps *radeon_new_state,
4122 			     struct radeon_ps *radeon_current_state)
4123 {
4124 	struct ci_power_info *pi = ci_get_pi(rdev);
4125 	int ret = 0;
4126 	u32 tmp;
4127 
4128 	if (radeon_current_state->evclk != radeon_new_state->evclk) {
4129 		if (radeon_new_state->evclk) {
4130 			/* turn the clocks on when encoding */
4131 			cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false);
4132 
4133 			pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev);
4134 			tmp = RREG32_SMC(DPM_TABLE_475);
4135 			tmp &= ~VceBootLevel_MASK;
4136 			tmp |= VceBootLevel(pi->smc_state_table.VceBootLevel);
4137 			WREG32_SMC(DPM_TABLE_475, tmp);
4138 
4139 			ret = ci_enable_vce_dpm(rdev, true);
4140 		} else {
4141 			/* turn the clocks off when not encoding */
4142 			cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true);
4143 
4144 			ret = ci_enable_vce_dpm(rdev, false);
4145 		}
4146 	}
4147 	return ret;
4148 }
4149 
4150 #if 0
4151 static int ci_update_samu_dpm(struct radeon_device *rdev, bool gate)
4152 {
4153 	return ci_enable_samu_dpm(rdev, gate);
4154 }
4155 
4156 static int ci_update_acp_dpm(struct radeon_device *rdev, bool gate)
4157 {
4158 	struct ci_power_info *pi = ci_get_pi(rdev);
4159 	u32 tmp;
4160 
4161 	if (!gate) {
4162 		pi->smc_state_table.AcpBootLevel = 0;
4163 
4164 		tmp = RREG32_SMC(DPM_TABLE_475);
4165 		tmp &= ~AcpBootLevel_MASK;
4166 		tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
4167 		WREG32_SMC(DPM_TABLE_475, tmp);
4168 	}
4169 
4170 	return ci_enable_acp_dpm(rdev, !gate);
4171 }
4172 #endif
4173 
4174 static int ci_generate_dpm_level_enable_mask(struct radeon_device *rdev,
4175 					     struct radeon_ps *radeon_state)
4176 {
4177 	struct ci_power_info *pi = ci_get_pi(rdev);
4178 	int ret;
4179 
4180 	ret = ci_trim_dpm_states(rdev, radeon_state);
4181 	if (ret)
4182 		return ret;
4183 
4184 	pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
4185 		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
4186 	pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
4187 		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
4188 	pi->last_mclk_dpm_enable_mask =
4189 		pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4190 	if (pi->uvd_enabled) {
4191 		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
4192 			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4193 	}
4194 	pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
4195 		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
4196 
4197 	return 0;
4198 }
4199 
4200 static u32 ci_get_lowest_enabled_level(struct radeon_device *rdev,
4201 				       u32 level_mask)
4202 {
4203 	u32 level = 0;
4204 
4205 	while ((level_mask & (1 << level)) == 0)
4206 		level++;
4207 
4208 	return level;
4209 }
4210 
4211 
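/*
 * Force the DPM performance level.  For "high" and "low" the selected
 * sclk/mclk/PCIe levels are forced and the profile index registers are
 * polled until the hardware reports them; "auto" unforces PCIe DPM and
 * re-uploads the level enable masks.
 */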
4212 int ci_dpm_force_performance_level(struct radeon_device *rdev,
4213 				   enum radeon_dpm_forced_level level)
4214 {
4215 	struct ci_power_info *pi = ci_get_pi(rdev);
4216 	u32 tmp, levels, i;
4217 	int ret;
4218 
4219 	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
4220 		if ((!pi->pcie_dpm_key_disabled) &&
4221 		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4222 			levels = 0;
4223 			tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
4224 			while (tmp >>= 1)
4225 				levels++;
4226 			if (levels) {
4227 				ret = ci_dpm_force_state_pcie(rdev, level);
4228 				if (ret)
4229 					return ret;
4230 				for (i = 0; i < rdev->usec_timeout; i++) {
4231 					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
4232 					       CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
4233 					if (tmp == levels)
4234 						break;
4235 					udelay(1);
4236 				}
4237 			}
4238 		}
4239 		if ((!pi->sclk_dpm_key_disabled) &&
4240 		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4241 			levels = 0;
4242 			tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
4243 			while (tmp >>= 1)
4244 				levels++;
4245 			if (levels) {
4246 				ret = ci_dpm_force_state_sclk(rdev, levels);
4247 				if (ret)
4248 					return ret;
4249 				for (i = 0; i < rdev->usec_timeout; i++) {
4250 					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
4251 					       CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
4252 					if (tmp == levels)
4253 						break;
4254 					udelay(1);
4255 				}
4256 			}
4257 		}
4258 		if ((!pi->mclk_dpm_key_disabled) &&
4259 		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4260 			levels = 0;
4261 			tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4262 			while (tmp >>= 1)
4263 				levels++;
4264 			if (levels) {
4265 				ret = ci_dpm_force_state_mclk(rdev, levels);
4266 				if (ret)
4267 					return ret;
4268 				for (i = 0; i < rdev->usec_timeout; i++) {
4269 					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
4270 					       CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
4271 					if (tmp == levels)
4272 						break;
4273 					udelay(1);
4274 				}
4275 			}
4276 		}
4277 	} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
4278 		if ((!pi->sclk_dpm_key_disabled) &&
4279 		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4280 			levels = ci_get_lowest_enabled_level(rdev,
4281 							     pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
4282 			ret = ci_dpm_force_state_sclk(rdev, levels);
4283 			if (ret)
4284 				return ret;
4285 			for (i = 0; i < rdev->usec_timeout; i++) {
4286 				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
4287 				       CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
4288 				if (tmp == levels)
4289 					break;
4290 				udelay(1);
4291 			}
4292 		}
4293 		if ((!pi->mclk_dpm_key_disabled) &&
4294 		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4295 			levels = ci_get_lowest_enabled_level(rdev,
4296 							     pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4297 			ret = ci_dpm_force_state_mclk(rdev, levels);
4298 			if (ret)
4299 				return ret;
4300 			for (i = 0; i < rdev->usec_timeout; i++) {
4301 				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
4302 				       CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
4303 				if (tmp == levels)
4304 					break;
4305 				udelay(1);
4306 			}
4307 		}
4308 		if ((!pi->pcie_dpm_key_disabled) &&
4309 		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4310 			levels = ci_get_lowest_enabled_level(rdev,
4311 							     pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
4312 			ret = ci_dpm_force_state_pcie(rdev, levels);
4313 			if (ret)
4314 				return ret;
4315 			for (i = 0; i < rdev->usec_timeout; i++) {
4316 				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
4317 				       CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
4318 				if (tmp == levels)
4319 					break;
4320 				udelay(1);
4321 			}
4322 		}
4323 	} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
4324 		if (!pi->pcie_dpm_key_disabled) {
4325 			PPSMC_Result smc_result;
4326 
4327 			smc_result = ci_send_msg_to_smc(rdev,
4328 							PPSMC_MSG_PCIeDPM_UnForceLevel);
4329 			if (smc_result != PPSMC_Result_OK)
4330 				return -EINVAL;
4331 		}
4332 		ret = ci_upload_dpm_level_enable_mask(rdev);
4333 		if (ret)
4334 			return ret;
4335 	}
4336 
4337 	rdev->pm.dpm.forced_level = level;
4338 
4339 	return 0;
4340 }
4341 
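/*
 * Append derived MC registers after the VBIOS-provided entries: the
 * EMRS/MRS/MRS1 command mirrors (and MC_PMG_AUTO_CMD for non-GDDR5)
 * are synthesized from each timing set's MISC1 and RESERVE_M data.
 */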
4342 static int ci_set_mc_special_registers(struct radeon_device *rdev,
4343 				       struct ci_mc_reg_table *table)
4344 {
4345 	struct ci_power_info *pi = ci_get_pi(rdev);
4346 	u8 i, j, k;
4347 	u32 temp_reg;
4348 
4349 	for (i = 0, j = table->last; i < table->last; i++) {
4350 		if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4351 			return -EINVAL;
4352 		switch(table->mc_reg_address[i].s1 << 2) {
4353 		case MC_SEQ_MISC1:
4354 			temp_reg = RREG32(MC_PMG_CMD_EMRS);
4355 			table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
4356 			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
4357 			for (k = 0; k < table->num_entries; k++) {
4358 				table->mc_reg_table_entry[k].mc_data[j] =
4359 					((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
4360 			}
4361 			j++;
4362 			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4363 				return -EINVAL;
4364 
4365 			temp_reg = RREG32(MC_PMG_CMD_MRS);
4366 			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
4367 			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
4368 			for (k = 0; k < table->num_entries; k++) {
4369 				table->mc_reg_table_entry[k].mc_data[j] =
4370 					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4371 				if (!pi->mem_gddr5)
4372 					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
4373 			}
4374 			j++;
4375 			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4376 				return -EINVAL;
4377 
4378 			if (!pi->mem_gddr5) {
4379 				table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2;
4380 				table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2;
4381 				for (k = 0; k < table->num_entries; k++) {
4382 					table->mc_reg_table_entry[k].mc_data[j] =
4383 						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
4384 				}
4385 				j++;
4386 				if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4387 					return -EINVAL;
4388 			}
4389 			break;
4390 		case MC_SEQ_RESERVE_M:
4391 			temp_reg = RREG32(MC_PMG_CMD_MRS1);
4392 			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
4393 			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
4394 			for (k = 0; k < table->num_entries; k++) {
4395 				table->mc_reg_table_entry[k].mc_data[j] =
4396 					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4397 			}
4398 			j++;
4399 			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4400 				return -EINVAL;
4401 			break;
4402 		default:
4403 			break;
4404 		}
4405 
4406 	}
4407 
4408 	table->last = j;
4409 
4410 	return 0;
4411 }
4412 
4413 static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
4414 {
4415 	bool result = true;
4416 
4417 	switch(in_reg) {
4418 	case MC_SEQ_RAS_TIMING >> 2:
4419 		*out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
4420 		break;
4421 	case MC_SEQ_DLL_STBY >> 2:
4422 		*out_reg = MC_SEQ_DLL_STBY_LP >> 2;
4423 		break;
4424 	case MC_SEQ_G5PDX_CMD0 >> 2:
4425 		*out_reg = MC_SEQ_G5PDX_CMD0_LP >> 2;
4426 		break;
4427 	case MC_SEQ_G5PDX_CMD1 >> 2:
4428 		*out_reg = MC_SEQ_G5PDX_CMD1_LP >> 2;
4429 		break;
4430 	case MC_SEQ_G5PDX_CTRL >> 2:
4431 		*out_reg = MC_SEQ_G5PDX_CTRL_LP >> 2;
4432 		break;
4433 	case MC_SEQ_CAS_TIMING >> 2:
4434 		*out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
4435 		break;
4436 	case MC_SEQ_MISC_TIMING >> 2:
4437 		*out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
4438 		break;
4439 	case MC_SEQ_MISC_TIMING2 >> 2:
4440 		*out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
4441 		break;
4442 	case MC_SEQ_PMG_DVS_CMD >> 2:
4443 		*out_reg = MC_SEQ_PMG_DVS_CMD_LP >> 2;
4444 		break;
4445 	case MC_SEQ_PMG_DVS_CTL >> 2:
4446 		*out_reg = MC_SEQ_PMG_DVS_CTL_LP >> 2;
4447 		break;
4448 	case MC_SEQ_RD_CTL_D0 >> 2:
4449 		*out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
4450 		break;
4451 	case MC_SEQ_RD_CTL_D1 >> 2:
4452 		*out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
4453 		break;
4454 	case MC_SEQ_WR_CTL_D0 >> 2:
4455 		*out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
4456 		break;
4457 	case MC_SEQ_WR_CTL_D1 >> 2:
4458 		*out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
4459 		break;
4460 	case MC_PMG_CMD_EMRS >> 2:
4461 		*out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
4462 		break;
4463 	case MC_PMG_CMD_MRS >> 2:
4464 		*out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
4465 		break;
4466 	case MC_PMG_CMD_MRS1 >> 2:
4467 		*out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
4468 		break;
4469 	case MC_SEQ_PMG_TIMING >> 2:
4470 		*out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
4471 		break;
4472 	case MC_PMG_CMD_MRS2 >> 2:
4473 		*out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
4474 		break;
4475 	case MC_SEQ_WR_CTL_2 >> 2:
4476 		*out_reg = MC_SEQ_WR_CTL_2_LP >> 2;
4477 		break;
4478 	default:
4479 		result = false;
4480 		break;
4481 	}
4482 
4483 	return result;
4484 }
4485 
4486 static void ci_set_valid_flag(struct ci_mc_reg_table *table)
4487 {
4488 	u8 i, j;
4489 
4490 	for (i = 0; i < table->last; i++) {
4491 		for (j = 1; j < table->num_entries; j++) {
4492 			if (table->mc_reg_table_entry[j-1].mc_data[i] !=
4493 			    table->mc_reg_table_entry[j].mc_data[i]) {
4494 				table->valid_flag |= 1 << i;
4495 				break;
4496 			}
4497 		}
4498 	}
4499 }
4500 
4501 static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
4502 {
4503 	u32 i;
4504 	u16 address;
4505 
4506 	for (i = 0; i < table->last; i++) {
4507 		table->mc_reg_address[i].s0 =
4508 			ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
4509 			address : table->mc_reg_address[i].s1;
4510 	}
4511 }
4512 
4513 static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
4514 				      struct ci_mc_reg_table *ci_table)
4515 {
4516 	u8 i, j;
4517 
4518 	if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4519 		return -EINVAL;
4520 	if (table->num_entries > MAX_AC_TIMING_ENTRIES)
4521 		return -EINVAL;
4522 
4523 	for (i = 0; i < table->last; i++)
4524 		ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
4525 
4526 	ci_table->last = table->last;
4527 
4528 	for (i = 0; i < table->num_entries; i++) {
4529 		ci_table->mc_reg_table_entry[i].mclk_max =
4530 			table->mc_reg_table_entry[i].mclk_max;
4531 		for (j = 0; j < table->last; j++)
4532 			ci_table->mc_reg_table_entry[i].mc_data[j] =
4533 				table->mc_reg_table_entry[i].mc_data[j];
4534 	}
4535 	ci_table->num_entries = table->num_entries;
4536 
4537 	return 0;
4538 }
4539 
4540 static int ci_register_patching_mc_seq(struct radeon_device *rdev,
4541 				       struct ci_mc_reg_table *table)
4542 {
4543 	u8 i, k;
4544 	u32 tmp;
4545 	bool patch;
4546 
4547 	tmp = RREG32(MC_SEQ_MISC0);
4548 	patch = ((tmp & 0x0000f00) == 0x300) ? true : false;
4549 
4550 	if (patch &&
4551 	    ((rdev->pdev->device == 0x67B0) ||
4552 	     (rdev->pdev->device == 0x67B1))) {
4553 		for (i = 0; i < table->last; i++) {
4554 			if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4555 				return -EINVAL;
4556 			switch(table->mc_reg_address[i].s1 >> 2) {
4557 			case MC_SEQ_MISC1:
4558 				for (k = 0; k < table->num_entries; k++) {
4559 					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4560 					    (table->mc_reg_table_entry[k].mclk_max == 137500))
4561 						table->mc_reg_table_entry[k].mc_data[i] =
4562 							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
4563 							0x00000007;
4564 				}
4565 				break;
4566 			case MC_SEQ_WR_CTL_D0:
4567 				for (k = 0; k < table->num_entries; k++) {
4568 					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4569 					    (table->mc_reg_table_entry[k].mclk_max == 137500))
4570 						table->mc_reg_table_entry[k].mc_data[i] =
4571 							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4572 							0x0000D0DD;
4573 				}
4574 				break;
4575 			case MC_SEQ_WR_CTL_D1:
4576 				for (k = 0; k < table->num_entries; k++) {
4577 					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4578 					    (table->mc_reg_table_entry[k].mclk_max == 137500))
4579 						table->mc_reg_table_entry[k].mc_data[i] =
4580 							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4581 							0x0000D0DD;
4582 				}
4583 				break;
4584 			case MC_SEQ_WR_CTL_2:
4585 				for (k = 0; k < table->num_entries; k++) {
4586 					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4587 					    (table->mc_reg_table_entry[k].mclk_max == 137500))
4588 						table->mc_reg_table_entry[k].mc_data[i] = 0;
4589 				}
4590 				break;
4591 			case MC_SEQ_CAS_TIMING:
4592 				for (k = 0; k < table->num_entries; k++) {
4593 					if (table->mc_reg_table_entry[k].mclk_max == 125000)
4594 						table->mc_reg_table_entry[k].mc_data[i] =
4595 							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4596 							0x000C0140;
4597 					else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4598 						table->mc_reg_table_entry[k].mc_data[i] =
4599 							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4600 							0x000C0150;
4601 				}
4602 				break;
4603 			case MC_SEQ_MISC_TIMING:
4604 				for (k = 0; k < table->num_entries; k++) {
4605 					if (table->mc_reg_table_entry[k].mclk_max == 125000)
4606 						table->mc_reg_table_entry[k].mc_data[i] =
4607 							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4608 							0x00000030;
4609 					else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4610 						table->mc_reg_table_entry[k].mc_data[i] =
4611 							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4612 							0x00000035;
4613 				}
4614 				break;
4615 			default:
4616 				break;
4617 			}
4618 		}
4619 
4620 		WREG32(MC_SEQ_IO_DEBUG_INDEX, 3);
4621 		tmp = RREG32(MC_SEQ_IO_DEBUG_DATA);
4622 		tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
4623 		WREG32(MC_SEQ_IO_DEBUG_INDEX, 3);
4624 		WREG32(MC_SEQ_IO_DEBUG_DATA, tmp);
4625 	}
4626 
4627 	return 0;
4628 }
4629 
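/*
 * Copy the live MC sequence registers into their _LP shadows, load the
 * VBIOS MC register table for this memory module, and convert it into
 * the driver's ci_mc_reg_table, applying board patches and the derived
 * special registers.
 */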
4630 static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
4631 {
4632 	struct ci_power_info *pi = ci_get_pi(rdev);
4633 	struct atom_mc_reg_table *table;
4634 	struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
4635 	u8 module_index = rv770_get_memory_module_index(rdev);
4636 	int ret;
4637 
4638 	table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
4639 	if (!table)
4640 		return -ENOMEM;
4641 
4642 	WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
4643 	WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
4644 	WREG32(MC_SEQ_DLL_STBY_LP, RREG32(MC_SEQ_DLL_STBY));
4645 	WREG32(MC_SEQ_G5PDX_CMD0_LP, RREG32(MC_SEQ_G5PDX_CMD0));
4646 	WREG32(MC_SEQ_G5PDX_CMD1_LP, RREG32(MC_SEQ_G5PDX_CMD1));
4647 	WREG32(MC_SEQ_G5PDX_CTRL_LP, RREG32(MC_SEQ_G5PDX_CTRL));
4648 	WREG32(MC_SEQ_PMG_DVS_CMD_LP, RREG32(MC_SEQ_PMG_DVS_CMD));
4649 	WREG32(MC_SEQ_PMG_DVS_CTL_LP, RREG32(MC_SEQ_PMG_DVS_CTL));
4650 	WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
4651 	WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
4652 	WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
4653 	WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
4654 	WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
4655 	WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
4656 	WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
4657 	WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
4658 	WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
4659 	WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
4660 	WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
4661 	WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
4662 
4663 	ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
4664 	if (ret)
4665 		goto init_mc_done;
4666 
4667 	ret = ci_copy_vbios_mc_reg_table(table, ci_table);
4668 	if (ret)
4669 		goto init_mc_done;
4670 
4671 	ci_set_s0_mc_reg_index(ci_table);
4672 
4673 	ret = ci_register_patching_mc_seq(rdev, ci_table);
4674 	if (ret)
4675 		goto init_mc_done;
4676 
4677 	ret = ci_set_mc_special_registers(rdev, ci_table);
4678 	if (ret)
4679 		goto init_mc_done;
4680 
4681 	ci_set_valid_flag(ci_table);
4682 
4683 init_mc_done:
4684 	kfree(table);
4685 
4686 	return ret;
4687 }
4688 
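/*
 * Pack the register address pairs into the SMC structure, keeping only
 * the entries whose bit is set in valid_flag.  The SMC expects
 * big-endian values, hence the cpu_to_be16() conversions.
 */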
4689 static int ci_populate_mc_reg_addresses(struct radeon_device *rdev,
4690 					SMU7_Discrete_MCRegisters *mc_reg_table)
4691 {
4692 	struct ci_power_info *pi = ci_get_pi(rdev);
4693 	u32 i, j;
4694 
4695 	for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
4696 		if (pi->mc_reg_table.valid_flag & (1 << j)) {
4697 			if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4698 				return -EINVAL;
4699 			mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
4700 			mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
4701 			i++;
4702 		}
4703 	}
4704 
4705 	mc_reg_table->last = (u8)i;
4706 
4707 	return 0;
4708 }
4709 
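/*
 * Compact one table entry's register values into an SMC register set,
 * again dropping columns not marked in valid_flag and byte-swapping to
 * the SMC's big-endian layout.
 */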
4710 static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
4711 				    SMU7_Discrete_MCRegisterSet *data,
4712 				    u32 num_entries, u32 valid_flag)
4713 {
4714 	u32 i, j;
4715 
4716 	for (i = 0, j = 0; j < num_entries; j++) {
4717 		if (valid_flag & (1 << j)) {
4718 			data->value[i] = cpu_to_be32(entry->mc_data[j]);
4719 			i++;
4720 		}
4721 	}
4722 }
4723 
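/*
 * Pick the first table entry whose mclk_max covers the requested memory
 * clock (falling back to the highest entry) and convert it for the SMC.
 */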
4724 static void ci_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
4725 						 const u32 memory_clock,
4726 						 SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
4727 {
4728 	struct ci_power_info *pi = ci_get_pi(rdev);
4729 	u32 i = 0;
4730 
4731 	for (i = 0; i < pi->mc_reg_table.num_entries; i++) {
4732 		if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
4733 			break;
4734 	}
4735 
4736 	if ((i == pi->mc_reg_table.num_entries) && (i > 0))
4737 		--i;
4738 
4739 	ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
4740 				mc_reg_table_data, pi->mc_reg_table.last,
4741 				pi->mc_reg_table.valid_flag);
4742 }
4743 
4744 static void ci_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
4745 					   SMU7_Discrete_MCRegisters *mc_reg_table)
4746 {
4747 	struct ci_power_info *pi = ci_get_pi(rdev);
4748 	u32 i;
4749 
4750 	for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
4751 		ci_convert_mc_reg_table_entry_to_smc(rdev,
4752 						     pi->dpm_table.mclk_table.dpm_levels[i].value,
4753 						     &mc_reg_table->data[i]);
4754 }
4755 
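/*
 * Initial upload: zero the SMC-side table, fill in the register
 * addresses and the per-MCLK-level register data, then copy the whole
 * structure into SMC RAM at mc_reg_table_start.
 */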
4756 static int ci_populate_initial_mc_reg_table(struct radeon_device *rdev)
4757 {
4758 	struct ci_power_info *pi = ci_get_pi(rdev);
4759 	int ret;
4760 
4761 	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4762 
4763 	ret = ci_populate_mc_reg_addresses(rdev, &pi->smc_mc_reg_table);
4764 	if (ret)
4765 		return ret;
4766 	ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
4767 
4768 	return ci_copy_bytes_to_smc(rdev,
4769 				    pi->mc_reg_table_start,
4770 				    (u8 *)&pi->smc_mc_reg_table,
4771 				    sizeof(SMU7_Discrete_MCRegisters),
4772 				    pi->sram_end);
4773 }
4774 
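/*
 * On an overdrive MCLK change only the data[] portion needs to be
 * regenerated, so upload just that part at its offset within the
 * SMC-side structure.
 */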
4775 static int ci_update_and_upload_mc_reg_table(struct radeon_device *rdev)
4776 {
4777 	struct ci_power_info *pi = ci_get_pi(rdev);
4778 
4779 	if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
4780 		return 0;
4781 
4782 	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4783 
4784 	ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
4785 
4786 	return ci_copy_bytes_to_smc(rdev,
4787 				    pi->mc_reg_table_start +
4788 				    offsetof(SMU7_Discrete_MCRegisters, data[0]),
4789 				    (u8 *)&pi->smc_mc_reg_table.data[0],
4790 				    sizeof(SMU7_Discrete_MCRegisterSet) *
4791 				    pi->dpm_table.mclk_table.count,
4792 				    pi->sram_end);
4793 }
4794 
4795 static void ci_enable_voltage_control(struct radeon_device *rdev)
4796 {
4797 	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
4798 
4799 	tmp |= VOLT_PWRMGT_EN;
4800 	WREG32_SMC(GENERAL_PWRMGT, tmp);
4801 }
4802 
4803 static enum radeon_pcie_gen ci_get_maximum_link_speed(struct radeon_device *rdev,
4804 						      struct radeon_ps *radeon_state)
4805 {
4806 	struct ci_ps *state = ci_get_ps(radeon_state);
4807 	int i;
4808 	u16 pcie_speed, max_speed = 0;
4809 
4810 	for (i = 0; i < state->performance_level_count; i++) {
4811 		pcie_speed = state->performance_levels[i].pcie_gen;
4812 		if (max_speed < pcie_speed)
4813 			max_speed = pcie_speed;
4814 	}
4815 
4816 	return max_speed;
4817 }
4818 
4819 static u16 ci_get_current_pcie_speed(struct radeon_device *rdev)
4820 {
4821 	u32 speed_cntl = 0;
4822 
4823 	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
4824 	speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
4825 
4826 	return (u16)speed_cntl;
4827 }
4828 
4829 static int ci_get_current_pcie_lane_number(struct radeon_device *rdev)
4830 {
4831 	u32 link_width = 0;
4832 
4833 	link_width = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL) & LC_LINK_WIDTH_RD_MASK;
4834 	link_width >>= LC_LINK_WIDTH_RD_SHIFT;
4835 
4836 	switch (link_width) {
4837 	case RADEON_PCIE_LC_LINK_WIDTH_X1:
4838 		return 1;
4839 	case RADEON_PCIE_LC_LINK_WIDTH_X2:
4840 		return 2;
4841 	case RADEON_PCIE_LC_LINK_WIDTH_X4:
4842 		return 4;
4843 	case RADEON_PCIE_LC_LINK_WIDTH_X8:
4844 		return 8;
4845 	case RADEON_PCIE_LC_LINK_WIDTH_X12:
4846 		/* not actually supported */
4847 		return 12;
4848 	case RADEON_PCIE_LC_LINK_WIDTH_X0:
4849 	case RADEON_PCIE_LC_LINK_WIDTH_X16:
4850 	default:
4851 		return 16;
4852 	}
4853 }
4854 
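/*
 * If the new state wants a faster PCIe link, ask the platform (via the
 * ACPI PSPP performance request) for gen3/gen2 before switching,
 * falling back to the current speed if the request is denied.  A
 * downgrade is only noted here; the notification happens after the
 * state change.
 */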
4855 static void ci_request_link_speed_change_before_state_change(struct radeon_device *rdev,
4856 							     struct radeon_ps *radeon_new_state,
4857 							     struct radeon_ps *radeon_current_state)
4858 {
4859 	struct ci_power_info *pi = ci_get_pi(rdev);
4860 	enum radeon_pcie_gen target_link_speed =
4861 		ci_get_maximum_link_speed(rdev, radeon_new_state);
4862 	enum radeon_pcie_gen current_link_speed;
4863 
4864 	if (pi->force_pcie_gen == RADEON_PCIE_GEN_INVALID)
4865 		current_link_speed = ci_get_maximum_link_speed(rdev, radeon_current_state);
4866 	else
4867 		current_link_speed = pi->force_pcie_gen;
4868 
4869 	pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
4870 	pi->pspp_notify_required = false;
4871 	if (target_link_speed > current_link_speed) {
4872 		switch (target_link_speed) {
4873 #ifdef CONFIG_ACPI
4874 		case RADEON_PCIE_GEN3:
4875 			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
4876 				break;
4877 			pi->force_pcie_gen = RADEON_PCIE_GEN2;
4878 			if (current_link_speed == RADEON_PCIE_GEN2)
4879 				break;
4880 			/* fall through */
4881 		case RADEON_PCIE_GEN2:
4882 			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
4883 				break;
4884 #endif
4885 			/* fall through */
4886 		default:
4887 			pi->force_pcie_gen = ci_get_current_pcie_speed(rdev);
4888 			break;
4889 		}
4890 	} else {
4891 		if (target_link_speed < current_link_speed)
4892 			pi->pspp_notify_required = true;
4893 	}
4894 }
4895 
4896 static void ci_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
4897 							   struct radeon_ps *radeon_new_state,
4898 							   struct radeon_ps *radeon_current_state)
4899 {
4900 	struct ci_power_info *pi = ci_get_pi(rdev);
4901 	enum radeon_pcie_gen target_link_speed =
4902 		ci_get_maximum_link_speed(rdev, radeon_new_state);
4903 	u8 request;
4904 
4905 	if (pi->pspp_notify_required) {
4906 		if (target_link_speed == RADEON_PCIE_GEN3)
4907 			request = PCIE_PERF_REQ_PECI_GEN3;
4908 		else if (target_link_speed == RADEON_PCIE_GEN2)
4909 			request = PCIE_PERF_REQ_PECI_GEN2;
4910 		else
4911 			request = PCIE_PERF_REQ_PECI_GEN1;
4912 
4913 		if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
4914 		    (ci_get_current_pcie_speed(rdev) > 0))
4915 			return;
4916 
4917 #ifdef CONFIG_ACPI
4918 		radeon_acpi_pcie_performance_request(rdev, request, false);
4919 #endif
4920 	}
4921 }
4922 
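/*
 * Cache the voltage limits and the maximum AC clock/voltage levels from
 * the last (highest) entries of the pptable dependency tables.
 */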
4923 static int ci_set_private_data_variables_based_on_pptable(struct radeon_device *rdev)
4924 {
4925 	struct ci_power_info *pi = ci_get_pi(rdev);
4926 	struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
4927 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
4928 	struct radeon_clock_voltage_dependency_table *allowed_mclk_vddc_table =
4929 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
4930 	struct radeon_clock_voltage_dependency_table *allowed_mclk_vddci_table =
4931 		&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
4932 
4933 	if (allowed_sclk_vddc_table == NULL)
4934 		return -EINVAL;
4935 	if (allowed_sclk_vddc_table->count < 1)
4936 		return -EINVAL;
4937 	if (allowed_mclk_vddc_table == NULL)
4938 		return -EINVAL;
4939 	if (allowed_mclk_vddc_table->count < 1)
4940 		return -EINVAL;
4941 	if (allowed_mclk_vddci_table == NULL)
4942 		return -EINVAL;
4943 	if (allowed_mclk_vddci_table->count < 1)
4944 		return -EINVAL;
4945 
4946 	pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
4947 	pi->max_vddc_in_pp_table =
4948 		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4949 
4950 	pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
4951 	pi->max_vddci_in_pp_table =
4952 		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4953 
4954 	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
4955 		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
4956 	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
4957 		allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
4958 	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
4959 		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4960 	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
4961 		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4962 
4963 	return 0;
4964 }
4965 
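/*
 * The VBIOS encodes leakage voltages as virtual IDs; replace any such
 * ID with the actual voltage measured for this part.
 */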
4966 static void ci_patch_with_vddc_leakage(struct radeon_device *rdev, u16 *vddc)
4967 {
4968 	struct ci_power_info *pi = ci_get_pi(rdev);
4969 	struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
4970 	u32 leakage_index;
4971 
4972 	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
4973 		if (leakage_table->leakage_id[leakage_index] == *vddc) {
4974 			*vddc = leakage_table->actual_voltage[leakage_index];
4975 			break;
4976 		}
4977 	}
4978 }
4979 
4980 static void ci_patch_with_vddci_leakage(struct radeon_device *rdev, u16 *vddci)
4981 {
4982 	struct ci_power_info *pi = ci_get_pi(rdev);
4983 	struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
4984 	u32 leakage_index;
4985 
4986 	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
4987 		if (leakage_table->leakage_id[leakage_index] == *vddci) {
4988 			*vddci = leakage_table->actual_voltage[leakage_index];
4989 			break;
4990 		}
4991 	}
4992 }
4993 
4994 static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4995 								      struct radeon_clock_voltage_dependency_table *table)
4996 {
4997 	u32 i;
4998 
4999 	if (table) {
5000 		for (i = 0; i < table->count; i++)
5001 			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
5002 	}
5003 }
5004 
5005 static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct radeon_device *rdev,
5006 								       struct radeon_clock_voltage_dependency_table *table)
5007 {
5008 	u32 i;
5009 
5010 	if (table) {
5011 		for (i = 0; i < table->count; i++)
5012 			ci_patch_with_vddci_leakage(rdev, &table->entries[i].v);
5013 	}
5014 }
5015 
5016 static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
5017 									  struct radeon_vce_clock_voltage_dependency_table *table)
5018 {
5019 	u32 i;
5020 
5021 	if (table) {
5022 		for (i = 0; i < table->count; i++)
5023 			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
5024 	}
5025 }
5026 
5027 static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
5028 									  struct radeon_uvd_clock_voltage_dependency_table *table)
5029 {
5030 	u32 i;
5031 
5032 	if (table) {
5033 		for (i = 0; i < table->count; i++)
5034 			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
5035 	}
5036 }
5037 
5038 static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct radeon_device *rdev,
5039 								   struct radeon_phase_shedding_limits_table *table)
5040 {
5041 	u32 i;
5042 
5043 	if (table) {
5044 		for (i = 0; i < table->count; i++)
5045 			ci_patch_with_vddc_leakage(rdev, &table->entries[i].voltage);
5046 	}
5047 }
5048 
5049 static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct radeon_device *rdev,
5050 							    struct radeon_clock_and_voltage_limits *table)
5051 {
5052 	if (table) {
5053 		ci_patch_with_vddc_leakage(rdev, (u16 *)&table->vddc);
5054 		ci_patch_with_vddci_leakage(rdev, (u16 *)&table->vddci);
5055 	}
5056 }
5057 
5058 static void ci_patch_cac_leakage_table_with_vddc_leakage(struct radeon_device *rdev,
5059 							 struct radeon_cac_leakage_table *table)
5060 {
5061 	u32 i;
5062 
5063 	if (table) {
5064 		for (i = 0; i < table->count; i++)
5065 			ci_patch_with_vddc_leakage(rdev, &table->entries[i].vddc);
5066 	}
5067 }
5068 
5069 static void ci_patch_dependency_tables_with_leakage(struct radeon_device *rdev)
5070 {
5071 
5072 	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
5073 								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
5074 	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
5075 								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
5076 	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
5077 								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
5078 	ci_patch_clock_voltage_dependency_table_with_vddci_leakage(rdev,
5079 								   &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
5080 	ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(rdev,
5081 								      &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
5082 	ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(rdev,
5083 								      &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
5084 	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
5085 								  &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
5086 	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
5087 								  &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
5088 	ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(rdev,
5089 							       &rdev->pm.dpm.dyn_state.phase_shedding_limits_table);
5090 	ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
5091 							&rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
5092 	ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
5093 							&rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
5094 	ci_patch_cac_leakage_table_with_vddc_leakage(rdev,
5095 						     &rdev->pm.dpm.dyn_state.cac_leakage_table);
5096 
5097 }
5098 
5099 static void ci_get_memory_type(struct radeon_device *rdev)
5100 {
5101 	struct ci_power_info *pi = ci_get_pi(rdev);
5102 	u32 tmp;
5103 
5104 	tmp = RREG32(MC_SEQ_MISC0);
5105 
5106 	if (((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT) ==
5107 	    MC_SEQ_MISC0_GDDR5_VALUE)
5108 		pi->mem_gddr5 = true;
5109 	else
5110 		pi->mem_gddr5 = false;
5111 
5112 }
5113 
5114 static void ci_update_current_ps(struct radeon_device *rdev,
5115 				 struct radeon_ps *rps)
5116 {
5117 	struct ci_ps *new_ps = ci_get_ps(rps);
5118 	struct ci_power_info *pi = ci_get_pi(rdev);
5119 
5120 	pi->current_rps = *rps;
5121 	pi->current_ps = *new_ps;
5122 	pi->current_rps.ps_priv = &pi->current_ps;
5123 }
5124 
5125 static void ci_update_requested_ps(struct radeon_device *rdev,
5126 				   struct radeon_ps *rps)
5127 {
5128 	struct ci_ps *new_ps = ci_get_ps(rps);
5129 	struct ci_power_info *pi = ci_get_pi(rdev);
5130 
5131 	pi->requested_rps = *rps;
5132 	pi->requested_ps = *new_ps;
5133 	pi->requested_rps.ps_priv = &pi->requested_ps;
5134 }
5135 
5136 int ci_dpm_pre_set_power_state(struct radeon_device *rdev)
5137 {
5138 	struct ci_power_info *pi = ci_get_pi(rdev);
5139 	struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
5140 	struct radeon_ps *new_ps = &requested_ps;
5141 
5142 	ci_update_requested_ps(rdev, new_ps);
5143 
5144 	ci_apply_state_adjust_rules(rdev, &pi->requested_rps);
5145 
5146 	return 0;
5147 }
5148 
5149 void ci_dpm_post_set_power_state(struct radeon_device *rdev)
5150 {
5151 	struct ci_power_info *pi = ci_get_pi(rdev);
5152 	struct radeon_ps *new_ps = &pi->requested_rps;
5153 
5154 	ci_update_current_ps(rdev, new_ps);
5155 }
5156 
5157 
5158 void ci_dpm_setup_asic(struct radeon_device *rdev)
5159 {
5160 	int r;
5161 
5162 	r = ci_mc_load_microcode(rdev);
5163 	if (r)
5164 		DRM_ERROR("Failed to load MC firmware!\n");
5165 	ci_read_clock_registers(rdev);
5166 	ci_get_memory_type(rdev);
5167 	ci_enable_acpi_power_management(rdev);
5168 	ci_init_sclk_t(rdev);
5169 }
5170 
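/*
 * Full DPM bring-up: construct the voltage and MC register tables,
 * upload the SMC firmware and state tables, start the SMC, then enable
 * the individual DPM features (ULV, deep sleep, DIDT, CAC, power
 * containment, thermal control) in order.
 */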
5171 int ci_dpm_enable(struct radeon_device *rdev)
5172 {
5173 	struct ci_power_info *pi = ci_get_pi(rdev);
5174 	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
5175 	int ret;
5176 
5177 	if (ci_is_smc_running(rdev))
5178 		return -EINVAL;
5179 	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
5180 		ci_enable_voltage_control(rdev);
5181 		ret = ci_construct_voltage_tables(rdev);
5182 		if (ret) {
5183 			DRM_ERROR("ci_construct_voltage_tables failed\n");
5184 			return ret;
5185 		}
5186 	}
5187 	if (pi->caps_dynamic_ac_timing) {
5188 		ret = ci_initialize_mc_reg_table(rdev);
5189 		if (ret)
5190 			pi->caps_dynamic_ac_timing = false;
5191 	}
5192 	if (pi->dynamic_ss)
5193 		ci_enable_spread_spectrum(rdev, true);
5194 	if (pi->thermal_protection)
5195 		ci_enable_thermal_protection(rdev, true);
5196 	ci_program_sstp(rdev);
5197 	ci_enable_display_gap(rdev);
5198 	ci_program_vc(rdev);
5199 	ret = ci_upload_firmware(rdev);
5200 	if (ret) {
5201 		DRM_ERROR("ci_upload_firmware failed\n");
5202 		return ret;
5203 	}
5204 	ret = ci_process_firmware_header(rdev);
5205 	if (ret) {
5206 		DRM_ERROR("ci_process_firmware_header failed\n");
5207 		return ret;
5208 	}
5209 	ret = ci_initial_switch_from_arb_f0_to_f1(rdev);
5210 	if (ret) {
5211 		DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
5212 		return ret;
5213 	}
5214 	ret = ci_init_smc_table(rdev);
5215 	if (ret) {
5216 		DRM_ERROR("ci_init_smc_table failed\n");
5217 		return ret;
5218 	}
5219 	ret = ci_init_arb_table_index(rdev);
5220 	if (ret) {
5221 		DRM_ERROR("ci_init_arb_table_index failed\n");
5222 		return ret;
5223 	}
5224 	if (pi->caps_dynamic_ac_timing) {
5225 		ret = ci_populate_initial_mc_reg_table(rdev);
5226 		if (ret) {
5227 			DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
5228 			return ret;
5229 		}
5230 	}
5231 	ret = ci_populate_pm_base(rdev);
5232 	if (ret) {
5233 		DRM_ERROR("ci_populate_pm_base failed\n");
5234 		return ret;
5235 	}
5236 	ci_dpm_start_smc(rdev);
5237 	ci_enable_vr_hot_gpio_interrupt(rdev);
5238 	ret = ci_notify_smc_display_change(rdev, false);
5239 	if (ret) {
5240 		DRM_ERROR("ci_notify_smc_display_change failed\n");
5241 		return ret;
5242 	}
5243 	ci_enable_sclk_control(rdev, true);
5244 	ret = ci_enable_ulv(rdev, true);
5245 	if (ret) {
5246 		DRM_ERROR("ci_enable_ulv failed\n");
5247 		return ret;
5248 	}
5249 	ret = ci_enable_ds_master_switch(rdev, true);
5250 	if (ret) {
5251 		DRM_ERROR("ci_enable_ds_master_switch failed\n");
5252 		return ret;
5253 	}
5254 	ret = ci_start_dpm(rdev);
5255 	if (ret) {
5256 		DRM_ERROR("ci_start_dpm failed\n");
5257 		return ret;
5258 	}
5259 	ret = ci_enable_didt(rdev, true);
5260 	if (ret) {
5261 		DRM_ERROR("ci_enable_didt failed\n");
5262 		return ret;
5263 	}
5264 	ret = ci_enable_smc_cac(rdev, true);
5265 	if (ret) {
5266 		DRM_ERROR("ci_enable_smc_cac failed\n");
5267 		return ret;
5268 	}
5269 	ret = ci_enable_power_containment(rdev, true);
5270 	if (ret) {
5271 		DRM_ERROR("ci_enable_power_containment failed\n");
5272 		return ret;
5273 	}
5274 
5275 	ret = ci_power_control_set_level(rdev);
5276 	if (ret) {
5277 		DRM_ERROR("ci_power_control_set_level failed\n");
5278 		return ret;
5279 	}
5280 
5281 	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
5282 
5283 	ret = ci_enable_thermal_based_sclk_dpm(rdev, true);
5284 	if (ret) {
5285 		DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n");
5286 		return ret;
5287 	}
5288 
5289 	ci_thermal_start_thermal_controller(rdev);
5290 
5291 	ci_update_current_ps(rdev, boot_ps);
5292 
5293 	return 0;
5294 }
5295 
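/*
 * Program the supported temperature range with thermal alerts disabled,
 * then re-enable them.
 */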
5296 static int ci_set_temperature_range(struct radeon_device *rdev)
5297 {
5298 	int ret;
5299 
5300 	ret = ci_thermal_enable_alert(rdev, false);
5301 	if (ret)
5302 		return ret;
5303 	ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
5304 	if (ret)
5305 		return ret;
5306 	ret = ci_thermal_enable_alert(rdev, true);
5307 	if (ret)
5308 		return ret;
5309 
5310 	return ret;
5311 }
5312 
5313 int ci_dpm_late_enable(struct radeon_device *rdev)
5314 {
5315 	int ret;
5316 
5317 	ret = ci_set_temperature_range(rdev);
5318 	if (ret)
5319 		return ret;
5320 
5321 	ci_dpm_powergate_uvd(rdev, true);
5322 
5323 	return 0;
5324 }
5325 
5326 void ci_dpm_disable(struct radeon_device *rdev)
5327 {
5328 	struct ci_power_info *pi = ci_get_pi(rdev);
5329 	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
5330 
5331 	ci_dpm_powergate_uvd(rdev, false);
5332 
5333 	if (!ci_is_smc_running(rdev))
5334 		return;
5335 
5336 	ci_thermal_stop_thermal_controller(rdev);
5337 
5338 	if (pi->thermal_protection)
5339 		ci_enable_thermal_protection(rdev, false);
5340 	ci_enable_power_containment(rdev, false);
5341 	ci_enable_smc_cac(rdev, false);
5342 	ci_enable_didt(rdev, false);
5343 	ci_enable_spread_spectrum(rdev, false);
5344 	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
5345 	ci_stop_dpm(rdev);
5346 	ci_enable_ds_master_switch(rdev, false);
5347 	ci_enable_ulv(rdev, false);
5348 	ci_clear_vc(rdev);
5349 	ci_reset_to_default(rdev);
5350 	ci_dpm_stop_smc(rdev);
5351 	ci_force_switch_to_arb_f0(rdev);
5352 	ci_enable_thermal_based_sclk_dpm(rdev, false);
5353 
5354 	ci_update_current_ps(rdev, boot_ps);
5355 }
5356 
5357 int ci_dpm_set_power_state(struct radeon_device *rdev)
5358 {
5359 	struct ci_power_info *pi = ci_get_pi(rdev);
5360 	struct radeon_ps *new_ps = &pi->requested_rps;
5361 	struct radeon_ps *old_ps = &pi->current_rps;
5362 	int ret;
5363 
5364 	ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps);
5365 	if (pi->pcie_performance_request)
5366 		ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps);
5367 	ret = ci_freeze_sclk_mclk_dpm(rdev);
5368 	if (ret) {
5369 		DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
5370 		return ret;
5371 	}
5372 	ret = ci_populate_and_upload_sclk_mclk_dpm_levels(rdev, new_ps);
5373 	if (ret) {
5374 		DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
5375 		return ret;
5376 	}
5377 	ret = ci_generate_dpm_level_enable_mask(rdev, new_ps);
5378 	if (ret) {
5379 		DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
5380 		return ret;
5381 	}
5382 
5383 	ret = ci_update_vce_dpm(rdev, new_ps, old_ps);
5384 	if (ret) {
5385 		DRM_ERROR("ci_update_vce_dpm failed\n");
5386 		return ret;
5387 	}
5388 
5389 	ret = ci_update_sclk_t(rdev);
5390 	if (ret) {
5391 		DRM_ERROR("ci_update_sclk_t failed\n");
5392 		return ret;
5393 	}
5394 	if (pi->caps_dynamic_ac_timing) {
5395 		ret = ci_update_and_upload_mc_reg_table(rdev);
5396 		if (ret) {
5397 			DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
5398 			return ret;
5399 		}
5400 	}
5401 	ret = ci_program_memory_timing_parameters(rdev);
5402 	if (ret) {
5403 		DRM_ERROR("ci_program_memory_timing_parameters failed\n");
5404 		return ret;
5405 	}
5406 	ret = ci_unfreeze_sclk_mclk_dpm(rdev);
5407 	if (ret) {
5408 		DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
5409 		return ret;
5410 	}
5411 	ret = ci_upload_dpm_level_enable_mask(rdev);
5412 	if (ret) {
5413 		DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
5414 		return ret;
5415 	}
5416 	if (pi->pcie_performance_request)
5417 		ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
5418 
5419 	return 0;
5420 }
5421 
5422 #if 0
5423 void ci_dpm_reset_asic(struct radeon_device *rdev)
5424 {
5425 	ci_set_boot_state(rdev);
5426 }
5427 #endif
5428 
5429 void ci_dpm_display_configuration_changed(struct radeon_device *rdev)
5430 {
5431 	ci_program_display_gap(rdev);
5432 }
5433 
5434 union power_info {
5435 	struct _ATOM_POWERPLAY_INFO info;
5436 	struct _ATOM_POWERPLAY_INFO_V2 info_2;
5437 	struct _ATOM_POWERPLAY_INFO_V3 info_3;
5438 	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
5439 	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
5440 	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
5441 };
5442 
5443 union pplib_clock_info {
5444 	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
5445 	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
5446 	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
5447 	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
5448 	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
5449 	struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
5450 };
5451 
5452 union pplib_power_state {
5453 	struct _ATOM_PPLIB_STATE v1;
5454 	struct _ATOM_PPLIB_STATE_V2 v2;
5455 };
5456 
5457 static void ci_parse_pplib_non_clock_info(struct radeon_device *rdev,
5458 					  struct radeon_ps *rps,
5459 					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
5460 					  u8 table_rev)
5461 {
5462 	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
5463 	rps->class = le16_to_cpu(non_clock_info->usClassification);
5464 	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
5465 
5466 	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
5467 		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
5468 		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
5469 	} else {
5470 		rps->vclk = 0;
5471 		rps->dclk = 0;
5472 	}
5473 
5474 	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
5475 		rdev->pm.dpm.boot_ps = rps;
5476 	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
5477 		rdev->pm.dpm.uvd_ps = rps;
5478 }
5479 
5480 static void ci_parse_pplib_clock_info(struct radeon_device *rdev,
5481 				      struct radeon_ps *rps, int index,
5482 				      union pplib_clock_info *clock_info)
5483 {
5484 	struct ci_power_info *pi = ci_get_pi(rdev);
5485 	struct ci_ps *ps = ci_get_ps(rps);
5486 	struct ci_pl *pl = &ps->performance_levels[index];
5487 
5488 	ps->performance_level_count = index + 1;
5489 
5490 	pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5491 	pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
5492 	pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5493 	pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5494 
5495 	pl->pcie_gen = r600_get_pcie_gen_support(rdev,
5496 						 pi->sys_pcie_mask,
5497 						 pi->vbios_boot_state.pcie_gen_bootup_value,
5498 						 clock_info->ci.ucPCIEGen);
5499 	pl->pcie_lane = r600_get_pcie_lane_support(rdev,
5500 						   pi->vbios_boot_state.pcie_lane_bootup_value,
5501 						   le16_to_cpu(clock_info->ci.usPCIELane));
5502 
5503 	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
5504 		pi->acpi_pcie_gen = pl->pcie_gen;
5505 	}
5506 
5507 	if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
5508 		pi->ulv.supported = true;
5509 		pi->ulv.pl = *pl;
5510 		pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
5511 	}
5512 
5513 	/* patch up boot state */
5514 	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
5515 		pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
5516 		pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
5517 		pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
5518 		pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
5519 	}
5520 
5521 	switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
5522 	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
5523 		pi->use_pcie_powersaving_levels = true;
5524 		if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
5525 			pi->pcie_gen_powersaving.max = pl->pcie_gen;
5526 		if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
5527 			pi->pcie_gen_powersaving.min = pl->pcie_gen;
5528 		if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
5529 			pi->pcie_lane_powersaving.max = pl->pcie_lane;
5530 		if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
5531 			pi->pcie_lane_powersaving.min = pl->pcie_lane;
5532 		break;
5533 	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
5534 		pi->use_pcie_performance_levels = true;
5535 		if (pi->pcie_gen_performance.max < pl->pcie_gen)
5536 			pi->pcie_gen_performance.max = pl->pcie_gen;
5537 		if (pi->pcie_gen_performance.min > pl->pcie_gen)
5538 			pi->pcie_gen_performance.min = pl->pcie_gen;
5539 		if (pi->pcie_lane_performance.max < pl->pcie_lane)
5540 			pi->pcie_lane_performance.max = pl->pcie_lane;
5541 		if (pi->pcie_lane_performance.min > pl->pcie_lane)
5542 			pi->pcie_lane_performance.min = pl->pcie_lane;
5543 		break;
5544 	default:
5545 		break;
5546 	}
5547 }
5548 
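/*
 * Walk the ATOM PPLib state array and build the radeon_ps/ci_ps power
 * state list, clamping each state to CISLANDS_MAX_HARDWARE_POWERLEVELS
 * performance levels, then fill in the VCE power states.
 */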
5549 static int ci_parse_power_table(struct radeon_device *rdev)
5550 {
5551 	struct radeon_mode_info *mode_info = &rdev->mode_info;
5552 	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
5553 	union pplib_power_state *power_state;
5554 	int i, j, k, non_clock_array_index, clock_array_index;
5555 	union pplib_clock_info *clock_info;
5556 	struct _StateArray *state_array;
5557 	struct _ClockInfoArray *clock_info_array;
5558 	struct _NonClockInfoArray *non_clock_info_array;
5559 	union power_info *power_info;
5560 	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
5561 	u16 data_offset;
5562 	u8 frev, crev;
5563 	u8 *power_state_offset;
5564 	struct ci_ps *ps;
5565 
5566 	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
5567 				   &frev, &crev, &data_offset))
5568 		return -EINVAL;
5569 	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
5570 
5571 	state_array = (struct _StateArray *)
5572 		(mode_info->atom_context->bios + data_offset +
5573 		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
5574 	clock_info_array = (struct _ClockInfoArray *)
5575 		(mode_info->atom_context->bios + data_offset +
5576 		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
5577 	non_clock_info_array = (struct _NonClockInfoArray *)
5578 		(mode_info->atom_context->bios + data_offset +
5579 		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
5580 
5581 	rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
5582 				  sizeof(struct radeon_ps),
5583 				  GFP_KERNEL);
5584 	if (!rdev->pm.dpm.ps)
5585 		return -ENOMEM;
5586 	power_state_offset = (u8 *)state_array->states;
5587 	for (i = 0; i < state_array->ucNumEntries; i++) {
5588 		u8 *idx;
5589 		power_state = (union pplib_power_state *)power_state_offset;
5590 		non_clock_array_index = power_state->v2.nonClockInfoIndex;
5591 		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
5592 			&non_clock_info_array->nonClockInfo[non_clock_array_index];
5593 		if (!rdev->pm.power_state[i].clock_info)
5594 			return -EINVAL;
5595 		ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
5596 		if (ps == NULL) {
5597 			kfree(rdev->pm.dpm.ps);
5598 			return -ENOMEM;
5599 		}
5600 		rdev->pm.dpm.ps[i].ps_priv = ps;
5601 		ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
5602 					      non_clock_info,
5603 					      non_clock_info_array->ucEntrySize);
5604 		k = 0;
5605 		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
5606 		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
5607 			clock_array_index = idx[j];
5608 			if (clock_array_index >= clock_info_array->ucNumEntries)
5609 				continue;
5610 			if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
5611 				break;
5612 			clock_info = (union pplib_clock_info *)
5613 				((u8 *)&clock_info_array->clockInfo[0] +
5614 				 (clock_array_index * clock_info_array->ucEntrySize));
5615 			ci_parse_pplib_clock_info(rdev,
5616 						  &rdev->pm.dpm.ps[i], k,
5617 						  clock_info);
5618 			k++;
5619 		}
5620 		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
5621 	}
5622 	rdev->pm.dpm.num_ps = state_array->ucNumEntries;
5623 
5624 	/* fill in the vce power states */
5625 	for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
5626 		u32 sclk, mclk;
5627 		clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx;
5628 		clock_info = (union pplib_clock_info *)
5629 			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
5630 		sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5631 		sclk |= clock_info->ci.ucEngineClockHigh << 16;
5632 		mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5633 		mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5634 		rdev->pm.dpm.vce_states[i].sclk = sclk;
5635 		rdev->pm.dpm.vce_states[i].mclk = mclk;
5636 	}
5637 
5638 	return 0;
5639 }
5640 
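/*
 * Read the bootup voltages and clocks from the ATOM FirmwareInfo table;
 * the PCIe bootup values are taken from the current hardware state.
 */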
5641 static int ci_get_vbios_boot_values(struct radeon_device *rdev,
5642 				    struct ci_vbios_boot_state *boot_state)
5643 {
5644 	struct radeon_mode_info *mode_info = &rdev->mode_info;
5645 	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
5646 	ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
5647 	u8 frev, crev;
5648 	u16 data_offset;
5649 
5650 	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
5651 				   &frev, &crev, &data_offset)) {
5652 		firmware_info =
5653 			(ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
5654 						    data_offset);
5655 		boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
5656 		boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
5657 		boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
5658 		boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(rdev);
5659 		boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(rdev);
5660 		boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
5661 		boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
5662 
5663 		return 0;
5664 	}
5665 	return -EINVAL;
5666 }
5667 
5668 void ci_dpm_fini(struct radeon_device *rdev)
5669 {
5670 	int i;
5671 
5672 	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
5673 		kfree(rdev->pm.dpm.ps[i].ps_priv);
5674 	}
5675 	kfree(rdev->pm.dpm.ps);
5676 	kfree(rdev->pm.dpm.priv);
5677 	kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
5678 	r600_free_extended_power_table(rdev);
5679 }
5680 
5681 int ci_dpm_init(struct radeon_device *rdev)
5682 {
5683 	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
5684 	SMU7_Discrete_DpmTable  *dpm_table;
5685 	struct radeon_gpio_rec gpio;
5686 	u16 data_offset, size;
5687 	u8 frev, crev;
5688 	struct ci_power_info *pi;
5689 	enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
5690 #ifndef __NetBSD__		/* XXX radeon pcie */
5691 	struct pci_dev *root = rdev->pdev->bus->self;
5692 #endif
5693 	int ret;
5694 
5695 	pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
5696 	if (pi == NULL)
5697 		return -ENOMEM;
5698 	rdev->pm.dpm.priv = pi;
5699 
5700 #ifndef __NetBSD__		/* XXX radeon pcie */
5701 	if (!pci_is_root_bus(rdev->pdev->bus))
5702 		speed_cap = pcie_get_speed_cap(root);
5703 #endif
5704 	if (speed_cap == PCI_SPEED_UNKNOWN) {
5705 		pi->sys_pcie_mask = 0;
5706 	} else {
5707 		if (speed_cap == PCIE_SPEED_8_0GT)
5708 			pi->sys_pcie_mask = RADEON_PCIE_SPEED_25 |
5709 				RADEON_PCIE_SPEED_50 |
5710 				RADEON_PCIE_SPEED_80;
5711 		else if (speed_cap == PCIE_SPEED_5_0GT)
5712 			pi->sys_pcie_mask = RADEON_PCIE_SPEED_25 |
5713 				RADEON_PCIE_SPEED_50;
5714 		else
5715 			pi->sys_pcie_mask = RADEON_PCIE_SPEED_25;
5716 	}
5717 	pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
5718 
5719 	pi->pcie_gen_performance.max = RADEON_PCIE_GEN1;
5720 	pi->pcie_gen_performance.min = RADEON_PCIE_GEN3;
5721 	pi->pcie_gen_powersaving.max = RADEON_PCIE_GEN1;
5722 	pi->pcie_gen_powersaving.min = RADEON_PCIE_GEN3;
5723 
5724 	pi->pcie_lane_performance.max = 0;
5725 	pi->pcie_lane_performance.min = 16;
5726 	pi->pcie_lane_powersaving.max = 0;
5727 	pi->pcie_lane_powersaving.min = 16;
5728 
5729 	ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state);
5730 	if (ret) {
5731 		ci_dpm_fini(rdev);
5732 		return ret;
5733 	}
5734 
5735 	ret = r600_get_platform_caps(rdev);
5736 	if (ret) {
5737 		ci_dpm_fini(rdev);
5738 		return ret;
5739 	}
5740 
5741 	ret = r600_parse_extended_power_table(rdev);
5742 	if (ret) {
5743 		ci_dpm_fini(rdev);
5744 		return ret;
5745 	}
5746 
5747 	ret = ci_parse_power_table(rdev);
5748 	if (ret) {
5749 		ci_dpm_fini(rdev);
5750 		return ret;
5751 	}
5752 
5753 	pi->dll_default_on = false;
5754 	pi->sram_end = SMC_RAM_END;
5755 
5756 	pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
5757 	pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
5758 	pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
5759 	pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
5760 	pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
5761 	pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
5762 	pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
5763 	pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;
5764 
5765 	pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
5766 
5767 	pi->sclk_dpm_key_disabled = 0;
5768 	pi->mclk_dpm_key_disabled = 0;
5769 	pi->pcie_dpm_key_disabled = 0;
5770 	pi->thermal_sclk_dpm_enabled = 0;
5771 
5772 	/* mclk dpm is unstable on some R7 260X cards with the old mc ucode */
5773 	if ((rdev->pdev->device == 0x6658) &&
5774 	    (rdev->mc_fw->size == (BONAIRE_MC_UCODE_SIZE * 4))) {
5775 		pi->mclk_dpm_key_disabled = 1;
5776 	}
5777 
5778 	pi->caps_sclk_ds = true;
5779 
5780 	pi->mclk_strobe_mode_threshold = 40000;
5781 	pi->mclk_stutter_mode_threshold = 40000;
5782 	pi->mclk_edc_enable_threshold = 40000;
5783 	pi->mclk_edc_wr_enable_threshold = 40000;
5784 
5785 	ci_initialize_powertune_defaults(rdev);
5786 
5787 	pi->caps_fps = false;
5788 
5789 	pi->caps_sclk_throttle_low_notification = false;
5790 
5791 	pi->caps_uvd_dpm = true;
5792 	pi->caps_vce_dpm = true;
5793 
5794 	ci_get_leakage_voltages(rdev);
5795 	ci_patch_dependency_tables_with_leakage(rdev);
5796 	ci_set_private_data_variables_based_on_pptable(rdev);
5797 
5798 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
5799 		kcalloc(4,
5800 			sizeof(struct radeon_clock_voltage_dependency_entry),
5801 			GFP_KERNEL);
5802 	if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
5803 		ci_dpm_fini(rdev);
5804 		return -ENOMEM;
5805 	}
5806 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
5807 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
5808 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
5809 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
5810 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
5811 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
5812 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
5813 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
5814 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
5815 
5816 	rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
5817 	rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
5818 	rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
5819 
5820 	rdev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
5821 	rdev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
5822 	rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
5823 	rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
5824 
5825 	if (rdev->family == CHIP_HAWAII) {
5826 		pi->thermal_temp_setting.temperature_low = 94500;
5827 		pi->thermal_temp_setting.temperature_high = 95000;
5828 		pi->thermal_temp_setting.temperature_shutdown = 104000;
5829 	} else {
5830 		pi->thermal_temp_setting.temperature_low = 99500;
5831 		pi->thermal_temp_setting.temperature_high = 100000;
5832 		pi->thermal_temp_setting.temperature_shutdown = 104000;
5833 	}
5834 
5835 	pi->uvd_enabled = false;
5836 
5837 	dpm_table = &pi->smc_state_table;
5838 
5839 	gpio = radeon_atombios_lookup_gpio(rdev, VDDC_VRHOT_GPIO_PINID);
5840 	if (gpio.valid) {
5841 		dpm_table->VRHotGpio = gpio.shift;
5842 		rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
5843 	} else {
5844 		dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
5845 		rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
5846 	}
5847 
5848 	gpio = radeon_atombios_lookup_gpio(rdev, PP_AC_DC_SWITCH_GPIO_PINID);
5849 	if (gpio.valid) {
5850 		dpm_table->AcDcGpio = gpio.shift;
5851 		rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
5852 	} else {
5853 		dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
5854 		rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
5855 	}
5856 
5857 	gpio = radeon_atombios_lookup_gpio(rdev, VDDC_PCC_GPIO_PINID);
5858 	if (gpio.valid) {
5859 		u32 tmp = RREG32_SMC(CNB_PWRMGT_CNTL);
5860 
5861 		switch (gpio.shift) {
5862 		case 0:
5863 			tmp &= ~GNB_SLOW_MODE_MASK;
5864 			tmp |= GNB_SLOW_MODE(1);
5865 			break;
5866 		case 1:
5867 			tmp &= ~GNB_SLOW_MODE_MASK;
5868 			tmp |= GNB_SLOW_MODE(2);
5869 			break;
5870 		case 2:
5871 			tmp |= GNB_SLOW;
5872 			break;
5873 		case 3:
5874 			tmp |= FORCE_NB_PS1;
5875 			break;
5876 		case 4:
5877 			tmp |= DPM_ENABLED;
5878 			break;
5879 		default:
5880 			DRM_DEBUG("Invalid PCC GPIO: %u!\n", gpio.shift);
5881 			break;
5882 		}
5883 		WREG32_SMC(CNB_PWRMGT_CNTL, tmp);
5884 	}
5885 
5886 	pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5887 	pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5888 	pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5889 	if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
5890 		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5891 	else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
5892 		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5893 
5894 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
5895 		if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
5896 			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5897 		else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
5898 			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5899 		else
5900 			rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
5901 	}
5902 
5903 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
5904 		if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
5905 			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5906 		else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
5907 			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5908 		else
5909 			rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
5910 	}
5911 
5912 	pi->vddc_phase_shed_control = true;
5913 
5914 #if defined(CONFIG_ACPI)
5915 	pi->pcie_performance_request =
5916 		radeon_acpi_is_pcie_performance_request_supported(rdev);
5917 #else
5918 	pi->pcie_performance_request = false;
5919 #endif
5920 
5921 	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
5922 				   &frev, &crev, &data_offset)) {
5923 		pi->caps_sclk_ss_support = true;
5924 		pi->caps_mclk_ss_support = true;
5925 		pi->dynamic_ss = true;
5926 	} else {
5927 		pi->caps_sclk_ss_support = false;
5928 		pi->caps_mclk_ss_support = false;
5929 		pi->dynamic_ss = true;
5930 	}
5931 
5932 	if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
5933 		pi->thermal_protection = true;
5934 	else
5935 		pi->thermal_protection = false;
5936 
5937 	pi->caps_dynamic_ac_timing = true;
5938 
5939 	pi->uvd_power_gated = false;
5940 
5941 	/* make sure dc limits are valid */
5942 	if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
5943 	    (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
5944 		rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
5945 			rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
5946 
5947 	pi->fan_ctrl_is_in_default_mode = true;
5948 
5949 	return 0;
5950 }
5951 
5952 #ifdef CONFIG_DEBUG_FS
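/* Report the SMC's averaged clocks and the UVD/VCE power state. */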
5953 void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
5954 						    struct seq_file *m)
5955 {
5956 	struct ci_power_info *pi = ci_get_pi(rdev);
5957 	struct radeon_ps *rps = &pi->current_rps;
5958 	u32 sclk = ci_get_average_sclk_freq(rdev);
5959 	u32 mclk = ci_get_average_mclk_freq(rdev);
5960 
5961 	seq_printf(m, "uvd    %sabled\n", pi->uvd_enabled ? "en" : "dis");
5962 	seq_printf(m, "vce    %sabled\n", rps->vce_active ? "en" : "dis");
5963 	seq_printf(m, "power level avg    sclk: %u mclk: %u\n",
5964 		   sclk, mclk);
5965 }
5966 #endif
5967 
5968 void ci_dpm_print_power_state(struct radeon_device *rdev,
5969 			      struct radeon_ps *rps)
5970 {
5971 	struct ci_ps *ps = ci_get_ps(rps);
5972 	struct ci_pl *pl;
5973 	int i;
5974 
5975 	r600_dpm_print_class_info(rps->class, rps->class2);
5976 	r600_dpm_print_cap_info(rps->caps);
5977 	printk("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
5978 	for (i = 0; i < ps->performance_level_count; i++) {
5979 		pl = &ps->performance_levels[i];
5980 		printk("\t\tpower level %d    sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
5981 		       i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
5982 	}
5983 	r600_dpm_print_ps_status(rdev, rps);
5984 }
5985 
5986 u32 ci_dpm_get_current_sclk(struct radeon_device *rdev)
5987 {
5988 	u32 sclk = ci_get_average_sclk_freq(rdev);
5989 
5990 	return sclk;
5991 }
5992 
5993 u32 ci_dpm_get_current_mclk(struct radeon_device *rdev)
5994 {
5995 	u32 mclk = ci_get_average_mclk_freq(rdev);
5996 
5997 	return mclk;
5998 }
5999 
6000 u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low)
6001 {
6002 	struct ci_power_info *pi = ci_get_pi(rdev);
6003 	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
6004 
6005 	if (low)
6006 		return requested_state->performance_levels[0].sclk;
6007 	else
6008 		return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
6009 }
6010 
6011 u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low)
6012 {
6013 	struct ci_power_info *pi = ci_get_pi(rdev);
6014 	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
6015 
6016 	if (low)
6017 		return requested_state->performance_levels[0].mclk;
6018 	else
6019 		return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
6020 }
6021