1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/delay.h>
25 #include <linux/fb.h>
26 #include <linux/module.h>
27 #include <linux/slab.h>
28 
29 #include "hwmgr.h"
30 #include "amd_powerplay.h"
31 #include "hardwaremanager.h"
32 #include "ppatomfwctrl.h"
33 #include "atomfirmware.h"
34 #include "cgs_common.h"
35 #include "vega10_powertune.h"
36 #include "smu9.h"
37 #include "smu9_driver_if.h"
38 #include "vega10_inc.h"
39 #include "soc15_common.h"
40 #include "pppcielanes.h"
41 #include "vega10_hwmgr.h"
42 #include "vega10_processpptables.h"
43 #include "vega10_pptable.h"
44 #include "vega10_thermal.h"
45 #include "pp_debug.h"
46 #include "amd_pcie_helpers.h"
47 #include "ppinterrupt.h"
48 #include "pp_overdriver.h"
49 #include "pp_thermal.h"
50 
51 #include "smuio/smuio_9_0_offset.h"
52 #include "smuio/smuio_9_0_sh_mask.h"
53 
54 #define HBM_MEMORY_CHANNEL_WIDTH    128
55 
56 static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
57 
58 #define mmDF_CS_AON0_DramBaseAddress0                                                                  0x0044
59 #define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX                                                         0
60 
61 //DF_CS_AON0_DramBaseAddress0
62 #define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT                                                        0x0
63 #define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT                                                    0x1
64 #define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT                                                      0x4
65 #define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT                                                      0x8
66 #define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT                                                      0xc
67 #define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK                                                          0x00000001L
68 #define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK                                                      0x00000002L
69 #define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK                                                        0x000000F0L
70 #define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK                                                        0x00000700L
71 #define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK                                                        0xFFFFF000L
72 
73 static const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic);
74 
75 struct vega10_power_state *cast_phw_vega10_power_state(
76 				  struct pp_hw_power_state *hw_ps);
77 struct vega10_power_state *cast_phw_vega10_power_state(
78 				  struct pp_hw_power_state *hw_ps)
79 {
80 	PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic),
81 				"Invalid Powerstate Type!",
82 				 return NULL;);
83 
84 	return (struct vega10_power_state *)hw_ps;
85 }
86 
87 const struct vega10_power_state *cast_const_phw_vega10_power_state(
88 				 const struct pp_hw_power_state *hw_ps);
89 const struct vega10_power_state *cast_const_phw_vega10_power_state(
90 				 const struct pp_hw_power_state *hw_ps)
91 {
92 	PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic),
93 				"Invalid Powerstate Type!",
94 				 return NULL;);
95 
96 	return (const struct vega10_power_state *)hw_ps;
97 }
98 
99 static void vega10_set_default_registry_data(struct pp_hwmgr *hwmgr)
100 {
101 	struct vega10_hwmgr *data = hwmgr->backend;
102 
103 	data->registry_data.sclk_dpm_key_disabled =
104 			hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
105 	data->registry_data.socclk_dpm_key_disabled =
106 			hwmgr->feature_mask & PP_SOCCLK_DPM_MASK ? false : true;
107 	data->registry_data.mclk_dpm_key_disabled =
108 			hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
109 	data->registry_data.pcie_dpm_key_disabled =
110 			hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
111 
112 	data->registry_data.dcefclk_dpm_key_disabled =
113 			hwmgr->feature_mask & PP_DCEFCLK_DPM_MASK ? false : true;
114 
115 	if (hwmgr->feature_mask & PP_POWER_CONTAINMENT_MASK) {
116 		data->registry_data.power_containment_support = 1;
117 		data->registry_data.enable_pkg_pwr_tracking_feature = 1;
118 		data->registry_data.enable_tdc_limit_feature = 1;
119 	}
120 
121 	data->registry_data.clock_stretcher_support =
122 			hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK ? true : false;
123 
124 	data->registry_data.ulv_support =
125 			hwmgr->feature_mask & PP_ULV_MASK ? true : false;
126 
127 	data->registry_data.sclk_deep_sleep_support =
128 			hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK ? true : false;
129 
130 	data->registry_data.disable_water_mark = 0;
131 
132 	data->registry_data.fan_control_support = 1;
133 	data->registry_data.thermal_support = 1;
134 	data->registry_data.fw_ctf_enabled = 1;
135 
136 	data->registry_data.avfs_support = 1;
137 	data->registry_data.led_dpm_enabled = 1;
138 
139 	data->registry_data.vr0hot_enabled = 1;
140 	data->registry_data.vr1hot_enabled = 1;
141 	data->registry_data.regulator_hot_gpio_support = 1;
142 
143 	data->registry_data.didt_support = 1;
144 	if (data->registry_data.didt_support) {
145 		data->registry_data.didt_mode = 6;
146 		data->registry_data.sq_ramping_support = 1;
147 		data->registry_data.db_ramping_support = 0;
148 		data->registry_data.td_ramping_support = 0;
149 		data->registry_data.tcp_ramping_support = 0;
150 		data->registry_data.dbr_ramping_support = 0;
151 		data->registry_data.edc_didt_support = 1;
152 		data->registry_data.gc_didt_support = 0;
153 		data->registry_data.psm_didt_support = 0;
154 	}
155 
156 	data->display_voltage_mode = PPVEGA10_VEGA10DISPLAYVOLTAGEMODE_DFLT;
157 	data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
158 	data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
159 	data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
160 	data->disp_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
161 	data->disp_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
162 	data->disp_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
163 	data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
164 	data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
165 	data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
166 	data->phy_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
167 	data->phy_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
168 	data->phy_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
169 
170 	data->gfxclk_average_alpha = PPVEGA10_VEGA10GFXCLKAVERAGEALPHA_DFLT;
171 	data->socclk_average_alpha = PPVEGA10_VEGA10SOCCLKAVERAGEALPHA_DFLT;
172 	data->uclk_average_alpha = PPVEGA10_VEGA10UCLKCLKAVERAGEALPHA_DFLT;
173 	data->gfx_activity_average_alpha = PPVEGA10_VEGA10GFXACTIVITYAVERAGEALPHA_DFLT;
174 }
175 
176 static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
177 {
178 	struct vega10_hwmgr *data = hwmgr->backend;
179 	struct phm_ppt_v2_information *table_info =
180 			(struct phm_ppt_v2_information *)hwmgr->pptable;
181 	struct amdgpu_device *adev = hwmgr->adev;
182 
183 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
184 			PHM_PlatformCaps_SclkDeepSleep);
185 
186 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
187 			PHM_PlatformCaps_DynamicPatchPowerState);
188 
189 	if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE)
190 		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
191 				PHM_PlatformCaps_ControlVDDCI);
192 
193 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
194 			PHM_PlatformCaps_EnableSMU7ThermalManagement);
195 
196 	if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
197 		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
198 				PHM_PlatformCaps_UVDPowerGating);
199 
200 	if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
201 		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
202 				PHM_PlatformCaps_VCEPowerGating);
203 
204 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
205 			PHM_PlatformCaps_UnTabledHardwareInterface);
206 
207 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
208 			PHM_PlatformCaps_FanSpeedInTableIsRPM);
209 
210 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
211 			PHM_PlatformCaps_ODFuzzyFanControlSupport);
212 
213 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
214 				PHM_PlatformCaps_DynamicPowerManagement);
215 
216 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
217 			PHM_PlatformCaps_SMC);
218 
219 	/* power tune caps */
220 	/* assume disabled */
221 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
222 			PHM_PlatformCaps_PowerContainment);
223 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
224 			PHM_PlatformCaps_DiDtSupport);
225 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
226 			PHM_PlatformCaps_SQRamping);
227 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
228 			PHM_PlatformCaps_DBRamping);
229 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
230 			PHM_PlatformCaps_TDRamping);
231 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
232 			PHM_PlatformCaps_TCPRamping);
233 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
234 			PHM_PlatformCaps_DBRRamping);
235 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
236 			PHM_PlatformCaps_DiDtEDCEnable);
237 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
238 			PHM_PlatformCaps_GCEDC);
239 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
240 			PHM_PlatformCaps_PSM);
241 
242 	if (data->registry_data.didt_support) {
243 		phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtSupport);
244 		if (data->registry_data.sq_ramping_support)
245 			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping);
246 		if (data->registry_data.db_ramping_support)
247 			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping);
248 		if (data->registry_data.td_ramping_support)
249 			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping);
250 		if (data->registry_data.tcp_ramping_support)
251 			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping);
252 		if (data->registry_data.dbr_ramping_support)
253 			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping);
254 		if (data->registry_data.edc_didt_support)
255 			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtEDCEnable);
256 		if (data->registry_data.gc_didt_support)
257 			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC);
258 		if (data->registry_data.psm_didt_support)
259 			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM);
260 	}
261 
262 	if (data->registry_data.power_containment_support)
263 		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
264 				PHM_PlatformCaps_PowerContainment);
265 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
266 			PHM_PlatformCaps_CAC);
267 
268 	if (table_info->tdp_table->usClockStretchAmount &&
269 			data->registry_data.clock_stretcher_support)
270 		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
271 				PHM_PlatformCaps_ClockStretcher);
272 
273 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
274 			PHM_PlatformCaps_RegulatorHot);
275 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
276 			PHM_PlatformCaps_AutomaticDCTransition);
277 
278 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
279 			PHM_PlatformCaps_UVDDPM);
280 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
281 			PHM_PlatformCaps_VCEDPM);
282 
283 	return 0;
284 }
285 
286 static int vega10_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
287 {
288 	struct vega10_hwmgr *data = hwmgr->backend;
289 	struct phm_ppt_v2_information *table_info =
290 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
291 	struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
292 	struct vega10_odn_vddc_lookup_table *od_lookup_table;
293 	struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table;
294 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table[3];
295 	struct phm_ppt_v1_clock_voltage_dependency_table *od_table[3];
296 	struct pp_atomfwctrl_avfs_parameters avfs_params = {0};
297 	uint32_t i;
298 	int result;
299 
300 	result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
301 	if (!result) {
302 		data->odn_dpm_table.max_vddc = avfs_params.ulMaxVddc;
303 		data->odn_dpm_table.min_vddc = avfs_params.ulMinVddc;
304 	}
305 
306 	od_lookup_table = &odn_table->vddc_lookup_table;
307 	vddc_lookup_table = table_info->vddc_lookup_table;
308 
309 	for (i = 0; i < vddc_lookup_table->count; i++)
310 		od_lookup_table->entries[i].us_vdd = vddc_lookup_table->entries[i].us_vdd;
311 
312 	od_lookup_table->count = vddc_lookup_table->count;
313 
314 	dep_table[0] = table_info->vdd_dep_on_sclk;
315 	dep_table[1] = table_info->vdd_dep_on_mclk;
316 	dep_table[2] = table_info->vdd_dep_on_socclk;
317 	od_table[0] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_sclk;
318 	od_table[1] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_mclk;
319 	od_table[2] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_socclk;
320 
321 	for (i = 0; i < 3; i++)
322 		smu_get_voltage_dependency_table_ppt_v1(dep_table[i], od_table[i]);
323 
324 	if (odn_table->max_vddc == 0 || odn_table->max_vddc > 2000)
325 		odn_table->max_vddc = dep_table[0]->entries[dep_table[0]->count - 1].vddc;
326 	if (odn_table->min_vddc == 0 || odn_table->min_vddc > 2000)
327 		odn_table->min_vddc = dep_table[0]->entries[0].vddc;
328 
329 	i = od_table[2]->count - 1;
330 	od_table[2]->entries[i].clk = hwmgr->platform_descriptor.overdriveLimit.memoryClock > od_table[2]->entries[i].clk ?
331 					hwmgr->platform_descriptor.overdriveLimit.memoryClock :
332 					od_table[2]->entries[i].clk;
333 	od_table[2]->entries[i].vddc = odn_table->max_vddc > od_table[2]->entries[i].vddc ?
334 					odn_table->max_vddc :
335 					od_table[2]->entries[i].vddc;
336 
337 	return 0;
338 }
339 
340 static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
341 {
342 	struct vega10_hwmgr *data = hwmgr->backend;
343 	int i;
344 	uint32_t sub_vendor_id, hw_revision;
345 	struct amdgpu_device *adev = hwmgr->adev;
346 
347 	vega10_initialize_power_tune_defaults(hwmgr);
348 
349 	for (i = 0; i < GNLD_FEATURES_MAX; i++) {
350 		data->smu_features[i].smu_feature_id = 0xffff;
351 		data->smu_features[i].smu_feature_bitmap = 1 << i;
352 		data->smu_features[i].enabled = false;
353 		data->smu_features[i].supported = false;
354 	}
355 
356 	data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
357 			FEATURE_DPM_PREFETCHER_BIT;
358 	data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id =
359 			FEATURE_DPM_GFXCLK_BIT;
360 	data->smu_features[GNLD_DPM_UCLK].smu_feature_id =
361 			FEATURE_DPM_UCLK_BIT;
362 	data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id =
363 			FEATURE_DPM_SOCCLK_BIT;
364 	data->smu_features[GNLD_DPM_UVD].smu_feature_id =
365 			FEATURE_DPM_UVD_BIT;
366 	data->smu_features[GNLD_DPM_VCE].smu_feature_id =
367 			FEATURE_DPM_VCE_BIT;
368 	data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id =
369 			FEATURE_DPM_MP0CLK_BIT;
370 	data->smu_features[GNLD_DPM_LINK].smu_feature_id =
371 			FEATURE_DPM_LINK_BIT;
372 	data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id =
373 			FEATURE_DPM_DCEFCLK_BIT;
374 	data->smu_features[GNLD_ULV].smu_feature_id =
375 			FEATURE_ULV_BIT;
376 	data->smu_features[GNLD_AVFS].smu_feature_id =
377 			FEATURE_AVFS_BIT;
378 	data->smu_features[GNLD_DS_GFXCLK].smu_feature_id =
379 			FEATURE_DS_GFXCLK_BIT;
380 	data->smu_features[GNLD_DS_SOCCLK].smu_feature_id =
381 			FEATURE_DS_SOCCLK_BIT;
382 	data->smu_features[GNLD_DS_LCLK].smu_feature_id =
383 			FEATURE_DS_LCLK_BIT;
384 	data->smu_features[GNLD_PPT].smu_feature_id =
385 			FEATURE_PPT_BIT;
386 	data->smu_features[GNLD_TDC].smu_feature_id =
387 			FEATURE_TDC_BIT;
388 	data->smu_features[GNLD_THERMAL].smu_feature_id =
389 			FEATURE_THERMAL_BIT;
390 	data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id =
391 			FEATURE_GFX_PER_CU_CG_BIT;
392 	data->smu_features[GNLD_RM].smu_feature_id =
393 			FEATURE_RM_BIT;
394 	data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id =
395 			FEATURE_DS_DCEFCLK_BIT;
396 	data->smu_features[GNLD_ACDC].smu_feature_id =
397 			FEATURE_ACDC_BIT;
398 	data->smu_features[GNLD_VR0HOT].smu_feature_id =
399 			FEATURE_VR0HOT_BIT;
400 	data->smu_features[GNLD_VR1HOT].smu_feature_id =
401 			FEATURE_VR1HOT_BIT;
402 	data->smu_features[GNLD_FW_CTF].smu_feature_id =
403 			FEATURE_FW_CTF_BIT;
404 	data->smu_features[GNLD_LED_DISPLAY].smu_feature_id =
405 			FEATURE_LED_DISPLAY_BIT;
406 	data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
407 			FEATURE_FAN_CONTROL_BIT;
408 	data->smu_features[GNLD_ACG].smu_feature_id = FEATURE_ACG_BIT;
409 	data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT;
410 	data->smu_features[GNLD_PCC_LIMIT].smu_feature_id = FEATURE_PCC_LIMIT_CONTROL_BIT;
411 
412 	if (!data->registry_data.prefetcher_dpm_key_disabled)
413 		data->smu_features[GNLD_DPM_PREFETCHER].supported = true;
414 
415 	if (!data->registry_data.sclk_dpm_key_disabled)
416 		data->smu_features[GNLD_DPM_GFXCLK].supported = true;
417 
418 	if (!data->registry_data.mclk_dpm_key_disabled)
419 		data->smu_features[GNLD_DPM_UCLK].supported = true;
420 
421 	if (!data->registry_data.socclk_dpm_key_disabled)
422 		data->smu_features[GNLD_DPM_SOCCLK].supported = true;
423 
424 	if (PP_CAP(PHM_PlatformCaps_UVDDPM))
425 		data->smu_features[GNLD_DPM_UVD].supported = true;
426 
427 	if (PP_CAP(PHM_PlatformCaps_VCEDPM))
428 		data->smu_features[GNLD_DPM_VCE].supported = true;
429 
430 	if (!data->registry_data.pcie_dpm_key_disabled)
431 		data->smu_features[GNLD_DPM_LINK].supported = true;
432 
433 	if (!data->registry_data.dcefclk_dpm_key_disabled)
434 		data->smu_features[GNLD_DPM_DCEFCLK].supported = true;
435 
436 	if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep) &&
437 	    data->registry_data.sclk_deep_sleep_support) {
438 		data->smu_features[GNLD_DS_GFXCLK].supported = true;
439 		data->smu_features[GNLD_DS_SOCCLK].supported = true;
440 		data->smu_features[GNLD_DS_LCLK].supported = true;
441 		data->smu_features[GNLD_DS_DCEFCLK].supported = true;
442 	}
443 
444 	if (data->registry_data.enable_pkg_pwr_tracking_feature)
445 		data->smu_features[GNLD_PPT].supported = true;
446 
447 	if (data->registry_data.enable_tdc_limit_feature)
448 		data->smu_features[GNLD_TDC].supported = true;
449 
450 	if (data->registry_data.thermal_support)
451 		data->smu_features[GNLD_THERMAL].supported = true;
452 
453 	if (data->registry_data.fan_control_support)
454 		data->smu_features[GNLD_FAN_CONTROL].supported = true;
455 
456 	if (data->registry_data.fw_ctf_enabled)
457 		data->smu_features[GNLD_FW_CTF].supported = true;
458 
459 	if (data->registry_data.avfs_support)
460 		data->smu_features[GNLD_AVFS].supported = true;
461 
462 	if (data->registry_data.led_dpm_enabled)
463 		data->smu_features[GNLD_LED_DISPLAY].supported = true;
464 
465 	if (data->registry_data.vr1hot_enabled)
466 		data->smu_features[GNLD_VR1HOT].supported = true;
467 
468 	if (data->registry_data.vr0hot_enabled)
469 		data->smu_features[GNLD_VR0HOT].supported = true;
470 
471 	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
472 	hwmgr->smu_version = smum_get_argument(hwmgr);
473 		/* ACG firmware has major version 5 */
474 	if ((hwmgr->smu_version & 0xff000000) == 0x5000000)
475 		data->smu_features[GNLD_ACG].supported = true;
476 	if (data->registry_data.didt_support)
477 		data->smu_features[GNLD_DIDT].supported = true;
478 
479 	hw_revision = adev->pdev->revision;
480 	sub_vendor_id = adev->pdev->subsystem_vendor;
481 
482 	if ((hwmgr->chip_id == 0x6862 ||
483 		hwmgr->chip_id == 0x6861 ||
484 		hwmgr->chip_id == 0x6868) &&
485 		(hw_revision == 0) &&
486 		(sub_vendor_id != 0x1002))
487 		data->smu_features[GNLD_PCC_LIMIT].supported = true;
488 }
489 
490 #ifdef PPLIB_VEGA10_EVV_SUPPORT
491 static int vega10_get_socclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
492 	phm_ppt_v1_voltage_lookup_table *lookup_table,
493 	uint16_t virtual_voltage_id, int32_t *socclk)
494 {
495 	uint8_t entry_id;
496 	uint8_t voltage_id;
497 	struct phm_ppt_v2_information *table_info =
498 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
499 
500 	PP_ASSERT_WITH_CODE(lookup_table->count != 0,
501 			"Lookup table is empty",
502 			return -EINVAL);
503 
504 	/* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */
505 	for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) {
506 		voltage_id = table_info->vdd_dep_on_socclk->entries[entry_id].vddInd;
507 		if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
508 			break;
509 	}
510 
511 	PP_ASSERT_WITH_CODE(entry_id < table_info->vdd_dep_on_socclk->count,
512 			"Can't find requested voltage id in vdd_dep_on_socclk table!",
513 			return -EINVAL);
514 
515 	*socclk = table_info->vdd_dep_on_socclk->entries[entry_id].clk;
516 
517 	return 0;
518 }
519 
520 #define ATOM_VIRTUAL_VOLTAGE_ID0             0xff01
521 /**
522 * Get Leakage VDDC based on leakage ID.
523 *
524 * @param    hwmgr  the address of the powerplay hardware manager.
525 * @return   always 0.
526 */
527 static int vega10_get_evv_voltages(struct pp_hwmgr *hwmgr)
528 {
529 	struct vega10_hwmgr *data = hwmgr->backend;
530 	uint16_t vv_id;
531 	uint32_t vddc = 0;
532 	uint16_t i, j;
533 	uint32_t sclk = 0;
534 	struct phm_ppt_v2_information *table_info =
535 			(struct phm_ppt_v2_information *)hwmgr->pptable;
536 	struct phm_ppt_v1_clock_voltage_dependency_table *socclk_table =
537 			table_info->vdd_dep_on_socclk;
538 	int result;
539 
540 	for (i = 0; i < VEGA10_MAX_LEAKAGE_COUNT; i++) {
541 		vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
542 
543 		if (!vega10_get_socclk_for_voltage_evv(hwmgr,
544 				table_info->vddc_lookup_table, vv_id, &sclk)) {
545 			if (PP_CAP(PHM_PlatformCaps_ClockStretcher)) {
546 				for (j = 1; j < socclk_table->count; j++) {
547 					if (socclk_table->entries[j].clk == sclk &&
548 							socclk_table->entries[j].cks_enable == 0) {
549 						sclk += 5000;
550 						break;
551 					}
552 				}
553 			}
554 
555 			PP_ASSERT_WITH_CODE(!atomctrl_get_voltage_evv_on_sclk_ai(hwmgr,
556 					VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc),
557 					"Error retrieving EVV voltage value!",
558 					continue);
559 
560 
561 			/* need to make sure vddc is less than 2v or else, it could burn the ASIC. */
562 			PP_ASSERT_WITH_CODE((vddc < 2000 && vddc != 0),
563 					"Invalid VDDC value", result = -EINVAL;);
564 
565 			/* the voltage should not be zero nor equal to leakage ID */
566 			if (vddc != 0 && vddc != vv_id) {
567 				data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc/100);
568 				data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
569 				data->vddc_leakage.count++;
570 			}
571 		}
572 	}
573 
574 	return 0;
575 }
576 
577 /**
578  * Change virtual leakage voltage to actual value.
579  *
580  * @param     hwmgr  the address of the powerplay hardware manager.
581  * @param     pointer to changing voltage
582  * @param     pointer to leakage table
583  */
584 static void vega10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
585 		uint16_t *voltage, struct vega10_leakage_voltage *leakage_table)
586 {
587 	uint32_t index;
588 
589 	/* search for leakage voltage ID 0xff01 ~ 0xff08 */
590 	for (index = 0; index < leakage_table->count; index++) {
591 		/* if this voltage matches a leakage voltage ID */
592 		/* patch with actual leakage voltage */
593 		if (leakage_table->leakage_id[index] == *voltage) {
594 			*voltage = leakage_table->actual_voltage[index];
595 			break;
596 		}
597 	}
598 
599 	if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
600 		pr_info("Voltage value looks like a Leakage ID but it's not patched\n");
601 }
602 
603 /**
604 * Patch voltage lookup table by EVV leakages.
605 *
606 * @param     hwmgr  the address of the powerplay hardware manager.
607 * @param     pointer to voltage lookup table
608 * @param     pointer to leakage table
609 * @return     always 0
610 */
611 static int vega10_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
612 		phm_ppt_v1_voltage_lookup_table *lookup_table,
613 		struct vega10_leakage_voltage *leakage_table)
614 {
615 	uint32_t i;
616 
617 	for (i = 0; i < lookup_table->count; i++)
618 		vega10_patch_with_vdd_leakage(hwmgr,
619 				&lookup_table->entries[i].us_vdd, leakage_table);
620 
621 	return 0;
622 }
623 
624 static int vega10_patch_clock_voltage_limits_with_vddc_leakage(
625 		struct pp_hwmgr *hwmgr, struct vega10_leakage_voltage *leakage_table,
626 		uint16_t *vddc)
627 {
628 	vega10_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
629 
630 	return 0;
631 }
632 #endif
633 
634 static int vega10_patch_voltage_dependency_tables_with_lookup_table(
635 		struct pp_hwmgr *hwmgr)
636 {
637 	uint8_t entry_id, voltage_id;
638 	unsigned i;
639 	struct phm_ppt_v2_information *table_info =
640 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
641 	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
642 			table_info->mm_dep_table;
643 	struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
644 			table_info->vdd_dep_on_mclk;
645 
646 	for (i = 0; i < 6; i++) {
647 		struct phm_ppt_v1_clock_voltage_dependency_table *vdt = NULL;
648 		switch (i) {
649 			case 0: vdt = table_info->vdd_dep_on_socclk; break;
650 			case 1: vdt = table_info->vdd_dep_on_sclk; break;
651 			case 2: vdt = table_info->vdd_dep_on_dcefclk; break;
652 			case 3: vdt = table_info->vdd_dep_on_pixclk; break;
653 			case 4: vdt = table_info->vdd_dep_on_dispclk; break;
654 			case 5: vdt = table_info->vdd_dep_on_phyclk; break;
655 		}
656 
657 		for (entry_id = 0; entry_id < vdt->count; entry_id++) {
658 			voltage_id = vdt->entries[entry_id].vddInd;
659 			vdt->entries[entry_id].vddc =
660 					table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
661 		}
662 	}
663 
664 	for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
665 		voltage_id = mm_table->entries[entry_id].vddcInd;
666 		mm_table->entries[entry_id].vddc =
667 			table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
668 	}
669 
670 	for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
671 		voltage_id = mclk_table->entries[entry_id].vddInd;
672 		mclk_table->entries[entry_id].vddc =
673 				table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
674 		voltage_id = mclk_table->entries[entry_id].vddciInd;
675 		mclk_table->entries[entry_id].vddci =
676 				table_info->vddci_lookup_table->entries[voltage_id].us_vdd;
677 		voltage_id = mclk_table->entries[entry_id].mvddInd;
678 		mclk_table->entries[entry_id].mvdd =
679 				table_info->vddmem_lookup_table->entries[voltage_id].us_vdd;
680 	}
681 
682 
683 	return 0;
684 
685 }
686 
687 static int vega10_sort_lookup_table(struct pp_hwmgr *hwmgr,
688 		struct phm_ppt_v1_voltage_lookup_table *lookup_table)
689 {
690 	uint32_t table_size, i, j;
691 	struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
692 
693 	PP_ASSERT_WITH_CODE(lookup_table && lookup_table->count,
694 		"Lookup table is empty", return -EINVAL);
695 
696 	table_size = lookup_table->count;
697 
698 	/* Sorting voltages */
699 	for (i = 0; i < table_size - 1; i++) {
700 		for (j = i + 1; j > 0; j--) {
701 			if (lookup_table->entries[j].us_vdd <
702 					lookup_table->entries[j - 1].us_vdd) {
703 				tmp_voltage_lookup_record = lookup_table->entries[j - 1];
704 				lookup_table->entries[j - 1] = lookup_table->entries[j];
705 				lookup_table->entries[j] = tmp_voltage_lookup_record;
706 			}
707 		}
708 	}
709 
710 	return 0;
711 }
712 
713 static int vega10_complete_dependency_tables(struct pp_hwmgr *hwmgr)
714 {
715 	int result = 0;
716 	int tmp_result;
717 	struct phm_ppt_v2_information *table_info =
718 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
719 #ifdef PPLIB_VEGA10_EVV_SUPPORT
720 	struct vega10_hwmgr *data = hwmgr->backend;
721 
722 	tmp_result = vega10_patch_lookup_table_with_leakage(hwmgr,
723 			table_info->vddc_lookup_table, &(data->vddc_leakage));
724 	if (tmp_result)
725 		result = tmp_result;
726 
727 	tmp_result = vega10_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
728 			&(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
729 	if (tmp_result)
730 		result = tmp_result;
731 #endif
732 
733 	tmp_result = vega10_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
734 	if (tmp_result)
735 		result = tmp_result;
736 
737 	tmp_result = vega10_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
738 	if (tmp_result)
739 		result = tmp_result;
740 
741 	return result;
742 }
743 
744 static int vega10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
745 {
746 	struct phm_ppt_v2_information *table_info =
747 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
748 	struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
749 			table_info->vdd_dep_on_socclk;
750 	struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
751 			table_info->vdd_dep_on_mclk;
752 
753 	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table,
754 		"VDD dependency on SCLK table is missing. This table is mandatory", return -EINVAL);
755 	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
756 		"VDD dependency on SCLK table is empty. This table is mandatory", return -EINVAL);
757 
758 	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table,
759 		"VDD dependency on MCLK table is missing.  This table is mandatory", return -EINVAL);
760 	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
761 		"VDD dependency on MCLK table is empty.  This table is mandatory", return -EINVAL);
762 
763 	table_info->max_clock_voltage_on_ac.sclk =
764 		allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
765 	table_info->max_clock_voltage_on_ac.mclk =
766 		allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
767 	table_info->max_clock_voltage_on_ac.vddc =
768 		allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
769 	table_info->max_clock_voltage_on_ac.vddci =
770 		allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
771 
772 	hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
773 		table_info->max_clock_voltage_on_ac.sclk;
774 	hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
775 		table_info->max_clock_voltage_on_ac.mclk;
776 	hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
777 		table_info->max_clock_voltage_on_ac.vddc;
778 	hwmgr->dyn_state.max_clock_voltage_on_ac.vddci =
779 		table_info->max_clock_voltage_on_ac.vddci;
780 
781 	return 0;
782 }
783 
784 static int vega10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
785 {
786 	kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
787 	hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
788 
789 	kfree(hwmgr->backend);
790 	hwmgr->backend = NULL;
791 
792 	return 0;
793 }
794 
795 static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
796 {
797 	int result = 0;
798 	struct vega10_hwmgr *data;
799 	uint32_t config_telemetry = 0;
800 	struct pp_atomfwctrl_voltage_table vol_table;
801 	struct amdgpu_device *adev = hwmgr->adev;
802 
803 	data = kzalloc(sizeof(struct vega10_hwmgr), GFP_KERNEL);
804 	if (data == NULL)
805 		return -ENOMEM;
806 
807 	hwmgr->backend = data;
808 
809 	hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO];
810 	hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO;
811 	hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO;
812 
813 	vega10_set_default_registry_data(hwmgr);
814 	data->disable_dpm_mask = 0xff;
815 
816 	/* need to set voltage control types before EVV patching */
817 	data->vddc_control = VEGA10_VOLTAGE_CONTROL_NONE;
818 	data->mvdd_control = VEGA10_VOLTAGE_CONTROL_NONE;
819 	data->vddci_control = VEGA10_VOLTAGE_CONTROL_NONE;
820 
821 	/* VDDCR_SOC */
822 	if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
823 			VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) {
824 		if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr,
825 				VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2,
826 				&vol_table)) {
827 			config_telemetry = ((vol_table.telemetry_slope << 8) & 0xff00) |
828 					(vol_table.telemetry_offset & 0xff);
829 			data->vddc_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2;
830 		}
831 	} else {
832 		kfree(hwmgr->backend);
833 		hwmgr->backend = NULL;
834 		PP_ASSERT_WITH_CODE(false,
835 				"VDDCR_SOC is not SVID2!",
836 				return -1);
837 	}
838 
839 	/* MVDDC */
840 	if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
841 			VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) {
842 		if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr,
843 				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2,
844 				&vol_table)) {
845 			config_telemetry |=
846 					((vol_table.telemetry_slope << 24) & 0xff000000) |
847 					((vol_table.telemetry_offset << 16) & 0xff0000);
848 			data->mvdd_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2;
849 		}
850 	}
851 
852 	 /* VDDCI_MEM */
853 	if (PP_CAP(PHM_PlatformCaps_ControlVDDCI)) {
854 		if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
855 				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
856 			data->vddci_control = VEGA10_VOLTAGE_CONTROL_BY_GPIO;
857 	}
858 
859 	data->config_telemetry = config_telemetry;
860 
861 	vega10_set_features_platform_caps(hwmgr);
862 
863 	vega10_init_dpm_defaults(hwmgr);
864 
865 #ifdef PPLIB_VEGA10_EVV_SUPPORT
866 	/* Get leakage voltage based on leakage ID. */
867 	PP_ASSERT_WITH_CODE(!vega10_get_evv_voltages(hwmgr),
868 			"Get EVV Voltage Failed.  Abort Driver loading!",
869 			return -1);
870 #endif
871 
872 	/* Patch our voltage dependency table with actual leakage voltage
873 	 * We need to perform leakage translation before it's used by other functions
874 	 */
875 	vega10_complete_dependency_tables(hwmgr);
876 
877 	/* Parse pptable data read from VBIOS */
878 	vega10_set_private_data_based_on_pptable(hwmgr);
879 
880 	data->is_tlu_enabled = false;
881 
882 	hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
883 			VEGA10_MAX_HARDWARE_POWERLEVELS;
884 	hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
885 	hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
886 
887 	hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
888 	/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
889 	hwmgr->platform_descriptor.clockStep.engineClock = 500;
890 	hwmgr->platform_descriptor.clockStep.memoryClock = 500;
891 
892 	data->total_active_cus = adev->gfx.cu_info.number;
893 	/* Setup default Overdrive Fan control settings */
894 	data->odn_fan_table.target_fan_speed =
895 			hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM;
896 	data->odn_fan_table.target_temperature =
897 			hwmgr->thermal_controller.
898 			advanceFanControlParameters.ucTargetTemperature;
899 	data->odn_fan_table.min_performance_clock =
900 			hwmgr->thermal_controller.advanceFanControlParameters.
901 			ulMinFanSCLKAcousticLimit;
902 	data->odn_fan_table.min_fan_limit =
903 			hwmgr->thermal_controller.
904 			advanceFanControlParameters.usFanPWMMinLimit *
905 			hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;
906 
907 	data->mem_channels = (RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0) &
908 			DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
909 			DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
910 	PP_ASSERT_WITH_CODE(data->mem_channels < ARRAY_SIZE(channel_number),
911 			"Mem Channel Index Exceeded maximum!",
912 			return -EINVAL);
913 
914 	return result;
915 }
916 
917 static int vega10_init_sclk_threshold(struct pp_hwmgr *hwmgr)
918 {
919 	struct vega10_hwmgr *data = hwmgr->backend;
920 
921 	data->low_sclk_interrupt_threshold = 0;
922 
923 	return 0;
924 }
925 
926 static int vega10_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
927 {
928 	struct vega10_hwmgr *data = hwmgr->backend;
929 	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
930 
931 	struct pp_atomfwctrl_voltage_table table;
932 	uint8_t i, j;
933 	uint32_t mask = 0;
934 	uint32_t tmp;
935 	int32_t ret = 0;
936 
937 	ret = pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_LEDDPM,
938 						VOLTAGE_OBJ_GPIO_LUT, &table);
939 
940 	if (!ret) {
941 		tmp = table.mask_low;
942 		for (i = 0, j = 0; i < 32; i++) {
943 			if (tmp & 1) {
944 				mask |= (uint32_t)(i << (8 * j));
945 				if (++j >= 3)
946 					break;
947 			}
948 			tmp >>= 1;
949 		}
950 	}
951 
952 	pp_table->LedPin0 = (uint8_t)(mask & 0xff);
953 	pp_table->LedPin1 = (uint8_t)((mask >> 8) & 0xff);
954 	pp_table->LedPin2 = (uint8_t)((mask >> 16) & 0xff);
955 	return 0;
956 }
957 
958 static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr)
959 {
960 	PP_ASSERT_WITH_CODE(!vega10_init_sclk_threshold(hwmgr),
961 			"Failed to init sclk threshold!",
962 			return -EINVAL);
963 
964 	PP_ASSERT_WITH_CODE(!vega10_setup_dpm_led_config(hwmgr),
965 			"Failed to set up led dpm config!",
966 			return -EINVAL);
967 
968 	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_NumOfDisplays, 0);
969 
970 	return 0;
971 }
972 
973 /**
974 * Remove repeated voltage values and create table with unique values.
975 *
976 * @param    hwmgr  the address of the powerplay hardware manager.
977 * @param    vol_table  the pointer to changing voltage table
978 * @return    0 in success
979 */
980 
981 static int vega10_trim_voltage_table(struct pp_hwmgr *hwmgr,
982 		struct pp_atomfwctrl_voltage_table *vol_table)
983 {
984 	uint32_t i, j;
985 	uint16_t vvalue;
986 	bool found = false;
987 	struct pp_atomfwctrl_voltage_table *table;
988 
989 	PP_ASSERT_WITH_CODE(vol_table,
990 			"Voltage Table empty.", return -EINVAL);
991 	table = kzalloc(sizeof(struct pp_atomfwctrl_voltage_table),
992 			GFP_KERNEL);
993 
994 	if (!table)
995 		return -ENOMEM;
996 
997 	table->mask_low = vol_table->mask_low;
998 	table->phase_delay = vol_table->phase_delay;
999 
1000 	for (i = 0; i < vol_table->count; i++) {
1001 		vvalue = vol_table->entries[i].value;
1002 		found = false;
1003 
1004 		for (j = 0; j < table->count; j++) {
1005 			if (vvalue == table->entries[j].value) {
1006 				found = true;
1007 				break;
1008 			}
1009 		}
1010 
1011 		if (!found) {
1012 			table->entries[table->count].value = vvalue;
1013 			table->entries[table->count].smio_low =
1014 					vol_table->entries[i].smio_low;
1015 			table->count++;
1016 		}
1017 	}
1018 
1019 	memcpy(vol_table, table, sizeof(struct pp_atomfwctrl_voltage_table));
1020 	kfree(table);
1021 
1022 	return 0;
1023 }
1024 
1025 static int vega10_get_mvdd_voltage_table(struct pp_hwmgr *hwmgr,
1026 		phm_ppt_v1_clock_voltage_dependency_table *dep_table,
1027 		struct pp_atomfwctrl_voltage_table *vol_table)
1028 {
1029 	int i;
1030 
1031 	PP_ASSERT_WITH_CODE(dep_table->count,
1032 			"Voltage Dependency Table empty.",
1033 			return -EINVAL);
1034 
1035 	vol_table->mask_low = 0;
1036 	vol_table->phase_delay = 0;
1037 	vol_table->count = dep_table->count;
1038 
1039 	for (i = 0; i < vol_table->count; i++) {
1040 		vol_table->entries[i].value = dep_table->entries[i].mvdd;
1041 		vol_table->entries[i].smio_low = 0;
1042 	}
1043 
1044 	PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr,
1045 			vol_table),
1046 			"Failed to trim MVDD Table!",
1047 			return -1);
1048 
1049 	return 0;
1050 }
1051 
1052 static int vega10_get_vddci_voltage_table(struct pp_hwmgr *hwmgr,
1053 		phm_ppt_v1_clock_voltage_dependency_table *dep_table,
1054 		struct pp_atomfwctrl_voltage_table *vol_table)
1055 {
1056 	uint32_t i;
1057 
1058 	PP_ASSERT_WITH_CODE(dep_table->count,
1059 			"Voltage Dependency Table empty.",
1060 			return -EINVAL);
1061 
1062 	vol_table->mask_low = 0;
1063 	vol_table->phase_delay = 0;
1064 	vol_table->count = dep_table->count;
1065 
1066 	for (i = 0; i < dep_table->count; i++) {
1067 		vol_table->entries[i].value = dep_table->entries[i].vddci;
1068 		vol_table->entries[i].smio_low = 0;
1069 	}
1070 
1071 	PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr, vol_table),
1072 			"Failed to trim VDDCI table.",
1073 			return -1);
1074 
1075 	return 0;
1076 }
1077 
1078 static int vega10_get_vdd_voltage_table(struct pp_hwmgr *hwmgr,
1079 		phm_ppt_v1_clock_voltage_dependency_table *dep_table,
1080 		struct pp_atomfwctrl_voltage_table *vol_table)
1081 {
1082 	int i;
1083 
1084 	PP_ASSERT_WITH_CODE(dep_table->count,
1085 			"Voltage Dependency Table empty.",
1086 			return -EINVAL);
1087 
1088 	vol_table->mask_low = 0;
1089 	vol_table->phase_delay = 0;
1090 	vol_table->count = dep_table->count;
1091 
1092 	for (i = 0; i < vol_table->count; i++) {
1093 		vol_table->entries[i].value = dep_table->entries[i].vddc;
1094 		vol_table->entries[i].smio_low = 0;
1095 	}
1096 
1097 	return 0;
1098 }
1099 
1100 /* ---- Voltage Tables ----
1101  * If the voltage table would be bigger than
1102  * what will fit into the state table on
1103  * the SMC keep only the higher entries.
1104  */
1105 static void vega10_trim_voltage_table_to_fit_state_table(
1106 		struct pp_hwmgr *hwmgr,
1107 		uint32_t max_vol_steps,
1108 		struct pp_atomfwctrl_voltage_table *vol_table)
1109 {
1110 	unsigned int i, diff;
1111 
1112 	if (vol_table->count <= max_vol_steps)
1113 		return;
1114 
1115 	diff = vol_table->count - max_vol_steps;
1116 
1117 	for (i = 0; i < max_vol_steps; i++)
1118 		vol_table->entries[i] = vol_table->entries[i + diff];
1119 
1120 	vol_table->count = max_vol_steps;
1121 }
1122 
1123 /**
1124 * Create Voltage Tables.
1125 *
1126 * @param    hwmgr  the address of the powerplay hardware manager.
1127 * @return   always 0
1128 */
1129 static int vega10_construct_voltage_tables(struct pp_hwmgr *hwmgr)
1130 {
1131 	struct vega10_hwmgr *data = hwmgr->backend;
1132 	struct phm_ppt_v2_information *table_info =
1133 			(struct phm_ppt_v2_information *)hwmgr->pptable;
1134 	int result;
1135 
1136 	if (data->mvdd_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 ||
1137 			data->mvdd_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1138 		result = vega10_get_mvdd_voltage_table(hwmgr,
1139 				table_info->vdd_dep_on_mclk,
1140 				&(data->mvdd_voltage_table));
1141 		PP_ASSERT_WITH_CODE(!result,
1142 				"Failed to retrieve MVDDC table!",
1143 				return result);
1144 	}
1145 
1146 	if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1147 		result = vega10_get_vddci_voltage_table(hwmgr,
1148 				table_info->vdd_dep_on_mclk,
1149 				&(data->vddci_voltage_table));
1150 		PP_ASSERT_WITH_CODE(!result,
1151 				"Failed to retrieve VDDCI_MEM table!",
1152 				return result);
1153 	}
1154 
1155 	if (data->vddc_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 ||
1156 			data->vddc_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1157 		result = vega10_get_vdd_voltage_table(hwmgr,
1158 				table_info->vdd_dep_on_sclk,
1159 				&(data->vddc_voltage_table));
1160 		PP_ASSERT_WITH_CODE(!result,
1161 				"Failed to retrieve VDDCR_SOC table!",
1162 				return result);
1163 	}
1164 
1165 	PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 16,
1166 			"Too many voltage values for VDDC. Trimming to fit state table.",
1167 			vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1168 					16, &(data->vddc_voltage_table)));
1169 
1170 	PP_ASSERT_WITH_CODE(data->vddci_voltage_table.count <= 16,
1171 			"Too many voltage values for VDDCI. Trimming to fit state table.",
1172 			vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1173 					16, &(data->vddci_voltage_table)));
1174 
1175 	PP_ASSERT_WITH_CODE(data->mvdd_voltage_table.count <= 16,
1176 			"Too many voltage values for MVDD. Trimming to fit state table.",
1177 			vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1178 					16, &(data->mvdd_voltage_table)));
1179 
1180 
1181 	return 0;
1182 }
1183 
1184 /*
1185  * @fn vega10_init_dpm_state
1186  * @brief Function to initialize all Soft Min/Max and Hard Min/Max to 0xff.
1187  *
1188  * @param    dpm_state - the address of the DPM Table to initiailize.
1189  * @return   None.
1190  */
1191 static void vega10_init_dpm_state(struct vega10_dpm_state *dpm_state)
1192 {
1193 	dpm_state->soft_min_level = 0xff;
1194 	dpm_state->soft_max_level = 0xff;
1195 	dpm_state->hard_min_level = 0xff;
1196 	dpm_state->hard_max_level = 0xff;
1197 }
1198 
1199 static void vega10_setup_default_single_dpm_table(struct pp_hwmgr *hwmgr,
1200 		struct vega10_single_dpm_table *dpm_table,
1201 		struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
1202 {
1203 	int i;
1204 
1205 	dpm_table->count = 0;
1206 
1207 	for (i = 0; i < dep_table->count; i++) {
1208 		if (i == 0 || dpm_table->dpm_levels[dpm_table->count - 1].value <=
1209 				dep_table->entries[i].clk) {
1210 			dpm_table->dpm_levels[dpm_table->count].value =
1211 					dep_table->entries[i].clk;
1212 			dpm_table->dpm_levels[dpm_table->count].enabled = true;
1213 			dpm_table->count++;
1214 		}
1215 	}
1216 }
1217 static int vega10_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
1218 {
1219 	struct vega10_hwmgr *data = hwmgr->backend;
1220 	struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
1221 	struct phm_ppt_v2_information *table_info =
1222 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
1223 	struct phm_ppt_v1_pcie_table *bios_pcie_table =
1224 			table_info->pcie_table;
1225 	uint32_t i;
1226 
1227 	PP_ASSERT_WITH_CODE(bios_pcie_table->count,
1228 			"Incorrect number of PCIE States from VBIOS!",
1229 			return -1);
1230 
1231 	for (i = 0; i < NUM_LINK_LEVELS; i++) {
1232 		if (data->registry_data.pcieSpeedOverride)
1233 			pcie_table->pcie_gen[i] =
1234 					data->registry_data.pcieSpeedOverride;
1235 		else
1236 			pcie_table->pcie_gen[i] =
1237 					bios_pcie_table->entries[i].gen_speed;
1238 
1239 		if (data->registry_data.pcieLaneOverride)
1240 			pcie_table->pcie_lane[i] = (uint8_t)encode_pcie_lane_width(
1241 					data->registry_data.pcieLaneOverride);
1242 		else
1243 			pcie_table->pcie_lane[i] = (uint8_t)encode_pcie_lane_width(
1244 							bios_pcie_table->entries[i].lane_width);
1245 		if (data->registry_data.pcieClockOverride)
1246 			pcie_table->lclk[i] =
1247 					data->registry_data.pcieClockOverride;
1248 		else
1249 			pcie_table->lclk[i] =
1250 					bios_pcie_table->entries[i].pcie_sclk;
1251 	}
1252 
1253 	pcie_table->count = NUM_LINK_LEVELS;
1254 
1255 	return 0;
1256 }
1257 
1258 /*
1259  * This function is to initialize all DPM state tables
1260  * for SMU based on the dependency table.
1261  * Dynamic state patching function will then trim these
1262  * state tables to the allowed range based
1263  * on the power policy or external client requests,
1264  * such as UVD request, etc.
1265  */
1266 static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
1267 {
1268 	struct vega10_hwmgr *data = hwmgr->backend;
1269 	struct phm_ppt_v2_information *table_info =
1270 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
1271 	struct vega10_single_dpm_table *dpm_table;
1272 	uint32_t i;
1273 
1274 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_soc_table =
1275 			table_info->vdd_dep_on_socclk;
1276 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_gfx_table =
1277 			table_info->vdd_dep_on_sclk;
1278 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
1279 			table_info->vdd_dep_on_mclk;
1280 	struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_mm_table =
1281 			table_info->mm_dep_table;
1282 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_dcef_table =
1283 			table_info->vdd_dep_on_dcefclk;
1284 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_pix_table =
1285 			table_info->vdd_dep_on_pixclk;
1286 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_disp_table =
1287 			table_info->vdd_dep_on_dispclk;
1288 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_phy_table =
1289 			table_info->vdd_dep_on_phyclk;
1290 
1291 	PP_ASSERT_WITH_CODE(dep_soc_table,
1292 			"SOCCLK dependency table is missing. This table is mandatory",
1293 			return -EINVAL);
1294 	PP_ASSERT_WITH_CODE(dep_soc_table->count >= 1,
1295 			"SOCCLK dependency table is empty. This table is mandatory",
1296 			return -EINVAL);
1297 
1298 	PP_ASSERT_WITH_CODE(dep_gfx_table,
1299 			"GFXCLK dependency table is missing. This table is mandatory",
1300 			return -EINVAL);
1301 	PP_ASSERT_WITH_CODE(dep_gfx_table->count >= 1,
1302 			"GFXCLK dependency table is empty. This table is mandatory",
1303 			return -EINVAL);
1304 
1305 	PP_ASSERT_WITH_CODE(dep_mclk_table,
1306 			"MCLK dependency table is missing. This table is mandatory",
1307 			return -EINVAL);
1308 	PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
1309 			"MCLK dependency table has to have is missing. This table is mandatory",
1310 			return -EINVAL);
1311 
1312 	/* Initialize Sclk DPM table based on allow Sclk values */
1313 	dpm_table = &(data->dpm_table.soc_table);
1314 	vega10_setup_default_single_dpm_table(hwmgr,
1315 			dpm_table,
1316 			dep_soc_table);
1317 
1318 	vega10_init_dpm_state(&(dpm_table->dpm_state));
1319 
1320 	dpm_table = &(data->dpm_table.gfx_table);
1321 	vega10_setup_default_single_dpm_table(hwmgr,
1322 			dpm_table,
1323 			dep_gfx_table);
1324 	if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0)
1325 		hwmgr->platform_descriptor.overdriveLimit.engineClock =
1326 					dpm_table->dpm_levels[dpm_table->count-1].value;
1327 	vega10_init_dpm_state(&(dpm_table->dpm_state));
1328 
1329 	/* Initialize Mclk DPM table based on allow Mclk values */
1330 	data->dpm_table.mem_table.count = 0;
1331 	dpm_table = &(data->dpm_table.mem_table);
1332 	vega10_setup_default_single_dpm_table(hwmgr,
1333 			dpm_table,
1334 			dep_mclk_table);
1335 	if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0)
1336 		hwmgr->platform_descriptor.overdriveLimit.memoryClock =
1337 					dpm_table->dpm_levels[dpm_table->count-1].value;
1338 
1339 	vega10_init_dpm_state(&(dpm_table->dpm_state));
1340 
1341 	data->dpm_table.eclk_table.count = 0;
1342 	dpm_table = &(data->dpm_table.eclk_table);
1343 	for (i = 0; i < dep_mm_table->count; i++) {
1344 		if (i == 0 || dpm_table->dpm_levels
1345 				[dpm_table->count - 1].value <=
1346 						dep_mm_table->entries[i].eclk) {
1347 			dpm_table->dpm_levels[dpm_table->count].value =
1348 					dep_mm_table->entries[i].eclk;
1349 			dpm_table->dpm_levels[dpm_table->count].enabled =
1350 					(i == 0) ? true : false;
1351 			dpm_table->count++;
1352 		}
1353 	}
1354 	vega10_init_dpm_state(&(dpm_table->dpm_state));
1355 
1356 	data->dpm_table.vclk_table.count = 0;
1357 	data->dpm_table.dclk_table.count = 0;
1358 	dpm_table = &(data->dpm_table.vclk_table);
1359 	for (i = 0; i < dep_mm_table->count; i++) {
1360 		if (i == 0 || dpm_table->dpm_levels
1361 				[dpm_table->count - 1].value <=
1362 						dep_mm_table->entries[i].vclk) {
1363 			dpm_table->dpm_levels[dpm_table->count].value =
1364 					dep_mm_table->entries[i].vclk;
1365 			dpm_table->dpm_levels[dpm_table->count].enabled =
1366 					(i == 0) ? true : false;
1367 			dpm_table->count++;
1368 		}
1369 	}
1370 	vega10_init_dpm_state(&(dpm_table->dpm_state));
1371 
1372 	dpm_table = &(data->dpm_table.dclk_table);
1373 	for (i = 0; i < dep_mm_table->count; i++) {
1374 		if (i == 0 || dpm_table->dpm_levels
1375 				[dpm_table->count - 1].value <=
1376 						dep_mm_table->entries[i].dclk) {
1377 			dpm_table->dpm_levels[dpm_table->count].value =
1378 					dep_mm_table->entries[i].dclk;
1379 			dpm_table->dpm_levels[dpm_table->count].enabled =
1380 					(i == 0) ? true : false;
1381 			dpm_table->count++;
1382 		}
1383 	}
1384 	vega10_init_dpm_state(&(dpm_table->dpm_state));
1385 
1386 	/* Assume there is no headless Vega10 for now */
1387 	dpm_table = &(data->dpm_table.dcef_table);
1388 	vega10_setup_default_single_dpm_table(hwmgr,
1389 			dpm_table,
1390 			dep_dcef_table);
1391 
1392 	vega10_init_dpm_state(&(dpm_table->dpm_state));
1393 
1394 	dpm_table = &(data->dpm_table.pixel_table);
1395 	vega10_setup_default_single_dpm_table(hwmgr,
1396 			dpm_table,
1397 			dep_pix_table);
1398 
1399 	vega10_init_dpm_state(&(dpm_table->dpm_state));
1400 
1401 	dpm_table = &(data->dpm_table.display_table);
1402 	vega10_setup_default_single_dpm_table(hwmgr,
1403 			dpm_table,
1404 			dep_disp_table);
1405 
1406 	vega10_init_dpm_state(&(dpm_table->dpm_state));
1407 
1408 	dpm_table = &(data->dpm_table.phy_table);
1409 	vega10_setup_default_single_dpm_table(hwmgr,
1410 			dpm_table,
1411 			dep_phy_table);
1412 
1413 	vega10_init_dpm_state(&(dpm_table->dpm_state));
1414 
1415 	vega10_setup_default_pcie_table(hwmgr);
1416 
1417 	/* save a copy of the default DPM table */
1418 	memcpy(&(data->golden_dpm_table), &(data->dpm_table),
1419 			sizeof(struct vega10_dpm_table));
1420 
1421 	return 0;
1422 }
1423 
1424 /*
1425  * @fn vega10_populate_ulv_state
1426  * @brief Function to provide parameters for Utral Low Voltage state to SMC.
1427  *
1428  * @param    hwmgr - the address of the hardware manager.
1429  * @return   Always 0.
1430  */
1431 static int vega10_populate_ulv_state(struct pp_hwmgr *hwmgr)
1432 {
1433 	struct vega10_hwmgr *data = hwmgr->backend;
1434 	struct phm_ppt_v2_information *table_info =
1435 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
1436 
1437 	data->smc_state_table.pp_table.UlvOffsetVid =
1438 			(uint8_t)table_info->us_ulv_voltage_offset;
1439 
1440 	data->smc_state_table.pp_table.UlvSmnclkDid =
1441 			(uint8_t)(table_info->us_ulv_smnclk_did);
1442 	data->smc_state_table.pp_table.UlvMp1clkDid =
1443 			(uint8_t)(table_info->us_ulv_mp1clk_did);
1444 	data->smc_state_table.pp_table.UlvGfxclkBypass =
1445 			(uint8_t)(table_info->us_ulv_gfxclk_bypass);
1446 	data->smc_state_table.pp_table.UlvPhaseSheddingPsi0 =
1447 			(uint8_t)(data->vddc_voltage_table.psi0_enable);
1448 	data->smc_state_table.pp_table.UlvPhaseSheddingPsi1 =
1449 			(uint8_t)(data->vddc_voltage_table.psi1_enable);
1450 
1451 	return 0;
1452 }
1453 
1454 static int vega10_populate_single_lclk_level(struct pp_hwmgr *hwmgr,
1455 		uint32_t lclock, uint8_t *curr_lclk_did)
1456 {
1457 	struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1458 
1459 	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
1460 			hwmgr,
1461 			COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1462 			lclock, &dividers),
1463 			"Failed to get LCLK clock settings from VBIOS!",
1464 			return -1);
1465 
1466 	*curr_lclk_did = dividers.ulDid;
1467 
1468 	return 0;
1469 }
1470 
1471 static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr)
1472 {
1473 	int result = -1;
1474 	struct vega10_hwmgr *data = hwmgr->backend;
1475 	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1476 	struct vega10_pcie_table *pcie_table =
1477 			&(data->dpm_table.pcie_table);
1478 	uint32_t i, j;
1479 
1480 	for (i = 0; i < pcie_table->count; i++) {
1481 		pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[i];
1482 		pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[i];
1483 
1484 		result = vega10_populate_single_lclk_level(hwmgr,
1485 				pcie_table->lclk[i], &(pp_table->LclkDid[i]));
1486 		if (result) {
1487 			pr_info("Populate LClock Level %d Failed!\n", i);
1488 			return result;
1489 		}
1490 	}
1491 
1492 	j = i - 1;
1493 	while (i < NUM_LINK_LEVELS) {
1494 		pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[j];
1495 		pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[j];
1496 
1497 		result = vega10_populate_single_lclk_level(hwmgr,
1498 				pcie_table->lclk[j], &(pp_table->LclkDid[i]));
1499 		if (result) {
1500 			pr_info("Populate LClock Level %d Failed!\n", i);
1501 			return result;
1502 		}
1503 		i++;
1504 	}
1505 
1506 	return result;
1507 }
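
/*
 * Note on the padding loop above (the same pattern recurs in the other
 * populate_* helpers below): the SMC expects each fixed-size level array
 * (NUM_LINK_LEVELS, NUM_GFXCLK_DPM_LEVELS, ...) to be fully initialized,
 * so once the real entries have been written, the highest valid level
 * (index j = i - 1) is replicated into the remaining slots.  A minimal
 * sketch of the pattern, using hypothetical names purely for illustration:
 *
 *	for (i = 0; i < src_count; i++)
 *		smc_levels[i] = convert(src_levels[i]);
 *	for (j = i - 1; i < SMC_MAX_LEVELS; i++)
 *		smc_levels[i] = smc_levels[j];
 *
 * i.e. the second loop simply repeats the top level into the unused slots.
 */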
1508 
/**
 * Populates a single SMC GFXCLK structure using the provided engine clock.
 *
 * @param    hwmgr      the address of the hardware manager
 * @param    gfx_clock  the GFX clock to use to populate the structure.
 * @param    current_gfxclk_level  location in the PPTable for the SMC GFXCLK structure.
 * @param    acg_freq   location in the PPTable for the ACG frequency (in MHz).
 */
1516 
1517 static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
1518 		uint32_t gfx_clock, PllSetting_t *current_gfxclk_level,
1519 		uint32_t *acg_freq)
1520 {
1521 	struct phm_ppt_v2_information *table_info =
1522 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
1523 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_sclk;
1524 	struct vega10_hwmgr *data = hwmgr->backend;
1525 	struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1526 	uint32_t gfx_max_clock =
1527 			hwmgr->platform_descriptor.overdriveLimit.engineClock;
1528 	uint32_t i = 0;
1529 
1530 	if (hwmgr->od_enabled)
1531 		dep_on_sclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
1532 						&(data->odn_dpm_table.vdd_dep_on_sclk);
1533 	else
1534 		dep_on_sclk = table_info->vdd_dep_on_sclk;
1535 
1536 	PP_ASSERT_WITH_CODE(dep_on_sclk,
1537 			"Invalid SOC_VDD-GFX_CLK Dependency Table!",
1538 			return -EINVAL);
1539 
1540 	if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
1541 		gfx_clock = gfx_clock > gfx_max_clock ? gfx_max_clock : gfx_clock;
1542 	else {
1543 		for (i = 0; i < dep_on_sclk->count; i++) {
1544 			if (dep_on_sclk->entries[i].clk == gfx_clock)
1545 				break;
1546 		}
1547 		PP_ASSERT_WITH_CODE(dep_on_sclk->count > i,
1548 				"Cannot find gfx_clk in SOC_VDD-GFX_CLK!",
1549 				return -EINVAL);
1550 	}
1551 
1552 	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1553 			COMPUTE_GPUCLK_INPUT_FLAG_GFXCLK,
1554 			gfx_clock, &dividers),
1555 			"Failed to get GFX Clock settings from VBIOS!",
1556 			return -EINVAL);
1557 
	/* Feedback Multiplier: bits [8:0] integer, [15:12] post_div, [31:16] fractional */
1559 	current_gfxclk_level->FbMult =
1560 			cpu_to_le32(dividers.ulPll_fb_mult);
	/* Spread FB Multiplier: bits [8:0] integer, [31:16] fractional */
1562 	current_gfxclk_level->SsOn = dividers.ucPll_ss_enable;
1563 	current_gfxclk_level->SsFbMult =
1564 			cpu_to_le32(dividers.ulPll_ss_fbsmult);
1565 	current_gfxclk_level->SsSlewFrac =
1566 			cpu_to_le16(dividers.usPll_ss_slew_frac);
1567 	current_gfxclk_level->Did = (uint8_t)(dividers.ulDid);
1568 
	*acg_freq = gfx_clock / 100; /* 10 KHz units to MHz conversion */
1570 
1571 	return 0;
1572 }
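
/*
 * Worked example for the ACG frequency conversion above, assuming the
 * usual powerplay convention that clocks in the dependency tables are
 * expressed in 10 kHz units: a gfx_clock of 150000 (1.5 GHz) yields an
 * AcgFreqTable entry of 150000 / 100 = 1500 MHz.
 */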
1573 
/**
 * @brief Populates a single SMC SOCCLK structure using the provided clock.
 *
 * @param    hwmgr - the address of the hardware manager.
 * @param    soc_clock - the SOC clock to use to populate the structure.
 * @param    current_soc_did - location in the PPTable for the SOCCLK divider ID.
 * @param    current_vol_index - location in the PPTable for the SOC voltage index.
 * @return   0 on success.
 */
1582 static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr,
1583 		uint32_t soc_clock, uint8_t *current_soc_did,
1584 		uint8_t *current_vol_index)
1585 {
1586 	struct vega10_hwmgr *data = hwmgr->backend;
1587 	struct phm_ppt_v2_information *table_info =
1588 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
1589 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_soc;
1590 	struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1591 	uint32_t i;
1592 
1593 	if (hwmgr->od_enabled) {
1594 		dep_on_soc = (struct phm_ppt_v1_clock_voltage_dependency_table *)
1595 						&data->odn_dpm_table.vdd_dep_on_socclk;
1596 		for (i = 0; i < dep_on_soc->count; i++) {
1597 			if (dep_on_soc->entries[i].clk >= soc_clock)
1598 				break;
1599 		}
1600 	} else {
1601 		dep_on_soc = table_info->vdd_dep_on_socclk;
1602 		for (i = 0; i < dep_on_soc->count; i++) {
1603 			if (dep_on_soc->entries[i].clk == soc_clock)
1604 				break;
1605 		}
1606 	}
1607 
1608 	PP_ASSERT_WITH_CODE(dep_on_soc->count > i,
1609 			"Cannot find SOC_CLK in SOC_VDD-SOC_CLK Dependency Table",
1610 			return -EINVAL);
1611 
1612 	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1613 			COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1614 			soc_clock, &dividers),
1615 			"Failed to get SOC Clock settings from VBIOS!",
1616 			return -EINVAL);
1617 
1618 	*current_soc_did = (uint8_t)dividers.ulDid;
1619 	*current_vol_index = (uint8_t)(dep_on_soc->entries[i].vddInd);
1620 	return 0;
1621 }
1622 
/**
 * Populates all SMC GFXCLK and SOCCLK levels based on the trimmed allowed DPM engine clock states.
 *
 * @param    hwmgr      the address of the hardware manager
 * @return   0 on success.
 */
1628 static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
1629 {
1630 	struct vega10_hwmgr *data = hwmgr->backend;
1631 	struct phm_ppt_v2_information *table_info =
1632 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
1633 	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1634 	struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
1635 	int result = 0;
1636 	uint32_t i, j;
1637 
1638 	for (i = 0; i < dpm_table->count; i++) {
1639 		result = vega10_populate_single_gfx_level(hwmgr,
1640 				dpm_table->dpm_levels[i].value,
1641 				&(pp_table->GfxclkLevel[i]),
1642 				&(pp_table->AcgFreqTable[i]));
1643 		if (result)
1644 			return result;
1645 	}
1646 
1647 	j = i - 1;
1648 	while (i < NUM_GFXCLK_DPM_LEVELS) {
1649 		result = vega10_populate_single_gfx_level(hwmgr,
1650 				dpm_table->dpm_levels[j].value,
1651 				&(pp_table->GfxclkLevel[i]),
1652 				&(pp_table->AcgFreqTable[i]));
1653 		if (result)
1654 			return result;
1655 		i++;
1656 	}
1657 
1658 	pp_table->GfxclkSlewRate =
1659 			cpu_to_le16(table_info->us_gfxclk_slew_rate);
1660 
1661 	dpm_table = &(data->dpm_table.soc_table);
1662 	for (i = 0; i < dpm_table->count; i++) {
1663 		result = vega10_populate_single_soc_level(hwmgr,
1664 				dpm_table->dpm_levels[i].value,
1665 				&(pp_table->SocclkDid[i]),
1666 				&(pp_table->SocDpmVoltageIndex[i]));
1667 		if (result)
1668 			return result;
1669 	}
1670 
1671 	j = i - 1;
1672 	while (i < NUM_SOCCLK_DPM_LEVELS) {
1673 		result = vega10_populate_single_soc_level(hwmgr,
1674 				dpm_table->dpm_levels[j].value,
1675 				&(pp_table->SocclkDid[i]),
1676 				&(pp_table->SocDpmVoltageIndex[i]));
1677 		if (result)
1678 			return result;
1679 		i++;
1680 	}
1681 
1682 	return result;
1683 }
1684 
1685 static void vega10_populate_vddc_soc_levels(struct pp_hwmgr *hwmgr)
1686 {
1687 	struct vega10_hwmgr *data = hwmgr->backend;
1688 	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1689 	struct phm_ppt_v2_information *table_info = hwmgr->pptable;
1690 	struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table;
1691 
1692 	uint8_t soc_vid = 0;
1693 	uint32_t i, max_vddc_level;
1694 
1695 	if (hwmgr->od_enabled)
1696 		vddc_lookup_table = (struct phm_ppt_v1_voltage_lookup_table *)&data->odn_dpm_table.vddc_lookup_table;
1697 	else
1698 		vddc_lookup_table = table_info->vddc_lookup_table;
1699 
1700 	max_vddc_level = vddc_lookup_table->count;
1701 	for (i = 0; i < max_vddc_level; i++) {
1702 		soc_vid = (uint8_t)convert_to_vid(vddc_lookup_table->entries[i].us_vdd);
1703 		pp_table->SocVid[i] = soc_vid;
1704 	}
1705 	while (i < MAX_REGULAR_DPM_NUMBER) {
1706 		pp_table->SocVid[i] = soc_vid;
1707 		i++;
1708 	}
1709 }
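
/*
 * The SocVid entries written above come from convert_to_vid(), which
 * (assuming the standard SVI2 encoding used throughout powerplay) maps a
 * voltage in mV to an 8-bit VID in 6.25 mV steps below 1.55 V, i.e.
 * VID = (1550 - mV) / 6.25; for example, 1000 mV encodes to VID 88.
 */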
1710 
/**
 * @brief Populates a single SMC memory (UCLK) level using the provided memory clock.
 *
 * @param    hwmgr - the address of the hardware manager.
 * @param    mem_clock - the memory clock to use to populate the structure.
 * @param    current_mem_vid - location in the PPTable for the MVDD VID.
 * @param    current_memclk_level - location in the PPTable for the SMC UCLK PLL settings.
 * @param    current_mem_soc_vind - location in the PPTable for the SOC voltage index.
 * @return   0 on success.
 */
1718 static int vega10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
1719 		uint32_t mem_clock, uint8_t *current_mem_vid,
1720 		PllSetting_t *current_memclk_level, uint8_t *current_mem_soc_vind)
1721 {
1722 	struct vega10_hwmgr *data = hwmgr->backend;
1723 	struct phm_ppt_v2_information *table_info =
1724 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
1725 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_mclk;
1726 	struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1727 	uint32_t mem_max_clock =
1728 			hwmgr->platform_descriptor.overdriveLimit.memoryClock;
1729 	uint32_t i = 0;
1730 
1731 	if (hwmgr->od_enabled)
1732 		dep_on_mclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
1733 					&data->odn_dpm_table.vdd_dep_on_mclk;
1734 	else
1735 		dep_on_mclk = table_info->vdd_dep_on_mclk;
1736 
1737 	PP_ASSERT_WITH_CODE(dep_on_mclk,
1738 			"Invalid SOC_VDD-UCLK Dependency Table!",
1739 			return -EINVAL);
1740 
1741 	if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
1742 		mem_clock = mem_clock > mem_max_clock ? mem_max_clock : mem_clock;
1743 	} else {
1744 		for (i = 0; i < dep_on_mclk->count; i++) {
1745 			if (dep_on_mclk->entries[i].clk == mem_clock)
1746 				break;
1747 		}
1748 		PP_ASSERT_WITH_CODE(dep_on_mclk->count > i,
1749 				"Cannot find UCLK in SOC_VDD-UCLK Dependency Table!",
1750 				return -EINVAL);
1751 	}
1752 
1753 	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
1754 			hwmgr, COMPUTE_GPUCLK_INPUT_FLAG_UCLK, mem_clock, &dividers),
1755 			"Failed to get UCLK settings from VBIOS!",
1756 			return -1);
1757 
1758 	*current_mem_vid =
1759 			(uint8_t)(convert_to_vid(dep_on_mclk->entries[i].mvdd));
1760 	*current_mem_soc_vind =
1761 			(uint8_t)(dep_on_mclk->entries[i].vddInd);
1762 	current_memclk_level->FbMult = cpu_to_le32(dividers.ulPll_fb_mult);
1763 	current_memclk_level->Did = (uint8_t)(dividers.ulDid);
1764 
1765 	PP_ASSERT_WITH_CODE(current_memclk_level->Did >= 1,
1766 			"Invalid Divider ID!",
1767 			return -EINVAL);
1768 
1769 	return 0;
1770 }
1771 
/**
 * @brief Populates all SMC MCLK levels' structures based on the trimmed allowed DPM memory clock states.
 *
 * @param    hwmgr - the address of the hardware manager.
 * @return   0 on success.
 */
1778 static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
1779 {
1780 	struct vega10_hwmgr *data = hwmgr->backend;
1781 	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1782 	struct vega10_single_dpm_table *dpm_table =
1783 			&(data->dpm_table.mem_table);
1784 	int result = 0;
1785 	uint32_t i, j;
1786 
1787 	for (i = 0; i < dpm_table->count; i++) {
1788 		result = vega10_populate_single_memory_level(hwmgr,
1789 				dpm_table->dpm_levels[i].value,
1790 				&(pp_table->MemVid[i]),
1791 				&(pp_table->UclkLevel[i]),
1792 				&(pp_table->MemSocVoltageIndex[i]));
1793 		if (result)
1794 			return result;
1795 	}
1796 
1797 	j = i - 1;
1798 	while (i < NUM_UCLK_DPM_LEVELS) {
1799 		result = vega10_populate_single_memory_level(hwmgr,
1800 				dpm_table->dpm_levels[j].value,
1801 				&(pp_table->MemVid[i]),
1802 				&(pp_table->UclkLevel[i]),
1803 				&(pp_table->MemSocVoltageIndex[i]));
1804 		if (result)
1805 			return result;
1806 		i++;
1807 	}
1808 
1809 	pp_table->NumMemoryChannels = (uint16_t)(data->mem_channels);
1810 	pp_table->MemoryChannelWidth =
1811 			(uint16_t)(HBM_MEMORY_CHANNEL_WIDTH *
1812 					channel_number[data->mem_channels]);
1813 
1814 	pp_table->LowestUclkReservedForUlv =
1815 			(uint8_t)(data->lowest_uclk_reserved_for_ulv);
1816 
1817 	return result;
1818 }
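
/*
 * Note: data->mem_channels holds the raw interleave-channel encoding read
 * from the data fabric registers elsewhere in this file; channel_number[]
 * decodes it into an actual channel count, so MemoryChannelWidth above is
 * the total bus width (per-channel HBM width times the decoded channel
 * count).
 */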
1819 
1820 static int vega10_populate_single_display_type(struct pp_hwmgr *hwmgr,
1821 		DSPCLK_e disp_clock)
1822 {
1823 	struct vega10_hwmgr *data = hwmgr->backend;
1824 	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1825 	struct phm_ppt_v2_information *table_info =
1826 			(struct phm_ppt_v2_information *)
1827 			(hwmgr->pptable);
1828 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
1829 	uint32_t i;
1830 	uint16_t clk = 0, vddc = 0;
1831 	uint8_t vid = 0;
1832 
1833 	switch (disp_clock) {
1834 	case DSPCLK_DCEFCLK:
1835 		dep_table = table_info->vdd_dep_on_dcefclk;
1836 		break;
1837 	case DSPCLK_DISPCLK:
1838 		dep_table = table_info->vdd_dep_on_dispclk;
1839 		break;
1840 	case DSPCLK_PIXCLK:
1841 		dep_table = table_info->vdd_dep_on_pixclk;
1842 		break;
1843 	case DSPCLK_PHYCLK:
1844 		dep_table = table_info->vdd_dep_on_phyclk;
1845 		break;
1846 	default:
1847 		return -1;
1848 	}
1849 
1850 	PP_ASSERT_WITH_CODE(dep_table->count <= NUM_DSPCLK_LEVELS,
1851 			"Number Of Entries Exceeded maximum!",
1852 			return -1);
1853 
1854 	for (i = 0; i < dep_table->count; i++) {
1855 		clk = (uint16_t)(dep_table->entries[i].clk / 100);
1856 		vddc = table_info->vddc_lookup_table->
1857 				entries[dep_table->entries[i].vddInd].us_vdd;
1858 		vid = (uint8_t)convert_to_vid(vddc);
1859 		pp_table->DisplayClockTable[disp_clock][i].Freq =
1860 				cpu_to_le16(clk);
1861 		pp_table->DisplayClockTable[disp_clock][i].Vid =
1862 				cpu_to_le16(vid);
1863 	}
1864 
1865 	while (i < NUM_DSPCLK_LEVELS) {
1866 		pp_table->DisplayClockTable[disp_clock][i].Freq =
1867 				cpu_to_le16(clk);
1868 		pp_table->DisplayClockTable[disp_clock][i].Vid =
1869 				cpu_to_le16(vid);
1870 		i++;
1871 	}
1872 
1873 	return 0;
1874 }
1875 
1876 static int vega10_populate_all_display_clock_levels(struct pp_hwmgr *hwmgr)
1877 {
1878 	uint32_t i;
1879 
1880 	for (i = 0; i < DSPCLK_COUNT; i++) {
1881 		PP_ASSERT_WITH_CODE(!vega10_populate_single_display_type(hwmgr, i),
1882 				"Failed to populate Clock in DisplayClockTable!",
1883 				return -1);
1884 	}
1885 
1886 	return 0;
1887 }
1888 
1889 static int vega10_populate_single_eclock_level(struct pp_hwmgr *hwmgr,
1890 		uint32_t eclock, uint8_t *current_eclk_did,
1891 		uint8_t *current_soc_vol)
1892 {
1893 	struct phm_ppt_v2_information *table_info =
1894 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
1895 	struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table =
1896 			table_info->mm_dep_table;
1897 	struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1898 	uint32_t i;
1899 
1900 	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1901 			COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1902 			eclock, &dividers),
1903 			"Failed to get ECLK clock settings from VBIOS!",
1904 			return -1);
1905 
1906 	*current_eclk_did = (uint8_t)dividers.ulDid;
1907 
1908 	for (i = 0; i < dep_table->count; i++) {
1909 		if (dep_table->entries[i].eclk == eclock)
1910 			*current_soc_vol = dep_table->entries[i].vddcInd;
1911 	}
1912 
1913 	return 0;
1914 }
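
/*
 * Note: the loop above only updates *current_soc_vol when an exact eclk
 * match is found in the MM dependency table; otherwise the previously
 * stored voltage index is left untouched.
 */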
1915 
1916 static int vega10_populate_smc_vce_levels(struct pp_hwmgr *hwmgr)
1917 {
1918 	struct vega10_hwmgr *data = hwmgr->backend;
1919 	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1920 	struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.eclk_table);
1921 	int result = -EINVAL;
1922 	uint32_t i, j;
1923 
1924 	for (i = 0; i < dpm_table->count; i++) {
1925 		result = vega10_populate_single_eclock_level(hwmgr,
1926 				dpm_table->dpm_levels[i].value,
1927 				&(pp_table->EclkDid[i]),
1928 				&(pp_table->VceDpmVoltageIndex[i]));
1929 		if (result)
1930 			return result;
1931 	}
1932 
1933 	j = i - 1;
1934 	while (i < NUM_VCE_DPM_LEVELS) {
1935 		result = vega10_populate_single_eclock_level(hwmgr,
1936 				dpm_table->dpm_levels[j].value,
1937 				&(pp_table->EclkDid[i]),
1938 				&(pp_table->VceDpmVoltageIndex[i]));
1939 		if (result)
1940 			return result;
1941 		i++;
1942 	}
1943 
1944 	return result;
1945 }
1946 
1947 static int vega10_populate_single_vclock_level(struct pp_hwmgr *hwmgr,
1948 		uint32_t vclock, uint8_t *current_vclk_did)
1949 {
1950 	struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1951 
1952 	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1953 			COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1954 			vclock, &dividers),
1955 			"Failed to get VCLK clock settings from VBIOS!",
1956 			return -EINVAL);
1957 
1958 	*current_vclk_did = (uint8_t)dividers.ulDid;
1959 
1960 	return 0;
1961 }
1962 
1963 static int vega10_populate_single_dclock_level(struct pp_hwmgr *hwmgr,
1964 		uint32_t dclock, uint8_t *current_dclk_did)
1965 {
1966 	struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1967 
1968 	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1969 			COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1970 			dclock, &dividers),
1971 			"Failed to get DCLK clock settings from VBIOS!",
1972 			return -EINVAL);
1973 
1974 	*current_dclk_did = (uint8_t)dividers.ulDid;
1975 
1976 	return 0;
1977 }
1978 
1979 static int vega10_populate_smc_uvd_levels(struct pp_hwmgr *hwmgr)
1980 {
1981 	struct vega10_hwmgr *data = hwmgr->backend;
1982 	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1983 	struct vega10_single_dpm_table *vclk_dpm_table =
1984 			&(data->dpm_table.vclk_table);
1985 	struct vega10_single_dpm_table *dclk_dpm_table =
1986 			&(data->dpm_table.dclk_table);
1987 	struct phm_ppt_v2_information *table_info =
1988 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
1989 	struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table =
1990 			table_info->mm_dep_table;
1991 	int result = -EINVAL;
1992 	uint32_t i, j;
1993 
1994 	for (i = 0; i < vclk_dpm_table->count; i++) {
1995 		result = vega10_populate_single_vclock_level(hwmgr,
1996 				vclk_dpm_table->dpm_levels[i].value,
1997 				&(pp_table->VclkDid[i]));
1998 		if (result)
1999 			return result;
2000 	}
2001 
2002 	j = i - 1;
2003 	while (i < NUM_UVD_DPM_LEVELS) {
2004 		result = vega10_populate_single_vclock_level(hwmgr,
2005 				vclk_dpm_table->dpm_levels[j].value,
2006 				&(pp_table->VclkDid[i]));
2007 		if (result)
2008 			return result;
2009 		i++;
2010 	}
2011 
2012 	for (i = 0; i < dclk_dpm_table->count; i++) {
2013 		result = vega10_populate_single_dclock_level(hwmgr,
2014 				dclk_dpm_table->dpm_levels[i].value,
2015 				&(pp_table->DclkDid[i]));
2016 		if (result)
2017 			return result;
2018 	}
2019 
2020 	j = i - 1;
2021 	while (i < NUM_UVD_DPM_LEVELS) {
2022 		result = vega10_populate_single_dclock_level(hwmgr,
2023 				dclk_dpm_table->dpm_levels[j].value,
2024 				&(pp_table->DclkDid[i]));
2025 		if (result)
2026 			return result;
2027 		i++;
2028 	}
2029 
2030 	for (i = 0; i < dep_table->count; i++) {
2031 		if (dep_table->entries[i].vclk ==
2032 				vclk_dpm_table->dpm_levels[i].value &&
2033 			dep_table->entries[i].dclk ==
2034 				dclk_dpm_table->dpm_levels[i].value)
2035 			pp_table->UvdDpmVoltageIndex[i] =
2036 					dep_table->entries[i].vddcInd;
2037 		else
2038 			return -1;
2039 	}
2040 
2041 	j = i - 1;
2042 	while (i < NUM_UVD_DPM_LEVELS) {
2043 		pp_table->UvdDpmVoltageIndex[i] = dep_table->entries[j].vddcInd;
2044 		i++;
2045 	}
2046 
2047 	return 0;
2048 }
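
/*
 * Note: the UVD voltage-index loop above assumes the MM dependency table
 * entries line up one-to-one with the VCLK/DCLK DPM levels; if an entry's
 * vclk/dclk pair does not match the DPM level at the same index, the
 * function bails out rather than guessing a voltage index.
 */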
2049 
2050 static int vega10_populate_clock_stretcher_table(struct pp_hwmgr *hwmgr)
2051 {
2052 	struct vega10_hwmgr *data = hwmgr->backend;
2053 	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2054 	struct phm_ppt_v2_information *table_info =
2055 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
2056 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
2057 			table_info->vdd_dep_on_sclk;
2058 	uint32_t i;
2059 
2060 	for (i = 0; i < dep_table->count; i++) {
2061 		pp_table->CksEnable[i] = dep_table->entries[i].cks_enable;
2062 		pp_table->CksVidOffset[i] = (uint8_t)(dep_table->entries[i].cks_voffset
2063 				* VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
2064 	}
2065 
2066 	return 0;
2067 }
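
/*
 * The CksVidOffset scaling above converts each level's clock-stretcher
 * voltage offset into VID steps; assuming VOLTAGE_VID_OFFSET_SCALE1 = 625
 * and VOLTAGE_VID_OFFSET_SCALE2 = 100 (the values used elsewhere in
 * powerplay), the factor is 100/625 = 1/6.25, i.e. one VID step per
 * 6.25 mV of offset, so a 25 mV cks_voffset becomes a VID offset of 4.
 */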
2068 
2069 static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
2070 {
2071 	struct vega10_hwmgr *data = hwmgr->backend;
2072 	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2073 	struct phm_ppt_v2_information *table_info =
2074 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
2075 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
2076 			table_info->vdd_dep_on_sclk;
2077 	struct pp_atomfwctrl_avfs_parameters avfs_params = {0};
2078 	int result = 0;
2079 	uint32_t i;
2080 
2081 	pp_table->MinVoltageVid = (uint8_t)0xff;
2082 	pp_table->MaxVoltageVid = (uint8_t)0;
2083 
2084 	if (data->smu_features[GNLD_AVFS].supported) {
2085 		result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
2086 		if (!result) {
2087 			pp_table->MinVoltageVid = (uint8_t)
2088 					convert_to_vid((uint16_t)(avfs_params.ulMinVddc));
2089 			pp_table->MaxVoltageVid = (uint8_t)
2090 					convert_to_vid((uint16_t)(avfs_params.ulMaxVddc));
2091 
2092 			pp_table->AConstant[0] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant0);
2093 			pp_table->AConstant[1] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant1);
2094 			pp_table->AConstant[2] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant2);
2095 			pp_table->DC_tol_sigma = cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
2096 			pp_table->Platform_mean = cpu_to_le16(avfs_params.usMeanNsigmaPlatformMean);
2097 			pp_table->Platform_sigma = cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
2098 			pp_table->PSM_Age_CompFactor = cpu_to_le16(avfs_params.usPsmAgeComfactor);
2099 
2100 			pp_table->BtcGbVdroopTableCksOff.a0 =
2101 					cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA0);
2102 			pp_table->BtcGbVdroopTableCksOff.a0_shift = 20;
2103 			pp_table->BtcGbVdroopTableCksOff.a1 =
2104 					cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA1);
2105 			pp_table->BtcGbVdroopTableCksOff.a1_shift = 20;
2106 			pp_table->BtcGbVdroopTableCksOff.a2 =
2107 					cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA2);
2108 			pp_table->BtcGbVdroopTableCksOff.a2_shift = 20;
2109 
2110 			pp_table->OverrideBtcGbCksOn = avfs_params.ucEnableGbVdroopTableCkson;
2111 			pp_table->BtcGbVdroopTableCksOn.a0 =
2112 					cpu_to_le32(avfs_params.ulGbVdroopTableCksonA0);
2113 			pp_table->BtcGbVdroopTableCksOn.a0_shift = 20;
2114 			pp_table->BtcGbVdroopTableCksOn.a1 =
2115 					cpu_to_le32(avfs_params.ulGbVdroopTableCksonA1);
2116 			pp_table->BtcGbVdroopTableCksOn.a1_shift = 20;
2117 			pp_table->BtcGbVdroopTableCksOn.a2 =
2118 					cpu_to_le32(avfs_params.ulGbVdroopTableCksonA2);
2119 			pp_table->BtcGbVdroopTableCksOn.a2_shift = 20;
2120 
2121 			pp_table->AvfsGbCksOn.m1 =
2122 					cpu_to_le32(avfs_params.ulGbFuseTableCksonM1);
2123 			pp_table->AvfsGbCksOn.m2 =
2124 					cpu_to_le32(avfs_params.ulGbFuseTableCksonM2);
2125 			pp_table->AvfsGbCksOn.b =
2126 					cpu_to_le32(avfs_params.ulGbFuseTableCksonB);
2127 			pp_table->AvfsGbCksOn.m1_shift = 24;
2128 			pp_table->AvfsGbCksOn.m2_shift = 12;
2129 			pp_table->AvfsGbCksOn.b_shift = 0;
2130 
2131 			pp_table->OverrideAvfsGbCksOn =
2132 					avfs_params.ucEnableGbFuseTableCkson;
2133 			pp_table->AvfsGbCksOff.m1 =
2134 					cpu_to_le32(avfs_params.ulGbFuseTableCksoffM1);
2135 			pp_table->AvfsGbCksOff.m2 =
2136 					cpu_to_le32(avfs_params.ulGbFuseTableCksoffM2);
2137 			pp_table->AvfsGbCksOff.b =
2138 					cpu_to_le32(avfs_params.ulGbFuseTableCksoffB);
2139 			pp_table->AvfsGbCksOff.m1_shift = 24;
2140 			pp_table->AvfsGbCksOff.m2_shift = 12;
2141 			pp_table->AvfsGbCksOff.b_shift = 0;
2142 
2143 			for (i = 0; i < dep_table->count; i++)
2144 				pp_table->StaticVoltageOffsetVid[i] =
2145 						convert_to_vid((uint8_t)(dep_table->entries[i].sclk_offset));
2146 
2147 			if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2148 					data->disp_clk_quad_eqn_a) &&
2149 				(PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2150 					data->disp_clk_quad_eqn_b)) {
2151 				pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 =
2152 						(int32_t)data->disp_clk_quad_eqn_a;
2153 				pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 =
2154 						(int32_t)data->disp_clk_quad_eqn_b;
2155 				pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b =
2156 						(int32_t)data->disp_clk_quad_eqn_c;
2157 			} else {
2158 				pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 =
2159 						(int32_t)avfs_params.ulDispclk2GfxclkM1;
2160 				pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 =
2161 						(int32_t)avfs_params.ulDispclk2GfxclkM2;
2162 				pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b =
2163 						(int32_t)avfs_params.ulDispclk2GfxclkB;
2164 			}
2165 
2166 			pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1_shift = 24;
2167 			pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2_shift = 12;
2168 			pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b_shift = 12;
2169 
2170 			if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2171 					data->dcef_clk_quad_eqn_a) &&
2172 				(PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2173 					data->dcef_clk_quad_eqn_b)) {
2174 				pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 =
2175 						(int32_t)data->dcef_clk_quad_eqn_a;
2176 				pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 =
2177 						(int32_t)data->dcef_clk_quad_eqn_b;
2178 				pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b =
2179 						(int32_t)data->dcef_clk_quad_eqn_c;
2180 			} else {
2181 				pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 =
2182 						(int32_t)avfs_params.ulDcefclk2GfxclkM1;
2183 				pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 =
2184 						(int32_t)avfs_params.ulDcefclk2GfxclkM2;
2185 				pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b =
2186 						(int32_t)avfs_params.ulDcefclk2GfxclkB;
2187 			}
2188 
2189 			pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1_shift = 24;
2190 			pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2_shift = 12;
2191 			pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b_shift = 12;
2192 
2193 			if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2194 					data->pixel_clk_quad_eqn_a) &&
2195 				(PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2196 					data->pixel_clk_quad_eqn_b)) {
2197 				pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 =
2198 						(int32_t)data->pixel_clk_quad_eqn_a;
2199 				pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 =
2200 						(int32_t)data->pixel_clk_quad_eqn_b;
2201 				pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b =
2202 						(int32_t)data->pixel_clk_quad_eqn_c;
2203 			} else {
2204 				pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 =
2205 						(int32_t)avfs_params.ulPixelclk2GfxclkM1;
2206 				pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 =
2207 						(int32_t)avfs_params.ulPixelclk2GfxclkM2;
2208 				pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b =
2209 						(int32_t)avfs_params.ulPixelclk2GfxclkB;
2210 			}
2211 
2212 			pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1_shift = 24;
2213 			pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2_shift = 12;
2214 			pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b_shift = 12;
2215 			if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2216 					data->phy_clk_quad_eqn_a) &&
2217 				(PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2218 					data->phy_clk_quad_eqn_b)) {
2219 				pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 =
2220 						(int32_t)data->phy_clk_quad_eqn_a;
2221 				pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 =
2222 						(int32_t)data->phy_clk_quad_eqn_b;
2223 				pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b =
2224 						(int32_t)data->phy_clk_quad_eqn_c;
2225 			} else {
2226 				pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 =
2227 						(int32_t)avfs_params.ulPhyclk2GfxclkM1;
2228 				pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 =
2229 						(int32_t)avfs_params.ulPhyclk2GfxclkM2;
2230 				pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b =
2231 						(int32_t)avfs_params.ulPhyclk2GfxclkB;
2232 			}
2233 
2234 			pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1_shift = 24;
2235 			pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2_shift = 12;
2236 			pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b_shift = 12;
2237 
2238 			pp_table->AcgBtcGbVdroopTable.a0       = avfs_params.ulAcgGbVdroopTableA0;
2239 			pp_table->AcgBtcGbVdroopTable.a0_shift = 20;
2240 			pp_table->AcgBtcGbVdroopTable.a1       = avfs_params.ulAcgGbVdroopTableA1;
2241 			pp_table->AcgBtcGbVdroopTable.a1_shift = 20;
2242 			pp_table->AcgBtcGbVdroopTable.a2       = avfs_params.ulAcgGbVdroopTableA2;
2243 			pp_table->AcgBtcGbVdroopTable.a2_shift = 20;
2244 
2245 			pp_table->AcgAvfsGb.m1                   = avfs_params.ulAcgGbFuseTableM1;
2246 			pp_table->AcgAvfsGb.m2                   = avfs_params.ulAcgGbFuseTableM2;
2247 			pp_table->AcgAvfsGb.b                    = avfs_params.ulAcgGbFuseTableB;
2248 			pp_table->AcgAvfsGb.m1_shift             = 0;
2249 			pp_table->AcgAvfsGb.m2_shift             = 0;
2250 			pp_table->AcgAvfsGb.b_shift              = 0;
2251 
2252 		} else {
2253 			data->smu_features[GNLD_AVFS].supported = false;
2254 		}
2255 	}
2256 
2257 	return 0;
2258 }
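
/*
 * The *_shift fields programmed above appear to describe the fixed-point
 * format the SMC uses for the AVFS/BTC polynomial coefficients (a shift
 * of n meaning the raw value is interpreted as coefficient / 2^n); the
 * driver only forwards the coefficients, which come straight from the
 * VBIOS AVFS table or the registry-supplied quadratic overrides.
 */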
2259 
2260 static int vega10_acg_enable(struct pp_hwmgr *hwmgr)
2261 {
2262 	struct vega10_hwmgr *data = hwmgr->backend;
2263 	uint32_t agc_btc_response;
2264 
2265 	if (data->smu_features[GNLD_ACG].supported) {
2266 		if (0 == vega10_enable_smc_features(hwmgr, true,
2267 					data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_bitmap))
2268 			data->smu_features[GNLD_DPM_PREFETCHER].enabled = true;
2269 
2270 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg);
2271 
2272 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc);
2273 		agc_btc_response = smum_get_argument(hwmgr);
2274 
2275 		if (1 == agc_btc_response) {
2276 			if (1 == data->acg_loop_state)
2277 				smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInClosedLoop);
2278 			else if (2 == data->acg_loop_state)
2279 				smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInOpenLoop);
2280 			if (0 == vega10_enable_smc_features(hwmgr, true,
2281 				data->smu_features[GNLD_ACG].smu_feature_bitmap))
2282 					data->smu_features[GNLD_ACG].enabled = true;
2283 		} else {
2284 			pr_info("[ACG_Enable] ACG BTC Returned Failed Status!\n");
2285 			data->smu_features[GNLD_ACG].enabled = false;
2286 		}
2287 	}
2288 
2289 	return 0;
2290 }
2291 
2292 static int vega10_acg_disable(struct pp_hwmgr *hwmgr)
2293 {
2294 	struct vega10_hwmgr *data = hwmgr->backend;
2295 
2296 	if (data->smu_features[GNLD_ACG].supported &&
2297 	    data->smu_features[GNLD_ACG].enabled)
2298 		if (!vega10_enable_smc_features(hwmgr, false,
2299 			data->smu_features[GNLD_ACG].smu_feature_bitmap))
2300 			data->smu_features[GNLD_ACG].enabled = false;
2301 
2302 	return 0;
2303 }
2304 
2305 static int vega10_populate_gpio_parameters(struct pp_hwmgr *hwmgr)
2306 {
2307 	struct vega10_hwmgr *data = hwmgr->backend;
2308 	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2309 	struct pp_atomfwctrl_gpio_parameters gpio_params = {0};
2310 	int result;
2311 
2312 	result = pp_atomfwctrl_get_gpio_information(hwmgr, &gpio_params);
2313 	if (!result) {
2314 		if (PP_CAP(PHM_PlatformCaps_RegulatorHot) &&
2315 		    data->registry_data.regulator_hot_gpio_support) {
2316 			pp_table->VR0HotGpio = gpio_params.ucVR0HotGpio;
2317 			pp_table->VR0HotPolarity = gpio_params.ucVR0HotPolarity;
2318 			pp_table->VR1HotGpio = gpio_params.ucVR1HotGpio;
2319 			pp_table->VR1HotPolarity = gpio_params.ucVR1HotPolarity;
2320 		} else {
2321 			pp_table->VR0HotGpio = 0;
2322 			pp_table->VR0HotPolarity = 0;
2323 			pp_table->VR1HotGpio = 0;
2324 			pp_table->VR1HotPolarity = 0;
2325 		}
2326 
2327 		if (PP_CAP(PHM_PlatformCaps_AutomaticDCTransition) &&
2328 		    data->registry_data.ac_dc_switch_gpio_support) {
2329 			pp_table->AcDcGpio = gpio_params.ucAcDcGpio;
2330 			pp_table->AcDcPolarity = gpio_params.ucAcDcPolarity;
2331 		} else {
2332 			pp_table->AcDcGpio = 0;
2333 			pp_table->AcDcPolarity = 0;
2334 		}
2335 	}
2336 
2337 	return result;
2338 }
2339 
2340 static int vega10_avfs_enable(struct pp_hwmgr *hwmgr, bool enable)
2341 {
2342 	struct vega10_hwmgr *data = hwmgr->backend;
2343 
2344 	if (data->smu_features[GNLD_AVFS].supported) {
2345 		if (enable) {
2346 			PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2347 					true,
2348 					data->smu_features[GNLD_AVFS].smu_feature_bitmap),
2349 					"[avfs_control] Attempt to Enable AVFS feature Failed!",
2350 					return -1);
2351 			data->smu_features[GNLD_AVFS].enabled = true;
2352 		} else {
2353 			PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2354 					false,
2355 					data->smu_features[GNLD_AVFS].smu_feature_bitmap),
2356 					"[avfs_control] Attempt to Disable AVFS feature Failed!",
2357 					return -1);
2358 			data->smu_features[GNLD_AVFS].enabled = false;
2359 		}
2360 	}
2361 
2362 	return 0;
2363 }
2364 
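/*
 * Re-evaluate AVFS after a DPM table change: an overdrive VDDC change
 * invalidates the fused AVFS characterization, so AVFS is switched off;
 * any other pending table update toggles AVFS off and back on so the SMC
 * re-applies it against the new tables; otherwise AVFS is simply kept
 * enabled.
 */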
2365 static int vega10_update_avfs(struct pp_hwmgr *hwmgr)
2366 {
2367 	struct vega10_hwmgr *data = hwmgr->backend;
2368 
2369 	if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
2370 		vega10_avfs_enable(hwmgr, false);
2371 	} else if (data->need_update_dpm_table) {
2372 		vega10_avfs_enable(hwmgr, false);
2373 		vega10_avfs_enable(hwmgr, true);
2374 	} else {
2375 		vega10_avfs_enable(hwmgr, true);
2376 	}
2377 
2378 	return 0;
2379 }
2380 
2381 static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
2382 {
2383 	int result = 0;
2384 
2385 	uint64_t serial_number = 0;
2386 	uint32_t top32, bottom32;
2387 	struct phm_fuses_default fuse;
2388 
2389 	struct vega10_hwmgr *data = hwmgr->backend;
2390 	AvfsFuseOverride_t *avfs_fuse_table = &(data->smc_state_table.avfs_fuse_override_table);
2391 
2392 	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
2393 	top32 = smum_get_argument(hwmgr);
2394 
2395 	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
2396 	bottom32 = smum_get_argument(hwmgr);
2397 
2398 	serial_number = ((uint64_t)bottom32 << 32) | top32;
2399 
2400 	if (pp_override_get_default_fuse_value(serial_number, &fuse) == 0) {
2401 		avfs_fuse_table->VFT0_b  = fuse.VFT0_b;
2402 		avfs_fuse_table->VFT0_m1 = fuse.VFT0_m1;
2403 		avfs_fuse_table->VFT0_m2 = fuse.VFT0_m2;
2404 		avfs_fuse_table->VFT1_b  = fuse.VFT1_b;
2405 		avfs_fuse_table->VFT1_m1 = fuse.VFT1_m1;
2406 		avfs_fuse_table->VFT1_m2 = fuse.VFT1_m2;
2407 		avfs_fuse_table->VFT2_b  = fuse.VFT2_b;
2408 		avfs_fuse_table->VFT2_m1 = fuse.VFT2_m1;
2409 		avfs_fuse_table->VFT2_m2 = fuse.VFT2_m2;
2410 		result = smum_smc_table_manager(hwmgr,  (uint8_t *)avfs_fuse_table,
2411 						AVFSFUSETABLE, false);
2412 		PP_ASSERT_WITH_CODE(!result,
			"Failed to upload FuseOverride!",
2414 			);
2415 	}
2416 
2417 	return result;
2418 }
2419 
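/*
 * Compare the user-editable ODN dependency tables against the stock
 * powerplay tables.  A VDDC mismatch marks the corresponding clock domain
 * (plus VDDC) for a DPM table rebuild and returns early; if no mismatch is
 * found but the VDDC flag is still set from an earlier change, it is
 * converted into SCLK and MCLK updates instead.
 */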
2420 static void vega10_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
2421 {
2422 	struct vega10_hwmgr *data = hwmgr->backend;
2423 	struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
2424 	struct phm_ppt_v2_information *table_info = hwmgr->pptable;
2425 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
2426 	struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
2427 	uint32_t i;
2428 
2429 	dep_table = table_info->vdd_dep_on_mclk;
2430 	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_mclk);
2431 
2432 	for (i = 0; i < dep_table->count; i++) {
2433 		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
2434 			data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
2435 			return;
2436 		}
2437 	}
2438 
2439 	dep_table = table_info->vdd_dep_on_sclk;
2440 	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_sclk);
2441 	for (i = 0; i < dep_table->count; i++) {
2442 		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
2443 			data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
2444 			return;
2445 		}
2446 	}
2447 
2448 	if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
2449 		data->need_update_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
2450 		data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
2451 	}
2452 }
2453 
/**
 * Initializes the SMC table and uploads it.
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   0 on success; a negative error code otherwise.
 */
2461 static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
2462 {
2463 	int result;
2464 	struct vega10_hwmgr *data = hwmgr->backend;
2465 	struct phm_ppt_v2_information *table_info =
2466 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
2467 	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2468 	struct pp_atomfwctrl_voltage_table voltage_table;
2469 	struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
2470 	struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
2471 
2472 	result = vega10_setup_default_dpm_tables(hwmgr);
2473 	PP_ASSERT_WITH_CODE(!result,
2474 			"Failed to setup default DPM tables!",
2475 			return result);
2476 
2477 	/* initialize ODN table */
2478 	if (hwmgr->od_enabled) {
2479 		if (odn_table->max_vddc) {
2480 			data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
2481 			vega10_check_dpm_table_updated(hwmgr);
2482 		} else {
2483 			vega10_odn_initial_default_setting(hwmgr);
2484 		}
2485 	}
2486 
2487 	pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
2488 			VOLTAGE_OBJ_SVID2,  &voltage_table);
2489 	pp_table->MaxVidStep = voltage_table.max_vid_step;
2490 
2491 	pp_table->GfxDpmVoltageMode =
2492 			(uint8_t)(table_info->uc_gfx_dpm_voltage_mode);
2493 	pp_table->SocDpmVoltageMode =
2494 			(uint8_t)(table_info->uc_soc_dpm_voltage_mode);
2495 	pp_table->UclkDpmVoltageMode =
2496 			(uint8_t)(table_info->uc_uclk_dpm_voltage_mode);
2497 	pp_table->UvdDpmVoltageMode =
2498 			(uint8_t)(table_info->uc_uvd_dpm_voltage_mode);
2499 	pp_table->VceDpmVoltageMode =
2500 			(uint8_t)(table_info->uc_vce_dpm_voltage_mode);
2501 	pp_table->Mp0DpmVoltageMode =
2502 			(uint8_t)(table_info->uc_mp0_dpm_voltage_mode);
2503 
2504 	pp_table->DisplayDpmVoltageMode =
2505 			(uint8_t)(table_info->uc_dcef_dpm_voltage_mode);
2506 
2507 	data->vddc_voltage_table.psi0_enable = voltage_table.psi0_enable;
2508 	data->vddc_voltage_table.psi1_enable = voltage_table.psi1_enable;
2509 
2510 	if (data->registry_data.ulv_support &&
2511 			table_info->us_ulv_voltage_offset) {
2512 		result = vega10_populate_ulv_state(hwmgr);
2513 		PP_ASSERT_WITH_CODE(!result,
2514 				"Failed to initialize ULV state!",
2515 				return result);
2516 	}
2517 
2518 	result = vega10_populate_smc_link_levels(hwmgr);
2519 	PP_ASSERT_WITH_CODE(!result,
2520 			"Failed to initialize Link Level!",
2521 			return result);
2522 
2523 	result = vega10_populate_all_graphic_levels(hwmgr);
2524 	PP_ASSERT_WITH_CODE(!result,
2525 			"Failed to initialize Graphics Level!",
2526 			return result);
2527 
2528 	result = vega10_populate_all_memory_levels(hwmgr);
2529 	PP_ASSERT_WITH_CODE(!result,
2530 			"Failed to initialize Memory Level!",
2531 			return result);
2532 
2533 	vega10_populate_vddc_soc_levels(hwmgr);
2534 
2535 	result = vega10_populate_all_display_clock_levels(hwmgr);
2536 	PP_ASSERT_WITH_CODE(!result,
2537 			"Failed to initialize Display Level!",
2538 			return result);
2539 
2540 	result = vega10_populate_smc_vce_levels(hwmgr);
2541 	PP_ASSERT_WITH_CODE(!result,
2542 			"Failed to initialize VCE Level!",
2543 			return result);
2544 
2545 	result = vega10_populate_smc_uvd_levels(hwmgr);
2546 	PP_ASSERT_WITH_CODE(!result,
2547 			"Failed to initialize UVD Level!",
2548 			return result);
2549 
2550 	if (data->registry_data.clock_stretcher_support) {
2551 		result = vega10_populate_clock_stretcher_table(hwmgr);
2552 		PP_ASSERT_WITH_CODE(!result,
2553 				"Failed to populate Clock Stretcher Table!",
2554 				return result);
2555 	}
2556 
2557 	result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values);
2558 	if (!result) {
2559 		data->vbios_boot_state.vddc     = boot_up_values.usVddc;
2560 		data->vbios_boot_state.vddci    = boot_up_values.usVddci;
2561 		data->vbios_boot_state.mvddc    = boot_up_values.usMvddc;
2562 		data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
2563 		data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
2564 		pp_atomfwctrl_get_clk_information_by_clkid(hwmgr,
2565 				SMU9_SYSPLL0_SOCCLK_ID, &boot_up_values.ulSocClk);
2566 
2567 		pp_atomfwctrl_get_clk_information_by_clkid(hwmgr,
2568 				SMU9_SYSPLL0_DCEFCLK_ID, &boot_up_values.ulDCEFClk);
2569 
2570 		data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
2571 		data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
2572 		if (0 != boot_up_values.usVddc) {
2573 			smum_send_msg_to_smc_with_parameter(hwmgr,
2574 						PPSMC_MSG_SetFloorSocVoltage,
2575 						(boot_up_values.usVddc * 4));
2576 			data->vbios_boot_state.bsoc_vddc_lock = true;
2577 		} else {
2578 			data->vbios_boot_state.bsoc_vddc_lock = false;
2579 		}
2580 		smum_send_msg_to_smc_with_parameter(hwmgr,
2581 				PPSMC_MSG_SetMinDeepSleepDcefclk,
2582 			(uint32_t)(data->vbios_boot_state.dcef_clock / 100));
2583 	}
2584 
2585 	result = vega10_populate_avfs_parameters(hwmgr);
2586 	PP_ASSERT_WITH_CODE(!result,
2587 			"Failed to initialize AVFS Parameters!",
2588 			return result);
2589 
2590 	result = vega10_populate_gpio_parameters(hwmgr);
2591 	PP_ASSERT_WITH_CODE(!result,
2592 			"Failed to initialize GPIO Parameters!",
2593 			return result);
2594 
2595 	pp_table->GfxclkAverageAlpha = (uint8_t)
2596 			(data->gfxclk_average_alpha);
2597 	pp_table->SocclkAverageAlpha = (uint8_t)
2598 			(data->socclk_average_alpha);
2599 	pp_table->UclkAverageAlpha = (uint8_t)
2600 			(data->uclk_average_alpha);
2601 	pp_table->GfxActivityAverageAlpha = (uint8_t)
2602 			(data->gfx_activity_average_alpha);
2603 
2604 	vega10_populate_and_upload_avfs_fuse_override(hwmgr);
2605 
2606 	result = smum_smc_table_manager(hwmgr, (uint8_t *)pp_table, PPTABLE, false);
2607 
2608 	PP_ASSERT_WITH_CODE(!result,
2609 			"Failed to upload PPtable!", return result);
2610 
2611 	result = vega10_avfs_enable(hwmgr, true);
2612 	PP_ASSERT_WITH_CODE(!result, "Attempt to enable AVFS feature Failed!",
2613 					return result);
2614 	vega10_acg_enable(hwmgr);
2615 
2616 	return 0;
2617 }
2618 
2619 static int vega10_enable_thermal_protection(struct pp_hwmgr *hwmgr)
2620 {
2621 	struct vega10_hwmgr *data = hwmgr->backend;
2622 
2623 	if (data->smu_features[GNLD_THERMAL].supported) {
2624 		if (data->smu_features[GNLD_THERMAL].enabled)
			pr_info("THERMAL Feature Already enabled!\n");
2626 
2627 		PP_ASSERT_WITH_CODE(
2628 				!vega10_enable_smc_features(hwmgr,
2629 				true,
2630 				data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
2631 				"Enable THERMAL Feature Failed!",
2632 				return -1);
2633 		data->smu_features[GNLD_THERMAL].enabled = true;
2634 	}
2635 
2636 	return 0;
2637 }
2638 
2639 static int vega10_disable_thermal_protection(struct pp_hwmgr *hwmgr)
2640 {
2641 	struct vega10_hwmgr *data = hwmgr->backend;
2642 
2643 	if (data->smu_features[GNLD_THERMAL].supported) {
2644 		if (!data->smu_features[GNLD_THERMAL].enabled)
			pr_info("THERMAL Feature Already disabled!\n");
2646 
2647 		PP_ASSERT_WITH_CODE(
2648 				!vega10_enable_smc_features(hwmgr,
2649 				false,
2650 				data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
2651 				"disable THERMAL Feature Failed!",
2652 				return -1);
2653 		data->smu_features[GNLD_THERMAL].enabled = false;
2654 	}
2655 
2656 	return 0;
2657 }
2658 
2659 static int vega10_enable_vrhot_feature(struct pp_hwmgr *hwmgr)
2660 {
2661 	struct vega10_hwmgr *data = hwmgr->backend;
2662 
2663 	if (PP_CAP(PHM_PlatformCaps_RegulatorHot)) {
2664 		if (data->smu_features[GNLD_VR0HOT].supported) {
2665 			PP_ASSERT_WITH_CODE(
2666 					!vega10_enable_smc_features(hwmgr,
2667 					true,
2668 					data->smu_features[GNLD_VR0HOT].smu_feature_bitmap),
2669 					"Attempt to Enable VR0 Hot feature Failed!",
2670 					return -1);
2671 			data->smu_features[GNLD_VR0HOT].enabled = true;
2672 		} else {
2673 			if (data->smu_features[GNLD_VR1HOT].supported) {
2674 				PP_ASSERT_WITH_CODE(
2675 						!vega10_enable_smc_features(hwmgr,
2676 						true,
2677 						data->smu_features[GNLD_VR1HOT].smu_feature_bitmap),
						"Attempt to Enable VR1 Hot feature Failed!",
2679 						return -1);
2680 				data->smu_features[GNLD_VR1HOT].enabled = true;
2681 			}
2682 		}
2683 	}
2684 	return 0;
2685 }
2686 
2687 static int vega10_enable_ulv(struct pp_hwmgr *hwmgr)
2688 {
2689 	struct vega10_hwmgr *data = hwmgr->backend;
2690 
2691 	if (data->registry_data.ulv_support) {
2692 		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2693 				true, data->smu_features[GNLD_ULV].smu_feature_bitmap),
2694 				"Enable ULV Feature Failed!",
2695 				return -1);
2696 		data->smu_features[GNLD_ULV].enabled = true;
2697 	}
2698 
2699 	return 0;
2700 }
2701 
2702 static int vega10_disable_ulv(struct pp_hwmgr *hwmgr)
2703 {
2704 	struct vega10_hwmgr *data = hwmgr->backend;
2705 
2706 	if (data->registry_data.ulv_support) {
2707 		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2708 				false, data->smu_features[GNLD_ULV].smu_feature_bitmap),
2709 				"disable ULV Feature Failed!",
2710 				return -EINVAL);
2711 		data->smu_features[GNLD_ULV].enabled = false;
2712 	}
2713 
2714 	return 0;
2715 }
2716 
2717 static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
2718 {
2719 	struct vega10_hwmgr *data = hwmgr->backend;
2720 
2721 	if (data->smu_features[GNLD_DS_GFXCLK].supported) {
2722 		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2723 				true, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap),
2724 				"Attempt to Enable DS_GFXCLK Feature Failed!",
2725 				return -EINVAL);
2726 		data->smu_features[GNLD_DS_GFXCLK].enabled = true;
2727 	}
2728 
2729 	if (data->smu_features[GNLD_DS_SOCCLK].supported) {
2730 		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2731 				true, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap),
2732 				"Attempt to Enable DS_SOCCLK Feature Failed!",
2733 				return -EINVAL);
2734 		data->smu_features[GNLD_DS_SOCCLK].enabled = true;
2735 	}
2736 
2737 	if (data->smu_features[GNLD_DS_LCLK].supported) {
2738 		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2739 				true, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap),
2740 				"Attempt to Enable DS_LCLK Feature Failed!",
2741 				return -EINVAL);
2742 		data->smu_features[GNLD_DS_LCLK].enabled = true;
2743 	}
2744 
2745 	if (data->smu_features[GNLD_DS_DCEFCLK].supported) {
2746 		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2747 				true, data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap),
2748 				"Attempt to Enable DS_DCEFCLK Feature Failed!",
2749 				return -EINVAL);
2750 		data->smu_features[GNLD_DS_DCEFCLK].enabled = true;
2751 	}
2752 
2753 	return 0;
2754 }
2755 
2756 static int vega10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
2757 {
2758 	struct vega10_hwmgr *data = hwmgr->backend;
2759 
2760 	if (data->smu_features[GNLD_DS_GFXCLK].supported) {
2761 		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2762 				false, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap),
2763 				"Attempt to disable DS_GFXCLK Feature Failed!",
2764 				return -EINVAL);
2765 		data->smu_features[GNLD_DS_GFXCLK].enabled = false;
2766 	}
2767 
2768 	if (data->smu_features[GNLD_DS_SOCCLK].supported) {
2769 		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2770 				false, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap),
				"Attempt to disable DS_SOCCLK Feature Failed!",
2772 				return -EINVAL);
2773 		data->smu_features[GNLD_DS_SOCCLK].enabled = false;
2774 	}
2775 
2776 	if (data->smu_features[GNLD_DS_LCLK].supported) {
2777 		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2778 				false, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap),
2779 				"Attempt to disable DS_LCLK Feature Failed!",
2780 				return -EINVAL);
2781 		data->smu_features[GNLD_DS_LCLK].enabled = false;
2782 	}
2783 
2784 	if (data->smu_features[GNLD_DS_DCEFCLK].supported) {
2785 		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2786 				false, data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap),
2787 				"Attempt to disable DS_DCEFCLK Feature Failed!",
2788 				return -EINVAL);
2789 		data->smu_features[GNLD_DS_DCEFCLK].enabled = false;
2790 	}
2791 
2792 	return 0;
2793 }
2794 
2795 static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
2796 {
2797 	struct vega10_hwmgr *data = hwmgr->backend;
2798 	uint32_t i, feature_mask = 0;

	if (data->smu_features[GNLD_LED_DISPLAY].supported) {
2802 		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2803 				false, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
2804 		"Attempt to disable LED DPM feature failed!", return -EINVAL);
2805 		data->smu_features[GNLD_LED_DISPLAY].enabled = false;
2806 	}
2807 
2808 	for (i = 0; i < GNLD_DPM_MAX; i++) {
2809 		if (data->smu_features[i].smu_feature_bitmap & bitmap) {
2810 			if (data->smu_features[i].supported) {
2811 				if (data->smu_features[i].enabled) {
2812 					feature_mask |= data->smu_features[i].
2813 							smu_feature_bitmap;
2814 					data->smu_features[i].enabled = false;
2815 				}
2816 			}
2817 		}
2818 	}
2819 
2820 	vega10_enable_smc_features(hwmgr, false, feature_mask);
2821 
2822 	return 0;
2823 }
2824 
/**
 * @brief Tell the SMC to enable the supported DPM features.
 *
 * @param    hwmgr - the address of the powerplay hardware manager.
 * @param    bitmap - bitmap of the features to be enabled.
 * @return   0 if at least one DPM feature is successfully enabled.
 */
2832 static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
2833 {
2834 	struct vega10_hwmgr *data = hwmgr->backend;
2835 	uint32_t i, feature_mask = 0;
2836 
2837 	for (i = 0; i < GNLD_DPM_MAX; i++) {
2838 		if (data->smu_features[i].smu_feature_bitmap & bitmap) {
2839 			if (data->smu_features[i].supported) {
2840 				if (!data->smu_features[i].enabled) {
2841 					feature_mask |= data->smu_features[i].
2842 							smu_feature_bitmap;
2843 					data->smu_features[i].enabled = true;
2844 				}
2845 			}
2846 		}
2847 	}
2848 
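	/*
	 * The features collected above are enabled with one batched SMC
	 * request; if that request fails, roll the bookkeeping back so the
	 * enabled flags keep reflecting what the firmware actually accepted.
	 */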
2849 	if (vega10_enable_smc_features(hwmgr,
2850 			true, feature_mask)) {
2851 		for (i = 0; i < GNLD_DPM_MAX; i++) {
2852 			if (data->smu_features[i].smu_feature_bitmap &
2853 					feature_mask)
2854 				data->smu_features[i].enabled = false;
2855 		}
2856 	}
2857 
	if (data->smu_features[GNLD_LED_DISPLAY].supported) {
2859 		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2860 				true, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
2861 		"Attempt to Enable LED DPM feature Failed!", return -EINVAL);
2862 		data->smu_features[GNLD_LED_DISPLAY].enabled = true;
2863 	}
2864 
2865 	if (data->vbios_boot_state.bsoc_vddc_lock) {
2866 		smum_send_msg_to_smc_with_parameter(hwmgr,
2867 						PPSMC_MSG_SetFloorSocVoltage, 0);
2868 		data->vbios_boot_state.bsoc_vddc_lock = false;
2869 	}
2870 
2871 	if (PP_CAP(PHM_PlatformCaps_Falcon_QuickTransition)) {
2872 		if (data->smu_features[GNLD_ACDC].supported) {
2873 			PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2874 					true, data->smu_features[GNLD_ACDC].smu_feature_bitmap),
					"Attempt to Enable ACDC Feature Failed!",
2876 					return -1);
2877 			data->smu_features[GNLD_ACDC].enabled = true;
2878 		}
2879 	}
2880 
2881 	return 0;
2882 }
2883 
2884 static int vega10_enable_disable_PCC_limit_feature(struct pp_hwmgr *hwmgr, bool enable)
2885 {
2886 	struct vega10_hwmgr *data = hwmgr->backend;
2887 
2888 	if (data->smu_features[GNLD_PCC_LIMIT].supported) {
2889 		if (enable == data->smu_features[GNLD_PCC_LIMIT].enabled)
			pr_info("GNLD_PCC_LIMIT has already been %s\n", enable ? "enabled" : "disabled");
2891 		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2892 				enable, data->smu_features[GNLD_PCC_LIMIT].smu_feature_bitmap),
2893 				"Attempt to Enable PCC Limit feature Failed!",
2894 				return -EINVAL);
2895 		data->smu_features[GNLD_PCC_LIMIT].enabled = enable;
2896 	}
2897 
2898 	return 0;
2899 }
2900 
2901 static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
2902 {
2903 	struct vega10_hwmgr *data = hwmgr->backend;
2904 	int tmp_result, result = 0;
2905 
2906 	vega10_enable_disable_PCC_limit_feature(hwmgr, true);
2907 
2908 	smum_send_msg_to_smc_with_parameter(hwmgr,
2909 		PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
2910 
2911 	tmp_result = vega10_construct_voltage_tables(hwmgr);
2912 	PP_ASSERT_WITH_CODE(!tmp_result,
2913 			"Failed to construct voltage tables!",
2914 			result = tmp_result);
2915 
2916 	tmp_result = vega10_init_smc_table(hwmgr);
2917 	PP_ASSERT_WITH_CODE(!tmp_result,
2918 			"Failed to initialize SMC table!",
2919 			result = tmp_result);
2920 
2921 	if (PP_CAP(PHM_PlatformCaps_ThermalController)) {
2922 		tmp_result = vega10_enable_thermal_protection(hwmgr);
2923 		PP_ASSERT_WITH_CODE(!tmp_result,
2924 				"Failed to enable thermal protection!",
2925 				result = tmp_result);
2926 	}
2927 
2928 	tmp_result = vega10_enable_vrhot_feature(hwmgr);
2929 	PP_ASSERT_WITH_CODE(!tmp_result,
2930 			"Failed to enable VR hot feature!",
2931 			result = tmp_result);
2932 
2933 	tmp_result = vega10_enable_deep_sleep_master_switch(hwmgr);
2934 	PP_ASSERT_WITH_CODE(!tmp_result,
2935 			"Failed to enable deep sleep master switch!",
2936 			result = tmp_result);
2937 
2938 	tmp_result = vega10_start_dpm(hwmgr, SMC_DPM_FEATURES);
2939 	PP_ASSERT_WITH_CODE(!tmp_result,
2940 			"Failed to start DPM!", result = tmp_result);
2941 
	/* enable DIDT; do not abort if enabling DIDT fails */
2943 	tmp_result = vega10_enable_didt_config(hwmgr);
2944 	PP_ASSERT(!tmp_result,
2945 			"Failed to enable didt config!");
2946 
2947 	tmp_result = vega10_enable_power_containment(hwmgr);
2948 	PP_ASSERT_WITH_CODE(!tmp_result,
2949 			"Failed to enable power containment!",
2950 			result = tmp_result);
2951 
2952 	tmp_result = vega10_power_control_set_level(hwmgr);
2953 	PP_ASSERT_WITH_CODE(!tmp_result,
			"Failed to set power control level!",
2955 			result = tmp_result);
2956 
2957 	tmp_result = vega10_enable_ulv(hwmgr);
2958 	PP_ASSERT_WITH_CODE(!tmp_result,
2959 			"Failed to enable ULV!",
2960 			result = tmp_result);
2961 
2962 	return result;
2963 }
2964 
2965 static int vega10_get_power_state_size(struct pp_hwmgr *hwmgr)
2966 {
2967 	return sizeof(struct vega10_power_state);
2968 }
2969 
2970 static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
2971 		void *state, struct pp_power_state *power_state,
2972 		void *pp_table, uint32_t classification_flag)
2973 {
2974 	ATOM_Vega10_GFXCLK_Dependency_Record_V2 *patom_record_V2;
2975 	struct vega10_power_state *vega10_power_state =
2976 			cast_phw_vega10_power_state(&(power_state->hardware));
2977 	struct vega10_performance_level *performance_level;
2978 	ATOM_Vega10_State *state_entry = (ATOM_Vega10_State *)state;
2979 	ATOM_Vega10_POWERPLAYTABLE *powerplay_table =
2980 			(ATOM_Vega10_POWERPLAYTABLE *)pp_table;
2981 	ATOM_Vega10_SOCCLK_Dependency_Table *socclk_dep_table =
2982 			(ATOM_Vega10_SOCCLK_Dependency_Table *)
2983 			(((unsigned long)powerplay_table) +
2984 			le16_to_cpu(powerplay_table->usSocclkDependencyTableOffset));
2985 	ATOM_Vega10_GFXCLK_Dependency_Table *gfxclk_dep_table =
2986 			(ATOM_Vega10_GFXCLK_Dependency_Table *)
2987 			(((unsigned long)powerplay_table) +
2988 			le16_to_cpu(powerplay_table->usGfxclkDependencyTableOffset));
2989 	ATOM_Vega10_MCLK_Dependency_Table *mclk_dep_table =
2990 			(ATOM_Vega10_MCLK_Dependency_Table *)
2991 			(((unsigned long)powerplay_table) +
2992 			le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
2993 
2994 
2995 	/* The following fields are not initialized here:
2996 	 * id orderedList allStatesList
2997 	 */
2998 	power_state->classification.ui_label =
2999 			(le16_to_cpu(state_entry->usClassification) &
3000 			ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
3001 			ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
3002 	power_state->classification.flags = classification_flag;
3003 	/* NOTE: There is a classification2 flag in BIOS
3004 	 * that is not being used right now
3005 	 */
3006 	power_state->classification.temporary_state = false;
3007 	power_state->classification.to_be_deleted = false;
3008 
3009 	power_state->validation.disallowOnDC =
3010 			((le32_to_cpu(state_entry->ulCapsAndSettings) &
3011 					ATOM_Vega10_DISALLOW_ON_DC) != 0);
3012 
3013 	power_state->display.disableFrameModulation = false;
3014 	power_state->display.limitRefreshrate = false;
3015 	power_state->display.enableVariBright =
3016 			((le32_to_cpu(state_entry->ulCapsAndSettings) &
3017 					ATOM_Vega10_ENABLE_VARIBRIGHT) != 0);
3018 
3019 	power_state->validation.supportedPowerLevels = 0;
3020 	power_state->uvd_clocks.VCLK = 0;
3021 	power_state->uvd_clocks.DCLK = 0;
3022 	power_state->temperatures.min = 0;
3023 	power_state->temperatures.max = 0;
3024 
3025 	performance_level = &(vega10_power_state->performance_levels
3026 			[vega10_power_state->performance_level_count++]);
3027 
3028 	PP_ASSERT_WITH_CODE(
3029 			(vega10_power_state->performance_level_count <
3030 					NUM_GFXCLK_DPM_LEVELS),
			"Performance level count exceeds the SMC limit!",
3032 			return -1);
3033 
3034 	PP_ASSERT_WITH_CODE(
3035 			(vega10_power_state->performance_level_count <=
3036 					hwmgr->platform_descriptor.
3037 					hardwareActivityPerformanceLevels),
			"Performance level count exceeds driver limit!",
3039 			return -1);
3040 
3041 	/* Performance levels are arranged from low to high. */
3042 	performance_level->soc_clock = socclk_dep_table->entries
3043 			[state_entry->ucSocClockIndexLow].ulClk;
3044 	performance_level->gfx_clock = gfxclk_dep_table->entries
3045 			[state_entry->ucGfxClockIndexLow].ulClk;
3046 	performance_level->mem_clock = mclk_dep_table->entries
3047 			[state_entry->ucMemClockIndexLow].ulMemClk;
3048 
3049 	performance_level = &(vega10_power_state->performance_levels
3050 				[vega10_power_state->performance_level_count++]);
3051 	performance_level->soc_clock = socclk_dep_table->entries
3052 				[state_entry->ucSocClockIndexHigh].ulClk;
3053 	if (gfxclk_dep_table->ucRevId == 0) {
3054 		performance_level->gfx_clock = gfxclk_dep_table->entries
3055 			[state_entry->ucGfxClockIndexHigh].ulClk;
3056 	} else if (gfxclk_dep_table->ucRevId == 1) {
3057 		patom_record_V2 = (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries;
3058 		performance_level->gfx_clock = patom_record_V2[state_entry->ucGfxClockIndexHigh].ulClk;
3059 	}
3060 
3061 	performance_level->mem_clock = mclk_dep_table->entries
3062 			[state_entry->ucMemClockIndexHigh].ulMemClk;
3063 	return 0;
3064 }
3065 
3066 static int vega10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
3067 		unsigned long entry_index, struct pp_power_state *state)
3068 {
3069 	int result;
3070 	struct vega10_power_state *ps;
3071 
3072 	state->hardware.magic = PhwVega10_Magic;
3073 
3074 	ps = cast_phw_vega10_power_state(&state->hardware);
3075 
3076 	result = vega10_get_powerplay_table_entry(hwmgr, entry_index, state,
3077 			vega10_get_pp_table_entry_callback_func);
3078 
3079 	/*
3080 	 * This is the earliest time we have all the dependency table
3081 	 * and the VBIOS boot state
3082 	 */
3083 	/* set DC compatible flag if this state supports DC */
3084 	if (!state->validation.disallowOnDC)
3085 		ps->dc_compatible = true;
3086 
3087 	ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
3088 	ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
3089 
3090 	return 0;
3091 }
3092 
3093 static int vega10_patch_boot_state(struct pp_hwmgr *hwmgr,
3094 	     struct pp_hw_power_state *hw_ps)
3095 {
3096 	return 0;
3097 }
3098 
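/*
 * Adjust a requested power state before it is programmed: clamp clocks to
 * the DC limits when running on battery, honor the display-driven minimum
 * clocks (and the stable-pstate overrides), and decide whether MCLK
 * switching must be disabled for the current display configuration.
 */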
3099 static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
3100 				struct pp_power_state  *request_ps,
3101 			const struct pp_power_state *current_ps)
3102 {
3103 	struct amdgpu_device *adev = hwmgr->adev;
3104 	struct vega10_power_state *vega10_ps =
3105 				cast_phw_vega10_power_state(&request_ps->hardware);
3106 	uint32_t sclk;
3107 	uint32_t mclk;
3108 	struct PP_Clocks minimum_clocks = {0};
3109 	bool disable_mclk_switching;
3110 	bool disable_mclk_switching_for_frame_lock;
3111 	bool disable_mclk_switching_for_vr;
3112 	bool force_mclk_high;
3113 	const struct phm_clock_and_voltage_limits *max_limits;
3114 	uint32_t i;
3115 	struct vega10_hwmgr *data = hwmgr->backend;
3116 	struct phm_ppt_v2_information *table_info =
3117 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
3118 	int32_t count;
3119 	uint32_t stable_pstate_sclk_dpm_percentage;
3120 	uint32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
3121 	uint32_t latency;
3122 
3123 	data->battery_state = (PP_StateUILabel_Battery ==
3124 			request_ps->classification.ui_label);
3125 
3126 	if (vega10_ps->performance_level_count != 2)
		pr_info("Vega10 should always have 2 performance levels");
3128 
3129 	max_limits = adev->pm.ac_power ?
3130 			&(hwmgr->dyn_state.max_clock_voltage_on_ac) :
3131 			&(hwmgr->dyn_state.max_clock_voltage_on_dc);
3132 
3133 	/* Cap clock DPM tables at DC MAX if it is in DC. */
3134 	if (!adev->pm.ac_power) {
3135 		for (i = 0; i < vega10_ps->performance_level_count; i++) {
3136 			if (vega10_ps->performance_levels[i].mem_clock >
3137 				max_limits->mclk)
3138 				vega10_ps->performance_levels[i].mem_clock =
3139 						max_limits->mclk;
3140 			if (vega10_ps->performance_levels[i].gfx_clock >
3141 				max_limits->sclk)
3142 				vega10_ps->performance_levels[i].gfx_clock =
3143 						max_limits->sclk;
3144 		}
3145 	}
3146 
3147 	/* result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
3148 	minimum_clocks.engineClock = hwmgr->display_config->min_core_set_clock;
3149 	minimum_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
3150 
3151 	if (PP_CAP(PHM_PlatformCaps_StablePState)) {
3152 		stable_pstate_sclk_dpm_percentage =
3153 			data->registry_data.stable_pstate_sclk_dpm_percentage;
3154 		PP_ASSERT_WITH_CODE(
3155 			data->registry_data.stable_pstate_sclk_dpm_percentage >= 1 &&
3156 			data->registry_data.stable_pstate_sclk_dpm_percentage <= 100,
			"percent sclk value must be between 1% and 100%, setting default value of 75%",
3158 			stable_pstate_sclk_dpm_percentage = 75);
3159 
3160 		max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
3161 		stable_pstate_sclk = (max_limits->sclk *
3162 				stable_pstate_sclk_dpm_percentage) / 100;
3163 
3164 		for (count = table_info->vdd_dep_on_sclk->count - 1;
3165 				count >= 0; count--) {
3166 			if (stable_pstate_sclk >=
3167 					table_info->vdd_dep_on_sclk->entries[count].clk) {
3168 				stable_pstate_sclk =
3169 						table_info->vdd_dep_on_sclk->entries[count].clk;
3170 				break;
3171 			}
3172 		}
3173 
3174 		if (count < 0)
3175 			stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
3176 
3177 		stable_pstate_mclk = max_limits->mclk;
3178 
3179 		minimum_clocks.engineClock = stable_pstate_sclk;
3180 		minimum_clocks.memoryClock = stable_pstate_mclk;
3181 	}
3182 
3183 	disable_mclk_switching_for_frame_lock =
3184 		PP_CAP(PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
3185 	disable_mclk_switching_for_vr =
3186 		PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR);
3187 	force_mclk_high = PP_CAP(PHM_PlatformCaps_ForceMclkHigh);
3188 
3189 	if (hwmgr->display_config->num_display == 0)
3190 		disable_mclk_switching = false;
3191 	else
3192 		disable_mclk_switching = (hwmgr->display_config->num_display > 1) ||
3193 			disable_mclk_switching_for_frame_lock ||
3194 			disable_mclk_switching_for_vr ||
3195 			force_mclk_high;
3196 
3197 	sclk = vega10_ps->performance_levels[0].gfx_clock;
3198 	mclk = vega10_ps->performance_levels[0].mem_clock;
3199 
3200 	if (sclk < minimum_clocks.engineClock)
3201 		sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
3202 				max_limits->sclk : minimum_clocks.engineClock;
3203 
3204 	if (mclk < minimum_clocks.memoryClock)
3205 		mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
3206 				max_limits->mclk : minimum_clocks.memoryClock;
3207 
3208 	vega10_ps->performance_levels[0].gfx_clock = sclk;
3209 	vega10_ps->performance_levels[0].mem_clock = mclk;
3210 
3211 	if (vega10_ps->performance_levels[1].gfx_clock <
3212 			vega10_ps->performance_levels[0].gfx_clock)
3213 		vega10_ps->performance_levels[0].gfx_clock =
3214 				vega10_ps->performance_levels[1].gfx_clock;
3215 
3216 	if (disable_mclk_switching) {
3217 		/* Set Mclk the max of level 0 and level 1 */
3218 		if (mclk < vega10_ps->performance_levels[1].mem_clock)
3219 			mclk = vega10_ps->performance_levels[1].mem_clock;
3220 
3221 		/* Find the lowest MCLK frequency that is within
3222 		 * the tolerable latency defined in DAL
3223 		 */
3224 		latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
3225 		for (i = 0; i < data->mclk_latency_table.count; i++) {
3226 			if ((data->mclk_latency_table.entries[i].latency <= latency) &&
3227 				(data->mclk_latency_table.entries[i].frequency >=
3228 						vega10_ps->performance_levels[0].mem_clock) &&
3229 				(data->mclk_latency_table.entries[i].frequency <=
3230 						vega10_ps->performance_levels[1].mem_clock))
3231 				mclk = data->mclk_latency_table.entries[i].frequency;
3232 		}
3233 		vega10_ps->performance_levels[0].mem_clock = mclk;
3234 	} else {
3235 		if (vega10_ps->performance_levels[1].mem_clock <
3236 				vega10_ps->performance_levels[0].mem_clock)
3237 			vega10_ps->performance_levels[0].mem_clock =
3238 					vega10_ps->performance_levels[1].mem_clock;
3239 	}
3240 
3241 	if (PP_CAP(PHM_PlatformCaps_StablePState)) {
3242 		for (i = 0; i < vega10_ps->performance_level_count; i++) {
3243 			vega10_ps->performance_levels[i].gfx_clock = stable_pstate_sclk;
3244 			vega10_ps->performance_levels[i].mem_clock = stable_pstate_mclk;
3245 		}
3246 	}
3247 
3248 	return 0;
3249 }
3250 
3251 static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
3252 {
3253 	struct vega10_hwmgr *data = hwmgr->backend;
3254 
3255 	if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
3256 		data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
3257 
3258 	return 0;
3259 }
3260 
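/*
 * Re-populate the SCLK/MCLK DPM tables when overdrive settings or display
 * requirements have changed, then push the affected graphics/memory levels
 * and the VDDC/SOC levels back to the SMC.
 */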
3261 static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
3262 		struct pp_hwmgr *hwmgr, const void *input)
3263 {
3264 	int result = 0;
3265 	struct vega10_hwmgr *data = hwmgr->backend;
3266 	struct vega10_dpm_table *dpm_table = &data->dpm_table;
3267 	struct vega10_odn_dpm_table *odn_table = &data->odn_dpm_table;
3268 	struct vega10_odn_clock_voltage_dependency_table *odn_clk_table = &odn_table->vdd_dep_on_sclk;
3269 	int count;
3270 
3271 	if (!data->need_update_dpm_table)
3272 		return 0;
3273 
3274 	if (hwmgr->od_enabled && data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
3275 		for (count = 0; count < dpm_table->gfx_table.count; count++)
3276 			dpm_table->gfx_table.dpm_levels[count].value = odn_clk_table->entries[count].clk;
3277 	}
3278 
3279 	odn_clk_table = &odn_table->vdd_dep_on_mclk;
3280 	if (hwmgr->od_enabled && data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
3281 		for (count = 0; count < dpm_table->mem_table.count; count++)
3282 			dpm_table->mem_table.dpm_levels[count].value = odn_clk_table->entries[count].clk;
3283 	}
3284 
3285 	if (data->need_update_dpm_table &
			(DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK | DPMTABLE_UPDATE_SOCCLK)) {
3287 		result = vega10_populate_all_graphic_levels(hwmgr);
3288 		PP_ASSERT_WITH_CODE((0 == result),
3289 				"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
3290 				return result);
3291 	}
3292 
3293 	if (data->need_update_dpm_table &
			(DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
3295 		result = vega10_populate_all_memory_levels(hwmgr);
3296 		PP_ASSERT_WITH_CODE((0 == result),
3297 				"Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
3298 				return result);
3299 	}
3300 
3301 	vega10_populate_vddc_soc_levels(hwmgr);
3302 
3303 	return result;
3304 }
3305 
3306 static int vega10_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
3307 		struct vega10_single_dpm_table *dpm_table,
3308 		uint32_t low_limit, uint32_t high_limit)
3309 {
3310 	uint32_t i;
3311 
3312 	for (i = 0; i < dpm_table->count; i++) {
3313 		if ((dpm_table->dpm_levels[i].value < low_limit) ||
3314 		    (dpm_table->dpm_levels[i].value > high_limit))
3315 			dpm_table->dpm_levels[i].enabled = false;
3316 		else
3317 			dpm_table->dpm_levels[i].enabled = true;
3318 	}
3319 	return 0;
3320 }
3321 
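/*
 * Same as vega10_trim_single_dpm_states(), but additionally requires the
 * level's bit to be set in disable_dpm_mask; despite its name, the mask
 * selects the levels that are allowed to stay enabled.
 */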
3322 static int vega10_trim_single_dpm_states_with_mask(struct pp_hwmgr *hwmgr,
3323 		struct vega10_single_dpm_table *dpm_table,
3324 		uint32_t low_limit, uint32_t high_limit,
3325 		uint32_t disable_dpm_mask)
3326 {
3327 	uint32_t i;
3328 
3329 	for (i = 0; i < dpm_table->count; i++) {
3330 		if ((dpm_table->dpm_levels[i].value < low_limit) ||
3331 		    (dpm_table->dpm_levels[i].value > high_limit))
3332 			dpm_table->dpm_levels[i].enabled = false;
3333 		else if (!((1 << i) & disable_dpm_mask))
3334 			dpm_table->dpm_levels[i].enabled = false;
3335 		else
3336 			dpm_table->dpm_levels[i].enabled = true;
3337 	}
3338 	return 0;
3339 }
3340 
3341 static int vega10_trim_dpm_states(struct pp_hwmgr *hwmgr,
3342 		const struct vega10_power_state *vega10_ps)
3343 {
3344 	struct vega10_hwmgr *data = hwmgr->backend;
3345 	uint32_t high_limit_count;
3346 
3347 	PP_ASSERT_WITH_CODE((vega10_ps->performance_level_count >= 1),
3348 			"power state did not have any performance level",
3349 			return -1);
3350 
3351 	high_limit_count = (vega10_ps->performance_level_count == 1) ? 0 : 1;
3352 
3353 	vega10_trim_single_dpm_states(hwmgr,
3354 			&(data->dpm_table.soc_table),
3355 			vega10_ps->performance_levels[0].soc_clock,
3356 			vega10_ps->performance_levels[high_limit_count].soc_clock);
3357 
3358 	vega10_trim_single_dpm_states_with_mask(hwmgr,
3359 			&(data->dpm_table.gfx_table),
3360 			vega10_ps->performance_levels[0].gfx_clock,
3361 			vega10_ps->performance_levels[high_limit_count].gfx_clock,
3362 			data->disable_dpm_mask);
3363 
3364 	vega10_trim_single_dpm_states(hwmgr,
3365 			&(data->dpm_table.mem_table),
3366 			vega10_ps->performance_levels[0].mem_clock,
3367 			vega10_ps->performance_levels[high_limit_count].mem_clock);
3368 
3369 	return 0;
3370 }
3371 
3372 static uint32_t vega10_find_lowest_dpm_level(
3373 		struct vega10_single_dpm_table *table)
3374 {
3375 	uint32_t i;
3376 
3377 	for (i = 0; i < table->count; i++) {
3378 		if (table->dpm_levels[i].enabled)
3379 			break;
3380 	}
3381 
3382 	return i;
3383 }
3384 
3385 static uint32_t vega10_find_highest_dpm_level(
3386 		struct vega10_single_dpm_table *table)
3387 {
3388 	uint32_t i = 0;
3389 
3390 	if (table->count <= MAX_REGULAR_DPM_NUMBER) {
3391 		for (i = table->count; i > 0; i--) {
3392 			if (table->dpm_levels[i - 1].enabled)
3393 				return i - 1;
3394 		}
3395 	} else {
3396 		pr_info("DPM Table Has Too Many Entries!");
3397 		return MAX_REGULAR_DPM_NUMBER - 1;
3398 	}
3399 
3400 	return i;
3401 }
3402 
3403 static void vega10_apply_dal_minimum_voltage_request(
3404 		struct pp_hwmgr *hwmgr)
3405 {
3406 	return;
3407 }
3408 
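/*
 * Derive the SOCCLK index to request when the memory boot level is the
 * top UCLK DPM level: take the vddInd of the MCLK dependency entry for
 * that level and add one, as used with PPSMC_MSG_SetSoftMinSocclkByIndex
 * in vega10_upload_dpm_bootup_level().
 */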
3409 static int vega10_get_soc_index_for_max_uclk(struct pp_hwmgr *hwmgr)
3410 {
3411 	struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table_on_mclk;
3412 	struct phm_ppt_v2_information *table_info =
3413 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
3414 
3415 	vdd_dep_table_on_mclk  = table_info->vdd_dep_on_mclk;
3416 
3417 	return vdd_dep_table_on_mclk->entries[NUM_UCLK_DPM_LEVELS - 1].vddInd + 1;
3418 }
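/*
 * Push the cached boot (soft minimum) levels to the SMC when they differ
 * from what is currently programmed.  When the memory boot level is the
 * top UCLK level, a SOCCLK soft minimum is requested instead of a UCLK
 * index (see vega10_get_soc_index_for_max_uclk()).
 */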
3419 
3420 static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
3421 {
3422 	struct vega10_hwmgr *data = hwmgr->backend;
3423 	uint32_t socclk_idx;
3424 
3425 	vega10_apply_dal_minimum_voltage_request(hwmgr);
3426 
3427 	if (!data->registry_data.sclk_dpm_key_disabled) {
3428 		if (data->smc_state_table.gfx_boot_level !=
3429 				data->dpm_table.gfx_table.dpm_state.soft_min_level) {
3430 			smum_send_msg_to_smc_with_parameter(hwmgr,
3431 				PPSMC_MSG_SetSoftMinGfxclkByIndex,
3432 				data->smc_state_table.gfx_boot_level);
3433 			data->dpm_table.gfx_table.dpm_state.soft_min_level =
3434 					data->smc_state_table.gfx_boot_level;
3435 		}
3436 	}
3437 
3438 	if (!data->registry_data.mclk_dpm_key_disabled) {
3439 		if (data->smc_state_table.mem_boot_level !=
3440 				data->dpm_table.mem_table.dpm_state.soft_min_level) {
3441 			if (data->smc_state_table.mem_boot_level == NUM_UCLK_DPM_LEVELS - 1) {
3442 				socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr);
3443 				smum_send_msg_to_smc_with_parameter(hwmgr,
3444 						PPSMC_MSG_SetSoftMinSocclkByIndex,
3445 						socclk_idx);
3446 			} else {
3447 				smum_send_msg_to_smc_with_parameter(hwmgr,
3448 						PPSMC_MSG_SetSoftMinUclkByIndex,
3449 						data->smc_state_table.mem_boot_level);
3450 			}
3451 			data->dpm_table.mem_table.dpm_state.soft_min_level =
3452 					data->smc_state_table.mem_boot_level;
3453 		}
3454 	}
3455 
3456 	return 0;
3457 }
3458 
3459 static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
3460 {
3461 	struct vega10_hwmgr *data = hwmgr->backend;
3462 
3463 	vega10_apply_dal_minimum_voltage_request(hwmgr);
3464 
3465 	if (!data->registry_data.sclk_dpm_key_disabled) {
3466 		if (data->smc_state_table.gfx_max_level !=
3467 			data->dpm_table.gfx_table.dpm_state.soft_max_level) {
3468 			smum_send_msg_to_smc_with_parameter(hwmgr,
3469 				PPSMC_MSG_SetSoftMaxGfxclkByIndex,
3470 				data->smc_state_table.gfx_max_level);
3471 			data->dpm_table.gfx_table.dpm_state.soft_max_level =
3472 					data->smc_state_table.gfx_max_level;
3473 		}
3474 	}
3475 
3476 	if (!data->registry_data.mclk_dpm_key_disabled) {
3477 		if (data->smc_state_table.mem_max_level !=
3478 			data->dpm_table.mem_table.dpm_state.soft_max_level) {
3479 			smum_send_msg_to_smc_with_parameter(hwmgr,
3480 					PPSMC_MSG_SetSoftMaxUclkByIndex,
3481 					data->smc_state_table.mem_max_level);
3482 			data->dpm_table.mem_table.dpm_state.soft_max_level =
3483 					data->smc_state_table.mem_max_level;
3484 		}
3485 	}
3486 
3487 	return 0;
3488 }
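/*
 * Build the DPM level enable masks for the new power state: trim the
 * per-clock DPM tables to the state's low/high limits, derive the boot
 * and max levels, upload them to the SMC, and mark every level in the
 * resulting [boot, max) range as enabled.
 */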
3489 
3490 static int vega10_generate_dpm_level_enable_mask(
3491 		struct pp_hwmgr *hwmgr, const void *input)
3492 {
3493 	struct vega10_hwmgr *data = hwmgr->backend;
3494 	const struct phm_set_power_state_input *states =
3495 			(const struct phm_set_power_state_input *)input;
3496 	const struct vega10_power_state *vega10_ps =
3497 			cast_const_phw_vega10_power_state(states->pnew_state);
3498 	int i;
3499 
3500 	PP_ASSERT_WITH_CODE(!vega10_trim_dpm_states(hwmgr, vega10_ps),
3501 			"Attempt to Trim DPM States Failed!",
3502 			return -1);
3503 
3504 	data->smc_state_table.gfx_boot_level =
3505 			vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
3506 	data->smc_state_table.gfx_max_level =
3507 			vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
3508 	data->smc_state_table.mem_boot_level =
3509 			vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
3510 	data->smc_state_table.mem_max_level =
3511 			vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
3512 
3513 	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3514 			"Attempt to upload DPM Bootup Levels Failed!",
3515 			return -1);
3516 	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3517 			"Attempt to upload DPM Max Levels Failed!",
3518 			return -1);
	for (i = data->smc_state_table.gfx_boot_level; i < data->smc_state_table.gfx_max_level; i++)
		data->dpm_table.gfx_table.dpm_levels[i].enabled = true;

	for (i = data->smc_state_table.mem_boot_level; i < data->smc_state_table.mem_max_level; i++)
		data->dpm_table.mem_table.dpm_levels[i].enabled = true;
3525 
3526 	return 0;
3527 }
3528 
3529 int vega10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
3530 {
3531 	struct vega10_hwmgr *data = hwmgr->backend;
3532 
3533 	if (data->smu_features[GNLD_DPM_VCE].supported) {
3534 		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
3535 				enable,
3536 				data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap),
3537 				"Attempt to Enable/Disable DPM VCE Failed!",
3538 				return -1);
3539 		data->smu_features[GNLD_DPM_VCE].enabled = enable;
3540 	}
3541 
3542 	return 0;
3543 }
3544 
3545 static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
3546 {
3547 	struct vega10_hwmgr *data = hwmgr->backend;
3548 	uint32_t low_sclk_interrupt_threshold = 0;
3549 
3550 	if (PP_CAP(PHM_PlatformCaps_SclkThrottleLowNotification) &&
3551 		(data->low_sclk_interrupt_threshold != 0)) {
3552 		low_sclk_interrupt_threshold =
3553 				data->low_sclk_interrupt_threshold;
3554 
3555 		data->smc_state_table.pp_table.LowGfxclkInterruptThreshold =
3556 				cpu_to_le32(low_sclk_interrupt_threshold);
3557 
3558 		/* This message will also enable SmcToHost Interrupt */
3559 		smum_send_msg_to_smc_with_parameter(hwmgr,
3560 				PPSMC_MSG_SetLowGfxclkInterruptThreshold,
3561 				(uint32_t)low_sclk_interrupt_threshold);
3562 	}
3563 
3564 	return 0;
3565 }
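/*
 * Tasks run when a new power state is committed: refresh the DPM tables,
 * regenerate the level enable masks, update the SCLK interrupt threshold,
 * upload the PPTable, and re-evaluate AVFS (which is effectively disabled
 * while a hardcoded pp table is in use).
 */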
3566 
3567 static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
3568 		const void *input)
3569 {
3570 	int tmp_result, result = 0;
3571 	struct vega10_hwmgr *data = hwmgr->backend;
3572 	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
3573 
3574 	tmp_result = vega10_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
3575 	PP_ASSERT_WITH_CODE(!tmp_result,
3576 			"Failed to find DPM states clocks in DPM table!",
3577 			result = tmp_result);
3578 
3579 	tmp_result = vega10_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
3580 	PP_ASSERT_WITH_CODE(!tmp_result,
3581 			"Failed to populate and upload SCLK MCLK DPM levels!",
3582 			result = tmp_result);
3583 
3584 	tmp_result = vega10_generate_dpm_level_enable_mask(hwmgr, input);
3585 	PP_ASSERT_WITH_CODE(!tmp_result,
3586 			"Failed to generate DPM level enabled mask!",
3587 			result = tmp_result);
3588 
3589 	tmp_result = vega10_update_sclk_threshold(hwmgr);
3590 	PP_ASSERT_WITH_CODE(!tmp_result,
3591 			"Failed to update SCLK threshold!",
3592 			result = tmp_result);
3593 
3594 	result = smum_smc_table_manager(hwmgr, (uint8_t *)pp_table, PPTABLE, false);
3595 	PP_ASSERT_WITH_CODE(!result,
3596 			"Failed to upload PPtable!", return result);
3597 
3598 	/*
3599 	 * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag.
3600 	 * That effectively disables AVFS feature.
3601 	 */
	if (hwmgr->hardcode_pp_table != NULL)
3603 		data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
3604 
3605 	vega10_update_avfs(hwmgr);
3606 
3607 	data->need_update_dpm_table &= DPMTABLE_OD_UPDATE_VDDC;
3608 
3609 	return 0;
3610 }
3611 
3612 static uint32_t vega10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
3613 {
3614 	struct pp_power_state *ps;
3615 	struct vega10_power_state *vega10_ps;
3616 
3617 	if (hwmgr == NULL)
3618 		return -EINVAL;
3619 
3620 	ps = hwmgr->request_ps;
3621 
3622 	if (ps == NULL)
3623 		return -EINVAL;
3624 
3625 	vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
3626 
3627 	if (low)
3628 		return vega10_ps->performance_levels[0].gfx_clock;
3629 	else
3630 		return vega10_ps->performance_levels
3631 				[vega10_ps->performance_level_count - 1].gfx_clock;
3632 }
3633 
3634 static uint32_t vega10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
3635 {
3636 	struct pp_power_state *ps;
3637 	struct vega10_power_state *vega10_ps;
3638 
3639 	if (hwmgr == NULL)
3640 		return -EINVAL;
3641 
3642 	ps = hwmgr->request_ps;
3643 
3644 	if (ps == NULL)
3645 		return -EINVAL;
3646 
3647 	vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
3648 
3649 	if (low)
3650 		return vega10_ps->performance_levels[0].mem_clock;
3651 	else
3652 		return vega10_ps->performance_levels
3653 				[vega10_ps->performance_level_count-1].mem_clock;
3654 }
3655 
3656 static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr,
3657 		uint32_t *query)
3658 {
3659 	uint32_t value;
3660 
3661 	if (!query)
3662 		return -EINVAL;
3663 
3664 	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr);
3665 	value = smum_get_argument(hwmgr);
3666 
	/*
	 * The SMC returns actual watts; to stay consistent with legacy ASICs,
	 * report the value in 8.8 fixed point (low 8 bits are fractional).
	 */
3668 	*query = value << 8;
3669 
3670 	return 0;
3671 }
3672 
3673 static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
3674 			      void *value, int *size)
3675 {
3676 	struct amdgpu_device *adev = hwmgr->adev;
3677 	uint32_t sclk_mhz, mclk_idx, activity_percent = 0;
3678 	struct vega10_hwmgr *data = hwmgr->backend;
3679 	struct vega10_dpm_table *dpm_table = &data->dpm_table;
3680 	int ret = 0;
3681 	uint32_t val_vid;
3682 
3683 	switch (idx) {
3684 	case AMDGPU_PP_SENSOR_GFX_SCLK:
3685 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGfxclkActualFrequency);
3686 		sclk_mhz = smum_get_argument(hwmgr);
3687 		*((uint32_t *)value) = sclk_mhz * 100;
3688 		break;
3689 	case AMDGPU_PP_SENSOR_GFX_MCLK:
3690 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex);
3691 		mclk_idx = smum_get_argument(hwmgr);
3692 		if (mclk_idx < dpm_table->mem_table.count) {
3693 			*((uint32_t *)value) = dpm_table->mem_table.dpm_levels[mclk_idx].value;
3694 			*size = 4;
3695 		} else {
3696 			ret = -EINVAL;
3697 		}
3698 		break;
3699 	case AMDGPU_PP_SENSOR_GPU_LOAD:
3700 		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0);
3701 		activity_percent = smum_get_argument(hwmgr);
3702 		*((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
3703 		*size = 4;
3704 		break;
3705 	case AMDGPU_PP_SENSOR_GPU_TEMP:
3706 		*((uint32_t *)value) = vega10_thermal_get_temperature(hwmgr);
3707 		*size = 4;
3708 		break;
3709 	case AMDGPU_PP_SENSOR_UVD_POWER:
3710 		*((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
3711 		*size = 4;
3712 		break;
3713 	case AMDGPU_PP_SENSOR_VCE_POWER:
3714 		*((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
3715 		*size = 4;
3716 		break;
3717 	case AMDGPU_PP_SENSOR_GPU_POWER:
3718 		ret = vega10_get_gpu_power(hwmgr, (uint32_t *)value);
3719 		break;
3720 	case AMDGPU_PP_SENSOR_VDDGFX:
3721 		val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_PLANE0_CURRENTVID) &
3722 			SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID_MASK) >>
3723 			SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID__SHIFT;
3724 		*((uint32_t *)value) = (uint32_t)convert_to_vddc((uint8_t)val_vid);
3725 		return 0;
3726 	default:
3727 		ret = -EINVAL;
3728 		break;
3729 	}
3730 
3731 	return ret;
3732 }
3733 
3734 static void vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr,
3735 		bool has_disp)
3736 {
3737 	smum_send_msg_to_smc_with_parameter(hwmgr,
3738 			PPSMC_MSG_SetUclkFastSwitch,
3739 			has_disp ? 1 : 0);
3740 }
3741 
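/*
 * Request a display-related clock from the SMC.  The message argument
 * packs the target frequency in MHz into the upper 16 bits and the
 * DSPCLK_e selector into the lower 16 bits.
 */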
3742 int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
3743 		struct pp_display_clock_request *clock_req);
3744 int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
3745 		struct pp_display_clock_request *clock_req)
3746 {
3747 	int result = 0;
3748 	enum amd_pp_clock_type clk_type = clock_req->clock_type;
3749 	uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
3750 	DSPCLK_e clk_select = 0;
3751 	uint32_t clk_request = 0;
3752 
3753 	switch (clk_type) {
3754 	case amd_pp_dcef_clock:
3755 		clk_select = DSPCLK_DCEFCLK;
3756 		break;
3757 	case amd_pp_disp_clock:
3758 		clk_select = DSPCLK_DISPCLK;
3759 		break;
3760 	case amd_pp_pixel_clock:
3761 		clk_select = DSPCLK_PIXCLK;
3762 		break;
3763 	case amd_pp_phy_clock:
3764 		clk_select = DSPCLK_PHYCLK;
3765 		break;
3766 	default:
		pr_info("[DisplayClockVoltageRequest] Invalid Clock Type!");
3768 		result = -1;
3769 		break;
3770 	}
3771 
3772 	if (!result) {
3773 		clk_request = (clk_freq << 16) | clk_select;
3774 		smum_send_msg_to_smc_with_parameter(hwmgr,
3775 				PPSMC_MSG_RequestDisplayClockByFreq,
3776 				clk_request);
3777 	}
3778 
3779 	return result;
3780 }
3781 
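/*
 * Return the index of the first MCLK dependency entry at or above the
 * requested frequency, falling back to the last entry (or 0 for a
 * missing/empty table).
 */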
3782 static uint8_t vega10_get_uclk_index(struct pp_hwmgr *hwmgr,
3783 			struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table,
3784 						uint32_t frequency)
3785 {
3786 	uint8_t count;
3787 	uint8_t i;
3788 
3789 	if (mclk_table == NULL || mclk_table->count == 0)
3790 		return 0;
3791 
3792 	count = (uint8_t)(mclk_table->count);
3793 
	for (i = 0; i < count; i++) {
		if (mclk_table->entries[i].clk >= frequency)
			return i;
	}

	return i - 1;
3800 }
3801 
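/*
 * After the power state has been adjusted, tell the SMC about the display
 * configuration: whether fast UCLK switching is allowed, the hard minimum
 * DCEFCLK (plus the deep-sleep DCEFCLK floor), and the soft minimum UCLK
 * index required by the displays.
 */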
3802 static int vega10_notify_smc_display_config_after_ps_adjustment(
3803 		struct pp_hwmgr *hwmgr)
3804 {
3805 	struct vega10_hwmgr *data = hwmgr->backend;
3806 	struct vega10_single_dpm_table *dpm_table =
3807 			&data->dpm_table.dcef_table;
3808 	struct phm_ppt_v2_information *table_info =
3809 			(struct phm_ppt_v2_information *)hwmgr->pptable;
3810 	struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = table_info->vdd_dep_on_mclk;
3811 	uint32_t idx;
3812 	struct PP_Clocks min_clocks = {0};
3813 	uint32_t i;
3814 	struct pp_display_clock_request clock_req;
3815 
3816 	if ((hwmgr->display_config->num_display > 1) &&
3817 	     !hwmgr->display_config->multi_monitor_in_sync &&
3818 	     !hwmgr->display_config->nb_pstate_switch_disable)
3819 		vega10_notify_smc_display_change(hwmgr, false);
3820 	else
3821 		vega10_notify_smc_display_change(hwmgr, true);
3822 
3823 	min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
3824 	min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk;
3825 	min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
3826 
3827 	for (i = 0; i < dpm_table->count; i++) {
3828 		if (dpm_table->dpm_levels[i].value == min_clocks.dcefClock)
3829 			break;
3830 	}
3831 
3832 	if (i < dpm_table->count) {
3833 		clock_req.clock_type = amd_pp_dcef_clock;
3834 		clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value * 10;
3835 		if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) {
3836 			smum_send_msg_to_smc_with_parameter(
3837 					hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
3838 					min_clocks.dcefClockInSR / 100);
3839 		} else {
3840 			pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
3841 		}
3842 	} else {
3843 		pr_debug("Cannot find requested DCEFCLK!");
3844 	}
3845 
3846 	if (min_clocks.memoryClock != 0) {
3847 		idx = vega10_get_uclk_index(hwmgr, mclk_table, min_clocks.memoryClock);
3848 		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinUclkByIndex, idx);
		data->dpm_table.mem_table.dpm_state.soft_min_level = idx;
3850 	}
3851 
3852 	return 0;
3853 }
3854 
3855 static int vega10_force_dpm_highest(struct pp_hwmgr *hwmgr)
3856 {
3857 	struct vega10_hwmgr *data = hwmgr->backend;
3858 
3859 	data->smc_state_table.gfx_boot_level =
3860 	data->smc_state_table.gfx_max_level =
3861 			vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
3862 	data->smc_state_table.mem_boot_level =
3863 	data->smc_state_table.mem_max_level =
3864 			vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
3865 
3866 	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3867 			"Failed to upload boot level to highest!",
3868 			return -1);
3869 
3870 	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3871 			"Failed to upload dpm max level to highest!",
3872 			return -1);
3873 
3874 	return 0;
3875 }
3876 
3877 static int vega10_force_dpm_lowest(struct pp_hwmgr *hwmgr)
3878 {
3879 	struct vega10_hwmgr *data = hwmgr->backend;
3880 
3881 	data->smc_state_table.gfx_boot_level =
3882 	data->smc_state_table.gfx_max_level =
3883 			vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
3884 	data->smc_state_table.mem_boot_level =
3885 	data->smc_state_table.mem_max_level =
3886 			vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
3887 
3888 	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
			"Failed to upload boot level to lowest!",
3890 			return -1);
3891 
3892 	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
			"Failed to upload dpm max level to lowest!",
3894 			return -1);
3895 
	return 0;
}
3899 
3900 static int vega10_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
3901 {
3902 	struct vega10_hwmgr *data = hwmgr->backend;
3903 
3904 	data->smc_state_table.gfx_boot_level =
3905 			vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
3906 	data->smc_state_table.gfx_max_level =
3907 			vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
3908 	data->smc_state_table.mem_boot_level =
3909 			vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
3910 	data->smc_state_table.mem_max_level =
3911 			vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
3912 
3913 	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3914 			"Failed to upload DPM Bootup Levels!",
3915 			return -1);
3916 
3917 	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3918 			"Failed to upload DPM Max Levels!",
3919 			return -1);
3920 	return 0;
3921 }
3922 
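/*
 * Select the SCLK/MCLK/SOCCLK level masks used for the UMD profiling
 * levels: a fixed standard-pstate level when the dependency tables are
 * large enough, level 0 for the min-sclk/min-mclk profiles, and the top
 * levels for the peak profile.
 */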
3923 static int vega10_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
3924 				uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
3925 {
3926 	struct phm_ppt_v2_information *table_info =
3927 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
3928 
3929 	if (table_info->vdd_dep_on_sclk->count > VEGA10_UMD_PSTATE_GFXCLK_LEVEL &&
3930 		table_info->vdd_dep_on_socclk->count > VEGA10_UMD_PSTATE_SOCCLK_LEVEL &&
3931 		table_info->vdd_dep_on_mclk->count > VEGA10_UMD_PSTATE_MCLK_LEVEL) {
3932 		*sclk_mask = VEGA10_UMD_PSTATE_GFXCLK_LEVEL;
3933 		*soc_mask = VEGA10_UMD_PSTATE_SOCCLK_LEVEL;
3934 		*mclk_mask = VEGA10_UMD_PSTATE_MCLK_LEVEL;
3935 		hwmgr->pstate_sclk = table_info->vdd_dep_on_sclk->entries[VEGA10_UMD_PSTATE_GFXCLK_LEVEL].clk;
3936 		hwmgr->pstate_mclk = table_info->vdd_dep_on_mclk->entries[VEGA10_UMD_PSTATE_MCLK_LEVEL].clk;
3937 	}
3938 
3939 	if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
3940 		*sclk_mask = 0;
3941 	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
3942 		*mclk_mask = 0;
3943 	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
3944 		*sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
3945 		*soc_mask = table_info->vdd_dep_on_socclk->count - 1;
3946 		*mclk_mask = table_info->vdd_dep_on_mclk->count - 1;
3947 	}
3948 	return 0;
3949 }
3950 
3951 static void vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
3952 {
3953 	switch (mode) {
3954 	case AMD_FAN_CTRL_NONE:
3955 		vega10_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
3956 		break;
3957 	case AMD_FAN_CTRL_MANUAL:
3958 		if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
3959 			vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
3960 		break;
3961 	case AMD_FAN_CTRL_AUTO:
3962 		if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
3963 			vega10_fan_ctrl_start_smc_fan_control(hwmgr);
3964 		break;
3965 	default:
3966 		break;
3967 	}
3968 }
3969 
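/*
 * Force a clock domain to the levels described by @mask: the lowest set
 * bit becomes the soft minimum (boot) level and the highest set bit the
 * soft maximum level.  PCIE requests are currently ignored.
 */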
3970 static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
3971 		enum pp_clock_type type, uint32_t mask)
3972 {
3973 	struct vega10_hwmgr *data = hwmgr->backend;
3974 
3975 	switch (type) {
3976 	case PP_SCLK:
3977 		data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0;
3978 		data->smc_state_table.gfx_max_level = mask ? (fls(mask) - 1) : 0;
3979 
3980 		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3981 			"Failed to upload boot level to lowest!",
3982 			return -EINVAL);
3983 
3984 		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3985 			"Failed to upload dpm max level to highest!",
3986 			return -EINVAL);
3987 		break;
3988 
3989 	case PP_MCLK:
3990 		data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) : 0;
3991 		data->smc_state_table.mem_max_level = mask ? (fls(mask) - 1) : 0;
3992 
3993 		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3994 			"Failed to upload boot level to lowest!",
3995 			return -EINVAL);
3996 
3997 		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3998 			"Failed to upload dpm max level to highest!",
3999 			return -EINVAL);
4000 
4001 		break;
4002 
4003 	case PP_PCIE:
4004 	default:
4005 		break;
4006 	}
4007 
4008 	return 0;
4009 }
4010 
4011 static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
4012 				enum amd_dpm_forced_level level)
4013 {
4014 	int ret = 0;
4015 	uint32_t sclk_mask = 0;
4016 	uint32_t mclk_mask = 0;
4017 	uint32_t soc_mask = 0;
4018 
4019 	if (hwmgr->pstate_sclk == 0)
4020 		vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
4021 
4022 	switch (level) {
4023 	case AMD_DPM_FORCED_LEVEL_HIGH:
4024 		ret = vega10_force_dpm_highest(hwmgr);
4025 		break;
4026 	case AMD_DPM_FORCED_LEVEL_LOW:
4027 		ret = vega10_force_dpm_lowest(hwmgr);
4028 		break;
4029 	case AMD_DPM_FORCED_LEVEL_AUTO:
4030 		ret = vega10_unforce_dpm_levels(hwmgr);
4031 		break;
4032 	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
4033 	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
4034 	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
4035 	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
4036 		ret = vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
4037 		if (ret)
4038 			return ret;
4039 		vega10_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
4040 		vega10_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
4041 		break;
4042 	case AMD_DPM_FORCED_LEVEL_MANUAL:
4043 	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
4044 	default:
4045 		break;
4046 	}
4047 
4048 	if (!ret) {
4049 		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
4050 			vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
4051 		else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
4052 			vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO);
4053 	}
4054 
4055 	return ret;
4056 }
4057 
4058 static uint32_t vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
4059 {
4060 	struct vega10_hwmgr *data = hwmgr->backend;
4061 
	if (!data->smu_features[GNLD_FAN_CONTROL].enabled)
4063 		return AMD_FAN_CTRL_MANUAL;
4064 	else
4065 		return AMD_FAN_CTRL_AUTO;
4066 }
4067 
4068 static int vega10_get_dal_power_level(struct pp_hwmgr *hwmgr,
4069 		struct amd_pp_simple_clock_info *info)
4070 {
4071 	struct phm_ppt_v2_information *table_info =
4072 			(struct phm_ppt_v2_information *)hwmgr->pptable;
4073 	struct phm_clock_and_voltage_limits *max_limits =
4074 			&table_info->max_clock_voltage_on_ac;
4075 
4076 	info->engine_max_clock = max_limits->sclk;
4077 	info->memory_max_clock = max_limits->mclk;
4078 
4079 	return 0;
4080 }
4081 
4082 static void vega10_get_sclks(struct pp_hwmgr *hwmgr,
4083 		struct pp_clock_levels_with_latency *clocks)
4084 {
4085 	struct phm_ppt_v2_information *table_info =
4086 			(struct phm_ppt_v2_information *)hwmgr->pptable;
4087 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4088 			table_info->vdd_dep_on_sclk;
4089 	uint32_t i;
4090 
4091 	clocks->num_levels = 0;
4092 	for (i = 0; i < dep_table->count; i++) {
4093 		if (dep_table->entries[i].clk) {
4094 			clocks->data[clocks->num_levels].clocks_in_khz =
4095 					dep_table->entries[i].clk * 10;
4096 			clocks->num_levels++;
4097 		}
4098 	}
4099 
4100 }
4101 
4102 static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
4103 		struct pp_clock_levels_with_latency *clocks)
4104 {
4105 	struct phm_ppt_v2_information *table_info =
4106 			(struct phm_ppt_v2_information *)hwmgr->pptable;
4107 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4108 			table_info->vdd_dep_on_mclk;
4109 	struct vega10_hwmgr *data = hwmgr->backend;
4110 	uint32_t j = 0;
4111 	uint32_t i;
4112 
4113 	for (i = 0; i < dep_table->count; i++) {
		if (dep_table->entries[i].clk) {
			clocks->data[j].clocks_in_khz =
4117 						dep_table->entries[i].clk * 10;
4118 			data->mclk_latency_table.entries[j].frequency =
4119 							dep_table->entries[i].clk;
4120 			clocks->data[j].latency_in_us =
4121 				data->mclk_latency_table.entries[j].latency = 25;
4122 			j++;
4123 		}
4124 	}
4125 	clocks->num_levels = data->mclk_latency_table.count = j;
4126 }
4127 
4128 static void vega10_get_dcefclocks(struct pp_hwmgr *hwmgr,
4129 		struct pp_clock_levels_with_latency *clocks)
4130 {
4131 	struct phm_ppt_v2_information *table_info =
4132 			(struct phm_ppt_v2_information *)hwmgr->pptable;
4133 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4134 			table_info->vdd_dep_on_dcefclk;
4135 	uint32_t i;
4136 
4137 	for (i = 0; i < dep_table->count; i++) {
4138 		clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
4139 		clocks->data[i].latency_in_us = 0;
4140 		clocks->num_levels++;
4141 	}
4142 }
4143 
4144 static void vega10_get_socclocks(struct pp_hwmgr *hwmgr,
4145 		struct pp_clock_levels_with_latency *clocks)
4146 {
4147 	struct phm_ppt_v2_information *table_info =
4148 			(struct phm_ppt_v2_information *)hwmgr->pptable;
4149 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4150 			table_info->vdd_dep_on_socclk;
4151 	uint32_t i;
4152 
4153 	for (i = 0; i < dep_table->count; i++) {
4154 		clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
4155 		clocks->data[i].latency_in_us = 0;
4156 		clocks->num_levels++;
4157 	}
4158 }
4159 
4160 static int vega10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
4161 		enum amd_pp_clock_type type,
4162 		struct pp_clock_levels_with_latency *clocks)
4163 {
4164 	switch (type) {
4165 	case amd_pp_sys_clock:
4166 		vega10_get_sclks(hwmgr, clocks);
4167 		break;
4168 	case amd_pp_mem_clock:
4169 		vega10_get_memclocks(hwmgr, clocks);
4170 		break;
4171 	case amd_pp_dcef_clock:
4172 		vega10_get_dcefclocks(hwmgr, clocks);
4173 		break;
4174 	case amd_pp_soc_clock:
4175 		vega10_get_socclocks(hwmgr, clocks);
4176 		break;
4177 	default:
4178 		return -1;
4179 	}
4180 
4181 	return 0;
4182 }
4183 
4184 static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
4185 		enum amd_pp_clock_type type,
4186 		struct pp_clock_levels_with_voltage *clocks)
4187 {
4188 	struct phm_ppt_v2_information *table_info =
4189 			(struct phm_ppt_v2_information *)hwmgr->pptable;
4190 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
4191 	uint32_t i;
4192 
4193 	switch (type) {
4194 	case amd_pp_mem_clock:
4195 		dep_table = table_info->vdd_dep_on_mclk;
4196 		break;
4197 	case amd_pp_dcef_clock:
4198 		dep_table = table_info->vdd_dep_on_dcefclk;
4199 		break;
4200 	case amd_pp_disp_clock:
4201 		dep_table = table_info->vdd_dep_on_dispclk;
4202 		break;
4203 	case amd_pp_pixel_clock:
4204 		dep_table = table_info->vdd_dep_on_pixclk;
4205 		break;
4206 	case amd_pp_phy_clock:
4207 		dep_table = table_info->vdd_dep_on_phyclk;
4208 		break;
4209 	default:
4210 		return -1;
4211 	}
4212 
4213 	for (i = 0; i < dep_table->count; i++) {
		clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
4215 		clocks->data[i].voltage_in_mv = (uint32_t)(table_info->vddc_lookup_table->
4216 				entries[dep_table->entries[i].vddInd].us_vdd);
4217 		clocks->num_levels++;
4218 	}
4219 
4220 	if (i < dep_table->count)
4221 		return -1;
4222 
4223 	return 0;
4224 }
4225 
4226 static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
4227 							void *clock_range)
4228 {
4229 	struct vega10_hwmgr *data = hwmgr->backend;
4230 	struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_range;
4231 	Watermarks_t *table = &(data->smc_state_table.water_marks_table);
4232 	int result = 0;
4233 
4234 	if (!data->registry_data.disable_water_mark) {
4235 		smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
4236 		data->water_marks_bitmap = WaterMarksExist;
4237 	}
4238 
4239 	return result;
4240 }
4241 
4242 static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
4243 		enum pp_clock_type type, char *buf)
4244 {
4245 	struct vega10_hwmgr *data = hwmgr->backend;
4246 	struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
4247 	struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
4248 	struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
4249 	struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep = NULL;
	int i, now, size = 0;
4252 
4253 	switch (type) {
4254 	case PP_SCLK:
4255 		if (data->registry_data.sclk_dpm_key_disabled)
4256 			break;
4257 
4258 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex);
4259 		now = smum_get_argument(hwmgr);
4260 
4261 		for (i = 0; i < sclk_table->count; i++)
4262 			size += sprintf(buf + size, "%d: %uMhz %s\n",
4263 					i, sclk_table->dpm_levels[i].value / 100,
4264 					(i == now) ? "*" : "");
4265 		break;
4266 	case PP_MCLK:
4267 		if (data->registry_data.mclk_dpm_key_disabled)
4268 			break;
4269 
4270 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex);
4271 		now = smum_get_argument(hwmgr);
4272 
4273 		for (i = 0; i < mclk_table->count; i++)
4274 			size += sprintf(buf + size, "%d: %uMhz %s\n",
4275 					i, mclk_table->dpm_levels[i].value / 100,
4276 					(i == now) ? "*" : "");
4277 		break;
4278 	case PP_PCIE:
4279 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex);
4280 		now = smum_get_argument(hwmgr);
4281 
4282 		for (i = 0; i < pcie_table->count; i++)
4283 			size += sprintf(buf + size, "%d: %s %s\n", i,
4284 					(pcie_table->pcie_gen[i] == 0) ? "2.5GT/s, x1" :
4285 					(pcie_table->pcie_gen[i] == 1) ? "5.0GT/s, x16" :
4286 					(pcie_table->pcie_gen[i] == 2) ? "8.0GT/s, x16" : "",
4287 					(i == now) ? "*" : "");
4288 		break;
4289 	case OD_SCLK:
4290 		if (hwmgr->od_enabled) {
4291 			size = sprintf(buf, "%s:\n", "OD_SCLK");
4292 			podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
4293 			for (i = 0; i < podn_vdd_dep->count; i++)
4294 				size += sprintf(buf + size, "%d: %10uMhz %10umV\n",
4295 					i, podn_vdd_dep->entries[i].clk / 100,
4296 						podn_vdd_dep->entries[i].vddc);
4297 		}
4298 		break;
4299 	case OD_MCLK:
4300 		if (hwmgr->od_enabled) {
4301 			size = sprintf(buf, "%s:\n", "OD_MCLK");
4302 			podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
4303 			for (i = 0; i < podn_vdd_dep->count; i++)
4304 				size += sprintf(buf + size, "%d: %10uMhz %10umV\n",
4305 					i, podn_vdd_dep->entries[i].clk/100,
4306 						podn_vdd_dep->entries[i].vddc);
4307 		}
4308 		break;
4309 	case OD_RANGE:
4310 		if (hwmgr->od_enabled) {
4311 			size = sprintf(buf, "%s:\n", "OD_RANGE");
4312 			size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
4313 				data->golden_dpm_table.gfx_table.dpm_levels[0].value/100,
4314 				hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
4315 			size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
4316 				data->golden_dpm_table.mem_table.dpm_levels[0].value/100,
4317 				hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
4318 			size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
4319 				data->odn_dpm_table.min_vddc,
4320 				data->odn_dpm_table.max_vddc);
4321 		}
4322 		break;
4323 	default:
4324 		break;
4325 	}
4326 	return size;
4327 }
4328 
4329 static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
4330 {
4331 	struct vega10_hwmgr *data = hwmgr->backend;
4332 	Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);
4333 	int result = 0;
4334 
4335 	if ((data->water_marks_bitmap & WaterMarksExist) &&
4336 			!(data->water_marks_bitmap & WaterMarksLoaded)) {
4337 		result = smum_smc_table_manager(hwmgr, (uint8_t *)wm_table, WMTABLE, false);
		PP_ASSERT_WITH_CODE(!result, "Failed to update WMTABLE!", return -EINVAL);
4339 		data->water_marks_bitmap |= WaterMarksLoaded;
4340 	}
4341 
4342 	if (data->water_marks_bitmap & WaterMarksLoaded) {
4343 		smum_send_msg_to_smc_with_parameter(hwmgr,
4344 			PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display);
4345 	}
4346 
4347 	return result;
4348 }
4349 
4350 int vega10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable);
4351 int vega10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
4352 {
4353 	struct vega10_hwmgr *data = hwmgr->backend;
4354 
4355 	if (data->smu_features[GNLD_DPM_UVD].supported) {
4356 		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
4357 				enable,
4358 				data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap),
4359 				"Attempt to Enable/Disable DPM UVD Failed!",
4360 				return -1);
4361 		data->smu_features[GNLD_DPM_UVD].enabled = enable;
4362 	}
4363 	return 0;
4364 }
4365 
4366 static void vega10_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
4367 {
4368 	struct vega10_hwmgr *data = hwmgr->backend;
4369 
4370 	data->vce_power_gated = bgate;
4371 	vega10_enable_disable_vce_dpm(hwmgr, !bgate);
4372 }
4373 
4374 static void vega10_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
4375 {
4376 	struct vega10_hwmgr *data = hwmgr->backend;
4377 
4378 	data->uvd_power_gated = bgate;
4379 	vega10_enable_disable_uvd_dpm(hwmgr, !bgate);
4380 }
4381 
4382 static inline bool vega10_are_power_levels_equal(
4383 				const struct vega10_performance_level *pl1,
4384 				const struct vega10_performance_level *pl2)
4385 {
4386 	return ((pl1->soc_clock == pl2->soc_clock) &&
4387 			(pl1->gfx_clock == pl2->gfx_clock) &&
4388 			(pl1->mem_clock == pl2->mem_clock));
4389 }
4390 
4391 static int vega10_check_states_equal(struct pp_hwmgr *hwmgr,
4392 				const struct pp_hw_power_state *pstate1,
4393 			const struct pp_hw_power_state *pstate2, bool *equal)
4394 {
4395 	const struct vega10_power_state *psa;
4396 	const struct vega10_power_state *psb;
4397 	int i;
4398 
4399 	if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
4400 		return -EINVAL;
4401 
4402 	psa = cast_const_phw_vega10_power_state(pstate1);
4403 	psb = cast_const_phw_vega10_power_state(pstate2);
4404 	/* If the two states don't even have the same number of performance levels they cannot be the same state. */
4405 	if (psa->performance_level_count != psb->performance_level_count) {
4406 		*equal = false;
4407 		return 0;
4408 	}
4409 
4410 	for (i = 0; i < psa->performance_level_count; i++) {
4411 		if (!vega10_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
4412 			/* If we have found even one performance level pair that is different the states are different. */
4413 			*equal = false;
4414 			return 0;
4415 		}
4416 	}
4417 
4418 	/* If all performance levels are the same try to use the UVD clocks to break the tie.*/
4419 	*equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
4420 	*equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
4421 	*equal &= (psa->sclk_threshold == psb->sclk_threshold);
4422 
4423 	return 0;
4424 }
4425 
4426 static bool
4427 vega10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
4428 {
4429 	struct vega10_hwmgr *data = hwmgr->backend;
4430 	bool is_update_required = false;
4431 
4432 	if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
4433 		is_update_required = true;
4434 
4435 	if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep)) {
4436 		if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr)
4437 			is_update_required = true;
4438 	}
4439 
4440 	return is_update_required;
4441 }
4442 
4443 static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
4444 {
4445 	int tmp_result, result = 0;
4446 
4447 	if (PP_CAP(PHM_PlatformCaps_ThermalController))
4448 		vega10_disable_thermal_protection(hwmgr);
4449 
4450 	tmp_result = vega10_disable_power_containment(hwmgr);
4451 	PP_ASSERT_WITH_CODE((tmp_result == 0),
4452 			"Failed to disable power containment!", result = tmp_result);
4453 
4454 	tmp_result = vega10_disable_didt_config(hwmgr);
4455 	PP_ASSERT_WITH_CODE((tmp_result == 0),
4456 			"Failed to disable didt config!", result = tmp_result);
4457 
4458 	tmp_result = vega10_avfs_enable(hwmgr, false);
4459 	PP_ASSERT_WITH_CODE((tmp_result == 0),
4460 			"Failed to disable AVFS!", result = tmp_result);
4461 
4462 	tmp_result = vega10_stop_dpm(hwmgr, SMC_DPM_FEATURES);
4463 	PP_ASSERT_WITH_CODE((tmp_result == 0),
4464 			"Failed to stop DPM!", result = tmp_result);
4465 
4466 	tmp_result = vega10_disable_deep_sleep_master_switch(hwmgr);
4467 	PP_ASSERT_WITH_CODE((tmp_result == 0),
4468 			"Failed to disable deep sleep!", result = tmp_result);
4469 
4470 	tmp_result = vega10_disable_ulv(hwmgr);
4471 	PP_ASSERT_WITH_CODE((tmp_result == 0),
4472 			"Failed to disable ulv!", result = tmp_result);
4473 
4474 	tmp_result =  vega10_acg_disable(hwmgr);
4475 	PP_ASSERT_WITH_CODE((tmp_result == 0),
4476 			"Failed to disable acg!", result = tmp_result);
4477 
4478 	vega10_enable_disable_PCC_limit_feature(hwmgr, false);
4479 	return result;
4480 }
4481 
4482 static int vega10_power_off_asic(struct pp_hwmgr *hwmgr)
4483 {
4484 	struct vega10_hwmgr *data = hwmgr->backend;
4485 	int result;
4486 
4487 	result = vega10_disable_dpm_tasks(hwmgr);
4488 	PP_ASSERT_WITH_CODE((0 == result),
4489 			"[disable_dpm_tasks] Failed to disable DPM!",
4490 			);
4491 	data->water_marks_bitmap &= ~(WaterMarksLoaded);
4492 
4493 	return result;
4494 }
4495 
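/*
 * Report the current SCLK overdrive as a percentage: how far the top
 * level of the active gfx DPM table sits above the top level of the
 * golden (default) table.
 */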
4496 static int vega10_get_sclk_od(struct pp_hwmgr *hwmgr)
4497 {
4498 	struct vega10_hwmgr *data = hwmgr->backend;
4499 	struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
4500 	struct vega10_single_dpm_table *golden_sclk_table =
4501 			&(data->golden_dpm_table.gfx_table);
4502 	int value;
4503 
4504 	value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
4505 			golden_sclk_table->dpm_levels
4506 			[golden_sclk_table->count - 1].value) *
4507 			100 /
4508 			golden_sclk_table->dpm_levels
4509 			[golden_sclk_table->count - 1].value;
4510 
4511 	return value;
4512 }
4513 
4514 static int vega10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
4515 {
4516 	struct vega10_hwmgr *data = hwmgr->backend;
4517 	struct vega10_single_dpm_table *golden_sclk_table =
4518 			&(data->golden_dpm_table.gfx_table);
4519 	struct pp_power_state *ps;
4520 	struct vega10_power_state *vega10_ps;
4521 
4522 	ps = hwmgr->request_ps;
4523 
4524 	if (ps == NULL)
4525 		return -EINVAL;
4526 
4527 	vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
4528 
4529 	vega10_ps->performance_levels
4530 	[vega10_ps->performance_level_count - 1].gfx_clock =
4531 			golden_sclk_table->dpm_levels
4532 			[golden_sclk_table->count - 1].value *
4533 			value / 100 +
4534 			golden_sclk_table->dpm_levels
4535 			[golden_sclk_table->count - 1].value;
4536 
4537 	if (vega10_ps->performance_levels
4538 			[vega10_ps->performance_level_count - 1].gfx_clock >
4539 			hwmgr->platform_descriptor.overdriveLimit.engineClock)
4540 		vega10_ps->performance_levels
4541 		[vega10_ps->performance_level_count - 1].gfx_clock =
4542 				hwmgr->platform_descriptor.overdriveLimit.engineClock;
4543 
4544 	return 0;
4545 }
4546 
4547 static int vega10_get_mclk_od(struct pp_hwmgr *hwmgr)
4548 {
4549 	struct vega10_hwmgr *data = hwmgr->backend;
4550 	struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
4551 	struct vega10_single_dpm_table *golden_mclk_table =
4552 			&(data->golden_dpm_table.mem_table);
4553 	int value;
4554 
4555 	value = (mclk_table->dpm_levels
4556 			[mclk_table->count - 1].value -
4557 			golden_mclk_table->dpm_levels
4558 			[golden_mclk_table->count - 1].value) *
4559 			100 /
4560 			golden_mclk_table->dpm_levels
4561 			[golden_mclk_table->count - 1].value;
4562 
4563 	return value;
4564 }
4565 
4566 static int vega10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
4567 {
4568 	struct vega10_hwmgr *data = hwmgr->backend;
4569 	struct vega10_single_dpm_table *golden_mclk_table =
4570 			&(data->golden_dpm_table.mem_table);
4571 	struct pp_power_state  *ps;
4572 	struct vega10_power_state  *vega10_ps;
4573 
4574 	ps = hwmgr->request_ps;
4575 
4576 	if (ps == NULL)
4577 		return -EINVAL;
4578 
4579 	vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
4580 
4581 	vega10_ps->performance_levels
4582 	[vega10_ps->performance_level_count - 1].mem_clock =
4583 			golden_mclk_table->dpm_levels
4584 			[golden_mclk_table->count - 1].value *
4585 			value / 100 +
4586 			golden_mclk_table->dpm_levels
4587 			[golden_mclk_table->count - 1].value;
4588 
4589 	if (vega10_ps->performance_levels
4590 			[vega10_ps->performance_level_count - 1].mem_clock >
4591 			hwmgr->platform_descriptor.overdriveLimit.memoryClock)
4592 		vega10_ps->performance_levels
4593 		[vega10_ps->performance_level_count - 1].mem_clock =
4594 				hwmgr->platform_descriptor.overdriveLimit.memoryClock;
4595 
4596 	return 0;
4597 }
4598 
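/*
 * Pass the CAC log buffer location to the SMC: the system virtual address
 * and the DRAM (MC) address are each sent as high/low 32-bit halves,
 * followed by the buffer size.
 */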
4599 static int vega10_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
4600 					uint32_t virtual_addr_low,
4601 					uint32_t virtual_addr_hi,
4602 					uint32_t mc_addr_low,
4603 					uint32_t mc_addr_hi,
4604 					uint32_t size)
4605 {
4606 	smum_send_msg_to_smc_with_parameter(hwmgr,
4607 					PPSMC_MSG_SetSystemVirtualDramAddrHigh,
4608 					virtual_addr_hi);
4609 	smum_send_msg_to_smc_with_parameter(hwmgr,
4610 					PPSMC_MSG_SetSystemVirtualDramAddrLow,
4611 					virtual_addr_low);
4612 	smum_send_msg_to_smc_with_parameter(hwmgr,
4613 					PPSMC_MSG_DramLogSetDramAddrHigh,
4614 					mc_addr_hi);
4615 
4616 	smum_send_msg_to_smc_with_parameter(hwmgr,
4617 					PPSMC_MSG_DramLogSetDramAddrLow,
4618 					mc_addr_low);
4619 
4620 	smum_send_msg_to_smc_with_parameter(hwmgr,
4621 					PPSMC_MSG_DramLogSetDramSize,
4622 					size);
4623 	return 0;
4624 }
4625 
4626 static int vega10_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
4627 		struct PP_TemperatureRange *thermal_data)
4628 {
4629 	struct phm_ppt_v2_information *table_info =
4630 			(struct phm_ppt_v2_information *)hwmgr->pptable;
4631 
4632 	memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
4633 
4634 	thermal_data->max = table_info->tdp_table->usSoftwareShutdownTemp *
4635 		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4636 
4637 	return 0;
4638 }
4639 
4640 static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
4641 {
4642 	struct vega10_hwmgr *data = hwmgr->backend;
4643 	uint32_t i, size = 0;
4644 	static const uint8_t profile_mode_setting[5][4] = {{70, 60, 1, 3,},
4645 						{90, 60, 0, 0,},
4646 						{70, 60, 0, 0,},
4647 						{70, 90, 0, 0,},
4648 						{30, 60, 0, 6,},
4649 						};
4650 	static const char *profile_name[6] = {"3D_FULL_SCREEN",
4651 					"POWER_SAVING",
4652 					"VIDEO",
4653 					"VR",
4654 					"COMPUTE",
4655 					"CUSTOM"};
4656 	static const char *title[6] = {"NUM",
4657 			"MODE_NAME",
4658 			"BUSY_SET_POINT",
4659 			"FPS",
4660 			"USE_RLC_BUSY",
4661 			"MIN_ACTIVE_LEVEL"};
4662 
4663 	if (!buf)
4664 		return -EINVAL;
4665 
	size += sprintf(buf + size, "%s %16s %s %s %s %s\n", title[0],
			title[1], title[2], title[3], title[4], title[5]);
4668 
4669 	for (i = 0; i < PP_SMC_POWER_PROFILE_CUSTOM; i++)
4670 		size += sprintf(buf + size, "%3d %14s%s: %14d %3d %10d %14d\n",
4671 			i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
4672 			profile_mode_setting[i][0], profile_mode_setting[i][1],
4673 			profile_mode_setting[i][2], profile_mode_setting[i][3]);
4674 	size += sprintf(buf + size, "%3d %14s%s: %14d %3d %10d %14d\n", i,
4675 			profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
4676 			data->custom_profile_mode[0], data->custom_profile_mode[1],
4677 			data->custom_profile_mode[2], data->custom_profile_mode[3]);
4678 	return size;
4679 }
4680 
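/*
 * Select a power profile mode.  The requested mode is carried in
 * input[size]; for the CUSTOM profile, input[0..3] supply busy_set_point,
 * FPS, use_rlc_busy and min_active_level, which are packed into a single
 * SMC parameter.
 */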
4681 static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
4682 {
4683 	struct vega10_hwmgr *data = hwmgr->backend;
4684 	uint8_t busy_set_point;
4685 	uint8_t FPS;
4686 	uint8_t use_rlc_busy;
4687 	uint8_t min_active_level;
4688 
	if (input[size] == PP_SMC_POWER_PROFILE_CUSTOM) {
		if (size == 0 || size > 4)
			return -EINVAL;

		data->custom_profile_mode[0] = busy_set_point = input[0];
		data->custom_profile_mode[1] = FPS = input[1];
		data->custom_profile_mode[2] = use_rlc_busy = input[2];
		data->custom_profile_mode[3] = min_active_level = input[3];
		/* pack the four custom parameters into one message argument */
		smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetCustomGfxDpmParameters,
					busy_set_point | FPS << 8 |
					use_rlc_busy << 16 | min_active_level << 24);
	}

	/* only commit the mode and switch the workload once the input is valid */
	hwmgr->power_profile_mode = input[size];
	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
						1 << hwmgr->power_profile_mode);
4707 
4708 	return 0;
4709 }
4710 
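/*
 * Validate a user overdrive request: the voltage (mV) must stay within the
 * ODN VDDC range, and the clock (stored in 10 kHz units) must lie between
 * the lowest golden DPM level and the platform overdrive limit.
 */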
4712 static bool vega10_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
4713 					enum PP_OD_DPM_TABLE_COMMAND type,
4714 					uint32_t clk,
4715 					uint32_t voltage)
4716 {
4717 	struct vega10_hwmgr *data = hwmgr->backend;
4718 	struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
4719 	struct vega10_single_dpm_table *golden_table;
4720 
4721 	if (voltage < odn_table->min_vddc || voltage > odn_table->max_vddc) {
4722 		pr_info("OD voltage is out of range [%d - %d] mV\n", odn_table->min_vddc, odn_table->max_vddc);
4723 		return false;
4724 	}
4725 
4726 	if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
4727 		golden_table = &(data->golden_dpm_table.gfx_table);
4728 		if (golden_table->dpm_levels[0].value > clk ||
4729 			hwmgr->platform_descriptor.overdriveLimit.engineClock < clk) {
4730 			pr_info("OD engine clock is out of range [%d - %d] MHz\n",
4731 				golden_table->dpm_levels[0].value/100,
4732 				hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
4733 			return false;
4734 		}
4735 	} else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
4736 		golden_table = &(data->golden_dpm_table.mem_table);
4737 		if (golden_table->dpm_levels[0].value > clk ||
4738 			hwmgr->platform_descriptor.overdriveLimit.memoryClock < clk) {
4739 			pr_info("OD memory clock is out of range [%d - %d] MHz\n",
4740 				golden_table->dpm_levels[0].value/100,
4741 				hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
4742 			return false;
4743 		}
4744 	} else {
4745 		return false;
4746 	}
4747 
4748 	return true;
4749 }
4750 
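/*
 * Keep the SOC clock tables consistent with user overdrive edits: SCLK edits
 * mirror the new voltages into the ODN VDDC lookup table, while MCLK edits
 * re-resolve the voltage lookup indices and, where needed, raise the SOCCLK
 * dependency entries so the SOC clock and voltage can follow the new top MCLK.
 */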
4751 static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr,
4752 						enum PP_OD_DPM_TABLE_COMMAND type)
4753 {
4754 	struct vega10_hwmgr *data = hwmgr->backend;
4755 	struct phm_ppt_v2_information *table_info = hwmgr->pptable;
4756 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table = table_info->vdd_dep_on_socclk;
4757 	struct vega10_single_dpm_table *dpm_table = &data->golden_dpm_table.soc_table;
4758 
4759 	struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep_on_socclk =
4760 							&data->odn_dpm_table.vdd_dep_on_socclk;
4761 	struct vega10_odn_vddc_lookup_table *od_vddc_lookup_table = &data->odn_dpm_table.vddc_lookup_table;
4762 
4763 	struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep;
4764 	uint8_t i, j;
4765 
4766 	if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
4767 		podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
4768 		for (i = 0; i < podn_vdd_dep->count; i++)
4769 			od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc;
4770 	} else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
4771 		podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
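		/* re-resolve the VDDC lookup index for the overdriven memory levels */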
4772 		for (i = 0; i < dpm_table->count; i++) {
4773 			for (j = 0; j < od_vddc_lookup_table->count; j++) {
4774 				if (od_vddc_lookup_table->entries[j].us_vdd >
4775 					podn_vdd_dep->entries[i].vddc)
4776 					break;
4777 			}
4778 			if (j == od_vddc_lookup_table->count) {
4779 				od_vddc_lookup_table->entries[j-1].us_vdd =
4780 					podn_vdd_dep->entries[i].vddc;
4781 				data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
4782 			}
4783 			podn_vdd_dep->entries[i].vddInd = j;
4784 		}
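		/* bump the SOCCLK dependency entries so SOCCLK can follow the new top MCLK */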
4785 		dpm_table = &data->dpm_table.soc_table;
4786 		for (i = 0; i < dep_table->count; i++) {
4787 			if (dep_table->entries[i].vddInd == podn_vdd_dep->entries[dep_table->count-1].vddInd &&
4788 					dep_table->entries[i].clk < podn_vdd_dep->entries[dep_table->count-1].clk) {
4789 				data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
4790 				podn_vdd_dep_on_socclk->entries[i].clk = podn_vdd_dep->entries[dep_table->count-1].clk;
4791 				dpm_table->dpm_levels[i].value = podn_vdd_dep_on_socclk->entries[i].clk;
4792 			}
4793 		}
4794 		if (podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].clk <
4795 					podn_vdd_dep->entries[dep_table->count-1].clk) {
4796 			data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
4797 			podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].clk = podn_vdd_dep->entries[dep_table->count-1].clk;
4798 			dpm_table->dpm_levels[podn_vdd_dep_on_socclk->count - 1].value = podn_vdd_dep->entries[dep_table->count-1].clk;
4799 		}
4800 		if (podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].vddInd <
4801 					podn_vdd_dep->entries[dep_table->count-1].vddInd) {
4802 			data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
4803 			podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].vddInd = podn_vdd_dep->entries[dep_table->count-1].vddInd;
4804 		}
4805 	}
4806 }
4807 
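/*
 * Entry point for pp_od_clk_voltage edits: SCLK/MCLK table edits take
 * (level, clock in MHz, voltage in mV) triplets, RESTORE_DEFAULT reverts to
 * the golden tables, and COMMIT re-checks the edited tables before they are
 * applied.
 */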
4808 static int vega10_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
4809 					enum PP_OD_DPM_TABLE_COMMAND type,
4810 					long *input, uint32_t size)
4811 {
4812 	struct vega10_hwmgr *data = hwmgr->backend;
4813 	struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep_table;
4814 	struct vega10_single_dpm_table *dpm_table;
4815 
4816 	uint32_t input_clk;
4817 	uint32_t input_vol;
4818 	uint32_t input_level;
4819 	uint32_t i;
4820 
4821 	PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage",
4822 				return -EINVAL);
4823 
4824 	if (!hwmgr->od_enabled) {
4825 		pr_info("OverDrive feature not enabled\n");
4826 		return -EINVAL;
4827 	}
4828 
	if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
4830 		dpm_table = &data->dpm_table.gfx_table;
4831 		podn_vdd_dep_table = &data->odn_dpm_table.vdd_dep_on_sclk;
4832 		data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
	} else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
4834 		dpm_table = &data->dpm_table.mem_table;
4835 		podn_vdd_dep_table = &data->odn_dpm_table.vdd_dep_on_mclk;
4836 		data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
	} else if (type == PP_OD_RESTORE_DEFAULT_TABLE) {
4838 		memcpy(&(data->dpm_table), &(data->golden_dpm_table), sizeof(struct vega10_dpm_table));
4839 		vega10_odn_initial_default_setting(hwmgr);
4840 		return 0;
	} else if (type == PP_OD_COMMIT_DPM_TABLE) {
4842 		vega10_check_dpm_table_updated(hwmgr);
4843 		return 0;
4844 	} else {
4845 		return -EINVAL;
4846 	}
4847 
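	/* parse (level, clock in MHz, voltage in mV) triplets from user input */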
4848 	for (i = 0; i < size; i += 3) {
4849 		if (i + 3 > size || input[i] >= podn_vdd_dep_table->count) {
4850 			pr_info("invalid clock voltage input\n");
4851 			return 0;
4852 		}
4853 		input_level = input[i];
4854 		input_clk = input[i+1] * 100;
4855 		input_vol = input[i+2];
4856 
4857 		if (vega10_check_clk_voltage_valid(hwmgr, type, input_clk, input_vol)) {
4858 			dpm_table->dpm_levels[input_level].value = input_clk;
4859 			podn_vdd_dep_table->entries[input_level].clk = input_clk;
4860 			podn_vdd_dep_table->entries[input_level].vddc = input_vol;
4861 		} else {
4862 			return -EINVAL;
4863 		}
4864 	}
4865 	vega10_odn_update_soc_table(hwmgr, type);
4866 	return 0;
4867 }
4868 
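/* hwmgr callbacks implemented by the Vega10 backend, registered with the powerplay core */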
4869 static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
4870 	.backend_init = vega10_hwmgr_backend_init,
4871 	.backend_fini = vega10_hwmgr_backend_fini,
4872 	.asic_setup = vega10_setup_asic_task,
4873 	.dynamic_state_management_enable = vega10_enable_dpm_tasks,
4874 	.dynamic_state_management_disable = vega10_disable_dpm_tasks,
4875 	.get_num_of_pp_table_entries =
4876 			vega10_get_number_of_powerplay_table_entries,
4877 	.get_power_state_size = vega10_get_power_state_size,
4878 	.get_pp_table_entry = vega10_get_pp_table_entry,
4879 	.patch_boot_state = vega10_patch_boot_state,
4880 	.apply_state_adjust_rules = vega10_apply_state_adjust_rules,
4881 	.power_state_set = vega10_set_power_state_tasks,
4882 	.get_sclk = vega10_dpm_get_sclk,
4883 	.get_mclk = vega10_dpm_get_mclk,
4884 	.notify_smc_display_config_after_ps_adjustment =
4885 			vega10_notify_smc_display_config_after_ps_adjustment,
4886 	.force_dpm_level = vega10_dpm_force_dpm_level,
4887 	.stop_thermal_controller = vega10_thermal_stop_thermal_controller,
4888 	.get_fan_speed_info = vega10_fan_ctrl_get_fan_speed_info,
4889 	.get_fan_speed_percent = vega10_fan_ctrl_get_fan_speed_percent,
4890 	.set_fan_speed_percent = vega10_fan_ctrl_set_fan_speed_percent,
4891 	.reset_fan_speed_to_default =
4892 			vega10_fan_ctrl_reset_fan_speed_to_default,
4893 	.get_fan_speed_rpm = vega10_fan_ctrl_get_fan_speed_rpm,
4894 	.set_fan_speed_rpm = vega10_fan_ctrl_set_fan_speed_rpm,
4895 	.uninitialize_thermal_controller =
4896 			vega10_thermal_ctrl_uninitialize_thermal_controller,
4897 	.set_fan_control_mode = vega10_set_fan_control_mode,
4898 	.get_fan_control_mode = vega10_get_fan_control_mode,
4899 	.read_sensor = vega10_read_sensor,
4900 	.get_dal_power_level = vega10_get_dal_power_level,
4901 	.get_clock_by_type_with_latency = vega10_get_clock_by_type_with_latency,
4902 	.get_clock_by_type_with_voltage = vega10_get_clock_by_type_with_voltage,
4903 	.set_watermarks_for_clocks_ranges = vega10_set_watermarks_for_clocks_ranges,
4904 	.display_clock_voltage_request = vega10_display_clock_voltage_request,
4905 	.force_clock_level = vega10_force_clock_level,
4906 	.print_clock_levels = vega10_print_clock_levels,
4907 	.display_config_changed = vega10_display_configuration_changed_task,
4908 	.powergate_uvd = vega10_power_gate_uvd,
4909 	.powergate_vce = vega10_power_gate_vce,
4910 	.check_states_equal = vega10_check_states_equal,
4911 	.check_smc_update_required_for_display_configuration =
4912 			vega10_check_smc_update_required_for_display_configuration,
4913 	.power_off_asic = vega10_power_off_asic,
4914 	.disable_smc_firmware_ctf = vega10_thermal_disable_alert,
4915 	.get_sclk_od = vega10_get_sclk_od,
4916 	.set_sclk_od = vega10_set_sclk_od,
4917 	.get_mclk_od = vega10_get_mclk_od,
4918 	.set_mclk_od = vega10_set_mclk_od,
4919 	.avfs_control = vega10_avfs_enable,
4920 	.notify_cac_buffer_info = vega10_notify_cac_buffer_info,
4921 	.get_thermal_temperature_range = vega10_get_thermal_temperature_range,
4922 	.register_irq_handlers = smu9_register_irq_handlers,
4923 	.start_thermal_controller = vega10_start_thermal_controller,
4924 	.get_power_profile_mode = vega10_get_power_profile_mode,
4925 	.set_power_profile_mode = vega10_set_power_profile_mode,
4926 	.set_power_limit = vega10_set_power_limit,
4927 	.odn_edit_dpm_table = vega10_odn_edit_dpm_table,
4928 };
4929 
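/* Enable or disable the SMU features selected by @feature_mask with a single message */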
4930 int vega10_enable_smc_features(struct pp_hwmgr *hwmgr,
4931 		bool enable, uint32_t feature_mask)
4932 {
4933 	int msg = enable ? PPSMC_MSG_EnableSmuFeatures :
4934 			PPSMC_MSG_DisableSmuFeatures;
4935 
4936 	return smum_send_msg_to_smc_with_parameter(hwmgr,
4937 			msg, feature_mask);
4938 }
4939 
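/* Install the Vega10 hwmgr and powerplay-table callbacks on this hwmgr instance */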
4940 int vega10_hwmgr_init(struct pp_hwmgr *hwmgr);
4941 int vega10_hwmgr_init(struct pp_hwmgr *hwmgr)
4942 {
4943 	hwmgr->hwmgr_func = &vega10_hwmgr_funcs;
4944 	hwmgr->pptable_func = &vega10_pptable_funcs;
4945 
4946 	return 0;
4947 }
4948