1 /*
2  * Copyright 2017 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/delay.h>
25 #include <linux/fb.h>
26 #include <linux/module.h>
27 #include <linux/slab.h>
28 
29 #include "hwmgr.h"
30 #include "amd_powerplay.h"
31 #include "vega12_smumgr.h"
32 #include "hardwaremanager.h"
33 #include "ppatomfwctrl.h"
34 #include "atomfirmware.h"
35 #include "cgs_common.h"
36 #include "vega12_inc.h"
37 #include "pppcielanes.h"
38 #include "vega12_hwmgr.h"
39 #include "vega12_processpptables.h"
40 #include "vega12_pptable.h"
41 #include "vega12_thermal.h"
42 #include "vega12_ppsmc.h"
43 #include "pp_debug.h"
44 #include "amd_pcie_helpers.h"
45 #include "ppinterrupt.h"
46 #include "pp_overdriver.h"
47 #include "pp_thermal.h"
48 
49 
50 static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
51 		enum pp_clock_type type, uint32_t mask);
52 static int vega12_get_clock_ranges(struct pp_hwmgr *hwmgr,
53 		uint32_t *clock,
54 		PPCLK_e clock_select,
55 		bool max);
56 
57 static void vega12_set_default_registry_data(struct pp_hwmgr *hwmgr)
58 {
59 	struct vega12_hwmgr *data =
60 			(struct vega12_hwmgr *)(hwmgr->backend);
61 
62 	data->gfxclk_average_alpha = PPVEGA12_VEGA12GFXCLKAVERAGEALPHA_DFLT;
63 	data->socclk_average_alpha = PPVEGA12_VEGA12SOCCLKAVERAGEALPHA_DFLT;
64 	data->uclk_average_alpha = PPVEGA12_VEGA12UCLKCLKAVERAGEALPHA_DFLT;
65 	data->gfx_activity_average_alpha = PPVEGA12_VEGA12GFXACTIVITYAVERAGEALPHA_DFLT;
66 	data->lowest_uclk_reserved_for_ulv = PPVEGA12_VEGA12LOWESTUCLKRESERVEDFORULV_DFLT;
67 
68 	data->display_voltage_mode = PPVEGA12_VEGA12DISPLAYVOLTAGEMODE_DFLT;
69 	data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
70 	data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
71 	data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
72 	data->disp_clk_quad_eqn_a = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
73 	data->disp_clk_quad_eqn_b = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
74 	data->disp_clk_quad_eqn_c = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
75 	data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
76 	data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
77 	data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
78 	data->phy_clk_quad_eqn_a = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
79 	data->phy_clk_quad_eqn_b = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
80 	data->phy_clk_quad_eqn_c = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
81 
82 	data->registry_data.disallowed_features = 0x0;
83 	data->registry_data.od_state_in_dc_support = 0;
84 	data->registry_data.thermal_support = 1;
85 	data->registry_data.skip_baco_hardware = 0;
86 
87 	data->registry_data.log_avfs_param = 0;
88 	data->registry_data.sclk_throttle_low_notification = 1;
89 	data->registry_data.force_dpm_high = 0;
90 	data->registry_data.stable_pstate_sclk_dpm_percentage = 75;
91 
92 	data->registry_data.didt_support = 0;
93 	if (data->registry_data.didt_support) {
94 		data->registry_data.didt_mode = 6;
95 		data->registry_data.sq_ramping_support = 1;
96 		data->registry_data.db_ramping_support = 0;
97 		data->registry_data.td_ramping_support = 0;
98 		data->registry_data.tcp_ramping_support = 0;
99 		data->registry_data.dbr_ramping_support = 0;
100 		data->registry_data.edc_didt_support = 1;
101 		data->registry_data.gc_didt_support = 0;
102 		data->registry_data.psm_didt_support = 0;
103 	}
104 
105 	data->registry_data.pcie_lane_override = 0xff;
106 	data->registry_data.pcie_speed_override = 0xff;
107 	data->registry_data.pcie_clock_override = 0xffffffff;
108 	data->registry_data.regulator_hot_gpio_support = 1;
109 	data->registry_data.ac_dc_switch_gpio_support = 0;
110 	data->registry_data.quick_transition_support = 0;
111 	data->registry_data.zrpm_start_temp = 0xffff;
112 	data->registry_data.zrpm_stop_temp = 0xffff;
113 	data->registry_data.odn_feature_enable = 1;
114 	data->registry_data.disable_water_mark = 0;
115 	data->registry_data.disable_pp_tuning = 0;
116 	data->registry_data.disable_xlpp_tuning = 0;
117 	data->registry_data.disable_workload_policy = 0;
118 	data->registry_data.perf_ui_tuning_profile_turbo = 0x19190F0F;
119 	data->registry_data.perf_ui_tuning_profile_powerSave = 0x19191919;
120 	data->registry_data.perf_ui_tuning_profile_xl = 0x00000F0A;
121 	data->registry_data.force_workload_policy_mask = 0;
122 	data->registry_data.disable_3d_fs_detection = 0;
123 	data->registry_data.fps_support = 1;
124 	data->registry_data.disable_auto_wattman = 1;
125 	data->registry_data.auto_wattman_debug = 0;
126 	data->registry_data.auto_wattman_sample_period = 100;
127 	data->registry_data.auto_wattman_threshold = 50;
128 }
129 
/*
 * Translate the registry defaults and the amdgpu powergating flags into
 * PHM platform capability bits. Always returns 0.
 *
 * NOTE: ordering matters here — several caps are first set or cleared
 * unconditionally and then overridden by later registry-dependent branches
 * (e.g. the DiDt group, AC/DC transition vs. quick-transition).
 */
static int vega12_set_features_platform_caps(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data =
			(struct vega12_hwmgr *)(hwmgr->backend);
	struct amdgpu_device *adev = hwmgr->adev;

	/* No VDDCI regulator control available -> drop the cap. */
	if (data->vddci_control == VEGA12_VOLTAGE_CONTROL_NONE)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ControlVDDCI);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TablelessHardwareInterface);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EnableSMU7ThermalManagement);

	/* UVD/VCE powergating caps mirror the amdgpu pg_flags. */
	if (adev->pg_flags & AMD_PG_SUPPORT_UVD) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_UVDPowerGating);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_UVDDynamicPowerGating);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_VCEPowerGating);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_UnTabledHardwareInterface);

	/* Overdrive flavor on AC power: ODN when enabled, legacy OD6 otherwise. */
	if (data->registry_data.odn_feature_enable)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ODNinACSupport);
	else {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_OD6inACSupport);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_OD6PlusinACSupport);
	}

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ActivityReporting);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_FanSpeedInTableIsRPM);

	/* Same ODN/OD6 split for DC (battery) power, when allowed at all. */
	if (data->registry_data.od_state_in_dc_support) {
		if (data->registry_data.odn_feature_enable)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_ODNinDCSupport);
		else {
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_OD6inDCSupport);
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_OD6PlusinDCSupport);
		}
	}

	/* Fuzzy fan control needs thermal support plus a valid TMax from VBIOS.
	 * NOTE(review): fuzzy_fan_control_support is never set in
	 * vega12_set_default_registry_data, so it relies on the kzalloc'ed
	 * zero default — confirm this is intentional.
	 */
	if (data->registry_data.thermal_support
			&& data->registry_data.fuzzy_fan_control_support
			&& hwmgr->thermal_controller.advanceFanControlParameters.usTMax)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ODFuzzyFanControlSupport);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_DynamicPowerManagement);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SMC);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ThermalPolicyDelay);

	if (data->registry_data.force_dpm_high)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ExclusiveModeAlwaysHigh);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DynamicUVDState);

	if (data->registry_data.sclk_throttle_low_notification)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_SclkThrottleLowNotification);

	/* power tune caps */
	/* assume disabled */
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DiDtSupport);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TCPRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DiDtEDCEnable);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_GCEDC);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PSM);

	/* Re-enable the individual DiDt sub-caps per registry selection. */
	if (data->registry_data.didt_support) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtSupport);
		if (data->registry_data.sq_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping);
		if (data->registry_data.db_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping);
		if (data->registry_data.td_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping);
		if (data->registry_data.tcp_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping);
		if (data->registry_data.dbr_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping);
		if (data->registry_data.edc_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtEDCEnable);
		if (data->registry_data.gc_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC);
		if (data->registry_data.psm_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM);
	}

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_RegulatorHot);

	if (data->registry_data.ac_dc_switch_gpio_support) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_AutomaticDCTransition);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
	}

	/* Quick transition overrides (clears) the GPIO-based AC/DC scheme. */
	if (data->registry_data.quick_transition_support) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_AutomaticDCTransition);
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_Falcon_QuickTransition);
	}

	/* Any non-default registry value re-evaluates the ULV uclk cap. */
	if (data->lowest_uclk_reserved_for_ulv != PPVEGA12_VEGA12LOWESTUCLKRESERVEDFORULV_DFLT) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_LowestUclkReservedForUlv);
		if (data->lowest_uclk_reserved_for_ulv == 1)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_LowestUclkReservedForUlv);
	}

	if (data->registry_data.custom_fan_support)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_CustomFanControlSupport);

	return 0;
}
287 
288 static void vega12_init_dpm_defaults(struct pp_hwmgr *hwmgr)
289 {
290 	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
291 	int i;
292 
293 	data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
294 			FEATURE_DPM_PREFETCHER_BIT;
295 	data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id =
296 			FEATURE_DPM_GFXCLK_BIT;
297 	data->smu_features[GNLD_DPM_UCLK].smu_feature_id =
298 			FEATURE_DPM_UCLK_BIT;
299 	data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id =
300 			FEATURE_DPM_SOCCLK_BIT;
301 	data->smu_features[GNLD_DPM_UVD].smu_feature_id =
302 			FEATURE_DPM_UVD_BIT;
303 	data->smu_features[GNLD_DPM_VCE].smu_feature_id =
304 			FEATURE_DPM_VCE_BIT;
305 	data->smu_features[GNLD_ULV].smu_feature_id =
306 			FEATURE_ULV_BIT;
307 	data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id =
308 			FEATURE_DPM_MP0CLK_BIT;
309 	data->smu_features[GNLD_DPM_LINK].smu_feature_id =
310 			FEATURE_DPM_LINK_BIT;
311 	data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id =
312 			FEATURE_DPM_DCEFCLK_BIT;
313 	data->smu_features[GNLD_DS_GFXCLK].smu_feature_id =
314 			FEATURE_DS_GFXCLK_BIT;
315 	data->smu_features[GNLD_DS_SOCCLK].smu_feature_id =
316 			FEATURE_DS_SOCCLK_BIT;
317 	data->smu_features[GNLD_DS_LCLK].smu_feature_id =
318 			FEATURE_DS_LCLK_BIT;
319 	data->smu_features[GNLD_PPT].smu_feature_id =
320 			FEATURE_PPT_BIT;
321 	data->smu_features[GNLD_TDC].smu_feature_id =
322 			FEATURE_TDC_BIT;
323 	data->smu_features[GNLD_THERMAL].smu_feature_id =
324 			FEATURE_THERMAL_BIT;
325 	data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id =
326 			FEATURE_GFX_PER_CU_CG_BIT;
327 	data->smu_features[GNLD_RM].smu_feature_id =
328 			FEATURE_RM_BIT;
329 	data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id =
330 			FEATURE_DS_DCEFCLK_BIT;
331 	data->smu_features[GNLD_ACDC].smu_feature_id =
332 			FEATURE_ACDC_BIT;
333 	data->smu_features[GNLD_VR0HOT].smu_feature_id =
334 			FEATURE_VR0HOT_BIT;
335 	data->smu_features[GNLD_VR1HOT].smu_feature_id =
336 			FEATURE_VR1HOT_BIT;
337 	data->smu_features[GNLD_FW_CTF].smu_feature_id =
338 			FEATURE_FW_CTF_BIT;
339 	data->smu_features[GNLD_LED_DISPLAY].smu_feature_id =
340 			FEATURE_LED_DISPLAY_BIT;
341 	data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
342 			FEATURE_FAN_CONTROL_BIT;
343 	data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT;
344 	data->smu_features[GNLD_GFXOFF].smu_feature_id = FEATURE_GFXOFF_BIT;
345 	data->smu_features[GNLD_CG].smu_feature_id = FEATURE_CG_BIT;
346 	data->smu_features[GNLD_ACG].smu_feature_id = FEATURE_ACG_BIT;
347 
348 	for (i = 0; i < GNLD_FEATURES_MAX; i++) {
349 		data->smu_features[i].smu_feature_bitmap =
350 			(uint64_t)(1ULL << data->smu_features[i].smu_feature_id);
351 		data->smu_features[i].allowed =
352 			((data->registry_data.disallowed_features >> i) & 1) ?
353 			false : true;
354 	}
355 }
356 
/*
 * Placeholder: vega12 currently derives no additional private data from
 * the pptable. Kept so the backend-init call sequence matches other ASICs.
 */
static int vega12_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
{
	return 0;
}
361 
/* Tear down the hwmgr backend: free the vega12 private data. */
static int vega12_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
	kfree(hwmgr->backend);
	hwmgr->backend = NULL;	/* clear to avoid a dangling pointer on re-init */

	return 0;
}
369 
370 static int vega12_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
371 {
372 	int result = 0;
373 	struct vega12_hwmgr *data;
374 	struct amdgpu_device *adev = hwmgr->adev;
375 
376 	data = kzalloc(sizeof(struct vega12_hwmgr), GFP_KERNEL);
377 	if (data == NULL)
378 		return -ENOMEM;
379 
380 	hwmgr->backend = data;
381 
382 	vega12_set_default_registry_data(hwmgr);
383 
384 	data->disable_dpm_mask = 0xff;
385 	data->workload_mask = 0xff;
386 
387 	/* need to set voltage control types before EVV patching */
388 	data->vddc_control = VEGA12_VOLTAGE_CONTROL_NONE;
389 	data->mvdd_control = VEGA12_VOLTAGE_CONTROL_NONE;
390 	data->vddci_control = VEGA12_VOLTAGE_CONTROL_NONE;
391 
392 	data->water_marks_bitmap = 0;
393 	data->avfs_exist = false;
394 
395 	vega12_set_features_platform_caps(hwmgr);
396 
397 	vega12_init_dpm_defaults(hwmgr);
398 
399 	/* Parse pptable data read from VBIOS */
400 	vega12_set_private_data_based_on_pptable(hwmgr);
401 
402 	data->is_tlu_enabled = false;
403 
404 	hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
405 			VEGA12_MAX_HARDWARE_POWERLEVELS;
406 	hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
407 	hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
408 
409 	hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
410 	/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
411 	hwmgr->platform_descriptor.clockStep.engineClock = 500;
412 	hwmgr->platform_descriptor.clockStep.memoryClock = 500;
413 
414 	data->total_active_cus = adev->gfx.cu_info.number;
415 	/* Setup default Overdrive Fan control settings */
416 	data->odn_fan_table.target_fan_speed =
417 			hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM;
418 	data->odn_fan_table.target_temperature =
419 			hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature;
420 	data->odn_fan_table.min_performance_clock =
421 			hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit;
422 	data->odn_fan_table.min_fan_limit =
423 			hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit *
424 			hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;
425 
426 	if (hwmgr->feature_mask & PP_GFXOFF_MASK)
427 		data->gfxoff_controlled_by_driver = true;
428 	else
429 		data->gfxoff_controlled_by_driver = false;
430 
431 	return result;
432 }
433 
/*
 * Reset the cached low-sclk interrupt threshold.
 * NOTE(review): 0 appears to mean "no threshold programmed yet" — confirm
 * against the consumers of low_sclk_interrupt_threshold.
 */
static int vega12_init_sclk_threshold(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data =
			(struct vega12_hwmgr *)(hwmgr->backend);

	data->low_sclk_interrupt_threshold = 0;

	return 0;
}
443 
444 static int vega12_setup_asic_task(struct pp_hwmgr *hwmgr)
445 {
446 	PP_ASSERT_WITH_CODE(!vega12_init_sclk_threshold(hwmgr),
447 			"Failed to init sclk threshold!",
448 			return -EINVAL);
449 
450 	return 0;
451 }
452 
453 /*
454  * @fn vega12_init_dpm_state
455  * @brief Function to initialize all Soft Min/Max and Hard Min/Max to 0xff.
456  *
457  * @param    dpm_state - the address of the DPM Table to initiailize.
458  * @return   None.
459  */
460 static void vega12_init_dpm_state(struct vega12_dpm_state *dpm_state)
461 {
462 	dpm_state->soft_min_level = 0x0;
463 	dpm_state->soft_max_level = 0xffff;
464 	dpm_state->hard_min_level = 0x0;
465 	dpm_state->hard_max_level = 0xffff;
466 }
467 
468 static int vega12_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
469 		PPCLK_e clk_id, uint32_t *num_of_levels)
470 {
471 	int ret = 0;
472 
473 	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
474 			PPSMC_MSG_GetDpmFreqByIndex,
475 			(clk_id << 16 | 0xFF));
476 	PP_ASSERT_WITH_CODE(!ret,
477 			"[GetNumOfDpmLevel] failed to get dpm levels!",
478 			return ret);
479 
480 	*num_of_levels = smum_get_argument(hwmgr);
481 	PP_ASSERT_WITH_CODE(*num_of_levels > 0,
482 			"[GetNumOfDpmLevel] number of clk levels is invalid!",
483 			return -EINVAL);
484 
485 	return ret;
486 }
487 
488 static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
489 		PPCLK_e clkID, uint32_t index, uint32_t *clock)
490 {
491 	int result = 0;
492 
493 	/*
494 	 *SMU expects the Clock ID to be in the top 16 bits.
495 	 *Lower 16 bits specify the level
496 	 */
497 	PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
498 		PPSMC_MSG_GetDpmFreqByIndex, (clkID << 16 | index)) == 0,
499 		"[GetDpmFrequencyByIndex] Failed to get dpm frequency from SMU!",
500 		return -EINVAL);
501 
502 	*clock = smum_get_argument(hwmgr);
503 
504 	return result;
505 }
506 
507 static int vega12_setup_single_dpm_table(struct pp_hwmgr *hwmgr,
508 		struct vega12_single_dpm_table *dpm_table, PPCLK_e clk_id)
509 {
510 	int ret = 0;
511 	uint32_t i, num_of_levels, clk;
512 
513 	ret = vega12_get_number_of_dpm_level(hwmgr, clk_id, &num_of_levels);
514 	PP_ASSERT_WITH_CODE(!ret,
515 			"[SetupSingleDpmTable] failed to get clk levels!",
516 			return ret);
517 
518 	dpm_table->count = num_of_levels;
519 
520 	for (i = 0; i < num_of_levels; i++) {
521 		ret = vega12_get_dpm_frequency_by_index(hwmgr, clk_id, i, &clk);
522 		PP_ASSERT_WITH_CODE(!ret,
523 			"[SetupSingleDpmTable] failed to get clk of specific level!",
524 			return ret);
525 		dpm_table->dpm_levels[i].value = clk;
526 		dpm_table->dpm_levels[i].enabled = true;
527 	}
528 
529 	return ret;
530 }
531 
532 /*
533  * This function is to initialize all DPM state tables
534  * for SMU based on the dependency table.
535  * Dynamic state patching function will then trim these
536  * state tables to the allowed range based
537  * on the power policy or external client requests,
538  * such as UVD request, etc.
539  */
540 static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
541 {
542 
543 	struct vega12_hwmgr *data =
544 			(struct vega12_hwmgr *)(hwmgr->backend);
545 	struct vega12_single_dpm_table *dpm_table;
546 	int ret = 0;
547 
548 	memset(&data->dpm_table, 0, sizeof(data->dpm_table));
549 
550 	/* socclk */
551 	dpm_table = &(data->dpm_table.soc_table);
552 	if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
553 		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_SOCCLK);
554 		PP_ASSERT_WITH_CODE(!ret,
555 				"[SetupDefaultDpmTable] failed to get socclk dpm levels!",
556 				return ret);
557 	} else {
558 		dpm_table->count = 1;
559 		dpm_table->dpm_levels[0].value = data->vbios_boot_state.soc_clock / 100;
560 	}
561 	vega12_init_dpm_state(&(dpm_table->dpm_state));
562 
563 	/* gfxclk */
564 	dpm_table = &(data->dpm_table.gfx_table);
565 	if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
566 		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_GFXCLK);
567 		PP_ASSERT_WITH_CODE(!ret,
568 				"[SetupDefaultDpmTable] failed to get gfxclk dpm levels!",
569 				return ret);
570 	} else {
571 		dpm_table->count = 1;
572 		dpm_table->dpm_levels[0].value = data->vbios_boot_state.gfx_clock / 100;
573 	}
574 	vega12_init_dpm_state(&(dpm_table->dpm_state));
575 
576 	/* memclk */
577 	dpm_table = &(data->dpm_table.mem_table);
578 	if (data->smu_features[GNLD_DPM_UCLK].enabled) {
579 		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_UCLK);
580 		PP_ASSERT_WITH_CODE(!ret,
581 				"[SetupDefaultDpmTable] failed to get memclk dpm levels!",
582 				return ret);
583 	} else {
584 		dpm_table->count = 1;
585 		dpm_table->dpm_levels[0].value = data->vbios_boot_state.mem_clock / 100;
586 	}
587 	vega12_init_dpm_state(&(dpm_table->dpm_state));
588 
589 	/* eclk */
590 	dpm_table = &(data->dpm_table.eclk_table);
591 	if (data->smu_features[GNLD_DPM_VCE].enabled) {
592 		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_ECLK);
593 		PP_ASSERT_WITH_CODE(!ret,
594 				"[SetupDefaultDpmTable] failed to get eclk dpm levels!",
595 				return ret);
596 	} else {
597 		dpm_table->count = 1;
598 		dpm_table->dpm_levels[0].value = data->vbios_boot_state.eclock / 100;
599 	}
600 	vega12_init_dpm_state(&(dpm_table->dpm_state));
601 
602 	/* vclk */
603 	dpm_table = &(data->dpm_table.vclk_table);
604 	if (data->smu_features[GNLD_DPM_UVD].enabled) {
605 		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_VCLK);
606 		PP_ASSERT_WITH_CODE(!ret,
607 				"[SetupDefaultDpmTable] failed to get vclk dpm levels!",
608 				return ret);
609 	} else {
610 		dpm_table->count = 1;
611 		dpm_table->dpm_levels[0].value = data->vbios_boot_state.vclock / 100;
612 	}
613 	vega12_init_dpm_state(&(dpm_table->dpm_state));
614 
615 	/* dclk */
616 	dpm_table = &(data->dpm_table.dclk_table);
617 	if (data->smu_features[GNLD_DPM_UVD].enabled) {
618 		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCLK);
619 		PP_ASSERT_WITH_CODE(!ret,
620 				"[SetupDefaultDpmTable] failed to get dclk dpm levels!",
621 				return ret);
622 	} else {
623 		dpm_table->count = 1;
624 		dpm_table->dpm_levels[0].value = data->vbios_boot_state.dclock / 100;
625 	}
626 	vega12_init_dpm_state(&(dpm_table->dpm_state));
627 
628 	/* dcefclk */
629 	dpm_table = &(data->dpm_table.dcef_table);
630 	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
631 		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCEFCLK);
632 		PP_ASSERT_WITH_CODE(!ret,
633 				"[SetupDefaultDpmTable] failed to get dcefclk dpm levels!",
634 				return ret);
635 	} else {
636 		dpm_table->count = 1;
637 		dpm_table->dpm_levels[0].value = data->vbios_boot_state.dcef_clock / 100;
638 	}
639 	vega12_init_dpm_state(&(dpm_table->dpm_state));
640 
641 	/* pixclk */
642 	dpm_table = &(data->dpm_table.pixel_table);
643 	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
644 		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PIXCLK);
645 		PP_ASSERT_WITH_CODE(!ret,
646 				"[SetupDefaultDpmTable] failed to get pixclk dpm levels!",
647 				return ret);
648 	} else
649 		dpm_table->count = 0;
650 	vega12_init_dpm_state(&(dpm_table->dpm_state));
651 
652 	/* dispclk */
653 	dpm_table = &(data->dpm_table.display_table);
654 	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
655 		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DISPCLK);
656 		PP_ASSERT_WITH_CODE(!ret,
657 				"[SetupDefaultDpmTable] failed to get dispclk dpm levels!",
658 				return ret);
659 	} else
660 		dpm_table->count = 0;
661 	vega12_init_dpm_state(&(dpm_table->dpm_state));
662 
663 	/* phyclk */
664 	dpm_table = &(data->dpm_table.phy_table);
665 	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
666 		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PHYCLK);
667 		PP_ASSERT_WITH_CODE(!ret,
668 				"[SetupDefaultDpmTable] failed to get phyclk dpm levels!",
669 				return ret);
670 	} else
671 		dpm_table->count = 0;
672 	vega12_init_dpm_state(&(dpm_table->dpm_state));
673 
674 	/* save a copy of the default DPM table */
675 	memcpy(&(data->golden_dpm_table), &(data->dpm_table),
676 			sizeof(struct vega12_dpm_table));
677 
678 	return 0;
679 }
680 
/*
 * NOTE(review): dead code — this power-profile helper has been compiled
 * out with "#if 0". Either wire it back into the hwmgr function table or
 * remove it in a follow-up patch.
 */
#if 0
static int vega12_save_default_power_profile(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
	struct vega12_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
	uint32_t min_level;

	hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
	hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;

	/* Optimize compute power profile: Use only highest
	 * 2 power levels (if more than 2 are available)
	 */
	if (dpm_table->count > 2)
		min_level = dpm_table->count - 2;
	else if (dpm_table->count == 2)
		min_level = 1;
	else
		min_level = 0;

	hwmgr->default_compute_power_profile.min_sclk =
			dpm_table->dpm_levels[min_level].value;

	hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
	hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;

	return 0;
}
#endif
710 
711 /**
712 * Initializes the SMC table and uploads it
713 *
714 * @param    hwmgr  the address of the powerplay hardware manager.
715 * @param    pInput  the pointer to input data (PowerState)
716 * @return   always 0
717 */
static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
{
	int result;
	struct vega12_hwmgr *data =
			(struct vega12_hwmgr *)(hwmgr->backend);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
	struct phm_ppt_v3_information *pptable_information =
		(struct phm_ppt_v3_information *)hwmgr->pptable;

	/* Cache the VBIOS boot-up voltages/clocks. On failure the cached boot
	 * state is simply left zeroed and the deep-sleep DCEF floor is not
	 * programmed; the pptable upload below still proceeds.
	 */
	result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values);
	if (!result) {
		data->vbios_boot_state.vddc     = boot_up_values.usVddc;
		data->vbios_boot_state.vddci    = boot_up_values.usVddci;
		data->vbios_boot_state.mvddc    = boot_up_values.usMvddc;
		data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
		data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
		data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
		data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
		data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID;
		data->vbios_boot_state.eclock = boot_up_values.ulEClk;
		data->vbios_boot_state.dclock = boot_up_values.ulDClk;
		data->vbios_boot_state.vclock = boot_up_values.ulVClk;
		/* /100 presumably converts the VBIOS clock units for the SMU
		 * message — confirm against the SMU interface spec.
		 */
		smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetMinDeepSleepDcefclk,
			(uint32_t)(data->vbios_boot_state.dcef_clock / 100));
	}

	/* Upload the driver's copy of the pptable to SMU memory. */
	memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t));

	result = vega12_copy_table_to_smc(hwmgr,
			(uint8_t *)pp_table, TABLE_PPTABLE);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to upload PPtable!", return result);

	return 0;
}
755 
756 static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
757 {
758 	struct vega12_hwmgr *data =
759 			(struct vega12_hwmgr *)(hwmgr->backend);
760 	int i;
761 	uint32_t allowed_features_low = 0, allowed_features_high = 0;
762 
763 	for (i = 0; i < GNLD_FEATURES_MAX; i++)
764 		if (data->smu_features[i].allowed)
765 			data->smu_features[i].smu_feature_id > 31 ?
766 				(allowed_features_high |= ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_HIGH_SHIFT) & 0xFFFFFFFF)) :
767 				(allowed_features_low |= ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_LOW_SHIFT) & 0xFFFFFFFF));
768 
769 	PP_ASSERT_WITH_CODE(
770 		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high) == 0,
771 		"[SetAllowedFeaturesMask] Attempt to set allowed features mask (high) failed!",
772 		return -1);
773 
774 	PP_ASSERT_WITH_CODE(
775 		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low) == 0,
776 		"[SetAllowedFeaturesMask] Attempt to set allowed features mask (low) failed!",
777 		return -1);
778 
779 	return 0;
780 }
781 
782 static void vega12_init_powergate_state(struct pp_hwmgr *hwmgr)
783 {
784 	struct vega12_hwmgr *data =
785 			(struct vega12_hwmgr *)(hwmgr->backend);
786 
787 	data->uvd_power_gated = true;
788 	data->vce_power_gated = true;
789 
790 	if (data->smu_features[GNLD_DPM_UVD].enabled)
791 		data->uvd_power_gated = false;
792 
793 	if (data->smu_features[GNLD_DPM_VCE].enabled)
794 		data->vce_power_gated = false;
795 }
796 
797 static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr)
798 {
799 	struct vega12_hwmgr *data =
800 			(struct vega12_hwmgr *)(hwmgr->backend);
801 	uint64_t features_enabled;
802 	int i;
803 	bool enabled;
804 
805 	PP_ASSERT_WITH_CODE(
806 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAllSmuFeatures) == 0,
807 		"[EnableAllSMUFeatures] Failed to enable all smu features!",
808 		return -1);
809 
810 	if (vega12_get_enabled_smc_features(hwmgr, &features_enabled) == 0) {
811 		for (i = 0; i < GNLD_FEATURES_MAX; i++) {
812 			enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? true : false;
813 			data->smu_features[i].enabled = enabled;
814 			data->smu_features[i].supported = enabled;
815 		}
816 	}
817 
818 	vega12_init_powergate_state(hwmgr);
819 
820 	return 0;
821 }
822 
823 static int vega12_disable_all_smu_features(struct pp_hwmgr *hwmgr)
824 {
825 	struct vega12_hwmgr *data =
826 			(struct vega12_hwmgr *)(hwmgr->backend);
827 	uint64_t features_enabled;
828 	int i;
829 	bool enabled;
830 
831 	PP_ASSERT_WITH_CODE(
832 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableAllSmuFeatures) == 0,
833 		"[DisableAllSMUFeatures] Failed to disable all smu features!",
834 		return -1);
835 
836 	if (vega12_get_enabled_smc_features(hwmgr, &features_enabled) == 0) {
837 		for (i = 0; i < GNLD_FEATURES_MAX; i++) {
838 			enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? true : false;
839 			data->smu_features[i].enabled = enabled;
840 			data->smu_features[i].supported = enabled;
841 		}
842 	}
843 
844 	return 0;
845 }
846 
/*
 * No overdrive (ODN) defaults are programmed on Vega12; this stub is
 * kept so the DPM bring-up sequence has a uniform step to call.
 */
static int vega12_odn_initialize_default_settings(
		struct pp_hwmgr *hwmgr)
{
	return 0;
}
852 
/*
 * Forward a TDP overdrive percentage to the SMC.
 * @adjust_percent: adjustment value; sign/polarity is encoded by the
 *                  caller before the uint32_t cast.
 * Returns the smum message result.
 */
static int vega12_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
		uint32_t adjust_percent)
{
	return smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_OverDriveSetPercentage, adjust_percent);
}
859 
860 static int vega12_power_control_set_level(struct pp_hwmgr *hwmgr)
861 {
862 	int adjust_percent, result = 0;
863 
864 	if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
865 		adjust_percent =
866 				hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
867 				hwmgr->platform_descriptor.TDPAdjustment :
868 				(-1 * hwmgr->platform_descriptor.TDPAdjustment);
869 		result = vega12_set_overdrive_target_percentage(hwmgr,
870 				(uint32_t)adjust_percent);
871 	}
872 	return result;
873 }
874 
/*
 * Query the SMC for the AC max, AC min and DC max frequency limits of
 * one clock domain and store them in @clock. The clock id travels in
 * the upper 16 bits of each message parameter.
 * Returns 0 on success, -EINVAL if any SMC query fails.
 */
static int vega12_get_all_clock_ranges_helper(struct pp_hwmgr *hwmgr,
		PPCLK_e clkid, struct vega12_clock_range *clock)
{
	/* AC Max */
	PP_ASSERT_WITH_CODE(
		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clkid << 16)) == 0,
		"[GetClockRanges] Failed to get max ac clock from SMC!",
		return -EINVAL);
	clock->ACMax = smum_get_argument(hwmgr);

	/* AC Min */
	PP_ASSERT_WITH_CODE(
		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clkid << 16)) == 0,
		"[GetClockRanges] Failed to get min ac clock from SMC!",
		return -EINVAL);
	clock->ACMin = smum_get_argument(hwmgr);

	/* DC Max */
	PP_ASSERT_WITH_CODE(
		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDcModeMaxDpmFreq, (clkid << 16)) == 0,
		"[GetClockRanges] Failed to get max dc clock from SMC!",
		return -EINVAL);
	clock->DCMax = smum_get_argument(hwmgr);

	return 0;
}
901 
902 static int vega12_get_all_clock_ranges(struct pp_hwmgr *hwmgr)
903 {
904 	struct vega12_hwmgr *data =
905 			(struct vega12_hwmgr *)(hwmgr->backend);
906 	uint32_t i;
907 
908 	for (i = 0; i < PPCLK_COUNT; i++)
909 		PP_ASSERT_WITH_CODE(!vega12_get_all_clock_ranges_helper(hwmgr,
910 					i, &(data->clk_range[i])),
911 				"Failed to get clk range from SMC!",
912 				return -EINVAL);
913 
914 	return 0;
915 }
916 
917 static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
918 {
919 	int tmp_result, result = 0;
920 
921 	smum_send_msg_to_smc_with_parameter(hwmgr,
922 			PPSMC_MSG_NumOfDisplays, 0);
923 
924 	result = vega12_set_allowed_featuresmask(hwmgr);
925 	PP_ASSERT_WITH_CODE(result == 0,
926 			"[EnableDPMTasks] Failed to set allowed featuresmask!\n",
927 			return result);
928 
929 	tmp_result = vega12_init_smc_table(hwmgr);
930 	PP_ASSERT_WITH_CODE(!tmp_result,
931 			"Failed to initialize SMC table!",
932 			result = tmp_result);
933 
934 	result = vega12_enable_all_smu_features(hwmgr);
935 	PP_ASSERT_WITH_CODE(!result,
936 			"Failed to enable all smu features!",
937 			return result);
938 
939 	tmp_result = vega12_power_control_set_level(hwmgr);
940 	PP_ASSERT_WITH_CODE(!tmp_result,
941 			"Failed to power control set level!",
942 			result = tmp_result);
943 
944 	result = vega12_get_all_clock_ranges(hwmgr);
945 	PP_ASSERT_WITH_CODE(!result,
946 			"Failed to get all clock ranges!",
947 			return result);
948 
949 	result = vega12_odn_initialize_default_settings(hwmgr);
950 	PP_ASSERT_WITH_CODE(!result,
951 			"Failed to power control set level!",
952 			return result);
953 
954 	result = vega12_setup_default_dpm_tables(hwmgr);
955 	PP_ASSERT_WITH_CODE(!result,
956 			"Failed to setup default DPM tables!",
957 			return result);
958 	return result;
959 }
960 
/*
 * No boot-state patching is required on Vega12; stub retained to
 * satisfy the hwmgr interface.
 */
static int vega12_patch_boot_state(struct pp_hwmgr *hwmgr,
	     struct pp_hw_power_state *hw_ps)
{
	return 0;
}
966 
967 static uint32_t vega12_find_lowest_dpm_level(
968 		struct vega12_single_dpm_table *table)
969 {
970 	uint32_t i;
971 
972 	for (i = 0; i < table->count; i++) {
973 		if (table->dpm_levels[i].enabled)
974 			break;
975 	}
976 
977 	if (i >= table->count) {
978 		i = 0;
979 		table->dpm_levels[i].enabled = true;
980 	}
981 
982 	return i;
983 }
984 
985 static uint32_t vega12_find_highest_dpm_level(
986 		struct vega12_single_dpm_table *table)
987 {
988 	int32_t i = 0;
989 	PP_ASSERT_WITH_CODE(table->count <= MAX_REGULAR_DPM_NUMBER,
990 			"[FindHighestDPMLevel] DPM Table has too many entries!",
991 			return MAX_REGULAR_DPM_NUMBER - 1);
992 
993 	for (i = table->count - 1; i >= 0; i--) {
994 		if (table->dpm_levels[i].enabled)
995 			break;
996 	}
997 
998 	if (i < 0) {
999 		i = 0;
1000 		table->dpm_levels[i].enabled = true;
1001 	}
1002 
1003 	return (uint32_t)i;
1004 }
1005 
/*
 * Push the soft (and for UCLK, hard) minimum frequencies from the
 * driver's DPM tables to the SMC for every enabled DPM domain. Each
 * message packs the clock id in the upper 16 bits and the frequency in
 * the lower 16 bits. Returns 0 on success or the first failing smum
 * message's error code.
 */
static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data = hwmgr->backend;
	uint32_t min_freq;
	int ret = 0;

	/* GFXCLK soft minimum */
	if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
		min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
					(PPCLK_GFXCLK << 16) | (min_freq & 0xffff))),
					"Failed to set soft min gfxclk !",
					return ret);
	}

	/* UCLK gets both a soft and a hard minimum */
	if (data->smu_features[GNLD_DPM_UCLK].enabled) {
		min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
					(PPCLK_UCLK << 16) | (min_freq & 0xffff))),
					"Failed to set soft min memclk !",
					return ret);

		min_freq = data->dpm_table.mem_table.dpm_state.hard_min_level;
		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetHardMinByFreq,
					(PPCLK_UCLK << 16) | (min_freq & 0xffff))),
					"Failed to set hard min memclk !",
					return ret);
	}

	/* UVD DPM covers both VCLK and DCLK */
	if (data->smu_features[GNLD_DPM_UVD].enabled) {
		min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level;

		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
					(PPCLK_VCLK << 16) | (min_freq & 0xffff))),
					"Failed to set soft min vclk!",
					return ret);

		min_freq = data->dpm_table.dclk_table.dpm_state.soft_min_level;

		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
					(PPCLK_DCLK << 16) | (min_freq & 0xffff))),
					"Failed to set soft min dclk!",
					return ret);
	}

	/* VCE ECLK soft minimum */
	if (data->smu_features[GNLD_DPM_VCE].enabled) {
		min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level;

		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
					(PPCLK_ECLK << 16) | (min_freq & 0xffff))),
					"Failed to set soft min eclk!",
					return ret);
	}

	/* SOCCLK soft minimum */
	if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
		min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level;

		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
					(PPCLK_SOCCLK << 16) | (min_freq & 0xffff))),
					"Failed to set soft min socclk!",
					return ret);
	}

	return ret;

}
1078 
/*
 * Push the soft maximum frequencies from the driver's DPM tables to
 * the SMC for every enabled DPM domain. Each message packs the clock
 * id in the upper 16 bits and the frequency in the lower 16 bits.
 * Returns 0 on success or the first failing smum message's error code.
 */
static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data = hwmgr->backend;
	uint32_t max_freq;
	int ret = 0;

	/* GFXCLK soft maximum */
	if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
		max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level;

		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
					(PPCLK_GFXCLK << 16) | (max_freq & 0xffff))),
					"Failed to set soft max gfxclk!",
					return ret);
	}

	/* UCLK soft maximum */
	if (data->smu_features[GNLD_DPM_UCLK].enabled) {
		max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level;

		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
					(PPCLK_UCLK << 16) | (max_freq & 0xffff))),
					"Failed to set soft max memclk!",
					return ret);
	}

	/* UVD DPM covers both VCLK and DCLK */
	if (data->smu_features[GNLD_DPM_UVD].enabled) {
		max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level;

		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
					(PPCLK_VCLK << 16) | (max_freq & 0xffff))),
					"Failed to set soft max vclk!",
					return ret);

		max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level;
		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
					(PPCLK_DCLK << 16) | (max_freq & 0xffff))),
					"Failed to set soft max dclk!",
					return ret);
	}

	/* VCE ECLK soft maximum */
	if (data->smu_features[GNLD_DPM_VCE].enabled) {
		max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level;

		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
					(PPCLK_ECLK << 16) | (max_freq & 0xffff))),
					"Failed to set soft max eclk!",
					return ret);
	}

	/* SOCCLK soft maximum */
	if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
		max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level;

		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
					(PPCLK_SOCCLK << 16) | (max_freq & 0xffff))),
					"Failed to set soft max socclk!",
					return ret);
	}

	return ret;
}
1144 
1145 int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
1146 {
1147 	struct vega12_hwmgr *data =
1148 			(struct vega12_hwmgr *)(hwmgr->backend);
1149 
1150 	if (data->smu_features[GNLD_DPM_VCE].supported) {
1151 		PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(hwmgr,
1152 				enable,
1153 				data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap),
1154 				"Attempt to Enable/Disable DPM VCE Failed!",
1155 				return -1);
1156 		data->smu_features[GNLD_DPM_VCE].enabled = enable;
1157 	}
1158 
1159 	return 0;
1160 }
1161 
/*
 * Return the min (@low true) or max GFX clock, scaled by 100.
 * NOTE(review): error paths return -1 through a uint32_t return type,
 * i.e. 0xFFFFFFFF — confirm callers treat that as an invalid marker.
 */
static uint32_t vega12_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
{
	struct vega12_hwmgr *data =
			(struct vega12_hwmgr *)(hwmgr->backend);
	uint32_t gfx_clk;

	if (!data->smu_features[GNLD_DPM_GFXCLK].enabled)
		return -1;

	if (low)
		PP_ASSERT_WITH_CODE(
			vega12_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, false) == 0,
			"[GetSclks]: fail to get min PPCLK_GFXCLK\n",
			return -1);
	else
		PP_ASSERT_WITH_CODE(
			vega12_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, true) == 0,
			"[GetSclks]: fail to get max PPCLK_GFXCLK\n",
			return -1);

	/* Scale by 100 — presumably MHz to 10 kHz units; confirm at callers */
	return (gfx_clk * 100);
}
1184 
/*
 * Return the min (@low true) or max memory clock, scaled by 100.
 * NOTE(review): error paths return -1 through a uint32_t return type,
 * i.e. 0xFFFFFFFF — confirm callers treat that as an invalid marker.
 */
static uint32_t vega12_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
{
	struct vega12_hwmgr *data =
			(struct vega12_hwmgr *)(hwmgr->backend);
	uint32_t mem_clk;

	if (!data->smu_features[GNLD_DPM_UCLK].enabled)
		return -1;

	if (low)
		PP_ASSERT_WITH_CODE(
			vega12_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, false) == 0,
			"[GetMclks]: fail to get min PPCLK_UCLK\n",
			return -1);
	else
		PP_ASSERT_WITH_CODE(
			vega12_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, true) == 0,
			"[GetMclks]: fail to get max PPCLK_UCLK\n",
			return -1);

	/* Scale by 100 — presumably MHz to 10 kHz units; confirm at callers */
	return (mem_clk * 100);
}
1207 
/*
 * Read the current GPU package power. The SMC query is currently
 * compiled out (#if 0), so this always returns 0 without writing to
 * @query.
 * NOTE(review): on the live path, *query is left untouched — confirm
 * callers pre-initialize it before use.
 */
static int vega12_get_gpu_power(struct pp_hwmgr *hwmgr, uint32_t *query)
{
#if 0
	uint32_t value;

	PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
			PPSMC_MSG_GetCurrPkgPwr),
			"Failed to get current package power!",
			return -EINVAL);

	value = smum_get_argument(hwmgr);
	/* power value is an integer */
	*query = value << 8;
#endif
	return 0;
}
1224 
1225 static int vega12_get_current_gfx_clk_freq(struct pp_hwmgr *hwmgr, uint32_t *gfx_freq)
1226 {
1227 	uint32_t gfx_clk = 0;
1228 
1229 	*gfx_freq = 0;
1230 
1231 	PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
1232 			PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16)) == 0,
1233 			"[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!",
1234 			return -EINVAL);
1235 	gfx_clk = smum_get_argument(hwmgr);
1236 
1237 	*gfx_freq = gfx_clk * 100;
1238 
1239 	return 0;
1240 }
1241 
1242 static int vega12_get_current_mclk_freq(struct pp_hwmgr *hwmgr, uint32_t *mclk_freq)
1243 {
1244 	uint32_t mem_clk = 0;
1245 
1246 	*mclk_freq = 0;
1247 
1248 	PP_ASSERT_WITH_CODE(
1249 			smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16)) == 0,
1250 			"[GetCurrentMClkFreq] Attempt to get Current MCLK Frequency Failed!",
1251 			return -EINVAL);
1252 	mem_clk = smum_get_argument(hwmgr);
1253 
1254 	*mclk_freq = mem_clk * 100;
1255 
1256 	return 0;
1257 }
1258 
/*
 * Report the current GPU activity percentage. The real SMC query is
 * compiled out (#if 0); a fixed placeholder value of 50% is returned
 * instead, with ret always 0 on the live path.
 */
static int vega12_get_current_activity_percent(
		struct pp_hwmgr *hwmgr,
		uint32_t *activity_percent)
{
	int ret = 0;
	uint32_t current_activity = 50;

#if 0
	ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0);
	if (!ret) {
		current_activity = smum_get_argument(hwmgr);
		if (current_activity > 100) {
			PP_ASSERT(false,
				  "[GetCurrentActivityPercent] Activity Percentage Exceeds 100!");
			current_activity = 100;
		}
	} else
		PP_ASSERT(false,
			"[GetCurrentActivityPercent] Attempt To Send Get Average Graphics Activity to SMU Failed!");
#endif
	*activity_percent = current_activity;

	return ret;
}
1283 
1284 static int vega12_read_sensor(struct pp_hwmgr *hwmgr, int idx,
1285 			      void *value, int *size)
1286 {
1287 	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1288 	int ret = 0;
1289 
1290 	switch (idx) {
1291 	case AMDGPU_PP_SENSOR_GFX_SCLK:
1292 		ret = vega12_get_current_gfx_clk_freq(hwmgr, (uint32_t *)value);
1293 		if (!ret)
1294 			*size = 4;
1295 		break;
1296 	case AMDGPU_PP_SENSOR_GFX_MCLK:
1297 		ret = vega12_get_current_mclk_freq(hwmgr, (uint32_t *)value);
1298 		if (!ret)
1299 			*size = 4;
1300 		break;
1301 	case AMDGPU_PP_SENSOR_GPU_LOAD:
1302 		ret = vega12_get_current_activity_percent(hwmgr, (uint32_t *)value);
1303 		if (!ret)
1304 			*size = 4;
1305 		break;
1306 	case AMDGPU_PP_SENSOR_GPU_TEMP:
1307 		*((uint32_t *)value) = vega12_thermal_get_temperature(hwmgr);
1308 		*size = 4;
1309 		break;
1310 	case AMDGPU_PP_SENSOR_UVD_POWER:
1311 		*((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
1312 		*size = 4;
1313 		break;
1314 	case AMDGPU_PP_SENSOR_VCE_POWER:
1315 		*((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
1316 		*size = 4;
1317 		break;
1318 	case AMDGPU_PP_SENSOR_GPU_POWER:
1319 		ret = vega12_get_gpu_power(hwmgr, (uint32_t *)value);
1320 
1321 		break;
1322 	default:
1323 		ret = -EINVAL;
1324 		break;
1325 	}
1326 	return ret;
1327 }
1328 
1329 static int vega12_notify_smc_display_change(struct pp_hwmgr *hwmgr,
1330 		bool has_disp)
1331 {
1332 	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1333 
1334 	if (data->smu_features[GNLD_DPM_UCLK].enabled)
1335 		return smum_send_msg_to_smc_with_parameter(hwmgr,
1336 			PPSMC_MSG_SetUclkFastSwitch,
1337 			has_disp ? 1 : 0);
1338 
1339 	return 0;
1340 }
1341 
1342 int vega12_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
1343 		struct pp_display_clock_request *clock_req);
1344 int vega12_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
1345 		struct pp_display_clock_request *clock_req)
1346 {
1347 	int result = 0;
1348 	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1349 	enum amd_pp_clock_type clk_type = clock_req->clock_type;
1350 	uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
1351 	PPCLK_e clk_select = 0;
1352 	uint32_t clk_request = 0;
1353 
1354 	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
1355 		switch (clk_type) {
1356 		case amd_pp_dcef_clock:
1357 			clk_select = PPCLK_DCEFCLK;
1358 			break;
1359 		case amd_pp_disp_clock:
1360 			clk_select = PPCLK_DISPCLK;
1361 			break;
1362 		case amd_pp_pixel_clock:
1363 			clk_select = PPCLK_PIXCLK;
1364 			break;
1365 		case amd_pp_phy_clock:
1366 			clk_select = PPCLK_PHYCLK;
1367 			break;
1368 		default:
1369 			pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
1370 			result = -1;
1371 			break;
1372 		}
1373 
1374 		if (!result) {
1375 			clk_request = (clk_select << 16) | clk_freq;
1376 			result = smum_send_msg_to_smc_with_parameter(hwmgr,
1377 					PPSMC_MSG_SetHardMinByFreq,
1378 					clk_request);
1379 		}
1380 	}
1381 
1382 	return result;
1383 }
1384 
/*
 * Re-synchronize the SMC with the current display configuration after
 * a power-state adjustment: pick fast vs. normal UCLK switching based
 * on the multi-monitor topology, then request the DCEF hard minimum
 * and (when supported) the DCEF deep-sleep minimum.
 * Returns 0 on success, -1 if the deep-sleep message fails.
 */
static int vega12_notify_smc_display_config_after_ps_adjustment(
		struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data =
			(struct vega12_hwmgr *)(hwmgr->backend);
	struct PP_Clocks min_clocks = {0};
	struct pp_display_clock_request clock_req;

	/* Fast UCLK switching is unsafe with unsynced multi-monitor setups */
	if ((hwmgr->display_config->num_display > 1) &&
	     !hwmgr->display_config->multi_monitor_in_sync &&
	     !hwmgr->display_config->nb_pstate_switch_disable)
		vega12_notify_smc_display_change(hwmgr, false);
	else
		vega12_notify_smc_display_change(hwmgr, true);

	min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
	min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk;
	min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;

	if (data->smu_features[GNLD_DPM_DCEFCLK].supported) {
		clock_req.clock_type = amd_pp_dcef_clock;
		/* dcefClock/10: convert to the kHz units the request expects */
		clock_req.clock_freq_in_khz = min_clocks.dcefClock/10;
		if (!vega12_display_clock_voltage_request(hwmgr, &clock_req)) {
			if (data->smu_features[GNLD_DS_DCEFCLK].supported)
				PP_ASSERT_WITH_CODE(
					!smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
					min_clocks.dcefClockInSR /100),
					"Attempt to set divider for DCEFCLK Failed!",
					return -1);
		} else {
			pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
		}
	}

	return 0;
}
1422 
1423 static int vega12_force_dpm_highest(struct pp_hwmgr *hwmgr)
1424 {
1425 	struct vega12_hwmgr *data =
1426 			(struct vega12_hwmgr *)(hwmgr->backend);
1427 
1428 	uint32_t soft_level;
1429 
1430 	soft_level = vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table));
1431 
1432 	data->dpm_table.gfx_table.dpm_state.soft_min_level =
1433 		data->dpm_table.gfx_table.dpm_state.soft_max_level =
1434 		data->dpm_table.gfx_table.dpm_levels[soft_level].value;
1435 
1436 	soft_level = vega12_find_highest_dpm_level(&(data->dpm_table.mem_table));
1437 
1438 	data->dpm_table.mem_table.dpm_state.soft_min_level =
1439 		data->dpm_table.mem_table.dpm_state.soft_max_level =
1440 		data->dpm_table.mem_table.dpm_levels[soft_level].value;
1441 
1442 	PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
1443 			"Failed to upload boot level to highest!",
1444 			return -1);
1445 
1446 	PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
1447 			"Failed to upload dpm max level to highest!",
1448 			return -1);
1449 
1450 	return 0;
1451 }
1452 
1453 static int vega12_force_dpm_lowest(struct pp_hwmgr *hwmgr)
1454 {
1455 	struct vega12_hwmgr *data =
1456 			(struct vega12_hwmgr *)(hwmgr->backend);
1457 	uint32_t soft_level;
1458 
1459 	soft_level = vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
1460 
1461 	data->dpm_table.gfx_table.dpm_state.soft_min_level =
1462 		data->dpm_table.gfx_table.dpm_state.soft_max_level =
1463 		data->dpm_table.gfx_table.dpm_levels[soft_level].value;
1464 
1465 	soft_level = vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table));
1466 
1467 	data->dpm_table.mem_table.dpm_state.soft_min_level =
1468 		data->dpm_table.mem_table.dpm_state.soft_max_level =
1469 		data->dpm_table.mem_table.dpm_levels[soft_level].value;
1470 
1471 	PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
1472 			"Failed to upload boot level to highest!",
1473 			return -1);
1474 
1475 	PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
1476 			"Failed to upload dpm max level to highest!",
1477 			return -1);
1478 
1479 	return 0;
1480 
1481 }
1482 
1483 static int vega12_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
1484 {
1485 	PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
1486 			"Failed to upload DPM Bootup Levels!",
1487 			return -1);
1488 
1489 	PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
1490 			"Failed to upload DPM Max Levels!",
1491 			return -1);
1492 
1493 	return 0;
1494 }
1495 
1496 static int vega12_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
1497 				uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
1498 {
1499 	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1500 	struct vega12_single_dpm_table *gfx_dpm_table = &(data->dpm_table.gfx_table);
1501 	struct vega12_single_dpm_table *mem_dpm_table = &(data->dpm_table.mem_table);
1502 	struct vega12_single_dpm_table *soc_dpm_table = &(data->dpm_table.soc_table);
1503 
1504 	*sclk_mask = 0;
1505 	*mclk_mask = 0;
1506 	*soc_mask  = 0;
1507 
1508 	if (gfx_dpm_table->count > VEGA12_UMD_PSTATE_GFXCLK_LEVEL &&
1509 	    mem_dpm_table->count > VEGA12_UMD_PSTATE_MCLK_LEVEL &&
1510 	    soc_dpm_table->count > VEGA12_UMD_PSTATE_SOCCLK_LEVEL) {
1511 		*sclk_mask = VEGA12_UMD_PSTATE_GFXCLK_LEVEL;
1512 		*mclk_mask = VEGA12_UMD_PSTATE_MCLK_LEVEL;
1513 		*soc_mask  = VEGA12_UMD_PSTATE_SOCCLK_LEVEL;
1514 	}
1515 
1516 	if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
1517 		*sclk_mask = 0;
1518 	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
1519 		*mclk_mask = 0;
1520 	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
1521 		*sclk_mask = gfx_dpm_table->count - 1;
1522 		*mclk_mask = mem_dpm_table->count - 1;
1523 		*soc_mask  = soc_dpm_table->count - 1;
1524 	}
1525 
1526 	return 0;
1527 }
1528 
1529 static void vega12_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
1530 {
1531 	switch (mode) {
1532 	case AMD_FAN_CTRL_NONE:
1533 		break;
1534 	case AMD_FAN_CTRL_MANUAL:
1535 		if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
1536 			vega12_fan_ctrl_stop_smc_fan_control(hwmgr);
1537 		break;
1538 	case AMD_FAN_CTRL_AUTO:
1539 		if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
1540 			vega12_fan_ctrl_start_smc_fan_control(hwmgr);
1541 		break;
1542 	default:
1543 		break;
1544 	}
1545 }
1546 
1547 static int vega12_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
1548 				enum amd_dpm_forced_level level)
1549 {
1550 	int ret = 0;
1551 	uint32_t sclk_mask = 0;
1552 	uint32_t mclk_mask = 0;
1553 	uint32_t soc_mask = 0;
1554 
1555 	switch (level) {
1556 	case AMD_DPM_FORCED_LEVEL_HIGH:
1557 		ret = vega12_force_dpm_highest(hwmgr);
1558 		break;
1559 	case AMD_DPM_FORCED_LEVEL_LOW:
1560 		ret = vega12_force_dpm_lowest(hwmgr);
1561 		break;
1562 	case AMD_DPM_FORCED_LEVEL_AUTO:
1563 		ret = vega12_unforce_dpm_levels(hwmgr);
1564 		break;
1565 	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1566 	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1567 	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
1568 	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1569 		ret = vega12_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
1570 		if (ret)
1571 			return ret;
1572 		vega12_force_clock_level(hwmgr, PP_SCLK, 1 << sclk_mask);
1573 		vega12_force_clock_level(hwmgr, PP_MCLK, 1 << mclk_mask);
1574 		break;
1575 	case AMD_DPM_FORCED_LEVEL_MANUAL:
1576 	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
1577 	default:
1578 		break;
1579 	}
1580 
1581 	return ret;
1582 }
1583 
1584 static uint32_t vega12_get_fan_control_mode(struct pp_hwmgr *hwmgr)
1585 {
1586 	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1587 
1588 	if (data->smu_features[GNLD_FAN_CONTROL].enabled == false)
1589 		return AMD_FAN_CTRL_MANUAL;
1590 	else
1591 		return AMD_FAN_CTRL_AUTO;
1592 }
1593 
/*
 * Fill in DAL's simple clock info. The implementation is compiled out
 * (#if 0), so this currently returns 0 without writing to @info.
 * NOTE(review): callers receive an untouched *info — confirm they
 * tolerate that.
 */
static int vega12_get_dal_power_level(struct pp_hwmgr *hwmgr,
		struct amd_pp_simple_clock_info *info)
{
#if 0
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)hwmgr->pptable;
	struct phm_clock_and_voltage_limits *max_limits =
			&table_info->max_clock_voltage_on_ac;

	info->engine_max_clock = max_limits->sclk;
	info->memory_max_clock = max_limits->mclk;
#endif
	return 0;
}
1608 
1609 static int vega12_get_clock_ranges(struct pp_hwmgr *hwmgr,
1610 		uint32_t *clock,
1611 		PPCLK_e clock_select,
1612 		bool max)
1613 {
1614 	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1615 
1616 	if (max)
1617 		*clock = data->clk_range[clock_select].ACMax;
1618 	else
1619 		*clock = data->clk_range[clock_select].ACMin;
1620 
1621 	return 0;
1622 }
1623 
1624 static int vega12_get_sclks(struct pp_hwmgr *hwmgr,
1625 		struct pp_clock_levels_with_latency *clocks)
1626 {
1627 	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1628 	uint32_t ucount;
1629 	int i;
1630 	struct vega12_single_dpm_table *dpm_table;
1631 
1632 	if (!data->smu_features[GNLD_DPM_GFXCLK].enabled)
1633 		return -1;
1634 
1635 	dpm_table = &(data->dpm_table.gfx_table);
1636 	ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
1637 		MAX_NUM_CLOCKS : dpm_table->count;
1638 
1639 	for (i = 0; i < ucount; i++) {
1640 		clocks->data[i].clocks_in_khz =
1641 			dpm_table->dpm_levels[i].value * 1000;
1642 
1643 		clocks->data[i].latency_in_us = 0;
1644 	}
1645 
1646 	clocks->num_levels = ucount;
1647 
1648 	return 0;
1649 }
1650 
/*
 * Memory latency is reported as a flat 25 (microseconds, per the
 * latency_in_us consumers) regardless of the requested @clock.
 */
static uint32_t vega12_get_mem_latency(struct pp_hwmgr *hwmgr,
		uint32_t clock)
{
	return 25;
}
1656 
1657 static int vega12_get_memclocks(struct pp_hwmgr *hwmgr,
1658 		struct pp_clock_levels_with_latency *clocks)
1659 {
1660 	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1661 	uint32_t ucount;
1662 	int i;
1663 	struct vega12_single_dpm_table *dpm_table;
1664 	if (!data->smu_features[GNLD_DPM_UCLK].enabled)
1665 		return -1;
1666 
1667 	dpm_table = &(data->dpm_table.mem_table);
1668 	ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
1669 		MAX_NUM_CLOCKS : dpm_table->count;
1670 
1671 	for (i = 0; i < ucount; i++) {
1672 		clocks->data[i].clocks_in_khz = dpm_table->dpm_levels[i].value * 1000;
1673 		data->mclk_latency_table.entries[i].frequency = dpm_table->dpm_levels[i].value * 100;
1674 		clocks->data[i].latency_in_us =
1675 			data->mclk_latency_table.entries[i].latency =
1676 			vega12_get_mem_latency(hwmgr, dpm_table->dpm_levels[i].value);
1677 	}
1678 
1679 	clocks->num_levels = data->mclk_latency_table.count = ucount;
1680 
1681 	return 0;
1682 }
1683 
1684 static int vega12_get_dcefclocks(struct pp_hwmgr *hwmgr,
1685 		struct pp_clock_levels_with_latency *clocks)
1686 {
1687 	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1688 	uint32_t ucount;
1689 	int i;
1690 	struct vega12_single_dpm_table *dpm_table;
1691 
1692 	if (!data->smu_features[GNLD_DPM_DCEFCLK].enabled)
1693 		return -1;
1694 
1695 
1696 	dpm_table = &(data->dpm_table.dcef_table);
1697 	ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
1698 		MAX_NUM_CLOCKS : dpm_table->count;
1699 
1700 	for (i = 0; i < ucount; i++) {
1701 		clocks->data[i].clocks_in_khz =
1702 			dpm_table->dpm_levels[i].value * 1000;
1703 
1704 		clocks->data[i].latency_in_us = 0;
1705 	}
1706 
1707 	clocks->num_levels = ucount;
1708 
1709 	return 0;
1710 }
1711 
1712 static int vega12_get_socclocks(struct pp_hwmgr *hwmgr,
1713 		struct pp_clock_levels_with_latency *clocks)
1714 {
1715 	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1716 	uint32_t ucount;
1717 	int i;
1718 	struct vega12_single_dpm_table *dpm_table;
1719 
1720 	if (!data->smu_features[GNLD_DPM_SOCCLK].enabled)
1721 		return -1;
1722 
1723 
1724 	dpm_table = &(data->dpm_table.soc_table);
1725 	ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
1726 		MAX_NUM_CLOCKS : dpm_table->count;
1727 
1728 	for (i = 0; i < ucount; i++) {
1729 		clocks->data[i].clocks_in_khz =
1730 			dpm_table->dpm_levels[i].value * 1000;
1731 
1732 		clocks->data[i].latency_in_us = 0;
1733 	}
1734 
1735 	clocks->num_levels = ucount;
1736 
1737 	return 0;
1738 
1739 }
1740 
1741 static int vega12_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
1742 		enum amd_pp_clock_type type,
1743 		struct pp_clock_levels_with_latency *clocks)
1744 {
1745 	int ret;
1746 
1747 	switch (type) {
1748 	case amd_pp_sys_clock:
1749 		ret = vega12_get_sclks(hwmgr, clocks);
1750 		break;
1751 	case amd_pp_mem_clock:
1752 		ret = vega12_get_memclocks(hwmgr, clocks);
1753 		break;
1754 	case amd_pp_dcef_clock:
1755 		ret = vega12_get_dcefclocks(hwmgr, clocks);
1756 		break;
1757 	case amd_pp_soc_clock:
1758 		ret = vega12_get_socclocks(hwmgr, clocks);
1759 		break;
1760 	default:
1761 		return -EINVAL;
1762 	}
1763 
1764 	return ret;
1765 }
1766 
/*
 * Voltage-annotated clock levels are not provided on Vega12; an empty
 * level list is reported for every clock type.
 */
static int vega12_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
		enum amd_pp_clock_type type,
		struct pp_clock_levels_with_voltage *clocks)
{
	clocks->num_levels = 0;

	return 0;
}
1775 
1776 static int vega12_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
1777 							void *clock_ranges)
1778 {
1779 	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1780 	Watermarks_t *table = &(data->smc_state_table.water_marks_table);
1781 	struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
1782 
1783 	if (!data->registry_data.disable_water_mark &&
1784 			data->smu_features[GNLD_DPM_DCEFCLK].supported &&
1785 			data->smu_features[GNLD_DPM_SOCCLK].supported) {
1786 		smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
1787 		data->water_marks_bitmap |= WaterMarksExist;
1788 		data->water_marks_bitmap &= ~WaterMarksLoaded;
1789 	}
1790 
1791 	return 0;
1792 }
1793 
/*
 * Constrain a clock domain to the DPM levels selected in @mask: the
 * lowest set bit becomes the soft minimum level and the highest set
 * bit the soft maximum; both are uploaded to the SMC. Only SCLK and
 * MCLK are honored; PP_PCIE and other types are accepted but ignored.
 * Returns 0 on success or the failing upload's error code.
 */
static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, uint32_t mask)
{
	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
	uint32_t soft_min_level, soft_max_level;
	int ret = 0;

	switch (type) {
	case PP_SCLK:
		/* ffs/fls give 1-based bit positions; 0 mask means level 0 */
		soft_min_level = mask ? (ffs(mask) - 1) : 0;
		soft_max_level = mask ? (fls(mask) - 1) : 0;

		data->dpm_table.gfx_table.dpm_state.soft_min_level =
			data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
		data->dpm_table.gfx_table.dpm_state.soft_max_level =
			data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;

		ret = vega12_upload_dpm_min_level(hwmgr);
		PP_ASSERT_WITH_CODE(!ret,
			"Failed to upload boot level to lowest!",
			return ret);

		ret = vega12_upload_dpm_max_level(hwmgr);
		PP_ASSERT_WITH_CODE(!ret,
			"Failed to upload dpm max level to highest!",
			return ret);
		break;

	case PP_MCLK:
		soft_min_level = mask ? (ffs(mask) - 1) : 0;
		soft_max_level = mask ? (fls(mask) - 1) : 0;

		data->dpm_table.mem_table.dpm_state.soft_min_level =
			data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
		data->dpm_table.mem_table.dpm_state.soft_max_level =
			data->dpm_table.mem_table.dpm_levels[soft_max_level].value;

		ret = vega12_upload_dpm_min_level(hwmgr);
		PP_ASSERT_WITH_CODE(!ret,
			"Failed to upload boot level to lowest!",
			return ret);

		ret = vega12_upload_dpm_max_level(hwmgr);
		PP_ASSERT_WITH_CODE(!ret,
			"Failed to upload dpm max level to highest!",
			return ret);

		break;

	case PP_PCIE:
		break;

	default:
		break;
	}

	return 0;
}
1852 
/*
 * sysfs "print clock levels" handler: format the DPM level table for the
 * requested clock type into @buf, marking the currently active level
 * with '*'.  Returns the number of bytes written (0 for unhandled types,
 * including PP_PCIE), or -1 if querying the SMU failed.
 *
 * NOTE(review): `now` is compared as now / 100 against clocks_in_khz /
 * 1000 (MHz), which implies the current-frequency helpers return units
 * of 10 kHz — TODO confirm against vega12_get_current_gfx_clk_freq /
 * vega12_get_current_mclk_freq.
 * NOTE(review): sprintf() into @buf is unbounded here; callers are
 * presumed to pass a PAGE_SIZE sysfs buffer — verify.
 */
static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, char *buf)
{
	int i, now, size = 0;
	struct pp_clock_levels_with_latency clocks;

	switch (type) {
	case PP_SCLK:
		/* Current gfx clock, then the full gfx DPM level list. */
		PP_ASSERT_WITH_CODE(
				vega12_get_current_gfx_clk_freq(hwmgr, &now) == 0,
				"Attempt to get current gfx clk Failed!",
				return -1);

		PP_ASSERT_WITH_CODE(
				vega12_get_sclks(hwmgr, &clocks) == 0,
				"Attempt to get gfx clk levels Failed!",
				return -1);
		/* One "index: freqMhz [*]" line per DPM level. */
		for (i = 0; i < clocks.num_levels; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
				i, clocks.data[i].clocks_in_khz / 1000,
				(clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
		break;

	case PP_MCLK:
		/* Same as PP_SCLK but for the memory clock domain. */
		PP_ASSERT_WITH_CODE(
				vega12_get_current_mclk_freq(hwmgr, &now) == 0,
				"Attempt to get current mclk freq Failed!",
				return -1);

		PP_ASSERT_WITH_CODE(
				vega12_get_memclocks(hwmgr, &clocks) == 0,
				"Attempt to get memory clk levels Failed!",
				return -1);
		for (i = 0; i < clocks.num_levels; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
				i, clocks.data[i].clocks_in_khz / 1000,
				(clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
		break;

	case PP_PCIE:
		/* PCIe level printing not implemented — prints nothing. */
		break;

	default:
		break;
	}
	return size;
}
1900 
/*
 * Recompute the soft/hard min/max limits of every DPM table (gfx, mem,
 * vclk, dclk, soc, eclk) from the current display configuration and the
 * forced-performance (UMD pstate) level.
 *
 * For each domain the pattern is: reset limits to the full table range,
 * then, when PHM_PlatformCaps_UMDPState is set, pin soft limits to the
 * domain's profiling level / lowest level / highest level depending on
 * hwmgr->dpm_level.  The memory domain additionally honours DAL's UCLK
 * hard-minimum and mclk-switch constraints.  Always returns 0.
 *
 * NOTE(review): vblank_too_short is initialized false and never updated,
 * so it currently has no effect on disable_mclk_switching — presumably a
 * placeholder for a vblank-time check; confirm intent.
 */
static int vega12_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
	struct vega12_single_dpm_table *dpm_table;
	bool vblank_too_short = false;
	bool disable_mclk_switching;
	uint32_t i, latency;

	/* Mclk switching is unsafe with multiple unsynchronized displays. */
	disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
			          !hwmgr->display_config->multi_monitor_in_sync) ||
			          vblank_too_short;
	latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;

	/* gfxclk */
	dpm_table = &(data->dpm_table.gfx_table);
	/* Start from the widest possible range for this domain. */
	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;

	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
		/* Default UMD pstate: pin both soft limits to the gfx
		 * profiling level, if the table is deep enough. */
		if (VEGA12_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
		}

		/* min-sclk profile: lock to lowest gfx level. */
		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
		}

		/* peak profile: lock to highest gfx level. */
		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		}
	}

	/* memclk */
	dpm_table = &(data->dpm_table.mem_table);
	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;

	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
		if (VEGA12_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		}
	}

	/* honour DAL's UCLK Hardmin */
	/* min_mem_set_clock is in units 100x the DPM value (10 kHz vs MHz
	 * — TODO confirm), hence the /100 here and below. */
	if (dpm_table->dpm_state.hard_min_level < (hwmgr->display_config->min_mem_set_clock / 100))
		dpm_table->dpm_state.hard_min_level = hwmgr->display_config->min_mem_set_clock / 100;

	/* Hardmin is dependent on displayconfig */
	if (disable_mclk_switching) {
		/* Default to the top level, then pick the lowest level
		 * whose latency fits the tolerable-latency budget and is
		 * still above DAL's minimum. */
		dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		for (i = 0; i < data->mclk_latency_table.count - 1; i++) {
			if (data->mclk_latency_table.entries[i].latency <= latency) {
				if (dpm_table->dpm_levels[i].value >= (hwmgr->display_config->min_mem_set_clock / 100)) {
					dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[i].value;
					break;
				}
			}
		}
	}

	/* NB pstate switching disabled forces the top memory level. */
	if (hwmgr->display_config->nb_pstate_switch_disable)
		dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;

	/* vclk */
	dpm_table = &(data->dpm_table.vclk_table);
	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;

	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
		/* vclk and dclk share the UVD profiling level. */
		if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		}
	}

	/* dclk */
	dpm_table = &(data->dpm_table.dclk_table);
	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;

	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
		if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		}
	}

	/* socclk */
	dpm_table = &(data->dpm_table.soc_table);
	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;

	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
		if (VEGA12_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		}
	}

	/* eclk */
	dpm_table = &(data->dpm_table.eclk_table);
	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;

	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
		if (VEGA12_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		}
	}

	return 0;
}
2060 
2061 static int vega12_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr,
2062 		struct vega12_single_dpm_table *dpm_table)
2063 {
2064 	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2065 	int ret = 0;
2066 
2067 	if (data->smu_features[GNLD_DPM_UCLK].enabled) {
2068 		PP_ASSERT_WITH_CODE(dpm_table->count > 0,
2069 				"[SetUclkToHightestDpmLevel] Dpm table has no entry!",
2070 				return -EINVAL);
2071 		PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_UCLK_DPM_LEVELS,
2072 				"[SetUclkToHightestDpmLevel] Dpm table has too many entries!",
2073 				return -EINVAL);
2074 
2075 		dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2076 		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
2077 				PPSMC_MSG_SetHardMinByFreq,
2078 				(PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level)),
2079 				"[SetUclkToHightestDpmLevel] Set hard min uclk failed!",
2080 				return ret);
2081 	}
2082 
2083 	return ret;
2084 }
2085 
2086 static int vega12_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
2087 {
2088 	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2089 	int ret = 0;
2090 
2091 	smum_send_msg_to_smc_with_parameter(hwmgr,
2092 			PPSMC_MSG_NumOfDisplays, 0);
2093 
2094 	ret = vega12_set_uclk_to_highest_dpm_level(hwmgr,
2095 			&data->dpm_table.mem_table);
2096 
2097 	return ret;
2098 }
2099 
2100 static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
2101 {
2102 	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2103 	int result = 0;
2104 	Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);
2105 
2106 	if ((data->water_marks_bitmap & WaterMarksExist) &&
2107 			!(data->water_marks_bitmap & WaterMarksLoaded)) {
2108 		result = vega12_copy_table_to_smc(hwmgr,
2109 			(uint8_t *)wm_table, TABLE_WATERMARKS);
2110 		PP_ASSERT_WITH_CODE(result, "Failed to update WMTABLE!", return EINVAL);
2111 		data->water_marks_bitmap |= WaterMarksLoaded;
2112 	}
2113 
2114 	if ((data->water_marks_bitmap & WaterMarksExist) &&
2115 		data->smu_features[GNLD_DPM_DCEFCLK].supported &&
2116 		data->smu_features[GNLD_DPM_SOCCLK].supported)
2117 		smum_send_msg_to_smc_with_parameter(hwmgr,
2118 			PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display);
2119 
2120 	return result;
2121 }
2122 
2123 int vega12_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable);
2124 int vega12_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
2125 {
2126 	struct vega12_hwmgr *data =
2127 			(struct vega12_hwmgr *)(hwmgr->backend);
2128 
2129 	if (data->smu_features[GNLD_DPM_UVD].supported) {
2130 		PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(hwmgr,
2131 				enable,
2132 				data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap),
2133 				"Attempt to Enable/Disable DPM UVD Failed!",
2134 				return -1);
2135 		data->smu_features[GNLD_DPM_UVD].enabled = enable;
2136 	}
2137 
2138 	return 0;
2139 }
2140 
2141 static void vega12_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
2142 {
2143 	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2144 
2145 	if (data->vce_power_gated == bgate)
2146 		return;
2147 
2148 	data->vce_power_gated = bgate;
2149 	vega12_enable_disable_vce_dpm(hwmgr, !bgate);
2150 }
2151 
2152 static void vega12_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
2153 {
2154 	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2155 
2156 	if (data->uvd_power_gated == bgate)
2157 		return;
2158 
2159 	data->uvd_power_gated = bgate;
2160 	vega12_enable_disable_uvd_dpm(hwmgr, !bgate);
2161 }
2162 
2163 static bool
2164 vega12_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
2165 {
2166 	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2167 	bool is_update_required = false;
2168 
2169 	if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
2170 		is_update_required = true;
2171 
2172 	if (data->registry_data.gfx_clk_deep_sleep_support) {
2173 		if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr)
2174 			is_update_required = true;
2175 	}
2176 
2177 	return is_update_required;
2178 }
2179 
2180 static int vega12_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
2181 {
2182 	int tmp_result, result = 0;
2183 
2184 	tmp_result = vega12_disable_all_smu_features(hwmgr);
2185 	PP_ASSERT_WITH_CODE((tmp_result == 0),
2186 			"Failed to disable all smu features!", result = tmp_result);
2187 
2188 	return result;
2189 }
2190 
2191 static int vega12_power_off_asic(struct pp_hwmgr *hwmgr)
2192 {
2193 	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2194 	int result;
2195 
2196 	result = vega12_disable_dpm_tasks(hwmgr);
2197 	PP_ASSERT_WITH_CODE((0 == result),
2198 			"[disable_dpm_tasks] Failed to disable DPM!",
2199 			);
2200 	data->water_marks_bitmap &= ~(WaterMarksLoaded);
2201 
2202 	return result;
2203 }
2204 
#if 0
/*
 * Dead code (compiled out): find the lowest enabled gfx/mem DPM level
 * indices whose values are at least min_sclk / min_mclk.
 *
 * NOTE(review): *sclk_idx / *mclk_idx are left untouched when no level
 * qualifies — callers would need to pre-initialize them.
 */
static void vega12_find_min_clock_index(struct pp_hwmgr *hwmgr,
		uint32_t *sclk_idx, uint32_t *mclk_idx,
		uint32_t min_sclk, uint32_t min_mclk)
{
	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
	struct vega12_dpm_table *dpm_table = &(data->dpm_table);
	uint32_t i;

	/* First enabled gfx level meeting the sclk floor. */
	for (i = 0; i < dpm_table->gfx_table.count; i++) {
		if (dpm_table->gfx_table.dpm_levels[i].enabled &&
			dpm_table->gfx_table.dpm_levels[i].value >= min_sclk) {
			*sclk_idx = i;
			break;
		}
	}

	/* First enabled mem level meeting the mclk floor. */
	for (i = 0; i < dpm_table->mem_table.count; i++) {
		if (dpm_table->mem_table.dpm_levels[i].enabled &&
			dpm_table->mem_table.dpm_levels[i].value >= min_mclk) {
			*mclk_idx = i;
			break;
		}
	}
}
#endif
2231 
#if 0
/*
 * Dead code (compiled out): power-profile and sclk/mclk overdrive
 * accessors, referenced only from the #if 0 entries of the hwmgr
 * function table below.
 */
static int vega12_set_power_profile_state(struct pp_hwmgr *hwmgr,
		struct amd_pp_profile *request)
{
	return 0;
}

/* Percentage the top sclk level is overdriven past the golden table. */
static int vega12_get_sclk_od(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
	struct vega12_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
	struct vega12_single_dpm_table *golden_sclk_table =
			&(data->golden_dpm_table.gfx_table);
	int value;

	/* (current_top - golden_top) * 100 / golden_top */
	value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
			100 /
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;

	return value;
}

static int vega12_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
{
	return 0;
}

/* Percentage the top mclk level is overdriven past the golden table. */
static int vega12_get_mclk_od(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
	struct vega12_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
	struct vega12_single_dpm_table *golden_mclk_table =
			&(data->golden_dpm_table.mem_table);
	int value;

	/* (current_top - golden_top) * 100 / golden_top */
	value = (mclk_table->dpm_levels
			[mclk_table->count - 1].value -
			golden_mclk_table->dpm_levels
			[golden_mclk_table->count - 1].value) *
			100 /
			golden_mclk_table->dpm_levels
			[golden_mclk_table->count - 1].value;

	return value;
}

static int vega12_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
{
	return 0;
}
#endif
2284 
2285 static int vega12_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
2286 					uint32_t virtual_addr_low,
2287 					uint32_t virtual_addr_hi,
2288 					uint32_t mc_addr_low,
2289 					uint32_t mc_addr_hi,
2290 					uint32_t size)
2291 {
2292 	smum_send_msg_to_smc_with_parameter(hwmgr,
2293 					PPSMC_MSG_SetSystemVirtualDramAddrHigh,
2294 					virtual_addr_hi);
2295 	smum_send_msg_to_smc_with_parameter(hwmgr,
2296 					PPSMC_MSG_SetSystemVirtualDramAddrLow,
2297 					virtual_addr_low);
2298 	smum_send_msg_to_smc_with_parameter(hwmgr,
2299 					PPSMC_MSG_DramLogSetDramAddrHigh,
2300 					mc_addr_hi);
2301 
2302 	smum_send_msg_to_smc_with_parameter(hwmgr,
2303 					PPSMC_MSG_DramLogSetDramAddrLow,
2304 					mc_addr_low);
2305 
2306 	smum_send_msg_to_smc_with_parameter(hwmgr,
2307 					PPSMC_MSG_DramLogSetDramSize,
2308 					size);
2309 	return 0;
2310 }
2311 
2312 static int vega12_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
2313 		struct PP_TemperatureRange *thermal_data)
2314 {
2315 	struct phm_ppt_v3_information *pptable_information =
2316 		(struct phm_ppt_v3_information *)hwmgr->pptable;
2317 
2318 	memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
2319 
2320 	thermal_data->max = pptable_information->us_software_shutdown_temp *
2321 		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2322 
2323 	return 0;
2324 }
2325 
2326 static int vega12_enable_gfx_off(struct pp_hwmgr *hwmgr)
2327 {
2328 	struct vega12_hwmgr *data =
2329 			(struct vega12_hwmgr *)(hwmgr->backend);
2330 	int ret = 0;
2331 
2332 	if (data->gfxoff_controlled_by_driver)
2333 		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_AllowGfxOff);
2334 
2335 	return ret;
2336 }
2337 
2338 static int vega12_disable_gfx_off(struct pp_hwmgr *hwmgr)
2339 {
2340 	struct vega12_hwmgr *data =
2341 			(struct vega12_hwmgr *)(hwmgr->backend);
2342 	int ret = 0;
2343 
2344 	if (data->gfxoff_controlled_by_driver)
2345 		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisallowGfxOff);
2346 
2347 	return ret;
2348 }
2349 
2350 static int vega12_gfx_off_control(struct pp_hwmgr *hwmgr, bool enable)
2351 {
2352 	if (enable)
2353 		return vega12_enable_gfx_off(hwmgr);
2354 	else
2355 		return vega12_disable_gfx_off(hwmgr);
2356 }
2357 
/* Vega12 implementation of the powerplay hardware-manager interface. */
static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
	/* lifecycle / setup */
	.backend_init = vega12_hwmgr_backend_init,
	.backend_fini = vega12_hwmgr_backend_fini,
	.asic_setup = vega12_setup_asic_task,
	.dynamic_state_management_enable = vega12_enable_dpm_tasks,
	.dynamic_state_management_disable = vega12_disable_dpm_tasks,
	.patch_boot_state = vega12_patch_boot_state,
	/* clock queries */
	.get_sclk = vega12_dpm_get_sclk,
	.get_mclk = vega12_dpm_get_mclk,
	.notify_smc_display_config_after_ps_adjustment =
			vega12_notify_smc_display_config_after_ps_adjustment,
	.force_dpm_level = vega12_dpm_force_dpm_level,
	/* thermal / fan control */
	.stop_thermal_controller = vega12_thermal_stop_thermal_controller,
	.get_fan_speed_info = vega12_fan_ctrl_get_fan_speed_info,
	.reset_fan_speed_to_default =
			vega12_fan_ctrl_reset_fan_speed_to_default,
	.get_fan_speed_rpm = vega12_fan_ctrl_get_fan_speed_rpm,
	.set_fan_control_mode = vega12_set_fan_control_mode,
	.get_fan_control_mode = vega12_get_fan_control_mode,
	.read_sensor = vega12_read_sensor,
	/* display (DAL) interaction */
	.get_dal_power_level = vega12_get_dal_power_level,
	.get_clock_by_type_with_latency = vega12_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = vega12_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = vega12_set_watermarks_for_clocks_ranges,
	.display_clock_voltage_request = vega12_display_clock_voltage_request,
	.force_clock_level = vega12_force_clock_level,
	.print_clock_levels = vega12_print_clock_levels,
	.apply_clocks_adjust_rules =
		vega12_apply_clocks_adjust_rules,
	.pre_display_config_changed =
		vega12_pre_display_configuration_changed_task,
	.display_config_changed = vega12_display_configuration_changed_task,
	/* powergating */
	.powergate_uvd = vega12_power_gate_uvd,
	.powergate_vce = vega12_power_gate_vce,
	.check_smc_update_required_for_display_configuration =
			vega12_check_smc_update_required_for_display_configuration,
	.power_off_asic = vega12_power_off_asic,
	.disable_smc_firmware_ctf = vega12_thermal_disable_alert,
#if 0
	/* overdrive hooks — implementations are also under #if 0 above */
	.set_power_profile_state = vega12_set_power_profile_state,
	.get_sclk_od = vega12_get_sclk_od,
	.set_sclk_od = vega12_set_sclk_od,
	.get_mclk_od = vega12_get_mclk_od,
	.set_mclk_od = vega12_set_mclk_od,
#endif
	/* misc */
	.notify_cac_buffer_info = vega12_notify_cac_buffer_info,
	.get_thermal_temperature_range = vega12_get_thermal_temperature_range,
	.register_irq_handlers = smu9_register_irq_handlers,
	.start_thermal_controller = vega12_start_thermal_controller,
	.powergate_gfx = vega12_gfx_off_control,
};
2409 
2410 int vega12_hwmgr_init(struct pp_hwmgr *hwmgr);
2411 int vega12_hwmgr_init(struct pp_hwmgr *hwmgr)
2412 {
2413 	hwmgr->hwmgr_func = &vega12_hwmgr_funcs;
2414 	hwmgr->pptable_func = &vega12_pptable_funcs;
2415 
2416 	return 0;
2417 }
2418