/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define SWSMU_CODE_LAYER_L2

#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/i2c.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_atombios.h"
#include "smu_v13_0.h"
#include "smu13_driver_if_v13_0_0.h"
#include "soc15_common.h"
#include "atom.h"
#include "smu_v13_0_0_ppt.h"
#include "smu_v13_0_0_pptable.h"
#include "smu_v13_0_0_ppsmc.h"
#include "nbio/nbio_4_3_0_offset.h"
#include "nbio/nbio_4_3_0_sh_mask.h"
#include "mp/mp_13_0_0_offset.h"
#include "mp/mp_13_0_0_sh_mask.h"

#include "asic_reg/mp/mp_13_0_0_sh_mask.h"
#include "smu_cmn.h"
#include "amdgpu_ras.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

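/* Map an i2c adapter embedded at adev->pm.smu_i2c back to its owning amdgpu_device. */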
#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))

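/*
 * Bitmask of the core DPM features. smu_v13_0_0_is_dpm_running() below
 * reports DPM as running when any of these is enabled on the PMFW side.
 */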
#define FEATURE_MASK(feature) (1ULL << feature)
#define SMC_DPM_FEATURE ( \
	FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT)     | \
	FEATURE_MASK(FEATURE_DPM_UCLK_BIT)       | \
	FEATURE_MASK(FEATURE_DPM_LINK_BIT)       | \
	FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT)     | \
	FEATURE_MASK(FEATURE_DPM_FCLK_BIT)       | \
	FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT))

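/* Size of the MP0/MP1 shared data region that holds the combo pptable: 0x4000 bytes (16 KB). */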
#define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE	0x4000

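/*
 * Driver message -> PPSMC message mapping. The trailing 0/1 column is
 * MSG_MAP()'s valid-in-VF flag: messages marked 1 may also be issued
 * from an SR-IOV virtual function.
 */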
static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] = {
	MSG_MAP(TestMessage,			PPSMC_MSG_TestMessage,                 1),
	MSG_MAP(GetSmuVersion,			PPSMC_MSG_GetSmuVersion,               1),
	MSG_MAP(GetDriverIfVersion,		PPSMC_MSG_GetDriverIfVersion,          1),
	MSG_MAP(SetAllowedFeaturesMaskLow,	PPSMC_MSG_SetAllowedFeaturesMaskLow,   0),
	MSG_MAP(SetAllowedFeaturesMaskHigh,	PPSMC_MSG_SetAllowedFeaturesMaskHigh,  0),
	MSG_MAP(EnableAllSmuFeatures,		PPSMC_MSG_EnableAllSmuFeatures,        0),
	MSG_MAP(DisableAllSmuFeatures,		PPSMC_MSG_DisableAllSmuFeatures,       0),
	MSG_MAP(EnableSmuFeaturesLow,		PPSMC_MSG_EnableSmuFeaturesLow,        1),
	MSG_MAP(EnableSmuFeaturesHigh,		PPSMC_MSG_EnableSmuFeaturesHigh,       1),
	MSG_MAP(DisableSmuFeaturesLow,		PPSMC_MSG_DisableSmuFeaturesLow,       1),
	MSG_MAP(DisableSmuFeaturesHigh,		PPSMC_MSG_DisableSmuFeaturesHigh,      1),
	MSG_MAP(GetEnabledSmuFeaturesLow,	PPSMC_MSG_GetRunningSmuFeaturesLow,    1),
	MSG_MAP(GetEnabledSmuFeaturesHigh,	PPSMC_MSG_GetRunningSmuFeaturesHigh,   1),
	MSG_MAP(SetWorkloadMask,		PPSMC_MSG_SetWorkloadMask,             1),
	MSG_MAP(SetPptLimit,			PPSMC_MSG_SetPptLimit,                 0),
	MSG_MAP(SetDriverDramAddrHigh,		PPSMC_MSG_SetDriverDramAddrHigh,       1),
	MSG_MAP(SetDriverDramAddrLow,		PPSMC_MSG_SetDriverDramAddrLow,        1),
	MSG_MAP(SetToolsDramAddrHigh,		PPSMC_MSG_SetToolsDramAddrHigh,        0),
	MSG_MAP(SetToolsDramAddrLow,		PPSMC_MSG_SetToolsDramAddrLow,         0),
	MSG_MAP(TransferTableSmu2Dram,		PPSMC_MSG_TransferTableSmu2Dram,       1),
	MSG_MAP(TransferTableDram2Smu,		PPSMC_MSG_TransferTableDram2Smu,       0),
	MSG_MAP(UseDefaultPPTable,		PPSMC_MSG_UseDefaultPPTable,           0),
	MSG_MAP(RunDcBtc,			PPSMC_MSG_RunDcBtc,                    0),
	MSG_MAP(EnterBaco,			PPSMC_MSG_EnterBaco,                   0),
	MSG_MAP(ExitBaco,			PPSMC_MSG_ExitBaco,                    0),
	MSG_MAP(SetSoftMinByFreq,		PPSMC_MSG_SetSoftMinByFreq,            1),
	MSG_MAP(SetSoftMaxByFreq,		PPSMC_MSG_SetSoftMaxByFreq,            1),
	MSG_MAP(SetHardMinByFreq,		PPSMC_MSG_SetHardMinByFreq,            1),
	MSG_MAP(SetHardMaxByFreq,		PPSMC_MSG_SetHardMaxByFreq,            0),
	MSG_MAP(GetMinDpmFreq,			PPSMC_MSG_GetMinDpmFreq,               1),
	MSG_MAP(GetMaxDpmFreq,			PPSMC_MSG_GetMaxDpmFreq,               1),
	MSG_MAP(GetDpmFreqByIndex,		PPSMC_MSG_GetDpmFreqByIndex,           1),
	MSG_MAP(PowerUpVcn,			PPSMC_MSG_PowerUpVcn,                  0),
	MSG_MAP(PowerDownVcn,			PPSMC_MSG_PowerDownVcn,                0),
	MSG_MAP(PowerUpJpeg,			PPSMC_MSG_PowerUpJpeg,                 0),
	MSG_MAP(PowerDownJpeg,			PPSMC_MSG_PowerDownJpeg,               0),
	MSG_MAP(GetDcModeMaxDpmFreq,		PPSMC_MSG_GetDcModeMaxDpmFreq,         1),
	MSG_MAP(OverridePcieParameters,		PPSMC_MSG_OverridePcieParameters,      0),
	MSG_MAP(DramLogSetDramAddrHigh,		PPSMC_MSG_DramLogSetDramAddrHigh,      0),
	MSG_MAP(DramLogSetDramAddrLow,		PPSMC_MSG_DramLogSetDramAddrLow,       0),
	MSG_MAP(DramLogSetDramSize,		PPSMC_MSG_DramLogSetDramSize,          0),
	MSG_MAP(AllowGfxOff,			PPSMC_MSG_AllowGfxOff,                 0),
	MSG_MAP(DisallowGfxOff,			PPSMC_MSG_DisallowGfxOff,              0),
	MSG_MAP(SetMGpuFanBoostLimitRpm,	PPSMC_MSG_SetMGpuFanBoostLimitRpm,     0),
	MSG_MAP(GetPptLimit,			PPSMC_MSG_GetPptLimit,                 0),
	MSG_MAP(NotifyPowerSource,		PPSMC_MSG_NotifyPowerSource,           0),
	MSG_MAP(Mode1Reset,			PPSMC_MSG_Mode1Reset,                  0),
	MSG_MAP(PrepareMp1ForUnload,		PPSMC_MSG_PrepareMp1ForUnload,         0),
	MSG_MAP(DFCstateControl,		PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
	MSG_MAP(ArmD3,				PPSMC_MSG_ArmD3,                       0),
};

static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = {
	CLK_MAP(GFXCLK,		PPCLK_GFXCLK),
	CLK_MAP(SCLK,		PPCLK_GFXCLK),
	CLK_MAP(SOCCLK,		PPCLK_SOCCLK),
	CLK_MAP(FCLK,		PPCLK_FCLK),
	CLK_MAP(UCLK,		PPCLK_UCLK),
	CLK_MAP(MCLK,		PPCLK_UCLK),
	CLK_MAP(VCLK,		PPCLK_VCLK_0),
	CLK_MAP(VCLK1,		PPCLK_VCLK_1),
	CLK_MAP(DCLK,		PPCLK_DCLK_0),
	CLK_MAP(DCLK1,		PPCLK_DCLK_1),
};

static struct cmn2asic_mapping smu_v13_0_0_feature_mask_map[SMU_FEATURE_COUNT] = {
	FEA_MAP(FW_DATA_READ),
	FEA_MAP(DPM_GFXCLK),
	FEA_MAP(DPM_GFX_POWER_OPTIMIZER),
	FEA_MAP(DPM_UCLK),
	FEA_MAP(DPM_FCLK),
	FEA_MAP(DPM_SOCCLK),
	FEA_MAP(DPM_MP0CLK),
	FEA_MAP(DPM_LINK),
	FEA_MAP(DPM_DCN),
	FEA_MAP(VMEMP_SCALING),
	FEA_MAP(VDDIO_MEM_SCALING),
	FEA_MAP(DS_GFXCLK),
	FEA_MAP(DS_SOCCLK),
	FEA_MAP(DS_FCLK),
	FEA_MAP(DS_LCLK),
	FEA_MAP(DS_DCFCLK),
	FEA_MAP(DS_UCLK),
	FEA_MAP(GFX_ULV),
	FEA_MAP(FW_DSTATE),
	FEA_MAP(GFXOFF),
	FEA_MAP(BACO),
	FEA_MAP(MM_DPM),
	FEA_MAP(SOC_MPCLK_DS),
	FEA_MAP(BACO_MPCLK_DS),
	FEA_MAP(THROTTLERS),
	FEA_MAP(SMARTSHIFT),
	FEA_MAP(GTHR),
	FEA_MAP(ACDC),
	FEA_MAP(VR0HOT),
	FEA_MAP(FW_CTF),
	FEA_MAP(FAN_CONTROL),
	FEA_MAP(GFX_DCS),
	FEA_MAP(GFX_READ_MARGIN),
	FEA_MAP(LED_DISPLAY),
	FEA_MAP(GFXCLK_SPREAD_SPECTRUM),
	FEA_MAP(OUT_OF_BAND_MONITOR),
	FEA_MAP(OPTIMIZED_VMIN),
	FEA_MAP(GFX_IMU),
	FEA_MAP(BOOT_TIME_CAL),
	FEA_MAP(GFX_PCC_DFLL),
	FEA_MAP(SOC_CG),
	FEA_MAP(DF_CSTATE),
	FEA_MAP(GFX_EDC),
	FEA_MAP(BOOT_POWER_OPT),
	FEA_MAP(CLOCK_POWER_DOWN_BYPASS),
	FEA_MAP(DS_VCN),
	FEA_MAP(BACO_CG),
	FEA_MAP(MEM_TEMP_READ),
	FEA_MAP(ATHUB_MMHUB_PG),
	FEA_MAP(SOC_PCC),
	[SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
	[SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
};

static struct cmn2asic_mapping smu_v13_0_0_table_map[SMU_TABLE_COUNT] = {
	TAB_MAP(PPTABLE),
	TAB_MAP(WATERMARKS),
	TAB_MAP(AVFS_PSM_DEBUG),
	TAB_MAP(PMSTATUSLOG),
	TAB_MAP(SMU_METRICS),
	TAB_MAP(DRIVER_SMU_CONFIG),
	TAB_MAP(ACTIVITY_MONITOR_COEFF),
	[SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE},
	TAB_MAP(I2C_COMMANDS),
};

static struct cmn2asic_mapping smu_v13_0_0_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
	PWR_MAP(AC),
	PWR_MAP(DC),
};

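/* Map PP_SMC_POWER_PROFILE_* enums to the WORKLOAD_PPLIB_*_BIT values consumed by PPSMC_MSG_SetWorkloadMask. */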
static struct cmn2asic_mapping smu_v13_0_0_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT,	WORKLOAD_PPLIB_DEFAULT_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D,		WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING,		WORKLOAD_PPLIB_POWER_SAVING_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO,		WORKLOAD_PPLIB_VIDEO_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR,			WORKLOAD_PPLIB_VR_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE,		WORKLOAD_PPLIB_COMPUTE_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM,		WORKLOAD_PPLIB_CUSTOM_BIT),
};

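/*
 * Map PMFW throttler status bits to the ASIC-independent SMU_THROTTLER_*
 * bits reported through gpu_metrics->indep_throttle_status.
 */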
static const uint8_t smu_v13_0_0_throttler_map[] = {
	[THROTTLER_PPT0_BIT]		= (SMU_THROTTLER_PPT0_BIT),
	[THROTTLER_PPT1_BIT]		= (SMU_THROTTLER_PPT1_BIT),
	[THROTTLER_PPT2_BIT]		= (SMU_THROTTLER_PPT2_BIT),
	[THROTTLER_PPT3_BIT]		= (SMU_THROTTLER_PPT3_BIT),
	[THROTTLER_TDC_GFX_BIT]		= (SMU_THROTTLER_TDC_GFX_BIT),
	[THROTTLER_TDC_SOC_BIT]		= (SMU_THROTTLER_TDC_SOC_BIT),
	[THROTTLER_TEMP_EDGE_BIT]	= (SMU_THROTTLER_TEMP_EDGE_BIT),
	[THROTTLER_TEMP_HOTSPOT_BIT]	= (SMU_THROTTLER_TEMP_HOTSPOT_BIT),
	[THROTTLER_TEMP_MEM_BIT]	= (SMU_THROTTLER_TEMP_MEM_BIT),
	[THROTTLER_TEMP_VR_GFX_BIT]	= (SMU_THROTTLER_TEMP_VR_GFX_BIT),
	[THROTTLER_TEMP_VR_SOC_BIT]	= (SMU_THROTTLER_TEMP_VR_SOC_BIT),
	[THROTTLER_TEMP_VR_MEM0_BIT]	= (SMU_THROTTLER_TEMP_VR_MEM0_BIT),
	[THROTTLER_TEMP_VR_MEM1_BIT]	= (SMU_THROTTLER_TEMP_VR_MEM1_BIT),
	[THROTTLER_TEMP_LIQUID0_BIT]	= (SMU_THROTTLER_TEMP_LIQUID0_BIT),
	[THROTTLER_TEMP_LIQUID1_BIT]	= (SMU_THROTTLER_TEMP_LIQUID1_BIT),
	[THROTTLER_GFX_APCC_PLUS_BIT]	= (SMU_THROTTLER_APCC_BIT),
	[THROTTLER_FIT_BIT]		= (SMU_THROTTLER_FIT_BIT),
};

static int
smu_v13_0_0_get_allowed_feature_mask(struct smu_context *smu,
				  uint32_t *feature_mask, uint32_t num)
{
	struct amdgpu_device *adev = smu->adev;
	u32 smu_version;

	if (num > 2)
		return -EINVAL;

	memset(feature_mask, 0xff, sizeof(uint32_t) * num);

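	/*
	 * Start from an all-features-allowed mask (num is at most two dwords);
	 * the checks below clear individual bits. Both 32-bit words are
	 * updated together through a single 64-bit view of the mask.
	 */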
	if (!(adev->pm.pp_feature & PP_SCLK_DPM_MASK)) {
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_IMU_BIT);
	}

	if (!(adev->pg_flags & AMD_PG_SUPPORT_ATHUB) ||
	    !(adev->pg_flags & AMD_PG_SUPPORT_MMHUB))
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT);

	if (!(adev->pm.pp_feature & PP_SOCCLK_DPM_MASK))
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);

	/* PMFW 78.58 contains a critical fix for the gfxoff feature */
	smu_cmn_get_smc_version(smu, NULL, &smu_version);
	if ((smu_version < 0x004e3a00) ||
	     !(adev->pm.pp_feature & PP_GFXOFF_MASK))
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFXOFF_BIT);

	if (!(adev->pm.pp_feature & PP_MCLK_DPM_MASK)) {
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_UCLK_BIT);
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT);
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT);
	}

	if (!(adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK))
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);

	if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_LINK_BIT);
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_LCLK_BIT);
	}

	if (!(adev->pm.pp_feature & PP_ULV_MASK))
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_ULV_BIT);

	return 0;
}

static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_13_0_0_powerplay_table *powerplay_table =
		table_context->power_play_table;
	struct smu_baco_context *smu_baco = &smu->smu_baco;

	if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_HARDWAREDC)
		smu->dc_controlled_by_gpio = true;

	if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_BACO ||
	    powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
		smu_baco->platform_support = true;

	if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
		smu_baco->maco_support = true;

	table_context->thermal_controller_type =
		powerplay_table->thermal_controller_type;

	/*
	 * Instead of having its own buffer space and getting the
	 * overdrive_table copied, smu->od_settings just points to
	 * the actual overdrive_table.
	 */
	smu->od_settings = &powerplay_table->overdrive_table;

	return 0;
}

static int smu_v13_0_0_store_powerplay_table(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_13_0_0_powerplay_table *powerplay_table =
		table_context->power_play_table;

	memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable,
	       sizeof(PPTable_t));

	return 0;
}

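/*
 * Local definition of the SMC DPM info ATOM data table (common header plus
 * BoardTable), used when the atomfirmware headers do not already provide one.
 */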
#ifndef atom_smc_dpm_info_table_13_0_0
struct atom_smc_dpm_info_table_13_0_0 {
	struct atom_common_table_header table_header;
	BoardTable_t BoardTable;
};
#endif

static int smu_v13_0_0_append_powerplay_table(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *smc_pptable = table_context->driver_pptable;
	struct atom_smc_dpm_info_table_13_0_0 *smc_dpm_table;
	BoardTable_t *BoardTable = &smc_pptable->BoardTable;
	int index, ret;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    smc_dpm_info);

	ret = amdgpu_atombios_get_data_table(smu->adev, index, NULL, NULL, NULL,
					     (uint8_t **)&smc_dpm_table);
	if (ret)
		return ret;

	memcpy(BoardTable, &smc_dpm_table->BoardTable, sizeof(BoardTable_t));

	return 0;
}

static int smu_v13_0_0_get_pptable_from_pmfw(struct smu_context *smu,
					     void **table,
					     uint32_t *size)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	void *combo_pptable = smu_table->combo_pptable;
	int ret = 0;

	ret = smu_cmn_get_combo_pptable(smu);
	if (ret)
		return ret;

	*table = combo_pptable;
	*size = sizeof(struct smu_13_0_0_powerplay_table);

	return 0;
}

static int smu_v13_0_0_setup_pptable(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	ret = smu_v13_0_0_get_pptable_from_pmfw(smu,
						&smu_table->power_play_table,
						&smu_table->power_play_table_size);
	if (ret)
		return ret;

	ret = smu_v13_0_0_store_powerplay_table(smu);
	if (ret)
		return ret;

	/*
	 * With SCPM enabled, the operation below will be handled
	 * by PSP. Driver involvement is unnecessary and useless.
	 */
	if (!adev->scpm_enabled) {
		ret = smu_v13_0_0_append_powerplay_table(smu);
		if (ret)
			return ret;
	}

	ret = smu_v13_0_0_check_powerplay_table(smu);
	if (ret)
		return ret;

	return ret;
}

static int smu_v13_0_0_tables_init(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;

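	/* All driver<->PMFW exchange tables live in page-aligned VRAM buffers. */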
	SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetricsExternal_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE,
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,
		       sizeof(DpmActivityMonitorCoeffIntExternal_t), PAGE_SIZE,
		       AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_COMBO_PPTABLE, MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE,
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);

	smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL);
	if (!smu_table->metrics_table)
		goto err0_out;
	smu_table->metrics_time = 0;

	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
	if (!smu_table->gpu_metrics_table)
		goto err1_out;

	smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
	if (!smu_table->watermarks_table)
		goto err2_out;

	return 0;

err2_out:
	kfree(smu_table->gpu_metrics_table);
err1_out:
	kfree(smu_table->metrics_table);
err0_out:
	return -ENOMEM;
}

static int smu_v13_0_0_allocate_dpm_context(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	smu_dpm->dpm_context = kzalloc(sizeof(struct smu_13_0_dpm_context),
				       GFP_KERNEL);
	if (!smu_dpm->dpm_context)
		return -ENOMEM;

	smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context);

	return 0;
}

static int smu_v13_0_0_init_smc_tables(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_v13_0_0_tables_init(smu);
	if (ret)
		return ret;

	ret = smu_v13_0_0_allocate_dpm_context(smu);
	if (ret)
		return ret;

	return smu_v13_0_init_smc_tables(smu);
}

static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu)
{
	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *pptable = table_context->driver_pptable;
	SkuTable_t *skutable = &pptable->SkuTable;
	struct smu_13_0_dpm_table *dpm_table;
	struct smu_13_0_pcie_table *pcie_table;
	uint32_t link_level;
	int ret = 0;

	/* socclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.soc_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		ret = smu_v13_0_set_single_dpm_table(smu,
						     SMU_SOCCLK,
						     dpm_table);
		if (ret)
			return ret;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	/* gfxclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.gfx_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
		ret = smu_v13_0_set_single_dpm_table(smu,
						     SMU_GFXCLK,
						     dpm_table);
		if (ret)
			return ret;

		/*
		 * Update the reported maximum shader clock to the value
		 * that is guaranteed to be achievable on all cards. This
		 * is aligned with the Windows setting. Since that value
		 * might not be the peak frequency the card can reach, it
		 * is normal for a real-time clock frequency to exceed this
		 * labelled maximum clock frequency (for example in the
		 * pp_dpm_sclk sysfs output).
		 */
		if (skutable->DriverReportedClocks.GameClockAc &&
		    (dpm_table->dpm_levels[dpm_table->count - 1].value >
		    skutable->DriverReportedClocks.GameClockAc)) {
			dpm_table->dpm_levels[dpm_table->count - 1].value =
				skutable->DriverReportedClocks.GameClockAc;
			dpm_table->max = skutable->DriverReportedClocks.GameClockAc;
		}
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	/* uclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.uclk_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		ret = smu_v13_0_set_single_dpm_table(smu,
						     SMU_UCLK,
						     dpm_table);
		if (ret)
			return ret;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	/* fclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.fclk_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
		ret = smu_v13_0_set_single_dpm_table(smu,
						     SMU_FCLK,
						     dpm_table);
		if (ret)
			return ret;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.fclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	/* vclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.vclk_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_VCLK_BIT)) {
		ret = smu_v13_0_set_single_dpm_table(smu,
						     SMU_VCLK,
						     dpm_table);
		if (ret)
			return ret;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	/* dclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.dclk_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCLK_BIT)) {
		ret = smu_v13_0_set_single_dpm_table(smu,
						     SMU_DCLK,
						     dpm_table);
		if (ret)
			return ret;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	/* lclk dpm table setup */
	pcie_table = &dpm_context->dpm_tables.pcie_table;
	pcie_table->num_of_link_levels = 0;
	for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
		if (!skutable->PcieGenSpeed[link_level] &&
		    !skutable->PcieLaneCount[link_level] &&
		    !skutable->LclkFreq[link_level])
			continue;

		pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
					skutable->PcieGenSpeed[link_level];
		pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
					skutable->PcieLaneCount[link_level];
		pcie_table->clk_freq[pcie_table->num_of_link_levels] =
					skutable->LclkFreq[link_level];
		pcie_table->num_of_link_levels++;
	}

	return 0;
}

static bool smu_v13_0_0_is_dpm_running(struct smu_context *smu)
{
	int ret = 0;
	uint64_t feature_enabled;

	ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
	if (ret)
		return false;

	return !!(feature_enabled & SMC_DPM_FEATURE);
}

static void smu_v13_0_0_dump_pptable(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *pptable = table_context->driver_pptable;
	SkuTable_t *skutable = &pptable->SkuTable;

	dev_info(smu->adev->dev, "Dumped PPTable:\n");

	dev_info(smu->adev->dev, "Version = 0x%08x\n", skutable->Version);
	dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", skutable->FeaturesToRun[0]);
	dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", skutable->FeaturesToRun[1]);
}

static int smu_v13_0_0_system_features_control(struct smu_context *smu,
					       bool en)
{
	return smu_v13_0_system_features_control(smu, en);
}

static uint32_t smu_v13_0_get_throttler_status(SmuMetrics_t *metrics)
{
	uint32_t throttler_status = 0;
	int i;

	for (i = 0; i < THROTTLER_COUNT; i++)
		throttler_status |=
			(metrics->ThrottlingPercentage[i] ? 1U << i : 0);

	return throttler_status;
}

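/*
 * Activity (in %) at or below which the post-deep-sleep average frequency
 * is reported instead of the pre-deep-sleep one.
 */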
#define SMU_13_0_0_BUSY_THRESHOLD	15
static int smu_v13_0_0_get_smu_metrics_data(struct smu_context *smu,
					    MetricsMember_t member,
					    uint32_t *value)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	SmuMetrics_t *metrics =
		&(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics);
	int ret = 0;

	ret = smu_cmn_get_metrics_table(smu,
					NULL,
					false);
	if (ret)
		return ret;

	switch (member) {
	case METRICS_CURR_GFXCLK:
		*value = metrics->CurrClock[PPCLK_GFXCLK];
		break;
	case METRICS_CURR_SOCCLK:
		*value = metrics->CurrClock[PPCLK_SOCCLK];
		break;
	case METRICS_CURR_UCLK:
		*value = metrics->CurrClock[PPCLK_UCLK];
		break;
	case METRICS_CURR_VCLK:
		*value = metrics->CurrClock[PPCLK_VCLK_0];
		break;
	case METRICS_CURR_VCLK1:
		*value = metrics->CurrClock[PPCLK_VCLK_1];
		break;
	case METRICS_CURR_DCLK:
		*value = metrics->CurrClock[PPCLK_DCLK_0];
		break;
	case METRICS_CURR_DCLK1:
		*value = metrics->CurrClock[PPCLK_DCLK_1];
		break;
	case METRICS_CURR_FCLK:
		*value = metrics->CurrClock[PPCLK_FCLK];
		break;
	case METRICS_AVERAGE_GFXCLK:
		if (metrics->AverageGfxActivity <= SMU_13_0_0_BUSY_THRESHOLD)
			*value = metrics->AverageGfxclkFrequencyPostDs;
		else
			*value = metrics->AverageGfxclkFrequencyPreDs;
		break;
	case METRICS_AVERAGE_FCLK:
		if (metrics->AverageUclkActivity <= SMU_13_0_0_BUSY_THRESHOLD)
			*value = metrics->AverageFclkFrequencyPostDs;
		else
			*value = metrics->AverageFclkFrequencyPreDs;
		break;
	case METRICS_AVERAGE_UCLK:
		if (metrics->AverageUclkActivity <= SMU_13_0_0_BUSY_THRESHOLD)
			*value = metrics->AverageMemclkFrequencyPostDs;
		else
			*value = metrics->AverageMemclkFrequencyPreDs;
		break;
	case METRICS_AVERAGE_VCLK:
		*value = metrics->AverageVclk0Frequency;
		break;
	case METRICS_AVERAGE_DCLK:
		*value = metrics->AverageDclk0Frequency;
		break;
	case METRICS_AVERAGE_VCLK1:
		*value = metrics->AverageVclk1Frequency;
		break;
	case METRICS_AVERAGE_DCLK1:
		*value = metrics->AverageDclk1Frequency;
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		*value = metrics->AverageGfxActivity;
		break;
	case METRICS_AVERAGE_MEMACTIVITY:
		*value = metrics->AverageUclkActivity;
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
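		/*
		 * The PMFW reports socket power in watts; shift into the 8.8
		 * fixed-point format the power sensor interface expects
		 * (e.g. 150 W -> 150 << 8 = 0x9600).
		 */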
		*value = metrics->AverageSocketPower << 8;
		break;
	case METRICS_TEMPERATURE_EDGE:
		*value = metrics->AvgTemperature[TEMP_EDGE] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = metrics->AvgTemperature[TEMP_HOTSPOT] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_MEM:
		*value = metrics->AvgTemperature[TEMP_MEM] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRGFX:
		*value = metrics->AvgTemperature[TEMP_VR_GFX] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRSOC:
		*value = metrics->AvgTemperature[TEMP_VR_SOC] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_THROTTLER_STATUS:
		*value = smu_v13_0_get_throttler_status(metrics);
		break;
	case METRICS_CURR_FANSPEED:
		*value = metrics->AvgFanRpm;
		break;
	case METRICS_CURR_FANPWM:
		*value = metrics->AvgFanPwm;
		break;
	case METRICS_VOLTAGE_VDDGFX:
		*value = metrics->AvgVoltage[SVI_PLANE_GFX];
		break;
	case METRICS_PCIE_RATE:
		*value = metrics->PcieRate;
		break;
	case METRICS_PCIE_WIDTH:
		*value = metrics->PcieWidth;
		break;
	default:
		*value = UINT_MAX;
		break;
	}

	return ret;
}

static int smu_v13_0_0_get_dpm_ultimate_freq(struct smu_context *smu,
					     enum smu_clk_type clk_type,
					     uint32_t *min,
					     uint32_t *max)
{
	struct smu_13_0_dpm_context *dpm_context =
		smu->smu_dpm.dpm_context;
	struct smu_13_0_dpm_table *dpm_table;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
		/* uclk dpm table */
		dpm_table = &dpm_context->dpm_tables.uclk_table;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		/* gfxclk dpm table */
		dpm_table = &dpm_context->dpm_tables.gfx_table;
		break;
	case SMU_SOCCLK:
		/* socclk dpm table */
		dpm_table = &dpm_context->dpm_tables.soc_table;
		break;
	case SMU_FCLK:
		/* fclk dpm table */
		dpm_table = &dpm_context->dpm_tables.fclk_table;
		break;
	case SMU_VCLK:
	case SMU_VCLK1:
		/* vclk dpm table */
		dpm_table = &dpm_context->dpm_tables.vclk_table;
		break;
	case SMU_DCLK:
	case SMU_DCLK1:
		/* dclk dpm table */
		dpm_table = &dpm_context->dpm_tables.dclk_table;
		break;
	default:
		dev_err(smu->adev->dev, "Unsupported clock type!\n");
		return -EINVAL;
	}

	if (min)
		*min = dpm_table->min;
	if (max)
		*max = dpm_table->max;

	return 0;
}

static int smu_v13_0_0_read_sensor(struct smu_context *smu,
				   enum amd_pp_sensors sensor,
				   void *data,
				   uint32_t *size)
{
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *smc_pptable = table_context->driver_pptable;
	int ret = 0;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
		*(uint16_t *)data = smc_pptable->SkuTable.FanMaximumRpm;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MEM_LOAD:
		ret = smu_v13_0_0_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_MEMACTIVITY,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		ret = smu_v13_0_0_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_GFXACTIVITY,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_POWER:
		ret = smu_v13_0_0_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_SOCKETPOWER,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
		ret = smu_v13_0_0_get_smu_metrics_data(smu,
						       METRICS_TEMPERATURE_HOTSPOT,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_EDGE_TEMP:
		ret = smu_v13_0_0_get_smu_metrics_data(smu,
						       METRICS_TEMPERATURE_EDGE,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MEM_TEMP:
		ret = smu_v13_0_0_get_smu_metrics_data(smu,
						       METRICS_TEMPERATURE_MEM,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		ret = smu_v13_0_0_get_smu_metrics_data(smu,
						       METRICS_CURR_UCLK,
						       (uint32_t *)data);
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		ret = smu_v13_0_0_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_GFXCLK,
						       (uint32_t *)data);
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		ret = smu_v13_0_0_get_smu_metrics_data(smu,
						       METRICS_VOLTAGE_VDDGFX,
						       (uint32_t *)data);
		*size = 4;
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

static int smu_v13_0_0_get_current_clk_freq_by_table(struct smu_context *smu,
						     enum smu_clk_type clk_type,
						     uint32_t *value)
{
	MetricsMember_t member_type;
	int clk_id = 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return -EINVAL;

	switch (clk_id) {
	case PPCLK_GFXCLK:
		member_type = METRICS_AVERAGE_GFXCLK;
		break;
	case PPCLK_UCLK:
		member_type = METRICS_CURR_UCLK;
		break;
	case PPCLK_FCLK:
		member_type = METRICS_CURR_FCLK;
		break;
	case PPCLK_SOCCLK:
		member_type = METRICS_CURR_SOCCLK;
		break;
	case PPCLK_VCLK_0:
		member_type = METRICS_AVERAGE_VCLK;
		break;
	case PPCLK_DCLK_0:
		member_type = METRICS_AVERAGE_DCLK;
		break;
	case PPCLK_VCLK_1:
		member_type = METRICS_AVERAGE_VCLK1;
		break;
	case PPCLK_DCLK_1:
		member_type = METRICS_AVERAGE_DCLK1;
		break;
	default:
		return -EINVAL;
	}

	return smu_v13_0_0_get_smu_metrics_data(smu,
						member_type,
						value);
}

static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
					enum smu_clk_type clk_type,
					char *buf)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
	struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	struct smu_13_0_dpm_table *single_dpm_table;
	struct smu_13_0_pcie_table *pcie_table;
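	/* PMFW encodes lane count as an index; translate to physical link width (e.g. index 6 -> x16). */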
	const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
	uint32_t gen_speed, lane_width;
	int i, curr_freq, size = 0;
	int ret = 0;

	smu_cmn_get_sysfs_buf(&buf, &size);

	if (amdgpu_ras_intr_triggered()) {
		size += sysfs_emit_at(buf, size, "unavailable\n");
		return size;
	}

	switch (clk_type) {
	case SMU_SCLK:
		single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
		break;
	case SMU_MCLK:
		single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
		break;
	case SMU_SOCCLK:
		single_dpm_table = &(dpm_context->dpm_tables.soc_table);
		break;
	case SMU_FCLK:
		single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
		break;
	case SMU_VCLK:
	case SMU_VCLK1:
		single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
		break;
	case SMU_DCLK:
	case SMU_DCLK1:
		single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
		break;
	default:
		break;
	}

	switch (clk_type) {
	case SMU_SCLK:
	case SMU_MCLK:
	case SMU_SOCCLK:
	case SMU_FCLK:
	case SMU_VCLK:
	case SMU_VCLK1:
	case SMU_DCLK:
	case SMU_DCLK1:
		ret = smu_v13_0_0_get_current_clk_freq_by_table(smu, clk_type, &curr_freq);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to get current clock freq!");
			return ret;
		}

		if (single_dpm_table->is_fine_grained) {
			/*
			 * For fine grained dpms, there are only two dpm levels:
			 *   - level 0 -> min clock freq
			 *   - level 1 -> max clock freq
			 * And the current clock frequency can be any value between them.
			 * So, if the current clock frequency is not at level 0 or level 1,
			 * we will fake it as three dpm levels:
			 *   - level 0 -> min clock freq
			 *   - level 1 -> current actual clock freq
			 *   - level 2 -> max clock freq
			 */
			if ((single_dpm_table->dpm_levels[0].value != curr_freq) &&
			    (single_dpm_table->dpm_levels[1].value != curr_freq)) {
				size += sysfs_emit_at(buf, size, "0: %uMhz\n",
						single_dpm_table->dpm_levels[0].value);
				size += sysfs_emit_at(buf, size, "1: %uMhz *\n",
						curr_freq);
				size += sysfs_emit_at(buf, size, "2: %uMhz\n",
						single_dpm_table->dpm_levels[1].value);
			} else {
				size += sysfs_emit_at(buf, size, "0: %uMhz %s\n",
						single_dpm_table->dpm_levels[0].value,
						single_dpm_table->dpm_levels[0].value == curr_freq ? "*" : "");
				size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
						single_dpm_table->dpm_levels[1].value,
						single_dpm_table->dpm_levels[1].value == curr_freq ? "*" : "");
			}
		} else {
			for (i = 0; i < single_dpm_table->count; i++)
				size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
						i, single_dpm_table->dpm_levels[i].value,
						single_dpm_table->dpm_levels[i].value == curr_freq ? "*" : "");
		}
		break;
	case SMU_PCIE:
		ret = smu_v13_0_0_get_smu_metrics_data(smu,
						       METRICS_PCIE_RATE,
						       &gen_speed);
		if (ret)
			return ret;

		ret = smu_v13_0_0_get_smu_metrics_data(smu,
						       METRICS_PCIE_WIDTH,
						       &lane_width);
		if (ret)
			return ret;

		pcie_table = &(dpm_context->dpm_tables.pcie_table);
		for (i = 0; i < pcie_table->num_of_link_levels; i++)
			size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i,
					(pcie_table->pcie_gen[i] == 0) ? "2.5GT/s," :
					(pcie_table->pcie_gen[i] == 1) ? "5.0GT/s," :
					(pcie_table->pcie_gen[i] == 2) ? "8.0GT/s," :
					(pcie_table->pcie_gen[i] == 3) ? "16.0GT/s," : "",
					(pcie_table->pcie_lane[i] == 1) ? "x1" :
					(pcie_table->pcie_lane[i] == 2) ? "x2" :
					(pcie_table->pcie_lane[i] == 3) ? "x4" :
					(pcie_table->pcie_lane[i] == 4) ? "x8" :
					(pcie_table->pcie_lane[i] == 5) ? "x12" :
					(pcie_table->pcie_lane[i] == 6) ? "x16" : "",
					pcie_table->clk_freq[i],
					((gen_speed - 1) == pcie_table->pcie_gen[i]) &&
					(lane_width == link_width[pcie_table->pcie_lane[i]]) ?
					"*" : "");
		break;

	default:
		break;
	}

	return size;
}

static int smu_v13_0_0_force_clk_levels(struct smu_context *smu,
					enum smu_clk_type clk_type,
					uint32_t mask)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
	struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	struct smu_13_0_dpm_table *single_dpm_table;
	uint32_t soft_min_level, soft_max_level;
	uint32_t min_freq, max_freq;
	int ret = 0;

	soft_min_level = mask ? (ffs(mask) - 1) : 0;
	soft_max_level = mask ? (fls(mask) - 1) : 0;

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_SCLK:
		single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
		break;
	case SMU_MCLK:
	case SMU_UCLK:
		single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
		break;
	case SMU_SOCCLK:
		single_dpm_table = &(dpm_context->dpm_tables.soc_table);
		break;
	case SMU_FCLK:
		single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
		break;
	case SMU_VCLK:
	case SMU_VCLK1:
		single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
		break;
	case SMU_DCLK:
	case SMU_DCLK1:
		single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
		break;
	default:
		break;
	}

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_SCLK:
	case SMU_MCLK:
	case SMU_UCLK:
	case SMU_SOCCLK:
	case SMU_FCLK:
	case SMU_VCLK:
	case SMU_VCLK1:
	case SMU_DCLK:
	case SMU_DCLK1:
		if (single_dpm_table->is_fine_grained) {
			/* There are only two levels for fine-grained DPM */
			soft_max_level = (soft_max_level >= 1 ? 1 : 0);
			soft_min_level = (soft_min_level >= 1 ? 1 : 0);
		} else {
			if ((soft_max_level >= single_dpm_table->count) ||
			    (soft_min_level >= single_dpm_table->count))
				return -EINVAL;
		}

		min_freq = single_dpm_table->dpm_levels[soft_min_level].value;
		max_freq = single_dpm_table->dpm_levels[soft_max_level].value;

		ret = smu_v13_0_set_soft_freq_limited_range(smu,
							    clk_type,
							    min_freq,
							    max_freq);
		break;
	case SMU_DCEFCLK:
	case SMU_PCIE:
	default:
		break;
	}

	return ret;
}

static int smu_v13_0_0_update_pcie_parameters(struct smu_context *smu,
					      uint32_t pcie_gen_cap,
					      uint32_t pcie_width_cap)
{
	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
	struct smu_13_0_pcie_table *pcie_table =
				&dpm_context->dpm_tables.pcie_table;
	uint32_t smu_pcie_arg;
	int ret, i;

	for (i = 0; i < pcie_table->num_of_link_levels; i++) {
		if (pcie_table->pcie_gen[i] > pcie_gen_cap)
			pcie_table->pcie_gen[i] = pcie_gen_cap;
		if (pcie_table->pcie_lane[i] > pcie_width_cap)
			pcie_table->pcie_lane[i] = pcie_width_cap;

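		/*
		 * Message argument layout: [31:16] link level index,
		 * [15:8] PCIe gen (0 = Gen1 ... 3 = Gen4), [7:0] lane-count
		 * encoding (6 = x16). E.g. level 1, Gen4, x16 encodes
		 * as 0x00010306.
		 */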
		smu_pcie_arg = i << 16;
		smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
		smu_pcie_arg |= pcie_table->pcie_lane[i];

		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_OverridePcieParameters,
						      smu_pcie_arg,
						      NULL);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct smu_temperature_range smu13_thermal_policy[] = {
	{-273150,  99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
	{ 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
};

static int smu_v13_0_0_get_thermal_temperature_range(struct smu_context *smu,
						     struct smu_temperature_range *range)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_13_0_0_powerplay_table *powerplay_table =
		table_context->power_play_table;
	PPTable_t *pptable = smu->smu_table.driver_pptable;

	if (!range)
		return -EINVAL;

	memcpy(range, &smu13_thermal_policy[0], sizeof(struct smu_temperature_range));

	range->max = pptable->SkuTable.TemperatureLimit[TEMP_EDGE] *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	range->edge_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_EDGE] + CTF_OFFSET_EDGE) *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	range->hotspot_crit_max = pptable->SkuTable.TemperatureLimit[TEMP_HOTSPOT] *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	range->hotspot_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_HOTSPOT] + CTF_OFFSET_HOTSPOT) *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	range->mem_crit_max = pptable->SkuTable.TemperatureLimit[TEMP_MEM] *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	range->mem_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_MEM] + CTF_OFFSET_MEM) *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	range->software_shutdown_temp = powerplay_table->software_shutdown_temp;

	return 0;
}

#ifndef MAX
#define MAX(a, b)	((a) > (b) ? (a) : (b))
#endif
static ssize_t smu_v13_0_0_get_gpu_metrics(struct smu_context *smu,
					   void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v1_3 *gpu_metrics =
		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
	SmuMetricsExternal_t metrics_ext;
	SmuMetrics_t *metrics = &metrics_ext.SmuMetrics;
	int ret = 0;

	ret = smu_cmn_get_metrics_table(smu,
					&metrics_ext,
					true);
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);

	gpu_metrics->temperature_edge = metrics->AvgTemperature[TEMP_EDGE];
	gpu_metrics->temperature_hotspot = metrics->AvgTemperature[TEMP_HOTSPOT];
	gpu_metrics->temperature_mem = metrics->AvgTemperature[TEMP_MEM];
	gpu_metrics->temperature_vrgfx = metrics->AvgTemperature[TEMP_VR_GFX];
	gpu_metrics->temperature_vrsoc = metrics->AvgTemperature[TEMP_VR_SOC];
	gpu_metrics->temperature_vrmem = MAX(metrics->AvgTemperature[TEMP_VR_MEM0],
					     metrics->AvgTemperature[TEMP_VR_MEM1]);

	gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity;
	gpu_metrics->average_umc_activity = metrics->AverageUclkActivity;
	gpu_metrics->average_mm_activity = MAX(metrics->Vcn0ActivityPercentage,
					       metrics->Vcn1ActivityPercentage);

	gpu_metrics->average_socket_power = metrics->AverageSocketPower;
	gpu_metrics->energy_accumulator = metrics->EnergyAccumulator;

	if (metrics->AverageGfxActivity <= SMU_13_0_0_BUSY_THRESHOLD)
		gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPostDs;
	else
		gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPreDs;

	if (metrics->AverageUclkActivity <= SMU_13_0_0_BUSY_THRESHOLD)
		gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPostDs;
	else
		gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPreDs;

	gpu_metrics->average_vclk0_frequency = metrics->AverageVclk0Frequency;
	gpu_metrics->average_dclk0_frequency = metrics->AverageDclk0Frequency;
	gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency;
	gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency;

	gpu_metrics->current_gfxclk = metrics->CurrClock[PPCLK_GFXCLK];
	gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK];
	gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK];
	gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0];
	gpu_metrics->current_dclk0 = metrics->CurrClock[PPCLK_DCLK_0];
	gpu_metrics->current_vclk1 = metrics->CurrClock[PPCLK_VCLK_1];
	gpu_metrics->current_dclk1 = metrics->CurrClock[PPCLK_DCLK_1];

	gpu_metrics->throttle_status =
			smu_v13_0_get_throttler_status(metrics);
	gpu_metrics->indep_throttle_status =
			smu_cmn_get_indep_throttler_status(gpu_metrics->throttle_status,
							   smu_v13_0_0_throttler_map);

	gpu_metrics->current_fan_speed = metrics->AvgFanRpm;

	gpu_metrics->pcie_link_width = metrics->PcieWidth;
	gpu_metrics->pcie_link_speed = metrics->PcieRate;

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	gpu_metrics->voltage_gfx = metrics->AvgVoltage[SVI_PLANE_GFX];
	gpu_metrics->voltage_soc = metrics->AvgVoltage[SVI_PLANE_SOC];
	gpu_metrics->voltage_mem = metrics->AvgVoltage[SVI_PLANE_VMEMP];

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v1_3);
}

static int smu_v13_0_0_populate_umd_state_clk(struct smu_context *smu)
{
	struct smu_13_0_dpm_context *dpm_context =
				smu->smu_dpm.dpm_context;
	struct smu_13_0_dpm_table *gfx_table =
				&dpm_context->dpm_tables.gfx_table;
	struct smu_13_0_dpm_table *mem_table =
				&dpm_context->dpm_tables.uclk_table;
	struct smu_13_0_dpm_table *soc_table =
				&dpm_context->dpm_tables.soc_table;
	struct smu_13_0_dpm_table *vclk_table =
				&dpm_context->dpm_tables.vclk_table;
	struct smu_13_0_dpm_table *dclk_table =
				&dpm_context->dpm_tables.dclk_table;
	struct smu_13_0_dpm_table *fclk_table =
				&dpm_context->dpm_tables.fclk_table;
	struct smu_umd_pstate_table *pstate_table =
				&smu->pstate_table;
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *pptable = table_context->driver_pptable;
	DriverReportedClocks_t driver_clocks =
			pptable->SkuTable.DriverReportedClocks;

	pstate_table->gfxclk_pstate.min = gfx_table->min;
	if (driver_clocks.GameClockAc &&
	    (driver_clocks.GameClockAc < gfx_table->max))
		pstate_table->gfxclk_pstate.peak = driver_clocks.GameClockAc;
	else
		pstate_table->gfxclk_pstate.peak = gfx_table->max;

	pstate_table->uclk_pstate.min = mem_table->min;
	pstate_table->uclk_pstate.peak = mem_table->max;

	pstate_table->socclk_pstate.min = soc_table->min;
	pstate_table->socclk_pstate.peak = soc_table->max;

	pstate_table->vclk_pstate.min = vclk_table->min;
	pstate_table->vclk_pstate.peak = vclk_table->max;

	pstate_table->dclk_pstate.min = dclk_table->min;
	pstate_table->dclk_pstate.peak = dclk_table->max;

	pstate_table->fclk_pstate.min = fclk_table->min;
	pstate_table->fclk_pstate.peak = fclk_table->max;

	if (driver_clocks.BaseClockAc &&
	    driver_clocks.BaseClockAc < gfx_table->max)
		pstate_table->gfxclk_pstate.standard = driver_clocks.BaseClockAc;
	else
		pstate_table->gfxclk_pstate.standard = gfx_table->max;
	pstate_table->uclk_pstate.standard = mem_table->max;
	pstate_table->socclk_pstate.standard = soc_table->min;
	pstate_table->vclk_pstate.standard = vclk_table->min;
	pstate_table->dclk_pstate.standard = dclk_table->min;
	pstate_table->fclk_pstate.standard = fclk_table->min;

	return 0;
}

static void smu_v13_0_0_get_unique_id(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	SmuMetrics_t *metrics =
		&(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics);
	struct amdgpu_device *adev = smu->adev;
	uint32_t upper32 = 0, lower32 = 0;
	int ret;

	ret = smu_cmn_get_metrics_table(smu, NULL, false);
	if (ret)
		goto out;

	upper32 = metrics->PublicSerialNumberUpper;
	lower32 = metrics->PublicSerialNumberLower;

out:
	adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
	if (adev->serial[0] == '\0')
		snprintf(adev->serial, sizeof(adev->serial), "%016llx", adev->unique_id);
}

1419 static int smu_v13_0_0_get_fan_speed_pwm(struct smu_context *smu,
1420 					 uint32_t *speed)
1421 {
1422 	if (!speed)
1423 		return -EINVAL;
1424 
1425 	return smu_v13_0_0_get_smu_metrics_data(smu,
1426 						METRICS_CURR_FANPWM,
1427 						speed);
1428 }
1429 
1430 static int smu_v13_0_0_get_fan_speed_rpm(struct smu_context *smu,
1431 					 uint32_t *speed)
1432 {
1433 	if (!speed)
1434 		return -EINVAL;
1435 
1436 	return smu_v13_0_0_get_smu_metrics_data(smu,
1437 						METRICS_CURR_FANSPEED,
1438 						speed);
1439 }
1440 
1441 static int smu_v13_0_0_enable_mgpu_fan_boost(struct smu_context *smu)
1442 {
1443 	struct smu_table_context *table_context = &smu->smu_table;
1444 	PPTable_t *pptable = table_context->driver_pptable;
1445 	SkuTable_t *skutable = &pptable->SkuTable;
1446 
1447 	/*
1448 	 * Skip the MGpuFanBoost setting for those ASICs
1449 	 * which do not support it
1450 	 */
1451 	if (skutable->MGpuAcousticLimitRpmThreshold == 0)
1452 		return 0;
1453 
1454 	return smu_cmn_send_smc_msg_with_param(smu,
1455 					       SMU_MSG_SetMGpuFanBoostLimitRpm,
1456 					       0,
1457 					       NULL);
1458 }
1459 
1460 static int smu_v13_0_0_get_power_limit(struct smu_context *smu,
1461 				       uint32_t *current_power_limit,
1462 				       uint32_t *default_power_limit,
1463 				       uint32_t *max_power_limit)
1464 {
1465 	struct smu_table_context *table_context = &smu->smu_table;
1466 	struct smu_13_0_0_powerplay_table *powerplay_table =
1467 		(struct smu_13_0_0_powerplay_table *)table_context->power_play_table;
1468 	PPTable_t *pptable = table_context->driver_pptable;
1469 	SkuTable_t *skutable = &pptable->SkuTable;
1470 	uint32_t power_limit, od_percent;
1471 
1472 	if (smu_v13_0_get_current_power_limit(smu, &power_limit))
1473 		power_limit = smu->adev->pm.ac_power ?
1474 			      skutable->SocketPowerLimitAc[PPT_THROTTLER_PPT0] :
1475 			      skutable->SocketPowerLimitDc[PPT_THROTTLER_PPT0];
1476 
1477 	if (current_power_limit)
1478 		*current_power_limit = power_limit;
1479 	if (default_power_limit)
1480 		*default_power_limit = power_limit;
1481 
1482 	if (max_power_limit) {
1483 		if (smu->od_enabled) {
1484 			od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
1485 
1486 			dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit);
1487 
1488 			power_limit *= (100 + od_percent);
1489 			power_limit /= 100;
1490 		}
1491 		*max_power_limit = power_limit;
1492 	}
1493 
1494 	return 0;
1495 }
1496 
static int smu_v13_0_0_get_power_profile_mode(struct smu_context *smu,
					      char *buf)
{
	DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
	DpmActivityMonitorCoeffInt_t *activity_monitor =
		&(activity_monitor_external.DpmActivityMonitorCoeffInt);
	static const char *title[] = {
			"PROFILE_INDEX(NAME)",
			"CLOCK_TYPE(NAME)",
			"FPS",
			"MinActiveFreqType",
			"MinActiveFreq",
			"BoosterFreqType",
			"BoosterFreq",
			"PD_Data_limit_c",
			"PD_Data_error_coeff",
			"PD_Data_error_rate_coeff"};
	int16_t workload_type = 0;
	uint32_t i, size = 0;
	int result = 0;

	if (!buf)
		return -EINVAL;

	size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s\n",
			title[0], title[1], title[2], title[3], title[4], title[5],
			title[6], title[7], title[8], title[9]);

	for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
		/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
		workload_type = smu_cmn_to_asic_specific_index(smu,
							       CMN2ASIC_MAPPING_WORKLOAD,
							       i);
		if (workload_type < 0)
			return -EINVAL;

		result = smu_cmn_update_table(smu,
					      SMU_TABLE_ACTIVITY_MONITOR_COEFF,
					      workload_type,
					      (void *)(&activity_monitor_external),
					      false);
		if (result) {
			dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
			return result;
		}

		size += sysfs_emit_at(buf, size, "%2d %14s%s:\n",
			i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");

		size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d\n",
			" ",
			0,
			"GFXCLK",
			activity_monitor->Gfx_FPS,
			activity_monitor->Gfx_MinActiveFreqType,
			activity_monitor->Gfx_MinActiveFreq,
			activity_monitor->Gfx_BoosterFreqType,
			activity_monitor->Gfx_BoosterFreq,
			activity_monitor->Gfx_PD_Data_limit_c,
			activity_monitor->Gfx_PD_Data_error_coeff,
			activity_monitor->Gfx_PD_Data_error_rate_coeff);

		size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d\n",
			" ",
			1,
			"FCLK",
			activity_monitor->Fclk_FPS,
			activity_monitor->Fclk_MinActiveFreqType,
			activity_monitor->Fclk_MinActiveFreq,
			activity_monitor->Fclk_BoosterFreqType,
			activity_monitor->Fclk_BoosterFreq,
			activity_monitor->Fclk_PD_Data_limit_c,
			activity_monitor->Fclk_PD_Data_error_coeff,
			activity_monitor->Fclk_PD_Data_error_rate_coeff);
	}

	return size;
}

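/*
 * Apply a power profile. input[size] selects the profile; for
 * PP_SMC_POWER_PROFILE_CUSTOM, input[0] picks the clock domain
 * (0 = GFXCLK, 1 = FCLK) and input[1..8] carry the activity monitor
 * coefficients written back to the PMFW before the workload mask is set.
 */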
static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
					      long *input,
					      uint32_t size)
{
	DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
	DpmActivityMonitorCoeffInt_t *activity_monitor =
		&(activity_monitor_external.DpmActivityMonitorCoeffInt);
	int workload_type, ret = 0;

	/* Validate the requested mode before committing it to smu state */
	if (input[size] > PP_SMC_POWER_PROFILE_CUSTOM) {
		dev_err(smu->adev->dev, "Invalid power profile mode %ld\n", input[size]);
		return -EINVAL;
	}

	smu->power_profile_mode = input[size];

	if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
		ret = smu_cmn_update_table(smu,
					   SMU_TABLE_ACTIVITY_MONITOR_COEFF,
					   WORKLOAD_PPLIB_CUSTOM_BIT,
					   (void *)(&activity_monitor_external),
					   false);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
			return ret;
		}

		switch (input[0]) {
		case 0: /* Gfxclk */
			activity_monitor->Gfx_FPS = input[1];
			activity_monitor->Gfx_MinActiveFreqType = input[2];
			activity_monitor->Gfx_MinActiveFreq = input[3];
			activity_monitor->Gfx_BoosterFreqType = input[4];
			activity_monitor->Gfx_BoosterFreq = input[5];
			activity_monitor->Gfx_PD_Data_limit_c = input[6];
			activity_monitor->Gfx_PD_Data_error_coeff = input[7];
			activity_monitor->Gfx_PD_Data_error_rate_coeff = input[8];
			break;
		case 1: /* Fclk */
			activity_monitor->Fclk_FPS = input[1];
			activity_monitor->Fclk_MinActiveFreqType = input[2];
			activity_monitor->Fclk_MinActiveFreq = input[3];
			activity_monitor->Fclk_BoosterFreqType = input[4];
			activity_monitor->Fclk_BoosterFreq = input[5];
			activity_monitor->Fclk_PD_Data_limit_c = input[6];
			activity_monitor->Fclk_PD_Data_error_coeff = input[7];
			activity_monitor->Fclk_PD_Data_error_rate_coeff = input[8];
			break;
		}

		ret = smu_cmn_update_table(smu,
					   SMU_TABLE_ACTIVITY_MONITOR_COEFF,
					   WORKLOAD_PPLIB_CUSTOM_BIT,
					   (void *)(&activity_monitor_external),
					   true);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
			return ret;
		}
	}

	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
	workload_type = smu_cmn_to_asic_specific_index(smu,
						       CMN2ASIC_MAPPING_WORKLOAD,
						       smu->power_profile_mode);
	if (workload_type < 0)
		return -EINVAL;

	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_SetWorkloadMask,
					       1 << workload_type,
					       NULL);
}

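/*
 * On runtime suspend with an enabled audio function, arm the D3 entry
 * sequence (BAMACO when MACO is supported) rather than entering BACO
 * directly, presumably so the audio function remains reachable on the bus.
 */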
static int smu_v13_0_0_baco_enter(struct smu_context *smu)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	struct amdgpu_device *adev = smu->adev;

	if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
		return smu_v13_0_baco_set_armd3_sequence(smu,
				smu_baco->maco_support ? BACO_SEQ_BAMACO : BACO_SEQ_BACO);
	else
		return smu_v13_0_baco_enter(smu);
}

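/*
 * Mirror of baco_enter: when the ArmD3 sequence was used, give the PMFW
 * time to finish the Dstate transition before re-arming for ULPS;
 * otherwise take the common BACO exit path.
 */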
static int smu_v13_0_0_baco_exit(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
		/* Wait for PMFW handling for the Dstate change */
		usleep_range(10000, 11000);
		return smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
	} else {
		return smu_v13_0_baco_exit(smu);
	}
}

static bool smu_v13_0_0_is_mode1_reset_supported(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	u32 smu_version;

	/* SRIOV does not support SMU mode1 reset */
	if (amdgpu_sriov_vf(adev))
		return false;

	/* PMFW support is available since 78.41 */
	smu_cmn_get_smc_version(smu, NULL, &smu_version);
	if (smu_version < 0x004e2900)
		return false;

	return true;
}

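/*
 * Translate an i2c_msg set into a SwI2cRequest_t processed by the SMU:
 * each byte becomes one SwI2cCmd_t, a RESTART is inserted whenever the
 * transfer direction changes, and a STOP terminates the last byte of the
 * transaction (or of any message flagged I2C_M_STOP). Read data is
 * copied back from the response left in the driver table.
 */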
static int smu_v13_0_0_i2c_xfer(struct i2c_adapter *i2c_adap,
				   struct i2c_msg *msg, int num_msgs)
{
	struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap);
	struct amdgpu_device *adev = smu_i2c->adev;
	struct smu_context *smu = adev->powerplay.pp_handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *table = &smu_table->driver_table;
	SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
	int i, j, r, c;
	u16 dir;

	if (!adev->pm.dpm_enabled)
		return -EBUSY;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->I2CcontrollerPort = smu_i2c->port;
	req->I2CSpeed = I2C_SPEED_FAST_400K;
	req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */
	dir = msg[0].flags & I2C_M_RD;

	for (c = i = 0; i < num_msgs; i++) {
		for (j = 0; j < msg[i].len; j++, c++) {
			SwI2cCmd_t *cmd = &req->SwI2cCmds[c];

			if (!(msg[i].flags & I2C_M_RD)) {
				/* write */
				cmd->CmdConfig |= CMDCONFIG_READWRITE_MASK;
				cmd->ReadWriteData = msg[i].buf[j];
			}

			if ((dir ^ msg[i].flags) & I2C_M_RD) {
				/* The direction changes. */
				dir = msg[i].flags & I2C_M_RD;
				cmd->CmdConfig |= CMDCONFIG_RESTART_MASK;
			}

			req->NumCmds++;

			/*
			 * Insert STOP if we are at the last byte of either last
			 * message for the transaction or the client explicitly
			 * requires a STOP at this particular message.
			 */
			if ((j == msg[i].len - 1) &&
			    ((i == num_msgs - 1) || (msg[i].flags & I2C_M_STOP))) {
				cmd->CmdConfig &= ~CMDCONFIG_RESTART_MASK;
				cmd->CmdConfig |= CMDCONFIG_STOP_MASK;
			}
		}
	}
	mutex_lock(&adev->pm.mutex);
	r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
	mutex_unlock(&adev->pm.mutex);
	if (r)
		goto fail;

	for (c = i = 0; i < num_msgs; i++) {
		if (!(msg[i].flags & I2C_M_RD)) {
			c += msg[i].len;
			continue;
		}
		for (j = 0; j < msg[i].len; j++, c++) {
			SwI2cCmd_t *cmd = &res->SwI2cCmds[c];

			msg[i].buf[j] = cmd->ReadWriteData;
		}
	}
	r = num_msgs;
fail:
	kfree(req);
	return r;
}

static u32 smu_v13_0_0_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm smu_v13_0_0_i2c_algo = {
	.master_xfer = smu_v13_0_0_i2c_xfer,
	.functionality = smu_v13_0_0_i2c_func,
};

static const struct i2c_adapter_quirks smu_v13_0_0_i2c_control_quirks = {
	.flags = I2C_AQ_COMB | I2C_AQ_COMB_SAME_ADDR | I2C_AQ_NO_ZERO_LEN,
	.max_read_len  = MAX_SW_I2C_COMMANDS,
	.max_write_len = MAX_SW_I2C_COMMANDS,
	.max_comb_1st_msg_len = 2,
	.max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2,
};

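/*
 * Register one i2c_adapter per SMU-controlled bus and remember which
 * buses back the RAS and FRU EEPROMs. On failure, unregister only the
 * adapters that were successfully added.
 */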
static int smu_v13_0_0_i2c_control_init(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int res, i;

	for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
		struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
		struct i2c_adapter *control = &smu_i2c->adapter;

		smu_i2c->adev = adev;
		smu_i2c->port = i;
		rw_init(&smu_i2c->mutex, "smu13iic");
#ifdef __linux__
		control->owner = THIS_MODULE;
		control->class = I2C_CLASS_SPD;
		control->dev.parent = &adev->pdev->dev;
#endif
		control->algo = &smu_v13_0_0_i2c_algo;
		snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
		control->quirks = &smu_v13_0_0_i2c_control_quirks;
		i2c_set_adapdata(control, smu_i2c);

		res = i2c_add_adapter(control);
		if (res) {
			DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
			goto Out_err;
		}
	}

	/* assign the buses used for the FRU EEPROM and RAS EEPROM */
	/* XXX ideally this would be something in a vbios data table */
	adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter;
	adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;

	return 0;
Out_err:
	/* the adapter at index i was never registered; unwind only earlier ones */
	for (i--; i >= 0; i--) {
		struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
		struct i2c_adapter *control = &smu_i2c->adapter;

		i2c_del_adapter(control);
	}
	return res;
}

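/* Unregister the SMU I2C buses and drop the cached EEPROM bus pointers. */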
static void smu_v13_0_0_i2c_control_fini(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int i;

	for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
		struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
		struct i2c_adapter *control = &smu_i2c->adapter;

		i2c_del_adapter(control);
	}
	adev->pm.ras_eeprom_i2c_bus = NULL;
	adev->pm.fru_eeprom_i2c_bus = NULL;
}

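/*
 * Only PP_MP1_STATE_UNLOAD needs to be forwarded to the PMFW here;
 * all other MP1 state requests are accepted as no-ops.
 */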
static int smu_v13_0_0_set_mp1_state(struct smu_context *smu,
				     enum pp_mp1_state mp1_state)
{
	int ret;

	switch (mp1_state) {
	case PP_MP1_STATE_UNLOAD:
		ret = smu_cmn_set_mp1_state(smu, mp1_state);
		break;
	default:
		/* Ignore others */
		ret = 0;
	}

	return ret;
}

static int smu_v13_0_0_set_df_cstate(struct smu_context *smu,
				     enum pp_df_cstate state)
{
	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_DFCstateControl,
					       state,
					       NULL);
}

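/*
 * ASIC-specific callback table for SMU v13.0.0: entries prefixed
 * smu_v13_0_0_ are local overrides; the rest fall through to the common
 * smu_v13_0_ and smu_cmn_ implementations.
 */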
static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
	.get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask,
	.set_default_dpm_table = smu_v13_0_0_set_default_dpm_table,
	.i2c_init = smu_v13_0_0_i2c_control_init,
	.i2c_fini = smu_v13_0_0_i2c_control_fini,
	.is_dpm_running = smu_v13_0_0_is_dpm_running,
	.dump_pptable = smu_v13_0_0_dump_pptable,
	.init_microcode = smu_v13_0_init_microcode,
	.load_microcode = smu_v13_0_load_microcode,
	.fini_microcode = smu_v13_0_fini_microcode,
	.init_smc_tables = smu_v13_0_0_init_smc_tables,
	.fini_smc_tables = smu_v13_0_fini_smc_tables,
	.init_power = smu_v13_0_init_power,
	.fini_power = smu_v13_0_fini_power,
	.check_fw_status = smu_v13_0_check_fw_status,
	.setup_pptable = smu_v13_0_0_setup_pptable,
	.check_fw_version = smu_v13_0_check_fw_version,
	.write_pptable = smu_cmn_write_pptable,
	.set_driver_table_location = smu_v13_0_set_driver_table_location,
	.system_features_control = smu_v13_0_0_system_features_control,
	.set_allowed_mask = smu_v13_0_set_allowed_mask,
	.get_enabled_mask = smu_cmn_get_enabled_mask,
	.dpm_set_vcn_enable = smu_v13_0_set_vcn_enable,
	.dpm_set_jpeg_enable = smu_v13_0_set_jpeg_enable,
	.get_dpm_ultimate_freq = smu_v13_0_0_get_dpm_ultimate_freq,
	.get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
	.read_sensor = smu_v13_0_0_read_sensor,
	.feature_is_enabled = smu_cmn_feature_is_enabled,
	.print_clk_levels = smu_v13_0_0_print_clk_levels,
	.force_clk_levels = smu_v13_0_0_force_clk_levels,
	.update_pcie_parameters = smu_v13_0_0_update_pcie_parameters,
	.get_thermal_temperature_range = smu_v13_0_0_get_thermal_temperature_range,
	.register_irq_handler = smu_v13_0_register_irq_handler,
	.enable_thermal_alert = smu_v13_0_enable_thermal_alert,
	.disable_thermal_alert = smu_v13_0_disable_thermal_alert,
	.notify_memory_pool_location = smu_v13_0_notify_memory_pool_location,
	.get_gpu_metrics = smu_v13_0_0_get_gpu_metrics,
	.set_soft_freq_limited_range = smu_v13_0_set_soft_freq_limited_range,
	.init_pptable_microcode = smu_v13_0_init_pptable_microcode,
	.populate_umd_state_clk = smu_v13_0_0_populate_umd_state_clk,
	.set_performance_level = smu_v13_0_set_performance_level,
	.gfx_off_control = smu_v13_0_gfx_off_control,
	.get_unique_id = smu_v13_0_0_get_unique_id,
	.get_fan_speed_pwm = smu_v13_0_0_get_fan_speed_pwm,
	.get_fan_speed_rpm = smu_v13_0_0_get_fan_speed_rpm,
	.set_fan_speed_pwm = smu_v13_0_set_fan_speed_pwm,
	.set_fan_speed_rpm = smu_v13_0_set_fan_speed_rpm,
	.get_fan_control_mode = smu_v13_0_get_fan_control_mode,
	.set_fan_control_mode = smu_v13_0_set_fan_control_mode,
	.enable_mgpu_fan_boost = smu_v13_0_0_enable_mgpu_fan_boost,
	.get_power_limit = smu_v13_0_0_get_power_limit,
	.set_power_limit = smu_v13_0_set_power_limit,
	.set_power_source = smu_v13_0_set_power_source,
	.get_power_profile_mode = smu_v13_0_0_get_power_profile_mode,
	.set_power_profile_mode = smu_v13_0_0_set_power_profile_mode,
	.run_btc = smu_v13_0_run_btc,
	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
	.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
	.set_tool_table_location = smu_v13_0_set_tool_table_location,
	.deep_sleep_control = smu_v13_0_deep_sleep_control,
	.gfx_ulv_control = smu_v13_0_gfx_ulv_control,
	.baco_is_support = smu_v13_0_baco_is_support,
	.baco_get_state = smu_v13_0_baco_get_state,
	.baco_set_state = smu_v13_0_baco_set_state,
	.baco_enter = smu_v13_0_0_baco_enter,
	.baco_exit = smu_v13_0_0_baco_exit,
	.mode1_reset_is_support = smu_v13_0_0_is_mode1_reset_supported,
	.mode1_reset = smu_v13_0_mode1_reset,
	.set_mp1_state = smu_v13_0_0_set_mp1_state,
	.set_df_cstate = smu_v13_0_0_set_df_cstate,
};

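/*
 * Hook up the v13.0.0 callback and cmn2asic mapping tables, then point
 * the common code at the SMU mailbox registers for this ASIC.
 */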
void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
{
	smu->ppt_funcs = &smu_v13_0_0_ppt_funcs;
	smu->message_map = smu_v13_0_0_message_map;
	smu->clock_map = smu_v13_0_0_clk_map;
	smu->feature_map = smu_v13_0_0_feature_mask_map;
	smu->table_map = smu_v13_0_0_table_map;
	smu->pwr_src_map = smu_v13_0_0_pwr_src_map;
	smu->workload_map = smu_v13_0_0_workload_map;
	smu_v13_0_set_smu_mailbox_registers(smu);
}