/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define SWSMU_CODE_LAYER_L2

#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_atombios.h"
#include "smu_v13_0_6_pmfw.h"
#include "smu13_driver_if_v13_0_6.h"
#include "smu_v13_0_6_ppsmc.h"
#include "soc15_common.h"
#include "atom.h"
#include "power_state.h"
#include "smu_v13_0.h"
#include "smu_v13_0_6_ppt.h"
#include "nbio/nbio_7_4_offset.h"
#include "nbio/nbio_7_4_sh_mask.h"
#include "thm/thm_11_0_2_offset.h"
#include "thm/thm_11_0_2_sh_mask.h"
#include "amdgpu_xgmi.h"
#include <linux/pci.h>
#include "amdgpu_ras.h"
#include "smu_cmn.h"
#include "mp/mp_13_0_6_offset.h"
#include "mp/mp_13_0_6_sh_mask.h"

#undef MP1_Public
#undef smnMP1_FIRMWARE_FLAGS

/* TODO: Check final register offsets */
#define MP1_Public 0x03b00000
#define smnMP1_FIRMWARE_FLAGS 0x3010028
/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))

#define SMU_13_0_6_FEA_MAP(smu_feature, smu_13_0_6_feature)                    \
	[smu_feature] = { 1, (smu_13_0_6_feature) }

#define FEATURE_MASK(feature) (1ULL << feature)
#define SMC_DPM_FEATURE                                                        \
	(FEATURE_MASK(FEATURE_DATA_CALCULATION) |                              \
	 FEATURE_MASK(FEATURE_DPM_GFXCLK) | FEATURE_MASK(FEATURE_DPM_UCLK) |   \
	 FEATURE_MASK(FEATURE_DPM_SOCCLK) | FEATURE_MASK(FEATURE_DPM_FCLK) |   \
	 FEATURE_MASK(FEATURE_DPM_LCLK) | FEATURE_MASK(FEATURE_DPM_XGMI) |     \
	 FEATURE_MASK(FEATURE_DPM_VCN))

/* possible frequency drift (1 MHz) */
#define EPSILON 1

#define smnPCIE_ESM_CTRL 0x111003D0

static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COUNT] = {
	MSG_MAP(TestMessage,			     PPSMC_MSG_TestMessage,			0),
	MSG_MAP(GetSmuVersion,			     PPSMC_MSG_GetSmuVersion,			1),
	MSG_MAP(GetDriverIfVersion,		     PPSMC_MSG_GetDriverIfVersion,		1),
	MSG_MAP(EnableAllSmuFeatures,		     PPSMC_MSG_EnableAllSmuFeatures,		1),
	MSG_MAP(DisableAllSmuFeatures,		     PPSMC_MSG_DisableAllSmuFeatures,		1),
	MSG_MAP(RequestI2cTransaction,		     PPSMC_MSG_RequestI2cTransaction,		0),
	MSG_MAP(GetMetricsTable,		     PPSMC_MSG_GetMetricsTable,			1),
	MSG_MAP(GetEnabledSmuFeaturesHigh,	     PPSMC_MSG_GetEnabledSmuFeaturesHigh,	1),
	MSG_MAP(GetEnabledSmuFeaturesLow,	     PPSMC_MSG_GetEnabledSmuFeaturesLow,	1),
	MSG_MAP(SetDriverDramAddrHigh,		     PPSMC_MSG_SetDriverDramAddrHigh,		1),
	MSG_MAP(SetDriverDramAddrLow,		     PPSMC_MSG_SetDriverDramAddrLow,		1),
	MSG_MAP(SetToolsDramAddrHigh,		     PPSMC_MSG_SetToolsDramAddrHigh,		0),
	MSG_MAP(SetToolsDramAddrLow,		     PPSMC_MSG_SetToolsDramAddrLow,		0),
	MSG_MAP(SetSoftMinByFreq,		     PPSMC_MSG_SetSoftMinByFreq,		0),
	MSG_MAP(SetSoftMaxByFreq,		     PPSMC_MSG_SetSoftMaxByFreq,		0),
	MSG_MAP(GetMinDpmFreq,			     PPSMC_MSG_GetMinDpmFreq,			0),
	MSG_MAP(GetMaxDpmFreq,			     PPSMC_MSG_GetMaxDpmFreq,			0),
	MSG_MAP(GetDpmFreqByIndex,		     PPSMC_MSG_GetDpmFreqByIndex,		1),
	MSG_MAP(SetPptLimit,			     PPSMC_MSG_SetPptLimit,			0),
	MSG_MAP(GetPptLimit,			     PPSMC_MSG_GetPptLimit,			1),
	MSG_MAP(GfxDeviceDriverReset,		     PPSMC_MSG_GfxDriverReset,			0),
	MSG_MAP(DramLogSetDramAddrHigh,		     PPSMC_MSG_DramLogSetDramAddrHigh,		0),
	MSG_MAP(DramLogSetDramAddrLow,		     PPSMC_MSG_DramLogSetDramAddrLow,		0),
	MSG_MAP(DramLogSetDramSize,		     PPSMC_MSG_DramLogSetDramSize,		0),
	MSG_MAP(GetDebugData,			     PPSMC_MSG_GetDebugData,			0),
	MSG_MAP(SetNumBadHbmPagesRetired,	     PPSMC_MSG_SetNumBadHbmPagesRetired,	0),
	MSG_MAP(DFCstateControl,		     PPSMC_MSG_DFCstateControl,			0),
	MSG_MAP(GetGmiPwrDnHyst,		     PPSMC_MSG_GetGmiPwrDnHyst,			0),
	MSG_MAP(SetGmiPwrDnHyst,		     PPSMC_MSG_SetGmiPwrDnHyst,			0),
	MSG_MAP(GmiPwrDnControl,		     PPSMC_MSG_GmiPwrDnControl,			0),
	MSG_MAP(EnterGfxoff,			     PPSMC_MSG_EnterGfxoff,			0),
	MSG_MAP(ExitGfxoff,			     PPSMC_MSG_ExitGfxoff,			0),
	MSG_MAP(EnableDeterminism,		     PPSMC_MSG_EnableDeterminism,		0),
	MSG_MAP(DisableDeterminism,		     PPSMC_MSG_DisableDeterminism,		0),
	MSG_MAP(GfxDriverResetRecovery,		     PPSMC_MSG_GfxDriverResetRecovery,		0),
	MSG_MAP(GetMinGfxclkFrequency,               PPSMC_MSG_GetMinGfxDpmFreq,                0),
	MSG_MAP(GetMaxGfxclkFrequency,               PPSMC_MSG_GetMaxGfxDpmFreq,                0),
	MSG_MAP(SetSoftMinGfxclk,                    PPSMC_MSG_SetSoftMinGfxClk,                0),
	MSG_MAP(SetSoftMaxGfxClk,                    PPSMC_MSG_SetSoftMaxGfxClk,                0),
};

static const struct cmn2asic_mapping smu_v13_0_6_clk_map[SMU_CLK_COUNT] = {
	CLK_MAP(SOCCLK, PPCLK_SOCCLK),
	CLK_MAP(FCLK, PPCLK_FCLK),
	CLK_MAP(UCLK, PPCLK_UCLK),
	CLK_MAP(MCLK, PPCLK_UCLK),
	CLK_MAP(DCLK, PPCLK_DCLK),
	CLK_MAP(VCLK, PPCLK_VCLK),
	CLK_MAP(LCLK, PPCLK_LCLK),
};

static const struct cmn2asic_mapping smu_v13_0_6_feature_mask_map[SMU_FEATURE_COUNT] = {
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DATA_CALCULATIONS_BIT, 		FEATURE_DATA_CALCULATION),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_GFXCLK_BIT, 			FEATURE_DPM_GFXCLK),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_UCLK_BIT, 			FEATURE_DPM_UCLK),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_SOCCLK_BIT, 			FEATURE_DPM_SOCCLK),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_FCLK_BIT, 			FEATURE_DPM_FCLK),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_LCLK_BIT, 			FEATURE_DPM_LCLK),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_VCLK_BIT,			FEATURE_DPM_VCN),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_DCLK_BIT,			FEATURE_DPM_VCN),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_XGMI_BIT, 			FEATURE_DPM_XGMI),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_GFXCLK_BIT, 			FEATURE_DS_GFXCLK),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_SOCCLK_BIT, 			FEATURE_DS_SOCCLK),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_LCLK_BIT, 			FEATURE_DS_LCLK),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_FCLK_BIT, 			FEATURE_DS_FCLK),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_VCN_DPM_BIT, 			FEATURE_DPM_VCN),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_PPT_BIT, 			FEATURE_PPT),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_TDC_BIT, 			FEATURE_TDC),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_APCC_DFLL_BIT, 			FEATURE_APCC_DFLL),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_MP1_CG_BIT, 			FEATURE_SMU_CG),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_GFXOFF_BIT, 			FEATURE_GFXOFF),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_FW_CTF_BIT, 			FEATURE_FW_CTF),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_THERMAL_BIT, 			FEATURE_THERMAL),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT,	FEATURE_XGMI_PER_LINK_PWR_DOWN),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DF_CSTATE_BIT, 			FEATURE_DF_CSTATE),
};

#define TABLE_PMSTATUSLOG             0
#define TABLE_SMU_METRICS             1
#define TABLE_I2C_COMMANDS            2
#define TABLE_COUNT                   3

static const struct cmn2asic_mapping smu_v13_0_6_table_map[SMU_TABLE_COUNT] = {
	TAB_MAP(PMSTATUSLOG),
	TAB_MAP(SMU_METRICS),
	TAB_MAP(I2C_COMMANDS),
};

#define THROTTLER_PROCHOT_GFX_BIT  0
#define THROTTLER_PPT_BIT 1
#define THROTTLER_TEMP_SOC_BIT 2
#define THROTTLER_TEMP_VR_GFX_BIT 3
#define THROTTLER_TEMP_HBM_BIT 4

static const uint8_t smu_v13_0_6_throttler_map[] = {
	[THROTTLER_PPT_BIT]		= (SMU_THROTTLER_PPT0_BIT),
	[THROTTLER_TEMP_SOC_BIT]	= (SMU_THROTTLER_TEMP_GPU_BIT),
	[THROTTLER_TEMP_HBM_BIT]	= (SMU_THROTTLER_TEMP_MEM_BIT),
	[THROTTLER_TEMP_VR_GFX_BIT]	= (SMU_THROTTLER_TEMP_VR_GFX_BIT),
	[THROTTLER_PROCHOT_GFX_BIT]	= (SMU_THROTTLER_PROCHOT_GFX_BIT),
};

struct PPTable_t {
	uint32_t MaxSocketPowerLimit;
	uint32_t MaxGfxclkFrequency;
	uint32_t MinGfxclkFrequency;
	uint32_t FclkFrequencyTable[4];
	uint32_t UclkFrequencyTable[4];
	uint32_t SocclkFrequencyTable[4];
	uint32_t VclkFrequencyTable[4];
	uint32_t DclkFrequencyTable[4];
	uint32_t LclkFrequencyTable[4];
	uint32_t MaxLclkDpmRange;
	uint32_t MinLclkDpmRange;
	bool Init;
};

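/* Convert a Q10.10 fixed-point value from FW to an integer by dropping the
 * 10 fractional bits, e.g. 10240 (10.0 in Q10) becomes 10.
 */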
#define SMUQ10_TO_UINT(x) ((x) >> 10)

struct smu_v13_0_6_dpm_map {
	enum smu_clk_type clk_type;
	uint32_t feature_num;
	struct smu_13_0_dpm_table *dpm_table;
	uint32_t *freq_table;
};

static int smu_v13_0_6_tables_init(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct amdgpu_device *adev = smu->adev;

	if (!(adev->flags & AMD_IS_APU))
		SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE,
			       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);

	SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(MetricsTable_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);

	SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);

	smu_table->metrics_table = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
	if (!smu_table->metrics_table)
		return -ENOMEM;
	smu_table->metrics_time = 0;

	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
	smu_table->gpu_metrics_table =
		kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
	if (!smu_table->gpu_metrics_table) {
		kfree(smu_table->metrics_table);
		return -ENOMEM;
	}

	smu_table->driver_pptable =
		kzalloc(sizeof(struct PPTable_t), GFP_KERNEL);
	if (!smu_table->driver_pptable) {
		kfree(smu_table->metrics_table);
		kfree(smu_table->gpu_metrics_table);
		return -ENOMEM;
	}

	return 0;
}

static int smu_v13_0_6_allocate_dpm_context(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	smu_dpm->dpm_context =
		kzalloc(sizeof(struct smu_13_0_dpm_context), GFP_KERNEL);
	if (!smu_dpm->dpm_context)
		return -ENOMEM;
	smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context);

	return 0;
}

static int smu_v13_0_6_init_smc_tables(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_v13_0_6_tables_init(smu);
	if (ret)
		return ret;

	ret = smu_v13_0_6_allocate_dpm_context(smu);

	return ret;
}

static int smu_v13_0_6_get_allowed_feature_mask(struct smu_context *smu,
						uint32_t *feature_mask,
						uint32_t num)
{
	if (num > 2)
		return -EINVAL;

	/* pptable will handle the features to enable */
	memset(feature_mask, 0xFF, sizeof(uint32_t) * num);

	return 0;
}

static int smu_v13_0_6_get_metrics_table(struct smu_context *smu,
					 void *metrics_table, bool bypass_cache)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size;
	struct smu_table *table = &smu_table->driver_table;
	int ret;

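	/* Refresh the cached copy when the caller bypasses the cache or the
	 * cached table is stale (older than 1 ms).
	 */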
	if (bypass_cache || !smu_table->metrics_time ||
	    time_after(jiffies,
		       smu_table->metrics_time + msecs_to_jiffies(1))) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMetricsTable, NULL);
		if (ret) {
			dev_info(smu->adev->dev,
				 "Failed to export SMU metrics table!\n");
			return ret;
		}

		amdgpu_asic_invalidate_hdp(smu->adev, NULL);
		memcpy(smu_table->metrics_table, table->cpu_addr, table_size);

		smu_table->metrics_time = jiffies;
	}

	if (metrics_table)
		memcpy(metrics_table, smu_table->metrics_table, table_size);

	return 0;
}

static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
	struct PPTable_t *pptable =
		(struct PPTable_t *)smu_table->driver_pptable;
	int ret;
	int i;

	/* Store one-time values in driver PPTable */
	if (!pptable->Init) {
		ret = smu_v13_0_6_get_metrics_table(smu, NULL, false);
		if (ret)
			return ret;

		pptable->MaxSocketPowerLimit =
			SMUQ10_TO_UINT(metrics->MaxSocketPowerLimit);
		pptable->MaxGfxclkFrequency =
			SMUQ10_TO_UINT(metrics->MaxGfxclkFrequency);
		pptable->MinGfxclkFrequency =
			SMUQ10_TO_UINT(metrics->MinGfxclkFrequency);

		for (i = 0; i < 4; ++i) {
			pptable->FclkFrequencyTable[i] =
				SMUQ10_TO_UINT(metrics->FclkFrequencyTable[i]);
			pptable->UclkFrequencyTable[i] =
				SMUQ10_TO_UINT(metrics->UclkFrequencyTable[i]);
			pptable->SocclkFrequencyTable[i] = SMUQ10_TO_UINT(
				metrics->SocclkFrequencyTable[i]);
			pptable->VclkFrequencyTable[i] =
				SMUQ10_TO_UINT(metrics->VclkFrequencyTable[i]);
			pptable->DclkFrequencyTable[i] =
				SMUQ10_TO_UINT(metrics->DclkFrequencyTable[i]);
			pptable->LclkFrequencyTable[i] =
				SMUQ10_TO_UINT(metrics->LclkFrequencyTable[i]);
		}

		pptable->Init = true;
	}

	return 0;
}

static int smu_v13_0_6_get_dpm_ultimate_freq(struct smu_context *smu,
					     enum smu_clk_type clk_type,
					     uint32_t *min, uint32_t *max)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct PPTable_t *pptable =
		(struct PPTable_t *)smu_table->driver_pptable;
	uint32_t clock_limit = 0, param;
	int ret = 0, clk_id = 0;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
		switch (clk_type) {
		case SMU_MCLK:
		case SMU_UCLK:
			if (pptable->Init)
				clock_limit = pptable->UclkFrequencyTable[0];
			break;
		case SMU_GFXCLK:
		case SMU_SCLK:
			if (pptable->Init)
				clock_limit = pptable->MinGfxclkFrequency;
			break;
		case SMU_SOCCLK:
			if (pptable->Init)
				clock_limit = pptable->SocclkFrequencyTable[0];
			break;
		case SMU_FCLK:
			if (pptable->Init)
				clock_limit = pptable->FclkFrequencyTable[0];
			break;
		case SMU_VCLK:
			if (pptable->Init)
				clock_limit = pptable->VclkFrequencyTable[0];
			break;
		case SMU_DCLK:
			if (pptable->Init)
				clock_limit = pptable->DclkFrequencyTable[0];
			break;
		default:
			break;
		}

		if (min)
			*min = clock_limit;

		if (max)
			*max = clock_limit;

		return 0;
	}

	if (!(clk_type == SMU_GFXCLK || clk_type == SMU_SCLK)) {
		clk_id = smu_cmn_to_asic_specific_index(
			smu, CMN2ASIC_MAPPING_CLK, clk_type);
		if (clk_id < 0) {
			ret = -EINVAL;
			goto failed;
		}
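		/* Non-gfx DPM freq queries carry the clock ID in the upper
		 * 16 bits of the message argument.
		 */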
		param = (clk_id & 0xffff) << 16;
	}

	if (max) {
		if (clk_type == SMU_GFXCLK || clk_type == SMU_SCLK)
			ret = smu_cmn_send_smc_msg(
				smu, SMU_MSG_GetMaxGfxclkFrequency, max);
		else
			ret = smu_cmn_send_smc_msg_with_param(
				smu, SMU_MSG_GetMaxDpmFreq, param, max);
		if (ret)
			goto failed;
	}

	if (min) {
		if (clk_type == SMU_GFXCLK || clk_type == SMU_SCLK)
			ret = smu_cmn_send_smc_msg(
				smu, SMU_MSG_GetMinGfxclkFrequency, min);
		else
			ret = smu_cmn_send_smc_msg_with_param(
				smu, SMU_MSG_GetMinDpmFreq, param, min);
	}

failed:
	return ret;
}

static int smu_v13_0_6_get_dpm_level_count(struct smu_context *smu,
					  enum smu_clk_type clk_type,
					  uint32_t *levels)
{
	int ret;

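	/* Querying index 0xff returns the index of the highest DPM level;
	 * the level count is that index plus one.
	 */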
	ret = smu_v13_0_get_dpm_freq_by_index(smu, clk_type, 0xff, levels);
	if (!ret)
		++(*levels);

	return ret;
}

static int smu_v13_0_6_set_default_dpm_table(struct smu_context *smu)
{
	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_13_0_dpm_table *dpm_table = NULL;
	struct PPTable_t *pptable =
		(struct PPTable_t *)smu_table->driver_pptable;
	uint32_t gfxclkmin, gfxclkmax, levels;
	int ret = 0, i, j;
	struct smu_v13_0_6_dpm_map dpm_map[] = {
		{ SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT,
		  &dpm_context->dpm_tables.soc_table,
		  pptable->SocclkFrequencyTable },
		{ SMU_UCLK, SMU_FEATURE_DPM_UCLK_BIT,
		  &dpm_context->dpm_tables.uclk_table,
		  pptable->UclkFrequencyTable },
		{ SMU_FCLK, SMU_FEATURE_DPM_FCLK_BIT,
		  &dpm_context->dpm_tables.fclk_table,
		  pptable->FclkFrequencyTable },
		{ SMU_VCLK, SMU_FEATURE_DPM_VCLK_BIT,
		  &dpm_context->dpm_tables.vclk_table,
		  pptable->VclkFrequencyTable },
		{ SMU_DCLK, SMU_FEATURE_DPM_DCLK_BIT,
		  &dpm_context->dpm_tables.dclk_table,
		  pptable->DclkFrequencyTable },
	};

	smu_v13_0_6_setup_driver_pptable(smu);

	/* gfxclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.gfx_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
		/* In the case of gfxclk, only fine-grained dpm is honored.
		 * Get min/max values from FW.
		 */
		ret = smu_v13_0_6_get_dpm_ultimate_freq(smu, SMU_GFXCLK,
							&gfxclkmin, &gfxclkmax);
		if (ret)
			return ret;

		dpm_table->count = 2;
		dpm_table->dpm_levels[0].value = gfxclkmin;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->dpm_levels[1].value = gfxclkmax;
		dpm_table->dpm_levels[1].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[1].value;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = pptable->MinGfxclkFrequency;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

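	/* For the remaining clocks, use the level count reported by FW when
	 * the DPM feature is enabled; otherwise expose a single level from
	 * the cached frequency tables.
	 */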
	for (j = 0; j < ARRAY_SIZE(dpm_map); j++) {
		dpm_table = dpm_map[j].dpm_table;
		levels = 1;
		if (smu_cmn_feature_is_enabled(smu, dpm_map[j].feature_num)) {
			ret = smu_v13_0_6_get_dpm_level_count(
				smu, dpm_map[j].clk_type, &levels);
			if (ret)
				return ret;
		}
		dpm_table->count = levels;
		for (i = 0; i < dpm_table->count; ++i) {
			dpm_table->dpm_levels[i].value =
				dpm_map[j].freq_table[i];
			dpm_table->dpm_levels[i].enabled = true;
		}
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[levels - 1].value;
	}

	return 0;
}

static int smu_v13_0_6_setup_pptable(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;

	/* TODO: PPTable is not available.
	 * 1) Find an alternate way to get 'PPTable values' here.
	 * 2) Check if there is SW CTF
	 */
	table_context->thermal_controller_type = 0;

	return 0;
}

static int smu_v13_0_6_check_fw_status(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t mp1_fw_flags;

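	/* MP1 firmware is considered ready once it reports interrupts enabled */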
	mp1_fw_flags =
		RREG32_PCIE(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff));

	if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
	    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
		return 0;

	return -EIO;
}

static int smu_v13_0_6_populate_umd_state_clk(struct smu_context *smu)
{
	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
	struct smu_13_0_dpm_table *gfx_table =
		&dpm_context->dpm_tables.gfx_table;
	struct smu_13_0_dpm_table *mem_table =
		&dpm_context->dpm_tables.uclk_table;
	struct smu_13_0_dpm_table *soc_table =
		&dpm_context->dpm_tables.soc_table;
	struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;

	pstate_table->gfxclk_pstate.min = gfx_table->min;
	pstate_table->gfxclk_pstate.peak = gfx_table->max;
	pstate_table->gfxclk_pstate.curr.min = gfx_table->min;
	pstate_table->gfxclk_pstate.curr.max = gfx_table->max;

	pstate_table->uclk_pstate.min = mem_table->min;
	pstate_table->uclk_pstate.peak = mem_table->max;
	pstate_table->uclk_pstate.curr.min = mem_table->min;
	pstate_table->uclk_pstate.curr.max = mem_table->max;

	pstate_table->socclk_pstate.min = soc_table->min;
	pstate_table->socclk_pstate.peak = soc_table->max;
	pstate_table->socclk_pstate.curr.min = soc_table->min;
	pstate_table->socclk_pstate.curr.max = soc_table->max;

	if (gfx_table->count > SMU_13_0_6_UMD_PSTATE_GFXCLK_LEVEL &&
	    mem_table->count > SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL &&
	    soc_table->count > SMU_13_0_6_UMD_PSTATE_SOCCLK_LEVEL) {
		pstate_table->gfxclk_pstate.standard =
			gfx_table->dpm_levels[SMU_13_0_6_UMD_PSTATE_GFXCLK_LEVEL].value;
		pstate_table->uclk_pstate.standard =
			mem_table->dpm_levels[SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL].value;
		pstate_table->socclk_pstate.standard =
			soc_table->dpm_levels[SMU_13_0_6_UMD_PSTATE_SOCCLK_LEVEL].value;
	} else {
		pstate_table->gfxclk_pstate.standard =
			pstate_table->gfxclk_pstate.min;
		pstate_table->uclk_pstate.standard =
			pstate_table->uclk_pstate.min;
		pstate_table->socclk_pstate.standard =
			pstate_table->socclk_pstate.min;
	}

	return 0;
}

static int smu_v13_0_6_get_clk_table(struct smu_context *smu,
				     struct pp_clock_levels_with_latency *clocks,
				     struct smu_13_0_dpm_table *dpm_table)
{
	int i, count;

	count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS :
						      dpm_table->count;
	clocks->num_levels = count;

	for (i = 0; i < count; i++) {
		clocks->data[i].clocks_in_khz =
			dpm_table->dpm_levels[i].value * 1000;
		clocks->data[i].latency_in_us = 0;
	}

	return 0;
}

static int smu_v13_0_6_freqs_in_same_level(int32_t frequency1,
					   int32_t frequency2)
{
	return (abs(frequency1 - frequency2) <= EPSILON);
}

static uint32_t smu_v13_0_6_get_throttler_status(struct smu_context *smu,
						 MetricsTable_t *metrics)
{
	uint32_t throttler_status = 0;

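	/* A non-zero accumulated residency counter indicates the
	 * corresponding throttler has engaged at some point.
	 */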
	throttler_status |= metrics->ProchotResidencyAcc > 0 ? 1U << THROTTLER_PROCHOT_GFX_BIT : 0;
	throttler_status |= metrics->PptResidencyAcc > 0 ? 1U << THROTTLER_PPT_BIT : 0;
	throttler_status |= metrics->SocketThmResidencyAcc > 0 ? 1U << THROTTLER_TEMP_SOC_BIT : 0;
	throttler_status |= metrics->VrThmResidencyAcc > 0 ? 1U << THROTTLER_TEMP_VR_GFX_BIT : 0;
	throttler_status |= metrics->HbmThmResidencyAcc > 0 ? 1U << THROTTLER_TEMP_HBM_BIT : 0;

	return throttler_status;
}

static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
					    MetricsMember_t member,
					    uint32_t *value)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
	int ret = 0;

	ret = smu_v13_0_6_get_metrics_table(smu, NULL, false);
	if (ret)
		return ret;

	/* For clocks with multiple instances, only report the first one */
	switch (member) {
	case METRICS_CURR_GFXCLK:
	case METRICS_AVERAGE_GFXCLK:
		*value = 0;
		break;
	case METRICS_CURR_SOCCLK:
	case METRICS_AVERAGE_SOCCLK:
		*value = SMUQ10_TO_UINT(metrics->SocclkFrequency[0]);
		break;
	case METRICS_CURR_UCLK:
	case METRICS_AVERAGE_UCLK:
		*value = SMUQ10_TO_UINT(metrics->UclkFrequency);
		break;
	case METRICS_CURR_VCLK:
		*value = SMUQ10_TO_UINT(metrics->VclkFrequency[0]);
		break;
	case METRICS_CURR_DCLK:
		*value = SMUQ10_TO_UINT(metrics->DclkFrequency[0]);
		break;
	case METRICS_CURR_FCLK:
		*value = SMUQ10_TO_UINT(metrics->FclkFrequency);
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		*value = SMUQ10_TO_UINT(metrics->SocketGfxBusy);
		break;
	case METRICS_AVERAGE_MEMACTIVITY:
		*value = SMUQ10_TO_UINT(metrics->DramBandwidthUtilization);
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
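		/* Reported in 24.8 fixed-point watts, as expected by the
		 * power sensor interface.
		 */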
		*value = SMUQ10_TO_UINT(metrics->SocketPower) << 8;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = SMUQ10_TO_UINT(metrics->MaxSocketTemperature);
		break;
	case METRICS_TEMPERATURE_MEM:
		*value = SMUQ10_TO_UINT(metrics->MaxHbmTemperature);
		break;
	/* This is the max of all VRs and not just SOC VR.
	 * No need to define another data type for the same.
	 */
	case METRICS_TEMPERATURE_VRSOC:
		*value = SMUQ10_TO_UINT(metrics->MaxVrTemperature);
		break;
	case METRICS_THROTTLER_STATUS:
		*value = smu_v13_0_6_get_throttler_status(smu, metrics);
		break;
	default:
		*value = UINT_MAX;
		break;
	}

	return ret;
}

static int smu_v13_0_6_get_current_clk_freq_by_table(struct smu_context *smu,
						     enum smu_clk_type clk_type,
						     uint32_t *value)
{
	MetricsMember_t member_type;

	if (!value)
		return -EINVAL;

	switch (clk_type) {
	case SMU_GFXCLK:
		member_type = METRICS_CURR_GFXCLK;
		break;
	case SMU_UCLK:
		member_type = METRICS_CURR_UCLK;
		break;
	case SMU_SOCCLK:
		member_type = METRICS_CURR_SOCCLK;
		break;
	case SMU_VCLK:
		member_type = METRICS_CURR_VCLK;
		break;
	case SMU_DCLK:
		member_type = METRICS_CURR_DCLK;
		break;
	case SMU_FCLK:
		member_type = METRICS_CURR_FCLK;
		break;
	default:
		return -EINVAL;
	}

	return smu_v13_0_6_get_smu_metrics_data(smu, member_type, value);
}

static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
					enum smu_clk_type type, char *buf)
{
	int i, now, size = 0;
	int ret = 0;
	struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
	struct pp_clock_levels_with_latency clocks;
	struct smu_13_0_dpm_table *single_dpm_table;
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
	struct smu_13_0_dpm_context *dpm_context = NULL;
	uint32_t display_levels;
	uint32_t freq_values[3] = { 0 };
	uint32_t min_clk, max_clk;

	smu_cmn_get_sysfs_buf(&buf, &size);

	if (amdgpu_ras_intr_triggered()) {
		size += sysfs_emit_at(buf, size, "unavailable\n");
		return size;
	}

	dpm_context = smu_dpm->dpm_context;

	switch (type) {
	case SMU_OD_SCLK:
		size += sysfs_emit_at(buf, size, "%s:\n", "GFXCLK");
		fallthrough;
	case SMU_SCLK:
		ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_GFXCLK,
								&now);
		if (ret) {
			dev_err(smu->adev->dev,
				"Attempt to get current gfx clk Failed!");
			return ret;
		}

		single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
		ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
		if (ret) {
			dev_err(smu->adev->dev,
				"Attempt to get gfx clk levels Failed!");
			return ret;
		}

		display_levels = clocks.num_levels;

		min_clk = pstate_table->gfxclk_pstate.curr.min;
		max_clk = pstate_table->gfxclk_pstate.curr.max;

		freq_values[0] = min_clk;
		freq_values[1] = max_clk;

		/* fine-grained dpm has only 2 levels */
		if (now > min_clk && now < max_clk) {
			display_levels = clocks.num_levels + 1;
			freq_values[2] = max_clk;
			freq_values[1] = now;
		}

		/*
		 * For DPM disabled case, there will be only one clock level.
		 * And it's safe to assume that is always the current clock.
		 */
		if (display_levels == clocks.num_levels) {
			for (i = 0; i < clocks.num_levels; i++)
				size += sysfs_emit_at(
					buf, size, "%d: %uMhz %s\n", i,
					freq_values[i],
					(clocks.num_levels == 1) ?
						"*" :
						(smu_v13_0_6_freqs_in_same_level(
							 freq_values[i], now) ?
							 "*" :
							 ""));
		} else {
			for (i = 0; i < display_levels; i++)
				size += sysfs_emit_at(buf, size,
						      "%d: %uMhz %s\n", i,
						      freq_values[i],
						      i == 1 ? "*" : "");
		}

		break;

	case SMU_OD_MCLK:
		size += sysfs_emit_at(buf, size, "%s:\n", "MCLK");
		fallthrough;
	case SMU_MCLK:
		ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_UCLK,
								&now);
		if (ret) {
			dev_err(smu->adev->dev,
				"Attempt to get current mclk Failed!");
			return ret;
		}

		single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
		ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
		if (ret) {
			dev_err(smu->adev->dev,
				"Attempt to get memory clk levels Failed!");
			return ret;
		}

		for (i = 0; i < clocks.num_levels; i++)
			size += sysfs_emit_at(
				buf, size, "%d: %uMhz %s\n", i,
				clocks.data[i].clocks_in_khz / 1000,
				(clocks.num_levels == 1) ?
					"*" :
					(smu_v13_0_6_freqs_in_same_level(
						 clocks.data[i].clocks_in_khz /
							 1000,
						 now) ?
						 "*" :
						 ""));
		break;

	case SMU_SOCCLK:
		ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_SOCCLK,
								&now);
		if (ret) {
			dev_err(smu->adev->dev,
				"Attempt to get current socclk Failed!");
			return ret;
		}

		single_dpm_table = &(dpm_context->dpm_tables.soc_table);
		ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
		if (ret) {
			dev_err(smu->adev->dev,
				"Attempt to get socclk levels Failed!");
			return ret;
		}

		for (i = 0; i < clocks.num_levels; i++)
			size += sysfs_emit_at(
				buf, size, "%d: %uMhz %s\n", i,
				clocks.data[i].clocks_in_khz / 1000,
				(clocks.num_levels == 1) ?
					"*" :
					(smu_v13_0_6_freqs_in_same_level(
						 clocks.data[i].clocks_in_khz /
							 1000,
						 now) ?
						 "*" :
						 ""));
		break;

	case SMU_FCLK:
		ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_FCLK,
								&now);
		if (ret) {
			dev_err(smu->adev->dev,
				"Attempt to get current fclk Failed!");
			return ret;
		}

		single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
		ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
		if (ret) {
			dev_err(smu->adev->dev,
				"Attempt to get fclk levels Failed!");
			return ret;
		}

		for (i = 0; i < single_dpm_table->count; i++)
			size += sysfs_emit_at(
				buf, size, "%d: %uMhz %s\n", i,
				single_dpm_table->dpm_levels[i].value,
				(clocks.num_levels == 1) ?
					"*" :
					(smu_v13_0_6_freqs_in_same_level(
						 clocks.data[i].clocks_in_khz /
							 1000,
						 now) ?
						 "*" :
						 ""));
		break;

	case SMU_VCLK:
		ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_VCLK,
								&now);
		if (ret) {
			dev_err(smu->adev->dev,
				"Attempt to get current vclk Failed!");
			return ret;
		}

		single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
		ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
		if (ret) {
			dev_err(smu->adev->dev,
				"Attempt to get vclk levels Failed!");
			return ret;
		}

		for (i = 0; i < single_dpm_table->count; i++)
			size += sysfs_emit_at(
				buf, size, "%d: %uMhz %s\n", i,
				single_dpm_table->dpm_levels[i].value,
				(clocks.num_levels == 1) ?
					"*" :
					(smu_v13_0_6_freqs_in_same_level(
						 clocks.data[i].clocks_in_khz /
							 1000,
						 now) ?
						 "*" :
						 ""));
		break;

	case SMU_DCLK:
		ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_DCLK,
								&now);
		if (ret) {
			dev_err(smu->adev->dev,
				"Attempt to get current dclk Failed!");
			return ret;
		}

		single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
		ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
		if (ret) {
			dev_err(smu->adev->dev,
				"Attempt to get dclk levels Failed!");
			return ret;
		}

		for (i = 0; i < single_dpm_table->count; i++)
			size += sysfs_emit_at(
				buf, size, "%d: %uMhz %s\n", i,
				single_dpm_table->dpm_levels[i].value,
				(clocks.num_levels == 1) ?
					"*" :
					(smu_v13_0_6_freqs_in_same_level(
						 clocks.data[i].clocks_in_khz /
							 1000,
						 now) ?
						 "*" :
						 ""));
		break;

	default:
		break;
	}

	return size;
}

static int smu_v13_0_6_upload_dpm_level(struct smu_context *smu, bool max,
					uint32_t feature_mask, uint32_t level)
{
	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
	uint32_t freq;
	int ret = 0;

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
	    (feature_mask & FEATURE_MASK(FEATURE_DPM_GFXCLK))) {
		freq = dpm_context->dpm_tables.gfx_table.dpm_levels[level].value;
		ret = smu_cmn_send_smc_msg_with_param(
			smu,
			(max ? SMU_MSG_SetSoftMaxGfxClk :
			       SMU_MSG_SetSoftMinGfxclk),
			freq & 0xffff, NULL);
		if (ret) {
			dev_err(smu->adev->dev,
				"Failed to set soft %s gfxclk !\n",
				max ? "max" : "min");
			return ret;
		}
	}

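	/* SetSoftMax/MinByFreq take the clock ID in the upper 16 bits and
	 * the frequency in MHz in the lower 16 bits.
	 */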
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
	    (feature_mask & FEATURE_MASK(FEATURE_DPM_UCLK))) {
		freq = dpm_context->dpm_tables.uclk_table.dpm_levels[level]
			       .value;
		ret = smu_cmn_send_smc_msg_with_param(
			smu,
			(max ? SMU_MSG_SetSoftMaxByFreq :
			       SMU_MSG_SetSoftMinByFreq),
			(PPCLK_UCLK << 16) | (freq & 0xffff), NULL);
		if (ret) {
			dev_err(smu->adev->dev,
				"Failed to set soft %s memclk !\n",
				max ? "max" : "min");
			return ret;
		}
	}

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT) &&
	    (feature_mask & FEATURE_MASK(FEATURE_DPM_SOCCLK))) {
		freq = dpm_context->dpm_tables.soc_table.dpm_levels[level].value;
		ret = smu_cmn_send_smc_msg_with_param(
			smu,
			(max ? SMU_MSG_SetSoftMaxByFreq :
			       SMU_MSG_SetSoftMinByFreq),
			(PPCLK_SOCCLK << 16) | (freq & 0xffff), NULL);
		if (ret) {
			dev_err(smu->adev->dev,
				"Failed to set soft %s socclk !\n",
				max ? "max" : "min");
			return ret;
		}
	}

	return ret;
}

static int smu_v13_0_6_force_clk_levels(struct smu_context *smu,
					enum smu_clk_type type, uint32_t mask)
{
	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
	struct smu_13_0_dpm_table *single_dpm_table = NULL;
	uint32_t soft_min_level, soft_max_level;
	int ret = 0;

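	/* The mask is a bitmap of allowed DPM levels: the lowest set bit
	 * selects the soft min level and the highest set bit the soft max.
	 */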
	soft_min_level = mask ? (ffs(mask) - 1) : 0;
	soft_max_level = mask ? (fls(mask) - 1) : 0;

	switch (type) {
	case SMU_SCLK:
		single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
		if (soft_max_level >= single_dpm_table->count) {
			dev_err(smu->adev->dev,
				"Clock level specified %d is over max allowed %d\n",
				soft_max_level, single_dpm_table->count - 1);
			ret = -EINVAL;
			break;
		}

		ret = smu_v13_0_6_upload_dpm_level(
			smu, false, FEATURE_MASK(FEATURE_DPM_GFXCLK),
			soft_min_level);
		if (ret) {
			dev_err(smu->adev->dev,
				"Failed to upload boot level to lowest!\n");
			break;
		}

		ret = smu_v13_0_6_upload_dpm_level(
			smu, true, FEATURE_MASK(FEATURE_DPM_GFXCLK),
			soft_max_level);
		if (ret)
			dev_err(smu->adev->dev,
				"Failed to upload dpm max level to highest!\n");

		break;

	case SMU_MCLK:
	case SMU_SOCCLK:
	case SMU_FCLK:
		/*
		 * Should not arrive here since smu_13_0_6 does not
		 * support mclk/socclk/fclk softmin/softmax settings
		 */
		ret = -EINVAL;
		break;

	default:
		break;
	}

	return ret;
}

static int smu_v13_0_6_get_current_activity_percent(struct smu_context *smu,
						    enum amd_pp_sensors sensor,
						    uint32_t *value)
{
	int ret = 0;

	if (!value)
		return -EINVAL;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		ret = smu_v13_0_6_get_smu_metrics_data(
			smu, METRICS_AVERAGE_GFXACTIVITY, value);
		break;
	case AMDGPU_PP_SENSOR_MEM_LOAD:
		ret = smu_v13_0_6_get_smu_metrics_data(
			smu, METRICS_AVERAGE_MEMACTIVITY, value);
		break;
	default:
		dev_err(smu->adev->dev,
			"Invalid sensor for retrieving clock activity\n");
		return -EINVAL;
	}

	return ret;
}

static int smu_v13_0_6_get_gpu_power(struct smu_context *smu, uint32_t *value)
{
	if (!value)
		return -EINVAL;

	return smu_v13_0_6_get_smu_metrics_data(smu, METRICS_AVERAGE_SOCKETPOWER,
					       value);
}

static int smu_v13_0_6_thermal_get_temperature(struct smu_context *smu,
					       enum amd_pp_sensors sensor,
					       uint32_t *value)
{
	int ret = 0;

	if (!value)
		return -EINVAL;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
		ret = smu_v13_0_6_get_smu_metrics_data(
			smu, METRICS_TEMPERATURE_HOTSPOT, value);
		break;
	case AMDGPU_PP_SENSOR_MEM_TEMP:
		ret = smu_v13_0_6_get_smu_metrics_data(
			smu, METRICS_TEMPERATURE_MEM, value);
		break;
	default:
		dev_err(smu->adev->dev, "Invalid sensor for retrieving temp\n");
		return -EINVAL;
	}

	return ret;
}

static int smu_v13_0_6_read_sensor(struct smu_context *smu,
				   enum amd_pp_sensors sensor, void *data,
				   uint32_t *size)
{
	int ret = 0;

	if (amdgpu_ras_intr_triggered())
		return 0;

	if (!data || !size)
		return -EINVAL;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_MEM_LOAD:
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		ret = smu_v13_0_6_get_current_activity_percent(smu, sensor,
							       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_POWER:
		ret = smu_v13_0_6_get_gpu_power(smu, (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
	case AMDGPU_PP_SENSOR_MEM_TEMP:
		ret = smu_v13_0_6_thermal_get_temperature(smu, sensor,
							  (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		ret = smu_v13_0_6_get_current_clk_freq_by_table(
			smu, SMU_UCLK, (uint32_t *)data);
		/* the output clock frequency is in units of 10 KHz */
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		ret = smu_v13_0_6_get_current_clk_freq_by_table(
			smu, SMU_GFXCLK, (uint32_t *)data);
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		ret = smu_v13_0_get_gfx_vdd(smu, (uint32_t *)data);
		*size = 4;
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

static int smu_v13_0_6_get_power_limit(struct smu_context *smu,
				       uint32_t *current_power_limit,
				       uint32_t *default_power_limit,
				       uint32_t *max_power_limit)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct PPTable_t *pptable =
		(struct PPTable_t *)smu_table->driver_pptable;
	uint32_t power_limit = 0;
	int ret;

	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
		if (current_power_limit)
			*current_power_limit = 0;
		if (default_power_limit)
			*default_power_limit = 0;
		if (max_power_limit)
			*max_power_limit = 0;

		dev_warn(
			smu->adev->dev,
			"PPT feature is not enabled, power values can't be fetched.");

		return 0;
	}

	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetPptLimit, &power_limit);

	if (ret) {
		dev_err(smu->adev->dev, "Couldn't get PPT limit");
		return -EINVAL;
	}

	if (current_power_limit)
		*current_power_limit = power_limit;
	if (default_power_limit)
		*default_power_limit = power_limit;

	if (max_power_limit)
		*max_power_limit = pptable->MaxSocketPowerLimit;

	return 0;
}

static int smu_v13_0_6_set_power_limit(struct smu_context *smu,
				       enum smu_ppt_limit_type limit_type,
				       uint32_t limit)
{
	return smu_v13_0_set_power_limit(smu, limit_type, limit);
}

static int smu_v13_0_6_system_features_control(struct smu_context *smu,
					       bool enable)
{
	int ret;

	/* Nothing to be done for APU */
	if (smu->adev->flags & AMD_IS_APU)
		return 0;

	ret = smu_v13_0_system_features_control(smu, enable);

	return ret;
}

static int smu_v13_0_6_set_gfx_soft_freq_limited_range(struct smu_context *smu,
						       uint32_t min,
						       uint32_t max)
{
	int ret;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
					      max & 0xffff, NULL);
	if (ret)
		return ret;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinGfxclk,
					      min & 0xffff, NULL);

	return ret;
}

static int smu_v13_0_6_set_performance_level(struct smu_context *smu,
					     enum amd_dpm_forced_level level)
{
	struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
	struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	struct smu_13_0_dpm_table *gfx_table =
		&dpm_context->dpm_tables.gfx_table;
	struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
	int ret;

	/* Disable determinism if switching to another mode */
	if ((smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) &&
	    (level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)) {
		smu_cmn_send_smc_msg(smu, SMU_MSG_DisableDeterminism, NULL);
		pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
	}

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM:
		return 0;

	case AMD_DPM_FORCED_LEVEL_AUTO:
		if ((gfx_table->min == pstate_table->gfxclk_pstate.curr.min) &&
		    (gfx_table->max == pstate_table->gfxclk_pstate.curr.max))
			return 0;

		ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(
			smu, gfx_table->min, gfx_table->max);
		if (ret)
			return ret;

		pstate_table->gfxclk_pstate.curr.min = gfx_table->min;
		pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
		return 0;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
		return 0;
	default:
		break;
	}

	return -EINVAL;
}

static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu,
						   enum smu_clk_type clk_type,
						   uint32_t min, uint32_t max)
{
	struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
	struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
	struct amdgpu_device *adev = smu->adev;
	uint32_t min_clk;
	uint32_t max_clk;
	int ret = 0;

	if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK)
		return -EINVAL;

	if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) &&
	    (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM))
		return -EINVAL;

	if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
		if (min >= max) {
			dev_err(smu->adev->dev,
				"Minimum GFX clk should be less than the maximum allowed clock\n");
			return -EINVAL;
		}

		if ((min == pstate_table->gfxclk_pstate.curr.min) &&
		    (max == pstate_table->gfxclk_pstate.curr.max))
			return 0;

		ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(smu, min, max);
		if (!ret) {
			pstate_table->gfxclk_pstate.curr.min = min;
			pstate_table->gfxclk_pstate.curr.max = max;
		}

		return ret;
	}

	if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
		if (!max || (max < dpm_context->dpm_tables.gfx_table.min) ||
		    (max > dpm_context->dpm_tables.gfx_table.max)) {
			dev_warn(
				adev->dev,
				"Invalid max frequency %d MHz specified for determinism\n",
				max);
			return -EINVAL;
		}

		/* Restore default min/max clocks and enable determinism */
		min_clk = dpm_context->dpm_tables.gfx_table.min;
		max_clk = dpm_context->dpm_tables.gfx_table.max;
		ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(smu, min_clk,
								 max_clk);
		if (!ret) {
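			/* Brief delay to let the new soft limits take effect
			 * before enabling determinism.
			 */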
			usleep_range(500, 1000);
			ret = smu_cmn_send_smc_msg_with_param(
				smu, SMU_MSG_EnableDeterminism, max, NULL);
			if (ret) {
				dev_err(adev->dev,
					"Failed to enable determinism at GFX clock %d MHz\n",
					max);
			} else {
				pstate_table->gfxclk_pstate.curr.min = min_clk;
				pstate_table->gfxclk_pstate.curr.max = max;
			}
		}
	}

	return ret;
}

static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
					  enum PP_OD_DPM_TABLE_COMMAND type,
					  long input[], uint32_t size)
{
	struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
	struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
	uint32_t min_clk;
	uint32_t max_clk;
	int ret = 0;

	/* Only allowed in manual or determinism mode */
	if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) &&
	    (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM))
		return -EINVAL;

	switch (type) {
	case PP_OD_EDIT_SCLK_VDDC_TABLE:
		if (size != 2) {
			dev_err(smu->adev->dev,
				"Input parameter number not correct\n");
			return -EINVAL;
		}

		if (input[0] == 0) {
			if (input[1] < dpm_context->dpm_tables.gfx_table.min) {
				dev_warn(
					smu->adev->dev,
					"Minimum GFX clk (%ld) MHz specified is less than the minimum allowed (%d) MHz\n",
					input[1],
					dpm_context->dpm_tables.gfx_table.min);
				pstate_table->gfxclk_pstate.custom.min =
					pstate_table->gfxclk_pstate.curr.min;
				return -EINVAL;
			}

			pstate_table->gfxclk_pstate.custom.min = input[1];
		} else if (input[0] == 1) {
			if (input[1] > dpm_context->dpm_tables.gfx_table.max) {
				dev_warn(
					smu->adev->dev,
					"Maximum GFX clk (%ld) MHz specified is greater than the maximum allowed (%d) MHz\n",
					input[1],
					dpm_context->dpm_tables.gfx_table.max);
				pstate_table->gfxclk_pstate.custom.max =
					pstate_table->gfxclk_pstate.curr.max;
				return -EINVAL;
			}

			pstate_table->gfxclk_pstate.custom.max = input[1];
		} else {
			return -EINVAL;
		}
		break;
	case PP_OD_RESTORE_DEFAULT_TABLE:
		if (size != 0) {
			dev_err(smu->adev->dev,
				"Input parameter number not correct\n");
			return -EINVAL;
		} else {
			/* Use the default frequencies for manual and determinism mode */
			min_clk = dpm_context->dpm_tables.gfx_table.min;
			max_clk = dpm_context->dpm_tables.gfx_table.max;

			return smu_v13_0_6_set_soft_freq_limited_range(
				smu, SMU_GFXCLK, min_clk, max_clk);
		}
		break;
	case PP_OD_COMMIT_DPM_TABLE:
		if (size != 0) {
			dev_err(smu->adev->dev,
				"Input parameter number not correct\n");
			return -EINVAL;
		} else {
			if (!pstate_table->gfxclk_pstate.custom.min)
				pstate_table->gfxclk_pstate.custom.min =
					pstate_table->gfxclk_pstate.curr.min;

			if (!pstate_table->gfxclk_pstate.custom.max)
				pstate_table->gfxclk_pstate.custom.max =
					pstate_table->gfxclk_pstate.curr.max;

			min_clk = pstate_table->gfxclk_pstate.custom.min;
			max_clk = pstate_table->gfxclk_pstate.custom.max;

			return smu_v13_0_6_set_soft_freq_limited_range(
				smu, SMU_GFXCLK, min_clk, max_clk);
		}
		break;
	default:
		return -ENOSYS;
	}

	return ret;
}

static int smu_v13_0_6_get_enabled_mask(struct smu_context *smu,
					uint64_t *feature_mask)
{
	uint32_t smu_version;
	int ret;

	smu_cmn_get_smc_version(smu, NULL, &smu_version);
	ret = smu_cmn_get_enabled_mask(smu, feature_mask);

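	/* Older firmware does not support the feature-query messages; treat
	 * -EIO there as "no features enabled" rather than an error.
	 */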
1541 	if (ret == -EIO && smu_version < 0x552F00) {
1542 		*feature_mask = 0;
1543 		ret = 0;
1544 	}
1545 
1546 	return ret;
1547 }
1548 
1549 static bool smu_v13_0_6_is_dpm_running(struct smu_context *smu)
1550 {
1551 	int ret;
1552 	uint64_t feature_enabled;
1553 
1554 	ret = smu_v13_0_6_get_enabled_mask(smu, &feature_enabled);
1555 
1556 	if (ret)
1557 		return false;
1558 
1559 	return !!(feature_enabled & SMC_DPM_FEATURE);
1560 }
1561 
1562 static int smu_v13_0_6_request_i2c_xfer(struct smu_context *smu,
1563 					void *table_data)
1564 {
1565 	struct smu_table_context *smu_table = &smu->smu_table;
1566 	struct smu_table *table = &smu_table->driver_table;
1567 	struct amdgpu_device *adev = smu->adev;
1568 	uint32_t table_size;
1569 	int ret = 0;
1570 
1571 	if (!table_data)
1572 		return -EINVAL;
1573 
1574 	table_size = smu_table->tables[SMU_TABLE_I2C_COMMANDS].size;
1575 
1576 	memcpy(table->cpu_addr, table_data, table_size);
1577 	/* Flush hdp cache */
1578 	amdgpu_asic_flush_hdp(adev, NULL);
1579 	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RequestI2cTransaction,
1580 					  NULL);
1581 
1582 	return ret;
1583 }
1584 
1585 static int smu_v13_0_6_i2c_xfer(struct i2c_adapter *i2c_adap,
1586 				struct i2c_msg *msg, int num_msgs)
1587 {
1588 	struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap);
1589 	struct amdgpu_device *adev = smu_i2c->adev;
1590 	struct smu_context *smu = adev->powerplay.pp_handle;
1591 	struct smu_table_context *smu_table = &smu->smu_table;
1592 	struct smu_table *table = &smu_table->driver_table;
1593 	SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
1594 	int i, j, r, c;
1595 	u16 dir;
1596 
1597 	if (!adev->pm.dpm_enabled)
1598 		return -EBUSY;
1599 
1600 	req = kzalloc(sizeof(*req), GFP_KERNEL);
1601 	if (!req)
1602 		return -ENOMEM;
1603 
1604 	req->I2CcontrollerPort = smu_i2c->port;
1605 	req->I2CSpeed = I2C_SPEED_FAST_400K;
1606 	req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */
1607 	dir = msg[0].flags & I2C_M_RD;
1608 
1609 	for (c = i = 0; i < num_msgs; i++) {
1610 		for (j = 0; j < msg[i].len; j++, c++) {
1611 			SwI2cCmd_t *cmd = &req->SwI2cCmds[c];
1612 
1613 			if (!(msg[i].flags & I2C_M_RD)) {
1614 				/* write */
1615 				cmd->CmdConfig |= CMDCONFIG_READWRITE_MASK;
1616 				cmd->ReadWriteData = msg[i].buf[j];
1617 			}
1618 
1619 			if ((dir ^ msg[i].flags) & I2C_M_RD) {
1620 				/* The direction changes.
1621 				 */
1622 				dir = msg[i].flags & I2C_M_RD;
1623 				cmd->CmdConfig |= CMDCONFIG_RESTART_MASK;
1624 			}
1625 
1626 			req->NumCmds++;
1627 
1628 			/*
1629 			 * Insert STOP if we are at the last byte of either last
1630 			 * message for the transaction or the client explicitly
1631 			 * requires a STOP at this particular message.
1632 			 */
1633 			if ((j == msg[i].len - 1) &&
1634 			    ((i == num_msgs - 1) || (msg[i].flags & I2C_M_STOP))) {
1635 				cmd->CmdConfig &= ~CMDCONFIG_RESTART_MASK;
1636 				cmd->CmdConfig |= CMDCONFIG_STOP_MASK;
1637 			}
1638 		}
1639 	}
1640 	mutex_lock(&adev->pm.mutex);
1641 	r = smu_v13_0_6_request_i2c_xfer(smu, req);
1642 	mutex_unlock(&adev->pm.mutex);
1643 	if (r)
1644 		goto fail;
1645 
1646 	for (c = i = 0; i < num_msgs; i++) {
1647 		if (!(msg[i].flags & I2C_M_RD)) {
1648 			c += msg[i].len;
1649 			continue;
1650 		}
1651 		for (j = 0; j < msg[i].len; j++, c++) {
1652 			SwI2cCmd_t *cmd = &res->SwI2cCmds[c];
1653 
1654 			msg[i].buf[j] = cmd->ReadWriteData;
1655 		}
1656 	}
1657 	r = num_msgs;
1658 fail:
1659 	kfree(req);
1660 	return r;
1661 }
1662 
1663 static u32 smu_v13_0_6_i2c_func(struct i2c_adapter *adap)
1664 {
1665 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
1666 }
1667 
1668 static const struct i2c_algorithm smu_v13_0_6_i2c_algo = {
1669 	.master_xfer = smu_v13_0_6_i2c_xfer,
1670 	.functionality = smu_v13_0_6_i2c_func,
1671 };
1672 
1673 static const struct i2c_adapter_quirks smu_v13_0_6_i2c_control_quirks = {
1674 	.flags = I2C_AQ_COMB | I2C_AQ_COMB_SAME_ADDR | I2C_AQ_NO_ZERO_LEN,
1675 	.max_read_len = MAX_SW_I2C_COMMANDS,
1676 	.max_write_len = MAX_SW_I2C_COMMANDS,
1677 	.max_comb_1st_msg_len = 2,
1678 	.max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2,
1679 };
1680 
1681 static int smu_v13_0_6_i2c_control_init(struct smu_context *smu)
1682 {
1683 	struct amdgpu_device *adev = smu->adev;
1684 	int res, i;
1685 
1686 	for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
1687 		struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
1688 		struct i2c_adapter *control = &smu_i2c->adapter;
1689 
1690 		smu_i2c->adev = adev;
1691 		smu_i2c->port = i;
1692 		mutex_init(&smu_i2c->mutex);
1693 		control->owner = THIS_MODULE;
1694 		control->class = I2C_CLASS_SPD;
1695 		control->dev.parent = &adev->pdev->dev;
1696 		control->algo = &smu_v13_0_6_i2c_algo;
1697 		snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
1698 		control->quirks = &smu_v13_0_6_i2c_control_quirks;
1699 		i2c_set_adapdata(control, smu_i2c);
1700 
1701 		res = i2c_add_adapter(control);
1702 		if (res) {
1703 			DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
1704 			goto Out_err;
1705 		}
1706 	}
1707 
	adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
	adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;

	return 0;
Out_err:
	/* Unwind only the adapters that were successfully added */
	for (i--; i >= 0; i--) {
		struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
		struct i2c_adapter *control = &smu_i2c->adapter;

		i2c_del_adapter(control);
	}
	return res;
}

static void smu_v13_0_6_i2c_control_fini(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int i;

	for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
		struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
		struct i2c_adapter *control = &smu_i2c->adapter;

		i2c_del_adapter(control);
	}
	adev->pm.ras_eeprom_i2c_bus = NULL;
	adev->pm.fru_eeprom_i2c_bus = NULL;
}

static void smu_v13_0_6_get_unique_id(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	//SmuMetrics_t *metrics = smu->smu_table.metrics_table;
	uint32_t upper32 = 0, lower32 = 0;
	int ret;

	ret = smu_cmn_get_metrics_table(smu, NULL, false);
	if (ret)
		goto out;

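	/* TODO: populate from the metrics table once PMFW exposes the serial */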
	//upper32 = metrics->PublicSerialNumUpper32;
	//lower32 = metrics->PublicSerialNumLower32;

out:
	adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
	if (adev->serial[0] == '\0')
		sprintf(adev->serial, "%016llx", adev->unique_id);
}

static bool smu_v13_0_6_is_baco_supported(struct smu_context *smu)
{
	/* smu_13_0_6 does not support baco */

	return false;
}

static int smu_v13_0_6_set_df_cstate(struct smu_context *smu,
				     enum pp_df_cstate state)
{
	return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl,
					       state, NULL);
}

static int smu_v13_0_6_allow_xgmi_power_down(struct smu_context *smu, bool en)
{
	return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GmiPwrDnControl,
					       en ? 0 : 1, NULL);
}

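/* Map PMFW throttler status bits to human-readable labels for logging */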
static const struct throttling_logging_label {
	uint32_t feature_mask;
	const char *label;
} logging_label[] = {
	{ (1U << THROTTLER_TEMP_HBM_BIT), "HBM" },
	{ (1U << THROTTLER_TEMP_SOC_BIT), "SOC" },
	{ (1U << THROTTLER_TEMP_VR_GFX_BIT), "VR limit" },
};
static void smu_v13_0_6_log_thermal_throttling_event(struct smu_context *smu)
{
	int ret;
	int throttler_idx, throttling_events = 0, buf_idx = 0;
	struct amdgpu_device *adev = smu->adev;
	uint32_t throttler_status;
	char log_buf[256];

	ret = smu_v13_0_6_get_smu_metrics_data(smu, METRICS_THROTTLER_STATUS,
					      &throttler_status);
	if (ret)
		return;

	memset(log_buf, 0, sizeof(log_buf));
	for (throttler_idx = 0; throttler_idx < ARRAY_SIZE(logging_label);
	     throttler_idx++) {
		if (throttler_status &
		    logging_label[throttler_idx].feature_mask) {
			throttling_events++;
			buf_idx += snprintf(log_buf + buf_idx,
					    sizeof(log_buf) - buf_idx, "%s%s",
					    throttling_events > 1 ? " and " : "",
					    logging_label[throttler_idx].label);
			if (buf_idx >= sizeof(log_buf)) {
				/* snprintf truncated the label list */
				dev_err(adev->dev, "throttling log truncated!\n");
				log_buf[sizeof(log_buf) - 1] = '\0';
				break;
			}
		}
	}

	dev_warn(
		adev->dev,
		"WARN: GPU thermal throttling temperature reached, expect performance decrease. %s.\n",
		log_buf);
	kgd2kfd_smi_event_throttle(
		smu->adev->kfd.dev,
		smu_cmn_get_indep_throttler_status(throttler_status,
						   smu_v13_0_6_throttler_map));
}

static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t esm_ctrl;

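	/*
	 * If any lane reports Extended Speed Mode as active in the status
	 * bits, derive the link speed from the ESM control register;
	 * otherwise fall back to the generic PCIe link speed query. The
	 * register layout is still unconfirmed on this ASIC (see TODO).
	 */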
	/* TODO: confirm this on real target */
	esm_ctrl = RREG32_PCIE(smnPCIE_ESM_CTRL);
	if ((esm_ctrl >> 15) & 0x1FFFF)
		return (((esm_ctrl >> 8) & 0x3F) + 128);

	return smu_v13_0_get_current_pcie_link_speed(smu);
}

static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v1_3 *gpu_metrics =
		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
	MetricsTable_t *metrics;
	int i, ret = 0;

	metrics = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
	if (!metrics)
		return -ENOMEM;

	ret = smu_v13_0_6_get_metrics_table(smu, metrics, true);
	if (ret) {
		kfree(metrics);
		return ret;
	}

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);

	/* TODO: Decide on how to fill in zero value fields */
	gpu_metrics->temperature_edge = 0;
	gpu_metrics->temperature_hotspot = 0;
	gpu_metrics->temperature_mem = 0;
	gpu_metrics->temperature_vrgfx = 0;
	gpu_metrics->temperature_vrsoc = 0;
	gpu_metrics->temperature_vrmem = 0;

	gpu_metrics->average_gfx_activity = 0;
	gpu_metrics->average_umc_activity = 0;
	gpu_metrics->average_mm_activity = 0;

	gpu_metrics->average_socket_power = 0;
	gpu_metrics->energy_accumulator = 0;

	gpu_metrics->average_gfxclk_frequency = 0;
	gpu_metrics->average_socclk_frequency = 0;
	gpu_metrics->average_uclk_frequency = 0;
	gpu_metrics->average_vclk0_frequency = 0;
	gpu_metrics->average_dclk0_frequency = 0;

	gpu_metrics->current_gfxclk = 0;
	gpu_metrics->current_socclk = 0;
	gpu_metrics->current_uclk = 0;
	gpu_metrics->current_vclk0 = 0;
	gpu_metrics->current_dclk0 = 0;

	gpu_metrics->throttle_status = 0;
	gpu_metrics->indep_throttle_status = smu_cmn_get_indep_throttler_status(
		gpu_metrics->throttle_status, smu_v13_0_6_throttler_map);

	gpu_metrics->current_fan_speed = 0;

	gpu_metrics->pcie_link_width = 0;
	gpu_metrics->pcie_link_speed = smu_v13_0_6_get_current_pcie_link_speed(smu);

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	gpu_metrics->gfx_activity_acc = 0;
	gpu_metrics->mem_activity_acc = 0;

	for (i = 0; i < NUM_HBM_INSTANCES; i++)
		gpu_metrics->temperature_hbm[i] = 0;

	gpu_metrics->firmware_timestamp = 0;

	*table = (void *)gpu_metrics;
	kfree(metrics);

	return sizeof(struct gpu_metrics_v1_3);
}

static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
{
	u32 smu_version;
	int ret = 0, index;
	struct amdgpu_device *adev = smu->adev;
	int timeout = 10;

	smu_cmn_get_smc_version(smu, NULL, &smu_version);

	index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
					       SMU_MSG_GfxDeviceDriverReset);

	mutex_lock(&smu->message_lock);
	ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index,
					       SMU_RESET_MODE_2);
	/* This is similar to FLR, wait till max FLR timeout */
	msleep(100);
	dev_dbg(smu->adev->dev, "restore config space...\n");
	/* Restore the config space saved during init */
	amdgpu_device_load_pci_state(adev->pdev);

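	/*
	 * Poll until the firmware acknowledges the reset message.
	 * smu_cmn_wait_for_response() returning -ETIME means the response
	 * register has not been written yet, so back off briefly and retry
	 * up to 'timeout' times; any other value is the firmware's answer.
	 */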
	dev_dbg(smu->adev->dev, "wait for reset ack\n");
	do {
		ret = smu_cmn_wait_for_response(smu);
		/* Wait a bit more time for getting ACK */
		if (ret == -ETIME) {
			--timeout;
			usleep_range(500, 1000);
			continue;
		}

		if (ret != 1) {
			dev_err(adev->dev,
				"failed to send mode2 message \tparam: 0x%08x response %#x\n",
				SMU_RESET_MODE_2, ret);
			goto out;
		}
	} while (ret == -ETIME && timeout);

	if (ret == 1)
		ret = 0;
out:
	mutex_unlock(&smu->message_lock);

	return ret;
}

static int smu_v13_0_6_mode1_reset(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_ras *ras;
	u32 fatal_err, param;
	int ret = 0;

	ras = amdgpu_ras_get_context(adev);
	fatal_err = 0;
	param = SMU_RESET_MODE_1;

	/* fatal error triggered by ras, PMFW supports the flag */
	if (ras && atomic_read(&ras->in_recovery))
		fatal_err = 1;

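	/* PMFW takes the fatal-error flag in bit 16 of the message argument */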
	param |= (fatal_err << 16);
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
					      param, NULL);

	if (!ret)
		msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS);

	return ret;
}

static bool smu_v13_0_6_is_mode1_reset_supported(struct smu_context *smu)
{
	/* TODO: Enable this when FW support is added */
	return false;
}

static bool smu_v13_0_6_is_mode2_reset_supported(struct smu_context *smu)
{
	return true;
}

static int smu_v13_0_6_smu_send_hbm_bad_page_num(struct smu_context *smu,
						 uint32_t size)
{
	int ret = 0;

	/* message SMU to update the bad page number on SMUBUS */
	ret = smu_cmn_send_smc_msg_with_param(
		smu, SMU_MSG_SetNumBadHbmPagesRetired, size, NULL);
	if (ret)
		dev_err(smu->adev->dev,
			"[%s] failed to message SMU to update HBM bad page count\n",
			__func__);

	return ret;
}

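/* Callback table wiring this ASIC's implementation into the swSMU core */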
static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
	/* init dpm */
	.get_allowed_feature_mask = smu_v13_0_6_get_allowed_feature_mask,
	/* dpm/clk tables */
	.set_default_dpm_table = smu_v13_0_6_set_default_dpm_table,
	.populate_umd_state_clk = smu_v13_0_6_populate_umd_state_clk,
	.print_clk_levels = smu_v13_0_6_print_clk_levels,
	.force_clk_levels = smu_v13_0_6_force_clk_levels,
	.read_sensor = smu_v13_0_6_read_sensor,
	.set_performance_level = smu_v13_0_6_set_performance_level,
	.get_power_limit = smu_v13_0_6_get_power_limit,
	.is_dpm_running = smu_v13_0_6_is_dpm_running,
	.get_unique_id = smu_v13_0_6_get_unique_id,
	.init_smc_tables = smu_v13_0_6_init_smc_tables,
	.fini_smc_tables = smu_v13_0_fini_smc_tables,
	.init_power = smu_v13_0_init_power,
	.fini_power = smu_v13_0_fini_power,
	.check_fw_status = smu_v13_0_6_check_fw_status,
	/* pptable related */
	.check_fw_version = smu_v13_0_check_fw_version,
	.set_driver_table_location = smu_v13_0_set_driver_table_location,
	.set_tool_table_location = smu_v13_0_set_tool_table_location,
	.notify_memory_pool_location = smu_v13_0_notify_memory_pool_location,
	.system_features_control = smu_v13_0_6_system_features_control,
	.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
	.send_smc_msg = smu_cmn_send_smc_msg,
	.get_enabled_mask = smu_v13_0_6_get_enabled_mask,
	.feature_is_enabled = smu_cmn_feature_is_enabled,
	.set_power_limit = smu_v13_0_6_set_power_limit,
	.set_xgmi_pstate = smu_v13_0_set_xgmi_pstate,
	/* TODO: Thermal limits unknown, skip these for now
	.register_irq_handler = smu_v13_0_register_irq_handler,
	.enable_thermal_alert = smu_v13_0_enable_thermal_alert,
	.disable_thermal_alert = smu_v13_0_disable_thermal_alert,
	*/
	.setup_pptable = smu_v13_0_6_setup_pptable,
	.baco_is_support = smu_v13_0_6_is_baco_supported,
	.get_dpm_ultimate_freq = smu_v13_0_6_get_dpm_ultimate_freq,
	.set_soft_freq_limited_range = smu_v13_0_6_set_soft_freq_limited_range,
	.od_edit_dpm_table = smu_v13_0_6_usr_edit_dpm_table,
	.set_df_cstate = smu_v13_0_6_set_df_cstate,
	.allow_xgmi_power_down = smu_v13_0_6_allow_xgmi_power_down,
	.log_thermal_throttling_event = smu_v13_0_6_log_thermal_throttling_event,
	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
	.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
	.get_gpu_metrics = smu_v13_0_6_get_gpu_metrics,
	.mode1_reset_is_support = smu_v13_0_6_is_mode1_reset_supported,
	.mode2_reset_is_support = smu_v13_0_6_is_mode2_reset_supported,
	.mode1_reset = smu_v13_0_6_mode1_reset,
	.mode2_reset = smu_v13_0_6_mode2_reset,
	.wait_for_event = smu_v13_0_wait_for_event,
	.i2c_init = smu_v13_0_6_i2c_control_init,
	.i2c_fini = smu_v13_0_6_i2c_control_fini,
	.send_hbm_bad_pages_num = smu_v13_0_6_smu_send_hbm_bad_page_num,
};

void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
{
	smu->ppt_funcs = &smu_v13_0_6_ppt_funcs;
	smu->message_map = smu_v13_0_6_message_map;
	smu->clock_map = smu_v13_0_6_clk_map;
	smu->feature_map = smu_v13_0_6_feature_mask_map;
	smu->table_map = smu_v13_0_6_table_map;
	smu_v13_0_set_smu_mailbox_registers(smu);
}