/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define SWSMU_CODE_LAYER_L2

#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_atombios.h"
#include "smu_v13_0_6_pmfw.h"
#include "smu13_driver_if_v13_0_6.h"
#include "smu_v13_0_6_ppsmc.h"
#include "soc15_common.h"
#include "atom.h"
#include "power_state.h"
#include "smu_v13_0.h"
#include "smu_v13_0_6_ppt.h"
#include "nbio/nbio_7_4_offset.h"
#include "nbio/nbio_7_4_sh_mask.h"
#include "thm/thm_11_0_2_offset.h"
#include "thm/thm_11_0_2_sh_mask.h"
#include "amdgpu_xgmi.h"
#include <linux/pci.h>
#include "amdgpu_ras.h"
#include "amdgpu_mca.h"
#include "smu_cmn.h"
#include "mp/mp_13_0_6_offset.h"
#include "mp/mp_13_0_6_sh_mask.h"
#include "umc_v12_0.h"

#undef MP1_Public
#undef smnMP1_FIRMWARE_FLAGS

/* TODO: Check final register offsets */
#define MP1_Public 0x03b00000
#define smnMP1_FIRMWARE_FLAGS 0x3010028
/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

MODULE_FIRMWARE("amdgpu/smu_13_0_6.bin");

#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))

#define SMU_13_0_6_FEA_MAP(smu_feature, smu_13_0_6_feature) \
	[smu_feature] = { 1, (smu_13_0_6_feature) }

#define FEATURE_MASK(feature) (1ULL << feature)
#define SMC_DPM_FEATURE \
	(FEATURE_MASK(FEATURE_DATA_CALCULATION) | \
	 FEATURE_MASK(FEATURE_DPM_GFXCLK) | FEATURE_MASK(FEATURE_DPM_UCLK) | \
	 FEATURE_MASK(FEATURE_DPM_SOCCLK) | FEATURE_MASK(FEATURE_DPM_FCLK) | \
	 FEATURE_MASK(FEATURE_DPM_LCLK) | FEATURE_MASK(FEATURE_DPM_XGMI) | \
	 FEATURE_MASK(FEATURE_DPM_VCN))

/* possible frequency drift (1 MHz) */
#define EPSILON 1

#define smnPCIE_ESM_CTRL 0x93D0
#define smnPCIE_LC_LINK_WIDTH_CNTL 0x1a340288
#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L
#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4
#define MAX_LINK_WIDTH 6

#define smnPCIE_LC_SPEED_CNTL 0x1a340290
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xE0
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0x5
#define LINK_SPEED_MAX 4

#define SMU_13_0_6_DSCLK_THRESHOLD 140

#define MCA_BANK_IPID(_ip, _hwid, _type) \
	[AMDGPU_MCA_IP_##_ip] = { .hwid = _hwid, .mcatype = _type, }

struct mca_bank_ipid {
	enum amdgpu_mca_ip ip;
	uint16_t hwid;
	uint16_t mcatype;
};

struct mca_ras_info {
	enum amdgpu_ras_block blkid;
	enum amdgpu_mca_ip ip;
	int *err_code_array;
	int err_code_count;
	int (*get_err_count)(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
			     enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count);
	bool (*bank_is_valid)(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
			      enum amdgpu_mca_error_type type, struct mca_bank_entry *entry);
};

#define P2S_TABLE_ID_A 0x50325341
#define P2S_TABLE_ID_X 0x50325358
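
/*
 * Decoding note (not from firmware docs): the table IDs are four ASCII
 * bytes packed big-endian, so 0x50325341 reads "P2SA" and 0x50325358
 * reads "P2SX" (0x50 = 'P', 0x32 = '2', 0x53 = 'S', 0x41 = 'A',
 * 0x58 = 'X'); these select the APU and non-APU P2S tables in
 * smu_v13_0_6_init_microcode() below.
 */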

// clang-format off
static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COUNT] = {
	MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
	MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
	MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
	MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 0),
	MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0),
	MSG_MAP(RequestI2cTransaction, PPSMC_MSG_RequestI2cTransaction, 0),
	MSG_MAP(GetMetricsTable, PPSMC_MSG_GetMetricsTable, 1),
	MSG_MAP(GetMetricsVersion, PPSMC_MSG_GetMetricsVersion, 1),
	MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetEnabledSmuFeaturesHigh, 1),
	MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetEnabledSmuFeaturesLow, 1),
	MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
	MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1),
	MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0),
	MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0),
	MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 0),
	MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 0),
	MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1),
	MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1),
	MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1),
	MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0),
	MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1),
	MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, 0),
	MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0),
	MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0),
	MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0),
	MSG_MAP(GetDebugData, PPSMC_MSG_GetDebugData, 0),
	MSG_MAP(SetNumBadHbmPagesRetired, PPSMC_MSG_SetNumBadHbmPagesRetired, 0),
	MSG_MAP(DFCstateControl, PPSMC_MSG_DFCstateControl, 0),
	MSG_MAP(GetGmiPwrDnHyst, PPSMC_MSG_GetGmiPwrDnHyst, 0),
	MSG_MAP(SetGmiPwrDnHyst, PPSMC_MSG_SetGmiPwrDnHyst, 0),
	MSG_MAP(GmiPwrDnControl, PPSMC_MSG_GmiPwrDnControl, 0),
	MSG_MAP(EnterGfxoff, PPSMC_MSG_EnterGfxoff, 0),
	MSG_MAP(ExitGfxoff, PPSMC_MSG_ExitGfxoff, 0),
	MSG_MAP(EnableDeterminism, PPSMC_MSG_EnableDeterminism, 0),
	MSG_MAP(DisableDeterminism, PPSMC_MSG_DisableDeterminism, 0),
	MSG_MAP(GfxDriverResetRecovery, PPSMC_MSG_GfxDriverResetRecovery, 0),
	MSG_MAP(GetMinGfxclkFrequency, PPSMC_MSG_GetMinGfxDpmFreq, 1),
	MSG_MAP(GetMaxGfxclkFrequency, PPSMC_MSG_GetMaxGfxDpmFreq, 1),
	MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxClk, 1),
	MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 1),
	MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareForDriverUnload, 0),
	MSG_MAP(GetCTFLimit, PPSMC_MSG_GetCTFLimit, 0),
	MSG_MAP(GetThermalLimit, PPSMC_MSG_ReadThrottlerLimit, 0),
	MSG_MAP(ClearMcaOnRead, PPSMC_MSG_ClearMcaOnRead, 0),
	MSG_MAP(QueryValidMcaCount, PPSMC_MSG_QueryValidMcaCount, 0),
	MSG_MAP(QueryValidMcaCeCount, PPSMC_MSG_QueryValidMcaCeCount, 0),
	MSG_MAP(McaBankDumpDW, PPSMC_MSG_McaBankDumpDW, 0),
	MSG_MAP(McaBankCeDumpDW, PPSMC_MSG_McaBankCeDumpDW, 0),
	MSG_MAP(SelectPLPDMode, PPSMC_MSG_SelectPLPDMode, 0),
};

// clang-format on
static const struct cmn2asic_mapping smu_v13_0_6_clk_map[SMU_CLK_COUNT] = {
	CLK_MAP(SOCCLK, PPCLK_SOCCLK),
	CLK_MAP(FCLK, PPCLK_FCLK),
	CLK_MAP(UCLK, PPCLK_UCLK),
	CLK_MAP(MCLK, PPCLK_UCLK),
	CLK_MAP(DCLK, PPCLK_DCLK),
	CLK_MAP(VCLK, PPCLK_VCLK),
	CLK_MAP(LCLK, PPCLK_LCLK),
};

static const struct cmn2asic_mapping smu_v13_0_6_feature_mask_map[SMU_FEATURE_COUNT] = {
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DATA_CALCULATIONS_BIT, FEATURE_DATA_CALCULATION),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_GFXCLK_BIT, FEATURE_DPM_GFXCLK),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_UCLK_BIT, FEATURE_DPM_UCLK),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_SOCCLK_BIT, FEATURE_DPM_SOCCLK),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_FCLK_BIT, FEATURE_DPM_FCLK),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_LCLK_BIT, FEATURE_DPM_LCLK),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_VCLK_BIT, FEATURE_DPM_VCN),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_DCLK_BIT, FEATURE_DPM_VCN),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_XGMI_BIT, FEATURE_DPM_XGMI),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_GFXCLK_BIT, FEATURE_DS_GFXCLK),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_SOCCLK_BIT, FEATURE_DS_SOCCLK),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_LCLK_BIT, FEATURE_DS_LCLK),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_FCLK_BIT, FEATURE_DS_FCLK),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_VCN_DPM_BIT, FEATURE_DPM_VCN),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_PPT_BIT, FEATURE_PPT),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_TDC_BIT, FEATURE_TDC),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_APCC_DFLL_BIT, FEATURE_APCC_DFLL),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_MP1_CG_BIT, FEATURE_SMU_CG),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_GFXOFF_BIT, FEATURE_GFXOFF),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_FW_CTF_BIT, FEATURE_FW_CTF),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_THERMAL_BIT, FEATURE_THERMAL),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT, FEATURE_XGMI_PER_LINK_PWR_DOWN),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DF_CSTATE_BIT, FEATURE_DF_CSTATE),
};

#define TABLE_PMSTATUSLOG 0
#define TABLE_SMU_METRICS 1
#define TABLE_I2C_COMMANDS 2
#define TABLE_COUNT 3

static const struct cmn2asic_mapping smu_v13_0_6_table_map[SMU_TABLE_COUNT] = {
	TAB_MAP(PMSTATUSLOG),
	TAB_MAP(SMU_METRICS),
	TAB_MAP(I2C_COMMANDS),
};

static const uint8_t smu_v13_0_6_throttler_map[] = {
	[THROTTLER_PPT_BIT] = (SMU_THROTTLER_PPT0_BIT),
	[THROTTLER_THERMAL_SOCKET_BIT] = (SMU_THROTTLER_TEMP_GPU_BIT),
	[THROTTLER_THERMAL_HBM_BIT] = (SMU_THROTTLER_TEMP_MEM_BIT),
	[THROTTLER_THERMAL_VR_BIT] = (SMU_THROTTLER_TEMP_VR_GFX_BIT),
	[THROTTLER_PROCHOT_BIT] = (SMU_THROTTLER_PROCHOT_GFX_BIT),
};

struct PPTable_t {
	uint32_t MaxSocketPowerLimit;
	uint32_t MaxGfxclkFrequency;
	uint32_t MinGfxclkFrequency;
	uint32_t FclkFrequencyTable[4];
	uint32_t UclkFrequencyTable[4];
	uint32_t SocclkFrequencyTable[4];
	uint32_t VclkFrequencyTable[4];
	uint32_t DclkFrequencyTable[4];
	uint32_t LclkFrequencyTable[4];
	uint32_t MaxLclkDpmRange;
	uint32_t MinLclkDpmRange;
	uint64_t PublicSerialNumber_AID;
	bool Init;
};

#define SMUQ10_TO_UINT(x) ((x) >> 10)
#define SMUQ10_FRAC(x) ((x) & 0x3ff)
#define SMUQ10_ROUND(x) ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200))
#define GET_METRIC_FIELD(field) ((adev->flags & AMD_IS_APU) ?\
		(metrics_a->field) : (metrics_x->field))
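
/*
 * Worked example of the Q10 fixed-point helpers above (arithmetic only,
 * not from the PMFW spec): x = 0xA65 (2661) encodes 2661/1024 ~= 2.599;
 * SMUQ10_TO_UINT(x) = 2, SMUQ10_FRAC(x) = 0x265 (613), and since
 * 613 >= 0x200 (512), SMUQ10_ROUND(x) rounds up to 3.
 */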

struct smu_v13_0_6_dpm_map {
	enum smu_clk_type clk_type;
	uint32_t feature_num;
	struct smu_13_0_dpm_table *dpm_table;
	uint32_t *freq_table;
};

static int smu_v13_0_6_init_microcode(struct smu_context *smu)
{
	const struct smc_firmware_header_v2_1 *v2_1;
	const struct common_firmware_header *hdr;
	struct amdgpu_firmware_info *ucode = NULL;
	struct smc_soft_pptable_entry *entries;
	struct amdgpu_device *adev = smu->adev;
	uint32_t p2s_table_id = P2S_TABLE_ID_A;
	int ret = 0, i, p2stable_count;
	char ucode_prefix[15];
	char fw_name[30];

	/* No need to load P2S tables in IOV mode */
	if (amdgpu_sriov_vf(adev))
		return 0;

	if (!(adev->flags & AMD_IS_APU))
		p2s_table_id = P2S_TABLE_ID_X;

	amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix,
				       sizeof(ucode_prefix));

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);

	ret = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name);
	if (ret)
		goto out;

	hdr = (const struct common_firmware_header *)adev->pm.fw->data;
	amdgpu_ucode_print_smc_hdr(hdr);

	/* SMU v13.0.6 binary file doesn't carry pptables, instead the entries
	 * are used to carry p2s tables.
	 */
	v2_1 = (const struct smc_firmware_header_v2_1 *)adev->pm.fw->data;
	entries = (struct smc_soft_pptable_entry
			   *)((uint8_t *)v2_1 +
			      le32_to_cpu(v2_1->pptable_entry_offset));
	p2stable_count = le32_to_cpu(v2_1->pptable_count);
	for (i = 0; i < p2stable_count; i++) {
		if (le32_to_cpu(entries[i].id) == p2s_table_id) {
			smu->pptable_firmware.data =
				((uint8_t *)v2_1 +
				 le32_to_cpu(entries[i].ppt_offset_bytes));
			smu->pptable_firmware.size =
				le32_to_cpu(entries[i].ppt_size_bytes);
			break;
		}
	}

	if (smu->pptable_firmware.data && smu->pptable_firmware.size) {
		ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE];
		ucode->ucode_id = AMDGPU_UCODE_ID_P2S_TABLE;
		ucode->fw = &smu->pptable_firmware;
		adev->firmware.fw_size += ALIGN(ucode->fw->size, PAGE_SIZE);
	}

	return 0;
out:
	amdgpu_ucode_release(&adev->pm.fw);

	return ret;
}

static int smu_v13_0_6_tables_init(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct amdgpu_device *adev = smu->adev;

	if (!(adev->flags & AMD_IS_APU))
		SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE,
			       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);

	SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS,
		       max(sizeof(MetricsTableX_t), sizeof(MetricsTableA_t)),
		       PAGE_SIZE,
		       AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);

	SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
		       PAGE_SIZE,
		       AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);

	smu_table->metrics_table = kzalloc(max(sizeof(MetricsTableX_t),
					       sizeof(MetricsTableA_t)), GFP_KERNEL);
	if (!smu_table->metrics_table)
		return -ENOMEM;
	smu_table->metrics_time = 0;

	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_5);
	smu_table->gpu_metrics_table =
		kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
	if (!smu_table->gpu_metrics_table) {
		kfree(smu_table->metrics_table);
		return -ENOMEM;
	}

	smu_table->driver_pptable =
		kzalloc(sizeof(struct PPTable_t), GFP_KERNEL);
	if (!smu_table->driver_pptable) {
		kfree(smu_table->metrics_table);
		kfree(smu_table->gpu_metrics_table);
		return -ENOMEM;
	}

	return 0;
}

static int smu_v13_0_6_allocate_dpm_context(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	smu_dpm->dpm_context =
		kzalloc(sizeof(struct smu_13_0_dpm_context), GFP_KERNEL);
	if (!smu_dpm->dpm_context)
		return -ENOMEM;
	smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context);

	return 0;
}

static int smu_v13_0_6_init_smc_tables(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_v13_0_6_tables_init(smu);
	if (ret)
		return ret;

	ret = smu_v13_0_6_allocate_dpm_context(smu);

	return ret;
}

static int smu_v13_0_6_get_allowed_feature_mask(struct smu_context *smu,
						uint32_t *feature_mask,
						uint32_t num)
{
	if (num > 2)
		return -EINVAL;

	/* pptable will handle the features to enable */
	memset(feature_mask, 0xFF, sizeof(uint32_t) * num);

	return 0;
}

static int smu_v13_0_6_get_metrics_table(struct smu_context *smu,
					 void *metrics_table, bool bypass_cache)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size;
	struct smu_table *table = &smu_table->driver_table;
	int ret;

	if (bypass_cache || !smu_table->metrics_time ||
	    time_after(jiffies,
		       smu_table->metrics_time + msecs_to_jiffies(1))) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMetricsTable, NULL);
		if (ret) {
			dev_info(smu->adev->dev,
				 "Failed to export SMU metrics table!\n");
			return ret;
		}

		amdgpu_asic_invalidate_hdp(smu->adev, NULL);
		memcpy(smu_table->metrics_table, table->cpu_addr, table_size);

		smu_table->metrics_time = jiffies;
	}

	if (metrics_table)
		memcpy(metrics_table, smu_table->metrics_table, table_size);

	return 0;
}
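
/*
 * Layout sketch of the buffer filled below (derived from this code, not a
 * spec): the amdgpu_pm_metrics common_header written by the driver is
 * immediately followed by the raw SMU metrics table, so structure_size is
 * sizeof(common_header) + table_size.
 */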
static ssize_t smu_v13_0_6_get_pm_metrics(struct smu_context *smu,
					  void *metrics, size_t max_size)
{
	struct smu_table_context *smu_tbl_ctxt = &smu->smu_table;
	uint32_t table_version = smu_tbl_ctxt->tables[SMU_TABLE_SMU_METRICS].version;
	uint32_t table_size = smu_tbl_ctxt->tables[SMU_TABLE_SMU_METRICS].size;
	struct amdgpu_pm_metrics *pm_metrics = metrics;
	uint32_t pmfw_version;
	int ret;

	if (!pm_metrics || !max_size)
		return -EINVAL;

	if (max_size < (table_size + sizeof(pm_metrics->common_header)))
		return -EOVERFLOW;

	/* Don't use cached metrics data */
	ret = smu_v13_0_6_get_metrics_table(smu, pm_metrics->data, true);
	if (ret)
		return ret;

	smu_cmn_get_smc_version(smu, NULL, &pmfw_version);

	memset(&pm_metrics->common_header, 0,
	       sizeof(pm_metrics->common_header));
	pm_metrics->common_header.mp1_ip_discovery_version =
		IP_VERSION(13, 0, 6);
	pm_metrics->common_header.pmfw_version = pmfw_version;
	pm_metrics->common_header.pmmetrics_version = table_version;
	pm_metrics->common_header.structure_size =
		sizeof(pm_metrics->common_header) + table_size;

	return pm_metrics->common_header.structure_size;
}

static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	MetricsTableX_t *metrics_x = (MetricsTableX_t *)smu_table->metrics_table;
	MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table;
	struct PPTable_t *pptable =
		(struct PPTable_t *)smu_table->driver_pptable;
	struct amdgpu_device *adev = smu->adev;
	int ret, i, retry = 100;
	uint32_t table_version;

	/* Store one-time values in driver PPTable */
	if (!pptable->Init) {
		while (--retry) {
			ret = smu_v13_0_6_get_metrics_table(smu, NULL, true);
			if (ret)
				return ret;

			/* Ensure that metrics have been updated */
			if (GET_METRIC_FIELD(AccumulationCounter))
				break;

			usleep_range(1000, 1100);
		}

		if (!retry)
			return -ETIME;

		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMetricsVersion,
					   &table_version);
		if (ret)
			return ret;
		smu_table->tables[SMU_TABLE_SMU_METRICS].version =
			table_version;

		pptable->MaxSocketPowerLimit =
			SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketPowerLimit));
		pptable->MaxGfxclkFrequency =
			SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency));
		pptable->MinGfxclkFrequency =
			SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency));

		for (i = 0; i < 4; ++i) {
			pptable->FclkFrequencyTable[i] =
				SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequencyTable)[i]);
			pptable->UclkFrequencyTable[i] =
				SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequencyTable)[i]);
			pptable->SocclkFrequencyTable[i] = SMUQ10_ROUND(
				GET_METRIC_FIELD(SocclkFrequencyTable)[i]);
			pptable->VclkFrequencyTable[i] =
				SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequencyTable)[i]);
			pptable->DclkFrequencyTable[i] =
				SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequencyTable)[i]);
			pptable->LclkFrequencyTable[i] =
				SMUQ10_ROUND(GET_METRIC_FIELD(LclkFrequencyTable)[i]);
		}

		/* use AID0 serial number by default */
		pptable->PublicSerialNumber_AID = GET_METRIC_FIELD(PublicSerialNumber_AID)[0];

		pptable->Init = true;
	}

	return 0;
}

static int smu_v13_0_6_get_dpm_ultimate_freq(struct smu_context *smu,
					     enum smu_clk_type clk_type,
					     uint32_t *min, uint32_t *max)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct PPTable_t *pptable =
		(struct PPTable_t *)smu_table->driver_pptable;
	uint32_t clock_limit = 0, param;
	int ret = 0, clk_id = 0;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
		switch (clk_type) {
		case SMU_MCLK:
		case SMU_UCLK:
			if (pptable->Init)
				clock_limit = pptable->UclkFrequencyTable[0];
			break;
		case SMU_GFXCLK:
		case SMU_SCLK:
			if (pptable->Init)
				clock_limit = pptable->MinGfxclkFrequency;
			break;
		case SMU_SOCCLK:
			if (pptable->Init)
				clock_limit = pptable->SocclkFrequencyTable[0];
			break;
		case SMU_FCLK:
			if (pptable->Init)
				clock_limit = pptable->FclkFrequencyTable[0];
			break;
		case SMU_VCLK:
			if (pptable->Init)
				clock_limit = pptable->VclkFrequencyTable[0];
			break;
		case SMU_DCLK:
			if (pptable->Init)
				clock_limit = pptable->DclkFrequencyTable[0];
			break;
		default:
			break;
		}

		if (min)
			*min = clock_limit;

		if (max)
			*max = clock_limit;

		return 0;
	}

	if (!(clk_type == SMU_GFXCLK || clk_type == SMU_SCLK)) {
		clk_id = smu_cmn_to_asic_specific_index(
			smu, CMN2ASIC_MAPPING_CLK, clk_type);
		if (clk_id < 0) {
			ret = -EINVAL;
			goto failed;
		}
		param = (clk_id & 0xffff) << 16;
	}

	if (max) {
		if (clk_type == SMU_GFXCLK || clk_type == SMU_SCLK)
			ret = smu_cmn_send_smc_msg(
				smu, SMU_MSG_GetMaxGfxclkFrequency, max);
		else
			ret = smu_cmn_send_smc_msg_with_param(
				smu, SMU_MSG_GetMaxDpmFreq, param, max);
		if (ret)
			goto failed;
	}

	if (min) {
		if (clk_type == SMU_GFXCLK || clk_type == SMU_SCLK)
			ret = smu_cmn_send_smc_msg(
				smu, SMU_MSG_GetMinGfxclkFrequency, min);
		else
			ret = smu_cmn_send_smc_msg_with_param(
				smu, SMU_MSG_GetMinDpmFreq, param, min);
	}

failed:
	return ret;
}

static int smu_v13_0_6_get_dpm_level_count(struct smu_context *smu,
					   enum smu_clk_type clk_type,
					   uint32_t *levels)
{
	int ret;

	ret = smu_v13_0_get_dpm_freq_by_index(smu, clk_type, 0xff, levels);
	if (!ret)
		++(*levels);

	return ret;
}
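
/*
 * Index 0xff in the query above appears to be the PMFW convention for
 * "highest DPM level": the message returns the max level index, hence the
 * increment to turn it into a level count.
 */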

static int smu_v13_0_6_set_default_dpm_table(struct smu_context *smu)
{
	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_13_0_dpm_table *dpm_table = NULL;
	struct PPTable_t *pptable =
		(struct PPTable_t *)smu_table->driver_pptable;
	uint32_t gfxclkmin, gfxclkmax, levels;
	int ret = 0, i, j;
	struct smu_v13_0_6_dpm_map dpm_map[] = {
		{ SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT,
		  &dpm_context->dpm_tables.soc_table,
		  pptable->SocclkFrequencyTable },
		{ SMU_UCLK, SMU_FEATURE_DPM_UCLK_BIT,
		  &dpm_context->dpm_tables.uclk_table,
		  pptable->UclkFrequencyTable },
		{ SMU_FCLK, SMU_FEATURE_DPM_FCLK_BIT,
		  &dpm_context->dpm_tables.fclk_table,
		  pptable->FclkFrequencyTable },
		{ SMU_VCLK, SMU_FEATURE_DPM_VCLK_BIT,
		  &dpm_context->dpm_tables.vclk_table,
		  pptable->VclkFrequencyTable },
		{ SMU_DCLK, SMU_FEATURE_DPM_DCLK_BIT,
		  &dpm_context->dpm_tables.dclk_table,
		  pptable->DclkFrequencyTable },
	};

	smu_v13_0_6_setup_driver_pptable(smu);

	/* gfxclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.gfx_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
		/* In the case of gfxclk, only fine-grained dpm is honored.
		 * Get min/max values from FW.
		 */
		ret = smu_v13_0_6_get_dpm_ultimate_freq(smu, SMU_GFXCLK,
							&gfxclkmin, &gfxclkmax);
		if (ret)
			return ret;

		dpm_table->count = 2;
		dpm_table->dpm_levels[0].value = gfxclkmin;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->dpm_levels[1].value = gfxclkmax;
		dpm_table->dpm_levels[1].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[1].value;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = pptable->MinGfxclkFrequency;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	for (j = 0; j < ARRAY_SIZE(dpm_map); j++) {
		dpm_table = dpm_map[j].dpm_table;
		levels = 1;
		if (smu_cmn_feature_is_enabled(smu, dpm_map[j].feature_num)) {
			ret = smu_v13_0_6_get_dpm_level_count(
				smu, dpm_map[j].clk_type, &levels);
			if (ret)
				return ret;
		}
		dpm_table->count = levels;
		for (i = 0; i < dpm_table->count; ++i) {
			dpm_table->dpm_levels[i].value =
				dpm_map[j].freq_table[i];
			dpm_table->dpm_levels[i].enabled = true;
		}
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[levels - 1].value;
	}

	return 0;
}

static int smu_v13_0_6_setup_pptable(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;

	/* TODO: PPTable is not available.
	 * 1) Find an alternate way to get 'PPTable values' here.
	 * 2) Check if there is SW CTF
	 */
	table_context->thermal_controller_type = 0;

	return 0;
}

static int smu_v13_0_6_check_fw_status(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t mp1_fw_flags;

	mp1_fw_flags =
		RREG32_PCIE(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff));

	if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
	    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
		return 0;

	return -EIO;
}

static int smu_v13_0_6_populate_umd_state_clk(struct smu_context *smu)
{
	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
	struct smu_13_0_dpm_table *gfx_table =
		&dpm_context->dpm_tables.gfx_table;
	struct smu_13_0_dpm_table *mem_table =
		&dpm_context->dpm_tables.uclk_table;
	struct smu_13_0_dpm_table *soc_table =
		&dpm_context->dpm_tables.soc_table;
	struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;

	pstate_table->gfxclk_pstate.min = gfx_table->min;
	pstate_table->gfxclk_pstate.peak = gfx_table->max;
	pstate_table->gfxclk_pstate.curr.min = gfx_table->min;
	pstate_table->gfxclk_pstate.curr.max = gfx_table->max;

	pstate_table->uclk_pstate.min = mem_table->min;
	pstate_table->uclk_pstate.peak = mem_table->max;
	pstate_table->uclk_pstate.curr.min = mem_table->min;
	pstate_table->uclk_pstate.curr.max = mem_table->max;

	pstate_table->socclk_pstate.min = soc_table->min;
	pstate_table->socclk_pstate.peak = soc_table->max;
	pstate_table->socclk_pstate.curr.min = soc_table->min;
	pstate_table->socclk_pstate.curr.max = soc_table->max;

	if (gfx_table->count > SMU_13_0_6_UMD_PSTATE_GFXCLK_LEVEL &&
	    mem_table->count > SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL &&
	    soc_table->count > SMU_13_0_6_UMD_PSTATE_SOCCLK_LEVEL) {
		pstate_table->gfxclk_pstate.standard =
			gfx_table->dpm_levels[SMU_13_0_6_UMD_PSTATE_GFXCLK_LEVEL].value;
		pstate_table->uclk_pstate.standard =
			mem_table->dpm_levels[SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL].value;
		pstate_table->socclk_pstate.standard =
			soc_table->dpm_levels[SMU_13_0_6_UMD_PSTATE_SOCCLK_LEVEL].value;
	} else {
		pstate_table->gfxclk_pstate.standard =
			pstate_table->gfxclk_pstate.min;
		pstate_table->uclk_pstate.standard =
			pstate_table->uclk_pstate.min;
		pstate_table->socclk_pstate.standard =
			pstate_table->socclk_pstate.min;
	}

	return 0;
}

static int smu_v13_0_6_get_clk_table(struct smu_context *smu,
				     struct pp_clock_levels_with_latency *clocks,
				     struct smu_13_0_dpm_table *dpm_table)
{
	int i, count;

	count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS :
						      dpm_table->count;
	clocks->num_levels = count;

	for (i = 0; i < count; i++) {
		clocks->data[i].clocks_in_khz =
			dpm_table->dpm_levels[i].value * 1000;
		clocks->data[i].latency_in_us = 0;
	}

	return 0;
}

static int smu_v13_0_6_freqs_in_same_level(int32_t frequency1,
					   int32_t frequency2)
{
	return (abs(frequency1 - frequency2) <= EPSILON);
}
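
/*
 * With EPSILON at 1 MHz, readings such as 1399 MHz and 1400 MHz compare
 * as the same DPM level; only the documented 1 MHz drift is absorbed,
 * anything larger is treated as a distinct frequency.
 */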

static uint32_t smu_v13_0_6_get_throttler_status(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_13_0_power_context *power_context = smu_power->power_context;
	uint32_t throttler_status = 0;

	throttler_status = atomic_read(&power_context->throttle_status);
	dev_dbg(smu->adev->dev, "SMU Throttler status: %u", throttler_status);

	return throttler_status;
}

static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
					    MetricsMember_t member,
					    uint32_t *value)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	MetricsTableX_t *metrics_x = (MetricsTableX_t *)smu_table->metrics_table;
	MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table;
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;
	int xcc_id;

	ret = smu_v13_0_6_get_metrics_table(smu, NULL, false);
	if (ret)
		return ret;

	/* For clocks with multiple instances, only report the first one */
	switch (member) {
	case METRICS_CURR_GFXCLK:
	case METRICS_AVERAGE_GFXCLK:
		if (smu->smc_fw_version >= 0x552F00) {
			xcc_id = GET_INST(GC, 0);
			*value = SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency)[xcc_id]);
		} else {
			*value = 0;
		}
		break;
	case METRICS_CURR_SOCCLK:
	case METRICS_AVERAGE_SOCCLK:
		*value = SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency)[0]);
		break;
	case METRICS_CURR_UCLK:
	case METRICS_AVERAGE_UCLK:
		*value = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency));
		break;
	case METRICS_CURR_VCLK:
		*value = SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency)[0]);
		break;
	case METRICS_CURR_DCLK:
		*value = SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency)[0]);
		break;
	case METRICS_CURR_FCLK:
		*value = SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequency));
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		*value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy));
		break;
	case METRICS_AVERAGE_MEMACTIVITY:
		*value = SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization));
		break;
	case METRICS_CURR_SOCKETPOWER:
		*value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower)) << 8;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature)) *
			 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_MEM:
		*value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature)) *
			 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	/* This is the max of all VRs and not just SOC VR.
	 * No need to define another data type for the same.
	 */
	case METRICS_TEMPERATURE_VRSOC:
		*value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature)) *
			 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	default:
		*value = UINT_MAX;
		break;
	}

	return ret;
}

static int smu_v13_0_6_get_current_clk_freq_by_table(struct smu_context *smu,
						     enum smu_clk_type clk_type,
						     uint32_t *value)
{
	MetricsMember_t member_type;

	if (!value)
		return -EINVAL;

	switch (clk_type) {
	case SMU_GFXCLK:
		member_type = METRICS_CURR_GFXCLK;
		break;
	case SMU_UCLK:
		member_type = METRICS_CURR_UCLK;
		break;
	case SMU_SOCCLK:
		member_type = METRICS_CURR_SOCCLK;
		break;
	case SMU_VCLK:
		member_type = METRICS_CURR_VCLK;
		break;
	case SMU_DCLK:
		member_type = METRICS_CURR_DCLK;
		break;
	case SMU_FCLK:
		member_type = METRICS_CURR_FCLK;
		break;
	default:
		return -EINVAL;
	}

	return smu_v13_0_6_get_smu_metrics_data(smu, member_type, value);
}

static int smu_v13_0_6_print_clks(struct smu_context *smu, char *buf, int size,
				  struct smu_13_0_dpm_table *single_dpm_table,
				  uint32_t curr_clk, const char *clk_name)
{
	struct pp_clock_levels_with_latency clocks;
	int i, ret, level = -1;
	uint32_t clk1, clk2;

	ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
	if (ret) {
		dev_err(smu->adev->dev, "Attempt to get %s clk levels failed!",
			clk_name);
		return ret;
	}

	if (!clocks.num_levels)
		return -EINVAL;

	if (curr_clk < SMU_13_0_6_DSCLK_THRESHOLD) {
		size = sysfs_emit_at(buf, size, "S: %uMhz *\n", curr_clk);
		for (i = 0; i < clocks.num_levels; i++)
			size += sysfs_emit_at(buf, size, "%d: %uMhz\n", i,
					      clocks.data[i].clocks_in_khz /
						      1000);
	} else {
		if ((clocks.num_levels == 1) ||
		    (curr_clk < (clocks.data[0].clocks_in_khz / 1000)))
			level = 0;
		for (i = 0; i < clocks.num_levels; i++) {
			clk1 = clocks.data[i].clocks_in_khz / 1000;

			if (i < (clocks.num_levels - 1))
				clk2 = clocks.data[i + 1].clocks_in_khz / 1000;

			if (curr_clk == clk1) {
				level = i;
			} else if (curr_clk >= clk1 && curr_clk < clk2) {
				level = (curr_clk - clk1) <= (clk2 - curr_clk) ?
						i : i + 1;
			}

			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i,
					      clk1, (level == i) ? "*" : "");
		}
	}

	return size;
}
"*" : ""); 1048 } 1049 1050 break; 1051 1052 case SMU_OD_MCLK: 1053 size += sysfs_emit_at(buf, size, "%s:\n", "MCLK"); 1054 fallthrough; 1055 case SMU_MCLK: 1056 ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_UCLK, 1057 &now); 1058 if (ret) { 1059 dev_err(smu->adev->dev, 1060 "Attempt to get current mclk Failed!"); 1061 return ret; 1062 } 1063 1064 single_dpm_table = &(dpm_context->dpm_tables.uclk_table); 1065 1066 return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table, 1067 now, "mclk"); 1068 1069 case SMU_SOCCLK: 1070 ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_SOCCLK, 1071 &now); 1072 if (ret) { 1073 dev_err(smu->adev->dev, 1074 "Attempt to get current socclk Failed!"); 1075 return ret; 1076 } 1077 1078 single_dpm_table = &(dpm_context->dpm_tables.soc_table); 1079 1080 return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table, 1081 now, "socclk"); 1082 1083 case SMU_FCLK: 1084 ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_FCLK, 1085 &now); 1086 if (ret) { 1087 dev_err(smu->adev->dev, 1088 "Attempt to get current fclk Failed!"); 1089 return ret; 1090 } 1091 1092 single_dpm_table = &(dpm_context->dpm_tables.fclk_table); 1093 1094 return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table, 1095 now, "fclk"); 1096 1097 case SMU_VCLK: 1098 ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_VCLK, 1099 &now); 1100 if (ret) { 1101 dev_err(smu->adev->dev, 1102 "Attempt to get current vclk Failed!"); 1103 return ret; 1104 } 1105 1106 single_dpm_table = &(dpm_context->dpm_tables.vclk_table); 1107 1108 return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table, 1109 now, "vclk"); 1110 1111 case SMU_DCLK: 1112 ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_DCLK, 1113 &now); 1114 if (ret) { 1115 dev_err(smu->adev->dev, 1116 "Attempt to get current dclk Failed!"); 1117 return ret; 1118 } 1119 1120 single_dpm_table = &(dpm_context->dpm_tables.dclk_table); 1121 1122 return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table, 1123 now, "dclk"); 1124 1125 default: 1126 break; 1127 } 1128 1129 return size; 1130 } 1131 1132 static int smu_v13_0_6_upload_dpm_level(struct smu_context *smu, bool max, 1133 uint32_t feature_mask, uint32_t level) 1134 { 1135 struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; 1136 uint32_t freq; 1137 int ret = 0; 1138 1139 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) && 1140 (feature_mask & FEATURE_MASK(FEATURE_DPM_GFXCLK))) { 1141 freq = dpm_context->dpm_tables.gfx_table.dpm_levels[level].value; 1142 ret = smu_cmn_send_smc_msg_with_param( 1143 smu, 1144 (max ? SMU_MSG_SetSoftMaxGfxClk : 1145 SMU_MSG_SetSoftMinGfxclk), 1146 freq & 0xffff, NULL); 1147 if (ret) { 1148 dev_err(smu->adev->dev, 1149 "Failed to set soft %s gfxclk !\n", 1150 max ? "max" : "min"); 1151 return ret; 1152 } 1153 } 1154 1155 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) && 1156 (feature_mask & FEATURE_MASK(FEATURE_DPM_UCLK))) { 1157 freq = dpm_context->dpm_tables.uclk_table.dpm_levels[level] 1158 .value; 1159 ret = smu_cmn_send_smc_msg_with_param( 1160 smu, 1161 (max ? SMU_MSG_SetSoftMaxByFreq : 1162 SMU_MSG_SetSoftMinByFreq), 1163 (PPCLK_UCLK << 16) | (freq & 0xffff), NULL); 1164 if (ret) { 1165 dev_err(smu->adev->dev, 1166 "Failed to set soft %s memclk !\n", 1167 max ? 
"max" : "min"); 1168 return ret; 1169 } 1170 } 1171 1172 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT) && 1173 (feature_mask & FEATURE_MASK(FEATURE_DPM_SOCCLK))) { 1174 freq = dpm_context->dpm_tables.soc_table.dpm_levels[level].value; 1175 ret = smu_cmn_send_smc_msg_with_param( 1176 smu, 1177 (max ? SMU_MSG_SetSoftMaxByFreq : 1178 SMU_MSG_SetSoftMinByFreq), 1179 (PPCLK_SOCCLK << 16) | (freq & 0xffff), NULL); 1180 if (ret) { 1181 dev_err(smu->adev->dev, 1182 "Failed to set soft %s socclk !\n", 1183 max ? "max" : "min"); 1184 return ret; 1185 } 1186 } 1187 1188 return ret; 1189 } 1190 1191 static int smu_v13_0_6_force_clk_levels(struct smu_context *smu, 1192 enum smu_clk_type type, uint32_t mask) 1193 { 1194 struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; 1195 struct smu_13_0_dpm_table *single_dpm_table = NULL; 1196 uint32_t soft_min_level, soft_max_level; 1197 int ret = 0; 1198 1199 soft_min_level = mask ? (ffs(mask) - 1) : 0; 1200 soft_max_level = mask ? (fls(mask) - 1) : 0; 1201 1202 switch (type) { 1203 case SMU_SCLK: 1204 single_dpm_table = &(dpm_context->dpm_tables.gfx_table); 1205 if (soft_max_level >= single_dpm_table->count) { 1206 dev_err(smu->adev->dev, 1207 "Clock level specified %d is over max allowed %d\n", 1208 soft_max_level, single_dpm_table->count - 1); 1209 ret = -EINVAL; 1210 break; 1211 } 1212 1213 ret = smu_v13_0_6_upload_dpm_level( 1214 smu, false, FEATURE_MASK(FEATURE_DPM_GFXCLK), 1215 soft_min_level); 1216 if (ret) { 1217 dev_err(smu->adev->dev, 1218 "Failed to upload boot level to lowest!\n"); 1219 break; 1220 } 1221 1222 ret = smu_v13_0_6_upload_dpm_level( 1223 smu, true, FEATURE_MASK(FEATURE_DPM_GFXCLK), 1224 soft_max_level); 1225 if (ret) 1226 dev_err(smu->adev->dev, 1227 "Failed to upload dpm max level to highest!\n"); 1228 1229 break; 1230 1231 case SMU_MCLK: 1232 case SMU_SOCCLK: 1233 case SMU_FCLK: 1234 /* 1235 * Should not arrive here since smu_13_0_6 does not 1236 * support mclk/socclk/fclk softmin/softmax settings 1237 */ 1238 ret = -EINVAL; 1239 break; 1240 1241 default: 1242 break; 1243 } 1244 1245 return ret; 1246 } 1247 1248 static int smu_v13_0_6_get_current_activity_percent(struct smu_context *smu, 1249 enum amd_pp_sensors sensor, 1250 uint32_t *value) 1251 { 1252 int ret = 0; 1253 1254 if (!value) 1255 return -EINVAL; 1256 1257 switch (sensor) { 1258 case AMDGPU_PP_SENSOR_GPU_LOAD: 1259 ret = smu_v13_0_6_get_smu_metrics_data( 1260 smu, METRICS_AVERAGE_GFXACTIVITY, value); 1261 break; 1262 case AMDGPU_PP_SENSOR_MEM_LOAD: 1263 ret = smu_v13_0_6_get_smu_metrics_data( 1264 smu, METRICS_AVERAGE_MEMACTIVITY, value); 1265 break; 1266 default: 1267 dev_err(smu->adev->dev, 1268 "Invalid sensor for retrieving clock activity\n"); 1269 return -EINVAL; 1270 } 1271 1272 return ret; 1273 } 1274 1275 static int smu_v13_0_6_thermal_get_temperature(struct smu_context *smu, 1276 enum amd_pp_sensors sensor, 1277 uint32_t *value) 1278 { 1279 int ret = 0; 1280 1281 if (!value) 1282 return -EINVAL; 1283 1284 switch (sensor) { 1285 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: 1286 ret = smu_v13_0_6_get_smu_metrics_data( 1287 smu, METRICS_TEMPERATURE_HOTSPOT, value); 1288 break; 1289 case AMDGPU_PP_SENSOR_MEM_TEMP: 1290 ret = smu_v13_0_6_get_smu_metrics_data( 1291 smu, METRICS_TEMPERATURE_MEM, value); 1292 break; 1293 default: 1294 dev_err(smu->adev->dev, "Invalid sensor for retrieving temp\n"); 1295 return -EINVAL; 1296 } 1297 1298 return ret; 1299 } 1300 1301 static int smu_v13_0_6_read_sensor(struct smu_context *smu, 1302 enum amd_pp_sensors 
static int smu_v13_0_6_read_sensor(struct smu_context *smu,
				   enum amd_pp_sensors sensor, void *data,
				   uint32_t *size)
{
	int ret = 0;

	if (amdgpu_ras_intr_triggered())
		return 0;

	if (!data || !size)
		return -EINVAL;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_MEM_LOAD:
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		ret = smu_v13_0_6_get_current_activity_percent(smu, sensor,
							       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
		ret = smu_v13_0_6_get_smu_metrics_data(smu,
						       METRICS_CURR_SOCKETPOWER,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
	case AMDGPU_PP_SENSOR_MEM_TEMP:
		ret = smu_v13_0_6_thermal_get_temperature(smu, sensor,
							  (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		ret = smu_v13_0_6_get_current_clk_freq_by_table(
			smu, SMU_UCLK, (uint32_t *)data);
		/* the output clock frequency in 10K unit */
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		ret = smu_v13_0_6_get_current_clk_freq_by_table(
			smu, SMU_GFXCLK, (uint32_t *)data);
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		ret = smu_v13_0_get_gfx_vdd(smu, (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

static int smu_v13_0_6_get_power_limit(struct smu_context *smu,
				       uint32_t *current_power_limit,
				       uint32_t *default_power_limit,
				       uint32_t *max_power_limit,
				       uint32_t *min_power_limit)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct PPTable_t *pptable =
		(struct PPTable_t *)smu_table->driver_pptable;
	uint32_t power_limit = 0;
	int ret;

	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetPptLimit, &power_limit);
	if (ret) {
		dev_err(smu->adev->dev, "Couldn't get PPT limit");
		return -EINVAL;
	}

	if (current_power_limit)
		*current_power_limit = power_limit;
	if (default_power_limit)
		*default_power_limit = power_limit;

	if (max_power_limit)
		*max_power_limit = pptable->MaxSocketPowerLimit;

	if (min_power_limit)
		*min_power_limit = 0;

	return 0;
}

static int smu_v13_0_6_set_power_limit(struct smu_context *smu,
				       enum smu_ppt_limit_type limit_type,
				       uint32_t limit)
{
	return smu_v13_0_set_power_limit(smu, limit_type, limit);
}

static int smu_v13_0_6_irq_process(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_13_0_power_context *power_context = smu_power->power_context;
	uint32_t client_id = entry->client_id;
	uint32_t ctxid = entry->src_data[0];
	uint32_t src_id = entry->src_id;
	uint32_t data;

	if (client_id == SOC15_IH_CLIENTID_MP1) {
		if (src_id == IH_INTERRUPT_ID_TO_DRIVER) {
			/* ACK SMUToHost interrupt */
			data = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
			data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
			WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, data);
			/*
			 * ctxid is used to distinguish different events for SMCToHost
			 * interrupt.
			 */
			switch (ctxid) {
			case IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING:
				/*
				 * Increment the throttle interrupt counter
				 */
				atomic64_inc(&smu->throttle_int_counter);

				if (!atomic_read(&adev->throttling_logging_enabled))
					return 0;

				/* This uses the new method which fixes the
				 * incorrect throttling status reporting
				 * through metrics table. For older FWs,
				 * it will be ignored.
				 */
				if (__ratelimit(&adev->throttling_logging_rs)) {
					atomic_set(
						&power_context->throttle_status,
						entry->src_data[1]);
					schedule_work(&smu->throttling_logging_work);
				}

				break;
			}
		}
	}

	return 0;
}

static int smu_v13_0_6_set_irq_state(struct amdgpu_device *adev,
				     struct amdgpu_irq_src *source,
				     unsigned type,
				     enum amdgpu_interrupt_state state)
{
	uint32_t val = 0;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* For MP1 SW irqs */
		val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
		WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);

		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* For MP1 SW irqs */
		val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
		WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val);

		val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
		WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);

		break;
	default:
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs smu_v13_0_6_irq_funcs = {
	.set = smu_v13_0_6_set_irq_state,
	.process = smu_v13_0_6_irq_process,
};

static int smu_v13_0_6_register_irq_handler(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_irq_src *irq_src = &smu->irq_source;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	irq_src->num_types = 1;
	irq_src->funcs = &smu_v13_0_6_irq_funcs;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
				IH_INTERRUPT_ID_TO_DRIVER,
				irq_src);
	if (ret)
		return ret;

	return ret;
}

static int smu_v13_0_6_notify_unload(struct smu_context *smu)
{
	if (amdgpu_in_reset(smu->adev))
		return 0;

	dev_dbg(smu->adev->dev, "Notify PMFW about driver unload");
	/* Ignore return, just intimate FW that driver is not going to be there */
	smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);

	return 0;
}
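
/*
 * PMFW version words in this file pack major/minor/patch bytes, so the
 * 0x554800 check below corresponds to version 85.72.0 (0x55 = 85,
 * 0x48 = 72), matching the comment in the next function; the 0x552F00
 * used elsewhere is 85.47.0.
 */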
static int smu_v13_0_6_mca_set_debug_mode(struct smu_context *smu, bool enable)
{
	/* NOTE: this ClearMcaOnRead message is only supported for smu version 85.72.0 or higher */
	if (smu->smc_fw_version < 0x554800)
		return 0;

	return smu_cmn_send_smc_msg_with_param(
		smu, SMU_MSG_ClearMcaOnRead,
		enable ? 0 : ClearMcaOnRead_UE_FLAG_MASK | ClearMcaOnRead_CE_POLL_MASK,
		NULL);
}

static int smu_v13_0_6_system_features_control(struct smu_context *smu,
					       bool enable)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (enable) {
		if (!(adev->flags & AMD_IS_APU))
			ret = smu_v13_0_system_features_control(smu, enable);
	} else {
		/* Notify FW that the device is no longer driver managed */
		smu_v13_0_6_notify_unload(smu);
	}

	return ret;
}

static int smu_v13_0_6_set_gfx_soft_freq_limited_range(struct smu_context *smu,
						       uint32_t min,
						       uint32_t max)
{
	int ret;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
					      max & 0xffff, NULL);
	if (ret)
		return ret;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinGfxclk,
					      min & 0xffff, NULL);

	return ret;
}

static int smu_v13_0_6_set_performance_level(struct smu_context *smu,
					     enum amd_dpm_forced_level level)
{
	struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
	struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	struct smu_13_0_dpm_table *gfx_table =
		&dpm_context->dpm_tables.gfx_table;
	struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
	int ret;

	/* Disable determinism if switching to another mode */
	if ((smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) &&
	    (level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)) {
		smu_cmn_send_smc_msg(smu, SMU_MSG_DisableDeterminism, NULL);
		pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
	}

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM:
		return 0;

	case AMD_DPM_FORCED_LEVEL_AUTO:
		if ((gfx_table->min == pstate_table->gfxclk_pstate.curr.min) &&
		    (gfx_table->max == pstate_table->gfxclk_pstate.curr.max))
			return 0;

		ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(
			smu, gfx_table->min, gfx_table->max);
		if (ret)
			return ret;

		pstate_table->gfxclk_pstate.curr.min = gfx_table->min;
		pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
		return 0;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
		return 0;
	default:
		break;
	}

	return -EINVAL;
}

static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu,
						   enum smu_clk_type clk_type,
						   uint32_t min, uint32_t max)
{
	struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
	struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
	struct amdgpu_device *adev = smu->adev;
	uint32_t min_clk;
	uint32_t max_clk;
	int ret = 0;

	if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK)
		return -EINVAL;

	if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) &&
	    (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM))
		return -EINVAL;

	if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
		if (min >= max) {
			dev_err(smu->adev->dev,
				"Minimum GFX clk should be less than the maximum allowed clock\n");
			return -EINVAL;
		}

		if ((min == pstate_table->gfxclk_pstate.curr.min) &&
		    (max == pstate_table->gfxclk_pstate.curr.max))
			return 0;

		ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(smu, min, max);
		if (!ret) {
			pstate_table->gfxclk_pstate.curr.min = min;
			pstate_table->gfxclk_pstate.curr.max = max;
		}

		return ret;
	}

	if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
		if (!max || (max < dpm_context->dpm_tables.gfx_table.min) ||
		    (max > dpm_context->dpm_tables.gfx_table.max)) {
			dev_warn(
				adev->dev,
				"Invalid max frequency %d MHz specified for determinism\n",
				max);
			return -EINVAL;
		}

		/* Restore default min/max clocks and enable determinism */
		min_clk = dpm_context->dpm_tables.gfx_table.min;
		max_clk = dpm_context->dpm_tables.gfx_table.max;
		ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(smu, min_clk,
								  max_clk);
		if (!ret) {
			usleep_range(500, 1000);
			ret = smu_cmn_send_smc_msg_with_param(
				smu, SMU_MSG_EnableDeterminism, max, NULL);
			if (ret) {
				dev_err(adev->dev,
					"Failed to enable determinism at GFX clock %d MHz\n",
					max);
			} else {
				pstate_table->gfxclk_pstate.curr.min = min_clk;
				pstate_table->gfxclk_pstate.curr.max = max;
			}
		}
	}

	return ret;
}

static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
					  enum PP_OD_DPM_TABLE_COMMAND type,
					  long input[], uint32_t size)
{
	struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
	struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
	uint32_t min_clk;
	uint32_t max_clk;
	int ret = 0;

	/* Only allowed in manual or determinism mode */
	if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) &&
	    (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM))
		return -EINVAL;

	switch (type) {
	case PP_OD_EDIT_SCLK_VDDC_TABLE:
		if (size != 2) {
			dev_err(smu->adev->dev,
				"Input parameter number not correct\n");
			return -EINVAL;
		}

		if (input[0] == 0) {
			if (input[1] < dpm_context->dpm_tables.gfx_table.min) {
				dev_warn(
					smu->adev->dev,
					"Minimum GFX clk (%ld) MHz specified is less than the minimum allowed (%d) MHz\n",
					input[1],
					dpm_context->dpm_tables.gfx_table.min);
				pstate_table->gfxclk_pstate.custom.min =
					pstate_table->gfxclk_pstate.curr.min;
				return -EINVAL;
			}

			pstate_table->gfxclk_pstate.custom.min = input[1];
		} else if (input[0] == 1) {
			if (input[1] > dpm_context->dpm_tables.gfx_table.max) {
				dev_warn(
					smu->adev->dev,
					"Maximum GFX clk (%ld) MHz specified is greater than the maximum allowed (%d) MHz\n",
					input[1],
					dpm_context->dpm_tables.gfx_table.max);
				pstate_table->gfxclk_pstate.custom.max =
					pstate_table->gfxclk_pstate.curr.max;
				return -EINVAL;
			}

			pstate_table->gfxclk_pstate.custom.max = input[1];
		} else {
			return -EINVAL;
		}
		break;
	case PP_OD_RESTORE_DEFAULT_TABLE:
		if (size != 0) {
			dev_err(smu->adev->dev,
				"Input parameter number not correct\n");
			return -EINVAL;
		} else {
			/* Use the default frequencies for manual and determinism mode */
			min_clk = dpm_context->dpm_tables.gfx_table.min;
			max_clk = dpm_context->dpm_tables.gfx_table.max;

			return smu_v13_0_6_set_soft_freq_limited_range(
				smu, SMU_GFXCLK, min_clk, max_clk);
		}
		break;
	case PP_OD_COMMIT_DPM_TABLE:
		if (size != 0) {
			dev_err(smu->adev->dev,
				"Input parameter number not correct\n");
			return -EINVAL;
		} else {
			if (!pstate_table->gfxclk_pstate.custom.min)
				pstate_table->gfxclk_pstate.custom.min =
					pstate_table->gfxclk_pstate.curr.min;

			if (!pstate_table->gfxclk_pstate.custom.max)
				pstate_table->gfxclk_pstate.custom.max =
					pstate_table->gfxclk_pstate.curr.max;

			min_clk = pstate_table->gfxclk_pstate.custom.min;
			max_clk = pstate_table->gfxclk_pstate.custom.max;

			return smu_v13_0_6_set_soft_freq_limited_range(
				smu, SMU_GFXCLK, min_clk, max_clk);
		}
		break;
	default:
		return -ENOSYS;
	}

	return ret;
}

static int smu_v13_0_6_get_enabled_mask(struct smu_context *smu,
					uint64_t *feature_mask)
{
	int ret;

	ret = smu_cmn_get_enabled_mask(smu, feature_mask);

	if (ret == -EIO && smu->smc_fw_version < 0x552F00) {
		*feature_mask = 0;
		ret = 0;
	}

	return ret;
}

static bool smu_v13_0_6_is_dpm_running(struct smu_context *smu)
{
	int ret;
	uint64_t feature_enabled;

	ret = smu_v13_0_6_get_enabled_mask(smu, &feature_enabled);
	if (ret)
		return false;

	return !!(feature_enabled & SMC_DPM_FEATURE);
}

static int smu_v13_0_6_request_i2c_xfer(struct smu_context *smu,
					void *table_data)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *table = &smu_table->driver_table;
	struct amdgpu_device *adev = smu->adev;
	uint32_t table_size;
	int ret = 0;

	if (!table_data)
		return -EINVAL;

	table_size = smu_table->tables[SMU_TABLE_I2C_COMMANDS].size;

	memcpy(table->cpu_addr, table_data, table_size);
	/* Flush hdp cache */
	amdgpu_asic_flush_hdp(adev, NULL);
	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RequestI2cTransaction,
				   NULL);

	return ret;
}
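
/*
 * Packing sketch for the transfer below (derived from the loop, not from
 * firmware docs): a 2-byte write followed by a 4-byte read from the same
 * client becomes six SwI2cCmds; the first read byte carries
 * CMDCONFIG_RESTART since the direction changed, and the last byte of the
 * final message carries CMDCONFIG_STOP.
 */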
static int smu_v13_0_6_i2c_xfer(struct i2c_adapter *i2c_adap,
				struct i2c_msg *msg, int num_msgs)
{
	struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap);
	struct amdgpu_device *adev = smu_i2c->adev;
	struct smu_context *smu = adev->powerplay.pp_handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *table = &smu_table->driver_table;
	SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
	int i, j, r, c;
	u16 dir;

	if (!adev->pm.dpm_enabled)
		return -EBUSY;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->I2CcontrollerPort = smu_i2c->port;
	req->I2CSpeed = I2C_SPEED_FAST_400K;
	req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */
	dir = msg[0].flags & I2C_M_RD;

	for (c = i = 0; i < num_msgs; i++) {
		for (j = 0; j < msg[i].len; j++, c++) {
			SwI2cCmd_t *cmd = &req->SwI2cCmds[c];

			if (!(msg[i].flags & I2C_M_RD)) {
				/* write */
				cmd->CmdConfig |= CMDCONFIG_READWRITE_MASK;
				cmd->ReadWriteData = msg[i].buf[j];
			}

			if ((dir ^ msg[i].flags) & I2C_M_RD) {
				/* The direction changes */
				dir = msg[i].flags & I2C_M_RD;
				cmd->CmdConfig |= CMDCONFIG_RESTART_MASK;
			}

			req->NumCmds++;

			/*
			 * Insert STOP if we are at the last byte of either last
			 * message for the transaction or the client explicitly
			 * requires a STOP at this particular message.
			 */
			if ((j == msg[i].len - 1) &&
			    ((i == num_msgs - 1) || (msg[i].flags & I2C_M_STOP))) {
				cmd->CmdConfig &= ~CMDCONFIG_RESTART_MASK;
				cmd->CmdConfig |= CMDCONFIG_STOP_MASK;
			}
		}
	}
	mutex_lock(&adev->pm.mutex);
	r = smu_v13_0_6_request_i2c_xfer(smu, req);
	if (r)
		goto fail;

	for (c = i = 0; i < num_msgs; i++) {
		if (!(msg[i].flags & I2C_M_RD)) {
			c += msg[i].len;
			continue;
		}
		for (j = 0; j < msg[i].len; j++, c++) {
			SwI2cCmd_t *cmd = &res->SwI2cCmds[c];

			msg[i].buf[j] = cmd->ReadWriteData;
		}
	}
	r = num_msgs;
fail:
	mutex_unlock(&adev->pm.mutex);
	kfree(req);
	return r;
}

static u32 smu_v13_0_6_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm smu_v13_0_6_i2c_algo = {
	.master_xfer = smu_v13_0_6_i2c_xfer,
	.functionality = smu_v13_0_6_i2c_func,
};

static const struct i2c_adapter_quirks smu_v13_0_6_i2c_control_quirks = {
	.flags = I2C_AQ_COMB | I2C_AQ_COMB_SAME_ADDR | I2C_AQ_NO_ZERO_LEN,
	.max_read_len = MAX_SW_I2C_COMMANDS,
	.max_write_len = MAX_SW_I2C_COMMANDS,
	.max_comb_1st_msg_len = 2,
	.max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2,
};

static int smu_v13_0_6_i2c_control_init(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int res, i;

	for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
		struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
		struct i2c_adapter *control = &smu_i2c->adapter;

		smu_i2c->adev = adev;
		smu_i2c->port = i;
		mutex_init(&smu_i2c->mutex);
		control->owner = THIS_MODULE;
		control->dev.parent = &adev->pdev->dev;
		control->algo = &smu_v13_0_6_i2c_algo;
		snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
		control->quirks = &smu_v13_0_6_i2c_control_quirks;
		i2c_set_adapdata(control, smu_i2c);

		res = i2c_add_adapter(control);
		if (res) {
			DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
			goto Out_err;
		}
	}

	adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
	adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;

	return 0;
Out_err:
	/* Only unwind adapters that were actually registered */
	for (i--; i >= 0; i--) {
		struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
		struct i2c_adapter *control = &smu_i2c->adapter;

		i2c_del_adapter(control);
	}
	return res;
}

static void smu_v13_0_6_i2c_control_fini(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int i;

	for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
		struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
		struct i2c_adapter *control = &smu_i2c->adapter;

		i2c_del_adapter(control);
	}
	adev->pm.ras_eeprom_i2c_bus = NULL;
	adev->pm.fru_eeprom_i2c_bus = NULL;
}

static void smu_v13_0_6_get_unique_id(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct PPTable_t *pptable =
		(struct PPTable_t *)smu_table->driver_pptable;

	adev->unique_id = pptable->PublicSerialNumber_AID;
}

static bool smu_v13_0_6_is_baco_supported(struct smu_context *smu)
{
	/* smu_13_0_6 does not support baco */

	return false;
}
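
/* Human-readable labels for the PMFW throttler status bits logged below */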
static const char *const throttling_logging_label[] = {
	[THROTTLER_PROCHOT_BIT] = "Prochot",
	[THROTTLER_PPT_BIT] = "PPT",
	[THROTTLER_THERMAL_SOCKET_BIT] = "SOC",
	[THROTTLER_THERMAL_VR_BIT] = "VR",
	[THROTTLER_THERMAL_HBM_BIT] = "HBM"
};

static void smu_v13_0_6_log_thermal_throttling_event(struct smu_context *smu)
{
	int throttler_idx, throttling_events = 0, buf_idx = 0;
	struct amdgpu_device *adev = smu->adev;
	uint32_t throttler_status;
	char log_buf[256];

	throttler_status = smu_v13_0_6_get_throttler_status(smu);
	if (!throttler_status)
		return;

	memset(log_buf, 0, sizeof(log_buf));
	for (throttler_idx = 0;
	     throttler_idx < ARRAY_SIZE(throttling_logging_label);
	     throttler_idx++) {
		if (throttler_status & (1U << throttler_idx)) {
			throttling_events++;
			buf_idx += snprintf(
				log_buf + buf_idx, sizeof(log_buf) - buf_idx,
				"%s%s", throttling_events > 1 ? " and " : "",
				throttling_logging_label[throttler_idx]);
			if (buf_idx >= sizeof(log_buf)) {
				dev_err(adev->dev, "buffer overflow!\n");
				log_buf[sizeof(log_buf) - 1] = '\0';
				break;
			}
		}
	}

	dev_warn(adev->dev,
		 "WARN: GPU is throttled, expect performance decrease. %s.\n",
		 log_buf);
	kgd2kfd_smi_event_throttle(
		smu->adev->kfd.dev,
		smu_cmn_get_indep_throttler_status(throttler_status,
						   smu_v13_0_6_throttler_map));
}

static int
smu_v13_0_6_get_current_pcie_link_width_level(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	return REG_GET_FIELD(RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL),
			     PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);
}

static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t speed_level;
	uint32_t esm_ctrl;

	/* TODO: confirm this on real target */
	esm_ctrl = RREG32_PCIE(smnPCIE_ESM_CTRL);
	if ((esm_ctrl >> 15) & 0x1FFFF)
		return (((esm_ctrl >> 8) & 0x3F) + 128);

	speed_level = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
		       PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
		      >> PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
	if (speed_level > LINK_SPEED_MAX)
		speed_level = 0;

	return pcie_gen_to_speed(speed_level + 1);
}
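
/*
 * Most metrics fields below are unsigned Q10 fixed-point values from PMFW
 * (10 fractional bits); SMUQ10_ROUND() converts them to plain integers,
 * i.e. approximately (val + (1 << 9)) >> 10 (the exact rounding is defined
 * by the macro elsewhere in this file). GET_METRIC_FIELD() selects between
 * the X and A variants of the metrics table for the running SKU.
 */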
static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v1_5 *gpu_metrics =
		(struct gpu_metrics_v1_5 *)smu_table->gpu_metrics_table;
	struct amdgpu_device *adev = smu->adev;
	int ret = 0, xcc_id, inst, i, j;
	MetricsTableX_t *metrics_x;
	MetricsTableA_t *metrics_a;
	u16 link_width_level;

	metrics_x = kzalloc(max(sizeof(MetricsTableX_t), sizeof(MetricsTableA_t)), GFP_KERNEL);
	if (!metrics_x)
		return -ENOMEM;

	ret = smu_v13_0_6_get_metrics_table(smu, metrics_x, true);
	if (ret) {
		kfree(metrics_x);
		return ret;
	}

	metrics_a = (MetricsTableA_t *)metrics_x;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 5);

	gpu_metrics->temperature_hotspot =
		SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature));
	/* Individual HBM stack temperature is not reported */
	gpu_metrics->temperature_mem =
		SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature));
	/* Reports max temperature of all voltage rails */
	gpu_metrics->temperature_vrsoc =
		SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature));

	gpu_metrics->average_gfx_activity =
		SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy));
	gpu_metrics->average_umc_activity =
		SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization));

	gpu_metrics->curr_socket_power =
		SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower));
	/* Energy counter reported in 15.259uJ (2^-16) units */
	gpu_metrics->energy_accumulator = GET_METRIC_FIELD(SocketEnergyAcc);

	for (i = 0; i < MAX_GFX_CLKS; i++) {
		xcc_id = GET_INST(GC, i);
		if (xcc_id >= 0)
			gpu_metrics->current_gfxclk[i] =
				SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency)[xcc_id]);

		if (i < MAX_CLKS) {
			gpu_metrics->current_socclk[i] =
				SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency)[i]);
			inst = GET_INST(VCN, i);
			if (inst >= 0) {
				gpu_metrics->current_vclk0[i] =
					SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency)[inst]);
				gpu_metrics->current_dclk0[i] =
					SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency)[inst]);
			}
		}
	}

	gpu_metrics->current_uclk = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency));

	/* Throttle status is not reported through metrics now */
	gpu_metrics->throttle_status = 0;

	/* Clock Lock Status. Each bit corresponds to each GFXCLK instance */
	gpu_metrics->gfxclk_lock_status = GET_METRIC_FIELD(GfxLockXCDMak) >> GET_INST(GC, 0);

	if (!(adev->flags & AMD_IS_APU)) {
		link_width_level = smu_v13_0_6_get_current_pcie_link_width_level(smu);
		if (link_width_level > MAX_LINK_WIDTH)
			link_width_level = 0;

		gpu_metrics->pcie_link_width =
			DECODE_LANE_WIDTH(link_width_level);
		gpu_metrics->pcie_link_speed =
			smu_v13_0_6_get_current_pcie_link_speed(smu);
		gpu_metrics->pcie_bandwidth_acc =
			SMUQ10_ROUND(metrics_x->PcieBandwidthAcc[0]);
		gpu_metrics->pcie_bandwidth_inst =
			SMUQ10_ROUND(metrics_x->PcieBandwidth[0]);
		gpu_metrics->pcie_l0_to_recov_count_acc =
			metrics_x->PCIeL0ToRecoveryCountAcc;
		gpu_metrics->pcie_replay_count_acc =
			metrics_x->PCIenReplayAAcc;
		gpu_metrics->pcie_replay_rover_count_acc =
			metrics_x->PCIenReplayARolloverCountAcc;
		gpu_metrics->pcie_nak_sent_count_acc =
			metrics_x->PCIeNAKSentCountAcc;
		gpu_metrics->pcie_nak_rcvd_count_acc =
			metrics_x->PCIeNAKReceivedCountAcc;
	}

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	gpu_metrics->gfx_activity_acc =
		SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusyAcc));
	gpu_metrics->mem_activity_acc =
		SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilizationAcc));

	for (i = 0; i < NUM_XGMI_LINKS; i++) {
		gpu_metrics->xgmi_read_data_acc[i] =
			SMUQ10_ROUND(GET_METRIC_FIELD(XgmiReadDataSizeAcc)[i]);
		gpu_metrics->xgmi_write_data_acc[i] =
			SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWriteDataSizeAcc)[i]);
	}

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		inst = GET_INST(JPEG, i);
		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			gpu_metrics->jpeg_activity[(i * adev->jpeg.num_jpeg_rings) + j] =
				SMUQ10_ROUND(GET_METRIC_FIELD(JpegBusy)
					     [(inst * adev->jpeg.num_jpeg_rings) + j]);
		}
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		inst = GET_INST(VCN, i);
		gpu_metrics->vcn_activity[i] =
			SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy)[inst]);
	}

	gpu_metrics->xgmi_link_width = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWidth));
	gpu_metrics->xgmi_link_speed = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiBitrate));

	gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp);

	*table = (void *)gpu_metrics;
	kfree(metrics_x);

	return sizeof(*gpu_metrics);
}
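
/*
 * Mode-2 reset is an SMU-assisted reset: the request is fired without
 * waiting for a response, the driver sleeps for an FLR-like interval,
 * restores the PCI config space saved at init and then polls PMFW for
 * the reset acknowledgment.
 */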
static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
{
	int ret = 0, index;
	struct amdgpu_device *adev = smu->adev;
	int timeout = 10;

	index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
					       SMU_MSG_GfxDeviceDriverReset);

	mutex_lock(&smu->message_lock);

	ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index,
					       SMU_RESET_MODE_2);

	/* This is similar to FLR, wait till max FLR timeout */
	msleep(100);

	dev_dbg(smu->adev->dev, "restore config space...\n");
	/* Restore the config space saved during init */
	amdgpu_device_load_pci_state(adev->pdev);

	dev_dbg(smu->adev->dev, "wait for reset ack\n");
	do {
		ret = smu_cmn_wait_for_response(smu);
		/* Wait a bit more time for getting ACK */
		if (ret == -ETIME) {
			--timeout;
			usleep_range(500, 1000);
			continue;
		}

		if (ret)
			goto out;

	} while (ret == -ETIME && timeout);

out:
	mutex_unlock(&smu->message_lock);

	if (ret)
		dev_err(adev->dev, "failed to send mode2 reset, error code %d\n",
			ret);

	return ret;
}

static int smu_v13_0_6_get_thermal_temperature_range(struct smu_context *smu,
						     struct smu_temperature_range *range)
{
	struct amdgpu_device *adev = smu->adev;
	u32 aid_temp, xcd_temp, max_temp;
	u32 ccd_temp = 0;
	int ret;

	if (amdgpu_sriov_vf(smu->adev))
		return 0;

	if (!range)
		return -EINVAL;

	/* Check smu version; the GetCtfLimit message is only supported by smu version 85.69 or higher */
	if (smu->smc_fw_version < 0x554500)
		return 0;

	/* Get SOC Max operating temperature */
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetCTFLimit,
					      PPSMC_AID_THM_TYPE, &aid_temp);
	if (ret)
		goto failed;
	if (adev->flags & AMD_IS_APU) {
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetCTFLimit,
						      PPSMC_CCD_THM_TYPE, &ccd_temp);
		if (ret)
			goto failed;
	}
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetCTFLimit,
					      PPSMC_XCD_THM_TYPE, &xcd_temp);
	if (ret)
		goto failed;
	range->hotspot_emergency_max = max3(aid_temp, xcd_temp, ccd_temp) *
				       SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;

	/* Get HBM Max operating temperature */
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetCTFLimit,
					      PPSMC_HBM_THM_TYPE, &max_temp);
	if (ret)
		goto failed;
	range->mem_emergency_max =
		max_temp * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;

	/* Get SOC thermal throttle limit */
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetThermalLimit,
					      PPSMC_THROTTLING_LIMIT_TYPE_SOCKET,
					      &max_temp);
	if (ret)
		goto failed;
	range->hotspot_crit_max =
		max_temp * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;

	/* Get HBM thermal throttle limit */
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetThermalLimit,
					      PPSMC_THROTTLING_LIMIT_TYPE_HBM,
					      &max_temp);
	if (ret)
		goto failed;

	range->mem_crit_max = max_temp * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;

failed:
	return ret;
}
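
/*
 * Mode-1 reset request layout as used below: bits [15:0] carry
 * SMU_RESET_MODE_1 and bit 16 is set when the reset was triggered by a
 * RAS fatal error, letting PMFW take its error-recovery path.
 */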
2321 { 2322 struct amdgpu_device *adev = smu->adev; 2323 struct amdgpu_hive_info *hive = NULL; 2324 u32 hive_ras_recovery = 0; 2325 struct amdgpu_ras *ras; 2326 u32 fatal_err, param; 2327 int ret = 0; 2328 2329 hive = amdgpu_get_xgmi_hive(adev); 2330 ras = amdgpu_ras_get_context(adev); 2331 fatal_err = 0; 2332 param = SMU_RESET_MODE_1; 2333 2334 if (hive) { 2335 hive_ras_recovery = atomic_read(&hive->ras_recovery); 2336 amdgpu_put_xgmi_hive(hive); 2337 } 2338 2339 /* fatal error triggered by ras, PMFW supports the flag */ 2340 if (ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery)) 2341 fatal_err = 1; 2342 2343 param |= (fatal_err << 16); 2344 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, 2345 param, NULL); 2346 2347 if (!ret) 2348 msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS); 2349 2350 return ret; 2351 } 2352 2353 static bool smu_v13_0_6_is_mode1_reset_supported(struct smu_context *smu) 2354 { 2355 return true; 2356 } 2357 2358 static bool smu_v13_0_6_is_mode2_reset_supported(struct smu_context *smu) 2359 { 2360 return true; 2361 } 2362 2363 static int smu_v13_0_6_smu_send_hbm_bad_page_num(struct smu_context *smu, 2364 uint32_t size) 2365 { 2366 int ret = 0; 2367 2368 /* message SMU to update the bad page number on SMUBUS */ 2369 ret = smu_cmn_send_smc_msg_with_param( 2370 smu, SMU_MSG_SetNumBadHbmPagesRetired, size, NULL); 2371 if (ret) 2372 dev_err(smu->adev->dev, 2373 "[%s] failed to message SMU to update HBM bad pages number\n", 2374 __func__); 2375 2376 return ret; 2377 } 2378 2379 static int mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable) 2380 { 2381 struct smu_context *smu = adev->powerplay.pp_handle; 2382 2383 return smu_v13_0_6_mca_set_debug_mode(smu, enable); 2384 } 2385 2386 static int smu_v13_0_6_get_valid_mca_count(struct smu_context *smu, enum amdgpu_mca_error_type type, uint32_t *count) 2387 { 2388 uint32_t msg; 2389 int ret; 2390 2391 if (!count) 2392 return -EINVAL; 2393 2394 switch (type) { 2395 case AMDGPU_MCA_ERROR_TYPE_UE: 2396 msg = SMU_MSG_QueryValidMcaCount; 2397 break; 2398 case AMDGPU_MCA_ERROR_TYPE_CE: 2399 msg = SMU_MSG_QueryValidMcaCeCount; 2400 break; 2401 default: 2402 return -EINVAL; 2403 } 2404 2405 ret = smu_cmn_send_smc_msg(smu, msg, count); 2406 if (ret) { 2407 *count = 0; 2408 return ret; 2409 } 2410 2411 return 0; 2412 } 2413 2414 static int __smu_v13_0_6_mca_dump_bank(struct smu_context *smu, enum amdgpu_mca_error_type type, 2415 int idx, int offset, uint32_t *val) 2416 { 2417 uint32_t msg, param; 2418 2419 switch (type) { 2420 case AMDGPU_MCA_ERROR_TYPE_UE: 2421 msg = SMU_MSG_McaBankDumpDW; 2422 break; 2423 case AMDGPU_MCA_ERROR_TYPE_CE: 2424 msg = SMU_MSG_McaBankCeDumpDW; 2425 break; 2426 default: 2427 return -EINVAL; 2428 } 2429 2430 param = ((idx & 0xffff) << 16) | (offset & 0xfffc); 2431 2432 return smu_cmn_send_smc_msg_with_param(smu, msg, param, val); 2433 } 2434 2435 static int smu_v13_0_6_mca_dump_bank(struct smu_context *smu, enum amdgpu_mca_error_type type, 2436 int idx, int offset, uint32_t *val, int count) 2437 { 2438 int ret, i; 2439 2440 if (!val) 2441 return -EINVAL; 2442 2443 for (i = 0; i < count; i++) { 2444 ret = __smu_v13_0_6_mca_dump_bank(smu, type, idx, offset + (i << 2), &val[i]); 2445 if (ret) 2446 return ret; 2447 } 2448 2449 return 0; 2450 } 2451 2452 static const struct mca_bank_ipid smu_v13_0_6_mca_ipid_table[AMDGPU_MCA_IP_COUNT] = { 2453 MCA_BANK_IPID(UMC, 0x96, 0x0), 2454 MCA_BANK_IPID(SMU, 0x01, 0x1), 2455 MCA_BANK_IPID(MP5, 0x01, 0x2), 2456 MCA_BANK_IPID(PCS_XGMI, 
static int __smu_v13_0_6_mca_dump_bank(struct smu_context *smu, enum amdgpu_mca_error_type type,
				       int idx, int offset, uint32_t *val)
{
	uint32_t msg, param;

	switch (type) {
	case AMDGPU_MCA_ERROR_TYPE_UE:
		msg = SMU_MSG_McaBankDumpDW;
		break;
	case AMDGPU_MCA_ERROR_TYPE_CE:
		msg = SMU_MSG_McaBankCeDumpDW;
		break;
	default:
		return -EINVAL;
	}

	param = ((idx & 0xffff) << 16) | (offset & 0xfffc);

	return smu_cmn_send_smc_msg_with_param(smu, msg, param, val);
}

static int smu_v13_0_6_mca_dump_bank(struct smu_context *smu, enum amdgpu_mca_error_type type,
				     int idx, int offset, uint32_t *val, int count)
{
	int ret, i;

	if (!val)
		return -EINVAL;

	for (i = 0; i < count; i++) {
		ret = __smu_v13_0_6_mca_dump_bank(smu, type, idx, offset + (i << 2), &val[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct mca_bank_ipid smu_v13_0_6_mca_ipid_table[AMDGPU_MCA_IP_COUNT] = {
	MCA_BANK_IPID(UMC, 0x96, 0x0),
	MCA_BANK_IPID(SMU, 0x01, 0x1),
	MCA_BANK_IPID(MP5, 0x01, 0x2),
	MCA_BANK_IPID(PCS_XGMI, 0x50, 0x0),
};

static void mca_bank_entry_info_decode(struct mca_bank_entry *entry, struct mca_bank_info *info)
{
	u64 ipid = entry->regs[MCA_REG_IDX_IPID];
	u32 instidhi, instid;

	/*
	 * NOTE: All MCA IPID registers share the same format,
	 * so the driver can share the MCMP1 register header file.
	 */

	info->hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID);
	info->mcatype = REG_GET_FIELD(ipid, MCMP1_IPIDT0, McaType);

	/*
	 * Unified DieID Format: SAASS. A:AID, S:Socket.
	 * Unified DieID[4] = InstanceId[0]
	 * Unified DieID[0:3] = InstanceIdHi[0:3]
	 */
	instidhi = REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi);
	instid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo);
	info->aid = ((instidhi >> 2) & 0x03);
	info->socket_id = ((instid & 0x1) << 2) | (instidhi & 0x03);
}

static int mca_bank_read_reg(struct amdgpu_device *adev, enum amdgpu_mca_error_type type,
			     int idx, int reg_idx, uint64_t *val)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	uint32_t data[2] = {0, 0};
	int ret;

	if (!val || reg_idx >= MCA_REG_IDX_COUNT)
		return -EINVAL;

	ret = smu_v13_0_6_mca_dump_bank(smu, type, idx, reg_idx * 8, data, ARRAY_SIZE(data));
	if (ret)
		return ret;

	*val = (uint64_t)data[1] << 32 | data[0];

	dev_dbg(adev->dev, "mca read bank reg: type:%s, index: %d, reg_idx: %d, val: 0x%016llx\n",
		type == AMDGPU_MCA_ERROR_TYPE_UE ? "UE" : "CE", idx, reg_idx, *val);

	return 0;
}

static int mca_get_mca_entry(struct amdgpu_device *adev, enum amdgpu_mca_error_type type,
			     int idx, struct mca_bank_entry *entry)
{
	int i, ret;

	/* NOTE: populate all mca registers by default */
	for (i = 0; i < ARRAY_SIZE(entry->regs); i++) {
		ret = mca_bank_read_reg(adev, type, idx, i, &entry->regs[i]);
		if (ret)
			return ret;
	}

	entry->idx = idx;
	entry->type = type;

	mca_bank_entry_info_decode(entry, &entry->info);

	return 0;
}
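
/*
 * Map the HardwareID/McaType pair from an MCA_IPID register onto an
 * amdgpu_mca_ip index via smu_v13_0_6_mca_ipid_table; unknown pairs
 * fall back to AMDGPU_MCA_IP_UNKNOW.
 */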
static int mca_decode_ipid_to_hwip(uint64_t val)
{
	const struct mca_bank_ipid *ipid;
	uint16_t hwid, mcatype;
	int i;

	hwid = REG_GET_FIELD(val, MCMP1_IPIDT0, HardwareID);
	mcatype = REG_GET_FIELD(val, MCMP1_IPIDT0, McaType);

	for (i = 0; i < ARRAY_SIZE(smu_v13_0_6_mca_ipid_table); i++) {
		ipid = &smu_v13_0_6_mca_ipid_table[i];

		if (!ipid->hwid)
			continue;

		if (ipid->hwid == hwid && ipid->mcatype == mcatype)
			return i;
	}

	return AMDGPU_MCA_IP_UNKNOW;
}

static int mca_umc_mca_get_err_count(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
				     enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count)
{
	uint64_t status0;

	status0 = entry->regs[MCA_REG_IDX_STATUS];

	if (!REG_GET_FIELD(status0, MCMP1_STATUST0, Val)) {
		*count = 0;
		return 0;
	}

	if (type == AMDGPU_MCA_ERROR_TYPE_UE && umc_v12_0_is_uncorrectable_error(adev, status0))
		*count = 1;
	else if (type == AMDGPU_MCA_ERROR_TYPE_CE && umc_v12_0_is_correctable_error(adev, status0))
		*count = 1;

	return 0;
}

static int mca_pcs_xgmi_mca_get_err_count(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
					  enum amdgpu_mca_error_type type, struct mca_bank_entry *entry,
					  uint32_t *count)
{
	u32 ext_error_code;
	u32 err_cnt;

	ext_error_code = MCA_REG__STATUS__ERRORCODEEXT(entry->regs[MCA_REG_IDX_STATUS]);
	err_cnt = MCA_REG__MISC0__ERRCNT(entry->regs[MCA_REG_IDX_MISC0]);

	if (type == AMDGPU_MCA_ERROR_TYPE_UE && ext_error_code == 0)
		*count = err_cnt;
	else if (type == AMDGPU_MCA_ERROR_TYPE_CE && ext_error_code == 6)
		*count = err_cnt;

	return 0;
}

static bool mca_smu_check_error_code(struct amdgpu_device *adev, const struct mca_ras_info *mca_ras,
				     uint32_t errcode)
{
	int i;

	if (!mca_ras->err_code_count || !mca_ras->err_code_array)
		return true;

	for (i = 0; i < mca_ras->err_code_count; i++) {
		if (errcode == mca_ras->err_code_array[i])
			return true;
	}

	return false;
}

static int mca_gfx_mca_get_err_count(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
				     enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count)
{
	uint64_t status0, misc0;

	status0 = entry->regs[MCA_REG_IDX_STATUS];
	if (!REG_GET_FIELD(status0, MCMP1_STATUST0, Val)) {
		*count = 0;
		return 0;
	}

	if (type == AMDGPU_MCA_ERROR_TYPE_UE &&
	    REG_GET_FIELD(status0, MCMP1_STATUST0, UC) == 1 &&
	    REG_GET_FIELD(status0, MCMP1_STATUST0, PCC) == 1) {
		*count = 1;
		return 0;
	} else {
		misc0 = entry->regs[MCA_REG_IDX_MISC0];
		*count = REG_GET_FIELD(misc0, MCMP1_MISC0T0, ErrCnt);
	}

	return 0;
}

static int mca_smu_mca_get_err_count(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
				     enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count)
{
	uint64_t status0, misc0;

	status0 = entry->regs[MCA_REG_IDX_STATUS];
	if (!REG_GET_FIELD(status0, MCMP1_STATUST0, Val)) {
		*count = 0;
		return 0;
	}

	if (type == AMDGPU_MCA_ERROR_TYPE_UE &&
	    REG_GET_FIELD(status0, MCMP1_STATUST0, UC) == 1 &&
	    REG_GET_FIELD(status0, MCMP1_STATUST0, PCC) == 1) {
		*count = 1;
		return 0;
	}

	misc0 = entry->regs[MCA_REG_IDX_MISC0];
	*count = REG_GET_FIELD(misc0, MCMP1_MISC0T0, ErrCnt);

	return 0;
}

static bool mca_gfx_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
				      enum amdgpu_mca_error_type type, struct mca_bank_entry *entry)
{
	uint32_t instlo;

	instlo = REG_GET_FIELD(entry->regs[MCA_REG_IDX_IPID], MCMP1_IPIDT0, InstanceIdLo);
	instlo &= GENMASK(31, 1);
	switch (instlo) {
	case 0x36430400: /* SMNAID XCD 0 */
	case 0x38430400: /* SMNAID XCD 1 */
	case 0x40430400: /* SMNXCD XCD 0, NOTE: FIXME: fix this error later */
		return true;
	default:
		return false;
	}
}
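
/*
 * SMU banks are identified by InstanceIdLo 0x03b30400 (bit 0 masked off).
 * Newer PMFW on dGPU (>= 0x00555600, i.e. 85.86.0 in the version encoding
 * used elsewhere in this file) reports the RAS error code in the syndrome
 * register; older firmware and APUs use the status register's ErrorCode
 * field.
 */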
static bool mca_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
				  enum amdgpu_mca_error_type type, struct mca_bank_entry *entry)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	uint32_t errcode, instlo;

	instlo = REG_GET_FIELD(entry->regs[MCA_REG_IDX_IPID], MCMP1_IPIDT0, InstanceIdLo);
	instlo &= GENMASK(31, 1);
	if (instlo != 0x03b30400)
		return false;

	if (!(adev->flags & AMD_IS_APU) && smu->smc_fw_version >= 0x00555600) {
		errcode = MCA_REG__SYND__ERRORINFORMATION(entry->regs[MCA_REG_IDX_SYND]);
		errcode &= 0xff;
	} else {
		errcode = REG_GET_FIELD(entry->regs[MCA_REG_IDX_STATUS], MCMP1_STATUST0, ErrorCode);
	}

	return mca_smu_check_error_code(adev, mca_ras, errcode);
}

static int sdma_err_codes[] = { CODE_SDMA0, CODE_SDMA1, CODE_SDMA2, CODE_SDMA3 };
static int mmhub_err_codes[] = {
	CODE_DAGB0, CODE_DAGB0 + 1, CODE_DAGB0 + 2, CODE_DAGB0 + 3, CODE_DAGB0 + 4, /* DAGB0-4 */
	CODE_EA0, CODE_EA0 + 1, CODE_EA0 + 2, CODE_EA0 + 3, CODE_EA0 + 4, /* MMEA0-4 */
	CODE_VML2, CODE_VML2_WALKER, CODE_MMCANE,
};

static const struct mca_ras_info mca_ras_table[] = {
	{
		.blkid = AMDGPU_RAS_BLOCK__UMC,
		.ip = AMDGPU_MCA_IP_UMC,
		.get_err_count = mca_umc_mca_get_err_count,
	}, {
		.blkid = AMDGPU_RAS_BLOCK__GFX,
		.ip = AMDGPU_MCA_IP_SMU,
		.get_err_count = mca_gfx_mca_get_err_count,
		.bank_is_valid = mca_gfx_smu_bank_is_valid,
	}, {
		.blkid = AMDGPU_RAS_BLOCK__SDMA,
		.ip = AMDGPU_MCA_IP_SMU,
		.err_code_array = sdma_err_codes,
		.err_code_count = ARRAY_SIZE(sdma_err_codes),
		.get_err_count = mca_smu_mca_get_err_count,
		.bank_is_valid = mca_smu_bank_is_valid,
	}, {
		.blkid = AMDGPU_RAS_BLOCK__MMHUB,
		.ip = AMDGPU_MCA_IP_SMU,
		.err_code_array = mmhub_err_codes,
		.err_code_count = ARRAY_SIZE(mmhub_err_codes),
		.get_err_count = mca_smu_mca_get_err_count,
		.bank_is_valid = mca_smu_bank_is_valid,
	}, {
		.blkid = AMDGPU_RAS_BLOCK__XGMI_WAFL,
		.ip = AMDGPU_MCA_IP_PCS_XGMI,
		.get_err_count = mca_pcs_xgmi_mca_get_err_count,
	},
};

static const struct mca_ras_info *mca_get_mca_ras_info(struct amdgpu_device *adev, enum amdgpu_ras_block blkid)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mca_ras_table); i++) {
		if (mca_ras_table[i].blkid == blkid)
			return &mca_ras_table[i];
	}

	return NULL;
}

static int mca_get_valid_mca_count(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, uint32_t *count)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	switch (type) {
	case AMDGPU_MCA_ERROR_TYPE_UE:
	case AMDGPU_MCA_ERROR_TYPE_CE:
		ret = smu_v13_0_6_get_valid_mca_count(smu, type, count);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static bool mca_bank_is_valid(struct amdgpu_device *adev, const struct mca_ras_info *mca_ras,
			      enum amdgpu_mca_error_type type, struct mca_bank_entry *entry)
{
	if (mca_decode_ipid_to_hwip(entry->regs[MCA_REG_IDX_IPID]) != mca_ras->ip)
		return false;

	if (mca_ras->bank_is_valid)
		return mca_ras->bank_is_valid(mca_ras, adev, type, entry);

	return true;
}
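
/*
 * Collect all valid MCA banks of the given error type: query the bank
 * count from PMFW, dump each bank, and keep the ones belonging to the
 * requested RAS block (or every bank when no block filter is given).
 */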
static int __mca_smu_get_ras_mca_set(struct amdgpu_device *adev, const struct mca_ras_info *mca_ras,
				     enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set)
{
	struct mca_bank_entry entry;
	uint32_t mca_cnt;
	int i, ret;

	ret = mca_get_valid_mca_count(adev, type, &mca_cnt);
	if (ret)
		return ret;

	/* if valid mca bank count is 0, the driver can return 0 directly */
	if (!mca_cnt)
		return 0;

	for (i = 0; i < mca_cnt; i++) {
		memset(&entry, 0, sizeof(entry));
		ret = mca_get_mca_entry(adev, type, i, &entry);
		if (ret)
			return ret;

		if (mca_ras && !mca_bank_is_valid(adev, mca_ras, type, &entry))
			continue;

		ret = amdgpu_mca_bank_set_add_entry(mca_set, &entry);
		if (ret)
			return ret;
	}

	return 0;
}

static int mca_smu_get_ras_mca_set(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
				   enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set)
{
	const struct mca_ras_info *mca_ras = NULL;

	if (!mca_set)
		return -EINVAL;

	if (blk != AMDGPU_RAS_BLOCK_COUNT) {
		mca_ras = mca_get_mca_ras_info(adev, blk);
		if (!mca_ras)
			return -EOPNOTSUPP;
	}

	return __mca_smu_get_ras_mca_set(adev, mca_ras, type, mca_set);
}

static int mca_smu_parse_mca_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type,
					 struct mca_bank_entry *entry, uint32_t *count)
{
	const struct mca_ras_info *mca_ras;

	if (!entry || !count)
		return -EINVAL;

	mca_ras = mca_get_mca_ras_info(adev, blk);
	if (!mca_ras)
		return -EOPNOTSUPP;

	if (!mca_bank_is_valid(adev, mca_ras, type, entry)) {
		*count = 0;
		return 0;
	}

	return mca_ras->get_err_count(mca_ras, adev, type, entry, count);
}

static int mca_smu_get_mca_entry(struct amdgpu_device *adev,
				 enum amdgpu_mca_error_type type, int idx, struct mca_bank_entry *entry)
{
	return mca_get_mca_entry(adev, type, idx, entry);
}

static int mca_smu_get_valid_mca_count(struct amdgpu_device *adev,
				       enum amdgpu_mca_error_type type, uint32_t *count)
{
	return mca_get_valid_mca_count(adev, type, count);
}

static const struct amdgpu_mca_smu_funcs smu_v13_0_6_mca_smu_funcs = {
	.max_ue_count = 12,
	.max_ce_count = 12,
	.mca_set_debug_mode = mca_smu_set_debug_mode,
	.mca_get_ras_mca_set = mca_smu_get_ras_mca_set,
	.mca_parse_mca_error_count = mca_smu_parse_mca_error_count,
	.mca_get_mca_entry = mca_smu_get_mca_entry,
	.mca_get_valid_mca_count = mca_smu_get_valid_mca_count,
};

static int smu_v13_0_6_select_xgmi_plpd_policy(struct smu_context *smu,
					       enum pp_xgmi_plpd_mode mode)
{
	struct amdgpu_device *adev = smu->adev;
	int ret, param;

	switch (mode) {
	case XGMI_PLPD_DEFAULT:
		param = PPSMC_PLPD_MODE_DEFAULT;
		break;
	case XGMI_PLPD_OPTIMIZED:
		param = PPSMC_PLPD_MODE_OPTIMIZED;
		break;
	case XGMI_PLPD_DISALLOW:
		param = 0;
		break;
	default:
		return -EINVAL;
	}

	if (mode == XGMI_PLPD_DISALLOW)
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_GmiPwrDnControl,
						      param, NULL);
	else
		/* change xgmi per-link power down policy */
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SelectPLPDMode,
						      param, NULL);

	if (ret)
		dev_err(adev->dev,
			"select xgmi per-link power down policy %d failed\n",
			mode);

	return ret;
}

static ssize_t smu_v13_0_6_get_ecc_info(struct smu_context *smu,
					void *table)
{
	/* Support ecc info by default */
	return 0;
}
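
/* ASIC-specific callbacks wired into the common SMU v13 framework */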
static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
	/* init dpm */
	.get_allowed_feature_mask = smu_v13_0_6_get_allowed_feature_mask,
	/* dpm/clk tables */
	.set_default_dpm_table = smu_v13_0_6_set_default_dpm_table,
	.populate_umd_state_clk = smu_v13_0_6_populate_umd_state_clk,
	.print_clk_levels = smu_v13_0_6_print_clk_levels,
	.force_clk_levels = smu_v13_0_6_force_clk_levels,
	.read_sensor = smu_v13_0_6_read_sensor,
	.set_performance_level = smu_v13_0_6_set_performance_level,
	.get_power_limit = smu_v13_0_6_get_power_limit,
	.is_dpm_running = smu_v13_0_6_is_dpm_running,
	.get_unique_id = smu_v13_0_6_get_unique_id,
	.init_microcode = smu_v13_0_6_init_microcode,
	.fini_microcode = smu_v13_0_fini_microcode,
	.init_smc_tables = smu_v13_0_6_init_smc_tables,
	.fini_smc_tables = smu_v13_0_fini_smc_tables,
	.init_power = smu_v13_0_init_power,
	.fini_power = smu_v13_0_fini_power,
	.check_fw_status = smu_v13_0_6_check_fw_status,
	/* pptable related */
	.check_fw_version = smu_v13_0_check_fw_version,
	.set_driver_table_location = smu_v13_0_set_driver_table_location,
	.set_tool_table_location = smu_v13_0_set_tool_table_location,
	.notify_memory_pool_location = smu_v13_0_notify_memory_pool_location,
	.system_features_control = smu_v13_0_6_system_features_control,
	.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
	.send_smc_msg = smu_cmn_send_smc_msg,
	.get_enabled_mask = smu_v13_0_6_get_enabled_mask,
	.feature_is_enabled = smu_cmn_feature_is_enabled,
	.set_power_limit = smu_v13_0_6_set_power_limit,
	.set_xgmi_pstate = smu_v13_0_set_xgmi_pstate,
	.register_irq_handler = smu_v13_0_6_register_irq_handler,
	.enable_thermal_alert = smu_v13_0_enable_thermal_alert,
	.disable_thermal_alert = smu_v13_0_disable_thermal_alert,
	.setup_pptable = smu_v13_0_6_setup_pptable,
	.baco_is_support = smu_v13_0_6_is_baco_supported,
	.get_dpm_ultimate_freq = smu_v13_0_6_get_dpm_ultimate_freq,
	.set_soft_freq_limited_range = smu_v13_0_6_set_soft_freq_limited_range,
	.od_edit_dpm_table = smu_v13_0_6_usr_edit_dpm_table,
	.select_xgmi_plpd_policy = smu_v13_0_6_select_xgmi_plpd_policy,
	.log_thermal_throttling_event = smu_v13_0_6_log_thermal_throttling_event,
	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
	.get_gpu_metrics = smu_v13_0_6_get_gpu_metrics,
	.get_pm_metrics = smu_v13_0_6_get_pm_metrics,
	.get_thermal_temperature_range = smu_v13_0_6_get_thermal_temperature_range,
	.mode1_reset_is_support = smu_v13_0_6_is_mode1_reset_supported,
	.mode2_reset_is_support = smu_v13_0_6_is_mode2_reset_supported,
	.mode1_reset = smu_v13_0_6_mode1_reset,
	.mode2_reset = smu_v13_0_6_mode2_reset,
	.wait_for_event = smu_v13_0_wait_for_event,
	.i2c_init = smu_v13_0_6_i2c_control_init,
	.i2c_fini = smu_v13_0_6_i2c_control_fini,
	.send_hbm_bad_pages_num = smu_v13_0_6_smu_send_hbm_bad_page_num,
	.get_ecc_info = smu_v13_0_6_get_ecc_info,
};

void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
{
	smu->ppt_funcs = &smu_v13_0_6_ppt_funcs;
	smu->message_map = smu_v13_0_6_message_map;
	smu->clock_map = smu_v13_0_6_clk_map;
	smu->feature_map = smu_v13_0_6_feature_mask_map;
	smu->table_map = smu_v13_0_6_table_map;
	smu->smc_driver_if_version = SMU13_0_6_DRIVER_IF_VERSION;
	smu_v13_0_set_smu_mailbox_registers(smu);
	amdgpu_mca_smu_init_funcs(smu->adev, &smu_v13_0_6_mca_smu_funcs);
}