/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "smu_types.h"
#define SWSMU_CODE_LAYER_L2

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_v13_0.h"
#include "smu13_driver_if_v13_0_4.h"
#include "smu_v13_0_4_ppt.h"
#include "smu_v13_0_4_ppsmc.h"
#include "smu_v13_0_4_pmfw.h"
#include "smu_cmn.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

#define mmMP1_SMN_C2PMSG_66			0x0282
#define mmMP1_SMN_C2PMSG_66_BASE_IDX		1

#define mmMP1_SMN_C2PMSG_82			0x0292
#define mmMP1_SMN_C2PMSG_82_BASE_IDX		1

#define mmMP1_SMN_C2PMSG_90			0x029a
#define mmMP1_SMN_C2PMSG_90_BASE_IDX		1

#define FEATURE_MASK(feature) (1ULL << feature)

#define SMC_DPM_FEATURE ( \
	FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \
	FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_LCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_ISP_DPM_BIT) | \
	FEATURE_MASK(FEATURE_IPU_DPM_BIT) | \
	FEATURE_MASK(FEATURE_GFX_DPM_BIT))

static struct cmn2asic_msg_mapping smu_v13_0_4_message_map[SMU_MSG_MAX_COUNT] = {
	MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
	MSG_MAP(GetSmuVersion, PPSMC_MSG_GetPmfwVersion, 1),
	MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
	MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 1),
	MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 1),
	MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 1),
	MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 1),
	MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn, 1),
	MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 1),
	MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
	MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1),
	MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1),
	MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 1),
	MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDeviceDriverReset, 1),
	MSG_MAP(GetEnabledSmuFeatures, PPSMC_MSG_GetEnabledSmuFeatures, 1),
	MSG_MAP(SetHardMinSocclkByFreq, PPSMC_MSG_SetHardMinSocclkByFreq, 1),
	MSG_MAP(SetSoftMinVcn, PPSMC_MSG_SetSoftMinVcn, 1),
	MSG_MAP(GetGfxclkFrequency, PPSMC_MSG_GetGfxclkFrequency, 1),
	MSG_MAP(GetFclkFrequency, PPSMC_MSG_GetFclkFrequency, 1),
	MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 1),
	MSG_MAP(SetHardMinGfxClk, PPSMC_MSG_SetHardMinGfxClk, 1),
	MSG_MAP(SetSoftMaxSocclkByFreq, PPSMC_MSG_SetSoftMaxSocclkByFreq, 1),
	MSG_MAP(SetSoftMaxFclkByFreq, PPSMC_MSG_SetSoftMaxFclkByFreq, 1),
	MSG_MAP(SetSoftMaxVcn, PPSMC_MSG_SetSoftMaxVcn, 1),
	MSG_MAP(SetPowerLimitPercentage, PPSMC_MSG_SetPowerLimitPercentage, 1),
	MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 1),
	MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 1),
	MSG_MAP(SetHardMinFclkByFreq, PPSMC_MSG_SetHardMinFclkByFreq, 1),
	MSG_MAP(SetSoftMinSocclkByFreq, PPSMC_MSG_SetSoftMinSocclkByFreq, 1),
	MSG_MAP(EnableGfxImu, PPSMC_MSG_EnableGfxImu, 1),
	MSG_MAP(PowerUpIspByTile, PPSMC_MSG_PowerUpIspByTile, 1),
	MSG_MAP(PowerDownIspByTile, PPSMC_MSG_PowerDownIspByTile, 1),
};

static struct cmn2asic_mapping smu_v13_0_4_feature_mask_map[SMU_FEATURE_COUNT] = {
	FEA_MAP(CCLK_DPM),
	FEA_MAP(FAN_CONTROLLER),
	FEA_MAP(PPT),
	FEA_MAP(TDC),
	FEA_MAP(THERMAL),
	FEA_MAP(VCN_DPM),
	FEA_MAP_REVERSE(FCLK),
	FEA_MAP_REVERSE(SOCCLK),
	FEA_MAP(LCLK_DPM),
	FEA_MAP(SHUBCLK_DPM),
	FEA_MAP(DCFCLK_DPM),
	FEA_MAP_HALF_REVERSE(GFX),
	FEA_MAP(DS_GFXCLK),
	FEA_MAP(DS_SOCCLK),
	FEA_MAP(DS_LCLK),
	FEA_MAP(DS_DCFCLK),
	FEA_MAP(DS_FCLK),
	FEA_MAP(DS_MP1CLK),
	FEA_MAP(DS_MP0CLK),
	FEA_MAP(GFX_DEM),
	FEA_MAP(PSI),
	FEA_MAP(PROCHOT),
	FEA_MAP(CPUOFF),
	FEA_MAP(STAPM),
	FEA_MAP(S0I3),
	FEA_MAP(PERF_LIMIT),
	FEA_MAP(CORE_DLDO),
	FEA_MAP(DS_VCN),
	FEA_MAP(CPPC),
	FEA_MAP(DF_CSTATES),
	FEA_MAP(ATHUB_PG),
};

static struct cmn2asic_mapping smu_v13_0_4_table_map[SMU_TABLE_COUNT] = {
	TAB_MAP_VALID(WATERMARKS),
	TAB_MAP_VALID(SMU_METRICS),
	TAB_MAP_VALID(CUSTOM_DPM),
	TAB_MAP_VALID(DPMCLOCKS),
};

static int smu_v13_0_4_init_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;

	SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);

	smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL);
	if (!smu_table->clocks_table)
		goto err0_out;

	smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
	if (!smu_table->metrics_table)
		goto err1_out;
	smu_table->metrics_time = 0;

	smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
	if (!smu_table->watermarks_table)
		goto err2_out;

	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_1);
	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
	if (!smu_table->gpu_metrics_table)
		goto err3_out;

	return 0;

err3_out:
	kfree(smu_table->watermarks_table);
err2_out:
	kfree(smu_table->metrics_table);
err1_out:
	kfree(smu_table->clocks_table);
err0_out:
	return -ENOMEM;
}

static int smu_v13_0_4_fini_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;

	kfree(smu_table->clocks_table);
	smu_table->clocks_table = NULL;

	kfree(smu_table->metrics_table);
	smu_table->metrics_table = NULL;

	kfree(smu_table->watermarks_table);
	smu_table->watermarks_table = NULL;

	kfree(smu_table->gpu_metrics_table);
	smu_table->gpu_metrics_table = NULL;

	return 0;
}

static bool smu_v13_0_4_is_dpm_running(struct smu_context *smu)
{
	int ret = 0;
	uint64_t feature_enabled;

	ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
	if (ret)
		return false;

	return !!(feature_enabled & SMC_DPM_FEATURE);
}

static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (!en && !adev->in_s0ix)
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);

	return ret;
}

static ssize_t smu_v13_0_4_get_gpu_metrics(struct smu_context *smu,
					   void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v2_1 *gpu_metrics =
		(struct gpu_metrics_v2_1 *)smu_table->gpu_metrics_table;
	SmuMetrics_t metrics;
	int ret = 0;

	ret = smu_cmn_get_metrics_table(smu, &metrics, true);
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 1);

	gpu_metrics->temperature_gfx = metrics.GfxTemperature;
	gpu_metrics->temperature_soc = metrics.SocTemperature;
	memcpy(&gpu_metrics->temperature_core[0],
	       &metrics.CoreTemperature[0],
	       sizeof(uint16_t) * 8);
	gpu_metrics->temperature_l3[0] = metrics.L3Temperature;

	gpu_metrics->average_gfx_activity = metrics.GfxActivity;
	gpu_metrics->average_mm_activity = metrics.UvdActivity;

	gpu_metrics->average_socket_power = metrics.CurrentSocketPower;
	gpu_metrics->average_gfx_power = metrics.Power[0];
	gpu_metrics->average_soc_power = metrics.Power[1];
	memcpy(&gpu_metrics->average_core_power[0],
	       &metrics.CorePower[0],
	       sizeof(uint16_t) * 8);

	gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
	gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
	gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency;
	gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency;
	gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
	gpu_metrics->average_dclk_frequency = metrics.DclkFrequency;

	memcpy(&gpu_metrics->current_coreclk[0],
	       &metrics.CoreFrequency[0],
	       sizeof(uint16_t) * 8);
	gpu_metrics->current_l3clk[0] = metrics.L3Frequency;

	gpu_metrics->throttle_status = metrics.ThrottlerStatus;

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v2_1);
}

static int smu_v13_0_4_get_smu_metrics_data(struct smu_context *smu,
					    MetricsMember_t member,
					    uint32_t *value)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
	int ret = 0;

	ret = smu_cmn_get_metrics_table(smu, NULL, false);
	if (ret)
		return ret;

	switch (member) {
	case METRICS_AVERAGE_GFXCLK:
		*value = metrics->GfxclkFrequency;
		break;
	case METRICS_AVERAGE_SOCCLK:
		*value = metrics->SocclkFrequency;
		break;
	case METRICS_AVERAGE_VCLK:
		*value = metrics->VclkFrequency;
		break;
	case METRICS_AVERAGE_DCLK:
		*value = metrics->DclkFrequency;
		break;
	case METRICS_AVERAGE_UCLK:
		*value = metrics->MemclkFrequency;
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		*value = metrics->GfxActivity / 100;
		break;
	case METRICS_AVERAGE_VCNACTIVITY:
		*value = metrics->UvdActivity;
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
		*value = (metrics->CurrentSocketPower << 8) / 1000;
		break;
	case METRICS_TEMPERATURE_EDGE:
		*value = metrics->GfxTemperature / 100 *
			 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = metrics->SocTemperature / 100 *
			 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_THROTTLER_STATUS:
		*value = metrics->ThrottlerStatus;
		break;
	case METRICS_VOLTAGE_VDDGFX:
		*value = metrics->Voltage[0];
		break;
	case METRICS_VOLTAGE_VDDSOC:
		*value = metrics->Voltage[1];
		break;
	case METRICS_SS_APU_SHARE:
		/*
		 * Return the APU's power as a percentage of the APU power limit.
		 * A percentage is reported, not a boost value; Smartshift power
		 * boost/shift only happens when the percentage exceeds 100.
		 */
		if (metrics->StapmOpnLimit > 0)
			*value = (metrics->ApuPower * 100) / metrics->StapmOpnLimit;
		else
			*value = 0;
		break;
	case METRICS_SS_DGPU_SHARE:
		/*
		 * Return the dGPU's power as a percentage of the dGPU power limit.
		 * A percentage is reported, not a boost value; Smartshift power
		 * boost/shift only happens when the percentage exceeds 100.
		 */
		if ((metrics->dGpuPower > 0) &&
		    (metrics->StapmCurrentLimit > metrics->StapmOpnLimit))
			*value = (metrics->dGpuPower * 100) /
				 (metrics->StapmCurrentLimit - metrics->StapmOpnLimit);
		else
			*value = 0;
		break;
	default:
		*value = UINT_MAX;
		break;
	}

	return ret;
}

static int smu_v13_0_4_get_current_clk_freq(struct smu_context *smu,
					    enum smu_clk_type clk_type,
					    uint32_t *value)
{
	MetricsMember_t member_type;

	switch (clk_type) {
	case SMU_SOCCLK:
		member_type = METRICS_AVERAGE_SOCCLK;
		break;
	case SMU_VCLK:
		member_type = METRICS_AVERAGE_VCLK;
		break;
	case SMU_DCLK:
		member_type = METRICS_AVERAGE_DCLK;
		break;
	case SMU_MCLK:
		member_type = METRICS_AVERAGE_UCLK;
		break;
	case SMU_FCLK:
		return smu_cmn_send_smc_msg_with_param(smu,
						       SMU_MSG_GetFclkFrequency,
						       0, value);
	case SMU_GFXCLK:
	case SMU_SCLK:
		return smu_cmn_send_smc_msg_with_param(smu,
						       SMU_MSG_GetGfxclkFrequency,
						       0, value);
	default:
		return -EINVAL;
	}

	return smu_v13_0_4_get_smu_metrics_data(smu, member_type, value);
}

static int smu_v13_0_4_get_dpm_freq_by_index(struct smu_context *smu,
					     enum smu_clk_type clk_type,
					     uint32_t dpm_level,
					     uint32_t *freq)
{
	DpmClocks_t *clk_table = smu->smu_table.clocks_table;

	if (!clk_table || clk_type >= SMU_CLK_COUNT)
		return -EINVAL;

	switch (clk_type) {
	case SMU_SOCCLK:
		if (dpm_level >= clk_table->NumSocClkLevelsEnabled)
			return -EINVAL;
		*freq = clk_table->SocClocks[dpm_level];
		break;
	case SMU_VCLK:
		if (dpm_level >= clk_table->VcnClkLevelsEnabled)
			return -EINVAL;
		*freq = clk_table->VClocks[dpm_level];
		break;
	case SMU_DCLK:
		if (dpm_level >= clk_table->VcnClkLevelsEnabled)
			return -EINVAL;
		*freq = clk_table->DClocks[dpm_level];
		break;
	case SMU_UCLK:
	case SMU_MCLK:
		if (dpm_level >= clk_table->NumDfPstatesEnabled)
			return -EINVAL;
		*freq = clk_table->DfPstateTable[dpm_level].MemClk;
		break;
	case SMU_FCLK:
		if (dpm_level >= clk_table->NumDfPstatesEnabled)
			return -EINVAL;
		*freq = clk_table->DfPstateTable[dpm_level].FClk;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int smu_v13_0_4_get_dpm_level_count(struct smu_context *smu,
					   enum smu_clk_type clk_type,
					   uint32_t *count)
{
	DpmClocks_t *clk_table = smu->smu_table.clocks_table;

	switch (clk_type) {
	case SMU_SOCCLK:
		*count = clk_table->NumSocClkLevelsEnabled;
		break;
	case SMU_VCLK:
		*count = clk_table->VcnClkLevelsEnabled;
		break;
	case SMU_DCLK:
		*count = clk_table->VcnClkLevelsEnabled;
		break;
	case SMU_MCLK:
		*count = clk_table->NumDfPstatesEnabled;
		break;
	case SMU_FCLK:
		*count = clk_table->NumDfPstatesEnabled;
		break;
	default:
		break;
	}

	return 0;
}

static int smu_v13_0_4_print_clk_levels(struct smu_context *smu,
					enum smu_clk_type clk_type, char *buf)
{
	int i, idx, size = 0, ret = 0;
	uint32_t cur_value = 0, value = 0, count = 0;
	uint32_t min, max;

	smu_cmn_get_sysfs_buf(&buf, &size);

	switch (clk_type) {
	case SMU_OD_SCLK:
		size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
		size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
				      (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
		size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
				      (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
		break;
	case SMU_OD_RANGE:
		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
		size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
				      smu->gfx_default_hard_min_freq,
				      smu->gfx_default_soft_max_freq);
		break;
	case SMU_SOCCLK:
	case SMU_VCLK:
	case SMU_DCLK:
	case SMU_MCLK:
	case SMU_FCLK:
		ret = smu_v13_0_4_get_current_clk_freq(smu, clk_type, &cur_value);
		if (ret)
			break;

		ret = smu_v13_0_4_get_dpm_level_count(smu, clk_type, &count);
		if (ret)
			break;

		for (i = 0; i < count; i++) {
			idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
			ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, idx, &value);
			if (ret)
				break;

			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
					      cur_value == value ? "*" : "");
		}
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		ret = smu_v13_0_4_get_current_clk_freq(smu, clk_type, &cur_value);
		if (ret)
			break;
		min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
		max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
		if (cur_value == max)
			i = 2;
		else if (cur_value == min)
			i = 0;
		else
			i = 1;
		size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min,
				      i == 0 ? "*" : "");
		size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
				      i == 1 ? cur_value : 1100, /* UMD PSTATE GFXCLK 1100 */
				      i == 1 ? "*" : "");
		size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max,
"*" : ""); 544 break; 545 default: 546 break; 547 } 548 549 return size; 550 } 551 552 static int smu_v13_0_4_read_sensor(struct smu_context *smu, 553 enum amd_pp_sensors sensor, 554 void *data, uint32_t *size) 555 { 556 int ret = 0; 557 558 if (!data || !size) 559 return -EINVAL; 560 561 switch (sensor) { 562 case AMDGPU_PP_SENSOR_GPU_LOAD: 563 ret = smu_v13_0_4_get_smu_metrics_data(smu, 564 METRICS_AVERAGE_GFXACTIVITY, 565 (uint32_t *)data); 566 *size = 4; 567 break; 568 case AMDGPU_PP_SENSOR_GPU_POWER: 569 ret = smu_v13_0_4_get_smu_metrics_data(smu, 570 METRICS_AVERAGE_SOCKETPOWER, 571 (uint32_t *)data); 572 *size = 4; 573 break; 574 case AMDGPU_PP_SENSOR_EDGE_TEMP: 575 ret = smu_v13_0_4_get_smu_metrics_data(smu, 576 METRICS_TEMPERATURE_EDGE, 577 (uint32_t *)data); 578 *size = 4; 579 break; 580 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: 581 ret = smu_v13_0_4_get_smu_metrics_data(smu, 582 METRICS_TEMPERATURE_HOTSPOT, 583 (uint32_t *)data); 584 *size = 4; 585 break; 586 case AMDGPU_PP_SENSOR_GFX_MCLK: 587 ret = smu_v13_0_4_get_smu_metrics_data(smu, 588 METRICS_AVERAGE_UCLK, 589 (uint32_t *)data); 590 *(uint32_t *)data *= 100; 591 *size = 4; 592 break; 593 case AMDGPU_PP_SENSOR_GFX_SCLK: 594 ret = smu_v13_0_4_get_smu_metrics_data(smu, 595 METRICS_AVERAGE_GFXCLK, 596 (uint32_t *)data); 597 *(uint32_t *)data *= 100; 598 *size = 4; 599 break; 600 case AMDGPU_PP_SENSOR_VDDGFX: 601 ret = smu_v13_0_4_get_smu_metrics_data(smu, 602 METRICS_VOLTAGE_VDDGFX, 603 (uint32_t *)data); 604 *size = 4; 605 break; 606 case AMDGPU_PP_SENSOR_VDDNB: 607 ret = smu_v13_0_4_get_smu_metrics_data(smu, 608 METRICS_VOLTAGE_VDDSOC, 609 (uint32_t *)data); 610 *size = 4; 611 break; 612 case AMDGPU_PP_SENSOR_SS_APU_SHARE: 613 ret = smu_v13_0_4_get_smu_metrics_data(smu, 614 METRICS_SS_APU_SHARE, 615 (uint32_t *)data); 616 *size = 4; 617 break; 618 case AMDGPU_PP_SENSOR_SS_DGPU_SHARE: 619 ret = smu_v13_0_4_get_smu_metrics_data(smu, 620 METRICS_SS_DGPU_SHARE, 621 (uint32_t *)data); 622 *size = 4; 623 break; 624 default: 625 ret = -EOPNOTSUPP; 626 break; 627 } 628 629 return ret; 630 } 631 632 static int smu_v13_0_4_set_watermarks_table(struct smu_context *smu, 633 struct pp_smu_wm_range_sets *clock_ranges) 634 { 635 int i; 636 int ret = 0; 637 Watermarks_t *table = smu->smu_table.watermarks_table; 638 639 if (!table || !clock_ranges) 640 return -EINVAL; 641 642 if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES || 643 clock_ranges->num_writer_wm_sets > NUM_WM_RANGES) 644 return -EINVAL; 645 646 for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) { 647 table->WatermarkRow[WM_DCFCLK][i].MinClock = 648 clock_ranges->reader_wm_sets[i].min_drain_clk_mhz; 649 table->WatermarkRow[WM_DCFCLK][i].MaxClock = 650 clock_ranges->reader_wm_sets[i].max_drain_clk_mhz; 651 table->WatermarkRow[WM_DCFCLK][i].MinMclk = 652 clock_ranges->reader_wm_sets[i].min_fill_clk_mhz; 653 table->WatermarkRow[WM_DCFCLK][i].MaxMclk = 654 clock_ranges->reader_wm_sets[i].max_fill_clk_mhz; 655 656 table->WatermarkRow[WM_DCFCLK][i].WmSetting = 657 clock_ranges->reader_wm_sets[i].wm_inst; 658 } 659 660 for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) { 661 table->WatermarkRow[WM_SOCCLK][i].MinClock = 662 clock_ranges->writer_wm_sets[i].min_fill_clk_mhz; 663 table->WatermarkRow[WM_SOCCLK][i].MaxClock = 664 clock_ranges->writer_wm_sets[i].max_fill_clk_mhz; 665 table->WatermarkRow[WM_SOCCLK][i].MinMclk = 666 clock_ranges->writer_wm_sets[i].min_drain_clk_mhz; 667 table->WatermarkRow[WM_SOCCLK][i].MaxMclk = 668 clock_ranges->writer_wm_sets[i].max_drain_clk_mhz; 669 

		table->WatermarkRow[WM_SOCCLK][i].WmSetting =
			clock_ranges->writer_wm_sets[i].wm_inst;
	}

	smu->watermarks_bitmap |= WATERMARKS_EXIST;

	/* pass data to smu controller */
	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
	    !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
		ret = smu_cmn_write_watermarks_table(smu);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to update WMTABLE!");
			return ret;
		}
		smu->watermarks_bitmap |= WATERMARKS_LOADED;
	}

	return 0;
}

static bool smu_v13_0_4_clk_dpm_is_enabled(struct smu_context *smu,
					   enum smu_clk_type clk_type)
{
	enum smu_feature_mask feature_id = 0;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
	case SMU_FCLK:
		feature_id = SMU_FEATURE_DPM_FCLK_BIT;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
		break;
	case SMU_SOCCLK:
		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
		break;
	case SMU_VCLK:
	case SMU_DCLK:
		feature_id = SMU_FEATURE_VCN_DPM_BIT;
		break;
	default:
		return true;
	}

	return smu_cmn_feature_is_enabled(smu, feature_id);
}

static int smu_v13_0_4_get_dpm_ultimate_freq(struct smu_context *smu,
					     enum smu_clk_type clk_type,
					     uint32_t *min,
					     uint32_t *max)
{
	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
	uint32_t clock_limit;
	uint32_t max_dpm_level, min_dpm_level;
	int ret = 0;

	if (!smu_v13_0_4_clk_dpm_is_enabled(smu, clk_type)) {
		switch (clk_type) {
		case SMU_MCLK:
		case SMU_UCLK:
			clock_limit = smu->smu_table.boot_values.uclk;
			break;
		case SMU_FCLK:
			clock_limit = smu->smu_table.boot_values.fclk;
			break;
		case SMU_GFXCLK:
		case SMU_SCLK:
			clock_limit = smu->smu_table.boot_values.gfxclk;
			break;
		case SMU_SOCCLK:
			clock_limit = smu->smu_table.boot_values.socclk;
			break;
		case SMU_VCLK:
			clock_limit = smu->smu_table.boot_values.vclk;
			break;
		case SMU_DCLK:
			clock_limit = smu->smu_table.boot_values.dclk;
			break;
		default:
			clock_limit = 0;
			break;
		}

		/* clock in Mhz unit */
		if (min)
			*min = clock_limit / 100;
		if (max)
			*max = clock_limit / 100;

		return 0;
	}

	if (max) {
		switch (clk_type) {
		case SMU_GFXCLK:
		case SMU_SCLK:
			*max = clk_table->MaxGfxClk;
			break;
		case SMU_MCLK:
		case SMU_UCLK:
		case SMU_FCLK:
			max_dpm_level = 0;
			break;
		case SMU_SOCCLK:
			max_dpm_level = clk_table->NumSocClkLevelsEnabled - 1;
			break;
		case SMU_VCLK:
		case SMU_DCLK:
			max_dpm_level = clk_table->VcnClkLevelsEnabled - 1;
			break;
		default:
			return -EINVAL;
		}

		if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
			ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type,
								max_dpm_level,
								max);
			if (ret)
				return ret;
		}
	}

	if (min) {
		switch (clk_type) {
		case SMU_GFXCLK:
		case SMU_SCLK:
			*min = clk_table->MinGfxClk;
			break;
		case SMU_MCLK:
		case SMU_UCLK:
		case SMU_FCLK:
			min_dpm_level = clk_table->NumDfPstatesEnabled - 1;
			break;
		case SMU_SOCCLK:
			min_dpm_level = 0;
			break;
		case SMU_VCLK:
		case SMU_DCLK:
			min_dpm_level = 0;
			break;
		default:
			return -EINVAL;
		}

		if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
			ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type,
								min_dpm_level,
								min);
		}
	}

	return ret;
}
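
/*
 * Descriptive note (added comment, not from the original file): clamp
 * @clk_type to the [@min, @max] MHz window by issuing the domain's hard-min
 * request first, then the matching soft-max request.
 */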
static int smu_v13_0_4_set_soft_freq_limited_range(struct smu_context *smu,
						   enum smu_clk_type clk_type,
						   uint32_t min,
						   uint32_t max)
{
	enum smu_message_type msg_set_min, msg_set_max;
	int ret = 0;

	if (!smu_v13_0_4_clk_dpm_is_enabled(smu, clk_type))
		return -EINVAL;

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_SCLK:
		msg_set_min = SMU_MSG_SetHardMinGfxClk;
		msg_set_max = SMU_MSG_SetSoftMaxGfxClk;
		break;
	case SMU_FCLK:
		msg_set_min = SMU_MSG_SetHardMinFclkByFreq;
		msg_set_max = SMU_MSG_SetSoftMaxFclkByFreq;
		break;
	case SMU_SOCCLK:
		msg_set_min = SMU_MSG_SetHardMinSocclkByFreq;
		msg_set_max = SMU_MSG_SetSoftMaxSocclkByFreq;
		break;
	case SMU_VCLK:
	case SMU_DCLK:
		msg_set_min = SMU_MSG_SetHardMinVcn;
		msg_set_max = SMU_MSG_SetSoftMaxVcn;
		break;
	default:
		return -EINVAL;
	}

	ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min, NULL);
	if (ret)
		return ret;

	return smu_cmn_send_smc_msg_with_param(smu, msg_set_max,
					       max, NULL);
}

static int smu_v13_0_4_force_clk_levels(struct smu_context *smu,
					enum smu_clk_type clk_type,
					uint32_t mask)
{
	uint32_t soft_min_level = 0, soft_max_level = 0;
	uint32_t min_freq = 0, max_freq = 0;
	int ret = 0;

	soft_min_level = mask ? (ffs(mask) - 1) : 0;
	soft_max_level = mask ? (fls(mask) - 1) : 0;

	switch (clk_type) {
	case SMU_SOCCLK:
	case SMU_FCLK:
	case SMU_VCLK:
	case SMU_DCLK:
		ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
		if (ret)
			break;

		ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
		if (ret)
			break;

		ret = smu_v13_0_4_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int smu_v13_0_4_set_performance_level(struct smu_context *smu,
					     enum amd_dpm_forced_level level)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t sclk_min = 0, sclk_max = 0;
	uint32_t fclk_min = 0, fclk_max = 0;
	uint32_t socclk_min = 0, socclk_max = 0;
	int ret = 0;

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
		smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max);
		smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_max);
		smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_max);
		sclk_min = sclk_max;
		fclk_min = fclk_max;
		socclk_min = socclk_max;
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL);
		smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, NULL);
		smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, NULL);
		sclk_max = sclk_min;
		fclk_max = fclk_min;
		socclk_max = socclk_min;
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
		smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max);
		smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, &fclk_max);
		smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, &socclk_max);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		/* Temporarily do nothing since the optimal clocks haven't been provided yet */
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
		return 0;
	default:
		dev_err(adev->dev, "Invalid performance level %d\n", level);
		return -EINVAL;
	}

	if (sclk_min && sclk_max) {
		ret = smu_v13_0_4_set_soft_freq_limited_range(smu,
							      SMU_SCLK,
							      sclk_min,
							      sclk_max);
		if (ret)
			return ret;

		smu->gfx_actual_hard_min_freq = sclk_min;
		smu->gfx_actual_soft_max_freq = sclk_max;
	}

	if (fclk_min && fclk_max) {
		ret = smu_v13_0_4_set_soft_freq_limited_range(smu,
							      SMU_FCLK,
							      fclk_min,
							      fclk_max);
		if (ret)
			return ret;
	}

	if (socclk_min && socclk_max) {
		ret = smu_v13_0_4_set_soft_freq_limited_range(smu,
							      SMU_SOCCLK,
							      socclk_min,
							      socclk_max);
		if (ret)
			return ret;
	}

	return ret;
}

static int smu_v13_0_4_mode2_reset(struct smu_context *smu)
{
	return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
					       SMU_RESET_MODE_2, NULL);
}

static int smu_v13_0_4_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
{
	DpmClocks_t *clk_table = smu->smu_table.clocks_table;

	smu->gfx_default_hard_min_freq = clk_table->MinGfxClk;
	smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk;
	smu->gfx_actual_hard_min_freq = 0;
	smu->gfx_actual_soft_max_freq = 0;

	return 0;
}

static const struct pptable_funcs smu_v13_0_4_ppt_funcs = {
	.check_fw_status = smu_v13_0_check_fw_status,
	.check_fw_version = smu_v13_0_check_fw_version,
	.init_smc_tables = smu_v13_0_4_init_smc_tables,
	.fini_smc_tables = smu_v13_0_4_fini_smc_tables,
	.get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
	.system_features_control = smu_v13_0_4_system_features_control,
	.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
	.send_smc_msg = smu_cmn_send_smc_msg,
	.dpm_set_vcn_enable = smu_v13_0_set_vcn_enable,
	.dpm_set_jpeg_enable = smu_v13_0_set_jpeg_enable,
	.set_default_dpm_table = smu_v13_0_set_default_dpm_tables,
	.read_sensor = smu_v13_0_4_read_sensor,
	.is_dpm_running = smu_v13_0_4_is_dpm_running,
	.set_watermarks_table = smu_v13_0_4_set_watermarks_table,
	.get_gpu_metrics = smu_v13_0_4_get_gpu_metrics,
	.get_enabled_mask = smu_cmn_get_enabled_mask,
	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
	.set_driver_table_location = smu_v13_0_set_driver_table_location,
	.gfx_off_control = smu_v13_0_gfx_off_control,
	.mode2_reset = smu_v13_0_4_mode2_reset,
	.get_dpm_ultimate_freq = smu_v13_0_4_get_dpm_ultimate_freq,
	.od_edit_dpm_table = smu_v13_0_od_edit_dpm_table,
	.print_clk_levels = smu_v13_0_4_print_clk_levels,
	.force_clk_levels = smu_v13_0_4_force_clk_levels,
	.set_performance_level = smu_v13_0_4_set_performance_level,
	.set_fine_grain_gfx_freq_parameters = smu_v13_0_4_set_fine_grain_gfx_freq_parameters,
	.set_gfx_power_up_by_imu = smu_v13_0_set_gfx_power_up_by_imu,
};

static void smu_v13_0_4_set_smu_mailbox_registers(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82);
	smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66);
	smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
}

void smu_v13_0_4_set_ppt_funcs(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	smu->ppt_funcs = &smu_v13_0_4_ppt_funcs;
	smu->message_map = smu_v13_0_4_message_map;
	smu->feature_map = smu_v13_0_4_feature_mask_map;
	smu->table_map = smu_v13_0_4_table_map;
	smu->is_apu = true;

	if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 4))
		smu_v13_0_4_set_smu_mailbox_registers(smu);
	else
		smu_v13_0_set_smu_mailbox_registers(smu);
}