/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "hwmgr.h"
#include "amd_powerplay.h"
#include "vega20_smumgr.h"
#include "hardwaremanager.h"
#include "ppatomfwctrl.h"
#include "atomfirmware.h"
#include "cgs_common.h"
#include "vega20_powertune.h"
#include "vega20_inc.h"
#include "pppcielanes.h"
#include "vega20_hwmgr.h"
#include "vega20_processpptables.h"
#include "vega20_pptable.h"
#include "vega20_thermal.h"
#include "vega20_ppsmc.h"
#include "pp_debug.h"
#include "amd_pcie_helpers.h"
#include "ppinterrupt.h"
#include "pp_overdriver.h"
#include "pp_thermal.h"
#include "soc15_common.h"
#include "vega20_baco.h"
#include "smuio/smuio_9_0_offset.h"
#include "smuio/smuio_9_0_sh_mask.h"
#include "nbio/nbio_7_4_sh_mask.h"

#define smnPCIE_LC_SPEED_CNTL		0x11140290
#define smnPCIE_LC_LINK_WIDTH_CNTL	0x11140288

#define LINK_WIDTH_MAX			6
#define LINK_SPEED_MAX			3
static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
static int link_speed[] = {25, 50, 80, 160};

static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);

	data->gfxclk_average_alpha = PPVEGA20_VEGA20GFXCLKAVERAGEALPHA_DFLT;
	data->socclk_average_alpha = PPVEGA20_VEGA20SOCCLKAVERAGEALPHA_DFLT;
	data->uclk_average_alpha = PPVEGA20_VEGA20UCLKCLKAVERAGEALPHA_DFLT;
	data->gfx_activity_average_alpha = PPVEGA20_VEGA20GFXACTIVITYAVERAGEALPHA_DFLT;
	data->lowest_uclk_reserved_for_ulv = PPVEGA20_VEGA20LOWESTUCLKRESERVEDFORULV_DFLT;

	data->display_voltage_mode = PPVEGA20_VEGA20DISPLAYVOLTAGEMODE_DFLT;
	data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->disp_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->disp_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->disp_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->phy_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->phy_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->phy_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;

	/*
	 * Disable the following features for now:
	 *   GFXCLK DS
	 *   SOCCLK DS
	 *   LCLK DS
	 *   DCEFCLK DS
	 *   FCLK DS
	 *   MP1CLK DS
	 *   MP0CLK DS
	 */
	data->registry_data.disallowed_features = 0xE0041C00;
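	/*
	 * Illustrative decode (not in the original source): assuming the
	 * standard Vega20 SMU feature numbering used later in this file,
	 * 0xE0041C00 sets bits 10-12 (DS_GFXCLK/DS_SOCCLK/DS_LCLK), bit 18
	 * (DS_DCEFCLK) and bits 29-31 (DS_FCLK/DS_MP1CLK/DS_MP0CLK), i.e.
	 * exactly the deep-sleep features listed above.
	 */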
	/* ECC feature should be disabled on old SMUs */
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion, &hwmgr->smu_version);
	if (hwmgr->smu_version < 0x282100)
		data->registry_data.disallowed_features |= FEATURE_ECC_MASK;

	if (!(hwmgr->feature_mask & PP_PCIE_DPM_MASK))
		data->registry_data.disallowed_features |= FEATURE_DPM_LINK_MASK;

	if (!(hwmgr->feature_mask & PP_SCLK_DPM_MASK))
		data->registry_data.disallowed_features |= FEATURE_DPM_GFXCLK_MASK;

	if (!(hwmgr->feature_mask & PP_SOCCLK_DPM_MASK))
		data->registry_data.disallowed_features |= FEATURE_DPM_SOCCLK_MASK;

	if (!(hwmgr->feature_mask & PP_MCLK_DPM_MASK))
		data->registry_data.disallowed_features |= FEATURE_DPM_UCLK_MASK;

	if (!(hwmgr->feature_mask & PP_DCEFCLK_DPM_MASK))
		data->registry_data.disallowed_features |= FEATURE_DPM_DCEFCLK_MASK;

	if (!(hwmgr->feature_mask & PP_ULV_MASK))
		data->registry_data.disallowed_features |= FEATURE_ULV_MASK;

	if (!(hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK))
		data->registry_data.disallowed_features |= FEATURE_DS_GFXCLK_MASK;

	data->registry_data.od_state_in_dc_support = 0;
	data->registry_data.thermal_support = 1;
	data->registry_data.skip_baco_hardware = 0;

	data->registry_data.log_avfs_param = 0;
	data->registry_data.sclk_throttle_low_notification = 1;
	data->registry_data.force_dpm_high = 0;
	data->registry_data.stable_pstate_sclk_dpm_percentage = 75;

	data->registry_data.didt_support = 0;
	if (data->registry_data.didt_support) {
		data->registry_data.didt_mode = 6;
		data->registry_data.sq_ramping_support = 1;
		data->registry_data.db_ramping_support = 0;
		data->registry_data.td_ramping_support = 0;
		data->registry_data.tcp_ramping_support = 0;
		data->registry_data.dbr_ramping_support = 0;
		data->registry_data.edc_didt_support = 1;
		data->registry_data.gc_didt_support = 0;
		data->registry_data.psm_didt_support = 0;
	}

	data->registry_data.pcie_lane_override = 0xff;
	data->registry_data.pcie_speed_override = 0xff;
	data->registry_data.pcie_clock_override = 0xffffffff;
	data->registry_data.regulator_hot_gpio_support = 1;
	data->registry_data.ac_dc_switch_gpio_support = 0;
	data->registry_data.quick_transition_support = 0;
	data->registry_data.zrpm_start_temp = 0xffff;
	data->registry_data.zrpm_stop_temp = 0xffff;
	data->registry_data.od8_feature_enable = 1;
	data->registry_data.disable_water_mark = 0;
	data->registry_data.disable_pp_tuning = 0;
	data->registry_data.disable_xlpp_tuning = 0;
	data->registry_data.disable_workload_policy = 0;
	data->registry_data.perf_ui_tuning_profile_turbo = 0x19190F0F;
	data->registry_data.perf_ui_tuning_profile_powerSave = 0x19191919;
	data->registry_data.perf_ui_tuning_profile_xl = 0x00000F0A;
	data->registry_data.force_workload_policy_mask = 0;
	data->registry_data.disable_3d_fs_detection = 0;
	data->registry_data.fps_support = 1;
	data->registry_data.disable_auto_wattman = 1;
	data->registry_data.auto_wattman_debug = 0;
	data->registry_data.auto_wattman_sample_period = 100;
	data->registry_data.fclk_gfxclk_ratio = 0;
	data->registry_data.auto_wattman_threshold = 50;
	data->registry_data.gfxoff_controlled_by_driver = 1;
	data->gfxoff_allowed = false;
	data->counter_gfxoff = 0;
	data->registry_data.pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
}

static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	struct amdgpu_device *adev = hwmgr->adev;

	if (data->vddci_control == VEGA20_VOLTAGE_CONTROL_NONE)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ControlVDDCI);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TablelessHardwareInterface);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_BACO);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EnableSMU7ThermalManagement);

	if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_UVDPowerGating);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_VCEPowerGating);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_UnTabledHardwareInterface);

	if (data->registry_data.od8_feature_enable)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_OD8inACSupport);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ActivityReporting);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_FanSpeedInTableIsRPM);

	if (data->registry_data.od_state_in_dc_support) {
		if (data->registry_data.od8_feature_enable)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_OD8inDCSupport);
	}

	if (data->registry_data.thermal_support &&
	    data->registry_data.fuzzy_fan_control_support &&
	    hwmgr->thermal_controller.advanceFanControlParameters.usTMax)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ODFuzzyFanControlSupport);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DynamicPowerManagement);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SMC);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ThermalPolicyDelay);

	if (data->registry_data.force_dpm_high)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ExclusiveModeAlwaysHigh);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DynamicUVDState);

	if (data->registry_data.sclk_throttle_low_notification)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_SclkThrottleLowNotification);

	/* power tune caps */
	/* assume disabled */
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DiDtSupport);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TCPRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DiDtEDCEnable);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_GCEDC);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PSM);

	if (data->registry_data.didt_support) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_DiDtSupport);
		if (data->registry_data.sq_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_SQRamping);
		if (data->registry_data.db_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_DBRamping);
		if (data->registry_data.td_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_TDRamping);
		if (data->registry_data.tcp_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_TCPRamping);
		if (data->registry_data.dbr_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_DBRRamping);
		if (data->registry_data.edc_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_DiDtEDCEnable);
		if (data->registry_data.gc_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_GCEDC);
		if (data->registry_data.psm_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_PSM);
	}

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_RegulatorHot);

	if (data->registry_data.ac_dc_switch_gpio_support) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_AutomaticDCTransition);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
	}

	if (data->registry_data.quick_transition_support) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_AutomaticDCTransition);
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_Falcon_QuickTransition);
	}

	if (data->lowest_uclk_reserved_for_ulv != PPVEGA20_VEGA20LOWESTUCLKRESERVEDFORULV_DFLT) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_LowestUclkReservedForUlv);
		if (data->lowest_uclk_reserved_for_ulv == 1)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_LowestUclkReservedForUlv);
	}

	if (data->registry_data.custom_fan_support)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_CustomFanControlSupport);

	return 0;
}

static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t top32, bottom32;
	int i;

	data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
			FEATURE_DPM_PREFETCHER_BIT;
	data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id =
			FEATURE_DPM_GFXCLK_BIT;
	data->smu_features[GNLD_DPM_UCLK].smu_feature_id =
			FEATURE_DPM_UCLK_BIT;
	data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id =
			FEATURE_DPM_SOCCLK_BIT;
	data->smu_features[GNLD_DPM_UVD].smu_feature_id =
			FEATURE_DPM_UVD_BIT;
	data->smu_features[GNLD_DPM_VCE].smu_feature_id =
			FEATURE_DPM_VCE_BIT;
	data->smu_features[GNLD_ULV].smu_feature_id =
			FEATURE_ULV_BIT;
	data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id =
			FEATURE_DPM_MP0CLK_BIT;
	data->smu_features[GNLD_DPM_LINK].smu_feature_id =
			FEATURE_DPM_LINK_BIT;
	data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id =
			FEATURE_DPM_DCEFCLK_BIT;
	data->smu_features[GNLD_DS_GFXCLK].smu_feature_id =
			FEATURE_DS_GFXCLK_BIT;
	data->smu_features[GNLD_DS_SOCCLK].smu_feature_id =
			FEATURE_DS_SOCCLK_BIT;
	data->smu_features[GNLD_DS_LCLK].smu_feature_id =
			FEATURE_DS_LCLK_BIT;
	data->smu_features[GNLD_PPT].smu_feature_id =
			FEATURE_PPT_BIT;
	data->smu_features[GNLD_TDC].smu_feature_id =
			FEATURE_TDC_BIT;
	data->smu_features[GNLD_THERMAL].smu_feature_id =
			FEATURE_THERMAL_BIT;
	data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id =
			FEATURE_GFX_PER_CU_CG_BIT;
	data->smu_features[GNLD_RM].smu_feature_id =
			FEATURE_RM_BIT;
	data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id =
			FEATURE_DS_DCEFCLK_BIT;
	data->smu_features[GNLD_ACDC].smu_feature_id =
			FEATURE_ACDC_BIT;
	data->smu_features[GNLD_VR0HOT].smu_feature_id =
			FEATURE_VR0HOT_BIT;
	data->smu_features[GNLD_VR1HOT].smu_feature_id =
			FEATURE_VR1HOT_BIT;
	data->smu_features[GNLD_FW_CTF].smu_feature_id =
			FEATURE_FW_CTF_BIT;
	data->smu_features[GNLD_LED_DISPLAY].smu_feature_id =
			FEATURE_LED_DISPLAY_BIT;
	data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
			FEATURE_FAN_CONTROL_BIT;
	data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT;
	data->smu_features[GNLD_GFXOFF].smu_feature_id = FEATURE_GFXOFF_BIT;
	data->smu_features[GNLD_CG].smu_feature_id = FEATURE_CG_BIT;
	data->smu_features[GNLD_DPM_FCLK].smu_feature_id = FEATURE_DPM_FCLK_BIT;
	data->smu_features[GNLD_DS_FCLK].smu_feature_id = FEATURE_DS_FCLK_BIT;
	data->smu_features[GNLD_DS_MP1CLK].smu_feature_id = FEATURE_DS_MP1CLK_BIT;
	data->smu_features[GNLD_DS_MP0CLK].smu_feature_id = FEATURE_DS_MP0CLK_BIT;
	data->smu_features[GNLD_XGMI].smu_feature_id = FEATURE_XGMI_BIT;
	data->smu_features[GNLD_ECC].smu_feature_id = FEATURE_ECC_BIT;

	for (i = 0; i < GNLD_FEATURES_MAX; i++) {
		data->smu_features[i].smu_feature_bitmap =
			(uint64_t)(1ULL << data->smu_features[i].smu_feature_id);
		data->smu_features[i].allowed =
			((data->registry_data.disallowed_features >> i) & 1) ?
			false : true;
	}

	/* Get the SN to turn into a Unique ID */
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);

	adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
}

static int vega20_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
{
	return 0;
}

static int vega20_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
	kfree(hwmgr->backend);
	hwmgr->backend = NULL;

	return 0;
}

static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data;
	struct amdgpu_device *adev = hwmgr->adev;

	data = kzalloc(sizeof(struct vega20_hwmgr), GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	hwmgr->backend = data;

	hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
	hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	vega20_set_default_registry_data(hwmgr);

	data->disable_dpm_mask = 0xff;

	/* need to set voltage control types before EVV patching */
	data->vddc_control = VEGA20_VOLTAGE_CONTROL_NONE;
	data->mvdd_control = VEGA20_VOLTAGE_CONTROL_NONE;
	data->vddci_control = VEGA20_VOLTAGE_CONTROL_NONE;

	data->water_marks_bitmap = 0;
	data->avfs_exist = false;

	vega20_set_features_platform_caps(hwmgr);

	vega20_init_dpm_defaults(hwmgr);

	/* Parse pptable data read from VBIOS */
	vega20_set_private_data_based_on_pptable(hwmgr);

	data->is_tlu_enabled = false;

	hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
			VEGA20_MAX_HARDWARE_POWERLEVELS;
	hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
	hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;

	hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
	/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
	hwmgr->platform_descriptor.clockStep.engineClock = 500;
	hwmgr->platform_descriptor.clockStep.memoryClock = 500;

	data->total_active_cus = adev->gfx.cu_info.number;
	data->is_custom_profile_set = false;

	return 0;
}

static int vega20_init_sclk_threshold(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);

	data->low_sclk_interrupt_threshold = 0;

	return 0;
}

static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
	int ret = 0;
	bool use_baco = (amdgpu_in_reset(adev) &&
			 (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
			(adev->in_runpm && amdgpu_asic_supports_baco(adev));

	ret = vega20_init_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE(!ret,
			"Failed to init sclk threshold!",
			return ret);

	if (use_baco) {
		ret = vega20_baco_apply_vdci_flush_workaround(hwmgr);
		if (ret)
			pr_err("Failed to apply vega20 baco workaround!\n");
	}

	return ret;
}

/*
 * @fn vega20_init_dpm_state
 * @brief Function to initialize the Soft Min/Max and Hard Min/Max limits of a
 *        DPM table (minimums to 0, maximums to VG20_CLOCK_MAX_DEFAULT).
 *
 * @param dpm_state - the address of the DPM Table to initialize.
 * @return None.
 */
static void vega20_init_dpm_state(struct vega20_dpm_state *dpm_state)
{
	dpm_state->soft_min_level = 0x0;
	dpm_state->soft_max_level = VG20_CLOCK_MAX_DEFAULT;
	dpm_state->hard_min_level = 0x0;
	dpm_state->hard_max_level = VG20_CLOCK_MAX_DEFAULT;
}

static int vega20_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
		PPCLK_e clk_id, uint32_t *num_of_levels)
{
	int ret = 0;

	/* an index of 0xFF asks the SMU for the number of DPM levels of this clock */
	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_GetDpmFreqByIndex,
			(clk_id << 16 | 0xFF),
			num_of_levels);
	PP_ASSERT_WITH_CODE(!ret,
			"[GetNumOfDpmLevel] failed to get dpm levels!",
			return ret);

	return ret;
}

static int vega20_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
		PPCLK_e clk_id, uint32_t index, uint32_t *clk)
{
	int ret = 0;

	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_GetDpmFreqByIndex,
			(clk_id << 16 | index),
			clk);
	PP_ASSERT_WITH_CODE(!ret,
			"[GetDpmFreqByIndex] failed to get dpm freq by index!",
			return ret);

	return ret;
}

static int vega20_setup_single_dpm_table(struct pp_hwmgr *hwmgr,
		struct vega20_single_dpm_table *dpm_table, PPCLK_e clk_id)
{
	int ret = 0;
	uint32_t i, num_of_levels, clk;

	ret = vega20_get_number_of_dpm_level(hwmgr, clk_id, &num_of_levels);
	PP_ASSERT_WITH_CODE(!ret,
			"[SetupSingleDpmTable] failed to get clk levels!",
			return ret);

	dpm_table->count = num_of_levels;

	for (i = 0; i < num_of_levels; i++) {
		ret = vega20_get_dpm_frequency_by_index(hwmgr, clk_id, i, &clk);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupSingleDpmTable] failed to get clk of specific level!",
				return ret);
		dpm_table->dpm_levels[i].value = clk;
		dpm_table->dpm_levels[i].enabled = true;
	}

	return ret;
}

static int vega20_setup_gfxclk_dpm_table(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_single_dpm_table *dpm_table;
	int ret = 0;

	dpm_table = &(data->dpm_table.gfx_table);
	if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_GFXCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get gfxclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.gfx_clock / 100;
	}

	return ret;
}

static int vega20_setup_memclk_dpm_table(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_single_dpm_table *dpm_table;
	int ret = 0;

	dpm_table = &(data->dpm_table.mem_table);
	if (data->smu_features[GNLD_DPM_UCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_UCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get memclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.mem_clock / 100;
	}

	return ret;
}

/*
 * This function is to initialize all DPM state tables
 * for SMU based on the dependency table.
 * Dynamic state patching function will then trim these
 * state tables to the allowed range based
 * on the power policy or external client requests,
 * such as UVD request, etc.
 */
static int vega20_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_single_dpm_table *dpm_table;
	int ret = 0;

	memset(&data->dpm_table, 0, sizeof(data->dpm_table));

	/* socclk */
	dpm_table = &(data->dpm_table.soc_table);
	if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_SOCCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get socclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.soc_clock / 100;
	}
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* gfxclk */
	dpm_table = &(data->dpm_table.gfx_table);
	ret = vega20_setup_gfxclk_dpm_table(hwmgr);
	if (ret)
		return ret;
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* memclk */
	dpm_table = &(data->dpm_table.mem_table);
	ret = vega20_setup_memclk_dpm_table(hwmgr);
	if (ret)
		return ret;
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* eclk */
	dpm_table = &(data->dpm_table.eclk_table);
	if (data->smu_features[GNLD_DPM_VCE].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_ECLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get eclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.eclock / 100;
	}
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* vclk */
	dpm_table = &(data->dpm_table.vclk_table);
	if (data->smu_features[GNLD_DPM_UVD].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_VCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get vclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.vclock / 100;
	}
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* dclk */
	dpm_table = &(data->dpm_table.dclk_table);
	if (data->smu_features[GNLD_DPM_UVD].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get dclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.dclock / 100;
	}
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* dcefclk */
	dpm_table = &(data->dpm_table.dcef_table);
	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCEFCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get dcefclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.dcef_clock / 100;
	}
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* pixclk */
	dpm_table = &(data->dpm_table.pixel_table);
	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PIXCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get pixclk dpm levels!",
				return ret);
	} else
		dpm_table->count = 0;
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* dispclk */
	dpm_table = &(data->dpm_table.display_table);
	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DISPCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get dispclk dpm levels!",
				return ret);
	} else
		dpm_table->count = 0;
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* phyclk */
	dpm_table = &(data->dpm_table.phy_table);
	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PHYCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get phyclk dpm levels!",
				return ret);
	} else
		dpm_table->count = 0;
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* fclk */
	dpm_table = &(data->dpm_table.fclk_table);
	if (data->smu_features[GNLD_DPM_FCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_FCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get fclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.fclock / 100;
	}
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* save a copy of the default DPM table */
	memcpy(&(data->golden_dpm_table), &(data->dpm_table),
			sizeof(struct vega20_dpm_table));

	return 0;
}

/**
 * Initializes the SMC table and uploads it
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return 0 on success, otherwise an error code.
 */
static int vega20_init_smc_table(struct pp_hwmgr *hwmgr)
{
	int result;
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
	struct phm_ppt_v3_information *pptable_information =
		(struct phm_ppt_v3_information *)hwmgr->pptable;

	result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values);
	PP_ASSERT_WITH_CODE(!result,
			"[InitSMCTable] Failed to get vbios bootup values!",
			return result);

	data->vbios_boot_state.vddc = boot_up_values.usVddc;
	data->vbios_boot_state.vddci = boot_up_values.usVddci;
	data->vbios_boot_state.mvddc = boot_up_values.usMvddc;
	data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
	data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
	data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
	data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
	data->vbios_boot_state.eclock = boot_up_values.ulEClk;
	data->vbios_boot_state.vclock = boot_up_values.ulVClk;
	data->vbios_boot_state.dclock = boot_up_values.ulDClk;
	data->vbios_boot_state.fclock = boot_up_values.ulFClk;
	data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID;

	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetMinDeepSleepDcefclk,
			(uint32_t)(data->vbios_boot_state.dcef_clock / 100),
			NULL);

	memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t));

	result = smum_smc_table_manager(hwmgr,
					(uint8_t *)pp_table, TABLE_PPTABLE, false);
	PP_ASSERT_WITH_CODE(!result,
			"[InitSMCTable] Failed to upload PPtable!",
			return result);

	return 0;
}

/*
 * Override PCIe link speed and link width for DPM Level 1. PPTable entries
 * reflect the ASIC capabilities and not the system capabilities, e.g. a
 * Vega20 board in a PCIe Gen3 system. In that case, when the SMU tries to
 * switch to DPM1, it fails as the system doesn't support Gen4.
 */
static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg, pcie_gen_arg, pcie_width_arg;
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	int i;
	int ret;

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
		pcie_width = 6;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
		pcie_width = 5;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
		pcie_width = 4;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
		pcie_width = 3;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
		pcie_width = 2;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
		pcie_width = 1;

	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
	 */
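	/*
	 * Illustrative example (not in the original source): forcing link DPM1
	 * to Gen4 x16 would use the argument (1 << 16) | (3 << 8) | 6 = 0x10306.
	 */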
	for (i = 0; i < NUM_LINK_LEVELS; i++) {
		pcie_gen_arg = (pp_table->PcieGenSpeed[i] > pcie_gen) ? pcie_gen :
			pp_table->PcieGenSpeed[i];
		pcie_width_arg = (pp_table->PcieLaneCount[i] > pcie_width) ? pcie_width :
			pp_table->PcieLaneCount[i];

		if (pcie_gen_arg != pp_table->PcieGenSpeed[i] || pcie_width_arg !=
		    pp_table->PcieLaneCount[i]) {
			smu_pcie_arg = (i << 16) | (pcie_gen_arg << 8) | pcie_width_arg;
			ret = smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
					NULL);
			PP_ASSERT_WITH_CODE(!ret,
					"[OverridePcieParameters] Attempt to override pcie params failed!",
					return ret);
		}

		/* update the pptable */
		pp_table->PcieGenSpeed[i] = pcie_gen_arg;
		pp_table->PcieLaneCount[i] = pcie_width_arg;
	}

	/* override to the highest if it's disabled from ppfeaturemask */
	if (data->registry_data.pcie_dpm_key_disabled) {
		for (i = 0; i < NUM_LINK_LEVELS; i++) {
			smu_pcie_arg = (i << 16) | (pcie_gen << 8) | pcie_width;
			ret = smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
					NULL);
			PP_ASSERT_WITH_CODE(!ret,
					"[OverridePcieParameters] Attempt to override pcie params failed!",
					return ret);

			pp_table->PcieGenSpeed[i] = pcie_gen;
			pp_table->PcieLaneCount[i] = pcie_width;
		}
		ret = vega20_enable_smc_features(hwmgr,
				false,
				data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap);
		PP_ASSERT_WITH_CODE(!ret,
				"Attempt to Disable DPM LINK Failed!",
				return ret);
		data->smu_features[GNLD_DPM_LINK].enabled = false;
		data->smu_features[GNLD_DPM_LINK].supported = false;
	}

	return 0;
}

static int vega20_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	uint32_t allowed_features_low = 0, allowed_features_high = 0;
	int i;
	int ret = 0;

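	/*
	 * Split the 64-bit per-feature bitmaps into the two 32-bit words the
	 * SMU expects: feature ids 0..31 go into the low word, ids 32 and
	 * above into the high word.
	 */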
	for (i = 0; i < GNLD_FEATURES_MAX; i++)
		if (data->smu_features[i].allowed)
			data->smu_features[i].smu_feature_id > 31 ?
				(allowed_features_high |=
				 ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_HIGH_SHIFT)
				  & 0xFFFFFFFF)) :
				(allowed_features_low |=
				 ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_LOW_SHIFT)
				  & 0xFFFFFFFF));

	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high, NULL);
	PP_ASSERT_WITH_CODE(!ret,
			"[SetAllowedFeaturesMask] Attempt to set allowed features mask(high) failed!",
			return ret);

	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low, NULL);
	PP_ASSERT_WITH_CODE(!ret,
			"[SetAllowedFeaturesMask] Attempt to set allowed features mask (low) failed!",
			return ret);

	return 0;
}

static int vega20_run_btc(struct pp_hwmgr *hwmgr)
{
	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunBtc, NULL);
}

static int vega20_run_btc_afll(struct pp_hwmgr *hwmgr)
{
	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAfllBtc, NULL);
}

static int vega20_enable_all_smu_features(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	uint64_t features_enabled;
	int i;
	bool enabled;
	int ret = 0;

	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
			PPSMC_MSG_EnableAllSmuFeatures,
			NULL)) == 0,
			"[EnableAllSMUFeatures] Failed to enable all smu features!",
			return ret);

	ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
	PP_ASSERT_WITH_CODE(!ret,
			"[EnableAllSmuFeatures] Failed to get enabled smc features!",
			return ret);

	for (i = 0; i < GNLD_FEATURES_MAX; i++) {
		enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ?
			true : false;
		data->smu_features[i].enabled = enabled;
		data->smu_features[i].supported = enabled;

#if 0
		if (data->smu_features[i].allowed && !enabled)
			pr_info("[EnableAllSMUFeatures] feature %d is expected enabled!", i);
		else if (!data->smu_features[i].allowed && enabled)
			pr_info("[EnableAllSMUFeatures] feature %d is expected disabled!", i);
#endif
	}

	return 0;
}

static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);

	if (data->smu_features[GNLD_DPM_UCLK].enabled)
		return smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetUclkFastSwitch,
			1,
			NULL);

	return 0;
}

static int vega20_send_clock_ratio(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);

	return smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetFclkGfxClkRatio,
			data->registry_data.fclk_gfxclk_ratio,
			NULL);
}

static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	int i, ret = 0;

	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
			PPSMC_MSG_DisableAllSmuFeatures,
			NULL)) == 0,
			"[DisableAllSMUFeatures] Failed to disable all smu features!",
			return ret);

	for (i = 0; i < GNLD_FEATURES_MAX; i++)
		data->smu_features[i].enabled = 0;

	return 0;
}

static int vega20_od8_set_feature_capabilities(
		struct pp_hwmgr *hwmgr)
{
	struct phm_ppt_v3_information *pptable_information =
		(struct phm_ppt_v3_information *)hwmgr->pptable;
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct vega20_od8_settings *od_settings = &(data->od8_settings);

	od_settings->overdrive8_capabilities = 0;

	if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_GFXCLK_LIMITS] &&
		    pptable_information->od_settings_max[OD8_SETTING_GFXCLK_FMAX] > 0 &&
		    pptable_information->od_settings_min[OD8_SETTING_GFXCLK_FMIN] > 0 &&
		    (pptable_information->od_settings_max[OD8_SETTING_GFXCLK_FMAX] >=
		     pptable_information->od_settings_min[OD8_SETTING_GFXCLK_FMIN]))
			od_settings->overdrive8_capabilities |= OD8_GFXCLK_LIMITS;

		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_GFXCLK_CURVE] &&
		    (pptable_information->od_settings_min[OD8_SETTING_GFXCLK_VOLTAGE1] >=
		     pp_table->MinVoltageGfx / VOLTAGE_SCALE) &&
		    (pptable_information->od_settings_max[OD8_SETTING_GFXCLK_VOLTAGE3] <=
		     pp_table->MaxVoltageGfx / VOLTAGE_SCALE) &&
		    (pptable_information->od_settings_max[OD8_SETTING_GFXCLK_VOLTAGE3] >=
		     pptable_information->od_settings_min[OD8_SETTING_GFXCLK_VOLTAGE1]))
			od_settings->overdrive8_capabilities |= OD8_GFXCLK_CURVE;
	}

	if (data->smu_features[GNLD_DPM_UCLK].enabled) {
		/* the lowest permitted UCLK Fmax OD value is the second highest default UCLK DPM level */
		pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX] =
			data->dpm_table.mem_table.dpm_levels[data->dpm_table.mem_table.count - 2].value;
		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_UCLK_MAX] &&
		    pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX] > 0 &&
		    pptable_information->od_settings_max[OD8_SETTING_UCLK_FMAX] > 0 &&
		    (pptable_information->od_settings_max[OD8_SETTING_UCLK_FMAX] >=
		     pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX]))
			od_settings->overdrive8_capabilities |= OD8_UCLK_MAX;
	}

	if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_POWER_LIMIT] &&
	    pptable_information->od_settings_max[OD8_SETTING_POWER_PERCENTAGE] > 0 &&
	    pptable_information->od_settings_max[OD8_SETTING_POWER_PERCENTAGE] <= 100 &&
	    pptable_information->od_settings_min[OD8_SETTING_POWER_PERCENTAGE] > 0 &&
	    pptable_information->od_settings_min[OD8_SETTING_POWER_PERCENTAGE] <= 100)
		od_settings->overdrive8_capabilities |= OD8_POWER_LIMIT;

	if (data->smu_features[GNLD_FAN_CONTROL].enabled) {
		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_ACOUSTIC_LIMIT] &&
		    pptable_information->od_settings_min[OD8_SETTING_FAN_ACOUSTIC_LIMIT] > 0 &&
		    pptable_information->od_settings_max[OD8_SETTING_FAN_ACOUSTIC_LIMIT] > 0 &&
		    (pptable_information->od_settings_max[OD8_SETTING_FAN_ACOUSTIC_LIMIT] >=
		     pptable_information->od_settings_min[OD8_SETTING_FAN_ACOUSTIC_LIMIT]))
			od_settings->overdrive8_capabilities |= OD8_ACOUSTIC_LIMIT_SCLK;

		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_SPEED_MIN] &&
		    (pptable_information->od_settings_min[OD8_SETTING_FAN_MIN_SPEED] >=
		     (pp_table->FanPwmMin * pp_table->FanMaximumRpm / 100)) &&
		    pptable_information->od_settings_max[OD8_SETTING_FAN_MIN_SPEED] > 0 &&
		    (pptable_information->od_settings_max[OD8_SETTING_FAN_MIN_SPEED] >=
		     pptable_information->od_settings_min[OD8_SETTING_FAN_MIN_SPEED]))
			od_settings->overdrive8_capabilities |= OD8_FAN_SPEED_MIN;
	}

	if (data->smu_features[GNLD_THERMAL].enabled) {
		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_TEMPERATURE_FAN] &&
		    pptable_information->od_settings_max[OD8_SETTING_FAN_TARGET_TEMP] > 0 &&
		    pptable_information->od_settings_min[OD8_SETTING_FAN_TARGET_TEMP] > 0 &&
		    (pptable_information->od_settings_max[OD8_SETTING_FAN_TARGET_TEMP] >=
		     pptable_information->od_settings_min[OD8_SETTING_FAN_TARGET_TEMP]))
			od_settings->overdrive8_capabilities |= OD8_TEMPERATURE_FAN;

		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_TEMPERATURE_SYSTEM] &&
		    pptable_information->od_settings_max[OD8_SETTING_OPERATING_TEMP_MAX] > 0 &&
		    pptable_information->od_settings_min[OD8_SETTING_OPERATING_TEMP_MAX] > 0 &&
		    (pptable_information->od_settings_max[OD8_SETTING_OPERATING_TEMP_MAX] >=
		     pptable_information->od_settings_min[OD8_SETTING_OPERATING_TEMP_MAX]))
			od_settings->overdrive8_capabilities |= OD8_TEMPERATURE_SYSTEM;
	}

	if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_MEMORY_TIMING_TUNE])
		od_settings->overdrive8_capabilities |= OD8_MEMORY_TIMING_TUNE;

	if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_ZERO_RPM_CONTROL] &&
	    pp_table->FanZeroRpmEnable)
		od_settings->overdrive8_capabilities |= OD8_FAN_ZERO_RPM_CONTROL;

	if (!od_settings->overdrive8_capabilities)
		hwmgr->od_enabled = false;

	return 0;
}

static int vega20_od8_set_feature_id(
		struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_od8_settings *od_settings = &(data->od8_settings);

	if (od_settings->overdrive8_capabilities & OD8_GFXCLK_LIMITS) {
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id =
			OD8_GFXCLK_LIMITS;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id =
			OD8_GFXCLK_LIMITS;
	} else {
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id =
			0;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id =
			0;
	}

	if (od_settings->overdrive8_capabilities & OD8_GFXCLK_CURVE) {
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id =
			OD8_GFXCLK_CURVE;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id =
			OD8_GFXCLK_CURVE;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].feature_id =
			OD8_GFXCLK_CURVE;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id =
			OD8_GFXCLK_CURVE;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].feature_id =
			OD8_GFXCLK_CURVE;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id =
			OD8_GFXCLK_CURVE;
	} else {
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id =
			0;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id =
			0;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].feature_id =
			0;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id =
			0;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].feature_id =
			0;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id =
			0;
	}

	if (od_settings->overdrive8_capabilities & OD8_UCLK_MAX)
		od_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id = OD8_UCLK_MAX;
	else
		od_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id = 0;

	if (od_settings->overdrive8_capabilities & OD8_POWER_LIMIT)
		od_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].feature_id = OD8_POWER_LIMIT;
	else
		od_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].feature_id = 0;

	if (od_settings->overdrive8_capabilities & OD8_ACOUSTIC_LIMIT_SCLK)
		od_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].feature_id =
			OD8_ACOUSTIC_LIMIT_SCLK;
	else
		od_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].feature_id =
			0;

	if (od_settings->overdrive8_capabilities & OD8_FAN_SPEED_MIN)
		od_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].feature_id =
			OD8_FAN_SPEED_MIN;
	else
		od_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].feature_id =
			0;

	if (od_settings->overdrive8_capabilities & OD8_TEMPERATURE_FAN)
		od_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].feature_id =
			OD8_TEMPERATURE_FAN;
	else
		od_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].feature_id =
			0;

	if (od_settings->overdrive8_capabilities & OD8_TEMPERATURE_SYSTEM)
		od_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].feature_id =
			OD8_TEMPERATURE_SYSTEM;
	else
		od_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].feature_id =
			0;

	return 0;
}

static int vega20_od8_get_gfx_clock_base_voltage(
		struct pp_hwmgr *hwmgr,
		uint32_t *voltage,
		uint32_t freq)
{
	int ret = 0;

	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_GetAVFSVoltageByDpm,
			((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq),
			voltage);
	PP_ASSERT_WITH_CODE(!ret,
			"[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!",
			return ret);

	*voltage = *voltage / VOLTAGE_SCALE;

	return 0;
}

static int vega20_od8_initialize_default_settings(
		struct pp_hwmgr *hwmgr)
{
	struct phm_ppt_v3_information *pptable_information =
		(struct phm_ppt_v3_information *)hwmgr->pptable;
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_od8_settings *od8_settings = &(data->od8_settings);
	OverDriveTable_t *od_table = &(data->smc_state_table.overdrive_table);
	int i, ret = 0;

	/* Set Feature Capabilities */
	vega20_od8_set_feature_capabilities(hwmgr);

	/* Map FeatureID to individual settings */
	vega20_od8_set_feature_id(hwmgr);

	/* Set default values */
	ret = smum_smc_table_manager(hwmgr, (uint8_t *)od_table, TABLE_OVERDRIVE, true);
	PP_ASSERT_WITH_CODE(!ret,
			"Failed to export over drive table!",
			return ret);

	if (od8_settings->overdrive8_capabilities & OD8_GFXCLK_LIMITS) {
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].default_value =
			od_table->GfxclkFmin;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].default_value =
			od_table->GfxclkFmax;
	} else {
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].default_value =
			0;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].default_value =
			0;
	}

	if (od8_settings->overdrive8_capabilities & OD8_GFXCLK_CURVE) {
		od_table->GfxclkFreq1 = od_table->GfxclkFmin;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].default_value =
			od_table->GfxclkFreq1;

		od_table->GfxclkFreq3 = od_table->GfxclkFmax;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].default_value =
			od_table->GfxclkFreq3;

		od_table->GfxclkFreq2 = (od_table->GfxclkFreq1 + od_table->GfxclkFreq3) / 2;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].default_value =
			od_table->GfxclkFreq2;

		PP_ASSERT_WITH_CODE(!vega20_od8_get_gfx_clock_base_voltage(hwmgr,
				&(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value),
				od_table->GfxclkFreq1),
				"[PhwVega20_OD8_InitializeDefaultSettings] Failed to get Base clock voltage from SMU!",
				od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value = 0);
		od_table->GfxclkVolt1 = od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value
			* VOLTAGE_SCALE;

		PP_ASSERT_WITH_CODE(!vega20_od8_get_gfx_clock_base_voltage(hwmgr,
				&(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value),
				od_table->GfxclkFreq2),
				"[PhwVega20_OD8_InitializeDefaultSettings] Failed to get Base clock voltage from SMU!",
				od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value = 0);
		od_table->GfxclkVolt2 = od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value
			* VOLTAGE_SCALE;

		PP_ASSERT_WITH_CODE(!vega20_od8_get_gfx_clock_base_voltage(hwmgr,
				&(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value),
				od_table->GfxclkFreq3),
				"[PhwVega20_OD8_InitializeDefaultSettings] Failed to get Base clock voltage from SMU!",
				od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value = 0);
		od_table->GfxclkVolt3 = od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value
			* VOLTAGE_SCALE;
	} else {
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].default_value =
			0;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value =
			0;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].default_value =
			0;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value =
			0;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].default_value =
			0;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value =
			0;
	}

	if (od8_settings->overdrive8_capabilities & OD8_UCLK_MAX)
		od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].default_value =
			od_table->UclkFmax;
	else
		od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].default_value =
			0;

	if (od8_settings->overdrive8_capabilities & OD8_POWER_LIMIT)
		od8_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].default_value =
			od_table->OverDrivePct;
	else
		od8_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].default_value =
			0;

	if (od8_settings->overdrive8_capabilities & OD8_ACOUSTIC_LIMIT_SCLK)
		od8_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].default_value =
			od_table->FanMaximumRpm;
	else
		od8_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].default_value =
			0;

	if (od8_settings->overdrive8_capabilities & OD8_FAN_SPEED_MIN)
		od8_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].default_value =
			od_table->FanMinimumPwm * data->smc_state_table.pp_table.FanMaximumRpm / 100;
	else
		od8_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].default_value =
			0;

	if (od8_settings->overdrive8_capabilities & OD8_TEMPERATURE_FAN)
		od8_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].default_value =
			od_table->FanTargetTemperature;
	else
		od8_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].default_value =
			0;

	if (od8_settings->overdrive8_capabilities & OD8_TEMPERATURE_SYSTEM)
		od8_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].default_value =
			od_table->MaxOpTemp;
	else
		od8_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].default_value =
			0;

	for (i = 0; i < OD8_SETTING_COUNT; i++) {
		if (od8_settings->od8_settings_array[i].feature_id) {
			od8_settings->od8_settings_array[i].min_value =
				pptable_information->od_settings_min[i];
			od8_settings->od8_settings_array[i].max_value =
				pptable_information->od_settings_max[i];
			od8_settings->od8_settings_array[i].current_value =
				od8_settings->od8_settings_array[i].default_value;
		} else {
			od8_settings->od8_settings_array[i].min_value =
				0;
			od8_settings->od8_settings_array[i].max_value =
				0;
			od8_settings->od8_settings_array[i].current_value =
				0;
		}
	}

	ret = smum_smc_table_manager(hwmgr, (uint8_t *)od_table, TABLE_OVERDRIVE, false);
	PP_ASSERT_WITH_CODE(!ret,
			"Failed to import over drive table!",
			return ret);

	return 0;
}
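
/*
 * Note (added for clarity, not in the original source): vega20_od8_set_settings()
 * below is the common helper used by vega20_set_sclk_od() and
 * vega20_set_mclk_od() further down to program OD8_SETTING_GFXCLK_FMAX and
 * OD8_SETTING_UCLK_FMAX respectively.
 */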
static int vega20_od8_set_settings(
		struct pp_hwmgr *hwmgr,
		uint32_t index,
		uint32_t value)
{
	OverDriveTable_t od_table;
	int ret = 0;
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_od8_single_setting *od8_settings =
			data->od8_settings.od8_settings_array;

	ret = smum_smc_table_manager(hwmgr, (uint8_t *)(&od_table), TABLE_OVERDRIVE, true);
	PP_ASSERT_WITH_CODE(!ret,
			"Failed to export over drive table!",
			return ret);

	switch (index) {
	case OD8_SETTING_GFXCLK_FMIN:
		od_table.GfxclkFmin = (uint16_t)value;
		break;
	case OD8_SETTING_GFXCLK_FMAX:
		if (value < od8_settings[OD8_SETTING_GFXCLK_FMAX].min_value ||
		    value > od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value)
			return -EINVAL;

		od_table.GfxclkFmax = (uint16_t)value;
		break;
	case OD8_SETTING_GFXCLK_FREQ1:
		od_table.GfxclkFreq1 = (uint16_t)value;
		break;
	case OD8_SETTING_GFXCLK_VOLTAGE1:
		od_table.GfxclkVolt1 = (uint16_t)value;
		break;
	case OD8_SETTING_GFXCLK_FREQ2:
		od_table.GfxclkFreq2 = (uint16_t)value;
		break;
	case OD8_SETTING_GFXCLK_VOLTAGE2:
		od_table.GfxclkVolt2 = (uint16_t)value;
		break;
	case OD8_SETTING_GFXCLK_FREQ3:
		od_table.GfxclkFreq3 = (uint16_t)value;
		break;
	case OD8_SETTING_GFXCLK_VOLTAGE3:
		od_table.GfxclkVolt3 = (uint16_t)value;
		break;
	case OD8_SETTING_UCLK_FMAX:
		if (value < od8_settings[OD8_SETTING_UCLK_FMAX].min_value ||
		    value > od8_settings[OD8_SETTING_UCLK_FMAX].max_value)
			return -EINVAL;
		od_table.UclkFmax = (uint16_t)value;
		break;
	case OD8_SETTING_POWER_PERCENTAGE:
		od_table.OverDrivePct = (int16_t)value;
		break;
	case OD8_SETTING_FAN_ACOUSTIC_LIMIT:
		od_table.FanMaximumRpm = (uint16_t)value;
		break;
	case OD8_SETTING_FAN_MIN_SPEED:
		od_table.FanMinimumPwm = (uint16_t)value;
		break;
	case OD8_SETTING_FAN_TARGET_TEMP:
		od_table.FanTargetTemperature = (uint16_t)value;
		break;
	case OD8_SETTING_OPERATING_TEMP_MAX:
		od_table.MaxOpTemp = (uint16_t)value;
		break;
	}

	ret = smum_smc_table_manager(hwmgr, (uint8_t *)(&od_table), TABLE_OVERDRIVE, false);
	PP_ASSERT_WITH_CODE(!ret,
			"Failed to import over drive table!",
			return ret);

	return 0;
}

static int vega20_get_sclk_od(
		struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = hwmgr->backend;
	struct vega20_single_dpm_table *sclk_table =
		&(data->dpm_table.gfx_table);
	struct vega20_single_dpm_table *golden_sclk_table =
		&(data->golden_dpm_table.gfx_table);
	int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
	int golden_value = golden_sclk_table->dpm_levels
		[golden_sclk_table->count - 1].value;

	/* od percentage */
	value -= golden_value;
	value = DIV_ROUND_UP(value * 100, golden_value);

	return value;
}
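
/*
 * Worked example with illustrative numbers (not from the original source):
 * with a golden GFXCLK Fmax of 1800 MHz, a current Fmax of 1890 MHz reads back
 * above as (1890 - 1800) * 100 / 1800 = 5 percent, and vega20_set_sclk_od(hwmgr, 5)
 * below would program 1800 + 1800 * 5 / 100 = 1890 MHz as the new OD8 GFXCLK Fmax.
 */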
retrieve updated gfxclk table */ 1505 ret = vega20_setup_gfxclk_dpm_table(hwmgr); 1506 PP_ASSERT_WITH_CODE(!ret, 1507 "[SetSclkOD] failed to refresh gfxclk table!", 1508 return ret); 1509 1510 return 0; 1511 } 1512 1513 static int vega20_get_mclk_od( 1514 struct pp_hwmgr *hwmgr) 1515 { 1516 struct vega20_hwmgr *data = hwmgr->backend; 1517 struct vega20_single_dpm_table *mclk_table = 1518 &(data->dpm_table.mem_table); 1519 struct vega20_single_dpm_table *golden_mclk_table = 1520 &(data->golden_dpm_table.mem_table); 1521 int value = mclk_table->dpm_levels[mclk_table->count - 1].value; 1522 int golden_value = golden_mclk_table->dpm_levels 1523 [golden_mclk_table->count - 1].value; 1524 1525 /* od percentage */ 1526 value -= golden_value; 1527 value = DIV_ROUND_UP(value * 100, golden_value); 1528 1529 return value; 1530 } 1531 1532 static int vega20_set_mclk_od( 1533 struct pp_hwmgr *hwmgr, uint32_t value) 1534 { 1535 struct vega20_hwmgr *data = hwmgr->backend; 1536 struct vega20_single_dpm_table *golden_mclk_table = 1537 &(data->golden_dpm_table.mem_table); 1538 uint32_t od_mclk; 1539 int ret = 0; 1540 1541 od_mclk = golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * value; 1542 od_mclk /= 100; 1543 od_mclk += golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; 1544 1545 ret = vega20_od8_set_settings(hwmgr, OD8_SETTING_UCLK_FMAX, od_mclk); 1546 PP_ASSERT_WITH_CODE(!ret, 1547 "[SetMclkOD] failed to set od memclk!", 1548 return ret); 1549 1550 /* retrieve updated memclk table */ 1551 ret = vega20_setup_memclk_dpm_table(hwmgr); 1552 PP_ASSERT_WITH_CODE(!ret, 1553 "[SetMclkOD] failed to refresh memclk table!", 1554 return ret); 1555 1556 return 0; 1557 } 1558 1559 static int vega20_populate_umdpstate_clocks( 1560 struct pp_hwmgr *hwmgr) 1561 { 1562 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 1563 struct vega20_single_dpm_table *gfx_table = &(data->dpm_table.gfx_table); 1564 struct vega20_single_dpm_table *mem_table = &(data->dpm_table.mem_table); 1565 1566 hwmgr->pstate_sclk = gfx_table->dpm_levels[0].value; 1567 hwmgr->pstate_mclk = mem_table->dpm_levels[0].value; 1568 1569 if (gfx_table->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL && 1570 mem_table->count > VEGA20_UMD_PSTATE_MCLK_LEVEL) { 1571 hwmgr->pstate_sclk = gfx_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value; 1572 hwmgr->pstate_mclk = mem_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value; 1573 } 1574 1575 hwmgr->pstate_sclk = hwmgr->pstate_sclk * 100; 1576 hwmgr->pstate_mclk = hwmgr->pstate_mclk * 100; 1577 1578 return 0; 1579 } 1580 1581 static int vega20_get_max_sustainable_clock(struct pp_hwmgr *hwmgr, 1582 PP_Clock *clock, PPCLK_e clock_select) 1583 { 1584 int ret = 0; 1585 1586 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, 1587 PPSMC_MSG_GetDcModeMaxDpmFreq, 1588 (clock_select << 16), 1589 clock)) == 0, 1590 "[GetMaxSustainableClock] Failed to get max DC clock from SMC!", 1591 return ret); 1592 1593 /* if DC limit is zero, return AC limit */ 1594 if (*clock == 0) { 1595 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, 1596 PPSMC_MSG_GetMaxDpmFreq, 1597 (clock_select << 16), 1598 clock)) == 0, 1599 "[GetMaxSustainableClock] failed to get max AC clock from SMC!", 1600 return ret); 1601 } 1602 1603 return 0; 1604 } 1605 1606 static int vega20_init_max_sustainable_clocks(struct pp_hwmgr *hwmgr) 1607 { 1608 struct vega20_hwmgr *data = 1609 (struct vega20_hwmgr *)(hwmgr->backend); 1610 struct vega20_max_sustainable_clocks 
*max_sustainable_clocks = 1611 &(data->max_sustainable_clocks); 1612 int ret = 0; 1613 1614 max_sustainable_clocks->uclock = data->vbios_boot_state.mem_clock / 100; 1615 max_sustainable_clocks->soc_clock = data->vbios_boot_state.soc_clock / 100; 1616 max_sustainable_clocks->dcef_clock = data->vbios_boot_state.dcef_clock / 100; 1617 max_sustainable_clocks->display_clock = 0xFFFFFFFF; 1618 max_sustainable_clocks->phy_clock = 0xFFFFFFFF; 1619 max_sustainable_clocks->pixel_clock = 0xFFFFFFFF; 1620 1621 if (data->smu_features[GNLD_DPM_UCLK].enabled) 1622 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr, 1623 &(max_sustainable_clocks->uclock), 1624 PPCLK_UCLK)) == 0, 1625 "[InitMaxSustainableClocks] failed to get max UCLK from SMC!", 1626 return ret); 1627 1628 if (data->smu_features[GNLD_DPM_SOCCLK].enabled) 1629 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr, 1630 &(max_sustainable_clocks->soc_clock), 1631 PPCLK_SOCCLK)) == 0, 1632 "[InitMaxSustainableClocks] failed to get max SOCCLK from SMC!", 1633 return ret); 1634 1635 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) { 1636 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr, 1637 &(max_sustainable_clocks->dcef_clock), 1638 PPCLK_DCEFCLK)) == 0, 1639 "[InitMaxSustainableClocks] failed to get max DCEFCLK from SMC!", 1640 return ret); 1641 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr, 1642 &(max_sustainable_clocks->display_clock), 1643 PPCLK_DISPCLK)) == 0, 1644 "[InitMaxSustainableClocks] failed to get max DISPCLK from SMC!", 1645 return ret); 1646 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr, 1647 &(max_sustainable_clocks->phy_clock), 1648 PPCLK_PHYCLK)) == 0, 1649 "[InitMaxSustainableClocks] failed to get max PHYCLK from SMC!", 1650 return ret); 1651 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr, 1652 &(max_sustainable_clocks->pixel_clock), 1653 PPCLK_PIXCLK)) == 0, 1654 "[InitMaxSustainableClocks] failed to get max PIXCLK from SMC!", 1655 return ret); 1656 } 1657 1658 if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock) 1659 max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock; 1660 1661 return 0; 1662 } 1663 1664 static int vega20_enable_mgpu_fan_boost(struct pp_hwmgr *hwmgr) 1665 { 1666 int result; 1667 1668 result = smum_send_msg_to_smc(hwmgr, 1669 PPSMC_MSG_SetMGpuFanBoostLimitRpm, 1670 NULL); 1671 PP_ASSERT_WITH_CODE(!result, 1672 "[EnableMgpuFan] Failed to enable mgpu fan boost!", 1673 return result); 1674 1675 return 0; 1676 } 1677 1678 static void vega20_init_powergate_state(struct pp_hwmgr *hwmgr) 1679 { 1680 struct vega20_hwmgr *data = 1681 (struct vega20_hwmgr *)(hwmgr->backend); 1682 1683 data->uvd_power_gated = true; 1684 data->vce_power_gated = true; 1685 } 1686 1687 static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr) 1688 { 1689 int result = 0; 1690 1691 smum_send_msg_to_smc_with_parameter(hwmgr, 1692 PPSMC_MSG_NumOfDisplays, 0, NULL); 1693 1694 result = vega20_set_allowed_featuresmask(hwmgr); 1695 PP_ASSERT_WITH_CODE(!result, 1696 "[EnableDPMTasks] Failed to set allowed featuresmask!\n", 1697 return result); 1698 1699 result = vega20_init_smc_table(hwmgr); 1700 PP_ASSERT_WITH_CODE(!result, 1701 "[EnableDPMTasks] Failed to initialize SMC table!", 1702 return result); 1703 1704 result = vega20_run_btc(hwmgr); 1705 PP_ASSERT_WITH_CODE(!result, 1706 "[EnableDPMTasks] Failed to run btc!", 1707 return result); 1708 1709 result = vega20_run_btc_afll(hwmgr); 1710 
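	/* DPM features are only enabled further below once both BTC runs succeed. */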
PP_ASSERT_WITH_CODE(!result, 1711 "[EnableDPMTasks] Failed to run btc afll!", 1712 return result); 1713 1714 result = vega20_enable_all_smu_features(hwmgr); 1715 PP_ASSERT_WITH_CODE(!result, 1716 "[EnableDPMTasks] Failed to enable all smu features!", 1717 return result); 1718 1719 result = vega20_override_pcie_parameters(hwmgr); 1720 PP_ASSERT_WITH_CODE(!result, 1721 "[EnableDPMTasks] Failed to override pcie parameters!", 1722 return result); 1723 1724 result = vega20_notify_smc_display_change(hwmgr); 1725 PP_ASSERT_WITH_CODE(!result, 1726 "[EnableDPMTasks] Failed to notify smc display change!", 1727 return result); 1728 1729 result = vega20_send_clock_ratio(hwmgr); 1730 PP_ASSERT_WITH_CODE(!result, 1731 "[EnableDPMTasks] Failed to send clock ratio!", 1732 return result); 1733 1734 /* Initialize UVD/VCE powergating state */ 1735 vega20_init_powergate_state(hwmgr); 1736 1737 result = vega20_setup_default_dpm_tables(hwmgr); 1738 PP_ASSERT_WITH_CODE(!result, 1739 "[EnableDPMTasks] Failed to setup default DPM tables!", 1740 return result); 1741 1742 result = vega20_init_max_sustainable_clocks(hwmgr); 1743 PP_ASSERT_WITH_CODE(!result, 1744 "[EnableDPMTasks] Failed to get maximum sustainable clocks!", 1745 return result); 1746 1747 result = vega20_power_control_set_level(hwmgr); 1748 PP_ASSERT_WITH_CODE(!result, 1749 "[EnableDPMTasks] Failed to power control set level!", 1750 return result); 1751 1752 result = vega20_od8_initialize_default_settings(hwmgr); 1753 PP_ASSERT_WITH_CODE(!result, 1754 "[EnableDPMTasks] Failed to initialize odn settings!", 1755 return result); 1756 1757 result = vega20_populate_umdpstate_clocks(hwmgr); 1758 PP_ASSERT_WITH_CODE(!result, 1759 "[EnableDPMTasks] Failed to populate umdpstate clocks!", 1760 return result); 1761 1762 result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetPptLimit, 1763 POWER_SOURCE_AC << 16, &hwmgr->default_power_limit); 1764 PP_ASSERT_WITH_CODE(!result, 1765 "[GetPptLimit] get default PPT limit failed!", 1766 return result); 1767 hwmgr->power_limit = 1768 hwmgr->default_power_limit; 1769 1770 return 0; 1771 } 1772 1773 static uint32_t vega20_find_lowest_dpm_level( 1774 struct vega20_single_dpm_table *table) 1775 { 1776 uint32_t i; 1777 1778 for (i = 0; i < table->count; i++) { 1779 if (table->dpm_levels[i].enabled) 1780 break; 1781 } 1782 if (i >= table->count) { 1783 i = 0; 1784 table->dpm_levels[i].enabled = true; 1785 } 1786 1787 return i; 1788 } 1789 1790 static uint32_t vega20_find_highest_dpm_level( 1791 struct vega20_single_dpm_table *table) 1792 { 1793 int i = 0; 1794 1795 PP_ASSERT_WITH_CODE(table != NULL, 1796 "[FindHighestDPMLevel] DPM Table does not exist!", 1797 return 0); 1798 PP_ASSERT_WITH_CODE(table->count > 0, 1799 "[FindHighestDPMLevel] DPM Table has no entry!", 1800 return 0); 1801 PP_ASSERT_WITH_CODE(table->count <= MAX_REGULAR_DPM_NUMBER, 1802 "[FindHighestDPMLevel] DPM Table has too many entries!", 1803 return MAX_REGULAR_DPM_NUMBER - 1); 1804 1805 for (i = table->count - 1; i >= 0; i--) { 1806 if (table->dpm_levels[i].enabled) 1807 break; 1808 } 1809 if (i < 0) { 1810 i = 0; 1811 table->dpm_levels[i].enabled = true; 1812 } 1813 1814 return i; 1815 } 1816 1817 static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask) 1818 { 1819 struct vega20_hwmgr *data = 1820 (struct vega20_hwmgr *)(hwmgr->backend); 1821 uint32_t min_freq; 1822 int ret = 0; 1823 1824 if (data->smu_features[GNLD_DPM_GFXCLK].enabled && 1825 (feature_mask & FEATURE_DPM_GFXCLK_MASK)) { 1826 min_freq = 
data->dpm_table.gfx_table.dpm_state.soft_min_level; 1827 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1828 hwmgr, PPSMC_MSG_SetSoftMinByFreq, 1829 (PPCLK_GFXCLK << 16) | (min_freq & 0xffff), 1830 NULL)), 1831 "Failed to set soft min gfxclk !", 1832 return ret); 1833 } 1834 1835 if (data->smu_features[GNLD_DPM_UCLK].enabled && 1836 (feature_mask & FEATURE_DPM_UCLK_MASK)) { 1837 min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level; 1838 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1839 hwmgr, PPSMC_MSG_SetSoftMinByFreq, 1840 (PPCLK_UCLK << 16) | (min_freq & 0xffff), 1841 NULL)), 1842 "Failed to set soft min memclk !", 1843 return ret); 1844 } 1845 1846 if (data->smu_features[GNLD_DPM_UVD].enabled && 1847 (feature_mask & FEATURE_DPM_UVD_MASK)) { 1848 min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level; 1849 1850 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1851 hwmgr, PPSMC_MSG_SetSoftMinByFreq, 1852 (PPCLK_VCLK << 16) | (min_freq & 0xffff), 1853 NULL)), 1854 "Failed to set soft min vclk!", 1855 return ret); 1856 1857 min_freq = data->dpm_table.dclk_table.dpm_state.soft_min_level; 1858 1859 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1860 hwmgr, PPSMC_MSG_SetSoftMinByFreq, 1861 (PPCLK_DCLK << 16) | (min_freq & 0xffff), 1862 NULL)), 1863 "Failed to set soft min dclk!", 1864 return ret); 1865 } 1866 1867 if (data->smu_features[GNLD_DPM_VCE].enabled && 1868 (feature_mask & FEATURE_DPM_VCE_MASK)) { 1869 min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level; 1870 1871 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1872 hwmgr, PPSMC_MSG_SetSoftMinByFreq, 1873 (PPCLK_ECLK << 16) | (min_freq & 0xffff), 1874 NULL)), 1875 "Failed to set soft min eclk!", 1876 return ret); 1877 } 1878 1879 if (data->smu_features[GNLD_DPM_SOCCLK].enabled && 1880 (feature_mask & FEATURE_DPM_SOCCLK_MASK)) { 1881 min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level; 1882 1883 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1884 hwmgr, PPSMC_MSG_SetSoftMinByFreq, 1885 (PPCLK_SOCCLK << 16) | (min_freq & 0xffff), 1886 NULL)), 1887 "Failed to set soft min socclk!", 1888 return ret); 1889 } 1890 1891 if (data->smu_features[GNLD_DPM_FCLK].enabled && 1892 (feature_mask & FEATURE_DPM_FCLK_MASK)) { 1893 min_freq = data->dpm_table.fclk_table.dpm_state.soft_min_level; 1894 1895 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1896 hwmgr, PPSMC_MSG_SetSoftMinByFreq, 1897 (PPCLK_FCLK << 16) | (min_freq & 0xffff), 1898 NULL)), 1899 "Failed to set soft min fclk!", 1900 return ret); 1901 } 1902 1903 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled && 1904 (feature_mask & FEATURE_DPM_DCEFCLK_MASK)) { 1905 min_freq = data->dpm_table.dcef_table.dpm_state.hard_min_level; 1906 1907 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1908 hwmgr, PPSMC_MSG_SetHardMinByFreq, 1909 (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff), 1910 NULL)), 1911 "Failed to set hard min dcefclk!", 1912 return ret); 1913 } 1914 1915 return ret; 1916 } 1917 1918 static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask) 1919 { 1920 struct vega20_hwmgr *data = 1921 (struct vega20_hwmgr *)(hwmgr->backend); 1922 uint32_t max_freq; 1923 int ret = 0; 1924 1925 if (data->smu_features[GNLD_DPM_GFXCLK].enabled && 1926 (feature_mask & FEATURE_DPM_GFXCLK_MASK)) { 1927 max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level; 1928 1929 PP_ASSERT_WITH_CODE(!(ret = 
smum_send_msg_to_smc_with_parameter( 1930 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 1931 (PPCLK_GFXCLK << 16) | (max_freq & 0xffff), 1932 NULL)), 1933 "Failed to set soft max gfxclk!", 1934 return ret); 1935 } 1936 1937 if (data->smu_features[GNLD_DPM_UCLK].enabled && 1938 (feature_mask & FEATURE_DPM_UCLK_MASK)) { 1939 max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level; 1940 1941 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1942 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 1943 (PPCLK_UCLK << 16) | (max_freq & 0xffff), 1944 NULL)), 1945 "Failed to set soft max memclk!", 1946 return ret); 1947 } 1948 1949 if (data->smu_features[GNLD_DPM_UVD].enabled && 1950 (feature_mask & FEATURE_DPM_UVD_MASK)) { 1951 max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level; 1952 1953 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1954 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 1955 (PPCLK_VCLK << 16) | (max_freq & 0xffff), 1956 NULL)), 1957 "Failed to set soft max vclk!", 1958 return ret); 1959 1960 max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level; 1961 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1962 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 1963 (PPCLK_DCLK << 16) | (max_freq & 0xffff), 1964 NULL)), 1965 "Failed to set soft max dclk!", 1966 return ret); 1967 } 1968 1969 if (data->smu_features[GNLD_DPM_VCE].enabled && 1970 (feature_mask & FEATURE_DPM_VCE_MASK)) { 1971 max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level; 1972 1973 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1974 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 1975 (PPCLK_ECLK << 16) | (max_freq & 0xffff), 1976 NULL)), 1977 "Failed to set soft max eclk!", 1978 return ret); 1979 } 1980 1981 if (data->smu_features[GNLD_DPM_SOCCLK].enabled && 1982 (feature_mask & FEATURE_DPM_SOCCLK_MASK)) { 1983 max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level; 1984 1985 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1986 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 1987 (PPCLK_SOCCLK << 16) | (max_freq & 0xffff), 1988 NULL)), 1989 "Failed to set soft max socclk!", 1990 return ret); 1991 } 1992 1993 if (data->smu_features[GNLD_DPM_FCLK].enabled && 1994 (feature_mask & FEATURE_DPM_FCLK_MASK)) { 1995 max_freq = data->dpm_table.fclk_table.dpm_state.soft_max_level; 1996 1997 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1998 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 1999 (PPCLK_FCLK << 16) | (max_freq & 0xffff), 2000 NULL)), 2001 "Failed to set soft max fclk!", 2002 return ret); 2003 } 2004 2005 return ret; 2006 } 2007 2008 static int vega20_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable) 2009 { 2010 struct vega20_hwmgr *data = 2011 (struct vega20_hwmgr *)(hwmgr->backend); 2012 int ret = 0; 2013 2014 if (data->smu_features[GNLD_DPM_VCE].supported) { 2015 if (data->smu_features[GNLD_DPM_VCE].enabled == enable) { 2016 if (enable) 2017 PP_DBG_LOG("[EnableDisableVCEDPM] feature VCE DPM already enabled!\n"); 2018 else 2019 PP_DBG_LOG("[EnableDisableVCEDPM] feature VCE DPM already disabled!\n"); 2020 } 2021 2022 ret = vega20_enable_smc_features(hwmgr, 2023 enable, 2024 data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap); 2025 PP_ASSERT_WITH_CODE(!ret, 2026 "Attempt to Enable/Disable DPM VCE Failed!", 2027 return ret); 2028 data->smu_features[GNLD_DPM_VCE].enabled = enable; 2029 } 2030 2031 return 0; 2032 } 2033 2034 static int vega20_get_clock_ranges(struct pp_hwmgr *hwmgr, 2035 uint32_t *clock, 2036 PPCLK_e clock_select, 2037 bool max) 2038 { 2039 
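	/*
	 * Ask the SMU for one end of the DPM range of 'clock_select':
	 * PPSMC_MSG_GetMaxDpmFreq / PPSMC_MSG_GetMinDpmFreq take the clock
	 * id in the upper 16 bits of the parameter and return the frequency
	 * (in MHz, matching the DPM table entries) through 'clock'.
	 */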
int ret; 2040 *clock = 0; 2041 2042 if (max) { 2043 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, 2044 PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16), 2045 clock)) == 0, 2046 "[GetClockRanges] Failed to get max clock from SMC!", 2047 return ret); 2048 } else { 2049 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, 2050 PPSMC_MSG_GetMinDpmFreq, 2051 (clock_select << 16), 2052 clock)) == 0, 2053 "[GetClockRanges] Failed to get min clock from SMC!", 2054 return ret); 2055 } 2056 2057 return 0; 2058 } 2059 2060 static uint32_t vega20_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) 2061 { 2062 struct vega20_hwmgr *data = 2063 (struct vega20_hwmgr *)(hwmgr->backend); 2064 uint32_t gfx_clk; 2065 int ret = 0; 2066 2067 PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_GFXCLK].enabled, 2068 "[GetSclks]: gfxclk dpm not enabled!\n", 2069 return -EPERM); 2070 2071 if (low) { 2072 ret = vega20_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, false); 2073 PP_ASSERT_WITH_CODE(!ret, 2074 "[GetSclks]: fail to get min PPCLK_GFXCLK\n", 2075 return ret); 2076 } else { 2077 ret = vega20_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, true); 2078 PP_ASSERT_WITH_CODE(!ret, 2079 "[GetSclks]: fail to get max PPCLK_GFXCLK\n", 2080 return ret); 2081 } 2082 2083 return (gfx_clk * 100); 2084 } 2085 2086 static uint32_t vega20_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) 2087 { 2088 struct vega20_hwmgr *data = 2089 (struct vega20_hwmgr *)(hwmgr->backend); 2090 uint32_t mem_clk; 2091 int ret = 0; 2092 2093 PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_UCLK].enabled, 2094 "[MemMclks]: memclk dpm not enabled!\n", 2095 return -EPERM); 2096 2097 if (low) { 2098 ret = vega20_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, false); 2099 PP_ASSERT_WITH_CODE(!ret, 2100 "[GetMclks]: fail to get min PPCLK_UCLK\n", 2101 return ret); 2102 } else { 2103 ret = vega20_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, true); 2104 PP_ASSERT_WITH_CODE(!ret, 2105 "[GetMclks]: fail to get max PPCLK_UCLK\n", 2106 return ret); 2107 } 2108 2109 return (mem_clk * 100); 2110 } 2111 2112 static int vega20_get_metrics_table(struct pp_hwmgr *hwmgr, 2113 SmuMetrics_t *metrics_table, 2114 bool bypass_cache) 2115 { 2116 struct vega20_hwmgr *data = 2117 (struct vega20_hwmgr *)(hwmgr->backend); 2118 int ret = 0; 2119 2120 if (bypass_cache || 2121 !data->metrics_time || 2122 time_after(jiffies, data->metrics_time + msecs_to_jiffies(1))) { 2123 ret = smum_smc_table_manager(hwmgr, 2124 (uint8_t *)(&data->metrics_table), 2125 TABLE_SMU_METRICS, 2126 true); 2127 if (ret) { 2128 pr_info("Failed to export SMU metrics table!\n"); 2129 return ret; 2130 } 2131 data->metrics_time = jiffies; 2132 } 2133 2134 if (metrics_table) 2135 memcpy(metrics_table, &data->metrics_table, sizeof(SmuMetrics_t)); 2136 2137 return ret; 2138 } 2139 2140 static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr, 2141 uint32_t *query) 2142 { 2143 int ret = 0; 2144 SmuMetrics_t metrics_table; 2145 2146 ret = vega20_get_metrics_table(hwmgr, &metrics_table, false); 2147 if (ret) 2148 return ret; 2149 2150 /* For the 40.46 release, they changed the value name */ 2151 if (hwmgr->smu_version == 0x282e00) 2152 *query = metrics_table.AverageSocketPower << 8; 2153 else 2154 *query = metrics_table.CurrSocketPower << 8; 2155 2156 return ret; 2157 } 2158 2159 static int vega20_get_current_clk_freq(struct pp_hwmgr *hwmgr, 2160 PPCLK_e clk_id, uint32_t *clk_freq) 2161 { 2162 int ret = 0; 2163 2164 *clk_freq = 0; 2165 2166 PP_ASSERT_WITH_CODE((ret = 
smum_send_msg_to_smc_with_parameter(hwmgr, 2167 PPSMC_MSG_GetDpmClockFreq, (clk_id << 16), 2168 clk_freq)) == 0, 2169 "[GetCurrentClkFreq] Attempt to get Current Frequency Failed!", 2170 return ret); 2171 2172 *clk_freq = *clk_freq * 100; 2173 2174 return 0; 2175 } 2176 2177 static int vega20_get_current_activity_percent(struct pp_hwmgr *hwmgr, 2178 int idx, 2179 uint32_t *activity_percent) 2180 { 2181 int ret = 0; 2182 SmuMetrics_t metrics_table; 2183 2184 ret = vega20_get_metrics_table(hwmgr, &metrics_table, false); 2185 if (ret) 2186 return ret; 2187 2188 switch (idx) { 2189 case AMDGPU_PP_SENSOR_GPU_LOAD: 2190 *activity_percent = metrics_table.AverageGfxActivity; 2191 break; 2192 case AMDGPU_PP_SENSOR_MEM_LOAD: 2193 *activity_percent = metrics_table.AverageUclkActivity; 2194 break; 2195 default: 2196 pr_err("Invalid index for retrieving clock activity\n"); 2197 return -EINVAL; 2198 } 2199 2200 return ret; 2201 } 2202 2203 static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx, 2204 void *value, int *size) 2205 { 2206 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2207 struct amdgpu_device *adev = hwmgr->adev; 2208 SmuMetrics_t metrics_table; 2209 uint32_t val_vid; 2210 int ret = 0; 2211 2212 switch (idx) { 2213 case AMDGPU_PP_SENSOR_GFX_SCLK: 2214 ret = vega20_get_metrics_table(hwmgr, &metrics_table, false); 2215 if (ret) 2216 return ret; 2217 2218 *((uint32_t *)value) = metrics_table.AverageGfxclkFrequency * 100; 2219 *size = 4; 2220 break; 2221 case AMDGPU_PP_SENSOR_GFX_MCLK: 2222 ret = vega20_get_current_clk_freq(hwmgr, 2223 PPCLK_UCLK, 2224 (uint32_t *)value); 2225 if (!ret) 2226 *size = 4; 2227 break; 2228 case AMDGPU_PP_SENSOR_GPU_LOAD: 2229 case AMDGPU_PP_SENSOR_MEM_LOAD: 2230 ret = vega20_get_current_activity_percent(hwmgr, idx, (uint32_t *)value); 2231 if (!ret) 2232 *size = 4; 2233 break; 2234 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: 2235 *((uint32_t *)value) = vega20_thermal_get_temperature(hwmgr); 2236 *size = 4; 2237 break; 2238 case AMDGPU_PP_SENSOR_EDGE_TEMP: 2239 ret = vega20_get_metrics_table(hwmgr, &metrics_table, false); 2240 if (ret) 2241 return ret; 2242 2243 *((uint32_t *)value) = metrics_table.TemperatureEdge * 2244 PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 2245 *size = 4; 2246 break; 2247 case AMDGPU_PP_SENSOR_MEM_TEMP: 2248 ret = vega20_get_metrics_table(hwmgr, &metrics_table, false); 2249 if (ret) 2250 return ret; 2251 2252 *((uint32_t *)value) = metrics_table.TemperatureHBM * 2253 PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 2254 *size = 4; 2255 break; 2256 case AMDGPU_PP_SENSOR_UVD_POWER: 2257 *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1; 2258 *size = 4; 2259 break; 2260 case AMDGPU_PP_SENSOR_VCE_POWER: 2261 *((uint32_t *)value) = data->vce_power_gated ? 
0 : 1; 2262 *size = 4; 2263 break; 2264 case AMDGPU_PP_SENSOR_GPU_POWER: 2265 *size = 16; 2266 ret = vega20_get_gpu_power(hwmgr, (uint32_t *)value); 2267 break; 2268 case AMDGPU_PP_SENSOR_VDDGFX: 2269 val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) & 2270 SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >> 2271 SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT; 2272 *((uint32_t *)value) = 2273 (uint32_t)convert_to_vddc((uint8_t)val_vid); 2274 break; 2275 case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK: 2276 ret = vega20_get_enabled_smc_features(hwmgr, (uint64_t *)value); 2277 if (!ret) 2278 *size = 8; 2279 break; 2280 default: 2281 ret = -EINVAL; 2282 break; 2283 } 2284 return ret; 2285 } 2286 2287 static int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr, 2288 struct pp_display_clock_request *clock_req) 2289 { 2290 int result = 0; 2291 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2292 enum amd_pp_clock_type clk_type = clock_req->clock_type; 2293 uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000; 2294 PPCLK_e clk_select = 0; 2295 uint32_t clk_request = 0; 2296 2297 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) { 2298 switch (clk_type) { 2299 case amd_pp_dcef_clock: 2300 clk_select = PPCLK_DCEFCLK; 2301 break; 2302 case amd_pp_disp_clock: 2303 clk_select = PPCLK_DISPCLK; 2304 break; 2305 case amd_pp_pixel_clock: 2306 clk_select = PPCLK_PIXCLK; 2307 break; 2308 case amd_pp_phy_clock: 2309 clk_select = PPCLK_PHYCLK; 2310 break; 2311 default: 2312 pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!"); 2313 result = -EINVAL; 2314 break; 2315 } 2316 2317 if (!result) { 2318 clk_request = (clk_select << 16) | clk_freq; 2319 result = smum_send_msg_to_smc_with_parameter(hwmgr, 2320 PPSMC_MSG_SetHardMinByFreq, 2321 clk_request, 2322 NULL); 2323 } 2324 } 2325 2326 return result; 2327 } 2328 2329 static int vega20_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, 2330 PHM_PerformanceLevelDesignation designation, uint32_t index, 2331 PHM_PerformanceLevel *level) 2332 { 2333 return 0; 2334 } 2335 2336 static int vega20_notify_smc_display_config_after_ps_adjustment( 2337 struct pp_hwmgr *hwmgr) 2338 { 2339 struct vega20_hwmgr *data = 2340 (struct vega20_hwmgr *)(hwmgr->backend); 2341 struct vega20_single_dpm_table *dpm_table = 2342 &data->dpm_table.mem_table; 2343 struct PP_Clocks min_clocks = {0}; 2344 struct pp_display_clock_request clock_req; 2345 int ret = 0; 2346 2347 min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk; 2348 min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk; 2349 min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock; 2350 2351 if (data->smu_features[GNLD_DPM_DCEFCLK].supported) { 2352 clock_req.clock_type = amd_pp_dcef_clock; 2353 clock_req.clock_freq_in_khz = min_clocks.dcefClock * 10; 2354 if (!vega20_display_clock_voltage_request(hwmgr, &clock_req)) { 2355 if (data->smu_features[GNLD_DS_DCEFCLK].supported) 2356 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter( 2357 hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk, 2358 min_clocks.dcefClockInSR / 100, 2359 NULL)) == 0, 2360 "Attempt to set divider for DCEFCLK Failed!", 2361 return ret); 2362 } else { 2363 pr_info("Attempt to set Hard Min for DCEFCLK Failed!"); 2364 } 2365 } 2366 2367 if (data->smu_features[GNLD_DPM_UCLK].enabled) { 2368 dpm_table->dpm_state.hard_min_level = min_clocks.memoryClock / 100; 2369 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr, 2370 
PPSMC_MSG_SetHardMinByFreq, 2371 (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level, 2372 NULL)), 2373 "[SetHardMinFreq] Set hard min uclk failed!", 2374 return ret); 2375 } 2376 2377 return 0; 2378 } 2379 2380 static int vega20_force_dpm_highest(struct pp_hwmgr *hwmgr) 2381 { 2382 struct vega20_hwmgr *data = 2383 (struct vega20_hwmgr *)(hwmgr->backend); 2384 uint32_t soft_level; 2385 int ret = 0; 2386 2387 soft_level = vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table)); 2388 2389 data->dpm_table.gfx_table.dpm_state.soft_min_level = 2390 data->dpm_table.gfx_table.dpm_state.soft_max_level = 2391 data->dpm_table.gfx_table.dpm_levels[soft_level].value; 2392 2393 soft_level = vega20_find_highest_dpm_level(&(data->dpm_table.mem_table)); 2394 2395 data->dpm_table.mem_table.dpm_state.soft_min_level = 2396 data->dpm_table.mem_table.dpm_state.soft_max_level = 2397 data->dpm_table.mem_table.dpm_levels[soft_level].value; 2398 2399 soft_level = vega20_find_highest_dpm_level(&(data->dpm_table.soc_table)); 2400 2401 data->dpm_table.soc_table.dpm_state.soft_min_level = 2402 data->dpm_table.soc_table.dpm_state.soft_max_level = 2403 data->dpm_table.soc_table.dpm_levels[soft_level].value; 2404 2405 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | 2406 FEATURE_DPM_UCLK_MASK | 2407 FEATURE_DPM_SOCCLK_MASK); 2408 PP_ASSERT_WITH_CODE(!ret, 2409 "Failed to upload boot level to highest!", 2410 return ret); 2411 2412 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | 2413 FEATURE_DPM_UCLK_MASK | 2414 FEATURE_DPM_SOCCLK_MASK); 2415 PP_ASSERT_WITH_CODE(!ret, 2416 "Failed to upload dpm max level to highest!", 2417 return ret); 2418 2419 return 0; 2420 } 2421 2422 static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr) 2423 { 2424 struct vega20_hwmgr *data = 2425 (struct vega20_hwmgr *)(hwmgr->backend); 2426 uint32_t soft_level; 2427 int ret = 0; 2428 2429 soft_level = vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table)); 2430 2431 data->dpm_table.gfx_table.dpm_state.soft_min_level = 2432 data->dpm_table.gfx_table.dpm_state.soft_max_level = 2433 data->dpm_table.gfx_table.dpm_levels[soft_level].value; 2434 2435 soft_level = vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table)); 2436 2437 data->dpm_table.mem_table.dpm_state.soft_min_level = 2438 data->dpm_table.mem_table.dpm_state.soft_max_level = 2439 data->dpm_table.mem_table.dpm_levels[soft_level].value; 2440 2441 soft_level = vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table)); 2442 2443 data->dpm_table.soc_table.dpm_state.soft_min_level = 2444 data->dpm_table.soc_table.dpm_state.soft_max_level = 2445 data->dpm_table.soc_table.dpm_levels[soft_level].value; 2446 2447 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | 2448 FEATURE_DPM_UCLK_MASK | 2449 FEATURE_DPM_SOCCLK_MASK); 2450 PP_ASSERT_WITH_CODE(!ret, 2451 "Failed to upload boot level to highest!", 2452 return ret); 2453 2454 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | 2455 FEATURE_DPM_UCLK_MASK | 2456 FEATURE_DPM_SOCCLK_MASK); 2457 PP_ASSERT_WITH_CODE(!ret, 2458 "Failed to upload dpm max level to highest!", 2459 return ret); 2460 2461 return 0; 2462 2463 } 2464 2465 static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr) 2466 { 2467 struct vega20_hwmgr *data = 2468 (struct vega20_hwmgr *)(hwmgr->backend); 2469 uint32_t soft_min_level, soft_max_level; 2470 int ret = 0; 2471 2472 /* gfxclk soft min/max settings */ 2473 soft_min_level = 2474 vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table)); 
2475 soft_max_level = 2476 vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table)); 2477 2478 data->dpm_table.gfx_table.dpm_state.soft_min_level = 2479 data->dpm_table.gfx_table.dpm_levels[soft_min_level].value; 2480 data->dpm_table.gfx_table.dpm_state.soft_max_level = 2481 data->dpm_table.gfx_table.dpm_levels[soft_max_level].value; 2482 2483 /* uclk soft min/max settings */ 2484 soft_min_level = 2485 vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table)); 2486 soft_max_level = 2487 vega20_find_highest_dpm_level(&(data->dpm_table.mem_table)); 2488 2489 data->dpm_table.mem_table.dpm_state.soft_min_level = 2490 data->dpm_table.mem_table.dpm_levels[soft_min_level].value; 2491 data->dpm_table.mem_table.dpm_state.soft_max_level = 2492 data->dpm_table.mem_table.dpm_levels[soft_max_level].value; 2493 2494 /* socclk soft min/max settings */ 2495 soft_min_level = 2496 vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table)); 2497 soft_max_level = 2498 vega20_find_highest_dpm_level(&(data->dpm_table.soc_table)); 2499 2500 data->dpm_table.soc_table.dpm_state.soft_min_level = 2501 data->dpm_table.soc_table.dpm_levels[soft_min_level].value; 2502 data->dpm_table.soc_table.dpm_state.soft_max_level = 2503 data->dpm_table.soc_table.dpm_levels[soft_max_level].value; 2504 2505 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | 2506 FEATURE_DPM_UCLK_MASK | 2507 FEATURE_DPM_SOCCLK_MASK); 2508 PP_ASSERT_WITH_CODE(!ret, 2509 "Failed to upload DPM Bootup Levels!", 2510 return ret); 2511 2512 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | 2513 FEATURE_DPM_UCLK_MASK | 2514 FEATURE_DPM_SOCCLK_MASK); 2515 PP_ASSERT_WITH_CODE(!ret, 2516 "Failed to upload DPM Max Levels!", 2517 return ret); 2518 2519 return 0; 2520 } 2521 2522 static int vega20_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level, 2523 uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask) 2524 { 2525 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2526 struct vega20_single_dpm_table *gfx_dpm_table = &(data->dpm_table.gfx_table); 2527 struct vega20_single_dpm_table *mem_dpm_table = &(data->dpm_table.mem_table); 2528 struct vega20_single_dpm_table *soc_dpm_table = &(data->dpm_table.soc_table); 2529 2530 *sclk_mask = 0; 2531 *mclk_mask = 0; 2532 *soc_mask = 0; 2533 2534 if (gfx_dpm_table->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL && 2535 mem_dpm_table->count > VEGA20_UMD_PSTATE_MCLK_LEVEL && 2536 soc_dpm_table->count > VEGA20_UMD_PSTATE_SOCCLK_LEVEL) { 2537 *sclk_mask = VEGA20_UMD_PSTATE_GFXCLK_LEVEL; 2538 *mclk_mask = VEGA20_UMD_PSTATE_MCLK_LEVEL; 2539 *soc_mask = VEGA20_UMD_PSTATE_SOCCLK_LEVEL; 2540 } 2541 2542 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) { 2543 *sclk_mask = 0; 2544 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) { 2545 *mclk_mask = 0; 2546 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { 2547 *sclk_mask = gfx_dpm_table->count - 1; 2548 *mclk_mask = mem_dpm_table->count - 1; 2549 *soc_mask = soc_dpm_table->count - 1; 2550 } 2551 2552 return 0; 2553 } 2554 2555 static int vega20_force_clock_level(struct pp_hwmgr *hwmgr, 2556 enum pp_clock_type type, uint32_t mask) 2557 { 2558 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2559 uint32_t soft_min_level, soft_max_level, hard_min_level; 2560 int ret = 0; 2561 2562 switch (type) { 2563 case PP_SCLK: 2564 soft_min_level = mask ? (ffs(mask) - 1) : 0; 2565 soft_max_level = mask ? 
(fls(mask) - 1) : 0; 2566 2567 if (soft_max_level >= data->dpm_table.gfx_table.count) { 2568 pr_err("Clock level specified %d is over max allowed %d\n", 2569 soft_max_level, 2570 data->dpm_table.gfx_table.count - 1); 2571 return -EINVAL; 2572 } 2573 2574 data->dpm_table.gfx_table.dpm_state.soft_min_level = 2575 data->dpm_table.gfx_table.dpm_levels[soft_min_level].value; 2576 data->dpm_table.gfx_table.dpm_state.soft_max_level = 2577 data->dpm_table.gfx_table.dpm_levels[soft_max_level].value; 2578 2579 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK); 2580 PP_ASSERT_WITH_CODE(!ret, 2581 "Failed to upload boot level to lowest!", 2582 return ret); 2583 2584 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK); 2585 PP_ASSERT_WITH_CODE(!ret, 2586 "Failed to upload dpm max level to highest!", 2587 return ret); 2588 break; 2589 2590 case PP_MCLK: 2591 soft_min_level = mask ? (ffs(mask) - 1) : 0; 2592 soft_max_level = mask ? (fls(mask) - 1) : 0; 2593 2594 if (soft_max_level >= data->dpm_table.mem_table.count) { 2595 pr_err("Clock level specified %d is over max allowed %d\n", 2596 soft_max_level, 2597 data->dpm_table.mem_table.count - 1); 2598 return -EINVAL; 2599 } 2600 2601 data->dpm_table.mem_table.dpm_state.soft_min_level = 2602 data->dpm_table.mem_table.dpm_levels[soft_min_level].value; 2603 data->dpm_table.mem_table.dpm_state.soft_max_level = 2604 data->dpm_table.mem_table.dpm_levels[soft_max_level].value; 2605 2606 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_UCLK_MASK); 2607 PP_ASSERT_WITH_CODE(!ret, 2608 "Failed to upload boot level to lowest!", 2609 return ret); 2610 2611 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_UCLK_MASK); 2612 PP_ASSERT_WITH_CODE(!ret, 2613 "Failed to upload dpm max level to highest!", 2614 return ret); 2615 2616 break; 2617 2618 case PP_SOCCLK: 2619 soft_min_level = mask ? (ffs(mask) - 1) : 0; 2620 soft_max_level = mask ? (fls(mask) - 1) : 0; 2621 2622 if (soft_max_level >= data->dpm_table.soc_table.count) { 2623 pr_err("Clock level specified %d is over max allowed %d\n", 2624 soft_max_level, 2625 data->dpm_table.soc_table.count - 1); 2626 return -EINVAL; 2627 } 2628 2629 data->dpm_table.soc_table.dpm_state.soft_min_level = 2630 data->dpm_table.soc_table.dpm_levels[soft_min_level].value; 2631 data->dpm_table.soc_table.dpm_state.soft_max_level = 2632 data->dpm_table.soc_table.dpm_levels[soft_max_level].value; 2633 2634 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_SOCCLK_MASK); 2635 PP_ASSERT_WITH_CODE(!ret, 2636 "Failed to upload boot level to lowest!", 2637 return ret); 2638 2639 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_SOCCLK_MASK); 2640 PP_ASSERT_WITH_CODE(!ret, 2641 "Failed to upload dpm max level to highest!", 2642 return ret); 2643 2644 break; 2645 2646 case PP_FCLK: 2647 soft_min_level = mask ? (ffs(mask) - 1) : 0; 2648 soft_max_level = mask ? 
(fls(mask) - 1) : 0; 2649 2650 if (soft_max_level >= data->dpm_table.fclk_table.count) { 2651 pr_err("Clock level specified %d is over max allowed %d\n", 2652 soft_max_level, 2653 data->dpm_table.fclk_table.count - 1); 2654 return -EINVAL; 2655 } 2656 2657 data->dpm_table.fclk_table.dpm_state.soft_min_level = 2658 data->dpm_table.fclk_table.dpm_levels[soft_min_level].value; 2659 data->dpm_table.fclk_table.dpm_state.soft_max_level = 2660 data->dpm_table.fclk_table.dpm_levels[soft_max_level].value; 2661 2662 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_FCLK_MASK); 2663 PP_ASSERT_WITH_CODE(!ret, 2664 "Failed to upload boot level to lowest!", 2665 return ret); 2666 2667 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_FCLK_MASK); 2668 PP_ASSERT_WITH_CODE(!ret, 2669 "Failed to upload dpm max level to highest!", 2670 return ret); 2671 2672 break; 2673 2674 case PP_DCEFCLK: 2675 hard_min_level = mask ? (ffs(mask) - 1) : 0; 2676 2677 if (hard_min_level >= data->dpm_table.dcef_table.count) { 2678 pr_err("Clock level specified %d is over max allowed %d\n", 2679 hard_min_level, 2680 data->dpm_table.dcef_table.count - 1); 2681 return -EINVAL; 2682 } 2683 2684 data->dpm_table.dcef_table.dpm_state.hard_min_level = 2685 data->dpm_table.dcef_table.dpm_levels[hard_min_level].value; 2686 2687 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_DCEFCLK_MASK); 2688 PP_ASSERT_WITH_CODE(!ret, 2689 "Failed to upload boot level to lowest!", 2690 return ret); 2691 2692 //TODO: Setting DCEFCLK max dpm level is not supported 2693 2694 break; 2695 2696 case PP_PCIE: 2697 soft_min_level = mask ? (ffs(mask) - 1) : 0; 2698 soft_max_level = mask ? (fls(mask) - 1) : 0; 2699 if (soft_min_level >= NUM_LINK_LEVELS || 2700 soft_max_level >= NUM_LINK_LEVELS) 2701 return -EINVAL; 2702 2703 ret = smum_send_msg_to_smc_with_parameter(hwmgr, 2704 PPSMC_MSG_SetMinLinkDpmByIndex, soft_min_level, 2705 NULL); 2706 PP_ASSERT_WITH_CODE(!ret, 2707 "Failed to set min link dpm level!", 2708 return ret); 2709 2710 break; 2711 2712 default: 2713 break; 2714 } 2715 2716 return 0; 2717 } 2718 2719 static int vega20_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, 2720 enum amd_dpm_forced_level level) 2721 { 2722 int ret = 0; 2723 uint32_t sclk_mask, mclk_mask, soc_mask; 2724 2725 switch (level) { 2726 case AMD_DPM_FORCED_LEVEL_HIGH: 2727 ret = vega20_force_dpm_highest(hwmgr); 2728 break; 2729 2730 case AMD_DPM_FORCED_LEVEL_LOW: 2731 ret = vega20_force_dpm_lowest(hwmgr); 2732 break; 2733 2734 case AMD_DPM_FORCED_LEVEL_AUTO: 2735 ret = vega20_unforce_dpm_levels(hwmgr); 2736 break; 2737 2738 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: 2739 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: 2740 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: 2741 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: 2742 ret = vega20_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask); 2743 if (ret) 2744 return ret; 2745 vega20_force_clock_level(hwmgr, PP_SCLK, 1 << sclk_mask); 2746 vega20_force_clock_level(hwmgr, PP_MCLK, 1 << mclk_mask); 2747 vega20_force_clock_level(hwmgr, PP_SOCCLK, 1 << soc_mask); 2748 break; 2749 2750 case AMD_DPM_FORCED_LEVEL_MANUAL: 2751 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: 2752 default: 2753 break; 2754 } 2755 2756 return ret; 2757 } 2758 2759 static uint32_t vega20_get_fan_control_mode(struct pp_hwmgr *hwmgr) 2760 { 2761 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2762 2763 if (data->smu_features[GNLD_FAN_CONTROL].enabled == false) 2764 return AMD_FAN_CTRL_MANUAL; 2765 else 2766 return AMD_FAN_CTRL_AUTO; 
2767 } 2768 2769 static void vega20_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) 2770 { 2771 switch (mode) { 2772 case AMD_FAN_CTRL_NONE: 2773 vega20_fan_ctrl_set_fan_speed_percent(hwmgr, 100); 2774 break; 2775 case AMD_FAN_CTRL_MANUAL: 2776 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) 2777 vega20_fan_ctrl_stop_smc_fan_control(hwmgr); 2778 break; 2779 case AMD_FAN_CTRL_AUTO: 2780 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) 2781 vega20_fan_ctrl_start_smc_fan_control(hwmgr); 2782 break; 2783 default: 2784 break; 2785 } 2786 } 2787 2788 static int vega20_get_dal_power_level(struct pp_hwmgr *hwmgr, 2789 struct amd_pp_simple_clock_info *info) 2790 { 2791 #if 0 2792 struct phm_ppt_v2_information *table_info = 2793 (struct phm_ppt_v2_information *)hwmgr->pptable; 2794 struct phm_clock_and_voltage_limits *max_limits = 2795 &table_info->max_clock_voltage_on_ac; 2796 2797 info->engine_max_clock = max_limits->sclk; 2798 info->memory_max_clock = max_limits->mclk; 2799 #endif 2800 return 0; 2801 } 2802 2803 2804 static int vega20_get_sclks(struct pp_hwmgr *hwmgr, 2805 struct pp_clock_levels_with_latency *clocks) 2806 { 2807 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2808 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table); 2809 int i, count; 2810 2811 if (!data->smu_features[GNLD_DPM_GFXCLK].enabled) 2812 return -1; 2813 2814 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count; 2815 clocks->num_levels = count; 2816 2817 for (i = 0; i < count; i++) { 2818 clocks->data[i].clocks_in_khz = 2819 dpm_table->dpm_levels[i].value * 1000; 2820 clocks->data[i].latency_in_us = 0; 2821 } 2822 2823 return 0; 2824 } 2825 2826 static uint32_t vega20_get_mem_latency(struct pp_hwmgr *hwmgr, 2827 uint32_t clock) 2828 { 2829 return 25; 2830 } 2831 2832 static int vega20_get_memclocks(struct pp_hwmgr *hwmgr, 2833 struct pp_clock_levels_with_latency *clocks) 2834 { 2835 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2836 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.mem_table); 2837 int i, count; 2838 2839 if (!data->smu_features[GNLD_DPM_UCLK].enabled) 2840 return -1; 2841 2842 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count; 2843 clocks->num_levels = data->mclk_latency_table.count = count; 2844 2845 for (i = 0; i < count; i++) { 2846 clocks->data[i].clocks_in_khz = 2847 data->mclk_latency_table.entries[i].frequency = 2848 dpm_table->dpm_levels[i].value * 1000; 2849 clocks->data[i].latency_in_us = 2850 data->mclk_latency_table.entries[i].latency = 2851 vega20_get_mem_latency(hwmgr, dpm_table->dpm_levels[i].value); 2852 } 2853 2854 return 0; 2855 } 2856 2857 static int vega20_get_dcefclocks(struct pp_hwmgr *hwmgr, 2858 struct pp_clock_levels_with_latency *clocks) 2859 { 2860 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2861 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.dcef_table); 2862 int i, count; 2863 2864 if (!data->smu_features[GNLD_DPM_DCEFCLK].enabled) 2865 return -1; 2866 2867 count = (dpm_table->count > MAX_NUM_CLOCKS) ? 
MAX_NUM_CLOCKS : dpm_table->count; 2868 clocks->num_levels = count; 2869 2870 for (i = 0; i < count; i++) { 2871 clocks->data[i].clocks_in_khz = 2872 dpm_table->dpm_levels[i].value * 1000; 2873 clocks->data[i].latency_in_us = 0; 2874 } 2875 2876 return 0; 2877 } 2878 2879 static int vega20_get_socclocks(struct pp_hwmgr *hwmgr, 2880 struct pp_clock_levels_with_latency *clocks) 2881 { 2882 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2883 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.soc_table); 2884 int i, count; 2885 2886 if (!data->smu_features[GNLD_DPM_SOCCLK].enabled) 2887 return -1; 2888 2889 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count; 2890 clocks->num_levels = count; 2891 2892 for (i = 0; i < count; i++) { 2893 clocks->data[i].clocks_in_khz = 2894 dpm_table->dpm_levels[i].value * 1000; 2895 clocks->data[i].latency_in_us = 0; 2896 } 2897 2898 return 0; 2899 2900 } 2901 2902 static int vega20_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr, 2903 enum amd_pp_clock_type type, 2904 struct pp_clock_levels_with_latency *clocks) 2905 { 2906 int ret; 2907 2908 switch (type) { 2909 case amd_pp_sys_clock: 2910 ret = vega20_get_sclks(hwmgr, clocks); 2911 break; 2912 case amd_pp_mem_clock: 2913 ret = vega20_get_memclocks(hwmgr, clocks); 2914 break; 2915 case amd_pp_dcef_clock: 2916 ret = vega20_get_dcefclocks(hwmgr, clocks); 2917 break; 2918 case amd_pp_soc_clock: 2919 ret = vega20_get_socclocks(hwmgr, clocks); 2920 break; 2921 default: 2922 return -EINVAL; 2923 } 2924 2925 return ret; 2926 } 2927 2928 static int vega20_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr, 2929 enum amd_pp_clock_type type, 2930 struct pp_clock_levels_with_voltage *clocks) 2931 { 2932 clocks->num_levels = 0; 2933 2934 return 0; 2935 } 2936 2937 static int vega20_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr, 2938 void *clock_ranges) 2939 { 2940 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2941 Watermarks_t *table = &(data->smc_state_table.water_marks_table); 2942 struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges; 2943 2944 if (!data->registry_data.disable_water_mark && 2945 data->smu_features[GNLD_DPM_DCEFCLK].supported && 2946 data->smu_features[GNLD_DPM_SOCCLK].supported) { 2947 smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges); 2948 data->water_marks_bitmap |= WaterMarksExist; 2949 data->water_marks_bitmap &= ~WaterMarksLoaded; 2950 } 2951 2952 return 0; 2953 } 2954 2955 static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, 2956 enum PP_OD_DPM_TABLE_COMMAND type, 2957 long *input, uint32_t size) 2958 { 2959 struct vega20_hwmgr *data = 2960 (struct vega20_hwmgr *)(hwmgr->backend); 2961 struct vega20_od8_single_setting *od8_settings = 2962 data->od8_settings.od8_settings_array; 2963 OverDriveTable_t *od_table = 2964 &(data->smc_state_table.overdrive_table); 2965 int32_t input_index, input_clk, input_vol, i; 2966 int od8_id; 2967 int ret; 2968 2969 PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage", 2970 return -EINVAL); 2971 2972 switch (type) { 2973 case PP_OD_EDIT_SCLK_VDDC_TABLE: 2974 if (!(od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id && 2975 od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id)) { 2976 pr_info("Sclk min/max frequency overdrive not supported\n"); 2977 return -EOPNOTSUPP; 2978 } 2979 2980 for (i = 0; i < size; i += 2) { 2981 if (i + 2 > size) { 2982 pr_info("invalid number of input parameters %d\n", 2983 
size); 2984 return -EINVAL; 2985 } 2986 2987 input_index = input[i]; 2988 input_clk = input[i + 1]; 2989 2990 if (input_index != 0 && input_index != 1) { 2991 pr_info("Invalid index %d\n", input_index); 2992 pr_info("Support min/max sclk frequency setting only which index by 0/1\n"); 2993 return -EINVAL; 2994 } 2995 2996 if (input_clk < od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value || 2997 input_clk > od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value) { 2998 pr_info("clock freq %d is not within allowed range [%d - %d]\n", 2999 input_clk, 3000 od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value, 3001 od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value); 3002 return -EINVAL; 3003 } 3004 3005 if ((input_index == 0 && od_table->GfxclkFmin != input_clk) || 3006 (input_index == 1 && od_table->GfxclkFmax != input_clk)) 3007 data->gfxclk_overdrive = true; 3008 3009 if (input_index == 0) 3010 od_table->GfxclkFmin = input_clk; 3011 else 3012 od_table->GfxclkFmax = input_clk; 3013 } 3014 3015 break; 3016 3017 case PP_OD_EDIT_MCLK_VDDC_TABLE: 3018 if (!od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) { 3019 pr_info("Mclk max frequency overdrive not supported\n"); 3020 return -EOPNOTSUPP; 3021 } 3022 3023 for (i = 0; i < size; i += 2) { 3024 if (i + 2 > size) { 3025 pr_info("invalid number of input parameters %d\n", 3026 size); 3027 return -EINVAL; 3028 } 3029 3030 input_index = input[i]; 3031 input_clk = input[i + 1]; 3032 3033 if (input_index != 1) { 3034 pr_info("Invalid index %d\n", input_index); 3035 pr_info("Support max Mclk frequency setting only which index by 1\n"); 3036 return -EINVAL; 3037 } 3038 3039 if (input_clk < od8_settings[OD8_SETTING_UCLK_FMAX].min_value || 3040 input_clk > od8_settings[OD8_SETTING_UCLK_FMAX].max_value) { 3041 pr_info("clock freq %d is not within allowed range [%d - %d]\n", 3042 input_clk, 3043 od8_settings[OD8_SETTING_UCLK_FMAX].min_value, 3044 od8_settings[OD8_SETTING_UCLK_FMAX].max_value); 3045 return -EINVAL; 3046 } 3047 3048 if (input_index == 1 && od_table->UclkFmax != input_clk) 3049 data->memclk_overdrive = true; 3050 3051 od_table->UclkFmax = input_clk; 3052 } 3053 3054 break; 3055 3056 case PP_OD_EDIT_VDDC_CURVE: 3057 if (!(od8_settings[OD8_SETTING_GFXCLK_FREQ1].feature_id && 3058 od8_settings[OD8_SETTING_GFXCLK_FREQ2].feature_id && 3059 od8_settings[OD8_SETTING_GFXCLK_FREQ3].feature_id && 3060 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id && 3061 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id && 3062 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id)) { 3063 pr_info("Voltage curve calibrate not supported\n"); 3064 return -EOPNOTSUPP; 3065 } 3066 3067 for (i = 0; i < size; i += 3) { 3068 if (i + 3 > size) { 3069 pr_info("invalid number of input parameters %d\n", 3070 size); 3071 return -EINVAL; 3072 } 3073 3074 input_index = input[i]; 3075 input_clk = input[i + 1]; 3076 input_vol = input[i + 2]; 3077 3078 if (input_index > 2) { 3079 pr_info("Setting for point %d is not supported\n", 3080 input_index + 1); 3081 pr_info("Three supported points index by 0, 1, 2\n"); 3082 return -EINVAL; 3083 } 3084 3085 od8_id = OD8_SETTING_GFXCLK_FREQ1 + 2 * input_index; 3086 if (input_clk < od8_settings[od8_id].min_value || 3087 input_clk > od8_settings[od8_id].max_value) { 3088 pr_info("clock freq %d is not within allowed range [%d - %d]\n", 3089 input_clk, 3090 od8_settings[od8_id].min_value, 3091 od8_settings[od8_id].max_value); 3092 return -EINVAL; 3093 } 3094 3095 od8_id = OD8_SETTING_GFXCLK_VOLTAGE1 + 2 * input_index; 3096 if (input_vol < 
od8_settings[od8_id].min_value || 3097 input_vol > od8_settings[od8_id].max_value) { 3098 pr_info("clock voltage %d is not within allowed range [%d - %d]\n", 3099 input_vol, 3100 od8_settings[od8_id].min_value, 3101 od8_settings[od8_id].max_value); 3102 return -EINVAL; 3103 } 3104 3105 switch (input_index) { 3106 case 0: 3107 od_table->GfxclkFreq1 = input_clk; 3108 od_table->GfxclkVolt1 = input_vol * VOLTAGE_SCALE; 3109 break; 3110 case 1: 3111 od_table->GfxclkFreq2 = input_clk; 3112 od_table->GfxclkVolt2 = input_vol * VOLTAGE_SCALE; 3113 break; 3114 case 2: 3115 od_table->GfxclkFreq3 = input_clk; 3116 od_table->GfxclkVolt3 = input_vol * VOLTAGE_SCALE; 3117 break; 3118 } 3119 } 3120 break; 3121 3122 case PP_OD_RESTORE_DEFAULT_TABLE: 3123 data->gfxclk_overdrive = false; 3124 data->memclk_overdrive = false; 3125 3126 ret = smum_smc_table_manager(hwmgr, 3127 (uint8_t *)od_table, 3128 TABLE_OVERDRIVE, true); 3129 PP_ASSERT_WITH_CODE(!ret, 3130 "Failed to export overdrive table!", 3131 return ret); 3132 break; 3133 3134 case PP_OD_COMMIT_DPM_TABLE: 3135 ret = smum_smc_table_manager(hwmgr, 3136 (uint8_t *)od_table, 3137 TABLE_OVERDRIVE, false); 3138 PP_ASSERT_WITH_CODE(!ret, 3139 "Failed to import overdrive table!", 3140 return ret); 3141 3142 /* retrieve updated gfxclk table */ 3143 if (data->gfxclk_overdrive) { 3144 data->gfxclk_overdrive = false; 3145 3146 ret = vega20_setup_gfxclk_dpm_table(hwmgr); 3147 if (ret) 3148 return ret; 3149 } 3150 3151 /* retrieve updated memclk table */ 3152 if (data->memclk_overdrive) { 3153 data->memclk_overdrive = false; 3154 3155 ret = vega20_setup_memclk_dpm_table(hwmgr); 3156 if (ret) 3157 return ret; 3158 } 3159 break; 3160 3161 default: 3162 return -EINVAL; 3163 } 3164 3165 return 0; 3166 } 3167 3168 static int vega20_set_mp1_state(struct pp_hwmgr *hwmgr, 3169 enum pp_mp1_state mp1_state) 3170 { 3171 uint16_t msg; 3172 int ret; 3173 3174 switch (mp1_state) { 3175 case PP_MP1_STATE_SHUTDOWN: 3176 msg = PPSMC_MSG_PrepareMp1ForShutdown; 3177 break; 3178 case PP_MP1_STATE_UNLOAD: 3179 msg = PPSMC_MSG_PrepareMp1ForUnload; 3180 break; 3181 case PP_MP1_STATE_RESET: 3182 msg = PPSMC_MSG_PrepareMp1ForReset; 3183 break; 3184 case PP_MP1_STATE_NONE: 3185 default: 3186 return 0; 3187 } 3188 3189 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg, NULL)) == 0, 3190 "[PrepareMp1] Failed!", 3191 return ret); 3192 3193 return 0; 3194 } 3195 3196 static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf) 3197 { 3198 static const char *ppfeature_name[] = { 3199 "DPM_PREFETCHER", 3200 "GFXCLK_DPM", 3201 "UCLK_DPM", 3202 "SOCCLK_DPM", 3203 "UVD_DPM", 3204 "VCE_DPM", 3205 "ULV", 3206 "MP0CLK_DPM", 3207 "LINK_DPM", 3208 "DCEFCLK_DPM", 3209 "GFXCLK_DS", 3210 "SOCCLK_DS", 3211 "LCLK_DS", 3212 "PPT", 3213 "TDC", 3214 "THERMAL", 3215 "GFX_PER_CU_CG", 3216 "RM", 3217 "DCEFCLK_DS", 3218 "ACDC", 3219 "VR0HOT", 3220 "VR1HOT", 3221 "FW_CTF", 3222 "LED_DISPLAY", 3223 "FAN_CONTROL", 3224 "GFX_EDC", 3225 "GFXOFF", 3226 "CG", 3227 "FCLK_DPM", 3228 "FCLK_DS", 3229 "MP1CLK_DS", 3230 "MP0CLK_DS", 3231 "XGMI", 3232 "ECC"}; 3233 static const char *output_title[] = { 3234 "FEATURES", 3235 "BITMASK", 3236 "ENABLEMENT"}; 3237 uint64_t features_enabled; 3238 int i; 3239 int ret = 0; 3240 int size = 0; 3241 3242 ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled); 3243 PP_ASSERT_WITH_CODE(!ret, 3244 "[EnableAllSmuFeatures] Failed to get enabled smc features!", 3245 return ret); 3246 3247 size += snprintf(buf + size, PAGE_SIZE - size, "Current ppfeatures: 
0x%016llx\n", features_enabled); 3248 size += snprintf(buf + size, PAGE_SIZE - size, "%-19s %-22s %s\n", 3249 output_title[0], 3250 output_title[1], 3251 output_title[2]); 3252 for (i = 0; i < GNLD_FEATURES_MAX; i++) { 3253 size += snprintf(buf + size, PAGE_SIZE - size, "%-19s 0x%016llx %6s\n", 3254 ppfeature_name[i], 3255 1ULL << i, 3256 (features_enabled & (1ULL << i)) ? "Y" : "N"); 3257 } 3258 3259 return size; 3260 } 3261 3262 static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfeature_masks) 3263 { 3264 struct vega20_hwmgr *data = 3265 (struct vega20_hwmgr *)(hwmgr->backend); 3266 uint64_t features_enabled, features_to_enable, features_to_disable; 3267 int i, ret = 0; 3268 bool enabled; 3269 3270 if (new_ppfeature_masks >= (1ULL << GNLD_FEATURES_MAX)) 3271 return -EINVAL; 3272 3273 ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled); 3274 if (ret) 3275 return ret; 3276 3277 features_to_disable = 3278 features_enabled & ~new_ppfeature_masks; 3279 features_to_enable = 3280 ~features_enabled & new_ppfeature_masks; 3281 3282 pr_debug("features_to_disable 0x%llx\n", features_to_disable); 3283 pr_debug("features_to_enable 0x%llx\n", features_to_enable); 3284 3285 if (features_to_disable) { 3286 ret = vega20_enable_smc_features(hwmgr, false, features_to_disable); 3287 if (ret) 3288 return ret; 3289 } 3290 3291 if (features_to_enable) { 3292 ret = vega20_enable_smc_features(hwmgr, true, features_to_enable); 3293 if (ret) 3294 return ret; 3295 } 3296 3297 /* Update the cached feature enablement state */ 3298 ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled); 3299 if (ret) 3300 return ret; 3301 3302 for (i = 0; i < GNLD_FEATURES_MAX; i++) { 3303 enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? 
3304 true : false; 3305 data->smu_features[i].enabled = enabled; 3306 } 3307 3308 return 0; 3309 } 3310 3311 static int vega20_get_current_pcie_link_width_level(struct pp_hwmgr *hwmgr) 3312 { 3313 struct amdgpu_device *adev = hwmgr->adev; 3314 3315 return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) & 3316 PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK) 3317 >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT; 3318 } 3319 3320 static int vega20_get_current_pcie_link_width(struct pp_hwmgr *hwmgr) 3321 { 3322 uint32_t width_level; 3323 3324 width_level = vega20_get_current_pcie_link_width_level(hwmgr); 3325 if (width_level > LINK_WIDTH_MAX) 3326 width_level = 0; 3327 3328 return link_width[width_level]; 3329 } 3330 3331 static int vega20_get_current_pcie_link_speed_level(struct pp_hwmgr *hwmgr) 3332 { 3333 struct amdgpu_device *adev = hwmgr->adev; 3334 3335 return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) & 3336 PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) 3337 >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT; 3338 } 3339 3340 static int vega20_get_current_pcie_link_speed(struct pp_hwmgr *hwmgr) 3341 { 3342 uint32_t speed_level; 3343 3344 speed_level = vega20_get_current_pcie_link_speed_level(hwmgr); 3345 if (speed_level > LINK_SPEED_MAX) 3346 speed_level = 0; 3347 3348 return link_speed[speed_level]; 3349 } 3350 3351 static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, 3352 enum pp_clock_type type, char *buf) 3353 { 3354 struct vega20_hwmgr *data = 3355 (struct vega20_hwmgr *)(hwmgr->backend); 3356 struct vega20_od8_single_setting *od8_settings = 3357 data->od8_settings.od8_settings_array; 3358 OverDriveTable_t *od_table = 3359 &(data->smc_state_table.overdrive_table); 3360 PPTable_t *pptable = &(data->smc_state_table.pp_table); 3361 struct pp_clock_levels_with_latency clocks; 3362 struct vega20_single_dpm_table *fclk_dpm_table = 3363 &(data->dpm_table.fclk_table); 3364 int i, now, size = 0; 3365 int ret = 0; 3366 uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width; 3367 3368 switch (type) { 3369 case PP_SCLK: 3370 ret = vega20_get_current_clk_freq(hwmgr, PPCLK_GFXCLK, &now); 3371 PP_ASSERT_WITH_CODE(!ret, 3372 "Attempt to get current gfx clk Failed!", 3373 return ret); 3374 3375 if (vega20_get_sclks(hwmgr, &clocks)) { 3376 size += snprintf(buf + size, PAGE_SIZE - size, "0: %uMhz * (DPM disabled)\n", 3377 now / 100); 3378 break; 3379 } 3380 3381 for (i = 0; i < clocks.num_levels; i++) 3382 size += snprintf(buf + size, PAGE_SIZE - size, "%d: %uMhz %s\n", 3383 i, clocks.data[i].clocks_in_khz / 1000, 3384 (clocks.data[i].clocks_in_khz == now * 10) ? "*" : ""); 3385 break; 3386 3387 case PP_MCLK: 3388 ret = vega20_get_current_clk_freq(hwmgr, PPCLK_UCLK, &now); 3389 PP_ASSERT_WITH_CODE(!ret, 3390 "Attempt to get current mclk freq Failed!", 3391 return ret); 3392 3393 if (vega20_get_memclocks(hwmgr, &clocks)) { 3394 size += snprintf(buf + size, PAGE_SIZE - size, "0: %uMhz * (DPM disabled)\n", 3395 now / 100); 3396 break; 3397 } 3398 3399 for (i = 0; i < clocks.num_levels; i++) 3400 size += snprintf(buf + size, PAGE_SIZE - size, "%d: %uMhz %s\n", 3401 i, clocks.data[i].clocks_in_khz / 1000, 3402 (clocks.data[i].clocks_in_khz == now * 10) ? 
"*" : ""); 3403 break; 3404 3405 case PP_SOCCLK: 3406 ret = vega20_get_current_clk_freq(hwmgr, PPCLK_SOCCLK, &now); 3407 PP_ASSERT_WITH_CODE(!ret, 3408 "Attempt to get current socclk freq Failed!", 3409 return ret); 3410 3411 if (vega20_get_socclocks(hwmgr, &clocks)) { 3412 size += snprintf(buf + size, PAGE_SIZE - size, "0: %uMhz * (DPM disabled)\n", 3413 now / 100); 3414 break; 3415 } 3416 3417 for (i = 0; i < clocks.num_levels; i++) 3418 size += snprintf(buf + size, PAGE_SIZE - size, "%d: %uMhz %s\n", 3419 i, clocks.data[i].clocks_in_khz / 1000, 3420 (clocks.data[i].clocks_in_khz == now * 10) ? "*" : ""); 3421 break; 3422 3423 case PP_FCLK: 3424 ret = vega20_get_current_clk_freq(hwmgr, PPCLK_FCLK, &now); 3425 PP_ASSERT_WITH_CODE(!ret, 3426 "Attempt to get current fclk freq Failed!", 3427 return ret); 3428 3429 for (i = 0; i < fclk_dpm_table->count; i++) 3430 size += snprintf(buf + size, PAGE_SIZE - size, "%d: %uMhz %s\n", 3431 i, fclk_dpm_table->dpm_levels[i].value, 3432 fclk_dpm_table->dpm_levels[i].value == (now / 100) ? "*" : ""); 3433 break; 3434 3435 case PP_DCEFCLK: 3436 ret = vega20_get_current_clk_freq(hwmgr, PPCLK_DCEFCLK, &now); 3437 PP_ASSERT_WITH_CODE(!ret, 3438 "Attempt to get current dcefclk freq Failed!", 3439 return ret); 3440 3441 if (vega20_get_dcefclocks(hwmgr, &clocks)) { 3442 size += snprintf(buf + size, PAGE_SIZE - size, "0: %uMhz * (DPM disabled)\n", 3443 now / 100); 3444 break; 3445 } 3446 3447 for (i = 0; i < clocks.num_levels; i++) 3448 size += snprintf(buf + size, PAGE_SIZE - size, "%d: %uMhz %s\n", 3449 i, clocks.data[i].clocks_in_khz / 1000, 3450 (clocks.data[i].clocks_in_khz == now * 10) ? "*" : ""); 3451 break; 3452 3453 case PP_PCIE: 3454 current_gen_speed = 3455 vega20_get_current_pcie_link_speed_level(hwmgr); 3456 current_lane_width = 3457 vega20_get_current_pcie_link_width_level(hwmgr); 3458 for (i = 0; i < NUM_LINK_LEVELS; i++) { 3459 gen_speed = pptable->PcieGenSpeed[i]; 3460 lane_width = pptable->PcieLaneCount[i]; 3461 3462 size += snprintf(buf + size, PAGE_SIZE - size, "%d: %s %s %dMhz %s\n", i, 3463 (gen_speed == 0) ? "2.5GT/s," : 3464 (gen_speed == 1) ? "5.0GT/s," : 3465 (gen_speed == 2) ? "8.0GT/s," : 3466 (gen_speed == 3) ? "16.0GT/s," : "", 3467 (lane_width == 1) ? "x1" : 3468 (lane_width == 2) ? "x2" : 3469 (lane_width == 3) ? "x4" : 3470 (lane_width == 4) ? "x8" : 3471 (lane_width == 5) ? "x12" : 3472 (lane_width == 6) ? "x16" : "", 3473 pptable->LclkFreq[i], 3474 (current_gen_speed == gen_speed) && 3475 (current_lane_width == lane_width) ? 
3476 "*" : ""); 3477 } 3478 break; 3479 3480 case OD_SCLK: 3481 if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id && 3482 od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) { 3483 size = snprintf(buf, PAGE_SIZE, "%s:\n", "OD_SCLK"); 3484 size += snprintf(buf + size, PAGE_SIZE - size, "0: %10uMhz\n", 3485 od_table->GfxclkFmin); 3486 size += snprintf(buf + size, PAGE_SIZE - size, "1: %10uMhz\n", 3487 od_table->GfxclkFmax); 3488 } 3489 break; 3490 3491 case OD_MCLK: 3492 if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) { 3493 size = snprintf(buf, PAGE_SIZE, "%s:\n", "OD_MCLK"); 3494 size += snprintf(buf + size, PAGE_SIZE - size, "1: %10uMhz\n", 3495 od_table->UclkFmax); 3496 } 3497 3498 break; 3499 3500 case OD_VDDC_CURVE: 3501 if (od8_settings[OD8_SETTING_GFXCLK_FREQ1].feature_id && 3502 od8_settings[OD8_SETTING_GFXCLK_FREQ2].feature_id && 3503 od8_settings[OD8_SETTING_GFXCLK_FREQ3].feature_id && 3504 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id && 3505 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id && 3506 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) { 3507 size = snprintf(buf, PAGE_SIZE, "%s:\n", "OD_VDDC_CURVE"); 3508 size += snprintf(buf + size, PAGE_SIZE - size, "0: %10uMhz %10dmV\n", 3509 od_table->GfxclkFreq1, 3510 od_table->GfxclkVolt1 / VOLTAGE_SCALE); 3511 size += snprintf(buf + size, PAGE_SIZE - size, "1: %10uMhz %10dmV\n", 3512 od_table->GfxclkFreq2, 3513 od_table->GfxclkVolt2 / VOLTAGE_SCALE); 3514 size += snprintf(buf + size, PAGE_SIZE - size, "2: %10uMhz %10dmV\n", 3515 od_table->GfxclkFreq3, 3516 od_table->GfxclkVolt3 / VOLTAGE_SCALE); 3517 } 3518 3519 break; 3520 3521 case OD_RANGE: 3522 size = snprintf(buf, PAGE_SIZE, "%s:\n", "OD_RANGE"); 3523 3524 if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id && 3525 od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) { 3526 size += snprintf(buf + size, PAGE_SIZE - size, "SCLK: %7uMhz %10uMhz\n", 3527 od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value, 3528 od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value); 3529 } 3530 3531 if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) { 3532 size += snprintf(buf + size, PAGE_SIZE - size, "MCLK: %7uMhz %10uMhz\n", 3533 od8_settings[OD8_SETTING_UCLK_FMAX].min_value, 3534 od8_settings[OD8_SETTING_UCLK_FMAX].max_value); 3535 } 3536 3537 if (od8_settings[OD8_SETTING_GFXCLK_FREQ1].feature_id && 3538 od8_settings[OD8_SETTING_GFXCLK_FREQ2].feature_id && 3539 od8_settings[OD8_SETTING_GFXCLK_FREQ3].feature_id && 3540 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id && 3541 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id && 3542 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) { 3543 size += snprintf(buf + size, PAGE_SIZE - size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n", 3544 od8_settings[OD8_SETTING_GFXCLK_FREQ1].min_value, 3545 od8_settings[OD8_SETTING_GFXCLK_FREQ1].max_value); 3546 size += snprintf(buf + size, PAGE_SIZE - size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n", 3547 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].min_value, 3548 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].max_value); 3549 size += snprintf(buf + size, PAGE_SIZE - size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n", 3550 od8_settings[OD8_SETTING_GFXCLK_FREQ2].min_value, 3551 od8_settings[OD8_SETTING_GFXCLK_FREQ2].max_value); 3552 size += snprintf(buf + size, PAGE_SIZE - size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n", 3553 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].min_value, 3554 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].max_value); 3555 size += snprintf(buf + size, PAGE_SIZE - size, "VDDC_CURVE_SCLK[2]: %7uMhz 
%10uMhz\n", 3556 od8_settings[OD8_SETTING_GFXCLK_FREQ3].min_value, 3557 od8_settings[OD8_SETTING_GFXCLK_FREQ3].max_value); 3558 size += snprintf(buf + size, PAGE_SIZE - size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n", 3559 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].min_value, 3560 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].max_value); 3561 } 3562 3563 break; 3564 default: 3565 break; 3566 } 3567 return size; 3568 } 3569 3570 static int vega20_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr, 3571 struct vega20_single_dpm_table *dpm_table) 3572 { 3573 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 3574 int ret = 0; 3575 3576 if (data->smu_features[GNLD_DPM_UCLK].enabled) { 3577 PP_ASSERT_WITH_CODE(dpm_table->count > 0, 3578 "[SetUclkToHightestDpmLevel] Dpm table has no entry!", 3579 return -EINVAL); 3580 PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_UCLK_DPM_LEVELS, 3581 "[SetUclkToHightestDpmLevel] Dpm table has too many entries!", 3582 return -EINVAL); 3583 3584 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3585 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr, 3586 PPSMC_MSG_SetHardMinByFreq, 3587 (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level, 3588 NULL)), 3589 "[SetUclkToHightestDpmLevel] Set hard min uclk failed!", 3590 return ret); 3591 } 3592 3593 return ret; 3594 } 3595 3596 static int vega20_set_fclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr) 3597 { 3598 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 3599 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.fclk_table); 3600 int ret = 0; 3601 3602 if (data->smu_features[GNLD_DPM_FCLK].enabled) { 3603 PP_ASSERT_WITH_CODE(dpm_table->count > 0, 3604 "[SetFclkToHightestDpmLevel] Dpm table has no entry!", 3605 return -EINVAL); 3606 PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_FCLK_DPM_LEVELS, 3607 "[SetFclkToHightestDpmLevel] Dpm table has too many entries!", 3608 return -EINVAL); 3609 3610 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3611 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr, 3612 PPSMC_MSG_SetSoftMinByFreq, 3613 (PPCLK_FCLK << 16 ) | dpm_table->dpm_state.soft_min_level, 3614 NULL)), 3615 "[SetFclkToHightestDpmLevel] Set soft min fclk failed!", 3616 return ret); 3617 } 3618 3619 return ret; 3620 } 3621 3622 static int vega20_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr) 3623 { 3624 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 3625 int ret = 0; 3626 3627 smum_send_msg_to_smc_with_parameter(hwmgr, 3628 PPSMC_MSG_NumOfDisplays, 0, NULL); 3629 3630 ret = vega20_set_uclk_to_highest_dpm_level(hwmgr, 3631 &data->dpm_table.mem_table); 3632 if (ret) 3633 return ret; 3634 3635 return vega20_set_fclk_to_highest_dpm_level(hwmgr); 3636 } 3637 3638 static int vega20_display_configuration_changed_task(struct pp_hwmgr *hwmgr) 3639 { 3640 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 3641 int result = 0; 3642 Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table); 3643 3644 if ((data->water_marks_bitmap & WaterMarksExist) && 3645 !(data->water_marks_bitmap & WaterMarksLoaded)) { 3646 result = smum_smc_table_manager(hwmgr, 3647 (uint8_t *)wm_table, TABLE_WATERMARKS, false); 3648 PP_ASSERT_WITH_CODE(!result, 3649 "Failed to update WMTABLE!", 3650 return result); 3651 data->water_marks_bitmap |= WaterMarksLoaded; 3652 } 3653 3654 if ((data->water_marks_bitmap & WaterMarksExist) && 
	if ((data->water_marks_bitmap & WaterMarksExist) &&
	    data->smu_features[GNLD_DPM_DCEFCLK].supported &&
	    data->smu_features[GNLD_DPM_SOCCLK].supported) {
		result = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_NumOfDisplays,
			hwmgr->display_config->num_display,
			NULL);
	}

	return result;
}

static int vega20_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	int ret = 0;

	if (data->smu_features[GNLD_DPM_UVD].supported) {
		if (data->smu_features[GNLD_DPM_UVD].enabled == enable) {
			if (enable)
				PP_DBG_LOG("[EnableDisableUVDDPM] feature DPM UVD already enabled!\n");
			else
				PP_DBG_LOG("[EnableDisableUVDDPM] feature DPM UVD already disabled!\n");
		}

		ret = vega20_enable_smc_features(hwmgr,
				enable,
				data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap);
		PP_ASSERT_WITH_CODE(!ret,
				"[EnableDisableUVDDPM] Attempt to Enable/Disable DPM UVD Failed!",
				return ret);
		data->smu_features[GNLD_DPM_UVD].enabled = enable;
	}

	return 0;
}

static void vega20_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);

	if (data->vce_power_gated == bgate)
		return;

	data->vce_power_gated = bgate;
	if (bgate) {
		vega20_enable_disable_vce_dpm(hwmgr, !bgate);
		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_VCE,
						AMD_PG_STATE_GATE);
	} else {
		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_VCE,
						AMD_PG_STATE_UNGATE);
		vega20_enable_disable_vce_dpm(hwmgr, !bgate);
	}
}

static void vega20_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);

	if (data->uvd_power_gated == bgate)
		return;

	data->uvd_power_gated = bgate;
	vega20_enable_disable_uvd_dpm(hwmgr, !bgate);
}

static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_single_dpm_table *dpm_table;
	bool vblank_too_short = false;
	bool disable_mclk_switching;
	bool disable_fclk_switching;
	uint32_t i, latency;

	disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
				  !hwmgr->display_config->multi_monitor_in_sync) ||
				  vblank_too_short;
	latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;

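	/*
	 * Each clock domain below follows the same pattern: reset the soft
	 * and hard limits to their defaults, narrow them according to the
	 * UMD pstate level or the forced min/peak profile level, and then
	 * apply any display-driven constraints (mclk/fclk switching, DAL
	 * minimums).
	 */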
	/* gfxclk */
	dpm_table = &(data->dpm_table.gfx_table);
	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;

	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
		if (VEGA20_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		}
	}

	/* memclk */
	dpm_table = &(data->dpm_table.mem_table);
	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;

	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
		if (VEGA20_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		}
	}

	/* honour DAL's UCLK Hardmin */
	if (dpm_table->dpm_state.hard_min_level < (hwmgr->display_config->min_mem_set_clock / 100))
		dpm_table->dpm_state.hard_min_level = hwmgr->display_config->min_mem_set_clock / 100;

	/* Hardmin is dependent on displayconfig */
	if (disable_mclk_switching) {
		dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		for (i = 0; i < data->mclk_latency_table.count - 1; i++) {
			if (data->mclk_latency_table.entries[i].latency <= latency) {
				if (dpm_table->dpm_levels[i].value >= (hwmgr->display_config->min_mem_set_clock / 100)) {
					dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[i].value;
					break;
				}
			}
		}
	}

	if (hwmgr->display_config->nb_pstate_switch_disable)
		dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;

	if ((disable_mclk_switching &&
	    (dpm_table->dpm_state.hard_min_level == dpm_table->dpm_levels[dpm_table->count - 1].value)) ||
	     hwmgr->display_config->min_mem_set_clock / 100 >= dpm_table->dpm_levels[dpm_table->count - 1].value)
		disable_fclk_switching = true;
	else
		disable_fclk_switching = false;

	/* fclk */
	dpm_table = &(data->dpm_table.fclk_table);
	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
	if (hwmgr->display_config->nb_pstate_switch_disable || disable_fclk_switching)
		dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;

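	/*
	 * UVD (vclk/dclk), SOC and VCE (eclk) clocks below are clamped with
	 * the same default/UMD-pstate/peak logic as gfxclk and memclk above.
	 */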
	/* vclk */
	dpm_table = &(data->dpm_table.vclk_table);
	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;

	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
		if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		}
	}

	/* dclk */
	dpm_table = &(data->dpm_table.dclk_table);
	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;

	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
		if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		}
	}

	/* socclk */
	dpm_table = &(data->dpm_table.soc_table);
	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;

	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
		if (VEGA20_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_SOCCLK_LEVEL].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_SOCCLK_LEVEL].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		}
	}

	/* eclk */
	dpm_table = &(data->dpm_table.eclk_table);
	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;

	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
		if (VEGA20_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_VCEMCLK_LEVEL].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_VCEMCLK_LEVEL].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		}
	}

	return 0;
}

static bool
vega20_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	bool is_update_required = false;

	if (data->display_timing.num_existing_displays !=
	    hwmgr->display_config->num_display)
		is_update_required = true;

	if (data->registry_data.gfx_clk_deep_sleep_support &&
	    (data->display_timing.min_clock_in_sr !=
	     hwmgr->display_config->min_core_set_clock_in_sr))
		is_update_required = true;

	return is_update_required;
}

static int vega20_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	int ret = 0;

	ret = vega20_disable_all_smu_features(hwmgr);
	PP_ASSERT_WITH_CODE(!ret,
			"[DisableDpmTasks] Failed to disable all smu features!",
			return ret);

	return 0;
}

static int vega20_power_off_asic(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	int result;

	result = vega20_disable_dpm_tasks(hwmgr);
	PP_ASSERT_WITH_CODE((0 == result),
			"[PowerOffAsic] Failed to disable DPM!",
			);
	data->water_marks_bitmap &= ~(WaterMarksLoaded);

	return result;
}

static int conv_power_profile_to_pplib_workload(int power_profile)
{
	int pplib_workload = 0;

	switch (power_profile) {
	case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT:
		pplib_workload = WORKLOAD_DEFAULT_BIT;
		break;
	case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
		pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
		break;
	case PP_SMC_POWER_PROFILE_POWERSAVING:
		pplib_workload = WORKLOAD_PPLIB_POWER_SAVING_BIT;
		break;
	case PP_SMC_POWER_PROFILE_VIDEO:
		pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
		break;
	case PP_SMC_POWER_PROFILE_VR:
		pplib_workload = WORKLOAD_PPLIB_VR_BIT;
		break;
	case PP_SMC_POWER_PROFILE_COMPUTE:
		pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
		break;
	case PP_SMC_POWER_PROFILE_CUSTOM:
		pplib_workload = WORKLOAD_PPLIB_CUSTOM_BIT;
		break;
	}

	return pplib_workload;
}

static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
{
	DpmActivityMonitorCoeffInt_t activity_monitor;
	uint32_t i, size = 0;
	uint16_t workload_type = 0;
	static const char *profile_name[] = {
					"BOOTUP_DEFAULT",
					"3D_FULL_SCREEN",
					"POWER_SAVING",
					"VIDEO",
					"VR",
					"COMPUTE",
					"CUSTOM"};
	static const char *title[] = {
			"PROFILE_INDEX(NAME)",
			"CLOCK_TYPE(NAME)",
			"FPS",
			"UseRlcBusy",
			"MinActiveFreqType",
			"MinActiveFreq",
			"BoosterFreqType",
			"BoosterFreq",
			"PD_Data_limit_c",
			"PD_Data_error_coeff",
			"PD_Data_error_rate_coeff"};
	int result = 0;

	if (!buf)
		return -EINVAL;

	size += snprintf(buf + size, PAGE_SIZE - size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
			title[0], title[1], title[2], title[3], title[4], title[5],
			title[6], title[7], title[8], title[9], title[10]);

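	/*
	 * One block per profile: fetch that workload's activity-monitor
	 * coefficients from the SMU and print a row for each of the four
	 * monitored clock domains (GFXCLK, SOCCLK, UCLK, FCLK).
	 */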
	for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
		/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
		workload_type = conv_power_profile_to_pplib_workload(i);
		result = vega20_get_activity_monitor_coeff(hwmgr,
				(uint8_t *)(&activity_monitor), workload_type);
		PP_ASSERT_WITH_CODE(!result,
				"[GetPowerProfile] Failed to get activity monitor!",
				return result);

		size += snprintf(buf + size, PAGE_SIZE - size, "%2d %14s%s:\n",
			i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ");

		size += snprintf(buf + size, PAGE_SIZE - size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
			" ",
			0,
			"GFXCLK",
			activity_monitor.Gfx_FPS,
			activity_monitor.Gfx_UseRlcBusy,
			activity_monitor.Gfx_MinActiveFreqType,
			activity_monitor.Gfx_MinActiveFreq,
			activity_monitor.Gfx_BoosterFreqType,
			activity_monitor.Gfx_BoosterFreq,
			activity_monitor.Gfx_PD_Data_limit_c,
			activity_monitor.Gfx_PD_Data_error_coeff,
			activity_monitor.Gfx_PD_Data_error_rate_coeff);

		size += snprintf(buf + size, PAGE_SIZE - size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
			" ",
			1,
			"SOCCLK",
			activity_monitor.Soc_FPS,
			activity_monitor.Soc_UseRlcBusy,
			activity_monitor.Soc_MinActiveFreqType,
			activity_monitor.Soc_MinActiveFreq,
			activity_monitor.Soc_BoosterFreqType,
			activity_monitor.Soc_BoosterFreq,
			activity_monitor.Soc_PD_Data_limit_c,
			activity_monitor.Soc_PD_Data_error_coeff,
			activity_monitor.Soc_PD_Data_error_rate_coeff);

		size += snprintf(buf + size, PAGE_SIZE - size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
			" ",
			2,
			"UCLK",
			activity_monitor.Mem_FPS,
			activity_monitor.Mem_UseRlcBusy,
			activity_monitor.Mem_MinActiveFreqType,
			activity_monitor.Mem_MinActiveFreq,
			activity_monitor.Mem_BoosterFreqType,
			activity_monitor.Mem_BoosterFreq,
			activity_monitor.Mem_PD_Data_limit_c,
			activity_monitor.Mem_PD_Data_error_coeff,
			activity_monitor.Mem_PD_Data_error_rate_coeff);

		size += snprintf(buf + size, PAGE_SIZE - size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
			" ",
			3,
			"FCLK",
			activity_monitor.Fclk_FPS,
			activity_monitor.Fclk_UseRlcBusy,
			activity_monitor.Fclk_MinActiveFreqType,
			activity_monitor.Fclk_MinActiveFreq,
			activity_monitor.Fclk_BoosterFreqType,
			activity_monitor.Fclk_BoosterFreq,
			activity_monitor.Fclk_PD_Data_limit_c,
			activity_monitor.Fclk_PD_Data_error_coeff,
			activity_monitor.Fclk_PD_Data_error_rate_coeff);
	}

	return size;
}

static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
{
	DpmActivityMonitorCoeffInt_t activity_monitor;
	int workload_type, result = 0;
	uint32_t power_profile_mode = input[size];

	if (power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
		pr_err("Invalid power profile mode %d\n", power_profile_mode);
		return -EINVAL;
	}

	if (power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
		struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
		if (size == 0 && !data->is_custom_profile_set)
			return -EINVAL;
		if (size < 10 && size != 0)
			return -EINVAL;

		result = vega20_get_activity_monitor_coeff(hwmgr,
				(uint8_t *)(&activity_monitor),
				WORKLOAD_PPLIB_CUSTOM_BIT);
		PP_ASSERT_WITH_CODE(!result,
				"[SetPowerProfile] Failed to get activity monitor!",
				return result);

		/* If size==0, then we want to apply the already-configured
		 * CUSTOM profile again. Just apply it, since we checked its
		 * validity above
		 */
		if (size == 0)
			goto out;

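		/*
		 * input[0] selects the clock domain (0 = GFXCLK, 1 = SOCCLK,
		 * 2 = UCLK, 3 = FCLK); input[1..9] carry the nine activity
		 * monitor coefficients in the same order as the sysfs table.
		 */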
		switch (input[0]) {
		case 0: /* Gfxclk */
			activity_monitor.Gfx_FPS = input[1];
			activity_monitor.Gfx_UseRlcBusy = input[2];
			activity_monitor.Gfx_MinActiveFreqType = input[3];
			activity_monitor.Gfx_MinActiveFreq = input[4];
			activity_monitor.Gfx_BoosterFreqType = input[5];
			activity_monitor.Gfx_BoosterFreq = input[6];
			activity_monitor.Gfx_PD_Data_limit_c = input[7];
			activity_monitor.Gfx_PD_Data_error_coeff = input[8];
			activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
			break;
		case 1: /* Socclk */
			activity_monitor.Soc_FPS = input[1];
			activity_monitor.Soc_UseRlcBusy = input[2];
			activity_monitor.Soc_MinActiveFreqType = input[3];
			activity_monitor.Soc_MinActiveFreq = input[4];
			activity_monitor.Soc_BoosterFreqType = input[5];
			activity_monitor.Soc_BoosterFreq = input[6];
			activity_monitor.Soc_PD_Data_limit_c = input[7];
			activity_monitor.Soc_PD_Data_error_coeff = input[8];
			activity_monitor.Soc_PD_Data_error_rate_coeff = input[9];
			break;
		case 2: /* Uclk */
			activity_monitor.Mem_FPS = input[1];
			activity_monitor.Mem_UseRlcBusy = input[2];
			activity_monitor.Mem_MinActiveFreqType = input[3];
			activity_monitor.Mem_MinActiveFreq = input[4];
			activity_monitor.Mem_BoosterFreqType = input[5];
			activity_monitor.Mem_BoosterFreq = input[6];
			activity_monitor.Mem_PD_Data_limit_c = input[7];
			activity_monitor.Mem_PD_Data_error_coeff = input[8];
			activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
			break;
		case 3: /* Fclk */
			activity_monitor.Fclk_FPS = input[1];
			activity_monitor.Fclk_UseRlcBusy = input[2];
			activity_monitor.Fclk_MinActiveFreqType = input[3];
			activity_monitor.Fclk_MinActiveFreq = input[4];
			activity_monitor.Fclk_BoosterFreqType = input[5];
			activity_monitor.Fclk_BoosterFreq = input[6];
			activity_monitor.Fclk_PD_Data_limit_c = input[7];
			activity_monitor.Fclk_PD_Data_error_coeff = input[8];
			activity_monitor.Fclk_PD_Data_error_rate_coeff = input[9];
			break;
		}

		result = vega20_set_activity_monitor_coeff(hwmgr,
				(uint8_t *)(&activity_monitor),
				WORKLOAD_PPLIB_CUSTOM_BIT);
		data->is_custom_profile_set = true;
		PP_ASSERT_WITH_CODE(!result,
				"[SetPowerProfile] Failed to set activity monitor!",
				return result);
	}

out:
	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
	workload_type =
		conv_power_profile_to_pplib_workload(power_profile_mode);
	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
					    1 << workload_type,
					    NULL);

	hwmgr->power_profile_mode = power_profile_mode;

	return 0;
}

static int vega20_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
					uint32_t virtual_addr_low,
					uint32_t virtual_addr_hi,
					uint32_t mc_addr_low,
					uint32_t mc_addr_hi,
					uint32_t size)
{
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetSystemVirtualDramAddrHigh,
					virtual_addr_hi,
					NULL);
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetSystemVirtualDramAddrLow,
					virtual_addr_low,
					NULL);
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DramLogSetDramAddrHigh,
					mc_addr_hi,
					NULL);

	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DramLogSetDramAddrLow,
					mc_addr_low,
					NULL);

	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DramLogSetDramSize,
					size,
					NULL);
	return 0;
}
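
/*
 * Thermal limits in the PPTable are in degrees Celsius; scale them by
 * PP_TEMPERATURE_UNITS_PER_CENTIGRADES for the thermal framework and
 * derive the emergency maximums by adding the per-sensor CTF offsets.
 */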
static int vega20_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
		struct PP_TemperatureRange *thermal_data)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);

	memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));

	thermal_data->max = pp_table->TedgeLimit *
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	thermal_data->edge_emergency_max = (pp_table->TedgeLimit + CTF_OFFSET_EDGE) *
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	thermal_data->hotspot_crit_max = pp_table->ThotspotLimit *
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	thermal_data->hotspot_emergency_max = (pp_table->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	thermal_data->mem_crit_max = pp_table->ThbmLimit *
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM) *
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;

	return 0;
}

static int vega20_smu_i2c_bus_access(struct pp_hwmgr *hwmgr, bool acquire)
{
	int res;

	/* I2C bus access can happen very early, when SMU not loaded yet */
	if (!vega20_is_smc_ram_running(hwmgr))
		return 0;

	res = smum_send_msg_to_smc_with_parameter(hwmgr,
			(acquire ?
			PPSMC_MSG_RequestI2CBus :
			PPSMC_MSG_ReleaseI2CBus),
			0,
			NULL);

	PP_ASSERT_WITH_CODE(!res, "[SmuI2CAccessBus] Failed to access bus!", return res);
	return res;
}

static int vega20_set_df_cstate(struct pp_hwmgr *hwmgr,
		enum pp_df_cstate state)
{
	int ret;

	/* PPSMC_MSG_DFCstateControl is supported with 40.50 and later fws */
	if (hwmgr->smu_version < 0x283200) {
		pr_err("Df cstate control is supported with 40.50 and later SMC fw!\n");
		return -EINVAL;
	}

	ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DFCstateControl, state,
				NULL);
	if (ret)
		pr_err("SetDfCstate failed!\n");

	return ret;
}
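
/*
 * Map the requested XGMI pstate onto the SMU message argument: any non-zero
 * pstate selects XGMI_MODE_PSTATE_D0, zero selects XGMI_MODE_PSTATE_D3.
 */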
static int vega20_set_xgmi_pstate(struct pp_hwmgr *hwmgr,
		uint32_t pstate)
{
	int ret;

	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetXgmiMode,
			pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3,
			NULL);
	if (ret)
		pr_err("SetXgmiPstate failed!\n");

	return ret;
}

static void vega20_init_gpu_metrics_v1_0(struct gpu_metrics_v1_0 *gpu_metrics)
{
	memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v1_0));

	gpu_metrics->common_header.structure_size =
		sizeof(struct gpu_metrics_v1_0);
	gpu_metrics->common_header.format_revision = 1;
	gpu_metrics->common_header.content_revision = 0;

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
}

static ssize_t vega20_get_gpu_metrics(struct pp_hwmgr *hwmgr,
				      void **table)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	struct gpu_metrics_v1_0 *gpu_metrics =
			&data->gpu_metrics_table;
	SmuMetrics_t metrics;
	uint32_t fan_speed_rpm;
	int ret;

	ret = vega20_get_metrics_table(hwmgr, &metrics, true);
	if (ret)
		return ret;

	vega20_init_gpu_metrics_v1_0(gpu_metrics);

	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
	gpu_metrics->temperature_mem = metrics.TemperatureHBM;
	gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
	gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc;
	gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem0;

	gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
	gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;

	gpu_metrics->average_socket_power = metrics.AverageSocketPower;

	gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
	gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
	gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency;

	gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
	gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
	gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
	gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK];
	gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];

	gpu_metrics->throttle_status = metrics.ThrottlerStatus;

	vega20_fan_ctrl_get_fan_speed_rpm(hwmgr, &fan_speed_rpm);
	gpu_metrics->current_fan_speed = (uint16_t)fan_speed_rpm;

	gpu_metrics->pcie_link_width =
		vega20_get_current_pcie_link_width(hwmgr);
	gpu_metrics->pcie_link_speed =
		vega20_get_current_pcie_link_speed(hwmgr);

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v1_0);
}

static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
	/* init/fini related */
	.backend_init = vega20_hwmgr_backend_init,
	.backend_fini = vega20_hwmgr_backend_fini,
	.asic_setup = vega20_setup_asic_task,
	.power_off_asic = vega20_power_off_asic,
	.dynamic_state_management_enable = vega20_enable_dpm_tasks,
	.dynamic_state_management_disable = vega20_disable_dpm_tasks,
	/* power state related */
	.apply_clocks_adjust_rules = vega20_apply_clocks_adjust_rules,
	.pre_display_config_changed = vega20_pre_display_configuration_changed_task,
	.display_config_changed = vega20_display_configuration_changed_task,
	.check_smc_update_required_for_display_configuration =
			vega20_check_smc_update_required_for_display_configuration,
	.notify_smc_display_config_after_ps_adjustment =
			vega20_notify_smc_display_config_after_ps_adjustment,
	/* export to DAL */
	.get_sclk = vega20_dpm_get_sclk,
	.get_mclk = vega20_dpm_get_mclk,
	.get_dal_power_level = vega20_get_dal_power_level,
	.get_clock_by_type_with_latency = vega20_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = vega20_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = vega20_set_watermarks_for_clocks_ranges,
	.display_clock_voltage_request = vega20_display_clock_voltage_request,
	.get_performance_level = vega20_get_performance_level,
	/* UMD pstate, profile related */
	.force_dpm_level = vega20_dpm_force_dpm_level,
	.get_power_profile_mode = vega20_get_power_profile_mode,
	.set_power_profile_mode = vega20_set_power_profile_mode,
	/* od related */
	.set_power_limit = vega20_set_power_limit,
	.get_sclk_od = vega20_get_sclk_od,
	.set_sclk_od = vega20_set_sclk_od,
	.get_mclk_od = vega20_get_mclk_od,
	.set_mclk_od = vega20_set_mclk_od,
	.odn_edit_dpm_table = vega20_odn_edit_dpm_table,
	/* for sysfs to retrieve/set gfxclk/memclk */
	.force_clock_level = vega20_force_clock_level,
	.print_clock_levels = vega20_print_clock_levels,
	.read_sensor = vega20_read_sensor,
	.get_ppfeature_status = vega20_get_ppfeature_status,
	.set_ppfeature_status = vega20_set_ppfeature_status,
	/* powergate related */
	.powergate_uvd = vega20_power_gate_uvd,
	.powergate_vce = vega20_power_gate_vce,
	/* thermal related */
	.start_thermal_controller = vega20_start_thermal_controller,
	.stop_thermal_controller = vega20_thermal_stop_thermal_controller,
	.get_thermal_temperature_range = vega20_get_thermal_temperature_range,
	.register_irq_handlers = smu9_register_irq_handlers,
	.disable_smc_firmware_ctf = vega20_thermal_disable_alert,
	/* fan control related */
	.get_fan_speed_percent = vega20_fan_ctrl_get_fan_speed_percent,
	.set_fan_speed_percent = vega20_fan_ctrl_set_fan_speed_percent,
	.get_fan_speed_info = vega20_fan_ctrl_get_fan_speed_info,
	.get_fan_speed_rpm = vega20_fan_ctrl_get_fan_speed_rpm,
	.set_fan_speed_rpm = vega20_fan_ctrl_set_fan_speed_rpm,
	.get_fan_control_mode = vega20_get_fan_control_mode,
	.set_fan_control_mode = vega20_set_fan_control_mode,
	/* smu memory related */
	.notify_cac_buffer_info = vega20_notify_cac_buffer_info,
	.enable_mgpu_fan_boost = vega20_enable_mgpu_fan_boost,
	/* BACO related */
	.get_asic_baco_capability = vega20_baco_get_capability,
	.get_asic_baco_state = vega20_baco_get_state,
	.set_asic_baco_state = vega20_baco_set_state,
	.set_mp1_state = vega20_set_mp1_state,
	.smu_i2c_bus_access = vega20_smu_i2c_bus_access,
	.set_df_cstate = vega20_set_df_cstate,
	.set_xgmi_pstate = vega20_set_xgmi_pstate,
	.get_gpu_metrics = vega20_get_gpu_metrics,
};

int vega20_hwmgr_init(struct pp_hwmgr *hwmgr)
{
	hwmgr->hwmgr_func = &vega20_hwmgr_funcs;
	hwmgr->pptable_func = &vega20_pptable_funcs;

	return 0;
}