/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define SWSMU_CODE_LAYER_L1

#include <linux/firmware.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "atom.h"
#include "arcturus_ppt.h"
#include "navi10_ppt.h"
#include "sienna_cichlid_ppt.h"
#include "renoir_ppt.h"
#include "vangogh_ppt.h"
#include "aldebaran_ppt.h"
#include "yellow_carp_ppt.h"
#include "cyan_skillfish_ppt.h"
#include "smu_v13_0_0_ppt.h"
#include "smu_v13_0_4_ppt.h"
#include "smu_v13_0_5_ppt.h"
#include "smu_v13_0_7_ppt.h"
#include "amd_pcie.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

static const struct amd_pm_funcs swsmu_pm_funcs;
static int smu_force_smuclk_levels(struct smu_context *smu,
				   enum smu_clk_type clk_type,
				   uint32_t mask);
static int smu_handle_task(struct smu_context *smu,
			   enum amd_dpm_forced_level level,
			   enum amd_pp_task task_id);
static int smu_reset(struct smu_context *smu);
static int smu_set_fan_speed_pwm(void *handle, u32 speed);
static int smu_set_fan_control_mode(void *handle, u32 value);
static int smu_set_power_limit(void *handle, uint32_t limit);
static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state);

static int smu_sys_get_pp_feature_mask(void *handle,
				       char *buf)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_get_pp_feature_mask(smu, buf);
}

static int smu_sys_set_pp_feature_mask(void *handle,
				       uint64_t new_mask)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_set_pp_feature_mask(smu, new_mask);
}

int smu_set_residency_gfxoff(struct smu_context *smu, bool value)
{
	if (!smu->ppt_funcs->set_gfx_off_residency)
		return -EINVAL;

	return smu_set_gfx_off_residency(smu, value);
}

int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value)
{
	if (!smu->ppt_funcs->get_gfx_off_residency)
		return -EINVAL;

	return smu_get_gfx_off_residency(smu, value);
}

int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value)
{
	if (!smu->ppt_funcs->get_gfx_off_entrycount)
		return -EINVAL;

	return smu_get_gfx_off_entrycount(smu, value);
}

int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
{
	if (!smu->ppt_funcs->get_gfx_off_status)
		return -EINVAL;

	*value = smu_get_gfx_off_status(smu);

	return 0;
}

int smu_set_soft_freq_range(struct smu_context *smu,
			    enum smu_clk_type clk_type,
			    uint32_t min,
			    uint32_t max)
{
	int ret = 0;

	if (smu->ppt_funcs->set_soft_freq_limited_range)
		ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
								  clk_type,
								  min,
								  max);

	return ret;
}

int smu_get_dpm_freq_range(struct smu_context *smu,
			   enum smu_clk_type clk_type,
			   uint32_t *min,
			   uint32_t *max)
{
	int ret = -ENOTSUPP;

	if (!min && !max)
		return -EINVAL;

	if (smu->ppt_funcs->get_dpm_ultimate_freq)
		ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
							    clk_type,
							    min,
							    max);

	return ret;
}
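
/*
 * Illustrative only (not part of the driver): a minimal sketch of how the
 * two helpers above combine to pin a clock to its supported range. The
 * call site and error handling here are hypothetical.
 *
 *	uint32_t min_freq, max_freq;
 *	int ret;
 *
 *	ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min_freq, &max_freq);
 *	if (ret)
 *		return ret;
 *
 *	// Lock GFXCLK to its maximum supported frequency.
 *	ret = smu_set_soft_freq_range(smu, SMU_GFXCLK, max_freq, max_freq);
 */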

int smu_set_gfx_power_up_by_imu(struct smu_context *smu)
{
	if (!smu->ppt_funcs || !smu->ppt_funcs->set_gfx_power_up_by_imu)
		return -EOPNOTSUPP;

	return smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
}

static u32 smu_get_mclk(void *handle, bool low)
{
	struct smu_context *smu = handle;
	uint32_t clk_freq;
	int ret = 0;

	ret = smu_get_dpm_freq_range(smu, SMU_UCLK,
				     low ? &clk_freq : NULL,
				     !low ? &clk_freq : NULL);
	if (ret)
		return 0;
	return clk_freq * 100;
}

static u32 smu_get_sclk(void *handle, bool low)
{
	struct smu_context *smu = handle;
	uint32_t clk_freq;
	int ret = 0;

	ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK,
				     low ? &clk_freq : NULL,
				     !low ? &clk_freq : NULL);
	if (ret)
		return 0;
	return clk_freq * 100;
}

static int smu_dpm_set_vcn_enable(struct smu_context *smu,
				  bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->ppt_funcs->dpm_set_vcn_enable)
		return 0;

	if (atomic_read(&power_gate->vcn_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->vcn_gated, !enable);

	return ret;
}

static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
				   bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->ppt_funcs->dpm_set_jpeg_enable)
		return 0;

	if (atomic_read(&power_gate->jpeg_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->jpeg_gated, !enable);

	return ret;
}

/**
 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
 *
 * @handle: smu_context pointer
 * @block_type: the IP block to power gate/ungate
 * @gate: to power gate if true, ungate otherwise
 *
 * This API uses no smu->mutex lock protection due to:
 * 1. It is either called by other IP blocks (gfx/sdma/vcn/uvd/vce).
 *    This is guarded to be race condition free by the caller.
 * 2. Or it gets called on a user setting request of
 *    power_dpm_force_performance_level. In that case, the smu->mutex lock
 *    protection is already enforced on the parent API
 *    smu_force_performance_level of the call path.
 */
static int smu_dpm_set_power_gate(void *handle,
				  uint32_t block_type,
				  bool gate)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) {
		dev_WARN(smu->adev->dev,
			 "SMU uninitialized but power %s requested for %u!\n",
			 gate ? "gate" : "ungate", block_type);
		return -EOPNOTSUPP;
	}

	switch (block_type) {
	/*
	 * Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses
	 * AMD_IP_BLOCK_TYPE_UVD for VCN. So, here both of them are kept.
	 */
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCN:
		ret = smu_dpm_set_vcn_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s VCN!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
		ret = smu_gfx_off_control(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",
				gate ? "enable" : "disable");
		break;
	case AMD_IP_BLOCK_TYPE_SDMA:
		ret = smu_powergate_sdma(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_JPEG:
		ret = smu_dpm_set_jpeg_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
				gate ? "gate" : "ungate");
		break;
	default:
		dev_err(smu->adev->dev, "Unsupported block type!\n");
		return -EINVAL;
	}

	return ret;
}
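
/*
 * Illustrative only: IP blocks do not call smu_dpm_set_power_gate()
 * directly; they reach it through the amd_pm_funcs table. A typical
 * (hypothetical) VCN call path looks like:
 *
 *	// in an IP block, e.g. a vcn_v2_*.c file
 *	amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCN, true);
 *	// -> pp_funcs->set_powergating_by_smu(adev->powerplay.pp_handle, ...)
 *	// -> smu_dpm_set_power_gate(handle, AMD_IP_BLOCK_TYPE_VCN, true)
 */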
"gate" : "ungate"); 295 break; 296 default: 297 dev_err(smu->adev->dev, "Unsupported block type!\n"); 298 return -EINVAL; 299 } 300 301 return ret; 302 } 303 304 /** 305 * smu_set_user_clk_dependencies - set user profile clock dependencies 306 * 307 * @smu: smu_context pointer 308 * @clk: enum smu_clk_type type 309 * 310 * Enable/Disable the clock dependency for the @clk type. 311 */ 312 static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk) 313 { 314 if (smu->adev->in_suspend) 315 return; 316 317 if (clk == SMU_MCLK) { 318 smu->user_dpm_profile.clk_dependency = 0; 319 smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK); 320 } else if (clk == SMU_FCLK) { 321 /* MCLK takes precedence over FCLK */ 322 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK))) 323 return; 324 325 smu->user_dpm_profile.clk_dependency = 0; 326 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK); 327 } else if (clk == SMU_SOCCLK) { 328 /* MCLK takes precedence over SOCCLK */ 329 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK))) 330 return; 331 332 smu->user_dpm_profile.clk_dependency = 0; 333 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK); 334 } else 335 /* Add clk dependencies here, if any */ 336 return; 337 } 338 339 /** 340 * smu_restore_dpm_user_profile - reinstate user dpm profile 341 * 342 * @smu: smu_context pointer 343 * 344 * Restore the saved user power configurations include power limit, 345 * clock frequencies, fan control mode and fan speed. 346 */ 347 static void smu_restore_dpm_user_profile(struct smu_context *smu) 348 { 349 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 350 int ret = 0; 351 352 if (!smu->adev->in_suspend) 353 return; 354 355 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 356 return; 357 358 /* Enable restore flag */ 359 smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE; 360 361 /* set the user dpm power limit */ 362 if (smu->user_dpm_profile.power_limit) { 363 ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit); 364 if (ret) 365 dev_err(smu->adev->dev, "Failed to set power limit value\n"); 366 } 367 368 /* set the user dpm clock configurations */ 369 if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { 370 enum smu_clk_type clk_type; 371 372 for (clk_type = 0; clk_type < SMU_CLK_COUNT; clk_type++) { 373 /* 374 * Iterate over smu clk type and force the saved user clk 375 * configs, skip if clock dependency is enabled 376 */ 377 if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) && 378 smu->user_dpm_profile.clk_mask[clk_type]) { 379 ret = smu_force_smuclk_levels(smu, clk_type, 380 smu->user_dpm_profile.clk_mask[clk_type]); 381 if (ret) 382 dev_err(smu->adev->dev, 383 "Failed to set clock type = %d\n", clk_type); 384 } 385 } 386 } 387 388 /* set the user dpm fan configurations */ 389 if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL || 390 smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) { 391 ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode); 392 if (ret != -EOPNOTSUPP) { 393 smu->user_dpm_profile.fan_speed_pwm = 0; 394 smu->user_dpm_profile.fan_speed_rpm = 0; 395 smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO; 396 dev_err(smu->adev->dev, "Failed to set manual fan control mode\n"); 397 } 398 399 if (smu->user_dpm_profile.fan_speed_pwm) { 400 ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm); 401 if (ret != -EOPNOTSUPP) 402 

/**
 * smu_restore_dpm_user_profile - reinstate user dpm profile
 *
 * @smu: smu_context pointer
 *
 * Restore the saved user power configurations, including power limit,
 * clock frequencies, fan control mode and fan speed.
 */
static void smu_restore_dpm_user_profile(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->adev->in_suspend)
		return;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return;

	/* Enable restore flag */
	smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;

	/* set the user dpm power limit */
	if (smu->user_dpm_profile.power_limit) {
		ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit);
		if (ret)
			dev_err(smu->adev->dev, "Failed to set power limit value\n");
	}

	/* set the user dpm clock configurations */
	if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
		enum smu_clk_type clk_type;

		for (clk_type = 0; clk_type < SMU_CLK_COUNT; clk_type++) {
			/*
			 * Iterate over smu clk type and force the saved user clk
			 * configs, skip if clock dependency is enabled
			 */
			if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) &&
			    smu->user_dpm_profile.clk_mask[clk_type]) {
				ret = smu_force_smuclk_levels(smu, clk_type,
						smu->user_dpm_profile.clk_mask[clk_type]);
				if (ret)
					dev_err(smu->adev->dev,
						"Failed to set clock type = %d\n", clk_type);
			}
		}
	}

	/* set the user dpm fan configurations */
	if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL ||
	    smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) {
		ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode);
		if (ret && ret != -EOPNOTSUPP) {
			smu->user_dpm_profile.fan_speed_pwm = 0;
			smu->user_dpm_profile.fan_speed_rpm = 0;
			smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO;
			dev_err(smu->adev->dev, "Failed to set manual fan control mode\n");
		}

		if (smu->user_dpm_profile.fan_speed_pwm) {
			ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm);
			if (ret && ret != -EOPNOTSUPP)
				dev_err(smu->adev->dev, "Failed to set manual fan speed in pwm\n");
		}

		if (smu->user_dpm_profile.fan_speed_rpm) {
			ret = smu_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm);
			if (ret && ret != -EOPNOTSUPP)
				dev_err(smu->adev->dev, "Failed to set manual fan speed in rpm\n");
		}
	}

	/* Restore user customized OD settings */
	if (smu->user_dpm_profile.user_od) {
		if (smu->ppt_funcs->restore_user_od_settings) {
			ret = smu->ppt_funcs->restore_user_od_settings(smu);
			if (ret)
				dev_err(smu->adev->dev, "Failed to upload customized OD settings\n");
		}
	}

	/* Disable restore flag */
	smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
}

static int smu_get_power_num_states(void *handle,
				    struct pp_states_info *state_info)
{
	if (!state_info)
		return -EINVAL;

	/* power states are not supported */
	memset(state_info, 0, sizeof(struct pp_states_info));
	state_info->nums = 1;
	state_info->states[0] = POWER_STATE_TYPE_DEFAULT;

	return 0;
}

bool is_support_sw_smu(struct amdgpu_device *adev)
{
	/* vega20 is 11.0.2, but it's supported via the powerplay code */
	if (adev->asic_type == CHIP_VEGA20)
		return false;

	if (adev->ip_versions[MP1_HWIP][0] >= IP_VERSION(11, 0, 0))
		return true;

	return false;
}

bool is_support_cclk_dpm(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
		return false;

	return true;
}

static int smu_sys_get_pp_table(void *handle,
				char **table)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
		return -EINVAL;

	if (smu_table->hardcode_pptable)
		*table = smu_table->hardcode_pptable;
	else
		*table = smu_table->power_play_table;

	return smu_table->power_play_table_size;
}

static int smu_sys_set_pp_table(void *handle,
				const char *buf,
				size_t size)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (header->usStructureSize != size) {
		dev_err(smu->adev->dev, "pp table size not matched!\n");
		return -EIO;
	}

	if (!smu_table->hardcode_pptable) {
		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
		if (!smu_table->hardcode_pptable)
			return -ENOMEM;
	}

	memcpy(smu_table->hardcode_pptable, buf, size);
	smu_table->power_play_table = smu_table->hardcode_pptable;
	smu_table->power_play_table_size = size;

	/*
	 * Special hw_fini action (for Navi1x, the DPMs disablement will be
	 * skipped) may be needed for custom pptable uploading.
	 */
	smu->uploading_custom_pp_table = true;

	ret = smu_reset(smu);
	if (ret)
		dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);

	smu->uploading_custom_pp_table = false;

	return ret;
}

static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
	int ret = 0;

	/*
	 * With SCPM enabled, the allowed featuremasks setting (via
	 * PPSMC_MSG_SetAllowedFeaturesMaskLow/High) is not permitted.
	 * That means there is no way to let PMFW know the settings below.
	 * Thus, we just assume all the features are allowed under
	 * such scenario.
	 */
	if (smu->adev->scpm_enabled) {
		bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
		return 0;
	}

	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);

	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
					   SMU_FEATURE_MAX/32);
	if (ret)
		return ret;

	bitmap_or(feature->allowed, feature->allowed,
		  (unsigned long *)allowed_feature_mask,
		  feature->feature_num);

	return ret;
}

static int smu_set_funcs(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
		smu->od_enabled = true;

	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
		navi10_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
		sienna_cichlid_set_ppt_funcs(smu);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		renoir_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 5, 0):
		vangogh_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 8):
		yellow_carp_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 4):
		smu_v13_0_4_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 5):
		smu_v13_0_5_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 8):
		cyan_skillfish_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 2):
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		arcturus_set_ppt_funcs(smu);
		/* OD is not supported on Arcturus */
		smu->od_enabled = false;
		break;
	case IP_VERSION(13, 0, 2):
		aldebaran_set_ppt_funcs(smu);
		/* Enable pp_od_clk_voltage node */
		smu->od_enabled = true;
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 10):
		smu_v13_0_0_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 7):
		smu_v13_0_7_set_ppt_funcs(smu);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
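
/*
 * Illustrative only: IP_VERSION() packs major/minor/revision into a single
 * integer, which is why code elsewhere in this file can use ordered
 * comparisons such as ">= IP_VERSION(11, 0, 0)". A sketch, assuming the
 * conventional ((major << 16) | (minor << 8) | rev) encoding:
 *
 *	IP_VERSION(11, 0, 7)  -> 0x000B0007   // Sienna Cichlid MP1 above
 *	IP_VERSION(13, 0, 10) -> 0x000D000A
 */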

static int smu_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu;

	smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL);
	if (!smu)
		return -ENOMEM;

	smu->adev = adev;
	smu->pm_enabled = !!amdgpu_dpm;
	smu->is_apu = false;
	smu->smu_baco.state = SMU_BACO_STATE_EXIT;
	smu->smu_baco.platform_support = false;
	smu->user_dpm_profile.fan_mode = -1;

	mutex_init(&smu->message_lock);

	adev->powerplay.pp_handle = smu;
	adev->powerplay.pp_funcs = &swsmu_pm_funcs;

	return smu_set_funcs(adev);
}

static int smu_set_default_dpm_table(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int vcn_gate, jpeg_gate;
	int ret = 0;

	if (!smu->ppt_funcs->set_default_dpm_table)
		return 0;

	vcn_gate = atomic_read(&power_gate->vcn_gated);
	jpeg_gate = atomic_read(&power_gate->jpeg_gated);

	ret = smu_dpm_set_vcn_enable(smu, true);
	if (ret)
		return ret;

	ret = smu_dpm_set_jpeg_enable(smu, true);
	if (ret)
		goto err_out;

	ret = smu->ppt_funcs->set_default_dpm_table(smu);
	if (ret)
		dev_err(smu->adev->dev,
			"Failed to setup default dpm clock tables!\n");

	smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
err_out:
	smu_dpm_set_vcn_enable(smu, !vcn_gate);
	return ret;
}

static int smu_apply_default_config_table_settings(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	ret = smu_get_default_config_table_settings(smu,
						    &adev->pm.config_table);
	if (ret)
		return ret;

	return smu_set_config_table(smu, &adev->pm.config_table);
}

static int smu_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	smu_set_fine_grain_gfx_freq_parameters(smu);

	if (!smu->pm_enabled)
		return 0;

	ret = smu_post_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to post smu init!\n");
		return ret;
	}

	if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 1)) ||
	    (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 3)))
		return 0;

	if (!amdgpu_sriov_vf(adev) || smu->od_enabled) {
		ret = smu_set_default_od_settings(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to setup default OD settings!\n");
			return ret;
		}
	}

	ret = smu_populate_umd_state_clk(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
		return ret;
	}

	ret = smu_get_asic_power_limits(smu,
					&smu->current_power_limit,
					&smu->default_power_limit,
					&smu->max_power_limit);
	if (ret) {
		dev_err(adev->dev, "Failed to get asic power limits!\n");
		return ret;
	}

	if (!amdgpu_sriov_vf(adev))
		smu_get_unique_id(smu);

	smu_get_fan_parameters(smu);

	smu_handle_task(smu,
			smu->smu_dpm.dpm_level,
			AMD_PP_TASK_COMPLETE_INIT);

	ret = smu_apply_default_config_table_settings(smu);
	if (ret && (ret != -EOPNOTSUPP)) {
		dev_err(adev->dev, "Failed to apply default DriverSmuConfig settings!\n");
		return ret;
	}

	smu_restore_dpm_user_profile(smu);

	return 0;
}
dev_err(adev->dev, "VRAM allocation for tool table failed!\n"); 774 return ret; 775 } 776 } 777 778 /* VRAM allocation for driver table */ 779 for (i = 0; i < SMU_TABLE_COUNT; i++) { 780 if (tables[i].size == 0) 781 continue; 782 783 if (i == SMU_TABLE_PMSTATUSLOG) 784 continue; 785 786 if (max_table_size < tables[i].size) 787 max_table_size = tables[i].size; 788 } 789 790 driver_table->size = max_table_size; 791 driver_table->align = PAGE_SIZE; 792 driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM; 793 794 ret = amdgpu_bo_create_kernel(adev, 795 driver_table->size, 796 driver_table->align, 797 driver_table->domain, 798 &driver_table->bo, 799 &driver_table->mc_address, 800 &driver_table->cpu_addr); 801 if (ret) { 802 dev_err(adev->dev, "VRAM allocation for driver table failed!\n"); 803 if (tables[SMU_TABLE_PMSTATUSLOG].mc_address) 804 amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo, 805 &tables[SMU_TABLE_PMSTATUSLOG].mc_address, 806 &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr); 807 } 808 809 return ret; 810 } 811 812 static int smu_fini_fb_allocations(struct smu_context *smu) 813 { 814 struct smu_table_context *smu_table = &smu->smu_table; 815 struct smu_table *tables = smu_table->tables; 816 struct smu_table *driver_table = &(smu_table->driver_table); 817 818 if (tables[SMU_TABLE_PMSTATUSLOG].mc_address) 819 amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo, 820 &tables[SMU_TABLE_PMSTATUSLOG].mc_address, 821 &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr); 822 823 amdgpu_bo_free_kernel(&driver_table->bo, 824 &driver_table->mc_address, 825 &driver_table->cpu_addr); 826 827 return 0; 828 } 829 830 /** 831 * smu_alloc_memory_pool - allocate memory pool in the system memory 832 * 833 * @smu: amdgpu_device pointer 834 * 835 * This memory pool will be used for SMC use and msg SetSystemVirtualDramAddr 836 * and DramLogSetDramAddr can notify it changed. 837 * 838 * Returns 0 on success, error on failure. 

static int smu_fini_fb_allocations(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);

	if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
		amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
				      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
				      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);

	amdgpu_bo_free_kernel(&driver_table->bo,
			      &driver_table->mc_address,
			      &driver_table->cpu_addr);

	return 0;
}

/**
 * smu_alloc_memory_pool - allocate memory pool in the system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool will be used for SMC use and the SMU is notified of
 * its address via the SetSystemVirtualDramAddr and DramLogSetDramAddr
 * messages.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint64_t pool_size = smu->pool_size;
	int ret = 0;

	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	memory_pool->size = pool_size;
	memory_pool->align = PAGE_SIZE;
	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

	switch (pool_size) {
	case SMU_MEMORY_POOL_SIZE_256_MB:
	case SMU_MEMORY_POOL_SIZE_512_MB:
	case SMU_MEMORY_POOL_SIZE_1_GB:
	case SMU_MEMORY_POOL_SIZE_2_GB:
		ret = amdgpu_bo_create_kernel(adev,
					      memory_pool->size,
					      memory_pool->align,
					      memory_pool->domain,
					      &memory_pool->bo,
					      &memory_pool->mc_address,
					      &memory_pool->cpu_addr);
		if (ret)
			dev_err(adev->dev, "VRAM allocation for dramlog failed!\n");
		break;
	default:
		break;
	}

	return ret;
}

static int smu_free_memory_pool(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;

	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
		return 0;

	amdgpu_bo_free_kernel(&memory_pool->bo,
			      &memory_pool->mc_address,
			      &memory_pool->cpu_addr);

	memset(memory_pool, 0, sizeof(struct smu_table));

	return 0;
}
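
/*
 * Illustrative only: smu->pool_size comes from adev->pm.smu_prv_buffer_size
 * (see smu_sw_init() below), which is derived from the amdgpu module
 * parameter that reserves GTT for SMU debug usage, e.g.:
 *
 *	modprobe amdgpu smu_memory_pool_size=1   // assumption: 1 selects 256 MB
 *
 * With the pool disabled (the default), pool_size is
 * SMU_MEMORY_POOL_SIZE_ZERO and smu_alloc_memory_pool() is a no-op.
 */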

static int smu_alloc_dummy_read_table(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *dummy_read_1_table =
			&smu_table->dummy_read_1_table;
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	dummy_read_1_table->size = 0x40000;
	dummy_read_1_table->align = PAGE_SIZE;
	dummy_read_1_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

	ret = amdgpu_bo_create_kernel(adev,
				      dummy_read_1_table->size,
				      dummy_read_1_table->align,
				      dummy_read_1_table->domain,
				      &dummy_read_1_table->bo,
				      &dummy_read_1_table->mc_address,
				      &dummy_read_1_table->cpu_addr);
	if (ret)
		dev_err(adev->dev, "VRAM allocation for dummy read table failed!\n");

	return ret;
}

static void smu_free_dummy_read_table(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *dummy_read_1_table =
			&smu_table->dummy_read_1_table;

	amdgpu_bo_free_kernel(&dummy_read_1_table->bo,
			      &dummy_read_1_table->mc_address,
			      &dummy_read_1_table->cpu_addr);

	memset(dummy_read_1_table, 0, sizeof(struct smu_table));
}

static int smu_smc_table_sw_init(struct smu_context *smu)
{
	int ret;

	/*
	 * Create smu_table structure, and init smc tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS and TABLE_SMU_METRICS.
	 */
	ret = smu_init_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smc tables!\n");
		return ret;
	}

	/*
	 * Create smu_power_context structure, and allocate smu_dpm_context and
	 * context size to fill the smu_power_context data.
	 */
	ret = smu_init_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smu power!\n");
		return ret;
	}

	/*
	 * Allocate vram bos to store smc table contents.
	 */
	ret = smu_init_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_alloc_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_alloc_dummy_read_table(smu);
	if (ret)
		return ret;

	ret = smu_i2c_init(smu);
	if (ret)
		return ret;

	return 0;
}

static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret;

	smu_i2c_fini(smu);

	smu_free_dummy_read_table(smu);

	ret = smu_free_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_fini_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_fini_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to fini smu power!\n");
		return ret;
	}

	ret = smu_fini_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to fini smc tables!\n");
		return ret;
	}

	return 0;
}

static void smu_throttling_logging_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       throttling_logging_work);

	smu_log_thermal_throttling(smu);
}

static void smu_interrupt_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       interrupt_work);

	if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
		smu->ppt_funcs->interrupt_work(smu);
}

static int smu_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	smu->pool_size = adev->pm.smu_prv_buffer_size;
	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);

	INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
	INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
	atomic64_set(&smu->throttle_int_counter, 0);
	smu->watermarks_bitmap = 0;
	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
	atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);

	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;

	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
	smu->display_config = &adev->pm.pm_display_cfg;

	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;

	ret = smu_init_microcode(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to load smu firmware!\n");
		return ret;
	}

	ret = smu_smc_table_sw_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw init smc table!\n");
		return ret;
	}

	/* get boot_values from vbios to set revision, gfxclk, etc. */
	ret = smu_get_vbios_bootup_values(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n");
		return ret;
	}

	ret = smu_init_pptable_microcode(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup pptable firmware!\n");
		return ret;
	}

	ret = smu_register_irq_handler(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to register smc irq handler!\n");
		return ret;
	}

	/* If there is no way to query fan control mode, fan control is not supported */
	if (!smu->ppt_funcs->get_fan_control_mode)
		smu->adev->pm.no_fan = true;

	return 0;
}

static int smu_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	ret = smu_smc_table_sw_fini(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw fini smc table!\n");
		return ret;
	}

	smu_fini_microcode(smu);

	return 0;
}

static int smu_get_thermal_temperature_range(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_temperature_range *range =
				&smu->thermal_range;
	int ret = 0;

	if (!smu->ppt_funcs->get_thermal_temperature_range)
		return 0;

	ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);
	if (ret)
		return ret;

	adev->pm.dpm.thermal.min_temp = range->min;
	adev->pm.dpm.thermal.max_temp = range->max;
	adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max;
	adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min;
	adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max;
	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max;
	adev->pm.dpm.thermal.min_mem_temp = range->mem_min;
	adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max;
	adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max;

	return ret;
}

static int smu_smc_hw_setup(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	struct amdgpu_device *adev = smu->adev;
	uint32_t pcie_gen = 0, pcie_width = 0;
	uint64_t features_supported;
	int ret = 0;

	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 0, 12):
		if (adev->in_suspend && smu_is_dpm_running(smu)) {
			dev_info(adev->dev, "dpm has been enabled\n");
			ret = smu_system_features_control(smu, true);
			if (ret)
				dev_err(adev->dev, "Failed system features control!\n");
			return ret;
		}
		break;
	default:
		break;
	}

	ret = smu_init_display_count(smu, 0);
	if (ret) {
		dev_info(adev->dev, "Failed to pre-set display count as 0!\n");
		return ret;
	}

	ret = smu_set_driver_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDriverDramAddr!\n");
		return ret;
	}

	/*
	 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
	 */
	ret = smu_set_tool_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetToolsDramAddr!\n");
		return ret;
	}

	/*
	 * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr messages
	 * to notify the SMU of the pool location.
	 */
	ret = smu_notify_memory_pool_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n");
		return ret;
	}

	ret = smu_setup_pptable(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup pptable!\n");
		return ret;
	}

	/* smu_dump_pptable(smu); */

	/*
	 * With SCPM enabled, PSP is responsible for the PPTable transferring
	 * (to SMU). Driver involvement is neither needed nor permitted.
	 */
	if (!adev->scpm_enabled) {
		/*
		 * Copy pptable bo in the vram to smc with SMU MSGs such as
		 * SetDriverDramAddr and TransferTableDram2Smu.
		 */
		ret = smu_write_pptable(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to transfer pptable to SMC!\n");
			return ret;
		}
	}

	/* issue Run*Btc msg */
	ret = smu_run_btc(smu);
	if (ret)
		return ret;

	/*
	 * With SCPM enabled, these actions (and relevant messages) are
	 * neither needed nor permitted.
	 */
	if (!adev->scpm_enabled) {
		ret = smu_feature_set_allowed_mask(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to set driver allowed features mask!\n");
			return ret;
		}
	}

	ret = smu_system_features_control(smu, true);
	if (ret) {
		dev_err(adev->dev, "Failed to enable requested dpm features!\n");
		return ret;
	}

	ret = smu_feature_get_enabled_mask(smu, &features_supported);
	if (ret) {
		dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
		return ret;
	}
	bitmap_copy(feature->supported,
		    (unsigned long *)&features_supported,
		    feature->feature_num);

	if (!smu_is_dpm_running(smu))
		dev_info(adev->dev, "dpm has been disabled\n");

	/*
	 * Set initialized values (get from vbios) to dpm tables context such as
	 * gfxclk, memclk, dcefclk, etc. And enable the DPM feature for each
	 * type of clks.
	 */
	ret = smu_set_default_dpm_table(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
		return ret;
	}

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
	 * Bit 7:0:   PCIE lane width, 1 to 6 corresponds to x1 to x16
	 */
	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
		pcie_width = 6;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
		pcie_width = 5;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
		pcie_width = 4;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
		pcie_width = 3;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
		pcie_width = 2;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
		pcie_width = 1;
	ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
	if (ret) {
		dev_err(adev->dev, "Attempt to override pcie params failed!\n");
		return ret;
	}

	ret = smu_get_thermal_temperature_range(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
		return ret;
	}

	ret = smu_enable_thermal_alert(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to enable thermal alert!\n");
		return ret;
	}

	ret = smu_notify_display_change(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to notify display change!\n");
		return ret;
	}

	/*
	 * Set min deep sleep dce fclk with bootup value from vbios via
	 * SetMinDeepSleepDcefclk MSG.
	 */
	ret = smu_set_min_dcef_deep_sleep(smu,
					  smu->smu_table.boot_values.dcefclk / 100);

	return ret;
}
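
/*
 * Worked example (illustrative): on a board whose strongest supported link
 * is PCIe GEN4 x16, the two if/else chains above select pcie_gen = 3 and
 * pcie_width = 6. Per the bit layout documented above, an override message
 * carrying those values would encode (3 << 8) | 6 = 0x0306 in its low 16
 * bits; how the message is actually assembled is left to the per-ASIC
 * update_pcie_parameters implementation.
 */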

static int smu_start_smc_engine(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		if (adev->ip_versions[MP1_HWIP][0] < IP_VERSION(11, 0, 0)) {
			if (smu->ppt_funcs->load_microcode) {
				ret = smu->ppt_funcs->load_microcode(smu);
				if (ret)
					return ret;
			}
		}
	}

	if (smu->ppt_funcs->check_fw_status) {
		ret = smu->ppt_funcs->check_fw_status(smu);
		if (ret) {
			dev_err(adev->dev, "SMC is not ready\n");
			return ret;
		}
	}

	/*
	 * Send msg GetDriverIfVersion to check if the return value is equal
	 * to the DRIVER_IF_VERSION of the smc header.
	 */
	ret = smu_check_fw_version(smu);
	if (ret)
		return ret;

	return ret;
}

static int smu_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
		smu->pm_enabled = false;
		return 0;
	}

	ret = smu_start_smc_engine(smu);
	if (ret) {
		dev_err(adev->dev, "SMC engine is not correctly up!\n");
		return ret;
	}

	if (smu->is_apu) {
		if ((smu->ppt_funcs->set_gfx_power_up_by_imu) &&
		    likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
			ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
			if (ret) {
				dev_err(adev->dev, "Failed to enable gfx imu!\n");
				return ret;
			}
		}

		smu_dpm_set_vcn_enable(smu, true);
		smu_dpm_set_jpeg_enable(smu, true);
		smu_set_gfx_cgpg(smu, true);
	}

	if (!smu->pm_enabled)
		return 0;

	ret = smu_get_driver_allowed_feature_mask(smu);
	if (ret)
		return ret;

	ret = smu_smc_hw_setup(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup smc hw!\n");
		return ret;
	}

	/*
	 * Move maximum sustainable clock retrieving here considering
	 * 1. It is not needed on resume (from S3).
	 * 2. DAL settings come between .hw_init and .late_init of SMU.
	 *    And DAL needs to know the maximum sustainable clocks. Thus
	 *    it cannot be put in .late_init().
	 */
	ret = smu_init_max_sustainable_clocks(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to init max sustainable clocks!\n");
		return ret;
	}

	adev->pm.dpm_enabled = true;

	dev_info(adev->dev, "SMU is initialized successfully!\n");

	return 0;
}

static int smu_disable_dpms(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;
	bool use_baco = !smu->is_apu &&
		((amdgpu_in_reset(adev) &&
		  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
		 ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));

	/*
	 * For SMU 13.0.0 and 13.0.7, PMFW will handle the DPM features
	 * (disablement or others) properly on suspend/reset/unload.
	 * Driver involvement may cause some unexpected issues.
	 */
	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 7):
		return 0;
	default:
		break;
	}

	/*
	 * For custom pptable uploading, skip the DPM features
	 * disable process on Navi1x ASICs.
	 *   - As the gfx related features are under control of
	 *     RLC on those ASICs. RLC reinitialization will be
	 *     needed to reenable them. That will cost much more
	 *     effort.
	 *
	 *   - SMU firmware can handle the DPM reenablement
	 *     properly.
	 */
	if (smu->uploading_custom_pp_table) {
		switch (adev->ip_versions[MP1_HWIP][0]) {
		case IP_VERSION(11, 0, 0):
		case IP_VERSION(11, 0, 5):
		case IP_VERSION(11, 0, 9):
		case IP_VERSION(11, 0, 7):
		case IP_VERSION(11, 0, 11):
		case IP_VERSION(11, 5, 0):
		case IP_VERSION(11, 0, 12):
		case IP_VERSION(11, 0, 13):
			return 0;
		default:
			break;
		}
	}

	/*
	 * For Sienna_Cichlid, PMFW will handle the features disablement properly
	 * on BACO in. Driver involvement is unnecessary.
	 */
	if (use_baco) {
		switch (adev->ip_versions[MP1_HWIP][0]) {
		case IP_VERSION(11, 0, 7):
		case IP_VERSION(11, 0, 0):
		case IP_VERSION(11, 0, 5):
		case IP_VERSION(11, 0, 9):
		case IP_VERSION(13, 0, 7):
			return 0;
		default:
			break;
		}
	}

	/*
	 * For gpu reset, runpm and hibernation through BACO,
	 * BACO feature has to be kept enabled.
	 */
	if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
		ret = smu_disable_all_features_with_exception(smu,
							      SMU_FEATURE_BACO_BIT);
		if (ret)
			dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
	} else {
		/* DisableAllSmuFeatures message is not permitted with SCPM enabled */
		if (!adev->scpm_enabled) {
			ret = smu_system_features_control(smu, false);
			if (ret)
				dev_err(adev->dev, "Failed to disable smu features.\n");
		}
	}

	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) &&
	    adev->gfx.rlc.funcs->stop)
		adev->gfx.rlc.funcs->stop(adev);

	return ret;
}

static int smu_smc_hw_cleanup(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	cancel_work_sync(&smu->throttling_logging_work);
	cancel_work_sync(&smu->interrupt_work);

	ret = smu_disable_thermal_alert(smu);
	if (ret) {
		dev_err(adev->dev, "Fail to disable thermal alert!\n");
		return ret;
	}

	ret = smu_disable_dpms(smu);
	if (ret) {
		dev_err(adev->dev, "Fail to disable dpm features!\n");
		return ret;
	}

	return 0;
}

static int smu_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	smu_dpm_set_vcn_enable(smu, false);
	smu_dpm_set_jpeg_enable(smu, false);

	adev->vcn.cur_state = AMD_PG_STATE_GATE;
	adev->jpeg.cur_state = AMD_PG_STATE_GATE;

	if (!smu->pm_enabled)
		return 0;

	adev->pm.dpm_enabled = false;

	return smu_smc_hw_cleanup(smu);
}

static void smu_late_fini(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct smu_context *smu = adev->powerplay.pp_handle;

	kfree(smu);
}

static int smu_reset(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;

	ret = smu_hw_fini(adev);
	if (ret)
		return ret;

	ret = smu_hw_init(adev);
	if (ret)
		return ret;

	ret = smu_late_init(adev);
	if (ret)
		return ret;

	return 0;
}

static int smu_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;
	uint64_t count;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (!smu->pm_enabled)
		return 0;

	adev->pm.dpm_enabled = false;

	ret = smu_smc_hw_cleanup(smu);
	if (ret)
		return ret;

	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);

	smu_set_gfx_cgpg(smu, false);

	/*
	 * PMFW resets entrycount when device is suspended, so we save the
	 * last value to be used when we resume to keep it consistent
	 */
	ret = smu_get_entrycount_gfxoff(smu, &count);
	if (!ret)
		adev->gfx.gfx_off_entrycount = count;

	return 0;
}

static int smu_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (!smu->pm_enabled)
		return 0;

	dev_info(adev->dev, "SMU is resuming...\n");

	ret = smu_start_smc_engine(smu);
	if (ret) {
		dev_err(adev->dev, "SMC engine is not correctly up!\n");
		return ret;
	}

	ret = smu_smc_hw_setup(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup smc hw!\n");
		return ret;
	}

	smu_set_gfx_cgpg(smu, true);

	smu->disable_uclk_switch = 0;

	adev->pm.dpm_enabled = true;

	dev_info(adev->dev, "SMU is resumed successfully!\n");

	return 0;
}

static int smu_display_configuration_change(void *handle,
					    const struct amd_pp_display_configuration *display_config)
{
	struct smu_context *smu = handle;
	int index = 0;
	int num_of_active_display = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!display_config)
		return -EINVAL;

	smu_set_min_dcef_deep_sleep(smu,
				    display_config->min_dcef_deep_sleep_set_clk / 100);

	for (index = 0; index < display_config->num_path_including_non_display; index++) {
		if (display_config->displays[index].controller_id != 0)
			num_of_active_display++;
	}

	return 0;
}

static int smu_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int smu_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}

static int smu_enable_umd_pstate(void *handle,
				 enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	struct smu_context *smu = (struct smu_context *)(handle);
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg */
		if (*level & profile_mode_mask) {
			smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
			smu_gpo_control(smu, false);
			smu_gfx_ulv_control(smu, false);
			smu_deep_sleep_control(smu, false);
			amdgpu_asic_update_umd_stable_pstate(smu->adev, true);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg */
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = smu_dpm_ctx->saved_dpm_level;
			amdgpu_asic_update_umd_stable_pstate(smu->adev, false);
			smu_deep_sleep_control(smu, true);
			smu_gfx_ulv_control(smu, true);
			smu_gpo_control(smu, true);
		}
	}

	return 0;
}
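
/*
 * Illustrative only: the profile_mode_mask transitions above are driven by
 * userspace writes to the power_dpm_force_performance_level sysfs file.
 * A hypothetical sequence:
 *
 *	echo profile_peak > power_dpm_force_performance_level
 *	  -> enters UMD pstate: GPO/ULV/deep-sleep are disabled and the
 *	     previous level is saved
 *	echo auto > power_dpm_force_performance_level
 *	  -> exits UMD pstate: gfx power features are re-enabled (the saved
 *	     level is only re-applied for the special profile_exit request)
 */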

static int smu_bump_power_profile_mode(struct smu_context *smu,
				       long *param,
				       uint32_t param_size)
{
	int ret = 0;

	if (smu->ppt_funcs->set_power_profile_mode)
		ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);

	return ret;
}

static int smu_adjust_power_state_dynamic(struct smu_context *smu,
					  enum amd_dpm_forced_level level,
					  bool skip_display_settings)
{
	int ret = 0;
	int index = 0;
	long workload;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!skip_display_settings) {
		ret = smu_display_config_changed(smu);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to change display config!");
			return ret;
		}
	}

	ret = smu_apply_clocks_adjust_rules(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!");
		return ret;
	}

	if (!skip_display_settings) {
		ret = smu_notify_smc_display_config(smu);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to notify smc display config!");
			return ret;
		}
	}

	if (smu_dpm_ctx->dpm_level != level) {
		ret = smu_asic_set_performance_level(smu, level);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to set performance level!");
			return ret;
		}

		/* update the saved copy */
		smu_dpm_ctx->dpm_level = level;
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
	    smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];

		if (smu->power_profile_mode != workload)
			smu_bump_power_profile_mode(smu, &workload, 0);
	}

	return ret;
}

static int smu_handle_task(struct smu_context *smu,
			   enum amd_dpm_forced_level level,
			   enum amd_pp_task task_id)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	switch (task_id) {
	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
		ret = smu_pre_display_config_changed(smu);
		if (ret)
			return ret;
		ret = smu_adjust_power_state_dynamic(smu, level, false);
		break;
	case AMD_PP_TASK_COMPLETE_INIT:
	case AMD_PP_TASK_READJUST_POWER_STATE:
		ret = smu_adjust_power_state_dynamic(smu, level, true);
		break;
	default:
		break;
	}

	return ret;
}

static int smu_handle_dpm_task(void *handle,
			       enum amd_pp_task task_id,
			       enum amd_pm_state_type *user_state)
{
	struct smu_context *smu = handle;
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	return smu_handle_task(smu, smu_dpm->dpm_level, task_id);
}
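
/*
 * Worked example (illustrative) for the fls()-based selection used above
 * and in smu_switch_power_profile() below: workload_mask holds one bit per
 * enabled profile, indexed by workload_prority[]. With the priorities set
 * in smu_sw_init(), enabling COMPUTE (priority 5) on top of the default
 * gives
 *
 *	workload_mask = 0b100001          // bits 0 (default) and 5 (compute)
 *	fls(workload_mask) = 6            // highest set bit, 1-based
 *	index = 6 - 1 = 5
 *	workload = workload_setting[5]    // PP_SMC_POWER_PROFILE_COMPUTE
 *
 * i.e. the highest-priority active profile wins.
 */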

static int smu_switch_power_profile(void *handle,
				    enum PP_SMC_POWER_PROFILE type,
				    bool en)
{
	struct smu_context *smu = handle;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	long workload;
	uint32_t index;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
		return -EINVAL;

	if (!en) {
		smu->workload_mask &= ~(1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];
	} else {
		smu->workload_mask |= (1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
	    smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
		smu_bump_power_profile_mode(smu, &workload, 0);

	return 0;
}

static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
{
	struct smu_context *smu = handle;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	return smu_dpm_ctx->dpm_level;
}

static int smu_force_performance_level(void *handle,
				       enum amd_dpm_forced_level level)
{
	struct smu_context *smu = handle;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	ret = smu_enable_umd_pstate(smu, &level);
	if (ret)
		return ret;

	ret = smu_handle_task(smu, level,
			      AMD_PP_TASK_READJUST_POWER_STATE);

	/* reset user dpm clock state */
	if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask));
		smu->user_dpm_profile.clk_dependency = 0;
	}

	return ret;
}

static int smu_set_display_count(void *handle, uint32_t count)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_init_display_count(smu, count);
}

static int smu_force_smuclk_levels(struct smu_context *smu,
				   enum smu_clk_type clk_type,
				   uint32_t mask)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");
		return -EINVAL;
	}

	if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
		ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
		if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
			smu->user_dpm_profile.clk_mask[clk_type] = mask;
			smu_set_user_clk_dependencies(smu, clk_type);
		}
	}

	return ret;
}

static int smu_force_ppclk_levels(void *handle,
				  enum pp_clock_type type,
				  uint32_t mask)
{
	struct smu_context *smu = handle;
	enum smu_clk_type clk_type;

	switch (type) {
	case PP_SCLK:
		clk_type = SMU_SCLK; break;
	case PP_MCLK:
		clk_type = SMU_MCLK; break;
	case PP_PCIE:
		clk_type = SMU_PCIE; break;
	case PP_SOCCLK:
		clk_type = SMU_SOCCLK; break;
	case PP_FCLK:
		clk_type = SMU_FCLK; break;
	case PP_DCEFCLK:
		clk_type = SMU_DCEFCLK; break;
	case PP_VCLK:
		clk_type = SMU_VCLK; break;
	case PP_DCLK:
		clk_type = SMU_DCLK; break;
	case OD_SCLK:
		clk_type = SMU_OD_SCLK; break;
	case OD_MCLK:
		clk_type = SMU_OD_MCLK; break;
	case OD_VDDC_CURVE:
		clk_type = SMU_OD_VDDC_CURVE; break;
	case OD_RANGE:
		clk_type = SMU_OD_RANGE; break;
	default:
		return -EINVAL;
	}

	return smu_force_smuclk_levels(smu, clk_type, mask);
}
static int smu_force_ppclk_levels(void *handle,
				  enum pp_clock_type type,
				  uint32_t mask)
{
	struct smu_context *smu = handle;
	enum smu_clk_type clk_type;

	switch (type) {
	case PP_SCLK:
		clk_type = SMU_SCLK; break;
	case PP_MCLK:
		clk_type = SMU_MCLK; break;
	case PP_PCIE:
		clk_type = SMU_PCIE; break;
	case PP_SOCCLK:
		clk_type = SMU_SOCCLK; break;
	case PP_FCLK:
		clk_type = SMU_FCLK; break;
	case PP_DCEFCLK:
		clk_type = SMU_DCEFCLK; break;
	case PP_VCLK:
		clk_type = SMU_VCLK; break;
	case PP_DCLK:
		clk_type = SMU_DCLK; break;
	case OD_SCLK:
		clk_type = SMU_OD_SCLK; break;
	case OD_MCLK:
		clk_type = SMU_OD_MCLK; break;
	case OD_VDDC_CURVE:
		clk_type = SMU_OD_VDDC_CURVE; break;
	case OD_RANGE:
		clk_type = SMU_OD_RANGE; break;
	default:
		return -EINVAL;
	}

	return smu_force_smuclk_levels(smu, clk_type, mask);
}

/*
 * On system suspend or reset, the dpm_enabled flag is cleared so that
 * SMU services which are no longer supported get gated. However, setting
 * the mp1 state should still be allowed even with dpm_enabled cleared.
 */
static int smu_set_mp1_state(void *handle,
			     enum pp_mp1_state mp1_state)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs &&
	    smu->ppt_funcs->set_mp1_state)
		ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);

	return ret;
}

static int smu_set_df_cstate(void *handle,
			     enum pp_df_cstate state)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
		return 0;

	ret = smu->ppt_funcs->set_df_cstate(smu, state);
	if (ret)
		dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");

	return ret;
}
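/*
 * Illustrative sketch only: callers (e.g. RAS/XGMI paths) are expected to
 * disallow DF C-states around latency-sensitive traffic and restore them
 * afterwards:
 *
 *	smu_set_df_cstate(handle, DF_CSTATE_DISALLOW);
 *	... latency-sensitive XGMI transfers ...
 *	smu_set_df_cstate(handle, DF_CSTATE_ALLOW);
 */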
"AC" : "DC"); 2098 2099 return ret; 2100 } 2101 2102 const struct amd_ip_funcs smu_ip_funcs = { 2103 .name = "smu", 2104 .early_init = smu_early_init, 2105 .late_init = smu_late_init, 2106 .sw_init = smu_sw_init, 2107 .sw_fini = smu_sw_fini, 2108 .hw_init = smu_hw_init, 2109 .hw_fini = smu_hw_fini, 2110 .late_fini = smu_late_fini, 2111 .suspend = smu_suspend, 2112 .resume = smu_resume, 2113 .is_idle = NULL, 2114 .check_soft_reset = NULL, 2115 .wait_for_idle = NULL, 2116 .soft_reset = NULL, 2117 .set_clockgating_state = smu_set_clockgating_state, 2118 .set_powergating_state = smu_set_powergating_state, 2119 }; 2120 2121 const struct amdgpu_ip_block_version smu_v11_0_ip_block = 2122 { 2123 .type = AMD_IP_BLOCK_TYPE_SMC, 2124 .major = 11, 2125 .minor = 0, 2126 .rev = 0, 2127 .funcs = &smu_ip_funcs, 2128 }; 2129 2130 const struct amdgpu_ip_block_version smu_v12_0_ip_block = 2131 { 2132 .type = AMD_IP_BLOCK_TYPE_SMC, 2133 .major = 12, 2134 .minor = 0, 2135 .rev = 0, 2136 .funcs = &smu_ip_funcs, 2137 }; 2138 2139 const struct amdgpu_ip_block_version smu_v13_0_ip_block = 2140 { 2141 .type = AMD_IP_BLOCK_TYPE_SMC, 2142 .major = 13, 2143 .minor = 0, 2144 .rev = 0, 2145 .funcs = &smu_ip_funcs, 2146 }; 2147 2148 static int smu_load_microcode(void *handle) 2149 { 2150 struct smu_context *smu = handle; 2151 struct amdgpu_device *adev = smu->adev; 2152 int ret = 0; 2153 2154 if (!smu->pm_enabled) 2155 return -EOPNOTSUPP; 2156 2157 /* This should be used for non PSP loading */ 2158 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) 2159 return 0; 2160 2161 if (smu->ppt_funcs->load_microcode) { 2162 ret = smu->ppt_funcs->load_microcode(smu); 2163 if (ret) { 2164 dev_err(adev->dev, "Load microcode failed\n"); 2165 return ret; 2166 } 2167 } 2168 2169 if (smu->ppt_funcs->check_fw_status) { 2170 ret = smu->ppt_funcs->check_fw_status(smu); 2171 if (ret) { 2172 dev_err(adev->dev, "SMC is not ready\n"); 2173 return ret; 2174 } 2175 } 2176 2177 return ret; 2178 } 2179 2180 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled) 2181 { 2182 int ret = 0; 2183 2184 if (smu->ppt_funcs->set_gfx_cgpg) 2185 ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled); 2186 2187 return ret; 2188 } 2189 2190 static int smu_set_fan_speed_rpm(void *handle, uint32_t speed) 2191 { 2192 struct smu_context *smu = handle; 2193 int ret = 0; 2194 2195 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2196 return -EOPNOTSUPP; 2197 2198 if (!smu->ppt_funcs->set_fan_speed_rpm) 2199 return -EOPNOTSUPP; 2200 2201 if (speed == U32_MAX) 2202 return -EINVAL; 2203 2204 ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed); 2205 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { 2206 smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM; 2207 smu->user_dpm_profile.fan_speed_rpm = speed; 2208 2209 /* Override custom PWM setting as they cannot co-exist */ 2210 smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM; 2211 smu->user_dpm_profile.fan_speed_pwm = 0; 2212 } 2213 2214 return ret; 2215 } 2216 2217 /** 2218 * smu_get_power_limit - Request one of the SMU Power Limits 2219 * 2220 * @handle: pointer to smu context 2221 * @limit: requested limit is written back to this variable 2222 * @pp_limit_level: &pp_power_limit_level which limit of the power to return 2223 * @pp_power_type: &pp_power_type type of power 2224 * Return: 0 on success, <0 on error 2225 * 2226 */ 2227 int smu_get_power_limit(void *handle, 2228 uint32_t *limit, 2229 enum pp_power_limit_level pp_limit_level, 2230 enum pp_power_type 
/**
 * smu_get_power_limit - Request one of the SMU Power Limits
 *
 * @handle: pointer to smu context
 * @limit: requested limit is written back to this variable
 * @pp_limit_level: &pp_power_limit_level selecting which power limit to return
 * @pp_power_type: &pp_power_type selecting the type of power limit
 * Return: 0 on success, <0 on error
 *
 */
int smu_get_power_limit(void *handle,
			uint32_t *limit,
			enum pp_power_limit_level pp_limit_level,
			enum pp_power_type pp_power_type)
{
	struct smu_context *smu = handle;
	struct amdgpu_device *adev = smu->adev;
	enum smu_ppt_limit_level limit_level;
	uint32_t limit_type;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	switch (pp_power_type) {
	case PP_PWR_TYPE_SUSTAINED:
		limit_type = SMU_DEFAULT_PPT_LIMIT;
		break;
	case PP_PWR_TYPE_FAST:
		limit_type = SMU_FAST_PPT_LIMIT;
		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (pp_limit_level) {
	case PP_PWR_LIMIT_CURRENT:
		limit_level = SMU_PPT_LIMIT_CURRENT;
		break;
	case PP_PWR_LIMIT_DEFAULT:
		limit_level = SMU_PPT_LIMIT_DEFAULT;
		break;
	case PP_PWR_LIMIT_MAX:
		limit_level = SMU_PPT_LIMIT_MAX;
		break;
	case PP_PWR_LIMIT_MIN:
	default:
		return -EOPNOTSUPP;
	}

	if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
		if (smu->ppt_funcs->get_ppt_limit)
			ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
	} else {
		switch (limit_level) {
		case SMU_PPT_LIMIT_CURRENT:
			switch (adev->ip_versions[MP1_HWIP][0]) {
			case IP_VERSION(13, 0, 2):
			case IP_VERSION(11, 0, 7):
			case IP_VERSION(11, 0, 11):
			case IP_VERSION(11, 0, 12):
			case IP_VERSION(11, 0, 13):
				ret = smu_get_asic_power_limits(smu,
								&smu->current_power_limit,
								NULL,
								NULL);
				break;
			default:
				break;
			}
			*limit = smu->current_power_limit;
			break;
		case SMU_PPT_LIMIT_DEFAULT:
			*limit = smu->default_power_limit;
			break;
		case SMU_PPT_LIMIT_MAX:
			*limit = smu->max_power_limit;
			break;
		default:
			break;
		}
	}

	return ret;
}

static int smu_set_power_limit(void *handle, uint32_t limit)
{
	struct smu_context *smu = handle;
	uint32_t limit_type = limit >> 24;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	limit &= (1 << 24) - 1;
	if (limit_type != SMU_DEFAULT_PPT_LIMIT)
		if (smu->ppt_funcs->set_power_limit)
			return smu->ppt_funcs->set_power_limit(smu, limit_type, limit);

	if (limit > smu->max_power_limit) {
		dev_err(smu->adev->dev,
			"New power limit (%d) is over the max allowed %d\n",
			limit, smu->max_power_limit);
		return -EINVAL;
	}

	if (!limit)
		limit = smu->current_power_limit;

	if (smu->ppt_funcs->set_power_limit) {
		ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
		if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
			smu->user_dpm_profile.power_limit = limit;
	}

	return ret;
}
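/*
 * Illustrative sketch only: the 32-bit limit argument packs the limit type
 * into bits 31:24 and the power value into bits 23:0. A hypothetical caller
 * requesting a fast PPT limit of 220 might therefore do:
 *
 *	uint32_t limit = (SMU_FAST_PPT_LIMIT << 24) | 220;
 *
 *	smu_set_power_limit(handle, limit);
 */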
static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->print_clk_levels)
		ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);

	return ret;
}

static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type)
{
	enum smu_clk_type clk_type;

	switch (type) {
	case PP_SCLK:
		clk_type = SMU_SCLK; break;
	case PP_MCLK:
		clk_type = SMU_MCLK; break;
	case PP_PCIE:
		clk_type = SMU_PCIE; break;
	case PP_SOCCLK:
		clk_type = SMU_SOCCLK; break;
	case PP_FCLK:
		clk_type = SMU_FCLK; break;
	case PP_DCEFCLK:
		clk_type = SMU_DCEFCLK; break;
	case PP_VCLK:
		clk_type = SMU_VCLK; break;
	case PP_DCLK:
		clk_type = SMU_DCLK; break;
	case OD_SCLK:
		clk_type = SMU_OD_SCLK; break;
	case OD_MCLK:
		clk_type = SMU_OD_MCLK; break;
	case OD_VDDC_CURVE:
		clk_type = SMU_OD_VDDC_CURVE; break;
	case OD_RANGE:
		clk_type = SMU_OD_RANGE; break;
	case OD_VDDGFX_OFFSET:
		clk_type = SMU_OD_VDDGFX_OFFSET; break;
	case OD_CCLK:
		clk_type = SMU_OD_CCLK; break;
	default:
		clk_type = SMU_CLK_COUNT; break;
	}

	return clk_type;
}

static int smu_print_ppclk_levels(void *handle,
				  enum pp_clock_type type,
				  char *buf)
{
	struct smu_context *smu = handle;
	enum smu_clk_type clk_type;

	clk_type = smu_convert_to_smuclk(type);
	if (clk_type == SMU_CLK_COUNT)
		return -EINVAL;

	return smu_print_smuclk_levels(smu, clk_type, buf);
}

static int smu_emit_ppclk_levels(void *handle, enum pp_clock_type type, char *buf, int *offset)
{
	struct smu_context *smu = handle;
	enum smu_clk_type clk_type;

	clk_type = smu_convert_to_smuclk(type);
	if (clk_type == SMU_CLK_COUNT)
		return -EINVAL;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->emit_clk_levels)
		return -ENOENT;

	return smu->ppt_funcs->emit_clk_levels(smu, clk_type, buf, offset);
}

static int smu_od_edit_dpm_table(void *handle,
				 enum PP_OD_DPM_TABLE_COMMAND type,
				 long *input, uint32_t size)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->od_edit_dpm_table)
		ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);

	return ret;
}
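/*
 * Illustrative sketch only: overdrive edits staged through this entry point
 * typically take effect after a commit, and the input layout is
 * ASIC-specific. A hypothetical caller raising an SCLK OD point might look
 * like:
 *
 *	long input[] = { 1, 1900 };  (OD point index, frequency in MHz)
 *
 *	smu_od_edit_dpm_table(handle, PP_OD_EDIT_SCLK_VDDC_TABLE, input, 2);
 *	smu_od_edit_dpm_table(handle, PP_OD_COMMIT_DPM_TABLE, NULL, 0);
 */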
static int smu_read_sensor(void *handle,
			   int sensor,
			   void *data,
			   int *size_arg)
{
	struct smu_context *smu = handle;
	struct smu_umd_pstate_table *pstate_table =
				&smu->pstate_table;
	int ret = 0;
	uint32_t *size, size_val;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!data || !size_arg)
		return -EINVAL;

	size_val = *size_arg;
	size = &size_val;

	if (smu->ppt_funcs->read_sensor)
		if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
			goto unlock;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
		ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data);
		*size = 8;
		break;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
		*(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
		*(uint32_t *)data = 0;
		*size = 4;
		break;
	default:
		*size = 0;
		ret = -EOPNOTSUPP;
		break;
	}

unlock:
	/* copy the uint32_t size value back to the caller's int */
	*size_arg = size_val;

	return ret;
}

static int smu_get_power_profile_mode(void *handle, char *buf)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
	    !smu->ppt_funcs->get_power_profile_mode)
		return -EOPNOTSUPP;
	if (!buf)
		return -EINVAL;

	return smu->ppt_funcs->get_power_profile_mode(smu, buf);
}

static int smu_set_power_profile_mode(void *handle,
				      long *param,
				      uint32_t param_size)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
	    !smu->ppt_funcs->set_power_profile_mode)
		return -EOPNOTSUPP;

	return smu_bump_power_profile_mode(smu, param, param_size);
}
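/*
 * Illustrative sketch only: the last element of the parameter array selects
 * the profile; any preceding elements carry custom-heuristic settings whose
 * layout is ASIC-specific. Selecting a predefined profile needs just the
 * selector element:
 *
 *	long param = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
 *
 *	smu_set_power_profile_mode(handle, &param, 0);
 */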
static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->get_fan_control_mode)
		return -EOPNOTSUPP;

	if (!fan_mode)
		return -EINVAL;

	*fan_mode = smu->ppt_funcs->get_fan_control_mode(smu);

	return 0;
}

static int smu_set_fan_control_mode(void *handle, u32 value)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->set_fan_control_mode)
		return -EOPNOTSUPP;

	if (value == U32_MAX)
		return -EINVAL;

	ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
	if (ret)
		goto out;

	if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
		smu->user_dpm_profile.fan_mode = value;

		/* reset user dpm fan speed */
		if (value != AMD_FAN_CTRL_MANUAL) {
			smu->user_dpm_profile.fan_speed_pwm = 0;
			smu->user_dpm_profile.fan_speed_rpm = 0;
			smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM);
		}
	}

out:
	return ret;
}

static int smu_get_fan_speed_pwm(void *handle, u32 *speed)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->get_fan_speed_pwm)
		return -EOPNOTSUPP;

	if (!speed)
		return -EINVAL;

	ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed);

	return ret;
}

static int smu_set_fan_speed_pwm(void *handle, u32 speed)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->set_fan_speed_pwm)
		return -EOPNOTSUPP;

	if (speed == U32_MAX)
		return -EINVAL;

	ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed);
	if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
		smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM;
		smu->user_dpm_profile.fan_speed_pwm = speed;

		/* Override custom RPM setting as they cannot co-exist */
		smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM;
		smu->user_dpm_profile.fan_speed_rpm = 0;
	}

	return ret;
}

static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->get_fan_speed_rpm)
		return -EOPNOTSUPP;

	if (!speed)
		return -EINVAL;

	ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);

	return ret;
}

static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_set_min_dcef_deep_sleep(smu, clk);
}

static int smu_get_clock_by_type_with_latency(void *handle,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_latency *clocks)
{
	struct smu_context *smu = handle;
	enum smu_clk_type clk_type;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->get_clock_by_type_with_latency) {
		switch (type) {
		case amd_pp_sys_clock:
			clk_type = SMU_GFXCLK;
			break;
		case amd_pp_mem_clock:
			clk_type = SMU_MCLK;
			break;
		case amd_pp_dcef_clock:
			clk_type = SMU_DCEFCLK;
			break;
		case amd_pp_disp_clock:
			clk_type = SMU_DISPCLK;
			break;
		default:
			dev_err(smu->adev->dev, "Invalid clock type!\n");
			return -EINVAL;
		}

		ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
	}

	return ret;
}

static int smu_display_clock_voltage_request(void *handle,
					     struct pp_display_clock_request *clock_req)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->display_clock_voltage_request)
		ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);

	return ret;
}

static int smu_display_disable_memory_clock_switch(void *handle,
						   bool disable_memory_clock_switch)
{
	struct smu_context *smu = handle;
	int ret = -EINVAL;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->display_disable_memory_clock_switch)
		ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);

	return ret;
}

static int smu_set_xgmi_pstate(void *handle,
			       uint32_t pstate)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->set_xgmi_pstate)
		ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);

	if (ret)
		dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");

	return ret;
}

static int smu_get_baco_capability(void *handle, bool *cap)
{
	struct smu_context *smu = handle;

	*cap = false;

	if (!smu->pm_enabled)
		return 0;

	if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
		*cap = smu->ppt_funcs->baco_is_support(smu);

	return 0;
}
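/*
 * Illustrative sketch only: BACO (Bus Active, Chip Off) is driven with
 * state 1 to enter and state 0 to exit, guarded by the capability query:
 *
 *	bool cap;
 *
 *	smu_get_baco_capability(handle, &cap);
 *	if (cap) {
 *		smu_baco_set_state(handle, 1);  (enter BACO)
 *		...
 *		smu_baco_set_state(handle, 0);  (exit BACO)
 *	}
 */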
static int smu_baco_set_state(void *handle, int state)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	if (state == 0) {
		if (smu->ppt_funcs->baco_exit)
			ret = smu->ppt_funcs->baco_exit(smu);
	} else if (state == 1) {
		if (smu->ppt_funcs->baco_enter)
			ret = smu->ppt_funcs->baco_enter(smu);
	} else {
		return -EINVAL;
	}

	if (ret)
		dev_err(smu->adev->dev, "Failed to %s BACO state!\n",
			(state) ? "enter" : "exit");

	return ret;
}

bool smu_mode1_reset_is_support(struct smu_context *smu)
{
	bool ret = false;

	if (!smu->pm_enabled)
		return false;

	if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
		ret = smu->ppt_funcs->mode1_reset_is_support(smu);

	return ret;
}

bool smu_mode2_reset_is_support(struct smu_context *smu)
{
	bool ret = false;

	if (!smu->pm_enabled)
		return false;

	if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support)
		ret = smu->ppt_funcs->mode2_reset_is_support(smu);

	return ret;
}

int smu_mode1_reset(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->mode1_reset)
		ret = smu->ppt_funcs->mode1_reset(smu);

	return ret;
}

static int smu_mode2_reset(void *handle)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->mode2_reset)
		ret = smu->ppt_funcs->mode2_reset(smu);

	if (ret)
		dev_err(smu->adev->dev, "Mode2 reset failed!\n");

	return ret;
}
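/*
 * Illustrative sketch only: a hypothetical recovery path probes for mode1
 * reset support before falling back to mode2:
 *
 *	if (smu_mode1_reset_is_support(smu))
 *		ret = smu_mode1_reset(smu);
 *	else if (smu_mode2_reset_is_support(smu))
 *		ret = smu_mode2_reset(smu);
 */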
static int smu_get_max_sustainable_clocks_by_dc(void *handle,
						struct pp_smu_nv_clock_table *max_clocks)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
		ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);

	return ret;
}

static int smu_get_uclk_dpm_states(void *handle,
				   unsigned int *clock_values_in_khz,
				   unsigned int *num_states)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->get_uclk_dpm_states)
		ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);

	return ret;
}

static enum amd_pm_state_type smu_get_current_power_state(void *handle)
{
	struct smu_context *smu = handle;
	enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->get_current_power_state)
		pm_state = smu->ppt_funcs->get_current_power_state(smu);

	return pm_state;
}

static int smu_get_dpm_clock_table(void *handle,
				   struct dpm_clocks *clock_table)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->get_dpm_clock_table)
		ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);

	return ret;
}

static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->get_gpu_metrics)
		return -EOPNOTSUPP;

	return smu->ppt_funcs->get_gpu_metrics(smu, table);
}
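/*
 * Illustrative sketch only: on success the returned value is the table size
 * in bytes and *table points at the ASIC's gpu_metrics structure, whose
 * header identifies the revision:
 *
 *	void *table;
 *	ssize_t size = smu_sys_get_gpu_metrics(handle, &table);
 *
 *	if (size > 0) {
 *		struct metrics_table_header *hdr = table;
 *		(dispatch on hdr->format_revision and hdr->content_revision)
 *	}
 */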
static int smu_enable_mgpu_fan_boost(void *handle)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->enable_mgpu_fan_boost)
		ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);

	return ret;
}

static int smu_gfx_state_change_set(void *handle,
				    uint32_t state)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (smu->ppt_funcs->gfx_state_change_set)
		ret = smu->ppt_funcs->gfx_state_change_set(smu, state);

	return ret;
}

int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
{
	int ret = 0;

	if (smu->ppt_funcs->smu_handle_passthrough_sbr)
		ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable);

	return ret;
}

int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc)
{
	int ret = -EOPNOTSUPP;

	if (smu->ppt_funcs &&
	    smu->ppt_funcs->get_ecc_info)
		ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc);

	return ret;
}

static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;

	if (!addr || !size)
		return -EINVAL;

	*addr = NULL;
	*size = 0;
	if (memory_pool->bo) {
		*addr = memory_pool->cpu_addr;
		*size = memory_pool->size;
	}

	return 0;
}

static const struct amd_pm_funcs swsmu_pm_funcs = {
	/* export for sysfs */
	.set_fan_control_mode = smu_set_fan_control_mode,
	.get_fan_control_mode = smu_get_fan_control_mode,
	.set_fan_speed_pwm = smu_set_fan_speed_pwm,
	.get_fan_speed_pwm = smu_get_fan_speed_pwm,
	.force_clock_level = smu_force_ppclk_levels,
	.print_clock_levels = smu_print_ppclk_levels,
	.emit_clock_levels = smu_emit_ppclk_levels,
	.force_performance_level = smu_force_performance_level,
	.read_sensor = smu_read_sensor,
	.get_performance_level = smu_get_performance_level,
	.get_current_power_state = smu_get_current_power_state,
	.get_fan_speed_rpm = smu_get_fan_speed_rpm,
	.set_fan_speed_rpm = smu_set_fan_speed_rpm,
	.get_pp_num_states = smu_get_power_num_states,
	.get_pp_table = smu_sys_get_pp_table,
	.set_pp_table = smu_sys_set_pp_table,
	.switch_power_profile = smu_switch_power_profile,
	/* export to amdgpu */
	.dispatch_tasks = smu_handle_dpm_task,
	.load_firmware = smu_load_microcode,
	.set_powergating_by_smu = smu_dpm_set_power_gate,
	.set_power_limit = smu_set_power_limit,
	.get_power_limit = smu_get_power_limit,
	.get_power_profile_mode = smu_get_power_profile_mode,
	.set_power_profile_mode = smu_set_power_profile_mode,
	.odn_edit_dpm_table = smu_od_edit_dpm_table,
	.set_mp1_state = smu_set_mp1_state,
	.gfx_state_change_set = smu_gfx_state_change_set,
	/* export to DC */
	.get_sclk = smu_get_sclk,
	.get_mclk = smu_get_mclk,
	.display_configuration_change = smu_display_configuration_change,
	.get_clock_by_type_with_latency = smu_get_clock_by_type_with_latency,
	.display_clock_voltage_request = smu_display_clock_voltage_request,
	.enable_mgpu_fan_boost = smu_enable_mgpu_fan_boost,
	.set_active_display_count = smu_set_display_count,
	.set_min_deep_sleep_dcefclk = smu_set_deep_sleep_dcefclk,
	.get_asic_baco_capability = smu_get_baco_capability,
	.set_asic_baco_state = smu_baco_set_state,
	.get_ppfeature_status = smu_sys_get_pp_feature_mask,
	.set_ppfeature_status = smu_sys_set_pp_feature_mask,
	.asic_reset_mode_2 = smu_mode2_reset,
	.set_df_cstate = smu_set_df_cstate,
	.set_xgmi_pstate = smu_set_xgmi_pstate,
	.get_gpu_metrics = smu_sys_get_gpu_metrics,
	.set_watermarks_for_clock_ranges = smu_set_watermarks_for_clock_ranges,
	.display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch,
	.get_max_sustainable_clocks_by_dc = smu_get_max_sustainable_clocks_by_dc,
	.get_uclk_dpm_states = smu_get_uclk_dpm_states,
	.get_dpm_clock_table = smu_get_dpm_clock_table,
	.get_smu_prv_buf_details = smu_get_prv_buffer_details,
};

int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
		       uint64_t event_arg)
{
	int ret = -EINVAL;

	if (smu->ppt_funcs->wait_for_event)
		ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg);

	return ret;
}
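/*
 * Illustrative sketch only: reset paths can block until the SMU signals
 * completion of an event, e.g.:
 *
 *	smu_wait_for_event(smu, SMU_EVENT_RESET_COMPLETE, 0);
 */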
int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size)
{
	if (!smu->ppt_funcs->stb_collect_info || !smu->stb_context.enabled)
		return -EOPNOTSUPP;

	/* Confirm the buffer allocated is of correct size */
	if (size != smu->stb_context.stb_buf_size)
		return -EINVAL;

	/*
	 * No need to lock smu mutex as we access STB directly through MMIO
	 * and not going through SMU messaging route (for now at least).
	 * For registers access rely on implementation internal locking.
	 */
	return smu->ppt_funcs->stb_collect_info(smu, buf, size);
}

#if defined(CONFIG_DEBUG_FS)

static int smu_stb_debugfs_open(struct inode *inode, struct file *filp)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct smu_context *smu = adev->powerplay.pp_handle;
	unsigned char *buf;
	int r;

	buf = kvmalloc_array(smu->stb_context.stb_buf_size, sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	r = smu_stb_collect_info(smu, buf, smu->stb_context.stb_buf_size);
	if (r)
		goto out;

	filp->private_data = buf;

	return 0;

out:
	kvfree(buf);
	return r;
}

static ssize_t smu_stb_debugfs_read(struct file *filp, char __user *buf, size_t size,
				    loff_t *pos)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!filp->private_data)
		return -EINVAL;

	return simple_read_from_buffer(buf,
				       size,
				       pos, filp->private_data,
				       smu->stb_context.stb_buf_size);
}

static int smu_stb_debugfs_release(struct inode *inode, struct file *filp)
{
	kvfree(filp->private_data);
	filp->private_data = NULL;

	return 0;
}

/*
 * We have to define not only a read method but also open and release
 * methods, because .read returns at most PAGE_SIZE of data per call and
 * is therefore invoked multiple times. We allocate the STB buffer in
 * .open and release it in .release.
 */
static const struct file_operations smu_stb_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = smu_stb_debugfs_open,
	.read = smu_stb_debugfs_read,
	.release = smu_stb_debugfs_release,
	.llseek = default_llseek,
};

#endif

void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!smu || (!smu->stb_context.stb_buf_size))
		return;

	debugfs_create_file_size("amdgpu_smu_stb_dump",
				 S_IRUSR,
				 adev_to_drm(adev)->primary->debugfs_root,
				 adev,
				 &smu_stb_debugfs_fops,
				 smu->stb_context.stb_buf_size);
#endif
}

int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size)
{
	int ret = 0;

	if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_pages_num)
		ret = smu->ppt_funcs->send_hbm_bad_pages_num(smu, size);

	return ret;
}

int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size)
{
	int ret = 0;

	if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_channel_flag)
		ret = smu->ppt_funcs->send_hbm_bad_channel_flag(smu, size);

	return ret;
}