Lines matching refs: smu (vangogh_ppt.c, the Van Gogh SWSMU power-play backend)

193 static int vangogh_tables_init(struct smu_context *smu) in vangogh_tables_init() argument
195 struct smu_table_context *smu_table = &smu->smu_table; in vangogh_tables_init()
197 struct amdgpu_device *adev = smu->adev; in vangogh_tables_init()
201 ret = smu_cmn_get_smc_version(smu, &if_version, NULL); in vangogh_tables_init()
254 static int vangogh_get_legacy_smu_metrics_data(struct smu_context *smu, in vangogh_get_legacy_smu_metrics_data() argument
258 struct smu_table_context *smu_table = &smu->smu_table; in vangogh_get_legacy_smu_metrics_data()
262 mutex_lock(&smu->metrics_lock); in vangogh_get_legacy_smu_metrics_data()
264 ret = smu_cmn_get_metrics_table_locked(smu, in vangogh_get_legacy_smu_metrics_data()
268 mutex_unlock(&smu->metrics_lock); in vangogh_get_legacy_smu_metrics_data()
317 smu->cpu_core_num * sizeof(uint16_t)); in vangogh_get_legacy_smu_metrics_data()
324 mutex_unlock(&smu->metrics_lock); in vangogh_get_legacy_smu_metrics_data()
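The matches at 262-324 show the locked metrics-read pattern used throughout this file: take smu->metrics_lock, refresh the cached metrics table with smu_cmn_get_metrics_table_locked(), copy out the requested member, then drop the lock. A minimal sketch of that pattern follows; the SmuMetrics_legacy_t field name is an assumption for illustration, not taken from this listing.

/* Sketch only: reads one value out of the cached legacy metrics table.
 * The GfxclkFrequency field name is assumed for illustration. */
static int example_read_gfxclk(struct smu_context *smu, uint32_t *value)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        SmuMetrics_legacy_t *metrics = smu_table->metrics_table;
        int ret;

        mutex_lock(&smu->metrics_lock);

        /* bypass_cache == false: reuse the cached table if it is still fresh */
        ret = smu_cmn_get_metrics_table_locked(smu, NULL, false);
        if (ret) {
                mutex_unlock(&smu->metrics_lock);
                return ret;
        }

        *value = metrics->GfxclkFrequency;

        mutex_unlock(&smu->metrics_lock);
        return 0;
}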
329 static int vangogh_get_smu_metrics_data(struct smu_context *smu, in vangogh_get_smu_metrics_data() argument
333 struct smu_table_context *smu_table = &smu->smu_table; in vangogh_get_smu_metrics_data()
337 mutex_lock(&smu->metrics_lock); in vangogh_get_smu_metrics_data()
339 ret = smu_cmn_get_metrics_table_locked(smu, in vangogh_get_smu_metrics_data()
343 mutex_unlock(&smu->metrics_lock); in vangogh_get_smu_metrics_data()
392 smu->cpu_core_num * sizeof(uint16_t)); in vangogh_get_smu_metrics_data()
399 mutex_unlock(&smu->metrics_lock); in vangogh_get_smu_metrics_data()
404 static int vangogh_common_get_smu_metrics_data(struct smu_context *smu, in vangogh_common_get_smu_metrics_data() argument
408 struct amdgpu_device *adev = smu->adev; in vangogh_common_get_smu_metrics_data()
412 ret = smu_cmn_get_smc_version(smu, &if_version, NULL); in vangogh_common_get_smu_metrics_data()
419 ret = vangogh_get_legacy_smu_metrics_data(smu, member, value); in vangogh_common_get_smu_metrics_data()
421 ret = vangogh_get_smu_metrics_data(smu, member, value); in vangogh_common_get_smu_metrics_data()
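vangogh_common_get_smu_metrics_data() (404-421) is a thin dispatcher: it reads the SMU interface version with smu_cmn_get_smc_version() and routes to either the legacy or the current metrics reader. The listing elides the version cutoff, so the 0x3 threshold below is an assumption; the shape of the check is what matters.

/* Sketch of the version dispatch at 404-421; the 0x3 cutoff is assumed. */
static int example_get_metrics_data(struct smu_context *smu,
                                    MetricsMember_t member, uint32_t *value)
{
        uint32_t if_version;
        int ret;

        ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
        if (ret) {
                dev_err(smu->adev->dev, "Failed to get smu if version!\n");
                return ret;
        }

        if (if_version < 0x3)   /* older firmware interface: legacy table layout */
                return vangogh_get_legacy_smu_metrics_data(smu, member, value);

        return vangogh_get_smu_metrics_data(smu, member, value);
}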
426 static int vangogh_allocate_dpm_context(struct smu_context *smu) in vangogh_allocate_dpm_context() argument
428 struct smu_dpm_context *smu_dpm = &smu->smu_dpm; in vangogh_allocate_dpm_context()
440 static int vangogh_init_smc_tables(struct smu_context *smu) in vangogh_init_smc_tables() argument
444 ret = vangogh_tables_init(smu); in vangogh_init_smc_tables()
448 ret = vangogh_allocate_dpm_context(smu); in vangogh_init_smc_tables()
454 smu->cpu_core_num = boot_cpu_data.x86_max_cores; in vangogh_init_smc_tables()
456 smu->cpu_core_num = 4; in vangogh_init_smc_tables()
459 return smu_v11_0_init_smc_tables(smu); in vangogh_init_smc_tables()
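vangogh_init_smc_tables() (440-459) chains the table and DPM-context setup, records the CPU core count, then hands off to the generic smu_v11_0_init_smc_tables(). The matches at 454/456 suggest the core-count probe is guarded by an x86 config check with a fixed fallback of 4; the CONFIG_X86 guard in this sketch is an assumption, since the preprocessor lines are not in the listing.

/* Sketch of the init ordering visible at 440-459. */
static int example_init_smc_tables(struct smu_context *smu)
{
        int ret;

        ret = vangogh_tables_init(smu);
        if (ret)
                return ret;

        ret = vangogh_allocate_dpm_context(smu);
        if (ret)
                return ret;

#ifdef CONFIG_X86
        /* On x86 hosts the APU core count comes from boot_cpu_data. */
        smu->cpu_core_num = boot_cpu_data.x86_max_cores;
#else
        smu->cpu_core_num = 4;
#endif

        return smu_v11_0_init_smc_tables(smu);
}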
462 static int vangogh_dpm_set_vcn_enable(struct smu_context *smu, bool enable) in vangogh_dpm_set_vcn_enable() argument
468 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL); in vangogh_dpm_set_vcn_enable()
472 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL); in vangogh_dpm_set_vcn_enable()
480 static int vangogh_dpm_set_jpeg_enable(struct smu_context *smu, bool enable) in vangogh_dpm_set_jpeg_enable() argument
485 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL); in vangogh_dpm_set_jpeg_enable()
489 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL); in vangogh_dpm_set_jpeg_enable()
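The VCN and JPEG handlers (462-489) are the same two-message pattern: a PowerUp message on enable, a PowerDown message on disable, each sent with a zero argument via smu_cmn_send_smc_msg_with_param(). A minimal sketch for VCN, using the message names that appear in the listing:

/* Sketch of the enable/disable pattern at 462-477. */
static int example_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
{
        if (enable)
                return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn,
                                                       0, NULL);

        return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn,
                                               0, NULL);
}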
497 static bool vangogh_is_dpm_running(struct smu_context *smu) in vangogh_is_dpm_running() argument
499 struct amdgpu_device *adev = smu->adev; in vangogh_is_dpm_running()
508 ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2); in vangogh_is_dpm_running()
519 static int vangogh_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type clk_type, in vangogh_get_dpm_clk_limited() argument
522 DpmClocks_t *clk_table = smu->smu_table.clocks_table; in vangogh_get_dpm_clk_limited()
562 static int vangogh_print_legacy_clk_levels(struct smu_context *smu, in vangogh_print_legacy_clk_levels() argument
565 DpmClocks_t *clk_table = smu->smu_table.clocks_table; in vangogh_print_legacy_clk_levels()
567 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); in vangogh_print_legacy_clk_levels()
574 ret = smu_cmn_get_metrics_table(smu, &metrics, false); in vangogh_print_legacy_clk_levels()
583 …(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_fr… in vangogh_print_legacy_clk_levels()
585 …(smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_fr… in vangogh_print_legacy_clk_levels()
590 size = sprintf(buf, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); in vangogh_print_legacy_clk_levels()
592 …(smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_fr… in vangogh_print_legacy_clk_levels()
594 …(smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_fr… in vangogh_print_legacy_clk_levels()
601 smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); in vangogh_print_legacy_clk_levels()
603 smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq); in vangogh_print_legacy_clk_levels()
625 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, &cur_value); in vangogh_print_legacy_clk_levels()
640 ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value); in vangogh_print_legacy_clk_levels()
661 static int vangogh_print_clk_levels(struct smu_context *smu, in vangogh_print_clk_levels() argument
664 DpmClocks_t *clk_table = smu->smu_table.clocks_table; in vangogh_print_clk_levels()
666 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); in vangogh_print_clk_levels()
673 ret = smu_cmn_get_metrics_table(smu, &metrics, false); in vangogh_print_clk_levels()
682 …(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_fr… in vangogh_print_clk_levels()
684 …(smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_fr… in vangogh_print_clk_levels()
689 size = sprintf(buf, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); in vangogh_print_clk_levels()
691 …(smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_fr… in vangogh_print_clk_levels()
693 …(smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_fr… in vangogh_print_clk_levels()
700 smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); in vangogh_print_clk_levels()
702 smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq); in vangogh_print_clk_levels()
724 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, &cur_value); in vangogh_print_clk_levels()
739 ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value); in vangogh_print_clk_levels()
760 static int vangogh_common_print_clk_levels(struct smu_context *smu, in vangogh_common_print_clk_levels() argument
763 struct amdgpu_device *adev = smu->adev; in vangogh_common_print_clk_levels()
767 ret = smu_cmn_get_smc_version(smu, &if_version, NULL); in vangogh_common_print_clk_levels()
774 ret = vangogh_print_legacy_clk_levels(smu, clk_type, buf); in vangogh_common_print_clk_levels()
776 ret = vangogh_print_clk_levels(smu, clk_type, buf); in vangogh_common_print_clk_levels()
781 static int vangogh_get_profiling_clk_mask(struct smu_context *smu, in vangogh_get_profiling_clk_mask() argument
789 DpmClocks_t *clk_table = smu->smu_table.clocks_table; in vangogh_get_profiling_clk_mask()
835 static bool vangogh_clk_dpm_is_enabled(struct smu_context *smu, in vangogh_clk_dpm_is_enabled() argument
861 if (!smu_cmn_feature_is_enabled(smu, feature_id)) in vangogh_clk_dpm_is_enabled()
867 static int vangogh_get_dpm_ultimate_freq(struct smu_context *smu, in vangogh_get_dpm_ultimate_freq() argument
880 if (!vangogh_clk_dpm_is_enabled(smu, clk_type)) { in vangogh_get_dpm_ultimate_freq()
884 clock_limit = smu->smu_table.boot_values.uclk; in vangogh_get_dpm_ultimate_freq()
887 clock_limit = smu->smu_table.boot_values.fclk; in vangogh_get_dpm_ultimate_freq()
891 clock_limit = smu->smu_table.boot_values.gfxclk; in vangogh_get_dpm_ultimate_freq()
894 clock_limit = smu->smu_table.boot_values.socclk; in vangogh_get_dpm_ultimate_freq()
897 clock_limit = smu->smu_table.boot_values.vclk; in vangogh_get_dpm_ultimate_freq()
900 clock_limit = smu->smu_table.boot_values.dclk; in vangogh_get_dpm_ultimate_freq()
916 ret = vangogh_get_profiling_clk_mask(smu, in vangogh_get_dpm_ultimate_freq()
929 ret = vangogh_get_dpm_clk_limited(smu, clk_type, mclk_mask, max); in vangogh_get_dpm_ultimate_freq()
934 ret = vangogh_get_dpm_clk_limited(smu, clk_type, soc_mask, max); in vangogh_get_dpm_ultimate_freq()
939 ret = vangogh_get_dpm_clk_limited(smu, clk_type, fclk_mask, max); in vangogh_get_dpm_ultimate_freq()
944 ret = vangogh_get_dpm_clk_limited(smu, clk_type, vclk_mask, max); in vangogh_get_dpm_ultimate_freq()
949 ret = vangogh_get_dpm_clk_limited(smu, clk_type, dclk_mask, max); in vangogh_get_dpm_ultimate_freq()
962 ret = vangogh_get_dpm_clk_limited(smu, clk_type, mclk_mask, min); in vangogh_get_dpm_ultimate_freq()
967 ret = vangogh_get_dpm_clk_limited(smu, clk_type, soc_mask, min); in vangogh_get_dpm_ultimate_freq()
972 ret = vangogh_get_dpm_clk_limited(smu, clk_type, fclk_mask, min); in vangogh_get_dpm_ultimate_freq()
977 ret = vangogh_get_dpm_clk_limited(smu, clk_type, vclk_mask, min); in vangogh_get_dpm_ultimate_freq()
982 ret = vangogh_get_dpm_clk_limited(smu, clk_type, dclk_mask, min); in vangogh_get_dpm_ultimate_freq()
995 static int vangogh_get_power_profile_mode(struct smu_context *smu, in vangogh_get_power_profile_mode() argument
1017 workload_type = smu_cmn_to_asic_specific_index(smu, in vangogh_get_power_profile_mode()
1025 i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " "); in vangogh_get_power_profile_mode()
1031 static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size) in vangogh_set_power_profile_mode() argument
1037 dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode); in vangogh_set_power_profile_mode()
1046 workload_type = smu_cmn_to_asic_specific_index(smu, in vangogh_set_power_profile_mode()
1050 dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on VANGOGH\n", in vangogh_set_power_profile_mode()
1055 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify, in vangogh_set_power_profile_mode()
1059 dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", in vangogh_set_power_profile_mode()
1064 smu->power_profile_mode = profile_mode; in vangogh_set_power_profile_mode()
1069 static int vangogh_set_soft_freq_limited_range(struct smu_context *smu, in vangogh_set_soft_freq_limited_range() argument
1076 if (!vangogh_clk_dpm_is_enabled(smu, clk_type)) in vangogh_set_soft_freq_limited_range()
1082 ret = smu_cmn_send_smc_msg_with_param(smu, in vangogh_set_soft_freq_limited_range()
1088 ret = smu_cmn_send_smc_msg_with_param(smu, in vangogh_set_soft_freq_limited_range()
1095 ret = smu_cmn_send_smc_msg_with_param(smu, in vangogh_set_soft_freq_limited_range()
1101 ret = smu_cmn_send_smc_msg_with_param(smu, in vangogh_set_soft_freq_limited_range()
1108 ret = smu_cmn_send_smc_msg_with_param(smu, in vangogh_set_soft_freq_limited_range()
1114 ret = smu_cmn_send_smc_msg_with_param(smu, in vangogh_set_soft_freq_limited_range()
1121 ret = smu_cmn_send_smc_msg_with_param(smu, in vangogh_set_soft_freq_limited_range()
1126 ret = smu_cmn_send_smc_msg_with_param(smu, in vangogh_set_soft_freq_limited_range()
1133 ret = smu_cmn_send_smc_msg_with_param(smu, in vangogh_set_soft_freq_limited_range()
1138 ret = smu_cmn_send_smc_msg_with_param(smu, in vangogh_set_soft_freq_limited_range()
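vangogh_set_soft_freq_limited_range() (1069-1138) switches on the clock type and, for each supported clock, sends one hard-minimum and one soft-maximum message. The listing elides the message name in each case, so the GFXCLK pair below is borrowed from messages that do appear later in this file (SetHardMinGfxClk/SetSoftMaxGfxClk); treat the case body, including the -EINVAL return for a disabled clock, as a sketch.

/* Sketch of one case of the clock-limit switch at 1069-1138. */
static int example_set_gfx_range(struct smu_context *smu,
                                 uint32_t min, uint32_t max)
{
        int ret;

        if (!vangogh_clk_dpm_is_enabled(smu, SMU_GFXCLK))
                return -EINVAL;

        ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
                                              min, NULL);
        if (ret)
                return ret;

        return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
                                               max, NULL);
}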
1151 static int vangogh_force_clk_levels(struct smu_context *smu, in vangogh_force_clk_levels() argument
1163 ret = vangogh_get_dpm_clk_limited(smu, clk_type, in vangogh_force_clk_levels()
1167 ret = vangogh_get_dpm_clk_limited(smu, clk_type, in vangogh_force_clk_levels()
1171 ret = smu_cmn_send_smc_msg_with_param(smu, in vangogh_force_clk_levels()
1176 ret = smu_cmn_send_smc_msg_with_param(smu, in vangogh_force_clk_levels()
1183 ret = vangogh_get_dpm_clk_limited(smu, in vangogh_force_clk_levels()
1187 ret = vangogh_get_dpm_clk_limited(smu, in vangogh_force_clk_levels()
1191 ret = smu_cmn_send_smc_msg_with_param(smu, in vangogh_force_clk_levels()
1196 ret = smu_cmn_send_smc_msg_with_param(smu, in vangogh_force_clk_levels()
1203 ret = vangogh_get_dpm_clk_limited(smu, in vangogh_force_clk_levels()
1208 ret = vangogh_get_dpm_clk_limited(smu, in vangogh_force_clk_levels()
1214 ret = smu_cmn_send_smc_msg_with_param(smu, in vangogh_force_clk_levels()
1220 ret = smu_cmn_send_smc_msg_with_param(smu, in vangogh_force_clk_levels()
1228 ret = vangogh_get_dpm_clk_limited(smu, in vangogh_force_clk_levels()
1233 ret = vangogh_get_dpm_clk_limited(smu, in vangogh_force_clk_levels()
1238 ret = smu_cmn_send_smc_msg_with_param(smu, in vangogh_force_clk_levels()
1244 ret = smu_cmn_send_smc_msg_with_param(smu, in vangogh_force_clk_levels()
1258 static int vangogh_force_dpm_limit_value(struct smu_context *smu, bool highest) in vangogh_force_dpm_limit_value() argument
1273 ret = vangogh_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq); in vangogh_force_dpm_limit_value()
1278 ret = vangogh_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq); in vangogh_force_dpm_limit_value()
1286 static int vangogh_unforce_dpm_levels(struct smu_context *smu) in vangogh_unforce_dpm_levels() argument
1304 if (!smu_cmn_feature_is_enabled(smu, clk_feature_map[i].feature)) in vangogh_unforce_dpm_levels()
1309 ret = vangogh_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq); in vangogh_unforce_dpm_levels()
1314 ret = vangogh_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq); in vangogh_unforce_dpm_levels()
1323 static int vangogh_set_peak_clock_by_device(struct smu_context *smu) in vangogh_set_peak_clock_by_device() argument
1329 ret = vangogh_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_freq); in vangogh_set_peak_clock_by_device()
1333 ret = vangogh_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_freq, fclk_freq); in vangogh_set_peak_clock_by_device()
1337 ret = vangogh_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_freq); in vangogh_set_peak_clock_by_device()
1341 ret = vangogh_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_freq, socclk_freq); in vangogh_set_peak_clock_by_device()
1345 ret = vangogh_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &vclk_freq); in vangogh_set_peak_clock_by_device()
1349 ret = vangogh_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_freq, vclk_freq); in vangogh_set_peak_clock_by_device()
1353 ret = vangogh_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &dclk_freq); in vangogh_set_peak_clock_by_device()
1357 ret = vangogh_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_freq, dclk_freq); in vangogh_set_peak_clock_by_device()
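vangogh_set_peak_clock_by_device() (1323-1357) pins FCLK, SOCCLK, VCLK and DCLK to their maximums: it queries the ceiling with vangogh_get_dpm_ultimate_freq() (min pointer NULL), then sets min == max through vangogh_set_soft_freq_limited_range(). One clock of that sequence, as a sketch:

/* Sketch of the peak-clock step at 1329-1333, shown for FCLK only. */
static int example_pin_fclk_to_peak(struct smu_context *smu)
{
        uint32_t fclk_freq;
        int ret;

        ret = vangogh_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_freq);
        if (ret)
                return ret;

        /* min == max forces the clock to its peak level */
        return vangogh_set_soft_freq_limited_range(smu, SMU_FCLK,
                                                   fclk_freq, fclk_freq);
}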
1364 static int vangogh_set_performance_level(struct smu_context *smu, in vangogh_set_performance_level() argument
1373 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; in vangogh_set_performance_level()
1374 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; in vangogh_set_performance_level()
1376 smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; in vangogh_set_performance_level()
1377 smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; in vangogh_set_performance_level()
1379 ret = vangogh_force_dpm_limit_value(smu, true); in vangogh_set_performance_level()
1382 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; in vangogh_set_performance_level()
1383 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; in vangogh_set_performance_level()
1385 smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; in vangogh_set_performance_level()
1386 smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; in vangogh_set_performance_level()
1388 ret = vangogh_force_dpm_limit_value(smu, false); in vangogh_set_performance_level()
1391 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; in vangogh_set_performance_level()
1392 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; in vangogh_set_performance_level()
1394 smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; in vangogh_set_performance_level()
1395 smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; in vangogh_set_performance_level()
1397 ret = vangogh_unforce_dpm_levels(smu); in vangogh_set_performance_level()
1400 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; in vangogh_set_performance_level()
1401 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; in vangogh_set_performance_level()
1403 smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; in vangogh_set_performance_level()
1404 smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; in vangogh_set_performance_level()
1406 ret = smu_cmn_send_smc_msg_with_param(smu, in vangogh_set_performance_level()
1412 ret = smu_cmn_send_smc_msg_with_param(smu, in vangogh_set_performance_level()
1418 ret = vangogh_get_profiling_clk_mask(smu, level, in vangogh_set_performance_level()
1427 vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask); in vangogh_set_performance_level()
1428 vangogh_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask); in vangogh_set_performance_level()
1429 vangogh_force_clk_levels(smu, SMU_VCLK, 1 << vclk_mask); in vangogh_set_performance_level()
1430 vangogh_force_clk_levels(smu, SMU_DCLK, 1 << dclk_mask); in vangogh_set_performance_level()
1434 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; in vangogh_set_performance_level()
1435 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; in vangogh_set_performance_level()
1437 smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; in vangogh_set_performance_level()
1438 smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; in vangogh_set_performance_level()
1440 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, in vangogh_set_performance_level()
1445 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, in vangogh_set_performance_level()
1451 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; in vangogh_set_performance_level()
1452 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; in vangogh_set_performance_level()
1454 smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; in vangogh_set_performance_level()
1455 smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; in vangogh_set_performance_level()
1457 ret = vangogh_get_profiling_clk_mask(smu, level, in vangogh_set_performance_level()
1466 vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask); in vangogh_set_performance_level()
1469 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; in vangogh_set_performance_level()
1470 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; in vangogh_set_performance_level()
1472 smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; in vangogh_set_performance_level()
1473 smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; in vangogh_set_performance_level()
1475 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, in vangogh_set_performance_level()
1480 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, in vangogh_set_performance_level()
1485 ret = vangogh_set_peak_clock_by_device(smu); in vangogh_set_performance_level()
1495 static int vangogh_read_sensor(struct smu_context *smu, in vangogh_read_sensor() argument
1504 mutex_lock(&smu->sensor_lock); in vangogh_read_sensor()
1507 ret = vangogh_common_get_smu_metrics_data(smu, in vangogh_read_sensor()
1513 ret = vangogh_common_get_smu_metrics_data(smu, in vangogh_read_sensor()
1519 ret = vangogh_common_get_smu_metrics_data(smu, in vangogh_read_sensor()
1525 ret = vangogh_common_get_smu_metrics_data(smu, in vangogh_read_sensor()
1531 ret = vangogh_common_get_smu_metrics_data(smu, in vangogh_read_sensor()
1538 ret = vangogh_common_get_smu_metrics_data(smu, in vangogh_read_sensor()
1545 ret = vangogh_common_get_smu_metrics_data(smu, in vangogh_read_sensor()
1551 ret = vangogh_common_get_smu_metrics_data(smu, in vangogh_read_sensor()
1557 ret = vangogh_common_get_smu_metrics_data(smu, in vangogh_read_sensor()
1560 *size = smu->cpu_core_num * sizeof(uint16_t); in vangogh_read_sensor()
1566 mutex_unlock(&smu->sensor_lock); in vangogh_read_sensor()
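vangogh_read_sensor() (1495-1566) holds smu->sensor_lock across a switch on the sensor type; most cases resolve through vangogh_common_get_smu_metrics_data() and report the payload size. A sketch of one case follows; AMDGPU_PP_SENSOR_GFX_SCLK and METRICS_AVERAGE_GFXCLK are names assumed from the wider swsmu code, not taken from this listing.

/* Sketch of the locked sensor-read pattern at 1495-1566.
 * The sensor and metrics enum names are assumptions. */
static int example_read_one_sensor(struct smu_context *smu,
                                   enum amd_pp_sensors sensor,
                                   void *data, uint32_t *size)
{
        int ret;

        mutex_lock(&smu->sensor_lock);
        switch (sensor) {
        case AMDGPU_PP_SENSOR_GFX_SCLK:
                ret = vangogh_common_get_smu_metrics_data(smu,
                                                          METRICS_AVERAGE_GFXCLK,
                                                          (uint32_t *)data);
                *size = 4;
                break;
        default:
                ret = -EOPNOTSUPP;
                break;
        }
        mutex_unlock(&smu->sensor_lock);

        return ret;
}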
1571 static int vangogh_set_watermarks_table(struct smu_context *smu, in vangogh_set_watermarks_table() argument
1576 Watermarks_t *table = smu->smu_table.watermarks_table; in vangogh_set_watermarks_table()
1614 smu->watermarks_bitmap |= WATERMARKS_EXIST; in vangogh_set_watermarks_table()
1618 if ((smu->watermarks_bitmap & WATERMARKS_EXIST) && in vangogh_set_watermarks_table()
1619 !(smu->watermarks_bitmap & WATERMARKS_LOADED)) { in vangogh_set_watermarks_table()
1620 ret = smu_cmn_write_watermarks_table(smu); in vangogh_set_watermarks_table()
1622 dev_err(smu->adev->dev, "Failed to update WMTABLE!"); in vangogh_set_watermarks_table()
1625 smu->watermarks_bitmap |= WATERMARKS_LOADED; in vangogh_set_watermarks_table()
1631 static ssize_t vangogh_get_legacy_gpu_metrics(struct smu_context *smu, in vangogh_get_legacy_gpu_metrics() argument
1634 struct smu_table_context *smu_table = &smu->smu_table; in vangogh_get_legacy_gpu_metrics()
1640 ret = smu_cmn_get_metrics_table(smu, &metrics, true); in vangogh_get_legacy_gpu_metrics()
1685 static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu, in vangogh_get_gpu_metrics() argument
1688 struct smu_table_context *smu_table = &smu->smu_table; in vangogh_get_gpu_metrics()
1694 ret = smu_cmn_get_metrics_table(smu, &metrics, true); in vangogh_get_gpu_metrics()
1746 static ssize_t vangogh_common_get_gpu_metrics(struct smu_context *smu, in vangogh_common_get_gpu_metrics() argument
1749 struct amdgpu_device *adev = smu->adev; in vangogh_common_get_gpu_metrics()
1753 ret = smu_cmn_get_smc_version(smu, &if_version, NULL); in vangogh_common_get_gpu_metrics()
1760 ret = vangogh_get_legacy_gpu_metrics(smu, table); in vangogh_common_get_gpu_metrics()
1762 ret = vangogh_get_gpu_metrics(smu, table); in vangogh_common_get_gpu_metrics()
1767 static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, in vangogh_od_edit_dpm_table() argument
1771 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); in vangogh_od_edit_dpm_table()
1774 dev_warn(smu->adev->dev, in vangogh_od_edit_dpm_table()
1782 dev_err(smu->adev->dev, "Input parameter number not correct (should be 4 for processor)\n"); in vangogh_od_edit_dpm_table()
1785 if (input[0] >= smu->cpu_core_num) { in vangogh_od_edit_dpm_table()
1786 dev_err(smu->adev->dev, "core index is overflow, should be less than %d\n", in vangogh_od_edit_dpm_table()
1787 smu->cpu_core_num); in vangogh_od_edit_dpm_table()
1789 smu->cpu_core_id_select = input[0]; in vangogh_od_edit_dpm_table()
1791 if (input[2] < smu->cpu_default_soft_min_freq) { in vangogh_od_edit_dpm_table()
1792 …dev_warn(smu->adev->dev, "Fine grain setting minimum cclk (%ld) MHz is less than the minimum allow… in vangogh_od_edit_dpm_table()
1793 input[2], smu->cpu_default_soft_min_freq); in vangogh_od_edit_dpm_table()
1796 smu->cpu_actual_soft_min_freq = input[2]; in vangogh_od_edit_dpm_table()
1798 if (input[2] > smu->cpu_default_soft_max_freq) { in vangogh_od_edit_dpm_table()
1799 …dev_warn(smu->adev->dev, "Fine grain setting maximum cclk (%ld) MHz is greater than the maximum al… in vangogh_od_edit_dpm_table()
1800 input[2], smu->cpu_default_soft_max_freq); in vangogh_od_edit_dpm_table()
1803 smu->cpu_actual_soft_max_freq = input[2]; in vangogh_od_edit_dpm_table()
1810 dev_err(smu->adev->dev, "Input parameter number not correct\n"); in vangogh_od_edit_dpm_table()
1815 if (input[1] < smu->gfx_default_hard_min_freq) { in vangogh_od_edit_dpm_table()
1816 dev_warn(smu->adev->dev, in vangogh_od_edit_dpm_table()
1818 input[1], smu->gfx_default_hard_min_freq); in vangogh_od_edit_dpm_table()
1821 smu->gfx_actual_hard_min_freq = input[1]; in vangogh_od_edit_dpm_table()
1823 if (input[1] > smu->gfx_default_soft_max_freq) { in vangogh_od_edit_dpm_table()
1824 dev_warn(smu->adev->dev, in vangogh_od_edit_dpm_table()
1826 input[1], smu->gfx_default_soft_max_freq); in vangogh_od_edit_dpm_table()
1829 smu->gfx_actual_soft_max_freq = input[1]; in vangogh_od_edit_dpm_table()
1836 dev_err(smu->adev->dev, "Input parameter number not correct\n"); in vangogh_od_edit_dpm_table()
1839 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; in vangogh_od_edit_dpm_table()
1840 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; in vangogh_od_edit_dpm_table()
1841 smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; in vangogh_od_edit_dpm_table()
1842 smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; in vangogh_od_edit_dpm_table()
1847 dev_err(smu->adev->dev, "Input parameter number not correct\n"); in vangogh_od_edit_dpm_table()
1850 if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) { in vangogh_od_edit_dpm_table()
1851 dev_err(smu->adev->dev, in vangogh_od_edit_dpm_table()
1853 smu->gfx_actual_hard_min_freq, in vangogh_od_edit_dpm_table()
1854 smu->gfx_actual_soft_max_freq); in vangogh_od_edit_dpm_table()
1858 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, in vangogh_od_edit_dpm_table()
1859 smu->gfx_actual_hard_min_freq, NULL); in vangogh_od_edit_dpm_table()
1861 dev_err(smu->adev->dev, "Set hard min sclk failed!"); in vangogh_od_edit_dpm_table()
1865 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, in vangogh_od_edit_dpm_table()
1866 smu->gfx_actual_soft_max_freq, NULL); in vangogh_od_edit_dpm_table()
1868 dev_err(smu->adev->dev, "Set soft max sclk failed!"); in vangogh_od_edit_dpm_table()
1872 if (smu->adev->pm.fw_version < 0x43f1b00) { in vangogh_od_edit_dpm_table()
1873 dev_warn(smu->adev->dev, "CPUSoftMax/CPUSoftMin are not supported, please update SBIOS!\n"); in vangogh_od_edit_dpm_table()
1877 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk, in vangogh_od_edit_dpm_table()
1878 ((smu->cpu_core_id_select << 20) in vangogh_od_edit_dpm_table()
1879 | smu->cpu_actual_soft_min_freq), in vangogh_od_edit_dpm_table()
1882 dev_err(smu->adev->dev, "Set hard min cclk failed!"); in vangogh_od_edit_dpm_table()
1886 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk, in vangogh_od_edit_dpm_table()
1887 ((smu->cpu_core_id_select << 20) in vangogh_od_edit_dpm_table()
1888 | smu->cpu_actual_soft_max_freq), in vangogh_od_edit_dpm_table()
1891 dev_err(smu->adev->dev, "Set soft max cclk failed!"); in vangogh_od_edit_dpm_table()
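The commit path of vangogh_od_edit_dpm_table() (1847-1891) first checks that the requested gfx hard minimum does not exceed the soft maximum, then programs sclk via SetHardMinGfxClk/SetSoftMaxGfxClk and, for firmware new enough (1872), programs cclk with the selected core index packed into the upper bits of the message argument, as the matches at 1877-1888 show. A sketch of that cclk encoding:

/* Sketch of the cclk commit at 1877-1891: core index in bits 20 and up,
 * frequency in the low bits of the message argument. */
static int example_commit_cclk(struct smu_context *smu)
{
        uint32_t min_arg = (smu->cpu_core_id_select << 20) |
                           smu->cpu_actual_soft_min_freq;
        uint32_t max_arg = (smu->cpu_core_id_select << 20) |
                           smu->cpu_actual_soft_max_freq;
        int ret;

        ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk,
                                              min_arg, NULL);
        if (ret)
                return ret;

        return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk,
                                               max_arg, NULL);
}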
1903 static int vangogh_set_default_dpm_tables(struct smu_context *smu) in vangogh_set_default_dpm_tables() argument
1905 struct smu_table_context *smu_table = &smu->smu_table; in vangogh_set_default_dpm_tables()
1907 return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false); in vangogh_set_default_dpm_tables()
1910 static int vangogh_set_fine_grain_gfx_freq_parameters(struct smu_context *smu) in vangogh_set_fine_grain_gfx_freq_parameters() argument
1912 DpmClocks_t *clk_table = smu->smu_table.clocks_table; in vangogh_set_fine_grain_gfx_freq_parameters()
1914 smu->gfx_default_hard_min_freq = clk_table->MinGfxClk; in vangogh_set_fine_grain_gfx_freq_parameters()
1915 smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk; in vangogh_set_fine_grain_gfx_freq_parameters()
1916 smu->gfx_actual_hard_min_freq = 0; in vangogh_set_fine_grain_gfx_freq_parameters()
1917 smu->gfx_actual_soft_max_freq = 0; in vangogh_set_fine_grain_gfx_freq_parameters()
1919 smu->cpu_default_soft_min_freq = 1400; in vangogh_set_fine_grain_gfx_freq_parameters()
1920 smu->cpu_default_soft_max_freq = 3500; in vangogh_set_fine_grain_gfx_freq_parameters()
1921 smu->cpu_actual_soft_min_freq = 0; in vangogh_set_fine_grain_gfx_freq_parameters()
1922 smu->cpu_actual_soft_max_freq = 0; in vangogh_set_fine_grain_gfx_freq_parameters()
1927 static int vangogh_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks *clock_table) in vangogh_get_dpm_clock_table() argument
1929 DpmClocks_t *table = smu->smu_table.clocks_table; in vangogh_get_dpm_clock_table()
1954 static int vangogh_system_features_control(struct smu_context *smu, bool en) in vangogh_system_features_control() argument
1956 struct amdgpu_device *adev = smu->adev; in vangogh_system_features_control()
1957 struct smu_feature *feature = &smu->smu_feature; in vangogh_system_features_control()
1962 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RlcPowerNotify, in vangogh_system_features_control()
1971 ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2); in vangogh_system_features_control()
1983 static int vangogh_post_smu_init(struct smu_context *smu) in vangogh_post_smu_init() argument
1985 struct amdgpu_device *adev = smu->adev; in vangogh_post_smu_init()
1995 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_GFXCLK_BIT) && in vangogh_post_smu_init()
1997 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_EnableGfxOff, NULL); in vangogh_post_smu_init()
2025 return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RequestActiveWgp, req_active_wgps, NULL); in vangogh_post_smu_init()
2029 static int vangogh_mode_reset(struct smu_context *smu, int type) in vangogh_mode_reset() argument
2033 index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, in vangogh_mode_reset()
2038 mutex_lock(&smu->message_lock); in vangogh_mode_reset()
2040 ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, type); in vangogh_mode_reset()
2042 mutex_unlock(&smu->message_lock); in vangogh_mode_reset()
2049 static int vangogh_mode2_reset(struct smu_context *smu) in vangogh_mode2_reset() argument
2051 return vangogh_mode_reset(smu, SMU_RESET_MODE_2); in vangogh_mode2_reset()
2054 static int vangogh_get_power_limit(struct smu_context *smu) in vangogh_get_power_limit() argument
2057 smu->smu_power.power_context; in vangogh_get_power_limit()
2061 if (smu->adev->pm.fw_version < 0x43f1e00) in vangogh_get_power_limit()
2064 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSlowPPTLimit, &ppt_limit); in vangogh_get_power_limit()
2066 dev_err(smu->adev->dev, "Get slow PPT limit failed!\n"); in vangogh_get_power_limit()
2070 smu->current_power_limit = smu->default_power_limit = ppt_limit / 1000; in vangogh_get_power_limit()
2071 smu->max_power_limit = 29; in vangogh_get_power_limit()
2073 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetFastPPTLimit, &ppt_limit); in vangogh_get_power_limit()
2075 dev_err(smu->adev->dev, "Get fast PPT limit failed!\n"); in vangogh_get_power_limit()
2086 static int vangogh_get_ppt_limit(struct smu_context *smu, in vangogh_get_ppt_limit() argument
2092 smu->smu_power.power_context; in vangogh_get_ppt_limit()
2116 static int vangogh_set_power_limit(struct smu_context *smu, uint32_t ppt_limit) in vangogh_set_power_limit() argument
2119 smu->smu_power.power_context; in vangogh_set_power_limit()
2123 if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) { in vangogh_set_power_limit()
2124 dev_err(smu->adev->dev, "Setting new power limit is not supported!\n"); in vangogh_set_power_limit()
2130 ret = smu_cmn_send_smc_msg_with_param(smu, in vangogh_set_power_limit()
2137 smu->current_power_limit = ppt_limit; in vangogh_set_power_limit()
2142 dev_err(smu->adev->dev, in vangogh_set_power_limit()
2148 ret = smu_cmn_send_smc_msg_with_param(smu, in vangogh_set_power_limit()
2206 void vangogh_set_ppt_funcs(struct smu_context *smu) in vangogh_set_ppt_funcs() argument
2208 smu->ppt_funcs = &vangogh_ppt_funcs; in vangogh_set_ppt_funcs()
2209 smu->message_map = vangogh_message_map; in vangogh_set_ppt_funcs()
2210 smu->feature_map = vangogh_feature_mask_map; in vangogh_set_ppt_funcs()
2211 smu->table_map = vangogh_table_map; in vangogh_set_ppt_funcs()
2212 smu->workload_map = vangogh_workload_map; in vangogh_set_ppt_funcs()
2213 smu->is_apu = true; in vangogh_set_ppt_funcs()
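vangogh_set_ppt_funcs() (2206-2213) is the one non-static entry point in this listing: it wires the ASIC-specific callback table plus the message, feature, table and workload maps into the smu_context and marks the device as an APU. The trimmed table below is illustrative only; the member names follow struct pptable_funcs, and the real vangogh_ppt_funcs maps many more callbacks than shown here.

/* Illustrative (trimmed) callback table; only a few entries are shown. */
static const struct pptable_funcs example_ppt_funcs = {
        .init_smc_tables       = vangogh_init_smc_tables,
        .read_sensor           = vangogh_read_sensor,
        .print_clk_levels      = vangogh_common_print_clk_levels,
        .get_gpu_metrics       = vangogh_common_get_gpu_metrics,
        .set_performance_level = vangogh_set_performance_level,
};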