/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define SWSMU_CODE_LAYER_L1

#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/power_supply.h>
#include <linux/reboot.h>

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "atom.h"
#include "arcturus_ppt.h"
#include "navi10_ppt.h"
#include "sienna_cichlid_ppt.h"
#include "renoir_ppt.h"
#include "vangogh_ppt.h"
#include "aldebaran_ppt.h"
#include "yellow_carp_ppt.h"
#include "cyan_skillfish_ppt.h"
#include "smu_v13_0_0_ppt.h"
#include "smu_v13_0_4_ppt.h"
#include "smu_v13_0_5_ppt.h"
#include "smu_v13_0_6_ppt.h"
#include "smu_v13_0_7_ppt.h"
#include "smu_v14_0_0_ppt.h"
#include "smu_v14_0_2_ppt.h"
#include "amd_pcie.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

static const struct amd_pm_funcs swsmu_pm_funcs;
static int smu_force_smuclk_levels(struct smu_context *smu,
				   enum smu_clk_type clk_type,
				   uint32_t mask);
static int smu_handle_task(struct smu_context *smu,
			   enum amd_dpm_forced_level level,
			   enum amd_pp_task task_id);
static int smu_reset(struct smu_context *smu);
static int smu_set_fan_speed_pwm(void *handle, u32 speed);
static int smu_set_fan_control_mode(void *handle, u32 value);
static int smu_set_power_limit(void *handle, uint32_t limit);
static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state);
static int smu_sys_get_pp_feature_mask(void *handle,
				       char *buf)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_get_pp_feature_mask(smu, buf);
}

static int smu_sys_set_pp_feature_mask(void *handle,
				       uint64_t new_mask)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_set_pp_feature_mask(smu, new_mask);
}

int smu_set_residency_gfxoff(struct smu_context *smu, bool value)
{
	if (!smu->ppt_funcs->set_gfx_off_residency)
		return -EINVAL;

	return smu_set_gfx_off_residency(smu, value);
}

int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value)
{
	if (!smu->ppt_funcs->get_gfx_off_residency)
		return -EINVAL;

	return smu_get_gfx_off_residency(smu, value);
}

int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value)
{
	if (!smu->ppt_funcs->get_gfx_off_entrycount)
		return -EINVAL;

	return smu_get_gfx_off_entrycount(smu, value);
}

int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
{
	if (!smu->ppt_funcs->get_gfx_off_status)
		return -EINVAL;

	*value = smu_get_gfx_off_status(smu);

	return 0;
}

int smu_set_soft_freq_range(struct smu_context *smu,
			    enum smu_clk_type clk_type,
			    uint32_t min,
			    uint32_t max)
{
	int ret = 0;

	if (smu->ppt_funcs->set_soft_freq_limited_range)
		ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
								  clk_type,
								  min,
								  max);

	return ret;
}

int smu_get_dpm_freq_range(struct smu_context *smu,
			   enum smu_clk_type clk_type,
			   uint32_t *min,
			   uint32_t *max)
{
	int ret = -ENOTSUPP;

	if (!min && !max)
		return -EINVAL;

	if (smu->ppt_funcs->get_dpm_ultimate_freq)
		ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
							    clk_type,
							    min,
							    max);

	return ret;
}

int smu_set_gfx_power_up_by_imu(struct smu_context *smu)
{
	int ret = 0;
	struct amdgpu_device *adev = smu->adev;

	if (smu->ppt_funcs->set_gfx_power_up_by_imu) {
		ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
		if (ret)
			dev_err(adev->dev, "Failed to enable gfx imu!\n");
	}
	return ret;
}

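/*
 * Note: the legacy powerplay interface below reports clocks in 10 kHz
 * units, while the SMU works in MHz -- hence the "* 100" conversions.
 */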
static u32 smu_get_mclk(void *handle, bool low)
{
	struct smu_context *smu = handle;
	uint32_t clk_freq;
	int ret = 0;

	ret = smu_get_dpm_freq_range(smu, SMU_UCLK,
				     low ? &clk_freq : NULL,
				     !low ? &clk_freq : NULL);
	if (ret)
		return 0;
	return clk_freq * 100;
}

static u32 smu_get_sclk(void *handle, bool low)
{
	struct smu_context *smu = handle;
	uint32_t clk_freq;
	int ret = 0;

	ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK,
				     low ? &clk_freq : NULL,
				     !low ? &clk_freq : NULL);
	if (ret)
		return 0;
	return clk_freq * 100;
}

static int smu_set_gfx_imu_enable(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		return 0;

	if (amdgpu_in_reset(smu->adev) || adev->in_s0ix)
		return 0;

	return smu_set_gfx_power_up_by_imu(smu);
}

static bool is_vcn_enabled(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_VCN ||
		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_JPEG) &&
		    !adev->ip_blocks[i].status.valid)
			return false;
	}

	return true;
}

static int smu_dpm_set_vcn_enable(struct smu_context *smu,
				  bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	/*
	 * Don't power on VCN/JPEG when they are skipped.
	 */
	if (!is_vcn_enabled(smu->adev))
		return 0;

	if (!smu->ppt_funcs->dpm_set_vcn_enable)
		return 0;

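	/*
	 * atomic_read() ^ enable is true only when the block is already in
	 * the requested state, so the SMU message can be skipped.
	 */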
	if (atomic_read(&power_gate->vcn_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->vcn_gated, !enable);

	return ret;
}

static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
				   bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!is_vcn_enabled(smu->adev))
		return 0;

	if (!smu->ppt_funcs->dpm_set_jpeg_enable)
		return 0;

	if (atomic_read(&power_gate->jpeg_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->jpeg_gated, !enable);

	return ret;
}

static int smu_dpm_set_vpe_enable(struct smu_context *smu,
				  bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->ppt_funcs->dpm_set_vpe_enable)
		return 0;

	if (atomic_read(&power_gate->vpe_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_vpe_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->vpe_gated, !enable);

	return ret;
}

static int smu_dpm_set_umsch_mm_enable(struct smu_context *smu,
				       bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->adev->enable_umsch_mm)
		return 0;

	if (!smu->ppt_funcs->dpm_set_umsch_mm_enable)
		return 0;

	if (atomic_read(&power_gate->umsch_mm_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_umsch_mm_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->umsch_mm_gated, !enable);

	return ret;
}

/**
 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
 *
 * @handle: smu_context pointer
 * @block_type: the IP block to power gate/ungate
 * @gate: to power gate if true, ungate otherwise
 *
 * This API uses no smu->mutex lock protection due to:
 * 1. It is either called by other IP blocks (gfx/sdma/vcn/uvd/vce),
 *    and the caller guarantees it is race condition free.
 * 2. Or it gets called on a user request to change
 *    power_dpm_force_performance_level. In that case, the smu->mutex lock
 *    protection is already enforced on the parent API
 *    smu_force_performance_level of the call path.
 */
static int smu_dpm_set_power_gate(void *handle,
				  uint32_t block_type,
				  bool gate)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) {
		dev_WARN(smu->adev->dev,
			 "SMU uninitialized but power %s requested for %u!\n",
			 gate ? "gate" : "ungate", block_type);
		return -EOPNOTSUPP;
	}

	switch (block_type) {
	/*
	 * Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses
	 * AMD_IP_BLOCK_TYPE_UVD for VCN. So, here both of them are kept.
	 */
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCN:
		ret = smu_dpm_set_vcn_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s VCN!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
		ret = smu_gfx_off_control(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",
				gate ? "enable" : "disable");
		break;
	case AMD_IP_BLOCK_TYPE_SDMA:
		ret = smu_powergate_sdma(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_JPEG:
		ret = smu_dpm_set_jpeg_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_VPE:
		ret = smu_dpm_set_vpe_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s VPE!\n",
				gate ? "gate" : "ungate");
		break;
	default:
		dev_err(smu->adev->dev, "Unsupported block type!\n");
		return -EINVAL;
	}

	return ret;
}

/**
 * smu_set_user_clk_dependencies - set user profile clock dependencies
 *
 * @smu: smu_context pointer
 * @clk: enum smu_clk_type type
 *
 * Enable/Disable the clock dependency for the @clk type.
 */
static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk)
{
	if (smu->adev->in_suspend)
		return;

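	/*
	 * Forcing one clock implicitly constrains its dependent clocks:
	 * the bits set below mark clocks whose saved user settings will be
	 * skipped by smu_restore_dpm_user_profile().
	 */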
	if (clk == SMU_MCLK) {
		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
	} else if (clk == SMU_FCLK) {
		/* MCLK takes precedence over FCLK */
		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
			return;

		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);
	} else if (clk == SMU_SOCCLK) {
		/* MCLK takes precedence over SOCCLK */
		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
			return;

		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);
	} else
		/* Add clk dependencies here, if any */
		return;
}

/**
 * smu_restore_dpm_user_profile - reinstate user dpm profile
 *
 * @smu: smu_context pointer
 *
 * Restore the saved user power configurations, including power limit,
 * clock frequencies, fan control mode and fan speed.
 */
static void smu_restore_dpm_user_profile(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->adev->in_suspend)
		return;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return;

	/* Enable restore flag */
	smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;

	/* set the user dpm power limit */
	if (smu->user_dpm_profile.power_limit) {
		ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit);
		if (ret)
			dev_err(smu->adev->dev, "Failed to set power limit value\n");
	}

	/* set the user dpm clock configurations */
	if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
		enum smu_clk_type clk_type;

		for (clk_type = 0; clk_type < SMU_CLK_COUNT; clk_type++) {
			/*
			 * Iterate over the smu clk types and force the saved
			 * user clk configs, skipping any clock whose
			 * dependency bit is set
			 */
			if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) &&
			    smu->user_dpm_profile.clk_mask[clk_type]) {
				ret = smu_force_smuclk_levels(smu, clk_type,
							      smu->user_dpm_profile.clk_mask[clk_type]);
				if (ret)
					dev_err(smu->adev->dev,
						"Failed to set clock type = %d\n", clk_type);
			}
		}
	}

	/* set the user dpm fan configurations */
	if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL ||
	    smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) {
		ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode);
		if (ret != -EOPNOTSUPP) {
			smu->user_dpm_profile.fan_speed_pwm = 0;
			smu->user_dpm_profile.fan_speed_rpm = 0;
			smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO;
			dev_err(smu->adev->dev, "Failed to set manual fan control mode\n");
		}

		if (smu->user_dpm_profile.fan_speed_pwm) {
			ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm);
			if (ret != -EOPNOTSUPP)
				dev_err(smu->adev->dev, "Failed to set manual fan speed in pwm\n");
		}

		if (smu->user_dpm_profile.fan_speed_rpm) {
			ret = smu_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm);
			if (ret != -EOPNOTSUPP)
				dev_err(smu->adev->dev, "Failed to set manual fan speed in rpm\n");
		}
	}

	/* Restore user customized OD settings */
	if (smu->user_dpm_profile.user_od) {
		if (smu->ppt_funcs->restore_user_od_settings) {
			ret = smu->ppt_funcs->restore_user_od_settings(smu);
			if (ret)
				dev_err(smu->adev->dev, "Failed to upload customized OD settings\n");
		}
	}

	/* Disable restore flag */
	smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
}

static int smu_get_power_num_states(void *handle,
				    struct pp_states_info *state_info)
{
	if (!state_info)
		return -EINVAL;

	/* power states are not supported */
	memset(state_info, 0, sizeof(struct pp_states_info));
	state_info->nums = 1;
	state_info->states[0] = POWER_STATE_TYPE_DEFAULT;

	return 0;
}

bool is_support_sw_smu(struct amdgpu_device *adev)
{
	/* vega20 is 11.0.2, but it's supported via the powerplay code */
	if (adev->asic_type == CHIP_VEGA20)
		return false;

	if (amdgpu_ip_version(adev, MP1_HWIP, 0) >= IP_VERSION(11, 0, 0))
		return true;

	return false;
}

bool is_support_cclk_dpm(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
		return false;

	return true;
}

static int smu_sys_get_pp_table(void *handle,
				char **table)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
		return -EINVAL;

	if (smu_table->hardcode_pptable)
		*table = smu_table->hardcode_pptable;
	else
		*table = smu_table->power_play_table;

	return smu_table->power_play_table_size;
}

static int smu_sys_set_pp_table(void *handle,
				const char *buf,
				size_t size)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (header->usStructureSize != size) {
		dev_err(smu->adev->dev, "pp table size not matched!\n");
		return -EIO;
	}

	if (!smu_table->hardcode_pptable) {
		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
		if (!smu_table->hardcode_pptable)
			return -ENOMEM;
	}

	memcpy(smu_table->hardcode_pptable, buf, size);
	smu_table->power_play_table = smu_table->hardcode_pptable;
	smu_table->power_play_table_size = size;

	/*
	 * Special hw_fini action (for Navi1x, the DPMs disablement will be
	 * skipped) may be needed for custom pptable uploading.
	 */
	smu->uploading_custom_pp_table = true;

	ret = smu_reset(smu);
	if (ret)
		dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);

	smu->uploading_custom_pp_table = false;

	return ret;
}

static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
	int ret = 0;

	/*
	 * With SCPM enabled, setting the allowed feature masks (via
	 * PPSMC_MSG_SetAllowedFeaturesMaskLow/High) is not permitted.
	 * That means there is no way to let PMFW know the settings below.
	 * Thus, we just assume all the features are allowed under
	 * such a scenario.
	 */
	if (smu->adev->scpm_enabled) {
		bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
		return 0;
	}

	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);

	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
					   SMU_FEATURE_MAX/32);
	if (ret)
		return ret;

	bitmap_or(feature->allowed, feature->allowed,
		  (unsigned long *)allowed_feature_mask,
		  feature->feature_num);

	return ret;
}

static int smu_set_funcs(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
		smu->od_enabled = true;

	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
		navi10_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
		sienna_cichlid_set_ppt_funcs(smu);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		renoir_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 5, 0):
		vangogh_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 8):
		yellow_carp_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 4):
	case IP_VERSION(13, 0, 11):
		smu_v13_0_4_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 5):
		smu_v13_0_5_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 8):
		cyan_skillfish_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 2):
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		arcturus_set_ppt_funcs(smu);
		/* OD is not supported on Arcturus */
		smu->od_enabled = false;
		break;
	case IP_VERSION(13, 0, 2):
		aldebaran_set_ppt_funcs(smu);
		/* Enable pp_od_clk_voltage node */
		smu->od_enabled = true;
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 10):
		smu_v13_0_0_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 6):
		smu_v13_0_6_set_ppt_funcs(smu);
		/* Enable pp_od_clk_voltage node */
		smu->od_enabled = true;
		break;
	case IP_VERSION(13, 0, 7):
		smu_v13_0_7_set_ppt_funcs(smu);
		break;
	case IP_VERSION(14, 0, 0):
	case IP_VERSION(14, 0, 1):
		smu_v14_0_0_set_ppt_funcs(smu);
		break;
	case IP_VERSION(14, 0, 2):
	case IP_VERSION(14, 0, 3):
		smu_v14_0_2_set_ppt_funcs(smu);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int smu_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu;
	int r;

	smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL);
	if (!smu)
		return -ENOMEM;

	smu->adev = adev;
	smu->pm_enabled = !!amdgpu_dpm;
	smu->is_apu = false;
	smu->smu_baco.state = SMU_BACO_STATE_NONE;
	smu->smu_baco.platform_support = false;
	smu->smu_baco.maco_support = false;
	smu->user_dpm_profile.fan_mode = -1;

	mutex_init(&smu->message_lock);

	adev->powerplay.pp_handle = smu;
	adev->powerplay.pp_funcs = &swsmu_pm_funcs;

	r = smu_set_funcs(adev);
	if (r)
		return r;
	return smu_init_microcode(smu);
}

static int smu_set_default_dpm_table(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int vcn_gate, jpeg_gate;
	int ret = 0;

	if (!smu->ppt_funcs->set_default_dpm_table)
		return 0;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		vcn_gate = atomic_read(&power_gate->vcn_gated);
	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
		jpeg_gate = atomic_read(&power_gate->jpeg_gated);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		ret = smu_dpm_set_vcn_enable(smu, true);
		if (ret)
			return ret;
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
		ret = smu_dpm_set_jpeg_enable(smu, true);
		if (ret)
			goto err_out;
	}

	ret = smu->ppt_funcs->set_default_dpm_table(smu);
	if (ret)
		dev_err(smu->adev->dev,
			"Failed to setup default dpm clock tables!\n");

	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
		smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
err_out:
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		smu_dpm_set_vcn_enable(smu, !vcn_gate);

	return ret;
}

static int smu_apply_default_config_table_settings(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	ret = smu_get_default_config_table_settings(smu,
						    &adev->pm.config_table);
	if (ret)
		return ret;

	return smu_set_config_table(smu, &adev->pm.config_table);
}

static int smu_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	smu_set_fine_grain_gfx_freq_parameters(smu);

	if (!smu->pm_enabled)
		return 0;

	ret = smu_post_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to post smu init!\n");
		return ret;
	}

	/*
	 * Explicitly notify PMFW of the power mode the system is in, since
	 * PMFW may have booted the ASIC with a different mode.
	 * For ASICs supporting the AC/DC switch via gpio, PMFW will
	 * handle the switch automatically. Driver involvement
	 * is unnecessary.
	 */
	adev->pm.ac_power = power_supply_is_system_supplied() > 0;
	smu_set_ac_dc(smu);

	if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 1)) ||
	    (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 3)))
		return 0;

	if (!amdgpu_sriov_vf(adev) || smu->od_enabled) {
		ret = smu_set_default_od_settings(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to setup default OD settings!\n");
			return ret;
		}
	}

	ret = smu_populate_umd_state_clk(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
		return ret;
	}

	ret = smu_get_asic_power_limits(smu,
					&smu->current_power_limit,
					&smu->default_power_limit,
					&smu->max_power_limit,
					&smu->min_power_limit);
	if (ret) {
		dev_err(adev->dev, "Failed to get asic power limits!\n");
		return ret;
	}

	if (!amdgpu_sriov_vf(adev))
		smu_get_unique_id(smu);

	smu_get_fan_parameters(smu);

	smu_handle_task(smu,
			smu->smu_dpm.dpm_level,
			AMD_PP_TASK_COMPLETE_INIT);

	ret = smu_apply_default_config_table_settings(smu);
	if (ret && (ret != -EOPNOTSUPP)) {
		dev_err(adev->dev, "Failed to apply default DriverSmuConfig settings!\n");
		return ret;
	}

	smu_restore_dpm_user_profile(smu);

	return 0;
}

static int smu_init_fb_allocations(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);
	uint32_t max_table_size = 0;
	int ret, i;

	/* VRAM allocation for tool table */
	if (tables[SMU_TABLE_PMSTATUSLOG].size) {
		ret = amdgpu_bo_create_kernel(adev,
					      tables[SMU_TABLE_PMSTATUSLOG].size,
					      tables[SMU_TABLE_PMSTATUSLOG].align,
					      tables[SMU_TABLE_PMSTATUSLOG].domain,
					      &tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
		if (ret) {
			dev_err(adev->dev, "VRAM allocation for tool table failed!\n");
			return ret;
		}
	}

	driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT;
	/* VRAM allocation for driver table */
	for (i = 0; i < SMU_TABLE_COUNT; i++) {
		if (tables[i].size == 0)
			continue;

		/* If one of the tables has VRAM domain restriction, keep it in
		 * VRAM
		 */
		if ((tables[i].domain &
		    (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) ==
			    AMDGPU_GEM_DOMAIN_VRAM)
			driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

		if (i == SMU_TABLE_PMSTATUSLOG)
			continue;

		if (max_table_size < tables[i].size)
			max_table_size = tables[i].size;
	}

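	/*
	 * A single staging buffer, sized for the largest SMU table, is
	 * shared by all driver<->SMU table transfers.
	 */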
	driver_table->size = max_table_size;
	driver_table->align = PAGE_SIZE;

	ret = amdgpu_bo_create_kernel(adev,
				      driver_table->size,
				      driver_table->align,
				      driver_table->domain,
				      &driver_table->bo,
				      &driver_table->mc_address,
				      &driver_table->cpu_addr);
	if (ret) {
		dev_err(adev->dev, "VRAM allocation for driver table failed!\n");
		if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
			amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
	}

	return ret;
}

static int smu_fini_fb_allocations(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);

	if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
		amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
				      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
				      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);

	amdgpu_bo_free_kernel(&driver_table->bo,
			      &driver_table->mc_address,
			      &driver_table->cpu_addr);

	return 0;
}

/**
 * smu_alloc_memory_pool - allocate memory pool in the system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool will be used for SMC use. Its location is communicated
 * to the SMC via the SetSystemVirtualDramAddr and DramLogSetDramAddr
 * messages.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint64_t pool_size = smu->pool_size;
	int ret = 0;

	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	memory_pool->size = pool_size;
	memory_pool->align = PAGE_SIZE;
	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

	switch (pool_size) {
	case SMU_MEMORY_POOL_SIZE_256_MB:
	case SMU_MEMORY_POOL_SIZE_512_MB:
	case SMU_MEMORY_POOL_SIZE_1_GB:
	case SMU_MEMORY_POOL_SIZE_2_GB:
		ret = amdgpu_bo_create_kernel(adev,
					      memory_pool->size,
					      memory_pool->align,
					      memory_pool->domain,
					      &memory_pool->bo,
					      &memory_pool->mc_address,
					      &memory_pool->cpu_addr);
		if (ret)
			dev_err(adev->dev, "VRAM allocation for dramlog failed!\n");
		break;
	default:
		break;
	}

	return ret;
}

static int smu_free_memory_pool(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;

	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
		return 0;

	amdgpu_bo_free_kernel(&memory_pool->bo,
			      &memory_pool->mc_address,
			      &memory_pool->cpu_addr);

	memset(memory_pool, 0, sizeof(struct smu_table));

	return 0;
}

static int smu_alloc_dummy_read_table(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *dummy_read_1_table =
			&smu_table->dummy_read_1_table;
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (!dummy_read_1_table->size)
		return 0;

	ret = amdgpu_bo_create_kernel(adev,
				      dummy_read_1_table->size,
				      dummy_read_1_table->align,
				      dummy_read_1_table->domain,
				      &dummy_read_1_table->bo,
				      &dummy_read_1_table->mc_address,
				      &dummy_read_1_table->cpu_addr);
	if (ret)
		dev_err(adev->dev, "VRAM allocation for dummy read table failed!\n");

	return ret;
}

static void smu_free_dummy_read_table(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *dummy_read_1_table =
			&smu_table->dummy_read_1_table;

	amdgpu_bo_free_kernel(&dummy_read_1_table->bo,
			      &dummy_read_1_table->mc_address,
			      &dummy_read_1_table->cpu_addr);

	memset(dummy_read_1_table, 0, sizeof(struct smu_table));
}

static int smu_smc_table_sw_init(struct smu_context *smu)
{
	int ret;

	/**
	 * Create the smu_table structure, and init smc tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS and TABLE_SMU_METRICS.
	 */
	ret = smu_init_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smc tables!\n");
		return ret;
	}

	/**
	 * Create the smu_power_context structure, and allocate
	 * smu_dpm_context and its contents to fill the smu_power_context
	 * data.
	 */
	ret = smu_init_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smu power!\n");
		return ret;
	}

	/*
	 * allocate vram bos to store smc table contents.
	 */
	ret = smu_init_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_alloc_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_alloc_dummy_read_table(smu);
	if (ret)
		return ret;

	ret = smu_i2c_init(smu);
	if (ret)
		return ret;

	return 0;
}

static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret;

	smu_i2c_fini(smu);

	smu_free_dummy_read_table(smu);

	ret = smu_free_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_fini_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_fini_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to fini smu power!\n");
		return ret;
	}

	ret = smu_fini_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to fini smc tables!\n");
		return ret;
	}

	return 0;
}

static void smu_throttling_logging_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       throttling_logging_work);

	smu_log_thermal_throttling(smu);
}

static void smu_interrupt_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       interrupt_work);

	if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
		smu->ppt_funcs->interrupt_work(smu);
}

static void smu_swctf_delayed_work_handler(struct work_struct *work)
{
	struct smu_context *smu =
		container_of(work, struct smu_context, swctf_delayed_work.work);
	struct smu_temperature_range *range =
				&smu->thermal_range;
	struct amdgpu_device *adev = smu->adev;
	uint32_t hotspot_tmp, size;

	/*
	 * If the hotspot temperature is confirmed to be below the SW CTF
	 * setting point after the enforced delay, nothing will be done.
	 * Otherwise, a graceful shutdown will be performed to prevent further damage.
	 */
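	/* AMDGPU_PP_SENSOR_HOTSPOT_TEMP is reported in millidegrees C, hence the "/ 1000" */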
	if (range->software_shutdown_temp &&
	    smu->ppt_funcs->read_sensor &&
	    !smu->ppt_funcs->read_sensor(smu,
					 AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
					 &hotspot_tmp,
					 &size) &&
	    hotspot_tmp / 1000 < range->software_shutdown_temp)
		return;

	dev_emerg(adev->dev, "ERROR: GPU over temperature range (SW CTF) detected!\n");
	dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
	orderly_poweroff(true);
}

static void smu_init_xgmi_plpd_mode(struct smu_context *smu)
{
	if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 2)) {
		smu->plpd_mode = XGMI_PLPD_DEFAULT;
		return;
	}

	/* PMFW puts PLPD into the default policy after enabling the feature */
	if (smu_feature_is_enabled(smu,
				   SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT))
		smu->plpd_mode = XGMI_PLPD_DEFAULT;
	else
		smu->plpd_mode = XGMI_PLPD_NONE;
}

static int smu_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	smu->pool_size = adev->pm.smu_prv_buffer_size;
	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);

	INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
	INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
	atomic64_set(&smu->throttle_int_counter, 0);
	smu->watermarks_bitmap = 0;
	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
	atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
	atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
	atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);

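	/*
	 * workload_prority[] maps each power profile to its priority, and
	 * workload_setting[] is the inverse map from priority to profile.
	 */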
	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;

	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
	smu->display_config = &adev->pm.pm_display_cfg;

	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;

	INIT_DELAYED_WORK(&smu->swctf_delayed_work,
			  smu_swctf_delayed_work_handler);

	ret = smu_smc_table_sw_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw init smc table!\n");
		return ret;
	}

	/* get boot_values from vbios to set revision, gfxclk, etc. */
	ret = smu_get_vbios_bootup_values(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n");
		return ret;
	}

	ret = smu_init_pptable_microcode(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup pptable firmware!\n");
		return ret;
	}

	ret = smu_register_irq_handler(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to register smc irq handler!\n");
		return ret;
	}

	/* If there is no way to query fan control mode, fan control is not supported */
	if (!smu->ppt_funcs->get_fan_control_mode)
		smu->adev->pm.no_fan = true;

	return 0;
}

static int smu_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	ret = smu_smc_table_sw_fini(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw fini smc table!\n");
		return ret;
	}

	smu_fini_microcode(smu);

	return 0;
}

static int smu_get_thermal_temperature_range(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_temperature_range *range =
				&smu->thermal_range;
	int ret = 0;

	if (!smu->ppt_funcs->get_thermal_temperature_range)
		return 0;

	ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);
	if (ret)
		return ret;

	adev->pm.dpm.thermal.min_temp = range->min;
	adev->pm.dpm.thermal.max_temp = range->max;
	adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max;
	adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min;
	adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max;
	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max;
	adev->pm.dpm.thermal.min_mem_temp = range->mem_min;
	adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max;
	adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max;

	return ret;
}

/**
 * smu_wbrf_handle_exclusion_ranges - consume the wbrf exclusion ranges
 *
 * @smu: smu_context pointer
 *
 * Retrieve the wbrf exclusion ranges and send them to PMFW for proper handling.
 * Returns 0 on success, error on failure.
 */
static int smu_wbrf_handle_exclusion_ranges(struct smu_context *smu)
{
	struct wbrf_ranges_in_out wbrf_exclusion = {0};
	struct freq_band_range *wifi_bands = wbrf_exclusion.band_list;
	struct amdgpu_device *adev = smu->adev;
	uint32_t num_of_wbrf_ranges = MAX_NUM_OF_WBRF_RANGES;
	uint64_t start, end;
	int ret, i, j;

	ret = amd_wbrf_retrieve_freq_band(adev->dev, &wbrf_exclusion);
	if (ret) {
		dev_err(adev->dev, "Failed to retrieve exclusion ranges!\n");
		return ret;
	}

	/*
	 * The exclusion ranges array we got might be filled with holes and duplicate
	 * entries. For example:
	 * {(2400, 2500), (0, 0), (6882, 6962), (2400, 2500), (0, 0), (6117, 6189), (0, 0)...}
	 * We need to do some sorting to eliminate those holes and duplicate entries.
	 * Expected output: {(2400, 2500), (6117, 6189), (6882, 6962), (0, 0)...}
	 */
	for (i = 0; i < num_of_wbrf_ranges; i++) {
		start = wifi_bands[i].start;
		end = wifi_bands[i].end;

		/* get the last valid entry to fill the intermediate hole */
		if (!start && !end) {
			for (j = num_of_wbrf_ranges - 1; j > i; j--)
				if (wifi_bands[j].start && wifi_bands[j].end)
					break;

			/* no valid entry left */
			if (j <= i)
				break;

			start = wifi_bands[i].start = wifi_bands[j].start;
			end = wifi_bands[i].end = wifi_bands[j].end;
			wifi_bands[j].start = 0;
			wifi_bands[j].end = 0;
			num_of_wbrf_ranges = j;
		}

		/* eliminate duplicate entries */
		for (j = i + 1; j < num_of_wbrf_ranges; j++) {
			if ((wifi_bands[j].start == start) && (wifi_bands[j].end == end)) {
				wifi_bands[j].start = 0;
				wifi_bands[j].end = 0;
			}
		}
	}

	/* Send the sorted wifi_bands to PMFW */
	ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands);
	/* Try to set the wifi_bands again */
	if (unlikely(ret == -EBUSY)) {
		mdelay(5);
		ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands);
	}

	return ret;
}

/**
 * smu_wbrf_event_handler - handle notify events
 *
 * @nb: notifier block
 * @action: event type
 * @_arg: event data
 *
 * Calls relevant amdgpu function in response to wbrf event
 * notification from kernel.
 */
static int smu_wbrf_event_handler(struct notifier_block *nb,
				  unsigned long action, void *_arg)
{
	struct smu_context *smu = container_of(nb, struct smu_context, wbrf_notifier);

	switch (action) {
	case WBRF_CHANGED:
		schedule_delayed_work(&smu->wbrf_delayed_work,
				      msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

/**
 * smu_wbrf_delayed_work_handler - callback on delayed work timer expired
 *
 * @work: struct work_struct pointer
 *
 * Flood is over and driver will consume the latest exclusion ranges.
 */
static void smu_wbrf_delayed_work_handler(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context, wbrf_delayed_work.work);

	smu_wbrf_handle_exclusion_ranges(smu);
}

/**
 * smu_wbrf_support_check - check wbrf support
 *
 * @smu: smu_context pointer
 *
 * Verifies via the ACPI interface whether wbrf is supported.
 */
static void smu_wbrf_support_check(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	smu->wbrf_supported = smu_is_asic_wbrf_supported(smu) && amdgpu_wbrf &&
			      acpi_amd_wbrf_supported_consumer(adev->dev);

	if (smu->wbrf_supported)
		dev_info(adev->dev, "RF interference mitigation is supported\n");
}

/**
 * smu_wbrf_init - init driver wbrf support
 *
 * @smu: smu_context pointer
 *
 * Verifies the AMD ACPI interfaces and registers with the wbrf
 * notifier chain if the wbrf feature is supported.
 * Returns 0 on success, error on failure.
 */
static int smu_wbrf_init(struct smu_context *smu)
{
	int ret;

	if (!smu->wbrf_supported)
		return 0;

	INIT_DELAYED_WORK(&smu->wbrf_delayed_work, smu_wbrf_delayed_work_handler);

	smu->wbrf_notifier.notifier_call = smu_wbrf_event_handler;
	ret = amd_wbrf_register_notifier(&smu->wbrf_notifier);
	if (ret)
		return ret;

	/*
	 * Some wifi band exclusion ranges may already be in place before
	 * our driver loads. Schedule a run to make sure the driver is
	 * aware of those exclusion ranges.
	 */
	schedule_delayed_work(&smu->wbrf_delayed_work,
			      msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));

	return 0;
}

/**
 * smu_wbrf_fini - tear down driver wbrf support
 *
 * @smu: smu_context pointer
 *
 * Unregisters with the wbrf notifier chain.
 */
static void smu_wbrf_fini(struct smu_context *smu)
{
	if (!smu->wbrf_supported)
		return;

	amd_wbrf_unregister_notifier(&smu->wbrf_notifier);

	cancel_delayed_work_sync(&smu->wbrf_delayed_work);
}

static int smu_smc_hw_setup(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	struct amdgpu_device *adev = smu->adev;
	uint8_t pcie_gen = 0, pcie_width = 0;
	uint64_t features_supported;
	int ret = 0;

	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 0, 12):
		if (adev->in_suspend && smu_is_dpm_running(smu)) {
			dev_info(adev->dev, "dpm has been enabled\n");
			ret = smu_system_features_control(smu, true);
			if (ret)
				dev_err(adev->dev, "Failed system features control!\n");
			return ret;
		}
		break;
	default:
		break;
	}

	ret = smu_init_display_count(smu, 0);
	if (ret) {
		dev_info(adev->dev, "Failed to pre-set display count as 0!\n");
		return ret;
	}

	ret = smu_set_driver_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDriverDramAddr!\n");
		return ret;
	}

	/*
	 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
	 */
	ret = smu_set_tool_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetToolsDramAddr!\n");
		return ret;
	}

	/*
	 * The SetSystemVirtualDramAddr and DramLogSetDramAddr messages
	 * notify the SMC of the pool location.
	 */
	ret = smu_notify_memory_pool_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n");
		return ret;
	}

	/*
	 * It is assumed the pptable used before runpm is same as
	 * the one used afterwards. Thus, we can reuse the stored
	 * copy and do not need to set up the pptable again.
	 */
	if (!adev->in_runpm) {
		ret = smu_setup_pptable(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to setup pptable!\n");
			return ret;
		}
	}

	/* smu_dump_pptable(smu); */

	/*
	 * With SCPM enabled, PSP is responsible for the PPTable transferring
	 * (to SMU). Driver involvement is neither needed nor permitted.
	 */
	if (!adev->scpm_enabled) {
		/*
		 * Copy pptable bo in the vram to smc with SMU MSGs such as
		 * SetDriverDramAddr and TransferTableDram2Smu.
		 */
		ret = smu_write_pptable(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to transfer pptable to SMC!\n");
			return ret;
		}
	}

	/* issue Run*Btc msg */
	ret = smu_run_btc(smu);
	if (ret)
		return ret;

	/* Enable UclkShadow when wbrf is supported */
	if (smu->wbrf_supported) {
		ret = smu_enable_uclk_shadow(smu, true);
		if (ret) {
			dev_err(adev->dev, "Failed to enable UclkShadow feature to support wbrf!\n");
			return ret;
		}
	}

	/*
	 * With SCPM enabled, these actions (and the relevant messages) are
	 * neither needed nor permitted.
	 */
	if (!adev->scpm_enabled) {
		ret = smu_feature_set_allowed_mask(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to set driver allowed features mask!\n");
			return ret;
		}
	}

	ret = smu_system_features_control(smu, true);
	if (ret) {
		dev_err(adev->dev, "Failed to enable requested dpm features!\n");
		return ret;
	}

	smu_init_xgmi_plpd_mode(smu);

	ret = smu_feature_get_enabled_mask(smu, &features_supported);
	if (ret) {
		dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
		return ret;
	}
	bitmap_copy(feature->supported,
		    (unsigned long *)&features_supported,
		    feature->feature_num);

	if (!smu_is_dpm_running(smu))
		dev_info(adev->dev, "dpm has been disabled\n");

	/*
	 * Set initialized values (from vbios) to the dpm tables context, such
	 * as gfxclk, memclk and dcefclk, and enable the DPM feature for each
	 * clock type.
	 */
	ret = smu_set_default_dpm_table(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
		return ret;
	}

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
	 * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
	 * Bit 7:0: PCIE lane width, 1 to 7 corresponds to x1 to x32
	 */
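	/* e.g. a Gen4-capable x16 link resolves to pcie_gen = 3 and pcie_width = 6 */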
	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
		pcie_width = 6;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
		pcie_width = 5;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
		pcie_width = 4;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
		pcie_width = 3;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
		pcie_width = 2;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
		pcie_width = 1;
	ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
	if (ret) {
		dev_err(adev->dev, "Attempt to override pcie params failed!\n");
		return ret;
	}

	ret = smu_get_thermal_temperature_range(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
		return ret;
	}

	ret = smu_enable_thermal_alert(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to enable thermal alert!\n");
		return ret;
	}

	ret = smu_notify_display_change(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to notify display change!\n");
		return ret;
	}

	/*
	 * Set min deep sleep dce fclk with bootup value from vbios via
	 * SetMinDeepSleepDcefclk MSG.
	 */
	ret = smu_set_min_dcef_deep_sleep(smu,
					  smu->smu_table.boot_values.dcefclk / 100);
	if (ret) {
		dev_err(adev->dev, "Error setting min deepsleep dcefclk\n");
		return ret;
	}

	/* Init wbrf support. Properly setup the notifier */
	ret = smu_wbrf_init(smu);
	if (ret)
		dev_err(adev->dev, "Error during wbrf init call\n");

	return ret;
}

static int smu_start_smc_engine(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		if (amdgpu_ip_version(adev, MP1_HWIP, 0) < IP_VERSION(11, 0, 0)) {
			if (smu->ppt_funcs->load_microcode) {
				ret = smu->ppt_funcs->load_microcode(smu);
				if (ret)
					return ret;
			}
		}
	}

	if (smu->ppt_funcs->check_fw_status) {
		ret = smu->ppt_funcs->check_fw_status(smu);
		if (ret) {
			dev_err(adev->dev, "SMC is not ready\n");
			return ret;
		}
	}

	/*
	 * Send the GetDriverIfVersion msg to check whether the return value
	 * matches the DRIVER_IF_VERSION in the smc header.
	 */
	ret = smu_check_fw_version(smu);
	if (ret)
		return ret;

	return ret;
}

static int smu_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
		smu->pm_enabled = false;
		return 0;
	}

	ret = smu_start_smc_engine(smu);
	if (ret) {
		dev_err(adev->dev, "SMC engine is not correctly up!\n");
		return ret;
	}

	/*
	 * Check whether wbrf is supported. This needs to be done
	 * before SMU setup starts since part of SMU configuration
	 * relies on this.
	 */
	smu_wbrf_support_check(smu);

	if (smu->is_apu) {
		ret = smu_set_gfx_imu_enable(smu);
		if (ret)
			return ret;
		smu_dpm_set_vcn_enable(smu, true);
		smu_dpm_set_jpeg_enable(smu, true);
		smu_dpm_set_vpe_enable(smu, true);
		smu_dpm_set_umsch_mm_enable(smu, true);
		smu_set_gfx_cgpg(smu, true);
	}

	if (!smu->pm_enabled)
		return 0;

	ret = smu_get_driver_allowed_feature_mask(smu);
	if (ret)
		return ret;

	ret = smu_smc_hw_setup(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup smc hw!\n");
		return ret;
	}

	/*
	 * Retrieve the maximum sustainable clocks here, considering
	 * 1. It is not needed on resume (from S3).
	 * 2. DAL settings come between .hw_init and .late_init of SMU.
	 *    And DAL needs to know the maximum sustainable clocks. Thus
	 *    it cannot be put in .late_init().
	 */
	ret = smu_init_max_sustainable_clocks(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to init max sustainable clocks!\n");
		return ret;
	}

	adev->pm.dpm_enabled = true;

	dev_info(adev->dev, "SMU is initialized successfully!\n");

	return 0;
}

static int smu_disable_dpms(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;
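	/*
	 * BACO is in use when the whole-GPU reset method is BACO, or on
	 * runtime PM / hibernation for dGPUs that support it.
	 */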
1834 bool use_baco = !smu->is_apu &&
1835 ((amdgpu_in_reset(adev) &&
1836 (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
1837 ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));
1838
1839 /*
1840 * For SMU 13.0.0, 13.0.7 and 13.0.10, PMFW will handle the DPM features
1841 * (disablement or others) properly on suspend/reset/unload. Driver involvement may cause some unexpected issues.
1842 */
1843 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1844 case IP_VERSION(13, 0, 0):
1845 case IP_VERSION(13, 0, 7):
1846 case IP_VERSION(13, 0, 10):
1847 return 0;
1848 default:
1849 break;
1850 }
1851
1852 /*
1853 * For custom pptable uploading, skip the DPM feature
1854 * disable process on Navi1x ASICs:
1855 * - The gfx-related features are under the control of
1856 * RLC on those ASICs, so an RLC reinitialization would
1857 * be needed to re-enable them, which would cost much
1858 * more effort.
1859 *
1860 * - SMU firmware can handle the DPM re-enablement
1861 * properly.
1862 */
1863 if (smu->uploading_custom_pp_table) {
1864 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1865 case IP_VERSION(11, 0, 0):
1866 case IP_VERSION(11, 0, 5):
1867 case IP_VERSION(11, 0, 9):
1868 case IP_VERSION(11, 0, 7):
1869 case IP_VERSION(11, 0, 11):
1870 case IP_VERSION(11, 5, 0):
1871 case IP_VERSION(11, 0, 12):
1872 case IP_VERSION(11, 0, 13):
1873 return 0;
1874 default:
1875 break;
1876 }
1877 }
1878
1879 /*
1880 * For Sienna_Cichlid and the other ASICs listed below, PMFW will handle
1881 * the feature disablement properly on BACO entry. Driver involvement is unnecessary.
1882 */
1883 if (use_baco) {
1884 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1885 case IP_VERSION(11, 0, 7):
1886 case IP_VERSION(11, 0, 0):
1887 case IP_VERSION(11, 0, 5):
1888 case IP_VERSION(11, 0, 9):
1889 case IP_VERSION(13, 0, 7):
1890 return 0;
1891 default:
1892 break;
1893 }
1894 }
1895
1896 /*
1897 * For SMU 13.0.4/11 and 14.0.0/1, PMFW will handle the feature disablement
1898 * properly for gpu reset and S0i3 cases. Driver involvement is unnecessary.
1899 */
1900 if (amdgpu_in_reset(adev) || adev->in_s0ix) {
1901 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1902 case IP_VERSION(13, 0, 4):
1903 case IP_VERSION(13, 0, 11):
1904 case IP_VERSION(14, 0, 0):
1905 case IP_VERSION(14, 0, 1):
1906 return 0;
1907 default:
1908 break;
1909 }
1910 }
1911
1912 /*
1913 * For gpu reset, runpm and hibernation through BACO,
1914 * BACO feature has to be kept enabled.
1915 */
1916 if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
1917 ret = smu_disable_all_features_with_exception(smu,
1918 SMU_FEATURE_BACO_BIT);
1919 if (ret)
1920 dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
1921 } else {
1922 /* DisableAllSmuFeatures message is not permitted with SCPM enabled */
1923 if (!adev->scpm_enabled) {
1924 ret = smu_system_features_control(smu, false);
1925 if (ret)
1926 dev_err(adev->dev, "Failed to disable smu features.\n");
1927 }
1928 }
1929
1930 /* Notify SMU that RLC is going to be off and stop the RLC/SMU interaction;
1931 * otherwise SMU will hang while interacting with RLC if RLC is halted.
1932 * This is a workaround for the Vangogh ASIC to fix its SMU hang issue.
1933 */
1934 ret = smu_notify_rlc_state(smu, false);
1935 if (ret) {
1936 dev_err(adev->dev, "Fail to notify rlc status!\n");
1937 return ret;
1938 }
1939
1940 if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2) &&
1941 !((adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs) &&
1942 !amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->stop)
1943 adev->gfx.rlc.funcs->stop(adev);
1944
1945 return ret;
1946 }
1947
1948 static int smu_smc_hw_cleanup(struct smu_context *smu)
1949 {
1950 struct amdgpu_device *adev = smu->adev;
1951 int ret = 0;
1952
1953 smu_wbrf_fini(smu);
1954
1955 cancel_work_sync(&smu->throttling_logging_work);
1956 cancel_work_sync(&smu->interrupt_work);
1957
1958 ret = smu_disable_thermal_alert(smu);
1959 if (ret) {
1960 dev_err(adev->dev, "Fail to disable thermal alert!\n");
1961 return ret;
1962 }
1963
1964 cancel_delayed_work_sync(&smu->swctf_delayed_work);
1965
1966 ret = smu_disable_dpms(smu);
1967 if (ret) {
1968 dev_err(adev->dev, "Fail to disable dpm features!\n");
1969 return ret;
1970 }
1971
1972 return 0;
1973 }
1974
1975 static int smu_reset_mp1_state(struct smu_context *smu)
1976 {
1977 struct amdgpu_device *adev = smu->adev;
1978 int ret = 0;
1979
1980 if ((!adev->in_runpm) && (!adev->in_suspend) &&
1981 (!amdgpu_in_reset(adev)) && amdgpu_ip_version(adev, MP1_HWIP, 0) ==
1982 IP_VERSION(13, 0, 10) &&
1983 !amdgpu_device_has_display_hardware(adev))
1984 ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD);
1985
1986 return ret;
1987 }
1988
1989 static int smu_hw_fini(void *handle)
1990 {
1991 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1992 struct smu_context *smu = adev->powerplay.pp_handle;
1993 int ret;
1994
1995 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1996 return 0;
1997
1998 smu_dpm_set_vcn_enable(smu, false);
1999 smu_dpm_set_jpeg_enable(smu, false);
2000 smu_dpm_set_vpe_enable(smu, false);
2001 smu_dpm_set_umsch_mm_enable(smu, false);
2002
2003 adev->vcn.cur_state = AMD_PG_STATE_GATE;
2004 adev->jpeg.cur_state = AMD_PG_STATE_GATE;
2005
2006 if (!smu->pm_enabled)
2007 return 0;
2008
2009 adev->pm.dpm_enabled = false;
2010
2011 ret = smu_smc_hw_cleanup(smu);
2012 if (ret)
2013 return ret;
2014
2015 ret = smu_reset_mp1_state(smu);
2016 if (ret)
2017 return ret;
2018
2019 return 0;
2020 }
2021
2022 static void smu_late_fini(void *handle)
2023 {
2024 struct amdgpu_device *adev = handle;
2025 struct smu_context *smu = adev->powerplay.pp_handle;
2026
2027 kfree(smu);
2028 }
2029
2030 static int smu_reset(struct smu_context *smu)
2031 {
2032 struct amdgpu_device *adev = smu->adev;
2033 int ret;
2034
2035 ret = smu_hw_fini(adev);
2036 if (ret)
2037 return ret;
2038
2039 ret = smu_hw_init(adev);
2040 if (ret)
2041 return ret;
2042
2043 ret = smu_late_init(adev);
2044 if (ret)
2045 return ret;
2046
2047 return 0;
2048 }
2049
2050 static int smu_suspend(void *handle)
2051 {
2052 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2053 struct smu_context *smu = adev->powerplay.pp_handle;
2054 int ret;
2055 uint64_t count;
2056
2057 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
2058 return 0;
2059
2060 if (!smu->pm_enabled)
2061 return 0;
2062
2063 adev->pm.dpm_enabled = false;
2064
2065 ret = smu_smc_hw_cleanup(smu);
2066 if (ret)
2067 return ret;
2068
2069 smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
2070
2071 smu_set_gfx_cgpg(smu, false);
2072
2073 /*
2074 * PMFW resets the gfxoff entry count when the device is suspended, so we
2075 * save the last value here to restore on resume and keep it consistent.
2076 */
2077 ret = smu_get_entrycount_gfxoff(smu, &count);
2078 if (!ret)
2079 adev->gfx.gfx_off_entrycount = count;
2080
2081 return 0;
2082 }
2083
2084 static int smu_resume(void *handle)
2085 {
2086 int ret;
2087 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2088 struct smu_context *smu = adev->powerplay.pp_handle;
2089
2090 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
2091 return 0;
2092
2093 if (!smu->pm_enabled)
2094 return 0;
2095
2096 dev_info(adev->dev, "SMU is resuming...\n");
2097
2098 ret = smu_start_smc_engine(smu);
2099 if (ret) {
2100 dev_err(adev->dev, "SMC engine is not correctly up!\n");
2101 return ret;
2102 }
2103
2104 ret = smu_smc_hw_setup(smu);
2105 if (ret) {
2106 dev_err(adev->dev, "Failed to setup smc hw!\n");
2107 return ret;
2108 }
2109
2110 ret = smu_set_gfx_imu_enable(smu);
2111 if (ret)
2112 return ret;
2113
2114 smu_set_gfx_cgpg(smu, true);
2115
2116 smu->disable_uclk_switch = 0;
2117
2118 adev->pm.dpm_enabled = true;
2119
2120 dev_info(adev->dev, "SMU is resumed successfully!\n");
2121
2122 return 0;
2123 }
2124
2125 static int smu_display_configuration_change(void *handle,
2126 const struct amd_pp_display_configuration *display_config)
2127 {
2128 struct smu_context *smu = handle;
2129
2130 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2131 return -EOPNOTSUPP;
2132
2133 if (!display_config)
2134 return -EINVAL;
2135
2136 smu_set_min_dcef_deep_sleep(smu,
2137 display_config->min_dcef_deep_sleep_set_clk / 100);
2138
2139 return 0;
2140 }
2141
2142 static int smu_set_clockgating_state(void *handle,
2143 enum amd_clockgating_state state)
2144 {
2145 return 0;
2146 }
2147
2148 static int smu_set_powergating_state(void *handle,
2149 enum amd_powergating_state state)
2150 {
2151 return 0;
2152 }
2153
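/*
 * Enter or exit the UMD stable pstate. On entry the current dpm level
 * is saved and GPO, gfx ULV and deep sleep are disabled so clocks stay
 * deterministic for the UMD; on exit those features are re-enabled in
 * reverse order and the saved level is restored.
 */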
2154 static int smu_enable_umd_pstate(void *handle,
2155 enum amd_dpm_forced_level *level)
2156 {
2157 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
2158 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
2159 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
2160 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
2161
2162 struct smu_context *smu = (struct smu_context *)handle;
2163 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2164
2165 if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
2166 return -EINVAL;
2167
2168 if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
2169 /* enter umd pstate, save current level, disable gfx cg */
2170 if (*level & profile_mode_mask) {
2171 smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
2172 smu_gpo_control(smu, false);
2173 smu_gfx_ulv_control(smu, false);
2174 smu_deep_sleep_control(smu, false);
2175 amdgpu_asic_update_umd_stable_pstate(smu->adev, true);
2176 }
2177 } else {
2178 /* exit umd pstate, restore level, enable gfx cg */
2179 if (!(*level & profile_mode_mask)) {
2180 if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
2181 *level = smu_dpm_ctx->saved_dpm_level;
2182 amdgpu_asic_update_umd_stable_pstate(smu->adev, false);
2183 smu_deep_sleep_control(smu, true);
2184 smu_gfx_ulv_control(smu, true);
2185 smu_gpo_control(smu, true);
2186 }
2187 }
2188
2189 return 0;
2190 }
2191
2192 static int smu_bump_power_profile_mode(struct smu_context *smu,
2193 long *param,
2194 uint32_t param_size)
2195 {
2196 int ret = 0;
2197
2198 if (smu->ppt_funcs->set_power_profile_mode)
2199 ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
2200
2201 return ret;
2202 }
2203
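/*
 * Note on the workload selection done below: workload_mask holds one
 * bit per active profile, ordered by priority. fls() returns the
 * 1-based index of the highest bit set, so e.g. a mask of 0b00101
 * yields fls() == 3 and workload_setting[2] gets applied, while an
 * empty mask falls back to workload_setting[0].
 */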
2204 static int smu_adjust_power_state_dynamic(struct smu_context *smu,
2205 enum amd_dpm_forced_level level,
2206 bool skip_display_settings)
2207 {
2208 int ret = 0;
2209 int index = 0;
2210 long workload;
2211 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2212
2213 if (!skip_display_settings) {
2214 ret = smu_display_config_changed(smu);
2215 if (ret) {
2216 dev_err(smu->adev->dev, "Failed to change display config!");
2217 return ret;
2218 }
2219 }
2220
2221 ret = smu_apply_clocks_adjust_rules(smu);
2222 if (ret) {
2223 dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!");
2224 return ret;
2225 }
2226
2227 if (!skip_display_settings) {
2228 ret = smu_notify_smc_display_config(smu);
2229 if (ret) {
2230 dev_err(smu->adev->dev, "Failed to notify smc display config!");
2231 return ret;
2232 }
2233 }
2234
2235 if (smu_dpm_ctx->dpm_level != level) {
2236 ret = smu_asic_set_performance_level(smu, level);
2237 if (ret) {
2238 dev_err(smu->adev->dev, "Failed to set performance level!");
2239 return ret;
2240 }
2241
2242 /* update the saved copy */
2243 smu_dpm_ctx->dpm_level = level;
2244 }
2245
2246 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
2247 smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
2248 index = fls(smu->workload_mask);
2249 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
2250 workload = smu->workload_setting[index];
2251
2252 if (smu->power_profile_mode != workload)
2253 smu_bump_power_profile_mode(smu, &workload, 0);
2254 }
2255
2256 return ret;
2257 }
2258
2259 static int smu_handle_task(struct smu_context *smu,
2260 enum amd_dpm_forced_level level,
2261 enum amd_pp_task task_id)
2262 {
2263 int ret = 0;
2264
2265 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2266 return -EOPNOTSUPP;
2267
2268 switch (task_id) {
2269 case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
2270 ret = smu_pre_display_config_changed(smu);
2271 if (ret)
2272 return ret;
2273 ret = smu_adjust_power_state_dynamic(smu, level, false);
2274 break;
2275 case AMD_PP_TASK_COMPLETE_INIT:
2276 case AMD_PP_TASK_READJUST_POWER_STATE:
2277 ret = smu_adjust_power_state_dynamic(smu, level, true);
2278 break;
2279 default:
2280 break;
2281 }
2282
2283 return ret;
2284 }
2285
2286 static int smu_handle_dpm_task(void *handle,
2287 enum amd_pp_task task_id,
2288 enum amd_pm_state_type *user_state)
2289 {
2290 struct smu_context *smu = handle;
2291 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
2292
2293 return smu_handle_task(smu, smu_dpm->dpm_level, task_id);
2295 }
2296
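/*
 * Refcount-style tracking of the requested power profiles: each enable
 * sets the bit at workload_prority[type], each disable clears it, and
 * the highest-priority bit still set decides which profile is actually
 * programmed (see the fls() based selection above).
 */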
2297 static int smu_switch_power_profile(void *handle,
2298 enum PP_SMC_POWER_PROFILE type,
2299 bool en)
2300 {
2301 struct smu_context *smu = handle;
2302 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2303 long workload;
2304 uint32_t index;
2305
2306 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2307 return -EOPNOTSUPP;
2308
2309 if (type >= PP_SMC_POWER_PROFILE_CUSTOM)
2310 return -EINVAL;
2311
2312 if (!en) {
2313 smu->workload_mask &= ~(1 << smu->workload_prority[type]);
2314 index = fls(smu->workload_mask);
2315 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
2316 workload = smu->workload_setting[index];
2317 } else {
2318 smu->workload_mask |= (1 << smu->workload_prority[type]);
2319 index = fls(smu->workload_mask);
2320 index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
2321 workload = smu->workload_setting[index];
2322 }
2323
2324 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
2325 smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
2326 smu_bump_power_profile_mode(smu, &workload, 0);
2327
2328 return 0;
2329 }
2330
2331 static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
2332 {
2333 struct smu_context *smu = handle;
2334 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2335
2336 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2337 return -EOPNOTSUPP;
2338
2339 if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
2340 return -EINVAL;
2341
2342 return smu_dpm_ctx->dpm_level;
2343 }
2344
2345 static int smu_force_performance_level(void *handle,
2346 enum amd_dpm_forced_level level)
2347 {
2348 struct smu_context *smu = handle;
2349 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2350 int ret = 0;
2351
2352 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2353 return -EOPNOTSUPP;
2354
2355 if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
2356 return -EINVAL;
2357
2358 ret = smu_enable_umd_pstate(smu, &level);
2359 if (ret)
2360 return ret;
2361
2362 ret = smu_handle_task(smu, level,
2363 AMD_PP_TASK_READJUST_POWER_STATE);
2364
2365 /* reset user dpm clock state */
2366 if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
2367 memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask));
2368 smu->user_dpm_profile.clk_dependency = 0;
2369 }
2370
2371 return ret;
2372 }
2373
2374 static int smu_set_display_count(void *handle, uint32_t count)
2375 {
2376 struct smu_context *smu = handle;
2377
2378 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2379 return -EOPNOTSUPP;
2380
2381 return smu_init_display_count(smu, count);
2382 }
2383
2384 static int smu_force_smuclk_levels(struct smu_context *smu,
2385 enum smu_clk_type clk_type,
2386 uint32_t mask)
2387 {
2388 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2389 int ret = 0;
2390
2391 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2392 return -EOPNOTSUPP;
2393
2394 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
2395 dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");
2396 return -EINVAL;
2397 }
2398
2399 if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
2400 ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
2401 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
2402 smu->user_dpm_profile.clk_mask[clk_type] = mask;
2403 smu_set_user_clk_dependencies(smu, clk_type);
2404 }
2405 }
2406
2407 return ret;
2408 }
2409
2410 static int smu_force_ppclk_levels(void *handle,
2411 enum pp_clock_type type,
2412 uint32_t mask)
2413 {
2414 struct smu_context *smu = handle;
2415 enum smu_clk_type clk_type;
2416
2417 switch (type) {
2418 case PP_SCLK:
2419 clk_type = SMU_SCLK; break;
2420 case PP_MCLK:
2421 clk_type = SMU_MCLK; break;
2422 case PP_PCIE:
2423 clk_type = SMU_PCIE; break;
2424 case PP_SOCCLK:
2425 clk_type = SMU_SOCCLK; break;
2426 case PP_FCLK:
2427 clk_type = SMU_FCLK; break;
2428 case PP_DCEFCLK:
2429 clk_type = SMU_DCEFCLK; break;
2430 case PP_VCLK:
2431 clk_type = SMU_VCLK; break;
2432 case PP_VCLK1:
2433 clk_type = SMU_VCLK1; break;
2434 case PP_DCLK:
2435 clk_type = SMU_DCLK; break;
2436 case PP_DCLK1:
2437 clk_type = SMU_DCLK1; break;
2438 case OD_SCLK:
2439 clk_type = SMU_OD_SCLK; break;
2440 case OD_MCLK:
2441 clk_type = SMU_OD_MCLK; break;
2442 case OD_VDDC_CURVE:
2443 clk_type = SMU_OD_VDDC_CURVE; break;
2444 case OD_RANGE:
2445 clk_type = SMU_OD_RANGE; break;
2446 default:
2447 return -EINVAL;
2448 }
2449
2450 return smu_force_smuclk_levels(smu, clk_type, mask);
2451 }
2452
2453 /*
2454 * On system suspend or reset, the dpm_enabled
2455 * flag is cleared, so that those SMU services which
2456 * are not supported get gated.
2457 * However, setting the mp1 state should still be allowed
2458 * even with the dpm_enabled flag cleared.
2459 */
2460 static int smu_set_mp1_state(void *handle,
2461 enum pp_mp1_state mp1_state)
2462 {
2463 struct smu_context *smu = handle;
2464 int ret = 0;
2465
2466 if (!smu->pm_enabled)
2467 return -EOPNOTSUPP;
2468
2469 if (smu->ppt_funcs &&
2470 smu->ppt_funcs->set_mp1_state)
2471 ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);
2472
2473 return ret;
2474 }
2475
2476 static int smu_set_df_cstate(void *handle,
2477 enum pp_df_cstate state)
2478 {
2479 struct smu_context *smu = handle;
2480 int ret = 0;
2481
2482 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2483 return -EOPNOTSUPP;
2484
2485 if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
2486 return 0;
2487
2488 ret = smu->ppt_funcs->set_df_cstate(smu, state);
2489 if (ret)
2490 dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");
2491
2492 return ret;
2493 }
2494
2495 int smu_write_watermarks_table(struct smu_context *smu)
2496 {
2497 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2498 return -EOPNOTSUPP;
2499
2500 return smu_set_watermarks_table(smu, NULL);
2501 }
2502
2503 static int smu_set_watermarks_for_clock_ranges(void *handle,
2504 struct pp_smu_wm_range_sets *clock_ranges)
2505 {
2506 struct smu_context *smu = handle;
2507
2508 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2509 return -EOPNOTSUPP;
2510
2511 if (smu->disable_watermark)
2512 return 0;
2513
2514 return smu_set_watermarks_table(smu, clock_ranges);
2515 }
2516
2517 int smu_set_ac_dc(struct smu_context *smu)
2518 {
2519 int ret = 0;
2520
2521 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2522 return -EOPNOTSUPP;
2523
2524 /* controlled by firmware */
2525 if (smu->dc_controlled_by_gpio)
2526 return 0;
2527
2528 ret = smu_set_power_source(smu,
2529 smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
2530 SMU_POWER_SOURCE_DC);
2531 if (ret)
2532 dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
2533 smu->adev->pm.ac_power ? "AC" : "DC");
2534
2535 return ret;
2536 }
2537
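/*
 * amdgpu IP block callbacks for the SMC block. The same function table
 * is shared by all SMU v11/v12/v13/v14 block versions declared below;
 * per-ASIC differences live behind smu->ppt_funcs instead.
 */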
2538 const struct amd_ip_funcs smu_ip_funcs = {
2539 .name = "smu",
2540 .early_init = smu_early_init,
2541 .late_init = smu_late_init,
2542 .sw_init = smu_sw_init,
2543 .sw_fini = smu_sw_fini,
2544 .hw_init = smu_hw_init,
2545 .hw_fini = smu_hw_fini,
2546 .late_fini = smu_late_fini,
2547 .suspend = smu_suspend,
2548 .resume = smu_resume,
2549 .is_idle = NULL,
2550 .check_soft_reset = NULL,
2551 .wait_for_idle = NULL,
2552 .soft_reset = NULL,
2553 .set_clockgating_state = smu_set_clockgating_state,
2554 .set_powergating_state = smu_set_powergating_state,
2555 };
2556
2557 const struct amdgpu_ip_block_version smu_v11_0_ip_block = {
2558 .type = AMD_IP_BLOCK_TYPE_SMC,
2559 .major = 11,
2560 .minor = 0,
2561 .rev = 0,
2562 .funcs = &smu_ip_funcs,
2563 };
2564
2565 const struct amdgpu_ip_block_version smu_v12_0_ip_block = {
2566 .type = AMD_IP_BLOCK_TYPE_SMC,
2567 .major = 12,
2568 .minor = 0,
2569 .rev = 0,
2570 .funcs = &smu_ip_funcs,
2571 };
2572
2573 const struct amdgpu_ip_block_version smu_v13_0_ip_block = {
2574 .type = AMD_IP_BLOCK_TYPE_SMC,
2575 .major = 13,
2576 .minor = 0,
2577 .rev = 0,
2578 .funcs = &smu_ip_funcs,
2579 };
2580
2581 const struct amdgpu_ip_block_version smu_v14_0_ip_block = {
2582 .type = AMD_IP_BLOCK_TYPE_SMC,
2583 .major = 14,
2584 .minor = 0,
2585 .rev = 0,
2586 .funcs = &smu_ip_funcs,
2587 };
2588
2589 static int smu_load_microcode(void *handle)
2590 {
2591 struct smu_context *smu = handle;
2592 struct amdgpu_device *adev = smu->adev;
2593 int ret = 0;
2594
2595 if (!smu->pm_enabled)
2596 return -EOPNOTSUPP;
2597
2598 /* This should be used for non-PSP loading */
2599 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
2600 return 0;
2601
2602 if (smu->ppt_funcs->load_microcode) {
2603 ret = smu->ppt_funcs->load_microcode(smu);
2604 if (ret) {
2605 dev_err(adev->dev, "Load microcode failed\n");
2606 return ret;
2607 }
2608 }
2609
2610 if (smu->ppt_funcs->check_fw_status) {
2611 ret = smu->ppt_funcs->check_fw_status(smu);
2612 if (ret) {
2613 dev_err(adev->dev, "SMC is not ready\n");
2614 return ret;
2615 }
2616 }
2617
2618 return ret;
2619 }
2620
2621 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
2622 {
2623 int ret = 0;
2624
2625 if (smu->ppt_funcs->set_gfx_cgpg)
2626 ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
2627
2628 return ret;
2629 }
2630
2631 static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
2632 {
2633 struct smu_context *smu = handle;
2634 int ret = 0;
2635
2636 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2637 return -EOPNOTSUPP;
2638
2639 if (!smu->ppt_funcs->set_fan_speed_rpm)
2640 return -EOPNOTSUPP;
2641
2642 if (speed == U32_MAX)
2643 return -EINVAL;
2644
2645 ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
2646 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
2647 smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM;
2648 smu->user_dpm_profile.fan_speed_rpm = speed;
2649
2650 /* Override custom PWM setting as they cannot co-exist */
2651 smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM;
2652 smu->user_dpm_profile.fan_speed_pwm = 0;
2653 }
2654
2655 return ret;
2656 }
2657
2658 /**
2659 * smu_get_power_limit - Request one of the SMU Power Limits
2660 *
2661 * @handle: pointer to smu context
2662 * @limit: requested limit is written back to this variable
2663 * @pp_limit_level: &pp_power_limit_level selecting which limit to return
2664 * @pp_power_type: &pp_power_type type of power (sustained or fast)
2665 * Return: 0 on success, <0 on error
2666 *
2667 */
2668 int smu_get_power_limit(void *handle,
2669 uint32_t *limit,
2670 enum pp_power_limit_level pp_limit_level,
2671 enum pp_power_type pp_power_type)
2672 {
2673 struct smu_context *smu = handle;
2674 struct amdgpu_device *adev = smu->adev;
2675 enum smu_ppt_limit_level limit_level;
2676 uint32_t limit_type;
2677 int ret = 0;
2678
2679 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2680 return -EOPNOTSUPP;
2681
2682 switch (pp_power_type) {
2683 case PP_PWR_TYPE_SUSTAINED:
2684 limit_type = SMU_DEFAULT_PPT_LIMIT;
2685 break;
2686 case PP_PWR_TYPE_FAST:
2687 limit_type = SMU_FAST_PPT_LIMIT;
2688 break;
2689 default:
2690 return -EOPNOTSUPP;
2691 }
2692
2693 switch (pp_limit_level) {
2694 case PP_PWR_LIMIT_CURRENT:
2695 limit_level = SMU_PPT_LIMIT_CURRENT;
2696 break;
2697 case PP_PWR_LIMIT_DEFAULT:
2698 limit_level = SMU_PPT_LIMIT_DEFAULT;
2699 break;
2700 case PP_PWR_LIMIT_MAX:
2701 limit_level = SMU_PPT_LIMIT_MAX;
2702 break;
2703 case PP_PWR_LIMIT_MIN:
2704 limit_level = SMU_PPT_LIMIT_MIN;
2705 break;
2706 default:
2707 return -EOPNOTSUPP;
2708 }
2709
2710 if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
2711 if (smu->ppt_funcs->get_ppt_limit)
2712 ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
2713 } else {
2714 switch (limit_level) {
2715 case SMU_PPT_LIMIT_CURRENT:
2716 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
2717 case IP_VERSION(13, 0, 2):
2718 case IP_VERSION(13, 0, 6):
2719 case IP_VERSION(11, 0, 7):
2720 case IP_VERSION(11, 0, 11):
2721 case IP_VERSION(11, 0, 12):
2722 case IP_VERSION(11, 0, 13):
2723 ret = smu_get_asic_power_limits(smu,
2724 &smu->current_power_limit,
2725 NULL, NULL, NULL);
2726 break;
2727 default:
2728 break;
2729 }
2730 *limit = smu->current_power_limit;
2731 break;
2732 case SMU_PPT_LIMIT_DEFAULT:
2733 *limit = smu->default_power_limit;
2734 break;
2735 case SMU_PPT_LIMIT_MAX:
2736 *limit = smu->max_power_limit;
2737 break;
2738 case SMU_PPT_LIMIT_MIN:
2739 *limit = smu->min_power_limit;
2740 break;
2741 default:
2742 return -EINVAL;
2743 }
2744 }
2745
2746 return ret;
2747 }
2748
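/*
 * The limit argument below is a packed word: bits 31..24 carry the
 * limit type and bits 23..0 the value in watts. As an illustrative
 * sketch (the numbers are hypothetical), a caller requesting a 220 W
 * fast PPT limit would pass (SMU_FAST_PPT_LIMIT << 24) | 220.
 */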
2749 static int smu_set_power_limit(void *handle, uint32_t limit)
2750 {
2751 struct smu_context *smu = handle;
2752 uint32_t limit_type = limit >> 24;
2753 int ret = 0;
2754
2755 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2756 return -EOPNOTSUPP;
2757
2758 limit &= (1 << 24) - 1;
2759 if (limit_type != SMU_DEFAULT_PPT_LIMIT)
2760 if (smu->ppt_funcs->set_power_limit)
2761 return smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
2762
2763 if ((limit > smu->max_power_limit) || (limit < smu->min_power_limit)) {
2764 dev_err(smu->adev->dev,
2765 "New power limit (%d) is out of range [%d,%d]\n",
2766 limit, smu->min_power_limit, smu->max_power_limit);
2767 return -EINVAL;
2768 }
2769
2770 if (!limit)
2771 limit = smu->current_power_limit;
2772
2773 if (smu->ppt_funcs->set_power_limit) {
2774 ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
2775 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
2776 smu->user_dpm_profile.power_limit = limit;
2777 }
2778
2779 return ret;
2780 }
2781
2782 static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
2783 {
2784 int ret = 0;
2785
2786 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2787 return -EOPNOTSUPP;
2788
2789 if (smu->ppt_funcs->print_clk_levels)
2790 ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
2791
2792 return ret;
2793 }
2794
2795 static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type)
2796 {
2797 enum smu_clk_type clk_type;
2798
2799 switch (type) {
2800 case PP_SCLK:
2801 clk_type = SMU_SCLK; break;
2802 case PP_MCLK:
2803 clk_type = SMU_MCLK; break;
2804 case PP_PCIE:
2805 clk_type = SMU_PCIE; break;
2806 case PP_SOCCLK:
2807 clk_type = SMU_SOCCLK; break;
2808 case PP_FCLK:
2809 clk_type = SMU_FCLK; break;
2810 case PP_DCEFCLK:
2811 clk_type = SMU_DCEFCLK; break;
2812 case PP_VCLK:
2813 clk_type = SMU_VCLK; break;
2814 case PP_VCLK1:
2815 clk_type = SMU_VCLK1; break;
2816 case PP_DCLK:
2817 clk_type = SMU_DCLK; break;
2818 case PP_DCLK1:
2819 clk_type = SMU_DCLK1; break;
2820 case OD_SCLK:
2821 clk_type = SMU_OD_SCLK; break;
2822 case OD_MCLK:
2823 clk_type = SMU_OD_MCLK; break;
2824 case OD_VDDC_CURVE:
2825 clk_type = SMU_OD_VDDC_CURVE; break;
2826 case OD_RANGE:
2827 clk_type = SMU_OD_RANGE; break;
2828 case OD_VDDGFX_OFFSET:
2829 clk_type = SMU_OD_VDDGFX_OFFSET; break;
2830 case OD_CCLK:
2831 clk_type = SMU_OD_CCLK; break;
2832 case OD_FAN_CURVE:
2833 clk_type = SMU_OD_FAN_CURVE; break;
2834 case OD_ACOUSTIC_LIMIT:
2835 clk_type = SMU_OD_ACOUSTIC_LIMIT; break;
2836 case OD_ACOUSTIC_TARGET:
2837 clk_type = SMU_OD_ACOUSTIC_TARGET; break;
2838 case OD_FAN_TARGET_TEMPERATURE:
2839 clk_type = SMU_OD_FAN_TARGET_TEMPERATURE; break;
2840 case OD_FAN_MINIMUM_PWM:
2841 clk_type = SMU_OD_FAN_MINIMUM_PWM; break;
2842 default:
2843 clk_type = SMU_CLK_COUNT; break;
2844 }
2845
2846 return clk_type;
2847 }
2848
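/*
 * Translate the pp_clock_type tokens used by the sysfs interface into
 * the internal SMU_* clock ids. SMU_CLK_COUNT doubles as the "unknown
 * type" sentinel which the callers below check for.
 */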
2849 static int smu_print_ppclk_levels(void *handle,
2850 enum pp_clock_type type,
2851 char *buf)
2852 {
2853 struct smu_context *smu = handle;
2854 enum smu_clk_type clk_type;
2855
2856 clk_type = smu_convert_to_smuclk(type);
2857 if (clk_type == SMU_CLK_COUNT)
2858 return -EINVAL;
2859
2860 return smu_print_smuclk_levels(smu, clk_type, buf);
2861 }
2862
2863 static int smu_emit_ppclk_levels(void *handle, enum pp_clock_type type, char *buf, int *offset)
2864 {
2865 struct smu_context *smu = handle;
2866 enum smu_clk_type clk_type;
2867
2868 clk_type = smu_convert_to_smuclk(type);
2869 if (clk_type == SMU_CLK_COUNT)
2870 return -EINVAL;
2871
2872 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2873 return -EOPNOTSUPP;
2874
2875 if (!smu->ppt_funcs->emit_clk_levels)
2876 return -ENOENT;
2877
2878 return smu->ppt_funcs->emit_clk_levels(smu, clk_type, buf, offset);
2880 }
2881
2882 static int smu_od_edit_dpm_table(void *handle,
2883 enum PP_OD_DPM_TABLE_COMMAND type,
2884 long *input, uint32_t size)
2885 {
2886 struct smu_context *smu = handle;
2887 int ret = 0;
2888
2889 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2890 return -EOPNOTSUPP;
2891
2892 if (smu->ppt_funcs->od_edit_dpm_table) {
2893 ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
2894 }
2895
2896 return ret;
2897 }
2898
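/*
 * Generic sensor read: defer to the per-ASIC read_sensor callback
 * first and fall back to the common handling (pstate clocks, feature
 * masks, power gate status) only when it cannot service the request.
 * *size_arg is in/out: callers pass the buffer size and get back the
 * number of bytes written (4 or 8 here).
 */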
2899 static int smu_read_sensor(void *handle,
2900 int sensor,
2901 void *data,
2902 int *size_arg)
2903 {
2904 struct smu_context *smu = handle;
2905 struct smu_umd_pstate_table *pstate_table =
2906 &smu->pstate_table;
2907 int ret = 0;
2908 uint32_t *size, size_val;
2909
2910 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2911 return -EOPNOTSUPP;
2912
2913 if (!data || !size_arg)
2914 return -EINVAL;
2915
2916 size_val = *size_arg;
2917 size = &size_val;
2918
2919 if (smu->ppt_funcs->read_sensor)
2920 if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
2921 goto unlock;
2922
2923 switch (sensor) {
2924 case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
2925 *((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100;
2926 *size = 4;
2927 break;
2928 case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
2929 *((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;
2930 *size = 4;
2931 break;
2932 case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:
2933 *((uint32_t *)data) = pstate_table->gfxclk_pstate.peak * 100;
2934 *size = 4;
2935 break;
2936 case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:
2937 *((uint32_t *)data) = pstate_table->uclk_pstate.peak * 100;
2938 *size = 4;
2939 break;
2940 case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
2941 ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data);
2942 *size = 8;
2943 break;
2944 case AMDGPU_PP_SENSOR_UVD_POWER:
2945 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
2946 *size = 4;
2947 break;
2948 case AMDGPU_PP_SENSOR_VCE_POWER:
2949 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
2950 *size = 4;
2951 break;
2952 case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
2953 *(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1;
2954 *size = 4;
2955 break;
2956 case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
2957 *(uint32_t *)data = 0;
2958 *size = 4;
2959 break;
2960 default:
2961 *size = 0;
2962 ret = -EOPNOTSUPP;
2963 break;
2964 }
2965
2966 unlock:
2967 /* copy the uint32_t size value back to the caller's int-sized argument */
2968 *size_arg = size_val;
2969
2970 return ret;
2971 }
2972
2973 static int smu_get_apu_thermal_limit(void *handle, uint32_t *limit)
2974 {
2975 int ret = -EOPNOTSUPP;
2976 struct smu_context *smu = handle;
2977
2978 if (smu->ppt_funcs && smu->ppt_funcs->get_apu_thermal_limit)
2979 ret = smu->ppt_funcs->get_apu_thermal_limit(smu, limit);
2980
2981 return ret;
2982 }
2983
2984 static int smu_set_apu_thermal_limit(void *handle, uint32_t limit)
2985 {
2986 int ret = -EOPNOTSUPP;
2987 struct smu_context *smu = handle;
2988
2989 if (smu->ppt_funcs && smu->ppt_funcs->set_apu_thermal_limit)
2990 ret = smu->ppt_funcs->set_apu_thermal_limit(smu, limit);
2991
2992 return ret;
2993 }
2994
2995 static int smu_get_power_profile_mode(void *handle, char *buf)
2996 {
2997 struct smu_context *smu = handle;
2998
2999 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
3000 !smu->ppt_funcs->get_power_profile_mode)
3001 return -EOPNOTSUPP;
3002 if (!buf)
3003 return -EINVAL;
3004
3005 return smu->ppt_funcs->get_power_profile_mode(smu, buf);
3006 }
3007
3008 static int smu_set_power_profile_mode(void *handle,
3009 long *param,
3010 uint32_t param_size)
3011 {
3012 struct smu_context *smu = handle;
3013
3014 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
3015 !smu->ppt_funcs->set_power_profile_mode)
3016 return -EOPNOTSUPP;
3017
3018 return smu_bump_power_profile_mode(smu, param, param_size);
3019 }
3020
3021 static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
3022 {
3023 struct smu_context *smu = handle;
3024
3025 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3026 return -EOPNOTSUPP;
3027
3028 if (!smu->ppt_funcs->get_fan_control_mode)
3029 return -EOPNOTSUPP;
3030
3031 if (!fan_mode)
3032 return -EINVAL;
3033
3034 *fan_mode = smu->ppt_funcs->get_fan_control_mode(smu);
3035
3036 return 0;
3037 }
3038
3039 static int smu_set_fan_control_mode(void *handle, u32 value)
3040 {
3041 struct smu_context *smu = handle;
3042 int ret = 0;
3043
3044 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3045 return -EOPNOTSUPP;
3046
3047 if (!smu->ppt_funcs->set_fan_control_mode)
3048 return -EOPNOTSUPP;
3049
3050 if (value == U32_MAX)
3051 return -EINVAL;
3052
3053 ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
3054 if (ret)
3055 goto out;
3056
3057 if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
3058 smu->user_dpm_profile.fan_mode = value;
3059
3060 /* reset user dpm fan speed */
3061 if (value != AMD_FAN_CTRL_MANUAL) {
3062 smu->user_dpm_profile.fan_speed_pwm = 0;
3063 smu->user_dpm_profile.fan_speed_rpm = 0;
3064 smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM);
3065 }
3066 }
3067
3068 out:
3069 return ret;
3070 }
3071
3072 static int smu_get_fan_speed_pwm(void *handle, u32 *speed)
3073 {
3074 struct smu_context *smu = handle;
3075 int ret = 0;
3076
3077 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3078 return -EOPNOTSUPP;
3079
3080 if (!smu->ppt_funcs->get_fan_speed_pwm)
3081 return -EOPNOTSUPP;
3082
3083 if (!speed)
3084 return -EINVAL;
3085
3086 ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed);
3087
3088 return ret;
3089 }
3090
3091 static int smu_set_fan_speed_pwm(void *handle, u32 speed)
3092 {
3093 struct smu_context *smu = handle;
3094 int ret = 0;
3095
3096 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3097 return -EOPNOTSUPP;
3098
3099 if (!smu->ppt_funcs->set_fan_speed_pwm)
3100 return -EOPNOTSUPP;
3101
3102 if (speed == U32_MAX)
3103 return -EINVAL;
3104
3105 ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed);
3106 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
3107 smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM;
3108 smu->user_dpm_profile.fan_speed_pwm = speed;
3109
3110 /* Override custom RPM setting as they cannot co-exist */
3111 smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM;
3112 smu->user_dpm_profile.fan_speed_rpm = 0;
3113 }
3114
3115 return ret;
3116 }
3117
3118 static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
3119 {
3120 struct smu_context *smu = handle;
3121 int ret = 0;
3122
3123 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3124 return -EOPNOTSUPP;
3125
3126 if (!smu->ppt_funcs->get_fan_speed_rpm)
3127 return -EOPNOTSUPP;
3128
3129 if (!speed)
3130 return -EINVAL;
3131
3132 ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
3133
3134 return ret;
3135 }
3136
3137 static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
3138 {
3139 struct smu_context *smu = handle;
3140
3141 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3142 return -EOPNOTSUPP;
3143
3144 return smu_set_min_dcef_deep_sleep(smu, clk);
3145 }
3146
3147 static int smu_get_clock_by_type_with_latency(void *handle,
3148 enum amd_pp_clock_type type,
3149 struct pp_clock_levels_with_latency *clocks)
3150 {
3151 struct smu_context *smu = handle;
3152 enum smu_clk_type clk_type;
3153 int ret = 0;
3154
3155 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3156 return -EOPNOTSUPP;
3157
3158 if (smu->ppt_funcs->get_clock_by_type_with_latency) {
3159 switch (type) {
3160 case amd_pp_sys_clock:
3161 clk_type = SMU_GFXCLK;
3162 break;
3163 case amd_pp_mem_clock:
3164 clk_type = SMU_MCLK;
3165 break;
3166 case amd_pp_dcef_clock:
3167 clk_type = SMU_DCEFCLK;
3168 break;
3169 case amd_pp_disp_clock:
3170 clk_type = SMU_DISPCLK;
3171 break;
3172 default:
3173 dev_err(smu->adev->dev, "Invalid clock type!\n");
3174 return -EINVAL;
3175 }
3176
3177 ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
3178 }
3179
3180 return ret;
3181 }
3182
3183 static int smu_display_clock_voltage_request(void *handle,
3184 struct pp_display_clock_request *clock_req)
3185 {
3186 struct smu_context *smu = handle;
3187 int ret = 0;
3188
3189 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3190 return -EOPNOTSUPP;
3191
3192 if (smu->ppt_funcs->display_clock_voltage_request)
3193 ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
3194
3195 return ret;
3196 }
3197
3198
3199 static int smu_display_disable_memory_clock_switch(void *handle,
3200 bool disable_memory_clock_switch)
3201 {
3202 struct smu_context *smu = handle;
3203 int ret = -EINVAL;
3204
3205 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3206 return -EOPNOTSUPP;
3207
3208 if (smu->ppt_funcs->display_disable_memory_clock_switch)
3209 ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
3210
3211 return ret;
3212 }
3213
3214 static int smu_set_xgmi_pstate(void *handle,
3215 uint32_t pstate)
3216 {
3217 struct smu_context *smu = handle;
3218 int ret = 0;
3219
3220 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3221 return -EOPNOTSUPP;
3222
3223 if (smu->ppt_funcs->set_xgmi_pstate)
3224 ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
3225
3226 if (ret)
3227 dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");
3228
3229 return ret;
3230 }
3231
3232 static int smu_get_baco_capability(void *handle)
3233 {
3234 struct smu_context *smu = handle;
3235
3236 if (!smu->pm_enabled)
3237 return false;
3238
3239 if (!smu->ppt_funcs || !smu->ppt_funcs->get_bamaco_support)
3240 return false;
3241
3242 return smu->ppt_funcs->get_bamaco_support(smu);
3243 }
3244
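/* BACO state convention: 1 requests BACO entry, 0 requests BACO exit. */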
3245 static int smu_baco_set_state(void *handle, int state)
3246 {
3247 struct smu_context *smu = handle;
3248 int ret = 0;
3249
3250 if (!smu->pm_enabled)
3251 return -EOPNOTSUPP;
3252
3253 if (state == 0) {
3254 if (smu->ppt_funcs->baco_exit)
3255 ret = smu->ppt_funcs->baco_exit(smu);
3256 } else if (state == 1) {
3257 if (smu->ppt_funcs->baco_enter)
3258 ret = smu->ppt_funcs->baco_enter(smu);
3259 } else {
3260 return -EINVAL;
3261 }
3262
3263 if (ret)
3264 dev_err(smu->adev->dev, "Failed to %s BACO state!\n",
3265 state ? "enter" : "exit");
3266
3267 return ret;
3268 }
3269
3270 bool smu_mode1_reset_is_support(struct smu_context *smu)
3271 {
3272 bool ret = false;
3273
3274 if (!smu->pm_enabled)
3275 return false;
3276
3277 if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
3278 ret = smu->ppt_funcs->mode1_reset_is_support(smu);
3279
3280 return ret;
3281 }
3282
3283 bool smu_mode2_reset_is_support(struct smu_context *smu)
3284 {
3285 bool ret = false;
3286
3287 if (!smu->pm_enabled)
3288 return false;
3289
3290 if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support)
3291 ret = smu->ppt_funcs->mode2_reset_is_support(smu);
3292
3293 return ret;
3294 }
3295
3296 int smu_mode1_reset(struct smu_context *smu)
3297 {
3298 int ret = 0;
3299
3300 if (!smu->pm_enabled)
3301 return -EOPNOTSUPP;
3302
3303 if (smu->ppt_funcs->mode1_reset)
3304 ret = smu->ppt_funcs->mode1_reset(smu);
3305
3306 return ret;
3307 }
3308
3309 static int smu_mode2_reset(void *handle)
3310 {
3311 struct smu_context *smu = handle;
3312 int ret = 0;
3313
3314 if (!smu->pm_enabled)
3315 return -EOPNOTSUPP;
3316
3317 if (smu->ppt_funcs->mode2_reset)
3318 ret = smu->ppt_funcs->mode2_reset(smu);
3319
3320 if (ret)
3321 dev_err(smu->adev->dev, "Mode2 reset failed!\n");
3322
3323 return ret;
3324 }
3325
3326 static int smu_enable_gfx_features(void *handle)
3327 {
3328 struct smu_context *smu = handle;
3329 int ret = 0;
3330
3331 if (!smu->pm_enabled)
3332 return -EOPNOTSUPP;
3333
3334 if (smu->ppt_funcs->enable_gfx_features)
3335 ret = smu->ppt_funcs->enable_gfx_features(smu);
3336
3337 if (ret)
3338 dev_err(smu->adev->dev, "enable gfx features failed!\n");
3339
3340 return ret;
3341 }
3342
3343 static int smu_get_max_sustainable_clocks_by_dc(void *handle,
3344 struct pp_smu_nv_clock_table *max_clocks)
3345 {
3346 struct smu_context *smu = handle;
3347 int ret = 0;
3348
3349 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3350 return -EOPNOTSUPP;
3351
3352 if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
3353 ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
3354
3355 return ret;
3356 }
3357
3358 static int smu_get_uclk_dpm_states(void *handle,
3359 unsigned int *clock_values_in_khz,
3360 unsigned int *num_states)
3361 {
3362 struct smu_context *smu = handle;
3363 int ret = 0;
3364
3365 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3366 return -EOPNOTSUPP;
3367
3368 if (smu->ppt_funcs->get_uclk_dpm_states)
3369 ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
3370
3371 return ret;
3372 }
3373
3374 static enum amd_pm_state_type smu_get_current_power_state(void *handle)
3375 {
3376 struct smu_context *smu = handle;
3377 enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
3378
3379 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3380 return -EOPNOTSUPP;
3381
3382 if (smu->ppt_funcs->get_current_power_state)
3383 pm_state = smu->ppt_funcs->get_current_power_state(smu);
3384
3385 return pm_state;
3386 }
3387
3388 static int smu_get_dpm_clock_table(void *handle,
3389 struct dpm_clocks *clock_table)
3390 {
3391 struct smu_context *smu = handle;
3392 int ret = 0;
3393
3394 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3395 return -EOPNOTSUPP;
3396
3397 if (smu->ppt_funcs->get_dpm_clock_table)
3398 ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
3399
3400 return ret;
3401 }
3402
3403 static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
3404 {
3405 struct smu_context *smu = handle;
3406
3407 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3408 return -EOPNOTSUPP;
3409
3410 if (!smu->ppt_funcs->get_gpu_metrics)
3411 return -EOPNOTSUPP;
3412
3413 return smu->ppt_funcs->get_gpu_metrics(smu, table);
3414 }
3415
3416 static ssize_t smu_sys_get_pm_metrics(void *handle, void *pm_metrics,
3417 size_t size)
3418 {
3419 struct smu_context *smu = handle;
3420
3421 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3422 return -EOPNOTSUPP;
3423
3424 if (!smu->ppt_funcs->get_pm_metrics)
3425 return -EOPNOTSUPP;
3426
3427 return smu->ppt_funcs->get_pm_metrics(smu, pm_metrics, size);
3428 }
3429
3430 static int smu_enable_mgpu_fan_boost(void *handle)
3431 {
3432 struct smu_context *smu = handle;
3433 int ret = 0;
3434
3435 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3436 return -EOPNOTSUPP;
3437
3438 if (smu->ppt_funcs->enable_mgpu_fan_boost)
3439 ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);
3440
3441 return ret;
3442 }
3443
3444 static int smu_gfx_state_change_set(void *handle,
3445 uint32_t state)
3446 {
3447 struct smu_context *smu = handle;
3448 int ret = 0;
3449
3450 if (smu->ppt_funcs->gfx_state_change_set)
3451 ret = smu->ppt_funcs->gfx_state_change_set(smu, state);
3452
3453 return ret;
3454 }
3455
3456 int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
3457 {
3458 int ret = 0;
3459
3460 if (smu->ppt_funcs->smu_handle_passthrough_sbr)
3461 ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable);
3462
3463 return ret;
3464 }
3465
3466 int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc)
3467 {
3468 int ret = -EOPNOTSUPP;
3469
3470 if (smu->ppt_funcs &&
3471 smu->ppt_funcs->get_ecc_info)
3472 ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc);
3473
3474 return ret;
3476 }
3477
3478 static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)
3479 {
3480 struct smu_context *smu = handle;
3481 struct smu_table_context *smu_table = &smu->smu_table;
3482 struct smu_table *memory_pool = &smu_table->memory_pool;
3483
3484 if (!addr || !size)
3485 return -EINVAL;
3486
3487 *addr = NULL;
3488 *size = 0;
3489 if (memory_pool->bo) {
3490 *addr = memory_pool->cpu_addr;
3491 *size = memory_pool->size;
3492 }
3493
3494 return 0;
3495 }
3496
3497 int smu_set_xgmi_plpd_mode(struct smu_context *smu,
3498 enum pp_xgmi_plpd_mode mode)
3499 {
3500 int ret = -EOPNOTSUPP;
3501
3502 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3503 return ret;
3504
3505 /* PLPD policy selection is not supported when the current mode is NONE */
3506 if (smu->plpd_mode == XGMI_PLPD_NONE)
3507 return ret;
3508
3509 if (smu->plpd_mode == mode)
3510 return 0;
3511
3512 if (smu->ppt_funcs && smu->ppt_funcs->select_xgmi_plpd_policy)
3513 ret = smu->ppt_funcs->select_xgmi_plpd_policy(smu, mode);
3514
3515 if (!ret)
3516 smu->plpd_mode = mode;
3517
3518 return ret;
3519 }
3520
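/*
 * The amd_pm_funcs vtable exported by the swsmu backend; the comment
 * groupings below mirror its consumers (sysfs, amdgpu core, display).
 */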
3521 static const struct amd_pm_funcs swsmu_pm_funcs = {
3522 /* export for sysfs */
3523 .set_fan_control_mode = smu_set_fan_control_mode,
3524 .get_fan_control_mode = smu_get_fan_control_mode,
3525 .set_fan_speed_pwm = smu_set_fan_speed_pwm,
3526 .get_fan_speed_pwm = smu_get_fan_speed_pwm,
3527 .force_clock_level = smu_force_ppclk_levels,
3528 .print_clock_levels = smu_print_ppclk_levels,
3529 .emit_clock_levels = smu_emit_ppclk_levels,
3530 .force_performance_level = smu_force_performance_level,
3531 .read_sensor = smu_read_sensor,
3532 .get_apu_thermal_limit = smu_get_apu_thermal_limit,
3533 .set_apu_thermal_limit = smu_set_apu_thermal_limit,
3534 .get_performance_level = smu_get_performance_level,
3535 .get_current_power_state = smu_get_current_power_state,
3536 .get_fan_speed_rpm = smu_get_fan_speed_rpm,
3537 .set_fan_speed_rpm = smu_set_fan_speed_rpm,
3538 .get_pp_num_states = smu_get_power_num_states,
3539 .get_pp_table = smu_sys_get_pp_table,
3540 .set_pp_table = smu_sys_set_pp_table,
3541 .switch_power_profile = smu_switch_power_profile,
3542 /* export to amdgpu */
3543 .dispatch_tasks = smu_handle_dpm_task,
3544 .load_firmware = smu_load_microcode,
3545 .set_powergating_by_smu = smu_dpm_set_power_gate,
3546 .set_power_limit = smu_set_power_limit,
3547 .get_power_limit = smu_get_power_limit,
3548 .get_power_profile_mode = smu_get_power_profile_mode,
3549 .set_power_profile_mode = smu_set_power_profile_mode,
3550 .odn_edit_dpm_table = smu_od_edit_dpm_table,
3551 .set_mp1_state = smu_set_mp1_state,
3552 .gfx_state_change_set = smu_gfx_state_change_set,
3553 /* export to DC */
3554 .get_sclk = smu_get_sclk,
3555 .get_mclk = smu_get_mclk,
3556 .display_configuration_change = smu_display_configuration_change,
3557 .get_clock_by_type_with_latency = smu_get_clock_by_type_with_latency,
3558 .display_clock_voltage_request = smu_display_clock_voltage_request,
3559 .enable_mgpu_fan_boost = smu_enable_mgpu_fan_boost,
3560 .set_active_display_count = smu_set_display_count,
3561 .set_min_deep_sleep_dcefclk = smu_set_deep_sleep_dcefclk,
3562 .get_asic_baco_capability = smu_get_baco_capability,
3563 .set_asic_baco_state = smu_baco_set_state,
3564 .get_ppfeature_status = smu_sys_get_pp_feature_mask,
3565 .set_ppfeature_status = smu_sys_set_pp_feature_mask,
3566 .asic_reset_mode_2 = smu_mode2_reset,
3567 .asic_reset_enable_gfx_features = smu_enable_gfx_features,
3568 .set_df_cstate = smu_set_df_cstate,
3569 .set_xgmi_pstate = smu_set_xgmi_pstate,
3570 .get_gpu_metrics = smu_sys_get_gpu_metrics,
3571 .get_pm_metrics = smu_sys_get_pm_metrics,
3572 .set_watermarks_for_clock_ranges = smu_set_watermarks_for_clock_ranges,
3573 .display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch,
3574 .get_max_sustainable_clocks_by_dc = smu_get_max_sustainable_clocks_by_dc,
3575 .get_uclk_dpm_states = smu_get_uclk_dpm_states,
3576 .get_dpm_clock_table = smu_get_dpm_clock_table,
3577 .get_smu_prv_buf_details = smu_get_prv_buffer_details,
3578 };
3579
3580 int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
3581 uint64_t event_arg)
3582 {
3583 int ret = -EINVAL;
3584
3585 if (smu->ppt_funcs->wait_for_event)
3586 ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg);
3587
3588 return ret;
3589 }
3590
3591 int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size)
3592 {
3593
3594 if (!smu->ppt_funcs->stb_collect_info || !smu->stb_context.enabled)
3595 return -EOPNOTSUPP;
3596
3597 /* Confirm the allocated buffer is of the correct size */
3598 if (size != smu->stb_context.stb_buf_size)
3599 return -EINVAL;
3600
3601 /*
3602 * No need to take the smu mutex, as we access STB directly through MMIO
3603 * and do not go through the SMU messaging route (for now at least).
3604 * Register access relies on the implementation's internal locking.
3605 */
3606 return smu->ppt_funcs->stb_collect_info(smu, buf, size);
3607 }
3608
3609 #if defined(CONFIG_DEBUG_FS)
3610
3611 static int smu_stb_debugfs_open(struct inode *inode, struct file *filp)
3612 {
3613 struct amdgpu_device *adev = filp->f_inode->i_private;
3614 struct smu_context *smu = adev->powerplay.pp_handle;
3615 unsigned char *buf;
3616 int r;
3617
3618 buf = kvmalloc_array(smu->stb_context.stb_buf_size, sizeof(*buf), GFP_KERNEL);
3619 if (!buf)
3620 return -ENOMEM;
3621
3622 r = smu_stb_collect_info(smu, buf, smu->stb_context.stb_buf_size);
3623 if (r)
3624 goto out;
3625
3626 filp->private_data = buf;
3627
3628 return 0;
3629
3630 out:
3631 kvfree(buf);
3632 return r;
3633 }
3634
3635 static ssize_t smu_stb_debugfs_read(struct file *filp, char __user *buf, size_t size,
3636 loff_t *pos)
3637 {
3638 struct amdgpu_device *adev = filp->f_inode->i_private;
3639 struct smu_context *smu = adev->powerplay.pp_handle;
3640
3641
3642 if (!filp->private_data)
3643 return -EINVAL;
3644
3645 return simple_read_from_buffer(buf,
3646 size,
3647 pos, filp->private_data,
3648 smu->stb_context.stb_buf_size);
3649 }
3650
3651 static int smu_stb_debugfs_release(struct inode *inode, struct file *filp)
3652 {
3653 kvfree(filp->private_data);
3654 filp->private_data = NULL;
3655
3656 return 0;
3657 }
3658
3659 /*
3660 * We have to define not only the read method but also
3661 * open and release, because .read returns at most PAGE_SIZE
3662 * of data per call and so is invoked multiple times.
3663 * We allocate the STB buffer in .open and release it
3664 * in .release.
3665 */
3666 static const struct file_operations smu_stb_debugfs_fops = {
3667 .owner = THIS_MODULE,
3668 .open = smu_stb_debugfs_open,
3669 .read = smu_stb_debugfs_read,
3670 .release = smu_stb_debugfs_release,
3671 .llseek = default_llseek,
3672 };
3673
3674 #endif
3675
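/*
 * Register the STB dump node with debugfs. A minimal usage sketch,
 * assuming the usual DRM debugfs layout and primary node index 0 (the
 * exact path is an assumption, not guaranteed):
 *
 *   # cat /sys/kernel/debug/dri/0/amdgpu_smu_stb_dump > stb.bin
 */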
3676 void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev)
3677 {
3678 #if defined(CONFIG_DEBUG_FS)
3679
3680 struct smu_context *smu = adev->powerplay.pp_handle;
3681
3682 if (!smu || (!smu->stb_context.stb_buf_size))
3683 return;
3684
3685 debugfs_create_file_size("amdgpu_smu_stb_dump",
3686 S_IRUSR,
3687 adev_to_drm(adev)->primary->debugfs_root,
3688 adev,
3689 &smu_stb_debugfs_fops,
3690 smu->stb_context.stb_buf_size);
3691 #endif
3692 }
3693
3694 int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size)
3695 {
3696 int ret = 0;
3697
3698 if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_pages_num)
3699 ret = smu->ppt_funcs->send_hbm_bad_pages_num(smu, size);
3700
3701 return ret;
3702 }
3703
3704 int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size)
3705 {
3706 int ret = 0;
3707
3708 if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_channel_flag)
3709 ret = smu->ppt_funcs->send_hbm_bad_channel_flag(smu, size);
3710
3711 return ret;
3712 }
3713
3714 int smu_send_rma_reason(struct smu_context *smu)
3715 {
3716 int ret = 0;
3717
3718 if (smu->ppt_funcs && smu->ppt_funcs->send_rma_reason)
3719 ret = smu->ppt_funcs->send_rma_reason(smu);
3720
3721 return ret;
3722 }
3723