xref: /linux/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c (revision dd093fb0)
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define SWSMU_CODE_LAYER_L1

#include <linux/firmware.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "atom.h"
#include "arcturus_ppt.h"
#include "navi10_ppt.h"
#include "sienna_cichlid_ppt.h"
#include "renoir_ppt.h"
#include "vangogh_ppt.h"
#include "aldebaran_ppt.h"
#include "yellow_carp_ppt.h"
#include "cyan_skillfish_ppt.h"
#include "smu_v13_0_0_ppt.h"
#include "smu_v13_0_4_ppt.h"
#include "smu_v13_0_5_ppt.h"
#include "smu_v13_0_7_ppt.h"
#include "amd_pcie.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

static const struct amd_pm_funcs swsmu_pm_funcs;
static int smu_force_smuclk_levels(struct smu_context *smu,
				   enum smu_clk_type clk_type,
				   uint32_t mask);
static int smu_handle_task(struct smu_context *smu,
			   enum amd_dpm_forced_level level,
			   enum amd_pp_task task_id);
static int smu_reset(struct smu_context *smu);
static int smu_set_fan_speed_pwm(void *handle, u32 speed);
static int smu_set_fan_control_mode(void *handle, u32 value);
static int smu_set_power_limit(void *handle, uint32_t limit);
static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state);

static int smu_sys_get_pp_feature_mask(void *handle,
				       char *buf)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_get_pp_feature_mask(smu, buf);
}

static int smu_sys_set_pp_feature_mask(void *handle,
				       uint64_t new_mask)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_set_pp_feature_mask(smu, new_mask);
}

int smu_set_residency_gfxoff(struct smu_context *smu, bool value)
{
	if (!smu->ppt_funcs->set_gfx_off_residency)
		return -EINVAL;

	return smu_set_gfx_off_residency(smu, value);
}

int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value)
{
	if (!smu->ppt_funcs->get_gfx_off_residency)
		return -EINVAL;

	return smu_get_gfx_off_residency(smu, value);
}

int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value)
{
	if (!smu->ppt_funcs->get_gfx_off_entrycount)
		return -EINVAL;

	return smu_get_gfx_off_entrycount(smu, value);
}

int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
{
	if (!smu->ppt_funcs->get_gfx_off_status)
		return -EINVAL;

	*value = smu_get_gfx_off_status(smu);

	return 0;
}

int smu_set_soft_freq_range(struct smu_context *smu,
			    enum smu_clk_type clk_type,
			    uint32_t min,
			    uint32_t max)
{
	int ret = 0;

	if (smu->ppt_funcs->set_soft_freq_limited_range)
		ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
								  clk_type,
								  min,
								  max);

	return ret;
}

int smu_get_dpm_freq_range(struct smu_context *smu,
			   enum smu_clk_type clk_type,
			   uint32_t *min,
			   uint32_t *max)
{
	int ret = -ENOTSUPP;

	if (!min && !max)
		return -EINVAL;

	if (smu->ppt_funcs->get_dpm_ultimate_freq)
		ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
							    clk_type,
							    min,
							    max);

	return ret;
}

int smu_set_gfx_power_up_by_imu(struct smu_context *smu)
{
	if (!smu->ppt_funcs || !smu->ppt_funcs->set_gfx_power_up_by_imu)
		return -EOPNOTSUPP;

	return smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
}

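/*
 * Helpers for the legacy amd_pm_funcs get_mclk/get_sclk hooks below: passing
 * the single output pointer as either the min or the max argument makes one
 * call fetch just the requested bound, and the "* 100" converts the MHz value
 * reported by the SMU into the 10 kHz units the amdgpu_dpm callers expect.
 */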
static u32 smu_get_mclk(void *handle, bool low)
{
	struct smu_context *smu = handle;
	uint32_t clk_freq;
	int ret = 0;

	ret = smu_get_dpm_freq_range(smu, SMU_UCLK,
				     low ? &clk_freq : NULL,
				     !low ? &clk_freq : NULL);
	if (ret)
		return 0;
	return clk_freq * 100;
}

static u32 smu_get_sclk(void *handle, bool low)
{
	struct smu_context *smu = handle;
	uint32_t clk_freq;
	int ret = 0;

	ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK,
				     low ? &clk_freq : NULL,
				     !low ? &clk_freq : NULL);
	if (ret)
		return 0;
	return clk_freq * 100;
}

static int smu_dpm_set_vcn_enable(struct smu_context *smu,
				  bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->ppt_funcs->dpm_set_vcn_enable)
		return 0;

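	/*
	 * vcn_gated is 1 while VCN is gated. The XOR below is nonzero exactly
	 * when the block is already in the requested state (gated and asked to
	 * disable, or ungated and asked to enable), making the request a no-op.
	 * smu_dpm_set_jpeg_enable() below uses the same pattern.
	 */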
	if (atomic_read(&power_gate->vcn_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->vcn_gated, !enable);

	return ret;
}

static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
				   bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->ppt_funcs->dpm_set_jpeg_enable)
		return 0;

	if (atomic_read(&power_gate->jpeg_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->jpeg_gated, !enable);

	return ret;
}

/**
 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
 *
 * @handle:        smu_context pointer
 * @block_type: the IP block to power gate/ungate
 * @gate:       to power gate if true, ungate otherwise
 *
 * This API uses no smu->mutex lock protection due to:
 * 1. It is either called by other IP blocks (gfx/sdma/vcn/uvd/vce),
 *    where the caller guarantees the access is race condition free.
 * 2. Or it is called on a user request to set power_dpm_force_performance_level.
 *    In that case, smu->mutex protection is already enforced by the parent
 *    API smu_force_performance_level in the call path.
 */
static int smu_dpm_set_power_gate(void *handle,
				  uint32_t block_type,
				  bool gate)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) {
		dev_WARN(smu->adev->dev,
			 "SMU uninitialized but power %s requested for %u!\n",
			 gate ? "gate" : "ungate", block_type);
		return -EOPNOTSUPP;
	}

	switch (block_type) {
	/*
	 * Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses
	 * AMD_IP_BLOCK_TYPE_UVD for VCN. So, here both of them are kept.
	 */
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCN:
		ret = smu_dpm_set_vcn_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s VCN!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
		ret = smu_gfx_off_control(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",
				gate ? "enable" : "disable");
		break;
	case AMD_IP_BLOCK_TYPE_SDMA:
		ret = smu_powergate_sdma(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_JPEG:
		ret = smu_dpm_set_jpeg_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
				gate ? "gate" : "ungate");
		break;
	default:
		dev_err(smu->adev->dev, "Unsupported block type!\n");
		return -EINVAL;
	}

	return ret;
}

/**
 * smu_set_user_clk_dependencies - set user profile clock dependencies
 *
 * @smu:	smu_context pointer
 * @clk:	enum smu_clk_type type
 *
 * Enable/Disable the clock dependency for the @clk type.
 */
static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk)
{
	if (smu->adev->in_suspend)
		return;

	if (clk == SMU_MCLK) {
		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
	} else if (clk == SMU_FCLK) {
		/* MCLK takes precedence over FCLK */
		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
			return;

		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);
	} else if (clk == SMU_SOCCLK) {
		/* MCLK takes precedence over SOCCLK */
		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
			return;

		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);
	} else
		/* Add clk dependencies here, if any */
		return;
}

/**
 * smu_restore_dpm_user_profile - reinstate user dpm profile
 *
 * @smu:	smu_context pointer
 *
 * Restore the saved user power configurations, including power limit,
 * clock frequencies, fan control mode and fan speed.
 */
static void smu_restore_dpm_user_profile(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->adev->in_suspend)
		return;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return;

	/* Enable restore flag */
	smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;

	/* set the user dpm power limit */
	if (smu->user_dpm_profile.power_limit) {
		ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit);
		if (ret)
			dev_err(smu->adev->dev, "Failed to set power limit value\n");
	}

	/* set the user dpm clock configurations */
	if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
		enum smu_clk_type clk_type;

		for (clk_type = 0; clk_type < SMU_CLK_COUNT; clk_type++) {
			/*
			 * Iterate over smu clk type and force the saved user clk
			 * configs, skip if clock dependency is enabled
			 */
			if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) &&
					smu->user_dpm_profile.clk_mask[clk_type]) {
				ret = smu_force_smuclk_levels(smu, clk_type,
						smu->user_dpm_profile.clk_mask[clk_type]);
				if (ret)
					dev_err(smu->adev->dev,
						"Failed to set clock type = %d\n", clk_type);
			}
		}
	}

	/* set the user dpm fan configurations */
	if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL ||
	    smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) {
		ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode);
		if (ret && ret != -EOPNOTSUPP) {
			smu->user_dpm_profile.fan_speed_pwm = 0;
			smu->user_dpm_profile.fan_speed_rpm = 0;
			smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO;
			dev_err(smu->adev->dev, "Failed to set manual fan control mode\n");
		}

		if (smu->user_dpm_profile.fan_speed_pwm) {
			ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm);
			if (ret && ret != -EOPNOTSUPP)
				dev_err(smu->adev->dev, "Failed to set manual fan speed in pwm\n");
		}

		if (smu->user_dpm_profile.fan_speed_rpm) {
			ret = smu_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm);
			if (ret && ret != -EOPNOTSUPP)
				dev_err(smu->adev->dev, "Failed to set manual fan speed in rpm\n");
		}
	}

	/* Restore user customized OD settings */
	if (smu->user_dpm_profile.user_od) {
		if (smu->ppt_funcs->restore_user_od_settings) {
			ret = smu->ppt_funcs->restore_user_od_settings(smu);
			if (ret)
				dev_err(smu->adev->dev, "Failed to upload customized OD settings\n");
		}
	}

	/* Disable restore flag */
	smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
}

static int smu_get_power_num_states(void *handle,
				    struct pp_states_info *state_info)
{
	if (!state_info)
		return -EINVAL;

	/* power states are not supported; report a single default state */
	memset(state_info, 0, sizeof(struct pp_states_info));
	state_info->nums = 1;
	state_info->states[0] = POWER_STATE_TYPE_DEFAULT;

	return 0;
}

bool is_support_sw_smu(struct amdgpu_device *adev)
{
	/* vega20 is 11.0.2, but it's supported via the powerplay code */
	if (adev->asic_type == CHIP_VEGA20)
		return false;

	if (adev->ip_versions[MP1_HWIP][0] >= IP_VERSION(11, 0, 0))
		return true;

	return false;
}

bool is_support_cclk_dpm(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
		return false;

	return true;
}

static int smu_sys_get_pp_table(void *handle,
				char **table)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
		return -EINVAL;

	if (smu_table->hardcode_pptable)
		*table = smu_table->hardcode_pptable;
	else
		*table = smu_table->power_play_table;

	return smu_table->power_play_table_size;
}

static int smu_sys_set_pp_table(void *handle,
				const char *buf,
				size_t size)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (header->usStructureSize != size) {
		dev_err(smu->adev->dev, "pp table size not matched!\n");
		return -EIO;
	}

	if (!smu_table->hardcode_pptable) {
		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
		if (!smu_table->hardcode_pptable)
			return -ENOMEM;
	}

	memcpy(smu_table->hardcode_pptable, buf, size);
	smu_table->power_play_table = smu_table->hardcode_pptable;
	smu_table->power_play_table_size = size;

	/*
	 * A special hw_fini action (for Navi1x, the DPM disablement will be
	 * skipped) may be needed for custom pptable uploading.
	 */
	smu->uploading_custom_pp_table = true;

	ret = smu_reset(smu);
	if (ret)
		dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);

	smu->uploading_custom_pp_table = false;

	return ret;
}

static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
	int ret = 0;

	/*
	 * With SCPM enabled, setting the allowed feature masks (via
	 * PPSMC_MSG_SetAllowedFeaturesMaskLow/High) is not permitted.
	 * That means there is no way to let PMFW know the settings below.
	 * Thus, we just assume all the features are allowed in that
	 * scenario.
	 */
	if (smu->adev->scpm_enabled) {
		bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
		return 0;
	}

	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);

	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
					     SMU_FEATURE_MAX/32);
	if (ret)
		return ret;

	bitmap_or(feature->allowed, feature->allowed,
		      (unsigned long *)allowed_feature_mask,
		      feature->feature_num);

	return ret;
}

static int smu_set_funcs(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
		smu->od_enabled = true;

	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
		navi10_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
		sienna_cichlid_set_ppt_funcs(smu);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		renoir_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 5, 0):
		vangogh_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 8):
		yellow_carp_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 4):
	case IP_VERSION(13, 0, 11):
		smu_v13_0_4_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 5):
		smu_v13_0_5_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 8):
		cyan_skillfish_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 2):
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		arcturus_set_ppt_funcs(smu);
		/* OD is not supported on Arcturus */
		smu->od_enabled = false;
		break;
	case IP_VERSION(13, 0, 2):
		aldebaran_set_ppt_funcs(smu);
		/* Enable pp_od_clk_voltage node */
		smu->od_enabled = true;
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 10):
		smu_v13_0_0_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 7):
		smu_v13_0_7_set_ppt_funcs(smu);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int smu_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu;

	smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL);
	if (!smu)
		return -ENOMEM;

	smu->adev = adev;
	smu->pm_enabled = !!amdgpu_dpm;
	smu->is_apu = false;
	smu->smu_baco.state = SMU_BACO_STATE_EXIT;
	smu->smu_baco.platform_support = false;
	smu->user_dpm_profile.fan_mode = -1;

	mutex_init(&smu->message_lock);

	adev->powerplay.pp_handle = smu;
	adev->powerplay.pp_funcs = &swsmu_pm_funcs;

	return smu_set_funcs(adev);
}

static int smu_set_default_dpm_table(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int vcn_gate, jpeg_gate;
	int ret = 0;

	if (!smu->ppt_funcs->set_default_dpm_table)
		return 0;

	vcn_gate = atomic_read(&power_gate->vcn_gated);
	jpeg_gate = atomic_read(&power_gate->jpeg_gated);

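	/*
	 * VCN and JPEG need to be ungated while the default DPM tables are
	 * built; the original gating state saved above is restored once the
	 * tables have been set up.
	 */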
	ret = smu_dpm_set_vcn_enable(smu, true);
	if (ret)
		return ret;

	ret = smu_dpm_set_jpeg_enable(smu, true);
	if (ret)
		goto err_out;

	ret = smu->ppt_funcs->set_default_dpm_table(smu);
	if (ret)
		dev_err(smu->adev->dev,
			"Failed to setup default dpm clock tables!\n");

	smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
err_out:
	smu_dpm_set_vcn_enable(smu, !vcn_gate);
	return ret;
}

static int smu_apply_default_config_table_settings(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	ret = smu_get_default_config_table_settings(smu,
						    &adev->pm.config_table);
	if (ret)
		return ret;

	return smu_set_config_table(smu, &adev->pm.config_table);
}

static int smu_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	smu_set_fine_grain_gfx_freq_parameters(smu);

	if (!smu->pm_enabled)
		return 0;

	ret = smu_post_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to post smu init!\n");
		return ret;
	}

	if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 1)) ||
	    (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 3)))
		return 0;

	if (!amdgpu_sriov_vf(adev) || smu->od_enabled) {
		ret = smu_set_default_od_settings(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to setup default OD settings!\n");
			return ret;
		}
	}

	ret = smu_populate_umd_state_clk(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
		return ret;
	}

	ret = smu_get_asic_power_limits(smu,
					&smu->current_power_limit,
					&smu->default_power_limit,
					&smu->max_power_limit);
	if (ret) {
		dev_err(adev->dev, "Failed to get asic power limits!\n");
		return ret;
	}

	if (!amdgpu_sriov_vf(adev))
		smu_get_unique_id(smu);

	smu_get_fan_parameters(smu);

	smu_handle_task(smu,
			smu->smu_dpm.dpm_level,
			AMD_PP_TASK_COMPLETE_INIT);

	ret = smu_apply_default_config_table_settings(smu);
	if (ret && (ret != -EOPNOTSUPP)) {
		dev_err(adev->dev, "Failed to apply default DriverSmuConfig settings!\n");
		return ret;
	}

	smu_restore_dpm_user_profile(smu);

	return 0;
}

static int smu_init_fb_allocations(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);
	uint32_t max_table_size = 0;
	int ret, i;

	/* VRAM allocation for tool table */
	if (tables[SMU_TABLE_PMSTATUSLOG].size) {
		ret = amdgpu_bo_create_kernel(adev,
					      tables[SMU_TABLE_PMSTATUSLOG].size,
					      tables[SMU_TABLE_PMSTATUSLOG].align,
					      tables[SMU_TABLE_PMSTATUSLOG].domain,
					      &tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
		if (ret) {
			dev_err(adev->dev, "VRAM allocation for tool table failed!\n");
			return ret;
		}
	}

	/* VRAM allocation for driver table */
	for (i = 0; i < SMU_TABLE_COUNT; i++) {
		if (tables[i].size == 0)
			continue;

		if (i == SMU_TABLE_PMSTATUSLOG)
			continue;

		if (max_table_size < tables[i].size)
			max_table_size = tables[i].size;
	}

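	/*
	 * A single driver table BO, sized for the largest SMU table, is
	 * reused for all driver <-> SMU table transfers.
	 */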
	driver_table->size = max_table_size;
	driver_table->align = PAGE_SIZE;
	driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

	ret = amdgpu_bo_create_kernel(adev,
				      driver_table->size,
				      driver_table->align,
				      driver_table->domain,
				      &driver_table->bo,
				      &driver_table->mc_address,
				      &driver_table->cpu_addr);
	if (ret) {
		dev_err(adev->dev, "VRAM allocation for driver table failed!\n");
		if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
			amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
	}

	return ret;
}

static int smu_fini_fb_allocations(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);

	if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
		amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
				      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
				      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);

	amdgpu_bo_free_kernel(&driver_table->bo,
			      &driver_table->mc_address,
			      &driver_table->cpu_addr);

	return 0;
}

/**
 * smu_alloc_memory_pool - allocate memory pool in the system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool is for SMC use; the SetSystemVirtualDramAddr and
 * DramLogSetDramAddr messages notify the SMC of its location.
 *
 * Returns 0 on success, error on failure.
 */
842 {
843 	struct amdgpu_device *adev = smu->adev;
844 	struct smu_table_context *smu_table = &smu->smu_table;
845 	struct smu_table *memory_pool = &smu_table->memory_pool;
846 	uint64_t pool_size = smu->pool_size;
847 	int ret = 0;
848 
849 	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
850 		return ret;
851 
852 	memory_pool->size = pool_size;
853 	memory_pool->align = PAGE_SIZE;
854 	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;
855 
856 	switch (pool_size) {
857 	case SMU_MEMORY_POOL_SIZE_256_MB:
858 	case SMU_MEMORY_POOL_SIZE_512_MB:
859 	case SMU_MEMORY_POOL_SIZE_1_GB:
860 	case SMU_MEMORY_POOL_SIZE_2_GB:
861 		ret = amdgpu_bo_create_kernel(adev,
862 					      memory_pool->size,
863 					      memory_pool->align,
864 					      memory_pool->domain,
865 					      &memory_pool->bo,
866 					      &memory_pool->mc_address,
867 					      &memory_pool->cpu_addr);
868 		if (ret)
869 			dev_err(adev->dev, "VRAM allocation for dramlog failed!\n");
870 		break;
871 	default:
872 		break;
873 	}
874 
875 	return ret;
876 }
877 

static int smu_free_memory_pool(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;

	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
		return 0;

	amdgpu_bo_free_kernel(&memory_pool->bo,
			      &memory_pool->mc_address,
			      &memory_pool->cpu_addr);

	memset(memory_pool, 0, sizeof(struct smu_table));

	return 0;
}

static int smu_alloc_dummy_read_table(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *dummy_read_1_table =
			&smu_table->dummy_read_1_table;
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	dummy_read_1_table->size = 0x40000;
	dummy_read_1_table->align = PAGE_SIZE;
	dummy_read_1_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

	ret = amdgpu_bo_create_kernel(adev,
				      dummy_read_1_table->size,
				      dummy_read_1_table->align,
				      dummy_read_1_table->domain,
				      &dummy_read_1_table->bo,
				      &dummy_read_1_table->mc_address,
				      &dummy_read_1_table->cpu_addr);
	if (ret)
		dev_err(adev->dev, "VRAM allocation for dummy read table failed!\n");

	return ret;
}

static void smu_free_dummy_read_table(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *dummy_read_1_table =
			&smu_table->dummy_read_1_table;

	amdgpu_bo_free_kernel(&dummy_read_1_table->bo,
			      &dummy_read_1_table->mc_address,
			      &dummy_read_1_table->cpu_addr);

	memset(dummy_read_1_table, 0, sizeof(struct smu_table));
}

static int smu_smc_table_sw_init(struct smu_context *smu)
{
	int ret;

	/**
	 * Create smu_table structure, and init smc tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
	 */
	ret = smu_init_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smc tables!\n");
		return ret;
	}

	/**
	 * Create smu_power_context structure, and allocate smu_dpm_context
	 * and other context data to fill in the smu_power_context.
	 */
	ret = smu_init_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smu power!\n");
		return ret;
	}

	/*
	 * allocate vram bos to store smc table contents.
	 */
	ret = smu_init_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_alloc_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_alloc_dummy_read_table(smu);
	if (ret)
		return ret;

	ret = smu_i2c_init(smu);
	if (ret)
		return ret;

	return 0;
}

static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret;

	smu_i2c_fini(smu);

	smu_free_dummy_read_table(smu);

	ret = smu_free_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_fini_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_fini_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to fini smu power!\n");
		return ret;
	}

	ret = smu_fini_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to fini smc tables!\n");
		return ret;
	}

	return 0;
}

static void smu_throttling_logging_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       throttling_logging_work);

	smu_log_thermal_throttling(smu);
}

static void smu_interrupt_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       interrupt_work);

	if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
		smu->ppt_funcs->interrupt_work(smu);
}

static int smu_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	smu->pool_size = adev->pm.smu_prv_buffer_size;
	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);

	INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
	INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
	atomic64_set(&smu->throttle_int_counter, 0);
	smu->watermarks_bitmap = 0;
	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
	atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);

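	/*
	 * Each profile gets one bit in workload_mask at the position given by
	 * its workload_prority entry; smu_adjust_power_state_dynamic() later
	 * applies fls() to this mask to pick the highest-priority active
	 * profile. Note the default mask is computed before the priorities
	 * are assigned below, which only works because the context is
	 * zero-initialized (BOOTUP_DEFAULT's priority is 0 either way).
	 */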
	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;

	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
	smu->display_config = &adev->pm.pm_display_cfg;

	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;

	ret = smu_init_microcode(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to load smu firmware!\n");
		return ret;
	}

	ret = smu_smc_table_sw_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw init smc table!\n");
		return ret;
	}

	/* get boot_values from vbios to set revision, gfxclk, etc. */
	ret = smu_get_vbios_bootup_values(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n");
		return ret;
	}

	ret = smu_init_pptable_microcode(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup pptable firmware!\n");
		return ret;
	}

	ret = smu_register_irq_handler(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to register smc irq handler!\n");
		return ret;
	}

	/* If there is no way to query fan control mode, fan control is not supported */
	if (!smu->ppt_funcs->get_fan_control_mode)
		smu->adev->pm.no_fan = true;

	return 0;
}

static int smu_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	ret = smu_smc_table_sw_fini(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw fini smc table!\n");
		return ret;
	}

	smu_fini_microcode(smu);

	return 0;
}

static int smu_get_thermal_temperature_range(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_temperature_range *range =
				&smu->thermal_range;
	int ret = 0;

	if (!smu->ppt_funcs->get_thermal_temperature_range)
		return 0;

	ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);
	if (ret)
		return ret;

	adev->pm.dpm.thermal.min_temp = range->min;
	adev->pm.dpm.thermal.max_temp = range->max;
	adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max;
	adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min;
	adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max;
	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max;
	adev->pm.dpm.thermal.min_mem_temp = range->mem_min;
	adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max;
	adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max;

	return ret;
}

static int smu_smc_hw_setup(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	struct amdgpu_device *adev = smu->adev;
	uint32_t pcie_gen = 0, pcie_width = 0;
	uint64_t features_supported;
	int ret = 0;

	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 0, 12):
		if (adev->in_suspend && smu_is_dpm_running(smu)) {
			dev_info(adev->dev, "dpm has been enabled\n");
			ret = smu_system_features_control(smu, true);
			if (ret)
				dev_err(adev->dev, "Failed system features control!\n");
			return ret;
		}
		break;
	default:
		break;
	}

	ret = smu_init_display_count(smu, 0);
	if (ret) {
		dev_info(adev->dev, "Failed to pre-set display count as 0!\n");
		return ret;
	}

	ret = smu_set_driver_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDriverDramAddr!\n");
		return ret;
	}

	/*
	 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
	 */
	ret = smu_set_tool_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetToolsDramAddr!\n");
		return ret;
	}

	/*
	 * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr messages to
	 * notify the SMC of the memory pool location.
	 */
	ret = smu_notify_memory_pool_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n");
		return ret;
	}

	ret = smu_setup_pptable(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup pptable!\n");
		return ret;
	}

	/* smu_dump_pptable(smu); */

	/*
	 * With SCPM enabled, PSP is responsible for the PPTable transferring
	 * (to SMU). Driver involvement is neither needed nor permitted.
	 */
	if (!adev->scpm_enabled) {
		/*
		 * Copy pptable bo in the vram to smc with SMU MSGs such as
		 * SetDriverDramAddr and TransferTableDram2Smu.
		 */
		ret = smu_write_pptable(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to transfer pptable to SMC!\n");
			return ret;
		}
	}

	/* issue Run*Btc msg */
	ret = smu_run_btc(smu);
	if (ret)
		return ret;

	/*
	 * With SCPM enabled, these actions (and the relevant messages) are
	 * neither needed nor permitted.
	 */
	if (!adev->scpm_enabled) {
		ret = smu_feature_set_allowed_mask(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to set driver allowed features mask!\n");
			return ret;
		}
	}

	ret = smu_system_features_control(smu, true);
	if (ret) {
		dev_err(adev->dev, "Failed to enable requested dpm features!\n");
		return ret;
	}

	ret = smu_feature_get_enabled_mask(smu, &features_supported);
	if (ret) {
		dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
		return ret;
	}
	bitmap_copy(feature->supported,
		    (unsigned long *)&features_supported,
		    feature->feature_num);

	if (!smu_is_dpm_running(smu))
		dev_info(adev->dev, "dpm has been disabled\n");

	/*
	 * Set initialized values (got from vbios) to dpm tables context such as
	 * gfxclk, memclk, dcefclk, etc. And enable the DPM feature for each
	 * type of clks.
	 */
	ret = smu_set_default_dpm_table(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
		return ret;
	}

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
	 */
	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
		pcie_width = 6;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
		pcie_width = 5;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
		pcie_width = 4;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
		pcie_width = 3;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
		pcie_width = 2;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
		pcie_width = 1;
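	/*
	 * For example, a link that advertises Gen4 x16 support ends up
	 * requesting pcie_gen = 3 and pcie_width = 6 here.
	 */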
	ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
	if (ret) {
		dev_err(adev->dev, "Attempt to override pcie params failed!\n");
		return ret;
	}

	ret = smu_get_thermal_temperature_range(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
		return ret;
	}

	ret = smu_enable_thermal_alert(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to enable thermal alert!\n");
		return ret;
	}

	ret = smu_notify_display_change(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to notify display change!\n");
		return ret;
	}

	/*
	 * Set min deep sleep dce fclk with bootup value from vbios via
	 * SetMinDeepSleepDcefclk MSG.
	 */
	ret = smu_set_min_dcef_deep_sleep(smu,
					  smu->smu_table.boot_values.dcefclk / 100);

	return ret;
}

static int smu_start_smc_engine(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		if (adev->ip_versions[MP1_HWIP][0] < IP_VERSION(11, 0, 0)) {
			if (smu->ppt_funcs->load_microcode) {
				ret = smu->ppt_funcs->load_microcode(smu);
				if (ret)
					return ret;
			}
		}
	}

	if (smu->ppt_funcs->check_fw_status) {
		ret = smu->ppt_funcs->check_fw_status(smu);
		if (ret) {
			dev_err(adev->dev, "SMC is not ready\n");
			return ret;
		}
	}

	/*
	 * Send msg GetDriverIfVersion to check if the return value is equal
	 * to DRIVER_IF_VERSION of smc header.
	 */
	ret = smu_check_fw_version(smu);
	if (ret)
		return ret;

	return ret;
}

static int smu_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
		smu->pm_enabled = false;
		return 0;
	}

	ret = smu_start_smc_engine(smu);
	if (ret) {
		dev_err(adev->dev, "SMC engine is not correctly up!\n");
		return ret;
	}

	if (smu->is_apu) {
		if ((smu->ppt_funcs->set_gfx_power_up_by_imu) &&
				likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
			ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
			if (ret) {
				dev_err(adev->dev, "Failed to enable gfx imu!\n");
				return ret;
			}
		}

		smu_dpm_set_vcn_enable(smu, true);
		smu_dpm_set_jpeg_enable(smu, true);
		smu_set_gfx_cgpg(smu, true);
	}

	if (!smu->pm_enabled)
		return 0;

	ret = smu_get_driver_allowed_feature_mask(smu);
	if (ret)
		return ret;

	ret = smu_smc_hw_setup(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup smc hw!\n");
		return ret;
	}

	/*
	 * Move maximum sustainable clock retrieving here considering
	 * 1. It is not needed on resume (from S3).
	 * 2. DAL settings come between .hw_init and .late_init of SMU.
	 *    And DAL needs to know the maximum sustainable clocks. Thus
	 *    it cannot be put in .late_init().
	 */
	ret = smu_init_max_sustainable_clocks(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to init max sustainable clocks!\n");
		return ret;
	}

	adev->pm.dpm_enabled = true;

	dev_info(adev->dev, "SMU is initialized successfully!\n");

	return 0;
}

static int smu_disable_dpms(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;
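	/*
	 * BACO is entered either for a BACO-based GPU reset or for runtime
	 * PM / S4 on a BACO-capable dGPU; APUs never take this path.
	 */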
	bool use_baco = !smu->is_apu &&
		((amdgpu_in_reset(adev) &&
		  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
		 ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));

	/*
	 * For SMU 13.0.0 and 13.0.7, PMFW will handle the DPM features (disablement
	 * and others) properly on suspend/reset/unload. Driver involvement may cause
	 * some unexpected issues.
	 */
	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 10):
		return 0;
	default:
		break;
	}

	/*
	 * For custom pptable uploading, skip the DPM features
	 * disable process on Navi1x ASICs.
	 *   - As the gfx related features are under control of
	 *     RLC on those ASICs. RLC reinitialization will be
	 *     needed to reenable them. That will cost much more
	 *     effort.
	 *
	 *   - SMU firmware can handle the DPM reenablement
	 *     properly.
	 */
	if (smu->uploading_custom_pp_table) {
		switch (adev->ip_versions[MP1_HWIP][0]) {
		case IP_VERSION(11, 0, 0):
		case IP_VERSION(11, 0, 5):
		case IP_VERSION(11, 0, 9):
		case IP_VERSION(11, 0, 7):
		case IP_VERSION(11, 0, 11):
		case IP_VERSION(11, 5, 0):
		case IP_VERSION(11, 0, 12):
		case IP_VERSION(11, 0, 13):
			return 0;
		default:
			break;
		}
	}

	/*
	 * For Sienna_Cichlid, PMFW will handle the features disablement properly
	 * on BACO entry. Driver involvement is unnecessary.
	 */
	if (use_baco) {
		switch (adev->ip_versions[MP1_HWIP][0]) {
		case IP_VERSION(11, 0, 7):
		case IP_VERSION(11, 0, 0):
		case IP_VERSION(11, 0, 5):
		case IP_VERSION(11, 0, 9):
		case IP_VERSION(13, 0, 7):
			return 0;
		default:
			break;
		}
	}

	/*
	 * For SMU 13.0.4/11, PMFW will handle the features disablement properly
	 * for the gpu reset case. Driver involvement is unnecessary.
	 */
	if (amdgpu_in_reset(adev)) {
		switch (adev->ip_versions[MP1_HWIP][0]) {
		case IP_VERSION(13, 0, 4):
		case IP_VERSION(13, 0, 11):
			return 0;
		default:
			break;
		}
	}

	/*
	 * For gpu reset, runpm and hibernation through BACO,
	 * BACO feature has to be kept enabled.
	 */
	if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
		ret = smu_disable_all_features_with_exception(smu,
							      SMU_FEATURE_BACO_BIT);
		if (ret)
			dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
	} else {
		/* DisableAllSmuFeatures message is not permitted with SCPM enabled */
		if (!adev->scpm_enabled) {
			ret = smu_system_features_control(smu, false);
			if (ret)
				dev_err(adev->dev, "Failed to disable smu features.\n");
		}
	}

	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) &&
	    !amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->stop)
		adev->gfx.rlc.funcs->stop(adev);

	return ret;
}

static int smu_smc_hw_cleanup(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	cancel_work_sync(&smu->throttling_logging_work);
	cancel_work_sync(&smu->interrupt_work);

	ret = smu_disable_thermal_alert(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to disable thermal alert!\n");
		return ret;
	}

	ret = smu_disable_dpms(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to disable dpm features!\n");
		return ret;
	}

	return 0;
}

static int smu_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	smu_dpm_set_vcn_enable(smu, false);
	smu_dpm_set_jpeg_enable(smu, false);

	adev->vcn.cur_state = AMD_PG_STATE_GATE;
	adev->jpeg.cur_state = AMD_PG_STATE_GATE;

	if (!smu->pm_enabled)
		return 0;

	adev->pm.dpm_enabled = false;

	return smu_smc_hw_cleanup(smu);
}

static void smu_late_fini(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct smu_context *smu = adev->powerplay.pp_handle;

	kfree(smu);
}

static int smu_reset(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;

	ret = smu_hw_fini(adev);
	if (ret)
		return ret;

	ret = smu_hw_init(adev);
	if (ret)
		return ret;

	ret = smu_late_init(adev);
	if (ret)
		return ret;

	return 0;
}

static int smu_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;
	uint64_t count;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (!smu->pm_enabled)
		return 0;

	adev->pm.dpm_enabled = false;

	ret = smu_smc_hw_cleanup(smu);
	if (ret)
		return ret;

	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);

	smu_set_gfx_cgpg(smu, false);

	/*
	 * PMFW resets the entrycount when the device is suspended, so we save
	 * the last value here to keep it consistent when we resume.
	 */
	ret = smu_get_entrycount_gfxoff(smu, &count);
	if (!ret)
		adev->gfx.gfx_off_entrycount = count;

	return 0;
}

static int smu_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (!smu->pm_enabled)
		return 0;

	dev_info(adev->dev, "SMU is resuming...\n");

	ret = smu_start_smc_engine(smu);
	if (ret) {
		dev_err(adev->dev, "SMC engine is not correctly up!\n");
		return ret;
	}

	ret = smu_smc_hw_setup(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup smc hw!\n");
		return ret;
	}

	smu_set_gfx_cgpg(smu, true);

	smu->disable_uclk_switch = 0;

	adev->pm.dpm_enabled = true;

	dev_info(adev->dev, "SMU is resumed successfully!\n");

	return 0;
}

static int smu_display_configuration_change(void *handle,
					    const struct amd_pp_display_configuration *display_config)
{
	struct smu_context *smu = handle;
	int index = 0;
	int num_of_active_display = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!display_config)
		return -EINVAL;

	smu_set_min_dcef_deep_sleep(smu,
				    display_config->min_dcef_deep_sleep_set_clk / 100);

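	/*
	 * Count the active displays for reference; note that nothing in this
	 * function consumes the count at present.
	 */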
	for (index = 0; index < display_config->num_path_including_non_display; index++) {
		if (display_config->displays[index].controller_id != 0)
			num_of_active_display++;
	}

	return 0;
}

static int smu_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int smu_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}

static int smu_enable_umd_pstate(void *handle,
		      enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	struct smu_context *smu = (struct smu_context*)(handle);
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg */
		if (*level & profile_mode_mask) {
			smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
			smu_gpo_control(smu, false);
			smu_gfx_ulv_control(smu, false);
			smu_deep_sleep_control(smu, false);
			amdgpu_asic_update_umd_stable_pstate(smu->adev, true);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg */
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = smu_dpm_ctx->saved_dpm_level;
			amdgpu_asic_update_umd_stable_pstate(smu->adev, false);
			smu_deep_sleep_control(smu, true);
			smu_gfx_ulv_control(smu, true);
			smu_gpo_control(smu, true);
		}
	}

	return 0;
}

static int smu_bump_power_profile_mode(struct smu_context *smu,
					   long *param,
					   uint32_t param_size)
{
	int ret = 0;

	if (smu->ppt_funcs->set_power_profile_mode)
		ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);

	return ret;
}

static int smu_adjust_power_state_dynamic(struct smu_context *smu,
				   enum amd_dpm_forced_level level,
				   bool skip_display_settings)
{
	int ret = 0;
	int index = 0;
	long workload;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!skip_display_settings) {
		ret = smu_display_config_changed(smu);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to change display config!");
			return ret;
		}
	}

	ret = smu_apply_clocks_adjust_rules(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!");
		return ret;
	}

	if (!skip_display_settings) {
		ret = smu_notify_smc_display_config(smu);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to notify smc display config!");
			return ret;
		}
	}

	if (smu_dpm_ctx->dpm_level != level) {
		ret = smu_asic_set_performance_level(smu, level);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to set performance level!");
			return ret;
		}

		/* update the saved copy */
		smu_dpm_ctx->dpm_level = level;
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
		smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
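		/*
		 * fls() returns the index of the highest set bit (1-based), so
		 * the highest-priority profile still enabled in workload_mask
		 * is the one applied.
		 */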
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];

		if (smu->power_profile_mode != workload)
			smu_bump_power_profile_mode(smu, &workload, 0);
	}

	return ret;
}

static int smu_handle_task(struct smu_context *smu,
			   enum amd_dpm_forced_level level,
			   enum amd_pp_task task_id)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	switch (task_id) {
	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
		ret = smu_pre_display_config_changed(smu);
		if (ret)
			return ret;
		ret = smu_adjust_power_state_dynamic(smu, level, false);
		break;
	case AMD_PP_TASK_COMPLETE_INIT:
	case AMD_PP_TASK_READJUST_POWER_STATE:
		ret = smu_adjust_power_state_dynamic(smu, level, true);
		break;
	default:
		break;
	}

	return ret;
}

static int smu_handle_dpm_task(void *handle,
			       enum amd_pp_task task_id,
			       enum amd_pm_state_type *user_state)
{
	struct smu_context *smu = handle;
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	return smu_handle_task(smu, smu_dpm->dpm_level, task_id);
}
1863 
1864 static int smu_switch_power_profile(void *handle,
1865 				    enum PP_SMC_POWER_PROFILE type,
1866 				    bool en)
1867 {
1868 	struct smu_context *smu = handle;
1869 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1870 	long workload;
1871 	uint32_t index;
1872 
1873 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1874 		return -EOPNOTSUPP;
1875 
1876 	if (type >= PP_SMC_POWER_PROFILE_CUSTOM)
1877 		return -EINVAL;
1878 
1879 	if (!en) {
1880 		smu->workload_mask &= ~(1 << smu->workload_prority[type]);
1881 		index = fls(smu->workload_mask);
1882 		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1883 		workload = smu->workload_setting[index];
1884 	} else {
1885 		smu->workload_mask |= (1 << smu->workload_prority[type]);
1886 		index = fls(smu->workload_mask);
1887 		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1888 		workload = smu->workload_setting[index];
1889 	}
1890 
1891 	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
1892 		smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
1893 		smu_bump_power_profile_mode(smu, &workload, 0);
1894 
1895 	return 0;
1896 }
1897 
1898 static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
1899 {
1900 	struct smu_context *smu = handle;
1901 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1902 
1903 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1904 		return -EOPNOTSUPP;
1905 
1906 	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1907 		return -EINVAL;
1908 
1909 	return smu_dpm_ctx->dpm_level;
1910 }
1911 
1912 static int smu_force_performance_level(void *handle,
1913 				       enum amd_dpm_forced_level level)
1914 {
1915 	struct smu_context *smu = handle;
1916 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1917 	int ret = 0;
1918 
1919 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1920 		return -EOPNOTSUPP;
1921 
1922 	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1923 		return -EINVAL;
1924 
1925 	ret = smu_enable_umd_pstate(smu, &level);
1926 	if (ret)
1927 		return ret;
1928 
1929 	ret = smu_handle_task(smu, level,
1930 			      AMD_PP_TASK_READJUST_POWER_STATE);
1931 
1932 	/* reset user dpm clock state */
1933 	if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1934 		memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask));
1935 		smu->user_dpm_profile.clk_dependency = 0;
1936 	}
1937 
1938 	return ret;
1939 }
1940 
1941 static int smu_set_display_count(void *handle, uint32_t count)
1942 {
1943 	struct smu_context *smu = handle;
1944 
1945 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1946 		return -EOPNOTSUPP;
1947 
1948 	return smu_init_display_count(smu, count);
1949 }
1950 
1951 static int smu_force_smuclk_levels(struct smu_context *smu,
1952 			 enum smu_clk_type clk_type,
1953 			 uint32_t mask)
1954 {
1955 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1956 	int ret = 0;
1957 
1958 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1959 		return -EOPNOTSUPP;
1960 
1961 	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1962 		dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");
1963 		return -EINVAL;
1964 	}
1965 
1966 	if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
1967 		ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
1968 		if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
1969 			smu->user_dpm_profile.clk_mask[clk_type] = mask;
1970 			smu_set_user_clk_dependencies(smu, clk_type);
1971 		}
1972 	}
1973 
1974 	return ret;
1975 }
1976 
1977 static int smu_force_ppclk_levels(void *handle,
1978 				  enum pp_clock_type type,
1979 				  uint32_t mask)
1980 {
1981 	struct smu_context *smu = handle;
1982 	enum smu_clk_type clk_type;
1983 
1984 	switch (type) {
1985 	case PP_SCLK:
1986 		clk_type = SMU_SCLK; break;
1987 	case PP_MCLK:
1988 		clk_type = SMU_MCLK; break;
1989 	case PP_PCIE:
1990 		clk_type = SMU_PCIE; break;
1991 	case PP_SOCCLK:
1992 		clk_type = SMU_SOCCLK; break;
1993 	case PP_FCLK:
1994 		clk_type = SMU_FCLK; break;
1995 	case PP_DCEFCLK:
1996 		clk_type = SMU_DCEFCLK; break;
1997 	case PP_VCLK:
1998 		clk_type = SMU_VCLK; break;
1999 	case PP_DCLK:
2000 		clk_type = SMU_DCLK; break;
2001 	case OD_SCLK:
2002 		clk_type = SMU_OD_SCLK; break;
2003 	case OD_MCLK:
2004 		clk_type = SMU_OD_MCLK; break;
2005 	case OD_VDDC_CURVE:
2006 		clk_type = SMU_OD_VDDC_CURVE; break;
2007 	case OD_RANGE:
2008 		clk_type = SMU_OD_RANGE; break;
2009 	default:
2010 		return -EINVAL;
2011 	}
2012 
2013 	return smu_force_smuclk_levels(smu, clk_type, mask);
2014 }
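
/*
 * Note that the pp_clock_type mapping above duplicates
 * smu_convert_to_smuclk() below for the subset of types that may be
 * forced. The mask argument is a bitmask of DPM levels to keep
 * enabled, not a single level index. A hedged usage sketch (handle
 * and return checking elided):
 *
 *	// keep only SCLK DPM levels 0 and 2 enabled => mask 0b101
 *	uint32_t mask = BIT(0) | BIT(2);
 *
 *	ret = smu_force_ppclk_levels(handle, PP_SCLK, mask);
 *
 * which is what "echo 0 2 > pp_dpm_sclk" requests through sysfs,
 * and is honored only while the dpm level is
 * AMD_DPM_FORCED_LEVEL_MANUAL.
 */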
2015 
2016 /*
2017  * On system suspend or reset, the dpm_enabled flag is
2018  * cleared so that SMU services which are no longer
2019  * supported get gated.
2020  * However, setting the mp1 state should still be allowed
2021  * even with dpm_enabled cleared.
2022  */
2023 static int smu_set_mp1_state(void *handle,
2024 			     enum pp_mp1_state mp1_state)
2025 {
2026 	struct smu_context *smu = handle;
2027 	int ret = 0;
2028 
2029 	if (!smu->pm_enabled)
2030 		return -EOPNOTSUPP;
2031 
2032 	if (smu->ppt_funcs &&
2033 	    smu->ppt_funcs->set_mp1_state)
2034 		ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);
2035 
2036 	return ret;
2037 }
2038 
2039 static int smu_set_df_cstate(void *handle,
2040 			     enum pp_df_cstate state)
2041 {
2042 	struct smu_context *smu = handle;
2043 	int ret = 0;
2044 
2045 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2046 		return -EOPNOTSUPP;
2047 
2048 	if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
2049 		return 0;
2050 
2051 	ret = smu->ppt_funcs->set_df_cstate(smu, state);
2052 	if (ret)
2053 		dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");
2054 
2055 	return ret;
2056 }
2057 
2058 int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
2059 {
2060 	int ret = 0;
2061 
2062 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2063 		return -EOPNOTSUPP;
2064 
2065 	if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
2066 		return 0;
2067 
2068 	ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
2069 	if (ret)
2070 		dev_err(smu->adev->dev, "[AllowXgmiPowerDown] failed!\n");
2071 
2072 	return ret;
2073 }
2074 
2075 int smu_write_watermarks_table(struct smu_context *smu)
2076 {
2077 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2078 		return -EOPNOTSUPP;
2079 
2080 	return smu_set_watermarks_table(smu, NULL);
2081 }
2082 
2083 static int smu_set_watermarks_for_clock_ranges(void *handle,
2084 					       struct pp_smu_wm_range_sets *clock_ranges)
2085 {
2086 	struct smu_context *smu = handle;
2087 
2088 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2089 		return -EOPNOTSUPP;
2090 
2091 	if (smu->disable_watermark)
2092 		return 0;
2093 
2094 	return smu_set_watermarks_table(smu, clock_ranges);
2095 }
2096 
2097 int smu_set_ac_dc(struct smu_context *smu)
2098 {
2099 	int ret = 0;
2100 
2101 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2102 		return -EOPNOTSUPP;
2103 
2104 	/* the AC/DC transition is handled by firmware when GPIO-controlled */
2105 	if (smu->dc_controlled_by_gpio)
2106 		return 0;
2107 
2108 	ret = smu_set_power_source(smu,
2109 				   smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
2110 				   SMU_POWER_SOURCE_DC);
2111 	if (ret)
2112 		dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
2113 		       smu->adev->pm.ac_power ? "AC" : "DC");
2114 
2115 	return ret;
2116 }
2117 
2118 const struct amd_ip_funcs smu_ip_funcs = {
2119 	.name = "smu",
2120 	.early_init = smu_early_init,
2121 	.late_init = smu_late_init,
2122 	.sw_init = smu_sw_init,
2123 	.sw_fini = smu_sw_fini,
2124 	.hw_init = smu_hw_init,
2125 	.hw_fini = smu_hw_fini,
2126 	.late_fini = smu_late_fini,
2127 	.suspend = smu_suspend,
2128 	.resume = smu_resume,
2129 	.is_idle = NULL,
2130 	.check_soft_reset = NULL,
2131 	.wait_for_idle = NULL,
2132 	.soft_reset = NULL,
2133 	.set_clockgating_state = smu_set_clockgating_state,
2134 	.set_powergating_state = smu_set_powergating_state,
2135 };
2136 
2137 const struct amdgpu_ip_block_version smu_v11_0_ip_block =
2138 {
2139 	.type = AMD_IP_BLOCK_TYPE_SMC,
2140 	.major = 11,
2141 	.minor = 0,
2142 	.rev = 0,
2143 	.funcs = &smu_ip_funcs,
2144 };
2145 
2146 const struct amdgpu_ip_block_version smu_v12_0_ip_block =
2147 {
2148 	.type = AMD_IP_BLOCK_TYPE_SMC,
2149 	.major = 12,
2150 	.minor = 0,
2151 	.rev = 0,
2152 	.funcs = &smu_ip_funcs,
2153 };
2154 
2155 const struct amdgpu_ip_block_version smu_v13_0_ip_block =
2156 {
2157 	.type = AMD_IP_BLOCK_TYPE_SMC,
2158 	.major = 13,
2159 	.minor = 0,
2160 	.rev = 0,
2161 	.funcs = &smu_ip_funcs,
2162 };
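
/*
 * These descriptors are how the SMU IP integrates with the amdgpu
 * core: a per-ASIC soc file adds the matching version and the core
 * then drives the smu_ip_funcs callbacks above through its common
 * init/fini/suspend/resume sequencing. A minimal sketch of such a
 * registration (the real call sites live in the per-ASIC soc files,
 * e.g. for an SMU v13 based ASIC):
 *
 *	amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
 */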
2163 
2164 static int smu_load_microcode(void *handle)
2165 {
2166 	struct smu_context *smu = handle;
2167 	struct amdgpu_device *adev = smu->adev;
2168 	int ret = 0;
2169 
2170 	if (!smu->pm_enabled)
2171 		return -EOPNOTSUPP;
2172 
2173 	/* This should be used for non-PSP loading */
2174 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
2175 		return 0;
2176 
2177 	if (smu->ppt_funcs->load_microcode) {
2178 		ret = smu->ppt_funcs->load_microcode(smu);
2179 		if (ret) {
2180 			dev_err(adev->dev, "Load microcode failed\n");
2181 			return ret;
2182 		}
2183 	}
2184 
2185 	if (smu->ppt_funcs->check_fw_status) {
2186 		ret = smu->ppt_funcs->check_fw_status(smu);
2187 		if (ret) {
2188 			dev_err(adev->dev, "SMC is not ready\n");
2189 			return ret;
2190 		}
2191 	}
2192 
2193 	return ret;
2194 }
2195 
2196 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
2197 {
2198 	int ret = 0;
2199 
2200 	if (smu->ppt_funcs->set_gfx_cgpg)
2201 		ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
2202 
2203 	return ret;
2204 }
2205 
2206 static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
2207 {
2208 	struct smu_context *smu = handle;
2209 	int ret = 0;
2210 
2211 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2212 		return -EOPNOTSUPP;
2213 
2214 	if (!smu->ppt_funcs->set_fan_speed_rpm)
2215 		return -EOPNOTSUPP;
2216 
2217 	if (speed == U32_MAX)
2218 		return -EINVAL;
2219 
2220 	ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
2221 	if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
2222 		smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM;
2223 		smu->user_dpm_profile.fan_speed_rpm = speed;
2224 
2225 		/* Override the custom PWM setting as the two cannot coexist */
2226 		smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM;
2227 		smu->user_dpm_profile.fan_speed_pwm = 0;
2228 	}
2229 
2230 	return ret;
2231 }
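
/*
 * Custom RPM and PWM fan requests are mutually exclusive in
 * user_dpm_profile: whichever is set last wins and the other custom
 * flag is cleared, so the restore path replays exactly one of them.
 * An illustrative sequence (speeds are hypothetical):
 *
 *	smu_set_fan_speed_rpm(handle, 3000);	// sets SMU_CUSTOM_FAN_SPEED_RPM
 *	smu_set_fan_speed_pwm(handle, 128);	// clears it, sets ..._PWM
 */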
2232 
2233 /**
2234  * smu_get_power_limit - Request one of the SMU Power Limits
2235  *
2236  * @handle: pointer to smu context
2237  * @limit: requested limit is written back to this variable
2238  * @pp_limit_level: &pp_power_limit_level indicating which limit to return
2239  * @pp_power_type: &pp_power_type indicating the type of power limit
2240  *
2241  * Return: 0 on success, <0 on error
2242  */
2243 int smu_get_power_limit(void *handle,
2244 			uint32_t *limit,
2245 			enum pp_power_limit_level pp_limit_level,
2246 			enum pp_power_type pp_power_type)
2247 {
2248 	struct smu_context *smu = handle;
2249 	struct amdgpu_device *adev = smu->adev;
2250 	enum smu_ppt_limit_level limit_level;
2251 	uint32_t limit_type;
2252 	int ret = 0;
2253 
2254 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2255 		return -EOPNOTSUPP;
2256 
2257 	switch (pp_power_type) {
2258 	case PP_PWR_TYPE_SUSTAINED:
2259 		limit_type = SMU_DEFAULT_PPT_LIMIT;
2260 		break;
2261 	case PP_PWR_TYPE_FAST:
2262 		limit_type = SMU_FAST_PPT_LIMIT;
2263 		break;
2264 	default:
2265 		return -EOPNOTSUPP;
2267 	}
2268 
2269 	switch (pp_limit_level) {
2270 	case PP_PWR_LIMIT_CURRENT:
2271 		limit_level = SMU_PPT_LIMIT_CURRENT;
2272 		break;
2273 	case PP_PWR_LIMIT_DEFAULT:
2274 		limit_level = SMU_PPT_LIMIT_DEFAULT;
2275 		break;
2276 	case PP_PWR_LIMIT_MAX:
2277 		limit_level = SMU_PPT_LIMIT_MAX;
2278 		break;
2279 	case PP_PWR_LIMIT_MIN:
2280 	default:
2281 		return -EOPNOTSUPP;
2283 	}
2284 
2285 	if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
2286 		if (smu->ppt_funcs->get_ppt_limit)
2287 			ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
2288 	} else {
2289 		switch (limit_level) {
2290 		case SMU_PPT_LIMIT_CURRENT:
2291 			switch (adev->ip_versions[MP1_HWIP][0]) {
2292 			case IP_VERSION(13, 0, 2):
2293 			case IP_VERSION(11, 0, 7):
2294 			case IP_VERSION(11, 0, 11):
2295 			case IP_VERSION(11, 0, 12):
2296 			case IP_VERSION(11, 0, 13):
2297 				ret = smu_get_asic_power_limits(smu,
2298 								&smu->current_power_limit,
2299 								NULL,
2300 								NULL);
2301 				break;
2302 			default:
2303 				break;
2304 			}
2305 			*limit = smu->current_power_limit;
2306 			break;
2307 		case SMU_PPT_LIMIT_DEFAULT:
2308 			*limit = smu->default_power_limit;
2309 			break;
2310 		case SMU_PPT_LIMIT_MAX:
2311 			*limit = smu->max_power_limit;
2312 			break;
2313 		default:
2314 			break;
2315 		}
2316 	}
2317 
2318 	return ret;
2319 }
2320 
2321 static int smu_set_power_limit(void *handle, uint32_t limit)
2322 {
2323 	struct smu_context *smu = handle;
2324 	uint32_t limit_type = limit >> 24;
2325 	int ret = 0;
2326 
2327 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2328 		return -EOPNOTSUPP;
2329 
2330 	limit &= (1 << 24) - 1;
2331 	if (limit_type != SMU_DEFAULT_PPT_LIMIT)
2332 		if (smu->ppt_funcs->set_power_limit)
2333 			return smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
2334 
2335 	if (limit > smu->max_power_limit) {
2336 		dev_err(smu->adev->dev,
2337 			"New power limit (%d) is over the max allowed %d\n",
2338 			limit, smu->max_power_limit);
2339 		return -EINVAL;
2340 	}
2341 
2342 	if (!limit)
2343 		limit = smu->current_power_limit;
2344 
2345 	if (smu->ppt_funcs->set_power_limit) {
2346 		ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
2347 		if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
2348 			smu->user_dpm_profile.power_limit = limit;
2349 	}
2350 
2351 	return ret;
2352 }
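
/*
 * The limit argument to smu_set_power_limit() packs the PPT limit
 * type into bits 31:24 and the wattage into bits 23:0. A worked
 * example of the encoding (the wattage value is hypothetical):
 *
 *	uint32_t watts = 220;
 *	uint32_t packed = (SMU_FAST_PPT_LIMIT << 24) |
 *			  (watts & ((1 << 24) - 1));
 *
 *	ret = smu_set_power_limit(handle, packed);
 *
 * A zero in bits 31:24 (SMU_DEFAULT_PPT_LIMIT) selects the sustained
 * limit path above, which validates against max_power_limit.
 */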
2353 
2354 static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
2355 {
2356 	int ret = 0;
2357 
2358 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2359 		return -EOPNOTSUPP;
2360 
2361 	if (smu->ppt_funcs->print_clk_levels)
2362 		ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
2363 
2364 	return ret;
2365 }
2366 
2367 static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type)
2368 {
2369 	enum smu_clk_type clk_type;
2370 
2371 	switch (type) {
2372 	case PP_SCLK:
2373 		clk_type = SMU_SCLK; break;
2374 	case PP_MCLK:
2375 		clk_type = SMU_MCLK; break;
2376 	case PP_PCIE:
2377 		clk_type = SMU_PCIE; break;
2378 	case PP_SOCCLK:
2379 		clk_type = SMU_SOCCLK; break;
2380 	case PP_FCLK:
2381 		clk_type = SMU_FCLK; break;
2382 	case PP_DCEFCLK:
2383 		clk_type = SMU_DCEFCLK; break;
2384 	case PP_VCLK:
2385 		clk_type = SMU_VCLK; break;
2386 	case PP_DCLK:
2387 		clk_type = SMU_DCLK; break;
2388 	case OD_SCLK:
2389 		clk_type = SMU_OD_SCLK; break;
2390 	case OD_MCLK:
2391 		clk_type = SMU_OD_MCLK; break;
2392 	case OD_VDDC_CURVE:
2393 		clk_type = SMU_OD_VDDC_CURVE; break;
2394 	case OD_RANGE:
2395 		clk_type = SMU_OD_RANGE; break;
2396 	case OD_VDDGFX_OFFSET:
2397 		clk_type = SMU_OD_VDDGFX_OFFSET; break;
2398 	case OD_CCLK:
2399 		clk_type = SMU_OD_CCLK; break;
2400 	default:
2401 		clk_type = SMU_CLK_COUNT; break;
2402 	}
2403 
2404 	return clk_type;
2405 }
2406 
2407 static int smu_print_ppclk_levels(void *handle,
2408 				  enum pp_clock_type type,
2409 				  char *buf)
2410 {
2411 	struct smu_context *smu = handle;
2412 	enum smu_clk_type clk_type;
2413 
2414 	clk_type = smu_convert_to_smuclk(type);
2415 	if (clk_type == SMU_CLK_COUNT)
2416 		return -EINVAL;
2417 
2418 	return smu_print_smuclk_levels(smu, clk_type, buf);
2419 }
2420 
2421 static int smu_emit_ppclk_levels(void *handle, enum pp_clock_type type, char *buf, int *offset)
2422 {
2423 	struct smu_context *smu = handle;
2424 	enum smu_clk_type clk_type;
2425 
2426 	clk_type = smu_convert_to_smuclk(type);
2427 	if (clk_type == SMU_CLK_COUNT)
2428 		return -EINVAL;
2429 
2430 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2431 		return -EOPNOTSUPP;
2432 
2433 	if (!smu->ppt_funcs->emit_clk_levels)
2434 		return -ENOENT;
2435 
2436 	return smu->ppt_funcs->emit_clk_levels(smu, clk_type, buf, offset);
2438 }
2439 
2440 static int smu_od_edit_dpm_table(void *handle,
2441 				 enum PP_OD_DPM_TABLE_COMMAND type,
2442 				 long *input, uint32_t size)
2443 {
2444 	struct smu_context *smu = handle;
2445 	int ret = 0;
2446 
2447 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2448 		return -EOPNOTSUPP;
2449 
2450 	if (smu->ppt_funcs->od_edit_dpm_table)
2451 		ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
2453 
2454 	return ret;
2455 }
2456 
2457 static int smu_read_sensor(void *handle,
2458 			   int sensor,
2459 			   void *data,
2460 			   int *size_arg)
2461 {
2462 	struct smu_context *smu = handle;
2463 	struct smu_umd_pstate_table *pstate_table =
2464 				&smu->pstate_table;
2465 	int ret = 0;
2466 	uint32_t *size, size_val;
2467 
2468 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2469 		return -EOPNOTSUPP;
2470 
2471 	if (!data || !size_arg)
2472 		return -EINVAL;
2473 
2474 	size_val = *size_arg;
2475 	size = &size_val;
2476 
2477 	if (smu->ppt_funcs->read_sensor)
2478 		if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
2479 			goto out;
2480 
2481 	switch (sensor) {
2482 	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
2483 		*((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100;
2484 		*size = 4;
2485 		break;
2486 	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
2487 		*((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;
2488 		*size = 4;
2489 		break;
2490 	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
2491 		ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data);
2492 		*size = 8;
2493 		break;
2494 	case AMDGPU_PP_SENSOR_UVD_POWER:
2495 		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
2496 		*size = 4;
2497 		break;
2498 	case AMDGPU_PP_SENSOR_VCE_POWER:
2499 		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
2500 		*size = 4;
2501 		break;
2502 	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
2503 		*(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1;
2504 		*size = 4;
2505 		break;
2506 	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
2507 		*(uint32_t *)data = 0;
2508 		*size = 4;
2509 		break;
2510 	default:
2511 		*size = 0;
2512 		ret = -EOPNOTSUPP;
2513 		break;
2514 	}
2515 
2516 out:
2517 	/* copy the possibly-updated size back to the caller */
2518 	*size_arg = size_val;
2519 
2520 	return ret;
2521 }
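
/*
 * Callers of smu_read_sensor() pass the buffer size in bytes and get
 * the actual payload size written back through the same pointer. A
 * hedged sketch of a caller (error handling trimmed):
 *
 *	uint32_t value;
 *	int size = sizeof(value);
 *
 *	ret = smu_read_sensor(handle, AMDGPU_PP_SENSOR_UVD_POWER,
 *			      &value, &size);
 *	// on success, value is 0 or 1 and size was updated to 4
 */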
2522 
2523 static int smu_get_power_profile_mode(void *handle, char *buf)
2524 {
2525 	struct smu_context *smu = handle;
2526 
2527 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
2528 	    !smu->ppt_funcs->get_power_profile_mode)
2529 		return -EOPNOTSUPP;
2530 	if (!buf)
2531 		return -EINVAL;
2532 
2533 	return smu->ppt_funcs->get_power_profile_mode(smu, buf);
2534 }
2535 
2536 static int smu_set_power_profile_mode(void *handle,
2537 				      long *param,
2538 				      uint32_t param_size)
2539 {
2540 	struct smu_context *smu = handle;
2541 
2542 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
2543 	    !smu->ppt_funcs->set_power_profile_mode)
2544 		return -EOPNOTSUPP;
2545 
2546 	return smu_bump_power_profile_mode(smu, param, param_size);
2547 }
2548 
2549 static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
2550 {
2551 	struct smu_context *smu = handle;
2552 
2553 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2554 		return -EOPNOTSUPP;
2555 
2556 	if (!smu->ppt_funcs->get_fan_control_mode)
2557 		return -EOPNOTSUPP;
2558 
2559 	if (!fan_mode)
2560 		return -EINVAL;
2561 
2562 	*fan_mode = smu->ppt_funcs->get_fan_control_mode(smu);
2563 
2564 	return 0;
2565 }
2566 
2567 static int smu_set_fan_control_mode(void *handle, u32 value)
2568 {
2569 	struct smu_context *smu = handle;
2570 	int ret = 0;
2571 
2572 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2573 		return -EOPNOTSUPP;
2574 
2575 	if (!smu->ppt_funcs->set_fan_control_mode)
2576 		return -EOPNOTSUPP;
2577 
2578 	if (value == U32_MAX)
2579 		return -EINVAL;
2580 
2581 	ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
2582 	if (ret)
2583 		goto out;
2584 
2585 	if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
2586 		smu->user_dpm_profile.fan_mode = value;
2587 
2588 		/* reset user dpm fan speed */
2589 		if (value != AMD_FAN_CTRL_MANUAL) {
2590 			smu->user_dpm_profile.fan_speed_pwm = 0;
2591 			smu->user_dpm_profile.fan_speed_rpm = 0;
2592 			smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM);
2593 		}
2594 	}
2595 
2596 out:
2597 	return ret;
2598 }
2599 
2600 static int smu_get_fan_speed_pwm(void *handle, u32 *speed)
2601 {
2602 	struct smu_context *smu = handle;
2603 	int ret = 0;
2604 
2605 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2606 		return -EOPNOTSUPP;
2607 
2608 	if (!smu->ppt_funcs->get_fan_speed_pwm)
2609 		return -EOPNOTSUPP;
2610 
2611 	if (!speed)
2612 		return -EINVAL;
2613 
2614 	ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed);
2615 
2616 	return ret;
2617 }
2618 
2619 static int smu_set_fan_speed_pwm(void *handle, u32 speed)
2620 {
2621 	struct smu_context *smu = handle;
2622 	int ret = 0;
2623 
2624 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2625 		return -EOPNOTSUPP;
2626 
2627 	if (!smu->ppt_funcs->set_fan_speed_pwm)
2628 		return -EOPNOTSUPP;
2629 
2630 	if (speed == U32_MAX)
2631 		return -EINVAL;
2632 
2633 	ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed);
2634 	if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
2635 		smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM;
2636 		smu->user_dpm_profile.fan_speed_pwm = speed;
2637 
2638 		/* Override the custom RPM setting as the two cannot coexist */
2639 		smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM;
2640 		smu->user_dpm_profile.fan_speed_rpm = 0;
2641 	}
2642 
2643 	return ret;
2644 }
2645 
2646 static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
2647 {
2648 	struct smu_context *smu = handle;
2649 	int ret = 0;
2650 
2651 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2652 		return -EOPNOTSUPP;
2653 
2654 	if (!smu->ppt_funcs->get_fan_speed_rpm)
2655 		return -EOPNOTSUPP;
2656 
2657 	if (!speed)
2658 		return -EINVAL;
2659 
2660 	ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
2661 
2662 	return ret;
2663 }
2664 
2665 static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
2666 {
2667 	struct smu_context *smu = handle;
2668 
2669 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2670 		return -EOPNOTSUPP;
2671 
2672 	return smu_set_min_dcef_deep_sleep(smu, clk);
2673 }
2674 
2675 static int smu_get_clock_by_type_with_latency(void *handle,
2676 					      enum amd_pp_clock_type type,
2677 					      struct pp_clock_levels_with_latency *clocks)
2678 {
2679 	struct smu_context *smu = handle;
2680 	enum smu_clk_type clk_type;
2681 	int ret = 0;
2682 
2683 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2684 		return -EOPNOTSUPP;
2685 
2686 	if (smu->ppt_funcs->get_clock_by_type_with_latency) {
2687 		switch (type) {
2688 		case amd_pp_sys_clock:
2689 			clk_type = SMU_GFXCLK;
2690 			break;
2691 		case amd_pp_mem_clock:
2692 			clk_type = SMU_MCLK;
2693 			break;
2694 		case amd_pp_dcef_clock:
2695 			clk_type = SMU_DCEFCLK;
2696 			break;
2697 		case amd_pp_disp_clock:
2698 			clk_type = SMU_DISPCLK;
2699 			break;
2700 		default:
2701 			dev_err(smu->adev->dev, "Invalid clock type!\n");
2702 			return -EINVAL;
2703 		}
2704 
2705 		ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
2706 	}
2707 
2708 	return ret;
2709 }
2710 
2711 static int smu_display_clock_voltage_request(void *handle,
2712 					     struct pp_display_clock_request *clock_req)
2713 {
2714 	struct smu_context *smu = handle;
2715 	int ret = 0;
2716 
2717 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2718 		return -EOPNOTSUPP;
2719 
2720 	if (smu->ppt_funcs->display_clock_voltage_request)
2721 		ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
2722 
2723 	return ret;
2724 }
2725 
2727 static int smu_display_disable_memory_clock_switch(void *handle,
2728 						   bool disable_memory_clock_switch)
2729 {
2730 	struct smu_context *smu = handle;
2731 	int ret = -EINVAL;
2732 
2733 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2734 		return -EOPNOTSUPP;
2735 
2736 	if (smu->ppt_funcs->display_disable_memory_clock_switch)
2737 		ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
2738 
2739 	return ret;
2740 }
2741 
2742 static int smu_set_xgmi_pstate(void *handle,
2743 			       uint32_t pstate)
2744 {
2745 	struct smu_context *smu = handle;
2746 	int ret = 0;
2747 
2748 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2749 		return -EOPNOTSUPP;
2750 
2751 	if (smu->ppt_funcs->set_xgmi_pstate)
2752 		ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
2753 
2754 	if (ret)
2755 		dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");
2756 
2757 	return ret;
2758 }
2759 
2760 static int smu_get_baco_capability(void *handle, bool *cap)
2761 {
2762 	struct smu_context *smu = handle;
2763 
2764 	*cap = false;
2765 
2766 	if (!smu->pm_enabled)
2767 		return 0;
2768 
2769 	if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
2770 		*cap = smu->ppt_funcs->baco_is_support(smu);
2771 
2772 	return 0;
2773 }
2774 
2775 static int smu_baco_set_state(void *handle, int state)
2776 {
2777 	struct smu_context *smu = handle;
2778 	int ret = 0;
2779 
2780 	if (!smu->pm_enabled)
2781 		return -EOPNOTSUPP;
2782 
2783 	if (state == 0) {
2784 		if (smu->ppt_funcs->baco_exit)
2785 			ret = smu->ppt_funcs->baco_exit(smu);
2786 	} else if (state == 1) {
2787 		if (smu->ppt_funcs->baco_enter)
2788 			ret = smu->ppt_funcs->baco_enter(smu);
2789 	} else {
2790 		return -EINVAL;
2791 	}
2792 
2793 	if (ret)
2794 		dev_err(smu->adev->dev, "Failed to %s BACO state!\n",
2795 				state ? "enter" : "exit");
2796 
2797 	return ret;
2798 }
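
/*
 * BACO state is passed as a plain int here: 1 requests entry, 0
 * requests exit, anything else is rejected. A minimal sketch of the
 * pairing used by reset paths, assuming the capability was checked
 * first:
 *
 *	bool cap = false;
 *
 *	smu_get_baco_capability(handle, &cap);
 *	if (cap) {
 *		smu_baco_set_state(handle, 1);	// enter BACO
 *		smu_baco_set_state(handle, 0);	// exit BACO
 *	}
 */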
2799 
2800 bool smu_mode1_reset_is_support(struct smu_context *smu)
2801 {
2802 	bool ret = false;
2803 
2804 	if (!smu->pm_enabled)
2805 		return false;
2806 
2807 	if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
2808 		ret = smu->ppt_funcs->mode1_reset_is_support(smu);
2809 
2810 	return ret;
2811 }
2812 
2813 bool smu_mode2_reset_is_support(struct smu_context *smu)
2814 {
2815 	bool ret = false;
2816 
2817 	if (!smu->pm_enabled)
2818 		return false;
2819 
2820 	if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support)
2821 		ret = smu->ppt_funcs->mode2_reset_is_support(smu);
2822 
2823 	return ret;
2824 }
2825 
2826 int smu_mode1_reset(struct smu_context *smu)
2827 {
2828 	int ret = 0;
2829 
2830 	if (!smu->pm_enabled)
2831 		return -EOPNOTSUPP;
2832 
2833 	if (smu->ppt_funcs->mode1_reset)
2834 		ret = smu->ppt_funcs->mode1_reset(smu);
2835 
2836 	return ret;
2837 }
2838 
2839 static int smu_mode2_reset(void *handle)
2840 {
2841 	struct smu_context *smu = handle;
2842 	int ret = 0;
2843 
2844 	if (!smu->pm_enabled)
2845 		return -EOPNOTSUPP;
2846 
2847 	if (smu->ppt_funcs->mode2_reset)
2848 		ret = smu->ppt_funcs->mode2_reset(smu);
2849 
2850 	if (ret)
2851 		dev_err(smu->adev->dev, "Mode2 reset failed!\n");
2852 
2853 	return ret;
2854 }
2855 
2856 static int smu_get_max_sustainable_clocks_by_dc(void *handle,
2857 						struct pp_smu_nv_clock_table *max_clocks)
2858 {
2859 	struct smu_context *smu = handle;
2860 	int ret = 0;
2861 
2862 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2863 		return -EOPNOTSUPP;
2864 
2865 	if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
2866 		ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
2867 
2868 	return ret;
2869 }
2870 
2871 static int smu_get_uclk_dpm_states(void *handle,
2872 				   unsigned int *clock_values_in_khz,
2873 				   unsigned int *num_states)
2874 {
2875 	struct smu_context *smu = handle;
2876 	int ret = 0;
2877 
2878 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2879 		return -EOPNOTSUPP;
2880 
2881 	if (smu->ppt_funcs->get_uclk_dpm_states)
2882 		ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
2883 
2884 	return ret;
2885 }
2886 
2887 static enum amd_pm_state_type smu_get_current_power_state(void *handle)
2888 {
2889 	struct smu_context *smu = handle;
2890 	enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
2891 
2892 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2893 		return -EOPNOTSUPP;
2894 
2895 	if (smu->ppt_funcs->get_current_power_state)
2896 		pm_state = smu->ppt_funcs->get_current_power_state(smu);
2897 
2898 	return pm_state;
2899 }
2900 
2901 static int smu_get_dpm_clock_table(void *handle,
2902 				   struct dpm_clocks *clock_table)
2903 {
2904 	struct smu_context *smu = handle;
2905 	int ret = 0;
2906 
2907 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2908 		return -EOPNOTSUPP;
2909 
2910 	if (smu->ppt_funcs->get_dpm_clock_table)
2911 		ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
2912 
2913 	return ret;
2914 }
2915 
2916 static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
2917 {
2918 	struct smu_context *smu = handle;
2919 
2920 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2921 		return -EOPNOTSUPP;
2922 
2923 	if (!smu->ppt_funcs->get_gpu_metrics)
2924 		return -EOPNOTSUPP;
2925 
2926 	return smu->ppt_funcs->get_gpu_metrics(smu, table);
2927 }
2928 
2929 static int smu_enable_mgpu_fan_boost(void *handle)
2930 {
2931 	struct smu_context *smu = handle;
2932 	int ret = 0;
2933 
2934 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2935 		return -EOPNOTSUPP;
2936 
2937 	if (smu->ppt_funcs->enable_mgpu_fan_boost)
2938 		ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);
2939 
2940 	return ret;
2941 }
2942 
2943 static int smu_gfx_state_change_set(void *handle,
2944 				    uint32_t state)
2945 {
2946 	struct smu_context *smu = handle;
2947 	int ret = 0;
2948 
2949 	if (smu->ppt_funcs->gfx_state_change_set)
2950 		ret = smu->ppt_funcs->gfx_state_change_set(smu, state);
2951 
2952 	return ret;
2953 }
2954 
2955 int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
2956 {
2957 	int ret = 0;
2958 
2959 	if (smu->ppt_funcs->smu_handle_passthrough_sbr)
2960 		ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable);
2961 
2962 	return ret;
2963 }
2964 
2965 int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc)
2966 {
2967 	int ret = -EOPNOTSUPP;
2968 
2969 	if (smu->ppt_funcs &&
2970 		smu->ppt_funcs->get_ecc_info)
2971 		ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc);
2972 
2973 	return ret;
2975 }
2976 
2977 static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)
2978 {
2979 	struct smu_context *smu = handle;
2980 	struct smu_table_context *smu_table = &smu->smu_table;
2981 	struct smu_table *memory_pool = &smu_table->memory_pool;
2982 
2983 	if (!addr || !size)
2984 		return -EINVAL;
2985 
2986 	*addr = NULL;
2987 	*size = 0;
2988 	if (memory_pool->bo) {
2989 		*addr = memory_pool->cpu_addr;
2990 		*size = memory_pool->size;
2991 	}
2992 
2993 	return 0;
2994 }
2995 
2996 static const struct amd_pm_funcs swsmu_pm_funcs = {
2997 	/* export for sysfs */
2998 	.set_fan_control_mode    = smu_set_fan_control_mode,
2999 	.get_fan_control_mode    = smu_get_fan_control_mode,
3000 	.set_fan_speed_pwm   = smu_set_fan_speed_pwm,
3001 	.get_fan_speed_pwm   = smu_get_fan_speed_pwm,
3002 	.force_clock_level       = smu_force_ppclk_levels,
3003 	.print_clock_levels      = smu_print_ppclk_levels,
3004 	.emit_clock_levels       = smu_emit_ppclk_levels,
3005 	.force_performance_level = smu_force_performance_level,
3006 	.read_sensor             = smu_read_sensor,
3007 	.get_performance_level   = smu_get_performance_level,
3008 	.get_current_power_state = smu_get_current_power_state,
3009 	.get_fan_speed_rpm       = smu_get_fan_speed_rpm,
3010 	.set_fan_speed_rpm       = smu_set_fan_speed_rpm,
3011 	.get_pp_num_states       = smu_get_power_num_states,
3012 	.get_pp_table            = smu_sys_get_pp_table,
3013 	.set_pp_table            = smu_sys_set_pp_table,
3014 	.switch_power_profile    = smu_switch_power_profile,
3015 	/* export to amdgpu */
3016 	.dispatch_tasks          = smu_handle_dpm_task,
3017 	.load_firmware           = smu_load_microcode,
3018 	.set_powergating_by_smu  = smu_dpm_set_power_gate,
3019 	.set_power_limit         = smu_set_power_limit,
3020 	.get_power_limit         = smu_get_power_limit,
3021 	.get_power_profile_mode  = smu_get_power_profile_mode,
3022 	.set_power_profile_mode  = smu_set_power_profile_mode,
3023 	.odn_edit_dpm_table      = smu_od_edit_dpm_table,
3024 	.set_mp1_state           = smu_set_mp1_state,
3025 	.gfx_state_change_set    = smu_gfx_state_change_set,
3026 	/* export to DC */
3027 	.get_sclk                         = smu_get_sclk,
3028 	.get_mclk                         = smu_get_mclk,
3029 	.display_configuration_change     = smu_display_configuration_change,
3030 	.get_clock_by_type_with_latency   = smu_get_clock_by_type_with_latency,
3031 	.display_clock_voltage_request    = smu_display_clock_voltage_request,
3032 	.enable_mgpu_fan_boost            = smu_enable_mgpu_fan_boost,
3033 	.set_active_display_count         = smu_set_display_count,
3034 	.set_min_deep_sleep_dcefclk       = smu_set_deep_sleep_dcefclk,
3035 	.get_asic_baco_capability         = smu_get_baco_capability,
3036 	.set_asic_baco_state              = smu_baco_set_state,
3037 	.get_ppfeature_status             = smu_sys_get_pp_feature_mask,
3038 	.set_ppfeature_status             = smu_sys_set_pp_feature_mask,
3039 	.asic_reset_mode_2                = smu_mode2_reset,
3040 	.set_df_cstate                    = smu_set_df_cstate,
3041 	.set_xgmi_pstate                  = smu_set_xgmi_pstate,
3042 	.get_gpu_metrics                  = smu_sys_get_gpu_metrics,
3043 	.set_watermarks_for_clock_ranges     = smu_set_watermarks_for_clock_ranges,
3044 	.display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch,
3045 	.get_max_sustainable_clocks_by_dc    = smu_get_max_sustainable_clocks_by_dc,
3046 	.get_uclk_dpm_states              = smu_get_uclk_dpm_states,
3047 	.get_dpm_clock_table              = smu_get_dpm_clock_table,
3048 	.get_smu_prv_buf_details = smu_get_prv_buffer_details,
3049 };
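
/*
 * swsmu_pm_funcs is the only surface the rest of amdgpu sees: callers
 * dispatch through adev->powerplay.pp_funcs with pp_handle rather
 * than calling smu_* symbols directly. A hedged sketch of that
 * indirection from a caller's perspective:
 *
 *	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 *	uint32_t rpm;
 *
 *	if (pp_funcs->get_fan_speed_rpm)
 *		ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle,
 *						  &rpm);
 */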
3050 
3051 int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
3052 		       uint64_t event_arg)
3053 {
3054 	int ret = -EINVAL;
3055 
3056 	if (smu->ppt_funcs->wait_for_event)
3057 		ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg);
3058 
3059 	return ret;
3060 }
3061 
3062 int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size)
3063 {
3065 	if (!smu->ppt_funcs->stb_collect_info || !smu->stb_context.enabled)
3066 		return -EOPNOTSUPP;
3067 
3068 	/* Confirm the allocated buffer is of the correct size */
3069 	if (size != smu->stb_context.stb_buf_size)
3070 		return -EINVAL;
3071 
3072 	/*
3073 	 * No need to take the smu mutex as we access STB directly through
3074 	 * MMIO rather than the SMU messaging route (for now at least).
3075 	 * For register access, rely on the implementation's internal locking.
3076 	 */
3077 	return smu->ppt_funcs->stb_collect_info(smu, buf, size);
3078 }
3079 
3080 #if defined(CONFIG_DEBUG_FS)
3081 
3082 static int smu_stb_debugfs_open(struct inode *inode, struct file *filp)
3083 {
3084 	struct amdgpu_device *adev = filp->f_inode->i_private;
3085 	struct smu_context *smu = adev->powerplay.pp_handle;
3086 	unsigned char *buf;
3087 	int r;
3088 
3089 	buf = kvmalloc_array(smu->stb_context.stb_buf_size, sizeof(*buf), GFP_KERNEL);
3090 	if (!buf)
3091 		return -ENOMEM;
3092 
3093 	r = smu_stb_collect_info(smu, buf, smu->stb_context.stb_buf_size);
3094 	if (r)
3095 		goto out;
3096 
3097 	filp->private_data = buf;
3098 
3099 	return 0;
3100 
3101 out:
3102 	kvfree(buf);
3103 	return r;
3104 }
3105 
3106 static ssize_t smu_stb_debugfs_read(struct file *filp, char __user *buf, size_t size,
3107 				loff_t *pos)
3108 {
3109 	struct amdgpu_device *adev = filp->f_inode->i_private;
3110 	struct smu_context *smu = adev->powerplay.pp_handle;
3111 
3113 	if (!filp->private_data)
3114 		return -EINVAL;
3115 
3116 	return simple_read_from_buffer(buf,
3117 				       size,
3118 				       pos, filp->private_data,
3119 				       smu->stb_context.stb_buf_size);
3120 }
3121 
3122 static int smu_stb_debugfs_release(struct inode *inode, struct file *filp)
3123 {
3124 	kvfree(filp->private_data);
3125 	filp->private_data = NULL;
3126 
3127 	return 0;
3128 }
3129 
3130 /*
3131  * We have to define not only a read method but also open and
3132  * release methods: .read returns at most PAGE_SIZE of data per
3133  * call and so may be invoked multiple times for one dump.
3134  * We therefore allocate the STB buffer in .open and release it
3135  * in .release.
3136  */
3137 static const struct file_operations smu_stb_debugfs_fops = {
3138 	.owner = THIS_MODULE,
3139 	.open = smu_stb_debugfs_open,
3140 	.read = smu_stb_debugfs_read,
3141 	.release = smu_stb_debugfs_release,
3142 	.llseek = default_llseek,
3143 };
3144 
3145 #endif
3146 
3147 void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev)
3148 {
3149 #if defined(CONFIG_DEBUG_FS)
3150 
3151 	struct smu_context *smu = adev->powerplay.pp_handle;
3152 
3153 	if (!smu || !smu->stb_context.stb_buf_size)
3154 		return;
3155 
3156 	debugfs_create_file_size("amdgpu_smu_stb_dump",
3157 			    S_IRUSR,
3158 			    adev_to_drm(adev)->primary->debugfs_root,
3159 			    adev,
3160 			    &smu_stb_debugfs_fops,
3161 			    smu->stb_context.stb_buf_size);
3162 #endif
3163 }
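
/*
 * The STB dump created above is consumed from userspace through
 * debugfs. A hypothetical userspace sketch (assumes debugfs is
 * mounted at the usual location and the device is card 0):
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/sys/kernel/debug/dri/0/amdgpu_smu_stb_dump",
 *		      O_RDONLY);
 *	char chunk[4096];
 *	ssize_t n;
 *
 *	while ((n = read(fd, chunk, sizeof(chunk))) > 0)
 *		fwrite(chunk, 1, n, stdout);
 *	close(fd);
 */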
3164 
3165 int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size)
3166 {
3167 	int ret = 0;
3168 
3169 	if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_pages_num)
3170 		ret = smu->ppt_funcs->send_hbm_bad_pages_num(smu, size);
3171 
3172 	return ret;
3173 }
3174 
3175 int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size)
3176 {
3177 	int ret = 0;
3178 
3179 	if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_channel_flag)
3180 		ret = smu->ppt_funcs->send_hbm_bad_channel_flag(smu, size);
3181 
3182 	return ret;
3183 }
3184