1 /*	$NetBSD: amdgpu_smu.c,v 1.5 2021/12/19 12:37:54 riastradh Exp $	*/
2 
3 /*
4  * Copyright 2019 Advanced Micro Devices, Inc.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  */
24 
25 #include <sys/cdefs.h>
26 __KERNEL_RCSID(0, "$NetBSD: amdgpu_smu.c,v 1.5 2021/12/19 12:37:54 riastradh Exp $");
27 
28 #include <linux/firmware.h>
29 #include <linux/pci.h>
30 
31 #include "pp_debug.h"
32 #include "amdgpu.h"
33 #include "amdgpu_smu.h"
34 #include "smu_internal.h"
35 #include "soc15_common.h"
36 #include "smu_v11_0.h"
37 #include "smu_v12_0.h"
38 #include "atom.h"
39 #include "amd_pcie.h"
40 #include "vega20_ppt.h"
41 #include "arcturus_ppt.h"
42 #include "navi10_ppt.h"
43 #include "renoir_ppt.h"
44 
45 #include <linux/nbsd-namespace.h>
46 
47 #undef __SMU_DUMMY_MAP
48 #define __SMU_DUMMY_MAP(type)	#type
49 static const char* __smu_message_names[] = {
50 	SMU_MESSAGE_TYPES
51 };
52 
53 const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type)
54 {
55 	if (type < 0 || type >= SMU_MSG_MAX_COUNT)
56 		return "unknown smu message";
57 	return __smu_message_names[type];
58 }
59 
60 #undef __SMU_DUMMY_MAP
61 #define __SMU_DUMMY_MAP(fea)	#fea
62 static const char* __smu_feature_names[] = {
63 	SMU_FEATURE_MASKS
64 };
65 
66 const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature)
67 {
68 	if (feature < 0 || feature >= SMU_FEATURE_COUNT)
69 		return "unknown smu feature";
70 	return __smu_feature_names[feature];
71 }
72 
73 size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
74 {
75 	size_t size = 0;
76 	int ret = 0, i = 0;
77 	uint32_t feature_mask[2] = { 0 };
78 	int32_t feature_index = 0;
79 	uint32_t count = 0;
80 	uint32_t sort_feature[SMU_FEATURE_COUNT];
81 	uint64_t hw_feature_count = 0;
82 
83 	mutex_lock(&smu->mutex);
84 
85 	ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
86 	if (ret)
87 		goto failed;
88 
89 	size = sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
90 			feature_mask[1], feature_mask[0]);
91 
92 	for (i = 0; i < SMU_FEATURE_COUNT; i++) {
93 		feature_index = smu_feature_get_index(smu, i);
94 		if (feature_index < 0)
95 			continue;
96 		sort_feature[feature_index] = i;
97 		hw_feature_count++;
98 	}
99 
100 	for (i = 0; i < hw_feature_count; i++) {
101 		size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
102 			       count++,
103 			       smu_get_feature_name(smu, sort_feature[i]),
104 			       i,
105 			       !!smu_feature_is_enabled(smu, sort_feature[i]) ?
106 			       "enabled" : "disabled");
107 	}
108 
109 failed:
110 	mutex_unlock(&smu->mutex);
111 
112 	return size;
113 }
114 
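/*
 * Helper for smu_sys_set_pp_feature_mask()/smu_feature_set_enabled(): the
 * 64-bit feature mask is split into low/high 32-bit halves and sent to the
 * SMU with the (En|Dis)ableSmuFeaturesLow/High messages, then the cached
 * feature->enabled bitmap is updated under feature->mutex to match.
 */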
115 static int smu_feature_update_enable_state(struct smu_context *smu,
116 					   uint64_t feature_mask,
117 					   bool enabled)
118 {
119 	struct smu_feature *feature = &smu->smu_feature;
120 	uint32_t feature_low = 0, feature_high = 0;
121 	int ret = 0;
122 
123 	if (!smu->pm_enabled)
124 		return ret;
125 
126 	feature_low = (feature_mask >> 0) & 0xffffffff;
127 	feature_high = (feature_mask >> 32) & 0xffffffff;
128 
129 	if (enabled) {
130 		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
131 						  feature_low);
132 		if (ret)
133 			return ret;
134 		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
135 						  feature_high);
136 		if (ret)
137 			return ret;
138 	} else {
139 		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
140 						  feature_low);
141 		if (ret)
142 			return ret;
143 		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
144 						  feature_high);
145 		if (ret)
146 			return ret;
147 	}
148 
149 	mutex_lock(&feature->mutex);
150 	if (enabled)
151 		bitmap_or(feature->enabled, feature->enabled,
152 				(unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
153 	else
154 		bitmap_andnot(feature->enabled, feature->enabled,
155 				(unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
156 	mutex_unlock(&feature->mutex);
157 
158 	return ret;
159 }
160 
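/*
 * Apply a new feature mask from sysfs: the currently enabled mask is read
 * back first, and only the bits that actually differ are enabled
 * (~enabled & new_mask) or disabled (enabled & ~new_mask).
 */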
161 int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
162 {
163 	int ret = 0;
164 	uint32_t feature_mask[2] = { 0 };
165 	uint64_t feature_2_enabled = 0;
166 	uint64_t feature_2_disabled = 0;
167 	uint64_t feature_enables = 0;
168 
169 	mutex_lock(&smu->mutex);
170 
171 	ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
172 	if (ret)
173 		goto out;
174 
175 	feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]);
176 
177 	feature_2_enabled  = ~feature_enables & new_mask;
178 	feature_2_disabled = feature_enables & ~new_mask;
179 
180 	if (feature_2_enabled) {
181 		ret = smu_feature_update_enable_state(smu, feature_2_enabled, true);
182 		if (ret)
183 			goto out;
184 	}
185 	if (feature_2_disabled) {
186 		ret = smu_feature_update_enable_state(smu, feature_2_disabled, false);
187 		if (ret)
188 			goto out;
189 	}
190 
191 out:
192 	mutex_unlock(&smu->mutex);
193 
194 	return ret;
195 }
196 
197 int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
198 {
199 	int ret = 0;
200 
201 	if (!if_version && !smu_version)
202 		return -EINVAL;
203 
204 	if (if_version) {
205 		ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
206 		if (ret)
207 			return ret;
208 
209 		ret = smu_read_smc_arg(smu, if_version);
210 		if (ret)
211 			return ret;
212 	}
213 
214 	if (smu_version) {
215 		ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion);
216 		if (ret)
217 			return ret;
218 
219 		ret = smu_read_smc_arg(smu, smu_version);
220 		if (ret)
221 			return ret;
222 	}
223 
224 	return ret;
225 }
226 
227 int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
228 			    uint32_t min, uint32_t max)
229 {
230 	int ret = 0;
231 
232 	if (min <= 0 && max <= 0)
233 		return -EINVAL;
234 
235 	if (!smu_clk_dpm_is_enabled(smu, clk_type))
236 		return 0;
237 
238 	ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max);
239 	return ret;
240 }
241 
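/*
 * Hard min/max limits are programmed with one message per bound; the 32-bit
 * parameter packs the clock index in the upper 16 bits and the frequency
 * limit in the lower 16 bits.
 */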
242 int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
243 			    uint32_t min, uint32_t max)
244 {
245 	int ret = 0, clk_id = 0;
246 	uint32_t param;
247 
248 	if (min <= 0 && max <= 0)
249 		return -EINVAL;
250 
251 	if (!smu_clk_dpm_is_enabled(smu, clk_type))
252 		return 0;
253 
254 	clk_id = smu_clk_get_index(smu, clk_type);
255 	if (clk_id < 0)
256 		return clk_id;
257 
258 	if (max > 0) {
259 		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
260 		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
261 						  param);
262 		if (ret)
263 			return ret;
264 	}
265 
266 	if (min > 0) {
267 		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
268 		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
269 						  param);
270 		if (ret)
271 			return ret;
272 	}
273 
274 
275 	return ret;
276 }
277 
278 int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
279 			   uint32_t *min, uint32_t *max, bool lock_needed)
280 {
281 	uint32_t clock_limit;
282 	int ret = 0;
283 
284 	if (!min && !max)
285 		return -EINVAL;
286 
287 	if (lock_needed)
288 		mutex_lock(&smu->mutex);
289 
290 	if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
291 		switch (clk_type) {
292 		case SMU_MCLK:
293 		case SMU_UCLK:
294 			clock_limit = smu->smu_table.boot_values.uclk;
295 			break;
296 		case SMU_GFXCLK:
297 		case SMU_SCLK:
298 			clock_limit = smu->smu_table.boot_values.gfxclk;
299 			break;
300 		case SMU_SOCCLK:
301 			clock_limit = smu->smu_table.boot_values.socclk;
302 			break;
303 		default:
304 			clock_limit = 0;
305 			break;
306 		}
307 
308 		/* clock in MHz units */
309 		if (min)
310 			*min = clock_limit / 100;
311 		if (max)
312 			*max = clock_limit / 100;
313 	} else {
314 		/*
315 		 * TODO: Have each ASIC (ASIC_ppt funcs) control the callbacks exposed to the
316 		 * core driver, and keep helpers for functionality that is common (SMU_v11_x | SMU_v12_x funcs).
317 		 */
318 		ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);
319 	}
320 
321 	if (lock_needed)
322 		mutex_unlock(&smu->mutex);
323 
324 	return ret;
325 }
326 
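/*
 * Query the frequency of a single DPM level: the parameter packs the clock
 * index in the upper 16 bits and the level in the lower 16 bits.  A level of
 * 0xff asks the SMU for the number of levels instead (see
 * smu_get_dpm_level_count() below).
 */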
327 int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
328 			      uint16_t level, uint32_t *value)
329 {
330 	int ret = 0, clk_id = 0;
331 	uint32_t param;
332 
333 	if (!value)
334 		return -EINVAL;
335 
336 	if (!smu_clk_dpm_is_enabled(smu, clk_type))
337 		return 0;
338 
339 	clk_id = smu_clk_get_index(smu, clk_type);
340 	if (clk_id < 0)
341 		return clk_id;
342 
343 	param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
344 
345 	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
346 					  param);
347 	if (ret)
348 		return ret;
349 
350 	ret = smu_read_smc_arg(smu, &param);
351 	if (ret)
352 		return ret;
353 
354 	/* BIT31: 0 - fine grained DPM, 1 - discrete DPM
355 	 * discrete DPM is currently unsupported, so the bit is masked off */
356 	*value = param & 0x7fffffff;
357 
358 	return ret;
359 }
360 
361 int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
362 			    uint32_t *value)
363 {
364 	return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
365 }
366 
367 int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
368 			    uint32_t *min_value, uint32_t *max_value)
369 {
370 	int ret = 0;
371 	uint32_t level_count = 0;
372 
373 	if (!min_value && !max_value)
374 		return -EINVAL;
375 
376 	if (min_value) {
377 		/* by default, use the level 0 clock value as the min value */
378 		ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, min_value);
379 		if (ret)
380 			return ret;
381 	}
382 
383 	if (max_value) {
384 		ret = smu_get_dpm_level_count(smu, clk_type, &level_count);
385 		if (ret)
386 			return ret;
387 
388 		ret = smu_get_dpm_freq_by_index(smu, clk_type, level_count - 1, max_value);
389 		if (ret)
390 			return ret;
391 	}
392 
393 	return ret;
394 }
395 
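/*
 * Map a clock type to its DPM feature bit and report whether that feature is
 * currently enabled.  Clock types without a corresponding feature bit are
 * treated as always enabled.
 */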
396 bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
397 {
398 	enum smu_feature_mask feature_id = 0;
399 
400 	switch (clk_type) {
401 	case SMU_MCLK:
402 	case SMU_UCLK:
403 		feature_id = SMU_FEATURE_DPM_UCLK_BIT;
404 		break;
405 	case SMU_GFXCLK:
406 	case SMU_SCLK:
407 		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
408 		break;
409 	case SMU_SOCCLK:
410 		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
411 		break;
412 	default:
413 		return true;
414 	}
415 
416 	if (!smu_feature_is_enabled(smu, feature_id)) {
417 		return false;
418 	}
419 
420 	return true;
421 }
422 
423 /**
424  * smu_dpm_set_power_gate - power gate/ungate the specific IP block
425  *
426  * @smu:        smu_context pointer
427  * @block_type: the IP block to power gate/ungate
428  * @gate:       to power gate if true, ungate otherwise
429  *
430  * This API takes no smu->mutex lock protection because:
431  * 1. It is either called by another IP block (gfx/sdma/vcn/uvd/vce),
432  *    in which case the caller guarantees it is free of race conditions.
433  * 2. Or it is called on a user request to set power_dpm_force_performance_level,
434  *    in which case smu->mutex protection is already enforced by the parent
435  *    API smu_force_performance_level in the call path.
436  */
437 int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
438 			   bool gate)
439 {
440 	int ret = 0;
441 
442 	switch (block_type) {
443 	case AMD_IP_BLOCK_TYPE_UVD:
444 		ret = smu_dpm_set_uvd_enable(smu, !gate);
445 		break;
446 	case AMD_IP_BLOCK_TYPE_VCE:
447 		ret = smu_dpm_set_vce_enable(smu, !gate);
448 		break;
449 	case AMD_IP_BLOCK_TYPE_GFX:
450 		ret = smu_gfx_off_control(smu, gate);
451 		break;
452 	case AMD_IP_BLOCK_TYPE_SDMA:
453 		ret = smu_powergate_sdma(smu, gate);
454 		break;
455 	case AMD_IP_BLOCK_TYPE_JPEG:
456 		ret = smu_dpm_set_jpeg_enable(smu, !gate);
457 		break;
458 	default:
459 		break;
460 	}
461 
462 	return ret;
463 }
464 
465 int smu_get_power_num_states(struct smu_context *smu,
466 			     struct pp_states_info *state_info)
467 {
468 	if (!state_info)
469 		return -EINVAL;
470 
471 	/* power states are not supported */
472 	memset(state_info, 0, sizeof(struct pp_states_info));
473 	state_info->nums = 1;
474 	state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
475 
476 	return 0;
477 }
478 
479 int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
480 			   void *data, uint32_t *size)
481 {
482 	struct smu_power_context *smu_power = &smu->smu_power;
483 	struct smu_power_gate *power_gate = &smu_power->power_gate;
484 	int ret = 0;
485 
486 	if (!data || !size)
487 		return -EINVAL;
488 
489 	switch (sensor) {
490 	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
491 		*((uint32_t *)data) = smu->pstate_sclk;
492 		*size = 4;
493 		break;
494 	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
495 		*((uint32_t *)data) = smu->pstate_mclk;
496 		*size = 4;
497 		break;
498 	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
499 		ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
500 		*size = 8;
501 		break;
502 	case AMDGPU_PP_SENSOR_UVD_POWER:
503 		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
504 		*size = 4;
505 		break;
506 	case AMDGPU_PP_SENSOR_VCE_POWER:
507 		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
508 		*size = 4;
509 		break;
510 	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
511 		*(uint32_t *)data = power_gate->vcn_gated ? 0 : 1;
512 		*size = 4;
513 		break;
514 	default:
515 		ret = -EINVAL;
516 		break;
517 	}
518 
519 	if (ret)
520 		*size = 0;
521 
522 	return ret;
523 }
524 
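/*
 * Exchange a table between driver memory and the SMU: for drv2smu the data is
 * copied into the shared driver-table bo (with an HDP flush) before the
 * TransferTableDram2Smu message is sent; for the reverse direction the
 * TransferTableSmu2Dram message is sent first and the result copied out.
 */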
525 int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
526 		     void *table_data, bool drv2smu)
527 {
528 	struct smu_table_context *smu_table = &smu->smu_table;
529 	struct amdgpu_device *adev = smu->adev;
530 	struct smu_table *table = &smu_table->driver_table;
531 	int table_id = smu_table_get_index(smu, table_index);
532 	uint32_t table_size;
533 	int ret = 0;
534 
535 	if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
536 		return -EINVAL;
537 
538 	table_size = smu_table->tables[table_index].size;
539 
540 	if (drv2smu) {
541 		memcpy(table->cpu_addr, table_data, table_size);
542 		/*
543 		 * Flush the HDP cache to ensure the content seen by
544 		 * the GPU is consistent with the CPU.
545 		 */
546 		amdgpu_asic_flush_hdp(adev, NULL);
547 	}
548 
549 	ret = smu_send_smc_msg_with_param(smu, drv2smu ?
550 					  SMU_MSG_TransferTableDram2Smu :
551 					  SMU_MSG_TransferTableSmu2Dram,
552 					  table_id | ((argument & 0xFFFF) << 16));
553 	if (ret)
554 		return ret;
555 
556 	if (!drv2smu) {
557 		amdgpu_asic_flush_hdp(adev, NULL);
558 		memcpy(table_data, table->cpu_addr, table_size);
559 	}
560 
561 	return ret;
562 }
563 
564 bool is_support_sw_smu(struct amdgpu_device *adev)
565 {
566 	if (adev->asic_type == CHIP_VEGA20)
567 		return (amdgpu_dpm == 2) ? true : false;
568 	else if (adev->asic_type >= CHIP_ARCTURUS) {
569 		if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
570 			return false;
571 		else
572 			return true;
573 	} else
574 		return false;
575 }
576 
577 bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
578 {
579 	if (!is_support_sw_smu(adev))
580 		return false;
581 
582 	if (adev->asic_type == CHIP_VEGA20)
583 		return true;
584 
585 	return false;
586 }
587 
588 int smu_sys_get_pp_table(struct smu_context *smu, const void **table)
589 {
590 	struct smu_table_context *smu_table = &smu->smu_table;
591 	uint32_t powerplay_table_size;
592 
593 	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
594 		return -EINVAL;
595 
596 	mutex_lock(&smu->mutex);
597 
598 	if (smu_table->hardcode_pptable)
599 		*table = smu_table->hardcode_pptable;
600 	else
601 		*table = smu_table->power_play_table;
602 
603 	powerplay_table_size = smu_table->power_play_table_size;
604 
605 	mutex_unlock(&smu->mutex);
606 
607 	return powerplay_table_size;
608 }
609 
610 int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
611 {
612 	struct smu_table_context *smu_table = &smu->smu_table;
613 	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
614 	int ret = 0;
615 
616 	if (!smu->pm_enabled)
617 		return -EINVAL;
618 	if (header->usStructureSize != size) {
619 		pr_err("pp table size not matched !\n");
620 		return -EIO;
621 	}
622 
623 	mutex_lock(&smu->mutex);
624 	if (!smu_table->hardcode_pptable)
625 		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
626 	if (!smu_table->hardcode_pptable) {
627 		ret = -ENOMEM;
628 		goto failed;
629 	}
630 
631 	memcpy(smu_table->hardcode_pptable, buf, size);
632 	smu_table->power_play_table = smu_table->hardcode_pptable;
633 	smu_table->power_play_table_size = size;
634 
635 	/*
636 	 * A special hw_fini action (for Navi1x, the DPM disablement will be
637 	 * skipped) may be needed for custom pptable uploading.
638 	 */
639 	smu->uploading_custom_pp_table = true;
640 
641 	ret = smu_reset(smu);
642 	if (ret)
643 		pr_info("smu reset failed, ret = %d\n", ret);
644 
645 	smu->uploading_custom_pp_table = false;
646 
647 failed:
648 	mutex_unlock(&smu->mutex);
649 	return ret;
650 }
651 
652 int smu_feature_init_dpm(struct smu_context *smu)
653 {
654 	struct smu_feature *feature = &smu->smu_feature;
655 	int ret = 0;
656 	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
657 
658 	if (!smu->pm_enabled)
659 		return ret;
660 	mutex_lock(&feature->mutex);
661 	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
662 	mutex_unlock(&feature->mutex);
663 
664 	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
665 					     SMU_FEATURE_MAX/32);
666 	if (ret)
667 		return ret;
668 
669 	mutex_lock(&feature->mutex);
670 	bitmap_or(feature->allowed, feature->allowed,
671 		      (unsigned long *)allowed_feature_mask,
672 		      feature->feature_num);
673 	mutex_unlock(&feature->mutex);
674 
675 	return ret;
676 }
677 
678 
679 int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
680 {
681 	struct smu_feature *feature = &smu->smu_feature;
682 	int feature_id;
683 	int ret = 0;
684 
685 	if (smu->is_apu)
686 		return 1;
687 
688 	feature_id = smu_feature_get_index(smu, mask);
689 	if (feature_id < 0)
690 		return 0;
691 
692 	WARN_ON(feature_id > feature->feature_num);
693 
694 	mutex_lock(&feature->mutex);
695 	ret = test_bit(feature_id, feature->enabled);
696 	mutex_unlock(&feature->mutex);
697 
698 	return ret;
699 }
700 
701 int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
702 			    bool enable)
703 {
704 	struct smu_feature *feature = &smu->smu_feature;
705 	int feature_id;
706 
707 	feature_id = smu_feature_get_index(smu, mask);
708 	if (feature_id < 0)
709 		return -EINVAL;
710 
711 	WARN_ON(feature_id > feature->feature_num);
712 
713 	return smu_feature_update_enable_state(smu,
714 					       1ULL << feature_id,
715 					       enable);
716 }
717 
718 int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
719 {
720 	struct smu_feature *feature = &smu->smu_feature;
721 	int feature_id;
722 	int ret = 0;
723 
724 	feature_id = smu_feature_get_index(smu, mask);
725 	if (feature_id < 0)
726 		return 0;
727 
728 	WARN_ON(feature_id > feature->feature_num);
729 
730 	mutex_lock(&feature->mutex);
731 	ret = test_bit(feature_id, feature->supported);
732 	mutex_unlock(&feature->mutex);
733 
734 	return ret;
735 }
736 
737 int smu_feature_set_supported(struct smu_context *smu,
738 			      enum smu_feature_mask mask,
739 			      bool enable)
740 {
741 	struct smu_feature *feature = &smu->smu_feature;
742 	int feature_id;
743 	int ret = 0;
744 
745 	feature_id = smu_feature_get_index(smu, mask);
746 	if (feature_id < 0)
747 		return -EINVAL;
748 
749 	WARN_ON(feature_id > feature->feature_num);
750 
751 	mutex_lock(&feature->mutex);
752 	if (enable)
753 		test_and_set_bit(feature_id, feature->supported);
754 	else
755 		test_and_clear_bit(feature_id, feature->supported);
756 	mutex_unlock(&feature->mutex);
757 
758 	return ret;
759 }
760 
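/*
 * Bind the ASIC-specific ppt_funcs table.  GFXOFF is masked off for Vega20
 * and Arcturus, and overdrive (od_enabled) is forced off on Arcturus even
 * when PP_OVERDRIVE_MASK is set.
 */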
761 static int smu_set_funcs(struct amdgpu_device *adev)
762 {
763 	struct smu_context *smu = &adev->smu;
764 
765 	if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
766 		smu->od_enabled = true;
767 
768 	switch (adev->asic_type) {
769 	case CHIP_VEGA20:
770 		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
771 		vega20_set_ppt_funcs(smu);
772 		break;
773 	case CHIP_NAVI10:
774 	case CHIP_NAVI14:
775 	case CHIP_NAVI12:
776 		navi10_set_ppt_funcs(smu);
777 		break;
778 	case CHIP_ARCTURUS:
779 		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
780 		arcturus_set_ppt_funcs(smu);
781 		/* OD is not supported on Arcturus */
782 		smu->od_enabled = false;
783 		break;
784 	case CHIP_RENOIR:
785 		renoir_set_ppt_funcs(smu);
786 		break;
787 	default:
788 		return -EINVAL;
789 	}
790 
791 	return 0;
792 }
793 
794 static int smu_early_init(void *handle)
795 {
796 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
797 	struct smu_context *smu = &adev->smu;
798 
799 	smu->adev = adev;
800 	smu->pm_enabled = !!amdgpu_dpm;
801 	smu->is_apu = false;
802 	mutex_init(&smu->mutex);
803 
804 	return smu_set_funcs(adev);
805 }
806 
807 static int smu_late_init(void *handle)
808 {
809 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
810 	struct smu_context *smu = &adev->smu;
811 
812 	if (!smu->pm_enabled)
813 		return 0;
814 
815 	smu_handle_task(&adev->smu,
816 			smu->smu_dpm.dpm_level,
817 			AMD_PP_TASK_COMPLETE_INIT,
818 			false);
819 
820 	return 0;
821 }
822 
823 int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
824 			    uint16_t *size, uint8_t *frev, uint8_t *crev,
825 			    uint8_t **addr)
826 {
827 	struct amdgpu_device *adev = smu->adev;
828 	uint16_t data_start;
829 
830 	if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
831 					   size, frev, crev, &data_start))
832 		return -EINVAL;
833 
834 	*addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;
835 
836 	return 0;
837 }
838 
839 static int smu_initialize_pptable(struct smu_context *smu)
840 {
841 	/* TODO */
842 	return 0;
843 }
844 
845 static int smu_smc_table_sw_init(struct smu_context *smu)
846 {
847 	int ret;
848 
849 	ret = smu_initialize_pptable(smu);
850 	if (ret) {
851 		pr_err("Failed to init smu_initialize_pptable!\n");
852 		return ret;
853 	}
854 
855 	/**
856 	 * Create the smu_table structure, and init smc tables such as
857 	 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
858 	 */
859 	ret = smu_init_smc_tables(smu);
860 	if (ret) {
861 		pr_err("Failed to init smc tables!\n");
862 		return ret;
863 	}
864 
865 	/**
866 	 * Create the smu_power_context structure, and allocate the smu_dpm_context
867 	 * and context size needed to fill the smu_power_context data.
868 	 */
869 	ret = smu_init_power(smu);
870 	if (ret) {
871 		pr_err("Failed to init smu_init_power!\n");
872 		return ret;
873 	}
874 
875 	return 0;
876 }
877 
878 static int smu_smc_table_sw_fini(struct smu_context *smu)
879 {
880 	int ret;
881 
882 	ret = smu_fini_smc_tables(smu);
883 	if (ret) {
884 		pr_err("Failed to smu_fini_smc_tables!\n");
885 		return ret;
886 	}
887 
888 	return 0;
889 }
890 
891 static int smu_sw_init(void *handle)
892 {
893 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
894 	struct smu_context *smu = &adev->smu;
895 	int ret;
896 
897 	smu->pool_size = adev->pm.smu_prv_buffer_size;
898 	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
899 	mutex_init(&smu->smu_feature.mutex);
900 	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
901 	bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
902 	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
903 
904 	mutex_init(&smu->smu_baco.mutex);
905 	smu->smu_baco.state = SMU_BACO_STATE_EXIT;
906 	smu->smu_baco.platform_support = false;
907 
908 	mutex_init(&smu->sensor_lock);
909 	mutex_init(&smu->metrics_lock);
910 
911 	smu->watermarks_bitmap = 0;
912 	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
913 	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
914 
915 	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
916 	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
917 	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
918 	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
919 	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
920 	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
921 	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
922 	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
923 
924 	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
925 	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
926 	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
927 	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
928 	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
929 	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
930 	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
931 	smu->display_config = &adev->pm.pm_display_cfg;
932 
933 	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
934 	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
935 	ret = smu_init_microcode(smu);
936 	if (ret) {
937 		pr_err("Failed to load smu firmware!\n");
938 		return ret;
939 	}
940 
941 	ret = smu_smc_table_sw_init(smu);
942 	if (ret) {
943 		pr_err("Failed to sw init smc table!\n");
944 		return ret;
945 	}
946 
947 	ret = smu_register_irq_handler(smu);
948 	if (ret) {
949 		pr_err("Failed to register smc irq handler!\n");
950 		return ret;
951 	}
952 
953 	return 0;
954 }
955 
956 static int smu_sw_fini(void *handle)
957 {
958 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
959 	struct smu_context *smu = &adev->smu;
960 	int ret;
961 
962 	kfree(smu->irq_source);
963 	smu->irq_source = NULL;
964 
965 	ret = smu_smc_table_sw_fini(smu);
966 	if (ret) {
967 		pr_err("Failed to sw fini smc table!\n");
968 		return ret;
969 	}
970 
971 	ret = smu_fini_power(smu);
972 	if (ret) {
973 		pr_err("Failed to smu_fini_power!\n");
974 		return ret;
975 	}
976 
977 	mutex_destroy(&smu->metrics_lock);
978 	mutex_destroy(&smu->sensor_lock);
979 	mutex_destroy(&smu->smu_baco.mutex);
980 	mutex_destroy(&smu->smu_feature.mutex);
981 	mutex_destroy(&smu->mutex);
982 
983 	return 0;
984 }
985 
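/*
 * Reserve the vram backing for SMU tables: the PMSTATUSLOG (tool) table gets
 * its own bo, while all other tables share a single driver-table bo sized to
 * the largest of them.
 */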
986 static int smu_init_fb_allocations(struct smu_context *smu)
987 {
988 	struct amdgpu_device *adev = smu->adev;
989 	struct smu_table_context *smu_table = &smu->smu_table;
990 	struct smu_table *tables = smu_table->tables;
991 	struct smu_table *driver_table = &(smu_table->driver_table);
992 	uint32_t max_table_size = 0;
993 	int ret, i;
994 
995 	/* VRAM allocation for tool table */
996 	if (tables[SMU_TABLE_PMSTATUSLOG].size) {
997 		ret = amdgpu_bo_create_kernel(adev,
998 					      tables[SMU_TABLE_PMSTATUSLOG].size,
999 					      tables[SMU_TABLE_PMSTATUSLOG].align,
1000 					      tables[SMU_TABLE_PMSTATUSLOG].domain,
1001 					      &tables[SMU_TABLE_PMSTATUSLOG].bo,
1002 					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
1003 					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
1004 		if (ret) {
1005 			pr_err("VRAM allocation for tool table failed!\n");
1006 			return ret;
1007 		}
1008 	}
1009 
1010 	/* VRAM allocation for driver table */
1011 	for (i = 0; i < SMU_TABLE_COUNT; i++) {
1012 		if (tables[i].size == 0)
1013 			continue;
1014 
1015 		if (i == SMU_TABLE_PMSTATUSLOG)
1016 			continue;
1017 
1018 		if (max_table_size < tables[i].size)
1019 			max_table_size = tables[i].size;
1020 	}
1021 
1022 	driver_table->size = max_table_size;
1023 	driver_table->align = PAGE_SIZE;
1024 	driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
1025 
1026 	ret = amdgpu_bo_create_kernel(adev,
1027 				      driver_table->size,
1028 				      driver_table->align,
1029 				      driver_table->domain,
1030 				      &driver_table->bo,
1031 				      &driver_table->mc_address,
1032 				      &driver_table->cpu_addr);
1033 	if (ret) {
1034 		pr_err("VRAM allocation for driver table failed!\n");
1035 		if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
1036 			amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
1037 					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
1038 					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
1039 	}
1040 
1041 	return ret;
1042 }
1043 
1044 static int smu_fini_fb_allocations(struct smu_context *smu)
1045 {
1046 	struct smu_table_context *smu_table = &smu->smu_table;
1047 	struct smu_table *tables = smu_table->tables;
1048 	struct smu_table *driver_table = &(smu_table->driver_table);
1049 
1050 	if (!tables)
1051 		return 0;
1052 
1053 	if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
1054 		amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
1055 				      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
1056 				      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
1057 
1058 	amdgpu_bo_free_kernel(&driver_table->bo,
1059 			      &driver_table->mc_address,
1060 			      &driver_table->cpu_addr);
1061 
1062 	return 0;
1063 }
1064 
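/*
 * Bring the SMC tables up for hw init and resume.  On first initialization
 * the vbios bootup values and the pptable are parsed and the vram backing is
 * allocated; unless running as an SR-IOV VF, the pptable is then written to
 * the SMU, the Run*Btc message is issued and the allowed features are
 * enabled, followed by display, deep-sleep, OD and power-limit setup.
 */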
1065 static int smu_smc_table_hw_init(struct smu_context *smu,
1066 				 bool initialize)
1067 {
1068 	struct amdgpu_device *adev = smu->adev;
1069 	int ret;
1070 
1071 	if (smu_is_dpm_running(smu) && adev->in_suspend) {
1072 		pr_info("dpm has been enabled\n");
1073 		return 0;
1074 	}
1075 
1076 	if (adev->asic_type != CHIP_ARCTURUS) {
1077 		ret = smu_init_display_count(smu, 0);
1078 		if (ret)
1079 			return ret;
1080 	}
1081 
1082 	if (initialize) {
1083 		/* get boot_values from vbios to set revision, gfxclk, etc. */
1084 		ret = smu_get_vbios_bootup_values(smu);
1085 		if (ret)
1086 			return ret;
1087 
1088 		ret = smu_setup_pptable(smu);
1089 		if (ret)
1090 			return ret;
1091 
1092 		ret = smu_get_clk_info_from_vbios(smu);
1093 		if (ret)
1094 			return ret;
1095 
1096 		/*
1097 		 * Check that the format_revision in the vbios is up to the pptable
1098 		 * header version, and that the structure size is not 0.
1099 		 */
1100 		ret = smu_check_pptable(smu);
1101 		if (ret)
1102 			return ret;
1103 
1104 		/*
1105 		 * allocate vram bos to store smc table contents.
1106 		 */
1107 		ret = smu_init_fb_allocations(smu);
1108 		if (ret)
1109 			return ret;
1110 
1111 		/*
1112 		 * Parse the pptable format and fill the PPTable_t smc_pptable in the
1113 		 * smu_table_context structure, then read the smc_dpm_table from the
1114 		 * vbios and fill it into smc_pptable.
1115 		 */
1116 		ret = smu_parse_pptable(smu);
1117 		if (ret)
1118 			return ret;
1119 
1120 		/*
1121 		 * Send the GetDriverIfVersion message to check whether the returned
1122 		 * value matches the DRIVER_IF_VERSION in the smc header.
1123 		 */
1124 		ret = smu_check_fw_version(smu);
1125 		if (ret)
1126 			return ret;
1127 	}
1128 
1129 	/* smu_dump_pptable(smu); */
1130 	if (!amdgpu_sriov_vf(adev)) {
1131 		ret = smu_set_driver_table_location(smu);
1132 		if (ret)
1133 			return ret;
1134 
1135 		/*
1136 		 * Copy the pptable bo in vram to the smc with SMU messages such as
1137 		 * SetDriverDramAddr and TransferTableDram2Smu.
1138 		 */
1139 		ret = smu_write_pptable(smu);
1140 		if (ret)
1141 			return ret;
1142 
1143 		/* issue Run*Btc msg */
1144 		ret = smu_run_btc(smu);
1145 		if (ret)
1146 			return ret;
1147 		ret = smu_feature_set_allowed_mask(smu);
1148 		if (ret)
1149 			return ret;
1150 
1151 		ret = smu_system_features_control(smu, true);
1152 		if (ret)
1153 			return ret;
1154 
1155 		if (adev->asic_type == CHIP_NAVI10) {
1156 			if ((adev->pdev->device == 0x731f && (adev->pdev->revision == 0xc2 ||
1157 							      adev->pdev->revision == 0xc3 ||
1158 							      adev->pdev->revision == 0xca ||
1159 							      adev->pdev->revision == 0xcb)) ||
1160 			    (adev->pdev->device == 0x66af && (adev->pdev->revision == 0xf3 ||
1161 							      adev->pdev->revision == 0xf4 ||
1162 							      adev->pdev->revision == 0xf5 ||
1163 							      adev->pdev->revision == 0xf6))) {
1164 				ret = smu_disable_umc_cdr_12gbps_workaround(smu);
1165 				if (ret) {
1166 					pr_err("Workaround failed to disable UMC CDR feature on 12Gbps SKU!\n");
1167 					return ret;
1168 				}
1169 			}
1170 		}
1171 	}
1172 	if (adev->asic_type != CHIP_ARCTURUS) {
1173 		ret = smu_notify_display_change(smu);
1174 		if (ret)
1175 			return ret;
1176 
1177 		/*
1178 		 * Set the min deep sleep dcefclk to the bootup value from the vbios
1179 		 * via the SetMinDeepSleepDcefclk message.
1180 		 */
1181 		ret = smu_set_min_dcef_deep_sleep(smu);
1182 		if (ret)
1183 			return ret;
1184 	}
1185 
1186 	/*
1187 	 * Set initial values (obtained from the vbios) in the dpm tables context,
1188 	 * such as gfxclk, memclk, dcefclk, etc., and enable the DPM feature for
1189 	 * each type of clock.
1190 	 */
1191 	if (initialize) {
1192 		ret = smu_populate_smc_tables(smu);
1193 		if (ret)
1194 			return ret;
1195 
1196 		ret = smu_init_max_sustainable_clocks(smu);
1197 		if (ret)
1198 			return ret;
1199 	}
1200 
1201 	if (adev->asic_type != CHIP_ARCTURUS) {
1202 		ret = smu_override_pcie_parameters(smu);
1203 		if (ret)
1204 			return ret;
1205 	}
1206 
1207 	ret = smu_set_default_od_settings(smu, initialize);
1208 	if (ret)
1209 		return ret;
1210 
1211 	if (initialize) {
1212 		ret = smu_populate_umd_state_clk(smu);
1213 		if (ret)
1214 			return ret;
1215 
1216 		ret = smu_get_power_limit(smu, &smu->default_power_limit, false, false);
1217 		if (ret)
1218 			return ret;
1219 	}
1220 
1221 	/*
1222 	 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
1223 	 */
1224 	if (!amdgpu_sriov_vf(adev)) {
1225 		ret = smu_set_tool_table_location(smu);
1226 	}
1227 	if (!smu_is_dpm_running(smu))
1228 		pr_info("dpm has been disabled\n");
1229 
1230 	return ret;
1231 }
1232 
1233 /**
1234  * smu_alloc_memory_pool - allocate memory pool in the system memory
1235  *
1236  * @smu: smu_context pointer
1237  *
1238  * This memory pool is for SMC use; its location is communicated to the SMC
1239  * with the SetSystemVirtualDramAddr and DramLogSetDramAddr messages.
1240  *
1241  * Returns 0 on success, error on failure.
1242  */
1243 static int smu_alloc_memory_pool(struct smu_context *smu)
1244 {
1245 	struct amdgpu_device *adev = smu->adev;
1246 	struct smu_table_context *smu_table = &smu->smu_table;
1247 	struct smu_table *memory_pool = &smu_table->memory_pool;
1248 	uint64_t pool_size = smu->pool_size;
1249 	int ret = 0;
1250 
1251 	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
1252 		return ret;
1253 
1254 	memory_pool->size = pool_size;
1255 	memory_pool->align = PAGE_SIZE;
1256 	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;
1257 
1258 	switch (pool_size) {
1259 	case SMU_MEMORY_POOL_SIZE_256_MB:
1260 	case SMU_MEMORY_POOL_SIZE_512_MB:
1261 	case SMU_MEMORY_POOL_SIZE_1_GB:
1262 	case SMU_MEMORY_POOL_SIZE_2_GB:
1263 		ret = amdgpu_bo_create_kernel(adev,
1264 					      memory_pool->size,
1265 					      memory_pool->align,
1266 					      memory_pool->domain,
1267 					      &memory_pool->bo,
1268 					      &memory_pool->mc_address,
1269 					      &memory_pool->cpu_addr);
1270 		break;
1271 	default:
1272 		break;
1273 	}
1274 
1275 	return ret;
1276 }
1277 
1278 static int smu_free_memory_pool(struct smu_context *smu)
1279 {
1280 	struct smu_table_context *smu_table = &smu->smu_table;
1281 	struct smu_table *memory_pool = &smu_table->memory_pool;
1282 
1283 	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
1284 		return 0;
1285 
1286 	amdgpu_bo_free_kernel(&memory_pool->bo,
1287 			      &memory_pool->mc_address,
1288 			      &memory_pool->cpu_addr);
1289 
1290 	memset(memory_pool, 0, sizeof(struct smu_table));
1291 
1292 	return 0;
1293 }
1294 
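/*
 * When the firmware is not loaded through the PSP (and the ASIC predates
 * Navi10), load the SMC microcode directly through the load_microcode
 * callback, then verify that the SMC has come up via check_fw_status.
 */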
1295 static int smu_start_smc_engine(struct smu_context *smu)
1296 {
1297 	struct amdgpu_device *adev = smu->adev;
1298 	int ret = 0;
1299 
1300 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1301 		if (adev->asic_type < CHIP_NAVI10) {
1302 			if (smu->ppt_funcs->load_microcode) {
1303 				ret = smu->ppt_funcs->load_microcode(smu);
1304 				if (ret)
1305 					return ret;
1306 			}
1307 		}
1308 	}
1309 
1310 	if (smu->ppt_funcs->check_fw_status) {
1311 		ret = smu->ppt_funcs->check_fw_status(smu);
1312 		if (ret)
1313 			pr_err("SMC is not ready\n");
1314 	}
1315 
1316 	return ret;
1317 }
1318 
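/*
 * hw init sequence: start the SMC engine, ungate SDMA/VCN/JPEG and enable
 * gfx CGPG on APUs, then (when power management is enabled and not running
 * as a plain SR-IOV VF) initialize the DPM feature masks, the SMC tables,
 * the system-memory pool and thermal control.
 */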
1319 static int smu_hw_init(void *handle)
1320 {
1321 	int ret;
1322 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1323 	struct smu_context *smu = &adev->smu;
1324 
1325 	ret = smu_start_smc_engine(smu);
1326 	if (ret) {
1327 		pr_err("SMU is not ready yet!\n");
1328 		return ret;
1329 	}
1330 
1331 	if (smu->is_apu) {
1332 		smu_powergate_sdma(&adev->smu, false);
1333 		smu_powergate_vcn(&adev->smu, false);
1334 		smu_powergate_jpeg(&adev->smu, false);
1335 		smu_set_gfx_cgpg(&adev->smu, true);
1336 	}
1337 
1338 	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1339 		return 0;
1340 
1341 	if (!smu->pm_enabled)
1342 		return 0;
1343 
1344 	ret = smu_feature_init_dpm(smu);
1345 	if (ret)
1346 		goto failed;
1347 
1348 	ret = smu_smc_table_hw_init(smu, true);
1349 	if (ret)
1350 		goto failed;
1351 
1352 	ret = smu_alloc_memory_pool(smu);
1353 	if (ret)
1354 		goto failed;
1355 
1356 	/*
1357 	 * Use msg SetSystemVirtualDramAddr and DramLogSetDramAddr can notify
1358 	 * pool location.
1359 	 */
1360 	ret = smu_notify_memory_pool_location(smu);
1361 	if (ret)
1362 		goto failed;
1363 
1364 	ret = smu_start_thermal_control(smu);
1365 	if (ret)
1366 		goto failed;
1367 
1368 	if (!smu->pm_enabled)
1369 		adev->pm.dpm_enabled = false;
1370 	else
1371 		adev->pm.dpm_enabled = true;	/* TODO: will set dpm_enabled flag while VCN and DAL DPM is workable */
1372 
1373 	pr_info("SMU is initialized successfully!\n");
1374 
1375 	return 0;
1376 
1377 failed:
1378 	return ret;
1379 }
1380 
1381 static int smu_stop_dpms(struct smu_context *smu)
1382 {
1383 	return smu_system_features_control(smu, false);
1384 }
1385 
1386 static int smu_hw_fini(void *handle)
1387 {
1388 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1389 	struct smu_context *smu = &adev->smu;
1390 	struct smu_table_context *table_context = &smu->smu_table;
1391 	int ret = 0;
1392 
1393 	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1394 		return 0;
1395 
1396 	if (smu->is_apu) {
1397 		smu_powergate_sdma(&adev->smu, true);
1398 		smu_powergate_vcn(&adev->smu, true);
1399 		smu_powergate_jpeg(&adev->smu, true);
1400 	}
1401 
1402 	if (!smu->pm_enabled)
1403 		return 0;
1404 
1405 	if (!amdgpu_sriov_vf(adev)) {
1406 		ret = smu_stop_thermal_control(smu);
1407 		if (ret) {
1408 			pr_warn("Fail to stop thermal control!\n");
1409 			return ret;
1410 		}
1411 
1412 		/*
1413 		 * For custom pptable uploading, skip the DPM features
1414 		 * disable process on Navi1x ASICs.
1415 		 *   - As the gfx related features are under control of
1416 		 *     RLC on those ASICs. RLC reinitialization will be
1417 		 *     needed to re-enable them, which would cost much
1418 		 *     more effort.
1419 		 *
1420 		 *   - SMU firmware can handle the DPM reenablement
1421 		 *     properly.
1422 		 */
1423 		if (!smu->uploading_custom_pp_table ||
1424 				!((adev->asic_type >= CHIP_NAVI10) &&
1425 					(adev->asic_type <= CHIP_NAVI12))) {
1426 			ret = smu_stop_dpms(smu);
1427 			if (ret) {
1428 				pr_warn("Fail to stop Dpms!\n");
1429 				return ret;
1430 			}
1431 		}
1432 	}
1433 
1434 	kfree(table_context->driver_pptable);
1435 	table_context->driver_pptable = NULL;
1436 
1437 	kfree(table_context->max_sustainable_clocks);
1438 	table_context->max_sustainable_clocks = NULL;
1439 
1440 	kfree(table_context->overdrive_table);
1441 	table_context->overdrive_table = NULL;
1442 
1443 	ret = smu_fini_fb_allocations(smu);
1444 	if (ret)
1445 		return ret;
1446 
1447 	ret = smu_free_memory_pool(smu);
1448 	if (ret)
1449 		return ret;
1450 
1451 	return 0;
1452 }
1453 
1454 int smu_reset(struct smu_context *smu)
1455 {
1456 	struct amdgpu_device *adev = smu->adev;
1457 	int ret = 0;
1458 
1459 	ret = smu_hw_fini(adev);
1460 	if (ret)
1461 		return ret;
1462 
1463 	ret = smu_hw_init(adev);
1464 	if (ret)
1465 		return ret;
1466 
1467 	return ret;
1468 }
1469 
1470 static int smu_suspend(void *handle)
1471 {
1472 	int ret;
1473 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1474 	struct smu_context *smu = &adev->smu;
1475 	bool baco_feature_is_enabled = false;
1476 
1477 	if (!smu->pm_enabled)
1478 		return 0;
1479 
1480 	if (!smu->is_apu)
1481 		baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);
1482 
1483 	ret = smu_system_features_control(smu, false);
1484 	if (ret)
1485 		return ret;
1486 
1487 	if (baco_feature_is_enabled) {
1488 		ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
1489 		if (ret) {
1490 			pr_warn("set BACO feature enabled failed, return %d\n", ret);
1491 			return ret;
1492 		}
1493 	}
1494 
1495 	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
1496 
1497 	if (adev->asic_type >= CHIP_NAVI10 &&
1498 	    adev->gfx.rlc.funcs->stop)
1499 		adev->gfx.rlc.funcs->stop(adev);
1500 	if (smu->is_apu)
1501 		smu_set_gfx_cgpg(&adev->smu, false);
1502 
1503 	return 0;
1504 }
1505 
1506 static int smu_resume(void *handle)
1507 {
1508 	int ret;
1509 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1510 	struct smu_context *smu = &adev->smu;
1511 
1512 	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1513 		return 0;
1514 
1515 	if (!smu->pm_enabled)
1516 		return 0;
1517 
1518 	pr_info("SMU is resuming...\n");
1519 
1520 	ret = smu_start_smc_engine(smu);
1521 	if (ret) {
1522 		pr_err("SMU is not ready yet!\n");
1523 		goto failed;
1524 	}
1525 
1526 	ret = smu_smc_table_hw_init(smu, false);
1527 	if (ret)
1528 		goto failed;
1529 
1530 	ret = smu_start_thermal_control(smu);
1531 	if (ret)
1532 		goto failed;
1533 
1534 	if (smu->is_apu)
1535 		smu_set_gfx_cgpg(&adev->smu, true);
1536 
1537 	smu->disable_uclk_switch = 0;
1538 
1539 	pr_info("SMU is resumed successfully!\n");
1540 
1541 	return 0;
1542 
1543 failed:
1544 	return ret;
1545 }
1546 
1547 int smu_display_configuration_change(struct smu_context *smu,
1548 				     const struct amd_pp_display_configuration *display_config)
1549 {
1550 	int index = 0;
1551 	int num_of_active_display = 0;
1552 
1553 	if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
1554 		return -EINVAL;
1555 
1556 	if (!display_config)
1557 		return -EINVAL;
1558 
1559 	mutex_lock(&smu->mutex);
1560 
1561 	if (smu->ppt_funcs->set_deep_sleep_dcefclk)
1562 		smu->ppt_funcs->set_deep_sleep_dcefclk(smu,
1563 				display_config->min_dcef_deep_sleep_set_clk / 100);
1564 
1565 	for (index = 0; index < display_config->num_path_including_non_display; index++) {
1566 		if (display_config->displays[index].controller_id != 0)
1567 			num_of_active_display++;
1568 	}
1569 
1570 	smu_set_active_display_count(smu, num_of_active_display);
1571 
1572 	smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
1573 			   display_config->cpu_cc6_disable,
1574 			   display_config->cpu_pstate_disable,
1575 			   display_config->nb_pstate_switch_disable);
1576 
1577 	mutex_unlock(&smu->mutex);
1578 
1579 	return 0;
1580 }
1581 
1582 static int smu_get_clock_info(struct smu_context *smu,
1583 			      struct smu_clock_info *clk_info,
1584 			      enum smu_perf_level_designation designation)
1585 {
1586 	int ret;
1587 	struct smu_performance_level level = {0};
1588 
1589 	if (!clk_info)
1590 		return -EINVAL;
1591 
1592 	ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
1593 	if (ret)
1594 		return -EINVAL;
1595 
1596 	clk_info->min_mem_clk = level.memory_clock;
1597 	clk_info->min_eng_clk = level.core_clock;
1598 	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
1599 
1600 	ret = smu_get_perf_level(smu, designation, &level);
1601 	if (ret)
1602 		return -EINVAL;
1603 
1604 	clk_info->min_mem_clk = level.memory_clock;
1605 	clk_info->min_eng_clk = level.core_clock;
1606 	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
1607 
1608 	return 0;
1609 }
1610 
1611 int smu_get_current_clocks(struct smu_context *smu,
1612 			   struct amd_pp_clock_info *clocks)
1613 {
1614 	struct amd_pp_simple_clock_info simple_clocks = {0};
1615 	struct smu_clock_info hw_clocks;
1616 	int ret = 0;
1617 
1618 	if (!is_support_sw_smu(smu->adev))
1619 		return -EINVAL;
1620 
1621 	mutex_lock(&smu->mutex);
1622 
1623 	smu_get_dal_power_level(smu, &simple_clocks);
1624 
1625 	if (smu->support_power_containment)
1626 		ret = smu_get_clock_info(smu, &hw_clocks,
1627 					 PERF_LEVEL_POWER_CONTAINMENT);
1628 	else
1629 		ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);
1630 
1631 	if (ret) {
1632 		pr_err("Error in smu_get_clock_info\n");
1633 		goto failed;
1634 	}
1635 
1636 	clocks->min_engine_clock = hw_clocks.min_eng_clk;
1637 	clocks->max_engine_clock = hw_clocks.max_eng_clk;
1638 	clocks->min_memory_clock = hw_clocks.min_mem_clk;
1639 	clocks->max_memory_clock = hw_clocks.max_mem_clk;
1640 	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
1641 	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
1642 	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1643 	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1644 
1645 	if (simple_clocks.level == 0)
1646 		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
1647 	else
1648 		clocks->max_clocks_state = simple_clocks.level;
1649 
1650 	if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
1651 		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1652 		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1653 	}
1654 
1655 failed:
1656 	mutex_unlock(&smu->mutex);
1657 	return ret;
1658 }
1659 
1660 static int smu_set_clockgating_state(void *handle,
1661 				     enum amd_clockgating_state state)
1662 {
1663 	return 0;
1664 }
1665 
1666 static int smu_set_powergating_state(void *handle,
1667 				     enum amd_powergating_state state)
1668 {
1669 	return 0;
1670 }
1671 
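/*
 * Entering a profiling (UMD pstate) level saves the current dpm level and
 * ungates gfx clock/power gating; leaving it restores the saved level and
 * re-enables gfx CG/PG.
 */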
1672 static int smu_enable_umd_pstate(void *handle,
1673 		      enum amd_dpm_forced_level *level)
1674 {
1675 	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
1676 					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
1677 					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
1678 					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
1679 
1680 	struct smu_context *smu = (struct smu_context*)(handle);
1681 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1682 
1683 	if (!smu->is_apu && (!smu->pm_enabled || !smu_dpm_ctx->dpm_context))
1684 		return -EINVAL;
1685 
1686 	if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
1687 		/* enter umd pstate, save current level, disable gfx cg */
1688 		if (*level & profile_mode_mask) {
1689 			smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
1690 			smu_dpm_ctx->enable_umd_pstate = true;
1691 			amdgpu_device_ip_set_clockgating_state(smu->adev,
1692 							       AMD_IP_BLOCK_TYPE_GFX,
1693 							       AMD_CG_STATE_UNGATE);
1694 			amdgpu_device_ip_set_powergating_state(smu->adev,
1695 							       AMD_IP_BLOCK_TYPE_GFX,
1696 							       AMD_PG_STATE_UNGATE);
1697 		}
1698 	} else {
1699 		/* exit umd pstate, restore level, enable gfx cg */
1700 		if (!(*level & profile_mode_mask)) {
1701 			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
1702 				*level = smu_dpm_ctx->saved_dpm_level;
1703 			smu_dpm_ctx->enable_umd_pstate = false;
1704 			amdgpu_device_ip_set_clockgating_state(smu->adev,
1705 							       AMD_IP_BLOCK_TYPE_GFX,
1706 							       AMD_CG_STATE_GATE);
1707 			amdgpu_device_ip_set_powergating_state(smu->adev,
1708 							       AMD_IP_BLOCK_TYPE_GFX,
1709 							       AMD_PG_STATE_GATE);
1710 		}
1711 	}
1712 
1713 	return 0;
1714 }
1715 
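/*
 * Re-evaluate the dynamic power state: optionally refresh the display
 * configuration, apply the clock adjustment rules, switch to the requested
 * performance level if it changed, and (outside manual mode) re-select the
 * power profile of the highest-priority active workload bit.
 */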
1716 int smu_adjust_power_state_dynamic(struct smu_context *smu,
1717 				   enum amd_dpm_forced_level level,
1718 				   bool skip_display_settings)
1719 {
1720 	int ret = 0;
1721 	int index = 0;
1722 	long workload;
1723 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1724 
1725 	if (!smu->pm_enabled)
1726 		return -EINVAL;
1727 
1728 	if (!skip_display_settings) {
1729 		ret = smu_display_config_changed(smu);
1730 		if (ret) {
1731 			pr_err("Failed to change display config!");
1732 			return ret;
1733 		}
1734 	}
1735 
1736 	ret = smu_apply_clocks_adjust_rules(smu);
1737 	if (ret) {
1738 		pr_err("Failed to apply clocks adjust rules!");
1739 		return ret;
1740 	}
1741 
1742 	if (!skip_display_settings) {
1743 		ret = smu_notify_smc_display_config(smu);
1744 		if (ret) {
1745 			pr_err("Failed to notify smc display config!");
1746 			return ret;
1747 		}
1748 	}
1749 
1750 	if (smu_dpm_ctx->dpm_level != level) {
1751 		ret = smu_asic_set_performance_level(smu, level);
1752 		if (ret) {
1753 			pr_err("Failed to set performance level!");
1754 			return ret;
1755 		}
1756 
1757 		/* update the saved copy */
1758 		smu_dpm_ctx->dpm_level = level;
1759 	}
1760 
1761 	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1762 		index = fls(smu->workload_mask);
1763 		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1764 		workload = smu->workload_setting[index];
1765 
1766 		if (smu->power_profile_mode != workload)
1767 			smu_set_power_profile_mode(smu, &workload, 0, false);
1768 	}
1769 
1770 	return ret;
1771 }
1772 
1773 int smu_handle_task(struct smu_context *smu,
1774 		    enum amd_dpm_forced_level level,
1775 		    enum amd_pp_task task_id,
1776 		    bool lock_needed)
1777 {
1778 	int ret = 0;
1779 
1780 	if (lock_needed)
1781 		mutex_lock(&smu->mutex);
1782 
1783 	switch (task_id) {
1784 	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
1785 		ret = smu_pre_display_config_changed(smu);
1786 		if (ret)
1787 			goto out;
1788 		ret = smu_set_cpu_power_state(smu);
1789 		if (ret)
1790 			goto out;
1791 		ret = smu_adjust_power_state_dynamic(smu, level, false);
1792 		break;
1793 	case AMD_PP_TASK_COMPLETE_INIT:
1794 	case AMD_PP_TASK_READJUST_POWER_STATE:
1795 		ret = smu_adjust_power_state_dynamic(smu, level, true);
1796 		break;
1797 	default:
1798 		break;
1799 	}
1800 
1801 out:
1802 	if (lock_needed)
1803 		mutex_unlock(&smu->mutex);
1804 
1805 	return ret;
1806 }
1807 
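/*
 * Enable or disable a workload profile by setting/clearing its priority bit
 * in workload_mask; the profile actually applied is the one belonging to the
 * highest set bit (fls), i.e. the highest-priority active workload.
 */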
1808 int smu_switch_power_profile(struct smu_context *smu,
1809 			     enum PP_SMC_POWER_PROFILE type,
1810 			     bool en)
1811 {
1812 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1813 	long workload;
1814 	uint32_t index;
1815 
1816 	if (!smu->pm_enabled)
1817 		return -EINVAL;
1818 
1819 	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
1820 		return -EINVAL;
1821 
1822 	mutex_lock(&smu->mutex);
1823 
1824 	if (!en) {
1825 		smu->workload_mask &= ~(1 << smu->workload_prority[type]);
1826 		index = fls(smu->workload_mask);
1827 		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1828 		workload = smu->workload_setting[index];
1829 	} else {
1830 		smu->workload_mask |= (1 << smu->workload_prority[type]);
1831 		index = fls(smu->workload_mask);
1832 		index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1833 		workload = smu->workload_setting[index];
1834 	}
1835 
1836 	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
1837 		smu_set_power_profile_mode(smu, &workload, 0, false);
1838 
1839 	mutex_unlock(&smu->mutex);
1840 
1841 	return 0;
1842 }
1843 
1844 enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
1845 {
1846 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1847 	enum amd_dpm_forced_level level;
1848 
1849 	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1850 		return -EINVAL;
1851 
1852 	mutex_lock(&(smu->mutex));
1853 	level = smu_dpm_ctx->dpm_level;
1854 	mutex_unlock(&(smu->mutex));
1855 
1856 	return level;
1857 }
1858 
smu_force_performance_level(struct smu_context * smu,enum amd_dpm_forced_level level)1859 int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
1860 {
1861 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1862 	int ret = 0;
1863 
1864 	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1865 		return -EINVAL;
1866 
1867 	mutex_lock(&smu->mutex);
1868 
1869 	ret = smu_enable_umd_pstate(smu, &level);
1870 	if (ret) {
1871 		mutex_unlock(&smu->mutex);
1872 		return ret;
1873 	}
1874 
1875 	ret = smu_handle_task(smu, level,
1876 			      AMD_PP_TASK_READJUST_POWER_STATE,
1877 			      false);
1878 
1879 	mutex_unlock(&smu->mutex);
1880 
1881 	return ret;
1882 }
1883 
smu_set_display_count(struct smu_context * smu,uint32_t count)1884 int smu_set_display_count(struct smu_context *smu, uint32_t count)
1885 {
1886 	int ret = 0;
1887 
1888 	mutex_lock(&smu->mutex);
1889 	ret = smu_init_display_count(smu, count);
1890 	mutex_unlock(&smu->mutex);
1891 
1892 	return ret;
1893 }
1894 
smu_force_clk_levels(struct smu_context * smu,enum smu_clk_type clk_type,uint32_t mask,bool lock_needed)1895 int smu_force_clk_levels(struct smu_context *smu,
1896 			 enum smu_clk_type clk_type,
1897 			 uint32_t mask,
1898 			 bool lock_needed)
1899 {
1900 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1901 	int ret = 0;
1902 
1903 	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1904 		pr_debug("forcing clock levels is only allowed in manual dpm mode.\n");
1905 		return -EINVAL;
1906 	}
1907 
1908 	if (lock_needed)
1909 		mutex_lock(&smu->mutex);
1910 
1911 	if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
1912 		ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
1913 
1914 	if (lock_needed)
1915 		mutex_unlock(&smu->mutex);
1916 
1917 	return ret;
1918 }
1919 
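/*
 * Map the requested MP1 state to the matching PrepareMp1For* message
 * and send it to the SMU.  States with no message, or messages the
 * ASIC does not expose, are silently ignored.
 */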
smu_set_mp1_state(struct smu_context * smu,enum pp_mp1_state mp1_state)1920 int smu_set_mp1_state(struct smu_context *smu,
1921 		      enum pp_mp1_state mp1_state)
1922 {
1923 	uint16_t msg;
1924 	int ret;
1925 
1926 	/*
1927 	 * The SMC may not be fully ready; that is expected
1928 	 * when the IP block is masked, so just return
1929 	 * without reporting an error.
1930 	 */
1931 	if (!smu->pm_enabled)
1932 		return 0;
1933 
1934 	mutex_lock(&smu->mutex);
1935 
1936 	switch (mp1_state) {
1937 	case PP_MP1_STATE_SHUTDOWN:
1938 		msg = SMU_MSG_PrepareMp1ForShutdown;
1939 		break;
1940 	case PP_MP1_STATE_UNLOAD:
1941 		msg = SMU_MSG_PrepareMp1ForUnload;
1942 		break;
1943 	case PP_MP1_STATE_RESET:
1944 		msg = SMU_MSG_PrepareMp1ForReset;
1945 		break;
1946 	case PP_MP1_STATE_NONE:
1947 	default:
1948 		mutex_unlock(&smu->mutex);
1949 		return 0;
1950 	}
1951 
1952 	/* some ASICs may not support these messages */
1953 	if (smu_msg_get_index(smu, msg) < 0) {
1954 		mutex_unlock(&smu->mutex);
1955 		return 0;
1956 	}
1957 
1958 	ret = smu_send_smc_msg(smu, msg);
1959 	if (ret)
1960 		pr_err("[PrepareMp1] Failed!\n");
1961 
1962 	mutex_unlock(&smu->mutex);
1963 
1964 	return ret;
1965 }
1966 
smu_set_df_cstate(struct smu_context * smu,enum pp_df_cstate state)1967 int smu_set_df_cstate(struct smu_context *smu,
1968 		      enum pp_df_cstate state)
1969 {
1970 	int ret = 0;
1971 
1972 	/*
1973 	 * The SMC may not be fully ready; that is expected
1974 	 * when the IP block is masked, so just return
1975 	 * without reporting an error.
1976 	 */
1977 	if (!smu->pm_enabled)
1978 		return 0;
1979 
1980 	if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
1981 		return 0;
1982 
1983 	mutex_lock(&smu->mutex);
1984 
1985 	ret = smu->ppt_funcs->set_df_cstate(smu, state);
1986 	if (ret)
1987 		pr_err("[SetDfCstate] failed!\n");
1988 
1989 	mutex_unlock(&smu->mutex);
1990 
1991 	return ret;
1992 }
1993 
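/* Push the cached watermarks table out via smu_update_table(). */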
smu_write_watermarks_table(struct smu_context * smu)1994 int smu_write_watermarks_table(struct smu_context *smu)
1995 {
1996 	void *watermarks_table = smu->smu_table.watermarks_table;
1997 
1998 	if (!watermarks_table)
1999 		return -EINVAL;
2000 
2001 	return smu_update_table(smu,
2002 				SMU_TABLE_WATERMARKS,
2003 				0,
2004 				watermarks_table,
2005 				true);
2006 }
2007 
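/*
 * Fill the cached watermarks table from the display clock ranges.  This
 * only happens when watermarks are not disabled and both DCEFCLK and
 * SOCCLK DPM are enabled; the bitmap then marks the table as existing
 * but not yet loaded into the SMU.
 */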
smu_set_watermarks_for_clock_ranges(struct smu_context * smu,struct dm_pp_wm_sets_with_clock_ranges_soc15 * clock_ranges)2008 int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
2009 		struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
2010 {
2011 	void *table = smu->smu_table.watermarks_table;
2012 
2013 	if (!table)
2014 		return -EINVAL;
2015 
2016 	mutex_lock(&smu->mutex);
2017 
2018 	if (!smu->disable_watermark &&
2019 			smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
2020 			smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
2021 		smu_set_watermarks_table(smu, table, clock_ranges);
2022 		smu->watermarks_bitmap |= WATERMARKS_EXIST;
2023 		smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
2024 	}
2025 
2026 	mutex_unlock(&smu->mutex);
2027 
2028 	return 0;
2029 }
2030 
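/* amdgpu IP block glue: shared SMU callbacks and the v11.0/v12.0 block descriptors. */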
2031 const struct amd_ip_funcs smu_ip_funcs = {
2032 	.name = "smu",
2033 	.early_init = smu_early_init,
2034 	.late_init = smu_late_init,
2035 	.sw_init = smu_sw_init,
2036 	.sw_fini = smu_sw_fini,
2037 	.hw_init = smu_hw_init,
2038 	.hw_fini = smu_hw_fini,
2039 	.suspend = smu_suspend,
2040 	.resume = smu_resume,
2041 	.is_idle = NULL,
2042 	.check_soft_reset = NULL,
2043 	.wait_for_idle = NULL,
2044 	.soft_reset = NULL,
2045 	.set_clockgating_state = smu_set_clockgating_state,
2046 	.set_powergating_state = smu_set_powergating_state,
2047 	.enable_umd_pstate = smu_enable_umd_pstate,
2048 };
2049 
2050 const struct amdgpu_ip_block_version smu_v11_0_ip_block =
2051 {
2052 	.type = AMD_IP_BLOCK_TYPE_SMC,
2053 	.major = 11,
2054 	.minor = 0,
2055 	.rev = 0,
2056 	.funcs = &smu_ip_funcs,
2057 };
2058 
2059 const struct amdgpu_ip_block_version smu_v12_0_ip_block =
2060 {
2061 	.type = AMD_IP_BLOCK_TYPE_SMC,
2062 	.major = 12,
2063 	.minor = 0,
2064 	.rev = 0,
2065 	.funcs = &smu_ip_funcs,
2066 };
2067 
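/*
 * Most of the entry points below are thin wrappers: take smu->mutex,
 * call the ASIC-specific ppt_funcs hook if it is implemented, release
 * the mutex and return the hook's result.
 */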
smu_load_microcode(struct smu_context * smu)2068 int smu_load_microcode(struct smu_context *smu)
2069 {
2070 	int ret = 0;
2071 
2072 	mutex_lock(&smu->mutex);
2073 
2074 	if (smu->ppt_funcs->load_microcode)
2075 		ret = smu->ppt_funcs->load_microcode(smu);
2076 
2077 	mutex_unlock(&smu->mutex);
2078 
2079 	return ret;
2080 }
2081 
smu_check_fw_status(struct smu_context * smu)2082 int smu_check_fw_status(struct smu_context *smu)
2083 {
2084 	int ret = 0;
2085 
2086 	mutex_lock(&smu->mutex);
2087 
2088 	if (smu->ppt_funcs->check_fw_status)
2089 		ret = smu->ppt_funcs->check_fw_status(smu);
2090 
2091 	mutex_unlock(&smu->mutex);
2092 
2093 	return ret;
2094 }
2095 
smu_set_gfx_cgpg(struct smu_context * smu,bool enabled)2096 int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
2097 {
2098 	int ret = 0;
2099 
2100 	mutex_lock(&smu->mutex);
2101 
2102 	if (smu->ppt_funcs->set_gfx_cgpg)
2103 		ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
2104 
2105 	mutex_unlock(&smu->mutex);
2106 
2107 	return ret;
2108 }
2109 
smu_set_fan_speed_rpm(struct smu_context * smu,uint32_t speed)2110 int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
2111 {
2112 	int ret = 0;
2113 
2114 	mutex_lock(&smu->mutex);
2115 
2116 	if (smu->ppt_funcs->set_fan_speed_rpm)
2117 		ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
2118 
2119 	mutex_unlock(&smu->mutex);
2120 
2121 	return ret;
2122 }
2123 
smu_get_power_limit(struct smu_context * smu,uint32_t * limit,bool def,bool lock_needed)2124 int smu_get_power_limit(struct smu_context *smu,
2125 			uint32_t *limit,
2126 			bool def,
2127 			bool lock_needed)
2128 {
2129 	int ret = 0;
2130 
2131 	if (lock_needed)
2132 		mutex_lock(&smu->mutex);
2133 
2134 	if (smu->ppt_funcs->get_power_limit)
2135 		ret = smu->ppt_funcs->get_power_limit(smu, limit, def);
2136 
2137 	if (lock_needed)
2138 		mutex_unlock(&smu->mutex);
2139 
2140 	return ret;
2141 }
2142 
smu_set_power_limit(struct smu_context * smu,uint32_t limit)2143 int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
2144 {
2145 	int ret = 0;
2146 
2147 	mutex_lock(&smu->mutex);
2148 
2149 	if (smu->ppt_funcs->set_power_limit)
2150 		ret = smu->ppt_funcs->set_power_limit(smu, limit);
2151 
2152 	mutex_unlock(&smu->mutex);
2153 
2154 	return ret;
2155 }
2156 
smu_print_clk_levels(struct smu_context * smu,enum smu_clk_type clk_type,char * buf)2157 int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
2158 {
2159 	int ret = 0;
2160 
2161 	mutex_lock(&smu->mutex);
2162 
2163 	if (smu->ppt_funcs->print_clk_levels)
2164 		ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
2165 
2166 	mutex_unlock(&smu->mutex);
2167 
2168 	return ret;
2169 }
2170 
smu_get_od_percentage(struct smu_context * smu,enum smu_clk_type type)2171 int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
2172 {
2173 	int ret = 0;
2174 
2175 	mutex_lock(&smu->mutex);
2176 
2177 	if (smu->ppt_funcs->get_od_percentage)
2178 		ret = smu->ppt_funcs->get_od_percentage(smu, type);
2179 
2180 	mutex_unlock(&smu->mutex);
2181 
2182 	return ret;
2183 }
2184 
smu_set_od_percentage(struct smu_context * smu,enum smu_clk_type type,uint32_t value)2185 int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
2186 {
2187 	int ret = 0;
2188 
2189 	mutex_lock(&smu->mutex);
2190 
2191 	if (smu->ppt_funcs->set_od_percentage)
2192 		ret = smu->ppt_funcs->set_od_percentage(smu, type, value);
2193 
2194 	mutex_unlock(&smu->mutex);
2195 
2196 	return ret;
2197 }
2198 
smu_od_edit_dpm_table(struct smu_context * smu,enum PP_OD_DPM_TABLE_COMMAND type,long * input,uint32_t size)2199 int smu_od_edit_dpm_table(struct smu_context *smu,
2200 			  enum PP_OD_DPM_TABLE_COMMAND type,
2201 			  long *input, uint32_t size)
2202 {
2203 	int ret = 0;
2204 
2205 	mutex_lock(&smu->mutex);
2206 
2207 	if (smu->ppt_funcs->od_edit_dpm_table)
2208 		ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
2209 
2210 	mutex_unlock(&smu->mutex);
2211 
2212 	return ret;
2213 }
2214 
smu_read_sensor(struct smu_context * smu,enum amd_pp_sensors sensor,void * data,uint32_t * size)2215 int smu_read_sensor(struct smu_context *smu,
2216 		    enum amd_pp_sensors sensor,
2217 		    void *data, uint32_t *size)
2218 {
2219 	int ret = 0;
2220 
2221 	mutex_lock(&smu->mutex);
2222 
2223 	if (smu->ppt_funcs->read_sensor)
2224 		ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size);
2225 
2226 	mutex_unlock(&smu->mutex);
2227 
2228 	return ret;
2229 }
2230 
smu_get_power_profile_mode(struct smu_context * smu,char * buf)2231 int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
2232 {
2233 	int ret = 0;
2234 
2235 	mutex_lock(&smu->mutex);
2236 
2237 	if (smu->ppt_funcs->get_power_profile_mode)
2238 		ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);
2239 
2240 	mutex_unlock(&smu->mutex);
2241 
2242 	return ret;
2243 }
2244 
smu_set_power_profile_mode(struct smu_context * smu,long * param,uint32_t param_size,bool lock_needed)2245 int smu_set_power_profile_mode(struct smu_context *smu,
2246 			       long *param,
2247 			       uint32_t param_size,
2248 			       bool lock_needed)
2249 {
2250 	int ret = 0;
2251 
2252 	if (lock_needed)
2253 		mutex_lock(&smu->mutex);
2254 
2255 	if (smu->ppt_funcs->set_power_profile_mode)
2256 		ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
2257 
2258 	if (lock_needed)
2259 		mutex_unlock(&smu->mutex);
2260 
2261 	return ret;
2262 }
2263 
2264 
smu_get_fan_control_mode(struct smu_context * smu)2265 int smu_get_fan_control_mode(struct smu_context *smu)
2266 {
2267 	int ret = 0;
2268 
2269 	mutex_lock(&smu->mutex);
2270 
2271 	if (smu->ppt_funcs->get_fan_control_mode)
2272 		ret = smu->ppt_funcs->get_fan_control_mode(smu);
2273 
2274 	mutex_unlock(&smu->mutex);
2275 
2276 	return ret;
2277 }
2278 
smu_set_fan_control_mode(struct smu_context * smu,int value)2279 int smu_set_fan_control_mode(struct smu_context *smu, int value)
2280 {
2281 	int ret = 0;
2282 
2283 	mutex_lock(&smu->mutex);
2284 
2285 	if (smu->ppt_funcs->set_fan_control_mode)
2286 		ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
2287 
2288 	mutex_unlock(&smu->mutex);
2289 
2290 	return ret;
2291 }
2292 
smu_get_fan_speed_percent(struct smu_context * smu,uint32_t * speed)2293 int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
2294 {
2295 	int ret = 0;
2296 
2297 	mutex_lock(&smu->mutex);
2298 
2299 	if (smu->ppt_funcs->get_fan_speed_percent)
2300 		ret = smu->ppt_funcs->get_fan_speed_percent(smu, speed);
2301 
2302 	mutex_unlock(&smu->mutex);
2303 
2304 	return ret;
2305 }
2306 
smu_set_fan_speed_percent(struct smu_context * smu,uint32_t speed)2307 int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
2308 {
2309 	int ret = 0;
2310 
2311 	mutex_lock(&smu->mutex);
2312 
2313 	if (smu->ppt_funcs->set_fan_speed_percent)
2314 		ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
2315 
2316 	mutex_unlock(&smu->mutex);
2317 
2318 	return ret;
2319 }
2320 
smu_get_fan_speed_rpm(struct smu_context * smu,uint32_t * speed)2321 int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
2322 {
2323 	int ret = 0;
2324 
2325 	mutex_lock(&smu->mutex);
2326 
2327 	if (smu->ppt_funcs->get_fan_speed_rpm)
2328 		ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
2329 
2330 	mutex_unlock(&smu->mutex);
2331 
2332 	return ret;
2333 }
2334 
smu_set_deep_sleep_dcefclk(struct smu_context * smu,int clk)2335 int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
2336 {
2337 	int ret = 0;
2338 
2339 	mutex_lock(&smu->mutex);
2340 
2341 	if (smu->ppt_funcs->set_deep_sleep_dcefclk)
2342 		ret = smu->ppt_funcs->set_deep_sleep_dcefclk(smu, clk);
2343 
2344 	mutex_unlock(&smu->mutex);
2345 
2346 	return ret;
2347 }
2348 
smu_set_active_display_count(struct smu_context * smu,uint32_t count)2349 int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
2350 {
2351 	int ret = 0;
2352 
2353 	if (smu->ppt_funcs->set_active_display_count)
2354 		ret = smu->ppt_funcs->set_active_display_count(smu, count);
2355 
2356 	return ret;
2357 }
2358 
smu_get_clock_by_type(struct smu_context * smu,enum amd_pp_clock_type type,struct amd_pp_clocks * clocks)2359 int smu_get_clock_by_type(struct smu_context *smu,
2360 			  enum amd_pp_clock_type type,
2361 			  struct amd_pp_clocks *clocks)
2362 {
2363 	int ret = 0;
2364 
2365 	mutex_lock(&smu->mutex);
2366 
2367 	if (smu->ppt_funcs->get_clock_by_type)
2368 		ret = smu->ppt_funcs->get_clock_by_type(smu, type, clocks);
2369 
2370 	mutex_unlock(&smu->mutex);
2371 
2372 	return ret;
2373 }
2374 
smu_get_max_high_clocks(struct smu_context * smu,struct amd_pp_simple_clock_info * clocks)2375 int smu_get_max_high_clocks(struct smu_context *smu,
2376 			    struct amd_pp_simple_clock_info *clocks)
2377 {
2378 	int ret = 0;
2379 
2380 	mutex_lock(&smu->mutex);
2381 
2382 	if (smu->ppt_funcs->get_max_high_clocks)
2383 		ret = smu->ppt_funcs->get_max_high_clocks(smu, clocks);
2384 
2385 	mutex_unlock(&smu->mutex);
2386 
2387 	return ret;
2388 }
2389 
smu_get_clock_by_type_with_latency(struct smu_context * smu,enum smu_clk_type clk_type,struct pp_clock_levels_with_latency * clocks)2390 int smu_get_clock_by_type_with_latency(struct smu_context *smu,
2391 				       enum smu_clk_type clk_type,
2392 				       struct pp_clock_levels_with_latency *clocks)
2393 {
2394 	int ret = 0;
2395 
2396 	mutex_lock(&smu->mutex);
2397 
2398 	if (smu->ppt_funcs->get_clock_by_type_with_latency)
2399 		ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
2400 
2401 	mutex_unlock(&smu->mutex);
2402 
2403 	return ret;
2404 }
2405 
smu_get_clock_by_type_with_voltage(struct smu_context * smu,enum amd_pp_clock_type type,struct pp_clock_levels_with_voltage * clocks)2406 int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
2407 				       enum amd_pp_clock_type type,
2408 				       struct pp_clock_levels_with_voltage *clocks)
2409 {
2410 	int ret = 0;
2411 
2412 	mutex_lock(&smu->mutex);
2413 
2414 	if (smu->ppt_funcs->get_clock_by_type_with_voltage)
2415 		ret = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks);
2416 
2417 	mutex_unlock(&smu->mutex);
2418 
2419 	return ret;
2420 }
2421 
2422 
smu_display_clock_voltage_request(struct smu_context * smu,struct pp_display_clock_request * clock_req)2423 int smu_display_clock_voltage_request(struct smu_context *smu,
2424 				      struct pp_display_clock_request *clock_req)
2425 {
2426 	int ret = 0;
2427 
2428 	mutex_lock(&smu->mutex);
2429 
2430 	if (smu->ppt_funcs->display_clock_voltage_request)
2431 		ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
2432 
2433 	mutex_unlock(&smu->mutex);
2434 
2435 	return ret;
2436 }
2437 
2438 
smu_display_disable_memory_clock_switch(struct smu_context * smu,bool disable_memory_clock_switch)2439 int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
2440 {
2441 	int ret = -EINVAL;
2442 
2443 	mutex_lock(&smu->mutex);
2444 
2445 	if (smu->ppt_funcs->display_disable_memory_clock_switch)
2446 		ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
2447 
2448 	mutex_unlock(&smu->mutex);
2449 
2450 	return ret;
2451 }
2452 
smu_notify_smu_enable_pwe(struct smu_context * smu)2453 int smu_notify_smu_enable_pwe(struct smu_context *smu)
2454 {
2455 	int ret = 0;
2456 
2457 	mutex_lock(&smu->mutex);
2458 
2459 	if (smu->ppt_funcs->notify_smu_enable_pwe)
2460 		ret = smu->ppt_funcs->notify_smu_enable_pwe(smu);
2461 
2462 	mutex_unlock(&smu->mutex);
2463 
2464 	return ret;
2465 }
2466 
smu_set_xgmi_pstate(struct smu_context * smu,uint32_t pstate)2467 int smu_set_xgmi_pstate(struct smu_context *smu,
2468 			uint32_t pstate)
2469 {
2470 	int ret = 0;
2471 
2472 	mutex_lock(&smu->mutex);
2473 
2474 	if (smu->ppt_funcs->set_xgmi_pstate)
2475 		ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
2476 
2477 	mutex_unlock(&smu->mutex);
2478 
2479 	return ret;
2480 }
2481 
smu_set_azalia_d3_pme(struct smu_context * smu)2482 int smu_set_azalia_d3_pme(struct smu_context *smu)
2483 {
2484 	int ret = 0;
2485 
2486 	mutex_lock(&smu->mutex);
2487 
2488 	if (smu->ppt_funcs->set_azalia_d3_pme)
2489 		ret = smu->ppt_funcs->set_azalia_d3_pme(smu);
2490 
2491 	mutex_unlock(&smu->mutex);
2492 
2493 	return ret;
2494 }
2495 
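/* BACO (Bus Active, Chip Off) support query, state query and entry/exit. */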
smu_baco_is_support(struct smu_context * smu)2496 bool smu_baco_is_support(struct smu_context *smu)
2497 {
2498 	bool ret = false;
2499 
2500 	mutex_lock(&smu->mutex);
2501 
2502 	if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
2503 		ret = smu->ppt_funcs->baco_is_support(smu);
2504 
2505 	mutex_unlock(&smu->mutex);
2506 
2507 	return ret;
2508 }
2509 
smu_baco_get_state(struct smu_context * smu,enum smu_baco_state * state)2510 int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
2511 {
2512 	if (!smu->ppt_funcs->baco_get_state)
2513 		return -EINVAL;
2514 
2515 	mutex_lock(&smu->mutex);
2516 	*state = smu->ppt_funcs->baco_get_state(smu);
2517 	mutex_unlock(&smu->mutex);
2518 
2519 	return 0;
2520 }
2521 
smu_baco_enter(struct smu_context * smu)2522 int smu_baco_enter(struct smu_context *smu)
2523 {
2524 	int ret = 0;
2525 
2526 	mutex_lock(&smu->mutex);
2527 
2528 	if (smu->ppt_funcs->baco_enter)
2529 		ret = smu->ppt_funcs->baco_enter(smu);
2530 
2531 	mutex_unlock(&smu->mutex);
2532 
2533 	return ret;
2534 }
2535 
smu_baco_exit(struct smu_context * smu)2536 int smu_baco_exit(struct smu_context *smu)
2537 {
2538 	int ret = 0;
2539 
2540 	mutex_lock(&smu->mutex);
2541 
2542 	if (smu->ppt_funcs->baco_exit)
2543 		ret = smu->ppt_funcs->baco_exit(smu);
2544 
2545 	mutex_unlock(&smu->mutex);
2546 
2547 	return ret;
2548 }
2549 
smu_mode2_reset(struct smu_context * smu)2550 int smu_mode2_reset(struct smu_context *smu)
2551 {
2552 	int ret = 0;
2553 
2554 	mutex_lock(&smu->mutex);
2555 
2556 	if (smu->ppt_funcs->mode2_reset)
2557 		ret = smu->ppt_funcs->mode2_reset(smu);
2558 
2559 	mutex_unlock(&smu->mutex);
2560 
2561 	return ret;
2562 }
2563 
smu_get_max_sustainable_clocks_by_dc(struct smu_context * smu,struct pp_smu_nv_clock_table * max_clocks)2564 int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
2565 					 struct pp_smu_nv_clock_table *max_clocks)
2566 {
2567 	int ret = 0;
2568 
2569 	mutex_lock(&smu->mutex);
2570 
2571 	if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
2572 		ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
2573 
2574 	mutex_unlock(&smu->mutex);
2575 
2576 	return ret;
2577 }
2578 
smu_get_uclk_dpm_states(struct smu_context * smu,unsigned int * clock_values_in_khz,unsigned int * num_states)2579 int smu_get_uclk_dpm_states(struct smu_context *smu,
2580 			    unsigned int *clock_values_in_khz,
2581 			    unsigned int *num_states)
2582 {
2583 	int ret = 0;
2584 
2585 	mutex_lock(&smu->mutex);
2586 
2587 	if (smu->ppt_funcs->get_uclk_dpm_states)
2588 		ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
2589 
2590 	mutex_unlock(&smu->mutex);
2591 
2592 	return ret;
2593 }
2594 
smu_get_current_power_state(struct smu_context * smu)2595 enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
2596 {
2597 	enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
2598 
2599 	mutex_lock(&smu->mutex);
2600 
2601 	if (smu->ppt_funcs->get_current_power_state)
2602 		pm_state = smu->ppt_funcs->get_current_power_state(smu);
2603 
2604 	mutex_unlock(&smu->mutex);
2605 
2606 	return pm_state;
2607 }
2608 
smu_get_dpm_clock_table(struct smu_context * smu,struct dpm_clocks * clock_table)2609 int smu_get_dpm_clock_table(struct smu_context *smu,
2610 			    struct dpm_clocks *clock_table)
2611 {
2612 	int ret = 0;
2613 
2614 	mutex_lock(&smu->mutex);
2615 
2616 	if (smu->ppt_funcs->get_dpm_clock_table)
2617 		ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
2618 
2619 	mutex_unlock(&smu->mutex);
2620 
2621 	return ret;
2622 }
2623 
smu_get_pptable_power_limit(struct smu_context * smu)2624 uint32_t smu_get_pptable_power_limit(struct smu_context *smu)
2625 {
2626 	uint32_t ret = 0;
2627 
2628 	if (smu->ppt_funcs->get_pptable_power_limit)
2629 		ret = smu->ppt_funcs->get_pptable_power_limit(smu);
2630 
2631 	return ret;
2632 }
2633 
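/* Convenience wrapper that sends a message with a parameter of zero. */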
smu_send_smc_msg(struct smu_context * smu,enum smu_message_type msg)2634 int smu_send_smc_msg(struct smu_context *smu,
2635 		     enum smu_message_type msg)
2636 {
2637 	int ret;
2638 
2639 	ret = smu_send_smc_msg_with_param(smu, msg, 0);
2640 	return ret;
2641 }
2642