/*	$NetBSD: amdgpu_smu_v12_0.c,v 1.2 2021/12/18 23:45:26 riastradh Exp $	*/

/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_smu_v12_0.c,v 1.2 2021/12/18 23:45:26 riastradh Exp $");

#include "pp_debug.h"
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v12_0.h"
#include "soc15_common.h"
#include "atom.h"

#include "asic_reg/mp/mp_12_0_0_offset.h"
#include "asic_reg/mp/mp_12_0_0_sh_mask.h"

#define smnMP1_FIRMWARE_FLAGS                                0x3010024

#define mmSMUIO_GFX_MISC_CNTL                                0x00c8
#define mmSMUIO_GFX_MISC_CNTL_BASE_IDX                       0
#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK          0x00000006L
#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT        0x1

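/*
 * Post a message to the SMU by writing its index into the
 * MP1_SMN_C2PMSG_66 message register.  No acknowledgement from the
 * firmware is awaited; callers handle any handshaking themselves.
 */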
int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
					      uint16_t msg)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
	return 0;
}

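/*
 * Read back the SMU's 32-bit argument from MP1_SMN_C2PMSG_82, the
 * register that carries message parameters and results.
 */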
int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg)
{
	struct amdgpu_device *adev = smu->adev;

	*arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
	return 0;
}

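/*
 * Poll the MP1_SMN_C2PMSG_90 response register for up to
 * adev->usec_timeout microseconds.  A response of 0x1 means the last
 * message succeeded; any other non-zero response is an I/O error.
 */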
int smu_v12_0_wait_for_response(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t cur_value, i;

	for (i = 0; i < adev->usec_timeout; i++) {
		cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
		if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
			return cur_value == 0x1 ? 0 : -EIO;

		udelay(1);
	}

	/* a timeout here indicates a driver/firmware sequencing bug */
	return -ETIME;
}

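/*
 * Send a message plus a 32-bit parameter to the SMU: map the generic
 * message type to its ASIC-specific index, verify the firmware is not
 * still busy with a previous message, clear the response register,
 * write the parameter, post the message, and wait for the response.
 */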
int
smu_v12_0_send_msg_with_param(struct smu_context *smu,
			      enum smu_message_type msg,
			      uint32_t param)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0, index = 0;

	index = smu_msg_get_index(smu, msg);
	if (index < 0)
		return index;

	ret = smu_v12_0_wait_for_response(smu);
	if (ret) {
		pr_err("Msg issuing pre-check failed and "
		       "SMU may not be in the right state!\n");
		return ret;
	}

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);

	smu_v12_0_send_msg_without_waiting(smu, (uint16_t)index);

	ret = smu_v12_0_wait_for_response(smu);
	if (ret)
		pr_err("Failed to send message 0x%x, response 0x%x, param 0x%x\n",
		       index, ret, param);

	return ret;
}

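/*
 * Sanity-check that the MP1 firmware is alive by reading its
 * FIRMWARE_FLAGS register through the indirect PCIe interface and
 * testing the INTERRUPTS_ENABLED bit.
 */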
int smu_v12_0_check_fw_status(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t mp1_fw_flags;

	mp1_fw_flags = RREG32_PCIE(MP1_Public |
		(smnMP1_FIRMWARE_FLAGS & 0xffffffff));

	if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
		MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
		return 0;

	return -EIO;
}

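/*
 * Compare the interface version the driver was built against with the
 * one reported by the SMU firmware.  The firmware version word packs
 * major (bits 31:16), minor (bits 15:8) and debug (bits 7:0) fields.
 */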
int smu_v12_0_check_fw_version(struct smu_context *smu)
{
	uint32_t if_version = 0xff, smu_version = 0xff;
	uint16_t smu_major;
	uint8_t smu_minor, smu_debug;
	int ret = 0;

	ret = smu_get_smc_version(smu, &if_version, &smu_version);
	if (ret)
		return ret;

	smu_major = (smu_version >> 16) & 0xffff;
	smu_minor = (smu_version >> 8) & 0xff;
	smu_debug = (smu_version >> 0) & 0xff;

	/*
	 * 1. An if_version mismatch is not critical, as the firmware is
	 * designed to be backward compatible.
	 * 2. Newer firmware usually brings some optimizations, but those
	 * are only visible with the matching driver.
	 * Considering the above, we just warn the user instead of
	 * halting driver loading.
	 */
	if (if_version != smu->smc_if_version) {
		pr_info("smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
			"smu fw version = 0x%08x (%d.%d.%d)\n",
			smu->smc_if_version, if_version,
			smu_version, smu_major, smu_minor, smu_debug);
		pr_warn("SMU driver if version not matched\n");
	}

	return ret;
}

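/*
 * Power up or power down the SDMA engine through the SMU.  This is a
 * no-op on anything but an APU.
 */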
int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
{
	if (!smu->is_apu)
		return 0;

	if (gate)
		return smu_send_smc_msg(smu, SMU_MSG_PowerDownSdma);
	else
		return smu_send_smc_msg(smu, SMU_MSG_PowerUpSdma);
}

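/*
 * Power up or power down the VCN (video codec) block, following the
 * same APU-only pattern as smu_v12_0_powergate_sdma() above.
 */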
int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate)
{
	if (!smu->is_apu)
		return 0;

	if (gate)
		return smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
	else
		return smu_send_smc_msg(smu, SMU_MSG_PowerUpVcn);
}

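/*
 * Power up or power down the JPEG block.  Unlike the SDMA and VCN
 * messages, these carry a parameter, which is currently always zero.
 */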
int smu_v12_0_powergate_jpeg(struct smu_context *smu, bool gate)
{
	if (!smu->is_apu)
		return 0;

	if (gate)
		return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0);
	else
		return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0);
}

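/*
 * Enable or disable GFX CGPG via the SMU, but only when the device
 * advertises GFX power gating (AMD_PG_SUPPORT_GFX_PG).
 */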
int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
{
	if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
		return 0;

	return smu_v12_0_send_msg_with_param(smu,
		SMU_MSG_SetGfxCGPG, enable ? 1 : 0);
}

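/*
 * Service an amd_pp sensor query.  Graphics and memory clocks come
 * from the SMU clock tables, the minimum fan RPM is reported as zero,
 * and anything else is deferred to the common sensor handler.
 */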
int smu_v12_0_read_sensor(struct smu_context *smu,
				 enum amd_pp_sensors sensor,
				 void *data, uint32_t *size)
{
	int ret = 0;

	if (!data || !size)
		return -EINVAL;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		ret = smu_get_current_clk_freq(smu, SMU_UCLK, (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		ret = smu_get_current_clk_freq(smu, SMU_GFXCLK, (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
		*(uint32_t *)data = 0;
		*size = 4;
		break;
	default:
		ret = smu_common_read_sensor(smu, sensor, data, size);
		break;
	}

	if (ret)
		*size = 0;

	return ret;
}

/**
 * smu_v12_0_get_gfxoff_status - get gfxoff status
 *
 * @smu: smu_context pointer
 *
 * Query the current GFXOFF state from the SMUIO miscellaneous control
 * register.
 *
 * Returns 0=GFXOFF (default).
 * Returns 1=Transition out of GFXOFF.
 * Returns 2=Not in GFXOFF.
 * Returns 3=Transition into GFXOFF.
 */
uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu)
{
	uint32_t reg;
	uint32_t gfxOff_Status = 0;
	struct amdgpu_device *adev = smu->adev;

	reg = RREG32_SOC15(SMUIO, 0, mmSMUIO_GFX_MISC_CNTL);
	gfxOff_Status = (reg & SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK)
		>> SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT;

	return gfxOff_Status;
}

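/*
 * Allow or disallow GFXOFF.  On disallow, poll until the GFX block
 * reports state 2 ("not in GFXOFF"), giving up after about half a
 * second.
 */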
int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable)
{
	int ret = 0, timeout = 500;

	if (enable) {
		ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);

	} else {
		ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);

		/* confirm gfx is back to "on" state; timeout is 0.5 second */
		while (smu_v12_0_get_gfxoff_status(smu) != 2) {
			msleep(1);
			timeout--;
			if (timeout == 0) {
				DRM_ERROR("timed out waiting for gfxoff to be disabled!\n");
				break;
			}
		}
	}

	return ret;
}

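/*
 * Allocate the array of SMU table descriptors and hand it to the
 * ASIC-specific code to fill in.  Fails if tables were already set up.
 */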
int smu_v12_0_init_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = NULL;

	if (smu_table->tables)
		return -EINVAL;

	tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
			 GFP_KERNEL);
	if (!tables)
		return -ENOMEM;

	smu_table->tables = tables;

	return smu_tables_init(smu, tables);
}

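/*
 * Undo smu_v12_0_init_smc_tables(): free the cached clocks table and
 * the table descriptor array.
 */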
int smu_v12_0_fini_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;

	if (!smu_table->tables)
		return -EINVAL;

	kfree(smu_table->clocks_table);
	kfree(smu_table->tables);

	smu_table->clocks_table = NULL;
	smu_table->tables = NULL;

	return 0;
}

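/*
 * Fetch the DPM clocks table from the SMU into the driver's cached
 * clocks_table; the final 'false' requests an SMU-to-driver transfer.
 */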
int smu_v12_0_populate_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;

	return smu_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
}

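/*
 * Retrieve the 64-bit enabled-feature mask as two 32-bit halves;
 * feature_mask[0] gets the low word and feature_mask[1] the high word,
 * so callers must pass room for at least two entries.
 */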
int smu_v12_0_get_enabled_mask(struct smu_context *smu,
				      uint32_t *feature_mask, uint32_t num)
{
	uint32_t feature_mask_high = 0, feature_mask_low = 0;
	int ret = 0;

	if (!feature_mask || num < 2)
		return -EINVAL;

	ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
	if (ret)
		return ret;
	ret = smu_read_smc_arg(smu, &feature_mask_high);
	if (ret)
		return ret;

	ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
	if (ret)
		return ret;
	ret = smu_read_smc_arg(smu, &feature_mask_low);
	if (ret)
		return ret;

	feature_mask[0] = feature_mask_low;
	feature_mask[1] = feature_mask_high;

	return ret;
}

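/*
 * Read the current frequency of a clock domain from the SMU clock
 * table; the raw table value is scaled by 100 before being returned.
 */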
int smu_v12_0_get_current_clk_freq(struct smu_context *smu,
					  enum smu_clk_type clk_id,
					  uint32_t *value)
{
	int ret = 0;
	uint32_t freq = 0;

	if (clk_id >= SMU_CLK_COUNT || !value)
		return -EINVAL;

	ret = smu_get_current_clk_freq_by_table(smu, clk_id, &freq);
	if (ret)
		return ret;

	freq *= 100;
	*value = freq;

	return ret;
}

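/*
 * Report the attainable frequency range of a clock domain.  Maximums
 * come from a direct SMU query (GFX) or from the DPM level picked by
 * the peak-profile masks; minimums use the SMU query for GFX and DPM
 * level 0 elsewhere.  Either of min/max may be NULL to skip it.
 */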
int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
						 uint32_t *min, uint32_t *max)
{
	int ret = 0;
	uint32_t mclk_mask, soc_mask;

	if (max) {
		ret = smu_get_profiling_clk_mask(smu, AMD_DPM_FORCED_LEVEL_PROFILE_PEAK,
						 NULL,
						 &mclk_mask,
						 &soc_mask);
		if (ret)
			goto failed;

		switch (clk_type) {
		case SMU_GFXCLK:
		case SMU_SCLK:
			ret = smu_send_smc_msg(smu, SMU_MSG_GetMaxGfxclkFrequency);
			if (ret) {
				pr_err("Attempt to get max GFX frequency from SMC failed!\n");
				goto failed;
			}
			ret = smu_read_smc_arg(smu, max);
			if (ret)
				goto failed;
			break;
		case SMU_UCLK:
		case SMU_FCLK:
		case SMU_MCLK:
			ret = smu_get_dpm_clk_limited(smu, clk_type, mclk_mask, max);
			if (ret)
				goto failed;
			break;
		case SMU_SOCCLK:
			ret = smu_get_dpm_clk_limited(smu, clk_type, soc_mask, max);
			if (ret)
				goto failed;
			break;
		default:
			ret = -EINVAL;
			goto failed;
		}
	}

	if (min) {
		switch (clk_type) {
		case SMU_GFXCLK:
		case SMU_SCLK:
			ret = smu_send_smc_msg(smu, SMU_MSG_GetMinGfxclkFrequency);
			if (ret) {
				pr_err("Attempt to get min GFX frequency from SMC failed!\n");
				goto failed;
			}
			ret = smu_read_smc_arg(smu, min);
			if (ret)
				goto failed;
			break;
		case SMU_UCLK:
		case SMU_FCLK:
		case SMU_MCLK:
			ret = smu_get_dpm_clk_limited(smu, clk_type, 0, min);
			if (ret)
				goto failed;
			break;
		case SMU_SOCCLK:
			ret = smu_get_dpm_clk_limited(smu, clk_type, 0, min);
			if (ret)
				goto failed;
			break;
		default:
			ret = -EINVAL;
			goto failed;
		}
	}
failed:
	return ret;
}

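/*
 * Ask the SMU firmware to perform a mode-2 GPU reset on the driver's
 * behalf.
 */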
int smu_v12_0_mode2_reset(struct smu_context *smu)
{
	return smu_v12_0_send_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2);
}

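/*
 * Constrain a clock domain to [min, max] by issuing the matching
 * hard-minimum and soft-maximum messages for that domain.
 */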
int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t min, uint32_t max)
{
	int ret = 0;

	if (max < min)
		return -EINVAL;

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_SCLK:
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min);
		if (ret)
			return ret;

		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max);
		if (ret)
			return ret;
		break;
	case SMU_FCLK:
	case SMU_MCLK:
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min);
		if (ret)
			return ret;

		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max);
		if (ret)
			return ret;
		break;
	case SMU_SOCCLK:
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min);
		if (ret)
			return ret;

		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max);
		if (ret)
			return ret;
		break;
	case SMU_VCLK:
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min);
		if (ret)
			return ret;

		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max);
		if (ret)
			return ret;
		break;
	default:
		return -EINVAL;
	}

	return ret;
}

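/*
 * Publish the GPU (MC) address of the driver table to the SMU as
 * separate high and low 32-bit words.
 */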
int smu_v12_0_set_driver_table_location(struct smu_context *smu)
{
	struct smu_table *driver_table = &smu->smu_table.driver_table;
	int ret = 0;

	if (driver_table->mc_address) {
		ret = smu_send_smc_msg_with_param(smu,
				SMU_MSG_SetDriverDramAddrHigh,
				upper_32_bits(driver_table->mc_address));
		if (!ret)
			ret = smu_send_smc_msg_with_param(smu,
				SMU_MSG_SetDriverDramAddrLow,
				lower_32_bits(driver_table->mc_address));
	}

	return ret;
}