1 /*
2 * Copyright 2019 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23 #include <linux/firmware.h>
24 #include <linux/module.h>
25 #include <linux/pci.h>
26 #include <linux/reboot.h>
27
28 #define SMU_11_0_PARTIAL_PPTABLE
29 #define SWSMU_CODE_LAYER_L3
30
31 #include "amdgpu.h"
32 #include "amdgpu_smu.h"
33 #include "atomfirmware.h"
34 #include "amdgpu_atomfirmware.h"
35 #include "amdgpu_atombios.h"
36 #include "smu_v11_0.h"
37 #include "soc15_common.h"
38 #include "atom.h"
39 #include "amdgpu_ras.h"
40 #include "smu_cmn.h"
41
42 #include "asic_reg/thm/thm_11_0_2_offset.h"
43 #include "asic_reg/thm/thm_11_0_2_sh_mask.h"
44 #include "asic_reg/mp/mp_11_0_offset.h"
45 #include "asic_reg/mp/mp_11_0_sh_mask.h"
46 #include "asic_reg/smuio/smuio_11_0_0_offset.h"
47 #include "asic_reg/smuio/smuio_11_0_0_sh_mask.h"
48
49 /*
50 * DO NOT use these for err/warn/info/debug messages.
51 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
52 * They are more MGPU friendly.
53 */
54 #undef pr_err
55 #undef pr_warn
56 #undef pr_info
57 #undef pr_debug
58
59 MODULE_FIRMWARE("amdgpu/arcturus_smc.bin");
60 MODULE_FIRMWARE("amdgpu/navi10_smc.bin");
61 MODULE_FIRMWARE("amdgpu/navi14_smc.bin");
62 MODULE_FIRMWARE("amdgpu/navi12_smc.bin");
63 MODULE_FIRMWARE("amdgpu/sienna_cichlid_smc.bin");
64 MODULE_FIRMWARE("amdgpu/navy_flounder_smc.bin");
65 MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_smc.bin");
66
67 #define SMU11_VOLTAGE_SCALE 4
68
69 #define SMU11_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms
70
71 #define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288
72 #define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L
73 #define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4
74 #define smnPCIE_LC_SPEED_CNTL 0x11140290
75 #define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xC000
76 #define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0xE
77
78 #define mmTHM_BACO_CNTL_ARCT 0xA7
79 #define mmTHM_BACO_CNTL_ARCT_BASE_IDX 0
80
smu_v11_0_init_microcode(struct smu_context * smu)81 int smu_v11_0_init_microcode(struct smu_context *smu)
82 {
83 struct amdgpu_device *adev = smu->adev;
84 const char *chip_name;
85 char fw_name[SMU_FW_NAME_LEN];
86 int err = 0;
87 const struct smc_firmware_header_v1_0 *hdr;
88 const struct common_firmware_header *header;
89 struct amdgpu_firmware_info *ucode = NULL;
90
91 if (amdgpu_sriov_vf(adev) &&
92 ((adev->asic_type == CHIP_NAVI12) ||
93 (adev->asic_type == CHIP_SIENNA_CICHLID)))
94 return 0;
95
96 switch (adev->asic_type) {
97 case CHIP_ARCTURUS:
98 chip_name = "arcturus";
99 break;
100 case CHIP_NAVI10:
101 chip_name = "navi10";
102 break;
103 case CHIP_NAVI14:
104 chip_name = "navi14";
105 break;
106 case CHIP_NAVI12:
107 chip_name = "navi12";
108 break;
109 case CHIP_SIENNA_CICHLID:
110 chip_name = "sienna_cichlid";
111 break;
112 case CHIP_NAVY_FLOUNDER:
113 chip_name = "navy_flounder";
114 break;
115 case CHIP_DIMGREY_CAVEFISH:
116 chip_name = "dimgrey_cavefish";
117 break;
118 default:
119 dev_err(adev->dev, "Unsupported ASIC type %d\n", adev->asic_type);
120 return -EINVAL;
121 }
122
123 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
124
125 err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
126 if (err)
127 goto out;
128 err = amdgpu_ucode_validate(adev->pm.fw);
129 if (err)
130 goto out;
131
132 hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
133 amdgpu_ucode_print_smc_hdr(&hdr->header);
134 adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
135
136 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
137 ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
138 ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
139 ucode->fw = adev->pm.fw;
140 header = (const struct common_firmware_header *)ucode->fw->data;
141 adev->firmware.fw_size +=
142 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
143 }
144
145 out:
146 if (err) {
147 DRM_ERROR("smu_v11_0: Failed to load firmware \"%s\"\n",
148 fw_name);
149 release_firmware(adev->pm.fw);
150 adev->pm.fw = NULL;
151 }
152 return err;
153 }
154
smu_v11_0_fini_microcode(struct smu_context * smu)155 void smu_v11_0_fini_microcode(struct smu_context *smu)
156 {
157 struct amdgpu_device *adev = smu->adev;
158
159 release_firmware(adev->pm.fw);
160 adev->pm.fw = NULL;
161 adev->pm.fw_version = 0;
162 }
163
smu_v11_0_load_microcode(struct smu_context * smu)164 int smu_v11_0_load_microcode(struct smu_context *smu)
165 {
166 struct amdgpu_device *adev = smu->adev;
167 const uint32_t *src;
168 const struct smc_firmware_header_v1_0 *hdr;
169 uint32_t addr_start = MP1_SRAM;
170 uint32_t i;
171 uint32_t smc_fw_size;
172 uint32_t mp1_fw_flags;
173
174 hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
175 src = (const uint32_t *)(adev->pm.fw->data +
176 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
177 smc_fw_size = hdr->header.ucode_size_bytes;
178
179 for (i = 1; i < smc_fw_size/4 - 1; i++) {
180 WREG32_PCIE(addr_start, src[i]);
181 addr_start += 4;
182 }
183
184 WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
185 1 & MP1_SMN_PUB_CTRL__RESET_MASK);
186 WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
187 1 & ~MP1_SMN_PUB_CTRL__RESET_MASK);
188
189 for (i = 0; i < adev->usec_timeout; i++) {
190 mp1_fw_flags = RREG32_PCIE(MP1_Public |
191 (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
192 if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
193 MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
194 break;
195 udelay(1);
196 }
197
198 if (i == adev->usec_timeout)
199 return -ETIME;
200
201 return 0;
202 }
203
smu_v11_0_check_fw_status(struct smu_context * smu)204 int smu_v11_0_check_fw_status(struct smu_context *smu)
205 {
206 struct amdgpu_device *adev = smu->adev;
207 uint32_t mp1_fw_flags;
208
209 mp1_fw_flags = RREG32_PCIE(MP1_Public |
210 (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
211
212 if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
213 MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
214 return 0;
215
216 return -EIO;
217 }
218
smu_v11_0_check_fw_version(struct smu_context * smu)219 int smu_v11_0_check_fw_version(struct smu_context *smu)
220 {
221 struct amdgpu_device *adev = smu->adev;
222 uint32_t if_version = 0xff, smu_version = 0xff;
223 uint16_t smu_major;
224 uint8_t smu_minor, smu_debug;
225 int ret = 0;
226
227 ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
228 if (ret)
229 return ret;
230
231 smu_major = (smu_version >> 16) & 0xffff;
232 smu_minor = (smu_version >> 8) & 0xff;
233 smu_debug = (smu_version >> 0) & 0xff;
234 if (smu->is_apu)
235 adev->pm.fw_version = smu_version;
236
237 switch (smu->adev->asic_type) {
238 case CHIP_ARCTURUS:
239 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
240 break;
241 case CHIP_NAVI10:
242 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV10;
243 break;
244 case CHIP_NAVI12:
245 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV12;
246 break;
247 case CHIP_NAVI14:
248 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV14;
249 break;
250 case CHIP_SIENNA_CICHLID:
251 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Sienna_Cichlid;
252 break;
253 case CHIP_NAVY_FLOUNDER:
254 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Navy_Flounder;
255 break;
256 case CHIP_VANGOGH:
257 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_VANGOGH;
258 break;
259 case CHIP_DIMGREY_CAVEFISH:
260 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish;
261 break;
262 default:
263 dev_err(smu->adev->dev, "smu unsupported asic type:%d.\n", smu->adev->asic_type);
264 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_INV;
265 break;
266 }
267
268 /*
269 * 1. if_version mismatch is not critical as our fw is designed
270 * to be backward compatible.
271 * 2. New fw usually brings some optimizations. But that's visible
272 * only on the paired driver.
273 * Considering above, we just leave user a warning message instead
274 * of halt driver loading.
275 */
276 if (if_version != smu->smc_driver_if_version) {
277 dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
278 "smu fw version = 0x%08x (%d.%d.%d)\n",
279 smu->smc_driver_if_version, if_version,
280 smu_version, smu_major, smu_minor, smu_debug);
281 dev_warn(smu->adev->dev, "SMU driver if version not matched\n");
282 }
283
284 return ret;
285 }
286
smu_v11_0_set_pptable_v2_0(struct smu_context * smu,void ** table,uint32_t * size)287 static int smu_v11_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
288 {
289 struct amdgpu_device *adev = smu->adev;
290 uint32_t ppt_offset_bytes;
291 const struct smc_firmware_header_v2_0 *v2;
292
293 v2 = (const struct smc_firmware_header_v2_0 *) adev->pm.fw->data;
294
295 ppt_offset_bytes = le32_to_cpu(v2->ppt_offset_bytes);
296 *size = le32_to_cpu(v2->ppt_size_bytes);
297 *table = (uint8_t *)v2 + ppt_offset_bytes;
298
299 return 0;
300 }
301
smu_v11_0_set_pptable_v2_1(struct smu_context * smu,void ** table,uint32_t * size,uint32_t pptable_id)302 static int smu_v11_0_set_pptable_v2_1(struct smu_context *smu, void **table,
303 uint32_t *size, uint32_t pptable_id)
304 {
305 struct amdgpu_device *adev = smu->adev;
306 const struct smc_firmware_header_v2_1 *v2_1;
307 struct smc_soft_pptable_entry *entries;
308 uint32_t pptable_count = 0;
309 int i = 0;
310
311 v2_1 = (const struct smc_firmware_header_v2_1 *) adev->pm.fw->data;
312 entries = (struct smc_soft_pptable_entry *)
313 ((uint8_t *)v2_1 + le32_to_cpu(v2_1->pptable_entry_offset));
314 pptable_count = le32_to_cpu(v2_1->pptable_count);
315 for (i = 0; i < pptable_count; i++) {
316 if (le32_to_cpu(entries[i].id) == pptable_id) {
317 *table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes));
318 *size = le32_to_cpu(entries[i].ppt_size_bytes);
319 break;
320 }
321 }
322
323 if (i == pptable_count)
324 return -EINVAL;
325
326 return 0;
327 }
328
smu_v11_0_setup_pptable(struct smu_context * smu)329 int smu_v11_0_setup_pptable(struct smu_context *smu)
330 {
331 struct amdgpu_device *adev = smu->adev;
332 const struct smc_firmware_header_v1_0 *hdr;
333 int ret, index;
334 uint32_t size = 0;
335 uint16_t atom_table_size;
336 uint8_t frev, crev;
337 void *table;
338 uint16_t version_major, version_minor;
339
340 if (!amdgpu_sriov_vf(adev)) {
341 hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
342 version_major = le16_to_cpu(hdr->header.header_version_major);
343 version_minor = le16_to_cpu(hdr->header.header_version_minor);
344 if (version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) {
345 dev_info(adev->dev, "use driver provided pptable %d\n", smu->smu_table.boot_values.pp_table_id);
346 switch (version_minor) {
347 case 0:
348 ret = smu_v11_0_set_pptable_v2_0(smu, &table, &size);
349 break;
350 case 1:
351 ret = smu_v11_0_set_pptable_v2_1(smu, &table, &size,
352 smu->smu_table.boot_values.pp_table_id);
353 break;
354 default:
355 ret = -EINVAL;
356 break;
357 }
358 if (ret)
359 return ret;
360 goto out;
361 }
362 }
363
364 dev_info(adev->dev, "use vbios provided pptable\n");
365 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
366 powerplayinfo);
367
368 ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev,
369 (uint8_t **)&table);
370 if (ret)
371 return ret;
372 size = atom_table_size;
373
374 out:
375 if (!smu->smu_table.power_play_table)
376 smu->smu_table.power_play_table = table;
377 if (!smu->smu_table.power_play_table_size)
378 smu->smu_table.power_play_table_size = size;
379
380 return 0;
381 }
382
smu_v11_0_init_smc_tables(struct smu_context * smu)383 int smu_v11_0_init_smc_tables(struct smu_context *smu)
384 {
385 struct smu_table_context *smu_table = &smu->smu_table;
386 struct smu_table *tables = smu_table->tables;
387 int ret = 0;
388
389 smu_table->driver_pptable =
390 kzalloc(tables[SMU_TABLE_PPTABLE].size, GFP_KERNEL);
391 if (!smu_table->driver_pptable) {
392 ret = -ENOMEM;
393 goto err0_out;
394 }
395
396 smu_table->max_sustainable_clocks =
397 kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks), GFP_KERNEL);
398 if (!smu_table->max_sustainable_clocks) {
399 ret = -ENOMEM;
400 goto err1_out;
401 }
402
403 /* Arcturus does not support OVERDRIVE */
404 if (tables[SMU_TABLE_OVERDRIVE].size) {
405 smu_table->overdrive_table =
406 kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
407 if (!smu_table->overdrive_table) {
408 ret = -ENOMEM;
409 goto err2_out;
410 }
411
412 smu_table->boot_overdrive_table =
413 kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
414 if (!smu_table->boot_overdrive_table) {
415 ret = -ENOMEM;
416 goto err3_out;
417 }
418 }
419
420 return 0;
421
422 err3_out:
423 kfree(smu_table->overdrive_table);
424 err2_out:
425 kfree(smu_table->max_sustainable_clocks);
426 err1_out:
427 kfree(smu_table->driver_pptable);
428 err0_out:
429 return ret;
430 }
431
smu_v11_0_fini_smc_tables(struct smu_context * smu)432 int smu_v11_0_fini_smc_tables(struct smu_context *smu)
433 {
434 struct smu_table_context *smu_table = &smu->smu_table;
435 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
436
437 kfree(smu_table->gpu_metrics_table);
438 kfree(smu_table->boot_overdrive_table);
439 kfree(smu_table->overdrive_table);
440 kfree(smu_table->max_sustainable_clocks);
441 kfree(smu_table->driver_pptable);
442 kfree(smu_table->clocks_table);
443 smu_table->gpu_metrics_table = NULL;
444 smu_table->boot_overdrive_table = NULL;
445 smu_table->overdrive_table = NULL;
446 smu_table->max_sustainable_clocks = NULL;
447 smu_table->driver_pptable = NULL;
448 smu_table->clocks_table = NULL;
449 kfree(smu_table->hardcode_pptable);
450 smu_table->hardcode_pptable = NULL;
451
452 kfree(smu_table->metrics_table);
453 kfree(smu_table->watermarks_table);
454 smu_table->metrics_table = NULL;
455 smu_table->watermarks_table = NULL;
456 smu_table->metrics_time = 0;
457
458 kfree(smu_dpm->dpm_context);
459 kfree(smu_dpm->golden_dpm_context);
460 kfree(smu_dpm->dpm_current_power_state);
461 kfree(smu_dpm->dpm_request_power_state);
462 smu_dpm->dpm_context = NULL;
463 smu_dpm->golden_dpm_context = NULL;
464 smu_dpm->dpm_context_size = 0;
465 smu_dpm->dpm_current_power_state = NULL;
466 smu_dpm->dpm_request_power_state = NULL;
467
468 return 0;
469 }
470
smu_v11_0_init_power(struct smu_context * smu)471 int smu_v11_0_init_power(struct smu_context *smu)
472 {
473 struct smu_power_context *smu_power = &smu->smu_power;
474 size_t size = smu->adev->asic_type == CHIP_VANGOGH ?
475 sizeof(struct smu_11_5_power_context) :
476 sizeof(struct smu_11_0_power_context);
477
478 smu_power->power_context = kzalloc(size, GFP_KERNEL);
479 if (!smu_power->power_context)
480 return -ENOMEM;
481 smu_power->power_context_size = size;
482
483 return 0;
484 }
485
smu_v11_0_fini_power(struct smu_context * smu)486 int smu_v11_0_fini_power(struct smu_context *smu)
487 {
488 struct smu_power_context *smu_power = &smu->smu_power;
489
490 kfree(smu_power->power_context);
491 smu_power->power_context = NULL;
492 smu_power->power_context_size = 0;
493
494 return 0;
495 }
496
smu_v11_0_atom_get_smu_clockinfo(struct amdgpu_device * adev,uint8_t clk_id,uint8_t syspll_id,uint32_t * clk_freq)497 static int smu_v11_0_atom_get_smu_clockinfo(struct amdgpu_device *adev,
498 uint8_t clk_id,
499 uint8_t syspll_id,
500 uint32_t *clk_freq)
501 {
502 struct atom_get_smu_clock_info_parameters_v3_1 input = {0};
503 struct atom_get_smu_clock_info_output_parameters_v3_1 *output;
504 int ret, index;
505
506 input.clk_id = clk_id;
507 input.syspll_id = syspll_id;
508 input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
509 index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
510 getsmuclockinfo);
511
512 ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
513 (uint32_t *)&input);
514 if (ret)
515 return -EINVAL;
516
517 output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
518 *clk_freq = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
519
520 return 0;
521 }
522
smu_v11_0_get_vbios_bootup_values(struct smu_context * smu)523 int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
524 {
525 int ret, index;
526 uint16_t size;
527 uint8_t frev, crev;
528 struct atom_common_table_header *header;
529 struct atom_firmware_info_v3_3 *v_3_3;
530 struct atom_firmware_info_v3_1 *v_3_1;
531
532 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
533 firmwareinfo);
534
535 ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
536 (uint8_t **)&header);
537 if (ret)
538 return ret;
539
540 if (header->format_revision != 3) {
541 dev_err(smu->adev->dev, "unknown atom_firmware_info version! for smu11\n");
542 return -EINVAL;
543 }
544
545 switch (header->content_revision) {
546 case 0:
547 case 1:
548 case 2:
549 v_3_1 = (struct atom_firmware_info_v3_1 *)header;
550 smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
551 smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
552 smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
553 smu->smu_table.boot_values.socclk = 0;
554 smu->smu_table.boot_values.dcefclk = 0;
555 smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
556 smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
557 smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
558 smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
559 smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
560 smu->smu_table.boot_values.pp_table_id = 0;
561 smu->smu_table.boot_values.firmware_caps = v_3_1->firmware_capability;
562 break;
563 case 3:
564 case 4:
565 default:
566 v_3_3 = (struct atom_firmware_info_v3_3 *)header;
567 smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
568 smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
569 smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
570 smu->smu_table.boot_values.socclk = 0;
571 smu->smu_table.boot_values.dcefclk = 0;
572 smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
573 smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
574 smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
575 smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
576 smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
577 smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
578 smu->smu_table.boot_values.firmware_caps = v_3_3->firmware_capability;
579 }
580
581 smu->smu_table.boot_values.format_revision = header->format_revision;
582 smu->smu_table.boot_values.content_revision = header->content_revision;
583
584 smu_v11_0_atom_get_smu_clockinfo(smu->adev,
585 (uint8_t)SMU11_SYSPLL0_SOCCLK_ID,
586 (uint8_t)0,
587 &smu->smu_table.boot_values.socclk);
588
589 smu_v11_0_atom_get_smu_clockinfo(smu->adev,
590 (uint8_t)SMU11_SYSPLL0_DCEFCLK_ID,
591 (uint8_t)0,
592 &smu->smu_table.boot_values.dcefclk);
593
594 smu_v11_0_atom_get_smu_clockinfo(smu->adev,
595 (uint8_t)SMU11_SYSPLL0_ECLK_ID,
596 (uint8_t)0,
597 &smu->smu_table.boot_values.eclk);
598
599 smu_v11_0_atom_get_smu_clockinfo(smu->adev,
600 (uint8_t)SMU11_SYSPLL0_VCLK_ID,
601 (uint8_t)0,
602 &smu->smu_table.boot_values.vclk);
603
604 smu_v11_0_atom_get_smu_clockinfo(smu->adev,
605 (uint8_t)SMU11_SYSPLL0_DCLK_ID,
606 (uint8_t)0,
607 &smu->smu_table.boot_values.dclk);
608
609 if ((smu->smu_table.boot_values.format_revision == 3) &&
610 (smu->smu_table.boot_values.content_revision >= 2))
611 smu_v11_0_atom_get_smu_clockinfo(smu->adev,
612 (uint8_t)SMU11_SYSPLL1_0_FCLK_ID,
613 (uint8_t)SMU11_SYSPLL1_2_ID,
614 &smu->smu_table.boot_values.fclk);
615
616 smu_v11_0_atom_get_smu_clockinfo(smu->adev,
617 (uint8_t)SMU11_SYSPLL3_1_LCLK_ID,
618 (uint8_t)SMU11_SYSPLL3_1_ID,
619 &smu->smu_table.boot_values.lclk);
620
621 return 0;
622 }
623
smu_v11_0_notify_memory_pool_location(struct smu_context * smu)624 int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
625 {
626 struct smu_table_context *smu_table = &smu->smu_table;
627 struct smu_table *memory_pool = &smu_table->memory_pool;
628 int ret = 0;
629 uint64_t address;
630 uint32_t address_low, address_high;
631
632 if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
633 return ret;
634
635 address = (uintptr_t)memory_pool->cpu_addr;
636 address_high = (uint32_t)upper_32_bits(address);
637 address_low = (uint32_t)lower_32_bits(address);
638
639 ret = smu_cmn_send_smc_msg_with_param(smu,
640 SMU_MSG_SetSystemVirtualDramAddrHigh,
641 address_high,
642 NULL);
643 if (ret)
644 return ret;
645 ret = smu_cmn_send_smc_msg_with_param(smu,
646 SMU_MSG_SetSystemVirtualDramAddrLow,
647 address_low,
648 NULL);
649 if (ret)
650 return ret;
651
652 address = memory_pool->mc_address;
653 address_high = (uint32_t)upper_32_bits(address);
654 address_low = (uint32_t)lower_32_bits(address);
655
656 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
657 address_high, NULL);
658 if (ret)
659 return ret;
660 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
661 address_low, NULL);
662 if (ret)
663 return ret;
664 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
665 (uint32_t)memory_pool->size, NULL);
666 if (ret)
667 return ret;
668
669 return ret;
670 }
671
smu_v11_0_set_min_deep_sleep_dcefclk(struct smu_context * smu,uint32_t clk)672 int smu_v11_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
673 {
674 int ret;
675
676 ret = smu_cmn_send_smc_msg_with_param(smu,
677 SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL);
678 if (ret)
679 dev_err(smu->adev->dev, "SMU11 attempt to set divider for DCEFCLK Failed!");
680
681 return ret;
682 }
683
smu_v11_0_set_driver_table_location(struct smu_context * smu)684 int smu_v11_0_set_driver_table_location(struct smu_context *smu)
685 {
686 struct smu_table *driver_table = &smu->smu_table.driver_table;
687 int ret = 0;
688
689 if (driver_table->mc_address) {
690 ret = smu_cmn_send_smc_msg_with_param(smu,
691 SMU_MSG_SetDriverDramAddrHigh,
692 upper_32_bits(driver_table->mc_address),
693 NULL);
694 if (!ret)
695 ret = smu_cmn_send_smc_msg_with_param(smu,
696 SMU_MSG_SetDriverDramAddrLow,
697 lower_32_bits(driver_table->mc_address),
698 NULL);
699 }
700
701 return ret;
702 }
703
smu_v11_0_set_tool_table_location(struct smu_context * smu)704 int smu_v11_0_set_tool_table_location(struct smu_context *smu)
705 {
706 int ret = 0;
707 struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];
708
709 if (tool_table->mc_address) {
710 ret = smu_cmn_send_smc_msg_with_param(smu,
711 SMU_MSG_SetToolsDramAddrHigh,
712 upper_32_bits(tool_table->mc_address),
713 NULL);
714 if (!ret)
715 ret = smu_cmn_send_smc_msg_with_param(smu,
716 SMU_MSG_SetToolsDramAddrLow,
717 lower_32_bits(tool_table->mc_address),
718 NULL);
719 }
720
721 return ret;
722 }
723
smu_v11_0_init_display_count(struct smu_context * smu,uint32_t count)724 int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
725 {
726 struct amdgpu_device *adev = smu->adev;
727
728 /* Navy_Flounder/Dimgrey_Cavefish do not support to change
729 * display num currently
730 */
731 if (adev->asic_type >= CHIP_NAVY_FLOUNDER &&
732 adev->asic_type <= CHIP_DIMGREY_CAVEFISH)
733 return 0;
734
735 return smu_cmn_send_smc_msg_with_param(smu,
736 SMU_MSG_NumOfDisplays,
737 count,
738 NULL);
739 }
740
741
smu_v11_0_set_allowed_mask(struct smu_context * smu)742 int smu_v11_0_set_allowed_mask(struct smu_context *smu)
743 {
744 struct smu_feature *feature = &smu->smu_feature;
745 int ret = 0;
746 uint32_t feature_mask[2];
747
748 if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64) {
749 ret = -EINVAL;
750 goto failed;
751 }
752
753 bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
754
755 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
756 feature_mask[1], NULL);
757 if (ret)
758 goto failed;
759
760 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
761 feature_mask[0], NULL);
762 if (ret)
763 goto failed;
764
765 failed:
766 return ret;
767 }
768
smu_v11_0_system_features_control(struct smu_context * smu,bool en)769 int smu_v11_0_system_features_control(struct smu_context *smu,
770 bool en)
771 {
772 struct smu_feature *feature = &smu->smu_feature;
773 uint32_t feature_mask[2];
774 int ret = 0;
775
776 ret = smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
777 SMU_MSG_DisableAllSmuFeatures), NULL);
778 if (ret)
779 return ret;
780
781 bitmap_zero(feature->enabled, feature->feature_num);
782 bitmap_zero(feature->supported, feature->feature_num);
783
784 if (en) {
785 ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
786 if (ret)
787 return ret;
788
789 bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
790 feature->feature_num);
791 bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
792 feature->feature_num);
793 }
794
795 return ret;
796 }
797
smu_v11_0_notify_display_change(struct smu_context * smu)798 int smu_v11_0_notify_display_change(struct smu_context *smu)
799 {
800 int ret = 0;
801
802 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
803 smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
804 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
805
806 return ret;
807 }
808
809 static int
smu_v11_0_get_max_sustainable_clock(struct smu_context * smu,uint32_t * clock,enum smu_clk_type clock_select)810 smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
811 enum smu_clk_type clock_select)
812 {
813 int ret = 0;
814 int clk_id;
815
816 if ((smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
817 (smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetMaxDpmFreq) < 0))
818 return 0;
819
820 clk_id = smu_cmn_to_asic_specific_index(smu,
821 CMN2ASIC_MAPPING_CLK,
822 clock_select);
823 if (clk_id < 0)
824 return -EINVAL;
825
826 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
827 clk_id << 16, clock);
828 if (ret) {
829 dev_err(smu->adev->dev, "[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
830 return ret;
831 }
832
833 if (*clock != 0)
834 return 0;
835
836 /* if DC limit is zero, return AC limit */
837 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
838 clk_id << 16, clock);
839 if (ret) {
840 dev_err(smu->adev->dev, "[GetMaxSustainableClock] failed to get max AC clock from SMC!");
841 return ret;
842 }
843
844 return 0;
845 }
846
smu_v11_0_init_max_sustainable_clocks(struct smu_context * smu)847 int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
848 {
849 struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks =
850 smu->smu_table.max_sustainable_clocks;
851 int ret = 0;
852
853 max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
854 max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100;
855 max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100;
856 max_sustainable_clocks->display_clock = 0xFFFFFFFF;
857 max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
858 max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;
859
860 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
861 ret = smu_v11_0_get_max_sustainable_clock(smu,
862 &(max_sustainable_clocks->uclock),
863 SMU_UCLK);
864 if (ret) {
865 dev_err(smu->adev->dev, "[%s] failed to get max UCLK from SMC!",
866 __func__);
867 return ret;
868 }
869 }
870
871 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
872 ret = smu_v11_0_get_max_sustainable_clock(smu,
873 &(max_sustainable_clocks->soc_clock),
874 SMU_SOCCLK);
875 if (ret) {
876 dev_err(smu->adev->dev, "[%s] failed to get max SOCCLK from SMC!",
877 __func__);
878 return ret;
879 }
880 }
881
882 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
883 ret = smu_v11_0_get_max_sustainable_clock(smu,
884 &(max_sustainable_clocks->dcef_clock),
885 SMU_DCEFCLK);
886 if (ret) {
887 dev_err(smu->adev->dev, "[%s] failed to get max DCEFCLK from SMC!",
888 __func__);
889 return ret;
890 }
891
892 ret = smu_v11_0_get_max_sustainable_clock(smu,
893 &(max_sustainable_clocks->display_clock),
894 SMU_DISPCLK);
895 if (ret) {
896 dev_err(smu->adev->dev, "[%s] failed to get max DISPCLK from SMC!",
897 __func__);
898 return ret;
899 }
900 ret = smu_v11_0_get_max_sustainable_clock(smu,
901 &(max_sustainable_clocks->phy_clock),
902 SMU_PHYCLK);
903 if (ret) {
904 dev_err(smu->adev->dev, "[%s] failed to get max PHYCLK from SMC!",
905 __func__);
906 return ret;
907 }
908 ret = smu_v11_0_get_max_sustainable_clock(smu,
909 &(max_sustainable_clocks->pixel_clock),
910 SMU_PIXCLK);
911 if (ret) {
912 dev_err(smu->adev->dev, "[%s] failed to get max PIXCLK from SMC!",
913 __func__);
914 return ret;
915 }
916 }
917
918 if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
919 max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;
920
921 return 0;
922 }
923
smu_v11_0_get_current_power_limit(struct smu_context * smu,uint32_t * power_limit)924 int smu_v11_0_get_current_power_limit(struct smu_context *smu,
925 uint32_t *power_limit)
926 {
927 int power_src;
928 int ret = 0;
929
930 if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
931 return -EINVAL;
932
933 power_src = smu_cmn_to_asic_specific_index(smu,
934 CMN2ASIC_MAPPING_PWR,
935 smu->adev->pm.ac_power ?
936 SMU_POWER_SOURCE_AC :
937 SMU_POWER_SOURCE_DC);
938 if (power_src < 0)
939 return -EINVAL;
940
941 /*
942 * BIT 24-31: ControllerId (only PPT0 is supported for now)
943 * BIT 16-23: PowerSource
944 */
945 ret = smu_cmn_send_smc_msg_with_param(smu,
946 SMU_MSG_GetPptLimit,
947 (0 << 24) | (power_src << 16),
948 power_limit);
949 if (ret)
950 dev_err(smu->adev->dev, "[%s] get PPT limit failed!", __func__);
951
952 return ret;
953 }
954
smu_v11_0_set_power_limit(struct smu_context * smu,uint32_t n)955 int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
956 {
957 int power_src;
958 int ret = 0;
959
960 if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
961 dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
962 return -EOPNOTSUPP;
963 }
964
965 power_src = smu_cmn_to_asic_specific_index(smu,
966 CMN2ASIC_MAPPING_PWR,
967 smu->adev->pm.ac_power ?
968 SMU_POWER_SOURCE_AC :
969 SMU_POWER_SOURCE_DC);
970 if (power_src < 0)
971 return -EINVAL;
972
973 /*
974 * BIT 24-31: ControllerId (only PPT0 is supported for now)
975 * BIT 16-23: PowerSource
976 * BIT 0-15: PowerLimit
977 */
978 n &= 0xFFFF;
979 n |= 0 << 24;
980 n |= (power_src) << 16;
981 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n, NULL);
982 if (ret) {
983 dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__);
984 return ret;
985 }
986
987 smu->current_power_limit = n;
988
989 return 0;
990 }
991
smu_v11_0_ack_ac_dc_interrupt(struct smu_context * smu)992 static int smu_v11_0_ack_ac_dc_interrupt(struct smu_context *smu)
993 {
994 return smu_cmn_send_smc_msg(smu,
995 SMU_MSG_ReenableAcDcInterrupt,
996 NULL);
997 }
998
smu_v11_0_process_pending_interrupt(struct smu_context * smu)999 static int smu_v11_0_process_pending_interrupt(struct smu_context *smu)
1000 {
1001 int ret = 0;
1002
1003 if (smu->dc_controlled_by_gpio &&
1004 smu_cmn_feature_is_enabled(smu, SMU_FEATURE_ACDC_BIT))
1005 ret = smu_v11_0_ack_ac_dc_interrupt(smu);
1006
1007 return ret;
1008 }
1009
smu_v11_0_interrupt_work(struct smu_context * smu)1010 void smu_v11_0_interrupt_work(struct smu_context *smu)
1011 {
1012 if (smu_v11_0_ack_ac_dc_interrupt(smu))
1013 dev_err(smu->adev->dev, "Ack AC/DC interrupt Failed!\n");
1014 }
1015
smu_v11_0_enable_thermal_alert(struct smu_context * smu)1016 int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
1017 {
1018 int ret = 0;
1019
1020 if (smu->smu_table.thermal_controller_type) {
1021 ret = amdgpu_irq_get(smu->adev, &smu->irq_source, 0);
1022 if (ret)
1023 return ret;
1024 }
1025
1026 /*
1027 * After init there might have been missed interrupts triggered
1028 * before driver registers for interrupt (Ex. AC/DC).
1029 */
1030 return smu_v11_0_process_pending_interrupt(smu);
1031 }
1032
smu_v11_0_disable_thermal_alert(struct smu_context * smu)1033 int smu_v11_0_disable_thermal_alert(struct smu_context *smu)
1034 {
1035 return amdgpu_irq_put(smu->adev, &smu->irq_source, 0);
1036 }
1037
convert_to_vddc(uint8_t vid)1038 static uint16_t convert_to_vddc(uint8_t vid)
1039 {
1040 return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE);
1041 }
1042
smu_v11_0_get_gfx_vdd(struct smu_context * smu,uint32_t * value)1043 int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
1044 {
1045 struct amdgpu_device *adev = smu->adev;
1046 uint32_t vdd = 0, val_vid = 0;
1047
1048 if (!value)
1049 return -EINVAL;
1050 val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) &
1051 SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
1052 SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;
1053
1054 vdd = (uint32_t)convert_to_vddc((uint8_t)val_vid);
1055
1056 *value = vdd;
1057
1058 return 0;
1059
1060 }
1061
1062 int
smu_v11_0_display_clock_voltage_request(struct smu_context * smu,struct pp_display_clock_request * clock_req)1063 smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
1064 struct pp_display_clock_request
1065 *clock_req)
1066 {
1067 enum amd_pp_clock_type clk_type = clock_req->clock_type;
1068 int ret = 0;
1069 enum smu_clk_type clk_select = 0;
1070 uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
1071
1072 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
1073 smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
1074 switch (clk_type) {
1075 case amd_pp_dcef_clock:
1076 clk_select = SMU_DCEFCLK;
1077 break;
1078 case amd_pp_disp_clock:
1079 clk_select = SMU_DISPCLK;
1080 break;
1081 case amd_pp_pixel_clock:
1082 clk_select = SMU_PIXCLK;
1083 break;
1084 case amd_pp_phy_clock:
1085 clk_select = SMU_PHYCLK;
1086 break;
1087 case amd_pp_mem_clock:
1088 clk_select = SMU_UCLK;
1089 break;
1090 default:
1091 dev_info(smu->adev->dev, "[%s] Invalid Clock Type!", __func__);
1092 ret = -EINVAL;
1093 break;
1094 }
1095
1096 if (ret)
1097 goto failed;
1098
1099 if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
1100 return 0;
1101
1102 ret = smu_v11_0_set_hard_freq_limited_range(smu, clk_select, clk_freq, 0);
1103
1104 if(clk_select == SMU_UCLK)
1105 smu->hard_min_uclk_req_from_dal = clk_freq;
1106 }
1107
1108 failed:
1109 return ret;
1110 }
1111
smu_v11_0_gfx_off_control(struct smu_context * smu,bool enable)1112 int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
1113 {
1114 int ret = 0;
1115 struct amdgpu_device *adev = smu->adev;
1116
1117 switch (adev->asic_type) {
1118 case CHIP_NAVI10:
1119 case CHIP_NAVI14:
1120 case CHIP_NAVI12:
1121 case CHIP_SIENNA_CICHLID:
1122 case CHIP_NAVY_FLOUNDER:
1123 case CHIP_DIMGREY_CAVEFISH:
1124 case CHIP_VANGOGH:
1125 if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
1126 return 0;
1127 if (enable)
1128 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
1129 else
1130 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
1131 break;
1132 default:
1133 break;
1134 }
1135
1136 return ret;
1137 }
1138
1139 uint32_t
smu_v11_0_get_fan_control_mode(struct smu_context * smu)1140 smu_v11_0_get_fan_control_mode(struct smu_context *smu)
1141 {
1142 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
1143 return AMD_FAN_CTRL_AUTO;
1144 else
1145 return smu->user_dpm_profile.fan_mode;
1146 }
1147
1148 static int
smu_v11_0_auto_fan_control(struct smu_context * smu,bool auto_fan_control)1149 smu_v11_0_auto_fan_control(struct smu_context *smu, bool auto_fan_control)
1150 {
1151 int ret = 0;
1152
1153 if (!smu_cmn_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
1154 return 0;
1155
1156 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control);
1157 if (ret)
1158 dev_err(smu->adev->dev, "[%s]%s smc FAN CONTROL feature failed!",
1159 __func__, (auto_fan_control ? "Start" : "Stop"));
1160
1161 return ret;
1162 }
1163
1164 static int
smu_v11_0_set_fan_static_mode(struct smu_context * smu,uint32_t mode)1165 smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
1166 {
1167 struct amdgpu_device *adev = smu->adev;
1168
1169 WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
1170 REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
1171 CG_FDO_CTRL2, TMIN, 0));
1172 WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
1173 REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
1174 CG_FDO_CTRL2, FDO_PWM_MODE, mode));
1175
1176 return 0;
1177 }
1178
1179 int
smu_v11_0_set_fan_speed_percent(struct smu_context * smu,uint32_t speed)1180 smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
1181 {
1182 struct amdgpu_device *adev = smu->adev;
1183 uint32_t duty100, duty;
1184 uint64_t tmp64;
1185
1186 if (speed > 100)
1187 speed = 100;
1188
1189 if (smu_v11_0_auto_fan_control(smu, 0))
1190 return -EINVAL;
1191
1192 duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
1193 CG_FDO_CTRL1, FMAX_DUTY100);
1194 if (!duty100)
1195 return -EINVAL;
1196
1197 tmp64 = (uint64_t)speed * duty100;
1198 do_div(tmp64, 100);
1199 duty = (uint32_t)tmp64;
1200
1201 WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
1202 REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0),
1203 CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));
1204
1205 return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
1206 }
1207
1208 int
smu_v11_0_set_fan_control_mode(struct smu_context * smu,uint32_t mode)1209 smu_v11_0_set_fan_control_mode(struct smu_context *smu,
1210 uint32_t mode)
1211 {
1212 int ret = 0;
1213
1214 switch (mode) {
1215 case AMD_FAN_CTRL_NONE:
1216 ret = smu_v11_0_set_fan_speed_percent(smu, 100);
1217 break;
1218 case AMD_FAN_CTRL_MANUAL:
1219 ret = smu_v11_0_auto_fan_control(smu, 0);
1220 break;
1221 case AMD_FAN_CTRL_AUTO:
1222 ret = smu_v11_0_auto_fan_control(smu, 1);
1223 break;
1224 default:
1225 break;
1226 }
1227
1228 if (ret) {
1229 dev_err(smu->adev->dev, "[%s]Set fan control mode failed!", __func__);
1230 return -EINVAL;
1231 }
1232
1233 return ret;
1234 }
1235
smu_v11_0_set_xgmi_pstate(struct smu_context * smu,uint32_t pstate)1236 int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
1237 uint32_t pstate)
1238 {
1239 return smu_cmn_send_smc_msg_with_param(smu,
1240 SMU_MSG_SetXgmiMode,
1241 pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3,
1242 NULL);
1243 }
1244
smu_v11_0_set_irq_state(struct amdgpu_device * adev,struct amdgpu_irq_src * source,unsigned tyep,enum amdgpu_interrupt_state state)1245 static int smu_v11_0_set_irq_state(struct amdgpu_device *adev,
1246 struct amdgpu_irq_src *source,
1247 unsigned tyep,
1248 enum amdgpu_interrupt_state state)
1249 {
1250 struct smu_context *smu = &adev->smu;
1251 uint32_t low, high;
1252 uint32_t val = 0;
1253
1254 switch (state) {
1255 case AMDGPU_IRQ_STATE_DISABLE:
1256 /* For THM irqs */
1257 val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
1258 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 1);
1259 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 1);
1260 WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
1261
1262 WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0);
1263
1264 /* For MP1 SW irqs */
1265 val = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL);
1266 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
1267 WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, val);
1268
1269 break;
1270 case AMDGPU_IRQ_STATE_ENABLE:
1271 /* For THM irqs */
1272 low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP,
1273 smu->thermal_range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);
1274 high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP,
1275 smu->thermal_range.software_shutdown_temp);
1276
1277 val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
1278 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
1279 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
1280 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0);
1281 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0);
1282 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff));
1283 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff));
1284 val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
1285 WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
1286
1287 val = (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
1288 val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
1289 val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);
1290 WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val);
1291
1292 /* For MP1 SW irqs */
1293 val = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT);
1294 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
1295 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
1296 WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT, val);
1297
1298 val = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL);
1299 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
1300 WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, val);
1301
1302 break;
1303 default:
1304 break;
1305 }
1306
1307 return 0;
1308 }
1309
1310 #define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
1311 #define THM_11_0__SRCID__THM_DIG_THERM_H2L 1 /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */
1312
1313 #define SMUIO_11_0__SRCID__SMUIO_GPIO19 83
1314
smu_v11_0_irq_process(struct amdgpu_device * adev,struct amdgpu_irq_src * source,struct amdgpu_iv_entry * entry)1315 static int smu_v11_0_irq_process(struct amdgpu_device *adev,
1316 struct amdgpu_irq_src *source,
1317 struct amdgpu_iv_entry *entry)
1318 {
1319 struct smu_context *smu = &adev->smu;
1320 uint32_t client_id = entry->client_id;
1321 uint32_t src_id = entry->src_id;
1322 /*
1323 * ctxid is used to distinguish different
1324 * events for SMCToHost interrupt.
1325 */
1326 uint32_t ctxid = entry->src_data[0];
1327 uint32_t data;
1328
1329 if (client_id == SOC15_IH_CLIENTID_THM) {
1330 switch (src_id) {
1331 case THM_11_0__SRCID__THM_DIG_THERM_L2H:
1332 dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
1333 /*
1334 * SW CTF just occurred.
1335 * Try to do a graceful shutdown to prevent further damage.
1336 */
1337 dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
1338 orderly_poweroff(true);
1339 break;
1340 case THM_11_0__SRCID__THM_DIG_THERM_H2L:
1341 dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n");
1342 break;
1343 default:
1344 dev_emerg(adev->dev, "ERROR: GPU under temperature range unknown src id (%d)\n",
1345 src_id);
1346 break;
1347 }
1348 } else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) {
1349 dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
1350 /*
1351 * HW CTF just occurred. Shutdown to prevent further damage.
1352 */
1353 dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
1354 orderly_poweroff(true);
1355 } else if (client_id == SOC15_IH_CLIENTID_MP1) {
1356 if (src_id == 0xfe) {
1357 /* ACK SMUToHost interrupt */
1358 data = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL);
1359 data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
1360 WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, data);
1361
1362 switch (ctxid) {
1363 case 0x3:
1364 dev_dbg(adev->dev, "Switched to AC mode!\n");
1365 schedule_work(&smu->interrupt_work);
1366 break;
1367 case 0x4:
1368 dev_dbg(adev->dev, "Switched to DC mode!\n");
1369 schedule_work(&smu->interrupt_work);
1370 break;
1371 case 0x7:
1372 /*
1373 * Increment the throttle interrupt counter
1374 */
1375 atomic64_inc(&smu->throttle_int_counter);
1376
1377 if (!atomic_read(&adev->throttling_logging_enabled))
1378 return 0;
1379
1380 if (__ratelimit(&adev->throttling_logging_rs))
1381 schedule_work(&smu->throttling_logging_work);
1382
1383 break;
1384 }
1385 }
1386 }
1387
1388 return 0;
1389 }
1390
1391 static const struct amdgpu_irq_src_funcs smu_v11_0_irq_funcs =
1392 {
1393 .set = smu_v11_0_set_irq_state,
1394 .process = smu_v11_0_irq_process,
1395 };
1396
smu_v11_0_register_irq_handler(struct smu_context * smu)1397 int smu_v11_0_register_irq_handler(struct smu_context *smu)
1398 {
1399 struct amdgpu_device *adev = smu->adev;
1400 struct amdgpu_irq_src *irq_src = &smu->irq_source;
1401 int ret = 0;
1402
1403 irq_src->num_types = 1;
1404 irq_src->funcs = &smu_v11_0_irq_funcs;
1405
1406 ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
1407 THM_11_0__SRCID__THM_DIG_THERM_L2H,
1408 irq_src);
1409 if (ret)
1410 return ret;
1411
1412 ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
1413 THM_11_0__SRCID__THM_DIG_THERM_H2L,
1414 irq_src);
1415 if (ret)
1416 return ret;
1417
1418 /* Register CTF(GPIO_19) interrupt */
1419 ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_ROM_SMUIO,
1420 SMUIO_11_0__SRCID__SMUIO_GPIO19,
1421 irq_src);
1422 if (ret)
1423 return ret;
1424
1425 ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
1426 0xfe,
1427 irq_src);
1428 if (ret)
1429 return ret;
1430
1431 return ret;
1432 }
1433
smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context * smu,struct pp_smu_nv_clock_table * max_clocks)1434 int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
1435 struct pp_smu_nv_clock_table *max_clocks)
1436 {
1437 struct smu_table_context *table_context = &smu->smu_table;
1438 struct smu_11_0_max_sustainable_clocks *sustainable_clocks = NULL;
1439
1440 if (!max_clocks || !table_context->max_sustainable_clocks)
1441 return -EINVAL;
1442
1443 sustainable_clocks = table_context->max_sustainable_clocks;
1444
1445 max_clocks->dcfClockInKhz =
1446 (unsigned int) sustainable_clocks->dcef_clock * 1000;
1447 max_clocks->displayClockInKhz =
1448 (unsigned int) sustainable_clocks->display_clock * 1000;
1449 max_clocks->phyClockInKhz =
1450 (unsigned int) sustainable_clocks->phy_clock * 1000;
1451 max_clocks->pixelClockInKhz =
1452 (unsigned int) sustainable_clocks->pixel_clock * 1000;
1453 max_clocks->uClockInKhz =
1454 (unsigned int) sustainable_clocks->uclock * 1000;
1455 max_clocks->socClockInKhz =
1456 (unsigned int) sustainable_clocks->soc_clock * 1000;
1457 max_clocks->dscClockInKhz = 0;
1458 max_clocks->dppClockInKhz = 0;
1459 max_clocks->fabricClockInKhz = 0;
1460
1461 return 0;
1462 }
1463
smu_v11_0_set_azalia_d3_pme(struct smu_context * smu)1464 int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
1465 {
1466 return smu_cmn_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL);
1467 }
1468
smu_v11_0_baco_set_armd3_sequence(struct smu_context * smu,enum smu_v11_0_baco_seq baco_seq)1469 static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v11_0_baco_seq baco_seq)
1470 {
1471 return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL);
1472 }
1473
smu_v11_0_baco_is_support(struct smu_context * smu)1474 bool smu_v11_0_baco_is_support(struct smu_context *smu)
1475 {
1476 struct smu_baco_context *smu_baco = &smu->smu_baco;
1477
1478 if (!smu_baco->platform_support)
1479 return false;
1480
1481 /* Arcturus does not support this bit mask */
1482 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
1483 !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
1484 return false;
1485
1486 return true;
1487 }
1488
smu_v11_0_baco_get_state(struct smu_context * smu)1489 enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
1490 {
1491 struct smu_baco_context *smu_baco = &smu->smu_baco;
1492 enum smu_baco_state baco_state;
1493
1494 mutex_lock(&smu_baco->mutex);
1495 baco_state = smu_baco->state;
1496 mutex_unlock(&smu_baco->mutex);
1497
1498 return baco_state;
1499 }
1500
1501 #define D3HOT_BACO_SEQUENCE 0
1502 #define D3HOT_BAMACO_SEQUENCE 2
1503
smu_v11_0_baco_set_state(struct smu_context * smu,enum smu_baco_state state)1504 int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
1505 {
1506 struct smu_baco_context *smu_baco = &smu->smu_baco;
1507 struct amdgpu_device *adev = smu->adev;
1508 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
1509 uint32_t data;
1510 int ret = 0;
1511
1512 if (smu_v11_0_baco_get_state(smu) == state)
1513 return 0;
1514
1515 mutex_lock(&smu_baco->mutex);
1516
1517 if (state == SMU_BACO_STATE_ENTER) {
1518 switch (adev->asic_type) {
1519 case CHIP_SIENNA_CICHLID:
1520 case CHIP_NAVY_FLOUNDER:
1521 case CHIP_DIMGREY_CAVEFISH:
1522 if (amdgpu_runtime_pm == 2)
1523 ret = smu_cmn_send_smc_msg_with_param(smu,
1524 SMU_MSG_EnterBaco,
1525 D3HOT_BAMACO_SEQUENCE,
1526 NULL);
1527 else
1528 ret = smu_cmn_send_smc_msg_with_param(smu,
1529 SMU_MSG_EnterBaco,
1530 D3HOT_BACO_SEQUENCE,
1531 NULL);
1532 break;
1533 default:
1534 if (!ras || !ras->supported || adev->gmc.xgmi.pending_reset) {
1535 if (adev->asic_type == CHIP_ARCTURUS) {
1536 data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT);
1537 data |= 0x80000000;
1538 WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT, data);
1539 } else {
1540 data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
1541 data |= 0x80000000;
1542 WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
1543 }
1544
1545 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0, NULL);
1546 } else {
1547 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1, NULL);
1548 }
1549 break;
1550 }
1551
1552 } else {
1553 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_ExitBaco, NULL);
1554 if (ret)
1555 goto out;
1556
1557 /* clear vbios scratch 6 and 7 for coming asic reinit */
1558 WREG32(adev->bios_scratch_reg_offset + 6, 0);
1559 WREG32(adev->bios_scratch_reg_offset + 7, 0);
1560 }
1561 if (ret)
1562 goto out;
1563
1564 smu_baco->state = state;
1565 out:
1566 mutex_unlock(&smu_baco->mutex);
1567 return ret;
1568 }
1569
smu_v11_0_baco_enter(struct smu_context * smu)1570 int smu_v11_0_baco_enter(struct smu_context *smu)
1571 {
1572 struct amdgpu_device *adev = smu->adev;
1573 int ret = 0;
1574
1575 /* Arcturus does not need this audio workaround */
1576 if (adev->asic_type != CHIP_ARCTURUS) {
1577 ret = smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
1578 if (ret)
1579 return ret;
1580 }
1581
1582 ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_ENTER);
1583 if (ret)
1584 return ret;
1585
1586 msleep(10);
1587
1588 return ret;
1589 }
1590
smu_v11_0_baco_exit(struct smu_context * smu)1591 int smu_v11_0_baco_exit(struct smu_context *smu)
1592 {
1593 return smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_EXIT);
1594 }
1595
smu_v11_0_mode1_reset(struct smu_context * smu)1596 int smu_v11_0_mode1_reset(struct smu_context *smu)
1597 {
1598 int ret = 0;
1599
1600 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
1601 if (!ret)
1602 msleep(SMU11_MODE1_RESET_WAIT_TIME_IN_MS);
1603
1604 return ret;
1605 }
1606
smu_v11_0_set_light_sbr(struct smu_context * smu,bool enable)1607 int smu_v11_0_set_light_sbr(struct smu_context *smu, bool enable)
1608 {
1609 int ret = 0;
1610
1611 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_LightSBR, enable ? 1 : 0, NULL);
1612
1613 return ret;
1614 }
1615
1616
smu_v11_0_get_dpm_ultimate_freq(struct smu_context * smu,enum smu_clk_type clk_type,uint32_t * min,uint32_t * max)1617 int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
1618 uint32_t *min, uint32_t *max)
1619 {
1620 int ret = 0, clk_id = 0;
1621 uint32_t param = 0;
1622 uint32_t clock_limit;
1623
1624 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
1625 switch (clk_type) {
1626 case SMU_MCLK:
1627 case SMU_UCLK:
1628 clock_limit = smu->smu_table.boot_values.uclk;
1629 break;
1630 case SMU_GFXCLK:
1631 case SMU_SCLK:
1632 clock_limit = smu->smu_table.boot_values.gfxclk;
1633 break;
1634 case SMU_SOCCLK:
1635 clock_limit = smu->smu_table.boot_values.socclk;
1636 break;
1637 default:
1638 clock_limit = 0;
1639 break;
1640 }
1641
1642 /* clock in Mhz unit */
1643 if (min)
1644 *min = clock_limit / 100;
1645 if (max)
1646 *max = clock_limit / 100;
1647
1648 return 0;
1649 }
1650
1651 clk_id = smu_cmn_to_asic_specific_index(smu,
1652 CMN2ASIC_MAPPING_CLK,
1653 clk_type);
1654 if (clk_id < 0) {
1655 ret = -EINVAL;
1656 goto failed;
1657 }
1658 param = (clk_id & 0xffff) << 16;
1659
1660 if (max) {
1661 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param, max);
1662 if (ret)
1663 goto failed;
1664 }
1665
1666 if (min) {
1667 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min);
1668 if (ret)
1669 goto failed;
1670 }
1671
1672 failed:
1673 return ret;
1674 }
1675
smu_v11_0_set_soft_freq_limited_range(struct smu_context * smu,enum smu_clk_type clk_type,uint32_t min,uint32_t max)1676 int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
1677 enum smu_clk_type clk_type,
1678 uint32_t min,
1679 uint32_t max)
1680 {
1681 struct amdgpu_device *adev = smu->adev;
1682 int ret = 0, clk_id = 0;
1683 uint32_t param;
1684
1685 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
1686 return 0;
1687
1688 clk_id = smu_cmn_to_asic_specific_index(smu,
1689 CMN2ASIC_MAPPING_CLK,
1690 clk_type);
1691 if (clk_id < 0)
1692 return clk_id;
1693
1694 if (clk_type == SMU_GFXCLK)
1695 amdgpu_gfx_off_ctrl(adev, false);
1696
1697 if (max > 0) {
1698 param = (uint32_t)((clk_id << 16) | (max & 0xffff));
1699 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
1700 param, NULL);
1701 if (ret)
1702 goto out;
1703 }
1704
1705 if (min > 0) {
1706 param = (uint32_t)((clk_id << 16) | (min & 0xffff));
1707 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
1708 param, NULL);
1709 if (ret)
1710 goto out;
1711 }
1712
1713 out:
1714 if (clk_type == SMU_GFXCLK)
1715 amdgpu_gfx_off_ctrl(adev, true);
1716
1717 return ret;
1718 }
1719
smu_v11_0_set_hard_freq_limited_range(struct smu_context * smu,enum smu_clk_type clk_type,uint32_t min,uint32_t max)1720 int smu_v11_0_set_hard_freq_limited_range(struct smu_context *smu,
1721 enum smu_clk_type clk_type,
1722 uint32_t min,
1723 uint32_t max)
1724 {
1725 int ret = 0, clk_id = 0;
1726 uint32_t param;
1727
1728 if (min <= 0 && max <= 0)
1729 return -EINVAL;
1730
1731 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
1732 return 0;
1733
1734 clk_id = smu_cmn_to_asic_specific_index(smu,
1735 CMN2ASIC_MAPPING_CLK,
1736 clk_type);
1737 if (clk_id < 0)
1738 return clk_id;
1739
1740 if (max > 0) {
1741 param = (uint32_t)((clk_id << 16) | (max & 0xffff));
1742 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
1743 param, NULL);
1744 if (ret)
1745 return ret;
1746 }
1747
1748 if (min > 0) {
1749 param = (uint32_t)((clk_id << 16) | (min & 0xffff));
1750 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
1751 param, NULL);
1752 if (ret)
1753 return ret;
1754 }
1755
1756 return ret;
1757 }
1758
smu_v11_0_set_performance_level(struct smu_context * smu,enum amd_dpm_forced_level level)1759 int smu_v11_0_set_performance_level(struct smu_context *smu,
1760 enum amd_dpm_forced_level level)
1761 {
1762 struct smu_11_0_dpm_context *dpm_context =
1763 smu->smu_dpm.dpm_context;
1764 struct smu_11_0_dpm_table *gfx_table =
1765 &dpm_context->dpm_tables.gfx_table;
1766 struct smu_11_0_dpm_table *mem_table =
1767 &dpm_context->dpm_tables.uclk_table;
1768 struct smu_11_0_dpm_table *soc_table =
1769 &dpm_context->dpm_tables.soc_table;
1770 struct smu_umd_pstate_table *pstate_table =
1771 &smu->pstate_table;
1772 struct amdgpu_device *adev = smu->adev;
1773 uint32_t sclk_min = 0, sclk_max = 0;
1774 uint32_t mclk_min = 0, mclk_max = 0;
1775 uint32_t socclk_min = 0, socclk_max = 0;
1776 int ret = 0;
1777
1778 switch (level) {
1779 case AMD_DPM_FORCED_LEVEL_HIGH:
1780 sclk_min = sclk_max = gfx_table->max;
1781 mclk_min = mclk_max = mem_table->max;
1782 socclk_min = socclk_max = soc_table->max;
1783 break;
1784 case AMD_DPM_FORCED_LEVEL_LOW:
1785 sclk_min = sclk_max = gfx_table->min;
1786 mclk_min = mclk_max = mem_table->min;
1787 socclk_min = socclk_max = soc_table->min;
1788 break;
1789 case AMD_DPM_FORCED_LEVEL_AUTO:
1790 sclk_min = gfx_table->min;
1791 sclk_max = gfx_table->max;
1792 mclk_min = mem_table->min;
1793 mclk_max = mem_table->max;
1794 socclk_min = soc_table->min;
1795 socclk_max = soc_table->max;
1796 break;
1797 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1798 sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard;
1799 mclk_min = mclk_max = pstate_table->uclk_pstate.standard;
1800 socclk_min = socclk_max = pstate_table->socclk_pstate.standard;
1801 break;
1802 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1803 sclk_min = sclk_max = pstate_table->gfxclk_pstate.min;
1804 break;
1805 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
1806 mclk_min = mclk_max = pstate_table->uclk_pstate.min;
1807 break;
1808 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1809 sclk_min = sclk_max = pstate_table->gfxclk_pstate.peak;
1810 mclk_min = mclk_max = pstate_table->uclk_pstate.peak;
1811 socclk_min = socclk_max = pstate_table->socclk_pstate.peak;
1812 break;
1813 case AMD_DPM_FORCED_LEVEL_MANUAL:
1814 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
1815 return 0;
1816 default:
1817 dev_err(adev->dev, "Invalid performance level %d\n", level);
1818 return -EINVAL;
1819 }
1820
1821 /*
1822 * Separate MCLK and SOCCLK soft min/max settings are not allowed
1823 * on Arcturus.
1824 */
1825 if (adev->asic_type == CHIP_ARCTURUS) {
1826 mclk_min = mclk_max = 0;
1827 socclk_min = socclk_max = 0;
1828 }
1829
1830 if (sclk_min && sclk_max) {
1831 ret = smu_v11_0_set_soft_freq_limited_range(smu,
1832 SMU_GFXCLK,
1833 sclk_min,
1834 sclk_max);
1835 if (ret)
1836 return ret;
1837 }
1838
1839 if (mclk_min && mclk_max) {
1840 ret = smu_v11_0_set_soft_freq_limited_range(smu,
1841 SMU_MCLK,
1842 mclk_min,
1843 mclk_max);
1844 if (ret)
1845 return ret;
1846 }
1847
1848 if (socclk_min && socclk_max) {
1849 ret = smu_v11_0_set_soft_freq_limited_range(smu,
1850 SMU_SOCCLK,
1851 socclk_min,
1852 socclk_max);
1853 if (ret)
1854 return ret;
1855 }
1856
1857 return ret;
1858 }
1859
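/*
 * Translate the generic power source type to the ASIC-specific index and
 * notify the SMU firmware of the active power source.
 */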
1860 int smu_v11_0_set_power_source(struct smu_context *smu,
1861 enum smu_power_src_type power_src)
1862 {
1863 int pwr_source;
1864
1865 pwr_source = smu_cmn_to_asic_specific_index(smu,
1866 CMN2ASIC_MAPPING_PWR,
1867 (uint32_t)power_src);
1868 if (pwr_source < 0)
1869 return -EINVAL;
1870
1871 return smu_cmn_send_smc_msg_with_param(smu,
1872 SMU_MSG_NotifyPowerSource,
1873 pwr_source,
1874 NULL);
1875 }
1876
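/*
 * Query the frequency of a single DPM level. The message parameter packs
 * the clock id in the upper 16 bits and the level index in the lower 16
 * bits; passing level 0xff makes the SMU return the number of levels
 * instead (see smu_v11_0_get_dpm_level_count() below).
 */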
1877 int smu_v11_0_get_dpm_freq_by_index(struct smu_context *smu,
1878 enum smu_clk_type clk_type,
1879 uint16_t level,
1880 uint32_t *value)
1881 {
1882 int ret = 0, clk_id = 0;
1883 uint32_t param;
1884
1885 if (!value)
1886 return -EINVAL;
1887
1888 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
1889 return 0;
1890
1891 clk_id = smu_cmn_to_asic_specific_index(smu,
1892 CMN2ASIC_MAPPING_CLK,
1893 clk_type);
1894 if (clk_id < 0)
1895 return clk_id;
1896
1897 param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
1898
1899 ret = smu_cmn_send_smc_msg_with_param(smu,
1900 SMU_MSG_GetDpmFreqByIndex,
1901 param,
1902 value);
1903 if (ret)
1904 return ret;
1905
1906 /*
1907 	 * BIT31: 0 - Fine grained DPM, 1 - Discrete DPM
1908 	 * This distinction is not supported yet, so mask out BIT31 and return only the frequency.
1909 */
1910 *value = *value & 0x7fffffff;
1911
1912 return ret;
1913 }
1914
1915 int smu_v11_0_get_dpm_level_count(struct smu_context *smu,
1916 enum smu_clk_type clk_type,
1917 uint32_t *value)
1918 {
1919 return smu_v11_0_get_dpm_freq_by_index(smu,
1920 clk_type,
1921 0xff,
1922 value);
1923 }
1924
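/*
 * Populate a smu_11_0_dpm_table by querying the level count and then each
 * level's frequency; the first and last levels provide the table's
 * min/max bounds.
 */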
1925 int smu_v11_0_set_single_dpm_table(struct smu_context *smu,
1926 enum smu_clk_type clk_type,
1927 struct smu_11_0_dpm_table *single_dpm_table)
1928 {
1929 int ret = 0;
1930 uint32_t clk;
1931 int i;
1932
1933 ret = smu_v11_0_get_dpm_level_count(smu,
1934 clk_type,
1935 &single_dpm_table->count);
1936 if (ret) {
1937 dev_err(smu->adev->dev, "[%s] failed to get dpm levels!\n", __func__);
1938 return ret;
1939 }
1940
1941 for (i = 0; i < single_dpm_table->count; i++) {
1942 ret = smu_v11_0_get_dpm_freq_by_index(smu,
1943 clk_type,
1944 i,
1945 &clk);
1946 if (ret) {
1947 dev_err(smu->adev->dev, "[%s] failed to get dpm freq by index!\n", __func__);
1948 return ret;
1949 }
1950
1951 single_dpm_table->dpm_levels[i].value = clk;
1952 single_dpm_table->dpm_levels[i].enabled = true;
1953
1954 if (i == 0)
1955 single_dpm_table->min = clk;
1956 else if (i == single_dpm_table->count - 1)
1957 single_dpm_table->max = clk;
1958 }
1959
1960 return 0;
1961 }
1962
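/*
 * Report the lowest (level 0) and highest (last level) DPM frequencies for
 * a clock domain. Either output pointer may be NULL, but not both.
 */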
1963 int smu_v11_0_get_dpm_level_range(struct smu_context *smu,
1964 enum smu_clk_type clk_type,
1965 uint32_t *min_value,
1966 uint32_t *max_value)
1967 {
1968 uint32_t level_count = 0;
1969 int ret = 0;
1970
1971 if (!min_value && !max_value)
1972 return -EINVAL;
1973
1974 if (min_value) {
1975 		/* by default, use the level 0 clock value as the min value */
1976 ret = smu_v11_0_get_dpm_freq_by_index(smu,
1977 clk_type,
1978 0,
1979 min_value);
1980 if (ret)
1981 return ret;
1982 }
1983
1984 if (max_value) {
1985 ret = smu_v11_0_get_dpm_level_count(smu,
1986 clk_type,
1987 &level_count);
1988 if (ret)
1989 return ret;
1990
1991 ret = smu_v11_0_get_dpm_freq_by_index(smu,
1992 clk_type,
1993 level_count - 1,
1994 max_value);
1995 if (ret)
1996 return ret;
1997 }
1998
1999 return ret;
2000 }
2001
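/*
 * The PCIe helpers below read the current link width/speed fields from the
 * smnPCIE_LC_* registers and translate the raw levels through the
 * link_width[]/link_speed[] lookup tables, falling back to index 0 for
 * out-of-range values.
 */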
2002 int smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu)
2003 {
2004 struct amdgpu_device *adev = smu->adev;
2005
2006 return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
2007 PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
2008 >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
2009 }
2010
2011 uint16_t smu_v11_0_get_current_pcie_link_width(struct smu_context *smu)
2012 {
2013 uint32_t width_level;
2014
2015 width_level = smu_v11_0_get_current_pcie_link_width_level(smu);
2016 if (width_level > LINK_WIDTH_MAX)
2017 width_level = 0;
2018
2019 return link_width[width_level];
2020 }
2021
2022 int smu_v11_0_get_current_pcie_link_speed_level(struct smu_context *smu)
2023 {
2024 struct amdgpu_device *adev = smu->adev;
2025
2026 return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
2027 PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
2028 >> PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
2029 }
2030
2031 uint16_t smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu)
2032 {
2033 uint32_t speed_level;
2034
2035 speed_level = smu_v11_0_get_current_pcie_link_speed_level(smu);
2036 if (speed_level > LINK_SPEED_MAX)
2037 speed_level = 0;
2038
2039 return link_speed[speed_level];
2040 }
2041
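/* Toggle the GFX ULV feature when the SMU reports it as supported. */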
2042 int smu_v11_0_gfx_ulv_control(struct smu_context *smu,
2043 bool enablement)
2044 {
2045 int ret = 0;
2046
2047 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_GFX_ULV_BIT))
2048 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_GFX_ULV_BIT, enablement);
2049
2050 return ret;
2051 }
2052
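/*
 * Enable or disable deep sleep for each clock domain (GFXCLK, UCLK, FCLK,
 * SOCCLK, LCLK) whose DS feature bit is supported, bailing out on the
 * first failure.
 */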
2053 int smu_v11_0_deep_sleep_control(struct smu_context *smu,
2054 bool enablement)
2055 {
2056 struct amdgpu_device *adev = smu->adev;
2057 int ret = 0;
2058
2059 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_GFXCLK_BIT)) {
2060 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_GFXCLK_BIT, enablement);
2061 if (ret) {
2062 dev_err(adev->dev, "Failed to %s GFXCLK DS!\n", enablement ? "enable" : "disable");
2063 return ret;
2064 }
2065 }
2066
2067 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_UCLK_BIT)) {
2068 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_UCLK_BIT, enablement);
2069 if (ret) {
2070 dev_err(adev->dev, "Failed to %s UCLK DS!\n", enablement ? "enable" : "disable");
2071 return ret;
2072 }
2073 }
2074
2075 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_FCLK_BIT)) {
2076 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_FCLK_BIT, enablement);
2077 if (ret) {
2078 dev_err(adev->dev, "Failed to %s FCLK DS!\n", enablement ? "enable" : "disable");
2079 return ret;
2080 }
2081 }
2082
2083 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_SOCCLK_BIT)) {
2084 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_SOCCLK_BIT, enablement);
2085 if (ret) {
2086 dev_err(adev->dev, "Failed to %s SOCCLK DS!\n", enablement ? "enable" : "disable");
2087 return ret;
2088 }
2089 }
2090
2091 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_LCLK_BIT)) {
2092 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_LCLK_BIT, enablement);
2093 if (ret) {
2094 dev_err(adev->dev, "Failed to %s LCLK DS!\n", enablement ? "enable" : "disable");
2095 return ret;
2096 }
2097 }
2098
2099 return ret;
2100 }
2101