/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
23
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "cgs_common.h"
#include "smu/smu_8_0_d.h"
#include "smu/smu_8_0_sh_mask.h"
#include "smu8.h"
#include "smu8_fusion.h"
#include "smu8_smumgr.h"
#include "cz_ppsmc.h"
#include "smu_ucode_xfer_cz.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "smumgr.h"
41
/* Round a byte count up to the next multiple of 32 (SMU scratch buffer
 * alignment requirement). */
#define SIZE_ALIGN_32(x) (((x) + 31) / 32 * 32)

/* Firmware images the driver hands to the SMU for loading at boot.
 * Entries are matched by firmware_ID, so the order here is not
 * significant; Stoney aliasing (SDMA1->SDMA0, JT2->JT1) is handled in
 * smu8_translate_firmware_enum_to_arg(). */
static const enum smu8_scratch_entry firmware_list[] = {
	SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA0,
	SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA1,
	SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE,
	SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
	SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME,
	SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
	SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
	SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G,
};
54
smu8_get_argument(struct pp_hwmgr * hwmgr)55 static uint32_t smu8_get_argument(struct pp_hwmgr *hwmgr)
56 {
57 if (hwmgr == NULL || hwmgr->device == NULL)
58 return 0;
59
60 return cgs_read_register(hwmgr->device,
61 mmSMU_MP1_SRBM2P_ARG_0);
62 }
63
/* Post a message to the SMC without waiting for its completion.
 * Returns 0 on success or a negative error if the previous message
 * never completed. */
static int smu8_send_msg_to_smc_async(struct pp_hwmgr *hwmgr, uint16_t msg)
{
	int ret;

	if (!hwmgr || !hwmgr->device)
		return -EINVAL;

	/* Wait for the SMC to finish the previous message (RESP != 0). */
	ret = PHM_WAIT_FIELD_UNEQUAL(hwmgr,
			SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
	if (ret) {
		pr_err("smu8_send_msg_to_smc_async (0x%04x) failed\n", msg);
		return ret;
	}

	/* Clear the response register, then post the new message. */
	cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0);
	cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg);

	return 0;
}
83
84 /* Send a message to the SMC, and wait for its response.*/
/* Send a message to the SMC and block until the SMC responds. */
static int smu8_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
	int ret = smu8_send_msg_to_smc_async(hwmgr, msg);

	if (ret)
		return ret;

	/* Wait for the SMC to raise a non-zero response. */
	return PHM_WAIT_FIELD_UNEQUAL(hwmgr,
			SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
}
96
/* Program the indirect-access index register to point at @smc_address
 * inside the SMU SRAM. The address must be dword aligned and the whole
 * dword must lie below @limit. */
static int smu8_set_smc_sram_address(struct pp_hwmgr *hwmgr,
				     uint32_t smc_address, uint32_t limit)
{
	if (!hwmgr || !hwmgr->device)
		return -EINVAL;

	if (smc_address & 3) {
		pr_err("SMC address must be 4 byte aligned\n");
		return -EINVAL;
	}

	if (smc_address + 3 >= limit) {
		pr_err("SMC address beyond the SMC RAM area\n");
		return -EINVAL;
	}

	cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX_0,
			   SMN_MP1_SRAM_START_ADDR + smc_address);

	return 0;
}
118
/* Write one dword at @smc_address in SMU SRAM via the indirect-access
 * register pair; @limit bounds the valid SRAM window. */
static int smu8_write_smc_sram_dword(struct pp_hwmgr *hwmgr,
		uint32_t smc_address, uint32_t value, uint32_t limit)
{
	int ret;

	if (!hwmgr || !hwmgr->device)
		return -EINVAL;

	ret = smu8_set_smc_sram_address(hwmgr, smc_address, limit);
	if (ret)
		return ret;

	cgs_write_register(hwmgr->device, mmMP0PUB_IND_DATA_0, value);
	return 0;
}
133
/* Load @parameter into the SMC argument register, then send @msg and
 * wait for completion. */
static int smu8_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
					       uint16_t msg, uint32_t parameter)
{
	if (!hwmgr || !hwmgr->device)
		return -EINVAL;

	/* The argument must be in place before the message is posted. */
	cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter);

	return smu8_send_msg_to_smc(hwmgr, msg);
}
144
/* Poll the UcodeLoadStatus word in the SMU firmware header until every
 * bit in @firmware is set, or hwmgr->usec_timeout microseconds elapse. */
static int smu8_check_fw_load_finish(struct pp_hwmgr *hwmgr,
				     uint32_t firmware)
{
	uint32_t index = SMN_MP1_SRAM_START_ADDR +
			 SMU8_FIRMWARE_HEADER_LOCATION +
			 offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
	int i;

	if (!hwmgr || !hwmgr->device)
		return -EINVAL;

	cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);

	for (i = 0; i < hwmgr->usec_timeout; i++) {
		uint32_t status = cgs_read_register(hwmgr->device,
						    mmMP0PUB_IND_DATA);

		/* Done once every requested firmware bit is reported. */
		if ((status & firmware) == firmware)
			break;
		udelay(1);
	}

	if (i >= hwmgr->usec_timeout) {
		pr_err("SMU check loaded firmware failed.\n");
		return -EINVAL;
	}

	return 0;
}
172
smu8_load_mec_firmware(struct pp_hwmgr * hwmgr)173 static int smu8_load_mec_firmware(struct pp_hwmgr *hwmgr)
174 {
175 uint32_t reg_data;
176 uint32_t tmp;
177 int ret = 0;
178 struct cgs_firmware_info info = {0};
179 struct smu8_smumgr *smu8_smu;
180
181 if (hwmgr == NULL || hwmgr->device == NULL)
182 return -EINVAL;
183
184 smu8_smu = hwmgr->smu_backend;
185 ret = cgs_get_firmware_info(hwmgr->device,
186 CGS_UCODE_ID_CP_MEC, &info);
187
188 if (ret)
189 return -EINVAL;
190
191 /* Disable MEC parsing/prefetching */
192 tmp = cgs_read_register(hwmgr->device,
193 mmCP_MEC_CNTL);
194 tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
195 tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
196 cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, tmp);
197
198 tmp = cgs_read_register(hwmgr->device,
199 mmCP_CPC_IC_BASE_CNTL);
200
201 tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
202 tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
203 tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
204 tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
205 cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_CNTL, tmp);
206
207 reg_data = lower_32_bits(info.mc_addr) &
208 PHM_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
209 cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_LO, reg_data);
210
211 reg_data = upper_32_bits(info.mc_addr) &
212 PHM_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
213 cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_HI, reg_data);
214
215 return 0;
216 }
217
/*
 * smu8_translate_firmware_enum_to_arg - map a scratch-entry enum onto the
 * ucode id / task argument understood by the SMU8 firmware interface.
 *
 * Stoney has a single SDMA engine and a single MEC, so its SDMA1 and
 * CP_MEC_JT2 requests are aliased onto SDMA0 / CP_MEC_JT1.
 *
 * Returns the matching UCODE_ID_* / TASK_ARG_* value, or 0 for enum
 * values not handled by the switch.
 */
static uint8_t smu8_translate_firmware_enum_to_arg(struct pp_hwmgr *hwmgr,
			enum smu8_scratch_entry firmware_enum)
{
	uint8_t ret = 0;

	switch (firmware_enum) {
	case SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA0:
		ret = UCODE_ID_SDMA0;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA1:
		/* Stoney: only one SDMA engine. */
		if (hwmgr->chip_id == CHIP_STONEY)
			ret = UCODE_ID_SDMA0;
		else
			ret = UCODE_ID_SDMA1;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE:
		ret = UCODE_ID_CP_CE;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP:
		ret = UCODE_ID_CP_PFP;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME:
		ret = UCODE_ID_CP_ME;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1:
		ret = UCODE_ID_CP_MEC_JT1;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
		/* Stoney: only one MEC. */
		if (hwmgr->chip_id == CHIP_STONEY)
			ret = UCODE_ID_CP_MEC_JT1;
		else
			ret = UCODE_ID_CP_MEC_JT2;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG:
		ret = UCODE_ID_GMCON_RENG;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G:
		ret = UCODE_ID_RLC_G;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH:
		ret = UCODE_ID_RLC_SCRATCH;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM:
		ret = UCODE_ID_RLC_SRM_ARAM;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM:
		ret = UCODE_ID_RLC_SRM_DRAM;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM:
		ret = UCODE_ID_DMCU_ERAM;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM:
		ret = UCODE_ID_DMCU_IRAM;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING:
		ret = TASK_ARG_INIT_MM_PWR_LOG;
		break;
	/* All register-save scratch areas share the MMIO task argument. */
	case SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_HALT:
	case SMU8_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING:
	case SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS:
	case SMU8_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT:
	case SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_START:
	case SMU8_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS:
		ret = TASK_ARG_REG_MMIO;
		break;
	case SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE:
		ret = TASK_ARG_INIT_CLK_TABLE;
		break;
	}

	return ret;
}
290
/* Translate an SMU8 ucode id into the corresponding CGS ucode id;
 * unknown ids map to CGS_UCODE_ID_MAXIMUM. */
static enum cgs_ucode_id smu8_convert_fw_type_to_cgs(uint32_t fw_type)
{
	switch (fw_type) {
	case UCODE_ID_SDMA0:
		return CGS_UCODE_ID_SDMA0;
	case UCODE_ID_SDMA1:
		return CGS_UCODE_ID_SDMA1;
	case UCODE_ID_CP_CE:
		return CGS_UCODE_ID_CP_CE;
	case UCODE_ID_CP_PFP:
		return CGS_UCODE_ID_CP_PFP;
	case UCODE_ID_CP_ME:
		return CGS_UCODE_ID_CP_ME;
	case UCODE_ID_CP_MEC_JT1:
		return CGS_UCODE_ID_CP_MEC_JT1;
	case UCODE_ID_CP_MEC_JT2:
		return CGS_UCODE_ID_CP_MEC_JT2;
	case UCODE_ID_RLC_G:
		return CGS_UCODE_ID_RLC_G;
	default:
		return CGS_UCODE_ID_MAXIMUM;
	}
}
326
/*
 * smu8_smu_populate_single_scratch_task - append one scratch save/load
 * task to the TOC.
 * @fw_enum: scratch area the task operates on
 * @type: TASK_TYPE_UCODE_SAVE or TASK_TYPE_UCODE_LOAD
 * @is_last: true terminates the task chain at this entry
 *
 * NOTE(review): the TOC slot is claimed (toc_entry_used_count++) before
 * the scratch entry is validated, so a failed lookup still consumes a
 * slot and leaves a partially initialized task behind.
 *
 * Returns 0 on success, -EINVAL if @fw_enum has no scratch buffer entry.
 */
static int smu8_smu_populate_single_scratch_task(
			struct pp_hwmgr *hwmgr,
			enum smu8_scratch_entry fw_enum,
			uint8_t type, bool is_last)
{
	uint8_t i;
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
	struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;
	struct SMU_Task *task = &toc->tasks[smu8_smu->toc_entry_used_count++];

	task->type = type;
	task->arg = smu8_translate_firmware_enum_to_arg(hwmgr, fw_enum);
	/* Chain to the next TOC slot unless this is the tail of the job. */
	task->next = is_last ? END_OF_TASK_LIST : smu8_smu->toc_entry_used_count;

	/* Locate the scratch buffer entry backing this firmware id. */
	for (i = 0; i < smu8_smu->scratch_buffer_length; i++)
		if (smu8_smu->scratch_buffer[i].firmware_ID == fw_enum)
			break;

	if (i >= smu8_smu->scratch_buffer_length) {
		pr_err("Invalid Firmware Type\n");
		return -EINVAL;
	}

	task->addr.low = lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr);
	task->addr.high = upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr);
	task->size_bytes = smu8_smu->scratch_buffer[i].data_size;

	/* IH register restore additionally needs a command word written
	 * into its metadata header. */
	if (SMU8_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS == fw_enum) {
		struct smu8_ih_meta_data *pIHReg_restore =
		     (struct smu8_ih_meta_data *)smu8_smu->scratch_buffer[i].kaddr;
		pIHReg_restore->command =
			METADATA_CMD_MODE0 | METADATA_PERFORM_ON_LOAD;
	}

	return 0;
}
363
/*
 * smu8_smu_populate_single_ucode_load_task - append one firmware-load
 * task to the TOC, pointing at the driver-owned firmware image.
 * @fw_enum: firmware to load
 * @is_last: true terminates the task chain at this entry
 *
 * NOTE(review): as with the scratch variant, the TOC slot is consumed
 * before the driver buffer lookup is validated.
 *
 * Returns 0 on success, -EINVAL if @fw_enum was not collected by
 * smu8_smu_populate_firmware_entries().
 */
static int smu8_smu_populate_single_ucode_load_task(
			struct pp_hwmgr *hwmgr,
			enum smu8_scratch_entry fw_enum,
			bool is_last)
{
	uint8_t i;
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
	struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;
	struct SMU_Task *task = &toc->tasks[smu8_smu->toc_entry_used_count++];

	task->type = TASK_TYPE_UCODE_LOAD;
	task->arg = smu8_translate_firmware_enum_to_arg(hwmgr, fw_enum);
	/* Chain to the next TOC slot unless this is the tail of the job. */
	task->next = is_last ? END_OF_TASK_LIST : smu8_smu->toc_entry_used_count;

	/* Locate the driver buffer entry holding this firmware image. */
	for (i = 0; i < smu8_smu->driver_buffer_length; i++)
		if (smu8_smu->driver_buffer[i].firmware_ID == fw_enum)
			break;

	if (i >= smu8_smu->driver_buffer_length) {
		pr_err("Invalid Firmware Type\n");
		return -EINVAL;
	}

	task->addr.low = lower_32_bits(smu8_smu->driver_buffer[i].mc_addr);
	task->addr.high = upper_32_bits(smu8_smu->driver_buffer[i].mc_addr);
	task->size_bytes = smu8_smu->driver_buffer[i].data_size;

	return 0;
}
393
smu8_smu_construct_toc_for_rlc_aram_save(struct pp_hwmgr * hwmgr)394 static int smu8_smu_construct_toc_for_rlc_aram_save(struct pp_hwmgr *hwmgr)
395 {
396 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
397
398 smu8_smu->toc_entry_aram = smu8_smu->toc_entry_used_count;
399 smu8_smu_populate_single_scratch_task(hwmgr,
400 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
401 TASK_TYPE_UCODE_SAVE, true);
402
403 return 0;
404 }
405
smu8_smu_initialize_toc_empty_job_list(struct pp_hwmgr * hwmgr)406 static int smu8_smu_initialize_toc_empty_job_list(struct pp_hwmgr *hwmgr)
407 {
408 int i;
409 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
410 struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;
411
412 for (i = 0; i < NUM_JOBLIST_ENTRIES; i++)
413 toc->JobList[i] = (uint8_t)IGNORE_JOB;
414
415 return 0;
416 }
417
smu8_smu_construct_toc_for_vddgfx_enter(struct pp_hwmgr * hwmgr)418 static int smu8_smu_construct_toc_for_vddgfx_enter(struct pp_hwmgr *hwmgr)
419 {
420 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
421 struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;
422
423 toc->JobList[JOB_GFX_SAVE] = (uint8_t)smu8_smu->toc_entry_used_count;
424 smu8_smu_populate_single_scratch_task(hwmgr,
425 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
426 TASK_TYPE_UCODE_SAVE, false);
427
428 smu8_smu_populate_single_scratch_task(hwmgr,
429 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
430 TASK_TYPE_UCODE_SAVE, true);
431
432 return 0;
433 }
434
435
smu8_smu_construct_toc_for_vddgfx_exit(struct pp_hwmgr * hwmgr)436 static int smu8_smu_construct_toc_for_vddgfx_exit(struct pp_hwmgr *hwmgr)
437 {
438 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
439 struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;
440
441 toc->JobList[JOB_GFX_RESTORE] = (uint8_t)smu8_smu->toc_entry_used_count;
442
443 smu8_smu_populate_single_ucode_load_task(hwmgr,
444 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
445 smu8_smu_populate_single_ucode_load_task(hwmgr,
446 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
447 smu8_smu_populate_single_ucode_load_task(hwmgr,
448 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
449 smu8_smu_populate_single_ucode_load_task(hwmgr,
450 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
451
452 if (hwmgr->chip_id == CHIP_STONEY)
453 smu8_smu_populate_single_ucode_load_task(hwmgr,
454 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
455 else
456 smu8_smu_populate_single_ucode_load_task(hwmgr,
457 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
458
459 smu8_smu_populate_single_ucode_load_task(hwmgr,
460 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);
461
462 /* populate scratch */
463 smu8_smu_populate_single_scratch_task(hwmgr,
464 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
465 TASK_TYPE_UCODE_LOAD, false);
466
467 smu8_smu_populate_single_scratch_task(hwmgr,
468 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
469 TASK_TYPE_UCODE_LOAD, false);
470
471 smu8_smu_populate_single_scratch_task(hwmgr,
472 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
473 TASK_TYPE_UCODE_LOAD, true);
474
475 return 0;
476 }
477
smu8_smu_construct_toc_for_power_profiling(struct pp_hwmgr * hwmgr)478 static int smu8_smu_construct_toc_for_power_profiling(struct pp_hwmgr *hwmgr)
479 {
480 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
481
482 smu8_smu->toc_entry_power_profiling_index = smu8_smu->toc_entry_used_count;
483
484 smu8_smu_populate_single_scratch_task(hwmgr,
485 SMU8_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
486 TASK_TYPE_INITIALIZE, true);
487 return 0;
488 }
489
smu8_smu_construct_toc_for_bootup(struct pp_hwmgr * hwmgr)490 static int smu8_smu_construct_toc_for_bootup(struct pp_hwmgr *hwmgr)
491 {
492 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
493
494 smu8_smu->toc_entry_initialize_index = smu8_smu->toc_entry_used_count;
495
496 smu8_smu_populate_single_ucode_load_task(hwmgr,
497 SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
498 if (hwmgr->chip_id != CHIP_STONEY)
499 smu8_smu_populate_single_ucode_load_task(hwmgr,
500 SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
501 smu8_smu_populate_single_ucode_load_task(hwmgr,
502 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
503 smu8_smu_populate_single_ucode_load_task(hwmgr,
504 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
505 smu8_smu_populate_single_ucode_load_task(hwmgr,
506 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
507 smu8_smu_populate_single_ucode_load_task(hwmgr,
508 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
509 if (hwmgr->chip_id != CHIP_STONEY)
510 smu8_smu_populate_single_ucode_load_task(hwmgr,
511 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
512 smu8_smu_populate_single_ucode_load_task(hwmgr,
513 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);
514
515 return 0;
516 }
517
smu8_smu_construct_toc_for_clock_table(struct pp_hwmgr * hwmgr)518 static int smu8_smu_construct_toc_for_clock_table(struct pp_hwmgr *hwmgr)
519 {
520 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
521
522 smu8_smu->toc_entry_clock_table = smu8_smu->toc_entry_used_count;
523
524 smu8_smu_populate_single_scratch_task(hwmgr,
525 SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
526 TASK_TYPE_INITIALIZE, true);
527
528 return 0;
529 }
530
smu8_smu_construct_toc(struct pp_hwmgr * hwmgr)531 static int smu8_smu_construct_toc(struct pp_hwmgr *hwmgr)
532 {
533 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
534
535 smu8_smu->toc_entry_used_count = 0;
536 smu8_smu_initialize_toc_empty_job_list(hwmgr);
537 smu8_smu_construct_toc_for_rlc_aram_save(hwmgr);
538 smu8_smu_construct_toc_for_vddgfx_enter(hwmgr);
539 smu8_smu_construct_toc_for_vddgfx_exit(hwmgr);
540 smu8_smu_construct_toc_for_power_profiling(hwmgr);
541 smu8_smu_construct_toc_for_bootup(hwmgr);
542 smu8_smu_construct_toc_for_clock_table(hwmgr);
543
544 return 0;
545 }
546
smu8_smu_populate_firmware_entries(struct pp_hwmgr * hwmgr)547 static int smu8_smu_populate_firmware_entries(struct pp_hwmgr *hwmgr)
548 {
549 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
550 uint32_t firmware_type;
551 uint32_t i;
552 int ret;
553 enum cgs_ucode_id ucode_id;
554 struct cgs_firmware_info info = {0};
555
556 smu8_smu->driver_buffer_length = 0;
557
558 for (i = 0; i < ARRAY_SIZE(firmware_list); i++) {
559
560 firmware_type = smu8_translate_firmware_enum_to_arg(hwmgr,
561 firmware_list[i]);
562
563 ucode_id = smu8_convert_fw_type_to_cgs(firmware_type);
564
565 ret = cgs_get_firmware_info(hwmgr->device,
566 ucode_id, &info);
567
568 if (ret == 0) {
569 smu8_smu->driver_buffer[i].mc_addr = info.mc_addr;
570
571 smu8_smu->driver_buffer[i].data_size = info.image_size;
572
573 smu8_smu->driver_buffer[i].firmware_ID = firmware_list[i];
574 smu8_smu->driver_buffer_length++;
575 }
576 }
577
578 return 0;
579 }
580
/* Carve a 32-byte-aligned region of @ulsize_byte bytes out of the shared
 * SMU buffer and describe it in @entry. */
static int smu8_smu_populate_single_scratch_entry(
			struct pp_hwmgr *hwmgr,
			enum smu8_scratch_entry scratch_type,
			uint32_t ulsize_byte,
			struct smu8_buffer_entry *entry)
{
	struct smu8_smumgr *smumgr = hwmgr->smu_backend;
	uint32_t offset = smumgr->smu_buffer_used_bytes;

	entry->data_size = ulsize_byte;
	entry->kaddr = (char *)smumgr->smu_buffer.kaddr + offset;
	entry->mc_addr = smumgr->smu_buffer.mc_addr + offset;
	entry->firmware_ID = scratch_type;

	/* Keep the next carve-out 32-byte aligned. */
	smumgr->smu_buffer_used_bytes += SIZE_ALIGN_32(ulsize_byte);

	return 0;
}
600
smu8_download_pptable_settings(struct pp_hwmgr * hwmgr,void ** table)601 static int smu8_download_pptable_settings(struct pp_hwmgr *hwmgr, void **table)
602 {
603 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
604 unsigned long i;
605
606 for (i = 0; i < smu8_smu->scratch_buffer_length; i++) {
607 if (smu8_smu->scratch_buffer[i].firmware_ID
608 == SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
609 break;
610 }
611
612 *table = (struct SMU8_Fusion_ClkTable *)smu8_smu->scratch_buffer[i].kaddr;
613
614 smu8_send_msg_to_smc_with_parameter(hwmgr,
615 PPSMC_MSG_SetClkTableAddrHi,
616 upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
617
618 smu8_send_msg_to_smc_with_parameter(hwmgr,
619 PPSMC_MSG_SetClkTableAddrLo,
620 lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
621
622 smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
623 smu8_smu->toc_entry_clock_table);
624
625 smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToDram);
626
627 return 0;
628 }
629
smu8_upload_pptable_settings(struct pp_hwmgr * hwmgr)630 static int smu8_upload_pptable_settings(struct pp_hwmgr *hwmgr)
631 {
632 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
633 unsigned long i;
634
635 for (i = 0; i < smu8_smu->scratch_buffer_length; i++) {
636 if (smu8_smu->scratch_buffer[i].firmware_ID
637 == SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
638 break;
639 }
640
641 smu8_send_msg_to_smc_with_parameter(hwmgr,
642 PPSMC_MSG_SetClkTableAddrHi,
643 upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
644
645 smu8_send_msg_to_smc_with_parameter(hwmgr,
646 PPSMC_MSG_SetClkTableAddrLo,
647 lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
648
649 smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
650 smu8_smu->toc_entry_clock_table);
651
652 smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToSmu);
653
654 return 0;
655 }
656
/*
 * smu8_request_smu_load_fw - build the TOC and ask the SMU to load all
 * driver-supplied firmware.
 *
 * Skips the whole sequence when hwmgr->reload_fw is clear. Clears the
 * UcodeLoadStatus word in the SMU firmware header so that a later
 * smu8_check_fw_load_finish() observes fresh completion bits, hands the
 * TOC address to the SMU, then executes the ARAM-save, power-profiling
 * and initialize jobs in that order.
 *
 * Returns 0 on success or the error of the final SMC message.
 */
static int smu8_request_smu_load_fw(struct pp_hwmgr *hwmgr)
{
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
	uint32_t smc_address;

	if (!hwmgr->reload_fw) {
		pr_info("skip reloading...\n");
		return 0;
	}

	smu8_smu_populate_firmware_entries(hwmgr);

	smu8_smu_construct_toc(hwmgr);

	smc_address = SMU8_FIRMWARE_HEADER_LOCATION +
		offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);

	/* Reset the load-status bits before kicking off the jobs. */
	smu8_write_smc_sram_dword(hwmgr, smc_address, 0, smc_address+4);

	smu8_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DriverDramAddrHi,
					upper_32_bits(smu8_smu->toc_buffer.mc_addr));

	smu8_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DriverDramAddrLo,
					lower_32_bits(smu8_smu->toc_buffer.mc_addr));

	smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs);

	smu8_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_ExecuteJob,
					smu8_smu->toc_entry_aram);
	smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
				smu8_smu->toc_entry_power_profiling_index);

	return smu8_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_ExecuteJob,
					smu8_smu->toc_entry_initialize_index);
}
696
smu8_start_smu(struct pp_hwmgr * hwmgr)697 static int smu8_start_smu(struct pp_hwmgr *hwmgr)
698 {
699 int ret = 0;
700 uint32_t fw_to_check = 0;
701 struct amdgpu_device *adev = hwmgr->adev;
702
703 uint32_t index = SMN_MP1_SRAM_START_ADDR +
704 SMU8_FIRMWARE_HEADER_LOCATION +
705 offsetof(struct SMU8_Firmware_Header, Version);
706
707
708 if (hwmgr == NULL || hwmgr->device == NULL)
709 return -EINVAL;
710
711 cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
712 hwmgr->smu_version = cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA);
713 adev->pm.fw_version = hwmgr->smu_version >> 8;
714
715 fw_to_check = UCODE_ID_RLC_G_MASK |
716 UCODE_ID_SDMA0_MASK |
717 UCODE_ID_SDMA1_MASK |
718 UCODE_ID_CP_CE_MASK |
719 UCODE_ID_CP_ME_MASK |
720 UCODE_ID_CP_PFP_MASK |
721 UCODE_ID_CP_MEC_JT1_MASK |
722 UCODE_ID_CP_MEC_JT2_MASK;
723
724 if (hwmgr->chip_id == CHIP_STONEY)
725 fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);
726
727 ret = smu8_request_smu_load_fw(hwmgr);
728 if (ret)
729 pr_err("SMU firmware load failed\n");
730
731 smu8_check_fw_load_finish(hwmgr, fw_to_check);
732
733 ret = smu8_load_mec_firmware(hwmgr);
734 if (ret)
735 pr_err("Mec Firmware load failed\n");
736
737 return ret;
738 }
739
smu8_smu_init(struct pp_hwmgr * hwmgr)740 static int smu8_smu_init(struct pp_hwmgr *hwmgr)
741 {
742 int ret = 0;
743 struct smu8_smumgr *smu8_smu;
744
745 smu8_smu = kzalloc(sizeof(struct smu8_smumgr), GFP_KERNEL);
746 if (smu8_smu == NULL)
747 return -ENOMEM;
748
749 hwmgr->smu_backend = smu8_smu;
750
751 smu8_smu->toc_buffer.data_size = 4096;
752 smu8_smu->smu_buffer.data_size =
753 ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE, 32) +
754 ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, 32) +
755 ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, 32) +
756 ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) +
757 ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32);
758
759 ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
760 smu8_smu->toc_buffer.data_size,
761 PAGE_SIZE,
762 AMDGPU_GEM_DOMAIN_VRAM,
763 &smu8_smu->toc_buffer.handle,
764 (u64 *)&smu8_smu->toc_buffer.mc_addr,
765 &smu8_smu->toc_buffer.kaddr);
766 if (ret)
767 goto err2;
768
769 ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
770 smu8_smu->smu_buffer.data_size,
771 PAGE_SIZE,
772 AMDGPU_GEM_DOMAIN_VRAM,
773 &smu8_smu->smu_buffer.handle,
774 (u64 *)&smu8_smu->smu_buffer.mc_addr,
775 &smu8_smu->smu_buffer.kaddr);
776 if (ret)
777 goto err1;
778
779 if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
780 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
781 UCODE_ID_RLC_SCRATCH_SIZE_BYTE,
782 &smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
783 pr_err("Error when Populate Firmware Entry.\n");
784 goto err0;
785 }
786
787 if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
788 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
789 UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE,
790 &smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
791 pr_err("Error when Populate Firmware Entry.\n");
792 goto err0;
793 }
794 if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
795 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
796 UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE,
797 &smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
798 pr_err("Error when Populate Firmware Entry.\n");
799 goto err0;
800 }
801
802 if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
803 SMU8_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
804 sizeof(struct SMU8_MultimediaPowerLogData),
805 &smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
806 pr_err("Error when Populate Firmware Entry.\n");
807 goto err0;
808 }
809
810 if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
811 SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
812 sizeof(struct SMU8_Fusion_ClkTable),
813 &smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
814 pr_err("Error when Populate Firmware Entry.\n");
815 goto err0;
816 }
817
818 return 0;
819
820 err0:
821 amdgpu_bo_free_kernel(&smu8_smu->smu_buffer.handle,
822 (u64 *)&smu8_smu->smu_buffer.mc_addr,
823 &smu8_smu->smu_buffer.kaddr);
824 err1:
825 amdgpu_bo_free_kernel(&smu8_smu->toc_buffer.handle,
826 (u64 *)&smu8_smu->toc_buffer.mc_addr,
827 &smu8_smu->toc_buffer.kaddr);
828 err2:
829 kfree(smu8_smu);
830 return -EINVAL;
831 }
832
smu8_smu_fini(struct pp_hwmgr * hwmgr)833 static int smu8_smu_fini(struct pp_hwmgr *hwmgr)
834 {
835 struct smu8_smumgr *smu8_smu;
836
837 if (hwmgr == NULL || hwmgr->device == NULL)
838 return -EINVAL;
839
840 smu8_smu = hwmgr->smu_backend;
841 if (smu8_smu) {
842 amdgpu_bo_free_kernel(&smu8_smu->toc_buffer.handle,
843 (u64 *)&smu8_smu->toc_buffer.mc_addr,
844 &smu8_smu->toc_buffer.kaddr);
845 amdgpu_bo_free_kernel(&smu8_smu->smu_buffer.handle,
846 (u64 *)&smu8_smu->smu_buffer.mc_addr,
847 &smu8_smu->smu_buffer.kaddr);
848 kfree(smu8_smu);
849 }
850
851 return 0;
852 }
853
smu8_dpm_check_smu_features(struct pp_hwmgr * hwmgr,unsigned long check_feature)854 static bool smu8_dpm_check_smu_features(struct pp_hwmgr *hwmgr,
855 unsigned long check_feature)
856 {
857 int result;
858 unsigned long features;
859
860 result = smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetFeatureStatus, 0);
861 if (result == 0) {
862 features = smum_get_argument(hwmgr);
863 if (features & check_feature)
864 return true;
865 }
866
867 return false;
868 }
869
smu8_is_dpm_running(struct pp_hwmgr * hwmgr)870 static bool smu8_is_dpm_running(struct pp_hwmgr *hwmgr)
871 {
872 if (smu8_dpm_check_smu_features(hwmgr, SMU_EnabledFeatureScoreboard_SclkDpmOn))
873 return true;
874 return false;
875 }
876
/* SMU manager callbacks for SMU8 (Carrizo/Stoney APUs). Firmware loading
 * happens inside start_smu, so request_smu_load_fw is intentionally
 * unused here. */
const struct pp_smumgr_func smu8_smu_funcs = {
	.smu_init = smu8_smu_init,
	.smu_fini = smu8_smu_fini,
	.start_smu = smu8_start_smu,
	.check_fw_load_finish = smu8_check_fw_load_finish,
	.request_smu_load_fw = NULL,
	.request_smu_load_specific_fw = NULL,
	.get_argument = smu8_get_argument,
	.send_msg_to_smc = smu8_send_msg_to_smc,
	.send_msg_to_smc_with_parameter = smu8_send_msg_to_smc_with_parameter,
	.download_pptable_settings = smu8_download_pptable_settings,
	.upload_pptable_settings = smu8_upload_pptable_settings,
	.is_dpm_running = smu8_is_dpm_running,
};
891
892