1 /*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Huang Rui
23 *
24 */
25
26 #include <linux/firmware.h>
27 #include <drm/drm_drv.h>
28
29 #include "amdgpu.h"
30 #include "amdgpu_psp.h"
31 #include "amdgpu_ucode.h"
32 #include "amdgpu_xgmi.h"
33 #include "soc15_common.h"
34 #include "psp_v3_1.h"
35 #include "psp_v10_0.h"
36 #include "psp_v11_0.h"
37 #include "psp_v11_0_8.h"
38 #include "psp_v12_0.h"
39 #include "psp_v13_0.h"
40 #include "psp_v13_0_4.h"
41
42 #include "amdgpu_ras.h"
43 #include "amdgpu_securedisplay.h"
44 #include "amdgpu_atomfirmware.h"
45
46 #define AMD_VBIOS_FILE_MAX_SIZE_B (1024*1024*3)
47
48 static int psp_load_smu_fw(struct psp_context *psp);
49 static int psp_rap_terminate(struct psp_context *psp);
50 static int psp_securedisplay_terminate(struct psp_context *psp);
51
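/*
 * Allocate the 4 KB kernel (KM) ring buffer that the driver uses to submit
 * GFX commands to the PSP; it lives in VRAM (with GTT as a fallback domain)
 * and is released again in psp_sw_fini().
 */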
52 static int psp_ring_init(struct psp_context *psp,
53 enum psp_ring_type ring_type)
54 {
55 int ret = 0;
56 struct psp_ring *ring;
57 struct amdgpu_device *adev = psp->adev;
58
59 ring = &psp->km_ring;
60
61 ring->ring_type = ring_type;
62
63 /* allocate a 4k page of local frame buffer memory for the ring */
64 ring->ring_size = 0x1000;
65 ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
66 AMDGPU_GEM_DOMAIN_VRAM |
67 AMDGPU_GEM_DOMAIN_GTT,
68 &adev->firmware.rbuf,
69 &ring->ring_mem_mc_addr,
70 (void **)&ring->ring_mem);
71 if (ret) {
72 ring->ring_size = 0;
73 return ret;
74 }
75
76 return 0;
77 }
78
79 /*
80 * Due to DF Cstate management being centralized in the PMFW, the firmware
81 * loading sequence will be updated as below:
82 * - Load KDB
83 * - Load SYS_DRV
84 * - Load tOS
85 * - Load PMFW
86 * - Setup TMR
87 * - Load other non-psp fw
88 * - Load ASD
89 * - Load XGMI/RAS/HDCP/DTM TA if any
90 *
91 * This new sequence is required for
92 * - Arcturus and onwards
93 */
94 static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
95 {
96 struct amdgpu_device *adev = psp->adev;
97
98 if (amdgpu_sriov_vf(adev)) {
99 psp->pmfw_centralized_cstate_management = false;
100 return;
101 }
102
103 switch (adev->ip_versions[MP0_HWIP][0]) {
104 case IP_VERSION(11, 0, 0):
105 case IP_VERSION(11, 0, 4):
106 case IP_VERSION(11, 0, 5):
107 case IP_VERSION(11, 0, 7):
108 case IP_VERSION(11, 0, 9):
109 case IP_VERSION(11, 0, 11):
110 case IP_VERSION(11, 0, 12):
111 case IP_VERSION(11, 0, 13):
112 case IP_VERSION(13, 0, 0):
113 case IP_VERSION(13, 0, 2):
114 case IP_VERSION(13, 0, 7):
115 psp->pmfw_centralized_cstate_management = true;
116 break;
117 default:
118 psp->pmfw_centralized_cstate_management = false;
119 break;
120 }
121 }
122
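/*
 * Under SRIOV only a subset of firmware is handled by the guest: pick the
 * CAP/TA microcode to initialize and the ucode id that marks the end of
 * autoload, based on the MP0 IP version.
 */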
123 static int psp_init_sriov_microcode(struct psp_context *psp)
124 {
125 struct amdgpu_device *adev = psp->adev;
126 char ucode_prefix[30];
127 int ret = 0;
128
129 amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
130
131 switch (adev->ip_versions[MP0_HWIP][0]) {
132 case IP_VERSION(9, 0, 0):
133 case IP_VERSION(11, 0, 7):
134 case IP_VERSION(11, 0, 9):
135 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
136 ret = psp_init_cap_microcode(psp, ucode_prefix);
137 break;
138 case IP_VERSION(13, 0, 2):
139 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
140 ret = psp_init_cap_microcode(psp, ucode_prefix);
141 if (!ret) ret = psp_init_ta_microcode(psp, ucode_prefix);
142 break;
143 case IP_VERSION(13, 0, 0):
144 adev->virt.autoload_ucode_id = 0;
145 break;
146 case IP_VERSION(13, 0, 6):
147 ret = psp_init_cap_microcode(psp, ucode_prefix);
148 if (!ret) ret = psp_init_ta_microcode(psp, ucode_prefix);
149 break;
150 case IP_VERSION(13, 0, 10):
151 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
152 ret = psp_init_cap_microcode(psp, ucode_prefix);
153 break;
154 default:
155 return -EINVAL;
156 }
157 return ret;
158 }
159
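/*
 * Bind the per-ASIC PSP callbacks according to the MP0 IP version, record
 * whether firmware autoload and PD/IFWI firmware updates are supported, and
 * pick the bare-metal or SRIOV microcode initialization path.
 */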
160 static int psp_early_init(void *handle)
161 {
162 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
163 struct psp_context *psp = &adev->psp;
164
165 switch (adev->ip_versions[MP0_HWIP][0]) {
166 case IP_VERSION(9, 0, 0):
167 psp_v3_1_set_psp_funcs(psp);
168 psp->autoload_supported = false;
169 break;
170 case IP_VERSION(10, 0, 0):
171 case IP_VERSION(10, 0, 1):
172 psp_v10_0_set_psp_funcs(psp);
173 psp->autoload_supported = false;
174 break;
175 case IP_VERSION(11, 0, 2):
176 case IP_VERSION(11, 0, 4):
177 psp_v11_0_set_psp_funcs(psp);
178 psp->autoload_supported = false;
179 break;
180 case IP_VERSION(11, 0, 0):
181 case IP_VERSION(11, 0, 7):
182 adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev);
183 fallthrough;
184 case IP_VERSION(11, 0, 5):
185 case IP_VERSION(11, 0, 9):
186 case IP_VERSION(11, 0, 11):
187 case IP_VERSION(11, 5, 0):
188 case IP_VERSION(11, 0, 12):
189 case IP_VERSION(11, 0, 13):
190 psp_v11_0_set_psp_funcs(psp);
191 psp->autoload_supported = true;
192 break;
193 case IP_VERSION(11, 0, 3):
194 case IP_VERSION(12, 0, 1):
195 psp_v12_0_set_psp_funcs(psp);
196 break;
197 case IP_VERSION(13, 0, 2):
198 case IP_VERSION(13, 0, 6):
199 psp_v13_0_set_psp_funcs(psp);
200 break;
201 case IP_VERSION(13, 0, 1):
202 case IP_VERSION(13, 0, 3):
203 case IP_VERSION(13, 0, 5):
204 case IP_VERSION(13, 0, 8):
205 case IP_VERSION(13, 0, 11):
206 case IP_VERSION(14, 0, 0):
207 psp_v13_0_set_psp_funcs(psp);
208 psp->autoload_supported = true;
209 break;
210 case IP_VERSION(11, 0, 8):
211 if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
212 psp_v11_0_8_set_psp_funcs(psp);
213 psp->autoload_supported = false;
214 }
215 break;
216 case IP_VERSION(13, 0, 0):
217 case IP_VERSION(13, 0, 7):
218 case IP_VERSION(13, 0, 10):
219 psp_v13_0_set_psp_funcs(psp);
220 psp->autoload_supported = true;
221 adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
222 break;
223 case IP_VERSION(13, 0, 4):
224 psp_v13_0_4_set_psp_funcs(psp);
225 psp->autoload_supported = true;
226 break;
227 default:
228 return -EINVAL;
229 }
230
231 psp->adev = adev;
232
233 psp_check_pmfw_centralized_cstate_management(psp);
234
235 if (amdgpu_sriov_vf(adev))
236 return psp_init_sriov_microcode(psp);
237 else
238 return psp_init_microcode(psp);
239 }
240
241 void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
242 {
243 amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
244 &mem_ctx->shared_buf);
245 mem_ctx->shared_bo = NULL;
246 }
247
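/* Release the TMR buffer and every TA shared memory buffer. */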
248 static void psp_free_shared_bufs(struct psp_context *psp)
249 {
250 void *tmr_buf;
251 void **pptr;
252
253 /* free TMR memory buffer */
254 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
255 amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
256 psp->tmr_bo = NULL;
257
258 /* free xgmi shared memory */
259 psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context);
260
261 /* free ras shared memory */
262 psp_ta_free_shared_buf(&psp->ras_context.context.mem_context);
263
264 /* free hdcp shared memory */
265 psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context);
266
267 /* free dtm shared memory */
268 psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context);
269
270 /* free rap shared memory */
271 psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
272
273 /* free securedisplay shared memory */
274 psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
275
276
277 }
278
279 static void psp_memory_training_fini(struct psp_context *psp)
280 {
281 struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
282
283 ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
284 kfree(ctx->sys_cache);
285 ctx->sys_cache = NULL;
286 }
287
288 static int psp_memory_training_init(struct psp_context *psp)
289 {
290 int ret;
291 struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
292
293 if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
294 DRM_DEBUG("memory training is not supported!\n");
295 return 0;
296 }
297
298 ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
299 if (ctx->sys_cache == NULL) {
300 DRM_ERROR("alloc mem_train_ctx.sys_cache failed!\n");
301 ret = -ENOMEM;
302 goto Err_out;
303 }
304
305 DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
306 ctx->train_data_size,
307 ctx->p2c_train_data_offset,
308 ctx->c2p_train_data_offset);
309 ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
310 return 0;
311
312 Err_out:
313 psp_memory_training_fini(psp);
314 return ret;
315 }
316
317 /*
318 * Helper function to query a psp runtime database entry
319 *
320 * @adev: amdgpu_device pointer
321 * @entry_type: the type of psp runtime database entry
322 * @db_entry: runtime database entry pointer
323 *
324 * Return true if the specific database entry is found and copied to @db_entry,
325 * or false if the runtime database doesn't exist or the entry is invalid
326 */
327 static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
328 enum psp_runtime_entry_type entry_type,
329 void *db_entry)
330 {
331 uint64_t db_header_pos, db_dir_pos;
332 struct psp_runtime_data_header db_header = {0};
333 struct psp_runtime_data_directory db_dir = {0};
334 bool ret = false;
335 int i;
336
337 if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 6))
338 return false;
339
340 db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;
341 db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header);
342
343 /* read runtime db header from vram */
344 amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header,
345 sizeof(struct psp_runtime_data_header), false);
346
347 if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) {
348 /* runtime db doesn't exist, exit */
349 dev_dbg(adev->dev, "PSP runtime database doesn't exist\n");
350 return false;
351 }
352
353 /* read runtime database entry from vram */
354 amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir,
355 sizeof(struct psp_runtime_data_directory), false);
356
357 if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) {
358 /* invalid db entry count, exit */
359 dev_warn(adev->dev, "Invalid PSP runtime database entry count\n");
360 return false;
361 }
362
363 /* look up for requested entry type */
364 for (i = 0; i < db_dir.entry_count && !ret; i++) {
365 if (db_dir.entry_list[i].entry_type == entry_type) {
366 switch (entry_type) {
367 case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG:
368 if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) {
369 /* invalid db entry size */
370 dev_warn(adev->dev, "Invalid PSP runtime database boot cfg entry size\n");
371 return false;
372 }
373 /* read runtime database entry */
374 amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
375 (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false);
376 ret = true;
377 break;
378 case PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS:
379 if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_scpm_entry)) {
380 /* invalid db entry size */
381 dev_warn(adev->dev, "Invalid PSP runtime database scpm entry size\n");
382 return false;
383 }
384 /* read runtime database entry */
385 amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
386 (uint32_t *)db_entry, sizeof(struct psp_runtime_scpm_entry), false);
387 ret = true;
388 break;
389 default:
390 ret = false;
391 break;
392 }
393 }
394 }
395
396 return ret;
397 }
398
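/*
 * Software init: allocate the PSP command structure, read SCPM and boot
 * config state from the runtime database, run two-stage memory training if
 * enabled, and create the firmware private, fence and command buffers.
 */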
399 static int psp_sw_init(void *handle)
400 {
401 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
402 struct psp_context *psp = &adev->psp;
403 int ret;
404 struct psp_runtime_boot_cfg_entry boot_cfg_entry;
405 struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx;
406 struct psp_runtime_scpm_entry scpm_entry;
407
408 psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
409 if (!psp->cmd) {
410 DRM_ERROR("Failed to allocate memory to command buffer!\n");
411 return -ENOMEM;
412 }
413
414 adev->psp.xgmi_context.supports_extended_data =
415 !adev->gmc.xgmi.connected_to_cpu &&
416 adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2);
417
418 memset(&scpm_entry, 0, sizeof(scpm_entry));
419 if ((psp_get_runtime_db_entry(adev,
420 PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS,
421 &scpm_entry)) &&
422 (scpm_entry.scpm_status != SCPM_DISABLE)) {
423 adev->scpm_enabled = true;
424 adev->scpm_status = scpm_entry.scpm_status;
425 } else {
426 adev->scpm_enabled = false;
427 adev->scpm_status = SCPM_DISABLE;
428 }
429
430 /* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */
431
432 memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry));
433 if (psp_get_runtime_db_entry(adev,
434 PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
435 &boot_cfg_entry)) {
436 psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask;
437 if ((psp->boot_cfg_bitmask) &
438 BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) {
439 /* If psp runtime database exists, then
440 * only enable two stage memory training
441 * when TWO_STAGE_DRAM_TRAINING bit is set
442 * in runtime database
443 */
444 mem_training_ctx->enable_mem_training = true;
445 }
446
447 } else {
448 /* If psp runtime database doesn't exist or is
449 * invalid, force enable two stage memory training
450 */
451 mem_training_ctx->enable_mem_training = true;
452 }
453
454 if (mem_training_ctx->enable_mem_training) {
455 ret = psp_memory_training_init(psp);
456 if (ret) {
457 DRM_ERROR("Failed to initialize memory training!\n");
458 return ret;
459 }
460
461 ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
462 if (ret) {
463 DRM_ERROR("Failed to process memory training!\n");
464 return ret;
465 }
466 }
467
468 ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
469 amdgpu_sriov_vf(adev) ?
470 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
471 &psp->fw_pri_bo,
472 &psp->fw_pri_mc_addr,
473 &psp->fw_pri_buf);
474 if (ret)
475 return ret;
476
477 ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
478 AMDGPU_GEM_DOMAIN_VRAM |
479 AMDGPU_GEM_DOMAIN_GTT,
480 &psp->fence_buf_bo,
481 &psp->fence_buf_mc_addr,
482 &psp->fence_buf);
483 if (ret)
484 goto failed1;
485
486 ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
487 AMDGPU_GEM_DOMAIN_VRAM |
488 AMDGPU_GEM_DOMAIN_GTT,
489 &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
490 (void **)&psp->cmd_buf_mem);
491 if (ret)
492 goto failed2;
493
494 return 0;
495
496 failed2:
497 amdgpu_bo_free_kernel(&psp->fence_buf_bo,
498 &psp->fence_buf_mc_addr, &psp->fence_buf);
499 failed1:
500 amdgpu_bo_free_kernel(&psp->fw_pri_bo,
501 &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
502 return ret;
503 }
504
505 static int psp_sw_fini(void *handle)
506 {
507 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
508 struct psp_context *psp = &adev->psp;
509 struct psp_gfx_cmd_resp *cmd = psp->cmd;
510
511 psp_memory_training_fini(psp);
512
513 amdgpu_ucode_release(&psp->sos_fw);
514 amdgpu_ucode_release(&psp->asd_fw);
515 amdgpu_ucode_release(&psp->ta_fw);
516 amdgpu_ucode_release(&psp->cap_fw);
517 amdgpu_ucode_release(&psp->toc_fw);
518
519 kfree(cmd);
520 cmd = NULL;
521
522 psp_free_shared_bufs(psp);
523
524 if (psp->km_ring.ring_mem)
525 amdgpu_bo_free_kernel(&adev->firmware.rbuf,
526 &psp->km_ring.ring_mem_mc_addr,
527 (void **)&psp->km_ring.ring_mem);
528
529 amdgpu_bo_free_kernel(&psp->fw_pri_bo,
530 &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
531 amdgpu_bo_free_kernel(&psp->fence_buf_bo,
532 &psp->fence_buf_mc_addr, &psp->fence_buf);
533 amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
534 (void **)&psp->cmd_buf_mem);
535
536 return 0;
537 }
538
539 int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
540 uint32_t reg_val, uint32_t mask, bool check_changed)
541 {
542 uint32_t val;
543 int i;
544 struct amdgpu_device *adev = psp->adev;
545
546 if (psp->adev->no_hw_access)
547 return 0;
548
549 for (i = 0; i < adev->usec_timeout; i++) {
550 val = RREG32(reg_index);
551 if (check_changed) {
552 if (val != reg_val)
553 return 0;
554 } else {
555 if ((val & mask) == reg_val)
556 return 0;
557 }
558 udelay(1);
559 }
560
561 return -ETIME;
562 }
563
564 int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
565 uint32_t reg_val, uint32_t mask, uint32_t msec_timeout)
566 {
567 uint32_t val;
568 int i;
569 struct amdgpu_device *adev = psp->adev;
570
571 if (psp->adev->no_hw_access)
572 return 0;
573
574 for (i = 0; i < msec_timeout; i++) {
575 val = RREG32(reg_index);
576 if ((val & mask) == reg_val)
577 return 0;
578 drm_msleep(1);
579 }
580
581 return -ETIME;
582 }
583
584 static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
585 {
586 switch (cmd_id) {
587 case GFX_CMD_ID_LOAD_TA:
588 return "LOAD_TA";
589 case GFX_CMD_ID_UNLOAD_TA:
590 return "UNLOAD_TA";
591 case GFX_CMD_ID_INVOKE_CMD:
592 return "INVOKE_CMD";
593 case GFX_CMD_ID_LOAD_ASD:
594 return "LOAD_ASD";
595 case GFX_CMD_ID_SETUP_TMR:
596 return "SETUP_TMR";
597 case GFX_CMD_ID_LOAD_IP_FW:
598 return "LOAD_IP_FW";
599 case GFX_CMD_ID_DESTROY_TMR:
600 return "DESTROY_TMR";
601 case GFX_CMD_ID_SAVE_RESTORE:
602 return "SAVE_RESTORE_IP_FW";
603 case GFX_CMD_ID_SETUP_VMR:
604 return "SETUP_VMR";
605 case GFX_CMD_ID_DESTROY_VMR:
606 return "DESTROY_VMR";
607 case GFX_CMD_ID_PROG_REG:
608 return "PROG_REG";
609 case GFX_CMD_ID_GET_FW_ATTESTATION:
610 return "GET_FW_ATTESTATION";
611 case GFX_CMD_ID_LOAD_TOC:
612 return "ID_LOAD_TOC";
613 case GFX_CMD_ID_AUTOLOAD_RLC:
614 return "AUTOLOAD_RLC";
615 case GFX_CMD_ID_BOOT_CFG:
616 return "BOOT_CFG";
617 default:
618 return "UNKNOWN CMD";
619 }
620 }
621
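/*
 * Copy @cmd into the shared command buffer, submit it on the KM ring and
 * poll the fence buffer until the PSP writes back the expected fence value,
 * bailing out early on timeout or a RAS interrupt.
 */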
622 static int
623 psp_cmd_submit_buf(struct psp_context *psp,
624 struct amdgpu_firmware_info *ucode,
625 struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
626 {
627 int ret;
628 int index;
629 int timeout = 20000;
630 bool ras_intr = false;
631 bool skip_unsupport = false;
632
633 if (psp->adev->no_hw_access)
634 return 0;
635
636 memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
637
638 memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
639
640 index = atomic_inc_return(&psp->fence_value);
641 ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
642 if (ret) {
643 atomic_dec(&psp->fence_value);
644 goto exit;
645 }
646
647 amdgpu_device_invalidate_hdp(psp->adev, NULL);
648 while (*((unsigned int *)psp->fence_buf) != index) {
649 if (--timeout == 0)
650 break;
651 /*
652 * Shouldn't wait for the timeout when err_event_athub occurs,
653 * because the gpu reset thread has been triggered and the lock resource
654 * should be released for the psp resume sequence.
655 */
656 ras_intr = amdgpu_ras_intr_triggered();
657 if (ras_intr)
658 break;
659 usleep_range(10, 100);
660 amdgpu_device_invalidate_hdp(psp->adev, NULL);
661 }
662
663 /* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */
664 skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
665 psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev);
666
667 memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp));
668
669 /* In some cases, the psp response status is not 0 even though there is no
670 * problem while the command is submitted. Some versions of PSP FW
671 * don't write 0 to that field.
672 * So here we only print a warning instead of an error during psp
673 * initialization to avoid breaking hw_init, and we don't
674 * return -EINVAL.
675 */
676 if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
677 if (ucode)
678 DRM_WARN("failed to load ucode %s(0x%X) ",
679 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
680 DRM_WARN("psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
681 psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), psp->cmd_buf_mem->cmd_id,
682 psp->cmd_buf_mem->resp.status);
683 /* If any firmware (including CAP) load fails under SRIOV, it should
684 * return failure to stop the VF from initializing.
685 * Also return failure in case of timeout
686 */
687 if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) {
688 ret = -EINVAL;
689 goto exit;
690 }
691 }
692
693 if (ucode) {
694 ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
695 ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
696 }
697
698 exit:
699 return ret;
700 }
701
702 static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp)
703 {
704 struct psp_gfx_cmd_resp *cmd = psp->cmd;
705
706 mutex_lock(&psp->mutex);
707
708 memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
709
710 return cmd;
711 }
712
713 static void release_psp_cmd_buf(struct psp_context *psp)
714 {
715 mutex_unlock(&psp->mutex);
716 }
717
718 static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
719 struct psp_gfx_cmd_resp *cmd,
720 uint64_t tmr_mc, struct amdgpu_bo *tmr_bo)
721 {
722 struct amdgpu_device *adev = psp->adev;
723 uint32_t size = 0;
724 uint64_t tmr_pa = 0;
725
726 if (tmr_bo) {
727 size = amdgpu_bo_size(tmr_bo);
728 tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo);
729 }
730
731 if (amdgpu_sriov_vf(psp->adev))
732 cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
733 else
734 cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
735 cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
736 cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
737 cmd->cmd.cmd_setup_tmr.buf_size = size;
738 cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1;
739 cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa);
740 cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa);
741 }
742
743 static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
744 uint64_t pri_buf_mc, uint32_t size)
745 {
746 cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
747 cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
748 cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
749 cmd->cmd.cmd_load_toc.toc_size = size;
750 }
751
752 /* Issue LOAD TOC cmd to PSP to parse the toc and calculate the TMR size needed */
753 static int psp_load_toc(struct psp_context *psp,
754 uint32_t *tmr_size)
755 {
756 int ret;
757 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
758
759 /* Copy toc to psp firmware private buffer */
760 psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes);
761
762 psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes);
763
764 ret = psp_cmd_submit_buf(psp, NULL, cmd,
765 psp->fence_buf_mc_addr);
766 if (!ret)
767 *tmr_size = psp->cmd_buf_mem->resp.tmr_size;
768
769 release_psp_cmd_buf(psp);
770
771 return ret;
772 }
773
774 static bool psp_boottime_tmr(struct psp_context *psp)
775 {
776 switch (psp->adev->ip_versions[MP0_HWIP][0]) {
777 case IP_VERSION(13, 0, 6):
778 return true;
779 default:
780 return false;
781 }
782 }
783
784 /* Set up Trusted Memory Region */
785 static int psp_tmr_init(struct psp_context *psp)
786 {
787 int ret = 0;
788 int tmr_size;
789 void *tmr_buf;
790 void **pptr;
791
792 /*
793 * According to the HW engineers, they prefer the TMR address to be "naturally
794 * aligned", e.g. the start address is an integer multiple of the TMR size.
795 *
796 * Note: this memory needs to stay reserved until the driver
797 * uninitializes.
798 */
799 tmr_size = PSP_TMR_SIZE(psp->adev);
800
801 /* For ASICs that support RLC autoload, psp will parse the toc
802 * and calculate the total TMR size needed
803 */
804 if (!amdgpu_sriov_vf(psp->adev) &&
805 psp->toc.start_addr &&
806 psp->toc.size_bytes &&
807 psp->fw_pri_buf) {
808 ret = psp_load_toc(psp, &tmr_size);
809 if (ret) {
810 DRM_ERROR("Failed to load toc\n");
811 return ret;
812 }
813 }
814
815 if (!psp->tmr_bo) {
816 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
817 ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
818 PSP_TMR_ALIGNMENT,
819 AMDGPU_HAS_VRAM(psp->adev) ?
820 AMDGPU_GEM_DOMAIN_VRAM :
821 AMDGPU_GEM_DOMAIN_GTT,
822 &psp->tmr_bo, &psp->tmr_mc_addr,
823 pptr);
824 }
825
826 return ret;
827 }
828
829 static bool psp_skip_tmr(struct psp_context *psp)
830 {
831 switch (psp->adev->ip_versions[MP0_HWIP][0]) {
832 case IP_VERSION(11, 0, 9):
833 case IP_VERSION(11, 0, 7):
834 case IP_VERSION(13, 0, 2):
835 case IP_VERSION(13, 0, 6):
836 case IP_VERSION(13, 0, 10):
837 return true;
838 default:
839 return false;
840 }
841 }
842
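/* Ask the PSP to set up the TMR (or VMR under SRIOV) at the reserved address. */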
843 static int psp_tmr_load(struct psp_context *psp)
844 {
845 int ret;
846 struct psp_gfx_cmd_resp *cmd;
847
848 /* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR.
849 * Already set up by host driver.
850 */
851 if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
852 return 0;
853
854 cmd = acquire_psp_cmd_buf(psp);
855
856 psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
857 if (psp->tmr_bo)
858 DRM_INFO("reserve 0x%lx from 0x%llx for PSP TMR\n",
859 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
860
861 ret = psp_cmd_submit_buf(psp, NULL, cmd,
862 psp->fence_buf_mc_addr);
863
864 release_psp_cmd_buf(psp);
865
866 return ret;
867 }
868
869 static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
870 struct psp_gfx_cmd_resp *cmd)
871 {
872 if (amdgpu_sriov_vf(psp->adev))
873 cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
874 else
875 cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
876 }
877
878 static int psp_tmr_unload(struct psp_context *psp)
879 {
880 int ret;
881 struct psp_gfx_cmd_resp *cmd;
882
883 /* skip TMR unload for Navi12 and CHIP_SIENNA_CICHLID SRIOV,
884 * as TMR is not loaded at all
885 */
886 if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
887 return 0;
888
889 cmd = acquire_psp_cmd_buf(psp);
890
891 psp_prep_tmr_unload_cmd_buf(psp, cmd);
892 dev_dbg(psp->adev->dev, "free PSP TMR buffer\n");
893
894 ret = psp_cmd_submit_buf(psp, NULL, cmd,
895 psp->fence_buf_mc_addr);
896
897 release_psp_cmd_buf(psp);
898
899 return ret;
900 }
901
902 static int psp_tmr_terminate(struct psp_context *psp)
903 {
904 return psp_tmr_unload(psp);
905 }
906
907 int psp_get_fw_attestation_records_addr(struct psp_context *psp,
908 uint64_t *output_ptr)
909 {
910 int ret;
911 struct psp_gfx_cmd_resp *cmd;
912
913 if (!output_ptr)
914 return -EINVAL;
915
916 if (amdgpu_sriov_vf(psp->adev))
917 return 0;
918
919 cmd = acquire_psp_cmd_buf(psp);
920
921 cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION;
922
923 ret = psp_cmd_submit_buf(psp, NULL, cmd,
924 psp->fence_buf_mc_addr);
925
926 if (!ret) {
927 *output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) +
928 ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32);
929 }
930
931 release_psp_cmd_buf(psp);
932
933 return ret;
934 }
935
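/* Query the boot config from the PSP and report whether GECC is enabled. */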
936 static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
937 {
938 struct psp_context *psp = &adev->psp;
939 struct psp_gfx_cmd_resp *cmd;
940 int ret;
941
942 if (amdgpu_sriov_vf(adev))
943 return 0;
944
945 cmd = acquire_psp_cmd_buf(psp);
946
947 cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
948 cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET;
949
950 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
951 if (!ret) {
952 *boot_cfg =
953 (cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 1 : 0;
954 }
955
956 release_psp_cmd_buf(psp);
957
958 return ret;
959 }
960
961 static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg)
962 {
963 int ret;
964 struct psp_context *psp = &adev->psp;
965 struct psp_gfx_cmd_resp *cmd;
966
967 if (amdgpu_sriov_vf(adev))
968 return 0;
969
970 cmd = acquire_psp_cmd_buf(psp);
971
972 cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
973 cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
974 cmd->cmd.boot_cfg.boot_config = boot_cfg;
975 cmd->cmd.boot_cfg.boot_config_valid = boot_cfg;
976
977 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
978
979 release_psp_cmd_buf(psp);
980
981 return ret;
982 }
983
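/* Load the register list firmware, if present, through LOAD_IP_FW. */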
984 static int psp_rl_load(struct amdgpu_device *adev)
985 {
986 int ret;
987 struct psp_context *psp = &adev->psp;
988 struct psp_gfx_cmd_resp *cmd;
989
990 if (!is_psp_fw_valid(psp->rl))
991 return 0;
992
993 cmd = acquire_psp_cmd_buf(psp);
994
995 memset(psp->fw_pri_buf, 0, PSP_1_MEG);
996 memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes);
997
998 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
999 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr);
1000 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr);
1001 cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes;
1002 cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST;
1003
1004 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1005
1006 release_psp_cmd_buf(psp);
1007
1008 return ret;
1009 }
1010
1011 int psp_spatial_partition(struct psp_context *psp, int mode)
1012 {
1013 struct psp_gfx_cmd_resp *cmd;
1014 int ret;
1015
1016 if (amdgpu_sriov_vf(psp->adev))
1017 return 0;
1018
1019 cmd = acquire_psp_cmd_buf(psp);
1020
1021 cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART;
1022 cmd->cmd.cmd_spatial_part.mode = mode;
1023
1024 dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode);
1025 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1026
1027 release_psp_cmd_buf(psp);
1028
1029 return ret;
1030 }
1031
1032 static int psp_asd_initialize(struct psp_context *psp)
1033 {
1034 int ret;
1035
1036 /* If the PSP version doesn't match the ASD version, ASD loading will fail.
1037 * Add a workaround to bypass it for sriov for now.
1038 * TODO: add a version check to make it common
1039 */
1040 if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes)
1041 return 0;
1042
1043 psp->asd_context.mem_context.shared_mc_addr = 0;
1044 psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE;
1045 psp->asd_context.ta_load_type = GFX_CMD_ID_LOAD_ASD;
1046
1047 ret = psp_ta_load(psp, &psp->asd_context);
1048 if (!ret)
1049 psp->asd_context.initialized = true;
1050
1051 return ret;
1052 }
1053
1054 static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1055 uint32_t session_id)
1056 {
1057 cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
1058 cmd->cmd.cmd_unload_ta.session_id = session_id;
1059 }
1060
1061 int psp_ta_unload(struct psp_context *psp, struct ta_context *context)
1062 {
1063 int ret;
1064 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1065
1066 psp_prep_ta_unload_cmd_buf(cmd, context->session_id);
1067
1068 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1069
1070 context->resp_status = cmd->resp.status;
1071
1072 release_psp_cmd_buf(psp);
1073
1074 return ret;
1075 }
1076
1077 static int psp_asd_terminate(struct psp_context *psp)
1078 {
1079 int ret;
1080
1081 if (amdgpu_sriov_vf(psp->adev))
1082 return 0;
1083
1084 if (!psp->asd_context.initialized)
1085 return 0;
1086
1087 ret = psp_ta_unload(psp, &psp->asd_context);
1088 if (!ret)
1089 psp->asd_context.initialized = false;
1090
1091 return ret;
1092 }
1093
1094 static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1095 uint32_t id, uint32_t value)
1096 {
1097 cmd->cmd_id = GFX_CMD_ID_PROG_REG;
1098 cmd->cmd.cmd_setup_reg_prog.reg_value = value;
1099 cmd->cmd.cmd_setup_reg_prog.reg_id = id;
1100 }
1101
1102 int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
1103 uint32_t value)
1104 {
1105 struct psp_gfx_cmd_resp *cmd;
1106 int ret = 0;
1107
1108 if (reg >= PSP_REG_LAST)
1109 return -EINVAL;
1110
1111 cmd = acquire_psp_cmd_buf(psp);
1112
1113 psp_prep_reg_prog_cmd_buf(cmd, reg, value);
1114 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1115 if (ret)
1116 DRM_ERROR("PSP failed to program reg id %d", reg);
1117
1118 release_psp_cmd_buf(psp);
1119
1120 return ret;
1121 }
1122
1123 static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1124 uint64_t ta_bin_mc,
1125 struct ta_context *context)
1126 {
1127 cmd->cmd_id = context->ta_load_type;
1128 cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc);
1129 cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc);
1130 cmd->cmd.cmd_load_ta.app_len = context->bin_desc.size_bytes;
1131
1132 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
1133 lower_32_bits(context->mem_context.shared_mc_addr);
1134 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
1135 upper_32_bits(context->mem_context.shared_mc_addr);
1136 cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size;
1137 }
1138
1139 int psp_ta_init_shared_buf(struct psp_context *psp,
1140 struct ta_mem_context *mem_ctx)
1141 {
1142 /*
1143 * Allocate 16k of memory aligned to 4k from the Frame Buffer (local
1144 * physical) for the ta <-> host shared memory
1145 */
1146 return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
1147 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
1148 AMDGPU_GEM_DOMAIN_GTT,
1149 &mem_ctx->shared_bo,
1150 &mem_ctx->shared_mc_addr,
1151 &mem_ctx->shared_buf);
1152 }
1153
1154 static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1155 uint32_t ta_cmd_id,
1156 uint32_t session_id)
1157 {
1158 cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
1159 cmd->cmd.cmd_invoke_cmd.session_id = session_id;
1160 cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
1161 }
1162
1163 int psp_ta_invoke(struct psp_context *psp,
1164 uint32_t ta_cmd_id,
1165 struct ta_context *context)
1166 {
1167 int ret;
1168 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1169
1170 psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id);
1171
1172 ret = psp_cmd_submit_buf(psp, NULL, cmd,
1173 psp->fence_buf_mc_addr);
1174
1175 context->resp_status = cmd->resp.status;
1176
1177 release_psp_cmd_buf(psp);
1178
1179 return ret;
1180 }
1181
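/*
 * Copy a TA binary into the firmware private buffer and issue the load
 * command; on success the PSP returns a session id used by later invokes.
 */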
1182 int psp_ta_load(struct psp_context *psp, struct ta_context *context)
1183 {
1184 int ret;
1185 struct psp_gfx_cmd_resp *cmd;
1186
1187 cmd = acquire_psp_cmd_buf(psp);
1188
1189 psp_copy_fw(psp, context->bin_desc.start_addr,
1190 context->bin_desc.size_bytes);
1191
1192 psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);
1193
1194 ret = psp_cmd_submit_buf(psp, NULL, cmd,
1195 psp->fence_buf_mc_addr);
1196
1197 context->resp_status = cmd->resp.status;
1198
1199 if (!ret)
1200 context->session_id = cmd->resp.session_id;
1201
1202 release_psp_cmd_buf(psp);
1203
1204 return ret;
1205 }
1206
1207 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1208 {
1209 return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context);
1210 }
1211
1212 int psp_xgmi_terminate(struct psp_context *psp)
1213 {
1214 int ret;
1215 struct amdgpu_device *adev = psp->adev;
1216
1217 /* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */
1218 if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 4) ||
1219 (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2) &&
1220 adev->gmc.xgmi.connected_to_cpu))
1221 return 0;
1222
1223 if (!psp->xgmi_context.context.initialized)
1224 return 0;
1225
1226 ret = psp_ta_unload(psp, &psp->xgmi_context.context);
1227
1228 psp->xgmi_context.context.initialized = false;
1229
1230 return ret;
1231 }
1232
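/*
 * Load the XGMI TA (unless only an invoke is requested) and initialize the
 * XGMI session through its shared memory buffer.
 */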
1233 int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta)
1234 {
1235 struct ta_xgmi_shared_memory *xgmi_cmd;
1236 int ret;
1237
1238 if (!psp->ta_fw ||
1239 !psp->xgmi_context.context.bin_desc.size_bytes ||
1240 !psp->xgmi_context.context.bin_desc.start_addr)
1241 return -ENOENT;
1242
1243 if (!load_ta)
1244 goto invoke;
1245
1246 psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
1247 psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1248
1249 if (!psp->xgmi_context.context.mem_context.shared_buf) {
1250 ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
1251 if (ret)
1252 return ret;
1253 }
1254
1255 /* Load XGMI TA */
1256 ret = psp_ta_load(psp, &psp->xgmi_context.context);
1257 if (!ret)
1258 psp->xgmi_context.context.initialized = true;
1259 else
1260 return ret;
1261
1262 invoke:
1263 /* Initialize XGMI session */
1264 xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf);
1265 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1266 xgmi_cmd->flag_extend_link_record = set_extended_data;
1267 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;
1268
1269 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1270
1271 return ret;
1272 }
1273
1274 int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
1275 {
1276 struct ta_xgmi_shared_memory *xgmi_cmd;
1277 int ret;
1278
1279 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1280 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1281
1282 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
1283
1284 /* Invoke xgmi ta to get hive id */
1285 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1286 if (ret)
1287 return ret;
1288
1289 *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
1290
1291 return 0;
1292 }
1293
1294 int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
1295 {
1296 struct ta_xgmi_shared_memory *xgmi_cmd;
1297 int ret;
1298
1299 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1300 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1301
1302 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
1303
1304 /* Invoke xgmi ta to get the node id */
1305 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1306 if (ret)
1307 return ret;
1308
1309 *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
1310
1311 return 0;
1312 }
1313
1314 static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
1315 {
1316 return (psp->adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2) &&
1317 psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) ||
1318 psp->adev->ip_versions[MP0_HWIP][0] >= IP_VERSION(13, 0, 6);
1319 }
1320
1321 /*
1322 * Chips that support extended topology information require the driver to
1323 * reflect topology information in the opposite direction. This is
1324 * because the TA has already exceeded its link record limit and if the
1325 * TA holds bi-directional information, the driver would have to do
1326 * multiple fetches instead of just two.
1327 */
1328 static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
1329 struct psp_xgmi_node_info node_info)
1330 {
1331 struct amdgpu_device *mirror_adev;
1332 struct amdgpu_hive_info *hive;
1333 uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
1334 uint64_t dst_node_id = node_info.node_id;
1335 uint8_t dst_num_hops = node_info.num_hops;
1336 uint8_t dst_num_links = node_info.num_links;
1337
1338 hive = amdgpu_get_xgmi_hive(psp->adev);
1339 if (WARN_ON(!hive))
1340 return;
1341
1342 list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
1343 struct psp_xgmi_topology_info *mirror_top_info;
1344 int j;
1345
1346 if (mirror_adev->gmc.xgmi.node_id != dst_node_id)
1347 continue;
1348
1349 mirror_top_info = &mirror_adev->psp.xgmi_context.top_info;
1350 for (j = 0; j < mirror_top_info->num_nodes; j++) {
1351 if (mirror_top_info->nodes[j].node_id != src_node_id)
1352 continue;
1353
1354 mirror_top_info->nodes[j].num_hops = dst_num_hops;
1355 /*
1356 * prevent re-reflection of a 0 num_links value since the reflection
1357 * criterion is based on num_hops (direct or indirect).
1358 */
1360 if (dst_num_links)
1361 mirror_top_info->nodes[j].num_links = dst_num_links;
1362
1363 break;
1364 }
1365
1366 break;
1367 }
1368
1369 amdgpu_put_xgmi_hive(hive);
1370 }
1371
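/*
 * Query the XGMI TA for node and topology information; when peer link info
 * is supported, a second invoke fetches per-node link counts and the result
 * may be reflected back to the mirror nodes.
 */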
1372 int psp_xgmi_get_topology_info(struct psp_context *psp,
1373 int number_devices,
1374 struct psp_xgmi_topology_info *topology,
1375 bool get_extended_data)
1376 {
1377 struct ta_xgmi_shared_memory *xgmi_cmd;
1378 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1379 struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
1380 int i;
1381 int ret;
1382
1383 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1384 return -EINVAL;
1385
1386 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1387 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1388 xgmi_cmd->flag_extend_link_record = get_extended_data;
1389
1390 /* Fill in the shared memory with topology information as input */
1391 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1392 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO;
1393 topology_info_input->num_nodes = number_devices;
1394
1395 for (i = 0; i < topology_info_input->num_nodes; i++) {
1396 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1397 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1398 topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
1399 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1400 }
1401
1402 /* Invoke xgmi ta to get the topology information */
1403 ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO);
1404 if (ret)
1405 return ret;
1406
1407 /* Read the output topology information from the shared memory */
1408 topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
1409 topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
1410 for (i = 0; i < topology->num_nodes; i++) {
1411 /* extended data will either be 0 or equal to non-extended data */
1412 if (topology_info_output->nodes[i].num_hops)
1413 topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
1414
1415 /* non-extended data gets everything here so no need to update */
1416 if (!get_extended_data) {
1417 topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
1418 topology->nodes[i].is_sharing_enabled =
1419 topology_info_output->nodes[i].is_sharing_enabled;
1420 topology->nodes[i].sdma_engine =
1421 topology_info_output->nodes[i].sdma_engine;
1422 }
1423
1424 }
1425
1426 /* Invoke xgmi ta again to get the link information */
1427 if (psp_xgmi_peer_link_info_supported(psp)) {
1428 struct ta_xgmi_cmd_get_peer_link_info_output *link_info_output;
1429 bool requires_reflection =
1430 (psp->xgmi_context.supports_extended_data && get_extended_data) ||
1431 psp->adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 6);
1432
1433 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS;
1434
1435 ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_PEER_LINKS);
1436
1437 if (ret)
1438 return ret;
1439
1440 link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;
1441 for (i = 0; i < topology->num_nodes; i++) {
1442 /* accumulate num_links on extended data */
1443 topology->nodes[i].num_links = get_extended_data ?
1444 topology->nodes[i].num_links +
1445 link_info_output->nodes[i].num_links :
1446 ((requires_reflection && topology->nodes[i].num_links) ? topology->nodes[i].num_links :
1447 link_info_output->nodes[i].num_links);
1448
1449 /* reflect the topology information for bi-directionality */
1450 if (requires_reflection && topology->nodes[i].num_hops)
1451 psp_xgmi_reflect_topology_info(psp, topology->nodes[i]);
1452 }
1453 }
1454
1455 return 0;
1456 }
1457
1458 int psp_xgmi_set_topology_info(struct psp_context *psp,
1459 int number_devices,
1460 struct psp_xgmi_topology_info *topology)
1461 {
1462 struct ta_xgmi_shared_memory *xgmi_cmd;
1463 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1464 int i;
1465
1466 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1467 return -EINVAL;
1468
1469 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1470 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1471
1472 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1473 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
1474 topology_info_input->num_nodes = number_devices;
1475
1476 for (i = 0; i < topology_info_input->num_nodes; i++) {
1477 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1478 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1479 topology_info_input->nodes[i].is_sharing_enabled = 1;
1480 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1481 }
1482
1483 /* Invoke xgmi ta to set topology information */
1484 return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
1485 }
1486
1487 // ras begin
1488 static void psp_ras_ta_check_status(struct psp_context *psp)
1489 {
1490 struct ta_ras_shared_memory *ras_cmd =
1491 (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1492
1493 switch (ras_cmd->ras_status) {
1494 case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
1495 dev_warn(psp->adev->dev,
1496 "RAS WARNING: cmd failed due to unsupported ip\n");
1497 break;
1498 case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
1499 dev_warn(psp->adev->dev,
1500 "RAS WARNING: cmd failed due to unsupported error injection\n");
1501 break;
1502 case TA_RAS_STATUS__SUCCESS:
1503 break;
1504 case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED:
1505 if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR)
1506 dev_warn(psp->adev->dev,
1507 "RAS WARNING: Inject error to critical region is not allowed\n");
1508 break;
1509 default:
1510 dev_warn(psp->adev->dev,
1511 "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
1512 break;
1513 }
1514 }
1515
1516 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1517 {
1518 struct ta_ras_shared_memory *ras_cmd;
1519 int ret;
1520
1521 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1522
1523 /*
1524 * TODO: bypass the loading in sriov for now
1525 */
1526 if (amdgpu_sriov_vf(psp->adev))
1527 return 0;
1528
1529 ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context);
1530
1531 if (amdgpu_ras_intr_triggered())
1532 return ret;
1533
1534 if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
1535 DRM_WARN("RAS: Unsupported Interface");
1536 return -EINVAL;
1537 }
1538
1539 if (!ret) {
1540 if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
1541 dev_warn(psp->adev->dev, "ECC switch disabled\n");
1542
1543 ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
1544 } else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
1545 dev_warn(psp->adev->dev,
1546 "RAS internal register access blocked\n");
1547
1548 psp_ras_ta_check_status(psp);
1549 }
1550
1551 return ret;
1552 }
1553
1554 int psp_ras_enable_features(struct psp_context *psp,
1555 union ta_ras_cmd_input *info, bool enable)
1556 {
1557 struct ta_ras_shared_memory *ras_cmd;
1558 int ret;
1559
1560 if (!psp->ras_context.context.initialized)
1561 return -EINVAL;
1562
1563 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1564 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1565
1566 if (enable)
1567 ras_cmd->cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES;
1568 else
1569 ras_cmd->cmd_id = TA_RAS_COMMAND__DISABLE_FEATURES;
1570
1571 ras_cmd->ras_in_message = *info;
1572
1573 ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1574 if (ret)
1575 return -EINVAL;
1576
1577 return 0;
1578 }
1579
1580 int psp_ras_terminate(struct psp_context *psp)
1581 {
1582 int ret;
1583
1584 /*
1585 * TODO: bypass the terminate in sriov for now
1586 */
1587 if (amdgpu_sriov_vf(psp->adev))
1588 return 0;
1589
1590 if (!psp->ras_context.context.initialized)
1591 return 0;
1592
1593 ret = psp_ta_unload(psp, &psp->ras_context.context);
1594
1595 psp->ras_context.context.initialized = false;
1596
1597 return ret;
1598 }
1599
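/*
 * Load the RAS TA: reconcile the GECC boot config with RAS support, set up
 * the shared buffer and pass the init flags (poison mode, dgpu mode, xcc
 * mask, disabled channel count) to the TA.
 */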
1600 int psp_ras_initialize(struct psp_context *psp)
1601 {
1602 int ret;
1603 uint32_t boot_cfg = 0xFF;
1604 struct amdgpu_device *adev = psp->adev;
1605 struct ta_ras_shared_memory *ras_cmd;
1606
1607 /*
1608 * TODO: bypass the initialize in sriov for now
1609 */
1610 if (amdgpu_sriov_vf(adev))
1611 return 0;
1612
1613 if (!adev->psp.ras_context.context.bin_desc.size_bytes ||
1614 !adev->psp.ras_context.context.bin_desc.start_addr) {
1615 dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
1616 return 0;
1617 }
1618
1619 if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) {
1620 /* query GECC enablement status from boot config
1621 * boot_cfg: 1: GECC is enabled or 0: GECC is disabled
1622 */
1623 ret = psp_boot_config_get(adev, &boot_cfg);
1624 if (ret)
1625 dev_warn(adev->dev, "PSP get boot config failed\n");
1626
1627 if (!amdgpu_ras_is_supported(psp->adev, AMDGPU_RAS_BLOCK__UMC)) {
1628 if (!boot_cfg) {
1629 dev_info(adev->dev, "GECC is disabled\n");
1630 } else {
1631 /* disable GECC in next boot cycle if ras is
1632 * disabled by module parameter amdgpu_ras_enable
1633 * and/or amdgpu_ras_mask, or the boot_config_get call
1634 * failed
1635 */
1636 ret = psp_boot_config_set(adev, 0);
1637 if (ret)
1638 dev_warn(adev->dev, "PSP set boot config failed\n");
1639 else
1640 dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n");
1641 }
1642 } else {
1643 if (boot_cfg == 1) {
1644 dev_info(adev->dev, "GECC is enabled\n");
1645 } else {
1646 /* enable GECC in next boot cycle if it is disabled
1647 * in boot config, or force enable GECC if failed to
1648 * get boot configuration
1649 */
1650 ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
1651 if (ret)
1652 dev_warn(adev->dev, "PSP set boot config failed\n");
1653 else
1654 dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
1655 }
1656 }
1657 }
1658
1659 psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE;
1660 psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1661
1662 if (!psp->ras_context.context.mem_context.shared_buf) {
1663 ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context);
1664 if (ret)
1665 return ret;
1666 }
1667
1668 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1669 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1670
1671 if (amdgpu_ras_is_poison_mode_supported(adev))
1672 ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
1673 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
1674 ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
1675 ras_cmd->ras_in_message.init_flags.xcc_mask =
1676 adev->gfx.xcc_mask;
1677 ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2;
1678
1679 ret = psp_ta_load(psp, &psp->ras_context.context);
1680
1681 if (!ret && !ras_cmd->ras_status)
1682 psp->ras_context.context.initialized = true;
1683 else {
1684 if (ras_cmd->ras_status)
1685 dev_warn(psp->adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
1686
1687 /* fail to load RAS TA */
1688 psp->ras_context.context.initialized = false;
1689 }
1690
1691 return ret;
1692 }
1693
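/*
 * Ask the RAS TA to inject an error; the instance mask is translated into a
 * device mask and packed into sub_block_index for backward compatibility.
 */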
1694 int psp_ras_trigger_error(struct psp_context *psp,
1695 struct ta_ras_trigger_error_input *info, uint32_t instance_mask)
1696 {
1697 struct ta_ras_shared_memory *ras_cmd;
1698 struct amdgpu_device *adev = psp->adev;
1699 int ret;
1700 uint32_t dev_mask;
1701
1702 if (!psp->ras_context.context.initialized)
1703 return -EINVAL;
1704
1705 switch (info->block_id) {
1706 case TA_RAS_BLOCK__GFX:
1707 dev_mask = GET_MASK(GC, instance_mask);
1708 break;
1709 case TA_RAS_BLOCK__SDMA:
1710 dev_mask = GET_MASK(SDMA0, instance_mask);
1711 break;
1712 case TA_RAS_BLOCK__VCN:
1713 case TA_RAS_BLOCK__JPEG:
1714 dev_mask = GET_MASK(VCN, instance_mask);
1715 break;
1716 default:
1717 dev_mask = instance_mask;
1718 break;
1719 }
1720
1721 /* reuse sub_block_index for backward compatibility */
1722 dev_mask <<= AMDGPU_RAS_INST_SHIFT;
1723 dev_mask &= AMDGPU_RAS_INST_MASK;
1724 info->sub_block_index |= dev_mask;
1725
1726 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1727 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1728
1729 ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
1730 ras_cmd->ras_in_message.trigger_error = *info;
1731
1732 ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1733 if (ret)
1734 return -EINVAL;
1735
1736 /* If err_event_athub occurs, the error injection was successful; however,
1737 * the return status from the TA is no longer reliable
1738 */
1739 if (amdgpu_ras_intr_triggered())
1740 return 0;
1741
1742 if (ras_cmd->ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
1743 return -EACCES;
1744 else if (ras_cmd->ras_status)
1745 return -EINVAL;
1746
1747 return 0;
1748 }
1749 // ras end
1750
1751 // HDCP start
1752 static int psp_hdcp_initialize(struct psp_context *psp)
1753 {
1754 int ret;
1755
1756 /*
1757 * TODO: bypass the initialize in sriov for now
1758 */
1759 if (amdgpu_sriov_vf(psp->adev))
1760 return 0;
1761
1762 if (!psp->hdcp_context.context.bin_desc.size_bytes ||
1763 !psp->hdcp_context.context.bin_desc.start_addr) {
1764 dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
1765 return 0;
1766 }
1767
1768 psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
1769 psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1770
1771 if (!psp->hdcp_context.context.mem_context.shared_buf) {
1772 ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
1773 if (ret)
1774 return ret;
1775 }
1776
1777 ret = psp_ta_load(psp, &psp->hdcp_context.context);
1778 if (!ret) {
1779 psp->hdcp_context.context.initialized = true;
1780 rw_init(&psp->hdcp_context.mutex, "pspcp");
1781 }
1782
1783 return ret;
1784 }
1785
1786 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1787 {
1788 /*
1789 * TODO: bypass the loading in sriov for now
1790 */
1791 if (amdgpu_sriov_vf(psp->adev))
1792 return 0;
1793
1794 return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context);
1795 }
1796
1797 static int psp_hdcp_terminate(struct psp_context *psp)
1798 {
1799 int ret;
1800
1801 /*
1802 * TODO: bypass the terminate in sriov for now
1803 */
1804 if (amdgpu_sriov_vf(psp->adev))
1805 return 0;
1806
1807 if (!psp->hdcp_context.context.initialized)
1808 return 0;
1809
1810 ret = psp_ta_unload(psp, &psp->hdcp_context.context);
1811
1812 psp->hdcp_context.context.initialized = false;
1813
1814 return ret;
1815 }
1816 // HDCP end
1817
1818 // DTM start
1819 static int psp_dtm_initialize(struct psp_context *psp)
1820 {
1821 int ret;
1822
1823 /*
1824 * TODO: bypass the initialize in sriov for now
1825 */
1826 if (amdgpu_sriov_vf(psp->adev))
1827 return 0;
1828
1829 if (!psp->dtm_context.context.bin_desc.size_bytes ||
1830 !psp->dtm_context.context.bin_desc.start_addr) {
1831 dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
1832 return 0;
1833 }
1834
1835 psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE;
1836 psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1837
1838 if (!psp->dtm_context.context.mem_context.shared_buf) {
1839 ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context);
1840 if (ret)
1841 return ret;
1842 }
1843
1844 ret = psp_ta_load(psp, &psp->dtm_context.context);
1845 if (!ret) {
1846 psp->dtm_context.context.initialized = true;
1847 rw_init(&psp->dtm_context.mutex, "pspdtm");
1848 }
1849
1850 return ret;
1851 }
1852
1853 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1854 {
1855 /*
1856 * TODO: bypass the loading in sriov for now
1857 */
1858 if (amdgpu_sriov_vf(psp->adev))
1859 return 0;
1860
1861 return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context);
1862 }
1863
1864 static int psp_dtm_terminate(struct psp_context *psp)
1865 {
1866 int ret;
1867
1868 /*
1869 * TODO: bypass the terminate in sriov for now
1870 */
1871 if (amdgpu_sriov_vf(psp->adev))
1872 return 0;
1873
1874 if (!psp->dtm_context.context.initialized)
1875 return 0;
1876
1877 ret = psp_ta_unload(psp, &psp->dtm_context.context);
1878
1879 psp->dtm_context.context.initialized = false;
1880
1881 return ret;
1882 }
1883 // DTM end
1884
1885 // RAP start
1886 static int psp_rap_initialize(struct psp_context *psp)
1887 {
1888 int ret;
1889 enum ta_rap_status status = TA_RAP_STATUS__SUCCESS;
1890
1891 /*
1892 * TODO: bypass the initialize in sriov for now
1893 */
1894 if (amdgpu_sriov_vf(psp->adev))
1895 return 0;
1896
1897 if (!psp->rap_context.context.bin_desc.size_bytes ||
1898 !psp->rap_context.context.bin_desc.start_addr) {
1899 dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n");
1900 return 0;
1901 }
1902
1903 psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE;
1904 psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1905
1906 if (!psp->rap_context.context.mem_context.shared_buf) {
1907 ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context);
1908 if (ret)
1909 return ret;
1910 }
1911
1912 ret = psp_ta_load(psp, &psp->rap_context.context);
1913 if (!ret) {
1914 psp->rap_context.context.initialized = true;
1915 rw_init(&psp->rap_context.mutex, "psprap");
1916 } else
1917 return ret;
1918
1919 ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status);
1920 if (ret || status != TA_RAP_STATUS__SUCCESS) {
1921 psp_rap_terminate(psp);
1922 /* free rap shared memory */
1923 psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
1924
1925 dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n",
1926 ret, status);
1927
1928 return ret;
1929 }
1930
1931 return 0;
1932 }
1933
1934 static int psp_rap_terminate(struct psp_context *psp)
1935 {
1936 int ret;
1937
1938 if (!psp->rap_context.context.initialized)
1939 return 0;
1940
1941 ret = psp_ta_unload(psp, &psp->rap_context.context);
1942
1943 psp->rap_context.context.initialized = false;
1944
1945 return ret;
1946 }
1947
1948 int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status)
1949 {
1950 struct ta_rap_shared_memory *rap_cmd;
1951 int ret = 0;
1952
1953 if (!psp->rap_context.context.initialized)
1954 return 0;
1955
1956 if (ta_cmd_id != TA_CMD_RAP__INITIALIZE &&
1957 ta_cmd_id != TA_CMD_RAP__VALIDATE_L0)
1958 return -EINVAL;
1959
1960 mutex_lock(&psp->rap_context.mutex);
1961
1962 rap_cmd = (struct ta_rap_shared_memory *)
1963 psp->rap_context.context.mem_context.shared_buf;
1964 memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory));
1965
1966 rap_cmd->cmd_id = ta_cmd_id;
1967 rap_cmd->validation_method_id = METHOD_A;
1968
1969 ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context);
1970 if (ret)
1971 goto out_unlock;
1972
1973 if (status)
1974 *status = rap_cmd->rap_status;
1975
1976 out_unlock:
1977 mutex_unlock(&psp->rap_context.mutex);
1978
1979 return ret;
1980 }
1981 // RAP end
1982
1983 /* securedisplay start */
1984 static int psp_securedisplay_initialize(struct psp_context *psp)
1985 {
1986 int ret;
1987 struct ta_securedisplay_cmd *securedisplay_cmd;
1988
1989 /*
1990 * TODO: bypass the initialize in sriov for now
1991 */
1992 if (amdgpu_sriov_vf(psp->adev))
1993 return 0;
1994
1995 if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
1996 !psp->securedisplay_context.context.bin_desc.start_addr) {
1997 dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
1998 return 0;
1999 }
2000
2001 #ifdef __OpenBSD__
2002 /*
2003 * with 20230117 or later firmware or later on renoir:
2004 *
2005 * [drm] psp gfx command LOAD_TA(0x1) failed and response status is (0x7)
2006 * [drm] psp gfx command INVOKE_CMD(0x3) failed and response status is (0x4)
2007 * psp_securedisplay_parse_resp_status *ERROR* Secure display: Generic Failure
2008 * psp_securedisplay_initialize *ERROR* SECUREDISPLAY: query
2009 * securedisplay TA failed. ret 0x0
2010 */
2011 return 0;
2012 #endif
2013
2014 psp->securedisplay_context.context.mem_context.shared_mem_size =
2015 PSP_SECUREDISPLAY_SHARED_MEM_SIZE;
2016 psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2017
2018 if (!psp->securedisplay_context.context.initialized) {
2019 ret = psp_ta_init_shared_buf(psp,
2020 &psp->securedisplay_context.context.mem_context);
2021 if (ret)
2022 return ret;
2023 }
2024
2025 ret = psp_ta_load(psp, &psp->securedisplay_context.context);
2026 if (!ret) {
2027 psp->securedisplay_context.context.initialized = true;
2028 rw_init(&psp->securedisplay_context.mutex, "pscm");
2029 } else
2030 return ret;
2031
2032 mutex_lock(&psp->securedisplay_context.mutex);
2033
2034 psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
2035 TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2036
2037 ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2038
2039 mutex_unlock(&psp->securedisplay_context.mutex);
2040
2041 if (ret) {
2042 psp_securedisplay_terminate(psp);
2043 /* free securedisplay shared memory */
2044 psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
2045 dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n");
2046 return -EINVAL;
2047 }
2048
2049 if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
2050 psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
2051 dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n",
2052 securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
2053 /* don't try again */
2054 psp->securedisplay_context.context.bin_desc.size_bytes = 0;
2055 }
2056
2057 return 0;
2058 }
2059
2060 static int psp_securedisplay_terminate(struct psp_context *psp)
2061 {
2062 int ret;
2063
2064 /*
2065 * TODO: bypass the terminate in sriov for now
2066 */
2067 if (amdgpu_sriov_vf(psp->adev))
2068 return 0;
2069
2070 if (!psp->securedisplay_context.context.initialized)
2071 return 0;
2072
2073 ret = psp_ta_unload(psp, &psp->securedisplay_context.context);
2074
2075 psp->securedisplay_context.context.initialized = false;
2076
2077 return ret;
2078 }
2079
2080 int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2081 {
2082 int ret;
2083
2084 if (!psp->securedisplay_context.context.initialized)
2085 return -EINVAL;
2086
2087 if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&
2088 ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC)
2089 return -EINVAL;
2090
2091 ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context);
2092
2093 return ret;
2094 }
2095 /* SECUREDISPLAY end */
2096
2097 int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev)
2098 {
2099 struct psp_context *psp = &adev->psp;
2100 int ret = 0;
2101
2102 if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL)
2103 ret = psp->funcs->wait_for_bootloader(psp);
2104
2105 return ret;
2106 }
2107
2108 static int psp_hw_start(struct psp_context *psp)
2109 {
2110 struct amdgpu_device *adev = psp->adev;
2111 int ret;
2112
2113 if (!amdgpu_sriov_vf(adev)) {
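/* On bare metal, walk the bootloader stages in order: KDB, SPL,
 * SYS_DRV, SOC_DRV, INTF_DRV, DBG_DRV, RAS_DRV and finally the
 * secure OS (SOS). Each stage is loaded only if its binary is
 * present and the ASIC provides a loader callback for it.
 */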
2114 if ((is_psp_fw_valid(psp->kdb)) &&
2115 (psp->funcs->bootloader_load_kdb != NULL)) {
2116 ret = psp_bootloader_load_kdb(psp);
2117 if (ret) {
2118 DRM_ERROR("PSP load kdb failed!\n");
2119 return ret;
2120 }
2121 }
2122
2123 if ((is_psp_fw_valid(psp->spl)) &&
2124 (psp->funcs->bootloader_load_spl != NULL)) {
2125 ret = psp_bootloader_load_spl(psp);
2126 if (ret) {
2127 DRM_ERROR("PSP load spl failed!\n");
2128 return ret;
2129 }
2130 }
2131
2132 if ((is_psp_fw_valid(psp->sys)) &&
2133 (psp->funcs->bootloader_load_sysdrv != NULL)) {
2134 ret = psp_bootloader_load_sysdrv(psp);
2135 if (ret) {
2136 DRM_ERROR("PSP load sys drv failed!\n");
2137 return ret;
2138 }
2139 }
2140
2141 if ((is_psp_fw_valid(psp->soc_drv)) &&
2142 (psp->funcs->bootloader_load_soc_drv != NULL)) {
2143 ret = psp_bootloader_load_soc_drv(psp);
2144 if (ret) {
2145 DRM_ERROR("PSP load soc drv failed!\n");
2146 return ret;
2147 }
2148 }
2149
2150 if ((is_psp_fw_valid(psp->intf_drv)) &&
2151 (psp->funcs->bootloader_load_intf_drv != NULL)) {
2152 ret = psp_bootloader_load_intf_drv(psp);
2153 if (ret) {
2154 DRM_ERROR("PSP load intf drv failed!\n");
2155 return ret;
2156 }
2157 }
2158
2159 if ((is_psp_fw_valid(psp->dbg_drv)) &&
2160 (psp->funcs->bootloader_load_dbg_drv != NULL)) {
2161 ret = psp_bootloader_load_dbg_drv(psp);
2162 if (ret) {
2163 DRM_ERROR("PSP load dbg drv failed!\n");
2164 return ret;
2165 }
2166 }
2167
2168 if ((is_psp_fw_valid(psp->ras_drv)) &&
2169 (psp->funcs->bootloader_load_ras_drv != NULL)) {
2170 ret = psp_bootloader_load_ras_drv(psp);
2171 if (ret) {
2172 DRM_ERROR("PSP load ras_drv failed!\n");
2173 return ret;
2174 }
2175 }
2176
2177 if ((is_psp_fw_valid(psp->sos)) &&
2178 (psp->funcs->bootloader_load_sos != NULL)) {
2179 ret = psp_bootloader_load_sos(psp);
2180 if (ret) {
2181 DRM_ERROR("PSP load sos failed!\n");
2182 return ret;
2183 }
2184 }
2185 }
2186
2187 ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
2188 if (ret) {
2189 DRM_ERROR("PSP create ring failed!\n");
2190 return ret;
2191 }
2192
2193 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
2194 goto skip_pin_bo;
2195
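/* Reserve a driver-managed TMR only when PSP has not already set
 * one up at boot time.
 */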
2196 if (!psp_boottime_tmr(psp)) {
2197 ret = psp_tmr_init(psp);
2198 if (ret) {
2199 DRM_ERROR("PSP tmr init failed!\n");
2200 return ret;
2201 }
2202 }
2203
2204 skip_pin_bo:
2205 /*
2206 * For ASICs with DF Cstate management centralized
2207 * to PMFW, TMR setup should be performed after PMFW
2208 * loaded and before other non-psp firmware loaded.
2209 */
2210 if (psp->pmfw_centralized_cstate_management) {
2211 ret = psp_load_smu_fw(psp);
2212 if (ret)
2213 return ret;
2214 }
2215
2216 ret = psp_tmr_load(psp);
2217 if (ret) {
2218 DRM_ERROR("PSP load tmr failed!\n");
2219 return ret;
2220 }
2221
2222 return 0;
2223 }
2224
2225 static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
2226 enum psp_gfx_fw_type *type)
2227 {
2228 switch (ucode->ucode_id) {
2229 case AMDGPU_UCODE_ID_CAP:
2230 *type = GFX_FW_TYPE_CAP;
2231 break;
2232 case AMDGPU_UCODE_ID_SDMA0:
2233 *type = GFX_FW_TYPE_SDMA0;
2234 break;
2235 case AMDGPU_UCODE_ID_SDMA1:
2236 *type = GFX_FW_TYPE_SDMA1;
2237 break;
2238 case AMDGPU_UCODE_ID_SDMA2:
2239 *type = GFX_FW_TYPE_SDMA2;
2240 break;
2241 case AMDGPU_UCODE_ID_SDMA3:
2242 *type = GFX_FW_TYPE_SDMA3;
2243 break;
2244 case AMDGPU_UCODE_ID_SDMA4:
2245 *type = GFX_FW_TYPE_SDMA4;
2246 break;
2247 case AMDGPU_UCODE_ID_SDMA5:
2248 *type = GFX_FW_TYPE_SDMA5;
2249 break;
2250 case AMDGPU_UCODE_ID_SDMA6:
2251 *type = GFX_FW_TYPE_SDMA6;
2252 break;
2253 case AMDGPU_UCODE_ID_SDMA7:
2254 *type = GFX_FW_TYPE_SDMA7;
2255 break;
2256 case AMDGPU_UCODE_ID_CP_MES:
2257 *type = GFX_FW_TYPE_CP_MES;
2258 break;
2259 case AMDGPU_UCODE_ID_CP_MES_DATA:
2260 *type = GFX_FW_TYPE_MES_STACK;
2261 break;
2262 case AMDGPU_UCODE_ID_CP_MES1:
2263 *type = GFX_FW_TYPE_CP_MES_KIQ;
2264 break;
2265 case AMDGPU_UCODE_ID_CP_MES1_DATA:
2266 *type = GFX_FW_TYPE_MES_KIQ_STACK;
2267 break;
2268 case AMDGPU_UCODE_ID_CP_CE:
2269 *type = GFX_FW_TYPE_CP_CE;
2270 break;
2271 case AMDGPU_UCODE_ID_CP_PFP:
2272 *type = GFX_FW_TYPE_CP_PFP;
2273 break;
2274 case AMDGPU_UCODE_ID_CP_ME:
2275 *type = GFX_FW_TYPE_CP_ME;
2276 break;
2277 case AMDGPU_UCODE_ID_CP_MEC1:
2278 *type = GFX_FW_TYPE_CP_MEC;
2279 break;
2280 case AMDGPU_UCODE_ID_CP_MEC1_JT:
2281 *type = GFX_FW_TYPE_CP_MEC_ME1;
2282 break;
2283 case AMDGPU_UCODE_ID_CP_MEC2:
2284 *type = GFX_FW_TYPE_CP_MEC;
2285 break;
2286 case AMDGPU_UCODE_ID_CP_MEC2_JT:
2287 *type = GFX_FW_TYPE_CP_MEC_ME2;
2288 break;
2289 case AMDGPU_UCODE_ID_RLC_P:
2290 *type = GFX_FW_TYPE_RLC_P;
2291 break;
2292 case AMDGPU_UCODE_ID_RLC_V:
2293 *type = GFX_FW_TYPE_RLC_V;
2294 break;
2295 case AMDGPU_UCODE_ID_RLC_G:
2296 *type = GFX_FW_TYPE_RLC_G;
2297 break;
2298 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
2299 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL;
2300 break;
2301 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
2302 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
2303 break;
2304 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
2305 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
2306 break;
2307 case AMDGPU_UCODE_ID_RLC_IRAM:
2308 *type = GFX_FW_TYPE_RLC_IRAM;
2309 break;
2310 case AMDGPU_UCODE_ID_RLC_DRAM:
2311 *type = GFX_FW_TYPE_RLC_DRAM_BOOT;
2312 break;
2313 case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS:
2314 *type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS;
2315 break;
2316 case AMDGPU_UCODE_ID_SE0_TAP_DELAYS:
2317 *type = GFX_FW_TYPE_SE0_TAP_DELAYS;
2318 break;
2319 case AMDGPU_UCODE_ID_SE1_TAP_DELAYS:
2320 *type = GFX_FW_TYPE_SE1_TAP_DELAYS;
2321 break;
2322 case AMDGPU_UCODE_ID_SE2_TAP_DELAYS:
2323 *type = GFX_FW_TYPE_SE2_TAP_DELAYS;
2324 break;
2325 case AMDGPU_UCODE_ID_SE3_TAP_DELAYS:
2326 *type = GFX_FW_TYPE_SE3_TAP_DELAYS;
2327 break;
2328 case AMDGPU_UCODE_ID_SMC:
2329 *type = GFX_FW_TYPE_SMU;
2330 break;
2331 case AMDGPU_UCODE_ID_PPTABLE:
2332 *type = GFX_FW_TYPE_PPTABLE;
2333 break;
2334 case AMDGPU_UCODE_ID_UVD:
2335 *type = GFX_FW_TYPE_UVD;
2336 break;
2337 case AMDGPU_UCODE_ID_UVD1:
2338 *type = GFX_FW_TYPE_UVD1;
2339 break;
2340 case AMDGPU_UCODE_ID_VCE:
2341 *type = GFX_FW_TYPE_VCE;
2342 break;
2343 case AMDGPU_UCODE_ID_VCN:
2344 *type = GFX_FW_TYPE_VCN;
2345 break;
2346 case AMDGPU_UCODE_ID_VCN1:
2347 *type = GFX_FW_TYPE_VCN1;
2348 break;
2349 case AMDGPU_UCODE_ID_DMCU_ERAM:
2350 *type = GFX_FW_TYPE_DMCU_ERAM;
2351 break;
2352 case AMDGPU_UCODE_ID_DMCU_INTV:
2353 *type = GFX_FW_TYPE_DMCU_ISR;
2354 break;
2355 case AMDGPU_UCODE_ID_VCN0_RAM:
2356 *type = GFX_FW_TYPE_VCN0_RAM;
2357 break;
2358 case AMDGPU_UCODE_ID_VCN1_RAM:
2359 *type = GFX_FW_TYPE_VCN1_RAM;
2360 break;
2361 case AMDGPU_UCODE_ID_DMCUB:
2362 *type = GFX_FW_TYPE_DMUB;
2363 break;
2364 case AMDGPU_UCODE_ID_SDMA_UCODE_TH0:
2365 *type = GFX_FW_TYPE_SDMA_UCODE_TH0;
2366 break;
2367 case AMDGPU_UCODE_ID_SDMA_UCODE_TH1:
2368 *type = GFX_FW_TYPE_SDMA_UCODE_TH1;
2369 break;
2370 case AMDGPU_UCODE_ID_IMU_I:
2371 *type = GFX_FW_TYPE_IMU_I;
2372 break;
2373 case AMDGPU_UCODE_ID_IMU_D:
2374 *type = GFX_FW_TYPE_IMU_D;
2375 break;
2376 case AMDGPU_UCODE_ID_CP_RS64_PFP:
2377 *type = GFX_FW_TYPE_RS64_PFP;
2378 break;
2379 case AMDGPU_UCODE_ID_CP_RS64_ME:
2380 *type = GFX_FW_TYPE_RS64_ME;
2381 break;
2382 case AMDGPU_UCODE_ID_CP_RS64_MEC:
2383 *type = GFX_FW_TYPE_RS64_MEC;
2384 break;
2385 case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
2386 *type = GFX_FW_TYPE_RS64_PFP_P0_STACK;
2387 break;
2388 case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
2389 *type = GFX_FW_TYPE_RS64_PFP_P1_STACK;
2390 break;
2391 case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
2392 *type = GFX_FW_TYPE_RS64_ME_P0_STACK;
2393 break;
2394 case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
2395 *type = GFX_FW_TYPE_RS64_ME_P1_STACK;
2396 break;
2397 case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
2398 *type = GFX_FW_TYPE_RS64_MEC_P0_STACK;
2399 break;
2400 case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
2401 *type = GFX_FW_TYPE_RS64_MEC_P1_STACK;
2402 break;
2403 case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
2404 *type = GFX_FW_TYPE_RS64_MEC_P2_STACK;
2405 break;
2406 case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
2407 *type = GFX_FW_TYPE_RS64_MEC_P3_STACK;
2408 break;
2409 case AMDGPU_UCODE_ID_MAXIMUM:
2410 default:
2411 return -EINVAL;
2412 }
2413
2414 return 0;
2415 }
2416
2417 static void psp_print_fw_hdr(struct psp_context *psp,
2418 struct amdgpu_firmware_info *ucode)
2419 {
2420 struct amdgpu_device *adev = psp->adev;
2421 struct common_firmware_header *hdr;
2422
2423 switch (ucode->ucode_id) {
2424 case AMDGPU_UCODE_ID_SDMA0:
2425 case AMDGPU_UCODE_ID_SDMA1:
2426 case AMDGPU_UCODE_ID_SDMA2:
2427 case AMDGPU_UCODE_ID_SDMA3:
2428 case AMDGPU_UCODE_ID_SDMA4:
2429 case AMDGPU_UCODE_ID_SDMA5:
2430 case AMDGPU_UCODE_ID_SDMA6:
2431 case AMDGPU_UCODE_ID_SDMA7:
2432 hdr = (struct common_firmware_header *)
2433 adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
2434 amdgpu_ucode_print_sdma_hdr(hdr);
2435 break;
2436 case AMDGPU_UCODE_ID_CP_CE:
2437 hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
2438 amdgpu_ucode_print_gfx_hdr(hdr);
2439 break;
2440 case AMDGPU_UCODE_ID_CP_PFP:
2441 hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
2442 amdgpu_ucode_print_gfx_hdr(hdr);
2443 break;
2444 case AMDGPU_UCODE_ID_CP_ME:
2445 hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
2446 amdgpu_ucode_print_gfx_hdr(hdr);
2447 break;
2448 case AMDGPU_UCODE_ID_CP_MEC1:
2449 hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
2450 amdgpu_ucode_print_gfx_hdr(hdr);
2451 break;
2452 case AMDGPU_UCODE_ID_RLC_G:
2453 hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
2454 amdgpu_ucode_print_rlc_hdr(hdr);
2455 break;
2456 case AMDGPU_UCODE_ID_SMC:
2457 hdr = (struct common_firmware_header *)adev->pm.fw->data;
2458 amdgpu_ucode_print_smc_hdr(hdr);
2459 break;
2460 default:
2461 break;
2462 }
2463 }
2464
2465 static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode,
2466 struct psp_gfx_cmd_resp *cmd)
2467 {
2468 int ret;
2469 uint64_t fw_mem_mc_addr = ucode->mc_addr;
2470
2471 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
2472 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
2473 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
2474 cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
2475
2476 ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
2477 if (ret)
2478 DRM_ERROR("Unknown firmware type\n");
2479
2480 return ret;
2481 }
2482
2483 int psp_execute_ip_fw_load(struct psp_context *psp,
2484 struct amdgpu_firmware_info *ucode)
2485 {
2486 int ret = 0;
2487 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
2488
2489 ret = psp_prep_load_ip_fw_cmd_buf(ucode, cmd);
2490 if (!ret) {
2491 ret = psp_cmd_submit_buf(psp, ucode, cmd,
2492 psp->fence_buf_mc_addr);
2493 }
2494
2495 release_psp_cmd_buf(psp);
2496
2497 return ret;
2498 }
2499
2500 static int psp_load_smu_fw(struct psp_context *psp)
2501 {
2502 int ret;
2503 struct amdgpu_device *adev = psp->adev;
2504 struct amdgpu_firmware_info *ucode =
2505 &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
2506 struct amdgpu_ras *ras = psp->ras_context.ras;
2507
2508 /*
2509 * Skip reloading SMU firmware when BACO is used for runtime PM only,
2510 * as the SMU stays alive.
2511 */
2512 if (adev->in_runpm && (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO))
2513 return 0;
2514
2515 if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2516 return 0;
2517
2518 if ((amdgpu_in_reset(adev) &&
2519 ras && adev->ras_enabled &&
2520 (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 4) ||
2521 adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 2)))) {
2522 ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
2523 if (ret)
2524 DRM_WARN("Failed to set MP1 state prepare for reload\n");
2525 }
2526
2527 ret = psp_execute_ip_fw_load(psp, ucode);
2528
2529 if (ret)
2530 DRM_ERROR("PSP load smu failed!\n");
2531
2532 return ret;
2533 }
2534
2535 static bool fw_load_skip_check(struct psp_context *psp,
2536 struct amdgpu_firmware_info *ucode)
2537 {
2538 if (!ucode->fw || !ucode->ucode_size)
2539 return true;
2540
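/* SMU firmware is handled by a dedicated load path (or not reloaded
 * at all) when autoloading, centralized cstate management or the SMU
 * reload quirk applies, so skip it in the generic list.
 */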
2541 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
2542 (psp_smu_reload_quirk(psp) ||
2543 psp->autoload_supported ||
2544 psp->pmfw_centralized_cstate_management))
2545 return true;
2546
2547 if (amdgpu_sriov_vf(psp->adev) &&
2548 amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id))
2549 return true;
2550
2551 if (psp->autoload_supported &&
2552 (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
2553 ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
2554 /* skip mec JT when autoload is enabled */
2555 return true;
2556
2557 return false;
2558 }
2559
2560 int psp_load_fw_list(struct psp_context *psp,
2561 struct amdgpu_firmware_info **ucode_list, int ucode_count)
2562 {
2563 int ret = 0, i;
2564 struct amdgpu_firmware_info *ucode;
2565
2566 for (i = 0; i < ucode_count; ++i) {
2567 ucode = ucode_list[i];
2568 psp_print_fw_hdr(psp, ucode);
2569 ret = psp_execute_ip_fw_load(psp, ucode);
2570 if (ret)
2571 return ret;
2572 }
2573 return ret;
2574 }
2575
2576 static int psp_load_non_psp_fw(struct psp_context *psp)
2577 {
2578 int i, ret;
2579 struct amdgpu_firmware_info *ucode;
2580 struct amdgpu_device *adev = psp->adev;
2581
2582 if (psp->autoload_supported &&
2583 !psp->pmfw_centralized_cstate_management) {
2584 ret = psp_load_smu_fw(psp);
2585 if (ret)
2586 return ret;
2587 }
2588
2589 for (i = 0; i < adev->firmware.max_ucodes; i++) {
2590 ucode = &adev->firmware.ucode[i];
2591
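/* When the SMC entry is not skipped (no autoload, quirk or
 * centralized cstate management), load the SMU firmware at its
 * position in the list through its dedicated path.
 */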
2592 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
2593 !fw_load_skip_check(psp, ucode)) {
2594 ret = psp_load_smu_fw(psp);
2595 if (ret)
2596 return ret;
2597 continue;
2598 }
2599
2600 if (fw_load_skip_check(psp, ucode))
2601 continue;
2602
2603 if (psp->autoload_supported &&
2604 (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7) ||
2605 adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 11) ||
2606 adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 12)) &&
2607 (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 ||
2608 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 ||
2609 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3))
2610 /* PSP only receives one SDMA firmware for sienna_cichlid,
2611 * as all four SDMA firmwares are identical
2612 */
2613 continue;
2614
2615 psp_print_fw_hdr(psp, ucode);
2616
2617 ret = psp_execute_ip_fw_load(psp, ucode);
2618 if (ret)
2619 return ret;
2620
2621 /* Start RLC autoload after PSP has received all the gfx firmware */
2622 if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
2623 adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) {
2624 ret = psp_rlc_autoload_start(psp);
2625 if (ret) {
2626 DRM_ERROR("Failed to start rlc autoload\n");
2627 return ret;
2628 }
2629 }
2630 }
2631
2632 return 0;
2633 }
2634
2635 static int psp_load_fw(struct amdgpu_device *adev)
2636 {
2637 int ret;
2638 struct psp_context *psp = &adev->psp;
2639
2640 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
2641 /* should not destroy ring, only stop */
2642 psp_ring_stop(psp, PSP_RING_TYPE__KM);
2643 } else {
2644 memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
2645
2646 ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
2647 if (ret) {
2648 DRM_ERROR("PSP ring init failed!\n");
2649 goto failed;
2650 }
2651 }
2652
2653 ret = psp_hw_start(psp);
2654 if (ret)
2655 goto failed;
2656
2657 ret = psp_load_non_psp_fw(psp);
2658 if (ret)
2659 goto failed1;
2660
2661 ret = psp_asd_initialize(psp);
2662 if (ret) {
2663 DRM_ERROR("PSP load asd failed!\n");
2664 goto failed1;
2665 }
2666
2667 ret = psp_rl_load(adev);
2668 if (ret) {
2669 DRM_ERROR("PSP load RL failed!\n");
2670 goto failed1;
2671 }
2672
2673 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
2674 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2675 ret = psp_xgmi_initialize(psp, false, true);
2676 /* Warn about an XGMI session initialization failure
2677 * instead of stopping driver initialization
2678 */
2679 if (ret)
2680 dev_err(psp->adev->dev,
2681 "XGMI: Failed to initialize XGMI session\n");
2682 }
2683 }
2684
2685 if (psp->ta_fw) {
2686 ret = psp_ras_initialize(psp);
2687 if (ret)
2688 dev_err(psp->adev->dev,
2689 "RAS: Failed to initialize RAS\n");
2690
2691 ret = psp_hdcp_initialize(psp);
2692 if (ret)
2693 dev_err(psp->adev->dev,
2694 "HDCP: Failed to initialize HDCP\n");
2695
2696 ret = psp_dtm_initialize(psp);
2697 if (ret)
2698 dev_err(psp->adev->dev,
2699 "DTM: Failed to initialize DTM\n");
2700
2701 ret = psp_rap_initialize(psp);
2702 if (ret)
2703 dev_err(psp->adev->dev,
2704 "RAP: Failed to initialize RAP\n");
2705
2706 ret = psp_securedisplay_initialize(psp);
2707 if (ret)
2708 dev_err(psp->adev->dev,
2709 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
2710 }
2711
2712 return 0;
2713
2714 failed1:
2715 psp_free_shared_bufs(psp);
2716 failed:
2717 /*
2718 * all cleanup jobs (xgmi terminate, ras terminate,
2719 * ring destroy, cmd/fence/fw buffers destroy,
2720 * psp->cmd destroy) are delayed to psp_hw_fini
2721 */
2722 psp_ring_destroy(psp, PSP_RING_TYPE__KM);
2723 return ret;
2724 }
2725
2726 static int psp_hw_init(void *handle)
2727 {
2728 int ret;
2729 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2730
2731 mutex_lock(&adev->firmware.mutex);
2732 /*
2733 * This sequence is just used on hw_init only once, no need on
2734 * resume.
2735 */
2736 ret = amdgpu_ucode_init_bo(adev);
2737 if (ret)
2738 goto failed;
2739
2740 ret = psp_load_fw(adev);
2741 if (ret) {
2742 DRM_ERROR("PSP firmware loading failed\n");
2743 goto failed;
2744 }
2745
2746 mutex_unlock(&adev->firmware.mutex);
2747 return 0;
2748
2749 failed:
2750 adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
2751 mutex_unlock(&adev->firmware.mutex);
2752 return -EINVAL;
2753 }
2754
2755 static int psp_hw_fini(void *handle)
2756 {
2757 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2758 struct psp_context *psp = &adev->psp;
2759
2760 if (psp->ta_fw) {
2761 psp_ras_terminate(psp);
2762 psp_securedisplay_terminate(psp);
2763 psp_rap_terminate(psp);
2764 psp_dtm_terminate(psp);
2765 psp_hdcp_terminate(psp);
2766
2767 if (adev->gmc.xgmi.num_physical_nodes > 1)
2768 psp_xgmi_terminate(psp);
2769 }
2770
2771 psp_asd_terminate(psp);
2772 psp_tmr_terminate(psp);
2773
2774 psp_ring_destroy(psp, PSP_RING_TYPE__KM);
2775
2776 return 0;
2777 }
2778
2779 static int psp_suspend(void *handle)
2780 {
2781 int ret = 0;
2782 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2783 struct psp_context *psp = &adev->psp;
2784
2785 if (adev->gmc.xgmi.num_physical_nodes > 1 &&
2786 psp->xgmi_context.context.initialized) {
2787 ret = psp_xgmi_terminate(psp);
2788 if (ret) {
2789 DRM_ERROR("Failed to terminate xgmi ta\n");
2790 goto out;
2791 }
2792 }
2793
2794 if (psp->ta_fw) {
2795 ret = psp_ras_terminate(psp);
2796 if (ret) {
2797 DRM_ERROR("Failed to terminate ras ta\n");
2798 goto out;
2799 }
2800 ret = psp_hdcp_terminate(psp);
2801 if (ret) {
2802 DRM_ERROR("Failed to terminate hdcp ta\n");
2803 goto out;
2804 }
2805 ret = psp_dtm_terminate(psp);
2806 if (ret) {
2807 DRM_ERROR("Failed to terminate dtm ta\n");
2808 goto out;
2809 }
2810 ret = psp_rap_terminate(psp);
2811 if (ret) {
2812 DRM_ERROR("Failed to terminate rap ta\n");
2813 goto out;
2814 }
2815 ret = psp_securedisplay_terminate(psp);
2816 if (ret) {
2817 DRM_ERROR("Failed to terminate securedisplay ta\n");
2818 goto out;
2819 }
2820 }
2821
2822 ret = psp_asd_terminate(psp);
2823 if (ret) {
2824 DRM_ERROR("Failed to terminate asd\n");
2825 goto out;
2826 }
2827
2828 ret = psp_tmr_terminate(psp);
2829 if (ret) {
2830 DRM_ERROR("Failed to terminate tmr\n");
2831 goto out;
2832 }
2833
2834 ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
2835 if (ret)
2836 DRM_ERROR("PSP ring stop failed\n");
2837
2838 out:
2839 return ret;
2840 }
2841
2842 static int psp_resume(void *handle)
2843 {
2844 int ret;
2845 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2846 struct psp_context *psp = &adev->psp;
2847
2848 DRM_INFO("PSP is resuming...\n");
2849
2850 if (psp->mem_train_ctx.enable_mem_training) {
2851 ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
2852 if (ret) {
2853 DRM_ERROR("Failed to process memory training!\n");
2854 return ret;
2855 }
2856 }
2857
2858 mutex_lock(&adev->firmware.mutex);
2859
2860 ret = psp_hw_start(psp);
2861 if (ret)
2862 goto failed;
2863
2864 ret = psp_load_non_psp_fw(psp);
2865 if (ret)
2866 goto failed;
2867
2868 ret = psp_asd_initialize(psp);
2869 if (ret) {
2870 DRM_ERROR("PSP load asd failed!\n");
2871 goto failed;
2872 }
2873
2874 ret = psp_rl_load(adev);
2875 if (ret) {
2876 dev_err(adev->dev, "PSP load RL failed!\n");
2877 goto failed;
2878 }
2879
2880 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2881 ret = psp_xgmi_initialize(psp, false, true);
2882 /* Warn about an XGMI session initialization failure
2883 * instead of stopping driver initialization
2884 */
2885 if (ret)
2886 dev_err(psp->adev->dev,
2887 "XGMI: Failed to initialize XGMI session\n");
2888 }
2889
2890 if (psp->ta_fw) {
2891 ret = psp_ras_initialize(psp);
2892 if (ret)
2893 dev_err(psp->adev->dev,
2894 "RAS: Failed to initialize RAS\n");
2895
2896 ret = psp_hdcp_initialize(psp);
2897 if (ret)
2898 dev_err(psp->adev->dev,
2899 "HDCP: Failed to initialize HDCP\n");
2900
2901 ret = psp_dtm_initialize(psp);
2902 if (ret)
2903 dev_err(psp->adev->dev,
2904 "DTM: Failed to initialize DTM\n");
2905
2906 ret = psp_rap_initialize(psp);
2907 if (ret)
2908 dev_err(psp->adev->dev,
2909 "RAP: Failed to initialize RAP\n");
2910
2911 ret = psp_securedisplay_initialize(psp);
2912 if (ret)
2913 dev_err(psp->adev->dev,
2914 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
2915 }
2916
2917 mutex_unlock(&adev->firmware.mutex);
2918
2919 return 0;
2920
2921 failed:
2922 DRM_ERROR("PSP resume failed\n");
2923 mutex_unlock(&adev->firmware.mutex);
2924 return ret;
2925 }
2926
2927 int psp_gpu_reset(struct amdgpu_device *adev)
2928 {
2929 int ret;
2930
2931 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
2932 return 0;
2933
2934 mutex_lock(&adev->psp.mutex);
2935 ret = psp_mode1_reset(&adev->psp);
2936 mutex_unlock(&adev->psp.mutex);
2937
2938 return ret;
2939 }
2940
2941 int psp_rlc_autoload_start(struct psp_context *psp)
2942 {
2943 int ret;
2944 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
2945
2946 cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC;
2947
2948 ret = psp_cmd_submit_buf(psp, NULL, cmd,
2949 psp->fence_buf_mc_addr);
2950
2951 release_psp_cmd_buf(psp);
2952
2953 return ret;
2954 }
2955
2956 int psp_ring_cmd_submit(struct psp_context *psp,
2957 uint64_t cmd_buf_mc_addr,
2958 uint64_t fence_mc_addr,
2959 int index)
2960 {
2961 unsigned int psp_write_ptr_reg = 0;
2962 struct psp_gfx_rb_frame *write_frame;
2963 struct psp_ring *ring = &psp->km_ring;
2964 struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
2965 struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
2966 ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
2967 struct amdgpu_device *adev = psp->adev;
2968 uint32_t ring_size_dw = ring->ring_size / 4;
2969 uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
2970
2971 /* KM (GPCOM) prepare write pointer */
2972 psp_write_ptr_reg = psp_ring_get_wptr(psp);
2973
2974 /* Update KM RB frame pointer to new frame */
2975 /* write_frame ptr increments by size of rb_frame in bytes */
2976 /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
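/* a wptr that has wrapped back to a multiple of the ring size in
 * DWORDs starts over at the first frame */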
2977 if ((psp_write_ptr_reg % ring_size_dw) == 0)
2978 write_frame = ring_buffer_start;
2979 else
2980 write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
2981 /* Check invalid write_frame ptr address */
2982 if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
2983 DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
2984 ring_buffer_start, ring_buffer_end, write_frame);
2985 DRM_ERROR("write_frame is pointing to address out of bounds\n");
2986 return -EINVAL;
2987 }
2988
2989 /* Initialize KM RB frame */
2990 memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
2991
2992 /* Update KM RB frame */
2993 write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
2994 write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
2995 write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
2996 write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
2997 write_frame->fence_value = index;
2998 amdgpu_device_flush_hdp(adev, NULL);
2999
3000 /* Update the write Pointer in DWORDs */
3001 psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
3002 psp_ring_set_wptr(psp, psp_write_ptr_reg);
3003 return 0;
3004 }
3005
3006 int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name)
3007 {
3008 struct amdgpu_device *adev = psp->adev;
3009 char fw_name[PSP_FW_NAME_LEN];
3010 const struct psp_firmware_header_v1_0 *asd_hdr;
3011 int err = 0;
3012
3013 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
3014 err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, fw_name);
3015 if (err)
3016 goto out;
3017
3018 asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
3019 adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
3020 adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
3021 adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
3022 adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr +
3023 le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
3024 return 0;
3025 out:
3026 amdgpu_ucode_release(&adev->psp.asd_fw);
3027 return err;
3028 }
3029
3030 int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name)
3031 {
3032 struct amdgpu_device *adev = psp->adev;
3033 char fw_name[PSP_FW_NAME_LEN];
3034 const struct psp_firmware_header_v1_0 *toc_hdr;
3035 int err = 0;
3036
3037 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", chip_name);
3038 err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, fw_name);
3039 if (err)
3040 goto out;
3041
3042 toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
3043 adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
3044 adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
3045 adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
3046 adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
3047 le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
3048 return 0;
3049 out:
3050 amdgpu_ucode_release(&adev->psp.toc_fw);
3051 return err;
3052 }
3053
3054 static int parse_sos_bin_descriptor(struct psp_context *psp,
3055 const struct psp_fw_bin_desc *desc,
3056 const struct psp_firmware_header_v2_0 *sos_hdr)
3057 {
3058 uint8_t *ucode_start_addr = NULL;
3059
3060 if (!psp || !desc || !sos_hdr)
3061 return -EINVAL;
3062
3063 ucode_start_addr = (uint8_t *)sos_hdr +
3064 le32_to_cpu(desc->offset_bytes) +
3065 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3066
3067 switch (desc->fw_type) {
3068 case PSP_FW_TYPE_PSP_SOS:
3069 psp->sos.fw_version = le32_to_cpu(desc->fw_version);
3070 psp->sos.feature_version = le32_to_cpu(desc->fw_version);
3071 psp->sos.size_bytes = le32_to_cpu(desc->size_bytes);
3072 psp->sos.start_addr = ucode_start_addr;
3073 break;
3074 case PSP_FW_TYPE_PSP_SYS_DRV:
3075 psp->sys.fw_version = le32_to_cpu(desc->fw_version);
3076 psp->sys.feature_version = le32_to_cpu(desc->fw_version);
3077 psp->sys.size_bytes = le32_to_cpu(desc->size_bytes);
3078 psp->sys.start_addr = ucode_start_addr;
3079 break;
3080 case PSP_FW_TYPE_PSP_KDB:
3081 psp->kdb.fw_version = le32_to_cpu(desc->fw_version);
3082 psp->kdb.feature_version = le32_to_cpu(desc->fw_version);
3083 psp->kdb.size_bytes = le32_to_cpu(desc->size_bytes);
3084 psp->kdb.start_addr = ucode_start_addr;
3085 break;
3086 case PSP_FW_TYPE_PSP_TOC:
3087 psp->toc.fw_version = le32_to_cpu(desc->fw_version);
3088 psp->toc.feature_version = le32_to_cpu(desc->fw_version);
3089 psp->toc.size_bytes = le32_to_cpu(desc->size_bytes);
3090 psp->toc.start_addr = ucode_start_addr;
3091 break;
3092 case PSP_FW_TYPE_PSP_SPL:
3093 psp->spl.fw_version = le32_to_cpu(desc->fw_version);
3094 psp->spl.feature_version = le32_to_cpu(desc->fw_version);
3095 psp->spl.size_bytes = le32_to_cpu(desc->size_bytes);
3096 psp->spl.start_addr = ucode_start_addr;
3097 break;
3098 case PSP_FW_TYPE_PSP_RL:
3099 psp->rl.fw_version = le32_to_cpu(desc->fw_version);
3100 psp->rl.feature_version = le32_to_cpu(desc->fw_version);
3101 psp->rl.size_bytes = le32_to_cpu(desc->size_bytes);
3102 psp->rl.start_addr = ucode_start_addr;
3103 break;
3104 case PSP_FW_TYPE_PSP_SOC_DRV:
3105 psp->soc_drv.fw_version = le32_to_cpu(desc->fw_version);
3106 psp->soc_drv.feature_version = le32_to_cpu(desc->fw_version);
3107 psp->soc_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3108 psp->soc_drv.start_addr = ucode_start_addr;
3109 break;
3110 case PSP_FW_TYPE_PSP_INTF_DRV:
3111 psp->intf_drv.fw_version = le32_to_cpu(desc->fw_version);
3112 psp->intf_drv.feature_version = le32_to_cpu(desc->fw_version);
3113 psp->intf_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3114 psp->intf_drv.start_addr = ucode_start_addr;
3115 break;
3116 case PSP_FW_TYPE_PSP_DBG_DRV:
3117 psp->dbg_drv.fw_version = le32_to_cpu(desc->fw_version);
3118 psp->dbg_drv.feature_version = le32_to_cpu(desc->fw_version);
3119 psp->dbg_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3120 psp->dbg_drv.start_addr = ucode_start_addr;
3121 break;
3122 case PSP_FW_TYPE_PSP_RAS_DRV:
3123 psp->ras_drv.fw_version = le32_to_cpu(desc->fw_version);
3124 psp->ras_drv.feature_version = le32_to_cpu(desc->fw_version);
3125 psp->ras_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3126 psp->ras_drv.start_addr = ucode_start_addr;
3127 break;
3128 default:
3129 dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type);
3130 break;
3131 }
3132
3133 return 0;
3134 }
3135
3136 static int psp_init_sos_base_fw(struct amdgpu_device *adev)
3137 {
3138 const struct psp_firmware_header_v1_0 *sos_hdr;
3139 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3140 uint8_t *ucode_array_start_addr;
3141
3142 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3143 ucode_array_start_addr = (uint8_t *)sos_hdr +
3144 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3145
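/* MP0 v13.0.2 dGPUs (not connected to the CPU) carry auxiliary
 * SOS/SYS_DRV images in the v1_3 header; everything else uses the
 * base images.
 */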
3146 if (adev->gmc.xgmi.connected_to_cpu ||
3147 (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 2))) {
3148 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
3149 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version);
3150
3151 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes);
3152 adev->psp.sys.start_addr = ucode_array_start_addr;
3153
3154 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes);
3155 adev->psp.sos.start_addr = ucode_array_start_addr +
3156 le32_to_cpu(sos_hdr->sos.offset_bytes);
3157 } else {
3158 /* Load alternate PSP SOS FW */
3159 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3160
3161 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3162 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3163
3164 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes);
3165 adev->psp.sys.start_addr = ucode_array_start_addr +
3166 le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes);
3167
3168 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes);
3169 adev->psp.sos.start_addr = ucode_array_start_addr +
3170 le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes);
3171 }
3172
3173 if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) {
3174 dev_warn(adev->dev, "PSP SOS FW not available");
3175 return -EINVAL;
3176 }
3177
3178 return 0;
3179 }
3180
3181 int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
3182 {
3183 struct amdgpu_device *adev = psp->adev;
3184 char fw_name[PSP_FW_NAME_LEN];
3185 const struct psp_firmware_header_v1_0 *sos_hdr;
3186 const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
3187 const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
3188 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3189 const struct psp_firmware_header_v2_0 *sos_hdr_v2_0;
3190 int err = 0;
3191 uint8_t *ucode_array_start_addr;
3192 int fw_index = 0;
3193
3194 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
3195 err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, fw_name);
3196 if (err)
3197 goto out;
3198
3199 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3200 ucode_array_start_addr = (uint8_t *)sos_hdr +
3201 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3202 amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
3203
3204 switch (sos_hdr->header.header_version_major) {
3205 case 1:
3206 err = psp_init_sos_base_fw(adev);
3207 if (err)
3208 goto out;
3209
3210 if (sos_hdr->header.header_version_minor == 1) {
3211 sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
3212 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes);
3213 adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3214 le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes);
3215 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes);
3216 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3217 le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes);
3218 }
3219 if (sos_hdr->header.header_version_minor == 2) {
3220 sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
3221 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes);
3222 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3223 le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes);
3224 }
3225 if (sos_hdr->header.header_version_minor == 3) {
3226 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3227 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes);
3228 adev->psp.toc.start_addr = ucode_array_start_addr +
3229 le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes);
3230 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes);
3231 adev->psp.kdb.start_addr = ucode_array_start_addr +
3232 le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes);
3233 adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes);
3234 adev->psp.spl.start_addr = ucode_array_start_addr +
3235 le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes);
3236 adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes);
3237 adev->psp.rl.start_addr = ucode_array_start_addr +
3238 le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes);
3239 }
3240 break;
3241 case 2:
3242 sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data;
3243
3244 if (le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
3245 dev_err(adev->dev, "packed SOS count exceeds maximum limit\n");
3246 err = -EINVAL;
3247 goto out;
3248 }
3249
3250 for (fw_index = 0; fw_index < le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count); fw_index++) {
3251 err = parse_sos_bin_descriptor(psp,
3252 &sos_hdr_v2_0->psp_fw_bin[fw_index],
3253 sos_hdr_v2_0);
3254 if (err)
3255 goto out;
3256 }
3257 break;
3258 default:
3259 dev_err(adev->dev,
3260 "unsupported psp sos firmware\n");
3261 err = -EINVAL;
3262 goto out;
3263 }
3264
3265 return 0;
3266 out:
3267 amdgpu_ucode_release(&adev->psp.sos_fw);
3268
3269 return err;
3270 }
3271
3272 static int parse_ta_bin_descriptor(struct psp_context *psp,
3273 const struct psp_fw_bin_desc *desc,
3274 const struct ta_firmware_header_v2_0 *ta_hdr)
3275 {
3276 uint8_t *ucode_start_addr = NULL;
3277
3278 if (!psp || !desc || !ta_hdr)
3279 return -EINVAL;
3280
3281 ucode_start_addr = (uint8_t *)ta_hdr +
3282 le32_to_cpu(desc->offset_bytes) +
3283 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3284
3285 switch (desc->fw_type) {
3286 case TA_FW_TYPE_PSP_ASD:
3287 psp->asd_context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3288 psp->asd_context.bin_desc.feature_version = le32_to_cpu(desc->fw_version);
3289 psp->asd_context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3290 psp->asd_context.bin_desc.start_addr = ucode_start_addr;
3291 break;
3292 case TA_FW_TYPE_PSP_XGMI:
3293 psp->xgmi_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3294 psp->xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3295 psp->xgmi_context.context.bin_desc.start_addr = ucode_start_addr;
3296 break;
3297 case TA_FW_TYPE_PSP_RAS:
3298 psp->ras_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3299 psp->ras_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3300 psp->ras_context.context.bin_desc.start_addr = ucode_start_addr;
3301 break;
3302 case TA_FW_TYPE_PSP_HDCP:
3303 psp->hdcp_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3304 psp->hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3305 psp->hdcp_context.context.bin_desc.start_addr = ucode_start_addr;
3306 break;
3307 case TA_FW_TYPE_PSP_DTM:
3308 psp->dtm_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3309 psp->dtm_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3310 psp->dtm_context.context.bin_desc.start_addr = ucode_start_addr;
3311 break;
3312 case TA_FW_TYPE_PSP_RAP:
3313 psp->rap_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3314 psp->rap_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3315 psp->rap_context.context.bin_desc.start_addr = ucode_start_addr;
3316 break;
3317 case TA_FW_TYPE_PSP_SECUREDISPLAY:
3318 psp->securedisplay_context.context.bin_desc.fw_version =
3319 le32_to_cpu(desc->fw_version);
3320 psp->securedisplay_context.context.bin_desc.size_bytes =
3321 le32_to_cpu(desc->size_bytes);
3322 psp->securedisplay_context.context.bin_desc.start_addr =
3323 ucode_start_addr;
3324 break;
3325 default:
3326 dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
3327 break;
3328 }
3329
3330 return 0;
3331 }
3332
3333 static int parse_ta_v1_microcode(struct psp_context *psp)
3334 {
3335 const struct ta_firmware_header_v1_0 *ta_hdr;
3336 struct amdgpu_device *adev = psp->adev;
3337
3338 ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data;
3339
3340 if (le16_to_cpu(ta_hdr->header.header_version_major) != 1)
3341 return -EINVAL;
3342
3343 adev->psp.xgmi_context.context.bin_desc.fw_version =
3344 le32_to_cpu(ta_hdr->xgmi.fw_version);
3345 adev->psp.xgmi_context.context.bin_desc.size_bytes =
3346 le32_to_cpu(ta_hdr->xgmi.size_bytes);
3347 adev->psp.xgmi_context.context.bin_desc.start_addr =
3348 (uint8_t *)ta_hdr +
3349 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3350
3351 adev->psp.ras_context.context.bin_desc.fw_version =
3352 le32_to_cpu(ta_hdr->ras.fw_version);
3353 adev->psp.ras_context.context.bin_desc.size_bytes =
3354 le32_to_cpu(ta_hdr->ras.size_bytes);
3355 adev->psp.ras_context.context.bin_desc.start_addr =
3356 (uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr +
3357 le32_to_cpu(ta_hdr->ras.offset_bytes);
3358
3359 adev->psp.hdcp_context.context.bin_desc.fw_version =
3360 le32_to_cpu(ta_hdr->hdcp.fw_version);
3361 adev->psp.hdcp_context.context.bin_desc.size_bytes =
3362 le32_to_cpu(ta_hdr->hdcp.size_bytes);
3363 adev->psp.hdcp_context.context.bin_desc.start_addr =
3364 (uint8_t *)ta_hdr +
3365 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3366
3367 adev->psp.dtm_context.context.bin_desc.fw_version =
3368 le32_to_cpu(ta_hdr->dtm.fw_version);
3369 adev->psp.dtm_context.context.bin_desc.size_bytes =
3370 le32_to_cpu(ta_hdr->dtm.size_bytes);
3371 adev->psp.dtm_context.context.bin_desc.start_addr =
3372 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3373 le32_to_cpu(ta_hdr->dtm.offset_bytes);
3374
3375 adev->psp.securedisplay_context.context.bin_desc.fw_version =
3376 le32_to_cpu(ta_hdr->securedisplay.fw_version);
3377 adev->psp.securedisplay_context.context.bin_desc.size_bytes =
3378 le32_to_cpu(ta_hdr->securedisplay.size_bytes);
3379 adev->psp.securedisplay_context.context.bin_desc.start_addr =
3380 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3381 le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
3382
3383 adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
3384
3385 return 0;
3386 }
3387
3388 static int parse_ta_v2_microcode(struct psp_context *psp)
3389 {
3390 const struct ta_firmware_header_v2_0 *ta_hdr;
3391 struct amdgpu_device *adev = psp->adev;
3392 int err = 0;
3393 int ta_index = 0;
3394
3395 ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data;
3396
3397 if (le16_to_cpu(ta_hdr->header.header_version_major) != 2)
3398 return -EINVAL;
3399
3400 if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
3401 dev_err(adev->dev, "packed TA count exceeds maximum limit\n");
3402 return -EINVAL;
3403 }
3404
3405 for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) {
3406 err = parse_ta_bin_descriptor(psp,
3407 &ta_hdr->ta_fw_bin[ta_index],
3408 ta_hdr);
3409 if (err)
3410 return err;
3411 }
3412
3413 return 0;
3414 }
3415
3416 int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
3417 {
3418 const struct common_firmware_header *hdr;
3419 struct amdgpu_device *adev = psp->adev;
3420 char fw_name[PSP_FW_NAME_LEN];
3421 int err;
3422
3423 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
3424 err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, fw_name);
3425 if (err)
3426 return err;
3427
3428 hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data;
3429 switch (le16_to_cpu(hdr->header_version_major)) {
3430 case 1:
3431 err = parse_ta_v1_microcode(psp);
3432 break;
3433 case 2:
3434 err = parse_ta_v2_microcode(psp);
3435 break;
3436 default:
3437 dev_err(adev->dev, "unsupported TA header version\n");
3438 err = -EINVAL;
3439 }
3440
3441 if (err)
3442 amdgpu_ucode_release(&adev->psp.ta_fw);
3443
3444 return err;
3445 }
3446
3447 int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
3448 {
3449 struct amdgpu_device *adev = psp->adev;
3450 char fw_name[PSP_FW_NAME_LEN];
3451 const struct psp_firmware_header_v1_0 *cap_hdr_v1_0;
3452 struct amdgpu_firmware_info *info = NULL;
3453 int err = 0;
3454
3455 if (!amdgpu_sriov_vf(adev)) {
3456 dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n");
3457 return -EINVAL;
3458 }
3459
3460 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_cap.bin", chip_name);
3461 err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, fw_name);
3462 if (err) {
3463 if (err == -ENODEV) {
3464 dev_warn(adev->dev, "cap microcode does not exist, skip\n");
3465 err = 0;
3466 goto out;
3467 }
3468 dev_err(adev->dev, "fail to initialize cap microcode\n");
/* bail out instead of dereferencing a NULL cap_fw below */
goto out;
3469 }
3470
3471 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
3472 info->ucode_id = AMDGPU_UCODE_ID_CAP;
3473 info->fw = adev->psp.cap_fw;
3474 cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *)
3475 adev->psp.cap_fw->data;
3476 adev->firmware.fw_size += ALIGN(
3477 le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE);
3478 adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version);
3479 adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version);
3480 adev->psp.cap_ucode_size = le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes);
3481
3482 return 0;
3483
3484 out:
3485 amdgpu_ucode_release(&adev->psp.cap_fw);
3486 return err;
3487 }
3488
3489 static int psp_set_clockgating_state(void *handle,
3490 enum amd_clockgating_state state)
3491 {
3492 return 0;
3493 }
3494
3495 static int psp_set_powergating_state(void *handle,
3496 enum amd_powergating_state state)
3497 {
3498 return 0;
3499 }
3500
3501 static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
3502 struct device_attribute *attr,
3503 char *buf)
3504 {
3505 struct drm_device *ddev = dev_get_drvdata(dev);
3506 struct amdgpu_device *adev = drm_to_adev(ddev);
3507 uint32_t fw_ver;
3508 int ret;
3509
3510 if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
3511 DRM_INFO("PSP block is not ready yet.");
3512 return -EBUSY;
3513 }
3514
3515 mutex_lock(&adev->psp.mutex);
3516 ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
3517 mutex_unlock(&adev->psp.mutex);
3518
3519 if (ret) {
3520 DRM_ERROR("Failed to read USBC PD FW, err = %d", ret);
3521 return ret;
3522 }
3523
3524 return sysfs_emit(buf, "%x\n", fw_ver);
3525 }
3526
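/*
 * Sysfs "store" handler for usbc_pd_fw: the written string names a firmware
 * file in the amdgpu/ firmware directory (typically under /lib/firmware).
 * The image is copied into a 1 MB aligned kernel BO and handed to the PSP,
 * which performs the actual USB-C PD controller flash.
 */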
3527 static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
3528 struct device_attribute *attr,
3529 const char *buf,
3530 size_t count)
3531 {
3532 struct drm_device *ddev = dev_get_drvdata(dev);
3533 struct amdgpu_device *adev = drm_to_adev(ddev);
3534 int ret, idx;
3535 char fw_name[100];
3536 const struct firmware *usbc_pd_fw;
3537 struct amdgpu_bo *fw_buf_bo = NULL;
3538 uint64_t fw_pri_mc_addr;
3539 void *fw_pri_cpu_addr;
3540
3541 if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
3542 DRM_INFO("PSP block is not ready yet.");
3543 return -EBUSY;
3544 }
3545
3546 if (!drm_dev_enter(ddev, &idx))
3547 return -ENODEV;
3548
3549 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf);
3550 ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev);
3551 if (ret)
3552 goto fail;
3553
3554 	/* LFB address must be aligned to a 1 MB boundary, per PSP requirement */
3555 ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000,
3556 AMDGPU_GEM_DOMAIN_VRAM |
3557 AMDGPU_GEM_DOMAIN_GTT,
3558 &fw_buf_bo, &fw_pri_mc_addr,
3559 &fw_pri_cpu_addr);
3560 if (ret)
3561 goto rel_buf;
3562
3563 memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);
3564
3565 mutex_lock(&adev->psp.mutex);
3566 ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr);
3567 mutex_unlock(&adev->psp.mutex);
3568
3569 amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
3570
3571 rel_buf:
3572 release_firmware(usbc_pd_fw);
3573 fail:
3574 if (ret) {
3575 DRM_ERROR("Failed to load USBC PD FW, err = %d", ret);
3576 count = ret;
3577 }
3578
3579 drm_dev_exit(idx);
3580 return count;
3581 }
3582
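/*
 * psp_copy_fw - stage a firmware binary in the PSP primary buffer.
 *
 * Clears the shared fw_pri_buf (PSP_1_MEG bytes) and copies @bin_size bytes
 * from @start_addr into it. drm_dev_enter()/drm_dev_exit() guard the copy so
 * it is skipped if the device has been unplugged.
 */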
3583 void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size)
3584 {
3585 int idx;
3586
3587 if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
3588 return;
3589
3590 memset(psp->fw_pri_buf, 0, PSP_1_MEG);
3591 memcpy(psp->fw_pri_buf, start_addr, bin_size);
3592
3593 drm_dev_exit(idx);
3594 }
3595
3596 /**
3597 * DOC: usbc_pd_fw
3598 * Reading from this file will retrieve the USB-C PD firmware version. Writing to
3599 * this file will trigger the update process.
3600 */
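/*
 * Example usage from user space (hypothetical card index; the attribute
 * usually lives under the device's sysfs node, e.g.
 * /sys/class/drm/card0/device/):
 *
 *   # read the current USB-C PD firmware version
 *   cat /sys/class/drm/card0/device/usbc_pd_fw
 *   # flash a new image found in the amdgpu firmware directory
 *   echo my_pd_fw.bin > /sys/class/drm/card0/device/usbc_pd_fw
 */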
3601 static DEVICE_ATTR(usbc_pd_fw, 0644,
3602 psp_usbc_pd_fw_sysfs_read,
3603 psp_usbc_pd_fw_sysfs_write);
3604
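/* A PSP firmware descriptor is considered valid once a binary of non-zero
 * size has been parsed into it.
 */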
3605 int is_psp_fw_valid(struct psp_bin_desc bin)
3606 {
3607 return bin.size_bytes;
3608 }
3609
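/*
 * Sysfs binary "write" handler for psp_vbflash (stubbed here; the Linux
 * implementation is retained under "notyet"): chunks written by user space
 * are accumulated in a kvmalloc'd staging buffer, capped at
 * AMD_VBIOS_FILE_MAX_SIZE_B, until a subsequent read triggers the flash.
 */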
3610 static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
3611 struct bin_attribute *bin_attr,
3612 char *buffer, loff_t pos, size_t count)
3613 {
3614 STUB();
3615 return -ENOSYS;
3616 #ifdef notyet
3617 struct device *dev = kobj_to_dev(kobj);
3618 struct drm_device *ddev = dev_get_drvdata(dev);
3619 struct amdgpu_device *adev = drm_to_adev(ddev);
3620
3621 adev->psp.vbflash_done = false;
3622
3623 /* Safeguard against memory drain */
3624 if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) {
3625 dev_err(adev->dev, "File size cannot exceed %u", AMD_VBIOS_FILE_MAX_SIZE_B);
3626 kvfree(adev->psp.vbflash_tmp_buf);
3627 adev->psp.vbflash_tmp_buf = NULL;
3628 adev->psp.vbflash_image_size = 0;
3629 return -ENOMEM;
3630 }
3631
3632 /* TODO Just allocate max for now and optimize to realloc later if needed */
3633 if (!adev->psp.vbflash_tmp_buf) {
3634 adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL);
3635 if (!adev->psp.vbflash_tmp_buf)
3636 return -ENOMEM;
3637 }
3638
3639 mutex_lock(&adev->psp.mutex);
3640 memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count);
3641 adev->psp.vbflash_image_size += count;
3642 mutex_unlock(&adev->psp.mutex);
3643
3644 dev_dbg(adev->dev, "IFWI staged for update");
3645
3646 return count;
3647 #endif
3648 }
3649
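/*
 * Sysfs binary "read" handler for psp_vbflash (also stubbed here): copies the
 * staged IFWI image into a VRAM BO, asks the PSP to update the SPI ROM via
 * psp_update_spirom(), and frees the staging buffer regardless of the result.
 */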
3650 static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
3651 struct bin_attribute *bin_attr, char *buffer,
3652 loff_t pos, size_t count)
3653 {
3654 STUB();
3655 return -ENOSYS;
3656 #ifdef notyet
3657 struct device *dev = kobj_to_dev(kobj);
3658 struct drm_device *ddev = dev_get_drvdata(dev);
3659 struct amdgpu_device *adev = drm_to_adev(ddev);
3660 struct amdgpu_bo *fw_buf_bo = NULL;
3661 uint64_t fw_pri_mc_addr;
3662 void *fw_pri_cpu_addr;
3663 int ret;
3664
3665 if (adev->psp.vbflash_image_size == 0)
3666 return -EINVAL;
3667
3668 dev_dbg(adev->dev, "PSP IFWI flash process initiated");
3669
3670 ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
3671 AMDGPU_GPU_PAGE_SIZE,
3672 AMDGPU_GEM_DOMAIN_VRAM,
3673 &fw_buf_bo,
3674 &fw_pri_mc_addr,
3675 &fw_pri_cpu_addr);
3676 if (ret)
3677 goto rel_buf;
3678
3679 memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size);
3680
3681 mutex_lock(&adev->psp.mutex);
3682 ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr);
3683 mutex_unlock(&adev->psp.mutex);
3684
3685 amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
3686
3687 rel_buf:
3688 kvfree(adev->psp.vbflash_tmp_buf);
3689 adev->psp.vbflash_tmp_buf = NULL;
3690 adev->psp.vbflash_image_size = 0;
3691
3692 if (ret) {
3693 dev_err(adev->dev, "Failed to load IFWI, err = %d", ret);
3694 return ret;
3695 }
3696
3697 dev_dbg(adev->dev, "PSP IFWI flash process done");
3698 return 0;
3699 #endif
3700 }
3701
3702 /**
3703 * DOC: psp_vbflash
3704 * Writing to this file will stage an IFWI for update. Reading from this file
3705 * will trigger the update process.
3706 */
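/*
 * Example flash flow from user space (hypothetical sysfs path):
 *
 *   cat ifwi_image.bin > /sys/class/drm/card0/device/psp_vbflash   # stage image
 *   cat /sys/class/drm/card0/device/psp_vbflash                    # trigger flash
 */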
3707 #ifdef notyet
3708 static struct bin_attribute psp_vbflash_bin_attr = {
3709 .attr = {.name = "psp_vbflash", .mode = 0660},
3710 .size = 0,
3711 .write = amdgpu_psp_vbflash_write,
3712 .read = amdgpu_psp_vbflash_read,
3713 };
3714 #endif
3715
3716 /**
3717 * DOC: psp_vbflash_status
3718 * The status of the flash process.
3719 * 0: IFWI flash not complete.
3720 * 1: IFWI flash complete.
3721 */
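/*
 * The status can be polled after triggering a flash, e.g. (hypothetical path):
 *   cat /sys/class/drm/card0/device/psp_vbflash_status
 */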
3722 static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
3723 struct device_attribute *attr,
3724 char *buf)
3725 {
3726 struct drm_device *ddev = dev_get_drvdata(dev);
3727 struct amdgpu_device *adev = drm_to_adev(ddev);
3728 uint32_t vbflash_status;
3729
3730 vbflash_status = psp_vbflash_status(&adev->psp);
3731 if (!adev->psp.vbflash_done)
3732 vbflash_status = 0;
3733 else if (adev->psp.vbflash_done && !(vbflash_status & 0x80000000))
3734 vbflash_status = 1;
3735
3736 return sysfs_emit(buf, "0x%x\n", vbflash_status);
3737 }
3738 static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
3739
3740 #ifdef notyet
3741 static struct bin_attribute *bin_flash_attrs[] = {
3742 &psp_vbflash_bin_attr,
3743 NULL
3744 };
3745 #endif
3746
3747 static struct attribute *flash_attrs[] = {
3748 &dev_attr_psp_vbflash_status.attr,
3749 &dev_attr_usbc_pd_fw.attr,
3750 NULL
3751 };
3752
3753 #ifdef notyet
3754
3755 static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
3756 {
3757 struct device *dev = kobj_to_dev(kobj);
3758 struct drm_device *ddev = dev_get_drvdata(dev);
3759 struct amdgpu_device *adev = drm_to_adev(ddev);
3760
3761 if (attr == &dev_attr_usbc_pd_fw.attr)
3762 return adev->psp.sup_pd_fw_up ? 0660 : 0;
3763
3764 return adev->psp.sup_ifwi_up ? 0440 : 0;
3765 }
3766
3767 static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
3768 struct bin_attribute *attr,
3769 int idx)
3770 {
3771 struct device *dev = kobj_to_dev(kobj);
3772 struct drm_device *ddev = dev_get_drvdata(dev);
3773 struct amdgpu_device *adev = drm_to_adev(ddev);
3774
3775 return adev->psp.sup_ifwi_up ? 0660 : 0;
3776 }
3777
3778 const struct attribute_group amdgpu_flash_attr_group = {
3779 .attrs = flash_attrs,
3780 .bin_attrs = bin_flash_attrs,
3781 .is_bin_visible = amdgpu_bin_flash_attr_is_visible,
3782 .is_visible = amdgpu_flash_attr_is_visible,
3783 };
3784
3785 #endif /* notyet */
3786
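/*
 * All PSP hardware generations share the same amd_ip_funcs table; the
 * amdgpu_ip_block_version structures below differ only in their
 * major/minor/rev numbers.
 */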
3787 const struct amd_ip_funcs psp_ip_funcs = {
3788 .name = "psp",
3789 .early_init = psp_early_init,
3790 .late_init = NULL,
3791 .sw_init = psp_sw_init,
3792 .sw_fini = psp_sw_fini,
3793 .hw_init = psp_hw_init,
3794 .hw_fini = psp_hw_fini,
3795 .suspend = psp_suspend,
3796 .resume = psp_resume,
3797 .is_idle = NULL,
3798 .check_soft_reset = NULL,
3799 .wait_for_idle = NULL,
3800 .soft_reset = NULL,
3801 .set_clockgating_state = psp_set_clockgating_state,
3802 .set_powergating_state = psp_set_powergating_state,
3803 };
3804
3805 const struct amdgpu_ip_block_version psp_v3_1_ip_block = {
3806 .type = AMD_IP_BLOCK_TYPE_PSP,
3807 .major = 3,
3808 .minor = 1,
3809 .rev = 0,
3810 .funcs = &psp_ip_funcs,
3811 };
3812
3813 const struct amdgpu_ip_block_version psp_v10_0_ip_block = {
3814 .type = AMD_IP_BLOCK_TYPE_PSP,
3815 .major = 10,
3816 .minor = 0,
3817 .rev = 0,
3818 .funcs = &psp_ip_funcs,
3819 };
3820
3821 const struct amdgpu_ip_block_version psp_v11_0_ip_block = {
3822 .type = AMD_IP_BLOCK_TYPE_PSP,
3823 .major = 11,
3824 .minor = 0,
3825 .rev = 0,
3826 .funcs = &psp_ip_funcs,
3827 };
3828
3829 const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = {
3830 .type = AMD_IP_BLOCK_TYPE_PSP,
3831 .major = 11,
3832 .minor = 0,
3833 .rev = 8,
3834 .funcs = &psp_ip_funcs,
3835 };
3836
3837 const struct amdgpu_ip_block_version psp_v12_0_ip_block = {
3838 .type = AMD_IP_BLOCK_TYPE_PSP,
3839 .major = 12,
3840 .minor = 0,
3841 .rev = 0,
3842 .funcs = &psp_ip_funcs,
3843 };
3844
3845 const struct amdgpu_ip_block_version psp_v13_0_ip_block = {
3846 .type = AMD_IP_BLOCK_TYPE_PSP,
3847 .major = 13,
3848 .minor = 0,
3849 .rev = 0,
3850 .funcs = &psp_ip_funcs,
3851 };
3852
3853 const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = {
3854 .type = AMD_IP_BLOCK_TYPE_PSP,
3855 .major = 13,
3856 .minor = 0,
3857 .rev = 4,
3858 .funcs = &psp_ip_funcs,
3859 };
3860