xref: /linux/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c (revision 9a6b55ac)
1 /*
2  * Copyright 2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22 
23 #include <linux/firmware.h>
24 #include <linux/module.h>
25 
26 #include "amdgpu.h"
27 #include "amdgpu_psp.h"
28 #include "amdgpu_ucode.h"
29 #include "soc15_common.h"
30 #include "psp_v11_0.h"
31 
32 #include "mp/mp_11_0_offset.h"
33 #include "mp/mp_11_0_sh_mask.h"
34 #include "gc/gc_9_0_offset.h"
35 #include "sdma0/sdma0_4_0_offset.h"
36 #include "nbio/nbio_7_4_offset.h"
37 
38 #include "oss/osssys_4_0_offset.h"
39 #include "oss/osssys_4_0_sh_mask.h"
40 
41 MODULE_FIRMWARE("amdgpu/vega20_sos.bin");
42 MODULE_FIRMWARE("amdgpu/vega20_asd.bin");
43 MODULE_FIRMWARE("amdgpu/vega20_ta.bin");
44 MODULE_FIRMWARE("amdgpu/navi10_sos.bin");
45 MODULE_FIRMWARE("amdgpu/navi10_asd.bin");
46 MODULE_FIRMWARE("amdgpu/navi14_sos.bin");
47 MODULE_FIRMWARE("amdgpu/navi14_asd.bin");
48 MODULE_FIRMWARE("amdgpu/navi12_sos.bin");
49 MODULE_FIRMWARE("amdgpu/navi12_asd.bin");
50 MODULE_FIRMWARE("amdgpu/arcturus_sos.bin");
51 MODULE_FIRMWARE("amdgpu/arcturus_asd.bin");
52 MODULE_FIRMWARE("amdgpu/arcturus_ta.bin");
53 
54 /* address block */
55 #define smnMP1_FIRMWARE_FLAGS		0x3010024
56 /* navi10 reg offset define */
57 #define mmRLC_GPM_UCODE_ADDR_NV10	0x5b61
58 #define mmRLC_GPM_UCODE_DATA_NV10	0x5b62
59 #define mmSDMA0_UCODE_ADDR_NV10		0x5880
60 #define mmSDMA0_UCODE_DATA_NV10		0x5881
61 /* memory training timeout define */
62 #define MEM_TRAIN_SEND_MSG_TIMEOUT_US	3000000
63 
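/*
 * Fetch and validate the PSP firmware images (sos, asd and, for the ASICs
 * that use them, ta) for the current chip, then record the sizes, versions
 * and start addresses of the individual binaries parsed from the headers.
 */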
64 static int psp_v11_0_init_microcode(struct psp_context *psp)
65 {
66 	struct amdgpu_device *adev = psp->adev;
67 	const char *chip_name;
68 	char fw_name[30];
69 	int err = 0;
70 	const struct psp_firmware_header_v1_0 *sos_hdr;
71 	const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
72 	const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
73 	const struct psp_firmware_header_v1_0 *asd_hdr;
74 	const struct ta_firmware_header_v1_0 *ta_hdr;
75 
76 	DRM_DEBUG("\n");
77 
78 	switch (adev->asic_type) {
79 	case CHIP_VEGA20:
80 		chip_name = "vega20";
81 		break;
82 	case CHIP_NAVI10:
83 		chip_name = "navi10";
84 		break;
85 	case CHIP_NAVI14:
86 		chip_name = "navi14";
87 		break;
88 	case CHIP_NAVI12:
89 		chip_name = "navi12";
90 		break;
91 	case CHIP_ARCTURUS:
92 		chip_name = "arcturus";
93 		break;
94 	default:
95 		BUG();
96 	}
97 
98 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
99 	err = request_firmware(&adev->psp.sos_fw, fw_name, adev->dev);
100 	if (err)
101 		goto out;
102 
103 	err = amdgpu_ucode_validate(adev->psp.sos_fw);
104 	if (err)
105 		goto out;
106 
107 	sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
108 	amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
109 
110 	switch (sos_hdr->header.header_version_major) {
111 	case 1:
112 		adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
113 		adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->ucode_feature_version);
114 		adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos_size_bytes);
115 		adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->sos_offset_bytes);
116 		adev->psp.sys_start_addr = (uint8_t *)sos_hdr +
117 				le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
118 		adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr +
119 				le32_to_cpu(sos_hdr->sos_offset_bytes);
120 		if (sos_hdr->header.header_version_minor == 1) {
121 			sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
122 			adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_1->toc_size_bytes);
123 			adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr +
124 					le32_to_cpu(sos_hdr_v1_1->toc_offset_bytes);
125 			adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_1->kdb_size_bytes);
126 			adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
127 					le32_to_cpu(sos_hdr_v1_1->kdb_offset_bytes);
128 		}
129 		if (sos_hdr->header.header_version_minor == 2) {
130 			sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
131 			adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_2->kdb_size_bytes);
132 			adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
133 						    le32_to_cpu(sos_hdr_v1_2->kdb_offset_bytes);
134 		}
135 		break;
136 	default:
137 		dev_err(adev->dev,
138 			"Unsupported psp sos firmware\n");
139 		err = -EINVAL;
140 		goto out;
141 	}
142 
143 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
144 	err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
145 	if (err)
146 		goto out1;
147 
148 	err = amdgpu_ucode_validate(adev->psp.asd_fw);
149 	if (err)
150 		goto out1;
151 
152 	asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
153 	adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
154 	adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version);
155 	adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
156 	adev->psp.asd_start_addr = (uint8_t *)asd_hdr +
157 				le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
158 
159 	switch (adev->asic_type) {
160 	case CHIP_VEGA20:
161 	case CHIP_ARCTURUS:
162 		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
163 		err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
164 		if (err) {
165 			release_firmware(adev->psp.ta_fw);
166 			adev->psp.ta_fw = NULL;
167 			dev_info(adev->dev,
168 				 "psp v11.0: Failed to load firmware \"%s\"\n", fw_name);
169 		} else {
170 			err = amdgpu_ucode_validate(adev->psp.ta_fw);
171 			if (err)
172 				goto out2;
173 
174 			ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
175 			adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version);
176 			adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes);
177 			adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr +
178 				le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
179 			adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
180 			adev->psp.ta_ras_ucode_version = le32_to_cpu(ta_hdr->ta_ras_ucode_version);
181 			adev->psp.ta_ras_ucode_size = le32_to_cpu(ta_hdr->ta_ras_size_bytes);
182 			adev->psp.ta_ras_start_addr = (uint8_t *)adev->psp.ta_xgmi_start_addr +
183 				le32_to_cpu(ta_hdr->ta_ras_offset_bytes);
184 		}
185 		break;
186 	case CHIP_NAVI10:
187 	case CHIP_NAVI14:
188 	case CHIP_NAVI12:
189 		break;
190 	default:
191 		BUG();
192 	}
193 
194 	return 0;
195 
196 out2:
197 	release_firmware(adev->psp.ta_fw);
198 	adev->psp.ta_fw = NULL;
199 out1:
200 	release_firmware(adev->psp.asd_fw);
201 	adev->psp.asd_fw = NULL;
202 out:
203 	dev_err(adev->dev,
204 		"psp v11.0: Failed to load firmware \"%s\"\n", fw_name);
205 	release_firmware(adev->psp.sos_fw);
206 	adev->psp.sos_fw = NULL;
207 
208 	return err;
209 }
210 
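/*
 * The sOS writes a non-zero value into C2PMSG_81 once it is up, so a
 * non-zero read of that register is treated as the sign-of-life indication.
 */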
211 static bool psp_v11_0_is_sos_alive(struct psp_context *psp)
212 {
213 	struct amdgpu_device *adev = psp->adev;
214 	uint32_t sol_reg;
215 
216 	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
217 
218 	return sol_reg != 0x0;
219 }
220 
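/*
 * Load the PSP key database: copy the kdb image into the PSP private
 * (fw_pri) buffer, hand its address to the bootloader through C2PMSG_36 and
 * issue PSP_BL__LOAD_KEY_DATABASE via C2PMSG_35.
 */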
221 static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp)
222 {
223 	int ret;
224 	uint32_t psp_gfxdrv_command_reg = 0;
225 	struct amdgpu_device *adev = psp->adev;
226 
227 	/* Check the sOS sign-of-life register to confirm that the sys driver
228 	 * and sOS have already been loaded.
229 	 */
230 	if (psp_v11_0_is_sos_alive(psp)) {
231 		psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
232 		dev_info(adev->dev, "sos fw version = 0x%x.\n", psp->sos_fw_version);
233 		return 0;
234 	}
235 
236 	/* Wait for the bootloader to signal that it is ready by setting bit 31 of C2PMSG_35 to 1 */
237 	ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
238 			   0x80000000, 0x80000000, false);
239 	if (ret)
240 		return ret;
241 
242 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
243 
244 	/* Copy PSP KDB binary to memory */
245 	memcpy(psp->fw_pri_buf, psp->kdb_start_addr, psp->kdb_bin_size);
246 
247 	/* Provide the PSP KDB to bootloader */
248 	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
249 	       (uint32_t)(psp->fw_pri_mc_addr >> 20));
250 	psp_gfxdrv_command_reg = PSP_BL__LOAD_KEY_DATABASE;
251 	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
252 	       psp_gfxdrv_command_reg);
253 
254 	/* Wait for the bootloader to signal that it is ready by setting bit 31 of C2PMSG_35 to 1 */
255 	ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
256 			   0x80000000, 0x80000000, false);
257 
258 	return ret;
259 }
260 
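/*
 * Load the PSP system driver: same mailbox handshake as the kdb load above,
 * but with the sys driver image and the PSP_BL__LOAD_SYSDRV command.
 */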
261 static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
262 {
263 	int ret;
264 	uint32_t psp_gfxdrv_command_reg = 0;
265 	struct amdgpu_device *adev = psp->adev;
266 
267 	/* Check the sOS sign-of-life register to confirm that the sys driver
268 	 * and sOS have already been loaded.
269 	 */
270 	if (psp_v11_0_is_sos_alive(psp)) {
271 		psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
272 		dev_info(adev->dev, "sos fw version = 0x%x.\n", psp->sos_fw_version);
273 		return 0;
274 	}
275 
276 	/* Wait for the bootloader to signal that it is ready by setting bit 31 of C2PMSG_35 to 1 */
277 	ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
278 			   0x80000000, 0x80000000, false);
279 	if (ret)
280 		return ret;
281 
282 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
283 
284 	/* Copy PSP System Driver binary to memory */
285 	memcpy(psp->fw_pri_buf, psp->sys_start_addr, psp->sys_bin_size);
286 
287 	/* Provide the sys driver to bootloader */
288 	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
289 	       (uint32_t)(psp->fw_pri_mc_addr >> 20));
290 	psp_gfxdrv_command_reg = PSP_BL__LOAD_SYSDRV;
291 	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
292 	       psp_gfxdrv_command_reg);
293 
294 	/* there might be a handshake issue with hardware which needs a delay */
295 	mdelay(20);
296 
297 	ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
298 			   0x80000000, 0x80000000, false);
299 
300 	return ret;
301 }
302 
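/*
 * Load the secure OS: copy the sOS image into the fw_pri buffer, pass its
 * address via C2PMSG_36 and issue PSP_BL__LOAD_SOSDRV, then wait for the
 * sign-of-life register C2PMSG_81 to change from its current value.
 */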
303 static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
304 {
305 	int ret;
306 	unsigned int psp_gfxdrv_command_reg = 0;
307 	struct amdgpu_device *adev = psp->adev;
308 
309 	/* Check the sOS sign-of-life register to confirm that the sys driver
310 	 * and sOS have already been loaded.
311 	 */
312 	if (psp_v11_0_is_sos_alive(psp))
313 		return 0;
314 
315 	/* Wait for the bootloader to signal that it is ready by setting bit 31 of C2PMSG_35 to 1 */
316 	ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
317 			   0x80000000, 0x80000000, false);
318 	if (ret)
319 		return ret;
320 
321 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
322 
323 	/* Copy Secure OS binary to PSP memory */
324 	memcpy(psp->fw_pri_buf, psp->sos_start_addr, psp->sos_bin_size);
325 
326 	/* Provide the PSP secure OS to bootloader */
327 	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
328 	       (uint32_t)(psp->fw_pri_mc_addr >> 20));
329 	psp_gfxdrv_command_reg = PSP_BL__LOAD_SOSDRV;
330 	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
331 	       psp_gfxdrv_command_reg);
332 
333 	/* there might be a handshake issue with hardware which needs a delay */
334 	mdelay(20);
335 	ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81),
336 			   RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81),
337 			   0, true);
338 
339 	return ret;
340 }
341 
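/*
 * Ask the PSP to reroute the VMC and UMC interrupt clients to IH ring 1:
 * the client index goes into C2PMSG_69, the client configuration data into
 * C2PMSG_70, and the request is kicked off with GFX_CTRL_CMD_ID_GBR_IH_SET
 * written to C2PMSG_64.
 */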
342 static void psp_v11_0_reroute_ih(struct psp_context *psp)
343 {
344 	struct amdgpu_device *adev = psp->adev;
345 	uint32_t tmp;
346 
347 	/* Change IH ring for VMC */
348 	tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1244b);
349 	tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
350 	tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
351 
352 	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 3);
353 	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
354 	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);
355 
356 	mdelay(20);
357 	psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
358 		     0x80000000, 0x8000FFFF, false);
359 
360 	/* Change IH ring for UMC */
361 	tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1216b);
362 	tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
363 
364 	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 4);
365 	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
366 	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);
367 
368 	mdelay(20);
369 	psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
370 		     0x80000000, 0x8000FFFF, false);
371 }
372 
373 static int psp_v11_0_ring_init(struct psp_context *psp,
374 			      enum psp_ring_type ring_type)
375 {
376 	int ret = 0;
377 	struct psp_ring *ring;
378 	struct amdgpu_device *adev = psp->adev;
379 
380 	psp_v11_0_reroute_ih(psp);
381 
382 	ring = &psp->km_ring;
383 
384 	ring->ring_type = ring_type;
385 
386 	/* allocate a 4 KB page of local frame buffer memory for the ring */
387 	ring->ring_size = 0x1000;
388 	ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
389 				      AMDGPU_GEM_DOMAIN_VRAM,
390 				      &adev->firmware.rbuf,
391 				      &ring->ring_mem_mc_addr,
392 				      (void **)&ring->ring_mem);
393 	if (ret) {
394 		ring->ring_size = 0;
395 		return ret;
396 	}
397 
398 	return 0;
399 }
400 
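/*
 * The VMR ring interface is only usable by SR-IOV guests running on sOS
 * firmware newer than 0x80045.
 */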
401 static bool psp_v11_0_support_vmr_ring(struct psp_context *psp)
402 {
403 	if (amdgpu_sriov_vf(psp->adev) && psp->sos_fw_version > 0x80045)
404 		return true;
405 	return false;
406 }
407 
408 static int psp_v11_0_ring_stop(struct psp_context *psp,
409 			      enum psp_ring_type ring_type)
410 {
411 	int ret = 0;
412 	struct amdgpu_device *adev = psp->adev;
413 
414 	/* Write the ring destroy command */
415 	if (psp_v11_0_support_vmr_ring(psp))
416 		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
417 				     GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
418 	else
419 		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64,
420 				     GFX_CTRL_CMD_ID_DESTROY_RINGS);
421 
422 	/* there might be a handshake issue with hardware which needs a delay */
423 	mdelay(20);
424 
425 	/* Wait for response flag (bit 31) */
426 	if (psp_v11_0_support_vmr_ring(psp))
427 		ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
428 				   0x80000000, 0x80000000, false);
429 	else
430 		ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
431 				   0x80000000, 0x80000000, false);
432 
433 	return ret;
434 }
435 
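/*
 * Create the KM (GPCOM) ring: program the ring buffer address (and, on the
 * non-VMR path, its size) into the C2PMSG mailbox registers, issue the ring
 * initialization command, then poll for the response flag in bit 31.
 */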
436 static int psp_v11_0_ring_create(struct psp_context *psp,
437 				enum psp_ring_type ring_type)
438 {
439 	int ret = 0;
440 	unsigned int psp_ring_reg = 0;
441 	struct psp_ring *ring = &psp->km_ring;
442 	struct amdgpu_device *adev = psp->adev;
443 
444 	if (psp_v11_0_support_vmr_ring(psp)) {
445 		ret = psp_v11_0_ring_stop(psp, ring_type);
446 		if (ret) {
447 			DRM_ERROR("psp_v11_0_ring_stop failed!\n");
448 			return ret;
449 		}
450 
451 		/* Write low address of the ring to C2PMSG_102 */
452 		psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
453 		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg);
454 		/* Write high address of the ring to C2PMSG_103 */
455 		psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
456 		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_103, psp_ring_reg);
457 
458 		/* Write the ring initialization command to C2PMSG_101 */
459 		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
460 					     GFX_CTRL_CMD_ID_INIT_GPCOM_RING);
461 
462 		/* there might be a handshake issue with hardware which needs a delay */
463 		mdelay(20);
464 
465 		/* Wait for response flag (bit 31) in C2PMSG_101 */
466 		ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
467 				   0x80000000, 0x8000FFFF, false);
468 
469 	} else {
470 		/* Wait for sOS ready for ring creation */
471 		ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
472 				   0x80000000, 0x80000000, false);
473 		if (ret) {
474 			DRM_ERROR("Failed to wait for sOS ready for ring creation\n");
475 			return ret;
476 		}
477 
478 		/* Write low address of the ring to C2PMSG_69 */
479 		psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
480 		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
481 		/* Write high address of the ring to C2PMSG_70 */
482 		psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
483 		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg);
484 		/* Write size of ring to C2PMSG_71 */
485 		psp_ring_reg = ring->ring_size;
486 		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg);
487 		/* Write the ring initialization command to C2PMSG_64 */
488 		psp_ring_reg = ring_type;
489 		psp_ring_reg = psp_ring_reg << 16;
490 		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
491 
492 		/* there might be a handshake issue with hardware which needs a delay */
493 		mdelay(20);
494 
495 		/* Wait for response flag (bit 31) in C2PMSG_64 */
496 		ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
497 				   0x80000000, 0x8000FFFF, false);
498 	}
499 
500 	return ret;
501 }
502 
503 
504 static int psp_v11_0_ring_destroy(struct psp_context *psp,
505 				 enum psp_ring_type ring_type)
506 {
507 	int ret = 0;
508 	struct psp_ring *ring = &psp->km_ring;
509 	struct amdgpu_device *adev = psp->adev;
510 
511 	ret = psp_v11_0_ring_stop(psp, ring_type);
512 	if (ret)
513 		DRM_ERROR("Failed to stop psp ring\n");
514 
515 	amdgpu_bo_free_kernel(&adev->firmware.rbuf,
516 			      &ring->ring_mem_mc_addr,
517 			      (void **)&ring->ring_mem);
518 
519 	return ret;
520 }
521 
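/*
 * Submit one command buffer to the KM ring: pick the next ring-buffer frame
 * based on the current write pointer, fill in the command and fence
 * addresses, then advance the write pointer (in DWORDs, wrapping at the
 * ring size) and notify the PSP.
 */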
522 static int psp_v11_0_cmd_submit(struct psp_context *psp,
523 			       uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
524 			       int index)
525 {
526 	unsigned int psp_write_ptr_reg = 0;
527 	struct psp_gfx_rb_frame *write_frame = psp->km_ring.ring_mem;
528 	struct psp_ring *ring = &psp->km_ring;
529 	struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
530 	struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
531 		ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
532 	struct amdgpu_device *adev = psp->adev;
533 	uint32_t ring_size_dw = ring->ring_size / 4;
534 	uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
535 
536 	/* KM (GPCOM) prepare write pointer */
537 	if (psp_v11_0_support_vmr_ring(psp))
538 		psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
539 	else
540 		psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
541 
542 	/* Update KM RB frame pointer to new frame */
543 	/* write_frame ptr increments by size of rb_frame in bytes */
544 	/* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
545 	if ((psp_write_ptr_reg % ring_size_dw) == 0)
546 		write_frame = ring_buffer_start;
547 	else
548 		write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
549 	/* Check for an invalid write_frame ptr address */
550 	if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
551 		DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
552 			  ring_buffer_start, ring_buffer_end, write_frame);
553 		DRM_ERROR("write_frame is pointing to address out of bounds\n");
554 		return -EINVAL;
555 	}
556 
557 	/* Initialize KM RB frame */
558 	memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
559 
560 	/* Update KM RB frame */
561 	write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
562 	write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
563 	write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
564 	write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
565 	write_frame->fence_value = index;
566 	amdgpu_asic_flush_hdp(adev, NULL);
567 
568 	/* Update the write pointer in DWORDs */
569 	psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
570 	if (psp_v11_0_support_vmr_ring(psp)) {
571 		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_write_ptr_reg);
572 		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
573 	} else
574 		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg);
575 
576 	return 0;
577 }
578 
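/*
 * Map a ucode ID to the UCODE_ADDR/UCODE_DATA register pair (and SRAM
 * offset) used to read that engine's instruction SRAM back for
 * verification.
 */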
579 static int
580 psp_v11_0_sram_map(struct amdgpu_device *adev,
581 		  unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
582 		  unsigned int *sram_data_reg_offset,
583 		  enum AMDGPU_UCODE_ID ucode_id)
584 {
585 	int ret = 0;
586 
587 	switch (ucode_id) {
588 /* TODO: needs to be confirmed */
589 #if 0
590 	case AMDGPU_UCODE_ID_SMC:
591 		*sram_offset = 0;
592 		*sram_addr_reg_offset = 0;
593 		*sram_data_reg_offset = 0;
594 		break;
595 #endif
596 
597 	case AMDGPU_UCODE_ID_CP_CE:
598 		*sram_offset = 0x0;
599 		*sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_ADDR);
600 		*sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_DATA);
601 		break;
602 
603 	case AMDGPU_UCODE_ID_CP_PFP:
604 		*sram_offset = 0x0;
605 		*sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_ADDR);
606 		*sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_DATA);
607 		break;
608 
609 	case AMDGPU_UCODE_ID_CP_ME:
610 		*sram_offset = 0x0;
611 		*sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_ADDR);
612 		*sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_DATA);
613 		break;
614 
615 	case AMDGPU_UCODE_ID_CP_MEC1:
616 		*sram_offset = 0x10000;
617 		*sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_ADDR);
618 		*sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_DATA);
619 		break;
620 
621 	case AMDGPU_UCODE_ID_CP_MEC2:
622 		*sram_offset = 0x10000;
623 		*sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_ADDR);
624 		*sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_DATA);
625 		break;
626 
627 	case AMDGPU_UCODE_ID_RLC_G:
628 		*sram_offset = 0x2000;
629 		if (adev->asic_type < CHIP_NAVI10) {
630 			*sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR);
631 			*sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_DATA);
632 		} else {
633 			*sram_addr_reg_offset = adev->reg_offset[GC_HWIP][0][1] + mmRLC_GPM_UCODE_ADDR_NV10;
634 			*sram_data_reg_offset = adev->reg_offset[GC_HWIP][0][1] + mmRLC_GPM_UCODE_DATA_NV10;
635 		}
636 		break;
637 
638 	case AMDGPU_UCODE_ID_SDMA0:
639 		*sram_offset = 0x0;
640 		if (adev->asic_type < CHIP_NAVI10) {
641 			*sram_addr_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_ADDR);
642 			*sram_data_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_DATA);
643 		} else {
644 			*sram_addr_reg_offset = adev->reg_offset[GC_HWIP][0][1] + mmSDMA0_UCODE_ADDR_NV10;
645 			*sram_data_reg_offset = adev->reg_offset[GC_HWIP][0][1] + mmSDMA0_UCODE_DATA_NV10;
646 		}
647 		break;
648 
649 /* TODO: needs to be confirmed */
650 #if 0
651 	case AMDGPU_UCODE_ID_SDMA1:
652 		*sram_offset = ;
653 		*sram_addr_reg_offset = ;
654 		break;
655 
656 	case AMDGPU_UCODE_ID_UVD:
657 		*sram_offset = ;
658 		*sram_addr_reg_offset = ;
659 		break;
660 
661 	case AMDGPU_UCODE_ID_VCE:
662 		*sram_offset = ;
663 		*sram_addr_reg_offset = ;
664 		break;
665 #endif
666 
667 	case AMDGPU_UCODE_ID_MAXIMUM:
668 	default:
669 		ret = -EINVAL;
670 		break;
671 	}
672 
673 	return ret;
674 }
675 
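/*
 * Verify a loaded firmware image by reading the engine's instruction SRAM
 * back through its ADDR/DATA register window and comparing it word by word
 * against the ucode copy in system memory.
 */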
676 static bool psp_v11_0_compare_sram_data(struct psp_context *psp,
677 				       struct amdgpu_firmware_info *ucode,
678 				       enum AMDGPU_UCODE_ID ucode_type)
679 {
680 	int err = 0;
681 	unsigned int fw_sram_reg_val = 0;
682 	unsigned int fw_sram_addr_reg_offset = 0;
683 	unsigned int fw_sram_data_reg_offset = 0;
684 	unsigned int ucode_size;
685 	uint32_t *ucode_mem = NULL;
686 	struct amdgpu_device *adev = psp->adev;
687 
688 	err = psp_v11_0_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset,
689 				&fw_sram_data_reg_offset, ucode_type);
690 	if (err)
691 		return false;
692 
693 	WREG32(fw_sram_addr_reg_offset, fw_sram_reg_val);
694 
695 	ucode_size = ucode->ucode_size;
696 	ucode_mem = (uint32_t *)ucode->kaddr;
697 	while (ucode_size) {
698 		fw_sram_reg_val = RREG32(fw_sram_data_reg_offset);
699 
700 		if (*ucode_mem != fw_sram_reg_val)
701 			return false;
702 
703 		ucode_mem++;
704 		/* 4 bytes */
705 		ucode_size -= 4;
706 	}
707 
708 	return true;
709 }
710 
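/*
 * Request a PSP mode 1 reset: confirm the PSP is responsive, write
 * GFX_CTRL_CMD_ID_MODE1_RST to C2PMSG_64 and wait for the completion flag
 * in C2PMSG_33.
 */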
711 static int psp_v11_0_mode1_reset(struct psp_context *psp)
712 {
713 	int ret;
714 	uint32_t offset;
715 	struct amdgpu_device *adev = psp->adev;
716 
717 	offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64);
718 
719 	ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, false);
720 
721 	if (ret) {
722 		DRM_INFO("psp is not working correctly before mode1 reset!\n");
723 		return -EINVAL;
724 	}
725 
726 	/* send the mode 1 reset command */
727 	WREG32(offset, GFX_CTRL_CMD_ID_MODE1_RST);
728 
729 	msleep(500);
730 
731 	offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);
732 
733 	ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, false);
734 
735 	if (ret) {
736 		DRM_INFO("psp mode 1 reset failed!\n");
737 		return -EINVAL;
738 	}
739 
740 	DRM_INFO("psp mode1 reset succeeded\n");
741 
742 	return 0;
743 }
744 
745 /* TODO: Fill in the following functions once the PSP firmware interface for
746  * XGMI is ready. For now, return success and hack the hive_id so high level
747  * code can start testing.
748  */
749 static int psp_v11_0_xgmi_get_topology_info(struct psp_context *psp,
750 	int number_devices, struct psp_xgmi_topology_info *topology)
751 {
752 	struct ta_xgmi_shared_memory *xgmi_cmd;
753 	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
754 	struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
755 	int i;
756 	int ret;
757 
758 	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
759 		return -EINVAL;
760 
761 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
762 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
763 
764 	/* Fill in the shared memory with topology information as input */
765 	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
766 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO;
767 	topology_info_input->num_nodes = number_devices;
768 
769 	for (i = 0; i < topology_info_input->num_nodes; i++) {
770 		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
771 		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
772 		topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
773 		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
774 	}
775 
776 	/* Invoke xgmi ta to get the topology information */
777 	ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO);
778 	if (ret)
779 		return ret;
780 
781 	/* Read the output topology information from the shared memory */
782 	topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
783 	topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
784 	for (i = 0; i < topology->num_nodes; i++) {
785 		topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
786 		topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
787 		topology->nodes[i].is_sharing_enabled = topology_info_output->nodes[i].is_sharing_enabled;
788 		topology->nodes[i].sdma_engine = topology_info_output->nodes[i].sdma_engine;
789 	}
790 
791 	return 0;
792 }
793 
794 static int psp_v11_0_xgmi_set_topology_info(struct psp_context *psp,
795 	int number_devices, struct psp_xgmi_topology_info *topology)
796 {
797 	struct ta_xgmi_shared_memory *xgmi_cmd;
798 	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
799 	int i;
800 
801 	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
802 		return -EINVAL;
803 
804 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
805 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
806 
807 	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
808 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
809 	topology_info_input->num_nodes = number_devices;
810 
811 	for (i = 0; i < topology_info_input->num_nodes; i++) {
812 		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
813 		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
814 		topology_info_input->nodes[i].is_sharing_enabled = 1;
815 		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
816 	}
817 
818 	/* Invoke xgmi ta to set topology information */
819 	return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
820 }
821 
822 static int psp_v11_0_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
823 {
824 	struct ta_xgmi_shared_memory *xgmi_cmd;
825 	int ret;
826 
827 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
828 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
829 
830 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
831 
832 	/* Invoke xgmi ta to get hive id */
833 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
834 	if (ret)
835 		return ret;
836 
837 	*hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
838 
839 	return 0;
840 }
841 
842 static int psp_v11_0_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
843 {
844 	struct ta_xgmi_shared_memory *xgmi_cmd;
845 	int ret;
846 
847 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
848 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
849 
850 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
851 
852 	/* Invoke xgmi ta to get the node id */
853 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
854 	if (ret)
855 		return ret;
856 
857 	*node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
858 
859 	return 0;
860 }
861 
862 static int psp_v11_0_ras_trigger_error(struct psp_context *psp,
863 		struct ta_ras_trigger_error_input *info)
864 {
865 	struct ta_ras_shared_memory *ras_cmd;
866 	int ret;
867 
868 	if (!psp->ras.ras_initialized)
869 		return -EINVAL;
870 
871 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
872 	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
873 
874 	ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
875 	ras_cmd->ras_in_message.trigger_error = *info;
876 
877 	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
878 	if (ret)
879 		return -EINVAL;
880 
881 	return ras_cmd->ras_status;
882 }
883 
884 static int psp_v11_0_ras_cure_posion(struct psp_context *psp, uint64_t *mode_ptr)
885 {
886 #if 0
887 	/* not supported yet */
888 	struct ta_ras_shared_memory *ras_cmd;
889 	int ret;
890 
891 	if (!psp->ras.ras_initialized)
892 		return -EINVAL;
893 
894 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
895 	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
896 
897 	ras_cmd->cmd_id = TA_RAS_COMMAND__CURE_POISON;
898 	ras_cmd->ras_in_message.cure_poison.mode_ptr = mode_ptr;
899 
900 	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
901 	if (ret)
902 		return -EINVAL;
903 
904 	return ras_cmd->ras_status;
905 #else
906 	return -EINVAL;
907 #endif
908 }
909 
910 static int psp_v11_0_rlc_autoload_start(struct psp_context *psp)
911 {
912 	return psp_rlc_autoload_start(psp);
913 }
914 
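/*
 * Send a DRAM training request to the bootloader: C2PMSG_36 carries the c2p
 * training-data offset (shifted right by 20 bits, i.e. in MB units) and
 * C2PMSG_35 carries the message, then the ready flag is polled for up to
 * MEM_TRAIN_SEND_MSG_TIMEOUT_US.
 */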
915 static int psp_v11_0_memory_training_send_msg(struct psp_context *psp, int msg)
916 {
917 	int ret;
918 	int i;
919 	uint32_t data_32;
920 	int max_wait;
921 	struct amdgpu_device *adev = psp->adev;
922 
923 	data_32 = (psp->mem_train_ctx.c2p_train_data_offset >> 20);
924 	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, data_32);
925 	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, msg);
926 
927 	max_wait = MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout;
928 	for (i = 0; i < max_wait; i++) {
929 		ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
930 				   0x80000000, 0x80000000, false);
931 		if (ret == 0)
932 			break;
933 	}
934 	if (i < max_wait)
935 		ret = 0;
936 	else
937 		ret = -ETIME;
938 
939 	DRM_DEBUG("training %s %s, cost %d @ %d ms\n",
940 		  (msg == PSP_BL__DRAM_SHORT_TRAIN) ? "short" : "long",
941 		  (ret == 0) ? "succeeded" : "failed",
942 		  i, adev->usec_timeout/1000);
943 	return ret;
944 }
945 
946 static void psp_v11_0_memory_training_fini(struct psp_context *psp)
947 {
948 	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
949 
950 	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
951 	kfree(ctx->sys_cache);
952 	ctx->sys_cache = NULL;
953 }
954 
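/*
 * Allocate the system-memory cache used to preserve DRAM training data
 * across resets; only done when the training region was reserved
 * successfully during early init.
 */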
955 static int psp_v11_0_memory_training_init(struct psp_context *psp)
956 {
957 	int ret;
958 	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
959 
960 	if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
961 		DRM_DEBUG("memory training is not supported!\n");
962 		return 0;
963 	}
964 
965 	ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
966 	if (ctx->sys_cache == NULL) {
967 		DRM_ERROR("alloc mem_train_ctx.sys_cache failed!\n");
968 		ret = -ENOMEM;
969 		goto Err_out;
970 	}
971 
972 	DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
973 		  ctx->train_data_size,
974 		  ctx->p2c_train_data_offset,
975 		  ctx->c2p_train_data_offset);
976 	ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
977 	return 0;
978 
979 Err_out:
980 	psp_v11_0_memory_training_fini(psp);
981 	return ret;
982 }
983 
984 /*
985  * Memory training data save and restore process.
986  */
987 static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
988 {
989 	int ret;
990 	uint32_t p2c_header[4];
991 	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
992 	uint32_t *pcache = (uint32_t *)ctx->sys_cache;
993 
994 	if (ctx->init == PSP_MEM_TRAIN_NOT_SUPPORT) {
995 		DRM_DEBUG("Memory training is not supported.\n");
996 		return 0;
997 	} else if (ctx->init != PSP_MEM_TRAIN_INIT_SUCCESS) {
998 		DRM_ERROR("Memory training initialization failure.\n");
999 		return -EINVAL;
1000 	}
1001 
1002 	if (psp_v11_0_is_sos_alive(psp)) {
1003 		DRM_DEBUG("SOS is alive, skip memory training.\n");
1004 		return 0;
1005 	}
1006 
1007 	amdgpu_device_vram_access(psp->adev, ctx->p2c_train_data_offset, p2c_header, sizeof(p2c_header), false);
1008 	DRM_DEBUG("sys_cache[%08x,%08x,%08x,%08x] p2c_header[%08x,%08x,%08x,%08x]\n",
1009 		  pcache[0], pcache[1], pcache[2], pcache[3],
1010 		  p2c_header[0], p2c_header[1], p2c_header[2], p2c_header[3]);
1011 
1012 	if (ops & PSP_MEM_TRAIN_SEND_SHORT_MSG) {
1013 		DRM_DEBUG("Short training depends on restore.\n");
1014 		ops |= PSP_MEM_TRAIN_RESTORE;
1015 	}
1016 
1017 	if ((ops & PSP_MEM_TRAIN_RESTORE) &&
1018 	    pcache[0] != MEM_TRAIN_SYSTEM_SIGNATURE) {
1019 		DRM_DEBUG("sys_cache[0] is invalid, restore depends on save.\n");
1020 		ops |= PSP_MEM_TRAIN_SAVE;
1021 	}
1022 
1023 	if (p2c_header[0] == MEM_TRAIN_SYSTEM_SIGNATURE &&
1024 	    !(pcache[0] == MEM_TRAIN_SYSTEM_SIGNATURE &&
1025 	      pcache[3] == p2c_header[3])) {
1026 		DRM_DEBUG("sys_cache is invalid or out-of-date, need to save training data to sys_cache.\n");
1027 		ops |= PSP_MEM_TRAIN_SAVE;
1028 	}
1029 
1030 	if ((ops & PSP_MEM_TRAIN_SAVE) &&
1031 	    p2c_header[0] != MEM_TRAIN_SYSTEM_SIGNATURE) {
1032 		DRM_DEBUG("p2c_header[0] is invalid, save depends on long training.\n");
1033 		ops |= PSP_MEM_TRAIN_SEND_LONG_MSG;
1034 	}
1035 
1036 	if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) {
1037 		ops &= ~PSP_MEM_TRAIN_SEND_SHORT_MSG;
1038 		ops |= PSP_MEM_TRAIN_SAVE;
1039 	}
1040 
1041 	DRM_DEBUG("Memory training ops:%x.\n", ops);
1042 
1043 	if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) {
1044 		ret = psp_v11_0_memory_training_send_msg(psp, PSP_BL__DRAM_LONG_TRAIN);
1045 		if (ret) {
1046 			DRM_ERROR("Send long training msg failed.\n");
1047 			return ret;
1048 		}
1049 	}
1050 
1051 	if (ops & PSP_MEM_TRAIN_SAVE) {
1052 		amdgpu_device_vram_access(psp->adev, ctx->p2c_train_data_offset, ctx->sys_cache, ctx->train_data_size, false);
1053 	}
1054 
1055 	if (ops & PSP_MEM_TRAIN_RESTORE) {
1056 		amdgpu_device_vram_access(psp->adev, ctx->c2p_train_data_offset, ctx->sys_cache, ctx->train_data_size, true);
1057 	}
1058 
1059 	if (ops & PSP_MEM_TRAIN_SEND_SHORT_MSG) {
1060 		ret = psp_v11_0_memory_training_send_msg(psp, (amdgpu_force_long_training > 0) ?
1061 							 PSP_BL__DRAM_LONG_TRAIN : PSP_BL__DRAM_SHORT_TRAIN);
1062 		if (ret) {
1063 			DRM_ERROR("send training msg failed.\n");
1064 			return ret;
1065 		}
1066 	}
1067 	ctx->training_cnt++;
1068 	return 0;
1069 }
1070 
1071 static const struct psp_funcs psp_v11_0_funcs = {
1072 	.init_microcode = psp_v11_0_init_microcode,
1073 	.bootloader_load_kdb = psp_v11_0_bootloader_load_kdb,
1074 	.bootloader_load_sysdrv = psp_v11_0_bootloader_load_sysdrv,
1075 	.bootloader_load_sos = psp_v11_0_bootloader_load_sos,
1076 	.ring_init = psp_v11_0_ring_init,
1077 	.ring_create = psp_v11_0_ring_create,
1078 	.ring_stop = psp_v11_0_ring_stop,
1079 	.ring_destroy = psp_v11_0_ring_destroy,
1080 	.cmd_submit = psp_v11_0_cmd_submit,
1081 	.compare_sram_data = psp_v11_0_compare_sram_data,
1082 	.mode1_reset = psp_v11_0_mode1_reset,
1083 	.xgmi_get_topology_info = psp_v11_0_xgmi_get_topology_info,
1084 	.xgmi_set_topology_info = psp_v11_0_xgmi_set_topology_info,
1085 	.xgmi_get_hive_id = psp_v11_0_xgmi_get_hive_id,
1086 	.xgmi_get_node_id = psp_v11_0_xgmi_get_node_id,
1087 	.support_vmr_ring = psp_v11_0_support_vmr_ring,
1088 	.ras_trigger_error = psp_v11_0_ras_trigger_error,
1089 	.ras_cure_posion = psp_v11_0_ras_cure_posion,
1090 	.rlc_autoload_start = psp_v11_0_rlc_autoload_start,
1091 	.mem_training_init = psp_v11_0_memory_training_init,
1092 	.mem_training_fini = psp_v11_0_memory_training_fini,
1093 	.mem_training = psp_v11_0_memory_training,
1094 };
1095 
1096 void psp_v11_0_set_psp_funcs(struct psp_context *psp)
1097 {
1098 	psp->funcs = &psp_v11_0_funcs;
1099 }
1100