xref: /dragonfly/sys/dev/drm/amd/amdgpu/amdgpu_vcn.c (revision 809f3802)
1b843c749SSergey Zigachev /*
2b843c749SSergey Zigachev  * Copyright 2016 Advanced Micro Devices, Inc.
3b843c749SSergey Zigachev  * All Rights Reserved.
4b843c749SSergey Zigachev  *
5b843c749SSergey Zigachev  * Permission is hereby granted, free of charge, to any person obtaining a
6b843c749SSergey Zigachev  * copy of this software and associated documentation files (the
7b843c749SSergey Zigachev  * "Software"), to deal in the Software without restriction, including
8b843c749SSergey Zigachev  * without limitation the rights to use, copy, modify, merge, publish,
9b843c749SSergey Zigachev  * distribute, sub license, and/or sell copies of the Software, and to
10b843c749SSergey Zigachev  * permit persons to whom the Software is furnished to do so, subject to
11b843c749SSergey Zigachev  * the following conditions:
12b843c749SSergey Zigachev  *
13b843c749SSergey Zigachev  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14b843c749SSergey Zigachev  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15b843c749SSergey Zigachev  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16b843c749SSergey Zigachev  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17b843c749SSergey Zigachev  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18b843c749SSergey Zigachev  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19b843c749SSergey Zigachev  * USE OR OTHER DEALINGS IN THE SOFTWARE.
20b843c749SSergey Zigachev  *
21b843c749SSergey Zigachev  * The above copyright notice and this permission notice (including the
22b843c749SSergey Zigachev  * next paragraph) shall be included in all copies or substantial portions
23b843c749SSergey Zigachev  * of the Software.
24b843c749SSergey Zigachev  *
25b843c749SSergey Zigachev  */
26b843c749SSergey Zigachev 
27b843c749SSergey Zigachev #include <linux/firmware.h>
28b843c749SSergey Zigachev #include <linux/module.h>
29b843c749SSergey Zigachev #include <drm/drmP.h>
30b843c749SSergey Zigachev #include <drm/drm.h>
31b843c749SSergey Zigachev 
32b843c749SSergey Zigachev #include "amdgpu.h"
33b843c749SSergey Zigachev #include "amdgpu_pm.h"
34b843c749SSergey Zigachev #include "amdgpu_vcn.h"
35b843c749SSergey Zigachev #include "soc15d.h"
36b843c749SSergey Zigachev #include "soc15_common.h"
37b843c749SSergey Zigachev 
38b843c749SSergey Zigachev #include "vcn/vcn_1_0_offset.h"
39b843c749SSergey Zigachev 
40b843c749SSergey Zigachev /* 1 second timeout */
41b843c749SSergey Zigachev #define VCN_IDLE_TIMEOUT	msecs_to_jiffies(1000)
42b843c749SSergey Zigachev 
43b843c749SSergey Zigachev /* Firmware Names */
44*809f3802SSergey Zigachev #define FIRMWARE_RAVEN		"amdgpufw_raven_vcn"
45b843c749SSergey Zigachev 
46b843c749SSergey Zigachev MODULE_FIRMWARE(FIRMWARE_RAVEN);
47b843c749SSergey Zigachev 
48b843c749SSergey Zigachev static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
49b843c749SSergey Zigachev 
amdgpu_vcn_sw_init(struct amdgpu_device * adev)50b843c749SSergey Zigachev int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
51b843c749SSergey Zigachev {
52b843c749SSergey Zigachev 	unsigned long bo_size;
53b843c749SSergey Zigachev 	const char *fw_name;
54b843c749SSergey Zigachev 	const struct common_firmware_header *hdr;
55b843c749SSergey Zigachev 	unsigned char fw_check;
56b843c749SSergey Zigachev 	int r;
57b843c749SSergey Zigachev 
58b843c749SSergey Zigachev 	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
59b843c749SSergey Zigachev 
60b843c749SSergey Zigachev 	switch (adev->asic_type) {
61b843c749SSergey Zigachev 	case CHIP_RAVEN:
62b843c749SSergey Zigachev 		fw_name = FIRMWARE_RAVEN;
63b843c749SSergey Zigachev 		break;
64b843c749SSergey Zigachev 	default:
65b843c749SSergey Zigachev 		return -EINVAL;
66b843c749SSergey Zigachev 	}
67b843c749SSergey Zigachev 
68b843c749SSergey Zigachev 	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
69b843c749SSergey Zigachev 	if (r) {
70b843c749SSergey Zigachev 		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
71b843c749SSergey Zigachev 			fw_name);
72b843c749SSergey Zigachev 		return r;
73b843c749SSergey Zigachev 	}
74b843c749SSergey Zigachev 
75b843c749SSergey Zigachev 	r = amdgpu_ucode_validate(adev->vcn.fw);
76b843c749SSergey Zigachev 	if (r) {
77b843c749SSergey Zigachev 		dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
78b843c749SSergey Zigachev 			fw_name);
79b843c749SSergey Zigachev 		release_firmware(adev->vcn.fw);
80b843c749SSergey Zigachev 		adev->vcn.fw = NULL;
81b843c749SSergey Zigachev 		return r;
82b843c749SSergey Zigachev 	}
83b843c749SSergey Zigachev 
84b843c749SSergey Zigachev 	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
85b843c749SSergey Zigachev 	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
86b843c749SSergey Zigachev 
87b843c749SSergey Zigachev 	/* Bit 20-23, it is encode major and non-zero for new naming convention.
88b843c749SSergey Zigachev 	 * This field is part of version minor and DRM_DISABLED_FLAG in old naming
89b843c749SSergey Zigachev 	 * convention. Since the l:wq!atest version minor is 0x5B and DRM_DISABLED_FLAG
90b843c749SSergey Zigachev 	 * is zero in old naming convention, this field is always zero so far.
91b843c749SSergey Zigachev 	 * These four bits are used to tell which naming convention is present.
92b843c749SSergey Zigachev 	 */
93b843c749SSergey Zigachev 	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
94b843c749SSergey Zigachev 	if (fw_check) {
95b843c749SSergey Zigachev 		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;
96b843c749SSergey Zigachev 
97b843c749SSergey Zigachev 		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
98b843c749SSergey Zigachev 		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
99b843c749SSergey Zigachev 		enc_major = fw_check;
100b843c749SSergey Zigachev 		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
101b843c749SSergey Zigachev 		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
102b843c749SSergey Zigachev 		DRM_INFO("Found VCN firmware Version ENC: %hu.%hu DEC: %hu VEP: %hu Revision: %hu\n",
103b843c749SSergey Zigachev 			enc_major, enc_minor, dec_ver, vep, fw_rev);
104b843c749SSergey Zigachev 	} else {
105b843c749SSergey Zigachev 		unsigned int version_major, version_minor, family_id;
106b843c749SSergey Zigachev 
107b843c749SSergey Zigachev 		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
108b843c749SSergey Zigachev 		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
109b843c749SSergey Zigachev 		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
110b843c749SSergey Zigachev 		DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n",
111b843c749SSergey Zigachev 			version_major, version_minor, family_id);
112b843c749SSergey Zigachev 	}
113b843c749SSergey Zigachev 
114b843c749SSergey Zigachev 	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
115b843c749SSergey Zigachev 		  +  AMDGPU_VCN_SESSION_SIZE * 40;
116b843c749SSergey Zigachev 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
117b843c749SSergey Zigachev 		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
118b843c749SSergey Zigachev 	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
119b843c749SSergey Zigachev 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
12078973132SSergey Zigachev 				    (u64 *)&adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
121b843c749SSergey Zigachev 	if (r) {
122b843c749SSergey Zigachev 		dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
123b843c749SSergey Zigachev 		return r;
124b843c749SSergey Zigachev 	}
125b843c749SSergey Zigachev 
126b843c749SSergey Zigachev 	return 0;
127b843c749SSergey Zigachev }
128b843c749SSergey Zigachev 
amdgpu_vcn_sw_fini(struct amdgpu_device * adev)129b843c749SSergey Zigachev int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
130b843c749SSergey Zigachev {
131b843c749SSergey Zigachev 	int i;
132b843c749SSergey Zigachev 
133b843c749SSergey Zigachev 	kvfree(adev->vcn.saved_bo);
134b843c749SSergey Zigachev 
135b843c749SSergey Zigachev 	amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
13678973132SSergey Zigachev 			      (u64 *)&adev->vcn.gpu_addr,
137b843c749SSergey Zigachev 			      (void **)&adev->vcn.cpu_addr);
138b843c749SSergey Zigachev 
139b843c749SSergey Zigachev 	amdgpu_ring_fini(&adev->vcn.ring_dec);
140b843c749SSergey Zigachev 
141b843c749SSergey Zigachev 	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
142b843c749SSergey Zigachev 		amdgpu_ring_fini(&adev->vcn.ring_enc[i]);
143b843c749SSergey Zigachev 
144b843c749SSergey Zigachev 	amdgpu_ring_fini(&adev->vcn.ring_jpeg);
145b843c749SSergey Zigachev 
146b843c749SSergey Zigachev 	release_firmware(adev->vcn.fw);
147b843c749SSergey Zigachev 
148b843c749SSergey Zigachev 	return 0;
149b843c749SSergey Zigachev }
150b843c749SSergey Zigachev 
amdgpu_vcn_suspend(struct amdgpu_device * adev)151b843c749SSergey Zigachev int amdgpu_vcn_suspend(struct amdgpu_device *adev)
152b843c749SSergey Zigachev {
153b843c749SSergey Zigachev 	unsigned size;
154b843c749SSergey Zigachev 	void *ptr;
155b843c749SSergey Zigachev 
156b843c749SSergey Zigachev 	cancel_delayed_work_sync(&adev->vcn.idle_work);
157b843c749SSergey Zigachev 
158b843c749SSergey Zigachev 	if (adev->vcn.vcpu_bo == NULL)
159b843c749SSergey Zigachev 		return 0;
160b843c749SSergey Zigachev 
161b843c749SSergey Zigachev 	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
162b843c749SSergey Zigachev 	ptr = adev->vcn.cpu_addr;
163b843c749SSergey Zigachev 
16478973132SSergey Zigachev 	adev->vcn.saved_bo = kmalloc(size, M_DRM, GFP_KERNEL);
165b843c749SSergey Zigachev 	if (!adev->vcn.saved_bo)
166b843c749SSergey Zigachev 		return -ENOMEM;
167b843c749SSergey Zigachev 
168b843c749SSergey Zigachev 	memcpy_fromio(adev->vcn.saved_bo, ptr, size);
169b843c749SSergey Zigachev 
170b843c749SSergey Zigachev 	return 0;
171b843c749SSergey Zigachev }
172b843c749SSergey Zigachev 
/**
 * amdgpu_vcn_resume - restore VCN VCPU buffer contents after suspend
 * @adev: amdgpu device pointer
 *
 * If amdgpu_vcn_suspend() left a snapshot, copy it back into the VCPU
 * buffer object and free it.  Otherwise (e.g. first resume after load)
 * re-upload the raw firmware image to the head of the bo — unless the
 * PSP is responsible for loading it — and zero the remainder.
 *
 * Returns 0 on success, -EINVAL if the VCPU bo was never allocated.
 */
int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	if (adev->vcn.vcpu_bo == NULL)
		return -EINVAL;

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	if (adev->vcn.saved_bo != NULL) {
		/* restore the snapshot taken at suspend time, then drop it */
		memcpy_toio(ptr, adev->vcn.saved_bo, size);
		kvfree(adev->vcn.saved_bo);
		adev->vcn.saved_bo = NULL;
	} else {
		const struct common_firmware_header *hdr;
		unsigned offset;

		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
			/* copy the ucode image to the start of the bo and
			 * advance past it so only the tail gets cleared */
			offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
			memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
				    le32_to_cpu(hdr->ucode_size_bytes));
			size -= le32_to_cpu(hdr->ucode_size_bytes);
			ptr += le32_to_cpu(hdr->ucode_size_bytes);
		}
		/* zero the stack/heap/session area */
		memset_io(ptr, 0, size);
	}

	return 0;
}
205b843c749SSergey Zigachev 
amdgpu_vcn_idle_work_handler(struct work_struct * work)206b843c749SSergey Zigachev static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
207b843c749SSergey Zigachev {
208b843c749SSergey Zigachev 	struct amdgpu_device *adev =
209b843c749SSergey Zigachev 		container_of(work, struct amdgpu_device, vcn.idle_work.work);
210b843c749SSergey Zigachev 	unsigned fences = amdgpu_fence_count_emitted(&adev->vcn.ring_dec);
211b843c749SSergey Zigachev 	unsigned i;
212b843c749SSergey Zigachev 
213b843c749SSergey Zigachev 	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
214b843c749SSergey Zigachev 		fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
215b843c749SSergey Zigachev 	}
216b843c749SSergey Zigachev 
217b843c749SSergey Zigachev 	fences += amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg);
218b843c749SSergey Zigachev 
219b843c749SSergey Zigachev 	if (fences == 0) {
220b843c749SSergey Zigachev 		if (adev->pm.dpm_enabled)
221b843c749SSergey Zigachev 			amdgpu_dpm_enable_uvd(adev, false);
222b843c749SSergey Zigachev 		else
223b843c749SSergey Zigachev 			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
224b843c749SSergey Zigachev 							       AMD_PG_STATE_GATE);
225b843c749SSergey Zigachev 	} else {
226b843c749SSergey Zigachev 		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
227b843c749SSergey Zigachev 	}
228b843c749SSergey Zigachev }
229b843c749SSergey Zigachev 
amdgpu_vcn_ring_begin_use(struct amdgpu_ring * ring)230b843c749SSergey Zigachev void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
231b843c749SSergey Zigachev {
232b843c749SSergey Zigachev 	struct amdgpu_device *adev = ring->adev;
233b843c749SSergey Zigachev 	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
234b843c749SSergey Zigachev 
235b843c749SSergey Zigachev 	if (set_clocks) {
236b843c749SSergey Zigachev 		if (adev->pm.dpm_enabled)
237b843c749SSergey Zigachev 			amdgpu_dpm_enable_uvd(adev, true);
238b843c749SSergey Zigachev 		else
239b843c749SSergey Zigachev 			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
240b843c749SSergey Zigachev 							       AMD_PG_STATE_UNGATE);
241b843c749SSergey Zigachev 	}
242b843c749SSergey Zigachev }
243b843c749SSergey Zigachev 
amdgpu_vcn_ring_end_use(struct amdgpu_ring * ring)244b843c749SSergey Zigachev void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
245b843c749SSergey Zigachev {
246b843c749SSergey Zigachev 	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
247b843c749SSergey Zigachev }
248b843c749SSergey Zigachev 
amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring * ring)249b843c749SSergey Zigachev int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
250b843c749SSergey Zigachev {
251b843c749SSergey Zigachev 	struct amdgpu_device *adev = ring->adev;
252b843c749SSergey Zigachev 	uint32_t tmp = 0;
253b843c749SSergey Zigachev 	unsigned i;
254b843c749SSergey Zigachev 	int r;
255b843c749SSergey Zigachev 
256b843c749SSergey Zigachev 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0xCAFEDEAD);
257b843c749SSergey Zigachev 	r = amdgpu_ring_alloc(ring, 3);
258b843c749SSergey Zigachev 	if (r) {
259b843c749SSergey Zigachev 		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
260b843c749SSergey Zigachev 			  ring->idx, r);
261b843c749SSergey Zigachev 		return r;
262b843c749SSergey Zigachev 	}
263b843c749SSergey Zigachev 	amdgpu_ring_write(ring,
264b843c749SSergey Zigachev 		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
265b843c749SSergey Zigachev 	amdgpu_ring_write(ring, 0xDEADBEEF);
266b843c749SSergey Zigachev 	amdgpu_ring_commit(ring);
267b843c749SSergey Zigachev 	for (i = 0; i < adev->usec_timeout; i++) {
268b843c749SSergey Zigachev 		tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID));
269b843c749SSergey Zigachev 		if (tmp == 0xDEADBEEF)
270b843c749SSergey Zigachev 			break;
271b843c749SSergey Zigachev 		DRM_UDELAY(1);
272b843c749SSergey Zigachev 	}
273b843c749SSergey Zigachev 
274b843c749SSergey Zigachev 	if (i < adev->usec_timeout) {
275b843c749SSergey Zigachev 		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
276b843c749SSergey Zigachev 			 ring->idx, i);
277b843c749SSergey Zigachev 	} else {
278b843c749SSergey Zigachev 		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
279b843c749SSergey Zigachev 			  ring->idx, tmp);
280b843c749SSergey Zigachev 		r = -EINVAL;
281b843c749SSergey Zigachev 	}
282b843c749SSergey Zigachev 	return r;
283b843c749SSergey Zigachev }
284b843c749SSergey Zigachev 
/**
 * amdgpu_vcn_dec_send_msg - submit a decoder message buffer to the ring
 * @ring: decode ring to submit on
 * @bo: reserved buffer object holding the message (ownership is taken:
 *      it is unreserved and unreferenced on every path, success or error)
 * @fence: optional out-parameter receiving a reference to the job fence
 *
 * Builds a 16-dword IB that hands the message bo's GPU address to the
 * VCPU via GPCOM registers (padded out with NO_OP writes) and submits it
 * directly.  Returns 0 on success or a negative error code.
 */
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
				   struct amdgpu_bo *bo,
				   struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	/* low/high halves of the message address, then the command word */
	ib->ptr[0] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0);
	ib->ptr[5] = 0;
	/* pad the remainder of the IB with NO_OP register writes */
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;

	/* keep the bo alive until the job fence signals, then drop our ref */
	amdgpu_bo_fence(bo, f, false);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	/* error paths still own the reserved bo: release it here */
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}
336b843c749SSergey Zigachev 
/**
 * amdgpu_vcn_dec_get_create_msg - build and submit a decoder create message
 * @ring: decode ring to submit on
 * @handle: session handle to embed in the message
 * @fence: optional out-parameter for the resulting job fence
 *
 * Allocates a 1024-dword VRAM message buffer, fills in the "create
 * session" message header, and hands the bo to amdgpu_vcn_dec_send_msg()
 * (which takes ownership).  Returns 0 on success or a negative error.
 */
static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, idx;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	/* clear the whole buffer first, then fill in the header words */
	for (idx = 0; idx < 1024; ++idx)
		msg[idx] = cpu_to_le32(0x0);

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000038);
	msg[2] = cpu_to_le32(0x00000001);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000001);
	msg[7] = cpu_to_le32(0x00000028);
	msg[8] = cpu_to_le32(0x00000010);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x00000007);
	msg[11] = cpu_to_le32(0x00000000);
	msg[12] = cpu_to_le32(0x00000780);
	msg[13] = cpu_to_le32(0x00000440);

	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}
370b843c749SSergey Zigachev 
/**
 * amdgpu_vcn_dec_get_destroy_msg - build and submit a decoder destroy message
 * @ring: decode ring to submit on
 * @handle: session handle to destroy
 * @fence: optional out-parameter for the resulting job fence
 *
 * Allocates a 1024-dword VRAM message buffer, fills in the "destroy
 * session" message header, and hands the bo to amdgpu_vcn_dec_send_msg()
 * (which takes ownership).  Returns 0 on success or a negative error.
 */
static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, idx;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	/* clear the whole buffer first, then fill in the header words */
	for (idx = 0; idx < 1024; ++idx)
		msg[idx] = cpu_to_le32(0x0);

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000018);
	msg[2] = cpu_to_le32(0x00000000);
	msg[3] = cpu_to_le32(0x00000002);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);

	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}
396b843c749SSergey Zigachev 
amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring * ring,long timeout)397b843c749SSergey Zigachev int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
398b843c749SSergey Zigachev {
399b843c749SSergey Zigachev 	struct dma_fence *fence;
400b843c749SSergey Zigachev 	long r;
401b843c749SSergey Zigachev 
402b843c749SSergey Zigachev 	r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
403b843c749SSergey Zigachev 	if (r) {
404b843c749SSergey Zigachev 		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
405b843c749SSergey Zigachev 		goto error;
406b843c749SSergey Zigachev 	}
407b843c749SSergey Zigachev 
408b843c749SSergey Zigachev 	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
409b843c749SSergey Zigachev 	if (r) {
410b843c749SSergey Zigachev 		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
411b843c749SSergey Zigachev 		goto error;
412b843c749SSergey Zigachev 	}
413b843c749SSergey Zigachev 
414b843c749SSergey Zigachev 	r = dma_fence_wait_timeout(fence, false, timeout);
415b843c749SSergey Zigachev 	if (r == 0) {
416b843c749SSergey Zigachev 		DRM_ERROR("amdgpu: IB test timed out.\n");
417b843c749SSergey Zigachev 		r = -ETIMEDOUT;
418b843c749SSergey Zigachev 	} else if (r < 0) {
419b843c749SSergey Zigachev 		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
420b843c749SSergey Zigachev 	} else {
421b843c749SSergey Zigachev 		DRM_DEBUG("ib test on ring %d succeeded\n",  ring->idx);
422b843c749SSergey Zigachev 		r = 0;
423b843c749SSergey Zigachev 	}
424b843c749SSergey Zigachev 
425b843c749SSergey Zigachev 	dma_fence_put(fence);
426b843c749SSergey Zigachev 
427b843c749SSergey Zigachev error:
428b843c749SSergey Zigachev 	return r;
429b843c749SSergey Zigachev }
430b843c749SSergey Zigachev 
amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring * ring)431b843c749SSergey Zigachev int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
432b843c749SSergey Zigachev {
433b843c749SSergey Zigachev 	struct amdgpu_device *adev = ring->adev;
434b843c749SSergey Zigachev 	uint32_t rptr;
435b843c749SSergey Zigachev 	unsigned i;
436b843c749SSergey Zigachev 	int r;
437b843c749SSergey Zigachev 
438b843c749SSergey Zigachev 	r = amdgpu_ring_alloc(ring, 16);
439b843c749SSergey Zigachev 	if (r) {
440b843c749SSergey Zigachev 		DRM_ERROR("amdgpu: vcn enc failed to lock ring %d (%d).\n",
441b843c749SSergey Zigachev 			  ring->idx, r);
442b843c749SSergey Zigachev 		return r;
443b843c749SSergey Zigachev 	}
444b843c749SSergey Zigachev 
445b843c749SSergey Zigachev 	rptr = amdgpu_ring_get_rptr(ring);
446b843c749SSergey Zigachev 
447b843c749SSergey Zigachev 	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
448b843c749SSergey Zigachev 	amdgpu_ring_commit(ring);
449b843c749SSergey Zigachev 
450b843c749SSergey Zigachev 	for (i = 0; i < adev->usec_timeout; i++) {
451b843c749SSergey Zigachev 		if (amdgpu_ring_get_rptr(ring) != rptr)
452b843c749SSergey Zigachev 			break;
453b843c749SSergey Zigachev 		DRM_UDELAY(1);
454b843c749SSergey Zigachev 	}
455b843c749SSergey Zigachev 
456b843c749SSergey Zigachev 	if (i < adev->usec_timeout) {
457b843c749SSergey Zigachev 		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
458b843c749SSergey Zigachev 			 ring->idx, i);
459b843c749SSergey Zigachev 	} else {
460b843c749SSergey Zigachev 		DRM_ERROR("amdgpu: ring %d test failed\n",
461b843c749SSergey Zigachev 			  ring->idx);
462b843c749SSergey Zigachev 		r = -ETIMEDOUT;
463b843c749SSergey Zigachev 	}
464b843c749SSergey Zigachev 
465b843c749SSergey Zigachev 	return r;
466b843c749SSergey Zigachev }
467b843c749SSergey Zigachev 
/**
 * amdgpu_vcn_enc_get_create_msg - submit an encoder "open session" IB
 * @ring: encode ring to submit on
 * @handle: session handle to embed in the session-info packet
 * @fence: optional out-parameter receiving a reference to the job fence
 *
 * Builds a 16-dword IB containing session-info, task-info and
 * op-initialize packets and submits it directly.  Returns 0 on success
 * or a negative error code (the job is freed on failure).
 */
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	/* scratch address just past the IB, handed to the session-info packet */
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	/* zero-pad the rest of the IB */
	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
519b843c749SSergey Zigachev 
/**
 * amdgpu_vcn_enc_get_destroy_msg - submit an encoder "close session" IB
 * @ring: encode ring to submit on
 * @handle: session handle to close
 * @fence: optional out-parameter receiving a reference to the job fence
 *
 * Mirror of amdgpu_vcn_enc_get_create_msg(): builds a 16-dword IB with
 * session-info, task-info and op-close-session packets and submits it
 * directly.  Returns 0 on success or a negative error code (the job is
 * freed on failure).
 */
static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
				struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	/* scratch address just past the IB, handed to the session-info packet */
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	/* zero-pad the rest of the IB */
	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
571b843c749SSergey Zigachev 
amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring * ring,long timeout)572b843c749SSergey Zigachev int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
573b843c749SSergey Zigachev {
574b843c749SSergey Zigachev 	struct dma_fence *fence = NULL;
575b843c749SSergey Zigachev 	long r;
576b843c749SSergey Zigachev 
577b843c749SSergey Zigachev 	r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
578b843c749SSergey Zigachev 	if (r) {
579b843c749SSergey Zigachev 		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
580b843c749SSergey Zigachev 		goto error;
581b843c749SSergey Zigachev 	}
582b843c749SSergey Zigachev 
583b843c749SSergey Zigachev 	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
584b843c749SSergey Zigachev 	if (r) {
585b843c749SSergey Zigachev 		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
586b843c749SSergey Zigachev 		goto error;
587b843c749SSergey Zigachev 	}
588b843c749SSergey Zigachev 
589b843c749SSergey Zigachev 	r = dma_fence_wait_timeout(fence, false, timeout);
590b843c749SSergey Zigachev 	if (r == 0) {
591b843c749SSergey Zigachev 		DRM_ERROR("amdgpu: IB test timed out.\n");
592b843c749SSergey Zigachev 		r = -ETIMEDOUT;
593b843c749SSergey Zigachev 	} else if (r < 0) {
594b843c749SSergey Zigachev 		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
595b843c749SSergey Zigachev 	} else {
596b843c749SSergey Zigachev 		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
597b843c749SSergey Zigachev 		r = 0;
598b843c749SSergey Zigachev 	}
599b843c749SSergey Zigachev error:
600b843c749SSergey Zigachev 	dma_fence_put(fence);
601b843c749SSergey Zigachev 	return r;
602b843c749SSergey Zigachev }
603b843c749SSergey Zigachev 
amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring * ring)604b843c749SSergey Zigachev int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
605b843c749SSergey Zigachev {
606b843c749SSergey Zigachev 	struct amdgpu_device *adev = ring->adev;
607b843c749SSergey Zigachev 	uint32_t tmp = 0;
608b843c749SSergey Zigachev 	unsigned i;
609b843c749SSergey Zigachev 	int r;
610b843c749SSergey Zigachev 
611b843c749SSergey Zigachev 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0xCAFEDEAD);
612b843c749SSergey Zigachev 	r = amdgpu_ring_alloc(ring, 3);
613b843c749SSergey Zigachev 
614b843c749SSergey Zigachev 	if (r) {
615b843c749SSergey Zigachev 		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
616b843c749SSergey Zigachev 				  ring->idx, r);
617b843c749SSergey Zigachev 		return r;
618b843c749SSergey Zigachev 	}
619b843c749SSergey Zigachev 
620b843c749SSergey Zigachev 	amdgpu_ring_write(ring,
621b843c749SSergey Zigachev 		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0, 0, 0));
622b843c749SSergey Zigachev 	amdgpu_ring_write(ring, 0xDEADBEEF);
623b843c749SSergey Zigachev 	amdgpu_ring_commit(ring);
624b843c749SSergey Zigachev 
625b843c749SSergey Zigachev 	for (i = 0; i < adev->usec_timeout; i++) {
626b843c749SSergey Zigachev 		tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID));
627b843c749SSergey Zigachev 		if (tmp == 0xDEADBEEF)
628b843c749SSergey Zigachev 			break;
629b843c749SSergey Zigachev 		DRM_UDELAY(1);
630b843c749SSergey Zigachev 	}
631b843c749SSergey Zigachev 
632b843c749SSergey Zigachev 	if (i < adev->usec_timeout) {
633b843c749SSergey Zigachev 		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
634b843c749SSergey Zigachev 				  ring->idx, i);
635b843c749SSergey Zigachev 	} else {
636b843c749SSergey Zigachev 		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
637b843c749SSergey Zigachev 				  ring->idx, tmp);
638b843c749SSergey Zigachev 		r = -EINVAL;
639b843c749SSergey Zigachev 	}
640b843c749SSergey Zigachev 
641b843c749SSergey Zigachev 	return r;
642b843c749SSergey Zigachev }
643b843c749SSergey Zigachev 
/**
 * amdgpu_vcn_jpeg_set_reg - submit an IB that writes a pattern to UVD_JPEG_PITCH
 * @ring: JPEG ring to submit on
 * @handle: unused by this function; kept for symmetry with the other
 *          message helpers
 * @fence: optional out parameter; receives a reference to the submission fence
 *
 * Builds a 16-dword IB whose first packet writes 0xDEADBEEF to the
 * UVD_JPEG_PITCH register, padded with TYPE6 packets, and submits it
 * directly.  Returns 0 on success or a negative error code.
 */
static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle,
		struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	int idx, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	/* First packet: register write of the test pattern. */
	ib->ptr[0] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH), 0, 0, PACKETJ_TYPE0);
	ib->ptr[1] = 0xDEADBEEF;
	/* Pad the rest of the IB with TYPE6 packets. */
	for (idx = 2; idx < 16; idx += 2) {
		ib->ptr[idx] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
		ib->ptr[idx + 1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r) {
		amdgpu_job_free(job);
		return r;
	}

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;
}
682b843c749SSergey Zigachev 
amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring * ring,long timeout)683b843c749SSergey Zigachev int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
684b843c749SSergey Zigachev {
685b843c749SSergey Zigachev 	struct amdgpu_device *adev = ring->adev;
686b843c749SSergey Zigachev 	uint32_t tmp = 0;
687b843c749SSergey Zigachev 	unsigned i;
688b843c749SSergey Zigachev 	struct dma_fence *fence = NULL;
689b843c749SSergey Zigachev 	long r = 0;
690b843c749SSergey Zigachev 
691b843c749SSergey Zigachev 	r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence);
692b843c749SSergey Zigachev 	if (r) {
693b843c749SSergey Zigachev 		DRM_ERROR("amdgpu: failed to set jpeg register (%ld).\n", r);
694b843c749SSergey Zigachev 		goto error;
695b843c749SSergey Zigachev 	}
696b843c749SSergey Zigachev 
697b843c749SSergey Zigachev 	r = dma_fence_wait_timeout(fence, false, timeout);
698b843c749SSergey Zigachev 	if (r == 0) {
699b843c749SSergey Zigachev 		DRM_ERROR("amdgpu: IB test timed out.\n");
700b843c749SSergey Zigachev 		r = -ETIMEDOUT;
701b843c749SSergey Zigachev 		goto error;
702b843c749SSergey Zigachev 	} else if (r < 0) {
703b843c749SSergey Zigachev 		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
704b843c749SSergey Zigachev 		goto error;
705b843c749SSergey Zigachev 	} else
706b843c749SSergey Zigachev 		r = 0;
707b843c749SSergey Zigachev 
708b843c749SSergey Zigachev 	for (i = 0; i < adev->usec_timeout; i++) {
709b843c749SSergey Zigachev 		tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH));
710b843c749SSergey Zigachev 		if (tmp == 0xDEADBEEF)
711b843c749SSergey Zigachev 			break;
712b843c749SSergey Zigachev 		DRM_UDELAY(1);
713b843c749SSergey Zigachev 	}
714b843c749SSergey Zigachev 
715b843c749SSergey Zigachev 	if (i < adev->usec_timeout)
716b843c749SSergey Zigachev 		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
717b843c749SSergey Zigachev 	else {
718b843c749SSergey Zigachev 		DRM_ERROR("ib test failed (0x%08X)\n", tmp);
719b843c749SSergey Zigachev 		r = -EINVAL;
720b843c749SSergey Zigachev 	}
721b843c749SSergey Zigachev 
722b843c749SSergey Zigachev 	dma_fence_put(fence);
723b843c749SSergey Zigachev 
724b843c749SSergey Zigachev error:
725b843c749SSergey Zigachev 	return r;
726b843c749SSergey Zigachev }
727