xref: /dragonfly/sys/dev/drm/amd/amdgpu/uvd_v6_0.c (revision 78973132)
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_6_0_d.h"
#include "uvd/uvd_6_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"
#include "bif/bif_5_1_d.h"
#include "gmc/gmc_8_1_d.h"
#include "vi.h"
#include "ivsrcid/ivsrcid_vislands30.h"

/* Polaris10/11/12 firmware version */
#define FW_1_130_16 ((1 << 24) | (130 << 16) | (16 << 8))
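/*
 * Judging by the macro name, this appears to pack firmware version 1.130.16
 * one field per byte:
 *   (1 << 24) | (130 << 16) | (16 << 8) == 0x01821000
 * uvd_v6_0_enc_support() below compares adev->uvd.fw_version against this
 * value to decide whether the encode rings are usable.
 */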

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev);

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);
static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev);
static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);

/**
 * uvd_v6_0_enc_support - get encode support status
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the current hardware encode support status
 */
static inline bool uvd_v6_0_enc_support(struct amdgpu_device *adev)
{
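	/*
	 * ENC rings are only present on Polaris10 through VegaM and need
	 * firmware 1.130.16 or newer; fw_version == 0 presumably means the
	 * firmware has not been parsed yet, so assume support until then.
	 */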
	return ((adev->asic_type >= CHIP_POLARIS10) &&
			(adev->asic_type <= CHIP_VEGAM) &&
			(!adev->uvd.fw_version || adev->uvd.fw_version >= FW_1_130_16));
}

/**
 * uvd_v6_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v6_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst->ring_enc[0])
		return RREG32(mmUVD_RB_RPTR);
	else
		return RREG32(mmUVD_RB_RPTR2);
}

/**
 * uvd_v6_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v6_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst->ring_enc[0])
		return RREG32(mmUVD_RB_WPTR);
	else
		return RREG32(mmUVD_RB_WPTR2);
}

/**
 * uvd_v6_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

/**
 * uvd_v6_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst->ring_enc[0])
		WREG32(mmUVD_RB_WPTR,
			lower_32_bits(ring->wptr));
	else
		WREG32(mmUVD_RB_WPTR2,
			lower_32_bits(ring->wptr));
}

/**
 * uvd_v6_0_enc_ring_test_ring - test if UVD ENC ring is working
 *
 * @ring: the engine to test on
 *
 */
static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: uvd enc failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}

/**
 * uvd_v6_0_enc_get_create_msg - generate a UVD ENC create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				       struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
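	/*
	 * Each ENC command below starts with its total size in bytes followed
	 * by an opcode: 0x18 bytes (6 dwords) of session info, 0x14 bytes
	 * (5 dwords) of task info, then the 8-byte "op initialize" command.
	 */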
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00010000;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * uvd_v6_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @direct: submit the msg directly or through the scheduler
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
					uint32_t handle,
					bool direct, struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00010000;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

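	/*
	 * Direct submission writes the IB on the ring immediately, bypassing
	 * the GPU scheduler; the scheduled path goes through the VCE entity.
	 */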
	if (direct)
		r = amdgpu_job_submit_direct(job, ring, &f);
	else
		r = amdgpu_job_submit(job, &ring->adev->vce.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * uvd_v6_0_enc_ring_test_ib - test if UVD ENC IBs are working
 *
 * @ring: the engine to test on
 * @timeout: how long to wait for the fence, in jiffies
 *
 */
static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	r = uvd_v6_0_enc_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = uvd_v6_0_enc_get_destroy_msg(ring, 1, true, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}
error:
	dma_fence_put(fence);
	return r;
}

static int uvd_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	adev->uvd.num_uvd_inst = 1;

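	/*
	 * The harvest fuses are only meaningful on dGPUs: if UVD has been
	 * fused off, bail out so this IP block is never initialized.
	 */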
	if (!(adev->flags & AMD_IS_APU) &&
	    (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK))
		return -ENOENT;

	uvd_v6_0_set_ring_funcs(adev);

	if (uvd_v6_0_enc_support(adev)) {
		adev->uvd.num_enc_rings = 2;
		uvd_v6_0_set_enc_ring_funcs(adev);
	}

	uvd_v6_0_set_irq_funcs(adev);

	return 0;
}

static int uvd_v6_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
	if (r)
		return r;

	/* UVD ENC TRAP */
	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP, &adev->uvd.inst->irq);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	if (!uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			adev->uvd.inst->ring_enc[i].funcs = NULL;

		adev->uvd.inst->irq.num_types = 1;
		adev->uvd.num_enc_rings = 0;

		DRM_INFO("UVD ENC is disabled\n");
	}

	ring = &adev->uvd.inst->ring;
	ksprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst->ring_enc[i];
			ksprintf(ring->name, "uvd_enc%d", i);
			r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_entity_init(adev);

	return r;
}

static int uvd_v6_0_sw_fini(void *handle)
{
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
	}

	return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v6_0_hw_init - start and test UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v6_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t tmp;
	int i, r;

	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
	uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
	uvd_v6_0_enable_mgcg(adev, true);

	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

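	/*
	 * PACKET0(reg, n) emits a type-0 packet that writes n + 1 dwords to
	 * consecutive registers starting at reg, so each pair below is a
	 * register header followed by the value to write.
	 */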
	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst->ring_enc[i];
			ring->ready = true;
			r = amdgpu_ring_test_ring(ring);
			if (r) {
				ring->ready = false;
				goto done;
			}
		}
	}

done:
	if (!r) {
		if (uvd_v6_0_enc_support(adev))
			DRM_INFO("UVD and UVD ENC initialized successfully.\n");
		else
			DRM_INFO("UVD initialized successfully.\n");
	}

	return r;
}

/**
 * uvd_v6_0_hw_fini - stop the hardware block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v6_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;

	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v6_0_stop(adev);

	ring->ready = false;

	return 0;
}

static int uvd_v6_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v6_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v6_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v6_0_hw_init(adev);
}

/**
 * uvd_v6_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

	/* program memory controller bits 0-27 */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->uvd.inst->gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->uvd.inst->gpu_addr));

	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
	size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = AMDGPU_UVD_HEAP_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);

	WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
}

#if 0
static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
		bool enable)
{
	u32 data, data1;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	if (enable) {
		data |= UVD_CGC_GATE__SYS_MASK |
				UVD_CGC_GATE__UDEC_MASK |
				UVD_CGC_GATE__MPEG2_MASK |
				UVD_CGC_GATE__RBC_MASK |
				UVD_CGC_GATE__LMI_MC_MASK |
				UVD_CGC_GATE__IDCT_MASK |
				UVD_CGC_GATE__MPRD_MASK |
				UVD_CGC_GATE__MPC_MASK |
				UVD_CGC_GATE__LBSI_MASK |
				UVD_CGC_GATE__LRBBM_MASK |
				UVD_CGC_GATE__UDEC_RE_MASK |
				UVD_CGC_GATE__UDEC_CM_MASK |
				UVD_CGC_GATE__UDEC_IT_MASK |
				UVD_CGC_GATE__UDEC_DB_MASK |
				UVD_CGC_GATE__UDEC_MP_MASK |
				UVD_CGC_GATE__WCB_MASK |
				UVD_CGC_GATE__VCPU_MASK |
				UVD_CGC_GATE__SCPU_MASK;
		data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK |
				UVD_SUVD_CGC_GATE__SRE_H264_MASK |
				UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SIT_H264_MASK |
				UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SCM_H264_MASK |
				UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SDB_H264_MASK |
				UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;
	} else {
		data &= ~(UVD_CGC_GATE__SYS_MASK |
				UVD_CGC_GATE__UDEC_MASK |
				UVD_CGC_GATE__MPEG2_MASK |
				UVD_CGC_GATE__RBC_MASK |
				UVD_CGC_GATE__LMI_MC_MASK |
				UVD_CGC_GATE__LMI_UMC_MASK |
				UVD_CGC_GATE__IDCT_MASK |
				UVD_CGC_GATE__MPRD_MASK |
				UVD_CGC_GATE__MPC_MASK |
				UVD_CGC_GATE__LBSI_MASK |
				UVD_CGC_GATE__LRBBM_MASK |
				UVD_CGC_GATE__UDEC_RE_MASK |
				UVD_CGC_GATE__UDEC_CM_MASK |
				UVD_CGC_GATE__UDEC_IT_MASK |
				UVD_CGC_GATE__UDEC_DB_MASK |
				UVD_CGC_GATE__UDEC_MP_MASK |
				UVD_CGC_GATE__WCB_MASK |
				UVD_CGC_GATE__VCPU_MASK |
				UVD_CGC_GATE__SCPU_MASK);
		data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK |
				UVD_SUVD_CGC_GATE__SRE_H264_MASK |
				UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SIT_H264_MASK |
				UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SCM_H264_MASK |
				UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SDB_H264_MASK |
				UVD_SUVD_CGC_GATE__SDB_HEVC_MASK);
	}
	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

/**
 * uvd_v6_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v6_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;

	/* disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v6_0_mc_resume(adev);

	/* disable interrupt */
	WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0);

	/* stall UMC and register bus before resetting VCPU */
	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1);
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET,
		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
		UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL,
		(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);

	/* enable UMC */
	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0);

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);

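	/*
	 * Poll UVD_STATUS for the VCPU-running bit (bit 1). If it never comes
	 * up, pulse VCPU_SOFT_RESET and retry; give up after ten attempts.
	 */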
	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1);
		mdelay(10);
		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}
	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN,
		(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
		~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

	/* clear the bit 4 of UVD_STATUS */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
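	/* e.g. a 4 KiB ring buffer gives rb_bufsz = order_base_2(4096) = 12 */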
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32(mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

	WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);

	if (uvd_v6_0_enc_support(adev)) {
		ring = &adev->uvd.inst->ring_enc[0];
		WREG32(mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32(mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32(mmUVD_RB_SIZE, ring->ring_size / 4);

		ring = &adev->uvd.inst->ring_enc[1];
		WREG32(mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32(mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32(mmUVD_RB_SIZE2, ring->ring_size / 4);
	}

	return 0;
}

/**
 * uvd_v6_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v6_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	WREG32(mmUVD_STATUS, 0);
}

/**
 * uvd_v6_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address the fence value is written to
 * @seq: sequence number to write
 * @flags: AMDGPU_FENCE_FLAG_* flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, uint64_t addr, uint64_t seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

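	/*
	 * Two GPCOM packets: the first (cmd 0) writes the sequence number to
	 * the fence address, the second (cmd 2) appears to raise the trap
	 * interrupt once the write has landed.
	 */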
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v6_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address the fence value is written to
 * @seq: sequence number to write
 * @flags: AMDGPU_FENCE_FLAG_* flags
 *
 * Write an enc fence and a trap command to the ring.
 */
static void uvd_v6_0_enc_ring_emit_fence(struct amdgpu_ring *ring, uint64_t addr,
			uint64_t seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
}

/**
 * uvd_v6_0_ring_emit_hdp_flush - skip HDP flushing
 *
 * @ring: amdgpu_ring pointer
 */
static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	/* The firmware doesn't seem to like touching registers at this point. */
}

/**
 * uvd_v6_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

1005b843c749SSergey Zigachev /**
1006b843c749SSergey Zigachev  * uvd_v6_0_ring_emit_ib - execute indirect buffer
1007b843c749SSergey Zigachev  *
1008b843c749SSergey Zigachev  * @ring: amdgpu_ring pointer
1009b843c749SSergey Zigachev  * @ib: indirect buffer to execute
1010b843c749SSergey Zigachev  *
1011b843c749SSergey Zigachev  * Write ring commands to execute the indirect buffer
1012b843c749SSergey Zigachev  */
uvd_v6_0_ring_emit_ib(struct amdgpu_ring * ring,struct amdgpu_ib * ib,unsigned vmid,bool ctx_switch)1013b843c749SSergey Zigachev static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
1014b843c749SSergey Zigachev 				  struct amdgpu_ib *ib,
1015b843c749SSergey Zigachev 				  unsigned vmid, bool ctx_switch)
1016b843c749SSergey Zigachev {
1017b843c749SSergey Zigachev 	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
1018b843c749SSergey Zigachev 	amdgpu_ring_write(ring, vmid);
1019b843c749SSergey Zigachev 
1020b843c749SSergey Zigachev 	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
1021b843c749SSergey Zigachev 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1022b843c749SSergey Zigachev 	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
1023b843c749SSergey Zigachev 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1024b843c749SSergey Zigachev 	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
1025b843c749SSergey Zigachev 	amdgpu_ring_write(ring, ib->length_dw);
1026b843c749SSergey Zigachev }
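
/*
 * The function above emits four PACKET0 headers plus four payload dwords,
 * eight dwords total; the ring-funcs tables below accordingly declare
 * .emit_ib_size = 8 for the decode ring.
 */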

/**
 * uvd_v6_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
		struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}
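
/*
 * Unlike the decode ring, the encode ring speaks raw HEVC_ENC_CMD_* opcodes
 * rather than PACKET0 register writes: five dwords per IB, matching
 * .emit_ib_size = 5 in uvd_v6_0_enc_ring_vm_funcs below.
 */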

static void uvd_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
				    uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0x8);
}
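
/*
 * Illustrative sketch, not part of the driver: .emit_wreg is the hook the
 * generic GMC code uses to write VM registers from inside a command stream;
 * amdgpu_gmc_emit_flush_gpu_tlb() (used by uvd_v6_0_ring_emit_vm_flush
 * below) reaches it through the function pointer.  The helper name is
 * hypothetical, and the VCPU command semantics (0x8 apparently meaning
 * "copy DATA1 into the register addressed by DATA0", hence reg << 2 for a
 * byte address) are inferred from this file's usage, not a documented
 * contract.
 */
#if 0
static void example_emit_vm_register_write(struct amdgpu_ring *ring,
					   uint32_t reg, uint32_t val)
{
	/* dispatches to uvd_v6_0_ring_emit_wreg for UVD rings */
	ring->funcs->emit_wreg(ring, reg, val);
}
#endif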

static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for the per-VMID invalidate request bit to clear */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 1 << vmid); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xC);
}

static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	/* poll the fence value at addr until it reaches seq */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xE);
}
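
/*
 * The encode ring's pipeline sync below performs the same wait with a single
 * HEVC_ENC_CMD_WAIT_GE opcode, which suggests the VCPU command 0xE used
 * above is likewise a "wait until *addr >= seq" operation.
 */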

static void uvd_v6_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
		amdgpu_ring_write(ring, 0);
	}
}
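
/*
 * NOPs go out as PACKET0(mmUVD_NO_OP)/payload pairs, so both the write
 * pointer and the requested count must be even (hence the WARN_ON above).
 * The generic ring commit path uses this hook to pad submissions out to
 * the ring's 16-dword alignment (.align_mask = 0xf in the tables below).
 */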

static void uvd_v6_0_enc_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, HEVC_ENC_CMD_WAIT_GE);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
}

static void uvd_v6_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
}

static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_UPDATE_PTB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, pd_addr >> 12);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FLUSH_TLB);
	amdgpu_ring_write(ring, vmid);
}
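
/*
 * The UPDATE_PTB payload is pd_addr >> 12: the page-directory base expressed
 * as a 4 KiB page frame number, the granularity used by the GPUVM page
 * tables on this generation.
 */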

static bool uvd_v6_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v6_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (uvd_v6_0_is_idle(handle))
			return 0;
	}
	return -ETIMEDOUT;
}

#define AMDGPU_UVD_STATUS_BUSY_MASK    0xfd
static bool uvd_v6_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
	    (RREG32(mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK))
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

	if (srbm_soft_reset) {
		adev->uvd.inst->srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->uvd.inst->srbm_soft_reset = 0;
		return false;
	}
}

static int uvd_v6_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst->srbm_soft_reset)
		return 0;

	uvd_v6_0_stop(adev);
	return 0;
}

static int uvd_v6_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset, tmp;

	if (!adev->uvd.inst->srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->uvd.inst->srbm_soft_reset;

	/* assert the reset bits */
	tmp = RREG32(mmSRBM_SOFT_RESET);
	tmp |= srbm_soft_reset;
	dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
	WREG32(mmSRBM_SOFT_RESET, tmp);
	tmp = RREG32(mmSRBM_SOFT_RESET);

	udelay(50);

	/* deassert the reset bits */
	tmp &= ~srbm_soft_reset;
	WREG32(mmSRBM_SOFT_RESET, tmp);
	tmp = RREG32(mmSRBM_SOFT_RESET);

	/* Wait a little for things to settle down */
	udelay(50);

	return 0;
}

static int uvd_v6_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst->srbm_soft_reset)
		return 0;

	mdelay(5);

	return uvd_v6_0_start(adev);
}

static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	/* TODO */
	return 0;
}

static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	bool int_handled = true;

	DRM_DEBUG("IH: UVD TRAP\n");

	switch (entry->src_id) {
	case 124:
		amdgpu_fence_process(&adev->uvd.inst->ring);
		break;
	case 119:
		if (likely(uvd_v6_0_enc_support(adev)))
			amdgpu_fence_process(&adev->uvd.inst->ring_enc[0]);
		else
			int_handled = false;
		break;
	case 120:
		if (likely(uvd_v6_0_enc_support(adev)))
			amdgpu_fence_process(&adev->uvd.inst->ring_enc[1]);
		else
			int_handled = false;
		break;
	}

	if (!int_handled)
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);

	return 0;
}
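
/*
 * The numeric src_id cases above presumably correspond to the Volcanic
 * Islands interrupt source IDs from ivsrcid_vislands30.h (included at the
 * top of this file): 124 for the UVD system message and 119/120 for the two
 * encode rings.  They are left as magic numbers here, as in the original.
 */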

static void uvd_v6_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t data1, data3;

	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	data3 = RREG32(mmUVD_CGC_GATE);

	data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK |
		     UVD_SUVD_CGC_GATE__SRE_H264_MASK |
		     UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
		     UVD_SUVD_CGC_GATE__SIT_H264_MASK |
		     UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
		     UVD_SUVD_CGC_GATE__SCM_H264_MASK |
		     UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
		     UVD_SUVD_CGC_GATE__SDB_H264_MASK |
		     UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;

	if (enable) {
		data3 |= (UVD_CGC_GATE__SYS_MASK       |
			UVD_CGC_GATE__UDEC_MASK      |
			UVD_CGC_GATE__MPEG2_MASK     |
			UVD_CGC_GATE__RBC_MASK       |
			UVD_CGC_GATE__LMI_MC_MASK    |
			UVD_CGC_GATE__LMI_UMC_MASK   |
			UVD_CGC_GATE__IDCT_MASK      |
			UVD_CGC_GATE__MPRD_MASK      |
			UVD_CGC_GATE__MPC_MASK       |
			UVD_CGC_GATE__LBSI_MASK      |
			UVD_CGC_GATE__LRBBM_MASK     |
			UVD_CGC_GATE__UDEC_RE_MASK   |
			UVD_CGC_GATE__UDEC_CM_MASK   |
			UVD_CGC_GATE__UDEC_IT_MASK   |
			UVD_CGC_GATE__UDEC_DB_MASK   |
			UVD_CGC_GATE__UDEC_MP_MASK   |
			UVD_CGC_GATE__WCB_MASK       |
			UVD_CGC_GATE__JPEG_MASK      |
			UVD_CGC_GATE__SCPU_MASK      |
			UVD_CGC_GATE__JPEG2_MASK);
		/* the VCPU clock may only be gated when powergating is enabled */
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
			data3 |= UVD_CGC_GATE__VCPU_MASK;

		data3 &= ~UVD_CGC_GATE__REGS_MASK;
	} else {
		data3 = 0;
	}

	WREG32(mmUVD_SUVD_CGC_GATE, data1);
	WREG32(mmUVD_CGC_GATE, data3);
}

static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data2;

	data = RREG32(mmUVD_CGC_CTRL);
	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
			UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
			UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
			UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
			UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
			UVD_CGC_CTRL__SYS_MODE_MASK |
			UVD_CGC_CTRL__UDEC_MODE_MASK |
			UVD_CGC_CTRL__MPEG2_MODE_MASK |
			UVD_CGC_CTRL__REGS_MODE_MASK |
			UVD_CGC_CTRL__RBC_MODE_MASK |
			UVD_CGC_CTRL__LMI_MC_MODE_MASK |
			UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
			UVD_CGC_CTRL__IDCT_MODE_MASK |
			UVD_CGC_CTRL__MPRD_MODE_MASK |
			UVD_CGC_CTRL__MPC_MODE_MASK |
			UVD_CGC_CTRL__LBSI_MODE_MASK |
			UVD_CGC_CTRL__LRBBM_MODE_MASK |
			UVD_CGC_CTRL__WCB_MODE_MASK |
			UVD_CGC_CTRL__VCPU_MODE_MASK |
			UVD_CGC_CTRL__JPEG_MODE_MASK |
			UVD_CGC_CTRL__SCPU_MODE_MASK |
			UVD_CGC_CTRL__JPEG2_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

	WREG32(mmUVD_CGC_CTRL, data);
	WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}

#if 0
static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		UVD_CGC_GATE__UDEC_MASK |
		UVD_CGC_GATE__MPEG2_MASK |
		UVD_CGC_GATE__RBC_MASK |
		UVD_CGC_GATE__LMI_MC_MASK |
		UVD_CGC_GATE__IDCT_MASK |
		UVD_CGC_GATE__MPRD_MASK |
		UVD_CGC_GATE__MPC_MASK |
		UVD_CGC_GATE__LBSI_MASK |
		UVD_CGC_GATE__LRBBM_MASK |
		UVD_CGC_GATE__UDEC_RE_MASK |
		UVD_CGC_GATE__UDEC_CM_MASK |
		UVD_CGC_GATE__UDEC_IT_MASK |
		UVD_CGC_GATE__UDEC_DB_MASK |
		UVD_CGC_GATE__UDEC_MP_MASK |
		UVD_CGC_GATE__WCB_MASK |
		UVD_CGC_GATE__VCPU_MASK |
		UVD_CGC_GATE__SCPU_MASK |
		UVD_CGC_GATE__JPEG_MASK |
		UVD_CGC_GATE__JPEG2_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}

static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (enable) {
		/* wait for STATUS to clear */
		if (uvd_v6_0_wait_for_idle(handle))
			return -EBUSY;
		uvd_v6_0_enable_clock_gating(adev, true);
		/* enable HW gates because UVD is idle */
/*		uvd_v6_0_set_hw_clock_gating(adev); */
	} else {
		/* disable HW gating and enable SW gating */
		uvd_v6_0_enable_clock_gating(adev, false);
	}
	uvd_v6_0_set_sw_clock_gating(adev);
	return 0;
}

static int uvd_v6_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the SMC and the HW blocks.
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);

	if (state == AMD_PG_STATE_GATE)
		uvd_v6_0_stop(adev);
	else
		ret = uvd_v6_0_start(adev);

	return ret;
}

static void uvd_v6_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	mutex_lock(&adev->pm.mutex);

	if (adev->flags & AMD_IS_APU)
		data = RREG32_SMC(ixCURRENT_PG_STATUS_APU);
	else
		data = RREG32_SMC(ixCURRENT_PG_STATUS);

	if (data & CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
		DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
		goto out;
	}

	/* AMD_CG_SUPPORT_UVD_MGCG */
	data = RREG32(mmUVD_CGC_CTRL);
	if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
		*flags |= AMD_CG_SUPPORT_UVD_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}

static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
	.name = "uvd_v6_0",
	.early_init = uvd_v6_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v6_0_sw_init,
	.sw_fini = uvd_v6_0_sw_fini,
	.hw_init = uvd_v6_0_hw_init,
	.hw_fini = uvd_v6_0_hw_fini,
	.suspend = uvd_v6_0_suspend,
	.resume = uvd_v6_0_resume,
	.is_idle = uvd_v6_0_is_idle,
	.wait_for_idle = uvd_v6_0_wait_for_idle,
	.check_soft_reset = uvd_v6_0_check_soft_reset,
	.pre_soft_reset = uvd_v6_0_pre_soft_reset,
	.soft_reset = uvd_v6_0_soft_reset,
	.post_soft_reset = uvd_v6_0_post_soft_reset,
	.set_clockgating_state = uvd_v6_0_set_clockgating_state,
	.set_powergating_state = uvd_v6_0_set_powergating_state,
	.get_clockgating_state = uvd_v6_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		6 + /* hdp invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v6_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v6_0_ring_emit_wreg,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.emit_frame_size =
		6 + /* hdp invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		VI_FLUSH_GPU_TLB_NUM_WREG * 6 + 8 + /* uvd_v6_0_ring_emit_vm_flush */
		14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
	.emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v6_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v6_0_ring_emit_wreg,
};

static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD_ENC,
	.align_mask = 0x3f,
	.nop = HEVC_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.get_rptr = uvd_v6_0_enc_ring_get_rptr,
	.get_wptr = uvd_v6_0_enc_ring_get_wptr,
	.set_wptr = uvd_v6_0_enc_ring_set_wptr,
	.emit_frame_size =
		4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */
		5 + /* uvd_v6_0_enc_ring_emit_vm_flush */
		5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */
		1, /* uvd_v6_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */
	.emit_ib = uvd_v6_0_enc_ring_emit_ib,
	.emit_fence = uvd_v6_0_enc_ring_emit_fence,
	.emit_vm_flush = uvd_v6_0_enc_ring_emit_vm_flush,
	.emit_pipeline_sync = uvd_v6_0_enc_ring_emit_pipeline_sync,
	.test_ring = uvd_v6_0_enc_ring_test_ring,
	.test_ib = uvd_v6_0_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = uvd_v6_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
	if (adev->asic_type >= CHIP_POLARIS10) {
		adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_vm_funcs;
		DRM_INFO("UVD is enabled in VM mode\n");
	} else {
		adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_phys_funcs;
		DRM_INFO("UVD is enabled in physical mode\n");
	}
}

static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_enc_rings; ++i)
		adev->uvd.inst->ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs;

	DRM_INFO("UVD ENC is enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
	.set = uvd_v6_0_set_interrupt_state,
	.process = uvd_v6_0_process_interrupt,
};

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	if (uvd_v6_0_enc_support(adev))
		adev->uvd.inst->irq.num_types = adev->uvd.num_enc_rings + 1;
	else
		adev->uvd.inst->irq.num_types = 1;

	adev->uvd.inst->irq.funcs = &uvd_v6_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v6_0_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_2_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 2,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_3_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 3,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
};
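
/*
 * Illustrative sketch, not part of this file: these ip_block descriptors
 * are consumed by the SoC setup code, which registers the matching UVD
 * version per ASIC with amdgpu_device_ip_block_add() (a real helper; the
 * surrounding switch below is a simplified sketch and the exact per-chip
 * mapping should be taken from vi.c, not from here).
 */
#if 0
	switch (adev->asic_type) {
	case CHIP_FIJI:
		amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
		break;
	case CHIP_POLARIS10:
		amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
		break;
	default:
		break;
	}
#endif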