/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "mmsch_v1_0.h"

#include "uvd/uvd_7_0_offset.h"
#include "uvd/uvd_7_0_sh_mask.h"
#include "vce/vce_4_0_offset.h"
#include "vce/vce_4_0_default.h"
#include "vce/vce_4_0_sh_mask.h"
#include "nbif/nbif_6_1_offset.h"
#include "hdp/hdp_4_0_offset.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "mmhub/mmhub_1_0_sh_mask.h"
#include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"

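/*
 * The harvesting fuse register below is defined locally here rather than
 * taken from the uvd_7_0 register headers.  A set UVD_DISABLE bit means
 * the corresponding UVD instance is fused off on Vega20.
 */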
#define mmUVD_PG0_CC_UVD_HARVESTING                                                                    0x00c7
#define mmUVD_PG0_CC_UVD_HARVESTING_BASE_IDX                                                           1
//UVD_PG0_CC_UVD_HARVESTING
#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE__SHIFT                                                         0x1
#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK                                                           0x00000002L

#define UVD7_MAX_HW_INSTANCES_VEGA20			2

static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v7_0_start(struct amdgpu_device *adev);
static void uvd_v7_0_stop(struct amdgpu_device *adev);
static int uvd_v7_0_sriov_start(struct amdgpu_device *adev);

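/*
 * Each UVD instance has its own interrupt client in the IH; this table
 * maps the instance index to the matching SOC15 IH client ID.
 */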
static int amdgpu_ih_clientid_uvds[] = {
	SOC15_IH_CLIENTID_UVD,
	SOC15_IH_CLIENTID_UVD1
};

/**
 * uvd_v7_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v7_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
}

/**
 * uvd_v7_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v7_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v7_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
}

/**
 * uvd_v7_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v7_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

/**
 * uvd_v7_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		return;
	}

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR,
			lower_32_bits(ring->wptr));
	else
		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2,
			lower_32_bits(ring->wptr));
}

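/*
 * Note: the ring test below is skipped under SR-IOV, where the encode
 * rings are brought up by the MMSCH host firmware rather than by the
 * guest driver.
 */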
/**
 * uvd_v7_0_enc_ring_test_ring - test if UVD ENC ring is working
 *
 * @ring: the engine to test on
 *
 */
static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: uvd enc failed to lock (%d)ring %d (%d).\n",
			  ring->me, ring->idx, r);
		return r;
	}

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("(%d)ring test on %d succeeded in %d usecs\n",
			 ring->me, ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: (%d)ring %d test failed\n",
			  ring->me, ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}

/**
 * uvd_v7_0_enc_get_create_msg - generate a UVD ENC create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				       struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * uvd_v7_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @direct: submit the msg directly to the ring, bypassing the scheduler
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
				 bool direct, struct dma_fence **fence);
int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
				 bool direct, struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002;
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (direct)
		r = amdgpu_job_submit_direct(job, ring, &f);
	else
		r = amdgpu_job_submit(job, &ring->adev->vce.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

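/*
 * The IB test below opens a dummy encode session with a create msg and
 * immediately closes it again with a destroy msg, then waits on the
 * destroy fence to confirm the engine consumed both IBs.
 */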
/**
 * uvd_v7_0_enc_ring_test_ib - test if UVD ENC IBs are working
 *
 * @ring: the engine to test on
 * @timeout: how long to wait for the fence, in jiffies
 *
 */
static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	r = uvd_v7_0_enc_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ring->me, r);
		goto error;
	}

	r = uvd_v7_0_enc_get_destroy_msg(ring, 1, true, &fence);
	if (r) {
		DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ring->me, r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ring->me);
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ring->me, r);
	} else {
		DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ring->me, ring->idx);
		r = 0;
	}
error:
	dma_fence_put(fence);
	return r;
}

static int uvd_v7_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_VEGA20) {
		u32 harvest;
		int i;

		adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			harvest = RREG32_SOC15(UVD, i, mmUVD_PG0_CC_UVD_HARVESTING);
			if (harvest & UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK) {
				adev->uvd.harvest_config |= 1 << i;
			}
		}
		if (adev->uvd.harvest_config == (AMDGPU_UVD_HARVEST_UVD0 |
						 AMDGPU_UVD_HARVEST_UVD1))
			/* both instances are harvested, disable the block */
			return -ENOENT;
	} else {
		adev->uvd.num_uvd_inst = 1;
	}

	if (amdgpu_sriov_vf(adev))
		adev->uvd.num_enc_rings = 1;
	else
		adev->uvd.num_enc_rings = 2;
	uvd_v7_0_set_ring_funcs(adev);
	uvd_v7_0_set_enc_ring_funcs(adev);
	uvd_v7_0_set_irq_funcs(adev);

	return 0;
}

static int uvd_v7_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;

	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		/* UVD TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->uvd.inst[j].irq);
		if (r)
			return r;

		/* UVD ENC TRAP */
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + UVD_7_0__SRCID__UVD_ENC_GEN_PURP, &adev->uvd.inst[j].irq);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;
		hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].ucode_id = AMDGPU_UCODE_ID_UVD;
		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].fw = adev->uvd.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		DRM_INFO("PSP loading UVD firmware\n");
	}

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		if (!amdgpu_sriov_vf(adev)) {
			ring = &adev->uvd.inst[j].ring;
			ksprintf(ring->name, "uvd<%d>", j);
			r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
			if (r)
				return r;
		}

		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst[j].ring_enc[i];
			ksprintf(ring->name, "uvd_enc%d<%d>", i, j);
			if (amdgpu_sriov_vf(adev)) {
				ring->use_doorbell = true;

				/* currently only the first encoding ring is
				 * used under SR-IOV, so park the other rings
				 * at an unused doorbell location.
				 */
				if (i == 0)
					ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2;
				else
					ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING2_3 * 2 + 1;
			}
			r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	r = amdgpu_uvd_entity_init(adev);
	if (r)
		return r;

	r = amdgpu_virt_alloc_mm_table(adev);
	if (r)
		return r;

	return r;
}

static int uvd_v7_0_sw_fini(void *handle)
{
	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_virt_free_mm_table(adev);

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
	}
	return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v7_0_hw_init - start and test UVD block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v7_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	uint32_t tmp;
	int i, j, r;

	if (amdgpu_sriov_vf(adev))
		r = uvd_v7_0_sriov_start(adev);
	else
		r = uvd_v7_0_start(adev);
	if (r)
		goto done;

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		ring = &adev->uvd.inst[j].ring;

		if (!amdgpu_sriov_vf(adev)) {
			ring->ready = true;
			r = amdgpu_ring_test_ring(ring);
			if (r) {
				ring->ready = false;
				goto done;
			}

			r = amdgpu_ring_alloc(ring, 10);
			if (r) {
				DRM_ERROR("amdgpu: (%d)ring failed to lock UVD ring (%d).\n", j, r);
				goto done;
			}

			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0);
			amdgpu_ring_write(ring, tmp);
			amdgpu_ring_write(ring, 0xFFFFF);

			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0);
			amdgpu_ring_write(ring, tmp);
			amdgpu_ring_write(ring, 0xFFFFF);

			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0);
			amdgpu_ring_write(ring, tmp);
			amdgpu_ring_write(ring, 0xFFFFF);

			/* Clear timeout status bits */
			amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_TIMEOUT_STATUS), 0));
			amdgpu_ring_write(ring, 0x8);

			amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_CNTL), 0));
			amdgpu_ring_write(ring, 3);

			amdgpu_ring_commit(ring);
		}

		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst[j].ring_enc[i];
			ring->ready = true;
			r = amdgpu_ring_test_ring(ring);
			if (r) {
				ring->ready = false;
				goto done;
			}
		}
	}
done:
	if (!r)
		DRM_INFO("UVD and UVD ENC initialized successfully.\n");

	return r;
}

/**
 * uvd_v7_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v7_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	if (!amdgpu_sriov_vf(adev))
		uvd_v7_0_stop(adev);
	else {
		/* full access mode, so don't touch any UVD register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
	}

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		adev->uvd.inst[i].ring.ready = false;
	}

	return 0;
}

static int uvd_v7_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v7_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v7_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v7_0_hw_init(adev);
}

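/*
 * VCPU cache window layout programmed below: window 0 maps the firmware
 * image, window 1 the decoder heap, and window 2 the stack plus the
 * per-session context area.  With PSP firmware loading the firmware lives
 * outside the UVD BO, so the BO-relative offset starts at 0 instead of
 * after the firmware image.
 */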
/**
 * uvd_v7_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
	uint32_t offset;
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
			offset = 0;
		} else {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				lower_32_bits(adev->uvd.inst[i].gpu_addr));
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				upper_32_bits(adev->uvd.inst[i].gpu_addr));
			offset = size;
		}

		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
					AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);

		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
				lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
				upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE);

		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
				lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
				upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2,
				AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));

		WREG32_SOC15(UVD, i, mmUVD_UDEC_ADDR_CONFIG,
				adev->gfx.config.gb_addr_config);
		WREG32_SOC15(UVD, i, mmUVD_UDEC_DB_ADDR_CONFIG,
				adev->gfx.config.gb_addr_config);
		WREG32_SOC15(UVD, i, mmUVD_UDEC_DBW_ADDR_CONFIG,
				adev->gfx.config.gb_addr_config);

		WREG32_SOC15(UVD, i, mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
	}
}

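/*
 * MMSCH handshake: the guest publishes the descriptor table (address, VMID
 * and size), writes the 0x10000001 init token to the host mailbox, then
 * polls the response register for 0x10000002, giving up after roughly
 * 10 ms (1000 iterations x 10 us).
 */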
static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
				struct amdgpu_mm_table *table)
{
	uint32_t data = 0, loop;
	uint64_t addr = table->gpu_addr;
	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr;
	uint32_t size;
	int i;

	size = header->header_size + header->vce_table_size + header->uvd_table_size;

	/* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of memory descriptor location */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));

	/* 2, update vmid of descriptor */
	data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID);
	data &= ~VCE_MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	data |= (0 << VCE_MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); /* use domain0 for MM scheduler */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID, data);

	/* 3, notify mmsch about the size of this descriptor */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0);
		adev->wb.wb[adev->uvd.inst[i].ring_enc[0].wptr_offs] = 0;
		adev->uvd.inst[i].ring_enc[0].wptr = 0;
		adev->uvd.inst[i].ring_enc[0].wptr_old = 0;
	}
	/* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001);

	data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
	loop = 1000;
	while ((data & 0x10000002) != 0x10000002) {
		udelay(10);
		data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
		loop--;
		if (!loop)
			break;
	}

	if (!loop) {
		dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
		return -EBUSY;
	}

	return 0;
}

static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t offset, size, tmp;
	uint32_t table_size = 0;
	struct mmsch_v1_0_cmd_direct_write direct_wt = { {0} };
	struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
	struct mmsch_v1_0_cmd_direct_polling direct_poll = { {0} };
	struct mmsch_v1_0_cmd_end end = { {0} };
	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table;
	uint8_t i = 0;

	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

	if (header->uvd_table_offset == 0 && header->uvd_table_size == 0) {
		header->version = MMSCH_VERSION;
		header->header_size = sizeof(struct mmsch_v1_0_init_header) >> 2;

		if (header->vce_table_offset == 0 && header->vce_table_size == 0)
			header->uvd_table_offset = header->header_size;
		else
			header->uvd_table_offset = header->vce_table_size + header->vce_table_offset;

		init_table += header->uvd_table_offset;

		for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;
			ring = &adev->uvd.inst[i].ring;
			ring->wptr = 0;
			size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->datasize + 4);

			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
							   0xFFFFFFFF, 0x00000004);
			/* mc resume */
			if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
							    lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
							    upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
				offset = 0;
			} else {
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
							    lower_32_bits(adev->uvd.inst[i].gpu_addr));
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
							    upper_32_bits(adev->uvd.inst[i].gpu_addr));
				offset = size;
			}

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
						    AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), size);

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
						    lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
						    upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE);

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
						    lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
						    upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
						    AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
			/* mc resume end */

			/* disable clock gating */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_CGC_CTRL),
							   ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK, 0);

			/* disable interrupt */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
							   ~UVD_MASTINT_EN__VCPU_EN_MASK, 0);

			/* stall UMC and register bus before resetting VCPU */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
							   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
							   UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

			/* put LMI, VCPU, RBC etc... into reset */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
						    (uint32_t)(UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK));

			/* initialize UVD memory controller */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL),
						    (uint32_t)((0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
							       UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
							       UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
							       UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
							       UVD_LMI_CTRL__REQ_MODE_MASK |
							       0x00100000L));

			/* take all subblocks out of reset, except VCPU */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
						    UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

			/* enable VCPU clock */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
						    UVD_VCPU_CNTL__CLK_EN_MASK);

			/* enable master interrupt */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
							   ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
							   (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

			/* clear the bit 4 of UVD_STATUS */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
							   ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT), 0);

			/* force RBC into idle state */
			size = order_base_2(ring->ring_size);
			tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size);
			tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);

			ring = &adev->uvd.inst[i].ring_enc[0];
			ring->wptr = 0;
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO), ring->gpu_addr);
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE), ring->ring_size / 4);

			/* boot up the VCPU */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET), 0);

			/* enable UMC */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
							   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);

			MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0x02, 0x02);
		}
		/* add end packet */
		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
		table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
		header->uvd_table_size = table_size;

	}
	return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table);
}

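/*
 * Bare-metal bring-up sequence, per instance: disable dynamic power and
 * clock gating, program the memory controller windows, stall the LMI/UMC
 * and hold LMI/VCPU/RBC in soft reset while the LMI and MPC mux are set
 * up, then release the resets and turn on the VCPU clock.
 */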
925b843c749SSergey Zigachev /**
926b843c749SSergey Zigachev  * uvd_v7_0_start - start UVD block
927b843c749SSergey Zigachev  *
928b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
929b843c749SSergey Zigachev  *
930b843c749SSergey Zigachev  * Setup and start the UVD block
931b843c749SSergey Zigachev  */
uvd_v7_0_start(struct amdgpu_device * adev)932b843c749SSergey Zigachev static int uvd_v7_0_start(struct amdgpu_device *adev)
933b843c749SSergey Zigachev {
934b843c749SSergey Zigachev 	struct amdgpu_ring *ring;
935b843c749SSergey Zigachev 	uint32_t rb_bufsz, tmp;
936b843c749SSergey Zigachev 	uint32_t lmi_swap_cntl;
937b843c749SSergey Zigachev 	uint32_t mp_swap_cntl;
938b843c749SSergey Zigachev 	int i, j, k, r;
939b843c749SSergey Zigachev 
940b843c749SSergey Zigachev 	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
941b843c749SSergey Zigachev 		if (adev->uvd.harvest_config & (1 << k))
942b843c749SSergey Zigachev 			continue;
943b843c749SSergey Zigachev 		/* disable DPG */
944b843c749SSergey Zigachev 		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_POWER_STATUS), 0,
945b843c749SSergey Zigachev 				~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
946b843c749SSergey Zigachev 	}
947b843c749SSergey Zigachev 
948b843c749SSergey Zigachev 	/* disable byte swapping */
949b843c749SSergey Zigachev 	lmi_swap_cntl = 0;
950b843c749SSergey Zigachev 	mp_swap_cntl = 0;
951b843c749SSergey Zigachev 
952b843c749SSergey Zigachev 	uvd_v7_0_mc_resume(adev);
953b843c749SSergey Zigachev 
954b843c749SSergey Zigachev 	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
955b843c749SSergey Zigachev 		if (adev->uvd.harvest_config & (1 << k))
956b843c749SSergey Zigachev 			continue;
957b843c749SSergey Zigachev 		ring = &adev->uvd.inst[k].ring;
958b843c749SSergey Zigachev 		/* disable clock gating */
959b843c749SSergey Zigachev 		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_CGC_CTRL), 0,
960b843c749SSergey Zigachev 				~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK);
961b843c749SSergey Zigachev 
962b843c749SSergey Zigachev 		/* disable interrupt */
963b843c749SSergey Zigachev 		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN), 0,
964b843c749SSergey Zigachev 				~UVD_MASTINT_EN__VCPU_EN_MASK);
965b843c749SSergey Zigachev 
966b843c749SSergey Zigachev 		/* stall UMC and register bus before resetting VCPU */
967b843c749SSergey Zigachev 		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2),
968b843c749SSergey Zigachev 				UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
969b843c749SSergey Zigachev 				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
970b843c749SSergey Zigachev 		mdelay(1);
971b843c749SSergey Zigachev 
972b843c749SSergey Zigachev 		/* put LMI, VCPU, RBC etc... into reset */
973b843c749SSergey Zigachev 		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
974b843c749SSergey Zigachev 			UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
975b843c749SSergey Zigachev 			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
976b843c749SSergey Zigachev 			UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
977b843c749SSergey Zigachev 			UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
978b843c749SSergey Zigachev 			UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
979b843c749SSergey Zigachev 			UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
980b843c749SSergey Zigachev 			UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
981b843c749SSergey Zigachev 			UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
982b843c749SSergey Zigachev 		mdelay(5);
983b843c749SSergey Zigachev 
984b843c749SSergey Zigachev 		/* initialize UVD memory controller */
985b843c749SSergey Zigachev 		WREG32_SOC15(UVD, k, mmUVD_LMI_CTRL,
986b843c749SSergey Zigachev 			(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
987b843c749SSergey Zigachev 			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
988b843c749SSergey Zigachev 			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
989b843c749SSergey Zigachev 			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
990b843c749SSergey Zigachev 			UVD_LMI_CTRL__REQ_MODE_MASK |
991b843c749SSergey Zigachev 			0x00100000L);
992b843c749SSergey Zigachev 
993b843c749SSergey Zigachev #ifdef __BIG_ENDIAN
994b843c749SSergey Zigachev 		/* swap (8 in 32) RB and IB */
995b843c749SSergey Zigachev 		lmi_swap_cntl = 0xa;
996b843c749SSergey Zigachev 		mp_swap_cntl = 0;
997b843c749SSergey Zigachev #endif
998b843c749SSergey Zigachev 		WREG32_SOC15(UVD, k, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
999b843c749SSergey Zigachev 		WREG32_SOC15(UVD, k, mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
1000b843c749SSergey Zigachev 
1001b843c749SSergey Zigachev 		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA0, 0x40c2040);
1002b843c749SSergey Zigachev 		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA1, 0x0);
1003b843c749SSergey Zigachev 		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB0, 0x40c2040);
1004b843c749SSergey Zigachev 		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB1, 0x0);
1005b843c749SSergey Zigachev 		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_ALU, 0);
1006b843c749SSergey Zigachev 		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUX, 0x88);
1007b843c749SSergey Zigachev 
1008b843c749SSergey Zigachev 		/* take all subblocks out of reset, except VCPU */
1009b843c749SSergey Zigachev 		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
1010b843c749SSergey Zigachev 				UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1011b843c749SSergey Zigachev 		mdelay(5);
1012b843c749SSergey Zigachev 
1013b843c749SSergey Zigachev 		/* enable VCPU clock */
1014b843c749SSergey Zigachev 		WREG32_SOC15(UVD, k, mmUVD_VCPU_CNTL,
1015b843c749SSergey Zigachev 				UVD_VCPU_CNTL__CLK_EN_MASK);
1016b843c749SSergey Zigachev 
1017b843c749SSergey Zigachev 		/* enable UMC */
1018b843c749SSergey Zigachev 		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2), 0,
1019b843c749SSergey Zigachev 				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1020b843c749SSergey Zigachev 
1021b843c749SSergey Zigachev 		/* boot up the VCPU */
1022b843c749SSergey Zigachev 		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET, 0);
1023b843c749SSergey Zigachev 		mdelay(10);
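		/*
		 * Up to 10 boot attempts: each one polls UVD_STATUS for ~1s
		 * (100 x 10ms) waiting for the VCPU-ready bit (bit 1); on
		 * timeout, pulse the VCPU soft reset and try again.
		 */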
1024b843c749SSergey Zigachev 
1025b843c749SSergey Zigachev 		for (i = 0; i < 10; ++i) {
1026b843c749SSergey Zigachev 			uint32_t status;
1027b843c749SSergey Zigachev 
1028b843c749SSergey Zigachev 			for (j = 0; j < 100; ++j) {
1029b843c749SSergey Zigachev 				status = RREG32_SOC15(UVD, k, mmUVD_STATUS);
1030b843c749SSergey Zigachev 				if (status & 2)
1031b843c749SSergey Zigachev 					break;
1032b843c749SSergey Zigachev 				mdelay(10);
1033b843c749SSergey Zigachev 			}
1034b843c749SSergey Zigachev 			r = 0;
1035b843c749SSergey Zigachev 			if (status & 2)
1036b843c749SSergey Zigachev 				break;
1037b843c749SSergey Zigachev 
1038b843c749SSergey Zigachev 			DRM_ERROR("UVD(%d) not responding, trying to reset the VCPU!!!\n", k);
1039b843c749SSergey Zigachev 			WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET),
1040b843c749SSergey Zigachev 					UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
1041b843c749SSergey Zigachev 					~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1042b843c749SSergey Zigachev 			mdelay(10);
1043b843c749SSergey Zigachev 			WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET), 0,
1044b843c749SSergey Zigachev 					~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1045b843c749SSergey Zigachev 			mdelay(10);
1046b843c749SSergey Zigachev 			r = -1;
1047b843c749SSergey Zigachev 		}
1048b843c749SSergey Zigachev 
1049b843c749SSergey Zigachev 		if (r) {
1050b843c749SSergey Zigachev 			DRM_ERROR("UVD(%d) not responding, giving up!!!\n", k);
1051b843c749SSergey Zigachev 			return r;
1052b843c749SSergey Zigachev 		}
1053b843c749SSergey Zigachev 		/* enable master interrupt */
1054b843c749SSergey Zigachev 		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN),
1055b843c749SSergey Zigachev 			(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
1056b843c749SSergey Zigachev 			~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
1057b843c749SSergey Zigachev 
1058b843c749SSergey Zigachev 		/* clear the bit 4 of UVD_STATUS */
1059b843c749SSergey Zigachev 		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_STATUS), 0,
1060b843c749SSergey Zigachev 				~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
1061b843c749SSergey Zigachev 
1062b843c749SSergey Zigachev 		/* force RBC into idle state */
1063b843c749SSergey Zigachev 		rb_bufsz = order_base_2(ring->ring_size);
1064b843c749SSergey Zigachev 		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
1065b843c749SSergey Zigachev 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
1066b843c749SSergey Zigachev 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
1067b843c749SSergey Zigachev 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
1068b843c749SSergey Zigachev 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
1069b843c749SSergey Zigachev 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
1070b843c749SSergey Zigachev 		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_CNTL, tmp);
1071b843c749SSergey Zigachev 
1072b843c749SSergey Zigachev 		/* set the write pointer delay */
1073b843c749SSergey Zigachev 		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR_CNTL, 0);
1074b843c749SSergey Zigachev 
1075b843c749SSergey Zigachev 		/* set the wb address */
1076b843c749SSergey Zigachev 		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR_ADDR,
1077b843c749SSergey Zigachev 				(upper_32_bits(ring->gpu_addr) >> 2));
1078b843c749SSergey Zigachev 
1079b843c749SSergey Zigachev 		/* program the RB_BASE for the ring buffer */
1080b843c749SSergey Zigachev 		WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
1081b843c749SSergey Zigachev 				lower_32_bits(ring->gpu_addr));
1082b843c749SSergey Zigachev 		WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
1083b843c749SSergey Zigachev 				upper_32_bits(ring->gpu_addr));
1084b843c749SSergey Zigachev 
1085b843c749SSergey Zigachev 		/* Initialize the ring buffer's read and write pointers */
1086b843c749SSergey Zigachev 		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR, 0);
1087b843c749SSergey Zigachev 
1088b843c749SSergey Zigachev 		ring->wptr = RREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR);
1089b843c749SSergey Zigachev 		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR,
1090b843c749SSergey Zigachev 				lower_32_bits(ring->wptr));
1091b843c749SSergey Zigachev 
1092b843c749SSergey Zigachev 		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_RBC_RB_CNTL), 0,
1093b843c749SSergey Zigachev 				~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
1094b843c749SSergey Zigachev 
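		/*
		 * Program both encode rings; read and write pointers start
		 * out equal, so the rings come up empty.
		 */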
1095b843c749SSergey Zigachev 		ring = &adev->uvd.inst[k].ring_enc[0];
1096b843c749SSergey Zigachev 		WREG32_SOC15(UVD, k, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
1097b843c749SSergey Zigachev 		WREG32_SOC15(UVD, k, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
1098b843c749SSergey Zigachev 		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO, ring->gpu_addr);
1099b843c749SSergey Zigachev 		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
1100b843c749SSergey Zigachev 		WREG32_SOC15(UVD, k, mmUVD_RB_SIZE, ring->ring_size / 4);
1101b843c749SSergey Zigachev 
1102b843c749SSergey Zigachev 		ring = &adev->uvd.inst[k].ring_enc[1];
1103b843c749SSergey Zigachev 		WREG32_SOC15(UVD, k, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
1104b843c749SSergey Zigachev 		WREG32_SOC15(UVD, k, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
1105b843c749SSergey Zigachev 		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO2, ring->gpu_addr);
1106b843c749SSergey Zigachev 		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
1107b843c749SSergey Zigachev 		WREG32_SOC15(UVD, k, mmUVD_RB_SIZE2, ring->ring_size / 4);
1108b843c749SSergey Zigachev 	}
1109b843c749SSergey Zigachev 	return 0;
1110b843c749SSergey Zigachev }
1111b843c749SSergey Zigachev 
1112b843c749SSergey Zigachev /**
1113b843c749SSergey Zigachev  * uvd_v7_0_stop - stop UVD block
1114b843c749SSergey Zigachev  *
1115b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
1116b843c749SSergey Zigachev  *
1117b843c749SSergey Zigachev  * Stop the UVD block
1118b843c749SSergey Zigachev  */
1119b843c749SSergey Zigachev static void uvd_v7_0_stop(struct amdgpu_device *adev)
1120b843c749SSergey Zigachev {
1121b843c749SSergey Zigachev 	uint8_t i = 0;
1122b843c749SSergey Zigachev 
1123b843c749SSergey Zigachev 	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
1124b843c749SSergey Zigachev 		if (adev->uvd.harvest_config & (1 << i))
1125b843c749SSergey Zigachev 			continue;
1126b843c749SSergey Zigachev 		/* force RBC into idle state */
1127b843c749SSergey Zigachev 		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101);
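		/*
		 * 0x11010101 decodes (assumption, based on the UVD_RBC_RB_CNTL
		 * fields programmed in uvd_v7_0_start()) to RB_NO_FETCH,
		 * RB_NO_UPDATE and RB_RPTR_WR_EN plus minimal BUFSZ/BLKSZ,
		 * i.e. the RBC stops fetching commands.
		 */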
1128b843c749SSergey Zigachev 
1129b843c749SSergey Zigachev 		/* Stall UMC and register bus before resetting VCPU */
1130b843c749SSergey Zigachev 		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
1131b843c749SSergey Zigachev 				UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
1132b843c749SSergey Zigachev 				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1133b843c749SSergey Zigachev 		mdelay(1);
1134b843c749SSergey Zigachev 
1135b843c749SSergey Zigachev 		/* put VCPU into reset */
1136b843c749SSergey Zigachev 		WREG32_SOC15(UVD, i, mmUVD_SOFT_RESET,
1137b843c749SSergey Zigachev 				UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1138b843c749SSergey Zigachev 		mdelay(5);
1139b843c749SSergey Zigachev 
1140b843c749SSergey Zigachev 		/* disable VCPU clock */
1141b843c749SSergey Zigachev 		WREG32_SOC15(UVD, i, mmUVD_VCPU_CNTL, 0x0);
1142b843c749SSergey Zigachev 
1143b843c749SSergey Zigachev 		/* Unstall UMC and register bus */
1144b843c749SSergey Zigachev 		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
1145b843c749SSergey Zigachev 				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1146b843c749SSergey Zigachev 	}
1147b843c749SSergey Zigachev }
1148b843c749SSergey Zigachev 
1149b843c749SSergey Zigachev /**
1150b843c749SSergey Zigachev  * uvd_v7_0_ring_emit_fence - emit a fence & trap command
1151b843c749SSergey Zigachev  *
1152b843c749SSergey Zigachev  * @ring: amdgpu_ring pointer
1153b843c749SSergey Zigachev  * @addr: GPU address where the fence value is written
 * @seq: sequence number to signal
 * @flags: fence flags (AMDGPU_FENCE_FLAG_*)
1154b843c749SSergey Zigachev  *
1155b843c749SSergey Zigachev  * Write a fence and a trap command to the ring.
1156b843c749SSergey Zigachev  */
1157*78973132SSergey Zigachev static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, uint64_t addr, uint64_t seq,
1158b843c749SSergey Zigachev 				     unsigned flags)
1159b843c749SSergey Zigachev {
1160b843c749SSergey Zigachev 	struct amdgpu_device *adev = ring->adev;
1161b843c749SSergey Zigachev 
1162b843c749SSergey Zigachev 	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1163b843c749SSergey Zigachev 
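	/*
	 * Fence: write seq to CONTEXT_ID and addr to DATA0/DATA1, then issue
	 * GPCOM command 0 (the fence write); the second command, 2, is the
	 * trap that raises the completion interrupt.
	 */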
1164b843c749SSergey Zigachev 	amdgpu_ring_write(ring,
1165b843c749SSergey Zigachev 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
1166b843c749SSergey Zigachev 	amdgpu_ring_write(ring, seq);
1167b843c749SSergey Zigachev 	amdgpu_ring_write(ring,
1168b843c749SSergey Zigachev 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
1169b843c749SSergey Zigachev 	amdgpu_ring_write(ring, addr & 0xffffffff);
1170b843c749SSergey Zigachev 	amdgpu_ring_write(ring,
1171b843c749SSergey Zigachev 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
1172b843c749SSergey Zigachev 	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
1173b843c749SSergey Zigachev 	amdgpu_ring_write(ring,
1174b843c749SSergey Zigachev 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
1175b843c749SSergey Zigachev 	amdgpu_ring_write(ring, 0);
1176b843c749SSergey Zigachev 
1177b843c749SSergey Zigachev 	amdgpu_ring_write(ring,
1178b843c749SSergey Zigachev 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
1179b843c749SSergey Zigachev 	amdgpu_ring_write(ring, 0);
1180b843c749SSergey Zigachev 	amdgpu_ring_write(ring,
1181b843c749SSergey Zigachev 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
1182b843c749SSergey Zigachev 	amdgpu_ring_write(ring, 0);
1183b843c749SSergey Zigachev 	amdgpu_ring_write(ring,
1184b843c749SSergey Zigachev 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
1185b843c749SSergey Zigachev 	amdgpu_ring_write(ring, 2);
1186b843c749SSergey Zigachev }
1187b843c749SSergey Zigachev 
1188b843c749SSergey Zigachev /**
1189b843c749SSergey Zigachev  * uvd_v7_0_enc_ring_emit_fence - emit an enc fence & trap command
1190b843c749SSergey Zigachev  *
1191b843c749SSergey Zigachev  * @ring: amdgpu_ring pointer
1192b843c749SSergey Zigachev  * @addr: GPU address where the fence value is written
 * @seq: sequence number to signal
 * @flags: fence flags (AMDGPU_FENCE_FLAG_*)
1193b843c749SSergey Zigachev  *
1194b843c749SSergey Zigachev  * Write an enc fence and a trap command to the ring.
1195b843c749SSergey Zigachev  */
1196*78973132SSergey Zigachev static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, uint64_t addr,
1197*78973132SSergey Zigachev 			uint64_t seq, unsigned flags)
1198b843c749SSergey Zigachev {
1199b843c749SSergey Zigachev 
1200b843c749SSergey Zigachev 	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1201b843c749SSergey Zigachev 
1202b843c749SSergey Zigachev 	amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
1203b843c749SSergey Zigachev 	amdgpu_ring_write(ring, addr);
1204b843c749SSergey Zigachev 	amdgpu_ring_write(ring, upper_32_bits(addr));
1205b843c749SSergey Zigachev 	amdgpu_ring_write(ring, seq);
1206b843c749SSergey Zigachev 	amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
1207b843c749SSergey Zigachev }
1208b843c749SSergey Zigachev 
1209b843c749SSergey Zigachev /**
1210b843c749SSergey Zigachev  * uvd_v7_0_ring_emit_hdp_flush - skip HDP flushing
1211b843c749SSergey Zigachev  *
1212b843c749SSergey Zigachev  * @ring: amdgpu_ring pointer
1213b843c749SSergey Zigachev  */
1214b843c749SSergey Zigachev static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
1215b843c749SSergey Zigachev {
1216b843c749SSergey Zigachev 	/* The firmware doesn't seem to like touching registers at this point. */
1217b843c749SSergey Zigachev }
1218b843c749SSergey Zigachev 
1219b843c749SSergey Zigachev /**
1220b843c749SSergey Zigachev  * uvd_v7_0_ring_test_ring - register write test
1221b843c749SSergey Zigachev  *
1222b843c749SSergey Zigachev  * @ring: amdgpu_ring pointer
1223b843c749SSergey Zigachev  *
1224b843c749SSergey Zigachev  * Test if we can successfully write to the context register
1225b843c749SSergey Zigachev  */
1226b843c749SSergey Zigachev static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
1227b843c749SSergey Zigachev {
1228b843c749SSergey Zigachev 	struct amdgpu_device *adev = ring->adev;
1229b843c749SSergey Zigachev 	uint32_t tmp = 0;
1230b843c749SSergey Zigachev 	unsigned i;
1231b843c749SSergey Zigachev 	int r;
1232b843c749SSergey Zigachev 
1233b843c749SSergey Zigachev 	WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
1234b843c749SSergey Zigachev 	r = amdgpu_ring_alloc(ring, 3);
1235b843c749SSergey Zigachev 	if (r) {
1236b843c749SSergey Zigachev 		DRM_ERROR("amdgpu: (%d)cp failed to lock ring %d (%d).\n",
1237b843c749SSergey Zigachev 			  ring->me, ring->idx, r);
1238b843c749SSergey Zigachev 		return r;
1239b843c749SSergey Zigachev 	}
1240b843c749SSergey Zigachev 	amdgpu_ring_write(ring,
1241b843c749SSergey Zigachev 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
1242b843c749SSergey Zigachev 	amdgpu_ring_write(ring, 0xDEADBEEF);
1243b843c749SSergey Zigachev 	amdgpu_ring_commit(ring);
1244b843c749SSergey Zigachev 	for (i = 0; i < adev->usec_timeout; i++) {
1245b843c749SSergey Zigachev 		tmp = RREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID);
1246b843c749SSergey Zigachev 		if (tmp == 0xDEADBEEF)
1247b843c749SSergey Zigachev 			break;
1248b843c749SSergey Zigachev 		DRM_UDELAY(1);
1249b843c749SSergey Zigachev 	}
1250b843c749SSergey Zigachev 
1251b843c749SSergey Zigachev 	if (i < adev->usec_timeout) {
1252b843c749SSergey Zigachev 		DRM_DEBUG("(%d)ring test on %d succeeded in %d usecs\n",
1253b843c749SSergey Zigachev 			 ring->me, ring->idx, i);
1254b843c749SSergey Zigachev 	} else {
1255b843c749SSergey Zigachev 		DRM_ERROR("(%d)amdgpu: ring %d test failed (0x%08X)\n",
1256b843c749SSergey Zigachev 			  ring->me, ring->idx, tmp);
1257b843c749SSergey Zigachev 		r = -EINVAL;
1258b843c749SSergey Zigachev 	}
1259b843c749SSergey Zigachev 	return r;
1260b843c749SSergey Zigachev }
1261b843c749SSergey Zigachev 
1262b843c749SSergey Zigachev /**
1263b843c749SSergey Zigachev  * uvd_v7_0_ring_patch_cs_in_place - Patch the IB for command submission.
1264b843c749SSergey Zigachev  *
1265b843c749SSergey Zigachev  * @p: the CS parser with the IBs
1266b843c749SSergey Zigachev  * @ib_idx: which IB to patch
1267b843c749SSergey Zigachev  *
1268b843c749SSergey Zigachev  */
1269b843c749SSergey Zigachev static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
1270b843c749SSergey Zigachev 					   uint32_t ib_idx)
1271b843c749SSergey Zigachev {
1272b843c749SSergey Zigachev 	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
1273b843c749SSergey Zigachev 	unsigned i;
1274b843c749SSergey Zigachev 
1275b843c749SSergey Zigachev 	/* No patching necessary for the first instance */
1276b843c749SSergey Zigachev 	if (!p->ring->me)
1277b843c749SSergey Zigachev 		return 0;
1278b843c749SSergey Zigachev 
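	/*
	 * IBs are built by user space against the first instance's register
	 * offsets; rewrite each PACKET0 (register, value) pair so that it
	 * targets the second instance instead.
	 */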
1279b843c749SSergey Zigachev 	for (i = 0; i < ib->length_dw; i += 2) {
1280b843c749SSergey Zigachev 		uint32_t reg = amdgpu_get_ib_value(p, ib_idx, i);
1281b843c749SSergey Zigachev 
1282b843c749SSergey Zigachev 		reg -= p->adev->reg_offset[UVD_HWIP][0][1];
1283b843c749SSergey Zigachev 		reg += p->adev->reg_offset[UVD_HWIP][1][1];
1284b843c749SSergey Zigachev 
1285b843c749SSergey Zigachev 		amdgpu_set_ib_value(p, ib_idx, i, reg);
1286b843c749SSergey Zigachev 	}
1287b843c749SSergey Zigachev 	return 0;
1288b843c749SSergey Zigachev }
1289b843c749SSergey Zigachev 
1290b843c749SSergey Zigachev /**
1291b843c749SSergey Zigachev  * uvd_v7_0_ring_emit_ib - execute indirect buffer
1292b843c749SSergey Zigachev  *
1293b843c749SSergey Zigachev  * @ring: amdgpu_ring pointer
1294b843c749SSergey Zigachev  * @ib: indirect buffer to execute
1295b843c749SSergey Zigachev  *
1296b843c749SSergey Zigachev  * Write ring commands to execute the indirect buffer
1297b843c749SSergey Zigachev  */
1298b843c749SSergey Zigachev static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
1299b843c749SSergey Zigachev 				  struct amdgpu_ib *ib,
1300b843c749SSergey Zigachev 				  unsigned vmid, bool ctx_switch)
1301b843c749SSergey Zigachev {
1302b843c749SSergey Zigachev 	struct amdgpu_device *adev = ring->adev;
1303b843c749SSergey Zigachev 
1304b843c749SSergey Zigachev 	amdgpu_ring_write(ring,
1305b843c749SSergey Zigachev 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0));
1306b843c749SSergey Zigachev 	amdgpu_ring_write(ring, vmid);
1307b843c749SSergey Zigachev 
1308b843c749SSergey Zigachev 	amdgpu_ring_write(ring,
1309b843c749SSergey Zigachev 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
1310b843c749SSergey Zigachev 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1311b843c749SSergey Zigachev 	amdgpu_ring_write(ring,
1312b843c749SSergey Zigachev 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
1313b843c749SSergey Zigachev 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1314b843c749SSergey Zigachev 	amdgpu_ring_write(ring,
1315b843c749SSergey Zigachev 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_RBC_IB_SIZE), 0));
1316b843c749SSergey Zigachev 	amdgpu_ring_write(ring, ib->length_dw);
1317b843c749SSergey Zigachev }
1318b843c749SSergey Zigachev 
1319b843c749SSergey Zigachev /**
1320b843c749SSergey Zigachev  * uvd_v7_0_enc_ring_emit_ib - enc execute indirect buffer
1321b843c749SSergey Zigachev  *
1322b843c749SSergey Zigachev  * @ring: amdgpu_ring pointer
1323b843c749SSergey Zigachev  * @ib: indirect buffer to execute
1324b843c749SSergey Zigachev  *
1325b843c749SSergey Zigachev  * Write enc ring commands to execute the indirect buffer
1326b843c749SSergey Zigachev  */
1327b843c749SSergey Zigachev static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
1328b843c749SSergey Zigachev 		struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
1329b843c749SSergey Zigachev {
1330b843c749SSergey Zigachev 	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
1331b843c749SSergey Zigachev 	amdgpu_ring_write(ring, vmid);
1332b843c749SSergey Zigachev 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1333b843c749SSergey Zigachev 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1334b843c749SSergey Zigachev 	amdgpu_ring_write(ring, ib->length_dw);
1335b843c749SSergey Zigachev }
1336b843c749SSergey Zigachev 
1337b843c749SSergey Zigachev static void uvd_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
1338b843c749SSergey Zigachev 				    uint32_t reg, uint32_t val)
1339b843c749SSergey Zigachev {
1340b843c749SSergey Zigachev 	struct amdgpu_device *adev = ring->adev;
1341b843c749SSergey Zigachev 
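	/*
	 * Register write through the VCPU: DATA0 = register byte offset,
	 * DATA1 = value, then GPCOM command 8 (assumption: write-register
	 * command, following the convention of earlier UVD generations).
	 */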
1342b843c749SSergey Zigachev 	amdgpu_ring_write(ring,
1343b843c749SSergey Zigachev 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
1344b843c749SSergey Zigachev 	amdgpu_ring_write(ring, reg << 2);
1345b843c749SSergey Zigachev 	amdgpu_ring_write(ring,
1346b843c749SSergey Zigachev 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
1347b843c749SSergey Zigachev 	amdgpu_ring_write(ring, val);
1348b843c749SSergey Zigachev 	amdgpu_ring_write(ring,
1349b843c749SSergey Zigachev 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
1350b843c749SSergey Zigachev 	amdgpu_ring_write(ring, 8);
1351b843c749SSergey Zigachev }
1352b843c749SSergey Zigachev 
1353b843c749SSergey Zigachev static void uvd_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
1354b843c749SSergey Zigachev 					uint32_t val, uint32_t mask)
1355b843c749SSergey Zigachev {
1356b843c749SSergey Zigachev 	struct amdgpu_device *adev = ring->adev;
1357b843c749SSergey Zigachev 
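	/*
	 * Register wait through the VCPU: DATA0 = register byte offset,
	 * DATA1 = expected value, GP_SCRATCH8 = mask, then GPCOM command 12
	 * (assumption: poll until (reg & mask) == value).
	 */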
1358b843c749SSergey Zigachev 	amdgpu_ring_write(ring,
1359b843c749SSergey Zigachev 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
1360b843c749SSergey Zigachev 	amdgpu_ring_write(ring, reg << 2);
1361b843c749SSergey Zigachev 	amdgpu_ring_write(ring,
1362b843c749SSergey Zigachev 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
1363b843c749SSergey Zigachev 	amdgpu_ring_write(ring, val);
1364b843c749SSergey Zigachev 	amdgpu_ring_write(ring,
1365b843c749SSergey Zigachev 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GP_SCRATCH8), 0));
1366b843c749SSergey Zigachev 	amdgpu_ring_write(ring, mask);
1367b843c749SSergey Zigachev 	amdgpu_ring_write(ring,
1368b843c749SSergey Zigachev 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
1369b843c749SSergey Zigachev 	amdgpu_ring_write(ring, 12);
1370b843c749SSergey Zigachev }
1371b843c749SSergey Zigachev 
1372b843c749SSergey Zigachev static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1373b843c749SSergey Zigachev 					unsigned vmid, uint64_t pd_addr)
1374b843c749SSergey Zigachev {
1375b843c749SSergey Zigachev 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
1376b843c749SSergey Zigachev 	uint32_t data0, data1, mask;
1377b843c749SSergey Zigachev 
1378b843c749SSergey Zigachev 	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1379b843c749SSergey Zigachev 
1380b843c749SSergey Zigachev 	/* wait for reg writes */
1381b843c749SSergey Zigachev 	data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
1382b843c749SSergey Zigachev 	data1 = lower_32_bits(pd_addr);
1383b843c749SSergey Zigachev 	mask = 0xffffffff;
1384b843c749SSergey Zigachev 	uvd_v7_0_ring_emit_reg_wait(ring, data0, data1, mask);
1385b843c749SSergey Zigachev }
1386b843c749SSergey Zigachev 
1387b843c749SSergey Zigachev static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
1388b843c749SSergey Zigachev {
1389b843c749SSergey Zigachev 	struct amdgpu_device *adev = ring->adev;
1390b843c749SSergey Zigachev 	int i;
1391b843c749SSergey Zigachev 
1392b843c749SSergey Zigachev 	WARN_ON(ring->wptr % 2 || count % 2);
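	/*
	 * Each NOP is two dwords (PACKET0 header + payload), so both the
	 * write pointer and the requested count must stay even.
	 */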
1393b843c749SSergey Zigachev 
1394b843c749SSergey Zigachev 	for (i = 0; i < count / 2; i++) {
1395b843c749SSergey Zigachev 		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_NO_OP), 0));
1396b843c749SSergey Zigachev 		amdgpu_ring_write(ring, 0);
1397b843c749SSergey Zigachev 	}
1398b843c749SSergey Zigachev }
1399b843c749SSergey Zigachev 
1400b843c749SSergey Zigachev static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
1401b843c749SSergey Zigachev {
1402b843c749SSergey Zigachev 	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
1403b843c749SSergey Zigachev }
1404b843c749SSergey Zigachev 
1405b843c749SSergey Zigachev static void uvd_v7_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
1406b843c749SSergey Zigachev 					    uint32_t reg, uint32_t val,
1407b843c749SSergey Zigachev 					    uint32_t mask)
1408b843c749SSergey Zigachev {
1409b843c749SSergey Zigachev 	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
1410b843c749SSergey Zigachev 	amdgpu_ring_write(ring,	reg << 2);
1411b843c749SSergey Zigachev 	amdgpu_ring_write(ring, mask);
1412b843c749SSergey Zigachev 	amdgpu_ring_write(ring, val);
1413b843c749SSergey Zigachev }
1414b843c749SSergey Zigachev 
1415b843c749SSergey Zigachev static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
1416b843c749SSergey Zigachev 					    unsigned int vmid, uint64_t pd_addr)
1417b843c749SSergey Zigachev {
1418b843c749SSergey Zigachev 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
1419b843c749SSergey Zigachev 
1420b843c749SSergey Zigachev 	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1421b843c749SSergey Zigachev 
1422b843c749SSergey Zigachev 	/* wait for reg writes */
1423b843c749SSergey Zigachev 	uvd_v7_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
1424b843c749SSergey Zigachev 					lower_32_bits(pd_addr), 0xffffffff);
1425b843c749SSergey Zigachev }
1426b843c749SSergey Zigachev 
1427b843c749SSergey Zigachev static void uvd_v7_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
1428b843c749SSergey Zigachev 					uint32_t reg, uint32_t val)
1429b843c749SSergey Zigachev {
1430b843c749SSergey Zigachev 	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
1431b843c749SSergey Zigachev 	amdgpu_ring_write(ring,	reg << 2);
1432b843c749SSergey Zigachev 	amdgpu_ring_write(ring, val);
1433b843c749SSergey Zigachev }
1434b843c749SSergey Zigachev 
1435b843c749SSergey Zigachev #if 0
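/*
 * Note: the disabled helpers below reference ring->me without a ring in
 * scope, so they no longer compile after the multi-instance rework; they
 * would need an explicit instance parameter before being re-enabled.
 */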
1436b843c749SSergey Zigachev static bool uvd_v7_0_is_idle(void *handle)
1437b843c749SSergey Zigachev {
1438b843c749SSergey Zigachev 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1439b843c749SSergey Zigachev 
1440b843c749SSergey Zigachev 	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
1441b843c749SSergey Zigachev }
1442b843c749SSergey Zigachev 
1443b843c749SSergey Zigachev static int uvd_v7_0_wait_for_idle(void *handle)
1444b843c749SSergey Zigachev {
1445b843c749SSergey Zigachev 	unsigned i;
1446b843c749SSergey Zigachev 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1447b843c749SSergey Zigachev 
1448b843c749SSergey Zigachev 	for (i = 0; i < adev->usec_timeout; i++) {
1449b843c749SSergey Zigachev 		if (uvd_v7_0_is_idle(handle))
1450b843c749SSergey Zigachev 			return 0;
1451b843c749SSergey Zigachev 	}
1452b843c749SSergey Zigachev 	return -ETIMEDOUT;
1453b843c749SSergey Zigachev }
1454b843c749SSergey Zigachev 
1455b843c749SSergey Zigachev #define AMDGPU_UVD_STATUS_BUSY_MASK    0xfd
1456b843c749SSergey Zigachev static bool uvd_v7_0_check_soft_reset(void *handle)
1457b843c749SSergey Zigachev {
1458b843c749SSergey Zigachev 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1459b843c749SSergey Zigachev 	u32 srbm_soft_reset = 0;
1460b843c749SSergey Zigachev 	u32 tmp = RREG32(mmSRBM_STATUS);
1461b843c749SSergey Zigachev 
1462b843c749SSergey Zigachev 	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
1463b843c749SSergey Zigachev 	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
1464b843c749SSergey Zigachev 	    (RREG32_SOC15(UVD, ring->me, mmUVD_STATUS) &
1465b843c749SSergey Zigachev 		    AMDGPU_UVD_STATUS_BUSY_MASK))
1466b843c749SSergey Zigachev 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1467b843c749SSergey Zigachev 				SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
1468b843c749SSergey Zigachev 
1469b843c749SSergey Zigachev 	if (srbm_soft_reset) {
1470b843c749SSergey Zigachev 		adev->uvd.inst[ring->me].srbm_soft_reset = srbm_soft_reset;
1471b843c749SSergey Zigachev 		return true;
1472b843c749SSergey Zigachev 	} else {
1473b843c749SSergey Zigachev 		adev->uvd.inst[ring->me].srbm_soft_reset = 0;
1474b843c749SSergey Zigachev 		return false;
1475b843c749SSergey Zigachev 	}
1476b843c749SSergey Zigachev }
1477b843c749SSergey Zigachev 
1478b843c749SSergey Zigachev static int uvd_v7_0_pre_soft_reset(void *handle)
1479b843c749SSergey Zigachev {
1480b843c749SSergey Zigachev 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1481b843c749SSergey Zigachev 
1482b843c749SSergey Zigachev 	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
1483b843c749SSergey Zigachev 		return 0;
1484b843c749SSergey Zigachev 
1485b843c749SSergey Zigachev 	uvd_v7_0_stop(adev);
1486b843c749SSergey Zigachev 	return 0;
1487b843c749SSergey Zigachev }
1488b843c749SSergey Zigachev 
1489b843c749SSergey Zigachev static int uvd_v7_0_soft_reset(void *handle)
1490b843c749SSergey Zigachev {
1491b843c749SSergey Zigachev 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1492b843c749SSergey Zigachev 	u32 srbm_soft_reset;
1493b843c749SSergey Zigachev 
1494b843c749SSergey Zigachev 	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
1495b843c749SSergey Zigachev 		return 0;
1496b843c749SSergey Zigachev 	srbm_soft_reset = adev->uvd.inst[ring->me].srbm_soft_reset;
1497b843c749SSergey Zigachev 
1498b843c749SSergey Zigachev 	if (srbm_soft_reset) {
1499b843c749SSergey Zigachev 		u32 tmp;
1500b843c749SSergey Zigachev 
1501b843c749SSergey Zigachev 		tmp = RREG32(mmSRBM_SOFT_RESET);
1502b843c749SSergey Zigachev 		tmp |= srbm_soft_reset;
1503b843c749SSergey Zigachev 		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1504b843c749SSergey Zigachev 		WREG32(mmSRBM_SOFT_RESET, tmp);
1505b843c749SSergey Zigachev 		tmp = RREG32(mmSRBM_SOFT_RESET);
1506b843c749SSergey Zigachev 
1507b843c749SSergey Zigachev 		udelay(50);
1508b843c749SSergey Zigachev 
1509b843c749SSergey Zigachev 		tmp &= ~srbm_soft_reset;
1510b843c749SSergey Zigachev 		WREG32(mmSRBM_SOFT_RESET, tmp);
1511b843c749SSergey Zigachev 		tmp = RREG32(mmSRBM_SOFT_RESET);
1512b843c749SSergey Zigachev 
1513b843c749SSergey Zigachev 		/* Wait a little for things to settle down */
1514b843c749SSergey Zigachev 		udelay(50);
1515b843c749SSergey Zigachev 	}
1516b843c749SSergey Zigachev 
1517b843c749SSergey Zigachev 	return 0;
1518b843c749SSergey Zigachev }
1519b843c749SSergey Zigachev 
1520b843c749SSergey Zigachev static int uvd_v7_0_post_soft_reset(void *handle)
1521b843c749SSergey Zigachev {
1522b843c749SSergey Zigachev 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1523b843c749SSergey Zigachev 
1524b843c749SSergey Zigachev 	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
1525b843c749SSergey Zigachev 		return 0;
1526b843c749SSergey Zigachev 
1527b843c749SSergey Zigachev 	mdelay(5);
1528b843c749SSergey Zigachev 
1529b843c749SSergey Zigachev 	return uvd_v7_0_start(adev);
1530b843c749SSergey Zigachev }
1531b843c749SSergey Zigachev #endif
1532b843c749SSergey Zigachev 
1533b843c749SSergey Zigachev static int uvd_v7_0_set_interrupt_state(struct amdgpu_device *adev,
1534b843c749SSergey Zigachev 					struct amdgpu_irq_src *source,
1535b843c749SSergey Zigachev 					unsigned type,
1536b843c749SSergey Zigachev 					enum amdgpu_interrupt_state state)
1537b843c749SSergey Zigachev {
1538b843c749SSergey Zigachev 	// TODO
1539b843c749SSergey Zigachev 	return 0;
1540b843c749SSergey Zigachev }
1541b843c749SSergey Zigachev 
1542b843c749SSergey Zigachev static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
1543b843c749SSergey Zigachev 				      struct amdgpu_irq_src *source,
1544b843c749SSergey Zigachev 				      struct amdgpu_iv_entry *entry)
1545b843c749SSergey Zigachev {
1546b843c749SSergey Zigachev 	uint32_t ip_instance;
1547b843c749SSergey Zigachev 
1548b843c749SSergey Zigachev 	switch (entry->client_id) {
1549b843c749SSergey Zigachev 	case SOC15_IH_CLIENTID_UVD:
1550b843c749SSergey Zigachev 		ip_instance = 0;
1551b843c749SSergey Zigachev 		break;
1552b843c749SSergey Zigachev 	case SOC15_IH_CLIENTID_UVD1:
1553b843c749SSergey Zigachev 		ip_instance = 1;
1554b843c749SSergey Zigachev 		break;
1555b843c749SSergey Zigachev 	default:
1556b843c749SSergey Zigachev 		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
1557b843c749SSergey Zigachev 		return 0;
1558b843c749SSergey Zigachev 	}
1559b843c749SSergey Zigachev 
1560b843c749SSergey Zigachev 	DRM_DEBUG("IH: UVD TRAP\n");
1561b843c749SSergey Zigachev 
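	/*
	 * src_id values (assumption: matching ivsrcid/uvd/irqsrcs_uvd_7_0.h):
	 * 124 = system message / decode ring trap, 119 and 120 = the two
	 * encode rings.
	 */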
1562b843c749SSergey Zigachev 	switch (entry->src_id) {
1563b843c749SSergey Zigachev 	case 124:
1564b843c749SSergey Zigachev 		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring);
1565b843c749SSergey Zigachev 		break;
1566b843c749SSergey Zigachev 	case 119:
1567b843c749SSergey Zigachev 		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[0]);
1568b843c749SSergey Zigachev 		break;
1569b843c749SSergey Zigachev 	case 120:
1570b843c749SSergey Zigachev 		if (!amdgpu_sriov_vf(adev))
1571b843c749SSergey Zigachev 			amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[1]);
1572b843c749SSergey Zigachev 		break;
1573b843c749SSergey Zigachev 	default:
1574b843c749SSergey Zigachev 		DRM_ERROR("Unhandled interrupt: %d %d\n",
1575b843c749SSergey Zigachev 			  entry->src_id, entry->src_data[0]);
1576b843c749SSergey Zigachev 		break;
1577b843c749SSergey Zigachev 	}
1578b843c749SSergey Zigachev 
1579b843c749SSergey Zigachev 	return 0;
1580b843c749SSergey Zigachev }
1581b843c749SSergey Zigachev 
1582b843c749SSergey Zigachev #if 0
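/*
 * As above: these clock/power-gating helpers also use ring->me without a
 * ring in scope and are therefore compiled out.
 */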
1583b843c749SSergey Zigachev static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev)
1584b843c749SSergey Zigachev {
1585b843c749SSergey Zigachev 	uint32_t data, data1, data2, suvd_flags;
1586b843c749SSergey Zigachev 
1587b843c749SSergey Zigachev 	data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL);
1588b843c749SSergey Zigachev 	data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);
1589b843c749SSergey Zigachev 	data2 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL);
1590b843c749SSergey Zigachev 
1591b843c749SSergey Zigachev 	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
1592b843c749SSergey Zigachev 		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
1593b843c749SSergey Zigachev 
1594b843c749SSergey Zigachev 	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
1595b843c749SSergey Zigachev 		     UVD_SUVD_CGC_GATE__SIT_MASK |
1596b843c749SSergey Zigachev 		     UVD_SUVD_CGC_GATE__SMP_MASK |
1597b843c749SSergey Zigachev 		     UVD_SUVD_CGC_GATE__SCM_MASK |
1598b843c749SSergey Zigachev 		     UVD_SUVD_CGC_GATE__SDB_MASK;
1599b843c749SSergey Zigachev 
1600b843c749SSergey Zigachev 	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
1601b843c749SSergey Zigachev 		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
1602b843c749SSergey Zigachev 		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));
1603b843c749SSergey Zigachev 
1604b843c749SSergey Zigachev 	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
1605b843c749SSergey Zigachev 			UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
1606b843c749SSergey Zigachev 			UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
1607b843c749SSergey Zigachev 			UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
1608b843c749SSergey Zigachev 			UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
1609b843c749SSergey Zigachev 			UVD_CGC_CTRL__SYS_MODE_MASK |
1610b843c749SSergey Zigachev 			UVD_CGC_CTRL__UDEC_MODE_MASK |
1611b843c749SSergey Zigachev 			UVD_CGC_CTRL__MPEG2_MODE_MASK |
1612b843c749SSergey Zigachev 			UVD_CGC_CTRL__REGS_MODE_MASK |
1613b843c749SSergey Zigachev 			UVD_CGC_CTRL__RBC_MODE_MASK |
1614b843c749SSergey Zigachev 			UVD_CGC_CTRL__LMI_MC_MODE_MASK |
1615b843c749SSergey Zigachev 			UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
1616b843c749SSergey Zigachev 			UVD_CGC_CTRL__IDCT_MODE_MASK |
1617b843c749SSergey Zigachev 			UVD_CGC_CTRL__MPRD_MODE_MASK |
1618b843c749SSergey Zigachev 			UVD_CGC_CTRL__MPC_MODE_MASK |
1619b843c749SSergey Zigachev 			UVD_CGC_CTRL__LBSI_MODE_MASK |
1620b843c749SSergey Zigachev 			UVD_CGC_CTRL__LRBBM_MODE_MASK |
1621b843c749SSergey Zigachev 			UVD_CGC_CTRL__WCB_MODE_MASK |
1622b843c749SSergey Zigachev 			UVD_CGC_CTRL__VCPU_MODE_MASK |
1623b843c749SSergey Zigachev 			UVD_CGC_CTRL__JPEG_MODE_MASK |
1624b843c749SSergey Zigachev 			UVD_CGC_CTRL__JPEG2_MODE_MASK |
1625b843c749SSergey Zigachev 			UVD_CGC_CTRL__SCPU_MODE_MASK);
1626b843c749SSergey Zigachev 	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
1627b843c749SSergey Zigachev 			UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
1628b843c749SSergey Zigachev 			UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
1629b843c749SSergey Zigachev 			UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
1630b843c749SSergey Zigachev 			UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
1631b843c749SSergey Zigachev 	data1 |= suvd_flags;
1632b843c749SSergey Zigachev 
1633b843c749SSergey Zigachev 	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL, data);
1634b843c749SSergey Zigachev 	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, 0);
1635b843c749SSergey Zigachev 	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
1636b843c749SSergey Zigachev 	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL, data2);
1637b843c749SSergey Zigachev }
1638b843c749SSergey Zigachev 
1639b843c749SSergey Zigachev static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev)
1640b843c749SSergey Zigachev {
1641b843c749SSergey Zigachev 	uint32_t data, data1, cgc_flags, suvd_flags;
1642b843c749SSergey Zigachev 
1643b843c749SSergey Zigachev 	data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE);
1644b843c749SSergey Zigachev 	data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);
1645b843c749SSergey Zigachev 
1646b843c749SSergey Zigachev 	cgc_flags = UVD_CGC_GATE__SYS_MASK |
1647b843c749SSergey Zigachev 		UVD_CGC_GATE__UDEC_MASK |
1648b843c749SSergey Zigachev 		UVD_CGC_GATE__MPEG2_MASK |
1649b843c749SSergey Zigachev 		UVD_CGC_GATE__RBC_MASK |
1650b843c749SSergey Zigachev 		UVD_CGC_GATE__LMI_MC_MASK |
1651b843c749SSergey Zigachev 		UVD_CGC_GATE__IDCT_MASK |
1652b843c749SSergey Zigachev 		UVD_CGC_GATE__MPRD_MASK |
1653b843c749SSergey Zigachev 		UVD_CGC_GATE__MPC_MASK |
1654b843c749SSergey Zigachev 		UVD_CGC_GATE__LBSI_MASK |
1655b843c749SSergey Zigachev 		UVD_CGC_GATE__LRBBM_MASK |
1656b843c749SSergey Zigachev 		UVD_CGC_GATE__UDEC_RE_MASK |
1657b843c749SSergey Zigachev 		UVD_CGC_GATE__UDEC_CM_MASK |
1658b843c749SSergey Zigachev 		UVD_CGC_GATE__UDEC_IT_MASK |
1659b843c749SSergey Zigachev 		UVD_CGC_GATE__UDEC_DB_MASK |
1660b843c749SSergey Zigachev 		UVD_CGC_GATE__UDEC_MP_MASK |
1661b843c749SSergey Zigachev 		UVD_CGC_GATE__WCB_MASK |
1662b843c749SSergey Zigachev 		UVD_CGC_GATE__VCPU_MASK |
1663b843c749SSergey Zigachev 		UVD_CGC_GATE__SCPU_MASK |
1664b843c749SSergey Zigachev 		UVD_CGC_GATE__JPEG_MASK |
1665b843c749SSergey Zigachev 		UVD_CGC_GATE__JPEG2_MASK;
1666b843c749SSergey Zigachev 
1667b843c749SSergey Zigachev 	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
1668b843c749SSergey Zigachev 				UVD_SUVD_CGC_GATE__SIT_MASK |
1669b843c749SSergey Zigachev 				UVD_SUVD_CGC_GATE__SMP_MASK |
1670b843c749SSergey Zigachev 				UVD_SUVD_CGC_GATE__SCM_MASK |
1671b843c749SSergey Zigachev 				UVD_SUVD_CGC_GATE__SDB_MASK;
1672b843c749SSergey Zigachev 
1673b843c749SSergey Zigachev 	data |= cgc_flags;
1674b843c749SSergey Zigachev 	data1 |= suvd_flags;
1675b843c749SSergey Zigachev 
1676b843c749SSergey Zigachev 	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, data);
1677b843c749SSergey Zigachev 	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
1678b843c749SSergey Zigachev }
1679b843c749SSergey Zigachev 
1680b843c749SSergey Zigachev static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
1681b843c749SSergey Zigachev {
1682b843c749SSergey Zigachev 	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
1683b843c749SSergey Zigachev 
1684b843c749SSergey Zigachev 	if (enable)
1685b843c749SSergey Zigachev 		tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
1686b843c749SSergey Zigachev 			GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
1687b843c749SSergey Zigachev 	else
1688b843c749SSergey Zigachev 		tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
1689b843c749SSergey Zigachev 			 GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
1690b843c749SSergey Zigachev 
1691b843c749SSergey Zigachev 	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
1692b843c749SSergey Zigachev }
1693b843c749SSergey Zigachev 
1694b843c749SSergey Zigachev 
1695b843c749SSergey Zigachev static int uvd_v7_0_set_clockgating_state(void *handle,
1696b843c749SSergey Zigachev 					  enum amd_clockgating_state state)
1697b843c749SSergey Zigachev {
1698b843c749SSergey Zigachev 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1699b843c749SSergey Zigachev 	bool enable = (state == AMD_CG_STATE_GATE);
1700b843c749SSergey Zigachev 
1701b843c749SSergey Zigachev 	uvd_v7_0_set_bypass_mode(adev, enable);
1702b843c749SSergey Zigachev 
1703b843c749SSergey Zigachev 	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
1704b843c749SSergey Zigachev 		return 0;
1705b843c749SSergey Zigachev 
1706b843c749SSergey Zigachev 	if (enable) {
1707b843c749SSergey Zigachev 		/* disable HW gating and enable SW gating */
1708b843c749SSergey Zigachev 		uvd_v7_0_set_sw_clock_gating(adev);
1709b843c749SSergey Zigachev 	} else {
1710b843c749SSergey Zigachev 		/* wait for STATUS to clear */
1711b843c749SSergey Zigachev 		if (uvd_v7_0_wait_for_idle(handle))
1712b843c749SSergey Zigachev 			return -EBUSY;
1713b843c749SSergey Zigachev 
1714b843c749SSergey Zigachev 		/* enable HW gates because UVD is idle */
1715b843c749SSergey Zigachev 		/* uvd_v7_0_set_hw_clock_gating(adev); */
1716b843c749SSergey Zigachev 	}
1717b843c749SSergey Zigachev 
1718b843c749SSergey Zigachev 	return 0;
1719b843c749SSergey Zigachev }
1720b843c749SSergey Zigachev 
1721b843c749SSergey Zigachev static int uvd_v7_0_set_powergating_state(void *handle,
1722b843c749SSergey Zigachev 					  enum amd_powergating_state state)
1723b843c749SSergey Zigachev {
1724b843c749SSergey Zigachev 	/* This doesn't actually powergate the UVD block.
1725b843c749SSergey Zigachev 	 * That's done in the dpm code via the SMC.  This
1726b843c749SSergey Zigachev 	 * just re-inits the block as necessary.  The actual
1727b843c749SSergey Zigachev 	 * gating still happens in the dpm code.  We should
1728b843c749SSergey Zigachev 	 * revisit this when there is a cleaner line between
1729b843c749SSergey Zigachev 	 * the smc and the hw blocks
1730b843c749SSergey Zigachev 	 */
1731b843c749SSergey Zigachev 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1732b843c749SSergey Zigachev 
1733b843c749SSergey Zigachev 	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
1734b843c749SSergey Zigachev 		return 0;
1735b843c749SSergey Zigachev 
1736b843c749SSergey Zigachev 	WREG32_SOC15(UVD, ring->me, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
1737b843c749SSergey Zigachev 
1738b843c749SSergey Zigachev 	if (state == AMD_PG_STATE_GATE) {
1739b843c749SSergey Zigachev 		uvd_v7_0_stop(adev);
1740b843c749SSergey Zigachev 		return 0;
1741b843c749SSergey Zigachev 	} else {
1742b843c749SSergey Zigachev 		return uvd_v7_0_start(adev);
1743b843c749SSergey Zigachev 	}
1744b843c749SSergey Zigachev }
1745b843c749SSergey Zigachev #endif
1746b843c749SSergey Zigachev 
1747b843c749SSergey Zigachev static int uvd_v7_0_set_clockgating_state(void *handle,
1748b843c749SSergey Zigachev 					  enum amd_clockgating_state state)
1749b843c749SSergey Zigachev {
1750b843c749SSergey Zigachev 	/* needed for driver unload */
1751b843c749SSergey Zigachev 	return 0;
1752b843c749SSergey Zigachev }
1753b843c749SSergey Zigachev 
1754b843c749SSergey Zigachev const struct amd_ip_funcs uvd_v7_0_ip_funcs = {
1755b843c749SSergey Zigachev 	.name = "uvd_v7_0",
1756b843c749SSergey Zigachev 	.early_init = uvd_v7_0_early_init,
1757b843c749SSergey Zigachev 	.late_init = NULL,
1758b843c749SSergey Zigachev 	.sw_init = uvd_v7_0_sw_init,
1759b843c749SSergey Zigachev 	.sw_fini = uvd_v7_0_sw_fini,
1760b843c749SSergey Zigachev 	.hw_init = uvd_v7_0_hw_init,
1761b843c749SSergey Zigachev 	.hw_fini = uvd_v7_0_hw_fini,
1762b843c749SSergey Zigachev 	.suspend = uvd_v7_0_suspend,
1763b843c749SSergey Zigachev 	.resume = uvd_v7_0_resume,
1764b843c749SSergey Zigachev 	.is_idle = NULL /* uvd_v7_0_is_idle */,
1765b843c749SSergey Zigachev 	.wait_for_idle = NULL /* uvd_v7_0_wait_for_idle */,
1766b843c749SSergey Zigachev 	.check_soft_reset = NULL /* uvd_v7_0_check_soft_reset */,
1767b843c749SSergey Zigachev 	.pre_soft_reset = NULL /* uvd_v7_0_pre_soft_reset */,
1768b843c749SSergey Zigachev 	.soft_reset = NULL /* uvd_v7_0_soft_reset */,
1769b843c749SSergey Zigachev 	.post_soft_reset = NULL /* uvd_v7_0_post_soft_reset */,
1770b843c749SSergey Zigachev 	.set_clockgating_state = uvd_v7_0_set_clockgating_state,
1771b843c749SSergey Zigachev 	.set_powergating_state = NULL /* uvd_v7_0_set_powergating_state */,
1772b843c749SSergey Zigachev };
1773b843c749SSergey Zigachev 
1774b843c749SSergey Zigachev static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
1775b843c749SSergey Zigachev 	.type = AMDGPU_RING_TYPE_UVD,
1776b843c749SSergey Zigachev 	.align_mask = 0xf,
1777b843c749SSergey Zigachev 	.support_64bit_ptrs = false,
1778b843c749SSergey Zigachev 	.vmhub = AMDGPU_MMHUB,
1779b843c749SSergey Zigachev 	.get_rptr = uvd_v7_0_ring_get_rptr,
1780b843c749SSergey Zigachev 	.get_wptr = uvd_v7_0_ring_get_wptr,
1781b843c749SSergey Zigachev 	.set_wptr = uvd_v7_0_ring_set_wptr,
1782b843c749SSergey Zigachev 	.patch_cs_in_place = uvd_v7_0_ring_patch_cs_in_place,
1783b843c749SSergey Zigachev 	.emit_frame_size =
1784b843c749SSergey Zigachev 		6 + /* hdp invalidate */
1785b843c749SSergey Zigachev 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
1786b843c749SSergey Zigachev 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
1787b843c749SSergey Zigachev 		8 + /* uvd_v7_0_ring_emit_vm_flush */
1788b843c749SSergey Zigachev 		14 + 14, /* uvd_v7_0_ring_emit_fence x2 vm fence */
1789b843c749SSergey Zigachev 	.emit_ib_size = 8, /* uvd_v7_0_ring_emit_ib */
1790b843c749SSergey Zigachev 	.emit_ib = uvd_v7_0_ring_emit_ib,
1791b843c749SSergey Zigachev 	.emit_fence = uvd_v7_0_ring_emit_fence,
1792b843c749SSergey Zigachev 	.emit_vm_flush = uvd_v7_0_ring_emit_vm_flush,
1793b843c749SSergey Zigachev 	.emit_hdp_flush = uvd_v7_0_ring_emit_hdp_flush,
1794b843c749SSergey Zigachev 	.test_ring = uvd_v7_0_ring_test_ring,
1795b843c749SSergey Zigachev 	.test_ib = amdgpu_uvd_ring_test_ib,
1796b843c749SSergey Zigachev 	.insert_nop = uvd_v7_0_ring_insert_nop,
1797b843c749SSergey Zigachev 	.pad_ib = amdgpu_ring_generic_pad_ib,
1798b843c749SSergey Zigachev 	.begin_use = amdgpu_uvd_ring_begin_use,
1799b843c749SSergey Zigachev 	.end_use = amdgpu_uvd_ring_end_use,
1800b843c749SSergey Zigachev 	.emit_wreg = uvd_v7_0_ring_emit_wreg,
1801b843c749SSergey Zigachev 	.emit_reg_wait = uvd_v7_0_ring_emit_reg_wait,
1802b843c749SSergey Zigachev 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1803b843c749SSergey Zigachev };
1804b843c749SSergey Zigachev 
1805b843c749SSergey Zigachev static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
1806b843c749SSergey Zigachev 	.type = AMDGPU_RING_TYPE_UVD_ENC,
1807b843c749SSergey Zigachev 	.align_mask = 0x3f,
1808b843c749SSergey Zigachev 	.nop = HEVC_ENC_CMD_NO_OP,
1809b843c749SSergey Zigachev 	.support_64bit_ptrs = false,
1810b843c749SSergey Zigachev 	.vmhub = AMDGPU_MMHUB,
1811b843c749SSergey Zigachev 	.get_rptr = uvd_v7_0_enc_ring_get_rptr,
1812b843c749SSergey Zigachev 	.get_wptr = uvd_v7_0_enc_ring_get_wptr,
1813b843c749SSergey Zigachev 	.set_wptr = uvd_v7_0_enc_ring_set_wptr,
1814b843c749SSergey Zigachev 	.emit_frame_size =
1815b843c749SSergey Zigachev 		3 + 3 + /* hdp flush / invalidate */
1816b843c749SSergey Zigachev 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1817b843c749SSergey Zigachev 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
1818b843c749SSergey Zigachev 		4 + /* uvd_v7_0_enc_ring_emit_vm_flush */
1819b843c749SSergey Zigachev 		5 + 5 + /* uvd_v7_0_enc_ring_emit_fence x2 vm fence */
1820b843c749SSergey Zigachev 		1, /* uvd_v7_0_enc_ring_insert_end */
1821b843c749SSergey Zigachev 	.emit_ib_size = 5, /* uvd_v7_0_enc_ring_emit_ib */
1822b843c749SSergey Zigachev 	.emit_ib = uvd_v7_0_enc_ring_emit_ib,
1823b843c749SSergey Zigachev 	.emit_fence = uvd_v7_0_enc_ring_emit_fence,
1824b843c749SSergey Zigachev 	.emit_vm_flush = uvd_v7_0_enc_ring_emit_vm_flush,
1825b843c749SSergey Zigachev 	.test_ring = uvd_v7_0_enc_ring_test_ring,
1826b843c749SSergey Zigachev 	.test_ib = uvd_v7_0_enc_ring_test_ib,
1827b843c749SSergey Zigachev 	.insert_nop = amdgpu_ring_insert_nop,
1828b843c749SSergey Zigachev 	.insert_end = uvd_v7_0_enc_ring_insert_end,
1829b843c749SSergey Zigachev 	.pad_ib = amdgpu_ring_generic_pad_ib,
1830b843c749SSergey Zigachev 	.begin_use = amdgpu_uvd_ring_begin_use,
1831b843c749SSergey Zigachev 	.end_use = amdgpu_uvd_ring_end_use,
1832b843c749SSergey Zigachev 	.emit_wreg = uvd_v7_0_enc_ring_emit_wreg,
1833b843c749SSergey Zigachev 	.emit_reg_wait = uvd_v7_0_enc_ring_emit_reg_wait,
1834b843c749SSergey Zigachev 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1835b843c749SSergey Zigachev };
1836b843c749SSergey Zigachev 
1837b843c749SSergey Zigachev static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
1838b843c749SSergey Zigachev {
1839b843c749SSergey Zigachev 	int i;
1840b843c749SSergey Zigachev 
1841b843c749SSergey Zigachev 	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
1842b843c749SSergey Zigachev 		if (adev->uvd.harvest_config & (1 << i))
1843b843c749SSergey Zigachev 			continue;
1844b843c749SSergey Zigachev 		adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
1845b843c749SSergey Zigachev 		adev->uvd.inst[i].ring.me = i;
1846b843c749SSergey Zigachev 		DRM_INFO("UVD(%d) is enabled in VM mode\n", i);
1847b843c749SSergey Zigachev 	}
1848b843c749SSergey Zigachev }
1849b843c749SSergey Zigachev 
1850b843c749SSergey Zigachev static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
1851b843c749SSergey Zigachev {
1852b843c749SSergey Zigachev 	int i, j;
1853b843c749SSergey Zigachev 
1854b843c749SSergey Zigachev 	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
1855b843c749SSergey Zigachev 		if (adev->uvd.harvest_config & (1 << j))
1856b843c749SSergey Zigachev 			continue;
1857b843c749SSergey Zigachev 		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
1858b843c749SSergey Zigachev 			adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
1859b843c749SSergey Zigachev 			adev->uvd.inst[j].ring_enc[i].me = j;
1860b843c749SSergey Zigachev 		}
1861b843c749SSergey Zigachev 
1862b843c749SSergey Zigachev 		DRM_INFO("UVD(%d) ENC is enabled in VM mode\n", j);
1863b843c749SSergey Zigachev 	}
1864b843c749SSergey Zigachev }
1865b843c749SSergey Zigachev 
1866b843c749SSergey Zigachev static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = {
1867b843c749SSergey Zigachev 	.set = uvd_v7_0_set_interrupt_state,
1868b843c749SSergey Zigachev 	.process = uvd_v7_0_process_interrupt,
1869b843c749SSergey Zigachev };
1870b843c749SSergey Zigachev 
1871b843c749SSergey Zigachev static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
1872b843c749SSergey Zigachev {
1873b843c749SSergey Zigachev 	int i;
1874b843c749SSergey Zigachev 
1875b843c749SSergey Zigachev 	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
1876b843c749SSergey Zigachev 		if (adev->uvd.harvest_config & (1 << i))
1877b843c749SSergey Zigachev 			continue;
1878b843c749SSergey Zigachev 		adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1;
1879b843c749SSergey Zigachev 		adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs;
1880b843c749SSergey Zigachev 	}
1881b843c749SSergey Zigachev }
1882b843c749SSergey Zigachev 
1883b843c749SSergey Zigachev const struct amdgpu_ip_block_version uvd_v7_0_ip_block =
1884b843c749SSergey Zigachev {
1885b843c749SSergey Zigachev 		.type = AMD_IP_BLOCK_TYPE_UVD,
1886b843c749SSergey Zigachev 		.major = 7,
1887b843c749SSergey Zigachev 		.minor = 0,
1888b843c749SSergey Zigachev 		.rev = 0,
1889b843c749SSergey Zigachev 		.funcs = &uvd_v7_0_ip_funcs,
1890b843c749SSergey Zigachev };