/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_pm.h"
#include "amdgpu_psp.h"
#include "mmsch_v2_0.h"
#include "vcn_v2_0.h"

#include "vcn/vcn_2_0_0_offset.h"
#include "vcn/vcn_2_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
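/*
 * The *_INTERNAL_OFFSET values below are register offsets in the VCPU's
 * internal address space; they are stashed in adev->vcn.internal and used
 * when emitting PACKET0 commands on the decode ring, as opposed to the
 * MMIO offsets used for direct register access.
 */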
#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x1fd
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x503
#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET 0x504
#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET 0x505
#define mmUVD_NO_OP_INTERNAL_OFFSET 0x53f
#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET 0x54a
#define mmUVD_SCRATCH9_INTERNAL_OFFSET 0xc01d

#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET 0x1e1
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x5a6
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x5a7
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET 0x1e2

static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v2_0_set_powergating_state(void *handle,
				enum amd_powergating_state state);
static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
				int inst_idx, struct dpg_pause_state *new_state);
static int vcn_v2_0_start_sriov(struct amdgpu_device *adev);

/**
 * vcn_v2_0_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v2_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->vcn.num_vcn_inst = 1;
	if (amdgpu_sriov_vf(adev))
		adev->vcn.num_enc_rings = 1;
	else
		adev->vcn.num_enc_rings = 2;

	vcn_v2_0_set_dec_ring_funcs(adev);
	vcn_v2_0_set_enc_ring_funcs(adev);
	vcn_v2_0_set_irq_funcs(adev);

	return 0;
}

/**
 * vcn_v2_0_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v2_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	volatile struct amdgpu_fw_shared *fw_shared;

	/* VCN DEC TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
			      VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT,
			      &adev->vcn.inst->irq);
	if (r)
		return r;

	/* VCN ENC TRAP */
	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
				      i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
				      &adev->vcn.inst->irq);
		if (r)
			return r;
	}

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;
		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		DRM_INFO("PSP loading VCN firmware\n");
	}

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	ring = &adev->vcn.inst->ring_dec;

	ring->use_doorbell = true;
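	/*
	 * adev->doorbell_index entries are in qword (64-bit) units, while
	 * ring doorbell indices are dword-based, hence the << 1 below.
	 */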
	ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1;

	sprintf(ring->name, "vcn_dec");
	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;

	adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
	adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
	adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
	adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
	adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
	adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;

	adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
	adev->vcn.inst->external.scratch9 = SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
	adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
	adev->vcn.inst->external.data0 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
	adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
	adev->vcn.inst->external.data1 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
	adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
	adev->vcn.inst->external.cmd = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
	adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
	adev->vcn.inst->external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		ring = &adev->vcn.inst->ring_enc[i];
		ring->use_doorbell = true;
		if (!amdgpu_sriov_vf(adev))
			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i;
		else
			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + i;
		sprintf(ring->name, "vcn_enc%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
				     AMDGPU_RING_PRIO_DEFAULT, NULL);
		if (r)
			return r;
	}

	adev->vcn.pause_dpg_mode = vcn_v2_0_pause_dpg_mode;

	r = amdgpu_virt_alloc_mm_table(adev);
	if (r)
		return r;

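	/*
	 * Publish the shared-memory present flag so the VCN firmware knows
	 * the driver populates the multi-queue fields of amdgpu_fw_shared.
	 */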
	fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
	fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);
	return 0;
}

/**
 * vcn_v2_0_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v2_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;

	fw_shared->present_flag_0 = 0;

	amdgpu_virt_free_mm_table(adev);

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}

/**
 * vcn_v2_0_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v2_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	int i, r;

	adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
					     ring->doorbell_index, 0);

	if (amdgpu_sriov_vf(adev))
		vcn_v2_0_start_sriov(adev);

	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

	/* Disable vcn decode for sriov */
	if (amdgpu_sriov_vf(adev))
		ring->sched.ready = false;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		ring = &adev->vcn.inst->ring_enc[i];
		r = amdgpu_ring_test_helper(ring);
		if (r)
			goto done;
	}

done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully (under %s).\n",
			 (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");

	return r;
}

/**
 * vcn_v2_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v2_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

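	/*
	 * Gate the block on teardown: always for DPG-capable parts, and
	 * otherwise whenever it is still powered up and running.
	 */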
	if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
	    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
	     RREG32_SOC15(VCN, 0, mmUVD_STATUS)))
		vcn_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);

	return 0;
}

/**
 * vcn_v2_0_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v2_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v2_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v2_0_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v2_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v2_0_hw_init(adev);

	return r;
}

/**
 * vcn_v2_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	if (amdgpu_sriov_vf(adev))
		return;

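	/*
	 * The three VCPU cache windows map the single VCN BO contiguously:
	 * firmware image first, then stack, then context; 'offset' tracks
	 * where the firmware image ends.
	 */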
	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			     (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			     (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			     lower_32_bits(adev->vcn.inst->gpu_addr));
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			     upper_32_bits(adev->vcn.inst->gpu_addr));
		offset = size;
		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
			     AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}

	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst->gpu_addr + offset));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst->gpu_addr + offset));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	/* non-cache window */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst->fw_shared_gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst->fw_shared_gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0,
		     AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));

	WREG32_SOC15(UVD, 0, mmUVD_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}

static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirect)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
				UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
				UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst->fw_shared_gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst->fw_shared_gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0),
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}

/**
 * vcn_v2_0_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Disable clock gating for VCN block
 */
static void vcn_v2_0_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data;

	if (amdgpu_sriov_vf(adev))
		return;

	/* UVD disable CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__UDEC_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__IDCT_MASK
		| UVD_CGC_GATE__MPRD_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__UDEC_RE_MASK
		| UVD_CGC_GATE__UDEC_CM_MASK
		| UVD_CGC_GATE__UDEC_IT_MASK
		| UVD_CGC_GATE__UDEC_DB_MASK
		| UVD_CGC_GATE__UDEC_MP_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__SCPU_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	/* turn on SUVD clock gating */
	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCLR_MASK
		| UVD_SUVD_CGC_GATE__UVD_SC_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}

static void vcn_v2_0_clock_gating_dpg_mode(struct amdgpu_device *adev,
		uint8_t sram_sel, uint8_t indirect)
{
	uint32_t reg_data = 0;

	/* enable sw clock gating control */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		 UVD_CGC_CTRL__SYS_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MODE_MASK |
		 UVD_CGC_CTRL__MPEG2_MODE_MASK |
		 UVD_CGC_CTRL__REGS_MODE_MASK |
		 UVD_CGC_CTRL__RBC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		 UVD_CGC_CTRL__IDCT_MODE_MASK |
		 UVD_CGC_CTRL__MPRD_MODE_MASK |
		 UVD_CGC_CTRL__MPC_MODE_MASK |
		 UVD_CGC_CTRL__LBSI_MODE_MASK |
		 UVD_CGC_CTRL__LRBBM_MODE_MASK |
		 UVD_CGC_CTRL__WCB_MODE_MASK |
		 UVD_CGC_CTRL__VCPU_MODE_MASK |
		 UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}

/**
 * vcn_v2_0_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Enable clock gating for VCN block
 */
static void vcn_v2_0_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	if (amdgpu_sriov_vf(adev))
		return;

	/* enable UVD CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}

static void vcn_v2_0_disable_static_power_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	if (amdgpu_sriov_vf(adev))
		return;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS,
			UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON_2_0, 0xFFFFF);
	} else {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFF);
	}

	/* polling UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS,
	 * UVDU_PWR_STATUS are 0 (power on) */

	data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
	data &= ~0x103;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON |
			UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
}

static void vcn_v2_0_enable_static_power_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	if (amdgpu_sriov_vf(adev))
		return;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);

		data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);

		data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIL_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIR_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFF);
	}
}

static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
{
	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	uint32_t rb_bufsz, tmp;

	vcn_v2_0_enable_static_power_gating(adev);

	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, tmp);

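	/*
	 * In indirect mode the DPG register writes below are not issued via
	 * MMIO; they are accumulated as register/value pairs in the DPG
	 * scratch SRAM buffer, starting at dpg_sram_cpu_addr.
	 */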
	if (indirect)
		adev->vcn.inst->dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst->dpg_sram_cpu_addr;

	/* enable clock gating */
	vcn_v2_0_clock_gating_dpg_mode(adev, 0, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK;
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_MASTINT_EN), 0, 0, indirect);

	/* setup mmUVD_LMI_CTRL */
	tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_MPC_SET_MUXB0),
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v2_0_mc_resume_dpg_mode(adev, indirect);

	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);

	/* release VCPU reset to boot */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_SOFT_RESET), 0, 0, indirect);

	/* enable LMI MC and UMC channels */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_LMI_CTRL2),
		0x1F << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

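	/*
	 * Hand the accumulated register/value pairs to the PSP, which
	 * programs them into the VCN instance; the byte count is derived
	 * from how far dpg_sram_curr_addr has advanced.
	 */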
	if (indirect)
		psp_update_vcn_sram(adev, 0, adev->vcn.inst->dpg_sram_gpu_addr,
				    (uint32_t)((uintptr_t)adev->vcn.inst->dpg_sram_curr_addr -
					       (uintptr_t)adev->vcn.inst->dpg_sram_cpu_addr));

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);

	/* Stall DPG before WPTR/RPTR reset */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
		 UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
		 ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
	fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;

	/* set the write pointer delay */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
		     (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		     lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		     upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
		     lower_32_bits(ring->wptr));

	fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
	/* Unstall DPG */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
		 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
	return 0;
}

static int vcn_v2_0_start(struct amdgpu_device *adev)
{
	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	int i, j, r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		return vcn_v2_0_start_dpg_mode(adev, adev->vcn.indirect_sram);

	vcn_v2_0_disable_static_power_gating(adev);

	/* set uvd status busy */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
	WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);

	/* SW clock gating */
	vcn_v2_0_disable_clock_gating(adev);

	/* enable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL),
		 UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

	/* disable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
		 ~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* setup mmUVD_LMI_CTRL */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL);
	WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL, tmp |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

	/* setup mmUVD_MPC_CNTL */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL);
	tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
	tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_MPC_CNTL, tmp);

	/* setup UVD_MPC_SET_MUXA0 */
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0,
		     ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		      (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		      (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		      (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

	/* setup UVD_MPC_SET_MUXB0 */
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0,
		     ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		      (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		      (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		      (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

	/* setup mmUVD_MPC_SET_MUX */
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX,
		     ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		      (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		      (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

	vcn_v2_0_mc_resume(adev);

	/* release VCPU reset to boot */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
		 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	/* enable LMI MC and UMC channels */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
		 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	tmp = RREG32_SOC15(VCN, 0, mmUVD_SOFT_RESET);
	tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
	tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
	WREG32_SOC15(VCN, 0, mmUVD_SOFT_RESET, tmp);

	/* disable byte swapping */
	lmi_swap_cntl = 0;
#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
#endif
	WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);

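	/*
	 * Poll UVD_STATUS for the VCPU boot-complete bit (bit 1); if the
	 * firmware does not come up within ~1s, soft-reset the VCPU and
	 * retry, up to 10 attempts.
	 */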
	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
			 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
			 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
			 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("VCN decode not responding, giving up!!!\n");
		return r;
	}

	/* enable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
		 UVD_MASTINT_EN__VCPU_EN_MASK,
		 ~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* clear the busy bit of VCN_STATUS */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0,
		 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_VMID, 0);

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);

	fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		     lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		     upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
		     lower_32_bits(ring->wptr));
	fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;

	fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
	ring = &adev->vcn.inst->ring_enc[0];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
	fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;

	fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
	ring = &adev->vcn.inst->ring_enc[1];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
	fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;

	return 0;
}

static int vcn_v2_0_stop_dpg_mode(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1,
			   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);

	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1,
			   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
		 ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	return 0;
}

static int vcn_v2_0_stop(struct amdgpu_device *adev)
{
	uint32_t tmp;
	int r;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		r = vcn_v2_0_stop_dpg_mode(adev);
		if (r)
			return r;
		goto power_off;
	}

	/* wait for uvd idle */
	r = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
	if (r)
		return r;

	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
	      UVD_LMI_STATUS__READ_CLEAN_MASK |
	      UVD_LMI_STATUS__WRITE_CLEAN_MASK |
	      UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
	r = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_LMI_STATUS, tmp, tmp);
	if (r)
		return r;

	/* stall UMC channel */
	tmp = RREG32_SOC15(VCN, 0, mmUVD_LMI_CTRL2);
	tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
	WREG32_SOC15(VCN, 0, mmUVD_LMI_CTRL2, tmp);

	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
	      UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
	r = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_LMI_STATUS, tmp, tmp);
	if (r)
		return r;

	/* disable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0,
		 ~(UVD_VCPU_CNTL__CLK_EN_MASK));

	/* reset LMI UMC */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		 UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK,
		 ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

	/* reset LMI */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		 UVD_SOFT_RESET__LMI_SOFT_RESET_MASK,
		 ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);

	/* reset VCPU */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
		 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	/* clear status */
	WREG32_SOC15(VCN, 0, mmUVD_STATUS, 0);

	vcn_v2_0_enable_clock_gating(adev);
	vcn_v2_0_enable_static_power_gating(adev);

power_off:
	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, false);

	return 0;
}

static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
				   int inst_idx, struct dpg_pause_state *new_state)
{
	struct amdgpu_ring *ring;
	uint32_t reg_data = 0;
	int ret_code;

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEBUG("dpg pause state changed %d -> %d",
			  adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
		reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
			   (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 0x1,
						      UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;

				/* pause DPG */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);

				/* wait for ACK */
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
						   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
						   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

				/* Stall DPG before WPTR/RPTR reset */
				WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
					 UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
					 ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
				/* Restore */
				fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
				ring = &adev->vcn.inst->ring_enc[0];
				ring->wptr = 0;
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
				fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;

				fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
				ring = &adev->vcn.inst->ring_enc[1];
				ring->wptr = 0;
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
				fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;

				fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
				WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
					     RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
				fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
				/* Unstall DPG */
				WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
					 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
						   UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
						   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
			}
		} else {
			/* unpause dpg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
	}

	return 0;
}

static bool vcn_v2_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
}

static int vcn_v2_0_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	ret = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
				 UVD_STATUS__IDLE);

	return ret;
}

static int vcn_v2_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (enable) {
		/* wait for STATUS to clear */
		if (!vcn_v2_0_is_idle(handle))
			return -EBUSY;
		vcn_v2_0_enable_clock_gating(adev);
	} else {
		/* disable HW gating and enable Sw gating */
		vcn_v2_0_disable_clock_gating(adev);
	}
	return 0;
}

/**
 * vcn_v2_0_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v2_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
}

/**
 * vcn_v2_0_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v2_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR);
}

/**
 * vcn_v2_0_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v2_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

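	/*
	 * In DPG mode the write pointer is mirrored in UVD_SCRATCH2 (with
	 * bit 31 set as a valid flag) so vcn_v2_0_pause_dpg_mode() can
	 * restore RBC_RB_WPTR after an unpause.
	 */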
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2,
			     lower_32_bits(ring->wptr) | 0x80000000);

	if (ring->use_doorbell) {
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

/**
 * vcn_v2_0_dec_ring_insert_start - insert a start command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a start command to the ring.
 */
void vcn_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
}

/**
 * vcn_v2_0_dec_ring_insert_end - insert an end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an end command to the ring.
 */
void vcn_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_END << 1));
}

/**
 * vcn_v2_0_dec_ring_insert_nop - insert a nop command
 *
 * @ring: amdgpu_ring pointer
 * @count: the number of NOP packets to insert
 *
 * Write a nop command to the ring.
 */
void vcn_v2_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

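	/* Decode NOPs are emitted as two-dword PACKET0 pairs, so both the
	 * write pointer and the requested count must be even. */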
1407 WARN_ON(ring->wptr % 2 || count % 2);
1408
1409 for (i = 0; i < count / 2; i++) {
1410 amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.nop, 0));
1411 amdgpu_ring_write(ring, 0);
1412 }
1413 }
1414
1415 /**
1416 * vcn_v2_0_dec_ring_emit_fence - emit an fence & trap command
1417 *
1418 * @ring: amdgpu_ring pointer
1419 * @addr: address
1420 * @seq: sequence number
1421 * @flags: fence related flags
1422 *
1423 * Write a fence and a trap command to the ring.
1424 */
vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring * ring,u64 addr,u64 seq,unsigned flags)1425 void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
1426 unsigned flags)
1427 {
1428 struct amdgpu_device *adev = ring->adev;
1429
1430 WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1431 amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.context_id, 0));
1432 amdgpu_ring_write(ring, seq);
1433
1434 amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
1435 amdgpu_ring_write(ring, addr & 0xffffffff);
1436
1437 amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
1438 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
1439
1440 amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
1441 amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_FENCE << 1));
1442
1443 amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
1444 amdgpu_ring_write(ring, 0);
1445
1446 amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
1447 amdgpu_ring_write(ring, 0);
1448
1449 amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
1450
1451 amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_TRAP << 1));
1452 }
1453
1454 /**
1455 * vcn_v2_0_dec_ring_emit_ib - execute indirect buffer
1456 *
1457 * @ring: amdgpu_ring pointer
1458 * @job: job to retrieve vmid from
1459 * @ib: indirect buffer to execute
1460 * @flags: unused
1461 *
1462 * Write ring commands to execute the indirect buffer
1463 */
vcn_v2_0_dec_ring_emit_ib(struct amdgpu_ring * ring,struct amdgpu_job * job,struct amdgpu_ib * ib,uint32_t flags)1464 void vcn_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
1465 struct amdgpu_job *job,
1466 struct amdgpu_ib *ib,
1467 uint32_t flags)
1468 {
1469 struct amdgpu_device *adev = ring->adev;
1470 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
1471
1472 amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_vmid, 0));
1473 amdgpu_ring_write(ring, vmid);
1474
1475 amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_bar_low, 0));
1476 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1477 amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_bar_high, 0));
1478 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1479 amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_size, 0));
1480 amdgpu_ring_write(ring, ib->length_dw);
1481 }
1482
vcn_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring * ring,uint32_t reg,uint32_t val,uint32_t mask)1483 void vcn_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
1484 uint32_t val, uint32_t mask)
1485 {
1486 struct amdgpu_device *adev = ring->adev;
1487
1488 amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
1489 amdgpu_ring_write(ring, reg << 2);
1490
1491 amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
1492 amdgpu_ring_write(ring, val);
1493
1494 amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.gp_scratch8, 0));
1495 amdgpu_ring_write(ring, mask);
1496
1497 amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
1498
1499 amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_REG_READ_COND_WAIT << 1));
1500 }
1501
vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring * ring,unsigned vmid,uint64_t pd_addr)1502 void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
1503 unsigned vmid, uint64_t pd_addr)
1504 {
1505 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
1506 uint32_t data0, data1, mask;
1507
1508 pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1509
1510 /* wait for register write */
1511 data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
1512 data1 = lower_32_bits(pd_addr);
1513 mask = 0xffffffff;
1514 vcn_v2_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
1515 }
1516
vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring * ring,uint32_t reg,uint32_t val)1517 void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
1518 uint32_t reg, uint32_t val)
1519 {
1520 struct amdgpu_device *adev = ring->adev;
1521
1522 amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
1523 amdgpu_ring_write(ring, reg << 2);
1524
1525 amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
1526 amdgpu_ring_write(ring, val);
1527
1528 amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
1529
1530 amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_WRITE_REG << 1));
1531 }
1532
/**
 * vcn_v2_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v2_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0])
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
}

/**
 * vcn_v2_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v2_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0]) {
		if (ring->use_doorbell)
			return adev->wb.wb[ring->wptr_offs];
		else
			return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	} else {
		if (ring->use_doorbell)
			return adev->wb.wb[ring->wptr_offs];
		else
			return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
	}
}

/**
 * vcn_v2_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v2_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0]) {
		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		}
	} else {
		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		}
	}
}

/**
 * vcn_v2_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: GPU address to write the fence sequence number to
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the enc ring.
 */
void vcn_v2_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
				  u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCN_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP);
}

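/**
 * vcn_v2_0_enc_ring_insert_end - insert an end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an end command to terminate the current submission
 * on the enc ring.
 */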
void vcn_v2_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
}

/**
 * vcn_v2_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write enc ring commands to execute the indirect buffer
 */
void vcn_v2_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
			       struct amdgpu_job *job,
			       struct amdgpu_ib *ib,
			       uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

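/**
 * vcn_v2_0_enc_ring_emit_reg_wait - emit a register wait command
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset to wait on
 * @val: value to wait for
 * @mask: mask to apply to the register value
 *
 * Emit enc ring commands that wait until the value of @reg, masked
 * with @mask, equals @val.
 */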
void vcn_v2_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
				     uint32_t val, uint32_t mask)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, val);
}

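/**
 * vcn_v2_0_enc_ring_emit_vm_flush - emit a VM TLB flush
 *
 * @ring: amdgpu_ring pointer
 * @vmid: VMID to flush
 * @pd_addr: page directory base address
 *
 * Emit the GMC TLB flush for @vmid and then wait until the new page
 * directory base has landed in the VM hub registers.
 */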
void vcn_v2_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
				     unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	vcn_v2_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
					vmid * hub->ctx_addr_distance,
					lower_32_bits(pd_addr), 0xffffffff);
}

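/**
 * vcn_v2_0_enc_ring_emit_wreg - emit a register write command
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset to write
 * @val: value to write
 *
 * Emit enc ring commands that write @val to @reg.
 */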
void vcn_v2_0_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}

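/**
 * vcn_v2_0_set_interrupt_state - set VCN block interrupt state
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @type: type of interrupt
 * @state: interrupt state to set
 *
 * Stub: nothing needs to be programmed here to change the interrupt
 * state; this only satisfies the amdgpu_irq_src_funcs interface.
 */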
static int vcn_v2_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

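/**
 * vcn_v2_0_process_interrupt - process a VCN interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Dispatch a VCN trap to fence processing on the decode or encode
 * ring that matches the interrupt source id.
 */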
static int vcn_v2_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
		amdgpu_fence_process(&adev->vcn.inst->ring_dec);
		break;
	case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst->ring_enc[0]);
		break;
	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
		amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

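/**
 * vcn_v2_0_dec_ring_test_ring - basic decode ring test
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a known value to the scratch register through the decode
 * ring and poll it back to check that the ring executes commands.
 * Skipped (reported as success) under SR-IOV.
 */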
int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 4);
	if (r)
		return r;
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

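/**
 * vcn_v2_0_set_powergating_state - set VCN block powergating state
 *
 * @handle: amdgpu_device pointer
 * @state: the requested powergating state
 *
 * Stop the VCN block when gating, (re)start it when ungating; see
 * the note below on what this does and does not gate.
 */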
static int vcn_v2_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCN block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
		return 0;
	}

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v2_0_stop(adev);
	else
		ret = vcn_v2_0_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;
	return ret;
}

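/**
 * vcn_v2_0_start_mmsch - hand an init table to the MMSCH
 *
 * @adev: amdgpu_device pointer
 * @table: memory descriptor table describing the init sequence
 *
 * Point the MMSCH at the descriptor table, reset the ring write
 * pointers, kick off initialization, and poll the mailbox response
 * register until the MMSCH reports completion or the poll times out.
 */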
static int vcn_v2_0_start_mmsch(struct amdgpu_device *adev,
				struct amdgpu_mm_table *table)
{
	uint32_t data = 0, loop;
	uint64_t addr = table->gpu_addr;
	struct mmsch_v2_0_init_header *header;
	uint32_t size;
	int i;

	header = (struct mmsch_v2_0_init_header *)table->cpu_addr;
	size = header->header_size + header->vcn_table_size;

	/* 1, write the GPU MC address of the memory descriptor
	 * to mmMMSCH_VF_CTX_ADDR_LO/HI
	 */
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));

	/* 2, update vmid of descriptor */
	data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID);
	data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	/* use domain0 for MM scheduler */
	data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID, data);

	/* 3, notify mmsch about the size of this descriptor */
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP, 0);

	adev->vcn.inst->ring_dec.wptr = 0;
	adev->vcn.inst->ring_dec.wptr_old = 0;
	vcn_v2_0_dec_ring_set_wptr(&adev->vcn.inst->ring_dec);

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		adev->vcn.inst->ring_enc[i].wptr = 0;
		adev->vcn.inst->ring_enc[i].wptr_old = 0;
		vcn_v2_0_enc_ring_set_wptr(&adev->vcn.inst->ring_enc[i]);
	}

	/* 5, kick off the initialization and wait until
	 * mmMMSCH_VF_MAILBOX_RESP signals completion (0x10000002)
	 */
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);

	data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
	loop = 1000;
	while ((data & 0x10000002) != 0x10000002) {
		udelay(10);
		data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
		loop--;
		if (!loop)
			break;
	}

	if (!loop) {
		DRM_ERROR("failed to init MMSCH, mmMMSCH_VF_MAILBOX_RESP = 0x%08x\n",
			  data);
		return -EBUSY;
	}

	return 0;
}

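/**
 * vcn_v2_0_start_sriov - start VCN under SR-IOV
 *
 * @adev: amdgpu_device pointer
 *
 * Build the MMSCH init table (UVD status, firmware location, VCPU
 * cache windows and ring buffers) in the shared mm_table, then pass
 * it to the MMSCH so the engine is programmed on behalf of the VF.
 */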
static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
{
	int r;
	uint32_t tmp;
	struct amdgpu_ring *ring;
	uint32_t offset, size;
	uint32_t table_size = 0;
	struct mmsch_v2_0_cmd_direct_write direct_wt = { {0} };
	struct mmsch_v2_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
	struct mmsch_v2_0_cmd_end end = { {0} };
	struct mmsch_v2_0_init_header *header;
	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
	uint8_t i = 0;

	header = (struct mmsch_v2_0_init_header *)init_table;
	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

	if (header->vcn_table_offset == 0 && header->vcn_table_size == 0) {
		header->version = MMSCH_VERSION;
		header->header_size = sizeof(struct mmsch_v2_0_init_header) >> 2;

		header->vcn_table_offset = header->header_size;

		init_table += header->vcn_table_offset;

		size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);

		MMSCH_V2_0_INSERT_DIRECT_RD_MOD_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
			0xFFFFFFFF, 0x00000004);

		/* mc resume */
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			tmp = AMDGPU_UCODE_ID_VCN;
			MMSCH_V2_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				adev->firmware.ucode[tmp].tmr_mc_addr_lo);
			MMSCH_V2_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				adev->firmware.ucode[tmp].tmr_mc_addr_hi);
			offset = 0;
		} else {
			MMSCH_V2_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				lower_32_bits(adev->vcn.inst->gpu_addr));
			MMSCH_V2_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				upper_32_bits(adev->vcn.inst->gpu_addr));
			offset = size;
		}

		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
			0);
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0),
			size);

		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst->gpu_addr + offset));
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst->gpu_addr + offset));
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1),
			0);
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1),
			AMDGPU_VCN_STACK_SIZE);

		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst->gpu_addr + offset +
				AMDGPU_VCN_STACK_SIZE));
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst->gpu_addr + offset +
				AMDGPU_VCN_STACK_SIZE));
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2),
			0);
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
			AMDGPU_VCN_CONTEXT_SIZE);

		for (r = 0; r < adev->vcn.num_enc_rings; ++r) {
			ring = &adev->vcn.inst->ring_enc[r];
			ring->wptr = 0;
			MMSCH_V2_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO),
				lower_32_bits(ring->gpu_addr));
			MMSCH_V2_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI),
				upper_32_bits(ring->gpu_addr));
			MMSCH_V2_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE),
				ring->ring_size / 4);
		}

		ring = &adev->vcn.inst->ring_dec;
		ring->wptr = 0;
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
			lower_32_bits(ring->gpu_addr));
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
			upper_32_bits(ring->gpu_addr));
		/* force RBC into idle state */
		tmp = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, tmp);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);

		/* add end packet */
		tmp = sizeof(struct mmsch_v2_0_cmd_end);
		memcpy((void *)init_table, &end, tmp);
		table_size += (tmp / 4);
		header->vcn_table_size = table_size;
	}

	return vcn_v2_0_start_mmsch(adev, &adev->virt.mm_table);
}

static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
	.name = "vcn_v2_0",
	.early_init = vcn_v2_0_early_init,
	.late_init = NULL,
	.sw_init = vcn_v2_0_sw_init,
	.sw_fini = vcn_v2_0_sw_fini,
	.hw_init = vcn_v2_0_hw_init,
	.hw_fini = vcn_v2_0_hw_fini,
	.suspend = vcn_v2_0_suspend,
	.resume = vcn_v2_0_resume,
	.is_idle = vcn_v2_0_is_idle,
	.wait_for_idle = vcn_v2_0_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v2_0_set_clockgating_state,
	.set_powergating_state = vcn_v2_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = vcn_v2_0_dec_ring_get_rptr,
	.get_wptr = vcn_v2_0_dec_ring_get_wptr,
	.set_wptr = vcn_v2_0_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
	.test_ring = vcn_v2_0_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
	.insert_start = vcn_v2_0_dec_ring_insert_start,
	.insert_end = vcn_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = vcn_v2_0_enc_ring_get_rptr,
	.get_wptr = vcn_v2_0_enc_ring_get_wptr,
	.set_wptr = vcn_v2_0_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	adev->vcn.inst->ring_dec.funcs = &vcn_v2_0_dec_ring_vm_funcs;
	DRM_INFO("VCN decode is enabled in VM mode\n");
}

static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		adev->vcn.inst->ring_enc[i].funcs = &vcn_v2_0_enc_ring_vm_funcs;

	DRM_INFO("VCN encode is enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs vcn_v2_0_irq_funcs = {
	.set = vcn_v2_0_set_interrupt_state,
	.process = vcn_v2_0_process_interrupt,
};

static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 1;
	adev->vcn.inst->irq.funcs = &vcn_v2_0_irq_funcs;
}

const struct amdgpu_ip_block_version vcn_v2_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &vcn_v2_0_ip_funcs,
};