/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "vcn_v2_0.h"
#include "mmsch_v1_0.h"

#include "vcn/vcn_2_5_offset.h"
#include "vcn/vcn_2_5_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"

#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET			0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET			0x0f
#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET			0x10
#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET			0x11
#define mmUVD_NO_OP_INTERNAL_OFFSET				0x29
#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET			0x66
#define mmUVD_SCRATCH9_INTERNAL_OFFSET				0xc01d

#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET			0x431
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET		0x3b4
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET		0x3b5
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET			0x25c

#define VCN25_MAX_HW_INSTANCES_ARCTURUS				2

static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v2_5_set_powergating_state(void *handle,
				enum amd_powergating_state state);
static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
				int inst_idx, struct dpg_pause_state *new_state);
static int vcn_v2_5_sriov_start(struct amdgpu_device *adev);

static int amdgpu_ih_clientid_vcns[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};
/**
 * vcn_v2_5_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v2_5_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		adev->vcn.num_vcn_inst = 2;
		adev->vcn.harvest_config = 0;
		adev->vcn.num_enc_rings = 1;
	} else {
		u32 harvest;
		int i;

		adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
			if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
				adev->vcn.harvest_config |= 1 << i;
		}
		if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
						 AMDGPU_VCN_HARVEST_VCN1))
			/* both instances are harvested, disable the block */
			return -ENOENT;

		adev->vcn.num_enc_rings = 2;
	}

	vcn_v2_5_set_dec_ring_funcs(adev);
	vcn_v2_5_set_enc_ring_funcs(adev);
	vcn_v2_5_set_irq_funcs(adev);

	return 0;
}

/**
 * vcn_v2_5_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v2_5_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;
		/* VCN DEC TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
				VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[j].irq);
		if (r)
			return r;

		/* VCN ENC TRAP */
		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
				i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[j].irq);
			if (r)
				return r;
		}
	}

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;

		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);

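		/*
		 * Arcturus carries two VCN instances but a single firmware
		 * image: register a second ucode entry so PSP reserves a
		 * separate TMR copy for instance 1 as well.
		 */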
		if (adev->vcn.num_vcn_inst == VCN25_MAX_HW_INSTANCES_ARCTURUS) {
			adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].ucode_id = AMDGPU_UCODE_ID_VCN1;
			adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].fw = adev->vcn.fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		}
		DRM_INFO("PSP loading VCN firmware\n");
	}

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
		volatile struct amdgpu_fw_shared *fw_shared;

		if (adev->vcn.harvest_config & (1 << j))
			continue;
		adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
		adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
		adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;

		adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(VCN, j, mmUVD_SCRATCH9);
		adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA0);
		adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA1);
		adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_CMD);
		adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(VCN, j, mmUVD_NO_OP);

		ring = &adev->vcn.inst[j].ring_dec;
		ring->use_doorbell = true;

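		/*
		 * Doorbell layout: bare metal reserves 8 doorbells per VCN
		 * instance (dec at slot 0, enc rings from slot 2), while
		 * SR-IOV packs each instance into 2 (dec at slot 0, enc at
		 * slot 1).
		 */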
		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
				(amdgpu_sriov_vf(adev) ? 2*j : 8*j);
		sprintf(ring->name, "vcn_dec_%d", j);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq,
				     0, AMDGPU_RING_PRIO_DEFAULT, NULL);
		if (r)
			return r;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			ring = &adev->vcn.inst[j].ring_enc[i];
			ring->use_doorbell = true;

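			/* enc rings follow the dec doorbell within the per-instance range */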
			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
					(amdgpu_sriov_vf(adev) ? (1 + i + 2*j) : (2 + i + 8*j));

			sprintf(ring->name, "vcn_enc_%d.%d", j, i);
			r = amdgpu_ring_init(adev, ring, 512,
					     &adev->vcn.inst[j].irq, 0,
					     AMDGPU_RING_PRIO_DEFAULT, NULL);
			if (r)
				return r;
		}

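		/* advertise multi-queue support to the firmware via the shared buffer */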
		fw_shared = adev->vcn.inst[j].fw_shared_cpu_addr;
		fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);
	}

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_alloc_mm_table(adev);
		if (r)
			return r;
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		adev->vcn.pause_dpg_mode = vcn_v2_5_pause_dpg_mode;

	return 0;
}

/**
 * vcn_v2_5_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v2_5_sw_fini(void *handle)
{
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	volatile struct amdgpu_fw_shared *fw_shared;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
		fw_shared->present_flag_0 = 0;
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_free_mm_table(adev);

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}

/**
 * vcn_v2_5_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v2_5_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, j, r = 0;

	if (amdgpu_sriov_vf(adev))
		r = vcn_v2_5_sriov_start(adev);

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		if (amdgpu_sriov_vf(adev)) {
			adev->vcn.inst[j].ring_enc[0].sched.ready = true;
			adev->vcn.inst[j].ring_enc[1].sched.ready = false;
			adev->vcn.inst[j].ring_enc[2].sched.ready = false;
			adev->vcn.inst[j].ring_dec.sched.ready = true;
		} else {

			ring = &adev->vcn.inst[j].ring_dec;

			adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
						ring->doorbell_index, j);

			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;

			for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
				ring = &adev->vcn.inst[j].ring_enc[i];
				r = amdgpu_ring_test_helper(ring);
				if (r)
					goto done;
			}
		}
	}

done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully (under %s).\n",
			(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");

	return r;
}

/**
 * vcn_v2_5_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v2_5_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

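		/* power gate if DPG is in use, or if the instance is not yet gated and still busy */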
		if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
		    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
		     RREG32_SOC15(VCN, i, mmUVD_STATUS)))
			vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
	}

	return 0;
}

/**
 * vcn_v2_5_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v2_5_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v2_5_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v2_5_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v2_5_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v2_5_hw_init(adev);

	return r;
}

/**
 * vcn_v2_5_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* cache window 0: fw */
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo));
			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi));
			WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
			offset = 0;
		} else {
			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				lower_32_bits(adev->vcn.inst[i].gpu_addr));
			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				upper_32_bits(adev->vcn.inst[i].gpu_addr));
			offset = size;
			WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0,
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE0, size);

		/* cache window 1: stack */
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

		/* cache window 2: context */
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

		/* non-cache window */
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[i].fw_shared_gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[i].fw_shared_gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
		WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_SIZE0,
			AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
	}
}

static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_NONCACHE_SIZE0),
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}

/**
 * vcn_v2_5_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Disable clock gating for VCN block
 */
static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* UVD disable CGC */
		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
			data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		else
			data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
		data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_GATE);
		data &= ~(UVD_CGC_GATE__SYS_MASK
			| UVD_CGC_GATE__UDEC_MASK
			| UVD_CGC_GATE__MPEG2_MASK
			| UVD_CGC_GATE__REGS_MASK
			| UVD_CGC_GATE__RBC_MASK
			| UVD_CGC_GATE__LMI_MC_MASK
			| UVD_CGC_GATE__LMI_UMC_MASK
			| UVD_CGC_GATE__IDCT_MASK
			| UVD_CGC_GATE__MPRD_MASK
			| UVD_CGC_GATE__MPC_MASK
			| UVD_CGC_GATE__LBSI_MASK
			| UVD_CGC_GATE__LRBBM_MASK
			| UVD_CGC_GATE__UDEC_RE_MASK
			| UVD_CGC_GATE__UDEC_CM_MASK
			| UVD_CGC_GATE__UDEC_IT_MASK
			| UVD_CGC_GATE__UDEC_DB_MASK
			| UVD_CGC_GATE__UDEC_MP_MASK
			| UVD_CGC_GATE__WCB_MASK
			| UVD_CGC_GATE__VCPU_MASK
			| UVD_CGC_GATE__MMSCH_MASK);

		WREG32_SOC15(VCN, i, mmUVD_CGC_GATE, data);

		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_CGC_GATE, 0, 0xFFFFFFFF);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
			| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
			| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
			| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
			| UVD_CGC_CTRL__SYS_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MODE_MASK
			| UVD_CGC_CTRL__MPEG2_MODE_MASK
			| UVD_CGC_CTRL__REGS_MODE_MASK
			| UVD_CGC_CTRL__RBC_MODE_MASK
			| UVD_CGC_CTRL__LMI_MC_MODE_MASK
			| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
			| UVD_CGC_CTRL__IDCT_MODE_MASK
			| UVD_CGC_CTRL__MPRD_MODE_MASK
			| UVD_CGC_CTRL__MPC_MODE_MASK
			| UVD_CGC_CTRL__LBSI_MODE_MASK
			| UVD_CGC_CTRL__LRBBM_MODE_MASK
			| UVD_CGC_CTRL__WCB_MODE_MASK
			| UVD_CGC_CTRL__VCPU_MODE_MASK
			| UVD_CGC_CTRL__MMSCH_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		/* turn on the SUVD clocks */
		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE);
		data |= (UVD_SUVD_CGC_GATE__SRE_MASK
			| UVD_SUVD_CGC_GATE__SIT_MASK
			| UVD_SUVD_CGC_GATE__SMP_MASK
			| UVD_SUVD_CGC_GATE__SCM_MASK
			| UVD_SUVD_CGC_GATE__SDB_MASK
			| UVD_SUVD_CGC_GATE__SRE_H264_MASK
			| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SIT_H264_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SCM_H264_MASK
			| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SDB_H264_MASK
			| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SCLR_MASK
			| UVD_SUVD_CGC_GATE__UVD_SC_MASK
			| UVD_SUVD_CGC_GATE__ENT_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
			| UVD_SUVD_CGC_GATE__SITE_MASK
			| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
			| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
			| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
			| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
			| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE, data);

		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
		data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
			| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
			| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
	}
}

static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev,
		uint8_t sram_sel, int inst_idx, uint8_t indirect)
{
	uint32_t reg_data = 0;

	/* enable sw clock gating control */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		 UVD_CGC_CTRL__SYS_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MODE_MASK |
		 UVD_CGC_CTRL__MPEG2_MODE_MASK |
		 UVD_CGC_CTRL__REGS_MODE_MASK |
		 UVD_CGC_CTRL__RBC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		 UVD_CGC_CTRL__IDCT_MODE_MASK |
		 UVD_CGC_CTRL__MPRD_MODE_MASK |
		 UVD_CGC_CTRL__MPC_MODE_MASK |
		 UVD_CGC_CTRL__LBSI_MODE_MASK |
		 UVD_CGC_CTRL__LRBBM_MODE_MASK |
		 UVD_CGC_CTRL__WCB_MODE_MASK |
		 UVD_CGC_CTRL__VCPU_MODE_MASK |
		 UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}

/**
 * vcn_v2_5_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Enable clock gating for VCN block
 */
static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* enable UVD CGC */
		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
			data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		else
			data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
		data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
			| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
			| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
			| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
			| UVD_CGC_CTRL__SYS_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MODE_MASK
			| UVD_CGC_CTRL__MPEG2_MODE_MASK
			| UVD_CGC_CTRL__REGS_MODE_MASK
			| UVD_CGC_CTRL__RBC_MODE_MASK
			| UVD_CGC_CTRL__LMI_MC_MODE_MASK
			| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
			| UVD_CGC_CTRL__IDCT_MODE_MASK
			| UVD_CGC_CTRL__MPRD_MODE_MASK
			| UVD_CGC_CTRL__MPC_MODE_MASK
			| UVD_CGC_CTRL__LBSI_MODE_MASK
			| UVD_CGC_CTRL__LRBBM_MODE_MASK
			| UVD_CGC_CTRL__WCB_MODE_MASK
			| UVD_CGC_CTRL__VCPU_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
		data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
			| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
			| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
	}
}

static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr;
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;

	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS, tmp);

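	/*
	 * In indirect mode the register writes below are not issued to the
	 * hardware directly: they are staged in the instance's DPG scratch
	 * SRAM buffer, starting at its base here, and handed to the PSP in
	 * one batch once the whole sequence has been built.
	 */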
	if (indirect)
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;

	/* enable clock gating */
	vcn_v2_5_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MASTINT_EN), 0, 0, indirect);

	/* setup mmUVD_LMI_CTRL */
	tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MPC_SET_MUXB0),
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v2_5_mc_resume_dpg_mode(adev, inst_idx, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);

	/* enable LMI MC and UMC channels */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);

	/* unblock VCPU register access */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

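	/*
	 * Flush the staged programming: hand the SRAM image to the PSP,
	 * sized by how far the write cursor advanced past the buffer base.
	 */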
	if (indirect)
		psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
				    (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
					       (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr));

	ring = &adev->vcn.inst[inst_idx].ring_dec;
	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp);

	/* Stall DPG before WPTR/RPTR reset */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
		UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
		~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
	fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;

	/* set the write pointer delay */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
		(upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
		lower_32_bits(ring->wptr));

	fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
	/* Unstall DPG */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
		0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

	return 0;
}

static int vcn_v2_5_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;
	int i, j, k, r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v2_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
			continue;
		}

		/* disable register anti-hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS), 0,
			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

		/* set uvd status busy */
		tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
		WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		return 0;

	/* SW clock gating */
	vcn_v2_5_disable_clock_gating(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* enable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

		/* disable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* setup mmUVD_LMI_CTRL */
		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
		tmp &= ~0xff;
		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp | 0x8 |
			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

		/* setup mmUVD_MPC_CNTL */
		tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
		tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
		tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);

		/* setup UVD_MPC_SET_MUXA0 */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
			((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

		/* setup UVD_MPC_SET_MUXB0 */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
			((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

		/* setup mmUVD_MPC_SET_MUX */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
			((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
			 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
	}

	vcn_v2_5_mc_resume(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;

		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* VCN global tiling registers */
		WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
		WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);

		/* enable LMI MC and UMC channels */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		/* unblock VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

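		/*
		 * Boot poll: give the VCPU up to 10 chances to report ready
		 * in UVD_STATUS, pulsing BLK_RST between attempts when it
		 * does not respond.
		 */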
		for (k = 0; k < 10; ++k) {
			uint32_t status;

			for (j = 0; j < 100; ++j) {
				status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
				if (status & 2)
					break;
				if (amdgpu_emu_mode == 1)
					msleep(500);
				else
					mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
			WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
				UVD_VCPU_CNTL__BLK_RST_MASK,
				~UVD_VCPU_CNTL__BLK_RST_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
				~UVD_VCPU_CNTL__BLK_RST_MASK);

			mdelay(10);
			r = -1;
		}

		if (r) {
			DRM_ERROR("VCN decode not responding, giving up!!!\n");
			return r;
		}

		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
			UVD_MASTINT_EN__VCPU_EN_MASK,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* clear the busy bit of VCN_STATUS */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);

		ring = &adev->vcn.inst[i].ring_dec;
		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);

		fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);

		ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
			lower_32_bits(ring->wptr));
		fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;

		fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
		ring = &adev->vcn.inst[i].ring_enc[0];
		WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
		fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;

		fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
		ring = &adev->vcn.inst[i].ring_enc[1];
		WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
		fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
	}

	return 0;
}

static int vcn_v2_5_mmsch_start(struct amdgpu_device *adev,
				struct amdgpu_mm_table *table)
{
	uint32_t data = 0, loop = 0, size = 0;
	uint64_t addr = table->gpu_addr;
	struct mmsch_v1_1_init_header *header = NULL;

	header = (struct mmsch_v1_1_init_header *)table->cpu_addr;
	size = header->total_size;

	/*
	 * 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of
	 * memory descriptor location
	 */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));

	/* 2, update vmid of descriptor */
	data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID);
	data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	/* use domain0 for MM scheduler */
	data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID, data);

	/* 3, notify mmsch about the size of this descriptor */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP, 0);

	/*
	 * 5, kick off the initialization and wait until
	 * VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero
	 */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);

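	/* poll for the MMSCH ack pattern (0x10000002), up to 10 x 100us */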
	data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
	loop = 10;
	while ((data & 0x10000002) != 0x10000002) {
		udelay(100);
		data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
		loop--;
		if (!loop)
			break;
	}

	if (!loop) {
		dev_err(adev->dev,
			"failed to init MMSCH, mmMMSCH_VF_MAILBOX_RESP = %x\n",
			data);
		return -EBUSY;
	}

	return 0;
}

static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t offset, size, tmp, i, rb_bufsz;
	uint32_t table_size = 0;
	struct mmsch_v1_0_cmd_direct_write direct_wt = { { 0 } };
	struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { { 0 } };
	struct mmsch_v1_0_cmd_end end = { { 0 } };
	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
	struct mmsch_v1_1_init_header *header = (struct mmsch_v1_1_init_header *)init_table;

	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

	header->version = MMSCH_VERSION;
	header->total_size = sizeof(struct mmsch_v1_1_init_header) >> 2;
	init_table += header->total_size;

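	/*
	 * Build one register-programming table per VCN engine; the MMSCH
	 * firmware replays these writes on the VF's behalf, since the VF
	 * cannot program the registers directly under SR-IOV.
	 */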
	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		header->eng[i].table_offset = header->total_size;
		header->eng[i].init_status = 0;
		header->eng[i].table_size = 0;

		table_size = 0;

		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS),
			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);

		size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
		/* mc resume */
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
			offset = 0;
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0), 0);
		} else {
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				lower_32_bits(adev->vcn.inst[i].gpu_addr));
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				upper_32_bits(adev->vcn.inst[i].gpu_addr));
			offset = size;
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0),
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}

		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE0),
			size);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET1),
			0);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE1),
			AMDGPU_VCN_STACK_SIZE);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset +
				AMDGPU_VCN_STACK_SIZE));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset +
				AMDGPU_VCN_STACK_SIZE));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET2),
			0);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE2),
			AMDGPU_VCN_CONTEXT_SIZE);

		ring = &adev->vcn.inst[i].ring_enc[0];
		ring->wptr = 0;

		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_LO),
			lower_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_HI),
			upper_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_RB_SIZE),
			ring->ring_size / 4);

		ring = &adev->vcn.inst[i].ring_dec;
		ring->wptr = 0;
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
			lower_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
			upper_32_bits(ring->gpu_addr));

		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_RBC_RB_CNTL), tmp);

		/* add end packet */
		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
		table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
		init_table += sizeof(struct mmsch_v1_0_cmd_end) / 4;

		/* refine header */
		header->eng[i].table_size = table_size;
		header->total_size += table_size;
	}

	return vcn_v2_5_mmsch_start(adev, &adev->virt.mm_table);
}

static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t tmp;

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);

	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	return 0;
}

static int vcn_v2_5_stop(struct amdgpu_device *adev)
{
	uint32_t tmp;
	int i, r = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v2_5_stop_dpg_mode(adev, i);
			continue;
		}

		/* wait for vcn idle */
		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
		if (r)
			return r;

		tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__READ_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;

		/* block LMI UMC channel */
		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);

		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
			UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;

		/* block VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
			UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* reset VCPU */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__BLK_RST_MASK,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		/* disable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
			~(UVD_VCPU_CNTL__CLK_EN_MASK));

		/* clear status */
		WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);

		vcn_v2_5_enable_clock_gating(adev);

		/* enable register anti-hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS),
			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	}

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, false);

	return 0;
}

static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
				int inst_idx, struct dpg_pause_state *new_state)
{
	struct amdgpu_ring *ring;
	uint32_t reg_data = 0;
	int ret_code = 0;

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEBUG("dpg pause state changed %d -> %d",
			adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
		reg_data = RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr;

				/* pause DPG */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);

				/* wait for ACK */
				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_DPG_PAUSE,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

				/* Stall DPG before WPTR/RPTR reset */
				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
					UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
					~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

				/* Restore */
				fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
				ring = &adev->vcn.inst[inst_idx].ring_enc[0];
				ring->wptr = 0;
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
				fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;

				fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
				ring = &adev->vcn.inst[inst_idx].ring_enc[1];
				ring->wptr = 0;
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
				fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;

				/* Unstall DPG */
				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
					0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS,
					UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
			}
		} else {
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
			SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
		}
		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
	}

	return 0;
}
1467
1468 /**
1469 * vcn_v2_5_dec_ring_get_rptr - get read pointer
1470 *
1471 * @ring: amdgpu_ring pointer
1472 *
1473 * Returns the current hardware read pointer
1474 */
vcn_v2_5_dec_ring_get_rptr(struct amdgpu_ring * ring)1475 static uint64_t vcn_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
1476 {
1477 struct amdgpu_device *adev = ring->adev;
1478
1479 return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_RPTR);
1480 }
1481
1482 /**
1483 * vcn_v2_5_dec_ring_get_wptr - get write pointer
1484 *
1485 * @ring: amdgpu_ring pointer
1486 *
1487 * Returns the current hardware write pointer
1488 */
vcn_v2_5_dec_ring_get_wptr(struct amdgpu_ring * ring)1489 static uint64_t vcn_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
1490 {
1491 struct amdgpu_device *adev = ring->adev;
1492
1493 if (ring->use_doorbell)
1494 return adev->wb.wb[ring->wptr_offs];
1495 else
1496 return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR);
1497 }
1498
1499 /**
1500 * vcn_v2_5_dec_ring_set_wptr - set write pointer
1501 *
1502 * @ring: amdgpu_ring pointer
1503 *
1504 * Commits the write pointer to the hardware
1505 */
vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring * ring)1506 static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
1507 {
1508 struct amdgpu_device *adev = ring->adev;
1509
1510 if (ring->use_doorbell) {
1511 adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
1512 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
1513 } else {
1514 WREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
1515 }
1516 }
1517
static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.vmhub = AMDGPU_MMHUB_1,
	.get_rptr = vcn_v2_5_dec_ring_get_rptr,
	.get_wptr = vcn_v2_5_dec_ring_get_wptr,
	.set_wptr = vcn_v2_5_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
	.test_ring = vcn_v2_0_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
	.insert_start = vcn_v2_0_dec_ring_insert_start,
	.insert_end = vcn_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

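/*
 * VCN 2.6 (Aldebaran) decode ring table: identical to the 2.5 table
 * except that the ring uses MMHUB 0 for its VM hub instead of MMHUB 1.
 */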
static const struct amdgpu_ring_funcs vcn_v2_6_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = vcn_v2_5_dec_ring_get_rptr,
	.get_wptr = vcn_v2_5_dec_ring_get_wptr,
	.set_wptr = vcn_v2_5_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
	.test_ring = vcn_v2_0_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
	.insert_start = vcn_v2_0_dec_ring_insert_start,
	.insert_end = vcn_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

/**
 * vcn_v2_5_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v2_5_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR2);
}

/**
 * vcn_v2_5_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell)
			return adev->wb.wb[ring->wptr_offs];
		else
			return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR);
	} else {
		if (ring->use_doorbell)
			return adev->wb.wb[ring->wptr_offs];
		else
			return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2);
	}
}

/**
 * vcn_v2_5_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		}
	} else {
		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		}
	}
}

static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.vmhub = AMDGPU_MMHUB_1,
	.get_rptr = vcn_v2_5_enc_ring_get_rptr,
	.get_wptr = vcn_v2_5_enc_ring_get_wptr,
	.set_wptr = vcn_v2_5_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

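/*
 * As with decode, the VCN 2.6 encode ring table differs from the 2.5
 * one only in using MMHUB 0 rather than MMHUB 1.
 */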
static const struct amdgpu_ring_funcs vcn_v2_6_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = vcn_v2_5_enc_ring_get_rptr,
	.get_wptr = vcn_v2_5_enc_ring_get_wptr,
	.set_wptr = vcn_v2_5_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

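/**
 * vcn_v2_5_set_dec_ring_funcs - set dec ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Hook up the decode ring functions for every unharvested VCN
 * instance, picking the Arcturus or Aldebaran table by ASIC type.
 */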
static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->asic_type == CHIP_ARCTURUS)
			adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
		else /* CHIP_ALDEBARAN */
			adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_6_dec_ring_vm_funcs;
		adev->vcn.inst[i].ring_dec.me = i;
		DRM_INFO("VCN(%d) decode is enabled in VM mode\n", i);
	}
}

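/**
 * vcn_v2_5_set_enc_ring_funcs - set enc ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Hook up the encode ring functions for every encode ring of every
 * unharvested VCN instance, picking the Arcturus or Aldebaran table
 * by ASIC type.
 */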
static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			if (adev->asic_type == CHIP_ARCTURUS)
				adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
			else /* CHIP_ALDEBARAN */
				adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_6_enc_ring_vm_funcs;
			adev->vcn.inst[j].ring_enc[i].me = j;
		}
		DRM_INFO("VCN(%d) encode is enabled in VM mode\n", j);
	}
}

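/**
 * vcn_v2_5_is_idle - check VCN block idle status
 *
 * @handle: amdgpu_device pointer
 *
 * Returns true only if every unharvested VCN instance reports
 * UVD_STATUS__IDLE.
 */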
static bool vcn_v2_5_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 1;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE);
	}

	return ret;
}

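/**
 * vcn_v2_5_wait_for_idle - wait for VCN block to idle
 *
 * @handle: amdgpu_device pointer
 *
 * Polls UVD_STATUS on each unharvested instance until it reads
 * UVD_STATUS__IDLE; returns a negative error code if any instance
 * times out.
 */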
static int vcn_v2_5_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		ret = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
			UVD_STATUS__IDLE);
		if (ret)
			return ret;
	}

	return ret;
}

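/**
 * vcn_v2_5_set_clockgating_state - set VCN clock gating state
 *
 * @handle: amdgpu_device pointer
 * @state: clock gating state to program
 *
 * Enable or disable VCN clock gating.  Gating is refused with -EBUSY
 * while the block is still busy; nothing is programmed under SR-IOV.
 */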
static int vcn_v2_5_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (enable) {
		if (!vcn_v2_5_is_idle(handle))
			return -EBUSY;
		vcn_v2_5_enable_clock_gating(adev);
	} else {
		vcn_v2_5_disable_clock_gating(adev);
	}

	return 0;
}

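/**
 * vcn_v2_5_set_powergating_state - set VCN power gating state
 *
 * @handle: amdgpu_device pointer
 * @state: power gating state to program
 *
 * Gating stops the block via vcn_v2_5_stop(); ungating restarts it
 * via vcn_v2_5_start().  The cached state is updated only when the
 * transition succeeds.  Nothing is programmed under SR-IOV.
 */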
static int vcn_v2_5_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v2_5_stop(adev);
	else
		ret = vcn_v2_5_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;

	return ret;
}

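/**
 * vcn_v2_5_set_interrupt_state - set VCN interrupt state
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @type: interrupt type
 * @state: requested interrupt state
 *
 * Stub: no per-type enable programming is needed for these sources.
 */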
static int vcn_v2_5_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

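/**
 * vcn_v2_5_process_interrupt - process VCN block interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Map the IH client id to a VCN instance, then run fence processing
 * on the ring (decode, encode general purpose, or encode low
 * latency) selected by the interrupt source id.
 */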
static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_VCN:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_VCN1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec);
		break;
	case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
		break;
	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs vcn_v2_5_irq_funcs = {
	.set = vcn_v2_5_set_interrupt_state,
	.process = vcn_v2_5_process_interrupt,
};

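/**
 * vcn_v2_5_set_irq_funcs - set VCN irq functions
 *
 * @adev: amdgpu_device pointer
 *
 * Register the irq callbacks for each unharvested instance; each
 * instance exposes one interrupt type per encode ring plus one for
 * decode.
 */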
static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
		adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs;
	}
}

static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
	.name = "vcn_v2_5",
	.early_init = vcn_v2_5_early_init,
	.late_init = NULL,
	.sw_init = vcn_v2_5_sw_init,
	.sw_fini = vcn_v2_5_sw_fini,
	.hw_init = vcn_v2_5_hw_init,
	.hw_fini = vcn_v2_5_hw_fini,
	.suspend = vcn_v2_5_suspend,
	.resume = vcn_v2_5_resume,
	.is_idle = vcn_v2_5_is_idle,
	.wait_for_idle = vcn_v2_5_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v2_5_set_clockgating_state,
	.set_powergating_state = vcn_v2_5_set_powergating_state,
};

static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
	.name = "vcn_v2_6",
	.early_init = vcn_v2_5_early_init,
	.late_init = NULL,
	.sw_init = vcn_v2_5_sw_init,
	.sw_fini = vcn_v2_5_sw_fini,
	.hw_init = vcn_v2_5_hw_init,
	.hw_fini = vcn_v2_5_hw_fini,
	.suspend = vcn_v2_5_suspend,
	.resume = vcn_v2_5_resume,
	.is_idle = vcn_v2_5_is_idle,
	.wait_for_idle = vcn_v2_5_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v2_5_set_clockgating_state,
	.set_powergating_state = vcn_v2_5_set_powergating_state,
};

const struct amdgpu_ip_block_version vcn_v2_5_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 2,
	.minor = 5,
	.rev = 0,
	.funcs = &vcn_v2_5_ip_funcs,
};

const struct amdgpu_ip_block_version vcn_v2_6_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 2,
	.minor = 6,
	.rev = 0,
	.funcs = &vcn_v2_6_ip_funcs,
};