/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "mmsch_v1_0.h"

#include "uvd/uvd_7_0_offset.h"
#include "uvd/uvd_7_0_sh_mask.h"
#include "vce/vce_4_0_offset.h"
#include "vce/vce_4_0_default.h"
#include "vce/vce_4_0_sh_mask.h"
#include "nbif/nbif_6_1_offset.h"
#include "hdp/hdp_4_0_offset.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "mmhub/mmhub_1_0_sh_mask.h"
#include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"

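/*
 * UVD_PG0_CC_UVD_HARVESTING is presumably missing from the generated
 * uvd_7_0 register headers, hence the local definitions below.  A set
 * UVD_DISABLE bit marks the instance as fused off; see
 * uvd_v7_0_early_init().
 */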
#define mmUVD_PG0_CC_UVD_HARVESTING			0x00c7
#define mmUVD_PG0_CC_UVD_HARVESTING_BASE_IDX		1
//UVD_PG0_CC_UVD_HARVESTING
#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE__SHIFT	0x1
#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK	0x00000002L

#define UVD7_MAX_HW_INSTANCES_VEGA20			2

static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v7_0_start(struct amdgpu_device *adev);
static void uvd_v7_0_stop(struct amdgpu_device *adev);
static int uvd_v7_0_sriov_start(struct amdgpu_device *adev);

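/* IH client IDs, indexed by UVD instance */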
static int amdgpu_ih_clientid_uvds[] = {
	SOC15_IH_CLIENTID_UVD,
	SOC15_IH_CLIENTID_UVD1
};

/**
 * uvd_v7_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v7_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
}

/**
 * uvd_v7_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v7_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v7_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
}

/**
 * uvd_v7_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v7_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

/**
 * uvd_v7_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		return;
	}

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR,
			     lower_32_bits(ring->wptr));
	else
		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2,
			     lower_32_bits(ring->wptr));
}

/**
 * uvd_v7_0_enc_ring_test_ring - test if UVD ENC ring is working
 *
 * @ring: the engine to test on
 *
 */
static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

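	/*
	 * Skipped under SRIOV, presumably because the VF cannot drive the
	 * ring through direct register access; the MMSCH sets the rings up
	 * instead (see uvd_v7_0_sriov_start()).
	 */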
	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: uvd enc failed to lock (%d)ring %d (%d).\n",
			  ring->me, ring->idx, r);
		return r;
	}

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("(%d)ring test on %d succeeded in %d usecs\n",
			  ring->me, ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: (%d)ring %d test failed\n",
			  ring->me, ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}

/**
 * uvd_v7_0_enc_get_create_msg - generate a UVD ENC create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				       struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * uvd_v7_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @direct: submit the IB directly to the ring instead of via the scheduler
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
				 bool direct, struct dma_fence **fence);
int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
				 bool direct, struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002;
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (direct)
		r = amdgpu_job_submit_direct(job, ring, &f);
	else
		r = amdgpu_job_submit(job, &ring->adev->vce.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * uvd_v7_0_enc_ring_test_ib - test if UVD ENC IBs are working
 *
 * @ring: the engine to test on
 * @timeout: how long to wait for the fence, in jiffies
 *
 */
static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	r = uvd_v7_0_enc_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ring->me, r);
		goto error;
	}

	r = uvd_v7_0_enc_get_destroy_msg(ring, 1, true, &fence);
	if (r) {
		DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ring->me, r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ring->me);
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ring->me, r);
	} else {
		DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ring->me, ring->idx);
		r = 0;
	}
error:
	dma_fence_put(fence);
	return r;
}

static int uvd_v7_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_VEGA20) {
		u32 harvest;
		int i;

		adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			harvest = RREG32_SOC15(UVD, i, mmUVD_PG0_CC_UVD_HARVESTING);
			if (harvest & UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK) {
				adev->uvd.harvest_config |= 1 << i;
			}
		}
		if (adev->uvd.harvest_config == (AMDGPU_UVD_HARVEST_UVD0 |
						 AMDGPU_UVD_HARVEST_UVD1))
			/* both instances are harvested, disable the block */
			return -ENOENT;
	} else {
		adev->uvd.num_uvd_inst = 1;
	}

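	/* under SRIOV only the first encode ring is used, see uvd_v7_0_sw_init() */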
	if (amdgpu_sriov_vf(adev))
		adev->uvd.num_enc_rings = 1;
	else
		adev->uvd.num_enc_rings = 2;
	uvd_v7_0_set_ring_funcs(adev);
	uvd_v7_0_set_enc_ring_funcs(adev);
	uvd_v7_0_set_irq_funcs(adev);

	return 0;
}

static int uvd_v7_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;

	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		/* UVD TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->uvd.inst[j].irq);
		if (r)
			return r;

		/* UVD ENC TRAP */
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + UVD_7_0__SRCID__UVD_ENC_GEN_PURP, &adev->uvd.inst[j].irq);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;
		hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].ucode_id = AMDGPU_UCODE_ID_UVD;
		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].fw = adev->uvd.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		DRM_INFO("PSP loading UVD firmware\n");
	}

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		if (!amdgpu_sriov_vf(adev)) {
			ring = &adev->uvd.inst[j].ring;
			ksprintf(ring->name, "uvd<%d>", j);
			r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
			if (r)
				return r;
		}

		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst[j].ring_enc[i];
			ksprintf(ring->name, "uvd_enc%d<%d>", i, j);
			if (amdgpu_sriov_vf(adev)) {
				ring->use_doorbell = true;

				/* currently only use the first encoding ring
				 * for sriov, so set an unused location for
				 * the other rings.
				 */
				if (i == 0)
					ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2;
				else
					ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING2_3 * 2 + 1;
			}
			r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	r = amdgpu_uvd_entity_init(adev);
	if (r)
		return r;

	r = amdgpu_virt_alloc_mm_table(adev);
	if (r)
		return r;

	return r;
}

static int uvd_v7_0_sw_fini(void *handle)
{
	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_virt_free_mm_table(adev);

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
	}
	return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v7_0_hw_init - start and test UVD block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v7_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	uint32_t tmp;
	int i, j, r;

	if (amdgpu_sriov_vf(adev))
		r = uvd_v7_0_sriov_start(adev);
	else
		r = uvd_v7_0_start(adev);
	if (r)
		goto done;

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		ring = &adev->uvd.inst[j].ring;

		if (!amdgpu_sriov_vf(adev)) {
			ring->ready = true;
			r = amdgpu_ring_test_ring(ring);
			if (r) {
				ring->ready = false;
				goto done;
			}

			r = amdgpu_ring_alloc(ring, 10);
			if (r) {
				DRM_ERROR("amdgpu: (%d)ring failed to lock UVD ring (%d).\n", j, r);
				goto done;
			}

			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0);
			amdgpu_ring_write(ring, tmp);
			amdgpu_ring_write(ring, 0xFFFFF);

			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0);
			amdgpu_ring_write(ring, tmp);
			amdgpu_ring_write(ring, 0xFFFFF);

			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0);
			amdgpu_ring_write(ring, tmp);
			amdgpu_ring_write(ring, 0xFFFFF);

			/* Clear timeout status bits */
			amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_TIMEOUT_STATUS), 0));
			amdgpu_ring_write(ring, 0x8);

			amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_CNTL), 0));
			amdgpu_ring_write(ring, 3);

			amdgpu_ring_commit(ring);
		}

		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst[j].ring_enc[i];
			ring->ready = true;
			r = amdgpu_ring_test_ring(ring);
			if (r) {
				ring->ready = false;
				goto done;
			}
		}
	}
done:
	if (!r)
		DRM_INFO("UVD and UVD ENC initialized successfully.\n");

	return r;
}

/**
 * uvd_v7_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v7_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	if (!amdgpu_sriov_vf(adev))
		uvd_v7_0_stop(adev);
	else {
		/* full access mode, so don't touch any UVD register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
	}

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		adev->uvd.inst[i].ring.ready = false;
	}

	return 0;
}

static int uvd_v7_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v7_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v7_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v7_0_hw_init(adev);
}

/**
 * uvd_v7_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
	uint32_t offset;
	int i;

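	/*
	 * Each instance exposes three VCPU cache windows: window 0 maps the
	 * firmware image, window 1 the heap and window 2 the stack plus the
	 * per-session context area.
	 */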
	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
			offset = 0;
		} else {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				lower_32_bits(adev->uvd.inst[i].gpu_addr));
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				upper_32_bits(adev->uvd.inst[i].gpu_addr));
			offset = size;
		}

		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
			     AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);

		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
			     lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
			     upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE);

		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
			     lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
			     upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2,
			     AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));

		WREG32_SOC15(UVD, i, mmUVD_UDEC_ADDR_CONFIG,
			     adev->gfx.config.gb_addr_config);
		WREG32_SOC15(UVD, i, mmUVD_UDEC_DB_ADDR_CONFIG,
			     adev->gfx.config.gb_addr_config);
		WREG32_SOC15(UVD, i, mmUVD_UDEC_DBW_ADDR_CONFIG,
			     adev->gfx.config.gb_addr_config);

		WREG32_SOC15(UVD, i, mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
	}
}

static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
				struct amdgpu_mm_table *table)
{
	uint32_t data = 0, loop;
	uint64_t addr = table->gpu_addr;
	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr;
	uint32_t size;
	int i;

	size = header->header_size + header->vce_table_size + header->uvd_table_size;

	/* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of memory descriptor location */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));

	/* 2, update vmid of descriptor */
	data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID);
	data &= ~VCE_MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	data |= (0 << VCE_MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); /* use domain0 for MM scheduler */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID, data);

	/* 3, notify mmsch about the size of this descriptor */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0);
		adev->wb.wb[adev->uvd.inst[i].ring_enc[0].wptr_offs] = 0;
		adev->uvd.inst[i].ring_enc[0].wptr = 0;
		adev->uvd.inst[i].ring_enc[0].wptr_old = 0;
	}
	/* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001);

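	/*
	 * The MMSCH is expected to acknowledge by setting the response
	 * mailbox to 0x10000002 once the descriptor has been processed.
	 */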
	data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
	loop = 1000;
	while ((data & 0x10000002) != 0x10000002) {
		udelay(10);
		data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
		loop--;
		if (!loop)
			break;
	}

	if (!loop) {
		dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
		return -EBUSY;
	}

	return 0;
}

static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t offset, size, tmp;
	uint32_t table_size = 0;
	struct mmsch_v1_0_cmd_direct_write direct_wt = { {0} };
	struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
	struct mmsch_v1_0_cmd_direct_polling direct_poll = { {0} };
	struct mmsch_v1_0_cmd_end end = { {0} };
	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table;
	uint8_t i = 0;

	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

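	/*
	 * Build the per-instance init table: the same register programming
	 * as uvd_v7_0_mc_resume()/uvd_v7_0_start(), recorded as direct
	 * write/read-modify-write/poll commands for the MMSCH to replay.
	 */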
	if (header->uvd_table_offset == 0 && header->uvd_table_size == 0) {
		header->version = MMSCH_VERSION;
		header->header_size = sizeof(struct mmsch_v1_0_init_header) >> 2;

		if (header->vce_table_offset == 0 && header->vce_table_size == 0)
			header->uvd_table_offset = header->header_size;
		else
			header->uvd_table_offset = header->vce_table_size + header->vce_table_offset;

		init_table += header->uvd_table_offset;

		for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;
			ring = &adev->uvd.inst[i].ring;
			ring->wptr = 0;
			size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->datasize + 4);

			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
							   0xFFFFFFFF, 0x00000004);
			/* mc resume */
			if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
							    lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
							    upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
				offset = 0;
			} else {
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
							    lower_32_bits(adev->uvd.inst[i].gpu_addr));
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
							    upper_32_bits(adev->uvd.inst[i].gpu_addr));
				offset = size;
			}

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
						    AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), size);

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
						    lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
						    upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE);

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
						    lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
						    upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
						    AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
			/* mc resume end */

			/* disable clock gating */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_CGC_CTRL),
							   ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK, 0);

			/* disable interrupt */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
							   ~UVD_MASTINT_EN__VCPU_EN_MASK, 0);

			/* stall UMC and register bus before resetting VCPU */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
							   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
							   UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

			/* put LMI, VCPU, RBC etc... into reset */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
						    (uint32_t)(UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK));

			/* initialize UVD memory controller */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL),
						    (uint32_t)((0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
							       UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
							       UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
							       UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
							       UVD_LMI_CTRL__REQ_MODE_MASK |
							       0x00100000L));

			/* take all subblocks out of reset, except VCPU */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
						    UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

			/* enable VCPU clock */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
						    UVD_VCPU_CNTL__CLK_EN_MASK);

			/* enable master interrupt */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
							   ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
							   (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

			/* clear the bit 4 of UVD_STATUS */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
							   ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT), 0);

			/* force RBC into idle state */
			size = order_base_2(ring->ring_size);
			tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size);
			tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);

			ring = &adev->uvd.inst[i].ring_enc[0];
			ring->wptr = 0;
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO), ring->gpu_addr);
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE), ring->ring_size / 4);

			/* boot up the VCPU */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET), 0);

			/* enable UMC */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
							   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);

			MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0x02, 0x02);
		}
		/* add end packet */
		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
		table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
		header->uvd_table_size = table_size;

	}
	return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table);
}

/**
 * uvd_v7_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v7_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, k, r;

	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
		if (adev->uvd.harvest_config & (1 << k))
			continue;
		/* disable DPG */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_POWER_STATUS), 0,
			 ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
	}

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v7_0_mc_resume(adev);

	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
		if (adev->uvd.harvest_config & (1 << k))
			continue;
		ring = &adev->uvd.inst[k].ring;
		/* disable clock gating */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_CGC_CTRL), 0,
			 ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK);

		/* disable interrupt */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN), 0,
			 ~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* stall UMC and register bus before resetting VCPU */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2),
			 UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
			 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
		mdelay(1);

		/* put LMI, VCPU, RBC etc... into reset */
		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
			     UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
			     UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
			     UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
			     UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
			     UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
			     UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
			     UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
			     UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
		mdelay(5);

		/* initialize UVD memory controller */
		WREG32_SOC15(UVD, k, mmUVD_LMI_CTRL,
			     (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
			     UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			     UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			     UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
			     UVD_LMI_CTRL__REQ_MODE_MASK |
			     0x00100000L);

#ifdef __BIG_ENDIAN
		/* swap (8 in 32) RB and IB */
		lmi_swap_cntl = 0xa;
		mp_swap_cntl = 0;
#endif
		WREG32_SOC15(UVD, k, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
		WREG32_SOC15(UVD, k, mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA0, 0x40c2040);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA1, 0x0);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB0, 0x40c2040);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB1, 0x0);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_ALU, 0);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUX, 0x88);

		/* take all subblocks out of reset, except VCPU */
		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
			     UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(5);

		/* enable VCPU clock */
		WREG32_SOC15(UVD, k, mmUVD_VCPU_CNTL,
			     UVD_VCPU_CNTL__CLK_EN_MASK);

		/* enable UMC */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2), 0,
			 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		/* boot up the VCPU */
		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET, 0);
		mdelay(10);

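		/*
		 * Poll UVD_STATUS for the VCPU-started bit; if it never
		 * comes up, pulse the VCPU soft reset and retry, giving
		 * up after ten attempts.
		 */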
		for (i = 0; i < 10; ++i) {
			uint32_t status;

			for (j = 0; j < 100; ++j) {
				status = RREG32_SOC15(UVD, k, mmUVD_STATUS);
				if (status & 2)
					break;
				mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("UVD(%d) not responding, trying to reset the VCPU!!!\n", k);
			WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET),
				 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
				 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET), 0,
				 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
			mdelay(10);
			r = -1;
		}

		if (r) {
			DRM_ERROR("UVD(%d) not responding, giving up!!!\n", k);
			return r;
		}
		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN),
			 (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
			 ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

		/* clear the bit 4 of UVD_STATUS */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_STATUS), 0,
			 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_CNTL, tmp);

		/* set the write pointer delay */
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR_CNTL, 0);

		/* set the wb address */
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR_ADDR,
			     (upper_32_bits(ring->gpu_addr) >> 2));

		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			     lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			     upper_32_bits(ring->gpu_addr));

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR, 0);

		ring->wptr = RREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR);
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR,
			     lower_32_bits(ring->wptr));

		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_RBC_RB_CNTL), 0,
			 ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

		ring = &adev->uvd.inst[k].ring_enc[0];
		WREG32_SOC15(UVD, k, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, k, mmUVD_RB_SIZE, ring->ring_size / 4);

		ring = &adev->uvd.inst[k].ring_enc[1];
		WREG32_SOC15(UVD, k, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, k, mmUVD_RB_SIZE2, ring->ring_size / 4);
	}
	return 0;
}

/**
 * uvd_v7_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block
 */
static void uvd_v7_0_stop(struct amdgpu_device *adev)
{
	uint8_t i = 0;

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		/* force RBC into idle state */
		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101);

		/* Stall UMC and register bus before resetting VCPU */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
			 UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
			 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
		mdelay(1);

		/* put VCPU into reset */
		WREG32_SOC15(UVD, i, mmUVD_SOFT_RESET,
			     UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(5);

		/* disable VCPU clock */
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CNTL, 0x0);

		/* Unstall UMC and register bus */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
			 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
	}
}

/**
 * uvd_v7_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: GPU address to write the fence sequence number to
 * @seq: sequence number to emit
 * @flags: fence flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, uint64_t addr, uint64_t seq,
				     unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

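	/*
	 * Write the sequence number to UVD_CONTEXT_ID and the 40-bit fence
	 * address through the GPCOM data registers, then issue command 0
	 * (fence) followed by command 2 (trap).
	 */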
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v7_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: GPU address to write the fence sequence number to
 * @seq: sequence number to emit
 * @flags: fence flags
 *
 * Write an enc fence and a trap command to the ring.
 */
static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, uint64_t addr,
					 uint64_t seq, unsigned flags)
{

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
}

/**
 * uvd_v7_0_ring_emit_hdp_flush - skip HDP flushing
 *
 * @ring: amdgpu_ring pointer
 */
static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	/* The firmware doesn't seem to like touching registers at this point. */
}

/**
 * uvd_v7_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: (%d)cp failed to lock ring %d (%d).\n",
			  ring->me, ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("(%d)ring test on %d succeeded in %d usecs\n",
			  ring->me, ring->idx, i);
	} else {
		DRM_ERROR("(%d)amdgpu: ring %d test failed (0x%08X)\n",
			  ring->me, ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

/**
 * uvd_v7_0_ring_patch_cs_in_place - Patch the IB for command submission.
 *
 * @p: the CS parser with the IBs
 * @ib_idx: which IB to patch
 *
 */
static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
					   uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	unsigned i;

	/* No patching necessary for the first instance */
	if (!p->ring->me)
		return 0;

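	/*
	 * The IB was written against the register offsets of the first
	 * instance; rebase every register offset to the second instance.
	 */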
	for (i = 0; i < ib->length_dw; i += 2) {
		uint32_t reg = amdgpu_get_ib_value(p, ib_idx, i);

		reg -= p->adev->reg_offset[UVD_HWIP][0][1];
		reg += p->adev->reg_offset[UVD_HWIP][1][1];

		amdgpu_set_ib_value(p, ib_idx, i, reg);
	}
	return 0;
}

/**
 * uvd_v7_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib,
				  unsigned vmid, bool ctx_switch)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_RBC_IB_SIZE), 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * uvd_v7_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
				      struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void uvd_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
				    uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 8);
}

static void uvd_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					uint32_t val, uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GP_SCRATCH8), 0));
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 12);
}

static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	uvd_v7_0_ring_emit_reg_wait(ring, data0, data1, mask);
}

static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

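	/* NOPs are emitted in pairs (PACKET0 header plus payload), hence the even count */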
1394 for (i = 0; i < count / 2; i++) {
1395 amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_NO_OP), 0));
1396 amdgpu_ring_write(ring, 0);
1397 }
1398 }
1399
uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring * ring)1400 static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
1401 {
1402 amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
1403 }
1404
uvd_v7_0_enc_ring_emit_reg_wait(struct amdgpu_ring * ring,uint32_t reg,uint32_t val,uint32_t mask)1405 static void uvd_v7_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
1406 uint32_t reg, uint32_t val,
1407 uint32_t mask)
1408 {
1409 amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
1410 amdgpu_ring_write(ring, reg << 2);
1411 amdgpu_ring_write(ring, mask);
1412 amdgpu_ring_write(ring, val);
1413 }
1414
uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring * ring,unsigned int vmid,uint64_t pd_addr)1415 static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
1416 unsigned int vmid, uint64_t pd_addr)
1417 {
1418 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
1419
1420 pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1421
1422 /* wait for reg writes */
1423 uvd_v7_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
1424 lower_32_bits(pd_addr), 0xffffffff);
1425 }
1426
uvd_v7_0_enc_ring_emit_wreg(struct amdgpu_ring * ring,uint32_t reg,uint32_t val)1427 static void uvd_v7_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
1428 uint32_t reg, uint32_t val)
1429 {
1430 amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
1431 amdgpu_ring_write(ring, reg << 2);
1432 amdgpu_ring_write(ring, val);
1433 }
1434
1435 #if 0
1436 static bool uvd_v7_0_is_idle(void *handle)
1437 {
1438 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1439
1440 return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
1441 }
1442
1443 static int uvd_v7_0_wait_for_idle(void *handle)
1444 {
1445 unsigned i;
1446 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1447
1448 for (i = 0; i < adev->usec_timeout; i++) {
1449 if (uvd_v7_0_is_idle(handle))
1450 return 0;
1451 }
1452 return -ETIMEDOUT;
1453 }
1454
1455 #define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd
1456 static bool uvd_v7_0_check_soft_reset(void *handle)
1457 {
1458 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1459 u32 srbm_soft_reset = 0;
1460 u32 tmp = RREG32(mmSRBM_STATUS);
1461
1462 if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
1463 REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
1464 (RREG32_SOC15(UVD, ring->me, mmUVD_STATUS) &
1465 AMDGPU_UVD_STATUS_BUSY_MASK))
1466 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1467 SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
1468
1469 if (srbm_soft_reset) {
1470 adev->uvd.inst[ring->me].srbm_soft_reset = srbm_soft_reset;
1471 return true;
1472 } else {
1473 adev->uvd.inst[ring->me].srbm_soft_reset = 0;
1474 return false;
1475 }
1476 }
1477
1478 static int uvd_v7_0_pre_soft_reset(void *handle)
1479 {
1480 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1481
1482 if (!adev->uvd.inst[ring->me].srbm_soft_reset)
1483 return 0;
1484
1485 uvd_v7_0_stop(adev);
1486 return 0;
1487 }
1488
1489 static int uvd_v7_0_soft_reset(void *handle)
1490 {
1491 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1492 u32 srbm_soft_reset;
1493
1494 if (!adev->uvd.inst[ring->me].srbm_soft_reset)
1495 return 0;
1496 srbm_soft_reset = adev->uvd.inst[ring->me].srbm_soft_reset;
1497
1498 if (srbm_soft_reset) {
1499 u32 tmp;
1500
1501 tmp = RREG32(mmSRBM_SOFT_RESET);
1502 tmp |= srbm_soft_reset;
1503 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1504 WREG32(mmSRBM_SOFT_RESET, tmp);
1505 tmp = RREG32(mmSRBM_SOFT_RESET);
1506
1507 udelay(50);
1508
1509 tmp &= ~srbm_soft_reset;
1510 WREG32(mmSRBM_SOFT_RESET, tmp);
1511 tmp = RREG32(mmSRBM_SOFT_RESET);
1512
1513 /* Wait a little for things to settle down */
1514 udelay(50);
1515 }
1516
1517 return 0;
1518 }
1519
1520 static int uvd_v7_0_post_soft_reset(void *handle)
1521 {
1522 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1523
1524 if (!adev->uvd.inst[ring->me].srbm_soft_reset)
1525 return 0;
1526
1527 mdelay(5);
1528
1529 return uvd_v7_0_start(adev);
1530 }
1531 #endif
1532
uvd_v7_0_set_interrupt_state(struct amdgpu_device * adev,struct amdgpu_irq_src * source,unsigned type,enum amdgpu_interrupt_state state)1533 static int uvd_v7_0_set_interrupt_state(struct amdgpu_device *adev,
1534 struct amdgpu_irq_src *source,
1535 unsigned type,
1536 enum amdgpu_interrupt_state state)
1537 {
1538 // TODO
1539 return 0;
1540 }
1541
uvd_v7_0_process_interrupt(struct amdgpu_device * adev,struct amdgpu_irq_src * source,struct amdgpu_iv_entry * entry)1542 static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
1543 struct amdgpu_irq_src *source,
1544 struct amdgpu_iv_entry *entry)
1545 {
1546 uint32_t ip_instance;
1547
1548 switch (entry->client_id) {
1549 case SOC15_IH_CLIENTID_UVD:
1550 ip_instance = 0;
1551 break;
1552 case SOC15_IH_CLIENTID_UVD1:
1553 ip_instance = 1;
1554 break;
1555 default:
1556 DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
1557 return 0;
1558 }
1559
1560 DRM_DEBUG("IH: UVD TRAP\n");
1561
1562 switch (entry->src_id) {
1563 case 124:
1564 amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring);
1565 break;
1566 case 119:
1567 amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[0]);
1568 break;
1569 case 120:
1570 if (!amdgpu_sriov_vf(adev))
1571 amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[1]);
1572 break;
1573 default:
1574 DRM_ERROR("Unhandled interrupt: %d %d\n",
1575 entry->src_id, entry->src_data[0]);
1576 break;
1577 }
1578
1579 return 0;
1580 }
1581
1582 #if 0
1583 static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev)
1584 {
1585 uint32_t data, data1, data2, suvd_flags;
1586
1587 data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL);
1588 data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);
1589 data2 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL);
1590
1591 data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
1592 UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
1593
1594 suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
1595 UVD_SUVD_CGC_GATE__SIT_MASK |
1596 UVD_SUVD_CGC_GATE__SMP_MASK |
1597 UVD_SUVD_CGC_GATE__SCM_MASK |
1598 UVD_SUVD_CGC_GATE__SDB_MASK;
1599
1600 data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
1601 (1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
1602 (4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));
1603
1604 data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
1605 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
1606 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
1607 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
1608 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
1609 UVD_CGC_CTRL__SYS_MODE_MASK |
1610 UVD_CGC_CTRL__UDEC_MODE_MASK |
1611 UVD_CGC_CTRL__MPEG2_MODE_MASK |
1612 UVD_CGC_CTRL__REGS_MODE_MASK |
1613 UVD_CGC_CTRL__RBC_MODE_MASK |
1614 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
1615 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
1616 UVD_CGC_CTRL__IDCT_MODE_MASK |
1617 UVD_CGC_CTRL__MPRD_MODE_MASK |
1618 UVD_CGC_CTRL__MPC_MODE_MASK |
1619 UVD_CGC_CTRL__LBSI_MODE_MASK |
1620 UVD_CGC_CTRL__LRBBM_MODE_MASK |
1621 UVD_CGC_CTRL__WCB_MODE_MASK |
1622 UVD_CGC_CTRL__VCPU_MODE_MASK |
1623 UVD_CGC_CTRL__JPEG_MODE_MASK |
1624 UVD_CGC_CTRL__JPEG2_MODE_MASK |
1625 UVD_CGC_CTRL__SCPU_MODE_MASK);
1626 data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
1627 UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
1628 UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
1629 UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
1630 UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
1631 data1 |= suvd_flags;
1632
1633 WREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL, data);
1634 WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, 0);
1635 WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
1636 WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL, data2);
1637 }
1638
1639 static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev)
1640 {
1641 uint32_t data, data1, cgc_flags, suvd_flags;
1642
1643 data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE);
1644 data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);
1645
1646 cgc_flags = UVD_CGC_GATE__SYS_MASK |
1647 UVD_CGC_GATE__UDEC_MASK |
1648 UVD_CGC_GATE__MPEG2_MASK |
1649 UVD_CGC_GATE__RBC_MASK |
1650 UVD_CGC_GATE__LMI_MC_MASK |
1651 UVD_CGC_GATE__IDCT_MASK |
1652 UVD_CGC_GATE__MPRD_MASK |
1653 UVD_CGC_GATE__MPC_MASK |
1654 UVD_CGC_GATE__LBSI_MASK |
1655 UVD_CGC_GATE__LRBBM_MASK |
1656 UVD_CGC_GATE__UDEC_RE_MASK |
1657 UVD_CGC_GATE__UDEC_CM_MASK |
1658 UVD_CGC_GATE__UDEC_IT_MASK |
1659 UVD_CGC_GATE__UDEC_DB_MASK |
1660 UVD_CGC_GATE__UDEC_MP_MASK |
1661 UVD_CGC_GATE__WCB_MASK |
1662 UVD_CGC_GATE__VCPU_MASK |
1663 UVD_CGC_GATE__SCPU_MASK |
1664 UVD_CGC_GATE__JPEG_MASK |
1665 UVD_CGC_GATE__JPEG2_MASK;
1666
1667 suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
1668 UVD_SUVD_CGC_GATE__SIT_MASK |
1669 UVD_SUVD_CGC_GATE__SMP_MASK |
1670 UVD_SUVD_CGC_GATE__SCM_MASK |
1671 UVD_SUVD_CGC_GATE__SDB_MASK;
1672
1673 data |= cgc_flags;
1674 data1 |= suvd_flags;
1675
1676 WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, data);
1677 WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
1678 }
1679
static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);

	if (enable)
		tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
			GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
	else
		tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
			 GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);

	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
}

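/**
 * uvd_v7_0_set_clockgating_state - switch UVD clock gating
 *
 * @handle: amdgpu_device pointer
 * @state: clock gating state
 *
 * Full clock gating implementation, currently compiled out in
 * favor of the stub further below.
 */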
static int uvd_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	uvd_v7_0_set_bypass_mode(adev, enable);

	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
		return 0;

	if (enable) {
		/* disable HW gating and enable SW gating */
		uvd_v7_0_set_sw_clock_gating(adev);
	} else {
		/* wait for STATUS to clear */
		if (uvd_v7_0_wait_for_idle(handle))
			return -EBUSY;

		/* enable HW gates because UVD is idle */
		/* uvd_v7_0_set_hw_clock_gating(adev); */
	}

	return 0;
}

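/**
 * uvd_v7_0_set_powergating_state - switch UVD power gating
 *
 * @handle: amdgpu_device pointer
 * @state: power gating state
 *
 * Stops or restarts the UVD block when the power gating state
 * changes (currently compiled out).
 */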
static int uvd_v7_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks.
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
		return 0;

	/* no ring context in this helper; use UVD instance 0 */
	WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);

	if (state == AMD_PG_STATE_GATE) {
		uvd_v7_0_stop(adev);
		return 0;
	} else {
		return uvd_v7_0_start(adev);
	}
}
#endif

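/**
 * uvd_v7_0_set_clockgating_state - stub clock gating handler
 *
 * @handle: amdgpu_device pointer
 * @state: clock gating state
 *
 * Intentionally a no-op; a handler must exist so the IP block can
 * be torn down cleanly on driver unload.
 */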
static int uvd_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	/* needed for driver unload */
	return 0;
}

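/* IP-level callbacks shared by all UVD 7.0 instances */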
const struct amd_ip_funcs uvd_v7_0_ip_funcs = {
	.name = "uvd_v7_0",
	.early_init = uvd_v7_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v7_0_sw_init,
	.sw_fini = uvd_v7_0_sw_fini,
	.hw_init = uvd_v7_0_hw_init,
	.hw_fini = uvd_v7_0_hw_fini,
	.suspend = uvd_v7_0_suspend,
	.resume = uvd_v7_0_resume,
	.is_idle = NULL /* uvd_v7_0_is_idle */,
	.wait_for_idle = NULL /* uvd_v7_0_wait_for_idle */,
	.check_soft_reset = NULL /* uvd_v7_0_check_soft_reset */,
	.pre_soft_reset = NULL /* uvd_v7_0_pre_soft_reset */,
	.soft_reset = NULL /* uvd_v7_0_soft_reset */,
	.post_soft_reset = NULL /* uvd_v7_0_post_soft_reset */,
	.set_clockgating_state = uvd_v7_0_set_clockgating_state,
	.set_powergating_state = NULL /* uvd_v7_0_set_powergating_state */,
};

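/* decode ring callbacks (VM mode) */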
static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.vmhub = AMDGPU_MMHUB,
	.get_rptr = uvd_v7_0_ring_get_rptr,
	.get_wptr = uvd_v7_0_ring_get_wptr,
	.set_wptr = uvd_v7_0_ring_set_wptr,
	.patch_cs_in_place = uvd_v7_0_ring_patch_cs_in_place,
	.emit_frame_size =
		6 + /* hdp invalidate */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* uvd_v7_0_ring_emit_vm_flush */
		14 + 14, /* uvd_v7_0_ring_emit_fence x2 vm fence */
	.emit_ib_size = 8, /* uvd_v7_0_ring_emit_ib */
	.emit_ib = uvd_v7_0_ring_emit_ib,
	.emit_fence = uvd_v7_0_ring_emit_fence,
	.emit_vm_flush = uvd_v7_0_ring_emit_vm_flush,
	.emit_hdp_flush = uvd_v7_0_ring_emit_hdp_flush,
	.test_ring = uvd_v7_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v7_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v7_0_ring_emit_wreg,
	.emit_reg_wait = uvd_v7_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

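/* encode ring callbacks (VM mode) */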
static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD_ENC,
	.align_mask = 0x3f,
	.nop = HEVC_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.vmhub = AMDGPU_MMHUB,
	.get_rptr = uvd_v7_0_enc_ring_get_rptr,
	.get_wptr = uvd_v7_0_enc_ring_get_wptr,
	.set_wptr = uvd_v7_0_enc_ring_set_wptr,
	.emit_frame_size =
		3 + 3 + /* hdp flush / invalidate */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* uvd_v7_0_enc_ring_emit_vm_flush */
		5 + 5 + /* uvd_v7_0_enc_ring_emit_fence x2 vm fence */
		1, /* uvd_v7_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* uvd_v7_0_enc_ring_emit_ib */
	.emit_ib = uvd_v7_0_enc_ring_emit_ib,
	.emit_fence = uvd_v7_0_enc_ring_emit_fence,
	.emit_vm_flush = uvd_v7_0_enc_ring_emit_vm_flush,
	.test_ring = uvd_v7_0_enc_ring_test_ring,
	.test_ib = uvd_v7_0_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = uvd_v7_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v7_0_enc_ring_emit_wreg,
	.emit_reg_wait = uvd_v7_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

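/**
 * uvd_v7_0_set_ring_funcs - set decode ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Hooks up the decode ring callbacks for each non-harvested
 * UVD instance.
 */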
static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
		adev->uvd.inst[i].ring.me = i;
		DRM_INFO("UVD(%d) is enabled in VM mode\n", i);
	}
}

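/**
 * uvd_v7_0_set_enc_ring_funcs - set encode ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Hooks up the encode ring callbacks for each non-harvested
 * UVD instance.
 */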
static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
			adev->uvd.inst[j].ring_enc[i].me = j;
		}

		DRM_INFO("UVD(%d) ENC is enabled in VM mode\n", j);
	}
}

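/* interrupt handling callbacks */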
static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = {
	.set = uvd_v7_0_set_interrupt_state,
	.process = uvd_v7_0_process_interrupt,
};

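/**
 * uvd_v7_0_set_irq_funcs - set interrupt functions
 *
 * @adev: amdgpu_device pointer
 *
 * Registers the interrupt callbacks and the number of interrupt
 * types (one per encode ring plus one for decode) for each
 * non-harvested UVD instance.
 */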
static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1;
		adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs;
	}
}

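/* version descriptor used to register UVD 7.0 with the IP framework */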
const struct amdgpu_ip_block_version uvd_v7_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 7,
	.minor = 0,
	.rev = 0,
	.funcs = &uvd_v7_0_ip_funcs,
};