/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_6_0_d.h"
#include "uvd/uvd_6_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"
#include "bif/bif_5_1_d.h"
#include "gmc/gmc_8_1_d.h"
#include "vi.h"
#include "ivsrcid/ivsrcid_vislands30.h"

/* Polaris10/11/12 firmware version */
#define FW_1_130_16 ((1 << 24) | (130 << 16) | (16 << 8))

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev);

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);
static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev);
static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);

/**
 * uvd_v6_0_enc_support - get encode support status
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the current hardware encode support status
 */
static inline bool uvd_v6_0_enc_support(struct amdgpu_device *adev)
{
	return ((adev->asic_type >= CHIP_POLARIS10) &&
			(adev->asic_type <= CHIP_VEGAM) &&
			(!adev->uvd.fw_version || adev->uvd.fw_version >= FW_1_130_16));
}
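/*
 * FW_1_130_16 packs version 1.130.16 as (major << 24) | (minor << 16) |
 * (rev << 8), i.e. 0x01821000.  Because the fields are ordered from most
 * to least significant, the plain integer comparison in
 * uvd_v6_0_enc_support() orders versions correctly: e.g. firmware
 * 1.130.17 packs to 0x01821100, which compares greater than FW_1_130_16.
 * A zero fw_version (firmware not yet queried) is treated as "supported".
 */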
/**
 * uvd_v6_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v6_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst->ring_enc[0])
		return RREG32(mmUVD_RB_RPTR);
	else
		return RREG32(mmUVD_RB_RPTR2);
}

/**
 * uvd_v6_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v6_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst->ring_enc[0])
		return RREG32(mmUVD_RB_WPTR);
	else
		return RREG32(mmUVD_RB_WPTR2);
}

/**
 * uvd_v6_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

/**
 * uvd_v6_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst->ring_enc[0])
		WREG32(mmUVD_RB_WPTR,
			lower_32_bits(ring->wptr));
	else
		WREG32(mmUVD_RB_WPTR2,
			lower_32_bits(ring->wptr));
}

/**
 * uvd_v6_0_enc_ring_test_ring - test if UVD ENC ring is working
 *
 * @ring: the engine to test on
 */
static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: uvd enc failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			  ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}
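/*
 * The enc ring test above relies on a simple handshake: sample the read
 * pointer, push a single HEVC_ENC_CMD_END packet, then busy-wait (up to
 * adev->usec_timeout microseconds) for the hardware to consume it, which
 * is visible as the read pointer moving away from the sampled value.  Any
 * rptr movement is taken as proof that the engine is alive and fetching
 * commands; no completion interrupt is needed for this test.
 */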
/**
 * uvd_v6_0_enc_get_create_msg - generate a UVD ENC create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				       struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00010000;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * uvd_v6_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @direct: submit the msg directly to the ring or go through the scheduler
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
					uint32_t handle,
					bool direct, struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00010000;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (direct)
		r = amdgpu_job_submit_direct(job, ring, &f);
	else
		r = amdgpu_job_submit(job, &ring->adev->vce.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
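/*
 * Both messages above follow the same framing, as far as can be inferred
 * from the literals: each block starts with its size in bytes, followed by
 * a type dword (0x00000001 session info, 0x00000002 task info) and the
 * payload, and the final 8-byte block carries the opcode (0x08000001
 * initialize, 0x08000002 close session).  The 24-byte session-info block
 * is laid out as
 *
 *	{ 0x18, 0x1, handle, 0x00010000, addr_hi, addr_lo }
 *
 * where the address points at scratch space 1 KiB into the IB itself.
 * Note that the scheduled (non-direct) destroy path reuses
 * adev->vce.entity for submission rather than a UVD-specific entity.
 */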
/**
 * uvd_v6_0_enc_ring_test_ib - test if UVD ENC IBs are working
 *
 * @ring: the engine to test on
 * @timeout: how long to wait for the fence to signal
 */
static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	r = uvd_v6_0_enc_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = uvd_v6_0_enc_get_destroy_msg(ring, 1, true, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}
error:
	dma_fence_put(fence);
	return r;
}

static int uvd_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->uvd.num_uvd_inst = 1;

	if (!(adev->flags & AMD_IS_APU) &&
	    (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK))
		return -ENOENT;

	uvd_v6_0_set_ring_funcs(adev);

	if (uvd_v6_0_enc_support(adev)) {
		adev->uvd.num_enc_rings = 2;
		uvd_v6_0_set_enc_ring_funcs(adev);
	}

	uvd_v6_0_set_irq_funcs(adev);

	return 0;
}
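/*
 * On dedicated GPUs the UVD block can be disabled by harvest fusing, which
 * uvd_v6_0_early_init() detects via CC_HARVEST_FUSES above.  Returning
 * -ENOENT from early_init is the convention the amdgpu IP framework uses
 * for "block not present": device init continues, it simply skips this IP
 * block.  APUs are exempt from the fuse check.
 */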
static int uvd_v6_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
	if (r)
		return r;

	/* UVD ENC TRAP */
	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP, &adev->uvd.inst->irq);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	if (!uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			adev->uvd.inst->ring_enc[i].funcs = NULL;

		adev->uvd.inst->irq.num_types = 1;
		adev->uvd.num_enc_rings = 0;

		DRM_INFO("UVD ENC is disabled\n");
	}

	ring = &adev->uvd.inst->ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst->ring_enc[i];
			sprintf(ring->name, "uvd_enc%d", i);
			r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_entity_init(adev);

	return r;
}

static int uvd_v6_0_sw_fini(void *handle)
{
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
	}

	return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v6_0_hw_init - start and test UVD block
 *
 * @handle: handle used to identify the IP block
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v6_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t tmp;
	int i, r;

	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
	uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
	uvd_v6_0_enable_mgcg(adev, true);

	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst->ring_enc[i];
			ring->ready = true;
			r = amdgpu_ring_test_ring(ring);
			if (r) {
				ring->ready = false;
				goto done;
			}
		}
	}

done:
	if (!r) {
		if (uvd_v6_0_enc_support(adev))
			DRM_INFO("UVD and UVD ENC initialized successfully.\n");
		else
			DRM_INFO("UVD initialized successfully.\n");
	}

	return r;
}
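/*
 * The ring writes in uvd_v6_0_hw_init() use the classic PACKET0 register
 * write encoding: PACKET0(reg, 0) emits a header telling the engine to
 * load the following dword into that register, so each register
 * programming step costs two ring dwords.  For example, the pair
 *
 *	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0));
 *	amdgpu_ring_write(ring, 0xFFFFF);
 *
 * sets the semaphore wait-fault timeout register to 0xFFFFF.  That is why
 * the amdgpu_ring_alloc() above reserves exactly 10 dwords for the five
 * register writes that follow.
 */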
/**
 * uvd_v6_0_hw_fini - stop the hardware block
 *
 * @handle: handle used to identify the IP block
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v6_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;

	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v6_0_stop(adev);

	ring->ready = false;

	return 0;
}

static int uvd_v6_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v6_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v6_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v6_0_hw_init(adev);
}

/**
 * uvd_v6_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

	/* program memory controller bits 0-27 */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->uvd.inst->gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->uvd.inst->gpu_addr));

	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
	size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = AMDGPU_UVD_HEAP_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);

	WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
}
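/*
 * uvd_v6_0_mc_resume() carves the UVD BO into three consecutive windows
 * behind the 64-bit LMI base address: window 0 holds the firmware image,
 * window 1 the heap, and window 2 the stack plus one session block per
 * supported handle.  The OFFSETn registers are programmed in units of
 * 8 bytes (hence the offset >> 3), while the SIZEn registers are written
 * with the byte size directly.
 */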
#if 0
static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
		bool enable)
{
	u32 data, data1;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	if (enable) {
		data |= UVD_CGC_GATE__SYS_MASK |
				UVD_CGC_GATE__UDEC_MASK |
				UVD_CGC_GATE__MPEG2_MASK |
				UVD_CGC_GATE__RBC_MASK |
				UVD_CGC_GATE__LMI_MC_MASK |
				UVD_CGC_GATE__IDCT_MASK |
				UVD_CGC_GATE__MPRD_MASK |
				UVD_CGC_GATE__MPC_MASK |
				UVD_CGC_GATE__LBSI_MASK |
				UVD_CGC_GATE__LRBBM_MASK |
				UVD_CGC_GATE__UDEC_RE_MASK |
				UVD_CGC_GATE__UDEC_CM_MASK |
				UVD_CGC_GATE__UDEC_IT_MASK |
				UVD_CGC_GATE__UDEC_DB_MASK |
				UVD_CGC_GATE__UDEC_MP_MASK |
				UVD_CGC_GATE__WCB_MASK |
				UVD_CGC_GATE__VCPU_MASK |
				UVD_CGC_GATE__SCPU_MASK;
		data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK |
				UVD_SUVD_CGC_GATE__SRE_H264_MASK |
				UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SIT_H264_MASK |
				UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SCM_H264_MASK |
				UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SDB_H264_MASK |
				UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;
	} else {
		data &= ~(UVD_CGC_GATE__SYS_MASK |
				UVD_CGC_GATE__UDEC_MASK |
				UVD_CGC_GATE__MPEG2_MASK |
				UVD_CGC_GATE__RBC_MASK |
				UVD_CGC_GATE__LMI_MC_MASK |
				UVD_CGC_GATE__LMI_UMC_MASK |
				UVD_CGC_GATE__IDCT_MASK |
				UVD_CGC_GATE__MPRD_MASK |
				UVD_CGC_GATE__MPC_MASK |
				UVD_CGC_GATE__LBSI_MASK |
				UVD_CGC_GATE__LRBBM_MASK |
				UVD_CGC_GATE__UDEC_RE_MASK |
				UVD_CGC_GATE__UDEC_CM_MASK |
				UVD_CGC_GATE__UDEC_IT_MASK |
				UVD_CGC_GATE__UDEC_DB_MASK |
				UVD_CGC_GATE__UDEC_MP_MASK |
				UVD_CGC_GATE__WCB_MASK |
				UVD_CGC_GATE__VCPU_MASK |
				UVD_CGC_GATE__SCPU_MASK);
		data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK |
				UVD_SUVD_CGC_GATE__SRE_H264_MASK |
				UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SIT_H264_MASK |
				UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SCM_H264_MASK |
				UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SDB_H264_MASK |
				UVD_SUVD_CGC_GATE__SDB_HEVC_MASK);
	}
	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

/**
 * uvd_v6_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v6_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;

	/* disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v6_0_mc_resume(adev);

	/* disable interrupt */
	WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0);

	/* stall UMC and register bus before resetting VCPU */
	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1);
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET,
		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
		UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL,
		(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);

	/* enable UMC */
	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0);

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);

	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1);
		mdelay(10);
		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}

	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN,
		(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
		~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

	/* clear the bit 4 of UVD_STATUS */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32(mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

	WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);

	if (uvd_v6_0_enc_support(adev)) {
		ring = &adev->uvd.inst->ring_enc[0];
		WREG32(mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32(mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32(mmUVD_RB_SIZE, ring->ring_size / 4);

		ring = &adev->uvd.inst->ring_enc[1];
		WREG32(mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32(mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32(mmUVD_RB_SIZE2, ring->ring_size / 4);
	}

	return 0;
}
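/*
 * The boot handshake in uvd_v6_0_start() polls bit 1 of UVD_STATUS, which
 * is taken as the VCPU's boot-complete signal.  Each of the (up to ten)
 * outer attempts waits for about a second (100 x 10 ms) and, on failure,
 * pulses VCPU_SOFT_RESET before trying again; only after all attempts
 * fail is the error propagated to the caller.
 */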
/**
 * uvd_v6_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v6_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); /* bit 8: STALL_ARB_UMC, cf. uvd_v6_0_start() */
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	WREG32(mmUVD_STATUS, 0);
}

/**
 * uvd_v6_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address the fence sequence number is written to
 * @seq: sequence number to write
 * @flags: fence flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v6_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address the fence sequence number is written to
 * @seq: sequence number to write
 * @flags: fence flags
 *
 * Write an enc fence and a trap command to the ring.
 */
static void uvd_v6_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
}
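/*
 * Both fence emitters end with a trap so the VCPU raises an interrupt
 * after the fence value lands in memory.  On the decode ring this is
 * spelled out with GPCOM packets: the sequence number goes into
 * UVD_CONTEXT_ID and the target address into VCPU_DATA0/DATA1; VCPU_CMD 0
 * appears to trigger the fence write and VCPU_CMD 2 the trap.  The enc
 * ring instead has dedicated HEVC_ENC_CMD_FENCE / HEVC_ENC_CMD_TRAP
 * packets that take addr lo/hi and seq inline.
 */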
/**
 * uvd_v6_0_ring_emit_hdp_flush - skip HDP flushing
 *
 * @ring: amdgpu_ring pointer
 */
static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	/* The firmware doesn't seem to like touching registers at this point. */
}

/**
 * uvd_v6_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			  ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}
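/*
 * The decode ring test mirrors the enc variant, but with an observable
 * register instead of the read pointer: UVD_CONTEXT_ID is seeded with
 * 0xCAFEDEAD from the CPU, a PACKET0 write of 0xDEADBEEF to the same
 * register is pushed through the ring, and the test passes once the CPU
 * reads the new value back, proving the engine executed the packet.
 */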
/**
 * uvd_v6_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 * @vmid: VM ID to run the IB on
 * @ctx_switch: unused by this ring
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib,
				  unsigned vmid, bool ctx_switch)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * uvd_v6_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 * @vmid: VM ID to run the IB on
 * @ctx_switch: unused by this ring
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
		struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void uvd_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
				    uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0x8);
}

static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 1 << vmid); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xC);
}

static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xE);
}

static void uvd_v6_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
		amdgpu_ring_write(ring, 0);
	}
}

static void uvd_v6_0_enc_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, HEVC_ENC_CMD_WAIT_GE);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
}

static void uvd_v6_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
}

static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_UPDATE_PTB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, pd_addr >> 12);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FLUSH_TLB);
	amdgpu_ring_write(ring, vmid);
}
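/*
 * The GPCOM command codes used above are not named in this file; from the
 * operand registers they appear to be: 0x8 = write a register (address in
 * DATA0, value in DATA1, see uvd_v6_0_ring_emit_wreg()); 0xC = wait until
 * (register & GP_SCRATCH8 mask) equals DATA1, used to poll
 * VM_INVALIDATE_REQUEST after a TLB flush; and 0xE = wait on a memory
 * location (address in DATA0/DATA1, expected value in GP_SCRATCH9).  The
 * enc ring instead flushes with explicit HEVC_ENC_CMD_UPDATE_PTB /
 * HEVC_ENC_CMD_FLUSH_TLB packets, passing the page directory as a 4 KiB
 * page frame number (pd_addr >> 12).
 */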
static bool uvd_v6_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v6_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (uvd_v6_0_is_idle(handle))
			return 0;
	}
	return -ETIMEDOUT;
}

#define AMDGPU_UVD_STATUS_BUSY_MASK    0xfd
static bool uvd_v6_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
	    (RREG32(mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK))
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

	if (srbm_soft_reset) {
		adev->uvd.inst->srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->uvd.inst->srbm_soft_reset = 0;
		return false;
	}
}

static int uvd_v6_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst->srbm_soft_reset)
		return 0;

	uvd_v6_0_stop(adev);
	return 0;
}

static int uvd_v6_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->uvd.inst->srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->uvd.inst->srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int uvd_v6_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst->srbm_soft_reset)
		return 0;

	mdelay(5);

	return uvd_v6_0_start(adev);
}

static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	// TODO
	return 0;
}

static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	bool int_handled = true;

	DRM_DEBUG("IH: UVD TRAP\n");

	switch (entry->src_id) {
	case 124: /* VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, cf. uvd_v6_0_sw_init() */
		amdgpu_fence_process(&adev->uvd.inst->ring);
		break;
	case 119: /* VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP */
		if (likely(uvd_v6_0_enc_support(adev)))
			amdgpu_fence_process(&adev->uvd.inst->ring_enc[0]);
		else
			int_handled = false;
		break;
	case 120: /* VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP + 1 */
		if (likely(uvd_v6_0_enc_support(adev)))
			amdgpu_fence_process(&adev->uvd.inst->ring_enc[1]);
		else
			int_handled = false;
		break;
	}

	if (!int_handled)
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);

	return 0;
}
static void uvd_v6_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t data1, data3;

	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	data3 = RREG32(mmUVD_CGC_GATE);

	data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
		UVD_SUVD_CGC_GATE__SIT_MASK |
		UVD_SUVD_CGC_GATE__SMP_MASK |
		UVD_SUVD_CGC_GATE__SCM_MASK |
		UVD_SUVD_CGC_GATE__SDB_MASK |
		UVD_SUVD_CGC_GATE__SRE_H264_MASK |
		UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
		UVD_SUVD_CGC_GATE__SIT_H264_MASK |
		UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
		UVD_SUVD_CGC_GATE__SCM_H264_MASK |
		UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
		UVD_SUVD_CGC_GATE__SDB_H264_MASK |
		UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;

	if (enable) {
		data3 |= (UVD_CGC_GATE__SYS_MASK |
			UVD_CGC_GATE__UDEC_MASK |
			UVD_CGC_GATE__MPEG2_MASK |
			UVD_CGC_GATE__RBC_MASK |
			UVD_CGC_GATE__LMI_MC_MASK |
			UVD_CGC_GATE__LMI_UMC_MASK |
			UVD_CGC_GATE__IDCT_MASK |
			UVD_CGC_GATE__MPRD_MASK |
			UVD_CGC_GATE__MPC_MASK |
			UVD_CGC_GATE__LBSI_MASK |
			UVD_CGC_GATE__LRBBM_MASK |
			UVD_CGC_GATE__UDEC_RE_MASK |
			UVD_CGC_GATE__UDEC_CM_MASK |
			UVD_CGC_GATE__UDEC_IT_MASK |
			UVD_CGC_GATE__UDEC_DB_MASK |
			UVD_CGC_GATE__UDEC_MP_MASK |
			UVD_CGC_GATE__WCB_MASK |
			UVD_CGC_GATE__JPEG_MASK |
			UVD_CGC_GATE__SCPU_MASK |
			UVD_CGC_GATE__JPEG2_MASK);
		/* Only when PG is enabled can we gate the clock to the VCPU */
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
			data3 |= UVD_CGC_GATE__VCPU_MASK;

		data3 &= ~UVD_CGC_GATE__REGS_MASK;
	} else {
		data3 = 0;
	}

	WREG32(mmUVD_SUVD_CGC_GATE, data1);
	WREG32(mmUVD_CGC_GATE, data3);
}

static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data2;

	data = RREG32(mmUVD_CGC_CTRL);
	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
			UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
			UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
			UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
			UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
			UVD_CGC_CTRL__SYS_MODE_MASK |
			UVD_CGC_CTRL__UDEC_MODE_MASK |
			UVD_CGC_CTRL__MPEG2_MODE_MASK |
			UVD_CGC_CTRL__REGS_MODE_MASK |
			UVD_CGC_CTRL__RBC_MODE_MASK |
			UVD_CGC_CTRL__LMI_MC_MODE_MASK |
			UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
			UVD_CGC_CTRL__IDCT_MODE_MASK |
			UVD_CGC_CTRL__MPRD_MODE_MASK |
			UVD_CGC_CTRL__MPC_MODE_MASK |
			UVD_CGC_CTRL__LBSI_MODE_MASK |
			UVD_CGC_CTRL__LRBBM_MODE_MASK |
			UVD_CGC_CTRL__WCB_MODE_MASK |
			UVD_CGC_CTRL__VCPU_MODE_MASK |
			UVD_CGC_CTRL__JPEG_MODE_MASK |
			UVD_CGC_CTRL__SCPU_MODE_MASK |
			UVD_CGC_CTRL__JPEG2_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

	WREG32(mmUVD_CGC_CTRL, data);
	WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}
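/*
 * Two register families cooperate here, judging by the masks above: the
 * UVD_CGC_GATE / UVD_SUVD_CGC_GATE bits select which sub-blocks may have
 * their clocks gated at all, while the MODE fields in UVD_CGC_CTRL /
 * UVD_SUVD_CGC_CTRL pick how gating is driven.  This code clears the MODE
 * fields and sets DYN_CLOCK_MODE with a short on/off hysteresis
 * (CLK_GATE_DLY_TIMER = 1, CLK_OFF_DELAY = 4), which appears to leave the
 * per-block gating decision to hardware once a block goes idle.
 */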
#if 0
static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		UVD_CGC_GATE__UDEC_MASK |
		UVD_CGC_GATE__MPEG2_MASK |
		UVD_CGC_GATE__RBC_MASK |
		UVD_CGC_GATE__LMI_MC_MASK |
		UVD_CGC_GATE__IDCT_MASK |
		UVD_CGC_GATE__MPRD_MASK |
		UVD_CGC_GATE__MPC_MASK |
		UVD_CGC_GATE__LBSI_MASK |
		UVD_CGC_GATE__LRBBM_MASK |
		UVD_CGC_GATE__UDEC_RE_MASK |
		UVD_CGC_GATE__UDEC_CM_MASK |
		UVD_CGC_GATE__UDEC_IT_MASK |
		UVD_CGC_GATE__UDEC_DB_MASK |
		UVD_CGC_GATE__UDEC_MP_MASK |
		UVD_CGC_GATE__WCB_MASK |
		UVD_CGC_GATE__VCPU_MASK |
		UVD_CGC_GATE__SCPU_MASK |
		UVD_CGC_GATE__JPEG_MASK |
		UVD_CGC_GATE__JPEG2_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		UVD_SUVD_CGC_GATE__SIT_MASK |
		UVD_SUVD_CGC_GATE__SMP_MASK |
		UVD_SUVD_CGC_GATE__SCM_MASK |
		UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}

static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (enable) {
		/* wait for STATUS to clear */
		if (uvd_v6_0_wait_for_idle(handle))
			return -EBUSY;
		uvd_v6_0_enable_clock_gating(adev, true);
		/* enable HW gates because UVD is idle */
		/* uvd_v6_0_set_hw_clock_gating(adev); */
	} else {
		/* disable HW gating and enable Sw gating */
		uvd_v6_0_enable_clock_gating(adev, false);
	}
	uvd_v6_0_set_sw_clock_gating(adev);
	return 0;
}

static int uvd_v6_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);

	if (state == AMD_PG_STATE_GATE)
		uvd_v6_0_stop(adev);
	else
		ret = uvd_v6_0_start(adev);

	return ret;
}

static void uvd_v6_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	mutex_lock(&adev->pm.mutex);

	if (adev->flags & AMD_IS_APU)
		data = RREG32_SMC(ixCURRENT_PG_STATUS_APU);
	else
		data = RREG32_SMC(ixCURRENT_PG_STATUS);

	if (data & CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
		DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
		goto out;
	}

	/* AMD_CG_SUPPORT_UVD_MGCG */
	data = RREG32(mmUVD_CGC_CTRL);
	if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
		*flags |= AMD_CG_SUPPORT_UVD_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}
static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
	.name = "uvd_v6_0",
	.early_init = uvd_v6_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v6_0_sw_init,
	.sw_fini = uvd_v6_0_sw_fini,
	.hw_init = uvd_v6_0_hw_init,
	.hw_fini = uvd_v6_0_hw_fini,
	.suspend = uvd_v6_0_suspend,
	.resume = uvd_v6_0_resume,
	.is_idle = uvd_v6_0_is_idle,
	.wait_for_idle = uvd_v6_0_wait_for_idle,
	.check_soft_reset = uvd_v6_0_check_soft_reset,
	.pre_soft_reset = uvd_v6_0_pre_soft_reset,
	.soft_reset = uvd_v6_0_soft_reset,
	.post_soft_reset = uvd_v6_0_post_soft_reset,
	.set_clockgating_state = uvd_v6_0_set_clockgating_state,
	.set_powergating_state = uvd_v6_0_set_powergating_state,
	.get_clockgating_state = uvd_v6_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		6 + /* hdp invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v6_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v6_0_ring_emit_wreg,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.emit_frame_size =
		6 + /* hdp invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		VI_FLUSH_GPU_TLB_NUM_WREG * 6 + 8 + /* uvd_v6_0_ring_emit_vm_flush */
		14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
	.emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v6_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v6_0_ring_emit_wreg,
};

static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD_ENC,
	.align_mask = 0x3f,
	.nop = HEVC_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.get_rptr = uvd_v6_0_enc_ring_get_rptr,
	.get_wptr = uvd_v6_0_enc_ring_get_wptr,
	.set_wptr = uvd_v6_0_enc_ring_set_wptr,
	.emit_frame_size =
		4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */
		5 + /* uvd_v6_0_enc_ring_emit_vm_flush */
		5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */
		1, /* uvd_v6_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */
	.emit_ib = uvd_v6_0_enc_ring_emit_ib,
	.emit_fence = uvd_v6_0_enc_ring_emit_fence,
	.emit_vm_flush = uvd_v6_0_enc_ring_emit_vm_flush,
	.emit_pipeline_sync = uvd_v6_0_enc_ring_emit_pipeline_sync,
	.test_ring = uvd_v6_0_enc_ring_test_ring,
	.test_ib = uvd_v6_0_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = uvd_v6_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
	if (adev->asic_type >= CHIP_POLARIS10) {
		adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_vm_funcs;
		DRM_INFO("UVD is enabled in VM mode\n");
	} else {
		adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_phys_funcs;
		DRM_INFO("UVD is enabled in physical mode\n");
	}
}

static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_enc_rings; ++i)
		adev->uvd.inst->ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs;

	DRM_INFO("UVD ENC is enabled in VM mode\n");
}
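/*
 * Two flavours of the decode ring exist: pre-Polaris parts run the ring in
 * physical address mode, so uvd_v6_0_ring_phys_funcs wires up
 * amdgpu_uvd_ring_parse_cs to let the CS parser validate and patch buffer
 * addresses, while Polaris and later use uvd_v6_0_ring_vm_funcs with GPUVM
 * (emit_vm_flush / emit_pipeline_sync) and no CS parser.  The enc rings
 * only exist on the newer, VM-capable parts, hence the single
 * uvd_v6_0_enc_ring_vm_funcs table.
 */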
static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
	.set = uvd_v6_0_set_interrupt_state,
	.process = uvd_v6_0_process_interrupt,
};

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	if (uvd_v6_0_enc_support(adev))
		adev->uvd.inst->irq.num_types = adev->uvd.num_enc_rings + 1;
	else
		adev->uvd.inst->irq.num_types = 1;

	adev->uvd.inst->irq.funcs = &uvd_v6_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v6_0_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_2_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 2,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_3_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 3,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
};