/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>

#include "radeon.h"
#include "r600d.h"

/* 1 second timeout */
#define UVD_IDLE_TIMEOUT_MS	1000

/* Firmware Names */
#define FIRMWARE_RV710		"radeonkmsfw_RV710_uvd"
#define FIRMWARE_CYPRESS	"radeonkmsfw_CYPRESS_uvd"
#define FIRMWARE_SUMO		"radeonkmsfw_SUMO_uvd"
#define FIRMWARE_TAHITI		"radeonkmsfw_TAHITI_uvd"
#define FIRMWARE_BONAIRE	"radeonkmsfw_BONAIRE_uvd"

MODULE_FIRMWARE(FIRMWARE_RV710);
MODULE_FIRMWARE(FIRMWARE_CYPRESS);
MODULE_FIRMWARE(FIRMWARE_SUMO);
MODULE_FIRMWARE(FIRMWARE_TAHITI);
MODULE_FIRMWARE(FIRMWARE_BONAIRE);

static void radeon_uvd_idle_work_handler(struct work_struct *work);

int radeon_uvd_init(struct radeon_device *rdev)
{
	unsigned long bo_size;
	const char *fw_name;
	int i, r;

	INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler);

	switch (rdev->family) {
	case CHIP_RV710:
	case CHIP_RV730:
	case CHIP_RV740:
		fw_name = FIRMWARE_RV710;
		break;

	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
	case CHIP_JUNIPER:
	case CHIP_REDWOOD:
	case CHIP_CEDAR:
		fw_name = FIRMWARE_CYPRESS;
		break;

	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_PALM:
	case CHIP_CAYMAN:
	case CHIP_BARTS:
	case CHIP_TURKS:
	case CHIP_CAICOS:
		fw_name = FIRMWARE_SUMO;
		break;

	case CHIP_TAHITI:
	case CHIP_VERDE:
	case CHIP_PITCAIRN:
	case CHIP_ARUBA:
		fw_name = FIRMWARE_TAHITI;
		break;

	case CHIP_BONAIRE:
	case CHIP_KABINI:
	case CHIP_KAVERI:
		fw_name = FIRMWARE_BONAIRE;
		break;

	default:
		return -EINVAL;
	}

	r = request_firmware(&rdev->uvd_fw, fw_name, rdev->dev);
	if (r) {
		dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->datasize + 8) +
		  RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
	r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r);
		return r;
	}

	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
	if (r) {
		radeon_bo_unref(&rdev->uvd.vcpu_bo);
		dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r);
		return r;
	}

	r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
			  &rdev->uvd.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->uvd.vcpu_bo);
		radeon_bo_unref(&rdev->uvd.vcpu_bo);
		dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r);
		return r;
	}

	r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
	if (r) {
		dev_err(rdev->dev, "(%d) UVD map failed\n", r);
		return r;
	}

	radeon_bo_unreserve(rdev->uvd.vcpu_bo);

	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
		atomic_set(&rdev->uvd.handles[i], 0);
		rdev->uvd.filp[i] = NULL;
	}

	return 0;
}
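
/*
 * Layout note (illustrative, sizes are hypothetical): the single VCPU
 * buffer object allocated above holds the firmware image followed by the
 * UVD stack and heap, e.g. for an assumed 256 KiB firmware image:
 *
 *	bo_size = RADEON_GPU_PAGE_ALIGN(256 KiB + 8)
 *	        + RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
 *
 * radeon_uvd_suspend()/radeon_uvd_resume() below depend on exactly this
 * layout: the firmware part can always be restored from rdev->uvd_fw, so
 * only the stack/heap tail after datasize bytes is saved and restored.
 */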

void radeon_uvd_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->uvd.vcpu_bo == NULL)
		return;

	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
	if (!r) {
		radeon_bo_kunmap(rdev->uvd.vcpu_bo);
		radeon_bo_unpin(rdev->uvd.vcpu_bo);
		radeon_bo_unreserve(rdev->uvd.vcpu_bo);
	}

	radeon_bo_unref(&rdev->uvd.vcpu_bo);

	release_firmware(rdev->uvd_fw);
}

int radeon_uvd_suspend(struct radeon_device *rdev)
{
	unsigned size;
	char *ptr;
	int i;

	if (rdev->uvd.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
		if (atomic_read(&rdev->uvd.handles[i]))
			break;

	if (i == RADEON_MAX_UVD_HANDLES)
		return 0;

	size = radeon_bo_size(rdev->uvd.vcpu_bo);
	size -= rdev->uvd_fw->datasize;

	ptr = rdev->uvd.cpu_addr;
	ptr += rdev->uvd_fw->datasize;

	rdev->uvd.saved_bo = kmalloc(size, M_DRM, M_WAITOK);
	memcpy(rdev->uvd.saved_bo, ptr, size);

	return 0;
}

int radeon_uvd_resume(struct radeon_device *rdev)
{
	unsigned size;
	char *ptr;

	if (rdev->uvd.vcpu_bo == NULL)
		return -EINVAL;

	memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->datasize);

	size = radeon_bo_size(rdev->uvd.vcpu_bo);
	size -= rdev->uvd_fw->datasize;

	ptr = rdev->uvd.cpu_addr;
	ptr += rdev->uvd_fw->datasize;

	if (rdev->uvd.saved_bo != NULL) {
		memcpy(ptr, rdev->uvd.saved_bo, size);
		kfree(rdev->uvd.saved_bo);
		rdev->uvd.saved_bo = NULL;
	} else
		memset(ptr, 0, size);

	return 0;
}

void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo)
{
	rbo->placement.fpfn = 0 >> PAGE_SHIFT;
	rbo->placement.lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
}

void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
{
	int i, r;
	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
		uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
		if (handle != 0 && rdev->uvd.filp[i] == filp) {
			struct radeon_fence *fence;

			r = radeon_uvd_get_destroy_msg(rdev,
				R600_RING_TYPE_UVD_INDEX, handle, &fence);
			if (r) {
				DRM_ERROR("Error destroying UVD (%d)!\n", r);
				continue;
			}

			radeon_fence_wait(fence, false);
			radeon_fence_unref(&fence);

			rdev->uvd.filp[i] = NULL;
			atomic_set(&rdev->uvd.handles[i], 0);
		}
	}
}

static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
{
	unsigned stream_type = msg[4];
	unsigned width = msg[6];
	unsigned height = msg[7];
	unsigned dpb_size = msg[9];
	unsigned pitch = msg[28];

	unsigned width_in_mb = width / 16;
	unsigned height_in_mb = ALIGN(height / 16, 2);

	unsigned image_size, tmp, min_dpb_size;

	image_size = width * height;
	image_size += image_size / 2;
	image_size = ALIGN(image_size, 1024);

	switch (stream_type) {
	case 0: /* H264 */

		/* reference picture buffer */
		min_dpb_size = image_size * 17;

		/* macroblock context buffer */
		min_dpb_size += width_in_mb * height_in_mb * 17 * 192;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * height_in_mb * 32;
		break;

	case 1: /* VC1 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CONTEXT_BUFFER */
		min_dpb_size += width_in_mb * height_in_mb * 128;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * 64;

		/* DB surface buffer */
		min_dpb_size += width_in_mb * 128;

		/* BP */
		tmp = max(width_in_mb, height_in_mb);
		min_dpb_size += ALIGN(tmp * 7 * 16, 64);
		break;

	case 3: /* MPEG2 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;
		break;

	case 4: /* MPEG4 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CM */
		min_dpb_size += width_in_mb * height_in_mb * 64;

		/* IT surface buffer */
		min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
		break;

	default:
		DRM_ERROR("UVD codec not handled %d!\n", stream_type);
		return -EINVAL;
	}

	if (width > pitch) {
		DRM_ERROR("Invalid UVD decoding target pitch!\n");
		return -EINVAL;
	}

	if (dpb_size < min_dpb_size) {
		DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
			  dpb_size, min_dpb_size);
		return -EINVAL;
	}

	buf_sizes[0x1] = dpb_size;
	buf_sizes[0x2] = image_size;
	return 0;
}
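
/*
 * Worked example (illustrative only): for a 1920x1088 H264 stream the
 * checks above require at least
 *
 *	width_in_mb  = 1920 / 16           = 120
 *	height_in_mb = ALIGN(1088 / 16, 2) = 68
 *	image_size   = ALIGN(1920 * 1088 * 3 / 2, 1024) = 3133440
 *	min_dpb_size = 3133440 * 17           (reference pictures)
 *	             + 120 * 68 * 17 * 192    (macroblock context)
 *	             + 120 * 68 * 32          (IT surface)
 *	             = 80163840 bytes (~76 MiB)
 *
 * so a decode message advertising a smaller dpb_size is rejected.
 */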

static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
			     unsigned offset, unsigned buf_sizes[])
{
	int32_t *msg, msg_type, handle;
	void *ptr;

	int i, r;

	if (offset & 0x3F) {
		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
		return -EINVAL;
	}

	if (bo->tbo.sync_obj) {
		r = radeon_fence_wait(bo->tbo.sync_obj, false);
		if (r) {
			DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
			return r;
		}
	}

	r = radeon_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
		return r;
	}

	msg = (uint32_t*)((uint8_t*)ptr + offset);

	msg_type = msg[1];
	handle = msg[2];

	if (handle == 0) {
		/* unmap before bailing out so the kmap isn't leaked */
		radeon_bo_kunmap(bo);
		DRM_ERROR("Invalid UVD handle!\n");
		return -EINVAL;
	}

	if (msg_type == 1) {
		/* it's a decode msg, calc buffer sizes */
		r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
		radeon_bo_kunmap(bo);
		if (r)
			return r;

	} else if (msg_type == 2) {
		/* it's a destroy msg, free the handle */
		for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
			atomic_cmpset(&p->rdev->uvd.handles[i], handle, 0);
		radeon_bo_kunmap(bo);
		return 0;
	} else {
		radeon_bo_kunmap(bo);

		if (msg_type != 0) {
			DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
			return -EINVAL;
		}

		/* it's a create msg, no special handling needed */
	}

	/* create or decode, validate the handle */
	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
		if (atomic_read(&p->rdev->uvd.handles[i]) == handle)
			return 0;
	}

	/* handle not found, try to alloc a new one */
	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
#if 0
		if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
#endif
		if (atomic_cmpset(&p->rdev->uvd.handles[i], 0, handle) == 1) {
			p->rdev->uvd.filp[i] = p->filp;
			return 0;
		}
	}

	DRM_ERROR("No more free UVD handles!\n");
	return -EINVAL;
}
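
/*
 * Illustrative layout of the UVD message header as consumed above and by
 * radeon_uvd_cs_msg_decode() (dword indices; only the fields this file
 * actually reads or writes are listed, the rest of the buffer is
 * firmware-defined):
 *
 *	msg[0]  set to 0x00000de4 in the canned create/destroy msgs below
 *	msg[1]  message type: 0 = create, 1 = decode, 2 = destroy
 *	msg[2]  session handle (must be non-zero)
 *	msg[4]  stream type: 0 = H264, 1 = VC1, 3 = MPEG2, 4 = MPEG4
 *	msg[6]  width, msg[7] height
 *	msg[9]  dpb_size, msg[28] pitch
 */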

static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
			       int data0, int data1,
			       unsigned buf_sizes[], bool *has_msg_cmd)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_reloc *reloc;
	unsigned idx, cmd, offset;
	uint64_t start, end;
	int r;

	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	offset = radeon_get_ib_value(p, data0);
	idx = radeon_get_ib_value(p, data1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d!\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}

	reloc = p->relocs_ptr[(idx / 4)];
	start = reloc->lobj.gpu_offset;
	end = start + radeon_bo_size(reloc->robj);
	start += offset;

	p->ib.ptr[data0] = start & 0xFFFFFFFF;
	p->ib.ptr[data1] = start >> 32;

	cmd = radeon_get_ib_value(p, p->idx) >> 1;

	if (cmd < 0x4) {
		if ((end - start) < buf_sizes[cmd]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start), buf_sizes[cmd]);
			return -EINVAL;
		}

	} else if (cmd != 0x100) {
		DRM_ERROR("invalid UVD command %X!\n", cmd);
		return -EINVAL;
	}

	if ((start >> 28) != (end >> 28)) {
		DRM_ERROR("reloc %lX-%lX crossing 256MB boundary!\n",
			  start, end);
		return -EINVAL;
	}

	/* TODO: is this still necessary on NI+ ? */
	if ((cmd == 0 || cmd == 0x3) &&
	    (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
		DRM_ERROR("msg/fb buffer %lX-%lX out of 256MB segment!\n",
			  start, end);
		return -EINVAL;
	}

	if (cmd == 0) {
		if (*has_msg_cmd) {
			DRM_ERROR("More than one message in a UVD-IB!\n");
			return -EINVAL;
		}
		*has_msg_cmd = true;
		r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes);
		if (r)
			return r;
	} else if (!*has_msg_cmd) {
		DRM_ERROR("Message needed before other commands are sent!\n");
		return -EINVAL;
	}

	return 0;
}

static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
			     struct radeon_cs_packet *pkt,
			     int *data0, int *data1,
			     unsigned buf_sizes[],
			     bool *has_msg_cmd)
{
	int i, r;

	p->idx++;
	for (i = 0; i <= pkt->count; ++i) {
		switch (pkt->reg + i*4) {
		case UVD_GPCOM_VCPU_DATA0:
			*data0 = p->idx;
			break;
		case UVD_GPCOM_VCPU_DATA1:
			*data1 = p->idx;
			break;
		case UVD_GPCOM_VCPU_CMD:
			r = radeon_uvd_cs_reloc(p, *data0, *data1,
						buf_sizes, has_msg_cmd);
			if (r)
				return r;
			break;
		case UVD_ENGINE_CNTL:
			break;
		default:
			DRM_ERROR("Invalid reg 0x%X!\n",
				  pkt->reg + i*4);
			return -EINVAL;
		}
		p->idx++;
	}
	return 0;
}

int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	int r, data0 = 0, data1 = 0;

	/* does the IB have a msg command */
	bool has_msg_cmd = false;

	/* minimum buffer sizes */
	unsigned buf_sizes[] = {
		[0x00000000] = 2048,
		[0x00000001] = 32 * 1024 * 1024,
		[0x00000002] = 2048 * 1152 * 3,
		[0x00000003] = 2048,
	};

	if (p->chunks[p->chunk_ib_idx].length_dw % 16) {
		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
			  p->chunks[p->chunk_ib_idx].length_dw);
		return -EINVAL;
	}

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk!\n");
		return -EINVAL;
	}

	do {
		r = radeon_cs_packet_parse(p, &pkt, p->idx);
		if (r)
			return r;
		switch (pkt.type) {
		case RADEON_PACKET_TYPE0:
			r = radeon_uvd_cs_reg(p, &pkt, &data0, &data1,
					      buf_sizes, &has_msg_cmd);
			if (r)
				return r;
			break;
		case RADEON_PACKET_TYPE2:
			p->idx += pkt.count + 2;
			break;
		default:
			DRM_ERROR("Unknown packet type %d!\n", pkt.type);
			return -EINVAL;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);

	if (!has_msg_cmd) {
		DRM_ERROR("UVD-IBs need a msg command!\n");
		return -EINVAL;
	}

	return 0;
}
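
/*
 * For reference, the minimal command stream the parser above accepts is
 * the one radeon_uvd_send_msg() builds below: type-0 writes of the message
 * buffer address to UVD_GPCOM_VCPU_DATA0/DATA1, a write to
 * UVD_GPCOM_VCPU_CMD (value 0, i.e. cmd 0x0 = message buffer), padded with
 * type-2 NOPs to a multiple of 16 dwords:
 *
 *	PACKET0(UVD_GPCOM_VCPU_DATA0, 0); lower 32 bits of msg address
 *	PACKET0(UVD_GPCOM_VCPU_DATA1, 0); upper 32 bits of msg address
 *	PACKET0(UVD_GPCOM_VCPU_CMD, 0);   command dword
 *	PACKET2(0); ...                   padding
 */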

static int radeon_uvd_send_msg(struct radeon_device *rdev,
			       int ring, struct radeon_bo *bo,
			       struct radeon_fence **fence)
{
	struct ttm_validate_buffer tv;
	struct list_head head;
	struct radeon_ib ib;
	uint64_t addr;
	int i, r;

	memset(&tv, 0, sizeof(tv));
	tv.bo = &bo->tbo;

	INIT_LIST_HEAD(&head);
	list_add(&tv.head, &head);

	r = ttm_eu_reserve_buffers(&head);
	if (r)
		return r;

	radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_VRAM);
	radeon_uvd_force_into_uvd_segment(bo);

	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (r) {
		ttm_eu_backoff_reservation(&head);
		return r;
	}

	r = radeon_ib_get(rdev, ring, &ib, NULL, 16);
	if (r) {
		ttm_eu_backoff_reservation(&head);
		return r;
	}

	addr = radeon_bo_gpu_offset(bo);
	ib.ptr[0] = PACKET0(UVD_GPCOM_VCPU_DATA0, 0);
	ib.ptr[1] = addr;
	ib.ptr[2] = PACKET0(UVD_GPCOM_VCPU_DATA1, 0);
	ib.ptr[3] = addr >> 32;
	ib.ptr[4] = PACKET0(UVD_GPCOM_VCPU_CMD, 0);
	ib.ptr[5] = 0;
	for (i = 6; i < 16; ++i)
		ib.ptr[i] = PACKET2(0);
	ib.length_dw = 16;

	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r) {
		ttm_eu_backoff_reservation(&head);
		return r;
	}
	ttm_eu_fence_buffer_objects(&head, ib.fence);

	if (fence)
		*fence = radeon_fence_ref(ib.fence);

	radeon_ib_free(rdev, &ib);
	radeon_bo_unref(&bo);
	return 0;
}

/* multiple fence commands without any stream commands in between can
   crash the vcpu so just try to emit a dummy create/destroy msg to
   avoid this */
int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
			      uint32_t handle, struct radeon_fence **fence)
{
	struct radeon_bo *bo;
	uint32_t *msg;
	int r, i;

	r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
	if (r)
		return r;

	r = radeon_bo_reserve(bo, false);
	if (r) {
		radeon_bo_unref(&bo);
		return r;
	}

	r = radeon_bo_kmap(bo, (void **)&msg);
	if (r) {
		radeon_bo_unreserve(bo);
		radeon_bo_unref(&bo);
		return r;
	}

	/* stitch together a UVD create msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000000);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(0x00000000);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000000);
	msg[7] = cpu_to_le32(0x00000780);
	msg[8] = cpu_to_le32(0x00000440);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x01b37000);
	for (i = 11; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	radeon_bo_kunmap(bo);
	radeon_bo_unreserve(bo);

	return radeon_uvd_send_msg(rdev, ring, bo, fence);
}
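
/*
 * Note on the magic numbers above (our interpretation, not from cited
 * docs): 0x780 and 0x440 are 1920 and 1088, so the dummy session looks
 * like a full-HD decode, and 0x01b37000 (~27 MiB) is presumably a matching
 * buffer size. The exact values only need to keep the firmware happy; the
 * message is never used for real decoding.
 */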

int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
			       uint32_t handle, struct radeon_fence **fence)
{
	struct radeon_bo *bo;
	uint32_t *msg;
	int r, i;

	r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
	if (r)
		return r;

	r = radeon_bo_reserve(bo, false);
	if (r) {
		radeon_bo_unref(&bo);
		return r;
	}

	r = radeon_bo_kmap(bo, (void **)&msg);
	if (r) {
		radeon_bo_unreserve(bo);
		radeon_bo_unref(&bo);
		return r;
	}

	/* stitch together a UVD destroy msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000002);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	for (i = 4; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	radeon_bo_kunmap(bo);
	radeon_bo_unreserve(bo);

	return radeon_uvd_send_msg(rdev, ring, bo, fence);
}

static void radeon_uvd_idle_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev =
		container_of(work, struct radeon_device, uvd.idle_work.work);

	if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0) {
		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
			lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE);
			rdev->pm.dpm.uvd_active = false;
			lockmgr(&rdev->pm.mutex, LK_RELEASE);
			radeon_pm_compute_clocks(rdev);
		} else {
			radeon_set_uvd_clocks(rdev, 0, 0);
		}
	} else {
		schedule_delayed_work(&rdev->uvd.idle_work,
				      msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
	}
}

void radeon_uvd_note_usage(struct radeon_device *rdev)
{
	bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work);
	set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work,
					    msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
	if (set_clocks) {
		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
			/* XXX pick SD/HD/MVC */
			radeon_dpm_enable_power_state(rdev, POWER_STATE_TYPE_INTERNAL_UVD);
		} else {
			radeon_set_uvd_clocks(rdev, 53300, 40000);
		}
	}
}

static unsigned radeon_uvd_calc_upll_post_div(unsigned vco_freq,
					      unsigned target_freq,
					      unsigned pd_min,
					      unsigned pd_even)
{
	unsigned post_div = vco_freq / target_freq;

	/* adjust to post divider minimum value */
	if (post_div < pd_min)
		post_div = pd_min;

	/* we always need a frequency less than or equal to the target */
	if ((vco_freq / post_div) > target_freq)
		post_div += 1;

	/* post dividers above a certain value must be even */
	if (post_div > pd_even && post_div % 2)
		post_div += 1;

	return post_div;
}
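
/*
 * Worked example (same units as the radeon_set_uvd_clocks(rdev, 53300,
 * 40000) call above, i.e. 10 kHz steps, so 53300 ≈ 533 MHz): for
 * vco_freq = 160000 and target_freq = 53300 with pd_min = 1, pd_even = 4:
 *
 *	post_div = 160000 / 53300 = 3
 *	160000 / 3 = 53333 > 53300, so post_div becomes 4
 *	4 does not exceed pd_even, so the result stays 4 (400 MHz)
 *
 * i.e. the function always rounds towards a frequency at or below the
 * requested one.
 */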

/**
 * radeon_uvd_calc_upll_dividers - calc UPLL clock dividers
 *
 * @rdev: radeon_device pointer
 * @vclk: wanted VCLK
 * @dclk: wanted DCLK
 * @vco_min: minimum VCO frequency
 * @vco_max: maximum VCO frequency
 * @fb_factor: factor to multiply vco freq with
 * @fb_mask: limit and bitmask for feedback divider
 * @pd_min: post divider minimum
 * @pd_max: post divider maximum
 * @pd_even: post divider must be even above this value
 * @optimal_fb_div: resulting feedback divider
 * @optimal_vclk_div: resulting vclk post divider
 * @optimal_dclk_div: resulting dclk post divider
 *
 * Calculate dividers for UVD's UPLL (R6xx-SI, except APUs).
 * Returns zero on success, -EINVAL on error.
 */
int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
				  unsigned vclk, unsigned dclk,
				  unsigned vco_min, unsigned vco_max,
				  unsigned fb_factor, unsigned fb_mask,
				  unsigned pd_min, unsigned pd_max,
				  unsigned pd_even,
				  unsigned *optimal_fb_div,
				  unsigned *optimal_vclk_div,
				  unsigned *optimal_dclk_div)
{
	unsigned vco_freq, ref_freq = rdev->clock.spll.reference_freq;

	/* start off with something large */
	unsigned optimal_score = ~0;

	/* loop through vco from low to high */
	vco_min = max(max(vco_min, vclk), dclk);
	for (vco_freq = vco_min; vco_freq <= vco_max; vco_freq += 100) {

		uint64_t fb_div = (uint64_t)vco_freq * fb_factor;
		unsigned vclk_div, dclk_div, score;

		do_div(fb_div, ref_freq);

		/* fb div out of range ? */
		if (fb_div > fb_mask)
			break; /* it can only get worse */

		fb_div &= fb_mask;

		/* calc vclk divider with current vco freq */
		vclk_div = radeon_uvd_calc_upll_post_div(vco_freq, vclk,
							 pd_min, pd_even);
		if (vclk_div > pd_max)
			break; /* vco is too big, it has to stop */

		/* calc dclk divider with current vco freq */
		dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk,
							 pd_min, pd_even);
		if (dclk_div > pd_max)
			break; /* vco is too big, it has to stop */

		/* calc score with current vco freq */
		score = vclk - (vco_freq / vclk_div) + dclk - (vco_freq / dclk_div);

		/* determine if this vco setting is better than current optimal settings */
		if (score < optimal_score) {
			*optimal_fb_div = fb_div;
			*optimal_vclk_div = vclk_div;
			*optimal_dclk_div = dclk_div;
			optimal_score = score;
			if (optimal_score == 0)
				break; /* it can't get better than this */
		}
	}

	/* did we find a valid setup ? */
	if (optimal_score == ~0)
		return -EINVAL;

	return 0;
}

int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
				unsigned cg_upll_func_cntl)
{
	unsigned i;

	/* make sure UPLL_CTLREQ is deasserted */
	WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);

	mdelay(10);

	/* assert UPLL_CTLREQ */
	WREG32_P(cg_upll_func_cntl, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);

	/* wait for CTLACK and CTLACK2 to get asserted */
	for (i = 0; i < 100; ++i) {
		uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
		if ((RREG32(cg_upll_func_cntl) & mask) == mask)
			break;
		mdelay(10);
	}

	/* deassert UPLL_CTLREQ */
	WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);

	if (i == 100) {
		DRM_ERROR("Timeout setting UVD clocks!\n");
		return -ETIMEDOUT;
	}

	return 0;
}
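
/*
 * Usage sketch (hypothetical parameter values; the real callers live in
 * the chip-specific files, e.g. si.c): first search for dividers, then
 * program the PLL registers and hand-shake the change with
 * radeon_uvd_send_upll_ctlreq():
 *
 *	unsigned fb_div, vclk_div, dclk_div;
 *	r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk,
 *					  125000, 250000,	// VCO range
 *					  16, 0x03FFFFFF,	// fb factor/mask
 *					  0, 128, 5,		// pd min/max/even
 *					  &fb_div, &vclk_div, &dclk_div);
 *	if (r)
 *		return r;
 *	// ... write fb_div/vclk_div/dclk_div to the CG_UPLL_FUNC_CNTL
 *	// registers as the chip requires ...
 *	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
 */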