/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>

#include "radeon.h"
#include "r600d.h"

/* 1 second timeout */
#define UVD_IDLE_TIMEOUT_MS	1000

/* Firmware Names */
#define FIRMWARE_RV710		"radeonkmsfw_RV710_uvd"
#define FIRMWARE_CYPRESS	"radeonkmsfw_CYPRESS_uvd"
#define FIRMWARE_SUMO		"radeonkmsfw_SUMO_uvd"
#define FIRMWARE_TAHITI		"radeonkmsfw_TAHITI_uvd"
#define FIRMWARE_BONAIRE	"radeonkmsfw_BONAIRE_uvd"

MODULE_FIRMWARE(FIRMWARE_RV710);
MODULE_FIRMWARE(FIRMWARE_CYPRESS);
MODULE_FIRMWARE(FIRMWARE_SUMO);
MODULE_FIRMWARE(FIRMWARE_TAHITI);
MODULE_FIRMWARE(FIRMWARE_BONAIRE);

static void radeon_uvd_idle_work_handler(struct work_struct *work);

int radeon_uvd_init(struct radeon_device *rdev)
{
	unsigned long bo_size;
	const char *fw_name;
	int i, r;

	INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler);

	switch (rdev->family) {
	case CHIP_RV710:
	case CHIP_RV730:
	case CHIP_RV740:
		fw_name = FIRMWARE_RV710;
		break;

	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
	case CHIP_JUNIPER:
	case CHIP_REDWOOD:
	case CHIP_CEDAR:
		fw_name = FIRMWARE_CYPRESS;
		break;

	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_PALM:
	case CHIP_CAYMAN:
	case CHIP_BARTS:
	case CHIP_TURKS:
	case CHIP_CAICOS:
		fw_name = FIRMWARE_SUMO;
		break;

	case CHIP_TAHITI:
	case CHIP_VERDE:
	case CHIP_PITCAIRN:
	case CHIP_ARUBA:
		fw_name = FIRMWARE_TAHITI;
		break;

	case CHIP_BONAIRE:
	case CHIP_KABINI:
	case CHIP_KAVERI:
		fw_name = FIRMWARE_BONAIRE;
		break;

	default:
		return -EINVAL;
	}

	r = request_firmware(&rdev->uvd_fw, fw_name, rdev->dev);
	if (r) {
		dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->datasize + 8) +
		  RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
	r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r);
		return r;
	}
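	/*
	 * Note: as the bo_size computation above implies, the VCPU BO
	 * holds the firmware image (datasize + 8 bytes, page aligned) at
	 * offset 0, followed by RADEON_UVD_STACK_SIZE +
	 * RADEON_UVD_HEAP_SIZE bytes of VCPU state.
	 * radeon_uvd_suspend()/radeon_uvd_resume() save and restore
	 * exactly the part past the firmware image.
	 */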
	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
	if (r) {
		radeon_bo_unref(&rdev->uvd.vcpu_bo);
		dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r);
		return r;
	}

	r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
			  &rdev->uvd.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->uvd.vcpu_bo);
		radeon_bo_unref(&rdev->uvd.vcpu_bo);
		dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r);
		return r;
	}

	r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
	if (r) {
		dev_err(rdev->dev, "(%d) UVD map failed\n", r);
		return r;
	}

	radeon_bo_unreserve(rdev->uvd.vcpu_bo);

	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
		atomic_set(&rdev->uvd.handles[i], 0);
		rdev->uvd.filp[i] = NULL;
		rdev->uvd.img_size[i] = 0;
	}

	return 0;
}

void radeon_uvd_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->uvd.vcpu_bo == NULL)
		return;

	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
	if (!r) {
		radeon_bo_kunmap(rdev->uvd.vcpu_bo);
		radeon_bo_unpin(rdev->uvd.vcpu_bo);
		radeon_bo_unreserve(rdev->uvd.vcpu_bo);
	}

	radeon_bo_unref(&rdev->uvd.vcpu_bo);

	release_firmware(rdev->uvd_fw);
}

int radeon_uvd_suspend(struct radeon_device *rdev)
{
	unsigned size;
	char *ptr;
	int i;

	if (rdev->uvd.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
		if (atomic_read(&rdev->uvd.handles[i]))
			break;

	if (i == RADEON_MAX_UVD_HANDLES)
		return 0;

	size = radeon_bo_size(rdev->uvd.vcpu_bo);
	size -= rdev->uvd_fw->datasize;

	ptr = rdev->uvd.cpu_addr;
	ptr += rdev->uvd_fw->datasize;

	rdev->uvd.saved_bo = kmalloc(size, M_DRM, M_WAITOK);
	memcpy(rdev->uvd.saved_bo, ptr, size);

	return 0;
}

int radeon_uvd_resume(struct radeon_device *rdev)
{
	unsigned size;
	char *ptr;

	if (rdev->uvd.vcpu_bo == NULL)
		return -EINVAL;

	memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->datasize);

	size = radeon_bo_size(rdev->uvd.vcpu_bo);
	size -= rdev->uvd_fw->datasize;

	ptr = rdev->uvd.cpu_addr;
	ptr += rdev->uvd_fw->datasize;

	if (rdev->uvd.saved_bo != NULL) {
		memcpy(ptr, rdev->uvd.saved_bo, size);
		kfree(rdev->uvd.saved_bo);
		rdev->uvd.saved_bo = NULL;
	} else
		memset(ptr, 0, size);

	return 0;
}

void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo)
{
	rbo->placement.fpfn = 0 >> PAGE_SHIFT;
	rbo->placement.lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
}

void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
{
	int i, r;

	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
		uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
		if (handle != 0 && rdev->uvd.filp[i] == filp) {
			struct radeon_fence *fence;

			r = radeon_uvd_get_destroy_msg(rdev,
				R600_RING_TYPE_UVD_INDEX, handle, &fence);
			if (r) {
				DRM_ERROR("Error destroying UVD (%d)!\n", r);
				continue;
			}

			radeon_fence_wait(fence, false);
			radeon_fence_unref(&fence);

			rdev->uvd.filp[i] = NULL;
			atomic_set(&rdev->uvd.handles[i], 0);
		}
	}
}
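/*
 * Dwords of the UVD decode message that are checked below (indices as
 * used in the code; the rest of the message is not validated):
 *
 *   msg[4]  stream type: 0 = H264, 1 = VC1, 3 = MPEG2, 4 = MPEG4
 *   msg[6]  image width in pixels
 *   msg[7]  image height in pixels
 *   msg[9]  decoded picture buffer (DPB) size
 *   msg[28] decoding target pitch
 */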
static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
{
	unsigned stream_type = msg[4];
	unsigned width = msg[6];
	unsigned height = msg[7];
	unsigned dpb_size = msg[9];
	unsigned pitch = msg[28];

	unsigned width_in_mb = width / 16;
	unsigned height_in_mb = ALIGN(height / 16, 2);

	unsigned image_size, tmp, min_dpb_size;

	image_size = width * height;
	image_size += image_size / 2;
	image_size = ALIGN(image_size, 1024);

	switch (stream_type) {
	case 0: /* H264 */

		/* reference picture buffer */
		min_dpb_size = image_size * 17;

		/* macroblock context buffer */
		min_dpb_size += width_in_mb * height_in_mb * 17 * 192;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * height_in_mb * 32;
		break;

	case 1: /* VC1 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CONTEXT_BUFFER */
		min_dpb_size += width_in_mb * height_in_mb * 128;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * 64;

		/* DB surface buffer */
		min_dpb_size += width_in_mb * 128;

		/* BP */
		tmp = max(width_in_mb, height_in_mb);
		min_dpb_size += ALIGN(tmp * 7 * 16, 64);
		break;

	case 3: /* MPEG2 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;
		break;

	case 4: /* MPEG4 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CM */
		min_dpb_size += width_in_mb * height_in_mb * 64;

		/* IT surface buffer */
		min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
		break;

	default:
		DRM_ERROR("UVD codec not handled %d!\n", stream_type);
		return -EINVAL;
	}

	if (width > pitch) {
		DRM_ERROR("Invalid UVD decoding target pitch!\n");
		return -EINVAL;
	}

	if (dpb_size < min_dpb_size) {
		DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
			  dpb_size, min_dpb_size);
		return -EINVAL;
	}

	buf_sizes[0x1] = dpb_size;
	buf_sizes[0x2] = image_size;
	return 0;
}
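/*
 * Worked example (illustrative): a 1920x1088 H264 stream gives
 * width_in_mb = 120, height_in_mb = 68 and
 * image_size = ALIGN(1920 * 1088 * 3 / 2, 1024) = 3133440, so
 * min_dpb_size = 3133440 * 17 + 120 * 68 * 17 * 192 + 120 * 68 * 32
 *              = 80163840 bytes (~76 MiB).
 */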
static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
			     unsigned offset, unsigned buf_sizes[])
{
	int32_t *msg, msg_type, handle;
	unsigned img_size = 0;
	void *ptr;

	int i, r;

	if (offset & 0x3F) {
		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
		return -EINVAL;
	}

	if (bo->tbo.sync_obj) {
		r = radeon_fence_wait(bo->tbo.sync_obj, false);
		if (r) {
			DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
			return r;
		}
	}

	r = radeon_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
		return r;
	}

	msg = (uint32_t*)((uint8_t*)ptr + offset);

	msg_type = msg[1];
	handle = msg[2];

	if (handle == 0) {
		DRM_ERROR("Invalid UVD handle!\n");
		return -EINVAL;
	}

	if (msg_type == 1) {
		/* it's a decode msg, calc buffer sizes */
		r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
		/* calc image size (width * height) */
		img_size = msg[6] * msg[7];
		radeon_bo_kunmap(bo);
		if (r)
			return r;

	} else if (msg_type == 2) {
		/* it's a destroy msg, free the handle */
		for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
			atomic_cmpset(&p->rdev->uvd.handles[i], handle, 0);
		radeon_bo_kunmap(bo);
		return 0;
	} else {
		/* it's a create msg, calc image size (width * height) */
		img_size = msg[7] * msg[8];
		radeon_bo_kunmap(bo);

		if (msg_type != 0) {
			DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
			return -EINVAL;
		}

		/* it's a create msg, no special handling needed */
	}

	/* create or decode, validate the handle */
	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
		if (atomic_read(&p->rdev->uvd.handles[i]) == handle)
			return 0;
	}

	/* handle not found, try to allocate a new one */
	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
#if 0
		if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
#endif
		if (atomic_cmpset(&p->rdev->uvd.handles[i], 0, handle) == 1) {
			p->rdev->uvd.filp[i] = p->filp;
			p->rdev->uvd.img_size[i] = img_size;
			return 0;
		}
	}

	DRM_ERROR("No more free UVD handles!\n");
	return -EINVAL;
}

static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
			       int data0, int data1,
			       unsigned buf_sizes[], bool *has_msg_cmd)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_reloc *reloc;
	unsigned idx, cmd, offset;
	uint64_t start, end;
	int r;

	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	offset = radeon_get_ib_value(p, data0);
	idx = radeon_get_ib_value(p, data1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d!\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}

	reloc = p->relocs_ptr[(idx / 4)];
	start = reloc->lobj.gpu_offset;
	end = start + radeon_bo_size(reloc->robj);
	start += offset;

	p->ib.ptr[data0] = start & 0xFFFFFFFF;
	p->ib.ptr[data1] = start >> 32;

	cmd = radeon_get_ib_value(p, p->idx) >> 1;

	if (cmd < 0x4) {
		if ((end - start) < buf_sizes[cmd]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start), buf_sizes[cmd]);
			return -EINVAL;
		}

	} else if (cmd != 0x100) {
		DRM_ERROR("invalid UVD command %X!\n", cmd);
		return -EINVAL;
	}
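	/*
	 * The checks below enforce what appears to be a 256MB segment
	 * model: start and end must share the same upper address bits
	 * (start >> 28), i.e. a buffer may not cross a 256MB boundary,
	 * and msg/fb buffers must sit in the same 256MB segment as the
	 * firmware (see radeon_uvd_force_into_uvd_segment()).
	 */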
	if ((start >> 28) != (end >> 28)) {
		DRM_ERROR("reloc %lX-%lX crossing 256MB boundary!\n",
			  start, end);
		return -EINVAL;
	}

	/* TODO: is this still necessary on NI+ ? */
	if ((cmd == 0 || cmd == 0x3) &&
	    (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
		DRM_ERROR("msg/fb buffer %lX-%lX out of 256MB segment!\n",
			  start, end);
		return -EINVAL;
	}

	if (cmd == 0) {
		if (*has_msg_cmd) {
			DRM_ERROR("More than one message in a UVD-IB!\n");
			return -EINVAL;
		}
		*has_msg_cmd = true;
		r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes);
		if (r)
			return r;
	} else if (!*has_msg_cmd) {
		DRM_ERROR("Message needed before other commands are sent!\n");
		return -EINVAL;
	}

	return 0;
}

static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
			     struct radeon_cs_packet *pkt,
			     int *data0, int *data1,
			     unsigned buf_sizes[],
			     bool *has_msg_cmd)
{
	int i, r;

	p->idx++;
	for (i = 0; i <= pkt->count; ++i) {
		switch (pkt->reg + i*4) {
		case UVD_GPCOM_VCPU_DATA0:
			*data0 = p->idx;
			break;
		case UVD_GPCOM_VCPU_DATA1:
			*data1 = p->idx;
			break;
		case UVD_GPCOM_VCPU_CMD:
			r = radeon_uvd_cs_reloc(p, *data0, *data1,
						buf_sizes, has_msg_cmd);
			if (r)
				return r;
			break;
		case UVD_ENGINE_CNTL:
			break;
		default:
			DRM_ERROR("Invalid reg 0x%X!\n",
				  pkt->reg + i*4);
			return -EINVAL;
		}
		p->idx++;
	}
	return 0;
}

int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	int r, data0 = 0, data1 = 0;

	/* does the IB have a msg command? */
	bool has_msg_cmd = false;

	/* minimum buffer sizes */
	unsigned buf_sizes[] = {
		[0x00000000] = 2048,
		[0x00000001] = 32 * 1024 * 1024,
		[0x00000002] = 2048 * 1152 * 3,
		[0x00000003] = 2048,
	};

	if (p->chunks[p->chunk_ib_idx].length_dw % 16) {
		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
			  p->chunks[p->chunk_ib_idx].length_dw);
		return -EINVAL;
	}

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk!\n");
		return -EINVAL;
	}

	do {
		r = radeon_cs_packet_parse(p, &pkt, p->idx);
		if (r)
			return r;
		switch (pkt.type) {
		case RADEON_PACKET_TYPE0:
			r = radeon_uvd_cs_reg(p, &pkt, &data0, &data1,
					      buf_sizes, &has_msg_cmd);
			if (r)
				return r;
			break;
		case RADEON_PACKET_TYPE2:
			p->idx += pkt.count + 2;
			break;
		default:
			DRM_ERROR("Unknown packet type %d!\n", pkt.type);
			return -EINVAL;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);

	if (!has_msg_cmd) {
		DRM_ERROR("UVD-IBs need a msg command!\n");
		return -EINVAL;
	}

	return 0;
}
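/*
 * radeon_uvd_send_msg() below builds a minimal 16-dword IB that just
 * points the VCPU at the message BO, roughly:
 *
 *   ptr[0..1]  PACKET0(UVD_GPCOM_VCPU_DATA0, 0), lower 32 bits of addr
 *   ptr[2..3]  PACKET0(UVD_GPCOM_VCPU_DATA1, 0), upper 32 bits of addr
 *   ptr[4..5]  PACKET0(UVD_GPCOM_VCPU_CMD, 0), command 0 (msg buffer)
 *   ptr[6..15] PACKET2(0) padding
 */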
static int radeon_uvd_send_msg(struct radeon_device *rdev,
			       int ring, struct radeon_bo *bo,
			       struct radeon_fence **fence)
{
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head head;
	struct radeon_ib ib;
	uint64_t addr;
	int i, r;

	memset(&tv, 0, sizeof(tv));
	tv.bo = &bo->tbo;

	INIT_LIST_HEAD(&head);
	list_add(&tv.head, &head);

	r = ttm_eu_reserve_buffers(&ticket, &head);
	if (r)
		return r;

	radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_VRAM);
	radeon_uvd_force_into_uvd_segment(bo);

	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (r)
		goto err;

	r = radeon_ib_get(rdev, ring, &ib, NULL, 16);
	if (r)
		goto err;

	addr = radeon_bo_gpu_offset(bo);
	ib.ptr[0] = PACKET0(UVD_GPCOM_VCPU_DATA0, 0);
	ib.ptr[1] = addr;
	ib.ptr[2] = PACKET0(UVD_GPCOM_VCPU_DATA1, 0);
	ib.ptr[3] = addr >> 32;
	ib.ptr[4] = PACKET0(UVD_GPCOM_VCPU_CMD, 0);
	ib.ptr[5] = 0;
	for (i = 6; i < 16; ++i)
		ib.ptr[i] = PACKET2(0);
	ib.length_dw = 16;

	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r)
		goto err;
	ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);

	if (fence)
		*fence = radeon_fence_ref(ib.fence);

	radeon_ib_free(rdev, &ib);
	radeon_bo_unref(&bo);
	return 0;

err:
	ttm_eu_backoff_reservation(&ticket, &head);
	return r;
}

/*
 * Multiple fence commands without any stream commands in between can
 * crash the vcpu, so just try to emit a dummy create/destroy msg to
 * avoid this.
 */
int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
			      uint32_t handle, struct radeon_fence **fence)
{
	struct radeon_bo *bo;
	uint32_t *msg;
	int r, i;

	r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
	if (r)
		return r;

	r = radeon_bo_reserve(bo, false);
	if (r) {
		radeon_bo_unref(&bo);
		return r;
	}

	r = radeon_bo_kmap(bo, (void **)&msg);
	if (r) {
		radeon_bo_unreserve(bo);
		radeon_bo_unref(&bo);
		return r;
	}

	/* stitch together an UVD create msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000000);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(0x00000000);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000000);
	msg[7] = cpu_to_le32(0x00000780);
	msg[8] = cpu_to_le32(0x00000440);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x01b37000);
	for (i = 11; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	radeon_bo_kunmap(bo);
	radeon_bo_unreserve(bo);

	return radeon_uvd_send_msg(rdev, ring, bo, fence);
}

int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
			       uint32_t handle, struct radeon_fence **fence)
{
	struct radeon_bo *bo;
	uint32_t *msg;
	int r, i;

	r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
	if (r)
		return r;

	r = radeon_bo_reserve(bo, false);
	if (r) {
		radeon_bo_unref(&bo);
		return r;
	}

	r = radeon_bo_kmap(bo, (void **)&msg);
	if (r) {
		radeon_bo_unreserve(bo);
		radeon_bo_unref(&bo);
		return r;
	}

	/* stitch together an UVD destroy msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000002);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	for (i = 4; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	radeon_bo_kunmap(bo);
	radeon_bo_unreserve(bo);

	return radeon_uvd_send_msg(rdev, ring, bo, fence);
}

/**
 * radeon_uvd_count_handles - count number of open streams
 *
 * @rdev: radeon_device pointer
 * @sd: number of SD streams
 * @hd: number of HD streams
 *
 * Count the number of open SD/HD streams as a hint for power management
 */
static void radeon_uvd_count_handles(struct radeon_device *rdev,
				     unsigned *sd, unsigned *hd)
{
	unsigned i;

	*sd = 0;
	*hd = 0;

	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
		if (!atomic_read(&rdev->uvd.handles[i]))
			continue;

		if (rdev->uvd.img_size[i] >= 720*576)
			++(*hd);
		else
			++(*sd);
	}
}
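/*
 * Power management flow, summarizing the two functions below: every
 * submission goes through radeon_uvd_note_usage(), which raises the
 * UVD clocks (or enables the UVD DPM state) and (re)arms idle_work.
 * Once the UVD ring has been idle for UVD_IDLE_TIMEOUT_MS, the idle
 * handler drops the clocks to 0 again.
 */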
static void radeon_uvd_idle_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev =
		container_of(work, struct radeon_device, uvd.idle_work.work);

	if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0) {
		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
			radeon_dpm_enable_uvd(rdev, false);
		} else {
			radeon_set_uvd_clocks(rdev, 0, 0);
		}
	} else {
		schedule_delayed_work(&rdev->uvd.idle_work,
				      msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
	}
}

void radeon_uvd_note_usage(struct radeon_device *rdev)
{
	bool streams_changed = false;
	bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work);
	set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work,
					    msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		unsigned hd = 0, sd = 0;
		radeon_uvd_count_handles(rdev, &sd, &hd);
		if ((rdev->pm.dpm.sd != sd) ||
		    (rdev->pm.dpm.hd != hd)) {
			rdev->pm.dpm.sd = sd;
			rdev->pm.dpm.hd = hd;
			/* disable this for now */
			/*streams_changed = true;*/
		}
	}

	if (set_clocks || streams_changed) {
		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
			radeon_dpm_enable_uvd(rdev, true);
		} else {
			radeon_set_uvd_clocks(rdev, 53300, 40000);
		}
	}
}

static unsigned radeon_uvd_calc_upll_post_div(unsigned vco_freq,
					      unsigned target_freq,
					      unsigned pd_min,
					      unsigned pd_even)
{
	unsigned post_div = vco_freq / target_freq;

	/* adjust to post divider minimum value */
	if (post_div < pd_min)
		post_div = pd_min;

	/* we always need a frequency less than or equal to the target */
	if ((vco_freq / post_div) > target_freq)
		post_div += 1;

	/* post dividers above a certain value must be even */
	if (post_div > pd_even && post_div % 2)
		post_div += 1;

	return post_div;
}
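/*
 * Example (illustrative numbers): with vco_freq = 160000,
 * target_freq = 54000, pd_min = 2 and pd_even = 8, the initial
 * post_div is 160000 / 54000 = 2; since 160000 / 2 = 80000 exceeds
 * the target, it is bumped to 3, giving 160000 / 3 = 53333 <= 54000.
 */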
/**
 * radeon_uvd_calc_upll_dividers - calc UPLL clock dividers
 *
 * @rdev: radeon_device pointer
 * @vclk: wanted VCLK
 * @dclk: wanted DCLK
 * @vco_min: minimum VCO frequency
 * @vco_max: maximum VCO frequency
 * @fb_factor: factor to multiply vco freq with
 * @fb_mask: limit and bitmask for feedback divider
 * @pd_min: post divider minimum
 * @pd_max: post divider maximum
 * @pd_even: post divider must be even above this value
 * @optimal_fb_div: resulting feedback divider
 * @optimal_vclk_div: resulting vclk post divider
 * @optimal_dclk_div: resulting dclk post divider
 *
 * Calculate dividers for UVDs UPLL (R6xx-SI, except APUs).
 * Returns zero on success, -EINVAL on error.
 */
int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
				  unsigned vclk, unsigned dclk,
				  unsigned vco_min, unsigned vco_max,
				  unsigned fb_factor, unsigned fb_mask,
				  unsigned pd_min, unsigned pd_max,
				  unsigned pd_even,
				  unsigned *optimal_fb_div,
				  unsigned *optimal_vclk_div,
				  unsigned *optimal_dclk_div)
{
	unsigned vco_freq, ref_freq = rdev->clock.spll.reference_freq;

	/* start off with something large */
	unsigned optimal_score = ~0;

	/* loop through vco from low to high */
	vco_min = max(max(vco_min, vclk), dclk);
	for (vco_freq = vco_min; vco_freq <= vco_max; vco_freq += 100) {

		uint64_t fb_div = (uint64_t)vco_freq * fb_factor;
		unsigned vclk_div, dclk_div, score;

		do_div(fb_div, ref_freq);

		/* fb div out of range? */
		if (fb_div > fb_mask)
			break; /* it can only get worse */

		fb_div &= fb_mask;

		/* calc vclk divider with current vco freq */
		vclk_div = radeon_uvd_calc_upll_post_div(vco_freq, vclk,
							 pd_min, pd_even);
		if (vclk_div > pd_max)
			break; /* vco is too big, it has to stop */

		/* calc dclk divider with current vco freq */
		dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk,
							 pd_min, pd_even);
		if (dclk_div > pd_max)
			break; /* vco is too big, it has to stop */

		/* calc score with current vco freq */
		score = vclk - (vco_freq / vclk_div) + dclk - (vco_freq / dclk_div);

		/* determine if this vco setting is better than current optimal settings */
		if (score < optimal_score) {
			*optimal_fb_div = fb_div;
			*optimal_vclk_div = vclk_div;
			*optimal_dclk_div = dclk_div;
			optimal_score = score;
			if (optimal_score == 0)
				break; /* it can't get better than this */
		}
	}

	/* did we find a valid setup? */
	if (optimal_score == ~0)
		return -EINVAL;

	return 0;
}

int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
				unsigned cg_upll_func_cntl)
{
	unsigned i;

	/* make sure UPLL_CTLREQ is deasserted */
	WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);

	mdelay(10);

	/* assert UPLL_CTLREQ */
	WREG32_P(cg_upll_func_cntl, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);

	/* wait for CTLACK and CTLACK2 to get asserted */
	for (i = 0; i < 100; ++i) {
		uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
		if ((RREG32(cg_upll_func_cntl) & mask) == mask)
			break;
		mdelay(10);
	}

	/* deassert UPLL_CTLREQ */
	WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);

	if (i == 100) {
		DRM_ERROR("Timeout setting UVD clocks!\n");
		return -ETIMEDOUT;
	}

	return 0;
}