/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 * $FreeBSD: head/sys/dev/drm2/radeon/radeon_ring.c 254885 2013-08-25 19:37:15Z dumbbell $
 */

#include <drm/drmP.h>
#include <uapi_drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

#ifdef DUMBBELL_WIP
/*
 * IB
 * IBs (Indirect Buffers) are areas of GPU accessible memory where
 * commands are stored. You can put a pointer to the IB in the
 * command ring and the hw will fetch the commands from the IB
 * and execute them. Generally userspace acceleration drivers
 * produce command buffers which are sent to the kernel and
 * put in IBs for execution by the requested ring.
 */
static int radeon_debugfs_sa_init(struct radeon_device *rdev);
#endif /* DUMBBELL_WIP */

/**
 * radeon_ib_get - request an IB (Indirect Buffer)
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the IB is associated with
 * @ib: IB object returned
 * @vm: vm the IB is associated with, or NULL if none
 * @size: requested IB size
 *
 * Request an IB (all asics). IBs are allocated using the
 * suballocator.
 * Returns 0 on success, error on failure.
 */
int radeon_ib_get(struct radeon_device *rdev, int ring,
		  struct radeon_ib *ib, struct radeon_vm *vm,
		  unsigned size)
{
	int i, r;

	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true);
	if (r) {
		dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
		return r;
	}

	r = radeon_semaphore_create(rdev, &ib->semaphore);
	if (r) {
		return r;
	}

	ib->ring = ring;
	ib->fence = NULL;
	ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
	ib->vm = vm;
	if (vm) {
		/* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address
		 * space and soffset is the offset inside the pool bo
		 */
		ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET;
	} else {
		ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
	}
	ib->is_const_ib = false;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		ib->sync_to[i] = NULL;

	return 0;
}
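/*
 * Example (illustrative sketch, not part of the driver): a typical
 * caller pairs radeon_ib_get() with radeon_ib_free(), filling ib.ptr
 * with packet dwords in between. The ring index, size, and packet
 * contents below are arbitrary:
 *
 *	struct radeon_ib ib;
 *	int r;
 *
 *	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 256);
 *	if (r)
 *		return r;
 *	ib.ptr[0] = 0xdeadbeef;		hypothetical packet dword
 *	ib.length_dw = 1;
 *	r = radeon_ib_schedule(rdev, &ib, NULL);
 *	radeon_ib_free(rdev, &ib);
 */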
/**
 * radeon_ib_free - free an IB (Indirect Buffer)
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to free
 *
 * Free an IB (all asics).
 */
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
	radeon_semaphore_free(rdev, &ib->semaphore, ib->fence);
	radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
	radeon_fence_unref(&ib->fence);
}

/**
 * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 * @const_ib: Const IB to schedule (SI only)
 *
 * Schedule an IB on the associated ring (all asics).
 * Returns 0 on success, error on failure.
 *
 * On SI, there are two parallel engines fed from the primary ring,
 * the CE (Constant Engine) and the DE (Drawing Engine). Since
 * resource descriptors have moved to memory, the CE allows you to
 * prime the caches while the DE is updating register state so that
 * the resource descriptors will be already in cache when the draw is
 * processed. To accomplish this, the userspace driver submits two
 * IBs, one for the CE and one for the DE. If there is a CE IB (called
 * a CONST_IB), it will be put on the ring prior to the DE IB. Prior
 * to SI there was just a DE IB.
 */
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
		       struct radeon_ib *const_ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	bool need_sync = false;
	int i, r = 0;

	if (!ib->length_dw || !ring->ready) {
		/* TODO: nothing in the IB; we should report that */
		dev_err(rdev->dev, "couldn't schedule ib\n");
		return -EINVAL;
	}

	/* 64 dwords should be enough for fence too */
	r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_RINGS * 8);
	if (r) {
		dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
		return r;
	}
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_fence *fence = ib->sync_to[i];
		if (radeon_fence_need_sync(fence, ib->ring)) {
			need_sync = true;
			radeon_semaphore_sync_rings(rdev, ib->semaphore,
						    fence->ring, ib->ring);
			radeon_fence_note_sync(fence, ib->ring);
		}
	}
	/* immediately free semaphore when we don't need to sync */
	if (!need_sync) {
		radeon_semaphore_free(rdev, &ib->semaphore, NULL);
	}
	/* if we can't remember our last VM flush then flush now! */
	if (ib->vm && !ib->vm->last_flush) {
		radeon_ring_vm_flush(rdev, ib->ring, ib->vm);
	}
	if (const_ib) {
		radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
		radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
	}
	radeon_ring_ib_execute(rdev, ib->ring, ib);
	r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
	if (r) {
		dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r);
		radeon_ring_unlock_undo(rdev, ring);
		return r;
	}
	if (const_ib) {
		const_ib->fence = radeon_fence_ref(ib->fence);
	}
	/* we just flushed the VM, remember that */
	if (ib->vm && !ib->vm->last_flush) {
		ib->vm->last_flush = radeon_fence_ref(ib->fence);
	}
	radeon_ring_unlock_commit(rdev, ring);
	return 0;
}
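/*
 * Sketch of the SI submission path described above (assumption: both
 * IBs were already obtained with radeon_ib_get() and filled in; the
 * CHIP_TAHITI check stands in for however the caller detects an
 * SI-class asic):
 *
 *	r = radeon_ib_schedule(rdev, &de_ib,
 *			       rdev->family >= CHIP_TAHITI ? &ce_ib : NULL);
 *
 * Pre-SI asics always pass NULL for @const_ib.
 */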
/**
 * radeon_ib_pool_init - Init the IB (Indirect Buffer) pool
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the suballocator to manage a pool of memory
 * for use as IBs (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_ib_pool_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->ib_pool_ready) {
		return 0;
	}
	r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
				      RADEON_IB_POOL_SIZE*64*1024,
				      RADEON_GEM_DOMAIN_GTT);
	if (r) {
		return r;
	}

	r = radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
	if (r) {
		return r;
	}

	rdev->ib_pool_ready = true;
#ifdef DUMBBELL_WIP
	if (radeon_debugfs_sa_init(rdev)) {
		dev_err(rdev->dev, "failed to register debugfs file for SA\n");
	}
#endif /* DUMBBELL_WIP */
	return 0;
}

/**
 * radeon_ib_pool_fini - Free the IB (Indirect Buffer) pool
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the suballocator managing the pool of memory
 * for use as IBs (all asics).
 */
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
	if (rdev->ib_pool_ready) {
		radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
		radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
		rdev->ib_pool_ready = false;
	}
}

/**
 * radeon_ib_ring_tests - test IBs on the rings
 *
 * @rdev: radeon_device pointer
 *
 * Test an IB (Indirect Buffer) on each ring.
 * If the test fails, disable the ring.
 * Returns 0 on success, error if the primary GFX ring
 * IB test fails.
 */
int radeon_ib_ring_tests(struct radeon_device *rdev)
{
	unsigned i;
	int r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_ring *ring = &rdev->ring[i];

		if (!ring->ready)
			continue;

		r = radeon_ib_test(rdev, i, ring);
		if (r) {
			ring->ready = false;

			if (i == RADEON_RING_TYPE_GFX_INDEX) {
				/* oh, oh, that's really bad */
				DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
				rdev->accel_working = false;
				return r;

			} else {
				/* still not good, but we can live with it */
				DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
			}
		}
	}
	return 0;
}

#ifdef DUMBBELL_WIP
/*
 * Rings
 * Most engines on the GPU are fed via ring buffers. Ring
 * buffers are areas of GPU accessible memory that the host
 * writes commands into and the GPU reads commands out of.
 * There is a rptr (read pointer) that determines where the
 * GPU is currently reading, and a wptr (write pointer)
 * which determines where the host has written. When the
 * pointers are equal, the ring is idle. When the host
 * writes commands to the ring buffer, it increments the
 * wptr. The GPU then starts fetching commands and executes
 * them until the pointers are equal again.
 */
static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
#endif /* DUMBBELL_WIP */
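/*
 * A worked example of the rptr/wptr arithmetic described above (made-up
 * numbers): for a 16 KB ring, ring_size / 4 = 4096 dwords and
 * ptr_mask = 4095. With rptr = 100 and wptr = 4000 the free space
 * wraps around:
 *
 *	free_dw = (rptr + ring_size / 4 - wptr) & ptr_mask
 *	        = (100 + 4096 - 4000) & 4095 = 196
 *
 * A result of 0 means rptr == wptr, i.e. the ring is idle and therefore
 * completely free; radeon_ring_free_size() below special-cases that.
 */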
#if defined(DRM_DEBUG_CODE) && DRM_DEBUG_CODE != 0
/**
 * radeon_ring_write - write a value to the ring
 *
 * @ring: radeon_ring structure holding ring information
 * @v: dword (dw) value to write
 *
 * Write a value to the requested ring buffer (all asics).
 */
void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
#if DRM_DEBUG_CODE
	if (ring->count_dw <= 0) {
		DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
	}
#endif
	ring->ring[ring->wptr++] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
	ring->ring_free_dw--;
}
#endif

/**
 * radeon_ring_supports_scratch_reg - check if the ring supports
 * writing to scratch registers
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if a specific ring supports writing to scratch registers (all asics).
 * Returns true if the ring supports writing to scratch regs, false if not.
 */
bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
				      struct radeon_ring *ring)
{
	switch (ring->idx) {
	case RADEON_RING_TYPE_GFX_INDEX:
	case CAYMAN_RING_TYPE_CP1_INDEX:
	case CAYMAN_RING_TYPE_CP2_INDEX:
		return true;
	default:
		return false;
	}
}

/**
 * radeon_ring_free_size - update the free size
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Update the free dw slots in the ring buffer (all asics).
 */
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 rptr;

	if (rdev->wb.enabled)
		rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
	else
		rptr = RREG32(ring->rptr_reg);
	ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
	/* This works because ring_size is a power of 2 */
	ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
	ring->ring_free_dw -= ring->wptr;
	ring->ring_free_dw &= ring->ptr_mask;
	if (!ring->ring_free_dw) {
		ring->ring_free_dw = ring->ring_size / 4;
	}
}

/**
 * radeon_ring_alloc - allocate space on the ring buffer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Allocate @ndw dwords in the ring buffer (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	/* make sure we aren't trying to allocate more space than there is on the ring */
	if (ndw > (ring->ring_size / 4))
		return -ENOMEM;
	/* Align requested size with padding so unlock_commit can
	 * pad safely */
	ndw = (ndw + ring->align_mask) & ~ring->align_mask;
	while (ndw > (ring->ring_free_dw - 1)) {
		radeon_ring_free_size(rdev, ring);
		if (ndw < ring->ring_free_dw) {
			break;
		}
		r = radeon_fence_wait_next_locked(rdev, ring->idx);
		if (r)
			return r;
	}
	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;
	return 0;
}
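/*
 * The alignment step in radeon_ring_alloc() with example numbers: with
 * align_mask = 7 (an 8-dword fetch granularity), a request for
 * ndw = 10 is rounded up to (10 + 7) & ~7 = 16 dwords, so that
 * radeon_ring_commit() can later pad the tail with NOPs without
 * overrunning the allocation.
 */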
/**
 * radeon_ring_lock - lock the ring and allocate space on it
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Lock the ring and allocate @ndw dwords in the ring buffer
 * (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	lockmgr(&rdev->ring_lock, LK_EXCLUSIVE);
	r = radeon_ring_alloc(rdev, ring, ndw);
	if (r) {
		lockmgr(&rdev->ring_lock, LK_RELEASE);
		return r;
	}
	return 0;
}

/**
 * radeon_ring_commit - tell the GPU to execute the new
 * commands on the ring buffer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Update the wptr (write pointer) to tell the GPU to
 * execute new commands on the ring buffer (all asics).
 */
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	/* We pad to match fetch size */
	while (ring->wptr & ring->align_mask) {
		radeon_ring_write(ring, ring->nop);
	}
	DRM_MEMORYBARRIER();
	WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
	(void)RREG32(ring->wptr_reg);
}

/**
 * radeon_ring_unlock_commit - tell the GPU to execute the new
 * commands on the ring buffer and unlock it
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Call radeon_ring_commit() then unlock the ring (all asics).
 */
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	radeon_ring_commit(rdev, ring);
	lockmgr(&rdev->ring_lock, LK_RELEASE);
}

/**
 * radeon_ring_undo - reset the wptr
 *
 * @ring: radeon_ring structure holding ring information
 *
 * Reset the driver's copy of the wptr (all asics).
 */
void radeon_ring_undo(struct radeon_ring *ring)
{
	ring->wptr = ring->wptr_old;
}

/**
 * radeon_ring_unlock_undo - reset the wptr and unlock the ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Call radeon_ring_undo() then unlock the ring (all asics).
 */
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
{
	radeon_ring_undo(ring);
	lockmgr(&rdev->ring_lock, LK_RELEASE);
}

/**
 * radeon_ring_force_activity - add some nop packets to the ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Add some nop packets to the ring to force activity (all asics).
 * Used for lockup detection to see if the rptr is advancing.
 */
void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;

	radeon_ring_free_size(rdev, ring);
	if (ring->rptr == ring->wptr) {
		r = radeon_ring_alloc(rdev, ring, 1);
		if (!r) {
			radeon_ring_write(ring, ring->nop);
			radeon_ring_commit(rdev, ring);
		}
	}
}
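/*
 * Typical usage of the locked variants above (illustrative sketch;
 * SOME_REG and the packet contents are made up):
 *
 *	r = radeon_ring_lock(rdev, ring, 16);
 *	if (r)
 *		return r;
 *	radeon_ring_write(ring, PACKET0(SOME_REG, 0));
 *	radeon_ring_write(ring, some_value);
 *	radeon_ring_unlock_commit(rdev, ring);
 *
 * If an error occurs after a successful lock, call
 * radeon_ring_unlock_undo() instead to roll wptr back to wptr_old and
 * drop the lock.
 */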
/**
 * radeon_ring_lockup_update - update lockup variables
 *
 * @ring: radeon_ring structure holding ring information
 *
 * Update the last rptr value and timestamp (all asics).
 */
void radeon_ring_lockup_update(struct radeon_ring *ring)
{
	ring->last_rptr = ring->rptr;
	ring->last_activity = jiffies;
}

/**
 * radeon_ring_test_lockup() - check if the ring is locked up
 * @rdev: radeon device structure
 * @ring: radeon_ring structure holding ring information
 *
 * We don't need to initialize the lockup tracking information here:
 * either the CP rptr will differ from the saved value, or the jiffies
 * counter will have wrapped, and either case forces the tracking
 * information to be (re)initialized.
 *
 * A possible false positive: we get called after a long delay and
 * last_rptr happens to equal the current CP rptr; unlikely, but it can
 * happen. To avoid it, if more than 2 seconds have elapsed since the
 * last call we return false and update the tracking information. As a
 * consequence, the caller must call radeon_ring_test_lockup() several
 * times in less than 2 seconds for a lockup to be reported; the
 * fencing code should be cautious about that.
 *
 * The caller should write to the ring to force the CP to do something,
 * so we don't get a false positive when the CP simply has nothing to do.
 */
bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	unsigned long cjiffies, elapsed;
	uint32_t rptr;

	cjiffies = jiffies;
	if (!time_after(cjiffies, ring->last_activity)) {
		/* likely a wrap around */
		radeon_ring_lockup_update(ring);
		return false;
	}
	rptr = RREG32(ring->rptr_reg);
	ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
	if (ring->rptr != ring->last_rptr) {
		/* CP is still working, no lockup */
		radeon_ring_lockup_update(ring);
		return false;
	}
	elapsed = jiffies_to_msecs(cjiffies - ring->last_activity);
	if (radeon_lockup_timeout && elapsed >= radeon_lockup_timeout) {
		dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
		return true;
	}
	/* give a chance to the GPU ... */
	return false;
}
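/*
 * Sketch of how the two functions above are typically driven from a
 * periodic check (hypothetical caller; in this driver the check sits
 * in the fence wait path):
 *
 *	radeon_ring_force_activity(rdev, ring);
 *	if (radeon_ring_test_lockup(rdev, ring)) {
 *		... trigger a GPU reset ...
 *	}
 *
 * force_activity writes a NOP when the ring is idle, so a stuck rptr
 * really indicates a stalled CP rather than an empty ring.
 */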
/**
 * radeon_ring_backup - Back up the content of a ring
 *
 * @rdev: radeon_device pointer
 * @ring: the ring we want to back up
 * @data: place where the pointer to the saved ring content is returned
 *
 * Saves all unprocessed commands from a ring; returns the number of dwords saved.
 */
unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
			    uint32_t **data)
{
	unsigned size, ptr, i;

	/* just in case lock the ring */
	lockmgr(&rdev->ring_lock, LK_EXCLUSIVE);
	*data = NULL;

	if (ring->ring_obj == NULL) {
		lockmgr(&rdev->ring_lock, LK_RELEASE);
		return 0;
	}

	/* it doesn't make sense to save anything if all fences are signaled */
	if (!radeon_fence_count_emitted(rdev, ring->idx)) {
		lockmgr(&rdev->ring_lock, LK_RELEASE);
		return 0;
	}

	/* calculate the number of dw on the ring */
	if (ring->rptr_save_reg)
		ptr = RREG32(ring->rptr_save_reg);
	else if (rdev->wb.enabled)
		ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);
	else {
		/* no way to read back the next rptr */
		lockmgr(&rdev->ring_lock, LK_RELEASE);
		return 0;
	}

	size = ring->wptr + (ring->ring_size / 4);
	size -= ptr;
	size &= ring->ptr_mask;
	if (size == 0) {
		lockmgr(&rdev->ring_lock, LK_RELEASE);
		return 0;
	}

	/* and then save the content of the ring */
	*data = kmalloc(size * sizeof(uint32_t), M_DRM, M_WAITOK);
	if (!*data) {
		lockmgr(&rdev->ring_lock, LK_RELEASE);
		return 0;
	}
	for (i = 0; i < size; ++i) {
		(*data)[i] = ring->ring[ptr++];
		ptr &= ring->ptr_mask;
	}

	lockmgr(&rdev->ring_lock, LK_RELEASE);
	return size;
}

/**
 * radeon_ring_restore - append saved commands to the ring again
 *
 * @rdev: radeon_device pointer
 * @ring: ring to append commands to
 * @size: number of dwords we want to write
 * @data: saved commands
 *
 * Allocates space on the ring and restores the previously saved commands.
 */
int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
			unsigned size, uint32_t *data)
{
	int i, r;

	if (!size || !data)
		return 0;

	/* restore the saved ring content */
	r = radeon_ring_lock(rdev, ring, size);
	if (r)
		return r;

	for (i = 0; i < size; ++i) {
		radeon_ring_write(ring, data[i]);
	}

	radeon_ring_unlock_commit(rdev, ring);
	drm_free(data, M_DRM);
	return 0;
}
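/*
 * radeon_ring_backup() and radeon_ring_restore() are meant to be used
 * as a pair across a GPU reset (sketch; error handling trimmed):
 *
 *	unsigned size;
 *	uint32_t *data;
 *
 *	size = radeon_ring_backup(rdev, ring, &data);
 *	... reset the GPU and reinitialize the ring ...
 *	radeon_ring_restore(rdev, ring, size, data);
 *
 * Note that radeon_ring_restore() frees @data, so the caller must not
 * touch the buffer afterwards.
 */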
/**
 * radeon_ring_init - init driver ring struct.
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @ring_size: size of the ring
 * @rptr_offs: offset of the rptr writeback location in the WB buffer
 * @rptr_reg: MMIO offset of the rptr register
 * @wptr_reg: MMIO offset of the wptr register
 * @ptr_reg_shift: bit offset of the rptr/wptr values
 * @ptr_reg_mask: bit mask of the rptr/wptr values
 * @nop: nop packet for this ring
 *
 * Initialize the driver information for the selected ring (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
		     u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
{
	int r;
	void *ring_ptr;

	ring->ring_size = ring_size;
	ring->rptr_offs = rptr_offs;
	ring->rptr_reg = rptr_reg;
	ring->wptr_reg = wptr_reg;
	ring->ptr_reg_shift = ptr_reg_shift;
	ring->ptr_reg_mask = ptr_reg_mask;
	ring->nop = nop;
	/* Allocate ring buffer */
	if (ring->ring_obj == NULL) {
		r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT,
				     NULL, &ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(ring->ring_obj, false);
		if (unlikely(r != 0)) {
			radeon_bo_unref(&ring->ring_obj);
			return r;
		}
		r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
				  &ring->gpu_addr);
		if (r) {
			radeon_bo_unreserve(ring->ring_obj);
			radeon_bo_unref(&ring->ring_obj);
			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
			return r;
		}
		ring_ptr = &ring->ring;
		r = radeon_bo_kmap(ring->ring_obj,
				   ring_ptr);
		radeon_bo_unreserve(ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring map failed\n", r);
			radeon_bo_unref(&ring->ring_obj);
			return r;
		}
	}
	ring->ptr_mask = (ring->ring_size / 4) - 1;
	ring->ring_free_dw = ring->ring_size / 4;
	if (rdev->wb.enabled) {
		u32 index = RADEON_WB_RING0_NEXT_RPTR + (ring->idx * 4);
		ring->next_rptr_gpu_addr = rdev->wb.gpu_addr + index;
		ring->next_rptr_cpu_addr = &rdev->wb.wb[index/4];
	}
#ifdef DUMBBELL_WIP
	if (radeon_debugfs_ring_init(rdev, ring)) {
		DRM_ERROR("Failed to register debugfs file for rings !\n");
	}
#endif /* DUMBBELL_WIP */
	radeon_ring_lockup_update(ring);
	return 0;
}
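/*
 * Illustrative radeon_ring_init() call (ASIC_CP_RB_RPTR/ASIC_CP_RB_WPTR
 * are hypothetical stand-ins; each asic file passes its own rptr/wptr
 * MMIO offsets and nop packet):
 *
 *	r = radeon_ring_init(rdev, ring, 1024 * 1024,
 *			     RADEON_WB_CP_RPTR_OFFSET,
 *			     ASIC_CP_RB_RPTR, ASIC_CP_RB_WPTR,
 *			     0, 0xfffffff, RADEON_CP_PACKET2);
 */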
/**
 * radeon_ring_fini - tear down the driver ring struct.
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Tear down the driver information for the selected ring (all asics).
 */
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;
	struct radeon_bo *ring_obj;

	lockmgr(&rdev->ring_lock, LK_EXCLUSIVE);
	ring_obj = ring->ring_obj;
	ring->ready = false;
	ring->ring = NULL;
	ring->ring_obj = NULL;
	lockmgr(&rdev->ring_lock, LK_RELEASE);

	if (ring_obj) {
		r = radeon_bo_reserve(ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(ring_obj);
			radeon_bo_unpin(ring_obj);
			radeon_bo_unreserve(ring_obj);
		}
		radeon_bo_unref(&ring_obj);
	}
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int ridx = *(int*)node->info_ent->data;
	struct radeon_ring *ring = &rdev->ring[ridx];
	unsigned count, i, j;
	u32 tmp;

	radeon_ring_free_size(rdev, ring);
	count = (ring->ring_size / 4) - ring->ring_free_dw;
	tmp = RREG32(ring->wptr_reg) >> ring->ptr_reg_shift;
	seq_printf(m, "wptr(0x%04x): 0x%08x [%5d]\n", ring->wptr_reg, tmp, tmp);
	tmp = RREG32(ring->rptr_reg) >> ring->ptr_reg_shift;
	seq_printf(m, "rptr(0x%04x): 0x%08x [%5d]\n", ring->rptr_reg, tmp, tmp);
	if (ring->rptr_save_reg) {
		seq_printf(m, "rptr next(0x%04x): 0x%08x\n", ring->rptr_save_reg,
			   RREG32(ring->rptr_save_reg));
	}
	seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", ring->wptr, ring->wptr);
	seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n", ring->rptr, ring->rptr);
	seq_printf(m, "last semaphore signal addr : 0x%016llx\n", ring->last_semaphore_signal_addr);
	seq_printf(m, "last semaphore wait addr   : 0x%016llx\n", ring->last_semaphore_wait_addr);
	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	/* print 32 dw before current rptr as often it's the last executed
	 * packet that is the root issue
	 */
	i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
	for (j = 0; j <= (count + 32); j++) {
		seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]);
		i = (i + 1) & ring->ptr_mask;
	}
	return 0;
}

static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;
static int radeon_ring_type_dma1_index = R600_RING_TYPE_DMA_INDEX;
static int radeon_ring_type_dma2_index = CAYMAN_RING_TYPE_DMA1_INDEX;

static struct drm_info_list radeon_debugfs_ring_info_list[] = {
	{"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
	{"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
	{"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
	{"radeon_ring_dma1", radeon_debugfs_ring_info, 0, &radeon_ring_type_dma1_index},
	{"radeon_ring_dma2", radeon_debugfs_ring_info, 0, &radeon_ring_type_dma2_index},
};

static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);

	return 0;
}
static struct drm_info_list radeon_debugfs_sa_list[] = {
	{"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
};

#endif

#ifdef DUMBBELL_WIP
static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;
	for (i = 0; i < ARRAY_SIZE(radeon_debugfs_ring_info_list); ++i) {
		struct drm_info_list *info = &radeon_debugfs_ring_info_list[i];
		int ridx = *(int*)radeon_debugfs_ring_info_list[i].data;
		unsigned r;

		if (&rdev->ring[ridx] != ring)
			continue;

		r = radeon_debugfs_add_files(rdev, info, 1);
		if (r)
			return r;
	}
#endif
	return 0;
}

static int radeon_debugfs_sa_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
#else
	return 0;
#endif
}
#endif /* DUMBBELL_WIP */