/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <drm/drmP.h>
#include "radeon_reg.h"
#include "radeon.h"
#ifdef TRACE_TODO
#include "radeon_trace.h"
#endif

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.  Whether
 * we use a scratch register or a memory location depends on the
 * asic and on whether writeback is enabled.
 */

/**
 * radeon_fence_write - write a fence value
 *
 * @rdev: radeon_device pointer
 * @seq: sequence number to write
 * @ring: ring index the fence is associated with
 *
 * Writes a fence value to memory or a scratch register (all asics).
 */
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];

	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		if (drv->cpu_addr) {
			*drv->cpu_addr = cpu_to_le32(seq);
		}
	} else {
		WREG32(drv->scratch_reg, seq);
	}
}

/**
 * radeon_fence_read - read a fence value
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Reads a fence value from memory or a scratch register (all asics).
 * Returns the value of the fence read from memory or register.
 */
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	u32 seq = 0;

	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		if (drv->cpu_addr) {
			seq = le32_to_cpu(*drv->cpu_addr);
		} else {
			seq = lower_32_bits(atomic64_read(&drv->last_seq));
		}
	} else {
		seq = RREG32(drv->scratch_reg);
	}
	return seq;
}

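/*
 * Illustrative sketch (not a code path in this file): a producer that
 * submits GPU work and a consumer that synchronizes with it typically
 * pair radeon_fence_emit() with radeon_fence_wait() and
 * radeon_fence_unref(), roughly as follows (ring locking, command
 * submission and error handling elided):
 *
 *	struct radeon_fence *fence = NULL;
 *
 *	// after writing the command stream for @ring ...
 *	r = radeon_fence_emit(rdev, &fence, ring);
 *
 *	// ... later, before the CPU touches the buffers again:
 *	r = radeon_fence_wait(fence, false);
 *	radeon_fence_unref(&fence);
 */
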
/**
 * radeon_fence_schedule_check - schedule lockup check
 *
 * @rdev: radeon_device pointer
 * @ring: ring index we should work with
 *
 * Queues a delayed work item to check for lockups.
 */
static void radeon_fence_schedule_check(struct radeon_device *rdev, int ring)
{
	/*
	 * Do not reset the timer here with mod_delayed_work,
	 * this can livelock in an interaction with TTM delayed destroy.
	 */
	queue_delayed_work(system_power_efficient_wq,
			   &rdev->fence_drv[ring].lockup_work,
			   RADEON_FENCE_JIFFIES_TIMEOUT);
}

/**
 * radeon_fence_emit - emit a fence on the requested ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 * @ring: ring index the fence is associated with
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_fence_emit(struct radeon_device *rdev,
		      struct radeon_fence **fence,
		      int ring)
{
	/* we are protected by the ring emission mutex */
	*fence = kmalloc(sizeof(struct radeon_fence), M_DRM, M_WAITOK);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	refcount_init(&((*fence)->kref), 1);
	(*fence)->rdev = rdev;
	(*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
	(*fence)->ring = ring;
	radeon_fence_ring_emit(rdev, ring, *fence);
#ifdef TRACE_TODO
	trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
#endif
	radeon_fence_schedule_check(rdev, ring);
	return 0;
}

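/*
 * Worked example of the 32->64 bit sequence reconstruction done by
 * radeon_fence_activity() below (numbers are only illustrative).  The
 * hardware stores just the low 32 bits of the sequence, so the driver
 * splices the high 32 bits back in from its own 64-bit counters:
 *
 *	last_seq     = 0x00000001fffffff0	// last value seen signaled
 *	last_emitted = 0x0000000200000010	// last value actually emitted
 *	hw value     =         0x00000005	// radeon_fence_read()
 *
 *	seq = 0x00000005 | (last_seq & 0xffffffff00000000)
 *	    = 0x0000000100000005	// smaller than last_seq, so the
 *					// low word must have wrapped
 *	seq = 0x00000005 | (last_emitted & 0xffffffff00000000)
 *	    = 0x0000000200000005	// corrected 64-bit sequence
 *
 * This works because far fewer than 2^32 fences are ever outstanding
 * on a ring, so a hardware value below the low word of last_seq can
 * only mean the low word wrapped.
 */
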
/**
 * radeon_fence_activity - check for fence activity
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and calculates the last
 * signaled fence value.  Returns true if activity occurred
 * on the ring and the fence_queue should be woken up.
 */
static bool radeon_fence_activity(struct radeon_device *rdev, int ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note that there is a scenario here that could lead to an
	 * infinite loop, but it is very unlikely to happen.  For it to
	 * happen, the current polling process would have to be
	 * interrupted by another process, and that other process would
	 * have to update last_seq between our atomic read and xchg.
	 *
	 * Moreover, for the loop to never terminate, new fences would
	 * have to keep signaling continuously, i.e. radeon_fence_read
	 * would have to return a different value on every iteration for
	 * both the current polling process and the other process that
	 * updates last_seq between our atomic read and xchg.  And the
	 * value the other process stores as last_seq would have to be
	 * higher than the one we just read, which means the current
	 * process would have to be interrupted after radeon_fence_read
	 * and before the atomic xchg.
	 *
	 * To be even safer, we count the number of iterations and bail
	 * out after 10 loops, accepting that we might temporarily have
	 * stored a last_seq that is older than the true last signaled
	 * sequence number.
	 */
	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
	do {
		last_emitted = rdev->fence_drv[ring].sync_seq[ring];
		seq = radeon_fence_read(rdev, ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted) {
			break;
		}
		/* If we loop over, we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped too many times; leave with the
			 * possibility that we have stored an older fence
			 * seq than the real last seq signaled by the
			 * hardware.
			 */
			break;
		}
	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

	if (seq < last_emitted)
		radeon_fence_schedule_check(rdev, ring);

	return wake;
}

/**
 * radeon_fence_check_lockup - check for hardware lockup
 *
 * @work: delayed work item
 *
 * Checks for fence activity and, if there is none, probes
 * the hardware to see whether a lockup occurred.
 */
static void radeon_fence_check_lockup(struct work_struct *work)
{
	struct radeon_fence_driver *fence_drv;
	struct radeon_device *rdev;
	int ring;

	fence_drv = container_of(work, struct radeon_fence_driver,
				 lockup_work.work);
	rdev = fence_drv->rdev;
	ring = fence_drv - &rdev->fence_drv[0];

	if (lockmgr(&rdev->exclusive_lock, LK_EXCLUSIVE|LK_NOWAIT)) {
		/* just reschedule the check if a reset is going on */
		radeon_fence_schedule_check(rdev, ring);
		return;
	}

	if (radeon_fence_activity(rdev, ring)) {
		wake_up_all(&rdev->fence_queue);
	} else if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
		/* good news, we believe it's a lockup */
		dev_warn(rdev->dev, "GPU lockup (current fence id "
			 "0x%016lx last fence id 0x%016lx on ring %d)\n",
			 (uint64_t)atomic64_read(&fence_drv->last_seq),
			 fence_drv->sync_seq[ring], ring);

		/* remember that we need a reset */
		rdev->needs_reset = true;
		wake_up_all(&rdev->fence_queue);
	}
	lockmgr(&rdev->exclusive_lock, LK_RELEASE);
}

/**
 * radeon_fence_process - process a fence
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	if (radeon_fence_activity(rdev, ring))
		wake_up_all(&rdev->fence_queue);
}

/**
 * radeon_fence_destroy - destroy a fence
 *
 * @fence: radeon fence object
 *
 * Frees the fence object (all asics).
 */
static void radeon_fence_destroy(struct radeon_fence *fence)
{
	kfree(fence);
}

/**
 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence number
 * @ring: ring index the fence is associated with
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value).  Helper function for
 * radeon_fence_signaled().
 */
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
				      u64 seq, unsigned ring)
{
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	/* poll new last sequence at least once */
	radeon_fence_process(rdev, ring);
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	return false;
}

/**
 * radeon_fence_signaled - check if a fence has signaled
 *
 * @fence: radeon fence object
 *
 * Check if the requested fence has signaled (all asics).
 * Returns true if the fence has signaled or false if it has not.
 */
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	if (!fence)
		return true;
	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring))
		return true;
	return false;
}

/**
 * radeon_fence_any_seq_signaled - check if any sequence number is signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence numbers
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if any of the requested sequence numbers has signaled
 * (current value is >= requested value) or false if none have.
 * Helper function for radeon_fence_wait_seq_timeout().
 */
static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
	unsigned i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i))
			return true;
	}
	return false;
}

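/*
 * The wait helpers below all take a sequence number array indexed by
 * ring id, where a zero entry means "do not wait on that ring".  An
 * illustrative (hypothetical) caller waiting for either the GFX or the
 * DMA ring to reach a given fence would set the array up like this:
 *
 *	u64 seq[RADEON_NUM_RINGS] = {};
 *
 *	seq[RADEON_RING_TYPE_GFX_INDEX] = gfx_fence->seq;
 *	seq[R600_RING_TYPE_DMA_INDEX] = dma_fence->seq;
 *	r = radeon_fence_wait_seq_timeout(rdev, seq, false, INT_MAX);
 */
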
/**
 * radeon_fence_wait_seq_timeout - wait for specific sequence numbers
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics).  The sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence numbers.  Helper function
 * for radeon_fence_wait_*().
 * Returns the remaining time if the sequence numbers have passed, 0 if
 * the wait timed out, or an error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected.
 */
static int radeon_fence_wait_seq_timeout(struct radeon_device *rdev,
					 u64 *target_seq, bool intr,
					 int timeout)
{
	long r;
	int i;

	if (radeon_fence_any_seq_signaled(rdev, target_seq))
		return timeout;

	/* enable IRQs and tracing */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i])
			continue;

#ifdef TRACE_TODO
		trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
#endif
		radeon_irq_kms_sw_irq_get(rdev, i);
	}

	if (intr) {
		r = wait_event_interruptible_timeout(rdev->fence_queue, (
			radeon_fence_any_seq_signaled(rdev, target_seq)
			 || rdev->needs_reset), timeout);
	} else {
		r = wait_event_timeout(rdev->fence_queue, (
			radeon_fence_any_seq_signaled(rdev, target_seq)
			 || rdev->needs_reset), timeout);
	}

	if (rdev->needs_reset)
		r = -EDEADLK;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i])
			continue;

		radeon_irq_kms_sw_irq_put(rdev, i);
#ifdef TRACE_TODO
		trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
#endif
	}

	return r;
}

/**
 * radeon_fence_wait_timeout - wait for a fence to signal, with a timeout
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns 0 if the fence signaled or the wait timed out, or a negative
 * error code for all other cases.
 */
int radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, int timeout)
{
	u64 seq[RADEON_NUM_RINGS] = {};
	int r;

	if (timeout < 0) {
		DRM_ERROR("radeon_fence_wait_timeout called with a negative timeout (%d)!\n",
			  timeout);
	}

	if (fence == NULL) {
		WARN(1, "Querying an invalid fence : %p !\n", fence);
		return -EINVAL;
	}

	seq[fence->ring] = fence->seq;
	r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, timeout);
	if (r <= 0) {
		return r;
	}

	return 0;
}

/**
 * radeon_fence_wait - wait for a fence to signal
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns 0 if the fence has passed, or an error for all other cases.
 */
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	int r = radeon_fence_wait_timeout(fence, intr, INT_MAX);

	if (r > 0) {
		return 0;
	} else {
		return r;
	}
}

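/*
 * Illustrative sketch of a bounded wait built on the helpers above
 * (hypothetical caller; assumes the usual jiffies conversion helpers
 * such as msecs_to_jiffies() are available in this environment):
 *
 *	r = radeon_fence_wait_timeout(fence, false, msecs_to_jiffies(100));
 *	if (r == -EDEADLK)
 *		... a GPU lockup was detected, a reset is needed ...
 *	else if (r)
 *		... some other error, e.g. the sleep was interrupted ...
 *	else if (!radeon_fence_signaled(fence))
 *		... the 100 ms elapsed without the fence signaling ...
 */
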
/**
 * radeon_fence_wait_any - wait for a fence to signal on any ring
 *
 * @rdev: radeon device pointer
 * @fences: radeon fence object(s)
 * @intr: use interruptible sleep
 *
 * Wait for any requested fence to signal (all asics).  The fence
 * array is indexed by ring id.  @intr selects whether to use
 * interruptible (true) or non-interruptible (false) sleep when
 * waiting for the fences.  Used by the suballocator.
 * Returns 0 if any fence has passed, or an error for all other cases.
 */
int radeon_fence_wait_any(struct radeon_device *rdev,
			  struct radeon_fence **fences,
			  bool intr)
{
	u64 seq[RADEON_NUM_RINGS];
	unsigned i, num_rings = 0;
	int r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		seq[i] = 0;

		if (!fences[i]) {
			continue;
		}

		seq[i] = fences[i]->seq;
		++num_rings;
	}

	/* nothing to wait for ? */
	if (num_rings == 0)
		return -ENOENT;

	r = radeon_fence_wait_seq_timeout(rdev, seq, intr, INT_MAX);
	if (r < 0) {
		return r;
	}
	return 0;
}

/**
 * radeon_fence_wait_next - wait for the next fence to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, or an error for all other cases.
 * Caller must hold the ring lock.
 */
int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
{
	uint64_t seq[RADEON_NUM_RINGS] = {};
	int r;

	seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
	if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
		/* nothing to wait for, last_seq is already
		   the last emitted fence */
		return -ENOENT;
	}
	r = radeon_fence_wait_seq_timeout(rdev, (u64 *)seq, false, INT_MAX);
	if (r < 0)
		return r;
	return 0;
}

/**
 * radeon_fence_wait_empty - wait for all fences to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, or an error for all other cases.
 * Caller must hold the ring lock.
 */
int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
{
	u64 seq[RADEON_NUM_RINGS] = {};
	int r;

	seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
	if (!seq[ring])
		return 0;

	r = radeon_fence_wait_seq_timeout(rdev, seq, false, INT_MAX);
	if (r < 0) {
		if (r == -EDEADLK)
			return -EDEADLK;

		dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
			ring, r);
	}
	return 0;
}

/**
 * radeon_fence_ref - take a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	refcount_acquire(&fence->kref);
	return fence;
}

/**
 * radeon_fence_unref - remove a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Remove a reference on a fence (all asics).
 */
void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		if (refcount_release(&tmp->kref)) {
			radeon_fence_destroy(tmp);
		}
	}
}

/**
 * radeon_fence_count_emitted - get the count of emitted fences
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
	uint64_t emitted;

	/* We are not protected by the ring lock when reading the last
	 * sequence, but it's ok to report a slightly wrong fence count here.
	 */
	radeon_fence_process(rdev, ring);
	emitted = rdev->fence_drv[ring].sync_seq[ring]
		- atomic64_read(&rdev->fence_drv[ring].last_seq);
	/* to avoid a 32-bit wrap-around */
	if (emitted > 0x10000000) {
		emitted = 0x10000000;
	}
	return (unsigned)emitted;
}

/**
 * radeon_fence_need_sync - do we need a semaphore
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics).  If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * not.
 */
bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *fdrv;

	if (!fence) {
		return false;
	}

	if (fence->ring == dst_ring) {
		return false;
	}

	/* we are protected by the ring mutex */
	fdrv = &fence->rdev->fence_drv[dst_ring];
	if (fence->seq <= fdrv->sync_seq[fence->ring]) {
		return false;
	}

	return true;
}

/**
 * radeon_fence_note_sync - record the sync point
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which the fence will be
 * synced with the requested ring (all asics).
 */
void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *dst, *src;
	unsigned i;

	if (!fence) {
		return;
	}

	if (fence->ring == dst_ring) {
		return;
	}

	/* we are protected by the ring mutex */
	src = &fence->rdev->fence_drv[fence->ring];
	dst = &fence->rdev->fence_drv[dst_ring];
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (i == dst_ring) {
			continue;
		}
		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
	}
}

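/*
 * Illustrative sketch of how the two helpers above are meant to be used
 * together when work queued on @dst_ring depends on a fence from another
 * ring (the actual semaphore plumbing typically lives in
 * radeon_semaphore.c; this is only the general shape):
 *
 *	if (radeon_fence_need_sync(fence, dst_ring)) {
 *		// make dst_ring wait for fence->ring, e.g. via a
 *		// hardware semaphore, then record that the two rings
 *		// are now synced up to this sequence number:
 *		radeon_fence_note_sync(fence, dst_ring);
 *	}
 */
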
/**
 * radeon_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
	uint64_t index;
	int r;

	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
	if (rdev->wb.use_event ||
	    !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
		rdev->fence_drv[ring].scratch_reg = 0;
		if (ring != R600_RING_TYPE_UVD_INDEX) {
			index = R600_WB_EVENT_OFFSET + ring * 4;
			rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
			rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr +
							 index;
		} else {
			/* put fence directly behind firmware */
			index = ALIGN(rdev->uvd_fw->datasize, 8);
			rdev->fence_drv[ring].cpu_addr = (void*)((uint8_t*)rdev->uvd.cpu_addr + index);
			rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
		}
	} else {
		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
		if (r) {
			dev_err(rdev->dev, "fence failed to get scratch register\n");
			return r;
		}
		index = RADEON_WB_SCRATCH_OFFSET +
			rdev->fence_drv[ring].scratch_reg -
			rdev->scratch.reg_base;
		rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
		rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
	}
	radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
	rdev->fence_drv[ring].initialized = true;
	dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016jx and cpu addr 0x%p\n",
		 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
	return 0;
}

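/*
 * Layout note for the writeback case handled above: each ring's fence
 * value is a single dword inside the shared writeback page.  For
 * example, for ring 3 the byte offset is
 * index = R600_WB_EVENT_OFFSET + 3 * 4; the CPU then reads the value
 * through rdev->wb.wb[index / 4] (wb.wb is an array of 32-bit words),
 * while the GPU writes the same dword at rdev->wb.gpu_addr + index,
 * i.e. through its GART address.
 */
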
/**
 * radeon_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for radeon_fence_driver_init().
 */
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
	int i;

	rdev->fence_drv[ring].scratch_reg = -1;
	rdev->fence_drv[ring].cpu_addr = NULL;
	rdev->fence_drv[ring].gpu_addr = 0;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		rdev->fence_drv[ring].sync_seq[i] = 0;
	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
	rdev->fence_drv[ring].initialized = false;
	INIT_DELAYED_WORK(&rdev->fence_drv[ring].lockup_work,
			  radeon_fence_check_lockup);
	rdev->fence_drv[ring].rdev = rdev;
}

/**
 * radeon_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * radeon_fence_driver_start_ring().
 * Returns 0 for success.
 */
int radeon_fence_driver_init(struct radeon_device *rdev)
{
	int ring;

	init_waitqueue_head(&rdev->fence_queue);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		radeon_fence_driver_init_ring(rdev, ring);
	}
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}

/**
 * radeon_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	int ring, r;

	lockmgr(&rdev->ring_lock, LK_EXCLUSIVE);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		r = radeon_fence_wait_empty(rdev, ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			radeon_fence_driver_force_completion(rdev, ring);
		}
		cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
		wake_up_all(&rdev->fence_queue);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		rdev->fence_drv[ring].initialized = false;
	}
	lockmgr(&rdev->ring_lock, LK_RELEASE);
}

/**
 * radeon_fence_driver_force_completion - force all fence waiters to complete
 *
 * @rdev: radeon device pointer
 * @ring: the ring to complete
 *
 * In case of a GPU reset failure, make sure no process keeps waiting on a
 * fence that will never complete.
 */
void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring)
{
	if (rdev->fence_drv[ring].initialized) {
		radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
		cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
	}
}


/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i, j;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		radeon_fence_process(rdev, i);

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic_load_acq_64(&rdev->fence_drv[i].last_seq));
		seq_printf(m, "Last emitted 0x%016llx\n",
			   rdev->fence_drv[i].sync_seq[i]);

		for (j = 0; j < RADEON_NUM_RINGS; ++j) {
			if (i != j && rdev->fence_drv[j].initialized)
				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
					   j, rdev->fence_drv[i].sync_seq[j]);
		}
	}
	return 0;
}

/**
 * radeon_debugfs_gpu_reset - manually trigger a gpu reset
 *
 * Manually trigger a gpu reset at the next fence wait.
 */
static int radeon_debugfs_gpu_reset(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	down_read(&rdev->exclusive_lock);
	seq_printf(m, "%d\n", rdev->needs_reset);
	rdev->needs_reset = true;
	wake_up_all(&rdev->fence_queue);
	up_read(&rdev->exclusive_lock);

	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
	{"radeon_gpu_reset", &radeon_debugfs_gpu_reset, 0, NULL}
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 2);
#else
	return 0;
#endif
}