/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 *
 * $FreeBSD: head/sys/dev/drm2/radeon/radeon_fence.c 254885 2013-08-25 19:37:15Z dumbbell $
 */

#include <drm/drmP.h>
#include "radeon_reg.h"
#include "radeon.h"
#ifdef DUMBBELL_WIP
#include "radeon_trace.h"
#endif /* DUMBBELL_WIP */

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization. When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed. Whether
 * we use a scratch register or a memory location depends on the asic
 * and on whether writeback is enabled.
 */

/**
 * radeon_fence_write - write a fence value
 *
 * @rdev: radeon_device pointer
 * @seq: sequence number to write
 * @ring: ring index the fence is associated with
 *
 * Writes a fence value to memory or a scratch register (all asics).
 */
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		*drv->cpu_addr = cpu_to_le32(seq);
	} else {
		WREG32(drv->scratch_reg, seq);
	}
}

/**
 * radeon_fence_read - read a fence value
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Reads a fence value from memory or a scratch register (all asics).
 * Returns the value of the fence read from memory or register.
 */
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	u32 seq = 0;

	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		seq = le32_to_cpu(*drv->cpu_addr);
	} else {
		seq = RREG32(drv->scratch_reg);
	}
	return seq;
}
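/*
 * Illustrative sketch (not part of the driver): a typical fence
 * lifecycle, assuming a caller that already holds the ring emission
 * mutex for the emit step, looks roughly like this:
 *
 *	struct radeon_fence *fence;
 *
 *	radeon_fence_emit(rdev, &fence, ring);	-- CPU asks the GPU to mark an event
 *	...					-- GPU processes the commands
 *	radeon_fence_wait(fence, false);	-- CPU blocks until the seq is written
 *	radeon_fence_unref(&fence);		-- drop the caller's reference
 */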
/**
 * radeon_fence_emit - emit a fence on the requested ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 * @ring: ring index the fence is associated with
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_fence_emit(struct radeon_device *rdev,
		      struct radeon_fence **fence,
		      int ring)
{
	/* we are protected by the ring emission mutex */
	*fence = kmalloc(sizeof(struct radeon_fence), M_DRM,
			 M_WAITOK);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	refcount_init(&((*fence)->kref), 1);
	(*fence)->rdev = rdev;
	(*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
	(*fence)->ring = ring;
	radeon_fence_ring_emit(rdev, ring, *fence);
	CTR2(KTR_DRM, "radeon fence: emit (ring=%d, seq=%d)", ring, (*fence)->seq);
	return 0;
}

/**
 * radeon_fence_process - process a fence
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process, and that other
	 * process needs to update last_seq between the atomic read and the
	 * xchg of the current process.
	 *
	 * Moreover, for this to turn into an infinite loop there must be
	 * continuously new fences signaled, i.e. radeon_fence_read needs
	 * to return a different value each time for both the currently
	 * polling process and the other process that xchg'ed last_seq
	 * between the atomic read and xchg of the current process. And the
	 * value the other process set as last_seq must be higher than
	 * the seq value we just read. Which means that the current process
	 * needs to be interrupted after radeon_fence_read and before the
	 * atomic xchg.
	 *
	 * To be even safer we count the number of times we loop and
	 * bail after 10 loops, just accepting the fact that we might
	 * have temporarily set last_seq not to the true last
	 * seq but to an older one.
	 */
	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
	do {
		last_emitted = rdev->fence_drv[ring].sync_seq[ring];
		seq = radeon_fence_read(rdev, ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted) {
			break;
		}
		/* If we loop over we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped over too many times, leave with the
			 * fact that we might have set an older fence
			 * seq than the current real last seq as signaled
			 * by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

	if (wake) {
		rdev->fence_drv[ring].last_activity = jiffies;
		cv_broadcast(&rdev->fence_queue);
	}
}
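/*
 * Worked example (illustrative): the hardware only stores the low 32
 * bits of the sequence number, so radeon_fence_process reconstructs the
 * 64-bit value. Suppose last_seq = 0x00000001fffffffe, the hardware just
 * wrote 0x00000003, and last_emitted = 0x0000000200000005. Splicing the
 * old upper half gives 0x0000000100000003, which is below last_seq, so
 * the code detects the 32-bit wraparound and instead splices the upper
 * half of last_emitted, yielding the correct 0x0000000200000003.
 */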
/**
 * radeon_fence_destroy - destroy a fence
 *
 * @fence: radeon fence object
 *
 * Frees the fence object (all asics).
 */
static void radeon_fence_destroy(struct radeon_fence *fence)
{

	drm_free(fence, M_DRM);
}

/**
 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence number
 * @ring: ring index the fence is associated with
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value). Helper function for
 * radeon_fence_signaled().
 */
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
				      u64 seq, unsigned ring)
{
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	/* poll new last sequence at least once */
	radeon_fence_process(rdev, ring);
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	return false;
}

/**
 * radeon_fence_signaled - check if a fence has signaled
 *
 * @fence: radeon fence object
 *
 * Check if the requested fence has signaled (all asics).
 * Returns true if the fence has signaled or false if it has not.
 */
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	if (!fence) {
		return true;
	}
	if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
		return true;
	}
	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
		fence->seq = RADEON_FENCE_SIGNALED_SEQ;
		return true;
	}
	return false;
}
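/*
 * Illustrative usage (not driver code): radeon_fence_signaled() never
 * blocks, so a caller can poll a fence opportunistically, e.g.:
 *
 *	if (radeon_fence_signaled(fence)) {
 *		radeon_fence_unref(&fence);	-- work is done, release it
 *	}
 *
 * Once it returns true, the fence's seq is latched to
 * RADEON_FENCE_SIGNALED_SEQ, so later calls are constant time.
 */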
/**
 * radeon_fence_wait_seq - wait for a specific sequence number
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number we want to wait for
 * @ring: ring index the fence is associated with
 * @intr: use interruptible sleep
 * @lock_ring: whether the ring should be locked or not
 *
 * Wait for the requested sequence number to be written (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number. Helper function
 * for radeon_fence_wait(), et al.
 * Returns 0 if the sequence number has passed, error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected and the ring is
 * marked as not ready so no further jobs get scheduled until a successful
 * reset.
 */
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
				 unsigned ring, bool intr, bool lock_ring)
{
	unsigned long timeout, last_activity;
	uint64_t seq;
	unsigned i;
	bool signaled, fence_queue_locked;
	int r;

	while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
		if (!rdev->ring[ring].ready) {
			return -EBUSY;
		}

		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
		if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
			/* the normal case, timeout is somewhere before last_activity */
			timeout = rdev->fence_drv[ring].last_activity - timeout;
		} else {
			/* either jiffies wrapped around, or no fence was signaled in the last 500ms;
			 * anyway we will just wait for the minimum amount and then check for a lockup
			 */
			timeout = 1;
		}
		seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
		/* Save the current last activity value, used to check for GPU lockups */
		last_activity = rdev->fence_drv[ring].last_activity;

		CTR2(KTR_DRM, "radeon fence: wait begin (ring=%d, seq=%d)",
		    ring, seq);

		radeon_irq_kms_sw_irq_get(rdev, ring);
		fence_queue_locked = false;
		r = 0;
		while (!(signaled = radeon_fence_seq_signaled(rdev,
		    target_seq, ring))) {
			if (!fence_queue_locked) {
				lockmgr(&rdev->fence_queue_mtx, LK_EXCLUSIVE);
				fence_queue_locked = true;
			}
			if (intr) {
				r = cv_timedwait_sig(&rdev->fence_queue,
				    &rdev->fence_queue_mtx,
				    timeout);
			} else {
				r = cv_timedwait(&rdev->fence_queue,
				    &rdev->fence_queue_mtx,
				    timeout);
			}
			if (r != 0) {
				if (r == EWOULDBLOCK) {
					signaled =
					    radeon_fence_seq_signaled(
						rdev, target_seq, ring);
				}
				break;
			}
		}
		if (fence_queue_locked) {
			lockmgr(&rdev->fence_queue_mtx, LK_RELEASE);
		}
		radeon_irq_kms_sw_irq_put(rdev, ring);
		if (unlikely(r == EINTR || r == ERESTART)) {
			return -r;
		}
		CTR2(KTR_DRM, "radeon fence: wait end (ring=%d, seq=%d)",
		    ring, seq);

		if (unlikely(!signaled)) {
#ifndef __FreeBSD__
			/* we were interrupted for some reason and fence
			 * isn't signaled yet, resume waiting */
			if (r) {
				continue;
			}
#endif

			/* check if sequence value has changed since last_activity */
			if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
				continue;
			}

			if (lock_ring) {
				lockmgr(&rdev->ring_lock, LK_EXCLUSIVE);
			}

			/* test if somebody else has already decided that this is a lockup */
			if (last_activity != rdev->fence_drv[ring].last_activity) {
				if (lock_ring) {
					lockmgr(&rdev->ring_lock, LK_RELEASE);
				}
				continue;
			}

			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
				/* good news, we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016jx last fence id 0x%016jx)\n",
					 (uintmax_t)target_seq, (uintmax_t)seq);

				/* change last activity so nobody else thinks there is a lockup */
				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
					rdev->fence_drv[i].last_activity = jiffies;
				}

				/* mark the ring as not ready any more */
				rdev->ring[ring].ready = false;
				if (lock_ring) {
					lockmgr(&rdev->ring_lock, LK_RELEASE);
				}
				return -EDEADLK;
			}

			if (lock_ring) {
				lockmgr(&rdev->ring_lock, LK_RELEASE);
			}
		}
	}
	return 0;
}
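/*
 * Illustrative sketch (not part of the driver): a submission path emits
 * a fence while holding the ring emission mutex and, once the mutex is
 * released, blocks until the commands retire:
 *
 *	struct radeon_fence *fence;
 *	int r;
 *
 *	r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
 *	...					-- submit and unlock the ring
 *	r = radeon_fence_wait(fence, false);	-- may return -EDEADLK
 *	radeon_fence_unref(&fence);
 *
 * A -EDEADLK return means a GPU lockup was detected; the caller is then
 * expected to trigger a reset.
 */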
/**
 * radeon_fence_wait - wait for a fence to signal
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns 0 if the fence has passed, error for all other cases.
 */
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	int r;

	if (fence == NULL) {
		DRM_ERROR("Querying an invalid fence: %p!\n", fence);
		return -EINVAL;
	}

	r = radeon_fence_wait_seq(fence->rdev, fence->seq,
				  fence->ring, intr, true);
	if (r) {
		return r;
	}
	fence->seq = RADEON_FENCE_SIGNALED_SEQ;
	return 0;
}

static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
	unsigned i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
			return true;
		}
	}
	return false;
}

/**
 * radeon_fence_wait_any_seq - wait for a sequence number on any ring
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics). The sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number. Helper function
 * for radeon_fence_wait_any(), et al.
 * Returns 0 if the sequence number has passed, error for all other cases.
 */
static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
				     u64 *target_seq, bool intr)
{
	unsigned long timeout, last_activity, tmp;
	unsigned i, ring = RADEON_NUM_RINGS;
	bool signaled, fence_queue_locked;
	int r;

	for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i]) {
			continue;
		}

		/* use the most recent one as indicator */
		if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
			last_activity = rdev->fence_drv[i].last_activity;
		}

		/* For lockup detection just pick the lowest ring we are
		 * actively waiting for
		 */
		if (i < ring) {
			ring = i;
		}
	}

	/* nothing to wait for? */
	if (ring == RADEON_NUM_RINGS) {
		return -ENOENT;
	}

	while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
		if (time_after(last_activity, timeout)) {
			/* the normal case, timeout is somewhere before last_activity */
			timeout = last_activity - timeout;
		} else {
			/* either jiffies wrapped around, or no fence was signaled in the last 500ms;
			 * anyway we will just wait for the minimum amount and then check for a lockup
			 */
			timeout = 1;
		}

		CTR2(KTR_DRM, "radeon fence: wait begin (ring=%d, target_seq=%d)",
		    ring, target_seq[ring]);
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (target_seq[i]) {
				radeon_irq_kms_sw_irq_get(rdev, i);
			}
		}
		fence_queue_locked = false;
		r = 0;
		while (!(signaled = radeon_fence_any_seq_signaled(rdev,
		    target_seq))) {
			if (!fence_queue_locked) {
				lockmgr(&rdev->fence_queue_mtx, LK_EXCLUSIVE);
				fence_queue_locked = true;
			}
			if (intr) {
				r = cv_timedwait_sig(&rdev->fence_queue,
				    &rdev->fence_queue_mtx,
				    timeout);
			} else {
				r = cv_timedwait(&rdev->fence_queue,
				    &rdev->fence_queue_mtx,
				    timeout);
			}
			if (r != 0) {
				if (r == EWOULDBLOCK) {
					signaled =
					    radeon_fence_any_seq_signaled(
						rdev, target_seq);
				}
				break;
			}
		}
		if (fence_queue_locked) {
			lockmgr(&rdev->fence_queue_mtx, LK_RELEASE);
		}
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (target_seq[i]) {
				radeon_irq_kms_sw_irq_put(rdev, i);
			}
		}
		if (unlikely(r == EINTR || r == ERESTART)) {
			return -r;
		}
		CTR2(KTR_DRM, "radeon fence: wait end (ring=%d, target_seq=%d)",
		    ring, target_seq[ring]);

		if (unlikely(!signaled)) {
#ifndef __FreeBSD__
			/* we were interrupted for some reason and fence
			 * isn't signaled yet, resume waiting */
			if (r) {
				continue;
			}
#endif

			lockmgr(&rdev->ring_lock, LK_EXCLUSIVE);
			for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
				if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
					tmp = rdev->fence_drv[i].last_activity;
				}
			}
			/* test if somebody else has already decided that this is a lockup */
			if (last_activity != tmp) {
				last_activity = tmp;
				lockmgr(&rdev->ring_lock, LK_RELEASE);
				continue;
			}

			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
				/* good news, we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016jx)\n",
					 (uintmax_t)target_seq[ring]);

				/* change last activity so nobody else thinks there is a lockup */
				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
					rdev->fence_drv[i].last_activity = jiffies;
				}

				/* mark the ring as not ready any more */
				rdev->ring[ring].ready = false;
				lockmgr(&rdev->ring_lock, LK_RELEASE);
				return -EDEADLK;
			}
			lockmgr(&rdev->ring_lock, LK_RELEASE);
		}
	}
	return 0;
}
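/*
 * Illustrative sketch (not part of the driver): a caller such as the
 * suballocator that holds one fence per ring and only needs any of them
 * to retire could do roughly (gfx_fence is hypothetical):
 *
 *	struct radeon_fence *fences[RADEON_NUM_RINGS] = { NULL };
 *
 *	fences[RADEON_RING_TYPE_GFX_INDEX] = gfx_fence;
 *	r = radeon_fence_wait_any(rdev, fences, false);
 *
 * NULL entries are skipped, and a fence already carrying
 * RADEON_FENCE_SIGNALED_SEQ makes the call return 0 immediately.
 */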
595 */ 596 int radeon_fence_wait_any(struct radeon_device *rdev, 597 struct radeon_fence **fences, 598 bool intr) 599 { 600 uint64_t seq[RADEON_NUM_RINGS]; 601 unsigned i; 602 int r; 603 604 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 605 seq[i] = 0; 606 607 if (!fences[i]) { 608 continue; 609 } 610 611 if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) { 612 /* something was allready signaled */ 613 return 0; 614 } 615 616 seq[i] = fences[i]->seq; 617 } 618 619 r = radeon_fence_wait_any_seq(rdev, seq, intr); 620 if (r) { 621 return r; 622 } 623 return 0; 624 } 625 626 /** 627 * radeon_fence_wait_next_locked - wait for the next fence to signal 628 * 629 * @rdev: radeon device pointer 630 * @ring: ring index the fence is associated with 631 * 632 * Wait for the next fence on the requested ring to signal (all asics). 633 * Returns 0 if the next fence has passed, error for all other cases. 634 * Caller must hold ring lock. 635 */ 636 int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring) 637 { 638 uint64_t seq; 639 640 seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL; 641 if (seq >= rdev->fence_drv[ring].sync_seq[ring]) { 642 /* nothing to wait for, last_seq is 643 already the last emited fence */ 644 return -ENOENT; 645 } 646 return radeon_fence_wait_seq(rdev, seq, ring, false, false); 647 } 648 649 /** 650 * radeon_fence_wait_empty_locked - wait for all fences to signal 651 * 652 * @rdev: radeon device pointer 653 * @ring: ring index the fence is associated with 654 * 655 * Wait for all fences on the requested ring to signal (all asics). 656 * Returns 0 if the fences have passed, error for all other cases. 657 * Caller must hold ring lock. 658 */ 659 int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring) 660 { 661 uint64_t seq = rdev->fence_drv[ring].sync_seq[ring]; 662 int r; 663 664 r = radeon_fence_wait_seq(rdev, seq, ring, false, false); 665 if (r) { 666 if (r == -EDEADLK) { 667 return -EDEADLK; 668 } 669 dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n", 670 ring, r); 671 } 672 return 0; 673 } 674 675 /** 676 * radeon_fence_ref - take a ref on a fence 677 * 678 * @fence: radeon fence object 679 * 680 * Take a reference on a fence (all asics). 681 * Returns the fence. 682 */ 683 struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence) 684 { 685 refcount_acquire(&fence->kref); 686 return fence; 687 } 688 689 /** 690 * radeon_fence_unref - remove a ref on a fence 691 * 692 * @fence: radeon fence object 693 * 694 * Remove a reference on a fence (all asics). 695 */ 696 void radeon_fence_unref(struct radeon_fence **fence) 697 { 698 struct radeon_fence *tmp = *fence; 699 700 *fence = NULL; 701 if (tmp) { 702 if (refcount_release(&tmp->kref)) { 703 radeon_fence_destroy(tmp); 704 } 705 } 706 } 707 708 /** 709 * radeon_fence_count_emitted - get the count of emitted fences 710 * 711 * @rdev: radeon device pointer 712 * @ring: ring index the fence is associated with 713 * 714 * Get the number of fences emitted on the requested ring (all asics). 715 * Returns the number of emitted fences on the ring. Used by the 716 * dynpm code to ring track activity. 717 */ 718 unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring) 719 { 720 uint64_t emitted; 721 722 /* We are not protected by ring lock when reading the last sequence 723 * but it's ok to report slightly wrong fence count here. 
/**
 * radeon_fence_count_emitted - get the count of emitted fences
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring. Used by the
 * dynpm code to track ring activity.
 */
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
	uint64_t emitted;

	/* We are not protected by the ring lock when reading the last sequence,
	 * but it's ok to report a slightly wrong fence count here.
	 */
	radeon_fence_process(rdev, ring);
	emitted = rdev->fence_drv[ring].sync_seq[ring]
		- atomic64_read(&rdev->fence_drv[ring].last_seq);
	/* to avoid a 32-bit wraparound */
	if (emitted > 0x10000000) {
		emitted = 0x10000000;
	}
	return (unsigned)emitted;
}

/**
 * radeon_fence_need_sync - do we need a semaphore
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics). If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * not.
 */
bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *fdrv;

	if (!fence) {
		return false;
	}

	if (fence->ring == dst_ring) {
		return false;
	}

	/* we are protected by the ring mutex */
	fdrv = &fence->rdev->fence_drv[dst_ring];
	if (fence->seq <= fdrv->sync_seq[fence->ring]) {
		return false;
	}

	return true;
}

/**
 * radeon_fence_note_sync - record the sync point
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *dst, *src;
	unsigned i;

	if (!fence) {
		return;
	}

	if (fence->ring == dst_ring) {
		return;
	}

	/* we are protected by the ring mutex */
	src = &fence->rdev->fence_drv[fence->ring];
	dst = &fence->rdev->fence_drv[dst_ring];
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (i == dst_ring) {
			continue;
		}
		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
	}
}
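/*
 * Illustrative sketch (not part of the driver): cross-ring
 * synchronization pairs the two helpers above around a semaphore
 * emission, roughly:
 *
 *	if (radeon_fence_need_sync(fence, dst_ring)) {
 *		-- emit a hardware semaphore wait on dst_ring here
 *		radeon_fence_note_sync(fence, dst_ring);
 *	}
 *
 * so that later fences from the same source ring are recognized as
 * already covered and no redundant semaphore is emitted.
 */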
/**
 * radeon_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
	uint64_t index;
	int r;

	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
	if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
		rdev->fence_drv[ring].scratch_reg = 0;
		index = R600_WB_EVENT_OFFSET + ring * 4;
	} else {
		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
		if (r) {
			dev_err(rdev->dev, "fence failed to get scratch register\n");
			return r;
		}
		index = RADEON_WB_SCRATCH_OFFSET +
			rdev->fence_drv[ring].scratch_reg -
			rdev->scratch.reg_base;
	}
	rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
	rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
	radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
	rdev->fence_drv[ring].initialized = true;
	dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016jx and cpu addr 0x%p\n",
		 ring, (uintmax_t)rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
	return 0;
}

/**
 * radeon_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for radeon_fence_driver_init().
 */
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
	int i;

	rdev->fence_drv[ring].scratch_reg = -1;
	rdev->fence_drv[ring].cpu_addr = NULL;
	rdev->fence_drv[ring].gpu_addr = 0;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		rdev->fence_drv[ring].sync_seq[i] = 0;
	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
	rdev->fence_drv[ring].last_activity = jiffies;
	rdev->fence_drv[ring].initialized = false;
}

/**
 * radeon_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * radeon_fence_driver_start_ring().
 * Returns 0 for success.
 */
int radeon_fence_driver_init(struct radeon_device *rdev)
{
	int ring;

	lockinit(&rdev->fence_queue_mtx,
		 "drm__radeon_device__fence_queue_mtx", 0, LK_CANRECURSE);
	cv_init(&rdev->fence_queue, "drm__radeon_device__fence_queue");
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		radeon_fence_driver_init_ring(rdev, ring);
	}
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}

/**
 * radeon_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	int ring, r;

	lockmgr(&rdev->ring_lock, LK_EXCLUSIVE);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		r = radeon_fence_wait_empty_locked(rdev, ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			radeon_fence_driver_force_completion(rdev);
		}
		cv_broadcast(&rdev->fence_queue);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		rdev->fence_drv[ring].initialized = false;
	}
	/* destroy the shared fence queue once, after all rings are torn down */
	cv_destroy(&rdev->fence_queue);
	lockmgr(&rdev->ring_lock, LK_RELEASE);
}
/**
 * radeon_fence_driver_force_completion - force all fence waiters to complete
 *
 * @rdev: radeon device pointer
 *
 * In case of GPU reset failure, make sure no process keeps waiting on a fence
 * that will never complete.
 */
void radeon_fence_driver_force_completion(struct radeon_device *rdev)
{
	int ring;

	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
	}
}


/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i, j;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic_load_acq_64(&rdev->fence_drv[i].last_seq));
		seq_printf(m, "Last emitted 0x%016llx\n",
			   rdev->fence_drv[i].sync_seq[i]);

		for (j = 0; j < RADEON_NUM_RINGS; ++j) {
			if (i != j && rdev->fence_drv[j].initialized)
				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
					   j, rdev->fence_drv[i].sync_seq[j]);
		}
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
	return 0;
#endif
}