/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 *
 * $FreeBSD: head/sys/dev/drm2/radeon/radeon_fence.c 254885 2013-08-25 19:37:15Z dumbbell $
 */
#include <drm/drmP.h>
#include "radeon_reg.h"
#include "radeon.h"
#ifdef DUMBBELL_WIP
#include "radeon_trace.h"
#endif /* DUMBBELL_WIP */

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.  Whether
 * we use a scratch register or a memory location depends on
 * the asic and on whether writeback is enabled.
 */
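/*
 * Editor's note: a minimal usage sketch (not part of the driver) of the
 * typical fence lifecycle, assuming the caller already holds the ring
 * emission mutex and has a valid ring index:
 *
 *	struct radeon_fence *fence;
 *	int r;
 *
 *	r = radeon_fence_emit(rdev, &fence, ring);	// after GPU commands
 *	if (r == 0) {
 *		r = radeon_fence_wait(fence, false);	// block until signaled
 *		radeon_fence_unref(&fence);		// drop our reference
 *	}
 */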
/**
 * radeon_fence_write - write a fence value
 *
 * @rdev: radeon_device pointer
 * @seq: sequence number to write
 * @ring: ring index the fence is associated with
 *
 * Writes a fence value to memory or a scratch register (all asics).
 */
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		if (drv->cpu_addr) {
			*drv->cpu_addr = cpu_to_le32(seq);
		}
	} else {
		WREG32(drv->scratch_reg, seq);
	}
}

/**
 * radeon_fence_read - read a fence value
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Reads a fence value from memory or a scratch register (all asics).
 * Returns the value of the fence read from memory or register.
 */
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	u32 seq = 0;

	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		if (drv->cpu_addr) {
			seq = le32_to_cpu(*drv->cpu_addr);
		} else {
			seq = lower_32_bits(atomic64_read(&drv->last_seq));
		}
	} else {
		seq = RREG32(drv->scratch_reg);
	}
	return seq;
}

/**
 * radeon_fence_emit - emit a fence on the requested ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 * @ring: ring index the fence is associated with
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_fence_emit(struct radeon_device *rdev,
		      struct radeon_fence **fence,
		      int ring)
{
	/* we are protected by the ring emission mutex */
	*fence = kmalloc(sizeof(struct radeon_fence), M_DRM, M_WAITOK);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	refcount_init(&((*fence)->kref), 1);
	(*fence)->rdev = rdev;
	(*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
	(*fence)->ring = ring;
	radeon_fence_ring_emit(rdev, ring, *fence);
	return 0;
}

/**
 * radeon_fence_process - process a fence
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop, but it's
	 * very unlikely to happen.  For it to happen, the currently
	 * polling process needs to be interrupted by another process,
	 * and that other process needs to update last_seq between the
	 * atomic read and the xchg of the current process.
	 *
	 * Moreover, for this to turn into an infinite loop, new fences
	 * need to be continuously signaled, i.e. radeon_fence_read needs
	 * to return a different value each time for both the currently
	 * polling process and the other process that updates last_seq
	 * between the atomic read and xchg of the current process.  And
	 * the value the other process sets as last_seq must be higher
	 * than the seq value we just read, which means the current
	 * process needs to be interrupted after radeon_fence_read and
	 * before the atomic xchg.
	 *
	 * To be even more safe we count the number of times we loop and
	 * bail out after 10 loops, accepting the fact that we might have
	 * temporarily set last_seq not to the true last seq but to an
	 * older one.
	 */
	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
	do {
		last_emitted = rdev->fence_drv[ring].sync_seq[ring];
		seq = radeon_fence_read(rdev, ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted) {
			break;
		}
		/* If we loop over, we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped too many times; leave with the fact
			 * that we might have set an older fence seq as
			 * signaled than the real last seq signaled by
			 * the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

	if (wake) {
		rdev->fence_drv[ring].last_activity = jiffies;
		wake_up_all(&rdev->fence_queue);
	}
}
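/*
 * Editor's note: a worked example of the 32->64 bit extension done above.
 * The hardware location only holds the lower 32 bits of the sequence, so
 * radeon_fence_process() splices the high 32 bits of the last known
 * 64-bit value back in, and takes the high word from last_emitted when a
 * wraparound is detected:
 *
 *	last_seq     = 0x00000001fffffffe	(64-bit software view)
 *	hw read      = 0x00000003		(32 bits, wrapped around)
 *	spliced      = 0x0000000100000003	(< last_seq: wrap detected)
 *	last_emitted = 0x0000000200000005
 *	corrected    = 0x0000000200000003	(high word from last_emitted)
 */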
/**
 * radeon_fence_destroy - destroy a fence
 *
 * @fence: radeon fence object
 *
 * Frees the fence object (all asics).
 */
static void radeon_fence_destroy(struct radeon_fence *fence)
{

	kfree(fence);
}

/**
 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence number
 * @ring: ring index the fence is associated with
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value).  Helper function for
 * radeon_fence_signaled().
 */
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
				      u64 seq, unsigned ring)
{
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	/* poll new last sequence at least once */
	radeon_fence_process(rdev, ring);
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	return false;
}

/**
 * radeon_fence_signaled - check if a fence has signaled
 *
 * @fence: radeon fence object
 *
 * Check if the requested fence has signaled (all asics).
 * Returns true if the fence has signaled or false if it has not.
 */
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	if (!fence) {
		return true;
	}
	if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
		return true;
	}
	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
		fence->seq = RADEON_FENCE_SIGNALED_SEQ;
		return true;
	}
	return false;
}
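/*
 * Editor's note: a hedged sketch of non-blocking polling with the helper
 * above; radeon_fence_signaled() caches the result in fence->seq, so
 * subsequent calls return immediately:
 *
 *	while (!radeon_fence_signaled(fence)) {
 *		// do other work, or yield, instead of blocking in
 *		// radeon_fence_wait()
 *	}
 */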
/**
 * radeon_fence_wait_seq - wait for a specific sequence number
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number we want to wait for
 * @ring: ring index the fence is associated with
 * @intr: use interruptible sleep
 * @lock_ring: whether the ring should be locked or not
 *
 * Wait for the requested sequence number to be written (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number.  Helper function
 * for radeon_fence_wait(), et al.
 * Returns 0 if the sequence number has passed, error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected and the ring is
 * marked as not ready so no further jobs get scheduled until a successful
 * reset.
 */
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
				 unsigned ring, bool intr, bool lock_ring)
{
	unsigned long timeout, last_activity;
	uint64_t seq;
	unsigned i;
	bool signaled;
	int r;

	while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
		if (!rdev->ring[ring].ready) {
			return -EBUSY;
		}

		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
		if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
			/* the normal case, timeout is somewhere before last_activity */
			timeout = rdev->fence_drv[ring].last_activity - timeout;
		} else {
			/* either jiffies wrapped around, or no fence was signaled
			 * in the last 500ms; anyway we will just wait for the
			 * minimum amount and then check for a lockup
			 */
			timeout = 1;
		}
		seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
		/* Save the current last activity value, used to check for GPU lockups */
		last_activity = rdev->fence_drv[ring].last_activity;

		radeon_irq_kms_sw_irq_get(rdev, ring);
		if (intr) {
			r = wait_event_interruptible_timeout(rdev->fence_queue,
				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
				timeout);
		} else {
			r = wait_event_timeout(rdev->fence_queue,
				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
				timeout);
		}
		radeon_irq_kms_sw_irq_put(rdev, ring);
		if (unlikely(r < 0)) {
			return r;
		}

		if (unlikely(!signaled)) {
			/* we were interrupted for some reason and the fence
			 * isn't signaled yet, resume waiting */
			if (r) {
				continue;
			}

			/* check if the sequence value has changed since last_activity */
			if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
				continue;
			}

			if (lock_ring) {
				lockmgr(&rdev->ring_lock, LK_EXCLUSIVE);
			}

			/* test if somebody else has already decided that this is a lockup */
			if (last_activity != rdev->fence_drv[ring].last_activity) {
				if (lock_ring) {
					lockmgr(&rdev->ring_lock, LK_RELEASE);
				}
				continue;
			}

			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
				/* good news, we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016jx last fence id 0x%016jx)\n",
					 (uintmax_t)target_seq, (uintmax_t)seq);

				/* change last activity so nobody else thinks there is a lockup */
				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
					rdev->fence_drv[i].last_activity = jiffies;
				}

				/* mark the ring as not ready any more */
				rdev->ring[ring].ready = false;
				if (lock_ring) {
					lockmgr(&rdev->ring_lock, LK_RELEASE);
				}
				return -EDEADLK;
			}

			if (lock_ring) {
				lockmgr(&rdev->ring_lock, LK_RELEASE);
			}
		}
	}
	return 0;
}
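/*
 * Editor's note: an illustrative (hypothetical) caller pattern for the
 * -EDEADLK return above; on a detected lockup the ring is marked not
 * ready, so callers are expected to escalate to a GPU reset path:
 *
 *	r = radeon_fence_wait(fence, false);
 *	if (r == -EDEADLK) {
 *		// GPU lockup: schedule/perform a GPU reset before retrying
 *	}
 */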
/**
 * radeon_fence_wait - wait for a fence to signal
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns 0 if the fence has passed, error for all other cases.
 */
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	int r;

	if (fence == NULL) {
		WARN(1, "Querying an invalid fence: %p!\n", fence);
		return -EINVAL;
	}

	r = radeon_fence_wait_seq(fence->rdev, fence->seq,
				  fence->ring, intr, true);
	if (r) {
		return r;
	}
	fence->seq = RADEON_FENCE_SIGNALED_SEQ;
	return 0;
}

static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
	unsigned i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
			return true;
		}
	}
	return false;
}

/**
 * radeon_fence_wait_any_seq - wait for a sequence number on any ring
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics).  The sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number.  Helper function
 * for radeon_fence_wait_any(), et al.
 * Returns 0 if the sequence number has passed, error for all other cases.
 */
static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
				     u64 *target_seq, bool intr)
{
	unsigned long timeout, last_activity, tmp;
	unsigned i, ring = RADEON_NUM_RINGS;
	bool signaled;
	int r;

	for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i]) {
			continue;
		}

		/* use the most recent one as indicator */
		if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
			last_activity = rdev->fence_drv[i].last_activity;
		}

		/* For lockup detection just pick the lowest ring we are
		 * actively waiting for
		 */
		if (i < ring) {
			ring = i;
		}
	}

	/* nothing to wait for? */
	if (ring == RADEON_NUM_RINGS) {
		return -ENOENT;
	}

	while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
		if (time_after(last_activity, timeout)) {
			/* the normal case, timeout is somewhere before last_activity */
			timeout = last_activity - timeout;
		} else {
			/* either jiffies wrapped around, or no fence was signaled
			 * in the last 500ms; anyway we will just wait for the
			 * minimum amount and then check for a lockup
			 */
			timeout = 1;
		}

		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (target_seq[i]) {
				radeon_irq_kms_sw_irq_get(rdev, i);
			}
		}
		if (intr) {
			r = wait_event_interruptible_timeout(rdev->fence_queue,
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
				timeout);
		} else {
			r = wait_event_timeout(rdev->fence_queue,
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
				timeout);
		}
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (target_seq[i]) {
				radeon_irq_kms_sw_irq_put(rdev, i);
			}
		}
		if (unlikely(r < 0)) {
			return r;
		}

		if (unlikely(!signaled)) {
			/* we were interrupted for some reason and the fence
			 * isn't signaled yet, resume waiting */
			if (r) {
				continue;
			}

			lockmgr(&rdev->ring_lock, LK_EXCLUSIVE);
			for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
				if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
					tmp = rdev->fence_drv[i].last_activity;
				}
			}
			/* test if somebody else has already decided that this is a lockup */
			if (last_activity != tmp) {
				last_activity = tmp;
				lockmgr(&rdev->ring_lock, LK_RELEASE);
				continue;
			}

			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
				/* good news, we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016jx)\n",
					 (uintmax_t)target_seq[ring]);

				/* change last activity so nobody else thinks there is a lockup */
				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
					rdev->fence_drv[i].last_activity = jiffies;
				}

				/* mark the ring as not ready any more */
				rdev->ring[ring].ready = false;
				lockmgr(&rdev->ring_lock, LK_RELEASE);
				return -EDEADLK;
			}
			lockmgr(&rdev->ring_lock, LK_RELEASE);
		}
	}
	return 0;
}
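/*
 * Editor's note: a hedged sketch of how a caller (e.g. the suballocator)
 * might use the any-ring wait below, with one fence slot per ring;
 * gfx_fence is a hypothetical fence obtained earlier from emit:
 *
 *	struct radeon_fence *fences[RADEON_NUM_RINGS] = { NULL };
 *
 *	fences[RADEON_RING_TYPE_GFX_INDEX] = gfx_fence;
 *	r = radeon_fence_wait_any(rdev, fences, true);	// interruptible
 */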
/**
 * radeon_fence_wait_any - wait for a fence to signal on any ring
 *
 * @rdev: radeon device pointer
 * @fences: radeon fence object(s)
 * @intr: use interruptible sleep
 *
 * Wait for any requested fence to signal (all asics).  The fence
 * array is indexed by ring id.  @intr selects whether to use
 * interruptible (true) or non-interruptible (false) sleep when
 * waiting for the fences.  Used by the suballocator.
 * Returns 0 if any fence has passed, error for all other cases.
 */
int radeon_fence_wait_any(struct radeon_device *rdev,
			  struct radeon_fence **fences,
			  bool intr)
{
	uint64_t seq[RADEON_NUM_RINGS];
	unsigned i;
	int r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		seq[i] = 0;

		if (!fences[i]) {
			continue;
		}

		if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
			/* something was already signaled */
			return 0;
		}

		seq[i] = fences[i]->seq;
	}

	r = radeon_fence_wait_any_seq(rdev, seq, intr);
	if (r) {
		return r;
	}
	return 0;
}

/**
 * radeon_fence_wait_next_locked - wait for the next fence to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold the ring lock.
 */
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
{
	uint64_t seq;

	seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
	if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
		/* nothing to wait for, last_seq is
		   already the last emitted fence */
		return -ENOENT;
	}
	return radeon_fence_wait_seq(rdev, seq, ring, false, false);
}

/**
 * radeon_fence_wait_empty_locked - wait for all fences to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold the ring lock.
 */
int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
{
	uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
	int r;

	r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
	if (r) {
		if (r == -EDEADLK) {
			return -EDEADLK;
		}
		dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
			ring, r);
	}
	return 0;
}

/**
 * radeon_fence_ref - take a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	refcount_acquire(&fence->kref);
	return fence;
}

/**
 * radeon_fence_unref - remove a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Remove a reference on a fence (all asics).
 */
void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		if (refcount_release(&tmp->kref)) {
			radeon_fence_destroy(tmp);
		}
	}
}
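/*
 * Editor's note: a minimal sketch of sharing a fence between two owners
 * with the refcounting helpers above; each owner drops its own reference
 * and the last unref frees the fence (other->fence is hypothetical):
 *
 *	other->fence = radeon_fence_ref(fence);	// second owner
 *	...
 *	radeon_fence_unref(&other->fence);
 *	radeon_fence_unref(&fence);		// last ref: fence is freed
 */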
/**
 * radeon_fence_count_emitted - get the count of emitted fences
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
	uint64_t emitted;

	/* We are not protected by the ring lock when reading the last
	 * sequence, but it's ok to report a slightly wrong fence count here.
	 */
	radeon_fence_process(rdev, ring);
	emitted = rdev->fence_drv[ring].sync_seq[ring]
		- atomic64_read(&rdev->fence_drv[ring].last_seq);
	/* to avoid a 32-bit wraparound */
	if (emitted > 0x10000000) {
		emitted = 0x10000000;
	}
	return (unsigned)emitted;
}

/**
 * radeon_fence_need_sync - do we need a semaphore
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics).  If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * not.
 */
bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *fdrv;

	if (!fence) {
		return false;
	}

	if (fence->ring == dst_ring) {
		return false;
	}

	/* we are protected by the ring mutex */
	fdrv = &fence->rdev->fence_drv[dst_ring];
	if (fence->seq <= fdrv->sync_seq[fence->ring]) {
		return false;
	}

	return true;
}

/**
 * radeon_fence_note_sync - record the sync point
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *dst, *src;
	unsigned i;

	if (!fence) {
		return;
	}

	if (fence->ring == dst_ring) {
		return;
	}

	/* we are protected by the ring mutex */
	src = &fence->rdev->fence_drv[fence->ring];
	dst = &fence->rdev->fence_drv[dst_ring];
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (i == dst_ring) {
			continue;
		}
		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
	}
}
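/*
 * Editor's note: a hedged sketch of the inter-ring sync pattern built on
 * the two helpers above; the actual semaphore emission is done elsewhere
 * in the driver:
 *
 *	if (radeon_fence_need_sync(fence, dst_ring)) {
 *		// emit a semaphore wait on dst_ring here, then record it
 *		radeon_fence_note_sync(fence, dst_ring);
 *	}
 */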
/**
 * radeon_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
	uint64_t index;
	int r;

	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
	if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
		rdev->fence_drv[ring].scratch_reg = 0;
		if (ring != R600_RING_TYPE_UVD_INDEX) {
			index = R600_WB_EVENT_OFFSET + ring * 4;
			rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
			rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr +
							 index;
		} else {
			/* put the fence directly behind the firmware */
			index = ALIGN(rdev->uvd_fw->datasize, 8);
			rdev->fence_drv[ring].cpu_addr = (void*)((uint8_t*)rdev->uvd.cpu_addr + index);
			rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
		}
	} else {
		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
		if (r) {
			dev_err(rdev->dev, "fence failed to get scratch register\n");
			return r;
		}
		index = RADEON_WB_SCRATCH_OFFSET +
			rdev->fence_drv[ring].scratch_reg -
			rdev->scratch.reg_base;
		rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
		rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
	}
	radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
	rdev->fence_drv[ring].initialized = true;
	dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016jx and cpu addr 0x%p\n",
		 ring, (uintmax_t)rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
	return 0;
}

/**
 * radeon_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for radeon_fence_driver_init().
 */
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
	int i;

	rdev->fence_drv[ring].scratch_reg = -1;
	rdev->fence_drv[ring].cpu_addr = NULL;
	rdev->fence_drv[ring].gpu_addr = 0;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		rdev->fence_drv[ring].sync_seq[i] = 0;
	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
	rdev->fence_drv[ring].last_activity = jiffies;
	rdev->fence_drv[ring].initialized = false;
}

/**
 * radeon_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * radeon_fence_driver_start_ring().
 * Returns 0 for success.
 */
int radeon_fence_driver_init(struct radeon_device *rdev)
{
	int ring;

	init_waitqueue_head(&rdev->fence_queue);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		radeon_fence_driver_init_ring(rdev, ring);
	}
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}
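/*
 * Editor's note: a sketch of the expected init ordering, as described in
 * the comments above; an asic's startup path initializes all rings once
 * and then starts the fence driver only on the rings it actually has:
 *
 *	r = radeon_fence_driver_init(rdev);		// once, zeroes all rings
 *	...
 *	r = radeon_fence_driver_start_ring(rdev, ring);	// per available ring
 */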
/**
 * radeon_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	int ring, r;

	lockmgr(&rdev->ring_lock, LK_EXCLUSIVE);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		r = radeon_fence_wait_empty_locked(rdev, ring);
		if (r) {
			/* no need to trigger a GPU reset as we are unloading */
			radeon_fence_driver_force_completion(rdev);
		}
		wake_up_all(&rdev->fence_queue);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		rdev->fence_drv[ring].initialized = false;
	}
	lockmgr(&rdev->ring_lock, LK_RELEASE);
}

/**
 * radeon_fence_driver_force_completion - force all fence waiters to complete
 *
 * @rdev: radeon device pointer
 *
 * In case of GPU reset failure, make sure no process keeps waiting on a
 * fence that will never complete.
 */
void radeon_fence_driver_force_completion(struct radeon_device *rdev)
{
	int ring;

	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
	}
}


/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i, j;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic_load_acq_64(&rdev->fence_drv[i].last_seq));
		seq_printf(m, "Last emitted 0x%016llx\n",
			   rdev->fence_drv[i].sync_seq[i]);

		for (j = 0; j < RADEON_NUM_RINGS; ++j) {
			if (i != j && rdev->fence_drv[j].initialized)
				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
					   j, rdev->fence_drv[i].sync_seq[j]);
		}
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
	return 0;
#endif
}