/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "radeon_reg.h"
#include "radeon.h"
#ifdef TRACE_TODO
#include "radeon_trace.h"
#endif

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.  Whether
 * we use a scratch register or memory location depends on the asic
 * and whether writeback is enabled.
 */

/**
 * radeon_fence_write - write a fence value
 *
 * @rdev: radeon_device pointer
 * @seq: sequence number to write
 * @ring: ring index the fence is associated with
 *
 * Writes a fence value to memory or a scratch register (all asics).
 */
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];

	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		if (drv->cpu_addr) {
			*drv->cpu_addr = cpu_to_le32(seq);
		}
	} else {
		WREG32(drv->scratch_reg, seq);
	}
}
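/*
 * Typical fence lifecycle, as a rough usage sketch (assuming the caller
 * holds the ring lock around emission, as the rest of this driver does;
 * error handling is elided):
 *
 *	struct radeon_fence *fence;
 *	int r;
 *
 *	r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
 *	if (r == 0) {
 *		r = radeon_fence_wait(fence, false);
 *		radeon_fence_unref(&fence);
 *	}
 */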
/**
 * radeon_fence_read - read a fence value
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Reads a fence value from memory or a scratch register (all asics).
 * Returns the value of the fence read from memory or register.
 */
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	u32 seq = 0;

	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		if (drv->cpu_addr) {
			seq = le32_to_cpu(*drv->cpu_addr);
		} else {
			seq = lower_32_bits(atomic64_read(&drv->last_seq));
		}
	} else {
		seq = RREG32(drv->scratch_reg);
	}
	return seq;
}

/**
 * radeon_fence_schedule_check - schedule lockup check
 *
 * @rdev: radeon_device pointer
 * @ring: ring index we should work with
 *
 * Queues a delayed work item to check for lockups.
 */
static void radeon_fence_schedule_check(struct radeon_device *rdev, int ring)
{
	/*
	 * Do not reset the timer here with mod_delayed_work,
	 * this can livelock in an interaction with TTM delayed destroy.
	 */
	queue_delayed_work(system_power_efficient_wq,
			   &rdev->fence_drv[ring].lockup_work,
			   RADEON_FENCE_JIFFIES_TIMEOUT);
}

/**
 * radeon_fence_emit - emit a fence on the requested ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 * @ring: ring index the fence is associated with
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_fence_emit(struct radeon_device *rdev,
		      struct radeon_fence **fence,
		      int ring)
{
	u64 seq = ++rdev->fence_drv[ring].sync_seq[ring];

	/* we are protected by the ring emission mutex */
	*fence = kmalloc(sizeof(struct radeon_fence), M_DRM, M_WAITOK);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	(*fence)->rdev = rdev;
	(*fence)->seq = seq;
	(*fence)->ring = ring;
	fence_init(&(*fence)->base, &radeon_fence_ops,
		   &rdev->fence_queue.lock, rdev->fence_context + ring, seq);
	radeon_fence_ring_emit(rdev, ring, *fence);
#ifdef TRACE_TODO
	trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
#endif
	radeon_fence_schedule_check(rdev, ring);
	return 0;
}

/**
 * radeon_fence_check_signaled - callback from fence_queue
 *
 * This function is called with the fence_queue lock held, which is also used
 * for the fence locking itself, so unlocked variants are used for
 * fence_signal and remove_wait_queue.
 */
static int radeon_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
{
	struct radeon_fence *fence;
	u64 seq;

	fence = container_of(wait, struct radeon_fence, fence_wake);

	/*
	 * We cannot use radeon_fence_process here because we're already
	 * in the waitqueue, in a call from wake_up_all.
	 */
	seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq);
	if (seq >= fence->seq) {
		int ret = fence_signal_locked(&fence->base);

		if (!ret)
			FENCE_TRACE(&fence->base, "signaled from irq context\n");
		else
			FENCE_TRACE(&fence->base, "was already signaled\n");

		radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring);
		__remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake);
		fence_put(&fence->base);
	} else
		FENCE_TRACE(&fence->base, "pending\n");
	return 0;
}
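/*
 * Sequence bookkeeping in short: sync_seq[ring] is the last sequence number
 * *emitted* on a ring and only ever grows, while last_seq caches the last
 * sequence number the hardware has *signaled*.  A fence with
 * fence->seq <= last_seq has therefore completed.  radeon_fence_activity()
 * below is what folds the 32-bit hardware value back into the 64-bit
 * last_seq.
 */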
/**
 * radeon_fence_activity - check for fence activity
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and calculates the last
 * signaled fence value. Returns true if activity occurred
 * on the ring, and the fence_queue should be woken up.
 */
static bool radeon_fence_activity(struct radeon_device *rdev, int ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process and that
	 * other process needs to update last_seq between the atomic read
	 * and the xchg of the current process.
	 *
	 * Moreover, for this to turn into an infinite loop there need to
	 * be continuously new fences signaled, i.e. radeon_fence_read needs
	 * to return a different value each time for both the currently
	 * polling process and the other process that updates last_seq
	 * between the atomic read and xchg of the current process. And the
	 * value the other process sets as last_seq must be higher than
	 * the seq value we just read, which means that the current process
	 * needs to be interrupted after radeon_fence_read and before the
	 * atomic xchg.
	 *
	 * To be even more safe we count the number of times we loop and
	 * we bail after 10 loops, just accepting the fact that we might
	 * have temporarily set last_seq not to the true real last
	 * seq but to an older one.
	 */
	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
	do {
		last_emitted = rdev->fence_drv[ring].sync_seq[ring];
		seq = radeon_fence_read(rdev, ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted) {
			break;
		}
		/* If we loop over we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped over too many times, leave with the
			 * fact that we might have set an older fence
			 * seq than the current real last seq as signaled
			 * by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

	if (seq < last_emitted)
		radeon_fence_schedule_check(rdev, ring);

	return wake;
}
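/*
 * Worked example of the 64-bit extension above (illustrative values):
 * with last_seq = 0x00000001fffffff0 and last_emitted = 0x0000000200000007,
 * a hardware read of 0x00000005 first yields seq = 0x0000000100000005,
 * which is < last_seq, so the upper 32 bits are taken from last_emitted
 * instead and the result becomes seq = 0x0000000200000005.
 */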
/**
 * radeon_fence_check_lockup - check for hardware lockup
 *
 * @work: delayed work item
 *
 * Checks for fence activity and if there is none, probes
 * the hardware to see whether a lockup occurred.
 */
static void radeon_fence_check_lockup(struct work_struct *work)
{
	struct radeon_fence_driver *fence_drv;
	struct radeon_device *rdev;
	int ring;

	fence_drv = container_of(work, struct radeon_fence_driver,
				 lockup_work.work);
	rdev = fence_drv->rdev;
	ring = fence_drv - &rdev->fence_drv[0];

	if (!down_read_trylock(&rdev->exclusive_lock)) {
		/* just reschedule the check if a reset is going on */
		radeon_fence_schedule_check(rdev, ring);
		return;
	}

	if (fence_drv->delayed_irq && rdev->ddev->irq_enabled) {
		unsigned long irqflags;

		fence_drv->delayed_irq = false;
		spin_lock_irqsave(&rdev->irq.lock, irqflags);
		radeon_irq_set(rdev);
		spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	}

	if (radeon_fence_activity(rdev, ring))
		wake_up_all(&rdev->fence_queue);

	else if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {

		/* good news we believe it's a lockup */
		dev_warn(rdev->dev, "GPU lockup (current fence id "
			 "0x%016lx last fence id 0x%016lx on ring %d)\n",
			 (uint64_t)atomic64_read(&fence_drv->last_seq),
			 fence_drv->sync_seq[ring], ring);

		/* remember that we need a reset */
		rdev->needs_reset = true;
		wake_up_all(&rdev->fence_queue);
	}
	up_read(&rdev->exclusive_lock);
}

/**
 * radeon_fence_process - process a fence
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	if (radeon_fence_activity(rdev, ring))
		wake_up_all(&rdev->fence_queue);
}
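/*
 * Note on scheduling: radeon_fence_activity() re-queues the lockup work via
 * radeon_fence_schedule_check() for as long as emitted fences are still
 * outstanding, so the lockup check keeps running until the ring goes idle,
 * even if no fence interrupt ever fires.
 */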
/**
 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence number
 * @ring: ring index the fence is associated with
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= the requested value) or false if it has not (current fence
 * value is < the requested value). Helper function for
 * radeon_fence_signaled().
 */
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
				      u64 seq, unsigned ring)
{
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	/* poll new last sequence at least once */
	radeon_fence_process(rdev, ring);
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	return false;
}

static bool radeon_fence_is_signaled(struct fence *f)
{
	struct radeon_fence *fence = to_radeon_fence(f);
	struct radeon_device *rdev = fence->rdev;
	unsigned ring = fence->ring;
	u64 seq = fence->seq;

	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}

	if (down_read_trylock(&rdev->exclusive_lock)) {
		radeon_fence_process(rdev, ring);
		up_read(&rdev->exclusive_lock);

		if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
			return true;
		}
	}
	return false;
}

/**
 * radeon_fence_enable_signaling - enable signalling on fence
 * @f: fence
 *
 * This function is called with the fence_queue lock held, and adds a
 * callback to fence_queue that checks if this fence is signaled, and if so
 * it signals the fence and removes itself.
 */
static bool radeon_fence_enable_signaling(struct fence *f)
{
	struct radeon_fence *fence = to_radeon_fence(f);
	struct radeon_device *rdev = fence->rdev;

	if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq)
		return false;

	if (down_read_trylock(&rdev->exclusive_lock)) {
		radeon_irq_kms_sw_irq_get(rdev, fence->ring);

		if (radeon_fence_activity(rdev, fence->ring))
			wake_up_all_locked(&rdev->fence_queue);

		/* did fence get signaled after we enabled the sw irq? */
		if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq) {
			radeon_irq_kms_sw_irq_put(rdev, fence->ring);
			up_read(&rdev->exclusive_lock);
			return false;
		}

		up_read(&rdev->exclusive_lock);
	} else {
		/* we're probably in a lockup, let's not fiddle too much */
		if (radeon_irq_kms_sw_irq_get_delayed(rdev, fence->ring))
			rdev->fence_drv[fence->ring].delayed_irq = true;
		radeon_fence_schedule_check(rdev, fence->ring);
	}

	fence->fence_wake.flags = 0;
	fence->fence_wake.private = NULL;
	fence->fence_wake.func = radeon_fence_check_signaled;
	__add_wait_queue(&rdev->fence_queue, &fence->fence_wake);
	fence_get(f);

	FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring);
	return true;
}

/**
 * radeon_fence_signaled - check if a fence has signaled
 *
 * @fence: radeon fence object
 *
 * Check if the requested fence has signaled (all asics).
 * Returns true if the fence has signaled or false if it has not.
 */
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	if (!fence)
		return true;

	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
		int ret;

		ret = fence_signal(&fence->base);
		if (!ret)
			FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n");
		return true;
	}
	return false;
}
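/*
 * The enable_signaling path above is what makes waiting on a radeon fence
 * interrupt driven rather than polled: it takes a software IRQ reference
 * and installs radeon_fence_check_signaled() on fence_queue, so that the
 * wake_up_all() issued on fence activity signals the fence and drops the
 * IRQ reference again.
 */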
/**
 * radeon_fence_any_seq_signaled - check if any sequence number is signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence numbers
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if any has signaled (current value is >= requested value)
 * or false if it has not. Helper function for radeon_fence_wait_seq.
 */
static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
	unsigned i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i))
			return true;
	}
	return false;
}

/**
 * radeon_fence_wait_seq_timeout - wait for specific sequence numbers
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics). The sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number. Helper function
 * for radeon_fence_wait_*().
 * Returns the remaining time if the sequence number has passed, 0 when
 * the wait timed out, or an error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected.
 */
static long radeon_fence_wait_seq_timeout(struct radeon_device *rdev,
					  u64 *target_seq, bool intr,
					  long timeout)
{
	long r;
	int i;

	if (radeon_fence_any_seq_signaled(rdev, target_seq))
		return timeout;

	/* enable IRQs and tracing */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i])
			continue;

#ifdef TRACE_TODO
		trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
#endif
		radeon_irq_kms_sw_irq_get(rdev, i);
	}

	if (intr) {
		r = wait_event_interruptible_timeout(rdev->fence_queue, (
			radeon_fence_any_seq_signaled(rdev, target_seq)
			 || rdev->needs_reset), timeout);
	} else {
		r = wait_event_timeout(rdev->fence_queue, (
			radeon_fence_any_seq_signaled(rdev, target_seq)
			 || rdev->needs_reset), timeout);
	}

	if (rdev->needs_reset)
		r = -EDEADLK;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i])
			continue;

		radeon_irq_kms_sw_irq_put(rdev, i);
#ifdef TRACE_TODO
		trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
#endif
	}

	return r;
}

/**
 * radeon_fence_wait - wait for a fence to signal
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns 0 if the fence has passed, error for all other cases.
 */
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	u64 seq[RADEON_NUM_RINGS] = {};
	long r;

	seq[fence->ring] = fence->seq;
	r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
	if (r < 0) {
		return r;
	}

	r = fence_signal(&fence->base);
	if (!r)
		FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
	return 0;
}
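/*
 * Callers of radeon_fence_wait() are expected to treat -EDEADLK specially:
 * it does not mean the fence was lost, only that a GPU lockup was detected
 * (needs_reset was set) and the caller should back off so that the reset
 * path can run.
 */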
/**
 * radeon_fence_wait_any - wait for a fence to signal on any ring
 *
 * @rdev: radeon device pointer
 * @fences: radeon fence object(s)
 * @intr: use interruptible sleep
 *
 * Wait for any requested fence to signal (all asics). The fence
 * array is indexed by ring id. @intr selects whether to use
 * interruptible (true) or non-interruptible (false) sleep when
 * waiting for the fences. Used by the suballocator.
 * Returns 0 if any fence has passed, error for all other cases.
 */
int radeon_fence_wait_any(struct radeon_device *rdev,
			  struct radeon_fence **fences,
			  bool intr)
{
	u64 seq[RADEON_NUM_RINGS];
	unsigned i, num_rings = 0;
	long r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		seq[i] = 0;

		if (!fences[i]) {
			continue;
		}

		seq[i] = fences[i]->seq;
		++num_rings;
	}

	/* nothing to wait for ? */
	if (num_rings == 0)
		return -ENOENT;

	r = radeon_fence_wait_seq_timeout(rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
	if (r < 0) {
		return r;
	}
	return 0;
}

/**
 * radeon_fence_wait_next - wait for the next fence to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold the ring lock.
 */
int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
{
	u64 seq[RADEON_NUM_RINGS] = {};
	long r;

	seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
	if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
		/* nothing to wait for, last_seq is already
		   the last emitted fence */
		return -ENOENT;
	}
	r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;
	return 0;
}

/**
 * radeon_fence_wait_empty - wait for all fences to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold the ring lock.
 */
int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
{
	u64 seq[RADEON_NUM_RINGS] = {};
	long r;

	seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
	if (!seq[ring])
		return 0;

	r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
	if (r < 0) {
		if (r == -EDEADLK)
			return -EDEADLK;

		dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
			ring, r);
	}
	return 0;
}

/**
 * radeon_fence_ref - take a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	fence_get(&fence->base);
	return fence;
}

/**
 * radeon_fence_unref - remove a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Remove a reference on a fence (all asics).
 */
void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		fence_put(&tmp->base);
	}
}
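/*
 * Usage sketch for radeon_fence_wait_any() (illustrative, not a real call
 * site): the fence array is indexed by ring id, with NULL entries for rings
 * we do not care about:
 *
 *	struct radeon_fence *fences[RADEON_NUM_RINGS] = {};
 *
 *	fences[RADEON_RING_TYPE_GFX_INDEX] = gfx_fence;
 *	fences[R600_RING_TYPE_DMA_INDEX] = dma_fence;
 *	r = radeon_fence_wait_any(rdev, fences, false);
 */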
/**
 * radeon_fence_count_emitted - get the count of emitted fences
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring. Used by the
 * dynpm code to track ring activity.
 */
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
	uint64_t emitted;

	/* We are not protected by the ring lock when reading the last
	 * sequence, but it's ok to report a slightly wrong fence count here.
	 */
	radeon_fence_process(rdev, ring);
	emitted = rdev->fence_drv[ring].sync_seq[ring]
		- atomic64_read(&rdev->fence_drv[ring].last_seq);
	/* to avoid a 32-bit wraparound */
	if (emitted > 0x10000000) {
		emitted = 0x10000000;
	}
	return (unsigned)emitted;
}

/**
 * radeon_fence_need_sync - do we need a semaphore
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics). If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * not.
 */
bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *fdrv;

	if (!fence) {
		return false;
	}

	if (fence->ring == dst_ring) {
		return false;
	}

	/* we are protected by the ring mutex */
	fdrv = &fence->rdev->fence_drv[dst_ring];
	if (fence->seq <= fdrv->sync_seq[fence->ring]) {
		return false;
	}

	return true;
}

/**
 * radeon_fence_note_sync - record the sync point
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *dst, *src;
	unsigned i;

	if (!fence) {
		return;
	}

	if (fence->ring == dst_ring) {
		return;
	}

	/* we are protected by the ring mutex */
	src = &fence->rdev->fence_drv[fence->ring];
	dst = &fence->rdev->fence_drv[dst_ring];
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (i == dst_ring) {
			continue;
		}
		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
	}
}
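/*
 * The two helpers above are meant to be used together when building a
 * cross-ring dependency; roughly (a sketch, assuming the ring mutex is held
 * and semaphore emission succeeds):
 *
 *	if (radeon_fence_need_sync(fence, dst_ring)) {
 *		... emit a semaphore wait on dst_ring ...
 *		radeon_fence_note_sync(fence, dst_ring);
 *	}
 */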
/**
 * radeon_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
	uint64_t index;
	int r;

	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
	if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
		rdev->fence_drv[ring].scratch_reg = 0;
		if (ring != R600_RING_TYPE_UVD_INDEX) {
			index = R600_WB_EVENT_OFFSET + ring * 4;
			rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
			rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr +
							 index;

		} else {
			/* put fence directly behind firmware */
			index = ALIGN(rdev->uvd_fw->datasize, 8);
			rdev->fence_drv[ring].cpu_addr = (void*)((uint8_t*)rdev->uvd.cpu_addr + index);
			rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
		}

	} else {
		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
		if (r) {
			dev_err(rdev->dev, "fence failed to get scratch register\n");
			return r;
		}
		index = RADEON_WB_SCRATCH_OFFSET +
			rdev->fence_drv[ring].scratch_reg -
			rdev->scratch.reg_base;
		rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
		rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
	}
	radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
	rdev->fence_drv[ring].initialized = true;
	dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016lx and cpu addr 0x%p\n",
		 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
	return 0;
}

/**
 * radeon_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for radeon_fence_driver_init().
 */
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
	int i;

	rdev->fence_drv[ring].scratch_reg = -1;
	rdev->fence_drv[ring].cpu_addr = NULL;
	rdev->fence_drv[ring].gpu_addr = 0;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		rdev->fence_drv[ring].sync_seq[i] = 0;
	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
	rdev->fence_drv[ring].initialized = false;
	INIT_DELAYED_WORK(&rdev->fence_drv[ring].lockup_work,
			  radeon_fence_check_lockup);
	rdev->fence_drv[ring].rdev = rdev;
}

/**
 * radeon_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * radeon_fence_driver_start_ring().
 * Returns 0 for success.
 */
int radeon_fence_driver_init(struct radeon_device *rdev)
{
	int ring;

	init_waitqueue_head(&rdev->fence_queue);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		radeon_fence_driver_init_ring(rdev, ring);
	}
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}
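/*
 * Note the split above: radeon_fence_driver_init() only sets up software
 * state for every possible ring, while the actual fence location (writeback
 * page, UVD area or scratch register) is picked later, per ring, by
 * radeon_fence_driver_start_ring() once the asic code knows which rings
 * exist.
 */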
/**
 * radeon_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	int ring, r;

	mutex_lock(&rdev->ring_lock);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		r = radeon_fence_wait_empty(rdev, ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			radeon_fence_driver_force_completion(rdev, ring);
		}
		cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
		wake_up_all(&rdev->fence_queue);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		rdev->fence_drv[ring].initialized = false;
	}
	mutex_unlock(&rdev->ring_lock);
}

/**
 * radeon_fence_driver_force_completion - force all fence waiters to complete
 *
 * @rdev: radeon device pointer
 * @ring: the ring to complete
 *
 * In case of a GPU reset failure, make sure no process keeps waiting on a
 * fence that will never complete.
 */
void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring)
{
	if (rdev->fence_drv[ring].initialized) {
		radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
		cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
	}
}


/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i, j;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		radeon_fence_process(rdev, i);

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
		seq_printf(m, "Last emitted 0x%016llx\n",
			   rdev->fence_drv[i].sync_seq[i]);

		for (j = 0; j < RADEON_NUM_RINGS; ++j) {
			if (i != j && rdev->fence_drv[j].initialized)
				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
					   j, rdev->fence_drv[i].sync_seq[j]);
		}
	}
	return 0;
}
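/*
 * Sample radeon_fence_info output (illustrative values only):
 *
 *	--- ring 0 ---
 *	Last signaled fence 0x0000000000001000
 *	Last emitted 0x0000000000001002
 *	Last sync to ring 3 0x0000000000000ff0
 */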
/**
 * radeon_debugfs_gpu_reset - manually trigger a gpu reset
 *
 * Manually trigger a gpu reset at the next fence wait.
 */
static int radeon_debugfs_gpu_reset(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	down_read(&rdev->exclusive_lock);
	seq_printf(m, "%d\n", rdev->needs_reset);
	rdev->needs_reset = true;
	wake_up_all(&rdev->fence_queue);
	up_read(&rdev->exclusive_lock);

	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
	{"radeon_gpu_reset", &radeon_debugfs_gpu_reset, 0, NULL}
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 2);
#else
	return 0;
#endif
}

static const char *radeon_fence_get_driver_name(struct fence *fence)
{
	return "radeon";
}

static const char *radeon_fence_get_timeline_name(struct fence *f)
{
	struct radeon_fence *fence = to_radeon_fence(f);

	switch (fence->ring) {
	case RADEON_RING_TYPE_GFX_INDEX: return "radeon.gfx";
	case CAYMAN_RING_TYPE_CP1_INDEX: return "radeon.cp1";
	case CAYMAN_RING_TYPE_CP2_INDEX: return "radeon.cp2";
	case R600_RING_TYPE_DMA_INDEX: return "radeon.dma";
	case CAYMAN_RING_TYPE_DMA1_INDEX: return "radeon.dma1";
	case R600_RING_TYPE_UVD_INDEX: return "radeon.uvd";
	case TN_RING_TYPE_VCE1_INDEX: return "radeon.vce1";
	case TN_RING_TYPE_VCE2_INDEX: return "radeon.vce2";
	default: WARN_ON_ONCE(1); return "radeon.unk";
	}
}

static inline bool radeon_test_signaled(struct radeon_fence *fence)
{
	/* XXX: This flag is probably not set as it should */
	return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
}
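/*
 * Note on the #ifdef __DragonFly__ blocks in radeon_fence_default_wait()
 * below: the extra "|| 1" makes the wait condition unconditionally true, so
 * the wait returns without actually sleeping on the queue.  Together with
 * the XXX remarks here and above, this looks like a temporary porting
 * workaround rather than intended behaviour.
 */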
static signed long radeon_fence_default_wait(struct fence *f, bool intr,
					     signed long t)
{
	struct radeon_fence *fence = to_radeon_fence(f);
	struct radeon_device *rdev = fence->rdev;
	bool signaled;

	fence_enable_sw_signaling(&fence->base);

	/*
	 * This function has to return -EDEADLK, but cannot hold
	 * exclusive_lock during the wait because some callers
	 * may already hold it.  This means checking needs_reset without
	 * the lock, and not fiddling with any gpu internals.
	 *
	 * The callback installed with fence_enable_sw_signaling will
	 * run before our wait_event_*timeout call, so we will see
	 * both the signaled fence and the changes to needs_reset.
	 */

	if (intr)
		t = wait_event_interruptible_timeout(rdev->fence_queue,
			/* XXX: there is something very wrong here */
#ifdef __DragonFly__
			((signaled = radeon_test_signaled(fence)) || 1 ||
#else
			((signaled = radeon_test_signaled(fence)) ||
#endif
			rdev->needs_reset), t);
	else
		t = wait_event_timeout(rdev->fence_queue,
#ifdef __DragonFly__
			((signaled = radeon_test_signaled(fence)) || 1 ||
#else
			((signaled = radeon_test_signaled(fence)) ||
#endif
			rdev->needs_reset), t);

	if (t > 0 && !signaled)
		return -EDEADLK;
	return t;
}

const struct fence_ops radeon_fence_ops = {
	.get_driver_name = radeon_fence_get_driver_name,
	.get_timeline_name = radeon_fence_get_timeline_name,
	.enable_signaling = radeon_fence_enable_signaling,
	.signaled = radeon_fence_is_signaled,
	.wait = radeon_fence_default_wait,
	.release = NULL,
};