/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 *
 * $FreeBSD: head/sys/dev/drm2/radeon/radeon_fence.c 254885 2013-08-25 19:37:15Z dumbbell $
 */
#include <drm/drmP.h>
#include "radeon_reg.h"
#include "radeon.h"
#ifdef DUMBBELL_WIP
#include "radeon_trace.h"
#endif /* DUMBBELL_WIP */

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.  Whether
 * we use a scratch register or a memory location depends on
 * the asic and whether writeback is enabled.
 */

/**
 * radeon_fence_write - write a fence value
 *
 * @rdev: radeon_device pointer
 * @seq: sequence number to write
 * @ring: ring index the fence is associated with
 *
 * Writes a fence value to memory or a scratch register (all asics).
 */
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];

	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		if (drv->cpu_addr) {
			*drv->cpu_addr = cpu_to_le32(seq);
		}
	} else {
		WREG32(drv->scratch_reg, seq);
	}
}

/**
 * radeon_fence_read - read a fence value
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Reads a fence value from memory or a scratch register (all asics).
 * Returns the value of the fence read from memory or register.
 */
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	u32 seq = 0;

	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		if (drv->cpu_addr) {
			seq = le32_to_cpu(*drv->cpu_addr);
		} else {
			seq = lower_32_bits(atomic64_read(&drv->last_seq));
		}
	} else {
		seq = RREG32(drv->scratch_reg);
	}
	return seq;
}
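/*
 * Note: the hardware only ever stores the low 32 bits of a sequence
 * number, so the value from radeon_fence_read() cannot be compared
 * directly against a 64-bit fence seq.  radeon_fence_activity() below
 * extends it; an illustrative sketch of that extension (names are local
 * to this example):
 *
 *	u64 seq32 = radeon_fence_read(rdev, ring);
 *	u64 seq = seq32 | (last_seq & 0xffffffff00000000ULL);
 *	if (seq < last_seq)	// the low 32 bits wrapped
 *		seq = seq32 | (last_emitted & 0xffffffff00000000ULL);
 */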
/**
 * radeon_fence_schedule_check - schedule lockup check
 *
 * @rdev: radeon_device pointer
 * @ring: ring index we should work with
 *
 * Queues a delayed work item to check for lockups.
 */
static void radeon_fence_schedule_check(struct radeon_device *rdev, int ring)
{
	/*
	 * Do not reset the timer here with mod_delayed_work,
	 * this can livelock in an interaction with TTM delayed destroy.
	 */
	queue_delayed_work(system_power_efficient_wq,
			   &rdev->fence_drv[ring].lockup_work,
			   RADEON_FENCE_JIFFIES_TIMEOUT);
}

/**
 * radeon_fence_emit - emit a fence on the requested ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 * @ring: ring index the fence is associated with
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_fence_emit(struct radeon_device *rdev,
		      struct radeon_fence **fence,
		      int ring)
{
	/* we are protected by the ring emission mutex */
	*fence = kmalloc(sizeof(struct radeon_fence), M_DRM, M_WAITOK);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	refcount_init(&((*fence)->kref), 1);
	(*fence)->rdev = rdev;
	(*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
	(*fence)->ring = ring;
	radeon_fence_ring_emit(rdev, ring, *fence);
#ifdef TRACE_TODO
	trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
#endif
	radeon_fence_schedule_check(rdev, ring);
	return 0;
}
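/*
 * Usage sketch (illustrative, not verbatim driver code): callers emit a
 * fence right after writing their commands, while still holding the ring
 * emission lock that radeon_fence_emit() relies on:
 *
 *	struct radeon_fence *fence;
 *	int r;
 *
 *	r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
 *	if (r)
 *		return r;
 */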
/**
 * radeon_fence_activity - check for fence activity
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and calculates the last
 * signaled fence value.  Returns true if activity occurred
 * on the ring and the fence_queue should be woken up.
 */
static bool radeon_fence_activity(struct radeon_device *rdev, int ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen.  For it to happen, the current polling
	 * process needs to be interrupted by another process, and that
	 * other process needs to update last_seq between the atomic read
	 * and the xchg of the current process.
	 *
	 * Moreover, for the loop to be infinite, there need to be
	 * continuously new fences signaled, i.e. radeon_fence_read needs
	 * to return a different value each time for both the currently
	 * polling process and the other process that updates last_seq
	 * between the atomic read and xchg of the current process.  And
	 * the value the other process sets as last_seq must be higher
	 * than the seq value we just read, which means the current
	 * process must be interrupted after radeon_fence_read and before
	 * the atomic xchg.
	 *
	 * To be even more safe we count the number of times we loop and
	 * bail after 10 iterations, accepting the fact that we might
	 * have temporarily set last_seq not to the real last signaled
	 * seq but to an older one.
	 */
	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
	do {
		last_emitted = rdev->fence_drv[ring].sync_seq[ring];
		seq = radeon_fence_read(rdev, ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted) {
			break;
		}
		/* If we loop over, we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped over too many times; leave with the
			 * fact that we might have set an older fence
			 * seq than the real last seq signaled by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

	if (seq < last_emitted)
		radeon_fence_schedule_check(rdev, ring);

	return wake;
}

/**
 * radeon_fence_check_lockup - check for hardware lockup
 *
 * @work: delayed work item
 *
 * Checks for fence activity and, if there is none, probes
 * the hardware to see if a lockup occurred.
 */
static void radeon_fence_check_lockup(struct work_struct *work)
{
	struct radeon_fence_driver *fence_drv;
	struct radeon_device *rdev;
	int ring;

	fence_drv = container_of(work, struct radeon_fence_driver,
				 lockup_work.work);
	rdev = fence_drv->rdev;
	ring = fence_drv - &rdev->fence_drv[0];

	if (lockmgr(&rdev->exclusive_lock, LK_EXCLUSIVE|LK_NOWAIT)) {
		/* just reschedule the check if a reset is going on */
		radeon_fence_schedule_check(rdev, ring);
		return;
	}

	if (radeon_fence_activity(rdev, ring)) {
		wake_up_all(&rdev->fence_queue);
	} else if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
		/* good news, we believe it's a lockup */
		dev_warn(rdev->dev, "GPU lockup (current fence id "
			 "0x%016lx last fence id 0x%016lx on ring %d)\n",
			 (uint64_t)atomic64_read(&fence_drv->last_seq),
			 fence_drv->sync_seq[ring], ring);

		/* remember that we need a reset */
		rdev->needs_reset = true;
		wake_up_all(&rdev->fence_queue);
	}
	lockmgr(&rdev->exclusive_lock, LK_RELEASE);
}

/**
 * radeon_fence_process - process a fence
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	if (radeon_fence_activity(rdev, ring))
		wake_up_all(&rdev->fence_queue);
}

/**
 * radeon_fence_destroy - destroy a fence
 *
 * @fence: radeon fence object
 *
 * Frees the fence object (all asics).
 */
static void radeon_fence_destroy(struct radeon_fence *fence)
{
	kfree(fence);
}
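/*
 * radeon_fence_process() is the lightweight entry point called when a
 * fence interrupt fires.  A sketch of a call site in an asic interrupt
 * handler (the status flag name is hypothetical, for illustration only):
 *
 *	if (status & FENCE_INT_GFX)
 *		radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
 */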
/**
 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence number
 * @ring: ring index the fence is associated with
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= the requested value) or false if it has not (current fence
 * value is < the requested value).  Helper function for
 * radeon_fence_signaled().
 */
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
				      u64 seq, unsigned ring)
{
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	/* poll new last sequence at least once */
	radeon_fence_process(rdev, ring);
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	return false;
}

/**
 * radeon_fence_signaled - check if a fence has signaled
 *
 * @fence: radeon fence object
 *
 * Check if the requested fence has signaled (all asics).
 * Returns true if the fence has signaled or false if it has not.
 */
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	if (!fence)
		return true;
	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring))
		return true;
	return false;
}

/**
 * radeon_fence_any_seq_signaled - check if any sequence number is signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence numbers
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if any has signaled (current value is >= requested value)
 * or false if it has not.  Helper function for radeon_fence_wait_seq.
 */
static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
	unsigned i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i))
			return true;
	}
	return false;
}
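/*
 * Example (sketch): radeon_fence_signaled() gives callers a non-blocking
 * poll, e.g. to recycle a buffer only once the GPU is done with it
 * (bo_fence is a hypothetical fence pointer owned by the caller):
 *
 *	if (radeon_fence_signaled(bo_fence))
 *		radeon_fence_unref(&bo_fence);	// safe to reuse the buffer
 */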
/**
 * radeon_fence_wait_seq_timeout - wait for a specific sequence number
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 * @timeout: maximum time to wait in jiffies
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics).  The sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number.  Helper function
 * for radeon_fence_wait_*().
 * Returns the remaining time if the sequence number has passed, 0 when
 * the wait timed out, or an error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected.
 */
static int radeon_fence_wait_seq_timeout(struct radeon_device *rdev,
					 u64 *target_seq, bool intr,
					 int timeout)
{
	long r;
	int i;

	if (radeon_fence_any_seq_signaled(rdev, target_seq))
		return timeout;

	/* enable IRQs and tracing */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i])
			continue;

#ifdef TRACE_TODO
		trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
#endif
		radeon_irq_kms_sw_irq_get(rdev, i);
	}

	if (intr) {
		r = wait_event_interruptible_timeout(rdev->fence_queue, (
			radeon_fence_any_seq_signaled(rdev, target_seq)
			 || rdev->needs_reset), timeout);
	} else {
		r = wait_event_timeout(rdev->fence_queue, (
			radeon_fence_any_seq_signaled(rdev, target_seq)
			 || rdev->needs_reset), timeout);
	}

	if (rdev->needs_reset)
		r = -EDEADLK;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i])
			continue;

		radeon_irq_kms_sw_irq_put(rdev, i);
#ifdef TRACE_TODO
		trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
#endif
	}

	return r;
}

/**
 * radeon_fence_wait - wait for a fence to signal
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns 0 if the fence has passed, error for all other cases.
 */
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	uint64_t seq[RADEON_NUM_RINGS] = {};
	int r;

	if (fence == NULL) {
		WARN(1, "Querying an invalid fence: %p!\n", fence);
		return -EINVAL;
	}

	seq[fence->ring] = fence->seq;
	r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, INT_MAX);
	if (r < 0) {
		return r;
	}

	return 0;
}

/**
 * radeon_fence_wait_any - wait for a fence to signal on any ring
 *
 * @rdev: radeon device pointer
 * @fences: radeon fence object(s)
 * @intr: use interruptible sleep
 *
 * Wait for any requested fence to signal (all asics).  The fence
 * array is indexed by ring id.  @intr selects whether to use
 * interruptible (true) or non-interruptible (false) sleep when
 * waiting for the fences.  Used by the suballocator.
 * Returns 0 if any fence has passed, error for all other cases.
 */
int radeon_fence_wait_any(struct radeon_device *rdev,
			  struct radeon_fence **fences,
			  bool intr)
{
	uint64_t seq[RADEON_NUM_RINGS];
	unsigned i, num_rings = 0;
	int r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		seq[i] = 0;

		if (!fences[i]) {
			continue;
		}

		seq[i] = fences[i]->seq;
		++num_rings;
	}

	/* nothing to wait for? */
	if (num_rings == 0)
		return -ENOENT;

	r = radeon_fence_wait_seq_timeout(rdev, seq, intr, INT_MAX);
	if (r < 0) {
		return r;
	}
	return 0;
}
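/*
 * Example (illustrative sketch): the canonical blocking pattern built from
 * radeon_fence_emit()/radeon_fence_wait()/radeon_fence_unref() (the unref
 * helper is defined further below):
 *
 *	struct radeon_fence *fence;
 *	int r;
 *
 *	r = radeon_fence_emit(rdev, &fence, ring);
 *	if (r == 0) {
 *		r = radeon_fence_wait(fence, true);	// interruptible wait
 *		radeon_fence_unref(&fence);
 *	}
 */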
/**
 * radeon_fence_wait_next - wait for the next fence to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold the ring lock.
 */
int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
{
	uint64_t seq[RADEON_NUM_RINGS] = {};
	int r;

	seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
	if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
		/* nothing to wait for, last_seq is already
		 * the last emitted fence
		 */
		return -ENOENT;
	}
	r = radeon_fence_wait_seq_timeout(rdev, seq, false, INT_MAX);
	if (r < 0)
		return r;
	return 0;
}

/**
 * radeon_fence_wait_empty - wait for all fences to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold the ring lock.
 */
int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
{
	uint64_t seq[RADEON_NUM_RINGS] = {};
	int r;

	seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
	if (!seq[ring])
		return 0;

	r = radeon_fence_wait_seq_timeout(rdev, seq, false, INT_MAX);
	if (r < 0) {
		if (r == -EDEADLK)
			return -EDEADLK;

		dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
			ring, r);
	}
	return 0;
}

/**
 * radeon_fence_ref - take a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	refcount_acquire(&fence->kref);
	return fence;
}

/**
 * radeon_fence_unref - remove a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Remove a reference on a fence (all asics).
 */
void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		if (refcount_release(&tmp->kref)) {
			radeon_fence_destroy(tmp);
		}
	}
}

/**
 * radeon_fence_count_emitted - get the count of emitted fences
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
	uint64_t emitted;

	/* We are not protected by the ring lock when reading the last
	 * sequence, but it's ok to report a slightly wrong fence count here.
	 */
	radeon_fence_process(rdev, ring);
	emitted = rdev->fence_drv[ring].sync_seq[ring]
		- atomic64_read(&rdev->fence_drv[ring].last_seq);
	/* to avoid a 32-bit wrap-around */
	if (emitted > 0x10000000) {
		emitted = 0x10000000;
	}
	return (unsigned)emitted;
}
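/*
 * Example (sketch): how power-management code might use
 * radeon_fence_count_emitted() to gauge ring activity; the threshold is
 * made up for illustration:
 *
 *	if (radeon_fence_count_emitted(rdev, RADEON_RING_TYPE_GFX_INDEX) > 3) {
 *		// the ring is busy, keep clocks high
 *	}
 */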
/**
 * radeon_fence_need_sync - do we need a semaphore
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics).  If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * not.
 */
bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *fdrv;

	if (!fence) {
		return false;
	}

	if (fence->ring == dst_ring) {
		return false;
	}

	/* we are protected by the ring mutex */
	fdrv = &fence->rdev->fence_drv[dst_ring];
	if (fence->seq <= fdrv->sync_seq[fence->ring]) {
		return false;
	}

	return true;
}

/**
 * radeon_fence_note_sync - record the sync point
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *dst, *src;
	unsigned i;

	if (!fence) {
		return;
	}

	if (fence->ring == dst_ring) {
		return;
	}

	/* we are protected by the ring mutex */
	src = &fence->rdev->fence_drv[fence->ring];
	dst = &fence->rdev->fence_drv[dst_ring];
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (i == dst_ring) {
			continue;
		}
		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
	}
}
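/*
 * Example (sketch): the need_sync/note_sync pair as command-submission
 * code might use it before dst_ring consumes buffers protected by fence;
 * the actual semaphore emission in the middle is elided:
 *
 *	if (radeon_fence_need_sync(fence, dst_ring)) {
 *		// ... emit a semaphore wait on dst_ring here ...
 *		radeon_fence_note_sync(fence, dst_ring);
 *	}
 */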
/**
 * radeon_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
	uint64_t index;
	int r;

	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
	if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
		rdev->fence_drv[ring].scratch_reg = 0;
		if (ring != R600_RING_TYPE_UVD_INDEX) {
			index = R600_WB_EVENT_OFFSET + ring * 4;
			rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
			rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr +
							 index;
		} else {
			/* put fence directly behind firmware */
			index = ALIGN(rdev->uvd_fw->datasize, 8);
			rdev->fence_drv[ring].cpu_addr = (void*)((uint8_t*)rdev->uvd.cpu_addr + index);
			rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
		}
	} else {
		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
		if (r) {
			dev_err(rdev->dev, "fence failed to get scratch register\n");
			return r;
		}
		index = RADEON_WB_SCRATCH_OFFSET +
			rdev->fence_drv[ring].scratch_reg -
			rdev->scratch.reg_base;
		rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
		rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
	}
	radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
	rdev->fence_drv[ring].initialized = true;
	dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016jx and cpu addr 0x%p\n",
		 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
	return 0;
}

/**
 * radeon_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for radeon_fence_driver_init().
 */
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
	int i;

	rdev->fence_drv[ring].scratch_reg = -1;
	rdev->fence_drv[ring].cpu_addr = NULL;
	rdev->fence_drv[ring].gpu_addr = 0;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		rdev->fence_drv[ring].sync_seq[i] = 0;
	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
	rdev->fence_drv[ring].initialized = false;
	INIT_DELAYED_WORK(&rdev->fence_drv[ring].lockup_work,
			  radeon_fence_check_lockup);
	rdev->fence_drv[ring].rdev = rdev;
}

/**
 * radeon_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * radeon_fence_driver_start_ring().
 * Returns 0 for success.
 */
int radeon_fence_driver_init(struct radeon_device *rdev)
{
	int ring;

	init_waitqueue_head(&rdev->fence_queue);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		radeon_fence_driver_init_ring(rdev, ring);
	}
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}

/**
 * radeon_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	int ring, r;

	lockmgr(&rdev->ring_lock, LK_EXCLUSIVE);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		r = radeon_fence_wait_empty(rdev, ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			radeon_fence_driver_force_completion(rdev, ring);
		}
		cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
		wake_up_all(&rdev->fence_queue);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		rdev->fence_drv[ring].initialized = false;
	}
	lockmgr(&rdev->ring_lock, LK_RELEASE);
}
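/*
 * Lifecycle sketch (illustrative only): how the init/start/fini helpers
 * above are expected to be ordered by the device code:
 *
 *	radeon_fence_driver_init(rdev);			// once, at device init
 *	radeon_fence_driver_start_ring(rdev, ring);	// per ring, after wb/ring setup
 *	// ... normal operation: emit / wait / process ...
 *	radeon_fence_driver_fini(rdev);			// at teardown
 */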
/**
 * radeon_fence_driver_force_completion - force all fence waiters to complete
 *
 * @rdev: radeon device pointer
 * @ring: the ring to complete
 *
 * In case of GPU reset failure, make sure no process keeps waiting on a
 * fence that will never complete.
 */
void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring)
{
	if (rdev->fence_drv[ring].initialized) {
		radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
		cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
	}
}


/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i, j;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		radeon_fence_process(rdev, i);

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic_load_acq_64(&rdev->fence_drv[i].last_seq));
		seq_printf(m, "Last emitted 0x%016llx\n",
			   rdev->fence_drv[i].sync_seq[i]);

		for (j = 0; j < RADEON_NUM_RINGS; ++j) {
			if (i != j && rdev->fence_drv[j].initialized)
				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
					   j, rdev->fence_drv[i].sync_seq[j]);
		}
	}
	return 0;
}

/**
 * radeon_debugfs_gpu_reset - manually trigger a gpu reset
 *
 * Manually trigger a gpu reset at the next fence wait.
 */
static int radeon_debugfs_gpu_reset(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	down_read(&rdev->exclusive_lock);
	seq_printf(m, "%d\n", rdev->needs_reset);
	rdev->needs_reset = true;
	wake_up_all(&rdev->fence_queue);
	up_read(&rdev->exclusive_lock);

	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
	{"radeon_gpu_reset", &radeon_debugfs_gpu_reset, 0, NULL}
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list,
					ARRAY_SIZE(radeon_debugfs_fence_list));
#else
	return 0;
#endif
}