/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/prefetch.h>

#include "i915_drv.h"

static const char *i915_fence_get_driver_name(struct fence *fence)
{
	return "i915";
}

static const char *i915_fence_get_timeline_name(struct fence *fence)
{
	/* Timelines are bound by eviction to a VM. However, since
	 * we only have a global seqno at the moment, we only have
	 * a single timeline. Note that each timeline will have
	 * multiple execution contexts (fence contexts) as we allow
	 * engines within a single timeline to execute in parallel.
	 */
	return "global";
}

static bool i915_fence_signaled(struct fence *fence)
{
	return i915_gem_request_completed(to_request(fence));
}

static bool i915_fence_enable_signaling(struct fence *fence)
{
	if (i915_fence_signaled(fence))
		return false;

	intel_engine_enable_signaling(to_request(fence));
	return true;
}

static signed long i915_fence_wait(struct fence *fence,
				   bool interruptible,
				   signed long timeout_jiffies)
{
	s64 timeout_ns, *timeout;
	int ret;

	if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT) {
		timeout_ns = jiffies_to_nsecs(timeout_jiffies);
		timeout = &timeout_ns;
	} else {
		timeout = NULL;
	}

	ret = i915_wait_request(to_request(fence),
				interruptible, timeout,
				NO_WAITBOOST);
	if (ret == -ETIME)
		return 0;

	if (ret < 0)
		return ret;

	if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT)
		timeout_jiffies = nsecs_to_jiffies(timeout_ns);

	return timeout_jiffies;
}

static void i915_fence_value_str(struct fence *fence, char *str, int size)
{
	snprintf(str, size, "%u", fence->seqno);
}

static void i915_fence_timeline_value_str(struct fence *fence, char *str,
					  int size)
{
	snprintf(str, size, "%u",
		 intel_engine_get_seqno(to_request(fence)->engine));
}

static void i915_fence_release(struct fence *fence)
{
	struct drm_i915_gem_request *req = to_request(fence);

	kmem_cache_free(req->i915->requests, req);
}

const struct fence_ops i915_fence_ops = {
	.get_driver_name = i915_fence_get_driver_name,
	.get_timeline_name = i915_fence_get_timeline_name,
	.enable_signaling = i915_fence_enable_signaling,
	.signaled = i915_fence_signaled,
	.wait = i915_fence_wait,
	.release = i915_fence_release,
	.fence_value_str = i915_fence_value_str,
	.timeline_value_str = i915_fence_timeline_value_str,
};

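/*
 * Associate a request with the file (client) that submitted it, so that
 * it shows up on the per-file mm.request_list and can be unlinked again
 * by i915_gem_request_remove_from_client() when it is retired.
 */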
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
				   struct drm_file *file)
{
	struct drm_i915_private *dev_private;
	struct drm_i915_file_private *file_priv;

	WARN_ON(!req || !file || req->file_priv);

	if (!req || !file)
		return -EINVAL;

	if (req->file_priv)
		return -EINVAL;

	dev_private = req->i915;
	file_priv = file->driver_priv;

	lockmgr(&file_priv->mm.lock, LK_EXCLUSIVE);
	req->file_priv = file_priv;
	list_add_tail(&req->client_list, &file_priv->mm.request_list);
	lockmgr(&file_priv->mm.lock, LK_RELEASE);

	req->pid = curproc->p_pid;

	return 0;
}

static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
	struct drm_i915_file_private *file_priv = request->file_priv;

	if (!file_priv)
		return;

	lockmgr(&file_priv->mm.lock, LK_EXCLUSIVE);
	list_del(&request->client_list);
	request->file_priv = NULL;
	lockmgr(&file_priv->mm.lock, LK_RELEASE);

#if 0
	put_pid(request->pid);
	request->pid = NULL;
#else
	request->pid = 0;
#endif
}

void i915_gem_retire_noop(struct i915_gem_active *active,
			  struct drm_i915_gem_request *request)
{
	/* Space left intentionally blank */
}

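/*
 * Retire a single completed request: unlink it from the engine and ring
 * lists, record the ring space it consumed as reusable, run the retirement
 * callbacks of everything still on its active_list, and finally drop the
 * context and request references.
 */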
static void i915_gem_request_retire(struct drm_i915_gem_request *request)
{
	struct i915_gem_active *active, *next;

	trace_i915_gem_request_retire(request);
	list_del(&request->link);

	/* We know the GPU must have read the request to have
	 * sent us the seqno + interrupt, so use the position
	 * of the tail of the request to update the last known position
	 * of the GPU head.
	 *
	 * Note this requires that we are always called in request
	 * completion order.
	 */
	list_del(&request->ring_link);
	request->ring->last_retired_head = request->postfix;

	/* Walk through the active list, calling retire on each. This allows
	 * objects to track their GPU activity and mark themselves as idle
	 * when their *last* active request is completed (updating state
	 * tracking lists for eviction, active references for GEM, etc).
	 *
	 * As the ->retire() may free the node, we decouple it first and
	 * pass along the auxiliary information (to avoid dereferencing
	 * the node after the callback).
	 */
	list_for_each_entry_safe(active, next, &request->active_list, link) {
		/* In microbenchmarks or focusing upon time inside the kernel,
		 * we may spend an inordinate amount of time simply handling
		 * the retirement of requests and processing their callbacks.
		 * Of which, this loop itself is particularly hot due to the
		 * cache misses when jumping around the list of i915_gem_active.
		 * So we try to keep this loop as streamlined as possible and
		 * also prefetch the next i915_gem_active to try and hide
		 * the likely cache miss.
		 */
		prefetchw(next);

		INIT_LIST_HEAD(&active->link);
		RCU_INIT_POINTER(active->request, NULL);

		active->retire(active, request);
	}

	i915_gem_request_remove_from_client(request);

	if (request->previous_context) {
		if (i915.enable_execlists)
			intel_lr_context_unpin(request->previous_context,
					       request->engine);
	}

	i915_gem_context_put(request->ctx);
	i915_gem_request_put(request);
}

void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	struct drm_i915_gem_request *tmp;

	lockdep_assert_held(&req->i915->drm.struct_mutex);
	GEM_BUG_ON(list_empty(&req->link));

	do {
		tmp = list_first_entry(&engine->request_list,
				       typeof(*tmp), link);

		i915_gem_request_retire(tmp);
	} while (tmp != req);
}

static int i915_gem_check_wedge(unsigned int reset_counter, bool interruptible)
{
	if (__i915_terminally_wedged(reset_counter))
		return -EIO;

	if (__i915_reset_in_progress(reset_counter)) {
		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these.
		 */
		if (!interruptible)
			return -EIO;

		return -EAGAIN;
	}

	return 0;
}

static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
{
	struct intel_engine_cs *engine;
	int ret;

	/* Carefully retire all requests without writing to the rings */
	for_each_engine(engine, dev_priv) {
		ret = intel_engine_idle(engine, true);
		if (ret)
			return ret;
	}
	i915_gem_retire_requests(dev_priv);

	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
	if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) {
		while (intel_kick_waiters(dev_priv) ||
		       intel_kick_signalers(dev_priv))
			yield();
	}

	/* Finally reset hw state */
	for_each_engine(engine, dev_priv)
		intel_engine_init_seqno(engine, seqno);

	return 0;
}

int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	if (seqno == 0)
		return -EINVAL;

	/* The HWS page needs to be set to a value less than what we
	 * will inject into the ring.
	 */
	ret = i915_gem_init_seqno(dev_priv, seqno - 1);
	if (ret)
		return ret;

	dev_priv->next_seqno = seqno;
	return 0;
}

static int i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
{
	/* reserve 0 for non-seqno */
	if (unlikely(dev_priv->next_seqno == 0)) {
		int ret;

		ret = i915_gem_init_seqno(dev_priv, 0);
		if (ret)
			return ret;

		dev_priv->next_seqno = 1;
	}

	*seqno = dev_priv->next_seqno++;
	return 0;
}

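/*
 * A minimal usage sketch: callers allocate a request, emit their commands
 * into req->ring, and then seal and submit it with __i915_add_request()
 * (see below), which must not fail once the request has been allocated:
 *
 *	req = i915_gem_request_alloc(engine, ctx);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	... emit commands ...
 *	__i915_add_request(req, batch_obj, true);
 */
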
/**
 * i915_gem_request_alloc - allocate a request structure
 *
 * @engine: engine that we wish to issue the request on.
 * @ctx: context that the request will be associated with.
 *       This can be NULL if the request is not directly related to
 *       any specific user context, in which case this function will
 *       choose an appropriate context to use.
 *
 * Returns a pointer to the allocated request if successful,
 * or an error code if not.
 */
struct drm_i915_gem_request *
i915_gem_request_alloc(struct intel_engine_cs *engine,
		       struct i915_gem_context *ctx)
{
	struct drm_i915_private *dev_priv = engine->i915;
	unsigned int reset_counter = i915_reset_counter(&dev_priv->gpu_error);
	struct drm_i915_gem_request *req;
	u32 seqno;
	int ret;

	/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
	 * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
	 * and restart.
	 */
	ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible);
	if (ret)
		return ERR_PTR(ret);

	/* Move the oldest request to the slab-cache (if not in use!) */
	req = list_first_entry_or_null(&engine->request_list,
				       typeof(*req), link);
	if (req && i915_gem_request_completed(req))
		i915_gem_request_retire(req);

	/* Beware: Dragons be flying overhead.
	 *
	 * We use RCU to look up requests in flight. The lookups may
	 * race with the request being allocated from the slab freelist.
	 * That is, the request we are writing to here may be in the process
	 * of being read by __i915_gem_active_get_request_rcu(). As such,
	 * we have to be very careful when overwriting the contents. During
	 * the RCU lookup, we chase the request->engine pointer,
	 * read the request->fence.seqno and increment the reference count.
	 *
	 * The reference count is incremented atomically. If it is zero,
	 * the lookup knows the request is unallocated and complete. Otherwise,
	 * it is either still in use, or has been reallocated and reset
	 * with fence_init(). This increment is safe for the release as we
	 * check that the request we have a reference to matches the active
	 * request.
	 *
	 * Before we increment the refcount, we chase the request->engine
	 * pointer. We must not call kmem_cache_zalloc() or else we set
	 * that pointer to NULL and cause a crash during the lookup. If
	 * we see the request is completed (based on the value of the
	 * old engine and seqno), the lookup is complete and reports NULL.
	 * If we decide the request is not completed (new engine or seqno),
	 * then we grab a reference and double check that it is still the
	 * active request - which it won't be and restart the lookup.
	 *
	 * Do not use kmem_cache_zalloc() here!
	 */
	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return ERR_PTR(-ENOMEM);

	ret = i915_gem_get_seqno(dev_priv, &seqno);
	if (ret)
		goto err;

	lockinit(&req->lock, "i915_rl", 0, 0);
	fence_init(&req->fence,
		   &i915_fence_ops,
		   &req->lock,
		   engine->fence_context,
		   seqno);

	INIT_LIST_HEAD(&req->active_list);
	req->i915 = dev_priv;
	req->engine = engine;
	req->ctx = i915_gem_context_get(ctx);

	/* No zalloc, must clear what we need by hand */
	req->previous_context = NULL;
	req->file_priv = NULL;
	req->batch_obj = NULL;
	req->pid = 0;
	req->elsp_submitted = 0;

	/*
	 * Reserve space in the ring buffer for all the commands required to
	 * eventually emit this request. This is to guarantee that the
	 * i915_add_request() call can't fail. Note that the reserve may need
	 * to be redone if the request is not actually submitted straight
	 * away, e.g. because a GPU scheduler has deferred it.
	 */
	req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;

	if (i915.enable_execlists)
		ret = intel_logical_ring_alloc_request_extras(req);
	else
		ret = intel_ring_alloc_request_extras(req);
	if (ret)
		goto err_ctx;

	return req;

err_ctx:
	i915_gem_context_put(ctx);
err:
	kmem_cache_free(dev_priv->requests, req);
	return ERR_PTR(ret);
}

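/*
 * Mark the GT as busy on behalf of this engine: on the first transition
 * from idle we take a runtime-pm reference, re-enable powersave/RPS and
 * kick off the retire worker so that requests are retired even if nobody
 * waits upon them.
 */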
static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->gt.active_engines |= intel_engine_flag(engine);
	if (dev_priv->gt.awake)
		return;

	intel_runtime_pm_get_noresume(dev_priv);
	dev_priv->gt.awake = true;

	intel_enable_gt_powersave(dev_priv);
	i915_update_gfx_val(dev_priv);
	if (INTEL_GEN(dev_priv) >= 6)
		gen6_rps_busy(dev_priv);

	queue_delayed_work(dev_priv->wq,
			   &dev_priv->gt.retire_work,
			   round_jiffies_up_relative(HZ));
}

/*
 * NB: This function is not allowed to fail. Doing so would mean that the
 * request is not being tracked for completion but the work itself is
 * going to happen on the hardware. This would be a Bad Thing(tm).
 */
void __i915_add_request(struct drm_i915_gem_request *request,
			struct drm_i915_gem_object *obj,
			bool flush_caches)
{
	struct intel_engine_cs *engine;
	struct intel_ring *ring;
	u32 request_start;
	u32 reserved_tail;
	int ret;

	if (WARN_ON(!request))
		return;

	engine = request->engine;
	ring = request->ring;

	/*
	 * To ensure that this call will not fail, space for its emissions
	 * should already have been reserved in the ring buffer. Let the ring
	 * know that it is time to use that space up.
	 */
	request_start = ring->tail;
	reserved_tail = request->reserved_space;
	request->reserved_space = 0;

	/*
	 * Emit any outstanding flushes - execbuf can fail to emit the flush
	 * after having emitted the batchbuffer command. Hence we need to fix
	 * things up similar to emitting the lazy request. The difference here
	 * is that the flush _must_ happen before the next request, no matter
	 * what.
	 */
	if (flush_caches) {
		ret = engine->emit_flush(request, EMIT_FLUSH);

		/* Not allowed to fail! */
		WARN(ret, "engine->emit_flush() failed: %d!\n", ret);
	}

	trace_i915_gem_request_add(request);

	request->head = request_start;

	/* Whilst this request exists, batch_obj will be on the
	 * active_list, and so will hold the active reference. Only when this
	 * request is retired will the batch_obj be moved onto the
	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */
	request->batch_obj = obj;

	/* Seal the request and mark it as pending execution. Note that
	 * we may inspect this state, without holding any locks, during
	 * hangcheck. Hence we apply the barrier to ensure that we do not
	 * see a more recent value in the hws than we are tracking.
	 */
	request->emitted_jiffies = jiffies;
	request->previous_seqno = engine->last_submitted_seqno;
	engine->last_submitted_seqno = request->fence.seqno;
	i915_gem_active_set(&engine->last_request, request);
	list_add_tail(&request->link, &engine->request_list);
	list_add_tail(&request->ring_link, &ring->request_list);

	/* Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
	request->postfix = ring->tail;

	/* Not allowed to fail! */
	ret = engine->emit_request(request);
	WARN(ret, "(%s)->emit_request failed: %d!\n", engine->name, ret);

	/* Sanity check that the reserved size was large enough. */
	ret = ring->tail - request_start;
	if (ret < 0)
		ret += ring->size;
	WARN_ONCE(ret > reserved_tail,
		  "Not enough space reserved (%d bytes) "
		  "for adding the request (%d bytes)\n",
		  reserved_tail, ret);

	i915_gem_mark_busy(engine);
	engine->submit_request(request);
}

static unsigned long local_clock_us(unsigned int *cpu)
{
	unsigned long t;

	/* Cheaply and approximately convert from nanoseconds to microseconds.
	 * The result and subsequent calculations are also defined in the same
	 * approximate microseconds units. The principal source of timing
	 * error here is from the simple truncation.
	 *
	 * Note that local_clock() is only defined wrt the current CPU;
	 * the comparisons are no longer valid if we switch CPUs. Instead of
	 * blocking preemption for the entire busywait, we can detect the CPU
	 * switch and use that as an indicator of system load and a reason to
	 * stop busywaiting, see busywait_stop().
	 */
	*cpu = get_cpu();
	t = local_clock() >> 10;
	put_cpu();

	return t;
}

static bool busywait_stop(unsigned long timeout, unsigned int cpu)
{
	unsigned int this_cpu;

	if (time_after(local_clock_us(&this_cpu), timeout))
		return true;

	return this_cpu != cpu;
}

bool __i915_spin_request(const struct drm_i915_gem_request *req,
			 int state, unsigned long timeout_us)
{
	unsigned int cpu;

	/* When waiting for high frequency requests, e.g. during synchronous
	 * rendering split between the CPU and GPU, the finite amount of time
	 * required to set up the irq and wait upon it limits the response
	 * rate. By busywaiting on the request completion for a short while we
	 * can service the high frequency waits as quickly as possible.
	 * However, if it is a slow request, we want to sleep as quickly as
	 * possible. The tradeoff between waiting and sleeping is roughly the
	 * time it takes to sleep on a request, on the order of a microsecond.
	 */

	timeout_us += local_clock_us(&cpu);
	do {
		if (i915_gem_request_completed(req))
			return true;

		if (signal_pending_state(state, current))
			break;

		if (busywait_stop(timeout_us, cpu))
			break;

		cpu_relax();
	} while (!need_resched());

	return false;
}

/**
 * i915_wait_request - wait until execution of request has finished
 * @req: the request to wait upon
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 * @rps: client to charge for RPS boosting
 *
 * Note: It is of utmost importance that the passed in seqno and reset_counter
 * values have been read by the caller in an smp safe manner. Where read-side
 * locks are involved, it is sufficient to read the reset_counter before
 * unlocking the lock that protects the seqno. For lockless tricks, the
 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
 * inserted.
 *
 * Returns 0 if the request was found within the allotted time. Else returns
 * the errno with the remaining time filled in the timeout argument.
 */
int i915_wait_request(struct drm_i915_gem_request *req,
		      bool interruptible,
		      s64 *timeout,
		      struct intel_rps_client *rps)
{
	int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
	DEFINE_WAIT(reset);
	struct intel_wait wait;
	unsigned long timeout_remain;
	int ret = 0;

	might_sleep();

	if (i915_gem_request_completed(req))
		return 0;

	timeout_remain = MAX_SCHEDULE_TIMEOUT;
	if (timeout) {
		if (WARN_ON(*timeout < 0))
			return -EINVAL;

		if (*timeout == 0)
			return -ETIME;

		/* Record current time in case interrupted, or wedged */
		timeout_remain = nsecs_to_jiffies_timeout(*timeout);
		*timeout += ktime_get_raw_ns();
	}

	trace_i915_gem_request_wait_begin(req);

	/* This client is about to stall waiting for the GPU. In many cases
	 * this is undesirable and limits the throughput of the system, as
	 * many clients cannot continue processing user input/output whilst
	 * blocked. RPS autotuning may take tens of milliseconds to respond
	 * to the GPU load and thus incurs additional latency for the client.
	 * We can circumvent that by promoting the GPU frequency to maximum
	 * before we wait. This makes the GPU throttle up much more quickly
	 * (good for benchmarks and user experience, e.g. window animations),
	 * but at a cost of spending more power processing the workload
	 * (bad for battery). Not all clients even want their results
	 * immediately and for them we should just let the GPU select its own
	 * frequency to maximise efficiency. To prevent a single client from
	 * forcing the clocks too high for the whole system, we only allow
	 * each client to waitboost once in a busy period.
	 */
	if (IS_RPS_CLIENT(rps) && INTEL_GEN(req->i915) >= 6)
		gen6_rps_boost(req->i915, rps, req->emitted_jiffies);

	/* Optimistic short spin before touching IRQs */
	if (i915_spin_request(req, state, 5))
		goto complete;

	set_current_state(state);
	add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);

	intel_wait_init(&wait, req->fence.seqno);
	if (intel_engine_add_wait(req->engine, &wait))
		/* In order to check that we haven't missed the interrupt
		 * as we enabled it, we need to kick ourselves to do a
		 * coherent check on the seqno before we sleep.
		 */
		goto wakeup;

	for (;;) {
		if (signal_pending_state(state, current)) {
			ret = -ERESTARTSYS;
			break;
		}

		timeout_remain = io_schedule_timeout(timeout_remain);
		if (timeout_remain == 0) {
			ret = -ETIME;
			break;
		}

		if (intel_wait_complete(&wait))
			break;

		set_current_state(state);

wakeup:
		/* Carefully check if the request is complete, giving time
		 * for the seqno to be visible following the interrupt.
		 * We also have to check in case we are kicked by the GPU
		 * reset in order to drop the struct_mutex.
		 */
		if (__i915_request_irq_complete(req))
			break;

		/* Only spin if we know the GPU is processing this request */
		if (i915_spin_request(req, state, 2))
			break;
	}
	remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);

	intel_engine_remove_wait(req->engine, &wait);
	__set_current_state(TASK_RUNNING);
complete:
	trace_i915_gem_request_wait_end(req);

	if (timeout) {
		*timeout -= ktime_get_raw_ns();
		if (*timeout < 0)
			*timeout = 0;

		/*
		 * Apparently ktime isn't accurate enough and occasionally has
		 * a bit of mismatch in the jiffies<->nsecs<->ktime loop. So
		 * patch things up to make the test happy. We allow up to 1
		 * jiffy.
		 *
		 * This is a regression from the timespec->ktime conversion.
		 */
		if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
			*timeout = 0;
	}

	if (IS_RPS_USER(rps) &&
	    req->fence.seqno == req->engine->last_submitted_seqno) {
		/* The GPU is now idle and this client has stalled.
		 * Since no other client has submitted a request in the
		 * meantime, assume that this client is the only one
		 * supplying work to the GPU but is unable to keep that
		 * work supplied because it is waiting. Since the GPU is
		 * then never kept fully busy, RPS autoclocking will
		 * keep the clocks relatively low, causing further delays.
		 * Compensate by giving the synchronous client credit for
		 * a waitboost next time.
		 */
		lockmgr(&req->i915->rps.client_lock, LK_EXCLUSIVE);
		list_del_init(&rps->link);
		lockmgr(&req->i915->rps.client_lock, LK_RELEASE);
	}

	return ret;
}

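/*
 * Retire all completed requests on an engine, oldest first, stopping at
 * the first request the GPU has not yet finished.
 */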
static void engine_retire_requests(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *request, *next;

	list_for_each_entry_safe(request, next, &engine->request_list, link) {
		if (!i915_gem_request_completed(request))
			break;

		i915_gem_request_retire(request);
	}
}

void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	if (dev_priv->gt.active_engines == 0)
		return;

	GEM_BUG_ON(!dev_priv->gt.awake);

	for_each_engine(engine, dev_priv) {
		engine_retire_requests(engine);
		if (!intel_engine_is_active(engine))
			dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
	}

	if (dev_priv->gt.active_engines == 0)
		queue_delayed_work(dev_priv->wq,
				   &dev_priv->gt.idle_work,
				   msecs_to_jiffies(100));
}