1 /* 2 * Copyright © 2008-2015 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 */ 24 25 #include <linux/prefetch.h> 26 #include <linux/dma-fence-array.h> 27 #include <linux/sched.h> 28 #include <linux/sched/clock.h> 29 #include <linux/sched/signal.h> 30 31 #include "i915_drv.h" 32 33 static const char *i915_fence_get_driver_name(struct dma_fence *fence) 34 { 35 return "i915"; 36 } 37 38 static const char *i915_fence_get_timeline_name(struct dma_fence *fence) 39 { 40 /* The timeline struct (as part of the ppgtt underneath a context) 41 * may be freed when the request is no longer in use by the GPU. 42 * We could extend the life of a context to beyond that of all 43 * fences, possibly keeping the hw resource around indefinitely, 44 * or we just give them a false name. Since 45 * dma_fence_ops.get_timeline_name is a debug feature, the occasional 46 * lie seems justifiable. 47 */ 48 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) 49 return "signaled"; 50 51 return to_request(fence)->timeline->common->name; 52 } 53 54 static bool i915_fence_signaled(struct dma_fence *fence) 55 { 56 return i915_gem_request_completed(to_request(fence)); 57 } 58 59 static bool i915_fence_enable_signaling(struct dma_fence *fence) 60 { 61 if (i915_fence_signaled(fence)) 62 return false; 63 64 intel_engine_enable_signaling(to_request(fence)); 65 return true; 66 } 67 68 static signed long i915_fence_wait(struct dma_fence *fence, 69 bool interruptible, 70 signed long timeout) 71 { 72 return i915_wait_request(to_request(fence), interruptible, timeout); 73 } 74 75 static void i915_fence_release(struct dma_fence *fence) 76 { 77 struct drm_i915_gem_request *req = to_request(fence); 78 79 /* The request is put onto a RCU freelist (i.e. the address 80 * is immediately reused), mark the fences as being freed now. 81 * Otherwise the debugobjects for the fences are only marked as 82 * freed when the slab cache itself is freed, and so we would get 83 * caught trying to reuse dead objects. 84 */ 85 i915_sw_fence_fini(&req->submit); 86 87 kmem_cache_free(req->i915->requests, req); 88 } 89 90 const struct dma_fence_ops i915_fence_ops = { 91 .get_driver_name = i915_fence_get_driver_name, 92 .get_timeline_name = i915_fence_get_timeline_name, 93 .enable_signaling = i915_fence_enable_signaling, 94 .signaled = i915_fence_signaled, 95 .wait = i915_fence_wait, 96 .release = i915_fence_release, 97 }; 98 99 static inline void 100 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request) 101 { 102 struct drm_i915_file_private *file_priv; 103 104 file_priv = request->file_priv; 105 if (!file_priv) 106 return; 107 108 lockmgr(&file_priv->mm.lock, LK_EXCLUSIVE); 109 if (request->file_priv) { 110 list_del(&request->client_link); 111 request->file_priv = NULL; 112 } 113 lockmgr(&file_priv->mm.lock, LK_RELEASE); 114 } 115 116 static struct i915_dependency * 117 i915_dependency_alloc(struct drm_i915_private *i915) 118 { 119 return kmem_cache_alloc(i915->dependencies, GFP_KERNEL); 120 } 121 122 static void 123 i915_dependency_free(struct drm_i915_private *i915, 124 struct i915_dependency *dep) 125 { 126 kmem_cache_free(i915->dependencies, dep); 127 } 128 129 static void 130 __i915_priotree_add_dependency(struct i915_priotree *pt, 131 struct i915_priotree *signal, 132 struct i915_dependency *dep, 133 unsigned long flags) 134 { 135 INIT_LIST_HEAD(&dep->dfs_link); 136 list_add(&dep->wait_link, &signal->waiters_list); 137 list_add(&dep->signal_link, &pt->signalers_list); 138 dep->signaler = signal; 139 dep->flags = flags; 140 } 141 142 static int 143 i915_priotree_add_dependency(struct drm_i915_private *i915, 144 struct i915_priotree *pt, 145 struct i915_priotree *signal) 146 { 147 struct i915_dependency *dep; 148 149 dep = i915_dependency_alloc(i915); 150 if (!dep) 151 return -ENOMEM; 152 153 __i915_priotree_add_dependency(pt, signal, dep, I915_DEPENDENCY_ALLOC); 154 return 0; 155 } 156 157 static void 158 i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt) 159 { 160 struct i915_dependency *dep, *next; 161 162 GEM_BUG_ON(!RB_EMPTY_NODE(&pt->node)); 163 164 /* Everyone we depended upon (the fences we wait to be signaled) 165 * should retire before us and remove themselves from our list. 166 * However, retirement is run independently on each timeline and 167 * so we may be called out-of-order. 168 */ 169 list_for_each_entry_safe(dep, next, &pt->signalers_list, signal_link) { 170 list_del(&dep->wait_link); 171 if (dep->flags & I915_DEPENDENCY_ALLOC) 172 i915_dependency_free(i915, dep); 173 } 174 175 /* Remove ourselves from everyone who depends upon us */ 176 list_for_each_entry_safe(dep, next, &pt->waiters_list, wait_link) { 177 list_del(&dep->signal_link); 178 if (dep->flags & I915_DEPENDENCY_ALLOC) 179 i915_dependency_free(i915, dep); 180 } 181 } 182 183 static void 184 i915_priotree_init(struct i915_priotree *pt) 185 { 186 INIT_LIST_HEAD(&pt->signalers_list); 187 INIT_LIST_HEAD(&pt->waiters_list); 188 RB_CLEAR_NODE(&pt->node); 189 pt->priority = INT_MIN; 190 } 191 192 static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno) 193 { 194 struct intel_engine_cs *engine; 195 enum intel_engine_id id; 196 int ret; 197 198 /* Carefully retire all requests without writing to the rings */ 199 ret = i915_gem_wait_for_idle(i915, 200 I915_WAIT_INTERRUPTIBLE | 201 I915_WAIT_LOCKED); 202 if (ret) 203 return ret; 204 205 /* If the seqno wraps around, we need to clear the breadcrumb rbtree */ 206 for_each_engine(engine, i915, id) { 207 struct i915_gem_timeline *timeline; 208 struct intel_timeline *tl = engine->timeline; 209 210 if (!i915_seqno_passed(seqno, tl->seqno)) { 211 /* spin until threads are complete */ 212 while (intel_breadcrumbs_busy(engine)) 213 cond_resched(); 214 } 215 216 /* Finally reset hw state */ 217 tl->seqno = seqno; 218 intel_engine_init_global_seqno(engine, seqno); 219 220 list_for_each_entry(timeline, &i915->gt.timelines, link) 221 memset(timeline->engine[id].sync_seqno, 0, 222 sizeof(timeline->engine[id].sync_seqno)); 223 } 224 225 return 0; 226 } 227 228 int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno) 229 { 230 struct drm_i915_private *dev_priv = to_i915(dev); 231 232 lockdep_assert_held(&dev_priv->drm.struct_mutex); 233 234 if (seqno == 0) 235 return -EINVAL; 236 237 /* HWS page needs to be set less than what we 238 * will inject to ring 239 */ 240 return reset_all_global_seqno(dev_priv, seqno - 1); 241 } 242 243 static int reserve_seqno(struct intel_engine_cs *engine) 244 { 245 u32 active = ++engine->timeline->inflight_seqnos; 246 u32 seqno = engine->timeline->seqno; 247 int ret; 248 249 /* Reservation is fine until we need to wrap around */ 250 if (likely(!add_overflows(seqno, active))) 251 return 0; 252 253 ret = reset_all_global_seqno(engine->i915, 0); 254 if (ret) { 255 engine->timeline->inflight_seqnos--; 256 return ret; 257 } 258 259 return 0; 260 } 261 262 static void unreserve_seqno(struct intel_engine_cs *engine) 263 { 264 GEM_BUG_ON(!engine->timeline->inflight_seqnos); 265 engine->timeline->inflight_seqnos--; 266 } 267 268 void i915_gem_retire_noop(struct i915_gem_active *active, 269 struct drm_i915_gem_request *request) 270 { 271 /* Space left intentionally blank */ 272 } 273 274 static void i915_gem_request_retire(struct drm_i915_gem_request *request) 275 { 276 struct intel_engine_cs *engine = request->engine; 277 struct i915_gem_active *active, *next; 278 279 lockdep_assert_held(&request->i915->drm.struct_mutex); 280 GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit)); 281 GEM_BUG_ON(!i915_gem_request_completed(request)); 282 GEM_BUG_ON(!request->i915->gt.active_requests); 283 284 trace_i915_gem_request_retire(request); 285 286 spin_lock_irq(&engine->timeline->lock); 287 list_del_init(&request->link); 288 spin_unlock_irq(&engine->timeline->lock); 289 290 /* We know the GPU must have read the request to have 291 * sent us the seqno + interrupt, so use the position 292 * of tail of the request to update the last known position 293 * of the GPU head. 294 * 295 * Note this requires that we are always called in request 296 * completion order. 297 */ 298 list_del(&request->ring_link); 299 request->ring->head = request->postfix; 300 if (!--request->i915->gt.active_requests) { 301 GEM_BUG_ON(!request->i915->gt.awake); 302 mod_delayed_work(request->i915->wq, 303 &request->i915->gt.idle_work, 304 msecs_to_jiffies(100)); 305 } 306 unreserve_seqno(request->engine); 307 308 /* Walk through the active list, calling retire on each. This allows 309 * objects to track their GPU activity and mark themselves as idle 310 * when their *last* active request is completed (updating state 311 * tracking lists for eviction, active references for GEM, etc). 312 * 313 * As the ->retire() may free the node, we decouple it first and 314 * pass along the auxiliary information (to avoid dereferencing 315 * the node after the callback). 316 */ 317 list_for_each_entry_safe(active, next, &request->active_list, link) { 318 /* In microbenchmarks or focusing upon time inside the kernel, 319 * we may spend an inordinate amount of time simply handling 320 * the retirement of requests and processing their callbacks. 321 * Of which, this loop itself is particularly hot due to the 322 * cache misses when jumping around the list of i915_gem_active. 323 * So we try to keep this loop as streamlined as possible and 324 * also prefetch the next i915_gem_active to try and hide 325 * the likely cache miss. 326 */ 327 prefetchw(next); 328 329 INIT_LIST_HEAD(&active->link); 330 RCU_INIT_POINTER(active->request, NULL); 331 332 active->retire(active, request); 333 } 334 335 i915_gem_request_remove_from_client(request); 336 337 /* Retirement decays the ban score as it is a sign of ctx progress */ 338 if (request->ctx->ban_score > 0) 339 request->ctx->ban_score--; 340 341 /* The backing object for the context is done after switching to the 342 * *next* context. Therefore we cannot retire the previous context until 343 * the next context has already started running. However, since we 344 * cannot take the required locks at i915_gem_request_submit() we 345 * defer the unpinning of the active context to now, retirement of 346 * the subsequent request. 347 */ 348 if (engine->last_retired_context) 349 engine->context_unpin(engine, engine->last_retired_context); 350 engine->last_retired_context = request->ctx; 351 352 dma_fence_signal(&request->fence); 353 354 i915_priotree_fini(request->i915, &request->priotree); 355 i915_gem_request_put(request); 356 } 357 358 void i915_gem_request_retire_upto(struct drm_i915_gem_request *req) 359 { 360 struct intel_engine_cs *engine = req->engine; 361 struct drm_i915_gem_request *tmp; 362 363 lockdep_assert_held(&req->i915->drm.struct_mutex); 364 GEM_BUG_ON(!i915_gem_request_completed(req)); 365 366 if (list_empty(&req->link)) 367 return; 368 369 do { 370 tmp = list_first_entry(&engine->timeline->requests, 371 typeof(*tmp), link); 372 373 i915_gem_request_retire(tmp); 374 } while (tmp != req); 375 } 376 377 static u32 timeline_get_seqno(struct intel_timeline *tl) 378 { 379 return ++tl->seqno; 380 } 381 382 void __i915_gem_request_submit(struct drm_i915_gem_request *request) 383 { 384 struct intel_engine_cs *engine = request->engine; 385 struct intel_timeline *timeline; 386 u32 seqno; 387 388 // GEM_BUG_ON(!irqs_disabled()); 389 lockdep_assert_held(&engine->timeline->lock); 390 391 trace_i915_gem_request_execute(request); 392 393 /* Transfer from per-context onto the global per-engine timeline */ 394 timeline = engine->timeline; 395 GEM_BUG_ON(timeline == request->timeline); 396 397 seqno = timeline_get_seqno(timeline); 398 GEM_BUG_ON(!seqno); 399 GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno)); 400 401 /* We may be recursing from the signal callback of another i915 fence */ 402 lockmgr(&request->lock, LK_EXCLUSIVE); 403 request->global_seqno = seqno; 404 if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags)) 405 intel_engine_enable_signaling(request); 406 lockmgr(&request->lock, LK_RELEASE); 407 408 engine->emit_breadcrumb(request, 409 request->ring->vaddr + request->postfix); 410 411 lockmgr(&request->timeline->lock, LK_EXCLUSIVE); 412 list_move_tail(&request->link, &timeline->requests); 413 lockmgr(&request->timeline->lock, LK_RELEASE); 414 415 wake_up_all(&request->execute); 416 } 417 418 void i915_gem_request_submit(struct drm_i915_gem_request *request) 419 { 420 struct intel_engine_cs *engine = request->engine; 421 unsigned long flags; 422 423 /* Will be called from irq-context when using foreign fences. */ 424 spin_lock_irqsave(&engine->timeline->lock, flags); 425 426 __i915_gem_request_submit(request); 427 428 spin_unlock_irqrestore(&engine->timeline->lock, flags); 429 } 430 431 void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request) 432 { 433 struct intel_engine_cs *engine = request->engine; 434 struct intel_timeline *timeline; 435 436 GEM_BUG_ON(!irqs_disabled()); 437 lockdep_assert_held(&engine->timeline->lock); 438 439 /* Only unwind in reverse order, required so that the per-context list 440 * is kept in seqno/ring order. 441 */ 442 GEM_BUG_ON(request->global_seqno != engine->timeline->seqno); 443 engine->timeline->seqno--; 444 445 /* We may be recursing from the signal callback of another i915 fence */ 446 lockmgr(&request->lock, LK_EXCLUSIVE); 447 request->global_seqno = 0; 448 if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags)) 449 intel_engine_cancel_signaling(request); 450 lockmgr(&request->lock, LK_RELEASE); 451 452 /* Transfer back from the global per-engine timeline to per-context */ 453 timeline = request->timeline; 454 GEM_BUG_ON(timeline == engine->timeline); 455 456 lockmgr(&timeline->lock, LK_EXCLUSIVE); 457 list_move(&request->link, &timeline->requests); 458 lockmgr(&timeline->lock, LK_RELEASE); 459 460 /* We don't need to wake_up any waiters on request->execute, they 461 * will get woken by any other event or us re-adding this request 462 * to the engine timeline (__i915_gem_request_submit()). The waiters 463 * should be quite adapt at finding that the request now has a new 464 * global_seqno to the one they went to sleep on. 465 */ 466 } 467 468 void i915_gem_request_unsubmit(struct drm_i915_gem_request *request) 469 { 470 struct intel_engine_cs *engine = request->engine; 471 unsigned long flags; 472 473 /* Will be called from irq-context when using foreign fences. */ 474 spin_lock_irqsave(&engine->timeline->lock, flags); 475 476 __i915_gem_request_unsubmit(request); 477 478 spin_unlock_irqrestore(&engine->timeline->lock, flags); 479 } 480 481 static int __i915_sw_fence_call 482 submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) 483 { 484 struct drm_i915_gem_request *request = 485 container_of(fence, typeof(*request), submit); 486 487 switch (state) { 488 case FENCE_COMPLETE: 489 trace_i915_gem_request_submit(request); 490 request->engine->submit_request(request); 491 break; 492 493 case FENCE_FREE: 494 i915_gem_request_put(request); 495 break; 496 } 497 498 return NOTIFY_DONE; 499 } 500 501 /** 502 * i915_gem_request_alloc - allocate a request structure 503 * 504 * @engine: engine that we wish to issue the request on. 505 * @ctx: context that the request will be associated with. 506 * This can be NULL if the request is not directly related to 507 * any specific user context, in which case this function will 508 * choose an appropriate context to use. 509 * 510 * Returns a pointer to the allocated request if successful, 511 * or an error code if not. 512 */ 513 struct drm_i915_gem_request * 514 i915_gem_request_alloc(struct intel_engine_cs *engine, 515 struct i915_gem_context *ctx) 516 { 517 struct drm_i915_private *dev_priv = engine->i915; 518 struct drm_i915_gem_request *req; 519 int ret; 520 521 lockdep_assert_held(&dev_priv->drm.struct_mutex); 522 523 /* ABI: Before userspace accesses the GPU (e.g. execbuffer), report 524 * EIO if the GPU is already wedged. 525 */ 526 if (i915_terminally_wedged(&dev_priv->gpu_error)) 527 return ERR_PTR(-EIO); 528 529 /* Pinning the contexts may generate requests in order to acquire 530 * GGTT space, so do this first before we reserve a seqno for 531 * ourselves. 532 */ 533 ret = engine->context_pin(engine, ctx); 534 if (ret) 535 return ERR_PTR(ret); 536 537 ret = reserve_seqno(engine); 538 if (ret) 539 goto err_unpin; 540 541 /* Move the oldest request to the slab-cache (if not in use!) */ 542 req = list_first_entry_or_null(&engine->timeline->requests, 543 typeof(*req), link); 544 if (req && i915_gem_request_completed(req)) 545 i915_gem_request_retire(req); 546 547 /* Beware: Dragons be flying overhead. 548 * 549 * We use RCU to look up requests in flight. The lookups may 550 * race with the request being allocated from the slab freelist. 551 * That is the request we are writing to here, may be in the process 552 * of being read by __i915_gem_active_get_rcu(). As such, 553 * we have to be very careful when overwriting the contents. During 554 * the RCU lookup, we change chase the request->engine pointer, 555 * read the request->global_seqno and increment the reference count. 556 * 557 * The reference count is incremented atomically. If it is zero, 558 * the lookup knows the request is unallocated and complete. Otherwise, 559 * it is either still in use, or has been reallocated and reset 560 * with dma_fence_init(). This increment is safe for release as we 561 * check that the request we have a reference to and matches the active 562 * request. 563 * 564 * Before we increment the refcount, we chase the request->engine 565 * pointer. We must not call kmem_cache_zalloc() or else we set 566 * that pointer to NULL and cause a crash during the lookup. If 567 * we see the request is completed (based on the value of the 568 * old engine and seqno), the lookup is complete and reports NULL. 569 * If we decide the request is not completed (new engine or seqno), 570 * then we grab a reference and double check that it is still the 571 * active request - which it won't be and restart the lookup. 572 * 573 * Do not use kmem_cache_zalloc() here! 574 */ 575 req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL); 576 if (!req) { 577 ret = -ENOMEM; 578 goto err_unreserve; 579 } 580 581 req->timeline = i915_gem_context_lookup_timeline(ctx, engine); 582 GEM_BUG_ON(req->timeline == engine->timeline); 583 584 lockinit(&req->lock, "i915_rl", 0, 0); 585 dma_fence_init(&req->fence, 586 &i915_fence_ops, 587 &req->lock, 588 req->timeline->fence_context, 589 timeline_get_seqno(req->timeline)); 590 591 /* We bump the ref for the fence chain */ 592 i915_sw_fence_init(&i915_gem_request_get(req)->submit, submit_notify); 593 init_waitqueue_head(&req->execute); 594 595 i915_priotree_init(&req->priotree); 596 597 INIT_LIST_HEAD(&req->active_list); 598 req->i915 = dev_priv; 599 req->engine = engine; 600 req->ctx = ctx; 601 602 /* No zalloc, must clear what we need by hand */ 603 req->global_seqno = 0; 604 req->file_priv = NULL; 605 req->batch = NULL; 606 607 /* 608 * Reserve space in the ring buffer for all the commands required to 609 * eventually emit this request. This is to guarantee that the 610 * i915_add_request() call can't fail. Note that the reserve may need 611 * to be redone if the request is not actually submitted straight 612 * away, e.g. because a GPU scheduler has deferred it. 613 */ 614 req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST; 615 GEM_BUG_ON(req->reserved_space < engine->emit_breadcrumb_sz); 616 617 ret = engine->request_alloc(req); 618 if (ret) 619 goto err_ctx; 620 621 /* Record the position of the start of the request so that 622 * should we detect the updated seqno part-way through the 623 * GPU processing the request, we never over-estimate the 624 * position of the head. 625 */ 626 req->head = req->ring->emit; 627 628 /* Check that we didn't interrupt ourselves with a new request */ 629 GEM_BUG_ON(req->timeline->seqno != req->fence.seqno); 630 return req; 631 632 err_ctx: 633 /* Make sure we didn't add ourselves to external state before freeing */ 634 GEM_BUG_ON(!list_empty(&req->active_list)); 635 GEM_BUG_ON(!list_empty(&req->priotree.signalers_list)); 636 GEM_BUG_ON(!list_empty(&req->priotree.waiters_list)); 637 638 kmem_cache_free(dev_priv->requests, req); 639 err_unreserve: 640 unreserve_seqno(engine); 641 err_unpin: 642 engine->context_unpin(engine, ctx); 643 return ERR_PTR(ret); 644 } 645 646 static int 647 i915_gem_request_await_request(struct drm_i915_gem_request *to, 648 struct drm_i915_gem_request *from) 649 { 650 u32 seqno; 651 int ret; 652 653 GEM_BUG_ON(to == from); 654 655 if (i915_gem_request_completed(from)) 656 return 0; 657 658 if (to->engine->schedule) { 659 ret = i915_priotree_add_dependency(to->i915, 660 &to->priotree, 661 &from->priotree); 662 if (ret < 0) 663 return ret; 664 } 665 666 if (to->timeline == from->timeline) 667 return 0; 668 669 if (to->engine == from->engine) { 670 ret = i915_sw_fence_await_sw_fence_gfp(&to->submit, 671 &from->submit, 672 GFP_KERNEL); 673 return ret < 0 ? ret : 0; 674 } 675 676 seqno = i915_gem_request_global_seqno(from); 677 if (!seqno) { 678 ret = i915_sw_fence_await_dma_fence(&to->submit, 679 &from->fence, 0, 680 GFP_KERNEL); 681 return ret < 0 ? ret : 0; 682 } 683 684 if (seqno <= to->timeline->sync_seqno[from->engine->id]) 685 return 0; 686 687 trace_i915_gem_ring_sync_to(to, from); 688 if (!i915.semaphores) { 689 if (!i915_spin_request(from, TASK_INTERRUPTIBLE, 2)) { 690 ret = i915_sw_fence_await_dma_fence(&to->submit, 691 &from->fence, 0, 692 GFP_KERNEL); 693 if (ret < 0) 694 return ret; 695 } 696 } else { 697 ret = to->engine->semaphore.sync_to(to, from); 698 if (ret) 699 return ret; 700 } 701 702 to->timeline->sync_seqno[from->engine->id] = seqno; 703 return 0; 704 } 705 706 int 707 i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req, 708 struct dma_fence *fence) 709 { 710 struct dma_fence_array *array; 711 int ret; 712 int i; 713 714 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) 715 return 0; 716 717 if (dma_fence_is_i915(fence)) 718 return i915_gem_request_await_request(req, to_request(fence)); 719 720 if (!dma_fence_is_array(fence)) { 721 ret = i915_sw_fence_await_dma_fence(&req->submit, 722 fence, I915_FENCE_TIMEOUT, 723 GFP_KERNEL); 724 return ret < 0 ? ret : 0; 725 } 726 727 /* Note that if the fence-array was created in signal-on-any mode, 728 * we should *not* decompose it into its individual fences. However, 729 * we don't currently store which mode the fence-array is operating 730 * in. Fortunately, the only user of signal-on-any is private to 731 * amdgpu and we should not see any incoming fence-array from 732 * sync-file being in signal-on-any mode. 733 */ 734 735 array = to_dma_fence_array(fence); 736 for (i = 0; i < array->num_fences; i++) { 737 struct dma_fence *child = array->fences[i]; 738 739 if (dma_fence_is_i915(child)) 740 ret = i915_gem_request_await_request(req, 741 to_request(child)); 742 else 743 ret = i915_sw_fence_await_dma_fence(&req->submit, 744 child, I915_FENCE_TIMEOUT, 745 GFP_KERNEL); 746 if (ret < 0) 747 return ret; 748 } 749 750 return 0; 751 } 752 753 /** 754 * i915_gem_request_await_object - set this request to (async) wait upon a bo 755 * 756 * @to: request we are wishing to use 757 * @obj: object which may be in use on another ring. 758 * 759 * This code is meant to abstract object synchronization with the GPU. 760 * Conceptually we serialise writes between engines inside the GPU. 761 * We only allow one engine to write into a buffer at any time, but 762 * multiple readers. To ensure each has a coherent view of memory, we must: 763 * 764 * - If there is an outstanding write request to the object, the new 765 * request must wait for it to complete (either CPU or in hw, requests 766 * on the same ring will be naturally ordered). 767 * 768 * - If we are a write request (pending_write_domain is set), the new 769 * request must wait for outstanding read requests to complete. 770 * 771 * Returns 0 if successful, else propagates up the lower layer error. 772 */ 773 int 774 i915_gem_request_await_object(struct drm_i915_gem_request *to, 775 struct drm_i915_gem_object *obj, 776 bool write) 777 { 778 struct dma_fence *excl; 779 int ret = 0; 780 781 if (write) { 782 struct dma_fence **shared; 783 unsigned int count, i; 784 785 ret = reservation_object_get_fences_rcu(obj->resv, 786 &excl, &count, &shared); 787 if (ret) 788 return ret; 789 790 for (i = 0; i < count; i++) { 791 ret = i915_gem_request_await_dma_fence(to, shared[i]); 792 if (ret) 793 break; 794 795 dma_fence_put(shared[i]); 796 } 797 798 for (; i < count; i++) 799 dma_fence_put(shared[i]); 800 kfree(shared); 801 } else { 802 excl = reservation_object_get_excl_rcu(obj->resv); 803 } 804 805 if (excl) { 806 if (ret == 0) 807 ret = i915_gem_request_await_dma_fence(to, excl); 808 809 dma_fence_put(excl); 810 } 811 812 return ret; 813 } 814 815 static void i915_gem_mark_busy(const struct intel_engine_cs *engine) 816 { 817 struct drm_i915_private *dev_priv = engine->i915; 818 819 if (dev_priv->gt.awake) 820 return; 821 822 GEM_BUG_ON(!dev_priv->gt.active_requests); 823 824 intel_runtime_pm_get_noresume(dev_priv); 825 dev_priv->gt.awake = true; 826 827 intel_enable_gt_powersave(dev_priv); 828 i915_update_gfx_val(dev_priv); 829 if (INTEL_GEN(dev_priv) >= 6) 830 gen6_rps_busy(dev_priv); 831 832 queue_delayed_work(dev_priv->wq, 833 &dev_priv->gt.retire_work, 834 round_jiffies_up_relative(HZ)); 835 } 836 837 /* 838 * NB: This function is not allowed to fail. Doing so would mean the the 839 * request is not being tracked for completion but the work itself is 840 * going to happen on the hardware. This would be a Bad Thing(tm). 841 */ 842 void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches) 843 { 844 struct intel_engine_cs *engine = request->engine; 845 struct intel_ring *ring = request->ring; 846 struct intel_timeline *timeline = request->timeline; 847 struct drm_i915_gem_request *prev; 848 u32 *cs; 849 int err; 850 851 lockdep_assert_held(&request->i915->drm.struct_mutex); 852 trace_i915_gem_request_add(request); 853 854 /* Make sure that no request gazumped us - if it was allocated after 855 * our i915_gem_request_alloc() and called __i915_add_request() before 856 * us, the timeline will hold its seqno which is later than ours. 857 */ 858 GEM_BUG_ON(timeline->seqno != request->fence.seqno); 859 860 /* 861 * To ensure that this call will not fail, space for its emissions 862 * should already have been reserved in the ring buffer. Let the ring 863 * know that it is time to use that space up. 864 */ 865 request->reserved_space = 0; 866 867 /* 868 * Emit any outstanding flushes - execbuf can fail to emit the flush 869 * after having emitted the batchbuffer command. Hence we need to fix 870 * things up similar to emitting the lazy request. The difference here 871 * is that the flush _must_ happen before the next request, no matter 872 * what. 873 */ 874 if (flush_caches) { 875 err = engine->emit_flush(request, EMIT_FLUSH); 876 877 /* Not allowed to fail! */ 878 WARN(err, "engine->emit_flush() failed: %d!\n", err); 879 } 880 881 /* Record the position of the start of the breadcrumb so that 882 * should we detect the updated seqno part-way through the 883 * GPU processing the request, we never over-estimate the 884 * position of the ring's HEAD. 885 */ 886 cs = intel_ring_begin(request, engine->emit_breadcrumb_sz); 887 GEM_BUG_ON(IS_ERR(cs)); 888 request->postfix = intel_ring_offset(request, cs); 889 890 /* Seal the request and mark it as pending execution. Note that 891 * we may inspect this state, without holding any locks, during 892 * hangcheck. Hence we apply the barrier to ensure that we do not 893 * see a more recent value in the hws than we are tracking. 894 */ 895 896 prev = i915_gem_active_raw(&timeline->last_request, 897 &request->i915->drm.struct_mutex); 898 if (prev) { 899 i915_sw_fence_await_sw_fence(&request->submit, &prev->submit, 900 &request->submitq); 901 if (engine->schedule) 902 __i915_priotree_add_dependency(&request->priotree, 903 &prev->priotree, 904 &request->dep, 905 0); 906 } 907 908 spin_lock_irq(&timeline->lock); 909 list_add_tail(&request->link, &timeline->requests); 910 spin_unlock_irq(&timeline->lock); 911 912 GEM_BUG_ON(timeline->seqno != request->fence.seqno); 913 i915_gem_active_set(&timeline->last_request, request); 914 915 list_add_tail(&request->ring_link, &ring->request_list); 916 request->emitted_jiffies = jiffies; 917 918 if (!request->i915->gt.active_requests++) 919 i915_gem_mark_busy(engine); 920 921 /* Let the backend know a new request has arrived that may need 922 * to adjust the existing execution schedule due to a high priority 923 * request - i.e. we may want to preempt the current request in order 924 * to run a high priority dependency chain *before* we can execute this 925 * request. 926 * 927 * This is called before the request is ready to run so that we can 928 * decide whether to preempt the entire chain so that it is ready to 929 * run at the earliest possible convenience. 930 */ 931 if (engine->schedule) 932 engine->schedule(request, request->ctx->priority); 933 934 local_bh_disable(); 935 i915_sw_fence_commit(&request->submit); 936 local_bh_enable(); /* Kick the execlists tasklet if just scheduled */ 937 } 938 939 static unsigned long local_clock_us(unsigned int *cpu) 940 { 941 unsigned long t; 942 943 /* Cheaply and approximately convert from nanoseconds to microseconds. 944 * The result and subsequent calculations are also defined in the same 945 * approximate microseconds units. The principal source of timing 946 * error here is from the simple truncation. 947 * 948 * Note that local_clock() is only defined wrt to the current CPU; 949 * the comparisons are no longer valid if we switch CPUs. Instead of 950 * blocking preemption for the entire busywait, we can detect the CPU 951 * switch and use that as indicator of system load and a reason to 952 * stop busywaiting, see busywait_stop(). 953 */ 954 *cpu = get_cpu(); 955 t = local_clock() >> 10; 956 put_cpu(); 957 958 return t; 959 } 960 961 static bool busywait_stop(unsigned long timeout, unsigned int cpu) 962 { 963 unsigned int this_cpu; 964 965 if (time_after(local_clock_us(&this_cpu), timeout)) 966 return true; 967 968 return this_cpu != cpu; 969 } 970 971 bool __i915_spin_request(const struct drm_i915_gem_request *req, 972 u32 seqno, int state, unsigned long timeout_us) 973 { 974 struct intel_engine_cs *engine = req->engine; 975 unsigned int irq, cpu; 976 977 /* When waiting for high frequency requests, e.g. during synchronous 978 * rendering split between the CPU and GPU, the finite amount of time 979 * required to set up the irq and wait upon it limits the response 980 * rate. By busywaiting on the request completion for a short while we 981 * can service the high frequency waits as quick as possible. However, 982 * if it is a slow request, we want to sleep as quickly as possible. 983 * The tradeoff between waiting and sleeping is roughly the time it 984 * takes to sleep on a request, on the order of a microsecond. 985 */ 986 987 irq = atomic_read(&engine->irq_count); 988 timeout_us += local_clock_us(&cpu); 989 do { 990 if (seqno != i915_gem_request_global_seqno(req)) 991 break; 992 993 if (i915_seqno_passed(intel_engine_get_seqno(req->engine), 994 seqno)) 995 return true; 996 997 /* Seqno are meant to be ordered *before* the interrupt. If 998 * we see an interrupt without a corresponding seqno advance, 999 * assume we won't see one in the near future but require 1000 * the engine->seqno_barrier() to fixup coherency. 1001 */ 1002 if (atomic_read(&engine->irq_count) != irq) 1003 break; 1004 1005 if (signal_pending_state(state, current)) 1006 break; 1007 1008 if (busywait_stop(timeout_us, cpu)) 1009 break; 1010 1011 cpu_relax(); 1012 } while (!need_resched()); 1013 1014 return false; 1015 } 1016 1017 static bool __i915_wait_request_check_and_reset(struct drm_i915_gem_request *request) 1018 { 1019 if (likely(!i915_reset_handoff(&request->i915->gpu_error))) 1020 return false; 1021 1022 __set_current_state(TASK_RUNNING); 1023 i915_reset(request->i915); 1024 return true; 1025 } 1026 1027 /** 1028 * i915_wait_request - wait until execution of request has finished 1029 * @req: the request to wait upon 1030 * @flags: how to wait 1031 * @timeout: how long to wait in jiffies 1032 * 1033 * i915_wait_request() waits for the request to be completed, for a 1034 * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an 1035 * unbounded wait). 1036 * 1037 * If the caller holds the struct_mutex, the caller must pass I915_WAIT_LOCKED 1038 * in via the flags, and vice versa if the struct_mutex is not held, the caller 1039 * must not specify that the wait is locked. 1040 * 1041 * Returns the remaining time (in jiffies) if the request completed, which may 1042 * be zero or -ETIME if the request is unfinished after the timeout expires. 1043 * May return -EINTR is called with I915_WAIT_INTERRUPTIBLE and a signal is 1044 * pending before the request completes. 1045 */ 1046 long i915_wait_request(struct drm_i915_gem_request *req, 1047 unsigned int flags, 1048 long timeout) 1049 { 1050 const int state = flags & I915_WAIT_INTERRUPTIBLE ? 1051 TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; 1052 wait_queue_head_t *errq = &req->i915->gpu_error.wait_queue; 1053 DEFINE_WAIT_FUNC(reset, default_wake_function); 1054 DEFINE_WAIT_FUNC(exec, default_wake_function); 1055 struct intel_wait wait; 1056 1057 might_sleep(); 1058 #if IS_ENABLED(CONFIG_LOCKDEP) 1059 GEM_BUG_ON(debug_locks && 1060 !!lockdep_is_held(&req->i915->drm.struct_mutex) != 1061 !!(flags & I915_WAIT_LOCKED)); 1062 #endif 1063 GEM_BUG_ON(timeout < 0); 1064 1065 if (i915_gem_request_completed(req)) 1066 return timeout; 1067 1068 if (!timeout) 1069 return -ETIME; 1070 1071 trace_i915_gem_request_wait_begin(req, flags); 1072 1073 add_wait_queue(&req->execute, &exec); 1074 if (flags & I915_WAIT_LOCKED) 1075 add_wait_queue(errq, &reset); 1076 1077 intel_wait_init(&wait, req); 1078 1079 restart: 1080 do { 1081 set_current_state(state); 1082 if (intel_wait_update_request(&wait, req)) 1083 break; 1084 1085 if (flags & I915_WAIT_LOCKED && 1086 __i915_wait_request_check_and_reset(req)) 1087 continue; 1088 1089 if (signal_pending_state(state, current)) { 1090 timeout = -ERESTARTSYS; 1091 goto complete; 1092 } 1093 1094 if (!timeout) { 1095 timeout = -ETIME; 1096 goto complete; 1097 } 1098 1099 timeout = io_schedule_timeout(timeout); 1100 } while (1); 1101 1102 GEM_BUG_ON(!intel_wait_has_seqno(&wait)); 1103 GEM_BUG_ON(!i915_sw_fence_signaled(&req->submit)); 1104 1105 /* Optimistic short spin before touching IRQs */ 1106 if (i915_spin_request(req, state, 5)) 1107 goto complete; 1108 1109 set_current_state(state); 1110 if (intel_engine_add_wait(req->engine, &wait)) 1111 /* In order to check that we haven't missed the interrupt 1112 * as we enabled it, we need to kick ourselves to do a 1113 * coherent check on the seqno before we sleep. 1114 */ 1115 goto wakeup; 1116 1117 if (flags & I915_WAIT_LOCKED) 1118 __i915_wait_request_check_and_reset(req); 1119 1120 for (;;) { 1121 if (signal_pending_state(state, current)) { 1122 timeout = -ERESTARTSYS; 1123 break; 1124 } 1125 1126 if (!timeout) { 1127 timeout = -ETIME; 1128 break; 1129 } 1130 1131 timeout = io_schedule_timeout(timeout); 1132 1133 if (intel_wait_complete(&wait) && 1134 intel_wait_check_request(&wait, req)) 1135 break; 1136 1137 set_current_state(state); 1138 1139 wakeup: 1140 /* Carefully check if the request is complete, giving time 1141 * for the seqno to be visible following the interrupt. 1142 * We also have to check in case we are kicked by the GPU 1143 * reset in order to drop the struct_mutex. 1144 */ 1145 if (__i915_request_irq_complete(req)) 1146 break; 1147 1148 /* If the GPU is hung, and we hold the lock, reset the GPU 1149 * and then check for completion. On a full reset, the engine's 1150 * HW seqno will be advanced passed us and we are complete. 1151 * If we do a partial reset, we have to wait for the GPU to 1152 * resume and update the breadcrumb. 1153 * 1154 * If we don't hold the mutex, we can just wait for the worker 1155 * to come along and update the breadcrumb (either directly 1156 * itself, or indirectly by recovering the GPU). 1157 */ 1158 if (flags & I915_WAIT_LOCKED && 1159 __i915_wait_request_check_and_reset(req)) 1160 continue; 1161 1162 /* Only spin if we know the GPU is processing this request */ 1163 if (i915_spin_request(req, state, 2)) 1164 break; 1165 1166 if (!intel_wait_check_request(&wait, req)) { 1167 intel_engine_remove_wait(req->engine, &wait); 1168 goto restart; 1169 } 1170 } 1171 1172 intel_engine_remove_wait(req->engine, &wait); 1173 complete: 1174 __set_current_state(TASK_RUNNING); 1175 if (flags & I915_WAIT_LOCKED) 1176 remove_wait_queue(errq, &reset); 1177 remove_wait_queue(&req->execute, &exec); 1178 trace_i915_gem_request_wait_end(req); 1179 1180 return timeout; 1181 } 1182 1183 static void engine_retire_requests(struct intel_engine_cs *engine) 1184 { 1185 struct drm_i915_gem_request *request, *next; 1186 u32 seqno = intel_engine_get_seqno(engine); 1187 LINUX_LIST_HEAD(retire); 1188 1189 spin_lock_irq(&engine->timeline->lock); 1190 list_for_each_entry_safe(request, next, 1191 &engine->timeline->requests, link) { 1192 if (!i915_seqno_passed(seqno, request->global_seqno)) 1193 break; 1194 1195 list_move_tail(&request->link, &retire); 1196 } 1197 spin_unlock_irq(&engine->timeline->lock); 1198 1199 list_for_each_entry_safe(request, next, &retire, link) 1200 i915_gem_request_retire(request); 1201 } 1202 1203 void i915_gem_retire_requests(struct drm_i915_private *dev_priv) 1204 { 1205 struct intel_engine_cs *engine; 1206 enum intel_engine_id id; 1207 1208 lockdep_assert_held(&dev_priv->drm.struct_mutex); 1209 1210 if (!dev_priv->gt.active_requests) 1211 return; 1212 1213 for_each_engine(engine, dev_priv, id) 1214 engine_retire_requests(engine); 1215 } 1216 1217 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 1218 #include "selftests/mock_request.c" 1219 #include "selftests/i915_gem_request.c" 1220 #endif 1221