/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * from RC6 (the GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current to invoke a save of the context we actually care about. In fact, the
 * code could likely be constructed, albeit in a more complicated fashion, to
 * never use the default context, though that limits the driver's ability to
 * swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These contexts
 * store GPU state, and thus allow GPU clients to not re-emit state (and
 * potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware and object
 * tracking work. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits an execbuf with a different context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and is
 *  on the active list waiting for the next context switch to occur. Until this
 *  happens, the object must remain at the same gtt offset. It is therefore
 *  possible to destroy a context, but it is still active.
 *
 */
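/*
 * For reference, this is roughly how a GPU client drives the life cycle
 * described above from userspace (a minimal, hypothetical sketch using
 * libdrm's drmIoctl(); fd is an open DRM fd, error handling omitted):
 *
 *	struct drm_i915_gem_context_create create = { };
 *	struct drm_i915_gem_context_destroy destroy = { };
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
 *	... submit execbuffers tagged with create.ctx_id ...
 *	destroy.ctx_id = create.ctx_id;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
 */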
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"

/* This is a HW constraint. The value below is the largest known requirement
 * I've seen in a spec to date, and that was a workaround for a non-shipping
 * part. It should be safe to decrease this, but it's more future proof as is.
 */
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096

static size_t get_context_alignment(struct drm_device *dev)
{
	if (IS_GEN6(dev))
		return GEN6_CONTEXT_ALIGN;

	return GEN7_CONTEXT_ALIGN;
}

static int get_context_size(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	u32 reg;

	switch (INTEL_INFO(dev)->gen) {
	case 6:
		reg = I915_READ(CXT_SIZE);
		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 7:
		reg = I915_READ(GEN7_CXT_SIZE);
		if (IS_HASWELL(dev))
			ret = HSW_CXT_TOTAL_SIZE;
		else
			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 8:
		ret = GEN8_CXT_TOTAL_SIZE;
		break;
	default:
		BUG();
	}

	return ret;
}

static void i915_gem_context_clean(struct intel_context *ctx)
{
	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
	struct i915_vma *vma, *next;

	if (!ppgtt)
		return;

	list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
				 mm_list) {
		if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
			break;
	}
}
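/**
 * i915_gem_context_free() - release callback for the context's kref
 * @ctx_ref: the &struct kref embedded in the dying context
 *
 * Called once the last reference has been dropped through
 * i915_gem_context_unreference(). Frees the LRC backing objects in
 * execlists mode, drops the PPGTT and the legacy render context state
 * object, and finally frees the context itself.
 */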
void i915_gem_context_free(struct kref *ctx_ref)
{
	struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);

	trace_i915_context_free(ctx);

	if (i915.enable_execlists)
		intel_lr_context_free(ctx);

	/*
	 * This context is going away and we need to remove all VMAs still
	 * around. This is to handle imported shared objects for which the
	 * destructor did not run when their handles were closed.
	 */
	i915_gem_context_clean(ctx);

	i915_ppgtt_put(ctx->ppgtt);

	if (ctx->legacy_hw_ctx.rcs_state)
		drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
	list_del(&ctx->link);
	kfree(ctx);
}

struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
{
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 *
	 * Snooping is required on non-llc platforms in execlist
	 * mode, but since all GGTT accesses use PAT entry 0 we
	 * get snooping anyway regardless of cache_level.
	 *
	 * This is only applicable for Ivy Bridge devices since
	 * later platforms don't have L3 control bits in the PTE.
	 */
	if (IS_IVYBRIDGE(dev)) {
		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
		/* Failure shouldn't ever happen this early */
		if (WARN_ON(ret)) {
			drm_gem_object_unreference(&obj->base);
			return ERR_PTR(ret);
		}
	}

	return obj;
}
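/**
 * __create_hw_context() - allocate and initialise an intel_context
 * @dev: DRM device
 * @file_priv: file private of the owning client, or NULL for the global
 *	       default context
 *
 * Allocates the context structure, the legacy render context state
 * object when HW contexts are in use, and, for client-created contexts,
 * a user handle in the per-file idr.
 */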
static struct intel_context *
__create_hw_context(struct drm_device *dev,
		    struct drm_i915_file_private *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&ctx->ref);
	list_add_tail(&ctx->link, &dev_priv->context_list);
	ctx->i915 = dev_priv;

	if (dev_priv->hw_context_size) {
		struct drm_i915_gem_object *obj =
				i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
		if (IS_ERR(obj)) {
			ret = PTR_ERR(obj);
			goto err_out;
		}
		ctx->legacy_hw_ctx.rcs_state = obj;
	}

	/* Default context will never have a file_priv */
	if (file_priv != NULL) {
		ret = idr_alloc(&file_priv->context_idr, ctx,
				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
	} else
		ret = DEFAULT_CONTEXT_HANDLE;

	ctx->file_priv = file_priv;
	ctx->user_handle = ret;
	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;

	ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;

	return ctx;

err_out:
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}

/**
 * The default context needs to exist per ring that uses contexts. It stores the
 * context state of the GPU for applications that don't utilize HW contexts, as
 * well as an idle case.
 */
static struct intel_context *
i915_gem_create_context(struct drm_device *dev,
			struct drm_i915_file_private *file_priv)
{
	const bool is_global_default_ctx = file_priv == NULL;
	struct intel_context *ctx;
	int ret = 0;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	ctx = __create_hw_context(dev, file_priv);
	if (IS_ERR(ctx))
		return ctx;

	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
		/* We may need to do things with the shrinker which
		 * require us to immediately switch back to the default
		 * context. This can cause a problem as pinning the
		 * default context also requires GTT space which may not
		 * be available. To avoid this we always pin the default
		 * context.
		 */
		ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
					    get_context_alignment(dev), 0);
		if (ret) {
			DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
			goto err_destroy;
		}
	}

	if (USES_FULL_PPGTT(dev)) {
		struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);

		if (IS_ERR_OR_NULL(ppgtt)) {
			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
					 PTR_ERR(ppgtt));
			ret = PTR_ERR(ppgtt);
			goto err_unpin;
		}

		ctx->ppgtt = ppgtt;
	}

	trace_i915_context_create(ctx);

	return ctx;

err_unpin:
	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
		i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
err_destroy:
	/* The global default context has no handle in the idr to remove */
	if (file_priv)
		idr_remove(&file_priv->context_idr, ctx->user_handle);
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}
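/**
 * i915_gem_context_reset() - clean up context state after a GPU reset
 * @dev: DRM device
 *
 * In execlists mode each logical ring context is reset in place. In
 * legacy mode the rings drop their last_context reference and the
 * default context is marked uninitialised so that its state is
 * rewritten on the next switch.
 */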
void i915_gem_context_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	if (i915.enable_execlists) {
		struct intel_context *ctx;

		list_for_each_entry(ctx, &dev_priv->context_list, link) {
			intel_lr_context_reset(dev, ctx);
		}

		return;
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];
		struct intel_context *lctx = ring->last_context;

		if (lctx) {
			if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
				i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);

			i915_gem_context_unreference(lctx);
			ring->last_context = NULL;
		}

		/* Force the GPU state to be reinitialised on enabling */
		if (ring->default_context)
			ring->default_context->legacy_hw_ctx.initialized = false;
	}
}

int i915_gem_context_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;
	int i;

	/* Init should only be called once per module load. Eventually the
	 * restriction on the context_disabled check can be loosened. */
	if (WARN_ON(dev_priv->ring[RCS].default_context))
		return 0;

	if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
		if (!i915.enable_execlists) {
			DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
			return -EINVAL;
		}
	}

	if (i915.enable_execlists) {
		/* NB: intentionally left blank. We will allocate our own
		 * backing objects as we need them, thank you very much */
		dev_priv->hw_context_size = 0;
	} else if (HAS_HW_CONTEXTS(dev)) {
		dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
		if (dev_priv->hw_context_size > (1<<20)) {
			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
					 dev_priv->hw_context_size);
			dev_priv->hw_context_size = 0;
		}
	}

	ctx = i915_gem_create_context(dev, NULL);
	if (IS_ERR(ctx)) {
		DRM_ERROR("Failed to create default global context (error %ld)\n",
			  PTR_ERR(ctx));
		return PTR_ERR(ctx);
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];

		/* NB: RCS will hold a ref for all rings */
		ring->default_context = ctx;
	}

	DRM_DEBUG_DRIVER("%s context support initialized\n",
			 i915.enable_execlists ? "LR" :
			 dev_priv->hw_context_size ? "HW" : "fake");
	return 0;
}
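/**
 * i915_gem_context_fini() - unwind i915_gem_context_init() on driver unload
 * @dev: DRM device
 *
 * Resets the GPU to stop it from accessing the legacy context state
 * object, then releases the default context and the references the
 * rings still hold on their last_context.
 */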
void i915_gem_context_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *dctx = dev_priv->ring[RCS].default_context;
	int i;

	if (dctx->legacy_hw_ctx.rcs_state) {
		/* The only known way to stop the gpu from accessing the hw context is
		 * to reset it. Do this as the very last operation to avoid confusing
		 * other code, leading to spurious errors. */
		intel_gpu_reset(dev);

		/* When default context is created and switched to, base object refcount
		 * will be 2 (+1 from object creation and +1 from do_switch()).
		 * i915_gem_context_fini() will be called after gpu_idle() has switched
		 * to default context. So we need to unreference the base object once
		 * to offset the do_switch part, so that i915_gem_context_unreference()
		 * can then free the base object correctly. */
		WARN_ON(!dev_priv->ring[RCS].last_context);
		if (dev_priv->ring[RCS].last_context == dctx) {
			/* Fake switch to NULL context */
			WARN_ON(dctx->legacy_hw_ctx.rcs_state->active);
			i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
			i915_gem_context_unreference(dctx);
			dev_priv->ring[RCS].last_context = NULL;
		}

		i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];

		if (ring->last_context)
			i915_gem_context_unreference(ring->last_context);

		ring->default_context = NULL;
		ring->last_context = NULL;
	}

	i915_gem_context_unreference(dctx);
}

int i915_gem_context_enable(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	int ret;

	if (i915.enable_execlists) {
		if (ring->init_context == NULL)
			return 0;

		ret = ring->init_context(req);
	} else
		ret = i915_switch_context(req);

	if (ret) {
		DRM_ERROR("ring init context: %d\n", ret);
		return ret;
	}

	return 0;
}

static int context_idr_cleanup(int id, void *p, void *data)
{
	struct intel_context *ctx = p;

	i915_gem_context_unreference(ctx);
	return 0;
}

int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;

	idr_init(&file_priv->context_idr);

	mutex_lock(&dev->struct_mutex);
	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(ctx)) {
		idr_destroy(&file_priv->context_idr);
		return PTR_ERR(ctx);
	}

	return 0;
}

void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	idr_destroy(&file_priv->context_idr);
}

struct intel_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
{
	struct intel_context *ctx;

	ctx = (struct intel_context *)idr_find(&file_priv->context_idr, id);
	if (!ctx)
		return ERR_PTR(-ENOENT);

	return ctx;
}
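/*
 * mi_set_context() emits the actual MI_SET_CONTEXT command that points the
 * render ring at the new context state object. On gen7+ the command is
 * bracketed by MI_ARB_ON_OFF, and when semaphore signalling is enabled the
 * other rings' PSMI sleep messages are disabled around the switch
 * (WaProgramMiArbOnOffAroundMiSetContext).
 */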
static inline int
mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
{
	struct intel_engine_cs *ring = req->ring;
	u32 flags = hw_flags | MI_MM_SPACE_GTT;
	const int num_rings =
		/* Use an extended w/a on ivb+ if signalling from other rings */
		i915_semaphore_is_enabled(ring->dev) ?
		hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
		0;
	int len, i, ret;

	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
	 * explicitly, so we rely on the value at ring init, stored in
	 * itlb_before_ctx_switch.
	 */
	if (IS_GEN6(ring->dev)) {
		ret = ring->flush(req, I915_GEM_GPU_DOMAINS, 0);
		if (ret)
			return ret;
	}

	/* These flags are for resource streamer on HSW+ */
	if (IS_HASWELL(ring->dev) || INTEL_INFO(ring->dev)->gen >= 8)
		flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
	else if (INTEL_INFO(ring->dev)->gen < 8)
		flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);

	len = 4;
	if (INTEL_INFO(ring->dev)->gen >= 7)
		len += 2 + (num_rings ? 4*num_rings + 2 : 0);

	ret = intel_ring_begin(req, len);
	if (ret)
		return ret;

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (INTEL_INFO(ring->dev)->gen >= 7) {
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
		if (num_rings) {
			struct intel_engine_cs *signaller;

			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
			for_each_ring(signaller, to_i915(ring->dev), i) {
				if (signaller == ring)
					continue;

				intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
				intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}
		}
	}

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
			flags);
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	intel_ring_emit(ring, MI_NOOP);

	if (INTEL_INFO(ring->dev)->gen >= 7) {
		if (num_rings) {
			struct intel_engine_cs *signaller;

			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
			for_each_ring(signaller, to_i915(ring->dev), i) {
				if (signaller == ring)
					continue;

				intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
				intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}
		}
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
	}

	intel_ring_advance(ring);

	return ret;
}
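/*
 * The three helpers below decide whether a switch can be skipped entirely
 * and, if not, whether the PPGTT page directories must be loaded before or
 * after MI_SET_CONTEXT: pre-gen8 hardware and the non-render rings always
 * load first, while gen8 render loads afterwards when the context restore
 * was inhibited.
 */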
static inline bool should_skip_switch(struct intel_engine_cs *ring,
				      struct intel_context *from,
				      struct intel_context *to)
{
	if (to->remap_slice)
		return false;

	if (to->ppgtt && from == to &&
	    !(intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings))
		return true;

	return false;
}

static bool
needs_pd_load_pre(struct intel_engine_cs *ring, struct intel_context *to)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	if (!to->ppgtt)
		return false;

	if (INTEL_INFO(ring->dev)->gen < 8)
		return true;

	if (ring != &dev_priv->ring[RCS])
		return true;

	return false;
}

static bool
needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
		   u32 hw_flags)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	if (!to->ppgtt)
		return false;

	if (!IS_GEN8(ring->dev))
		return false;

	if (ring != &dev_priv->ring[RCS])
		return false;

	if (hw_flags & MI_RESTORE_INHIBIT)
		return true;

	return false;
}
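/**
 * do_switch() - switch the render ring to a new legacy context
 * @req: request carrying the target context
 *
 * Pins the target context state object, emits the page directory load
 * and MI_SET_CONTEXT as required, performs any outstanding L3 slice
 * remaps, and retires the previous context by moving its state object
 * onto the active list.
 */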
static int do_switch(struct drm_i915_gem_request *req)
{
	struct intel_context *to = req->ctx;
	struct intel_engine_cs *ring = req->ring;
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_context *from = ring->last_context;
	u32 hw_flags = 0;
	bool uninitialized = false;
	int ret, i;

	if (from != NULL && ring == &dev_priv->ring[RCS]) {
		BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
		BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
	}

	if (should_skip_switch(ring, from, to))
		return 0;

	/* Trying to pin first makes error handling easier. */
	if (ring == &dev_priv->ring[RCS]) {
		ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
					    get_context_alignment(ring->dev), 0);
		if (ret)
			return ret;
	}

	/*
	 * Pin can switch back to the default context if we end up calling into
	 * evict_everything - as a last ditch gtt defrag effort that also
	 * switches to the default context. Hence we need to reload from here.
	 */
	from = ring->last_context;

	if (needs_pd_load_pre(ring, to)) {
		/* Older GENs and non render rings still want the load first,
		 * "PP_DCLV followed by PP_DIR_BASE register through Load
		 * Register Immediate commands in Ring Buffer before submitting
		 * a context." */
		trace_switch_mm(ring, to);
		ret = to->ppgtt->switch_mm(to->ppgtt, req);
		if (ret)
			goto unpin_out;

		/* Doing a PD load always reloads the page dirs */
		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
	}

	if (ring != &dev_priv->ring[RCS]) {
		if (from)
			i915_gem_context_unreference(from);
		goto done;
	}

	/*
	 * Clear this page out of any CPU caches for coherent swap-in/out. Note
	 * that thanks to write = false in this call and us not setting any gpu
	 * write domains when putting a context object onto the active list
	 * (when switching away from it), this won't block.
	 *
	 * XXX: We need a real interface to do this instead of trickery.
	 */
	ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
	if (ret)
		goto unpin_out;

	if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) {
		hw_flags |= MI_RESTORE_INHIBIT;
		/* NB: If we inhibit the restore, the context is not allowed to
		 * die because future work may end up depending on valid address
		 * space. This means we must enforce that a page table load
		 * occur when this occurs. */
	} else if (to->ppgtt &&
		   (intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings)) {
		hw_flags |= MI_FORCE_RESTORE;
		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
	}

	/* We should never emit switch_mm more than once */
	WARN_ON(needs_pd_load_pre(ring, to) &&
		needs_pd_load_post(ring, to, hw_flags));

	ret = mi_set_context(req, hw_flags);
	if (ret)
		goto unpin_out;

	/* GEN8 does *not* require an explicit reload if the PDPs have been
	 * setup, and we do not wish to move them.
	 */
	if (needs_pd_load_post(ring, to, hw_flags)) {
		trace_switch_mm(ring, to);
		ret = to->ppgtt->switch_mm(to->ppgtt, req);
		/* The hardware context switch is emitted, but we haven't
		 * actually changed the state - so it's probably safe to bail
		 * here. Still, let the user know something dangerous has
		 * happened.
		 */
		if (ret) {
			DRM_ERROR("Failed to change address space on context switch\n");
			goto unpin_out;
		}
	}

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(to->remap_slice & (1<<i)))
			continue;

		ret = i915_gem_l3_remap(req, i);
		/* If it failed, try again next round */
		if (ret)
			DRM_DEBUG_DRIVER("L3 remapping failed\n");
		else
			to->remap_slice &= ~(1<<i);
	}

	/* The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. In fact, the below code
	 * is a bit suboptimal because the retiring can occur simply after the
	 * MI_SET_CONTEXT instead of when the next seqno has completed.
	 */
	if (from != NULL) {
		from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req);
		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
		 * whole damn pipeline, we don't need to explicitly mark the
		 * object dirty. The only exception is that the context must be
		 * correct in case the object gets swapped out. Ideally we'd be
		 * able to defer doing this until we know the object would be
		 * swapped, but there is no way to do that yet.
		 */
		from->legacy_hw_ctx.rcs_state->dirty = 1;

		/* obj is kept alive until the next request by its active ref */
		i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
		i915_gem_context_unreference(from);
	}

	uninitialized = !to->legacy_hw_ctx.initialized;
	to->legacy_hw_ctx.initialized = true;

done:
	i915_gem_context_reference(to);
	ring->last_context = to;

	if (uninitialized) {
		if (ring->init_context) {
			ret = ring->init_context(req);
			if (ret)
				DRM_ERROR("ring init context: %d\n", ret);
		}
	}

	return 0;

unpin_out:
	if (ring->id == RCS)
		i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
	return ret;
}
/**
 * i915_switch_context() - perform a GPU context switch.
 * @req: request for which we'll execute the context switch
 *
 * The context life cycle is simple. The context refcount is incremented and
 * decremented by 1 on create and destroy. If the context is in use by the GPU,
 * it will have a refcount > 1. This allows us to destroy the context abstract
 * object while letting the normal object tracking destroy the backing BO.
 *
 * This function should not be used in execlists mode. Instead the context is
 * switched by writing to the ELSP and requests keep a reference to their
 * context.
 */
int i915_switch_context(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	WARN_ON(i915.enable_execlists);
	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

	if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
		if (req->ctx != ring->last_context) {
			i915_gem_context_reference(req->ctx);
			if (ring->last_context)
				i915_gem_context_unreference(ring->last_context);
			ring->last_context = req->ctx;
		}
		return 0;
	}

	return do_switch(req);
}

static bool contexts_enabled(struct drm_device *dev)
{
	return i915.enable_execlists || to_i915(dev)->hw_context_size;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_gem_context_create *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;
	int ret;

	if (!contexts_enabled(dev))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	args->ctx_id = ctx->user_handle;
	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);

	return 0;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;
	int ret;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
		return -ENOENT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	idr_remove(&ctx->file_priv->context_idr, ctx->user_handle);
	i915_gem_context_unreference(ctx);
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
	return 0;
}

int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct intel_context *ctx;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	args->size = 0;
	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		args->value = ctx->hang_stats.ban_period_seconds;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
		break;
	case I915_CONTEXT_PARAM_GTT_SIZE:
		if (ctx->ppgtt)
			args->value = ctx->ppgtt->base.total;
		else if (to_i915(dev)->mm.aliasing_ppgtt)
			args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
		else
			args->value = to_i915(dev)->gtt.base.total;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
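/*
 * For reference, userspace reads and writes these parameters through
 * DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM/SETPARAM (a minimal, hypothetical
 * sketch; error handling omitted). Note that only lowering the ban
 * period below its current value requires CAP_SYS_ADMIN:
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_BAN_PERIOD,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
 *	p.value *= 2;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */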
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct intel_context *ctx;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		if (args->size)
			ret = -EINVAL;
		else if (args->value < ctx->hang_stats.ban_period_seconds &&
			 !capable(CAP_SYS_ADMIN))
			ret = -EPERM;
		else
			ctx->hang_stats.ban_period_seconds = args->value;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		if (args->size) {
			ret = -EINVAL;
		} else {
			ctx->flags &= ~CONTEXT_NO_ZEROMAP;
			ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}