/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of
 * an opaque GPU object which is referenced at times of context saves and
 * restores. With RC6 enabled, the context is also referenced as the GPU enters
 * and exits RC6 (the GPU has its own internal power context, except on gen5).
 * Though something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU
 * state. The default context only exists to give the GPU some offset to load
 * as the current context in order to invoke a save of the context we actually
 * care about. In fact, the code could likely be constructed, albeit in a more
 * complicated fashion, to never use the default context, though that limits
 * the driver's ability to swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These
 * contexts store GPU state, and thus allow GPU clients to not re-emit state
 * (and potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware and object
 * tracking work. Below is a very crude representation of the state machine
 * describing the context life.
 *                                          refcount     pincount     active
 * S0: initial state                           0            0            0
 * S1: context created                         1            0            0
 * S2: context is currently running            2            1            X
 * S3: GPU referenced, but not current         2            0            1
 * S4: context is current, but destroyed       1            1            0
 * S5: like S3, but destroyed                  1            0            1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits an execbuf with a different context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and
 *  is on the active list waiting for the next context switch to occur. Until
 *  this happens, the object must remain at the same gtt offset. It is
 *  therefore possible to destroy a context, but it is still active.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"

/* This is a HW constraint. The value below is the largest known requirement
 * I've seen in a spec to date, and that was a workaround for a non-shipping
 * part. It should be safe to decrease this, but it's more future proof as is.
 */
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096

static size_t get_context_alignment(struct drm_device *dev)
{
	if (IS_GEN6(dev))
		return GEN6_CONTEXT_ALIGN;

	return GEN7_CONTEXT_ALIGN;
}

static int get_context_size(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	u32 reg;

	switch (INTEL_INFO(dev)->gen) {
	case 6:
		reg = I915_READ(CXT_SIZE);
		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 7:
		reg = I915_READ(GEN7_CXT_SIZE);
		if (IS_HASWELL(dev))
			ret = HSW_CXT_TOTAL_SIZE;
		else
			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 8:
		ret = GEN8_CXT_TOTAL_SIZE;
		break;
	default:
		BUG();
	}

	return ret;
}

static void i915_gem_context_clean(struct intel_context *ctx)
{
	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
	struct i915_vma *vma, *next;

	if (!ppgtt)
		return;

	list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
				 vm_link) {
		if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
			break;
	}
}

void i915_gem_context_free(struct kref *ctx_ref)
{
	struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);

	trace_i915_context_free(ctx);

	if (i915.enable_execlists)
		intel_lr_context_free(ctx);

	/*
	 * This context is going away and we need to remove all VMAs still
	 * around. This is to handle imported shared objects for which the
	 * destructor did not run when their handles were closed.
	 */
	i915_gem_context_clean(ctx);

	i915_ppgtt_put(ctx->ppgtt);

	if (ctx->legacy_hw_ctx.rcs_state)
		drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
	list_del(&ctx->link);
	kfree(ctx);
}

struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
{
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 *
	 * Snooping is required on non-llc platforms in execlist
	 * mode, but since all GGTT accesses use PAT entry 0 we
	 * get snooping anyway regardless of cache_level.
	 *
	 * This is only applicable for Ivy Bridge devices since
	 * later platforms don't have L3 control bits in the PTE.
	 */
	if (IS_IVYBRIDGE(dev)) {
		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
		/* Failure shouldn't ever happen this early */
		if (WARN_ON(ret)) {
			drm_gem_object_unreference(&obj->base);
			return ERR_PTR(ret);
		}
	}

	return obj;
}

static struct intel_context *
__create_hw_context(struct drm_device *dev,
		    struct drm_i915_file_private *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&ctx->ref);
	list_add_tail(&ctx->link, &dev_priv->context_list);
	ctx->i915 = dev_priv;

	if (dev_priv->hw_context_size) {
		struct drm_i915_gem_object *obj =
			i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
		if (IS_ERR(obj)) {
			ret = PTR_ERR(obj);
			goto err_out;
		}
		ctx->legacy_hw_ctx.rcs_state = obj;
	}

	/* Default context will never have a file_priv */
	if (file_priv != NULL) {
		ret = idr_alloc(&file_priv->context_idr, ctx,
				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
	} else
		ret = DEFAULT_CONTEXT_HANDLE;

	ctx->file_priv = file_priv;
	ctx->user_handle = ret;
	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;

	ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;

	return ctx;

err_out:
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}

/**
 * The default context needs to exist per ring that uses contexts. It stores
 * the context state of the GPU for applications that don't utilize HW
 * contexts, as well as an idle case.
 */
static struct intel_context *
i915_gem_create_context(struct drm_device *dev,
			struct drm_i915_file_private *file_priv)
{
	const bool is_global_default_ctx = file_priv == NULL;
	struct intel_context *ctx;
	int ret = 0;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	ctx = __create_hw_context(dev, file_priv);
	if (IS_ERR(ctx))
		return ctx;

	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
		/* We may need to do things with the shrinker which
		 * require us to immediately switch back to the default
		 * context. This can cause a problem as pinning the
		 * default context also requires GTT space which may not
		 * be available. To avoid this we always pin the default
		 * context.
		 */
		ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
					    get_context_alignment(dev), 0);
		if (ret) {
			DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
			goto err_destroy;
		}
	}

	if (USES_FULL_PPGTT(dev)) {
		struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);

		if (IS_ERR_OR_NULL(ppgtt)) {
			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
					 PTR_ERR(ppgtt));
			ret = PTR_ERR(ppgtt);
			goto err_unpin;
		}

		ctx->ppgtt = ppgtt;
	}

	trace_i915_context_create(ctx);

	return ctx;

err_unpin:
	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
		i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
err_destroy:
	idr_remove(&file_priv->context_idr, ctx->user_handle);
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}

static void i915_gem_context_unpin(struct intel_context *ctx,
				   struct intel_engine_cs *engine)
{
	if (i915.enable_execlists) {
		intel_lr_context_unpin(ctx, engine);
	} else {
		if (engine->id == RCS && ctx->legacy_hw_ctx.rcs_state)
			i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
		i915_gem_context_unreference(ctx);
	}
}

void i915_gem_context_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	if (i915.enable_execlists) {
		struct intel_context *ctx;

		list_for_each_entry(ctx, &dev_priv->context_list, link)
			intel_lr_context_reset(dev_priv, ctx);
	}

	for (i = 0; i < I915_NUM_ENGINES; i++) {
		struct intel_engine_cs *engine = &dev_priv->engine[i];

		if (engine->last_context) {
			i915_gem_context_unpin(engine->last_context, engine);
			engine->last_context = NULL;
		}
	}

	/* Force the GPU state to be reinitialised on enabling */
	dev_priv->kernel_context->legacy_hw_ctx.initialized = false;
}

int i915_gem_context_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;

	/* Init should only be called once per module load. Eventually the
	 * restriction on the context_disabled check can be loosened. */
	if (WARN_ON(dev_priv->kernel_context))
		return 0;

	if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
		if (!i915.enable_execlists) {
			DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
			return -EINVAL;
		}
	}

	if (i915.enable_execlists) {
		/* NB: intentionally left blank. We will allocate our own
		 * backing objects as we need them, thank you very much */
		dev_priv->hw_context_size = 0;
	} else if (HAS_HW_CONTEXTS(dev)) {
		dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
		if (dev_priv->hw_context_size > (1<<20)) {
			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
					 dev_priv->hw_context_size);
			dev_priv->hw_context_size = 0;
		}
	}

	ctx = i915_gem_create_context(dev, NULL);
	if (IS_ERR(ctx)) {
		DRM_ERROR("Failed to create default global context (error %ld)\n",
			  PTR_ERR(ctx));
		return PTR_ERR(ctx);
	}

	dev_priv->kernel_context = ctx;

	DRM_DEBUG_DRIVER("%s context support initialized\n",
			 i915.enable_execlists ? "LR" :
			 dev_priv->hw_context_size ? "HW" : "fake");
	return 0;
}

void i915_gem_context_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *dctx = dev_priv->kernel_context;
	int i;

	if (dctx->legacy_hw_ctx.rcs_state) {
		/* The only known way to stop the gpu from accessing the hw context is
		 * to reset it. Do this as the very last operation to avoid confusing
		 * other code, leading to spurious errors. */
		intel_gpu_reset(dev, ALL_ENGINES);

		/* When default context is created and switched to, base object refcount
		 * will be 2 (+1 from object creation and +1 from do_switch()).
		 * i915_gem_context_fini() will be called after gpu_idle() has switched
		 * to default context. So we need to unreference the base object once
		 * to offset the do_switch part, so that i915_gem_context_unreference()
		 * can then free the base object correctly. */
		WARN_ON(!dev_priv->engine[RCS].last_context);

		i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
	}

	for (i = I915_NUM_ENGINES; --i >= 0;) {
		struct intel_engine_cs *engine = &dev_priv->engine[i];

		if (engine->last_context) {
			i915_gem_context_unpin(engine->last_context, engine);
			engine->last_context = NULL;
		}
	}

	i915_gem_context_unreference(dctx);
	dev_priv->kernel_context = NULL;
}

int i915_gem_context_enable(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	int ret;

	if (i915.enable_execlists) {
		if (engine->init_context == NULL)
			return 0;

		ret = engine->init_context(req);
	} else
		ret = i915_switch_context(req);

	if (ret) {
		DRM_ERROR("ring init context: %d\n", ret);
		return ret;
	}

	return 0;
}

static int context_idr_cleanup(int id, void *p, void *data)
{
	struct intel_context *ctx = p;

	i915_gem_context_unreference(ctx);
	return 0;
}

int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;

	idr_init(&file_priv->context_idr);

	mutex_lock(&dev->struct_mutex);
	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(ctx)) {
		idr_destroy(&file_priv->context_idr);
		return PTR_ERR(ctx);
	}

	return 0;
}

void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	idr_destroy(&file_priv->context_idr);
}
struct intel_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
{
	struct intel_context *ctx;

	ctx = (struct intel_context *)idr_find(&file_priv->context_idr, id);
	if (!ctx)
		return ERR_PTR(-ENOENT);

	return ctx;
}

static inline int
mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
{
	struct intel_engine_cs *engine = req->engine;
	u32 flags = hw_flags | MI_MM_SPACE_GTT;
	const int num_rings =
		/* Use an extended w/a on ivb+ if signalling from other rings */
		i915_semaphore_is_enabled(engine->dev) ?
		hweight32(INTEL_INFO(engine->dev)->ring_mask) - 1 :
		0;
	int len, ret;

	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
	 * explicitly, so we rely on the value at ring init, stored in
	 * itlb_before_ctx_switch.
	 */
	if (IS_GEN6(engine->dev)) {
		ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
		if (ret)
			return ret;
	}

	/* These flags are for resource streamer on HSW+ */
	if (IS_HASWELL(engine->dev) || INTEL_INFO(engine->dev)->gen >= 8)
		flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
	else if (INTEL_INFO(engine->dev)->gen < 8)
		flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);

	len = 4;
	if (INTEL_INFO(engine->dev)->gen >= 7)
		len += 2 + (num_rings ? 4*num_rings + 6 : 0);

	ret = intel_ring_begin(req, len);
	if (ret)
		return ret;

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (INTEL_INFO(engine->dev)->gen >= 7) {
		intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE);
		if (num_rings) {
			struct intel_engine_cs *signaller;

			intel_ring_emit(engine,
					MI_LOAD_REGISTER_IMM(num_rings));
			for_each_engine(signaller, to_i915(engine->dev)) {
				if (signaller == engine)
					continue;

				intel_ring_emit_reg(engine,
						    RING_PSMI_CTL(signaller->mmio_base));
				intel_ring_emit(engine,
						_MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}
		}
	}

	intel_ring_emit(engine, MI_NOOP);
	intel_ring_emit(engine, MI_SET_CONTEXT);
	intel_ring_emit(engine,
			i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
			flags);
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	intel_ring_emit(engine, MI_NOOP);

	if (INTEL_INFO(engine->dev)->gen >= 7) {
		if (num_rings) {
			struct intel_engine_cs *signaller;
			i915_reg_t last_reg = {}; /* keep gcc quiet */

			intel_ring_emit(engine,
					MI_LOAD_REGISTER_IMM(num_rings));
			for_each_engine(signaller, to_i915(engine->dev)) {
				if (signaller == engine)
					continue;

				last_reg = RING_PSMI_CTL(signaller->mmio_base);
				intel_ring_emit_reg(engine, last_reg);
				intel_ring_emit(engine,
						_MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}

			/* Insert a delay before the next switch! */
			intel_ring_emit(engine,
					MI_STORE_REGISTER_MEM |
					MI_SRM_LRM_GLOBAL_GTT);
			intel_ring_emit_reg(engine, last_reg);
			intel_ring_emit(engine, engine->scratch.gtt_offset);
			intel_ring_emit(engine, MI_NOOP);
		}
		intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_ENABLE);
	}

	intel_ring_advance(engine);

	return ret;
}
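
/*
 * As a rough illustration of the command stream mi_set_context() builds
 * (assuming a gen7 part with semaphores enabled and two other rings, i.e.
 * num_rings == 2), len works out to 4 + 2 + 4*2 + 6 = 20 dwords:
 *
 *	MI_ARB_ON_OFF | MI_ARB_DISABLE
 *	MI_LOAD_REGISTER_IMM(2)
 *		RING_PSMI_CTL(ring), _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE)   (x2)
 *	MI_NOOP
 *	MI_SET_CONTEXT
 *	ggtt offset of the context image | flags
 *	MI_NOOP
 *	MI_LOAD_REGISTER_IMM(2)
 *		RING_PSMI_CTL(ring), _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE)  (x2)
 *	MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT
 *	last_reg
 *	engine->scratch.gtt_offset
 *	MI_NOOP
 *	MI_ARB_ON_OFF | MI_ARB_ENABLE
 */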

static inline bool skip_rcs_switch(struct intel_engine_cs *engine,
				   struct intel_context *to)
{
	if (to->remap_slice)
		return false;

	if (!to->legacy_hw_ctx.initialized)
		return false;

	if (to->ppgtt &&
	    !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
		return false;

	return to == engine->last_context;
}

static bool
needs_pd_load_pre(struct intel_engine_cs *engine, struct intel_context *to)
{
	if (!to->ppgtt)
		return false;

	if (engine->last_context == to &&
	    !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
		return false;

	if (engine->id != RCS)
		return true;

	if (INTEL_INFO(engine->dev)->gen < 8)
		return true;

	return false;
}

static bool
needs_pd_load_post(struct intel_context *to, u32 hw_flags)
{
	if (!to->ppgtt)
		return false;

	if (!IS_GEN8(to->i915))
		return false;

	if (hw_flags & MI_RESTORE_INHIBIT)
		return true;

	return false;
}
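
/*
 * Summary of the two helpers above (restating the checks they already make,
 * not adding policy): a pre-switch PD load is needed for non-render rings and
 * for gen6/7 render, unless the engine is already running this context and
 * its page directories are clean; a post-switch PD load is only needed on
 * gen8 legacy mode when the restore was inhibited, since an uninhibited
 * restore brings valid PDPs in with the context image.
 */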

static int do_rcs_switch(struct drm_i915_gem_request *req)
{
	struct intel_context *to = req->ctx;
	struct intel_engine_cs *engine = req->engine;
	struct intel_context *from;
	u32 hw_flags;
	int ret, i;

	if (skip_rcs_switch(engine, to))
		return 0;

	/* Trying to pin first makes error handling easier. */
	ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
				    get_context_alignment(engine->dev),
				    0);
	if (ret)
		return ret;

	/*
	 * Pin can switch back to the default context if we end up calling into
	 * evict_everything - as a last ditch gtt defrag effort that also
	 * switches to the default context. Hence we need to reload from here.
	 *
	 * XXX: Doing so is painfully broken!
	 */
	from = engine->last_context;

	/*
	 * Clear this page out of any CPU caches for coherent swap-in/out. Note
	 * that thanks to write = false in this call and us not setting any gpu
	 * write domains when putting a context object onto the active list
	 * (when switching away from it), this won't block.
	 *
	 * XXX: We need a real interface to do this instead of trickery.
	 */
	ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
	if (ret)
		goto unpin_out;

	if (needs_pd_load_pre(engine, to)) {
		/* Older GENs and non render rings still want the load first,
		 * "PP_DCLV followed by PP_DIR_BASE register through Load
		 * Register Immediate commands in Ring Buffer before submitting
		 * a context." */
		trace_switch_mm(engine, to);
		ret = to->ppgtt->switch_mm(to->ppgtt, req);
		if (ret)
			goto unpin_out;
	}

	if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
		/* NB: If we inhibit the restore, the context is not allowed to
		 * die because future work may end up depending on valid address
		 * space. This means we must enforce that a page table load
		 * occurs in this case. */
		hw_flags = MI_RESTORE_INHIBIT;
	else if (to->ppgtt &&
		 intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)
		hw_flags = MI_FORCE_RESTORE;
	else
		hw_flags = 0;

	/* We should never emit switch_mm more than once */
	WARN_ON(needs_pd_load_pre(engine, to) &&
		needs_pd_load_post(to, hw_flags));

	if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
		ret = mi_set_context(req, hw_flags);
		if (ret)
			goto unpin_out;
	}

	/* The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. In fact, the below code
	 * is a bit suboptimal because the retiring can occur simply after the
	 * MI_SET_CONTEXT instead of when the next seqno has completed.
	 */
	if (from != NULL) {
		from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req);
		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
		 * whole damn pipeline, we don't need to explicitly mark the
		 * object dirty. The only exception is that the context must be
		 * correct in case the object gets swapped out. Ideally we'd be
		 * able to defer doing this until we know the object would be
		 * swapped, but there is no way to do that yet.
		 */
		from->legacy_hw_ctx.rcs_state->dirty = 1;

		/* obj is kept alive until the next request by its active ref */
		i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
		i915_gem_context_unreference(from);
	}
	i915_gem_context_reference(to);
	engine->last_context = to;

	/* GEN8 does *not* require an explicit reload if the PDPs have been
	 * setup, and we do not wish to move them.
	 */
	if (needs_pd_load_post(to, hw_flags)) {
		trace_switch_mm(engine, to);
		ret = to->ppgtt->switch_mm(to->ppgtt, req);
		/* The hardware context switch is emitted, but we haven't
		 * actually changed the state - so it's probably safe to bail
		 * here. Still, let the user know something dangerous has
		 * happened.
		 */
		if (ret)
			return ret;
	}

	if (to->ppgtt)
		to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(to->remap_slice & (1 << i)))
			continue;

		ret = i915_gem_l3_remap(req, i);
		if (ret)
			return ret;

		to->remap_slice &= ~(1 << i);
	}

	if (!to->legacy_hw_ctx.initialized) {
		if (engine->init_context) {
			ret = engine->init_context(req);
			if (ret)
				return ret;
		}
		to->legacy_hw_ctx.initialized = true;
	}

	return 0;

unpin_out:
	i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
	return ret;
}

/**
 * i915_switch_context() - perform a GPU context switch.
 * @req: request for which we'll execute the context switch
 *
 * The context life cycle is simple. The context refcount is incremented and
 * decremented by 1 on create and destroy. If the context is in use by the GPU,
 * it will have a refcount > 1. This allows us to destroy the context abstract
 * object while letting the normal object tracking destroy the backing BO.
 *
 * This function should not be used in execlists mode. Instead the context is
 * switched by writing to the ELSP and requests keep a reference to their
 * context.
 */
int i915_switch_context(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	struct drm_i915_private *dev_priv = req->i915;

	WARN_ON(i915.enable_execlists);
	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

	if (engine->id != RCS ||
	    req->ctx->legacy_hw_ctx.rcs_state == NULL) {
		struct intel_context *to = req->ctx;

		if (needs_pd_load_pre(engine, to)) {
			int ret;

			trace_switch_mm(engine, to);
			ret = to->ppgtt->switch_mm(to->ppgtt, req);
			if (ret)
				return ret;

			/* Doing a PD load always reloads the page dirs */
			to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
		}

		if (to != engine->last_context) {
			i915_gem_context_reference(to);
			if (engine->last_context)
				i915_gem_context_unreference(engine->last_context);
			engine->last_context = to;
		}

		return 0;
	}

	return do_rcs_switch(req);
}

static bool contexts_enabled(struct drm_device *dev)
{
	return i915.enable_execlists || to_i915(dev)->hw_context_size;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_gem_context_create *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;
	int ret;

	if (!contexts_enabled(dev))
		return -ENODEV;

	if (args->pad != 0)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	args->ctx_id = ctx->user_handle;
	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);

	return 0;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;
	int ret;

	if (args->pad != 0)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
		return -ENOENT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	idr_remove(&ctx->file_priv->context_idr, ctx->user_handle);
	i915_gem_context_unreference(ctx);
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
	return 0;
}

int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct intel_context *ctx;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	args->size = 0;
	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		args->value = ctx->hang_stats.ban_period_seconds;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
		break;
	case I915_CONTEXT_PARAM_GTT_SIZE:
		if (ctx->ppgtt)
			args->value = ctx->ppgtt->base.total;
		else if (to_i915(dev)->mm.aliasing_ppgtt)
			args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
		else
			args->value = to_i915(dev)->ggtt.base.total;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct intel_context *ctx;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		if (args->size)
			ret = -EINVAL;
		else if (args->value < ctx->hang_stats.ban_period_seconds &&
			 !capable(CAP_SYS_ADMIN))
			ret = -EPERM;
		else
			ctx->hang_stats.ban_period_seconds = args->value;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		if (args->size) {
			ret = -EINVAL;
		} else {
			ctx->flags &= ~CONTEXT_NO_ZEROMAP;
			ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
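
/*
 * For reference, a minimal userspace sketch of how a client drives the ioctls
 * above through libdrm (hypothetical snippet; error handling omitted, and "fd"
 * is assumed to be an open i915 DRM file descriptor):
 *
 *	struct drm_i915_gem_context_create create = { 0 };
 *	struct drm_i915_gem_context_param param = { 0 };
 *	struct drm_i915_gem_context_destroy destroy = { 0 };
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
 *	// create.ctx_id now names the new context; execbuffers reference it
 *	// via drm_i915_gem_execbuffer2.rsvd1.
 *
 *	param.ctx_id = create.ctx_id;
 *	param.param = I915_CONTEXT_PARAM_GTT_SIZE;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &param);
 *	// param.value now holds the GTT size visible to this context.
 *
 *	destroy.ctx_id = create.ctx_id;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
 */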