/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * from RC6 (the GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current context in order to invoke a save of the context we actually care
 * about. In fact, the code could likely be constructed, albeit in a more
 * complicated fashion, to never use the default context, though that limits
 * the driver's ability to swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These contexts
 * store GPU state, and thus allow GPU clients to not re-emit state (and
 * potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware and object
 * tracking work. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits execbuf with context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and is
 *  on the active list waiting for the next context switch to occur. Until this
 *  happens, the object must remain at the same gtt offset. It is therefore
 *  possible to destroy a context, but it is still active.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"

/* This is a HW constraint. The value below is the largest known requirement
 * I've seen in a spec to date, and that was a workaround for a non-shipping
 * part. It should be safe to decrease this, but it's more future proof as is.
 */
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096

static size_t get_context_alignment(struct drm_device *dev)
{
	if (IS_GEN6(dev))
		return GEN6_CONTEXT_ALIGN;

	return GEN7_CONTEXT_ALIGN;
}

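/*
 * Query how much space the hardware needs for a render context image: read
 * the gen6/gen7 CXT_SIZE registers (the reported value is scaled by 64
 * bytes), or use the fixed Haswell/gen8 totals.
 */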
static int get_context_size(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	u32 reg;

	switch (INTEL_INFO(dev)->gen) {
	case 6:
		reg = I915_READ(CXT_SIZE);
		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 7:
		reg = I915_READ(GEN7_CXT_SIZE);
		if (IS_HASWELL(dev))
			ret = HSW_CXT_TOTAL_SIZE;
		else
			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 8:
		ret = GEN8_CXT_TOTAL_SIZE;
		break;
	default:
		BUG();
	}

	return ret;
}

void i915_gem_context_free(struct kref *ctx_ref)
{
	struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);

	trace_i915_context_free(ctx);

	if (i915.enable_execlists)
		intel_lr_context_free(ctx);

	i915_ppgtt_put(ctx->ppgtt);

	if (ctx->legacy_hw_ctx.rcs_state)
		drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
	list_del(&ctx->link);
	kfree(ctx);
}

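/*
 * Allocate the GEM object that backs a context image. On gen7+ (other than
 * Valleyview) the object is placed at the L3+LLC cache level so the context
 * image can also make use of L3.
 */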
struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
{
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 */
	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
		/* Failure shouldn't ever happen this early */
		if (WARN_ON(ret)) {
			drm_gem_object_unreference(&obj->base);
			return ERR_PTR(ret);
		}
	}

	return obj;
}

static struct intel_context *
__create_hw_context(struct drm_device *dev,
		    struct drm_i915_file_private *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&ctx->ref);
	list_add_tail(&ctx->link, &dev_priv->context_list);
	ctx->i915 = dev_priv;

	if (dev_priv->hw_context_size) {
		struct drm_i915_gem_object *obj =
				i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
		if (IS_ERR(obj)) {
			ret = PTR_ERR(obj);
			goto err_out;
		}
		ctx->legacy_hw_ctx.rcs_state = obj;
	}

	/* Default context will never have a file_priv */
	if (file_priv != NULL) {
		ret = idr_alloc(&file_priv->context_idr, ctx,
				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
	} else
		ret = DEFAULT_CONTEXT_HANDLE;

	ctx->file_priv = file_priv;
	ctx->user_handle = ret;
	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;

	ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;

	return ctx;

err_out:
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}

/**
 * The default context needs to exist per ring that uses contexts. It stores the
 * context state of the GPU for applications that don't utilize HW contexts, as
 * well as an idle case.
 */
static struct intel_context *
i915_gem_create_context(struct drm_device *dev,
			struct drm_i915_file_private *file_priv)
{
	const bool is_global_default_ctx = file_priv == NULL;
	struct intel_context *ctx;
	int ret = 0;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	ctx = __create_hw_context(dev, file_priv);
	if (IS_ERR(ctx))
		return ctx;

	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
		/* We may need to do things with the shrinker which
		 * require us to immediately switch back to the default
		 * context. This can cause a problem as pinning the
		 * default context also requires GTT space which may not
		 * be available. To avoid this we always pin the default
		 * context.
		 */
		ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
					    get_context_alignment(dev), 0);
		if (ret) {
			DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
			goto err_destroy;
		}
	}

	if (USES_FULL_PPGTT(dev)) {
		struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);

		if (IS_ERR_OR_NULL(ppgtt)) {
			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
					 PTR_ERR(ppgtt));
			ret = PTR_ERR(ppgtt);
			goto err_unpin;
		}

		ctx->ppgtt = ppgtt;
	}

	trace_i915_context_create(ctx);

	return ctx;

err_unpin:
	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
		i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
err_destroy:
	idr_remove(&file_priv->context_idr, ctx->user_handle);
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}

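/*
 * Called on GPU reset: drop each ring's last_context reference (and the RCS
 * pin on its context image) so the next submission performs a full context
 * switch. In execlists mode the logical ring contexts are reset instead.
 */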
void i915_gem_context_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	if (i915.enable_execlists) {
		struct intel_context *ctx;

		list_for_each_entry(ctx, &dev_priv->context_list, link) {
			intel_lr_context_reset(dev, ctx);
		}

		return;
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];
		struct intel_context *lctx = ring->last_context;

		if (lctx) {
			if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
				i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);

			i915_gem_context_unreference(lctx);
			ring->last_context = NULL;
		}
	}
}

int i915_gem_context_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;
	int i;

	/* Init should only be called once per module load. Eventually the
	 * restriction on the context_disabled check can be loosened. */
	if (WARN_ON(dev_priv->ring[RCS].default_context))
		return 0;

	if (i915.enable_execlists) {
		/* NB: intentionally left blank. We will allocate our own
		 * backing objects as we need them, thank you very much */
		dev_priv->hw_context_size = 0;
	} else if (HAS_HW_CONTEXTS(dev)) {
		dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
		if (dev_priv->hw_context_size > (1<<20)) {
			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
					 dev_priv->hw_context_size);
			dev_priv->hw_context_size = 0;
		}
	}

	ctx = i915_gem_create_context(dev, NULL);
	if (IS_ERR(ctx)) {
		DRM_ERROR("Failed to create default global context (error %ld)\n",
			  PTR_ERR(ctx));
		return PTR_ERR(ctx);
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];

		/* NB: RCS will hold a ref for all rings */
		ring->default_context = ctx;
	}

	DRM_DEBUG_DRIVER("%s context support initialized\n",
			i915.enable_execlists ? "LR" :
			dev_priv->hw_context_size ? "HW" : "fake");
	return 0;
}

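/*
 * Tear down the default context on driver unload: reset the GPU so it stops
 * referencing the default context image, then drop the pin and the
 * references taken on behalf of each ring.
 */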
void i915_gem_context_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *dctx = dev_priv->ring[RCS].default_context;
	int i;

	if (dctx->legacy_hw_ctx.rcs_state) {
		/* The only known way to stop the gpu from accessing the hw context is
		 * to reset it. Do this as the very last operation to avoid confusing
		 * other code, leading to spurious errors. */
		intel_gpu_reset(dev);

		/* When default context is created and switched to, base object refcount
		 * will be 2 (+1 from object creation and +1 from do_switch()).
		 * i915_gem_context_fini() will be called after gpu_idle() has switched
		 * to default context. So we need to unreference the base object once
		 * to offset the do_switch part, so that i915_gem_context_unreference()
		 * can then free the base object correctly. */
		WARN_ON(!dev_priv->ring[RCS].last_context);
		if (dev_priv->ring[RCS].last_context == dctx) {
			/* Fake switch to NULL context */
			WARN_ON(dctx->legacy_hw_ctx.rcs_state->active);
			i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
			i915_gem_context_unreference(dctx);
			dev_priv->ring[RCS].last_context = NULL;
		}

		i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];

		if (ring->last_context)
			i915_gem_context_unreference(ring->last_context);

		ring->default_context = NULL;
		ring->last_context = NULL;
	}

	i915_gem_context_unreference(dctx);
}

int i915_gem_context_enable(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	int ret;

	if (i915.enable_execlists) {
		if (ring->init_context == NULL)
			return 0;

		ret = ring->init_context(req);
	} else
		ret = i915_switch_context(req);

	if (ret) {
		DRM_ERROR("ring init context: %d\n", ret);
		return ret;
	}

	return 0;
}

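/* idr_for_each() callback: drop the reference the file's idr holds on a context */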
static int context_idr_cleanup(int id, void *p, void *data)
{
	struct intel_context *ctx = p;

	i915_gem_context_unreference(ctx);
	return 0;
}

int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;

	idr_init(&file_priv->context_idr);

	mutex_lock(&dev->struct_mutex);
	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(ctx)) {
		idr_destroy(&file_priv->context_idr);
		return PTR_ERR(ctx);
	}

	return 0;
}

void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	idr_destroy(&file_priv->context_idr);
}

struct intel_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
{
	struct intel_context *ctx;

	ctx = (struct intel_context *)idr_find(&file_priv->context_idr, id);
	if (!ctx)
		return ERR_PTR(-ENOENT);

	return ctx;
}

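/*
 * Emit the MI_SET_CONTEXT command to switch the render ring to the request's
 * context image, bracketed by the MI_ARB_ON_OFF and PSMI sleep-message
 * workarounds required on gen7+.
 */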
static inline int
mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
{
	struct intel_engine_cs *ring = req->ring;
	u32 flags = hw_flags | MI_MM_SPACE_GTT;
	const int num_rings =
		/* Use an extended w/a on ivb+ if signalling from other rings */
		i915_semaphore_is_enabled(ring->dev) ?
		hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
		0;
	int len, i, ret;

	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
	 * explicitly, so we rely on the value at ring init, stored in
	 * itlb_before_ctx_switch.
	 */
	if (IS_GEN6(ring->dev)) {
		ret = ring->flush(req, I915_GEM_GPU_DOMAINS, 0);
		if (ret)
			return ret;
	}

	/* These flags are for resource streamer on HSW+ */
	if (IS_HASWELL(ring->dev) || INTEL_INFO(ring->dev)->gen >= 8)
		flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
	else if (INTEL_INFO(ring->dev)->gen < 8)
		flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);

	len = 4;
	if (INTEL_INFO(ring->dev)->gen >= 7)
		len += 2 + (num_rings ? 4*num_rings + 2 : 0);

	ret = intel_ring_begin(req, len);
	if (ret)
		return ret;

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (INTEL_INFO(ring->dev)->gen >= 7) {
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
		if (num_rings) {
			struct intel_engine_cs *signaller;

			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
			for_each_ring(signaller, to_i915(ring->dev), i) {
				if (signaller == ring)
					continue;

				intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
				intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}
		}
	}

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
			flags);
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	intel_ring_emit(ring, MI_NOOP);

	if (INTEL_INFO(ring->dev)->gen >= 7) {
		if (num_rings) {
			struct intel_engine_cs *signaller;

			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
			for_each_ring(signaller, to_i915(ring->dev), i) {
				if (signaller == ring)
					continue;

				intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
				intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}
		}
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
	}

	intel_ring_advance(ring);

	return ret;
}

static inline bool should_skip_switch(struct intel_engine_cs *ring,
				      struct intel_context *from,
				      struct intel_context *to)
{
	if (to->remap_slice)
		return false;

	if (to->ppgtt && from == to &&
	    !(intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings))
		return true;

	return false;
}

static bool
needs_pd_load_pre(struct intel_engine_cs *ring, struct intel_context *to)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	if (!to->ppgtt)
		return false;

	if (INTEL_INFO(ring->dev)->gen < 8)
		return true;

	if (ring != &dev_priv->ring[RCS])
		return true;

	return false;
}

static bool
needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
		u32 hw_flags)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	if (!to->ppgtt)
		return false;

	if (!IS_GEN8(ring->dev))
		return false;

	if (ring != &dev_priv->ring[RCS])
		return false;

	if (hw_flags & MI_RESTORE_INHIBIT)
		return true;

	return false;
}

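/*
 * Legacy (ringbuffer) context switch: pin the target context image, load the
 * PPGTT page directories where needed, emit MI_SET_CONTEXT and any pending L3
 * remaps, and transfer the last_context reference from the old context to
 * the new one.
 */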
static int do_switch(struct drm_i915_gem_request *req)
{
	struct intel_context *to = req->ctx;
	struct intel_engine_cs *ring = req->ring;
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_context *from = ring->last_context;
	u32 hw_flags = 0;
	bool uninitialized = false;
	int ret, i;

	if (from != NULL && ring == &dev_priv->ring[RCS]) {
		BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
		BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
	}

	if (should_skip_switch(ring, from, to))
		return 0;

	/* Trying to pin first makes error handling easier. */
	if (ring == &dev_priv->ring[RCS]) {
		ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
					    get_context_alignment(ring->dev), 0);
		if (ret)
			return ret;
	}

	/*
	 * Pin can switch back to the default context if we end up calling into
	 * evict_everything - as a last ditch gtt defrag effort that also
	 * switches to the default context. Hence we need to reload from here.
	 */
	from = ring->last_context;

	if (needs_pd_load_pre(ring, to)) {
		/* Older GENs and non render rings still want the load first,
		 * "PP_DCLV followed by PP_DIR_BASE register through Load
		 * Register Immediate commands in Ring Buffer before submitting
		 * a context."*/
		trace_switch_mm(ring, to);
		ret = to->ppgtt->switch_mm(to->ppgtt, req);
		if (ret)
			goto unpin_out;

		/* Doing a PD load always reloads the page dirs */
		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
	}

	if (ring != &dev_priv->ring[RCS]) {
		if (from)
			i915_gem_context_unreference(from);
		goto done;
	}

	/*
	 * Clear this page out of any CPU caches for coherent swap-in/out. Note
	 * that thanks to write = false in this call and us not setting any gpu
	 * write domains when putting a context object onto the active list
	 * (when switching away from it), this won't block.
	 *
	 * XXX: We need a real interface to do this instead of trickery.
	 */
	ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
	if (ret)
		goto unpin_out;

	if (!to->legacy_hw_ctx.initialized) {
		hw_flags |= MI_RESTORE_INHIBIT;
		/* NB: If we inhibit the restore, the context is not allowed to
		 * die because future work may end up depending on valid address
		 * space. This means we must enforce that a page table load
		 * occur when this occurs. */
	} else if (to->ppgtt &&
		   (intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings)) {
		hw_flags |= MI_FORCE_RESTORE;
		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
	}

	/* We should never emit switch_mm more than once */
	WARN_ON(needs_pd_load_pre(ring, to) &&
		needs_pd_load_post(ring, to, hw_flags));

	ret = mi_set_context(req, hw_flags);
	if (ret)
		goto unpin_out;

	/* GEN8 does *not* require an explicit reload if the PDPs have been
	 * setup, and we do not wish to move them.
	 */
	if (needs_pd_load_post(ring, to, hw_flags)) {
		trace_switch_mm(ring, to);
		ret = to->ppgtt->switch_mm(to->ppgtt, req);
		/* The hardware context switch is emitted, but we haven't
		 * actually changed the state - so it's probably safe to bail
		 * here. Still, let the user know something dangerous has
		 * happened.
		 */
		if (ret) {
			DRM_ERROR("Failed to change address space on context switch\n");
			goto unpin_out;
		}
	}

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(to->remap_slice & (1<<i)))
			continue;

		ret = i915_gem_l3_remap(req, i);
		/* If it failed, try again next round */
		if (ret)
			DRM_DEBUG_DRIVER("L3 remapping failed\n");
		else
			to->remap_slice &= ~(1<<i);
	}

	/* The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. In fact, the below code
	 * is a bit suboptimal because the retiring can occur simply after the
	 * MI_SET_CONTEXT instead of when the next seqno has completed.
	 */
	if (from != NULL) {
		from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req);
		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
		 * whole damn pipeline, we don't need to explicitly mark the
		 * object dirty. The only exception is that the context must be
		 * correct in case the object gets swapped out. Ideally we'd be
		 * able to defer doing this until we know the object would be
		 * swapped, but there is no way to do that yet.
		 */
		from->legacy_hw_ctx.rcs_state->dirty = 1;

		/* obj is kept alive until the next request by its active ref */
		i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
		i915_gem_context_unreference(from);
	}

	uninitialized = !to->legacy_hw_ctx.initialized;
	to->legacy_hw_ctx.initialized = true;

done:
	i915_gem_context_reference(to);
	ring->last_context = to;

	if (uninitialized) {
		if (ring->init_context) {
			ret = ring->init_context(req);
			if (ret)
				DRM_ERROR("ring init context: %d\n", ret);
		}
	}

	return 0;

unpin_out:
	if (ring->id == RCS)
		i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
	return ret;
}

/**
 * i915_switch_context() - perform a GPU context switch.
 * @req: request for which we'll execute the context switch
 *
 * The context life cycle is simple. The context refcount is incremented and
 * decremented by 1 on create and destroy. If the context is in use by the GPU,
 * it will have a refcount > 1. This allows us to destroy the context abstract
 * object while letting the normal object tracking destroy the backing BO.
 *
 * This function should not be used in execlists mode. Instead the context is
 * switched by writing to the ELSP and requests keep a reference to their
 * context.
 */
int i915_switch_context(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	WARN_ON(i915.enable_execlists);
	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

	if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
		if (req->ctx != ring->last_context) {
			i915_gem_context_reference(req->ctx);
			if (ring->last_context)
				i915_gem_context_unreference(ring->last_context);
			ring->last_context = req->ctx;
		}
		return 0;
	}

	return do_switch(req);
}

static bool contexts_enabled(struct drm_device *dev)
{
	return i915.enable_execlists || to_i915(dev)->hw_context_size;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_gem_context_create *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;
	int ret;

	if (!contexts_enabled(dev))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	args->ctx_id = ctx->user_handle;
	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);

	return 0;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;
	int ret;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
		return -ENOENT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	idr_remove(&ctx->file_priv->context_idr, ctx->user_handle);
	i915_gem_context_unreference(ctx);
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
	return 0;
}

int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct intel_context *ctx;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	args->size = 0;
	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		args->value = ctx->hang_stats.ban_period_seconds;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

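/*
 * Handler for the CONTEXT_SETPARAM ioctl: validate and update a per-context
 * parameter (hang ban period or NO_ZEROMAP flag) under struct_mutex.
 */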
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct intel_context *ctx;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		if (args->size)
			ret = -EINVAL;
		else if (args->value < ctx->hang_stats.ban_period_seconds &&
			 !capable(CAP_SYS_ADMIN))
			ret = -EPERM;
		else
			ctx->hang_stats.ban_period_seconds = args->value;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		if (args->size) {
			ret = -EINVAL;
		} else {
			ctx->flags &= ~CONTEXT_NO_ZEROMAP;
			ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}