/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *	Ben Widawsky <ben@bwidawsk.net>
 *
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * RC6 (the GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current context, in order to invoke a save of the context we actually care
 * about. In fact, the code could likely be constructed, albeit in a more
 * complicated fashion, to never use the default context, though that limits
 * the driver's ability to swap out, and/or destroy other contexts.
 *
 * All other contexts are created at the request of a GPU client. These
 * contexts store GPU state, and thus allow GPU clients to avoid re-emitting
 * state (and potentially to query certain state) on every submission. The
 * kernel driver makes certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware, and object
 * tracking works. Below is a very crude representation of the state machine
 * describing the context life.
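 * (As a gloss on the table below: "refcount" is the software reference
 * count held on the context, "pincount" the GGTT pin count of the context
 * BO, and "active" whether the GPU still references the BO; see the
 * "active context" note further down. X means don't care.)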
 *                                         refcount   pincount   active
 * S0: initial state                           0          0         0
 * S1: context created                         1          0         0
 * S2: context is currently running            2          1         X
 * S3: GPU referenced, but not current         2          0         1
 * S4: context is current, but destroyed       1          1         0
 * S5: like S3, but destroyed                  1          0         1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits an execbuf with the context
 * S2->S3: another client submits an execbuf with its own context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf with the context
 * S2->S4: context destroy called on the current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on the current context
 *
 * There are two confusing terms used above:
 * The "current context" means the context which is currently running on the
 * GPU. The GPU has loaded its state already and has stored away the gtt
 * offset of the BO. The GPU is not actively referencing the data at this
 * offset, but it will on the next context switch. The only way to avoid this
 * is to do a GPU reset.
 *
 * An "active context" is one which was previously the "current context" and is
 * on the active list waiting for the next context switch to occur. Until this
 * happens, the object must remain at the same gtt offset. It is therefore
 * possible to destroy a context while it is still active.
 *
 */
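/*
 * For reference, a rough sketch of how userspace drives the lifecycle
 * described above through the ioctls at the bottom of this file. This is
 * an illustration only (it assumes libdrm's drmIoctl(); error handling
 * and the actual execbuf submission are elided):
 *
 *	struct drm_i915_gem_context_create create = { 0 };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);    // S0->S1
 *	// ... execbuffers submitted with create.ctx_id: S1->S2->S3 ...
 *	struct drm_i915_gem_context_destroy destroy = {
 *		.ctx_id = create.ctx_id,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);  // ->S5->S0
 */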
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

/* This is a HW constraint. The value below is the largest known requirement
 * I've seen in a spec to date, and that was a workaround for a non-shipping
 * part. It should be safe to decrease this, but it's more future proof as is.
 */
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096

static void do_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *vm = &ppgtt->base;

	if (ppgtt == dev_priv->mm.aliasing_ppgtt ||
	    (list_empty(&vm->active_list) && list_empty(&vm->inactive_list))) {
		ppgtt->base.cleanup(&ppgtt->base);
		return;
	}

	/*
	 * Make sure vmas are unbound before we take down the drm_mm
	 *
	 * FIXME: Proper refcounting should take care of this, this shouldn't be
	 * needed at all.
	 */
	if (!list_empty(&vm->active_list)) {
		struct i915_vma *vma;

		list_for_each_entry(vma, &vm->active_list, mm_list)
			if (WARN_ON(list_empty(&vma->vma_link) ||
				    list_is_singular(&vma->vma_link)))
				break;

		i915_gem_evict_vm(&ppgtt->base, true);
	} else {
		i915_gem_retire_requests(dev);
		i915_gem_evict_vm(&ppgtt->base, false);
	}

	ppgtt->base.cleanup(&ppgtt->base);
}

static void ppgtt_release(struct kref *kref)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(kref, struct i915_hw_ppgtt, ref);

	do_ppgtt_cleanup(ppgtt);
	kfree(ppgtt);
}

static size_t get_context_alignment(struct drm_device *dev)
{
	if (IS_GEN6(dev))
		return GEN6_CONTEXT_ALIGN;

	return GEN7_CONTEXT_ALIGN;
}

static int get_context_size(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	u32 reg;

	switch (INTEL_INFO(dev)->gen) {
	case 6:
		reg = I915_READ(CXT_SIZE);
		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 7:
		reg = I915_READ(GEN7_CXT_SIZE);
		if (IS_HASWELL(dev))
			ret = HSW_CXT_TOTAL_SIZE;
		else
			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 8:
		ret = GEN8_CXT_TOTAL_SIZE;
		break;
	default:
		BUG();
	}

	return ret;
}

void i915_gem_context_free(struct kref *ctx_ref)
{
	struct intel_context *ctx = container_of(ctx_ref,
						 typeof(*ctx), ref);
	struct i915_hw_ppgtt *ppgtt = NULL;

	if (ctx->legacy_hw_ctx.rcs_state) {
		/* We refcount even the aliasing PPGTT to keep the code symmetric */
		if (USES_PPGTT(ctx->legacy_hw_ctx.rcs_state->base.dev))
			ppgtt = ctx_to_ppgtt(ctx);
	}

	if (ppgtt)
		kref_put(&ppgtt->ref, ppgtt_release);
	if (ctx->legacy_hw_ctx.rcs_state)
		drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
	list_del(&ctx->link);
	kfree(ctx);
}
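/*
 * Note: i915_gem_context_free() above is the kref release callback for
 * struct intel_context. Dropping the last reference (presumably via
 * i915_gem_context_unreference(), i.e. something equivalent to
 * kref_put(&ctx->ref, i915_gem_context_free)) lands here, mirroring how
 * ppgtt_release() is wired up through kref_put(&ppgtt->ref, ppgtt_release).
 */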
static struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
{
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 */
	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
		/* Failure shouldn't ever happen this early */
		if (WARN_ON(ret)) {
			drm_gem_object_unreference(&obj->base);
			return ERR_PTR(ret);
		}
	}

	return obj;
}

static struct i915_hw_ppgtt *
create_vm_for_ctx(struct drm_device *dev, struct intel_context *ctx)
{
	struct i915_hw_ppgtt *ppgtt;
	int ret;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

	ret = i915_gem_init_ppgtt(dev, ppgtt);
	if (ret) {
		kfree(ppgtt);
		return ERR_PTR(ret);
	}

	ppgtt->ctx = ctx;
	return ppgtt;
}

static struct intel_context *
__create_hw_context(struct drm_device *dev,
		    struct drm_i915_file_private *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&ctx->ref);
	list_add_tail(&ctx->link, &dev_priv->context_list);

	if (dev_priv->hw_context_size) {
		struct drm_i915_gem_object *obj =
			i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
		if (IS_ERR(obj)) {
			ret = PTR_ERR(obj);
			goto err_out;
		}
		ctx->legacy_hw_ctx.rcs_state = obj;
	}

	/* Default context will never have a file_priv */
	if (file_priv != NULL) {
		ret = idr_alloc(&file_priv->context_idr, ctx,
				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
	} else
		ret = DEFAULT_CONTEXT_HANDLE;

	ctx->file_priv = file_priv;
	ctx->user_handle = ret;
	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;

	return ctx;

err_out:
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}
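/*
 * Worked example of the remap_slice mask set above: on a part where
 * NUM_L3_SLICES() is 2, (1 << 2) - 1 = 0x3, i.e. both slices start out
 * flagged for an L3 remap. do_switch() below then clears one bit per
 * successful i915_gem_l3_remap() call, retrying failed slices on a
 * later switch.
 */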
/**
 * The default context needs to exist per ring that uses contexts. It stores the
 * context state of the GPU for applications that don't utilize HW contexts, as
 * well as for the idle case.
 */
static struct intel_context *
i915_gem_create_context(struct drm_device *dev,
			struct drm_i915_file_private *file_priv,
			bool create_vm)
{
	const bool is_global_default_ctx = file_priv == NULL;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;
	int ret = 0;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	ctx = __create_hw_context(dev, file_priv);
	if (IS_ERR(ctx))
		return ctx;

	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
		/* We may need to do things with the shrinker which
		 * require us to immediately switch back to the default
		 * context. This can cause a problem as pinning the
		 * default context also requires GTT space which may not
		 * be available. To avoid this we always pin the default
		 * context.
		 */
		ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
					    get_context_alignment(dev), 0);
		if (ret) {
			DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
			goto err_destroy;
		}
	}

	if (create_vm) {
		struct i915_hw_ppgtt *ppgtt = create_vm_for_ctx(dev, ctx);

		if (IS_ERR_OR_NULL(ppgtt)) {
			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
					 PTR_ERR(ppgtt));
			ret = PTR_ERR(ppgtt);
			goto err_unpin;
		} else
			ctx->vm = &ppgtt->base;

		/* This case is reserved for the global default context and
		 * should only happen once. */
		if (is_global_default_ctx) {
			if (WARN_ON(dev_priv->mm.aliasing_ppgtt)) {
				ret = -EEXIST;
				goto err_unpin;
			}

			dev_priv->mm.aliasing_ppgtt = ppgtt;
		}
	} else if (USES_PPGTT(dev)) {
		/* For platforms which only have aliasing PPGTT, we fake the
		 * address space and refcounting. */
		ctx->vm = &dev_priv->mm.aliasing_ppgtt->base;
		kref_get(&dev_priv->mm.aliasing_ppgtt->ref);
	} else
		ctx->vm = &dev_priv->gtt.base;

	return ctx;

err_unpin:
	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
		i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
err_destroy:
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}

void i915_gem_context_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Prevent the hardware from restoring the last context (which hung) on
	 * the next switch */
	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];
		struct intel_context *dctx = ring->default_context;
		struct intel_context *lctx = ring->last_context;

		/* Do a fake switch to the default context */
		if (lctx == dctx)
			continue;

		if (!lctx)
			continue;

		if (dctx->legacy_hw_ctx.rcs_state && i == RCS) {
			WARN_ON(i915_gem_obj_ggtt_pin(dctx->legacy_hw_ctx.rcs_state,
						      get_context_alignment(dev), 0));
			/* Fake a finish/inactive */
			dctx->legacy_hw_ctx.rcs_state->base.write_domain = 0;
			dctx->legacy_hw_ctx.rcs_state->active = 0;
		}

		if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
			i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);

		i915_gem_context_unreference(lctx);
		i915_gem_context_reference(dctx);
		ring->last_context = dctx;
	}
}

int i915_gem_context_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;
	int i;

	/* Init should only be called once per module load. Eventually the
	 * restriction on the context_disabled check can be loosened. */
	if (WARN_ON(dev_priv->ring[RCS].default_context))
		return 0;

	if (HAS_HW_CONTEXTS(dev)) {
		dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
		if (dev_priv->hw_context_size > (1<<20)) {
			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
					 dev_priv->hw_context_size);
			dev_priv->hw_context_size = 0;
		}
	}

	ctx = i915_gem_create_context(dev, NULL, USES_PPGTT(dev));
	if (IS_ERR(ctx)) {
		DRM_ERROR("Failed to create default global context (error %ld)\n",
			  PTR_ERR(ctx));
		return PTR_ERR(ctx);
	}

	/* NB: RCS will hold a ref for all rings */
	for (i = 0; i < I915_NUM_RINGS; i++)
		dev_priv->ring[i].default_context = ctx;

	DRM_DEBUG_DRIVER("%s context support initialized\n",
			 dev_priv->hw_context_size ? "HW" : "fake");
	return 0;
}
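/*
 * Note on the sizing in i915_gem_context_init() above: get_context_size()
 * returns bytes (the gen6/7 CXT_SIZE register fields appear to count
 * 64-byte units, hence the "* 64"), which is then rounded up to a 4KiB
 * page and sanity-checked against 1MiB (1 << 20) before HW contexts are
 * enabled. E.g. a reported total of 18 units would become
 * round_up(18 * 64, 4096) = 4096 bytes.
 */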
"HW" : "fake"); 441 return 0; 442 } 443 444 void i915_gem_context_fini(struct drm_device *dev) 445 { 446 struct drm_i915_private *dev_priv = dev->dev_private; 447 struct intel_context *dctx = dev_priv->ring[RCS].default_context; 448 int i; 449 450 if (dctx->legacy_hw_ctx.rcs_state) { 451 /* The only known way to stop the gpu from accessing the hw context is 452 * to reset it. Do this as the very last operation to avoid confusing 453 * other code, leading to spurious errors. */ 454 intel_gpu_reset(dev); 455 456 /* When default context is created and switched to, base object refcount 457 * will be 2 (+1 from object creation and +1 from do_switch()). 458 * i915_gem_context_fini() will be called after gpu_idle() has switched 459 * to default context. So we need to unreference the base object once 460 * to offset the do_switch part, so that i915_gem_context_unreference() 461 * can then free the base object correctly. */ 462 WARN_ON(!dev_priv->ring[RCS].last_context); 463 if (dev_priv->ring[RCS].last_context == dctx) { 464 /* Fake switch to NULL context */ 465 WARN_ON(dctx->legacy_hw_ctx.rcs_state->active); 466 i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state); 467 i915_gem_context_unreference(dctx); 468 dev_priv->ring[RCS].last_context = NULL; 469 } 470 471 i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state); 472 } 473 474 for (i = 0; i < I915_NUM_RINGS; i++) { 475 struct intel_engine_cs *ring = &dev_priv->ring[i]; 476 477 if (ring->last_context) 478 i915_gem_context_unreference(ring->last_context); 479 480 ring->default_context = NULL; 481 ring->last_context = NULL; 482 } 483 484 i915_gem_context_unreference(dctx); 485 } 486 487 int i915_gem_context_enable(struct drm_i915_private *dev_priv) 488 { 489 struct intel_engine_cs *ring; 490 int ret, i; 491 492 /* This is the only place the aliasing PPGTT gets enabled, which means 493 * it has to happen before we bail on reset */ 494 if (dev_priv->mm.aliasing_ppgtt) { 495 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 496 ppgtt->enable(ppgtt); 497 } 498 499 /* FIXME: We should make this work, even in reset */ 500 if (i915_reset_in_progress(&dev_priv->gpu_error)) 501 return 0; 502 503 BUG_ON(!dev_priv->ring[RCS].default_context); 504 505 for_each_ring(ring, dev_priv, i) { 506 ret = i915_switch_context(ring, ring->default_context); 507 if (ret) 508 return ret; 509 } 510 511 return 0; 512 } 513 514 static int context_idr_cleanup(int id, void *p, void *data) 515 { 516 struct intel_context *ctx = p; 517 518 i915_gem_context_unreference(ctx); 519 return 0; 520 } 521 522 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file) 523 { 524 struct drm_i915_file_private *file_priv = file->driver_priv; 525 struct intel_context *ctx; 526 527 idr_init(&file_priv->context_idr); 528 529 mutex_lock(&dev->struct_mutex); 530 ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev)); 531 mutex_unlock(&dev->struct_mutex); 532 533 if (IS_ERR(ctx)) { 534 idr_destroy(&file_priv->context_idr); 535 return PTR_ERR(ctx); 536 } 537 538 return 0; 539 } 540 541 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file) 542 { 543 struct drm_i915_file_private *file_priv = file->driver_priv; 544 545 idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL); 546 idr_destroy(&file_priv->context_idr); 547 } 548 549 struct intel_context * 550 i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id) 551 { 552 struct intel_context *ctx; 553 554 ctx = (struct intel_context 
static inline int
mi_set_context(struct intel_engine_cs *ring,
	       struct intel_context *new_context,
	       u32 hw_flags)
{
	const int num_rings =
		/* Use an extended w/a on ivb+ if signalling from other rings */
		i915_semaphore_is_enabled(ring->dev) ?
		hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
		0;
	int len, i, ret;

	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
	 * explicitly, so we rely on the value at ring init, stored in
	 * itlb_before_ctx_switch.
	 */
	if (IS_GEN6(ring->dev)) {
		ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
		if (ret)
			return ret;
	}

	len = 4;
	if (INTEL_INFO(ring->dev)->gen >= 7)
		len += 2 + (num_rings ? 4*num_rings + 2 : 0);

	ret = intel_ring_begin(ring, len);
	if (ret)
		return ret;

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (INTEL_INFO(ring->dev)->gen >= 7) {
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
		if (num_rings) {
			struct intel_engine_cs *signaller;

			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
			for_each_ring(signaller, to_i915(ring->dev), i) {
				if (signaller == ring)
					continue;

				intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
				intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}
		}
	}

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
			MI_MM_SPACE_GTT |
			MI_SAVE_EXT_STATE_EN |
			MI_RESTORE_EXT_STATE_EN |
			hw_flags);
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	intel_ring_emit(ring, MI_NOOP);

	if (INTEL_INFO(ring->dev)->gen >= 7) {
		if (num_rings) {
			struct intel_engine_cs *signaller;

			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
			for_each_ring(signaller, to_i915(ring->dev), i) {
				if (signaller == ring)
					continue;

				intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
				intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}
		}
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
	}

	intel_ring_advance(ring);

	return ret;
}
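/*
 * do_switch() below performs the actual switch for the render ring. In
 * outline (a summary of the code, for readability): pin the new context
 * BO in the GGTT, flush it to the GTT domain, bind a global GTT mapping
 * if one is missing, emit MI_SET_CONTEXT via mi_set_context(), perform
 * any pending L3 remaps, then move the old context BO to the active list
 * and drop its pin and reference.
 */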
static int do_switch(struct intel_engine_cs *ring,
		     struct intel_context *to)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_context *from = ring->last_context;
	struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to);
	u32 hw_flags = 0;
	bool uninitialized = false;
	int ret, i;

	if (from != NULL && ring == &dev_priv->ring[RCS]) {
		BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
		BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
	}

	if (from == to && !to->remap_slice)
		return 0;

	/* Trying to pin first makes error handling easier. */
	if (ring == &dev_priv->ring[RCS]) {
		ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
					    get_context_alignment(ring->dev), 0);
		if (ret)
			return ret;
	}

	/*
	 * Pin can switch back to the default context if we end up calling into
	 * evict_everything - as a last ditch gtt defrag effort that also
	 * switches to the default context. Hence we need to reload from here.
	 */
	from = ring->last_context;

	if (USES_FULL_PPGTT(ring->dev)) {
		ret = ppgtt->switch_mm(ppgtt, ring, false);
		if (ret)
			goto unpin_out;
	}

	if (ring != &dev_priv->ring[RCS]) {
		if (from)
			i915_gem_context_unreference(from);
		goto done;
	}

	/*
	 * Clear this page out of any CPU caches for coherent swap-in/out. Note
	 * that thanks to write = false in this call and us not setting any gpu
	 * write domains when putting a context object onto the active list
	 * (when switching away from it), this won't block.
	 *
	 * XXX: We need a real interface to do this instead of trickery.
	 */
	ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
	if (ret)
		goto unpin_out;

	if (!to->legacy_hw_ctx.rcs_state->has_global_gtt_mapping) {
		struct i915_vma *vma = i915_gem_obj_to_vma(to->legacy_hw_ctx.rcs_state,
							   &dev_priv->gtt.base);
		vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level, GLOBAL_BIND);
	}

	if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
		hw_flags |= MI_RESTORE_INHIBIT;

	ret = mi_set_context(ring, to, hw_flags);
	if (ret)
		goto unpin_out;

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(to->remap_slice & (1<<i)))
			continue;

		ret = i915_gem_l3_remap(ring, i);
		/* If it failed, try again next round */
		if (ret)
			DRM_DEBUG_DRIVER("L3 remapping failed\n");
		else
			to->remap_slice &= ~(1<<i);
	}

	/* The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. In fact, the below code
	 * is a bit suboptimal because the retiring can occur simply after the
	 * MI_SET_CONTEXT instead of when the next seqno has completed.
	 */
	if (from != NULL) {
		from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), ring);
		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
		 * whole damn pipeline, we don't need to explicitly mark the
		 * object dirty. The only exception is that the context must be
		 * correct in case the object gets swapped out. Ideally we'd be
		 * able to defer doing this until we know the object would be
		 * swapped, but there is no way to do that yet.
		 */
		from->legacy_hw_ctx.rcs_state->dirty = 1;
		BUG_ON(from->legacy_hw_ctx.rcs_state->ring != ring);

		/* obj is kept alive until the next request by its active ref */
		i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
		i915_gem_context_unreference(from);
	}

	uninitialized = !to->legacy_hw_ctx.initialized && from == NULL;
	to->legacy_hw_ctx.initialized = true;

done:
	i915_gem_context_reference(to);
	ring->last_context = to;

	if (uninitialized) {
		ret = i915_gem_render_state_init(ring);
		if (ret)
			DRM_ERROR("init render state: %d\n", ret);
	}

	return 0;

unpin_out:
	if (ring->id == RCS)
		i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
	return ret;
}
/**
 * i915_switch_context() - perform a GPU context switch.
 * @ring: ring for which we'll execute the context switch
 * @to: the context to switch to
 *
 * The context life cycle is simple. The context refcount is incremented and
 * decremented by 1 on create and destroy. If the context is in use by the GPU,
 * it will have a refcount > 1. This allows us to destroy the context abstract
 * object while letting the normal object tracking destroy the backing BO.
 */
int i915_switch_context(struct intel_engine_cs *ring,
			struct intel_context *to)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

	if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
		if (to != ring->last_context) {
			i915_gem_context_reference(to);
			if (ring->last_context)
				i915_gem_context_unreference(ring->last_context);
			ring->last_context = to;
		}
		return 0;
	}

	return do_switch(ring, to);
}

static bool hw_context_enabled(struct drm_device *dev)
{
	return to_i915(dev)->hw_context_size;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_gem_context_create *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;
	int ret;

	if (!hw_context_enabled(dev))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	args->ctx_id = ctx->user_handle;
	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);

	return 0;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;
	int ret;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
		return -ENOENT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	idr_remove(&ctx->file_priv->context_idr, ctx->user_handle);
	i915_gem_context_unreference(ctx);
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
	return 0;
}