/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * RC6 (the GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current context when invoking a save of the context we actually care about.
 * In fact, the code could likely be constructed, albeit in a more complicated
 * fashion, to never use the default context, though that limits the driver's
 * ability to swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These contexts
 * store GPU state, and thus allow GPU clients to not re-emit state (and
 * potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware and object
 * tracking work. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits execbuf with context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and is
 *  on the active list waiting for the next context switch to occur. Until this
 *  happens, the object must remain at the same gtt offset. It is therefore
 *  possible to destroy a context, but it is still active.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

/* This is a HW constraint. The value below is the largest known requirement
 * I've seen in a spec to date, and that was a workaround for a non-shipping
 * part. It should be safe to decrease this, but it's more future proof as is.
 */
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096

static size_t get_context_alignment(struct drm_device *dev)
{
	if (IS_GEN6(dev))
		return GEN6_CONTEXT_ALIGN;

	return GEN7_CONTEXT_ALIGN;
}

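/* Ask the hardware how large the legacy context image needs to be: gen6/7
 * report it in the CXT_SIZE registers (in 64 byte units), while Haswell and
 * gen8 use fixed totals.
 */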
static int get_context_size(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	u32 reg;

	switch (INTEL_INFO(dev)->gen) {
	case 6:
		reg = I915_READ(CXT_SIZE);
		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 7:
		reg = I915_READ(GEN7_CXT_SIZE);
		if (IS_HASWELL(dev))
			ret = HSW_CXT_TOTAL_SIZE;
		else
			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 8:
		ret = GEN8_CXT_TOTAL_SIZE;
		break;
	default:
		BUG();
	}

	return ret;
}

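/* Final kref release callback for a context: free the execlist contexts if
 * enabled, drop the PPGTT reference, release the legacy RCS state object and
 * unlink the context from the device's context list.
 */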
void i915_gem_context_free(struct kref *ctx_ref)
{
	struct intel_context *ctx = container_of(ctx_ref,
						 typeof(*ctx), ref);

	if (i915.enable_execlists)
		intel_lr_context_free(ctx);

	i915_ppgtt_put(ctx->ppgtt);

	if (ctx->legacy_hw_ctx.rcs_state)
		drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
	list_del(&ctx->link);
	kfree(ctx);
}

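/* Allocate the GEM object backing a context image and, where the hardware
 * supports it, make it L3+LLC cacheable.
 */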
struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
{
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 */
	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
		/* Failure shouldn't ever happen this early */
		if (WARN_ON(ret)) {
			drm_gem_object_unreference(&obj->base);
			return ERR_PTR(ret);
		}
	}

	return obj;
}

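/* Allocate and minimally initialise a context: the struct itself, the legacy
 * state object when the hardware needs one, and a user handle in the file's
 * context idr (the default context has no file and always gets
 * DEFAULT_CONTEXT_HANDLE).
 */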
static struct intel_context *
__create_hw_context(struct drm_device *dev,
		    struct drm_i915_file_private *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&ctx->ref);
	list_add_tail(&ctx->link, &dev_priv->context_list);

	if (dev_priv->hw_context_size) {
		struct drm_i915_gem_object *obj =
				i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
		if (IS_ERR(obj)) {
			ret = PTR_ERR(obj);
			goto err_out;
		}
		ctx->legacy_hw_ctx.rcs_state = obj;
	}

	/* Default context will never have a file_priv */
	if (file_priv != NULL) {
		ret = idr_alloc(&file_priv->context_idr, ctx,
				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
	} else
		ret = DEFAULT_CONTEXT_HANDLE;

	ctx->file_priv = file_priv;
	ctx->user_handle = ret;
	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;

	return ctx;

err_out:
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}

/**
 * The default context needs to exist per ring that uses contexts. It stores the
 * context state of the GPU for applications that don't utilize HW contexts, as
 * well as covering the idle case.
 */
static struct intel_context *
i915_gem_create_context(struct drm_device *dev,
			struct drm_i915_file_private *file_priv)
{
	const bool is_global_default_ctx = file_priv == NULL;
	struct intel_context *ctx;
	int ret = 0;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	ctx = __create_hw_context(dev, file_priv);
	if (IS_ERR(ctx))
		return ctx;

	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
		/* We may need to do things with the shrinker which
		 * require us to immediately switch back to the default
		 * context. This can cause a problem as pinning the
		 * default context also requires GTT space which may not
		 * be available. To avoid this we always pin the default
		 * context.
		 */
		ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
					    get_context_alignment(dev), 0);
		if (ret) {
			DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
			goto err_destroy;
		}
	}

	if (USES_FULL_PPGTT(dev)) {
		struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);

		if (IS_ERR_OR_NULL(ppgtt)) {
			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
					 PTR_ERR(ppgtt));
			ret = PTR_ERR(ppgtt);
			goto err_unpin;
		}

		ctx->ppgtt = ppgtt;
	}

	return ctx;

err_unpin:
	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
		i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
err_destroy:
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}

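/* Called after a GPU reset in legacy mode: the hardware no longer references
 * any context, so drop each ring's reference to its last context (and the
 * pin the render ring held on the context image).
 */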
void i915_gem_context_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* In execlists mode we will unreference the context when the execlist
	 * queue is cleared and the requests destroyed.
	 */
	if (i915.enable_execlists)
		return;

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];
		struct intel_context *lctx = ring->last_context;

		if (lctx) {
			if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
				i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);

			i915_gem_context_unreference(lctx);
			ring->last_context = NULL;
		}
	}
}

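/* One-time driver initialisation: size the legacy context image (execlists
 * allocate their own backing objects instead), then create the global default
 * context and share it across all rings.
 */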
int i915_gem_context_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;
	int i;

	/* Init should only be called once per module load. Eventually the
	 * restriction on the context_disabled check can be loosened. */
	if (WARN_ON(dev_priv->ring[RCS].default_context))
		return 0;

	if (i915.enable_execlists) {
		/* NB: intentionally left blank. We will allocate our own
		 * backing objects as we need them, thank you very much */
		dev_priv->hw_context_size = 0;
	} else if (HAS_HW_CONTEXTS(dev)) {
		dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
		if (dev_priv->hw_context_size > (1<<20)) {
			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
					 dev_priv->hw_context_size);
			dev_priv->hw_context_size = 0;
		}
	}

	ctx = i915_gem_create_context(dev, NULL);
	if (IS_ERR(ctx)) {
		DRM_ERROR("Failed to create default global context (error %ld)\n",
			  PTR_ERR(ctx));
		return PTR_ERR(ctx);
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];

		/* NB: RCS will hold a ref for all rings */
		ring->default_context = ctx;
	}

	DRM_DEBUG_DRIVER("%s context support initialized\n",
			i915.enable_execlists ? "LR" :
			dev_priv->hw_context_size ? "HW" : "fake");
	return 0;
}

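/* Driver unload: reset the GPU so it stops referencing the default context,
 * then drop the init-time pin and references, including the extra reference
 * do_switch() took if the default context is still current on the render
 * ring.
 */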
void i915_gem_context_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *dctx = dev_priv->ring[RCS].default_context;
	int i;

	if (dctx->legacy_hw_ctx.rcs_state) {
		/* The only known way to stop the gpu from accessing the hw context is
		 * to reset it. Do this as the very last operation to avoid confusing
		 * other code, leading to spurious errors. */
		intel_gpu_reset(dev);

		/* When default context is created and switched to, base object refcount
		 * will be 2 (+1 from object creation and +1 from do_switch()).
		 * i915_gem_context_fini() will be called after gpu_idle() has switched
		 * to default context. So we need to unreference the base object once
		 * to offset the do_switch part, so that i915_gem_context_unreference()
		 * can then free the base object correctly. */
		WARN_ON(!dev_priv->ring[RCS].last_context);
		if (dev_priv->ring[RCS].last_context == dctx) {
			/* Fake switch to NULL context */
			WARN_ON(dctx->legacy_hw_ctx.rcs_state->active);
			i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
			i915_gem_context_unreference(dctx);
			dev_priv->ring[RCS].last_context = NULL;
		}

		i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];

		if (ring->last_context)
			i915_gem_context_unreference(ring->last_context);

		ring->default_context = NULL;
		ring->last_context = NULL;
	}

	i915_gem_context_unreference(dctx);
}

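/* Switch every ring to its default context after hardware init so that the
 * hardware has a valid context to save into. A no-op under execlists, where
 * contexts are submitted through the ELSP instead.
 */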
int i915_gem_context_enable(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int ret, i;

	BUG_ON(!dev_priv->ring[RCS].default_context);

	if (i915.enable_execlists)
		return 0;

	for_each_ring(ring, dev_priv, i) {
		ret = i915_switch_context(ring, ring->default_context);
		if (ret)
			return ret;
	}

	return 0;
}

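/* idr_for_each() callback used on file close: drop the handle's reference. */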
static int context_idr_cleanup(int id, void *p, void *data)
{
	struct intel_context *ctx = p;

	i915_gem_context_unreference(ctx);
	return 0;
}

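/* Per-file open hook: set up the context idr and eagerly create the client's
 * own context.
 */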
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;

	idr_init(&file_priv->context_idr);

	mutex_lock(&dev->struct_mutex);
	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(ctx)) {
		idr_destroy(&file_priv->context_idr);
		return PTR_ERR(ctx);
	}

	return 0;
}

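/* Per-file close hook: release every context the client still holds. */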
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	idr_destroy(&file_priv->context_idr);
}

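/* Look up the context a user handle refers to; returns ERR_PTR(-ENOENT) if
 * the handle is stale or bogus.
 */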
struct intel_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
{
	struct intel_context *ctx;

	ctx = (struct intel_context *)idr_find(&file_priv->context_idr, id);
	if (!ctx)
		return ERR_PTR(-ENOENT);

	return ctx;
}

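/* Emit the MI_SET_CONTEXT sequence that makes the render ring save the
 * current context and load the new one, together with the documented
 * workarounds: an explicit TLB flush on gen6, MI_ARB_ON_OFF bracketing plus
 * PSMI sleep-message disabling on the other rings for ivb+, and the
 * mandatory MI_NOOP after MI_SET_CONTEXT itself.
 */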
static inline int
mi_set_context(struct intel_engine_cs *ring,
	       struct intel_context *new_context,
	       u32 hw_flags)
{
	u32 flags = hw_flags | MI_MM_SPACE_GTT;
	const int num_rings =
		/* Use an extended w/a on ivb+ if signalling from other rings */
		i915_semaphore_is_enabled(ring->dev) ?
		hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
		0;
	int len, i, ret;

	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
	 * explicitly, so we rely on the value at ring init, stored in
	 * itlb_before_ctx_switch.
	 */
	if (IS_GEN6(ring->dev)) {
		ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
		if (ret)
			return ret;
	}

	/* On HSW+ these bits drive the resource streamer, so only request the
	 * extended state save/restore on earlier gens */
	if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8)
		flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);

	len = 4;
	if (INTEL_INFO(ring->dev)->gen >= 7)
		len += 2 + (num_rings ? 4*num_rings + 2 : 0);

	ret = intel_ring_begin(ring, len);
	if (ret)
		return ret;

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (INTEL_INFO(ring->dev)->gen >= 7) {
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
		if (num_rings) {
			struct intel_engine_cs *signaller;

			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
			for_each_ring(signaller, to_i915(ring->dev), i) {
				if (signaller == ring)
					continue;

				intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
				intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}
		}
	}

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
			flags);
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	intel_ring_emit(ring, MI_NOOP);

	if (INTEL_INFO(ring->dev)->gen >= 7) {
		if (num_rings) {
			struct intel_engine_cs *signaller;

			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
			for_each_ring(signaller, to_i915(ring->dev), i) {
				if (signaller == ring)
					continue;

				intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
				intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}
		}
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
	}

	intel_ring_advance(ring);

	return ret;
}

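/* The core of the legacy context switch: pin the incoming context, switch
 * the PPGTT if there is one, emit MI_SET_CONTEXT, redo any pending L3 slice
 * remaps, and juggle the references so that the outgoing context's image
 * stays resident until the hardware has actually moved on to the new one.
 */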
static int do_switch(struct intel_engine_cs *ring,
		     struct intel_context *to)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_context *from = ring->last_context;
	u32 hw_flags = 0;
	bool uninitialized = false;
	int ret, i;

	if (from != NULL && ring == &dev_priv->ring[RCS]) {
		BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
		BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
	}

	if (from == to && !to->remap_slice)
		return 0;

	/* Trying to pin first makes error handling easier. */
	if (ring == &dev_priv->ring[RCS]) {
		ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
					    get_context_alignment(ring->dev), 0);
		if (ret)
			return ret;
	}

	/*
	 * Pin can switch back to the default context if we end up calling into
	 * evict_everything - as a last ditch gtt defrag effort that also
	 * switches to the default context. Hence we need to reload from here.
	 */
	from = ring->last_context;

	if (to->ppgtt) {
		ret = to->ppgtt->switch_mm(to->ppgtt, ring);
		if (ret)
			goto unpin_out;
	}

	if (ring != &dev_priv->ring[RCS]) {
		if (from)
			i915_gem_context_unreference(from);
		goto done;
	}

	/*
	 * Clear this page out of any CPU caches for coherent swap-in/out. Note
	 * that thanks to write = false in this call and us not setting any gpu
	 * write domains when putting a context object onto the active list
	 * (when switching away from it), this won't block.
	 *
	 * XXX: We need a real interface to do this instead of trickery.
	 */
	ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
	if (ret)
		goto unpin_out;

	if (!to->legacy_hw_ctx.rcs_state->has_global_gtt_mapping) {
		struct i915_vma *vma = i915_gem_obj_to_vma(to->legacy_hw_ctx.rcs_state,
							   &dev_priv->gtt.base);
		vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level, GLOBAL_BIND);
	}

	if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
		hw_flags |= MI_RESTORE_INHIBIT;

	ret = mi_set_context(ring, to, hw_flags);
	if (ret)
		goto unpin_out;

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(to->remap_slice & (1<<i)))
			continue;

		ret = i915_gem_l3_remap(ring, i);
		/* If it failed, try again next round */
		if (ret)
			DRM_DEBUG_DRIVER("L3 remapping failed\n");
		else
			to->remap_slice &= ~(1<<i);
	}

	/* The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. In fact, the below code
	 * is a bit suboptimal because the retiring can occur simply after the
	 * MI_SET_CONTEXT instead of when the next seqno has completed.
	 */
	if (from != NULL) {
		from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), ring);
		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
		 * whole damn pipeline, we don't need to explicitly mark the
		 * object dirty. The only exception is that the context must be
		 * correct in case the object gets swapped out. Ideally we'd be
		 * able to defer doing this until we know the object would be
		 * swapped, but there is no way to do that yet.
		 */
		from->legacy_hw_ctx.rcs_state->dirty = 1;
		BUG_ON(from->legacy_hw_ctx.rcs_state->ring != ring);

		/* obj is kept alive until the next request by its active ref */
		i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
		i915_gem_context_unreference(from);
	}

	uninitialized = !to->legacy_hw_ctx.initialized && from == NULL;
	to->legacy_hw_ctx.initialized = true;

done:
	i915_gem_context_reference(to);
	ring->last_context = to;

	if (uninitialized) {
		if (ring->init_context) {
			ret = ring->init_context(ring);
			if (ret)
				DRM_ERROR("ring init context: %d\n", ret);
		}

		ret = i915_gem_render_state_init(ring);
		if (ret)
			DRM_ERROR("init render state: %d\n", ret);
	}

	return 0;

unpin_out:
	if (ring->id == RCS)
		i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
	return ret;
}

/**
 * i915_switch_context() - perform a GPU context switch.
 * @ring: ring for which we'll execute the context switch
 * @to: the context to switch to
 *
 * The context life cycle is simple. The context refcount is incremented and
 * decremented by 1 on create and destroy. If the context is in use by the GPU,
 * it will have a refcount > 1. This allows us to destroy the context abstract
 * object while letting the normal object tracking destroy the backing BO.
 *
 * This function should not be used in execlists mode.  Instead the context is
 * switched by writing to the ELSP and requests keep a reference to their
 * context.
 */
int i915_switch_context(struct intel_engine_cs *ring,
			struct intel_context *to)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	WARN_ON(i915.enable_execlists);
	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

	if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
		if (to != ring->last_context) {
			i915_gem_context_reference(to);
			if (ring->last_context)
				i915_gem_context_unreference(ring->last_context);
			ring->last_context = to;
		}
		return 0;
	}

	return do_switch(ring, to);
}

static bool contexts_enabled(struct drm_device *dev)
{
	return i915.enable_execlists || to_i915(dev)->hw_context_size;
}

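/* DRM_I915_GEM_CONTEXT_CREATE ioctl: create a new context for the calling
 * client and hand back its handle.
 */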
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_gem_context_create *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;
	int ret;

	if (!contexts_enabled(dev))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	args->ctx_id = ctx->user_handle;
	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);

	return 0;
}

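/* DRM_I915_GEM_CONTEXT_DESTROY ioctl: drop the handle's reference. The
 * context itself stays alive until the GPU no longer references it. The
 * default context cannot be destroyed.
 */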
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;
	int ret;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
		return -ENOENT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	idr_remove(&ctx->file_priv->context_idr, ctx->user_handle);
	i915_gem_context_unreference(ctx);
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
	return 0;
}