xref: /dragonfly/sys/dev/drm/i915/i915_gem_context.c (revision 78478697)
/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * RC6 (the GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current context in order to invoke a save of the context we actually care
 * about. In fact, the code could likely be constructed, albeit in a more
 * complicated fashion, to never use the default context, though that limits
 * the driver's ability to swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These contexts
 * store GPU state, and thus allow GPU clients to not re-emit state (and
 * potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware and object
 * tracking work. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits execbuf with a different context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and is
 *  on the active list waiting for the next context switch to occur. Until this
 *  happens, the object must remain at the same gtt offset. It is therefore
 *  possible to destroy a context that is still active.
 *
 */

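/*
 * Illustrative mapping of the state machine above onto the entry points in
 * this file. This is a reading aid inferred from the code, not authoritative;
 * see the functions themselves for the exact reference/pin accounting:
 *
 *	S0->S1: i915_gem_context_create_ioctl() / i915_gem_create_context()
 *	S1->S2: i915_switch_context() -> do_switch() takes a reference and
 *	        pins the backing BO
 *	S2->S3: do_switch() to a different context moves the old one onto
 *	        the active list via i915_vma_move_to_active()
 *	S2->S4, S3->S5: i915_gem_context_destroy_ioctl() drops the creation
 *	        reference; the GPU-side reference keeps the BO alive
 *	S5->S0: retirement drops the last reference and
 *	        i915_gem_context_free() runs
 */
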
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"

/* This is a HW constraint. The value below is the largest known requirement
 * I've seen in a spec to date, and that was a workaround for a non-shipping
 * part. It should be safe to decrease this, but it's more future proof as is.
 */
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096

static size_t get_context_alignment(struct drm_device *dev)
{
	if (IS_GEN6(dev))
		return GEN6_CONTEXT_ALIGN;

	return GEN7_CONTEXT_ALIGN;
}

static int get_context_size(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	u32 reg;

	switch (INTEL_INFO(dev)->gen) {
	case 6:
		reg = I915_READ(CXT_SIZE);
		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 7:
		reg = I915_READ(GEN7_CXT_SIZE);
		if (IS_HASWELL(dev))
			ret = HSW_CXT_TOTAL_SIZE;
		else
			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 8:
		ret = GEN8_CXT_TOTAL_SIZE;
		break;
	default:
		BUG();
	}

	return ret;
}

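/*
 * Worked example of the sizing above (a sketch; the 64-byte unit is inferred
 * from the "* 64" scaling, the register field decoding lives in i915_reg.h,
 * and the 1088 figure is hypothetical): if GEN6_CXT_TOTAL_SIZE(reg) decodes
 * to 1088 units, the raw context size is 1088 * 64 = 69632 bytes, which
 * i915_gem_context_init() below then rounds up to a multiple of 4096 bytes
 * (here exactly 17 pages) for hw_context_size.
 */
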
void i915_gem_context_free(struct kref *ctx_ref)
{
	struct intel_context *ctx = container_of(ctx_ref,
						 typeof(*ctx), ref);

	trace_i915_context_free(ctx);

	if (i915.enable_execlists)
		intel_lr_context_free(ctx);

	i915_ppgtt_put(ctx->ppgtt);

	if (ctx->legacy_hw_ctx.rcs_state)
		drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
	list_del(&ctx->link);
	kfree(ctx);
}

struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
{
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 */
	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
		/* Failure shouldn't ever happen this early */
		if (WARN_ON(ret)) {
			drm_gem_object_unreference(&obj->base);
			return ERR_PTR(ret);
		}
	}

	return obj;
}

static struct intel_context *
__create_hw_context(struct drm_device *dev,
		    struct drm_i915_file_private *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&ctx->ref);
	list_add_tail(&ctx->link, &dev_priv->context_list);

	if (dev_priv->hw_context_size) {
		struct drm_i915_gem_object *obj =
				i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
		if (IS_ERR(obj)) {
			ret = PTR_ERR(obj);
			goto err_out;
		}
		ctx->legacy_hw_ctx.rcs_state = obj;
	}

	/* Default context will never have a file_priv */
	if (file_priv != NULL) {
		ret = idr_alloc(&file_priv->context_idr, ctx,
				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
	} else
		ret = DEFAULT_CONTEXT_HANDLE;

	ctx->file_priv = file_priv;
	ctx->user_handle = ret;
	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;

	ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;

	return ctx;

err_out:
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}

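/*
 * A note on the handle allocation above (a sketch of the idr semantics:
 * idr_alloc() returns the smallest free id at or above the start value, with
 * an upper bound of 0 meaning "no limit"):
 *
 *	open(card)           -> i915_gem_context_open() creates the per-file
 *	                        default context; the first idr_alloc() hands
 *	                        back DEFAULT_CONTEXT_HANDLE
 *	CONTEXT_CREATE ioctl -> subsequent contexts receive the smallest
 *	                        free id above it (handles of destroyed
 *	                        contexts may be reused)
 *
 * The global default context (file_priv == NULL) bypasses the idr entirely
 * and is assigned DEFAULT_CONTEXT_HANDLE directly.
 */
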
/**
 * The default context needs to exist per ring that uses contexts. It stores the
 * context state of the GPU for applications that don't utilize HW contexts, as
 * well as serving as the idle case.
 */
static struct intel_context *
i915_gem_create_context(struct drm_device *dev,
			struct drm_i915_file_private *file_priv)
{
	const bool is_global_default_ctx = file_priv == NULL;
	struct intel_context *ctx;
	int ret = 0;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	ctx = __create_hw_context(dev, file_priv);
	if (IS_ERR(ctx))
		return ctx;

	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
		/* We may need to do things with the shrinker which
		 * require us to immediately switch back to the default
		 * context. This can cause a problem as pinning the
		 * default context also requires GTT space which may not
		 * be available. To avoid this we always pin the default
		 * context.
		 */
		ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
					    get_context_alignment(dev), 0);
		if (ret) {
			DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
			goto err_destroy;
		}
	}

	if (USES_FULL_PPGTT(dev)) {
		struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);

		if (IS_ERR_OR_NULL(ppgtt)) {
			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
					 PTR_ERR(ppgtt));
			ret = PTR_ERR(ppgtt);
			goto err_unpin;
		}

		ctx->ppgtt = ppgtt;
	}

	trace_i915_context_create(ctx);

	return ctx;

err_unpin:
	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
		i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
err_destroy:
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}

void i915_gem_context_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* In execlists mode we will unreference the context when the execlist
	 * queue is cleared and the requests destroyed.
	 */
	if (i915.enable_execlists)
		return;

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];
		struct intel_context *lctx = ring->last_context;

		if (lctx) {
			if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
				i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);

			i915_gem_context_unreference(lctx);
			ring->last_context = NULL;
		}
	}
}

int i915_gem_context_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;
	int i;

	/* Init should only be called once per module load. Eventually the
	 * restriction on the context_disabled check can be loosened. */
	if (WARN_ON(dev_priv->ring[RCS].default_context))
		return 0;

	if (i915.enable_execlists) {
		/* NB: intentionally left blank. We will allocate our own
		 * backing objects as we need them, thank you very much */
		dev_priv->hw_context_size = 0;
	} else if (HAS_HW_CONTEXTS(dev)) {
		dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
		if (dev_priv->hw_context_size > (1<<20)) {
			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
					 dev_priv->hw_context_size);
			dev_priv->hw_context_size = 0;
		}
	}

	ctx = i915_gem_create_context(dev, NULL);
	if (IS_ERR(ctx)) {
		DRM_ERROR("Failed to create default global context (error %ld)\n",
			  PTR_ERR(ctx));
		return PTR_ERR(ctx);
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];

		/* NB: RCS will hold a ref for all rings */
		ring->default_context = ctx;
	}

	DRM_DEBUG_DRIVER("%s context support initialized\n",
			i915.enable_execlists ? "LR" :
			dev_priv->hw_context_size ? "HW" : "fake");
	return 0;
}

void i915_gem_context_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *dctx = dev_priv->ring[RCS].default_context;
	int i;

	if (dctx->legacy_hw_ctx.rcs_state) {
		/* The only known way to stop the gpu from accessing the hw context is
		 * to reset it. Do this as the very last operation to avoid confusing
		 * other code, leading to spurious errors. */
		intel_gpu_reset(dev);

		/* When default context is created and switched to, base object refcount
		 * will be 2 (+1 from object creation and +1 from do_switch()).
		 * i915_gem_context_fini() will be called after gpu_idle() has switched
		 * to default context. So we need to unreference the base object once
		 * to offset the do_switch part, so that i915_gem_context_unreference()
		 * can then free the base object correctly. */
		WARN_ON(!dev_priv->ring[RCS].last_context);
		if (dev_priv->ring[RCS].last_context == dctx) {
			/* Fake switch to NULL context */
			WARN_ON(dctx->legacy_hw_ctx.rcs_state->active);
			i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
			i915_gem_context_unreference(dctx);
			dev_priv->ring[RCS].last_context = NULL;
		}

		i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];

		if (ring->last_context)
			i915_gem_context_unreference(ring->last_context);

		ring->default_context = NULL;
		ring->last_context = NULL;
	}

	i915_gem_context_unreference(dctx);
}

int i915_gem_context_enable(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int ret, i;

	BUG_ON(!dev_priv->ring[RCS].default_context);

	if (i915.enable_execlists) {
		for_each_ring(ring, dev_priv, i) {
			if (ring->init_context) {
				ret = ring->init_context(ring,
						ring->default_context);
				if (ret) {
					DRM_ERROR("ring init context: %d\n",
							ret);
					return ret;
				}
			}
		}
	} else
		for_each_ring(ring, dev_priv, i) {
			ret = i915_switch_context(ring, ring->default_context);
			if (ret)
				return ret;
		}

	return 0;
}

static int context_idr_cleanup(int id, void *p, void *data)
{
	struct intel_context *ctx = p;

	i915_gem_context_unreference(ctx);
	return 0;
}

int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;

	idr_init(&file_priv->context_idr);

	mutex_lock(&dev->struct_mutex);
	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(ctx)) {
		idr_destroy(&file_priv->context_idr);
		return PTR_ERR(ctx);
	}

	return 0;
}

void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	idr_destroy(&file_priv->context_idr);
}

struct intel_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
{
	struct intel_context *ctx;

	ctx = (struct intel_context *)idr_find(&file_priv->context_idr, id);
	if (!ctx)
		return ERR_PTR(-ENOENT);

	return ctx;
}

static inline int
mi_set_context(struct intel_engine_cs *ring,
	       struct intel_context *new_context,
	       u32 hw_flags)
{
	u32 flags = hw_flags | MI_MM_SPACE_GTT;
	const int num_rings =
		/* Use an extended w/a on ivb+ if signalling from other rings */
		i915_semaphore_is_enabled(ring->dev) ?
		hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
		0;
	int len, i, ret;

	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
	 * explicitly, so we rely on the value at ring init, stored in
	 * itlb_before_ctx_switch.
	 */
	if (IS_GEN6(ring->dev)) {
		ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
		if (ret)
			return ret;
	}

	/* The extended state save/restore bits are used by the resource
	 * streamer on HSW+, so only set them on earlier gens. */
	if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8)
		flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);

	len = 4;
	if (INTEL_INFO(ring->dev)->gen >= 7)
		len += 2 + (num_rings ? 4*num_rings + 2 : 0);

	ret = intel_ring_begin(ring, len);
	if (ret)
		return ret;

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (INTEL_INFO(ring->dev)->gen >= 7) {
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
		if (num_rings) {
			struct intel_engine_cs *signaller;

			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
			for_each_ring(signaller, to_i915(ring->dev), i) {
				if (signaller == ring)
					continue;

				intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
				intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}
		}
	}

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
			flags);
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	intel_ring_emit(ring, MI_NOOP);

	if (INTEL_INFO(ring->dev)->gen >= 7) {
		if (num_rings) {
			struct intel_engine_cs *signaller;

			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
			for_each_ring(signaller, to_i915(ring->dev), i) {
				if (signaller == ring)
					continue;

				intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
				intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}
		}
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
	}

	intel_ring_advance(ring);

	return ret;
}

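/*
 * Dword accounting for mi_set_context(), as a worked example of the len
 * computation above: the core sequence (MI_NOOP, MI_SET_CONTEXT, the ggtt
 * offset | flags, trailing MI_NOOP) is 4 dwords. Gen7+ wraps it in
 * MI_ARB_DISABLE/MI_ARB_ENABLE (+2), and when cross-ring semaphores are in
 * use each side adds one MI_LOAD_REGISTER_IMM header plus two dwords
 * (register, value) per other ring, i.e. 2 + 4*num_rings in total. With
 * three other rings that is 4 + 2 + 14 = 20 dwords.
 */
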
static int do_switch(struct intel_engine_cs *ring,
		     struct intel_context *to)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_context *from = ring->last_context;
	u32 hw_flags = 0;
	bool uninitialized = false;
	struct i915_vma *vma;
	int ret, i;

	if (from != NULL && ring == &dev_priv->ring[RCS]) {
		BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
		BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
	}

	if (from == to && !to->remap_slice)
		return 0;

	/* Trying to pin first makes error handling easier. */
	if (ring == &dev_priv->ring[RCS]) {
		ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
					    get_context_alignment(ring->dev), 0);
		if (ret)
			return ret;
	}

	/*
	 * Pin can switch back to the default context if we end up calling into
	 * evict_everything - as a last ditch gtt defrag effort that also
	 * switches to the default context. Hence we need to reload from here.
	 */
	from = ring->last_context;

	if (to->ppgtt) {
		trace_switch_mm(ring, to);
		ret = to->ppgtt->switch_mm(to->ppgtt, ring);
		if (ret)
			goto unpin_out;
	}

	if (ring != &dev_priv->ring[RCS]) {
		if (from)
			i915_gem_context_unreference(from);
		goto done;
	}

	/*
	 * Clear this page out of any CPU caches for coherent swap-in/out. Note
	 * that thanks to write = false in this call and us not setting any gpu
	 * write domains when putting a context object onto the active list
	 * (when switching away from it), this won't block.
	 *
	 * XXX: We need a real interface to do this instead of trickery.
	 */
	ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
	if (ret)
		goto unpin_out;

	vma = i915_gem_obj_to_ggtt(to->legacy_hw_ctx.rcs_state);
	if (!(vma->bound & GLOBAL_BIND)) {
		ret = i915_vma_bind(vma,
				    to->legacy_hw_ctx.rcs_state->cache_level,
				    GLOBAL_BIND);
		/* This shouldn't ever fail. */
		if (WARN_ONCE(ret, "GGTT context bind failed!"))
			goto unpin_out;
	}

	if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
		hw_flags |= MI_RESTORE_INHIBIT;

	ret = mi_set_context(ring, to, hw_flags);
	if (ret)
		goto unpin_out;

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(to->remap_slice & (1<<i)))
			continue;

		ret = i915_gem_l3_remap(ring, i);
		/* If it failed, try again next round */
		if (ret)
			DRM_DEBUG_DRIVER("L3 remapping failed\n");
		else
			to->remap_slice &= ~(1<<i);
	}

	/* The backing object for the context is not done being used by the GPU
	 * until after the switch to the *next* context. Therefore we cannot
	 * retire the previous context until the next context has already
	 * started running. In fact, the below code is a bit suboptimal because
	 * the retiring can occur simply after the MI_SET_CONTEXT instead of
	 * when the next seqno has completed.
	 */
	if (from != NULL) {
		from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), ring);
		/* As long as MI_SET_CONTEXT is serializing, i.e. it flushes the
		 * whole damn pipeline, we don't need to explicitly mark the
		 * object dirty. The only exception is that the context must be
		 * correct in case the object gets swapped out. Ideally we'd be
		 * able to defer doing this until we know the object would be
		 * swapped, but there is no way to do that yet.
		 */
		from->legacy_hw_ctx.rcs_state->dirty = 1;
		BUG_ON(i915_gem_request_get_ring(
			from->legacy_hw_ctx.rcs_state->last_read_req) != ring);

		/* obj is kept alive until the next request by its active ref */
		i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
		i915_gem_context_unreference(from);
	}

	uninitialized = !to->legacy_hw_ctx.initialized && from == NULL;
	to->legacy_hw_ctx.initialized = true;

done:
	i915_gem_context_reference(to);
	ring->last_context = to;

	if (uninitialized) {
		if (ring->init_context) {
			ret = ring->init_context(ring, to);
			if (ret)
				DRM_ERROR("ring init context: %d\n", ret);
		}
	}

	return 0;

unpin_out:
	if (ring->id == RCS)
		i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
	return ret;
}

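/*
 * In terms of the S0..S5 state machine at the top of this file, a successful
 * do_switch() on the render ring moves `to` into S2 (one extra reference via
 * i915_gem_context_reference() plus the ggtt pin) and the previous context
 * into S3 (still referenced and on the active list courtesy of
 * i915_vma_move_to_active(), but unpinned). This is a reading aid inferred
 * from the code above, not a normative contract.
 */
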
/**
 * i915_switch_context() - perform a GPU context switch.
 * @ring: ring for which we'll execute the context switch
 * @to: the context to switch to
 *
 * The context life cycle is simple. The context refcount is incremented and
 * decremented by 1 on create and destroy. If the context is in use by the GPU,
 * it will have a refcount > 1. This allows us to destroy the context abstract
 * object while letting the normal object tracking destroy the backing BO.
 *
 * This function should not be used in execlists mode. Instead the context is
 * switched by writing to the ELSP and requests keep a reference to their
 * context.
 */
int i915_switch_context(struct intel_engine_cs *ring,
			struct intel_context *to)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	WARN_ON(i915.enable_execlists);
	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

	if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
		if (to != ring->last_context) {
			i915_gem_context_reference(to);
			if (ring->last_context)
				i915_gem_context_unreference(ring->last_context);
			ring->last_context = to;
		}
		return 0;
	}

	return do_switch(ring, to);
}

static bool contexts_enabled(struct drm_device *dev)
{
	return i915.enable_execlists || to_i915(dev)->hw_context_size;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_gem_context_create *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;
	int ret;

	if (!contexts_enabled(dev))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	args->ctx_id = ctx->user_handle;
	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);

	return 0;
}

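/*
 * Illustrative userspace use of the ioctl above (a sketch using libdrm's
 * drmIoctl() wrapper from xf86drm.h and the uapi struct from i915_drm.h;
 * error handling elided, fd assumed to be an open i915 DRM node):
 *
 *	struct drm_i915_gem_context_create create = { 0 };
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create) == 0)
 *		printf("new context handle: %u\n", create.ctx_id);
 *
 * The returned ctx_id is what execbuf and the destroy/getparam/setparam
 * ioctls below use to name the context.
 */
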
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;
	int ret;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
		return -ENOENT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	idr_remove(&ctx->file_priv->context_idr, ctx->user_handle);
	i915_gem_context_unreference(ctx);
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
	return 0;
}

int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct intel_context *ctx;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	args->size = 0;
	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		args->value = ctx->hang_stats.ban_period_seconds;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct intel_context *ctx;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		if (args->size)
			ret = -EINVAL;
		else if (args->value < ctx->hang_stats.ban_period_seconds &&
			 !capable(CAP_SYS_ADMIN))
			ret = -EPERM;
		else
			ctx->hang_stats.ban_period_seconds = args->value;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
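
/*
 * Illustrative ban-period tuning via the two ioctls above (a sketch;
 * constants and struct from i915_drm.h, error handling elided):
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_BAN_PERIOD,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
 *	p.value *= 2;	/\* raising the period needs no privilege *\/
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 *
 * Note the asymmetry enforced by the setparam handler: shrinking the ban
 * period below its current value requires CAP_SYS_ADMIN, raising it does not,
 * and a nonzero args->size is rejected outright.
 */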