/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * RC6 (the GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current context, in order to invoke a save of the context we actually care
 * about. In fact, the code could likely be constructed, albeit in a more
 * complicated fashion, to never use the default context, though that limits
 * the driver's ability to swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These contexts
 * store GPU state, and thus allow GPU clients to not re-emit state (and
 * potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware and object
 * tracking work. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits an execbuf with the context
 * S2->S3: another client submits an execbuf with the context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 * (An illustrative ioctl sequence follows this comment.)
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and is
 *  on the active list waiting for the next context switch to occur. Until this
 *  happens, the object must remain at the same gtt offset. It is therefore
 *  possible for a context to be destroyed while it is still active.
 *
 */
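
/*
 * As a rough, hypothetical illustration of the transitions above from the
 * ioctl side (assuming the standard libdrm drmIoctl() wrapper and the uapi
 * structs from i915_drm.h; error handling and execbuffer setup are elided):
 *
 *	struct drm_i915_gem_context_create create = { 0 };
 *	struct drm_i915_gem_context_destroy destroy = { 0 };
 *	struct drm_i915_gem_execbuffer2 execbuf = { 0 };
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);    S0->S1
 *	i915_execbuffer2_set_context_id(execbuf, create.ctx_id);
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);      S1->S2
 *	destroy.ctx_id = create.ctx_id;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);  S2->S4
 *
 * The kernel object then drains through S4->S5->S0 on its own as the GPU
 * retires the outstanding work and the remaining references are dropped.
 */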

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"

/* This is a HW constraint. The value below is the largest known requirement
 * I've seen in a spec to date, and that was a workaround for a non-shipping
 * part. It should be safe to decrease this, but it's more future-proof as is.
 */
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096

static size_t get_context_alignment(struct drm_device *dev)
{
	if (IS_GEN6(dev))
		return GEN6_CONTEXT_ALIGN;

	return GEN7_CONTEXT_ALIGN;
}

static int get_context_size(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	u32 reg;

	switch (INTEL_INFO(dev)->gen) {
	case 6:
		reg = I915_READ(CXT_SIZE);
		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 7:
		reg = I915_READ(GEN7_CXT_SIZE);
		if (IS_HASWELL(dev))
			ret = HSW_CXT_TOTAL_SIZE;
		else
			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 8:
		ret = GEN8_CXT_TOTAL_SIZE;
		break;
	default:
		BUG();
	}

	return ret;
}

void i915_gem_context_free(struct kref *ctx_ref)
{
	struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);

	trace_i915_context_free(ctx);

	if (i915.enable_execlists)
		intel_lr_context_free(ctx);

	i915_ppgtt_put(ctx->ppgtt);

	if (ctx->legacy_hw_ctx.rcs_state)
		drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
	list_del(&ctx->link);
	kfree(ctx);
}

struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
{
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 */
	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
		/* Failure shouldn't ever happen this early */
		if (WARN_ON(ret)) {
			drm_gem_object_unreference(&obj->base);
			return ERR_PTR(ret);
		}
	}

	return obj;
}

static struct intel_context *
__create_hw_context(struct drm_device *dev,
		    struct drm_i915_file_private *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&ctx->ref);
	list_add_tail(&ctx->link, &dev_priv->context_list);
	ctx->i915 = dev_priv;

	if (dev_priv->hw_context_size) {
		struct drm_i915_gem_object *obj =
				i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
		if (IS_ERR(obj)) {
			ret = PTR_ERR(obj);
			goto err_out;
		}
		ctx->legacy_hw_ctx.rcs_state = obj;
	}

	/* Default context will never have a file_priv */
	if (file_priv != NULL) {
		ret = idr_alloc(&file_priv->context_idr, ctx,
				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
	} else
		ret = DEFAULT_CONTEXT_HANDLE;

	ctx->file_priv = file_priv;
	ctx->user_handle = ret;
	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;

	ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;

	return ctx;

err_out:
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}

/**
 * The default context needs to exist per ring that uses contexts. It stores the
 * context state of the GPU for applications that don't utilize HW contexts, as
 * well as an idle case.
 */
static struct intel_context *
i915_gem_create_context(struct drm_device *dev,
			struct drm_i915_file_private *file_priv)
{
	const bool is_global_default_ctx = file_priv == NULL;
	struct intel_context *ctx;
	int ret = 0;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	ctx = __create_hw_context(dev, file_priv);
	if (IS_ERR(ctx))
		return ctx;

	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
		/* We may need to do things with the shrinker which
		 * require us to immediately switch back to the default
		 * context. This can cause a problem as pinning the
		 * default context also requires GTT space which may not
		 * be available. To avoid this we always pin the default
		 * context.
		 */
		ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
					    get_context_alignment(dev), 0);
		if (ret) {
			DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
			goto err_destroy;
		}
	}

	if (USES_FULL_PPGTT(dev)) {
		struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);

		if (IS_ERR_OR_NULL(ppgtt)) {
			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
					 PTR_ERR(ppgtt));
			ret = PTR_ERR(ppgtt);
			goto err_unpin;
		}

		ctx->ppgtt = ppgtt;
	}

	trace_i915_context_create(ctx);

	return ctx;

err_unpin:
	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
		i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
err_destroy:
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}

void i915_gem_context_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	if (i915.enable_execlists) {
		struct intel_context *ctx;

		list_for_each_entry(ctx, &dev_priv->context_list, link) {
			intel_lr_context_reset(dev, ctx);
		}

		return;
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];
		struct intel_context *lctx = ring->last_context;

		if (lctx) {
			if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
				i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);

			i915_gem_context_unreference(lctx);
			ring->last_context = NULL;
		}
	}
}

int i915_gem_context_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;
	int i;

	/* Init should only be called once per module load. Eventually the
	 * restriction on the context_disabled check can be loosened. */
	if (WARN_ON(dev_priv->ring[RCS].default_context))
		return 0;

	if (i915.enable_execlists) {
		/* NB: intentionally left blank. We will allocate our own
		 * backing objects as we need them, thank you very much */
		dev_priv->hw_context_size = 0;
	} else if (HAS_HW_CONTEXTS(dev)) {
		dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
		if (dev_priv->hw_context_size > (1<<20)) {
			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
					 dev_priv->hw_context_size);
			dev_priv->hw_context_size = 0;
		}
	}

	ctx = i915_gem_create_context(dev, NULL);
	if (IS_ERR(ctx)) {
		DRM_ERROR("Failed to create default global context (error %ld)\n",
			  PTR_ERR(ctx));
		return PTR_ERR(ctx);
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];

		/* NB: RCS will hold a ref for all rings */
		ring->default_context = ctx;
	}

	DRM_DEBUG_DRIVER("%s context support initialized\n",
			i915.enable_execlists ? "LR" :
			dev_priv->hw_context_size ? "HW" : "fake");
	return 0;
}

void i915_gem_context_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *dctx = dev_priv->ring[RCS].default_context;
	int i;

	if (dctx->legacy_hw_ctx.rcs_state) {
		/* The only known way to stop the gpu from accessing the hw context is
		 * to reset it. Do this as the very last operation to avoid confusing
		 * other code, leading to spurious errors. */
		intel_gpu_reset(dev);

		/* When default context is created and switched to, base object refcount
		 * will be 2 (+1 from object creation and +1 from do_switch()).
		 * i915_gem_context_fini() will be called after gpu_idle() has switched
		 * to default context. So we need to unreference the base object once
		 * to offset the do_switch part, so that i915_gem_context_unreference()
		 * can then free the base object correctly. */
		WARN_ON(!dev_priv->ring[RCS].last_context);
		if (dev_priv->ring[RCS].last_context == dctx) {
			/* Fake switch to NULL context */
			WARN_ON(dctx->legacy_hw_ctx.rcs_state->active);
			i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
			i915_gem_context_unreference(dctx);
			dev_priv->ring[RCS].last_context = NULL;
		}

		i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];

		if (ring->last_context)
			i915_gem_context_unreference(ring->last_context);

		ring->default_context = NULL;
		ring->last_context = NULL;
	}

	i915_gem_context_unreference(dctx);
}

int i915_gem_context_enable(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int ret, i;

	BUG_ON(!dev_priv->ring[RCS].default_context);

	if (i915.enable_execlists) {
		for_each_ring(ring, dev_priv, i) {
			if (ring->init_context) {
				ret = ring->init_context(ring,
						ring->default_context);
				if (ret) {
					DRM_ERROR("ring init context: %d\n",
							ret);
					return ret;
				}
			}
		}

	} else
		for_each_ring(ring, dev_priv, i) {
			ret = i915_switch_context(ring, ring->default_context);
			if (ret)
				return ret;
		}

	return 0;
}

static int context_idr_cleanup(int id, void *p, void *data)
{
	struct intel_context *ctx = p;

	i915_gem_context_unreference(ctx);
	return 0;
}

int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;

	idr_init(&file_priv->context_idr);

	mutex_lock(&dev->struct_mutex);
	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(ctx)) {
		idr_destroy(&file_priv->context_idr);
		return PTR_ERR(ctx);
	}

	return 0;
}

void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	idr_destroy(&file_priv->context_idr);
}

struct intel_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
{
	struct intel_context *ctx;

	ctx = (struct intel_context *)idr_find(&file_priv->context_idr, id);
	if (!ctx)
		return ERR_PTR(-ENOENT);

	return ctx;
}

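/*
 * For gen7+ with cross-ring semaphores enabled, mi_set_context() below emits
 * a command sequence of the following shape (a reading aid distilled from
 * the code itself, not from bspec):
 *
 *	MI_ARB_ON_OFF | MI_ARB_DISABLE
 *	MI_LOAD_REGISTER_IMM  (disable PSMI sleep messages on the other rings)
 *	MI_NOOP
 *	MI_SET_CONTEXT
 *	<ggtt offset of the new context BO> | flags
 *	MI_NOOP               (WaMiSetContext_Hang)
 *	MI_LOAD_REGISTER_IMM  (re-enable PSMI sleep messages)
 *	MI_ARB_ON_OFF | MI_ARB_ENABLE
 *
 * On gen6, and on gen7+ without semaphore signalling, only the middle four
 * dwords are emitted.
 */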
static inline int
mi_set_context(struct intel_engine_cs *ring,
	       struct intel_context *new_context,
	       u32 hw_flags)
{
	u32 flags = hw_flags | MI_MM_SPACE_GTT;
	const int num_rings =
		/* Use an extended w/a on ivb+ if signalling from other rings */
		i915_semaphore_is_enabled(ring->dev) ?
		hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
		0;
	int len, i, ret;

	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
	 * explicitly, so we rely on the value at ring init, stored in
	 * itlb_before_ctx_switch.
	 */
	if (IS_GEN6(ring->dev)) {
		ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
		if (ret)
			return ret;
	}

	/* These flags are for resource streamer on HSW+ */
	if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8)
		flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);

	len = 4;
	if (INTEL_INFO(ring->dev)->gen >= 7)
		len += 2 + (num_rings ? 4*num_rings + 2 : 0);

	ret = intel_ring_begin(ring, len);
	if (ret)
		return ret;

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (INTEL_INFO(ring->dev)->gen >= 7) {
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
		if (num_rings) {
			struct intel_engine_cs *signaller;

			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
			for_each_ring(signaller, to_i915(ring->dev), i) {
				if (signaller == ring)
					continue;

				intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
				intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}
		}
	}

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
			flags);
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	intel_ring_emit(ring, MI_NOOP);

	if (INTEL_INFO(ring->dev)->gen >= 7) {
		if (num_rings) {
			struct intel_engine_cs *signaller;

			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
			for_each_ring(signaller, to_i915(ring->dev), i) {
				if (signaller == ring)
					continue;

				intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
				intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}
		}
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
	}

	intel_ring_advance(ring);

	return ret;
}

static inline bool should_skip_switch(struct intel_engine_cs *ring,
				      struct intel_context *from,
				      struct intel_context *to)
{
	if (to->remap_slice)
		return false;

	if (to->ppgtt && from == to &&
	    !(intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings))
		return true;

	return false;
}

static bool
needs_pd_load_pre(struct intel_engine_cs *ring, struct intel_context *to)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	if (!to->ppgtt)
		return false;

	if (INTEL_INFO(ring->dev)->gen < 8)
		return true;

	if (ring != &dev_priv->ring[RCS])
		return true;

	return false;
}

static bool
needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
		u32 hw_flags)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	if (!to->ppgtt)
		return false;

	if (!IS_GEN8(ring->dev))
		return false;

	if (ring != &dev_priv->ring[RCS])
		return false;

	if (hw_flags & MI_RESTORE_INHIBIT)
		return true;

	return false;
}

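/*
 * Taken together, needs_pd_load_pre() and needs_pd_load_post() above encode
 * the following page-directory load rules (a summary of the code, not of
 * bspec): on gen6/7, and for non-RCS rings on any gen, PP_DCLV/PP_DIR_BASE
 * must be loaded via LRI *before* the context is submitted; on gen8 RCS the
 * load happens *after* MI_SET_CONTEXT, and only when the restore was
 * inhibited, i.e. on the very first switch into a context. Otherwise the
 * PDPs saved in the context image are reused as-is.
 */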
static int do_switch(struct intel_engine_cs *ring,
		     struct intel_context *to)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_context *from = ring->last_context;
	u32 hw_flags = 0;
	bool uninitialized = false;
	int ret, i;

	if (from != NULL && ring == &dev_priv->ring[RCS]) {
		BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
		BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
	}

	if (should_skip_switch(ring, from, to))
		return 0;

	/* Trying to pin first makes error handling easier. */
	if (ring == &dev_priv->ring[RCS]) {
		ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
					    get_context_alignment(ring->dev), 0);
		if (ret)
			return ret;
	}

	/*
	 * Pin can switch back to the default context if we end up calling into
	 * evict_everything - as a last ditch gtt defrag effort that also
	 * switches to the default context. Hence we need to reload from here.
	 */
	from = ring->last_context;

	if (needs_pd_load_pre(ring, to)) {
		/* Older GENs and non-render rings still want the load first,
		 * "PP_DCLV followed by PP_DIR_BASE register through Load
		 * Register Immediate commands in Ring Buffer before submitting
		 * a context." */
		trace_switch_mm(ring, to);
		ret = to->ppgtt->switch_mm(to->ppgtt, ring);
		if (ret)
			goto unpin_out;

		/* Doing a PD load always reloads the page dirs */
		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
	}

	if (ring != &dev_priv->ring[RCS]) {
		if (from)
			i915_gem_context_unreference(from);
		goto done;
	}

	/*
	 * Clear this page out of any CPU caches for coherent swap-in/out. Note
	 * that thanks to write = false in this call and us not setting any gpu
	 * write domains when putting a context object onto the active list
	 * (when switching away from it), this won't block.
	 *
	 * XXX: We need a real interface to do this instead of trickery.
	 */
	ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
	if (ret)
		goto unpin_out;

	if (!to->legacy_hw_ctx.initialized) {
		hw_flags |= MI_RESTORE_INHIBIT;
		/* NB: If we inhibit the restore, the context is not allowed to
		 * die because future work may end up depending on valid address
		 * space. This means we must enforce that a page table load
		 * occurs in this case. */
	} else if (to->ppgtt &&
		   (intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings)) {
		hw_flags |= MI_FORCE_RESTORE;
		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
	}

	/* We should never emit switch_mm more than once */
	WARN_ON(needs_pd_load_pre(ring, to) &&
		needs_pd_load_post(ring, to, hw_flags));

	ret = mi_set_context(ring, to, hw_flags);
	if (ret)
		goto unpin_out;

	/* GEN8 does *not* require an explicit reload if the PDPs have been
	 * set up, and we do not wish to move them.
	 */
	if (needs_pd_load_post(ring, to, hw_flags)) {
		trace_switch_mm(ring, to);
		ret = to->ppgtt->switch_mm(to->ppgtt, ring);
		/* The hardware context switch is emitted, but we haven't
		 * actually changed the state - so it's probably safe to bail
		 * here. Still, let the user know something dangerous has
		 * happened.
		 */
		if (ret) {
			DRM_ERROR("Failed to change address space on context switch\n");
			goto unpin_out;
		}
	}

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(to->remap_slice & (1<<i)))
			continue;

		ret = i915_gem_l3_remap(ring, i);
		/* If it failed, try again next round */
		if (ret)
			DRM_DEBUG_DRIVER("L3 remapping failed\n");
		else
			to->remap_slice &= ~(1<<i);
	}

	/* The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. In fact, the below code
	 * is a bit suboptimal because the retiring can occur simply after the
	 * MI_SET_CONTEXT instead of when the next seqno has completed.
	 */
	if (from != NULL) {
		from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), ring);
		/* As long as MI_SET_CONTEXT is serializing, i.e. it flushes the
		 * whole damn pipeline, we don't need to explicitly mark the
		 * object dirty. The only exception is that the context must be
		 * correct in case the object gets swapped out. Ideally we'd be
		 * able to defer doing this until we know the object would be
		 * swapped, but there is no way to do that yet.
		 */
		from->legacy_hw_ctx.rcs_state->dirty = 1;

		/* obj is kept alive until the next request by its active ref */
		i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
		i915_gem_context_unreference(from);
	}

	uninitialized = !to->legacy_hw_ctx.initialized;
	to->legacy_hw_ctx.initialized = true;

done:
	i915_gem_context_reference(to);
	ring->last_context = to;

	if (uninitialized) {
		if (ring->init_context) {
			ret = ring->init_context(ring, to);
			if (ret)
				DRM_ERROR("ring init context: %d\n", ret);
		}
	}

	return 0;

unpin_out:
	if (ring->id == RCS)
		i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
	return ret;
}

/**
 * i915_switch_context() - perform a GPU context switch.
 * @ring: ring for which we'll execute the context switch
 * @to: the context to switch to
 *
 * The context life cycle is simple. The context refcount is incremented and
 * decremented by 1 on create and destroy. If the context is in use by the GPU,
 * it will have a refcount > 1. This allows us to destroy the context abstract
 * object while letting the normal object tracking destroy the backing BO.
 *
 * This function should not be used in execlists mode.  Instead the context is
 * switched by writing to the ELSP and requests keep a reference to their
 * context.
 */
int i915_switch_context(struct intel_engine_cs *ring,
			struct intel_context *to)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	WARN_ON(i915.enable_execlists);
	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

	if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
		if (to != ring->last_context) {
			i915_gem_context_reference(to);
			if (ring->last_context)
				i915_gem_context_unreference(ring->last_context);
			ring->last_context = to;
		}
		return 0;
	}

	return do_switch(ring, to);
}
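
/*
 * A minimal sketch of the expected calling pattern, assuming the caller
 * already holds struct_mutex as the execbuffer path does (illustrative
 * only; "ctx" would come from i915_gem_context_get()):
 *
 *	ret = i915_switch_context(ring, ctx);
 *	if (ret)
 *		return ret;
 *	... emit commands that rely on the context's saved state ...
 */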

static bool contexts_enabled(struct drm_device *dev)
{
	return i915.enable_execlists || to_i915(dev)->hw_context_size;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_gem_context_create *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;
	int ret;

	if (!contexts_enabled(dev))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	args->ctx_id = ctx->user_handle;
	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);

	return 0;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;
	int ret;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
		return -ENOENT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	idr_remove(&ctx->file_priv->context_idr, ctx->user_handle);
	i915_gem_context_unreference(ctx);
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
	return 0;
}

int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct intel_context *ctx;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	args->size = 0;
	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		args->value = ctx->hang_stats.ban_period_seconds;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct intel_context *ctx;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		if (args->size)
			ret = -EINVAL;
		else if (args->value < ctx->hang_stats.ban_period_seconds &&
			 !capable(CAP_SYS_ADMIN))
			ret = -EPERM;
		else
			ctx->hang_stats.ban_period_seconds = args->value;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
946