xref: /dragonfly/sys/dev/drm/i915/i915_gem_evict.c (revision 3f2dd94a)
1e3adcf8fSFrançois Tigeot /*
2e3adcf8fSFrançois Tigeot  * Copyright © 2008-2010 Intel Corporation
3e3adcf8fSFrançois Tigeot  *
4e3adcf8fSFrançois Tigeot  * Permission is hereby granted, free of charge, to any person obtaining a
5e3adcf8fSFrançois Tigeot  * copy of this software and associated documentation files (the "Software"),
6e3adcf8fSFrançois Tigeot  * to deal in the Software without restriction, including without limitation
7e3adcf8fSFrançois Tigeot  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8e3adcf8fSFrançois Tigeot  * and/or sell copies of the Software, and to permit persons to whom the
9e3adcf8fSFrançois Tigeot  * Software is furnished to do so, subject to the following conditions:
10e3adcf8fSFrançois Tigeot  *
11e3adcf8fSFrançois Tigeot  * The above copyright notice and this permission notice (including the next
12e3adcf8fSFrançois Tigeot  * paragraph) shall be included in all copies or substantial portions of the
13e3adcf8fSFrançois Tigeot  * Software.
14e3adcf8fSFrançois Tigeot  *
15e3adcf8fSFrançois Tigeot  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16e3adcf8fSFrançois Tigeot  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17e3adcf8fSFrançois Tigeot  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18e3adcf8fSFrançois Tigeot  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19e3adcf8fSFrançois Tigeot  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20e3adcf8fSFrançois Tigeot  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21e3adcf8fSFrançois Tigeot  * IN THE SOFTWARE.
22e3adcf8fSFrançois Tigeot  *
23e3adcf8fSFrançois Tigeot  * Authors:
24e3adcf8fSFrançois Tigeot  *    Eric Anholt <eric@anholt.net>
25e3adcf8fSFrançois Tigeot  *    Chris Wilson <chris@chris-wilson.co.uk>
26e3adcf8fSFrançois Tigeot  *
27e3adcf8fSFrançois Tigeot  */
28e3adcf8fSFrançois Tigeot 
2918e26a6dSFrançois Tigeot #include <drm/drmP.h>
30a2fdbec6SFrançois Tigeot #include <drm/i915_drm.h>
319edbd4a0SFrançois Tigeot 
329edbd4a0SFrançois Tigeot #include "i915_drv.h"
339edbd4a0SFrançois Tigeot #include "intel_drv.h"
34a2fdbec6SFrançois Tigeot #include "i915_trace.h"
35e3adcf8fSFrançois Tigeot 
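/*
 * Selftest hook: the eviction selftests (pulled in at the bottom of this
 * file when CONFIG_DRM_I915_SELFTEST is enabled) can set fail_if_busy so
 * that i915_gem_evict_something() reports -EBUSY instead of stalling on a
 * busy GGTT. With selftests disabled, I915_SELFTEST_DECLARE() compiles the
 * declaration away entirely.
 */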
36*3f2dd94aSFrançois Tigeot I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
37*3f2dd94aSFrançois Tigeot 	bool fail_if_busy:1;
38*3f2dd94aSFrançois Tigeot } igt_evict_ctl;)
39*3f2dd94aSFrançois Tigeot 
40*3f2dd94aSFrançois Tigeot static bool ggtt_is_idle(struct drm_i915_private *i915)
4171f41f3eSFrançois Tigeot {
4271f41f3eSFrançois Tigeot        struct intel_engine_cs *engine;
431e12ee3bSFrançois Tigeot        enum intel_engine_id id;
4471f41f3eSFrançois Tigeot 
45*3f2dd94aSFrançois Tigeot        if (i915->gt.active_requests)
46*3f2dd94aSFrançois Tigeot 	       return false;
474be47400SFrançois Tigeot 
48*3f2dd94aSFrançois Tigeot        for_each_engine(engine, i915, id) {
49*3f2dd94aSFrançois Tigeot 	       if (engine->last_retired_context != i915->kernel_context)
5071f41f3eSFrançois Tigeot 		       return false;
5171f41f3eSFrançois Tigeot        }
5271f41f3eSFrançois Tigeot 
5371f41f3eSFrançois Tigeot        return true;
5471f41f3eSFrançois Tigeot }
5571f41f3eSFrançois Tigeot 
56*3f2dd94aSFrançois Tigeot static int ggtt_flush(struct drm_i915_private *i915)
57*3f2dd94aSFrançois Tigeot {
58*3f2dd94aSFrançois Tigeot 	int err;
59*3f2dd94aSFrançois Tigeot 
60*3f2dd94aSFrançois Tigeot 	/* Not everything in the GGTT is tracked via vma (otherwise we
61*3f2dd94aSFrançois Tigeot 	 * could evict as required with minimal stalling) so we are forced
62*3f2dd94aSFrançois Tigeot 	 * to idle the GPU and explicitly retire outstanding requests in
63*3f2dd94aSFrançois Tigeot 	 * the hopes that we can then remove contexts and the like only
64*3f2dd94aSFrançois Tigeot 	 * bound by their active reference.
65*3f2dd94aSFrançois Tigeot 	 */
66*3f2dd94aSFrançois Tigeot 	err = i915_gem_switch_to_kernel_context(i915);
67*3f2dd94aSFrançois Tigeot 	if (err)
68*3f2dd94aSFrançois Tigeot 		return err;
69*3f2dd94aSFrançois Tigeot 
70*3f2dd94aSFrançois Tigeot 	err = i915_gem_wait_for_idle(i915,
71*3f2dd94aSFrançois Tigeot 				     I915_WAIT_INTERRUPTIBLE |
72*3f2dd94aSFrançois Tigeot 				     I915_WAIT_LOCKED);
73*3f2dd94aSFrançois Tigeot 	if (err)
74*3f2dd94aSFrançois Tigeot 		return err;
75*3f2dd94aSFrançois Tigeot 
76*3f2dd94aSFrançois Tigeot 	return 0;
77*3f2dd94aSFrançois Tigeot }
78*3f2dd94aSFrançois Tigeot 
7971f41f3eSFrançois Tigeot static bool
80a85cb24fSFrançois Tigeot mark_free(struct drm_mm_scan *scan,
81a85cb24fSFrançois Tigeot 	  struct i915_vma *vma,
82a85cb24fSFrançois Tigeot 	  unsigned int flags,
83a85cb24fSFrançois Tigeot 	  struct list_head *unwind)
84e3adcf8fSFrançois Tigeot {
8571f41f3eSFrançois Tigeot 	if (i915_vma_is_pinned(vma))
86e9243325SFrançois Tigeot 		return false;
87e9243325SFrançois Tigeot 
88*3f2dd94aSFrançois Tigeot 	if (flags & PIN_NONFAULT && i915_vma_has_userfault(vma))
899edbd4a0SFrançois Tigeot 		return false;
909edbd4a0SFrançois Tigeot 
91*3f2dd94aSFrançois Tigeot 	list_add(&vma->evict_link, unwind);
92a85cb24fSFrançois Tigeot 	return drm_mm_scan_add_block(scan, &vma->node);
93e3adcf8fSFrançois Tigeot }
94e3adcf8fSFrançois Tigeot 
95ba55f2f5SFrançois Tigeot /**
96ba55f2f5SFrançois Tigeot  * i915_gem_evict_something - Evict vmas to make room for binding a new one
97ba55f2f5SFrançois Tigeot  * @vm: address space to evict from
982c9916cdSFrançois Tigeot  * @min_size: size of the desired free space
99ba55f2f5SFrançois Tigeot  * @alignment: alignment constraint of the desired free space
100ba55f2f5SFrançois Tigeot  * @cache_level: cache_level for the desired space
1012c9916cdSFrançois Tigeot  * @start: start (inclusive) of the range from which to evict objects
1022c9916cdSFrançois Tigeot  * @end: end (exclusive) of the range from which to evict objects
1032c9916cdSFrançois Tigeot  * @flags: additional flags to control the eviction algorithm
104ba55f2f5SFrançois Tigeot  *
105ba55f2f5SFrançois Tigeot  * This function will try to evict vmas until a free space satisfying the
106ba55f2f5SFrançois Tigeot  * requirements is found. Callers must check first whether any such hole exists
107ba55f2f5SFrançois Tigeot  * already before calling this function.
108ba55f2f5SFrançois Tigeot  *
109ba55f2f5SFrançois Tigeot  * This function is used by the object/vma binding code.
110ba55f2f5SFrançois Tigeot  *
111477eb7f9SFrançois Tigeot  * Since this function is only used to free up virtual address space it only
112477eb7f9SFrançois Tigeot  * ignores pinned vmas, and not objects where the backing storage itself is
113477eb7f9SFrançois Tigeot  * pinned. Hence obj->pages_pin_count does not protect against eviction.
114477eb7f9SFrançois Tigeot  *
115ba55f2f5SFrançois Tigeot  * To clarify: This is for freeing up virtual address space, not for freeing
116ba55f2f5SFrançois Tigeot  * memory in e.g. the shrinker.
117ba55f2f5SFrançois Tigeot  */
118e3adcf8fSFrançois Tigeot int
11971f41f3eSFrançois Tigeot i915_gem_evict_something(struct i915_address_space *vm,
12071f41f3eSFrançois Tigeot 			 u64 min_size, u64 alignment,
12171f41f3eSFrançois Tigeot 			 unsigned cache_level,
12271f41f3eSFrançois Tigeot 			 u64 start, u64 end,
123ba55f2f5SFrançois Tigeot 			 unsigned flags)
124e3adcf8fSFrançois Tigeot {
125a85cb24fSFrançois Tigeot 	struct drm_i915_private *dev_priv = vm->i915;
126a85cb24fSFrançois Tigeot 	struct drm_mm_scan scan;
12771f41f3eSFrançois Tigeot 	struct list_head eviction_list;
12871f41f3eSFrançois Tigeot 	struct list_head *phases[] = {
12971f41f3eSFrançois Tigeot 		&vm->inactive_list,
13071f41f3eSFrançois Tigeot 		&vm->active_list,
13171f41f3eSFrançois Tigeot 		NULL,
13271f41f3eSFrançois Tigeot 	}, **phase;
13371f41f3eSFrançois Tigeot 	struct i915_vma *vma, *next;
134a85cb24fSFrançois Tigeot 	struct drm_mm_node *node;
135a85cb24fSFrançois Tigeot 	enum drm_mm_insert_mode mode;
13671f41f3eSFrançois Tigeot 	int ret;
137e3adcf8fSFrançois Tigeot 
138a85cb24fSFrançois Tigeot 	lockdep_assert_held(&vm->i915->drm.struct_mutex);
13971f41f3eSFrançois Tigeot 	trace_i915_gem_evict(vm, min_size, alignment, flags);
140a2fdbec6SFrançois Tigeot 
141e3adcf8fSFrançois Tigeot 	/*
142e3adcf8fSFrançois Tigeot 	 * The goal is to evict objects and amalgamate space in LRU order.
143e3adcf8fSFrançois Tigeot 	 * The oldest idle objects reside on the inactive list, which is in
14471f41f3eSFrançois Tigeot 	 * retirement order. The next objects to retire are those in flight,
14571f41f3eSFrançois Tigeot 	 * on the active list, again in retirement order.
146e3adcf8fSFrançois Tigeot 	 *
147e3adcf8fSFrançois Tigeot 	 * The retirement sequence is thus:
148e3adcf8fSFrançois Tigeot 	 *   1. Inactive objects (already retired)
14971f41f3eSFrançois Tigeot 	 *   2. Active objects (will stall on unbinding)
150e3adcf8fSFrançois Tigeot 	 *
151e3adcf8fSFrançois Tigeot 	 * On each list, the oldest objects lie at the HEAD with the freshest
152e3adcf8fSFrançois Tigeot 	 * object on the TAIL.
153e3adcf8fSFrançois Tigeot 	 */
154a85cb24fSFrançois Tigeot 	kprintf("i915_gem_evict_something: %016llx-%016llx\n", start, end);
155a85cb24fSFrançois Tigeot 	mode = DRM_MM_INSERT_BEST;
156a85cb24fSFrançois Tigeot 	if (flags & PIN_HIGH)
157a85cb24fSFrançois Tigeot 		mode = DRM_MM_INSERT_HIGH;
158a85cb24fSFrançois Tigeot 	if (flags & PIN_MAPPABLE)
159a85cb24fSFrançois Tigeot 		mode = DRM_MM_INSERT_LOW;
160a85cb24fSFrançois Tigeot 	drm_mm_scan_init_with_range(&scan, &vm->mm,
161a85cb24fSFrançois Tigeot 				    min_size, alignment, cache_level,
162a85cb24fSFrançois Tigeot 				    start, end, mode);
163e3adcf8fSFrançois Tigeot 
164*3f2dd94aSFrançois Tigeot 	/*
165*3f2dd94aSFrançois Tigeot 	 * Retire before we search the active list. Although we have
166a85cb24fSFrançois Tigeot 	 * reasonable accuracy in our retirement lists, we may have
167a85cb24fSFrançois Tigeot 	 * a stray pin (preventing eviction) that can only be resolved by
168a85cb24fSFrançois Tigeot 	 * retiring.
169a85cb24fSFrançois Tigeot 	 */
170a85cb24fSFrançois Tigeot 	if (!(flags & PIN_NONBLOCK))
171a85cb24fSFrançois Tigeot 		i915_gem_retire_requests(dev_priv);
172a85cb24fSFrançois Tigeot 	else
17371f41f3eSFrançois Tigeot 		phases[1] = NULL;
1749f16360bSFrançois Tigeot 
17571f41f3eSFrançois Tigeot search_again:
17671f41f3eSFrançois Tigeot 	INIT_LIST_HEAD(&eviction_list);
17771f41f3eSFrançois Tigeot 	phase = phases;
17871f41f3eSFrançois Tigeot 	do {
17971f41f3eSFrançois Tigeot 		list_for_each_entry(vma, *phase, vm_link)
180a85cb24fSFrançois Tigeot 			if (mark_free(&scan, vma, flags, &eviction_list))
181e3adcf8fSFrançois Tigeot 				goto found;
18271f41f3eSFrançois Tigeot 	} while (*++phase);
183e3adcf8fSFrançois Tigeot 
184e3adcf8fSFrançois Tigeot 	/* Nothing found, clean up and bail out! */
185*3f2dd94aSFrançois Tigeot 	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
186a85cb24fSFrançois Tigeot 		ret = drm_mm_scan_remove_block(&scan, &vma->node);
187f192107fSFrançois Tigeot 		BUG_ON(ret);
188e3adcf8fSFrançois Tigeot 	}
189e3adcf8fSFrançois Tigeot 
190*3f2dd94aSFrançois Tigeot 	/*
191*3f2dd94aSFrançois Tigeot 	 * Can we unpin some objects such as idle hw contents,
19271f41f3eSFrançois Tigeot 	 * or pending flips? But since only the GGTT has global entries
19371f41f3eSFrançois Tigeot 	 * such as scanouts, ringbuffers and contexts, we can skip the
19471f41f3eSFrançois Tigeot 	 * purge when inspecting per-process local address spaces.
195e3adcf8fSFrançois Tigeot 	 */
196a85cb24fSFrançois Tigeot 	if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK) {
197e3adcf8fSFrançois Tigeot 		return -ENOSPC;
198a85cb24fSFrançois Tigeot 	}
199a85cb24fSFrançois Tigeot 	kprintf("i915_gem_evict_something: Nothing found %d,%d\n",
200a85cb24fSFrançois Tigeot 		ggtt_is_idle(dev_priv),
201a85cb24fSFrançois Tigeot 		intel_has_pending_fb_unpin(dev_priv));
202e3adcf8fSFrançois Tigeot 
203*3f2dd94aSFrançois Tigeot 	/*
204*3f2dd94aSFrançois Tigeot 	 * Not everything in the GGTT is tracked via VMA using
205*3f2dd94aSFrançois Tigeot 	 * i915_vma_move_to_active(), otherwise we could evict as required
206*3f2dd94aSFrançois Tigeot 	 * with minimal stalling. Instead we are forced to idle the GPU and
207*3f2dd94aSFrançois Tigeot 	 * explicitly retire outstanding requests which will then remove
208*3f2dd94aSFrançois Tigeot 	 * the pinning for active objects such as contexts and ring,
209*3f2dd94aSFrançois Tigeot 	 * enabling us to evict them on the next iteration.
210*3f2dd94aSFrançois Tigeot 	 *
211*3f2dd94aSFrançois Tigeot 	 * To ensure that all user contexts are evictable, we perform
212*3f2dd94aSFrançois Tigeot 	 * a switch to the perma-pinned kernel context. This also gives
213*3f2dd94aSFrançois Tigeot 	 * us a termination condition: when the last retired context is
214*3f2dd94aSFrançois Tigeot 	 * the kernel's, there is nothing more we can evict.
21571f41f3eSFrançois Tigeot 	 */
216*3f2dd94aSFrançois Tigeot 	if (!ggtt_is_idle(dev_priv)) {
217*3f2dd94aSFrançois Tigeot 		if (I915_SELFTEST_ONLY(igt_evict_ctl.fail_if_busy))
218*3f2dd94aSFrançois Tigeot 			return -EBUSY;
2191487f786SFrançois Tigeot 
220*3f2dd94aSFrançois Tigeot 		ret = ggtt_flush(dev_priv);
2219edbd4a0SFrançois Tigeot 		if (ret)
2229edbd4a0SFrançois Tigeot 			return ret;
2239edbd4a0SFrançois Tigeot 
2249edbd4a0SFrançois Tigeot 		goto search_again;
225*3f2dd94aSFrançois Tigeot 	}
226*3f2dd94aSFrançois Tigeot 
227*3f2dd94aSFrançois Tigeot 	/*
228*3f2dd94aSFrançois Tigeot 	 * If we still have pending pageflip completions, drop
229*3f2dd94aSFrançois Tigeot 	 * back to userspace to give our workqueues time to
230*3f2dd94aSFrançois Tigeot 	 * acquire our locks and unpin the old scanouts.
231*3f2dd94aSFrançois Tigeot 	 */
232*3f2dd94aSFrançois Tigeot 	return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
2339edbd4a0SFrançois Tigeot 
234e3adcf8fSFrançois Tigeot found:
235e3adcf8fSFrançois Tigeot 	/* drm_mm doesn't allow any other operations while
23671f41f3eSFrançois Tigeot 	 * scanning, therefore store to-be-evicted objects on a
23771f41f3eSFrançois Tigeot 	 * temporary list and take a reference for all before
23871f41f3eSFrançois Tigeot 	 * calling unbind (which may remove the active reference
23971f41f3eSFrançois Tigeot 	 * of any of our objects, thus corrupting the list).
24071f41f3eSFrançois Tigeot 	 */
241*3f2dd94aSFrançois Tigeot 	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
242a85cb24fSFrançois Tigeot 		if (drm_mm_scan_remove_block(&scan, &vma->node))
24371f41f3eSFrançois Tigeot 			__i915_vma_pin(vma);
24471f41f3eSFrançois Tigeot 		else
245*3f2dd94aSFrançois Tigeot 			list_del(&vma->evict_link);
246e3adcf8fSFrançois Tigeot 	}
247e3adcf8fSFrançois Tigeot 
248e3adcf8fSFrançois Tigeot 	/* Unbinding will emit any required flushes */
2494be47400SFrançois Tigeot 	ret = 0;
250*3f2dd94aSFrançois Tigeot 	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
251a85cb24fSFrançois Tigeot 		__i915_vma_unpin(vma);
252a85cb24fSFrançois Tigeot 		if (ret == 0)
253a85cb24fSFrançois Tigeot 			ret = i915_vma_unbind(vma);
254a85cb24fSFrançois Tigeot 	}
255a85cb24fSFrançois Tigeot 
256a85cb24fSFrançois Tigeot 	while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) {
257a85cb24fSFrançois Tigeot 		vma = container_of(node, struct i915_vma, node);
258a85cb24fSFrançois Tigeot 		ret = i915_vma_unbind(vma);
259a85cb24fSFrançois Tigeot 	}
260a85cb24fSFrançois Tigeot 
261a85cb24fSFrançois Tigeot 	return ret;
262a85cb24fSFrançois Tigeot }
263a85cb24fSFrançois Tigeot 
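/*
 * Illustrative sketch (not part of this file): a caller on the binding path
 * typically attempts the drm_mm insertion first and only evicts on -ENOSPC,
 * retrying the insertion afterwards. The exact helper and retry policy live
 * in i915_gem_gtt.c; the snippet below is an assumed outline, not the real
 * implementation.
 *
 *	err = drm_mm_insert_node_in_range(&vm->mm, node, size, alignment,
 *					  cache_level, start, end, mode);
 *	if (err == -ENOSPC) {
 *		err = i915_gem_evict_something(vm, size, alignment,
 *					       cache_level, start, end, flags);
 *		if (err == 0)
 *			err = drm_mm_insert_node_in_range(&vm->mm, node,
 *							  size, alignment,
 *							  cache_level,
 *							  start, end, mode);
 *	}
 */
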
264a85cb24fSFrançois Tigeot /**
265a85cb24fSFrançois Tigeot  * i915_gem_evict_for_node - Evict vmas to make room for binding a new one
266a85cb24fSFrançois Tigeot  * @vm: address space to evict from
267a85cb24fSFrançois Tigeot  * @target: range (and color) to evict for
268a85cb24fSFrançois Tigeot  * @flags: additional flags to control the eviction algorithm
269a85cb24fSFrançois Tigeot  *
270a85cb24fSFrançois Tigeot  * This function will try to evict vmas that overlap the target node.
271a85cb24fSFrançois Tigeot  *
272a85cb24fSFrançois Tigeot  * To clarify: This is for freeing up virtual address space, not for freeing
273a85cb24fSFrançois Tigeot  * memory in e.g. the shrinker.
274a85cb24fSFrançois Tigeot  */
275a85cb24fSFrançois Tigeot int i915_gem_evict_for_node(struct i915_address_space *vm,
276a85cb24fSFrançois Tigeot 			    struct drm_mm_node *target,
277a85cb24fSFrançois Tigeot 			    unsigned int flags)
278a85cb24fSFrançois Tigeot {
279a85cb24fSFrançois Tigeot 	LINUX_LIST_HEAD(eviction_list);
280a85cb24fSFrançois Tigeot 	struct drm_mm_node *node;
281a85cb24fSFrançois Tigeot 	u64 start = target->start;
282a85cb24fSFrançois Tigeot 	u64 end = start + target->size;
283a85cb24fSFrançois Tigeot 	struct i915_vma *vma, *next;
284a85cb24fSFrançois Tigeot 	bool check_color;
285a85cb24fSFrançois Tigeot 	int ret = 0;
286a85cb24fSFrançois Tigeot 
287a85cb24fSFrançois Tigeot 	lockdep_assert_held(&vm->i915->drm.struct_mutex);
288a85cb24fSFrançois Tigeot 	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
289a85cb24fSFrançois Tigeot 	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
290a85cb24fSFrançois Tigeot 
291a85cb24fSFrançois Tigeot 	trace_i915_gem_evict_node(vm, target, flags);
292a85cb24fSFrançois Tigeot 
293a85cb24fSFrançois Tigeot 	/* Retire before we search the active list. Although we have
294a85cb24fSFrançois Tigeot 	 * reasonable accuracy in our retirement lists, we may have
295a85cb24fSFrançois Tigeot 	 * a stray pin (preventing eviction) that can only be resolved by
296a85cb24fSFrançois Tigeot 	 * retiring.
297a85cb24fSFrançois Tigeot 	 */
298a85cb24fSFrançois Tigeot 	if (!(flags & PIN_NONBLOCK))
299a85cb24fSFrançois Tigeot 		i915_gem_retire_requests(vm->i915);
300a85cb24fSFrançois Tigeot 
301a85cb24fSFrançois Tigeot 	check_color = vm->mm.color_adjust;
302a85cb24fSFrançois Tigeot 	if (check_color) {
303a85cb24fSFrançois Tigeot 		/* Expand search to cover neighbouring guard pages (or lack!) */
304a85cb24fSFrançois Tigeot 		if (start)
305a85cb24fSFrançois Tigeot 			start -= I915_GTT_PAGE_SIZE;
306a85cb24fSFrançois Tigeot 
307a85cb24fSFrançois Tigeot 		/* Always look at the page afterwards to avoid the end-of-GTT */
308a85cb24fSFrançois Tigeot 		end += I915_GTT_PAGE_SIZE;
309a85cb24fSFrançois Tigeot 	}
310a85cb24fSFrançois Tigeot 	GEM_BUG_ON(start >= end);
311a85cb24fSFrançois Tigeot 
312a85cb24fSFrançois Tigeot 	drm_mm_for_each_node_in_range(node, &vm->mm, start, end) {
313a85cb24fSFrançois Tigeot 		/* If we find any non-objects (!vma), we cannot evict them */
314a85cb24fSFrançois Tigeot 		if (node->color == I915_COLOR_UNEVICTABLE) {
315a85cb24fSFrançois Tigeot 			ret = -ENOSPC;
316a85cb24fSFrançois Tigeot 			break;
317a85cb24fSFrançois Tigeot 		}
318a85cb24fSFrançois Tigeot 
319a85cb24fSFrançois Tigeot 		GEM_BUG_ON(!node->allocated);
320a85cb24fSFrançois Tigeot 		vma = container_of(node, typeof(*vma), node);
321a85cb24fSFrançois Tigeot 
322a85cb24fSFrançois Tigeot 		/* If we are using coloring to insert guard pages between
323a85cb24fSFrançois Tigeot 		 * different cache domains within the address space, we have
324a85cb24fSFrançois Tigeot 		 * to check whether the objects on either side of our range
325a85cb24fSFrançois Tigeot 	 * abut and conflict. If they are in conflict, then we evict
326a85cb24fSFrançois Tigeot 		 * those as well to make room for our guard pages.
327a85cb24fSFrançois Tigeot 		 */
328a85cb24fSFrançois Tigeot 		if (check_color) {
329a85cb24fSFrançois Tigeot 			if (node->start + node->size == target->start) {
330a85cb24fSFrançois Tigeot 				if (node->color == target->color)
331a85cb24fSFrançois Tigeot 					continue;
332a85cb24fSFrançois Tigeot 			}
333a85cb24fSFrançois Tigeot 			if (node->start == target->start + target->size) {
334a85cb24fSFrançois Tigeot 				if (node->color == target->color)
335a85cb24fSFrançois Tigeot 					continue;
336a85cb24fSFrançois Tigeot 			}
337a85cb24fSFrançois Tigeot 		}
338a85cb24fSFrançois Tigeot 
339a85cb24fSFrançois Tigeot 		if (flags & PIN_NONBLOCK &&
340a85cb24fSFrançois Tigeot 		    (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))) {
341a85cb24fSFrançois Tigeot 			ret = -ENOSPC;
342a85cb24fSFrançois Tigeot 			break;
343a85cb24fSFrançois Tigeot 		}
344a85cb24fSFrançois Tigeot 
345*3f2dd94aSFrançois Tigeot 		if (flags & PIN_NONFAULT && i915_vma_has_userfault(vma)) {
346a85cb24fSFrançois Tigeot 			ret = -ENOSPC;
347*3f2dd94aSFrançois Tigeot 			break;
348*3f2dd94aSFrançois Tigeot 		}
349*3f2dd94aSFrançois Tigeot 
350*3f2dd94aSFrançois Tigeot 		/* Overlap of objects in the same batch? */
351*3f2dd94aSFrançois Tigeot 		if (i915_vma_is_pinned(vma)) {
352*3f2dd94aSFrançois Tigeot 			ret = -ENOSPC;
353*3f2dd94aSFrançois Tigeot 			if (vma->exec_flags &&
354*3f2dd94aSFrançois Tigeot 			    *vma->exec_flags & EXEC_OBJECT_PINNED)
355a85cb24fSFrançois Tigeot 				ret = -EINVAL;
356a85cb24fSFrançois Tigeot 			break;
357a85cb24fSFrançois Tigeot 		}
358a85cb24fSFrançois Tigeot 
359a85cb24fSFrançois Tigeot 		/* Never show fear in the face of dragons!
360a85cb24fSFrançois Tigeot 		 *
361a85cb24fSFrançois Tigeot 		 * We cannot directly remove this node from within this
362a85cb24fSFrançois Tigeot 		 * iterator and as with i915_gem_evict_something() we employ
363a85cb24fSFrançois Tigeot 		 * the vma pin_count in order to prevent the action of
364a85cb24fSFrançois Tigeot 		 * unbinding one vma from freeing (by dropping its active
365a85cb24fSFrançois Tigeot 		 * reference) another in our eviction list.
366a85cb24fSFrançois Tigeot 		 */
367a85cb24fSFrançois Tigeot 		__i915_vma_pin(vma);
368*3f2dd94aSFrançois Tigeot 		list_add(&vma->evict_link, &eviction_list);
369a85cb24fSFrançois Tigeot 	}
370a85cb24fSFrançois Tigeot 
371*3f2dd94aSFrançois Tigeot 	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
37271f41f3eSFrançois Tigeot 		__i915_vma_unpin(vma);
3739edbd4a0SFrançois Tigeot 		if (ret == 0)
3749edbd4a0SFrançois Tigeot 			ret = i915_vma_unbind(vma);
375e3adcf8fSFrançois Tigeot 	}
376a85cb24fSFrançois Tigeot 
377e3adcf8fSFrançois Tigeot 	return ret;
378e3adcf8fSFrançois Tigeot }
379e3adcf8fSFrançois Tigeot 
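/*
 * Illustrative sketch (not part of this file): the fixed-offset reservation
 * path first tries to claim the node outright and only falls back to
 * evicting whatever overlaps it, retrying the reservation afterwards. The
 * snippet below is an assumed outline; the real logic lives in
 * i915_gem_gtt.c.
 *
 *	err = drm_mm_reserve_node(&vm->mm, node);
 *	if (err == -ENOSPC) {
 *		err = i915_gem_evict_for_node(vm, node, flags);
 *		if (err == 0)
 *			err = drm_mm_reserve_node(&vm->mm, node);
 *	}
 */
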
3809edbd4a0SFrançois Tigeot /**
381ba55f2f5SFrançois Tigeot  * i915_gem_evict_vm - Evict all idle vmas from a vm
382ba55f2f5SFrançois Tigeot  * @vm: Address space to cleanse
3839edbd4a0SFrançois Tigeot  *
384*3f2dd94aSFrançois Tigeot  * This function evicts all vmas from a vm.
3859edbd4a0SFrançois Tigeot  *
386ba55f2f5SFrançois Tigeot  * This is used by the execbuf code as a last-ditch effort to defragment the
387ba55f2f5SFrançois Tigeot  * address space.
388ba55f2f5SFrançois Tigeot  *
389ba55f2f5SFrançois Tigeot  * To clarify: This is for freeing up virtual address space, not for freeing
390ba55f2f5SFrançois Tigeot  * memory in e.g. the shrinker.
3919edbd4a0SFrançois Tigeot  */
392*3f2dd94aSFrançois Tigeot int i915_gem_evict_vm(struct i915_address_space *vm)
3939edbd4a0SFrançois Tigeot {
394*3f2dd94aSFrançois Tigeot 	struct list_head *phases[] = {
395*3f2dd94aSFrançois Tigeot 		&vm->inactive_list,
396*3f2dd94aSFrançois Tigeot 		&vm->active_list,
397*3f2dd94aSFrançois Tigeot 		NULL
398*3f2dd94aSFrançois Tigeot 	}, **phase;
399*3f2dd94aSFrançois Tigeot 	struct list_head eviction_list;
4009edbd4a0SFrançois Tigeot 	struct i915_vma *vma, *next;
4019edbd4a0SFrançois Tigeot 	int ret;
4029edbd4a0SFrançois Tigeot 
403a85cb24fSFrançois Tigeot 	lockdep_assert_held(&vm->i915->drm.struct_mutex);
4049edbd4a0SFrançois Tigeot 	trace_i915_gem_evict_vm(vm);
4059edbd4a0SFrançois Tigeot 
406*3f2dd94aSFrançois Tigeot 	/* Switch back to the default context in order to unpin
407*3f2dd94aSFrançois Tigeot 	 * the existing context objects. However, such objects only
408*3f2dd94aSFrançois Tigeot 	 * pin themselves inside the global GTT and performing the
409*3f2dd94aSFrançois Tigeot 	 * switch otherwise is ineffective.
410*3f2dd94aSFrançois Tigeot 	 */
4111487f786SFrançois Tigeot 	if (i915_is_ggtt(vm)) {
412*3f2dd94aSFrançois Tigeot 		ret = ggtt_flush(vm->i915);
4131487f786SFrançois Tigeot 		if (ret)
4141487f786SFrançois Tigeot 			return ret;
4151487f786SFrançois Tigeot 	}
4161487f786SFrançois Tigeot 
417*3f2dd94aSFrançois Tigeot 	INIT_LIST_HEAD(&eviction_list);
418*3f2dd94aSFrançois Tigeot 	phase = phases;
419*3f2dd94aSFrançois Tigeot 	do {
420*3f2dd94aSFrançois Tigeot 		list_for_each_entry(vma, *phase, vm_link) {
421*3f2dd94aSFrançois Tigeot 			if (i915_vma_is_pinned(vma))
422*3f2dd94aSFrançois Tigeot 				continue;
4239edbd4a0SFrançois Tigeot 
424*3f2dd94aSFrançois Tigeot 			__i915_vma_pin(vma);
425*3f2dd94aSFrançois Tigeot 			list_add(&vma->evict_link, &eviction_list);
4269edbd4a0SFrançois Tigeot 		}
427*3f2dd94aSFrançois Tigeot 	} while (*++phase);
4289edbd4a0SFrançois Tigeot 
429*3f2dd94aSFrançois Tigeot 	ret = 0;
430*3f2dd94aSFrançois Tigeot 	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
431*3f2dd94aSFrançois Tigeot 		__i915_vma_unpin(vma);
432*3f2dd94aSFrançois Tigeot 		if (ret == 0)
433*3f2dd94aSFrançois Tigeot 			ret = i915_vma_unbind(vma);
434*3f2dd94aSFrançois Tigeot 	}
435*3f2dd94aSFrançois Tigeot 	return ret;
4369edbd4a0SFrançois Tigeot }
437a85cb24fSFrançois Tigeot 
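/*
 * Illustrative sketch (not part of this file): execbuf reaches for this as
 * a last-ditch defragmentation step once per-object eviction has failed,
 * with struct_mutex already held. The surrounding retry loop is an assumed
 * outline only.
 *
 *	err = i915_gem_evict_vm(vm);
 *	if (err)
 *		return err;
 *	... then retry binding the execbuffer objects into the emptied vm ...
 */
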
438a85cb24fSFrançois Tigeot #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
439a85cb24fSFrançois Tigeot #include "selftests/i915_gem_evict.c"
440a85cb24fSFrançois Tigeot #endif