xref: /openbsd/sys/dev/pci/drm/i915/i915_gem_gtt.c (revision 771fbea0)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2010 Daniel Vetter
 * Copyright © 2020 Intel Corporation
 */

#include <linux/slab.h> /* fault-inject.h is not standalone! */

#include <linux/fault-inject.h>
#include <linux/log2.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>

#include <asm/set_memory.h>
#include <asm/smp.h>

#include "display/intel_frontbuffer.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_vgpu.h"

int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
#ifdef __linux__
	do {
		if (dma_map_sg_attrs(&obj->base.dev->pdev->dev,
				     pages->sgl, pages->nents,
				     PCI_DMA_BIDIRECTIONAL,
				     DMA_ATTR_NO_WARN))
			return 0;

		/*
		 * If the DMA remap fails, one cause can be that we have
		 * too many objects pinned in a small remapping table,
		 * such as swiotlb. Incrementally purge all other objects and
		 * try again - if there are no more pages to remove from
		 * the DMA remapper, i915_gem_shrink will return 0.
		 */
		GEM_BUG_ON(obj->mm.pages == pages);
	} while (i915_gem_shrink(to_i915(obj->base.dev),
				 obj->base.size >> PAGE_SHIFT, NULL,
				 I915_SHRINK_BOUND |
				 I915_SHRINK_UNBOUND));

	return -ENOSPC;
#else
	return 0;
#endif
}

void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
#ifdef notyet
	struct device *kdev = &dev_priv->drm.pdev->dev;
#endif
	struct i915_ggtt *ggtt = &dev_priv->ggtt;

	if (unlikely(ggtt->do_idle_maps)) {
		/* XXX This does not prevent more requests being submitted! */
		if (intel_gt_retire_requests_timeout(ggtt->vm.gt,
						     -MAX_SCHEDULE_TIMEOUT)) {
			drm_err(&dev_priv->drm,
				"Failed to wait for idle; VT-d may hang.\n");
			/* Wait a bit, in hopes it avoids the hang */
			udelay(10);
		}
	}

#ifdef notyet
	dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
#endif
}
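
/*
 * Illustrative sketch (not part of this file): the expected pairing of the
 * two helpers above around the lifetime of an object's backing store. The
 * wrapper name and the elided GPU work are assumptions made for the sake of
 * illustration; only the helper signatures are taken from the code above.
 */
#if 0
static int example_prepare_and_finish(struct drm_i915_gem_object *obj,
				      struct sg_table *pages)
{
	int err;

	/*
	 * Map the scatterlist for DMA; on failure the helper purges other
	 * objects and retries, returning -ENOSPC only once nothing more
	 * can be shrunk out of the remapping table.
	 */
	err = i915_gem_gtt_prepare_pages(obj, pages);
	if (err)
		return err;

	/* ... bind the pages into a GTT and use them with the GPU ... */

	/* Unmap again once the pages are no longer in use. */
	i915_gem_gtt_finish_pages(obj, pages);
	return 0;
}
#endif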

/**
 * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
 * @vm: the &struct i915_address_space
 * @node: the &struct drm_mm_node (typically i915_vma.node)
 * @size: how much space to allocate inside the GTT,
 *        must be #I915_GTT_PAGE_SIZE aligned
 * @offset: where to insert inside the GTT,
 *          must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
 *          (@offset + @size) must fit within the address space
 * @color: color to apply to node, if this node is not from a VMA,
 *         color must be #I915_COLOR_UNEVICTABLE
 * @flags: control search and eviction behaviour
 *
 * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
 * the address space (using @size and @color). If the @node does not fit, it
 * tries to evict any overlapping nodes from the GTT, including any
 * neighbouring nodes if the colors do not match (to ensure guard pages between
 * differing domains). See i915_gem_evict_for_node() for the gory details
 * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
 * evicting active overlapping objects, and any overlapping node that is pinned
 * or marked as unevictable will also result in failure.
 *
 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
 * asked to wait for eviction and interrupted.
 */
int i915_gem_gtt_reserve(struct i915_address_space *vm,
			 struct drm_mm_node *node,
			 u64 size, u64 offset, unsigned long color,
			 unsigned int flags)
{
	int err;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(range_overflows(offset, size, vm->total));
	GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);
	GEM_BUG_ON(drm_mm_node_allocated(node));

	node->size = size;
	node->start = offset;
	node->color = color;

	err = drm_mm_reserve_node(&vm->mm, node);
	if (err != -ENOSPC)
		return err;

	if (flags & PIN_NOEVICT)
		return -ENOSPC;

	err = i915_gem_evict_for_node(vm, node, flags);
	if (err == 0)
		err = drm_mm_reserve_node(&vm->mm, node);

	return err;
}
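
/*
 * Illustrative sketch (assumption, not driver code): reserving a single page
 * at a fixed GTT offset for a node that is not backed by a VMA, hence the
 * #I915_COLOR_UNEVICTABLE color per the kernel-doc above. The offset and the
 * PIN_NOEVICT choice are hypothetical; real callers reach this through
 * i915_vma pinning, with vm->mutex held.
 */
#if 0
static int example_reserve_fixed(struct i915_address_space *vm,
				 struct drm_mm_node *node)
{
	lockdep_assert_held(&vm->mutex);

	/* Try offset 0 exactly; fail with -ENOSPC rather than evict. */
	return i915_gem_gtt_reserve(vm, node, I915_GTT_PAGE_SIZE, 0,
				    I915_COLOR_UNEVICTABLE, PIN_NOEVICT);
}
#endif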

static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
{
	u64 range, addr;

	GEM_BUG_ON(range_overflows(start, len, end));
	GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));

	range = round_down(end - len, align) - round_up(start, align);
	if (range) {
		if (sizeof(unsigned long) == sizeof(u64)) {
			addr = get_random_long();
		} else {
			addr = get_random_int();
			if (range > U32_MAX) {
				addr <<= 32;
				addr |= get_random_int();
			}
		}
		div64_u64_rem(addr, range, &addr);
		start += addr;
	}

	return round_up(start, align);
}
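
/*
 * Worked example (illustrative values, not from the driver): with start=0,
 * end=1 MiB, len=4 KiB and align=4 KiB, the candidate range above is
 * round_down(1 MiB - 4 KiB, 4 KiB) - round_up(0, 4 KiB) = 1044480 bytes.
 * The random remainder then lands anywhere in [0, 1044480), and the final
 * round_up() snaps it to one of 256 page-aligned offsets between 0 and
 * 1020 KiB, each of which leaves room for the 4 KiB node before @end.
 */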

/**
 * i915_gem_gtt_insert - insert a node into an address_space (GTT)
 * @vm: the &struct i915_address_space
 * @node: the &struct drm_mm_node (typically i915_vma.node)
 * @size: how much space to allocate inside the GTT,
 *        must be #I915_GTT_PAGE_SIZE aligned
 * @alignment: required alignment of starting offset, may be 0 but
 *             if specified, this must be a power-of-two and at least
 *             #I915_GTT_MIN_ALIGNMENT
 * @color: color to apply to node
 * @start: start of any range restriction inside GTT (0 for all),
 *         must be #I915_GTT_PAGE_SIZE aligned
 * @end: end of any range restriction inside GTT (U64_MAX for all),
 *       must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
 * @flags: control search and eviction behaviour
 *
 * i915_gem_gtt_insert() first searches for an available hole into which
 * it can insert the node. The hole address is aligned to @alignment and
 * its @size must then fit entirely within the [@start, @end] bounds. The
 * nodes on either side of the hole must match @color, or else a guard page
 * will be inserted between the two nodes (or the node evicted). If no
 * suitable hole is found, first a victim is randomly selected and tested
 * for eviction; failing that, the LRU list of objects within the GTT
 * is scanned to find the first set of replacement nodes to create the hole.
 * Those old overlapping nodes are evicted from the GTT (and so must be
 * rebound before any future use). Any node that is currently pinned cannot
 * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently
 * active and #PIN_NONBLOCK is specified, that node is also skipped when
 * searching for an eviction candidate. See i915_gem_evict_something() for
 * the gory details on the eviction algorithm.
 *
 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
 * asked to wait for eviction and interrupted.
 */
int i915_gem_gtt_insert(struct i915_address_space *vm,
			struct drm_mm_node *node,
			u64 size, u64 alignment, unsigned long color,
			u64 start, u64 end, unsigned int flags)
{
	enum drm_mm_insert_mode mode;
	u64 offset;
	int err;

	lockdep_assert_held(&vm->mutex);

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(start >= end);
	GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);
	GEM_BUG_ON(drm_mm_node_allocated(node));

	if (unlikely(range_overflows(start, size, end)))
		return -ENOSPC;

	if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
		return -ENOSPC;

	mode = DRM_MM_INSERT_BEST;
	if (flags & PIN_HIGH)
		mode = DRM_MM_INSERT_HIGHEST;
	if (flags & PIN_MAPPABLE)
		mode = DRM_MM_INSERT_LOW;

	/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
	 * so we know that we always have a minimum alignment of 4096.
	 * The drm_mm range manager is optimised to return results
	 * with zero alignment, so where possible use the optimal
	 * path.
	 */
	BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
	if (alignment <= I915_GTT_MIN_ALIGNMENT)
		alignment = 0;

	err = drm_mm_insert_node_in_range(&vm->mm, node,
					  size, alignment, color,
					  start, end, mode);
	if (err != -ENOSPC)
		return err;

	if (mode & DRM_MM_INSERT_ONCE) {
		err = drm_mm_insert_node_in_range(&vm->mm, node,
						  size, alignment, color,
						  start, end,
						  DRM_MM_INSERT_BEST);
		if (err != -ENOSPC)
			return err;
	}

	if (flags & PIN_NOEVICT)
		return -ENOSPC;

	/*
	 * No free space, pick a slot at random.
	 *
	 * There is a pathological case here using a GTT shared between
	 * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
	 *
	 *    |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
	 *         (64k objects)             (448k objects)
	 *
	 * Now imagine that the eviction LRU is ordered top-down (just because
	 * pathology meets real life), and that we need to evict an object to
	 * make room inside the aperture. The eviction scan then has to walk
	 * the 448k list before it finds one within range. And now imagine that
	 * it has to search for a new hole between every byte inside the memcpy,
	 * for several simultaneous clients.
	 *
	 * On a full-ppgtt system, if we have run out of available space, there
	 * will be lots and lots of objects in the eviction list! Again,
	 * searching that LRU list may be slow if we are also applying any
	 * range restrictions (e.g. restriction to low 4GiB) and so, for
	 * simplicity and similarity between different GTTs, try the single
	 * random replacement first.
	 */
	offset = random_offset(start, end,
			       size, alignment ?: I915_GTT_MIN_ALIGNMENT);
	err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
	if (err != -ENOSPC)
		return err;

	if (flags & PIN_NOSEARCH)
		return -ENOSPC;

	/* Randomly selected placement is pinned, do a search */
	err = i915_gem_evict_something(vm, size, alignment, color,
				       start, end, flags);
	if (err)
		return err;

	return drm_mm_insert_node_in_range(&vm->mm, node,
					   size, alignment, color,
					   start, end, DRM_MM_INSERT_EVICT);
}
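
/*
 * Illustrative sketch (assumption, not driver code): asking for any 64 KiB
 * hole in the low 4 GiB of a GTT, preferring low addresses via PIN_MAPPABLE.
 * The size, range and flag choices are hypothetical; the caller must hold
 * vm->mutex, as asserted by i915_gem_gtt_insert() itself.
 */
#if 0
static int example_insert_low(struct i915_address_space *vm,
			      struct drm_mm_node *node)
{
	lockdep_assert_held(&vm->mutex);

	/* 64 KiB, default alignment, non-VMA color, restricted to [0, 4 GiB). */
	return i915_gem_gtt_insert(vm, node,
				   SZ_64K, 0, I915_COLOR_UNEVICTABLE,
				   0, SZ_4G, PIN_MAPPABLE);
}
#endif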

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_gtt.c"
#endif