// SPDX-License-Identifier: MIT
/*
 * Copyright © 2010 Daniel Vetter
 * Copyright © 2020 Intel Corporation
 */

#include <linux/slab.h> /* fault-inject.h is not standalone! */

#include <linux/fault-inject.h>
#include <linux/log2.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>

#include <asm/set_memory.h>
#include <asm/smp.h>

#include "display/intel_frontbuffer.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_gem_evict.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_vgpu.h"

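
/*
 * DMA-map the object's backing pages. If the remapping table (e.g. swiotlb)
 * is exhausted, incrementally purge other objects via the shrinker and
 * retry, giving up with -ENOSPC once nothing more can be evicted.
 */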
int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
#ifdef __linux__
	do {
		if (dma_map_sg_attrs(obj->base.dev->dev,
				     pages->sgl, pages->nents,
				     DMA_BIDIRECTIONAL,
				     DMA_ATTR_SKIP_CPU_SYNC |
				     DMA_ATTR_NO_KERNEL_MAPPING |
				     DMA_ATTR_NO_WARN))
			return 0;

		/*
		 * If the DMA remap fails, one cause can be that we have
		 * too many objects pinned in a small remapping table,
		 * such as swiotlb. Incrementally purge all other objects and
		 * try again - if there are no more pages to remove from
		 * the DMA remapper, i915_gem_shrink will return 0.
		 */
		GEM_BUG_ON(obj->mm.pages == pages);
	} while (i915_gem_shrink(NULL, to_i915(obj->base.dev),
				 obj->base.size >> PAGE_SHIFT, NULL,
				 I915_SHRINK_BOUND |
				 I915_SHRINK_UNBOUND));

	return -ENOSPC;
#else
	return 0;
#endif
}

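/*
 * Counterpart to i915_gem_gtt_prepare_pages(): tear down the DMA mapping
 * for the object's pages. If the GGTT requires idle maps, sleep briefly
 * first in the hope that any outstanding access has drained. Note the
 * dma_unmap_sg() call itself is still compiled out ("notyet") in this port.
 */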
void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;

	/* XXX This does not prevent more requests being submitted! */
	if (unlikely(ggtt->do_idle_maps))
		/* Wait a bit, in the hope it avoids the hang */
		usleep_range(100, 250);

#ifdef notyet
	dma_unmap_sg(i915->drm.dev, pages->sgl, pages->nents,
		     DMA_BIDIRECTIONAL);
#endif
}

/**
 * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
 * @vm: the &struct i915_address_space
 * @ww: An optional struct i915_gem_ww_ctx.
 * @node: the &struct drm_mm_node (typically i915_vma.node)
 * @size: how much space to allocate inside the GTT,
 *        must be #I915_GTT_PAGE_SIZE aligned
 * @offset: where to insert inside the GTT,
 *          must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
 *          (@offset + @size) must fit within the address space
 * @color: color to apply to node, if this node is not from a VMA,
 *         color must be #I915_COLOR_UNEVICTABLE
 * @flags: control search and eviction behaviour
 *
 * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
 * the address space (using @size and @color). If the @node does not fit, it
 * tries to evict any overlapping nodes from the GTT, including any
 * neighbouring nodes if the colors do not match (to ensure guard pages between
 * differing domains). See i915_gem_evict_for_node() for the gory details
 * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
 * evicting active overlapping objects, and any overlapping node that is pinned
 * or marked as unevictable will also result in failure.
 *
 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
 * asked to wait for eviction and interrupted.
 */
int i915_gem_gtt_reserve(struct i915_address_space *vm,
			 struct i915_gem_ww_ctx *ww,
			 struct drm_mm_node *node,
			 u64 size, u64 offset, unsigned long color,
			 unsigned int flags)
{
	int err;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(range_overflows(offset, size, vm->total));
	GEM_BUG_ON(vm == &to_gt(vm->i915)->ggtt->alias->vm);
	GEM_BUG_ON(drm_mm_node_allocated(node));

	node->size = size;
	node->start = offset;
	node->color = color;

	err = drm_mm_reserve_node(&vm->mm, node);
	if (err != -ENOSPC)
		return err;

	if (flags & PIN_NOEVICT)
		return -ENOSPC;

	err = i915_gem_evict_for_node(vm, ww, node, flags);
	if (err == 0)
		err = drm_mm_reserve_node(&vm->mm, node);

	return err;
}
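
/*
 * Usage sketch (illustrative, not taken from this driver): a caller that
 * needs a node placed at a fixed GGTT offset might do something like the
 * following, assuming it already holds a suitable i915_address_space *vm
 * and honours the alignment rules documented above:
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = i915_gem_gtt_reserve(vm, NULL, &node, SZ_2M, 0,
 *				   I915_COLOR_UNEVICTABLE, PIN_NOEVICT);
 *	if (err)
 *		return err;
 *
 * With PIN_NOEVICT, a conflicting node at the requested offset fails the
 * call with -ENOSPC instead of triggering eviction.
 */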
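/*
 * Pick a pseudo-random, @align-aligned offset for a block of @len bytes
 * such that [offset, offset + len) still fits within [start, end). Used
 * below as a cheap first guess before falling back to a full eviction scan.
 */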
static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
{
	u64 range, addr;

	GEM_BUG_ON(range_overflows(start, len, end));
	GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));

	range = round_down(end - len, align) - round_up(start, align);
	if (range) {
		if (sizeof(unsigned long) == sizeof(u64)) {
			addr = get_random_u64();
		} else {
			addr = get_random_u32();
			if (range > U32_MAX) {
				addr <<= 32;
				addr |= get_random_u32();
			}
		}
		div64_u64_rem(addr, range, &addr);
		start += addr;
	}

	return round_up(start, align);
}

/**
 * i915_gem_gtt_insert - insert a node into an address_space (GTT)
 * @vm: the &struct i915_address_space
 * @ww: An optional struct i915_gem_ww_ctx.
 * @node: the &struct drm_mm_node (typically i915_vma.node)
 * @size: how much space to allocate inside the GTT,
 *        must be #I915_GTT_PAGE_SIZE aligned
 * @alignment: required alignment of starting offset, may be 0 but
 *             if specified, this must be a power-of-two and at least
 *             #I915_GTT_MIN_ALIGNMENT
 * @color: color to apply to node
 * @start: start of any range restriction inside GTT (0 for all),
 *         must be #I915_GTT_PAGE_SIZE aligned
 * @end: end of any range restriction inside GTT (U64_MAX for all),
 *       must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
 * @flags: control search and eviction behaviour
 *
 * i915_gem_gtt_insert() first searches for an available hole into which
 * it can insert the node. The hole address is aligned to @alignment and
 * its @size must then fit entirely within the [@start, @end] bounds. The
 * nodes on either side of the hole must match @color, or else a guard page
 * will be inserted between the two nodes (or the node evicted). If no
 * suitable hole is found, first a victim is randomly selected and tested
 * for eviction, and failing that the LRU list of objects within the GTT
 * is scanned to find the first set of replacement nodes to create the hole.
 * Those old overlapping nodes are evicted from the GTT (and so must be
 * rebound before any future use). Any node that is currently pinned cannot
 * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently
 * active and #PIN_NONBLOCK is specified, that node is skipped when
 * searching for an eviction candidate. See i915_gem_evict_something() for
 * the gory details on the eviction algorithm.
 *
 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
 * asked to wait for eviction and interrupted.
 */
int i915_gem_gtt_insert(struct i915_address_space *vm,
			struct i915_gem_ww_ctx *ww,
			struct drm_mm_node *node,
			u64 size, u64 alignment, unsigned long color,
			u64 start, u64 end, unsigned int flags)
{
	enum drm_mm_insert_mode mode;
	u64 offset;
	int err;

	lockdep_assert_held(&vm->mutex);

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(start >= end);
	GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(vm == &to_gt(vm->i915)->ggtt->alias->vm);
	GEM_BUG_ON(drm_mm_node_allocated(node));

	if (unlikely(range_overflows(start, size, end)))
		return -ENOSPC;

	if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
		return -ENOSPC;

	mode = DRM_MM_INSERT_BEST;
	if (flags & PIN_HIGH)
		mode = DRM_MM_INSERT_HIGHEST;
	if (flags & PIN_MAPPABLE)
		mode = DRM_MM_INSERT_LOW;

	/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
	 * so we know that we always have a minimum alignment of 4096.
	 * The drm_mm range manager is optimised to return results
	 * with zero alignment, so where possible use the optimal
	 * path.
	 */
	BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
	if (alignment <= I915_GTT_MIN_ALIGNMENT)
		alignment = 0;

	err = drm_mm_insert_node_in_range(&vm->mm, node,
					  size, alignment, color,
					  start, end, mode);
	if (err != -ENOSPC)
		return err;

	if (mode & DRM_MM_INSERT_ONCE) {
		err = drm_mm_insert_node_in_range(&vm->mm, node,
						  size, alignment, color,
						  start, end,
						  DRM_MM_INSERT_BEST);
		if (err != -ENOSPC)
			return err;
	}

	if (flags & PIN_NOEVICT)
		return -ENOSPC;

	/*
	 * No free space, pick a slot at random.
	 *
	 * There is a pathological case here using a GTT shared between
	 * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
	 *
	 *    |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
	 *         (64k objects)            (448k objects)
	 *
	 * Now imagine that the eviction LRU is ordered top-down (just because
	 * pathology meets real life), and that we need to evict an object to
	 * make room inside the aperture. The eviction scan then has to walk
	 * the 448k list before it finds one within range. And now imagine that
	 * it has to search for a new hole between every byte inside the memcpy,
	 * for several simultaneous clients.
	 *
	 * On a full-ppgtt system, if we have run out of available space, there
	 * will be lots and lots of objects in the eviction list! Again,
	 * searching that LRU list may be slow if we are also applying any
	 * range restrictions (e.g. restriction to low 4GiB) and so, for
	 * simplicity and similarity between different GTTs, try the single
	 * random replacement first.
	 */
	offset = random_offset(start, end,
			       size, alignment ?: I915_GTT_MIN_ALIGNMENT);
	err = i915_gem_gtt_reserve(vm, ww, node, size, offset, color, flags);
	if (err != -ENOSPC)
		return err;

	if (flags & PIN_NOSEARCH)
		return -ENOSPC;

	/* Randomly selected placement is pinned, do a search */
	err = i915_gem_evict_something(vm, ww, size, alignment, color,
				       start, end, flags);
	if (err)
		return err;

	return drm_mm_insert_node_in_range(&vm->mm, node,
					   size, alignment, color,
					   start, end, DRM_MM_INSERT_EVICT);
}
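
/*
 * Usage sketch (illustrative, not taken from this driver): letting the
 * allocator pick any suitable hole in the low 4GiB of an assumed
 * i915_address_space *vm, with default search and eviction behaviour and
 * vm->mutex already held:
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = i915_gem_gtt_insert(vm, NULL, &node, SZ_64K, 0,
 *				  I915_COLOR_UNEVICTABLE,
 *				  0, SZ_4G, 0);
 *
 * On success the node is allocated at node.start; on -ENOSPC every
 * fallback (random replacement, then a full eviction scan) has failed.
 */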

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_gtt.c"
#endif