/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef __I915_VMA_H__
#define __I915_VMA_H__

#include <linux/io-mapping.h>

#include <drm/drm_mm.h>

#include "i915_gem_gtt.h"
#include "i915_gem_fence_reg.h"
#include "i915_gem_object.h"
#include "i915_gem_request.h"


enum i915_cache_level;
/**
 * A VMA represents a GEM BO that is bound into an address space. Therefore, a
 * VMA's presence cannot be guaranteed before the object is bound into, or
 * after it is unbound from, the address space.
 *
 * To keep things as simple as possible (i.e. no refcounting), a VMA's lifetime
 * will always be <= an object's lifetime. So object refcounting should cover us.
 */
struct i915_vma {
	struct drm_mm_node node;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;
	struct drm_i915_fence_reg *fence;
	struct reservation_object *resv; /** Alias of obj->resv */
	struct sg_table *pages;
	void __iomem *iomap;
	u64 size;
	u64 display_alignment;
	struct i915_page_sizes page_sizes;

	u32 fence_size;
	u32 fence_alignment;
	/**
	 * Count of the number of times this vma has been opened by different
	 * handles (but same file) for execbuf, i.e. the number of aliases
	 * that exist in the ctx->handle_vmas LUT for this vma.
	 */
	unsigned int open_count;
	unsigned long flags;
	/**
	 * How many users have pinned this object in GTT space. The following
	 * users can each hold at most one reference: pwrite/pread, execbuffer
	 * (objects are not allowed multiple times for the same batchbuffer),
	 * and the framebuffer code. When switching/pageflipping, the
	 * framebuffer code has at most two buffers pinned per crtc.
	 *
	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
	 * bits with absolutely no headroom. So use 4 bits.
	 */
#define I915_VMA_PIN_MASK 0xf
#define I915_VMA_PIN_OVERFLOW	BIT(5)

	/** Flags and address space this VMA is bound to */
#define I915_VMA_GLOBAL_BIND	BIT(6)
#define I915_VMA_LOCAL_BIND	BIT(7)
#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW)

#define I915_VMA_GGTT		BIT(8)
#define I915_VMA_CAN_FENCE	BIT(9)
#define I915_VMA_CLOSED		BIT(10)
#define I915_VMA_USERFAULT_BIT	11
#define I915_VMA_USERFAULT	BIT(I915_VMA_USERFAULT_BIT)

	unsigned int active;
	struct i915_gem_active last_read[I915_NUM_ENGINES];
	struct i915_gem_active last_fence;
	/**
	 * Support different GGTT views into the same object.
	 * This means there can be multiple VMA mappings per object and per VM.
	 * i915_ggtt_view_type is used to distinguish between those entries.
	 * The default one of zero (I915_GGTT_VIEW_NORMAL) is also assumed in
	 * GEM functions which take no ggtt view parameter.
	 */
	struct i915_ggtt_view ggtt_view;

	/** This object's place on the active/inactive lists */
	struct list_head vm_link;

	struct list_head obj_link; /* Link in the object's VMA list */
	struct rb_node obj_node;
	struct hlist_node obj_hash;

	/** This vma's place in the execbuf reservation list */
	struct list_head exec_link;
	struct list_head reloc_link;

	/** This vma's place in the eviction list */
	struct list_head evict_link;

	/**
	 * Used for performing relocations during execbuffer insertion.
	 */
	unsigned int *exec_flags;
	struct hlist_node exec_node;
	u32 exec_handle;
};

struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view);

void i915_vma_unpin_and_release(struct i915_vma **p_vma);
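
/*
 * Example (illustrative sketch only, not lifted from the driver): the usual
 * lookup/pin/use/unpin cycle under struct_mutex. The GGTT address-space
 * pointer (&ggtt->base) and the "offset" variable are assumptions about the
 * caller's context.
 *
 *	struct i915_vma *vma;
 *	u32 offset;
 *	int err;
 *
 *	vma = i915_vma_instance(obj, &ggtt->base, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
 *	if (err)
 *		return err;
 *
 *	offset = i915_ggtt_offset(vma);
 *	... program the hardware with offset ...
 *	i915_vma_unpin(vma);
 */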

static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
{
	return vma->flags & I915_VMA_GGTT;
}

static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
{
	return vma->flags & I915_VMA_CAN_FENCE;
}

static inline bool i915_vma_is_closed(const struct i915_vma *vma)
{
	return vma->flags & I915_VMA_CLOSED;
}

static inline bool i915_vma_set_userfault(struct i915_vma *vma)
{
	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
	return __test_and_set_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
}

static inline void i915_vma_unset_userfault(struct i915_vma *vma)
{
	return __clear_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
}

static inline bool i915_vma_has_userfault(const struct i915_vma *vma)
{
	return test_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
}

static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
{
	return vma->active;
}

static inline bool i915_vma_is_active(const struct i915_vma *vma)
{
	return i915_vma_get_active(vma);
}

static inline void i915_vma_set_active(struct i915_vma *vma,
				       unsigned int engine)
{
	vma->active |= BIT(engine);
}

static inline void i915_vma_clear_active(struct i915_vma *vma,
					 unsigned int engine)
{
	vma->active &= ~BIT(engine);
}

static inline bool i915_vma_has_active_engine(const struct i915_vma *vma,
					      unsigned int engine)
{
	return vma->active & BIT(engine);
}

static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
{
	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->node.allocated);
	GEM_BUG_ON(upper_32_bits(vma->node.start));
	GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1));
	return lower_32_bits(vma->node.start);
}

static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
{
	i915_gem_object_get(vma->obj);
	return vma;
}

static inline void i915_vma_put(struct i915_vma *vma)
{
	i915_gem_object_put(vma->obj);
}

static __always_inline ptrdiff_t ptrdiff(const void *a, const void *b)
{
	return a - b;
}

static inline long
i915_vma_compare(struct i915_vma *vma,
		 struct i915_address_space *vm,
		 const struct i915_ggtt_view *view)
{
	ptrdiff_t cmp;

	GEM_BUG_ON(view && !i915_is_ggtt(vm));

	cmp = ptrdiff(vma->vm, vm);
	if (cmp)
		return cmp;

	BUILD_BUG_ON(I915_GGTT_VIEW_NORMAL != 0);
	cmp = vma->ggtt_view.type;
	if (!view)
		return cmp;

	cmp -= view->type;
	if (cmp)
		return cmp;

	/* ggtt_view.type also encodes its size so that we both distinguish
	 * different views using it as a "type" and also use a compact (no
	 * accessing of uninitialised padding bytes) memcmp without storing
	 * an extra parameter or adding more code.
	 *
	 * To ensure that the memcmp is valid for all branches of the union,
	 * even though the code looks like it is just comparing one branch,
	 * we assert above that all branches have the same address, and that
	 * each branch has a unique type/size.
	 */
	BUILD_BUG_ON(I915_GGTT_VIEW_NORMAL >= I915_GGTT_VIEW_PARTIAL);
	BUILD_BUG_ON(I915_GGTT_VIEW_PARTIAL >= I915_GGTT_VIEW_ROTATED);
	BUILD_BUG_ON(offsetof(typeof(*view), rotated) !=
		     offsetof(typeof(*view), partial));
	return memcmp(&vma->ggtt_view.partial, &view->partial, view->type);
}
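
/*
 * Example (hedged sketch of the intended use): walking a per-object rb-tree
 * of vma, keyed by this comparator. The obj->vma_tree root and the traversal
 * direction are assumptions about how the object tracks its vma; only the
 * role of the comparator is the point here.
 *
 *	struct rb_node *rb = obj->vma_tree.rb_node;
 *
 *	while (rb) {
 *		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
 *		long cmp = i915_vma_compare(vma, vm, view);
 *
 *		if (cmp == 0)
 *			return vma;
 *
 *		if (cmp < 0)
 *			rb = rb->rb_right;
 *		else
 *			rb = rb->rb_left;
 *	}
 *	return NULL;
 */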

int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags);
bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level);
bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
void i915_vma_revoke_mmap(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
void i915_vma_unlink_ctx(struct i915_vma *vma);
void i915_vma_close(struct i915_vma *vma);

int __i915_vma_do_pin(struct i915_vma *vma,
		      u64 size, u64 alignment, u64 flags);
static inline int __must_check
i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	BUILD_BUG_ON(PIN_MBZ != I915_VMA_PIN_OVERFLOW);
	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);

	/* Pin early to prevent the shrinker/eviction logic from destroying
	 * our vma as we insert and bind.
	 */
	if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0)) {
		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
		return 0;
	}

	return __i915_vma_do_pin(vma, size, alignment, flags);
}

static inline int i915_vma_pin_count(const struct i915_vma *vma)
{
	return vma->flags & I915_VMA_PIN_MASK;
}

static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
{
	return i915_vma_pin_count(vma);
}

static inline void __i915_vma_pin(struct i915_vma *vma)
{
	vma->flags++;
	GEM_BUG_ON(vma->flags & I915_VMA_PIN_OVERFLOW);
}

static inline void __i915_vma_unpin(struct i915_vma *vma)
{
	vma->flags--;
}

static inline void i915_vma_unpin(struct i915_vma *vma)
{
	GEM_BUG_ON(!i915_vma_is_pinned(vma));
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	__i915_vma_unpin(vma);
}

/**
 * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
 * @vma: VMA to iomap
 *
 * The passed in VMA has to be pinned in the global GTT mappable region.
 * An extra pinning of the VMA is acquired for the returned iomapping;
 * the caller must call i915_vma_unpin_iomap() to relinquish the pinning
 * once the iomapping is no longer required.
 *
 * Callers must hold the struct_mutex.
 *
 * Returns a valid iomapped pointer or ERR_PTR.
 */
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
#define IO_ERR_PTR(x) ((void __iomem *)ERR_PTR(x))

/**
 * i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_pin_iomap
 * @vma: VMA to unpin
 *
 * Unpins the VMA previously iomapped by i915_vma_pin_iomap().
 *
 * Callers must hold the struct_mutex. This function is only valid to be
 * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
 */
void i915_vma_unpin_iomap(struct i915_vma *vma);
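
/*
 * Example (illustrative only): copying data through the aperture mapping
 * while holding struct_mutex. "data" and "len" are placeholders; the vma is
 * assumed to already be pinned in the mappable GGTT.
 *
 *	void __iomem *ptr;
 *
 *	ptr = i915_vma_pin_iomap(vma);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *
 *	memcpy_toio(ptr, data, len);
 *	i915_vma_unpin_iomap(vma);
 */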

static inline struct page *i915_vma_first_page(struct i915_vma *vma)
{
	GEM_BUG_ON(!vma->pages);
	return sg_page(vma->pages->sgl);
}

/**
 * i915_vma_pin_fence - pin fencing state
 * @vma: vma to pin fencing for
 *
 * This pins the fencing state (whether tiled or untiled) to make sure the
 * vma (and its object) is ready to be used as a scanout target. Fencing
 * status must be synchronized first by calling i915_vma_get_fence().
 *
 * The resulting fence pin reference must be released again with
 * i915_vma_unpin_fence().
 *
 * Returns:
 *
 * 0 on success, negative error code on failure.
 */
int i915_vma_pin_fence(struct i915_vma *vma);
int __must_check i915_vma_put_fence(struct i915_vma *vma);

static inline void __i915_vma_unpin_fence(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->fence->pin_count <= 0);
	vma->fence->pin_count--;
}

/**
 * i915_vma_unpin_fence - unpin fencing state
 * @vma: vma to unpin fencing for
 *
 * This releases the fence pin reference acquired through
 * i915_vma_pin_fence. It will handle both objects with and without an
 * attached fence correctly; callers do not need to distinguish this.
 */
static inline void
i915_vma_unpin_fence(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
	if (vma->fence)
		__i915_vma_unpin_fence(vma);
}
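
/*
 * Example (sketch, assuming the vma is already pinned in the mappable GGTT):
 * keeping fencing state alive around a tiled access by pairing
 * i915_vma_pin_fence() with i915_vma_unpin_fence() under struct_mutex.
 *
 *	err = i915_vma_pin_fence(vma);
 *	if (err)
 *		return err;
 *
 *	... access the object through the fenced GTT mapping ...
 *
 *	i915_vma_unpin_fence(vma);
 */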

#endif