/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_TYPES_H__
#define __I915_GEM_OBJECT_TYPES_H__

#include <linux/mmu_notifier.h>

#include <drm/drm_gem.h>
#include <uapi/drm/i915_drm.h>

#include "i915_active.h"
#include "i915_selftest.h"

struct drm_i915_gem_object;
struct intel_frontbuffer;

/*
 * struct i915_lut_handle tracks the fast lookups from handle to vma used
 * for execbuf. Although we use a radix tree for that mapping, in order to
 * remove them as the object or context is closed, we need a secondary list
 * and a translation entry (i915_lut_handle).
 */
struct i915_lut_handle {
	struct list_head obj_link;
	struct i915_gem_context *ctx;
	u32 handle;
};

struct drm_i915_gem_object_ops {
	unsigned int flags;
#define I915_GEM_OBJECT_HAS_IOMEM	BIT(1)
#define I915_GEM_OBJECT_IS_SHRINKABLE	BIT(2)
#define I915_GEM_OBJECT_IS_PROXY	BIT(3)
#define I915_GEM_OBJECT_NO_MMAP		BIT(4)
	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once, prior to binding the associated set
	 * of pages into the GTT, and put_pages() is called after we no
	 * longer need them. As we expect there to be an associated cost
	 * with migrating pages between the backing storage and making them
	 * available for the GPU (e.g. clflush), we may hold onto the pages
	 * after they are no longer referenced by the GPU in case they may
	 * be used again shortly (for example migrating the pages to a
	 * different memory domain within the GTT). put_pages() will
	 * therefore most likely be called when the object itself is being
	 * released or under memory pressure (where we attempt to reap
	 * pages for the shrinker). See the illustrative sketch following
	 * this struct for how a backend might wire up these hooks.
	 */
	int (*get_pages)(struct drm_i915_gem_object *obj);
	void (*put_pages)(struct drm_i915_gem_object *obj,
			  struct sg_table *pages);
	void (*truncate)(struct drm_i915_gem_object *obj);
	void (*writeback)(struct drm_i915_gem_object *obj);

	int (*pread)(struct drm_i915_gem_object *obj,
		     const struct drm_i915_gem_pread *arg);
	int (*pwrite)(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *arg);

	int (*dmabuf_export)(struct drm_i915_gem_object *obj);
	void (*release)(struct drm_i915_gem_object *obj);

	const char *name; /* friendly name for debug, e.g. lockdep classes */
};
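
/*
 * Illustrative sketch only (not part of the original header): a minimal
 * backend could wire its hooks into drm_i915_gem_object_ops roughly like
 * this. The my_*() helpers are hypothetical stand-ins for a real backend's
 * page acquisition and release paths; installing the sg_table via
 * __i915_gem_object_set_pages() follows the usual pattern in this driver.
 *
 *	static int my_get_pages(struct drm_i915_gem_object *obj)
 *	{
 *		struct sg_table *pages;
 *
 *		pages = my_backend_alloc_and_map(obj);	// hypothetical helper
 *		if (IS_ERR(pages))
 *			return PTR_ERR(pages);
 *
 *		__i915_gem_object_set_pages(obj, pages,
 *					    i915_sg_page_sizes(pages->sgl));
 *		return 0;
 *	}
 *
 *	static void my_put_pages(struct drm_i915_gem_object *obj,
 *				 struct sg_table *pages)
 *	{
 *		my_backend_unmap_and_free(obj, pages);	// hypothetical helper
 *	}
 *
 *	static const struct drm_i915_gem_object_ops my_ops = {
 *		.name = "my-backend",
 *		.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
 *		.get_pages = my_get_pages,
 *		.put_pages = my_put_pages,
 *	};
 */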

enum i915_map_type {
	I915_MAP_WB = 0,
	I915_MAP_WC,
#define I915_MAP_OVERRIDE BIT(31)
	I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
};

enum i915_mmap_type {
	I915_MMAP_TYPE_GTT = 0,
	I915_MMAP_TYPE_WC,
	I915_MMAP_TYPE_WB,
	I915_MMAP_TYPE_UC,
};

struct i915_mmap_offset {
	struct drm_vma_offset_node vma_node;
	struct drm_i915_gem_object *obj;
	enum i915_mmap_type mmap_type;

	struct rb_node offset;
};

struct i915_gem_object_page_iter {
	struct scatterlist *sg_pos;
	unsigned int sg_idx; /* in pages, but 32bit eek! */

	struct radix_tree_root radix;
	struct mutex lock; /* protects this cache */
};

struct drm_i915_gem_object {
	struct drm_gem_object base;

	const struct drm_i915_gem_object_ops *ops;

	struct {
		/**
		 * @vma.lock: protect the list/tree of vmas
		 */
		spinlock_t lock;

		/**
		 * @vma.list: List of VMAs backed by this object
		 *
		 * The VMAs on this list are ordered by type: all GGTT VMAs
		 * are placed at the head and all ppGTT VMAs at the tail.
		 * The different types of GGTT VMAs are unordered between
		 * themselves; use @vma.tree (which has a defined order
		 * between all VMAs) to quickly find an exact match.
		 */
		struct list_head list;

		/**
		 * @vma.tree: Ordered tree of VMAs backed by this object
		 *
		 * All VMA created for this object are placed in the @vma.tree
		 * for fast retrieval via a binary search in
		 * i915_vma_instance(). They are also added to @vma.list for
		 * easy iteration.
		 */
		struct rb_root tree;
	} vma;
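
	/*
	 * Illustrative sketch only (not part of the original header): a
	 * walker that only cares about GGTT bindings can rely on the list
	 * ordering described above and stop at the first ppGTT VMA. The
	 * obj_link member name of struct i915_vma is assumed here:
	 *
	 *	struct i915_vma *vma;
	 *
	 *	spin_lock(&obj->vma.lock);
	 *	list_for_each_entry(vma, &obj->vma.list, obj_link) {
	 *		if (!i915_vma_is_ggtt(vma))
	 *			break;
	 *		// ... inspect the GGTT binding ...
	 *	}
	 *	spin_unlock(&obj->vma.lock);
	 */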
133
134 /**
135 * @lut_list: List of vma lookup entries in use for this object.
136 *
137 * If this object is closed, we need to remove all of its VMA from
138 * the fast lookup index in associated contexts; @lut_list provides
139 * this translation from object to context->handles_vma.
140 */
141 struct list_head lut_list;
142 spinlock_t lut_lock; /* guards lut_list */
143
144 /**
145 * @obj_link: Link into @i915_gem_ww_ctx.obj_list
146 *
147 * When we lock this object through i915_gem_object_lock() with a
148 * context, we add it to the list to ensure we can unlock everything
149 * when i915_gem_ww_ctx_backoff() or i915_gem_ww_ctx_fini() are called.
150 */
151 struct list_head obj_link;
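
	/*
	 * Illustrative sketch only (not part of the original header): the
	 * usual wait/wound locking pattern built around the functions named
	 * in the comment above. On -EDEADLK the context backs off, dropping
	 * every object on its obj_list, and the caller retries:
	 *
	 *	struct i915_gem_ww_ctx ww;
	 *	int err;
	 *
	 *	i915_gem_ww_ctx_init(&ww, true);	// interruptible waits
	 * retry:
	 *	err = i915_gem_object_lock(obj, &ww);
	 *	if (!err) {
	 *		// ... operate on the locked object ...
	 *	}
	 *	if (err == -EDEADLK) {
	 *		err = i915_gem_ww_ctx_backoff(&ww);
	 *		if (!err)
	 *			goto retry;
	 *	}
	 *	i915_gem_ww_ctx_fini(&ww);
	 */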

	union {
		struct rcu_head rcu;
		struct llist_node freed;
	};

	/**
	 * Whether the object is currently in the GGTT mmap.
	 */
	unsigned int userfault_count;
	struct list_head userfault_link;

	struct {
		spinlock_t lock; /* Protects access to mmo offsets */
		struct rb_root offsets;
	} mmo;

	I915_SELFTEST_DECLARE(struct list_head st_link);

	unsigned long flags;
#define I915_BO_ALLOC_CONTIGUOUS  BIT(0)
#define I915_BO_ALLOC_VOLATILE    BIT(1)
#define I915_BO_ALLOC_STRUCT_PAGE BIT(2)
#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \
			     I915_BO_ALLOC_VOLATILE | \
			     I915_BO_ALLOC_STRUCT_PAGE)
#define I915_BO_READONLY          BIT(3)
#define I915_TILING_QUIRK_BIT     4 /* unknown swizzling; do not release! */

	/*
	 * Is the object to be mapped as read-only to the GPU?
	 * Only honoured if the hardware has the relevant PTE bit.
	 */
	unsigned int cache_level:3;
	unsigned int cache_coherent:2;
#define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
	unsigned int cache_dirty:1;

	/**
	 * @read_domains: Read memory domains.
	 *
	 * These monitor which caches contain read/write data related to the
	 * object. When transitioning from one set of domains to another,
	 * the driver is called to ensure that caches are suitably flushed and
	 * invalidated.
	 */
	u16 read_domains;

	/**
	 * @write_domain: Corresponding unique write memory domain.
	 */
	u16 write_domain;

	struct intel_frontbuffer __rcu *frontbuffer;

	/** Current tiling stride for the object, if it's tiled. */
	unsigned int tiling_and_stride;
#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
#define TILING_MASK (FENCE_MINIMUM_STRIDE - 1)
#define STRIDE_MASK (~TILING_MASK)

	struct {
		/*
		 * Protects the pages and their use. Do not use directly, but
		 * instead go through the pin/unpin interfaces.
		 */
		atomic_t pages_pin_count;
		atomic_t shrink_pin;
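
		/*
		 * Illustrative sketch only (not part of the original
		 * header): callers take a pin before touching the backing
		 * store and drop it when done, rather than poking
		 * pages_pin_count directly:
		 *
		 *	err = i915_gem_object_pin_pages(obj);
		 *	if (err)
		 *		return err;
		 *	// ... obj->mm.pages is now safe to use ...
		 *	i915_gem_object_unpin_pages(obj);
		 */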

		/**
		 * Memory region for this object.
		 */
		struct intel_memory_region *region;
		/**
		 * List of memory region blocks allocated for this object.
		 */
		struct list_head blocks;
		/**
		 * Element within memory_region->objects or region->purgeable
		 * if the object is marked as DONTNEED. Access is protected by
		 * region->obj_lock.
		 */
		struct list_head region_link;

		struct sg_table *pages;
		void *mapping;

		struct i915_page_sizes {
			/**
			 * The sg mask of the pages sg_table, i.e. the mask
			 * of the lengths for each sg entry.
			 */
			unsigned int phys;

			/**
			 * The gtt page sizes we are allowed to use given the
			 * sg mask and the supported page sizes. This will
			 * express the smallest unit we can use for the whole
			 * object, as well as the larger sizes we may be able
			 * to use opportunistically.
			 */
			unsigned int sg;

			/**
			 * The actual gtt page size usage. Since we can have
			 * multiple vma associated with this object we need to
			 * prevent any trampling of state, hence a copy of this
			 * struct also lives in each vma, therefore the gtt
			 * value here should only be read/write through the vma.
			 */
			unsigned int gtt;
		} page_sizes;
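
		/*
		 * Illustrative example only (not part of the original
		 * header): if the sg_table is backed by one 2M chunk plus a
		 * tail of 64K chunks, phys contains the 2M and 64K bits.
		 * sg is then derived from phys and the page sizes the
		 * platform's GTT actually supports; e.g. on hardware without
		 * 64K GTT pages the tail can only be mapped with 4K pages,
		 * while the 2M chunk may still be mapped opportunistically
		 * with a 2M GTT page.
		 */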

		I915_SELFTEST_DECLARE(unsigned int page_mask);

		struct i915_gem_object_page_iter get_page;
		struct i915_gem_object_page_iter get_dma_page;

		/**
		 * Element within i915->mm.unbound_list or i915->mm.bound_list,
		 * locked by i915->mm.obj_lock.
		 */
		struct list_head link;

		/**
		 * Advice: are the backing pages purgeable?
		 */
		unsigned int madv:2;

		/**
		 * This is set if the object has been written to since the
		 * pages were last acquired.
		 */
		bool dirty:1;
	} mm;

	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

	union {
#ifdef CONFIG_MMU_NOTIFIER
		struct i915_gem_userptr {
			uintptr_t ptr;
			unsigned long notifier_seq;

			struct mmu_interval_notifier notifier;
			struct page **pvec;
			int page_ref;
		} userptr;
#endif

		struct drm_mm_node *stolen;

		unsigned long scratch;
		u64 encode;

		void *gvt_info;
	};
};

static inline struct drm_i915_gem_object *
to_intel_bo(struct drm_gem_object *gem)
{
	/* Assert that to_intel_bo(NULL) == NULL */
	BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));

	return container_of(gem, struct drm_i915_gem_object, base);
}
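
/*
 * Illustrative sketch only (not part of the original header): code that is
 * handed the embedded &struct drm_gem_object, e.g. from a
 * drm_gem_object_funcs callback, recovers the i915 object with
 * to_intel_bo(). The my_free() callback name is hypothetical:
 *
 *	static void my_free(struct drm_gem_object *gem)
 *	{
 *		struct drm_i915_gem_object *obj = to_intel_bo(gem);
 *
 *		// ... operate on obj ...
 *	}
 */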

#endif