/*	$NetBSD: i915_gem_object_types.h,v 1.7 2021/12/19 12:26:55 riastradh Exp $	*/

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_TYPES_H__
#define __I915_GEM_OBJECT_TYPES_H__

#include <drm/drm_gem.h>
#include <uapi/drm/i915_drm.h>

#include "i915_active.h"
#include "i915_selftest.h"

struct drm_i915_gem_object;
struct intel_frontbuffer;

/*
 * struct i915_lut_handle tracks the fast lookups from handle to vma used
 * for execbuf. Although we use a radix tree for that mapping, we also
 * need a secondary list and a translation entry (i915_lut_handle) so
 * that the entries can be removed as the object or context is closed.
 */
struct i915_lut_handle {
        struct list_head obj_link;
        struct i915_gem_context *ctx;
        u32 handle;
};
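
/*
 * Illustrative sketch (not part of the upstream header): on object or
 * context close, the driver walks @obj_link to find the radix tree
 * entries to drop, roughly:
 *
 *	list_for_each_entry(lut, &obj->lut_list, obj_link)
 *		radix_tree_delete(&lut->ctx->handles_vma, lut->handle);
 *
 * The real walk and its locking live in the object/context close paths;
 * this only shows why both the list link and the (ctx, handle) pair are
 * recorded here.
 */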

struct drm_i915_gem_object_ops {
        unsigned int flags;
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
#define I915_GEM_OBJECT_HAS_IOMEM       BIT(1)
#define I915_GEM_OBJECT_IS_SHRINKABLE   BIT(2)
#define I915_GEM_OBJECT_IS_PROXY        BIT(3)
#define I915_GEM_OBJECT_NO_GGTT         BIT(4)
#define I915_GEM_OBJECT_ASYNC_CANCEL    BIT(5)

        /*
         * Interface between the GEM object and its backing storage.
         * get_pages() is called once prior to the use of the associated
         * set of pages, before binding them into the GTT, and put_pages()
         * is called after we no longer need them. As we expect there to
         * be an associated cost with migrating pages between the backing
         * storage and making them available for the GPU (e.g. clflush),
         * we may hold onto the pages after they are no longer referenced
         * by the GPU in case they may be used again shortly (for example
         * migrating the pages to a different memory domain within the
         * GTT). put_pages() will therefore most likely be called when the
         * object itself is being released or under memory pressure (where
         * we attempt to reap pages for the shrinker).
         */
        int (*get_pages)(struct drm_i915_gem_object *obj);
        void (*put_pages)(struct drm_i915_gem_object *obj,
                          struct sg_table *pages);
        void (*truncate)(struct drm_i915_gem_object *obj);
        void (*writeback)(struct drm_i915_gem_object *obj);

        int (*pwrite)(struct drm_i915_gem_object *obj,
                      const struct drm_i915_gem_pwrite *arg);

        int (*dmabuf_export)(struct drm_i915_gem_object *obj);
        void (*release)(struct drm_i915_gem_object *obj);
};
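
/*
 * Illustrative sketch (not part of the upstream header): a backend is
 * expected to provide at least get_pages()/put_pages() and a flags
 * word, along the lines of the shmem/internal/stolen backends. The
 * my_* names below are hypothetical:
 *
 *	static int my_get_pages(struct drm_i915_gem_object *obj)
 *	{
 *		struct sg_table *pages = my_backend_alloc(obj); // hypothetical
 *
 *		if (IS_ERR(pages))
 *			return PTR_ERR(pages);
 *
 *		__i915_gem_object_set_pages(obj, pages,
 *					    i915_sg_page_sizes(pages->sgl));
 *		return 0;
 *	}
 *
 *	static void my_put_pages(struct drm_i915_gem_object *obj,
 *				 struct sg_table *pages)
 *	{
 *		my_backend_free(obj, pages);	// hypothetical: unmap + free
 *	}
 *
 *	static const struct drm_i915_gem_object_ops my_ops = {
 *		.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
 *			 I915_GEM_OBJECT_IS_SHRINKABLE,
 *		.get_pages = my_get_pages,
 *		.put_pages = my_put_pages,
 *	};
 */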

enum i915_mmap_type {
        I915_MMAP_TYPE_GTT = 0,
        I915_MMAP_TYPE_WC,
        I915_MMAP_TYPE_WB,
        I915_MMAP_TYPE_UC,
        I915_MMAP_NTYPES
};

struct i915_mmap_offset {
        struct drm_vma_offset_node vma_node;
        struct drm_i915_gem_object *obj;
        enum i915_mmap_type mmap_type;

#ifdef __NetBSD__
        struct uvm_object uobj;
#else
        struct rb_node offset;
#endif
};

struct drm_i915_gem_object {
        struct drm_gem_object base;

        const struct drm_i915_gem_object_ops *ops;

        struct {
                /**
                 * @vma.lock: protect the list/tree of vmas
                 */
                spinlock_t lock;

                /**
                 * @vma.list: List of VMAs backed by this object
                 *
                 * The VMAs on this list are ordered by type: all GGTT
                 * VMAs are placed at the head and all ppGTT VMAs at the
                 * tail. The different types of GGTT VMAs are unordered
                 * between themselves; use @vma.tree (which has a defined
                 * order between all VMAs) to quickly find an exact match.
                 */
                struct list_head list;

                /**
                 * @vma.tree: Ordered tree of VMAs backed by this object
                 *
                 * All VMAs created for this object are placed in the
                 * @vma.tree for fast retrieval via a binary search in
                 * i915_vma_instance(). They are also added to @vma.list
                 * for easy iteration.
                 */
                struct rb_root tree;
        } vma;
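
        /*
         * Illustrative sketch (not part of the upstream header):
         * i915_vma_instance() resolves (obj, vm, view) to a vma with a
         * binary search over @vma.tree, roughly:
         *
         *	rb = obj->vma.tree.rb_node;
         *	while (rb) {
         *		vma = rb_entry(rb, struct i915_vma, obj_node);
         *		cmp = i915_vma_compare(vma, vm, view);
         *		if (cmp == 0)
         *			return vma;
         *		rb = cmp < 0 ? rb->rb_right : rb->rb_left;
         *	}
         *
         * See i915_vma_instance() in i915_vma.c for the real walk and
         * its locking under @vma.lock.
         */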

        /**
         * @lut_list: List of vma lookup entries in use for this object.
         *
         * If this object is closed, we need to remove all of its VMAs
         * from the fast lookup index in the associated contexts;
         * @lut_list provides this translation from object to
         * context->handles_vma.
         */
        struct list_head lut_list;

        /** Stolen memory for this object, instead of being backed by shmem. */
        struct drm_mm_node *stolen;
        union {
                struct rcu_head rcu;
                struct llist_node freed;
        };

        /**
         * Whether the object is currently in the GGTT mmap.
         */
        unsigned int userfault_count;
        struct list_head userfault_link;

        struct {
                spinlock_t lock; /* Protects access to mmo offsets */
#ifdef __NetBSD__
                struct i915_mmap_offset *offsets[I915_MMAP_NTYPES];
#else
                struct rb_root offsets;
#endif
        } mmo;

        I915_SELFTEST_DECLARE(struct list_head st_link);

        unsigned long flags;
#define I915_BO_ALLOC_CONTIGUOUS BIT(0)
#define I915_BO_ALLOC_VOLATILE   BIT(1)
#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | I915_BO_ALLOC_VOLATILE)
#define I915_BO_READONLY         BIT(2)

        /*
         * Is the object to be mapped as read-only to the GPU
         * (I915_BO_READONLY)? Only honoured if the hardware has the
         * relevant PTE bit.
         */
        unsigned int cache_level:3;
        unsigned int cache_coherent:2;
#define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
        unsigned int cache_dirty:1;

        /**
         * @read_domains: Read memory domains.
         *
         * These monitor which caches contain read/write data related to
         * the object. When transitioning from one set of domains to
         * another, the driver is called to ensure that caches are
         * suitably flushed and invalidated.
         */
        u16 read_domains;

        /**
         * @write_domain: Corresponding unique write memory domain.
         */
        u16 write_domain;

        struct intel_frontbuffer __rcu *frontbuffer;

        /** Current tiling stride for the object, if it's tiled. */
        unsigned int tiling_and_stride;
#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
#define TILING_MASK (FENCE_MINIMUM_STRIDE - 1)
#define STRIDE_MASK (~TILING_MASK)
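
        /*
         * Illustrative sketch (not part of the upstream header): the
         * tiling mode and stride share this one word, so accessors
         * decode it as
         *
         *	tiling = tiling_and_stride & TILING_MASK; // I915_TILING_*
         *	stride = tiling_and_stride & STRIDE_MASK;
         *
         * which works because a fenceable stride is a multiple of
         * FENCE_MINIMUM_STRIDE, leaving the low bits free to hold the
         * tiling mode.
         */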

        /** Count of VMA actually bound by this object */
        atomic_t bind_count;

        struct {
                /*
                 * Protects the pages and their use. Do not use directly,
                 * but instead go through the pin/unpin interfaces.
                 */
                struct mutex lock;
                atomic_t pages_pin_count;
                atomic_t shrink_pin;

                /**
                 * Memory region for this object.
                 */
                struct intel_memory_region *region;
                /**
                 * List of memory region blocks allocated for this object.
                 */
                struct list_head blocks;
                /**
                 * Element within memory_region->objects or
                 * region->purgeable if the object is marked as DONTNEED.
                 * Access is protected by region->obj_lock.
                 */
                struct list_head region_link;

#ifdef __NetBSD__
                /* internal objects */
                union {
                        struct {
                                bus_dma_segment_t *segs;
                                int nsegs;
                                int rsegs;
                        } internal;
                        struct {
                                bus_dma_segment_t seg;
                                void *kva;
                        } phys;
                } u;
#endif

                struct sg_table *pages;
                void *mapping;

                struct i915_page_sizes {
                        /**
                         * The sg mask of the pages' sg_table, i.e. the
                         * mask of the lengths for each sg entry.
                         */
                        unsigned int phys;

                        /**
                         * The gtt page sizes we are allowed to use given
                         * the sg mask and the supported page sizes. This
                         * will express the smallest unit we can use for
                         * the whole object, as well as the larger sizes
                         * we may be able to use opportunistically.
                         */
                        unsigned int sg;

                        /**
                         * The actual gtt page size usage. Since we can
                         * have multiple VMAs associated with this object,
                         * we need to prevent any trampling of state;
                         * hence a copy of this struct also lives in each
                         * vma, and the gtt value here should only be
                         * read/written through the vma.
                         */
                        unsigned int gtt;
                } page_sizes;
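
                /*
                 * Illustrative sketch (not part of the upstream
                 * header): the driver derives @sg from @phys and the
                 * page sizes the GTT supports, roughly as in
                 * __i915_gem_object_set_pages():
                 *
                 *	page_sizes.sg = 0;
                 *	for_each_set_bit(i, &supported,
                 *			 ilog2(I915_GTT_MAX_PAGE_SIZE) + 1)
                 *		if (page_sizes.phys & ~0u << i)
                 *			page_sizes.sg |= BIT(i);
                 *
                 * i.e. bit i is set when the physical layout contains a
                 * chunk of at least 2^i bytes, matching the
                 * "opportunistic" larger sizes described above.
                 */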

                I915_SELFTEST_DECLARE(unsigned int page_mask);

                struct i915_gem_object_page_iter {
                        struct scatterlist *sg_pos;
                        unsigned int sg_idx; /* in pages, but 32bit eek! */

                        struct radix_tree_root radix;
                        struct mutex lock; /* protects this cache */
                } get_page;
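
                /*
                 * Illustrative sketch (not part of the upstream
                 * header): callers do not touch this iterator directly;
                 * lookups such as
                 *
                 *	page = i915_gem_object_get_page(obj, n);
                 *
                 * use @get_page to resume the scatterlist walk from the
                 * cached position (with the radix tree for random
                 * access) instead of walking the whole sg_table from
                 * the start on every lookup.
                 */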

                /**
                 * Element within i915->mm.unbound_list or
                 * i915->mm.bound_list, locked by i915->mm.obj_lock.
                 */
                struct list_head link;

                /**
                 * Advice: are the backing pages purgeable?
                 */
                unsigned int madv:2;

                /**
                 * This is set if the object has been written to since the
                 * pages were last acquired.
                 */
                bool dirty:1;

                /**
                 * This is set if the object has been pinned due to
                 * unknown swizzling.
                 */
                bool quirked:1;
        } mm;
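
        /*
         * Illustrative sketch (not part of the upstream header):
         * typical users never take mm.lock directly but instead pin
         * the pages around use:
         *
         *	err = i915_gem_object_pin_pages(obj);
         *	if (err)
         *		return err;
         *	// ... obj->mm.pages is now stable ...
         *	i915_gem_object_unpin_pages(obj);
         *
         * Only the first pin needs mm.lock; see the helpers declared
         * in i915_gem_object.h.
         */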

        /** Record of address bit 17 of each page at last unbind. */
        unsigned long *bit_17;

        union {
                struct i915_gem_userptr {
                        uintptr_t ptr;

                        struct i915_mm_struct *mm;
                        struct i915_mmu_object *mmu_object;
                        struct work_struct *work;
                } userptr;

                unsigned long scratch;

                void *gvt_info;
        };
};

static inline struct drm_i915_gem_object *
to_intel_bo(struct drm_gem_object *gem)
{
        /* Assert that to_intel_bo(NULL) == NULL */
        BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));

        return container_of(gem, struct drm_i915_gem_object, base);
}

#endif