/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_TYPES_H__
#define __I915_GEM_OBJECT_TYPES_H__

#include <linux/mmu_notifier.h>

#include <drm/drm_gem.h>
#include <drm/ttm/ttm_bo.h>
#include <uapi/drm/i915_drm.h>

#include "i915_active.h"
#include "i915_selftest.h"
#include "i915_vma_resource.h"

#include "gt/intel_gt_defines.h"

struct drm_i915_gem_object;
struct intel_frontbuffer;
struct intel_memory_region;

/*
 * struct i915_lut_handle tracks the fast lookups from handle to vma used
 * for execbuf. Although we use a radixtree for that mapping, in order to
 * remove them as the object or context is closed, we need a secondary list
 * and a translation entry (i915_lut_handle).
 */
struct i915_lut_handle {
	struct list_head obj_link;
	struct i915_gem_context *ctx;
	u32 handle;
};
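
/*
 * Hedged sketch (an assumption, not the driver's exact teardown code) of
 * the walk this secondary list enables on object close: each translation
 * entry points back at the context whose handles_vma radixtree still
 * references this object's VMA.
 *
 *	struct i915_lut_handle *lut;
 *
 *	list_for_each_entry(lut, &obj->lut_list, obj_link)
 *		radix_tree_delete(&lut->ctx->handles_vma, lut->handle);
 */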

struct drm_i915_gem_object_ops {
	unsigned int flags;
#define I915_GEM_OBJECT_IS_SHRINKABLE			BIT(1)
/* Skip the shrinker management in set_pages/unset_pages */
#define I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST	BIT(2)
#define I915_GEM_OBJECT_IS_PROXY			BIT(3)
#define I915_GEM_OBJECT_NO_MMAP				BIT(4)

	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once, prior to the use of the associated set
	 * of pages, before binding them into the GTT, and put_pages() is
	 * called after we no longer need them. As we expect there to be an
	 * associated cost with migrating pages between the backing storage
	 * and making them available for the GPU (e.g. clflush), we may hold
	 * onto the pages after they are no longer referenced by the GPU
	 * in case they may be used again shortly (for example migrating the
	 * pages to a different memory domain within the GTT). put_pages()
	 * will therefore most likely be called when the object itself is
	 * being released or under memory pressure (where we attempt to
	 * reap pages for the shrinker).
	 */
	int (*get_pages)(struct drm_i915_gem_object *obj);
	void (*put_pages)(struct drm_i915_gem_object *obj,
			  struct sg_table *pages);
	int (*truncate)(struct drm_i915_gem_object *obj);
	/**
	 * shrink - Perform further backend specific actions to facilitate
	 * shrinking.
	 * @obj: The gem object
	 * @flags: Extra flags to control shrinking behaviour in the backend
	 *
	 * Possible values for @flags:
	 *
	 * I915_GEM_OBJECT_SHRINK_WRITEBACK - Try to perform writeback of the
	 * backing pages, if supported.
	 *
	 * I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT - Don't wait for the object to
	 * idle. Active objects can be considered later. The TTM backend for
	 * example might have async migrations going on, which don't use any
	 * i915_vma to track the active GTT binding, and hence having an unbound
	 * object might not be enough.
	 */
#define I915_GEM_OBJECT_SHRINK_WRITEBACK	BIT(0)
#define I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT	BIT(1)
	int (*shrink)(struct drm_i915_gem_object *obj, unsigned int flags);

	int (*pread)(struct drm_i915_gem_object *obj,
		     const struct drm_i915_gem_pread *arg);
	int (*pwrite)(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *arg);
	u64 (*mmap_offset)(struct drm_i915_gem_object *obj);
	void (*unmap_virtual)(struct drm_i915_gem_object *obj);

	int (*dmabuf_export)(struct drm_i915_gem_object *obj);

	/**
	 * adjust_lru - notify that the madvise value was updated
	 * @obj: The gem object
	 *
	 * The madvise value may have been updated, or the object was recently
	 * referenced, so act accordingly (perhaps changing an LRU list, etc).
	 */
	void (*adjust_lru)(struct drm_i915_gem_object *obj);

	/**
	 * delayed_free - Override the default delayed free implementation
	 */
	void (*delayed_free)(struct drm_i915_gem_object *obj);

	/**
	 * migrate - Migrate object to a different region either for
	 * pinning or for as long as the object lock is held.
	 */
	int (*migrate)(struct drm_i915_gem_object *obj,
		       struct intel_memory_region *mr,
		       unsigned int flags);

	void (*release)(struct drm_i915_gem_object *obj);

#ifdef __linux__
	const struct vm_operations_struct *mmap_ops;
#else
	const struct uvm_pagerops *mmap_ops;
#endif
	const char *name; /* friendly name for debug, e.g. lockdep classes */
};
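
/*
 * Illustrative sketch, not part of the driver: a minimal backend might
 * populate the ops table roughly as below. The callback names here are
 * hypothetical; the real tables for the shmem, stolen, TTM and userptr
 * backends live in their respective gem/ source files.
 *
 *	static const struct drm_i915_gem_object_ops my_backend_ops = {
 *		.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
 *		.name = "my_backend",
 *		.get_pages = my_backend_get_pages,
 *		.put_pages = my_backend_put_pages,
 *	};
 */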

/**
 * enum i915_cache_level - The supported GTT caching values for system memory
 * pages.
 *
 * These translate to some special GTT PTE bits when binding pages into some
 * address space. They also determine whether an object, or rather its pages,
 * are coherent with the GPU when reading or writing through the CPU cache
 * with those pages.
 *
 * Userspace can also control this through struct drm_i915_gem_caching.
 */
enum i915_cache_level {
	/**
	 * @I915_CACHE_NONE:
	 *
	 * GPU access is not coherent with the CPU cache. If the cache is dirty
	 * and we need the underlying pages to be coherent with some later GPU
	 * access then we need to manually flush the pages.
	 *
	 * On shared LLC platforms reads and writes through the CPU cache are
	 * still coherent even with this setting. See also
	 * &drm_i915_gem_object.cache_coherent for more details. Due to this we
	 * should only ever use uncached for scanout surfaces, otherwise we end
	 * up over-flushing in some places.
	 *
	 * This is the default on non-LLC platforms.
	 */
	I915_CACHE_NONE = 0,
	/**
	 * @I915_CACHE_LLC:
	 *
	 * GPU access is coherent with the CPU cache. If the cache is dirty,
	 * then the GPU will ensure that access remains coherent, when both
	 * reading and writing through the CPU cache. GPU writes can dirty the
	 * CPU cache.
	 *
	 * Not used for scanout surfaces.
	 *
	 * Applies to both platforms with shared LLC (HAS_LLC), and snooping
	 * based platforms (HAS_SNOOP).
	 *
	 * This is the default on shared LLC platforms. The only exception is
	 * scanout objects, where the display engine is not coherent with the
	 * CPU cache. For such objects I915_CACHE_NONE or I915_CACHE_WT is
	 * automatically applied by the kernel in pin_for_display, if userspace
	 * has not done so already.
	 */
	I915_CACHE_LLC,
	/**
	 * @I915_CACHE_L3_LLC:
	 *
	 * Explicitly enable the Gfx L3 cache, with coherent LLC.
	 *
	 * The Gfx L3 sits between the domain specific caches, e.g.
	 * sampler/render caches, and the larger LLC. LLC is coherent with the
	 * GPU, but L3 is only visible to the GPU, so likely needs to be flushed
	 * when the workload completes.
	 *
	 * Not used for scanout surfaces.
	 *
	 * Only exposed on some gen7 + GGTT. More recent hardware has dropped
	 * this explicit setting, where it should now be enabled by default.
	 */
	I915_CACHE_L3_LLC,
	/**
	 * @I915_CACHE_WT:
	 *
	 * Write-through. Used for scanout surfaces.
	 *
	 * The GPU can utilise the caches, while still having the display engine
	 * be coherent with GPU writes, as a result we don't need to flush the
	 * CPU caches when moving out of the render domain. This is the default
	 * setting chosen by the kernel, if supported by the HW, otherwise we
	 * fallback to I915_CACHE_NONE. On the CPU side writes through the CPU
	 * cache still need to be flushed, to remain coherent with the display
	 * engine.
	 */
	I915_CACHE_WT,
	/**
	 * @I915_MAX_CACHE_LEVEL:
	 *
	 * Mark the last entry in the enum. Used for defining the
	 * cachelevel_to_pat array for cache_level to PAT translation.
	 */
	I915_MAX_CACHE_LEVEL,
};
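
/*
 * Hedged sketch of how I915_MAX_CACHE_LEVEL sizes a translation table.
 * The real per-platform tables (e.g. LEGACY_CACHELEVEL, PVC_CACHELEVEL)
 * live in i915/i915_pci.c; the PAT values below are illustrative only:
 *
 *	static const unsigned int example_cachelevel_to_pat[I915_MAX_CACHE_LEVEL] = {
 *		[I915_CACHE_NONE]   = 0,
 *		[I915_CACHE_LLC]    = 1,
 *		[I915_CACHE_L3_LLC] = 2,
 *		[I915_CACHE_WT]     = 3,
 *	};
 */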

enum i915_map_type {
	I915_MAP_WB = 0,
	I915_MAP_WC,
#define I915_MAP_OVERRIDE BIT(31)
	I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
};
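
/*
 * I915_MAP_OVERRIDE is a modifier bit rather than a distinct mapping
 * type; a hedged sketch (an assumption, the driver masks this internally)
 * of recovering the underlying type from a forced mapping:
 *
 *	enum i915_map_type base = type & ~I915_MAP_OVERRIDE;
 *	// I915_MAP_FORCE_WC reduces to I915_MAP_WC, and so on
 */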

enum i915_mmap_type {
	I915_MMAP_TYPE_GTT = 0,
	I915_MMAP_TYPE_WC,
	I915_MMAP_TYPE_WB,
	I915_MMAP_TYPE_UC,
	I915_MMAP_TYPE_FIXED,
};

struct i915_mmap_offset {
	struct drm_vma_offset_node vma_node;
	struct drm_i915_gem_object *obj;
	enum i915_mmap_type mmap_type;

	struct rb_node offset;
};

struct i915_gem_object_page_iter {
	struct scatterlist *sg_pos;
	unsigned int sg_idx; /* in pages, but 32bit eek! */

	struct radix_tree_root radix;
	struct rwlock lock; /* protects this cache */
};

struct drm_i915_gem_object {
	/*
	 * We might have reason to revisit the below since it wastes
	 * a lot of space for non-ttm gem objects.
	 * In any case, always use the accessors for the ttm_buffer_object
	 * when accessing it.
	 */
	union {
		struct drm_gem_object base;
		struct ttm_buffer_object __do_not_access;
	};

	const struct drm_i915_gem_object_ops *ops;

	struct {
		/**
		 * @vma.lock: protect the list/tree of vmas
		 */
		spinlock_t lock;

		/**
		 * @vma.list: List of VMAs backed by this object
		 *
		 * The VMAs on this list are ordered by type, all GGTT vma are
		 * placed at the head and all ppGTT vma are placed at the tail.
		 * The different types of GGTT vma are unordered between
		 * themselves, use the @vma.tree (which has a defined order
		 * between all VMA) to quickly find an exact match.
		 */
		struct list_head list;

		/**
		 * @vma.tree: Ordered tree of VMAs backed by this object
		 *
		 * All VMA created for this object are placed in the @vma.tree
		 * for fast retrieval via a binary search in
		 * i915_vma_instance(). They are also added to @vma.list for
		 * easy iteration.
		 */
		struct rb_root tree;
	} vma;
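
	/*
	 * Hedged usage sketch (assumption): i915_vma_instance() is the
	 * lookup the @vma.tree exists to make fast, searching under
	 * @vma.lock for an exact (vm, view) match before creating a new
	 * VMA:
	 *
	 *	vma = i915_vma_instance(obj, vm, view);
	 */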

	/**
	 * @lut_list: List of vma lookup entries in use for this object.
	 *
	 * If this object is closed, we need to remove all of its VMA from
	 * the fast lookup index in associated contexts; @lut_list provides
	 * this translation from object to context->handles_vma.
	 */
	struct list_head lut_list;
	spinlock_t lut_lock; /* guards lut_list */

	/**
	 * @obj_link: Link into @i915_gem_ww_ctx.obj_list
	 *
	 * When we lock this object through i915_gem_object_lock() with a
	 * context, we add it to the list to ensure we can unlock everything
	 * when i915_gem_ww_ctx_backoff() or i915_gem_ww_ctx_fini() are called.
	 */
	struct list_head obj_link;
	/**
	 * @shares_resv_from: The object shares the resv from this vm.
	 */
	struct i915_address_space *shares_resv_from;

	union {
		struct rcu_head rcu;
		struct llist_node freed;
	};

	/**
	 * Whether the object is currently in the GGTT or any other supported
	 * fake offset mmap backed by lmem.
	 */
	unsigned int userfault_count;
	struct list_head userfault_link;

	struct {
		spinlock_t lock; /* Protects access to mmo offsets */
		struct rb_root offsets;
	} mmo;

	I915_SELFTEST_DECLARE(struct list_head st_link);

	unsigned long flags;
#define I915_BO_ALLOC_CONTIGUOUS	BIT(0)
#define I915_BO_ALLOC_VOLATILE		BIT(1)
#define I915_BO_ALLOC_CPU_CLEAR		BIT(2)
#define I915_BO_ALLOC_USER		BIT(3)
/* Object is allowed to lose its contents on suspend / resume, even if pinned */
#define I915_BO_ALLOC_PM_VOLATILE	BIT(4)
/* Object needs to be restored early using memcpy during resume */
#define I915_BO_ALLOC_PM_EARLY		BIT(5)
/*
 * Object is likely never accessed by the CPU. This will prioritise the BO to be
 * allocated in the non-mappable portion of lmem. This is merely a hint, and if
 * dealing with userspace objects the CPU fault handler is free to ignore this.
 */
#define I915_BO_ALLOC_GPU_ONLY		BIT(6)
#define I915_BO_ALLOC_CCS_AUX		BIT(7)
/*
 * Object is allowed to retain its initial data and will not be cleared on first
 * access if used along with I915_BO_ALLOC_USER. This is mainly to keep
 * preallocated framebuffer data intact while transitioning it to i915drmfb.
 */
#define I915_BO_PREALLOC		BIT(8)
#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \
			     I915_BO_ALLOC_VOLATILE | \
			     I915_BO_ALLOC_CPU_CLEAR | \
			     I915_BO_ALLOC_USER | \
			     I915_BO_ALLOC_PM_VOLATILE | \
			     I915_BO_ALLOC_PM_EARLY | \
			     I915_BO_ALLOC_GPU_ONLY | \
			     I915_BO_ALLOC_CCS_AUX | \
			     I915_BO_PREALLOC)
#define I915_BO_READONLY		BIT(9)
#define I915_TILING_QUIRK_BIT		10 /* unknown swizzling; do not release! */
#define I915_BO_PROTECTED		BIT(11)
	/**
	 * @mem_flags: Mutable placement-related flags
	 *
	 * These are flags that indicate specifics of the memory region
	 * the object is currently in. As such they are only stable
	 * either under the object lock or if the object is pinned.
	 */
	unsigned int mem_flags;
#define I915_BO_FLAG_STRUCT_PAGE	BIT(0) /* Object backed by struct pages */
#define I915_BO_FLAG_IOMEM		BIT(1) /* Object backed by IO memory */
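/*
 * Hedged sketch: because @mem_flags is only stable under the object lock
 * or while the object is pinned, a test like the below assumes one of
 * those holds (an illustrative assumption, not a driver helper):
 *
 *	if (obj->mem_flags & I915_BO_FLAG_IOMEM)
 *		... object is reached through IO memory, not struct pages ...
 */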
	/**
	 * @pat_index: The desired PAT index.
	 *
	 * See hardware specification for valid PAT indices for each platform.
	 * This field replaces the @cache_level that contains a value of enum
	 * i915_cache_level since PAT indices are used by both userspace
	 * and the kernel mode driver for caching policy control after GEN12.
	 * In the meantime platform specific tables are created to translate
	 * i915_cache_level into a PAT index; for more details check the macros
	 * defined in i915/i915_pci.c, e.g. PVC_CACHELEVEL.
	 * For backward compatibility, this field contains values that exactly
	 * match the entries of enum i915_cache_level for pre-GEN12 platforms
	 * (see LEGACY_CACHELEVEL), so that the PTE encode functions for these
	 * legacy platforms can stay the same.
	 */
	unsigned int pat_index:6;
	/**
	 * @pat_set_by_user: Indicate whether pat_index is set by user space
	 *
	 * This field is set to false by default, and only set to true if the
	 * pat_index is set by user space. By design, user space is capable of
	 * managing caching behavior by setting pat_index, in which case the
	 * kernel mode driver should never touch the pat_index.
	 */
	unsigned int pat_set_by_user:1;
	/**
	 * @cache_coherent:
	 *
	 * Note: with the change above which replaced @cache_level with pat_index,
	 * the use of @cache_coherent is limited to objects created by the kernel
	 * or by userspace without a pat index specified.
	 * Check @pat_set_by_user to find out if an object has its pat index set
	 * by userspace. The ioctls to change cache settings have also been
	 * disabled for objects with a pat index set by userspace. Please don't
	 * assume @cache_coherent has the flags set as described here. A helper
	 * function i915_gem_object_has_cache_level() provides one way to bypass
	 * the use of this field.
	 *
	 * Track whether the pages are coherent with the GPU if reading or
	 * writing through the CPU caches. This largely depends on the
	 * @cache_level setting.
	 *
	 * On platforms which don't have the shared LLC (HAS_SNOOP), like on Atom
	 * platforms, coherency must be explicitly requested with some special
	 * GTT caching bits (see enum i915_cache_level). When enabling coherency
	 * it does come at a performance and power cost on such platforms. On
	 * the flip side the kernel does not need to manually flush any buffers
	 * which need to be coherent with the GPU, if the object is not coherent
	 * i.e. @cache_coherent is zero.
	 *
	 * On platforms that share the LLC with the CPU (HAS_LLC), all GT memory
	 * access will automatically snoop the CPU caches (even with CACHE_NONE).
	 * The one exception is when dealing with the display engine, like with
	 * scanout surfaces. To handle this the kernel will always flush the
	 * surface out of the CPU caches when preparing it for scanout. Also
	 * note that since scanout surfaces are only ever read by the display
	 * engine we only need to care about flushing any writes through the CPU
	 * cache, reads on the other hand will always be coherent.
	 *
	 * Something strange here is why @cache_coherent is not a simple
	 * boolean, i.e. coherent vs non-coherent. The reasoning for this is back
	 * to the display engine not being fully coherent. As a result scanout
	 * surfaces will either be marked as I915_CACHE_NONE or I915_CACHE_WT.
	 * In the case of seeing I915_CACHE_NONE the kernel makes the assumption
	 * that this is likely a scanout surface, and will set @cache_coherent
	 * as only I915_BO_CACHE_COHERENT_FOR_READ, on platforms with the shared
	 * LLC. The kernel uses this to always flush writes through the CPU
	 * cache as early as possible, where it can, in effect keeping
	 * @cache_dirty clean, so we can potentially avoid stalling when
	 * flushing the surface just before doing the scanout. This does mean
	 * we might unnecessarily flush non-scanout objects in some places, but
	 * the default assumption is that all normal objects should be using
	 * I915_CACHE_LLC, at least on platforms with the shared LLC.
	 *
	 * Supported values:
	 *
	 * I915_BO_CACHE_COHERENT_FOR_READ:
	 *
	 * On shared LLC platforms, we use this for special scanout surfaces,
	 * where the display engine is not coherent with the CPU cache. As such
	 * we need to ensure we flush any writes before doing the scanout. As an
	 * optimisation we try to flush any writes as early as possible to avoid
	 * stalling later.
	 *
	 * Thus for scanout surfaces using I915_CACHE_NONE, on shared LLC
	 * platforms, we use:
	 *
	 * cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ
	 *
	 * While for normal objects that are fully coherent, including special
	 * scanout surfaces marked as I915_CACHE_WT, we use:
	 *
	 * cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ |
	 *		    I915_BO_CACHE_COHERENT_FOR_WRITE
	 *
	 * And then for objects that are not coherent at all we use:
	 *
	 * cache_coherent = 0
	 *
	 * I915_BO_CACHE_COHERENT_FOR_WRITE:
	 *
	 * When writing through the CPU cache, the GPU is still coherent. Note
	 * that this also implies I915_BO_CACHE_COHERENT_FOR_READ.
	 */
#define I915_BO_CACHE_COHERENT_FOR_READ		BIT(0)
#define I915_BO_CACHE_COHERENT_FOR_WRITE	BIT(1)
	unsigned int cache_coherent:2;

	/**
	 * @cache_dirty:
	 *
	 * Note: with the change above which replaced cache_level with pat_index,
	 * the use of @cache_dirty is limited to objects created by the kernel
	 * or by userspace without a pat index specified.
	 * Check @pat_set_by_user to find out if an object has its pat index set
	 * by userspace. The ioctls to change cache settings have also been
	 * disabled for objects with pat_index set by userspace. Please don't
	 * assume @cache_dirty is set as described here. Also see the helper
	 * function i915_gem_object_has_cache_level() for possible ways to bypass
	 * the use of this field.
	 *
	 * Track if we are dirty with writes through the CPU cache for this
	 * object. As a result reading directly from main memory might yield
	 * stale data.
	 *
	 * This also ties into whether the kernel is tracking the object as
	 * coherent with the GPU, as per @cache_coherent, as it determines if
	 * flushing might be needed at various points.
	 *
	 * Another part of @cache_dirty is managing flushing when first
	 * acquiring the pages for system memory, at this point the pages are
	 * considered foreign, so the default assumption is that the cache is
	 * dirty, for example the page zeroing done by the kernel might leave
	 * writes in the CPU cache, or swapping-in, while the actual data in
	 * main memory is potentially stale. Note that this is a potential
	 * security issue when dealing with userspace objects and zeroing. Now,
	 * whether we actually need to apply the big sledgehammer of flushing all
	 * the pages on acquire depends on if @cache_coherent is marked as
	 * I915_BO_CACHE_COHERENT_FOR_WRITE, i.e. that the GPU will be coherent
	 * for both reads and writes through the CPU cache.
	 *
	 * Note that on shared LLC platforms we still apply the heavy flush for
	 * I915_CACHE_NONE objects, under the assumption that this is going to
	 * be used for scanout.
	 *
	 * Update: On some hardware there is now also the 'Bypass LLC' MOCS
	 * entry, which defeats our @cache_coherent tracking, since userspace
	 * can freely bypass the CPU cache when touching the pages with the GPU,
	 * where the kernel is completely unaware. On such platforms we need to
	 * apply the sledgehammer-on-acquire regardless of the @cache_coherent.
	 *
	 * Special care is taken on non-LLC platforms, to prevent potential
	 * information leak. The driver currently ensures:
	 *
	 * 1. All userspace objects, by default, have @cache_level set as
	 * I915_CACHE_NONE. The only exception is userptr objects, where we
	 * instead force I915_CACHE_LLC, but we also don't allow userspace to
	 * ever change the @cache_level for such objects. Another special case
	 * is dma-buf, which doesn't rely on @cache_dirty, but there we
	 * always do a forced flush when acquiring the pages, if there is a
	 * chance that the pages can be read directly from main memory with
	 * the GPU.
	 *
	 * 2. All I915_CACHE_NONE objects have @cache_dirty initially true.
	 *
	 * 3. All swapped-out objects (i.e. shmem) have @cache_dirty set to
	 * true.
	 *
	 * 4. The @cache_dirty is never freely reset before the initial
	 * flush, even if userspace adjusts the @cache_level through the
	 * i915_gem_set_caching_ioctl.
	 *
	 * 5. All @cache_dirty objects (including swapped-in) are initially
	 * flushed with a synchronous call to drm_clflush_sg in
	 * __i915_gem_object_set_pages. The @cache_dirty can be freely reset
	 * at this point. All further asynchronous clflushes are never security
	 * critical, i.e. userspace is free to race against itself.
	 */
	unsigned int cache_dirty:1;

	/* @is_dpt: Object houses a display page table (DPT) */
	unsigned int is_dpt:1;

	/**
	 * @read_domains: Read memory domains.
	 *
	 * These monitor which caches contain read/write data related to the
	 * object. When transitioning from one set of domains to another,
	 * the driver is called to ensure that caches are suitably flushed and
	 * invalidated.
	 */
	u16 read_domains;

	/**
	 * @write_domain: Corresponding unique write memory domain.
	 */
	u16 write_domain;

	struct intel_frontbuffer __rcu *frontbuffer;

	/** Current tiling stride for the object, if it's tiled. */
	unsigned int tiling_and_stride;
#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
#define TILING_MASK (FENCE_MINIMUM_STRIDE - 1)
#define STRIDE_MASK (~TILING_MASK)
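/*
 * Hedged sketch of the packing above: the tiling mode lives in the low
 * bits and the stride in the remaining bits, so the two halves unpack as
 * below (the driver's own accessors, e.g. i915_gem_object_get_tiling(),
 * wrap this):
 *
 *	tiling = obj->tiling_and_stride & TILING_MASK;
 *	stride = obj->tiling_and_stride & STRIDE_MASK;
 */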

	struct {
		/*
		 * Protects the pages and their use. Do not use directly, but
		 * instead go through the pin/unpin interfaces.
		 */
		atomic_t pages_pin_count;

		/**
		 * @shrink_pin: Prevents the pages from being made visible to
		 * the shrinker, while the shrink_pin is non-zero. Most users
		 * should pretty much never have to care about this, outside of
		 * some special use cases.
		 *
		 * By default most objects will start out as visible to the
		 * shrinker (if I915_GEM_OBJECT_IS_SHRINKABLE) as soon as the
		 * backing pages are attached to the object, like in
		 * __i915_gem_object_set_pages(). They will then be removed from
		 * the shrinker list once the pages are released.
		 *
		 * The @shrink_pin is incremented by calling
		 * i915_gem_object_make_unshrinkable(), which will also remove
		 * the object from the shrinker list, if the pin count was zero.
		 *
		 * Callers will then typically call
		 * i915_gem_object_make_shrinkable() or
		 * i915_gem_object_make_purgeable() to decrement the pin count,
		 * and make the pages visible again.
		 */
		atomic_t shrink_pin;
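
		/*
		 * Illustrative pairing for @shrink_pin (a sketch; the
		 * make_* helpers are the ones named in the comment above):
		 *
		 *	i915_gem_object_make_unshrinkable(obj);
		 *	... pages must stay resident here ...
		 *	i915_gem_object_make_shrinkable(obj);
		 */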

		/**
		 * @ttm_shrinkable: True when the object is using shmem pages
		 * underneath. Protected by the object lock.
		 */
		bool ttm_shrinkable;

		/**
		 * @unknown_state: Indicate that the object is effectively
		 * borked. This is write-once and set if we somehow encounter a
		 * fatal error when moving/clearing the pages, and we are not
		 * able to fallback to memcpy/memset, like on small-BAR systems.
		 * The GPU should also be wedged (or in the process) at this
		 * point.
		 *
		 * Only valid to read this after acquiring the dma-resv lock and
		 * waiting for all DMA_RESV_USAGE_KERNEL fences to be signalled,
		 * or if we otherwise know that the moving fence has signalled,
		 * and we are certain the pages underneath are valid for
		 * immediate access (under normal operation), like just prior to
		 * binding the object or when setting up the CPU fault handler.
		 * See i915_gem_object_has_unknown_state().
		 */
		bool unknown_state;

		/**
		 * Priority list of potential placements for this object.
		 */
		struct intel_memory_region **placements;
		int n_placements;

		/**
		 * Memory region for this object.
		 */
		struct intel_memory_region *region;

		/**
		 * Memory manager resource allocated for this object. Only
		 * needed for the mock region.
		 */
		struct ttm_resource *res;

		/**
		 * Element within memory_region->objects or region->purgeable
		 * if the object is marked as DONTNEED. Access is protected by
		 * region->obj_lock.
		 */
		struct list_head region_link;

		struct i915_refct_sgt *rsgt;
		struct sg_table *pages;
		void *mapping;

		struct i915_page_sizes page_sizes;

		I915_SELFTEST_DECLARE(unsigned int page_mask);

		struct i915_gem_object_page_iter get_page;
		struct i915_gem_object_page_iter get_dma_page;

		/**
		 * Element within i915->mm.shrink_list or i915->mm.purge_list,
		 * locked by i915->mm.obj_lock.
		 */
		struct list_head link;

		/**
		 * Advice: are the backing pages purgeable?
		 */
		unsigned int madv:2;

		/**
		 * This is set if the object has been written to since the
		 * pages were last acquired.
		 */
		bool dirty:1;

		u32 tlb[I915_MAX_GT];
	} mm;

	struct {
		struct i915_refct_sgt *cached_io_rsgt;
		struct i915_gem_object_page_iter get_io_page;
		struct drm_i915_gem_object *backup;
		bool created:1;
	} ttm;

	/*
	 * Record which PXP key instance this object was created against (if
	 * any), so we can use it to determine if the encryption is valid by
	 * comparing against the current key instance.
	 */
	u32 pxp_key_instance;

	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

	union {
#ifdef CONFIG_MMU_NOTIFIER
		struct i915_gem_userptr {
			uintptr_t ptr;
			unsigned long notifier_seq;

			struct mmu_interval_notifier notifier;
			struct page **pvec;
			int page_ref;
		} userptr;
#endif

		struct drm_mm_node *stolen;

		resource_size_t bo_offset;

		unsigned long scratch;
		u64 encode;

		void *gvt_info;
	};
};

#define intel_bo_to_drm_bo(bo) (&(bo)->base)
#define intel_bo_to_i915(bo) to_i915(intel_bo_to_drm_bo(bo)->dev)

static inline struct drm_i915_gem_object *
to_intel_bo(struct drm_gem_object *gem)
{
	/* Assert that to_intel_bo(NULL) == NULL */
	BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));

	return container_of(gem, struct drm_i915_gem_object, base);
}
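
/*
 * Hedged usage sketch (hypothetical callsite): converting back from the
 * &drm_gem_object that core DRM hands to e.g. a GEM callback:
 *
 *	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
 *	struct drm_i915_private *i915 = intel_bo_to_i915(obj);
 */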

#endif