/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_TYPES_H__
#define __I915_GEM_OBJECT_TYPES_H__

#include <drm/drm_gem.h>
#include <uapi/drm/i915_drm.h>

#include "i915_active.h"
#include "i915_selftest.h"

struct drm_i915_gem_object;
struct intel_frontbuffer;

/*
 * struct i915_lut_handle tracks the fast lookups from handle to vma used
 * for execbuf. Although we use a radixtree for that mapping, in order to
 * remove them as the object or context is closed, we need a secondary list
 * and a translation entry (i915_lut_handle).
 */
struct i915_lut_handle {
	struct list_head obj_link;
	struct i915_gem_context *ctx;
	u32 handle;
};
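
/*
 * Illustrative sketch (not part of this header, locking elided): on object
 * close, each lut entry names the context still holding a handle, letting
 * us prune that context's handle->vma radixtree (context->handles_vma):
 *
 *	struct i915_lut_handle *lut, *ln;
 *
 *	list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link)
 *		radix_tree_delete(&lut->ctx->handles_vma, lut->handle);
 */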

struct drm_i915_gem_object_ops {
	unsigned int flags;
#define I915_GEM_OBJECT_HAS_IOMEM	BIT(1)
#define I915_GEM_OBJECT_IS_SHRINKABLE	BIT(2)
#define I915_GEM_OBJECT_IS_PROXY	BIT(3)
#define I915_GEM_OBJECT_NO_MMAP		BIT(4)
#define I915_GEM_OBJECT_ASYNC_CANCEL	BIT(5)

	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once prior to the use of the associated set
	 * of pages, before binding them into the GTT, and put_pages() is
	 * called after we no longer need them. As we expect there to be an
	 * associated cost with migrating pages between the backing storage
	 * and making them available for the GPU (e.g. clflush), we may hold
	 * onto the pages after the GPU no longer references them, in case
	 * they may be used again shortly (for example after migrating the
	 * pages to a different memory domain within the GTT). put_pages()
	 * will therefore most likely be called when the object itself is
	 * being released, or under memory pressure (where we attempt to
	 * reap pages for the shrinker).
	 */
	int (*get_pages)(struct drm_i915_gem_object *obj);
	void (*put_pages)(struct drm_i915_gem_object *obj,
			  struct sg_table *pages);
	void (*truncate)(struct drm_i915_gem_object *obj);
	void (*writeback)(struct drm_i915_gem_object *obj);

	int (*pread)(struct drm_i915_gem_object *obj,
		     const struct drm_i915_gem_pread *arg);
	int (*pwrite)(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *arg);

	int (*dmabuf_export)(struct drm_i915_gem_object *obj);
	void (*release)(struct drm_i915_gem_object *obj);

	const char *name; /* friendly name for debug, e.g. lockdep classes */
};
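
/*
 * A minimal sketch of a backend filling in this vtable; my_get_pages() and
 * my_put_pages() are hypothetical helpers, not functions from this driver:
 *
 *	static const struct drm_i915_gem_object_ops my_ops = {
 *		.name = "my-object",
 *		.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
 *		.get_pages = my_get_pages,
 *		.put_pages = my_put_pages,
 *	};
 */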

enum i915_map_type {
	I915_MAP_WB = 0,
	I915_MAP_WC,
#define I915_MAP_OVERRIDE BIT(31)
	I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
};
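
/*
 * Usage sketch: i915_gem_object_pin_map() takes one of these values; the
 * FORCE_* variants carry I915_MAP_OVERRIDE, asking for an existing mapping
 * of the other type to be replaced. Error handling elided:
 *
 *	void *vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
 */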

enum i915_mmap_type {
	I915_MMAP_TYPE_GTT = 0,
	I915_MMAP_TYPE_WC,
	I915_MMAP_TYPE_WB,
	I915_MMAP_TYPE_UC,
};

struct i915_mmap_offset {
	struct drm_vma_offset_node vma_node;
	struct drm_i915_gem_object *obj;
	enum i915_mmap_type mmap_type;

	struct rb_node offset;
};
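
/*
 * Illustrative sketch: the fake offset userspace passes to mmap() for this
 * mapping comes from the embedded drm_vma_offset_node:
 *
 *	u64 user_offset = drm_vma_node_offset_addr(&mmo->vma_node);
 */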

struct i915_gem_object_page_iter {
	struct scatterlist *sg_pos;
	unsigned int sg_idx; /* in pages, but 32bit eek! */

	struct radix_tree_root radix;
	struct mutex lock; /* protects this cache */
};
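
/*
 * Usage sketch: callers do not walk this cache directly; lookup helpers such
 * as i915_gem_object_get_sg() use it to jump to the scatterlist entry
 * covering page n (assuming that helper's signature in this driver version):
 *
 *	unsigned int offset;
 *	struct scatterlist *sg = i915_gem_object_get_sg(obj, n, &offset);
 */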

struct drm_i915_gem_object {
	struct drm_gem_object base;

	const struct drm_i915_gem_object_ops *ops;

	struct {
		/**
		 * @vma.lock: protect the list/tree of vmas
		 */
		spinlock_t lock;

		/**
		 * @vma.list: List of VMAs backed by this object
		 *
		 * The VMAs on this list are ordered by type: all GGTT VMAs
		 * are placed at the head and all ppGTT VMAs at the tail.
		 * The different types of GGTT VMA are unordered between
		 * themselves; use the @vma.tree (which has a defined order
		 * between all VMAs) to quickly find an exact match.
		 */
		struct list_head list;

		/**
		 * @vma.tree: Ordered tree of VMAs backed by this object
		 *
		 * All VMAs created for this object are placed in the
		 * @vma.tree for fast retrieval via a binary search in
		 * i915_vma_instance(). They are also added to @vma.list for
		 * easy iteration.
		 */
		struct rb_root tree;
	} vma;
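
	/*
	 * Illustrative sketch: looking up (or lazily creating) the unique
	 * VMA for this object in a given address space goes through the tree:
	 *
	 *	struct i915_vma *vma = i915_vma_instance(obj, vm, NULL);
	 */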

	/**
	 * @lut_list: List of vma lookup entries in use for this object.
	 *
	 * If this object is closed, we need to remove all of its VMAs from
	 * the fast lookup index in associated contexts; @lut_list provides
	 * this translation from object to context->handles_vma.
	 */
	struct list_head lut_list;
	spinlock_t lut_lock; /* guards lut_list */

	/**
	 * @obj_link: Link into @i915_gem_ww_ctx.obj_list
	 *
	 * When we lock this object through i915_gem_object_lock() with a
	 * context, we add it to the list to ensure we can unlock everything
	 * when i915_gem_ww_ctx_backoff() or i915_gem_ww_ctx_fini() are called.
	 */
	struct list_head obj_link;

	union {
		struct rcu_head rcu;
		struct llist_node freed;
	};

	/**
	 * Whether the object is currently in the GGTT mmap.
	 */
	unsigned int userfault_count;
	struct list_head userfault_link;

	struct {
		spinlock_t lock; /* Protects access to mmo offsets */
		struct rb_root offsets;
	} mmo;

	I915_SELFTEST_DECLARE(struct list_head st_link);

	unsigned long flags;
#define I915_BO_ALLOC_CONTIGUOUS BIT(0)
#define I915_BO_ALLOC_VOLATILE   BIT(1)
#define I915_BO_ALLOC_STRUCT_PAGE BIT(2)
#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \
			     I915_BO_ALLOC_VOLATILE | \
			     I915_BO_ALLOC_STRUCT_PAGE)
#define I915_BO_READONLY         BIT(3)
#define I915_TILING_QUIRK_BIT    4 /* unknown swizzling; do not release! */
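
	/*
	 * Illustrative sketch: unlike the BIT()-style flags above,
	 * I915_TILING_QUIRK_BIT is a bit number, so it is meant for the
	 * atomic bitops, e.g.:
	 *
	 *	if (test_bit(I915_TILING_QUIRK_BIT, &obj->flags))
	 *		...do not discard the pages...
	 */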

	/*
	 * Whether the object is mapped as read-only to the GPU is tracked
	 * by I915_BO_READONLY above; it is only honoured if the hardware
	 * has the relevant pte bit.
	 */
	unsigned int cache_level:3;
	unsigned int cache_coherent:2;
#define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
	unsigned int cache_dirty:1;
	/**
	 * @read_domains: Read memory domains.
	 *
	 * These monitor which caches contain read/write data related to the
	 * object. When transitioning from one set of domains to another,
	 * the driver is called to ensure that caches are suitably flushed and
	 * invalidated.
	 */
	u16 read_domains;

	/**
	 * @write_domain: Corresponding unique write memory domain.
	 */
	u16 write_domain;

	struct intel_frontbuffer __rcu *frontbuffer;

	/** Current tiling stride for the object, if it's tiled. */
	unsigned int tiling_and_stride;
#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
#define TILING_MASK (FENCE_MINIMUM_STRIDE - 1)
#define STRIDE_MASK (~TILING_MASK)
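
	/*
	 * Illustrative sketch: tiling mode and stride are packed into one
	 * word and unpacked with the masks above:
	 *
	 *	unsigned int tiling = obj->tiling_and_stride & TILING_MASK;
	 *	unsigned int stride = obj->tiling_and_stride & STRIDE_MASK;
	 */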

	struct {
		/*
		 * Protects the pages and their use. Do not use directly, but
		 * instead go through the pin/unpin interfaces.
		 */
		struct mutex lock;
		atomic_t pages_pin_count;
		atomic_t shrink_pin;

		/**
		 * Memory region for this object.
		 */
		struct intel_memory_region *region;
		/**
		 * List of memory region blocks allocated for this object.
		 */
		struct list_head blocks;
		/**
		 * Element within memory_region->objects or region->purgeable
		 * if the object is marked as DONTNEED. Access is protected by
		 * region->obj_lock.
		 */
		struct list_head region_link;

		struct sg_table *pages;
		void *mapping;

		struct i915_page_sizes {
			/**
			 * The sg mask of the pages sg_table, i.e. the mask
			 * of the lengths for each sg entry.
			 */
			unsigned int phys;

			/**
			 * The gtt page sizes we are allowed to use given the
			 * sg mask and the supported page sizes. This will
			 * express the smallest unit we can use for the whole
			 * object, as well as the larger sizes we may be able
			 * to use opportunistically.
			 */
			unsigned int sg;

			/**
			 * The actual gtt page size usage. Since we can have
			 * multiple VMAs associated with this object, we need
			 * to prevent any trampling of state; hence a copy of
			 * this struct also lives in each vma, and the gtt
			 * value here should only be read/written through the
			 * vma.
			 */
			unsigned int gtt;
		} page_sizes;
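
		/*
		 * Illustrative sketch: phys and sg are bitmasks of page
		 * sizes, so a check for whole-object 2M GTT pages could read:
		 *
		 *	if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_2M)
		 *		...the object can be mapped with 2M pages...
		 */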

		I915_SELFTEST_DECLARE(unsigned int page_mask);

		struct i915_gem_object_page_iter get_page;
		struct i915_gem_object_page_iter get_dma_page;

		/**
		 * Element within i915->mm.unbound_list or i915->mm.bound_list,
		 * locked by i915->mm.obj_lock.
		 */
		struct list_head link;

		/**
		 * Advice: are the backing pages purgeable?
		 */
		unsigned int madv:2;

		/**
		 * This is set if the object has been written to since the
		 * pages were last acquired.
		 */
		bool dirty:1;
	} mm;
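
	/*
	 * Usage sketch (error handling elided): the backing store is
	 * acquired and released through the pin interfaces, not mm.lock:
	 *
	 *	err = i915_gem_object_pin_pages(obj);
	 *	if (err)
	 *		return err;
	 *	...use obj->mm.pages...
	 *	i915_gem_object_unpin_pages(obj);
	 */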

	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

	union {
#ifdef CONFIG_MMU_NOTIFIER
		struct i915_gem_userptr {
			uintptr_t ptr;

			struct i915_mm_struct *mm;
			struct i915_mmu_object *mmu_object;
			struct work_struct *work;
		} userptr;
#endif

		struct drm_mm_node *stolen;

		unsigned long scratch;
		u64 encode;

		void *gvt_info;
	};
};

static inline struct drm_i915_gem_object *
to_intel_bo(struct drm_gem_object *gem)
{
	/* Assert that to_intel_bo(NULL) == NULL */
	BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));

	return container_of(gem, struct drm_i915_gem_object, base);
}
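
/*
 * Usage sketch: downcasting inside a hypothetical drm_gem_object vfunc;
 * my_object_free() is illustrative, not a function from this driver:
 *
 *	static void my_object_free(struct drm_gem_object *gem)
 *	{
 *		struct drm_i915_gem_object *obj = to_intel_bo(gem);
 *		...
 *	}
 */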

#endif