/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <linux/reservation.h>

#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
#include <drm/drmP.h>

#include <drm/i915_drm.h>

struct drm_i915_gem_object;

struct drm_i915_gem_object_ops {
	unsigned int flags;
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1
#define I915_GEM_OBJECT_IS_SHRINKABLE  0x2

	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once prior to the use of the associated set
	 * of pages, before binding them into the GTT, and put_pages() is
	 * called after we no longer need them. As we expect there to be an
	 * associated cost with migrating pages between the backing storage
	 * and making them available for the GPU (e.g. clflush), we may hold
	 * onto the pages after they are no longer referenced by the GPU
	 * in case they may be used again shortly (for example migrating the
	 * pages to a different memory domain within the GTT). put_pages()
	 * will therefore most likely be called when the object itself is
	 * being released or under memory pressure (where we attempt to
	 * reap pages for the shrinker).
	 */
	struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
	void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);

	int (*dmabuf_export)(struct drm_i915_gem_object *);
	void (*release)(struct drm_i915_gem_object *);
};
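/*
 * Illustrative sketch only (not part of this header): a minimal backend
 * might wire up the two mandatory hooks along these lines. The helpers
 * my_backend_alloc() and my_backend_free() are hypothetical stand-ins
 * for whatever actually builds and tears down the scatterlist for the
 * backing store.
 *
 *	static struct sg_table *my_get_pages(struct drm_i915_gem_object *obj)
 *	{
 *		return my_backend_alloc(obj->base.size);
 *	}
 *
 *	static void my_put_pages(struct drm_i915_gem_object *obj,
 *				 struct sg_table *pages)
 *	{
 *		my_backend_free(pages);
 *	}
 *
 *	static const struct drm_i915_gem_object_ops my_ops = {
 *		.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
 *		.get_pages = my_get_pages,
 *		.put_pages = my_put_pages,
 *	};
 */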
struct drm_i915_gem_object {
	struct drm_gem_object base;

	const struct drm_i915_gem_object_ops *ops;

	/** List of VMAs backed by this object */
	struct list_head vma_list;
	struct rb_root vma_tree;

	/** Stolen memory for this object, instead of being backed by shmem. */
	struct drm_mm_node *stolen;
	struct list_head global_link;
	union {
		struct rcu_head rcu;
		struct llist_node freed;
	};

	/**
	 * Whether the object is currently in the GGTT mmap.
	 */
	struct list_head userfault_link;

	/** Used in execbuf to temporarily hold a ref */
	struct list_head obj_exec_link;

	struct list_head batch_pool_link;

	unsigned long flags;

	/**
	 * Have we taken a reference for the object for incomplete GPU
	 * activity?
	 */
#define I915_BO_ACTIVE_REF 0

	/*
	 * Is the object to be mapped as read-only to the GPU?
	 * Only honoured if the hardware has the relevant pte bit.
	 */
	unsigned long gt_ro:1;
	unsigned int cache_level:3;
	unsigned int cache_dirty:1;

	atomic_t frontbuffer_bits;
	unsigned int frontbuffer_ggtt_origin; /* write once */
	struct i915_gem_active frontbuffer_write;

	/** Current tiling stride for the object, if it's tiled. */
	unsigned int tiling_and_stride;
#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
#define TILING_MASK (FENCE_MINIMUM_STRIDE - 1)
#define STRIDE_MASK (~TILING_MASK)

	/** Count of VMA actually bound by this object */
	unsigned int bind_count;
	unsigned int active_count;
	unsigned int pin_display;

	struct {
		struct mutex lock; /* protects the pages and their use */
		atomic_t pages_pin_count;

		struct sg_table *pages;
		void *mapping;

		struct i915_gem_object_page_iter {
			struct scatterlist *sg_pos;
			unsigned int sg_idx; /* in pages, but 32bit eek! */

			struct radix_tree_root radix;
			struct mutex lock; /* protects this cache */
		} get_page;

		/**
		 * Advice: are the backing pages purgeable?
		 */
		unsigned int madv:2;

		/**
		 * This is set if the object has been written to since the
		 * pages were last acquired.
		 */
		bool dirty:1;

		/**
		 * This is set if the object has been pinned due to unknown
		 * swizzling.
		 */
		bool quirked:1;
	} mm;

	/** Breadcrumb of last rendering to the buffer.
	 * There can only be one writer, but we allow for multiple readers.
	 * If there is a writer, that necessarily implies that all other
	 * read requests are complete - but we may only be lazily clearing
	 * the read requests. A read request is naturally the most recent
	 * request on a ring, so we may have two different write and read
	 * requests on one ring where the write request is older than the
	 * read request. This allows for the CPU to read from an active
	 * buffer by only waiting for the write to complete.
	 */
	struct reservation_object *resv;

	/** References from framebuffers, locks out tiling changes. */
	unsigned long framebuffer_references;

	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

	struct i915_gem_userptr {
		uintptr_t ptr;
		unsigned read_only:1;

		struct i915_mm_struct *mm;
		struct i915_mmu_object *mmu_object;
		struct work_struct *work;
	} userptr;

	/** for phys allocated objects */
	struct drm_dma_handle *phys_handle;

	struct reservation_object __builtin_resv;
};
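/*
 * A note on tiling_and_stride, decoded by the helpers further below: the
 * tiling mode lives in the low bits and the byte stride in the high bits,
 * which works because a fenceable stride is a multiple of
 * FENCE_MINIMUM_STRIDE (128). As an illustrative example,
 *
 *	obj->tiling_and_stride = 4096 | I915_TILING_X;
 *
 * makes i915_gem_object_get_tiling() return I915_TILING_X and
 * i915_gem_object_get_stride() return 4096.
 */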
static inline struct drm_i915_gem_object *
to_intel_bo(struct drm_gem_object *gem)
{
	/* Assert that to_intel_bo(NULL) == NULL */
	BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));

	return container_of(gem, struct drm_i915_gem_object, base);
}

/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	return idr_find(&file->object_idr, handle);
}

static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	if (obj && !kref_get_unless_zero(&obj->base.refcount))
		obj = NULL;
	rcu_read_unlock();

	return obj;
}

/* Acquire an additional reference to @obj; the caller must already hold one. */
__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
	drm_gem_object_reference(&obj->base);
	return obj;
}

/* Drop a reference to @obj. */
__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
	__drm_gem_object_unreference(&obj->base);
}

static inline bool
i915_gem_object_is_dead(const struct drm_i915_gem_object *obj)
{
	return atomic_read(&obj->base.refcount.refcount) == 0;
}

static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
	return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
}

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
	return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE;
}

static inline bool
i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
{
	return obj->active_count;
}

static inline bool
i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj)
{
	return test_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

static inline void
i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	__set_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

static inline void
i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	__clear_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);

static inline unsigned int
i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & STRIDE_MASK;
}

static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
	struct intel_engine_cs *engine = NULL;
	struct dma_fence *fence;

	rcu_read_lock();
	fence = reservation_object_get_excl_rcu(obj->resv);
	rcu_read_unlock();

	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
		engine = to_request(fence)->engine;
	dma_fence_put(fence);

	return engine;
}

#endif /* __I915_GEM_OBJECT_H__ */
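/*
 * Illustrative usage sketch (not part of this header): resolving a
 * userspace handle and holding a reference across some work. The lookup
 * takes the RCU read lock internally and returns NULL for a stale or
 * unknown handle; the reference it hands back is dropped with
 * i915_gem_object_put(). Error handling beyond the lookup is elided.
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_lookup(file, handle);
 *	if (!obj)
 *		return -ENOENT;
 *
 *	(use obj here)
 *
 *	i915_gem_object_put(obj);
 */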