/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <linux/reservation.h>

#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
#include <drm/drmP.h>

#include <drm/i915_drm.h>

#include "i915_selftest.h"

struct drm_i915_gem_object_ops {
	unsigned int flags;
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1
#define I915_GEM_OBJECT_IS_SHRINKABLE 0x2

	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once prior to the use of the associated set
	 * of pages, before binding them into the GTT, and put_pages() is
	 * called after we no longer need them. As we expect there to be an
	 * associated cost with migrating pages between the backing storage
	 * and making them available for the GPU (e.g. clflush), we may hold
	 * onto the pages after they are no longer referenced by the GPU
	 * in case they may be used again shortly (for example migrating the
	 * pages to a different memory domain within the GTT). put_pages()
	 * will therefore most likely be called when the object itself is
	 * being released or under memory pressure (where we attempt to
	 * reap pages for the shrinker).
	 */
	struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
	void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);

	int (*pwrite)(struct drm_i915_gem_object *,
		      const struct drm_i915_gem_pwrite *);

	int (*dmabuf_export)(struct drm_i915_gem_object *);
	void (*release)(struct drm_i915_gem_object *);
};

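/*
 * Illustrative sketch only: how a backend might wire up these ops. The
 * example_* names are hypothetical stand-ins for whatever a real backend
 * does to allocate and release its sg_table (see e.g. i915_gem.c and
 * i915_gem_userptr.c for real implementations).
 *
 *	static struct sg_table *
 *	example_get_pages(struct drm_i915_gem_object *obj)
 *	{
 *		return example_alloc_pages(obj);
 *	}
 *
 *	static void
 *	example_put_pages(struct drm_i915_gem_object *obj,
 *			  struct sg_table *pages)
 *	{
 *		example_free_pages(obj, pages);
 *	}
 *
 *	static const struct drm_i915_gem_object_ops example_ops = {
 *		.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
 *			 I915_GEM_OBJECT_IS_SHRINKABLE,
 *		.get_pages = example_get_pages,
 *		.put_pages = example_put_pages,
 *	};
 */
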
struct drm_i915_gem_object {
	struct drm_gem_object base;

	const struct drm_i915_gem_object_ops *ops;

	/** List of VMAs backed by this object */
	struct list_head vma_list;
	struct rb_root vma_tree;

	/** Stolen memory for this object, instead of being backed by shmem. */
	struct drm_mm_node *stolen;
	struct list_head global_link;
	union {
		struct rcu_head rcu;
		struct llist_node freed;
	};

	/**
	 * Whether the object is currently in the GGTT mmap.
	 */
	struct list_head userfault_link;

	/** Used in execbuf to temporarily hold a ref */
	struct list_head obj_exec_link;

	struct list_head batch_pool_link;
	I915_SELFTEST_DECLARE(struct list_head st_link);

	unsigned long flags;

	/**
	 * Have we taken a reference for the object for incomplete GPU
	 * activity?
	 */
#define I915_BO_ACTIVE_REF 0

	/*
	 * Is the object to be mapped as read-only to the GPU?
	 * Only honoured if the hardware has the relevant pte bit.
	 */
	unsigned long gt_ro:1;
	unsigned int cache_level:3;
	unsigned int cache_dirty:1;

	atomic_t frontbuffer_bits;
	unsigned int frontbuffer_ggtt_origin; /* write once */
	struct i915_gem_active frontbuffer_write;

	/** Current tiling stride for the object, if it's tiled. */
	unsigned int tiling_and_stride;
#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
#define TILING_MASK (FENCE_MINIMUM_STRIDE - 1)
#define STRIDE_MASK (~TILING_MASK)

	/** Count of VMAs actually bound by this object */
	unsigned int bind_count;
	unsigned int active_count;
	unsigned int pin_display;

	struct {
		struct mutex lock; /* protects the pages and their use */
		atomic_t pages_pin_count;

		struct sg_table *pages;
		void *mapping;

		struct i915_gem_object_page_iter {
			struct scatterlist *sg_pos;
			unsigned int sg_idx; /* in pages, but 32bit eek! */

			struct radix_tree_root radix;
			struct mutex lock; /* protects this cache */
		} get_page;

		/**
		 * Advice: are the backing pages purgeable?
		 */
		unsigned int madv:2;

		/**
		 * This is set if the object has been written to since the
		 * pages were last acquired.
		 */
		bool dirty:1;

		/**
		 * This is set if the object has been pinned due to unknown
		 * swizzling.
		 */
		bool quirked:1;
	} mm;

	/** Breadcrumb of last rendering to the buffer.
	 * There can only be one writer, but we allow for multiple readers.
	 * If there is a writer, that necessarily implies that all other
	 * read requests are complete - but we may only be lazily clearing
	 * the read requests. A read request is naturally the most recent
	 * request on a ring, so we may have two different write and read
	 * requests on one ring where the write request is older than the
	 * read request. This allows for the CPU to read from an active
	 * buffer by only waiting for the write to complete.
	 */
	struct reservation_object *resv;

	/** References from framebuffers, locks out tiling changes. */
	unsigned int framebuffer_references;

	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

	union {
		struct i915_gem_userptr {
			uintptr_t ptr;
			unsigned read_only :1;

			struct i915_mm_struct *mm;
			struct i915_mmu_object *mmu_object;
			struct work_struct *work;
		} userptr;

		unsigned long scratch;
	};

	/** for phys allocated objects */
	struct drm_dma_handle *phys_handle;

	struct reservation_object __builtin_resv;
};

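/*
 * tiling_and_stride packs both fields into one word: per i915_tiling_ok(),
 * a fenceable stride is a multiple of FENCE_MINIMUM_STRIDE (128), so its
 * low 7 bits are free to carry the tiling mode. Worked example using the
 * masks above (decoded by the accessors further below):
 *
 *	stride 4096, tiling I915_TILING_X (1):
 *	tiling_and_stride = 4096 | 1 = 0x1001
 *	0x1001 & TILING_MASK = 1	-> I915_TILING_X
 *	0x1001 & STRIDE_MASK = 0x1000	-> 4096
 */
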
static inline struct drm_i915_gem_object *
to_intel_bo(struct drm_gem_object *gem)
{
	/* Assert that to_intel_bo(NULL) == NULL */
	BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));

	return container_of(gem, struct drm_i915_gem_object, base);
}

/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	return idr_find(&file->object_idr, handle);
}

static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	if (obj && !kref_get_unless_zero(&obj->base.refcount))
		obj = NULL;
	rcu_read_unlock();

	return obj;
}

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
	drm_gem_object_reference(&obj->base);
	return obj;
}

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
	__drm_gem_object_unreference(&obj->base);
}

static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
{
	reservation_object_lock(obj->resv, NULL);
}

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
	reservation_object_unlock(obj->resv);
}

static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
	return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
}

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
	return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE;
}

static inline bool
i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
{
	return obj->active_count;
}

static inline bool
i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj)
{
	return test_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

static inline void
i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	__set_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

static inline void
i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	__clear_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

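/*
 * Typical lifetime of a reference obtained from a userspace handle
 * (illustrative sketch; args->handle stands in for an ioctl argument):
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_lookup(file, args->handle);
 *	if (!obj)
 *		return -ENOENT;
 *
 *	... use obj, taking i915_gem_object_get(obj) if it must
 *	outlive this scope ...
 *
 *	i915_gem_object_put(obj);
 */
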
void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);

static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->framebuffer_references);
}

static inline unsigned int
i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & STRIDE_MASK;
}

static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
	GEM_BUG_ON(!tiling);
	return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
i915_gem_object_get_tile_height(struct drm_i915_gem_object *obj)
{
	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
i915_gem_object_get_tile_row_size(struct drm_i915_gem_object *obj)
{
	return (i915_gem_object_get_stride(obj) *
		i915_gem_object_get_tile_height(obj));
}

int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			       unsigned int tiling, unsigned int stride);

static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
	struct intel_engine_cs *engine = NULL;
	struct dma_fence *fence;

	rcu_read_lock();
	fence = reservation_object_get_excl_rcu(obj->resv);
	rcu_read_unlock();

	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
		engine = to_request(fence)->engine;
	dma_fence_put(fence);

	return engine;
}

void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);

#endif