/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/shmem_fs.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"

#if 0
static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
	if (!mutex_is_locked(mutex))
		return false;

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
	return mutex->owner == task;
#else
	/* Since UP may be pre-empted, we cannot assume that we own the lock */
	return false;
#endif
}
#endif

/**
 * i915_gem_shrink - Shrink buffer object caches
 * @dev_priv: i915 device
 * @target: amount of memory to make available, in pages
 * @flags: control flags for selecting cache types
 *
 * This function is the main interface to the shrinker. It will try to release
 * up to @target pages of main memory backing storage from buffer objects.
 * Selection of the specific caches can be done with @flags. This is e.g. useful
 * when purgeable objects should be removed from caches preferentially.
 *
 * Note that it's not guaranteed that the released amount is actually available
 * as free system memory - the pages might still be in use due to other reasons
 * (like cpu mmaps), or the mm core may have reused them before we could grab
 * them. Therefore code that needs to explicitly shrink buffer object caches
 * (e.g. to avoid deadlocks in memory reclaim) must fall back to
 * i915_gem_shrink_all().
 *
 * Also note that any kind of pinning (both per-vma address space pins and
 * backing storage pins at the buffer object level) results in the shrinker
 * code having to skip the object.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long
i915_gem_shrink(struct drm_i915_private *dev_priv,
		unsigned long target, unsigned flags)
{
	const struct {
		struct list_head *list;
		unsigned int bit;
	} phases[] = {
		{ &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
		{ &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
		{ NULL, 0 },
	}, *phase;
	unsigned long count = 0;

	trace_i915_gem_shrink(dev_priv, target, flags);
	i915_gem_retire_requests(dev_priv->dev);

	/*
	 * As we may completely rewrite the (un)bound list whilst unbinding
	 * (due to retiring requests) we have to strictly process only
	 * one element of the list at a time, and recheck the list
	 * on every iteration.
	 *
	 * In particular, we must hold a reference whilst removing the
	 * object as we may end up waiting for and/or retiring the objects.
	 * This might release the final reference (held by the active list)
	 * and result in the object being freed from under us. This is
	 * similar to the precautions the eviction code must take whilst
	 * removing objects.
	 *
	 * Also note that although these lists do not hold a reference to
	 * the object we can safely grab one here: The final object
	 * unreferencing and the bound_list are both protected by the
	 * dev->struct_mutex and so we won't ever be able to observe an
	 * object on the bound_list with a reference count of 0.
	 */
	for (phase = phases; phase->list; phase++) {
		struct list_head still_in_list;

		if ((flags & phase->bit) == 0)
			continue;

		INIT_LIST_HEAD(&still_in_list);
		while (count < target && !list_empty(phase->list)) {
			struct drm_i915_gem_object *obj;
			struct i915_vma *vma, *v;

			obj = list_first_entry(phase->list,
					       typeof(*obj), global_list);
			list_move_tail(&obj->global_list, &still_in_list);

			if (flags & I915_SHRINK_PURGEABLE &&
			    obj->madv != I915_MADV_DONTNEED)
				continue;

			if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active)
				continue;

			drm_gem_object_reference(&obj->base);

			/* For the unbound phase, this should be a no-op! */
			list_for_each_entry_safe(vma, v,
						 &obj->vma_list, vma_link)
				if (i915_vma_unbind(vma))
					break;

			if (i915_gem_object_put_pages(obj) == 0)
				count += obj->base.size >> PAGE_SHIFT;

			drm_gem_object_unreference(&obj->base);
		}
		list_splice(&still_in_list, phase->list);
	}

	i915_gem_retire_requests(dev_priv->dev);

	return count;
}
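/*
 * Usage sketch (illustrative only, compiled out): a hypothetical caller
 * holding dev->struct_mutex that wants to drop roughly 1MiB of purgeable
 * backing storage. @target is expressed in pages, so byte counts must be
 * converted via PAGE_SHIFT; the helper name below is made up for this
 * example and is not part of the driver.
 */
#if 0
static unsigned long example_drop_purgeable(struct drm_i915_private *dev_priv)
{
	/* Walk both lists, but only touch objects marked DONTNEED. */
	return i915_gem_shrink(dev_priv, (1024 * 1024) >> PAGE_SHIFT,
			       I915_SHRINK_UNBOUND |
			       I915_SHRINK_BOUND |
			       I915_SHRINK_PURGEABLE);
}
#endif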
/**
 * i915_gem_shrink_all - Shrink buffer object caches completely
 * @dev_priv: i915 device
 *
 * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
 * caches completely. It also first waits for and retires all outstanding
 * requests to also be able to release backing storage for active objects.
 *
 * This should only be used in code to intentionally quiesce the GPU or as a
 * last-ditch effort when memory seems to have run out.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
	return i915_gem_shrink(dev_priv, -1UL,
			       I915_SHRINK_BOUND |
			       I915_SHRINK_UNBOUND |
			       I915_SHRINK_ACTIVE);
}

#if 0
static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
	if (!mutex_trylock(&dev->struct_mutex)) {
		if (!mutex_is_locked_by(&dev->struct_mutex, current))
			return false;

		if (to_i915(dev)->mm.shrinker_no_lock_stealing)
			return false;

		*unlock = false;
	} else
		*unlock = true;

	return true;
}

static int num_vma_bound(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	int count = 0;

	list_for_each_entry(vma, &obj->vma_list, vma_link) {
		if (drm_mm_node_allocated(&vma->node))
			count++;
		if (vma->pin_count)
			count++;
	}

	return count;
}

static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	struct drm_device *dev = dev_priv->dev;
	struct drm_i915_gem_object *obj;
	unsigned long count;
	bool unlock;

	if (!i915_gem_shrinker_lock(dev, &unlock))
		return 0;

	count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
		if (obj->pages_pin_count == 0)
			count += obj->base.size >> PAGE_SHIFT;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (!obj->active && obj->pages_pin_count == num_vma_bound(obj))
			count += obj->base.size >> PAGE_SHIFT;
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return count;
}

static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	struct drm_device *dev = dev_priv->dev;
	unsigned long freed;
	bool unlock;

	if (!i915_gem_shrinker_lock(dev, &unlock))
		return SHRINK_STOP;

	freed = i915_gem_shrink(dev_priv,
				sc->nr_to_scan,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_PURGEABLE);
	if (freed < sc->nr_to_scan)
		freed += i915_gem_shrink(dev_priv,
					 sc->nr_to_scan - freed,
					 I915_SHRINK_BOUND |
					 I915_SHRINK_UNBOUND);
	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return freed;
}
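/*
 * For orientation: the two callbacks above implement the core-MM
 * shrinker contract. The VM (do_shrink_slab() in mm/vmscan.c) first
 * calls ->count_objects() for an estimate of reclaimable pages, then
 * calls ->scan_objects() with sc->nr_to_scan set to a slice of that
 * estimate; SHRINK_STOP signals that no forward progress is possible
 * (here: struct_mutex could not be acquired). A minimal sketch of that
 * calling convention, with a hypothetical helper name:
 */
#if 0
static unsigned long example_invoke_shrinker(struct shrinker *shrinker)
{
	struct shrink_control sc = {
		.gfp_mask = GFP_KERNEL,
		.nr_to_scan = 128,	/* batch the VM wants scanned */
	};

	if (shrinker->count_objects(shrinker, &sc) == 0)
		return 0;

	return shrinker->scan_objects(shrinker, &sc); /* may be SHRINK_STOP */
}
#endif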
static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *dev_priv =
		container_of(nb, struct drm_i915_private, mm.oom_notifier);
	struct drm_device *dev = dev_priv->dev;
	struct drm_i915_gem_object *obj;
	unsigned long timeout = msecs_to_jiffies(5000) + 1;
	unsigned long pinned, bound, unbound, freed_pages;
	bool was_interruptible;
	bool unlock;

	while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) {
		schedule_timeout_killable(1);
		if (fatal_signal_pending(current))
			return NOTIFY_DONE;
	}
	if (timeout == 0) {
		pr_err("Unable to purge GPU memory due to lock contention.\n");
		return NOTIFY_DONE;
	}

	was_interruptible = dev_priv->mm.interruptible;
	dev_priv->mm.interruptible = false;

	freed_pages = i915_gem_shrink_all(dev_priv);

	dev_priv->mm.interruptible = was_interruptible;

	/* Because we may be allocating inside our own driver, we cannot
	 * assert that there are no objects with pinned pages that are not
	 * being pointed to by hardware.
	 */
	unbound = bound = pinned = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		if (!obj->base.filp) /* not backed by a freeable object */
			continue;

		if (obj->pages_pin_count)
			pinned += obj->base.size;
		else
			unbound += obj->base.size;
	}
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (!obj->base.filp)
			continue;

		if (obj->pages_pin_count)
			pinned += obj->base.size;
		else
			bound += obj->base.size;
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	if (freed_pages || unbound || bound)
		pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
			freed_pages << PAGE_SHIFT, pinned);
	if (unbound || bound)
		pr_err("%lu and %lu bytes still available in the "
		       "bound and unbound GPU page lists.\n",
		       bound, unbound);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}
#endif
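/*
 * For reference only: the notifier registered below is invoked from the
 * core VM's out_of_memory() path via
 * blocking_notifier_call_chain(&oom_notify_list, 0, &freed), which is
 * why the handler above accumulates its result into *(unsigned long *)ptr;
 * if enough pages were freed, the OOM killer can back off without
 * killing a task.
 */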
/**
 * i915_gem_shrinker_init - Initialize i915 shrinker
 * @dev_priv: i915 device
 *
 * This function registers and sets up the i915 shrinker and OOM handler.
 */
void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
{
#if 0
	dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
	dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
	dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&dev_priv->mm.shrinker);

	dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
	register_oom_notifier(&dev_priv->mm.oom_notifier);
#endif
}
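/*
 * Note: both registrations above are compiled out in this port. For a
 * build that enables them, a matching teardown would be needed on driver
 * unload; a minimal sketch with a hypothetical name (no such function
 * exists in this file):
 */
#if 0
void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv)
{
	unregister_oom_notifier(&dev_priv->mm.oom_notifier);
	unregister_shrinker(&dev_priv->mm.shrinker);
}
#endif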