/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_trace.h"

/* Check whether any engine still has requests in flight */
static bool
gpu_is_idle(struct drm_i915_private *dev_priv)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        for_each_engine(engine, dev_priv, id) {
                if (intel_engine_is_active(engine))
                        return false;
        }

        return true;
}

/* Add the vma to the eviction scan, remembering it on @unwind for rollback */
static bool
mark_free(struct i915_vma *vma, unsigned int flags, struct list_head *unwind)
{
        if (i915_vma_is_pinned(vma))
                return false;

        if (WARN_ON(!list_empty(&vma->exec_list)))
                return false;

        if (flags & PIN_NONFAULT && vma->obj->fault_mappable)
                return false;

        list_add(&vma->exec_list, unwind);
        return drm_mm_scan_add_block(&vma->node);
}
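/*
 * Illustrative sketch only (not part of the driver): mark_free() is one
 * half of the drm_mm eviction-scan protocol. Every node handed to
 * drm_mm_scan_add_block() must be handed back via
 * drm_mm_scan_remove_block() before the drm_mm may be used again,
 * whether or not a suitable hole was found. The hypothetical helper
 * below shows just that round-trip; a real caller would go on to evict
 * the vmas for which the remove step returns true, as
 * i915_gem_evict_something() does below.
 */
static bool __maybe_unused
example_scan_roundtrip(struct list_head *vmas, unsigned int flags)
{
        struct i915_vma *vma, *next;
        bool found = false;
        LIST_HEAD(unwind);

        /* Feed eviction candidates to the scanner in list (LRU) order */
        list_for_each_entry(vma, vmas, vm_link) {
                found = mark_free(vma, flags, &unwind);
                if (found)
                        break;
        }

        /* Every block added above must be removed again, found or not */
        list_for_each_entry_safe(vma, next, &unwind, exec_list) {
                drm_mm_scan_remove_block(&vma->node);
                list_del_init(&vma->exec_list);
        }

        return found;
}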
/**
 * i915_gem_evict_something - Evict vmas to make room for binding a new one
 * @vm: address space to evict from
 * @min_size: size of the desired free space
 * @alignment: alignment constraint of the desired free space
 * @cache_level: cache_level for the desired space
 * @start: start (inclusive) of the range from which to evict objects
 * @end: end (exclusive) of the range from which to evict objects
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas until a free space satisfying the
 * requirements is found. Callers must check first whether any such hole exists
 * already before calling this function.
 *
 * This function is used by the object/vma binding code.
 *
 * Since this function is only used to free up virtual address space, it
 * ignores only pinned vmas, not objects whose backing storage itself is
 * pinned. Hence obj->pages_pin_count does not protect against eviction.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int
i915_gem_evict_something(struct i915_address_space *vm,
                         u64 min_size, u64 alignment,
                         unsigned cache_level,
                         u64 start, u64 end,
                         unsigned flags)
{
        struct drm_i915_private *dev_priv = to_i915(vm->dev);
        struct list_head eviction_list;
        struct list_head *phases[] = {
                &vm->inactive_list,
                &vm->active_list,
                NULL,
        }, **phase;
        struct i915_vma *vma, *next;
        int ret;

        trace_i915_gem_evict(vm, min_size, alignment, flags);

        /*
         * The goal is to evict objects and amalgamate space in LRU order.
         * The oldest idle objects reside on the inactive list, which is in
         * retirement order. The next objects to retire are those in flight,
         * on the active list, again in retirement order.
         *
         * The retirement sequence is thus:
         *   1. Inactive objects (already retired)
         *   2. Active objects (will stall on unbinding)
         *
         * On each list, the oldest objects lie at the HEAD with the freshest
         * object on the TAIL.
         */
        if (start != 0 || end != vm->total) {
                drm_mm_init_scan_with_range(&vm->mm, min_size,
                                            alignment, cache_level,
                                            start, end);
        } else
                drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);

        if (flags & PIN_NONBLOCK)
                phases[1] = NULL;

search_again:
        INIT_LIST_HEAD(&eviction_list);
        phase = phases;
        do {
                list_for_each_entry(vma, *phase, vm_link)
                        if (mark_free(vma, flags, &eviction_list))
                                goto found;
        } while (*++phase);

        /* Nothing found, clean up and bail out! */
        list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
                ret = drm_mm_scan_remove_block(&vma->node);
                BUG_ON(ret);

                INIT_LIST_HEAD(&vma->exec_list);
        }

        /* Can we unpin some objects such as idle hw contexts,
         * or pending flips? But since only the GGTT has global entries
         * such as scanouts, ringbuffers and contexts, we can skip the
         * purge when inspecting per-process local address spaces.
         */
        if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
                return -ENOSPC;

        if (gpu_is_idle(dev_priv)) {
                /* If we still have pending pageflip completions, drop
                 * back to userspace to give our workqueues time to
                 * acquire our locks and unpin the old scanouts.
                 */
                return intel_has_pending_fb_unpin(vm->dev) ? -EAGAIN : -ENOSPC;
        }

        /* Not everything in the GGTT is tracked via vma (otherwise we
         * could evict as required with minimal stalling) so we are forced
         * to idle the GPU and explicitly retire outstanding requests in
         * the hopes that we can then remove contexts and the like only
         * bound by their active reference.
         */
        ret = i915_gem_switch_to_kernel_context(dev_priv);
        if (ret)
                return ret;

        ret = i915_gem_wait_for_idle(dev_priv,
                                     I915_WAIT_INTERRUPTIBLE |
                                     I915_WAIT_LOCKED);
        if (ret)
                return ret;

        i915_gem_retire_requests(dev_priv);
        goto search_again;

found:
        /* drm_mm doesn't allow any other operations while
         * scanning, therefore store to-be-evicted objects on a
         * temporary list and take a reference for all before
         * calling unbind (which may remove the active reference
         * of any of our objects, thus corrupting the list).
         */
        list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
                if (drm_mm_scan_remove_block(&vma->node))
                        __i915_vma_pin(vma);
                else
                        list_del_init(&vma->exec_list);
        }

        /* Unbinding will emit any required flushes */
        ret = 0;
        while (!list_empty(&eviction_list)) {
                vma = list_first_entry(&eviction_list,
                                       struct i915_vma,
                                       exec_list);

                list_del_init(&vma->exec_list);
                __i915_vma_unpin(vma);
                if (ret == 0)
                        ret = i915_vma_unbind(vma);
        }
        return ret;
}
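/*
 * Hypothetical caller sketch (the names below are illustrative; the
 * driver's real binding path lives in the GTT/vma code, not here):
 * i915_gem_evict_something() assumes the caller has already failed to
 * find a hole, so the expected pattern is to retry the drm_mm
 * allocation after each successful eviction pass:
 */
static int __maybe_unused
example_insert_with_eviction(struct i915_address_space *vm,
                             struct drm_mm_node *node,
                             u64 size, u64 alignment,
                             unsigned cache_level, unsigned flags)
{
        int ret;

        do {
                ret = drm_mm_insert_node_in_range_generic(&vm->mm, node,
                                                          size, alignment,
                                                          cache_level,
                                                          0, vm->total,
                                                          DRM_MM_SEARCH_DEFAULT,
                                                          DRM_MM_CREATE_DEFAULT);
                if (ret != -ENOSPC)
                        return ret;

                /* No hole: evict in LRU order, then retry the insertion */
                ret = i915_gem_evict_something(vm, size, alignment,
                                               cache_level,
                                               0, vm->total, flags);
        } while (ret == 0);

        return ret;
}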
/**
 * i915_gem_evict_for_vma - Evict vmas overlapping the target's node
 * @target: the vma whose node defines the range to clear
 *
 * Walk all nodes overlapping [target->node.start, target->node.start +
 * target->node.size) and unbind them, failing if any overlapping vma is
 * pinned.
 */
int
i915_gem_evict_for_vma(struct i915_vma *target)
{
        struct drm_mm_node *node, *next;

        list_for_each_entry_safe(node, next,
                                 &target->vm->mm.head_node.node_list,
                                 node_list) {
                struct i915_vma *vma;
                int ret;

                if (node->start + node->size <= target->node.start)
                        continue;
                if (node->start >= target->node.start + target->node.size)
                        break;

                vma = container_of(node, typeof(*vma), node);

                if (i915_vma_is_pinned(vma)) {
                        if (!vma->exec_entry || i915_vma_pin_count(vma) > 1)
                                /* Object is pinned for some other use */
                                return -EBUSY;

                        /* We need to evict a buffer in the same batch */
                        if (vma->exec_entry->flags & EXEC_OBJECT_PINNED)
                                /* Overlapping fixed objects in the same batch */
                                return -EINVAL;

                        return -ENOSPC;
                }

                ret = i915_vma_unbind(vma);
                if (ret)
                        return ret;
        }

        return 0;
}

/**
 * i915_gem_evict_vm - Evict all idle vmas from a vm
 * @vm: Address space to cleanse
 * @do_idle: Boolean directing whether to idle first.
 *
 * This function evicts all idle vmas from a vm. If all unpinned vmas should be
 * evicted, @do_idle needs to be set to true.
 *
 * This is used by the execbuf code as a last-ditch effort to defragment the
 * address space.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
{
        struct i915_vma *vma, *next;
        int ret;

        WARN_ON(!mutex_is_locked(&vm->dev->struct_mutex));
        trace_i915_gem_evict_vm(vm);

        if (do_idle) {
                struct drm_i915_private *dev_priv = to_i915(vm->dev);

                if (i915_is_ggtt(vm)) {
                        ret = i915_gem_switch_to_kernel_context(dev_priv);
                        if (ret)
                                return ret;
                }

                ret = i915_gem_wait_for_idle(dev_priv,
                                             I915_WAIT_INTERRUPTIBLE |
                                             I915_WAIT_LOCKED);
                if (ret)
                        return ret;

                i915_gem_retire_requests(dev_priv);
                WARN_ON(!list_empty(&vm->active_list));
        }

        list_for_each_entry_safe(vma, next, &vm->inactive_list, vm_link)
                if (!i915_vma_is_pinned(vma))
                        WARN_ON(i915_vma_unbind(vma));

        return 0;
}
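/*
 * Hypothetical usage sketch (not the actual execbuf code): a caller
 * wanting to defragment an address space can take the cheap pass over
 * the inactive list first, and only pay for idling the GPU if pressure
 * remains:
 */
static int __maybe_unused
example_defragment_vm(struct i915_address_space *vm)
{
        int ret;

        /* Cheap pass: evict whatever is already idle and unpinned */
        ret = i915_gem_evict_vm(vm, false);
        if (ret)
                return ret;

        /* Expensive pass: stall for the GPU, then evict the remainder */
        return i915_gem_evict_vm(vm, true);
}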