/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_trace.h"

static bool
gpu_is_idle(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv) {
		if (intel_engine_is_active(engine))
			return false;
	}

	return true;
}

static bool
mark_free(struct i915_vma *vma, struct list_head *unwind)
{
	if (i915_vma_is_pinned(vma))
		return false;

	if (WARN_ON(!list_empty(&vma->exec_list)))
		return false;

	list_add(&vma->exec_list, unwind);
	return drm_mm_scan_add_block(&vma->node);
}

/**
 * i915_gem_evict_something - Evict vmas to make room for binding a new one
 * @vm: address space to evict from
 * @min_size: size of the desired free space
 * @alignment: alignment constraint of the desired free space
 * @cache_level: cache_level for the desired space
 * @start: start (inclusive) of the range from which to evict objects
 * @end: end (exclusive) of the range from which to evict objects
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas until a free space satisfying the
 * requirements is found. Callers must check first whether any such hole exists
 * already before calling this function.
 *
 * This function is used by the object/vma binding code.
 *
 * Since this function is only used to free up virtual address space, it
 * ignores only pinned vmas, and not objects whose backing storage is itself
 * pinned. Hence obj->pages_pin_count does not protect against eviction.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
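 *
 * As a rough usage sketch (illustrative only; insert_node() is a
 * hypothetical stand-in for whatever drm_mm insertion helper the caller
 * actually uses), the binding code probes for a hole first and falls back
 * to eviction only on -ENOSPC:
 *
 *	ret = insert_node(vm, node, size, alignment, start, end);
 *	if (ret == -ENOSPC) {
 *		ret = i915_gem_evict_something(vm, size, alignment,
 *					       cache_level, start, end,
 *					       flags);
 *		if (ret == 0)
 *			ret = insert_node(vm, node, size, alignment,
 *					  start, end);
 *	}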
 */
int
i915_gem_evict_something(struct i915_address_space *vm,
			 u64 min_size, u64 alignment,
			 unsigned cache_level,
			 u64 start, u64 end,
			 unsigned flags)
{
	struct drm_i915_private *dev_priv = to_i915(vm->dev);
	struct list_head eviction_list;
	struct list_head *phases[] = {
		&vm->inactive_list,
		&vm->active_list,
		NULL,
	}, **phase;
	struct i915_vma *vma, *next;
	int ret;

	trace_i915_gem_evict(vm, min_size, alignment, flags);

	/*
	 * The goal is to evict objects and amalgamate space in LRU order.
	 * The oldest idle objects reside on the inactive list, which is in
	 * retirement order. The next objects to retire are those in flight,
	 * on the active list, again in retirement order.
	 *
	 * The retirement sequence is thus:
	 *   1. Inactive objects (already retired)
	 *   2. Active objects (will stall on unbinding)
	 *
	 * On each list, the oldest objects lie at the HEAD with the freshest
	 * object on the TAIL.
	 */
	if (start != 0 || end != vm->total) {
		drm_mm_init_scan_with_range(&vm->mm, min_size,
					    alignment, cache_level,
					    start, end);
	} else
		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);

	if (flags & PIN_NONBLOCK)
		phases[1] = NULL;

search_again:
	INIT_LIST_HEAD(&eviction_list);
	phase = phases;
	do {
		list_for_each_entry(vma, *phase, vm_link)
			if (mark_free(vma, &eviction_list))
				goto found;
	} while (*++phase);

	/* Nothing found, clean up and bail out! */
	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
		ret = drm_mm_scan_remove_block(&vma->node);
		BUG_ON(ret);

		INIT_LIST_HEAD(&vma->exec_list);
	}

	/* Can we unpin some objects such as idle hw contexts,
	 * or pending flips? But since only the GGTT has global entries
	 * such as scanouts, ringbuffers and contexts, we can skip the
	 * purge when inspecting per-process local address spaces.
	 */
	if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
		return -ENOSPC;

	if (gpu_is_idle(dev_priv)) {
		/* If we still have pending pageflip completions, drop
		 * back to userspace to give our workqueues time to
		 * acquire our locks and unpin the old scanouts.
		 */
		return intel_has_pending_fb_unpin(vm->dev) ? -EAGAIN : -ENOSPC;
	}

	/* Not everything in the GGTT is tracked via vma (otherwise we
	 * could evict as required with minimal stalling) so we are forced
	 * to idle the GPU and explicitly retire outstanding requests in
	 * the hope that we can then remove contexts and the like only
	 * bound by their active reference.
	 */
	ret = i915_gem_switch_to_kernel_context(dev_priv);
	if (ret)
		return ret;

	ret = i915_gem_wait_for_idle(dev_priv, true);
	if (ret)
		return ret;

	i915_gem_retire_requests(dev_priv);
	goto search_again;

found:
	/* drm_mm doesn't allow any other operations while
	 * scanning, therefore store to-be-evicted objects on a
	 * temporary list and take a reference for all before
	 * calling unbind (which may remove the active reference
	 * of any of our objects, thus corrupting the list).
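	 *
	 * In other words, the pin taken here keeps each victim vma (and
	 * its entry on the eviction list) alive until the whole list has
	 * been walked; only then is it safe to drop the pin and unbind,
	 * as unbinding one vma could otherwise free the next one.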
	 */
	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
		if (drm_mm_scan_remove_block(&vma->node))
			__i915_vma_pin(vma);
		else
			list_del_init(&vma->exec_list);
	}

	/* Unbinding will emit any required flushes */
	ret = 0;
	while (!list_empty(&eviction_list)) {
		vma = list_first_entry(&eviction_list,
				       struct i915_vma,
				       exec_list);

		list_del_init(&vma->exec_list);
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = i915_vma_unbind(vma);
	}
	return ret;
}

/**
 * i915_gem_evict_for_vma - Evict vmas overlapping a preallocated node
 * @target: vma whose node describes the range to clear
 *
 * Walk the vm's address-ordered list of nodes and unbind every vma that
 * overlaps the range occupied by @target->node. Pinned vmas cannot be
 * evicted; depending on why they are pinned this returns -EBUSY, -EINVAL
 * or -ENOSPC.
 */
int
i915_gem_evict_for_vma(struct i915_vma *target)
{
	struct drm_mm_node *node, *next;

	list_for_each_entry_safe(node, next,
				 &target->vm->mm.head_node.node_list,
				 node_list) {
		struct i915_vma *vma;
		int ret;

		/* Skip nodes wholly before the target range... */
		if (node->start + node->size <= target->node.start)
			continue;
		/* ...and stop at the first node wholly after it. */
		if (node->start >= target->node.start + target->node.size)
			break;

		vma = container_of(node, typeof(*vma), node);

		if (i915_vma_is_pinned(vma)) {
			if (!vma->exec_entry || i915_vma_pin_count(vma) > 1)
				/* Object is pinned for some other use */
				return -EBUSY;

			/* We need to evict a buffer in the same batch */
			if (vma->exec_entry->flags & EXEC_OBJECT_PINNED)
				/* Overlapping fixed objects in the same batch */
				return -EINVAL;

			return -ENOSPC;
		}

		ret = i915_vma_unbind(vma);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * i915_gem_evict_vm - Evict all idle vmas from a vm
 * @vm: Address space to cleanse
 * @do_idle: Boolean directing whether to idle first.
 *
 * This function evicts all idle vmas from a vm. If all unpinned vmas
 * should be evicted, @do_idle must be set to true.
 *
 * This is used by the execbuf code as a last-ditch effort to defragment the
 * address space.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
{
	struct i915_vma *vma, *next;
	int ret;

	WARN_ON(!mutex_is_locked(&vm->dev->struct_mutex));
	trace_i915_gem_evict_vm(vm);

	if (do_idle) {
		struct drm_i915_private *dev_priv = to_i915(vm->dev);

		if (i915_is_ggtt(vm)) {
			ret = i915_gem_switch_to_kernel_context(dev_priv);
			if (ret)
				return ret;
		}

		ret = i915_gem_wait_for_idle(dev_priv, true);
		if (ret)
			return ret;

		i915_gem_retire_requests(dev_priv);
		WARN_ON(!list_empty(&vm->active_list));
	}

	list_for_each_entry_safe(vma, next, &vm->inactive_list, vm_link)
		if (!i915_vma_is_pinned(vma))
			WARN_ON(i915_vma_unbind(vma));

	return 0;
}
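
/*
 * Rough caller-side sketch for i915_gem_evict_vm() (illustrative only,
 * not the actual execbuf code; reserve_objects() is a hypothetical
 * stand-in for the execbuf reservation loop, called with struct_mutex
 * held):
 *
 *	ret = reserve_objects(vm, objects);
 *	if (ret == -ENOSPC) {
 *		ret = i915_gem_evict_vm(vm, true);
 *		if (ret == 0)
 *			ret = reserve_objects(vm, objects);
 *	}
 */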