/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 * $FreeBSD: src/sys/dev/drm2/i915/i915_gem_evict.c,v 1.1 2012/05/22 11:07:44 kib Exp $
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

/*
 * Queue the object on the unwind list and add its GTT node to the current
 * drm_mm eviction scan.  Returns true once the scan has collected enough
 * blocks to satisfy the request.
 */
static bool
mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
{
        list_add(&obj->exec_list, unwind);
        return drm_mm_scan_add_block(obj->gtt_space);
}

int
i915_gem_evict_something(struct drm_device *dev, int min_size,
                         unsigned alignment, bool mappable)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct list_head eviction_list, unwind_list;
        struct drm_i915_gem_object *obj;
        int ret = 0;
        int cache_level = 0;

        /*
         * The goal is to evict objects and amalgamate space in LRU order.
         * The oldest idle objects reside on the inactive list, which is in
         * retirement order. The next objects to retire are those on the (per
         * ring) active list that do not have an outstanding flush. Once the
         * hardware reports completion (the seqno is updated after the
         * batchbuffer has been finished) the clean buffer objects would
         * be retired to the inactive list. Any dirty objects would be added
         * to the tail of the flushing list. So after processing the clean
         * active objects we need to emit a MI_FLUSH to retire the flushing
         * list, hence the retirement order of the flushing list is in
         * advance of the dirty objects on the active lists.
         *
         * The retirement sequence is thus:
         *   1. Inactive objects (already retired)
         *   2. Clean active objects
         *   3. Flushing list
         *   4. Dirty active objects.
         *
         * On each list, the oldest objects lie at the HEAD with the freshest
         * object on the TAIL.
         */

        INIT_LIST_HEAD(&unwind_list);
        if (mappable)
                drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space,
                                            min_size, alignment, cache_level,
                                            0, dev_priv->mm.gtt_mappable_end);
        else
                drm_mm_init_scan(&dev_priv->mm.gtt_space,
                                 min_size, alignment, cache_level);

        /* First see if there is a large enough contiguous idle region... */
        list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
                if (mark_free(obj, &unwind_list))
                        goto found;
        }

        /* Now merge in the soon-to-be-expired objects... */
        list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
                /* Does the object require an outstanding flush? */
                if (obj->base.write_domain || obj->pin_count)
                        continue;

                if (mark_free(obj, &unwind_list))
                        goto found;
        }

        /* Finally add anything with a pending flush (in order of retirement) */
        list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
                if (obj->pin_count)
                        continue;

                if (mark_free(obj, &unwind_list))
                        goto found;
        }
        list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
                if (!obj->base.write_domain || obj->pin_count)
                        continue;

                if (mark_free(obj, &unwind_list))
                        goto found;
        }

        /* Nothing found, clean up and bail out! */
        while (!list_empty(&unwind_list)) {
                obj = list_first_entry(&unwind_list,
                                       struct drm_i915_gem_object,
                                       exec_list);

                ret = drm_mm_scan_remove_block(obj->gtt_space);
                KASSERT(ret == 0, ("drm_mm_scan_remove_block failed %d", ret));

                list_del_init(&obj->exec_list);
        }

        /* We expect the caller to unpin, evict all and try again, or give up.
         * So calling i915_gem_evict_everything() is unnecessary.
         */
        return -ENOSPC;

found:
        /* drm_mm doesn't allow any other operations while scanning,
         * therefore store the objects to be evicted on a temporary list.
         */
        INIT_LIST_HEAD(&eviction_list);
        while (!list_empty(&unwind_list)) {
                obj = list_first_entry(&unwind_list,
                                       struct drm_i915_gem_object,
                                       exec_list);
                if (drm_mm_scan_remove_block(obj->gtt_space)) {
                        list_move(&obj->exec_list, &eviction_list);
                        drm_gem_object_reference(&obj->base);
                        continue;
                }
                list_del_init(&obj->exec_list);
        }

        /* Unbinding will emit any required flushes */
        while (!list_empty(&eviction_list)) {
                obj = list_first_entry(&eviction_list,
                                       struct drm_i915_gem_object,
                                       exec_list);
                if (ret == 0)
                        ret = i915_gem_object_unbind(obj);

                list_del_init(&obj->exec_list);
                drm_gem_object_unreference(&obj->base);
        }

        return ret;
}

/**
 * Flush all outstanding GPU work, then unbind the inactive objects
 * (all of them, or only the purgeable ones if purgeable_only is set).
 */
int
i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;
        bool lists_empty;

        lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
                       list_empty(&dev_priv->mm.flushing_list) &&
                       list_empty(&dev_priv->mm.active_list));
        if (lists_empty)
                return -ENOSPC;

        /* Flush everything (on to the inactive lists) and evict */
        ret = i915_gpu_idle(dev, true);
        if (ret)
                return ret;

        KASSERT(list_empty(&dev_priv->mm.flushing_list),
            ("flush list not empty"));

        return i915_gem_evict_inactive(dev, purgeable_only);
}

/** Unbinds all inactive objects (or only the purgeable ones). */
int
i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj, *next;

        list_for_each_entry_safe(obj, next,
                                 &dev_priv->mm.inactive_list, mm_list) {
                if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
                        int ret = i915_gem_object_unbind(obj);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}
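
/*
 * Illustrative sketch only, not part of the driver: the binding paths that
 * call into this file typically treat -ENOSPC from their GTT allocation as
 * a cue to evict and retry.  The allocation helper below,
 * i915_try_gtt_alloc(), is a hypothetical stand-in for whatever node
 * allocation the real caller performs; the eviction calls are the ones
 * defined above.  Kept under "#if 0" so it is never compiled.
 */
#if 0
static int
bind_with_eviction(struct drm_device *dev, struct drm_i915_gem_object *obj,
                   int size, unsigned alignment, bool mappable)
{
        int ret;

        for (;;) {
                /* Hypothetical allocation attempt; assume it returns -ENOSPC
                 * when the aperture is too full or too fragmented. */
                ret = i915_try_gtt_alloc(obj, size, alignment, mappable);
                if (ret != -ENOSPC)
                        return ret;

                /* Make room in LRU order, then retry the allocation.  If even
                 * the scan cannot find space, fall back to evicting everything
                 * (the "evict all and try again" option noted above). */
                ret = i915_gem_evict_something(dev, size, alignment, mappable);
                if (ret == -ENOSPC)
                        ret = i915_gem_evict_everything(dev, false);
                if (ret)
                        return ret;
        }
}
#endif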