1 /************************************************************************** 2 * 3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA 4 * All Rights Reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the 8 * "Software"), to deal in the Software without restriction, including 9 * without limitation the rights to use, copy, modify, merge, publish, 10 * distribute, sub license, and/or sell copies of the Software, and to 11 * permit persons to whom the Software is furnished to do so, subject to 12 * the following conditions: 13 * 14 * The above copyright notice and this permission notice (including the 15 * next paragraph) shall be included in all copies or substantial portions 16 * of the Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 24 * USE OR OTHER DEALINGS IN THE SOFTWARE. 
25 * 26 **************************************************************************/ 27 28 #include <drm/ttm/ttm_execbuf_util.h> 29 #include <drm/ttm/ttm_bo_driver.h> 30 #include <drm/ttm/ttm_placement.h> 31 #include <linux/export.h> 32 #include <linux/wait.h> 33 34 static void ttm_eu_backoff_reservation_locked(struct list_head *list) 35 { 36 struct ttm_validate_buffer *entry; 37 38 list_for_each_entry(entry, list, head) { 39 struct ttm_buffer_object *bo = entry->bo; 40 if (!entry->reserved) 41 continue; 42 43 if (entry->removed) { 44 ttm_bo_add_to_lru(bo); 45 entry->removed = false; 46 47 } 48 entry->reserved = false; 49 atomic_set(&bo->reserved, 0); 50 wake_up_all(&bo->event_queue); 51 } 52 } 53 54 static void ttm_eu_del_from_lru_locked(struct list_head *list) 55 { 56 struct ttm_validate_buffer *entry; 57 58 list_for_each_entry(entry, list, head) { 59 struct ttm_buffer_object *bo = entry->bo; 60 if (!entry->reserved) 61 continue; 62 63 if (!entry->removed) { 64 entry->put_count = ttm_bo_del_from_lru(bo); 65 entry->removed = true; 66 } 67 } 68 } 69 70 static void ttm_eu_list_ref_sub(struct list_head *list) 71 { 72 struct ttm_validate_buffer *entry; 73 74 list_for_each_entry(entry, list, head) { 75 struct ttm_buffer_object *bo = entry->bo; 76 77 if (entry->put_count) { 78 ttm_bo_list_ref_sub(bo, entry->put_count, true); 79 entry->put_count = 0; 80 } 81 } 82 } 83 84 void ttm_eu_backoff_reservation(struct list_head *list) 85 { 86 struct ttm_validate_buffer *entry; 87 struct ttm_bo_global *glob; 88 89 if (list_empty(list)) 90 return; 91 92 entry = list_first_entry(list, struct ttm_validate_buffer, head); 93 glob = entry->bo->glob; 94 lockmgr(&glob->lru_lock, LK_EXCLUSIVE); 95 ttm_eu_backoff_reservation_locked(list); 96 lockmgr(&glob->lru_lock, LK_RELEASE); 97 } 98 EXPORT_SYMBOL(ttm_eu_backoff_reservation); 99 100 /* 101 * Reserve buffers for validation. 
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

int ttm_eu_reserve_buffers(struct list_head *list)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;
	uint32_t val_seq;

	/* An empty list is trivially reserved. */
	if (list_empty(list))
		return 0;

	/* Reset per-entry bookkeeping before attempting any reservation. */
	list_for_each_entry(entry, list, head) {
		entry->reserved = false;
		entry->put_count = 0;
		entry->removed = false;
	}

	/* All buffers share one bo_global; take it from the first entry. */
	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

	/* The validation sequence number is allocated under the LRU lock. */
	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	val_seq = entry->bo->bdev->val_seq++;

retry:
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		/* already slowpath reserved? */
		if (entry->reserved)
			continue;

		/* Fast path: try a no-wait reservation with the LRU lock held. */
		ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
			/*
			 * Buffer is held by someone else: drop the LRU lock
			 * and retry this one buffer, this time allowing a
			 * (interruptible) wait.
			 */
			ttm_eu_del_from_lru_locked(list);
			lockmgr(&glob->lru_lock, LK_RELEASE);
			ret = ttm_bo_reserve_nolru(bo, true, false,
						   true, val_seq);
			lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
			if (!ret)
				break;

			if (unlikely(ret != -EAGAIN))
				goto err;

			/* fallthrough */
		case -EAGAIN:
			/*
			 * Deadlock avoidance: release everything we have
			 * reserved so far, then wait for the contended
			 * buffer on the slowpath before starting over.
			 */
			ttm_eu_backoff_reservation_locked(list);

			/*
			 * temporarily increase sequence number every retry,
			 * to prevent us from seeing our old reservation
			 * sequence when someone else reserved the buffer,
			 * but hasn't updated the seq_valid/seqno members yet.
			 */
			val_seq = entry->bo->bdev->val_seq++;

			lockmgr(&glob->lru_lock, LK_RELEASE);
			/* References must be dropped without the LRU lock. */
			ttm_eu_list_ref_sub(list);
			ret = ttm_bo_reserve_slowpath_nolru(bo, true, val_seq);
			if (unlikely(ret != 0))
				return ret;
			lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
			entry->reserved = true;
			/* Buffers with pending CPU writers cannot be validated. */
			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
				ret = -EBUSY;
				goto err;
			}
			/* Restart the pass; already-reserved entries are skipped. */
			goto retry;
		default:
			goto err;
		}

		entry->reserved = true;
		/* Buffers with pending CPU writers cannot be validated. */
		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			ret = -EBUSY;
			goto err;
		}
	}

	/* Success: pull every reserved buffer off the LRU lists. */
	ttm_eu_del_from_lru_locked(list);
	lockmgr(&glob->lru_lock, LK_RELEASE);
	ttm_eu_list_ref_sub(list);

	return 0;

err:
	/* Undo all reservations taken so far, then drop deferred references. */
	ttm_eu_backoff_reservation_locked(list);
	lockmgr(&glob->lru_lock, LK_RELEASE);
	ttm_eu_list_ref_sub(list);
	return ret;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

/*
 * Attach @sync_obj as the new fence on every buffer in @list, unreserve
 * the buffers, and finally drop the references to the fences they
 * previously carried.  No-op for an empty list.
 *
 * NOTE(review): @sync_obj is an opaque driver fence object; its exact
 * semantics are defined by driver->sync_obj_ref/unref — confirm against
 * the bo driver in use.
 */
void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;

	if (list_empty(list))
		return;

	/* Device, driver and bo_global are shared by all entries. */
	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	driver = bdev->driver;
	glob = bo->glob;

	/* Swap fences and unreserve under both the LRU and fence locks. */
	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		/* Stash the old fence; it is released after the locks drop. */
		entry->old_sync_obj = bo->sync_obj;
		bo->sync_obj = driver->sync_obj_ref(sync_obj);
		ttm_bo_unreserve_locked(bo);
		entry->reserved = false;
	}
	lockmgr(&bdev->fence_lock, LK_RELEASE);
	lockmgr(&glob->lru_lock, LK_RELEASE);

	/* Drop the old fence references outside the locks. */
	list_for_each_entry(entry, list, head) {
		if (entry->old_sync_obj)
			driver->sync_obj_unref(&entry->old_sync_obj);
	}
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);