/*
 * (C) Copyright 2016 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/slab.h>
#include <linux/dma-fence.h>
#include <linux/reservation.h>

#include "i915_sw_fence.h"

static DEFINE_SPINLOCK(i915_sw_fence_lock);

static int __i915_sw_fence_notify(struct i915_sw_fence *fence,
				  enum i915_sw_fence_notify state)
{
	i915_sw_fence_notify_t fn;

	/* The notify callback is packed into the low bits of fence->flags. */
	fn = (i915_sw_fence_notify_t)(fence->flags & I915_SW_FENCE_MASK);
	return fn(fence, state);
}

static void i915_sw_fence_free(struct kref *kref)
{
	struct i915_sw_fence *fence = container_of(kref, typeof(*fence), kref);

	WARN_ON(atomic_read(&fence->pending) > 0);

	if (fence->flags & I915_SW_FENCE_MASK)
		__i915_sw_fence_notify(fence, FENCE_FREE);
	else
		kfree(fence);
}

static void i915_sw_fence_put(struct i915_sw_fence *fence)
{
	kref_put(&fence->kref, i915_sw_fence_free);
}

static struct i915_sw_fence *i915_sw_fence_get(struct i915_sw_fence *fence)
{
	kref_get(&fence->kref);
	return fence;
}

static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
					struct list_head *continuation)
{
	wait_queue_head_t *x = &fence->wait;
	wait_queue_t *pos, *next;
	unsigned long flags;

	atomic_set_release(&fence->pending, -1); /* 0 -> -1 [done] */

	/*
	 * To prevent unbounded recursion as we traverse the graph of
	 * i915_sw_fences, we move the entries from this fence's task_list
	 * (the next ready fence) to the tail of the original fence's
	 * task_list, so they are woken as part of that list instead.
	 */

	spin_lock_irqsave_nested(&x->lock, flags, 1 + !!continuation);
	if (continuation) {
		list_for_each_entry_safe(pos, next, &x->task_list, task_list) {
			if (pos->func == autoremove_wake_function)
				pos->func(pos, TASK_NORMAL, 0, continuation);
			else
				list_move_tail(&pos->task_list, continuation);
		}
	} else {
		/* LINUX_LIST_HEAD is this port's compat spelling of the
		 * Linux LIST_HEAD() on-stack list initialiser.
		 */
		LINUX_LIST_HEAD(extra);

		do {
			list_for_each_entry_safe(pos, next,
						 &x->task_list, task_list)
				pos->func(pos, TASK_NORMAL, 0, &extra);

			if (list_empty(&extra))
				break;

			list_splice_tail_init(&extra, &x->task_list);
		} while (1);
	}
	spin_unlock_irqrestore(&x->lock, flags);
}

static void __i915_sw_fence_complete(struct i915_sw_fence *fence,
				     struct list_head *continuation)
{
	if (!atomic_dec_and_test(&fence->pending))
		return;

	if (fence->flags & I915_SW_FENCE_MASK &&
	    __i915_sw_fence_notify(fence, FENCE_COMPLETE) != NOTIFY_DONE)
		return;

	__i915_sw_fence_wake_up_all(fence, continuation);
}

static void i915_sw_fence_complete(struct i915_sw_fence *fence)
{
	if (WARN_ON(i915_sw_fence_done(fence)))
		return;

	__i915_sw_fence_complete(fence, NULL);
}

static void i915_sw_fence_await(struct i915_sw_fence *fence)
{
	WARN_ON(atomic_inc_return(&fence->pending) <= 1);
}

void i915_sw_fence_init(struct i915_sw_fence *fence, i915_sw_fence_notify_t fn)
{
	BUG_ON((unsigned long)fn & ~I915_SW_FENCE_MASK);

	init_waitqueue_head(&fence->wait);
	kref_init(&fence->kref);
	atomic_set(&fence->pending, 1);
	fence->flags = (unsigned long)fn;
}

void i915_sw_fence_commit(struct i915_sw_fence *fence)
{
	i915_sw_fence_complete(fence);
	i915_sw_fence_put(fence);
}
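/*
 * Typical lifecycle, as a sketch only (my_notify and st are hypothetical
 * names, not taken from this file): embed an i915_sw_fence in a larger
 * struct, initialise it with a notify callback, queue any asynchronous
 * waits, then commit to drop the initial pending count:
 *
 *	static int my_notify(struct i915_sw_fence *fence,
 *			     enum i915_sw_fence_notify state)
 *	{
 *		return NOTIFY_DONE;
 *	}
 *
 *	i915_sw_fence_init(&st->fence, my_notify);
 *	i915_sw_fence_await_dma_fence(&st->fence, dma, 0, GFP_KERNEL);
 *	i915_sw_fence_commit(&st->fence);
 *
 * The callback must be suitably aligned, as its low bits share storage
 * with the fence flags (hence the BUG_ON in i915_sw_fence_init()).
 */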
static int
i915_sw_fence_wake(wait_queue_t *wq, unsigned mode, int flags, void *key)
{
	list_del(&wq->task_list);
	__i915_sw_fence_complete(wq->private, key);
	i915_sw_fence_put(wq->private);
	return 0;
}

/*
 * Depth-first search for @signaler among the fences that @fence will wake,
 * marking visited fences with I915_SW_FENCE_CHECKED_BIT so that shared
 * subtrees are only walked once.
 */
static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
					   const struct i915_sw_fence * const signaler)
{
	wait_queue_t *wq;

	if (__test_and_set_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
		return false;

	if (fence == signaler)
		return true;

	list_for_each_entry(wq, &fence->wait.task_list, task_list) {
		if (wq->func != i915_sw_fence_wake)
			continue;

		if (__i915_sw_fence_check_if_after(wq->private, signaler))
			return true;
	}

	return false;
}

static void __i915_sw_fence_clear_checked_bit(struct i915_sw_fence *fence)
{
	wait_queue_t *wq;

	if (!__test_and_clear_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
		return;

	list_for_each_entry(wq, &fence->wait.task_list, task_list) {
		if (wq->func != i915_sw_fence_wake)
			continue;

		__i915_sw_fence_clear_checked_bit(wq->private);
	}
}

static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
					 const struct i915_sw_fence * const signaler)
{
	unsigned long flags;
	bool err;

	if (!IS_ENABLED(CONFIG_I915_SW_FENCE_CHECK_DAG))
		return false;

	spin_lock_irqsave(&i915_sw_fence_lock, flags);
	err = __i915_sw_fence_check_if_after(fence, signaler);
	__i915_sw_fence_clear_checked_bit(fence);
	spin_unlock_irqrestore(&i915_sw_fence_lock, flags);

	return err;
}
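/*
 * Illustrative example of what the DAG check rejects (fence names are
 * hypothetical): if B has already been made to wait upon A, then making
 * A wait upon B would form a cycle and neither fence could ever signal:
 *
 *	i915_sw_fence_await_sw_fence(&B, &A, &wq_b);	// B after A: ok
 *	i915_sw_fence_await_sw_fence(&A, &B, &wq_a);	// cycle: -EINVAL
 *
 * Without CONFIG_I915_SW_FENCE_CHECK_DAG the check compiles away and
 * such a cycle would go undetected.
 */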
int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
				 struct i915_sw_fence *signaler,
				 wait_queue_t *wq)
{
	unsigned long flags;
	int pending;

	if (i915_sw_fence_done(signaler))
		return 0;

	/* The dependency graph must be acyclic. */
	if (unlikely(i915_sw_fence_check_if_after(fence, signaler)))
		return -EINVAL;

	INIT_LIST_HEAD(&wq->task_list);
	wq->flags = 0;
	wq->func = i915_sw_fence_wake;
	wq->private = i915_sw_fence_get(fence);

	i915_sw_fence_await(fence);

	spin_lock_irqsave(&signaler->wait.lock, flags);
	if (likely(!i915_sw_fence_done(signaler))) {
		__add_wait_queue_tail(&signaler->wait, wq);
		pending = 1;
	} else {
		/* The signaler completed while we were setting up; wake
		 * ourselves immediately rather than leaving a stale entry.
		 */
		i915_sw_fence_wake(wq, 0, 0, NULL);
		pending = 0;
	}
	spin_unlock_irqrestore(&signaler->wait.lock, flags);

	return pending;
}

struct i915_sw_dma_fence_cb {
	struct dma_fence_cb base;
	struct i915_sw_fence *fence;
	struct dma_fence *dma;
	struct timer_list timer;
};

static void timer_i915_sw_fence_wake(unsigned long data)
{
	struct i915_sw_dma_fence_cb *cb = (struct i915_sw_dma_fence_cb *)data;

	/* The external fence took too long; complete our fence anyway. */
	printk(KERN_WARNING "asynchronous wait on fence %s:%s:%x timed out\n",
	       cb->dma->ops->get_driver_name(cb->dma),
	       cb->dma->ops->get_timeline_name(cb->dma),
	       cb->dma->seqno);
	dma_fence_put(cb->dma);
	cb->dma = NULL;

	i915_sw_fence_commit(cb->fence);
	cb->timer.function = NULL;
}

static void dma_i915_sw_fence_wake(struct dma_fence *dma,
				   struct dma_fence_cb *data)
{
	struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);

	/*
	 * The timer callback clears timer.function once it has committed
	 * the fence; if it is still set here, the timeout never fired and
	 * the commit is ours to do.
	 */
	del_timer_sync(&cb->timer);
	if (cb->timer.function)
		i915_sw_fence_commit(cb->fence);
	dma_fence_put(cb->dma);

	kfree(cb);
}

int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
				  struct dma_fence *dma,
				  unsigned long timeout,
				  gfp_t gfp)
{
	struct i915_sw_dma_fence_cb *cb;
	int ret;

	if (dma_fence_is_signaled(dma))
		return 0;

	/* Three-argument kmalloc() taking the M_DRM malloc type, as in
	 * this port's BSD-style allocator.
	 */
	cb = kmalloc(sizeof(*cb), M_DRM, gfp);
	if (!cb) {
		if (!gfpflags_allow_blocking(gfp))
			return -ENOMEM;

		/* We cannot queue an asynchronous wait, so wait here. */
		return dma_fence_wait(dma, false);
	}

	cb->fence = i915_sw_fence_get(fence);
	i915_sw_fence_await(fence);

	cb->dma = NULL;
	__setup_timer(&cb->timer,
		      timer_i915_sw_fence_wake, (unsigned long)cb,
		      TIMER_IRQSAFE);
	if (timeout) {
		cb->dma = dma_fence_get(dma);
		mod_timer(&cb->timer, round_jiffies_up(jiffies + timeout));
	}

	ret = dma_fence_add_callback(dma, &cb->base, dma_i915_sw_fence_wake);
	if (ret == 0) {
		ret = 1;
	} else {
		dma_i915_sw_fence_wake(dma, &cb->base);
		if (ret == -ENOENT) /* fence already signaled */
			ret = 0;
	}

	return ret;
}
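/*
 * Usage sketch for the dma-fence await above (values are illustrative):
 * a non-zero timeout arms the fallback timer so that a wedged foreign
 * fence cannot stall us forever, while a zero timeout waits indefinitely:
 *
 *	err = i915_sw_fence_await_dma_fence(&st->fence, dma,
 *					    msecs_to_jiffies(10 * 1000),
 *					    GFP_KERNEL);
 *
 * The result is negative on error, 0 if no asynchronous wait was needed
 * (the fence had already signaled, or we waited synchronously after an
 * allocation failure), and 1 if a wait was queued.
 */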
int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
				    struct reservation_object *resv,
				    const struct dma_fence_ops *exclude,
				    bool write,
				    unsigned long timeout,
				    gfp_t gfp)
{
	struct dma_fence *excl;
	int ret = 0, pending;

	if (write) {
		struct dma_fence **shared;
		unsigned int count, i;

		ret = reservation_object_get_fences_rcu(resv,
							&excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			if (shared[i]->ops == exclude)
				continue;

			pending = i915_sw_fence_await_dma_fence(fence,
								shared[i],
								timeout,
								gfp);
			if (pending < 0) {
				ret = pending;
				break;
			}

			ret |= pending;
		}

		for (i = 0; i < count; i++)
			dma_fence_put(shared[i]);
		kfree(shared);
	} else {
		excl = reservation_object_get_excl_rcu(resv);
	}

	if (ret >= 0 && excl && excl->ops != exclude) {
		pending = i915_sw_fence_await_dma_fence(fence,
							excl,
							timeout,
							gfp);
		if (pending < 0)
			ret = pending;
		else
			ret |= pending;
	}

	dma_fence_put(excl);

	return ret;
}
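/*
 * Usage sketch for the reservation await (arguments illustrative): for a
 * write we must wait for every fence tracked in the reservation object,
 * for a read only for the exclusive (write) fence. Fences whose ops match
 * @exclude, e.g. the driver's own i915_fence_ops, are skipped and left to
 * be handled by other means:
 *
 *	err = i915_sw_fence_await_reservation(&st->fence, resv,
 *					      &i915_fence_ops, write,
 *					      timeout, GFP_KERNEL);
 *
 * As with the dma-fence await, the result is negative on error, zero if
 * nothing was awaited, and positive if at least one wait was queued.
 */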