/*
 * Copyright (c) 2019-2020 Jonathan Gray
 * Copyright (c) 2020 François Tigeot
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * The original #include targets were lost to angle-bracket stripping.
 * <linux/dma-fence.h> and <linux/sched.h> are assumed restorations: they
 * declare struct dma_fence, the DMA_FENCE_FLAG_* bits, and the
 * current/wake_up_process() compatibility glue used below.
 */
#include <linux/dma-fence.h>
#include <linux/sched.h>

/*
 * Initialize a fence as unsignaled, holding one reference and an empty
 * callback list. The caller-supplied lock protects all fence state.
 */
void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
    spinlock_t *lock, u64 context, unsigned seqno)
{
	fence->ops = ops;
	fence->lock = lock;
	fence->context = context;
	fence->seqno = seqno;
	fence->flags = 0;
	fence->error = 0;
	kref_init(&fence->refcount);
	INIT_LIST_HEAD(&fence->cb_list);
}

/*
 * kref release callback, invoked when the last reference is dropped:
 * let the driver free the fence, or kfree() it if no release hook is set.
 */
void
dma_fence_release(struct kref *ref)
{
	struct dma_fence *fence = container_of(ref, struct dma_fence,
	    refcount);

	if (fence->ops && fence->ops->release)
		fence->ops->release(fence);
	else
		kfree(fence);
}

/*
 * Sleep until the fence is signaled or the timeout (in ticks) expires.
 * Returns the remaining timeout on success, 0 on timeout, or a negative
 * error code.
 */
long
dma_fence_wait_timeout(struct dma_fence *fence, bool intr, long timeout)
{
	if (timeout < 0)
		return -EINVAL;

	if (fence->ops->wait)
		return fence->ops->wait(fence, intr, timeout);
	else
		return dma_fence_default_wait(fence, intr, timeout);
}

static atomic64_t drm_fence_context_count = ATOMIC_INIT(1);

/*
 * Allocate num consecutive fence context numbers and return the first.
 */
u64
dma_fence_context_alloc(unsigned num)
{
	return atomic64_add_return(num, &drm_fence_context_count) - num;
}

struct default_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void
dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct default_wait_cb *wait =
	    container_of(cb, struct default_wait_cb, base);

	wake_up_process(wait->task);
}

/*
 * Default wait implementation: register a callback that wakes this
 * thread, then sleep on the fence lock until signaled, interrupted
 * or timed out.
 */
long
dma_fence_default_wait(struct dma_fence *fence, bool intr,
    signed long timeout)
{
	long ret = timeout ? timeout : 1;
	int err;
	struct default_wait_cb cb;
	bool was_set;

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return ret;

	lockmgr(fence->lock, LK_EXCLUSIVE);

	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
	    &fence->flags);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		goto out;

	/*
	 * If signaling was not already enabled, ask the driver to start
	 * delivering it; failure means the fence is already done.
	 */
	if (!was_set && fence->ops->enable_signaling) {
		if (!fence->ops->enable_signaling(fence)) {
			dma_fence_signal_locked(fence);
			goto out;
		}
	}

	if (timeout == 0) {
		ret = 0;
		goto out;
	}

	cb.base.func = dma_fence_default_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &fence->cb_list);

	while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		/*
		 * wake_up_process() directly uses task_struct pointers as
		 * sleep identifiers
		 */
		err = lksleep(current, fence->lock, intr ? PCATCH : 0,
		    "dmafence", timeout);
		if (err == EINTR || err == ERESTART) {
			ret = -ERESTARTSYS;
			break;
		} else if (err == EWOULDBLOCK) {
			ret = 0;
			break;
		}
	}

	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);
out:
	lockmgr(fence->lock, LK_RELEASE);

	return ret;
}

/*
 * Signal the fence with fence->lock already held: mark it signaled,
 * timestamp it and run every pending callback exactly once.
 * Returns -EINVAL if the fence was already signaled.
 */
int
dma_fence_signal_locked(struct dma_fence *fence)
{
	struct dma_fence_cb *cur, *tmp;
	struct list_head cb_list;

	if (fence == NULL)
		return -EINVAL;

	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return -EINVAL;

	/* Detach the callback list so each callback runs exactly once. */
	list_replace(&fence->cb_list, &cb_list);

	fence->timestamp = ktime_get();
	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);

	list_for_each_entry_safe(cur, tmp, &cb_list, node) {
		INIT_LIST_HEAD(&cur->node);
		cur->func(fence, cur);
	}

	return 0;
}

/*
 * Signal the fence, taking and releasing fence->lock around the work.
 */
int
dma_fence_signal(struct dma_fence *fence)
{
	int r;

	if (fence == NULL)
		return -EINVAL;

	lockmgr(fence->lock, LK_EXCLUSIVE);
	r = dma_fence_signal_locked(fence);
	lockmgr(fence->lock, LK_RELEASE);

	return r;
}

/*
 * Opt in to software signaling. If the driver's enable_signaling hook
 * fails, the fence can make no further progress and is signaled here.
 */
void
dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
	if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
	    &fence->flags) &&
	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
	    fence->ops->enable_signaling) {
		lockmgr(fence->lock, LK_EXCLUSIVE);
		if (!fence->ops->enable_signaling(fence))
			dma_fence_signal_locked(fence);
		lockmgr(fence->lock, LK_RELEASE);
	}
}

/*
 * Register func to be called when the fence signals.
 * Returns -ENOENT if the fence has already signaled, in which case the
 * callback will never run.
 */
int
dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
    dma_fence_func_t func)
{
	int ret = 0;
	bool was_set;

	if (WARN_ON(!fence || !func))
		return -EINVAL;

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		INIT_LIST_HEAD(&cb->node);
		return -ENOENT;
	}

	lockmgr(fence->lock, LK_EXCLUSIVE);

	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
	    &fence->flags);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		ret = -ENOENT;
	} else if (!was_set && fence->ops->enable_signaling) {
		if (!fence->ops->enable_signaling(fence)) {
			dma_fence_signal_locked(fence);
			ret = -ENOENT;
		}
	}

	if (!ret) {
		cb->func = func;
		list_add_tail(&cb->node, &fence->cb_list);
	} else {
		INIT_LIST_HEAD(&cb->node);
	}

	lockmgr(fence->lock, LK_RELEASE);

	return ret;
}

/*
 * Remove a previously added callback.
 * Returns true if the callback was removed before it had a chance to run.
 */
bool
dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	bool ret;

	lockmgr(fence->lock, LK_EXCLUSIVE);

	ret = !list_empty(&cb->node);
	if (ret)
		list_del_init(&cb->node);

	lockmgr(fence->lock, LK_RELEASE);

	return ret;
}

void
dma_fence_free(struct dma_fence *fence)
{
	kfree(fence);
}
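
#if 0
/*
 * Illustrative usage sketch, excluded from the build. Every my_* name
 * below is hypothetical and not part of this file or its headers; a real
 * driver would back the ops with hardware state and would also fill in
 * whatever identification hooks its dma_fence_ops declares. With no
 * .enable_signaling, .wait or .release hooks supplied, the fence takes
 * the fallback paths above: dma_fence_default_wait() for waiting and
 * kfree() for release.
 */
static const struct dma_fence_ops my_fence_ops = {};

static spinlock_t my_timeline_lock;	/* assumed initialized at attach */
static u64 my_context;			/* from dma_fence_context_alloc(1) */
static unsigned my_seqno;

/* Producer: allocate a fence for a piece of submitted work. */
static struct dma_fence *
my_submit_work(void)
{
	struct dma_fence *f;

	/* Assumes the compat layer's kzalloc(); the fence starts
	 * unsignaled, holding the initial reference from kref_init(). */
	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (f == NULL)
		return NULL;
	dma_fence_init(f, &my_fence_ops, &my_timeline_lock,
	    my_context, ++my_seqno);
	return f;
}

/* Completion path (e.g. an interrupt handler): wake all waiters. */
static void
my_work_done(struct dma_fence *f)
{
	dma_fence_signal(f);
}

/*
 * Consumer: interruptibly wait up to one second (hz ticks, the unit
 * lksleep() expects) for the work to complete. The owner typically drops
 * the initial reference afterwards via the header's dma_fence_put(),
 * which ends in dma_fence_release() above.
 */
static int
my_wait_for_work(struct dma_fence *f)
{
	long ret;

	ret = dma_fence_wait_timeout(f, true, hz);
	if (ret == 0)
		return -ETIMEDOUT;	/* timed out */
	if (ret < 0)
		return (int)ret;	/* e.g. -ERESTARTSYS on a signal */
	return 0;			/* fence signaled in time */
}
#endif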