xref: /dragonfly/sys/dev/drm/linux_fence.c (revision 5ca0a96d)
1 /*
2  * Copyright (c) 2019-2020 Jonathan Gray <jsg@openbsd.org>
3  * Copyright (c) 2020 François Tigeot <ftigeot@wolfpond.org>
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 
25 #include <linux/slab.h>
26 #include <linux/dma-fence.h>
27 
28 void
29 dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
30     spinlock_t *lock, u64 context, unsigned seqno)
31 {
32 	fence->ops = ops;
33 	fence->lock = lock;
34 	fence->context = context;
35 	fence->seqno = seqno;
36 	fence->flags = 0;
37 	fence->error = 0;
38 	kref_init(&fence->refcount);
39 	INIT_LIST_HEAD(&fence->cb_list);
40 }
41 
42 void
43 dma_fence_release(struct kref *ref)
44 {
45 	struct dma_fence *fence = container_of(ref, struct dma_fence, refcount);
46 
47 	if (fence->ops && fence->ops->release)
48 		fence->ops->release(fence);
49 	else
50 		kfree(fence);
51 }
52 
53 long
54 dma_fence_wait_timeout(struct dma_fence *fence, bool intr, long timeout)
55 {
56 	if (timeout < 0)
57 		return -EINVAL;
58 
59 	if (fence->ops->wait)
60 		return fence->ops->wait(fence, intr, timeout);
61 	else
62 		return dma_fence_default_wait(fence, intr, timeout);
63 }
64 
65 static atomic64_t drm_fence_context_count = ATOMIC_INIT(1);
66 
67 u64
68 dma_fence_context_alloc(unsigned num)
69 {
70 	return atomic64_add_return(num, &drm_fence_context_count) - num;
71 }
72 
/*
 * On-stack bookkeeping for dma_fence_default_wait(): embeds the generic
 * fence callback and remembers which task to wake when the fence signals.
 */
struct default_wait_cb {
	struct dma_fence_cb base;	/* linked on fence->cb_list */
	struct task_struct *task;	/* sleeper to wake_up_process() */
};
77 
78 static void
79 dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
80 {
81 	struct default_wait_cb *wait =
82 		container_of(cb, struct default_wait_cb, base);
83 
84 	wake_up_process(wait->task);
85 }
86 
/*
 * dma_fence_default_wait - default sleep-until-signaled implementation.
 *
 * Waits until @fence is signaled, until @timeout expires, or — when
 * @intr is true — until the sleep is interrupted by a signal.
 *
 * Returns -ERESTARTSYS when interrupted, 0 on timeout, and a positive
 * value when the fence signaled (the original @timeout, or 1 when
 * called with @timeout == 0).
 *
 * NOTE(review): the full @timeout is handed to every lksleep() call and
 * is not recomputed after a wakeup that does not end the loop, and the
 * success return is the full @timeout rather than the time remaining.
 * Linux's dma_fence_default_wait() returns remaining jiffies — confirm
 * no caller depends on that stricter semantic.
 */
long
dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
{
	long ret = timeout ? timeout : 1;
	int err;
	struct default_wait_cb cb;
	bool was_set;

	/* Fast path: already signaled, no need to take the lock. */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return ret;

	lockmgr(fence->lock, LK_EXCLUSIVE);

	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
	    &fence->flags);

	/* Re-check under the lock: the fence may have signaled meanwhile. */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		goto out;

	/*
	 * First waiter turns signaling on.  If the driver reports that the
	 * fence can no longer signal asynchronously, signal it right here.
	 */
	if (!was_set && fence->ops->enable_signaling) {
		if (!fence->ops->enable_signaling(fence)) {
			dma_fence_signal_locked(fence);
			goto out;
		}
	}

	/* Zero timeout: signaling was enabled, but do not sleep. */
	if (timeout == 0) {
		ret = 0;
		goto out;
	}

	/* Queue an on-stack callback that wakes this task on signal. */
	cb.base.func = dma_fence_default_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &fence->cb_list);

	while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		/* wake_up_process() directly uses task_struct pointers as sleep identifiers */
		err = lksleep(current, fence->lock, intr ? PCATCH : 0, "dmafence", timeout);
		if (err == EINTR || err == ERESTART) {
			ret = -ERESTARTSYS;
			break;
		} else if (err == EWOULDBLOCK) {
			ret = 0;
			break;
		}
	}

	/* Unlink our callback unless the signaler already detached it
	 * (dma_fence_signal_locked() re-inits each node before calling). */
	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);
out:
	lockmgr(fence->lock, LK_RELEASE);

	return ret;
}
141 
/*
 * dma_fence_signal_locked - mark @fence signaled; caller holds fence->lock.
 *
 * Sets the SIGNALED bit, records the signal timestamp and invokes every
 * queued callback exactly once.  Returns 0 on success, -EINVAL when
 * @fence is NULL or was already signaled.
 */
int
dma_fence_signal_locked(struct dma_fence *fence)
{
	struct dma_fence_cb *cur, *tmp;
	struct list_head cb_list;

	if (fence == NULL)
		return -EINVAL;

	/* Only the first signaler proceeds; later calls are no-ops. */
	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return -EINVAL;

	/*
	 * Move the pending callbacks onto a local head so they can be run
	 * after the fence state is updated.  NOTE(review): this relies on
	 * list_replace() yielding an empty cb_list when fence->cb_list was
	 * empty — true for the Linux list.h implementation; confirm the
	 * local linux/list.h behaves the same.
	 */
	list_replace(&fence->cb_list, &cb_list);

	fence->timestamp = ktime_get();
	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);

	/* Detach each node before calling it, so concurrent waiters'
	 * list_empty() checks (see dma_fence_default_wait) see it gone. */
	list_for_each_entry_safe(cur, tmp, &cb_list, node) {
		INIT_LIST_HEAD(&cur->node);
		cur->func(fence, cur);
	}

	return 0;
}
166 
167 int
168 dma_fence_signal(struct dma_fence *fence)
169 {
170 	int r;
171 
172 	if (fence == NULL)
173 		return -EINVAL;
174 
175 	lockmgr(fence->lock, LK_EXCLUSIVE);
176 	r = dma_fence_signal_locked(fence);
177 	lockmgr(fence->lock, LK_RELEASE);
178 
179 	return r;
180 }
181 
/*
 * dma_fence_enable_sw_signaling - turn on signal delivery for @fence.
 *
 * The first caller sets DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT and, if the
 * fence is not yet signaled and the driver supplies an enable_signaling
 * hook, invokes that hook under fence->lock.  If the hook reports the
 * fence can no longer signal asynchronously, the fence is signaled
 * immediately.
 *
 * NOTE(review): the test_and_set_bit and SIGNALED test run before the
 * lock is taken — this mirrors the upstream Linux implementation; only
 * the enable_signaling() call itself requires the lock.
 */
void
dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
	if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
	    fence->ops->enable_signaling) {
		lockmgr(fence->lock, LK_EXCLUSIVE);
		if (!fence->ops->enable_signaling(fence))
			dma_fence_signal_locked(fence);
		lockmgr(fence->lock, LK_RELEASE);
	}
}
194 
/*
 * dma_fence_add_callback - queue @cb to run when @fence signals.
 *
 * On success, @func is stored in @cb and the callback is appended to
 * the fence's callback list; dma_fence_signal_locked() will invoke it
 * exactly once.  Returns 0 on success, -EINVAL when @fence or @func is
 * NULL, and -ENOENT when the fence is already signaled — in that case
 * the callback is NOT invoked and @cb->node is left initialized so a
 * later dma_fence_remove_callback() remains safe.
 */
int
dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
    dma_fence_func_t func)
{
	int ret = 0;
	bool was_set;

	if (WARN_ON(!fence || !func))
		return -EINVAL;

	/* Fast path: already signaled, never queue. */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		INIT_LIST_HEAD(&cb->node);
		return -ENOENT;
	}

	lockmgr(fence->lock, LK_EXCLUSIVE);

	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);

	/* Re-check under the lock; first caller enables signaling, and a
	 * hook returning false means the fence must be signaled now. */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		ret = -ENOENT;
	else if (!was_set && fence->ops->enable_signaling) {
		if (!fence->ops->enable_signaling(fence)) {
			dma_fence_signal_locked(fence);
			ret = -ENOENT;
		}
	}

	if (!ret) {
		cb->func = func;
		list_add_tail(&cb->node, &fence->cb_list);
	} else
		INIT_LIST_HEAD(&cb->node);
	lockmgr(fence->lock, LK_RELEASE);

	return ret;
}
232 
233 bool
234 dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
235 {
236 	bool ret;
237 
238 	lockmgr(fence->lock, LK_EXCLUSIVE);
239 
240 	ret = !list_empty(&cb->node);
241 	if (ret)
242 		list_del_init(&cb->node);
243 
244 	lockmgr(fence->lock, LK_RELEASE);
245 
246 	return ret;
247 }
248 
/*
 * dma_fence_free - free the memory backing @fence with kfree().
 */
void
dma_fence_free(struct dma_fence *fence)
{
	kfree(fence);
}
254