xref: /dragonfly/sys/dev/drm/linux_fence.c (revision 655933d6)
/*
 * Copyright (c) 2019-2020 Jonathan Gray <jsg@openbsd.org>
 * Copyright (c) 2020 François Tigeot <ftigeot@wolfpond.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/dma-fence.h>

/*
 * dma_fence_init - initialize a newly allocated fence.  The caller
 * provides the ops table, the lock protecting the fence, its timeline
 * context and its sequence number; the reference count starts at one.
 */
void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
    spinlock_t *lock, u64 context, unsigned seqno)
{
	fence->ops = ops;
	fence->lock = lock;
	fence->context = context;
	fence->seqno = seqno;
	fence->flags = 0;
	fence->error = 0;
	kref_init(&fence->refcount);
	INIT_LIST_HEAD(&fence->cb_list);
}

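/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * driver embeds a struct dma_fence in its own job structure, supplies
 * a dma_fence_ops table and calls dma_fence_init() with its own lock,
 * context and sequence number.  All mydrv_* names are illustrative.
 *
 *	static const char *
 *	mydrv_get_driver_name(struct dma_fence *f)
 *	{
 *		return "mydrv";
 *	}
 *
 *	static const char *
 *	mydrv_get_timeline_name(struct dma_fence *f)
 *	{
 *		return "mydrv.ring0";
 *	}
 *
 *	static const struct dma_fence_ops mydrv_fence_ops = {
 *		.get_driver_name = mydrv_get_driver_name,
 *		.get_timeline_name = mydrv_get_timeline_name,
 *	};
 *
 *	dma_fence_init(&job->fence, &mydrv_fence_ops, &ring->fence_lock,
 *	    ring->fence_context, ++ring->next_seqno);
 */
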
/*
 * dma_fence_release - kref release function; runs the ops' release
 * hook if one is set, otherwise frees the fence with kfree().
 */
void
dma_fence_release(struct kref *ref)
{
	struct dma_fence *fence = container_of(ref, struct dma_fence, refcount);

	if (fence->ops && fence->ops->release)
		fence->ops->release(fence);
	else
		kfree(fence);
}

/*
 * dma_fence_wait_timeout - sleep until the fence signals or the
 * timeout (in jiffies) expires.  Returns the remaining timeout on
 * success, 0 on timeout and a negative errno on error.
 */
long
dma_fence_wait_timeout(struct dma_fence *fence, bool intr, long timeout)
{
	if (timeout < 0)
		return -EINVAL;

	if (fence->ops->wait)
		return fence->ops->wait(fence, intr, timeout);
	else
		return dma_fence_default_wait(fence, intr, timeout);
}

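/*
 * Usage sketch (hypothetical caller, not part of this file): wait up
 * to one second for a fence, interruptibly.  A negative return is an
 * error (e.g. -ERESTARTSYS on signal delivery), 0 means the timeout
 * expired, and a positive value is the timeout that was left.
 *
 *	long left = dma_fence_wait_timeout(fence, true, HZ);
 *	if (left < 0)
 *		return left;		// interrupted or invalid argument
 *	if (left == 0)
 *		return -ETIMEDOUT;	// fence did not signal in time
 */
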
/*
 * Monotonically increasing source of fence context numbers.  It
 * starts at 1 so that context 0 is never handed out.
 */
static atomic64_t drm_fence_context_count = ATOMIC_INIT(1);

/*
 * dma_fence_context_alloc - reserve num consecutive fence context
 * numbers and return the first one.
 */
u64
dma_fence_context_alloc(unsigned num)
{
	return atomic64_add_return(num, &drm_fence_context_count) - num;
}

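/*
 * Usage sketch (hypothetical, not part of this file): a driver
 * typically reserves one context per timeline at init time and then
 * numbers the fences on that timeline with increasing seqnos.
 *
 *	ring->fence_context = dma_fence_context_alloc(1);
 *	ring->next_seqno = 0;
 */
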
/*
 * Per-waiter bookkeeping for the default wait paths: the fence
 * callback plus the task to wake when it fires.
 */
struct default_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void
dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct default_wait_cb *wait =
		container_of(cb, struct default_wait_cb, base);

	wake_up_process(wait->task);
}

/*
 * dma_fence_default_wait - wait path used when the ops provide no
 * wait hook.  Enables signaling if needed, then sleeps on the fence
 * lock until the fence signals, the wait is interrupted or the
 * timeout expires.
 */
long
dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
{
	long ret = timeout ? timeout : 1;
	unsigned long end;
	int err;
	struct default_wait_cb cb;
	bool was_set;

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return ret;

	lockmgr(fence->lock, LK_EXCLUSIVE);

	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
	    &fence->flags);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		goto out;

	if (!was_set && fence->ops->enable_signaling) {
		if (!fence->ops->enable_signaling(fence)) {
			dma_fence_signal_locked(fence);
			goto out;
		}
	}

	if (timeout == 0) {
		ret = 0;
		goto out;
	}

	cb.base.func = dma_fence_default_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &fence->cb_list);

	end = jiffies + timeout;
	for (ret = timeout; ret > 0; ret = MAX(0, end - jiffies)) {
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
			break;
		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);
		/* wake_up_process() directly uses task_struct pointers as sleep identifiers */
		err = lksleep(current, fence->lock, intr ? PCATCH : 0, "dmafence", ret);
		if (err == EINTR || err == ERESTART) {
			ret = -ERESTARTSYS;
			break;
		}
	}

	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);
	__set_current_state(TASK_RUNNING);
out:
	lockmgr(fence->lock, LK_RELEASE);
	return ret;
}

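/*
 * Sketch of the enable_signaling contract relied on above
 * (hypothetical driver callback, not part of this file): arm whatever
 * interrupt or polling mechanism will eventually call
 * dma_fence_signal(), and return false if the fence turns out to be
 * already complete so the caller can signal it immediately.  The
 * mydrv_* helpers are assumptions for illustration.
 *
 *	static bool
 *	mydrv_enable_signaling(struct dma_fence *f)
 *	{
 *		struct mydrv_ring *ring = to_mydrv_ring(f);
 *
 *		if (mydrv_seqno_passed(ring, f->seqno))
 *			return false;	// already done, no irq needed
 *		mydrv_arm_fence_irq(ring);
 *		return true;
 *	}
 */
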
/*
 * Check whether any fence in the array has signaled; if so, report
 * its index through idx.
 */
static bool
dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
    uint32_t *idx)
{
	int i;

	for (i = 0; i < count; ++i) {
		struct dma_fence *fence = fences[i];
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
			if (idx)
				*idx = i;
			return true;
		}
	}
	return false;
}

/*
 * dma_fence_wait_any_timeout - wait until any fence in the array
 * signals or the timeout expires.  Returns the remaining timeout (or
 * 1 when called with timeout 0 and a fence already signaled), 0 on
 * timeout, or a negative errno; the index of the first signaled fence
 * is reported through idx.
 */
long
dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
    bool intr, long timeout, uint32_t *idx)
{
	struct default_wait_cb *cb;
	long ret = timeout;
	unsigned long end;
	int i, err;

	if (timeout == 0) {
		for (i = 0; i < count; i++) {
			if (dma_fence_is_signaled(fences[i])) {
				if (idx)
					*idx = i;
				return 1;
			}
		}
		return 0;
	}

	cb = kcalloc(count, sizeof(struct default_wait_cb), GFP_KERNEL);
	if (cb == NULL)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		struct dma_fence *fence = fences[i];
		cb[i].task = current;
		if (dma_fence_add_callback(fence, &cb[i].base,
		    dma_fence_default_wait_cb)) {
			/* Fence already signaled: no wait needed. */
			if (idx)
				*idx = i;
			goto cb_cleanup;
		}
	}

	end = jiffies + timeout;
	for (ret = timeout; ret > 0; ret = MAX(0, end - jiffies)) {
		if (dma_fence_test_signaled_any(fences, count, idx))
			break;
		err = tsleep(current, intr ? PCATCH : 0, "dfwat", ret);
		if (err == EINTR || err == ERESTART) {
			ret = -ERESTARTSYS;
			break;
		}
	}

cb_cleanup:
	while (i-- > 0)
		dma_fence_remove_callback(fences[i], &cb[i].base);
	kfree(cb);
	return ret;
}

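/*
 * Usage sketch (hypothetical caller, not part of this file): wait on
 * several fences at once and learn which one signaled first.
 *
 *	uint32_t first;
 *	long left = dma_fence_wait_any_timeout(fences, nfences, true,
 *	    HZ, &first);
 *	if (left > 0)
 *		kprintf("fence %u signaled first\n", first);
 */
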
/*
 * dma_fence_signal_locked - signal the fence with fence->lock already
 * held, invoking every pending callback exactly once.  Returns
 * -EINVAL if the fence is NULL or was already signaled.
 */
int
dma_fence_signal_locked(struct dma_fence *fence)
{
	struct dma_fence_cb *cur, *tmp;
	struct list_head cb_list;

	if (fence == NULL)
		return -EINVAL;

	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return -EINVAL;

	/* Detach the callback list so the entries can be run safely. */
	list_replace(&fence->cb_list, &cb_list);

	fence->timestamp = ktime_get();
	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);

	list_for_each_entry_safe(cur, tmp, &cb_list, node) {
		INIT_LIST_HEAD(&cur->node);
		cur->func(fence, cur);
	}

	return 0;
}

/*
 * dma_fence_signal - like dma_fence_signal_locked(), but takes and
 * releases fence->lock itself.
 */
int
dma_fence_signal(struct dma_fence *fence)
{
	int r;

	if (fence == NULL)
		return -EINVAL;

	lockmgr(fence->lock, LK_EXCLUSIVE);
	r = dma_fence_signal_locked(fence);
	lockmgr(fence->lock, LK_RELEASE);

	return r;
}

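/*
 * Usage sketch (hypothetical completion path, not part of this file):
 * when the hardware reports completion, the driver simply calls
 * dma_fence_signal(); it acquires fence->lock and runs the registered
 * callbacks on the first call only.  The mydrv_* names are assumed.
 *
 *	static void
 *	mydrv_ring_irq(struct mydrv_ring *ring)
 *	{
 *		struct dma_fence *f;
 *
 *		while ((f = mydrv_next_finished_fence(ring)) != NULL) {
 *			dma_fence_signal(f);
 *			dma_fence_put(f);
 *		}
 *	}
 */
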
/*
 * dma_fence_enable_sw_signaling - ask the driver to start delivering
 * completion events; if the ops report the fence already complete,
 * signal it immediately.
 */
void
dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
	if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
	    fence->ops->enable_signaling) {
		lockmgr(fence->lock, LK_EXCLUSIVE);
		if (!fence->ops->enable_signaling(fence))
			dma_fence_signal_locked(fence);
		lockmgr(fence->lock, LK_RELEASE);
	}
}

/*
 * dma_fence_add_callback - register func to run once when the fence
 * signals.  Returns -ENOENT (with cb->node left initialized) if the
 * fence has already signaled.
 */
int
dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
    dma_fence_func_t func)
{
	int ret = 0;
	bool was_set;

	if (WARN_ON(!fence || !func))
		return -EINVAL;

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		INIT_LIST_HEAD(&cb->node);
		return -ENOENT;
	}

	lockmgr(fence->lock, LK_EXCLUSIVE);

	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		ret = -ENOENT;
	else if (!was_set && fence->ops->enable_signaling) {
		if (!fence->ops->enable_signaling(fence)) {
			dma_fence_signal_locked(fence);
			ret = -ENOENT;
		}
	}

	if (!ret) {
		cb->func = func;
		list_add_tail(&cb->node, &fence->cb_list);
	} else
		INIT_LIST_HEAD(&cb->node);
	lockmgr(fence->lock, LK_RELEASE);

	return ret;
}

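/*
 * Usage sketch (hypothetical caller, not part of this file): register
 * a callback and handle the already-signaled case, which
 * dma_fence_add_callback() reports as -ENOENT, by running the handler
 * directly.  The mydrv_job fields are assumptions for illustration.
 *
 *	static void
 *	mydrv_fence_done(struct dma_fence *f, struct dma_fence_cb *cb)
 *	{
 *		struct mydrv_job *job = container_of(cb, struct mydrv_job, cb);
 *
 *		wake_up_process(job->task);
 *	}
 *
 *	if (dma_fence_add_callback(fence, &job->cb, mydrv_fence_done) == -ENOENT)
 *		mydrv_fence_done(fence, &job->cb);
 */
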
/*
 * dma_fence_remove_callback - unregister a callback; returns true if
 * it was still pending and has now been removed.
 */
bool
dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	bool ret;

	lockmgr(fence->lock, LK_EXCLUSIVE);

	ret = !list_empty(&cb->node);
	if (ret)
		list_del_init(&cb->node);

	lockmgr(fence->lock, LK_RELEASE);

	return ret;
}

/*
 * dma_fence_free - release the memory backing a fence allocated with
 * kmalloc().
 */
void
dma_fence_free(struct dma_fence *fence)
{
	kfree(fence);
}