xref: /dragonfly/sys/dev/drm/linux_fence.c (revision 65030a6a)
1 /*
2  * Copyright (c) 2019 Jonathan Gray <jsg@openbsd.org>
3  * Copyright (c) 2020 François Tigeot <ftigeot@wolfpond.org>
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 
25 #include <linux/slab.h>
26 #include <linux/dma-fence.h>
27 
28 void
29 dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
30     spinlock_t *lock, u64 context, unsigned seqno)
31 {
32 	fence->ops = ops;
33 	fence->lock = lock;
34 	fence->context = context;
35 	fence->seqno = seqno;
36 	fence->flags = 0;
37 	fence->error = 0;
38 	kref_init(&fence->refcount);
39 	INIT_LIST_HEAD(&fence->cb_list);
40 }
41 
42 void
43 dma_fence_release(struct kref *ref)
44 {
45 	struct dma_fence *fence = container_of(ref, struct dma_fence, refcount);
46 
47 	if (fence->ops && fence->ops->release)
48 		fence->ops->release(fence);
49 	else
50 		kfree(fence);
51 }
52 
53 long
54 dma_fence_wait_timeout(struct dma_fence *fence, bool intr, long timeout)
55 {
56 	if (timeout < 0)
57 		return -EINVAL;
58 
59 	if (fence->ops->wait)
60 		return fence->ops->wait(fence, intr, timeout);
61 	else
62 		return dma_fence_default_wait(fence, intr, timeout);
63 }
64 
65 u64
66 dma_fence_context_alloc(unsigned n)
67 {
68 	static atomic64_t next_context = { 0 };
69 
70 	return atomic64_add_return(n, &next_context) - n;
71 }
72 
73 struct default_wait_cb {
74 	struct dma_fence_cb base;
75 	struct task_struct *task;
76 };
77 
78 static void
79 dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
80 {
81 	struct default_wait_cb *wait =
82 		container_of(cb, struct default_wait_cb, base);
83 
84 	wake_up_process(wait->task);
85 }
86 
87 long
88 dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
89 {
90 	long ret = timeout ? timeout : 1;
91 	int err;
92 	struct default_wait_cb cb;
93 	bool was_set;
94 
95 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
96 		return ret;
97 
98 	if (intr && signal_pending(current))
99 		return -ERESTARTSYS;
100 
101 	lockmgr(fence->lock, LK_EXCLUSIVE);
102 
103 	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
104 	    &fence->flags);
105 
106 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
107 		goto out;
108 
109 	if (!was_set && fence->ops->enable_signaling) {
110 		if (!fence->ops->enable_signaling(fence)) {
111 			dma_fence_signal_locked(fence);
112 			goto out;
113 		}
114 	}
115 
116 	cb.base.func = dma_fence_default_wait_cb;
117 	cb.task = current;
118 	list_add(&cb.base.node, &fence->cb_list);
119 
120 	if (timeout <= 0)
121 		timeout = 1;
122 
123 	while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
124 		if (intr) {
125 			__set_current_state(TASK_INTERRUPTIBLE);
126 		} else {
127 			__set_current_state(TASK_UNINTERRUPTIBLE);
128 		}
129 		/* wake_up_process() directly uses task_struct pointers as sleep identifiers */
130 		err = lksleep(current, fence->lock, intr ? PCATCH : 0, "dmafence", timeout);
131 		if (err == EINTR || err == ERESTART) {
132 			ret = -ERESTARTSYS;
133 			break;
134 		} else if (err == EWOULDBLOCK) {
135 			ret = 0;
136 			break;
137 		}
138 	}
139 
140 	if (!list_empty(&cb.base.node))
141 		list_del(&cb.base.node);
142 	__set_current_state(TASK_RUNNING);
143 
144 out:
145 	lockmgr(fence->lock, LK_RELEASE);
146 
147 	return ret;
148 }
149 
150 int
151 dma_fence_signal_locked(struct dma_fence *fence)
152 {
153 	struct dma_fence_cb *cur, *tmp;
154 
155 	if (fence == NULL)
156 		return -EINVAL;
157 
158 	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
159 		return -EINVAL;
160 
161 	list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
162 		list_del_init(&cur->node);
163 		cur->func(fence, cur);
164 	}
165 
166 	return 0;
167 }
168 
169 int
170 dma_fence_signal(struct dma_fence *fence)
171 {
172 	if (fence == NULL)
173 		return -EINVAL;
174 
175 	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
176 		return -EINVAL;
177 
178 	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
179 		struct dma_fence_cb *cur, *tmp;
180 
181 		lockmgr(fence->lock, LK_EXCLUSIVE);
182 		list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
183 			list_del_init(&cur->node);
184 			cur->func(fence, cur);
185 		}
186 		lockmgr(fence->lock, LK_RELEASE);
187 	}
188 
189 	return 0;
190 }
191 
192 void
193 dma_fence_enable_sw_signaling(struct dma_fence *fence)
194 {
195 	if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
196 	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
197 	    fence->ops->enable_signaling) {
198 		lockmgr(fence->lock, LK_EXCLUSIVE);
199 		if (!fence->ops->enable_signaling(fence))
200 			dma_fence_signal_locked(fence);
201 		lockmgr(fence->lock, LK_RELEASE);
202 	}
203 }
204 
205 int
206 dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
207     dma_fence_func_t func)
208 {
209 	int ret = 0;
210 	bool was_set;
211 
212 	if (WARN_ON(!fence || !func))
213 		return -EINVAL;
214 
215 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
216 		INIT_LIST_HEAD(&cb->node);
217 		return -ENOENT;
218 	}
219 
220 	lockmgr(fence->lock, LK_EXCLUSIVE);
221 
222 	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);
223 
224 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
225 		ret = -ENOENT;
226 	else if (!was_set && fence->ops->enable_signaling) {
227 		if (!fence->ops->enable_signaling(fence)) {
228 			dma_fence_signal_locked(fence);
229 			ret = -ENOENT;
230 		}
231 	}
232 
233 	if (!ret) {
234 		cb->func = func;
235 		list_add_tail(&cb->node, &fence->cb_list);
236 	} else
237 		INIT_LIST_HEAD(&cb->node);
238 	lockmgr(fence->lock, LK_RELEASE);
239 
240 	return ret;
241 }
242 
243 bool
244 dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
245 {
246 	bool ret;
247 
248 	lockmgr(fence->lock, LK_EXCLUSIVE);
249 
250 	ret = !list_empty(&cb->node);
251 	if (ret)
252 		list_del_init(&cb->node);
253 
254 	lockmgr(fence->lock, LK_RELEASE);
255 
256 	return ret;
257 }
258