/* Public domain. */

#ifndef _LINUX_DMA_FENCE_H
#define _LINUX_DMA_FENCE_H

#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/bug.h>
#include <linux/sched.h>
#include <linux/rcupdate.h>

#define DMA_FENCE_TRACE(fence, fmt, args...) do {} while(0)

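/*
 * Core fence object.  refcount manages the fence's lifetime, ops points
 * at the driver's callback table, and lock protects cb_list and the
 * signaling state.  context and seqno order fences on the same timeline;
 * error and timestamp are filled in when the fence signals.
 */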
struct dma_fence {
	struct kref refcount;
	const struct dma_fence_ops *ops;
	unsigned long flags;
	uint64_t context;
	uint64_t seqno;
	struct mutex *lock;
	struct list_head cb_list;
	int error;
	struct rcu_head rcu;
	ktime_t timestamp;
};

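/*
 * Bits used in dma_fence.flags: SIGNALED is set once the fence has
 * signaled, TIMESTAMP once the signaling time has been recorded and
 * ENABLE_SIGNAL once a waiter has asked the driver to enable signaling.
 * Bits from DMA_FENCE_FLAG_USER_BITS upward are left to drivers.
 */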
enum dma_fence_flag_bits {
	DMA_FENCE_FLAG_SIGNALED_BIT,
	DMA_FENCE_FLAG_TIMESTAMP_BIT,
	DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
	DMA_FENCE_FLAG_USER_BITS,
};

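/*
 * Driver-supplied operations.  The helpers below treat enable_signaling,
 * signaled, wait and release as optional: a missing wait falls back to
 * dma_fence_default_wait() and a missing release falls back to free(9).
 */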
struct dma_fence_ops {
	const char * (*get_driver_name)(struct dma_fence *);
	const char * (*get_timeline_name)(struct dma_fence *);
	bool (*enable_signaling)(struct dma_fence *);
	bool (*signaled)(struct dma_fence *);
	long (*wait)(struct dma_fence *, bool, long);
	void (*release)(struct dma_fence *);
};

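/*
 * Callback record queued on a fence's cb_list by dma_fence_add_callback();
 * func runs when the fence signals, with fence->lock held.
 */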
struct dma_fence_cb;
typedef void (*dma_fence_func_t)(struct dma_fence *fence, struct dma_fence_cb *cb);

struct dma_fence_cb {
	struct list_head node;
	dma_fence_func_t func;
};

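/*
 * Allocate a range of timeline (context) numbers for a driver; the
 * argument is how many consecutive contexts are wanted.
 */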
uint64_t dma_fence_context_alloc(unsigned int);

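/*
 * Reference counting: dma_fence_get()/dma_fence_put() take and drop a
 * reference on the fence.  The _rcu variants exist for Linux API
 * compatibility and are plain kref gets here.
 */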
static inline struct dma_fence *
dma_fence_get(struct dma_fence *fence)
{
	if (fence)
		kref_get(&fence->refcount);
	return fence;
}

static inline struct dma_fence *
dma_fence_get_rcu(struct dma_fence *fence)
{
	if (fence)
		kref_get(&fence->refcount);
	return fence;
}

static inline struct dma_fence *
dma_fence_get_rcu_safe(struct dma_fence **dfp)
{
	struct dma_fence *fence;
	if (dfp == NULL)
		return NULL;
	fence = *dfp;
	if (fence)
		kref_get(&fence->refcount);
	return fence;
}

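/*
 * kref release hook: hand the fence back to the driver's release()
 * method, or free it directly when the driver does not provide one.
 */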
static inline void
dma_fence_release(struct kref *ref)
{
	struct dma_fence *fence = container_of(ref, struct dma_fence, refcount);
	if (fence->ops && fence->ops->release)
		fence->ops->release(fence);
	else
		free(fence, M_DRM, 0);
}

static inline void
dma_fence_free(struct dma_fence *fence)
{
	free(fence, M_DRM, 0);
}

static inline void
dma_fence_put(struct dma_fence *fence)
{
	if (fence)
		kref_put(&fence->refcount, dma_fence_release);
}

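/*
 * Signal the fence: mark it signaled, record the timestamp and run the
 * queued callbacks.  Returns -EINVAL if the fence was already signaled.
 * The _locked variant expects the caller to hold fence->lock.
 */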
static inline int
dma_fence_signal_locked(struct dma_fence *fence)
{
	struct dma_fence_cb *cur, *tmp;
	struct list_head cb_list;

	if (fence == NULL)
		return -EINVAL;

	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return -EINVAL;

	list_replace(&fence->cb_list, &cb_list);

	fence->timestamp = ktime_get();
	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);

	list_for_each_entry_safe(cur, tmp, &cb_list, node) {
		INIT_LIST_HEAD(&cur->node);
		cur->func(fence, cur);
	}

	return 0;
}

static inline int
dma_fence_signal(struct dma_fence *fence)
{
	int r;

	if (fence == NULL)
		return -EINVAL;

	mtx_enter(fence->lock);
	r = dma_fence_signal_locked(fence);
	mtx_leave(fence->lock);

	return r;
}

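/*
 * Test whether the fence has signaled.  If the driver provides a
 * signaled() hook this may poll it and signal the fence on the spot;
 * dma_fence_is_signaled_locked() does the same with fence->lock held.
 */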
static inline bool
dma_fence_is_signaled(struct dma_fence *fence)
{
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return true;

	if (fence->ops->signaled && fence->ops->signaled(fence)) {
		dma_fence_signal(fence);
		return true;
	}

	return false;
}

static inline bool
dma_fence_is_signaled_locked(struct dma_fence *fence)
{
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return true;

	if (fence->ops->signaled && fence->ops->signaled(fence)) {
		dma_fence_signal_locked(fence);
		return true;
	}

	return false;
}

long dma_fence_default_wait(struct dma_fence *, bool, long);

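/*
 * Wait for the fence to signal.  timeout follows the Linux convention:
 * negative values are rejected, MAX_SCHEDULE_TIMEOUT means wait forever,
 * and the return value is the remaining timeout (zero on expiry) or a
 * negative error code.
 */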
static inline long
dma_fence_wait_timeout(struct dma_fence *fence, bool intr, long timeout)
{
	if (timeout < 0)
		return -EINVAL;

	if (fence->ops->wait)
		return fence->ops->wait(fence, intr, timeout);
	else
		return dma_fence_default_wait(fence, intr, timeout);
}

static inline long
dma_fence_wait(struct dma_fence *fence, bool intr)
{
	long ret;

	ret = dma_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	return 0;
}

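/*
 * Ask the driver to enable signaling (e.g. turn on an interrupt) so the
 * fence will signal without being polled.  If the driver reports that
 * signaling cannot be enabled, the fence is signaled immediately.
 */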
static inline void
dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
	if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
	    fence->ops->enable_signaling) {
		mtx_enter(fence->lock);
		if (!fence->ops->enable_signaling(fence))
			dma_fence_signal_locked(fence);
		mtx_leave(fence->lock);
	}
}

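/*
 * Initialize an embedded fence with the driver's ops table, the lock
 * that will protect it, and its context/seqno position on the timeline.
 * The fence starts out with a single reference.
 */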
static inline void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
    struct mutex *lock, uint64_t context, uint64_t seqno)
{
	fence->ops = ops;
	fence->lock = lock;
	fence->context = context;
	fence->seqno = seqno;
	fence->flags = 0;
	fence->error = 0;
	kref_init(&fence->refcount);
	INIT_LIST_HEAD(&fence->cb_list);
}

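/*
 * Queue a callback to run when the fence signals.  Returns -ENOENT if
 * the fence has already signaled (or signals while signaling is being
 * enabled); in that case the callback is not queued and cb->node is left
 * empty so dma_fence_remove_callback() remains safe to call.
 */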
static inline int
dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
    dma_fence_func_t func)
{
	int ret = 0;
	bool was_set;

	if (WARN_ON(!fence || !func))
		return -EINVAL;

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		INIT_LIST_HEAD(&cb->node);
		return -ENOENT;
	}

	mtx_enter(fence->lock);

	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		ret = -ENOENT;
	else if (!was_set && fence->ops->enable_signaling) {
		if (!fence->ops->enable_signaling(fence)) {
			dma_fence_signal_locked(fence);
			ret = -ENOENT;
		}
	}

	if (!ret) {
		cb->func = func;
		list_add_tail(&cb->node, &fence->cb_list);
	} else
		INIT_LIST_HEAD(&cb->node);
	mtx_leave(fence->lock);

	return ret;
}

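/*
 * Unregister a callback that has not run yet.  Returns true if the
 * callback was still queued and has been removed, false if it has
 * already run (or was never queued).
 */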
static inline bool
dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	bool ret;

	mtx_enter(fence->lock);

	ret = !list_empty(&cb->node);
	if (ret)
		list_del_init(&cb->node);

	mtx_leave(fence->lock);

	return ret;
}

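/*
 * Returns true if fence a is later on its timeline than fence b.  Only
 * meaningful for fences from the same context; this is a plain seqno
 * comparison with no wraparound handling.
 */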
static inline bool
dma_fence_is_later(struct dma_fence *a, struct dma_fence *b)
{
	return (a->seqno > b->seqno);
}

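/*
 * Record an error status on the fence; in the Linux API this is expected
 * to be set before the fence is signaled.
 */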
static inline void
dma_fence_set_error(struct dma_fence *fence, int error)
{
	fence->error = error;
}

long dma_fence_wait_any_timeout(struct dma_fence **, uint32_t, bool, long,
    uint32_t *);

struct dma_fence *dma_fence_get_stub(void);

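/*
 * Minimal usage sketch (hypothetical driver code, not part of this file):
 * a fence embedded in a driver object is initialized against the driver's
 * lock and timeline, signaled from the completion path and waited on by a
 * consumer.  "mydrv_job", "mydrv_fence_ops", "ctx", "seqno" and "timeout"
 * are illustrative names only.
 *
 *	struct mydrv_job {
 *		struct dma_fence fence;		// embedded fence
 *		struct mutex lock;		// protects it, set up with mtx_init(9)
 *	};
 *
 *	// submission path
 *	dma_fence_init(&job->fence, &mydrv_fence_ops, &job->lock, ctx, seqno);
 *	dma_fence_get(&job->fence);		// reference handed to the consumer
 *
 *	// completion (or error) path
 *	if (error)
 *		dma_fence_set_error(&job->fence, -EIO);
 *	dma_fence_signal(&job->fence);
 *
 *	// consumer: interruptible wait, then drop the reference
 *	if (dma_fence_wait_timeout(&job->fence, true, timeout) <= 0)
 *		printf("mydrv: fence timed out or failed\n");
 *	dma_fence_put(&job->fence);
 */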
#endif