xref: /dragonfly/sys/dev/drm/include/linux/dma-fence.h (revision 527b525a)
1 /*
2  * Copyright (c) 2019 Jonathan Gray <jsg@openbsd.org>
3  * Copyright (c) 2020 François Tigeot <ftigeot@wolfpond.org>
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 
25 #ifndef _LINUX_DMA_FENCE_H_
26 #define _LINUX_DMA_FENCE_H_
27 
28 #include <linux/err.h>
29 #include <linux/wait.h>
30 #include <linux/list.h>
31 #include <linux/bitops.h>
32 #include <linux/kref.h>
33 #include <linux/sched.h>
34 #include <linux/printk.h>
35 #include <linux/rcupdate.h>
36 
/* Fence tracepoints are compiled out in this compat layer; swallow args. */
#define DMA_FENCE_TRACE(fence, fmt, args...) do {} while(0)
38 
39 struct dma_fence_cb;
40 
/*
 * struct dma_fence - software synchronization primitive tracking completion
 * of an asynchronous operation (Linux DRM compatibility layer).
 * Identified by the (context, seqno) pair; reference counted.
 */
struct dma_fence {
	struct kref refcount;		/* released via dma_fence_release() */
	struct lock *lock;		/* supplied by the dma_fence_init() caller */
	const struct dma_fence_ops *ops; /* driver-provided hooks */
	struct rcu_head rcu;		/* for deferred (RCU) freeing */
	struct list_head cb_list;	/* dma_fence_cb nodes added via dma_fence_add_callback() */
	u64 context;			/* execution context this fence belongs to */
	unsigned seqno;			/* sequence number within the context */
	unsigned long flags;		/* DMA_FENCE_FLAG_* bit positions */
	ktime_t timestamp;		/* NOTE(review): presumably set at signal time — confirm in dma_fence_signal() */
	int error;			/* set via dma_fence_set_error() */
};
53 
/* Bit positions within dma_fence.flags (used with test_bit() and friends). */
enum dma_fence_flag_bits {
	DMA_FENCE_FLAG_SIGNALED_BIT,		/* fence has been signaled */
	DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,	/* signaling has been enabled */
	DMA_FENCE_FLAG_USER_BITS, /* must always be last member */
};
59 
/* Callback signature used with dma_fence_add_callback(); `cb` is the
 * caller's dma_fence_cb node, typically embedded in a larger structure. */
typedef void (*dma_fence_func_t)(struct dma_fence *fence,
				 struct dma_fence_cb *cb);
62 
/* Callback entry queued on a fence via dma_fence_add_callback(). */
struct dma_fence_cb {
	struct list_head node;	/* linkage on dma_fence.cb_list */
	dma_fence_func_t func;	/* presumably invoked when the fence signals — Linux convention */
};
67 
/*
 * Driver-supplied fence operations.  Semantics follow the Linux dma-fence
 * API; hedged notes below should be confirmed against the implementation.
 */
struct dma_fence_ops {
	/* Human-readable driver name, for debugging. */
	const char * (*get_driver_name)(struct dma_fence *fence);
	/* Human-readable name of this fence's timeline/context. */
	const char * (*get_timeline_name)(struct dma_fence *fence);
	/* NOTE(review): presumably arms signal delivery (Linux semantics). */
	bool (*enable_signaling)(struct dma_fence *fence);
	/* Optional poll: true if already complete (see dma_fence_is_signaled()). */
	bool (*signaled)(struct dma_fence *fence);
	/* Blocking wait with timeout; `intr` selects interruptible sleep. */
	signed long (*wait)(struct dma_fence *fence,
			    bool intr, signed long timeout);
	/* Destructor; presumably run when the last reference is dropped. */
	void (*release)(struct dma_fence *fence);

	/* Optional debug/introspection helpers. */
	int (*fill_driver_data)(struct dma_fence *fence, void *data, int size);
	void (*fence_value_str)(struct dma_fence *fence, char *str, int size);
	void (*timeline_value_str)(struct dma_fence *fence,
				   char *str, int size);
};
82 
/*
 * Set up a fence protected by `lock` and identified by (context, seqno).
 * NOTE(review): the struct stores `struct lock *` while this takes
 * spinlock_t * — presumably the same type in DragonFly's compat layer.
 */
void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
		    spinlock_t *lock, u64 context, unsigned seqno);

/* kref release function; dma_fence_put() hands it to kref_put(). */
void dma_fence_release(struct kref *kref);
87 
88 static inline struct dma_fence *
89 dma_fence_get(struct dma_fence *fence)
90 {
91 	if (fence)
92 		kref_get(&fence->refcount);
93 	return fence;
94 }
95 
96 static inline struct dma_fence *
97 dma_fence_get_rcu(struct dma_fence *fence)
98 {
99 	if (fence)
100 		kref_get(&fence->refcount);
101 	return fence;
102 }
103 
104 static inline void
105 dma_fence_put(struct dma_fence *fence)
106 {
107 	if (fence)
108 		kref_put(&fence->refcount, dma_fence_release);
109 }
110 
/* Mark the fence as signaled; the _locked variant presumably expects
 * fence->lock to be held by the caller — confirm in the implementation. */
int dma_fence_signal(struct dma_fence *fence);
int dma_fence_signal_locked(struct dma_fence *fence);
113 
114 static inline bool
115 dma_fence_is_signaled(struct dma_fence *fence)
116 {
117 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
118 		return true;
119 
120 	if (fence->ops->signaled && fence->ops->signaled(fence)) {
121 		dma_fence_signal(fence);
122 		return true;
123 	}
124 
125 	return false;
126 }
127 
/* Opt in to software signaling for this fence. */
void dma_fence_enable_sw_signaling(struct dma_fence *fence);

/* Sleep until the fence signals or `timeout` expires; `intr` selects an
 * interruptible sleep.  NOTE(review): confirm timeout units (jiffies/ticks)
 * against the implementation. */
signed long dma_fence_default_wait(struct dma_fence *fence,
				   bool intr, signed long timeout);
signed long dma_fence_wait_timeout(struct dma_fence *,
				   bool intr, signed long timeout);
134 
135 static inline signed long
136 dma_fence_wait(struct dma_fence *fence, bool intr)
137 {
138 	signed long ret;
139 
140 	ret = dma_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
141 
142 	if (ret < 0)
143 		return ret;
144 
145 	return 0;
146 }
147 
/* Queue `cb` to run `func` when the fence signals.  Returns 0 on success;
 * presumably a negative errno if already signaled (Linux convention) —
 * confirm in the implementation. */
int dma_fence_add_callback(struct dma_fence *fence,
			   struct dma_fence_cb *cb,
			   dma_fence_func_t func);

/* Unlink a previously added callback; presumably returns true if it had
 * not yet run — confirm in the implementation. */
bool dma_fence_remove_callback(struct dma_fence *fence,
			       struct dma_fence_cb *cb);

/* Reserve `num` consecutive fence context identifiers. */
u64 dma_fence_context_alloc(unsigned num);
156 
157 static inline bool
158 dma_fence_is_later(struct dma_fence *a, struct dma_fence *b)
159 {
160 	return (a->seqno > b->seqno);
161 }
162 
/*
 * Record a completion error on the fence before it is signaled.
 * NOTE(review): Linux requires a negative errno (and forbids 0 and
 * -EAGAIN); this compat version does not enforce that.
 */
static inline void
dma_fence_set_error(struct dma_fence *fence, int error)
{
	fence->error = error;
}
168 
169 static inline struct dma_fence *
170 dma_fence_get_rcu_safe(struct dma_fence **dfp)
171 {
172 	struct dma_fence *fence;
173 	if (dfp == NULL)
174 		return NULL;
175 	fence = *dfp;
176 	if (fence)
177 		kref_get(&fence->refcount);
178 	return fence;
179 }
180 
/* Free a fence (RCU-deferred in Linux — confirm compat behavior). */
void dma_fence_free(struct dma_fence *fence);
182 
183 #endif	/* _LINUX_DMA_FENCE_H_ */
184