/*
 * Header file for reservations for dma-buf and ttm
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Copyright (C) 2012-2013 Canonical Ltd
 * Copyright (C) 2012 Texas Instruments
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
 * Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef _LINUX_RESERVATION_H
#define _LINUX_RESERVATION_H

#include <linux/ww_mutex.h>
#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <linux/rcupdate.h>

extern struct ww_class reservation_ww_class;
49 
/**
 * struct dma_resv_list - a list of shared fences
 * @rcu: for internal use
 * @shared_count: number of fences in the shared table
 * @shared_max: allocated size of the shared table, for growing it
 * @shared: shared fence table
 */
struct dma_resv_list {
	struct rcu_head rcu;
	u32 shared_count, shared_max;
	struct dma_fence __rcu *shared[];
};
62 
/**
 * struct dma_resv - a reservation object manages fences for a buffer
 * @lock: update side lock
 * @seq: sequence count for managing RCU read-side synchronization
 * @fence_excl: the exclusive fence, if there is one currently
 * @fence: list of current shared fences
 */
struct dma_resv {
	struct ww_mutex lock;
	seqcount_ww_mutex_t seq;

	struct dma_fence __rcu *fence_excl;
	struct dma_resv_list __rcu *fence;
};

#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
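
/*
 * Usage sketch (illustrative, not part of the original header): drivers
 * typically embed a struct dma_resv in their buffer object and set it up
 * at creation time. "struct my_buffer" and its helpers are hypothetical.
 *
 *	struct my_buffer {
 *		struct dma_resv resv;
 *		void *vaddr;
 *	};
 *
 *	static void my_buffer_init(struct my_buffer *buf)
 *	{
 *		dma_resv_init(&buf->resv);
 *	}
 *
 *	static void my_buffer_fini(struct my_buffer *buf)
 *	{
 *		dma_resv_fini(&buf->resv);
 *	}
 */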
80 
/**
 * dma_resv_get_list - get the reservation object's
 * shared fence list, with update-side lock held
 * @obj: the reservation object
 *
 * Returns the shared fence list.  Does NOT take references to
 * the fences.  The obj->lock must be held.
 */
static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj)
{
	return rcu_dereference_protected(obj->fence,
					 dma_resv_held(obj));
}
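
/*
 * Usage sketch (illustrative): walking the shared fence table with the
 * update-side lock held. Since dma_resv_held() is satisfied, each slot
 * may be dereferenced with rcu_dereference_protected() without taking a
 * reference.
 *
 *	struct dma_resv_list *list;
 *	unsigned int i;
 *
 *	dma_resv_lock(obj, NULL);
 *	list = dma_resv_get_list(obj);
 *	for (i = 0; list && i < list->shared_count; ++i) {
 *		struct dma_fence *fence =
 *			rcu_dereference_protected(list->shared[i],
 *						  dma_resv_held(obj));
 *		(inspect fence here)
 *	}
 *	dma_resv_unlock(obj);
 */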
94 
/**
 * dma_resv_lock - lock the reservation object
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Locks the reservation object for exclusive access and modification. Note
 * that the lock is only against other writers; readers will run concurrently
 * with a writer under RCU. The seqlock is used to notify readers if they
 * overlap with a writer.
 *
 * As the reservation object may be locked by multiple parties in an
 * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
 * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
 * object may be locked by itself by passing NULL as @ctx.
 *
 * Returns 0 on success, or -EDEADLK if a deadlock cycle was detected.
 */
static inline int dma_resv_lock(struct dma_resv *obj,
				struct ww_acquire_ctx *ctx)
{
	return ww_mutex_lock(&obj->lock, ctx);
}
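
/*
 * Usage sketch (illustrative): locking two reservation objects with
 * deadlock backoff. On -EDEADLK the already-held lock is dropped and the
 * contended object is re-acquired on the slowpath; a full implementation
 * loops until no -EDEADLK is returned. "a" and "b" are hypothetical.
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ww_acquire_init(&ctx, &reservation_ww_class);
 *	ret = dma_resv_lock(a, &ctx);
 *	if (!ret) {
 *		ret = dma_resv_lock(b, &ctx);
 *		if (ret == -EDEADLK) {
 *			dma_resv_unlock(a);
 *			dma_resv_lock_slow(b, &ctx);
 *			ret = dma_resv_lock(a, &ctx);
 *		}
 *	}
 *	ww_acquire_done(&ctx);
 *
 *	(use the buffers, then unlock both and call ww_acquire_fini(&ctx))
 */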
115 
/**
 * dma_resv_lock_interruptible - lock the reservation object
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Locks the reservation object interruptibly for exclusive access and
 * modification. Note that the lock is only against other writers; readers
 * will run concurrently with a writer under RCU. The seqlock is used to
 * notify readers if they overlap with a writer.
 *
 * As the reservation object may be locked by multiple parties in an
 * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
 * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
 * object may be locked by itself by passing NULL as @ctx.
 *
 * Returns 0 on success, -EDEADLK if a deadlock cycle was detected, or
 * -EINTR if the wait was interrupted by a signal.
 */
static inline int dma_resv_lock_interruptible(struct dma_resv *obj,
					      struct ww_acquire_ctx *ctx)
{
	return ww_mutex_lock_interruptible(&obj->lock, ctx);
}
136 
/**
 * dma_resv_lock_slow - slowpath lock the reservation object
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Acquires the reservation object on the contended slowpath, after a
 * deadlock was detected (the die case) and all other locks held under
 * @ctx have been released. This function will sleep until the lock
 * becomes available. See dma_resv_lock() as well.
 */
static inline void dma_resv_lock_slow(struct dma_resv *obj,
				      struct ww_acquire_ctx *ctx)
{
	ww_mutex_lock_slow(&obj->lock, ctx);
}
151 
/**
 * dma_resv_lock_slow_interruptible - slowpath lock the reservation
 * object, interruptible
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Acquires the reservation object interruptibly after a die case. This
 * function will sleep until the lock becomes available. See
 * dma_resv_lock_interruptible() as well.
 */
static inline int dma_resv_lock_slow_interruptible(struct dma_resv *obj,
						   struct ww_acquire_ctx *ctx)
{
	return ww_mutex_lock_slow_interruptible(&obj->lock, ctx);
}
167 
/**
 * dma_resv_trylock - trylock the reservation object
 * @obj: the reservation object
 *
 * Tries to lock the reservation object for exclusive access and modification.
 * Note that the lock is only against other writers; readers will run
 * concurrently with a writer under RCU. The seqlock is used to notify readers
 * if they overlap with a writer.
 *
 * Also note that since no context is provided, no deadlock protection is
 * possible.
 *
 * Returns true if the lock was acquired, false otherwise.
 */
static inline bool __must_check dma_resv_trylock(struct dma_resv *obj)
{
	return ww_mutex_trylock(&obj->lock);
}
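
/*
 * Usage sketch (illustrative): opportunistic locking from a context that
 * must not block on the reservation lock, e.g. a shrinker or eviction
 * scan. Contended objects are simply skipped.
 *
 *	if (!dma_resv_trylock(obj))
 *		return;
 *
 *	(inspect or update the fences)
 *
 *	dma_resv_unlock(obj);
 */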
186 
/**
 * dma_resv_is_locked - is the reservation object locked
 * @obj: the reservation object
 *
 * Returns true if the mutex is locked, false if unlocked.
 */
static inline bool dma_resv_is_locked(struct dma_resv *obj)
{
	return ww_mutex_is_locked(&obj->lock);
}
197 
/**
 * dma_resv_locking_ctx - returns the context used to lock the object
 * @obj: the reservation object
 *
 * Returns the context used to lock a reservation object or NULL if no context
 * was used or the object is not locked at all.
 */
static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
{
	return READ_ONCE(obj->lock.ctx);
}
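
/*
 * Usage sketch (illustrative): a helper that may be called with the lock
 * already held can compare the locking context against its caller's
 * #ww_acquire_ctx to avoid recursive locking.
 *
 *	if (dma_resv_locking_ctx(obj) != ctx)
 *		ret = dma_resv_lock(obj, ctx);
 */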
209 
/**
 * dma_resv_unlock - unlock the reservation object
 * @obj: the reservation object
 *
 * Unlocks the reservation object following exclusive access.
 */
static inline void dma_resv_unlock(struct dma_resv *obj)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * Shrink shared_max back to shared_count, so that a later
	 * dma_resv_add_shared_fence() without a preceding
	 * dma_resv_reserve_shared() is caught by the debug checks.
	 */
	if (rcu_access_pointer(obj->fence)) {
		struct dma_resv_list *fence = dma_resv_get_list(obj);

		fence->shared_max = fence->shared_count;
	}
#endif
	ww_mutex_unlock(&obj->lock);
}
228 
/**
 * dma_resv_get_excl - get the reservation object's
 * exclusive fence, with update-side lock held
 * @obj: the reservation object
 *
 * Returns the exclusive fence (if any).  Does NOT take a
 * reference.  The obj->lock must be held; lockless readers
 * should use dma_resv_get_excl_rcu() instead.
 *
 * RETURNS
 * The exclusive fence or NULL
 */
static inline struct dma_fence *
dma_resv_get_excl(struct dma_resv *obj)
{
	return rcu_dereference_protected(obj->fence_excl,
					 dma_resv_held(obj));
}
247 
/**
 * dma_resv_get_excl_rcu - get the reservation object's
 * exclusive fence, without lock held.
 * @obj: the reservation object
 *
 * If there is an exclusive fence, this atomically increments its
 * reference count and returns it.
 *
 * RETURNS
 * The exclusive fence or NULL if none
 */
static inline struct dma_fence *
dma_resv_get_excl_rcu(struct dma_resv *obj)
{
	struct dma_fence *fence;

	if (!rcu_access_pointer(obj->fence_excl))
		return NULL;

	rcu_read_lock();
	fence = dma_fence_get_rcu_safe(&obj->fence_excl);
	rcu_read_unlock();

	return fence;
}
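
/*
 * Usage sketch (illustrative): taking a reference to the exclusive fence
 * without holding the reservation lock, then waiting for it to signal.
 * The reference must be dropped with dma_fence_put().
 *
 *	struct dma_fence *fence;
 *
 *	fence = dma_resv_get_excl_rcu(obj);
 *	if (fence) {
 *		dma_fence_wait(fence, true);
 *		dma_fence_put(fence);
 *	}
 */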
273 
void dma_resv_init(struct dma_resv *obj);
void dma_resv_fini(struct dma_resv *obj);
int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
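
/*
 * Usage sketch (illustrative): adding a shared fence. A slot must be
 * reserved under the lock before the fence is added; with a slot
 * reserved, the add itself cannot fail.
 *
 *	dma_resv_lock(obj, NULL);
 *	ret = dma_resv_reserve_shared(obj, 1);
 *	if (!ret)
 *		dma_resv_add_shared_fence(obj, fence);
 *	dma_resv_unlock(obj);
 */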
278 
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
280 
int dma_resv_get_fences_rcu(struct dma_resv *obj,
			    struct dma_fence **pfence_excl,
			    unsigned *pshared_count,
			    struct dma_fence ***pshared);
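
/*
 * Usage sketch (illustrative): snapshotting all fences without holding
 * the reservation lock. On success the caller owns a reference to each
 * returned fence and must free the shared array.
 *
 *	struct dma_fence *excl, **shared;
 *	unsigned int count, i;
 *	int ret;
 *
 *	ret = dma_resv_get_fences_rcu(obj, &excl, &count, &shared);
 *	if (!ret) {
 *		for (i = 0; i < count; ++i)
 *			dma_fence_put(shared[i]);
 *		kfree(shared);
 *		dma_fence_put(excl);
 *	}
 */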
285 
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
287 
long dma_resv_wait_timeout_rcu(struct dma_resv *obj, bool wait_all, bool intr,
			       unsigned long timeout);
290 
bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all);
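
/*
 * Usage sketch (illustrative): waiting for all fences with a timeout and
 * polling the signaled state afterwards. dma_resv_wait_timeout_rcu()
 * returns <= 0 on failure or timeout, otherwise the remaining jiffies.
 *
 *	long lret;
 *
 *	lret = dma_resv_wait_timeout_rcu(obj, true, true,
 *					 msecs_to_jiffies(100));
 *	if (lret > 0 && dma_resv_test_signaled_rcu(obj, true))
 *		(all fences have signaled)
 */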
292 
#endif /* _LINUX_RESERVATION_H */