/*
 * Header file for reservations for dma-buf and ttm
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Copyright (C) 2012-2013 Canonical Ltd
 * Copyright (C) 2012 Texas Instruments
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
 * Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef _LINUX_RESERVATION_H
#define _LINUX_RESERVATION_H

#include <linux/ww_mutex.h>
#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <linux/rcupdate.h>

extern struct ww_class reservation_ww_class;

struct dma_resv_list;
struct seq_file;

/**
 * enum dma_resv_usage - how the fences from a dma_resv obj are used
 *
 * This enum describes the different use cases for a dma_resv object and
 * controls which fences are returned when queried.
 *
 * An important fact is that there is the order KERNEL < WRITE < READ <
 * BOOKKEEP: when the dma_resv object is asked for fences of one use case,
 * the fences of all lower use cases are returned as well.
 *
 * For example, when asking for WRITE fences the KERNEL fences are returned
 * as well. Similarly, when asking for READ fences both the WRITE and KERNEL
 * fences are returned.
 *
 * Already added fences can be promoted in the sense that a fence with
 * DMA_RESV_USAGE_BOOKKEEP could become DMA_RESV_USAGE_READ by adding it again
 * with that usage. But fences can never be degraded in the sense that a fence
 * with DMA_RESV_USAGE_WRITE could become DMA_RESV_USAGE_READ.
 */
enum dma_resv_usage {
	/**
	 * @DMA_RESV_USAGE_KERNEL: For in-kernel memory management only.
	 *
	 * This should only be used for things like copying or clearing memory
	 * with a DMA hardware engine for the purpose of kernel memory
	 * management.
	 *
	 * Drivers must *always* wait for those fences before accessing the
	 * resource protected by the dma_resv object. The only exception is
	 * when the resource is known to be locked down in place by previously
	 * pinning it.
	 */
	DMA_RESV_USAGE_KERNEL,

	/**
	 * @DMA_RESV_USAGE_WRITE: Implicit write synchronization.
	 *
	 * This should only be used for userspace command submissions which add
	 * an implicit write dependency.
	 */
	DMA_RESV_USAGE_WRITE,

	/**
	 * @DMA_RESV_USAGE_READ: Implicit read synchronization.
	 *
	 * This should only be used for userspace command submissions which add
	 * an implicit read dependency.
	 */
	DMA_RESV_USAGE_READ,

	/**
	 * @DMA_RESV_USAGE_BOOKKEEP: No implicit sync.
	 *
	 * This should be used by submissions which don't want to participate
	 * in any implicit synchronization.
	 *
	 * The most common cases are preemption fences, page table updates, TLB
	 * flushes as well as explicitly synced user submissions.
	 *
	 * Explicitly synced user submissions can be promoted to
	 * DMA_RESV_USAGE_READ or DMA_RESV_USAGE_WRITE as needed using
	 * dma_buf_import_sync_file() when implicit synchronization should
	 * become necessary after the fence was initially added.
	 */
	DMA_RESV_USAGE_BOOKKEEP
};

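/*
 * Illustrative sketch (not part of this header): promoting an already added
 * fence as described above. Re-adding with DMA_RESV_USAGE_READ makes a fence
 * that was previously only tracked for bookkeeping participate in implicit
 * read synchronization. The resv and fence pointers are assumptions for the
 * example; the reservation lock and a reserved fence slot are assumed to be
 * held.
 *
 *	dma_resv_add_fence(resv, fence, DMA_RESV_USAGE_BOOKKEEP);
 *	...
 *	dma_resv_add_fence(resv, fence, DMA_RESV_USAGE_READ);
 */
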
/**
 * dma_resv_usage_rw - helper for implicit sync
 * @write: true if we create a new implicit sync write
 *
 * This returns the implicit synchronization usage for write or read accesses,
 * see enum dma_resv_usage and &dma_buf.resv.
 */
static inline enum dma_resv_usage dma_resv_usage_rw(bool write)
{
	/* This looks confusing at first sight, but is indeed correct.
	 *
	 * The rationale is that a new write operation needs to wait for the
	 * existing read and write operations to finish.
	 * But a new read operation only needs to wait for the existing write
	 * operations to finish.
	 */
	return write ? DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE;
}

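/*
 * Illustrative sketch (not part of this header): using dma_resv_usage_rw()
 * to wait for exactly the fences an implicitly synced access must respect,
 * e.g. before CPU access. The resv pointer and is_write flag are assumptions
 * for the example.
 *
 *	long ret;
 *
 *	ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(is_write), true,
 *				    MAX_SCHEDULE_TIMEOUT);
 *	if (ret < 0)
 *		return ret;
 */
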
/**
 * struct dma_resv - a reservation object manages fences for a buffer
 *
 * This is a container for dma_fence objects which needs to handle multiple use
 * cases.
 *
 * One use is to synchronize cross-driver access to a struct dma_buf, either for
 * dynamic buffer management or just to handle implicit synchronization between
 * different users of the buffer in userspace. See &dma_buf.resv for a more
 * in-depth discussion.
 *
 * The other major use is to manage access and locking within a driver in a
 * buffer based memory manager. struct ttm_buffer_object is the canonical
 * example here, since this is where reservation objects originated from. But
 * use in drivers is spreading and some drivers also manage struct
 * drm_gem_object with the same scheme.
 */
struct dma_resv {
	/**
	 * @lock:
	 *
	 * Update side lock. Don't use directly, instead use the wrapper
	 * functions like dma_resv_lock() and dma_resv_unlock().
	 *
	 * Drivers which use the reservation object to manage memory
	 * dynamically also use this lock to protect buffer object state like
	 * placement and allocation policies, as well as throughout command
	 * submission.
	 */
	struct ww_mutex lock;

	/**
	 * @fences:
	 *
	 * Array of fences which were added to the dma_resv object.
	 *
	 * A new fence is added by calling dma_resv_add_fence(). Since this
	 * often needs to be done past the point of no return in command
	 * submission it cannot fail, and therefore sufficient slots need to be
	 * reserved by calling dma_resv_reserve_fences().
	 */
	struct dma_resv_list __rcu *fences;
};

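/*
 * Illustrative sketch (not part of this header): the reserve-then-add pattern
 * described above. Fallible operations happen before the point of no return,
 * the infallible dma_resv_add_fence() afterwards. The resv and fence pointers
 * are assumptions for the example.
 *
 *	int ret;
 *
 *	ret = dma_resv_lock(resv, NULL);
 *	if (ret)
 *		return ret;
 *
 *	ret = dma_resv_reserve_fences(resv, 1);
 *	if (ret) {
 *		dma_resv_unlock(resv);
 *		return ret;
 *	}
 *
 *	... point of no return, submit the job ...
 *
 *	dma_resv_add_fence(resv, fence, DMA_RESV_USAGE_WRITE);
 *	dma_resv_unlock(resv);
 */
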
/**
 * struct dma_resv_iter - current position into the dma_resv fences
 *
 * Don't touch this directly in the driver, use the accessor functions
 * instead.
 *
 * IMPORTANT
 *
 * When using the lockless iterators like dma_resv_iter_next_unlocked() or
 * dma_resv_for_each_fence_unlocked() beware that the iterator can be
 * restarted. Code which accumulates statistics or similar needs to check for
 * this with dma_resv_iter_is_restarted().
 */
struct dma_resv_iter {
	/** @obj: The dma_resv object we iterate over */
	struct dma_resv *obj;

	/** @usage: Return fences with this usage or lower. */
	enum dma_resv_usage usage;

	/** @fence: the currently handled fence */
	struct dma_fence *fence;

	/** @fence_usage: the usage of the current fence */
	enum dma_resv_usage fence_usage;

	/** @index: index into the shared fences */
	unsigned int index;

	/** @fences: the shared fences; private, *MUST* not dereference */
	struct dma_resv_list *fences;

	/** @num_fences: number of fences */
	unsigned int num_fences;

	/** @is_restarted: true if this is the first returned fence */
	bool is_restarted;
};

struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor);
struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor);
struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor);
struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor);

/**
 * dma_resv_iter_begin - initialize a dma_resv_iter object
 * @cursor: The dma_resv_iter object to initialize
 * @obj: The dma_resv object which we want to iterate over
 * @usage: controls which fences to include, see enum dma_resv_usage.
 */
static inline void dma_resv_iter_begin(struct dma_resv_iter *cursor,
				       struct dma_resv *obj,
				       enum dma_resv_usage usage)
{
	cursor->obj = obj;
	cursor->usage = usage;
	cursor->fence = NULL;
}

/**
 * dma_resv_iter_end - cleanup a dma_resv_iter object
 * @cursor: the dma_resv_iter object which should be cleaned up
 *
 * Make sure that the reference to the fence in the cursor is properly
 * dropped.
 */
static inline void dma_resv_iter_end(struct dma_resv_iter *cursor)
{
	dma_fence_put(cursor->fence);
}

/**
 * dma_resv_iter_usage - Return the usage of the current fence
 * @cursor: the cursor of the current position
 *
 * Returns the usage of the currently processed fence.
 */
static inline enum dma_resv_usage
dma_resv_iter_usage(struct dma_resv_iter *cursor)
{
	return cursor->fence_usage;
}

/**
 * dma_resv_iter_is_restarted - test if this is the first fence after a restart
 * @cursor: the cursor with the current position
 *
 * Return true if this is the first fence in an iteration after a restart.
 */
static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
{
	return cursor->is_restarted;
}

/**
 * dma_resv_for_each_fence_unlocked - unlocked fence iterator
 * @cursor: a struct dma_resv_iter pointer
 * @fence: the current fence
 *
 * Iterate over the fences in a struct dma_resv object without holding the
 * &dma_resv.lock, using RCU instead. The cursor needs to be initialized with
 * dma_resv_iter_begin() and cleaned up with dma_resv_iter_end(). Inside the
 * iterator a reference to the dma_fence is held and the RCU lock dropped.
 *
 * Beware that the iterator can be restarted when the struct dma_resv for
 * @cursor is modified. Code which accumulates statistics or similar needs to
 * check for this with dma_resv_iter_is_restarted(). For this reason prefer
 * the locked iterator dma_resv_for_each_fence() whenever possible.
 */
#define dma_resv_for_each_fence_unlocked(cursor, fence)		\
	for (fence = dma_resv_iter_first_unlocked(cursor);	\
	     fence; fence = dma_resv_iter_next_unlocked(cursor))

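/*
 * Illustrative sketch (not part of this header): counting signaled fences
 * without taking the reservation lock. Because a concurrent update can
 * restart the iteration, the accumulated count must be reset whenever
 * dma_resv_iter_is_restarted() reports a restart. The resv pointer is an
 * assumption for the example.
 *
 *	struct dma_resv_iter cursor;
 *	struct dma_fence *fence;
 *	unsigned int signaled = 0;
 *
 *	dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
 *	dma_resv_for_each_fence_unlocked(&cursor, fence) {
 *		if (dma_resv_iter_is_restarted(&cursor))
 *			signaled = 0;
 *		if (dma_fence_is_signaled(fence))
 *			signaled++;
 *	}
 *	dma_resv_iter_end(&cursor);
 */
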
/**
 * dma_resv_for_each_fence - fence iterator
 * @cursor: a struct dma_resv_iter pointer
 * @obj: a dma_resv object pointer
 * @usage: controls which fences to return
 * @fence: the current fence
 *
 * Iterate over the fences in a struct dma_resv object while holding the
 * &dma_resv.lock. @usage controls which fences are returned, see enum
 * dma_resv_usage. The cursor initialisation is part of the iterator and the
 * fence stays valid as long as the lock is held, so no extra reference to the
 * fence is taken.
 */
#define dma_resv_for_each_fence(cursor, obj, usage, fence)	\
	for (dma_resv_iter_begin(cursor, obj, usage),		\
	     fence = dma_resv_iter_first(cursor); fence;	\
	     fence = dma_resv_iter_next(cursor))

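/*
 * Illustrative sketch (not part of this header): collecting the implicit
 * sync dependencies of a new job while holding the reservation lock. The
 * resv pointer, is_write flag and add_dependency() helper are assumptions
 * for the example.
 *
 *	struct dma_resv_iter cursor;
 *	struct dma_fence *fence;
 *	int ret = 0;
 *
 *	dma_resv_assert_held(resv);
 *	dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(is_write),
 *				fence) {
 *		ret = add_dependency(fence);
 *		if (ret)
 *			break;
 *	}
 */
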
#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)

#ifdef CONFIG_DEBUG_MUTEXES
void dma_resv_reset_max_fences(struct dma_resv *obj);
#else
static inline void dma_resv_reset_max_fences(struct dma_resv *obj) {}
#endif

/**
 * dma_resv_lock - lock the reservation object
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Locks the reservation object for exclusive access and modification. Note
 * that the lock is only against other writers; readers will run concurrently
 * with a writer under RCU. The seqlock is used to notify readers if they
 * overlap with a writer.
 *
 * As the reservation object may be locked by multiple parties in an
 * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
 * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
 * object may be locked by itself by passing NULL as @ctx.
 *
 * When a die situation is indicated by returning -EDEADLK all locks held by
 * @ctx must be unlocked and then dma_resv_lock_slow() must be called on @obj.
 *
 * Unlocked by calling dma_resv_unlock().
 *
 * See also dma_resv_lock_interruptible() for the interruptible variant.
 */
static inline int dma_resv_lock(struct dma_resv *obj,
				struct ww_acquire_ctx *ctx)
{
	return ww_mutex_lock(&obj->lock, ctx);
}

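/*
 * Illustrative sketch (not part of this header): locking several reservation
 * objects with deadlock backoff. On -EDEADLK all locks held by the context
 * are dropped and the contended object is re-acquired on the slowpath before
 * retrying. The a pointer is an assumption for the example.
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ww_acquire_init(&ctx, &reservation_ww_class);
 *
 *	ret = dma_resv_lock(a->resv, &ctx);
 *	if (ret == -EDEADLK) {
 *		... unlock everything already held by &ctx ...
 *		dma_resv_lock_slow(a->resv, &ctx);
 *		... retry taking the other locks ...
 *	}
 *	ww_acquire_done(&ctx);
 *	...
 *	dma_resv_unlock(a->resv);
 *	ww_acquire_fini(&ctx);
 */
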
/**
 * dma_resv_lock_interruptible - lock the reservation object
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Locks the reservation object interruptibly for exclusive access and
 * modification. Note that the lock is only against other writers; readers
 * will run concurrently with a writer under RCU. The seqlock is used to
 * notify readers if they overlap with a writer.
 *
 * As the reservation object may be locked by multiple parties in an
 * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
 * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
 * object may be locked by itself by passing NULL as @ctx.
 *
 * When a die situation is indicated by returning -EDEADLK all locks held by
 * @ctx must be unlocked and then dma_resv_lock_slow_interruptible() must be
 * called on @obj.
 *
 * Unlocked by calling dma_resv_unlock().
 */
static inline int dma_resv_lock_interruptible(struct dma_resv *obj,
					      struct ww_acquire_ctx *ctx)
{
	return ww_mutex_lock_interruptible(&obj->lock, ctx);
}

/**
 * dma_resv_lock_slow - slowpath lock the reservation object
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Acquires the reservation object after a die case. This function
 * will sleep until the lock becomes available. See dma_resv_lock() as
 * well.
 *
 * See also dma_resv_lock_slow_interruptible() for the interruptible variant.
 */
static inline void dma_resv_lock_slow(struct dma_resv *obj,
				      struct ww_acquire_ctx *ctx)
{
	ww_mutex_lock_slow(&obj->lock, ctx);
}

/**
 * dma_resv_lock_slow_interruptible - slowpath lock the reservation
 * object, interruptible
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Acquires the reservation object interruptibly after a die case. This
 * function will sleep until the lock becomes available. See
 * dma_resv_lock_interruptible() as well.
 */
static inline int dma_resv_lock_slow_interruptible(struct dma_resv *obj,
						   struct ww_acquire_ctx *ctx)
{
	return ww_mutex_lock_slow_interruptible(&obj->lock, ctx);
}

/**
 * dma_resv_trylock - trylock the reservation object
 * @obj: the reservation object
 *
 * Tries to lock the reservation object for exclusive access and modification.
 * Note that the lock is only against other writers; readers will run
 * concurrently with a writer under RCU. The seqlock is used to notify readers
 * if they overlap with a writer.
 *
 * Also note that since no context is provided, no deadlock protection is
 * possible, which is also not needed for a trylock.
 *
 * Returns true if the lock was acquired, false otherwise.
 */
static inline bool __must_check dma_resv_trylock(struct dma_resv *obj)
{
	return ww_mutex_trylock(&obj->lock, NULL);
}

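/*
 * Illustrative sketch (not part of this header): opportunistically skipping
 * busy buffers, a typical trylock use case in eviction or shrinker paths.
 * The bo pointer, its resv member and the my_evict() helper are assumptions
 * for the example.
 *
 *	if (!dma_resv_trylock(bo->resv))
 *		return false;	(somebody else holds the lock, skip this one)
 *
 *	my_evict(bo);
 *	dma_resv_unlock(bo->resv);
 *	return true;
 */
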
/**
 * dma_resv_is_locked - is the reservation object locked
 * @obj: the reservation object
 *
 * Returns true if the mutex is locked, false if unlocked.
 */
static inline bool dma_resv_is_locked(struct dma_resv *obj)
{
	return ww_mutex_is_locked(&obj->lock);
}

/**
 * dma_resv_locking_ctx - returns the context used to lock the object
 * @obj: the reservation object
 *
 * Returns the context used to lock a reservation object or NULL if no context
 * was used or the object is not locked at all.
 *
 * WARNING: This interface is pretty horrible, but TTM needs it because it
 * doesn't pass the struct ww_acquire_ctx around in some very long callchains.
 * Everyone else just uses it to check whether they're holding a reservation or
 * not.
 */
static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
{
	return READ_ONCE(obj->lock.ctx);
}

/**
 * dma_resv_unlock - unlock the reservation object
 * @obj: the reservation object
 *
 * Unlocks the reservation object following exclusive access.
 */
static inline void dma_resv_unlock(struct dma_resv *obj)
{
	dma_resv_reset_max_fences(obj);
	ww_mutex_unlock(&obj->lock);
}

void dma_resv_init(struct dma_resv *obj);
void dma_resv_fini(struct dma_resv *obj);
int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences);
void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
			enum dma_resv_usage usage);
void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
			     struct dma_fence *fence,
			     enum dma_resv_usage usage);
int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
			unsigned int *num_fences, struct dma_fence ***fences);
int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
			   struct dma_fence **fence);
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
			   bool intr, unsigned long timeout);
bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage);
void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq);

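/*
 * Illustrative sketch (not part of this header): a driver-defined buffer
 * object embedding a reservation object and managing its lifetime with
 * dma_resv_init() and dma_resv_fini(). The my_buffer type is an assumption
 * for the example.
 *
 *	struct my_buffer {
 *		struct dma_resv resv;
 *		...
 *	};
 *
 *	static struct my_buffer *my_buffer_create(void)
 *	{
 *		struct my_buffer *buf = kzalloc(sizeof(*buf), GFP_KERNEL);
 *
 *		if (!buf)
 *			return NULL;
 *		dma_resv_init(&buf->resv);
 *		return buf;
 *	}
 *
 *	static void my_buffer_destroy(struct my_buffer *buf)
 *	{
 *		dma_resv_fini(&buf->resv);
 *		kfree(buf);
 *	}
 */
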
#endif /* _LINUX_RESERVATION_H */