/*
 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/dma-resv.h>
#include <linux/export.h>

/**
 * DOC: Reservation Object Overview
 *
 * The reservation object provides a mechanism to manage shared and
 * exclusive fences associated with a buffer. A reservation object
 * can have one exclusive fence attached (normally associated with
 * write operations) or N shared fences (read operations). RCU is
 * used to protect read access to the fences against locked
 * write-side updates.
 */

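/*
 * Example (illustrative sketch, not part of the API): a typical driver
 * flow publishes a read fence by locking the object, reserving a slot
 * and then adding the fence. @obj and @fence are assumed driver-side
 * variables:
 *
 *	int ret;
 *
 *	ret = dma_resv_lock(obj, NULL);
 *	if (ret)
 *		return ret;
 *
 *	ret = dma_resv_reserve_shared(obj, 1);
 *	if (!ret)
 *		dma_resv_add_shared_fence(obj, fence);
 *
 *	dma_resv_unlock(obj);
 */
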
DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);

struct lock_class_key reservation_seqcount_class;
EXPORT_SYMBOL(reservation_seqcount_class);

const char reservation_seqcount_string[] = "reservation_seqcount";
EXPORT_SYMBOL(reservation_seqcount_string);

/**
 * dma_resv_list_alloc - allocate fence list
 * @shared_max: number of fences we need space for
 *
 * Allocate a new dma_resv_list and make sure to correctly initialize
 * shared_max.
 */
static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
{
	struct dma_resv_list *list;

	list = kmalloc(offsetof(typeof(*list), shared[shared_max]), GFP_KERNEL);
	if (!list)
		return NULL;

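	/*
	 * kmalloc() may have rounded the allocation up to the next bucket
	 * size, so derive the real capacity from ksize() rather than from
	 * shared_max.
	 */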
	list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) /
		sizeof(*list->shared);

	return list;
}

/**
 * dma_resv_list_free - free fence list
 * @list: list to free
 *
 * Free a dma_resv_list and make sure to drop all references.
 */
static void dma_resv_list_free(struct dma_resv_list *list)
{
	unsigned int i;

	if (!list)
		return;

	for (i = 0; i < list->shared_count; ++i)
		dma_fence_put(rcu_dereference_protected(list->shared[i], true));

	kfree_rcu(list, rcu);
}

/**
 * dma_resv_init - initialize a reservation object
 * @obj: the reservation object
 */
void dma_resv_init(struct dma_resv *obj)
{
	ww_mutex_init(&obj->lock, &reservation_ww_class);

	__seqcount_init(&obj->seq, reservation_seqcount_string,
			&reservation_seqcount_class);
	RCU_INIT_POINTER(obj->fence, NULL);
	RCU_INIT_POINTER(obj->fence_excl, NULL);
}
EXPORT_SYMBOL(dma_resv_init);

/**
 * dma_resv_fini - destroy a reservation object
 * @obj: the reservation object
 */
void dma_resv_fini(struct dma_resv *obj)
{
	struct dma_resv_list *fobj;
	struct dma_fence *excl;

	/*
	 * This object should be dead and all references to it must have
	 * been released, so there is no need for RCU protection.
	 */
	excl = rcu_dereference_protected(obj->fence_excl, 1);
	if (excl)
		dma_fence_put(excl);

	fobj = rcu_dereference_protected(obj->fence, 1);
	dma_resv_list_free(fobj);
	ww_mutex_destroy(&obj->lock);
}
EXPORT_SYMBOL(dma_resv_fini);

/**
 * dma_resv_reserve_shared - Reserve space to add shared fences to
 * a dma_resv.
 * @obj: reservation object
 * @num_fences: number of fences we want to add
 *
 * Should be called before dma_resv_add_shared_fence().  Must
 * be called with obj->lock held.
 *
 * RETURNS
 * Zero for success, or -errno
 */
int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
{
	struct dma_resv_list *old, *new;
	unsigned int i, j, k, max;

	dma_resv_assert_held(obj);

	old = dma_resv_get_list(obj);

	if (old && old->shared_max) {
		if ((old->shared_count + num_fences) <= old->shared_max)
			return 0;
		else
			max = max(old->shared_count + num_fences,
				  old->shared_max * 2);
	} else {
		max = 4;
	}

	new = dma_resv_list_alloc(max);
	if (!new)
		return -ENOMEM;

	/*
	 * No need to bump the fence refcounts: RCU readers must use
	 * kref_get_unless_zero() anyway, and the references held by the
	 * old struct are simply carried over to the new one.
	 */
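	/*
	 * Pack unsignaled fences at the start of the new array (index j
	 * counting up) and signaled ones at the end (index k counting
	 * down); the signaled tail is dropped again further below.
	 */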
	for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(old->shared[i],
						  dma_resv_held(obj));
		if (dma_fence_is_signaled(fence))
			RCU_INIT_POINTER(new->shared[--k], fence);
		else
			RCU_INIT_POINTER(new->shared[j++], fence);
	}
	new->shared_count = j;

	/*
	 * We are not changing the effective set of fences here so can
	 * merely update the pointer to the new array; both existing
	 * readers and new readers will see exactly the same set of
	 * active (unsignaled) shared fences. Individual fences and the
	 * old array are protected by RCU and so will not vanish under
	 * the gaze of the rcu_read_lock() readers.
	 */
	rcu_assign_pointer(obj->fence, new);

	if (!old)
		return 0;

	/* Drop the references to the signaled fences */
	for (i = k; i < max; ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(new->shared[i],
						  dma_resv_held(obj));
		dma_fence_put(fence);
	}
	kfree_rcu(old, rcu);

	return 0;
}
EXPORT_SYMBOL(dma_resv_reserve_shared);

/**
 * dma_resv_add_shared_fence - Add a fence to a shared slot
 * @obj: the reservation object
 * @fence: the shared fence to add
 *
 * Add a fence to a shared slot. The obj->lock must be held and
 * dma_resv_reserve_shared() must have been called first. If a fence
 * with the same context, or an already signaled fence, occupies a
 * slot it is replaced.
 */
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_resv_list *fobj;
	struct dma_fence *old;
	unsigned int i, count;

	dma_fence_get(fence);

	dma_resv_assert_held(obj);

	fobj = dma_resv_get_list(obj);
	count = fobj->shared_count;

	preempt_disable();
	write_seqcount_begin(&obj->seq);

	for (i = 0; i < count; ++i) {
		old = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj));
		if (old->context == fence->context ||
		    dma_fence_is_signaled(old))
			goto replace;
	}

	BUG_ON(fobj->shared_count >= fobj->shared_max);
	old = NULL;
	count++;

replace:
	RCU_INIT_POINTER(fobj->shared[i], fence);
	/* pointer update must be visible before we extend the shared_count */
	smp_store_mb(fobj->shared_count, count);

	write_seqcount_end(&obj->seq);
	preempt_enable();
	dma_fence_put(old);
}
EXPORT_SYMBOL(dma_resv_add_shared_fence);

/**
 * dma_resv_add_excl_fence - Add an exclusive fence.
 * @obj: the reservation object
 * @fence: the exclusive fence to add
 *
 * Add a fence to the exclusive slot.  The obj->lock must be held.
 */
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_fence *old_fence = dma_resv_get_excl(obj);
	struct dma_resv_list *old;
	u32 i = 0;

	dma_resv_assert_held(obj);

	old = dma_resv_get_list(obj);
	if (old)
		i = old->shared_count;

	if (fence)
		dma_fence_get(fence);

	preempt_disable();
	write_seqcount_begin(&obj->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(obj->fence_excl, fence);
	if (old)
		old->shared_count = 0;
	write_seqcount_end(&obj->seq);
	preempt_enable();

	/* Drop the references to the shared fences we just removed. */
	while (i--)
		dma_fence_put(rcu_dereference_protected(old->shared[i],
						dma_resv_held(obj)));

	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(dma_resv_add_excl_fence);
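
/*
 * Example (illustrative sketch, with an assumed driver-side @obj and a
 * just-emitted write @fence):
 *
 *	dma_resv_lock(obj, NULL);
 *	dma_resv_add_excl_fence(obj, fence);
 *	dma_resv_unlock(obj);
 *
 * Setting the exclusive fence implicitly drops all shared fences, so
 * waiting on the exclusive fence alone is sufficient afterwards.
 */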

/**
 * dma_resv_copy_fences - Copy all fences from src to dst.
 * @dst: the destination reservation object
 * @src: the source reservation object
 *
 * Copy all fences from src to dst. The dst->lock must be held.
 */
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
	struct dma_resv_list *src_list, *dst_list;
	struct dma_fence *old, *new;
	unsigned i;

	dma_resv_assert_held(dst);

	rcu_read_lock();
	src_list = rcu_dereference(src->fence);

retry:
	if (src_list) {
		unsigned shared_count = src_list->shared_count;

		rcu_read_unlock();

		dst_list = dma_resv_list_alloc(shared_count);
		if (!dst_list)
			return -ENOMEM;

		rcu_read_lock();
		src_list = rcu_dereference(src->fence);
		if (!src_list || src_list->shared_count > shared_count) {
			kfree(dst_list);
			goto retry;
		}

		dst_list->shared_count = 0;
		for (i = 0; i < src_list->shared_count; ++i) {
			struct dma_fence *fence;

			fence = rcu_dereference(src_list->shared[i]);
			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				     &fence->flags))
				continue;

			if (!dma_fence_get_rcu(fence)) {
				dma_resv_list_free(dst_list);
				src_list = rcu_dereference(src->fence);
				goto retry;
			}

			if (dma_fence_is_signaled(fence)) {
				dma_fence_put(fence);
				continue;
			}

			rcu_assign_pointer(dst_list->shared[dst_list->shared_count++], fence);
		}
	} else {
		dst_list = NULL;
	}

	new = dma_fence_get_rcu_safe(&src->fence_excl);
	rcu_read_unlock();

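	/* Note: src_list is reused below to hold dst's old shared list. */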
	src_list = dma_resv_get_list(dst);
	old = dma_resv_get_excl(dst);

	preempt_disable();
	write_seqcount_begin(&dst->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(dst->fence_excl, new);
	RCU_INIT_POINTER(dst->fence, dst_list);
	write_seqcount_end(&dst->seq);
	preempt_enable();

	dma_resv_list_free(src_list);
	dma_fence_put(old);

	return 0;
}
EXPORT_SYMBOL(dma_resv_copy_fences);
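
/*
 * Example (illustrative sketch, assuming driver-side buffer wrappers
 * @dst_bo and @src_bo with embedded reservation objects):
 *
 *	dma_resv_lock(dst_bo->resv, NULL);
 *	ret = dma_resv_copy_fences(dst_bo->resv, src_bo->resv);
 *	dma_resv_unlock(dst_bo->resv);
 *
 * Only dst has to be locked; src is accessed under RCU.
 */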

/**
 * dma_resv_get_fences_rcu - Get an object's shared and exclusive
 * fences without update side lock held
 * @obj: the reservation object
 * @pfence_excl: the returned exclusive fence (or NULL)
 * @pshared_count: the number of shared fences returned
 * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
 * the required size, and must be freed by caller)
 *
 * Retrieve all fences from the reservation object. If the pointer for the
 * exclusive fence is not specified the fence is put into the array of the
 * shared fences as well. Returns either zero or -ENOMEM.
 */
int dma_resv_get_fences_rcu(struct dma_resv *obj,
			    struct dma_fence **pfence_excl,
			    unsigned *pshared_count,
			    struct dma_fence ***pshared)
{
	struct dma_fence **shared = NULL;
	struct dma_fence *fence_excl;
	unsigned int shared_count;
	int ret = 1;

	do {
		struct dma_resv_list *fobj;
		unsigned int i, seq;
		size_t sz = 0;

		shared_count = i = 0;

		rcu_read_lock();
		seq = read_seqcount_begin(&obj->seq);

		fence_excl = rcu_dereference(obj->fence_excl);
		if (fence_excl && !dma_fence_get_rcu(fence_excl))
			goto unlock;

		fobj = rcu_dereference(obj->fence);
		if (fobj)
			sz += sizeof(*shared) * fobj->shared_max;

		if (!pfence_excl && fence_excl)
			sz += sizeof(*shared);

		if (sz) {
			struct dma_fence **nshared;

			nshared = krealloc(shared, sz,
					   GFP_NOWAIT | __GFP_NOWARN);
			if (!nshared) {
				rcu_read_unlock();

				dma_fence_put(fence_excl);
				fence_excl = NULL;

				nshared = krealloc(shared, sz, GFP_KERNEL);
				if (nshared) {
					shared = nshared;
					continue;
				}

				ret = -ENOMEM;
				break;
			}
			shared = nshared;
			shared_count = fobj ? fobj->shared_count : 0;
			for (i = 0; i < shared_count; ++i) {
				shared[i] = rcu_dereference(fobj->shared[i]);
				if (!dma_fence_get_rcu(shared[i]))
					break;
			}
		}

		if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
			while (i--)
				dma_fence_put(shared[i]);
			dma_fence_put(fence_excl);
			goto unlock;
		}

		ret = 0;
unlock:
		rcu_read_unlock();
	} while (ret);

	if (pfence_excl)
		*pfence_excl = fence_excl;
	else if (fence_excl)
		shared[shared_count++] = fence_excl;

	if (!shared_count) {
		kfree(shared);
		shared = NULL;
	}

	*pshared_count = shared_count;
	*pshared = shared;
	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu);
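
/*
 * Example (illustrative sketch, with an assumed driver-side @obj):
 * snapshot all fences without holding the reservation lock and release
 * the snapshot again once it is no longer needed:
 *
 *	struct dma_fence *excl, **shared;
 *	unsigned int count, i;
 *	int ret;
 *
 *	ret = dma_resv_get_fences_rcu(obj, &excl, &count, &shared);
 *	if (ret)
 *		return ret;
 *
 *	(use excl and shared[0..count-1] here)
 *
 *	for (i = 0; i < count; ++i)
 *		dma_fence_put(shared[i]);
 *	kfree(shared);
 *	dma_fence_put(excl);
 *
 * dma_fence_put() tolerates NULL, so excl needs no special casing.
 */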

/**
 * dma_resv_wait_timeout_rcu - Wait on a reservation object's
 * shared and/or exclusive fences.
 * @obj: the reservation object
 * @wait_all: if true, wait on all fences, else wait on just the exclusive fence
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
 */
long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
			       bool wait_all, bool intr,
			       unsigned long timeout)
{
	struct dma_fence *fence;
	unsigned seq, shared_count;
	long ret = timeout ? timeout : 1;
	int i;

retry:
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);
	rcu_read_lock();
	i = -1;

	fence = rcu_dereference(obj->fence_excl);
	if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		if (!dma_fence_get_rcu(fence))
			goto unlock_retry;

		if (dma_fence_is_signaled(fence)) {
			dma_fence_put(fence);
			fence = NULL;
		}

	} else {
		fence = NULL;
	}

	if (wait_all) {
		struct dma_resv_list *fobj = rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		for (i = 0; !fence && i < shared_count; ++i) {
			struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);

			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				     &lfence->flags))
				continue;

			if (!dma_fence_get_rcu(lfence))
				goto unlock_retry;

			if (dma_fence_is_signaled(lfence)) {
				dma_fence_put(lfence);
				continue;
			}

			fence = lfence;
			break;
		}
	}

	rcu_read_unlock();
	if (fence) {
		if (read_seqcount_retry(&obj->seq, seq)) {
			dma_fence_put(fence);
			goto retry;
		}

		ret = dma_fence_wait_timeout(fence, intr, ret);
		dma_fence_put(fence);
		if (ret > 0 && wait_all && (i + 1 < shared_count))
			goto retry;
	}
	return ret;

unlock_retry:
	rcu_read_unlock();
	goto retry;
}
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu);
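
/*
 * Example (illustrative sketch, with an assumed driver-side @obj):
 * interruptibly wait up to 100 ms for all fences to signal:
 *
 *	long ret;
 *
 *	ret = dma_resv_wait_timeout_rcu(obj, true, true,
 *					msecs_to_jiffies(100));
 *	if (ret < 0)
 *		return ret;
 *	if (ret == 0)
 *		return -ETIMEDOUT;
 *
 * A negative return means the wait was interrupted, zero means it
 * timed out, and a positive value means all fences have signaled.
 */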
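/*
 * Helper: returns 1 if @passed_fence has signaled, 0 if it has not,
 * and -1 if the reference could not be acquired and the caller needs
 * to retry under its seqcount.
 */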
static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
{
	struct dma_fence *fence, *lfence = passed_fence;
	int ret = 1;

	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
		fence = dma_fence_get_rcu(lfence);
		if (!fence)
			return -1;

		ret = !!dma_fence_is_signaled(fence);
		dma_fence_put(fence);
	}
	return ret;
}

/**
 * dma_resv_test_signaled_rcu - Test if a reservation object's
 * fences have been signaled.
 * @obj: the reservation object
 * @test_all: if true, test all fences, otherwise only test the exclusive
 * fence
 *
 * RETURNS
 * true if all fences signaled, else false
 */
bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
{
	unsigned seq, shared_count;
	int ret;

	rcu_read_lock();
retry:
	ret = true;
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);

	if (test_all) {
		unsigned i;

		struct dma_resv_list *fobj = rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);

			ret = dma_resv_test_signaled_single(fence);
			if (ret < 0)
				goto retry;
			else if (!ret)
				break;
		}

		if (read_seqcount_retry(&obj->seq, seq))
			goto retry;
	}

	if (!shared_count) {
		struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);

		if (fence_excl) {
			ret = dma_resv_test_signaled_single(fence_excl);
			if (ret < 0)
				goto retry;

			if (read_seqcount_retry(&obj->seq, seq))
				goto retry;
		}
	}

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);
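
/*
 * Example (illustrative sketch, with an assumed driver-side @obj):
 * lockless busy check before reusing a buffer:
 *
 *	if (!dma_resv_test_signaled_rcu(obj, true))
 *		return -EBUSY;
 */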
651