xref: /dragonfly/sys/dev/drm/linux_fence-array.c (revision 655933d6)
1 /*
2  * Copyright (c) 2019 François Tigeot <ftigeot@wolfpond.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <linux/export.h>
28 #include <linux/slab.h>
29 #include <linux/dma-fence-array.h>
30 
31 #define PENDING_ERROR 1
32 
33 
/*
 * Report the driver name for an aggregate fence; all fence arrays share
 * the same fixed identifier.
 */
static const char *dma_fence_array_get_driver_name(struct dma_fence *fence)
{
	static const char driver_name[] = "dma_fence_array";

	return driver_name;
}
38 
/*
 * A fence array aggregates fences from arbitrary contexts, so it is not
 * attached to any particular timeline.
 */
static const char *dma_fence_array_get_timeline_name(struct dma_fence *fence)
{
	static const char timeline_name[] = "unbound";

	return timeline_name;
}
43 
44 static void dma_fence_array_set_pending_error(struct dma_fence_array *array,
45 					      int error)
46 {
47 	/*
48 	 * Propagate the first error reported by any of our fences, but only
49 	 * before we ourselves are signaled.
50 	 */
51 	if (error)
52 		cmpxchg(&array->base.error, PENDING_ERROR, error);
53 }
54 
55 static void dma_fence_array_clear_pending_error(struct dma_fence_array *array)
56 {
57 	/* Clear the error flag if not actually set. */
58 	cmpxchg(&array->base.error, PENDING_ERROR, 0);
59 }
60 
61 static void irq_dma_fence_array_work(struct irq_work *wrk)
62 {
63 	struct dma_fence_array *array = container_of(wrk, typeof(*array), work);
64 
65 	dma_fence_array_clear_pending_error(array);
66 
67 	dma_fence_signal(&array->base);
68 	dma_fence_put(&array->base);
69 }
70 
71 static void dma_fence_array_cb_func(struct dma_fence *f,
72 				    struct dma_fence_cb *cb)
73 {
74 	struct dma_fence_array_cb *array_cb =
75 		container_of(cb, struct dma_fence_array_cb, cb);
76 	struct dma_fence_array *array = array_cb->array;
77 
78 	dma_fence_array_set_pending_error(array, f->error);
79 
80 	if (atomic_dec_and_test(&array->num_pending))
81 		irq_work_queue(&array->work);
82 	else
83 		dma_fence_put(&array->base);
84 }
85 
/*
 * Called when a waiter needs the array fence to signal: install
 * dma_fence_array_cb_func() on every member fence.  num_pending was
 * preset at creation time (1 for signal-on-any, num_fences otherwise).
 *
 * Returns true if the array is still pending, false if it is already
 * complete (every needed member had signaled before or during this call).
 */
static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
{
	struct dma_fence_array *array = to_dma_fence_array(fence);
	/* Callback slots were allocated contiguously after the array. */
	struct dma_fence_array_cb *cb = (void *)(&array[1]);
	unsigned i;

	for (i = 0; i < array->num_fences; ++i) {
		cb[i].array = array;
		/*
		 * As we may report that the fence is signaled before all
		 * callbacks are complete, we need to take an additional
		 * reference count on the array so that we do not free it too
		 * early. The core fence handling will only hold the reference
		 * until we signal the array as complete (but that is now
		 * insufficient).
		 */
		dma_fence_get(&array->base);
		if (dma_fence_add_callback(array->fences[i], &cb[i].cb,
					   dma_fence_array_cb_func)) {
			/* Member already signaled: no callback installed. */
			int error = array->fences[i]->error;

			dma_fence_array_set_pending_error(array, error);
			/* Drop the ref the skipped callback would have consumed. */
			dma_fence_put(&array->base);
			if (atomic_dec_and_test(&array->num_pending)) {
				dma_fence_array_clear_pending_error(array);
				return false;
			}
		}
	}

	return true;
}
118 
119 static bool dma_fence_array_signaled(struct dma_fence *fence)
120 {
121 	struct dma_fence_array *array = to_dma_fence_array(fence);
122 
123 	return atomic_read(&array->num_pending) <= 0;
124 }
125 
126 static void dma_fence_array_release(struct dma_fence *fence)
127 {
128 	struct dma_fence_array *array = to_dma_fence_array(fence);
129 	unsigned i;
130 
131 	for (i = 0; i < array->num_fences; ++i)
132 		dma_fence_put(array->fences[i]);
133 
134 	kfree(array->fences);
135 	dma_fence_free(fence);
136 }
137 
138 const struct dma_fence_ops dma_fence_array_ops = {
139 	.get_driver_name = dma_fence_array_get_driver_name,
140 	.get_timeline_name = dma_fence_array_get_timeline_name,
141 	.enable_signaling = dma_fence_array_enable_signaling,
142 	.signaled = dma_fence_array_signaled,
143 	.wait = dma_fence_default_wait,
144 	.release = dma_fence_array_release,
145 };
146 EXPORT_SYMBOL(dma_fence_array_ops);
147 
148 /**
149  * dma_fence_array_create - Create a custom fence array
150  * @num_fences:		[in]	number of fences to add in the array
151  * @fences:		[in]	array containing the fences
152  * @context:		[in]	fence context to use
153  * @seqno:		[in]	sequence number to use
154  * @signal_on_any:	[in]	signal on any fence in the array
155  *
156  * Allocate a dma_fence_array object and initialize the base fence with
157  * dma_fence_init().
158  * In case of error it returns NULL.
159  *
160  * The caller should allocate the fences array with num_fences size
161  * and fill it with the fences it wants to add to the object. Ownership of this
162  * array is taken and dma_fence_put() is used on each fence on release.
163  *
164  * If @signal_on_any is true the fence array signals if any fence in the array
165  * signals, otherwise it signals when all fences in the array signal.
166  */
167 struct dma_fence_array *dma_fence_array_create(int num_fences,
168 					       struct dma_fence **fences,
169 					       u64 context, unsigned seqno,
170 					       bool signal_on_any)
171 {
172 	struct dma_fence_array *array;
173 	size_t size = sizeof(*array);
174 
175 	/* Allocate the callback structures behind the array. */
176 	size += num_fences * sizeof(struct dma_fence_array_cb);
177 	array = kzalloc(size, GFP_KERNEL);
178 	if (!array)
179 		return NULL;
180 
181 	lockinit(&array->lock, "ldmbfal", 0, 0);
182 	dma_fence_init(&array->base, &dma_fence_array_ops, &array->lock,
183 		       context, seqno);
184 	init_irq_work(&array->work, irq_dma_fence_array_work);
185 
186 	array->num_fences = num_fences;
187 	atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
188 	array->fences = fences;
189 
190 	array->base.error = PENDING_ERROR;
191 
192 	return array;
193 }
194 EXPORT_SYMBOL(dma_fence_array_create);
195