/* xref: /openbsd/sys/dev/pci/drm/scheduler/sched_fence.c (revision d415bd75) */
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/kthread.h>
25 #include <linux/module.h>
26 #include <linux/sched.h>
27 #include <linux/slab.h>
28 #include <linux/wait.h>
29 
30 #include <drm/gpu_scheduler.h>
31 
32 #include <sys/pool.h>
33 
34 static struct pool sched_fence_slab;
35 
/*
 * drm_sched_fence_slab_init - set up the allocator backing scheduler fences.
 *
 * On Linux this creates a dedicated, hardware-cache-aligned kmem cache;
 * in the OpenBSD build a pool(9) initialized at CACHELINESIZE alignment
 * and IPL_TTY serves the same role.
 *
 * Returns 0 on success, or -ENOMEM if the Linux kmem cache could not be
 * created (pool_init() does not fail).
 */
int __init drm_sched_fence_slab_init(void)
{
#ifdef __linux__
	sched_fence_slab = kmem_cache_create(
		"drm_sched_fence", sizeof(struct drm_sched_fence), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!sched_fence_slab)
		return -ENOMEM;
#else
	pool_init(&sched_fence_slab, sizeof(struct drm_sched_fence),
	    CACHELINESIZE, IPL_TTY, 0, "drm_sched_fence", NULL);
#endif

	return 0;
}
51 
/*
 * drm_sched_fence_slab_fini - tear down the fence allocator.
 *
 * rcu_barrier() waits for all pending drm_sched_fence_free_rcu()
 * callbacks to run, so no fence is still queued for freeing when the
 * cache/pool is destroyed.
 */
void __exit drm_sched_fence_slab_fini(void)
{
	rcu_barrier();
#ifdef __linux__
	kmem_cache_destroy(sched_fence_slab);
#else
	pool_destroy(&sched_fence_slab);
#endif
}
61 
/* Signal the embedded "scheduled" dma_fence of @fence. */
void drm_sched_fence_scheduled(struct drm_sched_fence *fence)
{
	dma_fence_signal(&fence->scheduled);
}
66 
/* Signal the embedded "finished" dma_fence of @fence. */
void drm_sched_fence_finished(struct drm_sched_fence *fence)
{
	dma_fence_signal(&fence->finished);
}
71 
/* dma_fence_ops::get_driver_name - constant driver name for both fences. */
static const char *drm_sched_fence_get_driver_name(struct dma_fence *fence)
{
	return "drm_sched";
}
76 
77 static const char *drm_sched_fence_get_timeline_name(struct dma_fence *f)
78 {
79 	struct drm_sched_fence *fence = to_drm_sched_fence(f);
80 	return (const char *)fence->sched->name;
81 }
82 
83 static void drm_sched_fence_free_rcu(struct rcu_head *rcu)
84 {
85 	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
86 	struct drm_sched_fence *fence = to_drm_sched_fence(f);
87 
88 	if (!WARN_ON_ONCE(!fence)) {
89 #ifdef __linux__
90 		kmem_cache_free(sched_fence_slab, fence);
91 #else
92 		pool_put(&sched_fence_slab, fence);
93 #endif
94 	}
95 }
96 
97 /**
98  * drm_sched_fence_free - free up an uninitialized fence
99  *
100  * @fence: fence to free
101  *
102  * Free up the fence memory. Should only be used if drm_sched_fence_init()
103  * has not been called yet.
104  */
105 void drm_sched_fence_free(struct drm_sched_fence *fence)
106 {
107 	/* This function should not be called if the fence has been initialized. */
108 	if (!WARN_ON_ONCE(fence->sched)) {
109 #ifdef __linux__
110 		kmem_cache_free(sched_fence_slab, fence);
111 #else
112 		pool_put(&sched_fence_slab, fence);
113 #endif
114 	}
115 }
116 
117 /**
118  * drm_sched_fence_release_scheduled - callback that fence can be freed
119  *
120  * @f: fence
121  *
122  * This function is called when the reference count becomes zero.
123  * It just RCU schedules freeing up the fence.
124  */
125 static void drm_sched_fence_release_scheduled(struct dma_fence *f)
126 {
127 	struct drm_sched_fence *fence = to_drm_sched_fence(f);
128 
129 	dma_fence_put(fence->parent);
130 	call_rcu(&fence->finished.rcu, drm_sched_fence_free_rcu);
131 }
132 
133 /**
134  * drm_sched_fence_release_finished - drop extra reference
135  *
136  * @f: fence
137  *
138  * Drop the extra reference from the scheduled fence to the base fence.
139  */
140 static void drm_sched_fence_release_finished(struct dma_fence *f)
141 {
142 	struct drm_sched_fence *fence = to_drm_sched_fence(f);
143 
144 	dma_fence_put(&fence->scheduled);
145 }
146 
/* dma_fence ops for the "scheduled" half of a scheduler fence. */
static const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
	.get_driver_name = drm_sched_fence_get_driver_name,
	.get_timeline_name = drm_sched_fence_get_timeline_name,
	.release = drm_sched_fence_release_scheduled,
};
152 
/* dma_fence ops for the "finished" half of a scheduler fence. */
static const struct dma_fence_ops drm_sched_fence_ops_finished = {
	.get_driver_name = drm_sched_fence_get_driver_name,
	.get_timeline_name = drm_sched_fence_get_timeline_name,
	.release = drm_sched_fence_release_finished,
};
158 
159 struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
160 {
161 	if (f->ops == &drm_sched_fence_ops_scheduled)
162 		return container_of(f, struct drm_sched_fence, scheduled);
163 
164 	if (f->ops == &drm_sched_fence_ops_finished)
165 		return container_of(f, struct drm_sched_fence, finished);
166 
167 	return NULL;
168 }
169 EXPORT_SYMBOL(to_drm_sched_fence);
170 
171 struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity,
172 					      void *owner)
173 {
174 	struct drm_sched_fence *fence = NULL;
175 
176 #ifdef __linux__
177 	fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
178 #else
179 	fence = pool_get(&sched_fence_slab, PR_WAITOK | PR_ZERO);
180 #endif
181 	if (fence == NULL)
182 		return NULL;
183 
184 	fence->owner = owner;
185 	mtx_init(&fence->lock, IPL_TTY);
186 
187 	return fence;
188 }
189 
190 void drm_sched_fence_init(struct drm_sched_fence *fence,
191 			  struct drm_sched_entity *entity)
192 {
193 	unsigned seq;
194 
195 	fence->sched = entity->rq->sched;
196 	seq = atomic_inc_return(&entity->fence_seq);
197 	dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled,
198 		       &fence->lock, entity->fence_context, seq);
199 	dma_fence_init(&fence->finished, &drm_sched_fence_ops_finished,
200 		       &fence->lock, entity->fence_context + 1, seq);
201 }
202 
/* Module plumbing: create/destroy the fence allocator on load/unload. */
module_init(drm_sched_fence_slab_init);
module_exit(drm_sched_fence_slab_fini);

MODULE_DESCRIPTION("DRM GPU scheduler");
MODULE_LICENSE("GPL and additional rights");
208