/*	$NetBSD: sched_fence.c,v 1.7 2022/07/28 10:44:46 riastradh Exp $	*/

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sched_fence.c,v 1.7 2022/07/28 10:44:46 riastradh Exp $");

#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <drm/gpu_scheduler.h>

static struct kmem_cache *sched_fence_slab;
#ifdef __NetBSD__	/* XXX module init/fini */
#define __init
#define __exit
#endif

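/**
 * drm_sched_fence_slab_init - create the scheduler fence slab cache
 *
 * Create the kmem cache backing all struct drm_sched_fence allocations.
 * Returns 0 on success or -ENOMEM if the cache could not be created.
 */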
static int __init drm_sched_fence_slab_init(void)
{
        sched_fence_slab = kmem_cache_create(
                "drm_sched_fence", sizeof(struct drm_sched_fence), 0,
                SLAB_HWCACHE_ALIGN, NULL);
        if (!sched_fence_slab)
                return -ENOMEM;

        return 0;
}

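/**
 * drm_sched_fence_slab_fini - destroy the scheduler fence slab cache
 *
 * Wait for pending RCU callbacks (drm_sched_fence_free) with
 * rcu_barrier() before destroying the cache, so no fence is freed into
 * a cache that no longer exists.
 */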
static void __exit drm_sched_fence_slab_fini(void)
{
        rcu_barrier();
        kmem_cache_destroy(sched_fence_slab);
}

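/**
 * drm_sched_fence_scheduled - signal the scheduled half of the fence
 *
 * @fence: scheduler fence
 *
 * Signal &fence->scheduled and emit a trace message noting whether it
 * was newly signaled or had already been signaled.
 */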
void drm_sched_fence_scheduled(struct drm_sched_fence *fence)
{
        int ret = dma_fence_signal(&fence->scheduled);

        if (!ret)
                DMA_FENCE_TRACE(&fence->scheduled,
                                "signaled from irq context\n");
        else
                DMA_FENCE_TRACE(&fence->scheduled,
                                "was already signaled\n");
}

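/**
 * drm_sched_fence_finished - signal the finished half of the fence
 *
 * @fence: scheduler fence
 *
 * Signal &fence->finished and emit a trace message noting whether it
 * was newly signaled or had already been signaled.
 */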
void drm_sched_fence_finished(struct drm_sched_fence *fence)
{
        int ret = dma_fence_signal(&fence->finished);

        if (!ret)
                DMA_FENCE_TRACE(&fence->finished,
                                "signaled from irq context\n");
        else
                DMA_FENCE_TRACE(&fence->finished,
                                "was already signaled\n");
}

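/*
 * dma_fence_ops callback: all scheduler fences report the same
 * "drm_sched" driver name.
 */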
static const char *drm_sched_fence_get_driver_name(struct dma_fence *fence)
{
        return "drm_sched";
}

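/*
 * dma_fence_ops callback: the timeline name is the name of the
 * scheduler instance the fence belongs to.
 */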
static const char *drm_sched_fence_get_timeline_name(struct dma_fence *f)
{
        struct drm_sched_fence *fence = to_drm_sched_fence(f);
        return (const char *)fence->sched->name;
}

/**
 * drm_sched_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void drm_sched_fence_free(struct rcu_head *rcu)
{
        struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
        struct drm_sched_fence *fence = to_drm_sched_fence(f);

        spin_lock_destroy(&fence->lock);

        kmem_cache_free(sched_fence_slab, fence);
}

/**
 * drm_sched_fence_release_scheduled - callback run once the fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It drops the reference to the parent fence and schedules freeing
 * the scheduler fence via RCU.
 */
static void drm_sched_fence_release_scheduled(struct dma_fence *f)
{
        struct drm_sched_fence *fence = to_drm_sched_fence(f);

        dma_fence_put(fence->parent);
        call_rcu(&fence->finished.rcu, drm_sched_fence_free);
}

/**
 * drm_sched_fence_release_finished - drop extra reference
 *
 * @f: fence
 *
 * Drop the extra reference that the finished fence holds on the
 * scheduled fence.
 */
static void drm_sched_fence_release_finished(struct dma_fence *f)
{
        struct drm_sched_fence *fence = to_drm_sched_fence(f);

        dma_fence_put(&fence->scheduled);
}

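/*
 * dma_fence_ops for the two halves of a scheduler fence.  They differ
 * only in their release callbacks: releasing the finished fence drops
 * its reference on the scheduled fence, and releasing the scheduled
 * fence then frees the whole drm_sched_fence after an RCU grace period.
 */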
static const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
        .get_driver_name = drm_sched_fence_get_driver_name,
        .get_timeline_name = drm_sched_fence_get_timeline_name,
        .release = drm_sched_fence_release_scheduled,
};

static const struct dma_fence_ops drm_sched_fence_ops_finished = {
        .get_driver_name = drm_sched_fence_get_driver_name,
        .get_timeline_name = drm_sched_fence_get_timeline_name,
        .release = drm_sched_fence_release_finished,
};

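/**
 * to_drm_sched_fence - map a dma_fence to its containing drm_sched_fence
 *
 * @f: fence
 *
 * Returns the drm_sched_fence embedding @f, or NULL if @f is not a
 * scheduler fence (its ops match neither of the tables above).
 */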
struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
{
        if (f->ops == &drm_sched_fence_ops_scheduled)
                return container_of(f, struct drm_sched_fence, scheduled);

        if (f->ops == &drm_sched_fence_ops_finished)
                return container_of(f, struct drm_sched_fence, finished);

        return NULL;
}
EXPORT_SYMBOL(to_drm_sched_fence);

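/**
 * drm_sched_fence_create - allocate and initialize a scheduler fence
 *
 * @entity: scheduler entity the fence belongs to
 * @owner: opaque owner pointer stored in the fence
 *
 * Allocate a drm_sched_fence from the slab cache and initialize its
 * scheduled and finished dma_fences with the next sequence number of
 * @entity.  The finished fence uses @entity's fence context plus one,
 * so the two halves live on distinct timelines.  Returns NULL if the
 * allocation fails.
 */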
struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity,
                                               void *owner)
{
        struct drm_sched_fence *fence = NULL;
        unsigned seq;

        fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
        if (fence == NULL)
                return NULL;

        fence->owner = owner;
        fence->sched = entity->rq->sched;
        spin_lock_init(&fence->lock);

        seq = atomic_inc_return(&entity->fence_seq);
        dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled,
                       &fence->lock, entity->fence_context, seq);
        dma_fence_init(&fence->finished, &drm_sched_fence_ops_finished,
                       &fence->lock, entity->fence_context + 1, seq);

        return fence;
}

module_init(drm_sched_fence_slab_init);
module_exit(drm_sched_fence_slab_fini);

MODULE_DESCRIPTION("DRM GPU scheduler");
MODULE_LICENSE("GPL and additional rights");

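/*
 * NetBSD module glue: MODULE_CMD_INIT and MODULE_CMD_FINI map onto the
 * slab init/fini routines above, and auto-unload requests are refused
 * with EBUSY.
 */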
#ifdef __NetBSD__
MODULE(MODULE_CLASS_MISC, drmkms_sched, "drmkms,drmkms_linux");

static int
drmkms_sched_modcmd(modcmd_t cmd, void *arg)
{

        switch (cmd) {
        case MODULE_CMD_INIT:
                return drm_sched_fence_slab_init();
        case MODULE_CMD_FINI:
                drm_sched_fence_slab_fini();
                return 0;
        case MODULE_CMD_AUTOUNLOAD:
                return EBUSY;
        default:
                return ENOTTY;
        }
}
#endif