/*	$NetBSD: linux_kthread.c,v 1.9 2021/12/19 12:43:05 riastradh Exp $	*/

/*-
 * Copyright (c) 2021 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_kthread.c,v 1.9 2021/12/19 12:43:05 riastradh Exp $");

#include <sys/types.h>

#include <sys/condvar.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/specificdata.h>

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/spinlock.h>

#include <drm/drm_wait_netbsd.h>

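/*
 * Linux kthread_*() API, built on NetBSD kthread(9).  Unlike Linux,
 * kthread_run() here also takes the spin lock and the waitqueue that
 * the thread itself sleeps on, so that kthread_stop() and
 * kthread_park() can wake the thread under that lock.
 *
 * A minimal usage sketch -- the driver-side names (mydrv_worker, sc,
 * sc->sc_lock, sc->sc_wq) are hypothetical; only the kthread_*()
 * calls come from this file:
 *
 *	struct task_struct *t;
 *	int error;
 *
 *	t = kthread_run(mydrv_worker, sc, "mydrvkt",
 *	    &sc->sc_lock, &sc->sc_wq);
 *	if (IS_ERR(t))
 *		return PTR_ERR(t);
 *	...
 *	error = kthread_stop(t);
 *
 * where mydrv_worker() sleeps on sc->sc_wq under sc->sc_lock and
 * returns once kthread_should_stop() says so.
 */
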
struct task_struct {
	kmutex_t	kt_lock;	/* protects the flags and kt_ret */
	kcondvar_t	kt_cv;		/* broadcast on state changes */
	bool		kt_shouldstop:1;
	bool		kt_shouldpark:1;
	bool		kt_parked:1;
	bool		kt_exited:1;
	int		kt_ret;		/* value returned by kt_func */

	int		(*kt_func)(void *);	/* Linux thread function */
	void		*kt_cookie;		/* argument to kt_func */
	spinlock_t	*kt_interlock;	/* lock for the thread's waitqueue */
	drm_waitqueue_t	*kt_wq;		/* waitqueue the thread sleeps on */
	struct lwp	*kt_lwp;	/* underlying NetBSD kthread */
};

static specificdata_key_t linux_kthread_key __read_mostly = -1;

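/*
 * linux_kthread_init(), linux_kthread_fini()
 *
 *	Set up and tear down the Linux kthread shim: create and
 *	destroy the lwp-specific key used to map the current NetBSD
 *	lwp back to its Linux task_struct.
 */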
int
linux_kthread_init(void)
{
	int error;

	error = lwp_specific_key_create(&linux_kthread_key, NULL);
	if (error)
		goto out;

	/* Success!  */
	error = 0;

out:	if (error)
		linux_kthread_fini();
	return error;
}

void
linux_kthread_fini(void)
{

	if (linux_kthread_key != -1) {
		lwp_specific_key_delete(linux_kthread_key);
		linux_kthread_key = -1;
	}
}

#define	linux_kthread()	_linux_kthread(__func__)
static struct task_struct *
_linux_kthread(const char *caller)
{
	struct task_struct *T;

	T = lwp_getspecific(linux_kthread_key);
	KASSERTMSG(T != NULL, "%s must be called from Linux kthread", caller);

	return T;
}

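/*
 * linux_kthread_start(cookie)
 *
 *	NetBSD kthread entry point: record the task_struct in
 *	lwp-specific data, run the Linux thread function, and on
 *	return publish its exit status for kthread_stop() before
 *	exiting the NetBSD kthread.
 */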
static void
linux_kthread_start(void *cookie)
{
	struct task_struct *T = cookie;
	int ret;

	lwp_setspecific(linux_kthread_key, T);

	ret = (*T->kt_func)(T->kt_cookie);

	/*
	 * Mark the thread exited, set the return value, and wake any
	 * waiting kthread_stop.
	 */
	mutex_enter(&T->kt_lock);
	T->kt_exited = true;
	T->kt_ret = ret;
	cv_broadcast(&T->kt_cv);
	mutex_exit(&T->kt_lock);

	/* Exit the (NetBSD) kthread.  */
	kthread_exit(0);
}

static struct task_struct *
kthread_alloc(int (*func)(void *), void *cookie, spinlock_t *interlock,
    drm_waitqueue_t *wq)
{
	struct task_struct *T;

	T = kmem_zalloc(sizeof(*T), KM_SLEEP);

	mutex_init(&T->kt_lock, MUTEX_DEFAULT, IPL_VM);
	cv_init(&T->kt_cv, "lnxkthrd");

	T->kt_shouldstop = false;
	T->kt_shouldpark = false;
	T->kt_parked = false;
	T->kt_exited = false;
	T->kt_ret = 0;

	T->kt_func = func;
	T->kt_cookie = cookie;
	T->kt_interlock = interlock;
	T->kt_wq = wq;

	return T;
}

static void
kthread_free(struct task_struct *T)
{

	KASSERT(T->kt_exited);

	cv_destroy(&T->kt_cv);
	mutex_destroy(&T->kt_lock);
	kmem_free(T, sizeof(*T));
}

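/*
 * kthread_run(func, cookie, name, interlock, wq)
 *
 *	Create a Linux kthread named name that calls func(cookie).
 *	interlock and wq must be the spin lock and waitqueue the
 *	thread sleeps on; kthread_stop() and kthread_park() use them
 *	to wake the thread.  Returns the new task_struct on success,
 *	or an ERR_PTR()-encoded error on failure.
 */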
struct task_struct *
kthread_run(int (*func)(void *), void *cookie, const char *name,
    spinlock_t *interlock, drm_waitqueue_t *wq)
{
	struct task_struct *T;
	int error;

	T = kthread_alloc(func, cookie, interlock, wq);
	error = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
	    linux_kthread_start, T, &T->kt_lwp, "%s", name);
	if (error) {
		kthread_free(T);
		return ERR_PTR(-error); /* XXX errno NetBSD->Linux */
	}

	return T;
}

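/*
 * kthread_stop(T)
 *
 *	Ask the thread T to stop, wake it if it is parked or asleep on
 *	its waitqueue, wait for it to exit, free it, and return the
 *	value its thread function returned.  Must not be called by T
 *	itself, which would deadlock waiting for its own exit.
 */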
int
kthread_stop(struct task_struct *T)
{
	int ret;

	/* Lock order: interlock, then kthread lock.  */
	spin_lock(T->kt_interlock);
	mutex_enter(&T->kt_lock);

	/*
	 * Notify the thread that it's stopping, and wake it if it's
	 * parked or sleeping on its own waitqueue.
	 */
	T->kt_shouldpark = false;
	T->kt_shouldstop = true;
	cv_broadcast(&T->kt_cv);
	DRM_SPIN_WAKEUP_ALL(T->kt_wq, T->kt_interlock);

	/* Release the interlock while we wait for the thread to finish.  */
	spin_unlock(T->kt_interlock);

	/* Wait for the thread to finish.  */
	while (!T->kt_exited)
		cv_wait(&T->kt_cv, &T->kt_lock);

	/* Grab the return code and release the lock -- we're done.  */
	ret = T->kt_ret;
	mutex_exit(&T->kt_lock);

	/* Free the (Linux) kthread.  */
	kthread_free(T);

	/* Return what the thread returned.  */
	return ret;
}

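/*
 * kthread_should_stop()
 *
 *	True if kthread_stop() has been called on the calling kthread,
 *	meaning the thread function should return soon.
 */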
int
kthread_should_stop(void)
{
	struct task_struct *T = linux_kthread();
	bool shouldstop;

	mutex_enter(&T->kt_lock);
	shouldstop = T->kt_shouldstop;
	mutex_exit(&T->kt_lock);

	return shouldstop;
}

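/*
 * kthread_park(T)
 *
 *	Ask the thread T to park, wake it if it is asleep on its
 *	waitqueue, and, unless called from T itself, wait until it has
 *	parked itself with kthread_parkme().
 */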
void
kthread_park(struct task_struct *T)
{

	/* Lock order: interlock, then kthread lock.  */
	spin_lock(T->kt_interlock);
	mutex_enter(&T->kt_lock);

	/* Caller must not ask to park if they've already asked to stop.  */
	KASSERT(!T->kt_shouldstop);

	/* Ask the thread to park.  */
	T->kt_shouldpark = true;

	/*
	 * Ensure the thread is not sleeping on its condvar.  After
	 * this point, we are done with the interlock, which we must
	 * not hold while we wait on the kthread condvar.
	 */
	DRM_SPIN_WAKEUP_ALL(T->kt_wq, T->kt_interlock);
	spin_unlock(T->kt_interlock);

	/*
	 * Wait until the thread has issued kthread_parkme, unless we
	 * are already the thread, which Linux allows and interprets to
	 * mean don't wait.
	 */
	if (T->kt_lwp != curlwp) {
		while (!T->kt_parked)
			cv_wait(&T->kt_cv, &T->kt_lock);
	}

	/* Release the kthread lock too.  */
	mutex_exit(&T->kt_lock);
}

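/*
 * kthread_unpark(T)
 *
 *	Cancel a prior kthread_park() request and wake T so that a
 *	thread parked in kthread_parkme() resumes.
 */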
void
kthread_unpark(struct task_struct *T)
{

	mutex_enter(&T->kt_lock);
	T->kt_shouldpark = false;
	cv_broadcast(&T->kt_cv);
	mutex_exit(&T->kt_lock);
}

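/*
 * __kthread_should_park(T), kthread_should_park()
 *
 *	True if kthread_park() has been requested for T, or for the
 *	calling kthread in the argumentless form.  A thread that sees
 *	this should call kthread_parkme() at a safe point.
 */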
int
__kthread_should_park(struct task_struct *T)
{
	bool shouldpark;

	mutex_enter(&T->kt_lock);
	shouldpark = T->kt_shouldpark;
	mutex_exit(&T->kt_lock);

	return shouldpark;
}

int
kthread_should_park(void)
{
	struct task_struct *T = linux_kthread();

	return __kthread_should_park(T);
}

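/*
 * kthread_parkme()
 *
 *	Called by the kthread itself, with its interlock held, to park
 *	until kthread_unpark() or kthread_stop().  The interlock is
 *	dropped while parked and reacquired before returning.
 */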
void
kthread_parkme(void)
{
	struct task_struct *T = linux_kthread();

	assert_spin_locked(T->kt_interlock);

	spin_unlock(T->kt_interlock);
	mutex_enter(&T->kt_lock);
	while (T->kt_shouldpark) {
		T->kt_parked = true;
		cv_broadcast(&T->kt_cv);
		cv_wait(&T->kt_cv, &T->kt_lock);
		T->kt_parked = false;
	}
	mutex_exit(&T->kt_lock);
	spin_lock(T->kt_interlock);
}