xref: /netbsd/sys/rump/librump/rumpkern/threads.c (revision d7489322)
1*d7489322Sriastradh /*	$NetBSD: threads.c,v 1.27 2020/08/01 22:30:57 riastradh Exp $	*/
2c03306bcSpooka 
3c03306bcSpooka /*
4c03306bcSpooka  * Copyright (c) 2007-2009 Antti Kantee.  All Rights Reserved.
5c03306bcSpooka  *
6c03306bcSpooka  * Development of this software was supported by
7c03306bcSpooka  * The Finnish Cultural Foundation.
8c03306bcSpooka  *
9c03306bcSpooka  * Redistribution and use in source and binary forms, with or without
10c03306bcSpooka  * modification, are permitted provided that the following conditions
11c03306bcSpooka  * are met:
12c03306bcSpooka  * 1. Redistributions of source code must retain the above copyright
13c03306bcSpooka  *    notice, this list of conditions and the following disclaimer.
14c03306bcSpooka  * 2. Redistributions in binary form must reproduce the above copyright
15c03306bcSpooka  *    notice, this list of conditions and the following disclaimer in the
16c03306bcSpooka  *    documentation and/or other materials provided with the distribution.
17c03306bcSpooka  *
18c03306bcSpooka  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
19c03306bcSpooka  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20c03306bcSpooka  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21c03306bcSpooka  * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22c03306bcSpooka  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23c03306bcSpooka  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24c03306bcSpooka  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25c03306bcSpooka  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26c03306bcSpooka  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27c03306bcSpooka  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28c03306bcSpooka  * SUCH DAMAGE.
29c03306bcSpooka  */
30c03306bcSpooka 
31c03306bcSpooka #include <sys/cdefs.h>
32*d7489322Sriastradh __KERNEL_RCSID(0, "$NetBSD: threads.c,v 1.27 2020/08/01 22:30:57 riastradh Exp $");
33c03306bcSpooka 
34c03306bcSpooka #include <sys/param.h>
359970bb9eSpooka #include <sys/atomic.h>
36c03306bcSpooka #include <sys/kmem.h>
37c03306bcSpooka #include <sys/kthread.h>
38c91a5143Spooka #include <sys/malloc.h>
39c03306bcSpooka #include <sys/systm.h>
4042ab70ebSpooka #include <sys/queue.h>
41c03306bcSpooka 
42ff225a39Spooka #include <rump-sys/kern.h>
43c03306bcSpooka 
44ff225a39Spooka #include <rump/rumpuser.h>
45c03306bcSpooka 
/*
 * Descriptor handed to a newly created host thread: the function to
 * run, its argument, and the bookkeeping used to gate when the thread
 * may actually start executing.
 */
struct thrdesc {
	void (*f)(void *);		/* thread entry point */
	void *arg;			/* argument passed to f */
	struct lwp *newlwp;		/* lwp backing the new thread */
	int runnable;			/* 0 = wait, 1 = run, -1 = die (lwp_create path) */

	TAILQ_ENTRY(thrdesc) entries;	/* linkage on the newthr queue */
};
54c03306bcSpooka 
static bool threads_are_go;		/* set once all CPUs have attached */
static struct rumpuser_mtx *thrmtx;	/* protects threads_are_go and newthr */
static struct rumpuser_cv *thrcv;	/* signalled on go / runnable changes */
static TAILQ_HEAD(, thrdesc) newthr;	/* lwp_create()'d threads awaiting permission */
59b3ce1408Spooka 
/*
 * Host-thread trampoline for kthread_create(): waits until the rump
 * kernel has fully bootstrapped, schedules the new lwp onto a virtual
 * CPU, and then calls the kthread's entry point.  Never returns; the
 * kthread is expected to terminate via kthread_exit().
 */
static void *
threadbouncer(void *arg)
{
	struct thrdesc *td = arg;
	struct lwp *l = td->newlwp;
	void (*f)(void *);
	void *thrarg;

	/* copy these out before td is freed below */
	f = td->f;
	thrarg = td->arg;

	/* don't allow threads to run before all CPUs have fully attached */
	if (!threads_are_go) {
		rumpuser_mutex_enter_nowrap(thrmtx);
		while (!threads_are_go) {
			rumpuser_cv_wait_nowrap(thrcv, thrmtx);
		}
		rumpuser_mutex_exit(thrmtx);
	}

	/* schedule ourselves */
	rump_lwproc_curlwp_set(l);
	rump_schedule();

	/* free dance struct (intr-safe allocation, see kthread_create()) */
	kmem_intr_free(td, sizeof(*td));

	/* non-MPSAFE kthreads run with the big lock held */
	if ((curlwp->l_pflag & LP_MPSAFE) == 0)
		KERNEL_LOCK(1, NULL);

	f(thrarg);

	panic("unreachable, should kthread_exit()");
}
94c03306bcSpooka 
95b3ce1408Spooka void
rump_thread_init(void)96b3ce1408Spooka rump_thread_init(void)
97b3ce1408Spooka {
98b3ce1408Spooka 
99e204757dSpooka 	rumpuser_mutex_init(&thrmtx, RUMPUSER_MTX_SPIN);
100b3ce1408Spooka 	rumpuser_cv_init(&thrcv);
10142ab70ebSpooka 	TAILQ_INIT(&newthr);
102b3ce1408Spooka }
103b3ce1408Spooka 
104b3ce1408Spooka void
rump_thread_allow(struct lwp * l)10542ab70ebSpooka rump_thread_allow(struct lwp *l)
106b3ce1408Spooka {
10742ab70ebSpooka 	struct thrdesc *td;
108b3ce1408Spooka 
109b3ce1408Spooka 	rumpuser_mutex_enter(thrmtx);
11042ab70ebSpooka 	if (l == NULL) {
111b3ce1408Spooka 		threads_are_go = true;
11242ab70ebSpooka 	} else {
11342ab70ebSpooka 		TAILQ_FOREACH(td, &newthr, entries) {
11442ab70ebSpooka 			if (td->newlwp == l) {
11542ab70ebSpooka 				td->runnable = 1;
11642ab70ebSpooka 				break;
11742ab70ebSpooka 			}
11842ab70ebSpooka 		}
11942ab70ebSpooka 	}
120b3ce1408Spooka 	rumpuser_cv_broadcast(thrcv);
121b3ce1408Spooka 	rumpuser_mutex_exit(thrmtx);
122b3ce1408Spooka }
123b3ce1408Spooka 
/*
 * Threads silently not created when the rump kernel runs without
 * thread support (!rump_threads).  t_ncmp selects prefix matching
 * (strncmp) instead of an exact name comparison.
 */
static struct {
	const char *t_name;
	bool t_ncmp;
} nothreads[] = {
	{ "vrele", false },
	{ "vdrain", false },
	{ "cachegc", false },
	{ "nfssilly", false },
	{ "unpgc", false },
	{ "pmf", true },
	{ "xcall", true },
};
136bbdefb24Spooka 
/*
 * Create a kernel thread.  The interface matches the native
 * kthread_create(9), but the thread is backed by a host thread made
 * through a rumpuser hypercall and bounced through threadbouncer().
 *
 * KTHREAD_MPSAFE, KTHREAD_INTR and KTHREAD_MUSTJOIN are honoured;
 * pri and ci are passed to the hypervisor as scheduling hints.  The
 * thread name is formatted from fmt/... and is mandatory when
 * rump_threads is enabled.
 *
 * Returns 0 on success (also when the thread is deliberately not
 * created), or the error from the thread creation hypercall.
 */
int
kthread_create(pri_t pri, int flags, struct cpu_info *ci,
	void (*func)(void *), void *arg, lwp_t **newlp, const char *fmt, ...)
{
	char thrstore[MAXCOMLEN];
	const char *thrname = NULL;
	va_list ap;
	struct thrdesc *td;
	struct lwp *l;
	int rv;

	thrstore[0] = '\0';
	if (fmt) {
		va_start(ap, fmt);
		vsnprintf(thrstore, sizeof(thrstore), fmt, ap);
		va_end(ap);
		thrname = thrstore;
	}

	/*
	 * We don't want a module unload thread.
	 * (XXX: yes, this is a kludge too, and the kernel should
	 * have a more flexible method for configuring which threads
	 * we want).
	 */
	if (strcmp(thrstore, "modunload") == 0) {
		return 0;
	}

	if (!rump_threads) {
		bool matched;
		int i;

		/* do we want to fake it? */
		for (i = 0; i < __arraycount(nothreads); i++) {
			if (nothreads[i].t_ncmp) {
				matched = strncmp(thrstore, nothreads[i].t_name,
				    strlen(nothreads[i].t_name)) == 0;
			} else {
				matched = strcmp(thrstore,
				    nothreads[i].t_name) == 0;
			}
			if (matched) {
				aprint_error("rump kernel threads not enabled, "
				    "%s not functional\n", nothreads[i].t_name);
				return 0;
			}
		}
		panic("threads not available");
	}
	KASSERT(fmt != NULL);

	/*
	 * Allocate with intr-safe allocator, given that we may be
	 * creating interrupt threads.
	 */
	td = kmem_intr_alloc(sizeof(*td), KM_SLEEP);
	td->f = func;
	td->arg = arg;
	td->newlwp = l = rump__lwproc_alloclwp(&proc0);
	l->l_flag |= LW_SYSTEM;
	if (flags & KTHREAD_MPSAFE)
		l->l_pflag |= LP_MPSAFE;
	if (flags & KTHREAD_INTR)
		l->l_pflag |= LP_INTR;
	if (ci) {
		/* bind the thread to the requested virtual CPU */
		l->l_pflag |= LP_BOUND;
		l->l_target_cpu = ci;
	}
	if (thrname) {
		l->l_name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
		strlcpy(l->l_name, thrname, MAXCOMLEN);
	}

	rv = rumpuser_thread_create(threadbouncer, td, thrname,
	    (flags & KTHREAD_MUSTJOIN) == KTHREAD_MUSTJOIN,
	    pri, ci ? ci->ci_index : -1, &l->l_ctxlink);
	if (rv)
		return rv; /* XXX: td and the new lwp are leaked here */

	if (newlp) {
		*newlp = l;
	} else {
		/* a joinable thread must have someone to wait on it */
		KASSERT((flags & KTHREAD_MUSTJOIN) == 0);
	}

	return 0;
}
225c03306bcSpooka 
/*
 * Terminate the calling kthread: drop the big lock for non-MPSAFE
 * threads, detach the lwp, and exit the backing host thread.
 * Never returns; ecode is unused in this implementation.
 */
void
kthread_exit(int ecode)
{

	if ((curlwp->l_pflag & LP_MPSAFE) == 0)
		KERNEL_UNLOCK_LAST(NULL);
	rump_lwproc_releaselwp();
	/* unschedule includes membar */
	rump_unschedule();
	rumpuser_thread_exit();
}
2379970bb9eSpooka 
2389970bb9eSpooka int
kthread_join(struct lwp * l)2399970bb9eSpooka kthread_join(struct lwp *l)
2409970bb9eSpooka {
2419970bb9eSpooka 	int rv;
2429970bb9eSpooka 
2439970bb9eSpooka 	KASSERT(l->l_ctxlink != NULL);
2449970bb9eSpooka 	rv = rumpuser_thread_join(l->l_ctxlink);
2459970bb9eSpooka 	membar_consumer();
2469970bb9eSpooka 
2479970bb9eSpooka 	return rv;
2489970bb9eSpooka }
24942ab70ebSpooka 
250*d7489322Sriastradh int
kthread_fpu_enter(void)251*d7489322Sriastradh kthread_fpu_enter(void)
252*d7489322Sriastradh {
253*d7489322Sriastradh 	struct lwp *l = curlwp;
254*d7489322Sriastradh 	int s;
255*d7489322Sriastradh 
256*d7489322Sriastradh 	KASSERTMSG(l->l_flag & LW_SYSTEM,
257*d7489322Sriastradh 	    "%s is allowed only in kthreads", __func__);
258*d7489322Sriastradh 	s = l->l_flag & LW_SYSTEM_FPU;
259*d7489322Sriastradh 	l->l_flag |= LW_SYSTEM_FPU;
260*d7489322Sriastradh 
261*d7489322Sriastradh 	return s;
262*d7489322Sriastradh }
263*d7489322Sriastradh 
/*
 * Restore the FPU state saved by a matching kthread_fpu_enter().
 * s must be the cookie returned by that call: either 0 or
 * LW_SYSTEM_FPU.
 */
void
kthread_fpu_exit(int s)
{
	struct lwp *l = curlwp;

	KASSERT(s == (s & LW_SYSTEM_FPU));
	KASSERTMSG(l->l_flag & LW_SYSTEM,
	    "%s is allowed only in kthreads", __func__);
	KASSERT(l->l_flag & LW_SYSTEM_FPU);
	/*
	 * LW_SYSTEM_FPU is currently set (asserted above), so xoring
	 * with (s ^ LW_SYSTEM_FPU) clears the bit when s == 0 and
	 * leaves it set when s == LW_SYSTEM_FPU.
	 */
	l->l_flag ^= s ^ LW_SYSTEM_FPU;
}
275*d7489322Sriastradh 
27642ab70ebSpooka /*
27742ab70ebSpooka  * Create a non-kernel thread that is scheduled by a rump kernel hypercall.
27842ab70ebSpooka  *
27942ab70ebSpooka  * Sounds strange and out-of-place?  yup yup yup.  the original motivation
28042ab70ebSpooka  * for this was aio.  This is a very infrequent code path in rump kernels.
28142ab70ebSpooka  * XXX: threads created with lwp_create() are eternal for local clients.
28242ab70ebSpooka  * however, they are correctly reaped for remote clients with process exit.
28342ab70ebSpooka  */
/*
 * Host-thread trampoline for lwp_create(): blocks until the lwp is
 * marked runnable by rump_thread_allow() or doomed by lwp_exit(),
 * then either runs the payload function or tears the lwp down.
 * Never returns.
 */
static void *
lwpbouncer(void *arg)
{
	struct thrdesc *td = arg;
	struct lwp *l = td->newlwp;
	void (*f)(void *);
	void *thrarg;
	int run;

	/* copy these out before td is freed below */
	f = td->f;
	thrarg = td->arg;

	/* do not run until we've been enqueued */
	rumpuser_mutex_enter_nowrap(thrmtx);
	while ((run = td->runnable) == 0) {
		rumpuser_cv_wait_nowrap(thrcv, thrmtx);
	}
	rumpuser_mutex_exit(thrmtx);

	/* schedule ourselves */
	rump_lwproc_curlwp_set(l);
	rump_schedule();
	/*
	 * NOTE(review): td is freed here without being TAILQ_REMOVE'd
	 * from newthr, so the queue retains a dangling pointer that a
	 * later rump_thread_allow()/lwp_exit() walk could touch --
	 * confirm callers guarantee this cannot happen.
	 */
	kmem_free(td, sizeof(*td));

	/* should we just die instead? */
	if (run == -1) {
		rump_lwproc_releaselwp();
		lwp_userret(l);
		panic("lwpbouncer reached unreachable");
	}

	/* run, and don't come back! */
	f(thrarg);
	panic("lwp return from worker not supported");
}
31942ab70ebSpooka 
/*
 * Create a non-kernel lwp backed by a host thread.  The thread parks
 * in lwpbouncer() and does not start running until rump_thread_allow()
 * is called for the new lwp (or lwp_exit() marks it for death).
 *
 * Only flags == 0 is supported.  Returns 0 and stores the new lwp in
 * *newlwpp; panics if the host thread cannot be created.  The uaddr,
 * stack, sclass, sigmask and sigstk arguments are accepted for
 * interface compatibility but unused here.
 */
int
lwp_create(struct lwp *l1, struct proc *p2, vaddr_t uaddr, int flags,
    void *stack, size_t stacksize, void (*func)(void *), void *arg,
    struct lwp **newlwpp, int sclass, const sigset_t *sigmask,
    const stack_t *sigstk)
{
	struct thrdesc *td;
	struct lwp *l;
	int rv;

	if (flags)
		panic("lwp_create: flags not supported by this implementation");
	td = kmem_alloc(sizeof(*td), KM_SLEEP);
	td->f = func;
	td->arg = arg;
	td->runnable = 0;
	td->newlwp = l = rump__lwproc_alloclwp(p2);

	/* enqueue before thread creation so lwpbouncer can find td */
	rumpuser_mutex_enter_nowrap(thrmtx);
	TAILQ_INSERT_TAIL(&newthr, td, entries);
	rumpuser_mutex_exit(thrmtx);

	rv = rumpuser_thread_create(lwpbouncer, td, p2->p_comm, 0,
	    PRI_USER, -1, NULL);
	if (rv)
		panic("rumpuser_thread_create failed"); /* XXX */

	*newlwpp = l;
	return 0;
}
35042ab70ebSpooka 
/*
 * Mark a not-yet-running lwp_create()'d lwp for death; the backing
 * host thread reaps itself via lwpbouncer()'s run == -1 path.
 * Panics if no pending thrdesc for the lwp is on the queue.
 *
 * NOTE(review): runnable is set to -1 without signalling thrcv, so a
 * bouncer sleeping in rumpuser_cv_wait_nowrap() is not woken from
 * here -- verify that another path always broadcasts the condvar.
 */
void
lwp_exit(struct lwp *l)
{
	struct thrdesc *td;

	rumpuser_mutex_enter_nowrap(thrmtx);
	TAILQ_FOREACH(td, &newthr, entries) {
		if (td->newlwp == l) {
			td->runnable = -1;
			break;
		}
	}
	rumpuser_mutex_exit(thrmtx);

	/* TAILQ_FOREACH leaves td == NULL when no entry matched */
	if (td == NULL)
		panic("lwp_exit: could not find %p\n", l);
}
36842ab70ebSpooka 
36942ab70ebSpooka void
lwp_userret(struct lwp * l)37042ab70ebSpooka lwp_userret(struct lwp *l)
37142ab70ebSpooka {
37242ab70ebSpooka 
37342ab70ebSpooka 	if ((l->l_flag & LW_RUMP_QEXIT) == 0)
37442ab70ebSpooka 		return;
37542ab70ebSpooka 
37642ab70ebSpooka 	/* ok, so we should die */
37742ab70ebSpooka 	rump_unschedule();
37842ab70ebSpooka 	rumpuser_thread_exit();
37942ab70ebSpooka }
380