1 /*	$NetBSD: kern_lwp.c,v 1.252 2023/04/09 09:18:09 riastradh Exp $	*/
2 
3 /*-
4  * Copyright (c) 2001, 2006, 2007, 2008, 2009, 2019, 2020
5  *     The NetBSD Foundation, Inc.
6  * All rights reserved.
7  *
8  * This code is derived from software contributed to The NetBSD Foundation
9  * by Nathan J. Williams, and Andrew Doran.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * Overview
35  *
36  *	Lightweight processes (LWPs) are the basic unit or thread of
37  *	execution within the kernel.  The core state of an LWP is described
38  *	by "struct lwp", also known as lwp_t.
39  *
40  *	Each LWP is contained within a process (described by "struct proc").
41  *	Every process contains at least one LWP, but may contain more.  The
42  *	process describes attributes shared among all of its LWPs such as a
43  *	private address space, global execution state (stopped, active,
44  *	zombie, ...), signal disposition and so on.  On a multiprocessor
45  *	machine, multiple LWPs may be executing concurrently in the kernel.
46  *
47  * Execution states
48  *
49  *	At any given time, an LWP has overall state that is described by
50  *	lwp::l_stat.  The states are broken into two sets below.  The first
51  *	set is guaranteed to represent the absolute, current state of the
52  *	LWP:
53  *
54  *	LSONPROC
55  *
56  *		On processor: the LWP is executing on a CPU, either in the
57  *		kernel or in user space.
58  *
59  *	LSRUN
60  *
61  *		Runnable: the LWP is parked on a run queue, and may soon be
62  *		chosen to run by an idle processor, or by a processor that
63  *		has been asked to preempt a currently running but lower
64  *		priority LWP.
65  *
66  *	LSIDL
67  *
68  *		Idle: the LWP has been created but has not yet executed, or
69  *		it has ceased executing a unit of work and is waiting to be
70  *		started again.  This state exists so that the LWP can occupy
71  *		a slot in the process & PID table, but without having to
72  *		worry about being touched; lookups of the LWP by ID will
73  *		fail while in this state.  The LWP will become visible for
74  *		lookup once its state transitions further.  Some special
75  *		kernel threads also (ab)use this state to indicate that they
76  *		are idle (soft interrupts and idle LWPs).
77  *
78  *	LSSUSPENDED:
79  *
80  *		Suspended: the LWP has had its execution suspended by
81  *		another LWP in the same process using the _lwp_suspend()
82  *		system call.  User-level LWPs also enter the suspended
83  *		state when the system is shutting down.
84  *
85  *	The second set represents a "statement of intent" on behalf of the
86  *	LWP.  The LWP may in fact be executing on a processor, or may be
87  *	sleeping or idle.  It is expected to take the necessary action to
88  *	stop executing or become "running" again within a short timeframe.
89  *	The LP_RUNNING flag in lwp::l_pflag indicates that an LWP is running.
90  *	Importantly, it indicates that its state is tied to a CPU.
91  *
92  *	LSZOMB:
93  *
94  *		Dead or dying: the LWP has released most of its resources
95  *		and is about to switch away into oblivion, or has already
96  *		switched away.  When it switches away, its few remaining
97  *		resources can be collected.
98  *
99  *	LSSLEEP:
100  *
101  *		Sleeping: the LWP has entered itself onto a sleep queue, and
102  *		has switched away or will switch away shortly to allow other
103  *		LWPs to run on the CPU.
104  *
105  *	LSSTOP:
106  *
107  *		Stopped: the LWP has been stopped as a result of a job
108  *		control signal, or as a result of the ptrace() interface.
109  *
110  *		Stopped LWPs may run briefly within the kernel to handle
111  *		signals that they receive, but will not return to user space
112  *		until their process' state is changed away from stopped.
113  *
114  *		Single LWPs within a process cannot be stopped
115  *		selectively: all actions that can stop or continue LWPs
116  *		occur at the process level.
117  *
118  * State transitions
119  *
120  *	Note that the LSSTOP state may only be set when returning to
121  *	user space in userret(), or when sleeping interruptibly.  The
122  *	LSSUSPENDED state may only be set in userret().  Before setting
123  *	those states, we try to ensure that the LWPs will release all
124  *	locks that they hold, and at a minimum try to ensure that the
125  *	LWP can be set runnable again by a signal.
126  *
127  *	LWPs may transition states in the following ways:
128  *
129  *	 RUN -------> ONPROC		ONPROC -----> RUN
130  *		    				    > SLEEP
131  *		    				    > STOPPED
132  *						    > SUSPENDED
133  *						    > ZOMB
134  *						    > IDL (special cases)
135  *
136  *	 STOPPED ---> RUN		SUSPENDED --> RUN
137  *	            > SLEEP
138  *
139  *	 SLEEP -----> ONPROC		IDL --------> RUN
140  *		    > RUN			    > SUSPENDED
141  *		    > STOPPED			    > STOPPED
142  *						    > ONPROC (special cases)
143  *
144  *	Some state transitions are only possible with kernel threads (e.g.
145  *	ONPROC -> IDL) and happen under tightly controlled circumstances
146  *	free of unwanted side effects.
147  *
148  * Migration
149  *
150  *	Migration of threads from one CPU to another may be performed
151  *	internally by the scheduler via the sched_takecpu() or
152  *	sched_catchlwp() functions.  The universal lwp_migrate() function
153  *	should be used for any other cases.  Kernel subsystems must be
154  *	aware that an LWP's CPU may change while the LWP is not locked.
155  *
156  * Locking
157  *
158  *	The majority of fields in 'struct lwp' are covered by a single,
159  *	general spin lock pointed to by lwp::l_mutex.  The locks covering
160  *	each field are documented in sys/lwp.h.
161  *
162  *	State transitions must be made with the LWP's general lock held,
163  *	and may cause the LWP's lock pointer to change.  Manipulation of
164  *	the general lock is not performed directly, but through calls to
165  *	lwp_lock(), lwp_unlock() and others.  It should be noted that the
166  *	adaptive locks are not allowed to be released while the LWP's lock
167  *	is being held (unlike for other spin-locks).
168  *
169  *	States and their associated locks:
170  *
171  *	LSIDL, LSONPROC, LSZOMB, LSSUSPENDED:
172  *
173  *		Always covered by spc_lwplock, which protects LWPs not
174  *		associated with any other sync object.  This is a per-CPU
175  *		lock and matches lwp::l_cpu.
176  *
177  *	LSRUN:
178  *
179  *		Always covered by spc_mutex, which protects the run queues.
180  *		This is a per-CPU lock and matches lwp::l_cpu.
181  *
182  *	LSSLEEP:
183  *
184  *		Covered by a lock associated with the sleep queue (sometimes
185  *		a turnstile sleep queue) that the LWP resides on.  This can
186  *		be spc_lwplock for SOBJ_SLEEPQ_NULL (an "untracked" sleep).
187  *
188  *	LSSTOP:
189  *
190  *		If the LWP was previously sleeping (l_wchan != NULL), then
191  *		l_mutex references the sleep queue lock.  If the LWP was
192  *		runnable or on the CPU when halted, or has been removed from
193  *		the sleep queue since halted, then the lock is spc_lwplock.
194  *
195  *	The lock order is as follows:
196  *
197  *		sleepq -> turnstile -> spc_lwplock -> spc_mutex
198  *
199  *	Each process has a scheduler state lock (proc::p_lock), and a
200  *	number of counters on LWPs and their states: p_nzlwps, p_nrlwps, and
201  *	so on.  When an LWP is to be entered into or removed from one of the
202  *	following states, p_lock must be held and the process wide counters
203  *	adjusted:
204  *
205  *		LSIDL, LSZOMB, LSSTOP, LSSUSPENDED
206  *
207  *	(But not always for kernel threads.  There are some special cases
208  *	as mentioned above: soft interrupts, and the idle loops.)
209  *
210  *	Note that an LWP is considered running or likely to run soon if in
211  *	one of the following states.  This affects the value of p_nrlwps:
212  *
213  *		LSRUN, LSONPROC, LSSLEEP
214  *
215  *	p_lock does not need to be held when transitioning among these
216  *	three states, hence p_lock is rarely taken for state transitions.
217  */
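/*
 * Illustrative sketch (not compiled; the transition shown is chosen for
 * the example only): the typical locking pattern for examining and
 * changing an LWP's state under the rules above.  Real transitions are
 * made via helpers such as setrunnable(), which consume the LWP lock.
 *
 *	mutex_enter(p->p_lock);		(stabilize process-wide state)
 *	lwp_lock(l);			(take the LWP's current l_mutex)
 *	if (l->l_stat == LSSUSPENDED)
 *		setrunnable(l);		(releases the LWP lock)
 *	else
 *		lwp_unlock(l);
 *	mutex_exit(p->p_lock);
 */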
218 
219 #include <sys/cdefs.h>
220 __KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.252 2023/04/09 09:18:09 riastradh Exp $");
221 
222 #include "opt_ddb.h"
223 #include "opt_lockdebug.h"
224 #include "opt_dtrace.h"
225 
226 #define _LWP_API_PRIVATE
227 
228 #include <sys/param.h>
229 #include <sys/systm.h>
230 #include <sys/cpu.h>
231 #include <sys/pool.h>
232 #include <sys/proc.h>
233 #include <sys/syscallargs.h>
234 #include <sys/syscall_stats.h>
235 #include <sys/kauth.h>
236 #include <sys/sleepq.h>
237 #include <sys/lockdebug.h>
238 #include <sys/kmem.h>
239 #include <sys/pset.h>
240 #include <sys/intr.h>
241 #include <sys/lwpctl.h>
242 #include <sys/atomic.h>
243 #include <sys/filedesc.h>
244 #include <sys/fstrans.h>
245 #include <sys/dtrace_bsd.h>
246 #include <sys/sdt.h>
247 #include <sys/ptrace.h>
248 #include <sys/xcall.h>
249 #include <sys/uidinfo.h>
250 #include <sys/sysctl.h>
251 #include <sys/psref.h>
252 #include <sys/msan.h>
253 #include <sys/kcov.h>
254 #include <sys/cprng.h>
255 #include <sys/futex.h>
256 
257 #include <uvm/uvm_extern.h>
258 #include <uvm/uvm_object.h>
259 
260 static pool_cache_t	lwp_cache	__read_mostly;
261 struct lwplist		alllwp		__cacheline_aligned;
262 
263 static int		lwp_ctor(void *, void *, int);
264 static void		lwp_dtor(void *, void *);
265 
266 /* DTrace proc provider probes */
267 SDT_PROVIDER_DEFINE(proc);
268 
269 SDT_PROBE_DEFINE1(proc, kernel, , lwp__create, "struct lwp *");
270 SDT_PROBE_DEFINE1(proc, kernel, , lwp__start, "struct lwp *");
271 SDT_PROBE_DEFINE1(proc, kernel, , lwp__exit, "struct lwp *");
272 
273 struct turnstile turnstile0 __cacheline_aligned;
274 struct lwp lwp0 __aligned(MIN_LWP_ALIGNMENT) = {
275 #ifdef LWP0_CPU_INFO
276 	.l_cpu = LWP0_CPU_INFO,
277 #endif
278 #ifdef LWP0_MD_INITIALIZER
279 	.l_md = LWP0_MD_INITIALIZER,
280 #endif
281 	.l_proc = &proc0,
282 	.l_lid = 0,		/* we own proc0's slot in the pid table */
283 	.l_flag = LW_SYSTEM,
284 	.l_stat = LSONPROC,
285 	.l_ts = &turnstile0,
286 	.l_syncobj = &sched_syncobj,
287 	.l_refcnt = 0,
288 	.l_priority = PRI_USER + NPRI_USER - 1,
289 	.l_inheritedprio = -1,
290 	.l_class = SCHED_OTHER,
291 	.l_psid = PS_NONE,
292 	.l_pi_lenders = SLIST_HEAD_INITIALIZER(&lwp0.l_pi_lenders),
293 	.l_name = __UNCONST("swapper"),
294 	.l_fd = &filedesc0,
295 };
296 
297 static int
298 lwp_maxlwp(void)
299 {
300 	/* Assume 1 LWP per 1MiB. */
301 	uint64_t lwps_per = ctob(physmem) / (1024 * 1024);
302 
303 	return MAX(MIN(MAXMAXLWP, lwps_per), MAXLWP);
304 }
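/*
 * Worked example for the heuristic above: with 4 GiB of physical memory,
 * lwps_per == 4096, and the return value is that figure clamped into the
 * range [MAXLWP, MAXMAXLWP].
 */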
305 
306 static int sysctl_kern_maxlwp(SYSCTLFN_PROTO);
307 
308 /*
309  * sysctl helper routine for kern.maxlwp. Ensures that the new
310  * values are not too low or too high.
311  */
312 static int
313 sysctl_kern_maxlwp(SYSCTLFN_ARGS)
314 {
315 	int error, nmaxlwp;
316 	struct sysctlnode node;
317 
318 	nmaxlwp = maxlwp;
319 	node = *rnode;
320 	node.sysctl_data = &nmaxlwp;
321 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
322 	if (error || newp == NULL)
323 		return error;
324 
325 	if (nmaxlwp < 0 || nmaxlwp >= MAXMAXLWP)
326 		return EINVAL;
327 	if (nmaxlwp > lwp_maxlwp())
328 		return EINVAL;
329 	maxlwp = nmaxlwp;
330 
331 	return 0;
332 }
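/*
 * From userland (illustrative; the value shown is an example only) the
 * limit can be read and, subject to the bounds checked above, tuned with
 * sysctl(8):
 *
 *	$ sysctl kern.maxlwp
 *	$ sysctl -w kern.maxlwp=4096
 */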
333 
334 static void
335 sysctl_kern_lwp_setup(void)
336 {
337 	sysctl_createv(NULL, 0, NULL, NULL,
338 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
339 		       CTLTYPE_INT, "maxlwp",
340 		       SYSCTL_DESCR("Maximum number of simultaneous threads"),
341 		       sysctl_kern_maxlwp, 0, NULL, 0,
342 		       CTL_KERN, CTL_CREATE, CTL_EOL);
343 }
344 
345 void
346 lwpinit(void)
347 {
348 
349 	LIST_INIT(&alllwp);
350 	lwpinit_specificdata();
351 	/*
352 	 * Provide a barrier to ensure that all mutex_oncpu() and rw_oncpu()
353 	 * calls will exit before memory of LWPs is returned to the pool, where
354 	 * KVA of LWP structure might be freed and re-used for other purposes.
355 	 * Kernel preemption is disabled around mutex_oncpu() and rw_oncpu()
356 	 * callers, therefore a regular passive serialization barrier will
357 	 * do the job.
358 	 */
359 	lwp_cache = pool_cache_init(sizeof(lwp_t), MIN_LWP_ALIGNMENT, 0,
360 	    PR_PSERIALIZE, "lwppl", NULL, IPL_NONE, lwp_ctor, lwp_dtor, NULL);
361 
362 	maxlwp = lwp_maxlwp();
363 	sysctl_kern_lwp_setup();
364 }
365 
366 void
367 lwp0_init(void)
368 {
369 	struct lwp *l = &lwp0;
370 
371 	KASSERT((void *)uvm_lwp_getuarea(l) != NULL);
372 
373 	LIST_INSERT_HEAD(&alllwp, l, l_list);
374 
375 	callout_init(&l->l_timeout_ch, CALLOUT_MPSAFE);
376 	callout_setfunc(&l->l_timeout_ch, sleepq_timeout, l);
377 	cv_init(&l->l_sigcv, "sigwait");
378 	cv_init(&l->l_waitcv, "vfork");
379 
380 	kauth_cred_hold(proc0.p_cred);
381 	l->l_cred = proc0.p_cred;
382 
383 	kdtrace_thread_ctor(NULL, l);
384 	lwp_initspecific(l);
385 
386 	SYSCALL_TIME_LWP_INIT(l);
387 }
388 
389 /*
390  * Initialize the non-zeroed portion of an lwp_t.
391  */
392 static int
393 lwp_ctor(void *arg, void *obj, int flags)
394 {
395 	lwp_t *l = obj;
396 
397 	l->l_stat = LSIDL;
398 	l->l_cpu = curcpu();
399 	l->l_mutex = l->l_cpu->ci_schedstate.spc_lwplock;
400 	l->l_ts = pool_get(&turnstile_pool, flags);
401 
402 	if (l->l_ts == NULL) {
403 		return ENOMEM;
404 	} else {
405 		turnstile_ctor(l->l_ts);
406 		return 0;
407 	}
408 }
409 
410 static void
411 lwp_dtor(void *arg, void *obj)
412 {
413 	lwp_t *l = obj;
414 
415 	/*
416 	 * The value of l->l_cpu must still be valid at this point.
417 	 */
418 	KASSERT(l->l_cpu != NULL);
419 
420 	/*
421 	 * We can't return turnstile0 to the pool (it didn't come from it),
422 	 * so if it comes up just drop it quietly and move on.
423 	 */
424 	if (l->l_ts != &turnstile0)
425 		pool_put(&turnstile_pool, l->l_ts);
426 }
427 
428 /*
429  * Set an LWP suspended.
430  *
431  * Must be called with p_lock held, and the LWP locked.  Will unlock the
432  * LWP before return.
433  */
434 int
435 lwp_suspend(struct lwp *curl, struct lwp *t)
436 {
437 	int error;
438 
439 	KASSERT(mutex_owned(t->l_proc->p_lock));
440 	KASSERT(lwp_locked(t, NULL));
441 
442 	KASSERT(curl != t || curl->l_stat == LSONPROC);
443 
444 	/*
445 	 * If the current LWP has been told to exit, we must not suspend anyone
446 	 * else or deadlock could occur.  We won't return to userspace.
447 	 */
448 	if ((curl->l_flag & (LW_WEXIT | LW_WCORE)) != 0) {
449 		lwp_unlock(t);
450 		return (EDEADLK);
451 	}
452 
453 	if ((t->l_flag & LW_DBGSUSPEND) != 0) {
454 		lwp_unlock(t);
455 		return 0;
456 	}
457 
458 	error = 0;
459 
460 	switch (t->l_stat) {
461 	case LSRUN:
462 	case LSONPROC:
463 		t->l_flag |= LW_WSUSPEND;
464 		lwp_need_userret(t);
465 		lwp_unlock(t);
466 		break;
467 
468 	case LSSLEEP:
469 		t->l_flag |= LW_WSUSPEND;
470 
471 		/*
472 		 * Kick the LWP and try to get it to the kernel boundary
473 		 * so that it will release any locks that it holds.
474 		 * setrunnable() will release the lock.
475 		 */
476 		if ((t->l_flag & LW_SINTR) != 0)
477 			setrunnable(t);
478 		else
479 			lwp_unlock(t);
480 		break;
481 
482 	case LSSUSPENDED:
483 		lwp_unlock(t);
484 		break;
485 
486 	case LSSTOP:
487 		t->l_flag |= LW_WSUSPEND;
488 		setrunnable(t);
489 		break;
490 
491 	case LSIDL:
492 	case LSZOMB:
493 		error = EINTR; /* It's what Solaris does..... */
494 		lwp_unlock(t);
495 		break;
496 	}
497 
498 	return (error);
499 }
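/*
 * Illustrative caller pattern (a sketch of the documented contract, not
 * a verbatim copy of any caller): p_lock and the target's LWP lock are
 * taken before the call, and lwp_suspend() consumes the LWP lock.
 *
 *	mutex_enter(p->p_lock);
 *	lwp_lock(t);
 *	error = lwp_suspend(curlwp, t);		(unlocks t)
 *	mutex_exit(p->p_lock);
 */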
500 
501 /*
502  * Restart a suspended LWP.
503  *
504  * Must be called with p_lock held, and the LWP locked.  Will unlock the
505  * LWP before return.
506  */
507 void
508 lwp_continue(struct lwp *l)
509 {
510 
511 	KASSERT(mutex_owned(l->l_proc->p_lock));
512 	KASSERT(lwp_locked(l, NULL));
513 
514 	/* If rebooting or not suspended, then just bail out. */
515 	if ((l->l_flag & LW_WREBOOT) != 0) {
516 		lwp_unlock(l);
517 		return;
518 	}
519 
520 	l->l_flag &= ~LW_WSUSPEND;
521 
522 	if (l->l_stat != LSSUSPENDED || (l->l_flag & LW_DBGSUSPEND) != 0) {
523 		lwp_unlock(l);
524 		return;
525 	}
526 
527 	/* setrunnable() will release the lock. */
528 	setrunnable(l);
529 }
530 
531 /*
532  * Restart a stopped LWP.
533  *
534  * Must be called with p_lock held, and the LWP NOT locked.  Will unlock the
535  * LWP before return.
536  */
537 void
538 lwp_unstop(struct lwp *l)
539 {
540 	struct proc *p = l->l_proc;
541 
542 	KASSERT(mutex_owned(&proc_lock));
543 	KASSERT(mutex_owned(p->p_lock));
544 
545 	lwp_lock(l);
546 
547 	KASSERT((l->l_flag & LW_DBGSUSPEND) == 0);
548 
549 	/* If not stopped, then just bail out. */
550 	if (l->l_stat != LSSTOP) {
551 		lwp_unlock(l);
552 		return;
553 	}
554 
555 	p->p_stat = SACTIVE;
556 	p->p_sflag &= ~PS_STOPPING;
557 
558 	if (!p->p_waited)
559 		p->p_pptr->p_nstopchild--;
560 
561 	if (l->l_wchan == NULL) {
562 		/* setrunnable() will release the lock. */
563 		setrunnable(l);
564 	} else if (p->p_xsig && (l->l_flag & LW_SINTR) != 0) {
565 		/* setrunnable() so we can receive the signal */
566 		setrunnable(l);
567 	} else {
568 		l->l_stat = LSSLEEP;
569 		p->p_nrlwps++;
570 		lwp_unlock(l);
571 	}
572 }
573 
574 /*
575  * Wait for an LWP within the current process to exit.  If 'lid' is
576  * non-zero, we are waiting for a specific LWP.
577  *
578  * Must be called with p->p_lock held.
579  */
580 int
581 lwp_wait(struct lwp *l, lwpid_t lid, lwpid_t *departed, bool exiting)
582 {
583 	const lwpid_t curlid = l->l_lid;
584 	proc_t *p = l->l_proc;
585 	lwp_t *l2, *next;
586 	int error;
587 
588 	KASSERT(mutex_owned(p->p_lock));
589 
590 	p->p_nlwpwait++;
591 	l->l_waitingfor = lid;
592 
593 	for (;;) {
594 		int nfound;
595 
596 		/*
597 		 * Avoid a race between exit1() and sigexit(): if the
598 		 * process is dumping core, then we need to bail out: call
599 		 * into lwp_userret() where we will be suspended until the
600 		 * deed is done.
601 		 */
602 		if ((p->p_sflag & PS_WCORE) != 0) {
603 			mutex_exit(p->p_lock);
604 			lwp_userret(l);
605 			KASSERT(false);
606 		}
607 
608 		/*
609 		 * First off, drain any detached LWP that is waiting to be
610 		 * reaped.
611 		 */
612 		while ((l2 = p->p_zomblwp) != NULL) {
613 			p->p_zomblwp = NULL;
614 			lwp_free(l2, false, false);	/* releases proc mutex */
615 			mutex_enter(p->p_lock);
616 		}
617 
618 		/*
619 		 * Now look for an LWP to collect.  If the whole process is
620 		 * exiting, count detached LWPs as eligible to be collected,
621 		 * but don't drain them here.
622 		 */
623 		nfound = 0;
624 		error = 0;
625 
626 		/*
627 		 * If given a specific LID, go via pid_table and make sure
628 		 * it's not detached.
629 		 */
630 		if (lid != 0) {
631 			l2 = proc_find_lwp(p, lid);
632 			if (l2 == NULL) {
633 				error = ESRCH;
634 				break;
635 			}
636 			KASSERT(l2->l_lid == lid);
637 			if ((l2->l_prflag & LPR_DETACHED) != 0) {
638 				error = EINVAL;
639 				break;
640 			}
641 		} else {
642 			l2 = LIST_FIRST(&p->p_lwps);
643 		}
644 		for (; l2 != NULL; l2 = next) {
645 			next = (lid != 0 ? NULL : LIST_NEXT(l2, l_sibling));
646 
647 			/*
648 			 * If a specific wait and the target is waiting on
649 			 * us, then avoid deadlock.  This also traps LWPs
650 			 * that try to wait on themselves.
651 			 *
652 			 * Note that this does not handle more complicated
653 			 * cycles, like: t1 -> t2 -> t3 -> t1.  The process
654 			 * can still be killed so it is not a major problem.
655 			 */
656 			if (l2->l_lid == lid && l2->l_waitingfor == curlid) {
657 				error = EDEADLK;
658 				break;
659 			}
660 			if (l2 == l)
661 				continue;
662 			if ((l2->l_prflag & LPR_DETACHED) != 0) {
663 				nfound += exiting;
664 				continue;
665 			}
666 			if (lid != 0) {
667 				/*
668 				 * Mark this LWP as the first waiter, if there
669 				 * is no other.
670 				 */
671 				if (l2->l_waiter == 0)
672 					l2->l_waiter = curlid;
673 			} else if (l2->l_waiter != 0) {
674 				/*
675 				 * It already has a waiter - so don't
676 				 * collect it.  If the waiter doesn't
677 				 * grab it we'll get another chance
678 				 * later.
679 				 */
680 				nfound++;
681 				continue;
682 			}
683 			nfound++;
684 
685 			/* No need to lock the LWP in order to see LSZOMB. */
686 			if (l2->l_stat != LSZOMB)
687 				continue;
688 
689 			/*
690 			 * We're no longer waiting.  Reset the "first waiter"
691 			 * pointer on the target, in case it was us.
692 			 */
693 			l->l_waitingfor = 0;
694 			l2->l_waiter = 0;
695 			p->p_nlwpwait--;
696 			if (departed)
697 				*departed = l2->l_lid;
698 			sched_lwp_collect(l2);
699 
700 			/* lwp_free() releases the proc lock. */
701 			lwp_free(l2, false, false);
702 			mutex_enter(p->p_lock);
703 			return 0;
704 		}
705 
706 		if (error != 0)
707 			break;
708 		if (nfound == 0) {
709 			error = ESRCH;
710 			break;
711 		}
712 
713 		/*
714 		 * Note: since the lock will be dropped, need to restart on
715 		 * wakeup to run all LWPs again, e.g. there may be new LWPs.
716 		 */
717 		if (exiting) {
718 			KASSERT(p->p_nlwps > 1);
719 			error = cv_timedwait(&p->p_lwpcv, p->p_lock, 1);
720 			break;
721 		}
722 
723 		/*
724 		 * Break out if all LWPs are in _lwp_wait().  There are
725 		 * other ways to hang the process with _lwp_wait(), but the
726 		 * sleep is interruptible so there is little point checking for them.
727 		 */
728 		if (p->p_nlwpwait == p->p_nlwps) {
729 			error = EDEADLK;
730 			break;
731 		}
732 
733 		/*
734 		 * Sit around and wait for something to happen.  We'll be
735 		 * awoken if any of the conditions examined change: if an
736 		 * LWP exits, is collected, or is detached.
737 		 */
738 		if ((error = cv_wait_sig(&p->p_lwpcv, p->p_lock)) != 0)
739 			break;
740 	}
741 
742 	/*
743 	 * We didn't find any LWPs to collect, we may have received a
744 	 * signal, or some other condition has caused us to bail out.
745 	 *
746 	 * If waiting on a specific LWP, clear the waiters marker: some
747 	 * other LWP may want it.  Then, kick all the remaining waiters
748 	 * so that they can re-check for zombies and for deadlock.
749 	 */
750 	if (lid != 0) {
751 		l2 = proc_find_lwp(p, lid);
752 		KASSERT(l2 == NULL || l2->l_lid == lid);
753 
754 		if (l2 != NULL && l2->l_waiter == curlid)
755 			l2->l_waiter = 0;
756 	}
757 	p->p_nlwpwait--;
758 	l->l_waitingfor = 0;
759 	cv_broadcast(&p->p_lwpcv);
760 
761 	return error;
762 }
763 
764 /*
765  * Create a new LWP within process 'p2', using LWP 'l1' as a template.
766  * The new LWP is created in state LSIDL and must be set running,
767  * suspended, or stopped by the caller.
768  */
769 int
770 lwp_create(lwp_t *l1, proc_t *p2, vaddr_t uaddr, int flags,
771     void *stack, size_t stacksize, void (*func)(void *), void *arg,
772     lwp_t **rnewlwpp, int sclass, const sigset_t *sigmask,
773     const stack_t *sigstk)
774 {
775 	struct lwp *l2;
776 
777 	KASSERT(l1 == curlwp || l1->l_proc == &proc0);
778 
779 	/*
780 	 * Enforce limits, excluding the first lwp and kthreads.  We must
781 	 * use the process credentials here when adjusting the limit, as
782 	 * they are what's tied to the accounting entity.  However for
783 	 * authorizing the action, we'll use the LWP's credentials.
784 	 */
785 	mutex_enter(p2->p_lock);
786 	if (p2->p_nlwps != 0 && p2 != &proc0) {
787 		uid_t uid = kauth_cred_getuid(p2->p_cred);
788 		int count = chglwpcnt(uid, 1);
789 		if (__predict_false(count >
790 		    p2->p_rlimit[RLIMIT_NTHR].rlim_cur)) {
791 			if (kauth_authorize_process(l1->l_cred,
792 			    KAUTH_PROCESS_RLIMIT, p2,
793 			    KAUTH_ARG(KAUTH_REQ_PROCESS_RLIMIT_BYPASS),
794 			    &p2->p_rlimit[RLIMIT_NTHR], KAUTH_ARG(RLIMIT_NTHR))
795 			    != 0) {
796 				(void)chglwpcnt(uid, -1);
797 				mutex_exit(p2->p_lock);
798 				return EAGAIN;
799 			}
800 		}
801 	}
802 
803 	/*
804 	 * First off, reap any detached LWP waiting to be collected.
805 	 * We can re-use its LWP structure and turnstile.
806 	 */
807 	if ((l2 = p2->p_zomblwp) != NULL) {
808 		p2->p_zomblwp = NULL;
809 		lwp_free(l2, true, false);
810 		/* p2 now unlocked by lwp_free() */
811 		KASSERT(l2->l_ts != NULL);
812 		KASSERT(l2->l_inheritedprio == -1);
813 		KASSERT(SLIST_EMPTY(&l2->l_pi_lenders));
814 		memset(&l2->l_startzero, 0, sizeof(*l2) -
815 		    offsetof(lwp_t, l_startzero));
816 	} else {
817 		mutex_exit(p2->p_lock);
818 		l2 = pool_cache_get(lwp_cache, PR_WAITOK);
819 		memset(&l2->l_startzero, 0, sizeof(*l2) -
820 		    offsetof(lwp_t, l_startzero));
821 		SLIST_INIT(&l2->l_pi_lenders);
822 	}
823 
824 	/*
825 	 * Because of lockless lookup via pid_table, the LWP can be locked
826 	 * and inspected briefly even after it's freed, so a few fields are
827 	 * kept stable.
828 	 */
829 	KASSERT(l2->l_stat == LSIDL);
830 	KASSERT(l2->l_cpu != NULL);
831 	KASSERT(l2->l_ts != NULL);
832 	KASSERT(l2->l_mutex == l2->l_cpu->ci_schedstate.spc_lwplock);
833 
834 	l2->l_proc = p2;
835 	l2->l_refcnt = 0;
836 	l2->l_class = sclass;
837 
838 	/*
839 	 * Allocate a process ID for this LWP.  We need to do this now
840 	 * while we can still unwind if it fails.  Because we're marked
841 	 * as LSIDL, no lookups by the ID will succeed.
842 	 *
843 	 * N.B. this will always succeed for the first LWP in a process,
844 	 * because proc_alloc_lwpid() will usurp the slot.  Also note
845 	 * that l2->l_proc MUST be valid so that lookups of the proc
846 	 * will succeed, even if the LWP itself is not visible.
847 	 */
848 	if (__predict_false(proc_alloc_lwpid(p2, l2) == -1)) {
849 		pool_cache_put(lwp_cache, l2);
850 		return EAGAIN;
851 	}
852 
853 	/*
854 	 * If vfork(), we want the LWP to run fast and on the same CPU
855 	 * as its parent, so that it can reuse the VM context and cache
856 	 * footprint on the local CPU.
857 	 */
858 	l2->l_kpriority = ((flags & LWP_VFORK) ? true : false);
859 	l2->l_kpribase = PRI_KERNEL;
860 	l2->l_priority = l1->l_priority;
861 	l2->l_inheritedprio = -1;
862 	l2->l_protectprio = -1;
863 	l2->l_auxprio = -1;
864 	l2->l_flag = 0;
865 	l2->l_pflag = LP_MPSAFE;
866 	TAILQ_INIT(&l2->l_ld_locks);
867 	l2->l_psrefs = 0;
868 	kmsan_lwp_alloc(l2);
869 
870 	/*
871 	 * For vfork, borrow parent's lwpctl context if it exists.
872 	 * This also causes us to return via lwp_userret.
873 	 */
874 	if (flags & LWP_VFORK && l1->l_lwpctl) {
875 		l2->l_lwpctl = l1->l_lwpctl;
876 		l2->l_flag |= LW_LWPCTL;
877 	}
878 
879 	/*
880 	 * If not the first LWP in the process, grab a reference to the
881 	 * descriptor table.
882 	 */
883 	l2->l_fd = p2->p_fd;
884 	if (p2->p_nlwps != 0) {
885 		KASSERT(l1->l_proc == p2);
886 		fd_hold(l2);
887 	} else {
888 		KASSERT(l1->l_proc != p2);
889 	}
890 
891 	if (p2->p_flag & PK_SYSTEM) {
892 		/* Mark it as a system LWP. */
893 		l2->l_flag |= LW_SYSTEM;
894 	}
895 
896 	kdtrace_thread_ctor(NULL, l2);
897 	lwp_initspecific(l2);
898 	sched_lwp_fork(l1, l2);
899 	lwp_update_creds(l2);
900 	callout_init(&l2->l_timeout_ch, CALLOUT_MPSAFE);
901 	callout_setfunc(&l2->l_timeout_ch, sleepq_timeout, l2);
902 	cv_init(&l2->l_sigcv, "sigwait");
903 	cv_init(&l2->l_waitcv, "vfork");
904 	l2->l_syncobj = &sched_syncobj;
905 	PSREF_DEBUG_INIT_LWP(l2);
906 
907 	if (rnewlwpp != NULL)
908 		*rnewlwpp = l2;
909 
910 	/*
911 	 * PCU state needs to be saved before calling uvm_lwp_fork() so that
912 	 * the MD cpu_lwp_fork() can copy the saved state to the new LWP.
913 	 */
914 	pcu_save_all(l1);
915 #if PCU_UNIT_COUNT > 0
916 	l2->l_pcu_valid = l1->l_pcu_valid;
917 #endif
918 
919 	uvm_lwp_setuarea(l2, uaddr);
920 	uvm_lwp_fork(l1, l2, stack, stacksize, func, (arg != NULL) ? arg : l2);
921 
922 	mutex_enter(p2->p_lock);
923 	if ((flags & LWP_DETACHED) != 0) {
924 		l2->l_prflag = LPR_DETACHED;
925 		p2->p_ndlwps++;
926 	} else
927 		l2->l_prflag = 0;
928 
929 	if (l1->l_proc == p2) {
930 		/*
931 		 * These flags are set while p_lock is held.  Copy with
932 		 * p_lock held too, so the LWP doesn't sneak into the
933 		 * process without them being set.
934 		 */
935 		l2->l_flag |= (l1->l_flag & (LW_WEXIT | LW_WREBOOT | LW_WCORE));
936 	} else {
937 		/* fork(): pending core/exit doesn't apply to child. */
938 		l2->l_flag |= (l1->l_flag & LW_WREBOOT);
939 	}
940 
941 	l2->l_sigstk = *sigstk;
942 	l2->l_sigmask = *sigmask;
943 	TAILQ_INIT(&l2->l_sigpend.sp_info);
944 	sigemptyset(&l2->l_sigpend.sp_set);
945 	LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
946 	p2->p_nlwps++;
947 	p2->p_nrlwps++;
948 
949 	KASSERT(l2->l_affinity == NULL);
950 
951 	/* Inherit the affinity mask. */
952 	if (l1->l_affinity) {
953 		/*
954 		 * Note that we hold the state lock while inheriting
955 		 * the affinity to avoid race with sched_setaffinity().
956 		 */
957 		lwp_lock(l1);
958 		if (l1->l_affinity) {
959 			kcpuset_use(l1->l_affinity);
960 			l2->l_affinity = l1->l_affinity;
961 		}
962 		lwp_unlock(l1);
963 	}
964 
965 	/* This marks the end of the "must be atomic" section. */
966 	mutex_exit(p2->p_lock);
967 
968 	SDT_PROBE(proc, kernel, , lwp__create, l2, 0, 0, 0, 0);
969 
970 	mutex_enter(&proc_lock);
971 	LIST_INSERT_HEAD(&alllwp, l2, l_list);
972 	/* Inherit a processor-set */
973 	l2->l_psid = l1->l_psid;
974 	mutex_exit(&proc_lock);
975 
976 	SYSCALL_TIME_LWP_INIT(l2);
977 
978 	if (p2->p_emul->e_lwp_fork)
979 		(*p2->p_emul->e_lwp_fork)(l1, l2);
980 
981 	return (0);
982 }
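/*
 * Typical usage (hedged sketch; the argument values are examples only):
 * a new LWP comes back in LSIDL and must be set running explicitly, e.g.:
 *
 *	error = lwp_create(curlwp, p, uaddr, 0, NULL, 0, func, arg,
 *	    &l2, SCHED_OTHER, &sigmask, &sigstk);
 *	if (error == 0)
 *		lwp_start(l2, 0);		(LSIDL -> LSRUN)
 */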
983 
984 /*
985  * Set a new LWP running.  If the process is stopping, then the LWP is
986  * created stopped.
987  */
988 void
989 lwp_start(lwp_t *l, int flags)
990 {
991 	proc_t *p = l->l_proc;
992 
993 	mutex_enter(p->p_lock);
994 	lwp_lock(l);
995 	KASSERT(l->l_stat == LSIDL);
996 	if ((flags & LWP_SUSPENDED) != 0) {
997 		/* It'll suspend itself in lwp_userret(). */
998 		l->l_flag |= LW_WSUSPEND;
999 	}
1000 	if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0) {
1001 		KASSERT(l->l_wchan == NULL);
1002 		l->l_stat = LSSTOP;
1003 		p->p_nrlwps--;
1004 		lwp_unlock(l);
1005 	} else {
1006 		setrunnable(l);
1007 		/* LWP now unlocked */
1008 	}
1009 	mutex_exit(p->p_lock);
1010 }
1011 
1012 /*
1013  * Called by MD code when a new LWP begins execution.  Must be called
1014  * with the previous LWP locked (so at splsched), or if there is no
1015  * previous LWP, at splsched.
1016  */
1017 void
1018 lwp_startup(struct lwp *prev, struct lwp *new_lwp)
1019 {
1020 	kmutex_t *lock;
1021 
1022 	KASSERTMSG(new_lwp == curlwp, "l %p curlwp %p prevlwp %p", new_lwp, curlwp, prev);
1023 	KASSERT(kpreempt_disabled());
1024 	KASSERT(prev != NULL);
1025 	KASSERT((prev->l_pflag & LP_RUNNING) != 0);
1026 	KASSERT(curcpu()->ci_mtx_count == -2);
1027 
1028 	/*
1029 	 * Immediately mark the previous LWP as no longer running and
1030 	 * unlock (to keep lock wait times as short as possible).  If a
1031 	 * zombie, don't touch after clearing LP_RUNNING as it could be
1032 	 * reaped by another CPU.  Use atomic_store_release to ensure
1033 	 * this -- matches atomic_load_acquire in lwp_free.
1034 	 */
1035 	lock = prev->l_mutex;
1036 	if (__predict_false(prev->l_stat == LSZOMB)) {
1037 		atomic_store_release(&prev->l_pflag,
1038 		    prev->l_pflag & ~LP_RUNNING);
1039 	} else {
1040 		prev->l_pflag &= ~LP_RUNNING;
1041 	}
1042 	mutex_spin_exit(lock);
1043 
1044 	/* Correct spin mutex count after mi_switch(). */
1045 	curcpu()->ci_mtx_count = 0;
1046 
1047 	/* Install new VM context. */
1048 	if (__predict_true(new_lwp->l_proc->p_vmspace)) {
1049 		pmap_activate(new_lwp);
1050 	}
1051 
1052 	/* We remain at IPL_SCHED from mi_switch() - reset it. */
1053 	spl0();
1054 
1055 	LOCKDEBUG_BARRIER(NULL, 0);
1056 	SDT_PROBE(proc, kernel, , lwp__start, new_lwp, 0, 0, 0, 0);
1057 
1058 	/* For kthreads, acquire kernel lock if not MPSAFE. */
1059 	if (__predict_false((new_lwp->l_pflag & LP_MPSAFE) == 0)) {
1060 		KERNEL_LOCK(1, new_lwp);
1061 	}
1062 }
1063 
1064 /*
1065  * Exit an LWP.
1066  *
1067  * *** WARNING *** This can be called with (l != curlwp) in error paths.
1068  */
1069 void
1070 lwp_exit(struct lwp *l)
1071 {
1072 	struct proc *p = l->l_proc;
1073 	struct lwp *l2;
1074 	bool current;
1075 
1076 	current = (l == curlwp);
1077 
1078 	KASSERT(current || l->l_stat == LSIDL);
1079 	KASSERT(current || l->l_target_cpu == NULL);
1080 	KASSERT(p == curproc);
1081 
1082 	SDT_PROBE(proc, kernel, , lwp__exit, l, 0, 0, 0, 0);
1083 
1084 	/* Verify that we hold no locks; for DIAGNOSTIC check kernel_lock. */
1085 	LOCKDEBUG_BARRIER(NULL, 0);
1086 	KASSERTMSG(curcpu()->ci_biglock_count == 0, "kernel_lock leaked");
1087 
1088 	/*
1089 	 * If we are the last live LWP in a process, we need to exit the
1090 	 * entire process.  We do so with an exit status of zero, because
1091 	 * it's a "controlled" exit, and because that's what Solaris does.
1092 	 *
1093 	 * We are not quite a zombie yet, but for accounting purposes we
1094 	 * must increment the count of zombies here.
1095 	 *
1096 	 * Note: the last LWP's specificdata will be deleted here.
1097 	 */
1098 	mutex_enter(p->p_lock);
1099 	if (p->p_nlwps - p->p_nzlwps == 1) {
1100 		KASSERT(current == true);
1101 		KASSERT(p != &proc0);
1102 		exit1(l, 0, 0);
1103 		/* NOTREACHED */
1104 	}
1105 	p->p_nzlwps++;
1106 
1107 	/*
1108 	 * Perform any required thread cleanup.  Do this early so
1109 	 * anyone wanting to look us up with lwp_getref_lwpid() will
1110 	 * fail to find us before we become a zombie.
1111 	 *
1112 	 * N.B. this will unlock p->p_lock on our behalf.
1113 	 */
1114 	lwp_thread_cleanup(l);
1115 
1116 	if (p->p_emul->e_lwp_exit)
1117 		(*p->p_emul->e_lwp_exit)(l);
1118 
1119 	/* Drop filedesc reference. */
1120 	fd_free();
1121 
1122 	/* Release fstrans private data. */
1123 	fstrans_lwp_dtor(l);
1124 
1125 	/* Delete the specificdata while it's still safe to sleep. */
1126 	lwp_finispecific(l);
1127 
1128 	/*
1129 	 * Release our cached credentials.
1130 	 */
1131 	kauth_cred_free(l->l_cred);
1132 	callout_destroy(&l->l_timeout_ch);
1133 
1134 	/*
1135 	 * If traced, report LWP exit event to the debugger.
1136 	 *
1137 	 * Remove the LWP from the global list.
1138 	 * Free its LID from the PID namespace if needed.
1139 	 */
1140 	mutex_enter(&proc_lock);
1141 
1142 	if ((p->p_slflag & (PSL_TRACED|PSL_TRACELWP_EXIT)) ==
1143 	    (PSL_TRACED|PSL_TRACELWP_EXIT)) {
1144 		mutex_enter(p->p_lock);
1145 		if (ISSET(p->p_sflag, PS_WEXIT)) {
1146 			mutex_exit(p->p_lock);
1147 			/*
1148 			 * We are exiting, bail out without informing parent
1149 			 * about a terminating LWP as it would deadlock.
1150 			 */
1151 		} else {
1152 			eventswitch(TRAP_LWP, PTRACE_LWP_EXIT, l->l_lid);
1153 			mutex_enter(&proc_lock);
1154 		}
1155 	}
1156 
1157 	LIST_REMOVE(l, l_list);
1158 	mutex_exit(&proc_lock);
1159 
1160 	/*
1161 	 * Get rid of all references to the LWP that others (e.g. procfs)
1162 	 * may have, and mark the LWP as a zombie.  If the LWP is detached,
1163 	 * mark it waiting for collection in the proc structure.  Note that
1164 	 * before we can do that, we need to free any other dead, deatched
1165 	 * before we can do that, we need to free any other dead, detached
1166 	 *
1167 	 * All conditions need to be observed under the same hold of
1168 	 * p_lock, because if the lock is dropped any of them can change.
1169 	 */
1170 	mutex_enter(p->p_lock);
1171 	for (;;) {
1172 		if (lwp_drainrefs(l))
1173 			continue;
1174 		if ((l->l_prflag & LPR_DETACHED) != 0) {
1175 			if ((l2 = p->p_zomblwp) != NULL) {
1176 				p->p_zomblwp = NULL;
1177 				lwp_free(l2, false, false);
1178 				/* proc now unlocked */
1179 				mutex_enter(p->p_lock);
1180 				continue;
1181 			}
1182 			p->p_zomblwp = l;
1183 		}
1184 		break;
1185 	}
1186 
1187 	/*
1188 	 * If we find a pending signal for the process and we have been
1189 	 * asked to check for signals, then we lose: arrange to have
1190 	 * all other LWPs in the process check for signals.
1191 	 */
1192 	if ((l->l_flag & LW_PENDSIG) != 0 &&
1193 	    firstsig(&p->p_sigpend.sp_set) != 0) {
1194 		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
1195 			lwp_lock(l2);
1196 			signotify(l2);
1197 			lwp_unlock(l2);
1198 		}
1199 	}
1200 
1201 	/*
1202 	 * Release any PCU resources before becoming a zombie.
1203 	 */
1204 	pcu_discard_all(l);
1205 
1206 	lwp_lock(l);
1207 	l->l_stat = LSZOMB;
1208 	if (l->l_name != NULL) {
1209 		strcpy(l->l_name, "(zombie)");
1210 	}
1211 	lwp_unlock(l);
1212 	p->p_nrlwps--;
1213 	cv_broadcast(&p->p_lwpcv);
1214 	if (l->l_lwpctl != NULL)
1215 		l->l_lwpctl->lc_curcpu = LWPCTL_CPU_EXITED;
1216 	mutex_exit(p->p_lock);
1217 
1218 	/*
1219 	 * We can no longer block.  At this point, lwp_free() may already
1220 	 * be gunning for us.  On a multi-CPU system, we may be off p_lwps.
1221 	 *
1222 	 * Free MD LWP resources.
1223 	 */
1224 	cpu_lwp_free(l, 0);
1225 
1226 	if (current) {
1227 		/* Switch away into oblivion. */
1228 		lwp_lock(l);
1229 		spc_lock(l->l_cpu);
1230 		mi_switch(l);
1231 		panic("lwp_exit");
1232 	}
1233 }
1234 
1235 /*
1236  * Free a dead LWP's remaining resources.
1237  *
1238  * XXXLWP limits.
1239  */
1240 void
1241 lwp_free(struct lwp *l, bool recycle, bool last)
1242 {
1243 	struct proc *p = l->l_proc;
1244 	struct rusage *ru;
1245 	ksiginfoq_t kq;
1246 
1247 	KASSERT(l != curlwp);
1248 	KASSERT(last || mutex_owned(p->p_lock));
1249 
1250 	/*
1251 	 * We use the process credentials instead of the lwp credentials here
1252 	 * because the lwp credentials may be cached (just after a setuid call)
1253 	 * and we don't want to pay for syncing, since the lwp is going away
1254 	 * anyway.
1255 	 */
1256 	if (p != &proc0 && p->p_nlwps != 1)
1257 		(void)chglwpcnt(kauth_cred_getuid(p->p_cred), -1);
1258 
1259 	/*
1260 	 * In the unlikely event that the LWP is still on the CPU,
1261 	 * then spin until it has switched away.
1262 	 *
1263 	 * atomic_load_acquire matches atomic_store_release in
1264 	 * lwp_startup and mi_switch.
1265 	 */
1266 	while (__predict_false((atomic_load_acquire(&l->l_pflag) & LP_RUNNING)
1267 		!= 0)) {
1268 		SPINLOCK_BACKOFF_HOOK;
1269 	}
1270 
1271 	/*
1272 	 * Now that the LWP's known off the CPU, reset its state back to
1273 	 * LSIDL, which defeats anything that might have gotten a hold on
1274 	 * the LWP via pid_table before the ID was freed.  It's important
1275 	 * to do this with both the LWP locked and p_lock held.
1276 	 *
1277 	 * Also reset the CPU and lock pointer back to curcpu(), since the
1278 	 * LWP will in all likelihood be cached with the current CPU in
1279 	 * lwp_cache when we free it and later allocated from there again
1280 	 * (avoid incidental lock contention).
1281 	 */
1282 	lwp_lock(l);
1283 	l->l_stat = LSIDL;
1284 	l->l_cpu = curcpu();
1285 	lwp_unlock_to(l, l->l_cpu->ci_schedstate.spc_lwplock);
1286 
1287 	/*
1288 	 * If this was not the last LWP in the process, then adjust counters
1289 	 * and unlock.  This is done differently for the last LWP in exit1().
1290 	 */
1291 	if (!last) {
1292 		/*
1293 		 * Add the LWP's run time to the process' base value.
1294 		 * This needs to co-incide with coming off p_lwps.
1295 		 * This needs to coincide with coming off p_lwps.
1296 		bintime_add(&p->p_rtime, &l->l_rtime);
1297 		p->p_pctcpu += l->l_pctcpu;
1298 		ru = &p->p_stats->p_ru;
1299 		ruadd(ru, &l->l_ru);
1300 		ru->ru_nvcsw += (l->l_ncsw - l->l_nivcsw);
1301 		ru->ru_nivcsw += l->l_nivcsw;
1302 		LIST_REMOVE(l, l_sibling);
1303 		p->p_nlwps--;
1304 		p->p_nzlwps--;
1305 		if ((l->l_prflag & LPR_DETACHED) != 0)
1306 			p->p_ndlwps--;
1307 
1308 		/*
1309 		 * Have any LWPs sleeping in lwp_wait() recheck for
1310 		 * deadlock.
1311 		 */
1312 		cv_broadcast(&p->p_lwpcv);
1313 		mutex_exit(p->p_lock);
1314 
1315 		/* Free the LWP ID. */
1316 		mutex_enter(&proc_lock);
1317 		proc_free_lwpid(p, l->l_lid);
1318 		mutex_exit(&proc_lock);
1319 	}
1320 
1321 	/*
1322 	 * Destroy the LWP's remaining signal information.
1323 	 */
1324 	ksiginfo_queue_init(&kq);
1325 	sigclear(&l->l_sigpend, NULL, &kq);
1326 	ksiginfo_queue_drain(&kq);
1327 	cv_destroy(&l->l_sigcv);
1328 	cv_destroy(&l->l_waitcv);
1329 
1330 	/*
1331 	 * Free lwpctl structure and affinity.
1332 	 */
1333 	if (l->l_lwpctl) {
1334 		lwp_ctl_free(l);
1335 	}
1336 	if (l->l_affinity) {
1337 		kcpuset_unuse(l->l_affinity, NULL);
1338 		l->l_affinity = NULL;
1339 	}
1340 
1341 	/*
1342 	 * Free remaining data structures and the LWP itself unless the
1343 	 * caller wants to recycle.
1344 	 */
1345 	if (l->l_name != NULL)
1346 		kmem_free(l->l_name, MAXCOMLEN);
1347 
1348 	kmsan_lwp_free(l);
1349 	kcov_lwp_free(l);
1350 	cpu_lwp_free2(l);
1351 	uvm_lwp_exit(l);
1352 
1353 	KASSERT(SLIST_EMPTY(&l->l_pi_lenders));
1354 	KASSERT(l->l_inheritedprio == -1);
1355 	KASSERT(l->l_blcnt == 0);
1356 	kdtrace_thread_dtor(NULL, l);
1357 	if (!recycle)
1358 		pool_cache_put(lwp_cache, l);
1359 }
1360 
1361 /*
1362  * Migrate the LWP to another CPU.  Unlocks the LWP.
1363  */
1364 void
1365 lwp_migrate(lwp_t *l, struct cpu_info *tci)
1366 {
1367 	struct schedstate_percpu *tspc;
1368 	int lstat = l->l_stat;
1369 
1370 	KASSERT(lwp_locked(l, NULL));
1371 	KASSERT(tci != NULL);
1372 
1373 	/* If LWP is still on the CPU, it must be handled like LSONPROC */
1374 	if ((l->l_pflag & LP_RUNNING) != 0) {
1375 		lstat = LSONPROC;
1376 	}
1377 
1378 	/*
1379 	 * The destination CPU may have changed while a previous
1380 	 * migration was still in progress.
1381 	 */
1382 	if (l->l_target_cpu != NULL) {
1383 		l->l_target_cpu = tci;
1384 		lwp_unlock(l);
1385 		return;
1386 	}
1387 
1388 	/* Nothing to do if trying to migrate to the same CPU */
1389 	if (l->l_cpu == tci) {
1390 		lwp_unlock(l);
1391 		return;
1392 	}
1393 
1394 	KASSERT(l->l_target_cpu == NULL);
1395 	tspc = &tci->ci_schedstate;
1396 	switch (lstat) {
1397 	case LSRUN:
1398 		l->l_target_cpu = tci;
1399 		break;
1400 	case LSSLEEP:
1401 		l->l_cpu = tci;
1402 		break;
1403 	case LSIDL:
1404 	case LSSTOP:
1405 	case LSSUSPENDED:
1406 		l->l_cpu = tci;
1407 		if (l->l_wchan == NULL) {
1408 			lwp_unlock_to(l, tspc->spc_lwplock);
1409 			return;
1410 		}
1411 		break;
1412 	case LSONPROC:
1413 		l->l_target_cpu = tci;
1414 		spc_lock(l->l_cpu);
1415 		sched_resched_cpu(l->l_cpu, PRI_USER_RT, true);
1416 		/* spc now unlocked */
1417 		break;
1418 	}
1419 	lwp_unlock(l);
1420 }
1421 
1422 #define	lwp_find_exclude(l)					\
1423 	((l)->l_stat == LSIDL || (l)->l_stat == LSZOMB)
1424 
1425 /*
1426  * Find an LWP in a process.  Arguments may be zero, in which case
1427  * the calling process and the first LWP in the list will be used.
1428  * On success, returns with the proc locked.
1429  *
1430  * => pid == 0 -> look in curproc.
1431  * => pid == -1 -> match any proc.
1432  * => otherwise look up the proc.
1433  *
1434  * => lid == 0 -> first LWP in the proc
1435  * => otherwise specific LWP
1436  */
1437 struct lwp *
1438 lwp_find2(pid_t pid, lwpid_t lid)
1439 {
1440 	proc_t *p;
1441 	lwp_t *l;
1442 
1443 	/* First LWP of specified proc. */
1444 	if (lid == 0) {
1445 		switch (pid) {
1446 		case -1:
1447 			/* No lookup keys. */
1448 			return NULL;
1449 		case 0:
1450 			p = curproc;
1451 			mutex_enter(p->p_lock);
1452 			break;
1453 		default:
1454 			mutex_enter(&proc_lock);
1455 			p = proc_find(pid);
1456 			if (__predict_false(p == NULL)) {
1457 				mutex_exit(&proc_lock);
1458 				return NULL;
1459 			}
1460 			mutex_enter(p->p_lock);
1461 			mutex_exit(&proc_lock);
1462 			break;
1463 		}
1464 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1465 			if (__predict_true(!lwp_find_exclude(l)))
1466 				break;
1467 		}
1468 		goto out;
1469 	}
1470 
1471 	l = proc_find_lwp_acquire_proc(lid, &p);
1472 	if (l == NULL)
1473 		return NULL;
1474 	KASSERT(p != NULL);
1475 	KASSERT(mutex_owned(p->p_lock));
1476 
1477 	if (__predict_false(lwp_find_exclude(l))) {
1478 		l = NULL;
1479 		goto out;
1480 	}
1481 
1482 	/* Apply proc filter, if applicable. */
1483 	switch (pid) {
1484 	case -1:
1485 		/* Match anything. */
1486 		break;
1487 	case 0:
1488 		if (p != curproc)
1489 			l = NULL;
1490 		break;
1491 	default:
1492 		if (p->p_pid != pid)
1493 			l = NULL;
1494 		break;
1495 	}
1496 
1497  out:
1498 	if (__predict_false(l == NULL)) {
1499 		mutex_exit(p->p_lock);
1500 	}
1501 	return l;
1502 }
1503 
1504 /*
1505  * Look up a live LWP within the specified process.
1506  *
1507  * Must be called with p->p_lock held (as it looks at the radix tree,
1508  * and also wants to exclude idle and zombie LWPs).
1509  */
1510 struct lwp *
1511 lwp_find(struct proc *p, lwpid_t id)
1512 {
1513 	struct lwp *l;
1514 
1515 	KASSERT(mutex_owned(p->p_lock));
1516 
1517 	l = proc_find_lwp(p, id);
1518 	KASSERT(l == NULL || l->l_lid == id);
1519 
1520 	/*
1521 	 * No need to lock - all of these conditions will
1522 	 * be visible with the process level mutex held.
1523 	 */
1524 	if (__predict_false(l != NULL && lwp_find_exclude(l)))
1525 		l = NULL;
1526 
1527 	return l;
1528 }
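/*
 * Illustrative use (sketch):
 *
 *	mutex_enter(p->p_lock);
 *	if ((l = lwp_find(p, lid)) != NULL) {
 *		(l is live - neither LSIDL nor LSZOMB - for as long
 *		as p_lock remains held)
 *	}
 *	mutex_exit(p->p_lock);
 */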
1529 
1530 /*
1531  * Update an LWP's cached credentials to mirror the process' master copy.
1532  *
1533  * This happens early in the syscall path, on user trap, and on LWP
1534  * creation.  A long-running LWP can also voluntarily choose to update
1535  * its credentials by calling this routine.  This may be called from
1536  * LWP_CACHE_CREDS(), which checks l->l_prflag & LPR_CRMOD beforehand.
1537  */
1538 void
1539 lwp_update_creds(struct lwp *l)
1540 {
1541 	kauth_cred_t oc;
1542 	struct proc *p;
1543 
1544 	p = l->l_proc;
1545 	oc = l->l_cred;
1546 
1547 	mutex_enter(p->p_lock);
1548 	kauth_cred_hold(p->p_cred);
1549 	l->l_cred = p->p_cred;
1550 	l->l_prflag &= ~LPR_CRMOD;
1551 	mutex_exit(p->p_lock);
1552 	if (oc != NULL)
1553 		kauth_cred_free(oc);
1554 }
1555 
1556 /*
1557  * Verify that an LWP is locked, and optionally verify that the lock matches
1558  * one we specify.
1559  */
1560 int
1561 lwp_locked(struct lwp *l, kmutex_t *mtx)
1562 {
1563 	kmutex_t *cur = l->l_mutex;
1564 
1565 	return mutex_owned(cur) && (mtx == cur || mtx == NULL);
1566 }
1567 
1568 /*
1569  * Lend a new mutex to an LWP.  The old mutex must be held.
1570  */
1571 kmutex_t *
1572 lwp_setlock(struct lwp *l, kmutex_t *mtx)
1573 {
1574 	kmutex_t *oldmtx = l->l_mutex;
1575 
1576 	KASSERT(mutex_owned(oldmtx));
1577 
1578 	atomic_store_release(&l->l_mutex, mtx);
1579 	return oldmtx;
1580 }
1581 
1582 /*
1583  * Lend a new mutex to an LWP, and release the old mutex.  The old mutex
1584  * must be held.
1585  */
1586 void
1587 lwp_unlock_to(struct lwp *l, kmutex_t *mtx)
1588 {
1589 	kmutex_t *old;
1590 
1591 	KASSERT(lwp_locked(l, NULL));
1592 
1593 	old = l->l_mutex;
1594 	atomic_store_release(&l->l_mutex, mtx);
1595 	mutex_spin_exit(old);
1596 }
1597 
1598 int
1599 lwp_trylock(struct lwp *l)
1600 {
1601 	kmutex_t *old;
1602 
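	/*
	 * The lock pointer can be changed by lwp_setlock() while we are
	 * acquiring it, so re-check that l_mutex still matches once the
	 * mutex has been taken, and retry if it does not.
	 */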
1603 	for (;;) {
1604 		if (!mutex_tryenter(old = atomic_load_consume(&l->l_mutex)))
1605 			return 0;
1606 		if (__predict_true(atomic_load_relaxed(&l->l_mutex) == old))
1607 			return 1;
1608 		mutex_spin_exit(old);
1609 	}
1610 }
1611 
1612 void
1613 lwp_unsleep(lwp_t *l, bool unlock)
1614 {
1615 
1616 	KASSERT(mutex_owned(l->l_mutex));
1617 	(*l->l_syncobj->sobj_unsleep)(l, unlock);
1618 }
1619 
1620 /*
1621  * Handle exceptions for mi_userret().  Called if any of the flags in
1622  * the LW_USERRET mask is set.
1623  */
1624 void
1625 lwp_userret(struct lwp *l)
1626 {
1627 	struct proc *p;
1628 	int sig;
1629 
1630 	KASSERT(l == curlwp);
1631 	KASSERT(l->l_stat == LSONPROC);
1632 	p = l->l_proc;
1633 
1634 	/*
1635 	 * It is safe to do this read unlocked on an MP system.
1636 	 */
1637 	while ((l->l_flag & LW_USERRET) != 0) {
1638 		/*
1639 		 * Process pending signals first, unless the process
1640 		 * is dumping core or exiting, in which case we will instead
1641 		 * enter the LW_WSUSPEND case below.
1642 		 */
1643 		if ((l->l_flag & (LW_PENDSIG | LW_WCORE | LW_WEXIT)) ==
1644 		    LW_PENDSIG) {
1645 			mutex_enter(p->p_lock);
1646 			while ((sig = issignal(l)) != 0)
1647 				postsig(sig);
1648 			mutex_exit(p->p_lock);
1649 		}
1650 
1651 		/*
1652 		 * Core-dump or suspend pending.
1653 		 *
1654 		 * In case of core dump, suspend ourselves, so that the kernel
1655 		 * stack and therefore the userland registers saved in the
1656 		 * trapframe are around for coredump() to write them out.
1657 		 * We also need to save any PCU resources that we have so that
1658 		 * they are accessible for coredump().  We issue a wakeup on
1659 		 * p->p_lwpcv so that sigexit() will write the core file out
1660 		 * once all other LWPs are suspended.
1661 		 */
1662 		if ((l->l_flag & LW_WSUSPEND) != 0) {
1663 			pcu_save_all(l);
1664 			mutex_enter(p->p_lock);
1665 			p->p_nrlwps--;
1666 			cv_broadcast(&p->p_lwpcv);
1667 			lwp_lock(l);
1668 			l->l_stat = LSSUSPENDED;
1669 			lwp_unlock(l);
1670 			mutex_exit(p->p_lock);
1671 			lwp_lock(l);
1672 			spc_lock(l->l_cpu);
1673 			mi_switch(l);
1674 		}
1675 
1676 		/* Process is exiting. */
1677 		if ((l->l_flag & LW_WEXIT) != 0) {
1678 			lwp_exit(l);
1679 			KASSERT(0);
1680 			/* NOTREACHED */
1681 		}
1682 
1683 		/* update lwpctl processor (for vfork child_return) */
1684 		if (l->l_flag & LW_LWPCTL) {
1685 			lwp_lock(l);
1686 			KASSERT(kpreempt_disabled());
1687 			l->l_lwpctl->lc_curcpu = (int)cpu_index(l->l_cpu);
1688 			l->l_lwpctl->lc_pctr++;
1689 			l->l_flag &= ~LW_LWPCTL;
1690 			lwp_unlock(l);
1691 		}
1692 	}
1693 }
1694 
1695 /*
1696  * Force an LWP to enter the kernel, to take a trip through lwp_userret().
1697  */
1698 void
1699 lwp_need_userret(struct lwp *l)
1700 {
1701 
1702 	KASSERT(!cpu_intr_p());
1703 	KASSERT(lwp_locked(l, NULL));
1704 
1705 	/*
1706 	 * If the LWP is in any state other than LSONPROC, we know that it
1707 	 * is executing in-kernel and will hit userret() on the way out.
1708 	 *
1709 	 * If the LWP is curlwp, then we know we'll be back out to userspace
1710 	 * soon (can't be called from a hardware interrupt here).
1711 	 *
1712 	 * Otherwise, we can't be sure what the LWP is doing, so first make
1713 	 * sure the update to l_flag will be globally visible, and then
1714 	 * force the LWP to take a trip through trap() where it will do
1715 	 * userret().
1716 	 */
1717 	if (l->l_stat == LSONPROC && l != curlwp) {
1718 		membar_producer();
1719 		cpu_signotify(l);
1720 	}
1721 }
1722 
1723 /*
1724  * Add one reference to an LWP.  This will prevent the LWP from
1725  * exiting, thus keeping the lwp structure and PCB around to inspect.
1726  */
1727 void
1728 lwp_addref(struct lwp *l)
1729 {
1730 	KASSERT(mutex_owned(l->l_proc->p_lock));
1731 	KASSERT(l->l_stat != LSZOMB);
1732 	l->l_refcnt++;
1733 }
1734 
1735 /*
1736  * Remove one reference to an LWP.  If this is the last reference,
1737  * then we must finalize the LWP's death.
1738  */
1739 void
1740 lwp_delref(struct lwp *l)
1741 {
1742 	struct proc *p = l->l_proc;
1743 
1744 	mutex_enter(p->p_lock);
1745 	lwp_delref2(l);
1746 	mutex_exit(p->p_lock);
1747 }
1748 
1749 /*
1750  * Remove one reference to an LWP.  If this is the last reference,
1751  * then we must finalize the LWP's death.  The proc mutex is held
1752  * on entry.
1753  */
1754 void
1755 lwp_delref2(struct lwp *l)
1756 {
1757 	struct proc *p = l->l_proc;
1758 
1759 	KASSERT(mutex_owned(p->p_lock));
1760 	KASSERT(l->l_stat != LSZOMB);
1761 	KASSERT(l->l_refcnt > 0);
1762 
1763 	if (--l->l_refcnt == 0)
1764 		cv_broadcast(&p->p_lwpcv);
1765 }
1766 
1767 /*
1768  * Drain all references to the current LWP.  Returns true if
1769  * we blocked.
1770  */
1771 bool
1772 lwp_drainrefs(struct lwp *l)
1773 {
1774 	struct proc *p = l->l_proc;
1775 	bool rv = false;
1776 
1777 	KASSERT(mutex_owned(p->p_lock));
1778 
1779 	l->l_prflag |= LPR_DRAINING;
1780 
1781 	while (l->l_refcnt > 0) {
1782 		rv = true;
1783 		cv_wait(&p->p_lwpcv, p->p_lock);
1784 	}
1785 	return rv;
1786 }
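/*
 * Illustrative reference pattern (a sketch of the contract above, in the
 * style of procfs-like users):
 *
 *	mutex_enter(p->p_lock);
 *	lwp_addref(l);			(pin the LWP)
 *	mutex_exit(p->p_lock);
 *	(... inspect l, its PCB, etc. ...)
 *	lwp_delref(l);			(may wake an LWP in lwp_drainrefs())
 */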
1787 
1788 /*
1789  * Return true if the specified LWP is 'alive'.  Only p->p_lock need
1790  * be held.
1791  */
1792 bool
1793 lwp_alive(lwp_t *l)
1794 {
1795 
1796 	KASSERT(mutex_owned(l->l_proc->p_lock));
1797 
1798 	switch (l->l_stat) {
1799 	case LSSLEEP:
1800 	case LSRUN:
1801 	case LSONPROC:
1802 	case LSSTOP:
1803 	case LSSUSPENDED:
1804 		return true;
1805 	default:
1806 		return false;
1807 	}
1808 }
1809 
1810 /*
1811  * Return first live LWP in the process.
1812  */
1813 lwp_t *
1814 lwp_find_first(proc_t *p)
1815 {
1816 	lwp_t *l;
1817 
1818 	KASSERT(mutex_owned(p->p_lock));
1819 
1820 	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1821 		if (lwp_alive(l)) {
1822 			return l;
1823 		}
1824 	}
1825 
1826 	return NULL;
1827 }
1828 
1829 /*
1830  * Allocate a new lwpctl structure for a user LWP.
1831  */
1832 int
1833 lwp_ctl_alloc(vaddr_t *uaddr)
1834 {
1835 	lcproc_t *lp;
1836 	u_int bit, i, offset;
1837 	struct uvm_object *uao;
1838 	int error;
1839 	lcpage_t *lcp;
1840 	proc_t *p;
1841 	lwp_t *l;
1842 
1843 	l = curlwp;
1844 	p = l->l_proc;
1845 
1846 	/* don't allow a vforked process to create lwp ctls */
1847 	if (p->p_lflag & PL_PPWAIT)
1848 		return EBUSY;
1849 
1850 	if (l->l_lcpage != NULL) {
1851 		lcp = l->l_lcpage;
1852 		*uaddr = lcp->lcp_uaddr + (vaddr_t)l->l_lwpctl - lcp->lcp_kaddr;
1853 		return 0;
1854 	}
1855 
1856 	/* First time around, allocate header structure for the process. */
1857 	if ((lp = p->p_lwpctl) == NULL) {
1858 		lp = kmem_alloc(sizeof(*lp), KM_SLEEP);
1859 		mutex_init(&lp->lp_lock, MUTEX_DEFAULT, IPL_NONE);
1860 		lp->lp_uao = NULL;
1861 		TAILQ_INIT(&lp->lp_pages);
1862 		mutex_enter(p->p_lock);
1863 		if (p->p_lwpctl == NULL) {
1864 			p->p_lwpctl = lp;
1865 			mutex_exit(p->p_lock);
1866 		} else {
1867 			mutex_exit(p->p_lock);
1868 			mutex_destroy(&lp->lp_lock);
1869 			kmem_free(lp, sizeof(*lp));
1870 			lp = p->p_lwpctl;
1871 		}
1872 	}
1873 
1874 	/*
1875 	 * Set up an anonymous memory region to hold the shared pages.
1876 	 * Map them into the process' address space.  The user vmspace
1877 	 * gets the first reference on the UAO.
1878 	 */
1879 	mutex_enter(&lp->lp_lock);
1880 	if (lp->lp_uao == NULL) {
1881 		lp->lp_uao = uao_create(LWPCTL_UAREA_SZ, 0);
1882 		lp->lp_cur = 0;
1883 		lp->lp_max = LWPCTL_UAREA_SZ;
1884 		lp->lp_uva = p->p_emul->e_vm_default_addr(p,
1885 		     (vaddr_t)p->p_vmspace->vm_daddr, LWPCTL_UAREA_SZ,
1886 		     p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN);
1887 		error = uvm_map(&p->p_vmspace->vm_map, &lp->lp_uva,
1888 		    LWPCTL_UAREA_SZ, lp->lp_uao, 0, 0, UVM_MAPFLAG(UVM_PROT_RW,
1889 		    UVM_PROT_RW, UVM_INH_NONE, UVM_ADV_NORMAL, 0));
1890 		if (error != 0) {
1891 			uao_detach(lp->lp_uao);
1892 			lp->lp_uao = NULL;
1893 			mutex_exit(&lp->lp_lock);
1894 			return error;
1895 		}
1896 	}
1897 
1898 	/* Get a free block and allocate for this LWP. */
1899 	TAILQ_FOREACH(lcp, &lp->lp_pages, lcp_chain) {
1900 		if (lcp->lcp_nfree != 0)
1901 			break;
1902 	}
1903 	if (lcp == NULL) {
1904 		/* Nothing available - try to set up a free page. */
1905 		if (lp->lp_cur == lp->lp_max) {
1906 			mutex_exit(&lp->lp_lock);
1907 			return ENOMEM;
1908 		}
1909 		lcp = kmem_alloc(LWPCTL_LCPAGE_SZ, KM_SLEEP);
1910 
1911 		/*
1912 		 * Wire the next page down in kernel space.  Since this
1913 		 * is a new mapping, we must add a reference.
1914 		 */
1915 		uao = lp->lp_uao;
1916 		(*uao->pgops->pgo_reference)(uao);
1917 		lcp->lcp_kaddr = vm_map_min(kernel_map);
1918 		error = uvm_map(kernel_map, &lcp->lcp_kaddr, PAGE_SIZE,
1919 		    uao, lp->lp_cur, PAGE_SIZE,
1920 		    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
1921 		    UVM_INH_NONE, UVM_ADV_RANDOM, 0));
1922 		if (error != 0) {
1923 			mutex_exit(&lp->lp_lock);
1924 			kmem_free(lcp, LWPCTL_LCPAGE_SZ);
1925 			(*uao->pgops->pgo_detach)(uao);
1926 			return error;
1927 		}
1928 		error = uvm_map_pageable(kernel_map, lcp->lcp_kaddr,
1929 		    lcp->lcp_kaddr + PAGE_SIZE, FALSE, 0);
1930 		if (error != 0) {
1931 			mutex_exit(&lp->lp_lock);
1932 			uvm_unmap(kernel_map, lcp->lcp_kaddr,
1933 			    lcp->lcp_kaddr + PAGE_SIZE);
1934 			kmem_free(lcp, LWPCTL_LCPAGE_SZ);
1935 			return error;
1936 		}
1937 		/* Prepare the page descriptor and link into the list. */
1938 		lcp->lcp_uaddr = lp->lp_uva + lp->lp_cur;
1939 		lp->lp_cur += PAGE_SIZE;
1940 		lcp->lcp_nfree = LWPCTL_PER_PAGE;
1941 		lcp->lcp_rotor = 0;
1942 		memset(lcp->lcp_bitmap, 0xff, LWPCTL_BITMAP_SZ);
1943 		TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
1944 	}
1945 	for (i = lcp->lcp_rotor; lcp->lcp_bitmap[i] == 0;) {
1946 		if (++i >= LWPCTL_BITMAP_ENTRIES)
1947 			i = 0;
1948 	}
1949 	bit = ffs(lcp->lcp_bitmap[i]) - 1;
1950 	lcp->lcp_bitmap[i] ^= (1U << bit);
1951 	lcp->lcp_rotor = i;
1952 	lcp->lcp_nfree--;
1953 	l->l_lcpage = lcp;
1954 	offset = (i << 5) + bit;
1955 	l->l_lwpctl = (lwpctl_t *)lcp->lcp_kaddr + offset;
1956 	*uaddr = lcp->lcp_uaddr + offset * sizeof(lwpctl_t);
1957 	mutex_exit(&lp->lp_lock);
1958 
1959 	KPREEMPT_DISABLE(l);
1960 	l->l_lwpctl->lc_curcpu = (int)cpu_index(curcpu());
1961 	KPREEMPT_ENABLE(l);
1962 
1963 	return 0;
1964 }
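/*
 * Worked example of the slot arithmetic above (hypothetical values):
 * each lcp_bitmap[] word tracks 32 slots, so with i == 2 and bit == 7
 * the slot number is (2 << 5) + 7 == 71.  The structure then lives at
 * lcp_kaddr + 71 * sizeof(lwpctl_t) in kernel space, with the matching
 * user address at lcp_uaddr + 71 * sizeof(lwpctl_t).
 */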
1965 
1966 /*
1967  * Free an lwpctl structure back to the per-process list.
1968  */
1969 void
1970 lwp_ctl_free(lwp_t *l)
1971 {
1972 	struct proc *p = l->l_proc;
1973 	lcproc_t *lp;
1974 	lcpage_t *lcp;
1975 	u_int map, offset;
1976 
1977 	/* don't free a lwp context we borrowed for vfork */
1978 	if (p->p_lflag & PL_PPWAIT) {
1979 		l->l_lwpctl = NULL;
1980 		return;
1981 	}
1982 
1983 	lp = p->p_lwpctl;
1984 	KASSERT(lp != NULL);
1985 
1986 	lcp = l->l_lcpage;
1987 	offset = (u_int)((lwpctl_t *)l->l_lwpctl - (lwpctl_t *)lcp->lcp_kaddr);
1988 	KASSERT(offset < LWPCTL_PER_PAGE);
1989 
1990 	mutex_enter(&lp->lp_lock);
1991 	lcp->lcp_nfree++;
1992 	map = offset >> 5;
1993 	lcp->lcp_bitmap[map] |= (1U << (offset & 31));
1994 	if (lcp->lcp_bitmap[lcp->lcp_rotor] == 0)
1995 		lcp->lcp_rotor = map;
1996 	if (TAILQ_FIRST(&lp->lp_pages)->lcp_nfree == 0) {
1997 		TAILQ_REMOVE(&lp->lp_pages, lcp, lcp_chain);
1998 		TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
1999 	}
2000 	mutex_exit(&lp->lp_lock);
2001 }
2002 
2003 /*
2004  * Process is exiting; tear down lwpctl state.  This can only be safely
2005  * called by the last LWP in the process.
2006  */
2007 void
2008 lwp_ctl_exit(void)
2009 {
2010 	lcpage_t *lcp, *next;
2011 	lcproc_t *lp;
2012 	proc_t *p;
2013 	lwp_t *l;
2014 
2015 	l = curlwp;
2016 	l->l_lwpctl = NULL;
2017 	l->l_lcpage = NULL;
2018 	p = l->l_proc;
2019 	lp = p->p_lwpctl;
2020 
2021 	KASSERT(lp != NULL);
2022 	KASSERT(p->p_nlwps == 1);
2023 
2024 	for (lcp = TAILQ_FIRST(&lp->lp_pages); lcp != NULL; lcp = next) {
2025 		next = TAILQ_NEXT(lcp, lcp_chain);
2026 		uvm_unmap(kernel_map, lcp->lcp_kaddr,
2027 		    lcp->lcp_kaddr + PAGE_SIZE);
2028 		kmem_free(lcp, LWPCTL_LCPAGE_SZ);
2029 	}
2030 
2031 	if (lp->lp_uao != NULL) {
2032 		uvm_unmap(&p->p_vmspace->vm_map, lp->lp_uva,
2033 		    lp->lp_uva + LWPCTL_UAREA_SZ);
2034 	}
2035 
2036 	mutex_destroy(&lp->lp_lock);
2037 	kmem_free(lp, sizeof(*lp));
2038 	p->p_lwpctl = NULL;
2039 }
2040 
2041 /*
2042  * Return the current LWP's "preemption counter".  Used to detect
2043  * preemption across operations that can tolerate preemption without
2044  * crashing, but which may generate incorrect results if preempted.
2045  */
2046 uint64_t
2047 lwp_pctr(void)
2048 {
2049 
2050 	return curlwp->l_ncsw;
2051 }
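/*
 * Illustrative use (sketch): detect whether the current LWP was context
 * switched during an operation that should complete undisturbed.
 *
 *	uint64_t pctr = lwp_pctr();
 *	(... do the preemption-sensitive work ...)
 *	if (pctr != lwp_pctr())
 *		(we were switched out at least once; redo the work)
 */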
2052 
2053 /*
2054  * Set an LWP's private data pointer.
2055  */
2056 int
2057 lwp_setprivate(struct lwp *l, void *ptr)
2058 {
2059 	int error = 0;
2060 
2061 	l->l_private = ptr;
2062 #ifdef __HAVE_CPU_LWP_SETPRIVATE
2063 	error = cpu_lwp_setprivate(l, ptr);
2064 #endif
2065 	return error;
2066 }
2067 
2068 /*
2069  * Perform any thread-related cleanup on LWP exit.
2070  * N.B. l->l_proc->p_lock must be HELD on entry but will
2071  * be released before returning!
2072  */
2073 void
2074 lwp_thread_cleanup(struct lwp *l)
2075 {
2076 
2077 	KASSERT(mutex_owned(l->l_proc->p_lock));
2078 	mutex_exit(l->l_proc->p_lock);
2079 
2080 	/*
2081 	 * If the LWP has robust futexes, release them all
2082 	 * now.
2083 	 */
2084 	if (__predict_false(l->l_robust_head != 0)) {
2085 		futex_release_all_lwp(l);
2086 	}
2087 }
2088 
2089 #if defined(DDB)
2090 #include <machine/pcb.h>
2091 
2092 void
2093 lwp_whatis(uintptr_t addr, void (*pr)(const char *, ...))
2094 {
2095 	lwp_t *l;
2096 
2097 	LIST_FOREACH(l, &alllwp, l_list) {
2098 		uintptr_t stack = (uintptr_t)KSTACK_LOWEST_ADDR(l);
2099 
2100 		if (addr < stack || stack + KSTACK_SIZE <= addr) {
2101 			continue;
2102 		}
2103 		(*pr)("%p is %p+%zu, LWP %p's stack\n",
2104 		    (void *)addr, (void *)stack,
2105 		    (size_t)(addr - stack), l);
2106 	}
2107 }
2108 #endif /* defined(DDB) */
2109