/*	$OpenBSD: kern_synch.c,v 1.218 2025/01/22 16:14:22 claudio Exp $	*/
/*	$NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.6 (Berkeley) 1/21/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/sched.h>
#include <sys/timeout.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/refcnt.h>
#include <sys/atomic.h>
#include <sys/tracepoint.h>

#include <ddb/db_output.h>

#include <machine/spinlock.h>

#ifdef DIAGNOSTIC
#include <sys/syslog.h>
#endif

#ifdef KTRACE
#include <sys/ktrace.h>
#endif

int	sleep_signal_check(struct proc *, int);

extern void proc_stop(struct proc *p, int);

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
#define LOOKUP(x)	(((long)(x) >> 8) & (TABLESIZE - 1))
TAILQ_HEAD(slpque,proc) slpque[TABLESIZE];
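
/*
 * Worked example (illustrative, not from the source): a wait channel
 * at kernel address 0xffffff0012345678 hashes to
 *
 *	LOOKUP(0xffffff0012345678) == (0xffffff0012345678 >> 8) & 127
 *				   == 0x56 == 86
 *
 * so every thread sleeping on that channel queues on slpque[86].
 */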

void
sleep_queue_init(void)
{
	int i;

	for (i = 0; i < TABLESIZE; i++)
		TAILQ_INIT(&slpque[i]);
}

/*
 * Global sleep channel for threads that do not want to
 * receive wakeup(9) broadcasts.
 */
int nowake;

/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
extern int safepri;

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping, otherwise signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 */
int
tsleep(const volatile void *ident, int priority, const char *wmesg, int timo)
{
#ifdef MULTIPROCESSOR
	int hold_count;
#endif

	KASSERT((priority & ~(PRIMASK | PCATCH)) == 0);
	KASSERT(ident != &nowake || ISSET(priority, PCATCH) || timo != 0);

#ifdef MULTIPROCESSOR
	KASSERT(ident == &nowake || timo || _kernel_lock_held());
#endif

#ifdef DDB
	if (cold == 2)
		db_stack_dump();
#endif
	if (cold || panicstr) {
		int s;
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		s = splhigh();
		splx(safepri);
#ifdef MULTIPROCESSOR
		if (_kernel_lock_held()) {
			hold_count = __mp_release_all(&kernel_lock);
			__mp_acquire_count(&kernel_lock, hold_count);
		}
#endif
		splx(s);
		return (0);
	}

	sleep_setup(ident, priority, wmesg);
	return sleep_finish(timo, 1);
}
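
/*
 * Hypothetical usage sketch (all names are illustrative, not from this
 * file): a driver waits for its interrupt handler to assert a ready
 * flag, waking via wakeup(9) on the same channel:
 *
 *	while (sc->sc_ready == 0) {
 *		error = tsleep(&sc->sc_ready, PRIBIO | PCATCH, "scrdy", hz);
 *		if (error != 0 && error != EWOULDBLOCK)
 *			return (error);
 *	}
 *
 * The loop re-checks the condition because a wakeup only means the
 * channel was signalled, not that the condition still holds.
 */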

int
tsleep_nsec(const volatile void *ident, int priority, const char *wmesg,
    uint64_t nsecs)
{
	uint64_t to_ticks;

	if (nsecs == INFSLP)
		return tsleep(ident, priority, wmesg, 0);
#ifdef DIAGNOSTIC
	if (nsecs == 0) {
		log(LOG_WARNING,
		    "%s: %s[%d]: %s: trying to sleep zero nanoseconds\n",
		    __func__, curproc->p_p->ps_comm, curproc->p_p->ps_pid,
		    wmesg);
	}
#endif
	/*
	 * We want to sleep at least nsecs nanoseconds worth of ticks.
	 *
	 *  - Clamp nsecs to prevent arithmetic overflow.
	 *
	 *  - Round nsecs up to account for any nanoseconds that do not
	 *    divide evenly into tick_nsec, otherwise we'll lose them to
	 *    integer division in the next step.  We add (tick_nsec - 1)
	 *    to keep from introducing a spurious tick if there are no
	 *    such nanoseconds, i.e. nsecs % tick_nsec == 0.
	 *
	 *  - Divide the rounded value down to a count of ticks.  We
	 *    divide by (tick_nsec + 1) to discard the extra tick
	 *    introduced if, before rounding, nsecs % tick_nsec == 1.
	 *
	 *  - Finally, add a tick to the result.  We need to wait out
	 *    the current tick before we can begin counting our interval,
	 *    as we do not know how much time has elapsed since the
	 *    current tick began.
	 */
	nsecs = MIN(nsecs, UINT64_MAX - tick_nsec);
	to_ticks = (nsecs + tick_nsec - 1) / (tick_nsec + 1) + 1;
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;
	return tsleep(ident, priority, wmesg, (int)to_ticks);
}
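
/*
 * Worked example (illustrative): with hz = 100, tick_nsec is
 * 10,000,000.  A request for nsecs = 25,000,000 (25ms) yields
 *
 *	to_ticks = (25000000 + 9999999) / (10000000 + 1) + 1
 *		 = 34999999 / 10000001 + 1
 *		 = 3 + 1 = 4
 *
 * i.e. the partially elapsed current tick plus three full ticks,
 * guaranteeing at least 30ms of sleep and so satisfying the
 * "at least nsecs" contract.
 */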

/*
 * Same as tsleep, but if we have a mutex provided, then once we've
 * entered the sleep queue we drop the mutex. After sleeping we re-lock.
 */
int
msleep(const volatile void *ident, struct mutex *mtx, int priority,
    const char *wmesg, int timo)
{
	int error, spl;
#ifdef MULTIPROCESSOR
	int hold_count;
#endif

	KASSERT((priority & ~(PRIMASK | PCATCH | PNORELOCK)) == 0);
	KASSERT(ident != &nowake || ISSET(priority, PCATCH) || timo != 0);
	KASSERT(mtx != NULL);

#ifdef DDB
	if (cold == 2)
		db_stack_dump();
#endif
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		spl = MUTEX_OLDIPL(mtx);
		MUTEX_OLDIPL(mtx) = safepri;
		mtx_leave(mtx);
#ifdef MULTIPROCESSOR
		if (_kernel_lock_held()) {
			hold_count = __mp_release_all(&kernel_lock);
			__mp_acquire_count(&kernel_lock, hold_count);
		}
#endif
		if ((priority & PNORELOCK) == 0) {
			mtx_enter(mtx);
			MUTEX_OLDIPL(mtx) = spl;
		} else
			splx(spl);
		return (0);
	}

	sleep_setup(ident, priority, wmesg);

	mtx_leave(mtx);
	/* signal may stop the process, release mutex before that */
	error = sleep_finish(timo, 1);

	if ((priority & PNORELOCK) == 0)
		mtx_enter(mtx);

	return error;
}
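
/*
 * Hypothetical usage sketch (names are illustrative): wait on a
 * condition protected by a mutex.  msleep(9) releases the mutex once
 * the thread is on the sleep queue and, without PNORELOCK, reacquires
 * it before returning, so the condition can be re-checked safely:
 *
 *	mtx_enter(&sc->sc_mtx);
 *	while (sc->sc_busy) {
 *		error = msleep_nsec(&sc->sc_busy, &sc->sc_mtx,
 *		    PRIBIO | PCATCH, "scbusy", INFSLP);
 *		if (error != 0)
 *			break;
 *	}
 *	mtx_leave(&sc->sc_mtx);
 */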

int
msleep_nsec(const volatile void *ident, struct mutex *mtx, int priority,
    const char *wmesg, uint64_t nsecs)
{
	uint64_t to_ticks;

	if (nsecs == INFSLP)
		return msleep(ident, mtx, priority, wmesg, 0);
#ifdef DIAGNOSTIC
	if (nsecs == 0) {
		log(LOG_WARNING,
		    "%s: %s[%d]: %s: trying to sleep zero nanoseconds\n",
		    __func__, curproc->p_p->ps_comm, curproc->p_p->ps_pid,
		    wmesg);
	}
#endif
	nsecs = MIN(nsecs, UINT64_MAX - tick_nsec);
	to_ticks = (nsecs + tick_nsec - 1) / (tick_nsec + 1) + 1;
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;
	return msleep(ident, mtx, priority, wmesg, (int)to_ticks);
}

/*
 * Same as tsleep, but if we have a rwlock provided, then once we've
 * entered the sleep queue we drop it. After sleeping we re-lock.
 */
int
rwsleep(const volatile void *ident, struct rwlock *rwl, int priority,
    const char *wmesg, int timo)
{
	int error, status;

	KASSERT((priority & ~(PRIMASK | PCATCH | PNORELOCK)) == 0);
	KASSERT(ident != &nowake || ISSET(priority, PCATCH) || timo != 0);
	KASSERT(ident != rwl);
	rw_assert_anylock(rwl);
	status = rw_status(rwl);

	sleep_setup(ident, priority, wmesg);

	rw_exit(rwl);
	/* signal may stop the process, release rwlock before that */
	error = sleep_finish(timo, 1);

	if ((priority & PNORELOCK) == 0)
		rw_enter(rwl, status);

	return error;
}

int
rwsleep_nsec(const volatile void *ident, struct rwlock *rwl, int priority,
    const char *wmesg, uint64_t nsecs)
{
	uint64_t to_ticks;

	if (nsecs == INFSLP)
		return rwsleep(ident, rwl, priority, wmesg, 0);
#ifdef DIAGNOSTIC
	if (nsecs == 0) {
		log(LOG_WARNING,
		    "%s: %s[%d]: %s: trying to sleep zero nanoseconds\n",
		    __func__, curproc->p_p->ps_comm, curproc->p_p->ps_pid,
		    wmesg);
	}
#endif
	nsecs = MIN(nsecs, UINT64_MAX - tick_nsec);
	to_ticks = (nsecs + tick_nsec - 1) / (tick_nsec + 1) + 1;
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;
	return rwsleep(ident, rwl, priority, wmesg, (int)to_ticks);
}

void
sleep_setup(const volatile void *ident, int prio, const char *wmesg)
{
	struct proc *p = curproc;

#ifdef DIAGNOSTIC
	if (p->p_flag & P_CANTSLEEP)
		panic("sleep: %s failed insomnia", p->p_p->ps_comm);
	if (ident == NULL)
		panic("sleep: no ident");
	if (p->p_stat != SONPROC)
		panic("sleep: not SONPROC but %d", p->p_stat);
#endif
	/* exiting processes are not allowed to catch signals */
	if (p->p_flag & P_WEXIT)
		CLR(prio, PCATCH);

	SCHED_LOCK();

	TRACEPOINT(sched, sleep, NULL);

	p->p_wchan = ident;
	p->p_wmesg = wmesg;
	p->p_slptime = 0;
	p->p_slppri = prio & PRIMASK;
	atomic_setbits_int(&p->p_flag, P_WSLEEP);
	TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], p, p_runq);
	if (prio & PCATCH)
		atomic_setbits_int(&p->p_flag, P_SINTR);
	p->p_stat = SSLEEP;

	SCHED_UNLOCK();
}

int
sleep_finish(int timo, int do_sleep)
{
	struct proc *p = curproc;
	int catch, error = 0, error1 = 0;

	catch = p->p_flag & P_SINTR;

	if (timo != 0) {
		KASSERT((p->p_flag & P_TIMEOUT) == 0);
		timeout_add(&p->p_sleep_to, timo);
	}

	if (catch != 0) {
		if ((error = sleep_signal_check(p, 0)) != 0) {
			catch = 0;
			do_sleep = 0;
		}
	}

	SCHED_LOCK();
	/*
	 * A few checks need to happen before going to sleep:
	 * - If the wakeup happens while going to sleep, p->p_wchan
	 * will be NULL. In that case unwind immediately but still
	 * check for possible signals and timeouts.
	 * - If the sleep is aborted, call unsleep and take us off the
	 * sleep queue.
	 * - If requested to stop, force a switch even if the sleep
	 * condition got cleared.
	 */
	if (p->p_wchan == NULL)
		do_sleep = 0;
	if (do_sleep == 0)
		unsleep(p);
	if (p->p_stat == SSTOP)
		do_sleep = 1;
	atomic_clearbits_int(&p->p_flag, P_WSLEEP);

	if (do_sleep) {
		KASSERT(p->p_stat == SSLEEP || p->p_stat == SSTOP);
		p->p_ru.ru_nvcsw++;
		mi_switch();
	} else {
		KASSERT(p->p_stat == SONPROC || p->p_stat == SSLEEP);
		p->p_stat = SONPROC;
	}

#ifdef DIAGNOSTIC
	if (p->p_stat != SONPROC)
		panic("sleep_finish !SONPROC");
#endif

	p->p_cpu->ci_schedstate.spc_curpriority = p->p_usrpri;
	SCHED_UNLOCK();

	/*
	 * Even though this belongs to the signal handling part of sleep,
	 * we need to clear it before the ktrace.
	 */
	atomic_clearbits_int(&p->p_flag, P_SINTR);

	if (timo != 0) {
		if (p->p_flag & P_TIMEOUT) {
			error1 = EWOULDBLOCK;
		} else {
			/* This can sleep. It must not use timeouts. */
			timeout_del_barrier(&p->p_sleep_to);
		}
		atomic_clearbits_int(&p->p_flag, P_TIMEOUT);
	}

	/*
	 * Check if the thread was woken up because of an unwind or
	 * signal, but ignore any pending stop condition.
	 */
	if (catch != 0)
		error = sleep_signal_check(p, 1);

	/* Signal errors are higher priority than timeouts. */
	if (error == 0 && error1 != 0)
		error = error1;

	return error;
}

/*
 * Check and handle signals and suspensions around a sleep cycle.
 * The 2nd call in sleep_finish() sets after_sleep = 1. In this case
 * any pending suspend event came in after the wakeup / unsleep and
 * can therefore be ignored. Once the process hits userret the event
 * will be picked up again.
 */
int
sleep_signal_check(struct proc *p, int after_sleep)
{
	struct sigctx ctx;
	int err, sig;

	if ((err = single_thread_check(p, 1)) != 0) {
		if (err != EWOULDBLOCK)
			return err;

		/* requested to stop */
		if (!after_sleep) {
			mtx_enter(&p->p_p->ps_mtx);
			if (--p->p_p->ps_singlecnt == 0)
				wakeup(&p->p_p->ps_singlecnt);
			mtx_leave(&p->p_p->ps_mtx);

			SCHED_LOCK();
			p->p_stat = SSTOP;
			SCHED_UNLOCK();
		}
	}

	if ((sig = cursig(p, &ctx, 1)) != 0) {
		if (ctx.sig_stop) {
			if (!after_sleep) {
				p->p_p->ps_xsig = sig;
				SCHED_LOCK();
				proc_stop(p, 0);
				SCHED_UNLOCK();
			}
		} else if (ctx.sig_intr && !ctx.sig_ignore)
			return EINTR;
		else
			return ERESTART;
	}

	return 0;
}

int
wakeup_proc(struct proc *p, int flags)
{
	int awakened = 0;

	SCHED_ASSERT_LOCKED();

	if (p->p_wchan != NULL) {
		awakened = 1;
		if (flags)
			atomic_setbits_int(&p->p_flag, flags);
#ifdef DIAGNOSTIC
		if (p->p_stat != SSLEEP && p->p_stat != SSTOP)
			panic("thread %d p_stat is %d", p->p_tid, p->p_stat);
#endif
		unsleep(p);
		if (p->p_stat == SSLEEP)
			setrunnable(p);
	}

	return awakened;
}


/*
 * Implement timeout for tsleep.
 * If process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If proc
 * is stopped, just unsleep so it will remain stopped.
 */
void
endtsleep(void *arg)
{
	struct proc *p = arg;

	SCHED_LOCK();
	wakeup_proc(p, P_TIMEOUT);
	SCHED_UNLOCK();
}

/*
 * Remove a process from its wait queue
 */
void
unsleep(struct proc *p)
{
	SCHED_ASSERT_LOCKED();

	if (p->p_wchan != NULL) {
		TAILQ_REMOVE(&slpque[LOOKUP(p->p_wchan)], p, p_runq);
		p->p_wchan = NULL;
		p->p_wmesg = NULL;
		TRACEPOINT(sched, unsleep, p->p_tid + THREAD_PID_OFFSET,
		    p->p_p->ps_pid);
	}
}

/*
 * Make a number of processes sleeping on the specified identifier runnable.
 */
void
wakeup_n(const volatile void *ident, int n)
{
	struct slpque *qp, wakeq;
	struct proc *p;
	struct proc *pnext;

	TAILQ_INIT(&wakeq);

	SCHED_LOCK();
	qp = &slpque[LOOKUP(ident)];
	for (p = TAILQ_FIRST(qp); p != NULL && n != 0; p = pnext) {
		pnext = TAILQ_NEXT(p, p_runq);
#ifdef DIAGNOSTIC
		if (p->p_stat != SSLEEP && p->p_stat != SSTOP)
			panic("thread %d p_stat is %d", p->p_tid, p->p_stat);
#endif
		KASSERT(p->p_wchan != NULL);
		if (p->p_wchan == ident) {
			TAILQ_REMOVE(qp, p, p_runq);
			p->p_wchan = NULL;
			p->p_wmesg = NULL;
			TAILQ_INSERT_TAIL(&wakeq, p, p_runq);
			--n;
		}
	}
	while ((p = TAILQ_FIRST(&wakeq))) {
		TAILQ_REMOVE(&wakeq, p, p_runq);
		TRACEPOINT(sched, unsleep, p->p_tid + THREAD_PID_OFFSET,
		    p->p_p->ps_pid);
		if (p->p_stat == SSLEEP)
			setrunnable(p);
	}
	SCHED_UNLOCK();
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(const volatile void *chan)
{
	wakeup_n(chan, -1);
}
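
/*
 * Hypothetical wake side pairing with the tsleep() sketch above (names
 * are illustrative): the interrupt handler sets the condition before
 * calling wakeup(9), so a sleeper that re-checks the condition cannot
 * miss the transition:
 *
 *	sc->sc_ready = 1;
 *	wakeup(&sc->sc_ready);
 */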

int
sys_sched_yield(struct proc *p, void *v, register_t *retval)
{
	struct proc *q;
	uint8_t newprio;

	/*
	 * If one of the threads of a multi-threaded process called
	 * sched_yield(2), drop its priority to ensure its siblings
	 * can make some progress.
	 */
	mtx_enter(&p->p_p->ps_mtx);
	newprio = p->p_usrpri;
	TAILQ_FOREACH(q, &p->p_p->ps_threads, p_thr_link)
		newprio = max(newprio, q->p_runpri);
	mtx_leave(&p->p_p->ps_mtx);

	SCHED_LOCK();
	setrunqueue(p->p_cpu, p, newprio);
	p->p_ru.ru_nvcsw++;
	mi_switch();
	SCHED_UNLOCK();

	return (0);
}

static inline int
thrsleep_unlock(_atomic_lock_t *atomiclock)
{
	static _atomic_lock_t unlocked = _ATOMIC_LOCK_UNLOCKED;

	if (atomiclock == NULL)
		return 0;

	return copyout(&unlocked, atomiclock, sizeof(unlocked));
}

struct tslpentry {
	TAILQ_ENTRY(tslpentry)	 tslp_link;
	struct process		*tslp_ps;
	long			 tslp_ident;
	struct proc *volatile	 tslp_p;
};

struct tslp_bucket {
	struct tslpqueue	 tsb_list;
	struct mutex		 tsb_lock;
} __aligned(64);

/* thrsleep queue shared between processes */
static struct tslp_bucket tsb_shared;

#define TSLP_BUCKET_BITS	6
#define TSLP_BUCKET_SIZE	(1UL << TSLP_BUCKET_BITS)
#define TSLP_BUCKET_MASK	(TSLP_BUCKET_SIZE - 1)

static struct tslp_bucket tsb_buckets[TSLP_BUCKET_SIZE];

void
tslp_init(void)
{
	struct tslp_bucket *tsb;
	size_t i;

	TAILQ_INIT(&tsb_shared.tsb_list);
	mtx_init(&tsb_shared.tsb_lock, IPL_MPFLOOR);

	for (i = 0; i < nitems(tsb_buckets); i++) {
		tsb = &tsb_buckets[i];

		TAILQ_INIT(&tsb->tsb_list);
		mtx_init(&tsb->tsb_lock, IPL_MPFLOOR);
	}
}

static struct tslp_bucket *
thrsleep_bucket(long ident)
{
	ident >>= 3;
	ident ^= ident >> TSLP_BUCKET_BITS;
	ident &= TSLP_BUCKET_MASK;

	return &tsb_buckets[ident];
}

static int
thrsleep(struct proc *p, struct sys___thrsleep_args *v)
{
	struct sys___thrsleep_args /* {
		syscallarg(const volatile void *) ident;
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
		syscallarg(void *) lock;
		syscallarg(const int *) abort;
	} */ *uap = v;
	long ident = (long)SCARG(uap, ident);
	struct tslpentry entry;
	struct tslp_bucket *tsb;
	struct timespec *tsp = (struct timespec *)SCARG(uap, tp);
	void *lock = SCARG(uap, lock);
	const uint32_t *abortp = SCARG(uap, abort);
	clockid_t clock_id = SCARG(uap, clock_id);
	uint64_t to_ticks = 0;
	int error = 0;

	if (ident == 0)
		return (EINVAL);
	if (tsp != NULL) {
		struct timespec now;
		uint64_t nsecs;

		if ((error = clock_gettime(p, clock_id, &now)))
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrabstimespec(p, tsp);
#endif

		if (timespeccmp(tsp, &now, <=)) {
			/* already passed: still do the unlock */
			if ((error = thrsleep_unlock(lock)))
				return (error);
			return (EWOULDBLOCK);
		}

		timespecsub(tsp, &now, tsp);
		nsecs = MIN(TIMESPEC_TO_NSEC(tsp), MAXTSLP);
		to_ticks = (nsecs + tick_nsec - 1) / (tick_nsec + 1) + 1;
		if (to_ticks > INT_MAX)
			to_ticks = INT_MAX;
	}

	tsb = (ident == -1) ? &tsb_shared : thrsleep_bucket(ident);

	/* Interlock with wakeup. */
	entry.tslp_ps = p->p_p;
	entry.tslp_ident = ident;
	entry.tslp_p = p;

	mtx_enter(&tsb->tsb_lock);
	TAILQ_INSERT_TAIL(&tsb->tsb_list, &entry, tslp_link);
	mtx_leave(&tsb->tsb_lock);

	error = thrsleep_unlock(lock);
	if (error != 0)
		goto leave;

	if (abortp != NULL) {
		uint32_t abort;
		error = copyin32(abortp, &abort);
		if (error != 0)
			goto leave;
		if (abort) {
			error = EINTR;
			goto leave;
		}
	}

	sleep_setup(&entry, PWAIT|PCATCH, "thrsleep");
	error = sleep_finish(to_ticks, entry.tslp_p != NULL);
	if (error != 0 || entry.tslp_p != NULL) {
		mtx_enter(&tsb->tsb_lock);
		if (entry.tslp_p != NULL)
			TAILQ_REMOVE(&tsb->tsb_list, &entry, tslp_link);
		else
			error = 0;
		mtx_leave(&tsb->tsb_lock);

		if (error == ERESTART)
			error = ECANCELED;
	}

	return (error);

leave:
	if (entry.tslp_p != NULL) {
		mtx_enter(&tsb->tsb_lock);
		if (entry.tslp_p != NULL)
			TAILQ_REMOVE(&tsb->tsb_list, &entry, tslp_link);
		mtx_leave(&tsb->tsb_lock);
	}

	return (error);
}

int
sys___thrsleep(struct proc *p, void *v, register_t *retval)
{
	struct sys___thrsleep_args /* {
		syscallarg(const volatile void *) ident;
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
		syscallarg(void *) lock;
		syscallarg(const int *) abort;
	} */ *uap = v;
	struct timespec ts;
	int error;

	if (SCARG(uap, tp) != NULL) {
		if ((error = copyin(SCARG(uap, tp), &ts, sizeof(ts)))) {
			*retval = error;
			return 0;
		}
		if (!timespecisvalid(&ts)) {
			*retval = EINVAL;
			return 0;
		}
		SCARG(uap, tp) = &ts;
	}

	*retval = thrsleep(p, uap);
	return 0;
}

static void
tslp_wakeups(struct tslpqueue *tslpq)
{
	struct tslpentry *entry, *nentry;
	struct proc *p;

	SCHED_LOCK();
	TAILQ_FOREACH_SAFE(entry, tslpq, tslp_link, nentry) {
		p = entry->tslp_p;
		entry->tslp_p = NULL;
		wakeup_proc(p, 0);
	}
	SCHED_UNLOCK();
}

int
sys___thrwakeup(struct proc *p, void *v, register_t *retval)
{
	struct sys___thrwakeup_args /* {
		syscallarg(const volatile void *) ident;
		syscallarg(int) n;
	} */ *uap = v;
	struct tslpentry *entry, *nentry;
	struct tslp_bucket *tsb;
	long ident = (long)SCARG(uap, ident);
	int n = SCARG(uap, n);
	int found = 0;
	struct tslpqueue wq = TAILQ_HEAD_INITIALIZER(wq);

	if (ident == 0) {
		*retval = EINVAL;
		return (0);
	}

	if (ident == -1) {
		/*
		 * Wake up all waiters with ident -1. This is needed
		 * because ident -1 can be shared by multiple userspace
		 * lock state machines concurrently. The implementation
		 * has no way to direct the wakeup to a particular
		 * state machine.
		 */
		mtx_enter(&tsb_shared.tsb_lock);
		tslp_wakeups(&tsb_shared.tsb_list);
		TAILQ_INIT(&tsb_shared.tsb_list);
		mtx_leave(&tsb_shared.tsb_lock);

		*retval = 0;
		return (0);
	}

	tsb = thrsleep_bucket(ident);

	mtx_enter(&tsb->tsb_lock);
	TAILQ_FOREACH_SAFE(entry, &tsb->tsb_list, tslp_link, nentry) {
		if (entry->tslp_ident == ident && entry->tslp_ps == p->p_p) {
			TAILQ_REMOVE(&tsb->tsb_list, entry, tslp_link);
			TAILQ_INSERT_TAIL(&wq, entry, tslp_link);

			if (++found == n)
				break;
		}
	}

	if (found)
		tslp_wakeups(&wq);
	mtx_leave(&tsb->tsb_lock);

	*retval = found ? 0 : ESRCH;
	return (0);
}
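
/*
 * Hypothetical userspace sketch (cf. __thrsleep(2); names are
 * illustrative): the librthread-style pattern hands its spinlock to
 * the kernel, which drops it only after the thread is queued, closing
 * the race between checking the condition and going to sleep:
 *
 *	_spinlock(&cv->lock);
 *	while (cv->seq == seq) {
 *		__thrsleep(&cv->seq, CLOCK_REALTIME, NULL, &cv->lock,
 *		    NULL);
 *		_spinlock(&cv->lock);
 *	}
 *	_spinunlock(&cv->lock);
 *
 * and the waker, after bumping cv->seq under the same spinlock:
 *
 *	__thrwakeup(&cv->seq, 1);
 */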

void
refcnt_init(struct refcnt *r)
{
	refcnt_init_trace(r, 0);
}

void
refcnt_init_trace(struct refcnt *r, int idx)
{
	r->r_traceidx = idx;
	atomic_store_int(&r->r_refs, 1);
	TRACEINDEX(refcnt, r->r_traceidx, r, 0, +1);
}

void
refcnt_take(struct refcnt *r)
{
	u_int refs;

	refs = atomic_inc_int_nv(&r->r_refs);
	KASSERT(refs != 0);
	TRACEINDEX(refcnt, r->r_traceidx, r, refs - 1, +1);
	(void)refs;
}

int
refcnt_rele(struct refcnt *r)
{
	u_int refs;

	membar_exit_before_atomic();
	refs = atomic_dec_int_nv(&r->r_refs);
	KASSERT(refs != ~0);
	TRACEINDEX(refcnt, r->r_traceidx, r, refs + 1, -1);
	if (refs == 0) {
		membar_enter_after_atomic();
		return (1);
	}
	return (0);
}

void
refcnt_rele_wake(struct refcnt *r)
{
	if (refcnt_rele(r))
		wakeup_one(r);
}

void
refcnt_finalize(struct refcnt *r, const char *wmesg)
{
	u_int refs;

	membar_exit_before_atomic();
	refs = atomic_dec_int_nv(&r->r_refs);
	KASSERT(refs != ~0);
	TRACEINDEX(refcnt, r->r_traceidx, r, refs + 1, -1);
	while (refs) {
		sleep_setup(r, PWAIT, wmesg);
		refs = atomic_load_int(&r->r_refs);
		sleep_finish(0, refs);
	}
	TRACEINDEX(refcnt, r->r_traceidx, r, refs, 0);
	/* Order subsequent loads and stores after refs == 0 load. */
	membar_sync();
}
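
/*
 * Hypothetical usage sketch (names are illustrative): an object owner
 * holds the initial reference from refcnt_init(), other users pair
 * refcnt_take() with refcnt_rele_wake(), and the detach path calls
 * refcnt_finalize(), which drops the initial reference and sleeps
 * until every other holder is gone:
 *
 *	refcnt_init(&sc->sc_refs);			count starts at 1
 *	...
 *	refcnt_take(&sc->sc_refs);			per-user reference
 *	...
 *	refcnt_rele_wake(&sc->sc_refs);			user is done
 *	...
 *	refcnt_finalize(&sc->sc_refs, "scfinal");	blocks until 0
 */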

int
refcnt_shared(struct refcnt *r)
{
	u_int refs;

	refs = atomic_load_int(&r->r_refs);
	TRACEINDEX(refcnt, r->r_traceidx, r, refs, 0);
	return (refs > 1);
}

unsigned int
refcnt_read(struct refcnt *r)
{
	u_int refs;

	refs = atomic_load_int(&r->r_refs);
	TRACEINDEX(refcnt, r->r_traceidx, r, refs, 0);
	return (refs);
}

void
cond_init(struct cond *c)
{
	atomic_store_int(&c->c_wait, 1);
}

void
cond_signal(struct cond *c)
{
	atomic_store_int(&c->c_wait, 0);

	wakeup_one(c);
}

void
cond_wait(struct cond *c, const char *wmesg)
{
	unsigned int wait;

	wait = atomic_load_int(&c->c_wait);
	while (wait) {
		sleep_setup(c, PWAIT, wmesg);
		wait = atomic_load_int(&c->c_wait);
		sleep_finish(0, wait);
	}
}
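
/*
 * Hypothetical usage sketch: cond(9) is effectively a one-shot
 * rendezvous between a single waiter and a single signaller, e.g. for
 * waiting until work handed to another context has completed:
 *
 *	struct cond c;
 *
 *	cond_init(&c);			c_wait is set to 1
 *	...hand &c to the other context...
 *	cond_wait(&c, "condex");	sleeps until signalled
 *
 * and in the other context, exactly once:
 *
 *	cond_signal(&c);		clears c_wait, wakes the waiter
 */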