/*	$OpenBSD: kern_synch.c,v 1.202 2024/04/18 08:59:38 claudio Exp $	*/
/*	$NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.6 (Berkeley) 1/21/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/sched.h>
#include <sys/timeout.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/refcnt.h>
#include <sys/atomic.h>
#include <sys/tracepoint.h>

#include <ddb/db_output.h>

#include <machine/spinlock.h>

#ifdef DIAGNOSTIC
#include <sys/syslog.h>
#endif

#ifdef KTRACE
#include <sys/ktrace.h>
#endif

int	sleep_signal_check(void);
int	thrsleep(struct proc *, struct sys___thrsleep_args *);
int	thrsleep_unlock(void *);

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
#define LOOKUP(x)	(((long)(x) >> 8) & (TABLESIZE - 1))
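
/*
 * Worked example (with a hypothetical channel address): given
 * TABLESIZE 128, LOOKUP(0x8000a340) == (0x8000a340 >> 8) & 127
 * == 0x8000a3 & 0x7f == 0x23, so all sleepers on that channel
 * hash to slpque[35].
 */
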
TAILQ_HEAD(slpque,proc) slpque[TABLESIZE];

void
sleep_queue_init(void)
{
	int i;

	for (i = 0; i < TABLESIZE; i++)
		TAILQ_INIT(&slpque[i]);
}

/*
 * Global sleep channel for threads that do not want to
 * receive wakeup(9) broadcasts.
 */
int nowake;

/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
extern int safepri;

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping, else signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 */
int
tsleep(const volatile void *ident, int priority, const char *wmesg, int timo)
{
#ifdef MULTIPROCESSOR
	int hold_count;
#endif

	KASSERT((priority & ~(PRIMASK | PCATCH)) == 0);
	KASSERT(ident != &nowake || ISSET(priority, PCATCH) || timo != 0);

#ifdef MULTIPROCESSOR
	KASSERT(ident == &nowake || timo || _kernel_lock_held());
#endif

#ifdef DDB
	if (cold == 2)
		db_stack_dump();
#endif
	if (cold || panicstr) {
		int s;
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		s = splhigh();
		splx(safepri);
#ifdef MULTIPROCESSOR
		if (_kernel_lock_held()) {
			hold_count = __mp_release_all(&kernel_lock);
			__mp_acquire_count(&kernel_lock, hold_count);
		}
#endif
		splx(s);
		return (0);
	}

	sleep_setup(ident, priority, wmesg);
	return sleep_finish(timo, 1);
}
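
/*
 * Minimal usage sketch (illustrative, not part of this file): a
 * driver waiting for a hypothetical flag sc_done that its interrupt
 * handler sets before calling wakeup(&sc->sc_done).  Without a
 * timeout the caller must hold the kernel lock, as asserted above.
 *
 *	while (sc->sc_done == 0) {
 *		error = tsleep(&sc->sc_done, PRIBIO | PCATCH, "scdone", 0);
 *		if (error)
 *			return (error);
 *	}
 *
 * The condition is re-tested after every return because a wakeup
 * does not guarantee the caller's predicate now holds.
 */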

int
tsleep_nsec(const volatile void *ident, int priority, const char *wmesg,
    uint64_t nsecs)
{
	uint64_t to_ticks;

	if (nsecs == INFSLP)
		return tsleep(ident, priority, wmesg, 0);
#ifdef DIAGNOSTIC
	if (nsecs == 0) {
		log(LOG_WARNING,
		    "%s: %s[%d]: %s: trying to sleep zero nanoseconds\n",
		    __func__, curproc->p_p->ps_comm, curproc->p_p->ps_pid,
		    wmesg);
	}
#endif
	/*
	 * We want to sleep at least nsecs nanoseconds worth of ticks.
	 *
	 * - Clamp nsecs to prevent arithmetic overflow.
	 *
	 * - Round nsecs up to account for any nanoseconds that do not
	 *   divide evenly into tick_nsec, otherwise we'll lose them to
	 *   integer division in the next step.  We add (tick_nsec - 1)
	 *   to keep from introducing a spurious tick if there are no
	 *   such nanoseconds, i.e. nsecs % tick_nsec == 0.
	 *
	 * - Divide the rounded value to a count of ticks.  We divide
	 *   by (tick_nsec + 1) to discard the extra tick introduced if,
	 *   before rounding, nsecs % tick_nsec == 1.
	 *
	 * - Finally, add a tick to the result.  We need to wait out
	 *   the current tick before we can begin counting our interval,
	 *   as we do not know how much time has elapsed since the
	 *   current tick began.
	 */
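	/*
	 * Worked example (assuming hz == 100, so tick_nsec == 10000000):
	 * for nsecs == 25000000 (25ms), (25000000 + 9999999) / 10000001
	 * == 3, and the trailing tick makes to_ticks == 4: at least the
	 * requested 2.5 ticks of sleep once the current tick expires.
	 */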
	nsecs = MIN(nsecs, UINT64_MAX - tick_nsec);
	to_ticks = (nsecs + tick_nsec - 1) / (tick_nsec + 1) + 1;
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;
	return tsleep(ident, priority, wmesg, (int)to_ticks);
}

/*
 * Same as tsleep, but if we have a mutex provided, then once we've
 * entered the sleep queue we drop the mutex.  After sleeping we re-lock.
 */
int
msleep(const volatile void *ident, struct mutex *mtx, int priority,
    const char *wmesg, int timo)
{
	int error, spl;
#ifdef MULTIPROCESSOR
	int hold_count;
#endif

	KASSERT((priority & ~(PRIMASK | PCATCH | PNORELOCK)) == 0);
	KASSERT(ident != &nowake || ISSET(priority, PCATCH) || timo != 0);
	KASSERT(mtx != NULL);

#ifdef DDB
	if (cold == 2)
		db_stack_dump();
#endif
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		spl = MUTEX_OLDIPL(mtx);
		MUTEX_OLDIPL(mtx) = safepri;
		mtx_leave(mtx);
#ifdef MULTIPROCESSOR
		if (_kernel_lock_held()) {
			hold_count = __mp_release_all(&kernel_lock);
			__mp_acquire_count(&kernel_lock, hold_count);
		}
#endif
		if ((priority & PNORELOCK) == 0) {
			mtx_enter(mtx);
			MUTEX_OLDIPL(mtx) = spl;
		} else
			splx(spl);
		return (0);
	}

	sleep_setup(ident, priority, wmesg);

	mtx_leave(mtx);
	/* signal may stop the process, release mutex before that */
	error = sleep_finish(timo, 1);

	if ((priority & PNORELOCK) == 0)
		mtx_enter(mtx);

	return error;
}
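
/*
 * Minimal usage sketch (illustrative only; sc_mtx and sc_ready are
 * hypothetical): waiting for a condition guarded by a mutex.
 *
 *	mtx_enter(&sc->sc_mtx);
 *	while (sc->sc_ready == 0) {
 *		error = msleep_nsec(&sc->sc_ready, &sc->sc_mtx,
 *		    PWAIT | PCATCH, "scrdy", INFSLP);
 *		if (error)
 *			break;
 *	}
 *	mtx_leave(&sc->sc_mtx);
 *
 * The mutex is re-acquired before msleep() returns (unless PNORELOCK
 * is set), so the condition can be re-tested under the lock.
 */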

int
msleep_nsec(const volatile void *ident, struct mutex *mtx, int priority,
    const char *wmesg, uint64_t nsecs)
{
	uint64_t to_ticks;

	if (nsecs == INFSLP)
		return msleep(ident, mtx, priority, wmesg, 0);
#ifdef DIAGNOSTIC
	if (nsecs == 0) {
		log(LOG_WARNING,
		    "%s: %s[%d]: %s: trying to sleep zero nanoseconds\n",
		    __func__, curproc->p_p->ps_comm, curproc->p_p->ps_pid,
		    wmesg);
	}
#endif
	nsecs = MIN(nsecs, UINT64_MAX - tick_nsec);
	to_ticks = (nsecs + tick_nsec - 1) / (tick_nsec + 1) + 1;
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;
	return msleep(ident, mtx, priority, wmesg, (int)to_ticks);
}

/*
 * Same as tsleep, but if we have an rwlock provided, then once we've
 * entered the sleep queue we drop it.  After sleeping we re-lock.
 */
int
rwsleep(const volatile void *ident, struct rwlock *rwl, int priority,
    const char *wmesg, int timo)
{
	int error, status;

	KASSERT((priority & ~(PRIMASK | PCATCH | PNORELOCK)) == 0);
	KASSERT(ident != &nowake || ISSET(priority, PCATCH) || timo != 0);
	KASSERT(ident != rwl);
	rw_assert_anylock(rwl);
	status = rw_status(rwl);

	sleep_setup(ident, priority, wmesg);

	rw_exit(rwl);
	/* signal may stop the process, release rwlock before that */
	error = sleep_finish(timo, 1);

	if ((priority & PNORELOCK) == 0)
		rw_enter(rwl, status);

	return error;
}

int
rwsleep_nsec(const volatile void *ident, struct rwlock *rwl, int priority,
    const char *wmesg, uint64_t nsecs)
{
	uint64_t to_ticks;

	if (nsecs == INFSLP)
		return rwsleep(ident, rwl, priority, wmesg, 0);
#ifdef DIAGNOSTIC
	if (nsecs == 0) {
		log(LOG_WARNING,
		    "%s: %s[%d]: %s: trying to sleep zero nanoseconds\n",
		    __func__, curproc->p_p->ps_comm, curproc->p_p->ps_pid,
		    wmesg);
	}
#endif
	nsecs = MIN(nsecs, UINT64_MAX - tick_nsec);
	to_ticks = (nsecs + tick_nsec - 1) / (tick_nsec + 1) + 1;
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;
	return rwsleep(ident, rwl, priority, wmesg, (int)to_ticks);
}

void
sleep_setup(const volatile void *ident, int prio, const char *wmesg)
{
	struct proc *p = curproc;
	int s;

#ifdef DIAGNOSTIC
	if (p->p_flag & P_CANTSLEEP)
		panic("sleep: %s failed insomnia", p->p_p->ps_comm);
	if (ident == NULL)
		panic("tsleep: no ident");
	if (p->p_stat != SONPROC)
		panic("tsleep: not SONPROC");
#endif
	/* exiting processes are not allowed to catch signals */
	if (p->p_flag & P_WEXIT)
		CLR(prio, PCATCH);

	SCHED_LOCK(s);

	TRACEPOINT(sched, sleep, NULL);

	p->p_wchan = ident;
	p->p_wmesg = wmesg;
	p->p_slptime = 0;
	p->p_slppri = prio & PRIMASK;
	atomic_setbits_int(&p->p_flag, P_WSLEEP);
	TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], p, p_runq);
	if (prio & PCATCH)
		atomic_setbits_int(&p->p_flag, P_SINTR);
	p->p_stat = SSLEEP;

	SCHED_UNLOCK(s);
}

int
sleep_finish(int timo, int do_sleep)
{
	struct proc *p = curproc;
	int s, catch, error = 0, error1 = 0;

	catch = p->p_flag & P_SINTR;

	if (timo != 0) {
		KASSERT((p->p_flag & P_TIMEOUT) == 0);
		timeout_add(&p->p_sleep_to, timo);
	}

	if (catch != 0) {
		/*
		 * We put ourselves on the sleep queue and start our
		 * timeout before calling sleep_signal_check(), as we could
		 * stop there, and a wakeup or a SIGCONT (or both) could
		 * occur while we were stopped.  A SIGCONT would cause
		 * us to be marked as SSLEEP without resuming us, thus
		 * we must be ready for sleep when sleep_signal_check() is
		 * called.
		 */
		if ((error = sleep_signal_check()) != 0) {
			catch = 0;
			do_sleep = 0;
		}
	}

	SCHED_LOCK(s);
	/*
	 * If the wakeup happens while going to sleep, p->p_wchan
	 * will be NULL.  In that case unwind immediately but still
	 * check for possible signals and timeouts.
	 */
	if (p->p_wchan == NULL)
		do_sleep = 0;
	atomic_clearbits_int(&p->p_flag, P_WSLEEP);

	if (do_sleep) {
		KASSERT(p->p_stat == SSLEEP || p->p_stat == SSTOP);
		p->p_ru.ru_nvcsw++;
		mi_switch();
	} else {
		KASSERT(p->p_stat == SONPROC || p->p_stat == SSLEEP ||
		    p->p_stat == SSTOP);
		unsleep(p);
		p->p_stat = SONPROC;
	}

#ifdef DIAGNOSTIC
	if (p->p_stat != SONPROC)
		panic("sleep_finish !SONPROC");
#endif

	p->p_cpu->ci_schedstate.spc_curpriority = p->p_usrpri;
	SCHED_UNLOCK(s);

	/*
	 * Even though this belongs to the signal handling part of sleep,
	 * we need to clear it before the ktrace.
	 */
	atomic_clearbits_int(&p->p_flag, P_SINTR);

	if (timo != 0) {
		if (p->p_flag & P_TIMEOUT) {
			error1 = EWOULDBLOCK;
		} else {
			/* This can sleep. It must not use timeouts. */
			timeout_del_barrier(&p->p_sleep_to);
		}
		atomic_clearbits_int(&p->p_flag, P_TIMEOUT);
	}
	/* Check if thread was woken up because of an unwind or signal */
	if (catch != 0)
		error = sleep_signal_check();

	/* Signal errors are higher priority than timeouts. */
	if (error == 0 && error1 != 0)
		error = error1;

	return error;
}

/*
 * Check and handle signals and suspensions around a sleep cycle.
 */
int
sleep_signal_check(void)
{
	struct proc *p = curproc;
	struct sigctx ctx;
	int err, sig;

	if ((err = single_thread_check(p, 1)) != 0)
		return err;
	if ((sig = cursig(p, &ctx)) != 0) {
		if (ctx.sig_intr)
			return EINTR;
		else
			return ERESTART;
	}
	return 0;
}

int
wakeup_proc(struct proc *p, int flags)
{
	int awakened = 0;

	SCHED_ASSERT_LOCKED();

	if (p->p_wchan != NULL) {
		awakened = 1;
		if (flags)
			atomic_setbits_int(&p->p_flag, flags);
#ifdef DIAGNOSTIC
		if (p->p_stat != SSLEEP && p->p_stat != SSTOP)
			panic("thread %d p_stat is %d", p->p_tid, p->p_stat);
#endif
		unsleep(p);
		if (p->p_stat == SSLEEP)
			setrunnable(p);
	}

	return awakened;
}


/*
 * Implement timeout for tsleep.
 * If process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If proc
 * is stopped, just unsleep so it will remain stopped.
 */
void
endtsleep(void *arg)
{
	struct proc *p = arg;
	int s;

	SCHED_LOCK(s);
	wakeup_proc(p, P_TIMEOUT);
	SCHED_UNLOCK(s);
}

/*
 * Remove a process from its wait queue
 */
void
unsleep(struct proc *p)
{
	SCHED_ASSERT_LOCKED();

	if (p->p_wchan != NULL) {
		TAILQ_REMOVE(&slpque[LOOKUP(p->p_wchan)], p, p_runq);
		p->p_wchan = NULL;
		TRACEPOINT(sched, unsleep, p->p_tid + THREAD_PID_OFFSET,
		    p->p_p->ps_pid);
	}
}

/*
 * Make a number of processes sleeping on the specified identifier runnable.
 */
void
wakeup_n(const volatile void *ident, int n)
{
	struct slpque *qp, wakeq;
	struct proc *p;
	struct proc *pnext;
	int s;

	TAILQ_INIT(&wakeq);

	SCHED_LOCK(s);
	qp = &slpque[LOOKUP(ident)];
	for (p = TAILQ_FIRST(qp); p != NULL && n != 0; p = pnext) {
		pnext = TAILQ_NEXT(p, p_runq);
#ifdef DIAGNOSTIC
		if (p->p_stat != SSLEEP && p->p_stat != SSTOP)
			panic("thread %d p_stat is %d", p->p_tid, p->p_stat);
#endif
		KASSERT(p->p_wchan != NULL);
		if (p->p_wchan == ident) {
			TAILQ_REMOVE(qp, p, p_runq);
			p->p_wchan = NULL;
			TAILQ_INSERT_TAIL(&wakeq, p, p_runq);
			--n;
		}
	}
	while ((p = TAILQ_FIRST(&wakeq))) {
		TAILQ_REMOVE(&wakeq, p, p_runq);
		TRACEPOINT(sched, unsleep, p->p_tid + THREAD_PID_OFFSET,
		    p->p_p->ps_pid);
		if (p->p_stat == SSLEEP)
			setrunnable(p);
	}
	SCHED_UNLOCK(s);
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(const volatile void *chan)
{
	wakeup_n(chan, -1);
}
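
/*
 * Producer-side sketch matching the tsleep() example above
 * (identifiers remain illustrative): update the condition before
 * issuing the wakeup so woken threads see it when they re-test.
 *
 *	sc->sc_done = 1;
 *	wakeup(&sc->sc_done);
 *
 * wakeup_one(9) may be used instead to make at most one sleeper
 * runnable.
 */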

int
sys_sched_yield(struct proc *p, void *v, register_t *retval)
{
	struct proc *q;
	uint8_t newprio;
	int s;

	SCHED_LOCK(s);
	/*
	 * If one of the threads of a multi-threaded process called
	 * sched_yield(2), drop its priority to ensure its siblings
	 * can make some progress.
	 */
	newprio = p->p_usrpri;
	TAILQ_FOREACH(q, &p->p_p->ps_threads, p_thr_link)
		newprio = max(newprio, q->p_runpri);
	setrunqueue(p->p_cpu, p, newprio);
	p->p_ru.ru_nvcsw++;
	mi_switch();
	SCHED_UNLOCK(s);

	return (0);
}

int
thrsleep_unlock(void *lock)
{
	static _atomic_lock_t unlocked = _ATOMIC_LOCK_UNLOCKED;
	_atomic_lock_t *atomiclock = lock;

	if (!lock)
		return 0;

	return copyout(&unlocked, atomiclock, sizeof(unlocked));
}

struct tslpentry {
	TAILQ_ENTRY(tslpentry)	tslp_link;
	long			tslp_ident;
};

/* thrsleep queue shared between processes */
static struct tslpqueue thrsleep_queue = TAILQ_HEAD_INITIALIZER(thrsleep_queue);
static struct rwlock thrsleep_lock = RWLOCK_INITIALIZER("thrsleeplk");

int
thrsleep(struct proc *p, struct sys___thrsleep_args *v)
{
	struct sys___thrsleep_args /* {
		syscallarg(const volatile void *) ident;
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
		syscallarg(void *) lock;
		syscallarg(const int *) abort;
	} */ *uap = v;
	long ident = (long)SCARG(uap, ident);
	struct tslpentry entry;
	struct tslpqueue *queue;
	struct rwlock *qlock;
	struct timespec *tsp = (struct timespec *)SCARG(uap, tp);
	void *lock = SCARG(uap, lock);
	uint64_t nsecs = INFSLP;
	int abort = 0, error;
	clockid_t clock_id = SCARG(uap, clock_id);

	if (ident == 0)
		return (EINVAL);
	if (tsp != NULL) {
		struct timespec now;

		if ((error = clock_gettime(p, clock_id, &now)))
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrabstimespec(p, tsp);
#endif

		if (timespeccmp(tsp, &now, <=)) {
			/* already passed: still do the unlock */
			if ((error = thrsleep_unlock(lock)))
				return (error);
			return (EWOULDBLOCK);
		}

		timespecsub(tsp, &now, tsp);
		nsecs = MIN(TIMESPEC_TO_NSEC(tsp), MAXTSLP);
	}

	if (ident == -1) {
		queue = &thrsleep_queue;
		qlock = &thrsleep_lock;
	} else {
		queue = &p->p_p->ps_tslpqueue;
		qlock = &p->p_p->ps_lock;
	}

	/* Interlock with wakeup. */
	entry.tslp_ident = ident;
	rw_enter_write(qlock);
	TAILQ_INSERT_TAIL(queue, &entry, tslp_link);
	rw_exit_write(qlock);

	error = thrsleep_unlock(lock);

	if (error == 0 && SCARG(uap, abort) != NULL)
		error = copyin(SCARG(uap, abort), &abort, sizeof(abort));

	rw_enter_write(qlock);
	if (error != 0)
		goto out;
	if (abort != 0) {
		error = EINTR;
		goto out;
	}
	if (entry.tslp_ident != 0) {
		error = rwsleep_nsec(&entry, qlock, PWAIT|PCATCH, "thrsleep",
		    nsecs);
	}

out:
	if (entry.tslp_ident != 0)
		TAILQ_REMOVE(queue, &entry, tslp_link);
	rw_exit_write(qlock);

	if (error == ERESTART)
		error = ECANCELED;

	return (error);
}

int
sys___thrsleep(struct proc *p, void *v, register_t *retval)
{
	struct sys___thrsleep_args /* {
		syscallarg(const volatile void *) ident;
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
		syscallarg(void *) lock;
		syscallarg(const int *) abort;
	} */ *uap = v;
	struct timespec ts;
	int error;

	if (SCARG(uap, tp) != NULL) {
		if ((error = copyin(SCARG(uap, tp), &ts, sizeof(ts)))) {
			*retval = error;
			return 0;
		}
		if (!timespecisvalid(&ts)) {
			*retval = EINVAL;
			return 0;
		}
		SCARG(uap, tp) = &ts;
	}

	*retval = thrsleep(p, uap);
	return 0;
}

int
sys___thrwakeup(struct proc *p, void *v, register_t *retval)
{
	struct sys___thrwakeup_args /* {
		syscallarg(const volatile void *) ident;
		syscallarg(int) n;
	} */ *uap = v;
	struct tslpentry *entry, *tmp;
	struct tslpqueue *queue;
	struct rwlock *qlock;
	long ident = (long)SCARG(uap, ident);
	int n = SCARG(uap, n);
	int found = 0;

	if (ident == 0)
		*retval = EINVAL;
	else {
		if (ident == -1) {
			queue = &thrsleep_queue;
			qlock = &thrsleep_lock;
			/*
			 * Wake up all waiters with ident -1.  This is needed
			 * because ident -1 can be shared by multiple userspace
			 * lock state machines concurrently.  The implementation
			 * has no way to direct the wakeup to a particular
			 * state machine.
			 */
			n = 0;
		} else {
			queue = &p->p_p->ps_tslpqueue;
			qlock = &p->p_p->ps_lock;
		}

		rw_enter_write(qlock);
		TAILQ_FOREACH_SAFE(entry, queue, tslp_link, tmp) {
			if (entry->tslp_ident == ident) {
				TAILQ_REMOVE(queue, entry, tslp_link);
				entry->tslp_ident = 0;
				wakeup_one(entry);
				if (++found == n)
					break;
			}
		}
		rw_exit_write(qlock);

		if (ident == -1)
			*retval = 0;
		else
			*retval = found ? 0 : ESRCH;
	}

	return (0);
}

void
refcnt_init(struct refcnt *r)
{
	refcnt_init_trace(r, 0);
}

void
refcnt_init_trace(struct refcnt *r, int idx)
{
	r->r_traceidx = idx;
	atomic_store_int(&r->r_refs, 1);
	TRACEINDEX(refcnt, r->r_traceidx, r, 0, +1);
}

void
refcnt_take(struct refcnt *r)
{
	u_int refs;

	refs = atomic_inc_int_nv(&r->r_refs);
	KASSERT(refs != 0);
	TRACEINDEX(refcnt, r->r_traceidx, r, refs - 1, +1);
	(void)refs;
}

int
refcnt_rele(struct refcnt *r)
{
	u_int refs;

	membar_exit_before_atomic();
	refs = atomic_dec_int_nv(&r->r_refs);
	KASSERT(refs != ~0);
	TRACEINDEX(refcnt, r->r_traceidx, r, refs + 1, -1);
	if (refs == 0) {
		membar_enter_after_atomic();
		return (1);
	}
	return (0);
}

void
refcnt_rele_wake(struct refcnt *r)
{
	if (refcnt_rele(r))
		wakeup_one(r);
}

void
refcnt_finalize(struct refcnt *r, const char *wmesg)
{
	u_int refs;

	membar_exit_before_atomic();
	refs = atomic_dec_int_nv(&r->r_refs);
	KASSERT(refs != ~0);
	TRACEINDEX(refcnt, r->r_traceidx, r, refs + 1, -1);
	while (refs) {
		sleep_setup(r, PWAIT, wmesg);
		refs = atomic_load_int(&r->r_refs);
		sleep_finish(0, refs);
	}
	TRACEINDEX(refcnt, r->r_traceidx, r, refs, 0);
	/* Order subsequent loads and stores after refs == 0 load. */
	membar_sync();
}
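
/*
 * Typical lifecycle sketch (hypothetical object sc): the final
 * release path parks in refcnt_finalize() until every reference
 * taken with refcnt_take() has been dropped again.
 *
 *	refcnt_init(&sc->sc_refcnt);			at attach
 *	refcnt_take(&sc->sc_refcnt);			per in-flight user
 *	refcnt_rele_wake(&sc->sc_refcnt);		when a user is done
 *	refcnt_finalize(&sc->sc_refcnt, "scfin");	at detach, then free
 */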

int
refcnt_shared(struct refcnt *r)
{
	u_int refs;

	refs = atomic_load_int(&r->r_refs);
	TRACEINDEX(refcnt, r->r_traceidx, r, refs, 0);
	return (refs > 1);
}

unsigned int
refcnt_read(struct refcnt *r)
{
	u_int refs;

	refs = atomic_load_int(&r->r_refs);
	TRACEINDEX(refcnt, r->r_traceidx, r, refs, 0);
	return (refs);
}

void
cond_init(struct cond *c)
{
	atomic_store_int(&c->c_wait, 1);
}

void
cond_signal(struct cond *c)
{
	atomic_store_int(&c->c_wait, 0);

	wakeup_one(c);
}

void
cond_wait(struct cond *c, const char *wmesg)
{
	unsigned int wait;

	wait = atomic_load_int(&c->c_wait);
	while (wait) {
		sleep_setup(c, PWAIT, wmesg);
		wait = atomic_load_int(&c->c_wait);
		sleep_finish(0, wait);
	}
}
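
/*
 * Minimal handoff sketch (illustrative): the creator waits until a
 * newly started thread has finished initializing.
 *
 *	struct cond c;
 *
 *	cond_init(&c);
 *	(start the other thread, passing it &c)
 *	cond_wait(&c, "condex");
 *
 * cond_wait() returns once the other thread calls cond_signal(&c).
 * A cond is single-use: c_wait stays 0 after the signal, so
 * cond_init() must be called again before the next wait.
 */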