/*	$OpenBSD: kern_synch.c,v 1.205 2024/06/03 12:48:25 claudio Exp $	*/
/*	$NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.6 (Berkeley) 1/21/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/sched.h>
#include <sys/timeout.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/refcnt.h>
#include <sys/atomic.h>
#include <sys/tracepoint.h>

#include <ddb/db_output.h>

#include <machine/spinlock.h>

#ifdef DIAGNOSTIC
#include <sys/syslog.h>
#endif

#ifdef KTRACE
#include <sys/ktrace.h>
#endif

int	sleep_signal_check(void);
int	thrsleep(struct proc *, struct sys___thrsleep_args *);
int	thrsleep_unlock(void *);

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
#define LOOKUP(x)	(((long)(x) >> 8) & (TABLESIZE - 1))
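
/*
 * Worked example (illustrative, not from the original file): for a
 * sleep channel at, say, address 0x80001234, LOOKUP() computes
 * (0x80001234 >> 8) & 127 = 0x12 = 18, so that sleeper is queued
 * on slpque[18].
 */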
TAILQ_HEAD(slpque,proc) slpque[TABLESIZE];

void
sleep_queue_init(void)
{
	int i;

	for (i = 0; i < TABLESIZE; i++)
		TAILQ_INIT(&slpque[i]);
}

/*
 * Global sleep channel for threads that do not want to
 * receive wakeup(9) broadcasts.
 */
int nowake;

/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
extern int safepri;
/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping, otherwise signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 */
int
tsleep(const volatile void *ident, int priority, const char *wmesg, int timo)
{
#ifdef MULTIPROCESSOR
	int hold_count;
#endif

	KASSERT((priority & ~(PRIMASK | PCATCH)) == 0);
	KASSERT(ident != &nowake || ISSET(priority, PCATCH) || timo != 0);

#ifdef MULTIPROCESSOR
	KASSERT(ident == &nowake || timo || _kernel_lock_held());
#endif

#ifdef DDB
	if (cold == 2)
		db_stack_dump();
#endif
	if (cold || panicstr) {
		int s;
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		s = splhigh();
		splx(safepri);
#ifdef MULTIPROCESSOR
		if (_kernel_lock_held()) {
			hold_count = __mp_release_all(&kernel_lock);
			__mp_acquire_count(&kernel_lock, hold_count);
		}
#endif
		splx(s);
		return (0);
	}

	sleep_setup(ident, priority, wmesg);
	return sleep_finish(timo, 1);
}
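
/*
 * Hypothetical usage sketch (the "sc_ready" flag, the channel and the
 * wait message are assumptions, not from this file): a driver waits
 * for a condition, rechecking it after every wakeup:
 *
 *	int error;
 *
 *	while (!sc->sc_ready) {
 *		error = tsleep(&sc->sc_ready, PWAIT | PCATCH, "exrdy", 0);
 *		if (error)
 *			return (error);
 *	}
 */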

int
tsleep_nsec(const volatile void *ident, int priority, const char *wmesg,
    uint64_t nsecs)
{
	uint64_t to_ticks;

	if (nsecs == INFSLP)
		return tsleep(ident, priority, wmesg, 0);
#ifdef DIAGNOSTIC
	if (nsecs == 0) {
		log(LOG_WARNING,
		    "%s: %s[%d]: %s: trying to sleep zero nanoseconds\n",
		    __func__, curproc->p_p->ps_comm, curproc->p_p->ps_pid,
		    wmesg);
	}
#endif
	/*
	 * We want to sleep at least nsecs nanoseconds worth of ticks.
	 *
	 *  - Clamp nsecs to prevent arithmetic overflow.
	 *
	 *  - Round nsecs up to account for any nanoseconds that do not
	 *    divide evenly into tick_nsec, otherwise we'll lose them to
	 *    integer division in the next step.  We add (tick_nsec - 1)
	 *    to keep from introducing a spurious tick if there are no
	 *    such nanoseconds, i.e. nsecs % tick_nsec == 0.
	 *
	 *  - Divide the rounded value to a count of ticks.  We divide
	 *    by (tick_nsec + 1) to discard the extra tick introduced if,
	 *    before rounding, nsecs % tick_nsec == 1.
	 *
	 *  - Finally, add a tick to the result.  We need to wait out
	 *    the current tick before we can begin counting our interval,
	 *    as we do not know how much time has elapsed since the
	 *    current tick began.
	 */
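	/*
	 * Worked example (assuming hz = 100, i.e. tick_nsec = 10000000):
	 * nsecs = 25000000 (25ms) gives
	 * (25000000 + 9999999) / 10000001 + 1 = 3 + 1 = 4 ticks,
	 * i.e. the remainder of the current tick plus three full ticks,
	 * guaranteeing at least 25ms of sleep.
	 */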
	nsecs = MIN(nsecs, UINT64_MAX - tick_nsec);
	to_ticks = (nsecs + tick_nsec - 1) / (tick_nsec + 1) + 1;
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;
	return tsleep(ident, priority, wmesg, (int)to_ticks);
}

/*
 * Same as tsleep, but if we have a mutex provided, then once we've
 * entered the sleep queue we drop the mutex.  After sleeping we re-lock.
 */
int
msleep(const volatile void *ident, struct mutex *mtx, int priority,
    const char *wmesg, int timo)
{
	int error, spl;
#ifdef MULTIPROCESSOR
	int hold_count;
#endif

	KASSERT((priority & ~(PRIMASK | PCATCH | PNORELOCK)) == 0);
	KASSERT(ident != &nowake || ISSET(priority, PCATCH) || timo != 0);
	KASSERT(mtx != NULL);

#ifdef DDB
	if (cold == 2)
		db_stack_dump();
#endif
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		spl = MUTEX_OLDIPL(mtx);
		MUTEX_OLDIPL(mtx) = safepri;
		mtx_leave(mtx);
#ifdef MULTIPROCESSOR
		if (_kernel_lock_held()) {
			hold_count = __mp_release_all(&kernel_lock);
			__mp_acquire_count(&kernel_lock, hold_count);
		}
#endif
		if ((priority & PNORELOCK) == 0) {
			mtx_enter(mtx);
			MUTEX_OLDIPL(mtx) = spl;
		} else
			splx(spl);
		return (0);
	}

	sleep_setup(ident, priority, wmesg);

	mtx_leave(mtx);
	/* signal may stop the process, release mutex before that */
	error = sleep_finish(timo, 1);

	if ((priority & PNORELOCK) == 0)
		mtx_enter(mtx);

	return error;
}
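
/*
 * Hypothetical usage sketch ("sc_mtx", "sc_busy" and the wait message
 * are assumptions): waiting on a condition protected by a mutex.  The
 * mutex is held across the check, dropped while asleep and, unless
 * PNORELOCK was given, reacquired before the call returns:
 *
 *	mtx_enter(&sc->sc_mtx);
 *	while (sc->sc_busy)
 *		msleep_nsec(&sc->sc_busy, &sc->sc_mtx, PWAIT, "exbusy",
 *		    INFSLP);
 *	sc->sc_busy = 1;
 *	mtx_leave(&sc->sc_mtx);
 */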

int
msleep_nsec(const volatile void *ident, struct mutex *mtx, int priority,
    const char *wmesg, uint64_t nsecs)
{
	uint64_t to_ticks;

	if (nsecs == INFSLP)
		return msleep(ident, mtx, priority, wmesg, 0);
#ifdef DIAGNOSTIC
	if (nsecs == 0) {
		log(LOG_WARNING,
		    "%s: %s[%d]: %s: trying to sleep zero nanoseconds\n",
		    __func__, curproc->p_p->ps_comm, curproc->p_p->ps_pid,
		    wmesg);
	}
#endif
	nsecs = MIN(nsecs, UINT64_MAX - tick_nsec);
	to_ticks = (nsecs + tick_nsec - 1) / (tick_nsec + 1) + 1;
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;
	return msleep(ident, mtx, priority, wmesg, (int)to_ticks);
}

/*
 * Same as tsleep, but if we have an rwlock provided, then once we've
 * entered the sleep queue we drop it.  After sleeping we re-lock.
 */
int
rwsleep(const volatile void *ident, struct rwlock *rwl, int priority,
    const char *wmesg, int timo)
{
	int error, status;

	KASSERT((priority & ~(PRIMASK | PCATCH | PNORELOCK)) == 0);
	KASSERT(ident != &nowake || ISSET(priority, PCATCH) || timo != 0);
	KASSERT(ident != rwl);
	rw_assert_anylock(rwl);
	status = rw_status(rwl);

	sleep_setup(ident, priority, wmesg);

	rw_exit(rwl);
	/* signal may stop the process, release rwlock before that */
	error = sleep_finish(timo, 1);

	if ((priority & PNORELOCK) == 0)
		rw_enter(rwl, status);

	return error;
}

int
rwsleep_nsec(const volatile void *ident, struct rwlock *rwl, int priority,
    const char *wmesg, uint64_t nsecs)
{
	uint64_t to_ticks;

	if (nsecs == INFSLP)
		return rwsleep(ident, rwl, priority, wmesg, 0);
#ifdef DIAGNOSTIC
	if (nsecs == 0) {
		log(LOG_WARNING,
		    "%s: %s[%d]: %s: trying to sleep zero nanoseconds\n",
		    __func__, curproc->p_p->ps_comm, curproc->p_p->ps_pid,
		    wmesg);
	}
#endif
	nsecs = MIN(nsecs, UINT64_MAX - tick_nsec);
	to_ticks = (nsecs + tick_nsec - 1) / (tick_nsec + 1) + 1;
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;
	return rwsleep(ident, rwl, priority, wmesg, (int)to_ticks);
}

void
sleep_setup(const volatile void *ident, int prio, const char *wmesg)
{
	struct proc *p = curproc;

#ifdef DIAGNOSTIC
	if (p->p_flag & P_CANTSLEEP)
		panic("sleep: %s failed insomnia", p->p_p->ps_comm);
	if (ident == NULL)
		panic("tsleep: no ident");
	if (p->p_stat != SONPROC)
		panic("tsleep: not SONPROC");
#endif
	/* exiting processes are not allowed to catch signals */
	if (p->p_flag & P_WEXIT)
		CLR(prio, PCATCH);

	SCHED_LOCK();

	TRACEPOINT(sched, sleep, NULL);

	p->p_wchan = ident;
	p->p_wmesg = wmesg;
	p->p_slptime = 0;
	p->p_slppri = prio & PRIMASK;
	atomic_setbits_int(&p->p_flag, P_WSLEEP);
	TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], p, p_runq);
	if (prio & PCATCH)
		atomic_setbits_int(&p->p_flag, P_SINTR);
	p->p_stat = SSLEEP;

	SCHED_UNLOCK();
}

int
sleep_finish(int timo, int do_sleep)
{
	struct proc *p = curproc;
	int catch, error = 0, error1 = 0;

	catch = p->p_flag & P_SINTR;

	if (timo != 0) {
		KASSERT((p->p_flag & P_TIMEOUT) == 0);
		timeout_add(&p->p_sleep_to, timo);
	}

	if (catch != 0) {
		/*
		 * We put ourselves on the sleep queue and start our
		 * timeout before calling sleep_signal_check(), as we could
		 * stop there, and a wakeup or a SIGCONT (or both) could
		 * occur while we were stopped.  A SIGCONT would cause
		 * us to be marked as SSLEEP without resuming us, thus
		 * we must be ready for sleep when sleep_signal_check() is
		 * called.
		 */
		if ((error = sleep_signal_check()) != 0) {
			catch = 0;
			do_sleep = 0;
		}
	}

	SCHED_LOCK();
	/*
	 * If the wakeup happens while going to sleep, p->p_wchan
	 * will be NULL.  In that case unwind immediately but still
	 * check for possible signals and timeouts.
	 */
	if (p->p_wchan == NULL)
		do_sleep = 0;
	atomic_clearbits_int(&p->p_flag, P_WSLEEP);

	if (do_sleep) {
		KASSERT(p->p_stat == SSLEEP || p->p_stat == SSTOP);
		p->p_ru.ru_nvcsw++;
		mi_switch();
	} else {
		KASSERT(p->p_stat == SONPROC || p->p_stat == SSLEEP ||
		    p->p_stat == SSTOP);
		unsleep(p);
		p->p_stat = SONPROC;
	}

#ifdef DIAGNOSTIC
	if (p->p_stat != SONPROC)
		panic("sleep_finish !SONPROC");
#endif

	p->p_cpu->ci_schedstate.spc_curpriority = p->p_usrpri;
	SCHED_UNLOCK();

	/*
	 * Even though this belongs to the signal handling part of sleep,
	 * we need to clear it before the ktrace.
	 */
	atomic_clearbits_int(&p->p_flag, P_SINTR);

	if (timo != 0) {
		if (p->p_flag & P_TIMEOUT) {
			error1 = EWOULDBLOCK;
		} else {
			/* This can sleep.  It must not use timeouts. */
			timeout_del_barrier(&p->p_sleep_to);
		}
		atomic_clearbits_int(&p->p_flag, P_TIMEOUT);
	}

	/* Check if the thread was woken up because of an unwind or a signal */
	if (catch != 0)
		error = sleep_signal_check();

	/* Signal errors are higher priority than timeouts. */
	if (error == 0 && error1 != 0)
		error = error1;

	return error;
}
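
/*
 * Sketch of the sleep_setup()/sleep_finish() protocol used by the
 * *sleep() wrappers above and by refcnt_finalize() and cond_wait()
 * below ("condition" and "chan" are placeholders): the condition is
 * rechecked after enqueueing, so a wakeup() racing with the recheck
 * clears p_wchan and sleep_finish() unwinds instead of blocking:
 *
 *	while (!condition) {
 *		sleep_setup(&chan, PWAIT, "exwait");
 *		sleep_finish(0, !condition);
 *	}
 */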

/*
 * Check and handle signals and suspensions around a sleep cycle.
 */
int
sleep_signal_check(void)
{
	struct proc *p = curproc;
	struct sigctx ctx;
	int err, sig;

	if ((err = single_thread_check(p, 1)) != 0)
		return err;
	if ((sig = cursig(p, &ctx)) != 0) {
		if (ctx.sig_intr)
			return EINTR;
		else
			return ERESTART;
	}
	return 0;
}

int
wakeup_proc(struct proc *p, int flags)
{
	int awakened = 0;

	SCHED_ASSERT_LOCKED();

	if (p->p_wchan != NULL) {
		awakened = 1;
		if (flags)
			atomic_setbits_int(&p->p_flag, flags);
#ifdef DIAGNOSTIC
		if (p->p_stat != SSLEEP && p->p_stat != SSTOP)
			panic("thread %d p_stat is %d", p->p_tid, p->p_stat);
#endif
		unsleep(p);
		if (p->p_stat == SSLEEP)
			setrunnable(p);
	}

	return awakened;
}

/*
 * Implement timeout for tsleep.
 * If the process hasn't been awakened (wchan non-zero),
 * set the timeout flag and undo the sleep.  If the proc
 * is stopped, just unsleep so it will remain stopped.
 */
void
endtsleep(void *arg)
{
	struct proc *p = arg;

	SCHED_LOCK();
	wakeup_proc(p, P_TIMEOUT);
	SCHED_UNLOCK();
}

/*
 * Remove a process from its wait queue
 */
void
unsleep(struct proc *p)
{
	SCHED_ASSERT_LOCKED();

	if (p->p_wchan != NULL) {
		TAILQ_REMOVE(&slpque[LOOKUP(p->p_wchan)], p, p_runq);
		p->p_wchan = NULL;
		p->p_wmesg = NULL;
		TRACEPOINT(sched, unsleep, p->p_tid + THREAD_PID_OFFSET,
		    p->p_p->ps_pid);
	}
}

/*
 * Make a number of processes sleeping on the specified identifier runnable.
 */
void
wakeup_n(const volatile void *ident, int n)
{
	struct slpque *qp, wakeq;
	struct proc *p;
	struct proc *pnext;

	TAILQ_INIT(&wakeq);

	SCHED_LOCK();
	qp = &slpque[LOOKUP(ident)];
	for (p = TAILQ_FIRST(qp); p != NULL && n != 0; p = pnext) {
		pnext = TAILQ_NEXT(p, p_runq);
#ifdef DIAGNOSTIC
		if (p->p_stat != SSLEEP && p->p_stat != SSTOP)
			panic("thread %d p_stat is %d", p->p_tid, p->p_stat);
#endif
		KASSERT(p->p_wchan != NULL);
		if (p->p_wchan == ident) {
			TAILQ_REMOVE(qp, p, p_runq);
			p->p_wchan = NULL;
			p->p_wmesg = NULL;
			TAILQ_INSERT_TAIL(&wakeq, p, p_runq);
			--n;
		}
	}
	while ((p = TAILQ_FIRST(&wakeq))) {
		TAILQ_REMOVE(&wakeq, p, p_runq);
		TRACEPOINT(sched, unsleep, p->p_tid + THREAD_PID_OFFSET,
		    p->p_p->ps_pid);
		if (p->p_stat == SSLEEP)
			setrunnable(p);
	}
	SCHED_UNLOCK();
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(const volatile void *chan)
{
	wakeup_n(chan, -1);
}
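
/*
 * Hypothetical wakeup pairing for the sleeps above ("sc_ready" is an
 * assumption): the waker sets the condition before issuing the
 * broadcast, so sleepers that recheck the condition see it:
 *
 *	sc->sc_ready = 1;
 *	wakeup(&sc->sc_ready);
 *
 * wakeup_one(), i.e. wakeup_n(chan, 1), limits this to one sleeper.
 */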

int
sys_sched_yield(struct proc *p, void *v, register_t *retval)
{
	struct proc *q;
	uint8_t newprio;

	/*
	 * If one of the threads of a multi-threaded process called
	 * sched_yield(2), drop its priority to ensure its siblings
	 * can make some progress.
	 */
	mtx_enter(&p->p_p->ps_mtx);
	newprio = p->p_usrpri;
	TAILQ_FOREACH(q, &p->p_p->ps_threads, p_thr_link)
		newprio = max(newprio, q->p_runpri);
	mtx_leave(&p->p_p->ps_mtx);

	SCHED_LOCK();
	setrunqueue(p->p_cpu, p, newprio);
	p->p_ru.ru_nvcsw++;
	mi_switch();
	SCHED_UNLOCK();

	return (0);
}

int
thrsleep_unlock(void *lock)
{
	static _atomic_lock_t unlocked = _ATOMIC_LOCK_UNLOCKED;
	_atomic_lock_t *atomiclock = lock;

	if (!lock)
		return 0;

	return copyout(&unlocked, atomiclock, sizeof(unlocked));
}

struct tslpentry {
	TAILQ_ENTRY(tslpentry)	tslp_link;
	long			tslp_ident;
};

/* thrsleep queue shared between processes */
static struct tslpqueue thrsleep_queue = TAILQ_HEAD_INITIALIZER(thrsleep_queue);
static struct rwlock thrsleep_lock = RWLOCK_INITIALIZER("thrsleeplk");

int
thrsleep(struct proc *p, struct sys___thrsleep_args *v)
{
	struct sys___thrsleep_args /* {
		syscallarg(const volatile void *) ident;
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
		syscallarg(void *) lock;
		syscallarg(const int *) abort;
	} */ *uap = v;
	long ident = (long)SCARG(uap, ident);
	struct tslpentry entry;
	struct tslpqueue *queue;
	struct rwlock *qlock;
	struct timespec *tsp = (struct timespec *)SCARG(uap, tp);
	void *lock = SCARG(uap, lock);
	uint64_t nsecs = INFSLP;
	int abort = 0, error;
	clockid_t clock_id = SCARG(uap, clock_id);

	if (ident == 0)
		return (EINVAL);
	if (tsp != NULL) {
		struct timespec now;

		if ((error = clock_gettime(p, clock_id, &now)))
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrabstimespec(p, tsp);
#endif

		if (timespeccmp(tsp, &now, <=)) {
			/* already passed: still do the unlock */
			if ((error = thrsleep_unlock(lock)))
				return (error);
			return (EWOULDBLOCK);
		}

		timespecsub(tsp, &now, tsp);
		nsecs = MIN(TIMESPEC_TO_NSEC(tsp), MAXTSLP);
	}

	if (ident == -1) {
		queue = &thrsleep_queue;
		qlock = &thrsleep_lock;
	} else {
		queue = &p->p_p->ps_tslpqueue;
		qlock = &p->p_p->ps_lock;
	}

	/* Interlock with wakeup. */
	entry.tslp_ident = ident;
	rw_enter_write(qlock);
	TAILQ_INSERT_TAIL(queue, &entry, tslp_link);
	rw_exit_write(qlock);

	error = thrsleep_unlock(lock);

	if (error == 0 && SCARG(uap, abort) != NULL)
		error = copyin(SCARG(uap, abort), &abort, sizeof(abort));

	rw_enter_write(qlock);
	if (error != 0)
		goto out;
	if (abort != 0) {
		error = EINTR;
		goto out;
	}
	if (entry.tslp_ident != 0) {
		error = rwsleep_nsec(&entry, qlock, PWAIT|PCATCH, "thrsleep",
		    nsecs);
	}

out:
	if (entry.tslp_ident != 0)
		TAILQ_REMOVE(queue, &entry, tslp_link);
	rw_exit_write(qlock);

	if (error == ERESTART)
		error = ECANCELED;

	return (error);
}

int
sys___thrsleep(struct proc *p, void *v, register_t *retval)
{
	struct sys___thrsleep_args /* {
		syscallarg(const volatile void *) ident;
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
		syscallarg(void *) lock;
		syscallarg(const int *) abort;
	} */ *uap = v;
	struct timespec ts;
	int error;

	if (SCARG(uap, tp) != NULL) {
		if ((error = copyin(SCARG(uap, tp), &ts, sizeof(ts)))) {
			*retval = error;
			return 0;
		}
		if (!timespecisvalid(&ts)) {
			*retval = EINVAL;
			return 0;
		}
		SCARG(uap, tp) = &ts;
	}

	*retval = thrsleep(p, uap);
	return 0;
}

int
sys___thrwakeup(struct proc *p, void *v, register_t *retval)
{
	struct sys___thrwakeup_args /* {
		syscallarg(const volatile void *) ident;
		syscallarg(int) n;
	} */ *uap = v;
	struct tslpentry *entry, *tmp;
	struct tslpqueue *queue;
	struct rwlock *qlock;
	long ident = (long)SCARG(uap, ident);
	int n = SCARG(uap, n);
	int found = 0;

	if (ident == 0)
		*retval = EINVAL;
	else {
		if (ident == -1) {
			queue = &thrsleep_queue;
			qlock = &thrsleep_lock;
			/*
			 * Wake up all waiters with ident -1.  This is needed
			 * because ident -1 can be shared by multiple userspace
			 * lock state machines concurrently.  The implementation
			 * has no way to direct the wakeup to a particular
			 * state machine.
			 */
			n = 0;
		} else {
			queue = &p->p_p->ps_tslpqueue;
			qlock = &p->p_p->ps_lock;
		}

		rw_enter_write(qlock);
		TAILQ_FOREACH_SAFE(entry, queue, tslp_link, tmp) {
			if (entry->tslp_ident == ident) {
				TAILQ_REMOVE(queue, entry, tslp_link);
				entry->tslp_ident = 0;
				wakeup_one(entry);
				if (++found == n)
					break;
			}
		}
		rw_exit_write(qlock);

		if (ident == -1)
			*retval = 0;
		else
			*retval = found ? 0 : ESRCH;
	}

	return (0);
}
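
/*
 * Hypothetical userland sketch of the pair above ("waitchan" and "go"
 * are assumptions; librthread is the intended consumer of these
 * syscalls, and whether the stubs are directly callable depends on
 * libc).  A sleeper blocks on an arbitrary identifier until a waker
 * stores to the shared flag and aims a wakeup at the same identifier:
 *
 *	// sleeper
 *	while (go == 0)
 *		__thrsleep(&waitchan, CLOCK_REALTIME, NULL, NULL, NULL);
 *
 *	// waker
 *	go = 1;
 *	__thrwakeup(&waitchan, 1);
 */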

void
refcnt_init(struct refcnt *r)
{
	refcnt_init_trace(r, 0);
}

void
refcnt_init_trace(struct refcnt *r, int idx)
{
	r->r_traceidx = idx;
	atomic_store_int(&r->r_refs, 1);
	TRACEINDEX(refcnt, r->r_traceidx, r, 0, +1);
}

void
refcnt_take(struct refcnt *r)
{
	u_int refs;

	refs = atomic_inc_int_nv(&r->r_refs);
	KASSERT(refs != 0);
	TRACEINDEX(refcnt, r->r_traceidx, r, refs - 1, +1);
	(void)refs;
}

int
refcnt_rele(struct refcnt *r)
{
	u_int refs;

	membar_exit_before_atomic();
	refs = atomic_dec_int_nv(&r->r_refs);
	KASSERT(refs != ~0);
	TRACEINDEX(refcnt, r->r_traceidx, r, refs + 1, -1);
	if (refs == 0) {
		membar_enter_after_atomic();
		return (1);
	}
	return (0);
}

void
refcnt_rele_wake(struct refcnt *r)
{
	if (refcnt_rele(r))
		wakeup_one(r);
}

void
refcnt_finalize(struct refcnt *r, const char *wmesg)
{
	u_int refs;

	membar_exit_before_atomic();
	refs = atomic_dec_int_nv(&r->r_refs);
	KASSERT(refs != ~0);
	TRACEINDEX(refcnt, r->r_traceidx, r, refs + 1, -1);
	while (refs) {
		sleep_setup(r, PWAIT, wmesg);
		refs = atomic_load_int(&r->r_refs);
		sleep_finish(0, refs);
	}
	TRACEINDEX(refcnt, r->r_traceidx, r, refs, 0);
	/* Order subsequent loads and stores after refs == 0 load. */
	membar_sync();
}
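
/*
 * Hypothetical refcnt life cycle ("ex", "ex_refs" and the wait message
 * are assumptions): the object starts with one reference, every user
 * takes and releases its own, and the destructor trades the initial
 * reference for a sleep until all the others are gone:
 *
 *	refcnt_init(&ex->ex_refs);		// refs = 1
 *
 *	refcnt_take(&ex->ex_refs);		// per-user reference
 *	...
 *	refcnt_rele_wake(&ex->ex_refs);		// drop it, wake finalizer
 *
 *	refcnt_finalize(&ex->ex_refs, "exfin");	// drop initial ref, wait
 *	// no references remain; ex may be freed
 */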

int
refcnt_shared(struct refcnt *r)
{
	u_int refs;

	refs = atomic_load_int(&r->r_refs);
	TRACEINDEX(refcnt, r->r_traceidx, r, refs, 0);
	return (refs > 1);
}

unsigned int
refcnt_read(struct refcnt *r)
{
	u_int refs;

	refs = atomic_load_int(&r->r_refs);
	TRACEINDEX(refcnt, r->r_traceidx, r, refs, 0);
	return (refs);
}

void
cond_init(struct cond *c)
{
	atomic_store_int(&c->c_wait, 1);
}

void
cond_signal(struct cond *c)
{
	atomic_store_int(&c->c_wait, 0);

	wakeup_one(c);
}

void
cond_wait(struct cond *c, const char *wmesg)
{
	unsigned int wait;

	wait = atomic_load_int(&c->c_wait);
	while (wait) {
		sleep_setup(c, PWAIT, wmesg);
		wait = atomic_load_int(&c->c_wait);
		sleep_finish(0, wait);
	}
}
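/*
 * Hypothetical cond pairing ("c" is shared between waiter and
 * signaler; the wait message is an assumption):
 *
 *	cond_init(&c);			// before handing c to the signaler
 *	cond_wait(&c, "excond");	// blocks until c_wait is cleared
 *
 * and on the signaler's side:
 *
 *	cond_signal(&c);		// clears c_wait, wakes the waiter
 */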