/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implementation of sleep queues used to hold the queue of threads blocked
 * on a wait channel.  Sleep queues are different from turnstiles in that
 * wait channels are not owned by anyone, so there is no priority
 * propagation.  Sleep queues can also provide a timeout and can be
 * interrupted by signals.  That said, there are several similarities
 * between the turnstile and sleep queue implementations.  (Note:
 * turnstiles were implemented first.)  For example, both use a hash table
 * of the same size where each bucket is referred to as a "chain" that
 * contains both a spin lock and a linked list of queues.  An individual
 * queue is located by using a hash to pick a chain, locking the chain, and
 * then walking the chain searching for the queue.  This means that a wait
 * channel object does not need to embed its queue head just as locks do
 * not embed their turnstile queue head.  Threads also carry around a sleep
 * queue that they lend to the wait channel when blocking.  Just as in
 * turnstiles, the queue includes a free list of the sleep queues of other
 * threads blocked on the same wait channel in the case of multiple
 * waiters.
 *
 * Some additional functionality provided by sleep queues includes the
 * ability to set a timeout.  The timeout is managed using a per-thread
 * callout that resumes a thread if it is asleep.  A thread may also
 * catch signals while it is asleep (aka an interruptible sleep).  The
 * signal code uses sleepq_abort() to interrupt a sleeping thread.  Finally,
 * sleep queues also provide some extra assertions.  One is not allowed to
 * mix the sleep/wakeup and cv APIs for a given wait channel.  Also, one
 * must consistently use the same lock to synchronize with a wait channel,
 * though this check is currently only a warning for sleep/wakeup due to
 * pre-existing abuse of that API.  The same lock must also be held when
 * awakening threads, though that is currently only enforced for condition
 * variables.
 */
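
/*
 * A hedged usage sketch, modeled loosely on _sleep() and wakeup() (the
 * wait channel "chan" and interlock "m" are hypothetical caller state,
 * and the swapper-wakeup return values are omitted).  The sleeping side:
 *
 *	sleepq_lock(chan);
 *	sleepq_add(chan, &m->lock_object, "example", SLEEPQ_SLEEP, 0);
 *	mtx_unlock(m);		(drop the interlock before sleeping)
 *	sleepq_wait(chan, 0);
 *
 * and the corresponding waking side:
 *
 *	sleepq_lock(chan);
 *	sleepq_broadcast(chan, SLEEPQ_SLEEP, 0, 0);
 *	sleepq_release(chan);
 */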

#include <sys/cdefs.h>
#include "opt_sleepqueue_profiling.h"
#include "opt_ddb.h"
#include "opt_sched.h"
#include "opt_stack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#ifdef EPOCH_TRACE
#include <sys/epoch.h>
#endif

#include <machine/atomic.h>

#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * Constants for the hash table of sleep queue chains.
 * SC_TABLESIZE must be a power of two for SC_MASK to work properly.
 */
#ifndef SC_TABLESIZE
#define	SC_TABLESIZE	256
#endif
CTASSERT(powerof2(SC_TABLESIZE));
#define	SC_MASK		(SC_TABLESIZE - 1)
#define	SC_SHIFT	8
#define	SC_HASH(wc)	((((uintptr_t)(wc) >> SC_SHIFT) ^ (uintptr_t)(wc)) & \
			    SC_MASK)
#define	SC_LOOKUP(wc)	&sleepq_chains[SC_HASH(wc)]
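
/*
 * As a worked example (assuming the default SC_TABLESIZE of 256, so
 * SC_SHIFT is 8 and SC_MASK is 0xff): a wait channel address of 0x1234
 * hashes to ((0x1234 >> 8) ^ 0x1234) & 0xff = 0x1226 & 0xff = 0x26,
 * i.e. chain 38.
 */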
#define	NR_SLEEPQS	2
/*
 * There are two different lists of sleep queues.  Both lists are connected
 * via the sq_hash entries.  The first list is the sleep queue chain list
 * that a sleep queue is on when it is attached to a wait channel.  The
 * second list is the free list hung off of a sleep queue that is attached
 * to a wait channel.
 *
 * Each sleep queue also contains the wait channel it is attached to, the
 * list of threads blocked on that wait channel, flags specific to the
 * wait channel, and the lock used to synchronize with a wait channel.
 * The flags are used to catch mismatches between the various consumers
 * of the sleep queue API (e.g. sleep/wakeup and condition variables).
 * The lock pointer is only used when invariants are enabled for various
 * debugging checks.
 *
 * Locking key:
 *  c - sleep queue chain lock
 */
struct sleepqueue {
	struct threadqueue sq_blocked[NR_SLEEPQS]; /* (c) Blocked threads. */
	u_int sq_blockedcnt[NR_SLEEPQS];	/* (c) N. of blocked threads. */
	LIST_ENTRY(sleepqueue) sq_hash;		/* (c) Chain and free list. */
	LIST_HEAD(, sleepqueue) sq_free;	/* (c) Free queues. */
	const void	*sq_wchan;		/* (c) Wait channel. */
	int	sq_type;			/* (c) Queue type. */
#ifdef INVARIANTS
	struct lock_object *sq_lock;		/* (c) Associated lock. */
#endif
};

struct sleepqueue_chain {
	LIST_HEAD(, sleepqueue) sc_queues;	/* List of sleep queues. */
	struct mtx sc_lock;			/* Spin lock for this chain. */
#ifdef SLEEPQUEUE_PROFILING
	u_int	sc_depth;			/* Length of sc_queues. */
	u_int	sc_max_depth;			/* Max length of sc_queues. */
#endif
} __aligned(CACHE_LINE_SIZE);

#ifdef SLEEPQUEUE_PROFILING
static SYSCTL_NODE(_debug, OID_AUTO, sleepq, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "sleepq profiling");
static SYSCTL_NODE(_debug_sleepq, OID_AUTO, chains,
    CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "sleepq chain stats");
static u_int sleepq_max_depth;
SYSCTL_UINT(_debug_sleepq, OID_AUTO, max_depth, CTLFLAG_RD, &sleepq_max_depth,
    0, "maximum depth achieved of a single chain");

static void	sleepq_profile(const char *wmesg);
static int	prof_enabled;
#endif
static struct sleepqueue_chain sleepq_chains[SC_TABLESIZE];
static uma_zone_t sleepq_zone;

/*
 * Prototypes for non-exported routines.
 */
static int	sleepq_catch_signals(const void *wchan, int pri);
static inline int sleepq_check_signals(void);
static inline int sleepq_check_timeout(void);
#ifdef INVARIANTS
static void	sleepq_dtor(void *mem, int size, void *arg);
#endif
static int	sleepq_init(void *mem, int size, int flags);
static int	sleepq_resume_thread(struct sleepqueue *sq, struct thread *td,
		    int pri, int srqflags);
static void	sleepq_remove_thread(struct sleepqueue *sq, struct thread *td);
static void	sleepq_switch(const void *wchan, int pri);
static void	sleepq_timeout(void *arg);

SDT_PROBE_DECLARE(sched, , , sleep);
SDT_PROBE_DECLARE(sched, , , wakeup);

/*
 * Initialize SLEEPQUEUE_PROFILING specific sysctl nodes.
 * Note that it must happen after sleepinit() has been fully executed, so
 * it must happen after SI_SUB_KMEM SYSINIT() subsystem setup.
 */
#ifdef SLEEPQUEUE_PROFILING
static void
init_sleepqueue_profiling(void)
{
	char chain_name[10];
	struct sysctl_oid *chain_oid;
	u_int i;

	for (i = 0; i < SC_TABLESIZE; i++) {
		snprintf(chain_name, sizeof(chain_name), "%u", i);
		chain_oid = SYSCTL_ADD_NODE(NULL,
		    SYSCTL_STATIC_CHILDREN(_debug_sleepq_chains), OID_AUTO,
		    chain_name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
		    "sleepq chain stats");
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "depth", CTLFLAG_RD, &sleepq_chains[i].sc_depth, 0, NULL);
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_depth", CTLFLAG_RD, &sleepq_chains[i].sc_max_depth, 0,
		    NULL);
	}
}

SYSINIT(sleepqueue_profiling, SI_SUB_LOCK, SI_ORDER_ANY,
    init_sleepqueue_profiling, NULL);
#endif

/*
 * Early initialization of sleep queues that is called from the sleepinit()
 * SYSINIT.
 */
void
init_sleepqueues(void)
{
	int i;

	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_INIT(&sleepq_chains[i].sc_queues);
		mtx_init(&sleepq_chains[i].sc_lock, "sleepq chain", NULL,
		    MTX_SPIN);
	}
	sleepq_zone = uma_zcreate("SLEEPQUEUE", sizeof(struct sleepqueue),
#ifdef INVARIANTS
	    NULL, sleepq_dtor, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#else
	    NULL, NULL, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#endif

	thread0.td_sleepqueue = sleepq_alloc();
}

/*
 * Get a sleep queue for a new thread.
 */
struct sleepqueue *
sleepq_alloc(void)
{

	return (uma_zalloc(sleepq_zone, M_WAITOK));
}

/*
 * Free a sleep queue when a thread is destroyed.
 */
void
sleepq_free(struct sleepqueue *sq)
{

	uma_zfree(sleepq_zone, sq);
}

/*
 * Lock the sleep queue chain associated with the specified wait channel.
 */
void
sleepq_lock(const void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_lock_spin(&sc->sc_lock);
}

/*
 * Look up the sleep queue associated with a given wait channel in the hash
 * table.  The associated sleep queue chain must already be locked.  If no
 * queue is found in the table, NULL is returned.
 */
struct sleepqueue *
sleepq_lookup(const void *wchan)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			return (sq);
	return (NULL);
}

/*
 * Unlock the sleep queue chain associated with a given wait channel.
 */
void
sleepq_release(const void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_unlock_spin(&sc->sc_lock);
}

/*
 * Places the current thread on the sleep queue for the specified wait
 * channel.  If INVARIANTS is enabled, then it associates the passed in
 * lock with the sleepq to make sure it is held when that sleep queue is
 * woken up.
 */
void
sleepq_add(const void *wchan, struct lock_object *lock, const char *wmesg,
    int flags, int queue)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(td->td_sleepqueue != NULL);
	MPASS(wchan != NULL);
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));

	/* If this thread is not allowed to sleep, die a horrible death. */
	if (__predict_false(!THREAD_CAN_SLEEP())) {
#ifdef EPOCH_TRACE
		epoch_trace_list(curthread);
#endif
		KASSERT(0,
		    ("%s: td %p to sleep on wchan %p with sleeping prohibited",
		    __func__, td, wchan));
	}

	/* Look up the sleep queue associated with the wait channel 'wchan'. */
	sq = sleepq_lookup(wchan);

	/*
	 * If the wait channel does not already have a sleep queue, use
	 * this thread's sleep queue.  Otherwise, insert the current thread
	 * into the sleep queue already in use by this wait channel.
	 */
	if (sq == NULL) {
#ifdef INVARIANTS
		int i;

		sq = td->td_sleepqueue;
		for (i = 0; i < NR_SLEEPQS; i++) {
			KASSERT(TAILQ_EMPTY(&sq->sq_blocked[i]),
			    ("thread's sleep queue %d is not empty", i));
			KASSERT(sq->sq_blockedcnt[i] == 0,
			    ("thread's sleep queue %d count mismatches", i));
		}
		KASSERT(LIST_EMPTY(&sq->sq_free),
		    ("thread's sleep queue has a non-empty free list"));
		KASSERT(sq->sq_wchan == NULL, ("stale sq_wchan pointer"));
		sq->sq_lock = lock;
#endif
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth++;
		if (sc->sc_depth > sc->sc_max_depth) {
			sc->sc_max_depth = sc->sc_depth;
			if (sc->sc_max_depth > sleepq_max_depth)
				sleepq_max_depth = sc->sc_max_depth;
		}
#endif
		sq = td->td_sleepqueue;
		LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
		sq->sq_wchan = wchan;
		sq->sq_type = flags & SLEEPQ_TYPE;
	} else {
		MPASS(wchan == sq->sq_wchan);
		MPASS(lock == sq->sq_lock);
		MPASS((flags & SLEEPQ_TYPE) == sq->sq_type);
		LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
	}
	thread_lock(td);
	TAILQ_INSERT_TAIL(&sq->sq_blocked[queue], td, td_slpq);
	sq->sq_blockedcnt[queue]++;
	td->td_sleepqueue = NULL;
	td->td_sqqueue = queue;
	td->td_wchan = wchan;
	td->td_wmesg = wmesg;
	if (flags & SLEEPQ_INTERRUPTIBLE) {
		td->td_intrval = 0;
		td->td_flags |= TDF_SINTR;
	}
	td->td_flags &= ~TDF_TIMEOUT;
	thread_unlock(td);
}

/*
 * Sets a timeout that will remove the current thread from the
 * specified sleep queue at the specified time if the thread has not
 * already been awakened.  Flags are from C_* (callout) namespace.
 */
void
sleepq_set_timeout_sbt(const void *wchan, sbintime_t sbt, sbintime_t pr,
    int flags)
{
	struct sleepqueue_chain *sc __unused;
	struct thread *td;
	sbintime_t pr1;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_sleepqueue == NULL);
	MPASS(wchan != NULL);
	if (cold && td == &thread0)
		panic("timed sleep before timers are working");
	KASSERT(td->td_sleeptimo == 0, ("td %d %p td_sleeptimo %jx",
	    td->td_tid, td, (uintmax_t)td->td_sleeptimo));
	thread_lock(td);
	callout_when(sbt, pr, flags, &td->td_sleeptimo, &pr1);
	thread_unlock(td);
	callout_reset_sbt_on(&td->td_slpcallout, td->td_sleeptimo, pr1,
	    sleepq_timeout, td, PCPU_GET(cpuid), flags | C_PRECALC |
	    C_DIRECT_EXEC);
}
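
/*
 * A hedged usage sketch (modeled on _sleep() with a timeout; "chan", "m",
 * and "sbt" stand in for hypothetical caller state): the timeout is armed
 * after sleepq_add() and before the thread actually blocks:
 *
 *	sleepq_lock(chan);
 *	sleepq_add(chan, &m->lock_object, "example", SLEEPQ_SLEEP, 0);
 *	sleepq_set_timeout_sbt(chan, sbt, 0, C_HARDCLOCK);
 *	mtx_unlock(m);
 *	error = sleepq_timedwait(chan, 0);	(0 or EWOULDBLOCK)
 */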

/*
 * Return the number of actual sleepers for the specified queue.
 */
u_int
sleepq_sleepcnt(const void *wchan, int queue)
{
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	return (sq->sq_blockedcnt[queue]);
}

static int
sleepq_check_ast_sc_locked(struct thread *td, struct sleepqueue_chain *sc)
{
	struct proc *p;
	int ret;

	mtx_assert(&sc->sc_lock, MA_OWNED);

	if ((td->td_pflags & TDP_WAKEUP) != 0) {
		td->td_pflags &= ~TDP_WAKEUP;
		thread_lock(td);
		return (EINTR);
	}

	/*
	 * See if there are any pending signals or suspension requests for this
	 * thread.  If not, we can switch immediately.
	 */
	thread_lock(td);
	if (!td_ast_pending(td, TDA_SIG) && !td_ast_pending(td, TDA_SUSPEND))
		return (0);

	thread_unlock(td);
	mtx_unlock_spin(&sc->sc_lock);

	p = td->td_proc;
	CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %ld, %s)",
	    (void *)td, (long)p->p_pid, td->td_name);
	PROC_LOCK(p);

	/*
	 * Check for suspension first.  Checking for signals and then
	 * suspending could result in a missed signal, since a signal
	 * can be delivered while this thread is suspended.
	 */
	ret = sig_ast_checksusp(td);
	if (ret != 0) {
		PROC_UNLOCK(p);
		mtx_lock_spin(&sc->sc_lock);
		thread_lock(td);
		return (ret);
	}

	ret = sig_ast_needsigchk(td);

	/*
	 * Lock the per-process spinlock prior to dropping the
	 * PROC_LOCK to avoid a signal delivery race.
	 * PROC_LOCK, PROC_SLOCK, and thread_lock() are
	 * currently held in tdsendsignal() and thread_single().
	 */
	PROC_SLOCK(p);
	mtx_lock_spin(&sc->sc_lock);
	PROC_UNLOCK(p);
	thread_lock(td);
	PROC_SUNLOCK(p);

	return (ret);
}

/*
 * Marks the pending sleep of the current thread as interruptible and
 * makes an initial check for pending signals before putting a thread
 * to sleep.  Enters and exits with the thread lock held.  Thread lock
 * may have transitioned from the sleepq lock to a run lock.
 */
static int
sleepq_catch_signals(const void *wchan, int pri)
{
	struct thread *td;
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	int ret;

	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(wchan != NULL);
	td = curthread;

	ret = sleepq_check_ast_sc_locked(td, sc);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	mtx_assert(&sc->sc_lock, MA_OWNED);

	if (ret == 0) {
		/*
		 * No pending signals and no suspension requests found.
		 * Switch the thread off the cpu.
		 */
		sleepq_switch(wchan, pri);
	} else {
		/*
		 * There were pending signals and this thread is still
		 * on the sleep queue; remove it from the sleep queue.
		 */
		if (TD_ON_SLEEPQ(td)) {
			sq = sleepq_lookup(wchan);
			sleepq_remove_thread(sq, td);
		}
		MPASS(td->td_lock != &sc->sc_lock);
		mtx_unlock_spin(&sc->sc_lock);
		thread_unlock(td);
	}
	return (ret);
}

/*
 * Switches to another thread if we are still asleep on a sleep queue.
 * Returns with the thread lock held.
 */
static void
sleepq_switch(const void *wchan, int pri)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	bool rtc_changed;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/*
	 * If we have a sleep queue, then we've already been woken up, so
	 * just return.
	 */
	if (td->td_sleepqueue != NULL) {
		mtx_unlock_spin(&sc->sc_lock);
		thread_unlock(td);
		return;
	}

	/*
	 * If TDF_TIMEOUT is set, then our sleep has been timed out
	 * already but we are still on the sleep queue, so dequeue the
	 * thread and return.
	 *
	 * Do the same if the real-time clock has been adjusted since this
	 * thread calculated its timeout based on that clock.  This handles
	 * the following race:
	 * - The Ts thread needs to sleep until an absolute real-clock time.
	 *   It copies the global rtc_generation into curthread->td_rtcgen,
	 *   reads the RTC, and calculates a sleep duration based on that time.
	 *   See umtxq_sleep() for an example.
	 * - The Tc thread adjusts the RTC, bumps rtc_generation, and wakes
	 *   threads that are sleeping until an absolute real-clock time.
	 *   See tc_setclock() and the POSIX specification of clock_settime().
	 * - Ts reaches the code below.  It holds the sleepqueue chain lock,
	 *   so Tc has finished waking, so this thread must test td_rtcgen.
	 *   (The declaration of td_rtcgen refers to this comment.)
	 */
	rtc_changed = td->td_rtcgen != 0 && td->td_rtcgen != rtc_generation;
	if ((td->td_flags & TDF_TIMEOUT) || rtc_changed) {
		if (rtc_changed) {
			td->td_rtcgen = 0;
		}
		MPASS(TD_ON_SLEEPQ(td));
		sq = sleepq_lookup(wchan);
		sleepq_remove_thread(sq, td);
		mtx_unlock_spin(&sc->sc_lock);
		thread_unlock(td);
		return;
	}
#ifdef SLEEPQUEUE_PROFILING
	if (prof_enabled)
		sleepq_profile(td->td_wmesg);
#endif
	MPASS(td->td_sleepqueue == NULL);
	sched_sleep(td, pri);
	thread_lock_set(td, &sc->sc_lock);
	SDT_PROBE0(sched, , , sleep);
	TD_SET_SLEEPING(td);
	mi_switch(SW_VOL | SWT_SLEEPQ);
	KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
	CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
}

/*
 * Check to see if we timed out.
 */
static inline int
sleepq_check_timeout(void)
{
	struct thread *td;
	int res;

	res = 0;
	td = curthread;
	if (td->td_sleeptimo != 0) {
		if (td->td_sleeptimo <= sbinuptime())
			res = EWOULDBLOCK;
		td->td_sleeptimo = 0;
	}
	return (res);
}

/*
 * Check to see if we were awoken by a signal.
 */
static inline int
sleepq_check_signals(void)
{
	struct thread *td;

	td = curthread;
	KASSERT((td->td_flags & TDF_SINTR) == 0,
	    ("thread %p still in interruptible sleep?", td));

	return (td->td_intrval);
}

/*
 * Block the current thread until it is awakened from its sleep queue.
 */
void
sleepq_wait(const void *wchan, int pri)
{
	struct thread *td;

	td = curthread;
	MPASS(!(td->td_flags & TDF_SINTR));
	thread_lock(td);
	sleepq_switch(wchan, pri);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it is interrupted by a signal.
 */
int
sleepq_wait_sig(const void *wchan, int pri)
{
	int rcatch;

	rcatch = sleepq_catch_signals(wchan, pri);
	if (rcatch)
		return (rcatch);
	return (sleepq_check_signals());
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it times out while waiting.
 */
int
sleepq_timedwait(const void *wchan, int pri)
{
	struct thread *td;

	td = curthread;
	MPASS(!(td->td_flags & TDF_SINTR));

	thread_lock(td);
	sleepq_switch(wchan, pri);

	return (sleepq_check_timeout());
}

/*
 * Block the current thread until it is awakened from its sleep queue,
 * it is interrupted by a signal, or it times out waiting to be awakened.
 */
int
sleepq_timedwait_sig(const void *wchan, int pri)
{
	int rcatch, rvalt, rvals;

	rcatch = sleepq_catch_signals(wchan, pri);
	/* We must always call check_timeout() to clear sleeptimo. */
	rvalt = sleepq_check_timeout();
	rvals = sleepq_check_signals();
	if (rcatch)
		return (rcatch);
	if (rvals)
		return (rvals);
	return (rvalt);
}

/*
 * Returns the type of the sleep queue given a wait channel.
 */
int
sleepq_type(const void *wchan)
{
	struct sleepqueue *sq;
	int type;

	MPASS(wchan != NULL);

	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (-1);
	type = sq->sq_type;

	return (type);
}

/*
 * Removes a thread from a sleep queue and makes it runnable.
 *
 * Requires the sc chain locked on entry.  If SRQ_HOLD is specified it will
 * be locked on return.  Returns without the thread lock held.
 */
static int
sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri,
    int srqflags)
{
	struct sleepqueue_chain *sc;
	bool drop;

	MPASS(td != NULL);
	MPASS(sq->sq_wchan != NULL);
	MPASS(td->td_wchan == sq->sq_wchan);

	sc = SC_LOOKUP(sq->sq_wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);

	/*
	 * Avoid recursing on the chain lock.  If the locks don't match we
	 * need to acquire the thread lock which setrunnable will drop for
	 * us.  In this case we need to drop the chain lock afterwards.
	 *
	 * There is no race that will make td_lock equal to sc_lock because
	 * we hold sc_lock.
	 */
	drop = false;
	if (!TD_IS_SLEEPING(td)) {
		thread_lock(td);
		drop = true;
	} else
		thread_lock_block_wait(td);

	/* Remove thread from the sleepq. */
	sleepq_remove_thread(sq, td);

	/* If we're done with the sleepqueue release it. */
	if ((srqflags & SRQ_HOLD) == 0 && drop)
		mtx_unlock_spin(&sc->sc_lock);

	/* Adjust priority if requested. */
	MPASS(pri == 0 || (pri >= PRI_MIN && pri <= PRI_MAX));
	if (pri != 0 && td->td_priority > pri &&
	    PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
		sched_prio(td, pri);

	/*
	 * Note that thread td might not be sleeping if it is running
	 * sleepq_catch_signals() on another CPU or is blocked on its
	 * proc lock to check signals.  There's no need to mark the
	 * thread runnable in that case.
	 */
	if (TD_IS_SLEEPING(td)) {
		MPASS(!drop);
		TD_CLR_SLEEPING(td);
		return (setrunnable(td, srqflags));
	}
	MPASS(drop);
	thread_unlock(td);

	return (0);
}

static void
sleepq_remove_thread(struct sleepqueue *sq, struct thread *td)
{
	struct sleepqueue_chain *sc __unused;

	MPASS(td != NULL);
	MPASS(sq->sq_wchan != NULL);
	MPASS(td->td_wchan == sq->sq_wchan);
	MPASS(td->td_sqqueue < NR_SLEEPQS && td->td_sqqueue >= 0);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	sc = SC_LOOKUP(sq->sq_wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);

	SDT_PROBE2(sched, , , wakeup, td, td->td_proc);

	/* Remove the thread from the queue. */
	sq->sq_blockedcnt[td->td_sqqueue]--;
	TAILQ_REMOVE(&sq->sq_blocked[td->td_sqqueue], td, td_slpq);

	/*
	 * Get a sleep queue for this thread.  If this is the last waiter,
	 * use the queue itself and take it out of the chain, otherwise,
	 * remove a queue from the free list.
	 */
	if (LIST_EMPTY(&sq->sq_free)) {
		td->td_sleepqueue = sq;
#ifdef INVARIANTS
		sq->sq_wchan = NULL;
#endif
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth--;
#endif
	} else
		td->td_sleepqueue = LIST_FIRST(&sq->sq_free);
	LIST_REMOVE(td->td_sleepqueue, sq_hash);

	if ((td->td_flags & TDF_TIMEOUT) == 0 && td->td_sleeptimo != 0 &&
	    td->td_lock == &sc->sc_lock) {
		/*
		 * We ignore the situation where timeout subsystem was
		 * unable to stop our callout.  The struct thread is
		 * type-stable, the callout will use the correct
		 * memory when running.  The checks of the
		 * td_sleeptimo value in this function and in
		 * sleepq_timeout() ensure that the thread does not
		 * get spurious wakeups, even if the callout was reset
		 * or thread reused.
		 *
		 * We also cannot safely stop the callout if a scheduler
		 * lock is held since softclock_thread() forces a lock
		 * order of callout lock -> scheduler lock.  The thread
		 * lock will be a scheduler lock only if the thread is
		 * preparing to go to sleep, so this is hopefully a rare
		 * scenario.
		 */
		callout_stop(&td->td_slpcallout);
	}

	td->td_wmesg = NULL;
	td->td_wchan = NULL;
	td->td_flags &= ~(TDF_SINTR | TDF_TIMEOUT);

	CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, td->td_name);
}

void
sleepq_remove_nested(struct thread *td)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	const void *wchan;

	MPASS(TD_ON_SLEEPQ(td));

	wchan = td->td_wchan;
	sc = SC_LOOKUP(wchan);
	mtx_lock_spin(&sc->sc_lock);
	sq = sleepq_lookup(wchan);
	MPASS(sq != NULL);
	thread_lock(td);
	sleepq_remove_thread(sq, td);
	mtx_unlock_spin(&sc->sc_lock);
	/* Returns with the thread lock owned. */
}

#ifdef INVARIANTS
/*
 * UMA zone item deallocator.
 */
static void
sleepq_dtor(void *mem, int size, void *arg)
{
	struct sleepqueue *sq;
	int i;

	sq = mem;
	for (i = 0; i < NR_SLEEPQS; i++) {
		MPASS(TAILQ_EMPTY(&sq->sq_blocked[i]));
		MPASS(sq->sq_blockedcnt[i] == 0);
	}
}
#endif

/*
 * UMA zone item initializer.
 */
static int
sleepq_init(void *mem, int size, int flags)
{
	struct sleepqueue *sq;
	int i;

	bzero(mem, size);
	sq = mem;
	for (i = 0; i < NR_SLEEPQS; i++) {
		TAILQ_INIT(&sq->sq_blocked[i]);
		sq->sq_blockedcnt[i] = 0;
	}
	LIST_INIT(&sq->sq_free);
	return (0);
}

/*
 * Find thread sleeping on a wait channel and resume it.
 */
int
sleepq_signal(const void *wchan, int flags, int pri, int queue)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct threadqueue *head;
	struct thread *td, *besttd;
	int wakeup_swapper;

	CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL) {
		if (flags & SLEEPQ_DROP)
			sleepq_release(wchan);
		return (0);
	}
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));

	head = &sq->sq_blocked[queue];
	if (flags & SLEEPQ_UNFAIR) {
		/*
		 * Find the most recently sleeping thread, but try to
		 * skip threads still in process of context switch to
		 * avoid spinning on the thread lock.
		 */
		sc = SC_LOOKUP(wchan);
		besttd = TAILQ_LAST_FAST(head, thread, td_slpq);
		while (besttd->td_lock != &sc->sc_lock) {
			td = TAILQ_PREV_FAST(besttd, head, thread, td_slpq);
			if (td == NULL)
				break;
			besttd = td;
		}
	} else {
		/*
		 * Find the highest priority thread on the queue.  If there
		 * is a tie, use the thread that first appears in the queue
		 * as it has been sleeping the longest since threads are
		 * always added to the tail of sleep queues.
		 */
		besttd = td = TAILQ_FIRST(head);
		while ((td = TAILQ_NEXT(td, td_slpq)) != NULL) {
			if (td->td_priority < besttd->td_priority)
				besttd = td;
		}
	}
	MPASS(besttd != NULL);
	wakeup_swapper = sleepq_resume_thread(sq, besttd, pri,
	    (flags & SLEEPQ_DROP) ? 0 : SRQ_HOLD);
	return (wakeup_swapper);
}

static bool
match_any(struct thread *td __unused)
{

	return (true);
}

/*
 * Resume all threads sleeping on a specified wait channel.
 */
int
sleepq_broadcast(const void *wchan, int flags, int pri, int queue)
{
	struct sleepqueue *sq;

	CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));

	return (sleepq_remove_matching(sq, queue, match_any, pri));
}

/*
 * Resume threads on the sleep queue that match the given predicate.
 */
int
sleepq_remove_matching(struct sleepqueue *sq, int queue,
    bool (*matches)(struct thread *), int pri)
{
	struct thread *td, *tdn;
	int wakeup_swapper;

	/*
	 * The last thread will be given ownership of sq and may
	 * re-enqueue itself before sleepq_resume_thread() returns,
	 * so we must cache the "next" queue item at the beginning
	 * of the final iteration.
	 */
	wakeup_swapper = 0;
	TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq, tdn) {
		if (matches(td))
			wakeup_swapper |= sleepq_resume_thread(sq, td, pri,
			    SRQ_HOLD);
	}

	return (wakeup_swapper);
}
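
/*
 * A hypothetical predicate sketch mirroring match_any() above ("target"
 * is caller state, not part of this API): resume only the threads of a
 * single process, with the sleep queue chain lock held as usual:
 *
 *	static bool
 *	match_proc(struct thread *td)
 *	{
 *		return (td->td_proc == target);
 *	}
 *
 *	sleepq_remove_matching(sq, queue, match_proc, 0);
 */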

/*
 * Time sleeping threads out.  When the timeout expires, the thread is
 * removed from the sleep queue and made runnable if it is still asleep.
 */
static void
sleepq_timeout(void *arg)
{
	struct sleepqueue_chain *sc __unused;
	struct sleepqueue *sq;
	struct thread *td;
	const void *wchan;
	int wakeup_swapper;

	td = arg;
	CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);

	thread_lock(td);
	if (td->td_sleeptimo == 0 ||
	    td->td_sleeptimo > td->td_slpcallout.c_time) {
		/*
		 * The thread does not want a timeout (yet).
		 */
	} else if (TD_IS_SLEEPING(td) && TD_ON_SLEEPQ(td)) {
		/*
		 * See if the thread is asleep and get the wait
		 * channel if it is.
		 */
		wchan = td->td_wchan;
		sc = SC_LOOKUP(wchan);
		THREAD_LOCKPTR_ASSERT(td, &sc->sc_lock);
		sq = sleepq_lookup(wchan);
		MPASS(sq != NULL);
		td->td_flags |= TDF_TIMEOUT;
		wakeup_swapper = sleepq_resume_thread(sq, td, 0, 0);
		if (wakeup_swapper)
			kick_proc0();
		return;
	} else if (TD_ON_SLEEPQ(td)) {
		/*
		 * If the thread is on the SLEEPQ but isn't sleeping
		 * yet, it can either be on another CPU in between
		 * sleepq_add() and one of the sleepq_*wait*()
		 * routines or it can be in sleepq_catch_signals().
		 */
		td->td_flags |= TDF_TIMEOUT;
	}
	thread_unlock(td);
}

/*
 * Resumes a specific thread from the sleep queue associated with a specific
 * wait channel if it is on that queue.
 */
void
sleepq_remove(struct thread *td, const void *wchan)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	int wakeup_swapper;

	/*
	 * Look up the sleep queue for this wait channel, then re-check
	 * that the thread is asleep on that channel; if it is not, bail.
	 */
	MPASS(wchan != NULL);
	sc = SC_LOOKUP(wchan);
	mtx_lock_spin(&sc->sc_lock);
	/*
	 * We cannot lock the thread here as it may be sleeping on a
	 * different sleepq.  However, holding the sleepq lock for this
	 * wchan can guarantee that we do not miss a wakeup for this
	 * channel.  The asserts below will catch any false positives.
	 */
	if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) {
		mtx_unlock_spin(&sc->sc_lock);
		return;
	}

	/* Thread is asleep on sleep queue sq, so wake it up. */
	sq = sleepq_lookup(wchan);
	MPASS(sq != NULL);
	MPASS(td->td_wchan == wchan);
	wakeup_swapper = sleepq_resume_thread(sq, td, 0, 0);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * Abort a thread as if an interrupt had occurred.  Only abort
 * interruptible waits (unfortunately it isn't safe to abort others).
 *
 * Requires thread lock on entry, releases on return.
 */
int
sleepq_abort(struct thread *td, int intrval)
{
	struct sleepqueue *sq;
	const void *wchan;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_flags & TDF_SINTR);
	MPASS((intrval == 0 && (td->td_flags & TDF_SIGWAIT) != 0) ||
	    intrval == EINTR || intrval == ERESTART);

	/*
	 * If the TDF_TIMEOUT flag is set, just leave.  A
	 * timeout is scheduled anyhow.
	 */
	if (td->td_flags & TDF_TIMEOUT) {
		thread_unlock(td);
		return (0);
	}

	CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
	td->td_intrval = intrval;

	/*
	 * If the thread has not slept yet it will find the signal in
	 * sleepq_catch_signals() and call sleepq_resume_thread().  Otherwise
	 * we have to do it here.
	 */
	if (!TD_IS_SLEEPING(td)) {
		thread_unlock(td);
		return (0);
	}
	wchan = td->td_wchan;
	MPASS(wchan != NULL);
	sq = sleepq_lookup(wchan);
	MPASS(sq != NULL);

	/* Thread is asleep on sleep queue sq, so wake it up. */
	return (sleepq_resume_thread(sq, td, 0, 0));
}

void
sleepq_chains_remove_matching(bool (*matches)(struct thread *))
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq, *sq1;
	int i, wakeup_swapper;

	wakeup_swapper = 0;
	for (sc = &sleepq_chains[0]; sc < sleepq_chains + SC_TABLESIZE; ++sc) {
		if (LIST_EMPTY(&sc->sc_queues)) {
			continue;
		}
		mtx_lock_spin(&sc->sc_lock);
		LIST_FOREACH_SAFE(sq, &sc->sc_queues, sq_hash, sq1) {
			for (i = 0; i < NR_SLEEPQS; ++i) {
				wakeup_swapper |= sleepq_remove_matching(sq, i,
				    matches, 0);
			}
		}
		mtx_unlock_spin(&sc->sc_lock);
	}
	if (wakeup_swapper) {
		kick_proc0();
	}
}

/*
 * Prints the stacks of all threads presently sleeping on wchan/queue to
 * the sbuf sb.  Sets count_stacks_printed to the number of stacks actually
 * printed.  Typically, this will equal the number of threads sleeping on the
 * queue, but may be less if sb overflowed before all stacks were printed.
 */
#ifdef STACK
int
sleepq_sbuf_print_stacks(struct sbuf *sb, const void *wchan, int queue,
    int *count_stacks_printed)
{
	struct thread *td, *td_next;
	struct sleepqueue *sq;
	struct stack **st;
	struct sbuf **td_infos;
	int i, stack_idx, error, stacks_to_allocate;
	bool finished;

	error = 0;
	finished = false;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));

	stacks_to_allocate = 10;
	for (i = 0; i < 3 && !finished; i++) {
		/* We cannot malloc while holding the queue's spinlock, so
		 * we do our mallocs now, and hope it is enough.  If it
		 * isn't, we will free these, drop the lock, malloc more,
		 * and try again, up to a point.  After that point we will
		 * give up and report ENOMEM.  We also cannot write to sb
		 * during this time since the client may have set the
		 * SBUF_AUTOEXTEND flag on their sbuf, which could cause a
		 * malloc as we print to it.  So we defer actually printing
		 * to sb until after we drop the spinlock.
		 */

		/* Where we will store the stacks. */
		st = malloc(sizeof(struct stack *) * stacks_to_allocate,
		    M_TEMP, M_WAITOK);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			st[stack_idx] = stack_create(M_WAITOK);

		/* Where we will store the td name, tid, etc. */
		td_infos = malloc(sizeof(struct sbuf *) * stacks_to_allocate,
		    M_TEMP, M_WAITOK);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			td_infos[stack_idx] = sbuf_new(NULL, NULL,
			    MAXCOMLEN + sizeof(struct thread *) * 2 + 40,
			    SBUF_FIXEDLEN);

		sleepq_lock(wchan);
		sq = sleepq_lookup(wchan);
		if (sq == NULL) {
			/* This sleepq does not exist; exit and return ENOENT. */
			error = ENOENT;
			finished = true;
			sleepq_release(wchan);
			goto loop_end;
		}

		stack_idx = 0;
		/* Save thread info */
		TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq,
		    td_next) {
			if (stack_idx >= stacks_to_allocate)
				goto loop_end;

			/* Note the td_lock is equal to the sleepq_lock here. */
			(void)stack_save_td(st[stack_idx], td);

			sbuf_printf(td_infos[stack_idx], "%d: %s %p",
			    td->td_tid, td->td_name, td);

			++stack_idx;
		}

		finished = true;
		sleepq_release(wchan);

		/* Print the stacks */
		for (i = 0; i < stack_idx; i++) {
			sbuf_finish(td_infos[i]);
			sbuf_printf(sb, "--- thread %s: ---\n",
			    sbuf_data(td_infos[i]));
			stack_sbuf_print(sb, st[i]);
			sbuf_putc(sb, '\n');

			error = sbuf_error(sb);
			if (error == 0)
				*count_stacks_printed = stack_idx;
		}

loop_end:
		if (!finished)
			sleepq_release(wchan);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			stack_destroy(st[stack_idx]);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			sbuf_delete(td_infos[stack_idx]);
		free(st, M_TEMP);
		free(td_infos, M_TEMP);
		stacks_to_allocate *= 10;
	}

	if (!finished && error == 0)
		error = ENOMEM;

	return (error);
}
#endif

#ifdef SLEEPQUEUE_PROFILING
#define	SLEEPQ_PROF_LOCATIONS	1024
#define	SLEEPQ_SBUFSIZE		512
struct sleepq_prof {
	LIST_ENTRY(sleepq_prof) sp_link;
	const char	*sp_wmesg;
	long		sp_count;
};

LIST_HEAD(sqphead, sleepq_prof);

struct sqphead sleepq_prof_free;
struct sqphead sleepq_hash[SC_TABLESIZE];
static struct sleepq_prof sleepq_profent[SLEEPQ_PROF_LOCATIONS];
static struct mtx sleepq_prof_lock;
MTX_SYSINIT(sleepq_prof_lock, &sleepq_prof_lock, "sleepq_prof", MTX_SPIN);

static void
sleepq_profile(const char *wmesg)
{
	struct sleepq_prof *sp;

	mtx_lock_spin(&sleepq_prof_lock);
	if (prof_enabled == 0)
		goto unlock;
	LIST_FOREACH(sp, &sleepq_hash[SC_HASH(wmesg)], sp_link)
		if (sp->sp_wmesg == wmesg)
			goto done;
	sp = LIST_FIRST(&sleepq_prof_free);
	if (sp == NULL)
		goto unlock;
	sp->sp_wmesg = wmesg;
	LIST_REMOVE(sp, sp_link);
	LIST_INSERT_HEAD(&sleepq_hash[SC_HASH(wmesg)], sp, sp_link);
done:
	sp->sp_count++;
unlock:
	mtx_unlock_spin(&sleepq_prof_lock);
	return;
}

static void
sleepq_prof_reset(void)
{
	struct sleepq_prof *sp;
	int enabled;
	int i;

	mtx_lock_spin(&sleepq_prof_lock);
	enabled = prof_enabled;
	prof_enabled = 0;
	for (i = 0; i < SC_TABLESIZE; i++)
		LIST_INIT(&sleepq_hash[i]);
	LIST_INIT(&sleepq_prof_free);
	for (i = 0; i < SLEEPQ_PROF_LOCATIONS; i++) {
		sp = &sleepq_profent[i];
		sp->sp_wmesg = NULL;
		sp->sp_count = 0;
		LIST_INSERT_HEAD(&sleepq_prof_free, sp, sp_link);
	}
	prof_enabled = enabled;
	mtx_unlock_spin(&sleepq_prof_lock);
}

static int
enable_sleepq_prof(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = prof_enabled;
	error = sysctl_handle_int(oidp, &v, v, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == prof_enabled)
		return (0);
	if (v == 1)
		sleepq_prof_reset();
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = !!v;
	mtx_unlock_spin(&sleepq_prof_lock);

	return (0);
}

static int
reset_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = 0;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == 0)
		return (0);
	sleepq_prof_reset();

	return (0);
}

static int
dump_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sleepq_prof *sp;
	struct sbuf *sb;
	int enabled;
	int error;
	int i;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sb = sbuf_new_for_sysctl(NULL, NULL, SLEEPQ_SBUFSIZE, req);
	sbuf_cat(sb, "\nwmesg\tcount\n");
	enabled = prof_enabled;
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = 0;
	mtx_unlock_spin(&sleepq_prof_lock);
	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_FOREACH(sp, &sleepq_hash[i], sp_link) {
			sbuf_printf(sb, "%s\t%ld\n",
			    sp->sp_wmesg, sp->sp_count);
		}
	}
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = enabled;
	mtx_unlock_spin(&sleepq_prof_lock);

	error = sbuf_finish(sb);
	sbuf_delete(sb);
	return (error);
}

SYSCTL_PROC(_debug_sleepq, OID_AUTO, stats,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, NULL, 0,
    dump_sleepq_prof_stats, "A",
    "Sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, reset,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
    reset_sleepq_prof_stats, "I",
    "Reset sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, enable,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
    enable_sleepq_prof, "I",
    "Enable sleepqueue profiling");
#endif

#ifdef DDB
DB_SHOW_COMMAND(sleepq, db_show_sleepqueue)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
#ifdef INVARIANTS
	struct lock_object *lock;
#endif
	struct thread *td;
	void *wchan;
	int i;

	if (!have_addr)
		return;

	/*
	 * First, see if there is an active sleep queue for the wait channel
	 * indicated by the address.
	 */
	wchan = (void *)addr;
	sc = SC_LOOKUP(wchan);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			goto found;

	/*
	 * Second, see if there is an active sleep queue at the address
	 * indicated.
	 */
	for (i = 0; i < SC_TABLESIZE; i++)
		LIST_FOREACH(sq, &sleepq_chains[i].sc_queues, sq_hash) {
			if (sq == (struct sleepqueue *)addr)
				goto found;
		}

	db_printf("Unable to locate a sleep queue via %p\n", (void *)addr);
	return;
found:
	db_printf("Wait channel: %p\n", sq->sq_wchan);
	db_printf("Queue type: %d\n", sq->sq_type);
#ifdef INVARIANTS
	if (sq->sq_lock) {
		lock = sq->sq_lock;
		db_printf("Associated Interlock: %p - (%s) %s\n", lock,
		    LOCK_CLASS(lock)->lc_name, lock->lo_name);
	}
#endif
	db_printf("Blocked threads:\n");
	for (i = 0; i < NR_SLEEPQS; i++) {
		db_printf("\nQueue[%d]:\n", i);
		if (TAILQ_EMPTY(&sq->sq_blocked[i]))
			db_printf("\tempty\n");
		else
			TAILQ_FOREACH(td, &sq->sq_blocked[i],
			    td_slpq) {
				db_printf("\t%p (tid %d, pid %d, \"%s\")\n", td,
				    td->td_tid, td->td_proc->p_pid,
				    td->td_name);
			}
		db_printf("(expected: %u)\n", sq->sq_blockedcnt[i]);
	}
}

/* Alias 'show sleepqueue' to 'show sleepq'. */
DB_SHOW_ALIAS(sleepqueue, db_show_sleepqueue);
#endif