1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org> 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28 /* 29 * Implementation of sleep queues used to hold queue of threads blocked on 30 * a wait channel. Sleep queues are different from turnstiles in that wait 31 * channels are not owned by anyone, so there is no priority propagation. 32 * Sleep queues can also provide a timeout and can also be interrupted by 33 * signals. That said, there are several similarities between the turnstile 34 * and sleep queue implementations. (Note: turnstiles were implemented 35 * first.) For example, both use a hash table of the same size where each 36 * bucket is referred to as a "chain" that contains both a spin lock and 37 * a linked list of queues. An individual queue is located by using a hash 38 * to pick a chain, locking the chain, and then walking the chain searching 39 * for the queue. This means that a wait channel object does not need to 40 * embed its queue head just as locks do not embed their turnstile queue 41 * head. Threads also carry around a sleep queue that they lend to the 42 * wait channel when blocking. Just as in turnstiles, the queue includes 43 * a free list of the sleep queues of other threads blocked on the same 44 * wait channel in the case of multiple waiters. 45 * 46 * Some additional functionality provided by sleep queues include the 47 * ability to set a timeout. The timeout is managed using a per-thread 48 * callout that resumes a thread if it is asleep. A thread may also 49 * catch signals while it is asleep (aka an interruptible sleep). The 50 * signal code uses sleepq_abort() to interrupt a sleeping thread. Finally, 51 * sleep queues also provide some extra assertions. One is not allowed to 52 * mix the sleep/wakeup and cv APIs for a given wait channel. Also, one 53 * must consistently use the same lock to synchronize with a wait channel, 54 * though this check is currently only a warning for sleep/wakeup due to 55 * pre-existing abuse of that API. The same lock must also be held when 56 * awakening threads, though that is currently only enforced for condition 57 * variables. 58 */ 59 60 #include <sys/cdefs.h> 61 __FBSDID("$FreeBSD$"); 62 63 #include "opt_sleepqueue_profiling.h" 64 #include "opt_ddb.h" 65 #include "opt_sched.h" 66 #include "opt_stack.h" 67 68 #include <sys/param.h> 69 #include <sys/systm.h> 70 #include <sys/lock.h> 71 #include <sys/kernel.h> 72 #include <sys/ktr.h> 73 #include <sys/mutex.h> 74 #include <sys/proc.h> 75 #include <sys/sbuf.h> 76 #include <sys/sched.h> 77 #include <sys/sdt.h> 78 #include <sys/signalvar.h> 79 #include <sys/sleepqueue.h> 80 #include <sys/stack.h> 81 #include <sys/sysctl.h> 82 #include <sys/time.h> 83 #ifdef EPOCH_TRACE 84 #include <sys/epoch.h> 85 #endif 86 87 #include <machine/atomic.h> 88 89 #include <vm/uma.h> 90 91 #ifdef DDB 92 #include <ddb/ddb.h> 93 #endif 94 95 96 /* 97 * Constants for the hash table of sleep queue chains. 98 * SC_TABLESIZE must be a power of two for SC_MASK to work properly. 99 */ 100 #ifndef SC_TABLESIZE 101 #define SC_TABLESIZE 256 102 #endif 103 CTASSERT(powerof2(SC_TABLESIZE)); 104 #define SC_MASK (SC_TABLESIZE - 1) 105 #define SC_SHIFT 8 106 #define SC_HASH(wc) ((((uintptr_t)(wc) >> SC_SHIFT) ^ (uintptr_t)(wc)) & \ 107 SC_MASK) 108 #define SC_LOOKUP(wc) &sleepq_chains[SC_HASH(wc)] 109 #define NR_SLEEPQS 2 110 /* 111 * There are two different lists of sleep queues. Both lists are connected 112 * via the sq_hash entries. The first list is the sleep queue chain list 113 * that a sleep queue is on when it is attached to a wait channel. The 114 * second list is the free list hung off of a sleep queue that is attached 115 * to a wait channel. 116 * 117 * Each sleep queue also contains the wait channel it is attached to, the 118 * list of threads blocked on that wait channel, flags specific to the 119 * wait channel, and the lock used to synchronize with a wait channel. 120 * The flags are used to catch mismatches between the various consumers 121 * of the sleep queue API (e.g. sleep/wakeup and condition variables). 122 * The lock pointer is only used when invariants are enabled for various 123 * debugging checks. 124 * 125 * Locking key: 126 * c - sleep queue chain lock 127 */ 128 struct sleepqueue { 129 struct threadqueue sq_blocked[NR_SLEEPQS]; /* (c) Blocked threads. */ 130 u_int sq_blockedcnt[NR_SLEEPQS]; /* (c) N. of blocked threads. */ 131 LIST_ENTRY(sleepqueue) sq_hash; /* (c) Chain and free list. */ 132 LIST_HEAD(, sleepqueue) sq_free; /* (c) Free queues. */ 133 void *sq_wchan; /* (c) Wait channel. */ 134 int sq_type; /* (c) Queue type. */ 135 #ifdef INVARIANTS 136 struct lock_object *sq_lock; /* (c) Associated lock. */ 137 #endif 138 }; 139 140 struct sleepqueue_chain { 141 LIST_HEAD(, sleepqueue) sc_queues; /* List of sleep queues. */ 142 struct mtx sc_lock; /* Spin lock for this chain. */ 143 #ifdef SLEEPQUEUE_PROFILING 144 u_int sc_depth; /* Length of sc_queues. */ 145 u_int sc_max_depth; /* Max length of sc_queues. */ 146 #endif 147 } __aligned(CACHE_LINE_SIZE); 148 149 #ifdef SLEEPQUEUE_PROFILING 150 u_int sleepq_max_depth; 151 static SYSCTL_NODE(_debug, OID_AUTO, sleepq, CTLFLAG_RD, 0, "sleepq profiling"); 152 static SYSCTL_NODE(_debug_sleepq, OID_AUTO, chains, CTLFLAG_RD, 0, 153 "sleepq chain stats"); 154 SYSCTL_UINT(_debug_sleepq, OID_AUTO, max_depth, CTLFLAG_RD, &sleepq_max_depth, 155 0, "maxmimum depth achieved of a single chain"); 156 157 static void sleepq_profile(const char *wmesg); 158 static int prof_enabled; 159 #endif 160 static struct sleepqueue_chain sleepq_chains[SC_TABLESIZE]; 161 static uma_zone_t sleepq_zone; 162 163 /* 164 * Prototypes for non-exported routines. 165 */ 166 static int sleepq_catch_signals(void *wchan, int pri); 167 static int sleepq_check_signals(void); 168 static int sleepq_check_timeout(void); 169 #ifdef INVARIANTS 170 static void sleepq_dtor(void *mem, int size, void *arg); 171 #endif 172 static int sleepq_init(void *mem, int size, int flags); 173 static int sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, 174 int pri, int srqflags); 175 static void sleepq_remove_thread(struct sleepqueue *sq, struct thread *td); 176 static void sleepq_switch(void *wchan, int pri); 177 static void sleepq_timeout(void *arg); 178 179 SDT_PROBE_DECLARE(sched, , , sleep); 180 SDT_PROBE_DECLARE(sched, , , wakeup); 181 182 /* 183 * Initialize SLEEPQUEUE_PROFILING specific sysctl nodes. 184 * Note that it must happen after sleepinit() has been fully executed, so 185 * it must happen after SI_SUB_KMEM SYSINIT() subsystem setup. 186 */ 187 #ifdef SLEEPQUEUE_PROFILING 188 static void 189 init_sleepqueue_profiling(void) 190 { 191 char chain_name[10]; 192 struct sysctl_oid *chain_oid; 193 u_int i; 194 195 for (i = 0; i < SC_TABLESIZE; i++) { 196 snprintf(chain_name, sizeof(chain_name), "%u", i); 197 chain_oid = SYSCTL_ADD_NODE(NULL, 198 SYSCTL_STATIC_CHILDREN(_debug_sleepq_chains), OID_AUTO, 199 chain_name, CTLFLAG_RD, NULL, "sleepq chain stats"); 200 SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO, 201 "depth", CTLFLAG_RD, &sleepq_chains[i].sc_depth, 0, NULL); 202 SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO, 203 "max_depth", CTLFLAG_RD, &sleepq_chains[i].sc_max_depth, 0, 204 NULL); 205 } 206 } 207 208 SYSINIT(sleepqueue_profiling, SI_SUB_LOCK, SI_ORDER_ANY, 209 init_sleepqueue_profiling, NULL); 210 #endif 211 212 /* 213 * Early initialization of sleep queues that is called from the sleepinit() 214 * SYSINIT. 215 */ 216 void 217 init_sleepqueues(void) 218 { 219 int i; 220 221 for (i = 0; i < SC_TABLESIZE; i++) { 222 LIST_INIT(&sleepq_chains[i].sc_queues); 223 mtx_init(&sleepq_chains[i].sc_lock, "sleepq chain", NULL, 224 MTX_SPIN); 225 } 226 sleepq_zone = uma_zcreate("SLEEPQUEUE", sizeof(struct sleepqueue), 227 #ifdef INVARIANTS 228 NULL, sleepq_dtor, sleepq_init, NULL, UMA_ALIGN_CACHE, 0); 229 #else 230 NULL, NULL, sleepq_init, NULL, UMA_ALIGN_CACHE, 0); 231 #endif 232 233 thread0.td_sleepqueue = sleepq_alloc(); 234 } 235 236 /* 237 * Get a sleep queue for a new thread. 238 */ 239 struct sleepqueue * 240 sleepq_alloc(void) 241 { 242 243 return (uma_zalloc(sleepq_zone, M_WAITOK)); 244 } 245 246 /* 247 * Free a sleep queue when a thread is destroyed. 248 */ 249 void 250 sleepq_free(struct sleepqueue *sq) 251 { 252 253 uma_zfree(sleepq_zone, sq); 254 } 255 256 /* 257 * Lock the sleep queue chain associated with the specified wait channel. 258 */ 259 void 260 sleepq_lock(void *wchan) 261 { 262 struct sleepqueue_chain *sc; 263 264 sc = SC_LOOKUP(wchan); 265 mtx_lock_spin(&sc->sc_lock); 266 } 267 268 /* 269 * Look up the sleep queue associated with a given wait channel in the hash 270 * table locking the associated sleep queue chain. If no queue is found in 271 * the table, NULL is returned. 272 */ 273 struct sleepqueue * 274 sleepq_lookup(void *wchan) 275 { 276 struct sleepqueue_chain *sc; 277 struct sleepqueue *sq; 278 279 KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__)); 280 sc = SC_LOOKUP(wchan); 281 mtx_assert(&sc->sc_lock, MA_OWNED); 282 LIST_FOREACH(sq, &sc->sc_queues, sq_hash) 283 if (sq->sq_wchan == wchan) 284 return (sq); 285 return (NULL); 286 } 287 288 /* 289 * Unlock the sleep queue chain associated with a given wait channel. 290 */ 291 void 292 sleepq_release(void *wchan) 293 { 294 struct sleepqueue_chain *sc; 295 296 sc = SC_LOOKUP(wchan); 297 mtx_unlock_spin(&sc->sc_lock); 298 } 299 300 /* 301 * Places the current thread on the sleep queue for the specified wait 302 * channel. If INVARIANTS is enabled, then it associates the passed in 303 * lock with the sleepq to make sure it is held when that sleep queue is 304 * woken up. 305 */ 306 void 307 sleepq_add(void *wchan, struct lock_object *lock, const char *wmesg, int flags, 308 int queue) 309 { 310 struct sleepqueue_chain *sc; 311 struct sleepqueue *sq; 312 struct thread *td; 313 314 td = curthread; 315 sc = SC_LOOKUP(wchan); 316 mtx_assert(&sc->sc_lock, MA_OWNED); 317 MPASS(td->td_sleepqueue != NULL); 318 MPASS(wchan != NULL); 319 MPASS((queue >= 0) && (queue < NR_SLEEPQS)); 320 321 /* If this thread is not allowed to sleep, die a horrible death. */ 322 if (__predict_false(!THREAD_CAN_SLEEP())) { 323 #ifdef EPOCH_TRACE 324 epoch_trace_list(curthread); 325 #endif 326 KASSERT(1, 327 ("%s: td %p to sleep on wchan %p with sleeping prohibited", 328 __func__, td, wchan)); 329 } 330 331 /* Look up the sleep queue associated with the wait channel 'wchan'. */ 332 sq = sleepq_lookup(wchan); 333 334 /* 335 * If the wait channel does not already have a sleep queue, use 336 * this thread's sleep queue. Otherwise, insert the current thread 337 * into the sleep queue already in use by this wait channel. 338 */ 339 if (sq == NULL) { 340 #ifdef INVARIANTS 341 int i; 342 343 sq = td->td_sleepqueue; 344 for (i = 0; i < NR_SLEEPQS; i++) { 345 KASSERT(TAILQ_EMPTY(&sq->sq_blocked[i]), 346 ("thread's sleep queue %d is not empty", i)); 347 KASSERT(sq->sq_blockedcnt[i] == 0, 348 ("thread's sleep queue %d count mismatches", i)); 349 } 350 KASSERT(LIST_EMPTY(&sq->sq_free), 351 ("thread's sleep queue has a non-empty free list")); 352 KASSERT(sq->sq_wchan == NULL, ("stale sq_wchan pointer")); 353 sq->sq_lock = lock; 354 #endif 355 #ifdef SLEEPQUEUE_PROFILING 356 sc->sc_depth++; 357 if (sc->sc_depth > sc->sc_max_depth) { 358 sc->sc_max_depth = sc->sc_depth; 359 if (sc->sc_max_depth > sleepq_max_depth) 360 sleepq_max_depth = sc->sc_max_depth; 361 } 362 #endif 363 sq = td->td_sleepqueue; 364 LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash); 365 sq->sq_wchan = wchan; 366 sq->sq_type = flags & SLEEPQ_TYPE; 367 } else { 368 MPASS(wchan == sq->sq_wchan); 369 MPASS(lock == sq->sq_lock); 370 MPASS((flags & SLEEPQ_TYPE) == sq->sq_type); 371 LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash); 372 } 373 thread_lock(td); 374 TAILQ_INSERT_TAIL(&sq->sq_blocked[queue], td, td_slpq); 375 sq->sq_blockedcnt[queue]++; 376 td->td_sleepqueue = NULL; 377 td->td_sqqueue = queue; 378 td->td_wchan = wchan; 379 td->td_wmesg = wmesg; 380 if (flags & SLEEPQ_INTERRUPTIBLE) { 381 td->td_flags |= TDF_SINTR; 382 td->td_flags &= ~TDF_SLEEPABORT; 383 } 384 thread_unlock(td); 385 } 386 387 /* 388 * Sets a timeout that will remove the current thread from the specified 389 * sleep queue after timo ticks if the thread has not already been awakened. 390 */ 391 void 392 sleepq_set_timeout_sbt(void *wchan, sbintime_t sbt, sbintime_t pr, 393 int flags) 394 { 395 struct sleepqueue_chain *sc __unused; 396 struct thread *td; 397 sbintime_t pr1; 398 399 td = curthread; 400 sc = SC_LOOKUP(wchan); 401 mtx_assert(&sc->sc_lock, MA_OWNED); 402 MPASS(TD_ON_SLEEPQ(td)); 403 MPASS(td->td_sleepqueue == NULL); 404 MPASS(wchan != NULL); 405 if (cold && td == &thread0) 406 panic("timed sleep before timers are working"); 407 KASSERT(td->td_sleeptimo == 0, ("td %d %p td_sleeptimo %jx", 408 td->td_tid, td, (uintmax_t)td->td_sleeptimo)); 409 thread_lock(td); 410 callout_when(sbt, pr, flags, &td->td_sleeptimo, &pr1); 411 thread_unlock(td); 412 callout_reset_sbt_on(&td->td_slpcallout, td->td_sleeptimo, pr1, 413 sleepq_timeout, td, PCPU_GET(cpuid), flags | C_PRECALC | 414 C_DIRECT_EXEC); 415 } 416 417 /* 418 * Return the number of actual sleepers for the specified queue. 419 */ 420 u_int 421 sleepq_sleepcnt(void *wchan, int queue) 422 { 423 struct sleepqueue *sq; 424 425 KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__)); 426 MPASS((queue >= 0) && (queue < NR_SLEEPQS)); 427 sq = sleepq_lookup(wchan); 428 if (sq == NULL) 429 return (0); 430 return (sq->sq_blockedcnt[queue]); 431 } 432 433 /* 434 * Marks the pending sleep of the current thread as interruptible and 435 * makes an initial check for pending signals before putting a thread 436 * to sleep. Enters and exits with the thread lock held. Thread lock 437 * may have transitioned from the sleepq lock to a run lock. 438 */ 439 static int 440 sleepq_catch_signals(void *wchan, int pri) 441 { 442 struct sleepqueue_chain *sc; 443 struct sleepqueue *sq; 444 struct thread *td; 445 struct proc *p; 446 struct sigacts *ps; 447 int sig, ret; 448 449 ret = 0; 450 td = curthread; 451 p = curproc; 452 sc = SC_LOOKUP(wchan); 453 mtx_assert(&sc->sc_lock, MA_OWNED); 454 MPASS(wchan != NULL); 455 if ((td->td_pflags & TDP_WAKEUP) != 0) { 456 td->td_pflags &= ~TDP_WAKEUP; 457 ret = EINTR; 458 thread_lock(td); 459 goto out; 460 } 461 462 /* 463 * See if there are any pending signals or suspension requests for this 464 * thread. If not, we can switch immediately. 465 */ 466 thread_lock(td); 467 if ((td->td_flags & (TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK)) != 0) { 468 thread_unlock(td); 469 mtx_unlock_spin(&sc->sc_lock); 470 CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %ld, %s)", 471 (void *)td, (long)p->p_pid, td->td_name); 472 PROC_LOCK(p); 473 /* 474 * Check for suspension first. Checking for signals and then 475 * suspending could result in a missed signal, since a signal 476 * can be delivered while this thread is suspended. 477 */ 478 if ((td->td_flags & TDF_NEEDSUSPCHK) != 0) { 479 ret = thread_suspend_check(1); 480 MPASS(ret == 0 || ret == EINTR || ret == ERESTART); 481 if (ret != 0) { 482 PROC_UNLOCK(p); 483 mtx_lock_spin(&sc->sc_lock); 484 thread_lock(td); 485 goto out; 486 } 487 } 488 if ((td->td_flags & TDF_NEEDSIGCHK) != 0) { 489 ps = p->p_sigacts; 490 mtx_lock(&ps->ps_mtx); 491 sig = cursig(td); 492 if (sig == -1) { 493 mtx_unlock(&ps->ps_mtx); 494 KASSERT((td->td_flags & TDF_SBDRY) != 0, 495 ("lost TDF_SBDRY")); 496 KASSERT(TD_SBDRY_INTR(td), 497 ("lost TDF_SERESTART of TDF_SEINTR")); 498 KASSERT((td->td_flags & 499 (TDF_SEINTR | TDF_SERESTART)) != 500 (TDF_SEINTR | TDF_SERESTART), 501 ("both TDF_SEINTR and TDF_SERESTART")); 502 ret = TD_SBDRY_ERRNO(td); 503 } else if (sig != 0) { 504 ret = SIGISMEMBER(ps->ps_sigintr, sig) ? 505 EINTR : ERESTART; 506 mtx_unlock(&ps->ps_mtx); 507 } else { 508 mtx_unlock(&ps->ps_mtx); 509 } 510 511 /* 512 * Do not go into sleep if this thread was the 513 * ptrace(2) attach leader. cursig() consumed 514 * SIGSTOP from PT_ATTACH, but we usually act 515 * on the signal by interrupting sleep, and 516 * should do that here as well. 517 */ 518 if ((td->td_dbgflags & TDB_FSTP) != 0) { 519 if (ret == 0) 520 ret = EINTR; 521 td->td_dbgflags &= ~TDB_FSTP; 522 } 523 } 524 /* 525 * Lock the per-process spinlock prior to dropping the PROC_LOCK 526 * to avoid a signal delivery race. PROC_LOCK, PROC_SLOCK, and 527 * thread_lock() are currently held in tdsendsignal(). 528 */ 529 PROC_SLOCK(p); 530 mtx_lock_spin(&sc->sc_lock); 531 PROC_UNLOCK(p); 532 thread_lock(td); 533 PROC_SUNLOCK(p); 534 } 535 if (ret == 0) { 536 sleepq_switch(wchan, pri); 537 return (0); 538 } 539 out: 540 /* 541 * There were pending signals and this thread is still 542 * on the sleep queue, remove it from the sleep queue. 543 */ 544 if (TD_ON_SLEEPQ(td)) { 545 sq = sleepq_lookup(wchan); 546 sleepq_remove_thread(sq, td); 547 } 548 mtx_unlock_spin(&sc->sc_lock); 549 MPASS(td->td_lock != &sc->sc_lock); 550 return (ret); 551 } 552 553 /* 554 * Switches to another thread if we are still asleep on a sleep queue. 555 * Returns with thread lock. 556 */ 557 static void 558 sleepq_switch(void *wchan, int pri) 559 { 560 struct sleepqueue_chain *sc; 561 struct sleepqueue *sq; 562 struct thread *td; 563 bool rtc_changed; 564 565 td = curthread; 566 sc = SC_LOOKUP(wchan); 567 mtx_assert(&sc->sc_lock, MA_OWNED); 568 THREAD_LOCK_ASSERT(td, MA_OWNED); 569 570 /* 571 * If we have a sleep queue, then we've already been woken up, so 572 * just return. 573 */ 574 if (td->td_sleepqueue != NULL) { 575 mtx_unlock_spin(&sc->sc_lock); 576 return; 577 } 578 579 /* 580 * If TDF_TIMEOUT is set, then our sleep has been timed out 581 * already but we are still on the sleep queue, so dequeue the 582 * thread and return. 583 * 584 * Do the same if the real-time clock has been adjusted since this 585 * thread calculated its timeout based on that clock. This handles 586 * the following race: 587 * - The Ts thread needs to sleep until an absolute real-clock time. 588 * It copies the global rtc_generation into curthread->td_rtcgen, 589 * reads the RTC, and calculates a sleep duration based on that time. 590 * See umtxq_sleep() for an example. 591 * - The Tc thread adjusts the RTC, bumps rtc_generation, and wakes 592 * threads that are sleeping until an absolute real-clock time. 593 * See tc_setclock() and the POSIX specification of clock_settime(). 594 * - Ts reaches the code below. It holds the sleepqueue chain lock, 595 * so Tc has finished waking, so this thread must test td_rtcgen. 596 * (The declaration of td_rtcgen refers to this comment.) 597 */ 598 rtc_changed = td->td_rtcgen != 0 && td->td_rtcgen != rtc_generation; 599 if ((td->td_flags & TDF_TIMEOUT) || rtc_changed) { 600 if (rtc_changed) { 601 td->td_rtcgen = 0; 602 } 603 MPASS(TD_ON_SLEEPQ(td)); 604 sq = sleepq_lookup(wchan); 605 sleepq_remove_thread(sq, td); 606 mtx_unlock_spin(&sc->sc_lock); 607 return; 608 } 609 #ifdef SLEEPQUEUE_PROFILING 610 if (prof_enabled) 611 sleepq_profile(td->td_wmesg); 612 #endif 613 MPASS(td->td_sleepqueue == NULL); 614 sched_sleep(td, pri); 615 thread_lock_set(td, &sc->sc_lock); 616 SDT_PROBE0(sched, , , sleep); 617 TD_SET_SLEEPING(td); 618 mi_switch(SW_VOL | SWT_SLEEPQ, NULL); 619 KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING")); 620 CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)", 621 (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name); 622 } 623 624 /* 625 * Check to see if we timed out. 626 */ 627 static int 628 sleepq_check_timeout(void) 629 { 630 struct thread *td; 631 int res; 632 633 td = curthread; 634 THREAD_LOCK_ASSERT(td, MA_OWNED); 635 636 /* 637 * If TDF_TIMEOUT is set, we timed out. But recheck 638 * td_sleeptimo anyway. 639 */ 640 res = 0; 641 if (td->td_sleeptimo != 0) { 642 if (td->td_sleeptimo <= sbinuptime()) 643 res = EWOULDBLOCK; 644 td->td_sleeptimo = 0; 645 } 646 if (td->td_flags & TDF_TIMEOUT) 647 td->td_flags &= ~TDF_TIMEOUT; 648 else 649 /* 650 * We ignore the situation where timeout subsystem was 651 * unable to stop our callout. The struct thread is 652 * type-stable, the callout will use the correct 653 * memory when running. The checks of the 654 * td_sleeptimo value in this function and in 655 * sleepq_timeout() ensure that the thread does not 656 * get spurious wakeups, even if the callout was reset 657 * or thread reused. 658 */ 659 callout_stop(&td->td_slpcallout); 660 return (res); 661 } 662 663 /* 664 * Check to see if we were awoken by a signal. 665 */ 666 static int 667 sleepq_check_signals(void) 668 { 669 struct thread *td; 670 671 td = curthread; 672 THREAD_LOCK_ASSERT(td, MA_OWNED); 673 674 /* We are no longer in an interruptible sleep. */ 675 if (td->td_flags & TDF_SINTR) 676 td->td_flags &= ~TDF_SINTR; 677 678 if (td->td_flags & TDF_SLEEPABORT) { 679 td->td_flags &= ~TDF_SLEEPABORT; 680 return (td->td_intrval); 681 } 682 683 return (0); 684 } 685 686 /* 687 * Block the current thread until it is awakened from its sleep queue. 688 */ 689 void 690 sleepq_wait(void *wchan, int pri) 691 { 692 struct thread *td; 693 694 td = curthread; 695 MPASS(!(td->td_flags & TDF_SINTR)); 696 thread_lock(td); 697 sleepq_switch(wchan, pri); 698 thread_unlock(td); 699 } 700 701 /* 702 * Block the current thread until it is awakened from its sleep queue 703 * or it is interrupted by a signal. 704 */ 705 int 706 sleepq_wait_sig(void *wchan, int pri) 707 { 708 int rcatch; 709 int rval; 710 711 rcatch = sleepq_catch_signals(wchan, pri); 712 rval = sleepq_check_signals(); 713 thread_unlock(curthread); 714 if (rcatch) 715 return (rcatch); 716 return (rval); 717 } 718 719 /* 720 * Block the current thread until it is awakened from its sleep queue 721 * or it times out while waiting. 722 */ 723 int 724 sleepq_timedwait(void *wchan, int pri) 725 { 726 struct thread *td; 727 int rval; 728 729 td = curthread; 730 MPASS(!(td->td_flags & TDF_SINTR)); 731 thread_lock(td); 732 sleepq_switch(wchan, pri); 733 rval = sleepq_check_timeout(); 734 thread_unlock(td); 735 736 return (rval); 737 } 738 739 /* 740 * Block the current thread until it is awakened from its sleep queue, 741 * it is interrupted by a signal, or it times out waiting to be awakened. 742 */ 743 int 744 sleepq_timedwait_sig(void *wchan, int pri) 745 { 746 int rcatch, rvalt, rvals; 747 748 rcatch = sleepq_catch_signals(wchan, pri); 749 rvalt = sleepq_check_timeout(); 750 rvals = sleepq_check_signals(); 751 thread_unlock(curthread); 752 if (rcatch) 753 return (rcatch); 754 if (rvals) 755 return (rvals); 756 return (rvalt); 757 } 758 759 /* 760 * Returns the type of sleepqueue given a waitchannel. 761 */ 762 int 763 sleepq_type(void *wchan) 764 { 765 struct sleepqueue *sq; 766 int type; 767 768 MPASS(wchan != NULL); 769 770 sq = sleepq_lookup(wchan); 771 if (sq == NULL) 772 return (-1); 773 type = sq->sq_type; 774 775 return (type); 776 } 777 778 /* 779 * Removes a thread from a sleep queue and makes it 780 * runnable. 781 * 782 * Requires the sc chain locked on entry. If SRQ_HOLD is specified it will 783 * be locked on return. Returns without the thread lock held. 784 */ 785 static int 786 sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri, 787 int srqflags) 788 { 789 struct sleepqueue_chain *sc; 790 bool drop; 791 792 MPASS(td != NULL); 793 MPASS(sq->sq_wchan != NULL); 794 MPASS(td->td_wchan == sq->sq_wchan); 795 796 sc = SC_LOOKUP(sq->sq_wchan); 797 mtx_assert(&sc->sc_lock, MA_OWNED); 798 799 /* 800 * Avoid recursing on the chain lock. If the locks don't match we 801 * need to acquire the thread lock which setrunnable will drop for 802 * us. In this case we need to drop the chain lock afterwards. 803 * 804 * There is no race that will make td_lock equal to sc_lock because 805 * we hold sc_lock. 806 */ 807 drop = false; 808 if (!TD_IS_SLEEPING(td)) { 809 thread_lock(td); 810 drop = true; 811 } else 812 thread_lock_block_wait(td); 813 814 /* Remove thread from the sleepq. */ 815 sleepq_remove_thread(sq, td); 816 817 /* If we're done with the sleepqueue release it. */ 818 if ((srqflags & SRQ_HOLD) == 0 && drop) 819 mtx_unlock_spin(&sc->sc_lock); 820 821 /* Adjust priority if requested. */ 822 MPASS(pri == 0 || (pri >= PRI_MIN && pri <= PRI_MAX)); 823 if (pri != 0 && td->td_priority > pri && 824 PRI_BASE(td->td_pri_class) == PRI_TIMESHARE) 825 sched_prio(td, pri); 826 827 /* 828 * Note that thread td might not be sleeping if it is running 829 * sleepq_catch_signals() on another CPU or is blocked on its 830 * proc lock to check signals. There's no need to mark the 831 * thread runnable in that case. 832 */ 833 if (TD_IS_SLEEPING(td)) { 834 MPASS(!drop); 835 TD_CLR_SLEEPING(td); 836 return (setrunnable(td, srqflags)); 837 } 838 MPASS(drop); 839 thread_unlock(td); 840 841 return (0); 842 } 843 844 static void 845 sleepq_remove_thread(struct sleepqueue *sq, struct thread *td) 846 { 847 struct sleepqueue_chain *sc __unused; 848 849 MPASS(td != NULL); 850 MPASS(sq->sq_wchan != NULL); 851 MPASS(td->td_wchan == sq->sq_wchan); 852 MPASS(td->td_sqqueue < NR_SLEEPQS && td->td_sqqueue >= 0); 853 THREAD_LOCK_ASSERT(td, MA_OWNED); 854 sc = SC_LOOKUP(sq->sq_wchan); 855 mtx_assert(&sc->sc_lock, MA_OWNED); 856 857 SDT_PROBE2(sched, , , wakeup, td, td->td_proc); 858 859 /* Remove the thread from the queue. */ 860 sq->sq_blockedcnt[td->td_sqqueue]--; 861 TAILQ_REMOVE(&sq->sq_blocked[td->td_sqqueue], td, td_slpq); 862 863 /* 864 * Get a sleep queue for this thread. If this is the last waiter, 865 * use the queue itself and take it out of the chain, otherwise, 866 * remove a queue from the free list. 867 */ 868 if (LIST_EMPTY(&sq->sq_free)) { 869 td->td_sleepqueue = sq; 870 #ifdef INVARIANTS 871 sq->sq_wchan = NULL; 872 #endif 873 #ifdef SLEEPQUEUE_PROFILING 874 sc->sc_depth--; 875 #endif 876 } else 877 td->td_sleepqueue = LIST_FIRST(&sq->sq_free); 878 LIST_REMOVE(td->td_sleepqueue, sq_hash); 879 880 td->td_wmesg = NULL; 881 td->td_wchan = NULL; 882 td->td_flags &= ~TDF_SINTR; 883 884 CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %ld, %s)", 885 (void *)td, (long)td->td_proc->p_pid, td->td_name); 886 } 887 888 #ifdef INVARIANTS 889 /* 890 * UMA zone item deallocator. 891 */ 892 static void 893 sleepq_dtor(void *mem, int size, void *arg) 894 { 895 struct sleepqueue *sq; 896 int i; 897 898 sq = mem; 899 for (i = 0; i < NR_SLEEPQS; i++) { 900 MPASS(TAILQ_EMPTY(&sq->sq_blocked[i])); 901 MPASS(sq->sq_blockedcnt[i] == 0); 902 } 903 } 904 #endif 905 906 /* 907 * UMA zone item initializer. 908 */ 909 static int 910 sleepq_init(void *mem, int size, int flags) 911 { 912 struct sleepqueue *sq; 913 int i; 914 915 bzero(mem, size); 916 sq = mem; 917 for (i = 0; i < NR_SLEEPQS; i++) { 918 TAILQ_INIT(&sq->sq_blocked[i]); 919 sq->sq_blockedcnt[i] = 0; 920 } 921 LIST_INIT(&sq->sq_free); 922 return (0); 923 } 924 925 /* 926 * Find thread sleeping on a wait channel and resume it. 927 */ 928 int 929 sleepq_signal(void *wchan, int flags, int pri, int queue) 930 { 931 struct sleepqueue_chain *sc; 932 struct sleepqueue *sq; 933 struct threadqueue *head; 934 struct thread *td, *besttd; 935 int wakeup_swapper; 936 937 CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags); 938 KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__)); 939 MPASS((queue >= 0) && (queue < NR_SLEEPQS)); 940 sq = sleepq_lookup(wchan); 941 if (sq == NULL) 942 return (0); 943 KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE), 944 ("%s: mismatch between sleep/wakeup and cv_*", __func__)); 945 946 head = &sq->sq_blocked[queue]; 947 if (flags & SLEEPQ_UNFAIR) { 948 /* 949 * Find the most recently sleeping thread, but try to 950 * skip threads still in process of context switch to 951 * avoid spinning on the thread lock. 952 */ 953 sc = SC_LOOKUP(wchan); 954 besttd = TAILQ_LAST_FAST(head, thread, td_slpq); 955 while (besttd->td_lock != &sc->sc_lock) { 956 td = TAILQ_PREV_FAST(besttd, head, thread, td_slpq); 957 if (td == NULL) 958 break; 959 besttd = td; 960 } 961 } else { 962 /* 963 * Find the highest priority thread on the queue. If there 964 * is a tie, use the thread that first appears in the queue 965 * as it has been sleeping the longest since threads are 966 * always added to the tail of sleep queues. 967 */ 968 besttd = td = TAILQ_FIRST(head); 969 while ((td = TAILQ_NEXT(td, td_slpq)) != NULL) { 970 if (td->td_priority < besttd->td_priority) 971 besttd = td; 972 } 973 } 974 MPASS(besttd != NULL); 975 wakeup_swapper = sleepq_resume_thread(sq, besttd, pri, SRQ_HOLD); 976 return (wakeup_swapper); 977 } 978 979 static bool 980 match_any(struct thread *td __unused) 981 { 982 983 return (true); 984 } 985 986 /* 987 * Resume all threads sleeping on a specified wait channel. 988 */ 989 int 990 sleepq_broadcast(void *wchan, int flags, int pri, int queue) 991 { 992 struct sleepqueue *sq; 993 994 CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags); 995 KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__)); 996 MPASS((queue >= 0) && (queue < NR_SLEEPQS)); 997 sq = sleepq_lookup(wchan); 998 if (sq == NULL) 999 return (0); 1000 KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE), 1001 ("%s: mismatch between sleep/wakeup and cv_*", __func__)); 1002 1003 return (sleepq_remove_matching(sq, queue, match_any, pri)); 1004 } 1005 1006 /* 1007 * Resume threads on the sleep queue that match the given predicate. 1008 */ 1009 int 1010 sleepq_remove_matching(struct sleepqueue *sq, int queue, 1011 bool (*matches)(struct thread *), int pri) 1012 { 1013 struct thread *td, *tdn; 1014 int wakeup_swapper; 1015 1016 /* 1017 * The last thread will be given ownership of sq and may 1018 * re-enqueue itself before sleepq_resume_thread() returns, 1019 * so we must cache the "next" queue item at the beginning 1020 * of the final iteration. 1021 */ 1022 wakeup_swapper = 0; 1023 TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq, tdn) { 1024 if (matches(td)) 1025 wakeup_swapper |= sleepq_resume_thread(sq, td, pri, 1026 SRQ_HOLD); 1027 } 1028 1029 return (wakeup_swapper); 1030 } 1031 1032 /* 1033 * Time sleeping threads out. When the timeout expires, the thread is 1034 * removed from the sleep queue and made runnable if it is still asleep. 1035 */ 1036 static void 1037 sleepq_timeout(void *arg) 1038 { 1039 struct sleepqueue_chain *sc __unused; 1040 struct sleepqueue *sq; 1041 struct thread *td; 1042 void *wchan; 1043 int wakeup_swapper; 1044 1045 td = arg; 1046 CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)", 1047 (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name); 1048 1049 thread_lock(td); 1050 if (td->td_sleeptimo > sbinuptime() || td->td_sleeptimo == 0) { 1051 /* 1052 * The thread does not want a timeout (yet). 1053 */ 1054 } else if (TD_IS_SLEEPING(td) && TD_ON_SLEEPQ(td)) { 1055 /* 1056 * See if the thread is asleep and get the wait 1057 * channel if it is. 1058 */ 1059 wchan = td->td_wchan; 1060 sc = SC_LOOKUP(wchan); 1061 THREAD_LOCKPTR_ASSERT(td, &sc->sc_lock); 1062 sq = sleepq_lookup(wchan); 1063 MPASS(sq != NULL); 1064 td->td_flags |= TDF_TIMEOUT; 1065 wakeup_swapper = sleepq_resume_thread(sq, td, 0, 0); 1066 if (wakeup_swapper) 1067 kick_proc0(); 1068 return; 1069 } else if (TD_ON_SLEEPQ(td)) { 1070 /* 1071 * If the thread is on the SLEEPQ but isn't sleeping 1072 * yet, it can either be on another CPU in between 1073 * sleepq_add() and one of the sleepq_*wait*() 1074 * routines or it can be in sleepq_catch_signals(). 1075 */ 1076 td->td_flags |= TDF_TIMEOUT; 1077 } 1078 thread_unlock(td); 1079 } 1080 1081 /* 1082 * Resumes a specific thread from the sleep queue associated with a specific 1083 * wait channel if it is on that queue. 1084 */ 1085 void 1086 sleepq_remove(struct thread *td, void *wchan) 1087 { 1088 struct sleepqueue_chain *sc; 1089 struct sleepqueue *sq; 1090 int wakeup_swapper; 1091 1092 /* 1093 * Look up the sleep queue for this wait channel, then re-check 1094 * that the thread is asleep on that channel, if it is not, then 1095 * bail. 1096 */ 1097 MPASS(wchan != NULL); 1098 sc = SC_LOOKUP(wchan); 1099 mtx_lock_spin(&sc->sc_lock); 1100 /* 1101 * We can not lock the thread here as it may be sleeping on a 1102 * different sleepq. However, holding the sleepq lock for this 1103 * wchan can guarantee that we do not miss a wakeup for this 1104 * channel. The asserts below will catch any false positives. 1105 */ 1106 if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) { 1107 mtx_unlock_spin(&sc->sc_lock); 1108 return; 1109 } 1110 1111 /* Thread is asleep on sleep queue sq, so wake it up. */ 1112 sq = sleepq_lookup(wchan); 1113 MPASS(sq != NULL); 1114 MPASS(td->td_wchan == wchan); 1115 wakeup_swapper = sleepq_resume_thread(sq, td, 0, 0); 1116 if (wakeup_swapper) 1117 kick_proc0(); 1118 } 1119 1120 /* 1121 * Abort a thread as if an interrupt had occurred. Only abort 1122 * interruptible waits (unfortunately it isn't safe to abort others). 1123 * 1124 * Requires thread lock on entry, releases on return. 1125 */ 1126 int 1127 sleepq_abort(struct thread *td, int intrval) 1128 { 1129 struct sleepqueue *sq; 1130 void *wchan; 1131 1132 THREAD_LOCK_ASSERT(td, MA_OWNED); 1133 MPASS(TD_ON_SLEEPQ(td)); 1134 MPASS(td->td_flags & TDF_SINTR); 1135 MPASS(intrval == EINTR || intrval == ERESTART); 1136 1137 /* 1138 * If the TDF_TIMEOUT flag is set, just leave. A 1139 * timeout is scheduled anyhow. 1140 */ 1141 if (td->td_flags & TDF_TIMEOUT) { 1142 thread_unlock(td); 1143 return (0); 1144 } 1145 1146 CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)", 1147 (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name); 1148 td->td_intrval = intrval; 1149 td->td_flags |= TDF_SLEEPABORT; 1150 1151 /* 1152 * If the thread has not slept yet it will find the signal in 1153 * sleepq_catch_signals() and call sleepq_resume_thread. Otherwise 1154 * we have to do it here. 1155 */ 1156 if (!TD_IS_SLEEPING(td)) { 1157 thread_unlock(td); 1158 return (0); 1159 } 1160 wchan = td->td_wchan; 1161 MPASS(wchan != NULL); 1162 sq = sleepq_lookup(wchan); 1163 MPASS(sq != NULL); 1164 1165 /* Thread is asleep on sleep queue sq, so wake it up. */ 1166 return (sleepq_resume_thread(sq, td, 0, 0)); 1167 } 1168 1169 void 1170 sleepq_chains_remove_matching(bool (*matches)(struct thread *)) 1171 { 1172 struct sleepqueue_chain *sc; 1173 struct sleepqueue *sq, *sq1; 1174 int i, wakeup_swapper; 1175 1176 wakeup_swapper = 0; 1177 for (sc = &sleepq_chains[0]; sc < sleepq_chains + SC_TABLESIZE; ++sc) { 1178 if (LIST_EMPTY(&sc->sc_queues)) { 1179 continue; 1180 } 1181 mtx_lock_spin(&sc->sc_lock); 1182 LIST_FOREACH_SAFE(sq, &sc->sc_queues, sq_hash, sq1) { 1183 for (i = 0; i < NR_SLEEPQS; ++i) { 1184 wakeup_swapper |= sleepq_remove_matching(sq, i, 1185 matches, 0); 1186 } 1187 } 1188 mtx_unlock_spin(&sc->sc_lock); 1189 } 1190 if (wakeup_swapper) { 1191 kick_proc0(); 1192 } 1193 } 1194 1195 /* 1196 * Prints the stacks of all threads presently sleeping on wchan/queue to 1197 * the sbuf sb. Sets count_stacks_printed to the number of stacks actually 1198 * printed. Typically, this will equal the number of threads sleeping on the 1199 * queue, but may be less if sb overflowed before all stacks were printed. 1200 */ 1201 #ifdef STACK 1202 int 1203 sleepq_sbuf_print_stacks(struct sbuf *sb, void *wchan, int queue, 1204 int *count_stacks_printed) 1205 { 1206 struct thread *td, *td_next; 1207 struct sleepqueue *sq; 1208 struct stack **st; 1209 struct sbuf **td_infos; 1210 int i, stack_idx, error, stacks_to_allocate; 1211 bool finished; 1212 1213 error = 0; 1214 finished = false; 1215 1216 KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__)); 1217 MPASS((queue >= 0) && (queue < NR_SLEEPQS)); 1218 1219 stacks_to_allocate = 10; 1220 for (i = 0; i < 3 && !finished ; i++) { 1221 /* We cannot malloc while holding the queue's spinlock, so 1222 * we do our mallocs now, and hope it is enough. If it 1223 * isn't, we will free these, drop the lock, malloc more, 1224 * and try again, up to a point. After that point we will 1225 * give up and report ENOMEM. We also cannot write to sb 1226 * during this time since the client may have set the 1227 * SBUF_AUTOEXTEND flag on their sbuf, which could cause a 1228 * malloc as we print to it. So we defer actually printing 1229 * to sb until after we drop the spinlock. 1230 */ 1231 1232 /* Where we will store the stacks. */ 1233 st = malloc(sizeof(struct stack *) * stacks_to_allocate, 1234 M_TEMP, M_WAITOK); 1235 for (stack_idx = 0; stack_idx < stacks_to_allocate; 1236 stack_idx++) 1237 st[stack_idx] = stack_create(M_WAITOK); 1238 1239 /* Where we will store the td name, tid, etc. */ 1240 td_infos = malloc(sizeof(struct sbuf *) * stacks_to_allocate, 1241 M_TEMP, M_WAITOK); 1242 for (stack_idx = 0; stack_idx < stacks_to_allocate; 1243 stack_idx++) 1244 td_infos[stack_idx] = sbuf_new(NULL, NULL, 1245 MAXCOMLEN + sizeof(struct thread *) * 2 + 40, 1246 SBUF_FIXEDLEN); 1247 1248 sleepq_lock(wchan); 1249 sq = sleepq_lookup(wchan); 1250 if (sq == NULL) { 1251 /* This sleepq does not exist; exit and return ENOENT. */ 1252 error = ENOENT; 1253 finished = true; 1254 sleepq_release(wchan); 1255 goto loop_end; 1256 } 1257 1258 stack_idx = 0; 1259 /* Save thread info */ 1260 TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq, 1261 td_next) { 1262 if (stack_idx >= stacks_to_allocate) 1263 goto loop_end; 1264 1265 /* Note the td_lock is equal to the sleepq_lock here. */ 1266 stack_save_td(st[stack_idx], td); 1267 1268 sbuf_printf(td_infos[stack_idx], "%d: %s %p", 1269 td->td_tid, td->td_name, td); 1270 1271 ++stack_idx; 1272 } 1273 1274 finished = true; 1275 sleepq_release(wchan); 1276 1277 /* Print the stacks */ 1278 for (i = 0; i < stack_idx; i++) { 1279 sbuf_finish(td_infos[i]); 1280 sbuf_printf(sb, "--- thread %s: ---\n", sbuf_data(td_infos[i])); 1281 stack_sbuf_print(sb, st[i]); 1282 sbuf_printf(sb, "\n"); 1283 1284 error = sbuf_error(sb); 1285 if (error == 0) 1286 *count_stacks_printed = stack_idx; 1287 } 1288 1289 loop_end: 1290 if (!finished) 1291 sleepq_release(wchan); 1292 for (stack_idx = 0; stack_idx < stacks_to_allocate; 1293 stack_idx++) 1294 stack_destroy(st[stack_idx]); 1295 for (stack_idx = 0; stack_idx < stacks_to_allocate; 1296 stack_idx++) 1297 sbuf_delete(td_infos[stack_idx]); 1298 free(st, M_TEMP); 1299 free(td_infos, M_TEMP); 1300 stacks_to_allocate *= 10; 1301 } 1302 1303 if (!finished && error == 0) 1304 error = ENOMEM; 1305 1306 return (error); 1307 } 1308 #endif 1309 1310 #ifdef SLEEPQUEUE_PROFILING 1311 #define SLEEPQ_PROF_LOCATIONS 1024 1312 #define SLEEPQ_SBUFSIZE 512 1313 struct sleepq_prof { 1314 LIST_ENTRY(sleepq_prof) sp_link; 1315 const char *sp_wmesg; 1316 long sp_count; 1317 }; 1318 1319 LIST_HEAD(sqphead, sleepq_prof); 1320 1321 struct sqphead sleepq_prof_free; 1322 struct sqphead sleepq_hash[SC_TABLESIZE]; 1323 static struct sleepq_prof sleepq_profent[SLEEPQ_PROF_LOCATIONS]; 1324 static struct mtx sleepq_prof_lock; 1325 MTX_SYSINIT(sleepq_prof_lock, &sleepq_prof_lock, "sleepq_prof", MTX_SPIN); 1326 1327 static void 1328 sleepq_profile(const char *wmesg) 1329 { 1330 struct sleepq_prof *sp; 1331 1332 mtx_lock_spin(&sleepq_prof_lock); 1333 if (prof_enabled == 0) 1334 goto unlock; 1335 LIST_FOREACH(sp, &sleepq_hash[SC_HASH(wmesg)], sp_link) 1336 if (sp->sp_wmesg == wmesg) 1337 goto done; 1338 sp = LIST_FIRST(&sleepq_prof_free); 1339 if (sp == NULL) 1340 goto unlock; 1341 sp->sp_wmesg = wmesg; 1342 LIST_REMOVE(sp, sp_link); 1343 LIST_INSERT_HEAD(&sleepq_hash[SC_HASH(wmesg)], sp, sp_link); 1344 done: 1345 sp->sp_count++; 1346 unlock: 1347 mtx_unlock_spin(&sleepq_prof_lock); 1348 return; 1349 } 1350 1351 static void 1352 sleepq_prof_reset(void) 1353 { 1354 struct sleepq_prof *sp; 1355 int enabled; 1356 int i; 1357 1358 mtx_lock_spin(&sleepq_prof_lock); 1359 enabled = prof_enabled; 1360 prof_enabled = 0; 1361 for (i = 0; i < SC_TABLESIZE; i++) 1362 LIST_INIT(&sleepq_hash[i]); 1363 LIST_INIT(&sleepq_prof_free); 1364 for (i = 0; i < SLEEPQ_PROF_LOCATIONS; i++) { 1365 sp = &sleepq_profent[i]; 1366 sp->sp_wmesg = NULL; 1367 sp->sp_count = 0; 1368 LIST_INSERT_HEAD(&sleepq_prof_free, sp, sp_link); 1369 } 1370 prof_enabled = enabled; 1371 mtx_unlock_spin(&sleepq_prof_lock); 1372 } 1373 1374 static int 1375 enable_sleepq_prof(SYSCTL_HANDLER_ARGS) 1376 { 1377 int error, v; 1378 1379 v = prof_enabled; 1380 error = sysctl_handle_int(oidp, &v, v, req); 1381 if (error) 1382 return (error); 1383 if (req->newptr == NULL) 1384 return (error); 1385 if (v == prof_enabled) 1386 return (0); 1387 if (v == 1) 1388 sleepq_prof_reset(); 1389 mtx_lock_spin(&sleepq_prof_lock); 1390 prof_enabled = !!v; 1391 mtx_unlock_spin(&sleepq_prof_lock); 1392 1393 return (0); 1394 } 1395 1396 static int 1397 reset_sleepq_prof_stats(SYSCTL_HANDLER_ARGS) 1398 { 1399 int error, v; 1400 1401 v = 0; 1402 error = sysctl_handle_int(oidp, &v, 0, req); 1403 if (error) 1404 return (error); 1405 if (req->newptr == NULL) 1406 return (error); 1407 if (v == 0) 1408 return (0); 1409 sleepq_prof_reset(); 1410 1411 return (0); 1412 } 1413 1414 static int 1415 dump_sleepq_prof_stats(SYSCTL_HANDLER_ARGS) 1416 { 1417 struct sleepq_prof *sp; 1418 struct sbuf *sb; 1419 int enabled; 1420 int error; 1421 int i; 1422 1423 error = sysctl_wire_old_buffer(req, 0); 1424 if (error != 0) 1425 return (error); 1426 sb = sbuf_new_for_sysctl(NULL, NULL, SLEEPQ_SBUFSIZE, req); 1427 sbuf_printf(sb, "\nwmesg\tcount\n"); 1428 enabled = prof_enabled; 1429 mtx_lock_spin(&sleepq_prof_lock); 1430 prof_enabled = 0; 1431 mtx_unlock_spin(&sleepq_prof_lock); 1432 for (i = 0; i < SC_TABLESIZE; i++) { 1433 LIST_FOREACH(sp, &sleepq_hash[i], sp_link) { 1434 sbuf_printf(sb, "%s\t%ld\n", 1435 sp->sp_wmesg, sp->sp_count); 1436 } 1437 } 1438 mtx_lock_spin(&sleepq_prof_lock); 1439 prof_enabled = enabled; 1440 mtx_unlock_spin(&sleepq_prof_lock); 1441 1442 error = sbuf_finish(sb); 1443 sbuf_delete(sb); 1444 return (error); 1445 } 1446 1447 SYSCTL_PROC(_debug_sleepq, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD, 1448 NULL, 0, dump_sleepq_prof_stats, "A", "Sleepqueue profiling statistics"); 1449 SYSCTL_PROC(_debug_sleepq, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW, 1450 NULL, 0, reset_sleepq_prof_stats, "I", 1451 "Reset sleepqueue profiling statistics"); 1452 SYSCTL_PROC(_debug_sleepq, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW, 1453 NULL, 0, enable_sleepq_prof, "I", "Enable sleepqueue profiling"); 1454 #endif 1455 1456 #ifdef DDB 1457 DB_SHOW_COMMAND(sleepq, db_show_sleepqueue) 1458 { 1459 struct sleepqueue_chain *sc; 1460 struct sleepqueue *sq; 1461 #ifdef INVARIANTS 1462 struct lock_object *lock; 1463 #endif 1464 struct thread *td; 1465 void *wchan; 1466 int i; 1467 1468 if (!have_addr) 1469 return; 1470 1471 /* 1472 * First, see if there is an active sleep queue for the wait channel 1473 * indicated by the address. 1474 */ 1475 wchan = (void *)addr; 1476 sc = SC_LOOKUP(wchan); 1477 LIST_FOREACH(sq, &sc->sc_queues, sq_hash) 1478 if (sq->sq_wchan == wchan) 1479 goto found; 1480 1481 /* 1482 * Second, see if there is an active sleep queue at the address 1483 * indicated. 1484 */ 1485 for (i = 0; i < SC_TABLESIZE; i++) 1486 LIST_FOREACH(sq, &sleepq_chains[i].sc_queues, sq_hash) { 1487 if (sq == (struct sleepqueue *)addr) 1488 goto found; 1489 } 1490 1491 db_printf("Unable to locate a sleep queue via %p\n", (void *)addr); 1492 return; 1493 found: 1494 db_printf("Wait channel: %p\n", sq->sq_wchan); 1495 db_printf("Queue type: %d\n", sq->sq_type); 1496 #ifdef INVARIANTS 1497 if (sq->sq_lock) { 1498 lock = sq->sq_lock; 1499 db_printf("Associated Interlock: %p - (%s) %s\n", lock, 1500 LOCK_CLASS(lock)->lc_name, lock->lo_name); 1501 } 1502 #endif 1503 db_printf("Blocked threads:\n"); 1504 for (i = 0; i < NR_SLEEPQS; i++) { 1505 db_printf("\nQueue[%d]:\n", i); 1506 if (TAILQ_EMPTY(&sq->sq_blocked[i])) 1507 db_printf("\tempty\n"); 1508 else 1509 TAILQ_FOREACH(td, &sq->sq_blocked[i], 1510 td_slpq) { 1511 db_printf("\t%p (tid %d, pid %d, \"%s\")\n", td, 1512 td->td_tid, td->td_proc->p_pid, 1513 td->td_name); 1514 } 1515 db_printf("(expected: %u)\n", sq->sq_blockedcnt[i]); 1516 } 1517 } 1518 1519 /* Alias 'show sleepqueue' to 'show sleepq'. */ 1520 DB_SHOW_ALIAS(sleepqueue, db_show_sleepqueue); 1521 #endif 1522