/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 * $FreeBSD: src/sys/kern/kern_synch.c,v 1.87.2.6 2002/10/13 07:29:53 kbyanc Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/priv.h>
#include <sys/lock.h>
#include <sys/uio.h>
#include <sys/kcollect.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <sys/ktr.h>
#include <sys/serialize.h>

#include <sys/signal2.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mutex2.h>

#include <machine/cpu.h>
#include <machine/smp.h>

#include <vm/vm_extern.h>

struct tslpque {
	TAILQ_HEAD(, thread)	queue;
	const volatile void	*ident0;
	const volatile void	*ident1;
	const volatile void	*ident2;
	const volatile void	*ident3;
};

static void sched_setup (void *dummy);
SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL);
static void sched_dyninit (void *dummy);
SYSINIT(sched_dyninit, SI_BOOT1_DYNALLOC, SI_ORDER_FIRST, sched_dyninit, NULL);

int	lbolt;
void	*lbolt_syncer;
int	ncpus;
int	ncpus_fit, ncpus_fit_mask;	/* note: mask not cpumask_t */
int	safepri;
int	tsleep_now_works;
int	tsleep_crypto_dump = 0;

MALLOC_DEFINE(M_TSLEEP, "tslpque", "tsleep queues");

#define __DEALL(ident)	__DEQUALIFY(void *, ident)

#if !defined(KTR_TSLEEP)
#define KTR_TSLEEP	KTR_ALL
#endif
KTR_INFO_MASTER(tsleep);
KTR_INFO(KTR_TSLEEP, tsleep, tsleep_beg, 0, "tsleep enter %p", const volatile void *ident);
KTR_INFO(KTR_TSLEEP, tsleep, tsleep_end, 1, "tsleep exit");
KTR_INFO(KTR_TSLEEP, tsleep, wakeup_beg, 2, "wakeup enter %p", const volatile void *ident);
KTR_INFO(KTR_TSLEEP, tsleep, wakeup_end, 3, "wakeup exit");
KTR_INFO(KTR_TSLEEP, tsleep, ilockfail, 4, "interlock failed %p", const volatile void *ident);

#define logtsleep1(name)	KTR_LOG(tsleep_ ## name)
#define logtsleep2(name, val)	KTR_LOG(tsleep_ ## name, val)

struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };	/* load average of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
static fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};
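
/*
 * Informal derivation of the table above: with a 5 second sampling
 * interval and averaging periods of 60, 300 and 900 seconds, each
 * coefficient is exp(-interval/period), i.e. exp(-5/60) = exp(-1/12),
 * exp(-5/300) = exp(-1/60) and exp(-5/900) = exp(-1/180), scaled by
 * FSCALE for fixed-point math.  A userland sketch (illustration only,
 * never compiled into the kernel) that regenerates the constants:
 */
#if 0
#include <math.h>
#include <stdio.h>

int
main(void)
{
	static const double period[3] = { 60.0, 300.0, 900.0 };
	int i;

	for (i = 0; i < 3; ++i) {
		printf("\t%.16f * FSCALE,\t/* exp(-5/%.0f) */\n",
		       exp(-5.0 / period[i]), period[i]);
	}
	return (0);
}
#endif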

static void	endtsleep (void *);
static void	loadav (void *arg);
static void	schedcpu (void *arg);

static int pctcpu_decay = 10;
SYSCTL_INT(_kern, OID_AUTO, pctcpu_decay, CTLFLAG_RW,
	   &pctcpu_decay, 0, "");

/*
 * kernel uses `FSCALE', userland (SHOULD) use kern.fscale
 */
int fscale __unused = FSCALE;	/* exported to systat */
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");

/*
 * Issue a wakeup() from userland (debugging)
 */
static int
sysctl_wakeup(SYSCTL_HANDLER_ARGS)
{
	uint64_t ident = 1;
	int error = 0;

	if (req->newptr != NULL) {
		if (priv_check(curthread, PRIV_ROOT))
			return (EPERM);
		error = SYSCTL_IN(req, &ident, sizeof(ident));
		if (error)
			return error;
		kprintf("issue wakeup %016jx\n", ident);
		wakeup((void *)(intptr_t)ident);
	}
	if (req->oldptr != NULL) {
		error = SYSCTL_OUT(req, &ident, sizeof(ident));
	}
	return error;
}

SYSCTL_PROC(_debug, OID_AUTO, wakeup, CTLTYPE_UQUAD|CTLFLAG_RW, 0, 0,
	    sysctl_wakeup, "Q", "issue wakeup(addr)");
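
/*
 * Usage note (informal): the debug.wakeup sysctl above lets root issue
 * a wakeup() on an arbitrary kernel address while debugging, e.g.
 * "sysctl debug.wakeup=0xffffffff81234567" (address hypothetical).
 * Nothing is stored across requests; a plain read simply returns 1.
 */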

/*
 * Recompute process priorities, once a second.
 *
 * Since the userland schedulers are typically event oriented, if the
 * estcpu calculation at wakeup() time is not sufficient to make a
 * process runnable relative to other processes in the system we have
 * a 1-second recalc to help out.
 *
 * This code also allows us to store sysclock_t data in the process structure
 * without fear of an overrun, since sysclock_t are guaranteed to hold
 * several seconds worth of count.
 *
 * WARNING!  callouts can preempt normal threads.  However, they will not
 *	     preempt a thread holding a spinlock so we *can* safely use
 *	     spinlocks.
 */
static int schedcpu_stats(struct proc *p, void *data __unused);
static int schedcpu_resource(struct proc *p, void *data __unused);

static void
schedcpu(void *arg)
{
	allproc_scan(schedcpu_stats, NULL, 1);
	allproc_scan(schedcpu_resource, NULL, 1);
	if (mycpu->gd_cpuid == 0) {
		wakeup((caddr_t)&lbolt);
		wakeup(lbolt_syncer);
	}
	callout_reset(&mycpu->gd_schedcpu_callout, hz, schedcpu, NULL);
}

/*
 * General process statistics once a second
 */
static int
schedcpu_stats(struct proc *p, void *data __unused)
{
	struct lwp *lp;

	/*
	 * Threads may not be completely set up if the process is in the
	 * SIDL state.
	 */
	if (p->p_stat == SIDL)
		return(0);

	PHOLD(p);
	if (lwkt_trytoken(&p->p_token) == FALSE) {
		PRELE(p);
		return(0);
	}

	p->p_swtime++;
	FOREACH_LWP_IN_PROC(lp, p) {
		if (lp->lwp_stat == LSSLEEP) {
			++lp->lwp_slptime;
			if (lp->lwp_slptime == 1)
				p->p_usched->uload_update(lp);
		}

		/*
		 * Only recalculate processes that are active or have slept
		 * less than 2 seconds.  The schedulers understand this.
		 * Otherwise decay lwp_pctcpu by multiplying it by
		 * (pctcpu_decay - 1) / pctcpu_decay each second, about a
		 * 10% reduction with the default pctcpu_decay of 10.
		 */
		if (lp->lwp_slptime <= 1) {
			p->p_usched->recalculate(lp);
		} else {
			int decay;

			decay = pctcpu_decay;
			cpu_ccfence();
			if (decay <= 1)
				decay = 1;
			if (decay > 100)
				decay = 100;
			lp->lwp_pctcpu = (lp->lwp_pctcpu * (decay - 1)) /
					 decay;
		}
	}
	lwkt_reltoken(&p->p_token);
	lwkt_yield();
	PRELE(p);
	return(0);
}

/*
 * Resource checks.  XXX break out since ksignal/killproc can block,
 * limiting us to one process killed per second.  There is probably
 * a better way.
 */
static int
schedcpu_resource(struct proc *p, void *data __unused)
{
	u_int64_t ttime;
	struct lwp *lp;

	if (p->p_stat == SIDL)
		return(0);

	PHOLD(p);
	if (lwkt_trytoken(&p->p_token) == FALSE) {
		PRELE(p);
		return(0);
	}

	if (p->p_stat == SZOMB || p->p_limit == NULL) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return(0);
	}

	ttime = 0;
	FOREACH_LWP_IN_PROC(lp, p) {
		/*
		 * We may have caught an lp in the middle of being
		 * created, lwp_thread can be NULL.
		 */
		if (lp->lwp_thread) {
			ttime += lp->lwp_thread->td_sticks;
			ttime += lp->lwp_thread->td_uticks;
		}
	}

	switch(plimit_testcpulimit(p->p_limit, ttime)) {
	case PLIMIT_TESTCPU_KILL:
		killproc(p, "exceeded maximum CPU limit");
		break;
	case PLIMIT_TESTCPU_XCPU:
		if ((p->p_flags & P_XCPU) == 0) {
			p->p_flags |= P_XCPU;
			ksignal(p, SIGXCPU);
		}
		break;
	default:
		break;
	}
	lwkt_reltoken(&p->p_token);
	lwkt_yield();
	PRELE(p);
	return(0);
}

/*
 * This is only used by ps.  Generate a cpu percentage use over
 * a period of one second.
 */
void
updatepcpu(struct lwp *lp, int cpticks, int ttlticks)
{
	fixpt_t acc;
	int remticks;

	acc = (cpticks << FSHIFT) / ttlticks;
	if (ttlticks >= ESTCPUFREQ) {
		lp->lwp_pctcpu = acc;
	} else {
		remticks = ESTCPUFREQ - ttlticks;
		lp->lwp_pctcpu = (acc * ttlticks + lp->lwp_pctcpu * remticks) /
				 ESTCPUFREQ;
	}
}

/*
 * Handy macros to calculate hash indices.  LOOKUP() calculates the
 * global cpumask hash index, TCHASHSHIFT() converts that into the
 * pcpu hash index.
 *
 * By making the pcpu hash arrays smaller we save a significant amount
 * of memory at very low cost.  The real cost is in IPIs, which are handled
 * by the much larger global cpumask hash table.
 */
#define LOOKUP_PRIME	66555444443333333ULL
#define LOOKUP(x)	((((uintptr_t)(x) + ((uintptr_t)(x) >> 18)) ^	\
			  LOOKUP_PRIME) % slpque_tablesize)
#define TCHASHSHIFT(x)	((x) >> 4)

static uint32_t	slpque_tablesize;
static cpumask_t *slpque_cpumasks;

SYSCTL_UINT(_kern, OID_AUTO, slpque_tablesize, CTLFLAG_RD, &slpque_tablesize,
    0, "");

/*
 * This is a dandy function that allows us to interlock tsleep/wakeup
 * operations with unspecified upper level locks, such as lockmgr locks,
 * simply by holding a critical section.  The sequence is:
 *
 *	(acquire upper level lock)
 *	tsleep_interlock(blah)
 *	(release upper level lock)
 *	tsleep(blah, ...)
 *
 * Basically this function queues us on the tsleep queue without actually
 * descheduling us.  When tsleep() is later called with PINTERLOCKED it
 * assumes the thread was already queued, otherwise it queues it there.
 *
 * Thus it is possible to receive the wakeup prior to going to sleep and
 * the race conditions are covered.
 */
static __inline void
_tsleep_interlock(globaldata_t gd, const volatile void *ident, int flags)
{
	thread_t td = gd->gd_curthread;
	struct tslpque *qp;
	uint32_t cid;
	uint32_t gid;

	if (ident == NULL) {
		kprintf("tsleep_interlock: NULL ident %s\n", td->td_comm);
		print_backtrace(5);
	}

	crit_enter_quick(td);
	if (td->td_flags & TDF_TSLEEPQ) {
		/*
		 * Shortcut if unchanged
		 */
		if (td->td_wchan == ident &&
		    td->td_wdomain == (flags & PDOMAIN_MASK)) {
			crit_exit_quick(td);
			return;
		}

		/*
		 * Remove current sleepq
		 */
		cid = LOOKUP(td->td_wchan);
		gid = TCHASHSHIFT(cid);
		qp = &gd->gd_tsleep_hash[gid];
		TAILQ_REMOVE(&qp->queue, td, td_sleepq);
		if (TAILQ_FIRST(&qp->queue) == NULL) {
			qp->ident0 = NULL;
			qp->ident1 = NULL;
			qp->ident2 = NULL;
			qp->ident3 = NULL;
			ATOMIC_CPUMASK_NANDBIT(slpque_cpumasks[cid],
					       gd->gd_cpuid);
		}
	} else {
		td->td_flags |= TDF_TSLEEPQ;
	}
	cid = LOOKUP(ident);
	gid = TCHASHSHIFT(cid);
	qp = &gd->gd_tsleep_hash[gid];
	TAILQ_INSERT_TAIL(&qp->queue, td, td_sleepq);
	if (qp->ident0 != ident && qp->ident1 != ident &&
	    qp->ident2 != ident && qp->ident3 != ident) {
		if (qp->ident0 == NULL)
			qp->ident0 = ident;
		else if (qp->ident1 == NULL)
			qp->ident1 = ident;
		else if (qp->ident2 == NULL)
			qp->ident2 = ident;
		else if (qp->ident3 == NULL)
			qp->ident3 = ident;
		else
			qp->ident0 = (void *)(intptr_t)-1;
	}
	ATOMIC_CPUMASK_ORBIT(slpque_cpumasks[cid], gd->gd_cpuid);
	td->td_wchan = ident;
	td->td_wdomain = flags & PDOMAIN_MASK;
	crit_exit_quick(td);
}

void
tsleep_interlock(const volatile void *ident, int flags)
{
	_tsleep_interlock(mycpu, ident, flags);
}
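
/*
 * Illustrative sketch of the sequence described above (not part of this
 * file; "struct foo", "foo_lock" and "foo_ready" are hypothetical).
 * Because the thread is queued on the sleep queue before the lock is
 * released, a wakeup() arriving in the release/tsleep window is not lost:
 */
#if 0
static int
foo_wait(struct foo *fo)
{
	int error = 0;

	lockmgr(&fo->foo_lock, LK_EXCLUSIVE);
	while (fo->foo_ready == 0 && error == 0) {
		tsleep_interlock(&fo->foo_ready, 0);
		lockmgr(&fo->foo_lock, LK_RELEASE);
		error = tsleep(&fo->foo_ready, PINTERLOCKED, "foowt", 0);
		lockmgr(&fo->foo_lock, LK_EXCLUSIVE);
	}
	lockmgr(&fo->foo_lock, LK_RELEASE);
	return (error);
}
#endif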

/*
 * Remove thread from sleepq.  Must be called with a critical section held.
 * The thread must not be migrating.
 */
static __inline void
_tsleep_remove(thread_t td)
{
	globaldata_t gd = mycpu;
	struct tslpque *qp;
	uint32_t cid;
	uint32_t gid;

	KKASSERT(td->td_gd == gd && IN_CRITICAL_SECT(td));
	KKASSERT((td->td_flags & TDF_MIGRATING) == 0);
	if (td->td_flags & TDF_TSLEEPQ) {
		td->td_flags &= ~TDF_TSLEEPQ;
		cid = LOOKUP(td->td_wchan);
		gid = TCHASHSHIFT(cid);
		qp = &gd->gd_tsleep_hash[gid];
		TAILQ_REMOVE(&qp->queue, td, td_sleepq);
		if (TAILQ_FIRST(&qp->queue) == NULL) {
			ATOMIC_CPUMASK_NANDBIT(slpque_cpumasks[cid],
					       gd->gd_cpuid);
		}
		td->td_wchan = NULL;
		td->td_wdomain = 0;
	}
}

void
tsleep_remove(thread_t td)
{
	_tsleep_remove(td);
}

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If flags includes PCATCH, signals are checked
 * before and after sleeping, else signals are not checked.  Returns 0 if
 * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the
 * system call should be interrupted by the signal.
 *
 * Note that if we are a process, we release_curproc() before messing with
 * the LWKT scheduler.
 *
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 *
 * WARNING!  This code can't block (short of switching away), or bad things
 *	     will happen.  No getting tokens, no blocking locks, etc.
 */
int
tsleep(const volatile void *ident, int flags, const char *wmesg, int timo)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p = td->td_proc;		/* may be NULL */
	globaldata_t gd;
	int sig;
	int catch;
	int error;
	int oldpri;
	struct callout thandle;

	/*
	 * Currently a severe hack.  Make sure any delayed wakeups
	 * are flushed before we sleep or we might deadlock on whatever
	 * event we are sleeping on.
	 */
	if (td->td_flags & TDF_DELAYED_WAKEUP)
		wakeup_end_delayed();

	/*
	 * NOTE: removed KTRPOINT, it could cause races due to blocking
	 * even in stable.  Just scrap it for now.
	 */
	if (!tsleep_crypto_dump && (tsleep_now_works == 0 || panicstr)) {
		/*
		 * After a panic, or before we actually have an operational
		 * softclock, just give interrupts a chance, then just return;
		 *
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		splz();
		oldpri = td->td_pri;
		lwkt_setpri_self(safepri);
		lwkt_switch();
		lwkt_setpri_self(oldpri);
		return (0);
	}
	logtsleep2(tsleep_beg, ident);
	gd = td->td_gd;
	KKASSERT(td != &gd->gd_idlethread);	/* you must be kidding! */
	td->td_wakefromcpu = -1;		/* overwritten by _wakeup */

	/*
	 * NOTE: all of this occurs on the current cpu, including any
	 * callout-based wakeups, so a critical section is a sufficient
	 * interlock.
	 *
	 * The entire sequence through to where we actually sleep must
	 * run without breaking the critical section.
	 */
	catch = flags & PCATCH;
	error = 0;
	sig = 0;

	crit_enter_quick(td);

	KASSERT(ident != NULL, ("tsleep: no ident"));
	KASSERT(lp == NULL ||
		lp->lwp_stat == LSRUN ||	/* Obvious */
		lp->lwp_stat == LSSTOP,		/* Set in tstop */
		("tsleep %p %s %d",
			ident, wmesg, lp->lwp_stat));

	/*
	 * We interlock the sleep queue if the caller has not already done
	 * it for us.  This must be done before we potentially acquire any
	 * tokens or we can lose the wakeup.
	 */
	if ((flags & PINTERLOCKED) == 0) {
		_tsleep_interlock(gd, ident, flags);
	}

	/*
	 * Setup for the current process (if this is a process).  We must
	 * interlock with lwp_token to avoid remote wakeup races via
	 * setrunnable()
	 */
	if (lp) {
		lwkt_gettoken(&lp->lwp_token);

		/*
		 * If the umbrella process is in the SCORE state then
		 * make sure that the thread is flagged going into a
		 * normal sleep to allow the core dump to proceed, otherwise
		 * the coredump can end up waiting forever.  If the normal
		 * sleep is woken up, the thread will enter a stopped state
		 * upon return to userland.
		 *
		 * We do not want to interrupt or cause a thread exit at
		 * this juncture because that will mess up the state the
		 * coredump is trying to save.
		 */
		if (p->p_stat == SCORE &&
		    (lp->lwp_mpflags & LWP_MP_WSTOP) == 0) {
			atomic_set_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
			++p->p_nstopped;
		}

		/*
		 * PCATCH requested.
		 */
		if (catch) {
			/*
			 * Early termination if PCATCH was set and a
			 * signal is pending, interlocked with the
			 * critical section.
			 *
			 * Early termination only occurs when tsleep() is
			 * entered while in a normal LSRUN state.
			 */
			if ((sig = CURSIG(lp)) != 0)
				goto resume;

			/*
			 * Causes ksignal to wake us up if a signal is
			 * received (interlocked with lp->lwp_token).
			 */
			lp->lwp_flags |= LWP_SINTR;
		}
	} else {
		KKASSERT(p == NULL);
	}

	/*
	 * Make sure the current process has been untangled from
	 * the userland scheduler and initialize slptime to start
	 * counting.
	 *
	 * NOTE: td->td_wakefromcpu is pre-set by the release function
	 *	 for the dfly scheduler, and then adjusted by _wakeup()
	 */
	if (lp) {
		p->p_usched->release_curproc(lp);
		lp->lwp_slptime = 0;
	}

	/*
	 * For PINTERLOCKED operation, TDF_TSLEEPQ might not be set if
	 * a wakeup() was processed before the thread could go to sleep.
	 *
	 * If TDF_TSLEEPQ is set, make sure the ident matches the recorded
	 * ident.  If it does not then the thread slept in between the
	 * caller's initial tsleep_interlock() call and the caller's tsleep()
	 * call.
	 *
	 * Under extreme loads the act of sending an IPI (e.g. wakeup()'s)
	 * can itself process incoming IPIs, thus draining incoming wakeups.
	 */
	if ((td->td_flags & TDF_TSLEEPQ) == 0) {
		logtsleep2(ilockfail, ident);
		goto resume;
	} else if (td->td_wchan != ident ||
		   td->td_wdomain != (flags & PDOMAIN_MASK)) {
		logtsleep2(ilockfail, ident);
		goto resume;
	}

	/*
	 * Scheduling is blocked while in a critical section.  Coincide
	 * the descheduled-by-tsleep flag with the descheduling of the
	 * lwkt.
	 *
	 * The timer callout is localized on our cpu and interlocked by
	 * our critical section.
	 */
	lwkt_deschedule_self(td);
	td->td_flags |= TDF_TSLEEP_DESCHEDULED;
	td->td_wmesg = wmesg;

	/*
	 * Setup the timeout, if any.  The timeout is only operable while
	 * the thread is flagged descheduled.
	 */
	KKASSERT((td->td_flags & TDF_TIMEOUT) == 0);
	if (timo) {
		callout_init_mp(&thandle);
		callout_reset(&thandle, timo, endtsleep, td);
	}

	/*
	 * Beddy bye bye.
	 */
	if (lp) {
		/*
		 * Ok, we are sleeping.  Place us in the SSLEEP state.
		 */
		KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);

		/*
		 * tstop() sets LSSTOP, so don't fiddle with that.
		 */
		if (lp->lwp_stat != LSSTOP)
			lp->lwp_stat = LSSLEEP;
		lp->lwp_ru.ru_nvcsw++;
		p->p_usched->uload_update(lp);
		lwkt_switch();

		/*
		 * And when we are woken up, put us back in LSRUN.  If we
		 * slept for over a second, recalculate our estcpu.
		 */
		lp->lwp_stat = LSRUN;
		if (lp->lwp_slptime) {
			p->p_usched->uload_update(lp);
			p->p_usched->recalculate(lp);
		}
		lp->lwp_slptime = 0;
	} else {
		lwkt_switch();
	}

	/*
	 * Make sure we haven't switched cpus while we were asleep.  It's
	 * not supposed to happen.  Cleanup our temporary flags.
	 */
	KKASSERT(gd == td->td_gd);

	/*
	 * Cleanup the timeout.  If the timeout has already occurred thandle
	 * has already been stopped, otherwise stop thandle.  If the timeout
	 * is running (the callout thread must be blocked trying to get
	 * lwp_token) then wait for us to get scheduled.
	 */
	if (timo) {
		while (td->td_flags & TDF_TIMEOUT_RUNNING) {
			/* else we won't get rescheduled! */
			if (lp->lwp_stat != LSSTOP)
				lp->lwp_stat = LSSLEEP;
			lwkt_deschedule_self(td);
			td->td_wmesg = "tsrace";
			lwkt_switch();
			kprintf("td %p %s: timeout race\n", td, td->td_comm);
		}
		if (td->td_flags & TDF_TIMEOUT) {
			td->td_flags &= ~TDF_TIMEOUT;
			error = EWOULDBLOCK;
		} else {
			/* does not block when on same cpu */
			callout_stop(&thandle);
		}
	}
	td->td_flags &= ~TDF_TSLEEP_DESCHEDULED;

	/*
	 * Make sure we have been removed from the sleepq.  In most
	 * cases this will have been done for us already but it is
	 * possible for a scheduling IPI to be in-flight from a
	 * previous tsleep/tsleep_interlock() or due to a straight-out
	 * call to lwkt_schedule() (in the case of an interrupt thread),
	 * causing a spurious wakeup.
	 */
	_tsleep_remove(td);
	td->td_wmesg = NULL;

	/*
	 * Figure out the correct error return.  If interrupted by a
	 * signal we want to return EINTR or ERESTART.
	 */
resume:
	if (lp) {
		if (catch && error == 0) {
			if (sig != 0 || (sig = CURSIG(lp))) {
				if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
					error = EINTR;
				else
					error = ERESTART;
			}
		}

		lp->lwp_flags &= ~LWP_SINTR;

		/*
		 * Unconditionally set us to LSRUN on resume.  lwp_stat could
		 * be in a weird state due to the goto resume, particularly
		 * when tsleep() is called from tstop().
		 */
		lp->lwp_stat = LSRUN;
		lwkt_reltoken(&lp->lwp_token);
	}
	logtsleep1(tsleep_end);
	crit_exit_quick(td);

	return (error);
}

/*
 * Interlocked spinlock sleep.  An exclusively held spinlock must
 * be passed to ssleep().  The function will atomically release the
 * spinlock and tsleep on the ident, then reacquire the spinlock and
 * return.
 *
 * This routine is fairly important along the critical path, so optimize it
 * heavily.
 */
int
ssleep(const volatile void *ident, struct spinlock *spin, int flags,
       const char *wmesg, int timo)
{
	globaldata_t gd = mycpu;
	int error;

	_tsleep_interlock(gd, ident, flags);
	spin_unlock_quick(gd, spin);
	error = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
	KKASSERT(gd == mycpu);
	_spin_lock_quick(gd, spin, wmesg);

	return (error);
}

int
lksleep(const volatile void *ident, struct lock *lock, int flags,
	const char *wmesg, int timo)
{
	globaldata_t gd = mycpu;
	int error;

	_tsleep_interlock(gd, ident, flags);
	lockmgr(lock, LK_RELEASE);
	error = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
	lockmgr(lock, LK_EXCLUSIVE);

	return (error);
}

/*
 * Interlocked mutex sleep.  An exclusively held mutex must be passed
 * to mtxsleep().  The function will atomically release the mutex
 * and tsleep on the ident, then reacquire the mutex and return.
 */
int
mtxsleep(const volatile void *ident, struct mtx *mtx, int flags,
	 const char *wmesg, int timo)
{
	globaldata_t gd = mycpu;
	int error;

	_tsleep_interlock(gd, ident, flags);
	mtx_unlock(mtx);
	error = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
	mtx_lock_ex_quick(mtx);

	return (error);
}
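
/*
 * Illustrative sketch of the wrappers above (not part of this file;
 * "struct bar", "bar_spin" and "bar_count" are hypothetical).  ssleep()
 * queues the thread before dropping the spinlock, so the test/sleep
 * sequence cannot lose a wakeup; EWOULDBLOCK is returned on timeout:
 */
#if 0
static int
bar_drain(struct bar *ba, int timo)
{
	int error = 0;

	spin_lock(&ba->bar_spin);
	while (ba->bar_count != 0 && error == 0) {
		error = ssleep(&ba->bar_count, &ba->bar_spin, 0,
			       "bardr", timo);
	}
	spin_unlock(&ba->bar_spin);
	return (error);
}
#endif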

/*
 * Interlocked serializer sleep.  An exclusively held serializer must
 * be passed to zsleep().  The function will atomically release
 * the serializer and tsleep on the ident, then reacquire the serializer
 * and return.
 */
int
zsleep(const volatile void *ident, struct lwkt_serialize *slz, int flags,
       const char *wmesg, int timo)
{
	globaldata_t gd = mycpu;
	int ret;

	ASSERT_SERIALIZED(slz);

	_tsleep_interlock(gd, ident, flags);
	lwkt_serialize_exit(slz);
	ret = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
	lwkt_serialize_enter(slz);

	return ret;
}

/*
 * Directly block on the LWKT thread by descheduling it.  This
 * is much faster than tsleep(), but the only legal way to wake
 * us up is to directly schedule the thread.
 *
 * Setting TDF_SINTR will cause new signals to directly schedule us.
 *
 * This routine must be called while in a critical section.
 */
int
lwkt_sleep(const char *wmesg, int flags)
{
	thread_t td = curthread;
	int sig;

	if ((flags & PCATCH) == 0 || td->td_lwp == NULL) {
		td->td_flags |= TDF_BLOCKED;
		td->td_wmesg = wmesg;
		lwkt_deschedule_self(td);
		lwkt_switch();
		td->td_wmesg = NULL;
		td->td_flags &= ~TDF_BLOCKED;
		return(0);
	}
	if ((sig = CURSIG(td->td_lwp)) != 0) {
		if (SIGISMEMBER(td->td_proc->p_sigacts->ps_sigintr, sig))
			return(EINTR);
		else
			return(ERESTART);
	}
	td->td_flags |= TDF_BLOCKED | TDF_SINTR;
	td->td_wmesg = wmesg;
	lwkt_deschedule_self(td);
	lwkt_switch();
	td->td_flags &= ~(TDF_BLOCKED | TDF_SINTR);
	td->td_wmesg = NULL;
	return(0);
}

/*
 * Implement the timeout for tsleep.
 *
 * This type of callout timeout is scheduled on the same cpu the process
 * is sleeping on.  Also, at the moment, the MP lock is held.
 */
static void
endtsleep(void *arg)
{
	thread_t td = arg;
	struct lwp *lp;

	/*
	 * We are going to have to get the lwp_token, which means we might
	 * block.  This can race a tsleep getting woken up by other means
	 * so set TDF_TIMEOUT_RUNNING to force the tsleep to wait for our
	 * processing to complete (sorry tsleep!).
	 *
	 * We can safely set td_flags because td MUST be on the same cpu
	 * as we are.
	 */
	KKASSERT(td->td_gd == mycpu);
	crit_enter();
	td->td_flags |= TDF_TIMEOUT_RUNNING | TDF_TIMEOUT;

	/*
	 * This can block but TDF_TIMEOUT_RUNNING will prevent the thread
	 * from exiting the tsleep on us.  The flag is interlocked by virtue
	 * of lp being on the same cpu as we are.
	 */
	if ((lp = td->td_lwp) != NULL)
		lwkt_gettoken(&lp->lwp_token);

	KKASSERT(td->td_flags & TDF_TSLEEP_DESCHEDULED);

	if (lp) {
		/*
		 * callout timer should normally never be set in tstop()
		 * because it passes a timeout of 0.  However, there is a
		 * case during thread exit (which SSTOP's all the threads)
		 * for which tstop() must break out and can (properly) leave
		 * the thread in LSSTOP.
		 */
		KKASSERT(lp->lwp_stat != LSSTOP ||
			 (lp->lwp_mpflags & LWP_MP_WEXIT));
		setrunnable(lp);
		lwkt_reltoken(&lp->lwp_token);
	} else {
		_tsleep_remove(td);
		lwkt_schedule(td);
	}
	KKASSERT(td->td_gd == mycpu);
	td->td_flags &= ~TDF_TIMEOUT_RUNNING;
	crit_exit();
}

/*
 * Make all threads sleeping on the specified identifier runnable.
 * If PWAKEUP_ONE is set in the domain, at most one thread is made
 * runnable.
 *
 * The domain encodes the sleep/wakeup domain, flags, plus the originating
 * cpu.
 *
 * This call may run without the MP lock held.  We can only manipulate thread
 * state on the cpu owning the thread.  We CANNOT manipulate process state
 * at all.
 *
 * _wakeup() can be passed to an IPI so we can't use (const volatile
 * void *ident).
 */
static void
_wakeup(void *ident, int domain)
{
	struct tslpque *qp;
	struct thread *td;
	struct thread *ntd;
	globaldata_t gd;
	cpumask_t mask;
	uint32_t cid;
	uint32_t gid;
	int wids = 0;

	crit_enter();
	logtsleep2(wakeup_beg, ident);
	gd = mycpu;
	cid = LOOKUP(ident);
	gid = TCHASHSHIFT(cid);
	qp = &gd->gd_tsleep_hash[gid];
restart:
	for (td = TAILQ_FIRST(&qp->queue); td != NULL; td = ntd) {
		ntd = TAILQ_NEXT(td, td_sleepq);
		if (td->td_wchan == ident &&
		    td->td_wdomain == (domain & PDOMAIN_MASK)
		) {
			KKASSERT(td->td_gd == gd);
			_tsleep_remove(td);
			td->td_wakefromcpu = PWAKEUP_DECODE(domain);
			if (td->td_flags & TDF_TSLEEP_DESCHEDULED) {
				lwkt_schedule(td);
				if (domain & PWAKEUP_ONE)
					goto done;
			}
			goto restart;
		}
		if (td->td_wchan == qp->ident0)
			wids |= 1;
		else if (td->td_wchan == qp->ident1)
			wids |= 2;
		else if (td->td_wchan == qp->ident2)
			wids |= 4;
		else if (td->td_wchan == qp->ident3)
			wids |= 8;
		else
			wids |= 16;	/* force ident0 to be retained (-1) */
	}

	/*
	 * Because a bunch of cpumask array entries cover the same queue, it
	 * is possible for our bit to remain set in some of them and cause
	 * spurious wakeup IPIs later on.  Make sure that the bit is cleared
	 * when a spurious IPI occurs to prevent further spurious IPIs.
	 */
	if (TAILQ_FIRST(&qp->queue) == NULL) {
		ATOMIC_CPUMASK_NANDBIT(slpque_cpumasks[cid], gd->gd_cpuid);
		qp->ident0 = NULL;
		qp->ident1 = NULL;
		qp->ident2 = NULL;
		qp->ident3 = NULL;
	} else {
		if ((wids & 1) == 0) {
			if ((wids & 16) == 0) {
				qp->ident0 = NULL;
			} else {
				KKASSERT(qp->ident0 == (void *)(intptr_t)-1);
			}
		}
		if ((wids & 2) == 0)
			qp->ident1 = NULL;
		if ((wids & 4) == 0)
			qp->ident2 = NULL;
		if ((wids & 8) == 0)
			qp->ident3 = NULL;
	}

	/*
	 * We finished checking the current cpu but there still may be
	 * more work to do.  Either wakeup_one was requested and no matching
	 * thread was found, or a normal wakeup was requested and we have
	 * to continue checking cpus.
	 *
	 * It should be noted that this scheme is actually less expensive
	 * than the old scheme when waking up multiple threads, since we send
	 * only one IPI message per target candidate which may then schedule
	 * multiple threads.  Before we could have wound up sending an IPI
	 * message for each thread on the target cpu (!= current cpu) that
	 * needed to be woken up.
	 *
	 * NOTE: Wakeups occurring on remote cpus are asynchronous.  This
	 *	 should be ok since we are passing idents in the IPI rather
	 *	 than thread pointers.
	 *
	 * NOTE: We MUST mfence (or use an atomic op) prior to reading
	 *	 the cpumask, as another cpu may have written to it in
	 *	 a fashion interlocked with whatever the caller did before
	 *	 calling wakeup().  Otherwise we might miss the interaction
	 *	 (kern_mutex.c can cause this problem).
	 *
	 *	 lfence is insufficient as it may allow a written state to
	 *	 reorder around the cpumask load.
	 */
	if ((domain & PWAKEUP_MYCPU) == 0) {
		globaldata_t tgd;
		const volatile void *id0;
		int n;

		cpu_mfence();
		/* cpu_lfence(); */
		mask = slpque_cpumasks[cid];
		CPUMASK_ANDMASK(mask, gd->gd_other_cpus);
		while (CPUMASK_TESTNZERO(mask)) {
			n = BSRCPUMASK(mask);
			CPUMASK_NANDBIT(mask, n);
			tgd = globaldata_find(n);

			/*
			 * Both ident0 compares must come from a single load
			 * to avoid ident0 update races crossing the two
			 * compares.
			 */
			qp = &tgd->gd_tsleep_hash[gid];
			id0 = qp->ident0;
			cpu_ccfence();
			if (id0 == (void *)(intptr_t)-1) {
				lwkt_send_ipiq2(tgd, _wakeup, ident,
						domain | PWAKEUP_MYCPU);
				++tgd->gd_cnt.v_wakeup_colls;
			} else if (id0 == ident ||
				   qp->ident1 == ident ||
				   qp->ident2 == ident ||
				   qp->ident3 == ident) {
				lwkt_send_ipiq2(tgd, _wakeup, ident,
						domain | PWAKEUP_MYCPU);
			}
		}
#if 0
		if (CPUMASK_TESTNZERO(mask)) {
			lwkt_send_ipiq2_mask(mask, _wakeup, ident,
					     domain | PWAKEUP_MYCPU);
		}
#endif
	}
done:
	logtsleep1(wakeup_end);
	crit_exit();
}

/*
 * Wakeup all threads tsleep()ing on the specified ident, on all cpus
 */
void
wakeup(const volatile void *ident)
{
	globaldata_t gd = mycpu;
	thread_t td = gd->gd_curthread;

	if (td && (td->td_flags & TDF_DELAYED_WAKEUP)) {
		/*
		 * If we are in a delayed wakeup section, record up to two
		 * wakeups in a per-CPU queue and issue them when we block
		 * or exit the delayed wakeup section.
		 */
		if (atomic_cmpset_ptr(&gd->gd_delayed_wakeup[0], NULL, ident))
			return;
		if (atomic_cmpset_ptr(&gd->gd_delayed_wakeup[1], NULL, ident))
			return;

		ident = atomic_swap_ptr(__DEQUALIFY(volatile void **,
						    &gd->gd_delayed_wakeup[1]),
					__DEALL(ident));
		ident = atomic_swap_ptr(__DEQUALIFY(volatile void **,
						    &gd->gd_delayed_wakeup[0]),
					__DEALL(ident));
	}

	_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, gd->gd_cpuid));
}

/*
 * Wakeup one thread tsleep()ing on the specified ident, on any cpu.
 */
void
wakeup_one(const volatile void *ident)
{
	/* XXX potentially round-robin the first responding cpu */
	_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mycpu->gd_cpuid) |
				PWAKEUP_ONE);
}

/*
 * Wakeup threads tsleep()ing on the specified ident on the current cpu
 * only.
 */
void
wakeup_mycpu(const volatile void *ident)
{
	_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mycpu->gd_cpuid) |
				PWAKEUP_MYCPU);
}

/*
 * Wakeup one thread tsleep()ing on the specified ident on the current cpu
 * only.
 */
void
wakeup_mycpu_one(const volatile void *ident)
{
	/* XXX potentially round-robin the first responding cpu */
	_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mycpu->gd_cpuid) |
				PWAKEUP_MYCPU | PWAKEUP_ONE);
}

/*
 * Wakeup all threads tsleep()ing on the specified ident on the specified
 * cpu only.
 */
void
wakeup_oncpu(globaldata_t gd, const volatile void *ident)
{
	globaldata_t mygd = mycpu;

	if (gd == mygd) {
		_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mygd->gd_cpuid) |
					PWAKEUP_MYCPU);
	} else {
		lwkt_send_ipiq2(gd, _wakeup, __DEALL(ident),
				PWAKEUP_ENCODE(0, mygd->gd_cpuid) |
				PWAKEUP_MYCPU);
	}
}

/*
 * Wakeup one thread tsleep()ing on the specified ident on the specified cpu
 * only.
 */
void
wakeup_oncpu_one(globaldata_t gd, const volatile void *ident)
{
	globaldata_t mygd = mycpu;

	if (gd == mygd) {
		_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mygd->gd_cpuid) |
					PWAKEUP_MYCPU | PWAKEUP_ONE);
	} else {
		lwkt_send_ipiq2(gd, _wakeup, __DEALL(ident),
				PWAKEUP_ENCODE(0, mygd->gd_cpuid) |
				PWAKEUP_MYCPU | PWAKEUP_ONE);
	}
}

/*
 * Wakeup all threads waiting on the specified ident that slept using
 * the specified domain, on all cpus.
 */
void
wakeup_domain(const volatile void *ident, int domain)
{
	_wakeup(__DEALL(ident), PWAKEUP_ENCODE(domain, mycpu->gd_cpuid));
}

/*
 * Wakeup one thread waiting on the specified ident that slept using
 * the specified domain, on any cpu.
 */
void
wakeup_domain_one(const volatile void *ident, int domain)
{
	/* XXX potentially round-robin the first responding cpu */
	_wakeup(__DEALL(ident),
		PWAKEUP_ENCODE(domain, mycpu->gd_cpuid) | PWAKEUP_ONE);
}

void
wakeup_start_delayed(void)
{
	globaldata_t gd = mycpu;

	crit_enter();
	gd->gd_curthread->td_flags |= TDF_DELAYED_WAKEUP;
	crit_exit();
}

void
wakeup_end_delayed(void)
{
	globaldata_t gd = mycpu;

	if (gd->gd_curthread->td_flags & TDF_DELAYED_WAKEUP) {
		crit_enter();
		gd->gd_curthread->td_flags &= ~TDF_DELAYED_WAKEUP;
		if (gd->gd_delayed_wakeup[0] || gd->gd_delayed_wakeup[1]) {
			if (gd->gd_delayed_wakeup[0]) {
				wakeup(gd->gd_delayed_wakeup[0]);
				gd->gd_delayed_wakeup[0] = NULL;
			}
			if (gd->gd_delayed_wakeup[1]) {
				wakeup(gd->gd_delayed_wakeup[1]);
				gd->gd_delayed_wakeup[1] = NULL;
			}
		}
		crit_exit();
	}
}
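
/*
 * Illustrative sketch of the delayed-wakeup section above (not part of
 * this file; "struct baz" and its "ready" field are hypothetical).  A
 * producer marking many objects can defer the wakeups and have them
 * flushed in one batch when the section ends (or if the thread blocks):
 */
#if 0
static void
baz_ready_all(struct baz **objs, int n)
{
	int i;

	wakeup_start_delayed();
	for (i = 0; i < n; ++i) {
		objs[i]->ready = 1;
		wakeup(&objs[i]->ready);	/* queued, up to two idents */
	}
	wakeup_end_delayed();			/* flush pending wakeups */
}
#endif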

/*
 * setrunnable()
 *
 * Make a process runnable.  lp->lwp_token must be held on call and this
 * function must be called from the cpu owning lp.
 *
 * This only has an effect if we are in LSSTOP or LSSLEEP.
 */
void
setrunnable(struct lwp *lp)
{
	thread_t td = lp->lwp_thread;

	ASSERT_LWKT_TOKEN_HELD(&lp->lwp_token);
	KKASSERT(td->td_gd == mycpu);
	crit_enter();
	if (lp->lwp_stat == LSSTOP)
		lp->lwp_stat = LSSLEEP;
	if (lp->lwp_stat == LSSLEEP) {
		_tsleep_remove(td);
		lwkt_schedule(td);
	} else if (td->td_flags & TDF_SINTR) {
		lwkt_schedule(td);
	}
	crit_exit();
}

/*
 * The process is stopped due to some condition, usually because p_stat is
 * set to SSTOP, but also possibly due to being traced.
 *
 * Caller must hold p->p_token
 *
 * NOTE!  If the caller sets SSTOP, the caller must also clear P_WAITED
 * because the parent may check the child's status before the child actually
 * gets to this routine.
 *
 * This routine is called with the current lwp only, typically just
 * before returning to userland if the process state is detected as
 * possibly being in a stopped state.
 */
void
tstop(void)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p = lp->lwp_proc;
	struct proc *q;

	lwkt_gettoken(&lp->lwp_token);
	crit_enter();

	/*
	 * If LWP_MP_WSTOP is set, we were sleeping
	 * while our process was stopped.  At this point
	 * we were already counted as stopped.
	 */
	if ((lp->lwp_mpflags & LWP_MP_WSTOP) == 0) {
		/*
		 * If we're the last thread to stop, signal
		 * our parent.
		 */
		p->p_nstopped++;
		atomic_set_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
		wakeup(&p->p_nstopped);
		if (p->p_nstopped == p->p_nthreads) {
			/*
			 * Token required to interlock kern_wait()
			 */
			q = p->p_pptr;
			PHOLD(q);
			lwkt_gettoken(&q->p_token);
			p->p_flags &= ~P_WAITED;
			wakeup(p->p_pptr);
			if ((q->p_sigacts->ps_flag & PS_NOCLDSTOP) == 0)
				ksignal(q, SIGCHLD);
			lwkt_reltoken(&q->p_token);
			PRELE(q);
		}
	}

	/*
	 * Wait here while in a stopped state, interlocked with lwp_token.
	 * We must break-out if the whole process is trying to exit.
	 */
	while (STOPLWP(p, lp)) {
		lp->lwp_stat = LSSTOP;
		tsleep(p, 0, "stop", 0);
	}
	p->p_nstopped--;
	atomic_clear_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
	crit_exit();
	lwkt_reltoken(&lp->lwp_token);
}

/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.  This is a pcpu callout.
 *
 * We segment the lwp scan on a pcpu basis.  This does NOT
 * mean the associated lwps are on this cpu, it is done
 * just to break the work up.
 *
 * The callout on cpu0 rolls up the stats from the other
 * cpus.
 */
static int loadav_count_runnable(struct lwp *p, void *data);

static void
loadav(void *arg)
{
	globaldata_t gd = mycpu;
	struct loadavg *avg;
	int i, nrun;

	nrun = 0;
	alllwp_scan(loadav_count_runnable, &nrun, 1);
	gd->gd_loadav_nrunnable = nrun;
	if (gd->gd_cpuid == 0) {
		avg = &averunnable;
		nrun = 0;
		for (i = 0; i < ncpus; ++i)
			nrun += globaldata_find(i)->gd_loadav_nrunnable;
		for (i = 0; i < 3; i++) {
			avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
			    (long)nrun * FSCALE * (FSCALE - cexp[i])) >>
			    FSHIFT;
		}
	}

	/*
	 * Schedule the next update to occur after 5 seconds, but add a
	 * random variation to avoid synchronisation with processes that
	 * run at regular intervals.
	 */
	callout_reset(&gd->gd_loadav_callout,
		      hz * 4 + (int)(krandom() % (hz * 2 + 1)),
		      loadav, NULL);
}

static int
loadav_count_runnable(struct lwp *lp, void *data)
{
	int *nrunp = data;
	thread_t td;

	switch (lp->lwp_stat) {
	case LSRUN:
		if ((td = lp->lwp_thread) == NULL)
			break;
		if (td->td_flags & TDF_BLOCKED)
			break;
		++*nrunp;
		break;
	default:
		break;
	}
	lwkt_yield();
	return(0);
}

/*
 * Regular data collection
 */
static uint64_t
collect_load_callback(int n)
{
	int fscale = averunnable.fscale;

	return ((averunnable.ldavg[0] * 100 + (fscale >> 1)) / fscale);
}

static void
sched_setup(void *dummy __unused)
{
	globaldata_t save_gd = mycpu;
	globaldata_t gd;
	int n;

	kcollect_register(KCOLLECT_LOAD, "load", collect_load_callback,
			  KCOLLECT_SCALE(KCOLLECT_LOAD_FORMAT, 0));

	/*
	 * Kick off timeout driven events by calling first time.  We
	 * split the work across available cpus to help scale it,
	 * it can eat a lot of cpu when there are a lot of processes
	 * on the system.
	 */
	for (n = 0; n < ncpus; ++n) {
		gd = globaldata_find(n);
		lwkt_setcpu_self(gd);
		callout_init_mp(&gd->gd_loadav_callout);
		callout_init_mp(&gd->gd_schedcpu_callout);
		schedcpu(NULL);
		loadav(NULL);
	}
	lwkt_setcpu_self(save_gd);
}

/*
 * Extremely early initialization, dummy-up the tables so we don't have
 * to conditionalize for NULL in _wakeup() and tsleep_interlock().  Even
 * though the system isn't blocking this early, these functions still
 * try to access the hash table.
 *
 * This setup will be overridden once sched_dyninit() -> sleep_gdinit()
 * is called.
 */
void
sleep_early_gdinit(globaldata_t gd)
{
	static struct tslpque dummy_slpque;
	static cpumask_t dummy_cpumasks;

	slpque_tablesize = 1;
	gd->gd_tsleep_hash = &dummy_slpque;
	slpque_cpumasks = &dummy_cpumasks;
	TAILQ_INIT(&dummy_slpque.queue);
}

/*
 * PCPU initialization.  Called after KMALLOC is operational, by
 * sched_dyninit() for cpu 0, and by mi_gdinit() for other cpus later.
 *
 * WARNING! The pcpu hash table is smaller than the global cpumask
 *	    hash table, which can save us a lot of memory when maxproc
 *	    is set high.
 */
void
sleep_gdinit(globaldata_t gd)
{
	struct thread *td;
	size_t hash_size;
	uint32_t n;
	uint32_t i;

	/*
	 * This shouldn't happen, that is there shouldn't be any threads
	 * waiting on the dummy tsleep queue this early in the boot.
	 */
	if (gd->gd_cpuid == 0) {
		struct tslpque *qp = &gd->gd_tsleep_hash[0];

		TAILQ_FOREACH(td, &qp->queue, td_sleepq) {
			kprintf("SLEEP_GDINIT SWITCH %s\n", td->td_comm);
		}
	}

	/*
	 * Note that we have to allocate one extra slot because we are
	 * shifting a modulo value.  TCHASHSHIFT(slpque_tablesize - 1) can
	 * return the same value as TCHASHSHIFT(slpque_tablesize).
	 */
	n = TCHASHSHIFT(slpque_tablesize) + 1;

	hash_size = sizeof(struct tslpque) * n;
	gd->gd_tsleep_hash = (void *)kmem_alloc3(&kernel_map, hash_size,
						 VM_SUBSYS_GD,
						 KM_CPU(gd->gd_cpuid));
	memset(gd->gd_tsleep_hash, 0, hash_size);
	for (i = 0; i < n; ++i)
		TAILQ_INIT(&gd->gd_tsleep_hash[i].queue);
}

/*
 * Dynamic initialization after the memory system is operational.
 */
static void
sched_dyninit(void *dummy __unused)
{
	int tblsize;
	int tblsize2;
	int n;

	/*
	 * Calculate table size for slpque hash.  We want a prime number
	 * large enough to avoid overloading slpque_cpumasks when the
	 * system has a large number of sleeping processes, which will
	 * spam IPIs on wakeup().
	 *
	 * While it is true this is really a per-lwp factor, generally
	 * speaking the maxproc limit is a good metric to go by.
	 */
	for (tblsize = maxproc | 1; ; tblsize += 2) {
		if (tblsize % 3 == 0)
			continue;
		if (tblsize % 5 == 0)
			continue;
		tblsize2 = (tblsize / 2) | 1;
		for (n = 7; n < tblsize2; n += 2) {
			if (tblsize % n == 0)
				break;
		}
		if (n == tblsize2)
			break;
	}

	/*
	 * PIDs are currently limited to 6 digits.  Cap the table size
	 * at double this.
	 */
	if (tblsize > 2000003)
		tblsize = 2000003;

	slpque_tablesize = tblsize;
	slpque_cpumasks = kmalloc(sizeof(*slpque_cpumasks) * slpque_tablesize,
				  M_TSLEEP, M_WAITOK | M_ZERO);
	sleep_gdinit(mycpu);
}
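
/*
 * Illustrative sketch (not part of this file): the table-size selection
 * in sched_dyninit() as a standalone userland function, for a
 * hypothetical maxproc.  It returns the first odd candidate at or above
 * maxproc that survives the trial divisions, capped at 2000003:
 */
#if 0
static int
slpque_size_for(int maxproc)
{
	int tblsize, tblsize2, n;

	for (tblsize = maxproc | 1; ; tblsize += 2) {
		if (tblsize % 3 == 0 || tblsize % 5 == 0)
			continue;
		tblsize2 = (tblsize / 2) | 1;
		for (n = 7; n < tblsize2; n += 2) {
			if (tblsize % n == 0)
				break;
		}
		if (n == tblsize2)
			break;
	}
	return (tblsize > 2000003 ? 2000003 : tblsize);
}
#endif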