/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 * $FreeBSD: src/sys/kern/kern_synch.c,v 1.87.2.6 2002/10/13 07:29:53 kbyanc Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/priv.h>
#include <sys/kcollect.h>
#include <sys/malloc.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <sys/ktr.h>
#include <sys/serialize.h>

#include <sys/signal2.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mutex2.h>

#include <machine/cpu.h>
#include <machine/smp.h>

#include <vm/vm_extern.h>

struct tslpque {
	TAILQ_HEAD(, thread)	queue;
	const volatile void	*ident0;
	const volatile void	*ident1;
	const volatile void	*ident2;
	const volatile void	*ident3;
};

static void sched_setup (void *dummy);
SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL);
static void sched_dyninit (void *dummy);
SYSINIT(sched_dyninit, SI_BOOT1_DYNALLOC, SI_ORDER_FIRST, sched_dyninit, NULL);

int	lbolt;
void	*lbolt_syncer;
__read_mostly int tsleep_crypto_dump = 0;
__read_mostly int ncpus;
__read_mostly int ncpus_fit, ncpus_fit_mask;	/* note: mask not cpumask_t */
__read_mostly int safepri;
__read_mostly int tsleep_now_works;

MALLOC_DEFINE(M_TSLEEP, "tslpque", "tsleep queues");

#define __DEALL(ident)	__DEQUALIFY(void *, ident)

#if !defined(KTR_TSLEEP)
#define KTR_TSLEEP	KTR_ALL
#endif
KTR_INFO_MASTER(tsleep);
KTR_INFO(KTR_TSLEEP, tsleep, tsleep_beg, 0, "tsleep enter %p", const volatile void *ident);
KTR_INFO(KTR_TSLEEP, tsleep, tsleep_end, 1, "tsleep exit");
KTR_INFO(KTR_TSLEEP, tsleep, wakeup_beg, 2, "wakeup enter %p", const volatile void *ident);
KTR_INFO(KTR_TSLEEP, tsleep, wakeup_end, 3, "wakeup exit");
KTR_INFO(KTR_TSLEEP, tsleep, ilockfail, 4, "interlock failed %p", const volatile void *ident);

#define logtsleep1(name)	KTR_LOG(tsleep_ ## name)
#define logtsleep2(name, val)	KTR_LOG(tsleep_ ## name, val)

__exclusive_cache_line
struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };	/* load average, of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
__read_mostly
static fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};
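/*
 * For reference: loadav() further below applies a standard exponential
 * moving average using these constants.  With decay factor c = exp(-5/T)
 * for a T-second window and a 5-second sampling interval, each sample of
 * the runnable count n updates the average roughly as
 *
 *	avg = avg * c + n * (1 - c)
 *
 * which in FSCALE fixed point is the expression used in loadav():
 *
 *	(cexp[i] * avg + n * FSCALE * (FSCALE - cexp[i])) >> FSHIFT
 */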

static void endtsleep (void *);
static void loadav (void *arg);
static void schedcpu (void *arg);

__read_mostly static int pctcpu_decay = 10;
SYSCTL_INT(_kern, OID_AUTO, pctcpu_decay, CTLFLAG_RW,
	   &pctcpu_decay, 0, "");

/*
 * kernel uses `FSCALE', userland (SHOULD) use kern.fscale
 */
__read_mostly int fscale __unused = FSCALE;	/* exported to systat */
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");

/*
 * Issue a wakeup() from userland (debugging)
 */
static int
sysctl_wakeup(SYSCTL_HANDLER_ARGS)
{
	uint64_t ident = 1;
	int error = 0;

	if (req->newptr != NULL) {
		if (priv_check(curthread, PRIV_ROOT))
			return (EPERM);
		error = SYSCTL_IN(req, &ident, sizeof(ident));
		if (error)
			return error;
		kprintf("issue wakeup %016jx\n", ident);
		wakeup((void *)(intptr_t)ident);
	}
	if (req->oldptr != NULL) {
		error = SYSCTL_OUT(req, &ident, sizeof(ident));
	}
	return error;
}

static int
sysctl_wakeup_umtx(SYSCTL_HANDLER_ARGS)
{
	uint64_t ident = 1;
	int error = 0;

	if (req->newptr != NULL) {
		if (priv_check(curthread, PRIV_ROOT))
			return (EPERM);
		error = SYSCTL_IN(req, &ident, sizeof(ident));
		if (error)
			return error;
		kprintf("issue wakeup %016jx, PDOMAIN_UMTX\n", ident);
		wakeup_domain((void *)(intptr_t)ident, PDOMAIN_UMTX);
	}
	if (req->oldptr != NULL) {
		error = SYSCTL_OUT(req, &ident, sizeof(ident));
	}
	return error;
}

SYSCTL_PROC(_debug, OID_AUTO, wakeup, CTLTYPE_UQUAD|CTLFLAG_RW, 0, 0,
	    sysctl_wakeup, "Q", "issue wakeup(addr)");
SYSCTL_PROC(_debug, OID_AUTO, wakeup_umtx, CTLTYPE_UQUAD|CTLFLAG_RW, 0, 0,
	    sysctl_wakeup_umtx, "Q", "issue wakeup(addr, PDOMAIN_UMTX)");
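/*
 * Example usage (debugging only, requires root): writing a value to either
 * knob with sysctl(8), e.g. "sysctl debug.wakeup=<ident>", logs the ident
 * via kprintf() and issues the corresponding wakeup() on that address.
 */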

/*
 * Recompute process priorities, once a second.
 *
 * Since the userland schedulers are typically event oriented, if the
 * estcpu calculation at wakeup() time is not sufficient to make a
 * process runnable relative to other processes in the system we have
 * a 1-second recalc to help out.
 *
 * This code also allows us to store sysclock_t data in the process structure
 * without fear of an overrun, since sysclock_t are guaranteed to hold
 * several seconds worth of count.
 *
 * WARNING!  callouts can preempt normal threads.  However, they will not
 * preempt a thread holding a spinlock so we *can* safely use spinlocks.
 */
static int schedcpu_stats(struct proc *p, void *data __unused);
static int schedcpu_resource(struct proc *p, void *data __unused);

static void
schedcpu(void *arg)
{
	allproc_scan(schedcpu_stats, NULL, 1);
	allproc_scan(schedcpu_resource, NULL, 1);
	if (mycpu->gd_cpuid == 0) {
		wakeup((caddr_t)&lbolt);
		wakeup(lbolt_syncer);
	}
	callout_reset(&mycpu->gd_schedcpu_callout, hz, schedcpu, NULL);
}

/*
 * General process statistics once a second
 */
static int
schedcpu_stats(struct proc *p, void *data __unused)
{
	struct lwp *lp;

	/*
	 * Threads may not be completely set up if process in SIDL state.
	 */
	if (p->p_stat == SIDL)
		return(0);

	PHOLD(p);
	if (lwkt_trytoken(&p->p_token) == FALSE) {
		PRELE(p);
		return(0);
	}

	p->p_swtime++;
	FOREACH_LWP_IN_PROC(lp, p) {
		if (lp->lwp_stat == LSSLEEP) {
			++lp->lwp_slptime;
			if (lp->lwp_slptime == 1)
				p->p_usched->uload_update(lp);
		}

		/*
		 * Only recalculate processes that are active or have slept
		 * less than 2 seconds.  The schedulers understand this.
		 * Otherwise decay by 50% per second.
		 *
		 * NOTE: uload_update is called separately from kern_synch.c
		 *	 when slptime == 1, removing the thread's
		 *	 uload/ucount.
		 */
		if (lp->lwp_slptime <= 1) {
			p->p_usched->recalculate(lp);
		} else {
			int decay;

			decay = pctcpu_decay;
			cpu_ccfence();
			if (decay <= 1)
				decay = 1;
			if (decay > 100)
				decay = 100;
			lp->lwp_pctcpu = (lp->lwp_pctcpu * (decay - 1)) / decay;
		}
	}
	lwkt_reltoken(&p->p_token);
	lwkt_yield();
	PRELE(p);
	return(0);
}

/*
 * Resource checks.  XXX break out since ksignal/killproc can block,
 * limiting us to one process killed per second.  There is probably
 * a better way.
 */
static int
schedcpu_resource(struct proc *p, void *data __unused)
{
	u_int64_t ttime;
	struct lwp *lp;

	if (p->p_stat == SIDL)
		return(0);

	PHOLD(p);
	if (lwkt_trytoken(&p->p_token) == FALSE) {
		PRELE(p);
		return(0);
	}

	if (p->p_stat == SZOMB || p->p_limit == NULL) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return(0);
	}

	ttime = 0;
	FOREACH_LWP_IN_PROC(lp, p) {
		/*
		 * We may have caught an lp in the middle of being
		 * created, lwp_thread can be NULL.
		 */
		if (lp->lwp_thread) {
			ttime += lp->lwp_thread->td_sticks;
			ttime += lp->lwp_thread->td_uticks;
		}
	}

	switch(plimit_testcpulimit(p, ttime)) {
	case PLIMIT_TESTCPU_KILL:
		killproc(p, "exceeded maximum CPU limit");
		break;
	case PLIMIT_TESTCPU_XCPU:
		if ((p->p_flags & P_XCPU) == 0) {
			p->p_flags |= P_XCPU;
			ksignal(p, SIGXCPU);
		}
		break;
	default:
		break;
	}
	lwkt_reltoken(&p->p_token);
	lwkt_yield();
	PRELE(p);
	return(0);
}

/*
 * This is only used by ps.  Generate a cpu percentage use over
 * a period of one second.
 */
void
updatepcpu(struct lwp *lp, int cpticks, int ttlticks)
{
	fixpt_t acc;
	int remticks;

	acc = (cpticks << FSHIFT) / ttlticks;
	if (ttlticks >= ESTCPUFREQ) {
		lp->lwp_pctcpu = acc;
	} else {
		remticks = ESTCPUFREQ - ttlticks;
		lp->lwp_pctcpu = (acc * ttlticks + lp->lwp_pctcpu * remticks) /
				 ESTCPUFREQ;
	}
}

/*
 * Handy macros to calculate hash indices.  LOOKUP() calculates the
 * global cpumask hash index, TCHASHSHIFT() converts that into the
 * pcpu hash index.
 *
 * By making the pcpu hash arrays smaller we save a significant amount
 * of memory at very low cost.  The real cost is in IPIs, which are handled
 * by the much larger global cpumask hash table.
 */
#define LOOKUP_PRIME	66555444443333333ULL
#define LOOKUP(x)	((((uintptr_t)(x) + ((uintptr_t)(x) >> 18)) ^	\
			  LOOKUP_PRIME) % slpque_tablesize)
#define TCHASHSHIFT(x)	((x) >> 4)

__read_mostly static uint32_t	slpque_tablesize;
__read_mostly static cpumask_t *slpque_cpumasks;

SYSCTL_UINT(_kern, OID_AUTO, slpque_tablesize, CTLFLAG_RD, &slpque_tablesize,
	    0, "");

/*
 * This is a dandy function that allows us to interlock tsleep/wakeup
 * operations with unspecified upper level locks, such as lockmgr locks,
 * simply by holding a critical section.  The sequence is:
 *
 *	(acquire upper level lock)
 *	tsleep_interlock(blah)
 *	(release upper level lock)
 *	tsleep(blah, ...)
 *
 * Basically this function queues us on the tsleep queue without actually
 * descheduling us.  When tsleep() is later called with PINTERLOCK it
 * assumes the thread was already queued, otherwise it queues it there.
 *
 * Thus it is possible to receive the wakeup prior to going to sleep and
 * the race conditions are covered.
 */
static __inline void
_tsleep_interlock(globaldata_t gd, const volatile void *ident, int flags)
{
	thread_t td = gd->gd_curthread;
	struct tslpque *qp;
	uint32_t cid;
	uint32_t gid;

	if (ident == NULL) {
		kprintf("tsleep_interlock: NULL ident %s\n", td->td_comm);
		print_backtrace(5);
	}

	crit_enter_quick(td);
	if (td->td_flags & TDF_TSLEEPQ) {
		/*
		 * Shortcut if unchanged
		 */
		if (td->td_wchan == ident &&
		    td->td_wdomain == (flags & PDOMAIN_MASK)) {
			crit_exit_quick(td);
			return;
		}

		/*
		 * Remove current sleepq
		 */
		cid = LOOKUP(td->td_wchan);
		gid = TCHASHSHIFT(cid);
		qp = &gd->gd_tsleep_hash[gid];
		TAILQ_REMOVE(&qp->queue, td, td_sleepq);
		if (TAILQ_FIRST(&qp->queue) == NULL) {
			qp->ident0 = NULL;
			qp->ident1 = NULL;
			qp->ident2 = NULL;
			qp->ident3 = NULL;
			ATOMIC_CPUMASK_NANDBIT(slpque_cpumasks[cid],
					       gd->gd_cpuid);
		}
	} else {
		td->td_flags |= TDF_TSLEEPQ;
	}
	cid = LOOKUP(ident);
	gid = TCHASHSHIFT(cid);
	qp = &gd->gd_tsleep_hash[gid];
	TAILQ_INSERT_TAIL(&qp->queue, td, td_sleepq);
	if (qp->ident0 != ident && qp->ident1 != ident &&
	    qp->ident2 != ident && qp->ident3 != ident) {
		if (qp->ident0 == NULL)
			qp->ident0 = ident;
		else if (qp->ident1 == NULL)
			qp->ident1 = ident;
		else if (qp->ident2 == NULL)
			qp->ident2 = ident;
		else if (qp->ident3 == NULL)
			qp->ident3 = ident;
		else
			qp->ident0 = (void *)(intptr_t)-1;
	}
	ATOMIC_CPUMASK_ORBIT(slpque_cpumasks[cid], gd->gd_cpuid);
	td->td_wchan = ident;
	td->td_wdomain = flags & PDOMAIN_MASK;
	crit_exit_quick(td);
}

void
tsleep_interlock(const volatile void *ident, int flags)
{
	_tsleep_interlock(mycpu, ident, flags);
}

/*
 * Remove thread from sleepq.  Must be called with a critical section held.
 * The thread must not be migrating.
 */
static __inline void
_tsleep_remove(thread_t td)
{
	globaldata_t gd = mycpu;
	struct tslpque *qp;
	uint32_t cid;
	uint32_t gid;

	KKASSERT(td->td_gd == gd && IN_CRITICAL_SECT(td));
	KKASSERT((td->td_flags & TDF_MIGRATING) == 0);
	if (td->td_flags & TDF_TSLEEPQ) {
		td->td_flags &= ~TDF_TSLEEPQ;
		cid = LOOKUP(td->td_wchan);
		gid = TCHASHSHIFT(cid);
		qp = &gd->gd_tsleep_hash[gid];
		TAILQ_REMOVE(&qp->queue, td, td_sleepq);
		if (TAILQ_FIRST(&qp->queue) == NULL) {
			ATOMIC_CPUMASK_NANDBIT(slpque_cpumasks[cid],
					       gd->gd_cpuid);
		}
		td->td_wchan = NULL;
		td->td_wdomain = 0;
	}
}

void
tsleep_remove(thread_t td)
{
	_tsleep_remove(td);
}

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If flags includes PCATCH flag, signals are checked
 * before and after sleeping, else signals are not checked.  Returns 0 if
 * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the system
 * call should be interrupted by the signal (return EINTR).
 *
 * Note that if we are a process, we release_curproc() before messing with
 * the LWKT scheduler.
 *
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 *
 * WARNING!  This code can't block (short of switching away), or bad things
 * will happen.  No getting tokens, no blocking locks, etc.
 */
int
tsleep(const volatile void *ident, int flags, const char *wmesg, int timo)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p = td->td_proc;		/* may be NULL */
	globaldata_t gd;
	int sig;
	int catch;
	int error;
	int oldpri;
	struct callout thandle;

	/*
	 * Currently a severe hack.  Make sure any delayed wakeups
	 * are flushed before we sleep or we might deadlock on whatever
	 * event we are sleeping on.
	 */
	if (td->td_flags & TDF_DELAYED_WAKEUP)
		wakeup_end_delayed();

	/*
	 * NOTE: removed KTRPOINT, it could cause races due to blocking
	 * even in stable.  Just scrap it for now.
	 */
	if (!tsleep_crypto_dump && (tsleep_now_works == 0 || panicstr)) {
		/*
		 * After a panic, or before we actually have an operational
		 * softclock, just give interrupts a chance, then just return;
		 *
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		splz();
		oldpri = td->td_pri;
		lwkt_setpri_self(safepri);
		lwkt_switch();
		lwkt_setpri_self(oldpri);
		return (0);
	}
	logtsleep2(tsleep_beg, ident);
	gd = td->td_gd;
	KKASSERT(td != &gd->gd_idlethread);	/* you must be kidding! */

	/*
	 * NOTE: all of this occurs on the current cpu, including any
	 * callout-based wakeups, so a critical section is a sufficient
	 * interlock.
	 *
	 * The entire sequence through to where we actually sleep must
	 * run without breaking the critical section.
	 */
	catch = flags & PCATCH;
	error = 0;
	sig = 0;

	crit_enter_quick(td);

	KASSERT(ident != NULL, ("tsleep: no ident"));
	KASSERT(lp == NULL ||
		lp->lwp_stat == LSRUN ||	/* Obvious */
		lp->lwp_stat == LSSTOP,		/* Set in tstop */
		("tsleep %p %s %d",
			ident, wmesg, lp->lwp_stat));

	/*
	 * We interlock the sleep queue if the caller has not already done
	 * it for us.  This must be done before we potentially acquire any
	 * tokens or we can lose the wakeup.
	 */
	if ((flags & PINTERLOCKED) == 0) {
		_tsleep_interlock(gd, ident, flags);
	}

	/*
	 * Setup for the current process (if this is a process).  We must
	 * interlock with lwp_token to avoid remote wakeup races via
	 * setrunnable()
	 */
	if (lp) {
		lwkt_gettoken(&lp->lwp_token);

		/*
		 * If the umbrella process is in the SCORE state then
		 * make sure that the thread is flagged going into a
		 * normal sleep to allow the core dump to proceed, otherwise
		 * the coredump can end up waiting forever.  If the normal
		 * sleep is woken up, the thread will enter a stopped state
		 * upon return to userland.
		 *
		 * We do not want to interrupt or cause a thread exit at
		 * this juncture because that will mess-up the state the
		 * coredump is trying to save.
		 */
		if (p->p_stat == SCORE) {
			lwkt_gettoken(&p->p_token);
			if ((lp->lwp_mpflags & LWP_MP_WSTOP) == 0) {
				atomic_set_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
				++p->p_nstopped;
			}
			lwkt_reltoken(&p->p_token);
		}

		/*
		 * PCATCH requested.
		 */
		if (catch) {
			/*
			 * Early termination if PCATCH was set and a
			 * signal is pending, interlocked with the
			 * critical section.
			 *
			 * Early termination only occurs when tsleep() is
			 * entered while in a normal LSRUN state.
			 */
			if ((sig = CURSIG(lp)) != 0)
				goto resume;

			/*
			 * Causes ksignal to wake us up if a signal is
			 * received (interlocked with lp->lwp_token).
			 */
			lp->lwp_flags |= LWP_SINTR;
		}
	} else {
		KKASSERT(p == NULL);
	}

	/*
	 * Make sure the current process has been untangled from
	 * the userland scheduler and initialize slptime to start
	 * counting.
	 *
	 * NOTE: td->td_wakefromcpu is pre-set by the release function
	 *	 for the dfly scheduler, and then adjusted by _wakeup()
	 */
	if (lp) {
		p->p_usched->release_curproc(lp);
		lp->lwp_slptime = 0;
	}

	/*
	 * For PINTERLOCKED operation, TDF_TSLEEPQ might not be set if
	 * a wakeup() was processed before the thread could go to sleep.
	 *
	 * If TDF_TSLEEPQ is set, make sure the ident matches the recorded
	 * ident.  If it does not then the thread slept in between the
	 * caller's initial tsleep_interlock() call and the caller's tsleep()
	 * call.
	 *
	 * Extreme loads can cause the sending of an IPI (e.g. wakeup()'s)
	 * to process incoming IPIs, thus draining incoming wakeups.
	 */
	if ((td->td_flags & TDF_TSLEEPQ) == 0) {
		logtsleep2(ilockfail, ident);
		goto resume;
	} else if (td->td_wchan != ident ||
		   td->td_wdomain != (flags & PDOMAIN_MASK)) {
		logtsleep2(ilockfail, ident);
		goto resume;
	}

	/*
	 * scheduling is blocked while in a critical section.  Coincide
	 * the descheduled-by-tsleep flag with the descheduling of the
	 * lwkt.
	 *
	 * The timer callout is localized on our cpu and interlocked by
	 * our critical section.
	 */
	lwkt_deschedule_self(td);
	td->td_flags |= TDF_TSLEEP_DESCHEDULED;
	td->td_wmesg = wmesg;

	/*
	 * Setup the timeout, if any.  The timeout is only operable while
	 * the thread is flagged descheduled.
	 */
	KKASSERT((td->td_flags & TDF_TIMEOUT) == 0);
	if (timo) {
		callout_init_mp(&thandle);
		callout_reset(&thandle, timo, endtsleep, td);
	}

	/*
	 * Beddy bye bye.
	 */
	if (lp) {
		/*
		 * Ok, we are sleeping.  Place us in the SSLEEP state.
		 */
		KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);

		/*
		 * tstop() sets LSSTOP, so don't fiddle with that.
		 */
		if (lp->lwp_stat != LSSTOP)
			lp->lwp_stat = LSSLEEP;
		lp->lwp_ru.ru_nvcsw++;
		p->p_usched->uload_update(lp);
		lwkt_switch();

		/*
		 * And when we are woken up, put us back in LSRUN.  If we
		 * slept for over a second, recalculate our estcpu.
		 */
		lp->lwp_stat = LSRUN;
		if (lp->lwp_slptime) {
			p->p_usched->uload_update(lp);
			p->p_usched->recalculate(lp);
		}
		lp->lwp_slptime = 0;
	} else {
		lwkt_switch();
	}

	/*
	 * Make sure we haven't switched cpus while we were asleep.  It's
	 * not supposed to happen.  Cleanup our temporary flags.
	 */
	KKASSERT(gd == td->td_gd);

	/*
	 * Cleanup the timeout.  If the timeout has already occurred thandle
	 * has already been stopped, otherwise stop thandle.  If the timeout
	 * is running (the callout thread must be blocked trying to get
	 * lwp_token) then wait for us to get scheduled.
	 */
	if (timo) {
		while (td->td_flags & TDF_TIMEOUT_RUNNING) {
			/* else we won't get rescheduled! */
			if (lp->lwp_stat != LSSTOP)
				lp->lwp_stat = LSSLEEP;
			lwkt_deschedule_self(td);
			td->td_wmesg = "tsrace";
			lwkt_switch();
			kprintf("td %p %s: timeout race\n", td, td->td_comm);
		}
		if (td->td_flags & TDF_TIMEOUT) {
			td->td_flags &= ~TDF_TIMEOUT;
			error = EWOULDBLOCK;
		} else {
			/* does not block when on same cpu */
			callout_cancel(&thandle);
		}
	}
	td->td_flags &= ~TDF_TSLEEP_DESCHEDULED;

	/*
	 * Make sure we have been removed from the sleepq.  In most
	 * cases this will have been done for us already but it is
	 * possible for a scheduling IPI to be in-flight from a
	 * previous tsleep/tsleep_interlock() or due to a straight-out
	 * call to lwkt_schedule() (in the case of an interrupt thread),
	 * causing a spurious wakeup.
	 */
	_tsleep_remove(td);
	td->td_wmesg = NULL;

	/*
	 * Figure out the correct error return.  If interrupted by a
	 * signal we want to return EINTR or ERESTART.
	 */
resume:
	if (lp) {
		if (catch && error == 0) {
			if (sig != 0 || (sig = CURSIG(lp))) {
				if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
					error = EINTR;
				else
					error = ERESTART;
			}
		}

		lp->lwp_flags &= ~LWP_SINTR;

		/*
		 * Unconditionally set us to LSRUN on resume.  lwp_stat could
		 * be in a weird state due to the goto resume, particularly
		 * when tsleep() is called from tstop().
		 */
		lp->lwp_stat = LSRUN;
		lwkt_reltoken(&lp->lwp_token);
	}
	logtsleep1(tsleep_end);
	crit_exit_quick(td);

	return (error);
}

/*
 * Interlocked spinlock sleep.  An exclusively held spinlock must
 * be passed to ssleep().  The function will atomically release the
 * spinlock and tsleep on the ident, then reacquire the spinlock and
 * return.
 *
 * This routine is fairly important along the critical path, so optimize it
 * heavily.
 */
int
ssleep(const volatile void *ident, struct spinlock *spin, int flags,
       const char *wmesg, int timo)
{
	globaldata_t gd = mycpu;
	int error;

	_tsleep_interlock(gd, ident, flags);
	spin_unlock_quick(gd, spin);
	error = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
	KKASSERT(gd == mycpu);
	_spin_lock_quick(gd, spin, wmesg);

	return (error);
}
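/*
 * Illustrative sketch of the interlocked sleep above, using a hypothetical
 * condition protected by a spinlock; ssleep() drops and reacquires the
 * spinlock around the sleep so the wakeup cannot be lost:
 *
 *	spin_lock(&some_spin);
 *	while (some_condition == 0)
 *		ssleep(&some_condition, &some_spin, 0, "waitc", 0);
 *	spin_unlock(&some_spin);
 *
 * The waker sets the condition while holding the spinlock and then calls
 * wakeup(&some_condition).
 */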
int
lksleep(const volatile void *ident, struct lock *lock, int flags,
	const char *wmesg, int timo)
{
	globaldata_t gd = mycpu;
	int error;

	_tsleep_interlock(gd, ident, flags);
	lockmgr(lock, LK_RELEASE);
	error = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
	lockmgr(lock, LK_EXCLUSIVE);

	return (error);
}

/*
 * Interlocked mutex sleep.  An exclusively held mutex must be passed
 * to mtxsleep().  The function will atomically release the mutex
 * and tsleep on the ident, then reacquire the mutex and return.
 */
int
mtxsleep(const volatile void *ident, struct mtx *mtx, int flags,
	 const char *wmesg, int timo)
{
	globaldata_t gd = mycpu;
	int error;

	_tsleep_interlock(gd, ident, flags);
	mtx_unlock(mtx);
	error = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
	mtx_lock_ex_quick(mtx);

	return (error);
}

/*
 * Interlocked serializer sleep.  An exclusively held serializer must
 * be passed to zsleep().  The function will atomically release
 * the serializer and tsleep on the ident, then reacquire the serializer
 * and return.
 */
int
zsleep(const volatile void *ident, struct lwkt_serialize *slz, int flags,
       const char *wmesg, int timo)
{
	globaldata_t gd = mycpu;
	int ret;

	ASSERT_SERIALIZED(slz);

	_tsleep_interlock(gd, ident, flags);
	lwkt_serialize_exit(slz);
	ret = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
	lwkt_serialize_enter(slz);

	return ret;
}

/*
 * Directly block on the LWKT thread by descheduling it.  This
 * is much faster than tsleep(), but the only legal way to wake
 * us up is to directly schedule the thread.
 *
 * Setting TDF_SINTR will cause new signals to directly schedule us.
 *
 * This routine must be called while in a critical section.
 */
int
lwkt_sleep(const char *wmesg, int flags)
{
	thread_t td = curthread;
	int sig;

	if ((flags & PCATCH) == 0 || td->td_lwp == NULL) {
		td->td_flags |= TDF_BLOCKED;
		td->td_wmesg = wmesg;
		lwkt_deschedule_self(td);
		lwkt_switch();
		td->td_wmesg = NULL;
		td->td_flags &= ~TDF_BLOCKED;
		return(0);
	}
	if ((sig = CURSIG(td->td_lwp)) != 0) {
		if (SIGISMEMBER(td->td_proc->p_sigacts->ps_sigintr, sig))
			return(EINTR);
		else
			return(ERESTART);

	}
	td->td_flags |= TDF_BLOCKED | TDF_SINTR;
	td->td_wmesg = wmesg;
	lwkt_deschedule_self(td);
	lwkt_switch();
	td->td_flags &= ~(TDF_BLOCKED | TDF_SINTR);
	td->td_wmesg = NULL;
	return(0);
}

/*
 * Implement the timeout for tsleep.
 *
 * This type of callout timeout is scheduled on the same cpu the process
 * is sleeping on.  Also, at the moment, the MP lock is held.
 */
static void
endtsleep(void *arg)
{
	thread_t td = arg;
	struct lwp *lp;

	/*
	 * We are going to have to get the lwp_token, which means we might
	 * block.  This can race a tsleep getting woken up by other means
	 * so set TDF_TIMEOUT_RUNNING to force the tsleep to wait for our
	 * processing to complete (sorry tsleep!).
	 *
	 * We can safely set td_flags because td MUST be on the same cpu
	 * as we are.
	 */
	KKASSERT(td->td_gd == mycpu);
	crit_enter();
	td->td_flags |= TDF_TIMEOUT_RUNNING | TDF_TIMEOUT;

	/*
	 * This can block but TDF_TIMEOUT_RUNNING will prevent the thread
	 * from exiting the tsleep on us.  The flag is interlocked by virtue
	 * of lp being on the same cpu as we are.
	 */
	if ((lp = td->td_lwp) != NULL)
		lwkt_gettoken(&lp->lwp_token);

	KKASSERT(td->td_flags & TDF_TSLEEP_DESCHEDULED);

	if (lp) {
		/*
		 * callout timer should normally never be set in tstop()
		 * because it passes a timeout of 0.  However, there is a
		 * case during thread exit (which SSTOP's all the threads)
		 * for which tstop() must break out and can (properly) leave
		 * the thread in LSSTOP.
		 */
		KKASSERT(lp->lwp_stat != LSSTOP ||
			 (lp->lwp_mpflags & LWP_MP_WEXIT));
		setrunnable(lp);
		lwkt_reltoken(&lp->lwp_token);
	} else {
		_tsleep_remove(td);
		lwkt_schedule(td);
	}
	KKASSERT(td->td_gd == mycpu);
	td->td_flags &= ~TDF_TIMEOUT_RUNNING;
	crit_exit();
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 * count may be zero or one only.
 *
 * The domain encodes the sleep/wakeup domain, flags, plus the originating
 * cpu.
 *
 * This call may run without the MP lock held.  We can only manipulate thread
 * state on the cpu owning the thread.  We CANNOT manipulate process state
 * at all.
 *
 * _wakeup() can be passed to an IPI so we can't use (const volatile
 * void *ident).
 */
static void
_wakeup(void *ident, int domain)
{
	struct tslpque *qp;
	struct thread *td;
	struct thread *ntd;
	globaldata_t gd;
	cpumask_t mask;
	uint32_t cid;
	uint32_t gid;
	int wids = 0;

	crit_enter();
	logtsleep2(wakeup_beg, ident);
	gd = mycpu;
	cid = LOOKUP(ident);
	gid = TCHASHSHIFT(cid);
	qp = &gd->gd_tsleep_hash[gid];
restart:
	for (td = TAILQ_FIRST(&qp->queue); td != NULL; td = ntd) {
		ntd = TAILQ_NEXT(td, td_sleepq);
		if (td->td_wchan == ident &&
		    td->td_wdomain == (domain & PDOMAIN_MASK)
		) {
			KKASSERT(td->td_gd == gd);
			_tsleep_remove(td);
			td->td_wakefromcpu = PWAKEUP_DECODE(domain);
			if (td->td_flags & TDF_TSLEEP_DESCHEDULED) {
				lwkt_schedule(td);
				if (domain & PWAKEUP_ONE)
					goto done;
			}
			goto restart;
		}
		if (td->td_wchan == qp->ident0)
			wids |= 1;
		else if (td->td_wchan == qp->ident1)
			wids |= 2;
		else if (td->td_wchan == qp->ident2)
			wids |= 4;
		else if (td->td_wchan == qp->ident3)
			wids |= 8;
		else
			wids |= 16;	/* force ident0 to be retained (-1) */
	}

	/*
	 * Because a bunch of cpumask array entries cover the same queue, it
	 * is possible for our bit to remain set in some of them and cause
	 * spurious wakeup IPIs later on.  Make sure that the bit is cleared
	 * when a spurious IPI occurs to prevent further spurious IPIs.
	 */
	if (TAILQ_FIRST(&qp->queue) == NULL) {
		ATOMIC_CPUMASK_NANDBIT(slpque_cpumasks[cid], gd->gd_cpuid);
		qp->ident0 = NULL;
		qp->ident1 = NULL;
		qp->ident2 = NULL;
		qp->ident3 = NULL;
	} else {
		if ((wids & 1) == 0) {
			if ((wids & 16) == 0) {
				qp->ident0 = NULL;
			} else {
				KKASSERT(qp->ident0 == (void *)(intptr_t)-1);
			}
		}
		if ((wids & 2) == 0)
			qp->ident1 = NULL;
		if ((wids & 4) == 0)
			qp->ident2 = NULL;
		if ((wids & 8) == 0)
			qp->ident3 = NULL;
	}

	/*
	 * We finished checking the current cpu but there still may be
	 * more work to do.  Either wakeup_one was requested and no matching
	 * thread was found, or a normal wakeup was requested and we have
	 * to continue checking cpus.
	 *
	 * It should be noted that this scheme is actually less expensive than
	 * the old scheme when waking up multiple threads, since we send
	 * only one IPI message per target candidate which may then schedule
	 * multiple threads.  Before we could have wound up sending an IPI
	 * message for each thread on the target cpu (!= current cpu) that
	 * needed to be woken up.
	 *
	 * NOTE: Wakeups occurring on remote cpus are asynchronous.  This
	 *	 should be ok since we are passing idents in the IPI rather
	 *	 than thread pointers.
	 *
	 * NOTE: We MUST mfence (or use an atomic op) prior to reading
	 *	 the cpumask, as another cpu may have written to it in
	 *	 a fashion interlocked with whatever the caller did before
	 *	 calling wakeup().  Otherwise we might miss the interaction
	 *	 (kern_mutex.c can cause this problem).
	 *
	 *	 lfence is insufficient as it may allow a written state to
	 *	 reorder around the cpumask load.
	 */
	if ((domain & PWAKEUP_MYCPU) == 0) {
		globaldata_t tgd;
		const volatile void *id0;
		int n;

		cpu_mfence();
		/* cpu_lfence(); */
		mask = slpque_cpumasks[cid];
		CPUMASK_ANDMASK(mask, gd->gd_other_cpus);
		while (CPUMASK_TESTNZERO(mask)) {
			n = BSRCPUMASK(mask);
			CPUMASK_NANDBIT(mask, n);
			tgd = globaldata_find(n);

			/*
			 * Both ident0 compares must be from a single load
			 * to avoid ident0 update races crossing the two
			 * compares.
			 */
			qp = &tgd->gd_tsleep_hash[gid];
			id0 = qp->ident0;
			cpu_ccfence();
			if (id0 == (void *)(intptr_t)-1) {
				lwkt_send_ipiq2(tgd, _wakeup, ident,
						domain | PWAKEUP_MYCPU);
				++tgd->gd_cnt.v_wakeup_colls;
			} else if (id0 == ident ||
				   qp->ident1 == ident ||
				   qp->ident2 == ident ||
				   qp->ident3 == ident) {
				lwkt_send_ipiq2(tgd, _wakeup, ident,
						domain | PWAKEUP_MYCPU);
			}
		}
#if 0
		if (CPUMASK_TESTNZERO(mask)) {
			lwkt_send_ipiq2_mask(mask, _wakeup, ident,
					     domain | PWAKEUP_MYCPU);
		}
#endif
	}
done:
	logtsleep1(wakeup_end);
	crit_exit();
}

/*
 * Wakeup all threads tsleep()ing on the specified ident, on all cpus
 */
void
wakeup(const volatile void *ident)
{
	globaldata_t gd = mycpu;
	thread_t td = gd->gd_curthread;

	if (td && (td->td_flags & TDF_DELAYED_WAKEUP)) {
		/*
		 * If we are in a delayed wakeup section, record up to two
		 * wakeups in a per-CPU queue and issue them when we block
		 * or exit the delayed wakeup section.
		 */
		if (atomic_cmpset_ptr(&gd->gd_delayed_wakeup[0], NULL, ident))
			return;
		if (atomic_cmpset_ptr(&gd->gd_delayed_wakeup[1], NULL, ident))
			return;

		ident = atomic_swap_ptr(__DEQUALIFY(volatile void **, &gd->gd_delayed_wakeup[1]),
					__DEALL(ident));
		ident = atomic_swap_ptr(__DEQUALIFY(volatile void **, &gd->gd_delayed_wakeup[0]),
					__DEALL(ident));
	}

	_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, gd->gd_cpuid));
}

/*
 * Wakeup one thread tsleep()ing on the specified ident, on any cpu.
 */
void
wakeup_one(const volatile void *ident)
{
	/* XXX potentially round-robin the first responding cpu */
	_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mycpu->gd_cpuid) |
				PWAKEUP_ONE);
}

/*
 * Wakeup threads tsleep()ing on the specified ident on the current cpu
 * only.
 */
void
wakeup_mycpu(const volatile void *ident)
{
	_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mycpu->gd_cpuid) |
				PWAKEUP_MYCPU);
}

/*
 * Wakeup one thread tsleep()ing on the specified ident on the current cpu
 * only.
 */
void
wakeup_mycpu_one(const volatile void *ident)
{
	/* XXX potentially round-robin the first responding cpu */
	_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mycpu->gd_cpuid) |
				PWAKEUP_MYCPU | PWAKEUP_ONE);
}

/*
 * Wakeup all threads tsleep()ing on the specified ident on the specified cpu
 * only.
 */
void
wakeup_oncpu(globaldata_t gd, const volatile void *ident)
{
	globaldata_t mygd = mycpu;
	if (gd == mycpu) {
		_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mygd->gd_cpuid) |
					PWAKEUP_MYCPU);
	} else {
		lwkt_send_ipiq2(gd, _wakeup, __DEALL(ident),
				PWAKEUP_ENCODE(0, mygd->gd_cpuid) |
				PWAKEUP_MYCPU);
	}
}

/*
 * Wakeup one thread tsleep()ing on the specified ident on the specified cpu
 * only.
 */
void
wakeup_oncpu_one(globaldata_t gd, const volatile void *ident)
{
	globaldata_t mygd = mycpu;
	if (gd == mygd) {
		_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mygd->gd_cpuid) |
					PWAKEUP_MYCPU | PWAKEUP_ONE);
	} else {
		lwkt_send_ipiq2(gd, _wakeup, __DEALL(ident),
				PWAKEUP_ENCODE(0, mygd->gd_cpuid) |
				PWAKEUP_MYCPU | PWAKEUP_ONE);
	}
}

/*
 * Wakeup all threads waiting on the specified ident that slept using
 * the specified domain, on all cpus.
 */
void
wakeup_domain(const volatile void *ident, int domain)
{
	_wakeup(__DEALL(ident), PWAKEUP_ENCODE(domain, mycpu->gd_cpuid));
}

/*
 * Wakeup one thread waiting on the specified ident that slept using
 * the specified domain, on any cpu.
 */
void
wakeup_domain_one(const volatile void *ident, int domain)
{
	/* XXX potentially round-robin the first responding cpu */
	_wakeup(__DEALL(ident),
		PWAKEUP_ENCODE(domain, mycpu->gd_cpuid) | PWAKEUP_ONE);
}

void
wakeup_start_delayed(void)
{
	globaldata_t gd = mycpu;

	crit_enter();
	gd->gd_curthread->td_flags |= TDF_DELAYED_WAKEUP;
	crit_exit();
}

void
wakeup_end_delayed(void)
{
	globaldata_t gd = mycpu;

	if (gd->gd_curthread->td_flags & TDF_DELAYED_WAKEUP) {
		crit_enter();
		gd->gd_curthread->td_flags &= ~TDF_DELAYED_WAKEUP;
		if (gd->gd_delayed_wakeup[0] || gd->gd_delayed_wakeup[1]) {
			if (gd->gd_delayed_wakeup[0]) {
				wakeup(gd->gd_delayed_wakeup[0]);
				gd->gd_delayed_wakeup[0] = NULL;
			}
			if (gd->gd_delayed_wakeup[1]) {
				wakeup(gd->gd_delayed_wakeup[1]);
				gd->gd_delayed_wakeup[1] = NULL;
			}
		}
		crit_exit();
	}
}
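/*
 * Illustrative sketch of the delayed wakeup bracket (hypothetical caller):
 * wakeup() calls issued between the start/end pair are queued per-cpu (up
 * to two idents) and delivered when the section ends or the thread blocks:
 *
 *	wakeup_start_delayed();
 *	... do work that may generate wakeups ...
 *	wakeup(&some_ident);
 *	wakeup_end_delayed();
 */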

/*
 * setrunnable()
 *
 * Make a process runnable.  lp->lwp_token must be held on call and this
 * function must be called from the cpu owning lp.
 *
 * This only has an effect if we are in LSSTOP or LSSLEEP.
 */
void
setrunnable(struct lwp *lp)
{
	thread_t td = lp->lwp_thread;

	ASSERT_LWKT_TOKEN_HELD(&lp->lwp_token);
	KKASSERT(td->td_gd == mycpu);
	crit_enter();
	if (lp->lwp_stat == LSSTOP)
		lp->lwp_stat = LSSLEEP;
	if (lp->lwp_stat == LSSLEEP) {
		_tsleep_remove(td);
		lwkt_schedule(td);
	} else if (td->td_flags & TDF_SINTR) {
		lwkt_schedule(td);
	}
	crit_exit();
}

/*
 * The process is stopped due to some condition, usually because p_stat is
 * set to SSTOP, but also possibly due to being traced.
 *
 * Caller must hold p->p_token
 *
 * NOTE!  If the caller sets SSTOP, the caller must also clear P_WAITED
 * because the parent may check the child's status before the child actually
 * gets to this routine.
 *
 * This routine is called with the current lwp only, typically just
 * before returning to userland if the process state is detected as
 * possibly being in a stopped state.
 */
void
tstop(void)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p = lp->lwp_proc;
	struct proc *q;

	lwkt_gettoken(&lp->lwp_token);
	crit_enter();

	/*
	 * If LWP_MP_WSTOP is set, we were sleeping
	 * while our process was stopped.  At this point
	 * we were already counted as stopped.
	 */
	if ((lp->lwp_mpflags & LWP_MP_WSTOP) == 0) {
		/*
		 * If we're the last thread to stop, signal
		 * our parent.
		 */
		p->p_nstopped++;
		atomic_set_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
		wakeup(&p->p_nstopped);
		if (p->p_nstopped == p->p_nthreads) {
			/*
			 * Token required to interlock kern_wait()
			 */
			q = p->p_pptr;
			PHOLD(q);
			lwkt_gettoken(&q->p_token);
			p->p_flags &= ~P_WAITED;
			wakeup(p->p_pptr);
			if ((q->p_sigacts->ps_flag & PS_NOCLDSTOP) == 0)
				ksignal(q, SIGCHLD);
			lwkt_reltoken(&q->p_token);
			PRELE(q);
		}
	}

	/*
	 * Wait here while in a stopped state, interlocked with lwp_token.
	 * We must break-out if the whole process is trying to exit.
	 */
	while (STOPLWP(p, lp)) {
		lp->lwp_stat = LSSTOP;
		tsleep(p, 0, "stop", 0);
	}
	p->p_nstopped--;
	atomic_clear_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
	crit_exit();
	lwkt_reltoken(&lp->lwp_token);
}

/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.  This is a pcpu callout.
 *
 * We segment the lwp scan on a pcpu basis.  This does NOT
 * mean the associated lwps are on this cpu, it is done
 * just to break the work up.
 *
 * The callout on cpu0 rolls up the stats from the other
 * cpus.
 */
static int loadav_count_runnable(struct lwp *p, void *data);

static void
loadav(void *arg)
{
	globaldata_t gd = mycpu;
	struct loadavg *avg;
	int i, nrun;

	nrun = 0;
	alllwp_scan(loadav_count_runnable, &nrun, 1);
	gd->gd_loadav_nrunnable = nrun;
	if (gd->gd_cpuid == 0) {
		avg = &averunnable;
		nrun = 0;
		for (i = 0; i < ncpus; ++i)
			nrun += globaldata_find(i)->gd_loadav_nrunnable;
		for (i = 0; i < 3; i++) {
			avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
			    (long)nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
		}
	}

	/*
	 * Schedule the next update to occur after 5 seconds, but add a
	 * random variation to avoid synchronisation with processes that
	 * run at regular intervals.
	 */
	callout_reset(&gd->gd_loadav_callout,
		      hz * 4 + (int)(krandom() % (hz * 2 + 1)),
		      loadav, NULL);
}

static int
loadav_count_runnable(struct lwp *lp, void *data)
{
	int *nrunp = data;
	thread_t td;

	switch (lp->lwp_stat) {
	case LSRUN:
		if ((td = lp->lwp_thread) == NULL)
			break;
		if (td->td_flags & TDF_BLOCKED)
			break;
		++*nrunp;
		break;
	default:
		break;
	}
	lwkt_yield();
	return(0);
}

/*
 * Regular data collection
 */
static uint64_t
collect_load_callback(int n)
{
	int fscale = averunnable.fscale;

	return ((averunnable.ldavg[0] * 100 + (fscale >> 1)) / fscale);
}

static void
sched_setup(void *dummy __unused)
{
	globaldata_t save_gd = mycpu;
	globaldata_t gd;
	int n;

	kcollect_register(KCOLLECT_LOAD, "load", collect_load_callback,
			  KCOLLECT_SCALE(KCOLLECT_LOAD_FORMAT, 0));

	/*
	 * Kick off timeout driven events by calling first time.  We
	 * split the work across available cpus to help scale it,
	 * it can eat a lot of cpu when there are a lot of processes
	 * on the system.
	 */
	for (n = 0; n < ncpus; ++n) {
		gd = globaldata_find(n);
		lwkt_setcpu_self(gd);
		callout_init_mp(&gd->gd_loadav_callout);
		callout_init_mp(&gd->gd_schedcpu_callout);
		schedcpu(NULL);
		loadav(NULL);
	}
	lwkt_setcpu_self(save_gd);
}

/*
 * Extremely early initialization, dummy-up the tables so we don't have
 * to conditionalize for NULL in _wakeup() and tsleep_interlock().  Even
 * though the system isn't blocking this early, these functions still
 * try to access the hash table.
 *
 * This setup will be overridden once sched_dyninit() -> sleep_gdinit()
 * is called.
 */
void
sleep_early_gdinit(globaldata_t gd)
{
	static struct tslpque dummy_slpque;
	static cpumask_t dummy_cpumasks;

	slpque_tablesize = 1;
	gd->gd_tsleep_hash = &dummy_slpque;
	slpque_cpumasks = &dummy_cpumasks;
	TAILQ_INIT(&dummy_slpque.queue);
}

/*
 * PCPU initialization.  Called after KMALLOC is operational, by
 * sched_dyninit() for cpu 0, and by mi_gdinit() for other cpus later.
 *
 * WARNING! The pcpu hash table is smaller than the global cpumask
 *	    hash table, which can save us a lot of memory when maxproc
 *	    is set high.
 */
void
sleep_gdinit(globaldata_t gd)
{
	struct thread *td;
	size_t hash_size;
	uint32_t n;
	uint32_t i;

	/*
	 * This shouldn't happen, that is, there shouldn't be any threads
	 * waiting on the dummy tsleep queue this early in the boot.
	 */
	if (gd->gd_cpuid == 0) {
		struct tslpque *qp = &gd->gd_tsleep_hash[0];
		TAILQ_FOREACH(td, &qp->queue, td_sleepq) {
			kprintf("SLEEP_GDINIT SWITCH %s\n", td->td_comm);
		}
	}

	/*
	 * Note that we have to allocate one extra slot because we are
	 * shifting a modulo value.  TCHASHSHIFT(slpque_tablesize - 1) can
	 * return the same value as TCHASHSHIFT(slpque_tablesize).
	 */
	n = TCHASHSHIFT(slpque_tablesize) + 1;

	hash_size = sizeof(struct tslpque) * n;
	gd->gd_tsleep_hash = (void *)kmem_alloc3(&kernel_map, hash_size,
						 VM_SUBSYS_GD,
						 KM_CPU(gd->gd_cpuid));
	memset(gd->gd_tsleep_hash, 0, hash_size);
	for (i = 0; i < n; ++i)
		TAILQ_INIT(&gd->gd_tsleep_hash[i].queue);
}

/*
 * Dynamic initialization after the memory system is operational.
 */
static void
sched_dyninit(void *dummy __unused)
{
	int tblsize;
	int tblsize2;
	int n;

	/*
	 * Calculate table size for slpque hash.  We want a prime number
	 * large enough to avoid overloading slpque_cpumasks when the
	 * system has a large number of sleeping processes, which will
	 * spam IPIs on wakeup().
	 *
	 * While it is true this is really a per-lwp factor, generally
	 * speaking the maxproc limit is a good metric to go by.
	 */
	for (tblsize = maxproc | 1; ; tblsize += 2) {
		if (tblsize % 3 == 0)
			continue;
		if (tblsize % 5 == 0)
			continue;
		tblsize2 = (tblsize / 2) | 1;
		for (n = 7; n < tblsize2; n += 2) {
			if (tblsize % n == 0)
				break;
		}
		if (n == tblsize2)
			break;
	}

	/*
	 * PIDs are currently limited to 6 digits.  Cap the table size
	 * at double this.
	 */
	if (tblsize > 2000003)
		tblsize = 2000003;

	slpque_tablesize = tblsize;
	slpque_cpumasks = kmalloc(sizeof(*slpque_cpumasks) * slpque_tablesize,
				  M_TSLEEP, M_WAITOK | M_ZERO);
	sleep_gdinit(mycpu);
}