/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 * $FreeBSD: src/sys/kern/kern_synch.c,v 1.87.2.6 2002/10/13 07:29:53 kbyanc Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/priv.h>
#include <sys/kcollect.h>
#include <sys/malloc.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <sys/ktr.h>
#include <sys/serialize.h>

#include <sys/signal2.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mutex2.h>

#include <machine/cpu.h>
#include <machine/smp.h>

#include <vm/vm_extern.h>

struct tslpque {
	TAILQ_HEAD(, thread)	queue;
	const volatile void	*ident0;
	const volatile void	*ident1;
	const volatile void	*ident2;
	const volatile void	*ident3;
};

static void sched_setup (void *dummy);
SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL);
static void sched_dyninit (void *dummy);
SYSINIT(sched_dyninit, SI_BOOT1_DYNALLOC, SI_ORDER_FIRST, sched_dyninit, NULL);

int	lbolt;
void	*lbolt_syncer;
__read_mostly int tsleep_crypto_dump = 0;
__read_mostly int ncpus;
__read_mostly int ncpus_fit, ncpus_fit_mask;	/* note: mask not cpumask_t */
__read_mostly int safepri;
__read_mostly int tsleep_now_works;

MALLOC_DEFINE(M_TSLEEP, "tslpque", "tsleep queues");

#define __DEALL(ident)	__DEQUALIFY(void *, ident)

#if !defined(KTR_TSLEEP)
#define KTR_TSLEEP	KTR_ALL
#endif
KTR_INFO_MASTER(tsleep);
KTR_INFO(KTR_TSLEEP, tsleep, tsleep_beg, 0, "tsleep enter %p", const volatile void *ident);
KTR_INFO(KTR_TSLEEP, tsleep, tsleep_end, 1, "tsleep exit");
KTR_INFO(KTR_TSLEEP, tsleep, wakeup_beg, 2, "wakeup enter %p", const volatile void *ident);
KTR_INFO(KTR_TSLEEP, tsleep, wakeup_end, 3, "wakeup exit");
KTR_INFO(KTR_TSLEEP, tsleep, ilockfail, 4, "interlock failed %p", const volatile void *ident);

#define logtsleep1(name)	KTR_LOG(tsleep_ ## name)
#define logtsleep2(name, val)	KTR_LOG(tsleep_ ## name, val)

__exclusive_cache_line
struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };	/* load average, of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
__read_mostly
static fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};
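
/*
 * Informational note: with one sample every 5 seconds, an N-minute
 * exponential average decays by exp(-5 / (N * 60)) per sample, which is
 * where the constants above come from (e.g. exp(-1/12) for the 1 minute
 * average).  loadav() below applies the fixed-point update
 *
 *	ldavg = (cexp * ldavg + nrun * FSCALE * (FSCALE - cexp)) >> FSHIFT
 *
 * which corresponds to avg' = d * avg + (1 - d) * nrun with d = cexp/FSCALE.
 */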

static void endtsleep (void *);
static void loadav (void *arg);
static void schedcpu (void *arg);

__read_mostly static int pctcpu_decay = 10;
SYSCTL_INT(_kern, OID_AUTO, pctcpu_decay, CTLFLAG_RW,
	   &pctcpu_decay, 0, "");

/*
 * kernel uses `FSCALE', userland (SHOULD) use kern.fscale
 */
__read_mostly int fscale __unused = FSCALE;	/* exported to systat */
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");

/*
 * Issue a wakeup() from userland (debugging)
 */
static int
sysctl_wakeup(SYSCTL_HANDLER_ARGS)
{
	uint64_t ident = 1;
	int error = 0;

	if (req->newptr != NULL) {
		if (priv_check(curthread, PRIV_ROOT))
			return (EPERM);
		error = SYSCTL_IN(req, &ident, sizeof(ident));
		if (error)
			return error;
		kprintf("issue wakeup %016jx\n", ident);
		wakeup((void *)(intptr_t)ident);
	}
	if (req->oldptr != NULL) {
		error = SYSCTL_OUT(req, &ident, sizeof(ident));
	}
	return error;
}

static int
sysctl_wakeup_umtx(SYSCTL_HANDLER_ARGS)
{
	uint64_t ident = 1;
	int error = 0;

	if (req->newptr != NULL) {
		if (priv_check(curthread, PRIV_ROOT))
			return (EPERM);
		error = SYSCTL_IN(req, &ident, sizeof(ident));
		if (error)
			return error;
		kprintf("issue wakeup %016jx, PDOMAIN_UMTX\n", ident);
		wakeup_domain((void *)(intptr_t)ident, PDOMAIN_UMTX);
	}
	if (req->oldptr != NULL) {
		error = SYSCTL_OUT(req, &ident, sizeof(ident));
	}
	return error;
}

SYSCTL_PROC(_debug, OID_AUTO, wakeup, CTLTYPE_UQUAD|CTLFLAG_RW, 0, 0,
	    sysctl_wakeup, "Q", "issue wakeup(addr)");
SYSCTL_PROC(_debug, OID_AUTO, wakeup_umtx, CTLTYPE_UQUAD|CTLFLAG_RW, 0, 0,
	    sysctl_wakeup_umtx, "Q", "issue wakeup(addr, PDOMAIN_UMTX)");

/*
 * Recompute process priorities, once a second.
 *
 * Since the userland schedulers are typically event oriented, if the
 * estcpu calculation at wakeup() time is not sufficient to make a
 * process runnable relative to other processes in the system we have
 * a 1-second recalc to help out.
 *
 * This code also allows us to store sysclock_t data in the process structure
 * without fear of an overrun, since sysclock_t is guaranteed to hold
 * several seconds worth of count.
 *
 * WARNING!  callouts can preempt normal threads.  However, they will not
 *	     preempt a thread holding a spinlock so we *can* safely use
 *	     spinlocks.
 */
static int schedcpu_stats(struct proc *p, void *data __unused);
static int schedcpu_resource(struct proc *p, void *data __unused);

static void
schedcpu(void *arg)
{
	allproc_scan(schedcpu_stats, NULL, 1);
	allproc_scan(schedcpu_resource, NULL, 1);
	if (mycpu->gd_cpuid == 0) {
		wakeup((caddr_t)&lbolt);
		wakeup(lbolt_syncer);
	}
	callout_reset(&mycpu->gd_schedcpu_callout, hz, schedcpu, NULL);
}

/*
 * General process statistics once a second
 */
static int
schedcpu_stats(struct proc *p, void *data __unused)
{
	struct lwp *lp;

	/*
	 * Threads may not be completely set up if the process is in the
	 * SIDL state.
	 */
	if (p->p_stat == SIDL)
		return(0);

	PHOLD(p);
	if (lwkt_trytoken(&p->p_token) == FALSE) {
		PRELE(p);
		return(0);
	}

	p->p_swtime++;
	FOREACH_LWP_IN_PROC(lp, p) {
		if (lp->lwp_stat == LSSLEEP) {
			++lp->lwp_slptime;
			if (lp->lwp_slptime == 1)
				p->p_usched->uload_update(lp);
		}

		/*
		 * Only recalculate processes that are active or have slept
		 * less than 2 seconds.  The schedulers understand this.
		 * Otherwise decay by 50% per second.
		 *
		 * NOTE: uload_update is called separately from kern_synch.c
		 *	 when slptime == 1, removing the thread's
		 *	 uload/ucount.
		 */
		if (lp->lwp_slptime <= 1) {
			p->p_usched->recalculate(lp);
		} else {
			int decay;

			decay = pctcpu_decay;
			cpu_ccfence();
			if (decay <= 1)
				decay = 1;
			if (decay > 100)
				decay = 100;
			lp->lwp_pctcpu = (lp->lwp_pctcpu * (decay - 1)) / decay;
		}
	}
	lwkt_reltoken(&p->p_token);
	lwkt_yield();
	PRELE(p);
	return(0);
}

/*
 * Resource checks.  XXX break out since ksignal/killproc can block,
 * limiting us to one process killed per second.  There is probably
 * a better way.
 */
static int
schedcpu_resource(struct proc *p, void *data __unused)
{
	u_int64_t ttime;
	struct lwp *lp;

	if (p->p_stat == SIDL)
		return(0);

	PHOLD(p);
	if (lwkt_trytoken(&p->p_token) == FALSE) {
		PRELE(p);
		return(0);
	}

	if (p->p_stat == SZOMB || p->p_limit == NULL) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return(0);
	}

	ttime = 0;
	FOREACH_LWP_IN_PROC(lp, p) {
		/*
		 * We may have caught an lp in the middle of being
		 * created, lwp_thread can be NULL.
		 */
		if (lp->lwp_thread) {
			ttime += lp->lwp_thread->td_sticks;
			ttime += lp->lwp_thread->td_uticks;
		}
	}

	switch(plimit_testcpulimit(p, ttime)) {
	case PLIMIT_TESTCPU_KILL:
		killproc(p, "exceeded maximum CPU limit");
		break;
	case PLIMIT_TESTCPU_XCPU:
		if ((p->p_flags & P_XCPU) == 0) {
			p->p_flags |= P_XCPU;
			ksignal(p, SIGXCPU);
		}
		break;
	default:
		break;
	}
	lwkt_reltoken(&p->p_token);
	lwkt_yield();
	PRELE(p);
	return(0);
}

/*
 * This is only used by ps.  Generate a cpu percentage use over
 * a period of one second.
 */
void
updatepcpu(struct lwp *lp, int cpticks, int ttlticks)
{
	fixpt_t acc;
	int remticks;

	acc = (cpticks << FSHIFT) / ttlticks;
	if (ttlticks >= ESTCPUFREQ) {
		lp->lwp_pctcpu = acc;
	} else {
		remticks = ESTCPUFREQ - ttlticks;
		lp->lwp_pctcpu = (acc * ttlticks + lp->lwp_pctcpu * remticks) /
				 ESTCPUFREQ;
	}
}

/*
 * Handy macros to calculate hash indices.  LOOKUP() calculates the
 * global cpumask hash index, TCHASHSHIFT() converts that into the
 * pcpu hash index.
 *
 * By making the pcpu hash arrays smaller we save a significant amount
 * of memory at very low cost.  The real cost is in IPIs, which are handled
 * by the much larger global cpumask hash table.
 */
#define LOOKUP_PRIME	66555444443333333ULL
#define LOOKUP(x)	((((uintptr_t)(x) + ((uintptr_t)(x) >> 18)) ^	\
			  LOOKUP_PRIME) % slpque_tablesize)
#define TCHASHSHIFT(x)	((x) >> 4)
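
/*
 * Illustrative note: the typical lookup sequence used by
 * _tsleep_interlock(), _tsleep_remove() and _wakeup() below is
 *
 *	cid = LOOKUP(ident);			(slpque_cpumasks[] index)
 *	gid = TCHASHSHIFT(cid);			(gd_tsleep_hash[] index)
 *	qp  = &gd->gd_tsleep_hash[gid];
 */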

__read_mostly static uint32_t	slpque_tablesize;
__read_mostly static cpumask_t	*slpque_cpumasks;

SYSCTL_UINT(_kern, OID_AUTO, slpque_tablesize, CTLFLAG_RD, &slpque_tablesize,
	    0, "");

/*
 * This is a dandy function that allows us to interlock tsleep/wakeup
 * operations with unspecified upper level locks, such as lockmgr locks,
 * simply by holding a critical section.  The sequence is:
 *
 *	(acquire upper level lock)
 *	tsleep_interlock(blah)
 *	(release upper level lock)
 *	tsleep(blah, ...)
 *
 * Basically this function queues us on the tsleep queue without actually
 * descheduling us.  When tsleep() is later called with PINTERLOCKED it
 * assumes the thread was already queued, otherwise it queues it there.
 *
 * Thus it is possible to receive the wakeup prior to going to sleep and
 * the race conditions are covered.
 */
static __inline void
_tsleep_interlock(globaldata_t gd, const volatile void *ident, int flags)
{
	thread_t td = gd->gd_curthread;
	struct tslpque *qp;
	uint32_t cid;
	uint32_t gid;

	if (ident == NULL) {
		kprintf("tsleep_interlock: NULL ident %s\n", td->td_comm);
		print_backtrace(5);
	}

	crit_enter_quick(td);
	if (td->td_flags & TDF_TSLEEPQ) {
		/*
		 * Shortcut if unchanged
		 */
		if (td->td_wchan == ident &&
		    td->td_wdomain == (flags & PDOMAIN_MASK)) {
			crit_exit_quick(td);
			return;
		}

		/*
		 * Remove current sleepq
		 */
		cid = LOOKUP(td->td_wchan);
		gid = TCHASHSHIFT(cid);
		qp = &gd->gd_tsleep_hash[gid];
		TAILQ_REMOVE(&qp->queue, td, td_sleepq);
		if (TAILQ_FIRST(&qp->queue) == NULL) {
			qp->ident0 = NULL;
			qp->ident1 = NULL;
			qp->ident2 = NULL;
			qp->ident3 = NULL;
			ATOMIC_CPUMASK_NANDBIT(slpque_cpumasks[cid],
					       gd->gd_cpuid);
		}
	} else {
		td->td_flags |= TDF_TSLEEPQ;
	}
	cid = LOOKUP(ident);
	gid = TCHASHSHIFT(cid);
	qp = &gd->gd_tsleep_hash[gid];
	TAILQ_INSERT_TAIL(&qp->queue, td, td_sleepq);
	if (qp->ident0 != ident && qp->ident1 != ident &&
	    qp->ident2 != ident && qp->ident3 != ident) {
		if (qp->ident0 == NULL)
			qp->ident0 = ident;
		else if (qp->ident1 == NULL)
			qp->ident1 = ident;
		else if (qp->ident2 == NULL)
			qp->ident2 = ident;
		else if (qp->ident3 == NULL)
			qp->ident3 = ident;
		else
			qp->ident0 = (void *)(intptr_t)-1;
	}
	ATOMIC_CPUMASK_ORBIT(slpque_cpumasks[cid], gd->gd_cpuid);
	td->td_wchan = ident;
	td->td_wdomain = flags & PDOMAIN_MASK;
	crit_exit_quick(td);
}

void
tsleep_interlock(const volatile void *ident, int flags)
{
	_tsleep_interlock(mycpu, ident, flags);
}
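
/*
 * Illustrative sketch of the pattern above as used by the interlocked
 * sleep wrappers later in this file (ssleep(), lksleep(), mtxsleep(),
 * zsleep()), e.g. for a spinlock:
 *
 *	_tsleep_interlock(gd, ident, flags);
 *	spin_unlock_quick(gd, spin);
 *	error = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
 *	_spin_lock_quick(gd, spin, wmesg);
 */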

/*
 * Remove thread from sleepq.  Must be called with a critical section held.
 * The thread must not be migrating.
 */
static __inline void
_tsleep_remove(thread_t td)
{
	globaldata_t gd = mycpu;
	struct tslpque *qp;
	uint32_t cid;
	uint32_t gid;

	KKASSERT(td->td_gd == gd && IN_CRITICAL_SECT(td));
	KKASSERT((td->td_flags & TDF_MIGRATING) == 0);
	if (td->td_flags & TDF_TSLEEPQ) {
		td->td_flags &= ~TDF_TSLEEPQ;
		cid = LOOKUP(td->td_wchan);
		gid = TCHASHSHIFT(cid);
		qp = &gd->gd_tsleep_hash[gid];
		TAILQ_REMOVE(&qp->queue, td, td_sleepq);
		if (TAILQ_FIRST(&qp->queue) == NULL) {
			ATOMIC_CPUMASK_NANDBIT(slpque_cpumasks[cid],
					       gd->gd_cpuid);
		}
		td->td_wchan = NULL;
		td->td_wdomain = 0;
	}
}

void
tsleep_remove(thread_t td)
{
	_tsleep_remove(td);
}

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If flags includes PCATCH, signals are checked
 * before and after sleeping, otherwise signals are not checked.  Returns
 * 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set
 * and a signal needs to be delivered, ERESTART is returned if the current
 * system call should be restarted if possible, and EINTR is returned if
 * the system call should be interrupted by the signal.
 *
 * Note that if we are a process, we release_curproc() before messing with
 * the LWKT scheduler.
 *
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 *
 * WARNING!  This code can't block (short of switching away), or bad things
 *	     will happen.  No getting tokens, no blocking locks, etc.
 */
int
tsleep(const volatile void *ident, int flags, const char *wmesg, int timo)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p = td->td_proc;		/* may be NULL */
	globaldata_t gd;
	int sig;
	int catch;
	int error;
	int oldpri;
	struct callout thandle1;
	struct _callout thandle2;

	/*
	 * Currently a severe hack.  Make sure any delayed wakeups
	 * are flushed before we sleep or we might deadlock on whatever
	 * event we are sleeping on.
	 */
	if (td->td_flags & TDF_DELAYED_WAKEUP)
		wakeup_end_delayed();

	/*
	 * NOTE: removed KTRPOINT, it could cause races due to blocking
	 * even in stable.  Just scrap it for now.
	 */
	if (!tsleep_crypto_dump && (tsleep_now_works == 0 || panicstr)) {
		/*
		 * After a panic, or before we actually have an operational
		 * softclock, just give interrupts a chance, then just return;
		 *
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		splz();
		oldpri = td->td_pri;
		lwkt_setpri_self(safepri);
		lwkt_switch();
		lwkt_setpri_self(oldpri);
		return (0);
	}
	logtsleep2(tsleep_beg, ident);
	gd = td->td_gd;
	KKASSERT(td != &gd->gd_idlethread);	/* you must be kidding! */

	/*
	 * NOTE: all of this occurs on the current cpu, including any
	 * callout-based wakeups, so a critical section is a sufficient
	 * interlock.
	 *
	 * The entire sequence through to where we actually sleep must
	 * run without breaking the critical section.
	 */
	catch = flags & PCATCH;
	error = 0;
	sig = 0;

	crit_enter_quick(td);

	KASSERT(ident != NULL, ("tsleep: no ident"));
	KASSERT(lp == NULL ||
		lp->lwp_stat == LSRUN ||	/* Obvious */
		lp->lwp_stat == LSSTOP,		/* Set in tstop */
		("tsleep %p %s %d",
		 ident, wmesg, lp->lwp_stat));

	/*
	 * We interlock the sleep queue if the caller has not already done
	 * it for us.  This must be done before we potentially acquire any
	 * tokens or we can lose the wakeup.
	 */
	if ((flags & PINTERLOCKED) == 0) {
		_tsleep_interlock(gd, ident, flags);
	}

	/*
	 * Setup for the current process (if this is a process).  We must
	 * interlock with lwp_token to avoid remote wakeup races via
	 * setrunnable()
	 */
	if (lp) {
		lwkt_gettoken(&lp->lwp_token);

		/*
		 * If the umbrella process is in the SCORE state then
		 * make sure that the thread is flagged going into a
		 * normal sleep to allow the core dump to proceed, otherwise
		 * the coredump can end up waiting forever.  If the normal
		 * sleep is woken up, the thread will enter a stopped state
		 * upon return to userland.
		 *
		 * We do not want to interrupt or cause a thread exit at
		 * this juncture because that will mess-up the state the
		 * coredump is trying to save.
		 */
		if (p->p_stat == SCORE) {
			lwkt_gettoken(&p->p_token);
			if ((lp->lwp_mpflags & LWP_MP_WSTOP) == 0) {
				atomic_set_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
				++p->p_nstopped;
			}
			lwkt_reltoken(&p->p_token);
		}

		/*
		 * PCATCH requested.
		 */
		if (catch) {
			/*
			 * Early termination if PCATCH was set and a
			 * signal is pending, interlocked with the
			 * critical section.
			 *
			 * Early termination only occurs when tsleep() is
			 * entered while in a normal LSRUN state.
			 */
			if ((sig = CURSIG(lp)) != 0)
				goto resume;

			/*
			 * Causes ksignal to wake us up if a signal is
			 * received (interlocked with lp->lwp_token).
			 */
			lp->lwp_flags |= LWP_SINTR;
		}
	} else {
		KKASSERT(p == NULL);
	}

	/*
	 * Make sure the current process has been untangled from
	 * the userland scheduler and initialize slptime to start
	 * counting.
	 *
	 * NOTE: td->td_wakefromcpu is pre-set by the release function
	 *	 for the dfly scheduler, and then adjusted by _wakeup()
	 */
	if (lp) {
		p->p_usched->release_curproc(lp);
		lp->lwp_slptime = 0;
	}

	/*
	 * For PINTERLOCKED operation, TDF_TSLEEPQ might not be set if
	 * a wakeup() was processed before the thread could go to sleep.
	 *
	 * If TDF_TSLEEPQ is set, make sure the ident matches the recorded
	 * ident.  If it does not, then the thread slept in between the
	 * caller's initial tsleep_interlock() call and the caller's tsleep()
	 * call.
	 *
	 * Extreme loads can cause the sending of an IPI (e.g. wakeup()'s)
	 * to process incoming IPIs, thus draining incoming wakeups.
	 */
	if ((td->td_flags & TDF_TSLEEPQ) == 0) {
		logtsleep2(ilockfail, ident);
		goto resume;
	} else if (td->td_wchan != ident ||
		   td->td_wdomain != (flags & PDOMAIN_MASK)) {
		logtsleep2(ilockfail, ident);
		goto resume;
	}

	/*
	 * Scheduling is blocked while in a critical section.  Coincide
	 * the descheduled-by-tsleep flag with the descheduling of the
	 * lwkt.
	 *
	 * The timer callout is localized on our cpu and interlocked by
	 * our critical section.
	 */
	lwkt_deschedule_self(td);
	td->td_flags |= TDF_TSLEEP_DESCHEDULED;
	td->td_wmesg = wmesg;

	/*
	 * Setup the timeout, if any.  The timeout is only operable while
	 * the thread is flagged descheduled.
	 */
	KKASSERT((td->td_flags & TDF_TIMEOUT) == 0);
	if (timo) {
		_callout_setup_quick(&thandle1, &thandle2, timo, endtsleep, td);
	}

	/*
	 * Beddy bye bye.
	 */
	if (lp) {
		/*
		 * Ok, we are sleeping.  Place us in the SSLEEP state.
		 */
		KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);

		/*
		 * tstop() sets LSSTOP, so don't fiddle with that.
		 */
		if (lp->lwp_stat != LSSTOP)
			lp->lwp_stat = LSSLEEP;
		lp->lwp_ru.ru_nvcsw++;
		p->p_usched->uload_update(lp);
		lwkt_switch();

		/*
		 * And when we are woken up, put us back in LSRUN.  If we
		 * slept for over a second, recalculate our estcpu.
		 */
		lp->lwp_stat = LSRUN;
		if (lp->lwp_slptime) {
			p->p_usched->uload_update(lp);
			p->p_usched->recalculate(lp);
		}
		lp->lwp_slptime = 0;
	} else {
		lwkt_switch();
	}

	/*
	 * Make sure we haven't switched cpus while we were asleep.  It's
	 * not supposed to happen.  Cleanup our temporary flags.
	 */
	KKASSERT(gd == td->td_gd);

	/*
	 * Cleanup the timeout.  If the timeout has already occurred thandle
	 * has already been stopped, otherwise stop thandle.  If the timeout
	 * is running (the callout thread must be blocked trying to get
	 * lwp_token) then wait for us to get scheduled.
	 */
	if (timo) {
		while (td->td_flags & TDF_TIMEOUT_RUNNING) {
			/* else we won't get rescheduled! */
			if (lp->lwp_stat != LSSTOP)
				lp->lwp_stat = LSSLEEP;
			lwkt_deschedule_self(td);
			td->td_wmesg = "tsrace";
			lwkt_switch();
			kprintf("td %p %s: timeout race\n", td, td->td_comm);
		}
		if (td->td_flags & TDF_TIMEOUT) {
			td->td_flags &= ~TDF_TIMEOUT;
			error = EWOULDBLOCK;
		} else {
			/*
			 * We are on the same cpu so use the quick version
			 * which is guaranteed not to block or race.
			 */
			_callout_cancel_quick(&thandle2);
		}
	}
	td->td_flags &= ~TDF_TSLEEP_DESCHEDULED;

	/*
	 * Make sure we have been removed from the sleepq.  In most
	 * cases this will have been done for us already but it is
	 * possible for a scheduling IPI to be in-flight from a
	 * previous tsleep/tsleep_interlock() or due to a straight-out
	 * call to lwkt_schedule() (in the case of an interrupt thread),
	 * causing a spurious wakeup.
	 */
	_tsleep_remove(td);
	td->td_wmesg = NULL;

	/*
	 * Figure out the correct error return.  If interrupted by a
	 * signal we want to return EINTR or ERESTART.
	 */
resume:
	if (lp) {
		if (catch && error == 0) {
			if (sig != 0 || (sig = CURSIG(lp))) {
				if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
					error = EINTR;
				else
					error = ERESTART;
			}
		}

		lp->lwp_flags &= ~LWP_SINTR;

		/*
		 * Unconditionally set us to LSRUN on resume.  lwp_stat could
		 * be in a weird state due to the goto resume, particularly
		 * when tsleep() is called from tstop().
		 */
		lp->lwp_stat = LSRUN;
		lwkt_reltoken(&lp->lwp_token);
	}
	logtsleep1(tsleep_end);
	crit_exit_quick(td);

	return (error);
}
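
/*
 * Illustrative example only; "sc" and "sc_ready" are hypothetical names,
 * not part of this file.  A typical consumer waits on the address of a
 * flag and checks the documented return values,
 *
 *	while (sc->sc_ready == 0) {
 *		error = tsleep(&sc->sc_ready, PCATCH, "scwait", hz);
 *		if (error == EINTR || error == ERESTART)
 *			return (error);
 *		if (error == EWOULDBLOCK)
 *			break;			(1 second timeout expired)
 *	}
 *
 * while the producer sets sc->sc_ready = 1 and calls wakeup(&sc->sc_ready).
 */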

/*
 * Interlocked spinlock sleep.  An exclusively held spinlock must
 * be passed to ssleep().  The function will atomically release the
 * spinlock and tsleep on the ident, then reacquire the spinlock and
 * return.
 *
 * This routine is fairly important along the critical path, so optimize it
 * heavily.
 */
int
ssleep(const volatile void *ident, struct spinlock *spin, int flags,
       const char *wmesg, int timo)
{
	globaldata_t gd = mycpu;
	int error;

	_tsleep_interlock(gd, ident, flags);
	spin_unlock_quick(gd, spin);
	error = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
	KKASSERT(gd == mycpu);
	_spin_lock_quick(gd, spin, wmesg);

	return (error);
}

int
lksleep(const volatile void *ident, struct lock *lock, int flags,
	const char *wmesg, int timo)
{
	globaldata_t gd = mycpu;
	int error;

	_tsleep_interlock(gd, ident, flags);
	lockmgr(lock, LK_RELEASE);
	error = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
	lockmgr(lock, LK_EXCLUSIVE);

	return (error);
}

/*
 * Interlocked mutex sleep.  An exclusively held mutex must be passed
 * to mtxsleep().  The function will atomically release the mutex
 * and tsleep on the ident, then reacquire the mutex and return.
 */
int
mtxsleep(const volatile void *ident, struct mtx *mtx, int flags,
	 const char *wmesg, int timo)
{
	globaldata_t gd = mycpu;
	int error;

	_tsleep_interlock(gd, ident, flags);
	mtx_unlock(mtx);
	error = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
	mtx_lock_ex_quick(mtx);

	return (error);
}

/*
 * Interlocked serializer sleep.  An exclusively held serializer must
 * be passed to zsleep().  The function will atomically release
 * the serializer and tsleep on the ident, then reacquire the serializer
 * and return.
 */
int
zsleep(const volatile void *ident, struct lwkt_serialize *slz, int flags,
       const char *wmesg, int timo)
{
	globaldata_t gd = mycpu;
	int ret;

	ASSERT_SERIALIZED(slz);

	_tsleep_interlock(gd, ident, flags);
	lwkt_serialize_exit(slz);
	ret = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
	lwkt_serialize_enter(slz);

	return ret;
}

/*
 * Directly block on the LWKT thread by descheduling it.  This
 * is much faster than tsleep(), but the only legal way to wake
 * us up is to directly schedule the thread.
 *
 * Setting TDF_SINTR will cause new signals to directly schedule us.
 *
 * This routine must be called while in a critical section.
 */
int
lwkt_sleep(const char *wmesg, int flags)
{
	thread_t td = curthread;
	int sig;

	if ((flags & PCATCH) == 0 || td->td_lwp == NULL) {
		td->td_flags |= TDF_BLOCKED;
		td->td_wmesg = wmesg;
		lwkt_deschedule_self(td);
		lwkt_switch();
		td->td_wmesg = NULL;
		td->td_flags &= ~TDF_BLOCKED;
		return(0);
	}
	if ((sig = CURSIG(td->td_lwp)) != 0) {
		if (SIGISMEMBER(td->td_proc->p_sigacts->ps_sigintr, sig))
			return(EINTR);
		else
			return(ERESTART);
	}
	td->td_flags |= TDF_BLOCKED | TDF_SINTR;
	td->td_wmesg = wmesg;
	lwkt_deschedule_self(td);
	lwkt_switch();
	td->td_flags &= ~(TDF_BLOCKED | TDF_SINTR);
	td->td_wmesg = NULL;
	return(0);
}

/*
 * Implement the timeout for tsleep.
 *
 * This type of callout timeout is scheduled on the same cpu the process
 * is sleeping on.  Also, at the moment, the MP lock is held.
 */
static void
endtsleep(void *arg)
{
	thread_t td = arg;
	struct lwp *lp;

	/*
	 * We are going to have to get the lwp_token, which means we might
	 * block.  This can race a tsleep getting woken up by other means
	 * so set TDF_TIMEOUT_RUNNING to force the tsleep to wait for our
	 * processing to complete (sorry tsleep!).
	 *
	 * We can safely set td_flags because td MUST be on the same cpu
	 * as we are.
	 */
	KKASSERT(td->td_gd == mycpu);
	crit_enter();
	td->td_flags |= TDF_TIMEOUT_RUNNING | TDF_TIMEOUT;

	/*
	 * This can block but TDF_TIMEOUT_RUNNING will prevent the thread
	 * from exiting the tsleep on us.  The flag is interlocked by virtue
	 * of lp being on the same cpu as we are.
	 */
	if ((lp = td->td_lwp) != NULL)
		lwkt_gettoken(&lp->lwp_token);

	KKASSERT(td->td_flags & TDF_TSLEEP_DESCHEDULED);

	if (lp) {
		/*
		 * callout timer should normally never be set in tstop()
		 * because it passes a timeout of 0.  However, there is a
		 * case during thread exit (which SSTOP's all the threads)
		 * for which tstop() must break out and can (properly) leave
		 * the thread in LSSTOP.
		 */
		KKASSERT(lp->lwp_stat != LSSTOP ||
			 (lp->lwp_mpflags & LWP_MP_WEXIT));
		setrunnable(lp);
		lwkt_reltoken(&lp->lwp_token);
	} else {
		_tsleep_remove(td);
		lwkt_schedule(td);
	}
	KKASSERT(td->td_gd == mycpu);
	td->td_flags &= ~TDF_TIMEOUT_RUNNING;
	crit_exit();
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 * count may be zero or one only.
 *
 * The domain encodes the sleep/wakeup domain, flags, plus the originating
 * cpu.
 *
 * This call may run without the MP lock held.  We can only manipulate thread
 * state on the cpu owning the thread.  We CANNOT manipulate process state
 * at all.
 *
 * _wakeup() can be passed to an IPI so we can't use (const volatile
 * void *ident).
 */
static void
_wakeup(void *ident, int domain)
{
	struct tslpque *qp;
	struct thread *td;
	struct thread *ntd;
	globaldata_t gd;
	cpumask_t mask;
	uint32_t cid;
	uint32_t gid;
	int wids = 0;

	crit_enter();
	logtsleep2(wakeup_beg, ident);
	gd = mycpu;
	cid = LOOKUP(ident);
	gid = TCHASHSHIFT(cid);
	qp = &gd->gd_tsleep_hash[gid];
restart:
	for (td = TAILQ_FIRST(&qp->queue); td != NULL; td = ntd) {
		ntd = TAILQ_NEXT(td, td_sleepq);
		if (td->td_wchan == ident &&
		    td->td_wdomain == (domain & PDOMAIN_MASK)
		) {
			KKASSERT(td->td_gd == gd);
			_tsleep_remove(td);
			td->td_wakefromcpu = PWAKEUP_DECODE(domain);
			if (td->td_flags & TDF_TSLEEP_DESCHEDULED) {
				lwkt_schedule(td);
				if (domain & PWAKEUP_ONE)
					goto done;
			}
			goto restart;
		}
		if (td->td_wchan == qp->ident0)
			wids |= 1;
		else if (td->td_wchan == qp->ident1)
			wids |= 2;
		else if (td->td_wchan == qp->ident2)
			wids |= 4;
		else if (td->td_wchan == qp->ident3)
			wids |= 8;
		else
			wids |= 16;	/* force ident0 to be retained (-1) */
	}

	/*
	 * Because a bunch of cpumask array entries cover the same queue, it
	 * is possible for our bit to remain set in some of them and cause
	 * spurious wakeup IPIs later on.  Make sure that the bit is cleared
	 * when a spurious IPI occurs to prevent further spurious IPIs.
	 */
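	/*
	 * Descriptive note: bits 0-3 of wids indicate that some remaining
	 * sleeper on this queue still matches ident0-ident3 respectively;
	 * bit 4 means a remaining sleeper matched none of the recorded
	 * idents (slot overflow), in which case ident0 should be -1 and
	 * must be retained.
	 */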
	if (TAILQ_FIRST(&qp->queue) == NULL) {
		ATOMIC_CPUMASK_NANDBIT(slpque_cpumasks[cid], gd->gd_cpuid);
		qp->ident0 = NULL;
		qp->ident1 = NULL;
		qp->ident2 = NULL;
		qp->ident3 = NULL;
	} else {
		if ((wids & 1) == 0) {
			if ((wids & 16) == 0) {
				qp->ident0 = NULL;
			} else {
				KKASSERT(qp->ident0 == (void *)(intptr_t)-1);
			}
		}
		if ((wids & 2) == 0)
			qp->ident1 = NULL;
		if ((wids & 4) == 0)
			qp->ident2 = NULL;
		if ((wids & 8) == 0)
			qp->ident3 = NULL;
	}

	/*
	 * We finished checking the current cpu but there still may be
	 * more work to do.  Either wakeup_one was requested and no matching
	 * thread was found, or a normal wakeup was requested and we have
	 * to continue checking cpus.
	 *
	 * It should be noted that this scheme is actually less expensive than
	 * the old scheme when waking up multiple threads, since we send
	 * only one IPI message per target candidate which may then schedule
	 * multiple threads.  Previously we could have wound up sending an IPI
	 * message for each thread on the target cpu (!= current cpu) that
	 * needed to be woken up.
	 *
	 * NOTE: Wakeups occurring on remote cpus are asynchronous.  This
	 *	 should be ok since we are passing idents in the IPI rather
	 *	 than thread pointers.
	 *
	 * NOTE: We MUST mfence (or use an atomic op) prior to reading
	 *	 the cpumask, as another cpu may have written to it in
	 *	 a fashion interlocked with whatever the caller did before
	 *	 calling wakeup().  Otherwise we might miss the interaction
	 *	 (kern_mutex.c can cause this problem).
	 *
	 *	 lfence is insufficient as it may allow a written state to
	 *	 reorder around the cpumask load.
	 */
	if ((domain & PWAKEUP_MYCPU) == 0) {
		globaldata_t tgd;
		const volatile void *id0;
		int n;

		cpu_mfence();
		/* cpu_lfence(); */
		mask = slpque_cpumasks[cid];
		CPUMASK_ANDMASK(mask, gd->gd_other_cpus);
		while (CPUMASK_TESTNZERO(mask)) {
			n = BSRCPUMASK(mask);
			CPUMASK_NANDBIT(mask, n);
			tgd = globaldata_find(n);

			/*
			 * Both ident0 compares must be made from a single
			 * load to avoid ident0 update races crossing the
			 * two compares.
			 */
			qp = &tgd->gd_tsleep_hash[gid];
			id0 = qp->ident0;
			cpu_ccfence();
			if (id0 == (void *)(intptr_t)-1) {
				lwkt_send_ipiq2(tgd, _wakeup, ident,
						domain | PWAKEUP_MYCPU);
				++tgd->gd_cnt.v_wakeup_colls;
			} else if (id0 == ident ||
				   qp->ident1 == ident ||
				   qp->ident2 == ident ||
				   qp->ident3 == ident) {
				lwkt_send_ipiq2(tgd, _wakeup, ident,
						domain | PWAKEUP_MYCPU);
			}
		}
#if 0
		if (CPUMASK_TESTNZERO(mask)) {
			lwkt_send_ipiq2_mask(mask, _wakeup, ident,
					     domain | PWAKEUP_MYCPU);
		}
#endif
	}
done:
	logtsleep1(wakeup_end);
	crit_exit();
}

/*
 * Wakeup all threads tsleep()ing on the specified ident, on all cpus
 */
void
wakeup(const volatile void *ident)
{
	globaldata_t gd = mycpu;
	thread_t td = gd->gd_curthread;

	if (td && (td->td_flags & TDF_DELAYED_WAKEUP)) {
		/*
		 * If we are in a delayed wakeup section, record up to two
		 * wakeups in a per-CPU queue and issue them when we block
		 * or exit the delayed wakeup section.
		 */
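		/*
		 * Descriptive note: if both slots are already in use the
		 * code below rotates them -- the new ident is parked in
		 * slot 1, the previous slot 1 entry moves to slot 0, and
		 * the displaced slot 0 entry falls through to be woken up
		 * immediately.
		 */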
		if (atomic_cmpset_ptr(&gd->gd_delayed_wakeup[0], NULL, ident))
			return;
		if (atomic_cmpset_ptr(&gd->gd_delayed_wakeup[1], NULL, ident))
			return;

		ident = atomic_swap_ptr(__DEQUALIFY(volatile void **, &gd->gd_delayed_wakeup[1]),
					__DEALL(ident));
		ident = atomic_swap_ptr(__DEQUALIFY(volatile void **, &gd->gd_delayed_wakeup[0]),
					__DEALL(ident));
	}

	_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, gd->gd_cpuid));
}

/*
 * Wakeup one thread tsleep()ing on the specified ident, on any cpu.
 */
void
wakeup_one(const volatile void *ident)
{
	/* XXX potentially round-robin the first responding cpu */
	_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mycpu->gd_cpuid) |
				PWAKEUP_ONE);
}

/*
 * Wakeup threads tsleep()ing on the specified ident on the current cpu
 * only.
 */
void
wakeup_mycpu(const volatile void *ident)
{
	_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mycpu->gd_cpuid) |
				PWAKEUP_MYCPU);
}

/*
 * Wakeup one thread tsleep()ing on the specified ident on the current cpu
 * only.
 */
void
wakeup_mycpu_one(const volatile void *ident)
{
	/* XXX potentially round-robin the first responding cpu */
	_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mycpu->gd_cpuid) |
				PWAKEUP_MYCPU | PWAKEUP_ONE);
}

/*
 * Wakeup all threads tsleep()ing on the specified ident on the specified cpu
 * only.
 */
void
wakeup_oncpu(globaldata_t gd, const volatile void *ident)
{
	globaldata_t mygd = mycpu;

	if (gd == mygd) {
		_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mygd->gd_cpuid) |
					PWAKEUP_MYCPU);
	} else {
		lwkt_send_ipiq2(gd, _wakeup, __DEALL(ident),
				PWAKEUP_ENCODE(0, mygd->gd_cpuid) |
				PWAKEUP_MYCPU);
	}
}

/*
 * Wakeup one thread tsleep()ing on the specified ident on the specified cpu
 * only.
 */
void
wakeup_oncpu_one(globaldata_t gd, const volatile void *ident)
{
	globaldata_t mygd = mycpu;

	if (gd == mygd) {
		_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mygd->gd_cpuid) |
					PWAKEUP_MYCPU | PWAKEUP_ONE);
	} else {
		lwkt_send_ipiq2(gd, _wakeup, __DEALL(ident),
				PWAKEUP_ENCODE(0, mygd->gd_cpuid) |
				PWAKEUP_MYCPU | PWAKEUP_ONE);
	}
}

/*
 * Wakeup all threads waiting on the specified ident that slept using
 * the specified domain, on all cpus.
 */
void
wakeup_domain(const volatile void *ident, int domain)
{
	_wakeup(__DEALL(ident), PWAKEUP_ENCODE(domain, mycpu->gd_cpuid));
}
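
/*
 * Illustrative note on usage (see the debug.wakeup_umtx sysctl handler near
 * the top of this file): domain-qualified sleeps and wakeups only pair with
 * each other.  For example,
 *
 *	wakeup_domain((void *)(intptr_t)ident, PDOMAIN_UMTX);
 *
 * only wakes threads whose tsleep()/tsleep_interlock() flags carried
 * PDOMAIN_UMTX.
 */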

/*
 * Wakeup one thread waiting on the specified ident that slept using
 * the specified domain, on any cpu.
 */
void
wakeup_domain_one(const volatile void *ident, int domain)
{
	/* XXX potentially round-robin the first responding cpu */
	_wakeup(__DEALL(ident),
		PWAKEUP_ENCODE(domain, mycpu->gd_cpuid) | PWAKEUP_ONE);
}

void
wakeup_start_delayed(void)
{
	globaldata_t gd = mycpu;

	crit_enter();
	gd->gd_curthread->td_flags |= TDF_DELAYED_WAKEUP;
	crit_exit();
}

void
wakeup_end_delayed(void)
{
	globaldata_t gd = mycpu;

	if (gd->gd_curthread->td_flags & TDF_DELAYED_WAKEUP) {
		crit_enter();
		gd->gd_curthread->td_flags &= ~TDF_DELAYED_WAKEUP;
		if (gd->gd_delayed_wakeup[0] || gd->gd_delayed_wakeup[1]) {
			if (gd->gd_delayed_wakeup[0]) {
				wakeup(gd->gd_delayed_wakeup[0]);
				gd->gd_delayed_wakeup[0] = NULL;
			}
			if (gd->gd_delayed_wakeup[1]) {
				wakeup(gd->gd_delayed_wakeup[1]);
				gd->gd_delayed_wakeup[1] = NULL;
			}
		}
		crit_exit();
	}
}

/*
 * setrunnable()
 *
 * Make a process runnable.  lp->lwp_token must be held on call and this
 * function must be called from the cpu owning lp.
 *
 * This only has an effect if we are in LSSTOP or LSSLEEP.
 */
void
setrunnable(struct lwp *lp)
{
	thread_t td = lp->lwp_thread;

	ASSERT_LWKT_TOKEN_HELD(&lp->lwp_token);
	KKASSERT(td->td_gd == mycpu);
	crit_enter();
	if (lp->lwp_stat == LSSTOP)
		lp->lwp_stat = LSSLEEP;
	if (lp->lwp_stat == LSSLEEP) {
		_tsleep_remove(td);
		lwkt_schedule(td);
	} else if (td->td_flags & TDF_SINTR) {
		lwkt_schedule(td);
	}
	crit_exit();
}

/*
 * The process is stopped due to some condition, usually because p_stat is
 * set to SSTOP, but also possibly due to being traced.
 *
 * Caller must hold p->p_token
 *
 * NOTE!  If the caller sets SSTOP, the caller must also clear P_WAITED
 * because the parent may check the child's status before the child actually
 * gets to this routine.
 *
 * This routine is called with the current lwp only, typically just
 * before returning to userland if the process state is detected as
 * possibly being in a stopped state.
 */
void
tstop(void)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p = lp->lwp_proc;
	struct proc *q;

	lwkt_gettoken(&lp->lwp_token);
	crit_enter();

	/*
	 * If LWP_MP_WSTOP is set, we were sleeping
	 * while our process was stopped.  At this point
	 * we were already counted as stopped.
	 */
	if ((lp->lwp_mpflags & LWP_MP_WSTOP) == 0) {
		/*
		 * If we're the last thread to stop, signal
		 * our parent.
		 */
		p->p_nstopped++;
		atomic_set_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
		wakeup(&p->p_nstopped);
		if (p->p_nstopped == p->p_nthreads) {
			/*
			 * Token required to interlock kern_wait()
			 */
			q = p->p_pptr;
			PHOLD(q);
			lwkt_gettoken(&q->p_token);
			p->p_flags &= ~P_WAITED;
			wakeup(p->p_pptr);
			if ((q->p_sigacts->ps_flag & PS_NOCLDSTOP) == 0)
				ksignal(q, SIGCHLD);
			lwkt_reltoken(&q->p_token);
			PRELE(q);
		}
	}

	/*
	 * Wait here while in a stopped state, interlocked with lwp_token.
	 * We must break-out if the whole process is trying to exit.
	 */
	while (STOPLWP(p, lp)) {
		lp->lwp_stat = LSSTOP;
		tsleep(p, 0, "stop", 0);
	}
	p->p_nstopped--;
	atomic_clear_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
	crit_exit();
	lwkt_reltoken(&lp->lwp_token);
}

/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.  This is a pcpu callout.
 *
 * We segment the lwp scan on a pcpu basis.  This does NOT
 * mean the associated lwps are on this cpu, it is done
 * just to break the work up.
 *
 * The callout on cpu0 rolls up the stats from the other
 * cpus.
 */
static int loadav_count_runnable(struct lwp *p, void *data);

static void
loadav(void *arg)
{
	globaldata_t gd = mycpu;
	struct loadavg *avg;
	int i, nrun;

	nrun = 0;
	alllwp_scan(loadav_count_runnable, &nrun, 1);
	gd->gd_loadav_nrunnable = nrun;
	if (gd->gd_cpuid == 0) {
		avg = &averunnable;
		nrun = 0;
		for (i = 0; i < ncpus; ++i)
			nrun += globaldata_find(i)->gd_loadav_nrunnable;
		for (i = 0; i < 3; i++) {
			avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
			    (long)nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
		}
	}

	/*
	 * Schedule the next update to occur after 5 seconds, but add a
	 * random variation to avoid synchronisation with processes that
	 * run at regular intervals.
	 */
	callout_reset(&gd->gd_loadav_callout,
		      hz * 4 + (int)(krandom() % (hz * 2 + 1)),
		      loadav, NULL);
}

static int
loadav_count_runnable(struct lwp *lp, void *data)
{
	int *nrunp = data;
	thread_t td;

	switch (lp->lwp_stat) {
	case LSRUN:
		if ((td = lp->lwp_thread) == NULL)
			break;
		if (td->td_flags & TDF_BLOCKED)
			break;
		++*nrunp;
		break;
	default:
		break;
	}
	lwkt_yield();
	return(0);
}

/*
 * Regular data collection
 */
static uint64_t
collect_load_callback(int n)
{
	int fscale = averunnable.fscale;

	return ((averunnable.ldavg[0] * 100 + (fscale >> 1)) / fscale);
}

static void
sched_setup(void *dummy __unused)
{
	globaldata_t save_gd = mycpu;
	globaldata_t gd;
	int n;

	kcollect_register(KCOLLECT_LOAD, "load", collect_load_callback,
			  KCOLLECT_SCALE(KCOLLECT_LOAD_FORMAT, 0));

	/*
	 * Kick off timeout driven events by calling them the first time.
	 * We split the work across available cpus to help scale it,
	 * since it can eat a lot of cpu when there are a lot of processes
	 * on the system.
	 */
	for (n = 0; n < ncpus; ++n) {
		gd = globaldata_find(n);
		lwkt_setcpu_self(gd);
		callout_init_mp(&gd->gd_loadav_callout);
		callout_init_mp(&gd->gd_schedcpu_callout);
		schedcpu(NULL);
		loadav(NULL);
	}
	lwkt_setcpu_self(save_gd);
}

/*
 * Extremely early initialization, dummy-up the tables so we don't have
 * to conditionalize for NULL in _wakeup() and tsleep_interlock().  Even
 * though the system isn't blocking this early, these functions still
 * try to access the hash table.
 *
 * This setup will be overridden once sched_dyninit() -> sleep_gdinit()
 * is called.
 */
void
sleep_early_gdinit(globaldata_t gd)
{
	static struct tslpque dummy_slpque;
	static cpumask_t dummy_cpumasks;

	slpque_tablesize = 1;
	gd->gd_tsleep_hash = &dummy_slpque;
	slpque_cpumasks = &dummy_cpumasks;
	TAILQ_INIT(&dummy_slpque.queue);
}

/*
 * PCPU initialization.  Called after KMALLOC is operational, by
 * sched_dyninit() for cpu 0, and by mi_gdinit() for other cpus later.
 *
 * WARNING! The pcpu hash table is smaller than the global cpumask
 *	    hash table, which can save us a lot of memory when maxproc
 *	    is set high.
 */
void
sleep_gdinit(globaldata_t gd)
{
	struct thread *td;
	size_t hash_size;
	uint32_t n;
	uint32_t i;

	/*
	 * This shouldn't happen, that is, there shouldn't be any threads
	 * waiting on the dummy tsleep queue this early in the boot.
	 */
	if (gd->gd_cpuid == 0) {
		struct tslpque *qp = &gd->gd_tsleep_hash[0];
		TAILQ_FOREACH(td, &qp->queue, td_sleepq) {
			kprintf("SLEEP_GDINIT SWITCH %s\n", td->td_comm);
		}
	}

	/*
	 * Note that we have to allocate one extra slot because we are
	 * shifting a modulo value.  TCHASHSHIFT(slpque_tablesize - 1) can
	 * return the same value as TCHASHSHIFT(slpque_tablesize).
	 */
	n = TCHASHSHIFT(slpque_tablesize) + 1;

	hash_size = sizeof(struct tslpque) * n;
	gd->gd_tsleep_hash = (void *)kmem_alloc3(&kernel_map, hash_size,
						 VM_SUBSYS_GD,
						 KM_CPU(gd->gd_cpuid));
	memset(gd->gd_tsleep_hash, 0, hash_size);
	for (i = 0; i < n; ++i)
		TAILQ_INIT(&gd->gd_tsleep_hash[i].queue);
}

/*
 * Dynamic initialization after the memory system is operational.
 */
static void
sched_dyninit(void *dummy __unused)
{
	int tblsize;
	int tblsize2;
	int n;

	/*
	 * Calculate table size for slpque hash.  We want a prime number
	 * large enough to avoid overloading slpque_cpumasks when the
	 * system has a large number of sleeping processes, which will
	 * spam IPIs on wakeup().
	 *
	 * While it is true this is really a per-lwp factor, generally
	 * speaking the maxproc limit is a good metric to go by.
	 */
	for (tblsize = maxproc | 1; ; tblsize += 2) {
		if (tblsize % 3 == 0)
			continue;
		if (tblsize % 5 == 0)
			continue;
		tblsize2 = (tblsize / 2) | 1;
		for (n = 7; n < tblsize2; n += 2) {
			if (tblsize % n == 0)
				break;
		}
		if (n == tblsize2)
			break;
	}

	/*
	 * PIDs are currently limited to 6 digits.  Cap the table size
	 * at double this.
	 */
	if (tblsize > 2000003)
		tblsize = 2000003;

	slpque_tablesize = tblsize;
	slpque_cpumasks = kmalloc(sizeof(*slpque_cpumasks) * slpque_tablesize,
				  M_TSLEEP, M_WAITOK | M_ZERO);
	sleep_gdinit(mycpu);
}