/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 * $FreeBSD: src/sys/kern/kern_synch.c,v 1.87.2.6 2002/10/13 07:29:53 kbyanc Exp $
 * $DragonFly: src/sys/kern/kern_synch.c,v 1.91 2008/09/09 04:06:13 dillon Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/uio.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <sys/xwait.h>
#include <sys/ktr.h>
#include <sys/serialize.h>

#include <sys/signal2.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mutex2.h>

#include <machine/cpu.h>
#include <machine/smp.h>

TAILQ_HEAD(tslpque, thread);

static void sched_setup (void *dummy);
SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)

int	hogticks;
int	lbolt;
void	*lbolt_syncer;
int	sched_quantum;		/* Roundrobin scheduling quantum in ticks. */
int	ncpus;
int	ncpus2, ncpus2_shift, ncpus2_mask;	/* note: mask not cpumask_t */
int	ncpus_fit, ncpus_fit_mask;		/* note: mask not cpumask_t */
int	safepri;
int	tsleep_now_works;
int	tsleep_crypto_dump = 0;

static struct callout loadav_callout;
static struct callout schedcpu_callout;
MALLOC_DEFINE(M_TSLEEP, "tslpque", "tsleep queues");

#define __DEALL(ident)	__DEQUALIFY(void *, ident)

#if !defined(KTR_TSLEEP)
#define KTR_TSLEEP	KTR_ALL
#endif
KTR_INFO_MASTER(tsleep);
KTR_INFO(KTR_TSLEEP, tsleep, tsleep_beg, 0, "tsleep enter %p", const volatile void *ident);
KTR_INFO(KTR_TSLEEP, tsleep, tsleep_end, 1, "tsleep exit");
KTR_INFO(KTR_TSLEEP, tsleep, wakeup_beg, 2, "wakeup enter %p", const volatile void *ident);
KTR_INFO(KTR_TSLEEP, tsleep, wakeup_end, 3, "wakeup exit");
KTR_INFO(KTR_TSLEEP, tsleep, ilockfail, 4, "interlock failed %p", const volatile void *ident);

#define logtsleep1(name)	KTR_LOG(tsleep_ ## name)
#define logtsleep2(name, val)	KTR_LOG(tsleep_ ## name, val)

struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };	/* load average, of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
static fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};

static void	endtsleep (void *);
static void	loadav (void *arg);
static void	schedcpu (void *arg);

/*
 * Adjust the scheduler quantum.  The quantum is specified in microseconds.
 * Note that 'tick' is in microseconds per tick.
 */
static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;

	new_val = sched_quantum * ustick;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < ustick)
		return (EINVAL);
	sched_quantum = new_val / ustick;
	hogticks = 2 * sched_quantum;
	return (0);
}

SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof sched_quantum, sysctl_kern_quantum, "I", "");

static int pctcpu_decay = 10;
SYSCTL_INT(_kern, OID_AUTO, pctcpu_decay, CTLFLAG_RW, &pctcpu_decay, 0, "");

/*
 * kernel uses `FSCALE', userland (SHOULD) use kern.fscale
 */
int fscale __unused = FSCALE;	/* exported to systat */
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");
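/*
 * Illustrative note (not part of the original code): kern.quantum is
 * exposed in microseconds but stored in ticks, so writes round down to
 * a whole number of ticks.  For example, with hz = 100 (ustick = 10000
 * microseconds per tick), writing 25000 to kern.quantum yields
 *
 *	sched_quantum = 25000 / 10000 = 2 ticks
 *	hogticks      = 2 * 2         = 4 ticks
 *
 * and a subsequent read reports 2 * 10000 = 20000 microseconds.
 */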
/*
 * Recompute process priorities, once a second.
 *
 * Since the userland schedulers are typically event oriented, if the
 * estcpu calculation at wakeup() time is not sufficient to make a
 * process runnable relative to other processes in the system we have
 * a 1-second recalc to help out.
 *
 * This code also allows us to store sysclock_t data in the process structure
 * without fear of an overrun, since sysclock_t is guaranteed to hold
 * several seconds worth of count.
 *
 * WARNING!  callouts can preempt normal threads.  However, they will not
 * preempt a thread holding a spinlock so we *can* safely use spinlocks.
 */
static int schedcpu_stats(struct proc *p, void *data __unused);
static int schedcpu_resource(struct proc *p, void *data __unused);

static void
schedcpu(void *arg)
{
	allproc_scan(schedcpu_stats, NULL);
	allproc_scan(schedcpu_resource, NULL);
	wakeup((caddr_t)&lbolt);
	wakeup(lbolt_syncer);
	callout_reset(&schedcpu_callout, hz, schedcpu, NULL);
}

/*
 * General process statistics once a second
 */
static int
schedcpu_stats(struct proc *p, void *data __unused)
{
	struct lwp *lp;

	/*
	 * Threads may not be completely set up if the process is in the
	 * SIDL state.
	 */
	if (p->p_stat == SIDL)
		return(0);

	PHOLD(p);
	if (lwkt_trytoken(&p->p_token) == FALSE) {
		PRELE(p);
		return(0);
	}

	p->p_swtime++;
	FOREACH_LWP_IN_PROC(lp, p) {
		if (lp->lwp_stat == LSSLEEP) {
			++lp->lwp_slptime;
			if (lp->lwp_slptime == 1)
				p->p_usched->uload_update(lp);
		}

		/*
		 * Only recalculate processes that are active or have slept
		 * less than 2 seconds.  The schedulers understand this.
		 * Otherwise decay by 50% per second.
		 */
		if (lp->lwp_slptime <= 1) {
			p->p_usched->recalculate(lp);
		} else {
			int decay;

			decay = pctcpu_decay;
			cpu_ccfence();
			if (decay <= 1)
				decay = 1;
			if (decay > 100)
				decay = 100;
			lp->lwp_pctcpu = (lp->lwp_pctcpu * (decay - 1)) /
					 decay;
		}
	}
	lwkt_reltoken(&p->p_token);
	lwkt_yield();
	PRELE(p);
	return(0);
}

/*
 * Resource checks.  XXX break out since ksignal/killproc can block,
 * limiting us to one process killed per second.  There is probably
 * a better way.
 */
static int
schedcpu_resource(struct proc *p, void *data __unused)
{
	u_int64_t ttime;
	struct lwp *lp;

	if (p->p_stat == SIDL)
		return(0);

	PHOLD(p);
	if (lwkt_trytoken(&p->p_token) == FALSE) {
		PRELE(p);
		return(0);
	}

	if (p->p_stat == SZOMB || p->p_limit == NULL) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return(0);
	}

	ttime = 0;
	FOREACH_LWP_IN_PROC(lp, p) {
		/*
		 * We may have caught an lp in the middle of being
		 * created, lwp_thread can be NULL.
		 */
		if (lp->lwp_thread) {
			ttime += lp->lwp_thread->td_sticks;
			ttime += lp->lwp_thread->td_uticks;
		}
	}

	switch(plimit_testcpulimit(p->p_limit, ttime)) {
	case PLIMIT_TESTCPU_KILL:
		killproc(p, "exceeded maximum CPU limit");
		break;
	case PLIMIT_TESTCPU_XCPU:
		if ((p->p_flags & P_XCPU) == 0) {
			p->p_flags |= P_XCPU;
			ksignal(p, SIGXCPU);
		}
		break;
	default:
		break;
	}
	lwkt_reltoken(&p->p_token);
	lwkt_yield();
	PRELE(p);
	return(0);
}

/*
 * This is only used by ps.  Generate a cpu usage percentage over
 * a period of one second.
 */
void
updatepcpu(struct lwp *lp, int cpticks, int ttlticks)
{
	fixpt_t acc;
	int remticks;

	acc = (cpticks << FSHIFT) / ttlticks;
	if (ttlticks >= ESTCPUFREQ) {
		lp->lwp_pctcpu = acc;
	} else {
		remticks = ESTCPUFREQ - ttlticks;
		lp->lwp_pctcpu = (acc * ttlticks + lp->lwp_pctcpu * remticks) /
				 ESTCPUFREQ;
	}
}
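/*
 * Worked example for updatepcpu() (illustrative, not from the original
 * source): acc is the fixed-point fraction cpticks/ttlticks.  When the
 * sample window ttlticks is shorter than ESTCPUFREQ ticks, the new
 * sample is blended with the previous value in proportion to how much
 * of the window it covers:
 *
 *	lwp_pctcpu = (acc * ttlticks + lwp_pctcpu * remticks) / ESTCPUFREQ
 *
 * e.g. if ttlticks is exactly half of ESTCPUFREQ, the new sample and
 * the old lwp_pctcpu each contribute 50% to the result.
 */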
/*
 * tsleep/wakeup hash table parameters.  Try to find the sweet spot for
 * like addresses being slept on.
 */
#define TABLESIZE	4001
#define LOOKUP(x)	(((u_int)(uintptr_t)(x)) % TABLESIZE)

static cpumask_t slpque_cpumasks[TABLESIZE];

/*
 * General scheduler initialization.  We force a reschedule 25 times
 * a second by default.  Note that cpu0 is initialized in early boot and
 * cannot make any high level calls.
 *
 * Each cpu has its own sleep queue.
 */
void
sleep_gdinit(globaldata_t gd)
{
	static struct tslpque slpque_cpu0[TABLESIZE];
	int i;

	if (gd->gd_cpuid == 0) {
		sched_quantum = (hz + 24) / 25;
		hogticks = 2 * sched_quantum;

		gd->gd_tsleep_hash = slpque_cpu0;
	} else {
		gd->gd_tsleep_hash = kmalloc(sizeof(slpque_cpu0),
					     M_TSLEEP, M_WAITOK | M_ZERO);
	}
	for (i = 0; i < TABLESIZE; ++i)
		TAILQ_INIT(&gd->gd_tsleep_hash[i]);
}

/*
 * This is a dandy function that allows us to interlock tsleep/wakeup
 * operations with unspecified upper level locks, such as lockmgr locks,
 * simply by holding a critical section.  The sequence is:
 *
 *	(acquire upper level lock)
 *	tsleep_interlock(blah)
 *	(release upper level lock)
 *	tsleep(blah, ...)
 *
 * Basically this function queues us on the tsleep queue without actually
 * descheduling us.  When tsleep() is later called with PINTERLOCKED it
 * assumes the thread was already queued, otherwise it queues it there.
 *
 * Thus it is possible to receive the wakeup prior to going to sleep and
 * the race conditions are covered.
 */
static __inline void
_tsleep_interlock(globaldata_t gd, const volatile void *ident, int flags)
{
	thread_t td = gd->gd_curthread;
	int id;

	crit_enter_quick(td);
	if (td->td_flags & TDF_TSLEEPQ) {
		id = LOOKUP(td->td_wchan);
		TAILQ_REMOVE(&gd->gd_tsleep_hash[id], td, td_sleepq);
		if (TAILQ_FIRST(&gd->gd_tsleep_hash[id]) == NULL) {
			atomic_clear_cpumask(&slpque_cpumasks[id],
					     gd->gd_cpumask);
		}
	} else {
		td->td_flags |= TDF_TSLEEPQ;
	}
	id = LOOKUP(ident);
	TAILQ_INSERT_TAIL(&gd->gd_tsleep_hash[id], td, td_sleepq);
	atomic_set_cpumask(&slpque_cpumasks[id], gd->gd_cpumask);
	td->td_wchan = ident;
	td->td_wdomain = flags & PDOMAIN_MASK;
	crit_exit_quick(td);
}

void
tsleep_interlock(const volatile void *ident, int flags)
{
	_tsleep_interlock(mycpu, ident, flags);
}

/*
 * Remove thread from sleepq.  Must be called with a critical section held.
 * The thread must not be migrating.
 */
static __inline void
_tsleep_remove(thread_t td)
{
	globaldata_t gd = mycpu;
	int id;

	KKASSERT(td->td_gd == gd && IN_CRITICAL_SECT(td));
	KKASSERT((td->td_flags & TDF_MIGRATING) == 0);
	if (td->td_flags & TDF_TSLEEPQ) {
		td->td_flags &= ~TDF_TSLEEPQ;
		id = LOOKUP(td->td_wchan);
		TAILQ_REMOVE(&gd->gd_tsleep_hash[id], td, td_sleepq);
		if (TAILQ_FIRST(&gd->gd_tsleep_hash[id]) == NULL)
			atomic_clear_cpumask(&slpque_cpumasks[id],
					     gd->gd_cpumask);
		td->td_wchan = NULL;
		td->td_wdomain = 0;
	}
}

void
tsleep_remove(thread_t td)
{
	_tsleep_remove(td);
}
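/*
 * A minimal usage sketch of the interlock pattern described above
 * (illustrative only; 'foo_lock' and 'foo_event' are hypothetical).
 * Because the thread is queued on the sleepq before the upper level
 * lock is dropped, a wakeup() issued in the window between the release
 * and the tsleep() is not lost:
 *
 *	lockmgr(&foo_lock, LK_EXCLUSIVE);
 *	while (foo_event_not_ready) {
 *		tsleep_interlock(&foo_event, 0);
 *		lockmgr(&foo_lock, LK_RELEASE);
 *		tsleep(&foo_event, PINTERLOCKED, "foowt", 0);
 *		lockmgr(&foo_lock, LK_EXCLUSIVE);
 *	}
 *	lockmgr(&foo_lock, LK_RELEASE);
 */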
/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If flags includes PCATCH, signals are checked
 * before and after sleeping; otherwise signals are not checked.  Returns
 * 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set
 * and a signal needs to be delivered, ERESTART is returned if the current
 * system call should be restarted if possible, and EINTR is returned if
 * the system call should be interrupted by the signal.
 *
 * Note that if we are a process, we release_curproc() before messing with
 * the LWKT scheduler.
 *
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 *
 * WARNING!  This code can't block (short of switching away), or bad things
 * will happen.  No getting tokens, no blocking locks, etc.
 */
int
tsleep(const volatile void *ident, int flags, const char *wmesg, int timo)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p = td->td_proc;		/* may be NULL */
	globaldata_t gd;
	int sig;
	int catch;
	int error;
	int oldpri;
	struct callout thandle;

	/*
	 * Currently a severe hack.  Make sure any delayed wakeups
	 * are flushed before we sleep or we might deadlock on whatever
	 * event we are sleeping on.
	 */
	if (td->td_flags & TDF_DELAYED_WAKEUP)
		wakeup_end_delayed();

	/*
	 * NOTE: removed KTRPOINT, it could cause races due to blocking
	 * even in stable.  Just scrap it for now.
	 */
	if (!tsleep_crypto_dump && (tsleep_now_works == 0 || panicstr)) {
		/*
		 * After a panic, or before we actually have an operational
		 * softclock, just give interrupts a chance, then just return;
		 *
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		splz();
		oldpri = td->td_pri;
		lwkt_setpri_self(safepri);
		lwkt_switch();
		lwkt_setpri_self(oldpri);
		return (0);
	}
	logtsleep2(tsleep_beg, ident);
	gd = td->td_gd;
	KKASSERT(td != &gd->gd_idlethread);	/* you must be kidding! */
	td->td_wakefromcpu = -1;		/* overwritten by _wakeup */

	/*
	 * NOTE: all of this occurs on the current cpu, including any
	 * callout-based wakeups, so a critical section is a sufficient
	 * interlock.
	 *
	 * The entire sequence through to where we actually sleep must
	 * run without breaking the critical section.
	 */
	catch = flags & PCATCH;
	error = 0;
	sig = 0;

	crit_enter_quick(td);

	KASSERT(ident != NULL, ("tsleep: no ident"));
	KASSERT(lp == NULL ||
		lp->lwp_stat == LSRUN ||	/* Obvious */
		lp->lwp_stat == LSSTOP,		/* Set in tstop */
		("tsleep %p %s %d",
		 ident, wmesg, lp->lwp_stat));

	/*
	 * We interlock the sleep queue if the caller has not already done
	 * it for us.  This must be done before we potentially acquire any
	 * tokens or we can lose the wakeup.
	 */
	if ((flags & PINTERLOCKED) == 0) {
		_tsleep_interlock(gd, ident, flags);
	}

	/*
	 * Setup for the current process (if this is a process).  We must
	 * interlock with lwp_token to avoid remote wakeup races via
	 * setrunnable().
	 */
	if (lp) {
		lwkt_gettoken(&lp->lwp_token);
		if (catch) {
			/*
			 * Early termination if PCATCH was set and a
			 * signal is pending, interlocked with the
			 * critical section.
			 *
			 * Early termination only occurs when tsleep() is
			 * entered while in a normal LSRUN state.
			 */
			if ((sig = CURSIG(lp)) != 0)
				goto resume;

			/*
			 * Causes ksignal to wake us up if a signal is
			 * received (interlocked with p->p_token).
			 */
			lp->lwp_flags |= LWP_SINTR;
		}
	} else {
		KKASSERT(p == NULL);
	}

	/*
	 * Make sure the current process has been untangled from
	 * the userland scheduler and initialize slptime to start
	 * counting.
	 *
	 * NOTE: td->td_wakefromcpu is pre-set by the release function
	 *	 for the dfly scheduler, and then adjusted by _wakeup().
	 */
	if (lp) {
		p->p_usched->release_curproc(lp);
		lp->lwp_slptime = 0;
	}

	/*
	 * If the interlocked flag is set but our cpu bit in the slpqueue
	 * is no longer set, then a wakeup was processed in between the
	 * tsleep_interlock() (ours or the caller's), and here.  This can
	 * occur under numerous circumstances including when we release the
	 * current process.
	 *
	 * Extreme loads can cause the sending of an IPI (e.g. wakeup()'s)
	 * to process incoming IPIs, thus draining incoming wakeups.
	 */
	if ((td->td_flags & TDF_TSLEEPQ) == 0) {
		logtsleep2(ilockfail, ident);
		goto resume;
	}

	/*
	 * Scheduling is blocked while in a critical section.  Coincide
	 * the descheduled-by-tsleep flag with the descheduling of the
	 * lwkt.
	 *
	 * The timer callout is localized on our cpu and interlocked by
	 * our critical section.
	 */
	lwkt_deschedule_self(td);
	td->td_flags |= TDF_TSLEEP_DESCHEDULED;
	td->td_wmesg = wmesg;

	/*
	 * Setup the timeout, if any.  The timeout is only operable while
	 * the thread is flagged descheduled.
	 */
	KKASSERT((td->td_flags & TDF_TIMEOUT) == 0);
	if (timo) {
		callout_init_mp(&thandle);
		callout_reset(&thandle, timo, endtsleep, td);
	}

	/*
	 * Beddy bye bye.
	 */
	if (lp) {
		/*
		 * Ok, we are sleeping.  Place us in the SSLEEP state.
		 */
		KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);

		/*
		 * tstop() sets LSSTOP, so don't fiddle with that.
		 */
		if (lp->lwp_stat != LSSTOP)
			lp->lwp_stat = LSSLEEP;
		lp->lwp_ru.ru_nvcsw++;
		p->p_usched->uload_update(lp);
		lwkt_switch();

		/*
		 * And when we are woken up, put us back in LSRUN.  If we
		 * slept for over a second, recalculate our estcpu.
		 */
		lp->lwp_stat = LSRUN;
		if (lp->lwp_slptime) {
			p->p_usched->uload_update(lp);
			p->p_usched->recalculate(lp);
		}
		lp->lwp_slptime = 0;
	} else {
		lwkt_switch();
	}

	/*
	 * Make sure we haven't switched cpus while we were asleep.  It's
	 * not supposed to happen.  Cleanup our temporary flags.
	 */
	KKASSERT(gd == td->td_gd);

	/*
	 * Cleanup the timeout.  If the timeout has already occurred,
	 * thandle has already been stopped; otherwise stop thandle.  If
	 * the timeout is running (the callout thread must be blocked
	 * trying to get lwp_token) then wait for us to get scheduled.
	 */
	if (timo) {
		while (td->td_flags & TDF_TIMEOUT_RUNNING) {
			lwkt_deschedule_self(td);
			td->td_wmesg = "tsrace";
			lwkt_switch();
			kprintf("td %p %s: timeout race\n", td, td->td_comm);
		}
		if (td->td_flags & TDF_TIMEOUT) {
			td->td_flags &= ~TDF_TIMEOUT;
			error = EWOULDBLOCK;
		} else {
			/* does not block when on same cpu */
			callout_stop(&thandle);
		}
	}
	td->td_flags &= ~TDF_TSLEEP_DESCHEDULED;
	/*
	 * Make sure we have been removed from the sleepq.  In most
	 * cases this will have been done for us already but it is
	 * possible for a scheduling IPI to be in-flight from a
	 * previous tsleep/tsleep_interlock() or due to a straight-out
	 * call to lwkt_schedule() (in the case of an interrupt thread),
	 * causing a spurious wakeup.
	 */
	_tsleep_remove(td);
	td->td_wmesg = NULL;

	/*
	 * Figure out the correct error return.  If interrupted by a
	 * signal we want to return EINTR or ERESTART.
	 */
resume:
	if (lp) {
		if (catch && error == 0) {
			if (sig != 0 || (sig = CURSIG(lp))) {
				if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
					error = EINTR;
				else
					error = ERESTART;
			}
		}
		lp->lwp_flags &= ~LWP_SINTR;
		lwkt_reltoken(&lp->lwp_token);
	}
	logtsleep1(tsleep_end);
	crit_exit_quick(td);
	return (error);
}

/*
 * Interlocked spinlock sleep.  An exclusively held spinlock must
 * be passed to ssleep().  The function will atomically release the
 * spinlock and tsleep on the ident, then reacquire the spinlock and
 * return.
 *
 * This routine is fairly important along the critical path, so optimize it
 * heavily.
 */
int
ssleep(const volatile void *ident, struct spinlock *spin, int flags,
       const char *wmesg, int timo)
{
	globaldata_t gd = mycpu;
	int error;

	_tsleep_interlock(gd, ident, flags);
	spin_unlock_quick(gd, spin);
	error = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
	spin_lock_quick(gd, spin);

	return (error);
}

/*
 * Interlocked lockmgr sleep.  An exclusively held lock must be passed
 * to lksleep().  The function will atomically release the lock and
 * tsleep on the ident, then reacquire the lock and return.
 */
int
lksleep(const volatile void *ident, struct lock *lock, int flags,
	const char *wmesg, int timo)
{
	globaldata_t gd = mycpu;
	int error;

	_tsleep_interlock(gd, ident, flags);
	lockmgr(lock, LK_RELEASE);
	error = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
	lockmgr(lock, LK_EXCLUSIVE);

	return (error);
}

/*
 * Interlocked mutex sleep.  An exclusively held mutex must be passed
 * to mtxsleep().  The function will atomically release the mutex
 * and tsleep on the ident, then reacquire the mutex and return.
 */
int
mtxsleep(const volatile void *ident, struct mtx *mtx, int flags,
	 const char *wmesg, int timo)
{
	globaldata_t gd = mycpu;
	int error;

	_tsleep_interlock(gd, ident, flags);
	mtx_unlock(mtx);
	error = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
	mtx_lock_ex_quick(mtx, wmesg);

	return (error);
}

/*
 * Interlocked serializer sleep.  An exclusively held serializer must
 * be passed to zsleep().  The function will atomically release
 * the serializer and tsleep on the ident, then reacquire the serializer
 * and return.
 */
int
zsleep(const volatile void *ident, struct lwkt_serialize *slz, int flags,
       const char *wmesg, int timo)
{
	globaldata_t gd = mycpu;
	int ret;

	ASSERT_SERIALIZED(slz);

	_tsleep_interlock(gd, ident, flags);
	lwkt_serialize_exit(slz);
	ret = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
	lwkt_serialize_enter(slz);

	return ret;
}
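/*
 * A minimal condition-wait sketch using ssleep() (illustrative only;
 * 'foo_spin', 'foo_ready' and 'foo_queue' are hypothetical).  The
 * spinlock is atomically released across the sleep and reacquired
 * before the condition is re-tested:
 *
 *	spin_lock(&foo_spin);
 *	while (foo_ready == 0)
 *		ssleep(&foo_queue, &foo_spin, 0, "foowt", 0);
 *	spin_unlock(&foo_spin);
 *
 * The producer side would set foo_ready while holding foo_spin and
 * then call wakeup(&foo_queue).
 */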
/*
 * Directly block on the LWKT thread by descheduling it.  This
 * is much faster than tsleep(), but the only legal way to wake
 * us up is to directly schedule the thread.
 *
 * Setting TDF_SINTR will cause new signals to directly schedule us.
 *
 * This routine must be called while in a critical section.
 */
int
lwkt_sleep(const char *wmesg, int flags)
{
	thread_t td = curthread;
	int sig;

	if ((flags & PCATCH) == 0 || td->td_lwp == NULL) {
		td->td_flags |= TDF_BLOCKED;
		td->td_wmesg = wmesg;
		lwkt_deschedule_self(td);
		lwkt_switch();
		td->td_wmesg = NULL;
		td->td_flags &= ~TDF_BLOCKED;
		return(0);
	}
	if ((sig = CURSIG(td->td_lwp)) != 0) {
		if (SIGISMEMBER(td->td_proc->p_sigacts->ps_sigintr, sig))
			return(EINTR);
		else
			return(ERESTART);
	}
	td->td_flags |= TDF_BLOCKED | TDF_SINTR;
	td->td_wmesg = wmesg;
	lwkt_deschedule_self(td);
	lwkt_switch();
	td->td_flags &= ~(TDF_BLOCKED | TDF_SINTR);
	td->td_wmesg = NULL;
	return(0);
}

/*
 * Implement the timeout for tsleep.
 *
 * This type of callout timeout is scheduled on the same cpu the process
 * is sleeping on.  Also, at the moment, the MP lock is held.
 */
static void
endtsleep(void *arg)
{
	thread_t td = arg;
	struct lwp *lp;

	/*
	 * We are going to have to get the lwp_token, which means we might
	 * block.  This can race a tsleep getting woken up by other means
	 * so set TDF_TIMEOUT_RUNNING to force the tsleep to wait for our
	 * processing to complete (sorry tsleep!).
	 *
	 * We can safely set td_flags because td MUST be on the same cpu
	 * as we are.
	 */
	KKASSERT(td->td_gd == mycpu);
	crit_enter();
	td->td_flags |= TDF_TIMEOUT_RUNNING | TDF_TIMEOUT;

	/*
	 * This can block but TDF_TIMEOUT_RUNNING will prevent the thread
	 * from exiting the tsleep on us.  The flag is interlocked by virtue
	 * of lp being on the same cpu as we are.
	 */
	if ((lp = td->td_lwp) != NULL)
		lwkt_gettoken(&lp->lwp_token);

	KKASSERT(td->td_flags & TDF_TSLEEP_DESCHEDULED);

	if (lp) {
		if (lp->lwp_proc->p_stat != SSTOP)
			setrunnable(lp);
		lwkt_reltoken(&lp->lwp_token);
	} else {
		_tsleep_remove(td);
		lwkt_schedule(td);
	}
	KKASSERT(td->td_gd == mycpu);
	td->td_flags &= ~TDF_TIMEOUT_RUNNING;
	crit_exit();
}
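/*
 * Illustrative timeout usage (not from the original source; 'foo_event'
 * is a hypothetical wait channel).  Per the tsleep() header comment, a
 * caller that wants to wait at most one second passes timo = hz and
 * checks for EWOULDBLOCK:
 *
 *	error = tsleep(&foo_event, 0, "footo", hz);
 *	if (error == EWOULDBLOCK) {
 *		... no wakeup arrived within one second ...
 *	}
 */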
/*
 * Make all processes sleeping on the specified identifier runnable.
 * count may be zero or one only.
 *
 * The domain encodes the sleep/wakeup domain, flags, plus the originating
 * cpu.
 *
 * This call may run without the MP lock held.  We can only manipulate thread
 * state on the cpu owning the thread.  We CANNOT manipulate process state
 * at all.
 *
 * _wakeup() can be passed to an IPI so we can't use (const volatile
 * void *ident).
 */
static void
_wakeup(void *ident, int domain)
{
	struct tslpque *qp;
	struct thread *td;
	struct thread *ntd;
	globaldata_t gd;
#ifdef SMP
	cpumask_t mask;
#endif
	int id;

	crit_enter();
	logtsleep2(wakeup_beg, ident);
	gd = mycpu;
	id = LOOKUP(ident);
	qp = &gd->gd_tsleep_hash[id];
restart:
	for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) {
		ntd = TAILQ_NEXT(td, td_sleepq);
		if (td->td_wchan == ident &&
		    td->td_wdomain == (domain & PDOMAIN_MASK)
		) {
			KKASSERT(td->td_gd == gd);
			_tsleep_remove(td);
			td->td_wakefromcpu = PWAKEUP_DECODE(domain);
			if (td->td_flags & TDF_TSLEEP_DESCHEDULED) {
				lwkt_schedule(td);
				if (domain & PWAKEUP_ONE)
					goto done;
			}
			goto restart;
		}
	}

#ifdef SMP
	/*
	 * We finished checking the current cpu but there still may be
	 * more work to do.  Either wakeup_one was requested and no matching
	 * thread was found, or a normal wakeup was requested and we have
	 * to continue checking cpus.
	 *
	 * It should be noted that this scheme is actually less expensive
	 * than the old scheme when waking up multiple threads, since we send
	 * only one IPI message per target candidate which may then schedule
	 * multiple threads.  Before we could have wound up sending an IPI
	 * message for each thread on the target cpu (!= current cpu) that
	 * needed to be woken up.
	 *
	 * NOTE: Wakeups occurring on remote cpus are asynchronous.  This
	 *	 should be ok since we are passing idents in the IPI rather
	 *	 than thread pointers.
	 */
	if ((domain & PWAKEUP_MYCPU) == 0 &&
	    (mask = slpque_cpumasks[id] & gd->gd_other_cpus) != 0) {
		lwkt_send_ipiq2_mask(mask, _wakeup, ident,
				     domain | PWAKEUP_MYCPU);
	}
#endif
done:
	logtsleep1(wakeup_end);
	crit_exit();
}

/*
 * Wakeup all threads tsleep()ing on the specified ident, on all cpus.
 */
void
wakeup(const volatile void *ident)
{
	globaldata_t gd = mycpu;
	thread_t td = gd->gd_curthread;

	if (td && (td->td_flags & TDF_DELAYED_WAKEUP)) {
		if (!atomic_cmpset_ptr(&gd->gd_delayed_wakeup[0], NULL,
				       ident)) {
			if (!atomic_cmpset_ptr(&gd->gd_delayed_wakeup[1],
					       NULL, ident)) {
				_wakeup(__DEALL(ident),
					PWAKEUP_ENCODE(0, gd->gd_cpuid));
			}
		}
		return;
	}
	_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, gd->gd_cpuid));
}

/*
 * Wakeup one thread tsleep()ing on the specified ident, on any cpu.
 */
void
wakeup_one(const volatile void *ident)
{
	/* XXX potentially round-robin the first responding cpu */
	_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mycpu->gd_cpuid) |
				PWAKEUP_ONE);
}

/*
 * Wakeup threads tsleep()ing on the specified ident on the current cpu
 * only.
 */
void
wakeup_mycpu(const volatile void *ident)
{
	_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mycpu->gd_cpuid) |
				PWAKEUP_MYCPU);
}

/*
 * Wakeup one thread tsleep()ing on the specified ident on the current cpu
 * only.
 */
void
wakeup_mycpu_one(const volatile void *ident)
{
	/* XXX potentially round-robin the first responding cpu */
	_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mycpu->gd_cpuid) |
				PWAKEUP_MYCPU | PWAKEUP_ONE);
}

/*
 * Wakeup all threads tsleep()ing on the specified ident on the specified
 * cpu only.
 */
void
wakeup_oncpu(globaldata_t gd, const volatile void *ident)
{
#ifdef SMP
	globaldata_t mygd = mycpu;

	if (gd == mygd) {
		_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mygd->gd_cpuid) |
					PWAKEUP_MYCPU);
	} else {
		lwkt_send_ipiq2(gd, _wakeup, __DEALL(ident),
				PWAKEUP_ENCODE(0, mygd->gd_cpuid) |
				PWAKEUP_MYCPU);
	}
#else
	_wakeup(__DEALL(ident), PWAKEUP_MYCPU);
#endif
}
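/*
 * Illustrative pairing for wakeup_one() (hypothetical names, not from
 * the original source): a producer typically enqueues one work item and
 * wakes exactly one waiter to avoid a thundering herd:
 *
 *	TAILQ_INSERT_TAIL(&foo_workq, item, entry);
 *	wakeup_one(&foo_workq);
 *
 * Waiters must sleep on the same address, e.g.
 * tsleep(&foo_workq, 0, "foowq", 0).
 */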
/*
 * Wakeup one thread tsleep()ing on the specified ident on the specified
 * cpu only.
 */
void
wakeup_oncpu_one(globaldata_t gd, const volatile void *ident)
{
#ifdef SMP
	globaldata_t mygd = mycpu;

	if (gd == mygd) {
		_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mygd->gd_cpuid) |
					PWAKEUP_MYCPU | PWAKEUP_ONE);
	} else {
		lwkt_send_ipiq2(gd, _wakeup, __DEALL(ident),
				PWAKEUP_ENCODE(0, mygd->gd_cpuid) |
				PWAKEUP_MYCPU | PWAKEUP_ONE);
	}
#else
	_wakeup(__DEALL(ident), PWAKEUP_MYCPU | PWAKEUP_ONE);
#endif
}

/*
 * Wakeup all threads waiting on the specified ident that slept using
 * the specified domain, on all cpus.
 */
void
wakeup_domain(const volatile void *ident, int domain)
{
	_wakeup(__DEALL(ident), PWAKEUP_ENCODE(domain, mycpu->gd_cpuid));
}

/*
 * Wakeup one thread waiting on the specified ident that slept using
 * the specified domain, on any cpu.
 */
void
wakeup_domain_one(const volatile void *ident, int domain)
{
	/* XXX potentially round-robin the first responding cpu */
	_wakeup(__DEALL(ident),
		PWAKEUP_ENCODE(domain, mycpu->gd_cpuid) | PWAKEUP_ONE);
}

/*
 * Open a delayed-wakeup window for the current thread.  wakeup() calls
 * made while the window is open are recorded and issued when
 * wakeup_end_delayed() is called.
 */
void
wakeup_start_delayed(void)
{
	globaldata_t gd = mycpu;

	crit_enter();
	gd->gd_curthread->td_flags |= TDF_DELAYED_WAKEUP;
	crit_exit();
}

/*
 * Close the delayed-wakeup window and flush any wakeups recorded while
 * it was open.
 */
void
wakeup_end_delayed(void)
{
	globaldata_t gd = mycpu;

	if (gd->gd_curthread->td_flags & TDF_DELAYED_WAKEUP) {
		crit_enter();
		gd->gd_curthread->td_flags &= ~TDF_DELAYED_WAKEUP;
		if (gd->gd_delayed_wakeup[0] || gd->gd_delayed_wakeup[1]) {
			if (gd->gd_delayed_wakeup[0]) {
				wakeup(gd->gd_delayed_wakeup[0]);
				gd->gd_delayed_wakeup[0] = NULL;
			}
			if (gd->gd_delayed_wakeup[1]) {
				wakeup(gd->gd_delayed_wakeup[1]);
				gd->gd_delayed_wakeup[1] = NULL;
			}
		}
		crit_exit();
	}
}
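/*
 * Illustrative use of the delayed-wakeup window (hypothetical call
 * site): batch wakeups issued while holding a hot lock and flush them
 * after the lock is released:
 *
 *	wakeup_start_delayed();
 *	spin_lock(&foo_spin);
 *	... state changes that would normally wakeup() immediately ...
 *	spin_unlock(&foo_spin);
 *	wakeup_end_delayed();
 *
 * Only two idents can be deferred per cpu (gd_delayed_wakeup[0..1]);
 * a wakeup() that finds both slots occupied is issued immediately.
 */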
/*
 * setrunnable()
 *
 * Make a process runnable.  lp->lwp_token must be held on call and this
 * function must be called from the cpu owning lp.
 *
 * This only has an effect if we are in LSSTOP or LSSLEEP.
 */
void
setrunnable(struct lwp *lp)
{
	thread_t td = lp->lwp_thread;

	ASSERT_LWKT_TOKEN_HELD(&lp->lwp_token);
	KKASSERT(td->td_gd == mycpu);
	crit_enter();
	if (lp->lwp_stat == LSSTOP)
		lp->lwp_stat = LSSLEEP;
	if (lp->lwp_stat == LSSLEEP) {
		_tsleep_remove(td);
		lwkt_schedule(td);
	} else if (td->td_flags & TDF_SINTR) {
		lwkt_schedule(td);
	}
	crit_exit();
}

/*
 * The process is stopped due to some condition, usually because p_stat is
 * set to SSTOP, but also possibly due to being traced.
 *
 * Caller must hold p->p_token
 *
 * NOTE!  If the caller sets SSTOP, the caller must also clear P_WAITED
 * because the parent may check the child's status before the child actually
 * gets to this routine.
 *
 * This routine is called with the current lwp only, typically just
 * before returning to userland if the process state is detected as
 * possibly being in a stopped state.
 */
void
tstop(void)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p = lp->lwp_proc;
	struct proc *q;

	lwkt_gettoken(&lp->lwp_token);
	crit_enter();

	/*
	 * If LWP_MP_WSTOP is set, we were sleeping
	 * while our process was stopped.  At this point
	 * we were already counted as stopped.
	 */
	if ((lp->lwp_mpflags & LWP_MP_WSTOP) == 0) {
		p->p_nstopped++;
		atomic_set_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
		wakeup(&p->p_nstopped);
		/*
		 * If we're the last thread to stop, signal
		 * our parent.
		 */
		if (p->p_nstopped == p->p_nthreads) {
			/*
			 * Token required to interlock kern_wait()
			 */
			q = p->p_pptr;
			PHOLD(q);
			lwkt_gettoken(&q->p_token);
			p->p_flags &= ~P_WAITED;
			wakeup(p->p_pptr);
			if ((q->p_sigacts->ps_flag & PS_NOCLDSTOP) == 0)
				ksignal(q, SIGCHLD);
			lwkt_reltoken(&q->p_token);
			PRELE(q);
		}
	}
	while (p->p_stat == SSTOP) {
		lp->lwp_stat = LSSTOP;
		tsleep(p, 0, "stop", 0);
	}
	p->p_nstopped--;
	atomic_clear_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
	crit_exit();
	lwkt_reltoken(&lp->lwp_token);
}

/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.
 */
static int loadav_count_runnable(struct lwp *p, void *data);

static void
loadav(void *arg)
{
	struct loadavg *avg;
	int i, nrun;

	nrun = 0;
	alllwp_scan(loadav_count_runnable, &nrun);
	avg = &averunnable;
	for (i = 0; i < 3; i++) {
		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
				 nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
	}

	/*
	 * Schedule the next update to occur after 5 seconds, but add a
	 * random variation to avoid synchronisation with processes that
	 * run at regular intervals.
	 */
	callout_reset(&loadav_callout,
		      hz * 4 + (int)(krandom() % (hz * 2 + 1)),
		      loadav, NULL);
}

static int
loadav_count_runnable(struct lwp *lp, void *data)
{
	int *nrunp = data;
	thread_t td;

	switch (lp->lwp_stat) {
	case LSRUN:
		if ((td = lp->lwp_thread) == NULL)
			break;
		if (td->td_flags & TDF_BLOCKED)
			break;
		++*nrunp;
		break;
	default:
		break;
	}
	lwkt_yield();
	return(0);
}

/* ARGSUSED */
static void
sched_setup(void *dummy)
{
	callout_init_mp(&loadav_callout);
	callout_init_mp(&schedcpu_callout);

	/* Kick off timeout driven events by calling first time. */
	schedcpu(NULL);
	loadav(NULL);
}
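/*
 * Illustrative note on the loadav() recurrence above (not part of the
 * original code): each sample computes, in FSCALE fixed point,
 *
 *	load[i] = load[i] * cexp[i] + nrun * (1 - cexp[i])
 *
 * which is a standard exponentially weighted moving average.  With
 * cexp[0] = exp(-1/12) = exp(-5s/60s), a constant run-queue depth nrun
 * is approached with a one-minute time constant; cexp[1] and cexp[2]
 * give the five- and fifteen-minute averages.
 */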