/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 * $FreeBSD: src/sys/kern/kern_synch.c,v 1.87.2.6 2002/10/13 07:29:53 kbyanc Exp $
 * $DragonFly: src/sys/kern/kern_synch.c,v 1.38 2004/11/10 08:27:54 dillon Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/thread2.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif
#include <sys/xwait.h>

#include <machine/cpu.h>
#include <machine/ipl.h>
#include <machine/smp.h>

static void sched_setup (void *dummy);
SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)

int	hogticks;
int	lbolt;
int	sched_quantum;		/* Roundrobin scheduling quantum in ticks. */
int	ncpus;
int	ncpus2, ncpus2_shift, ncpus2_mask;

static struct callout loadav_callout;
static struct callout roundrobin_callout;
static struct callout schedcpu_callout;

struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };	/* load average, of runnable procs */

/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
static fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};
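
/*
 * Worked example (illustrative): the 1-minute constant comes from
 * sampling every 5 seconds against a 60 second decay period,
 * exp(-5/60) == exp(-1/12).  loadav() below folds each sample into the
 * running average as:
 *
 *	ldavg = (cexp * ldavg + nrun * FSCALE * (FSCALE - cexp)) >> FSHIFT
 *
 * so with FSCALE = 2048 (the typical value), a current 1-minute average
 * of 1.0 and two runnable processes, the next sample is roughly
 * 0.92 * 1.0 + 0.08 * 2 ~= 1.08.
 */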

static void	endtsleep (void *);
static void	loadav (void *arg);
static void	roundrobin (void *arg);
static void	schedcpu (void *arg);
static void	updatepri (struct proc *p);
static void	crit_panicints(void);

static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;

	new_val = sched_quantum * tick;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < tick)
		return (EINVAL);
	sched_quantum = new_val / tick;
	hogticks = 2 * sched_quantum;
	return (0);
}

SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof sched_quantum, sysctl_kern_quantum, "I", "");

int
roundrobin_interval(void)
{
	return (sched_quantum);
}

/*
 * Force switch among equal priority processes every 100ms.
 *
 * WARNING!  The MP lock is not held on ipi message remotes.
 */
#ifdef SMP

static void
roundrobin_remote(void *arg)
{
	struct proc *p = lwkt_preempted_proc();
	if (p == NULL || RTP_PRIO_NEED_RR(p->p_rtprio.type))
		need_user_resched();
}

#endif

static void
roundrobin(void *arg)
{
	struct proc *p = lwkt_preempted_proc();
	if (p == NULL || RTP_PRIO_NEED_RR(p->p_rtprio.type))
		need_user_resched();
#ifdef SMP
	lwkt_send_ipiq_mask(mycpu->gd_other_cpus, roundrobin_remote, NULL);
#endif
	callout_reset(&roundrobin_callout, sched_quantum, roundrobin, NULL);
}

#ifdef SMP

void
resched_cpus(u_int32_t mask)
{
	lwkt_send_ipiq_mask(mask, roundrobin_remote, NULL);
}

#endif

/*
 * The load average is scaled by FSCALE (2048 typ).  The estimated cpu is
 * incremented at a rate of ESTCPUVFREQ per second (40hz typ), but this is
 * divided up across all cpu bound processes running in the system so an
 * individual process will get less under load.  ESTCPULIM typically caps
 * out at ESTCPUMAX (around 376, or 11 nice levels).
 *
 * Generally speaking the decay equation needs to break-even on growth
 * at the limit at all load levels >= 1.0, so if the estimated cpu for
 * a process increases by (ESTCPUVFREQ / load) per second, then the decay
 * should reach this value when estcpu reaches ESTCPUMAX.  That calculation
 * is:
 *
 *	ESTCPUMAX * decay = ESTCPUVFREQ / load
 *	decay = ESTCPUVFREQ / (load * ESTCPUMAX)
 *	decay = estcpu * 0.053 / load
 *
 * If the load is less than 1.0 we assume a load of 1.0.
 */
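
/*
 * Illustrative check of the break-even property described above: at a
 * load average of exactly 1.0, cload() returns FSCALE and decay_cpu()
 * removes estcpu * ESTCPUVFREQ / ESTCPUMAX per second, which equals the
 * per-second growth ESTCPUVFREQ / load once estcpu reaches ESTCPUMAX.
 * At a load average of 2.0 the divisor doubles, halving the decay to
 * match the halved per-process growth rate.
 */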

#define cload(loadav)	((loadav) < FSCALE ? FSCALE : (loadav))
#define decay_cpu(loadav,estcpu) \
    ((estcpu) * (FSCALE * ESTCPUVFREQ / ESTCPUMAX) / cload(loadav))

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */
static int	fscale __unused = FSCALE;
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define CCPU_SHIFT	11
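
/*
 * Quick sanity check on the "decay 95% in 60 seconds" claim above:
 * schedcpu() multiplies p_pctcpu by ccpu == exp(-1/20) once per second,
 * so after 60 seconds the surviving fraction is exp(-60/20) == exp(-3)
 * ~= 0.0498, i.e. roughly 5% remains and about 95% has decayed.
 */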

/*
 * Recompute process priorities, once a second.
 */
/* ARGSUSED */
static void
schedcpu(void *arg)
{
	fixpt_t loadfac = averunnable.ldavg[0];
	struct proc *p;
	int s;
	unsigned int ndecay;

	FOREACH_PROC_IN_SYSTEM(p) {
		/*
		 * Increment time in/out of memory and sleep time
		 * (if sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		p->p_swtime++;
		if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
			p->p_slptime++;
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;

		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 *
		 * Note that interactive calculations do not occur for
		 * long sleeps (because that isn't necessarily indicative
		 * of an interactive process).
		 */
		if (p->p_slptime > 1)
			continue;
		/* prevent state changes and protect run queue */
		s = splhigh();
		/*
		 * p_cpticks runs at ESTCPUFREQ but must be divided by the
		 * load average for par-100% use.  Higher p_interactive
		 * values mean less interactive, lower values mean more
		 * interactive.
		 */
		if ((((fixpt_t)p->p_cpticks * cload(loadfac)) >> FSHIFT) >
		    ESTCPUFREQ / 4) {
			if (p->p_interactive < 127)
				++p->p_interactive;
		} else {
			if (p->p_interactive > -127)
				--p->p_interactive;
		}
		/*
		 * p_pctcpu is only for ps.
		 */
#if (FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (ESTCPUFREQ == 100) ?
		    ((fixpt_t)p->p_cpticks) << (FSHIFT - CCPU_SHIFT) :
		    100 * (((fixpt_t)p->p_cpticks)
		    << (FSHIFT - CCPU_SHIFT)) / ESTCPUFREQ;
#else
		p->p_pctcpu += ((FSCALE - ccpu) *
		    (p->p_cpticks * FSCALE / ESTCPUFREQ)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		ndecay = decay_cpu(loadfac, p->p_estcpu);
		if (p->p_estcpu > ndecay)
			p->p_estcpu -= ndecay;
		else
			p->p_estcpu = 0;
		resetpriority(p);
		splx(s);
	}
	wakeup((caddr_t)&lbolt);
	callout_reset(&schedcpu_callout, hz, schedcpu, NULL);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
static void
updatepri(struct proc *p)
{
	unsigned int ndecay;

	ndecay = decay_cpu(averunnable.ldavg[0], p->p_estcpu) * p->p_slptime;
	if (p->p_estcpu > ndecay)
		p->p_estcpu -= ndecay;
	else
		p->p_estcpu = 0;
	resetpriority(p);
}

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
static TAILQ_HEAD(slpquehead, thread) slpque[TABLESIZE];
#define LOOKUP(x)	(((intptr_t)(x) >> 8) & (TABLESIZE - 1))
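
/*
 * Example: a wait channel address of 0x12345678 hashes to
 * (0x12345678 >> 8) & (TABLESIZE - 1) == 0x123456 & 0x7f == 0x56,
 * so threads sleeping on that address queue on slpque[86].
 */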

/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
int safepri;

void
sleepinit(void)
{
	int i;

	sched_quantum = hz/10;
	hogticks = 2 * sched_quantum;
	for (i = 0; i < TABLESIZE; i++)
		TAILQ_INIT(&slpque[i]);
}

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If flags includes the PCATCH flag, signals are
 * checked before and after sleeping, otherwise signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 *
 * Note that if we are a process, we release_curproc() before messing with
 * the LWKT scheduler.
 */
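/*
 * Illustrative usage sketch (not from this file): a consumer sleeps on
 * the address of the object it is waiting for, and a producer wakes
 * that address.  Assuming a hypothetical queue 'q':
 *
 *	while (q->count == 0) {
 *		error = tsleep(q, PCATCH, "qwait", hz);
 *		if (error == EWOULDBLOCK)
 *			break;			(timed out after ~1 second)
 *		if (error == EINTR || error == ERESTART)
 *			return (error);		(interrupted by a signal)
 *	}
 *
 * and on the producer side, after adding an entry:
 *
 *	wakeup(q);
 */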
int
tsleep(void *ident, int flags, const char *wmesg, int timo)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;		/* may be NULL */
	int sig = 0, catch = flags & PCATCH;
	int id = LOOKUP(ident);
	struct callout thandle;

	/*
	 * NOTE: removed KTRPOINT, it could cause races due to blocking
	 * even in stable.  Just scrap it for now.
	 */
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		crit_panicints();
		return (0);
	}
	KKASSERT(td != &mycpu->gd_idlethread);	/* you must be kidding! */
	crit_enter_quick(td);
	KASSERT(ident != NULL, ("tsleep: no ident"));
	KASSERT(p == NULL || p->p_stat == SRUN, ("tsleep %p %s %d",
		ident, wmesg, p->p_stat));

	td->td_wchan = ident;
	td->td_wmesg = wmesg;
	if (p) {
		if (flags & PNORESCHED)
			td->td_flags |= TDF_NORESCHED;
		release_curproc(p);
		p->p_slptime = 0;
	}
	lwkt_deschedule_self(td);
	TAILQ_INSERT_TAIL(&slpque[id], td, td_threadq);
	if (timo) {
		callout_init(&thandle);
		callout_reset(&thandle, timo, endtsleep, td);
	}
	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as SSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, td->td_wchan will be 0 upon return from CURSIG.
	 */
	if (p) {
		if (catch) {
			p->p_flag |= P_SINTR;
			if ((sig = CURSIG(p))) {
				if (td->td_wchan) {
					unsleep(td);
					lwkt_schedule_self(td);
				}
				p->p_stat = SRUN;
				goto resume;
			}
			if (td->td_wchan == NULL) {
				catch = 0;
				goto resume;
			}
		} else {
			sig = 0;
		}

		/*
		 * If we are not the current process we have to remove
		 * ourselves from the run queue.
		 */
		KASSERT(p->p_stat == SRUN, ("PSTAT NOT SRUN %d %d", p->p_pid, p->p_stat));
		/*
		 * If this is the current 'user' process schedule another one.
		 */
		clrrunnable(p, SSLEEP);
		p->p_stats->p_ru.ru_nvcsw++;
		mi_switch(p);
		KASSERT(p->p_stat == SRUN, ("tsleep: stat not srun"));
	} else {
		lwkt_switch();
	}
resume:
	if (p)
		p->p_flag &= ~P_SINTR;
	crit_exit_quick(td);
	td->td_flags &= ~TDF_NORESCHED;
	if (td->td_flags & TDF_TIMEOUT) {
		td->td_flags &= ~TDF_TIMEOUT;
		if (sig == 0)
			return (EWOULDBLOCK);
	} else if (timo) {
		callout_stop(&thandle);
	} else if (td->td_wmesg) {
		/*
		 * This can happen if a thread is woken up directly.  Clear
		 * wmesg to avoid debugging confusion.
		 */
		td->td_wmesg = NULL;
	}
	/* inline of iscaught() */
	if (p) {
		if (catch && (sig != 0 || (sig = CURSIG(p)))) {
			if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
				return (EINTR);
			return (ERESTART);
		}
	}
	return (0);
}

/*
 * Implement the timeout for tsleep.  We interlock against
 * wchan when setting TDF_TIMEOUT.  For processes we remove
 * the sleep if the process is stopped rather than sleeping,
 * so it remains stopped.
 */
static void
endtsleep(void *arg)
{
	thread_t td = arg;
	struct proc *p;

	crit_enter();
	if (td->td_wchan) {
		td->td_flags |= TDF_TIMEOUT;
		if ((p = td->td_proc) != NULL) {
			if (p->p_stat == SSLEEP)
				setrunnable(p);
			else
				unsleep(td);
		} else {
			unsleep(td);
			lwkt_schedule(td);
		}
	}
	crit_exit();
}

/*
 * Remove a process from its wait queue
 */
void
unsleep(struct thread *td)
{
	crit_enter();
	if (td->td_wchan) {
#if 0
		if (p->p_flag & P_XSLEEP) {
			struct xwait *w = p->p_wchan;
			TAILQ_REMOVE(&w->waitq, p, p_procq);
			p->p_flag &= ~P_XSLEEP;
		} else
#endif
		TAILQ_REMOVE(&slpque[LOOKUP(td->td_wchan)], td, td_threadq);
		td->td_wchan = NULL;
	}
	crit_exit();
}

#if 0
/*
 * Make all processes sleeping on the explicit lock structure runnable.
 */
void
xwakeup(struct xwait *w)
{
	struct proc *p;

	crit_enter();
	++w->gen;
	while ((p = TAILQ_FIRST(&w->waitq)) != NULL) {
		TAILQ_REMOVE(&w->waitq, p, p_procq);
		KASSERT(p->p_wchan == w && (p->p_flag & P_XSLEEP),
		    ("xwakeup: wchan mismatch for %p (%p/%p) %08x", p, p->p_wchan, w, p->p_flag & P_XSLEEP));
		p->p_wchan = NULL;
		p->p_flag &= ~P_XSLEEP;
		if (p->p_stat == SSLEEP) {
			/* OPTIMIZED EXPANSION OF setrunnable(p); */
			if (p->p_slptime > 1)
				updatepri(p);
			p->p_slptime = 0;
			p->p_stat = SRUN;
			if (p->p_flag & P_INMEM) {
				lwkt_schedule(td);
			} else {
				p->p_flag |= P_SWAPINREQ;
				wakeup((caddr_t)&proc0);
			}
		}
	}
	crit_exit();
}
#endif

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
static void
_wakeup(void *ident, int count)
{
	struct slpquehead *qp;
	struct thread *td;
	struct thread *ntd;
	struct proc *p;
	int id = LOOKUP(ident);

	crit_enter();
	qp = &slpque[id];
restart:
	for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) {
		ntd = TAILQ_NEXT(td, td_threadq);
		if (td->td_wchan == ident) {
			TAILQ_REMOVE(qp, td, td_threadq);
			td->td_wchan = NULL;
			if ((p = td->td_proc) != NULL && p->p_stat == SSLEEP) {
				/* OPTIMIZED EXPANSION OF setrunnable(p); */
				if (p->p_slptime > 1)
					updatepri(p);
				p->p_slptime = 0;
				p->p_stat = SRUN;
				if (p->p_flag & P_INMEM) {
					/*
					 * LWKT scheduled now, there is no
					 * userland runq interaction until
					 * the thread tries to return to user
					 * mode.
					 *
					 * setrunqueue(p);
					 */
					lwkt_schedule(td);
				} else {
					p->p_flag |= P_SWAPINREQ;
					wakeup((caddr_t)&proc0);
				}
				/* END INLINE EXPANSION */
			} else if (p == NULL) {
				lwkt_schedule(td);
			}
			if (--count == 0)
				break;
			goto restart;
		}
	}
	crit_exit();
}

void
wakeup(void *ident)
{
	_wakeup(ident, 0);
}

void
wakeup_one(void *ident)
{
	_wakeup(ident, 1);
}
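
/*
 * Note: wakeup() wakes every thread sleeping on the identifier, while
 * wakeup_one() stops after the first match, which can avoid a
 * thundering herd when only one waiter can make progress.  E.g. with
 * ten threads blocked in tsleep(&resource, ...), wakeup_one(&resource)
 * reschedules exactly one of them.
 */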

/*
 * The machine independent parts of mi_switch().
 *
 * 'p' must be the current process.
 */
void
mi_switch(struct proc *p)
{
	thread_t td = p->p_thread;
	struct rlimit *rlim;
	u_int64_t ttime;

	KKASSERT(td == mycpu->gd_curthread);

	crit_enter_quick(td);

	/*
	 * Check if the process exceeds its cpu resource allocation.
	 * If over max, kill it.  Time spent in interrupts is not
	 * included.  YYY 64 bit match is expensive.  Ick.
	 */
	ttime = td->td_sticks + td->td_uticks;
	if (p->p_stat != SZOMB && p->p_limit->p_cpulimit != RLIM_INFINITY &&
	    ttime > p->p_limit->p_cpulimit) {
		rlim = &p->p_rlimit[RLIMIT_CPU];
		if (ttime / (rlim_t)1000000 >= rlim->rlim_max) {
			killproc(p, "exceeded maximum CPU limit");
		} else {
			psignal(p, SIGXCPU);
			if (rlim->rlim_cur < rlim->rlim_max) {
				/* XXX: we should make a private copy */
				rlim->rlim_cur += 5;
			}
		}
	}

	/*
	 * If we are in a SSTOPped state we deschedule ourselves.
	 * YYY this needs to be cleaned up, remember that LWKTs stay on
	 * their run queue which works differently than the user scheduler
	 * which removes the process from the runq when it runs it.
	 */
	mycpu->gd_cnt.v_swtch++;
	if (p->p_stat == SSTOP)
		lwkt_deschedule_self(td);
	lwkt_switch();
	crit_exit_quick(td);
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(struct proc *p)
{
	int s;

	s = splhigh();
	switch (p->p_stat) {
	case 0:
	case SRUN:
	case SZOMB:
	default:
		panic("setrunnable");
	case SSTOP:
	case SSLEEP:
		unsleep(p->p_thread);	/* e.g. when sending signals */
		break;

	case SIDL:
		break;
	}
	p->p_stat = SRUN;

	/*
	 * The process is controlled by LWKT at this point, we do not mess
	 * around with the userland scheduler until the thread tries to
	 * return to user mode.
	 */
#if 0
	if (p->p_flag & P_INMEM)
		setrunqueue(p);
#endif
	if (p->p_flag & P_INMEM)
		lwkt_schedule(p->p_thread);
	splx(s);
	if (p->p_slptime > 1)
		updatepri(p);
	p->p_slptime = 0;
	if ((p->p_flag & P_INMEM) == 0) {
		p->p_flag |= P_SWAPINREQ;
		wakeup((caddr_t)&proc0);
	}
}

/*
 * Change the process state to NOT be runnable, removing it from the run
 * queue.
 */
void
clrrunnable(struct proc *p, int stat)
{
	crit_enter_quick(p->p_thread);
	if (p->p_stat == SRUN && (p->p_flag & P_ONRUNQ))
		remrunqueue(p);
	p->p_stat = stat;
	crit_exit_quick(p->p_thread);
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(struct proc *p)
{
	int newpriority;
	int interactive;
	int opq;
	int npq;

	/*
	 * Set p_priority for general process comparisons
	 */
	switch(p->p_rtprio.type) {
	case RTP_PRIO_REALTIME:
		p->p_priority = PRIBASE_REALTIME + p->p_rtprio.prio;
		return;
	case RTP_PRIO_NORMAL:
		break;
	case RTP_PRIO_IDLE:
		p->p_priority = PRIBASE_IDLE + p->p_rtprio.prio;
		return;
	case RTP_PRIO_THREAD:
		p->p_priority = PRIBASE_THREAD + p->p_rtprio.prio;
		return;
	}

	/*
	 * NORMAL priorities fall through.  These are based on niceness
	 * and cpu use.  Lower numbers == higher priorities.
	 */
	newpriority = (int)(NICE_ADJUST(p->p_nice - PRIO_MIN) +
	    p->p_estcpu / ESTCPURAMP);

	/*
	 * p_interactive is -128 to +127 and represents very long term
	 * interactivity or batch (whereas estcpu is a much faster variable).
	 * Interactivity can modify the priority by up to 8 units either way.
	 * (8 units == approximately 4 nice levels).
	 */
	interactive = p->p_interactive / 10;
	newpriority += interactive;

	newpriority = MIN(newpriority, MAXPRI);
	newpriority = MAX(newpriority, 0);
	npq = newpriority / PPQ;
	crit_enter();
	opq = (p->p_priority & PRIMASK) / PPQ;
	if (p->p_stat == SRUN && (p->p_flag & P_ONRUNQ) && opq != npq) {
		/*
		 * We have to move the process to another queue
		 */
		remrunqueue(p);
		p->p_priority = PRIBASE_NORMAL + newpriority;
		setrunqueue(p);
	} else {
		/*
		 * We can just adjust the priority and it will be picked
		 * up later.
		 */
		KKASSERT(opq == npq || (p->p_flag & P_ONRUNQ) == 0);
		p->p_priority = PRIBASE_NORMAL + newpriority;
	}
	crit_exit();
}
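
/*
 * Example of the requeue test above: priorities are grouped into
 * PPQ-sized queues, so a recomputed priority only forces a requeue when
 * it crosses a queue boundary.  If PPQ were 4 (illustrative value), a
 * priority change from 17 to 18 stays in queue 4 and merely updates
 * p_priority, while a change from 17 to 21 moves the process from
 * queue 4 to queue 5.
 */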

/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.
 */
static void
loadav(void *arg)
{
	int i, nrun;
	struct loadavg *avg;
	struct proc *p;
	thread_t td;

	avg = &averunnable;
	nrun = 0;
	FOREACH_PROC_IN_SYSTEM(p) {
		switch (p->p_stat) {
		case SRUN:
			if ((td = p->p_thread) == NULL)
				break;
			if (td->td_flags & TDF_BLOCKED)
				break;
			/* fall through */
		case SIDL:
			nrun++;
			break;
		default:
			break;
		}
	}
	for (i = 0; i < 3; i++)
		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;

	/*
	 * Schedule the next update to occur after 5 seconds, but add a
	 * random variation to avoid synchronisation with processes that
	 * run at regular intervals.
	 */
	callout_reset(&loadav_callout, hz * 4 + (int)(random() % (hz * 2 + 1)),
	    loadav, NULL);
}

/* ARGSUSED */
static void
sched_setup(void *dummy)
{
	callout_init(&loadav_callout);
	callout_init(&roundrobin_callout);
	callout_init(&schedcpu_callout);

	/* Kick off timeout driven events by calling first time. */
	roundrobin(NULL);
	schedcpu(NULL);
	loadav(NULL);
}

/*
 * We adjust the priority of the current process.  The priority of
 * a process gets worse as it accumulates CPU time.  The cpu usage
 * estimator (p_estcpu) is increased here.  resetpriority() will
 * compute a different priority each time p_estcpu increases by
 * INVERSE_ESTCPU_WEIGHT (until MAXPRI is reached).
 *
 * The cpu usage estimator ramps up quite quickly when the process is
 * running (linearly), and decays away exponentially, at a rate which
 * is proportionally slower when the system is busy.  The basic principle
 * is that the system will 90% forget that the process used a lot of CPU
 * time in 5 * loadav seconds.  This causes the system to favor processes
 * which haven't run much recently, and to round-robin among other processes.
 *
 * The actual schedulerclock interrupt rate is ESTCPUFREQ, but we generally
 * want to ramp-up at a faster rate, ESTCPUVFREQ, so p_estcpu is scaled
 * by (ESTCPUVFREQ / ESTCPUFREQ).  You can control the ramp-up/ramp-down
 * rate by adjusting ESTCPUVFREQ in sys/proc.h in integer multiples
 * of ESTCPUFREQ.
 *
 * WARNING! called from a fast-int or an IPI, the MP lock MIGHT NOT BE HELD
 * and we cannot block.
 */
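/*
 * Illustrative arithmetic (values assumed, not taken from this file;
 * see sys/proc.h for the real definitions): if ESTCPUFREQ were 20 and
 * ESTCPUVFREQ were 40, each schedulerclock tick would add
 * ESTCPUVFREQ / ESTCPUFREQ == 2 to p_estcpu, so a cpu-bound process
 * ramps up at the virtual rate of 40 per second even though the
 * interrupt itself only fires 20 times per second.
 */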
void
schedulerclock(void *dummy)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	if ((p = td->td_proc) != NULL) {
		p->p_cpticks++;		/* cpticks runs at ESTCPUFREQ */
		p->p_estcpu = ESTCPULIM(p->p_estcpu + ESTCPUVFREQ / ESTCPUFREQ);
		if (try_mplock()) {
			resetpriority(p);
			rel_mplock();
		}
	}
}

static
void
crit_panicints(void)
{
	int s;
	int cpri;

	s = splhigh();
	cpri = crit_panic_save();
	splx(safepri);
	crit_panic_restore(cpri);
	splx(s);
}