/*
 * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/usched_bsd4.c,v 1.26 2008/11/01 23:31:19 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/uio.h>
#include <sys/sysctl.h>
#include <sys/resourcevar.h>
#include <sys/spinlock.h>
#include <machine/cpu.h>
#include <machine/smp.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

/*
 * Priorities.  Note that with 32 run queues per scheduler each queue
 * represents four priority levels.
 */

#define MAXPRI                  128
#define PRIMASK                 (MAXPRI - 1)
#define PRIBASE_REALTIME        0
#define PRIBASE_NORMAL          MAXPRI
#define PRIBASE_IDLE            (MAXPRI * 2)
#define PRIBASE_THREAD          (MAXPRI * 3)
#define PRIBASE_NULL            (MAXPRI * 4)

#define NQS     32              /* 32 run queues. */
#define PPQ     (MAXPRI / NQS)  /* priorities per queue */
#define PPQMASK (PPQ - 1)

/*
 * NICEPPQ      - number of nice units per priority queue
 * ESTCPURAMP   - number of scheduler ticks for estcpu to switch queues
 *
 * ESTCPUPPQ    - number of estcpu units per priority queue
 * ESTCPUMAX    - number of estcpu units
 * ESTCPUINCR   - amount we have to increment p_estcpu per scheduling tick at
 *                100% cpu.
 */
#define NICEPPQ         2
#define ESTCPURAMP      4
#define ESTCPUPPQ       512
#define ESTCPUMAX       (ESTCPUPPQ * NQS)
#define ESTCPUINCR      (ESTCPUPPQ / ESTCPURAMP)
#define PRIO_RANGE      (PRIO_MAX - PRIO_MIN + 1)

#define ESTCPULIM(v)    min((v), ESTCPUMAX)
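
/*
 * Illustrative arithmetic for the constants above (a worked example,
 * not part of the original code): with MAXPRI 128 and NQS 32, PPQ is
 * 128/32 = 4 priority levels per queue.  ESTCPUMAX is 512 * 32 = 16384
 * and ESTCPUINCR is 512/4 = 128, so a thread running at 100% cpu
 * accumulates one queue's worth of estcpu (ESTCPUPPQ) every
 * ESTCPURAMP = 4 scheduler ticks.  PRIO_RANGE is 20 - (-20) + 1 = 41
 * nice levels, which NICEPPQ spreads across roughly 41/2 = 20 of the
 * 32 normal-class queues.
 */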

TAILQ_HEAD(rq, lwp);

#define lwp_priority    lwp_usdata.bsd4.priority
#define lwp_rqindex     lwp_usdata.bsd4.rqindex
#define lwp_origcpu     lwp_usdata.bsd4.origcpu
#define lwp_estcpu      lwp_usdata.bsd4.estcpu
#define lwp_rqtype      lwp_usdata.bsd4.rqtype

static void bsd4_acquire_curproc(struct lwp *lp);
static void bsd4_release_curproc(struct lwp *lp);
static void bsd4_select_curproc(globaldata_t gd);
static void bsd4_setrunqueue(struct lwp *lp);
static void bsd4_schedulerclock(struct lwp *lp, sysclock_t period,
                                sysclock_t cpstamp);
static void bsd4_recalculate_estcpu(struct lwp *lp);
static void bsd4_resetpriority(struct lwp *lp);
static void bsd4_forking(struct lwp *plp, struct lwp *lp);
static void bsd4_exiting(struct lwp *plp, struct lwp *lp);
static void bsd4_yield(struct lwp *lp);

#ifdef SMP
static void need_user_resched_remote(void *dummy);
#endif
static struct lwp *chooseproc_locked(struct lwp *chklp);
static void bsd4_remrunqueue_locked(struct lwp *lp);
static void bsd4_setrunqueue_locked(struct lwp *lp);

struct usched usched_bsd4 = {
        { NULL },
        "bsd4", "Original DragonFly Scheduler",
        NULL,                   /* default registration */
        NULL,                   /* default deregistration */
        bsd4_acquire_curproc,
        bsd4_release_curproc,
        bsd4_setrunqueue,
        bsd4_schedulerclock,
        bsd4_recalculate_estcpu,
        bsd4_resetpriority,
        bsd4_forking,
        bsd4_exiting,
        NULL,                   /* setcpumask not supported */
        bsd4_yield
};

struct usched_bsd4_pcpu {
        struct thread   helper_thread;
        short           rrcount;
        short           upri;
        struct lwp      *uschedcp;
};

typedef struct usched_bsd4_pcpu *bsd4_pcpu_t;

/*
 * We have NQS (32) run queues per scheduling class.  For the normal
 * class, there are 128 priorities scaled onto these 32 queues.  New
 * processes are added to the last entry in each queue, and processes
 * are selected for running by taking them from the head and maintaining
 * a simple FIFO arrangement.  Realtime and Idle priority processes have
 * an explicit 0-31 priority which maps directly onto their class queue
 * index.  When a queue has something in it, the corresponding bit is
 * set in the queuebits variable, allowing a single read to determine
 * the state of all 32 queues and then a ffs() to find the first busy
 * queue.
 */
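
/*
 * Worked example of the queuebits encoding (illustrative only): if
 * bsd4_queues[3] and bsd4_queues[17] are the only non-empty normal
 * queues, then bsd4_queuebits == (1 << 3) | (1 << 17) == 0x00020008.
 * A single bsfl() (find-first-set) on that word returns 3, selecting
 * the best-priority busy queue without scanning all 32 queue heads.
 */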
static struct rq bsd4_queues[NQS];
static struct rq bsd4_rtqueues[NQS];
static struct rq bsd4_idqueues[NQS];
static u_int32_t bsd4_queuebits;
static u_int32_t bsd4_rtqueuebits;
static u_int32_t bsd4_idqueuebits;
static cpumask_t bsd4_curprocmask = -1; /* currently running a user process */
static cpumask_t bsd4_rdyprocmask;      /* ready to accept a user process */
static int       bsd4_runqcount;
#ifdef SMP
static volatile int bsd4_scancpu;
#endif
static struct spinlock bsd4_spin;
static struct usched_bsd4_pcpu bsd4_pcpu[MAXCPU];

SYSCTL_INT(_debug, OID_AUTO, bsd4_runqcount, CTLFLAG_RD, &bsd4_runqcount, 0, "");
#ifdef INVARIANTS
static int usched_nonoptimal;
SYSCTL_INT(_debug, OID_AUTO, usched_nonoptimal, CTLFLAG_RW,
        &usched_nonoptimal, 0, "acquire_curproc() was not optimal");
static int usched_optimal;
SYSCTL_INT(_debug, OID_AUTO, usched_optimal, CTLFLAG_RW,
        &usched_optimal, 0, "acquire_curproc() was optimal");
#endif
static int usched_debug = -1;
SYSCTL_INT(_debug, OID_AUTO, scdebug, CTLFLAG_RW, &usched_debug, 0, "");
#ifdef SMP
static int remote_resched_nonaffinity;
static int remote_resched_affinity;
static int choose_affinity;
SYSCTL_INT(_debug, OID_AUTO, remote_resched_nonaffinity, CTLFLAG_RD,
        &remote_resched_nonaffinity, 0, "Number of remote rescheds");
SYSCTL_INT(_debug, OID_AUTO, remote_resched_affinity, CTLFLAG_RD,
        &remote_resched_affinity, 0, "Number of remote rescheds");
SYSCTL_INT(_debug, OID_AUTO, choose_affinity, CTLFLAG_RD,
        &choose_affinity, 0, "chooseproc() was smart");
#endif

static int usched_bsd4_rrinterval = (ESTCPUFREQ + 9) / 10;
SYSCTL_INT(_kern, OID_AUTO, usched_bsd4_rrinterval, CTLFLAG_RW,
        &usched_bsd4_rrinterval, 0, "");
static int usched_bsd4_decay = ESTCPUINCR / 2;
SYSCTL_INT(_kern, OID_AUTO, usched_bsd4_decay, CTLFLAG_RW,
        &usched_bsd4_decay, 0, "");

/*
 * Initialize the run queues at boot time.
 */
static void
rqinit(void *dummy)
{
        int i;

        spin_init(&bsd4_spin);
        for (i = 0; i < NQS; i++) {
                TAILQ_INIT(&bsd4_queues[i]);
                TAILQ_INIT(&bsd4_rtqueues[i]);
                TAILQ_INIT(&bsd4_idqueues[i]);
        }
        atomic_clear_int(&bsd4_curprocmask, 1);
}
SYSINIT(runqueue, SI_BOOT2_USCHED, SI_ORDER_FIRST, rqinit, NULL)

/*
 * BSD4_ACQUIRE_CURPROC
 *
 * This function is called when the kernel intends to return to userland.
 * It is responsible for making the thread the current designated userland
 * thread for this cpu, blocking if necessary.
 *
 * The kernel has already depressed our LWKT priority so we must not switch
 * until we have either assigned or disposed of the thread.
 *
 * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
 * TO ANOTHER CPU!  Because most of the kernel assumes that no migration will
 * occur, this function is called only under very controlled circumstances.
 *
 * MPSAFE
 */
static void
bsd4_acquire_curproc(struct lwp *lp)
{
        globaldata_t gd;
        bsd4_pcpu_t dd;
        struct lwp *olp;

        crit_enter();
        bsd4_recalculate_estcpu(lp);

        /*
         * If a reschedule was requested give another thread the
         * driver's seat.
         */
        if (user_resched_wanted()) {
                clear_user_resched();
                bsd4_release_curproc(lp);
        }

        /*
         * Loop until we are the current user thread
         */
        do {
                /*
                 * Reload after a switch or setrunqueue/switch possibly
                 * moved us to another cpu.
                 */
                clear_lwkt_resched();
                gd = mycpu;
                dd = &bsd4_pcpu[gd->gd_cpuid];

                /*
                 * Become the currently scheduled user thread for this cpu
                 * if we can do so trivially.
                 *
                 * We can steal another thread's current thread designation
                 * on this cpu since if we are running that other thread
                 * must not be, so we can safely deschedule it.
                 */
                if (dd->uschedcp == lp) {
                        dd->upri = lp->lwp_priority;
                } else if (dd->uschedcp == NULL) {
                        atomic_set_int(&bsd4_curprocmask, gd->gd_cpumask);
                        dd->uschedcp = lp;
                        dd->upri = lp->lwp_priority;
                } else if (dd->upri > lp->lwp_priority) {
                        olp = dd->uschedcp;
                        dd->uschedcp = lp;
                        dd->upri = lp->lwp_priority;
                        lwkt_deschedule(olp->lwp_thread);
                        bsd4_setrunqueue(olp);
                } else {
                        lwkt_deschedule(lp->lwp_thread);
                        bsd4_setrunqueue(lp);
                        lwkt_switch();
                }

                /*
                 * Other threads at our current user priority have already
                 * put in their bids, but we must run any kernel threads
                 * at higher priorities, and we could lose our bid to
                 * another thread trying to return to user mode in the
                 * process.
                 *
                 * If we lose our bid we will be descheduled and put on
                 * the run queue.  When we are reactivated we will have
                 * another chance.
                 */
                if (lwkt_check_resched(lp->lwp_thread) > 1) {
                        lwkt_switch();
                        continue;
                }
        } while (dd->uschedcp != lp);

        crit_exit();
        KKASSERT((lp->lwp_flag & LWP_ONRUNQ) == 0);
}

/*
 * BSD4_RELEASE_CURPROC
 *
 * This routine detaches the current thread from the userland scheduler,
 * usually because the thread needs to run or block in the kernel (at
 * kernel priority) for a while.
 *
 * This routine is also responsible for selecting a new thread to
 * make the current thread.
 *
 * NOTE: This implementation differs from the dummy example in that
 * bsd4_select_curproc() is able to select the current process, whereas
 * dummy_select_curproc() is not able to select the current process.
 * This means we have to NULL out uschedcp.
 *
 * Additionally, note that we may already be on a run queue if releasing
 * via the lwkt_switch() in bsd4_setrunqueue().
 *
 * WARNING!  The MP lock may be in an unsynchronized state due to the
 * way get_mplock() works and the fact that this function may be called
 * from a passive release during a lwkt_switch().  try_mplock() will deal
 * with this for us but you should be aware that td_mpcount may not be
 * usable.
 *
 * MPSAFE
 */
static void
bsd4_release_curproc(struct lwp *lp)
{
        globaldata_t gd = mycpu;
        bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];

        if (dd->uschedcp == lp) {
                crit_enter();
                KKASSERT((lp->lwp_flag & LWP_ONRUNQ) == 0);
                dd->uschedcp = NULL;    /* don't let lp be selected */
                dd->upri = PRIBASE_NULL;
                atomic_clear_int(&bsd4_curprocmask, gd->gd_cpumask);
                bsd4_select_curproc(gd);
                crit_exit();
        }
}
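
/*
 * Illustrative designation-handoff scenario (example values, not from
 * the original source): suppose cpu 0's uschedcp is thread A with
 * dd->upri == PRIBASE_NORMAL + 64, and thread B returns to userland
 * with lwp_priority == PRIBASE_NORMAL + 32.  Lower values are better,
 * so bsd4_acquire_curproc() lets B steal the designation, deschedules
 * A and requeues it via bsd4_setrunqueue().  When B later blocks in
 * the kernel, bsd4_release_curproc() NULLs out uschedcp and calls
 * bsd4_select_curproc() to pick a successor.
 */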

/*
 * BSD4_SELECT_CURPROC
 *
 * Select a new current process for this cpu and clear any pending user
 * reschedule request.  The cpu currently has no current process.
 *
 * This routine is also responsible for equal-priority round-robining,
 * typically triggered from bsd4_schedulerclock().  In our dummy example
 * all the 'user' threads are LWKT scheduled all at once and we just
 * call lwkt_switch().
 *
 * The calling process is not on the queue and cannot be selected.
 *
 * MPSAFE
 */
static
void
bsd4_select_curproc(globaldata_t gd)
{
        bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];
        struct lwp *nlp;
        int cpuid = gd->gd_cpuid;

        crit_enter_gd(gd);

        spin_lock_wr(&bsd4_spin);
        if ((nlp = chooseproc_locked(dd->uschedcp)) != NULL) {
                atomic_set_int(&bsd4_curprocmask, 1 << cpuid);
                dd->upri = nlp->lwp_priority;
                dd->uschedcp = nlp;
                spin_unlock_wr(&bsd4_spin);
#ifdef SMP
                lwkt_acquire(nlp->lwp_thread);
#endif
                lwkt_schedule(nlp->lwp_thread);
        } else if (bsd4_runqcount && (bsd4_rdyprocmask & (1 << cpuid))) {
                atomic_clear_int(&bsd4_rdyprocmask, 1 << cpuid);
                spin_unlock_wr(&bsd4_spin);
                lwkt_schedule(&dd->helper_thread);
        } else {
                spin_unlock_wr(&bsd4_spin);
        }
        crit_exit_gd(gd);
}

/*
 * BSD4_SETRUNQUEUE
 *
 * Place the specified lwp on the user scheduler's run queue.  This routine
 * must be called with the thread descheduled.  The lwp must be runnable.
 *
 * The thread may be the current thread as a special case.
 *
 * MPSAFE
 */
static void
bsd4_setrunqueue(struct lwp *lp)
{
        globaldata_t gd;
        bsd4_pcpu_t dd;
#ifdef SMP
        int cpuid;
        cpumask_t mask;
        cpumask_t tmpmask;
#endif

        /*
         * First validate the process state relative to the current cpu.
         * We don't need the spinlock for this, just a critical section.
         * We are in control of the process.
         */
        crit_enter();
        KASSERT(lp->lwp_stat == LSRUN, ("setrunqueue: lwp not LSRUN"));
        KASSERT((lp->lwp_flag & LWP_ONRUNQ) == 0,
            ("lwp %d/%d already on runq! flag %08x/%08x", lp->lwp_proc->p_pid,
             lp->lwp_tid, lp->lwp_proc->p_flag, lp->lwp_flag));
        KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);

        /*
         * Note: gd and dd are relative to the target thread's last cpu,
         * NOT our current cpu.
         */
        gd = lp->lwp_thread->td_gd;
        dd = &bsd4_pcpu[gd->gd_cpuid];

        /*
         * This process is not supposed to be scheduled anywhere or assigned
         * as the current process anywhere.  Assert the condition.
         */
        KKASSERT(dd->uschedcp != lp);

#ifndef SMP
        /*
         * If we are not SMP we do not have a scheduler helper to kick
         * and must directly activate the process if none are scheduled.
         *
         * This is really only an issue when bootstrapping init since
         * the caller in all other cases will be a user process, and
         * even if released (dd->uschedcp == NULL), that process will
         * kickstart the scheduler when it returns to user mode from
         * the kernel.
         */
        if (dd->uschedcp == NULL) {
                atomic_set_int(&bsd4_curprocmask, gd->gd_cpumask);
                dd->uschedcp = lp;
                dd->upri = lp->lwp_priority;
                lwkt_schedule(lp->lwp_thread);
                crit_exit();
                return;
        }
#endif
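
        /*
         * Illustrative walk-through of the helper-cpu scan in the SMP
         * path below (example values, not from the original source):
         * with ncpus == 4 and bsd4_scancpu == 6 the scan starts at
         * cpuid 6 % 4 == 2.  tmpmask = ~((1 << 2) - 1) selects cpus
         * >= 2, so a ready cpu in {2,3} is preferred; if none
         * qualifies, the bsfl(mask) fallback wraps the search around
         * to {0,1}.  A candidate is only kicked if its dd->upri is at
         * least one full queue (PPQ) worse than the new lwp's
         * priority.
         */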

#ifdef SMP
        /*
         * XXX fixme.  Could be part of a remrunqueue/setrunqueue
         * operation when the priority is recalculated, so TDF_MIGRATING
         * may already be set.
         */
        if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0)
                lwkt_giveaway(lp->lwp_thread);
#endif

        /*
         * We lose control of lp the moment we release the spinlock after
         * having placed lp on the queue.  i.e. another cpu could pick it
         * up and it could exit, or its priority could be further adjusted,
         * or something like that.
         */
        spin_lock_wr(&bsd4_spin);
        bsd4_setrunqueue_locked(lp);

#ifdef SMP
        /*
         * Kick the scheduler helper on one of the other cpus
         * and request a reschedule if appropriate.
         */
        cpuid = (bsd4_scancpu & 0xFFFF) % ncpus;
        ++bsd4_scancpu;
        mask = ~bsd4_curprocmask & bsd4_rdyprocmask &
               lp->lwp_cpumask & smp_active_mask;
        spin_unlock_wr(&bsd4_spin);

        while (mask) {
                tmpmask = ~((1 << cpuid) - 1);
                if (mask & tmpmask)
                        cpuid = bsfl(mask & tmpmask);
                else
                        cpuid = bsfl(mask);
                gd = globaldata_find(cpuid);
                dd = &bsd4_pcpu[cpuid];

                if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
                        if (gd == mycpu)
                                need_user_resched_remote(NULL);
                        else
                                lwkt_send_ipiq(gd, need_user_resched_remote,
                                               NULL);
                        break;
                }
                mask &= ~(1 << cpuid);
        }
#else
        /*
         * Request a reschedule if appropriate.
         */
        spin_unlock_wr(&bsd4_spin);
        if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
                need_user_resched();
        }
#endif
        crit_exit();
}

/*
 * This routine is called from a systimer IPI.  It MUST be MP-safe and
 * the BGL IS NOT HELD ON ENTRY.  This routine is called at ESTCPUFREQ on
 * each cpu.
 *
 * Because this is effectively a 'fast' interrupt, we cannot safely
 * use spinlocks unless gd_spinlock_rd is NULL and gd_spinlocks_wr is 0,
 * even if the spinlocks are 'non conflicting'.  This is due to the way
 * spinlock conflicts against cached read locks are handled.
 *
 * MPSAFE
 */
static
void
bsd4_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
{
        globaldata_t gd = mycpu;
        bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];

        /*
         * Do we need to round-robin?  We round-robin 10 times a second.
         * This should only occur for cpu-bound batch processes.
         */
        if (++dd->rrcount >= usched_bsd4_rrinterval) {
                dd->rrcount = 0;
                need_user_resched();
        }

        /*
         * As the process accumulates cpu time p_estcpu is bumped and may
         * push the process into another scheduling queue.  It typically
         * takes 4 ticks to bump the queue.
         */
        lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUINCR);

        /*
         * Reducing p_origcpu over time causes more of our estcpu to be
         * returned to the parent when we exit.  This is a small tweak
         * for the batch detection heuristic.
         */
        if (lp->lwp_origcpu)
                --lp->lwp_origcpu;

        /*
         * We can only safely call bsd4_resetpriority(), which uses spinlocks,
         * if we aren't interrupting a thread that is using spinlocks.
         * Otherwise we can deadlock with another cpu waiting for our read
         * spinlocks to clear.
         */
        if (gd->gd_spinlock_rd == NULL && gd->gd_spinlocks_wr == 0)
                bsd4_resetpriority(lp);
        else
                need_user_resched();
}
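
/*
 * Worked tick arithmetic for the clock handler above (illustrative,
 * using the default constants): each tick at 100% cpu adds
 * ESTCPUINCR = 128 estcpu units and a queue spans ESTCPUPPQ = 512
 * units, so a cpu-bound thread moves one full queue toward batch
 * every 512/128 = ESTCPURAMP = 4 ticks.  The round-robin check fires
 * every usched_bsd4_rrinterval = (ESTCPUFREQ + 9) / 10 ticks, which
 * works out to roughly 10 round-robins per second.
 */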

/*
 * Called from acquire and from kern_synch's one-second timer (one of the
 * callout helper threads) with a critical section held.
 *
 * Decay p_estcpu based on the number of ticks we haven't been running
 * and our p_nice.  As the load increases each process observes a larger
 * number of idle ticks (because other processes are running in them).
 * This observation leads to a larger correction which tends to make the
 * system more 'batchy'.
 *
 * Note that no recalculation occurs for a process which sleeps and wakes
 * up in the same tick.  That is, a system doing thousands of context
 * switches per second will still only do serious estcpu calculations
 * ESTCPUFREQ times per second.
 *
 * MPSAFE
 */
static
void
bsd4_recalculate_estcpu(struct lwp *lp)
{
        globaldata_t gd = mycpu;
        sysclock_t cpbase;
        int loadfac;
        int ndecay;
        int nticks;
        int nleft;

        /*
         * We have to subtract periodic to get the last schedclock
         * timeout time, otherwise we would get the upcoming timeout.
         * Keep in mind that a process can migrate between cpus and
         * while the scheduler clock should be very close, boundary
         * conditions could lead to a small negative delta.
         */
        cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;

        if (lp->lwp_slptime > 1) {
                /*
                 * Too much time has passed, do a coarse correction.
                 */
                lp->lwp_estcpu = lp->lwp_estcpu >> 1;
                bsd4_resetpriority(lp);
                lp->lwp_cpbase = cpbase;
                lp->lwp_cpticks = 0;
        } else if (lp->lwp_cpbase != cpbase) {
                /*
                 * Adjust estcpu if we are in a different tick.  Don't waste
                 * time if we are in the same tick.
                 *
                 * First calculate the number of ticks in the measurement
                 * interval.  The nticks calculation can wind up 0 due to
                 * a bug in the handling of lwp_slptime (as yet not found),
                 * so make sure we do not get a divide by 0 panic.
                 */
                nticks = (cpbase - lp->lwp_cpbase) /
                         gd->gd_schedclock.periodic;
                if (nticks <= 0)
                        nticks = 1;
                updatepcpu(lp, lp->lwp_cpticks, nticks);

                if ((nleft = nticks - lp->lwp_cpticks) < 0)
                        nleft = 0;
                if (usched_debug == lp->lwp_proc->p_pid) {
                        kprintf("pid %d tid %d estcpu %d cpticks %d nticks %d nleft %d",
                                lp->lwp_proc->p_pid, lp->lwp_tid,
                                lp->lwp_estcpu, lp->lwp_cpticks,
                                nticks, nleft);
                }

                /*
                 * Calculate a decay value based on ticks remaining scaled
                 * down by the instantaneous load and p_nice.
                 */
                if ((loadfac = bsd4_runqcount) < 2)
                        loadfac = 2;
                ndecay = nleft * usched_bsd4_decay * 2 *
                         (PRIO_MAX * 2 - lp->lwp_proc->p_nice) /
                         (loadfac * PRIO_MAX * 2);

                /*
                 * Adjust p_estcpu.  Handle a border case where batch jobs
                 * can get stalled long enough to decay to zero when they
                 * shouldn't.
                 */
                if (lp->lwp_estcpu > ndecay * 2)
                        lp->lwp_estcpu -= ndecay;
                else
                        lp->lwp_estcpu >>= 1;

                if (usched_debug == lp->lwp_proc->p_pid)
                        kprintf(" ndecay %d estcpu %d\n", ndecay,
                                lp->lwp_estcpu);
                bsd4_resetpriority(lp);
                lp->lwp_cpbase = cpbase;
                lp->lwp_cpticks = 0;
        }
}
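
/*
 * Worked decay example for the formula above (illustrative values,
 * assuming the default usched_bsd4_decay of ESTCPUINCR/2 = 64): a
 * nice-0 process idle for nleft = 10 ticks on a lightly loaded system
 * (loadfac = 2) decays by
 *
 *      ndecay = 10 * 64 * 2 * (40 - 0) / (2 * 40 * 2) = 320
 *
 * estcpu units.  With bsd4_runqcount at 8 the same process decays by
 * only 10 * 64 * 2 * 40 / (8 * 40 * 2) = 80 units, making loaded
 * systems 'batchier' as the header comment describes.
 */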

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 *
 * This routine may be called with any process.
 *
 * This routine is called by fork1() for initial setup with the process
 * off the run queue, and also may be called normally with the process on
 * or off the run queue.
 *
 * MPSAFE
 */
static void
bsd4_resetpriority(struct lwp *lp)
{
        bsd4_pcpu_t dd;
        int newpriority;
        u_short newrqtype;
        int reschedcpu;

        /*
         * Calculate the new priority and queue type
         */
        crit_enter();
        spin_lock_wr(&bsd4_spin);

        newrqtype = lp->lwp_rtprio.type;

        switch(newrqtype) {
        case RTP_PRIO_REALTIME:
        case RTP_PRIO_FIFO:
                newpriority = PRIBASE_REALTIME +
                              (lp->lwp_rtprio.prio & PRIMASK);
                break;
        case RTP_PRIO_NORMAL:
                newpriority = (lp->lwp_proc->p_nice - PRIO_MIN) *
                              PPQ / NICEPPQ;
                newpriority += lp->lwp_estcpu * PPQ / ESTCPUPPQ;
                newpriority = newpriority * MAXPRI / (PRIO_RANGE * PPQ /
                              NICEPPQ + ESTCPUMAX * PPQ / ESTCPUPPQ);
                newpriority = PRIBASE_NORMAL + (newpriority & PRIMASK);
                break;
        case RTP_PRIO_IDLE:
                newpriority = PRIBASE_IDLE + (lp->lwp_rtprio.prio & PRIMASK);
                break;
        case RTP_PRIO_THREAD:
                newpriority = PRIBASE_THREAD + (lp->lwp_rtprio.prio & PRIMASK);
                break;
        default:
                panic("Bad RTP_PRIO %d", newrqtype);
                /* NOT REACHED */
        }

        /*
         * The newpriority incorporates the queue type so do a simple masked
         * check to determine if the process has moved to another queue.  If
         * it has, and it is currently on a run queue, then move it.
         */
        if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
                lp->lwp_priority = newpriority;
                if (lp->lwp_flag & LWP_ONRUNQ) {
                        bsd4_remrunqueue_locked(lp);
                        lp->lwp_rqtype = newrqtype;
                        lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
                        bsd4_setrunqueue_locked(lp);
                        reschedcpu = lp->lwp_thread->td_gd->gd_cpuid;
                } else {
                        lp->lwp_rqtype = newrqtype;
                        lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
                        reschedcpu = -1;
                }
        } else {
                lp->lwp_priority = newpriority;
                reschedcpu = -1;
        }
        spin_unlock_wr(&bsd4_spin);

        /*
         * Determine if we need to reschedule the target cpu.  This only
         * occurs if the LWP is already on a scheduler queue, which means
         * that idle cpu notification has already occurred.  At most we
         * need only issue a need_user_resched() on the appropriate cpu.
         *
         * The LWP may be owned by a CPU different from the current one,
         * in which case dd->uschedcp may be modified without an MP lock
         * or a spinlock held.  The worst that happens is that the code
         * below causes a spurious need_user_resched() on the target CPU
         * and dd->upri to be wrong for a short period of time, both of
         * which are harmless.
         */
        if (reschedcpu >= 0) {
                dd = &bsd4_pcpu[reschedcpu];
                if ((dd->upri & ~PRIMASK) > (lp->lwp_priority & ~PRIMASK)) {
                        dd->upri = lp->lwp_priority;
#ifdef SMP
                        if (reschedcpu == mycpu->gd_cpuid) {
                                need_user_resched();
                        } else {
                                lwkt_send_ipiq(lp->lwp_thread->td_gd,
                                               need_user_resched_remote,
                                               NULL);
                        }
#else
                        need_user_resched();
#endif
                }
        }
        crit_exit();
}
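
/*
 * Worked example of the RTP_PRIO_NORMAL calculation above (illustrative
 * values only): for a nice-0 process with estcpu = 8192 (half of
 * ESTCPUMAX), the nice term is (0 - (-20)) * 4 / 2 = 40 and the estcpu
 * term is 8192 * 4 / 512 = 64, giving 104.  The scale denominator is
 * 41 * 4 / 2 + 16384 * 4 / 512 = 82 + 128 = 210, so the scaled value
 * is 104 * 128 / 210 = 63 and the final priority is
 * PRIBASE_NORMAL + 63, which lands in run queue (63 & PRIMASK) / PPQ
 * = 15.
 */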

/*
 * MPSAFE
 */
static
void
bsd4_yield(struct lwp *lp)
{
#if 0
        /* FUTURE (or something similar) */
        switch(lp->lwp_rqtype) {
        case RTP_PRIO_NORMAL:
                lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUINCR);
                break;
        default:
                break;
        }
#endif
        need_user_resched();
}

/*
 * Called from fork1() when a new child process is being created.
 *
 * Give the child process an initial estcpu that is more batch than
 * its parent and dock the parent for the fork (but do not
 * reschedule the parent).  This comprises the main part of our batch
 * detection heuristic for both parallel forking and sequential execs.
 *
 * Interactive processes will decay the boosted estcpu quickly while batch
 * processes will tend to compound it.
 * XXX lwp should be "spawning" instead of "forking"
 *
 * MPSAFE
 */
static void
bsd4_forking(struct lwp *plp, struct lwp *lp)
{
        lp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ);
        lp->lwp_origcpu = lp->lwp_estcpu;
        plp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ);
}

/*
 * Called when the parent reaps a child.  Propagate cpu use by the child
 * back to the parent.
 *
 * MPSAFE
 */
static void
bsd4_exiting(struct lwp *plp, struct lwp *lp)
{
        int delta;

        if (plp->lwp_proc->p_pid != 1) {
                delta = lp->lwp_estcpu - lp->lwp_origcpu;
                if (delta > 0)
                        plp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + delta);
        }
}
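
/*
 * Illustrative fork/exit round trip (example numbers, not from the
 * original source): a parent with estcpu 1000 forks.  The child starts
 * with estcpu = origcpu = 1000 + 512 = 1512 and the parent is docked
 * to 1512 as well.  If the child runs hot and exits with estcpu 4000,
 * the positive delta 4000 - 1512 = 2488 is folded back into the
 * parent, so a parent that repeatedly spawns cpu-bound children is
 * itself pushed toward the batch queues.  Note that lwp_origcpu also
 * decays one unit per scheduler tick in bsd4_schedulerclock(), which
 * enlarges the delta over time.
 */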

/*
 * chooseproc() is called when a cpu needs a user process to LWKT schedule,
 * it selects a user process and returns it.  If chklp is non-NULL and chklp
 * has a better or equal priority than the process that would otherwise be
 * chosen, NULL is returned.
 *
 * Until we fix the RUNQ code the chklp test has to be strict or we may
 * bounce between processes trying to acquire the current process
 * designation.
 *
 * MPSAFE - must be called with bsd4_spin exclusive held.  The spinlock is
 *          left intact through the entire routine.
 */
static
struct lwp *
chooseproc_locked(struct lwp *chklp)
{
        struct lwp *lp;
        struct rq *q;
        u_int32_t *which, *which2;
        u_int32_t pri;
        u_int32_t rtqbits;
        u_int32_t tsqbits;
        u_int32_t idqbits;
        cpumask_t cpumask;

        rtqbits = bsd4_rtqueuebits;
        tsqbits = bsd4_queuebits;
        idqbits = bsd4_idqueuebits;
        cpumask = mycpu->gd_cpumask;

#ifdef SMP
again:
#endif
        if (rtqbits) {
                pri = bsfl(rtqbits);
                q = &bsd4_rtqueues[pri];
                which = &bsd4_rtqueuebits;
                which2 = &rtqbits;
        } else if (tsqbits) {
                pri = bsfl(tsqbits);
                q = &bsd4_queues[pri];
                which = &bsd4_queuebits;
                which2 = &tsqbits;
        } else if (idqbits) {
                pri = bsfl(idqbits);
                q = &bsd4_idqueues[pri];
                which = &bsd4_idqueuebits;
                which2 = &idqbits;
        } else {
                return NULL;
        }
        lp = TAILQ_FIRST(q);
        KASSERT(lp, ("chooseproc: no lwp on busy queue"));

#ifdef SMP
        while ((lp->lwp_cpumask & cpumask) == 0) {
                lp = TAILQ_NEXT(lp, lwp_procq);
                if (lp == NULL) {
                        *which2 &= ~(1 << pri);
                        goto again;
                }
        }
#endif

        /*
         * If the passed lwp <chklp> is reasonably close to the selected
         * lwp <lp>, return NULL (indicating that <chklp> should be kept).
         *
         * Note that we must error on the side of <chklp> to avoid bouncing
         * between threads in the acquire code.
         */
        if (chklp) {
                if (chklp->lwp_priority < lp->lwp_priority + PPQ)
                        return(NULL);
        }

#ifdef SMP
        /*
         * If the chosen lwp does not reside on this cpu spend a few
         * cycles looking for a better candidate at the same priority level.
         * This is a fallback check, setrunqueue() tries to wakeup the
         * correct cpu and is our front-line affinity.
         */
        if (lp->lwp_thread->td_gd != mycpu &&
            (chklp = TAILQ_NEXT(lp, lwp_procq)) != NULL
        ) {
                if (chklp->lwp_thread->td_gd == mycpu) {
                        ++choose_affinity;
                        lp = chklp;
                }
        }
#endif

        TAILQ_REMOVE(q, lp, lwp_procq);
        --bsd4_runqcount;
        if (TAILQ_EMPTY(q))
                *which &= ~(1 << pri);
        KASSERT((lp->lwp_flag & LWP_ONRUNQ) != 0, ("not on runq6!"));
        lp->lwp_flag &= ~LWP_ONRUNQ;
        return lp;
}

#ifdef SMP

/*
 * Called via an ipi message to reschedule on another cpu.  If no
 * user thread is active on the target cpu we wake the scheduler
 * helper thread up to help schedule one.
 *
 * MPSAFE
 */
static
void
need_user_resched_remote(void *dummy)
{
        globaldata_t gd = mycpu;
        bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];

        if (dd->uschedcp == NULL && (bsd4_rdyprocmask & gd->gd_cpumask)) {
                atomic_clear_int(&bsd4_rdyprocmask, gd->gd_cpumask);
                lwkt_schedule(&dd->helper_thread);
        } else {
                need_user_resched();
        }
}

#endif

/*
 * bsd4_remrunqueue_locked() removes a given process from the run queue
 * that it is on, clearing the queue busy bit if it becomes empty.
 *
 * Note that the user process scheduler is different from the LWKT
 * scheduler.  The user process scheduler only manages user processes
 * but it uses LWKT underneath, and a user process operating in the
 * kernel will often be 'released' from our management.
 *
 * MPSAFE - bsd4_spin must be held exclusively on call
 */
static void
bsd4_remrunqueue_locked(struct lwp *lp)
{
        struct rq *q;
        u_int32_t *which;
        u_int8_t pri;

        KKASSERT(lp->lwp_flag & LWP_ONRUNQ);
        lp->lwp_flag &= ~LWP_ONRUNQ;
        --bsd4_runqcount;
        KKASSERT(bsd4_runqcount >= 0);

        pri = lp->lwp_rqindex;
        switch(lp->lwp_rqtype) {
        case RTP_PRIO_NORMAL:
                q = &bsd4_queues[pri];
                which = &bsd4_queuebits;
                break;
        case RTP_PRIO_REALTIME:
        case RTP_PRIO_FIFO:
                q = &bsd4_rtqueues[pri];
                which = &bsd4_rtqueuebits;
                break;
        case RTP_PRIO_IDLE:
                q = &bsd4_idqueues[pri];
                which = &bsd4_idqueuebits;
                break;
        default:
                panic("remrunqueue: invalid rtprio type");
                /* NOT REACHED */
        }
        TAILQ_REMOVE(q, lp, lwp_procq);
        if (TAILQ_EMPTY(q)) {
                KASSERT((*which & (1 << pri)) != 0,
                        ("remrunqueue: remove from empty queue"));
                *which &= ~(1 << pri);
        }
}
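
/*
 * Illustrative queue/bit bookkeeping (example values only): if lwp X
 * is the sole entry on bsd4_queues[5], bsd4_remrunqueue_locked(X)
 * empties the TAILQ and clears bit 5 of bsd4_queuebits, so
 * chooseproc_locked() will no longer consider that queue.  A later
 * bsd4_setrunqueue_locked(X) appends X to the tail of the queue chosen
 * by its rqtype/rqindex and sets the bit again.
 */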

/*
 * bsd4_setrunqueue_locked()
 *
 * Add a process whose rqtype and rqindex had previously been calculated
 * onto the appropriate run queue.  The caller is responsible for
 * determining whether the addition requires a reschedule on a cpu.
 *
 * NOTE: Lower priorities are better priorities.
 *
 * MPSAFE - bsd4_spin must be held exclusively on call
 */
static void
bsd4_setrunqueue_locked(struct lwp *lp)
{
        struct rq *q;
        u_int32_t *which;
        int pri;

        KKASSERT((lp->lwp_flag & LWP_ONRUNQ) == 0);
        lp->lwp_flag |= LWP_ONRUNQ;
        ++bsd4_runqcount;

        pri = lp->lwp_rqindex;

        switch(lp->lwp_rqtype) {
        case RTP_PRIO_NORMAL:
                q = &bsd4_queues[pri];
                which = &bsd4_queuebits;
                break;
        case RTP_PRIO_REALTIME:
        case RTP_PRIO_FIFO:
                q = &bsd4_rtqueues[pri];
                which = &bsd4_rtqueuebits;
                break;
        case RTP_PRIO_IDLE:
                q = &bsd4_idqueues[pri];
                which = &bsd4_idqueuebits;
                break;
        default:
                panic("setrunqueue: invalid rtprio type");
                /* NOT REACHED */
        }

        /*
         * Add to the correct queue and set the appropriate bit.  If no
         * lower priority (i.e. better) processes are in the queue then
         * we want a reschedule, calculate the best cpu for the job.
         *
         * Always run reschedules on the LWP's original cpu.
         */
        TAILQ_INSERT_TAIL(q, lp, lwp_procq);
        *which |= 1 << pri;
}

#ifdef SMP

/*
 * For SMP systems a user scheduler helper thread is created for each
 * cpu and is used to allow one cpu to wakeup another for the purposes of
 * scheduling userland threads from setrunqueue().  UP systems do not
 * need the helper since there is only one cpu.  We can't use the idle
 * thread for this because we need to hold the MP lock.  Additionally,
 * doing things this way allows us to HLT idle cpus on MP systems.
 *
 * MPSAFE
 */
static void
sched_thread(void *dummy)
{
        globaldata_t gd;
        bsd4_pcpu_t dd;
        struct lwp *nlp;
        cpumask_t cpumask;
        int cpuid;
#if 0
        cpumask_t tmpmask;
        int tmpid;
#endif

        gd = mycpu;
        cpuid = gd->gd_cpuid;           /* doesn't change */
        cpumask = gd->gd_cpumask;       /* doesn't change */
        dd = &bsd4_pcpu[cpuid];

        /*
         * The scheduler thread does not need to hold the MP lock.  Since we
         * are woken up only when no user processes are scheduled on a cpu,
         * we can run at an ultra low priority.
         */
        rel_mplock();
        lwkt_setpri_self(TDPRI_USER_SCHEDULER);

        for (;;) {
                /*
                 * We use the LWKT deschedule-interlock trick to avoid racing
                 * bsd4_rdyprocmask.  This means we cannot block through to
                 * the manual lwkt_switch() call we make below.
                 */
                crit_enter_gd(gd);
                lwkt_deschedule_self(gd->gd_curthread);
                spin_lock_wr(&bsd4_spin);
                atomic_set_int(&bsd4_rdyprocmask, cpumask);

                clear_user_resched();   /* This satisfies the reschedule request */
                dd->rrcount = 0;        /* Reset the round-robin counter */

                if ((bsd4_curprocmask & cpumask) == 0) {
                        /*
                         * No thread is currently scheduled.
                         */
                        KKASSERT(dd->uschedcp == NULL);
                        if ((nlp = chooseproc_locked(NULL)) != NULL) {
                                atomic_set_int(&bsd4_curprocmask, cpumask);
                                dd->upri = nlp->lwp_priority;
                                dd->uschedcp = nlp;
                                spin_unlock_wr(&bsd4_spin);
                                lwkt_acquire(nlp->lwp_thread);
                                lwkt_schedule(nlp->lwp_thread);
                        } else {
                                spin_unlock_wr(&bsd4_spin);
                        }
#if 0
                /*
                 * Disabled for now, this can create an infinite loop.
                 */
                } else if (bsd4_runqcount) {
                        /*
                         * Someone scheduled us but raced.  In order to not
                         * lose track of the fact that there may be a LWP
                         * ready to go, forward the request to another cpu
                         * if available.
                         *
                         * Rotate through cpus starting with cpuid + 1.
                         * Since cpuid is already masked out by
                         * gd_other_cpus, just use ~cpumask.
                         */
                        tmpmask = bsd4_rdyprocmask & mycpu->gd_other_cpus &
                                  ~bsd4_curprocmask;
                        if (tmpmask) {
                                if (tmpmask & ~(cpumask - 1))
                                        tmpid = bsfl(tmpmask & ~(cpumask - 1));
                                else
                                        tmpid = bsfl(tmpmask);
                                bsd4_scancpu = tmpid;
                                atomic_clear_int(&bsd4_rdyprocmask,
                                                 1 << tmpid);
                                spin_unlock_wr(&bsd4_spin);
                                lwkt_schedule(&bsd4_pcpu[tmpid].helper_thread);
                        } else {
                                spin_unlock_wr(&bsd4_spin);
                        }
#endif
                } else {
                        /*
                         * The runq is empty.
                         */
                        spin_unlock_wr(&bsd4_spin);
                }
                crit_exit_gd(gd);
                lwkt_switch();
        }
}

/*
 * Set up our scheduler helpers.  Note that curprocmask bit 0 has already
 * been cleared by rqinit() and we should not mess with it further.
 */
static void
sched_thread_cpu_init(void)
{
        int i;

        if (bootverbose)
                kprintf("start scheduler helpers on cpus:");

        for (i = 0; i < ncpus; ++i) {
                bsd4_pcpu_t dd = &bsd4_pcpu[i];
                cpumask_t mask = 1 << i;

                if ((mask & smp_active_mask) == 0)
                        continue;

                if (bootverbose)
                        kprintf(" %d", i);

                lwkt_create(sched_thread, NULL, NULL, &dd->helper_thread,
                            TDF_STOPREQ, i, "usched %d", i);

                /*
                 * Allow user scheduling on the target cpu.  cpu #0 has
                 * already been enabled in rqinit().
                 */
                if (i)
                        atomic_clear_int(&bsd4_curprocmask, mask);
                atomic_set_int(&bsd4_rdyprocmask, mask);
                dd->upri = PRIBASE_NULL;
        }
        if (bootverbose)
                kprintf("\n");
}
SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
        sched_thread_cpu_init, NULL)

#endif