/*
 * Copyright (c) 2012 The DragonFly Project.  All rights reserved.
 * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>,
 * by Mihai Carabas <mihai.carabas@gmail.com>
 * and many others.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/uio.h>
#include <sys/sysctl.h>
#include <sys/resourcevar.h>
#include <sys/spinlock.h>
#include <sys/cpu_topology.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

#include <sys/ktr.h>

#include <machine/cpu.h>
#include <machine/smp.h>

/*
 * Priorities.  Note that with 32 run queues per scheduler each queue
 * represents four priority levels.
 */

int dfly_rebalanced;

#define MAXPRI			128
#define PRIMASK			(MAXPRI - 1)
#define PRIBASE_REALTIME	0
#define PRIBASE_NORMAL		MAXPRI
#define PRIBASE_IDLE		(MAXPRI * 2)
#define PRIBASE_THREAD		(MAXPRI * 3)
#define PRIBASE_NULL		(MAXPRI * 4)

#define NQS	32			/* 32 run queues. */
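/*
 * Example: with MAXPRI 128 the class bases work out to REALTIME 0,
 * NORMAL 128, IDLE 256, THREAD 384 and NULL 512, so a thread's class
 * is (lwp_priority & ~PRIMASK) and its within-class priority is
 * (lwp_priority & PRIMASK).
 */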
#define PPQ		(MAXPRI / NQS)	/* priorities per queue */
#define PPQMASK		(PPQ - 1)

/*
 * NICEPPQ	- number of nice units per priority queue
 * ESTCPUPPQ	- number of estcpu units per priority queue
 * ESTCPUMAX	- number of estcpu units
 */
#define NICEPPQ		2
#define ESTCPUPPQ	512
#define ESTCPUMAX	(ESTCPUPPQ * NQS)
#define BATCHMAX	(ESTCPUFREQ * 30)
#define PRIO_RANGE	(PRIO_MAX - PRIO_MIN + 1)

#define ESTCPULIM(v)	min((v), ESTCPUMAX)

TAILQ_HEAD(rq, lwp);

#define lwp_priority	lwp_usdata.dfly.priority
#define lwp_forked	lwp_usdata.dfly.forked
#define lwp_rqindex	lwp_usdata.dfly.rqindex
#define lwp_estcpu	lwp_usdata.dfly.estcpu
#define lwp_estfast	lwp_usdata.dfly.estfast
#define lwp_uload	lwp_usdata.dfly.uload
#define lwp_rqtype	lwp_usdata.dfly.rqtype
#define lwp_qcpu	lwp_usdata.dfly.qcpu
#define lwp_rrcount	lwp_usdata.dfly.rrcount

struct usched_dfly_pcpu {
	struct spinlock spin;
	struct thread	helper_thread;
	short		unused01;
	short		upri;
	int		uload;
	int		ucount;
	struct lwp	*uschedcp;
	struct rq	queues[NQS];
	struct rq	rtqueues[NQS];
	struct rq	idqueues[NQS];
	u_int32_t	queuebits;
	u_int32_t	rtqueuebits;
	u_int32_t	idqueuebits;
	int		runqcount;
	int		cpuid;
	cpumask_t	cpumask;
	cpu_node_t	*cpunode;
};

typedef struct usched_dfly_pcpu	*dfly_pcpu_t;

static void dfly_acquire_curproc(struct lwp *lp);
static void dfly_release_curproc(struct lwp *lp);
static void dfly_select_curproc(globaldata_t gd);
static void dfly_setrunqueue(struct lwp *lp);
static void dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp);
static void dfly_schedulerclock(struct lwp *lp, sysclock_t period,
				sysclock_t cpstamp);
static void dfly_recalculate_estcpu(struct lwp *lp);
static void dfly_resetpriority(struct lwp *lp);
static void dfly_forking(struct lwp *plp, struct lwp *lp);
static void dfly_exiting(struct lwp *lp, struct proc *);
static void dfly_uload_update(struct lwp *lp);
static void dfly_yield(struct lwp *lp);
static void dfly_changeqcpu_locked(struct lwp *lp,
				dfly_pcpu_t dd, dfly_pcpu_t rdd);
static dfly_pcpu_t dfly_choose_best_queue(struct lwp *lp);
static dfly_pcpu_t dfly_choose_worst_queue(dfly_pcpu_t dd);
static dfly_pcpu_t dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp);
static void dfly_need_user_resched_remote(void *dummy);
static struct lwp *dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
					struct lwp *chklp, int worst);
static void dfly_remrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
static void dfly_setrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);

struct usched usched_dfly = {
	{ NULL },
	"dfly", "Original DragonFly Scheduler",
	NULL,			/* default registration */
	NULL,			/* default deregistration */
	dfly_acquire_curproc,
	dfly_release_curproc,
	dfly_setrunqueue,
	dfly_schedulerclock,
	dfly_recalculate_estcpu,
	dfly_resetpriority,
	dfly_forking,
	dfly_exiting,
	dfly_uload_update,
	NULL,			/* setcpumask not supported */
	dfly_yield
};

/*
 * We have NQS (32) run queues per scheduling class.  For the normal
 * class, there are 128 priorities scaled onto these 32 queues.  New
 * processes are added to the last entry in each queue, and processes
 * are selected for running by taking them from the head and maintaining
 * a simple FIFO arrangement.  Realtime and Idle priority processes have
 * an explicit 0-31 priority which maps directly onto their class queue
 * index.  When a queue has something in it, the corresponding bit is
 * set in the queuebits variable, allowing a single read to determine
 * the state of all 32 queues and then a ffs() to find the first busy
 * queue.
 */
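/*
 * Example: if only normal-class queues 2 and 9 are occupied, queuebits
 * is 0x00000204 and bsfl(0x204) == 2 locates the most urgent non-empty
 * queue without scanning all 32 of them.
 */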
static cpumask_t dfly_curprocmask = -1;	/* currently running a user process */
static cpumask_t dfly_rdyprocmask;	/* ready to accept a user process */
static volatile int dfly_scancpu;
static volatile int dfly_ucount;	/* total running on whole system */
static struct usched_dfly_pcpu dfly_pcpu[MAXCPU];
static struct sysctl_ctx_list usched_dfly_sysctl_ctx;
static struct sysctl_oid *usched_dfly_sysctl_tree;

/* Debug info exposed through debug.* sysctl */

static int usched_dfly_debug = -1;
SYSCTL_INT(_debug, OID_AUTO, dfly_scdebug, CTLFLAG_RW,
	   &usched_dfly_debug, 0,
	   "Print debug information for this pid");

static int usched_dfly_pid_debug = -1;
SYSCTL_INT(_debug, OID_AUTO, dfly_pid_debug, CTLFLAG_RW,
	   &usched_dfly_pid_debug, 0,
	   "Print KTR debug information for this pid");

static int usched_dfly_chooser = 0;
SYSCTL_INT(_debug, OID_AUTO, dfly_chooser, CTLFLAG_RW,
	   &usched_dfly_chooser, 0,
	   "Print debug information for cpu chooser decisions");

/*
 * Tuning usched_dfly - configurable through kern.usched_dfly.
 *
 * weight1 - Tries to keep threads on their current cpu.  If you
 *	     make this value too large the scheduler will not be
 *	     able to load-balance large loads.
 *
 * weight2 - If non-zero, detects thread pairs undergoing synchronous
 *	     communications and tries to move them closer together.
 *	     Behavior is adjusted by bit 4 of features (0x10).
 *
 *	     WARNING!  Weight2 is a ridiculously sensitive parameter,
 *	     a small value is recommended.
 *
 * weight3 - Weighting based on the number of recently runnable threads
 *	     on the userland scheduling queue (ignoring their loads).
 *	     A nominal value here prevents high-priority (low-load)
 *	     threads from accumulating on one cpu core when other
 *	     cores are available.
 *
 *	     This value should be left fairly small relative to weight1
 *	     and weight4.
 *
 * weight4 - Weighting based on other cpu queues being available
 *	     or running processes with higher lwp_priority values.
 *
 *	     This allows a thread to migrate to another nearby cpu if it
 *	     is unable to run on the current cpu based on the other cpu
 *	     being idle or running a lower priority (higher lwp_priority)
 *	     thread.  This value should be large enough to override weight1.
 *
 * features - These flags can be set or cleared to enable or disable various
 *	      features.
 *
 *	      0x01	Enable idle-cpu pulling			(default)
 *	      0x02	Enable proactive pushing		(default)
 *	      0x04	Enable rebalancing rover		(default)
 *	      0x08	Enable more proactive pushing		(default)
 *	      0x10	(flip weight2 limit on same cpu)	(default)
 *	      0x20	choose best cpu for forked process
 *	      0x40	choose current cpu for forked process
 *	      0x80	choose random cpu for forked process	(default)
 */
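/*
 * Example: the default features value 0x8F enables idle-cpu pulling
 * (0x01), proactive pushing (0x02 and 0x08), the rebalancing rover
 * (0x04) and random-cpu fork placement (0x80).
 */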
static int usched_dfly_smt = 0;
static int usched_dfly_cache_coherent = 0;
static int usched_dfly_weight1 = 200;	/* keep thread on current cpu */
static int usched_dfly_weight2 = 180;	/* synchronous peer's current cpu */
static int usched_dfly_weight3 = 40;	/* number of threads on queue */
static int usched_dfly_weight4 = 160;	/* availability of idle cores */
static int usched_dfly_features = 0x8F;	/* allow pulls */
static int usched_dfly_fast_resched = 0;/* delta priority / resched */
static int usched_dfly_swmask = ~PPQMASK; /* allow pulls */
static int usched_dfly_rrinterval = (ESTCPUFREQ + 9) / 10;
static int usched_dfly_decay = 8;

/* KTR debug printings */

KTR_INFO_MASTER(usched);

#if !defined(KTR_USCHED_DFLY)
#define KTR_USCHED_DFLY	KTR_ALL
#endif

KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc, 0,
    "USCHED_DFLY(chooseproc: pid %d, old_cpuid %d, curr_cpuid %d)",
    pid_t pid, int old_cpuid, int curr);

/*
 * This function is called when the kernel intends to return to userland.
 * It is responsible for making the thread the current designated userland
 * thread for this cpu, blocking if necessary.
 *
 * The kernel will not depress our LWKT priority until after we return,
 * in case we have to shove over to another cpu.
 *
 * We must determine our thread's disposition before we switch away.  This
 * is very sensitive code.
 *
 * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
 * TO ANOTHER CPU!  Because most of the kernel assumes that no migration will
 * occur, this function is called only under very controlled circumstances.
 */
static void
dfly_acquire_curproc(struct lwp *lp)
{
	globaldata_t gd;
	dfly_pcpu_t dd;
	dfly_pcpu_t rdd;
	thread_t td;
	int force_resched;

	/*
	 * Make sure we aren't sitting on a tsleep queue.
	 */
	td = lp->lwp_thread;
	crit_enter_quick(td);
	if (td->td_flags & TDF_TSLEEPQ)
		tsleep_remove(td);
	dfly_recalculate_estcpu(lp);

	gd = mycpu;
	dd = &dfly_pcpu[gd->gd_cpuid];

	/*
	 * Process any pending interrupts/ipi's, then handle reschedule
	 * requests.  dfly_release_curproc() will try to assign a new
	 * uschedcp that isn't us and otherwise NULL it out.
	 */
	force_resched = 0;
	if ((td->td_mpflags & TDF_MP_BATCH_DEMARC) &&
	    lp->lwp_rrcount >= usched_dfly_rrinterval / 2) {
		force_resched = 1;
	}

	if (user_resched_wanted()) {
		if (dd->uschedcp == lp)
			force_resched = 1;
		clear_user_resched();
		dfly_release_curproc(lp);
	}

	/*
	 * Loop until we are the current user thread.
	 *
	 * NOTE: dd spinlock not held at top of loop.
	 */
	if (dd->uschedcp == lp)
		lwkt_yield_quick();

	while (dd->uschedcp != lp) {
		lwkt_yield_quick();

		spin_lock(&dd->spin);

		/*
		 * We are not or are no longer the current lwp and a forced
		 * reschedule was requested.  Figure out the best cpu to
		 * run on (our current cpu will be given significant weight).
		 *
		 * (if a reschedule was not requested we want to move this
		 * step after the uschedcp tests).
		 */
		if (force_resched &&
		    (usched_dfly_features & 0x08) &&
		    (rdd = dfly_choose_best_queue(lp)) != dd) {
			dfly_changeqcpu_locked(lp, dd, rdd);
			spin_unlock(&dd->spin);
			lwkt_deschedule(lp->lwp_thread);
			dfly_setrunqueue_dd(rdd, lp);
			lwkt_switch();
			gd = mycpu;
			dd = &dfly_pcpu[gd->gd_cpuid];
			continue;
		}

		/*
		 * Either no reschedule was requested or the best queue was
		 * dd, and no current process has been selected.  We can
		 * trivially become the current lwp on the current cpu.
		 */
		if (dd->uschedcp == NULL) {
			atomic_set_cpumask(&dfly_curprocmask, gd->gd_cpumask);
			dd->uschedcp = lp;
			dd->upri = lp->lwp_priority;
			KKASSERT(lp->lwp_qcpu == dd->cpuid);
			spin_unlock(&dd->spin);
			break;
		}

		/*
		 * Can we steal the current designated user thread?
		 *
		 * If we do the other thread will stall when it tries to
		 * return to userland, possibly rescheduling elsewhere.
		 *
		 * It is important to do a masked test to avoid the edge
		 * case where two near-equal-priority threads are constantly
		 * interrupting each other.
		 *
		 * In the exact match case another thread has already gained
		 * uschedcp and lowered its priority, if we steal it the
		 * other thread will stay stuck on the LWKT runq and not
		 * push to another cpu.  So don't steal on equal-priority even
		 * though it might appear to be more beneficial due to not
		 * having to switch back to the other thread's context.
		 *
		 * usched_dfly_fast_resched requires that two threads be
		 * significantly far apart in priority in order to interrupt.
		 *
		 * If better but not sufficiently far apart, the current
		 * uschedcp will be interrupted at the next scheduler clock.
		 */
		if (dd->uschedcp &&
		   (dd->upri & ~PPQMASK) >
		   (lp->lwp_priority & ~PPQMASK) + usched_dfly_fast_resched) {
			dd->uschedcp = lp;
			dd->upri = lp->lwp_priority;
			KKASSERT(lp->lwp_qcpu == dd->cpuid);
			spin_unlock(&dd->spin);
			break;
		}

		/*
		 * We are not the current lwp, figure out the best cpu
		 * to run on (our current cpu will be given significant
		 * weight).  Loop on cpu change.
		 */
		if ((usched_dfly_features & 0x02) &&
		    force_resched == 0 &&
		    (rdd = dfly_choose_best_queue(lp)) != dd) {
			dfly_changeqcpu_locked(lp, dd, rdd);
			spin_unlock(&dd->spin);
			lwkt_deschedule(lp->lwp_thread);
			dfly_setrunqueue_dd(rdd, lp);
			lwkt_switch();
			gd = mycpu;
			dd = &dfly_pcpu[gd->gd_cpuid];
			continue;
		}

		/*
		 * We cannot become the current lwp, place the lp on the
		 * run-queue of this or another cpu and deschedule ourselves.
		 *
		 * When we are reactivated we will have another chance.
		 *
		 * Reload after a switch or setrunqueue/switch possibly
		 * moved us to another cpu.
		 */
		spin_unlock(&dd->spin);
		lwkt_deschedule(lp->lwp_thread);
		dfly_setrunqueue_dd(dd, lp);
		lwkt_switch();
		gd = mycpu;
		dd = &dfly_pcpu[gd->gd_cpuid];
	}

	/*
	 * Make sure upri is synchronized, then yield to LWKT threads as
	 * needed before returning.  This could result in another reschedule.
	 * XXX
	 */
	crit_exit_quick(td);

	KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
}

/*
 * DFLY_RELEASE_CURPROC
 *
 * This routine detaches the current thread from the userland scheduler,
 * usually because the thread needs to run or block in the kernel (at
 * kernel priority) for a while.
 *
 * This routine is also responsible for selecting a new thread to
 * make the current thread.
 *
 * NOTE: This implementation differs from the dummy example in that
 * dfly_select_curproc() is able to select the current process, whereas
 * dummy_select_curproc() is not able to select the current process.
 * This means we have to NULL out uschedcp.
 *
 * Additionally, note that we may already be on a run queue if releasing
 * via the lwkt_switch() in dfly_setrunqueue().
 */
static void
dfly_release_curproc(struct lwp *lp)
{
	globaldata_t gd = mycpu;
	dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];

	/*
	 * Make sure td_wakefromcpu is defaulted.  This will be overwritten
	 * by wakeup().
	 */
	if (dd->uschedcp == lp) {
		KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
		spin_lock(&dd->spin);
		if (dd->uschedcp == lp) {
			dd->uschedcp = NULL;	/* don't let lp be selected */
			dd->upri = PRIBASE_NULL;
			atomic_clear_cpumask(&dfly_curprocmask, gd->gd_cpumask);
			spin_unlock(&dd->spin);
			dfly_select_curproc(gd);
		} else {
			spin_unlock(&dd->spin);
		}
	}
}

/*
 * DFLY_SELECT_CURPROC
 *
 * Select a new current process for this cpu and clear any pending user
 * reschedule request.  The cpu currently has no current process.
 *
 * This routine is also responsible for equal-priority round-robining,
 * typically triggered from dfly_schedulerclock().  In our dummy example
 * all the 'user' threads are LWKT scheduled all at once and we just
 * call lwkt_switch().
 *
 * The calling process is not on the queue and cannot be selected.
 */
static
void
dfly_select_curproc(globaldata_t gd)
{
	dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
	struct lwp *nlp;
	int cpuid = gd->gd_cpuid;

	crit_enter_gd(gd);

	spin_lock(&dd->spin);
	nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);

	if (nlp) {
		atomic_set_cpumask(&dfly_curprocmask, CPUMASK(cpuid));
		dd->upri = nlp->lwp_priority;
		dd->uschedcp = nlp;
#if 0
		dd->rrcount = 0;		/* reset round robin */
#endif
		spin_unlock(&dd->spin);
		lwkt_acquire(nlp->lwp_thread);
		lwkt_schedule(nlp->lwp_thread);
	} else {
		spin_unlock(&dd->spin);
	}
	crit_exit_gd(gd);
}

/*
 * Place the specified lwp on the user scheduler's run queue.  This routine
 * must be called with the thread descheduled.  The lwp must be runnable.
 * It must not be possible for anyone else to explicitly schedule this thread.
 *
 * The thread may be the current thread as a special case.
 */
static void
dfly_setrunqueue(struct lwp *lp)
{
	dfly_pcpu_t dd;
	dfly_pcpu_t rdd;

	/*
	 * First validate the process LWKT state.
	 */
	KASSERT(lp->lwp_stat == LSRUN, ("setrunqueue: lwp not LSRUN"));
	KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0,
	    ("lwp %d/%d already on runq! flag %08x/%08x", lp->lwp_proc->p_pid,
	     lp->lwp_tid, lp->lwp_proc->p_flags, lp->lwp_flags));
	KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);

	/*
	 * NOTE: dd/rdd do not necessarily represent the current cpu.
	 *	 Instead they may represent the cpu the thread was last
	 *	 scheduled on or inherited by its parent.
	 */
	dd = &dfly_pcpu[lp->lwp_qcpu];
	rdd = dd;

	/*
	 * This process is not supposed to be scheduled anywhere or assigned
	 * as the current process anywhere.  Assert the condition.
	 */
	KKASSERT(rdd->uschedcp != lp);

	/*
	 * Ok, we have to setrunqueue some target cpu and request a reschedule
	 * if necessary.
	 *
	 * We have to choose the best target cpu.  It might not be the current
	 * target even if the current cpu has no running user thread (for
	 * example, because the current cpu might be a hyperthread and its
	 * sibling has a thread assigned).
	 *
	 * If we just forked it is most optimal to run the child on the same
	 * cpu just in case the parent decides to wait for it (thus getting
	 * off that cpu).  As long as there is nothing else runnable on the
	 * cpu, that is.  If we did this unconditionally a parent forking
	 * multiple children before waiting (e.g. make -j N) leaves other
	 * cpus idle that could be working.
	 */
	if (lp->lwp_forked) {
		lp->lwp_forked = 0;
		if (usched_dfly_features & 0x20)
			rdd = dfly_choose_best_queue(lp);
		else if (usched_dfly_features & 0x40)
			rdd = &dfly_pcpu[lp->lwp_qcpu];
		else if (usched_dfly_features & 0x80)
			rdd = dfly_choose_queue_simple(rdd, lp);
		else if (dfly_pcpu[lp->lwp_qcpu].runqcount)
			rdd = dfly_choose_best_queue(lp);
		else
			rdd = &dfly_pcpu[lp->lwp_qcpu];
	} else {
		rdd = dfly_choose_best_queue(lp);
		/* rdd = &dfly_pcpu[lp->lwp_qcpu]; */
	}
	if (lp->lwp_qcpu != rdd->cpuid) {
		spin_lock(&dd->spin);
		dfly_changeqcpu_locked(lp, dd, rdd);
		spin_unlock(&dd->spin);
	}
	dfly_setrunqueue_dd(rdd, lp);
}

/*
 * Change qcpu to rdd->cpuid.  The dd the lp is CURRENTLY on must be
 * spin-locked on-call.  rdd does not have to be.
 */
static void
dfly_changeqcpu_locked(struct lwp *lp, dfly_pcpu_t dd, dfly_pcpu_t rdd)
{
	if (lp->lwp_qcpu != rdd->cpuid) {
		if (lp->lwp_mpflags & LWP_MP_ULOAD) {
			atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
			atomic_add_int(&dd->uload, -lp->lwp_uload);
			atomic_add_int(&dd->ucount, -1);
			atomic_add_int(&dfly_ucount, -1);
		}
		lp->lwp_qcpu = rdd->cpuid;
	}
}

/*
 * Place lp on rdd's runqueue.  Nothing is locked on call.  This function
 * also performs all necessary ancillary notification actions.
 */
static void
dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp)
{
	globaldata_t rgd;

	/*
	 * We might be moving the lp to another cpu's run queue, and once
	 * on the runqueue (even if it is our cpu's), another cpu can rip
	 * it away from us.
	 *
	 * TDF_MIGRATING might already be set if this is part of a
	 * remrunqueue+setrunqueue sequence.
	 */
	if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0)
		lwkt_giveaway(lp->lwp_thread);

	rgd = globaldata_find(rdd->cpuid);

	/*
	 * We lose control of the lp the moment we release the spinlock
	 * after having placed it on the queue.  i.e. another cpu could pick
	 * it up, or it could exit, or its priority could be further
	 * adjusted, or something like that.
	 *
	 * WARNING! rdd can point to a foreign cpu!
	 */
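	/*
	 * Example: with PPQ 4 the masked tests below compare priorities
	 * in bands of four.  An incoming lp at priority 97 (band 96) does
	 * not interrupt a uschedcp running at 98 (same band), but does
	 * interrupt one at 104 when usched_dfly_fast_resched is 0.
	 */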
	spin_lock(&rdd->spin);
	dfly_setrunqueue_locked(rdd, lp);

	/*
	 * Potentially interrupt the currently-running thread
	 */
	if ((rdd->upri & ~PPQMASK) <= (lp->lwp_priority & ~PPQMASK)) {
		/*
		 * Currently running thread is better or same, do not
		 * interrupt.
		 */
		spin_unlock(&rdd->spin);
	} else if ((rdd->upri & ~PPQMASK) <= (lp->lwp_priority & ~PPQMASK) +
		   usched_dfly_fast_resched) {
		/*
		 * Currently running thread is not better, but not so bad
		 * that we need to interrupt it.  Let it run for one more
		 * scheduler tick.
		 */
		if (rdd->uschedcp &&
		    rdd->uschedcp->lwp_rrcount < usched_dfly_rrinterval) {
			rdd->uschedcp->lwp_rrcount = usched_dfly_rrinterval - 1;
		}
		spin_unlock(&rdd->spin);
	} else if (rgd == mycpu) {
		/*
		 * We should interrupt the currently running thread, which
		 * is on the current cpu.
		 */
		spin_unlock(&rdd->spin);
		if (rdd->uschedcp == NULL) {
			wakeup_mycpu(&rdd->helper_thread); /* XXX */
			need_user_resched();
		} else {
			need_user_resched();
		}
	} else {
		/*
		 * We should interrupt the currently running thread, which
		 * is on a different cpu.
		 */
		spin_unlock(&rdd->spin);
		lwkt_send_ipiq(rgd, dfly_need_user_resched_remote, NULL);
	}
}

/*
 * This routine is called from a systimer IPI.  It MUST be MP-safe and
 * the BGL IS NOT HELD ON ENTRY.  This routine is called at ESTCPUFREQ on
 * each cpu.
 */
static
void
dfly_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
{
	globaldata_t gd = mycpu;
	dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];

	/*
	 * Spinlocks also hold a critical section so there should not be
	 * any active.
	 */
	KKASSERT(gd->gd_spinlocks == 0);

	if (lp == NULL)
		return;

	/*
	 * Do we need to round-robin?  We round-robin 10 times a second.
	 * This should only occur for cpu-bound batch processes.
	 */
	if (++lp->lwp_rrcount >= usched_dfly_rrinterval) {
		lp->lwp_thread->td_wakefromcpu = -1;
		need_user_resched();
	}

	/*
	 * Adjust estcpu upward using a real time equivalent calculation,
	 * and recalculate lp's priority.
	 */
	lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUMAX / ESTCPUFREQ + 1);
	dfly_resetpriority(lp);

	/*
	 * Rebalance two cpus every 8 ticks, pulling the worst thread
	 * from the worst cpu's queue into a rotating cpu number.
	 *
	 * This mechanic is needed because the push algorithms can
	 * steady-state in a non-optimal configuration.  We need to mix it
	 * up a little, even if it means breaking up a paired thread, so
	 * the push algorithms can rebalance the degenerate conditions.
	 * This portion of the algorithm exists to ensure stability at the
	 * selected weightings.
	 *
	 * Because we might be breaking up optimal conditions we do not want
	 * to execute this too quickly, hence we only rebalance approximately
	 * ~7-8 times per second.  The pushes, on the other hand, are capable
	 * of moving threads to other cpus at a much higher rate.
	 *
	 * We choose the most heavily loaded thread from the worst queue
	 * in order to ensure that multiple heavy-weight threads on the same
	 * queue get broken up, and also because these threads are the most
	 * likely to be able to remain in place.  Hopefully then any pairings,
	 * if applicable, migrate to where these threads are.
	 */
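	/*
	 * Example: the rover fires on every 8th sched_ticks and rotates
	 * across cpus, i.e. tick 0 fires on cpu 0, tick 8 on cpu 1, and so
	 * on, so with 4 cpus each cpu runs this pass once every 32 ticks.
	 */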
	if ((usched_dfly_features & 0x04) &&
	    ((u_int)sched_ticks & 7) == 0 &&
	    (u_int)sched_ticks / 8 % ncpus == gd->gd_cpuid) {
		/*
		 * Our cpu is up.
		 */
		struct lwp *nlp;
		dfly_pcpu_t rdd;

		rdd = dfly_choose_worst_queue(dd);
		if (rdd) {
			spin_lock(&dd->spin);
			if (spin_trylock(&rdd->spin)) {
				nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
				spin_unlock(&rdd->spin);
				if (nlp == NULL)
					spin_unlock(&dd->spin);
			} else {
				spin_unlock(&dd->spin);
				nlp = NULL;
			}
		} else {
			nlp = NULL;
		}
		/* dd->spin held if nlp != NULL */

		/*
		 * Either schedule it or add it to our queue.
		 */
		if (nlp &&
		    (nlp->lwp_priority & ~PPQMASK) < (dd->upri & ~PPQMASK)) {
			atomic_set_cpumask(&dfly_curprocmask, dd->cpumask);
			dd->upri = nlp->lwp_priority;
			dd->uschedcp = nlp;
#if 0
			dd->rrcount = 0;	/* reset round robin */
#endif
			spin_unlock(&dd->spin);
			lwkt_acquire(nlp->lwp_thread);
			lwkt_schedule(nlp->lwp_thread);
		} else if (nlp) {
			dfly_setrunqueue_locked(dd, nlp);
			spin_unlock(&dd->spin);
		}
	}
}

/*
 * Called from acquire and from kern_synch's one-second timer (one of the
 * callout helper threads) with a critical section held.
 *
 * Adjust p_estcpu based on our single-cpu load, p_nice, and compensate for
 * overall system load.
 *
 * Note that no recalculation occurs for a process which sleeps and wakes
 * up in the same tick.  That is, a system doing thousands of context
 * switches per second will still only do serious estcpu calculations
 * ESTCPUFREQ times per second.
 */
static
void
dfly_recalculate_estcpu(struct lwp *lp)
{
	globaldata_t gd = mycpu;
	sysclock_t cpbase;
	sysclock_t ttlticks;
	int estcpu;
	int decay_factor;
	int ucount;

	/*
	 * We have to subtract periodic to get the last schedclock
	 * timeout time, otherwise we would get the upcoming timeout.
	 * Keep in mind that a process can migrate between cpus and
	 * while the scheduler clock should be very close, boundary
	 * conditions could lead to a small negative delta.
	 */
	cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;

	if (lp->lwp_slptime > 1) {
		/*
		 * Too much time has passed, do a coarse correction.
		 */
		lp->lwp_estcpu = lp->lwp_estcpu >> 1;
		dfly_resetpriority(lp);
		lp->lwp_cpbase = cpbase;
		lp->lwp_cpticks = 0;
		lp->lwp_estfast = 0;
	} else if (lp->lwp_cpbase != cpbase) {
		/*
		 * Adjust estcpu if we are in a different tick.  Don't waste
		 * time if we are in the same tick.
		 *
		 * First calculate the number of ticks in the measurement
		 * interval.  The ttlticks calculation can wind up 0 due to
		 * a bug in the handling of lwp_slptime (as yet not found),
		 * so make sure we do not get a divide by 0 panic.
		 */
		ttlticks = (cpbase - lp->lwp_cpbase) /
			   gd->gd_schedclock.periodic;
		if ((ssysclock_t)ttlticks < 0) {
			ttlticks = 0;
			lp->lwp_cpbase = cpbase;
		}
		if (ttlticks == 0)
			return;
		updatepcpu(lp, lp->lwp_cpticks, ttlticks);

		/*
		 * Calculate the percentage of one cpu being used then
		 * compensate for any system load in excess of ncpus.
		 *
		 * For example, if we have 8 cores and 16 running cpu-bound
		 * processes then all things being equal each process will
		 * get 50% of one cpu.  We need to pump this value back
		 * up to 100% so the estcpu calculation properly adjusts
		 * the process's dynamic priority.
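		 *
		 * In that example pctcpu yields an estcpu of 50% of
		 * ESTCPUMAX, and with ucount 16 and ncpus 8 the
		 * compensation below doubles it:
		 * estcpu += estcpu * (16 - 8) / 8.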
		 *
		 * estcpu is scaled by ESTCPUMAX, pctcpu is scaled by FSCALE.
		 */
		estcpu = (lp->lwp_pctcpu * ESTCPUMAX) >> FSHIFT;
		ucount = dfly_ucount;
		if (ucount > ncpus) {
			estcpu += estcpu * (ucount - ncpus) / ncpus;
		}

		if (usched_dfly_debug == lp->lwp_proc->p_pid) {
			kprintf("pid %d lwp %p estcpu %3d %3d cp %d/%d",
				lp->lwp_proc->p_pid, lp,
				estcpu, lp->lwp_estcpu,
				lp->lwp_cpticks, ttlticks);
		}

		/*
		 * Adjust lp->lwp_estcpu.  The decay factor determines how
		 * quickly lwp_estcpu collapses to its realtime calculation.
		 * A slower collapse gives us a more accurate number over
		 * the long term but can create problems with bursty threads
		 * or threads which become cpu hogs.
		 *
		 * To solve this problem, newly started lwps and lwps which
		 * are restarting after having been asleep for a while are
		 * given a much, much faster decay in order to quickly
		 * detect whether they become cpu-bound.
		 *
		 * NOTE: p_nice is accounted for in dfly_resetpriority(),
		 *	 and not here, but we must still ensure that a
		 *	 cpu-bound nice -20 process does not completely
		 *	 override a cpu-bound nice +20 process.
		 *
		 * NOTE: We must use ESTCPULIM() here to deal with any
		 *	 overshoot.
		 */
		decay_factor = usched_dfly_decay;
		if (decay_factor < 1)
			decay_factor = 1;
		if (decay_factor > 1024)
			decay_factor = 1024;

		if (lp->lwp_estfast < usched_dfly_decay) {
			++lp->lwp_estfast;
			lp->lwp_estcpu = ESTCPULIM(
				(lp->lwp_estcpu * lp->lwp_estfast + estcpu) /
				(lp->lwp_estfast + 1));
		} else {
			lp->lwp_estcpu = ESTCPULIM(
				(lp->lwp_estcpu * decay_factor + estcpu) /
				(decay_factor + 1));
		}

		if (usched_dfly_debug == lp->lwp_proc->p_pid)
			kprintf(" finalestcpu %d\n", lp->lwp_estcpu);
		dfly_resetpriority(lp);
		lp->lwp_cpbase += ttlticks * gd->gd_schedclock.periodic;
		lp->lwp_cpticks = 0;
	}
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 *
 * This routine may be called with any process.
 *
 * This routine is called by fork1() for initial setup with the process
 * off the run queue, and also may be called normally with the process on
 * or off the run queue.
 */
static void
dfly_resetpriority(struct lwp *lp)
{
	dfly_pcpu_t rdd;
	int newpriority;
	u_short newrqtype;
	int rcpu;
	int checkpri;
	int estcpu;
	int delta_uload;

	crit_enter();

	/*
	 * Lock the scheduler (lp) belongs to.  This can be on a different
	 * cpu.  Handle races.  This loop breaks out with the appropriate
	 * rdd locked.
	 */
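	/*
	 * Example of the race being handled: if lp->lwp_qcpu changes after
	 * it is loaded but before the spin_lock() is acquired, the re-test
	 * fails and the loop retries against the lwp's new cpu.
	 */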
	for (;;) {
		rcpu = lp->lwp_qcpu;
		cpu_ccfence();
		rdd = &dfly_pcpu[rcpu];
		spin_lock(&rdd->spin);
		if (rcpu == lp->lwp_qcpu)
			break;
		spin_unlock(&rdd->spin);
	}

	/*
	 * Calculate the new priority and queue type
	 */
	newrqtype = lp->lwp_rtprio.type;

	switch(newrqtype) {
	case RTP_PRIO_REALTIME:
	case RTP_PRIO_FIFO:
		newpriority = PRIBASE_REALTIME +
			     (lp->lwp_rtprio.prio & PRIMASK);
		break;
	case RTP_PRIO_NORMAL:
		estcpu = lp->lwp_estcpu;

		/*
		 * p_nice piece		Adds (0-40) * 2		0-80
		 * estcpu		Adds 16384  * 4 / 512	0-128
		 */
		newpriority = (lp->lwp_proc->p_nice - PRIO_MIN) * PPQ / NICEPPQ;
		newpriority += estcpu * PPQ / ESTCPUPPQ;
		newpriority = newpriority * MAXPRI / (PRIO_RANGE * PPQ /
			      NICEPPQ + ESTCPUMAX * PPQ / ESTCPUPPQ);
		newpriority = PRIBASE_NORMAL + (newpriority & PRIMASK);
		break;
	case RTP_PRIO_IDLE:
		newpriority = PRIBASE_IDLE + (lp->lwp_rtprio.prio & PRIMASK);
		break;
	case RTP_PRIO_THREAD:
		newpriority = PRIBASE_THREAD + (lp->lwp_rtprio.prio & PRIMASK);
		break;
	default:
		panic("Bad RTP_PRIO %d", newrqtype);
		/* NOT REACHED */
	}

	/*
	 * The LWKT scheduler doesn't dive into usched structures, give it
	 * a hint on the relative priority of user threads running in the
	 * kernel.  The LWKT scheduler will always ensure that a user thread
	 * running in the kernel will get cpu some time, regardless of its
	 * upri, but can decide not to instantly switch from one kernel or
	 * user mode user thread to a kernel-mode user thread when it has a
	 * less desirable user priority.
	 *
	 * td_upri has normal sense (higher values are more desirable), so
	 * negate it.
	 */
	lp->lwp_thread->td_upri = -(newpriority & usched_dfly_swmask);

	/*
	 * The newpriority incorporates the queue type so do a simple masked
	 * check to determine if the process has moved to another queue.  If
	 * it has, and it is currently on a run queue, then move it.
	 *
	 * Since uload is ~PPQMASK masked, no modifications are necessary if
	 * we end up in the same run queue.
	 */
	if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
		if (lp->lwp_mpflags & LWP_MP_ONRUNQ) {
			dfly_remrunqueue_locked(rdd, lp);
			lp->lwp_priority = newpriority;
			lp->lwp_rqtype = newrqtype;
			lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
			dfly_setrunqueue_locked(rdd, lp);
			checkpri = 1;
		} else {
			lp->lwp_priority = newpriority;
			lp->lwp_rqtype = newrqtype;
			lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
			checkpri = 0;
		}
	} else {
		/*
		 * In the same PPQ, uload cannot change.
		 */
		lp->lwp_priority = newpriority;
		checkpri = 1;
		rcpu = -1;
	}

	/*
	 * Adjust effective load.
	 *
	 * Calculate load then scale up or down geometrically based on p_nice.
	 * Processes niced up (positive) are less important, and processes
	 * niced downward (negative) are more important.  The higher the
	 * uload, the more important the thread.
	 */
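	/*
	 * Example: a fully cpu-bound thread saturates estcpu at ESTCPUMAX
	 * (16384), giving a base uload of 16384 / NQS == 512.  At nice +20
	 * the adjustment below subtracts 512 * 20 / 21 (~487), leaving ~25,
	 * while at nice -20 it adds ~487 back, giving ~999.
	 */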
	/* 0-511, 0-100% cpu */
	delta_uload = lp->lwp_estcpu / NQS;
	delta_uload -= delta_uload * lp->lwp_proc->p_nice / (PRIO_MAX + 1);

	delta_uload -= lp->lwp_uload;
	lp->lwp_uload += delta_uload;
	if (lp->lwp_mpflags & LWP_MP_ULOAD)
		atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload, delta_uload);

	/*
	 * Determine if we need to reschedule the target cpu.  This only
	 * occurs if the LWP is already on a scheduler queue, which means
	 * that idle cpu notification has already occurred.  At most we
	 * need only issue a need_user_resched() on the appropriate cpu.
	 *
	 * The LWP may be owned by a CPU different from the current one,
	 * in which case dd->uschedcp may be modified without an MP lock
	 * or a spinlock held.  The worst that happens is that the code
	 * below causes a spurious need_user_resched() on the target CPU
	 * and dd->upri to be wrong for a short period of time, both of
	 * which are harmless.
	 *
	 * If checkpri is 0 we are adjusting the priority of the current
	 * process, possibly higher (less desirable), so ignore the upri
	 * check which will fail in that case.
	 */
	if (rcpu >= 0) {
		if ((dfly_rdyprocmask & CPUMASK(rcpu)) &&
		    (checkpri == 0 ||
		     (rdd->upri & ~PRIMASK) >
		     (lp->lwp_priority & ~PRIMASK))) {
			if (rcpu == mycpu->gd_cpuid) {
				spin_unlock(&rdd->spin);
				need_user_resched();
			} else {
				spin_unlock(&rdd->spin);
				lwkt_send_ipiq(globaldata_find(rcpu),
					       dfly_need_user_resched_remote,
					       NULL);
			}
		} else {
			spin_unlock(&rdd->spin);
		}
	} else {
		spin_unlock(&rdd->spin);
	}
	crit_exit();
}

static
void
dfly_yield(struct lwp *lp)
{
#if 0
	/* FUTURE (or something similar) */
	switch(lp->lwp_rqtype) {
	case RTP_PRIO_NORMAL:
		lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUINCR);
		break;
	default:
		break;
	}
#endif
	need_user_resched();
}

/*
 * Called from fork1() when a new child process is being created.
 *
 * Give the child process an initial estcpu that is more batchy than
 * its parent and dock the parent for the fork (but do not
 * reschedule the parent).
 *
 * XXX lwp should be "spawning" instead of "forking"
 */
static void
dfly_forking(struct lwp *plp, struct lwp *lp)
{
	/*
	 * Put the child 4 queue slots (out of 32) higher than the parent
	 * (less desirable than the parent).
	 */
	lp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ * 4);
	lp->lwp_forked = 1;
	lp->lwp_estfast = 0;

	/*
	 * Dock the parent a cost for the fork, protecting us from fork
	 * bombs.  If the parent is forking quickly make the child more
	 * batchy.
	 */
	plp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ / 16);
}

/*
 * Called when a lwp is being removed from this scheduler, typically
 * during lwp_exit().  We have to clean out any ULOAD accounting before
 * we can let the lp go.  The dd->spin lock is not needed for uload
 * updates.
 *
 * Scheduler dequeueing has already occurred, no further action in that
 * regard is needed.
 */
static void
dfly_exiting(struct lwp *lp, struct proc *child_proc)
{
	dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];

	if (lp->lwp_mpflags & LWP_MP_ULOAD) {
		atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
		atomic_add_int(&dd->uload, -lp->lwp_uload);
		atomic_add_int(&dd->ucount, -1);
		atomic_add_int(&dfly_ucount, -1);
	}
}

/*
 * This function cannot block in any way, but spinlocks are ok.
 *
 * Update the uload based on the state of the thread (whether it is going
 * to sleep or running again).  The uload is meant to be a longer-term
 * load and not an instantaneous load.
 */
static void
dfly_uload_update(struct lwp *lp)
{
	dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];

	if (lp->lwp_thread->td_flags & TDF_RUNQ) {
		if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
			spin_lock(&dd->spin);
			if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
				atomic_set_int(&lp->lwp_mpflags,
					       LWP_MP_ULOAD);
				atomic_add_int(&dd->uload, lp->lwp_uload);
				atomic_add_int(&dd->ucount, 1);
				atomic_add_int(&dfly_ucount, 1);
			}
			spin_unlock(&dd->spin);
		}
	} else if (lp->lwp_slptime > 0) {
		if (lp->lwp_mpflags & LWP_MP_ULOAD) {
			spin_lock(&dd->spin);
			if (lp->lwp_mpflags & LWP_MP_ULOAD) {
				atomic_clear_int(&lp->lwp_mpflags,
						 LWP_MP_ULOAD);
				atomic_add_int(&dd->uload, -lp->lwp_uload);
				atomic_add_int(&dd->ucount, -1);
				atomic_add_int(&dfly_ucount, -1);
			}
			spin_unlock(&dd->spin);
		}
	}
}

/*
 * chooseproc() is called when a cpu needs a user process to LWKT schedule,
 * it selects a user process and returns it.  If chklp is non-NULL and chklp
 * has a better or equal priority than the process that would otherwise be
 * chosen, NULL is returned.
 *
 * Until we fix the RUNQ code the chklp test has to be strict or we may
 * bounce between processes trying to acquire the current process designation.
 *
 * Must be called with rdd->spin locked.  The spinlock is left intact through
 * the entire routine.  dd->spin does not have to be locked.
 *
 * If worst is non-zero this function finds the worst thread instead of the
 * best thread (used by the schedulerclock-based rover).
 */
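/*
 * Example of the scan order: the best-thread case consumes rtqueues, then
 * queues, then idqueues using bsfl() (lowest set bit is the most urgent
 * non-empty queue), while the worst-thread case walks the same bitmasks
 * in the opposite order using bsrl().
 */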
static
struct lwp *
dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
		       struct lwp *chklp, int worst)
{
	struct lwp *lp;
	struct rq *q;
	u_int32_t *which;
	u_int32_t pri;
	u_int32_t rtqbits;
	u_int32_t tsqbits;
	u_int32_t idqbits;

	rtqbits = rdd->rtqueuebits;
	tsqbits = rdd->queuebits;
	idqbits = rdd->idqueuebits;

	if (worst) {
		if (idqbits) {
			pri = bsrl(idqbits);
			q = &rdd->idqueues[pri];
			which = &rdd->idqueuebits;
		} else if (tsqbits) {
			pri = bsrl(tsqbits);
			q = &rdd->queues[pri];
			which = &rdd->queuebits;
		} else if (rtqbits) {
			pri = bsrl(rtqbits);
			q = &rdd->rtqueues[pri];
			which = &rdd->rtqueuebits;
		} else {
			return (NULL);
		}
		lp = TAILQ_LAST(q, rq);
	} else {
		if (rtqbits) {
			pri = bsfl(rtqbits);
			q = &rdd->rtqueues[pri];
			which = &rdd->rtqueuebits;
		} else if (tsqbits) {
			pri = bsfl(tsqbits);
			q = &rdd->queues[pri];
			which = &rdd->queuebits;
		} else if (idqbits) {
			pri = bsfl(idqbits);
			q = &rdd->idqueues[pri];
			which = &rdd->idqueuebits;
		} else {
			return (NULL);
		}
		lp = TAILQ_FIRST(q);
	}
	KASSERT(lp, ("chooseproc: no lwp on busy queue"));

	/*
	 * If the passed lwp <chklp> is reasonably close to the selected
	 * lwp <lp>, return NULL (indicating that <chklp> should be kept).
	 *
	 * Note that we must error on the side of <chklp> to avoid bouncing
	 * between threads in the acquire code.
	 */
	if (chklp) {
		if (chklp->lwp_priority < lp->lwp_priority + PPQ)
			return (NULL);
	}

	KTR_COND_LOG(usched_chooseproc,
	    lp->lwp_proc->p_pid == usched_dfly_pid_debug,
	    lp->lwp_proc->p_pid,
	    lp->lwp_thread->td_gd->gd_cpuid,
	    mycpu->gd_cpuid);

	KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
	atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
	TAILQ_REMOVE(q, lp, lwp_procq);
	--rdd->runqcount;
	if (TAILQ_EMPTY(q))
		*which &= ~(1 << pri);

	/*
	 * If we are choosing a process from rdd with the intent to
	 * move it to dd, lwp_qcpu must be adjusted while rdd's spinlock
	 * is still held.
	 */
	if (rdd != dd) {
		if (lp->lwp_mpflags & LWP_MP_ULOAD) {
			atomic_add_int(&rdd->uload, -lp->lwp_uload);
			atomic_add_int(&rdd->ucount, -1);
			atomic_add_int(&dfly_ucount, -1);
		}
		lp->lwp_qcpu = dd->cpuid;
		atomic_add_int(&dd->uload, lp->lwp_uload);
		atomic_add_int(&dd->ucount, 1);
		atomic_add_int(&dfly_ucount, 1);
		atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
	}
	return lp;
}

/*
 * USED TO PUSH RUNNABLE LWPS TO THE LEAST LOADED CPU.
 *
 * Choose a cpu node to schedule lp on, hopefully nearby its current
 * node.
 *
 * We give the current node a modest advantage for obvious reasons.
 *
 * We also give the node the thread was woken up FROM a slight advantage
 * in order to try to schedule paired threads which synchronize/block waiting
 * for each other fairly close to each other.  Similarly in a network setting
 * this feature will also attempt to place a user process near the kernel
 * protocol thread that is feeding it data.  THIS IS A CRITICAL PART of the
 * algorithm as it heuristically groups synchronizing processes for locality
 * of reference in multi-socket systems.
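 *
 * For example, two threads blocking alternately on a pipe between them
 * will tend to be pulled into the same cpu group's cache domain.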
 *
 * We check against running processes and give a big advantage if there
 * are none running.
 *
 * The caller will normally dfly_setrunqueue() lp on the returned queue.
 *
 * When the topology is known choose a cpu whose group has, in aggregate,
 * the lowest weighted load.
 */
static
dfly_pcpu_t
dfly_choose_best_queue(struct lwp *lp)
{
	cpumask_t wakemask;
	cpumask_t mask;
	cpu_node_t *cpup;
	cpu_node_t *cpun;
	cpu_node_t *cpub;
	dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
	dfly_pcpu_t rdd;
	int wakecpu;
	int cpuid;
	int n;
	int count;
	int load;
	int lowest_load;

	/*
	 * When the topology is unknown choose a random cpu that is hopefully
	 * idle.
	 */
	if (dd->cpunode == NULL)
		return (dfly_choose_queue_simple(dd, lp));

	/*
	 * Pairing mask
	 */
	if ((wakecpu = lp->lwp_thread->td_wakefromcpu) >= 0)
		wakemask = dfly_pcpu[wakecpu].cpumask;
	else
		wakemask = 0;

	/*
	 * When the topology is known choose a cpu whose group has, in
	 * aggregate, the lowest weighted load.
	 */
	cpup = root_cpu_node;
	rdd = dd;

	while (cpup) {
		/*
		 * Degenerate case super-root
		 */
		if (cpup->child_node && cpup->child_no == 1) {
			cpup = cpup->child_node;
			continue;
		}

		/*
		 * Terminal cpunode
		 */
		if (cpup->child_node == NULL) {
			rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
			break;
		}

		cpub = NULL;
		lowest_load = 0x7FFFFFFF;

		for (n = 0; n < cpup->child_no; ++n) {
			/*
			 * Accumulate load information for all cpus
			 * which are members of this node.
			 */
			cpun = &cpup->child_node[n];
			mask = cpun->members & usched_global_cpumask &
			       smp_active_mask & lp->lwp_cpumask;
			if (mask == 0)
				continue;

			count = 0;
			load = 0;

			while (mask) {
				cpuid = BSFCPUMASK(mask);
				rdd = &dfly_pcpu[cpuid];
				load += rdd->uload;
				load += rdd->ucount * usched_dfly_weight3;

				if (rdd->uschedcp == NULL &&
				    rdd->runqcount == 0 &&
				    globaldata_find(cpuid)->gd_tdrunqcount == 0
				) {
					load -= usched_dfly_weight4;
				}
#if 0
				else if (rdd->upri > lp->lwp_priority + PPQ) {
					load -= usched_dfly_weight4 / 2;
				}
#endif
				mask &= ~CPUMASK(cpuid);
				++count;
			}

			/*
			 * Compensate if the lp is already accounted for in
			 * the aggregate uload for this mask set.  We want
			 * to calculate the loads as if lp were not present,
			 * otherwise the calculation is bogus.
			 */
			if ((lp->lwp_mpflags & LWP_MP_ULOAD) &&
			    (dd->cpumask & cpun->members)) {
				load -= lp->lwp_uload;
				load -= usched_dfly_weight3;
			}

			load /= count;

			/*
			 * Advantage the cpu group (lp) is already on.
			 */
			if (cpun->members & dd->cpumask)
				load -= usched_dfly_weight1;

			/*
			 * Advantage the cpu group we want to pair (lp) to,
			 * but don't let it go to the exact same cpu as
			 * the wakecpu target.
			 *
			 * We do this by checking whether cpun is a
			 * terminal node or not.  All cpun's at the same
			 * level will either all be terminal or all not
			 * terminal.
			 *
			 * If it is and we match we disadvantage the load.
			 * If it is and we don't match we advantage the load.
			 *
			 * Also note that we are effectively disadvantaging
			 * all-but-one by the same amount, so it won't affect
			 * the weight1 factor for the all-but-one nodes.
			 */
			if (cpun->members & wakemask) {
				if (cpun->child_node != NULL) {
					/* advantage */
					load -= usched_dfly_weight2;
				} else {
					if (usched_dfly_features & 0x10)
						load += usched_dfly_weight2;
					else
						load -= usched_dfly_weight2;
				}
			}

			/*
			 * Calculate the best load
			 */
			if (cpub == NULL || lowest_load > load ||
			    (lowest_load == load &&
			     (cpun->members & dd->cpumask))
			) {
				lowest_load = load;
				cpub = cpun;
			}
		}
		cpup = cpub;
	}
	if (usched_dfly_chooser)
		kprintf("lp %02d->%02d %s\n",
			lp->lwp_qcpu, rdd->cpuid, lp->lwp_proc->p_comm);
	return (rdd);
}

/*
 * USED TO PULL RUNNABLE LWPS FROM THE MOST LOADED CPU.
 *
 * Choose the worst queue close to dd's cpu node with a non-empty runq
 * that is NOT dd.  Also require that the moving of the highest-load thread
 * from rdd to dd does not cause the uload's to cross each other.
 *
 * This is used by the thread chooser when the current cpu's queues are
 * empty to steal a thread from another cpu's queue.  We want to offload
 * the most heavily-loaded queue.
 */
static
dfly_pcpu_t
dfly_choose_worst_queue(dfly_pcpu_t dd)
{
	cpumask_t mask;
	cpu_node_t *cpup;
	cpu_node_t *cpun;
	cpu_node_t *cpub;
	dfly_pcpu_t rdd;
	int cpuid;
	int n;
	int count;
	int load;
#if 0
	int pri;
	int hpri;
#endif
	int highest_load;

	/*
	 * When the topology is unknown choose a random cpu that is hopefully
	 * idle.
	 */
	if (dd->cpunode == NULL) {
		return (NULL);
	}

	/*
	 * When the topology is known choose a cpu whose group has, in
	 * aggregate, the highest weighted load.
	 */
	cpup = root_cpu_node;
	rdd = dd;
	while (cpup) {
		/*
		 * Degenerate case super-root
		 */
		if (cpup->child_node && cpup->child_no == 1) {
			cpup = cpup->child_node;
			continue;
		}

		/*
		 * Terminal cpunode
		 */
		if (cpup->child_node == NULL) {
			rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
			break;
		}

		cpub = NULL;
		highest_load = 0;

		for (n = 0; n < cpup->child_no; ++n) {
			/*
			 * Accumulate load information for all cpus
			 * which are members of this node.
			 */
			cpun = &cpup->child_node[n];
			mask = cpun->members & usched_global_cpumask &
			       smp_active_mask;
			if (mask == 0)
				continue;
			count = 0;
			load = 0;

			while (mask) {
				cpuid = BSFCPUMASK(mask);
				rdd = &dfly_pcpu[cpuid];
				load += rdd->uload;
				load += rdd->ucount * usched_dfly_weight3;
				if (rdd->uschedcp == NULL &&
				    rdd->runqcount == 0 &&
				    globaldata_find(cpuid)->gd_tdrunqcount == 0
				) {
					load -= usched_dfly_weight4;
				}
#if 0
				else if (rdd->upri > dd->upri + PPQ) {
					load -= usched_dfly_weight4 / 2;
				}
#endif
				mask &= ~CPUMASK(cpuid);
				++count;
			}
			load /= count;

			/*
			 * Prefer candidates which are somewhat closer to
			 * our cpu.
			 */
			if (dd->cpumask & cpun->members)
				load += usched_dfly_weight1;

			/*
			 * The best candidate is the one with the worst
			 * (highest) load.
			 */
			if (cpub == NULL || highest_load < load) {
				highest_load = load;
				cpub = cpun;
			}
		}
		cpup = cpub;
	}

	/*
	 * We never return our own node (dd), and only return a remote
	 * node if its load is significantly worse than ours (i.e. where
	 * stealing a thread would be considered reasonable).
	 *
	 * This also helps us avoid breaking paired threads apart which
	 * can have disastrous effects on performance.
	 */
	if (rdd == dd)
		return (NULL);

#if 0
	hpri = 0;
	if (rdd->rtqueuebits && hpri < (pri = bsrl(rdd->rtqueuebits)))
		hpri = pri;
	if (rdd->queuebits && hpri < (pri = bsrl(rdd->queuebits)))
		hpri = pri;
	if (rdd->idqueuebits && hpri < (pri = bsrl(rdd->idqueuebits)))
		hpri = pri;
	hpri *= PPQ;
	if (rdd->uload - hpri < dd->uload + hpri)
		return (NULL);
#endif
	return (rdd);
}

static
dfly_pcpu_t
dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp)
{
	dfly_pcpu_t rdd;
	cpumask_t tmpmask;
	cpumask_t mask;
	int cpuid;

	/*
	 * Fallback to the original heuristic, select random cpu,
	 * first checking cpus not currently running a user thread.
	 */
	++dfly_scancpu;
	cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
	mask = ~dfly_curprocmask & dfly_rdyprocmask & lp->lwp_cpumask &
	       smp_active_mask & usched_global_cpumask;

	while (mask) {
		tmpmask = ~(CPUMASK(cpuid) - 1);
		if (mask & tmpmask)
			cpuid = BSFCPUMASK(mask & tmpmask);
		else
			cpuid = BSFCPUMASK(mask);
		rdd = &dfly_pcpu[cpuid];

		if ((rdd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK))
			goto found;
		mask &= ~CPUMASK(cpuid);
	}

	/*
	 * Then cpus which might have a currently running lp
	 */
	cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
	mask = dfly_curprocmask & dfly_rdyprocmask &
	       lp->lwp_cpumask & smp_active_mask & usched_global_cpumask;

	while (mask) {
		tmpmask = ~(CPUMASK(cpuid) - 1);
		if (mask & tmpmask)
			cpuid = BSFCPUMASK(mask & tmpmask);
		else
			cpuid = BSFCPUMASK(mask);
		rdd = &dfly_pcpu[cpuid];

		if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
			goto found;
		mask &= ~CPUMASK(cpuid);
	}

	/*
	 * If we cannot find a suitable cpu we reload from dfly_scancpu
	 * and round-robin.  Other cpus will pick up as they release their
	 * current lwps or become ready.
	 *
	 * Avoid a degenerate system lockup case if usched_global_cpumask
	 * is set to 0 or otherwise does not cover lwp_cpumask.
	 *
	 * We only kick the target helper thread in this case, we do not
	 * set the user resched flag because
	 */
	cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
	if ((CPUMASK(cpuid) & usched_global_cpumask) == 0)
		cpuid = 0;
	rdd = &dfly_pcpu[cpuid];
found:
	return (rdd);
}

static
void
dfly_need_user_resched_remote(void *dummy)
{
	globaldata_t gd = mycpu;
	dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];

	/*
	 * Flag reschedule needed
	 */
	need_user_resched();

	/*
	 * If no user thread is currently running we need to kick the helper
	 * on our cpu to recover.  Otherwise the cpu will never schedule
	 * anything again.
	 *
	 * We cannot schedule the process ourselves because this is an
	 * IPI callback and we cannot acquire spinlocks in an IPI callback.
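	 * (the IPI may have interrupted code on this cpu that already
	 * holds the spinlock we would need, which would deadlock).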
	 *
	 * Call wakeup_mycpu to avoid sending IPIs to other CPUs
	 */
	if (dd->uschedcp == NULL && (dfly_rdyprocmask & gd->gd_cpumask)) {
		atomic_clear_cpumask(&dfly_rdyprocmask, gd->gd_cpumask);
		wakeup_mycpu(&dd->helper_thread);
	}
}

/*
 * dfly_remrunqueue_locked() removes a given process from the run queue
 * that it is on, clearing the queue busy bit if it becomes empty.
 *
 * Note that the user process scheduler is different from the LWKT scheduler.
 * The user process scheduler only manages user processes but it uses LWKT
 * underneath, and a user process operating in the kernel will often be
 * 'released' from our management.
 *
 * uload is NOT adjusted here.  It is only adjusted if the lwkt_thread goes
 * to sleep or the lwp is moved to a different runq.
 */
static void
dfly_remrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
{
	struct rq *q;
	u_int32_t *which;
	u_int8_t pri;

	KKASSERT(rdd->runqcount >= 0);

	pri = lp->lwp_rqindex;

	switch(lp->lwp_rqtype) {
	case RTP_PRIO_NORMAL:
		q = &rdd->queues[pri];
		which = &rdd->queuebits;
		break;
	case RTP_PRIO_REALTIME:
	case RTP_PRIO_FIFO:
		q = &rdd->rtqueues[pri];
		which = &rdd->rtqueuebits;
		break;
	case RTP_PRIO_IDLE:
		q = &rdd->idqueues[pri];
		which = &rdd->idqueuebits;
		break;
	default:
		panic("remrunqueue: invalid rtprio type");
		/* NOT REACHED */
	}
	KKASSERT(lp->lwp_mpflags & LWP_MP_ONRUNQ);
	atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
	TAILQ_REMOVE(q, lp, lwp_procq);
	--rdd->runqcount;
	if (TAILQ_EMPTY(q)) {
		KASSERT((*which & (1 << pri)) != 0,
			("remrunqueue: remove from empty queue"));
		*which &= ~(1 << pri);
	}
}

/*
 * dfly_setrunqueue_locked()
 *
 * Add a process whose rqtype and rqindex had previously been calculated
 * onto the appropriate run queue.   Determine if the addition requires
 * a reschedule on a cpu and return the cpuid or -1.
 *
 * NOTE:	  Lower priorities are better priorities.
 *
 * NOTE ON ULOAD: This variable specifies the aggregate load on a cpu, the
 *		  sum of the rough lwp_priority for all running and runnable
 *		  processes.  Lower priority processes (higher lwp_priority
 *		  values) actually DO count as more load, not less, because
 *		  these are the programs which require the most care with
 *		  regards to cpu selection.
 */
static void
dfly_setrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
{
	struct rq *q;
	u_int32_t *which;
	int pri;

	KKASSERT(lp->lwp_qcpu == rdd->cpuid);

	if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
		atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
		atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload, lp->lwp_uload);
		atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].ucount, 1);
		atomic_add_int(&dfly_ucount, 1);
	}

	pri = lp->lwp_rqindex;

	switch(lp->lwp_rqtype) {
	case RTP_PRIO_NORMAL:
		q = &rdd->queues[pri];
		which = &rdd->queuebits;
		break;
	case RTP_PRIO_REALTIME:
	case RTP_PRIO_FIFO:
		q = &rdd->rtqueues[pri];
		which = &rdd->rtqueuebits;
		break;
	case RTP_PRIO_IDLE:
		q = &rdd->idqueues[pri];
		which = &rdd->idqueuebits;
		break;
	default:
		panic("setrunqueue: invalid rtprio type");
		/* NOT REACHED */
	}

	/*
	 * Place us on the selected queue.

/*
 * dfly_setrunqueue_locked()
 *
 * Add a process whose rqtype and rqindex have previously been calculated
 * onto the appropriate run queue.
 *
 * NOTE: Lower priorities are better priorities.
 *
 * NOTE ON ULOAD: This variable specifies the aggregate load on a cpu, the
 *		  sum of the rough lwp_priority for all running and runnable
 *		  processes.  Lower priority processes (higher lwp_priority
 *		  values) actually DO count as more load, not less, because
 *		  these are the programs which require the most care with
 *		  regards to cpu selection.
 */
static void
dfly_setrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
{
	struct rq *q;
	u_int32_t *which;
	int pri;

	KKASSERT(lp->lwp_qcpu == rdd->cpuid);

	if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
		atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
		atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload, lp->lwp_uload);
		atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].ucount, 1);
		atomic_add_int(&dfly_ucount, 1);
	}

	pri = lp->lwp_rqindex;

	switch(lp->lwp_rqtype) {
	case RTP_PRIO_NORMAL:
		q = &rdd->queues[pri];
		which = &rdd->queuebits;
		break;
	case RTP_PRIO_REALTIME:
	case RTP_PRIO_FIFO:
		q = &rdd->rtqueues[pri];
		which = &rdd->rtqueuebits;
		break;
	case RTP_PRIO_IDLE:
		q = &rdd->idqueues[pri];
		which = &rdd->idqueuebits;
		break;
	default:
		panic("setrunqueue: invalid rtprio type");
		/* NOT REACHED */
	}

	/*
	 * Place us on the selected queue.  Determine if we should be
	 * placed at the head of the queue or at the end.
	 *
	 * We are placed at the tail if our round-robin count has expired,
	 * or is about to expire and the system thinks it's a good point to
	 * round-robin, or there is already a next thread on the queue
	 * (it might be trying to pick up where it left off and we don't
	 * want to interfere).
	 */
	KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
	atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
	++rdd->runqcount;

	if (lp->lwp_rrcount >= usched_dfly_rrinterval ||
	    (lp->lwp_rrcount >= usched_dfly_rrinterval / 2 &&
	     (lp->lwp_thread->td_mpflags & TDF_MP_BATCH_DEMARC)) ||
	    !TAILQ_EMPTY(q)
	) {
		atomic_clear_int(&lp->lwp_thread->td_mpflags,
				 TDF_MP_BATCH_DEMARC);
		lp->lwp_rrcount = 0;
		TAILQ_INSERT_TAIL(q, lp, lwp_procq);
	} else {
		if (TAILQ_EMPTY(q))
			lp->lwp_rrcount = 0;
		TAILQ_INSERT_HEAD(q, lp, lwp_procq);
	}
	*which |= 1 << pri;
}
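
/*
 * Illustrative sketch (not part of the original file, never compiled):
 * the head-vs-tail placement decision made above, factored into a
 * hypothetical predicate.  An lwp goes to the tail of its queue when its
 * round-robin quantum has expired, when it is at least half expired at a
 * batch demarcation point, or when another thread is already waiting on
 * the queue; otherwise it is reinserted at the head so it can resume
 * where it left off.
 */
#if 0
static __inline int
example_place_at_tail(const struct lwp *lp, const struct rq *q)
{
	if (lp->lwp_rrcount >= usched_dfly_rrinterval)
		return (1);		/* quantum fully expired */
	if (lp->lwp_rrcount >= usched_dfly_rrinterval / 2 &&
	    (lp->lwp_thread->td_mpflags & TDF_MP_BATCH_DEMARC))
		return (1);		/* half expired at batch demarc */
	return (!TAILQ_EMPTY(q));	/* don't cut in front of a waiter */
}
#endif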

/*
 * For SMP systems a user scheduler helper thread is created for each
 * cpu and is used to allow one cpu to wake up another for the purposes
 * of scheduling userland threads from setrunqueue().
 *
 * UP systems do not need the helper since there is only one cpu.
 *
 * We can't use the idle thread for this because we might block.
 * Additionally, doing things this way allows us to HLT idle cpus
 * on MP systems.
 */
static void
dfly_helper_thread(void *dummy)
{
	globaldata_t gd;
	dfly_pcpu_t dd;
	dfly_pcpu_t rdd;
	struct lwp *nlp;
	cpumask_t mask;
	int cpuid;

	gd = mycpu;
	cpuid = gd->gd_cpuid;	/* doesn't change */
	mask = gd->gd_cpumask;	/* doesn't change */
	dd = &dfly_pcpu[cpuid];

	/*
	 * Since we want to be woken up only when no user processes are
	 * scheduled on this cpu, run at an ultra-low priority.
	 */
	lwkt_setpri_self(TDPRI_USER_SCHEDULER);

	tsleep(&dd->helper_thread, 0, "schslp", 0);

	for (;;) {
		/*
		 * We use the LWKT deschedule-interlock trick to avoid racing
		 * dfly_rdyprocmask.  This means we cannot block between here
		 * and the PINTERLOCKED tsleep() at the bottom of the loop.
		 */
		crit_enter_gd(gd);
		tsleep_interlock(&dd->helper_thread, 0);

		spin_lock(&dd->spin);

		atomic_set_cpumask(&dfly_rdyprocmask, mask);
		clear_user_resched();	/* This satisfies the reschedule request */
#if 0
		dd->rrcount = 0;	/* Reset the round-robin counter */
#endif

		if (dd->runqcount || dd->uschedcp != NULL) {
			/*
			 * Threads are available.  A thread may or may not be
			 * currently scheduled.  Get the best thread already
			 * queued to this cpu.
			 */
			nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);
			if (nlp) {
				atomic_set_cpumask(&dfly_curprocmask, mask);
				dd->upri = nlp->lwp_priority;
				dd->uschedcp = nlp;
#if 0
				dd->rrcount = 0;	/* reset round robin */
#endif
				spin_unlock(&dd->spin);
				lwkt_acquire(nlp->lwp_thread);
				lwkt_schedule(nlp->lwp_thread);
			} else {
				/*
				 * This situation should not occur because we
				 * had at least one thread available.
				 */
				spin_unlock(&dd->spin);
			}
		} else if (usched_dfly_features & 0x01) {
			/*
			 * This cpu has no runnable threads; steal one from
			 * another cpu.  Since we're stealing anyway, we might
			 * as well load balance at the same time.
			 *
			 * We choose the highest-loaded thread from the worst
			 * queue.
			 *
			 * NOTE! dfly_choose_worst_queue() only returns a
			 *	 non-NULL rdd when another cpu's queue is
			 *	 obviously overloaded.  We do not want to
			 *	 perform the type of rebalancing the
			 *	 schedclock does here because it would result
			 *	 in insane process pulling when 'steady' state
			 *	 is partially unbalanced (e.g. 6 runnables and
			 *	 only 4 cores).
			 */
			rdd = dfly_choose_worst_queue(dd);
			if (rdd && spin_trylock(&rdd->spin)) {
				nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
				spin_unlock(&rdd->spin);
			} else {
				nlp = NULL;
			}
			if (nlp) {
				atomic_set_cpumask(&dfly_curprocmask, mask);
				dd->upri = nlp->lwp_priority;
				dd->uschedcp = nlp;
#if 0
				dd->rrcount = 0;	/* reset round robin */
#endif
				spin_unlock(&dd->spin);
				lwkt_acquire(nlp->lwp_thread);
				lwkt_schedule(nlp->lwp_thread);
			} else {
				/*
				 * Leave the thread on our run queue.  Another
				 * scheduler will try to pull it later.
				 */
				spin_unlock(&dd->spin);
			}
		} else {
			/*
			 * Devoid of runnable threads and not allowed to
			 * steal any.
			 */
			spin_unlock(&dd->spin);
		}

		/*
		 * We're descheduled unless someone scheduled us.  Switch away.
		 * Exiting the critical section will cause splz() to be called
		 * for us if interrupts and such are pending.
		 */
		crit_exit_gd(gd);
		tsleep(&dd->helper_thread, PINTERLOCKED, "schslp", 0);
	}
}

#if 0
static int
sysctl_usched_dfly_stick_to_level(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;

	new_val = usched_dfly_stick_to_level;

	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val > cpu_topology_levels_number - 1 || new_val < 0)
		return (EINVAL);
	usched_dfly_stick_to_level = new_val;
	return (0);
}
#endif
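
/*
 * Illustrative sketch (not part of the original file, never compiled):
 * the deschedule interlock used by dfly_helper_thread() above, reduced
 * to its skeleton.  The wait channel is armed with tsleep_interlock()
 * *before* the queues are examined under dd->spin, so a wakeup sent by
 * another cpu in between cannot be lost; the final tsleep() with
 * PINTERLOCKED returns immediately if the wakeup already occurred.
 * The function name is hypothetical.
 */
#if 0
static void
example_helper_wait(globaldata_t gd, dfly_pcpu_t dd)
{
	crit_enter_gd(gd);
	tsleep_interlock(&dd->helper_thread, 0);	/* arm wait channel */
	spin_lock(&dd->spin);
	/* ... examine run queues, possibly schedule an lwp ... */
	spin_unlock(&dd->spin);
	crit_exit_gd(gd);
	tsleep(&dd->helper_thread, PINTERLOCKED, "schslp", 0);
}
#endif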

/*
 * Set up the queues and scheduler helpers (scheduler helpers are SMP only).
 * Note that curprocmask bit 0 has already been cleared by rqinit() and
 * we should not mess with it further.
 */
static void
usched_dfly_cpu_init(void)
{
	int i;
	int j;
	int cpuid;
	int smt_not_supported = 0;
	int cache_coherent_not_supported = 0;

	if (bootverbose)
		kprintf("Start scheduler helpers on cpus:\n");

	sysctl_ctx_init(&usched_dfly_sysctl_ctx);
	usched_dfly_sysctl_tree =
		SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
				SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
				"usched_dfly", CTLFLAG_RD, 0, "");

	for (i = 0; i < ncpus; ++i) {
		dfly_pcpu_t dd = &dfly_pcpu[i];
		cpumask_t mask = CPUMASK(i);

		if ((mask & smp_active_mask) == 0)
			continue;

		spin_init(&dd->spin);
		dd->cpunode = get_cpu_node_by_cpuid(i);
		dd->cpuid = i;
		dd->cpumask = CPUMASK(i);
		for (j = 0; j < NQS; j++) {
			TAILQ_INIT(&dd->queues[j]);
			TAILQ_INIT(&dd->rtqueues[j]);
			TAILQ_INIT(&dd->idqueues[j]);
		}
		atomic_clear_cpumask(&dfly_curprocmask, 1);

		if (dd->cpunode == NULL) {
			smt_not_supported = 1;
			cache_coherent_not_supported = 1;
			if (bootverbose)
				kprintf("\tcpu%d - WARNING: No CPU NODE "
					"found for cpu\n", i);
		} else {
			switch (dd->cpunode->type) {
			case THREAD_LEVEL:
				if (bootverbose)
					kprintf("\tcpu%d - HyperThreading "
						"available. Core siblings: ",
						i);
				break;
			case CORE_LEVEL:
				smt_not_supported = 1;

				if (bootverbose)
					kprintf("\tcpu%d - No HT available, "
						"multi-core/physical "
						"cpu. Physical siblings: ",
						i);
				break;
			case CHIP_LEVEL:
				smt_not_supported = 1;

				if (bootverbose)
					kprintf("\tcpu%d - No HT available, "
						"single-core/physical cpu. "
						"Package siblings: ",
						i);
				break;
			default:
				/* Let's go for safe defaults here */
				smt_not_supported = 1;
				cache_coherent_not_supported = 1;
				if (bootverbose)
					kprintf("\tcpu%d - Unknown cpunode->"
						"type=%u. Siblings: ",
						i,
						(u_int)dd->cpunode->type);
				break;
			}

			if (bootverbose) {
				if (dd->cpunode->parent_node != NULL) {
					CPUSET_FOREACH(cpuid,
					    dd->cpunode->parent_node->members)
						kprintf("cpu%d ", cpuid);
					kprintf("\n");
				} else {
					kprintf(" no siblings\n");
				}
			}
		}

		lwkt_create(dfly_helper_thread, NULL, NULL, &dd->helper_thread,
			    0, i, "usched %d", i);

		/*
		 * Allow user scheduling on the target cpu.  cpu #0 has
		 * already been enabled in rqinit().
		 */
		if (i)
			atomic_clear_cpumask(&dfly_curprocmask, mask);
		atomic_set_cpumask(&dfly_rdyprocmask, mask);
		dd->upri = PRIBASE_NULL;
	}
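
	/*
	 * Illustrative sketch (not part of the original file, never
	 * compiled): walking a cpu's topology siblings the way the
	 * boot-verbose printout above does.  The leaf node returned by
	 * get_cpu_node_by_cpuid() has a parent whose member set contains
	 * the cpu and all of its siblings at that topology level.  cpu 0
	 * is used here purely as an example.
	 */
#if 0
	{
		cpu_node_t *node = get_cpu_node_by_cpuid(0);
		int sib;

		if (node != NULL && node->parent_node != NULL) {
			CPUSET_FOREACH(sib, node->parent_node->members)
				kprintf("cpu0 sibling: cpu%d\n", sib);
		}
	}
#endif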

	/* usched_dfly sysctl configurable parameters */

	SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
		       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
		       OID_AUTO, "rrinterval", CTLFLAG_RW,
		       &usched_dfly_rrinterval, 0, "");
	SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
		       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
		       OID_AUTO, "decay", CTLFLAG_RW,
		       &usched_dfly_decay, 0, "Extra decay when not running");

	/* Add enable/disable option for SMT scheduling if supported */
	if (smt_not_supported) {
		usched_dfly_smt = 0;
		SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
				  SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
				  OID_AUTO, "smt", CTLFLAG_RD,
				  "NOT SUPPORTED", 0, "SMT NOT SUPPORTED");
	} else {
		usched_dfly_smt = 1;
		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
			       OID_AUTO, "smt", CTLFLAG_RW,
			       &usched_dfly_smt, 0, "Enable SMT scheduling");
	}

	/*
	 * Add enable/disable option for cache coherent scheduling
	 * if supported
	 */
	if (cache_coherent_not_supported) {
		usched_dfly_cache_coherent = 0;
		SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
				  SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
				  OID_AUTO, "cache_coherent", CTLFLAG_RD,
				  "NOT SUPPORTED", 0,
				  "Cache coherence NOT SUPPORTED");
	} else {
		usched_dfly_cache_coherent = 1;
		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
			       OID_AUTO, "cache_coherent", CTLFLAG_RW,
			       &usched_dfly_cache_coherent, 0,
			       "Enable/Disable cache coherent scheduling");

		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
			       OID_AUTO, "weight1", CTLFLAG_RW,
			       &usched_dfly_weight1, 200,
			       "Weight selection for current cpu");

		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
			       OID_AUTO, "weight2", CTLFLAG_RW,
			       &usched_dfly_weight2, 180,
			       "Weight selection for wakefrom cpu");

		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
			       OID_AUTO, "weight3", CTLFLAG_RW,
			       &usched_dfly_weight3, 40,
			       "Weight selection for num threads on queue");

		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
			       OID_AUTO, "weight4", CTLFLAG_RW,
			       &usched_dfly_weight4, 160,
			       "Availability of other idle cpus");

		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
			       OID_AUTO, "fast_resched", CTLFLAG_RW,
			       &usched_dfly_fast_resched, 0,
			       "Priority delta which forces an immediate resched");

		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
			       OID_AUTO, "features", CTLFLAG_RW,
			       &usched_dfly_features, 0x8F,
			       "Allow pulls into empty queues");

		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
			       OID_AUTO, "swmask", CTLFLAG_RW,
			       &usched_dfly_swmask, ~PPQMASK,
			       "Queue mask to force thread switch");

#if 0
		SYSCTL_ADD_PROC(&usched_dfly_sysctl_ctx,
				SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
				OID_AUTO, "stick_to_level",
				CTLTYPE_INT | CTLFLAG_RW,
				NULL, sizeof usched_dfly_stick_to_level,
				sysctl_usched_dfly_stick_to_level, "I",
				"Stick a process to this level. See sysctl "
				"parameter hw.cpu_topology.level_description");
#endif
	}
}
SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
	usched_dfly_cpu_init, NULL)
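
/*
 * Usage note: the knobs registered above surface under the
 * kern.usched_dfly sysctl node and can be inspected or tuned at runtime
 * with sysctl(8), e.g. (values illustrative only):
 *
 *	sysctl kern.usched_dfly.rrinterval
 *	sysctl kern.usched_dfly.smt=0
 *
 * The "smt" and "cache_coherent" knobs are read-only placeholder strings
 * when the detected cpu topology does not support those features.
 */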