1 /*
2 * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>. All rights reserved.
3 * Copyright (c) 2012-2020 The DragonFly Project. All rights reserved.
4 *
5 * This code is derived from software contributed to The DragonFly Project
6 * by Matthew Dillon <dillon@backplane.com>,
7 * by Mihai Carabas <mihai.carabas@gmail.com>
8 * and many others.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 *
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in
18 * the documentation and/or other materials provided with the
19 * distribution.
20 * 3. Neither the name of The DragonFly Project nor the names of its
21 * contributors may be used to endorse or promote products derived
22 * from this software without specific, prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
28 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 */
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/lock.h>
41 #include <sys/queue.h>
42 #include <sys/proc.h>
43 #include <sys/rtprio.h>
44 #include <sys/uio.h>
45 #include <sys/sysctl.h>
46 #include <sys/resourcevar.h>
47 #include <sys/spinlock.h>
48 #include <sys/cpu_topology.h>
49 #include <sys/thread2.h>
50 #include <sys/spinlock2.h>
51
52 #include <sys/ktr.h>
53
54 #include <machine/cpu.h>
55 #include <machine/smp.h>
56
57 #include <sys/usched_dfly.h>
58
59 /*static void dfly_acquire_curproc(struct lwp *lp); see sys/usched.h */
60 static void dfly_release_curproc(struct lwp *lp);
61 static void dfly_select_curproc(globaldata_t gd);
62 static void dfly_setrunqueue(struct lwp *lp);
63 static void dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp);
64 static void dfly_schedulerclock(struct lwp *lp, sysclock_t period,
65 sysclock_t cpstamp);
66 static void dfly_recalculate_estcpu(struct lwp *lp);
67 static void dfly_resetpriority(struct lwp *lp);
68 static void dfly_forking(struct lwp *plp, struct lwp *lp);
69 static void dfly_exiting(struct lwp *lp, struct proc *);
70 static void dfly_uload_update(struct lwp *lp);
71 static void dfly_yield(struct lwp *lp);
72 static void dfly_changeqcpu_locked(struct lwp *lp,
73 dfly_pcpu_t dd, dfly_pcpu_t rdd);
74 static dfly_pcpu_t dfly_choose_best_queue(struct lwp *lp);
75 static dfly_pcpu_t dfly_choose_worst_queue(dfly_pcpu_t dd, int forceit);
76 static dfly_pcpu_t dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp);
77 static void dfly_need_user_resched_remote(void *dummy);
78 static struct lwp *dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
79 struct lwp *chklp, int worst);
80 static void dfly_remrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
81 static void dfly_setrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
82 static void dfly_changedcpu(struct lwp *lp);
83
84 struct usched usched_dfly = {
85 { NULL },
86 "dfly", "Original DragonFly Scheduler",
87 NULL, /* default registration */
88 NULL, /* default deregistration */
89 dfly_acquire_curproc,
90 dfly_release_curproc,
91 dfly_setrunqueue,
92 dfly_schedulerclock,
93 dfly_recalculate_estcpu,
94 dfly_resetpriority,
95 dfly_forking,
96 dfly_exiting,
97 dfly_uload_update,
98 NULL, /* setcpumask not supported */
99 dfly_yield,
100 dfly_changedcpu
101 };
102
103 /*
104 * We have NQS (32) run queues per scheduling class. For the normal
105 * class, there are 128 priorities scaled onto these 32 queues. New
106 * processes are added to the last entry in each queue, and processes
107 * are selected for running by taking them from the head and maintaining
108 * a simple FIFO arrangement. Realtime and Idle priority processes have
109 * an explicit 0-31 priority which maps directly onto their class queue
110 * index. When a queue has something in it, the corresponding bit is
111 * set in the queuebits variable, allowing a single read to determine
112 * the state of all 32 queues and then a ffs() to find the first busy
113 * queue.
114 *
115 * curprocmask is used to publish cpus with assigned curprocs to the rest
116 * of the cpus. In certain situations curprocmask may leave a bit set
117 * (e.g. a yield or a token-based yield) even though dd->uschedcp is
118 * NULL'd out temporarily.
119 */
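/*
 * Illustrative sketch (not compiled, helper name is hypothetical): how a
 * queuebits word plus a bit-scan locates the first busy queue.  It mirrors
 * the bsfl()/TAILQ_FIRST() usage in dfly_chooseproc_locked() below and is
 * only a reading aid, not additional functionality.
 */
#if 0
static struct lwp *
example_pick_first(dfly_pcpu_t rdd)
{
	u_int32_t bits = rdd->queuebits;	/* one bit per non-empty queue */
	int pri;

	if (bits == 0)
		return (NULL);			/* nothing runnable */
	pri = bsfl(bits);			/* lowest set bit == best queue */
	return (TAILQ_FIRST(&rdd->queues[pri]));
}
#endif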
120 /* currently running a user process */
121 static cpumask_t dfly_curprocmask = CPUMASK_INITIALIZER_ALLONES;
122 static cpumask_t dfly_rdyprocmask; /* ready to accept a user process */
123 static struct usched_dfly_pcpu dfly_pcpu[MAXCPU];
124 static struct sysctl_ctx_list usched_dfly_sysctl_ctx;
125 static struct sysctl_oid *usched_dfly_sysctl_tree;
126 static struct lock usched_dfly_config_lk = LOCK_INITIALIZER("usdfs", 0, 0);
127
128 /* Debug info exposed through debug.* sysctl */
129
130 static int usched_dfly_debug = -1;
131 SYSCTL_INT(_debug, OID_AUTO, dfly_scdebug, CTLFLAG_RW,
132 &usched_dfly_debug, 0,
133 "Print debug information for this pid");
134
135 static int usched_dfly_pid_debug = -1;
136 SYSCTL_INT(_debug, OID_AUTO, dfly_pid_debug, CTLFLAG_RW,
137 &usched_dfly_pid_debug, 0,
138 "Print KTR debug information for this pid");
139
140 static int usched_dfly_chooser = 0;
141 SYSCTL_INT(_debug, OID_AUTO, dfly_chooser, CTLFLAG_RW,
142 &usched_dfly_chooser, 0,
143 "Print KTR debug information for this pid");
144
145 /*
146 * WARNING!
147 *
148 * The fork bias can have a large effect on the system in the face of a
149 * make -j N or other high-forking applications.
150 *
151 * Larger values are much less invasive vs other things that
152 * might be running in the system, but can cause exec chains
153 * such as those typically generated by make to have higher
154 * latencies in the face of modest load.
155 *
156 * Lower values are more invasive but have reduced latencies
157 * for such exec chains.
158 *
159 * make -j 10 buildkernel example, build times:
160 *
161 * +0 3:04
162 * +1 3:14 -5.2% <-- default
163 * +2 3:22 -8.9%
164 *
165 * This issue occurs due to the way the scheduler affinity heuristics work.
166 * There is no way to really 'fix' the affinity heuristics because when it
167 * comes right down to it trying to instantly schedule a process on an
168 * available cpu (even if it will become unavailable a microsecond later)
169 * tends to cause processes to shift around between cpus and sockets too much
170 * and breaks the affinity.
171 *
172 * NOTE: Heavily concurrent builds typically have enough things on the pan
173 * that they remain time-efficient even with a higher bias.
174 */
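/*
 * The bias is applied in dfly_forking() below: the child starts out
 * ESTCPUPPQ * usched_dfly_forkbias estcpu above (less desirable than)
 * its parent, i.e. roughly one run queue per unit of bias.
 */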
175 static int usched_dfly_forkbias = 1;
176 SYSCTL_INT(_debug, OID_AUTO, dfly_forkbias, CTLFLAG_RW,
177 &usched_dfly_forkbias, 0,
178 "Fork bias for estcpu in whole queues");
179
180 /*
181 * Tuning usched_dfly - configurable through kern.usched_dfly.
182 *
183 * weight1 - Tries to keep threads on their current cpu. If you
184 * make this value too large the scheduler will not be
185 * able to load-balance large loads.
186 *
187 * Generally set to a fairly low value, but high enough
188 * such that estcpu jitter doesn't move threads around.
189 *
190 * weight2 - If non-zero, detects thread pairs undergoing synchronous
191 * communications and tries to move them closer together.
192 * The weight advantages the same package and socket and
193 * disadvantages the same core and same cpu.
194 *
195 * WARNING! Weight2 is a ridiculously sensitive parameter,
196 * particularly against weight4. Change the default at your
197 * peril.
198 *
199 * weight3 - Weighting based on the number of recently runnable threads
200 * on the userland scheduling queue (ignoring their loads).
201 *
202 * A nominal value here prevents high-priority (low-load)
203 * threads from accumulating on one cpu core when other
204 * cores are available.
205 *
206 * This value should be left fairly small because low-load
207 * high priority threads can still be mostly idle and too
208 * high a value will kick cpu-bound processes off the cpu
209 * unnecessarily.
210 *
211 * weight4 - Weighting based on availability of other logical cpus running
212 * less important threads (by upri) than the thread we are trying
213 * to schedule.
214 *
215 * This allows a thread to migrate to another nearby cpu if it
216 * is unable to run on the current cpu based on the other cpu
217 * being idle or running a less important (higher lwp_priority)
218 * thread. This value should be large enough to override weight1,
219 * but not so large as to override weight2.
220 *
221 * This parameter generally ensures fairness at the cost of some
222 * performance (if set too high). It should generally be just
223 * a tad lower than weight2.
224 *
225 * weight5 - Weighting based on the relative amount of ram connected
226 * to the node a cpu resides on.
227 *
228 * This value should remain fairly low to allow asymmetric
229 * NUMA nodes to get threads scheduled to them. Setting a very
230 * high level will prevent scheduling on asymmetric NUMA nodes
231 * with low amounts of directly-attached memory.
232 *
233 * Note that when testing e.g. N threads on a machine with N
234 * cpu cores with asymmetric NUMA nodes, a non-zero value will
235 * cause some cpu threads on the low-priority NUMA nodes to remain
236 * idle even when a few process threads are doubled-up on other
237 * cpus. But this is typically more ideal because it deschedules
238 * low-priority NUMA nodes at lighter loads.
239 *
240 * Values between 50 and 200 are recommended. Default is 50.
241 *
242 * weight6 - rdd transfer weight hysteresis for regular pair rebalancing
243 * (feature 0x04).
244 *
245 * Defaults to 0, can be increased to improve stability at the
246 * cost of more mis-schedules.
247 *
248 * weight7 - rdd transfer weight hysteresis for idle cpu 'pull' (feature 0x01).
249 *
250 * Defaults to -100 to strongly promote a transfer.
251 *
252 * ipc_smt - If enabled, advantage IPC pairing to sibling cpu threads.
253 * If -1, automatic when load >= 1/2 ncpus (default).
254 *
255 * ipc_same- If enabled, advantage IPC pairing to the same logical cpu.
256 * If -1, automatic when load >= ncpus (default).
257 *
258 * features - These flags can be set or cleared to enable or disable various
259 * features.
260 *
261 * 0x01 Enable idle-cpu pulling (default)
262 * 0x02 Enable proactive pushing (default)
263 * 0x04 Enable rebalancing rover (default)
264 * 0x08 Enable more proactive pushing (default)
265 * 0x10 (unassigned)
266 * 0x20 choose best cpu for forked process (default)
267 * 0x40 choose current cpu for forked process
268 * 0x80 choose random cpu for forked process
269 *
270 * NOTE - The idea behind forking mechanic 0x20 is that most
271 * fork()ing is either followed by an exec in the child,
272 * or the parent wait*()s. If the child is short-lived,
273 * there is effectively an IPC dependency (td_wakefromcpu
274 * is also set in kern_fork.c) and we want to implement
275 * the weight2 behavior to reduce IPIs and to reduce CPU
276 * cache ping-ponging.
277 */
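/*
 * Userland usage sketch (not compiled here): the knobs above are exported
 * as kern.usched_dfly.* sysctls and can be inspected or changed at run
 * time, e.g. via sysctlbyname(3).  A minimal example, assuming the node
 * names follow the descriptions above:
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int weight2, features;
	size_t len;

	/* read the current IPC-pairing weight */
	len = sizeof(weight2);
	if (sysctlbyname("kern.usched_dfly.weight2", &weight2, &len,
			 NULL, 0) == 0)
		printf("weight2 = %d\n", weight2);

	/* turn off the rebalancing rover (feature bit 0x04) */
	len = sizeof(features);
	if (sysctlbyname("kern.usched_dfly.features", &features, &len,
			 NULL, 0) == 0) {
		features &= ~0x04;
		sysctlbyname("kern.usched_dfly.features", NULL, NULL,
			     &features, sizeof(features));
	}
	return (0);
}
#endif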
278 __read_mostly static int usched_dfly_smt = 0;
279 __read_mostly static int usched_dfly_cache_coherent = 0;
280 __read_mostly static int usched_dfly_weight1 = 30; /* keep thread on cpu */
281 __read_mostly static int usched_dfly_weight2 = 180; /* IPC locality */
282 __read_mostly static int usched_dfly_weight3 = 10; /* threads on queue */
283 __read_mostly static int usched_dfly_weight4 = 120; /* availability of cores */
284 __read_mostly static int usched_dfly_weight5 = 50; /* node attached memory */
285 __read_mostly static int usched_dfly_weight6 = 0; /* 0x04 transfer weight */
286 __read_mostly static int usched_dfly_weight7 = -100;/* 0x01 transfer weight */
287 __read_mostly static int usched_dfly_features = 0x2f; /* allow pulls */
288 __read_mostly static int usched_dfly_fast_resched = PPQ / 2; /* delta pri */
289 __read_mostly static int usched_dfly_swmask = ~PPQMASK; /* allow pulls */
290 __read_mostly static int usched_dfly_rrinterval = (ESTCPUFREQ + 9) / 10;
291 __read_mostly static int usched_dfly_decay = 8;
292 __read_mostly static int usched_dfly_ipc_smt = -1; /* IPC auto smt pair */
293 __read_mostly static int usched_dfly_ipc_same = -1; /* IPC auto same log cpu */
294 __read_mostly static int usched_dfly_poll_ticks = 1; /* helper polling ticks */
295 __read_mostly static long usched_dfly_node_mem;
296
297 /* KTR debug printings */
298
299 KTR_INFO_MASTER(usched);
300
301 #if !defined(KTR_USCHED_DFLY)
302 #define KTR_USCHED_DFLY KTR_ALL
303 #endif
304
305 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc, 0,
306 "USCHED_DFLY(chooseproc: pid %d, old_cpuid %d, curr_cpuid %d)",
307 pid_t pid, int old_cpuid, int curr);
308
309 /*
310 * This function is called when the kernel intends to return to userland.
311 * It is responsible for making the thread the current designated userland
312 * thread for this cpu, blocking if necessary.
313 *
314 * The kernel will not depress our LWKT priority until after we return,
315 * in case we have to shove over to another cpu.
316 *
317 * We must determine our thread's disposition before we switch away. This
318 * is very sensitive code.
319 *
320 * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
321 * TO ANOTHER CPU! Because most of the kernel assumes that no migration will
322 * occur, this function is called only under very controlled circumstances.
323 */
324 void
325 dfly_acquire_curproc(struct lwp *lp)
326 {
327 globaldata_t gd;
328 dfly_pcpu_t dd;
329 dfly_pcpu_t rdd;
330 thread_t td;
331 int force_resched;
332
333 td = lp->lwp_thread;
334 gd = mycpu;
335 dd = &dfly_pcpu[gd->gd_cpuid];
336
337 /*
338 * Quickly return if possible.
339 */
340 if (__predict_true((td->td_flags & TDF_TSLEEPQ) == 0 &&
341 !sched_action_wanted_gd(gd) &&
342 dd->uschedcp == lp)) {
343 return;
344 }
345
346 /*
347 * Make sure we aren't sitting on a tsleep queue.
348 */
349 crit_enter_quick(td);
350 if (td->td_flags & TDF_TSLEEPQ)
351 tsleep_remove(td);
352 dfly_recalculate_estcpu(lp);
353
354 /*
355 * Process any pending interrupts/ipi's, then handle reschedule
356 * requests. dfly_release_curproc() will try to assign a new
357 * uschedcp that isn't us and otherwise NULL it out.
358 */
359 force_resched = 0;
360 if (user_resched_wanted()) {
361 if (dd->uschedcp == lp)
362 force_resched = 1;
363 clear_user_resched();
364 dfly_release_curproc(lp);
365 }
366
367 /*
368 * Loop until we are the current user thread.
369 *
370 * NOTE: dd spinlock not held at top of loop.
371 */
372 if (dd->uschedcp == lp)
373 lwkt_yield_quick();
374
375 while (dd->uschedcp != lp) {
376 /*
377 * Do not do a lwkt_yield_quick() here as it will prevent
378 * the lwp from being placed on the dfly_bsd runqueue for
379 * one cycle (possibly an entire round-robin), preventing
380 * it from being scheduled to another cpu.
381 */
382 /* lwkt_yield_quick(); */
383
384 if (usched_dfly_debug == lp->lwp_proc->p_pid)
385 kprintf(" pid %d acquire curcpu %d (force %d) ",
386 lp->lwp_proc->p_pid, gd->gd_cpuid,
387 force_resched);
388
389
390 spin_lock(&dd->spin);
391
392 /* This lwp is an outcast; force reschedule. */
393 if (__predict_false(
394 CPUMASK_TESTBIT(lp->lwp_cpumask, gd->gd_cpuid) == 0) &&
395 (rdd = dfly_choose_best_queue(lp)) != dd) {
396 dfly_changeqcpu_locked(lp, dd, rdd);
397 spin_unlock(&dd->spin);
398 lwkt_deschedule(lp->lwp_thread);
399 dfly_setrunqueue_dd(rdd, lp);
400 lwkt_switch();
401 gd = mycpu;
402 dd = &dfly_pcpu[gd->gd_cpuid];
403 if (usched_dfly_debug == lp->lwp_proc->p_pid)
404 kprintf("SEL-A cpu %d\n", gd->gd_cpuid);
405 continue;
406 }
407
408 /*
409 * We are not or are no longer the current lwp and a forced
410 * reschedule was requested. Figure out the best cpu to
411 * run on (our current cpu will be given significant weight).
412 *
413 * Doing this on many cpus simultaneously leads to
414 * instability so pace the operation.
415 *
416 * (if a reschedule was not requested we want to move this
417 * step after the uschedcp tests).
418 */
419 if (force_resched &&
420 (usched_dfly_features & 0x08) &&
421 (u_int)sched_ticks / 8 % ncpus == gd->gd_cpuid) {
422 if ((rdd = dfly_choose_best_queue(lp)) != dd) {
423 dfly_changeqcpu_locked(lp, dd, rdd);
424 spin_unlock(&dd->spin);
425 lwkt_deschedule(lp->lwp_thread);
426 dfly_setrunqueue_dd(rdd, lp);
427 lwkt_switch();
428 gd = mycpu;
429 dd = &dfly_pcpu[gd->gd_cpuid];
430 if (usched_dfly_debug == lp->lwp_proc->p_pid)
431 kprintf("SEL-B cpu %d\n", gd->gd_cpuid);
432 continue;
433 }
434 if (usched_dfly_debug == lp->lwp_proc->p_pid)
435 kprintf("(SEL-B same cpu) ");
436 }
437
438 /*
439 * Either no reschedule was requested or the best queue was
440 * dd, and no current process has been selected. We can
441 * trivially become the current lwp on the current cpu.
442 */
443 if (dd->uschedcp == NULL) {
444 atomic_clear_int(&lp->lwp_thread->td_mpflags,
445 TDF_MP_DIDYIELD);
446 if ((dd->flags & DFLY_PCPU_CURMASK) == 0) {
447 ATOMIC_CPUMASK_ORBIT(dfly_curprocmask,
448 gd->gd_cpuid);
449 dd->flags |= DFLY_PCPU_CURMASK;
450 }
451 dd->uschedcp = lp;
452 dd->upri = lp->lwp_priority;
453 KKASSERT(lp->lwp_qcpu == dd->cpuid);
454 spin_unlock(&dd->spin);
455 if (usched_dfly_debug == lp->lwp_proc->p_pid)
456 kprintf("SEL-C cpu %d (same cpu)\n",
457 gd->gd_cpuid);
458 break;
459 }
460
461 /*
462 * Can we steal the current designated user thread?
463 *
464 * If we do the other thread will stall when it tries to
465 * return to userland, possibly rescheduling elsewhere.
466 * Set need_user_resched() to get the thread to cycle soonest.
467 *
468 * It is important to do a masked test to avoid the edge
469 * case where two near-equal-priority threads are constantly
470 * interrupting each other.
471 *
472 * In the exact match case another thread has already gained
473 * uschedcp and lowered its priority, if we steal it the
474 * other thread will stay stuck on the LWKT runq and not
475 * push to another cpu. So don't steal on equal-priority even
476 * though it might appear to be more beneficial due to not
477 * having to switch back to the other thread's context.
478 *
479 * usched_dfly_fast_resched requires that two threads be
480 * significantly far apart in priority in order to interrupt.
481 *
482 * If better but not sufficiently far apart, the current
483 * uschedcp will be interrupted at the next scheduler clock.
484 */
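/*
 * Concretely: both priorities are first rounded down to a queue
 * boundary (~PPQMASK), and lp must then be more than
 * usched_dfly_fast_resched better (numerically lower) than the
 * current uschedcp before we steal the slot here.  An exact tie
 * never steals.
 */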
485 if (dd->uschedcp &&
486 (dd->upri & ~PPQMASK) >
487 (lp->lwp_priority & ~PPQMASK) + usched_dfly_fast_resched) {
488 dd->uschedcp = lp;
489 dd->upri = lp->lwp_priority;
490 KKASSERT(lp->lwp_qcpu == dd->cpuid);
491 need_user_resched();
492 spin_unlock(&dd->spin);
493 if (usched_dfly_debug == lp->lwp_proc->p_pid)
494 kprintf("SEL-D cpu %d (same cpu)\n",
495 gd->gd_cpuid);
496 break;
497 }
498
499 /*
500 * Requeue us at lwp_priority, which recalculate_estcpu()
501 * set for us. Reset the rrcount to force placement
502 * at the end of the queue.
503 *
504 * We used to move ourselves to the worst queue, but
505 * this creates a fairly serious priority inversion
506 * problem.
507 */
508 if (lp->lwp_thread->td_mpflags & TDF_MP_DIDYIELD) {
509 spin_unlock(&dd->spin);
510 lp->lwp_rrcount = usched_dfly_rrinterval;
511 lp->lwp_rqindex = (lp->lwp_priority & PRIMASK) / PPQ;
512
513 lwkt_deschedule(lp->lwp_thread);
514 dfly_setrunqueue_dd(dd, lp);
515 atomic_clear_int(&lp->lwp_thread->td_mpflags,
516 TDF_MP_DIDYIELD);
517 lwkt_switch();
518 gd = mycpu;
519 dd = &dfly_pcpu[gd->gd_cpuid];
520 if (usched_dfly_debug == lp->lwp_proc->p_pid)
521 kprintf("SEL-E cpu %d (requeue)\n",
522 gd->gd_cpuid);
523 continue;
524 }
525
526 /*
527 * We are not the current lwp, figure out the best cpu
528 * to run on (our current cpu will be given significant
529 * weight). Loop on cpu change.
530 */
531 if ((usched_dfly_features & 0x02) &&
532 force_resched == 0 &&
533 (rdd = dfly_choose_best_queue(lp)) != dd) {
534 dfly_changeqcpu_locked(lp, dd, rdd);
535 spin_unlock(&dd->spin);
536 lwkt_deschedule(lp->lwp_thread);
537 dfly_setrunqueue_dd(rdd, lp);
538 lwkt_switch();
539 gd = mycpu;
540 dd = &dfly_pcpu[gd->gd_cpuid];
541 if (usched_dfly_debug == lp->lwp_proc->p_pid)
542 kprintf("SEL-F cpu %d (requeue new cpu)\n",
543 gd->gd_cpuid);
544 continue;
545 }
546
547 /*
548 * We cannot become the current lwp, place the lp on the
549 * run-queue of this or another cpu and deschedule ourselves.
550 *
551 * When we are reactivated we will have another chance.
552 *
553 * Reload after a switch or setrunqueue/switch possibly
554 * moved us to another cpu.
555 */
556 spin_unlock(&dd->spin);
557 lwkt_deschedule(lp->lwp_thread);
558 dfly_setrunqueue_dd(dd, lp);
559 lwkt_switch();
560 gd = mycpu;
561 dd = &dfly_pcpu[gd->gd_cpuid];
562 if (usched_dfly_debug == lp->lwp_proc->p_pid)
563 kprintf("SEL-G cpu %d (fallback setrunq)\n",
564 gd->gd_cpuid);
565 }
566 if (usched_dfly_debug == lp->lwp_proc->p_pid)
567 kprintf(" pid %d acquire DONE cpu %d\n",
568 lp->lwp_proc->p_pid, gd->gd_cpuid);
569
570 /*
571 * Make sure upri is synchronized, then yield to LWKT threads as
572 * needed before returning. This could result in another reschedule.
573 * XXX
574 */
575 crit_exit_quick(td);
576
577 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
578 }
579
580 /*
581 * DFLY_RELEASE_CURPROC
582 *
583 * This routine detaches the current thread from the userland scheduler,
584 * usually because the thread needs to run or block in the kernel (at
585 * kernel priority) for a while.
586 *
587 * This routine is also responsible for selecting a new thread to
588 * make the current thread.
589 *
590 * NOTE: This implementation differs from the dummy example in that
591 * dfly_select_curproc() is able to select the current process, whereas
592 * dummy_select_curproc() is not able to select the current process.
593 * This means we have to NULL out uschedcp.
594 *
595 * Additionally, note that we may already be on a run queue if releasing
596 * via the lwkt_switch() in dfly_setrunqueue().
597 */
598 static void
599 dfly_release_curproc(struct lwp *lp)
600 {
601 globaldata_t gd = mycpu;
602 dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
603
604 /*
605 * Make sure td_wakefromcpu is defaulted. This will be overwritten
606 * by wakeup().
607 */
608 if (dd->uschedcp == lp) {
609 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
610 spin_lock(&dd->spin);
611 if (dd->uschedcp == lp) {
612 dd->uschedcp = NULL; /* don't let lp be selected */
613 dd->upri = PRIBASE_NULL;
614
615 /*
616 * We're just going to set it again, avoid the global
617 * cache line ping-pong.
618 */
619 if ((lp->lwp_thread->td_mpflags & TDF_MP_DIDYIELD) == 0) {
620 if (dd->flags & DFLY_PCPU_CURMASK) {
621 ATOMIC_CPUMASK_NANDBIT(dfly_curprocmask,
622 gd->gd_cpuid);
623 dd->flags &= ~DFLY_PCPU_CURMASK;
624 }
625 }
626 spin_unlock(&dd->spin);
627 dfly_select_curproc(gd);
628 } else {
629 spin_unlock(&dd->spin);
630 }
631 }
632 }
633
634 /*
635 * DFLY_SELECT_CURPROC
636 *
637 * Select a new current process for this cpu and clear any pending user
638 * reschedule request. The cpu currently has no current process.
639 *
640 * This routine is also responsible for equal-priority round-robining,
641 * typically triggered from dfly_schedulerclock(). In our dummy example
642 * all the 'user' threads are LWKT scheduled all at once and we just
643 * call lwkt_switch().
644 *
645 * The calling process is not on the queue and cannot be selected.
646 */
647 static
648 void
649 dfly_select_curproc(globaldata_t gd)
650 {
651 dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
652 struct lwp *nlp;
653 int cpuid = gd->gd_cpuid;
654
655 crit_enter_gd(gd);
656
657 spin_lock(&dd->spin);
658 nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);
659
660 if (nlp) {
661 if ((dd->flags & DFLY_PCPU_CURMASK) == 0) {
662 ATOMIC_CPUMASK_ORBIT(dfly_curprocmask, cpuid);
663 dd->flags |= DFLY_PCPU_CURMASK;
664 }
665 dd->upri = nlp->lwp_priority;
666 dd->uschedcp = nlp;
667 #if 0
668 dd->rrcount = 0; /* reset round robin */
669 #endif
670 spin_unlock(&dd->spin);
671 lwkt_acquire(nlp->lwp_thread);
672 lwkt_schedule(nlp->lwp_thread);
673 } else {
674 spin_unlock(&dd->spin);
675 }
676 crit_exit_gd(gd);
677 }
678
679 /*
680 * Place the specified lwp on the user scheduler's run queue. This routine
681 * must be called with the thread descheduled. The lwp must be runnable.
682 * It must not be possible for anyone else to explicitly schedule this thread.
683 *
684 * The thread may be the current thread as a special case.
685 */
686 static void
687 dfly_setrunqueue(struct lwp *lp)
688 {
689 dfly_pcpu_t dd;
690 dfly_pcpu_t rdd;
691
692 /*
693 * First validate the process LWKT state.
694 */
695 KASSERT(lp->lwp_stat == LSRUN, ("setrunqueue: lwp not LSRUN"));
696 KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0,
697 ("lwp %d/%d already on runq! flag %08x/%08x", lp->lwp_proc->p_pid,
698 lp->lwp_tid, lp->lwp_proc->p_flags, lp->lwp_flags));
699 KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);
700
701 /*
702 * NOTE: dd/rdd do not necessarily represent the current cpu.
703 * Instead they may represent the cpu the thread was last
704 * scheduled on or inherited by its parent.
705 */
706 dd = &dfly_pcpu[lp->lwp_qcpu];
707 rdd = dd;
708
709 /*
710 * This process is not supposed to be scheduled anywhere or assigned
711 * as the current process anywhere. Assert the condition.
712 */
713 KKASSERT(rdd->uschedcp != lp);
714
715 /*
716 * Ok, we have to setrunqueue some target cpu and request a reschedule
717 * if necessary.
718 *
719 * We have to choose the best target cpu. It might not be the current
720 * target even if the current cpu has no running user thread (for
721 * example, because the current cpu might be a hyperthread and its
722 * sibling has a thread assigned).
723 *
724 * If we just forked it is most optimal to run the child on the same
725 * cpu just in case the parent decides to wait for it (thus getting
726 * off that cpu). As long as there is nothing else runnable on the
727 * cpu, that is. If we did this unconditionally a parent forking
728 * multiple children before waiting (e.g. make -j N) leaves other
729 * cpus idle that could be working.
730 */
731 if (lp->lwp_forked) {
732 lp->lwp_forked = 0;
733 if (usched_dfly_features & 0x20)
734 rdd = dfly_choose_best_queue(lp);
735 else if (usched_dfly_features & 0x40)
736 rdd = &dfly_pcpu[lp->lwp_qcpu];
737 else if (usched_dfly_features & 0x80)
738 rdd = dfly_choose_queue_simple(rdd, lp);
739 else if (dfly_pcpu[lp->lwp_qcpu].runqcount)
740 rdd = dfly_choose_best_queue(lp);
741 else
742 rdd = &dfly_pcpu[lp->lwp_qcpu];
743 } else {
744 rdd = dfly_choose_best_queue(lp);
745 /* rdd = &dfly_pcpu[lp->lwp_qcpu]; */
746 }
747 if (lp->lwp_qcpu != rdd->cpuid) {
748 spin_lock(&dd->spin);
749 dfly_changeqcpu_locked(lp, dd, rdd);
750 spin_unlock(&dd->spin);
751 }
752 dfly_setrunqueue_dd(rdd, lp);
753 }
754
755 /*
756 * Change qcpu to rdd->cpuid. The dd the lp is CURRENTLY on must be
757 * spin-locked by the caller. rdd does not have to be.
758 */
759 static void
760 dfly_changeqcpu_locked(struct lwp *lp, dfly_pcpu_t dd, dfly_pcpu_t rdd)
761 {
762 if (lp->lwp_qcpu != rdd->cpuid) {
763 spin_lock(&lp->lwp_spin);
764 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
765 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
766 atomic_add_long(&dd->uload, -lp->lwp_uload);
767 atomic_add_int(&dd->ucount, -1);
768 }
769 lp->lwp_qcpu = rdd->cpuid;
770 spin_unlock(&lp->lwp_spin);
771 }
772 }
773
774 /*
775 * Place lp on rdd's runqueue. Nothing is locked on call. This function
776 * also performs all necessary ancillary notification actions.
777 */
778 static void
779 dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp)
780 {
781 globaldata_t rgd;
782
783 /*
784 * We might be moving the lp to another cpu's run queue, and once
785 * on the runqueue (even if it is our cpu's), another cpu can rip
786 * it away from us.
787 *
788 * TDF_MIGRATING might already be set if this is part of a
789 * remrunqueue+setrunqueue sequence.
790 */
791 if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0)
792 lwkt_giveaway(lp->lwp_thread);
793
794 rgd = rdd->gd;
795
796 /*
797 * We lose control of the lp the moment we release the spinlock
798 * after having placed it on the queue. i.e. another cpu could pick
799 * it up, or it could exit, or its priority could be further
800 * adjusted, or something like that.
801 *
802 * WARNING! rdd can point to a foreign cpu!
803 */
804 spin_lock(&rdd->spin);
805 dfly_setrunqueue_locked(rdd, lp);
806
807 /*
808 * Potentially interrupt the currently-running thread
809 */
810 if ((rdd->upri & ~PPQMASK) <= (lp->lwp_priority & ~PPQMASK)) {
811 /*
812 * Currently running thread is better or same, do not
813 * interrupt.
814 */
815 spin_unlock(&rdd->spin);
816 } else if ((rdd->upri & ~PPQMASK) <= (lp->lwp_priority & ~PPQMASK) +
817 usched_dfly_fast_resched) {
818 /*
819 * Currently running thread is not better, but not so bad
820 * that we need to interrupt it. Let it run for one more
821 * scheduler tick.
822 */
823 if (rdd->uschedcp &&
824 rdd->uschedcp->lwp_rrcount < usched_dfly_rrinterval) {
825 rdd->uschedcp->lwp_rrcount = usched_dfly_rrinterval - 1;
826 }
827 spin_unlock(&rdd->spin);
828 } else if (rgd == mycpu) {
829 /*
830 * We should interrupt the currently running thread, which
831 * is on the current cpu. However, if DIDYIELD is set we
832 * round-robin unconditionally and do not interrupt it.
833 */
834 spin_unlock(&rdd->spin);
835 if (rdd->uschedcp == NULL)
836 wakeup_mycpu(rdd->helper_thread); /* XXX */
837 if ((lp->lwp_thread->td_mpflags & TDF_MP_DIDYIELD) == 0)
838 need_user_resched();
839 } else {
840 /*
841 * We should interrupt the currently running thread, which
842 * is on a different cpu.
843 */
844 spin_unlock(&rdd->spin);
845 lwkt_send_ipiq(rgd, dfly_need_user_resched_remote, NULL);
846 }
847 }
848
849 /*
850 * This routine is called from a systimer IPI. It MUST be MP-safe and
851 * the BGL IS NOT HELD ON ENTRY. This routine is called at ESTCPUFREQ on
852 * each cpu.
853 */
854 static
855 void
856 dfly_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
857 {
858 globaldata_t gd = mycpu;
859 dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
860
861 /*
862 * Spinlocks also hold a critical section so there should not be
863 * any active.
864 */
865 KKASSERT(gd->gd_spinlocks == 0 || dumping);
866
867 /*
868 * If lp is NULL we might be contended and lwkt_switch() may have
869 * cycled into the idle thread. Apply the tick to the current
870 * process on this cpu if it is contended.
871 */
872 if (gd->gd_curthread == &gd->gd_idlethread) {
873 lp = dd->uschedcp;
874 if (lp && (lp->lwp_thread == NULL ||
875 lp->lwp_thread->td_contended == 0)) {
876 lp = NULL;
877 }
878 }
879
880 /*
881 * Dock thread for tick
882 */
883 if (lp) {
884 /*
885 * Do we need to round-robin? We round-robin 10 times a
886 * second. This should only occur for cpu-bound batch
887 * processes.
888 */
889 if (++lp->lwp_rrcount >= usched_dfly_rrinterval)
890 need_user_resched();
891 if ((lp->lwp_thread->td_mpflags & TDF_MP_BATCH_DEMARC) &&
892 lp->lwp_rrcount >= usched_dfly_rrinterval / 2) {
893 need_user_resched();
894 }
895
896 /*
897 * Adjust estcpu upward using a real time equivalent
898 * calculation, and recalculate lp's priority. Estcpu
899 * is increased such that it will cap-out over a period
900 * of one second.
901 */
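/*
 * Worked example: the clock runs at ESTCPUFREQ hz and each tick
 * adds ESTCPUMAX / ESTCPUFREQ (+1), so a thread that stays
 * cpu-bound for a full second accumulates roughly ESTCPUMAX and
 * pins at the ESTCPULIM() cap.
 */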
902 lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu +
903 ESTCPUMAX / ESTCPUFREQ + 1);
904 dfly_resetpriority(lp);
905 }
906
907 /*
908 * Rebalance two cpus every 8 ticks, pulling the worst thread
909 * from the worst cpu's queue into a rotating cpu number.
910 * Also require that the moving of the highest-load thread
911 * from rdd to dd does not cause the uload to cross over.
912 *
913 * This mechanic is needed because the push algorithms can
914 * steady-state in a non-optimal configuration. We need to mix it
915 * up a little, even if it means breaking up a paired thread, so
916 * the push algorithms can rebalance the degenerate conditions.
917 * This portion of the algorithm exists to ensure stability at the
918 * selected weightings.
919 *
920 * Because we might be breaking up optimal conditions we do not want
921 * to execute this too quickly, hence we only rebalance approximately
922 * ~7-8 times per second. The pushes, on the other hand, are capable
923 * of moving threads to other cpus at a much higher rate.
924 *
925 * We choose the most heavily loaded thread from the worst queue
926 * in order to ensure that multiple heavy-weight threads on the same
927 * queue get broken up, and also because these threads are the most
928 * likely to be able to remain in place. Hopefully then any pairings,
929 * if applicable, migrate to where these threads are.
930 */
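/*
 * Note that the gating test below fires on only one cpu per 8-tick
 * window and the responsible cpu rotates, so each cpu runs the
 * rover roughly once every ncpus * 8 scheduler ticks.
 */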
931 if ((usched_dfly_features & 0x04) &&
932 ((u_int)sched_ticks & 7) == 0 &&
933 (u_int)sched_ticks / 8 % ncpus == gd->gd_cpuid) {
934 /*
935 * Our cpu is up.
936 */
937 struct lwp *nlp;
938 dfly_pcpu_t rdd;
939
940 rdd = dfly_choose_worst_queue(dd, 1);
941 if (rdd && dd->uload + usched_dfly_weight6 / 2 < rdd->uload) {
942 spin_lock(&dd->spin);
943 if (spin_trylock(&rdd->spin)) {
944 nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
945 spin_unlock(&rdd->spin);
946 if (nlp == NULL)
947 spin_unlock(&dd->spin);
948 } else {
949 spin_unlock(&dd->spin);
950 nlp = NULL;
951 }
952 } else {
953 nlp = NULL;
954 }
955 /* dd->spin held if nlp != NULL */
956
957 /*
958 * Either schedule it or add it to our queue.
959 */
960 if (nlp &&
961 (nlp->lwp_priority & ~PPQMASK) < (dd->upri & ~PPQMASK)) {
962 if ((dd->flags & DFLY_PCPU_CURMASK) == 0) {
963 ATOMIC_CPUMASK_ORMASK(dfly_curprocmask,
964 dd->cpumask);
965 dd->flags |= DFLY_PCPU_CURMASK;
966 }
967 dd->upri = nlp->lwp_priority;
968 dd->uschedcp = nlp;
969 #if 0
970 dd->rrcount = 0; /* reset round robin */
971 #endif
972 spin_unlock(&dd->spin);
973 lwkt_acquire(nlp->lwp_thread);
974 lwkt_schedule(nlp->lwp_thread);
975 } else if (nlp) {
976 dfly_setrunqueue_locked(dd, nlp);
977 spin_unlock(&dd->spin);
978 }
979 }
980 }
981
982 /*
983 * Called from acquire and from kern_synch's one-second timer (one of the
984 * callout helper threads) with a critical section held.
985 *
986 * Adjust p_estcpu based on our single-cpu load, p_nice, and compensate for
987 * overall system load.
988 *
989 * Note that no recalculation occurs for a process which sleeps and wakes
990 * up in the same tick. That is, a system doing thousands of context
991 * switches per second will still only do serious estcpu calculations
992 * ESTCPUFREQ times per second.
993 */
994 static
995 void
996 dfly_recalculate_estcpu(struct lwp *lp)
997 {
998 globaldata_t gd = mycpu;
999 sysclock_t cpbase;
1000 sysclock_t ttlticks;
1001 int estcpu;
1002 int decay_factor;
1003 int ucount;
1004
1005 /*
1006 * We have to subtract periodic to get the last schedclock
1007 * timeout time, otherwise we would get the upcoming timeout.
1008 * Keep in mind that a process can migrate between cpus and
1009 * while the scheduler clock should be very close, boundary
1010 * conditions could lead to a small negative delta.
1011 */
1012 cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
1013
1014 if (lp->lwp_slptime > 1) {
1015 /*
1016 * Too much time has passed, do a coarse correction.
1017 */
1018 lp->lwp_estcpu = lp->lwp_estcpu >> 1;
1019 dfly_resetpriority(lp);
1020 lp->lwp_cpbase = cpbase;
1021 lp->lwp_cpticks = 0;
1022 lp->lwp_estfast = 0;
1023 } else if (lp->lwp_cpbase != cpbase) {
1024 /*
1025 * Adjust estcpu if we are in a different tick. Don't waste
1026 * time if we are in the same tick.
1027 *
1028 * First calculate the number of ticks in the measurement
1029 * interval. The ttlticks calculation can wind up 0 due to
1030 * a bug in the handling of lwp_slptime (as yet not found),
1031 * so make sure we do not get a divide by 0 panic.
1032 */
1033 ttlticks = (cpbase - lp->lwp_cpbase) /
1034 gd->gd_schedclock.periodic;
1035 if ((ssysclock_t)ttlticks < 0) {
1036 ttlticks = 0;
1037 lp->lwp_cpbase = cpbase;
1038 }
1039 if (ttlticks < 4)
1040 return;
1041 updatepcpu(lp, lp->lwp_cpticks, ttlticks);
1042
1043 /*
1044 * Calculate the instantaneous estcpu as a percentage of (one) cpu
1045 * used and exponentially average it into the current
1046 * lwp_estcpu.
1047 */
1048 ucount = dfly_pcpu[lp->lwp_qcpu].ucount;
1049 estcpu = lp->lwp_cpticks * ESTCPUMAX / ttlticks;
1050
1051 /*
1052 * The higher ttlticks gets, the more meaning the calculation
1053 * has and the smaller our decay_factor in the exponential
1054 * average.
1055 *
1056 * The uload calculation has been removed because it actually
1057 * makes things worse, causing processes which use less cpu
1058 * (such as a browser) to be pumped up and treated the same
1059 * as a cpu-bound process (such as a make). The same effect
1060 * can occur with sufficient load without the uload
1061 * calculation, but occurs less quickly and takes more load.
1062 * In addition, the less cpu a process uses the smaller the
1063 * effect of the overload.
1064 */
1065 if (ttlticks >= hz)
1066 decay_factor = 1;
1067 else
1068 decay_factor = hz - ttlticks;
1069
1070 lp->lwp_estcpu = ESTCPULIM(
1071 (lp->lwp_estcpu * ttlticks + estcpu) /
1072 (ttlticks + 1));
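/*
 * For example, with ttlticks = 4 the assignment above yields
 * (old * 4 + estcpu) / 5, i.e. each recalculation moves the
 * average 1 / (ttlticks + 1) of the way toward the
 * instantaneous value.
 */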
1073 dfly_resetpriority(lp);
1074 lp->lwp_cpbase += ttlticks * gd->gd_schedclock.periodic;
1075 lp->lwp_cpticks = 0;
1076 }
1077 }
1078
1079 /*
1080 * Compute the priority of a process when running in user mode.
1081 * Arrange to reschedule if the resulting priority is better
1082 * than that of the current process.
1083 *
1084 * This routine may be called with any process.
1085 *
1086 * This routine is called by fork1() for initial setup with the process of
1087 * the run queue, and also may be called normally with the process on or
1088 * off the run queue.
1089 */
1090 static void
1091 dfly_resetpriority(struct lwp *lp)
1092 {
1093 dfly_pcpu_t rdd;
1094 int newpriority;
1095 u_short newrqtype;
1096 int rcpu;
1097 int checkpri;
1098 int estcpu;
1099 int delta_uload;
1100
1101 crit_enter();
1102
1103 /*
1104 * Lock the scheduler (lp) belongs to. This can be on a different
1105 * cpu. Handle races. This loop breaks out with the appropriate
1106 * rdd locked.
1107 */
1108 for (;;) {
1109 rcpu = lp->lwp_qcpu;
1110 cpu_ccfence();
1111 rdd = &dfly_pcpu[rcpu];
1112 spin_lock(&rdd->spin);
1113 if (rcpu == lp->lwp_qcpu)
1114 break;
1115 spin_unlock(&rdd->spin);
1116 }
1117
1118 /*
1119 * Calculate the new priority and queue type
1120 */
1121 newrqtype = lp->lwp_rtprio.type;
1122
1123 switch(newrqtype) {
1124 case RTP_PRIO_REALTIME:
1125 case RTP_PRIO_FIFO:
1126 newpriority = PRIBASE_REALTIME +
1127 (lp->lwp_rtprio.prio & PRIMASK);
1128 break;
1129 case RTP_PRIO_NORMAL:
1130 /*
1131 * Calculate the new priority.
1132 *
1133 * nice contributes up to NICE_QS queues (typ 32 - full range)
1134 * estcpu contributes up to EST_QS queues (typ 24)
1135 *
1136 * A nice +20 process receives 1/10 cpu vs nice+0. Niced
1137 * processes more than 20 apart may receive no cpu, so cpu
1138 * bound nice -20 can prevent a nice +5 from getting any
1139 * cpu. A nice+0, being in the middle, always gets some cpu
1140 * no matter what.
1141 */
1142 estcpu = lp->lwp_estcpu;
1143 newpriority = (lp->lwp_proc->p_nice - PRIO_MIN) *
1144 (NICE_QS * PPQ) / PRIO_RANGE;
1145 newpriority += estcpu * PPQ / ESTCPUPPQ;
1146 if (newpriority < 0)
1147 newpriority = 0;
1148 if (newpriority >= MAXPRI)
1149 newpriority = MAXPRI - 1;
1150 newpriority += PRIBASE_NORMAL;
1151 break;
1152 case RTP_PRIO_IDLE:
1153 newpriority = PRIBASE_IDLE + (lp->lwp_rtprio.prio & PRIMASK);
1154 break;
1155 case RTP_PRIO_THREAD:
1156 newpriority = PRIBASE_THREAD + (lp->lwp_rtprio.prio & PRIMASK);
1157 break;
1158 default:
1159 panic("Bad RTP_PRIO %d", newrqtype);
1160 /* NOT REACHED */
1161 }
1162
1163 /*
1164 * The LWKT scheduler doesn't dive into usched structures, so give it a hint
1165 * on the relative priority of user threads running in the kernel.
1166 * The LWKT scheduler will always ensure that a user thread running
1167 * in the kernel will get cpu some time, regardless of its upri,
1168 * but can decide not to instantly switch from one kernel or user
1169 * mode user thread to a kernel-mode user thread when it has a less
1170 * desirable user priority.
1171 *
1172 * td_upri has normal sense (higher values are more desirable), so
1173 * negate it (this is a different field from lp->lwp_priority).
1174 */
1175 lp->lwp_thread->td_upri = -(newpriority & usched_dfly_swmask);
1176
1177 /*
1178 * The newpriority incorporates the queue type so do a simple masked
1179 * check to determine if the process has moved to another queue. If
1180 * it has, and it is currently on a run queue, then move it.
1181 *
1182 * Since uload is ~PPQMASK masked, no modifications are necessary if
1183 * we end up in the same run queue.
1184 *
1185 * Reset rrcount if moving to a higher-priority queue, otherwise
1186 * retain rrcount.
1187 */
1188 if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
1189 if (lp->lwp_priority < newpriority)
1190 lp->lwp_rrcount = 0;
1191 if (lp->lwp_mpflags & LWP_MP_ONRUNQ) {
1192 dfly_remrunqueue_locked(rdd, lp);
1193 lp->lwp_priority = newpriority;
1194 lp->lwp_rqtype = newrqtype;
1195 lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1196 dfly_setrunqueue_locked(rdd, lp);
1197 checkpri = 1;
1198 } else {
1199 lp->lwp_priority = newpriority;
1200 lp->lwp_rqtype = newrqtype;
1201 lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1202 checkpri = 0;
1203 }
1204 } else {
1205 /*
1206 * In the same PPQ, uload cannot change.
1207 */
1208 lp->lwp_priority = newpriority;
1209 checkpri = 1;
1210 rcpu = -1;
1211 }
1212
1213 /*
1214 * Adjust effective load.
1215 *
1216 * Calculate load then scale up or down geometrically based on p_nice.
1217 * Processes niced up (positive) are less important, and processes
1218 * niced downward (negative) are more important. The higher the uload,
1219 * the more important the thread.
1220 */
1221 /* 0-511, 0-100% cpu */
1222 spin_lock(&lp->lwp_spin);
1223 delta_uload = lptouload(lp);
1224 delta_uload -= lp->lwp_uload;
1225 if (lp->lwp_uload + delta_uload < -32767) {
1226 delta_uload = -32768 - lp->lwp_uload;
1227 } else if (lp->lwp_uload + delta_uload > 32767) {
1228 delta_uload = 32767 - lp->lwp_uload;
1229 }
1230 lp->lwp_uload += delta_uload;
1231 if (lp->lwp_mpflags & LWP_MP_ULOAD)
1232 atomic_add_long(&dfly_pcpu[lp->lwp_qcpu].uload, delta_uload);
1233 spin_unlock(&lp->lwp_spin);
1234
1235 /*
1236 * Determine if we need to reschedule the target cpu. This only
1237 * occurs if the LWP is already on a scheduler queue, which means
1238 * that idle cpu notification has already occurred. At most we
1239 * need only issue a need_user_resched() on the appropriate cpu.
1240 *
1241 * The LWP may be owned by a CPU different from the current one,
1242 * in which case dd->uschedcp may be modified without an MP lock
1243 * or a spinlock held. The worst that happens is that the code
1244 * below causes a spurious need_user_resched() on the target CPU
1245 * and dd->pri to be wrong for a short period of time, both of
1246 * which are harmless.
1247 *
1248 * If checkpri is 0 we are adjusting the priority of the current
1249 * process, possibly higher (less desirable), so ignore the upri
1250 * check which will fail in that case.
1251 */
1252 if (rcpu >= 0) {
1253 if (CPUMASK_TESTBIT(dfly_rdyprocmask, rcpu) &&
1254 (checkpri == 0 ||
1255 (rdd->upri & ~PRIMASK) >
1256 (lp->lwp_priority & ~PRIMASK))) {
1257 if (rcpu == mycpu->gd_cpuid) {
1258 spin_unlock(&rdd->spin);
1259 need_user_resched();
1260 } else {
1261 spin_unlock(&rdd->spin);
1262 lwkt_send_ipiq(globaldata_find(rcpu),
1263 dfly_need_user_resched_remote,
1264 NULL);
1265 }
1266 } else {
1267 spin_unlock(&rdd->spin);
1268 }
1269 } else {
1270 spin_unlock(&rdd->spin);
1271 }
1272 crit_exit();
1273 }
1274
1275 static
1276 void
1277 dfly_yield(struct lwp *lp)
1278 {
1279 if (lp->lwp_qcpu != mycpu->gd_cpuid)
1280 return;
1281 KKASSERT(lp == curthread->td_lwp);
1282
1283 /*
1284 * Don't set need_user_resched() or mess with rrcount or anything.
1285 * The TDF flag will override everything as long as we release.
1286 */
1287 atomic_set_int(&lp->lwp_thread->td_mpflags, TDF_MP_DIDYIELD);
1288 dfly_release_curproc(lp);
1289 }
1290
1291 /*
1292 * Thread was forcefully migrated to another cpu. Normally forced migrations
1293 * are used for iterations in which the kernel returns to the original cpu
1294 * before returning to userland, so no scheduler action is needed.  However,
1295 * if the kernel migrates a thread to another cpu and wants to leave it there,
1296 * it has to call this scheduler helper.
1297 *
1298 * Note that the lwkt_migratecpu() function also released the thread, so
1299 * we don't have to worry about that.
1300 */
1301 static
1302 void
1303 dfly_changedcpu(struct lwp *lp)
1304 {
1305 dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1306 dfly_pcpu_t rdd = &dfly_pcpu[mycpu->gd_cpuid];
1307
1308 if (dd != rdd) {
1309 spin_lock(&dd->spin);
1310 dfly_changeqcpu_locked(lp, dd, rdd);
1311 spin_unlock(&dd->spin);
1312 }
1313 }
1314
1315 /*
1316 * Called from fork1() when a new child process is being created.
1317 *
1318 * Give the child process an initial estcpu that is more batchy than
1319 * its parent and dock the parent for the fork (but do not
1320 * reschedule the parent).
1321 *
1324 * XXX lwp should be "spawning" instead of "forking"
1325 */
1326 static void
1327 dfly_forking(struct lwp *plp, struct lwp *lp)
1328 {
1329 int estcpu;
1330
1331 /*
1332 * Put the child 4 queue slots (out of 32) higher than the parent
1333 * (less desirable than the parent).
1334 */
1335 lp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu +
1336 ESTCPUPPQ * usched_dfly_forkbias);
1337 lp->lwp_forked = 1;
1338 lp->lwp_estfast = 0;
1339
1340 /*
1341 * Even though the lp will be scheduled specially the first time
1342 * due to lp->lwp_forked, it is important to initialize lwp_qcpu
1343 * to avoid favoring a fixed cpu. XXX
1344 */
1345 #if 0
1346 static uint16_t save_cpu;
1347 lp->lwp_qcpu = ++save_cpu % ncpus;
1348 #else
1349 lp->lwp_qcpu = plp->lwp_qcpu;
1350 if (CPUMASK_TESTBIT(lp->lwp_cpumask, lp->lwp_qcpu) == 0)
1351 lp->lwp_qcpu = BSFCPUMASK(lp->lwp_cpumask);
1352 #endif
1353
1354 /*
1355 * Dock the parent a cost for the fork, protecting us from fork
1356 * bombs. If the parent is forking quickly this makes both the
1357 * parent and child more batchy.
1358 */
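/*
 * Each fork docks the parent ESTCPUPPQ / 16 estcpu, so roughly 16
 * forks in quick succession cost the parent a full run queue worth
 * of priority, while an occasional fork costs almost nothing.
 */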
1359 estcpu = plp->lwp_estcpu + ESTCPUPPQ / 16;
1360 plp->lwp_estcpu = ESTCPULIM(estcpu);
1361 }
1362
1363 /*
1364 * Called when a lwp is being removed from this scheduler, typically
1365 * during lwp_exit(). We have to clean out any ULOAD accounting before
1366 * we can let the lp go.
1367 *
1368 * Scheduler dequeueing has already occurred, no further action in that
1369 * regard is needed.
1370 */
1371 static void
1372 dfly_exiting(struct lwp *lp, struct proc *child_proc)
1373 {
1374 dfly_pcpu_t dd;
1375
1376 spin_lock(&lp->lwp_spin);
1377 dd = &dfly_pcpu[lp->lwp_qcpu];
1378 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1379 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1380 atomic_add_long(&dd->uload, -lp->lwp_uload);
1381 atomic_add_int(&dd->ucount, -1);
1382 }
1383 spin_unlock(&lp->lwp_spin);
1384 }
1385
1386 /*
1387 * This function cannot block in any way, but spinlocks are ok.
1388 *
1389 * Update the uload based on the state of the thread (whether it is going
1390 * to sleep or running again). The uload is meant to be a longer-term
1391 * load and not an instantaneous load.
1392 */
1393 static void
1394 dfly_uload_update(struct lwp *lp)
1395 {
1396 dfly_pcpu_t dd;
1397
1398 if (lp->lwp_thread->td_flags & TDF_RUNQ) {
1399 if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1400 spin_lock(&lp->lwp_spin);
1401 dd = &dfly_pcpu[lp->lwp_qcpu];
1402 if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1403 atomic_set_int(&lp->lwp_mpflags,
1404 LWP_MP_ULOAD);
1405 atomic_add_long(&dd->uload, lp->lwp_uload);
1406 atomic_add_int(&dd->ucount, 1);
1407 }
1408 spin_unlock(&lp->lwp_spin);
1409 }
1410 } else if (lp->lwp_slptime > 0) {
1411 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1412 spin_lock(&lp->lwp_spin);
1413 dd = &dfly_pcpu[lp->lwp_qcpu];
1414 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1415 atomic_clear_int(&lp->lwp_mpflags,
1416 LWP_MP_ULOAD);
1417 atomic_add_long(&dd->uload, -lp->lwp_uload);
1418 atomic_add_int(&dd->ucount, -1);
1419 }
1420 spin_unlock(&lp->lwp_spin);
1421 }
1422 }
1423 }
1424
1425 /*
1426 * chooseproc() is called when a cpu needs a user process to LWKT schedule,
1427 * it selects a user process and returns it. If chklp is non-NULL and chklp
1428 * has a better or equal priority than the process that would otherwise be
1429 * chosen, NULL is returned.
1430 *
1431 * Until we fix the RUNQ code the chklp test has to be strict or we may
1432 * bounce between processes trying to acquire the current process designation.
1433 *
1434 * Must be called with rdd->spin locked. The spinlock is left intact through
1435 * the entire routine. dd->spin does not have to be locked.
1436 *
1437 * If worst is non-zero this function finds the worst thread instead of the
1438 * best thread (used by the schedulerclock-based rover).
1439 */
1440 static
1441 struct lwp *
1442 dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
1443 struct lwp *chklp, int worst)
1444 {
1445 struct lwp *lp;
1446 struct rq *q;
1447 u_int32_t *which;
1448 u_int32_t pri;
1449 u_int32_t rtqbits;
1450 u_int32_t tsqbits;
1451 u_int32_t idqbits;
1452
1453 /*
1454 * Select best or worst process. Once selected, clear the bit
1455 * in our local variable (idqbits, tsqbits, or rtqbits) just
1456 * in case we have to loop.
1457 */
1458 rtqbits = rdd->rtqueuebits;
1459 tsqbits = rdd->queuebits;
1460 idqbits = rdd->idqueuebits;
1461
1462 loopfar:
1463 if (worst) {
1464 if (idqbits) {
1465 pri = bsrl(idqbits);
1466 idqbits &= ~(1U << pri);
1467 q = &rdd->idqueues[pri];
1468 which = &rdd->idqueuebits;
1469 } else if (tsqbits) {
1470 pri = bsrl(tsqbits);
1471 tsqbits &= ~(1U << pri);
1472 q = &rdd->queues[pri];
1473 which = &rdd->queuebits;
1474 } else if (rtqbits) {
1475 pri = bsrl(rtqbits);
1476 rtqbits &= ~(1U << pri);
1477 q = &rdd->rtqueues[pri];
1478 which = &rdd->rtqueuebits;
1479 } else {
1480 return (NULL);
1481 }
1482 lp = TAILQ_LAST(q, rq);
1483 } else {
1484 if (rtqbits) {
1485 pri = bsfl(rtqbits);
1486 rtqbits &= ~(1U << pri);
1487 q = &rdd->rtqueues[pri];
1488 which = &rdd->rtqueuebits;
1489 } else if (tsqbits) {
1490 pri = bsfl(tsqbits);
1491 tsqbits &= ~(1U << pri);
1492 q = &rdd->queues[pri];
1493 which = &rdd->queuebits;
1494 } else if (idqbits) {
1495 pri = bsfl(idqbits);
1496 idqbits &= ~(1U << pri);
1497 q = &rdd->idqueues[pri];
1498 which = &rdd->idqueuebits;
1499 } else {
1500 return (NULL);
1501 }
1502 lp = TAILQ_FIRST(q);
1503 }
1504 KASSERT(lp, ("chooseproc: no lwp on busy queue"));
1505
1506 loopnear:
1507 /*
1508 * If the passed lwp <chklp> is reasonably close to the selected
1509 * lwp <lp>, return NULL (indicating that <chklp> should be kept).
1510 *
1511 * Note that we must error on the side of <chklp> to avoid bouncing
1512 * between threads in the acquire code.
1513 */
1514 if (chklp) {
1515 if (chklp->lwp_priority < lp->lwp_priority + PPQ)
1516 return(NULL);
1517 }
1518
1519 /*
1520 * When rdd != dd, we have to make sure that the process we
1521 * are pulling is allowed to run on our cpu. This alternative
1522 * path is a bit more expensive but it is not considered to be
1523 * in the critical path.
1524 */
1525 if (rdd != dd && CPUMASK_TESTBIT(lp->lwp_cpumask, dd->cpuid) == 0) {
1526 if (worst)
1527 lp = TAILQ_PREV(lp, rq, lwp_procq);
1528 else
1529 lp = TAILQ_NEXT(lp, lwp_procq);
1530 if (lp)
1531 goto loopnear;
1532 goto loopfar;
1533 }
1534
1535 KTR_COND_LOG(usched_chooseproc,
1536 lp->lwp_proc->p_pid == usched_dfly_pid_debug,
1537 lp->lwp_proc->p_pid,
1538 lp->lwp_thread->td_gd->gd_cpuid,
1539 mycpu->gd_cpuid);
1540
1541 KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
1542 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1543 TAILQ_REMOVE(q, lp, lwp_procq);
1544 --rdd->runqcount;
1545 if (TAILQ_EMPTY(q))
1546 *which &= ~(1 << pri);
1547
1548 /*
1549 * If we are choosing a process from rdd with the intent to
1550 * move it to dd, lwp_qcpu must be adjusted while rdd's spinlock
1551 * is still held.
1552 */
1553 if (rdd != dd) {
1554 spin_lock(&lp->lwp_spin);
1555 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1556 atomic_add_long(&rdd->uload, -lp->lwp_uload);
1557 atomic_add_int(&rdd->ucount, -1);
1558 }
1559 lp->lwp_qcpu = dd->cpuid;
1560 atomic_add_long(&dd->uload, lp->lwp_uload);
1561 atomic_add_int(&dd->ucount, 1);
1562 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1563 spin_unlock(&lp->lwp_spin);
1564 }
1565 return lp;
1566 }
1567
1568 /*
1569 * USED TO PUSH RUNNABLE LWPS TO THE LEAST LOADED CPU.
1570 *
1571 * Choose a cpu node to schedule lp on, hopefully nearby its current
1572 * node.
1573 *
1574 * We give the current node a modest advantage for obvious reasons.
1575 *
1576 * We also give the node the thread was woken up FROM a slight advantage
1577 * in order to try to schedule paired threads which synchronize/block waiting
1578 * for each other fairly close to each other. Similarly in a network setting
1579 * this feature will also attempt to place a user process near the kernel
1580 * protocol thread that is feeding it data. THIS IS A CRITICAL PART of the
1581 * algorithm as it heuristically groups synchronizing processes for locality
1582 * of reference in multi-socket systems.
1583 *
1584 * We check against running processes and give a big advantage if there
1585 * are none running.
1586 *
1587 * The caller will normally dfly_setrunqueue() lp on the returned queue.
1588 *
1589 * When the topology is known choose a cpu whose group has, in aggregate,
1590 * the lowest weighted load.
1591 */
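/*
 * In outline, the topology walk below scores each child node by its
 * average per-cpu load (sum of member uload plus ucount * weight3,
 * with the weight1/weight4/weight5 advantages and penalties described
 * above applied), backs out lp's own contribution where applicable,
 * and recurses into the lowest-scoring child until a single cpu
 * remains.
 */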
1592 static
1593 dfly_pcpu_t
1594 dfly_choose_best_queue(struct lwp *lp)
1595 {
1596 cpumask_t wakemask;
1597 cpumask_t mask;
1598 cpu_node_t *cpup;
1599 cpu_node_t *cpun;
1600 cpu_node_t *cpub;
1601 dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1602 dfly_pcpu_t rdd;
1603 int wakecpu;
1604 int cpuid;
1605 int n;
1606 int loadav;
1607 long load;
1608 long lowest_load;
1609
1610 /*
1611 * When the topology is unknown choose a random cpu that is hopefully
1612 * idle.
1613 */
1614 if (dd->cpunode == NULL)
1615 return (dfly_choose_queue_simple(dd, lp));
1616
1617 loadav = (averunnable.ldavg[0] + FSCALE / 2) >> FSHIFT;
1618
1619 /*
1620 * Pairing mask
1621 */
1622 if ((wakecpu = lp->lwp_thread->td_wakefromcpu) >= 0)
1623 wakemask = dfly_pcpu[wakecpu].cpumask;
1624 else
1625 CPUMASK_ASSZERO(wakemask);
1626
1627 if (usched_dfly_debug == lp->lwp_proc->p_pid)
1628 kprintf("choosebest wakefromcpu %d:\n",
1629 lp->lwp_thread->td_wakefromcpu);
1630
1631 /*
1632 * When the topology is known choose a cpu whose group has, in
1633 * aggregate, the lowest weighted load.
1634 */
1635 cpup = root_cpu_node;
1636 rdd = dd;
1637
1638 while (cpup) {
1639 /*
1640 * Degenerate case super-root
1641 */
1642 if (cpup->child_no == 1) {
1643 cpup = cpup->child_node[0];
1644 continue;
1645 }
1646
1647 /*
1648 * Terminal cpunode
1649 */
1650 if (cpup->child_no == 0) {
1651 rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1652 if (usched_dfly_debug == lp->lwp_proc->p_pid)
1653 kprintf(" last cpu %d\n", rdd->cpuid);
1654 break;
1655 }
1656
1657 cpub = NULL;
1658 lowest_load = 0x7FFFFFFFFFFFFFFFLL;
1659 if (usched_dfly_debug == lp->lwp_proc->p_pid)
1660 kprintf(" reset lowest_load for scan\n");
1661
1662 for (n = 0; n < cpup->child_no; ++n) {
1663 /*
1664 * Accumulate load information for all cpus
1665 * which are members of this node.
1666 */
1667 int count;
1668
1669 cpun = cpup->child_node[n];
1670 mask = cpun->members;
1671 CPUMASK_ANDMASK(mask, usched_global_cpumask);
1672 CPUMASK_ANDMASK(mask, smp_active_mask);
1673 CPUMASK_ANDMASK(mask, lp->lwp_cpumask);
1674 if (CPUMASK_TESTZERO(mask))
1675 continue;
1676
1677 load = 0;
1678 count = 0;
1679
1680 if (usched_dfly_debug == lp->lwp_proc->p_pid)
1681 kprintf(" mask:");
1682 while (CPUMASK_TESTNZERO(mask)) {
1683 cpuid = BSFCPUMASK(mask);
1684 rdd = &dfly_pcpu[cpuid];
1685
1686 if (usched_dfly_debug == lp->lwp_proc->p_pid)
1687 kprintf(" %d", cpuid);
1688
1689 /*
1690 * Cumulative load for members. Note that
1691 * if (lp) is part of the group, lp's
1692 * contribution will be backed out later.
1693 */
1694 load += rdd->uload;
1695 load += rdd->ucount *
1696 usched_dfly_weight3;
1697
1698 /*
1699 * If the node is running a less important
1700 * thread than our thread, give it an
1701 * advantage. With a high-enough weighting
1702 * this can override most other considerations
1703 * to provide ultimate priority fairness at
1704 * the cost of localization.
1705 */
1706 if ((rdd->upri & ~PPQMASK) >
1707 (lp->lwp_priority & ~PPQMASK)) {
1708 load -= usched_dfly_weight4;
1709 }
1710
1711 #if 0
1712 if (rdd->uschedcp == NULL &&
1713 rdd->runqcount == 0 &&
1714 rdd->gd->gd_tdrunqcount == 0
1715 ) {
1716 load += rdd->uload / 2;
1717 load += rdd->ucount *
1718 usched_dfly_weight3 / 2;
1719 } else {
1720 load += rdd->uload;
1721 load += rdd->ucount *
1722 usched_dfly_weight3;
1723 }
1724 #endif
1725 CPUMASK_NANDBIT(mask, cpuid);
1726 ++count;
1727 }
1728
1729 /*
1730 * Compensate if the lp is already accounted for in
1731 * the aggregate uload for this mask set. We want
1732 * to calculate the loads as if lp were not present,
1733 * otherwise the calculation is bogus.
1734 */
1735 if ((lp->lwp_mpflags & LWP_MP_ULOAD) &&
1736 CPUMASK_TESTMASK(dd->cpumask, cpun->members)) {
1737 load -= lp->lwp_uload;
1738 load -= usched_dfly_weight3; /* ucount */
1739 }
1740
1741 if (usched_dfly_debug == lp->lwp_proc->p_pid)
1742 kprintf("\n accum_start c=%d ld=%ld "
1743 "cpu=%d ld/cnt=%ld ",
1744 count, load, rdd->cpuid,
1745 load / count);
1746
1747 /*
1748 * load is the aggregate load of count CPUs in the
1749 * group. For the weightings to work as intended,
1750 * we want an average per-cpu load.
1751 */
1752 load = load / count;
1753
1754 /*
1755 * Advantage the cpu group (lp) is already on.
1756 */
1757 if (CPUMASK_TESTMASK(cpun->members, dd->cpumask))
1758 load -= usched_dfly_weight1;
1759
1760 if (usched_dfly_debug == lp->lwp_proc->p_pid)
1761 kprintf("B:%ld ", load);
1762
1763 /*
1764 * Advantage nodes with more memory
1765 */
1766 if (usched_dfly_node_mem) {
1767 load -= cpun->phys_mem * usched_dfly_weight5 /
1768 usched_dfly_node_mem;
1769 }
1770
1771 if (usched_dfly_debug == lp->lwp_proc->p_pid)
1772 kprintf("C:%ld ", load);
1773
1774 /*
1775 * Advantage the cpu group we desire to pair (lp)
1776 * to, but disadvantage hyperthreads on the same
1777 * core, or the same thread as the ipc peer.
1778 *
1779 * Under very heavy loads it is usually beneficial
1780 * to set kern.usched_dfly.ipc_smt to 1, and under
1781 * extreme loads it might be beneficial to also set
1782 * kern.usched_dfly.ipc_same to 1.
1783 *
1784 * load+ disadvantage
1785 * load- advantage
1786 */
1787 if (CPUMASK_TESTMASK(cpun->members, wakemask)) {
1788 if (cpun->child_no) {
1789 if (cpun->type == CORE_LEVEL &&
1790 usched_dfly_ipc_smt < 0 &&
1791 loadav >= (ncpus >> 1)) {
1792 /*
1793 * Advantage at higher levels
1794 * of the topology.
1795 */
1796 load -= usched_dfly_weight2;
1797 } else if (cpun->type == CORE_LEVEL &&
1798 usched_dfly_ipc_smt == 0) {
1799 /*
1800 * Disadvantage the same core
1801 * when there are hyperthreads.
1802 */
1803 load += usched_dfly_weight2;
1804 } else {
1805 /*
1806 * Advantage at higher levels
1807 * of the topology.
1808 */
1809 load -= usched_dfly_weight2;
1810 }
1811 } else {
1812 /*
1813 * Last level (core or hyperthread).
1814 * Disadvantage it unless we want the
1815 * ipc peer on this same logical cpu.
1816 */
1817 if (usched_dfly_ipc_same < 0 &&
1818 loadav >= ncpus) {
1819 load -= usched_dfly_weight2;
1820 } else if (usched_dfly_ipc_same) {
1821 load -= usched_dfly_weight2;
1822 } else {
1823 load += usched_dfly_weight2;
1824 }
1825 }
1826 #if 0
1827 if (cpun->child_no != 0) {
1828 /* advantage */
1829 load -= usched_dfly_weight2;
1830 } else {
1831 /*
1832 * 0x10 (disadvantage)
1833 * 0x00 (advantage) - default
1834 */
1835 if (usched_dfly_features & 0x10)
1836 load += usched_dfly_weight2;
1837 else
1838 load -= usched_dfly_weight2;
1839 }
1840 #endif
1841 }
1842
1843 if (usched_dfly_debug == lp->lwp_proc->p_pid)
1844 kprintf("D:%ld ", load);
1845
1846 /*
1847 * Calculate the best load
1848 */
1849 if (cpub == NULL || lowest_load > load ||
1850 (lowest_load == load &&
1851 CPUMASK_TESTMASK(cpun->members, dd->cpumask))
1852 ) {
1853 lowest_load = load;
1854 cpub = cpun;
1855 }
1856
1857 if (usched_dfly_debug == lp->lwp_proc->p_pid)
1858 kprintf("low=%ld]\n", lowest_load);
1859 }
1860 cpup = cpub;
1861 }
1862 /* Dispatch this outcast to a proper CPU. */
1863 if (__predict_false(CPUMASK_TESTBIT(lp->lwp_cpumask, rdd->cpuid) == 0))
1864 rdd = &dfly_pcpu[BSFCPUMASK(lp->lwp_cpumask)];
1865 if (usched_dfly_chooser > 0) {
1866 --usched_dfly_chooser; /* only N lines */
1867 kprintf("lp %02d->%02d %s\n",
1868 lp->lwp_qcpu, rdd->cpuid, lp->lwp_proc->p_comm);
1869 }
1870 if (usched_dfly_debug == lp->lwp_proc->p_pid)
1871 kprintf("final cpu %d\n", rdd->cpuid);
1872 return (rdd);
1873 }
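
/*
 * Illustrative sketch (not compiled): a simplified version of the
 * per-group weighted load evaluated during the topology descent in
 * dfly_choose_best_queue() above.  The helper itself is hypothetical;
 * it reuses this file's fields and weights but omits the debugging
 * output, the lwp_cpumask filtering and the ipc pairing adjustments.
 */
#if 0
static long
sketch_group_load(cpu_node_t *cpun, dfly_pcpu_t dd, struct lwp *lp)
{
	cpumask_t mask = cpun->members;
	long load = 0;
	int count = 0;
	int cpuid;

	/* Aggregate the raw load of every member cpu of this group */
	while (CPUMASK_TESTNZERO(mask)) {
		cpuid = BSFCPUMASK(mask);
		load += dfly_pcpu[cpuid].uload;
		load += dfly_pcpu[cpuid].ucount * usched_dfly_weight3;
		CPUMASK_NANDBIT(mask, cpuid);
		++count;
	}
	if (count == 0)			/* empty group, never preferred */
		return 0x7FFFFFFFFFFFFFFFLL;

	/* Back out lp's own contribution if it is already counted here */
	if ((lp->lwp_mpflags & LWP_MP_ULOAD) &&
	    CPUMASK_TESTMASK(dd->cpumask, cpun->members)) {
		load -= lp->lwp_uload;
		load -= usched_dfly_weight3;
	}

	/* Convert to an average per-cpu load, then apply the advantages */
	load /= count;
	if (CPUMASK_TESTMASK(cpun->members, dd->cpumask))
		load -= usched_dfly_weight1;	/* current cpu group */
	if (usched_dfly_node_mem) {
		load -= cpun->phys_mem * usched_dfly_weight5 /
			usched_dfly_node_mem;	/* memory-rich node */
	}
	return load;
}
#endif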
1874
1875 /*
1876 * USED TO PULL RUNNABLE LWPS FROM THE MOST LOADED CPU.
1877 *
1878 * Choose the worst queue close to dd's cpu node with a non-empty runq
1879 * that is NOT dd.
1880 *
1881 * This is used by the thread chooser when the current cpu's queues are
1882 * empty to steal a thread from another cpu's queue. We want to offload
1883 * the most heavily-loaded queue.
1884 *
1885 * However, we do not want to steal from far-away nodes that themselves
1886 * have idle cpus which are more suitable for distributing the far-away
1887 * thread to.
1888 */
1889 static
1890 dfly_pcpu_t
1891 dfly_choose_worst_queue(dfly_pcpu_t dd, int forceit)
1892 {
1893 cpumask_t mask;
1894 cpu_node_t *cpup;
1895 cpu_node_t *cpun;
1896 cpu_node_t *cpub;
1897 dfly_pcpu_t rdd;
1898 int cpuid;
1899 int n;
1900 int highest_runqcount;
1901 long load;
1902 long highest_load;
1903 #if 0
1904 int pri;
1905 int hpri;
1906 #endif
1907
1908 /*
1909 * When the topology is unknown there is nothing sensible to steal
1910 * from, so simply give up.
1911 */
1912 if (dd->cpunode == NULL) {
1913 return (NULL);
1914 }
1915
1916 /*
1917 * When the topology is known, choose a cpu whose group has, in
1918 * aggregate, the highest weighted load.
1919 */
1920 cpup = root_cpu_node;
1921 rdd = dd;
1922 while (cpup) {
1923 /*
1924 * Degenerate case super-root
1925 */
1926 if (cpup->child_no == 1) {
1927 cpup = cpup->child_node[0];
1928 continue;
1929 }
1930
1931 /*
1932 * Terminal cpunode
1933 */
1934 if (cpup->child_no == 0) {
1935 rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1936 break;
1937 }
1938
1939 cpub = NULL;
1940 highest_load = -0x7FFFFFFFFFFFFFFFLL;
1941
1942 for (n = 0; n < cpup->child_no; ++n) {
1943 /*
1944 * Accumulate load information for all cpus
1945 * which are members of this node.
1946 */
1947 int count;
1948 int runqcount;
1949
1950 cpun = cpup->child_node[n];
1951 mask = cpun->members;
1952 CPUMASK_ANDMASK(mask, usched_global_cpumask);
1953 CPUMASK_ANDMASK(mask, smp_active_mask);
1954 if (CPUMASK_TESTZERO(mask))
1955 continue;
1956
1957 load = 0;
1958 count = 0;
1959 runqcount = 0;
1960
1961 while (CPUMASK_TESTNZERO(mask)) {
1962 cpuid = BSFCPUMASK(mask);
1963 rdd = &dfly_pcpu[cpuid];
1964
1965 load += rdd->uload;
1966 load += rdd->ucount * usched_dfly_weight3;
1967
1968 #if 0
1969 if (rdd->uschedcp == NULL &&
1970 rdd->runqcount == 0 &&
1971 rdd->gd->gd_tdrunqcount == 0
1972 ) {
1973 load += rdd->uload / 2;
1974 load += rdd->ucount *
1975 usched_dfly_weight3 / 2;
1976 } else {
1977 load += rdd->uload;
1978 load += rdd->ucount *
1979 usched_dfly_weight3;
1980 }
1981 #endif
1982 CPUMASK_NANDBIT(mask, cpuid);
1983 ++count;
1984 runqcount += rdd->runqcount;
1985 }
1986 load /= count;
1987
1988 /*
1989 * Advantage the cpu group (dd) is already on.
1990 *
1991 * When choosing the worst queue we reverse the
1992 * sign, but only count half the weight.
1993 *
1994 * weight1 needs to be high enough to be stable,
1995 * but this can also cause it to be too sticky,
1996 * so the iterator which rebalances the load sets
1997 * forceit to ignore it.
1998 */
1999 if (forceit == 0 &&
2000 CPUMASK_TESTMASK(dd->cpumask, cpun->members)) {
2001 load += usched_dfly_weight1 / 2;
2002 }
2003
2004 /*
2005 * Disadvantage nodes with more memory (same sign).
2006 */
2007 if (usched_dfly_node_mem) {
2008 load -= cpun->phys_mem * usched_dfly_weight5 /
2009 usched_dfly_node_mem;
2010 }
2011
2012
2013 /*
2014 * The best candidate is the one with the worst
2015 * (highest) load, as long as it also has processes
2016 * on the run queue (versus running one and nothing
2017 * on the run queue).
2018 */
2019 if (cpub == NULL ||
2020 (runqcount && (highest_load < load ||
2021 (highest_load == load &&
2022 CPUMASK_TESTMASK(cpun->members,
2023 dd->cpumask)))) ||
2024 (runqcount && highest_runqcount < runqcount + 1)) {
2025 highest_load = load;
2026 highest_runqcount = runqcount;
2027 cpub = cpun;
2028 }
2029 }
2030 cpup = cpub;
2031 }
2032
2033 /*
2034 * We never return our own node (dd), and only return a remote
2035 * node if its load is significantly worse than ours (i.e. where
2036 * stealing a thread would be considered reasonable).
2037 *
2038 * This also helps us avoid breaking paired threads apart which
2039 * can have disastrous effects on performance.
2040 */
2041 if (rdd == dd)
2042 return(NULL);
2043
2044 #if 0
2045 hpri = 0;
2046 if (rdd->rtqueuebits && hpri < (pri = bsrl(rdd->rtqueuebits)))
2047 hpri = pri;
2048 if (rdd->queuebits && hpri < (pri = bsrl(rdd->queuebits)))
2049 hpri = pri;
2050 if (rdd->idqueuebits && hpri < (pri = bsrl(rdd->idqueuebits)))
2051 hpri = pri;
2052 hpri *= PPQ;
2053 if (rdd->uload - hpri < dd->uload + hpri)
2054 return(NULL);
2055 #endif
2056 return (rdd);
2057 }
2058
2059 static
2060 dfly_pcpu_t
2061 dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp)
2062 {
2063 dfly_pcpu_t rdd;
2064 cpumask_t tmpmask;
2065 cpumask_t mask;
2066 int cpubase;
2067 int cpuid;
2068
2069 /*
2070 * Fallback to the original heuristic, select random cpu,
2071 * first checking the cpus not currently running a user thread.
2072 *
2073 * Use cpuid as the base cpu in our scan, first checking
2074 * cpuid...(ncpus-1), then 0...(cpuid-1). This avoids favoring
2075 * lower-numbered cpus.
2076 */
2077 ++dd->scancpu; /* SMP race ok */
2078 mask = dfly_rdyprocmask;
2079 CPUMASK_NANDMASK(mask, dfly_curprocmask);
2080 CPUMASK_ANDMASK(mask, lp->lwp_cpumask);
2081 CPUMASK_ANDMASK(mask, smp_active_mask);
2082 CPUMASK_ANDMASK(mask, usched_global_cpumask);
2083
2084 cpubase = (int)(dd->scancpu % ncpus);
2085 CPUMASK_ASSBMASK(tmpmask, cpubase);
2086 CPUMASK_INVMASK(tmpmask);
2087 CPUMASK_ANDMASK(tmpmask, mask);
2088 while (CPUMASK_TESTNZERO(tmpmask)) {
2089 cpuid = BSFCPUMASK(tmpmask);
2090 rdd = &dfly_pcpu[cpuid];
2091
2092 if ((rdd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK))
2093 goto found;
2094 CPUMASK_NANDBIT(tmpmask, cpuid);
2095 }
2096
2097 CPUMASK_ASSBMASK(tmpmask, cpubase);
2098 CPUMASK_ANDMASK(tmpmask, mask);
2099 while (CPUMASK_TESTNZERO(tmpmask)) {
2100 cpuid = BSFCPUMASK(tmpmask);
2101 rdd = &dfly_pcpu[cpuid];
2102
2103 if ((rdd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK))
2104 goto found;
2105 CPUMASK_NANDBIT(tmpmask, cpuid);
2106 }
2107
2108 /*
2109 * Then cpus which might have a currently running lp
2110 */
2111 mask = dfly_rdyprocmask;
2112 CPUMASK_ANDMASK(mask, dfly_curprocmask);
2113 CPUMASK_ANDMASK(mask, lp->lwp_cpumask);
2114 CPUMASK_ANDMASK(mask, smp_active_mask);
2115 CPUMASK_ANDMASK(mask, usched_global_cpumask);
2116
2117 CPUMASK_ASSBMASK(tmpmask, cpubase);
2118 CPUMASK_INVMASK(tmpmask);
2119 CPUMASK_ANDMASK(tmpmask, mask);
2120 while (CPUMASK_TESTNZERO(tmpmask)) {
2121 cpuid = BSFCPUMASK(tmpmask);
2122 rdd = &dfly_pcpu[cpuid];
2123
2124 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
2125 goto found;
2126 CPUMASK_NANDBIT(tmpmask, cpuid);
2127 }
2128
2129 CPUMASK_ASSBMASK(tmpmask, cpubase);
2130 CPUMASK_ANDMASK(tmpmask, mask);
2131 while (CPUMASK_TESTNZERO(tmpmask)) {
2132 cpuid = BSFCPUMASK(tmpmask);
2133 rdd = &dfly_pcpu[cpuid];
2134
2135 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
2136 goto found;
2137 CPUMASK_NANDBIT(tmpmask, cpuid);
2138 }
2139
2140 /*
2141 * If we cannot find a suitable cpu we round-robin using scancpu.
2142 * Other cpus will pickup as they release their current lwps or
2143 * become ready.
2144 *
2145 * Avoid a degenerate system lockup case if usched_global_cpumask
2146 * is set to 0 or otherwise does not cover lwp_cpumask.
2147 *
2148 * We only kick the target helper thread in this case, we do not
2149 * set the user resched flag because
2150 */
2151 cpuid = cpubase;
2152 if (CPUMASK_TESTBIT(lp->lwp_cpumask, cpuid) == 0)
2153 cpuid = BSFCPUMASK(lp->lwp_cpumask);
2154 else if (CPUMASK_TESTBIT(usched_global_cpumask, cpuid) == 0)
2155 cpuid = 0;
2156 rdd = &dfly_pcpu[cpuid];
2157 found:
2158 return (rdd);
2159 }
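
/*
 * Illustrative sketch (not compiled): the rotated two-pass scan used
 * above.  CPUMASK_ASSBMASK(tmpmask, cpubase) is assumed to set bits
 * 0..cpubase-1, so inverting it limits the first pass to cpus >= cpubase
 * and the plain mask limits the second pass to cpus < cpubase, giving a
 * scan order of cpubase..ncpus-1 followed by 0..cpubase-1.
 */
#if 0
	CPUMASK_ASSBMASK(tmpmask, cpubase);	/* bits below cpubase */
	CPUMASK_INVMASK(tmpmask);		/* bits cpubase and above */
	CPUMASK_ANDMASK(tmpmask, mask);		/* pass 1: cpubase..ncpus-1 */
	/* ... scan pass 1 ... */
	CPUMASK_ASSBMASK(tmpmask, cpubase);
	CPUMASK_ANDMASK(tmpmask, mask);		/* pass 2: 0..cpubase-1 */
	/* ... scan pass 2 ... */
#endif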
2160
2161 static
2162 void
2163 dfly_need_user_resched_remote(void *dummy)
2164 {
2165 globaldata_t gd = mycpu;
2166 dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
2167
2168 /*
2169 * Flag reschedule needed
2170 */
2171 need_user_resched();
2172
2173 /*
2174 * If no user thread is currently running we need to kick the helper
2175 * on our cpu to recover. Otherwise the cpu will never schedule
2176 * anything again.
2177 *
2178 * We cannot schedule the process ourselves because this is an
2179 * IPI callback and we cannot acquire spinlocks in an IPI callback.
2180 *
2181 * Call wakeup_mycpu to avoid sending IPIs to other CPUs
2182 */
2183 if (dd->uschedcp == NULL && (dd->flags & DFLY_PCPU_RDYMASK)) {
2184 ATOMIC_CPUMASK_NANDBIT(dfly_rdyprocmask, gd->gd_cpuid);
2185 dd->flags &= ~DFLY_PCPU_RDYMASK;
2186 wakeup_mycpu(dd->helper_thread);
2187 }
2188 }
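
/*
 * Usage sketch (not compiled): this callback runs on the target cpu and
 * is expected to be delivered via an IPI from the cpu that queued the
 * thread, e.g. something like the call below.  Because it executes from
 * IPI context it may only flag the reschedule and poke the helper; it
 * cannot take spinlocks or schedule the lwp directly.
 */
#if 0
	lwkt_send_ipiq(rdd->gd, dfly_need_user_resched_remote, NULL);
#endif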
2189
2190 /*
2191 * dfly_remrunqueue_locked() removes a given process from the run queue
2192 * that it is on, clearing the queue busy bit if it becomes empty.
2193 *
2194 * Note that the user process scheduler is different from the LWKT scheduler.
2195 * The user process scheduler only manages user processes but it uses LWKT
2196 * underneath, and a user process operating in the kernel will often be
2197 * 'released' from our management.
2198 *
2199 * uload is NOT adjusted here. It is only adjusted if the lwkt_thread goes
2200 * to sleep or the lwp is moved to a different runq.
2201 */
2202 static void
2203 dfly_remrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
2204 {
2205 struct rq *q;
2206 u_int32_t *which;
2207 u_int8_t pri;
2208
2209 KKASSERT(rdd->runqcount >= 0);
2210
2211 pri = lp->lwp_rqindex;
2212
2213 switch(lp->lwp_rqtype) {
2214 case RTP_PRIO_NORMAL:
2215 q = &rdd->queues[pri];
2216 which = &rdd->queuebits;
2217 break;
2218 case RTP_PRIO_REALTIME:
2219 case RTP_PRIO_FIFO:
2220 q = &rdd->rtqueues[pri];
2221 which = &rdd->rtqueuebits;
2222 break;
2223 case RTP_PRIO_IDLE:
2224 q = &rdd->idqueues[pri];
2225 which = &rdd->idqueuebits;
2226 break;
2227 default:
2228 panic("remrunqueue: invalid rtprio type");
2229 /* NOT REACHED */
2230 }
2231 KKASSERT(lp->lwp_mpflags & LWP_MP_ONRUNQ);
2232 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
2233 TAILQ_REMOVE(q, lp, lwp_procq);
2234 --rdd->runqcount;
2235 if (TAILQ_EMPTY(q)) {
2236 KASSERT((*which & (1 << pri)) != 0,
2237 ("remrunqueue: remove from empty queue"));
2238 *which &= ~(1 << pri);
2239 }
2240 }
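
/*
 * Illustrative sketch (not compiled): keeping queuebits/rtqueuebits/
 * idqueuebits consistent above is what lets a chooser locate the most
 * important non-empty queue with a single bit-scan instead of walking
 * all NQS queues.  A simplified scan might look like this; the real
 * chooser also arbitrates between the three queue classes and checks
 * the candidate against the currently running thread.
 */
#if 0
	struct lwp *lp;
	int pri;

	if (rdd->rtqueuebits) {
		pri = bsfl(rdd->rtqueuebits);
		lp = TAILQ_FIRST(&rdd->rtqueues[pri]);
	} else if (rdd->queuebits) {
		pri = bsfl(rdd->queuebits);
		lp = TAILQ_FIRST(&rdd->queues[pri]);
	} else if (rdd->idqueuebits) {
		pri = bsfl(rdd->idqueuebits);
		lp = TAILQ_FIRST(&rdd->idqueues[pri]);
	} else {
		lp = NULL;
	}
#endif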
2241
2242 /*
2243 * dfly_setrunqueue_locked()
2244 *
2245 * Add a process whose rqtype and rqindex had previously been calculated
2246 * onto the appropriate run queue. Determine if the addition requires
2247 * a reschedule on a cpu and return the cpuid or -1.
2248 *
2249 * NOTE: Lower priorities are better priorities.
2250 *
2251 * NOTE ON ULOAD: This variable specifies the aggregate load on a cpu, the
2252 * sum of the rough lwp_priority for all running and runnable
2253 * processes. Lower priority processes (higher lwp_priority
2254 * values) actually DO count as more load, not less, because
2255 * these are the programs which require the most care with
2256 * regards to cpu selection.
2257 */
2258 static void
2259 dfly_setrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
2260 {
2261 u_int32_t *which;
2262 struct rq *q;
2263 int pri;
2264
2265 KKASSERT(lp->lwp_qcpu == rdd->cpuid);
2266
2267 spin_lock(&lp->lwp_spin);
2268 if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
2269 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
2270 atomic_add_long(&rdd->uload, lp->lwp_uload);
2271 atomic_add_int(&rdd->ucount, 1);
2272 }
2273 spin_unlock(&lp->lwp_spin);
2274
2275 pri = lp->lwp_rqindex;
2276
2277 switch(lp->lwp_rqtype) {
2278 case RTP_PRIO_NORMAL:
2279 q = &rdd->queues[pri];
2280 which = &rdd->queuebits;
2281 break;
2282 case RTP_PRIO_REALTIME:
2283 case RTP_PRIO_FIFO:
2284 q = &rdd->rtqueues[pri];
2285 which = &rdd->rtqueuebits;
2286 break;
2287 case RTP_PRIO_IDLE:
2288 q = &rdd->idqueues[pri];
2289 which = &rdd->idqueuebits;
2290 break;
2291 default:
2292 panic("remrunqueue: invalid rtprio type");
2293 /* NOT REACHED */
2294 }
2295
2296 /*
2297 * Place us on the selected queue. Determine if we should be
2298 * placed at the head of the queue or at the end.
2299 *
2300 * We are placed at the tail if our round-robin count has expired,
2301 * or is about to expire and the system thinks it's a good place to
2302 * round-robin, or there is already a next thread on the queue
2303 * (it might be trying to pick up where it left off and we don't
2304 * want to interfere).
2305 */
2306 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
2307 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
2308 ++rdd->runqcount;
2309
2310 if (lp->lwp_rrcount >= usched_dfly_rrinterval ||
2311 (lp->lwp_rrcount >= usched_dfly_rrinterval / 2 &&
2312 (lp->lwp_thread->td_mpflags & TDF_MP_BATCH_DEMARC))
2313 ) {
2314 /*
2315 * Place on tail
2316 */
2317 atomic_clear_int(&lp->lwp_thread->td_mpflags,
2318 TDF_MP_BATCH_DEMARC);
2319 lp->lwp_rrcount = 0;
2320 TAILQ_INSERT_TAIL(q, lp, lwp_procq);
2321 } else {
2322 /*
2323 * Retain rrcount and place on head. Count is retained
2324 * even if the queue is empty.
2325 */
2326 TAILQ_INSERT_HEAD(q, lp, lwp_procq);
2327 }
2328 *which |= 1 << pri;
2329 }
2330
2331 /*
2332 * For SMP systems a user scheduler helper thread is created for each
2333 * cpu and is used to allow one cpu to wake up another for the purposes of
2334 * scheduling userland threads from setrunqueue().
2335 *
2336 * UP systems do not need the helper since there is only one cpu.
2337 *
2338 * We can't use the idle thread for this because we might block.
2339 * Additionally, doing things this way allows us to HLT idle cpus
2340 * on MP systems.
2341 */
2342 static void
2343 dfly_helper_thread(void *dummy)
2344 {
2345 globaldata_t gd;
2346 dfly_pcpu_t dd;
2347 dfly_pcpu_t rdd;
2348 struct lwp *nlp;
2349 cpumask_t mask;
2350 int sleepok;
2351 int cpuid;
2352
2353 gd = mycpu;
2354 cpuid = gd->gd_cpuid; /* doesn't change */
2355 mask = gd->gd_cpumask; /* doesn't change */
2356 dd = &dfly_pcpu[cpuid];
2357
2358 /*
2359 * Initial interlock: make sure all dfly_pcpu[] structures have
2360 * been initialized before proceeding.
2361 */
2362 lockmgr(&usched_dfly_config_lk, LK_SHARED);
2363 lockmgr(&usched_dfly_config_lk, LK_RELEASE);
2364
2365 /*
2366 * Since we only want to be woken up when no user processes
2367 * are scheduled on a cpu, run at an ultra low priority.
2368 */
2369 lwkt_setpri_self(TDPRI_USER_SCHEDULER);
2370
2371 for (;;) {
2372 /*
2373 * We use the LWKT deschedule-interlock trick to avoid racing
2374 * dfly_rdyprocmask. This means we cannot block through to the
2375 * tsleep() call we make below.
2376 */
2377 sleepok = 1;
2378 crit_enter_gd(gd);
2379 tsleep_interlock(dd->helper_thread, 0);
2380
2381 spin_lock(&dd->spin);
2382 if ((dd->flags & DFLY_PCPU_RDYMASK) == 0) {
2383 ATOMIC_CPUMASK_ORMASK(dfly_rdyprocmask, mask);
2384 dd->flags |= DFLY_PCPU_RDYMASK;
2385 }
2386 clear_user_resched(); /* This satisfies the reschedule request */
2387 #if 0
2388 dd->rrcount = 0; /* Reset the round-robin counter */
2389 #endif
2390
2391 if (dd->runqcount || dd->uschedcp != NULL) {
2392 /*
2393 * Threads are available. A thread may or may not be
2394 * currently scheduled. Get the best thread already queued
2395 * to this cpu.
2396 */
2397 nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);
2398 if (nlp) {
2399 if ((dd->flags & DFLY_PCPU_CURMASK) == 0) {
2400 ATOMIC_CPUMASK_ORMASK(dfly_curprocmask, mask);
2401 dd->flags |= DFLY_PCPU_CURMASK;
2402 }
2403 dd->upri = nlp->lwp_priority;
2404 dd->uschedcp = nlp;
2405 #if 0
2406 dd->rrcount = 0; /* reset round robin */
2407 #endif
2408 spin_unlock(&dd->spin);
2409 lwkt_acquire(nlp->lwp_thread);
2410 lwkt_schedule(nlp->lwp_thread);
2411 } else {
2412 /*
2413 * This situation should not occur because we had
2414 * at least one thread available.
2415 */
2416 spin_unlock(&dd->spin);
2417 }
2418 } else if (usched_dfly_features & 0x01) {
2419 /*
2420 * This cpu is devoid of runnable threads, steal a thread
2421 * from another nearby cpu that is both running something
2422 * and has runnable threads queued. Since we're stealing,
2423 * we might as well load balance at the same time.
2424 *
2425 * We choose the worst thread from the worst queue. This
2426 * can be a bit problematic if the worst queue intends to
2427 * run the thread we choose.
2428 *
2429 * NOTE! This function only returns a non-NULL rdd when
2430 * another cpu's queue is obviously overloaded. We
2431 * do not want to perform the type of rebalancing
2432 * the schedclock does here because it would result
2433 * in insane process pulling when 'steady' state is
2434 * partially unbalanced (e.g. 6 runnables and only
2435 * 4 cores).
2436 */
2437 rdd = dfly_choose_worst_queue(dd, 0);
2438 if (rdd && dd->uload + usched_dfly_weight7 < rdd->uload) {
2439 if (rdd->uschedcp && spin_trylock(&rdd->spin)) {
2440 nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
2441 spin_unlock(&rdd->spin);
2442 } else {
2443 nlp = NULL;
2444 }
2445 } else {
2446 nlp = NULL;
2447 }
2448 if (nlp) {
2449 if ((dd->flags & DFLY_PCPU_CURMASK) == 0) {
2450 ATOMIC_CPUMASK_ORMASK(dfly_curprocmask, mask);
2451 dd->flags |= DFLY_PCPU_CURMASK;
2452 }
2453 dd->upri = nlp->lwp_priority;
2454 dd->uschedcp = nlp;
2455 #if 0
2456 dd->rrcount = 0; /* reset round robin */
2457 #endif
2458 spin_unlock(&dd->spin);
2459 lwkt_acquire(nlp->lwp_thread);
2460 lwkt_schedule(nlp->lwp_thread);
2461 } else {
2462 /*
2463 * Leave the thread on our run queue. Another
2464 * scheduler will try to pull it later.
2465 */
2466 spin_unlock(&dd->spin);
2467 }
2468 } else {
2469 /*
2470 * devoid of runnable threads and not allowed to steal
2471 * any.
2472 */
2473 spin_unlock(&dd->spin);
2474 }
2475
2476 /*
2477 * We're descheduled unless someone scheduled us. Switch away.
2478 * Exiting the critical section will cause splz() to be called
2479 * for us if interrupts and such are pending.
2480 */
2481 crit_exit_gd(gd);
2482 if (sleepok) {
2483 tsleep(dd->helper_thread, PINTERLOCKED, "schslp",
2484 usched_dfly_poll_ticks);
2485 }
2486 }
2487 }
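
/*
 * Illustrative sketch (not compiled): the deschedule-interlock pattern the
 * helper loop above relies on.  tsleep_interlock() registers interest in
 * the wakeup channel before the runqueue state is examined, and the later
 * tsleep() with PINTERLOCKED only blocks if no wakeup arrived in between,
 * so a wakeup sent right after the check cannot be lost.  The ident and
 * work-check below are hypothetical placeholders.
 */
#if 0
	tsleep_interlock(ident, 0);
	if (no_work_available())		/* hypothetical check */
		tsleep(ident, PINTERLOCKED, "sketch", 0);
#endif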
2488
2489 #if 0
2490 static int
2491 sysctl_usched_dfly_stick_to_level(SYSCTL_HANDLER_ARGS)
2492 {
2493 int error, new_val;
2494
2495 new_val = usched_dfly_stick_to_level;
2496
2497 error = sysctl_handle_int(oidp, &new_val, 0, req);
2498 if (error != 0 || req->newptr == NULL)
2499 return (error);
2500 if (new_val > cpu_topology_levels_number - 1 || new_val < 0)
2501 return (EINVAL);
2502 usched_dfly_stick_to_level = new_val;
2503 return (0);
2504 }
2505 #endif
2506
2507 /*
2508 * Setup the queues and scheduler helpers (scheduler helpers are SMP only).
2509 * Note that curprocmask bit 0 has already been cleared by rqinit() and
2510 * we should not mess with it further.
2511 */
2512 static void
2513 usched_dfly_cpu_init(void)
2514 {
2515 int i;
2516 int j;
2517 int smt_not_supported = 0;
2518 int cache_coherent_not_supported = 0;
2519
2520 if (bootverbose)
2521 kprintf("Start usched_dfly helpers on cpus:\n");
2522
2523 sysctl_ctx_init(&usched_dfly_sysctl_ctx);
2524 usched_dfly_sysctl_tree =
2525 SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
2526 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
2527 "usched_dfly", CTLFLAG_RD, 0, "");
2528
2529 usched_dfly_node_mem = get_highest_node_memory();
2530
2531 lockmgr(&usched_dfly_config_lk, LK_EXCLUSIVE);
2532
2533 for (i = 0; i < ncpus; ++i) {
2534 dfly_pcpu_t dd = &dfly_pcpu[i];
2535 cpumask_t mask;
2536
2537 CPUMASK_ASSBIT(mask, i);
2538 if (CPUMASK_TESTMASK(mask, smp_active_mask) == 0)
2539 continue;
2540
2541 spin_init(&dd->spin, "uschedcpuinit");
2542 dd->cpunode = get_cpu_node_by_cpuid(i);
2543 dd->cpuid = i;
2544 dd->gd = globaldata_find(i);
2545 CPUMASK_ASSBIT(dd->cpumask, i);
2546 for (j = 0; j < NQS; j++) {
2547 TAILQ_INIT(&dd->queues[j]);
2548 TAILQ_INIT(&dd->rtqueues[j]);
2549 TAILQ_INIT(&dd->idqueues[j]);
2550 }
2551 ATOMIC_CPUMASK_NANDBIT(dfly_curprocmask, 0);
2552 if (i == 0)
2553 dd->flags &= ~DFLY_PCPU_CURMASK;
2554
2555 if (dd->cpunode == NULL) {
2556 smt_not_supported = 1;
2557 cache_coherent_not_supported = 1;
2558 if (bootverbose)
2559 kprintf (" cpu%d - WARNING: No CPU NODE "
2560 "found for cpu\n", i);
2561 } else {
2562 switch (dd->cpunode->type) {
2563 case THREAD_LEVEL:
2564 if (bootverbose)
2565 kprintf (" cpu%d - HyperThreading "
2566 "available. Core siblings: ",
2567 i);
2568 break;
2569 case CORE_LEVEL:
2570 smt_not_supported = 1;
2571
2572 if (bootverbose)
2573 kprintf (" cpu%d - No HT available, "
2574 "multi-core/physical "
2575 "cpu. Physical siblings: ",
2576 i);
2577 break;
2578 case CHIP_LEVEL:
2579 smt_not_supported = 1;
2580
2581 if (bootverbose)
2582 kprintf (" cpu%d - No HT available, "
2583 "single-core/physical cpu. "
2584 "Package siblings: ",
2585 i);
2586 break;
2587 default:
2588 /* Let's go for safe defaults here */
2589 smt_not_supported = 1;
2590 cache_coherent_not_supported = 1;
2591 if (bootverbose)
2592 kprintf (" cpu%d - Unknown cpunode->"
2593 "type=%u. siblings: ",
2594 i,
2595 (u_int)dd->cpunode->type);
2596 break;
2597 }
2598
2599 if (bootverbose) {
2600 if (dd->cpunode->parent_node != NULL) {
2601 kprint_cpuset(&dd->cpunode->
2602 parent_node->members);
2603 kprintf("\n");
2604 } else {
2605 kprintf(" no siblings\n");
2606 }
2607 }
2608 }
2609
2610 lwkt_create(dfly_helper_thread, NULL, &dd->helper_thread, NULL,
2611 0, i, "usched %d", i);
2612
2613 /*
2614 * Allow user scheduling on the target cpu. cpu #0 has already
2615 * been enabled in rqinit().
2616 */
2617 if (i) {
2618 ATOMIC_CPUMASK_NANDMASK(dfly_curprocmask, mask);
2619 dd->flags &= ~DFLY_PCPU_CURMASK;
2620 }
2621 if ((dd->flags & DFLY_PCPU_RDYMASK) == 0) {
2622 ATOMIC_CPUMASK_ORMASK(dfly_rdyprocmask, mask);
2623 dd->flags |= DFLY_PCPU_RDYMASK;
2624 }
2625 dd->upri = PRIBASE_NULL;
2626
2627 }
2628
2629 /* usched_dfly sysctl configurable parameters */
2630
2631 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2632 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2633 OID_AUTO, "rrinterval", CTLFLAG_RW,
2634 &usched_dfly_rrinterval, 0, "");
2635 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2636 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2637 OID_AUTO, "decay", CTLFLAG_RW,
2638 &usched_dfly_decay, 0, "Extra decay when not running");
2639 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2640 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2641 OID_AUTO, "ipc_smt", CTLFLAG_RW,
2642 &usched_dfly_ipc_smt, 0, "Pair IPC on hyper-threads");
2643 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2644 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2645 OID_AUTO, "ipc_same", CTLFLAG_RW,
2646 &usched_dfly_ipc_same, 0, "Pair IPC on same thread");
2647 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2648 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2649 OID_AUTO, "poll_ticks", CTLFLAG_RW,
2650 &usched_dfly_poll_ticks, 0, "Poll for work (0 ok)");
2651
2652 /* Add enable/disable option for SMT scheduling if supported */
2653 if (smt_not_supported) {
2654 usched_dfly_smt = 0;
2655 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2656 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2657 OID_AUTO, "smt", CTLFLAG_RD,
2658 "NOT SUPPORTED", 0, "SMT NOT SUPPORTED");
2659 } else {
2660 usched_dfly_smt = 1;
2661 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2662 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2663 OID_AUTO, "smt", CTLFLAG_RW,
2664 &usched_dfly_smt, 0, "Enable SMT scheduling");
2665 }
2666
2667 /*
2668 * Add enable/disable option for cache coherent scheduling
2669 * if supported
2670 */
2671 if (cache_coherent_not_supported) {
2672 usched_dfly_cache_coherent = 0;
2673 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2674 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2675 OID_AUTO, "cache_coherent", CTLFLAG_RD,
2676 "NOT SUPPORTED", 0,
2677 "Cache coherence NOT SUPPORTED");
2678 } else {
2679 usched_dfly_cache_coherent = 1;
2680 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2681 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2682 OID_AUTO, "cache_coherent", CTLFLAG_RW,
2683 &usched_dfly_cache_coherent, 0,
2684 "Enable/Disable cache coherent scheduling");
2685
2686 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2687 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2688 OID_AUTO, "weight1", CTLFLAG_RW,
2689 &usched_dfly_weight1, 200,
2690 "Weight selection for current cpu");
2691
2692 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2693 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2694 OID_AUTO, "weight2", CTLFLAG_RW,
2695 &usched_dfly_weight2, 180,
2696 "Weight selection for wakefrom cpu");
2697
2698 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2699 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2700 OID_AUTO, "weight3", CTLFLAG_RW,
2701 &usched_dfly_weight3, 40,
2702 "Weight selection for num threads on queue");
2703
2704 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2705 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2706 OID_AUTO, "weight4", CTLFLAG_RW,
2707 &usched_dfly_weight4, 160,
2708 "Availability of other idle cpus");
2709
2710 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2711 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2712 OID_AUTO, "weight5", CTLFLAG_RW,
2713 &usched_dfly_weight5, 50,
2714 "Memory attached to node");
2715
2716 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2717 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2718 OID_AUTO, "weight6", CTLFLAG_RW,
2719 &usched_dfly_weight6, 150,
2720 "Transfer weight Feat 0x04");
2721
2722 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2723 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2724 OID_AUTO, "weight7", CTLFLAG_RW,
2725 &usched_dfly_weight7, -100,
2726 "Transfer weight Feat 0x01");
2727
2728 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2729 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2730 OID_AUTO, "fast_resched", CTLFLAG_RW,
2731 &usched_dfly_fast_resched, 0,
2732 "Availability of other idle cpus");
2733
2734 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2735 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2736 OID_AUTO, "features", CTLFLAG_RW,
2737 &usched_dfly_features, 0x8F,
2738 "Allow pulls into empty queues");
2739
2740 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2741 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2742 OID_AUTO, "swmask", CTLFLAG_RW,
2743 &usched_dfly_swmask, ~PPQMASK,
2744 "Queue mask to force thread switch");
2745
2746 #if 0
2747 SYSCTL_ADD_PROC(&usched_dfly_sysctl_ctx,
2748 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2749 OID_AUTO, "stick_to_level",
2750 CTLTYPE_INT | CTLFLAG_RW,
2751 NULL, sizeof usched_dfly_stick_to_level,
2752 sysctl_usched_dfly_stick_to_level, "I",
2753 "Stick a process to this level. See sysctl"
2754 "paremter hw.cpu_topology.level_description");
2755 #endif
2756 }
2757 lockmgr(&usched_dfly_config_lk, LK_RELEASE);
2758 }
2759
2760 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2761 usched_dfly_cpu_init, NULL);
2762
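
/*
 * Userland usage sketch (not part of the kernel build): the knobs
 * registered above appear under kern.usched_dfly and can be read or
 * tuned with sysctl(8), or programmatically via sysctlbyname(3) as in
 * the hypothetical snippet below.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int weight1;
	size_t len = sizeof(weight1);

	/* read kern.usched_dfly.weight1 (weight for the current cpu group) */
	if (sysctlbyname("kern.usched_dfly.weight1", &weight1, &len,
			 NULL, 0) == 0)
		printf("weight1 = %d\n", weight1);
	return (0);
}
#endif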