xref: /dragonfly/sys/kern/usched_dfly.c (revision 62dc643e)
1 /*
2  * Copyright (c) 2012-2017 The DragonFly Project.  All rights reserved.
3  * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>.  All rights reserved.
4  *
5  * This code is derived from software contributed to The DragonFly Project
6  * by Matthew Dillon <dillon@backplane.com>,
7  * by Mihai Carabas <mihai.carabas@gmail.com>
8  * and many others.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  *
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in
18  *    the documentation and/or other materials provided with the
19  *    distribution.
20  * 3. Neither the name of The DragonFly Project nor the names of its
21  *    contributors may be used to endorse or promote products derived
22  *    from this software without specific, prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
28  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  */
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/lock.h>
41 #include <sys/queue.h>
42 #include <sys/proc.h>
43 #include <sys/rtprio.h>
44 #include <sys/uio.h>
45 #include <sys/sysctl.h>
46 #include <sys/resourcevar.h>
47 #include <sys/spinlock.h>
48 #include <sys/cpu_topology.h>
49 #include <sys/thread2.h>
50 #include <sys/spinlock2.h>
51 
52 #include <sys/ktr.h>
53 
54 #include <machine/cpu.h>
55 #include <machine/smp.h>
56 
57 /*
58  * Priorities.  Note that with 32 run queues per scheduler each queue
59  * represents four priority levels.
60  */
61 
62 int dfly_rebalanced;
63 
64 #define MAXPRI			128
65 #define PRIMASK			(MAXPRI - 1)
66 #define PRIBASE_REALTIME	0
67 #define PRIBASE_NORMAL		MAXPRI
68 #define PRIBASE_IDLE		(MAXPRI * 2)
69 #define PRIBASE_THREAD		(MAXPRI * 3)
70 #define PRIBASE_NULL		(MAXPRI * 4)
71 
72 #define NQS	32			/* 32 run queues. */
73 #define PPQ	(MAXPRI / NQS)		/* priorities per queue */
74 #define PPQMASK	(PPQ - 1)
75 
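/*
 * Illustrative example (editorial, not compiled logic): with MAXPRI 128
 * and NQS 32, PPQ works out to 4, so each run queue covers four adjacent
 * priority levels.  A normal-class priority of, say, 77 lands on queue
 * (77 & PRIMASK) / PPQ = 19, the same lwp_rqindex calculation performed
 * in dfly_resetpriority().
 */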
76 /*
77  * NICE_QS	- maximum queues nice can shift the process
78  * EST_QS	- maximum queues estcpu can shift the process
79  *
80  * ESTCPUPPQ	- number of estcpu units per priority queue
81  * ESTCPUMAX	- number of estcpu units
82  *
83  * Remember that NICE runs over the whole -20 to +20 range.
84  */
85 #define NICE_QS		24	/* -20 to +20 shift in whole queues */
86 #define EST_QS		12	/* 0-MAX shift in whole queues */
87 #define ESTCPUPPQ	512
88 #define ESTCPUMAX	(ESTCPUPPQ * EST_QS)
89 #define PRIO_RANGE	(PRIO_MAX - PRIO_MIN + 1)
90 
91 #define ESTCPULIM(v)	min((v), ESTCPUMAX)
92 
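/*
 * Worked numbers (editorial): ESTCPUMAX = ESTCPUPPQ * EST_QS =
 * 512 * 12 = 6144, so a fully cpu-bound thread can be pushed down by at
 * most EST_QS (12) whole queues, and ESTCPULIM() clamps any estcpu
 * arithmetic to that ceiling, e.g. ESTCPULIM(7000) == 6144.
 */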
93 TAILQ_HEAD(rq, lwp);
94 
95 #define lwp_priority	lwp_usdata.dfly.priority
96 #define lwp_forked	lwp_usdata.dfly.forked
97 #define lwp_rqindex	lwp_usdata.dfly.rqindex
98 #define lwp_estcpu	lwp_usdata.dfly.estcpu
99 #define lwp_estfast	lwp_usdata.dfly.estfast
100 #define lwp_uload	lwp_usdata.dfly.uload
101 #define lwp_rqtype	lwp_usdata.dfly.rqtype
102 #define lwp_qcpu	lwp_usdata.dfly.qcpu
103 #define lwp_rrcount	lwp_usdata.dfly.rrcount
104 
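/*
 * Per-cpu scheduler state (editorial summary drawn from the code below):
 * uschedcp is the lwp currently designated to run userland code on this
 * cpu and upri caches its priority; queuebits/rtqueuebits/idqueuebits
 * are bitmasks of non-empty run queues for the normal, realtime and
 * idle classes; uload and ucount aggregate the load and count of lwps
 * accounted via LWP_MP_ULOAD; spin protects the queues and the uschedcp
 * selection.
 */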
105 struct usched_dfly_pcpu {
106 	struct spinlock spin;
107 	struct thread	*helper_thread;
108 	u_short		scancpu;
109 	short		upri;
110 	int		uload;
111 	int		ucount;
112 	struct lwp	*uschedcp;
113 	struct rq	queues[NQS];
114 	struct rq	rtqueues[NQS];
115 	struct rq	idqueues[NQS];
116 	u_int32_t	queuebits;
117 	u_int32_t	rtqueuebits;
118 	u_int32_t	idqueuebits;
119 	int		runqcount;
120 	int		cpuid;
121 	cpumask_t	cpumask;
122 	cpu_node_t	*cpunode;
123 };
124 
125 typedef struct usched_dfly_pcpu	*dfly_pcpu_t;
126 
127 static void dfly_acquire_curproc(struct lwp *lp);
128 static void dfly_release_curproc(struct lwp *lp);
129 static void dfly_select_curproc(globaldata_t gd);
130 static void dfly_setrunqueue(struct lwp *lp);
131 static void dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp);
132 static void dfly_schedulerclock(struct lwp *lp, sysclock_t period,
133 				sysclock_t cpstamp);
134 static void dfly_recalculate_estcpu(struct lwp *lp);
135 static void dfly_resetpriority(struct lwp *lp);
136 static void dfly_forking(struct lwp *plp, struct lwp *lp);
137 static void dfly_exiting(struct lwp *lp, struct proc *);
138 static void dfly_uload_update(struct lwp *lp);
139 static void dfly_yield(struct lwp *lp);
140 static void dfly_changeqcpu_locked(struct lwp *lp,
141 				dfly_pcpu_t dd, dfly_pcpu_t rdd);
142 static dfly_pcpu_t dfly_choose_best_queue(struct lwp *lp);
143 static dfly_pcpu_t dfly_choose_worst_queue(dfly_pcpu_t dd);
144 static dfly_pcpu_t dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp);
145 static void dfly_need_user_resched_remote(void *dummy);
146 static struct lwp *dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
147 					  struct lwp *chklp, int worst);
148 static void dfly_remrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
149 static void dfly_setrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
150 static void dfly_changedcpu(struct lwp *lp);
151 
152 struct usched usched_dfly = {
153 	{ NULL },
154 	"dfly", "Original DragonFly Scheduler",
155 	NULL,			/* default registration */
156 	NULL,			/* default deregistration */
157 	dfly_acquire_curproc,
158 	dfly_release_curproc,
159 	dfly_setrunqueue,
160 	dfly_schedulerclock,
161 	dfly_recalculate_estcpu,
162 	dfly_resetpriority,
163 	dfly_forking,
164 	dfly_exiting,
165 	dfly_uload_update,
166 	NULL,			/* setcpumask not supported */
167 	dfly_yield,
168 	dfly_changedcpu
169 };
170 
171 /*
172  * We have NQS (32) run queues per scheduling class.  For the normal
173  * class, there are 128 priorities scaled onto these 32 queues.  New
174  * processes are added to the last entry in each queue, and processes
175  * are selected for running by taking them from the head and maintaining
176  * a simple FIFO arrangement.  Realtime and Idle priority processes have
177  * an explicit 0-31 priority which maps directly onto their class queue
178  * index.  When a queue has something in it, the corresponding bit is
179  * set in the queuebits variable, allowing a single read to determine
180  * the state of all 32 queues and then a ffs() to find the first busy
181  * queue.
182  */
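/*
 * A minimal illustration of the queuebits idiom described above, an
 * editorial sketch mirroring dfly_chooseproc_locked() rather than
 * additional scheduler logic:
 *
 *	tsqbits = rdd->queuebits;
 *	if (tsqbits) {
 *		pri = bsfl(tsqbits);	(index of the lowest set bit)
 *		q   = &rdd->queues[pri];
 *		lp  = TAILQ_FIRST(q);
 *	}
 *
 * A single 32-bit read plus one bsfl() scan locates the highest-priority
 * non-empty queue for the class.
 */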
183 					/* currently running a user process */
184 static cpumask_t dfly_curprocmask = CPUMASK_INITIALIZER_ALLONES;
185 static cpumask_t dfly_rdyprocmask;	/* ready to accept a user process */
186 static struct usched_dfly_pcpu dfly_pcpu[MAXCPU];
187 static struct sysctl_ctx_list usched_dfly_sysctl_ctx;
188 static struct sysctl_oid *usched_dfly_sysctl_tree;
189 
190 /* Debug info exposed through debug.* sysctl */
191 
192 static int usched_dfly_debug = -1;
193 SYSCTL_INT(_debug, OID_AUTO, dfly_scdebug, CTLFLAG_RW,
194 	   &usched_dfly_debug, 0,
195 	   "Print debug information for this pid");
196 
197 static int usched_dfly_pid_debug = -1;
198 SYSCTL_INT(_debug, OID_AUTO, dfly_pid_debug, CTLFLAG_RW,
199 	   &usched_dfly_pid_debug, 0,
200 	   "Print KTR debug information for this pid");
201 
202 static int usched_dfly_chooser = 0;
203 SYSCTL_INT(_debug, OID_AUTO, dfly_chooser, CTLFLAG_RW,
204 	   &usched_dfly_chooser, 0,
205 	   "Number of cpu-choice debug lines to print (counts down to zero)");
206 
207 /*
208  * WARNING!
209  *
210  * The fork bias can have a large effect on the system in the face of a
211  * make -j N or other high-forking applications.
212  *
213  * Larger values are much less invasive vs other things that
214  * might be running in the system, but can cause exec chains
215  * such as those typically generated by make to have higher
216  * latencies in the face of modest load.
217  *
218  * Lower values are more invasive but have reduced latencies
219  * for such exec chains.
220  *
221  *	make -j 10 buildkernel example, build times:
222  *
223  *	     +0	3:04
224  *	     +1 3:14	-5.2%	<-- default
225  *	     +2 3:22	-8.9%
226  *
227  * This issue occurs due to the way the scheduler affinity heuristics work.
228  * There is no way to really 'fix' the affinity heuristics because, when it
229  * comes right down to it, trying to instantly schedule a process on an
230  * available cpu (even if it will become unavailable a microsecond later)
231  * tends to cause processes to shift around between cpus and sockets too much
232  * and breaks the affinity.
233  *
234  * NOTE: Heavily concurrent builds typically have enough things on the pan
235  *	 that they remain time-efficient even with a higher bias.
236  */
237 static int usched_dfly_forkbias = 1;
238 SYSCTL_INT(_debug, OID_AUTO, dfly_forkbias, CTLFLAG_RW,
239 	   &usched_dfly_forkbias, 0,
240 	   "Fork bias for estcpu in whole queues");
241 
242 /*
243  * Tuning usched_dfly - configurable through kern.usched_dfly.
244  *
245  * weight1 - Tries to keep threads on their current cpu.  If you
246  *	     make this value too large the scheduler will not be
247  *	     able to load-balance large loads.
248  *
249  * weight2 - If non-zero, detects thread pairs undergoing synchronous
250  *	     communications and tries to move them closer together.
251  *	     Behavior is adjusted by bit 4 of features (0x10).
252  *
253  *	     WARNING!  Weight2 is a ridiculously sensitive parameter,
254  *	     a small value is recommended.
255  *
256  * weight3 - Weighting based on the number of recently runnable threads
257  *	     on the userland scheduling queue (ignoring their loads).
258  *	     A nominal value here prevents high-priority (low-load)
259  *	     threads from accumulating on one cpu core when other
260  *	     cores are available.
261  *
262  *	     This value should be left fairly small relative to weight1
263  *	     and weight4.
264  *
265  * weight4 - Weighting based on other cpu queues being available
266  *	     or running processes with higher lwp_priority values.
267  *
268  *	     This allows a thread to migrate to another nearby cpu if it
269  *	     is unable to run on the current cpu based on the other cpu
270  *	     being idle or running a lower priority (higher lwp_priority)
271  * thread.  This value should be large enough to override weight1.
272  *
273  * features - These flags can be set or cleared to enable or disable various
274  *	      features.
275  *
276  *	      0x01	Enable idle-cpu pulling			(default)
277  *	      0x02	Enable proactive pushing		(default)
278  *	      0x04	Enable rebalancing rover		(default)
279  *	      0x08	Enable more proactive pushing		(default)
280  *	      0x10	(flip weight2 limit on same cpu)	(default)
281  *	      0x20	choose best cpu for forked process
282  *	      0x40	choose current cpu for forked process
283  *	      0x80	choose random cpu for forked process	(default)
284  */
285 static int usched_dfly_smt = 0;
286 static int usched_dfly_cache_coherent = 0;
287 static int usched_dfly_weight1 = 200;	/* keep thread on current cpu */
288 static int usched_dfly_weight2 = 180;	/* synchronous peer's current cpu */
289 static int usched_dfly_weight3 = 40;	/* number of threads on queue */
290 static int usched_dfly_weight4 = 160;	/* availability of idle cores */
291 static int usched_dfly_features = 0x8F;	/* allow pulls */
292 static int usched_dfly_fast_resched = 0;/* delta priority / resched */
293 static int usched_dfly_swmask = ~PPQMASK; /* priority mask for td_upri */
294 static int usched_dfly_rrinterval = (ESTCPUFREQ + 9) / 10;
295 static int usched_dfly_decay = 8;
296 
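/*
 * Editorial notes on the defaults above: the feature mask 0x8F is
 * 0x01 | 0x02 | 0x04 | 0x08 | 0x80, i.e. idle-cpu pulling, proactive
 * pushing, the rebalancing rover, more-proactive pushing, and a random
 * cpu for forked processes.  With the default weights, weight1 (200)
 * and weight4 (160) dominate weight3 (40), matching the guidance above
 * that weight3 stay small.
 *
 * A userland peek at one of these knobs might look like the sketch
 * below; the exact node names are assumed from the kern.usched_dfly
 * comment above and are not verified here (needs <sys/types.h>,
 * <sys/sysctl.h> and <stdio.h>):
 *
 *	int w1;
 *	size_t len = sizeof(w1);
 *	if (sysctlbyname("kern.usched_dfly.weight1", &w1, &len, NULL, 0) == 0)
 *		printf("usched_dfly weight1 = %d\n", w1);
 */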
297 /* KTR debug printings */
298 
299 KTR_INFO_MASTER(usched);
300 
301 #if !defined(KTR_USCHED_DFLY)
302 #define	KTR_USCHED_DFLY	KTR_ALL
303 #endif
304 
305 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc, 0,
306     "USCHED_DFLY(chooseproc: pid %d, old_cpuid %d, curr_cpuid %d)",
307     pid_t pid, int old_cpuid, int curr);
308 
309 /*
310  * This function is called when the kernel intends to return to userland.
311  * It is responsible for making the thread the current designated userland
312  * thread for this cpu, blocking if necessary.
313  *
314  * The kernel will not depress our LWKT priority until after we return,
315  * in case we have to shove over to another cpu.
316  *
317  * We must determine our thread's disposition before we switch away.  This
318  * is very sensitive code.
319  *
320  * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
321  * TO ANOTHER CPU!  Because most of the kernel assumes that no migration will
322  * occur, this function is called only under very controlled circumstances.
323  */
324 static void
325 dfly_acquire_curproc(struct lwp *lp)
326 {
327 	globaldata_t gd;
328 	dfly_pcpu_t dd;
329 	dfly_pcpu_t rdd;
330 	thread_t td;
331 	int force_resched;
332 
333 	/*
334 	 * Make sure we aren't sitting on a tsleep queue.
335 	 */
336 	td = lp->lwp_thread;
337 	crit_enter_quick(td);
338 	if (td->td_flags & TDF_TSLEEPQ)
339 		tsleep_remove(td);
340 	dfly_recalculate_estcpu(lp);
341 
342 	gd = mycpu;
343 	dd = &dfly_pcpu[gd->gd_cpuid];
344 
345 	/*
346 	 * Process any pending interrupts/ipi's, then handle reschedule
347 	 * requests.  dfly_release_curproc() will try to assign a new
348 	 * uschedcp that isn't us and otherwise NULL it out.
349 	 */
350 	force_resched = 0;
351 	if ((td->td_mpflags & TDF_MP_BATCH_DEMARC) &&
352 	    lp->lwp_rrcount >= usched_dfly_rrinterval / 2) {
353 		force_resched = 1;
354 	}
355 
356 	if (user_resched_wanted()) {
357 		if (dd->uschedcp == lp)
358 			force_resched = 1;
359 		clear_user_resched();
360 		dfly_release_curproc(lp);
361 	}
362 
363 	/*
364 	 * Loop until we are the current user thread.
365 	 *
366 	 * NOTE: dd spinlock not held at top of loop.
367 	 */
368 	if (dd->uschedcp == lp)
369 		lwkt_yield_quick();
370 
371 	while (dd->uschedcp != lp) {
372 		lwkt_yield_quick();
373 
374 		spin_lock(&dd->spin);
375 
376 		/* This lwp is an outcast; force reschedule. */
377 		if (__predict_false(
378 		    CPUMASK_TESTBIT(lp->lwp_cpumask, gd->gd_cpuid) == 0) &&
379 		    (rdd = dfly_choose_best_queue(lp)) != dd) {
380 			dfly_changeqcpu_locked(lp, dd, rdd);
381 			spin_unlock(&dd->spin);
382 			lwkt_deschedule(lp->lwp_thread);
383 			dfly_setrunqueue_dd(rdd, lp);
384 			lwkt_switch();
385 			gd = mycpu;
386 			dd = &dfly_pcpu[gd->gd_cpuid];
387 			continue;
388 		}
389 
390 		if (force_resched &&
391 		   (usched_dfly_features & 0x08) &&
392 		   (rdd = dfly_choose_best_queue(lp)) != dd) {
393 			/*
394 			 * We are not or are no longer the current lwp and a
395 			 * forced reschedule was requested.  Figure out the
396 			 * best cpu to run on (our current cpu will be given
397 			 * significant weight).
398 			 *
399 			 * (if a reschedule was not requested we want to
400 			 *  move this step after the uschedcp tests).
401 			 */
402 			dfly_changeqcpu_locked(lp, dd, rdd);
403 			spin_unlock(&dd->spin);
404 			lwkt_deschedule(lp->lwp_thread);
405 			dfly_setrunqueue_dd(rdd, lp);
406 			lwkt_switch();
407 			gd = mycpu;
408 			dd = &dfly_pcpu[gd->gd_cpuid];
409 			continue;
410 		}
411 
412 		/*
413 		 * Either no reschedule was requested or the best queue was
414 		 * dd, and no current process has been selected.  We can
415 		 * trivially become the current lwp on the current cpu.
416 		 */
417 		if (dd->uschedcp == NULL) {
418 			atomic_clear_int(&lp->lwp_thread->td_mpflags,
419 					 TDF_MP_DIDYIELD);
420 			ATOMIC_CPUMASK_ORBIT(dfly_curprocmask, gd->gd_cpuid);
421 			dd->uschedcp = lp;
422 			dd->upri = lp->lwp_priority;
423 			KKASSERT(lp->lwp_qcpu == dd->cpuid);
424 			spin_unlock(&dd->spin);
425 			break;
426 		}
427 
428 		/*
429 		 * Put us back on the same run queue unconditionally.
430 		 *
431 		 * Set rrcount to rrinterval to force placement at end of queue.
432 		 * Select the worst queue to ensure we round-robin,
433 		 * but do not change estcpu.
434 		 */
435 		if (lp->lwp_thread->td_mpflags & TDF_MP_DIDYIELD) {
436 			u_int32_t tsqbits;
437 
438 			switch(lp->lwp_rqtype) {
439 			case RTP_PRIO_NORMAL:
440 				tsqbits = dd->queuebits;
441 				spin_unlock(&dd->spin);
442 
443 				lp->lwp_rrcount = usched_dfly_rrinterval;
444 				if (tsqbits)
445 					lp->lwp_rqindex = bsrl(tsqbits);
446 				break;
447 			default:
448 				spin_unlock(&dd->spin);
449 				break;
450 			}
451 			lwkt_deschedule(lp->lwp_thread);
452 			dfly_setrunqueue_dd(dd, lp);
453 			atomic_clear_int(&lp->lwp_thread->td_mpflags,
454 					 TDF_MP_DIDYIELD);
455 			lwkt_switch();
456 			gd = mycpu;
457 			dd = &dfly_pcpu[gd->gd_cpuid];
458 			continue;
459 		}
460 
461 		/*
462 		 * Can we steal the current designated user thread?
463 		 *
464 		 * If we do the other thread will stall when it tries to
465 		 * return to userland, possibly rescheduling elsewhere.
466 		 *
467 		 * It is important to do a masked test to avoid the edge
468 		 * case where two near-equal-priority threads are constantly
469 		 * interrupting each other.
470 		 *
471 		 * In the exact match case another thread has already gained
472 		 * uschedcp and lowered its priority, if we steal it the
473 		 * other thread will stay stuck on the LWKT runq and not
474 		 * push to another cpu.  So don't steal on equal-priority even
475 		 * though it might appear to be more beneficial due to not
476 		 * having to switch back to the other thread's context.
477 		 *
478 		 * usched_dfly_fast_resched requires that two threads be
479 		 * significantly far apart in priority in order to interrupt.
480 		 *
481 		 * If better but not sufficiently far apart, the current
482 		 * uschedcp will be interrupted at the next scheduler clock.
483 		 */
484 		if (dd->uschedcp &&
485 		   (dd->upri & ~PPQMASK) >
486 		   (lp->lwp_priority & ~PPQMASK) + usched_dfly_fast_resched) {
487 			dd->uschedcp = lp;
488 			dd->upri = lp->lwp_priority;
489 			KKASSERT(lp->lwp_qcpu == dd->cpuid);
490 			spin_unlock(&dd->spin);
491 			break;
492 		}
493 		/*
494 		 * We are not the current lwp, figure out the best cpu
495 		 * to run on (our current cpu will be given significant
496 		 * weight).  Loop on cpu change.
497 		 */
498 		if ((usched_dfly_features & 0x02) &&
499 		    force_resched == 0 &&
500 		    (rdd = dfly_choose_best_queue(lp)) != dd) {
501 			dfly_changeqcpu_locked(lp, dd, rdd);
502 			spin_unlock(&dd->spin);
503 			lwkt_deschedule(lp->lwp_thread);
504 			dfly_setrunqueue_dd(rdd, lp);
505 			lwkt_switch();
506 			gd = mycpu;
507 			dd = &dfly_pcpu[gd->gd_cpuid];
508 			continue;
509 		}
510 
511 		/*
512 		 * We cannot become the current lwp, place the lp on the
513 		 * run-queue of this or another cpu and deschedule ourselves.
514 		 *
515 		 * When we are reactivated we will have another chance.
516 		 *
517 		 * Reload after a switch or setrunqueue/switch possibly
518 		 * moved us to another cpu.
519 		 */
520 		spin_unlock(&dd->spin);
521 		lwkt_deschedule(lp->lwp_thread);
522 		dfly_setrunqueue_dd(dd, lp);
523 		lwkt_switch();
524 		gd = mycpu;
525 		dd = &dfly_pcpu[gd->gd_cpuid];
526 	}
527 
528 	/*
529 	 * Make sure upri is synchronized, then yield to LWKT threads as
530 	 * needed before returning.  This could result in another reschedule.
531 	 * XXX
532 	 */
533 	crit_exit_quick(td);
534 
535 	KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
536 }
537 
538 /*
539  * DFLY_RELEASE_CURPROC
540  *
541  * This routine detaches the current thread from the userland scheduler,
542  * usually because the thread needs to run or block in the kernel (at
543  * kernel priority) for a while.
544  *
545  * This routine is also responsible for selecting a new thread to
546  * make the current thread.
547  *
548  * NOTE: This implementation differs from the dummy example in that
549  * dfly_select_curproc() is able to select the current process, whereas
550  * dummy_select_curproc() is not.  This means we have to NULL out
551  * uschedcp.
552  *
553  * Additionally, note that we may already be on a run queue if releasing
554  * via the lwkt_switch() in dfly_setrunqueue().
555  */
556 static void
557 dfly_release_curproc(struct lwp *lp)
558 {
559 	globaldata_t gd = mycpu;
560 	dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
561 
562 	/*
563 	 * Make sure td_wakefromcpu is defaulted.  This will be overwritten
564 	 * by wakeup().
565 	 */
566 	if (dd->uschedcp == lp) {
567 		KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
568 		spin_lock(&dd->spin);
569 		if (dd->uschedcp == lp) {
570 			dd->uschedcp = NULL;	/* don't let lp be selected */
571 			dd->upri = PRIBASE_NULL;
572 			ATOMIC_CPUMASK_NANDBIT(dfly_curprocmask, gd->gd_cpuid);
573 			spin_unlock(&dd->spin);
574 			dfly_select_curproc(gd);
575 		} else {
576 			spin_unlock(&dd->spin);
577 		}
578 	}
579 }
580 
581 /*
582  * DFLY_SELECT_CURPROC
583  *
584  * Select a new current process for this cpu and clear any pending user
585  * reschedule request.  The cpu currently has no current process.
586  *
587  * This routine is also responsible for equal-priority round-robining,
588  * typically triggered from dfly_schedulerclock().  In our dummy example
589  * all the 'user' threads are LWKT scheduled all at once and we just
590  * call lwkt_switch().
591  *
592  * The calling process is not on the queue and cannot be selected.
593  */
594 static
595 void
596 dfly_select_curproc(globaldata_t gd)
597 {
598 	dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
599 	struct lwp *nlp;
600 	int cpuid = gd->gd_cpuid;
601 
602 	crit_enter_gd(gd);
603 
604 	spin_lock(&dd->spin);
605 	nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);
606 
607 	if (nlp) {
608 		ATOMIC_CPUMASK_ORBIT(dfly_curprocmask, cpuid);
609 		dd->upri = nlp->lwp_priority;
610 		dd->uschedcp = nlp;
611 #if 0
612 		dd->rrcount = 0;		/* reset round robin */
613 #endif
614 		spin_unlock(&dd->spin);
615 		lwkt_acquire(nlp->lwp_thread);
616 		lwkt_schedule(nlp->lwp_thread);
617 	} else {
618 		spin_unlock(&dd->spin);
619 	}
620 	crit_exit_gd(gd);
621 }
622 
623 /*
624  * Place the specified lwp on the user scheduler's run queue.  This routine
625  * must be called with the thread descheduled.  The lwp must be runnable.
626  * It must not be possible for anyone else to explicitly schedule this thread.
627  *
628  * The thread may be the current thread as a special case.
629  */
630 static void
631 dfly_setrunqueue(struct lwp *lp)
632 {
633 	dfly_pcpu_t dd;
634 	dfly_pcpu_t rdd;
635 
636 	/*
637 	 * First validate the process LWKT state.
638 	 */
639 	KASSERT(lp->lwp_stat == LSRUN, ("setrunqueue: lwp not LSRUN"));
640 	KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0,
641 	    ("lwp %d/%d already on runq! flag %08x/%08x", lp->lwp_proc->p_pid,
642 	     lp->lwp_tid, lp->lwp_proc->p_flags, lp->lwp_flags));
643 	KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);
644 
645 	/*
646 	 * NOTE: dd/rdd do not necessarily represent the current cpu.
647 	 *	 Instead they may represent the cpu the thread was last
648 	 *	 scheduled on or inherited by its parent.
649 	 */
650 	dd = &dfly_pcpu[lp->lwp_qcpu];
651 	rdd = dd;
652 
653 	/*
654 	 * This process is not supposed to be scheduled anywhere or assigned
655 	 * as the current process anywhere.  Assert the condition.
656 	 */
657 	KKASSERT(rdd->uschedcp != lp);
658 
659 	/*
660 	 * Ok, we have to setrunqueue some target cpu and request a reschedule
661 	 * if necessary.
662 	 *
663 	 * We have to choose the best target cpu.  It might not be the current
664 	 * target even if the current cpu has no running user thread (for
665 	 * example, because the current cpu might be a hyperthread and its
666 	 * sibling has a thread assigned).
667 	 *
668 	 * If we just forked it is most optimal to run the child on the same
669 	 * cpu just in case the parent decides to wait for it (thus getting
670 	 * off that cpu).  As long as there is nothing else runnable on the
671 	 * cpu, that is.  If we did this unconditionally a parent forking
672 	 * multiple children before waiting (e.g. make -j N) leaves other
673 	 * cpus idle that could be working.
674 	 */
675 	if (lp->lwp_forked) {
676 		lp->lwp_forked = 0;
677 		if (usched_dfly_features & 0x20)
678 			rdd = dfly_choose_best_queue(lp);
679 		else if (usched_dfly_features & 0x40)
680 			rdd = &dfly_pcpu[lp->lwp_qcpu];
681 		else if (usched_dfly_features & 0x80)
682 			rdd = dfly_choose_queue_simple(rdd, lp);
683 		else if (dfly_pcpu[lp->lwp_qcpu].runqcount)
684 			rdd = dfly_choose_best_queue(lp);
685 		else
686 			rdd = &dfly_pcpu[lp->lwp_qcpu];
687 	} else {
688 		rdd = dfly_choose_best_queue(lp);
689 		/* rdd = &dfly_pcpu[lp->lwp_qcpu]; */
690 	}
691 	if (lp->lwp_qcpu != rdd->cpuid) {
692 		spin_lock(&dd->spin);
693 		dfly_changeqcpu_locked(lp, dd, rdd);
694 		spin_unlock(&dd->spin);
695 	}
696 	dfly_setrunqueue_dd(rdd, lp);
697 }
698 
699 /*
700  * Change qcpu to rdd->cpuid.  The dd the lp is CURRENTLY on must be
701  * spin-locked on-call.  rdd does not have to be.
702  */
703 static void
704 dfly_changeqcpu_locked(struct lwp *lp, dfly_pcpu_t dd, dfly_pcpu_t rdd)
705 {
706 	if (lp->lwp_qcpu != rdd->cpuid) {
707 		if (lp->lwp_mpflags & LWP_MP_ULOAD) {
708 			atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
709 			atomic_add_int(&dd->uload, -lp->lwp_uload);
710 			atomic_add_int(&dd->ucount, -1);
711 		}
712 		lp->lwp_qcpu = rdd->cpuid;
713 	}
714 }
715 
716 /*
717  * Place lp on rdd's runqueue.  Nothing is locked on call.  This function
718  * also performs all necessary ancillary notification actions.
719  */
720 static void
721 dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp)
722 {
723 	globaldata_t rgd;
724 
725 	/*
726 	 * We might be moving the lp to another cpu's run queue, and once
727 	 * on the runqueue (even if it is our cpu's), another cpu can rip
728 	 * it away from us.
729 	 *
730 	 * TDF_MIGRATING might already be set if this is part of a
731 	 * remrunqueue+setrunqueue sequence.
732 	 */
733 	if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0)
734 		lwkt_giveaway(lp->lwp_thread);
735 
736 	rgd = globaldata_find(rdd->cpuid);
737 
738 	/*
739 	 * We lose control of the lp the moment we release the spinlock
740 	 * after having placed it on the queue.  i.e. another cpu could pick
741 	 * it up, or it could exit, or its priority could be further
742 	 * adjusted, or something like that.
743 	 *
744 	 * WARNING! rdd can point to a foreign cpu!
745 	 */
746 	spin_lock(&rdd->spin);
747 	dfly_setrunqueue_locked(rdd, lp);
748 
749 	/*
750 	 * Potentially interrupt the currently-running thread
751 	 */
752 	if ((rdd->upri & ~PPQMASK) <= (lp->lwp_priority & ~PPQMASK)) {
753 		/*
754 		 * Currently running thread is better or same, do not
755 		 * interrupt.
756 		 */
757 		spin_unlock(&rdd->spin);
758 	} else if ((rdd->upri & ~PPQMASK) <= (lp->lwp_priority & ~PPQMASK) +
759 		   usched_dfly_fast_resched) {
760 		/*
761 		 * Currently running thread is not better, but not so bad
762 		 * that we need to interrupt it.  Let it run for one more
763 		 * scheduler tick.
764 		 */
765 		if (rdd->uschedcp &&
766 		    rdd->uschedcp->lwp_rrcount < usched_dfly_rrinterval) {
767 			rdd->uschedcp->lwp_rrcount = usched_dfly_rrinterval - 1;
768 		}
769 		spin_unlock(&rdd->spin);
770 	} else if (rgd == mycpu) {
771 		/*
772 		 * We should interrupt the currently running thread, which
773 		 * is on the current cpu.  However, if DIDYIELD is set we
774 		 * round-robin unconditionally and do not interrupt it.
775 		 */
776 		spin_unlock(&rdd->spin);
777 		if (rdd->uschedcp == NULL)
778 			wakeup_mycpu(rdd->helper_thread); /* XXX */
779 		if ((lp->lwp_thread->td_mpflags & TDF_MP_DIDYIELD) == 0)
780 			need_user_resched();
781 	} else {
782 		/*
783 		 * We should interrupt the currently running thread, which
784 		 * is on a different cpu.
785 		 */
786 		spin_unlock(&rdd->spin);
787 		lwkt_send_ipiq(rgd, dfly_need_user_resched_remote, NULL);
788 	}
789 }
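/*
 * Editorial note on the masked comparisons above: PPQMASK is PPQ - 1
 * (3), so "upri & ~PPQMASK" compares priorities at whole-queue
 * granularity.  Priorities that fall in the same 4-wide bucket compare
 * equal and do not trigger preemption, and a non-zero
 * usched_dfly_fast_resched (default 0) widens that dead band further.
 */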
790 
791 /*
792  * This routine is called from a systimer IPI.  It MUST be MP-safe and
793  * the BGL IS NOT HELD ON ENTRY.  This routine is called at ESTCPUFREQ on
794  * each cpu.
795  */
796 static
797 void
798 dfly_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
799 {
800 	globaldata_t gd = mycpu;
801 	dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
802 
803 	/*
804 	 * Spinlocks also hold a critical section so there should not be
805 	 * any active.
806 	 */
807 	KKASSERT(gd->gd_spinlocks == 0 || dumping);
808 
809 	/*
810 	 * If lp is NULL we might be contended and lwkt_switch() may have
811 	 * cycled into the idle thread.  Apply the tick to the current
812 	 * process on this cpu if it is contended.
813 	 */
814 	if (gd->gd_curthread == &gd->gd_idlethread) {
815 		lp = dd->uschedcp;
816 		if (lp && (lp->lwp_thread == NULL ||
817 			   lp->lwp_thread->td_contended == 0)) {
818 			lp = NULL;
819 		}
820 	}
821 
822 	/*
823 	 * Dock thread for tick
824 	 */
825 	if (lp) {
826 		/*
827 		 * Do we need to round-robin?  We round-robin 10 times a
828 		 * second.  This should only occur for cpu-bound batch
829 		 * processes.
830 		 */
831 		if (++lp->lwp_rrcount >= usched_dfly_rrinterval) {
832 			lp->lwp_thread->td_wakefromcpu = -1;
833 			need_user_resched();
834 		}
835 
836 		/*
837 		 * Adjust estcpu upward using a real time equivalent
838 		 * calculation, and recalculate lp's priority.  Estcpu
839 		 * is increased such that it will cap-out over a period
840 		 * of one second.
841 		 */
842 		lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu +
843 					   ESTCPUMAX / ESTCPUFREQ + 1);
844 		dfly_resetpriority(lp);
845 	}
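	/*
	 * Worked equation (editorial): each tick above adds roughly
	 * ESTCPUMAX / ESTCPUFREQ estcpu, so after ESTCPUFREQ ticks (one
	 * second of continuous cpu use) the accumulated estcpu reaches
	 * ESTCPUMAX and ESTCPULIM() pins it there, which is the "cap-out
	 * over a period of one second" mentioned above.
	 */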
846 
847 	/*
848 	 * Rebalance two cpus every 8 ticks, pulling the worst thread
849 	 * from the worst cpu's queue into a rotating cpu number.
850 	 *
851 	 * This mechanic is needed because the push algorithms can
852  * steady-state in a non-optimal configuration.  We need to mix it
853 	 * up a little, even if it means breaking up a paired thread, so
854 	 * the push algorithms can rebalance the degenerate conditions.
855 	 * This portion of the algorithm exists to ensure stability at the
856 	 * selected weightings.
857 	 *
858 	 * Because we might be breaking up optimal conditions we do not want
859 	 * to execute this too quickly, hence we only rebalance approximately
860  * ~7-8 times per second.  The pushes, on the other hand, are capable
861  * of moving threads to other cpus at a much higher rate.
862 	 *
863 	 * We choose the most heavily loaded thread from the worst queue
864 	 * in order to ensure that multiple heavy-weight threads on the same
865 	 * queue get broken up, and also because these threads are the most
866 	 * likely to be able to remain in place.  Hopefully then any pairings,
867 	 * if applicable, migrate to where these threads are.
868 	 */
869 	if ((usched_dfly_features & 0x04) &&
870 	    ((u_int)sched_ticks & 7) == 0 &&
871 	    (u_int)sched_ticks / 8 % ncpus == gd->gd_cpuid) {
872 		/*
873 		 * Our cpu is up.
874 		 */
875 		struct lwp *nlp;
876 		dfly_pcpu_t rdd;
877 
878 		rdd = dfly_choose_worst_queue(dd);
879 		if (rdd) {
880 			spin_lock(&dd->spin);
881 			if (spin_trylock(&rdd->spin)) {
882 				nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
883 				spin_unlock(&rdd->spin);
884 				if (nlp == NULL)
885 					spin_unlock(&dd->spin);
886 			} else {
887 				spin_unlock(&dd->spin);
888 				nlp = NULL;
889 			}
890 		} else {
891 			nlp = NULL;
892 		}
893 		/* dd->spin held if nlp != NULL */
894 
895 		/*
896 		 * Either schedule it or add it to our queue.
897 		 */
898 		if (nlp &&
899 		    (nlp->lwp_priority & ~PPQMASK) < (dd->upri & ~PPQMASK)) {
900 			ATOMIC_CPUMASK_ORMASK(dfly_curprocmask, dd->cpumask);
901 			dd->upri = nlp->lwp_priority;
902 			dd->uschedcp = nlp;
903 #if 0
904 			dd->rrcount = 0;	/* reset round robin */
905 #endif
906 			spin_unlock(&dd->spin);
907 			lwkt_acquire(nlp->lwp_thread);
908 			lwkt_schedule(nlp->lwp_thread);
909 		} else if (nlp) {
910 			dfly_setrunqueue_locked(dd, nlp);
911 			spin_unlock(&dd->spin);
912 		}
913 	}
914 }
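/*
 * Editorial example of the rebalancing rover above: (sched_ticks & 7)
 * selects every 8th tick and (sched_ticks / 8) % ncpus rotates which
 * cpu performs the pull on that tick.  With ncpus == 4, cpu 0 pulls at
 * ticks 0, 32, 64, ..., cpu 1 at ticks 8, 40, 72, ..., so each cpu gets
 * a turn roughly once per 8 * ncpus ticks.
 */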
915 
916 /*
917  * Called from acquire and from kern_synch's one-second timer (one of the
918  * callout helper threads) with a critical section held.
919  *
920  * Adjust p_estcpu based on our single-cpu load, p_nice, and compensate for
921  * overall system load.
922  *
923  * Note that no recalculation occurs for a process which sleeps and wakes
924  * up in the same tick.  That is, a system doing thousands of context
925  * switches per second will still only do serious estcpu calculations
926  * ESTCPUFREQ times per second.
927  */
928 static
929 void
930 dfly_recalculate_estcpu(struct lwp *lp)
931 {
932 	globaldata_t gd = mycpu;
933 	sysclock_t cpbase;
934 	sysclock_t ttlticks;
935 	int estcpu;
936 	int decay_factor;
937 	int ucount;
938 
939 	/*
940 	 * We have to subtract periodic to get the last schedclock
941 	 * timeout time, otherwise we would get the upcoming timeout.
942 	 * Keep in mind that a process can migrate between cpus and
943 	 * while the scheduler clock should be very close, boundary
944 	 * conditions could lead to a small negative delta.
945 	 */
946 	cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
947 
948 	if (lp->lwp_slptime > 1) {
949 		/*
950 		 * Too much time has passed, do a coarse correction.
951 		 */
952 		lp->lwp_estcpu = lp->lwp_estcpu >> 1;
953 		dfly_resetpriority(lp);
954 		lp->lwp_cpbase = cpbase;
955 		lp->lwp_cpticks = 0;
956 		lp->lwp_estfast = 0;
957 	} else if (lp->lwp_cpbase != cpbase) {
958 		/*
959 		 * Adjust estcpu if we are in a different tick.  Don't waste
960 		 * time if we are in the same tick.
961 		 *
962 		 * First calculate the number of ticks in the measurement
963 		 * interval.  The ttlticks calculation can wind up 0 due to
964 		 * a bug in the handling of lwp_slptime  (as yet not found),
965 		 * so make sure we do not get a divide by 0 panic.
966 		 */
967 		ttlticks = (cpbase - lp->lwp_cpbase) /
968 			   gd->gd_schedclock.periodic;
969 		if ((ssysclock_t)ttlticks < 0) {
970 			ttlticks = 0;
971 			lp->lwp_cpbase = cpbase;
972 		}
973 		if (ttlticks < 4)
974 			return;
975 		updatepcpu(lp, lp->lwp_cpticks, ttlticks);
976 
977 		/*
978 		 * Calculate instant estcpu based percentage of (one) cpu
979 		 * used and exponentially average it into the current
980 		 * lwp_estcpu.
981 		 */
982 		ucount = dfly_pcpu[lp->lwp_qcpu].ucount;
983 		estcpu = lp->lwp_cpticks * ESTCPUMAX / ttlticks;
984 
985 		/*
986 		 * The higher ttlticks gets, the more meaning the calculation
987 		 * has and the smaller our decay_factor in the exponential
988 		 * average.
989 		 *
990 		 * The uload calculation has been removed because it actually
991 		 * makes things worse, causing processes which use less cpu
992 		 * (such as a browser) to be pumped up and treated the same
993 		 * as a cpu-bound process (such as a make).  The same effect
994 		 * can occur with sufficient load without the uload
995 		 * calculation, but occurs less quickly and takes more load.
996 		 * In addition, the less cpu a process uses the smaller the
997 		 * effect of the overload.
998 		 */
999 		if (ttlticks >= hz)
1000 			decay_factor = 1;
1001 		else
1002 			decay_factor = hz - ttlticks;
1003 
1004 		lp->lwp_estcpu = ESTCPULIM(
1005 				(lp->lwp_estcpu * ttlticks + estcpu) /
1006 				(ttlticks + 1));
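		/*
		 * Editorial worked example: the assignment above is an
		 * exponential moving average with weight ttlticks on the old
		 * value and 1 on the instantaneous sample.  At the minimum
		 * ttlticks of 4 a fresh sample contributes 1/5 of the new
		 * lwp_estcpu; a longer measurement interval weights it
		 * correspondingly less.
		 */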
1007 		if (usched_dfly_debug == lp->lwp_proc->p_pid)
1008 			kprintf(" finalestcpu %d %d\n", estcpu, lp->lwp_estcpu);
1009 
1010 #if 0
1011 		/*
1012 		 * Calculate the percentage of one cpu being used then
1013 		 * compensate for any system load in excess of ncpus.
1014 		 *
1015 		 * For example, if we have 8 cores and 16 running cpu-bound
1016 		 * processes then all things being equal each process will
1017 		 * get 50% of one cpu.  We need to pump this value back
1018 		 * up to 100% so the estcpu calculation properly adjusts
1019 		 * the process's dynamic priority.
1020 		 *
1021 		 * estcpu is scaled by ESTCPUMAX, pctcpu is scaled by FSCALE.
1022 		 */
1023 
1024 		estcpu = (lp->lwp_pctcpu * ESTCPUMAX) >> FSHIFT;
1025 		ucount = dfly_ucount;
1026 		if (ucount > ncpus) {
1027 			estcpu += estcpu * (ucount - ncpus) / ncpus;
1028 		}
1029 
1030 		if (usched_dfly_debug == lp->lwp_proc->p_pid) {
1031 			kprintf("pid %d lwp %p estcpu %3d %3d cp %d/%d",
1032 				lp->lwp_proc->p_pid, lp,
1033 				estcpu, lp->lwp_estcpu,
1034 				lp->lwp_cpticks, ttlticks);
1035 		}
1036 
1037 		/*
1038 		 * Adjust lp->lwp_estcpu.  The decay factor determines how
1039 		 * quickly lwp_estcpu collapses to its realtime calculation.
1040 		 * A slower collapse gives us a more accurate number over
1041 		 * the long term but can create problems with bursty threads
1042 		 * or threads which become cpu hogs.
1043 		 *
1044 		 * To solve this problem, newly started lwps and lwps which
1045 		 * are restarting after having been asleep for a while are
1046 		 * given a much, much faster decay in order to quickly
1047 		 * detect whether they become cpu-bound.
1048 		 *
1049 		 * NOTE: p_nice is accounted for in dfly_resetpriority(),
1050 		 *	 and not here, but we must still ensure that a
1051 		 *	 cpu-bound nice -20 process does not completely
1052 		 *	 override a cpu-bound nice +20 process.
1053 		 *
1054 		 * NOTE: We must use ESTCPULIM() here to deal with any
1055 		 *	 overshoot.
1056 		 */
1057 		decay_factor = usched_dfly_decay;
1058 		if (decay_factor < 1)
1059 			decay_factor = 1;
1060 		if (decay_factor > 1024)
1061 			decay_factor = 1024;
1062 
1063 		if (lp->lwp_estfast < usched_dfly_decay) {
1064 			++lp->lwp_estfast;
1065 			lp->lwp_estcpu = ESTCPULIM(
1066 				(lp->lwp_estcpu * lp->lwp_estfast + estcpu) /
1067 				(lp->lwp_estfast + 1));
1068 		} else {
1069 			lp->lwp_estcpu = ESTCPULIM(
1070 				(lp->lwp_estcpu * decay_factor + estcpu) /
1071 				(decay_factor + 1));
1072 		}
1073 
1074 		if (usched_dfly_debug == lp->lwp_proc->p_pid)
1075 			kprintf(" finalestcpu %d\n", lp->lwp_estcpu);
1076 #endif
1077 		dfly_resetpriority(lp);
1078 		lp->lwp_cpbase += ttlticks * gd->gd_schedclock.periodic;
1079 		lp->lwp_cpticks = 0;
1080 	}
1081 }
1082 
1083 /*
1084  * Compute the priority of a process when running in user mode.
1085  * Arrange to reschedule if the resulting priority is better
1086  * than that of the current process.
1087  *
1088  * This routine may be called with any process.
1089  *
1090  * This routine is called by fork1() for initial setup with the process off
1091  * the run queue, and also may be called normally with the process on or
1092  * off the run queue.
1093  */
1094 static void
1095 dfly_resetpriority(struct lwp *lp)
1096 {
1097 	dfly_pcpu_t rdd;
1098 	int newpriority;
1099 	u_short newrqtype;
1100 	int rcpu;
1101 	int checkpri;
1102 	int estcpu;
1103 	int delta_uload;
1104 
1105 	crit_enter();
1106 
1107 	/*
1108 	 * Lock the scheduler (lp) belongs to.  This can be on a different
1109 	 * cpu.  Handle races.  This loop breaks out with the appropriate
1110 	 * rdd locked.
1111 	 */
1112 	for (;;) {
1113 		rcpu = lp->lwp_qcpu;
1114 		cpu_ccfence();
1115 		rdd = &dfly_pcpu[rcpu];
1116 		spin_lock(&rdd->spin);
1117 		if (rcpu == lp->lwp_qcpu)
1118 			break;
1119 		spin_unlock(&rdd->spin);
1120 	}
1121 
1122 	/*
1123 	 * Calculate the new priority and queue type
1124 	 */
1125 	newrqtype = lp->lwp_rtprio.type;
1126 
1127 	switch(newrqtype) {
1128 	case RTP_PRIO_REALTIME:
1129 	case RTP_PRIO_FIFO:
1130 		newpriority = PRIBASE_REALTIME +
1131 			     (lp->lwp_rtprio.prio & PRIMASK);
1132 		break;
1133 	case RTP_PRIO_NORMAL:
1134 		/*
1135 		 * Calculate the new priority.
1136 		 *
1137 		 * nice contributes up to NICE_QS queues (typ 32 - full range)
1138 		 * estcpu contributes up to EST_QS queues (typ 16)
1139 		 *
1140 		 * A nice +20 process receives 1/10 cpu vs nice+0.  Niced
1141 		 * processes more than 20 apart may receive no cpu, so cpu
1142 		 * bound nice -20 can prevent a nice +5 from getting any
1143 		 * cpu.  A nice+0, being in the middle, always gets some cpu
1144 		 * no matter what.
1145 		 */
1146 		estcpu = lp->lwp_estcpu;
1147 		newpriority = (lp->lwp_proc->p_nice - PRIO_MIN) *
1148 			      (NICE_QS * PPQ) / PRIO_RANGE;
1149 		newpriority += estcpu * PPQ / ESTCPUPPQ;
1150 		if (newpriority < 0)
1151 			newpriority = 0;
1152 		if (newpriority >= MAXPRI)
1153 			newpriority = MAXPRI - 1;
1154 		newpriority += PRIBASE_NORMAL;
1155 		break;
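	/*
	 * Editorial worked example for the normal class: with PRIO_MIN -20,
	 * PRIO_MAX +20 (PRIO_RANGE 41), NICE_QS 24 and PPQ 4, a nice +0
	 * process starts at (0 - (-20)) * 96 / 41 = 46, and a fully
	 * cpu-bound estcpu of ESTCPUMAX (6144) adds 6144 * 4 / 512 = 48.
	 * Extreme combinations are clipped to MAXPRI - 1 before
	 * PRIBASE_NORMAL is added.
	 */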
1156 	case RTP_PRIO_IDLE:
1157 		newpriority = PRIBASE_IDLE + (lp->lwp_rtprio.prio & PRIMASK);
1158 		break;
1159 	case RTP_PRIO_THREAD:
1160 		newpriority = PRIBASE_THREAD + (lp->lwp_rtprio.prio & PRIMASK);
1161 		break;
1162 	default:
1163 		panic("Bad RTP_PRIO %d", newrqtype);
1164 		/* NOT REACHED */
1165 	}
1166 
1167 	/*
1168 	 * The LWKT scheduler doesn't dive into usched structures; give it a hint
1169 	 * on the relative priority of user threads running in the kernel.
1170 	 * The LWKT scheduler will always ensure that a user thread running
1171 	 * in the kernel will get cpu some time, regardless of its upri,
1172 	 * but can decide not to instantly switch from one kernel or user
1173 	 * mode user thread to a kernel-mode user thread when it has a less
1174 	 * desirable user priority.
1175 	 *
1176 	 * td_upri has normal sense (higher values are more desirable), so
1177 	 * negate it (this is a different field from lp->lwp_priority).
1178 	 */
1179 	lp->lwp_thread->td_upri = -(newpriority & usched_dfly_swmask);
1180 
1181 	/*
1182 	 * The newpriority incorporates the queue type so do a simple masked
1183 	 * check to determine if the process has moved to another queue.  If
1184 	 * it has, and it is currently on a run queue, then move it.
1185 	 *
1186 	 * Since uload is ~PPQMASK masked, no modifications are necessary if
1187 	 * we end up in the same run queue.
1188 	 *
1189 	 * Reset rrcount if moving to a higher-priority queue, otherwise
1190 	 * retain rrcount.
1191 	 */
1192 	if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
1193 		if (lp->lwp_priority < newpriority)
1194 			lp->lwp_rrcount = 0;
1195 		if (lp->lwp_mpflags & LWP_MP_ONRUNQ) {
1196 			dfly_remrunqueue_locked(rdd, lp);
1197 			lp->lwp_priority = newpriority;
1198 			lp->lwp_rqtype = newrqtype;
1199 			lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1200 			dfly_setrunqueue_locked(rdd, lp);
1201 			checkpri = 1;
1202 		} else {
1203 			lp->lwp_priority = newpriority;
1204 			lp->lwp_rqtype = newrqtype;
1205 			lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1206 			checkpri = 0;
1207 		}
1208 	} else {
1209 		/*
1210 		 * In the same PPQ, uload cannot change.
1211 		 */
1212 		lp->lwp_priority = newpriority;
1213 		checkpri = 1;
1214 		rcpu = -1;
1215 	}
1216 
1217 	/*
1218 	 * Adjust effective load.
1219 	 *
1220 	 * Calculate load then scale up or down geometrically based on p_nice.
1221 	 * Processes niced up (positive) are less important, and processes
1222 	 * niced downward (negative) are more important.  The higher the uload,
1223 	 * the more important the thread.
1224 	 */
1225 	/* 0-192 (ESTCPUMAX / NQS), 0-100% cpu */
1226 	delta_uload = lp->lwp_estcpu / NQS;
1227 	delta_uload -= delta_uload * lp->lwp_proc->p_nice / (PRIO_MAX + 1);
1228 	delta_uload -= lp->lwp_uload;
1229 	lp->lwp_uload += delta_uload;
1230 	if (lp->lwp_mpflags & LWP_MP_ULOAD)
1231 		atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload, delta_uload);
1232 
1233 	/*
1234 	 * Determine if we need to reschedule the target cpu.  This only
1235 	 * occurs if the LWP is already on a scheduler queue, which means
1236 	 * that idle cpu notification has already occurred.  At most we
1237 	 * need only issue a need_user_resched() on the appropriate cpu.
1238 	 *
1239 	 * The LWP may be owned by a CPU different from the current one,
1240 	 * in which case dd->uschedcp may be modified without an MP lock
1241 	 * or a spinlock held.  The worst that happens is that the code
1242 	 * below causes a spurious need_user_resched() on the target CPU
1243 	 * and dd->upri to be wrong for a short period of time, both of
1244 	 * which are harmless.
1245 	 *
1246 	 * If checkpri is 0 we are adjusting the priority of the current
1247 	 * process, possibly higher (less desirable), so ignore the upri
1248 	 * check which will fail in that case.
1249 	 */
1250 	if (rcpu >= 0) {
1251 		if (CPUMASK_TESTBIT(dfly_rdyprocmask, rcpu) &&
1252 		    (checkpri == 0 ||
1253 		     (rdd->upri & ~PRIMASK) >
1254 		     (lp->lwp_priority & ~PRIMASK))) {
1255 			if (rcpu == mycpu->gd_cpuid) {
1256 				spin_unlock(&rdd->spin);
1257 				need_user_resched();
1258 			} else {
1259 				spin_unlock(&rdd->spin);
1260 				lwkt_send_ipiq(globaldata_find(rcpu),
1261 					       dfly_need_user_resched_remote,
1262 					       NULL);
1263 			}
1264 		} else {
1265 			spin_unlock(&rdd->spin);
1266 		}
1267 	} else {
1268 		spin_unlock(&rdd->spin);
1269 	}
1270 	crit_exit();
1271 }
1272 
1273 static
1274 void
1275 dfly_yield(struct lwp *lp)
1276 {
1277 	if (lp->lwp_qcpu != mycpu->gd_cpuid)
1278 		return;
1279 	KKASSERT(lp == curthread->td_lwp);
1280 
1281 	/*
1282 	 * Don't set need_user_resched() or mess with rrcount or anything.
1283 	 * the TDF flag will override everything as long as we release.
1284 	 */
1285 	atomic_set_int(&lp->lwp_thread->td_mpflags, TDF_MP_DIDYIELD);
1286 	dfly_release_curproc(lp);
1287 }
1288 
1289 /*
1290  * Thread was forcefully migrated to another cpu.  Normally forced migrations
1291  * are used for iterations, after which the kernel returns to the original
1292  * cpu before returning to userland, so this hook is not needed.  However,
1293  * if the kernel migrates a thread to another cpu and wants to leave it
1294  * there, it has to call this scheduler helper.
1295  *
1296  * Note that the lwkt_migratecpu() function also released the thread, so
1297  * we don't have to worry about that.
1298  */
1299 static
1300 void
1301 dfly_changedcpu(struct lwp *lp)
1302 {
1303 	dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1304 	dfly_pcpu_t rdd = &dfly_pcpu[mycpu->gd_cpuid];
1305 
1306 	if (dd != rdd) {
1307 		spin_lock(&dd->spin);
1308 		dfly_changeqcpu_locked(lp, dd, rdd);
1309 		spin_unlock(&dd->spin);
1310 	}
1311 }
1312 
1313 /*
1314  * Called from fork1() when a new child process is being created.
1315  *
1316  * Give the child process an initial estcpu that is more batchy than
1317  * its parent and dock the parent for the fork (but do not
1318  * reschedule the parent).
1321  *
1322  * XXX lwp should be "spawning" instead of "forking"
1323  */
1324 static void
1325 dfly_forking(struct lwp *plp, struct lwp *lp)
1326 {
1327 	int estcpu;
1328 
1329 	/*
1330 	 * Put the child 4 queue slots (out of 32) higher than the parent
1331 	 * (less desirable than the parent).
1332 	 */
1333 	lp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu +
1334 				   ESTCPUPPQ * usched_dfly_forkbias);
1335 	lp->lwp_forked = 1;
1336 	lp->lwp_estfast = 0;
1337 
1338 	/*
1339 	 * Even though the lp will be scheduled specially the first time
1340 	 * due to lp->lwp_forked, it is important to initialize lwp_qcpu
1341 	 * to avoid favoring a fixed cpu.
1342 	 */
1343 #if 0
1344 	static uint16_t save_cpu;
1345 	lp->lwp_qcpu = ++save_cpu % ncpus;
1346 #else
1347 	lp->lwp_qcpu = plp->lwp_qcpu;
1348 	if (CPUMASK_TESTBIT(lp->lwp_cpumask, lp->lwp_qcpu) == 0)
1349 		lp->lwp_qcpu = BSFCPUMASK(lp->lwp_cpumask);
1350 #endif
1351 
1352 	/*
1353 	 * Dock the parent a cost for the fork, protecting us from fork
1354 	 * bombs.  If the parent is forking quickly this makes both the
1355 	 * parent and child more batchy.
1356 	 */
1357 	estcpu = plp->lwp_estcpu + ESTCPUPPQ / 16;
1358 	plp->lwp_estcpu = ESTCPULIM(estcpu);
1359 }
1360 
1361 /*
1362  * Called when a lwp is being removed from this scheduler, typically
1363  * during lwp_exit().  We have to clean out any ULOAD accounting before
1364  * we can let the lp go.  The dd->spin lock is not needed for uload
1365  * updates.
1366  *
1367  * Scheduler dequeueing has already occurred, no further action in that
1368  * regard is needed.
1369  */
1370 static void
1371 dfly_exiting(struct lwp *lp, struct proc *child_proc)
1372 {
1373 	dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1374 
1375 	if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1376 		atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1377 		atomic_add_int(&dd->uload, -lp->lwp_uload);
1378 		atomic_add_int(&dd->ucount, -1);
1379 	}
1380 }
1381 
1382 /*
1383  * This function cannot block in any way, but spinlocks are ok.
1384  *
1385  * Update the uload based on the state of the thread (whether it is going
1386  * to sleep or running again).  The uload is meant to be a longer-term
1387 	 * load and not an instantaneous load.
1388  */
1389 static void
1390 dfly_uload_update(struct lwp *lp)
1391 {
1392 	dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1393 
1394 	if (lp->lwp_thread->td_flags & TDF_RUNQ) {
1395 		if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1396 			spin_lock(&dd->spin);
1397 			if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1398 				atomic_set_int(&lp->lwp_mpflags,
1399 					       LWP_MP_ULOAD);
1400 				atomic_add_int(&dd->uload, lp->lwp_uload);
1401 				atomic_add_int(&dd->ucount, 1);
1402 			}
1403 			spin_unlock(&dd->spin);
1404 		}
1405 	} else if (lp->lwp_slptime > 0) {
1406 		if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1407 			spin_lock(&dd->spin);
1408 			if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1409 				atomic_clear_int(&lp->lwp_mpflags,
1410 						 LWP_MP_ULOAD);
1411 				atomic_add_int(&dd->uload, -lp->lwp_uload);
1412 				atomic_add_int(&dd->ucount, -1);
1413 			}
1414 			spin_unlock(&dd->spin);
1415 		}
1416 	}
1417 }
1418 
1419 /*
1420  * chooseproc() is called when a cpu needs a user process to LWKT schedule,
1421  * it selects a user process and returns it.  If chklp is non-NULL and chklp
1422  * has a better or equal priority than the process that would otherwise be
1423  * chosen, NULL is returned.
1424  *
1425  * Until we fix the RUNQ code the chklp test has to be strict or we may
1426  * bounce between processes trying to acquire the current process designation.
1427  *
1428  * Must be called with rdd->spin locked.  The spinlock is left intact through
1429  * the entire routine.  dd->spin does not have to be locked.
1430  *
1431  * If worst is non-zero this function finds the worst thread instead of the
1432  * best thread (used by the schedulerclock-based rover).
1433  */
1434 static
1435 struct lwp *
1436 dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
1437 		       struct lwp *chklp, int worst)
1438 {
1439 	struct lwp *lp;
1440 	struct rq *q;
1441 	u_int32_t *which;
1442 	u_int32_t pri;
1443 	u_int32_t rtqbits;
1444 	u_int32_t tsqbits;
1445 	u_int32_t idqbits;
1446 
1447 	rtqbits = rdd->rtqueuebits;
1448 	tsqbits = rdd->queuebits;
1449 	idqbits = rdd->idqueuebits;
1450 
1451 	if (worst) {
1452 		if (idqbits) {
1453 			pri = bsrl(idqbits);
1454 			q = &rdd->idqueues[pri];
1455 			which = &rdd->idqueuebits;
1456 		} else if (tsqbits) {
1457 			pri = bsrl(tsqbits);
1458 			q = &rdd->queues[pri];
1459 			which = &rdd->queuebits;
1460 		} else if (rtqbits) {
1461 			pri = bsrl(rtqbits);
1462 			q = &rdd->rtqueues[pri];
1463 			which = &rdd->rtqueuebits;
1464 		} else {
1465 			return (NULL);
1466 		}
1467 		lp = TAILQ_LAST(q, rq);
1468 	} else {
1469 		if (rtqbits) {
1470 			pri = bsfl(rtqbits);
1471 			q = &rdd->rtqueues[pri];
1472 			which = &rdd->rtqueuebits;
1473 		} else if (tsqbits) {
1474 			pri = bsfl(tsqbits);
1475 			q = &rdd->queues[pri];
1476 			which = &rdd->queuebits;
1477 		} else if (idqbits) {
1478 			pri = bsfl(idqbits);
1479 			q = &rdd->idqueues[pri];
1480 			which = &rdd->idqueuebits;
1481 		} else {
1482 			return (NULL);
1483 		}
1484 		lp = TAILQ_FIRST(q);
1485 	}
1486 	KASSERT(lp, ("chooseproc: no lwp on busy queue"));
1487 
1488 	/*
1489 	 * If the passed lwp <chklp> is reasonably close to the selected
1490 	 * lwp <lp>, return NULL (indicating that <chklp> should be kept).
1491 	 *
1492 	 * Note that we must error on the side of <chklp> to avoid bouncing
1493 	 * between threads in the acquire code.
1494 	 */
1495 	if (chklp) {
1496 		if (chklp->lwp_priority < lp->lwp_priority + PPQ)
1497 			return(NULL);
1498 	}
1499 
1500 	KTR_COND_LOG(usched_chooseproc,
1501 	    lp->lwp_proc->p_pid == usched_dfly_pid_debug,
1502 	    lp->lwp_proc->p_pid,
1503 	    lp->lwp_thread->td_gd->gd_cpuid,
1504 	    mycpu->gd_cpuid);
1505 
1506 	KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
1507 	atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1508 	TAILQ_REMOVE(q, lp, lwp_procq);
1509 	--rdd->runqcount;
1510 	if (TAILQ_EMPTY(q))
1511 		*which &= ~(1 << pri);
1512 
1513 	/*
1514 	 * If we are choosing a process from rdd with the intent to
1515 	 * move it to dd, lwp_qcpu must be adjusted while rdd's spinlock
1516 	 * is still held.
1517 	 */
1518 	if (rdd != dd) {
1519 		if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1520 			atomic_add_int(&rdd->uload, -lp->lwp_uload);
1521 			atomic_add_int(&rdd->ucount, -1);
1522 		}
1523 		lp->lwp_qcpu = dd->cpuid;
1524 		atomic_add_int(&dd->uload, lp->lwp_uload);
1525 		atomic_add_int(&dd->ucount, 1);
1526 		atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1527 	}
1528 	return lp;
1529 }
1530 
1531 /*
1532  * USED TO PUSH RUNNABLE LWPS TO THE LEAST LOADED CPU.
1533  *
1534  * Choose a cpu node to schedule lp on, hopefully nearby its current
1535  * node.
1536  *
1537  * We give the current node a modest advantage for obvious reasons.
1538  *
1539  * We also give the node the thread was woken up FROM a slight advantage
1540  * in order to try to schedule paired threads which synchronize/block waiting
1541  * for each other fairly close to each other.  Similarly in a network setting
1542  * this feature will also attempt to place a user process near the kernel
1543  * protocol thread that is feeding it data.  THIS IS A CRITICAL PART of the
1544  * algorithm as it heuristically groups synchronizing processes for locality
1545  * of reference in multi-socket systems.
1546  *
1547  * We check against running processes and give a big advantage if there
1548  * are none running.
1549  *
1550  * The caller will normally dfly_setrunqueue() lp on the returned queue.
1551  *
1552  * When the topology is known choose a cpu whose group has, in aggregate,
1553  * the lowest weighted load.
1554  */
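/*
 * Editorial summary of the weighting below, using the default tunables:
 * a candidate group's load is the sum of its cpus' uload plus
 * ucount * weight3 (40), less weight4 (160) for each completely idle
 * cpu, averaged over the group; the result is then reduced by weight1
 * (200) if lp is already in the group and biased by +/- weight2 (180)
 * for the group lp was woken from.  The loop below also subtracts lp's
 * own contribution so it does not bias the comparison.
 */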
1555 static
1556 dfly_pcpu_t
1557 dfly_choose_best_queue(struct lwp *lp)
1558 {
1559 	cpumask_t wakemask;
1560 	cpumask_t mask;
1561 	cpu_node_t *cpup;
1562 	cpu_node_t *cpun;
1563 	cpu_node_t *cpub;
1564 	dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1565 	dfly_pcpu_t rdd;
1566 	int wakecpu;
1567 	int cpuid;
1568 	int n;
1569 	int count;
1570 	int load;
1571 	int lowest_load;
1572 
1573 	/*
1574 	 * When the topology is unknown choose a random cpu that is hopefully
1575 	 * idle.
1576 	 */
1577 	if (dd->cpunode == NULL)
1578 		return (dfly_choose_queue_simple(dd, lp));
1579 
1580 	/*
1581 	 * Pairing mask
1582 	 */
1583 	if ((wakecpu = lp->lwp_thread->td_wakefromcpu) >= 0)
1584 		wakemask = dfly_pcpu[wakecpu].cpumask;
1585 	else
1586 		CPUMASK_ASSZERO(wakemask);
1587 
1588 	/*
1589 	 * When the topology is known choose a cpu whose group has, in
1590 	 * aggregate, the lowest weighted load.
1591 	 */
1592 	cpup = root_cpu_node;
1593 	rdd = dd;
1594 
1595 	while (cpup) {
1596 		/*
1597 		 * Degenerate case super-root
1598 		 */
1599 		if (cpup->child_no == 1) {
1600 			cpup = cpup->child_node[0];
1601 			continue;
1602 		}
1603 
1604 		/*
1605 		 * Terminal cpunode
1606 		 */
1607 		if (cpup->child_no == 0) {
1608 			rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1609 			break;
1610 		}
1611 
1612 		cpub = NULL;
1613 		lowest_load = 0x7FFFFFFF;
1614 
1615 		for (n = 0; n < cpup->child_no; ++n) {
1616 			/*
1617 			 * Accumulate load information for all cpus
1618 			 * which are members of this node.
1619 			 */
1620 			cpun = cpup->child_node[n];
1621 			mask = cpun->members;
1622 			CPUMASK_ANDMASK(mask, usched_global_cpumask);
1623 			CPUMASK_ANDMASK(mask, smp_active_mask);
1624 			CPUMASK_ANDMASK(mask, lp->lwp_cpumask);
1625 			if (CPUMASK_TESTZERO(mask))
1626 				continue;
1627 
1628 			count = 0;
1629 			load = 0;
1630 
1631 			while (CPUMASK_TESTNZERO(mask)) {
1632 				cpuid = BSFCPUMASK(mask);
1633 				rdd = &dfly_pcpu[cpuid];
1634 				load += rdd->uload;
1635 				load += rdd->ucount * usched_dfly_weight3;
1636 
1637 				if (rdd->uschedcp == NULL &&
1638 				    rdd->runqcount == 0 &&
1639 				    globaldata_find(cpuid)->gd_tdrunqcount == 0
1640 				) {
1641 					load -= usched_dfly_weight4;
1642 				}
1643 #if 0
1644 				else if (rdd->upri > lp->lwp_priority + PPQ) {
1645 					load -= usched_dfly_weight4 / 2;
1646 				}
1647 #endif
1648 				CPUMASK_NANDBIT(mask, cpuid);
1649 				++count;
1650 			}
1651 
1652 			/*
1653 			 * Compensate if the lp is already accounted for in
1654 			 * the aggregate uload for this mask set.  We want
1655 			 * to calculate the loads as if lp were not present,
1656 			 * otherwise the calculation is bogus.
1657 			 */
1658 			if ((lp->lwp_mpflags & LWP_MP_ULOAD) &&
1659 			    CPUMASK_TESTMASK(dd->cpumask, cpun->members)) {
1660 				load -= lp->lwp_uload;
1661 				load -= usched_dfly_weight3;
1662 			}
1663 
1664 			load /= count;
1665 
1666 			/*
1667 			 * Advantage the cpu group (lp) is already on.
1668 			 */
1669 			if (CPUMASK_TESTMASK(cpun->members, dd->cpumask))
1670 				load -= usched_dfly_weight1;
1671 
1672 			/*
1673 			 * Advantage the cpu group we want to pair (lp) to,
1674 			 * but don't let it go to the exact same cpu as
1675 			 * the wakecpu target.
1676 			 *
1677 			 * We do this by checking whether cpun is a
1678 			 * terminal node or not.  All cpun's at the same
1679 			 * level will either all be terminal or all not
1680 			 * terminal.
1681 			 *
1682 			 * If it is and we match we disadvantage the load.
1683 			 * If it is and we don't match we advantage the load.
1684 			 *
1685 			 * Also note that we are effectively disadvantaging
1686 			 * all-but-one by the same amount, so it won't affect
1687 			 * the weight1 factor for the all-but-one nodes.
1688 			 */
1689 			if (CPUMASK_TESTMASK(cpun->members, wakemask)) {
1690 				if (cpun->child_no != 0) {
1691 					/* advantage */
1692 					load -= usched_dfly_weight2;
1693 				} else {
1694 					if (usched_dfly_features & 0x10)
1695 						load += usched_dfly_weight2;
1696 					else
1697 						load -= usched_dfly_weight2;
1698 				}
1699 			}
1700 
1701 			/*
1702 			 * Calculate the best load
1703 			 */
1704 			if (cpub == NULL || lowest_load > load ||
1705 			    (lowest_load == load &&
1706 			     CPUMASK_TESTMASK(cpun->members, dd->cpumask))
1707 			) {
1708 				lowest_load = load;
1709 				cpub = cpun;
1710 			}
1711 		}
1712 		cpup = cpub;
1713 	}
1714 	/* Dispatch this outcast to a proper CPU. */
1715 	if (__predict_false(CPUMASK_TESTBIT(lp->lwp_cpumask, rdd->cpuid) == 0))
1716 		rdd = &dfly_pcpu[BSFCPUMASK(lp->lwp_cpumask)];
1717 	if (usched_dfly_chooser > 0) {
1718 		--usched_dfly_chooser;		/* only N lines */
1719 		kprintf("lp %02d->%02d %s\n",
1720 			lp->lwp_qcpu, rdd->cpuid, lp->lwp_proc->p_comm);
1721 	}
1722 	return (rdd);
1723 }
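
/*
 * Illustrative sketch only (hypothetical helper, never compiled): roughly
 * how the per-node weighted load computed in dfly_choose_best_queue()
 * above combines for one candidate node.  'uload_sum', 'ucount_sum',
 * 'idle_cpus' and 'member_cpus' stand in for the aggregation over the
 * node's member cpus; the real loop additionally applies the
 * weight1/weight2 adjustments for the current node and the wakeup-source
 * node.
 */
#if 0
static int
example_node_load(int uload_sum, int ucount_sum, int idle_cpus,
		  int member_cpus)
{
	int load;

	load = uload_sum + ucount_sum * usched_dfly_weight3;
	load -= idle_cpus * usched_dfly_weight4;   /* idle cpus attract work */
	return (load / member_cpus);
}
#endif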
1724 
1725 /*
1726  * USED TO PULL RUNNABLE LWPS FROM THE MOST LOADED CPU.
1727  *
1728  * Choose the worst queue close to dd's cpu node with a non-empty runq
1729  * that is NOT dd.  Also require that the moving of the highest-load thread
1730  * from rdd to dd does not cause the uload's to cross each other.
1731  *
1732  * This is used by the thread chooser when the current cpu's queues are
1733  * empty to steal a thread from another cpu's queue.  We want to offload
1734  * the most heavily-loaded queue.
1735  */
1736 static
1737 dfly_pcpu_t
1738 dfly_choose_worst_queue(dfly_pcpu_t dd)
1739 {
1740 	cpumask_t mask;
1741 	cpu_node_t *cpup;
1742 	cpu_node_t *cpun;
1743 	cpu_node_t *cpub;
1744 	dfly_pcpu_t rdd;
1745 	int cpuid;
1746 	int n;
1747 	int count;
1748 	int load;
1749 #if 0
1750 	int pri;
1751 	int hpri;
1752 #endif
1753 	int highest_load;
1754 
1755 	/*
1756 	 * When the topology is unknown there is no meaningful "worst"
1757 	 * queue to pick from, so do not steal anything.
1758 	 */
1759 	if (dd->cpunode == NULL) {
1760 		return (NULL);
1761 	}
1762 
1763 	/*
1764 	 * When the topology is known, choose a cpu whose group has, in
1765 	 * aggregate, the highest weighted load.
1766 	 */
1767 	cpup = root_cpu_node;
1768 	rdd = dd;
1769 	while (cpup) {
1770 		/*
1771 		 * Degenerate case super-root
1772 		 */
1773 		if (cpup->child_no == 1) {
1774 			cpup = cpup->child_node[0];
1775 			continue;
1776 		}
1777 
1778 		/*
1779 		 * Terminal cpunode
1780 		 */
1781 		if (cpup->child_no == 0) {
1782 			rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1783 			break;
1784 		}
1785 
1786 		cpub = NULL;
1787 		highest_load = 0;
1788 
1789 		for (n = 0; n < cpup->child_no; ++n) {
1790 			/*
1791 			 * Accumulate load information for all cpus
1792 			 * which are members of this node.
1793 			 */
1794 			cpun = cpup->child_node[n];
1795 			mask = cpun->members;
1796 			CPUMASK_ANDMASK(mask, usched_global_cpumask);
1797 			CPUMASK_ANDMASK(mask, smp_active_mask);
1798 			if (CPUMASK_TESTZERO(mask))
1799 				continue;
1800 
1801 			count = 0;
1802 			load = 0;
1803 
1804 			while (CPUMASK_TESTNZERO(mask)) {
1805 				cpuid = BSFCPUMASK(mask);
1806 				rdd = &dfly_pcpu[cpuid];
1807 				load += rdd->uload;
1808 				load += rdd->ucount * usched_dfly_weight3;
1809 
1810 				if (rdd->uschedcp == NULL &&
1811 				    rdd->runqcount == 0 &&
1812 				    globaldata_find(cpuid)->gd_tdrunqcount == 0
1813 				) {
1814 					load -= usched_dfly_weight4;
1815 				}
1816 #if 0
1817 				else if (rdd->upri > dd->upri + PPQ) {
1818 					load -= usched_dfly_weight4 / 2;
1819 				}
1820 #endif
1821 				CPUMASK_NANDBIT(mask, cpuid);
1822 				++count;
1823 			}
1824 			load /= count;
1825 
1826 			/*
1827 			 * Prefer candidates which are somewhat closer to
1828 			 * our cpu.
1829 			 */
1830 			if (CPUMASK_TESTMASK(dd->cpumask, cpun->members))
1831 				load += usched_dfly_weight1;
1832 
1833 			/*
1834 			 * The best candidate is the one with the worst
1835 			 * (highest) load.
1836 			 */
1837 			if (cpub == NULL || highest_load < load ||
1838 			    (highest_load == load &&
1839 			     CPUMASK_TESTMASK(cpun->members, dd->cpumask))) {
1840 				highest_load = load;
1841 				cpub = cpun;
1842 			}
1843 		}
1844 		cpup = cpub;
1845 	}
1846 
1847 	/*
1848 	 * We never return our own node (dd), and only return a remote
1849 	 * node if its load is significantly worse than ours (i.e. where
1850 	 * stealing a thread would be considered reasonable).
1851 	 *
1852 	 * This also helps us avoid breaking paired threads apart which
1853 	 * can have disastrous effects on performance.
1854 	 */
1855 	if (rdd == dd)
1856 		return(NULL);
1857 
1858 #if 0
1859 	hpri = 0;
1860 	if (rdd->rtqueuebits && hpri < (pri = bsrl(rdd->rtqueuebits)))
1861 		hpri = pri;
1862 	if (rdd->queuebits && hpri < (pri = bsrl(rdd->queuebits)))
1863 		hpri = pri;
1864 	if (rdd->idqueuebits && hpri < (pri = bsrl(rdd->idqueuebits)))
1865 		hpri = pri;
1866 	hpri *= PPQ;
1867 	if (rdd->uload - hpri < dd->uload + hpri)
1868 		return(NULL);
1869 #endif
1870 	return (rdd);
1871 }
1872 
1873 static
1874 dfly_pcpu_t
1875 dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp)
1876 {
1877 	dfly_pcpu_t rdd;
1878 	cpumask_t tmpmask;
1879 	cpumask_t mask;
1880 	int cpubase;
1881 	int cpuid;
1882 
1883 	/*
1884 	 * Fall back to the original heuristic: select a random cpu,
1885 	 * first checking the cpus not currently running a user thread.
1886 	 *
1887 	 * Use a rotating base cpu for the scan, first checking
1888 	 * cpubase...(ncpus-1), then 0...(cpubase-1).  This avoids favoring
1889 	 * lower-numbered cpus.
1890 	 */
1891 	++dd->scancpu;		/* SMP race ok */
1892 	mask = dfly_rdyprocmask;
1893 	CPUMASK_NANDMASK(mask, dfly_curprocmask);
1894 	CPUMASK_ANDMASK(mask, lp->lwp_cpumask);
1895 	CPUMASK_ANDMASK(mask, smp_active_mask);
1896 	CPUMASK_ANDMASK(mask, usched_global_cpumask);
1897 
1898 	cpubase = (int)(dd->scancpu % ncpus);
1899 	CPUMASK_ASSBMASK(tmpmask, cpubase);
1900 	CPUMASK_INVMASK(tmpmask);
1901 	CPUMASK_ANDMASK(tmpmask, mask);
1902 	while (CPUMASK_TESTNZERO(tmpmask)) {
1903 		cpuid = BSFCPUMASK(tmpmask);
1904 		rdd = &dfly_pcpu[cpuid];
1905 
1906 		if ((rdd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK))
1907 			goto found;
1908 		CPUMASK_NANDBIT(tmpmask, cpuid);
1909 	}
1910 
1911 	CPUMASK_ASSBMASK(tmpmask, cpubase);
1912 	CPUMASK_ANDMASK(tmpmask, mask);
1913 	while (CPUMASK_TESTNZERO(tmpmask)) {
1914 		cpuid = BSFCPUMASK(tmpmask);
1915 		rdd = &dfly_pcpu[cpuid];
1916 
1917 		if ((rdd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK))
1918 			goto found;
1919 		CPUMASK_NANDBIT(tmpmask, cpuid);
1920 	}
1921 
1922 	/*
1923 	 * Then cpus which might have a currently running lp
1924 	 */
1925 	mask = dfly_rdyprocmask;
1926 	CPUMASK_ANDMASK(mask, dfly_curprocmask);
1927 	CPUMASK_ANDMASK(mask, lp->lwp_cpumask);
1928 	CPUMASK_ANDMASK(mask, smp_active_mask);
1929 	CPUMASK_ANDMASK(mask, usched_global_cpumask);
1930 
1931 	CPUMASK_ASSBMASK(tmpmask, cpubase);
1932 	CPUMASK_INVMASK(tmpmask);
1933 	CPUMASK_ANDMASK(tmpmask, mask);
1934 	while (CPUMASK_TESTNZERO(tmpmask)) {
1935 		cpuid = BSFCPUMASK(tmpmask);
1936 		rdd = &dfly_pcpu[cpuid];
1937 
1938 		if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
1939 			goto found;
1940 		CPUMASK_NANDBIT(tmpmask, cpuid);
1941 	}
1942 
1943 	CPUMASK_ASSBMASK(tmpmask, cpubase);
1944 	CPUMASK_ANDMASK(tmpmask, mask);
1945 	while (CPUMASK_TESTNZERO(tmpmask)) {
1946 		cpuid = BSFCPUMASK(tmpmask);
1947 		rdd = &dfly_pcpu[cpuid];
1948 
1949 		if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
1950 			goto found;
1951 		CPUMASK_NANDBIT(tmpmask, cpuid);
1952 	}
1953 
1954 	/*
1955 	 * If we cannot find a suitable cpu we round-robin using scancpu.
1956 	 * Other cpus will pick up as they release their current lwps or
1957 	 * become ready.
1958 	 *
1959 	 * Avoid a degenerate system lockup case if usched_global_cpumask
1960 	 * is set to 0 or otherwise does not cover lwp_cpumask.
1961 	 *
1962 	 * We only kick the target helper thread in this case; we do not
1963 	 * set the user resched flag because
1964 	 */
1965 	cpuid = cpubase;
1966 	if (CPUMASK_TESTBIT(lp->lwp_cpumask, cpuid) == 0)
1967 		cpuid = BSFCPUMASK(lp->lwp_cpumask);
1968 	else if (CPUMASK_TESTBIT(usched_global_cpumask, cpuid) == 0)
1969 		cpuid = 0;
1970 	rdd = &dfly_pcpu[cpuid];
1971 found:
1972 	return (rdd);
1973 }
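
/*
 * Illustrative sketch only (hypothetical helper, never compiled): the
 * wrap-around scan used in dfly_choose_queue_simple() above, redone with
 * a plain 64-bit word for clarity.  Bits 'base' and up are tried first,
 * then bits 0..base-1, so the search does not systematically favor
 * low-numbered cpus.  'candidates' stands in for the composed cpumask.
 */
#if 0
static int
example_wraparound_scan(uint64_t candidates, int base)
{
	uint64_t low;		/* bits 0..base-1, like CPUMASK_ASSBMASK */
	uint64_t m;

	low = (base < 64) ? ((1ULL << base) - 1) : ~(uint64_t)0;

	m = candidates & ~low;			/* cpus base..63 first */
	if (m)
		return (__builtin_ctzll(m));	/* lowest set bit */
	m = candidates & low;			/* then wrap to 0..base-1 */
	if (m)
		return (__builtin_ctzll(m));
	return (-1);				/* no candidate found */
}
#endif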
1974 
1975 static
1976 void
1977 dfly_need_user_resched_remote(void *dummy)
1978 {
1979 	globaldata_t gd = mycpu;
1980 	dfly_pcpu_t  dd = &dfly_pcpu[gd->gd_cpuid];
1981 
1982 	/*
1983 	 * Flag reschedule needed
1984 	 */
1985 	need_user_resched();
1986 
1987 	/*
1988 	 * If no user thread is currently running we need to kick the helper
1989 	 * on our cpu to recover.  Otherwise the cpu will never schedule
1990 	 * anything again.
1991 	 *
1992 	 * We cannot schedule the process ourselves because this is an
1993 	 * IPI callback and we cannot acquire spinlocks in an IPI callback.
1994 	 *
1995 	 * Call wakeup_mycpu to avoid sending IPIs to other CPUs
1996 	 */
1997 	if (dd->uschedcp == NULL &&
1998 	    CPUMASK_TESTBIT(dfly_rdyprocmask, gd->gd_cpuid)) {
1999 		ATOMIC_CPUMASK_NANDBIT(dfly_rdyprocmask, gd->gd_cpuid);
2000 		wakeup_mycpu(dd->helper_thread);
2001 	}
2002 }
2003 
2004 /*
2005  * dfly_remrunqueue_locked() removes a given process from the run queue
2006  * that it is on, clearing the queue busy bit if it becomes empty.
2007  *
2008  * Note that the user process scheduler is different from the LWKT scheduler.
2009  * The user process scheduler only manages user processes but it uses LWKT
2010  * underneath, and a user process operating in the kernel will often be
2011  * 'released' from our management.
2012  *
2013  * uload is NOT adjusted here.  It is only adjusted if the lwkt_thread goes
2014  * to sleep or the lwp is moved to a different runq.
2015  */
2016 static void
2017 dfly_remrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
2018 {
2019 	struct rq *q;
2020 	u_int32_t *which;
2021 	u_int8_t pri;
2022 
2023 	KKASSERT(rdd->runqcount >= 0);
2024 
2025 	pri = lp->lwp_rqindex;
2026 
2027 	switch(lp->lwp_rqtype) {
2028 	case RTP_PRIO_NORMAL:
2029 		q = &rdd->queues[pri];
2030 		which = &rdd->queuebits;
2031 		break;
2032 	case RTP_PRIO_REALTIME:
2033 	case RTP_PRIO_FIFO:
2034 		q = &rdd->rtqueues[pri];
2035 		which = &rdd->rtqueuebits;
2036 		break;
2037 	case RTP_PRIO_IDLE:
2038 		q = &rdd->idqueues[pri];
2039 		which = &rdd->idqueuebits;
2040 		break;
2041 	default:
2042 		panic("remrunqueue: invalid rtprio type");
2043 		/* NOT REACHED */
2044 	}
2045 	KKASSERT(lp->lwp_mpflags & LWP_MP_ONRUNQ);
2046 	atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
2047 	TAILQ_REMOVE(q, lp, lwp_procq);
2048 	--rdd->runqcount;
2049 	if (TAILQ_EMPTY(q)) {
2050 		KASSERT((*which & (1 << pri)) != 0,
2051 			("remrunqueue: remove from empty queue"));
2052 		*which &= ~(1 << pri);
2053 	}
2054 }
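
/*
 * Illustrative sketch only (hypothetical helpers, never compiled): the
 * queuebits bookkeeping performed above and in dfly_setrunqueue_locked(),
 * with a plain 32-bit word.  Each of the NQS run queues owns one bit; the
 * lowest set bit identifies the best (lowest index, hence highest
 * priority) non-empty queue.
 */
#if 0
static void
example_mark_queue_empty(u_int32_t *which, int pri)
{
	*which &= ~(1 << pri);		/* queue 'pri' just became empty */
}

static int
example_best_nonempty_queue(u_int32_t which)
{
	if (which == 0)
		return (-1);		/* nothing runnable */
	return (bsfl(which));		/* find-first-set = best queue */
}
#endif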
2055 
2056 /*
2057  * dfly_setrunqueue_locked()
2058  *
2059  * Add a process whose rqtype and rqindex have previously been calculated
2060  * onto the appropriate run queue.   Determine if the addition requires
2061  * a reschedule on a cpu and return the cpuid or -1.
2062  *
2063  * NOTE: 	  Lower priorities are better priorities.
2064  *
2065  * NOTE ON ULOAD: This variable specifies the aggregate load on a cpu, the
2066  *		  sum of the rough lwp_priority for all running and runnable
2067  *		  processes.  Lower priority processes (higher lwp_priority
2068  *		  values) actually DO count as more load, not less, because
2069  *		  these are the programs which require the most care with
2070  *		  regards to cpu selection.
2071  */
2072 static void
2073 dfly_setrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
2074 {
2075 	u_int32_t *which;
2076 	struct rq *q;
2077 	int pri;
2078 
2079 	KKASSERT(lp->lwp_qcpu == rdd->cpuid);
2080 
2081 	if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
2082 		atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
2083 		atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload, lp->lwp_uload);
2084 		atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].ucount, 1);
2085 	}
2086 
2087 	pri = lp->lwp_rqindex;
2088 
2089 	switch(lp->lwp_rqtype) {
2090 	case RTP_PRIO_NORMAL:
2091 		q = &rdd->queues[pri];
2092 		which = &rdd->queuebits;
2093 		break;
2094 	case RTP_PRIO_REALTIME:
2095 	case RTP_PRIO_FIFO:
2096 		q = &rdd->rtqueues[pri];
2097 		which = &rdd->rtqueuebits;
2098 		break;
2099 	case RTP_PRIO_IDLE:
2100 		q = &rdd->idqueues[pri];
2101 		which = &rdd->idqueuebits;
2102 		break;
2103 	default:
2104 		panic("setrunqueue: invalid rtprio type");
2105 		/* NOT REACHED */
2106 	}
2107 
2108 	/*
2109 	 * Place us on the selected queue.  Determine if we should be
2110 	 * placed at the head of the queue or at the end.
2111 	 *
2112 	 * We are placed at the tail if our round-robin count has expired,
2113 	 * or is about to expire and the system thinks it's a good place to
2114 	 * round-robin, or there is already a next thread on the queue
2115 	 * (it might be trying to pick up where it left off and we don't
2116 	 * want to interfere).
2117 	 */
2118 	KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
2119 	atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
2120 	++rdd->runqcount;
2121 
2122 	if (lp->lwp_rrcount >= usched_dfly_rrinterval ||
2123 	    (lp->lwp_rrcount >= usched_dfly_rrinterval / 2 &&
2124 	     (lp->lwp_thread->td_mpflags & TDF_MP_BATCH_DEMARC))
2125 	) {
2126 		/*
2127 		 * Place on tail
2128 		 */
2129 		atomic_clear_int(&lp->lwp_thread->td_mpflags,
2130 				 TDF_MP_BATCH_DEMARC);
2131 		lp->lwp_rrcount = 0;
2132 		TAILQ_INSERT_TAIL(q, lp, lwp_procq);
2133 	} else {
2134 		/*
2135 		 * Retain rrcount and place on head.  Count is retained
2136 		 * even if the queue is empty.
2137 		 */
2138 		TAILQ_INSERT_HEAD(q, lp, lwp_procq);
2139 	}
2140 	*which |= 1 << pri;
2141 }
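
/*
 * Illustrative sketch only (hypothetical helper, never compiled): the
 * head-vs-tail placement decision made above, isolated as a predicate.
 * A non-zero return means the lwp has used (most of) its round-robin
 * quantum and is queued at the tail; otherwise it is reinserted at the
 * head so it can resume promptly.
 */
#if 0
static int
example_place_at_tail(int rrcount, int batch_demarc)
{
	if (rrcount >= usched_dfly_rrinterval)
		return (1);
	if (rrcount >= usched_dfly_rrinterval / 2 && batch_demarc)
		return (1);
	return (0);
}
#endif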
2142 
2143 /*
2144  * For SMP systems a user scheduler helper thread is created for each
2145  * cpu and is used to allow one cpu to wake up another for the purposes of
2146  * scheduling userland threads from setrunqueue().
2147  *
2148  * UP systems do not need the helper since there is only one cpu.
2149  *
2150  * We can't use the idle thread for this because we might block.
2151  * Additionally, doing things this way allows us to HLT idle cpus
2152  * on MP systems.
2153  */
2154 static void
2155 dfly_helper_thread(void *dummy)
2156 {
2157     globaldata_t gd;
2158     dfly_pcpu_t dd;
2159     dfly_pcpu_t rdd;
2160     struct lwp *nlp;
2161     cpumask_t mask;
2162     int cpuid;
2163 
2164     gd = mycpu;
2165     cpuid = gd->gd_cpuid;	/* doesn't change */
2166     mask = gd->gd_cpumask;	/* doesn't change */
2167     dd = &dfly_pcpu[cpuid];
2168 
2169     /*
2170      * Since we only want to be woken up when no user processes
2171      * are scheduled on a cpu, run at an ultra low priority.
2172      */
2173     lwkt_setpri_self(TDPRI_USER_SCHEDULER);
2174 
2175     tsleep(dd->helper_thread, 0, "schslp", 0);
2176 
2177     for (;;) {
2178 	/*
2179 	 * We use the tsleep interlock trick to avoid racing
2180 	 * dfly_rdyprocmask.  This means we cannot block between the
2181 	 * tsleep_interlock() call here and the PINTERLOCKED tsleep() below.
2182 	 */
2183 	crit_enter_gd(gd);
2184 	tsleep_interlock(dd->helper_thread, 0);
2185 
2186 	spin_lock(&dd->spin);
2187 
2188 	ATOMIC_CPUMASK_ORMASK(dfly_rdyprocmask, mask);
2189 	clear_user_resched();	/* This satisfies the reschedule request */
2190 #if 0
2191 	dd->rrcount = 0;	/* Reset the round-robin counter */
2192 #endif
2193 
2194 	if (dd->runqcount || dd->uschedcp != NULL) {
2195 		/*
2196 		 * Threads are available.  A thread may or may not be
2197 		 * currently scheduled.  Get the best thread already queued
2198 		 * to this cpu.
2199 		 */
2200 		nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);
2201 		if (nlp) {
2202 			ATOMIC_CPUMASK_ORMASK(dfly_curprocmask, mask);
2203 			dd->upri = nlp->lwp_priority;
2204 			dd->uschedcp = nlp;
2205 #if 0
2206 			dd->rrcount = 0;	/* reset round robin */
2207 #endif
2208 			spin_unlock(&dd->spin);
2209 			lwkt_acquire(nlp->lwp_thread);
2210 			lwkt_schedule(nlp->lwp_thread);
2211 		} else {
2212 			/*
2213 			 * This situation should not occur because we had
2214 			 * at least one thread available.
2215 			 */
2216 			spin_unlock(&dd->spin);
2217 		}
2218 	} else if (usched_dfly_features & 0x01) {
2219 		/*
2220 		 * This cpu is devoid of runnable threads, steal a thread
2221 		 * from another cpu.  Since we're stealing, might as well
2222 		 * load balance at the same time.
2223 		 *
2224 		 * We choose the highest-loaded thread from the worst queue.
2225 		 *
2226 		 * NOTE! This function only returns a non-NULL rdd when
2227 		 *	 another cpu's queue is obviously overloaded.  We
2228 		 *	 do not want to perform the type of rebalancing
2229 		 *	 the schedclock does here because it would result
2230 		 *	 in insane process pulling when 'steady' state is
2231 		 *	 partially unbalanced (e.g. 6 runnables and only
2232 		 *	 4 cores).
2233 		 */
2234 		rdd = dfly_choose_worst_queue(dd);
2235 		if (rdd && spin_trylock(&rdd->spin)) {
2236 			nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
2237 			spin_unlock(&rdd->spin);
2238 		} else {
2239 			nlp = NULL;
2240 		}
2241 		if (nlp) {
2242 			ATOMIC_CPUMASK_ORMASK(dfly_curprocmask, mask);
2243 			dd->upri = nlp->lwp_priority;
2244 			dd->uschedcp = nlp;
2245 #if 0
2246 			dd->rrcount = 0;	/* reset round robin */
2247 #endif
2248 			spin_unlock(&dd->spin);
2249 			lwkt_acquire(nlp->lwp_thread);
2250 			lwkt_schedule(nlp->lwp_thread);
2251 		} else {
2252 			/*
2253 			 * Leave the thread on our run queue.  Another
2254 			 * scheduler will try to pull it later.
2255 			 */
2256 			spin_unlock(&dd->spin);
2257 		}
2258 	} else {
2259 		/*
2260 		 * This cpu is devoid of runnable threads and we are not
2261 		 * allowed to steal any.
2262 		 */
2263 		spin_unlock(&dd->spin);
2264 	}
2265 
2266 	/*
2267 	 * We're descheduled unless someone scheduled us.  Switch away.
2268 	 * Exiting the critical section will cause splz() to be called
2269 	 * for us if interrupts and such are pending.
2270 	 */
2271 	crit_exit_gd(gd);
2272 	tsleep(dd->helper_thread, PINTERLOCKED, "schslp", 0);
2273     }
2274 }
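
/*
 * Illustrative sketch only (hypothetical helper, never compiled): the
 * tsleep interlock pattern the helper loop above relies on.  Registering
 * the wait channel with tsleep_interlock() before publishing the
 * "ready for wakeups" state means a wakeup issued by another cpu in the
 * window before the PINTERLOCKED tsleep() is not lost.
 */
#if 0
static void
example_interlocked_wait(void *ident, volatile int *work_available)
{
	tsleep_interlock(ident, 0);
	/* publish waiter state here, e.g. set a bit in a ready mask */
	if (*work_available == 0)
		tsleep(ident, PINTERLOCKED, "examp", 0);
}
#endif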
2275 
2276 #if 0
2277 static int
2278 sysctl_usched_dfly_stick_to_level(SYSCTL_HANDLER_ARGS)
2279 {
2280 	int error, new_val;
2281 
2282 	new_val = usched_dfly_stick_to_level;
2283 
2284 	error = sysctl_handle_int(oidp, &new_val, 0, req);
2285         if (error != 0 || req->newptr == NULL)
2286 		return (error);
2287 	if (new_val > cpu_topology_levels_number - 1 || new_val < 0)
2288 		return (EINVAL);
2289 	usched_dfly_stick_to_level = new_val;
2290 	return (0);
2291 }
2292 #endif
2293 
2294 /*
2295  * Set up the queues and scheduler helpers (scheduler helpers are SMP only).
2296  * Note that curprocmask bit 0 has already been cleared by rqinit() and
2297  * we should not mess with it further.
2298  */
2299 static void
2300 usched_dfly_cpu_init(void)
2301 {
2302 	int i;
2303 	int j;
2304 	int smt_not_supported = 0;
2305 	int cache_coherent_not_supported = 0;
2306 
2307 	if (bootverbose)
2308 		kprintf("Start usched_dfly helpers on cpus:\n");
2309 
2310 	sysctl_ctx_init(&usched_dfly_sysctl_ctx);
2311 	usched_dfly_sysctl_tree =
2312 		SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
2313 				SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
2314 				"usched_dfly", CTLFLAG_RD, 0, "");
2315 
2316 	for (i = 0; i < ncpus; ++i) {
2317 		dfly_pcpu_t dd = &dfly_pcpu[i];
2318 		cpumask_t mask;
2319 
2320 		CPUMASK_ASSBIT(mask, i);
2321 		if (CPUMASK_TESTMASK(mask, smp_active_mask) == 0)
2322 		    continue;
2323 
2324 		spin_init(&dd->spin, "uschedcpuinit");
2325 		dd->cpunode = get_cpu_node_by_cpuid(i);
2326 		dd->cpuid = i;
2327 		CPUMASK_ASSBIT(dd->cpumask, i);
2328 		for (j = 0; j < NQS; j++) {
2329 			TAILQ_INIT(&dd->queues[j]);
2330 			TAILQ_INIT(&dd->rtqueues[j]);
2331 			TAILQ_INIT(&dd->idqueues[j]);
2332 		}
2333 		ATOMIC_CPUMASK_NANDBIT(dfly_curprocmask, 0);
2334 
2335 		if (dd->cpunode == NULL) {
2336 			smt_not_supported = 1;
2337 			cache_coherent_not_supported = 1;
2338 			if (bootverbose)
2339 				kprintf ("    cpu%d - WARNING: No CPU NODE "
2340 					 "found for cpu\n", i);
2341 		} else {
2342 			switch (dd->cpunode->type) {
2343 			case THREAD_LEVEL:
2344 				if (bootverbose)
2345 					kprintf ("    cpu%d - HyperThreading "
2346 						 "available. Core siblings: ",
2347 						 i);
2348 				break;
2349 			case CORE_LEVEL:
2350 				smt_not_supported = 1;
2351 
2352 				if (bootverbose)
2353 					kprintf ("    cpu%d - No HT available, "
2354 						 "multi-core/physical "
2355 						 "cpu. Physical siblings: ",
2356 						 i);
2357 				break;
2358 			case CHIP_LEVEL:
2359 				smt_not_supported = 1;
2360 
2361 				if (bootverbose)
2362 					kprintf ("    cpu%d - No HT available, "
2363 						 "single-core/physical cpu. "
2364 						 "Package siblings: ",
2365 						 i);
2366 				break;
2367 			default:
2368 				/* Let's go for safe defaults here */
2369 				smt_not_supported = 1;
2370 				cache_coherent_not_supported = 1;
2371 				if (bootverbose)
2372 					kprintf ("    cpu%d - Unknown cpunode->"
2373 						 "type=%u. siblings: ",
2374 						 i,
2375 						 (u_int)dd->cpunode->type);
2376 				break;
2377 			}
2378 
2379 			if (bootverbose) {
2380 				if (dd->cpunode->parent_node != NULL) {
2381 					kprint_cpuset(&dd->cpunode->
2382 							parent_node->members);
2383 					kprintf("\n");
2384 				} else {
2385 					kprintf(" no siblings\n");
2386 				}
2387 			}
2388 		}
2389 
2390 		lwkt_create(dfly_helper_thread, NULL, &dd->helper_thread, NULL,
2391 			    0, i, "usched %d", i);
2392 
2393 		/*
2394 		 * Allow user scheduling on the target cpu.  cpu #0 has already
2395 		 * been enabled in rqinit().
2396 		 */
2397 		if (i)
2398 			ATOMIC_CPUMASK_NANDMASK(dfly_curprocmask, mask);
2399 		ATOMIC_CPUMASK_ORMASK(dfly_rdyprocmask, mask);
2400 		dd->upri = PRIBASE_NULL;
2401 
2402 	}
2403 
2404 	/* usched_dfly sysctl configurable parameters */
2405 
2406 	SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2407 		       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2408 		       OID_AUTO, "rrinterval", CTLFLAG_RW,
2409 		       &usched_dfly_rrinterval, 0, "");
2410 	SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2411 		       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2412 		       OID_AUTO, "decay", CTLFLAG_RW,
2413 		       &usched_dfly_decay, 0, "Extra decay when not running");
2414 
2415 	/* Add enable/disable option for SMT scheduling if supported */
2416 	if (smt_not_supported) {
2417 		usched_dfly_smt = 0;
2418 		SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2419 				  SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2420 				  OID_AUTO, "smt", CTLFLAG_RD,
2421 				  "NOT SUPPORTED", 0, "SMT NOT SUPPORTED");
2422 	} else {
2423 		usched_dfly_smt = 1;
2424 		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2425 			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2426 			       OID_AUTO, "smt", CTLFLAG_RW,
2427 			       &usched_dfly_smt, 0, "Enable SMT scheduling");
2428 	}
2429 
2430 	/*
2431 	 * Add enable/disable option for cache coherent scheduling
2432 	 * if supported
2433 	 */
2434 	if (cache_coherent_not_supported) {
2435 		usched_dfly_cache_coherent = 0;
2436 		SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2437 				  SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2438 				  OID_AUTO, "cache_coherent", CTLFLAG_RD,
2439 				  "NOT SUPPORTED", 0,
2440 				  "Cache coherence NOT SUPPORTED");
2441 	} else {
2442 		usched_dfly_cache_coherent = 1;
2443 		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2444 			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2445 			       OID_AUTO, "cache_coherent", CTLFLAG_RW,
2446 			       &usched_dfly_cache_coherent, 0,
2447 			       "Enable/Disable cache coherent scheduling");
2448 
2449 		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2450 			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2451 			       OID_AUTO, "weight1", CTLFLAG_RW,
2452 			       &usched_dfly_weight1, 200,
2453 			       "Weight selection for current cpu");
2454 
2455 		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2456 			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2457 			       OID_AUTO, "weight2", CTLFLAG_RW,
2458 			       &usched_dfly_weight2, 180,
2459 			       "Weight selection for wakefrom cpu");
2460 
2461 		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2462 			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2463 			       OID_AUTO, "weight3", CTLFLAG_RW,
2464 			       &usched_dfly_weight3, 40,
2465 			       "Weight selection for num threads on queue");
2466 
2467 		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2468 			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2469 			       OID_AUTO, "weight4", CTLFLAG_RW,
2470 			       &usched_dfly_weight4, 160,
2471 			       "Availability of other idle cpus");
2472 
2473 		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2474 			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2475 			       OID_AUTO, "fast_resched", CTLFLAG_RW,
2476 			       &usched_dfly_fast_resched, 0,
2477 			       "Availability of other idle cpus");
2478 
2479 		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2480 			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2481 			       OID_AUTO, "features", CTLFLAG_RW,
2482 			       &usched_dfly_features, 0x8F,
2483 			       "Allow pulls into empty queues");
2484 
2485 		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2486 			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2487 			       OID_AUTO, "swmask", CTLFLAG_RW,
2488 			       &usched_dfly_swmask, ~PPQMASK,
2489 			       "Queue mask to force thread switch");
2490 
2491 #if 0
2492 		SYSCTL_ADD_PROC(&usched_dfly_sysctl_ctx,
2493 				SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2494 				OID_AUTO, "stick_to_level",
2495 				CTLTYPE_INT | CTLFLAG_RW,
2496 				NULL, sizeof usched_dfly_stick_to_level,
2497 				sysctl_usched_dfly_stick_to_level, "I",
2498 				"Stick a process to this level. See sysctl "
2499 				"parameter hw.cpu_topology.level_description");
2500 #endif
2501 	}
2502 }
2503 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2504 	usched_dfly_cpu_init, NULL);
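
/*
 * The tunables registered in usched_dfly_cpu_init() appear under the
 * kern.usched_dfly sysctl tree and can be inspected or adjusted at
 * runtime, for example (illustrative only):
 *
 *	sysctl kern.usched_dfly.rrinterval
 *	sysctl kern.usched_dfly.weight2=180
 *	sysctl kern.usched_dfly.smt=0
 */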
2505