xref: /dragonfly/sys/kern/usched_dfly.c (revision 52cb6762)
1 /*
2  * Copyright (c) 2012 The DragonFly Project.  All rights reserved.
3  * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>.  All rights reserved.
4  *
5  * This code is derived from software contributed to The DragonFly Project
6  * by Matthew Dillon <dillon@backplane.com>,
7  * by Mihai Carabas <mihai.carabas@gmail.com>
8  * and many others.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  *
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in
18  *    the documentation and/or other materials provided with the
19  *    distribution.
20  * 3. Neither the name of The DragonFly Project nor the names of its
21  *    contributors may be used to endorse or promote products derived
22  *    from this software without specific, prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
28  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  */
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/lock.h>
41 #include <sys/queue.h>
42 #include <sys/proc.h>
43 #include <sys/rtprio.h>
44 #include <sys/uio.h>
45 #include <sys/sysctl.h>
46 #include <sys/resourcevar.h>
47 #include <sys/spinlock.h>
48 #include <sys/cpu_topology.h>
49 #include <sys/thread2.h>
50 #include <sys/spinlock2.h>
51 #include <sys/mplock2.h>
52 
53 #include <sys/ktr.h>
54 
55 #include <machine/cpu.h>
56 #include <machine/smp.h>
57 
58 /*
59  * Priorities.  Note that with 32 run queues per scheduler each queue
60  * represents four priority levels.
61  */
62 
63 int dfly_rebalanced;
64 
65 #define MAXPRI			128
66 #define PRIMASK			(MAXPRI - 1)
67 #define PRIBASE_REALTIME	0
68 #define PRIBASE_NORMAL		MAXPRI
69 #define PRIBASE_IDLE		(MAXPRI * 2)
70 #define PRIBASE_THREAD		(MAXPRI * 3)
71 #define PRIBASE_NULL		(MAXPRI * 4)
72 
73 #define NQS	32			/* 32 run queues. */
74 #define PPQ	(MAXPRI / NQS)		/* priorities per queue */
75 #define PPQMASK	(PPQ - 1)
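
/*
 * With MAXPRI 128 and NQS 32, PPQ is 4: each run queue covers four
 * priority levels and a priority p maps to queue index (p & PRIMASK) / PPQ.
 * For example, a normal-class priority of MAXPRI + 63 masks to 63 and
 * lands on queue 15.
 */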
76 
77 /*
78  * NICEPPQ	- number of nice units per priority queue
79  * ESTCPUPPQ	- number of estcpu units per priority queue
80  * ESTCPUMAX	- number of estcpu units
81  */
82 #define NICEPPQ		2
83 #define ESTCPUPPQ	512
84 #define ESTCPUMAX	(ESTCPUPPQ * NQS)
85 #define BATCHMAX	(ESTCPUFREQ * 30)
86 #define PRIO_RANGE	(PRIO_MAX - PRIO_MIN + 1)
87 
88 #define ESTCPULIM(v)	min((v), ESTCPUMAX)
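
/*
 * For reference, ESTCPUMAX works out to 512 * 32 = 16384 estcpu units,
 * so ESTCPULIM() simply clamps an estcpu accumulation to that ceiling.
 */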
89 
90 TAILQ_HEAD(rq, lwp);
91 
92 #define lwp_priority	lwp_usdata.dfly.priority
93 #define lwp_forked	lwp_usdata.dfly.forked
94 #define lwp_rqindex	lwp_usdata.dfly.rqindex
95 #define lwp_estcpu	lwp_usdata.dfly.estcpu
96 #define lwp_estfast	lwp_usdata.dfly.estfast
97 #define lwp_uload	lwp_usdata.dfly.uload
98 #define lwp_rqtype	lwp_usdata.dfly.rqtype
99 #define lwp_qcpu	lwp_usdata.dfly.qcpu
100 #define lwp_rrcount	lwp_usdata.dfly.rrcount
101 
102 struct usched_dfly_pcpu {
103 	struct spinlock spin;
104 	struct thread	*helper_thread;
105 	short		unusde01;
106 	short		upri;
107 	int		uload;
108 	int		ucount;
109 	struct lwp	*uschedcp;
110 	struct rq	queues[NQS];
111 	struct rq	rtqueues[NQS];
112 	struct rq	idqueues[NQS];
113 	u_int32_t	queuebits;
114 	u_int32_t	rtqueuebits;
115 	u_int32_t	idqueuebits;
116 	int		runqcount;
117 	int		cpuid;
118 	cpumask_t	cpumask;
119 	cpu_node_t	*cpunode;
120 };
121 
122 typedef struct usched_dfly_pcpu	*dfly_pcpu_t;
123 
124 static void dfly_acquire_curproc(struct lwp *lp);
125 static void dfly_release_curproc(struct lwp *lp);
126 static void dfly_select_curproc(globaldata_t gd);
127 static void dfly_setrunqueue(struct lwp *lp);
128 static void dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp);
129 static void dfly_schedulerclock(struct lwp *lp, sysclock_t period,
130 				sysclock_t cpstamp);
131 static void dfly_recalculate_estcpu(struct lwp *lp);
132 static void dfly_resetpriority(struct lwp *lp);
133 static void dfly_forking(struct lwp *plp, struct lwp *lp);
134 static void dfly_exiting(struct lwp *lp, struct proc *);
135 static void dfly_uload_update(struct lwp *lp);
136 static void dfly_yield(struct lwp *lp);
137 static void dfly_changeqcpu_locked(struct lwp *lp,
138 				dfly_pcpu_t dd, dfly_pcpu_t rdd);
139 static dfly_pcpu_t dfly_choose_best_queue(struct lwp *lp);
140 static dfly_pcpu_t dfly_choose_worst_queue(dfly_pcpu_t dd);
141 static dfly_pcpu_t dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp);
142 static void dfly_need_user_resched_remote(void *dummy);
143 static struct lwp *dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
144 					  struct lwp *chklp, int worst);
145 static void dfly_remrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
146 static void dfly_setrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
147 static void dfly_changedcpu(struct lwp *lp);
148 
149 struct usched usched_dfly = {
150 	{ NULL },
151 	"dfly", "Original DragonFly Scheduler",
152 	NULL,			/* default registration */
153 	NULL,			/* default deregistration */
154 	dfly_acquire_curproc,
155 	dfly_release_curproc,
156 	dfly_setrunqueue,
157 	dfly_schedulerclock,
158 	dfly_recalculate_estcpu,
159 	dfly_resetpriority,
160 	dfly_forking,
161 	dfly_exiting,
162 	dfly_uload_update,
163 	NULL,			/* setcpumask not supported */
164 	dfly_yield,
165 	dfly_changedcpu
166 };
167 
168 /*
169  * We have NQS (32) run queues per scheduling class.  For the normal
170  * class, there are 128 priorities scaled onto these 32 queues.  New
171  * processes are added to the last entry in each queue, and processes
172  * are selected for running by taking them from the head and maintaining
173  * a simple FIFO arrangement.  Realtime and Idle priority processes have
174  * an explicit 0-31 priority which maps directly onto their class queue
175  * index.  When a queue has something in it, the corresponding bit is
176  * set in the queuebits variable, allowing a single read to determine
177  * the state of all 32 queues and then a ffs() to find the first busy
178  * queue.
179  */
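/*
 * A minimal sketch of that lookup (the real selection code is in
 * dfly_chooseproc_locked() below): a set bit in queuebits marks a
 * non-empty queue, so bsfl() yields the lowest busy index, which is
 * the highest-priority queue:
 *
 *	if (dd->queuebits) {
 *		pri = bsfl(dd->queuebits);
 *		lp = TAILQ_FIRST(&dd->queues[pri]);
 *	}
 */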
180 					/* currently running a user process */
181 static cpumask_t dfly_curprocmask = CPUMASK_INITIALIZER_ALLONES;
182 static cpumask_t dfly_rdyprocmask;	/* ready to accept a user process */
183 static volatile int dfly_scancpu;
184 static volatile int dfly_ucount;	/* total running on whole system */
185 static struct usched_dfly_pcpu dfly_pcpu[MAXCPU];
186 static struct sysctl_ctx_list usched_dfly_sysctl_ctx;
187 static struct sysctl_oid *usched_dfly_sysctl_tree;
188 
189 /* Debug info exposed through debug.* sysctl */
190 
191 static int usched_dfly_debug = -1;
192 SYSCTL_INT(_debug, OID_AUTO, dfly_scdebug, CTLFLAG_RW,
193 	   &usched_dfly_debug, 0,
194 	   "Print debug information for this pid");
195 
196 static int usched_dfly_pid_debug = -1;
197 SYSCTL_INT(_debug, OID_AUTO, dfly_pid_debug, CTLFLAG_RW,
198 	   &usched_dfly_pid_debug, 0,
199 	   "Print KTR debug information for this pid");
200 
201 static int usched_dfly_chooser = 0;
202 SYSCTL_INT(_debug, OID_AUTO, dfly_chooser, CTLFLAG_RW,
203 	   &usched_dfly_chooser, 0,
204 	   "Print cpu selection debug information");
205 
206 /*
207  * Tuning usched_dfly - configurable through kern.usched_dfly.
208  *
209  * weight1 - Tries to keep threads on their current cpu.  If you
210  *	     make this value too large the scheduler will not be
211  *	     able to load-balance large loads.
212  *
213  * weight2 - If non-zero, detects thread pairs undergoing synchronous
214  *	     communications and tries to move them closer together.
215  *	     Behavior is adjusted by bit 4 of features (0x10).
216  *
217  *	     WARNING!  Weight2 is a ridiculously sensitive parameter,
218  *	     a small value is recommended.
219  *
220  * weight3 - Weighting based on the number of recently runnable threads
221  *	     on the userland scheduling queue (ignoring their loads).
222  *	     A nominal value here prevents high-priority (low-load)
223  *	     threads from accumulating on one cpu core when other
224  *	     cores are available.
225  *
226  *	     This value should be left fairly small relative to weight1
227  *	     and weight4.
228  *
229  * weight4 - Weighting based on other cpu queues being available
230  *	     or running processes with higher lwp_priority's.
231  *
232  *	     This allows a thread to migrate to another nearby cpu if it
233  *	     is unable to run on the current cpu based on the other cpu
234  *	     being idle or running a lower priority (higher lwp_priority)
235  *	     thread.  This value should be large enough to override weight1.
236  *
237  * features - These flags can be set or cleared to enable or disable various
238  *	      features.
239  *
240  *	      0x01	Enable idle-cpu pulling			(default)
241  *	      0x02	Enable proactive pushing		(default)
242  *	      0x04	Enable rebalancing rover		(default)
243  *	      0x08	Enable more proactive pushing		(default)
244  *	      0x10	(flip weight2 limit on same cpu)	(default)
245  *	      0x20	choose best cpu for forked process
246  *	      0x40	choose current cpu for forked process
247  *	      0x80	choose random cpu for forked process	(default)
248  */
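/*
 * For reference, the default features value of 0x8F below decomposes as
 * 0x01 | 0x02 | 0x04 | 0x08 | 0x80: idle-cpu pulling, both levels of
 * proactive pushing, the rebalancing rover, and random cpu selection
 * for forked processes.
 */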
249 static int usched_dfly_smt = 0;
250 static int usched_dfly_cache_coherent = 0;
251 static int usched_dfly_weight1 = 200;	/* keep thread on current cpu */
252 static int usched_dfly_weight2 = 180;	/* synchronous peer's current cpu */
253 static int usched_dfly_weight3 = 40;	/* number of threads on queue */
254 static int usched_dfly_weight4 = 160;	/* availability of idle cores */
255 static int usched_dfly_features = 0x8F;	/* allow pulls */
256 static int usched_dfly_fast_resched = 0;/* delta priority / resched */
257 static int usched_dfly_swmask = ~PPQMASK; /* priority mask for LWKT hint */
258 static int usched_dfly_rrinterval = (ESTCPUFREQ + 9) / 10;
259 static int usched_dfly_decay = 8;
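
/*
 * The scheduler clock runs at ESTCPUFREQ, so the usched_dfly_rrinterval
 * default above works out to roughly one forced round-robin every tenth
 * of a second for cpu-bound threads.
 */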
260 
261 /* KTR debug printings */
262 
263 KTR_INFO_MASTER(usched);
264 
265 #if !defined(KTR_USCHED_DFLY)
266 #define	KTR_USCHED_DFLY	KTR_ALL
267 #endif
268 
269 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc, 0,
270     "USCHED_DFLY(chooseproc: pid %d, old_cpuid %d, curr_cpuid %d)",
271     pid_t pid, int old_cpuid, int curr);
272 
273 /*
274  * This function is called when the kernel intends to return to userland.
275  * It is responsible for making the thread the current designated userland
276  * thread for this cpu, blocking if necessary.
277  *
278  * The kernel will not depress our LWKT priority until after we return,
279  * in case we have to shove over to another cpu.
280  *
281  * We must determine our thread's disposition before we switch away.  This
282  * is very sensitive code.
283  *
284  * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
285  * TO ANOTHER CPU!  Because most of the kernel assumes that no migration will
286  * occur, this function is called only under very controlled circumstances.
287  */
288 static void
289 dfly_acquire_curproc(struct lwp *lp)
290 {
291 	globaldata_t gd;
292 	dfly_pcpu_t dd;
293 	dfly_pcpu_t rdd;
294 	thread_t td;
295 	int force_resched;
296 
297 	/*
298 	 * Make sure we aren't sitting on a tsleep queue.
299 	 */
300 	td = lp->lwp_thread;
301 	crit_enter_quick(td);
302 	if (td->td_flags & TDF_TSLEEPQ)
303 		tsleep_remove(td);
304 	dfly_recalculate_estcpu(lp);
305 
306 	gd = mycpu;
307 	dd = &dfly_pcpu[gd->gd_cpuid];
308 
309 	/*
310 	 * Process any pending interrupts/ipi's, then handle reschedule
311 	 * requests.  dfly_release_curproc() will try to assign a new
312 	 * uschedcp that isn't us and otherwise NULL it out.
313 	 */
314 	force_resched = 0;
315 	if ((td->td_mpflags & TDF_MP_BATCH_DEMARC) &&
316 	    lp->lwp_rrcount >= usched_dfly_rrinterval / 2) {
317 		force_resched = 1;
318 	}
319 
320 	if (user_resched_wanted()) {
321 		if (dd->uschedcp == lp)
322 			force_resched = 1;
323 		clear_user_resched();
324 		dfly_release_curproc(lp);
325 	}
326 
327 	/*
328 	 * Loop until we are the current user thread.
329 	 *
330 	 * NOTE: dd spinlock not held at top of loop.
331 	 */
332 	if (dd->uschedcp == lp)
333 		lwkt_yield_quick();
334 
335 	while (dd->uschedcp != lp) {
336 		lwkt_yield_quick();
337 
338 		spin_lock(&dd->spin);
339 
340 		if (force_resched &&
341 		   (usched_dfly_features & 0x08) &&
342 		   (rdd = dfly_choose_best_queue(lp)) != dd) {
343 			/*
344 			 * We are not or are no longer the current lwp and a
345 			 * forced reschedule was requested.  Figure out the
346 			 * best cpu to run on (our current cpu will be given
347 			 * significant weight).
348 			 *
349 			 * (if a reschedule was not requested we want to
350 			 *  move this step after the uschedcp tests).
351 			 */
352 			dfly_changeqcpu_locked(lp, dd, rdd);
353 			spin_unlock(&dd->spin);
354 			lwkt_deschedule(lp->lwp_thread);
355 			dfly_setrunqueue_dd(rdd, lp);
356 			lwkt_switch();
357 			gd = mycpu;
358 			dd = &dfly_pcpu[gd->gd_cpuid];
359 			continue;
360 		}
361 
362 		/*
363 		 * Either no reschedule was requested or the best queue was
364 		 * dd, and no current process has been selected.  We can
365 		 * trivially become the current lwp on the current cpu.
366 		 */
367 		if (dd->uschedcp == NULL) {
368 			atomic_clear_int(&lp->lwp_thread->td_mpflags,
369 					 TDF_MP_DIDYIELD);
370 			ATOMIC_CPUMASK_ORBIT(dfly_curprocmask, gd->gd_cpuid);
371 			dd->uschedcp = lp;
372 			dd->upri = lp->lwp_priority;
373 			KKASSERT(lp->lwp_qcpu == dd->cpuid);
374 			spin_unlock(&dd->spin);
375 			break;
376 		}
377 
378 		/*
379 		 * Put us back on the same run queue unconditionally.
380 		 *
381 		 * Set rrinterval to force placement at end of queue.
382 		 * Select the worst queue to ensure we round-robin,
383 		 * but do not change estcpu.
384 		 */
385 		if (lp->lwp_thread->td_mpflags & TDF_MP_DIDYIELD) {
386 			u_int32_t tsqbits;
387 
388 			switch(lp->lwp_rqtype) {
389 			case RTP_PRIO_NORMAL:
390 				tsqbits = dd->queuebits;
391 				spin_unlock(&dd->spin);
392 
393 				lp->lwp_rrcount = usched_dfly_rrinterval;
394 				if (tsqbits)
395 					lp->lwp_rqindex = bsrl(tsqbits);
396 				break;
397 			default:
398 				spin_unlock(&dd->spin);
399 				break;
400 			}
401 			lwkt_deschedule(lp->lwp_thread);
402 			dfly_setrunqueue_dd(dd, lp);
403 			atomic_clear_int(&lp->lwp_thread->td_mpflags,
404 					 TDF_MP_DIDYIELD);
405 			lwkt_switch();
406 			gd = mycpu;
407 			dd = &dfly_pcpu[gd->gd_cpuid];
408 			continue;
409 		}
410 
411 		/*
412 		 * Can we steal the current designated user thread?
413 		 *
414 		 * If we do the other thread will stall when it tries to
415 		 * return to userland, possibly rescheduling elsewhere.
416 		 *
417 		 * It is important to do a masked test to avoid the edge
418 		 * case where two near-equal-priority threads are constantly
419 		 * interrupting each other.
420 		 *
421 		 * In the exact match case another thread has already gained
422 		 * uschedcp and lowered its priority, if we steal it the
423 		 * other thread will stay stuck on the LWKT runq and not
424 		 * push to another cpu.  So don't steal on equal-priority even
425 		 * though it might appear to be more beneficial due to not
426 		 * having to switch back to the other thread's context.
427 		 *
428 		 * usched_dfly_fast_resched requires that two threads be
429 		 * significantly far apart in priority in order to interrupt.
430 		 *
431 		 * If better but not sufficiently far apart, the current
432 		 * uschedcp will be interrupted at the next scheduler clock.
433 		 */
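		/*
		 * As a concrete illustration of the masked test: with PPQ 4
		 * and PPQMASK 3, priorities 101 and 103 both mask to 100 and
		 * compare equal, so neither thread may steal uschedcp from
		 * the other.  The masked values must differ by a full queue
		 * (PPQ), plus usched_dfly_fast_resched if that is non-zero.
		 */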
434 		if (dd->uschedcp &&
435 		   (dd->upri & ~PPQMASK) >
436 		   (lp->lwp_priority & ~PPQMASK) + usched_dfly_fast_resched) {
437 			dd->uschedcp = lp;
438 			dd->upri = lp->lwp_priority;
439 			KKASSERT(lp->lwp_qcpu == dd->cpuid);
440 			spin_unlock(&dd->spin);
441 			break;
442 		}
443 		/*
444 		 * We are not the current lwp, figure out the best cpu
445 		 * to run on (our current cpu will be given significant
446 		 * weight).  Loop on cpu change.
447 		 */
448 		if ((usched_dfly_features & 0x02) &&
449 		    force_resched == 0 &&
450 		    (rdd = dfly_choose_best_queue(lp)) != dd) {
451 			dfly_changeqcpu_locked(lp, dd, rdd);
452 			spin_unlock(&dd->spin);
453 			lwkt_deschedule(lp->lwp_thread);
454 			dfly_setrunqueue_dd(rdd, lp);
455 			lwkt_switch();
456 			gd = mycpu;
457 			dd = &dfly_pcpu[gd->gd_cpuid];
458 			continue;
459 		}
460 
461 		/*
462 		 * We cannot become the current lwp, place the lp on the
463 		 * run-queue of this or another cpu and deschedule ourselves.
464 		 *
465 		 * When we are reactivated we will have another chance.
466 		 *
467 		 * Reload after a switch or setrunqueue/switch possibly
468 		 * moved us to another cpu.
469 		 */
470 		spin_unlock(&dd->spin);
471 		lwkt_deschedule(lp->lwp_thread);
472 		dfly_setrunqueue_dd(dd, lp);
473 		lwkt_switch();
474 		gd = mycpu;
475 		dd = &dfly_pcpu[gd->gd_cpuid];
476 	}
477 
478 	/*
479 	 * Make sure upri is synchronized, then yield to LWKT threads as
480 	 * needed before returning.  This could result in another reschedule.
481 	 * XXX
482 	 */
483 	crit_exit_quick(td);
484 
485 	KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
486 }
487 
488 /*
489  * DFLY_RELEASE_CURPROC
490  *
491  * This routine detaches the current thread from the userland scheduler,
492  * usually because the thread needs to run or block in the kernel (at
493  * kernel priority) for a while.
494  *
495  * This routine is also responsible for selecting a new thread to
496  * make the current thread.
497  *
498  * NOTE: This implementation differs from the dummy example in that
499  * dfly_select_curproc() is able to select the current process, whereas
500  * dummy_select_curproc() is not able to select the current process.
501  * This means we have to NULL out uschedcp.
502  *
503  * Additionally, note that we may already be on a run queue if releasing
504  * via the lwkt_switch() in dfly_setrunqueue().
505  */
506 static void
507 dfly_release_curproc(struct lwp *lp)
508 {
509 	globaldata_t gd = mycpu;
510 	dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
511 
512 	/*
513 	 * If lp is our designated current userland process, clear the
514 	 * designation and select a replacement.
515 	 */
516 	if (dd->uschedcp == lp) {
517 		KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
518 		spin_lock(&dd->spin);
519 		if (dd->uschedcp == lp) {
520 			dd->uschedcp = NULL;	/* don't let lp be selected */
521 			dd->upri = PRIBASE_NULL;
522 			ATOMIC_CPUMASK_NANDBIT(dfly_curprocmask, gd->gd_cpuid);
523 			spin_unlock(&dd->spin);
524 			dfly_select_curproc(gd);
525 		} else {
526 			spin_unlock(&dd->spin);
527 		}
528 	}
529 }
530 
531 /*
532  * DFLY_SELECT_CURPROC
533  *
534  * Select a new current process for this cpu and clear any pending user
535  * reschedule request.  The cpu currently has no current process.
536  *
537  * This routine is also responsible for equal-priority round-robining,
538  * typically triggered from dfly_schedulerclock().  In our dummy example
539  * all the 'user' threads are LWKT scheduled all at once and we just
540  * call lwkt_switch().
541  *
542  * The calling process is not on the queue and cannot be selected.
543  */
544 static
545 void
546 dfly_select_curproc(globaldata_t gd)
547 {
548 	dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
549 	struct lwp *nlp;
550 	int cpuid = gd->gd_cpuid;
551 
552 	crit_enter_gd(gd);
553 
554 	spin_lock(&dd->spin);
555 	nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);
556 
557 	if (nlp) {
558 		ATOMIC_CPUMASK_ORBIT(dfly_curprocmask, cpuid);
559 		dd->upri = nlp->lwp_priority;
560 		dd->uschedcp = nlp;
561 #if 0
562 		dd->rrcount = 0;		/* reset round robin */
563 #endif
564 		spin_unlock(&dd->spin);
565 		lwkt_acquire(nlp->lwp_thread);
566 		lwkt_schedule(nlp->lwp_thread);
567 	} else {
568 		spin_unlock(&dd->spin);
569 	}
570 	crit_exit_gd(gd);
571 }
572 
573 /*
574  * Place the specified lwp on the user scheduler's run queue.  This routine
575  * must be called with the thread descheduled.  The lwp must be runnable.
576  * It must not be possible for anyone else to explicitly schedule this thread.
577  *
578  * The thread may be the current thread as a special case.
579  */
580 static void
581 dfly_setrunqueue(struct lwp *lp)
582 {
583 	dfly_pcpu_t dd;
584 	dfly_pcpu_t rdd;
585 
586 	/*
587 	 * First validate the process LWKT state.
588 	 */
589 	KASSERT(lp->lwp_stat == LSRUN, ("setrunqueue: lwp not LSRUN"));
590 	KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0,
591 	    ("lwp %d/%d already on runq! flag %08x/%08x", lp->lwp_proc->p_pid,
592 	     lp->lwp_tid, lp->lwp_proc->p_flags, lp->lwp_flags));
593 	KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);
594 
595 	/*
596 	 * NOTE: dd/rdd do not necessarily represent the current cpu.
597 	 *	 Instead they may represent the cpu the thread was last
598 	 *	 scheduled on or inherited by its parent.
599 	 */
600 	dd = &dfly_pcpu[lp->lwp_qcpu];
601 	rdd = dd;
602 
603 	/*
604 	 * This process is not supposed to be scheduled anywhere or assigned
605 	 * as the current process anywhere.  Assert the condition.
606 	 */
607 	KKASSERT(rdd->uschedcp != lp);
608 
609 	/*
610 	 * Ok, we have to setrunqueue some target cpu and request a reschedule
611 	 * if necessary.
612 	 *
613 	 * We have to choose the best target cpu.  It might not be the current
614 	 * target even if the current cpu has no running user thread (for
615 	 * example, because the current cpu might be a hyperthread and its
616 	 * sibling has a thread assigned).
617 	 *
618 	 * If we just forked it is most optimal to run the child on the same
619 	 * cpu just in case the parent decides to wait for it (thus getting
620 	 * off that cpu).  As long as there is nothing else runnable on the
621 	 * cpu, that is.  If we did this unconditionally a parent forking
622 	 * multiple children before waiting (e.g. make -j N) leaves other
623 	 * cpus idle that could be working.
624 	 */
625 	if (lp->lwp_forked) {
626 		lp->lwp_forked = 0;
627 		if (usched_dfly_features & 0x20)
628 			rdd = dfly_choose_best_queue(lp);
629 		else if (usched_dfly_features & 0x40)
630 			rdd = &dfly_pcpu[lp->lwp_qcpu];
631 		else if (usched_dfly_features & 0x80)
632 			rdd = dfly_choose_queue_simple(rdd, lp);
633 		else if (dfly_pcpu[lp->lwp_qcpu].runqcount)
634 			rdd = dfly_choose_best_queue(lp);
635 		else
636 			rdd = &dfly_pcpu[lp->lwp_qcpu];
637 	} else {
638 		rdd = dfly_choose_best_queue(lp);
639 		/* rdd = &dfly_pcpu[lp->lwp_qcpu]; */
640 	}
641 	if (lp->lwp_qcpu != rdd->cpuid) {
642 		spin_lock(&dd->spin);
643 		dfly_changeqcpu_locked(lp, dd, rdd);
644 		spin_unlock(&dd->spin);
645 	}
646 	dfly_setrunqueue_dd(rdd, lp);
647 }
648 
649 /*
650  * Change qcpu to rdd->cpuid.  The dd that lp is CURRENTLY on must be
651  * spin-locked by the caller.  rdd does not have to be.
652  */
653 static void
654 dfly_changeqcpu_locked(struct lwp *lp, dfly_pcpu_t dd, dfly_pcpu_t rdd)
655 {
656 	if (lp->lwp_qcpu != rdd->cpuid) {
657 		if (lp->lwp_mpflags & LWP_MP_ULOAD) {
658 			atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
659 			atomic_add_int(&dd->uload, -lp->lwp_uload);
660 			atomic_add_int(&dd->ucount, -1);
661 			atomic_add_int(&dfly_ucount, -1);
662 		}
663 		lp->lwp_qcpu = rdd->cpuid;
664 	}
665 }
666 
667 /*
668  * Place lp on rdd's runqueue.  Nothing is locked on call.  This function
669  * also performs all necessary ancillary notification actions.
670  */
671 static void
672 dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp)
673 {
674 	globaldata_t rgd;
675 
676 	/*
677 	 * We might be moving the lp to another cpu's run queue, and once
678 	 * on the runqueue (even if it is our cpu's), another cpu can rip
679 	 * it away from us.
680 	 *
681 	 * TDF_MIGRATING might already be set if this is part of a
682 	 * remrunqueue+setrunqueue sequence.
683 	 */
684 	if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0)
685 		lwkt_giveaway(lp->lwp_thread);
686 
687 	rgd = globaldata_find(rdd->cpuid);
688 
689 	/*
690 	 * We lose control of the lp the moment we release the spinlock
691 	 * after having placed it on the queue.  i.e. another cpu could pick
692 	 * it up, or it could exit, or its priority could be further
693 	 * adjusted, or something like that.
694 	 *
695 	 * WARNING! rdd can point to a foreign cpu!
696 	 */
697 	spin_lock(&rdd->spin);
698 	dfly_setrunqueue_locked(rdd, lp);
699 
700 	/*
701 	 * Potentially interrupt the currently-running thread
702 	 */
703 	if ((rdd->upri & ~PPQMASK) <= (lp->lwp_priority & ~PPQMASK)) {
704 		/*
705 		 * Currently running thread is better or same, do not
706 		 * interrupt.
707 		 */
708 		spin_unlock(&rdd->spin);
709 	} else if ((rdd->upri & ~PPQMASK) <= (lp->lwp_priority & ~PPQMASK) +
710 		   usched_dfly_fast_resched) {
711 		/*
712 		 * Currently running thread is not better, but not so bad
713 		 * that we need to interrupt it.  Let it run for one more
714 		 * scheduler tick.
715 		 */
716 		if (rdd->uschedcp &&
717 		    rdd->uschedcp->lwp_rrcount < usched_dfly_rrinterval) {
718 			rdd->uschedcp->lwp_rrcount = usched_dfly_rrinterval - 1;
719 		}
720 		spin_unlock(&rdd->spin);
721 	} else if (rgd == mycpu) {
722 		/*
723 		 * We should interrupt the currently running thread, which
724 		 * is on the current cpu.  However, if DIDYIELD is set we
725 		 * round-robin unconditionally and do not interrupt it.
726 		 */
727 		spin_unlock(&rdd->spin);
728 		if (rdd->uschedcp == NULL)
729 			wakeup_mycpu(rdd->helper_thread); /* XXX */
730 		if ((lp->lwp_thread->td_mpflags & TDF_MP_DIDYIELD) == 0)
731 			need_user_resched();
732 	} else {
733 		/*
734 		 * We should interrupt the currently running thread, which
735 		 * is on a different cpu.
736 		 */
737 		spin_unlock(&rdd->spin);
738 		lwkt_send_ipiq(rgd, dfly_need_user_resched_remote, NULL);
739 	}
740 }
741 
742 /*
743  * This routine is called from a systimer IPI.  It MUST be MP-safe and
744  * the BGL IS NOT HELD ON ENTRY.  This routine is called at ESTCPUFREQ on
745  * each cpu.
746  */
747 static
748 void
749 dfly_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
750 {
751 	globaldata_t gd = mycpu;
752 	dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
753 
754 	/*
755 	 * Spinlocks also hold a critical section so there should not be
756 	 * any active.
757 	 */
758 	KKASSERT(gd->gd_spinlocks == 0);
759 
760 	/*
761 	 * If lp is NULL we might be contended and lwkt_switch() may have
762 	 * cycled into the idle thread.  Apply the tick to the current
763 	 * process on this cpu if it is contended.
764 	 */
765 	if (gd->gd_curthread == &gd->gd_idlethread) {
766 		lp = dd->uschedcp;
767 		if (lp && (lp->lwp_thread == NULL ||
768 			   lp->lwp_thread->td_contended == 0)) {
769 			lp = NULL;
770 		}
771 	}
772 
773 	/*
774 	 * Dock thread for tick
775 	 */
776 	if (lp) {
777 		/*
778 		 * Do we need to round-robin?  We round-robin 10 times a
779 		 * second.  This should only occur for cpu-bound batch
780 		 * processes.
781 		 */
782 		if (++lp->lwp_rrcount >= usched_dfly_rrinterval) {
783 			lp->lwp_thread->td_wakefromcpu = -1;
784 			need_user_resched();
785 		}
786 
787 		/*
788 		 * Adjust estcpu upward using a real time equivalent
789 		 * calculation, and recalculate lp's priority.
790 		 */
791 		lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu +
792 					   ESTCPUMAX / ESTCPUFREQ + 1);
793 		dfly_resetpriority(lp);
794 	}
795 
796 	/*
797 	 * Rebalance two cpus every 8 ticks, pulling the worst thread
798 	 * from the worst cpu's queue into a rotating cpu number.
799 	 *
800 	 * This mechanic is needed because the push algorithms can
801 	 * steady-state in a non-optimal configuration.  We need to mix it
802 	 * up a little, even if it means breaking up a paired thread, so
803 	 * the push algorithms can rebalance the degenerate conditions.
804 	 * This portion of the algorithm exists to ensure stability at the
805 	 * selected weightings.
806 	 *
807 	 * Because we might be breaking up optimal conditions we do not want
808 	 * to execute this too quickly, hence we only rebalance approximately
809 	 * 7-8 times per second.  The pushes, on the other hand, are capable
810 	 * of moving threads to other cpus at a much higher rate.
811 	 *
812 	 * We choose the most heavily loaded thread from the worst queue
813 	 * in order to ensure that multiple heavy-weight threads on the same
814 	 * queue get broken up, and also because these threads are the most
815 	 * likely to be able to remain in place.  Hopefully then any pairings,
816 	 * if applicable, migrate to where these threads are.
817 	 */
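	/*
	 * In the test below, (sched_ticks & 7) == 0 fires on every 8th
	 * scheduler tick, and sched_ticks / 8 % ncpus rotates which cpu
	 * performs the pull, so only one cpu rebalances per interval.
	 */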
818 	if ((usched_dfly_features & 0x04) &&
819 	    ((u_int)sched_ticks & 7) == 0 &&
820 	    (u_int)sched_ticks / 8 % ncpus == gd->gd_cpuid) {
821 		/*
822 		 * Our cpu is up.
823 		 */
824 		struct lwp *nlp;
825 		dfly_pcpu_t rdd;
826 
827 		rdd = dfly_choose_worst_queue(dd);
828 		if (rdd) {
829 			spin_lock(&dd->spin);
830 			if (spin_trylock(&rdd->spin)) {
831 				nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
832 				spin_unlock(&rdd->spin);
833 				if (nlp == NULL)
834 					spin_unlock(&dd->spin);
835 			} else {
836 				spin_unlock(&dd->spin);
837 				nlp = NULL;
838 			}
839 		} else {
840 			nlp = NULL;
841 		}
842 		/* dd->spin held if nlp != NULL */
843 
844 		/*
845 		 * Either schedule it or add it to our queue.
846 		 */
847 		if (nlp &&
848 		    (nlp->lwp_priority & ~PPQMASK) < (dd->upri & ~PPQMASK)) {
849 			ATOMIC_CPUMASK_ORMASK(dfly_curprocmask, dd->cpumask);
850 			dd->upri = nlp->lwp_priority;
851 			dd->uschedcp = nlp;
852 #if 0
853 			dd->rrcount = 0;	/* reset round robin */
854 #endif
855 			spin_unlock(&dd->spin);
856 			lwkt_acquire(nlp->lwp_thread);
857 			lwkt_schedule(nlp->lwp_thread);
858 		} else if (nlp) {
859 			dfly_setrunqueue_locked(dd, nlp);
860 			spin_unlock(&dd->spin);
861 		}
862 	}
863 }
864 
865 /*
866  * Called from acquire and from kern_synch's one-second timer (one of the
867  * callout helper threads) with a critical section held.
868  *
869  * Adjust p_estcpu based on our single-cpu load, p_nice, and compensate for
870  * overall system load.
871  *
872  * Note that no recalculation occurs for a process which sleeps and wakes
873  * up in the same tick.  That is, a system doing thousands of context
874  * switches per second will still only do serious estcpu calculations
875  * ESTCPUFREQ times per second.
876  */
877 static
878 void
879 dfly_recalculate_estcpu(struct lwp *lp)
880 {
881 	globaldata_t gd = mycpu;
882 	sysclock_t cpbase;
883 	sysclock_t ttlticks;
884 	int estcpu;
885 	int decay_factor;
886 	int ucount;
887 
888 	/*
889 	 * We have to subtract periodic to get the last schedclock
890 	 * timeout time, otherwise we would get the upcoming timeout.
891 	 * Keep in mind that a process can migrate between cpus and
892 	 * while the scheduler clock should be very close, boundary
893 	 * conditions could lead to a small negative delta.
894 	 */
895 	cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
896 
897 	if (lp->lwp_slptime > 1) {
898 		/*
899 		 * Too much time has passed, do a coarse correction.
900 		 */
901 		lp->lwp_estcpu = lp->lwp_estcpu >> 1;
902 		dfly_resetpriority(lp);
903 		lp->lwp_cpbase = cpbase;
904 		lp->lwp_cpticks = 0;
905 		lp->lwp_estfast = 0;
906 	} else if (lp->lwp_cpbase != cpbase) {
907 		/*
908 		 * Adjust estcpu if we are in a different tick.  Don't waste
909 		 * time if we are in the same tick.
910 		 *
911 		 * First calculate the number of ticks in the measurement
912 		 * interval.  The ttlticks calculation can wind up 0 due to
913 		 * a bug in the handling of lwp_slptime  (as yet not found),
914 		 * so make sure we do not get a divide by 0 panic.
915 		 */
916 		ttlticks = (cpbase - lp->lwp_cpbase) /
917 			   gd->gd_schedclock.periodic;
918 		if ((ssysclock_t)ttlticks < 0) {
919 			ttlticks = 0;
920 			lp->lwp_cpbase = cpbase;
921 		}
922 		if (ttlticks == 0)
923 			return;
924 		updatepcpu(lp, lp->lwp_cpticks, ttlticks);
925 
926 		/*
927 		 * Calculate the percentage of one cpu being used then
928 		 * compensate for any system load in excess of ncpus.
929 		 *
930 		 * For example, if we have 8 cores and 16 running cpu-bound
931 		 * processes then all things being equal each process will
932 		 * get 50% of one cpu.  We need to pump this value back
933 		 * up to 100% so the estcpu calculation properly adjusts
934 		 * the process's dynamic priority.
935 		 *
936 		 * estcpu is scaled by ESTCPUMAX, pctcpu is scaled by FSCALE.
937 		 */
938 		estcpu = (lp->lwp_pctcpu * ESTCPUMAX) >> FSHIFT;
939 		ucount = dfly_ucount;
940 		if (ucount > ncpus) {
941 			estcpu += estcpu * (ucount - ncpus) / ncpus;
942 		}
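		/*
		 * e.g. with ncpus = 8 and dfly_ucount = 16 the adjustment
		 * above is estcpu += estcpu * 8 / 8, doubling estcpu and
		 * restoring the 50%-of-one-cpu figure to 100%.
		 */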
943 
944 		if (usched_dfly_debug == lp->lwp_proc->p_pid) {
945 			kprintf("pid %d lwp %p estcpu %3d %3d cp %d/%d",
946 				lp->lwp_proc->p_pid, lp,
947 				estcpu, lp->lwp_estcpu,
948 				lp->lwp_cpticks, ttlticks);
949 		}
950 
951 		/*
952 		 * Adjust lp->lwp_estcpu.  The decay factor determines how
953 		 * quickly lwp_estcpu collapses to its realtime calculation.
954 		 * A slower collapse gives us a more accurate number over
955 		 * the long term but can create problems with bursty threads
956 		 * or threads which become cpu hogs.
957 		 *
958 		 * To solve this problem, newly started lwps and lwps which
959 		 * are restarting after having been asleep for a while are
960 		 * given a much, much faster decay in order to quickly
961 		 * detect whether they become cpu-bound.
962 		 *
963 		 * NOTE: p_nice is accounted for in dfly_resetpriority(),
964 		 *	 and not here, but we must still ensure that a
965 		 *	 cpu-bound nice -20 process does not completely
966 		 *	 override a cpu-bound nice +20 process.
967 		 *
968 		 * NOTE: We must use ESTCPULIM() here to deal with any
969 		 *	 overshoot.
970 		 */
971 		decay_factor = usched_dfly_decay;
972 		if (decay_factor < 1)
973 			decay_factor = 1;
974 		if (decay_factor > 1024)
975 			decay_factor = 1024;
976 
977 		if (lp->lwp_estfast < usched_dfly_decay) {
978 			++lp->lwp_estfast;
979 			lp->lwp_estcpu = ESTCPULIM(
980 				(lp->lwp_estcpu * lp->lwp_estfast + estcpu) /
981 				(lp->lwp_estfast + 1));
982 		} else {
983 			lp->lwp_estcpu = ESTCPULIM(
984 				(lp->lwp_estcpu * decay_factor + estcpu) /
985 				(decay_factor + 1));
986 		}
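		/*
		 * With the default decay of 8 the steady-state blend above is
		 * (old * 8 + new) / 9, i.e. roughly 11% of the new sample is
		 * folded in per recalculation once lwp_estfast has ramped up.
		 */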
987 
988 		if (usched_dfly_debug == lp->lwp_proc->p_pid)
989 			kprintf(" finalestcpu %d\n", lp->lwp_estcpu);
990 		dfly_resetpriority(lp);
991 		lp->lwp_cpbase += ttlticks * gd->gd_schedclock.periodic;
992 		lp->lwp_cpticks = 0;
993 	}
994 }
995 
996 /*
997  * Compute the priority of a process when running in user mode.
998  * Arrange to reschedule if the resulting priority is better
999  * than that of the current process.
1000  *
1001  * This routine may be called with any process.
1002  *
1003  * This routine is called by fork1() for initial setup with the process of
1004  * the run queue, and also may be called normally with the process on or
1005  * off the run queue.
1006  */
1007 static void
1008 dfly_resetpriority(struct lwp *lp)
1009 {
1010 	dfly_pcpu_t rdd;
1011 	int newpriority;
1012 	u_short newrqtype;
1013 	int rcpu;
1014 	int checkpri;
1015 	int estcpu;
1016 	int delta_uload;
1017 
1018 	crit_enter();
1019 
1020 	/*
1021 	 * Lock the scheduler (lp) belongs to.  This can be on a different
1022 	 * cpu.  Handle races.  This loop breaks out with the appropriate
1023 	 * rdd locked.
1024 	 */
1025 	for (;;) {
1026 		rcpu = lp->lwp_qcpu;
1027 		cpu_ccfence();
1028 		rdd = &dfly_pcpu[rcpu];
1029 		spin_lock(&rdd->spin);
1030 		if (rcpu == lp->lwp_qcpu)
1031 			break;
1032 		spin_unlock(&rdd->spin);
1033 	}
1034 
1035 	/*
1036 	 * Calculate the new priority and queue type
1037 	 */
1038 	newrqtype = lp->lwp_rtprio.type;
1039 
1040 	switch(newrqtype) {
1041 	case RTP_PRIO_REALTIME:
1042 	case RTP_PRIO_FIFO:
1043 		newpriority = PRIBASE_REALTIME +
1044 			     (lp->lwp_rtprio.prio & PRIMASK);
1045 		break;
1046 	case RTP_PRIO_NORMAL:
1047 		/*
1048 		 * Compute the normal-class priority from p_nice and estcpu.
1049 		 */
1050 		estcpu = lp->lwp_estcpu;
1051 
1052 		/*
1053 		 * p_nice piece		Adds (0-40) * 2		0-80
1054 		 * estcpu		Adds 16384  * 4 / 512   0-128
1055 		 */
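		/*
		 * For example, with nice 0 and estcpu 8192 (half of
		 * ESTCPUMAX): 40 + 64 = 104, scaled by 128 / 210 down to 63,
		 * giving PRIBASE_NORMAL + 63 and run queue index 15.
		 */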
1056 		newpriority = (lp->lwp_proc->p_nice - PRIO_MIN) * PPQ / NICEPPQ;
1057 		newpriority += estcpu * PPQ / ESTCPUPPQ;
1058 		newpriority = newpriority * MAXPRI / (PRIO_RANGE * PPQ /
1059 			      NICEPPQ + ESTCPUMAX * PPQ / ESTCPUPPQ);
1060 		newpriority = PRIBASE_NORMAL + (newpriority & PRIMASK);
1061 		break;
1062 	case RTP_PRIO_IDLE:
1063 		newpriority = PRIBASE_IDLE + (lp->lwp_rtprio.prio & PRIMASK);
1064 		break;
1065 	case RTP_PRIO_THREAD:
1066 		newpriority = PRIBASE_THREAD + (lp->lwp_rtprio.prio & PRIMASK);
1067 		break;
1068 	default:
1069 		panic("Bad RTP_PRIO %d", newrqtype);
1070 		/* NOT REACHED */
1071 	}
1072 
1073 	/*
1074 	 * The LWKT scheduler doesn't dive into usched structures, so give it
1075 	 * a hint on the relative priority of user threads running in the
1076 	 * kernel.  The LWKT scheduler will always ensure that a user thread
1077 	 * running in the kernel gets cpu time eventually, regardless of its
1078 	 * upri, but it can decide not to instantly switch from one kernel or
1079 	 * user mode user thread to a kernel-mode user thread when the latter
1080 	 * has a less desirable user priority.
1081 	 *
1082 	 * td_upri has normal sense (higher values are more desirable), so
1083 	 * negate it.
1084 	 */
1085 	lp->lwp_thread->td_upri = -(newpriority & usched_dfly_swmask);
1086 
1087 	/*
1088 	 * The newpriority incorporates the queue type so do a simple masked
1089 	 * check to determine if the process has moved to another queue.  If
1090 	 * it has, and it is currently on a run queue, then move it.
1091 	 *
1092 	 * Since uload is ~PPQMASK masked, no modifications are necessary if
1093 	 * we end up in the same run queue.
1094 	 *
1095 	 * Reset rrcount if moving to a higher-priority queue, otherwise
1096 	 * retain rrcount.
1097 	 */
1098 	if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
1099 		if (lp->lwp_priority < newpriority)
1100 			lp->lwp_rrcount = 0;
1101 		if (lp->lwp_mpflags & LWP_MP_ONRUNQ) {
1102 			dfly_remrunqueue_locked(rdd, lp);
1103 			lp->lwp_priority = newpriority;
1104 			lp->lwp_rqtype = newrqtype;
1105 			lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1106 			dfly_setrunqueue_locked(rdd, lp);
1107 			checkpri = 1;
1108 		} else {
1109 			lp->lwp_priority = newpriority;
1110 			lp->lwp_rqtype = newrqtype;
1111 			lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1112 			checkpri = 0;
1113 		}
1114 	} else {
1115 		/*
1116 		 * In the same PPQ, uload cannot change.
1117 		 */
1118 		lp->lwp_priority = newpriority;
1119 		checkpri = 1;
1120 		rcpu = -1;
1121 	}
1122 
1123 	/*
1124 	 * Adjust effective load.
1125 	 *
1126 	 * Calculate load then scale up or down geometrically based on p_nice.
1127 	 * Processes niced up (positive) are less important, and processes
1128 	 * niced downward (negative) are more important.  The higher the uload,
1129 	 * the more important the thread.
1130 	 */
1131 	/* 0-511, 0-100% cpu */
1132 	delta_uload = lp->lwp_estcpu / NQS;
1133 	delta_uload -= delta_uload * lp->lwp_proc->p_nice / (PRIO_MAX + 1);
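	/*
	 * e.g. a fully cpu-bound thread contributes roughly 512 uload at
	 * nice 0, only about 25 at nice +20 (512 - 512 * 20 / 21), and
	 * about 999 at nice -20.
	 */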
1134 
1135 
1136 	delta_uload -= lp->lwp_uload;
1137 	lp->lwp_uload += delta_uload;
1138 	if (lp->lwp_mpflags & LWP_MP_ULOAD)
1139 		atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload, delta_uload);
1140 
1141 	/*
1142 	 * Determine if we need to reschedule the target cpu.  This only
1143 	 * occurs if the LWP is already on a scheduler queue, which means
1144 	 * that idle cpu notification has already occurred.  At most we
1145 	 * need only issue a need_user_resched() on the appropriate cpu.
1146 	 *
1147 	 * The LWP may be owned by a CPU different from the current one,
1148 	 * in which case dd->uschedcp may be modified without an MP lock
1149 	 * or a spinlock held.  The worst that happens is that the code
1150 	 * below causes a spurious need_user_resched() on the target CPU
1151 	 * and dd->upri to be wrong for a short period of time, both of
1152 	 * which are harmless.
1153 	 *
1154 	 * If checkpri is 0 we are adjusting the priority of the current
1155 	 * process, possibly higher (less desirable), so ignore the upri
1156 	 * check which will fail in that case.
1157 	 */
1158 	if (rcpu >= 0) {
1159 		if (CPUMASK_TESTBIT(dfly_rdyprocmask, rcpu) &&
1160 		    (checkpri == 0 ||
1161 		     (rdd->upri & ~PRIMASK) >
1162 		     (lp->lwp_priority & ~PRIMASK))) {
1163 			if (rcpu == mycpu->gd_cpuid) {
1164 				spin_unlock(&rdd->spin);
1165 				need_user_resched();
1166 			} else {
1167 				spin_unlock(&rdd->spin);
1168 				lwkt_send_ipiq(globaldata_find(rcpu),
1169 					       dfly_need_user_resched_remote,
1170 					       NULL);
1171 			}
1172 		} else {
1173 			spin_unlock(&rdd->spin);
1174 		}
1175 	} else {
1176 		spin_unlock(&rdd->spin);
1177 	}
1178 	crit_exit();
1179 }
1180 
1181 static
1182 void
1183 dfly_yield(struct lwp *lp)
1184 {
1185 	if (lp->lwp_qcpu != mycpu->gd_cpuid)
1186 		return;
1187 	KKASSERT(lp == curthread->td_lwp);
1188 
1189 	/*
1190 	 * Don't set need_user_resched() or mess with rrcount or anything.
1191 	 * The TDF flag will override everything as long as we release.
1192 	 */
1193 	atomic_set_int(&lp->lwp_thread->td_mpflags, TDF_MP_DIDYIELD);
1194 	dfly_release_curproc(lp);
1195 }
1196 
1197 /*
1198  * Thread was forcefully migrated to another cpu.  Normally forced migrations
1199  * are used for iterations, in which case the kernel returns to the
1200  * original cpu before returning to userland and this helper is not
1201  * needed.  However, if the kernel migrates a thread to another cpu and
1202  * wants to leave it there, it has to call this scheduler helper.
1203  *
1204  * Note that the lwkt_migratecpu() function also released the thread, so
1205  * we don't have to worry about that.
1206  */
1207 static
1208 void
1209 dfly_changedcpu(struct lwp *lp)
1210 {
1211 	dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1212 	dfly_pcpu_t rdd = &dfly_pcpu[mycpu->gd_cpuid];
1213 
1214 	if (dd != rdd) {
1215 		spin_lock(&dd->spin);
1216 		dfly_changeqcpu_locked(lp, dd, rdd);
1217 		spin_unlock(&dd->spin);
1218 	}
1219 }
1220 
1221 /*
1222  * Called from fork1() when a new child process is being created.
1223  *
1224  * Give the child process an initial estcpu that is more batchy than
1225  * its parent and dock the parent for the fork (but do not
1226  * reschedule the parent).
1227  *
1228  * fast
1229  *
1230  * XXX lwp should be "spawning" instead of "forking"
1231  */
1232 static void
1233 dfly_forking(struct lwp *plp, struct lwp *lp)
1234 {
1235 	/*
1236 	 * Put the child 4 queue slots (out of 32) higher than the parent
1237 	 * (less desirable than the parent).
1238 	 */
1239 	lp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ * 4);
1240 	lp->lwp_forked = 1;
1241 	lp->lwp_estfast = 0;
1242 
1243 	/*
1244 	 * Dock the parent a cost for the fork, protecting us from fork
1245 	 * bombs.  If the parent is forking quickly make the child more
1246 	 * batchy.
1247 	 */
1248 	plp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ / 16);
1249 }
1250 
1251 /*
1252  * Called when a lwp is being removed from this scheduler, typically
1253  * during lwp_exit().  We have to clean out any ULOAD accounting before
1254  * we can let the lp go.  The dd->spin lock is not needed for uload
1255  * updates.
1256  *
1257  * Scheduler dequeueing has already occurred, no further action in that
1258  * regard is needed.
1259  */
1260 static void
1261 dfly_exiting(struct lwp *lp, struct proc *child_proc)
1262 {
1263 	dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1264 
1265 	if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1266 		atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1267 		atomic_add_int(&dd->uload, -lp->lwp_uload);
1268 		atomic_add_int(&dd->ucount, -1);
1269 		atomic_add_int(&dfly_ucount, -1);
1270 	}
1271 }
1272 
1273 /*
1274  * This function cannot block in any way, but spinlocks are ok.
1275  *
1276  * Update the uload based on the state of the thread (whether it is going
1277  * to sleep or running again).  The uload is meant to be a longer-term
1278  * load and not an instantaneous load.
1279  */
1280 static void
1281 dfly_uload_update(struct lwp *lp)
1282 {
1283 	dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1284 
1285 	if (lp->lwp_thread->td_flags & TDF_RUNQ) {
1286 		if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1287 			spin_lock(&dd->spin);
1288 			if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1289 				atomic_set_int(&lp->lwp_mpflags,
1290 					       LWP_MP_ULOAD);
1291 				atomic_add_int(&dd->uload, lp->lwp_uload);
1292 				atomic_add_int(&dd->ucount, 1);
1293 				atomic_add_int(&dfly_ucount, 1);
1294 			}
1295 			spin_unlock(&dd->spin);
1296 		}
1297 	} else if (lp->lwp_slptime > 0) {
1298 		if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1299 			spin_lock(&dd->spin);
1300 			if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1301 				atomic_clear_int(&lp->lwp_mpflags,
1302 						 LWP_MP_ULOAD);
1303 				atomic_add_int(&dd->uload, -lp->lwp_uload);
1304 				atomic_add_int(&dd->ucount, -1);
1305 				atomic_add_int(&dfly_ucount, -1);
1306 			}
1307 			spin_unlock(&dd->spin);
1308 		}
1309 	}
1310 }
1311 
1312 /*
1313  * chooseproc() is called when a cpu needs a user process to LWKT schedule,
1314  * it selects a user process and returns it.  If chklp is non-NULL and chklp
1315  * has a better or equal priority than the process that would otherwise be
1316  * chosen, NULL is returned.
1317  *
1318  * Until we fix the RUNQ code the chklp test has to be strict or we may
1319  * bounce between processes trying to acquire the current process designation.
1320  *
1321  * Must be called with rdd->spin locked.  The spinlock is left intact through
1322  * the entire routine.  dd->spin does not have to be locked.
1323  *
1324  * If worst is non-zero this function finds the worst thread instead of the
1325  * best thread (used by the schedulerclock-based rover).
1326  */
1327 static
1328 struct lwp *
1329 dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
1330 		       struct lwp *chklp, int worst)
1331 {
1332 	struct lwp *lp;
1333 	struct rq *q;
1334 	u_int32_t *which;
1335 	u_int32_t pri;
1336 	u_int32_t rtqbits;
1337 	u_int32_t tsqbits;
1338 	u_int32_t idqbits;
1339 
1340 	rtqbits = rdd->rtqueuebits;
1341 	tsqbits = rdd->queuebits;
1342 	idqbits = rdd->idqueuebits;
1343 
1344 	if (worst) {
1345 		if (idqbits) {
1346 			pri = bsrl(idqbits);
1347 			q = &rdd->idqueues[pri];
1348 			which = &rdd->idqueuebits;
1349 		} else if (tsqbits) {
1350 			pri = bsrl(tsqbits);
1351 			q = &rdd->queues[pri];
1352 			which = &rdd->queuebits;
1353 		} else if (rtqbits) {
1354 			pri = bsrl(rtqbits);
1355 			q = &rdd->rtqueues[pri];
1356 			which = &rdd->rtqueuebits;
1357 		} else {
1358 			return (NULL);
1359 		}
1360 		lp = TAILQ_LAST(q, rq);
1361 	} else {
1362 		if (rtqbits) {
1363 			pri = bsfl(rtqbits);
1364 			q = &rdd->rtqueues[pri];
1365 			which = &rdd->rtqueuebits;
1366 		} else if (tsqbits) {
1367 			pri = bsfl(tsqbits);
1368 			q = &rdd->queues[pri];
1369 			which = &rdd->queuebits;
1370 		} else if (idqbits) {
1371 			pri = bsfl(idqbits);
1372 			q = &rdd->idqueues[pri];
1373 			which = &rdd->idqueuebits;
1374 		} else {
1375 			return (NULL);
1376 		}
1377 		lp = TAILQ_FIRST(q);
1378 	}
1379 	KASSERT(lp, ("chooseproc: no lwp on busy queue"));
1380 
1381 	/*
1382 	 * If the passed lwp <chklp> is reasonably close to the selected
1383 	 * lwp <lp>, return NULL (indicating that <chklp> should be kept).
1384 	 *
1385 	 * Note that we must err on the side of <chklp> to avoid bouncing
1386 	 * between threads in the acquire code.
1387 	 */
1388 	if (chklp) {
1389 		if (chklp->lwp_priority < lp->lwp_priority + PPQ)
1390 			return(NULL);
1391 	}
1392 
1393 	KTR_COND_LOG(usched_chooseproc,
1394 	    lp->lwp_proc->p_pid == usched_dfly_pid_debug,
1395 	    lp->lwp_proc->p_pid,
1396 	    lp->lwp_thread->td_gd->gd_cpuid,
1397 	    mycpu->gd_cpuid);
1398 
1399 	KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
1400 	atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1401 	TAILQ_REMOVE(q, lp, lwp_procq);
1402 	--rdd->runqcount;
1403 	if (TAILQ_EMPTY(q))
1404 		*which &= ~(1 << pri);
1405 
1406 	/*
1407 	 * If we are choosing a process from rdd with the intent to
1408 	 * move it to dd, lwp_qcpu must be adjusted while rdd's spinlock
1409 	 * is still held.
1410 	 */
1411 	if (rdd != dd) {
1412 		if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1413 			atomic_add_int(&rdd->uload, -lp->lwp_uload);
1414 			atomic_add_int(&rdd->ucount, -1);
1415 			atomic_add_int(&dfly_ucount, -1);
1416 		}
1417 		lp->lwp_qcpu = dd->cpuid;
1418 		atomic_add_int(&dd->uload, lp->lwp_uload);
1419 		atomic_add_int(&dd->ucount, 1);
1420 		atomic_add_int(&dfly_ucount, 1);
1421 		atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1422 	}
1423 	return lp;
1424 }
1425 
1426 /*
1427  * USED TO PUSH RUNNABLE LWPS TO THE LEAST LOADED CPU.
1428  *
1429  * Choose a cpu node to schedule lp on, hopefully nearby its current
1430  * node.
1431  *
1432  * We give the current node a modest advantage for obvious reasons.
1433  *
1434  * We also give the node the thread was woken up FROM a slight advantage
1435  * in order to try to schedule paired threads which synchronize/block waiting
1436  * for each other fairly close to each other.  Similarly in a network setting
1437  * this feature will also attempt to place a user process near the kernel
1438  * protocol thread that is feeding it data.  THIS IS A CRITICAL PART of the
1439  * algorithm as it heuristically groups synchronizing processes for locality
1440  * of reference in multi-socket systems.
1441  *
1442  * We check against running processes and give a big advantage if there
1443  * are none running.
1444  *
1445  * The caller will normally dfly_setrunqueue() lp on the returned queue.
1446  *
1447  * When the topology is known choose a cpu whose group has, in aggregate,
1448  * the lowest weighted load.
1449  */
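/*
 * In outline, each pass of the loop below scores a candidate cpu group
 * as roughly (sum of uload + ucount * weight3, minus weight4 for each
 * completely idle cpu), averaged over the group's cpus, then applies
 * the weight1 (current cpu) and weight2 (wakeup pairing) adjustments
 * before descending into the lowest-scoring child group.
 */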
1450 static
1451 dfly_pcpu_t
1452 dfly_choose_best_queue(struct lwp *lp)
1453 {
1454 	cpumask_t wakemask;
1455 	cpumask_t mask;
1456 	cpu_node_t *cpup;
1457 	cpu_node_t *cpun;
1458 	cpu_node_t *cpub;
1459 	dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1460 	dfly_pcpu_t rdd;
1461 	int wakecpu;
1462 	int cpuid;
1463 	int n;
1464 	int count;
1465 	int load;
1466 	int lowest_load;
1467 
1468 	/*
1469 	 * When the topology is unknown choose a random cpu that is hopefully
1470 	 * idle.
1471 	 */
1472 	if (dd->cpunode == NULL)
1473 		return (dfly_choose_queue_simple(dd, lp));
1474 
1475 	/*
1476 	 * Pairing mask
1477 	 */
1478 	if ((wakecpu = lp->lwp_thread->td_wakefromcpu) >= 0)
1479 		wakemask = dfly_pcpu[wakecpu].cpumask;
1480 	else
1481 		CPUMASK_ASSZERO(wakemask);
1482 
1483 	/*
1484 	 * When the topology is known choose a cpu whose group has, in
1485 	 * aggregate, the lowest weighted load.
1486 	 */
1487 	cpup = root_cpu_node;
1488 	rdd = dd;
1489 
1490 	while (cpup) {
1491 		/*
1492 		 * Degenerate case super-root
1493 		 */
1494 		if (cpup->child_no == 1) {
1495 			cpup = cpup->child_node[0];
1496 			continue;
1497 		}
1498 
1499 		/*
1500 		 * Terminal cpunode
1501 		 */
1502 		if (cpup->child_no == 0) {
1503 			rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1504 			break;
1505 		}
1506 
1507 		cpub = NULL;
1508 		lowest_load = 0x7FFFFFFF;
1509 
1510 		for (n = 0; n < cpup->child_no; ++n) {
1511 			/*
1512 			 * Accumulate load information for all cpus
1513 			 * which are members of this node.
1514 			 */
1515 			cpun = cpup->child_node[n];
1516 			mask = cpun->members;
1517 			CPUMASK_ANDMASK(mask, usched_global_cpumask);
1518 			CPUMASK_ANDMASK(mask, smp_active_mask);
1519 			CPUMASK_ANDMASK(mask, lp->lwp_cpumask);
1520 			if (CPUMASK_TESTZERO(mask))
1521 				continue;
1522 
1523 			count = 0;
1524 			load = 0;
1525 
1526 			while (CPUMASK_TESTNZERO(mask)) {
1527 				cpuid = BSFCPUMASK(mask);
1528 				rdd = &dfly_pcpu[cpuid];
1529 				load += rdd->uload;
1530 				load += rdd->ucount * usched_dfly_weight3;
1531 
1532 				if (rdd->uschedcp == NULL &&
1533 				    rdd->runqcount == 0 &&
1534 				    globaldata_find(cpuid)->gd_tdrunqcount == 0
1535 				) {
1536 					load -= usched_dfly_weight4;
1537 				}
1538 #if 0
1539 				else if (rdd->upri > lp->lwp_priority + PPQ) {
1540 					load -= usched_dfly_weight4 / 2;
1541 				}
1542 #endif
1543 				CPUMASK_NANDBIT(mask, cpuid);
1544 				++count;
1545 			}
1546 
1547 			/*
1548 			 * Compensate if the lp is already accounted for in
1549 			 * the aggregate uload for this mask set.  We want
1550 			 * to calculate the loads as if lp were not present,
1551 			 * otherwise the calculation is bogus.
1552 			 */
1553 			if ((lp->lwp_mpflags & LWP_MP_ULOAD) &&
1554 			    CPUMASK_TESTMASK(dd->cpumask, cpun->members)) {
1555 				load -= lp->lwp_uload;
1556 				load -= usched_dfly_weight3;
1557 			}
1558 
1559 			load /= count;
1560 
1561 			/*
1562 			 * Advantage the cpu group (lp) is already on.
1563 			 */
1564 			if (CPUMASK_TESTMASK(cpun->members, dd->cpumask))
1565 				load -= usched_dfly_weight1;
1566 
1567 			/*
1568 			 * Advantage the cpu group we want to pair (lp) to,
1569 			 * but don't let it go to the exact same cpu as
1570 			 * the wakecpu target.
1571 			 *
1572 			 * We do this by checking whether cpun is a
1573 			 * terminal node or not.  All cpun's at the same
1574 			 * level will either all be terminal or all not
1575 			 * terminal.
1576 			 *
1577 			 * If it is and we match we disadvantage the load.
1578 			 * If it is and we don't match we advantage the load.
1579 			 *
1580 			 * Also note that we are effectively disadvantaging
1581 			 * all-but-one by the same amount, so it won't affect
1582 			 * the weight1 factor for the all-but-one nodes.
1583 			 */
1584 			if (CPUMASK_TESTMASK(cpun->members, wakemask)) {
1585 				if (cpun->child_no != 0) {
1586 					/* advantage */
1587 					load -= usched_dfly_weight2;
1588 				} else {
1589 					if (usched_dfly_features & 0x10)
1590 						load += usched_dfly_weight2;
1591 					else
1592 						load -= usched_dfly_weight2;
1593 				}
1594 			}
1595 
1596 			/*
1597 			 * Calculate the best load
1598 			 */
1599 			if (cpub == NULL || lowest_load > load ||
1600 			    (lowest_load == load &&
1601 			     CPUMASK_TESTMASK(cpun->members, dd->cpumask))
1602 			) {
1603 				lowest_load = load;
1604 				cpub = cpun;
1605 			}
1606 		}
1607 		cpup = cpub;
1608 	}
1609 	if (usched_dfly_chooser)
1610 		kprintf("lp %02d->%02d %s\n",
1611 			lp->lwp_qcpu, rdd->cpuid, lp->lwp_proc->p_comm);
1612 	return (rdd);
1613 }
1614 
1615 /*
1616  * USED TO PULL RUNNABLE LWPS FROM THE MOST LOADED CPU.
1617  *
1618  * Choose the worst queue close to dd's cpu node with a non-empty runq
1619  * that is NOT dd.  Also require that the moving of the highest-load thread
1620  * from rdd to dd does not cause the uload's to cross each other.
1621  *
1622  * This is used by the thread chooser when the current cpu's queues are
1623  * empty to steal a thread from another cpu's queue.  We want to offload
1624  * the most heavily-loaded queue.
1625  */
1626 static
1627 dfly_pcpu_t
1628 dfly_choose_worst_queue(dfly_pcpu_t dd)
1629 {
1630 	cpumask_t mask;
1631 	cpu_node_t *cpup;
1632 	cpu_node_t *cpun;
1633 	cpu_node_t *cpub;
1634 	dfly_pcpu_t rdd;
1635 	int cpuid;
1636 	int n;
1637 	int count;
1638 	int load;
1639 #if 0
1640 	int pri;
1641 	int hpri;
1642 #endif
1643 	int highest_load;
1644 
1645 	/*
1646 	 * When the topology is unknown we have no basis for choosing a
1647 	 * worst queue, so don't pull from anywhere.
1648 	 */
1649 	if (dd->cpunode == NULL) {
1650 		return (NULL);
1651 	}
1652 
1653 	/*
1654 	 * When the topology is known choose a cpu whose group has, in
1655 	 * aggregate, the highest weighted load.
1656 	 */
1657 	cpup = root_cpu_node;
1658 	rdd = dd;
1659 	while (cpup) {
1660 		/*
1661 		 * Degenerate case super-root
1662 		 */
1663 		if (cpup->child_no == 1) {
1664 			cpup = cpup->child_node[0];
1665 			continue;
1666 		}
1667 
1668 		/*
1669 		 * Terminal cpunode
1670 		 */
1671 		if (cpup->child_no == 0) {
1672 			rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1673 			break;
1674 		}
1675 
1676 		cpub = NULL;
1677 		highest_load = 0;
1678 
1679 		for (n = 0; n < cpup->child_no; ++n) {
1680 			/*
1681 			 * Accumulate load information for all cpus
1682 			 * which are members of this node.
1683 			 */
1684 			cpun = cpup->child_node[n];
1685 			mask = cpun->members;
1686 			CPUMASK_ANDMASK(mask, usched_global_cpumask);
1687 			CPUMASK_ANDMASK(mask, smp_active_mask);
1688 			if (CPUMASK_TESTZERO(mask))
1689 				continue;
1690 			count = 0;
1691 			load = 0;
1692 
1693 			while (CPUMASK_TESTNZERO(mask)) {
1694 				cpuid = BSFCPUMASK(mask);
1695 				rdd = &dfly_pcpu[cpuid];
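				/*
				 * Each user thread adds weight3 on top of
				 * its uload; a cpu with no user or LWKT
				 * work at all is discounted by weight4.
				 */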
1696 				load += rdd->uload;
1697 				load += rdd->ucount * usched_dfly_weight3;
1698 				if (rdd->uschedcp == NULL &&
1699 				    rdd->runqcount == 0 &&
1700 				    globaldata_find(cpuid)->gd_tdrunqcount == 0
1701 				) {
1702 					load -= usched_dfly_weight4;
1703 				}
1704 #if 0
1705 				else if (rdd->upri > dd->upri + PPQ) {
1706 					load -= usched_dfly_weight4 / 2;
1707 				}
1708 #endif
1709 				CPUMASK_NANDBIT(mask, cpuid);
1710 				++count;
1711 			}
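			/* Average the load over the cpus in this group */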
1712 			load /= count;
1713 
1714 			/*
1715 			 * Prefer candidates which are somewhat closer to
1716 			 * our cpu.
1717 			 */
1718 			if (CPUMASK_TESTMASK(dd->cpumask, cpun->members))
1719 				load += usched_dfly_weight1;
1720 
1721 			/*
1722 			 * The best candidate is the one with the worst
1723 			 * (highest) load.
1724 			 */
1725 			if (cpub == NULL || highest_load < load) {
1726 				highest_load = load;
1727 				cpub = cpun;
1728 			}
1729 		}
1730 		cpup = cpub;
1731 	}
1732 
1733 	/*
1734 	 * We never return our own node (dd), and only return a remote
	 * node if its load is significantly worse than ours (i.e. where
1736 	 * stealing a thread would be considered reasonable).
1737 	 *
1738 	 * This also helps us avoid breaking paired threads apart which
1739 	 * can have disastrous effects on performance.
1740 	 */
1741 	if (rdd == dd)
1742 		return(NULL);
1743 
1744 #if 0
1745 	hpri = 0;
1746 	if (rdd->rtqueuebits && hpri < (pri = bsrl(rdd->rtqueuebits)))
1747 		hpri = pri;
1748 	if (rdd->queuebits && hpri < (pri = bsrl(rdd->queuebits)))
1749 		hpri = pri;
1750 	if (rdd->idqueuebits && hpri < (pri = bsrl(rdd->idqueuebits)))
1751 		hpri = pri;
1752 	hpri *= PPQ;
1753 	if (rdd->uload - hpri < dd->uload + hpri)
1754 		return(NULL);
1755 #endif
1756 	return (rdd);
1757 }
1758 
1759 static
1760 dfly_pcpu_t
1761 dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp)
1762 {
1763 	dfly_pcpu_t rdd;
1764 	cpumask_t tmpmask;
1765 	cpumask_t mask;
1766 	int cpuid;
1767 
1768 	/*
	 * Fall back to the original heuristic: select a semi-random cpu,
	 * first checking cpus not currently running a user thread.
1771 	 */
1772 	++dfly_scancpu;
1773 	cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1774 	mask = dfly_rdyprocmask;
1775 	CPUMASK_NANDMASK(mask, dfly_curprocmask);
1776 	CPUMASK_ANDMASK(mask, lp->lwp_cpumask);
1777 	CPUMASK_ANDMASK(mask, smp_active_mask);
1778 	CPUMASK_ANDMASK(mask, usched_global_cpumask);
1779 
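	/*
	 * The dfly_scancpu rotor spreads successive placements across the
	 * candidate cpus instead of always picking the lowest-numbered one.
	 */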
1780 	while (CPUMASK_TESTNZERO(mask)) {
1781 		CPUMASK_ASSNBMASK(tmpmask, cpuid);
1782 		if (CPUMASK_TESTMASK(tmpmask, mask)) {
1783 			CPUMASK_ANDMASK(tmpmask, mask);
1784 			cpuid = BSFCPUMASK(tmpmask);
1785 		} else {
1786 			cpuid = BSFCPUMASK(mask);
1787 		}
1788 		rdd = &dfly_pcpu[cpuid];
1789 
1790 		if ((rdd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK))
1791 			goto found;
1792 		CPUMASK_NANDBIT(mask, cpuid);
1793 	}
1794 
1795 	/*
	 * Then check cpus which might already be running a user lp.
1797 	 */
1798 	cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1799 	mask = dfly_rdyprocmask;
1800 	CPUMASK_ANDMASK(mask, dfly_curprocmask);
1801 	CPUMASK_ANDMASK(mask, lp->lwp_cpumask);
1802 	CPUMASK_ANDMASK(mask, smp_active_mask);
1803 	CPUMASK_ANDMASK(mask, usched_global_cpumask);
1804 
1805 	while (CPUMASK_TESTNZERO(mask)) {
1806 		CPUMASK_ASSNBMASK(tmpmask, cpuid);
1807 		if (CPUMASK_TESTMASK(tmpmask, mask)) {
1808 			CPUMASK_ANDMASK(tmpmask, mask);
1809 			cpuid = BSFCPUMASK(tmpmask);
1810 		} else {
1811 			cpuid = BSFCPUMASK(mask);
1812 		}
1813 		rdd = &dfly_pcpu[cpuid];
1814 
1815 		if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
1816 			goto found;
1817 		CPUMASK_NANDBIT(mask, cpuid);
1818 	}
1819 
1820 	/*
1821 	 * If we cannot find a suitable cpu we reload from dfly_scancpu
	 * and round-robin.  Other cpus will pick up as they release their
1823 	 * current lwps or become ready.
1824 	 *
1825 	 * Avoid a degenerate system lockup case if usched_global_cpumask
1826 	 * is set to 0 or otherwise does not cover lwp_cpumask.
1827 	 *
	 * In this case we only kick the target cpu's helper thread; we do
	 * not set the user resched flag.
1830 	 */
1831 	cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1832 	if (CPUMASK_TESTBIT(usched_global_cpumask, cpuid) == 0)
1833 		cpuid = 0;
1834 	rdd = &dfly_pcpu[cpuid];
1835 found:
1836 	return (rdd);
1837 }
1838 
1839 static
1840 void
1841 dfly_need_user_resched_remote(void *dummy)
1842 {
1843 	globaldata_t gd = mycpu;
1844 	dfly_pcpu_t  dd = &dfly_pcpu[gd->gd_cpuid];
1845 
1846 	/*
1847 	 * Flag reschedule needed
1848 	 */
1849 	need_user_resched();
1850 
1851 	/*
1852 	 * If no user thread is currently running we need to kick the helper
1853 	 * on our cpu to recover.  Otherwise the cpu will never schedule
1854 	 * anything again.
1855 	 *
1856 	 * We cannot schedule the process ourselves because this is an
1857 	 * IPI callback and we cannot acquire spinlocks in an IPI callback.
1858 	 *
	 * Call wakeup_mycpu() to avoid sending IPIs to other cpus.
1860 	 */
1861 	if (dd->uschedcp == NULL &&
1862 	    CPUMASK_TESTBIT(dfly_rdyprocmask, gd->gd_cpuid)) {
1863 		ATOMIC_CPUMASK_NANDBIT(dfly_rdyprocmask, gd->gd_cpuid);
1864 		wakeup_mycpu(dd->helper_thread);
1865 	}
1866 }
1867 
1868 /*
1869  * dfly_remrunqueue_locked() removes a given process from the run queue
1870  * that it is on, clearing the queue busy bit if it becomes empty.
1871  *
 * Note that the user process scheduler is different from the LWKT scheduler.
1873  * The user process scheduler only manages user processes but it uses LWKT
1874  * underneath, and a user process operating in the kernel will often be
1875  * 'released' from our management.
1876  *
1877  * uload is NOT adjusted here.  It is only adjusted if the lwkt_thread goes
1878  * to sleep or the lwp is moved to a different runq.
1879  */
1880 static void
1881 dfly_remrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1882 {
1883 	struct rq *q;
1884 	u_int32_t *which;
1885 	u_int8_t pri;
1886 
1887 	KKASSERT(rdd->runqcount >= 0);
1888 
1889 	pri = lp->lwp_rqindex;
1890 
1891 	switch(lp->lwp_rqtype) {
1892 	case RTP_PRIO_NORMAL:
1893 		q = &rdd->queues[pri];
1894 		which = &rdd->queuebits;
1895 		break;
1896 	case RTP_PRIO_REALTIME:
1897 	case RTP_PRIO_FIFO:
1898 		q = &rdd->rtqueues[pri];
1899 		which = &rdd->rtqueuebits;
1900 		break;
1901 	case RTP_PRIO_IDLE:
1902 		q = &rdd->idqueues[pri];
1903 		which = &rdd->idqueuebits;
1904 		break;
1905 	default:
1906 		panic("remrunqueue: invalid rtprio type");
1907 		/* NOT REACHED */
1908 	}
1909 	KKASSERT(lp->lwp_mpflags & LWP_MP_ONRUNQ);
1910 	atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1911 	TAILQ_REMOVE(q, lp, lwp_procq);
1912 	--rdd->runqcount;
1913 	if (TAILQ_EMPTY(q)) {
1914 		KASSERT((*which & (1 << pri)) != 0,
1915 			("remrunqueue: remove from empty queue"));
1916 		*which &= ~(1 << pri);
1917 	}
1918 }
1919 
1920 /*
1921  * dfly_setrunqueue_locked()
1922  *
 * Add a process whose rqtype and rqindex have previously been calculated
1924  * onto the appropriate run queue.   Determine if the addition requires
1925  * a reschedule on a cpu and return the cpuid or -1.
1926  *
1927  * NOTE: 	  Lower priorities are better priorities.
1928  *
1929  * NOTE ON ULOAD: This variable specifies the aggregate load on a cpu, the
1930  *		  sum of the rough lwp_priority for all running and runnable
1931  *		  processes.  Lower priority processes (higher lwp_priority
1932  *		  values) actually DO count as more load, not less, because
1933  *		  these are the programs which require the most care with
1934  *		  regards to cpu selection.
1935  */
1936 static void
1937 dfly_setrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1938 {
1939 	u_int32_t *which;
1940 	struct rq *q;
1941 	int pri;
1942 
1943 	KKASSERT(lp->lwp_qcpu == rdd->cpuid);
1944 
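	/*
	 * Start accounting for this lwp's load on its queue cpu if it is
	 * not already being accounted for.  uload and ucount are the
	 * per-cpu aggregates consulted by the cpu selection heuristics.
	 */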
1945 	if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1946 		atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1947 		atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload, lp->lwp_uload);
1948 		atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].ucount, 1);
1949 		atomic_add_int(&dfly_ucount, 1);
1950 	}
1951 
1952 	pri = lp->lwp_rqindex;
1953 
1954 	switch(lp->lwp_rqtype) {
1955 	case RTP_PRIO_NORMAL:
1956 		q = &rdd->queues[pri];
1957 		which = &rdd->queuebits;
1958 		break;
1959 	case RTP_PRIO_REALTIME:
1960 	case RTP_PRIO_FIFO:
1961 		q = &rdd->rtqueues[pri];
1962 		which = &rdd->rtqueuebits;
1963 		break;
1964 	case RTP_PRIO_IDLE:
1965 		q = &rdd->idqueues[pri];
1966 		which = &rdd->idqueuebits;
1967 		break;
1968 	default:
		panic("setrunqueue: invalid rtprio type");
1970 		/* NOT REACHED */
1971 	}
1972 
1973 	/*
1974 	 * Place us on the selected queue.  Determine if we should be
1975 	 * placed at the head of the queue or at the end.
1976 	 *
1977 	 * We are placed at the tail if our round-robin count has expired,
	 * or is about to expire and the system thinks it is a good place to
1979 	 * round-robin, or there is already a next thread on the queue
1980 	 * (it might be trying to pick up where it left off and we don't
1981 	 * want to interfere).
1982 	 */
1983 	KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
1984 	atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1985 	++rdd->runqcount;
1986 
1987 	if (lp->lwp_rrcount >= usched_dfly_rrinterval ||
1988 	    (lp->lwp_rrcount >= usched_dfly_rrinterval / 2 &&
1989 	     (lp->lwp_thread->td_mpflags & TDF_MP_BATCH_DEMARC))
1990 	) {
1991 		/*
1992 		 * Place on tail
1993 		 */
1994 		atomic_clear_int(&lp->lwp_thread->td_mpflags,
1995 				 TDF_MP_BATCH_DEMARC);
1996 		lp->lwp_rrcount = 0;
1997 		TAILQ_INSERT_TAIL(q, lp, lwp_procq);
1998 	} else {
1999 		/*
2000 		 * Retain rrcount and place on head.  Count is retained
2001 		 * even if the queue is empty.
2002 		 */
2003 		TAILQ_INSERT_HEAD(q, lp, lwp_procq);
2004 	}
2005 	*which |= 1 << pri;
2006 }
2007 
2008 /*
2009  * For SMP systems a user scheduler helper thread is created for each
2010  * cpu and is used to allow one cpu to wakeup another for the purposes of
2011  * scheduling userland threads from setrunqueue().
2012  *
2013  * UP systems do not need the helper since there is only one cpu.
2014  *
2015  * We can't use the idle thread for this because we might block.
2016  * Additionally, doing things this way allows us to HLT idle cpus
2017  * on MP systems.
2018  */
2019 static void
2020 dfly_helper_thread(void *dummy)
2021 {
2022     globaldata_t gd;
2023     dfly_pcpu_t dd;
2024     dfly_pcpu_t rdd;
2025     struct lwp *nlp;
2026     cpumask_t mask;
2027     int cpuid;
2028 
2029     gd = mycpu;
2030     cpuid = gd->gd_cpuid;	/* doesn't change */
2031     mask = gd->gd_cpumask;	/* doesn't change */
2032     dd = &dfly_pcpu[cpuid];
2033 
2034     /*
     * Since we want to be woken up only when no user processes
2036      * are scheduled on a cpu, run at an ultra low priority.
2037      */
2038     lwkt_setpri_self(TDPRI_USER_SCHEDULER);
2039 
2040     tsleep(dd->helper_thread, 0, "schslp", 0);
2041 
2042     for (;;) {
2043 	/*
2044 	 * We use the LWKT deschedule-interlock trick to avoid racing
	 * dfly_rdyprocmask.  This means we cannot block between the
	 * tsleep_interlock() here and the PINTERLOCKED tsleep() at the
	 * bottom of the loop.
2047 	 */
2048 	crit_enter_gd(gd);
2049 	tsleep_interlock(dd->helper_thread, 0);
2050 
2051 	spin_lock(&dd->spin);
2052 
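	/*
	 * Re-mark this cpu in dfly_rdyprocmask.  Other cpus test this mask
	 * before waking our helper (see dfly_need_user_resched_remote()),
	 * so it must be set again before we scan for work.
	 */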
2053 	ATOMIC_CPUMASK_ORMASK(dfly_rdyprocmask, mask);
	clear_user_resched();	/* This satisfies the reschedule request */
2055 #if 0
2056 	dd->rrcount = 0;	/* Reset the round-robin counter */
2057 #endif
2058 
2059 	if (dd->runqcount || dd->uschedcp != NULL) {
2060 		/*
2061 		 * Threads are available.  A thread may or may not be
2062 		 * currently scheduled.  Get the best thread already queued
2063 		 * to this cpu.
2064 		 */
2065 		nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);
2066 		if (nlp) {
2067 			ATOMIC_CPUMASK_ORMASK(dfly_curprocmask, mask);
2068 			dd->upri = nlp->lwp_priority;
2069 			dd->uschedcp = nlp;
2070 #if 0
2071 			dd->rrcount = 0;	/* reset round robin */
2072 #endif
2073 			spin_unlock(&dd->spin);
2074 			lwkt_acquire(nlp->lwp_thread);
2075 			lwkt_schedule(nlp->lwp_thread);
2076 		} else {
2077 			/*
2078 			 * This situation should not occur because we had
2079 			 * at least one thread available.
2080 			 */
2081 			spin_unlock(&dd->spin);
2082 		}
2083 	} else if (usched_dfly_features & 0x01) {
2084 		/*
2085 		 * This cpu is devoid of runnable threads, steal a thread
2086 		 * from another cpu.  Since we're stealing, might as well
2087 		 * load balance at the same time.
2088 		 *
2089 		 * We choose the highest-loaded thread from the worst queue.
2090 		 *
2091 		 * NOTE! This function only returns a non-NULL rdd when
2092 		 *	 another cpu's queue is obviously overloaded.  We
2093 		 *	 do not want to perform the type of rebalancing
2094 		 *	 the schedclock does here because it would result
2095 		 *	 in insane process pulling when 'steady' state is
2096 		 *	 partially unbalanced (e.g. 6 runnables and only
2097 		 *	 4 cores).
2098 		 */
2099 		rdd = dfly_choose_worst_queue(dd);
2100 		if (rdd && spin_trylock(&rdd->spin)) {
2101 			nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
2102 			spin_unlock(&rdd->spin);
2103 		} else {
2104 			nlp = NULL;
2105 		}
2106 		if (nlp) {
2107 			ATOMIC_CPUMASK_ORMASK(dfly_curprocmask, mask);
2108 			dd->upri = nlp->lwp_priority;
2109 			dd->uschedcp = nlp;
2110 #if 0
2111 			dd->rrcount = 0;	/* reset round robin */
2112 #endif
2113 			spin_unlock(&dd->spin);
2114 			lwkt_acquire(nlp->lwp_thread);
2115 			lwkt_schedule(nlp->lwp_thread);
2116 		} else {
2117 			/*
			 * We could not steal anything; leave the thread on
			 * the remote cpu's run queue.  Another cpu may pull
			 * it later.
2120 			 */
2121 			spin_unlock(&dd->spin);
2122 		}
2123 	} else {
2124 		/*
		 * Devoid of runnable threads and not allowed to steal
		 * any.
2127 		 */
2128 		spin_unlock(&dd->spin);
2129 	}
2130 
2131 	/*
2132 	 * We're descheduled unless someone scheduled us.  Switch away.
2133 	 * Exiting the critical section will cause splz() to be called
2134 	 * for us if interrupts and such are pending.
2135 	 */
2136 	crit_exit_gd(gd);
2137 	tsleep(dd->helper_thread, PINTERLOCKED, "schslp", 0);
2138     }
2139 }
2140 
2141 #if 0
2142 static int
2143 sysctl_usched_dfly_stick_to_level(SYSCTL_HANDLER_ARGS)
2144 {
2145 	int error, new_val;
2146 
2147 	new_val = usched_dfly_stick_to_level;
2148 
2149 	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
2151 		return (error);
2152 	if (new_val > cpu_topology_levels_number - 1 || new_val < 0)
2153 		return (EINVAL);
2154 	usched_dfly_stick_to_level = new_val;
2155 	return (0);
2156 }
2157 #endif
2158 
2159 /*
 * Set up the queues and scheduler helpers (scheduler helpers are SMP only).
2161  * Note that curprocmask bit 0 has already been cleared by rqinit() and
2162  * we should not mess with it further.
2163  */
2164 static void
2165 usched_dfly_cpu_init(void)
2166 {
2167 	int i;
2168 	int j;
2169 	int smt_not_supported = 0;
2170 	int cache_coherent_not_supported = 0;
2171 
2172 	if (bootverbose)
2173 		kprintf("Start usched_dfly helpers on cpus:\n");
2174 
2175 	sysctl_ctx_init(&usched_dfly_sysctl_ctx);
2176 	usched_dfly_sysctl_tree =
2177 		SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
2178 				SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
2179 				"usched_dfly", CTLFLAG_RD, 0, "");
2180 
2181 	for (i = 0; i < ncpus; ++i) {
2182 		dfly_pcpu_t dd = &dfly_pcpu[i];
2183 		cpumask_t mask;
2184 
2185 		CPUMASK_ASSBIT(mask, i);
2186 		if (CPUMASK_TESTMASK(mask, smp_active_mask) == 0)
2187 		    continue;
2188 
2189 		spin_init(&dd->spin, "uschedcpuinit");
2190 		dd->cpunode = get_cpu_node_by_cpuid(i);
2191 		dd->cpuid = i;
2192 		CPUMASK_ASSBIT(dd->cpumask, i);
2193 		for (j = 0; j < NQS; j++) {
2194 			TAILQ_INIT(&dd->queues[j]);
2195 			TAILQ_INIT(&dd->rtqueues[j]);
2196 			TAILQ_INIT(&dd->idqueues[j]);
2197 		}
2198 		ATOMIC_CPUMASK_NANDBIT(dfly_curprocmask, 0);
2199 
2200 		if (dd->cpunode == NULL) {
2201 			smt_not_supported = 1;
2202 			cache_coherent_not_supported = 1;
2203 			if (bootverbose)
2204 				kprintf ("    cpu%d - WARNING: No CPU NODE "
2205 					 "found for cpu\n", i);
2206 		} else {
2207 			switch (dd->cpunode->type) {
2208 			case THREAD_LEVEL:
2209 				if (bootverbose)
2210 					kprintf ("    cpu%d - HyperThreading "
2211 						 "available. Core siblings: ",
2212 						 i);
2213 				break;
2214 			case CORE_LEVEL:
2215 				smt_not_supported = 1;
2216 
2217 				if (bootverbose)
2218 					kprintf ("    cpu%d - No HT available, "
2219 						 "multi-core/physical "
2220 						 "cpu. Physical siblings: ",
2221 						 i);
2222 				break;
2223 			case CHIP_LEVEL:
2224 				smt_not_supported = 1;
2225 
2226 				if (bootverbose)
2227 					kprintf ("    cpu%d - No HT available, "
2228 						 "single-core/physical cpu. "
2229 						 "Package siblings: ",
2230 						 i);
2231 				break;
2232 			default:
2233 				/* Let's go for safe defaults here */
2234 				smt_not_supported = 1;
2235 				cache_coherent_not_supported = 1;
2236 				if (bootverbose)
2237 					kprintf ("    cpu%d - Unknown cpunode->"
2238 						 "type=%u. siblings: ",
2239 						 i,
2240 						 (u_int)dd->cpunode->type);
2241 				break;
2242 			}
2243 
2244 			if (bootverbose) {
2245 				if (dd->cpunode->parent_node != NULL) {
2246 					kprint_cpuset(&dd->cpunode->
2247 							parent_node->members);
2248 					kprintf("\n");
2249 				} else {
2250 					kprintf(" no siblings\n");
2251 				}
2252 			}
2253 		}
2254 
2255 		lwkt_create(dfly_helper_thread, NULL, &dd->helper_thread, NULL,
2256 			    0, i, "usched %d", i);
2257 
2258 		/*
2259 		 * Allow user scheduling on the target cpu.  cpu #0 has already
2260 		 * been enabled in rqinit().
2261 		 */
2262 		if (i)
2263 			ATOMIC_CPUMASK_NANDMASK(dfly_curprocmask, mask);
2264 		ATOMIC_CPUMASK_ORMASK(dfly_rdyprocmask, mask);
2265 		dd->upri = PRIBASE_NULL;
2266 
2267 	}
2268 
2269 	/* usched_dfly sysctl configurable parameters */
2270 
2271 	SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2272 		       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2273 		       OID_AUTO, "rrinterval", CTLFLAG_RW,
2274 		       &usched_dfly_rrinterval, 0, "");
2275 	SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2276 		       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2277 		       OID_AUTO, "decay", CTLFLAG_RW,
2278 		       &usched_dfly_decay, 0, "Extra decay when not running");
2279 
2280 	/* Add enable/disable option for SMT scheduling if supported */
2281 	if (smt_not_supported) {
2282 		usched_dfly_smt = 0;
2283 		SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2284 				  SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2285 				  OID_AUTO, "smt", CTLFLAG_RD,
2286 				  "NOT SUPPORTED", 0, "SMT NOT SUPPORTED");
2287 	} else {
2288 		usched_dfly_smt = 1;
2289 		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2290 			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2291 			       OID_AUTO, "smt", CTLFLAG_RW,
2292 			       &usched_dfly_smt, 0, "Enable SMT scheduling");
2293 	}
2294 
2295 	/*
2296 	 * Add enable/disable option for cache coherent scheduling
2297 	 * if supported
2298 	 */
2299 	if (cache_coherent_not_supported) {
2300 		usched_dfly_cache_coherent = 0;
2301 		SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2302 				  SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2303 				  OID_AUTO, "cache_coherent", CTLFLAG_RD,
2304 				  "NOT SUPPORTED", 0,
2305 				  "Cache coherence NOT SUPPORTED");
2306 	} else {
2307 		usched_dfly_cache_coherent = 1;
2308 		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2309 			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2310 			       OID_AUTO, "cache_coherent", CTLFLAG_RW,
2311 			       &usched_dfly_cache_coherent, 0,
2312 			       "Enable/Disable cache coherent scheduling");
2313 
2314 		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2315 			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2316 			       OID_AUTO, "weight1", CTLFLAG_RW,
2317 			       &usched_dfly_weight1, 200,
2318 			       "Weight selection for current cpu");
2319 
2320 		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2321 			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2322 			       OID_AUTO, "weight2", CTLFLAG_RW,
2323 			       &usched_dfly_weight2, 180,
2324 			       "Weight selection for wakefrom cpu");
2325 
2326 		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2327 			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2328 			       OID_AUTO, "weight3", CTLFLAG_RW,
2329 			       &usched_dfly_weight3, 40,
2330 			       "Weight selection for num threads on queue");
2331 
2332 		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2333 			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2334 			       OID_AUTO, "weight4", CTLFLAG_RW,
2335 			       &usched_dfly_weight4, 160,
2336 			       "Availability of other idle cpus");
2337 
2338 		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2339 			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2340 			       OID_AUTO, "fast_resched", CTLFLAG_RW,
2341 			       &usched_dfly_fast_resched, 0,
			       "Fast reschedule priority threshold");
2343 
2344 		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2345 			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2346 			       OID_AUTO, "features", CTLFLAG_RW,
2347 			       &usched_dfly_features, 0x8F,
2348 			       "Allow pulls into empty queues");
2349 
2350 		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2351 			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2352 			       OID_AUTO, "swmask", CTLFLAG_RW,
2353 			       &usched_dfly_swmask, ~PPQMASK,
2354 			       "Queue mask to force thread switch");
2355 
2356 #if 0
2357 		SYSCTL_ADD_PROC(&usched_dfly_sysctl_ctx,
2358 				SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2359 				OID_AUTO, "stick_to_level",
2360 				CTLTYPE_INT | CTLFLAG_RW,
2361 				NULL, sizeof usched_dfly_stick_to_level,
2362 				sysctl_usched_dfly_stick_to_level, "I",
				"Stick a process to this level.  See sysctl "
				"parameter hw.cpu_topology.level_description");
2365 #endif
2366 	}
2367 }
2368 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2369 	usched_dfly_cpu_init, NULL);
2370