xref: /dragonfly/sys/kern/usched_dfly.c (revision 07a2f99c)
1 /*
2  * Copyright (c) 2012 The DragonFly Project.  All rights reserved.
3  * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>.  All rights reserved.
4  *
5  * This code is derived from software contributed to The DragonFly Project
6  * by Matthew Dillon <dillon@backplane.com>,
7  * by Mihai Carabas <mihai.carabas@gmail.com>
8  * and many others.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  *
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in
18  *    the documentation and/or other materials provided with the
19  *    distribution.
20  * 3. Neither the name of The DragonFly Project nor the names of its
21  *    contributors may be used to endorse or promote products derived
22  *    from this software without specific, prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
28  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  */
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/lock.h>
41 #include <sys/queue.h>
42 #include <sys/proc.h>
43 #include <sys/rtprio.h>
44 #include <sys/uio.h>
45 #include <sys/sysctl.h>
46 #include <sys/resourcevar.h>
47 #include <sys/spinlock.h>
48 #include <sys/cpu_topology.h>
49 #include <sys/thread2.h>
50 #include <sys/spinlock2.h>
51 #include <sys/mplock2.h>
52 
53 #include <sys/ktr.h>
54 
55 #include <machine/cpu.h>
56 #include <machine/smp.h>
57 
58 /*
59  * Priorities.  Note that with 32 run queues per scheduler each queue
60  * represents four priority levels.
61  */
62 
63 int dfly_rebalanced;
64 
65 #define MAXPRI			128
66 #define PRIMASK			(MAXPRI - 1)
67 #define PRIBASE_REALTIME	0
68 #define PRIBASE_NORMAL		MAXPRI
69 #define PRIBASE_IDLE		(MAXPRI * 2)
70 #define PRIBASE_THREAD		(MAXPRI * 3)
71 #define PRIBASE_NULL		(MAXPRI * 4)
72 
73 #define NQS	32			/* 32 run queues. */
74 #define PPQ	(MAXPRI / NQS)		/* priorities per queue */
75 #define PPQMASK	(PPQ - 1)
76 
77 /*
78  * NICEPPQ	- number of nice units per priority queue
79  * ESTCPUPPQ	- number of estcpu units per priority queue
80  * ESTCPUMAX	- number of estcpu units
81  */
82 #define NICEPPQ		2
83 #define ESTCPUPPQ	512
84 #define ESTCPUMAX	(ESTCPUPPQ * NQS)
85 #define BATCHMAX	(ESTCPUFREQ * 30)
86 #define PRIO_RANGE	(PRIO_MAX - PRIO_MIN + 1)
87 
88 #define ESTCPULIM(v)	min((v), ESTCPUMAX)
89 
90 TAILQ_HEAD(rq, lwp);
91 
92 #define lwp_priority	lwp_usdata.dfly.priority
93 #define lwp_forked	lwp_usdata.dfly.forked
94 #define lwp_rqindex	lwp_usdata.dfly.rqindex
95 #define lwp_estcpu	lwp_usdata.dfly.estcpu
96 #define lwp_estfast	lwp_usdata.dfly.estfast
97 #define lwp_uload	lwp_usdata.dfly.uload
98 #define lwp_rqtype	lwp_usdata.dfly.rqtype
99 #define lwp_qcpu	lwp_usdata.dfly.qcpu
100 #define lwp_rrcount	lwp_usdata.dfly.rrcount
101 
102 struct usched_dfly_pcpu {
103 	struct spinlock spin;
104 	struct thread	helper_thread;
105 	short		unusde01;
106 	short		upri;
107 	int		uload;
108 	int		ucount;
109 	struct lwp	*uschedcp;
110 	struct rq	queues[NQS];
111 	struct rq	rtqueues[NQS];
112 	struct rq	idqueues[NQS];
113 	u_int32_t	queuebits;
114 	u_int32_t	rtqueuebits;
115 	u_int32_t	idqueuebits;
116 	int		runqcount;
117 	int		cpuid;
118 	cpumask_t	cpumask;
119 	cpu_node_t	*cpunode;
120 };
121 
122 typedef struct usched_dfly_pcpu	*dfly_pcpu_t;
123 
124 static void dfly_acquire_curproc(struct lwp *lp);
125 static void dfly_release_curproc(struct lwp *lp);
126 static void dfly_select_curproc(globaldata_t gd);
127 static void dfly_setrunqueue(struct lwp *lp);
128 static void dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp);
129 static void dfly_schedulerclock(struct lwp *lp, sysclock_t period,
130 				sysclock_t cpstamp);
131 static void dfly_recalculate_estcpu(struct lwp *lp);
132 static void dfly_resetpriority(struct lwp *lp);
133 static void dfly_forking(struct lwp *plp, struct lwp *lp);
134 static void dfly_exiting(struct lwp *lp, struct proc *);
135 static void dfly_uload_update(struct lwp *lp);
136 static void dfly_yield(struct lwp *lp);
137 static void dfly_changeqcpu_locked(struct lwp *lp,
138 				dfly_pcpu_t dd, dfly_pcpu_t rdd);
139 static dfly_pcpu_t dfly_choose_best_queue(struct lwp *lp);
140 static dfly_pcpu_t dfly_choose_worst_queue(dfly_pcpu_t dd);
141 static dfly_pcpu_t dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp);
142 static void dfly_need_user_resched_remote(void *dummy);
143 static struct lwp *dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
144 					  struct lwp *chklp, int worst);
145 static void dfly_remrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
146 static void dfly_setrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
147 
148 struct usched usched_dfly = {
149 	{ NULL },
150 	"dfly", "Original DragonFly Scheduler",
151 	NULL,			/* default registration */
152 	NULL,			/* default deregistration */
153 	dfly_acquire_curproc,
154 	dfly_release_curproc,
155 	dfly_setrunqueue,
156 	dfly_schedulerclock,
157 	dfly_recalculate_estcpu,
158 	dfly_resetpriority,
159 	dfly_forking,
160 	dfly_exiting,
161 	dfly_uload_update,
162 	NULL,			/* setcpumask not supported */
163 	dfly_yield
164 };
165 
166 /*
167  * We have NQS (32) run queues per scheduling class.  For the normal
168  * class, there are 128 priorities scaled onto these 32 queues.  New
169  * processes are added to the last entry in each queue, and processes
170  * are selected for running by taking them from the head and maintaining
171  * a simple FIFO arrangement.  Realtime and Idle priority processes have
172  * and explicit 0-31 priority which maps directly onto their class queue
173  * index.  When a queue has something in it, the corresponding bit is
174  * set in the queuebits variable, allowing a single read to determine
175  * the state of all 32 queues and then a ffs() to find the first busy
176  * queue.
177  */
178 static cpumask_t dfly_curprocmask = -1;	/* currently running a user process */
179 static cpumask_t dfly_rdyprocmask;	/* ready to accept a user process */
180 static volatile int dfly_scancpu;
181 static volatile int dfly_ucount;	/* total running on whole system */
182 static struct usched_dfly_pcpu dfly_pcpu[MAXCPU];
183 static struct sysctl_ctx_list usched_dfly_sysctl_ctx;
184 static struct sysctl_oid *usched_dfly_sysctl_tree;
185 
186 /* Debug info exposed through debug.* sysctl */
187 
188 static int usched_dfly_debug = -1;
189 SYSCTL_INT(_debug, OID_AUTO, dfly_scdebug, CTLFLAG_RW,
190 	   &usched_dfly_debug, 0,
191 	   "Print debug information for this pid");
192 
193 static int usched_dfly_pid_debug = -1;
194 SYSCTL_INT(_debug, OID_AUTO, dfly_pid_debug, CTLFLAG_RW,
195 	   &usched_dfly_pid_debug, 0,
196 	   "Print KTR debug information for this pid");
197 
198 static int usched_dfly_chooser = 0;
199 SYSCTL_INT(_debug, OID_AUTO, dfly_chooser, CTLFLAG_RW,
200 	   &usched_dfly_chooser, 0,
201 	   "Print KTR debug information for this pid");
202 
203 /*
204  * Tunning usched_dfly - configurable through kern.usched_dfly.
205  *
206  * weight1 - Tries to keep threads on their current cpu.  If you
207  *	     make this value too large the scheduler will not be
208  *	     able to load-balance large loads.
209  *
210  * weight2 - If non-zero, detects thread pairs undergoing synchronous
211  *	     communications and tries to move them closer together.
212  *	     Behavior is adjusted by bit 4 of features (0x10).
213  *
214  *	     WARNING!  Weight2 is a ridiculously sensitive parameter,
215  *	     a small value is recommended.
216  *
217  * weight3 - Weighting based on the number of recently runnable threads
218  *	     on the userland scheduling queue (ignoring their loads).
219  *	     A nominal value here prevents high-priority (low-load)
220  *	     threads from accumulating on one cpu core when other
221  *	     cores are available.
222  *
223  *	     This value should be left fairly small relative to weight1
224  *	     and weight4.
225  *
226  * weight4 - Weighting based on other cpu queues being available
227  *	     or running processes with higher lwp_priority's.
228  *
229  *	     This allows a thread to migrate to another nearby cpu if it
230  *	     is unable to run on the current cpu based on the other cpu
231  *	     being idle or running a lower priority (higher lwp_priority)
232  *	     thread.  This value should be large enough to override weight1
233  *
234  * features - These flags can be set or cleared to enable or disable various
235  *	      features.
236  *
237  *	      0x01	Enable idle-cpu pulling			(default)
238  *	      0x02	Enable proactive pushing		(default)
239  *	      0x04	Enable rebalancing rover		(default)
240  *	      0x08	Enable more proactive pushing		(default)
241  *	      0x10	(flip weight2 limit on same cpu)	(default)
242  *	      0x20	choose best cpu for forked process
243  *	      0x40	choose current cpu for forked process
244  *	      0x80	choose random cpu for forked process	(default)
245  */
246 static int usched_dfly_smt = 0;
247 static int usched_dfly_cache_coherent = 0;
248 static int usched_dfly_weight1 = 200;	/* keep thread on current cpu */
249 static int usched_dfly_weight2 = 180;	/* synchronous peer's current cpu */
250 static int usched_dfly_weight3 = 40;	/* number of threads on queue */
251 static int usched_dfly_weight4 = 160;	/* availability of idle cores */
252 static int usched_dfly_features = 0x8F;	/* allow pulls */
253 static int usched_dfly_fast_resched = 0;/* delta priority / resched */
254 static int usched_dfly_swmask = ~PPQMASK; /* allow pulls */
255 static int usched_dfly_rrinterval = (ESTCPUFREQ + 9) / 10;
256 static int usched_dfly_decay = 8;
257 
258 /* KTR debug printings */
259 
260 KTR_INFO_MASTER(usched);
261 
262 #if !defined(KTR_USCHED_DFLY)
263 #define	KTR_USCHED_DFLY	KTR_ALL
264 #endif
265 
266 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc, 0,
267     "USCHED_DFLY(chooseproc: pid %d, old_cpuid %d, curr_cpuid %d)",
268     pid_t pid, int old_cpuid, int curr);
269 
270 /*
271  * This function is called when the kernel intends to return to userland.
272  * It is responsible for making the thread the current designated userland
273  * thread for this cpu, blocking if necessary.
274  *
275  * The kernel will not depress our LWKT priority until after we return,
276  * in case we have to shove over to another cpu.
277  *
278  * We must determine our thread's disposition before we switch away.  This
279  * is very sensitive code.
280  *
281  * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
282  * TO ANOTHER CPU!  Because most of the kernel assumes that no migration will
283  * occur, this function is called only under very controlled circumstances.
284  */
285 static void
286 dfly_acquire_curproc(struct lwp *lp)
287 {
288 	globaldata_t gd;
289 	dfly_pcpu_t dd;
290 	dfly_pcpu_t rdd;
291 	thread_t td;
292 	int force_resched;
293 
294 	/*
295 	 * Make sure we aren't sitting on a tsleep queue.
296 	 */
297 	td = lp->lwp_thread;
298 	crit_enter_quick(td);
299 	if (td->td_flags & TDF_TSLEEPQ)
300 		tsleep_remove(td);
301 	dfly_recalculate_estcpu(lp);
302 
303 	gd = mycpu;
304 	dd = &dfly_pcpu[gd->gd_cpuid];
305 
306 	/*
307 	 * Process any pending interrupts/ipi's, then handle reschedule
308 	 * requests.  dfly_release_curproc() will try to assign a new
309 	 * uschedcp that isn't us and otherwise NULL it out.
310 	 */
311 	force_resched = 0;
312 	if ((td->td_mpflags & TDF_MP_BATCH_DEMARC) &&
313 	    lp->lwp_rrcount >= usched_dfly_rrinterval / 2) {
314 		force_resched = 1;
315 	}
316 
317 	if (user_resched_wanted()) {
318 		if (dd->uschedcp == lp)
319 			force_resched = 1;
320 		clear_user_resched();
321 		dfly_release_curproc(lp);
322 	}
323 
324 	/*
325 	 * Loop until we are the current user thread.
326 	 *
327 	 * NOTE: dd spinlock not held at top of loop.
328 	 */
329 	if (dd->uschedcp == lp)
330 		lwkt_yield_quick();
331 
332 	while (dd->uschedcp != lp) {
333 		lwkt_yield_quick();
334 
335 		spin_lock(&dd->spin);
336 
337 		/*
338 		 * We are not or are no longer the current lwp and a forced
339 		 * reschedule was requested.  Figure out the best cpu to
340 		 * run on (our current cpu will be given significant weight).
341 		 *
342 		 * (if a reschedule was not requested we want to move this
343 		 *  step after the uschedcp tests).
344 		 */
345 		if (force_resched &&
346 		    (usched_dfly_features & 0x08) &&
347 		    (rdd = dfly_choose_best_queue(lp)) != dd) {
348 			dfly_changeqcpu_locked(lp, dd, rdd);
349 			spin_unlock(&dd->spin);
350 			lwkt_deschedule(lp->lwp_thread);
351 			dfly_setrunqueue_dd(rdd, lp);
352 			lwkt_switch();
353 			gd = mycpu;
354 			dd = &dfly_pcpu[gd->gd_cpuid];
355 			continue;
356 		}
357 
358 		/*
359 		 * Either no reschedule was requested or the best queue was
360 		 * dd, and no current process has been selected.  We can
361 		 * trivially become the current lwp on the current cpu.
362 		 */
363 		if (dd->uschedcp == NULL) {
364 			atomic_set_cpumask(&dfly_curprocmask, gd->gd_cpumask);
365 			dd->uschedcp = lp;
366 			dd->upri = lp->lwp_priority;
367 			KKASSERT(lp->lwp_qcpu == dd->cpuid);
368 			spin_unlock(&dd->spin);
369 			break;
370 		}
371 
372 		/*
373 		 * Can we steal the current designated user thread?
374 		 *
375 		 * If we do the other thread will stall when it tries to
376 		 * return to userland, possibly rescheduling elsewhere.
377 		 *
378 		 * It is important to do a masked test to avoid the edge
379 		 * case where two near-equal-priority threads are constantly
380 		 * interrupting each other.
381 		 *
382 		 * In the exact match case another thread has already gained
383 		 * uschedcp and lowered its priority, if we steal it the
384 		 * other thread will stay stuck on the LWKT runq and not
385 		 * push to another cpu.  So don't steal on equal-priority even
386 		 * though it might appear to be more beneficial due to not
387 		 * having to switch back to the other thread's context.
388 		 *
389 		 * usched_dfly_fast_resched requires that two threads be
390 		 * significantly far apart in priority in order to interrupt.
391 		 *
392 		 * If better but not sufficiently far apart, the current
393 		 * uschedcp will be interrupted at the next scheduler clock.
394 		 */
395 		if (dd->uschedcp &&
396 		   (dd->upri & ~PPQMASK) >
397 		   (lp->lwp_priority & ~PPQMASK) + usched_dfly_fast_resched) {
398 			dd->uschedcp = lp;
399 			dd->upri = lp->lwp_priority;
400 			KKASSERT(lp->lwp_qcpu == dd->cpuid);
401 			spin_unlock(&dd->spin);
402 			break;
403 		}
404 		/*
405 		 * We are not the current lwp, figure out the best cpu
406 		 * to run on (our current cpu will be given significant
407 		 * weight).  Loop on cpu change.
408 		 */
409 		if ((usched_dfly_features & 0x02) &&
410 		    force_resched == 0 &&
411 		    (rdd = dfly_choose_best_queue(lp)) != dd) {
412 			dfly_changeqcpu_locked(lp, dd, rdd);
413 			spin_unlock(&dd->spin);
414 			lwkt_deschedule(lp->lwp_thread);
415 			dfly_setrunqueue_dd(rdd, lp);
416 			lwkt_switch();
417 			gd = mycpu;
418 			dd = &dfly_pcpu[gd->gd_cpuid];
419 			continue;
420 		}
421 
422 		/*
423 		 * We cannot become the current lwp, place the lp on the
424 		 * run-queue of this or another cpu and deschedule ourselves.
425 		 *
426 		 * When we are reactivated we will have another chance.
427 		 *
428 		 * Reload after a switch or setrunqueue/switch possibly
429 		 * moved us to another cpu.
430 		 */
431 		spin_unlock(&dd->spin);
432 		lwkt_deschedule(lp->lwp_thread);
433 		dfly_setrunqueue_dd(dd, lp);
434 		lwkt_switch();
435 		gd = mycpu;
436 		dd = &dfly_pcpu[gd->gd_cpuid];
437 	}
438 
439 	/*
440 	 * Make sure upri is synchronized, then yield to LWKT threads as
441 	 * needed before returning.  This could result in another reschedule.
442 	 * XXX
443 	 */
444 	crit_exit_quick(td);
445 
446 	KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
447 }
448 
449 /*
450  * DFLY_RELEASE_CURPROC
451  *
452  * This routine detaches the current thread from the userland scheduler,
453  * usually because the thread needs to run or block in the kernel (at
454  * kernel priority) for a while.
455  *
456  * This routine is also responsible for selecting a new thread to
457  * make the current thread.
458  *
459  * NOTE: This implementation differs from the dummy example in that
460  * dfly_select_curproc() is able to select the current process, whereas
461  * dummy_select_curproc() is not able to select the current process.
462  * This means we have to NULL out uschedcp.
463  *
464  * Additionally, note that we may already be on a run queue if releasing
465  * via the lwkt_switch() in dfly_setrunqueue().
466  */
467 static void
468 dfly_release_curproc(struct lwp *lp)
469 {
470 	globaldata_t gd = mycpu;
471 	dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
472 
473 	/*
474 	 * Make sure td_wakefromcpu is defaulted.  This will be overwritten
475 	 * by wakeup().
476 	 */
477 	if (dd->uschedcp == lp) {
478 		KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
479 		spin_lock(&dd->spin);
480 		if (dd->uschedcp == lp) {
481 			dd->uschedcp = NULL;	/* don't let lp be selected */
482 			dd->upri = PRIBASE_NULL;
483 			atomic_clear_cpumask(&dfly_curprocmask, gd->gd_cpumask);
484 			spin_unlock(&dd->spin);
485 			dfly_select_curproc(gd);
486 		} else {
487 			spin_unlock(&dd->spin);
488 		}
489 	}
490 }
491 
492 /*
493  * DFLY_SELECT_CURPROC
494  *
495  * Select a new current process for this cpu and clear any pending user
496  * reschedule request.  The cpu currently has no current process.
497  *
498  * This routine is also responsible for equal-priority round-robining,
499  * typically triggered from dfly_schedulerclock().  In our dummy example
500  * all the 'user' threads are LWKT scheduled all at once and we just
501  * call lwkt_switch().
502  *
503  * The calling process is not on the queue and cannot be selected.
504  */
505 static
506 void
507 dfly_select_curproc(globaldata_t gd)
508 {
509 	dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
510 	struct lwp *nlp;
511 	int cpuid = gd->gd_cpuid;
512 
513 	crit_enter_gd(gd);
514 
515 	spin_lock(&dd->spin);
516 	nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);
517 
518 	if (nlp) {
519 		atomic_set_cpumask(&dfly_curprocmask, CPUMASK(cpuid));
520 		dd->upri = nlp->lwp_priority;
521 		dd->uschedcp = nlp;
522 #if 0
523 		dd->rrcount = 0;		/* reset round robin */
524 #endif
525 		spin_unlock(&dd->spin);
526 		lwkt_acquire(nlp->lwp_thread);
527 		lwkt_schedule(nlp->lwp_thread);
528 	} else {
529 		spin_unlock(&dd->spin);
530 	}
531 	crit_exit_gd(gd);
532 }
533 
534 /*
535  * Place the specified lwp on the user scheduler's run queue.  This routine
536  * must be called with the thread descheduled.  The lwp must be runnable.
537  * It must not be possible for anyone else to explicitly schedule this thread.
538  *
539  * The thread may be the current thread as a special case.
540  */
541 static void
542 dfly_setrunqueue(struct lwp *lp)
543 {
544 	dfly_pcpu_t dd;
545 	dfly_pcpu_t rdd;
546 
547 	/*
548 	 * First validate the process LWKT state.
549 	 */
550 	KASSERT(lp->lwp_stat == LSRUN, ("setrunqueue: lwp not LSRUN"));
551 	KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0,
552 	    ("lwp %d/%d already on runq! flag %08x/%08x", lp->lwp_proc->p_pid,
553 	     lp->lwp_tid, lp->lwp_proc->p_flags, lp->lwp_flags));
554 	KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);
555 
556 	/*
557 	 * NOTE: dd/rdd do not necessarily represent the current cpu.
558 	 *	 Instead they may represent the cpu the thread was last
559 	 *	 scheduled on or inherited by its parent.
560 	 */
561 	dd = &dfly_pcpu[lp->lwp_qcpu];
562 	rdd = dd;
563 
564 	/*
565 	 * This process is not supposed to be scheduled anywhere or assigned
566 	 * as the current process anywhere.  Assert the condition.
567 	 */
568 	KKASSERT(rdd->uschedcp != lp);
569 
570 	/*
571 	 * Ok, we have to setrunqueue some target cpu and request a reschedule
572 	 * if necessary.
573 	 *
574 	 * We have to choose the best target cpu.  It might not be the current
575 	 * target even if the current cpu has no running user thread (for
576 	 * example, because the current cpu might be a hyperthread and its
577 	 * sibling has a thread assigned).
578 	 *
579 	 * If we just forked it is most optimal to run the child on the same
580 	 * cpu just in case the parent decides to wait for it (thus getting
581 	 * off that cpu).  As long as there is nothing else runnable on the
582 	 * cpu, that is.  If we did this unconditionally a parent forking
583 	 * multiple children before waiting (e.g. make -j N) leaves other
584 	 * cpus idle that could be working.
585 	 */
586 	if (lp->lwp_forked) {
587 		lp->lwp_forked = 0;
588 		if (usched_dfly_features & 0x20)
589 			rdd = dfly_choose_best_queue(lp);
590 		else if (usched_dfly_features & 0x40)
591 			rdd = &dfly_pcpu[lp->lwp_qcpu];
592 		else if (usched_dfly_features & 0x80)
593 			rdd = dfly_choose_queue_simple(rdd, lp);
594 		else if (dfly_pcpu[lp->lwp_qcpu].runqcount)
595 			rdd = dfly_choose_best_queue(lp);
596 		else
597 			rdd = &dfly_pcpu[lp->lwp_qcpu];
598 	} else {
599 		rdd = dfly_choose_best_queue(lp);
600 		/* rdd = &dfly_pcpu[lp->lwp_qcpu]; */
601 	}
602 	if (lp->lwp_qcpu != rdd->cpuid) {
603 		spin_lock(&dd->spin);
604 		dfly_changeqcpu_locked(lp, dd, rdd);
605 		spin_unlock(&dd->spin);
606 	}
607 	dfly_setrunqueue_dd(rdd, lp);
608 }
609 
610 /*
611  * Change qcpu to rdd->cpuid.  The dd the lp is CURRENTLY on must be
612  * spin-locked on-call.  rdd does not have to be.
613  */
614 static void
615 dfly_changeqcpu_locked(struct lwp *lp, dfly_pcpu_t dd, dfly_pcpu_t rdd)
616 {
617 	if (lp->lwp_qcpu != rdd->cpuid) {
618 		if (lp->lwp_mpflags & LWP_MP_ULOAD) {
619 			atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
620 			atomic_add_int(&dd->uload, -lp->lwp_uload);
621 			atomic_add_int(&dd->ucount, -1);
622 			atomic_add_int(&dfly_ucount, -1);
623 		}
624 		lp->lwp_qcpu = rdd->cpuid;
625 	}
626 }
627 
628 /*
629  * Place lp on rdd's runqueue.  Nothing is locked on call.  This function
630  * also performs all necessary ancillary notification actions.
631  */
632 static void
633 dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp)
634 {
635 	globaldata_t rgd;
636 
637 	/*
638 	 * We might be moving the lp to another cpu's run queue, and once
639 	 * on the runqueue (even if it is our cpu's), another cpu can rip
640 	 * it away from us.
641 	 *
642 	 * TDF_MIGRATING might already be set if this is part of a
643 	 * remrunqueue+setrunqueue sequence.
644 	 */
645 	if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0)
646 		lwkt_giveaway(lp->lwp_thread);
647 
648 	rgd = globaldata_find(rdd->cpuid);
649 
650 	/*
651 	 * We lose control of the lp the moment we release the spinlock
652 	 * after having placed it on the queue.  i.e. another cpu could pick
653 	 * it up, or it could exit, or its priority could be further
654 	 * adjusted, or something like that.
655 	 *
656 	 * WARNING! rdd can point to a foreign cpu!
657 	 */
658 	spin_lock(&rdd->spin);
659 	dfly_setrunqueue_locked(rdd, lp);
660 
661 	/*
662 	 * Potentially interrupt the currently-running thread
663 	 */
664 	if ((rdd->upri & ~PPQMASK) <= (lp->lwp_priority & ~PPQMASK)) {
665 		/*
666 		 * Currently running thread is better or same, do not
667 		 * interrupt.
668 		 */
669 		spin_unlock(&rdd->spin);
670 	} else if ((rdd->upri & ~PPQMASK) <= (lp->lwp_priority & ~PPQMASK) +
671 		   usched_dfly_fast_resched) {
672 		/*
673 		 * Currently running thread is not better, but not so bad
674 		 * that we need to interrupt it.  Let it run for one more
675 		 * scheduler tick.
676 		 */
677 		if (rdd->uschedcp &&
678 		    rdd->uschedcp->lwp_rrcount < usched_dfly_rrinterval) {
679 			rdd->uschedcp->lwp_rrcount = usched_dfly_rrinterval - 1;
680 		}
681 		spin_unlock(&rdd->spin);
682 	} else if (rgd == mycpu) {
683 		/*
684 		 * We should interrupt the currently running thread, which
685 		 * is on the current cpu.
686 		 */
687 		spin_unlock(&rdd->spin);
688 		if (rdd->uschedcp == NULL) {
689 			wakeup_mycpu(&rdd->helper_thread); /* XXX */
690 			need_user_resched();
691 		} else {
692 			need_user_resched();
693 		}
694 	} else {
695 		/*
696 		 * We should interrupt the currently running thread, which
697 		 * is on a different cpu.
698 		 */
699 		spin_unlock(&rdd->spin);
700 		lwkt_send_ipiq(rgd, dfly_need_user_resched_remote, NULL);
701 	}
702 }
703 
704 /*
705  * This routine is called from a systimer IPI.  It MUST be MP-safe and
706  * the BGL IS NOT HELD ON ENTRY.  This routine is called at ESTCPUFREQ on
707  * each cpu.
708  */
709 static
710 void
711 dfly_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
712 {
713 	globaldata_t gd = mycpu;
714 	dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
715 
716 	/*
717 	 * Spinlocks also hold a critical section so there should not be
718 	 * any active.
719 	 */
720 	KKASSERT(gd->gd_spinlocks == 0);
721 
722 	if (lp == NULL)
723 		return;
724 
725 	/*
726 	 * Do we need to round-robin?  We round-robin 10 times a second.
727 	 * This should only occur for cpu-bound batch processes.
728 	 */
729 	if (++lp->lwp_rrcount >= usched_dfly_rrinterval) {
730 		lp->lwp_thread->td_wakefromcpu = -1;
731 		need_user_resched();
732 	}
733 
734 	/*
735 	 * Adjust estcpu upward using a real time equivalent calculation,
736 	 * and recalculate lp's priority.
737 	 */
738 	lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUMAX / ESTCPUFREQ + 1);
739 	dfly_resetpriority(lp);
740 
741 	/*
742 	 * Rebalance two cpus every 8 ticks, pulling the worst thread
743 	 * from the worst cpu's queue into a rotating cpu number.
744 	 *
745 	 * This mechanic is needed because the push algorithms can
746 	 * steady-state in an non-optimal configuration.  We need to mix it
747 	 * up a little, even if it means breaking up a paired thread, so
748 	 * the push algorithms can rebalance the degenerate conditions.
749 	 * This portion of the algorithm exists to ensure stability at the
750 	 * selected weightings.
751 	 *
752 	 * Because we might be breaking up optimal conditions we do not want
753 	 * to execute this too quickly, hence we only rebalance approximately
754 	 * ~7-8 times per second.  The push's, on the otherhand, are capable
755 	 * moving threads to other cpus at a much higher rate.
756 	 *
757 	 * We choose the most heavily loaded thread from the worst queue
758 	 * in order to ensure that multiple heavy-weight threads on the same
759 	 * queue get broken up, and also because these threads are the most
760 	 * likely to be able to remain in place.  Hopefully then any pairings,
761 	 * if applicable, migrate to where these threads are.
762 	 */
763 	if ((usched_dfly_features & 0x04) &&
764 	    ((u_int)sched_ticks & 7) == 0 &&
765 	    (u_int)sched_ticks / 8 % ncpus == gd->gd_cpuid) {
766 		/*
767 		 * Our cpu is up.
768 		 */
769 		struct lwp *nlp;
770 		dfly_pcpu_t rdd;
771 
772 		rdd = dfly_choose_worst_queue(dd);
773 		if (rdd) {
774 			spin_lock(&dd->spin);
775 			if (spin_trylock(&rdd->spin)) {
776 				nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
777 				spin_unlock(&rdd->spin);
778 				if (nlp == NULL)
779 					spin_unlock(&dd->spin);
780 			} else {
781 				spin_unlock(&dd->spin);
782 				nlp = NULL;
783 			}
784 		} else {
785 			nlp = NULL;
786 		}
787 		/* dd->spin held if nlp != NULL */
788 
789 		/*
790 		 * Either schedule it or add it to our queue.
791 		 */
792 		if (nlp &&
793 		    (nlp->lwp_priority & ~PPQMASK) < (dd->upri & ~PPQMASK)) {
794 			atomic_set_cpumask(&dfly_curprocmask, dd->cpumask);
795 			dd->upri = nlp->lwp_priority;
796 			dd->uschedcp = nlp;
797 #if 0
798 			dd->rrcount = 0;	/* reset round robin */
799 #endif
800 			spin_unlock(&dd->spin);
801 			lwkt_acquire(nlp->lwp_thread);
802 			lwkt_schedule(nlp->lwp_thread);
803 		} else if (nlp) {
804 			dfly_setrunqueue_locked(dd, nlp);
805 			spin_unlock(&dd->spin);
806 		}
807 	}
808 }
809 
810 /*
811  * Called from acquire and from kern_synch's one-second timer (one of the
812  * callout helper threads) with a critical section held.
813  *
814  * Adjust p_estcpu based on our single-cpu load, p_nice, and compensate for
815  * overall system load.
816  *
817  * Note that no recalculation occurs for a process which sleeps and wakes
818  * up in the same tick.  That is, a system doing thousands of context
819  * switches per second will still only do serious estcpu calculations
820  * ESTCPUFREQ times per second.
821  */
822 static
823 void
824 dfly_recalculate_estcpu(struct lwp *lp)
825 {
826 	globaldata_t gd = mycpu;
827 	sysclock_t cpbase;
828 	sysclock_t ttlticks;
829 	int estcpu;
830 	int decay_factor;
831 	int ucount;
832 
833 	/*
834 	 * We have to subtract periodic to get the last schedclock
835 	 * timeout time, otherwise we would get the upcoming timeout.
836 	 * Keep in mind that a process can migrate between cpus and
837 	 * while the scheduler clock should be very close, boundary
838 	 * conditions could lead to a small negative delta.
839 	 */
840 	cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
841 
842 	if (lp->lwp_slptime > 1) {
843 		/*
844 		 * Too much time has passed, do a coarse correction.
845 		 */
846 		lp->lwp_estcpu = lp->lwp_estcpu >> 1;
847 		dfly_resetpriority(lp);
848 		lp->lwp_cpbase = cpbase;
849 		lp->lwp_cpticks = 0;
850 		lp->lwp_estfast = 0;
851 	} else if (lp->lwp_cpbase != cpbase) {
852 		/*
853 		 * Adjust estcpu if we are in a different tick.  Don't waste
854 		 * time if we are in the same tick.
855 		 *
856 		 * First calculate the number of ticks in the measurement
857 		 * interval.  The ttlticks calculation can wind up 0 due to
858 		 * a bug in the handling of lwp_slptime  (as yet not found),
859 		 * so make sure we do not get a divide by 0 panic.
860 		 */
861 		ttlticks = (cpbase - lp->lwp_cpbase) /
862 			   gd->gd_schedclock.periodic;
863 		if ((ssysclock_t)ttlticks < 0) {
864 			ttlticks = 0;
865 			lp->lwp_cpbase = cpbase;
866 		}
867 		if (ttlticks == 0)
868 			return;
869 		updatepcpu(lp, lp->lwp_cpticks, ttlticks);
870 
871 		/*
872 		 * Calculate the percentage of one cpu being used then
873 		 * compensate for any system load in excess of ncpus.
874 		 *
875 		 * For example, if we have 8 cores and 16 running cpu-bound
876 		 * processes then all things being equal each process will
877 		 * get 50% of one cpu.  We need to pump this value back
878 		 * up to 100% so the estcpu calculation properly adjusts
879 		 * the process's dynamic priority.
880 		 *
881 		 * estcpu is scaled by ESTCPUMAX, pctcpu is scaled by FSCALE.
882 		 */
883 		estcpu = (lp->lwp_pctcpu * ESTCPUMAX) >> FSHIFT;
884 		ucount = dfly_ucount;
885 		if (ucount > ncpus) {
886 			estcpu += estcpu * (ucount - ncpus) / ncpus;
887 		}
888 
889 		if (usched_dfly_debug == lp->lwp_proc->p_pid) {
890 			kprintf("pid %d lwp %p estcpu %3d %3d cp %d/%d",
891 				lp->lwp_proc->p_pid, lp,
892 				estcpu, lp->lwp_estcpu,
893 				lp->lwp_cpticks, ttlticks);
894 		}
895 
896 		/*
897 		 * Adjust lp->lwp_esetcpu.  The decay factor determines how
898 		 * quickly lwp_estcpu collapses to its realtime calculation.
899 		 * A slower collapse gives us a more accurate number over
900 		 * the long term but can create problems with bursty threads
901 		 * or threads which become cpu hogs.
902 		 *
903 		 * To solve this problem, newly started lwps and lwps which
904 		 * are restarting after having been asleep for a while are
905 		 * given a much, much faster decay in order to quickly
906 		 * detect whether they become cpu-bound.
907 		 *
908 		 * NOTE: p_nice is accounted for in dfly_resetpriority(),
909 		 *	 and not here, but we must still ensure that a
910 		 *	 cpu-bound nice -20 process does not completely
911 		 *	 override a cpu-bound nice +20 process.
912 		 *
913 		 * NOTE: We must use ESTCPULIM() here to deal with any
914 		 *	 overshoot.
915 		 */
916 		decay_factor = usched_dfly_decay;
917 		if (decay_factor < 1)
918 			decay_factor = 1;
919 		if (decay_factor > 1024)
920 			decay_factor = 1024;
921 
922 		if (lp->lwp_estfast < usched_dfly_decay) {
923 			++lp->lwp_estfast;
924 			lp->lwp_estcpu = ESTCPULIM(
925 				(lp->lwp_estcpu * lp->lwp_estfast + estcpu) /
926 				(lp->lwp_estfast + 1));
927 		} else {
928 			lp->lwp_estcpu = ESTCPULIM(
929 				(lp->lwp_estcpu * decay_factor + estcpu) /
930 				(decay_factor + 1));
931 		}
932 
933 		if (usched_dfly_debug == lp->lwp_proc->p_pid)
934 			kprintf(" finalestcpu %d\n", lp->lwp_estcpu);
935 		dfly_resetpriority(lp);
936 		lp->lwp_cpbase += ttlticks * gd->gd_schedclock.periodic;
937 		lp->lwp_cpticks = 0;
938 	}
939 }
940 
941 /*
942  * Compute the priority of a process when running in user mode.
943  * Arrange to reschedule if the resulting priority is better
944  * than that of the current process.
945  *
946  * This routine may be called with any process.
947  *
948  * This routine is called by fork1() for initial setup with the process
949  * of the run queue, and also may be called normally with the process on or
950  * off the run queue.
951  */
952 static void
953 dfly_resetpriority(struct lwp *lp)
954 {
955 	dfly_pcpu_t rdd;
956 	int newpriority;
957 	u_short newrqtype;
958 	int rcpu;
959 	int checkpri;
960 	int estcpu;
961 	int delta_uload;
962 
963 	crit_enter();
964 
965 	/*
966 	 * Lock the scheduler (lp) belongs to.  This can be on a different
967 	 * cpu.  Handle races.  This loop breaks out with the appropriate
968 	 * rdd locked.
969 	 */
970 	for (;;) {
971 		rcpu = lp->lwp_qcpu;
972 		cpu_ccfence();
973 		rdd = &dfly_pcpu[rcpu];
974 		spin_lock(&rdd->spin);
975 		if (rcpu == lp->lwp_qcpu)
976 			break;
977 		spin_unlock(&rdd->spin);
978 	}
979 
980 	/*
981 	 * Calculate the new priority and queue type
982 	 */
983 	newrqtype = lp->lwp_rtprio.type;
984 
985 	switch(newrqtype) {
986 	case RTP_PRIO_REALTIME:
987 	case RTP_PRIO_FIFO:
988 		newpriority = PRIBASE_REALTIME +
989 			     (lp->lwp_rtprio.prio & PRIMASK);
990 		break;
991 	case RTP_PRIO_NORMAL:
992 		/*
993 		 *
994 		 */
995 		estcpu = lp->lwp_estcpu;
996 
997 		/*
998 		 * p_nice piece		Adds (0-40) * 2		0-80
999 		 * estcpu		Adds 16384  * 4 / 512   0-128
1000 		 */
1001 		newpriority = (lp->lwp_proc->p_nice - PRIO_MIN) * PPQ / NICEPPQ;
1002 		newpriority += estcpu * PPQ / ESTCPUPPQ;
1003 		newpriority = newpriority * MAXPRI / (PRIO_RANGE * PPQ /
1004 			      NICEPPQ + ESTCPUMAX * PPQ / ESTCPUPPQ);
1005 		newpriority = PRIBASE_NORMAL + (newpriority & PRIMASK);
1006 		break;
1007 	case RTP_PRIO_IDLE:
1008 		newpriority = PRIBASE_IDLE + (lp->lwp_rtprio.prio & PRIMASK);
1009 		break;
1010 	case RTP_PRIO_THREAD:
1011 		newpriority = PRIBASE_THREAD + (lp->lwp_rtprio.prio & PRIMASK);
1012 		break;
1013 	default:
1014 		panic("Bad RTP_PRIO %d", newrqtype);
1015 		/* NOT REACHED */
1016 	}
1017 
1018 	/*
1019 	 * The LWKT scheduler doesn't dive usched structures, give it a hint
1020 	 * on the relative priority of user threads running in the kernel.
1021 	 * The LWKT scheduler will always ensure that a user thread running
1022 	 * in the kernel will get cpu some time, regardless of its upri,
1023 	 * but can decide not to instantly switch from one kernel or user
1024 	 * mode user thread to a kernel-mode user thread when it has a less
1025 	 * desireable user priority.
1026 	 *
1027 	 * td_upri has normal sense (higher values are more desireable), so
1028 	 * negate it.
1029 	 */
1030 	lp->lwp_thread->td_upri = -(newpriority & usched_dfly_swmask);
1031 
1032 	/*
1033 	 * The newpriority incorporates the queue type so do a simple masked
1034 	 * check to determine if the process has moved to another queue.  If
1035 	 * it has, and it is currently on a run queue, then move it.
1036 	 *
1037 	 * Since uload is ~PPQMASK masked, no modifications are necessary if
1038 	 * we end up in the same run queue.
1039 	 */
1040 	if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
1041 		if (lp->lwp_mpflags & LWP_MP_ONRUNQ) {
1042 			dfly_remrunqueue_locked(rdd, lp);
1043 			lp->lwp_priority = newpriority;
1044 			lp->lwp_rqtype = newrqtype;
1045 			lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1046 			dfly_setrunqueue_locked(rdd, lp);
1047 			checkpri = 1;
1048 		} else {
1049 			lp->lwp_priority = newpriority;
1050 			lp->lwp_rqtype = newrqtype;
1051 			lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1052 			checkpri = 0;
1053 		}
1054 	} else {
1055 		/*
1056 		 * In the same PPQ, uload cannot change.
1057 		 */
1058 		lp->lwp_priority = newpriority;
1059 		checkpri = 1;
1060 		rcpu = -1;
1061 	}
1062 
1063 	/*
1064 	 * Adjust effective load.
1065 	 *
1066 	 * Calculate load then scale up or down geometrically based on p_nice.
1067 	 * Processes niced up (positive) are less important, and processes
1068 	 * niced downard (negative) are more important.  The higher the uload,
1069 	 * the more important the thread.
1070 	 */
1071 	/* 0-511, 0-100% cpu */
1072 	delta_uload = lp->lwp_estcpu / NQS;
1073 	delta_uload -= delta_uload * lp->lwp_proc->p_nice / (PRIO_MAX + 1);
1074 
1075 
1076 	delta_uload -= lp->lwp_uload;
1077 	lp->lwp_uload += delta_uload;
1078 	if (lp->lwp_mpflags & LWP_MP_ULOAD)
1079 		atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload, delta_uload);
1080 
1081 	/*
1082 	 * Determine if we need to reschedule the target cpu.  This only
1083 	 * occurs if the LWP is already on a scheduler queue, which means
1084 	 * that idle cpu notification has already occured.  At most we
1085 	 * need only issue a need_user_resched() on the appropriate cpu.
1086 	 *
1087 	 * The LWP may be owned by a CPU different from the current one,
1088 	 * in which case dd->uschedcp may be modified without an MP lock
1089 	 * or a spinlock held.  The worst that happens is that the code
1090 	 * below causes a spurious need_user_resched() on the target CPU
1091 	 * and dd->pri to be wrong for a short period of time, both of
1092 	 * which are harmless.
1093 	 *
1094 	 * If checkpri is 0 we are adjusting the priority of the current
1095 	 * process, possibly higher (less desireable), so ignore the upri
1096 	 * check which will fail in that case.
1097 	 */
1098 	if (rcpu >= 0) {
1099 		if ((dfly_rdyprocmask & CPUMASK(rcpu)) &&
1100 		    (checkpri == 0 ||
1101 		     (rdd->upri & ~PRIMASK) >
1102 		     (lp->lwp_priority & ~PRIMASK))) {
1103 			if (rcpu == mycpu->gd_cpuid) {
1104 				spin_unlock(&rdd->spin);
1105 				need_user_resched();
1106 			} else {
1107 				spin_unlock(&rdd->spin);
1108 				lwkt_send_ipiq(globaldata_find(rcpu),
1109 					       dfly_need_user_resched_remote,
1110 					       NULL);
1111 			}
1112 		} else {
1113 			spin_unlock(&rdd->spin);
1114 		}
1115 	} else {
1116 		spin_unlock(&rdd->spin);
1117 	}
1118 	crit_exit();
1119 }
1120 
1121 static
1122 void
1123 dfly_yield(struct lwp *lp)
1124 {
1125 #if 0
1126 	/* FUTURE (or something similar) */
1127 	switch(lp->lwp_rqtype) {
1128 	case RTP_PRIO_NORMAL:
1129 		lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUINCR);
1130 		break;
1131 	default:
1132 		break;
1133 	}
1134 #endif
1135         need_user_resched();
1136 }
1137 
1138 /*
1139  * Called from fork1() when a new child process is being created.
1140  *
1141  * Give the child process an initial estcpu that is more batch then
1142  * its parent and dock the parent for the fork (but do not
1143  * reschedule the parent).
1144  *
1145  * fast
1146  *
1147  * XXX lwp should be "spawning" instead of "forking"
1148  */
1149 static void
1150 dfly_forking(struct lwp *plp, struct lwp *lp)
1151 {
1152 	/*
1153 	 * Put the child 4 queue slots (out of 32) higher than the parent
1154 	 * (less desireable than the parent).
1155 	 */
1156 	lp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ * 4);
1157 	lp->lwp_forked = 1;
1158 	lp->lwp_estfast = 0;
1159 
1160 	/*
1161 	 * Dock the parent a cost for the fork, protecting us from fork
1162 	 * bombs.  If the parent is forking quickly make the child more
1163 	 * batchy.
1164 	 */
1165 	plp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ / 16);
1166 }
1167 
1168 /*
1169  * Called when a lwp is being removed from this scheduler, typically
1170  * during lwp_exit().  We have to clean out any ULOAD accounting before
1171  * we can let the lp go.  The dd->spin lock is not needed for uload
1172  * updates.
1173  *
1174  * Scheduler dequeueing has already occurred, no further action in that
1175  * regard is needed.
1176  */
1177 static void
1178 dfly_exiting(struct lwp *lp, struct proc *child_proc)
1179 {
1180 	dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1181 
1182 	if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1183 		atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1184 		atomic_add_int(&dd->uload, -lp->lwp_uload);
1185 		atomic_add_int(&dd->ucount, -1);
1186 		atomic_add_int(&dfly_ucount, -1);
1187 	}
1188 }
1189 
1190 /*
1191  * This function cannot block in any way, but spinlocks are ok.
1192  *
1193  * Update the uload based on the state of the thread (whether it is going
1194  * to sleep or running again).  The uload is meant to be a longer-term
1195  * load and not an instantanious load.
1196  */
1197 static void
1198 dfly_uload_update(struct lwp *lp)
1199 {
1200 	dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1201 
1202 	if (lp->lwp_thread->td_flags & TDF_RUNQ) {
1203 		if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1204 			spin_lock(&dd->spin);
1205 			if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1206 				atomic_set_int(&lp->lwp_mpflags,
1207 					       LWP_MP_ULOAD);
1208 				atomic_add_int(&dd->uload, lp->lwp_uload);
1209 				atomic_add_int(&dd->ucount, 1);
1210 				atomic_add_int(&dfly_ucount, 1);
1211 			}
1212 			spin_unlock(&dd->spin);
1213 		}
1214 	} else if (lp->lwp_slptime > 0) {
1215 		if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1216 			spin_lock(&dd->spin);
1217 			if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1218 				atomic_clear_int(&lp->lwp_mpflags,
1219 						 LWP_MP_ULOAD);
1220 				atomic_add_int(&dd->uload, -lp->lwp_uload);
1221 				atomic_add_int(&dd->ucount, -1);
1222 				atomic_add_int(&dfly_ucount, -1);
1223 			}
1224 			spin_unlock(&dd->spin);
1225 		}
1226 	}
1227 }
1228 
1229 /*
1230  * chooseproc() is called when a cpu needs a user process to LWKT schedule,
1231  * it selects a user process and returns it.  If chklp is non-NULL and chklp
1232  * has a better or equal priority then the process that would otherwise be
1233  * chosen, NULL is returned.
1234  *
1235  * Until we fix the RUNQ code the chklp test has to be strict or we may
1236  * bounce between processes trying to acquire the current process designation.
1237  *
1238  * Must be called with rdd->spin locked.  The spinlock is left intact through
1239  * the entire routine.  dd->spin does not have to be locked.
1240  *
1241  * If worst is non-zero this function finds the worst thread instead of the
1242  * best thread (used by the schedulerclock-based rover).
1243  */
1244 static
1245 struct lwp *
1246 dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
1247 		       struct lwp *chklp, int worst)
1248 {
1249 	struct lwp *lp;
1250 	struct rq *q;
1251 	u_int32_t *which, *which2;
1252 	u_int32_t pri;
1253 	u_int32_t rtqbits;
1254 	u_int32_t tsqbits;
1255 	u_int32_t idqbits;
1256 
1257 	rtqbits = rdd->rtqueuebits;
1258 	tsqbits = rdd->queuebits;
1259 	idqbits = rdd->idqueuebits;
1260 
1261 	if (worst) {
1262 		if (idqbits) {
1263 			pri = bsrl(idqbits);
1264 			q = &rdd->idqueues[pri];
1265 			which = &rdd->idqueuebits;
1266 			which2 = &idqbits;
1267 		} else if (tsqbits) {
1268 			pri = bsrl(tsqbits);
1269 			q = &rdd->queues[pri];
1270 			which = &rdd->queuebits;
1271 			which2 = &tsqbits;
1272 		} else if (rtqbits) {
1273 			pri = bsrl(rtqbits);
1274 			q = &rdd->rtqueues[pri];
1275 			which = &rdd->rtqueuebits;
1276 			which2 = &rtqbits;
1277 		} else {
1278 			return (NULL);
1279 		}
1280 		lp = TAILQ_LAST(q, rq);
1281 	} else {
1282 		if (rtqbits) {
1283 			pri = bsfl(rtqbits);
1284 			q = &rdd->rtqueues[pri];
1285 			which = &rdd->rtqueuebits;
1286 			which2 = &rtqbits;
1287 		} else if (tsqbits) {
1288 			pri = bsfl(tsqbits);
1289 			q = &rdd->queues[pri];
1290 			which = &rdd->queuebits;
1291 			which2 = &tsqbits;
1292 		} else if (idqbits) {
1293 			pri = bsfl(idqbits);
1294 			q = &rdd->idqueues[pri];
1295 			which = &rdd->idqueuebits;
1296 			which2 = &idqbits;
1297 		} else {
1298 			return (NULL);
1299 		}
1300 		lp = TAILQ_FIRST(q);
1301 	}
1302 	KASSERT(lp, ("chooseproc: no lwp on busy queue"));
1303 
1304 	/*
1305 	 * If the passed lwp <chklp> is reasonably close to the selected
1306 	 * lwp <lp>, return NULL (indicating that <chklp> should be kept).
1307 	 *
1308 	 * Note that we must error on the side of <chklp> to avoid bouncing
1309 	 * between threads in the acquire code.
1310 	 */
1311 	if (chklp) {
1312 		if (chklp->lwp_priority < lp->lwp_priority + PPQ)
1313 			return(NULL);
1314 	}
1315 
1316 	KTR_COND_LOG(usched_chooseproc,
1317 	    lp->lwp_proc->p_pid == usched_dfly_pid_debug,
1318 	    lp->lwp_proc->p_pid,
1319 	    lp->lwp_thread->td_gd->gd_cpuid,
1320 	    mycpu->gd_cpuid);
1321 
1322 	KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
1323 	atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1324 	TAILQ_REMOVE(q, lp, lwp_procq);
1325 	--rdd->runqcount;
1326 	if (TAILQ_EMPTY(q))
1327 		*which &= ~(1 << pri);
1328 
1329 	/*
1330 	 * If we are choosing a process from rdd with the intent to
1331 	 * move it to dd, lwp_qcpu must be adjusted while rdd's spinlock
1332 	 * is still held.
1333 	 */
1334 	if (rdd != dd) {
1335 		if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1336 			atomic_add_int(&rdd->uload, -lp->lwp_uload);
1337 			atomic_add_int(&rdd->ucount, -1);
1338 			atomic_add_int(&dfly_ucount, -1);
1339 		}
1340 		lp->lwp_qcpu = dd->cpuid;
1341 		atomic_add_int(&dd->uload, lp->lwp_uload);
1342 		atomic_add_int(&dd->ucount, 1);
1343 		atomic_add_int(&dfly_ucount, 1);
1344 		atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1345 	}
1346 	return lp;
1347 }
1348 
1349 /*
1350  * USED TO PUSH RUNNABLE LWPS TO THE LEAST LOADED CPU.
1351  *
1352  * Choose a cpu node to schedule lp on, hopefully nearby its current
1353  * node.
1354  *
1355  * We give the current node a modest advantage for obvious reasons.
1356  *
1357  * We also give the node the thread was woken up FROM a slight advantage
1358  * in order to try to schedule paired threads which synchronize/block waiting
1359  * for each other fairly close to each other.  Similarly in a network setting
1360  * this feature will also attempt to place a user process near the kernel
1361  * protocol thread that is feeding it data.  THIS IS A CRITICAL PART of the
1362  * algorithm as it heuristically groups synchronizing processes for locality
1363  * of reference in multi-socket systems.
1364  *
1365  * We check against running processes and give a big advantage if there
1366  * are none running.
1367  *
1368  * The caller will normally dfly_setrunqueue() lp on the returned queue.
1369  *
1370  * When the topology is known choose a cpu whos group has, in aggregate,
1371  * has the lowest weighted load.
1372  */
1373 static
1374 dfly_pcpu_t
1375 dfly_choose_best_queue(struct lwp *lp)
1376 {
1377 	cpumask_t wakemask;
1378 	cpumask_t mask;
1379 	cpu_node_t *cpup;
1380 	cpu_node_t *cpun;
1381 	cpu_node_t *cpub;
1382 	dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1383 	dfly_pcpu_t rdd;
1384 	int wakecpu;
1385 	int cpuid;
1386 	int n;
1387 	int count;
1388 	int load;
1389 	int lowest_load;
1390 
1391 	/*
1392 	 * When the topology is unknown choose a random cpu that is hopefully
1393 	 * idle.
1394 	 */
1395 	if (dd->cpunode == NULL)
1396 		return (dfly_choose_queue_simple(dd, lp));
1397 
1398 	/*
1399 	 * Pairing mask
1400 	 */
1401 	if ((wakecpu = lp->lwp_thread->td_wakefromcpu) >= 0)
1402 		wakemask = dfly_pcpu[wakecpu].cpumask;
1403 	else
1404 		wakemask = 0;
1405 
1406 	/*
1407 	 * When the topology is known choose a cpu whos group has, in
1408 	 * aggregate, has the lowest weighted load.
1409 	 */
1410 	cpup = root_cpu_node;
1411 	rdd = dd;
1412 
1413 	while (cpup) {
1414 		/*
1415 		 * Degenerate case super-root
1416 		 */
1417 		if (cpup->child_node && cpup->child_no == 1) {
1418 			cpup = cpup->child_node;
1419 			continue;
1420 		}
1421 
1422 		/*
1423 		 * Terminal cpunode
1424 		 */
1425 		if (cpup->child_node == NULL) {
1426 			rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1427 			break;
1428 		}
1429 
1430 		cpub = NULL;
1431 		lowest_load = 0x7FFFFFFF;
1432 
1433 		for (n = 0; n < cpup->child_no; ++n) {
1434 			/*
1435 			 * Accumulate load information for all cpus
1436 			 * which are members of this node.
1437 			 */
1438 			cpun = &cpup->child_node[n];
1439 			mask = cpun->members & usched_global_cpumask &
1440 			       smp_active_mask & lp->lwp_cpumask;
1441 			if (mask == 0)
1442 				continue;
1443 
1444 			count = 0;
1445 			load = 0;
1446 
1447 			while (mask) {
1448 				cpuid = BSFCPUMASK(mask);
1449 				rdd = &dfly_pcpu[cpuid];
1450 				load += rdd->uload;
1451 				load += rdd->ucount * usched_dfly_weight3;
1452 
1453 				if (rdd->uschedcp == NULL &&
1454 				    rdd->runqcount == 0 &&
1455 				    globaldata_find(cpuid)->gd_tdrunqcount == 0
1456 				) {
1457 					load -= usched_dfly_weight4;
1458 				}
1459 #if 0
1460 				else if (rdd->upri > lp->lwp_priority + PPQ) {
1461 					load -= usched_dfly_weight4 / 2;
1462 				}
1463 #endif
1464 				mask &= ~CPUMASK(cpuid);
1465 				++count;
1466 			}
1467 
1468 			/*
1469 			 * Compensate if the lp is already accounted for in
1470 			 * the aggregate uload for this mask set.  We want
1471 			 * to calculate the loads as if lp were not present,
1472 			 * otherwise the calculation is bogus.
1473 			 */
1474 			if ((lp->lwp_mpflags & LWP_MP_ULOAD) &&
1475 			    (dd->cpumask & cpun->members)) {
1476 				load -= lp->lwp_uload;
1477 				load -= usched_dfly_weight3;
1478 			}
1479 
1480 			load /= count;
1481 
1482 			/*
1483 			 * Advantage the cpu group (lp) is already on.
1484 			 */
1485 			if (cpun->members & dd->cpumask)
1486 				load -= usched_dfly_weight1;
1487 
1488 			/*
1489 			 * Advantage the cpu group we want to pair (lp) to,
1490 			 * but don't let it go to the exact same cpu as
1491 			 * the wakecpu target.
1492 			 *
1493 			 * We do this by checking whether cpun is a
1494 			 * terminal node or not.  All cpun's at the same
1495 			 * level will either all be terminal or all not
1496 			 * terminal.
1497 			 *
1498 			 * If it is and we match we disadvantage the load.
1499 			 * If it is and we don't match we advantage the load.
1500 			 *
1501 			 * Also note that we are effectively disadvantaging
1502 			 * all-but-one by the same amount, so it won't effect
1503 			 * the weight1 factor for the all-but-one nodes.
1504 			 */
1505 			if (cpun->members & wakemask) {
1506 				if (cpun->child_node != NULL) {
1507 					/* advantage */
1508 					load -= usched_dfly_weight2;
1509 				} else {
1510 					if (usched_dfly_features & 0x10)
1511 						load += usched_dfly_weight2;
1512 					else
1513 						load -= usched_dfly_weight2;
1514 				}
1515 			}
1516 
1517 			/*
1518 			 * Calculate the best load
1519 			 */
1520 			if (cpub == NULL || lowest_load > load ||
1521 			    (lowest_load == load &&
1522 			     (cpun->members & dd->cpumask))
1523 			) {
1524 				lowest_load = load;
1525 				cpub = cpun;
1526 			}
1527 		}
1528 		cpup = cpub;
1529 	}
1530 	if (usched_dfly_chooser)
1531 		kprintf("lp %02d->%02d %s\n",
1532 			lp->lwp_qcpu, rdd->cpuid, lp->lwp_proc->p_comm);
1533 	return (rdd);
1534 }
1535 
1536 /*
1537  * USED TO PULL RUNNABLE LWPS FROM THE MOST LOADED CPU.
1538  *
1539  * Choose the worst queue close to dd's cpu node with a non-empty runq
1540  * that is NOT dd.  Also require that the moving of the highest-load thread
1541  * from rdd to dd does not cause the uload's to cross each other.
1542  *
1543  * This is used by the thread chooser when the current cpu's queues are
1544  * empty to steal a thread from another cpu's queue.  We want to offload
1545  * the most heavily-loaded queue.
1546  */
1547 static
1548 dfly_pcpu_t
1549 dfly_choose_worst_queue(dfly_pcpu_t dd)
1550 {
1551 	cpumask_t mask;
1552 	cpu_node_t *cpup;
1553 	cpu_node_t *cpun;
1554 	cpu_node_t *cpub;
1555 	dfly_pcpu_t rdd;
1556 	int cpuid;
1557 	int n;
1558 	int count;
1559 	int load;
1560 #if 0
1561 	int pri;
1562 	int hpri;
1563 #endif
1564 	int highest_load;
1565 
1566 	/*
1567 	 * When the topology is unknown choose a random cpu that is hopefully
1568 	 * idle.
1569 	 */
1570 	if (dd->cpunode == NULL) {
1571 		return (NULL);
1572 	}
1573 
1574 	/*
1575 	 * When the topology is known choose a cpu whos group has, in
1576 	 * aggregate, has the lowest weighted load.
1577 	 */
1578 	cpup = root_cpu_node;
1579 	rdd = dd;
1580 	while (cpup) {
1581 		/*
1582 		 * Degenerate case super-root
1583 		 */
1584 		if (cpup->child_node && cpup->child_no == 1) {
1585 			cpup = cpup->child_node;
1586 			continue;
1587 		}
1588 
1589 		/*
1590 		 * Terminal cpunode
1591 		 */
1592 		if (cpup->child_node == NULL) {
1593 			rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1594 			break;
1595 		}
1596 
1597 		cpub = NULL;
1598 		highest_load = 0;
1599 
1600 		for (n = 0; n < cpup->child_no; ++n) {
1601 			/*
1602 			 * Accumulate load information for all cpus
1603 			 * which are members of this node.
1604 			 */
1605 			cpun = &cpup->child_node[n];
1606 			mask = cpun->members & usched_global_cpumask &
1607 			       smp_active_mask;
1608 			if (mask == 0)
1609 				continue;
1610 			count = 0;
1611 			load = 0;
1612 
1613 			while (mask) {
1614 				cpuid = BSFCPUMASK(mask);
1615 				rdd = &dfly_pcpu[cpuid];
1616 				load += rdd->uload;
1617 				load += rdd->ucount * usched_dfly_weight3;
1618 				if (rdd->uschedcp == NULL &&
1619 				    rdd->runqcount == 0 &&
1620 				    globaldata_find(cpuid)->gd_tdrunqcount == 0
1621 				) {
1622 					load -= usched_dfly_weight4;
1623 				}
1624 #if 0
1625 				else if (rdd->upri > dd->upri + PPQ) {
1626 					load -= usched_dfly_weight4 / 2;
1627 				}
1628 #endif
1629 				mask &= ~CPUMASK(cpuid);
1630 				++count;
1631 			}
1632 			load /= count;
1633 
1634 			/*
1635 			 * Prefer candidates which are somewhat closer to
1636 			 * our cpu.
1637 			 */
1638 			if (dd->cpumask & cpun->members)
1639 				load += usched_dfly_weight1;
1640 
1641 			/*
1642 			 * The best candidate is the one with the worst
1643 			 * (highest) load.
1644 			 */
1645 			if (cpub == NULL || highest_load < load) {
1646 				highest_load = load;
1647 				cpub = cpun;
1648 			}
1649 		}
1650 		cpup = cpub;
1651 	}
1652 
1653 	/*
1654 	 * We never return our own node (dd), and only return a remote
1655 	 * node if it's load is significantly worse than ours (i.e. where
1656 	 * stealing a thread would be considered reasonable).
1657 	 *
1658 	 * This also helps us avoid breaking paired threads apart which
1659 	 * can have disastrous effects on performance.
1660 	 */
1661 	if (rdd == dd)
1662 		return(NULL);
1663 
1664 #if 0
1665 	hpri = 0;
1666 	if (rdd->rtqueuebits && hpri < (pri = bsrl(rdd->rtqueuebits)))
1667 		hpri = pri;
1668 	if (rdd->queuebits && hpri < (pri = bsrl(rdd->queuebits)))
1669 		hpri = pri;
1670 	if (rdd->idqueuebits && hpri < (pri = bsrl(rdd->idqueuebits)))
1671 		hpri = pri;
1672 	hpri *= PPQ;
1673 	if (rdd->uload - hpri < dd->uload + hpri)
1674 		return(NULL);
1675 #endif
1676 	return (rdd);
1677 }
1678 
1679 static
1680 dfly_pcpu_t
1681 dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp)
1682 {
1683 	dfly_pcpu_t rdd;
1684 	cpumask_t tmpmask;
1685 	cpumask_t mask;
1686 	int cpuid;
1687 
1688 	/*
1689 	 * Fallback to the original heuristic, select random cpu,
1690 	 * first checking cpus not currently running a user thread.
1691 	 */
1692 	++dfly_scancpu;
1693 	cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1694 	mask = ~dfly_curprocmask & dfly_rdyprocmask & lp->lwp_cpumask &
1695 	       smp_active_mask & usched_global_cpumask;
1696 
1697 	while (mask) {
1698 		tmpmask = ~(CPUMASK(cpuid) - 1);
1699 		if (mask & tmpmask)
1700 			cpuid = BSFCPUMASK(mask & tmpmask);
1701 		else
1702 			cpuid = BSFCPUMASK(mask);
1703 		rdd = &dfly_pcpu[cpuid];
1704 
1705 		if ((rdd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK))
1706 			goto found;
1707 		mask &= ~CPUMASK(cpuid);
1708 	}
1709 
1710 	/*
1711 	 * Then cpus which might have a currently running lp
1712 	 */
1713 	cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1714 	mask = dfly_curprocmask & dfly_rdyprocmask &
1715 	       lp->lwp_cpumask & smp_active_mask & usched_global_cpumask;
1716 
1717 	while (mask) {
1718 		tmpmask = ~(CPUMASK(cpuid) - 1);
1719 		if (mask & tmpmask)
1720 			cpuid = BSFCPUMASK(mask & tmpmask);
1721 		else
1722 			cpuid = BSFCPUMASK(mask);
1723 		rdd = &dfly_pcpu[cpuid];
1724 
1725 		if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
1726 			goto found;
1727 		mask &= ~CPUMASK(cpuid);
1728 	}
1729 
1730 	/*
1731 	 * If we cannot find a suitable cpu we reload from dfly_scancpu
1732 	 * and round-robin.  Other cpus will pickup as they release their
1733 	 * current lwps or become ready.
1734 	 *
1735 	 * Avoid a degenerate system lockup case if usched_global_cpumask
1736 	 * is set to 0 or otherwise does not cover lwp_cpumask.
1737 	 *
1738 	 * We only kick the target helper thread in this case, we do not
1739 	 * set the user resched flag because
1740 	 */
1741 	cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1742 	if ((CPUMASK(cpuid) & usched_global_cpumask) == 0)
1743 		cpuid = 0;
1744 	rdd = &dfly_pcpu[cpuid];
1745 found:
1746 	return (rdd);
1747 }
1748 
1749 static
1750 void
1751 dfly_need_user_resched_remote(void *dummy)
1752 {
1753 	globaldata_t gd = mycpu;
1754 	dfly_pcpu_t  dd = &dfly_pcpu[gd->gd_cpuid];
1755 
1756 	/*
1757 	 * Flag reschedule needed
1758 	 */
1759 	need_user_resched();
1760 
1761 	/*
1762 	 * If no user thread is currently running we need to kick the helper
1763 	 * on our cpu to recover.  Otherwise the cpu will never schedule
1764 	 * anything again.
1765 	 *
1766 	 * We cannot schedule the process ourselves because this is an
1767 	 * IPI callback and we cannot acquire spinlocks in an IPI callback.
1768 	 *
1769 	 * Call wakeup_mycpu to avoid sending IPIs to other CPUs
1770 	 */
1771 	if (dd->uschedcp == NULL && (dfly_rdyprocmask & gd->gd_cpumask)) {
1772 		atomic_clear_cpumask(&dfly_rdyprocmask, gd->gd_cpumask);
1773 		wakeup_mycpu(&dd->helper_thread);
1774 	}
1775 }
1776 
1777 /*
1778  * dfly_remrunqueue_locked() removes a given process from the run queue
1779  * that it is on, clearing the queue busy bit if it becomes empty.
1780  *
1781  * Note that user process scheduler is different from the LWKT schedule.
1782  * The user process scheduler only manages user processes but it uses LWKT
1783  * underneath, and a user process operating in the kernel will often be
1784  * 'released' from our management.
1785  *
1786  * uload is NOT adjusted here.  It is only adjusted if the lwkt_thread goes
1787  * to sleep or the lwp is moved to a different runq.
1788  */
1789 static void
1790 dfly_remrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1791 {
1792 	struct rq *q;
1793 	u_int32_t *which;
1794 	u_int8_t pri;
1795 
1796 	KKASSERT(rdd->runqcount >= 0);
1797 
1798 	pri = lp->lwp_rqindex;
1799 
1800 	switch(lp->lwp_rqtype) {
1801 	case RTP_PRIO_NORMAL:
1802 		q = &rdd->queues[pri];
1803 		which = &rdd->queuebits;
1804 		break;
1805 	case RTP_PRIO_REALTIME:
1806 	case RTP_PRIO_FIFO:
1807 		q = &rdd->rtqueues[pri];
1808 		which = &rdd->rtqueuebits;
1809 		break;
1810 	case RTP_PRIO_IDLE:
1811 		q = &rdd->idqueues[pri];
1812 		which = &rdd->idqueuebits;
1813 		break;
1814 	default:
1815 		panic("remrunqueue: invalid rtprio type");
1816 		/* NOT REACHED */
1817 	}
1818 	KKASSERT(lp->lwp_mpflags & LWP_MP_ONRUNQ);
1819 	atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1820 	TAILQ_REMOVE(q, lp, lwp_procq);
1821 	--rdd->runqcount;
1822 	if (TAILQ_EMPTY(q)) {
1823 		KASSERT((*which & (1 << pri)) != 0,
1824 			("remrunqueue: remove from empty queue"));
1825 		*which &= ~(1 << pri);
1826 	}
1827 }
1828 
1829 /*
1830  * dfly_setrunqueue_locked()
1831  *
1832  * Add a process whos rqtype and rqindex had previously been calculated
1833  * onto the appropriate run queue.   Determine if the addition requires
1834  * a reschedule on a cpu and return the cpuid or -1.
1835  *
1836  * NOTE: 	  Lower priorities are better priorities.
1837  *
1838  * NOTE ON ULOAD: This variable specifies the aggregate load on a cpu, the
1839  *		  sum of the rough lwp_priority for all running and runnable
1840  *		  processes.  Lower priority processes (higher lwp_priority
1841  *		  values) actually DO count as more load, not less, because
1842  *		  these are the programs which require the most care with
1843  *		  regards to cpu selection.
1844  */
1845 static void
1846 dfly_setrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1847 {
1848 	struct rq *q;
1849 	u_int32_t *which;
1850 	int pri;
1851 
1852 	KKASSERT(lp->lwp_qcpu == rdd->cpuid);
1853 
1854 	if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1855 		atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1856 		atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload, lp->lwp_uload);
1857 		atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].ucount, 1);
1858 		atomic_add_int(&dfly_ucount, 1);
1859 	}
1860 
1861 	pri = lp->lwp_rqindex;
1862 
1863 	switch(lp->lwp_rqtype) {
1864 	case RTP_PRIO_NORMAL:
1865 		q = &rdd->queues[pri];
1866 		which = &rdd->queuebits;
1867 		break;
1868 	case RTP_PRIO_REALTIME:
1869 	case RTP_PRIO_FIFO:
1870 		q = &rdd->rtqueues[pri];
1871 		which = &rdd->rtqueuebits;
1872 		break;
1873 	case RTP_PRIO_IDLE:
1874 		q = &rdd->idqueues[pri];
1875 		which = &rdd->idqueuebits;
1876 		break;
1877 	default:
1878 		panic("remrunqueue: invalid rtprio type");
1879 		/* NOT REACHED */
1880 	}
1881 
1882 	/*
1883 	 * Place us on the selected queue.  Determine if we should be
1884 	 * placed at the head of the queue or at the end.
1885 	 *
1886 	 * We are placed at the tail if our round-robin count has expired,
1887 	 * or is about to expire and the system thinks its a good place to
1888 	 * round-robin, or there is already a next thread on the queue
1889 	 * (it might be trying to pick up where it left off and we don't
1890 	 * want to interfere).
1891 	 */
1892 	KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
1893 	atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1894 	++rdd->runqcount;
1895 
1896 	if (lp->lwp_rrcount >= usched_dfly_rrinterval ||
1897 	    (lp->lwp_rrcount >= usched_dfly_rrinterval / 2 &&
1898 	     (lp->lwp_thread->td_mpflags & TDF_MP_BATCH_DEMARC)) ||
1899 	    !TAILQ_EMPTY(q)
1900 	) {
1901 		atomic_clear_int(&lp->lwp_thread->td_mpflags,
1902 				 TDF_MP_BATCH_DEMARC);
1903 		lp->lwp_rrcount = 0;
1904 		TAILQ_INSERT_TAIL(q, lp, lwp_procq);
1905 	} else {
1906 		if (TAILQ_EMPTY(q))
1907 			lp->lwp_rrcount = 0;
1908 		TAILQ_INSERT_HEAD(q, lp, lwp_procq);
1909 	}
1910 	*which |= 1 << pri;
1911 }
1912 
1913 /*
1914  * For SMP systems a user scheduler helper thread is created for each
1915  * cpu and is used to allow one cpu to wakeup another for the purposes of
1916  * scheduling userland threads from setrunqueue().
1917  *
1918  * UP systems do not need the helper since there is only one cpu.
1919  *
1920  * We can't use the idle thread for this because we might block.
1921  * Additionally, doing things this way allows us to HLT idle cpus
1922  * on MP systems.
1923  */
1924 static void
1925 dfly_helper_thread(void *dummy)
1926 {
1927     globaldata_t gd;
1928     dfly_pcpu_t dd;
1929     dfly_pcpu_t rdd;
1930     struct lwp *nlp;
1931     cpumask_t mask;
1932     int cpuid;
1933 
1934     gd = mycpu;
1935     cpuid = gd->gd_cpuid;	/* doesn't change */
1936     mask = gd->gd_cpumask;	/* doesn't change */
1937     dd = &dfly_pcpu[cpuid];
1938 
1939     /*
1940      * Since we only want to be woken up only when no user processes
1941      * are scheduled on a cpu, run at an ultra low priority.
1942      */
1943     lwkt_setpri_self(TDPRI_USER_SCHEDULER);
1944 
1945     tsleep(&dd->helper_thread, 0, "schslp", 0);
1946 
1947     for (;;) {
1948 	/*
1949 	 * We use the LWKT deschedule-interlock trick to avoid racing
1950 	 * dfly_rdyprocmask.  This means we cannot block through to the
1951 	 * manual lwkt_switch() call we make below.
1952 	 */
1953 	crit_enter_gd(gd);
1954 	tsleep_interlock(&dd->helper_thread, 0);
1955 
1956 	spin_lock(&dd->spin);
1957 
1958 	atomic_set_cpumask(&dfly_rdyprocmask, mask);
1959 	clear_user_resched();	/* This satisfied the reschedule request */
1960 #if 0
1961 	dd->rrcount = 0;	/* Reset the round-robin counter */
1962 #endif
1963 
1964 	if (dd->runqcount || dd->uschedcp != NULL) {
1965 		/*
1966 		 * Threads are available.  A thread may or may not be
1967 		 * currently scheduled.  Get the best thread already queued
1968 		 * to this cpu.
1969 		 */
1970 		nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);
1971 		if (nlp) {
1972 			atomic_set_cpumask(&dfly_curprocmask, mask);
1973 			dd->upri = nlp->lwp_priority;
1974 			dd->uschedcp = nlp;
1975 #if 0
1976 			dd->rrcount = 0;	/* reset round robin */
1977 #endif
1978 			spin_unlock(&dd->spin);
1979 			lwkt_acquire(nlp->lwp_thread);
1980 			lwkt_schedule(nlp->lwp_thread);
1981 		} else {
1982 			/*
1983 			 * This situation should not occur because we had
1984 			 * at least one thread available.
1985 			 */
1986 			spin_unlock(&dd->spin);
1987 		}
1988 	} else if (usched_dfly_features & 0x01) {
1989 		/*
1990 		 * This cpu is devoid of runnable threads, steal a thread
1991 		 * from another cpu.  Since we're stealing, might as well
1992 		 * load balance at the same time.
1993 		 *
1994 		 * We choose the highest-loaded thread from the worst queue.
1995 		 *
1996 		 * NOTE! This function only returns a non-NULL rdd when
1997 		 *	 another cpu's queue is obviously overloaded.  We
1998 		 *	 do not want to perform the type of rebalancing
1999 		 *	 the schedclock does here because it would result
2000 		 *	 in insane process pulling when 'steady' state is
2001 		 *	 partially unbalanced (e.g. 6 runnables and only
2002 		 *	 4 cores).
2003 		 */
2004 		rdd = dfly_choose_worst_queue(dd);
2005 		if (rdd && spin_trylock(&rdd->spin)) {
2006 			nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
2007 			spin_unlock(&rdd->spin);
2008 		} else {
2009 			nlp = NULL;
2010 		}
2011 		if (nlp) {
2012 			atomic_set_cpumask(&dfly_curprocmask, mask);
2013 			dd->upri = nlp->lwp_priority;
2014 			dd->uschedcp = nlp;
2015 #if 0
2016 			dd->rrcount = 0;	/* reset round robin */
2017 #endif
2018 			spin_unlock(&dd->spin);
2019 			lwkt_acquire(nlp->lwp_thread);
2020 			lwkt_schedule(nlp->lwp_thread);
2021 		} else {
2022 			/*
2023 			 * Leave the thread on our run queue.  Another
2024 			 * scheduler will try to pull it later.
2025 			 */
2026 			spin_unlock(&dd->spin);
2027 		}
2028 	} else {
2029 		/*
2030 		 * devoid of runnable threads and not allowed to steal
2031 		 * any.
2032 		 */
2033 		spin_unlock(&dd->spin);
2034 	}
2035 
2036 	/*
2037 	 * We're descheduled unless someone scheduled us.  Switch away.
2038 	 * Exiting the critical section will cause splz() to be called
2039 	 * for us if interrupts and such are pending.
2040 	 */
2041 	crit_exit_gd(gd);
2042 	tsleep(&dd->helper_thread, PINTERLOCKED, "schslp", 0);
2043     }
2044 }
2045 
2046 #if 0
2047 static int
2048 sysctl_usched_dfly_stick_to_level(SYSCTL_HANDLER_ARGS)
2049 {
2050 	int error, new_val;
2051 
2052 	new_val = usched_dfly_stick_to_level;
2053 
2054 	error = sysctl_handle_int(oidp, &new_val, 0, req);
2055         if (error != 0 || req->newptr == NULL)
2056 		return (error);
2057 	if (new_val > cpu_topology_levels_number - 1 || new_val < 0)
2058 		return (EINVAL);
2059 	usched_dfly_stick_to_level = new_val;
2060 	return (0);
2061 }
2062 #endif
2063 
2064 /*
2065  * Setup the queues and scheduler helpers (scheduler helpers are SMP only).
2066  * Note that curprocmask bit 0 has already been cleared by rqinit() and
2067  * we should not mess with it further.
2068  */
2069 static void
2070 usched_dfly_cpu_init(void)
2071 {
2072 	int i;
2073 	int j;
2074 	int cpuid;
2075 	int smt_not_supported = 0;
2076 	int cache_coherent_not_supported = 0;
2077 
2078 	if (bootverbose)
2079 		kprintf("Start scheduler helpers on cpus:\n");
2080 
2081 	sysctl_ctx_init(&usched_dfly_sysctl_ctx);
2082 	usched_dfly_sysctl_tree =
2083 		SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
2084 				SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
2085 				"usched_dfly", CTLFLAG_RD, 0, "");
2086 
2087 	for (i = 0; i < ncpus; ++i) {
2088 		dfly_pcpu_t dd = &dfly_pcpu[i];
2089 		cpumask_t mask = CPUMASK(i);
2090 
2091 		if ((mask & smp_active_mask) == 0)
2092 		    continue;
2093 
2094 		spin_init(&dd->spin);
2095 		dd->cpunode = get_cpu_node_by_cpuid(i);
2096 		dd->cpuid = i;
2097 		dd->cpumask = CPUMASK(i);
2098 		for (j = 0; j < NQS; j++) {
2099 			TAILQ_INIT(&dd->queues[j]);
2100 			TAILQ_INIT(&dd->rtqueues[j]);
2101 			TAILQ_INIT(&dd->idqueues[j]);
2102 		}
2103 		atomic_clear_cpumask(&dfly_curprocmask, 1);
2104 
2105 		if (dd->cpunode == NULL) {
2106 			smt_not_supported = 1;
2107 			cache_coherent_not_supported = 1;
2108 			if (bootverbose)
2109 				kprintf ("\tcpu%d - WARNING: No CPU NODE "
2110 					 "found for cpu\n", i);
2111 		} else {
2112 			switch (dd->cpunode->type) {
2113 			case THREAD_LEVEL:
2114 				if (bootverbose)
2115 					kprintf ("\tcpu%d - HyperThreading "
2116 						 "available. Core siblings: ",
2117 						 i);
2118 				break;
2119 			case CORE_LEVEL:
2120 				smt_not_supported = 1;
2121 
2122 				if (bootverbose)
2123 					kprintf ("\tcpu%d - No HT available, "
2124 						 "multi-core/physical "
2125 						 "cpu. Physical siblings: ",
2126 						 i);
2127 				break;
2128 			case CHIP_LEVEL:
2129 				smt_not_supported = 1;
2130 
2131 				if (bootverbose)
2132 					kprintf ("\tcpu%d - No HT available, "
2133 						 "single-core/physical cpu. "
2134 						 "Package Siblings: ",
2135 						 i);
2136 				break;
2137 			default:
2138 				/* Let's go for safe defaults here */
2139 				smt_not_supported = 1;
2140 				cache_coherent_not_supported = 1;
2141 				if (bootverbose)
2142 					kprintf ("\tcpu%d - Unknown cpunode->"
2143 						 "type=%u. Siblings: ",
2144 						 i,
2145 						 (u_int)dd->cpunode->type);
2146 				break;
2147 			}
2148 
2149 			if (bootverbose) {
2150 				if (dd->cpunode->parent_node != NULL) {
2151 					CPUSET_FOREACH(cpuid, dd->cpunode->parent_node->members)
2152 						kprintf("cpu%d ", cpuid);
2153 					kprintf("\n");
2154 				} else {
2155 					kprintf(" no siblings\n");
2156 				}
2157 			}
2158 		}
2159 
2160 		lwkt_create(dfly_helper_thread, NULL, NULL, &dd->helper_thread,
2161 			    0, i, "usched %d", i);
2162 
2163 		/*
2164 		 * Allow user scheduling on the target cpu.  cpu #0 has already
2165 		 * been enabled in rqinit().
2166 		 */
2167 		if (i)
2168 		    atomic_clear_cpumask(&dfly_curprocmask, mask);
2169 		atomic_set_cpumask(&dfly_rdyprocmask, mask);
2170 		dd->upri = PRIBASE_NULL;
2171 
2172 	}
2173 
2174 	/* usched_dfly sysctl configurable parameters */
2175 
2176 	SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2177 		       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2178 		       OID_AUTO, "rrinterval", CTLFLAG_RW,
2179 		       &usched_dfly_rrinterval, 0, "");
2180 	SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2181 		       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2182 		       OID_AUTO, "decay", CTLFLAG_RW,
2183 		       &usched_dfly_decay, 0, "Extra decay when not running");
2184 
2185 	/* Add enable/disable option for SMT scheduling if supported */
2186 	if (smt_not_supported) {
2187 		usched_dfly_smt = 0;
2188 		SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2189 				  SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2190 				  OID_AUTO, "smt", CTLFLAG_RD,
2191 				  "NOT SUPPORTED", 0, "SMT NOT SUPPORTED");
2192 	} else {
2193 		usched_dfly_smt = 1;
2194 		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2195 			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2196 			       OID_AUTO, "smt", CTLFLAG_RW,
2197 			       &usched_dfly_smt, 0, "Enable SMT scheduling");
2198 	}
2199 
2200 	/*
2201 	 * Add enable/disable option for cache coherent scheduling
2202 	 * if supported
2203 	 */
2204 	if (cache_coherent_not_supported) {
2205 		usched_dfly_cache_coherent = 0;
2206 		SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2207 				  SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2208 				  OID_AUTO, "cache_coherent", CTLFLAG_RD,
2209 				  "NOT SUPPORTED", 0,
2210 				  "Cache coherence NOT SUPPORTED");
2211 	} else {
2212 		usched_dfly_cache_coherent = 1;
2213 		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2214 			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2215 			       OID_AUTO, "cache_coherent", CTLFLAG_RW,
2216 			       &usched_dfly_cache_coherent, 0,
2217 			       "Enable/Disable cache coherent scheduling");
2218 
2219 		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2220 			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2221 			       OID_AUTO, "weight1", CTLFLAG_RW,
2222 			       &usched_dfly_weight1, 200,
2223 			       "Weight selection for current cpu");
2224 
2225 		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2226 			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2227 			       OID_AUTO, "weight2", CTLFLAG_RW,
2228 			       &usched_dfly_weight2, 180,
2229 			       "Weight selection for wakefrom cpu");
2230 
2231 		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2232 			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2233 			       OID_AUTO, "weight3", CTLFLAG_RW,
2234 			       &usched_dfly_weight3, 40,
2235 			       "Weight selection for num threads on queue");
2236 
2237 		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2238 			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2239 			       OID_AUTO, "weight4", CTLFLAG_RW,
2240 			       &usched_dfly_weight4, 160,
2241 			       "Availability of other idle cpus");
2242 
2243 		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2244 			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2245 			       OID_AUTO, "fast_resched", CTLFLAG_RW,
2246 			       &usched_dfly_fast_resched, 0,
2247 			       "Availability of other idle cpus");
2248 
2249 		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2250 			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2251 			       OID_AUTO, "features", CTLFLAG_RW,
2252 			       &usched_dfly_features, 0x8F,
2253 			       "Allow pulls into empty queues");
2254 
2255 		SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2256 			       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2257 			       OID_AUTO, "swmask", CTLFLAG_RW,
2258 			       &usched_dfly_swmask, ~PPQMASK,
2259 			       "Queue mask to force thread switch");
2260 
2261 #if 0
2262 		SYSCTL_ADD_PROC(&usched_dfly_sysctl_ctx,
2263 				SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2264 				OID_AUTO, "stick_to_level",
2265 				CTLTYPE_INT | CTLFLAG_RW,
2266 				NULL, sizeof usched_dfly_stick_to_level,
2267 				sysctl_usched_dfly_stick_to_level, "I",
2268 				"Stick a process to this level. See sysctl"
2269 				"paremter hw.cpu_topology.level_description");
2270 #endif
2271 	}
2272 }
2273 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2274 	usched_dfly_cpu_init, NULL)
2275