xref: /dragonfly/sys/kern/usched_bsd4.c (revision 0bb9290e)
1 /*
2  * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $DragonFly: src/sys/kern/usched_bsd4.c,v 1.16 2006/07/11 01:01:50 dillon Exp $
27  */
28 
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/kernel.h>
32 #include <sys/lock.h>
33 #include <sys/queue.h>
34 #include <sys/proc.h>
35 #include <sys/rtprio.h>
36 #include <sys/uio.h>
37 #include <sys/sysctl.h>
38 #include <sys/resourcevar.h>
39 #include <sys/spinlock.h>
40 #include <machine/ipl.h>
41 #include <machine/cpu.h>
42 #include <machine/smp.h>
43 
44 #include <sys/thread2.h>
45 #include <sys/spinlock2.h>
46 
47 /*
48  * Priorities.  Note that with 32 run queues per scheduler each queue
49  * represents four priority levels.
50  */
51 
52 #define MAXPRI			128
53 #define PRIMASK			(MAXPRI - 1)
54 #define PRIBASE_REALTIME	0
55 #define PRIBASE_NORMAL		MAXPRI
56 #define PRIBASE_IDLE		(MAXPRI * 2)
57 #define PRIBASE_THREAD		(MAXPRI * 3)
58 #define PRIBASE_NULL		(MAXPRI * 4)
59 
60 #define NQS	32			/* 32 run queues. */
61 #define PPQ	(MAXPRI / NQS)		/* priorities per queue */
62 #define PPQMASK	(PPQ - 1)
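
/*
 * A quick sanity check of the numbers above: PPQ = MAXPRI / NQS =
 * 128 / 32 = 4 priority levels per queue, and a normal-class priority
 * p selects run queue (p & PRIMASK) / PPQ, so e.g. a computed priority
 * of 57 lands on queue index 14.
 */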
63 
64 /*
65  * NICEPPQ	- number of nice units per priority queue
66  * ESTCPURAMP	- number of scheduler ticks for estcpu to switch queues
67  *
68  * ESTCPUPPQ	- number of estcpu units per priority queue
69  * ESTCPUMAX	- number of estcpu units
70  * ESTCPUINCR	- amount we have to increment p_estcpu per scheduling tick at
71  *		  100% cpu.
72  */
73 #define NICEPPQ		2
74 #define ESTCPURAMP	4
75 #define ESTCPUPPQ	512
76 #define ESTCPUMAX	(ESTCPUPPQ * NQS)
77 #define ESTCPUINCR	(ESTCPUPPQ / ESTCPURAMP)
78 #define PRIO_RANGE	(PRIO_MAX - PRIO_MIN + 1)
79 
80 #define ESTCPULIM(v)	min((v), ESTCPUMAX)
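
/*
 * Plugging in the constants: ESTCPUMAX = 512 * 32 = 16384 and
 * ESTCPUINCR = 512 / 4 = 128, so a thread running at 100% cpu
 * accumulates one queue's worth of estcpu (ESTCPUPPQ) every
 * ESTCPURAMP scheduler ticks, i.e. it drops one run queue about
 * every four ticks.
 */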
81 
82 TAILQ_HEAD(rq, lwp);
83 
84 #define lwp_priority	lwp_usdata.bsd4.priority
85 #define lwp_rqindex	lwp_usdata.bsd4.rqindex
86 #define lwp_origcpu	lwp_usdata.bsd4.origcpu
87 #define lwp_estcpu	lwp_usdata.bsd4.estcpu
88 #define lwp_rqtype	lwp_usdata.bsd4.rqtype
89 
90 static void bsd4_acquire_curproc(struct lwp *lp);
91 static void bsd4_release_curproc(struct lwp *lp);
92 static void bsd4_select_curproc(globaldata_t gd);
93 static void bsd4_setrunqueue(struct lwp *lp);
94 static void bsd4_schedulerclock(struct lwp *lp, sysclock_t period,
95 				sysclock_t cpstamp);
96 static void bsd4_recalculate_estcpu(struct lwp *lp);
97 static void bsd4_resetpriority(struct lwp *lp);
98 static void bsd4_forking(struct lwp *plp, struct lwp *lp);
99 static void bsd4_exiting(struct lwp *plp, struct lwp *lp);
100 
101 #ifdef SMP
102 static void need_user_resched_remote(void *dummy);
103 #endif
104 static struct lwp *chooseproc_locked(struct lwp *chklp);
105 static void bsd4_remrunqueue_locked(struct lwp *lp);
106 static void bsd4_setrunqueue_locked(struct lwp *lp);
107 
108 struct usched usched_bsd4 = {
109 	{ NULL },
110 	"bsd4", "Original DragonFly Scheduler",
111 	NULL,			/* default registration */
112 	NULL,			/* default deregistration */
113 	bsd4_acquire_curproc,
114 	bsd4_release_curproc,
115 	bsd4_setrunqueue,
116 	bsd4_schedulerclock,
117 	bsd4_recalculate_estcpu,
118 	bsd4_resetpriority,
119 	bsd4_forking,
120 	bsd4_exiting,
121 	NULL			/* setcpumask not supported */
122 };
123 
124 struct usched_bsd4_pcpu {
125 	struct thread helper_thread;
126 	short	rrcount;
127 	short	upri;
128 	struct lwp *uschedcp;
129 };
130 
131 typedef struct usched_bsd4_pcpu	*bsd4_pcpu_t;
132 
133 /*
134  * We have NQS (32) run queues per scheduling class.  For the normal
135  * class, there are 128 priorities scaled onto these 32 queues.  New
136  * processes are added to the last entry in each queue, and processes
137  * are selected for running by taking them from the head and maintaining
138  * a simple FIFO arrangement.  Realtime and Idle priority processes have
139  * an explicit 0-31 priority which maps directly onto their class queue
140  * index.  When a queue has something in it, the corresponding bit is
141  * set in the queuebits variable, allowing a single read to determine
142  * the state of all 32 queues and then a ffs() to find the first busy
143  * queue.
144  */
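/*
 * For example, if only queues 5 and 9 of the normal class are occupied,
 * bsd4_queuebits reads 0x00000220 and bsfl() returns 5, so the highest
 * priority non-empty queue is located with a single scan of the word.
 */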
145 static struct rq bsd4_queues[NQS];
146 static struct rq bsd4_rtqueues[NQS];
147 static struct rq bsd4_idqueues[NQS];
148 static u_int32_t bsd4_queuebits;
149 static u_int32_t bsd4_rtqueuebits;
150 static u_int32_t bsd4_idqueuebits;
151 static cpumask_t bsd4_curprocmask = -1;	/* currently running a user process */
152 static cpumask_t bsd4_rdyprocmask;	/* ready to accept a user process */
153 static int	 bsd4_runqcount;
154 #ifdef SMP
155 static volatile int bsd4_scancpu;
156 #endif
157 static struct spinlock bsd4_spin;
158 static struct usched_bsd4_pcpu bsd4_pcpu[MAXCPU];
159 
160 SYSCTL_INT(_debug, OID_AUTO, bsd4_runqcount, CTLFLAG_RD, &bsd4_runqcount, 0, "");
161 #ifdef INVARIANTS
162 static int usched_nonoptimal;
163 SYSCTL_INT(_debug, OID_AUTO, usched_nonoptimal, CTLFLAG_RW,
164         &usched_nonoptimal, 0, "acquire_curproc() was not optimal");
165 static int usched_optimal;
166 SYSCTL_INT(_debug, OID_AUTO, usched_optimal, CTLFLAG_RW,
167         &usched_optimal, 0, "acquire_curproc() was optimal");
168 #endif
169 static int usched_debug = -1;
170 SYSCTL_INT(_debug, OID_AUTO, scdebug, CTLFLAG_RW, &usched_debug, 0, "");
171 #ifdef SMP
172 static int remote_resched_nonaffinity;
173 static int remote_resched_affinity;
174 static int choose_affinity;
175 SYSCTL_INT(_debug, OID_AUTO, remote_resched_nonaffinity, CTLFLAG_RD,
176         &remote_resched_nonaffinity, 0, "Number of non-affinity remote rescheds");
177 SYSCTL_INT(_debug, OID_AUTO, remote_resched_affinity, CTLFLAG_RD,
178         &remote_resched_affinity, 0, "Number of affinity remote rescheds");
179 SYSCTL_INT(_debug, OID_AUTO, choose_affinity, CTLFLAG_RD,
180         &choose_affinity, 0, "chooseproc() was smart");
181 #endif
182 
183 static int usched_bsd4_rrinterval = (ESTCPUFREQ + 9) / 10;
184 SYSCTL_INT(_kern, OID_AUTO, usched_bsd4_rrinterval, CTLFLAG_RW,
185         &usched_bsd4_rrinterval, 0, "");
186 static int usched_bsd4_decay = ESTCPUINCR / 2;
187 SYSCTL_INT(_kern, OID_AUTO, usched_bsd4_decay, CTLFLAG_RW,
188         &usched_bsd4_decay, 0, "");
189 
190 /*
191  * Initialize the run queues at boot time.
192  */
193 static void
194 rqinit(void *dummy)
195 {
196 	int i;
197 
198 	spin_init(&bsd4_spin);
199 	for (i = 0; i < NQS; i++) {
200 		TAILQ_INIT(&bsd4_queues[i]);
201 		TAILQ_INIT(&bsd4_rtqueues[i]);
202 		TAILQ_INIT(&bsd4_idqueues[i]);
203 	}
204 	atomic_clear_int(&bsd4_curprocmask, 1);
205 }
206 SYSINIT(runqueue, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, rqinit, NULL)
207 
208 /*
209  * BSD4_ACQUIRE_CURPROC
210  *
211  * This function is called when the kernel intends to return to userland.
212  * It is responsible for making the thread the current designated userland
213  * thread for this cpu, blocking if necessary.
214  *
215  * We are expected to handle userland reschedule requests here too.
216  *
217  * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
218  * TO ANOTHER CPU!  Because most of the kernel assumes that no migration will
219  * occur, this function is called only under very controlled circumstances.
220  *
221  * Basically we recalculate our estcpu to hopefully give us a more
222  * favorable disposition, setrunqueue, then wait for the curlwp
223  * designation to be handed to us (if the setrunqueue didn't do it).
224  *
225  * MPSAFE
226  */
227 static void
228 bsd4_acquire_curproc(struct lwp *lp)
229 {
230 	globaldata_t gd = mycpu;
231 	bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];
232 
233 	/*
234 	 * Possibly select another thread, or keep the current thread.
235 	 */
236 	if (user_resched_wanted())
237 		bsd4_select_curproc(gd);
238 
239 	/*
240 	 * If uschedcp is still pointing to us, we're done
241 	 */
242 	if (dd->uschedcp == lp)
243 		return;
244 
245 	/*
246 	 * If this cpu has no current thread, and the run queue is
247 	 * empty, we can safely select ourself.
248 	 */
249 	if (dd->uschedcp == NULL && bsd4_runqcount == 0) {
250 		atomic_set_int(&bsd4_curprocmask, gd->gd_cpumask);
251 		dd->uschedcp = lp;
252 		dd->upri = lp->lwp_priority;
253 		return;
254 	}
255 
256 	/*
257 	 * Adjust estcpu and recalculate our priority, then put us back on
258 	 * the user process scheduler's runq.  Only increment the involuntary
259 	 * context switch count if the setrunqueue call did not immediately
260 	 * schedule us.
261 	 *
262 	 * Loop until we become the currently scheduled process.  Note that
263 	 * calling setrunqueue can cause us to be migrated to another cpu
264 	 * after we switch away.
265 	 */
266 	do {
267 		crit_enter();
268 		bsd4_recalculate_estcpu(lp);
269 		lwkt_deschedule_self(gd->gd_curthread);
270 		bsd4_setrunqueue(lp);
271 		if ((gd->gd_curthread->td_flags & TDF_RUNQ) == 0)
272 			++lp->lwp_stats->p_ru.ru_nivcsw;
273 		lwkt_switch();
274 		crit_exit();
275 		gd = mycpu;
276 		dd = &bsd4_pcpu[gd->gd_cpuid];
277 	} while (dd->uschedcp != lp);
278 	KKASSERT((lp->lwp_proc->p_flag & P_ONRUNQ) == 0);
279 }
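
/*
 * Note that the lwkt_deschedule_self()/bsd4_setrunqueue()/lwkt_switch()
 * sequence in the loop above is the deschedule/switch interlock: the
 * thread deschedules itself before it becomes visible on the bsd4 run
 * queue, so a wakeup issued by another cpu in the window before the
 * lwkt_switch() call cannot be lost.
 */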
280 
281 /*
282  * BSD4_RELEASE_CURPROC
283  *
284  * This routine detaches the current thread from the userland scheduler,
285  * usually because the thread needs to run in the kernel (at kernel priority)
286  * for a while.
287  *
288  * This routine is also responsible for selecting a new thread to
289  * make the current thread.
290  *
291  * NOTE: This implementation differs from the dummy example in that
292  * bsd4_select_curproc() is able to select the current process, whereas
293  * dummy_select_curproc() is not.  This means we have to NULL out
294  * uschedcp.
295  *
296  * Additionally, note that we may already be on a run queue if releasing
297  * via the lwkt_switch() in bsd4_setrunqueue().
298  *
299  * WARNING!  The MP lock may be in an unsynchronized state due to the
300  * way get_mplock() works and the fact that this function may be called
301  * from a passive release during a lwkt_switch().  try_mplock() will deal
302  * with this for us but you should be aware that td_mpcount may not be
303  * usable.
304  *
305  * MPSAFE
306  */
307 static void
308 bsd4_release_curproc(struct lwp *lp)
309 {
310 	globaldata_t gd = mycpu;
311 	bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];
312 
313 	if (dd->uschedcp == lp) {
314 		/*
315 		 * Note: we leave our curprocmask bit set to prevent
316 		 * unnecessary scheduler helper wakeups.
317 		 * bsd4_select_curproc() will clean it up.
318 		 */
319 		KKASSERT((lp->lwp_proc->p_flag & P_ONRUNQ) == 0);
320 		dd->uschedcp = NULL;	/* don't let lp be selected */
321 		bsd4_select_curproc(gd);
322 	}
323 }
324 
325 /*
326  * BSD4_SELECT_CURPROC
327  *
328  * Select a new current process for this cpu.  This satisfies a user
329  * scheduler reschedule request so clear that too.
330  *
331  * This routine is also responsible for equal-priority round-robining,
332  * typically triggered from bsd4_schedulerclock().  Contrast this with
333  * the dummy example, where all the 'user' threads are LWKT scheduled
334  * at once and lwkt_switch() does the rest.
335  *
336  * MPSAFE
337  */
338 static
339 void
340 bsd4_select_curproc(globaldata_t gd)
341 {
342 	bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];
343 	struct lwp *nlp;
344 	int cpuid = gd->gd_cpuid;
345 
346 	crit_enter_gd(gd);
347 	clear_user_resched();	/* This satisfies the reschedule request */
348 	dd->rrcount = 0;	/* Reset the round-robin counter */
349 
350 	spin_lock_wr(&bsd4_spin);
351 	if ((nlp = chooseproc_locked(dd->uschedcp)) != NULL) {
352 		atomic_set_int(&bsd4_curprocmask, 1 << cpuid);
353 		dd->upri = nlp->lwp_priority;
354 		dd->uschedcp = nlp;
355 		spin_unlock_wr(&bsd4_spin);
356 #ifdef SMP
357 		lwkt_acquire(nlp->lwp_thread);
358 #endif
359 		lwkt_schedule(nlp->lwp_thread);
360 	} else if (dd->uschedcp) {
361 		dd->upri = dd->uschedcp->lwp_priority;
362 		spin_unlock_wr(&bsd4_spin);
363 		KKASSERT(bsd4_curprocmask & (1 << cpuid));
364 	} else if (bsd4_runqcount && (bsd4_rdyprocmask & (1 << cpuid))) {
365 		atomic_clear_int(&bsd4_curprocmask, 1 << cpuid);
366 		atomic_clear_int(&bsd4_rdyprocmask, 1 << cpuid);
367 		dd->uschedcp = NULL;
368 		dd->upri = PRIBASE_NULL;
369 		spin_unlock_wr(&bsd4_spin);
370 		lwkt_schedule(&dd->helper_thread);
371 	} else {
372 		dd->uschedcp = NULL;
373 		dd->upri = PRIBASE_NULL;
374 		atomic_clear_int(&bsd4_curprocmask, 1 << cpuid);
375 		spin_unlock_wr(&bsd4_spin);
376 	}
377 	crit_exit_gd(gd);
378 }
379 
380 /*
381  * BSD4_SETRUNQUEUE
382  *
383  * This routine is called to schedule a new user process after a fork.
384  *
385  * The caller may set P_PASSIVE_ACQ in p_flag to indicate that we should
386  * attempt to leave the thread on the current cpu.
387  *
388  * If P_PASSIVE_ACQ is set setrunqueue() will not wakeup potential target
389  * cpus in an attempt to keep the process on the current cpu at least for
390  * a little while to take advantage of locality of reference (e.g. fork/exec
391  * or short fork/exit, and uio_yield()).
392  *
393  * CPU AFFINITY: cpu affinity is handled by attempting to either schedule
394  * or (user level) preempt on the same cpu that a process was previously
395  * scheduled to.  If we cannot do this but we are at enough of a higher
396  * priority than the processes running on other cpus, we will allow the
397  * process to be stolen by another cpu.
398  *
399  * WARNING!  This routine cannot block.  bsd4_acquire_curproc() does
400  * a deschedule/switch interlock and we can be moved to another cpu
401  * the moment we are switched out.  Our LWKT run state is the only
402  * thing preventing the transfer.
403  *
404  * The associated thread must NOT currently be scheduled (but can be the
405  * current process after it has been LWKT descheduled).  It must NOT be on
406  * a bsd4 scheduler queue either.  The purpose of this routine is to put
407  * it on a scheduler queue or make it the current user process and LWKT
408  * schedule it.  It is possible that the thread is in the middle of a LWKT
409  * switchout on another cpu, lwkt_acquire() deals with that case.
410  *
411  * The process must be runnable.
412  *
413  * MPSAFE
414  */
415 static void
416 bsd4_setrunqueue(struct lwp *lp)
417 {
418 	globaldata_t gd;
419 	bsd4_pcpu_t dd;
420 	int cpuid;
421 #ifdef SMP
422 	cpumask_t mask;
423 	cpumask_t tmpmask;
424 #endif
425 
426 	/*
427 	 * First validate the process state relative to the current cpu.
428 	 * We don't need the spinlock for this, just a critical section.
429 	 * We are in control of the process.
430 	 */
431 	crit_enter();
432 	KASSERT(lp->lwp_proc->p_stat == SRUN, ("setrunqueue: proc not SRUN"));
433 	KASSERT((lp->lwp_proc->p_flag & P_ONRUNQ) == 0,
434 	    ("lwp %d/%d already on runq! flag %08x", lp->lwp_proc->p_pid,
435 	     lp->lwp_tid, lp->lwp_proc->p_flag));
436 	KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);
437 
438 	/*
439 	 * Note: gd and dd are relative to the target thread's last cpu,
440 	 * NOT our current cpu.
441 	 */
442 	gd = lp->lwp_thread->td_gd;
443 	dd = &bsd4_pcpu[gd->gd_cpuid];
444 
445 	/*
446 	 * This process is not supposed to be scheduled anywhere or assigned
447 	 * as the current process anywhere.  Assert the condition.
448 	 */
449 	KKASSERT(dd->uschedcp != lp);
450 
451 	/*
452 	 * Check local cpu affinity.  The associated thread is stable at
453 	 * the moment.  Note that we may be checking another cpu here so we
454 	 * have to be careful.  We can only assign uschedcp on OUR cpu.
455 	 *
456 	 * This allows us to avoid actually queueing the process.
457 	 * acquire_curproc() will handle any threads we mistakenly schedule.
458 	 */
459 	cpuid = gd->gd_cpuid;
460 	if (gd == mycpu && (bsd4_curprocmask & (1 << cpuid)) == 0) {
461 		atomic_set_int(&bsd4_curprocmask, 1 << cpuid);
462 		dd->uschedcp = lp;
463 		dd->upri = lp->lwp_priority;
464 		lwkt_schedule(lp->lwp_thread);
465 		crit_exit();
466 		return;
467 	}
468 
469 	/*
470 	 * gd and cpuid may still 'hint' at another cpu.  Even so we have
471 	 * to place this process on the userland scheduler's run queue for
472 	 * action by the target cpu.
473 	 */
474 #ifdef SMP
475 	/*
476 	 * XXX fixme.  Could be part of a remrunqueue/setrunqueue
477 	 * operation when the priority is recalculated, so TDF_MIGRATING
478 	 * may already be set.
479 	 */
480 	if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0)
481 		lwkt_giveaway(lp->lwp_thread);
482 #endif
483 
484 	/*
485 	 * We lose control of lp the moment we release the spinlock after
486 	 * having placed lp on the queue.  i.e. another cpu could pick it
487 	 * up and it could exit, or its priority could be further adjusted,
488 	 * or something like that.
489 	 */
490 	spin_lock_wr(&bsd4_spin);
491 	bsd4_setrunqueue_locked(lp);
492 
493 	/*
494 	 * gd, dd, and cpuid are still our target cpu 'hint', not our current
495 	 * cpu info.
496 	 *
497 	 * We always try to schedule a LWP to its original cpu first.  It
498 	 * is possible for the scheduler helper or setrunqueue to assign
499 	 * the LWP to a different cpu before the one we asked for wakes
500 	 * up.
501 	 *
502 	 * If the LWP has higher priority (lower lwp_priority value) on
503 	 * its target cpu, reschedule on that cpu.
504 	 */
505 	if ((lp->lwp_thread->td_flags & TDF_NORESCHED) == 0) {
506 		if ((dd->upri & ~PRIMASK) > (lp->lwp_priority & ~PRIMASK)) {
507 			dd->upri = lp->lwp_priority;
508 			spin_unlock_wr(&bsd4_spin);
509 #ifdef SMP
510 			if (gd == mycpu) {
511 				need_user_resched();
512 			} else {
513 				lwkt_send_ipiq(gd, need_user_resched_remote,
514 					       NULL);
515 			}
516 #else
517 			need_user_resched();
518 #endif
519 			crit_exit();
520 			return;
521 		}
522 	}
523 	spin_unlock_wr(&bsd4_spin);
524 
525 #ifdef SMP
526 	/*
527 	 * Otherwise the LWP has a lower priority or we were asked not
528  * to reschedule.  Look for an idle cpu whose scheduler helper
529 	 * is ready to accept more work.
530 	 *
531 	 * Look for an idle cpu starting at our rotator (bsd4_scancpu).
532 	 *
533 	 * If no cpus are ready to accept work, just return.
534 	 *
535 	 * XXX P_PASSIVE_ACQ
536 	 */
537 	mask = ~bsd4_curprocmask & bsd4_rdyprocmask & mycpu->gd_other_cpus &
538 	    lp->lwp_cpumask;
539 	if (mask) {
540 		cpuid = bsd4_scancpu;
541 		if (++cpuid == ncpus)
542 			cpuid = 0;
543 		tmpmask = ~((1 << cpuid) - 1);
544 		if (mask & tmpmask)
545 			cpuid = bsfl(mask & tmpmask);
546 		else
547 			cpuid = bsfl(mask);
548 		atomic_clear_int(&bsd4_rdyprocmask, 1 << cpuid);
549 		bsd4_scancpu = cpuid;
550 		lwkt_schedule(&bsd4_pcpu[cpuid].helper_thread);
551 	}
552 #endif
553 	crit_exit();
554 }
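
/*
 * To illustrate the rotator above: if bsd4_scancpu is 2 the scan starts
 * at cpuid 3, tmpmask becomes ~((1 << 3) - 1) == 0xfffffff8, and
 * bsfl(mask & tmpmask) selects the first ready cpu at or above 3.  If
 * no ready cpu exists in that range, bsfl(mask) wraps the search back
 * to cpu 0.
 */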
555 
556 /*
557  * This routine is called from a systimer IPI.  It MUST be MP-safe and
558  * the BGL IS NOT HELD ON ENTRY.  This routine is called at ESTCPUFREQ on
559  * each cpu.
560  *
561  * Because this is effectively a 'fast' interrupt, we cannot safely
562  * use spinlocks unless gd_spinlock_rd is NULL and gd_spinlocks_wr is 0,
563  * even if the spinlocks are 'non conflicting'.  This is due to the way
564  * spinlock conflicts against cached read locks are handled.
565  *
566  * MPSAFE
567  */
568 static
569 void
570 bsd4_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
571 {
572 	globaldata_t gd = mycpu;
573 	bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];
574 
575 	/*
576 	 * Do we need to round-robin?  We round-robin 10 times a second.
577 	 * This should only occur for cpu-bound batch processes.
578 	 */
579 	if (++dd->rrcount >= usched_bsd4_rrinterval) {
580 		dd->rrcount = 0;
581 		need_user_resched();
582 	}
583 
584 	/*
585 	 * As the process accumulates cpu time p_estcpu is bumped and may
586 	 * push the process into another scheduling queue.  It typically
587 	 * takes 4 ticks to bump the queue.
588 	 */
589 	lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUINCR);
590 
591 	/*
592 	 * Reducing p_origcpu over time causes more of our estcpu to be
593 	 * returned to the parent when we exit.  This is a small tweak
594 	 * for the batch detection heuristic.
595 	 */
596 	if (lp->lwp_origcpu)
597 		--lp->lwp_origcpu;
598 
599 	/*
600 	 * We can only safely call bsd4_resetpriority(), which uses spinlocks,
601 	 * if we aren't interrupting a thread that is using spinlocks.
602 	 * Otherwise we can deadlock with another cpu waiting for our read
603 	 * spinlocks to clear.
604 	 */
605 	if (gd->gd_spinlock_rd == NULL && gd->gd_spinlocks_wr == 0)
606 		bsd4_resetpriority(lp);
607 	else
608 		need_user_resched();
609 }
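
/*
 * On the round-robin arithmetic above: this routine runs at ESTCPUFREQ
 * hz and usched_bsd4_rrinterval is (ESTCPUFREQ + 9) / 10 ticks, so a
 * cpu-bound thread triggers need_user_resched() roughly ten times per
 * second regardless of the configured tick rate.
 */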
610 
611 /*
612  * Called from acquire and from kern_synch's one-second timer (one of the
613  * callout helper threads) with a critical section held.
614  *
615  * Decay p_estcpu based on the number of ticks we haven't been running
616  * and our p_nice.  As the load increases each process observes a larger
617  * number of idle ticks (because other processes are running in them).
618  * This observation leads to a larger correction which tends to make the
619  * system more 'batchy'.
620  *
621  * Note that no recalculation occurs for a process which sleeps and wakes
622  * up in the same tick.  That is, a system doing thousands of context
623  * switches per second will still only do serious estcpu calculations
624  * ESTCPUFREQ times per second.
625  *
626  * MPSAFE
627  */
628 static
629 void
630 bsd4_recalculate_estcpu(struct lwp *lp)
631 {
632 	globaldata_t gd = mycpu;
633 	sysclock_t cpbase;
634 	int loadfac;
635 	int ndecay;
636 	int nticks;
637 	int nleft;
638 
639 	/*
640 	 * We have to subtract periodic to get the last schedclock
641 	 * timeout time, otherwise we would get the upcoming timeout.
642 	 * Keep in mind that a process can migrate between cpus and
643 	 * while the scheduler clock should be very close, boundary
644 	 * conditions could lead to a small negative delta.
645 	 */
646 	cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
647 
648 	if (lp->lwp_slptime > 1) {
649 		/*
650 		 * Too much time has passed, do a coarse correction.
651 		 */
652 		lp->lwp_estcpu = lp->lwp_estcpu >> 1;
653 		bsd4_resetpriority(lp);
654 		lp->lwp_cpbase = cpbase;
655 		lp->lwp_cpticks = 0;
656 	} else if (lp->lwp_cpbase != cpbase) {
657 		/*
658 		 * Adjust estcpu if we are in a different tick.  Don't waste
659 		 * time if we are in the same tick.
660 		 *
661 		 * First calculate the number of ticks in the measurement
662 		 * interval.  The nticks calculation can wind up 0 due to
663  * a bug in the handling of lwp_slptime (as yet not found),
664 		 * so make sure we do not get a divide by 0 panic.
665 		 */
666 		nticks = (cpbase - lp->lwp_cpbase) / gd->gd_schedclock.periodic;
667 		if (nticks <= 0)
668 			nticks = 1;
669 		updatepcpu(lp, lp->lwp_cpticks, nticks);
670 
671 		if ((nleft = nticks - lp->lwp_cpticks) < 0)
672 			nleft = 0;
673 		if (usched_debug == lp->lwp_proc->p_pid) {
674 			printf("pid %d tid %d estcpu %d cpticks %d nticks %d nleft %d",
675 				lp->lwp_proc->p_pid, lp->lwp_tid, lp->lwp_estcpu,
676 				lp->lwp_cpticks, nticks, nleft);
677 		}
678 
679 		/*
680 		 * Calculate a decay value based on ticks remaining scaled
681  * down by the instantaneous load and p_nice.
682 		 */
683 		if ((loadfac = bsd4_runqcount) < 2)
684 			loadfac = 2;
685 		ndecay = nleft * usched_bsd4_decay * 2 *
686 			(PRIO_MAX * 2 - lp->lwp_proc->p_nice) / (loadfac * PRIO_MAX * 2);
687 
688 		/*
689 		 * Adjust p_estcpu.  Handle a border case where batch jobs
690 		 * can get stalled long enough to decay to zero when they
691 		 * shouldn't.
692 		 */
693 		if (lp->lwp_estcpu > ndecay * 2)
694 			lp->lwp_estcpu -= ndecay;
695 		else
696 			lp->lwp_estcpu >>= 1;
697 
698 		if (usched_debug == lp->lwp_proc->p_pid)
699 			printf(" ndecay %d estcpu %d\n", ndecay, lp->lwp_estcpu);
700 		bsd4_resetpriority(lp);
701 		lp->lwp_cpbase = cpbase;
702 		lp->lwp_cpticks = 0;
703 	}
704 }
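
/*
 * A worked decay example with the defaults: usched_bsd4_decay is
 * ESTCPUINCR / 2 = 64.  For a nice 0 process on an unloaded system
 * (loadfac clamped to 2), ndecay = nleft * 64 * 2 * 40 / (2 * 20 * 2)
 * = nleft * 64, so each idle tick forgives half of what a fully
 * cpu-bound tick accumulates (ESTCPUINCR = 128).
 */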
705 
706 /*
707  * Compute the priority of a process when running in user mode.
708  * Arrange to reschedule if the resulting priority is better
709  * than that of the current process.
710  *
711  * This routine may be called with any process.
712  *
713  * This routine is called by fork1() for initial setup with the process
714  * of the run queue, and also may be called normally with the process on or
715  * off the run queue.
716  *
717  * MPSAFE
718  */
719 static void
720 bsd4_resetpriority(struct lwp *lp)
721 {
722 	bsd4_pcpu_t dd;
723 	int newpriority;
724 	u_short newrqtype;
725 	int reschedcpu;
726 
727 	/*
728 	 * Calculate the new priority and queue type
729 	 */
730 	crit_enter();
731 	spin_lock_wr(&bsd4_spin);
732 
733 	newrqtype = lp->lwp_rtprio.type;
734 
735 	switch(newrqtype) {
736 	case RTP_PRIO_REALTIME:
737 	case RTP_PRIO_FIFO:
738 		newpriority = PRIBASE_REALTIME +
739 			     (lp->lwp_rtprio.prio & PRIMASK);
740 		break;
741 	case RTP_PRIO_NORMAL:
742 		newpriority = (lp->lwp_proc->p_nice - PRIO_MIN) * PPQ / NICEPPQ;
743 		newpriority += lp->lwp_estcpu * PPQ / ESTCPUPPQ;
744 		newpriority = newpriority * MAXPRI / (PRIO_RANGE * PPQ /
745 			      NICEPPQ + ESTCPUMAX * PPQ / ESTCPUPPQ);
746 		newpriority = PRIBASE_NORMAL + (newpriority & PRIMASK);
747 		break;
748 	case RTP_PRIO_IDLE:
749 		newpriority = PRIBASE_IDLE + (lp->lwp_rtprio.prio & PRIMASK);
750 		break;
751 	case RTP_PRIO_THREAD:
752 		newpriority = PRIBASE_THREAD + (lp->lwp_rtprio.prio & PRIMASK);
753 		break;
754 	default:
755 		panic("Bad RTP_PRIO %d", newrqtype);
756 		/* NOT REACHED */
757 	}
758 
759 	/*
760 	 * The newpriority incorporates the queue type so do a simple masked
761 	 * check to determine if the process has moved to another queue.  If
762 	 * it has, and it is currently on a run queue, then move it.
763 	 */
764 	if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
765 		lp->lwp_priority = newpriority;
766 		if (lp->lwp_proc->p_flag & P_ONRUNQ) {
767 			bsd4_remrunqueue_locked(lp);
768 			lp->lwp_rqtype = newrqtype;
769 			lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
770 			bsd4_setrunqueue_locked(lp);
771 			reschedcpu = lp->lwp_thread->td_gd->gd_cpuid;
772 		} else {
773 			lp->lwp_rqtype = newrqtype;
774 			lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
775 			reschedcpu = -1;
776 		}
777 	} else {
778 		lp->lwp_priority = newpriority;
779 		reschedcpu = -1;
780 	}
781 	spin_unlock_wr(&bsd4_spin);
782 
783 	/*
784 	 * Determine if we need to reschedule the target cpu.  This only
785 	 * occurs if the LWP is already on a scheduler queue, which means
786  * that idle cpu notification has already occurred.  At most we
787 	 * need only issue a need_user_resched() on the appropriate cpu.
788 	 */
789 	if (reschedcpu >= 0) {
790 		dd = &bsd4_pcpu[reschedcpu];
791 		KKASSERT(dd->uschedcp != lp);
792 		if ((dd->upri & ~PRIMASK) > (lp->lwp_priority & ~PRIMASK)) {
793 			dd->upri = lp->lwp_priority;
794 #ifdef SMP
795 			if (reschedcpu == mycpu->gd_cpuid) {
796 				need_user_resched();
797 			} else {
798 				lwkt_send_ipiq(lp->lwp_thread->td_gd,
799 					       need_user_resched_remote, NULL);
800 			}
801 #else
802 			need_user_resched();
803 #endif
804 		}
805 	}
806 	crit_exit();
807 }
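
/*
 * A worked RTP_PRIO_NORMAL example: with nice 0 and estcpu 0 the raw
 * priority is (0 - PRIO_MIN) * PPQ / NICEPPQ = 40.  The scale factor
 * MAXPRI / (PRIO_RANGE * PPQ / NICEPPQ + ESTCPUMAX * PPQ / ESTCPUPPQ)
 * = 128 / (82 + 128) compresses that to 40 * 128 / 210 = 24, giving a
 * final priority of PRIBASE_NORMAL + 24 = 152 and run queue index
 * 24 / 4 = 6.
 */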
808 
809 /*
810  * Called from fork1() when a new child process is being created.
811  *
812  * Give the child process an initial estcpu that is more batch-like than
813  * its parent and dock the parent for the fork (but do not
814  * reschedule the parent).   This comprises the main part of our batch
815  * detection heuristic for both parallel forking and sequential execs.
816  *
817  * Interactive processes will decay the boosted estcpu quickly while batch
818  * processes will tend to compound it.
819  * XXX lwp should be "spawning" instead of "forking"
820  *
821  * MPSAFE
822  */
823 static void
824 bsd4_forking(struct lwp *plp, struct lwp *lp)
825 {
826 	lp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ);
827 	lp->lwp_origcpu = lp->lwp_estcpu;
828 	plp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ);
829 }
830 
831 /*
832  * Called when the parent reaps a child.  Propagate cpu use by the child
833  * back to the parent.
834  *
835  * MPSAFE
836  */
837 static void
838 bsd4_exiting(struct lwp *plp, struct lwp *lp)
839 {
840 	int delta;
841 
842 	if (plp->lwp_proc->p_pid != 1) {
843 		delta = lp->lwp_estcpu - lp->lwp_origcpu;
844 		if (delta > 0)
845 			plp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + delta);
846 	}
847 }
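
/*
 * Putting the fork/exit heuristic together: at fork the child starts
 * with estcpu = parent's + ESTCPUPPQ (capped) and snapshots that value
 * in lwp_origcpu.  A cpu-bound child pushes estcpu past lwp_origcpu
 * (which also decays one unit per scheduler tick), and the excess is
 * folded back into the parent here, so e.g. a shell that repeatedly
 * spawns heavy jobs becomes progressively more batch-like itself.
 */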
848 
849 
850 /*
851  * chooseproc() is called when a cpu needs a user process to LWKT schedule,
852  * it selects a user process and returns it.  If chklp is non-NULL and chklp
853  * has a better or equal priority than the process that would otherwise be
854  * chosen, NULL is returned.
855  *
856  * Until we fix the RUNQ code the chklp test has to be strict or we may
857  * bounce between processes trying to acquire the current process designation.
858  *
859  * MPSAFE - must be called with bsd4_spin exclusive held.  The spinlock is
860  *	    left intact through the entire routine.
861  */
862 static
863 struct lwp *
864 chooseproc_locked(struct lwp *chklp)
865 {
866 	struct lwp *lp;
867 	struct rq *q;
868 	u_int32_t *which, *which2;
869 	u_int32_t pri;
870 	u_int32_t rtqbits;
871 	u_int32_t tsqbits;
872 	u_int32_t idqbits;
873 	cpumask_t cpumask;
874 
875 	rtqbits = bsd4_rtqueuebits;
876 	tsqbits = bsd4_queuebits;
877 	idqbits = bsd4_idqueuebits;
878 	cpumask = mycpu->gd_cpumask;
879 
880 #ifdef SMP
881 again:
882 #endif
883 	if (rtqbits) {
884 		pri = bsfl(rtqbits);
885 		q = &bsd4_rtqueues[pri];
886 		which = &bsd4_rtqueuebits;
887 		which2 = &rtqbits;
888 	} else if (tsqbits) {
889 		pri = bsfl(tsqbits);
890 		q = &bsd4_queues[pri];
891 		which = &bsd4_queuebits;
892 		which2 = &tsqbits;
893 	} else if (idqbits) {
894 		pri = bsfl(idqbits);
895 		q = &bsd4_idqueues[pri];
896 		which = &bsd4_idqueuebits;
897 		which2 = &idqbits;
898 	} else {
899 		return NULL;
900 	}
901 	lp = TAILQ_FIRST(q);
902 	KASSERT(lp, ("chooseproc: no lwp on busy queue"));
903 
904 #ifdef SMP
905 	while ((lp->lwp_cpumask & cpumask) == 0) {
906 		lp = TAILQ_NEXT(lp, lwp_procq);
907 		if (lp == NULL) {
908 			*which2 &= ~(1 << pri);
909 			goto again;
910 		}
911 	}
912 #endif
913 
914 	/*
915 	 * If the passed lwp <chklp> is reasonably close to the selected
916 	 * lwp <lp>, return NULL (indicating that <chklp> should be kept).
917 	 *
918  * Note that we must err on the side of <chklp> to avoid bouncing
919 	 * between threads in the acquire code.
920 	 */
921 	if (chklp) {
922 		if (chklp->lwp_priority < lp->lwp_priority + PPQ)
923 			return(NULL);
924 	}
925 
926 #ifdef SMP
927 	/*
928 	 * If the chosen lwp does not reside on this cpu spend a few
929 	 * cycles looking for a better candidate at the same priority level.
930  * This is a fallback check; setrunqueue() tries to wake up the
931  * correct cpu and is our front-line affinity mechanism.
932 	 */
933 	if (lp->lwp_thread->td_gd != mycpu &&
934 	    (chklp = TAILQ_NEXT(lp, lwp_procq)) != NULL
935 	) {
936 		if (chklp->lwp_thread->td_gd == mycpu) {
937 			++choose_affinity;
938 			lp = chklp;
939 		}
940 	}
941 #endif
942 
943 	TAILQ_REMOVE(q, lp, lwp_procq);
944 	--bsd4_runqcount;
945 	if (TAILQ_EMPTY(q))
946 		*which &= ~(1 << pri);
947 	KASSERT((lp->lwp_proc->p_flag & P_ONRUNQ) != 0, ("not on runq6!"));
948 	lp->lwp_proc->p_flag &= ~P_ONRUNQ;
949 	return lp;
950 }
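
/*
 * The PPQ margin in the chklp test above provides hysteresis: the
 * currently designated lwp keeps the cpu unless the queued candidate
 * is better by at least a full queue (PPQ = 4 priority levels), which
 * keeps two nearly-equal threads from trading the designation back
 * and forth.
 */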
951 
952 #ifdef SMP
953 /*
954  * Called via an ipi message to reschedule on another cpu.
955  *
956  * MPSAFE
957  */
958 static
959 void
960 need_user_resched_remote(void *dummy)
961 {
962 	need_user_resched();
963 }
964 
965 #endif
966 
967 
968 /*
969  * bsd4_remrunqueue_locked() removes a given process from the run queue
970  * that it is on, clearing the queue busy bit if it becomes empty.
971  *
972  * Note that the user process scheduler is different from the LWKT scheduler.
973  * The user process scheduler only manages user processes but it uses LWKT
974  * underneath, and a user process operating in the kernel will often be
975  * 'released' from our management.
976  *
977  * MPSAFE - bsd4_spin must be held exclusively on call
978  */
979 static void
980 bsd4_remrunqueue_locked(struct lwp *lp)
981 {
982 	struct rq *q;
983 	u_int32_t *which;
984 	u_int8_t pri;
985 
986 	KKASSERT(lp->lwp_proc->p_flag & P_ONRUNQ);
987 	lp->lwp_proc->p_flag &= ~P_ONRUNQ;
988 	--bsd4_runqcount;
989 	KKASSERT(bsd4_runqcount >= 0);
990 
991 	pri = lp->lwp_rqindex;
992 	switch(lp->lwp_rqtype) {
993 	case RTP_PRIO_NORMAL:
994 		q = &bsd4_queues[pri];
995 		which = &bsd4_queuebits;
996 		break;
997 	case RTP_PRIO_REALTIME:
998 	case RTP_PRIO_FIFO:
999 		q = &bsd4_rtqueues[pri];
1000 		which = &bsd4_rtqueuebits;
1001 		break;
1002 	case RTP_PRIO_IDLE:
1003 		q = &bsd4_idqueues[pri];
1004 		which = &bsd4_idqueuebits;
1005 		break;
1006 	default:
1007 		panic("remrunqueue: invalid rtprio type");
1008 		/* NOT REACHED */
1009 	}
1010 	TAILQ_REMOVE(q, lp, lwp_procq);
1011 	if (TAILQ_EMPTY(q)) {
1012 		KASSERT((*which & (1 << pri)) != 0,
1013 			("remrunqueue: remove from empty queue"));
1014 		*which &= ~(1 << pri);
1015 	}
1016 }
1017 
1018 /*
1019  * bsd4_setrunqueue_locked()
1020  *
1021  * Add a process whose rqtype and rqindex have previously been calculated
1022  * onto the appropriate run queue and set the matching queue busy bit.
1023  * Any reschedule the addition requires is handled by the caller.
1024  *
1025  * NOTE: Lower priorities are better priorities.
1026  *
1027  * MPSAFE - bsd4_spin must be held exclusively on call
1028  */
1029 static void
1030 bsd4_setrunqueue_locked(struct lwp *lp)
1031 {
1032 	struct rq *q;
1033 	u_int32_t *which;
1034 	int pri;
1035 
1036 	KKASSERT((lp->lwp_proc->p_flag & P_ONRUNQ) == 0);
1037 	lp->lwp_proc->p_flag |= P_ONRUNQ;
1038 	++bsd4_runqcount;
1039 
1040 	pri = lp->lwp_rqindex;
1041 
1042 	switch(lp->lwp_rqtype) {
1043 	case RTP_PRIO_NORMAL:
1044 		q = &bsd4_queues[pri];
1045 		which = &bsd4_queuebits;
1046 		break;
1047 	case RTP_PRIO_REALTIME:
1048 	case RTP_PRIO_FIFO:
1049 		q = &bsd4_rtqueues[pri];
1050 		which = &bsd4_rtqueuebits;
1051 		break;
1052 	case RTP_PRIO_IDLE:
1053 		q = &bsd4_idqueues[pri];
1054 		which = &bsd4_idqueuebits;
1055 		break;
1056 	default:
1057 		panic("setrunqueue: invalid rtprio type");
1058 		/* NOT REACHED */
1059 	}
1060 
1061 	/*
1062 	 * Add to the correct queue and set the appropriate bit.  If no
1063 	 * lower priority (i.e. better) processes are in the queue then
1064 	 * we want a reschedule, calculate the best cpu for the job.
1065 	 *
1066 	 * Always run reschedules on the LWPs original cpu.
1067 	 */
1068 	TAILQ_INSERT_TAIL(q, lp, lwp_procq);
1069 	*which |= 1 << pri;
1070 }
1071 
1072 #ifdef SMP
1073 
1074 /*
1075  * For SMP systems a user scheduler helper thread is created for each
1076  * cpu and is used to allow one cpu to wakeup another for the purposes of
1077  * scheduling userland threads from setrunqueue().  UP systems do not
1078  * need the helper since there is only one cpu.  We can't use the idle
1079  * thread for this because we need to hold the MP lock.  Additionally,
1080  * doing things this way allows us to HLT idle cpus on MP systems.
1081  *
1082  * MPSAFE
1083  */
1084 static void
1085 sched_thread(void *dummy)
1086 {
1087     globaldata_t gd;
1088     bsd4_pcpu_t  dd;
1089     struct lwp *nlp;
1090     cpumask_t cpumask;
1091     cpumask_t tmpmask;
1092     int cpuid;
1093     int tmpid;
1094 
1095     gd = mycpu;
1096     cpuid = gd->gd_cpuid;	/* doesn't change */
1097     cpumask = 1 << cpuid;	/* doesn't change */
1098     dd = &bsd4_pcpu[cpuid];
1099 
1100     /*
1101      * The scheduler thread does not need to hold the MP lock.  Since we
1102      * are woken up only when no user processes are scheduled on a cpu, we
1103      * can run at an ultra low priority.
1104      */
1105     rel_mplock();
1106     lwkt_setpri_self(TDPRI_USER_SCHEDULER);
1107 
1108     for (;;) {
1109 	/*
1110 	 * We use the LWKT deschedule-interlock trick to avoid racing
1111 	 * bsd4_rdyprocmask.  This means we cannot block through to the
1112 	 * manual lwkt_switch() call we make below.
1113 	 */
1114 	crit_enter_gd(gd);
1115 	lwkt_deschedule_self(gd->gd_curthread);
1116 	spin_lock_wr(&bsd4_spin);
1117 	atomic_set_int(&bsd4_rdyprocmask, cpumask);
1118 	if ((bsd4_curprocmask & cpumask) == 0) {
1119 		if ((nlp = chooseproc_locked(NULL)) != NULL) {
1120 			atomic_set_int(&bsd4_curprocmask, cpumask);
1121 			dd->upri = nlp->lwp_priority;
1122 			dd->uschedcp = nlp;
1123 			spin_unlock_wr(&bsd4_spin);
1124 			lwkt_acquire(nlp->lwp_thread);
1125 			lwkt_schedule(nlp->lwp_thread);
1126 		} else {
1127 			spin_unlock_wr(&bsd4_spin);
1128 		}
1129 	} else {
1130 		/*
1131 		 * Someone scheduled us but raced.  In order to not lose
1132 		 * track of the fact that there may be a LWP ready to go,
1133 		 * forward the request to another cpu if available.
1134 		 *
1135 		 * Rotate through cpus starting with cpuid + 1.  Since cpuid is
1136 		 * already masked out by gd_other_cpus, use ~(cpumask - 1).
1137 		 */
1138 		tmpmask = ~bsd4_curprocmask & bsd4_rdyprocmask &
1139 			  mycpu->gd_other_cpus;
1140 		if (tmpmask) {
1141 			if (tmpmask & ~(cpumask - 1))
1142 				tmpid = bsfl(tmpmask & ~(cpumask - 1));
1143 			else
1144 				tmpid = bsfl(tmpmask);
1145 			bsd4_scancpu = tmpid;
1146 			atomic_clear_int(&bsd4_rdyprocmask, 1 << tmpid);
1147 			spin_unlock_wr(&bsd4_spin);
1148 			lwkt_schedule(&bsd4_pcpu[tmpid].helper_thread);
1149 		} else {
1150 			spin_unlock_wr(&bsd4_spin);
1151 		}
1152 	}
1153 	crit_exit_gd(gd);
1154 	lwkt_switch();
1155     }
1156 }
1157 
1158 /*
1159  * Setup our scheduler helpers.  Note that curprocmask bit 0 has already
1160  * been cleared by rqinit() and we should not mess with it further.
1161  */
1162 static void
1163 sched_thread_cpu_init(void)
1164 {
1165     int i;
1166 
1167     if (bootverbose)
1168 	printf("start scheduler helpers on cpus:");
1169 
1170     for (i = 0; i < ncpus; ++i) {
1171 	bsd4_pcpu_t dd = &bsd4_pcpu[i];
1172 	cpumask_t mask = 1 << i;
1173 
1174 	if ((mask & smp_active_mask) == 0)
1175 	    continue;
1176 
1177 	if (bootverbose)
1178 	    printf(" %d", i);
1179 
1180 	lwkt_create(sched_thread, NULL, NULL, &dd->helper_thread,
1181 		    TDF_STOPREQ, i, "usched %d", i);
1182 
1183 	/*
1184 	 * Allow user scheduling on the target cpu.  cpu #0 has already
1185 	 * been enabled in rqinit().
1186 	 */
1187 	if (i)
1188 	    atomic_clear_int(&bsd4_curprocmask, mask);
1189 	atomic_set_int(&bsd4_rdyprocmask, mask);
1190     }
1191     if (bootverbose)
1192 	printf("\n");
1193 }
1194 SYSINIT(uschedtd, SI_SUB_FINISH_SMP, SI_ORDER_ANY, sched_thread_cpu_init, NULL)
1195 
1196 #endif
1197 
1198