/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 * $FreeBSD: src/sys/kern/kern_synch.c,v 1.87.2.6 2002/10/13 07:29:53 kbyanc Exp $
 * $DragonFly: src/sys/kern/kern_synch.c,v 1.47 2005/06/27 18:37:57 dillon Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/thread2.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif
#include <sys/xwait.h>

#include <machine/cpu.h>
#include <machine/ipl.h>
#include <machine/smp.h>

static void sched_setup (void *dummy);
SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)

int	hogticks;
int	lbolt;
int	sched_quantum;		/* Roundrobin scheduling quantum in ticks. */
int	ncpus;
int	ncpus2, ncpus2_shift, ncpus2_mask;
int	safepri;

static struct callout loadav_callout;
static struct callout schedcpu_callout;

struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };	/* load average, of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
static fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};
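
/*
 * Each cexp[i] is exp(-t/T) for the t = 5 second sampling interval and
 * a T = 60, 300, or 900 second window (e.g. exp(-5/60) = exp(-1/12)).
 * loadav() below applies the standard exponential moving average in
 * fixed point:
 *
 *	load = load * exp(-t/T) + nrun * (1 - exp(-t/T))
 *
 * so a constant nrun converges to the load average while each old
 * sample's weight decays geometrically with age.
 */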

static void	endtsleep (void *);
static void	loadav (void *arg);
static void	schedcpu (void *arg);

/*
 * Adjust the scheduler quantum.  The quantum is specified in microseconds.
 * Note that 'tick' is in microseconds per tick.
 */
static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;

	new_val = sched_quantum * tick;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < tick)
		return (EINVAL);
	sched_quantum = new_val / tick;
	hogticks = 2 * sched_quantum;
	return (0);
}

SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof sched_quantum, sysctl_kern_quantum, "I", "");
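
/*
 * Illustrative only: the handler above rounds the requested microsecond
 * value down to whole ticks, so with hz = 100 (tick = 10000 us)
 *
 *	# sysctl kern.quantum=25000
 *
 * sets sched_quantum to 2 ticks (20000 us) and hogticks to 4 ticks.
 */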

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *     1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the percentage of CPU used by a
 * process.
 *
 * decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing
 */
#define CCPU_SHIFT	11

static fixpt_t ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
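
/*
 * A sketch of the decay claim above: schedcpu() multiplies p_pctcpu
 * by ccpu once per second while a process sleeps, so after 60 seconds
 *
 *	p_pctcpu * exp(-1/20)^60 = p_pctcpu * exp(-3) ~= 0.0498 * p_pctcpu
 *
 * i.e. roughly 95% of the old value has decayed away.
 */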

/*
 * kernel uses `FSCALE', userland (SHOULD) use kern.fscale
 */
static int     fscale __unused = FSCALE;
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");

/*
 * Recompute process priorities, once a second.
 *
 * Since the userland schedulers are typically event oriented, if the
 * estcpu calculation at wakeup() time is not sufficient to make a
 * process runnable relative to other processes in the system we have
 * a 1-second recalc to help out.
 *
 * This code also allows us to store sysclock_t data in the process
 * structure without fear of an overrun, since sysclock_t is guaranteed
 * to hold several seconds worth of count.
 */
/* ARGSUSED */
static void
schedcpu(void *arg)
{
	struct proc *p;

	FOREACH_PROC_IN_SYSTEM(p) {
		/*
		 * Increment time in/out of memory and sleep time
		 * (if sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		crit_enter();
		p->p_swtime++;
		if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
			p->p_slptime++;

		/*
		 * Only recalculate processes that are active or have slept
		 * less than 2 seconds.  The schedulers understand this.
		 */
		if (p->p_slptime <= 1) {
			p->p_usched->recalculate(p);
		} else {
			p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		}
		crit_exit();
	}
	wakeup((caddr_t)&lbolt);
	callout_reset(&schedcpu_callout, hz, schedcpu, NULL);
}

/*
 * This is only used by ps.  Generate a cpu usage percentage covering
 * a period of one second.
 */
void
updatepcpu(struct proc *p, int cpticks, int ttlticks)
{
	fixpt_t acc;
	int remticks;

	acc = (cpticks << FSHIFT) / ttlticks;
	if (ttlticks >= ESTCPUFREQ) {
		p->p_pctcpu = acc;
	} else {
		remticks = ESTCPUFREQ - ttlticks;
		p->p_pctcpu = (acc * ttlticks + p->p_pctcpu * remticks) /
				ESTCPUFREQ;
	}
}
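
/*
 * Worked example (illustrative numbers): once ttlticks covers the full
 * ESTCPUFREQ window, p_pctcpu is simply cpticks/ttlticks in fixed point;
 * cpticks equal to half of ttlticks yields p_pctcpu = FSCALE / 2 (50%).
 * For a shorter window the new sample is blended with the old p_pctcpu,
 * each weighted by its share of the ESTCPUFREQ window.
 */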


/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
static TAILQ_HEAD(slpquehead, thread) slpque[TABLESIZE];
#define LOOKUP(x)	(((intptr_t)(x) >> 8) & (TABLESIZE - 1))
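
/*
 * Hash sketch (hypothetical address): for ident = 0xc2f3ab40, LOOKUP()
 * drops the low 8 bits and keeps the next 7:
 *
 *	(0xc2f3ab40 >> 8) & 127 == 0xc2f3ab & 0x7f == 0x2b == slot 43
 *
 * so wait channels within the same 256-byte region share a sleep queue
 * and are disambiguated by the td_wchan compare in _wakeup().
 */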

/*
 * General scheduler initialization.  We force a reschedule 25 times
 * a second by default.
 */
void
sleepinit(void)
{
	int i;

	sched_quantum = (hz + 24) / 25;
	hogticks = 2 * sched_quantum;
	for (i = 0; i < TABLESIZE; i++)
		TAILQ_INIT(&slpque[i]);
}
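
/*
 * For example, the expression above rounds hz/25 up to the next whole
 * tick; with the common hz = 100 it is exact: sched_quantum =
 * (100 + 24) / 25 = 4 ticks (40 ms), i.e. 25 quanta per second, and
 * hogticks = 8 ticks.
 */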

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If flags includes PCATCH, signals are checked
 * before and after sleeping, otherwise signals are not checked.  Returns
 * 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set
 * and a signal needs to be delivered, ERESTART is returned if the current
 * system call should be restarted if possible, and EINTR is returned if
 * the system call should be interrupted by the signal.
 *
 * Note that if we are a process, we release_curproc() before messing with
 * the LWKT scheduler.
 *
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 */
int
tsleep(void *ident, int flags, const char *wmesg, int timo)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;		/* may be NULL */
	int sig = 0, catch = flags & PCATCH;
	int id = LOOKUP(ident);
	int oldpri;
	struct callout thandle;

	/*
	 * NOTE: removed KTRPOINT, it could cause races due to blocking
	 * even in stable.  Just scrap it for now.
	 */
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		splz();
		oldpri = td->td_pri & TDPRI_MASK;
		lwkt_setpri_self(safepri);
		lwkt_switch();
		lwkt_setpri_self(oldpri);
		return (0);
	}
	KKASSERT(td != &mycpu->gd_idlethread);	/* you must be kidding! */
	crit_enter_quick(td);
	KASSERT(ident != NULL, ("tsleep: no ident"));
	KASSERT(p == NULL || p->p_stat == SRUN, ("tsleep %p %s %d",
		ident, wmesg, p->p_stat));

	td->td_wchan = ident;
	td->td_wmesg = wmesg;
	td->td_wdomain = flags & PDOMAIN_MASK;
	if (p) {
		if (flags & PNORESCHED)
			td->td_flags |= TDF_NORESCHED;
		p->p_usched->release_curproc(p);
		p->p_slptime = 0;
	}
	lwkt_deschedule_self(td);
	TAILQ_INSERT_TAIL(&slpque[id], td, td_threadq);
	if (timo) {
		callout_init(&thandle);
		callout_reset(&thandle, timo, endtsleep, td);
	}
	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as SSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, td->td_wchan will be NULL upon return from CURSIG.
	 */
	if (p) {
		if (catch) {
			p->p_flag |= P_SINTR;
			if ((sig = CURSIG(p))) {
				if (td->td_wchan) {
					unsleep(td);
					lwkt_schedule_self(td);
				}
				p->p_stat = SRUN;
				goto resume;
			}
			if (td->td_wchan == NULL) {
				catch = 0;
				goto resume;
			}
		} else {
			sig = 0;
		}

		/*
		 * If we are not the current process we have to remove
		 * ourselves from the run queue.
		 */
		KASSERT(p->p_stat == SRUN, ("PSTAT NOT SRUN %d %d", p->p_pid, p->p_stat));
		/*
		 * If this is the current 'user' process schedule another one.
		 */
		clrrunnable(p, SSLEEP);
		p->p_stats->p_ru.ru_nvcsw++;
		mi_switch(p);
		KASSERT(p->p_stat == SRUN, ("tsleep: stat not srun"));
	} else {
		lwkt_switch();
	}
resume:
	if (p)
		p->p_flag &= ~P_SINTR;
	crit_exit_quick(td);
	td->td_flags &= ~TDF_NORESCHED;
	if (td->td_flags & TDF_TIMEOUT) {
		td->td_flags &= ~TDF_TIMEOUT;
		if (sig == 0)
			return (EWOULDBLOCK);
	} else if (timo) {
		callout_stop(&thandle);
	} else if (td->td_wmesg) {
		/*
		 * This can happen if a thread is woken up directly.  Clear
		 * wmesg to avoid debugging confusion.
		 */
		td->td_wmesg = NULL;
	}
	/* inline of iscaught() */
	if (p) {
		if (catch && (sig != 0 || (sig = CURSIG(p)))) {
			if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
				return (EINTR);
			return (ERESTART);
		}
	}
	return (0);
}

/*
 * Implement the timeout for tsleep.  We interlock against
 * wchan when setting TDF_TIMEOUT.  For processes we remove
 * the sleep if the process is stopped rather than sleeping,
 * so it remains stopped.
 */
static void
endtsleep(void *arg)
{
	thread_t td = arg;
	struct proc *p;

	crit_enter();
	if (td->td_wchan) {
		td->td_flags |= TDF_TIMEOUT;
		if ((p = td->td_proc) != NULL) {
			if (p->p_stat == SSLEEP)
				setrunnable(p);
			else
				unsleep(td);
		} else {
			unsleep(td);
			lwkt_schedule(td);
		}
	}
	crit_exit();
}

/*
 * Remove a thread from its wait queue.
 */
void
unsleep(struct thread *td)
{
	crit_enter();
	if (td->td_wchan) {
		TAILQ_REMOVE(&slpque[LOOKUP(td->td_wchan)], td, td_threadq);
		td->td_wchan = NULL;
	}
	crit_exit();
}

/*
 * Make the threads sleeping on the specified identifier runnable.
 * A count of 0 wakes all sleepers; otherwise at most 'count' are woken.
 */
static void
_wakeup(void *ident, int domain, int count)
{
	struct slpquehead *qp;
	struct thread *td;
	struct thread *ntd;
	struct proc *p;
	int id = LOOKUP(ident);

	crit_enter();
	qp = &slpque[id];
restart:
	for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) {
		ntd = TAILQ_NEXT(td, td_threadq);
		if (td->td_wchan == ident && td->td_wdomain == domain) {
			TAILQ_REMOVE(qp, td, td_threadq);
			td->td_wchan = NULL;
			if ((p = td->td_proc) != NULL && p->p_stat == SSLEEP) {
				p->p_stat = SRUN;
				if (p->p_flag & P_INMEM) {
					/*
					 * LWKT scheduled now, there is no
					 * userland runq interaction until
					 * the thread tries to return to user
					 * mode.  We do NOT call setrunqueue().
					 */
					lwkt_schedule(td);
				} else {
					p->p_flag |= P_SWAPINREQ;
					wakeup((caddr_t)&proc0);
				}
				/* END INLINE EXPANSION */
			} else if (p == NULL) {
				lwkt_schedule(td);
			}
			if (--count == 0)
				break;
			goto restart;
		}
	}
	crit_exit();
}

void
wakeup(void *ident)
{
	_wakeup(ident, 0, 0);
}

void
wakeup_one(void *ident)
{
	_wakeup(ident, 0, 1);
}

void
wakeup_domain(void *ident, int domain)
{
	_wakeup(ident, domain, 0);
}

void
wakeup_domain_one(void *ident, int domain)
{
	_wakeup(ident, domain, 1);
}
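
/*
 * Typical pairing, a minimal sketch with hypothetical names (sc and
 * SC_READY are illustrative, not part of this file): the consumer
 * sleeps on the address of the data it waits for and the producer
 * wakes that address.
 *
 *	while ((sc->sc_flags & SC_READY) == 0) {
 *		error = tsleep(&sc->sc_flags, PCATCH, "scrdy", 5 * hz);
 *		if (error == EWOULDBLOCK)
 *			break;			(timed out, ~5 seconds)
 *		if (error)
 *			return (error);		(EINTR or ERESTART)
 *	}
 *
 * Producer side, once the condition is established:
 *
 *	sc->sc_flags |= SC_READY;
 *	wakeup(&sc->sc_flags);
 */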

/*
 * The machine independent parts of mi_switch().
 *
 * 'p' must be the current process.
 */
void
mi_switch(struct proc *p)
{
	thread_t td = p->p_thread;
	struct rlimit *rlim;
	u_int64_t ttime;

	KKASSERT(td == mycpu->gd_curthread);

	crit_enter_quick(td);

	/*
	 * Check if the process exceeds its cpu resource allocation.
	 * If over max, kill it.  Time spent in interrupts is not
	 * included.  YYY 64 bit math is expensive.  Ick.
	 */
	ttime = td->td_sticks + td->td_uticks;
	if (p->p_stat != SZOMB && p->p_limit->p_cpulimit != RLIM_INFINITY &&
	    ttime > p->p_limit->p_cpulimit) {
		rlim = &p->p_rlimit[RLIMIT_CPU];
		if (ttime / (rlim_t)1000000 >= rlim->rlim_max) {
			killproc(p, "exceeded maximum CPU limit");
		} else {
			psignal(p, SIGXCPU);
			if (rlim->rlim_cur < rlim->rlim_max) {
				/* XXX: we should make a private copy */
				rlim->rlim_cur += 5;
			}
		}
	}

	/*
	 * If we are in an SSTOPped state we deschedule ourselves.
	 * YYY this needs to be cleaned up, remember that LWKTs stay on
	 * their run queue which works differently than the user scheduler
	 * which removes the process from the runq when it runs it.
	 */
	mycpu->gd_cnt.v_swtch++;
	if (p->p_stat == SSTOP)
		lwkt_deschedule_self(td);
	lwkt_switch();
	crit_exit_quick(td);
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(struct proc *p)
{
	crit_enter();

	switch (p->p_stat) {
	case 0:
	case SRUN:
	case SZOMB:
	default:
		panic("setrunnable");
	case SSTOP:
	case SSLEEP:
		unsleep(p->p_thread);	/* e.g. when sending signals */
		break;

	case SIDL:
		break;
	}
	p->p_stat = SRUN;

	/*
	 * The process is controlled by LWKT at this point, we do not mess
	 * around with the userland scheduler until the thread tries to
	 * return to user mode.  We do not clear p_slptime or call
	 * setrunqueue().
	 */
	if (p->p_flag & P_INMEM) {
		lwkt_schedule(p->p_thread);
	} else {
		p->p_flag |= P_SWAPINREQ;
		wakeup((caddr_t)&proc0);
	}
	crit_exit();
}

/*
 * Yield / synchronous reschedule.  This is a bit tricky because the trap
 * code might have set a lazy release on the switch function.  Setting
 * P_PASSIVE_ACQ will ensure that the lazy release executes when we call
 * switch, and that we are given a greater chance of affinity with our
 * current cpu.
 *
 * We call lwkt_setpri_self() to rotate our thread to the end of the lwkt
 * run queue.  lwkt_switch() will also execute any assigned passive release
 * (which usually calls release_curproc()), allowing a same/higher priority
 * process to be designated as the current process.
 *
 * While it is possible for a lower priority process to be designated,
 * its call to lwkt_maybe_switch() in acquire_curproc() will likely
 * round-robin back to us and we will be able to re-acquire the current
 * process designation.
 */
void
uio_yield(void)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;

	lwkt_setpri_self(td->td_pri & TDPRI_MASK);
	if (p) {
		p->p_flag |= P_PASSIVE_ACQ;
		lwkt_switch();
		p->p_flag &= ~P_PASSIVE_ACQ;
	} else {
		lwkt_switch();
	}
}
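
/*
 * Illustrative only (hypothetical loop, not from this file): long-running
 * kernel loops yield periodically so they do not hog the cpu, e.g.
 *
 *	for (i = 0; i < huge_count; i++) {
 *		process_one_item(i);
 *		if ((i & 1023) == 1023)
 *			uio_yield();
 *	}
 */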

/*
 * Change the process state to NOT be runnable, removing it from the run
 * queue.
 */
void
clrrunnable(struct proc *p, int stat)
{
	crit_enter_quick(p->p_thread);
	if (p->p_stat == SRUN && (p->p_flag & P_ONRUNQ))
		p->p_usched->remrunqueue(p);
	p->p_stat = stat;
	crit_exit_quick(p->p_thread);
}

/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.
 */
static void
loadav(void *arg)
{
	int i, nrun;
	struct loadavg *avg;
	struct proc *p;
	thread_t td;

	avg = &averunnable;
	nrun = 0;
	FOREACH_PROC_IN_SYSTEM(p) {
		switch (p->p_stat) {
		case SRUN:
			if ((td = p->p_thread) == NULL)
				break;
			if (td->td_flags & TDF_BLOCKED)
				break;
			/* fall through */
		case SIDL:
			nrun++;
			break;
		default:
			break;
		}
	}
	for (i = 0; i < 3; i++)
		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;

	/*
	 * Schedule the next update to occur after 5 seconds, but add a
	 * random variation to avoid synchronisation with processes that
	 * run at regular intervals.
	 */
	callout_reset(&loadav_callout, hz * 4 + (int)(random() % (hz * 2 + 1)),
	    loadav, NULL);
}

/* ARGSUSED */
static void
sched_setup(void *dummy)
{
	callout_init(&loadav_callout);
	callout_init(&schedcpu_callout);

	/* Kick off timeout driven events by calling first time. */
	schedcpu(NULL);
	loadav(NULL);
}