/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 * $FreeBSD: src/sys/kern/kern_synch.c,v 1.87.2.6 2002/10/13 07:29:53 kbyanc Exp $
 * $DragonFly: src/sys/kern/kern_synch.c,v 1.41 2005/01/14 02:20:22 dillon Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/thread2.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif
#include <sys/xwait.h>

#include <machine/cpu.h>
#include <machine/ipl.h>
#include <machine/smp.h>

static void sched_setup (void *dummy);
SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)

int	hogticks;
int	lbolt;
int	sched_quantum;		/* Roundrobin scheduling quantum in ticks. */
int	ncpus;
int	ncpus2, ncpus2_shift, ncpus2_mask;

static struct callout loadav_callout;
static struct callout roundrobin_callout;
static struct callout schedcpu_callout;

struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };	/* load average of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
static fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};
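
/*
 * Illustrative note (not in the original source): loadav() below applies
 * these constants as a fixed-point exponential moving average roughly
 * every 5 seconds:
 *
 *	ldavg[i] = (cexp[i] * ldavg[i] +
 *		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT
 *
 * With a steady number of runnable processes the averages converge toward
 * nrun * FSCALE, the 1 minute average converging fastest.
 */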

static void	endtsleep (void *);
static void	loadav (void *arg);
static void	roundrobin (void *arg);
static void	schedcpu (void *arg);
static void	updatepri (struct proc *p);
static void	crit_panicints(void);

static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;

	new_val = sched_quantum * tick;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < tick)
		return (EINVAL);
	sched_quantum = new_val / tick;
	hogticks = 2 * sched_quantum;
	return (0);
}

SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof sched_quantum, sysctl_kern_quantum, "I", "");
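
/*
 * Usage sketch (illustrative): the sysctl above reads and writes the
 * quantum in microseconds, e.g.
 *
 *	sysctl kern.quantum		(reports sched_quantum * tick)
 *	sysctl kern.quantum=50000	(50ms, rounded down to whole ticks)
 *
 * Values smaller than one clock tick are rejected with EINVAL.
 */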

int
roundrobin_interval(void)
{
	return (sched_quantum);
}

/*
 * Force a switch among equal-priority processes every 100ms.
 *
 * WARNING!  The MP lock is not held when this runs from an IPI on
 * remote cpus.
 */
#ifdef SMP

static void
roundrobin_remote(void *arg)
{
	struct proc *p = lwkt_preempted_proc();
	if (p == NULL || RTP_PRIO_NEED_RR(p->p_rtprio.type))
		need_user_resched();
}

#endif

static void
roundrobin(void *arg)
{
	struct proc *p = lwkt_preempted_proc();
	if (p == NULL || RTP_PRIO_NEED_RR(p->p_rtprio.type))
		need_user_resched();
#ifdef SMP
	lwkt_send_ipiq_mask(mycpu->gd_other_cpus, roundrobin_remote, NULL);
#endif
	callout_reset(&roundrobin_callout, sched_quantum, roundrobin, NULL);
}

#ifdef SMP

void
resched_cpus(u_int32_t mask)
{
	lwkt_send_ipiq_mask(mask, roundrobin_remote, NULL);
}

#endif

/*
 * The load average is scaled by FSCALE (2048 typ).  The estimated cpu is
 * incremented at a rate of ESTCPUVFREQ per second (40hz typ), but this is
 * divided up across all cpu bound processes running in the system so an
 * individual process will get less under load.  ESTCPULIM typically caps
 * out at ESTCPUMAX (around 376, or 11 nice levels).
 *
 * Generally speaking the decay equation needs to break even on growth
 * at the limit at all load levels >= 1.0, so if the estimated cpu for
 * a process increases by (ESTCPUVFREQ / load) per second, then the decay
 * should reach this value when estcpu reaches ESTCPUMAX.  That calculation
 * is:
 *
 *	ESTCPUMAX * decay = ESTCPUVFREQ / load
 *	decay = ESTCPUVFREQ / (load * ESTCPUMAX)
 *	decay = estcpu * 0.053 / load
 *
 * If the load is less than 1.0 we assume a load of 1.0.
 */

#define cload(loadav)	((loadav) < FSCALE ? FSCALE : (loadav))
#define decay_cpu(loadav,estcpu)	\
    ((estcpu) * (FSCALE * ESTCPUVFREQ / ESTCPUMAX) / cload(loadav))
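
/*
 * Worked example (illustrative): cload() clamps the load average at 1.0,
 * so ignoring integer truncation the macro reduces to
 *
 *	decay_cpu(FSCALE, estcpu)     = estcpu * ESTCPUVFREQ / ESTCPUMAX
 *	decay_cpu(2 * FSCALE, estcpu) = estcpu * ESTCPUVFREQ / (2 * ESTCPUMAX)
 *
 * i.e. the per-second decay is halved at a load average of 2.0, matching
 * the break-even argument above.
 */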

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
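
/*
 * Arithmetic behind the "95% in 60 seconds" claim above (illustrative):
 * schedcpu() multiplies p_pctcpu by ccpu once per second, so after 60
 * seconds the surviving fraction is
 *
 *	ccpu^60 = exp(-60/20) = exp(-3) ~= 0.0498
 *
 * i.e. roughly 95% of the old value has decayed away.
 */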

/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */
static int	fscale __unused = FSCALE;
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the percentage of CPU used by a
 * process.
 */
#define	CCPU_SHIFT	11

/*
 * Recompute process priorities, once a second.
 */
/* ARGSUSED */
static void
schedcpu(void *arg)
{
	fixpt_t loadfac = averunnable.ldavg[0];
	struct proc *p;
	int s;
	unsigned int ndecay;

	FOREACH_PROC_IN_SYSTEM(p) {
		/*
		 * Increment time in/out of memory and sleep time
		 * (if sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		p->p_swtime++;
		if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
			p->p_slptime++;
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;

		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 *
		 * Note that interactive calculations do not occur for
		 * long sleeps (because that isn't necessarily indicative
		 * of an interactive process).
		 */
		if (p->p_slptime > 1)
			continue;
		/* prevent state changes and protect run queue */
		s = splhigh();
		/*
		 * p_cpticks runs at ESTCPUFREQ but must be divided by the
		 * load average for par-100% use.  Higher p_interactive
		 * values mean less interactive, lower values mean more
		 * interactive.
		 */
		if ((((fixpt_t)p->p_cpticks * cload(loadfac)) >> FSHIFT) >
		    ESTCPUFREQ / 4) {
			if (p->p_interactive < 127)
				++p->p_interactive;
		} else {
			if (p->p_interactive > -127)
				--p->p_interactive;
		}
		/*
		 * p_pctcpu is only for ps.
		 */
#if	(FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (ESTCPUFREQ == 100) ?
			((fixpt_t)p->p_cpticks) << (FSHIFT - CCPU_SHIFT) :
			100 * (((fixpt_t)p->p_cpticks)
				<< (FSHIFT - CCPU_SHIFT)) / ESTCPUFREQ;
#else
		p->p_pctcpu += ((FSCALE - ccpu) *
			(p->p_cpticks * FSCALE / ESTCPUFREQ)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		ndecay = decay_cpu(loadfac, p->p_estcpu);
		if (p->p_estcpu > ndecay)
			p->p_estcpu -= ndecay;
		else
			p->p_estcpu = 0;
		resetpriority(p);
		splx(s);
	}
	wakeup((caddr_t)&lbolt);
	callout_reset(&schedcpu_callout, hz, schedcpu, NULL);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
static void
updatepri(struct proc *p)
{
	unsigned int ndecay;

	ndecay = decay_cpu(averunnable.ldavg[0], p->p_estcpu) * p->p_slptime;
	if (p->p_estcpu > ndecay)
		p->p_estcpu -= ndecay;
	else
		p->p_estcpu = 0;
	resetpriority(p);
}

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
static TAILQ_HEAD(slpquehead, thread) slpque[TABLESIZE];
#define LOOKUP(x)	(((intptr_t)(x) >> 8) & (TABLESIZE - 1))

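/*
 * Worked example (illustrative): for a wait channel address such as
 * 0x12345678,
 *
 *	LOOKUP(0x12345678) = (0x12345678 >> 8) & 127 = 0x123456 & 0x7f = 0x56
 *
 * so every sleeper on that channel hashes to slpque[0x56].
 */
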
/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
int safepri;

void
sleepinit(void)
{
	int i;

	sched_quantum = hz/10;
	hogticks = 2 * sched_quantum;
	for (i = 0; i < TABLESIZE; i++)
		TAILQ_INIT(&slpque[i]);
}
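
/*
 * Illustrative: since sched_quantum is hz/10 ticks, the round-robin
 * quantum comes out to 100ms regardless of hz (10 ticks at hz == 100),
 * and hogticks to 200ms, matching the "every 100ms" comment above
 * roundrobin().
 */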

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If flags includes PCATCH, signals are checked
 * before and after sleeping, otherwise they are not.  Returns 0 if
 * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the
 * system call should be interrupted by the signal.
 *
 * Note that if we are a process, we release_curproc() before messing with
 * the LWKT scheduler.
 */
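/*
 * Hypothetical usage sketch (invented names, not from this file): a driver
 * waiting up to one second for data, with signals allowed to interrupt
 * the wait:
 *
 *	error = tsleep(&sc->sc_data, PCATCH, "mydata", hz);
 *	if (error == EWOULDBLOCK)
 *		;		(timed out)
 *	else if (error == EINTR || error == ERESTART)
 *		;		(a signal arrived)
 *
 * The producer side would call wakeup(&sc->sc_data).
 */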
int
tsleep(void *ident, int flags, const char *wmesg, int timo)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;		/* may be NULL */
	int sig = 0, catch = flags & PCATCH;
	int id = LOOKUP(ident);
	struct callout thandle;

	/*
	 * NOTE: removed KTRPOINT, it could cause races due to blocking
	 * even in stable.  Just scrap it for now.
	 */
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		crit_panicints();
		return (0);
	}
	KKASSERT(td != &mycpu->gd_idlethread);	/* you must be kidding! */
	crit_enter_quick(td);
	KASSERT(ident != NULL, ("tsleep: no ident"));
	KASSERT(p == NULL || p->p_stat == SRUN, ("tsleep %p %s %d",
		ident, wmesg, p->p_stat));

	td->td_wchan = ident;
	td->td_wmesg = wmesg;
	td->td_wdomain = flags & PDOMAIN_MASK;
	if (p) {
		if (flags & PNORESCHED)
			td->td_flags |= TDF_NORESCHED;
		release_curproc(p);
		p->p_slptime = 0;
	}
	lwkt_deschedule_self(td);
	TAILQ_INSERT_TAIL(&slpque[id], td, td_threadq);
	if (timo) {
		callout_init(&thandle);
		callout_reset(&thandle, timo, endtsleep, td);
	}
	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as SSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, td->td_wchan will be 0 upon return from CURSIG.
	 */
	if (p) {
		if (catch) {
			p->p_flag |= P_SINTR;
			if ((sig = CURSIG(p))) {
				if (td->td_wchan) {
					unsleep(td);
					lwkt_schedule_self(td);
				}
				p->p_stat = SRUN;
				goto resume;
			}
			if (td->td_wchan == NULL) {
				catch = 0;
				goto resume;
			}
		} else {
			sig = 0;
		}

		/*
		 * If we are not the current process we have to remove
		 * ourselves from the run queue.
		 */
		KASSERT(p->p_stat == SRUN, ("PSTAT NOT SRUN %d %d", p->p_pid, p->p_stat));
		/*
		 * If this is the current 'user' process schedule another one.
		 */
		clrrunnable(p, SSLEEP);
		p->p_stats->p_ru.ru_nvcsw++;
		mi_switch(p);
		KASSERT(p->p_stat == SRUN, ("tsleep: stat not srun"));
	} else {
		lwkt_switch();
	}
resume:
	if (p)
		p->p_flag &= ~P_SINTR;
	crit_exit_quick(td);
	td->td_flags &= ~TDF_NORESCHED;
	if (td->td_flags & TDF_TIMEOUT) {
		td->td_flags &= ~TDF_TIMEOUT;
		if (sig == 0)
			return (EWOULDBLOCK);
	} else if (timo) {
		callout_stop(&thandle);
	} else if (td->td_wmesg) {
		/*
		 * This can happen if a thread is woken up directly.  Clear
		 * wmesg to avoid debugging confusion.
		 */
		td->td_wmesg = NULL;
	}
	/* inline of iscaught() */
	if (p) {
		if (catch && (sig != 0 || (sig = CURSIG(p)))) {
			if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
				return (EINTR);
			return (ERESTART);
		}
	}
	return (0);
}

/*
 * Implement the timeout for tsleep.  We interlock against
 * wchan when setting TDF_TIMEOUT.  For processes we remove
 * the sleep if the process is stopped rather than sleeping,
 * so it remains stopped.
 */
static void
endtsleep(void *arg)
{
	thread_t td = arg;
	struct proc *p;

	crit_enter();
	if (td->td_wchan) {
		td->td_flags |= TDF_TIMEOUT;
		if ((p = td->td_proc) != NULL) {
			if (p->p_stat == SSLEEP)
				setrunnable(p);
			else
				unsleep(td);
		} else {
			unsleep(td);
			lwkt_schedule(td);
		}
	}
	crit_exit();
}

/*
 * Remove a thread from its wait queue.
 */
void
unsleep(struct thread *td)
{
	crit_enter();
	if (td->td_wchan) {
#if 0
		if (p->p_flag & P_XSLEEP) {
			struct xwait *w = p->p_wchan;
			TAILQ_REMOVE(&w->waitq, p, p_procq);
			p->p_flag &= ~P_XSLEEP;
		} else
#endif
		TAILQ_REMOVE(&slpque[LOOKUP(td->td_wchan)], td, td_threadq);
		td->td_wchan = NULL;
	}
	crit_exit();
}

#if 0
/*
 * Make all processes sleeping on the explicit lock structure runnable.
 */
void
xwakeup(struct xwait *w)
{
	struct proc *p;

	crit_enter();
	++w->gen;
	while ((p = TAILQ_FIRST(&w->waitq)) != NULL) {
		TAILQ_REMOVE(&w->waitq, p, p_procq);
		KASSERT(p->p_wchan == w && (p->p_flag & P_XSLEEP),
		    ("xwakeup: wchan mismatch for %p (%p/%p) %08x", p, p->p_wchan, w, p->p_flag & P_XSLEEP));
		p->p_wchan = NULL;
		p->p_flag &= ~P_XSLEEP;
		if (p->p_stat == SSLEEP) {
			/* OPTIMIZED EXPANSION OF setrunnable(p); */
			if (p->p_slptime > 1)
				updatepri(p);
			p->p_slptime = 0;
			p->p_stat = SRUN;
			if (p->p_flag & P_INMEM) {
				lwkt_schedule(p->p_thread);
			} else {
				p->p_flag |= P_SWAPINREQ;
				wakeup((caddr_t)&proc0);
			}
		}
	}
	crit_exit();
}
#endif

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
static void
_wakeup(void *ident, int domain, int count)
{
	struct slpquehead *qp;
	struct thread *td;
	struct thread *ntd;
	struct proc *p;
	int id = LOOKUP(ident);

	crit_enter();
	qp = &slpque[id];
restart:
	for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) {
		ntd = TAILQ_NEXT(td, td_threadq);
		if (td->td_wchan == ident && td->td_wdomain == domain) {
			TAILQ_REMOVE(qp, td, td_threadq);
			td->td_wchan = NULL;
			if ((p = td->td_proc) != NULL && p->p_stat == SSLEEP) {
				/* OPTIMIZED EXPANSION OF setrunnable(p); */
				if (p->p_slptime > 1)
					updatepri(p);
				p->p_slptime = 0;
				p->p_stat = SRUN;
				if (p->p_flag & P_INMEM) {
					/*
					 * LWKT scheduled now, there is no
					 * userland runq interaction until
					 * the thread tries to return to user
					 * mode.
					 *
					 * setrunqueue(p);
					 */
					lwkt_schedule(td);
				} else {
					p->p_flag |= P_SWAPINREQ;
					wakeup((caddr_t)&proc0);
				}
				/* END INLINE EXPANSION */
			} else if (p == NULL) {
				lwkt_schedule(td);
			}
			if (--count == 0)
				break;
			goto restart;
		}
	}
	crit_exit();
}

void
wakeup(void *ident)
{
	_wakeup(ident, 0, 0);
}

void
wakeup_one(void *ident)
{
	_wakeup(ident, 0, 1);
}

void
wakeup_domain(void *ident, int domain)
{
	_wakeup(ident, domain, 0);
}

void
wakeup_domain_one(void *ident, int domain)
{
	_wakeup(ident, domain, 1);
}
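
/*
 * Illustrative pairing (invented names): a consumer blocks with
 *
 *	tsleep(&sc->sc_queue, 0, "qwait", 0);
 *
 * and a producer, after queueing work, calls wakeup(&sc->sc_queue) to make
 * every sleeper on the channel runnable, or wakeup_one() to wake at most
 * one.  The _domain variants additionally match td_wdomain against the
 * PDOMAIN_MASK bits that were passed to tsleep() in its flags.
 */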

/*
 * The machine independent parts of mi_switch().
 *
 * 'p' must be the current process.
 */
void
mi_switch(struct proc *p)
{
	thread_t td = p->p_thread;
	struct rlimit *rlim;
	u_int64_t ttime;

	KKASSERT(td == mycpu->gd_curthread);

	crit_enter_quick(td);

	/*
	 * Check if the process exceeds its cpu resource allocation.
	 * If over max, kill it.  Time spent in interrupts is not
	 * included.  YYY 64 bit math is expensive.  Ick.
	 */
	ttime = td->td_sticks + td->td_uticks;
	if (p->p_stat != SZOMB && p->p_limit->p_cpulimit != RLIM_INFINITY &&
	    ttime > p->p_limit->p_cpulimit) {
		rlim = &p->p_rlimit[RLIMIT_CPU];
		if (ttime / (rlim_t)1000000 >= rlim->rlim_max) {
			killproc(p, "exceeded maximum CPU limit");
		} else {
			psignal(p, SIGXCPU);
			if (rlim->rlim_cur < rlim->rlim_max) {
				/* XXX: we should make a private copy */
				rlim->rlim_cur += 5;
			}
		}
	}

	/*
	 * If we are in a SSTOPped state we deschedule ourselves.
	 * YYY this needs to be cleaned up, remember that LWKTs stay on
	 * their run queue which works differently than the user scheduler,
	 * which removes the process from the runq when running it.
	 */
	mycpu->gd_cnt.v_swtch++;
	if (p->p_stat == SSTOP)
		lwkt_deschedule_self(td);
	lwkt_switch();
	crit_exit_quick(td);
}
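
/*
 * Worked example (illustrative; note the division by 1000000, which
 * implies ttime is kept in microseconds): with an RLIMIT_CPU hard limit
 * of 60 seconds the process is killed once td_sticks + td_uticks reaches
 * 60000000.  Below the hard limit it gets SIGXCPU and the soft limit is
 * bumped by 5 seconds, so the signal repeats periodically instead of
 * firing on every switch.
 */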

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(struct proc *p)
{
	int s;

	s = splhigh();
	switch (p->p_stat) {
	case 0:
	case SRUN:
	case SZOMB:
	default:
		panic("setrunnable");
	case SSTOP:
	case SSLEEP:
		unsleep(p->p_thread);	/* e.g. when sending signals */
		break;

	case SIDL:
		break;
	}
	p->p_stat = SRUN;

	/*
	 * The process is controlled by LWKT at this point, we do not mess
	 * around with the userland scheduler until the thread tries to
	 * return to user mode.
	 */
#if 0
	if (p->p_flag & P_INMEM)
		setrunqueue(p);
#endif
	if (p->p_flag & P_INMEM)
		lwkt_schedule(p->p_thread);
	splx(s);
	if (p->p_slptime > 1)
		updatepri(p);
	p->p_slptime = 0;
	if ((p->p_flag & P_INMEM) == 0) {
		p->p_flag |= P_SWAPINREQ;
		wakeup((caddr_t)&proc0);
	}
}

/*
 * Change the process state to NOT be runnable, removing it from the run
 * queue.
 */
void
clrrunnable(struct proc *p, int stat)
{
	crit_enter_quick(p->p_thread);
	if (p->p_stat == SRUN && (p->p_flag & P_ONRUNQ))
		remrunqueue(p);
	p->p_stat = stat;
	crit_exit_quick(p->p_thread);
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(struct proc *p)
{
	int newpriority;
	int interactive;
	int opq;
	int npq;

	/*
	 * Set p_priority for general process comparisons
	 */
	switch(p->p_rtprio.type) {
	case RTP_PRIO_REALTIME:
		p->p_priority = PRIBASE_REALTIME + p->p_rtprio.prio;
		return;
	case RTP_PRIO_NORMAL:
		break;
	case RTP_PRIO_IDLE:
		p->p_priority = PRIBASE_IDLE + p->p_rtprio.prio;
		return;
	case RTP_PRIO_THREAD:
		p->p_priority = PRIBASE_THREAD + p->p_rtprio.prio;
		return;
	}

	/*
	 * NORMAL priorities fall through.  These are based on niceness
	 * and cpu use.  Lower numbers == higher priorities.
	 */
	newpriority = (int)(NICE_ADJUST(p->p_nice - PRIO_MIN) +
			p->p_estcpu / ESTCPURAMP);

	/*
	 * p_interactive is -128 to +127 and represents very long term
	 * interactivity or batch (whereas estcpu is a much faster variable).
	 * Interactivity can modify the priority by up to 8 units either way.
	 * (8 units == approximately 4 nice levels).
	 */
	interactive = p->p_interactive / 10;
	newpriority += interactive;

	newpriority = MIN(newpriority, MAXPRI);
	newpriority = MAX(newpriority, 0);
	npq = newpriority / PPQ;
	crit_enter();
	opq = (p->p_priority & PRIMASK) / PPQ;
	if (p->p_stat == SRUN && (p->p_flag & P_ONRUNQ) && opq != npq) {
		/*
		 * We have to move the process to another queue
		 */
		remrunqueue(p);
		p->p_priority = PRIBASE_NORMAL + newpriority;
		setrunqueue(p);
	} else {
		/*
		 * We can just adjust the priority and it will be picked
		 * up later.
		 */
		KKASSERT(opq == npq || (p->p_flag & P_ONRUNQ) == 0);
		p->p_priority = PRIBASE_NORMAL + newpriority;
	}
	crit_exit();
}

/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.
 */
static void
loadav(void *arg)
{
	int i, nrun;
	struct loadavg *avg;
	struct proc *p;
	thread_t td;

	avg = &averunnable;
	nrun = 0;
	FOREACH_PROC_IN_SYSTEM(p) {
		switch (p->p_stat) {
		case SRUN:
			if ((td = p->p_thread) == NULL)
				break;
			if (td->td_flags & TDF_BLOCKED)
				break;
			/* fall through */
		case SIDL:
			nrun++;
			break;
		default:
			break;
		}
	}
	for (i = 0; i < 3; i++)
		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;

	/*
	 * Schedule the next update to occur after 5 seconds, but add a
	 * random variation to avoid synchronisation with processes that
	 * run at regular intervals.
	 */
	callout_reset(&loadav_callout, hz * 4 + (int)(random() % (hz * 2 + 1)),
	    loadav, NULL);
}

/* ARGSUSED */
static void
sched_setup(void *dummy)
{
	callout_init(&loadav_callout);
	callout_init(&roundrobin_callout);
	callout_init(&schedcpu_callout);

	/* Kick off the timeout driven events by calling them the first time. */
	roundrobin(NULL);
	schedcpu(NULL);
	loadav(NULL);
}

/*
 * We adjust the priority of the current process.  The priority of
 * a process gets worse as it accumulates CPU time.  The cpu usage
 * estimator (p_estcpu) is increased here.  resetpriority() will
 * compute a different priority each time p_estcpu increases by
 * INVERSE_ESTCPU_WEIGHT (until MAXPRI is reached).
 *
 * The cpu usage estimator ramps up quite quickly when the process is
 * running (linearly), and decays away exponentially, at a rate which
 * is proportionally slower when the system is busy.  The basic principle
 * is that the system will 90% forget that the process used a lot of CPU
 * time in 5 * loadav seconds.  This causes the system to favor processes
 * which haven't run much recently, and to round-robin among other processes.
 *
 * The actual schedulerclock interrupt rate is ESTCPUFREQ, but we generally
 * want to ramp-up at a faster rate, ESTCPUVFREQ, so p_estcpu is scaled
 * by (ESTCPUVFREQ / ESTCPUFREQ).  You can control the ramp-up/ramp-down
 * rate by adjusting ESTCPUVFREQ in sys/proc.h in integer multiples
 * of ESTCPUFREQ.
 *
 * WARNING! called from a fast-int or an IPI, the MP lock MIGHT NOT BE HELD
 * and we cannot block.
 */
void
schedulerclock(void *dummy)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	if ((p = td->td_proc) != NULL) {
		p->p_cpticks++;		/* cpticks runs at ESTCPUFREQ */
		p->p_estcpu = ESTCPULIM(p->p_estcpu + ESTCPUVFREQ / ESTCPUFREQ);
		if (try_mplock()) {
			resetpriority(p);
			rel_mplock();
		}
	}
}
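
/*
 * Back-of-the-envelope (illustrative): each call adds
 * ESTCPUVFREQ / ESTCPUFREQ to p_estcpu and schedulerclock() runs
 * ESTCPUFREQ times per second, so a cpu-bound process gains roughly
 * ESTCPUVFREQ estcpu per second before decay.  At the 40hz virtual rate
 * mentioned earlier, ramping from 0 to ESTCPUMAX (~376) would take about
 * 376/40, i.e. nine to ten seconds, ignoring the decay in schedcpu().
 */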

static
void
crit_panicints(void)
{
	int s;
	int cpri;

	s = splhigh();
	cpri = crit_panic_save();
	splx(safepri);
	crit_panic_restore(cpri);
	splx(s);
}