/*	$NetBSD: kern_synch.c,v 1.115 2002/11/03 13:59:12 nisimura Exp $	*/

/*-
 * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.115 2002/11/03 13:59:12 nisimura Exp $");

#include "opt_ddb.h"
#include "opt_ktrace.h"
#include "opt_kstack.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include "opt_perfctrs.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#if defined(PERFCTRS)
#include <sys/pmc.h>
#endif
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>

#include <uvm/uvm_extern.h>

#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

int	lbolt;			/* once a second sleep address */
int	rrticks;		/* number of hardclock ticks per roundrobin() */

/*
 * The global scheduler state.
 */
struct prochd sched_qs[RUNQUE_NQS];	/* run queues */
__volatile u_int32_t sched_whichqs;	/* bitmap of non-empty queues */
struct slpque sched_slpque[SLPQUE_TABLESIZE]; /* sleep queues */

struct simplelock sched_lock = SIMPLELOCK_INITIALIZER;

void schedcpu(void *);
void updatepri(struct proc *);
void endtsleep(void *);

__inline void awaken(struct proc *);

struct callout schedcpu_ch = CALLOUT_INITIALIZER;

/*
 * Force switch among equal priority processes every 100ms.
 * Called from hardclock every hz/10 == rrticks hardclock ticks.
 */
/* ARGSUSED */
void
roundrobin(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	spc->spc_rrticks = rrticks;

	if (curproc != NULL) {
		if (spc->spc_flags & SPCF_SEENRR) {
			/*
			 * The process has already been through a roundrobin
			 * without switching and may be hogging the CPU.
			 * Indicate that the process should yield.
			 */
			spc->spc_flags |= SPCF_SHOULDYIELD;
		} else
			spc->spc_flags |= SPCF_SEENRR;
	}
	need_resched(curcpu());
}

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ -2.30/(5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */
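
/*
 * Illustrative sketch, not part of the original source: a small userland
 * program that reproduces the "power" table above by solving
 * decay ** power == .1 for power, with decay = b/(b+1) and b = 2*loadav.
 * Compile with -lm; all names here are hypothetical.
 */
#if 0
#include <math.h>
#include <stdio.h>

int
main(void)
{
	double loadav, b, power;

	for (loadav = 1.0; loadav <= 4.0; loadav += 1.0) {
		b = 2.0 * loadav;
		/* power such that (b/(b+1)) ** power == 0.1 */
		power = log(0.1) / log(b / (b + 1.0));
		printf("loadav %.0f: power %.2f (vs 5*loadav = %.0f)\n",
		    loadav, power, 5.0 * loadav);
	}
	return 0;
}
#endif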

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1), which will use a slower/less-accurate
 * (more general) method of calculating the percentage of CPU used by a process.
 */
#define	CCPU_SHIFT	11
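
/*
 * Worked example of the CCPU_SHIFT estimate above (an illustration, not
 * from the original source), assuming the usual FSHIFT of 11: then
 * FSCALE == 2048, ccpu == 0.95122... * 2048 =~ 1948, and
 * FSCALE - ccpu =~ 100.  100/2048 =~ 0.0488, i.e. the per-second weight
 * (1 - exp(-1/20)) is about one part in 2**11, which is why a CCPU_SHIFT
 * of 11 captures it.
 */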

/*
 * Recompute process priorities, every hz ticks.
 */
/* ARGSUSED */
void
schedcpu(void *arg)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct proc *p;
	int s, s1;
	unsigned int newcpu;
	int clkhz;

	proclist_lock_read();
	LIST_FOREACH(p, &allproc, p_list) {
		/*
		 * Increment time in/out of memory and sleep time
		 * (if sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		p->p_swtime++;
		if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
			p->p_slptime++;
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (p->p_slptime > 1)
			continue;
		s = splstatclock();	/* prevent state changes */
		/*
		 * p_pctcpu is only for ps.
		 */
		clkhz = stathz != 0 ? stathz : hz;
#if	(FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (clkhz == 100) ?
		    ((fixpt_t)p->p_cpticks) << (FSHIFT - CCPU_SHIFT) :
		    100 * (((fixpt_t)p->p_cpticks)
			<< (FSHIFT - CCPU_SHIFT)) / clkhz;
#else
		p->p_pctcpu += ((FSCALE - ccpu) *
		    (p->p_cpticks * FSCALE / clkhz)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		newcpu = (u_int)decay_cpu(loadfac, p->p_estcpu);
		p->p_estcpu = newcpu;
		SCHED_LOCK(s1);
		resetpriority(p);
		if (p->p_priority >= PUSER) {
			if (p->p_stat == SRUN &&
			    (p->p_flag & P_INMEM) &&
			    (p->p_priority / PPQ) != (p->p_usrpri / PPQ)) {
				remrunqueue(p);
				p->p_priority = p->p_usrpri;
				setrunqueue(p);
			} else
				p->p_priority = p->p_usrpri;
		}
		SCHED_UNLOCK(s1);
		splx(s);
	}
	proclist_unlock_read();
	uvm_meter();
	wakeup((caddr_t)&lbolt);
	callout_reset(&schedcpu_ch, hz, schedcpu, NULL);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
void
updatepri(struct proc *p)
{
	unsigned int newcpu;
	fixpt_t loadfac;

	SCHED_ASSERT_LOCKED();

	newcpu = p->p_estcpu;
	loadfac = loadfactor(averunnable.ldavg[0]);

	if (p->p_slptime > 5 * loadfac)
		p->p_estcpu = 0;
	else {
		p->p_slptime--;	/* the first time was done in schedcpu */
		while (newcpu && --p->p_slptime)
			newcpu = (int)decay_cpu(loadfac, newcpu);
		p->p_estcpu = newcpu;
	}
	resetpriority(p);
}

/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
int safepri;

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping; otherwise signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 *
 * The interlock is held until the scheduler_slock is acquired.  The
 * interlock will be locked before returning back to the caller
 * unless the PNORELOCK flag is specified, in which case the
 * interlock will always be unlocked upon return.
 */
int
ltsleep(void *ident, int priority, const char *wmesg, int timo,
    __volatile struct simplelock *interlock)
{
	struct proc *p = curproc;
	struct slpque *qp;
	int sig, s;
	int catch = priority & PCATCH;
	int relock = (priority & PNORELOCK) == 0;

	/*
	 * XXXSMP
	 * This is probably bogus.  Figure out what the right
	 * thing to do here really is.
	 * Note that not sleeping if ltsleep is called with curproc == NULL
	 * in the shutdown case is disgusting but partly necessary given
	 * how shutdown (barely) works.
	 */
	if (cold || (doing_shutdown && (panicstr || (p == NULL)))) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		s = splhigh();
		splx(safepri);
		splx(s);
		if (interlock != NULL && relock == 0)
			simple_unlock(interlock);
		return (0);
	}

	KASSERT(p != NULL);
	LOCK_ASSERT(interlock == NULL || simple_lock_held(interlock));

#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p, 1, 0);
#endif

	SCHED_LOCK(s);

#ifdef DIAGNOSTIC
	if (ident == NULL)
		panic("ltsleep: ident == NULL");
	if (p->p_stat != SONPROC)
		panic("ltsleep: p_stat %d != SONPROC", p->p_stat);
	if (p->p_back != NULL)
		panic("ltsleep: p_back != NULL");
#endif

	p->p_wchan = ident;
	p->p_wmesg = wmesg;
	p->p_slptime = 0;
	p->p_priority = priority & PRIMASK;

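	/*
	 * Insert at the tail of this ident's hashed sleep queue.
	 * sq_tailp points at the final p_forw link field, so the
	 * append and the tail-pointer update below are both O(1);
	 * the head test handles an empty queue.
	 */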
	qp = SLPQUE(ident);
	if (qp->sq_head == 0)
		qp->sq_head = p;
	else
		*qp->sq_tailp = p;
	*(qp->sq_tailp = &p->p_forw) = 0;

	if (timo)
		callout_reset(&p->p_tsleep_ch, timo, endtsleep, p);

	/*
	 * We can now release the interlock; the scheduler_slock
	 * is held, so a thread can't get in to do wakeup() before
	 * we do the switch.
	 *
	 * XXX We leave the code block here, after inserting ourselves
	 * on the sleep queue, because we might want a more clever
	 * data structure for the sleep queues at some point.
	 */
	if (interlock != NULL)
		simple_unlock(interlock);

	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as SSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, p->p_wchan will be 0 upon return from CURSIG.
	 */
	if (catch) {
		p->p_flag |= P_SINTR;
		if ((sig = CURSIG(p)) != 0) {
			if (p->p_wchan != NULL)
				unsleep(p);
			p->p_stat = SONPROC;
			SCHED_UNLOCK(s);
			goto resume;
		}
		if (p->p_wchan == NULL) {
			catch = 0;
			SCHED_UNLOCK(s);
			goto resume;
		}
	} else
		sig = 0;
	p->p_stat = SSLEEP;
	p->p_stats->p_ru.ru_nvcsw++;

	SCHED_ASSERT_LOCKED();
	mi_switch(p, NULL);

#if	defined(DDB) && !defined(GPROF)
	/* handy breakpoint location after process "wakes" */
	__asm(".globl bpendtsleep ; bpendtsleep:");
#endif

	SCHED_ASSERT_UNLOCKED();
	splx(s);

 resume:
	KDASSERT(p->p_cpu != NULL);
	KDASSERT(p->p_cpu == curcpu());
	p->p_cpu->ci_schedstate.spc_curpriority = p->p_usrpri;

	p->p_flag &= ~P_SINTR;
	if (p->p_flag & P_TIMEOUT) {
		p->p_flag &= ~P_TIMEOUT;
		if (sig == 0) {
#ifdef KTRACE
			if (KTRPOINT(p, KTR_CSW))
				ktrcsw(p, 0, 0);
#endif
			if (relock && interlock != NULL)
				simple_lock(interlock);
			return (EWOULDBLOCK);
		}
	} else if (timo)
		callout_stop(&p->p_tsleep_ch);
	if (catch && (sig != 0 || (sig = CURSIG(p)) != 0)) {
#ifdef KTRACE
		if (KTRPOINT(p, KTR_CSW))
			ktrcsw(p, 0, 0);
#endif
		if (relock && interlock != NULL)
			simple_lock(interlock);
		if ((SIGACTION(p, sig).sa_flags & SA_RESTART) == 0)
			return (EINTR);
		return (ERESTART);
	}
#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p, 0, 0);
#endif
	if (relock && interlock != NULL)
		simple_lock(interlock);
	return (0);
}
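
/*
 * Illustrative sketch, not from the original source: the usual
 * interlocked-sleep pattern built on ltsleep().  "foo" and its members
 * are hypothetical; the waker would run wakeup(&foo->f_busy) with
 * f_slock held.
 */
#if 0
	int error = 0;

	simple_lock(&foo->f_slock);
	while (foo->f_busy && error == 0) {
		/* Drops f_slock across the sleep, relocks it on return. */
		error = ltsleep(&foo->f_busy, PRIBIO | PCATCH, "foowait",
		    0, &foo->f_slock);
	}
	if (error == 0)
		foo->f_busy = 1;
	simple_unlock(&foo->f_slock);
#endif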

/*
 * Implement timeout for tsleep.
 * If process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If proc
 * is stopped, just unsleep so it will remain stopped.
 */
void
endtsleep(void *arg)
{
	struct proc *p;
	int s;

	p = (struct proc *)arg;

	SCHED_LOCK(s);
	if (p->p_wchan) {
		if (p->p_stat == SSLEEP)
			setrunnable(p);
		else
			unsleep(p);
		p->p_flag |= P_TIMEOUT;
	}
	SCHED_UNLOCK(s);
}

/*
 * Remove a process from its wait queue.
 */
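/*
 * The scan below keeps a pointer to the previous link field (hp), the
 * classic pointer-to-pointer idiom: once p is found, *hp = p->p_forw
 * unlinks it without a separate "previous element" variable, and the
 * queue's tail pointer is pulled back if p was last.
 */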
void
unsleep(struct proc *p)
{
	struct slpque *qp;
	struct proc **hp;

	SCHED_ASSERT_LOCKED();

	if (p->p_wchan) {
		hp = &(qp = SLPQUE(p->p_wchan))->sq_head;
		while (*hp != p)
			hp = &(*hp)->p_forw;
		*hp = p->p_forw;
		if (qp->sq_tailp == &p->p_forw)
			qp->sq_tailp = hp;
		p->p_wchan = 0;
	}
}

/*
 * Optimized-for-wakeup() version of setrunnable().
 */
__inline void
awaken(struct proc *p)
{

	SCHED_ASSERT_LOCKED();

	if (p->p_slptime > 1)
		updatepri(p);
	p->p_slptime = 0;
	p->p_stat = SRUN;

	/*
	 * Since curpriority is a user priority, p->p_priority
	 * is always better than curpriority.
	 */
	if (p->p_flag & P_INMEM) {
		setrunqueue(p);
		KASSERT(p->p_cpu != NULL);
		need_resched(p->p_cpu);
	} else
		sched_wakeup(&proc0);
}

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
void
sched_unlock_idle(void)
{

	simple_unlock(&sched_lock);
}

void
sched_lock_idle(void)
{

	simple_lock(&sched_lock);
}
#endif /* MULTIPROCESSOR || LOCKDEBUG */

/*
 * Make all processes sleeping on the specified identifier runnable.
 */

void
wakeup(void *ident)
{
	int s;

	SCHED_ASSERT_UNLOCKED();

	SCHED_LOCK(s);
	sched_wakeup(ident);
	SCHED_UNLOCK(s);
}

void
sched_wakeup(void *ident)
{
	struct slpque *qp;
	struct proc *p, **q;

	SCHED_ASSERT_LOCKED();

	qp = SLPQUE(ident);
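	/*
	 * Note on the restart below: awaken() may recursively enter
	 * sched_wakeup() (to wake the swapper on &proc0 for a
	 * swapped-out process), and &proc0 can hash to this same
	 * queue, so the scan conservatively restarts from the head
	 * after each awaken().
	 */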
 restart:
	for (q = &qp->sq_head; (p = *q) != NULL; ) {
#ifdef DIAGNOSTIC
		if (p->p_back || (p->p_stat != SSLEEP && p->p_stat != SSTOP))
			panic("wakeup");
#endif
		if (p->p_wchan == ident) {
			p->p_wchan = 0;
			*q = p->p_forw;
			if (qp->sq_tailp == &p->p_forw)
				qp->sq_tailp = q;
			if (p->p_stat == SSLEEP) {
				awaken(p);
				goto restart;
			}
		} else
			q = &p->p_forw;
	}
}

/*
 * Make the highest priority process first in line on the specified
 * identifier runnable.
 */
void
wakeup_one(void *ident)
{
	struct slpque *qp;
	struct proc *p, **q;
	struct proc *best_sleepp, **best_sleepq;
	struct proc *best_stopp, **best_stopq;
	int s;

	best_sleepp = best_stopp = NULL;
	best_sleepq = best_stopq = NULL;

	SCHED_LOCK(s);

	qp = SLPQUE(ident);

	for (q = &qp->sq_head; (p = *q) != NULL; q = &p->p_forw) {
#ifdef DIAGNOSTIC
		if (p->p_back || (p->p_stat != SSLEEP && p->p_stat != SSTOP))
			panic("wakeup_one");
#endif
		if (p->p_wchan == ident) {
			if (p->p_stat == SSLEEP) {
				if (best_sleepp == NULL ||
				    p->p_priority < best_sleepp->p_priority) {
					best_sleepp = p;
					best_sleepq = q;
				}
			} else {
				if (best_stopp == NULL ||
				    p->p_priority < best_stopp->p_priority) {
					best_stopp = p;
					best_stopq = q;
				}
			}
		}
	}

	/*
	 * Consider any SSLEEP process higher than the highest priority SSTOP
	 * process.
	 */
	if (best_sleepp != NULL) {
		p = best_sleepp;
		q = best_sleepq;
	} else {
		p = best_stopp;
		q = best_stopq;
	}

	if (p != NULL) {
		p->p_wchan = NULL;
		*q = p->p_forw;
		if (qp->sq_tailp == &p->p_forw)
			qp->sq_tailp = q;
		if (p->p_stat == SSLEEP)
			awaken(p);
	}
	SCHED_UNLOCK(s);
}

/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.
 */
void
yield(void)
{
	struct proc *p = curproc;
	int s;

	SCHED_LOCK(s);
	p->p_priority = p->p_usrpri;
	p->p_stat = SRUN;
	setrunqueue(p);
	p->p_stats->p_ru.ru_nvcsw++;
	mi_switch(p, NULL);
	SCHED_ASSERT_UNLOCKED();
	splx(s);
}

/*
 * General preemption call.  Puts the current process back on its run queue
 * and performs an involuntary context switch.  If a process is supplied,
 * we switch to that process.  Otherwise, we use the normal process selection
 * criteria.
 */
void
preempt(struct proc *newp)
{
	struct proc *p = curproc;
	int s;

	SCHED_LOCK(s);
	p->p_priority = p->p_usrpri;
	p->p_stat = SRUN;
	setrunqueue(p);
	p->p_stats->p_ru.ru_nivcsw++;
	mi_switch(p, newp);
	SCHED_ASSERT_UNLOCKED();
	splx(s);
}
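
/*
 * Illustrative sketch, not from the original source: a long-running
 * kernel loop can use yield() as a voluntary preemption point so that
 * other runnable processes of equal priority get CPU time.  The work
 * functions here are hypothetical.
 */
#if 0
	while (work_remaining()) {	/* hypothetical */
		do_one_unit_of_work();	/* hypothetical */
		yield();		/* voluntary context switch */
	}
#endif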

/*
 * The machine independent parts of context switch.
 * Must be called at splsched() (no higher!) and with
 * the sched_lock held.
 */
void
mi_switch(struct proc *p, struct proc *newp)
{
	struct schedstate_percpu *spc;
	struct rlimit *rlim;
	long s, u;
	struct timeval tv;
#if defined(MULTIPROCESSOR)
	int hold_count;
#endif

	SCHED_ASSERT_LOCKED();

#if defined(MULTIPROCESSOR)
	/*
	 * Release the kernel_lock, as we are about to yield the CPU.
	 * The scheduler lock is still held until cpu_switch()
	 * selects a new process and removes it from the run queue.
	 */
	if (p->p_flag & P_BIGLOCK)
		hold_count = spinlock_release_all(&kernel_lock);
#endif

	KDASSERT(p->p_cpu != NULL);
	KDASSERT(p->p_cpu == curcpu());
	KDASSERT(newp == NULL);

	spc = &p->p_cpu->ci_schedstate;

#if defined(LOCKDEBUG) || defined(DIAGNOSTIC)
	spinlock_switchcheck();
#endif
#ifdef LOCKDEBUG
	simple_lock_switchcheck();
#endif

	/*
	 * Compute the amount of time during which the current
	 * process was running.
	 */
	microtime(&tv);
	u = p->p_rtime.tv_usec + (tv.tv_usec - spc->spc_runtime.tv_usec);
	s = p->p_rtime.tv_sec + (tv.tv_sec - spc->spc_runtime.tv_sec);
	if (u < 0) {
		u += 1000000;
		s--;
	} else if (u >= 1000000) {
		u -= 1000000;
		s++;
	}
	p->p_rtime.tv_usec = u;
	p->p_rtime.tv_sec = s;

	/*
	 * Check if the process exceeds its cpu resource allocation.
	 * If over max, kill it.  In any case, if it has run for more
	 * than 10 minutes, reduce priority to give others a chance.
	 */
	rlim = &p->p_rlimit[RLIMIT_CPU];
	if (s >= rlim->rlim_cur) {
		/*
		 * XXXSMP: we're inside the scheduler lock perimeter;
		 * use sched_psignal.
		 */
		if (s >= rlim->rlim_max)
			sched_psignal(p, SIGKILL);
		else {
			sched_psignal(p, SIGXCPU);
			if (rlim->rlim_cur < rlim->rlim_max)
				rlim->rlim_cur += 5;
		}
	}
	if (autonicetime && s > autonicetime && p->p_ucred->cr_uid &&
	    p->p_nice == NZERO) {
		p->p_nice = autoniceval + NZERO;
		resetpriority(p);
	}

	/*
	 * Process is about to yield the CPU; clear the appropriate
	 * scheduling flags.
	 */
	spc->spc_flags &= ~SPCF_SWITCHCLEAR;

#ifdef KSTACK_CHECK_MAGIC
	kstack_check_magic(p);
#endif

	/*
	 * If we are using h/w performance counters, save context.
	 */
#if defined(PERFCTRS)
	if (PMC_ENABLED(p))
		pmc_save_context(p);
#endif

	/*
	 * Switch to the new current process.  When we
	 * run again, we'll return back here.
	 */
	uvmexp.swtch++;
	cpu_switch(p, NULL);

	/*
	 * If we are using h/w performance counters, restore context.
	 */
#if defined(PERFCTRS)
	if (PMC_ENABLED(p))
		pmc_restore_context(p);
#endif

	/*
	 * Make sure that MD code released the scheduler lock before
	 * resuming us.
	 */
	SCHED_ASSERT_UNLOCKED();

	/*
	 * We're running again; record our new start time.  We might
	 * be running on a new CPU now, so don't use the cached
	 * schedstate_percpu pointer.
	 */
	KDASSERT(p->p_cpu != NULL);
	KDASSERT(p->p_cpu == curcpu());
	microtime(&p->p_cpu->ci_schedstate.spc_runtime);

#if defined(MULTIPROCESSOR)
	/*
	 * Reacquire the kernel_lock now.  We do this after we've
	 * released the scheduler lock to avoid deadlock, and before
	 * we reacquire the interlock.
	 */
	if (p->p_flag & P_BIGLOCK)
		spinlock_acquire_count(&kernel_lock, hold_count);
#endif
}

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
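/*
 * Each queue header is cast to a struct proc below; this relies on
 * ph_link/ph_rlink lying at the same offsets as p_forw/p_back, so an
 * empty queue is one whose header links point back at itself.
 */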
void
rqinit(void)
{
	int i;

	for (i = 0; i < RUNQUE_NQS; i++)
		sched_qs[i].ph_link = sched_qs[i].ph_rlink =
		    (struct proc *)&sched_qs[i];
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(struct proc *p)
{

	SCHED_ASSERT_LOCKED();

	switch (p->p_stat) {
	case 0:
	case SRUN:
	case SONPROC:
	case SZOMB:
	case SDEAD:
	default:
		panic("setrunnable");
	case SSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the debugger.
		 */
		if ((p->p_flag & P_TRACED) != 0 && p->p_xstat != 0) {
			sigaddset(&p->p_sigctx.ps_siglist, p->p_xstat);
			CHECKSIGS(p);
		}
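		/* FALLTHROUGH */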
	case SSLEEP:
		unsleep(p);		/* e.g. when sending signals */
		break;

	case SIDL:
		break;
	}
	p->p_stat = SRUN;
	if (p->p_flag & P_INMEM)
		setrunqueue(p);

	if (p->p_slptime > 1)
		updatepri(p);
	p->p_slptime = 0;
	if ((p->p_flag & P_INMEM) == 0)
		sched_wakeup((caddr_t)&proc0);
	else if (p->p_priority < curcpu()->ci_schedstate.spc_curpriority) {
		/*
		 * XXXSMP
		 * This is not exactly right.  Since p->p_cpu persists
		 * across a context switch, this gives us some sort
		 * of processor affinity.  But we need to figure out
		 * at what point it's better to reschedule on a different
		 * CPU than the last one.
		 */
		need_resched((p->p_cpu != NULL) ? p->p_cpu : curcpu());
	}
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(struct proc *p)
{
	unsigned int newpriority;

	SCHED_ASSERT_LOCKED();

	newpriority = PUSER + p->p_estcpu + NICE_WEIGHT * (p->p_nice - NZERO);
	newpriority = min(newpriority, MAXPRI);
	p->p_usrpri = newpriority;
	if (newpriority < curcpu()->ci_schedstate.spc_curpriority) {
		/*
		 * XXXSMP
		 * Same applies as in setrunnable() above.
		 */
		need_resched((p->p_cpu != NULL) ? p->p_cpu : curcpu());
	}
}

/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The cpu usage estimator (p_estcpu)
 * is increased here.  The formula for computing priorities (in kern_synch.c)
 * will compute a different value each time p_estcpu increases. This can
 * cause a switch, but unless the priority crosses a PPQ boundary the actual
 * queue will not change.  The cpu usage estimator ramps up quite quickly
 * when the process is running (linearly), and decays away exponentially, at
 * a rate which is proportionally slower when the system is busy.  The basic
 * principle is that the system will 90% forget that the process used a lot
 * of CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin among other
 * processes.
 */

void
schedclock(struct proc *p)
{
	int s;

	p->p_estcpu = ESTCPULIM(p->p_estcpu + 1);

	SCHED_LOCK(s);
	resetpriority(p);
	SCHED_UNLOCK(s);

	if (p->p_priority >= PUSER)
		p->p_priority = p->p_usrpri;
}

void
suspendsched(void)
{
	struct proc *p;
	int s;

	/*
	 * Convert all non-P_SYSTEM SSLEEP or SRUN processes to SSTOP.
	 */
	proclist_lock_read();
	SCHED_LOCK(s);
	LIST_FOREACH(p, &allproc, p_list) {
		if ((p->p_flag & P_SYSTEM) != 0)
			continue;
		switch (p->p_stat) {
		case SRUN:
			if ((p->p_flag & P_INMEM) != 0)
				remrunqueue(p);
			/* FALLTHROUGH */
		case SSLEEP:
			p->p_stat = SSTOP;
			break;
		case SONPROC:
			/*
			 * XXX SMP: we need to deal with processes on
			 * other CPUs!
			 */
			break;
		default:
			break;
		}
	}
	SCHED_UNLOCK(s);
	proclist_unlock_read();
}

/*
 * Low-level routines to access the run queue.  Optimised assembler
 * routines can override these.
 */

#ifndef __HAVE_MD_RUNQUEUE

/*
 * The primitives that manipulate the run queues.  sched_whichqs tells
 * which of the 32 queues sched_qs have processes in them.  setrunqueue()
 * puts processes into queues, remrunqueue() removes them from queues.
 * The running process is on no queue; other processes are on the queue
 * indexed by p->p_priority / 4, which shrinks the 0-127 range of
 * priorities into the 32 available queues.
 */

void
setrunqueue(struct proc *p)
{
	struct prochd *rq;
	struct proc *prev;
	int whichq;

#ifdef DIAGNOSTIC
	if (p->p_back != NULL || p->p_wchan != NULL || p->p_stat != SRUN)
		panic("setrunqueue");
#endif
	whichq = p->p_priority / 4;
	sched_whichqs |= (1 << whichq);
	rq = &sched_qs[whichq];
	prev = rq->ph_rlink;
	p->p_forw = (struct proc *)rq;
	rq->ph_rlink = p;
	prev->p_forw = p;
	p->p_back = prev;
}
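
/*
 * Illustrative sketch, not from the original source: how a dispatcher
 * finds the best non-empty run queue from the sched_whichqs bitmap.
 * MD cpu_switch() implementations do the equivalent, often in
 * assembler, with a find-first-set instruction.
 */
#if 0
	int qidx = ffs(sched_whichqs) - 1;	/* lowest set bit: best queue */
	struct prochd *rq = &sched_qs[qidx];
	struct proc *p = rq->ph_link;		/* first process on that queue */
#endif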

void
remrunqueue(struct proc *p)
{
	struct proc *prev, *next;
	int whichq;

	whichq = p->p_priority / 4;
#ifdef DIAGNOSTIC
	if ((sched_whichqs & (1 << whichq)) == 0)
		panic("remrunqueue");
#endif
	prev = p->p_back;
	p->p_back = NULL;
	next = p->p_forw;
	prev->p_forw = next;
	next->p_back = prev;
	if (prev == next)
		sched_whichqs &= ~(1 << whichq);
}

#endif
1123