/*	$OpenBSD: sched_bsd.c,v 1.30 2012/07/09 17:27:32 haesbaert Exp $	*/
/*	$NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.6 (Berkeley) 1/21/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <uvm/uvm_extern.h>
#include <sys/sched.h>
#include <sys/timeout.h>

#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

int	lbolt;			/* once a second sleep address */
int	rrticks_init;		/* # of hardclock ticks per roundrobin() */

#ifdef MULTIPROCESSOR
struct __mp_lock sched_lock;
#endif

void schedcpu(void *);

void
scheduler_start(void)
{
	static struct timeout schedcpu_to;

	/*
	 * We avoid polluting the global namespace by keeping the scheduler
	 * timeout static in this function.
	 * We set up the timeout here and kick schedcpu once to make it do
	 * its job.
	 */

	timeout_set(&schedcpu_to, schedcpu, &schedcpu_to);

	rrticks_init = hz / 10;
	schedcpu(&schedcpu_to);
}

/*
 * Force switch among equal priority processes every 100ms.
 */
void
roundrobin(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	spc->spc_rrticks = rrticks_init;

	if (ci->ci_curproc != NULL) {
		if (spc->spc_schedflags & SPCF_SEENRR) {
			/*
			 * The process has already been through a roundrobin
			 * without switching and may be hogging the CPU.
			 * Indicate that the process should yield.
			 */
			atomic_setbits_int(&spc->spc_schedflags,
			    SPCF_SHOULDYIELD);
		} else {
			atomic_setbits_int(&spc->spc_schedflags,
			    SPCF_SEENRR);
		}
	}

	if (spc->spc_nrun)
		need_resched(ci);
}
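
/*
 * Illustrative only: with the traditional hz == 100, rrticks_init is
 * hz / 10 == 10 ticks, i.e. a 100ms quantum.  A minimal sketch of how a
 * periodic tick handler could invoke roundrobin() is shown below; the
 * decrement-and-test is an assumption for illustration, not the actual
 * hardclock() code.
 */
#if 0
void
roundrobin_tick_sketch(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	/* once the quantum's ticks are used up, start a new round */
	if (--spc->spc_rrticks <= 0)
		roundrobin(ci);
}
#endif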

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
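
/*
 * Worked example (illustrative only): with a load average of 1.0,
 * loadfactor() yields 2 * FSCALE and decay_cpu() multiplies by
 * 2*FSCALE / (2*FSCALE + FSCALE) == 2/3 each second.  Repeating that
 * until only 10% remains takes ln(.1)/ln(2/3) ~= 5.68 steps, which is
 * the loadav == 1 entry in the power table above.
 */
#if 0
u_int
decay_cpu_sketch(u_int estcpu)
{
	fixpt_t loadav = 1 * FSCALE;		/* assumed load average of 1.0 */
	fixpt_t loadfac = loadfactor(loadav);	/* == 2 * FSCALE */

	/* one second's worth of decay, i.e. estcpu * 2/3 (truncated) */
	return ((u_int)decay_cpu(loadfac, estcpu));
}
#endif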

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the percentage of CPU used by a
 * process.
 */
#define	CCPU_SHIFT	11
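
/*
 * Worked example (illustrative only): each schedcpu() pass below multiplies
 * p_pctcpu by ccpu/FSCALE == exp(-1/20), so a process that accumulates no
 * new ticks retains exp(-60/20) =~ 5% of its %cpu after 60 seconds -- the
 * "95% in 60 seconds" decay quoted above.
 */
#if 0
fixpt_t
pctcpu_decay_sketch(fixpt_t pctcpu)
{
	/* one second of decay, as done at the top of the schedcpu() loop */
	return ((pctcpu * ccpu) >> FSHIFT);
}
#endif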

/*
 * Recompute process priorities, every second.
 */
void
schedcpu(void *arg)
{
	struct timeout *to = (struct timeout *)arg;
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct proc *p;
	int s;
	unsigned int newcpu;
	int phz;

	/*
	 * If we have a statistics clock, use that to calculate CPU
	 * time, otherwise revert to using the profiling clock (which,
	 * in turn, defaults to hz if there is no separate profiling
	 * clock available)
	 */
	phz = stathz ? stathz : profhz;
	KASSERT(phz);

	LIST_FOREACH(p, &allproc, p_list) {
		/*
		 * Increment time in/out of memory and sleep time
		 * (if sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		p->p_swtime++;
		if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
			p->p_slptime++;
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (p->p_slptime > 1)
			continue;
		SCHED_LOCK(s);
		/*
		 * p_pctcpu is only for ps.
		 */
#if	(FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (phz == 100) ?
		    ((fixpt_t)p->p_cpticks) << (FSHIFT - CCPU_SHIFT) :
		    100 * (((fixpt_t)p->p_cpticks)
		    << (FSHIFT - CCPU_SHIFT)) / phz;
#else
		p->p_pctcpu += ((FSCALE - ccpu) *
		    (p->p_cpticks * FSCALE / phz)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		newcpu = (u_int) decay_cpu(loadfac, p->p_estcpu);
		p->p_estcpu = newcpu;
		resetpriority(p);
		if (p->p_priority >= PUSER) {
			if (p->p_stat == SRUN &&
			    (p->p_priority / SCHED_PPQ) !=
			    (p->p_usrpri / SCHED_PPQ)) {
				remrunqueue(p);
				p->p_priority = p->p_usrpri;
				setrunqueue(p);
			} else
				p->p_priority = p->p_usrpri;
		}
		SCHED_UNLOCK(s);
	}
	uvm_meter();
	wakeup(&lbolt);
	timeout_add_sec(to, 1);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
void
updatepri(struct proc *p)
{
	unsigned int newcpu = p->p_estcpu;
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);

	SCHED_ASSERT_LOCKED();

	if (p->p_slptime > 5 * loadfac)
		p->p_estcpu = 0;
	else {
		p->p_slptime--;	/* the first time was done in schedcpu */
		while (newcpu && --p->p_slptime)
			newcpu = (int) decay_cpu(loadfac, newcpu);
		p->p_estcpu = newcpu;
	}
	resetpriority(p);
}

/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.
 */
void
yield(void)
{
	struct proc *p = curproc;
	int s;

	SCHED_LOCK(s);
	p->p_priority = p->p_usrpri;
	p->p_stat = SRUN;
	setrunqueue(p);
	p->p_ru.ru_nvcsw++;
	mi_switch();
	SCHED_UNLOCK(s);
}

/*
 * General preemption call.  Puts the current process back on its run queue
 * and performs an involuntary context switch.  If a process is supplied,
 * we switch to that process.  Otherwise, we use the normal process selection
 * criteria.
 */
void
preempt(struct proc *newp)
{
	struct proc *p = curproc;
	int s;

	/*
	 * XXX Switching to a specific process is not supported yet.
	 */
	if (newp != NULL)
		panic("preempt: cpu_preempt not yet implemented");

	SCHED_LOCK(s);
	p->p_priority = p->p_usrpri;
	p->p_stat = SRUN;
	p->p_cpu = sched_choosecpu(p);
	setrunqueue(p);
	p->p_ru.ru_nivcsw++;
	mi_switch();
	SCHED_UNLOCK(s);
}

void
mi_switch(void)
{
	struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
	struct proc *p = curproc;
	struct proc *nextproc;
	struct process *pr = p->p_p;
	struct rlimit *rlim;
	rlim_t secs;
	struct timeval tv;
#ifdef MULTIPROCESSOR
	int hold_count;
	int sched_count;
#endif

	assertwaitok();
	KASSERT(p->p_stat != SONPROC);

	SCHED_ASSERT_LOCKED();

#ifdef MULTIPROCESSOR
	/*
	 * Release the kernel_lock, as we are about to yield the CPU.
	 */
	sched_count = __mp_release_all_but_one(&sched_lock);
	if (__mp_lock_held(&kernel_lock))
		hold_count = __mp_release_all(&kernel_lock);
	else
		hold_count = 0;
#endif

	/*
	 * Compute the amount of time during which the current
	 * process was running, and add that to its total so far.
	 */
	microuptime(&tv);
	if (timercmp(&tv, &spc->spc_runtime, <)) {
#if 0
		printf("uptime is not monotonic! "
		    "tv=%lu.%06lu, runtime=%lu.%06lu\n",
		    tv.tv_sec, tv.tv_usec, spc->spc_runtime.tv_sec,
		    spc->spc_runtime.tv_usec);
#endif
	} else {
		timersub(&tv, &spc->spc_runtime, &tv);
		timeradd(&p->p_rtime, &tv, &p->p_rtime);
	}

	/* add the time counts for this thread to the process's total */
	tuagg_unlocked(pr, p);

	/*
	 * Check if the process exceeds its cpu resource allocation.
	 * If over max, kill it.
	 */
	rlim = &pr->ps_limit->pl_rlimit[RLIMIT_CPU];
	secs = pr->ps_tu.tu_runtime.tv_sec;
	if (secs >= rlim->rlim_cur) {
		if (secs >= rlim->rlim_max) {
			psignal(p, SIGKILL);
		} else {
			psignal(p, SIGXCPU);
			if (rlim->rlim_cur < rlim->rlim_max)
				rlim->rlim_cur += 5;
		}
	}

	/*
	 * Process is about to yield the CPU; clear the appropriate
	 * scheduling flags.
	 */
	atomic_clearbits_int(&spc->spc_schedflags, SPCF_SWITCHCLEAR);

	nextproc = sched_chooseproc();

	if (p != nextproc) {
		uvmexp.swtch++;
		cpu_switchto(p, nextproc);
	} else {
		p->p_stat = SONPROC;
	}

	clear_resched(curcpu());

	SCHED_ASSERT_LOCKED();

	/*
	 * To preserve lock ordering, we need to release the sched lock
	 * and grab it after we grab the big lock.
	 * In the future, when the sched lock isn't recursive, we'll
	 * just release it here.
	 */
#ifdef MULTIPROCESSOR
	__mp_unlock(&sched_lock);
#endif

	SCHED_ASSERT_UNLOCKED();

	/*
	 * We're running again; record our new start time.  We might
	 * be running on a new CPU now, so don't use the cached
	 * schedstate_percpu pointer.
	 */
	KASSERT(p->p_cpu == curcpu());

	microuptime(&p->p_cpu->ci_schedstate.spc_runtime);

#ifdef MULTIPROCESSOR
	/*
	 * Reacquire the kernel_lock now.  We do this after we've
	 * released the scheduler lock to avoid deadlock, and before
	 * we reacquire the interlock and the scheduler lock.
	 */
	if (hold_count)
		__mp_acquire_count(&kernel_lock, hold_count);
	__mp_acquire_count(&sched_lock, sched_count + 1);
#endif
}

static __inline void
resched_proc(struct proc *p, u_char pri)
{
	struct cpu_info *ci;

	/*
	 * XXXSMP
	 * This does not handle the case where its last
	 * CPU is running a higher-priority process, but every
	 * other CPU is running a lower-priority process.  There
	 * are ways to handle this situation, but they're not
	 * currently very pretty, and we also need to weigh the
	 * cost of moving a process from one CPU to another.
	 *
	 * XXXSMP
	 * There is also the issue of locking the other CPU's
	 * sched state, which we currently do not do.
	 */
	ci = (p->p_cpu != NULL) ? p->p_cpu : curcpu();
	if (pri < ci->ci_schedstate.spc_curpriority)
		need_resched(ci);
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(struct proc *p)
{
	SCHED_ASSERT_LOCKED();

	switch (p->p_stat) {
	case 0:
	case SRUN:
	case SONPROC:
	case SZOMB:
	case SDEAD:
	case SIDL:
	default:
		panic("setrunnable");
	case SSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the debugger.
		 */
		if ((p->p_p->ps_flags & PS_TRACED) != 0 && p->p_xstat != 0)
			atomic_setbits_int(&p->p_siglist, sigmask(p->p_xstat));
	case SSLEEP:
		unsleep(p);		/* e.g. when sending signals */
		break;
	}
	p->p_stat = SRUN;
	p->p_cpu = sched_choosecpu(p);
	setrunqueue(p);
	if (p->p_slptime > 1)
		updatepri(p);
	p->p_slptime = 0;
	resched_proc(p, p->p_priority);
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(struct proc *p)
{
	unsigned int newpriority;

	SCHED_ASSERT_LOCKED();

	newpriority = PUSER + p->p_estcpu +
	    NICE_WEIGHT * (p->p_p->ps_nice - NZERO);
	newpriority = min(newpriority, MAXPRI);
	p->p_usrpri = newpriority;
	resched_proc(p, p->p_usrpri);
}

/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The cpu usage estimator (p_estcpu)
 * is increased here.  The formula for computing priorities (in
 * resetpriority() above) will compute a different value each time p_estcpu
 * increases.  This can cause a switch, but unless the priority crosses a PPQ
 * boundary the actual queue will not change.  The cpu usage estimator ramps
 * up quite quickly when the process is running (linearly), and decays away
 * exponentially, at a rate which is proportionally slower when the system is
 * busy.  The basic principle is that the system will 90% forget that the
 * process used a lot of CPU time in 5 * loadav seconds.  This causes the
 * system to favor processes which haven't run much recently, and to
 * round-robin among other processes.
 */
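
/*
 * Worked example (illustrative only, assuming the traditional values
 * PUSER == 50, NICE_WEIGHT == 2 and NZERO == 20 from param.h and sched.h):
 * a process at the default nice with p_estcpu == 20 gets
 * p_usrpri = 50 + 20 + 2 * 0 == 70; renicing it by +10 adds another 20,
 * and resetpriority() clamps anything past MAXPRI.
 */
#if 0
u_char
usrpri_sketch(u_int estcpu, int nice)
{
	u_int newpriority;

	/* same arithmetic as resetpriority() above */
	newpriority = PUSER + estcpu + NICE_WEIGHT * (nice - NZERO);
	return ((u_char)min(newpriority, MAXPRI));
}
#endif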

void
schedclock(struct proc *p)
{
	int s;

	SCHED_LOCK(s);
	p->p_estcpu = ESTCPULIM(p->p_estcpu + 1);
	resetpriority(p);
	if (p->p_priority >= PUSER)
		p->p_priority = p->p_usrpri;
	SCHED_UNLOCK(s);
}