/*	$OpenBSD: kern_clock.c,v 1.79 2013/03/12 09:37:16 mpi Exp $	*/
/*	$NetBSD: kern_clock.c,v 1.34 1996/06/09 04:51:03 briggs Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dkstat.h>
#include <sys/timeout.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <uvm/uvm_extern.h>
#include <sys/sysctl.h>
#include <sys/sched.h>
#include <sys/timetc.h>

#include <machine/cpu.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

/*
 * Clock handling routines.
 *
 * This code is written to operate with two timers that run independently of
 * each other.  The main clock, running hz times per second, is used to keep
 * track of real time.  The second timer handles kernel and user profiling,
 * and does resource use estimation.  If the second timer is programmable,
 * it is randomized to avoid aliasing between the two clocks.  For example,
 * the randomization prevents an adversary from always giving up the cpu
 * just before its quantum expires.  Otherwise, it would never accumulate
 * cpu ticks.  The mean frequency of the second timer is stathz.
 *
 * If no second timer exists, stathz will be zero; in this case we drive
 * profiling and statistics off the main clock.  This WILL NOT be accurate;
 * do not do it unless absolutely necessary.
 *
 * The statistics clock may (or may not) be run at a higher rate while
 * profiling.  This profile clock runs at profhz.  We require that profhz
 * be an integral multiple of stathz.
 *
 * If the statistics clock is running fast, it must be divided by the ratio
 * profhz/stathz for statistics.  (For profiling, every tick counts.)
 */
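
/*
 * Worked example (illustrative, not from the original source): with a
 * hypothetical stathz of 128 and profhz of 1024, the profile clock ticks
 * psratio = profhz / stathz = 1024 / 128 = 8 times for every statistics
 * tick, so while profiling only every 8th profile-clock tick is counted
 * toward the statistics described above.
 */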

/*
 * Bump a timeval by a small number of usec's.
 */
#define BUMPTIME(t, usec) { \
	volatile struct timeval *tp = (t); \
	long us; \
 \
	tp->tv_usec = us = tp->tv_usec + (usec); \
	if (us >= 1000000) { \
		tp->tv_usec = us - 1000000; \
		tp->tv_sec++; \
	} \
}
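
#if 0	/* illustrative usage sketch, not part of the original file */
/*
 * Example of BUMPTIME: advance a timeval by 1500 microseconds.  A single
 * overflow of tv_usec past one second carries into tv_sec, which is why
 * the macro is only suitable for small increments.  The function name is
 * hypothetical.
 */
void
bumptime_example(void)
{
	struct timeval tv = { 0, 999000 };

	BUMPTIME(&tv, 1500);	/* tv is now { 1, 500 } */
}
#endif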

int	stathz;
int	schedhz;
int	profhz;
int	profprocs;
int	ticks;
static int psdiv, pscnt;		/* prof => stat divider */
int	psratio;			/* ratio: prof / stat */

long cp_time[CPUSTATES];

void	*softclock_si;

/*
 * Initialize clock frequencies and start both clocks running.
 */
void
initclocks(void)
{
	int i;

	softclock_si = softintr_establish(IPL_SOFTCLOCK, softclock, NULL);
	if (softclock_si == NULL)
		panic("initclocks: unable to register softclock intr");

	/*
	 * Set divisors to 1 (normal case) and let the machine-specific
	 * code do its bit.
	 */
	psdiv = pscnt = 1;
	cpu_initclocks();

	/*
	 * Compute profhz/stathz, and fix profhz if needed.
	 */
	i = stathz ? stathz : hz;
	if (profhz == 0)
		profhz = i;
	psratio = profhz / i;

	/* For very large HZ, ensure that division by 0 does not occur later */
	if (tickadj == 0)
		tickadj = 1;

	inittimecounter();
}

/*
 * hardclock does the accounting needed for ITIMER_PROF and ITIMER_VIRTUAL.
 * We don't want to send signals with psignal from hardclock because it
 * makes MULTIPROCESSOR locking very complicated.  Instead we use a small
 * trick to deliver the signals safely without blocking too many interrupts
 * in the process (signal handling can be heavy).
 *
 * hardclock detects that an itimer has expired and schedules a timeout
 * to deliver the signal.  This works for the following reasons:
 *  - The timeout can be scheduled with a one-tick delay because we do
 *    it before the timeout processing in hardclock, so it is scheduled
 *    to run as soon as possible.
 *  - The timeout runs from softclock, which runs before we return to
 *    userland and process pending signals.
 *  - If the system is so busy that several VIRTUAL/PROF ticks occur
 *    before softclock processing, we still send only one signal.  But
 *    if we sent the signal from hardclock, only one signal would be
 *    delivered to the user process anyway, so userland sees no
 *    difference.
 */

void
virttimer_trampoline(void *v)
{
	struct process *pr = v;

	psignal(pr->ps_mainproc, SIGVTALRM);
}

void
proftimer_trampoline(void *v)
{
	struct process *pr = v;

	psignal(pr->ps_mainproc, SIGPROF);
}

/*
 * The real-time timer, interrupting hz times per second.
 */
void
hardclock(struct clockframe *frame)
{
	struct proc *p;
	struct cpu_info *ci = curcpu();

	p = curproc;
	if (p && ((p->p_flag & (P_SYSTEM | P_WEXIT)) == 0)) {
		struct process *pr = p->p_p;

		/*
		 * Run current process's virtual and profile time, as needed.
		 */
		if (CLKF_USERMODE(frame) &&
		    timerisset(&pr->ps_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&pr->ps_timer[ITIMER_VIRTUAL], tick) == 0)
			timeout_add(&pr->ps_virt_to, 1);
		if (timerisset(&pr->ps_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&pr->ps_timer[ITIMER_PROF], tick) == 0)
			timeout_add(&pr->ps_prof_to, 1);
	}

	/*
	 * If no separate statistics clock is available, run it from here.
	 */
	if (stathz == 0)
		statclock(frame);

	if (--ci->ci_schedstate.spc_rrticks <= 0)
		roundrobin(ci);

	/*
	 * If we are not the primary CPU, we're not allowed to do
	 * any more work.
	 */
	if (CPU_IS_PRIMARY(ci) == 0)
		return;

	tc_ticktock();

	/*
	 * Update real-time timeout queue.
	 * Process callouts at a very low cpu priority, so we don't keep the
	 * relatively high clock interrupt priority any longer than necessary.
	 */
	if (timeout_hardclock_update())
		softintr_schedule(softclock_si);
}

/*
 * Compute the number of clock ticks (hz) until the specified time.  Used
 * to compute the second argument to timeout_add() from an absolute time.
 */
int
hzto(const struct timeval *tv)
{
	struct timeval now;
	unsigned long ticks;
	long sec, usec;

	/*
	 * If the number of usecs in the whole seconds part of the time
	 * difference fits in a long, then the total number of usecs will
	 * fit in an unsigned long.  Compute the total and convert it to
	 * ticks, rounding up and adding 1 to allow for the current tick
	 * to expire.  Rounding also depends on unsigned long arithmetic
	 * to avoid overflow.
	 *
	 * Otherwise, if the number of ticks in the whole seconds part of
	 * the time difference fits in a long, then convert the parts to
	 * ticks separately and add, using similar rounding methods and
	 * overflow avoidance.  This method would work in the previous
	 * case but it is slightly slower and assumes that hz is integral.
	 *
	 * Otherwise, round the time difference down to the maximum
	 * representable value.
	 *
	 * If ints have 32 bits, then the maximum value for any timeout in
	 * 10ms ticks is 248 days.
	 */
	getmicrotime(&now);
	sec = tv->tv_sec - now.tv_sec;
	usec = tv->tv_usec - now.tv_usec;
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0 || (sec == 0 && usec <= 0)) {
		ticks = 0;
	} else if (sec <= LONG_MAX / 1000000)
		ticks = (sec * 1000000 + (unsigned long)usec + (tick - 1))
		    / tick + 1;
	else if (sec <= LONG_MAX / hz)
		ticks = sec * hz
		    + ((unsigned long)usec + (tick - 1)) / tick + 1;
	else
		ticks = LONG_MAX;
	if (ticks > INT_MAX)
		ticks = INT_MAX;
	return ((int)ticks);
}
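
#if 0	/* illustrative usage sketch, not part of the original file */
/*
 * Example of hzto(): arm a timeout to fire at an absolute time.  With
 * hz = 100 (tick = 10000 usec), a deadline 0.5 seconds in the future
 * yields (500000 + 9999) / 10000 + 1 = 51 ticks.  "my_timeout" and
 * "deadline" are hypothetical names.
 */
void
hzto_example(struct timeout *my_timeout, const struct timeval *deadline)
{
	timeout_add(my_timeout, hzto(deadline));
}
#endif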

/*
 * Compute the number of clock ticks in the specified amount of time.
 */
int
tvtohz(const struct timeval *tv)
{
	unsigned long ticks;
	long sec, usec;

	/*
	 * If the number of usecs in the whole seconds part of the time
	 * fits in a long, then the total number of usecs will
	 * fit in an unsigned long.  Compute the total and convert it to
	 * ticks, rounding up and adding 1 to allow for the current tick
	 * to expire.  Rounding also depends on unsigned long arithmetic
	 * to avoid overflow.
	 *
	 * Otherwise, if the number of ticks in the whole seconds part of
	 * the time fits in a long, then convert the parts to
	 * ticks separately and add, using similar rounding methods and
	 * overflow avoidance.  This method would work in the previous
	 * case but it is slightly slower and assumes that hz is integral.
	 *
	 * Otherwise, round the time down to the maximum
	 * representable value.
	 *
	 * If ints have 32 bits, then the maximum value for any timeout in
	 * 10ms ticks is 248 days.
	 */
	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (sec < 0 || (sec == 0 && usec <= 0))
		ticks = 0;
	else if (sec <= LONG_MAX / 1000000)
		ticks = (sec * 1000000 + (unsigned long)usec + (tick - 1))
		    / tick + 1;
	else if (sec <= LONG_MAX / hz)
		ticks = sec * hz
		    + ((unsigned long)usec + (tick - 1)) / tick + 1;
	else
		ticks = LONG_MAX;
	if (ticks > INT_MAX)
		ticks = INT_MAX;
	return ((int)ticks);
}
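
/*
 * Worked example (illustrative, not from the original source): with a
 * hypothetical hz = 100 (tick = 10000 usec), tvtohz() of a 2-second
 * interval { 2, 0 } computes (2000000 + 9999) / 10000 + 1 = 201 ticks.
 */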

/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(struct process *pr)
{
	int s;

	if ((pr->ps_flags & PS_PROFIL) == 0) {
		atomic_setbits_int(&pr->ps_flags, PS_PROFIL);
		if (++profprocs == 1 && stathz != 0) {
			s = splstatclock();
			psdiv = pscnt = psratio;
			setstatclockrate(profhz);
			splx(s);
		}
	}
}

/*
 * Stop profiling on a process.
 */
void
stopprofclock(struct process *pr)
{
	int s;

	if (pr->ps_flags & PS_PROFIL) {
		atomic_clearbits_int(&pr->ps_flags, PS_PROFIL);
		if (--profprocs == 0 && stathz != 0) {
			s = splstatclock();
			psdiv = pscnt = 1;
			setstatclockrate(stathz);
			splx(s);
		}
	}
}

/*
 * Statistics clock.  Grab profile sample, and if divider reaches 0,
 * do process and kernel statistics.
 */
void
statclock(struct clockframe *frame)
{
#ifdef GPROF
	struct gmonparam *g;
	u_long i;
#endif
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	struct proc *p = curproc;
	struct process *pr;

	/*
	 * Notice changes in divisor frequency, and adjust clock
	 * frequency accordingly.
	 */
	if (spc->spc_psdiv != psdiv) {
		spc->spc_psdiv = psdiv;
		spc->spc_pscnt = psdiv;
		if (psdiv == 1) {
			setstatclockrate(stathz);
		} else {
			setstatclockrate(profhz);
		}
	}

	if (CLKF_USERMODE(frame)) {
		pr = p->p_p;
		if (pr->ps_flags & PS_PROFIL)
			addupc_intr(p, CLKF_PC(frame));
		if (--spc->spc_pscnt > 0)
			return;
		/*
		 * Came from user mode; CPU was in user state.
		 * If this process is being profiled record the tick.
		 */
		p->p_uticks++;
		if (pr->ps_nice > NZERO)
			spc->spc_cp_time[CP_NICE]++;
		else
			spc->spc_cp_time[CP_USER]++;
	} else {
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = ci->ci_gmon;
		if (g != NULL && g->state == GMON_PROF_ON) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
#if defined(PROC_PC)
		if (p != NULL && p->p_p->ps_flags & PS_PROFIL)
			addupc_intr(p, PROC_PC(p));
#endif
		if (--spc->spc_pscnt > 0)
			return;
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 */
		if (CLKF_INTR(frame)) {
			if (p != NULL)
				p->p_iticks++;
			spc->spc_cp_time[CP_INTR]++;
		} else if (p != NULL && p != spc->spc_idleproc) {
			p->p_sticks++;
			spc->spc_cp_time[CP_SYS]++;
		} else
			spc->spc_cp_time[CP_IDLE]++;
	}
	spc->spc_pscnt = psdiv;

	if (p != NULL) {
		p->p_cpticks++;
		/*
		 * If no separate schedclock is provided, call it here
		 * at roughly 12-25 Hz (every fourth statclock tick);
		 * about 16 Hz is best.
		 */
		if (schedhz == 0) {
			if ((++curcpu()->ci_schedstate.spc_schedticks & 3) ==
			    0)
				schedclock(p);
		}
	}
}

/*
 * Return information about system clocks.
 */
int
sysctl_clockrate(char *where, size_t *sizep, void *newp)
{
	struct clockinfo clkinfo;

	/*
	 * Construct clockinfo structure.
	 */
	clkinfo.tick = tick;
	clkinfo.tickadj = tickadj;
	clkinfo.hz = hz;
	clkinfo.profhz = profhz;
	clkinfo.stathz = stathz ? stathz : hz;
	return (sysctl_rdstruct(where, sizep, newp, &clkinfo, sizeof(clkinfo)));
}
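
#if 0	/* illustrative userland sketch, not part of the original file */
/*
 * Example of reading these values from userland with sysctl(2) using the
 * CTL_KERN / KERN_CLOCKRATE MIB, which returns the struct clockinfo
 * filled in above.
 */
#include <sys/param.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	struct clockinfo ci;
	size_t len = sizeof(ci);
	int mib[2] = { CTL_KERN, KERN_CLOCKRATE };

	if (sysctl(mib, 2, &ci, &len, NULL, 0) == -1)
		return (1);
	printf("hz=%d stathz=%d profhz=%d tick=%d\n",
	    ci.hz, ci.stathz, ci.profhz, ci.tick);
	return (0);
}
#endif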