xref: /dragonfly/sys/kern/kern_clock.c (revision 6700dd34)
1 /*
2  * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * Copyright (c) 1997, 1998 Poul-Henning Kamp <phk@FreeBSD.org>
35  * Copyright (c) 1982, 1986, 1991, 1993
36  *	The Regents of the University of California.  All rights reserved.
37  * (c) UNIX System Laboratories, Inc.
38  * All or some portions of this file are derived from material licensed
39  * to the University of California by American Telephone and Telegraph
40  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
41  * the permission of UNIX System Laboratories, Inc.
42  *
43  * Redistribution and use in source and binary forms, with or without
44  * modification, are permitted provided that the following conditions
45  * are met:
46  * 1. Redistributions of source code must retain the above copyright
47  *    notice, this list of conditions and the following disclaimer.
48  * 2. Redistributions in binary form must reproduce the above copyright
49  *    notice, this list of conditions and the following disclaimer in the
50  *    documentation and/or other materials provided with the distribution.
51  * 3. Neither the name of the University nor the names of its contributors
52  *    may be used to endorse or promote products derived from this software
53  *    without specific prior written permission.
54  *
55  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
56  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
57  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
58  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
59  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
60  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
61  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
62  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
63  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
64  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
65  * SUCH DAMAGE.
66  *
67  *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
68  * $FreeBSD: src/sys/kern/kern_clock.c,v 1.105.2.10 2002/10/17 13:19:40 maxim Exp $
69  */
70 
71 #include "opt_ntp.h"
72 #include "opt_pctrack.h"
73 
74 #include <sys/param.h>
75 #include <sys/systm.h>
76 #include <sys/callout.h>
77 #include <sys/kernel.h>
78 #include <sys/kinfo.h>
79 #include <sys/proc.h>
80 #include <sys/malloc.h>
81 #include <sys/resource.h>
82 #include <sys/resourcevar.h>
83 #include <sys/signalvar.h>
84 #include <sys/priv.h>
85 #include <sys/timex.h>
86 #include <sys/timepps.h>
87 #include <sys/upmap.h>
88 #include <sys/lock.h>
89 #include <sys/sysctl.h>
90 #include <sys/kcollect.h>
91 
92 #include <vm/vm.h>
93 #include <vm/pmap.h>
94 #include <vm/vm_map.h>
95 #include <vm/vm_extern.h>
96 
97 #include <sys/thread2.h>
98 #include <sys/spinlock2.h>
99 
100 #include <machine/cpu.h>
101 #include <machine/limits.h>
102 #include <machine/smp.h>
103 #include <machine/cpufunc.h>
104 #include <machine/specialreg.h>
105 #include <machine/clock.h>
106 
107 #ifdef GPROF
108 #include <sys/gmon.h>
109 #endif
110 
111 #ifdef DEBUG_PCTRACK
112 static void do_pctrack(struct intrframe *frame, int which);
113 #endif
114 
115 static void initclocks (void *dummy);
116 SYSINIT(clocks, SI_BOOT2_CLOCKS, SI_ORDER_FIRST, initclocks, NULL);
117 
118 /*
119  * Some of these don't belong here, but it's easiest to concentrate them.
120  * Note that cpu_time counts in microseconds, but most userland programs
121  * just compare relative times against the total by delta.
122  */
123 struct kinfo_cputime cputime_percpu[MAXCPU];
124 #ifdef DEBUG_PCTRACK
125 struct kinfo_pcheader cputime_pcheader = { PCTRACK_SIZE, PCTRACK_ARYSIZE };
126 struct kinfo_pctrack cputime_pctrack[MAXCPU][PCTRACK_SIZE];
127 #endif
128 
129 static int sniff_enable = 1;
130 static int sniff_target = -1;
131 SYSCTL_INT(_kern, OID_AUTO, sniff_enable, CTLFLAG_RW, &sniff_enable, 0 , "");
132 SYSCTL_INT(_kern, OID_AUTO, sniff_target, CTLFLAG_RW, &sniff_target, 0 , "");
133 
134 static int
135 sysctl_cputime(SYSCTL_HANDLER_ARGS)
136 {
137 	int cpu, error = 0;
138 	int root_error;
139 	size_t size = sizeof(struct kinfo_cputime);
140 	struct kinfo_cputime tmp;
141 
142 	/*
143 	 * NOTE: For security reasons, only root can sniff %rip
144 	 */
145 	root_error = priv_check_cred(curthread->td_ucred, PRIV_ROOT, 0);
146 
147 	for (cpu = 0; cpu < ncpus; ++cpu) {
148 		tmp = cputime_percpu[cpu];
149 		if (root_error == 0) {
150 			tmp.cp_sample_pc =
151 				(int64_t)globaldata_find(cpu)->gd_sample_pc;
152 			tmp.cp_sample_sp =
153 				(int64_t)globaldata_find(cpu)->gd_sample_sp;
154 		}
155 		if ((error = SYSCTL_OUT(req, &tmp, size)) != 0)
156 			break;
157 	}
158 
159 	if (root_error == 0) {
160 		if (sniff_enable) {
161 			int n = sniff_target;
162 			if (n < 0)
163 				smp_sniff();
164 			else if (n < ncpus)
165 				cpu_sniff(n);
166 		}
167 	}
168 
169 	return (error);
170 }
171 SYSCTL_PROC(_kern, OID_AUTO, cputime, (CTLTYPE_OPAQUE|CTLFLAG_RD), 0, 0,
172 	sysctl_cputime, "S,kinfo_cputime", "CPU time statistics");
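
/*
 * Userland sketch: reading the kern.cputime sysctl exported above.
 * One struct kinfo_cputime is returned per cpu; cp_sample_pc and
 * cp_sample_sp are zero unless the caller is root.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/kinfo.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	struct kinfo_cputime *cpt;
	size_t len = 0;
	size_t i, n;

	/* size the buffer, then fetch the per-cpu array */
	if (sysctlbyname("kern.cputime", NULL, &len, NULL, 0) < 0)
		exit(1);
	if ((cpt = malloc(len)) == NULL ||
	    sysctlbyname("kern.cputime", cpt, &len, NULL, 0) < 0)
		exit(1);
	n = len / sizeof(*cpt);
	for (i = 0; i < n; ++i) {
		printf("cpu%zu: user=%ju sys=%ju idle=%ju\n", i,
		       (uintmax_t)cpt[i].cp_user,
		       (uintmax_t)cpt[i].cp_sys,
		       (uintmax_t)cpt[i].cp_idle);
	}
	free(cpt);
	return (0);
}
#endif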
173 
174 static int
175 sysctl_cp_time(SYSCTL_HANDLER_ARGS)
176 {
177 	long cpu_states[CPUSTATES] = {0};
178 	int cpu, error = 0;
179 	size_t size = sizeof(cpu_states);
180 
181 	for (cpu = 0; cpu < ncpus; ++cpu) {
182 		cpu_states[CP_USER] += cputime_percpu[cpu].cp_user;
183 		cpu_states[CP_NICE] += cputime_percpu[cpu].cp_nice;
184 		cpu_states[CP_SYS] += cputime_percpu[cpu].cp_sys;
185 		cpu_states[CP_INTR] += cputime_percpu[cpu].cp_intr;
186 		cpu_states[CP_IDLE] += cputime_percpu[cpu].cp_idle;
187 	}
188 
189 	error = SYSCTL_OUT(req, cpu_states, size);
190 
191 	return (error);
192 }
193 
194 SYSCTL_PROC(_kern, OID_AUTO, cp_time, (CTLTYPE_LONG|CTLFLAG_RD), 0, 0,
195     sysctl_cp_time, "LU", "CPU time statistics");
196 
197 static int
198 sysctl_cp_times(SYSCTL_HANDLER_ARGS)
199 {
200 	long cpu_states[CPUSTATES] = {0};
201 	int cpu, error;
202 	size_t size = sizeof(cpu_states);
203 
204 	for (error = 0, cpu = 0; error == 0 && cpu < ncpus; ++cpu) {
205 		cpu_states[CP_USER] = cputime_percpu[cpu].cp_user;
206 		cpu_states[CP_NICE] = cputime_percpu[cpu].cp_nice;
207 		cpu_states[CP_SYS] = cputime_percpu[cpu].cp_sys;
208 		cpu_states[CP_INTR] = cputime_percpu[cpu].cp_intr;
209 		cpu_states[CP_IDLE] = cputime_percpu[cpu].cp_idle;
210 		error = SYSCTL_OUT(req, cpu_states, size);
211 	}
212 
213 	return (error);
214 }
215 
216 SYSCTL_PROC(_kern, OID_AUTO, cp_times, (CTLTYPE_LONG|CTLFLAG_RD), 0, 0,
217     sysctl_cp_times, "LU", "per-CPU time statistics");
218 
219 /*
220  * boottime is used to calculate the 'real' uptime.  Do not confuse this with
221  * microuptime(), which is not drift compensated.  The real uptime
222  * with compensation is nanotime() - boottime.  boottime is recalculated
223  * whenever the real time is set based on the compensated elapsed time
224  * in seconds (gd->gd_time_seconds).
225  *
226  * The gd_time_seconds and gd_cpuclock_base fields remain fairly monotonic.
227  * Slight adjustments to gd_cpuclock_base are made to phase-lock it to
228  * the real time.
229  *
230  * WARNING! time_second can backstep on time corrections. Also, unlike
231  *          time_second, time_uptime is not a "real" time_t (seconds
232  *          since the Epoch) but seconds since booting.
233  */
234 struct timespec boottime;	/* boot time (realtime) for reference only */
235 time_t time_second;		/* read-only 'passive' realtime in seconds */
236 time_t time_uptime;		/* read-only 'passive' uptime in seconds */
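
/*
 * Sketch of the compensated 'real' uptime described above, i.e.
 * nanotime() - boottime (illustrative only; set_timeofday() maintains
 * only the tv_sec portion of boottime precisely).
 */
#if 0
struct timespec now, uptime;

nanotime(&now);
uptime.tv_sec = now.tv_sec - boottime.tv_sec;
uptime.tv_nsec = now.tv_nsec - boottime.tv_nsec;
if (uptime.tv_nsec < 0) {
	uptime.tv_nsec += 1000000000;
	--uptime.tv_sec;
}
#endif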
237 
238 /*
239  * basetime is used to calculate the compensated real time of day.  The
240  * basetime can be modified on a per-tick basis by the adjtime(),
241  * ntp_adjtime(), and sysctl-based time correction APIs.
242  *
243  * Note that frequency corrections can also be made by adjusting
244  * gd_cpuclock_base.
245  *
246  * basetime is a tail-chasing FIFO, updated only by cpu #0.  The FIFO is
247  * used on both SMP and UP systems to avoid MP races between cpu's and
248  * interrupt races on UP systems.
249  */
250 struct hardtime {
251 	__uint32_t time_second;
252 	sysclock_t cpuclock_base;
253 };
254 
255 #define BASETIME_ARYSIZE	16
256 #define BASETIME_ARYMASK	(BASETIME_ARYSIZE - 1)
257 static struct timespec basetime[BASETIME_ARYSIZE];
258 static struct hardtime hardtime[BASETIME_ARYSIZE];
259 static volatile int basetime_index;
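
/*
 * Ordering sketch for the basetime FIFO (illustrative only): the
 * writer, cpu #0, fills the next slot completely, fences, and only
 * then publishes the new index; readers snapshot the index, fence,
 * and then read the slot.  A slot is never modified while the
 * published index can still reach it, so readers never see a torn
 * timespec.
 */
#if 0
/* writer (cpu #0 only) */
ni = (basetime_index + 1) & BASETIME_ARYMASK;
basetime[ni] = new_basetime;		/* fill the slot first */
cpu_sfence();				/* slot visible before index */
basetime_index = ni;			/* publish */

/* reader (any cpu) */
ni = basetime_index;			/* snapshot index */
cpu_lfence();				/* index read before slot read */
bt = &basetime[ni];			/* stable slot */
#endif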
260 
261 static int
262 sysctl_get_basetime(SYSCTL_HANDLER_ARGS)
263 {
264 	struct timespec *bt;
265 	int error;
266 	int index;
267 
268 	/*
269 	 * Because basetime data and index may be updated by another cpu,
270 	 * a load fence is required to ensure that the data we read has
271 	 * not been speculatively read relative to a possibly updated index.
272 	 */
273 	index = basetime_index;
274 	cpu_lfence();
275 	bt = &basetime[index];
276 	error = SYSCTL_OUT(req, bt, sizeof(*bt));
277 	return (error);
278 }
279 
280 SYSCTL_STRUCT(_kern, KERN_BOOTTIME, boottime, CTLFLAG_RD,
281     &boottime, timespec, "System boottime");
282 SYSCTL_PROC(_kern, OID_AUTO, basetime, CTLTYPE_STRUCT|CTLFLAG_RD, 0, 0,
283     sysctl_get_basetime, "S,timespec", "System basetime");
284 
285 static void hardclock(systimer_t info, int, struct intrframe *frame);
286 static void statclock(systimer_t info, int, struct intrframe *frame);
287 static void schedclock(systimer_t info, int, struct intrframe *frame);
288 static void getnanotime_nbt(struct timespec *nbt, struct timespec *tsp);
289 
290 int	ticks;			/* system master ticks at hz */
291 int	clocks_running;		/* tsleep/timeout clocks operational */
292 int64_t	nsec_adj;		/* ntpd per-tick adjustment in nsec << 32 */
293 int64_t	nsec_acc;		/* accumulator */
294 int	sched_ticks;		/* global schedule clock ticks */
295 
296 /* NTPD time correction fields */
297 int64_t	ntp_tick_permanent;	/* per-tick adjustment in nsec << 32 */
298 int64_t	ntp_tick_acc;		/* accumulator for per-tick adjustment */
299 int64_t	ntp_delta;		/* one-time correction in nsec */
300 int64_t ntp_big_delta = 1000000000;
301 int32_t	ntp_tick_delta;		/* current adjustment rate */
302 int32_t	ntp_default_tick_delta;	/* adjustment rate for ntp_delta */
303 time_t	ntp_leap_second;	/* time of next leap second */
304 int	ntp_leap_insert;	/* whether to insert or remove a second */
305 struct spinlock ntp_spin;
306 
307 /*
308  * Finish initializing clock frequencies and start all clocks running.
309  */
310 /* ARGSUSED*/
311 static void
312 initclocks(void *dummy)
313 {
314 	/*psratio = profhz / stathz;*/
315 	spin_init(&ntp_spin, "ntp");
316 	initclocks_pcpu();
317 	clocks_running = 1;
318 	if (kpmap) {
319 	    kpmap->tsc_freq = (uint64_t)tsc_frequency;
320 	    kpmap->tick_freq = hz;
321 	}
322 }
323 
324 /*
325  * Called on a per-cpu basis from the idle thread bootstrap on each cpu
326  * during SMP initialization.
327  *
328  * This routine is called concurrently during low-level SMP initialization
329  * and may not block in any way.  Meaning, among other things, we can't
330  * acquire any tokens.
331  */
332 void
333 initclocks_pcpu(void)
334 {
335 	struct globaldata *gd = mycpu;
336 
337 	crit_enter();
338 	if (gd->gd_cpuid == 0) {
339 	    gd->gd_time_seconds = 1;
340 	    gd->gd_cpuclock_base = sys_cputimer->count();
341 	    hardtime[0].time_second = gd->gd_time_seconds;
342 	    hardtime[0].cpuclock_base = gd->gd_cpuclock_base;
343 	} else {
344 	    gd->gd_time_seconds = globaldata_find(0)->gd_time_seconds;
345 	    gd->gd_cpuclock_base = globaldata_find(0)->gd_cpuclock_base;
346 	}
347 
348 	systimer_intr_enable();
349 
350 	crit_exit();
351 }
352 
353 /*
354  * Called on a 10-second interval after the system is operational.
355  * Return the collection data for USERPCT and install the data for
356  * SYSTPCT and IDLEPCT.
357  */
358 static
359 uint64_t
360 collect_cputime_callback(int n)
361 {
362 	static long cpu_base[CPUSTATES];
363 	long cpu_states[CPUSTATES];
364 	long total;
365 	long acc;
366 	long lsb;
367 
368 	bzero(cpu_states, sizeof(cpu_states));
369 	for (n = 0; n < ncpus; ++n) {
370 		cpu_states[CP_USER] += cputime_percpu[n].cp_user;
371 		cpu_states[CP_NICE] += cputime_percpu[n].cp_nice;
372 		cpu_states[CP_SYS] += cputime_percpu[n].cp_sys;
373 		cpu_states[CP_INTR] += cputime_percpu[n].cp_intr;
374 		cpu_states[CP_IDLE] += cputime_percpu[n].cp_idle;
375 	}
376 
377 	acc = 0;
378 	for (n = 0; n < CPUSTATES; ++n) {
379 		total = cpu_states[n] - cpu_base[n];
380 		cpu_base[n] = cpu_states[n];
381 		cpu_states[n] = total;
382 		acc += total;
383 	}
384 	if (acc == 0)		/* prevent degenerate divide by 0 */
385 		acc = 1;
386 	lsb = acc / (10000 * 2);
387 	kcollect_setvalue(KCOLLECT_SYSTPCT,
388 			  (cpu_states[CP_SYS] + lsb) * 10000 / acc);
389 	kcollect_setvalue(KCOLLECT_IDLEPCT,
390 			  (cpu_states[CP_IDLE] + lsb) * 10000 / acc);
391 	kcollect_setvalue(KCOLLECT_INTRPCT,
392 			  (cpu_states[CP_INTR] + lsb) * 10000 / acc);
393 	return((cpu_states[CP_USER] + cpu_states[CP_NICE] + lsb) * 10000 / acc);
394 }
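
/*
 * Rounding sketch: the values installed above are in units of 1/100
 * of a percent, i.e. delta * 10000 / acc.  Adding lsb = acc / 20000
 * (half a unit) before the divide rounds to the nearest unit instead
 * of truncating.
 */
#if 0
/* e.g. acc = 1000000 total delta ticks, cp_sys delta = 123456 */
lsb = 1000000 / (10000 * 2);			/* 50 */
pct = (123456 + lsb) * 10000 / 1000000;		/* 1235, i.e. 12.35% */
#endif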
395 
396 /*
397  * This routine is called on just the BSP, just after SMP initialization
398  * completes to finish initializing any clocks that might contend/block
399  * (e.g. like on a token).  We can't do this in initclocks_pcpu() because
400  * that function is called from the idle thread bootstrap for each cpu and
401  * not allowed to block at all.
402  */
403 static
404 void
405 initclocks_other(void *dummy)
406 {
407 	struct globaldata *ogd = mycpu;
408 	struct globaldata *gd;
409 	int n;
410 
411 	for (n = 0; n < ncpus; ++n) {
412 		lwkt_setcpu_self(globaldata_find(n));
413 		gd = mycpu;
414 
415 		/*
416 		 * Use a non-queued periodic systimer to prevent multiple
417 		 * ticks from building up if the sysclock jumps forward
418 		 * (8254 gets reset).  The sysclock will never jump backwards.
419 		 * Our time sync is based on the actual sysclock, not the
420 		 * ticks count.
421 		 *
422 		 * Install statclock before hardclock to prevent statclock
423 		 * from misinterpreting gd_flags for tick assignment when
424 		 * they overlap.
425 		 */
426 		systimer_init_periodic_flags(&gd->gd_statclock, statclock,
427 					  NULL, stathz,
428 					  SYSTF_MSSYNC | SYSTF_FIRST);
429 		systimer_init_periodic_flags(&gd->gd_hardclock, hardclock,
430 					  NULL, hz, SYSTF_MSSYNC);
431 		/* XXX correct the frequency for scheduler / estcpu tests */
432 		systimer_init_periodic_flags(&gd->gd_schedclock, schedclock,
433 					  NULL, ESTCPUFREQ, SYSTF_MSSYNC);
434 	}
435 	lwkt_setcpu_self(ogd);
436 
437 	/*
438 	 * Regular data collection
439 	 */
440 	kcollect_register(KCOLLECT_USERPCT, "user", collect_cputime_callback,
441 			  KCOLLECT_SCALE(KCOLLECT_USERPCT_FORMAT, 0));
442 	kcollect_register(KCOLLECT_SYSTPCT, "syst", NULL,
443 			  KCOLLECT_SCALE(KCOLLECT_SYSTPCT_FORMAT, 0));
444 	kcollect_register(KCOLLECT_IDLEPCT, "idle", NULL,
445 			  KCOLLECT_SCALE(KCOLLECT_IDLEPCT_FORMAT, 0));
446 }
447 SYSINIT(clocks2, SI_BOOT2_POST_SMP, SI_ORDER_ANY, initclocks_other, NULL);
448 
449 /*
450  * This sets the current real time of day.  Timespecs are in seconds and
451  * nanoseconds.  We do not mess with gd_time_seconds and gd_cpuclock_base,
452  * instead we adjust basetime so basetime + gd_* results in the current
453  * time of day.  This way the gd_* fields are guaranteed to represent
454  * a monotonically increasing 'uptime' value.
455  *
456  * When set_timeofday() is called from userland, the system call forces it
457  * onto cpu #0 since only cpu #0 can update basetime_index.
458  */
459 void
460 set_timeofday(struct timespec *ts)
461 {
462 	struct timespec *nbt;
463 	int ni;
464 
465 	/*
466 	 * XXX SMP / non-atomic basetime updates
467 	 */
468 	crit_enter();
469 	ni = (basetime_index + 1) & BASETIME_ARYMASK;
470 	cpu_lfence();
471 	nbt = &basetime[ni];
472 	nanouptime(nbt);
473 	nbt->tv_sec = ts->tv_sec - nbt->tv_sec;
474 	nbt->tv_nsec = ts->tv_nsec - nbt->tv_nsec;
475 	if (nbt->tv_nsec < 0) {
476 	    nbt->tv_nsec += 1000000000;
477 	    --nbt->tv_sec;
478 	}
479 
480 	/*
481 	 * Note that basetime diverges from boottime as the clock drift is
482 	 * compensated for, so we cannot do away with boottime.  When setting
483 	 * the absolute time of day the drift is 0 (for an instant) and we
484 	 * can simply assign boottime to basetime.
485 	 *
486 	 * Note that nanouptime() is based on gd_time_seconds which is drift
487 	 * compensated up to a point (it is guaranteed to remain monotonically
488 	 * increasing).  gd_time_seconds is thus our best uptime guess and
489 	 * suitable for use in the boottime calculation.  It is already taken
490 	 * into account in the basetime calculation above.
491 	 */
492 	spin_lock(&ntp_spin);
493 	boottime.tv_sec = nbt->tv_sec;
494 	ntp_delta = 0;
495 
496 	/*
497 	 * We now have a new basetime, make sure all other cpus have it,
498 	 * then update the index.
499 	 */
500 	cpu_sfence();
501 	basetime_index = ni;
502 	spin_unlock(&ntp_spin);
503 
504 	crit_exit();
505 }
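
/*
 * Equation sketch: after the subtraction above the new basetime is
 *
 *	basetime = requested_realtime - uptime
 *
 * so a later nanotime(), which computes uptime' + basetime, yields
 * requested_realtime + (uptime' - uptime): the requested time of day
 * plus however much time has elapsed since it was set.
 */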
506 
507 /*
508  * Each cpu has its own hardclock, but we only increment ticks and softticks
509  * on cpu #0.
510  *
511  * NOTE! systimer! the MP lock might not be held here.  We can only safely
512  * manipulate objects owned by the current cpu.
513  */
514 static void
515 hardclock(systimer_t info, int in_ipi, struct intrframe *frame)
516 {
517 	sysclock_t cputicks;
518 	struct proc *p;
519 	struct globaldata *gd = mycpu;
520 
521 	if ((gd->gd_reqflags & RQF_IPIQ) == 0 && lwkt_need_ipiq_process(gd)) {
522 		/* Defer to doreti on passive IPIQ processing */
523 		need_ipiq();
524 	}
525 
526 	/*
527 	 * We update the compensation base to calculate fine-grained time
528 	 * from the sys_cputimer on a per-cpu basis in order to avoid
529 	 * having to mess around with locks.  sys_cputimer is assumed to
530 	 * be consistent across all cpus.  CPU N copies the base state from
531 	 * CPU 0 using the same FIFO trick that we use for basetime (so we
532 	 * don't catch a CPU 0 update in the middle).
533 	 *
534 	 * Note that we never allow info->time (aka gd->gd_hardclock.time)
535 	 * to reverse index gd_cpuclock_base, but that it is possible for
536 	 * it to temporarily get behind in the seconds if something in the
537 	 * system locks interrupts for a long period of time.  Since periodic
538 	 * timers count events, though everything should resynch again
539 	 * immediately.
540 	 */
541 	if (gd->gd_cpuid == 0) {
542 		int ni;
543 
544 		cputicks = info->time - gd->gd_cpuclock_base;
545 		if (cputicks >= sys_cputimer->freq) {
546 			cputicks /= sys_cputimer->freq;
547 			if (cputicks != 0 && cputicks != 1)
548 				kprintf("Warning: hardclock missed > 1 sec\n");
549 			gd->gd_time_seconds += cputicks;
550 			gd->gd_cpuclock_base += sys_cputimer->freq * cputicks;
551 			/* uncorrected monotonic 1-sec gran */
552 			time_uptime += cputicks;
553 		}
554 		ni = (basetime_index + 1) & BASETIME_ARYMASK;
555 		hardtime[ni].time_second = gd->gd_time_seconds;
556 		hardtime[ni].cpuclock_base = gd->gd_cpuclock_base;
557 	} else {
558 		int ni;
559 
560 		ni = basetime_index;
561 		cpu_lfence();
562 		gd->gd_time_seconds = hardtime[ni].time_second;
563 		gd->gd_cpuclock_base = hardtime[ni].cpuclock_base;
564 	}
565 
566 	/*
567 	 * The system-wide ticks counter and NTP related timedelta/tickdelta
568 	 * adjustments only occur on cpu #0.  NTP adjustments are accomplished
569 	 * by updating basetime.
570 	 */
571 	if (gd->gd_cpuid == 0) {
572 	    struct timespec *nbt;
573 	    struct timespec nts;
574 	    int leap;
575 	    int ni;
576 
577 	    ++ticks;
578 
579 #if 0
580 	    if (tco->tc_poll_pps)
581 		tco->tc_poll_pps(tco);
582 #endif
583 
584 	    /*
585 	     * Calculate the new basetime index.  We are in a critical section
586 	     * on cpu #0 and can safely play with basetime_index.  Start
587 	     * with the current basetime and then make adjustments.
588 	     */
589 	    ni = (basetime_index + 1) & BASETIME_ARYMASK;
590 	    nbt = &basetime[ni];
591 	    *nbt = basetime[basetime_index];
592 
593 	    /*
594 	     * ntp adjustments only occur on cpu 0 and are protected by
595 	     * ntp_spin.  This spinlock virtually never conflicts.
596 	     */
597 	    spin_lock(&ntp_spin);
598 
599 	    /*
600 	     * Apply adjtime corrections.  (adjtime() API)
601 	     *
602 	     * adjtime() only runs on cpu #0 so our critical section is
603 	     * sufficient to access these variables.
604 	     */
605 	    if (ntp_delta != 0) {
606 		nbt->tv_nsec += ntp_tick_delta;
607 		ntp_delta -= ntp_tick_delta;
608 		if ((ntp_delta > 0 && ntp_delta < ntp_tick_delta) ||
609 		    (ntp_delta < 0 && ntp_delta > ntp_tick_delta)) {
610 			ntp_tick_delta = ntp_delta;
611  		}
612  	    }
613 
614 	    /*
615 	     * Apply permanent frequency corrections.  (sysctl API)
616 	     */
617 	    if (ntp_tick_permanent != 0) {
618 		ntp_tick_acc += ntp_tick_permanent;
619 		if (ntp_tick_acc >= (1LL << 32)) {
620 		    nbt->tv_nsec += ntp_tick_acc >> 32;
621 		    ntp_tick_acc -= (ntp_tick_acc >> 32) << 32;
622 		} else if (ntp_tick_acc <= -(1LL << 32)) {
623 		    /* Negate ntp_tick_acc to avoid shifting the sign bit. */
624 		    nbt->tv_nsec -= (-ntp_tick_acc) >> 32;
625 		    ntp_tick_acc += ((-ntp_tick_acc) >> 32) << 32;
626 		}
627  	    }
628 
629 	    if (nbt->tv_nsec >= 1000000000) {
630 		    nbt->tv_sec++;
631 		    nbt->tv_nsec -= 1000000000;
632 	    } else if (nbt->tv_nsec < 0) {
633 		    nbt->tv_sec--;
634 		    nbt->tv_nsec += 1000000000;
635 	    }
636 
637 	    /*
638 	     * Another per-tick compensation.  (for ntp_adjtime() API)
639 	     */
640 	    if (nsec_adj != 0) {
641 		nsec_acc += nsec_adj;
642 		if (nsec_acc >= 0x100000000LL) {
643 		    nbt->tv_nsec += nsec_acc >> 32;
644 		    nsec_acc = (nsec_acc & 0xFFFFFFFFLL);
645 		} else if (nsec_acc <= -0x100000000LL) {
646 		    nbt->tv_nsec -= -nsec_acc >> 32;
647 		    nsec_acc = -(-nsec_acc & 0xFFFFFFFFLL);
648 		}
649 		if (nbt->tv_nsec >= 1000000000) {
650 		    nbt->tv_nsec -= 1000000000;
651 		    ++nbt->tv_sec;
652 		} else if (nbt->tv_nsec < 0) {
653 		    nbt->tv_nsec += 1000000000;
654 		    --nbt->tv_sec;
655 		}
656 	    }
657 	    spin_unlock(&ntp_spin);
658 
659 	    /************************************************************
660 	     *			LEAP SECOND CORRECTION			*
661 	     ************************************************************
662 	     *
663 	     * Taking into account all the corrections made above, figure
664 	     * out the new real time.  If the seconds field has changed
665 	     * then apply any pending leap-second corrections.
666 	     */
667 	    getnanotime_nbt(nbt, &nts);
668 
669 	    if (time_second != nts.tv_sec) {
670 		/*
671 		 * Apply leap second (sysctl API).  Adjust nts for changes
672 		 * so we do not have to call getnanotime_nbt again.
673 		 */
674 		if (ntp_leap_second) {
675 		    if (ntp_leap_second == nts.tv_sec) {
676 			if (ntp_leap_insert) {
677 			    nbt->tv_sec++;
678 			    nts.tv_sec++;
679 			} else {
680 			    nbt->tv_sec--;
681 			    nts.tv_sec--;
682 			}
683 			ntp_leap_second--;
684 		    }
685 		}
686 
687 		/*
688 		 * Apply leap second (ntp_adjtime() API), calculate a new
689 		 * nsec_adj field.  ntp_update_second() returns nsec_adj
690 		 * as a per-second value but we need it as a per-tick value.
691 		 */
692 		leap = ntp_update_second(time_second, &nsec_adj);
693 		nsec_adj /= hz;
694 		nbt->tv_sec += leap;
695 		nts.tv_sec += leap;
696 
697 		/*
698 		 * Update the time_second 'approximate time' global.
699 		 */
700 		time_second = nts.tv_sec;
701 	    }
702 
703 	    /*
704 	     * Finally, our new basetime is ready to go live!
705 	     */
706 	    cpu_sfence();
707 	    basetime_index = ni;
708 
709 	    /*
710 	     * Update kpmap on each tick.  TS updates are integrated with
711 	     * fences and upticks allowing userland to read the data
712 	     * deterministically.
713 	     */
714 	    if (kpmap) {
715 		int w;
716 
717 		w = (kpmap->upticks + 1) & 1;
718 		getnanouptime(&kpmap->ts_uptime[w]);
719 		getnanotime(&kpmap->ts_realtime[w]);
720 		cpu_sfence();
721 		++kpmap->upticks;
722 		cpu_sfence();
723 	    }
724 	}
725 
726 	/*
727 	 * lwkt thread scheduler fair queueing
728 	 */
729 	lwkt_schedulerclock(curthread);
730 
731 	/*
732 	 * softticks are handled for all cpus
733 	 */
734 	hardclock_softtick(gd);
735 
736 	/*
737 	 * Rollup accumulated vmstats, copy-back for critical path checks.
738 	 */
739 	vmstats_rollup_cpu(gd);
740 	mycpu->gd_vmstats = vmstats;
741 
742 	/*
743 	 * ITimer handling is per-tick, per-cpu.
744 	 *
745 	 * We must acquire the per-process token in order for ksignal()
746 	 * to be non-blocking.  For the moment this requires an AST fault,
747 	 * the ksignal() cannot be safely issued from this hard interrupt.
748 	 *
749 	 * XXX Even the trytoken here isn't right, and itimer operation in
750 	 *     a multi threaded environment is going to be weird at the
751 	 *     very least.
752 	 */
753 	if ((p = curproc) != NULL && lwkt_trytoken(&p->p_token)) {
754 		crit_enter_hard();
755 		if (p->p_upmap)
756 			++p->p_upmap->runticks;
757 
758 		if (frame && CLKF_USERMODE(frame) &&
759 		    timevalisset(&p->p_timer[ITIMER_VIRTUAL].it_value) &&
760 		    itimerdecr(&p->p_timer[ITIMER_VIRTUAL], ustick) == 0) {
761 			p->p_flags |= P_SIGVTALRM;
762 			need_user_resched();
763 		}
764 		if (timevalisset(&p->p_timer[ITIMER_PROF].it_value) &&
765 		    itimerdecr(&p->p_timer[ITIMER_PROF], ustick) == 0) {
766 			p->p_flags |= P_SIGPROF;
767 			need_user_resched();
768 		}
769 		crit_exit_hard();
770 		lwkt_reltoken(&p->p_token);
771 	}
772 	setdelayed();
773 }
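
/*
 * Accumulator sketch: ntp_tick_permanent is a signed per-tick rate in
 * units of nanoseconds << 32, so sub-nanosecond rates accumulate
 * exactly and whole nanoseconds are drained once the accumulator
 * crosses 1 << 32 (illustrative only).
 */
#if 0
int64_t rate = 1LL << 31;		/* +0.5 nsec per tick */

ntp_tick_acc += rate;			/* tick 1: acc = 0.5 nsec */
ntp_tick_acc += rate;			/* tick 2: acc = 1.0 nsec */
if (ntp_tick_acc >= (1LL << 32)) {
	nbt->tv_nsec += ntp_tick_acc >> 32;		/* apply 1 nsec */
	ntp_tick_acc -= (ntp_tick_acc >> 32) << 32;	/* keep fraction */
}
#endif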
774 
775 /*
776  * The statistics clock typically runs at a 125Hz rate, and is intended
777  * to be frequency offset from the hardclock (typ 100Hz).  It is per-cpu.
778  *
779  * NOTE! systimer! the MP lock might not be held here.  We can only safely
780  * manipulate objects owned by the current cpu.
781  *
782  * The stats clock is responsible for grabbing a profiling sample.
783  * Most of the statistics are only used by user-level statistics programs.
784  * The main exceptions are p->p_uticks, p->p_sticks, p->p_iticks, and
785  * p->p_estcpu.
786  *
787  * Like the other clocks, the stat clock is called from what is effectively
788  * a fast interrupt, so the context should be the thread/process that got
789  * interrupted.
790  */
791 static void
792 statclock(systimer_t info, int in_ipi, struct intrframe *frame)
793 {
794 #ifdef GPROF
795 	struct gmonparam *g;
796 	int i;
797 #endif
798 	globaldata_t gd = mycpu;
799 	thread_t td;
800 	struct proc *p;
801 	int bump;
802 	sysclock_t cv;
803 	sysclock_t scv;
804 
805 	/*
806 	 * How big was our timeslice relative to the last time?  Calculate
807 	 * in microseconds.
808 	 *
809 	 * NOTE: Use of microuptime() is typically MPSAFE, but usually not
810 	 *	 during early boot.  Just use the systimer count to be nice
811 	 *	 to e.g. qemu.  The systimer has a better chance of being
812 	 *	 MPSAFE at early boot.
813 	 */
814 	cv = sys_cputimer->count();
815 	scv = gd->statint.gd_statcv;
816 	if (scv == 0) {
817 		bump = 1;
818 	} else {
819 		bump = (sys_cputimer->freq64_usec * (cv - scv)) >> 32;
820 		if (bump < 0)
821 			bump = 0;
822 		if (bump > 1000000)
823 			bump = 1000000;
824 	}
825 	gd->statint.gd_statcv = cv;
826 
827 #if 0
828 	stv = &gd->gd_stattv;
829 	if (stv->tv_sec == 0) {
830 	    bump = 1;
831 	} else {
832 	    bump = tv.tv_usec - stv->tv_usec +
833 		(tv.tv_sec - stv->tv_sec) * 1000000;
834 	    if (bump < 0)
835 		bump = 0;
836 	    if (bump > 1000000)
837 		bump = 1000000;
838 	}
839 	*stv = tv;
840 #endif
841 
842 	td = curthread;
843 	p = td->td_proc;
844 
845 	if (frame && CLKF_USERMODE(frame)) {
846 		/*
847 		 * Came from userland, handle user time and deal with
848 		 * possible process.
849 		 */
850 		if (p && (p->p_flags & P_PROFIL))
851 			addupc_intr(p, CLKF_PC(frame), 1);
852 		td->td_uticks += bump;
853 
854 		/*
855 		 * Charge the time as appropriate
856 		 */
857 		if (p && p->p_nice > NZERO)
858 			cpu_time.cp_nice += bump;
859 		else
860 			cpu_time.cp_user += bump;
861 	} else {
862 		int intr_nest = gd->gd_intr_nesting_level;
863 
864 		if (in_ipi) {
865 			/*
866 			 * IPI processing code will bump gd_intr_nesting_level
867 			 * up by one, which breaks following CLKF_INTR testing,
868 			 * so we subtract one from it here.
869 			 */
870 			--intr_nest;
871 		}
872 #ifdef GPROF
873 		/*
874 		 * Kernel statistics are just like addupc_intr, only easier.
875 		 */
876 		g = &_gmonparam;
877 		if (g->state == GMON_PROF_ON && frame) {
878 			i = CLKF_PC(frame) - g->lowpc;
879 			if (i < g->textsize) {
880 				i /= HISTFRACTION * sizeof(*g->kcount);
881 				g->kcount[i]++;
882 			}
883 		}
884 #endif
885 
886 #define IS_INTR_RUNNING	((frame && CLKF_INTR(intr_nest)) || CLKF_INTR_TD(td))
887 
888 		/*
889 		 * Came from kernel mode, so we were:
890 		 * - handling an interrupt,
891 		 * - doing syscall or trap work on behalf of the current
892 		 *   user process, or
893 		 * - spinning in the idle loop.
894 		 * Whichever it is, charge the time as appropriate.
895 		 * Note that we charge interrupts to the current process,
896 		 * regardless of whether they are ``for'' that process,
897 		 * so that we know how much of its real time was spent
898 		 * in ``non-process'' (i.e., interrupt) work.
899 		 *
900 		 * XXX assume system if frame is NULL.  A NULL frame
901 		 * can occur if ipi processing is done from a crit_exit().
902 		 */
903 		if (IS_INTR_RUNNING ||
904 		    (gd->gd_reqflags & RQF_INTPEND)) {
905 			/*
906 			 * If we interrupted an interrupt thread, well,
907 			 * count it as interrupt time.
908 			 */
909 			td->td_iticks += bump;
910 #ifdef DEBUG_PCTRACK
911 			if (frame)
912 				do_pctrack(frame, PCTRACK_INT);
913 #endif
914 			cpu_time.cp_intr += bump;
915 		} else if (gd->gd_flags & GDF_VIRTUSER) {
916 			/*
917 			 * The vkernel doesn't do a good job providing trap
918 			 * frames that we can test.  If the GDF_VIRTUSER
919 			 * flag is set we probably interrupted user mode.
920 			 *
921 			 * We also use this flag on the host when entering
922 			 * VMM mode.
923 			 */
924 			td->td_uticks += bump;
925 
926 			/*
927 			 * Charge the time as appropriate
928 			 */
929 			if (p && p->p_nice > NZERO)
930 				cpu_time.cp_nice += bump;
931 			else
932 				cpu_time.cp_user += bump;
933 		} else {
934 			td->td_sticks += bump;
935 			if (td == &gd->gd_idlethread) {
936 				/*
937 				 * We want to count token contention as
938 				 * system time.  When token contention occurs
939 				 * the cpu may only be outside its critical
940 				 * section while switching through the idle
941 				 * thread.  In this situation, various flags
942 				 * will be set in gd_reqflags.
943 				 */
944 				if (gd->gd_reqflags & RQF_IDLECHECK_WK_MASK)
945 					cpu_time.cp_sys += bump;
946 				else
947 					cpu_time.cp_idle += bump;
948 			} else {
949 				/*
950 				 * System thread was running.
951 				 */
952 #ifdef DEBUG_PCTRACK
953 				if (frame)
954 					do_pctrack(frame, PCTRACK_SYS);
955 #endif
956 				cpu_time.cp_sys += bump;
957 			}
958 		}
959 
960 #undef IS_INTR_RUNNING
961 	}
962 }
963 
964 #ifdef DEBUG_PCTRACK
965 /*
966  * Sample the PC when in the kernel or in an interrupt.  User code can
967  * retrieve the information and generate a histogram or other output.
968  */
969 
970 static void
971 do_pctrack(struct intrframe *frame, int which)
972 {
973 	struct kinfo_pctrack *pctrack;
974 
975 	pctrack = &cputime_pctrack[mycpu->gd_cpuid][which];
976 	pctrack->pc_array[pctrack->pc_index & PCTRACK_ARYMASK] =
977 		(void *)CLKF_PC(frame);
978 	++pctrack->pc_index;
979 }
980 
981 static int
982 sysctl_pctrack(SYSCTL_HANDLER_ARGS)
983 {
984 	struct kinfo_pcheader head;
985 	int error;
986 	int cpu;
987 	int ntrack;
988 
989 	head.pc_ntrack = PCTRACK_SIZE;
990 	head.pc_arysize = PCTRACK_ARYSIZE;
991 
992 	if ((error = SYSCTL_OUT(req, &head, sizeof(head))) != 0)
993 		return (error);
994 
995 	for (cpu = 0; cpu < ncpus; ++cpu) {
996 		for (ntrack = 0; ntrack < PCTRACK_SIZE; ++ntrack) {
997 			error = SYSCTL_OUT(req, &cputime_pctrack[cpu][ntrack],
998 					   sizeof(struct kinfo_pctrack));
999 			if (error)
1000 				break;
1001 		}
1002 		if (error)
1003 			break;
1004 	}
1005 	return (error);
1006 }
1007 SYSCTL_PROC(_kern, OID_AUTO, pctrack, (CTLTYPE_OPAQUE|CTLFLAG_RD), 0, 0,
1008 	sysctl_pctrack, "S,kinfo_pcheader", "CPU PC tracking");
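
/*
 * Layout sketch: a kern.pctrack read returns one struct kinfo_pcheader
 * followed by ncpus * PCTRACK_SIZE struct kinfo_pctrack records in
 * cpu-major order, matching the loops above.  A hypothetical userland
 * consumer would index them as:
 */
#if 0
struct kinfo_pcheader head;	/* read first from the returned buffer */
struct kinfo_pctrack *recs;	/* the records following the header */
struct kinfo_pctrack *rec = &recs[cpu * head.pc_ntrack + track];
#endif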
1009 
1010 #endif
1011 
1012 /*
1013  * The scheduler clock typically runs at a 50Hz rate.  NOTE! systimer,
1014  * the MP lock might not be held.  We can safely manipulate parts of curproc
1015  * but that's about it.
1016  *
1017  * Each cpu has its own scheduler clock.
1018  */
1019 static void
1020 schedclock(systimer_t info, int in_ipi __unused, struct intrframe *frame)
1021 {
1022 	struct lwp *lp;
1023 	struct rusage *ru;
1024 	struct vmspace *vm;
1025 	long rss;
1026 
1027 	if ((lp = lwkt_preempted_proc()) != NULL) {
1028 		/*
1029 		 * Account for cpu time used and hit the scheduler.  Note
1030 		 * that this call MUST BE MP SAFE, and the BGL IS NOT HELD
1031 		 * HERE.
1032 		 */
1033 		++lp->lwp_cpticks;
1034 		usched_schedulerclock(lp, info->periodic, info->time);
1035 	} else {
1036 		usched_schedulerclock(NULL, info->periodic, info->time);
1037 	}
1038 	if ((lp = curthread->td_lwp) != NULL) {
1039 		/*
1040 		 * Update resource usage integrals and maximums.
1041 		 */
1042 		if ((ru = &lp->lwp_proc->p_ru) &&
1043 		    (vm = lp->lwp_proc->p_vmspace) != NULL) {
1044 			ru->ru_ixrss += pgtok(vm->vm_tsize);
1045 			ru->ru_idrss += pgtok(vm->vm_dsize);
1046 			ru->ru_isrss += pgtok(vm->vm_ssize);
1047 			if (lwkt_trytoken(&vm->vm_map.token)) {
1048 				rss = pgtok(vmspace_resident_count(vm));
1049 				if (ru->ru_maxrss < rss)
1050 					ru->ru_maxrss = rss;
1051 				lwkt_reltoken(&vm->vm_map.token);
1052 			}
1053 		}
1054 	}
1055 	/* Increment the global sched_ticks */
1056 	if (mycpu->gd_cpuid == 0)
1057 		++sched_ticks;
1058 }
1059 
1060 /*
1061  * Compute number of ticks for the specified amount of time.  The
1062  * return value is intended to be used in a clock interrupt timed
1063  * operation and guaranteed to meet or exceed the requested time.
1064  * If the representation overflows, return INT_MAX.  The minimum return
1065  * value is 1 tick and the function will round the calculation up.
1066  * If any value greater than 0 microseconds is supplied, a value
1067  * of at least 2 will be returned to ensure that a near-term clock
1068  * interrupt does not cause the timeout to occur (degenerately) early.
1069  *
1070  * Note that limit checks must take into account microseconds, which is
1071  * done simply by using the smaller signed long maximum instead of
1072  * the unsigned long maximum.
1073  *
1074  * If ints have 32 bits, then the maximum value for any timeout in
1075  * 10ms ticks is 248 days.
1076  */
1077 int
1078 tvtohz_high(struct timeval *tv)
1079 {
1080 	int ticks;
1081 	long sec, usec;
1082 
1083 	sec = tv->tv_sec;
1084 	usec = tv->tv_usec;
1085 	if (usec < 0) {
1086 		sec--;
1087 		usec += 1000000;
1088 	}
1089 	if (sec < 0) {
1090 #ifdef DIAGNOSTIC
1091 		if (usec > 0) {
1092 			sec++;
1093 			usec -= 1000000;
1094 		}
1095 		kprintf("tvtohz_high: negative time difference "
1096 			"%ld sec %ld usec\n",
1097 			sec, usec);
1098 #endif
1099 		ticks = 1;
1100 	} else if (sec <= INT_MAX / hz) {
1101 		ticks = (int)(sec * hz +
1102 			    ((u_long)usec + (ustick - 1)) / ustick) + 1;
1103 	} else {
1104 		ticks = INT_MAX;
1105 	}
1106 	return (ticks);
1107 }
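
/*
 * Usage sketch: at hz = 100 (ustick = 10000) a 10 msec timeout
 * converts to (0 * hz + (10000 + 9999) / 10000) + 1 = 2 ticks,
 * guaranteeing the full 10 msec elapses even if a clock interrupt
 * fires immediately.
 */
#if 0
struct timeval tv = { 0, 10000 };	/* 10 msec */
int nticks = tvtohz_high(&tv);		/* 2 at hz = 100 */
#endif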
1108 
1109 int
1110 tstohz_high(struct timespec *ts)
1111 {
1112 	int ticks;
1113 	long sec, nsec;
1114 
1115 	sec = ts->tv_sec;
1116 	nsec = ts->tv_nsec;
1117 	if (nsec < 0) {
1118 		sec--;
1119 		nsec += 1000000000;
1120 	}
1121 	if (sec < 0) {
1122 #ifdef DIAGNOSTIC
1123 		if (nsec > 0) {
1124 			sec++;
1125 			nsec -= 1000000000;
1126 		}
1127 		kprintf("tstohz_high: negative time difference "
1128 			"%ld sec %ld nsec\n",
1129 			sec, nsec);
1130 #endif
1131 		ticks = 1;
1132 	} else if (sec <= INT_MAX / hz) {
1133 		ticks = (int)(sec * hz +
1134 			    ((u_long)nsec + (nstick - 1)) / nstick) + 1;
1135 	} else {
1136 		ticks = INT_MAX;
1137 	}
1138 	return (ticks);
1139 }
1140 
1141 
1142 /*
1143  * Compute number of ticks for the specified amount of time, erroring on
1144  * the side of it being too low to ensure that sleeping the returned number
1145  * of ticks will not result in a late return.
1146  *
1147  * The supplied timeval may not be negative and should be normalized.  A
1148  * return value of 0 is possible if the timeval converts to less than
1149  * 1 tick.
1150  *
1151  * If ints have 32 bits, then the maximum value for any timeout in
1152  * 10ms ticks is 248 days.
1153  */
1154 int
1155 tvtohz_low(struct timeval *tv)
1156 {
1157 	int ticks;
1158 	long sec;
1159 
1160 	sec = tv->tv_sec;
1161 	if (sec <= INT_MAX / hz)
1162 		ticks = (int)(sec * hz + (u_long)tv->tv_usec / ustick);
1163 	else
1164 		ticks = INT_MAX;
1165 	return (ticks);
1166 }
1167 
1168 int
1169 tstohz_low(struct timespec *ts)
1170 {
1171 	int ticks;
1172 	long sec;
1173 
1174 	sec = ts->tv_sec;
1175 	if (sec <= INT_MAX / hz)
1176 		ticks = (int)(sec * hz + (u_long)ts->tv_nsec / nstick);
1177 	else
1178 		ticks = INT_MAX;
1179 	return (ticks);
1180 }
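
/*
 * Contrast sketch: for the same interval the _high variants err high
 * (the timeout can never fire early) while the _low variants err low
 * (the sleep can never return late).
 */
#if 0
struct timeval tv = { 0, 15000 };  /* 15 msec, hz = 100, ustick = 10000 */
int hi = tvtohz_high(&tv);	   /* (15000 + 9999) / 10000 + 1 = 3 */
int lo = tvtohz_low(&tv);	   /* 15000 / 10000 = 1 */
#endif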
1181 
1182 /*
1183  * Start profiling on a process.
1184  *
1185  * Caller must hold p->p_token.
1186  *
1187  * Kernel profiling passes proc0 which never exits and hence
1188  * keeps the profile clock running constantly.
1189  */
1190 void
1191 startprofclock(struct proc *p)
1192 {
1193 	if ((p->p_flags & P_PROFIL) == 0) {
1194 		p->p_flags |= P_PROFIL;
1195 #if 0	/* XXX */
1196 		if (++profprocs == 1 && stathz != 0) {
1197 			crit_enter();
1198 			psdiv = psratio;
1199 			setstatclockrate(profhz);
1200 			crit_exit();
1201 		}
1202 #endif
1203 	}
1204 }
1205 
1206 /*
1207  * Stop profiling on a process.
1208  *
1209  * caller must hold p->p_token
1210  */
1211 void
1212 stopprofclock(struct proc *p)
1213 {
1214 	if (p->p_flags & P_PROFIL) {
1215 		p->p_flags &= ~P_PROFIL;
1216 #if 0	/* XXX */
1217 		if (--profprocs == 0 && stathz != 0) {
1218 			crit_enter();
1219 			psdiv = 1;
1220 			setstatclockrate(stathz);
1221 			crit_exit();
1222 		}
1223 #endif
1224 	}
1225 }
1226 
1227 /*
1228  * Return information about system clocks.
1229  */
1230 static int
1231 sysctl_kern_clockrate(SYSCTL_HANDLER_ARGS)
1232 {
1233 	struct kinfo_clockinfo clkinfo;
1234 	/*
1235 	 * Construct clockinfo structure.
1236 	 */
1237 	clkinfo.ci_hz = hz;
1238 	clkinfo.ci_tick = ustick;
1239 	clkinfo.ci_tickadj = ntp_default_tick_delta / 1000;
1240 	clkinfo.ci_profhz = profhz;
1241 	clkinfo.ci_stathz = stathz ? stathz : hz;
1242 	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
1243 }
1244 
1245 SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, CTLTYPE_STRUCT|CTLFLAG_RD,
1246 	0, 0, sysctl_kern_clockrate, "S,clockinfo","");
1247 
1248 /*
1249  * We have eight functions for looking at the clock, four for
1250  * microseconds and four for nanoseconds.  For each there is fast
1251  * but less precise version "get{nano|micro}[up]time" which will
1252  * return a time which is up to 1/HZ previous to the call, whereas
1253  * the raw version "{nano|micro}[up]time" will return a timestamp
1254  * which is as precise as possible.  The "up" variants return the
1255  * time relative to system boot, these are well suited for time
1256  * interval measurements.
1257  *
1258  * Each cpu independently maintains the current time of day, so all
1259  * we need to do to protect ourselves from changes is to do a loop
1260  * check on the seconds field changing out from under us.
1261  *
1262  * The system timer maintains a 32 bit count and due to various issues
1263  * it is possible for the calculated delta to occasionally exceed
1264  * sys_cputimer->freq.  If this occurs the sys_cputimer->freq64_nsec
1265  * multiplication can easily overflow, so we handle that case.  For
1266  * uniformity we handle it in the usec routines too.
1267  *
1268  * All the [get][micro,nano][time,uptime]() routines are MPSAFE.
1269  */
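
/*
 * Fixed-point sketch: freq64_nsec is (10^9 << 32) / freq in 32.32
 * fixed point, matching its use below, so (freq64_nsec * delta) >> 32
 * computes delta * 10^9 / freq without a divide.  Bounding delta by
 * freq first keeps the 64 bit product from overflowing.
 */
#if 0
/* e.g. an 80 MHz cputimer: 12.5 nsec per count */
uint64_t freq64_nsec = (1000000000ULL << 32) / 80000000;
sysclock_t delta = 40000000;			/* half a second */
uint64_t nsec = (freq64_nsec * delta) >> 32;	/* 500000000 */
#endif
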
1270 void
1271 getmicrouptime(struct timeval *tvp)
1272 {
1273 	struct globaldata *gd = mycpu;
1274 	sysclock_t delta;
1275 
1276 	do {
1277 		tvp->tv_sec = gd->gd_time_seconds;
1278 		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
1279 	} while (tvp->tv_sec != gd->gd_time_seconds);
1280 
1281 	if (delta >= sys_cputimer->freq) {
1282 		tvp->tv_sec += delta / sys_cputimer->freq;
1283 		delta %= sys_cputimer->freq;
1284 	}
1285 	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;
1286 	if (tvp->tv_usec >= 1000000) {
1287 		tvp->tv_usec -= 1000000;
1288 		++tvp->tv_sec;
1289 	}
1290 }
1291 
1292 void
1293 getnanouptime(struct timespec *tsp)
1294 {
1295 	struct globaldata *gd = mycpu;
1296 	sysclock_t delta;
1297 
1298 	do {
1299 		tsp->tv_sec = gd->gd_time_seconds;
1300 		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
1301 	} while (tsp->tv_sec != gd->gd_time_seconds);
1302 
1303 	if (delta >= sys_cputimer->freq) {
1304 		tsp->tv_sec += delta / sys_cputimer->freq;
1305 		delta %= sys_cputimer->freq;
1306 	}
1307 	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
1308 }
1309 
1310 void
1311 microuptime(struct timeval *tvp)
1312 {
1313 	struct globaldata *gd = mycpu;
1314 	sysclock_t delta;
1315 
1316 	do {
1317 		tvp->tv_sec = gd->gd_time_seconds;
1318 		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
1319 	} while (tvp->tv_sec != gd->gd_time_seconds);
1320 
1321 	if (delta >= sys_cputimer->freq) {
1322 		tvp->tv_sec += delta / sys_cputimer->freq;
1323 		delta %= sys_cputimer->freq;
1324 	}
1325 	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;
1326 }
1327 
1328 void
1329 nanouptime(struct timespec *tsp)
1330 {
1331 	struct globaldata *gd = mycpu;
1332 	sysclock_t delta;
1333 
1334 	do {
1335 		tsp->tv_sec = gd->gd_time_seconds;
1336 		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
1337 	} while (tsp->tv_sec != gd->gd_time_seconds);
1338 
1339 	if (delta >= sys_cputimer->freq) {
1340 		tsp->tv_sec += delta / sys_cputimer->freq;
1341 		delta %= sys_cputimer->freq;
1342 	}
1343 	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
1344 }
1345 
1346 /*
1347  * realtime routines
1348  */
1349 void
1350 getmicrotime(struct timeval *tvp)
1351 {
1352 	struct globaldata *gd = mycpu;
1353 	struct timespec *bt;
1354 	sysclock_t delta;
1355 
1356 	do {
1357 		tvp->tv_sec = gd->gd_time_seconds;
1358 		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
1359 	} while (tvp->tv_sec != gd->gd_time_seconds);
1360 
1361 	if (delta >= sys_cputimer->freq) {
1362 		tvp->tv_sec += delta / sys_cputimer->freq;
1363 		delta %= sys_cputimer->freq;
1364 	}
1365 	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;
1366 
1367 	bt = &basetime[basetime_index];
1368 	cpu_lfence();
1369 	tvp->tv_sec += bt->tv_sec;
1370 	tvp->tv_usec += bt->tv_nsec / 1000;
1371 	while (tvp->tv_usec >= 1000000) {
1372 		tvp->tv_usec -= 1000000;
1373 		++tvp->tv_sec;
1374 	}
1375 }
1376 
1377 void
1378 getnanotime(struct timespec *tsp)
1379 {
1380 	struct globaldata *gd = mycpu;
1381 	struct timespec *bt;
1382 	sysclock_t delta;
1383 
1384 	do {
1385 		tsp->tv_sec = gd->gd_time_seconds;
1386 		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
1387 	} while (tsp->tv_sec != gd->gd_time_seconds);
1388 
1389 	if (delta >= sys_cputimer->freq) {
1390 		tsp->tv_sec += delta / sys_cputimer->freq;
1391 		delta %= sys_cputimer->freq;
1392 	}
1393 	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
1394 
1395 	bt = &basetime[basetime_index];
1396 	cpu_lfence();
1397 	tsp->tv_sec += bt->tv_sec;
1398 	tsp->tv_nsec += bt->tv_nsec;
1399 	while (tsp->tv_nsec >= 1000000000) {
1400 		tsp->tv_nsec -= 1000000000;
1401 		++tsp->tv_sec;
1402 	}
1403 }
1404 
1405 static void
1406 getnanotime_nbt(struct timespec *nbt, struct timespec *tsp)
1407 {
1408 	struct globaldata *gd = mycpu;
1409 	sysclock_t delta;
1410 
1411 	do {
1412 		tsp->tv_sec = gd->gd_time_seconds;
1413 		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
1414 	} while (tsp->tv_sec != gd->gd_time_seconds);
1415 
1416 	if (delta >= sys_cputimer->freq) {
1417 		tsp->tv_sec += delta / sys_cputimer->freq;
1418 		delta %= sys_cputimer->freq;
1419 	}
1420 	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
1421 
1422 	tsp->tv_sec += nbt->tv_sec;
1423 	tsp->tv_nsec += nbt->tv_nsec;
1424 	while (tsp->tv_nsec >= 1000000000) {
1425 		tsp->tv_nsec -= 1000000000;
1426 		++tsp->tv_sec;
1427 	}
1428 }
1429 
1430 
1431 void
1432 microtime(struct timeval *tvp)
1433 {
1434 	struct globaldata *gd = mycpu;
1435 	struct timespec *bt;
1436 	sysclock_t delta;
1437 
1438 	do {
1439 		tvp->tv_sec = gd->gd_time_seconds;
1440 		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
1441 	} while (tvp->tv_sec != gd->gd_time_seconds);
1442 
1443 	if (delta >= sys_cputimer->freq) {
1444 		tvp->tv_sec += delta / sys_cputimer->freq;
1445 		delta %= sys_cputimer->freq;
1446 	}
1447 	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;
1448 
1449 	bt = &basetime[basetime_index];
1450 	cpu_lfence();
1451 	tvp->tv_sec += bt->tv_sec;
1452 	tvp->tv_usec += bt->tv_nsec / 1000;
1453 	while (tvp->tv_usec >= 1000000) {
1454 		tvp->tv_usec -= 1000000;
1455 		++tvp->tv_sec;
1456 	}
1457 }
1458 
1459 void
1460 nanotime(struct timespec *tsp)
1461 {
1462 	struct globaldata *gd = mycpu;
1463 	struct timespec *bt;
1464 	sysclock_t delta;
1465 
1466 	do {
1467 		tsp->tv_sec = gd->gd_time_seconds;
1468 		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
1469 	} while (tsp->tv_sec != gd->gd_time_seconds);
1470 
1471 	if (delta >= sys_cputimer->freq) {
1472 		tsp->tv_sec += delta / sys_cputimer->freq;
1473 		delta %= sys_cputimer->freq;
1474 	}
1475 	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
1476 
1477 	bt = &basetime[basetime_index];
1478 	cpu_lfence();
1479 	tsp->tv_sec += bt->tv_sec;
1480 	tsp->tv_nsec += bt->tv_nsec;
1481 	while (tsp->tv_nsec >= 1000000000) {
1482 		tsp->tv_nsec -= 1000000000;
1483 		++tsp->tv_sec;
1484 	}
1485 }
1486 
1487 /*
1488  * Get an approximate time_t.  It does not have to be accurate.  This
1489  * function is called only from KTR and can be called with the system in
1490  * any state so do not use a critical section or other complex operation
1491  * here.
1492  *
1493  * NOTE: This is not exactly synchronized with real time.  To do that we
1494  *	 would have to do what microtime does and check for a nanoseconds
1495  *	 overflow.
1496  */
1497 time_t
1498 get_approximate_time_t(void)
1499 {
1500 	struct globaldata *gd = mycpu;
1501 	struct timespec *bt;
1502 
1503 	bt = &basetime[basetime_index];
1504 	return(gd->gd_time_seconds + bt->tv_sec);
1505 }
1506 
1507 int
1508 pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
1509 {
1510 	pps_params_t *app;
1511 	struct pps_fetch_args *fapi;
1512 #ifdef PPS_SYNC
1513 	struct pps_kcbind_args *kapi;
1514 #endif
1515 
1516 	switch (cmd) {
1517 	case PPS_IOC_CREATE:
1518 		return (0);
1519 	case PPS_IOC_DESTROY:
1520 		return (0);
1521 	case PPS_IOC_SETPARAMS:
1522 		app = (pps_params_t *)data;
1523 		if (app->mode & ~pps->ppscap)
1524 			return (EINVAL);
1525 		pps->ppsparam = *app;
1526 		return (0);
1527 	case PPS_IOC_GETPARAMS:
1528 		app = (pps_params_t *)data;
1529 		*app = pps->ppsparam;
1530 		app->api_version = PPS_API_VERS_1;
1531 		return (0);
1532 	case PPS_IOC_GETCAP:
1533 		*(int*)data = pps->ppscap;
1534 		return (0);
1535 	case PPS_IOC_FETCH:
1536 		fapi = (struct pps_fetch_args *)data;
1537 		if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
1538 			return (EINVAL);
1539 		if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec)
1540 			return (EOPNOTSUPP);
1541 		pps->ppsinfo.current_mode = pps->ppsparam.mode;
1542 		fapi->pps_info_buf = pps->ppsinfo;
1543 		return (0);
1544 	case PPS_IOC_KCBIND:
1545 #ifdef PPS_SYNC
1546 		kapi = (struct pps_kcbind_args *)data;
1547 		/* XXX Only root should be able to do this */
1548 		if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
1549 			return (EINVAL);
1550 		if (kapi->kernel_consumer != PPS_KC_HARDPPS)
1551 			return (EINVAL);
1552 		if (kapi->edge & ~pps->ppscap)
1553 			return (EINVAL);
1554 		pps->kcmode = kapi->edge;
1555 		return (0);
1556 #else
1557 		return (EOPNOTSUPP);
1558 #endif
1559 	default:
1560 		return (ENOTTY);
1561 	}
1562 }
1563 
1564 void
1565 pps_init(struct pps_state *pps)
1566 {
1567 	pps->ppscap |= PPS_TSFMT_TSPEC;
1568 	if (pps->ppscap & PPS_CAPTUREASSERT)
1569 		pps->ppscap |= PPS_OFFSETASSERT;
1570 	if (pps->ppscap & PPS_CAPTURECLEAR)
1571 		pps->ppscap |= PPS_OFFSETCLEAR;
1572 }
1573 
1574 void
1575 pps_event(struct pps_state *pps, sysclock_t count, int event)
1576 {
1577 	struct globaldata *gd;
1578 	struct timespec *tsp;
1579 	struct timespec *osp;
1580 	struct timespec *bt;
1581 	struct timespec ts;
1582 	sysclock_t *pcount;
1583 #ifdef PPS_SYNC
1584 	sysclock_t tcount;
1585 #endif
1586 	sysclock_t delta;
1587 	pps_seq_t *pseq;
1588 	int foff;
1589 #ifdef PPS_SYNC
1590 	int fhard;
1591 #endif
1592 	int ni;
1593 
1594 	gd = mycpu;
1595 
1596 	/* Things would be easier with arrays... */
1597 	if (event == PPS_CAPTUREASSERT) {
1598 		tsp = &pps->ppsinfo.assert_timestamp;
1599 		osp = &pps->ppsparam.assert_offset;
1600 		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
1601 #ifdef PPS_SYNC
1602 		fhard = pps->kcmode & PPS_CAPTUREASSERT;
1603 #endif
1604 		pcount = &pps->ppscount[0];
1605 		pseq = &pps->ppsinfo.assert_sequence;
1606 	} else {
1607 		tsp = &pps->ppsinfo.clear_timestamp;
1608 		osp = &pps->ppsparam.clear_offset;
1609 		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
1610 #ifdef PPS_SYNC
1611 		fhard = pps->kcmode & PPS_CAPTURECLEAR;
1612 #endif
1613 		pcount = &pps->ppscount[1];
1614 		pseq = &pps->ppsinfo.clear_sequence;
1615 	}
1616 
1617 	/* Nothing really happened */
1618 	if (*pcount == count)
1619 		return;
1620 
1621 	*pcount = count;
1622 
1623 	do {
1624 		ts.tv_sec = gd->gd_time_seconds;
1625 		delta = count - gd->gd_cpuclock_base;
1626 	} while (ts.tv_sec != gd->gd_time_seconds);
1627 
1628 	if (delta >= sys_cputimer->freq) {
1629 		ts.tv_sec += delta / sys_cputimer->freq;
1630 		delta %= sys_cputimer->freq;
1631 	}
1632 	ts.tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
1633 	ni = basetime_index;
1634 	cpu_lfence();
1635 	bt = &basetime[ni];
1636 	ts.tv_sec += bt->tv_sec;
1637 	ts.tv_nsec += bt->tv_nsec;
1638 	while (ts.tv_nsec >= 1000000000) {
1639 		ts.tv_nsec -= 1000000000;
1640 		++ts.tv_sec;
1641 	}
1642 
1643 	(*pseq)++;
1644 	*tsp = ts;
1645 
1646 	if (foff) {
1647 		timespecadd(tsp, osp);
1648 		if (tsp->tv_nsec < 0) {
1649 			tsp->tv_nsec += 1000000000;
1650 			tsp->tv_sec -= 1;
1651 		}
1652 	}
1653 #ifdef PPS_SYNC
1654 	if (fhard) {
1655 		/* magic, at its best... */
1656 		tcount = count - pps->ppscount[2];
1657 		pps->ppscount[2] = count;
1658 		if (tcount >= sys_cputimer->freq) {
1659 			delta = (1000000000 * (tcount / sys_cputimer->freq) +
1660 				 sys_cputimer->freq64_nsec *
1661 				 (tcount % sys_cputimer->freq)) >> 32;
1662 		} else {
1663 			delta = (sys_cputimer->freq64_nsec * tcount) >> 32;
1664 		}
1665 		hardpps(tsp, delta);
1666 	}
1667 #endif
1668 }
1669 
1670 /*
1671  * Return the tsc target value for a delay of (ns).
1672  *
1673  * Returns -1 if the TSC is not supported.
1674  */
1675 int64_t
1676 tsc_get_target(int ns)
1677 {
1678 #if defined(_RDTSC_SUPPORTED_)
1679 	if (cpu_feature & CPUID_TSC) {
1680 		return (rdtsc() + tsc_frequency * ns / (int64_t)1000000000);
1681 	}
1682 #endif
1683 	return(-1);
1684 }
1685 
1686 /*
1687  * Compare the tsc against the passed target
1688  *
1689  * Returns +1 if the target has been reached
1690  * Returns  0 if the target has not yet been reached
1691  * Returns -1 if the TSC is not supported.
1692  *
1693  * Typical use:		while (tsc_test_target(target) == 0) { ...poll... }
1694  */
1695 int
1696 tsc_test_target(int64_t target)
1697 {
1698 #if defined(_RDTSC_SUPPORTED_)
1699 	if (cpu_feature & CPUID_TSC) {
1700 		if ((int64_t)(target - rdtsc()) <= 0)
1701 			return(1);
1702 		return(0);
1703 	}
1704 #endif
1705 	return(-1);
1706 }
1707 
1708 /*
1709  * Delay the specified number of nanoseconds using the tsc.  This function
1710  * returns immediately if the TSC is not supported.  At least one cpu_pause()
1711  * will be issued.
1712  */
1713 void
1714 tsc_delay(int ns)
1715 {
1716 	int64_t clk;
1717 
1718 	clk = tsc_get_target(ns);
1719 	cpu_pause();
1720 	while (tsc_test_target(clk) == 0)
1721 		cpu_pause();
1722 }
1723
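
/*
 * Usage sketch: bounded polling built from the TSC helpers above;
 * device_ready() is a hypothetical predicate.
 */
#if 0
int64_t target = tsc_get_target(10000);	/* 10 usec budget */

while (!device_ready()) {
	if (tsc_test_target(target) != 0)
		break;			/* timed out (or no TSC) */
	cpu_pause();
}
#endif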