1 /*	$NetBSD: ntp_loopfilter.c,v 1.2 2009/12/14 00:41:03 christos Exp $	*/
2 
3 /*
4  * ntp_loopfilter.c - implements the NTP loop filter algorithm
5  *
6  * ATTENTION: Get approval from Dave Mills on all changes to this file!
7  *
8  */
9 #ifdef HAVE_CONFIG_H
10 # include <config.h>
11 #endif
12 
13 #include "ntpd.h"
14 #include "ntp_io.h"
15 #include "ntp_unixtime.h"
16 #include "ntp_stdlib.h"
17 
18 #include <stdio.h>
19 #include <ctype.h>
20 
21 #include <signal.h>
22 #include <setjmp.h>
23 #ifdef __NetBSD__
24 #include <util.h>
25 #endif
26 
27 #if defined(VMS) && defined(VMS_LOCALUNIT)	/*wjm*/
28 #include "ntp_refclock.h"
29 #endif /* VMS */
30 
31 #ifdef KERNEL_PLL
32 #include "ntp_syscall.h"
33 #endif /* KERNEL_PLL */
34 
35 /*
36  * This is an implementation of the clock discipline algorithm described
37  * in UDel TR 97-4-3, as amended. It operates as an adaptive parameter,
38  * hybrid phase/frequency-lock loop. A number of sanity checks are
39  * included to protect against timewarps, timespikes and general mayhem.
40  * All units are in s and s/s, unless noted otherwise.
41  */
42 #define CLOCK_MAX	.128	/* default step threshold (s) */
43 #define CLOCK_MINSTEP	900.	/* default stepout threshold (s) */
44 #define CLOCK_PANIC	1000.	/* default panic threshold (s) */
45 #define	CLOCK_PHI	15e-6	/* max frequency error (s/s) */
46 #define CLOCK_PLL	16.	/* PLL loop gain (log2) */
47 #define CLOCK_AVG	8.	/* parameter averaging constant */
48 #define CLOCK_FLL	.25	/* FLL loop gain */
49 #define	CLOCK_ALLAN	11	/* Allan intercept (log2 s) */
50 #define CLOCK_DAY	86400.	/* one day in seconds (s) */
51 #define CLOCK_JUNE	(CLOCK_DAY * 30) /* June in seconds (s) */
52 #define CLOCK_LIMIT	30	/* poll-adjust threshold */
53 #define CLOCK_PGATE	4.	/* poll-adjust gate */
54 #define PPS_MAXAGE	120	/* kernel pps signal timeout (s) */
55 #define	FREQTOD(x)	((x) / 65536e6) /* NTP to double */
56 #define	DTOFREQ(x)	((int32)((x) * 65536e6)) /* double to NTP */
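/*
 * Worked example (illustrative numbers only): the kernel carries
 * frequency as PPM with a 16-bit fraction, so a drift of +5 PPM
 * (5e-6 s/s) maps to DTOFREQ(5e-6) = 5e-6 * 65536e6 = 327680, and
 * FREQTOD(327680) recovers 5e-6 s/s.
 */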
57 
58 /*
59  * Clock discipline state machine. This is used to control the
60  * synchronization behavior during initialization and following a
61  * timewarp.
62  *
63  *	State	< step		> step		Comments
64  *	========================================================
65  *	NSET	FREQ		step, FREQ	freq not set
66  *
67  *	FSET	SYNC		step, SYNC	freq set
68  *
69  *	FREQ	if (mu < 900)	if (mu < 900)	set freq direct
70  *		    ignore	    ignore
71  *		else		else
72  *		    freq, SYNC	    freq, step, SYNC
73  *
74  *	SYNC	SYNC		SPIK, ignore	adjust phase/freq
75  *
76  *	SPIK	SYNC		if (mu < 900)	adjust phase/freq
77  *				    ignore
78  *				else step, SYNC
79  */
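/*
 * Example walk-through (illustrative only): on a cold start with no
 * frequency file the daemon begins in NSET. The first in-range update
 * moves it to FREQ, where further updates are ignored until the
 * stepout threshold (900 s) has passed; the frequency is then measured
 * directly and the state becomes SYNC, after which phase and frequency
 * are adjusted on every update.
 */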
80 /*
81  * Kernel PLL/PPS state machine. This is used with the kernel PLL
82  * modifications described in the documentation.
83  *
84  * If kernel support for the ntp_adjtime() system call is available, the
85  * pll_control flag is set. The ntp_enable and kern_enable flags can be
86  * set at configuration time or run time using ntpdc. If ntp_enable is
87  * false, the discipline loop is unlocked and no corrections of any kind
88  * are made. If both pll_control and kern_enable are set, the kernel
89  * support is used as described above; if either is false, the kernel is
90  * bypassed entirely and the daemon discipline is used instead.
91  *
92  * There have been three versions of the kernel discipline code. The
93  * first (microkernel), now in Solaris, disciplines the microseconds. The
94  * second and third (nanokernel) discipline the clock in nanoseconds.
95  * These versions are identified if the symbol STA_PLL is present in the
96  * header file /usr/include/sys/timex.h. The third and current version
97  * includes TAI offset and is identified by the symbol NTP_API with
98  * value 4.
99  *
100  * Each PPS time/frequency discipline can be enabled by the atom driver
101  * or another driver. If enabled, the STA_PPSTIME and STA_FREQ bits are
102  * set in the kernel status word; otherwise, these bits are cleared.
103  * These bits are also cleared if the kernel reports an error.
104  *
105  * If an external clock is present, the clock driver sets STA_CLK in the
106  * status word. When the local clock driver sees this bit, it updates
107  * via this routine, which then calls ntp_adjtime() with the STA_PLL bit
108  * set to zero, in which case the system clock is not adjusted. This is
109  * also a signal for the external clock driver to discipline the system
110  * clock. Unless specified otherwise, all times are in seconds.
111  */
112 /*
113  * Program variables that can be tinkered.
114  */
115 double	clock_max = CLOCK_MAX;	/* step threshold */
116 double	clock_minstep = CLOCK_MINSTEP; /* stepout threshold */
117 double	clock_panic = CLOCK_PANIC; /* panic threshold */
118 double	clock_phi = CLOCK_PHI;	/* dispersion rate (s/s) */
119 u_char	allan_xpt = CLOCK_ALLAN; /* Allan intercept (log2 s) */
120 
121 /*
122  * Program variables
123  */
124 static double clock_offset;	/* offset */
125 double	clock_jitter;		/* offset jitter */
126 double	drift_comp;		/* frequency (s/s) */
127 double	clock_stability;	/* frequency stability (wander) (s/s) */
128 double	clock_codec;		/* audio codec frequency (samples/s) */
129 static u_long clock_epoch;	/* last update */
130 u_int	sys_tai;		/* TAI offset from UTC */
131 static void rstclock (int, double); /* transition function */
132 static double direct_freq(double); /* direct set frequency */
133 static void set_freq(double);	/* set frequency */
134 
135 #ifdef KERNEL_PLL
136 static struct timex ntv;	/* ntp_adjtime() parameters */
137 int	pll_status;		/* last kernel status bits */
138 #if defined(STA_NANO) && NTP_API == 4
139 static u_int loop_tai;		/* last TAI offset */
140 #endif /* STA_NANO */
141 #endif /* KERNEL_PLL */
142 
143 /*
144  * Clock state machine control flags
145  */
146 int	ntp_enable = 1;		/* clock discipline enabled */
147 int	pll_control;		/* kernel support available */
148 int	kern_enable = 1;	/* kernel support enabled */
149 int	pps_enable;		/* kernel PPS discipline enabled */
150 int	ext_enable;		/* external clock enabled */
151 int	pps_stratum;		/* pps stratum */
152 int	allow_panic = FALSE;	/* allow panic correction */
153 int	mode_ntpdate = FALSE;	/* exit on first clock set */
154 
155 /*
156  * Clock state machine variables
157  */
158 int	state;			/* clock discipline state */
159 u_char	sys_poll;		/* time constant/poll (log2 s) */
160 int	tc_counter;		/* jiggle counter */
161 double	last_offset;		/* last offset (s) */
162 static u_long last_step;	/* last clock step */
163 
164 /*
165  * Huff-n'-puff filter variables
166  */
167 static double *sys_huffpuff;	/* huff-n'-puff filter */
168 static int sys_hufflen;		/* huff-n'-puff filter stages */
169 static int sys_huffptr;		/* huff-n'-puff filter pointer */
170 static double sys_mindly;	/* huff-n'-puff filter min delay */
171 
172 #if defined(KERNEL_PLL)
173 /* Emacs cc-mode goes nuts if we split the next line... */
174 #define MOD_BITS (MOD_OFFSET | MOD_MAXERROR | MOD_ESTERROR | \
175     MOD_STATUS | MOD_TIMECONST)
176 #ifdef SIGSYS
177 static void pll_trap (int);	/* configuration trap */
178 static struct sigaction sigsys;	/* current sigaction status */
179 static struct sigaction newsigsys; /* new sigaction status */
180 static sigjmp_buf env;		/* environment var. for pll_trap() */
181 #endif /* SIGSYS */
182 #endif /* KERNEL_PLL */
183 
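/*
 * sync_status - report a kernel status word change via the event system
 */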
184 static void
185 sync_status(const char *what, int status)
186 {
187 	char buf[256], tbuf[1024];
188 #ifdef STA_FMT
189 	snprintb(buf, sizeof(buf), STA_FMT, status);
190 #else
191 	snprintf(buf, sizeof(buf), "%04x", status);
192 #endif
193 	snprintf(tbuf, sizeof(tbuf), "%s status=%s", what, buf);
194 	report_event(EVNT_KERN, NULL, tbuf);
195 }
196 
197 /*
198  * init_loopfilter - initialize loop filter data
199  */
200 void
201 init_loopfilter(void)
202 {
203 	/*
204 	 * Initialize state variables.
205 	 */
206 	sys_poll = ntp_minpoll;
207 	clock_jitter = LOGTOD(sys_precision);
208 }
209 
210 /*
211  * local_clock - the NTP logical clock loop filter.
212  *
213  * Return codes:
214  * -1	update ignored: exceeds panic threshold
215  * 0	update ignored: popcorn or exceeds step threshold
216  * 1	clock was slewed
217  * 2	clock was stepped
218  *
219  * LOCKCLOCK: The only thing this routine does is set the
220  * sys_rootdisp variable equal to the peer dispersion.
221  */
222 int
223 local_clock(
224 	struct	peer *peer,	/* synch source peer structure */
225 	double	fp_offset	/* clock offset (s) */
226 	)
227 {
228 	int	rval;		/* return code */
229 	int	osys_poll;	/* old system poll */
230 	double	mu;		/* interval since last update */
231 	double	clock_frequency; /* clock frequency */
232 	double	dtemp, etemp;	/* double temps */
233 	char	tbuf[80];	/* report buffer */
234 
235 	/*
236 	 * If the loop is open or the NIST LOCKCLOCK is in use,
237 	 * monitor and record the offsets anyway in order to determine
238 	 * the open-loop response and then go home.
239 	 */
240 #ifdef LOCKCLOCK
241 	return (0);
242 
243 #else /* LOCKCLOCK */
244 	if (!ntp_enable) {
245 		record_loop_stats(fp_offset, drift_comp, clock_jitter,
246 		    clock_stability, sys_poll);
247 		return (0);
248 	}
249 
250 	/*
251 	 * If the clock is way off, panic is declared. The clock_panic
252 	 * defaults to 1000 s; if set to zero, the panic will never
253 	 * occur. The allow_panic flag defaults to FALSE, so the first
254 	 * panic causes an exit. It can be set TRUE by a command line
255 	 * option, in which case the clock will be set anyway and time
256 	 * marches on. However, allow_panic is reset to FALSE once an
257 	 * update is below the step threshold, so subsequent panics exit.
258 	 */
259 	if (fabs(fp_offset) > clock_panic && clock_panic > 0 &&
260 	    !allow_panic) {
261 		snprintf(tbuf, sizeof(tbuf),
262 		    "%+.0f s; set clock manually within %.0f s.",
263 		    fp_offset, clock_panic);
264 		report_event(EVNT_SYSFAULT, NULL, tbuf);
265 		return (-1);
266 	}
267 
268 	/*
269 	 * This section simulates ntpdate. If the offset exceeds the
270 	 * step threshold (128 ms), step the clock to that time and
271 	 * exit. Otherwise, slew the clock to that time and exit. Note
272 	 * that the slew will persist and eventually complete beyond the
273 	 * life of this program. Note that while ntpdate is active, the
274 	 * terminal does not detach, so the termination message prints
275 	 * directly to the terminal.
276 	 */
277 	if (mode_ntpdate) {
278 		if (fabs(fp_offset) > clock_max && clock_max > 0) {
279 			step_systime(fp_offset);
280 			msyslog(LOG_NOTICE, "ntpd: time set %+.6f s",
281 	   		    fp_offset);
282 			printf("ntpd: time set %+.6fs\n", fp_offset);
283 		} else {
284 			adj_systime(fp_offset);
285 			msyslog(LOG_NOTICE, "ntpd: time slew %+.6f s",
286 			    fp_offset);
287 			printf("ntpd: time slew %+.6fs\n", fp_offset);
288 		}
289 		record_loop_stats(fp_offset, drift_comp, clock_jitter,
290 		    clock_stability, sys_poll);
291 		exit (0);
292 	}
293 
294 	/*
295 	 * The huff-n'-puff filter finds the lowest delay in the recent
296 	 * interval. This is used to correct the offset by one-half the
297 	 * difference between the sample delay and minimum delay. This
298 	 * is most effective if the delays are highly asymmetric and
299 	 * clockhopping is avoided and the clock frequency wander is
300 	 * relatively small.
301 	 */
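	/*
	 * Worked example (illustrative numbers only): with a sample
	 * delay of 0.100 s and a minimum delay of 0.020 s, a positive
	 * offset is reduced by (0.100 - 0.020) / 2 = 0.040 s and a
	 * negative offset is increased by the same amount.
	 */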
302 	if (sys_huffpuff != NULL) {
303 		if (peer->delay < sys_huffpuff[sys_huffptr])
304 			sys_huffpuff[sys_huffptr] = peer->delay;
305 		if (peer->delay < sys_mindly)
306 			sys_mindly = peer->delay;
307 		if (fp_offset > 0)
308 			dtemp = -(peer->delay - sys_mindly) / 2;
309 		else
310 			dtemp = (peer->delay - sys_mindly) / 2;
311 		fp_offset += dtemp;
312 #ifdef DEBUG
313 		if (debug)
314 			printf(
315 		    "local_clock: size %d mindly %.6f huffpuff %.6f\n",
316 			    sys_hufflen, sys_mindly, dtemp);
317 #endif
318 	}
319 
320 	/*
321 	 * Clock state machine transition function which defines how the
322 	 * system reacts to large phase and frequency excursions. There
323 	 * are two main regimes: when the offset exceeds the step
324 	 * threshold (128 ms) and when it does not. Under certain
325 	 * conditions updates are suspended until the stepout threshold
326 	 * (900 s) is exceeded. See the documentation on how these
327 	 * thresholds interact with commands and command line options.
328 	 *
329 	 * Note the kernel discipline is disabled if the step threshold
330 	 * is zero (disabled) or greater than 0.5 s, or in ntpdate mode.
331 	 */
332 	osys_poll = sys_poll;
333 	if (sys_poll < peer->minpoll)
334 		sys_poll = peer->minpoll;
335 	if (sys_poll > peer->maxpoll)
336 		sys_poll = peer->maxpoll;
337 	mu = current_time - clock_epoch;
338 	clock_frequency = drift_comp;
339 	rval = 1;
340 	if (fabs(fp_offset) > clock_max && clock_max > 0) {
341 		switch (state) {
342 
343 		/*
344 		 * In SYNC state we ignore the first outlier and switch
345 		 * to SPIK state.
346 		 */
347 		case EVNT_SYNC:
348 			snprintf(tbuf, sizeof(tbuf), "%+.6f s",
349 			    fp_offset);
350 			report_event(EVNT_SPIK, NULL, tbuf);
351 			state = EVNT_SPIK;
352 			return (0);
353 
354 		/*
355 		 * In FREQ state we ignore outliers and inliers. At the
356 		 * first outlier after the stepout threshold, compute
357 		 * the apparent frequency correction and step the phase.
358 		 */
359 		case EVNT_FREQ:
360 			if (mu < clock_minstep)
361 				return (0);
362 
363 			clock_frequency = direct_freq(fp_offset);
364 
365 			/* fall through to EVNT_SPIK */
366 
367 		/*
368 		 * In SPIK state we ignore succeeding outliers until
369 		 * either an inlier is found or the stepout threshold is
370 		 * exceeded.
371 		 */
372 		case EVNT_SPIK:
373 			if (mu < clock_minstep)
374 				return (0);
375 
376 			/* fall through to default */
377 
378 		/*
379 		 * We get here by default in NSET and FSET states and
380 		 * from above in FREQ or SPIK states.
381 		 *
382 		 * In NSET state an initial frequency correction is not
383 		 * available, usually because the frequency file has not
384 		 * yet been written. Since the time is outside the step
385 		 * threshold, the clock is stepped. The frequency will
386 		 * be set directly following the stepout interval.
387 		 *
388 		 * In FSET state the initial frequency has been set from
389 		 * the frequency file. Since the time is outside the
390 		 * step threshold, the clock is stepped immediately,
391 		 * rather than after the stepout interval. Guys get
392 		 * nervous if it takes 15 minutes to set the clock for
393 		 * the first time.
394 		 *
395 		 * In FREQ and SPIK states the stepout threshold has
396 		 * expired and the phase is still above the step
397 		 * threshold. Note that a single spike greater than the
398 		 * step threshold is always suppressed, even with a
399 		 * long time constant.
400 		 */
401 		default:
402 			snprintf(tbuf, sizeof(tbuf), "%+.6f s",
403 			    fp_offset);
404 			report_event(EVNT_CLOCKRESET, NULL, tbuf);
405 			step_systime(fp_offset);
406 			reinit_timer();
407 			tc_counter = 0;
408 			clock_jitter = LOGTOD(sys_precision);
409 			rval = 2;
410 			if (state == EVNT_NSET || (current_time -
411 			    last_step) < clock_minstep * 2) {
412 				rstclock(EVNT_FREQ, 0);
413 				return (rval);
414 			}
415 			last_step = current_time;
416 			break;
417 		}
418 		rstclock(EVNT_SYNC, 0);
419 	} else {
420 
421 		/*
422 		 * The offset is less than the step threshold. Calculate
423 		 * the jitter as the exponentially weighted offset
424 		 * differences.
425 		 */
426 		etemp = SQUARE(clock_jitter);
427 		dtemp = SQUARE(max(fabs(fp_offset - last_offset),
428 		    LOGTOD(sys_precision)));
429 		clock_jitter = SQRT(etemp + (dtemp - etemp) /
430 		    CLOCK_AVG);
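		/*
		 * Worked example (illustrative numbers only): with a
		 * previous jitter of 2 ms and an offset difference of
		 * 6 ms, the new jitter is sqrt(4e-6 + (36e-6 - 4e-6) /
		 * 8) = sqrt(8e-6), about 2.8 ms.
		 */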
431 		switch (state) {
432 
433 		/*
434 		 * In NSET state this is the first update received and
435 		 * the frequency has not been initialized. Adjust the
436 		 * phase, but do not adjust the frequency until after
437 		 * the stepout threshold.
438 		 */
439 		case EVNT_NSET:
440 			rstclock(EVNT_FREQ, fp_offset);
441 			break;
442 
443 		/*
444 		 * In FSET state this is the first update received and
445 		 * the frequency has been initialized. Adjust the phase,
446 		 * but do not adjust the frequency until the next
447 		 * update.
448 		 */
449 		case EVNT_FSET:
450 			rstclock(EVNT_SYNC, fp_offset);
451 			break;
452 
453 		/*
454 		 * In FREQ state ignore updates until the stepout
455 		 * threshold. After that, compute the new frequency, but
456 		 * do not adjust the phase or frequency until the next
457 		 * update.
458 		 */
459 		case EVNT_FREQ:
460 			if (mu < clock_minstep)
461 				return (0);
462 
463 			clock_frequency = direct_freq(fp_offset);
464 			rstclock(EVNT_SYNC, 0);
465 			break;
466 
467 
468 		/*
469 		 * We get here by default in SYNC and SPIK states. Here
470 		 * we compute the frequency update due to PLL and FLL
471 		 * contributions.
472 		 */
473 		default:
474 			allow_panic = FALSE;
475 
476 			/*
477 			 * The FLL and PLL frequency gain constants
478 			 * depend on the time constant and Allan
479 			 * intercept. The PLL is always used, but
480 			 * becomes ineffective above the Allan intercept
481 			 * where the FLL becomes effective.
482 			 */
483 			if (sys_poll >= allan_xpt)
484 				clock_frequency += (fp_offset -
485 				    clock_offset) /
486 				    max(ULOGTOD(sys_poll), mu) *
487 				    CLOCK_FLL;
488 
489 			/*
490 			 * The PLL frequency gain (numerator) depends on
491 			 * the minimum of the update interval and Allan
492 			 * intercept. This reduces the PLL gain when the
493 			 * FLL becomes effective.
494 			 */
495 			etemp = min(ULOGTOD(allan_xpt), mu);
496 			dtemp = 4 * CLOCK_PLL * ULOGTOD(sys_poll);
497 			clock_frequency += fp_offset * etemp / (dtemp *
498 			    dtemp);
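			/*
			 * Worked example (illustrative numbers only):
			 * with sys_poll = 6 (64 s), mu = 64 s and a
			 * 1-ms offset, the FLL term is skipped since
			 * 6 < CLOCK_ALLAN, and the PLL term adds about
			 * .001 * 64 / (4 * 16 * 64)^2 = 3.8e-9 s/s
			 * (0.0038 PPM) to the frequency.
			 */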
499 			rstclock(EVNT_SYNC, fp_offset);
500 			break;
501 		}
502 	}
503 
504 #ifdef KERNEL_PLL
505 	/*
506 	 * This code segment works when clock adjustments are made using
507 	 * precision time kernel support and the ntp_adjtime() system
508 	 * call. This support is available in Solaris 2.6 and later,
509 	 * Digital Unix 4.0 and later, FreeBSD, Linux and specially
510 	 * modified kernels for HP-UX 9 and Ultrix 4. In the case of the
511 	 * DECstation 5000/240 and Alpha AXP, additional kernel
512 	 * modifications provide a true microsecond clock and nanosecond
513 	 * clock, respectively.
514 	 *
515 	 * Important note: The kernel discipline is used only if the
516 	 * step threshold is less than 0.5 s, as anything higher can
517 	 * lead to overflow problems. This might occur if some misguided
518 	 * lad set the step threshold to something ridiculous.
519 	 */
520 	if (pll_control && kern_enable) {
521 
522 		/*
523 		 * We initialize the structure for the ntp_adjtime()
524 		 * system call. We have to convert everything to
525 		 * microseconds or nanoseconds first. Do not update the
526 		 * system variables if the ext_enable flag is set. In
527 		 * this case, the external clock driver will update the
528 		 * variables, which will be read later by the local
529 		 * clock driver. Afterwards, remember the time and
530 		 * frequency offsets for jitter and stability values and
531 		 * to update the frequency file.
532 		 */
533 		memset(&ntv,  0, sizeof(ntv));
534 		if (ext_enable) {
535 			ntv.modes = MOD_STATUS;
536 		} else {
537 #ifdef STA_NANO
538 			ntv.modes = MOD_BITS | MOD_NANO;
539 #else /* STA_NANO */
540 			ntv.modes = MOD_BITS;
541 #endif /* STA_NANO */
542 			if (clock_offset < 0)
543 				dtemp = -.5;
544 			else
545 				dtemp = .5;
546 #ifdef STA_NANO
547 			ntv.offset = (int32)(clock_offset * 1e9 +
548 			    dtemp);
549 			ntv.constant = sys_poll;
550 #else /* STA_NANO */
551 			ntv.offset = (int32)(clock_offset * 1e6 +
552 			    dtemp);
553 			ntv.constant = sys_poll - 4;
554 #endif /* STA_NANO */
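			/*
			 * Scaling example (illustrative only): a +3-ms
			 * offset becomes ntv.offset = 3000000 with
			 * STA_NANO (nanoseconds) or 3000 without it
			 * (microseconds), rounded to the nearest
			 * integer by the +/- .5 term above.
			 */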
555 			ntv.esterror = (u_int32)(clock_jitter * 1e6);
556 			ntv.maxerror = (u_int32)((sys_rootdelay / 2 +
557 			    sys_rootdisp) * 1e6);
558 			ntv.status = STA_PLL;
559 
560 			/*
561 			 * Enable/disable the PPS if requested.
562 			 */
563 			if (pps_enable) {
564 				ntv.status |= STA_PPSTIME | STA_PPSFREQ;
565 				if (!(pll_status & STA_PPSTIME))
566 					sync_status("PPS enabled", ntv.status);
567 			} else {
568 				ntv.status &= ~(STA_PPSTIME | STA_PPSFREQ);
569 				if (pll_status & STA_PPSTIME)
570 					sync_status("PPS disabled", ntv.status);
571 			}
572 			if (sys_leap == LEAP_ADDSECOND)
573 				ntv.status |= STA_INS;
574 			else if (sys_leap == LEAP_DELSECOND)
575 				ntv.status |= STA_DEL;
576 		}
577 
578 		/*
579 		 * Pass the stuff to the kernel. If it squeals, turn off
580 		 * the pps. In any case, fetch the kernel offset,
581 		 * frequency and jitter.
582 		 */
583 		if (ntp_adjtime(&ntv) == TIME_ERROR) {
584 			if (!(ntv.status & STA_PPSSIGNAL))
585 				sync_status("PPS no signal", ntv.status);
586 			else
587 				sync_status("adjtime error", ntv.status);
588 		} else {
589  			if ((ntv.status ^ pll_status) & ~STA_FLL)
590 				sync_status("status change", ntv.status);
591 		}
592 		pll_status = ntv.status;
593 #ifdef STA_NANO
594 		clock_offset = ntv.offset / 1e9;
595 #else /* STA_NANO */
596 		clock_offset = ntv.offset / 1e6;
597 #endif /* STA_NANO */
598 		clock_frequency = FREQTOD(ntv.freq);
599 
600 		/*
601 		 * If the kernel PPS is lit, monitor its performance.
602 		 */
603 		if (ntv.status & STA_PPSTIME) {
604 #ifdef STA_NANO
605 			clock_jitter = ntv.jitter / 1e9;
606 #else /* STA_NANO */
607 			clock_jitter = ntv.jitter / 1e6;
608 #endif /* STA_NANO */
609 		}
610 
611 #if defined(STA_NANO) && NTP_API == 4
612 		/*
613 		 * If the TAI changes, update the kernel TAI.
614 		 */
615 		if (loop_tai != sys_tai) {
616 			loop_tai = sys_tai;
617 			ntv.modes = MOD_TAI;
618 			ntv.constant = sys_tai;
619 			ntp_adjtime(&ntv);
620 		}
621 #endif /* STA_NANO */
622 	}
623 #endif /* KERNEL_PLL */
624 
625 	/*
626 	 * Clamp the frequency within the tolerance range and calculate
627 	 * the frequency difference since the last update.
628 	 */
629 	if (fabs(clock_frequency) > NTP_MAXFREQ)
630 		msyslog(LOG_NOTICE,
631 		    "frequency error %.0f PPM exceeds tolerance %.0f PPM",
632 		    clock_frequency * 1e6, NTP_MAXFREQ * 1e6);
633 	dtemp = SQUARE(clock_frequency - drift_comp);
634 	if (clock_frequency > NTP_MAXFREQ)
635 		drift_comp = NTP_MAXFREQ;
636 	else if (clock_frequency < -NTP_MAXFREQ)
637 		drift_comp = -NTP_MAXFREQ;
638 	else
639 		drift_comp = clock_frequency;
640 
641 	/*
642 	 * Calculate the wander as the exponentially weighted RMS
643 	 * frequency differences. Record the change for the frequency
644 	 * file update.
645 	 */
646 	etemp = SQUARE(clock_stability);
647 	clock_stability = SQRT(etemp + (dtemp - etemp) / CLOCK_AVG);
648 	drift_file_sw = TRUE;
649 
650 	/*
651 	 * Here we adjust the time constant by comparing the current
652 	 * offset with the clock jitter. If the offset is less than the
653 	 * clock jitter times a constant, then the averaging interval is
654 	 * increased, otherwise it is decreased. A bit of hysteresis
655 	 * helps calm the dance. Works best using burst mode.
656 	 */
657 	if (fabs(clock_offset) < CLOCK_PGATE * clock_jitter) {
658 		tc_counter += sys_poll;
659 		if (tc_counter > CLOCK_LIMIT) {
660 			tc_counter = CLOCK_LIMIT;
661 			if (sys_poll < peer->maxpoll) {
662 				tc_counter = 0;
663 				sys_poll++;
664 			}
665 		}
666 	} else {
667 		tc_counter -= sys_poll << 1;
668 		if (tc_counter < -CLOCK_LIMIT) {
669 			tc_counter = -CLOCK_LIMIT;
670 			if (sys_poll > peer->minpoll) {
671 				tc_counter = 0;
672 				sys_poll--;
673 			}
674 		}
675 	}
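	/*
	 * Worked example (illustrative numbers only): at sys_poll = 6
	 * the counter gains 6 per update inside the gate, so about six
	 * consecutive good updates push it past CLOCK_LIMIT (30) and
	 * the poll interval is raised one step, while a single update
	 * outside the gate takes back twice as much.
	 */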
676 
677 	/*
678 	 * If the time constant has changed, update the poll variables.
679 	 */
680 	if (osys_poll != sys_poll)
681 		poll_update(peer, sys_poll);
682 
683 	/*
684 	 * Yibbidy, yibbbidy, yibbidy; that'h all folks.
685 	 */
686 	record_loop_stats(clock_offset, drift_comp, clock_jitter,
687 	    clock_stability, sys_poll);
688 #ifdef DEBUG
689 	if (debug)
690 		printf(
691 		    "local_clock: offset %.9f jit %.9f freq %.3f stab %.3f poll %d\n",
692 		    clock_offset, clock_jitter, drift_comp * 1e6,
693 		    clock_stability * 1e6, sys_poll);
694 #endif /* DEBUG */
695 	return (rval);
696 #endif /* LOCKCLOCK */
697 }
698 
699 
700 /*
701  * adj_host_clock - Called once every second to update the local clock.
702  *
703  * LOCKCLOCK: The only thing this routine does is increment the
704  * sys_rootdisp variable.
705  */
706 void
707 adj_host_clock(
708 	void
709 	)
710 {
711 	double	adjustment;
712 
713 	/*
714 	 * Update the dispersion since the last update. In contrast to
715 	 * NTPv3, NTPv4 does not declare unsynchronized after one day,
716 	 * since the dispersion check serves this function. Also,
717 	 * since the poll interval can exceed one day, the old test
718 	 * would be counterproductive.
719 	 */
720 	sys_rootdisp += clock_phi;
721 
722 #ifndef LOCKCLOCK
723 	/*
724 	 * If clock discipline is disabled or if the kernel is enabled,
725 	 * get out of Dodge quick.
726 	 */
727 	if (!ntp_enable || mode_ntpdate || (pll_control &&
728 	    kern_enable))
729 		return;
730 
731 	/*
732 	 * Implement the phase and frequency adjustments. The gain
733 	 * factor (denominator) increases with poll interval, so is
734 	 * dominated by the FLL above the Allan intercept.
735 	 */
736 	adjustment = clock_offset / (CLOCK_PLL * ULOGTOD(sys_poll));
737 	clock_offset -= adjustment;
738 	adj_systime(adjustment + drift_comp);
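	/*
	 * Worked example (illustrative numbers only): with sys_poll = 6
	 * and a residual offset of 10 ms, each call slews about
	 * .010 / (16 * 64) = 9.8e-6 s and leaves the rest, so the
	 * offset decays exponentially with a time constant of roughly
	 * 1024 s on top of the constant drift_comp slew.
	 */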
739 #endif /* LOCKCLOCK */
740 }
741 
742 
743 /*
744  * Clock state machine. Enter new state and set state variables.
745  */
746 static void
747 rstclock(
748 	int	trans,		/* new state */
749 	double	offset		/* new offset */
750 	)
751 {
752 #ifdef DEBUG
753 	if (debug > 1)
754 		printf("local_clock: mu %lu state %d poll %d count %d\n",
755 		    current_time - clock_epoch, trans, sys_poll,
756 		    tc_counter);
757 #endif
758 	if (trans != state && trans != EVNT_FSET)
759 		report_event(trans, NULL, NULL);
760 	state = trans;
761 	last_offset = clock_offset = offset;
762 	clock_epoch = current_time;
763 }
764 
765 /*
766  * direct_freq - calculate frequency directly
767  *
768  * This is very carefully done. When the offset is first computed at the
769  * first update, a residual frequency component results. Subsequently,
770  * updates are suppressed until the end of the measurement interval while
771  * the offset is amortized. At the end of the interval the frequency is
772  * calculated from the current offset, residual offset, length of the
773  * interval and residual frequency component. At the same time the
774  * frequency file is armed for update at the next hourly stats.
775  */
776 static double
777 direct_freq(
778 	double	fp_offset
779 	)
780 {
781 
782 #ifdef KERNEL_PLL
783 	/*
784 	 * If the kernel is enabled, we need the residual offset to
785 	 * calculate the frequency correction.
786 	 */
787 	if (pll_control && kern_enable) {
788 		memset(&ntv,  0, sizeof(ntv));
789 		ntp_adjtime(&ntv);
790 #ifdef STA_NANO
791 		clock_offset = ntv.offset / 1e9;
792 #else /* STA_NANO */
793 		clock_offset = ntv.offset / 1e6;
794 #endif /* STA_NANO */
795 		drift_comp = FREQTOD(ntv.freq);
796 	}
797 #endif /* KERNEL_PLL */
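	/*
	 * Worked example (illustrative numbers only): if the offset has
	 * grown to +0.225 s over a 900-s measurement interval with zero
	 * residual offset and frequency, the frequency set below is
	 * 0.225 / 900 = 250e-6 s/s (250 PPM).
	 */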
798 	set_freq((fp_offset - clock_offset) / (current_time -
799 	    clock_epoch) + drift_comp);
800 	wander_resid = 0;
801 	return (drift_comp);
802 }
803 
804 
805 /*
806  * set_freq - set clock frequency
807  */
808 static void
809 set_freq(
810 	double	freq		/* frequency update */
811 	)
812 {
813 	char	tbuf[80];
814 
815 	drift_comp = freq;
816 
817 #ifdef KERNEL_PLL
818 	/*
819 	 * If the kernel is enabled, update the kernel frequency.
820 	 */
821 	if (pll_control && kern_enable) {
822 		memset(&ntv,  0, sizeof(ntv));
823 		ntv.modes = MOD_FREQUENCY;
824 		ntv.freq = DTOFREQ(drift_comp);
825 		ntp_adjtime(&ntv);
826 		snprintf(tbuf, sizeof(tbuf), "kernel %.3f PPM",
827 		    drift_comp * 1e6);
828 		report_event(EVNT_FSET, NULL, tbuf);
829 	} else {
830 		snprintf(tbuf, sizeof(tbuf), "ntpd %.3f PPM",
831 		    drift_comp * 1e6);
832 		report_event(EVNT_FSET, NULL, tbuf);
833 	}
834 #else /* KERNEL_PLL */
835 	snprintf(tbuf, sizeof(tbuf), "ntpd %.3f PPM", drift_comp *
836 	    1e6);
837 	report_event(EVNT_FSET, NULL, tbuf);
838 #endif /* KERNEL_PLL */
839 }
840 
841 /*
842  * huff-n'-puff filter
843  */
844 void
845 huffpuff()
846 {
847 	int i;
848 
849 	if (sys_huffpuff == NULL)
850 		return;
851 
852 	sys_huffptr = (sys_huffptr + 1) % sys_hufflen;
853 	sys_huffpuff[sys_huffptr] = 1e9;
854 	sys_mindly = 1e9;
855 	for (i = 0; i < sys_hufflen; i++) {
856 		if (sys_huffpuff[i] < sys_mindly)
857 			sys_mindly = sys_huffpuff[i];
858 	}
859 }
860 
861 
862 /*
863  * loop_config - configure the loop filter
864  *
865  * LOCKCLOCK: The LOOP_DRIFTINIT and LOOP_DRIFTCOMP cases are no-ops.
866  */
867 void
868 loop_config(
869 	int	item,
870 	double	freq
871 	)
872 {
873 	int i;
874 
875 #ifdef DEBUG
876 	if (debug > 1)
877 		printf("loop_config: item %d freq %f\n", item, freq);
878 #endif
879 	switch (item) {
880 
881 	/*
882 	 * We first assume the kernel supports the ntp_adjtime()
883 	 * syscall. If that syscall works, initialize the kernel time
884 	 * variables. Otherwise, continue leaving no harm behind.
885 	 */
886 	case LOOP_DRIFTINIT:
887 #ifndef LOCKCLOCK
888 #ifdef KERNEL_PLL
889 		if (mode_ntpdate)
890 			break;
891 
892 		pll_control = 1;
893 		memset(&ntv, 0, sizeof(ntv));
894 		ntv.modes = MOD_BITS;
895 		ntv.status = STA_PLL;
896 		ntv.maxerror = MAXDISPERSE;
897 		ntv.esterror = MAXDISPERSE;
898 		ntv.constant = sys_poll;
899 #ifdef SIGSYS
900 		/*
901 		 * Use sigsetjmp() to save state and then call
902 		 * ntp_adjtime(); if it fails, then siglongjmp() is used
903 		 * to return control
904 		 */
905 		newsigsys.sa_handler = pll_trap;
906 		newsigsys.sa_flags = 0;
907 		if (sigaction(SIGSYS, &newsigsys, &sigsys)) {
908 			msyslog(LOG_ERR,
909 			    "sigaction() fails to save SIGSYS trap: %m");
910 			pll_control = 0;
911 		}
912 		if (sigsetjmp(env, 1) == 0)
913 			ntp_adjtime(&ntv);
914 		if ((sigaction(SIGSYS, &sigsys,
915 		    (struct sigaction *)NULL))) {
916 			msyslog(LOG_ERR,
917 			    "sigaction() fails to restore SIGSYS trap: %m");
918 			pll_control = 0;
919 		}
920 #else /* SIGSYS */
921 		ntp_adjtime(&ntv);
922 #endif /* SIGSYS */
923 
924 		/*
925 		 * Save the result status and light up an external clock
926 		 * if available.
927 		 */
928 		pll_status = ntv.status;
929 		if (pll_control) {
930 #ifdef STA_NANO
931 			if (pll_status & STA_CLK)
932 				ext_enable = 1;
933 #endif /* STA_NANO */
934 			sync_status("kernel time sync enabled", ntv.status);
935 		}
936 #endif /* KERNEL_PLL */
937 #endif /* LOCKCLOCK */
938 		break;
939 
940 	/*
941 	 * Initialize the frequency. If the frequency file is missing or
942 	 * broken, set the initial frequency to zero and set the state
943 	 * to NSET. Otherwise, set the initial frequency to the given
944 	 * value and the state to FSET.
945 	 */
946 	case LOOP_DRIFTCOMP:
947 #ifndef LOCKCLOCK
948 		if (freq > NTP_MAXFREQ || freq < -NTP_MAXFREQ) {
949 			set_freq(0);
950 			rstclock(EVNT_NSET, 0);
951 		} else {
952 			set_freq(freq);
953 			rstclock(EVNT_FSET, 0);
954 		}
955 #endif /* LOCKCLOCK */
956 		break;
957 
958 	/*
959 	 * Disable the kernel at shutdown. The microkernel just abandons
960 	 * ship. The nanokernel carefully cleans up so applications can
961 	 * see this. Note the last programmed offset and frequency are
962 	 * left in place.
963 	 */
964 	case LOOP_KERN_CLEAR:
965 #ifndef LOCKCLOCK
966 #ifdef KERNEL_PLL
967 		if (pll_control && kern_enable) {
968 			memset((char *)&ntv, 0, sizeof(ntv));
969 			ntv.modes = MOD_STATUS;
970 			ntv.status = STA_UNSYNC;
971 			ntp_adjtime(&ntv);
972 			sync_status("kernel time sync disabled", ntv.status);
973 		}
974 #endif /* KERNEL_PLL */
975 #endif /* LOCKCLOCK */
976 		break;
977 
978 	/*
979 	 * Tinker command variables for Ulrich Windl. Very dangerous.
980 	 */
981 	case LOOP_ALLAN:	/* Allan intercept (log2) (allan) */
982 		allan_xpt = (u_char)freq;
983 		break;
984 
985 	case LOOP_CODEC:	/* audio codec frequency (codec) */
986 		clock_codec = freq / 1e6;
987 		break;
988 
989 	case LOOP_PHI:		/* dispersion threshold (dispersion) */
990 		clock_phi = freq / 1e6;
991 		break;
992 
993 	case LOOP_FREQ:		/* initial frequency (freq) */
994 		set_freq(freq / 1e6);
995 		rstclock(EVNT_FSET, 0);
996 		break;
997 
998 	case LOOP_HUFFPUFF:	/* huff-n'-puff length (huffpuff) */
999 		if (freq < HUFFPUFF)
1000 			freq = HUFFPUFF;
1001 		sys_hufflen = (int)(freq / HUFFPUFF);
1002 		sys_huffpuff = (double *)emalloc(sizeof(double) *
1003 		    sys_hufflen);
1004 		for (i = 0; i < sys_hufflen; i++)
1005 			sys_huffpuff[i] = 1e9;
1006 		sys_mindly = 1e9;
1007 		break;
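	/*
	 * Illustrative example (assuming HUFFPUFF is the 900-s sample
	 * interval defined elsewhere): "tinker huffpuff 7200" yields
	 * eight filter stages covering the most recent two hours of
	 * delay samples.
	 */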
1008 
1009 	case LOOP_PANIC:	/* panic threshold (panic) */
1010 		clock_panic = freq;
1011 		break;
1012 
1013 	case LOOP_MAX:		/* step threshold (step) */
1014 		clock_max = freq;
1015 		if (clock_max == 0 || clock_max > 0.5)
1016 			kern_enable = 0;
1017 		break;
1018 
1019 	case LOOP_MINSTEP:	/* stepout threshold (stepout) */
1020 		clock_minstep = freq;
1021 		break;
1022 
1023 	case LOOP_LEAP:		/* not used */
1024 	default:
1025 		msyslog(LOG_NOTICE,
1026 		    "loop_config: unsupported option %d", item);
1027 	}
1028 }
1029 
1030 
1031 #if defined(KERNEL_PLL) && defined(SIGSYS)
1032 /*
1033  * pll_trap - trap processor for undefined syscalls
1034  *
1035  * This nugget is called by the kernel when the SYS_ntp_adjtime()
1036  * syscall bombs because the silly thing has not been implemented in
1037  * the kernel. In this case the phase-lock loop is emulated by
1038  * the stock adjtime() syscall and a lot of indelicate abuse.
1039  */
1040 static RETSIGTYPE
1041 pll_trap(
1042 	int arg
1043 	)
1044 {
1045 	pll_control = 0;
1046 	siglongjmp(env, 1);
1047 }
1048 #endif /* KERNEL_PLL && SIGSYS */
1049