xref: /freebsd/contrib/ntp/ntpd/ntp_loopfilter.c (revision ea906c41)
1 /*
2  * ntp_loopfilter.c - implements the NTP loop filter algorithm
3  *
4  * ATTENTION: Get approval from Dave Mills on all changes to this file!
5  *
6  */
7 #ifdef HAVE_CONFIG_H
8 # include <config.h>
9 #endif
10 
11 #include "ntpd.h"
12 #include "ntp_io.h"
13 #include "ntp_unixtime.h"
14 #include "ntp_stdlib.h"
15 
16 #include <stdio.h>
17 #include <ctype.h>
18 
19 #include <signal.h>
20 #include <setjmp.h>
21 
22 #if defined(VMS) && defined(VMS_LOCALUNIT)	/*wjm*/
23 #include "ntp_refclock.h"
24 #endif /* VMS */
25 
26 #ifdef KERNEL_PLL
27 #include "ntp_syscall.h"
28 #endif /* KERNEL_PLL */
29 
30 /*
31  * This is an implementation of the clock discipline algorithm described
32  * in UDel TR 97-4-3, as amended. It operates as an adaptive parameter,
33  * hybrid phase/frequency-lock loop. A number of sanity checks are
34  * included to protect against timewarps, timespikes and general mayhem.
35  * All units are in s and s/s, unless noted otherwise.
36  */
37 #define CLOCK_MAX	.128	/* default step threshold (s) */
38 #define CLOCK_MINSTEP	900.	/* default stepout threshold (s) */
39 #define CLOCK_PANIC	1000.	/* default panic threshold (s) */
40 #define	CLOCK_PHI	15e-6	/* max frequency error (s/s) */
41 #define CLOCK_PLL	16.	/* PLL loop gain (log2) */
42 #define CLOCK_AVG	8.	/* parameter averaging constant */
43 #define CLOCK_FLL	(NTP_MAXPOLL + CLOCK_AVG) /* FLL loop gain */
44 #define	CLOCK_ALLAN	1500.	/* compromise Allan intercept (s) */
45 #define CLOCK_DAY	86400.	/* one day in seconds (s) */
46 #define CLOCK_JUNE	(CLOCK_DAY * 30) /* June in seconds (s) */
47 #define CLOCK_LIMIT	30	/* poll-adjust threshold */
48 #define CLOCK_PGATE	4.	/* poll-adjust gate */
49 #define PPS_MAXAGE	120	/* kernel pps signal timeout (s) */
50 
51 /*
52  * Clock discipline state machine. This is used to control the
53  * synchronization behavior during initialization and following a
54  * timewarp.
55  *
56  *	State	< step		> step		Comments
57  *	====================================================
58  *	NSET	FREQ		step, FREQ	no ntp.drift
59  *
60  *	FSET	SYNC		step, SYNC	ntp.drift
61  *
62  *	FREQ	if (mu < 900)	if (mu < 900)	set freq
63  *		    ignore	    ignore
64  *		else		else
65  *		    freq, SYNC	    freq, step, SYNC
66  *
67  *	SYNC	SYNC		if (mu < 900)	adjust phase/freq
68  *				    ignore
69  *				else
70  *				    SPIK
71  *
72  *	SPIK	SYNC		step, SYNC	set phase
73  */
74 #define S_NSET	0		/* clock never set */
75 #define S_FSET	1		/* frequency set from the drift file */
76 #define S_SPIK	2		/* spike detected */
77 #define S_FREQ	3		/* frequency mode */
78 #define S_SYNC	4		/* clock synchronized */
79 
80 /*
81  * Kernel PLL/PPS state machine. This is used with the kernel PLL
82  * modifications described in the README.kernel file.
83  *
84  * If kernel support for the ntp_adjtime() system call is available, the
85  * ntp_control flag is set. The ntp_enable and kern_enable flags can be
86  * set at configuration time or run time using ntpdc. If ntp_enable is
87  * false, the discipline loop is unlocked and no corrections of any kind
88  * are made. If both ntp_control and kern_enable are set, the kernel
89  * support is used as described above; if false, the kernel is bypassed
90  * entirely and the daemon discipline used instead.
91  *
92  * There have been three versions of the kernel discipline code. The
93  * first (microkernel) now in Solaris disciplines the microseconds. The
94  * second and third (nanokernel) disciplines the clock in nanoseconds.
95  * These versions are identified if the symbol STA_PLL is present in the
96  * header file /usr/include/sys/timex.h. The third and current version
97  * includes TAI offset and is identified by the symbol NTP_API with
98  * value 4.
99  *
100  * Each update to a prefer peer sets pps_stratum if it survives the
101  * intersection algorithm and its time is within range. The PPS time
102  * discipline is enabled (STA_PPSTIME bit set in the status word) when
103  * pps_stratum is true and the PPS frequency discipline is enabled. If
104  * the PPS time discipline is enabled and the kernel reports a PPS
105  * signal is present, the pps_control variable is set to the current
106  * time. If the current time is later than pps_control by PPS_MAXAGE
107  * (120 s), this variable is set to zero.
108  *
109  * If an external clock is present, the clock driver sets STA_CLK in the
110  * status word. When the local clock driver sees this bit, it updates
111  * via this routine, which then calls ntp_adjtime() with the STA_PLL bit
112  * set to zero, in which case the system clock is not adjusted. This is
113  * also a signal for the external clock driver to discipline the system
114  * clock.
115  */
116 /*
117  * Program variables that can be tinkered.
118  */
119 double	clock_max = CLOCK_MAX;	/* step threshold (s) */
120 double	clock_minstep = CLOCK_MINSTEP; /* stepout threshold (s) */
121 double	clock_panic = CLOCK_PANIC; /* panic threshold (s) */
122 double	clock_phi = CLOCK_PHI;	/* dispersion rate (s/s) */
123 double	allan_xpt = CLOCK_ALLAN; /* Allan intercept (s) */
124 
125 /*
126  * Program variables
127  */
128 static double clock_offset;	/* offset (s) */
129 double	clock_jitter;		/* offset jitter (s) */
130 double	drift_comp;		/* frequency (s/s) */
131 double	clock_stability;	/* frequency stability (wander) (s/s) */
132 u_long	sys_clocktime;		/* last system clock update */
133 u_long	pps_control;		/* last pps update */
134 u_long	sys_tai;		/* UTC offset from TAI (s) */
135 static void rstclock P((int, u_long, double)); /* transition function */
136 
137 #ifdef KERNEL_PLL
138 struct timex ntv;		/* kernel API parameters */
139 int	pll_status;		/* status bits for kernel pll */
140 #endif /* KERNEL_PLL */
141 
142 /*
143  * Clock state machine control flags
144  */
145 int	ntp_enable;		/* clock discipline enabled */
146 int	pll_control;		/* kernel support available */
147 int	kern_enable;		/* kernel support enabled */
148 int	pps_enable;		/* kernel PPS discipline enabled */
149 int	ext_enable;		/* external clock enabled */
150 int	pps_stratum;		/* pps stratum */
151 int	allow_panic = FALSE;	/* allow panic correction */
152 int	mode_ntpdate = FALSE;	/* exit on first clock set */
153 
154 /*
155  * Clock state machine variables
156  */
157 int	state;			/* clock discipline state */
158 u_char	sys_poll = NTP_MINDPOLL; /* time constant/poll (log2 s) */
159 int	tc_counter;		/* jiggle counter */
160 double	last_offset;		/* last offset (s) */
161 
162 /*
163  * Huff-n'-puff filter variables
164  */
165 static double *sys_huffpuff;	/* huff-n'-puff filter */
166 static int sys_hufflen;		/* huff-n'-puff filter stages */
167 static int sys_huffptr;		/* huff-n'-puff filter pointer */
168 static double sys_mindly;	/* huff-n'-puff filter min delay */
169 
170 #if defined(KERNEL_PLL)
171 /* Emacs cc-mode goes nuts if we split the next line... */
172 #define MOD_BITS (MOD_OFFSET | MOD_MAXERROR | MOD_ESTERROR | \
173     MOD_STATUS | MOD_TIMECONST)
174 #ifdef SIGSYS
175 static void pll_trap P((int));	/* configuration trap */
176 static struct sigaction sigsys;	/* current sigaction status */
177 static struct sigaction newsigsys; /* new sigaction status */
178 static sigjmp_buf env;		/* environment var. for pll_trap() */
179 #endif /* SIGSYS */
180 #endif /* KERNEL_PLL */
181 
182 /*
183  * init_loopfilter - initialize loop filter data
184  */
185 void
186 init_loopfilter(void)
187 {
188 	/*
189 	 * Initialize state variables. Initially, we expect no drift
190 	 * file, so set the state to S_NSET. If a drift file is present,
191 	 * it will be detected later and the state set to S_FSET.
192 	 */
193 	rstclock(S_NSET, 0, 0);
194 	clock_jitter = LOGTOD(sys_precision);	/* seed jitter at system clock precision */
195 }
196 
197 /*
198  * local_clock - the NTP logical clock loop filter.
199  *
200  * Return codes:
201  * -1	update ignored: exceeds panic threshold
202  * 0	update ignored: popcorn or exceeds step threshold
203  * 1	clock was slewed
204  * 2	clock was stepped
205  *
206  * LOCKCLOCK: The only thing this routine does is set the
207  * sys_rootdispersion variable equal to the peer dispersion.
208  */
209 int
210 local_clock(
211 	struct	peer *peer,	/* synch source peer structure */
212 	double	fp_offset	/* clock offset (s) */
213 	)
214 {
215 	int	rval;		/* return code */
216 	u_long	mu;		/* interval since last update (s) */
217 	double	flladj;		/* FLL frequency adjustment (ppm) */
218 	double	plladj;		/* PLL frequency adjustment (ppm) */
219 	double	clock_frequency; /* clock frequency adjustment (ppm) */
220 	double	dtemp, etemp;	/* double temps */
221 #ifdef OPENSSL
222 	u_int32 *tpt;
223 	int	i;
224 	u_int	len;
225 	long	togo;
226 #endif /* OPENSSL */
227 
228 	/*
229 	 * If the loop is opened or the NIST LOCKCLOCK is in use,
230 	 * monitor and record the offsets anyway in order to determine
231 	 * the open-loop response and then go home.
232 	 */
233 #ifdef DEBUG
234 	if (debug)
235 		printf(
236 		    "local_clock: assocID %d offset %.9f freq %.3f state %d\n",
237 		    peer->associd, fp_offset, drift_comp * 1e6, state);
238 #endif
239 #ifdef LOCKCLOCK
240 	return (0);
241 
242 #else /* LOCKCLOCK */
243 	if (!ntp_enable) {
244 		record_loop_stats(fp_offset, drift_comp, clock_jitter,
245 		    clock_stability, sys_poll);
246 		return (0);
247 	}
248 
249 	/*
250 	 * If the clock is way off, panic is declared. The clock_panic
251 	 * defaults to 1000 s; if set to zero, the panic will never
252 	 * occur. The allow_panic defaults to FALSE, so the first panic
253 	 * will exit. It can be set TRUE by a command line option, in
254 	 * which case the clock will be set anyway and time marches on.
255 	 * But, allow_panic will be set FALSE when the update is less
256 	 * than the step threshold; so, subsequent panics will exit.
257 	 */
258 	if (fabs(fp_offset) > clock_panic && clock_panic > 0 &&
259 	    !allow_panic) {
260 		msyslog(LOG_ERR,
261 		    "time correction of %.0f seconds exceeds sanity limit (%.0f); set clock manually to the correct UTC time.",
262 		    fp_offset, clock_panic);
263 		return (-1);
264 	}
265 
266 	/*
267 	 * If simulating ntpdate, set the clock directly, rather than
268 	 * using the discipline. The clock_max defines the step
269 	 * threshold, above which the clock will be stepped instead of
270 	 * slewed. The value defaults to 128 ms, but can be set to even
271 	 * unreasonable values. If set to zero, the clock will never be
272 	 * stepped. Note that a slew will persist beyond the life of
273 	 * this program.
274 	 *
275 	 * Note that if ntpdate is active, the terminal does not detach,
276 	 * so the termination comments print directly to the console.
277 	 */
278 	if (mode_ntpdate) {
279 		if (fabs(fp_offset) > clock_max && clock_max > 0) {
280 			step_systime(fp_offset);
281 			msyslog(LOG_NOTICE, "time reset %+.6f s",
282 	   		    fp_offset);
283 			printf("ntpd: time set %+.6fs\n", fp_offset);
284 		} else {
285 			adj_systime(fp_offset);
286 			msyslog(LOG_NOTICE, "time slew %+.6f s",
287 			    fp_offset);
288 			printf("ntpd: time slew %+.6fs\n", fp_offset);
289 		}
290 		record_loop_stats(fp_offset, drift_comp, clock_jitter,
291 		    clock_stability, sys_poll);
292 		exit (0);
293 	}
294 
295 	/*
296 	 * The huff-n'-puff filter finds the lowest delay in the recent
297 	 * interval. This is used to correct the offset by one-half the
298 	 * difference between the sample delay and minimum delay. This
299 	 * is most effective if the delays are highly asymmetric and
300 	 * clockhopping is avoided and the clock frequency wander is
301 	 * relatively small.
302 	 *
303 	 * Note either there is no prefer peer or this update is from
304 	 * the prefer peer.
305 	 */
306 	if (sys_huffpuff != NULL && (sys_prefer == NULL || sys_prefer ==
307 	    peer)) {
308 		if (peer->delay < sys_huffpuff[sys_huffptr])
309 			sys_huffpuff[sys_huffptr] = peer->delay;
310 		if (peer->delay < sys_mindly)
311 			sys_mindly = peer->delay;
312 		if (fp_offset > 0)
313 			dtemp = -(peer->delay - sys_mindly) / 2;
314 		else
315 			dtemp = (peer->delay - sys_mindly) / 2;
316 		fp_offset += dtemp;
317 #ifdef DEBUG
318 		if (debug)
319 			printf(
320 		    "local_clock: size %d mindly %.6f huffpuff %.6f\n",
321 			    sys_hufflen, sys_mindly, dtemp);
322 #endif
323 	}
324 
325 	/*
326 	 * Clock state machine transition function. This is where the
327 	 * action is and defines how the system reacts to large phase
328 	 * and frequency errors. There are two main regimes: when the
329 	 * offset exceeds the step threshold and when it does not.
330 	 * However, if the step threshold is set to zero, a step will
331 	 * never occur. See the instruction manual for the details how
332 	 * these actions interact with the command line options.
333 	 *
334 	 * Note the system poll is set to minpoll only if the clock is
335 	 * stepped. Note also the kernel is disabled if step is
336 	 * disabled or greater than 0.5 s.
337 	 */
338 	clock_frequency = flladj = plladj = 0;
339 	mu = peer->epoch - sys_clocktime;
340 	if (clock_max == 0 || clock_max > 0.5)
341 		kern_enable = 0;
342 	rval = 1;
343 	if (fabs(fp_offset) > clock_max && clock_max > 0) {
344 		switch (state) {
345 
346 		/*
347 		 * In S_SYNC state we ignore the first outlier and
348 		 * switch to S_SPIK state.
349 		 */
350 		case S_SYNC:
351 			state = S_SPIK;
352 			return (0);
353 
354 		/*
355 		 * In S_FREQ state we ignore outliers and inliers. At
356 		 * the first outlier after the stepout threshold,
357 		 * compute the apparent frequency correction and step
358 		 * the phase.
359 		 */
360 		case S_FREQ:
361 			if (mu < clock_minstep)
362 				return (0);
363 
364 			clock_frequency = (fp_offset - clock_offset) /
365 			    mu;
366 
367 			/* fall through to S_SPIK */
368 
369 		/*
370 		 * In S_SPIK state we ignore succeeding outliers until
371 		 * either an inlier is found or the stepout threshold is
372 		 * exceeded.
373 		 */
374 		case S_SPIK:
375 			if (mu < clock_minstep)
376 				return (0);
377 
378 			/* fall through to default */
379 
380 		/*
381 		 * We get here by default in S_NSET and S_FSET states
382 		 * and from above in S_FREQ or S_SPIK states.
383 		 *
384 		 * In S_NSET state an initial frequency correction is
385 		 * not available, usually because the frequency file has
386 		 * not yet been written. Since the time is outside the
387 		 * step threshold, the clock is stepped. The frequency
388 		 * will be set directly following the stepout interval.
389 		 *
390 		 * In S_FSET state the initial frequency has been set
391 		 * from the frequency file. Since the time is outside
392 		 * the step threshold, the clock is stepped immediately,
393 		 * rather than after the stepout interval. Guys get
394 		 * nervous if it takes 17 minutes to set the clock for
395 		 * the first time.
396 		 *
397 		 * In S_FREQ and S_SPIK states the stepout threshold has
398 		 * expired and the phase is still above the step
399 		 * threshold. Note that a single spike greater than the
400 		 * step threshold is always suppressed, even at the
401 		 * longer poll intervals.
402 		 */
403 		default:
404 			step_systime(fp_offset);
405 			msyslog(LOG_NOTICE, "time reset %+.6f s",
406 			    fp_offset);
407 			reinit_timer();
408 			tc_counter = 0;
409 			sys_poll = NTP_MINPOLL;
410 			sys_tai = 0;
411 			clock_jitter = LOGTOD(sys_precision);
412 			rval = 2;
413 			if (state == S_NSET) {
414 				rstclock(S_FREQ, peer->epoch, 0);
415 				return (rval);
416 			}
417 			break;
418 		}
419 		rstclock(S_SYNC, peer->epoch, 0);
420 	} else {
421 
422 		/*
423 		 * The offset is less than the step threshold. Calculate
424 		 * the jitter as the exponentially weighted offset
425 		 * differences.
426  	      	 */
427 		etemp = SQUARE(clock_jitter);
428 		dtemp = SQUARE(max(fabs(fp_offset - last_offset),
429 		    LOGTOD(sys_precision)));
430 		clock_jitter = SQRT(etemp + (dtemp - etemp) /
431 		    CLOCK_AVG);
432 		switch (state) {
433 
434 		/*
435 		 * In S_NSET state this is the first update received and
436 		 * the frequency has not been initialized. Adjust the
437 		 * phase, but do not adjust the frequency until after
438 		 * the stepout threshold.
439 		 */
440 		case S_NSET:
441 			rstclock(S_FREQ, peer->epoch, fp_offset);
442 			break;
443 
444 		/*
445 		 * In S_FSET state this is the first update received and
446 		 * the frequency has been initialized. Adjust the phase,
447 		 * but do not adjust the frequency until the next
448 		 * update.
449 		 */
450 		case S_FSET:
451 			rstclock(S_SYNC, peer->epoch, fp_offset);
452 			break;
453 
454 		/*
455 		 * In S_FREQ state ignore updates until the stepout
456 		 * threshold. After that, correct the phase and
457 		 * frequency and switch to S_SYNC state.
458 		 */
459 		case S_FREQ:
460 			if (mu < clock_minstep)
461 				return (0);
462 
463 			clock_frequency = (fp_offset - clock_offset) /
464 			    mu;
465 			rstclock(S_SYNC, peer->epoch, fp_offset);
466 			break;
467 
468 		/*
469 		 * We get here by default in S_SYNC and S_SPIK states.
470 		 * Here we compute the frequency update due to PLL and
471 		 * FLL contributions.
472 		 */
473 		default:
474 			allow_panic = FALSE;
475 
476 			/*
477 			 * The FLL and PLL frequency gain constants
478 			 * depend on the poll interval and Allan
479 			 * intercept. The PLL is always used, but
480 			 * becomes ineffective above the Allan
481 			 * intercept. The FLL is not used below one-half
482 			 * the Allan intercept. Above that the loop gain
483 			 * increases in steps to 1 / CLOCK_AVG.
484 			 */
485 			if (ULOGTOD(sys_poll) > allan_xpt / 2) {
486 				dtemp = CLOCK_FLL - sys_poll;
487 				flladj = (fp_offset - clock_offset) /
488 				    (max(mu, allan_xpt) * dtemp);
489 			}
490 
491 			/*
492 			 * For the PLL the integration interval
493 			 * (numerator) is the minimum of the update
494 			 * interval and poll interval. This allows
495 			 * oversampling, but not undersampling.
496 			 */
497 			etemp = min(mu, (u_long)ULOGTOD(sys_poll));
498 			dtemp = 4 * CLOCK_PLL * ULOGTOD(sys_poll);
499 			plladj = fp_offset * etemp / (dtemp * dtemp);
500 			rstclock(S_SYNC, peer->epoch, fp_offset);
501 			break;
502 		}
503 	}
504 
505 #ifdef OPENSSL
506 	/*
507 	 * Scan the leapsecond table to determine the TAI offset. If
508 	 * there is a scheduled leap in future, set the leap warning,
509 	 * but only if less than 30 days before the leap.
510 	 */
511 	tpt = (u_int32 *)tai_leap.ptr;
512 	len = ntohl(tai_leap.vallen) / sizeof(u_int32);
513 	if (tpt != NULL) {
514 		for (i = 0; i < len; i++) {
515 			togo = (long)ntohl(tpt[i]) -
516 			    (long)peer->rec.l_ui;
517 			if (togo > 0) {
518 				if (togo < CLOCK_JUNE)
519 					leap_next |= LEAP_ADDSECOND;
520 				break;
521 			}
522 		}
523 #if defined(STA_NANO) && NTP_API == 4
524 		if (pll_control && kern_enable && sys_tai == 0) {
525 			memset(&ntv, 0, sizeof(ntv));
526 			ntv.modes = MOD_TAI;
527 			ntv.constant = i + TAI_1972 - 1;
528 			ntp_adjtime(&ntv);
529 		}
530 #endif /* STA_NANO */
531 		sys_tai = i + TAI_1972 - 1;	/* NOTE(review): if the scan exhausts the table, i == len here — confirm table always has a future entry */
532 	}
533 #endif /* OPENSSL */
534 #ifdef KERNEL_PLL
535 	/*
536 	 * This code segment works when clock adjustments are made using
537 	 * precision time kernel support and the ntp_adjtime() system
538 	 * call. This support is available in Solaris 2.6 and later,
539 	 * Digital Unix 4.0 and later, FreeBSD, Linux and specially
540 	 * modified kernels for HP-UX 9 and Ultrix 4. In the case of the
541 	 * DECstation 5000/240 and Alpha AXP, additional kernel
542 	 * modifications provide a true microsecond clock and nanosecond
543 	 * clock, respectively.
544 	 *
545 	 * Important note: The kernel discipline is used only if the
546 	 * step threshold is less than 0.5 s, as anything higher can
547 	 * lead to overflow problems. This might occur if some misguided
548 	 * lad set the step threshold to something ridiculous.
549 	 */
550 	if (pll_control && kern_enable) {
551 
552 		/*
553 		 * We initialize the structure for the ntp_adjtime()
554 		 * system call. We have to convert everything to
555 		 * microseconds or nanoseconds first. Do not update the
556 		 * system variables if the ext_enable flag is set. In
557 		 * this case, the external clock driver will update the
558 		 * variables, which will be read later by the local
559 		 * clock driver. Afterwards, remember the time and
560 		 * frequency offsets for jitter and stability values and
561 		 * to update the frequency file.
562 		 */
563 		memset(&ntv,  0, sizeof(ntv));
564 		if (ext_enable) {
565 			ntv.modes = MOD_STATUS;
566 		} else {
567 			struct tm *tm = NULL;
568 			time_t tstamp;
569 
570 #ifdef STA_NANO
571 			ntv.modes = MOD_BITS | MOD_NANO;
572 #else /* STA_NANO */
573 			ntv.modes = MOD_BITS;
574 #endif /* STA_NANO */
575 			if (clock_offset < 0)
576 				dtemp = -.5;
577 			else
578 				dtemp = .5;	/* round to nearest when converting to integer units */
579 #ifdef STA_NANO
580 			ntv.offset = (int32)(clock_offset * 1e9 +
581 			    dtemp);
582 			ntv.constant = sys_poll;
583 #else /* STA_NANO */
584 			ntv.offset = (int32)(clock_offset * 1e6 +
585 			    dtemp);
586 			ntv.constant = sys_poll - 4;
587 #endif /* STA_NANO */
588 
589 			/*
590 			 * The frequency is set directly only if
591 			 * clock_frequency is nonzero coming out of FREQ
592 			 * state.
593 			 */
594 			if (clock_frequency != 0) {
595 				ntv.modes |= MOD_FREQUENCY;
596 				ntv.freq = (int32)((clock_frequency +
597 				    drift_comp) * 65536e6);
598 			}
599 			ntv.esterror = (u_int32)(clock_jitter * 1e6);
600 			ntv.maxerror = (u_int32)((sys_rootdelay / 2 +
601 			    sys_rootdispersion) * 1e6);
602 			ntv.status = STA_PLL;
603 
604 			/*
605 			 * Set the leap bits in the status word, but
606 			 * only on the last day of June or December.
607 			 */
608 			tstamp = peer->rec.l_ui - JAN_1970;
609 			tm = gmtime(&tstamp);
610 			if (tm != NULL) {
611 				if ((tm->tm_mon + 1 == 6 &&
612 				    tm->tm_mday == 30) || (tm->tm_mon +
613 				    1 == 12 && tm->tm_mday == 31)) {
614 					if (leap_next & LEAP_ADDSECOND)
615 						ntv.status |= STA_INS;
616 					else if (leap_next &
617 					    LEAP_DELSECOND)
618 						ntv.status |= STA_DEL;
619 				}
620 			}
621 
622 			/*
623 			 * If the PPS signal is up and enabled, light
624 			 * the frequency bit. If the PPS driver is
625 			 * working, light the phase bit as well. If not,
626 			 * douse the lights, since somebody else may
627 			 * have left the switch on.
628 			 */
629 			if (pps_enable && pll_status & STA_PPSSIGNAL) {
630 				ntv.status |= STA_PPSFREQ;
631 				if (pps_stratum < STRATUM_UNSPEC)
632 					ntv.status |= STA_PPSTIME;
633 			} else {
634 				ntv.status &= ~(STA_PPSFREQ |
635 				    STA_PPSTIME);
636 			}
637 		}
638 
639 		/*
640 		 * Pass the stuff to the kernel. If it squeals, turn off
641 		 * the pig. In any case, fetch the kernel offset and
642 		 * frequency and pretend we did it here.
643 		 */
644 		if (ntp_adjtime(&ntv) == TIME_ERROR) {
645 			NLOG(NLOG_SYNCEVENT | NLOG_SYSEVENT)
646 			    msyslog(LOG_NOTICE,
647 			    "kernel time sync error %04x", ntv.status);
648 			ntv.status &= ~(STA_PPSFREQ | STA_PPSTIME);
649 		} else {
650 			if ((ntv.status ^ pll_status) & ~STA_FLL)
651 				NLOG(NLOG_SYNCEVENT | NLOG_SYSEVENT)
652 				    msyslog(LOG_NOTICE,
653 				    "kernel time sync status change %04x",
654 				    ntv.status);
655 		}
656 		pll_status = ntv.status;
657 #ifdef STA_NANO
658 		clock_offset = ntv.offset / 1e9;
659 #else /* STA_NANO */
660 		clock_offset = ntv.offset / 1e6;
661 #endif /* STA_NANO */
662 		clock_frequency = ntv.freq / 65536e6;
663 		flladj = plladj = 0;
664 
665 		/*
666 		 * If the kernel PPS is lit, monitor its performance.
667 		 */
668 		if (ntv.status & STA_PPSTIME) {
669 			pps_control = current_time;
670 #ifdef STA_NANO
671 			clock_jitter = ntv.jitter / 1e9;
672 #else /* STA_NANO */
673 			clock_jitter = ntv.jitter / 1e6;
674 #endif /* STA_NANO */
675 		}
676 	} else {
677 #endif /* KERNEL_PLL */
678 
679 		/*
680 		 * We get here if the kernel discipline is not enabled.
681 		 * Adjust the clock frequency as the sum of the directly
682 		 * computed frequency (if measured) and the PLL and FLL
683 		 * increments.
684 		 */
685 		clock_frequency = drift_comp + clock_frequency +
686 		    flladj + plladj;
687 #ifdef KERNEL_PLL
688 	}
689 #endif /* KERNEL_PLL */
690 
691 	/*
692 	 * Clamp the frequency within the tolerance range and calculate
693 	 * the frequency change since the last update.
694 	 */
695 	if (fabs(clock_frequency) > NTP_MAXFREQ)
696 		NLOG(NLOG_SYNCEVENT | NLOG_SYSEVENT)
697 		    msyslog(LOG_NOTICE,
698 		    "frequency error %.0f PPM exceeds tolerance %.0f PPM",
699 		    clock_frequency * 1e6, NTP_MAXFREQ * 1e6);
700 	dtemp = SQUARE(clock_frequency - drift_comp);
701 	if (clock_frequency > NTP_MAXFREQ)
702 		drift_comp = NTP_MAXFREQ;
703 	else if (clock_frequency < -NTP_MAXFREQ)
704 		drift_comp = -NTP_MAXFREQ;
705 	else
706 		drift_comp = clock_frequency;
707 
708 	/*
709 	 * Calculate the wander as the exponentially weighted frequency
710 	 * differences.
711 	 */
712 	etemp = SQUARE(clock_stability);
713 	clock_stability = SQRT(etemp + (dtemp - etemp) / CLOCK_AVG);
714 
715 	/*
716 	 * Here we adjust the poll interval by comparing the current
717 	 * offset with the clock jitter. If the offset is less than the
718 	 * clock jitter times a constant, then the averaging interval is
719 	 * increased, otherwise it is decreased. A bit of hysteresis
720 	 * helps calm the dance. Works best using burst mode.
721 	 */
722 	if (fabs(clock_offset) < CLOCK_PGATE * clock_jitter) {
723 		tc_counter += sys_poll;
724 		if (tc_counter > CLOCK_LIMIT) {
725 			tc_counter = CLOCK_LIMIT;
726 			if (sys_poll < peer->maxpoll) {
727 				tc_counter = 0;
728 				sys_poll++;
729 			}
730 		}
731 	} else {
732 		tc_counter -= sys_poll << 1;
733 		if (tc_counter < -CLOCK_LIMIT) {
734 			tc_counter = -CLOCK_LIMIT;
735 			if (sys_poll > peer->minpoll) {
736 				tc_counter = 0;
737 				sys_poll--;
738 			}
739 		}
740 	}
741 
742 	/*
743 	 * Yibbidy, yibbbidy, yibbidy; that's all folks.
744 	 */
745 	record_loop_stats(clock_offset, drift_comp, clock_jitter,
746 	    clock_stability, sys_poll);
747 #ifdef DEBUG
748 	if (debug)
749 		printf(
750 		    "local_clock: mu %lu jitr %.6f freq %.3f stab %.6f poll %d count %d\n",
751 		    mu, clock_jitter, drift_comp * 1e6,
752 		    clock_stability * 1e6, sys_poll, tc_counter);
753 #endif /* DEBUG */
754 	return (rval);
755 #endif /* LOCKCLOCK */
756 }
757 
758 
759 /*
760  * adj_host_clock - Called once every second to update the local clock.
761  *
762  * LOCKCLOCK: The only thing this routine does is increment the
763  * sys_rootdispersion variable.
764  */
765 void
766 adj_host_clock(
767 	void
768 	)
769 {
770 	double	adjustment;
771 
772 	/*
773 	 * Update the dispersion since the last update. In contrast to
774 	 * NTPv3, NTPv4 does not declare unsynchronized after one day,
775 	 * since the dispersion check serves this function. Also,
776 	 * since the poll interval can exceed one day, the old test
777 	 * would be counterproductive. Note we do this even with
778 	 * external clocks, since the clock driver will recompute the
779 	 * maximum error and the local clock driver will pick it up and
780 	 * pass to the common refclock routines. Very elegant.
781 	 */
782 	sys_rootdispersion += clock_phi;
783 
784 #ifndef LOCKCLOCK
785 	/*
786 	 * If clock discipline is disabled or if the kernel is enabled,
787 	 * get out of Dodge quick.
788 	 */
789 	if (!ntp_enable || mode_ntpdate || (pll_control &&
790 	    kern_enable))
791 		return;
792 
793 	/*
794 	 * Declare PPS kernel unsync if the pps signal has not been
795 	 * heard for a few minutes.
796 	 */
797 	if (pps_control && current_time - pps_control > PPS_MAXAGE) {
798 		if (pps_control)	/* NOTE(review): redundant — the outer condition already requires pps_control != 0 */
799 			NLOG(NLOG_SYNCEVENT | NLOG_SYSEVENT)
800 			    msyslog(LOG_NOTICE, "pps sync disabled");
801 		pps_control = 0;
802 	}
803 
804 	/*
805 	 * Implement the phase and frequency adjustments. The gain
806 	 * factor (denominator) is not allowed to increase beyond the
807 	 * Allan intercept. It doesn't make sense to average phase noise
808 	 * beyond this point and it helps to damp residual offset at the
809 	 * longer poll intervals.
810 	 */
811 	adjustment = clock_offset / (CLOCK_PLL * min(ULOGTOD(sys_poll),
812 	    allan_xpt));
813 	clock_offset -= adjustment;
814 	adj_systime(adjustment + drift_comp);
815 #endif /* LOCKCLOCK */
816 }
817 
818 
819 /*
820  * Clock state machine. Enter new state and set state variables. Note we
821  * use the time of the last clock filter sample, which may be earlier
822  * than the current time.
823  */
824 static void
825 rstclock(
826 	int	trans,		/* new state */
827 	u_long	update,		/* new update time */
828 	double	offset		/* new offset */
829 	)
830 {
831 #ifdef DEBUG
832 	if (debug)
833 		printf("local_clock: time %lu offset %.6f freq %.3f state %d\n",
834 		    update, offset, drift_comp * 1e6, trans);
835 #endif
836 	state = trans;
837 	sys_clocktime = update;
838 	last_offset = clock_offset = offset;	/* both loop-filter offsets track the new value */
839 }
840 
841 
842 /*
843  * huffpuff - advance the huff-n'-puff filter shift register one stage
844  * and recompute the minimum delay over all stages. A no-op unless the
845  * filter has been configured via LOOP_HUFFPUFF.
846  */
847 void
848 huffpuff()
849 {
850 	int i;
851 
852 	if (sys_huffpuff == NULL)
853 		return;
854 
855 	sys_huffptr = (sys_huffptr + 1) % sys_hufflen;
856 	sys_huffpuff[sys_huffptr] = 1e9;	/* evict oldest stage with a sentinel delay */
857 	sys_mindly = 1e9;
858 	for (i = 0; i < sys_hufflen; i++) {
859 		if (sys_huffpuff[i] < sys_mindly)
860 			sys_mindly = sys_huffpuff[i];
861 	}
862 }
861 
862 
863 /*
864  * loop_config - configure the loop filter
865  *
866  * LOCKCLOCK: The LOOP_DRIFTINIT and LOOP_DRIFTCOMP cases are no-ops.
867  */
868 void
869 loop_config(
870 	int item,
871 	double freq
872 	)
873 {
874 	int i;
875 
876 	switch (item) {
877 
878 	case LOOP_DRIFTINIT:
879 
880 #ifndef LOCKCLOCK
881 #ifdef KERNEL_PLL
882 		/*
883 		 * Assume the kernel supports the ntp_adjtime() syscall.
884 		 * If that syscall works, initialize the kernel time
885  		 * variables. Otherwise, continue leaving no harm
886 		 * behind. While at it, ask to set nanosecond mode. If
887 		 * the kernel agrees, rejoice; otherwise, it does only
888 		 * microseconds.
889 		 */
890 		if (mode_ntpdate)
891 			break;
892 
893 		pll_control = 1;
894 		memset(&ntv, 0, sizeof(ntv));
895 #ifdef STA_NANO
896 		ntv.modes = MOD_BITS | MOD_NANO;
897 #else /* STA_NANO */
898 		ntv.modes = MOD_BITS;
899 #endif /* STA_NANO */
900 		ntv.maxerror = MAXDISPERSE;
901 		ntv.esterror = MAXDISPERSE;
902 		ntv.status = STA_UNSYNC;
903 #ifdef SIGSYS
904 		/*
905 		 * Use sigsetjmp() to save state and then call
906 		 * ntp_adjtime(); if it fails, then siglongjmp() is used
907 		 * to return control
908 		 */
909 		newsigsys.sa_handler = pll_trap;
910 		newsigsys.sa_flags = 0;
911 		if (sigaction(SIGSYS, &newsigsys, &sigsys)) {
912 			msyslog(LOG_ERR,
913 			    "sigaction() fails to save SIGSYS trap: %m");
914 			pll_control = 0;
915 		}
916 		if (sigsetjmp(env, 1) == 0)
917 			ntp_adjtime(&ntv);	/* pll_trap() clears pll_control if the syscall is missing */
918 		if ((sigaction(SIGSYS, &sigsys,
919 		    (struct sigaction *)NULL))) {
920 			msyslog(LOG_ERR,
921 			    "sigaction() fails to restore SIGSYS trap: %m");
922 			pll_control = 0;
923 		}
924 #else /* SIGSYS */
925 		ntp_adjtime(&ntv);
926 #endif /* SIGSYS */
927 
928 		/*
929 		 * Save the result status and light up an external clock
930 		 * if available.
931 		 */
932 		pll_status = ntv.status;
933 		if (pll_control) {
934 #ifdef STA_NANO
935 			if (pll_status & STA_CLK)
936 				ext_enable = 1;
937 #endif /* STA_NANO */
938 			NLOG(NLOG_SYNCEVENT | NLOG_SYSEVENT)
939 			    msyslog(LOG_INFO,
940 		  	    "kernel time sync status %04x",
941 			    pll_status);
942 		}
943 #endif /* KERNEL_PLL */
944 #endif /* LOCKCLOCK */
945 		break;
946 
947 	case LOOP_DRIFTCOMP:
948 
949 #ifndef LOCKCLOCK
950 		/*
951 		 * If the frequency value is reasonable, set the initial
952 		 * frequency to the given value and the state to S_FSET.
953 		 * Otherwise, the drift file may be missing or broken,
954 		 * so set the frequency to zero. This erases past
955 		 * history should somebody break something.
956 		 */
957 		if (freq <= NTP_MAXFREQ && freq >= -NTP_MAXFREQ) {
958 			drift_comp = freq;
959 			rstclock(S_FSET, 0, 0);
960 		} else {
961 			drift_comp = 0;
962 		}
963 
964 #ifdef KERNEL_PLL
965 		/*
966 		 * Sanity check. If the kernel is available, load the
967 		 * frequency and light up the loop. Make sure the offset
968 		 * is zero to cancel any previous nonsense. If you don't
969 		 * want this initialization, remove the ntp.drift file.
970 		 */
971 		if (pll_control && kern_enable) {
972 			memset((char *)&ntv, 0, sizeof(ntv));
973 			ntv.modes = MOD_OFFSET | MOD_FREQUENCY;
974 			ntv.freq = (int32)(drift_comp * 65536e6);
975 			ntp_adjtime(&ntv);
976 		}
977 #endif /* KERNEL_PLL */
978 #endif /* LOCKCLOCK */
979 		break;
980 
981 	case LOOP_KERN_CLEAR:
982 #ifndef LOCKCLOCK
983 #ifdef KERNEL_PLL
984 		/* Completely turn off the kernel time adjustments. */
985 		if (pll_control) {
986 			memset((char *)&ntv, 0, sizeof(ntv));
987 			ntv.modes = MOD_BITS | MOD_OFFSET | MOD_FREQUENCY;
988 			ntv.status = STA_UNSYNC;
989 			ntp_adjtime(&ntv);
990 			NLOG(NLOG_SYNCEVENT | NLOG_SYSEVENT)
991 			    msyslog(LOG_INFO,
992 		  	    "kernel time sync disabled %04x",
993 			    ntv.status);
994 		   }
995 #endif /* KERNEL_PLL */
996 #endif /* LOCKCLOCK */
997 		break;
998 
999 	/*
1000 	 * Special tinker variables for Ulrich Windl. Very dangerous.
1001 	 */
1002 	case LOOP_MAX:			/* step threshold */
1003 		clock_max = freq;
1004 		break;
1005 
1006 	case LOOP_PANIC:		/* panic threshold */
1007 		clock_panic = freq;
1008 		break;
1009 
1010 	case LOOP_PHI:			/* dispersion rate */
1011 		clock_phi = freq;
1012 		break;
1013 
1014 	case LOOP_MINSTEP:		/* watchdog bark */
1015 		clock_minstep = freq;
1016 		break;
1017 
1018 	case LOOP_ALLAN:		/* Allan intercept */
1019 		allan_xpt = freq;
1020 		break;
1021 
1022 	case LOOP_HUFFPUFF:		/* huff-n'-puff filter length */
1023 		if (freq < HUFFPUFF)
1024 			freq = HUFFPUFF;
1025 		sys_hufflen = (int)(freq / HUFFPUFF);
1026 		sys_huffpuff = (double *)emalloc(sizeof(double) *
1027 		    sys_hufflen);	/* NOTE(review): a previously allocated filter is leaked if reconfigured */
1028 		for (i = 0; i < sys_hufflen; i++)
1029 			sys_huffpuff[i] = 1e9;
1030 		sys_mindly = 1e9;
1031 		break;
1032 
1033 	case LOOP_FREQ:			/* initial frequency */
1034 		drift_comp = freq / 1e6;
1035 		rstclock(S_FSET, 0, 0);
1036 		break;
1037 	}
1038 }
1039 
1040 
1041 #if defined(KERNEL_PLL) && defined(SIGSYS)
1042 /*
1043  * pll_trap - trap processor for undefined syscalls
1044  *
1045  * This nugget is called by the kernel when the SYS_ntp_adjtime()
1046  * syscall bombs because the silly thing has not been implemented in
1047  * the kernel. In this case the phase-lock loop is emulated by
1048  * the stock adjtime() syscall and a lot of indelicate abuse.
1049  */
1050 static RETSIGTYPE
1051 pll_trap(
1052 	int arg
1053 	)
1054 {
1055 	pll_control = 0;	/* mark kernel discipline unavailable */
1056 	siglongjmp(env, 1);	/* resume at the sigsetjmp() in loop_config() */
1057 }
1058 #endif /* KERNEL_PLL && SIGSYS */
1059