xref: /freebsd/contrib/ntp/ntpd/ntp_loopfilter.c (revision b0b1dbdd)
1 /*
2  * ntp_loopfilter.c - implements the NTP loop filter algorithm
3  *
4  * ATTENTION: Get approval from Dave Mills on all changes to this file!
5  *
6  */
7 #ifdef HAVE_CONFIG_H
8 # include <config.h>
9 #endif
10 
11 #ifdef USE_SNPRINTB
12 # include <util.h>
13 #endif
14 #include "ntpd.h"
15 #include "ntp_io.h"
16 #include "ntp_unixtime.h"
17 #include "ntp_stdlib.h"
18 
19 #include <limits.h>
20 #include <stdio.h>
21 #include <ctype.h>
22 
23 #include <signal.h>
24 #include <setjmp.h>
25 
26 #ifdef KERNEL_PLL
27 #include "ntp_syscall.h"
28 #endif /* KERNEL_PLL */
29 
30 /*
31  * This is an implementation of the clock discipline algorithm described
32  * in UDel TR 97-4-3, as amended. It operates as an adaptive parameter,
33  * hybrid phase/frequency-lock loop. A number of sanity checks are
34  * included to protect against timewarps, timespikes and general mayhem.
35  * All units are in s and s/s, unless noted otherwise.
36  */
37 #define CLOCK_MAX	.128	/* default step threshold (s) */
38 #define CLOCK_MINSTEP	300.	/* default stepout threshold (s) */
39 #define CLOCK_PANIC	1000.	/* default panic threshold (s) */
40 #define	CLOCK_PHI	15e-6	/* max frequency error (s/s) */
41 #define CLOCK_PLL	16.	/* PLL loop gain (log2) */
42 #define CLOCK_AVG	8.	/* parameter averaging constant */
43 #define CLOCK_FLL	.25	/* FLL loop gain */
44 #define	CLOCK_FLOOR	.0005	/* startup offset floor (s) */
45 #define	CLOCK_ALLAN	11	/* Allan intercept (log2 s) */
46 #define CLOCK_LIMIT	30	/* poll-adjust threshold */
47 #define CLOCK_PGATE	4.	/* poll-adjust gate */
48 #define PPS_MAXAGE	120	/* kernel pps signal timeout (s) */
49 #define	FREQTOD(x)	((x) / 65536e6) /* NTP to double */
50 #define	DTOFREQ(x)	((int32)((x) * 65536e6)) /* double to NTP */
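/*
 * For illustration: the kernel expresses frequency in PPM with a 16-bit
 * fraction (units of 2**-16 PPM), hence the 65536e6 scale factor above.
 * A drift of +50 PPM (50e-6 s/s) converts as DTOFREQ(50e-6) == 3276800,
 * and FREQTOD(3276800) recovers 50e-6.
 */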
51 
52 /*
53  * Clock discipline state machine. This is used to control the
54  * synchronization behavior during initialization and following a
55  * timewarp.
56  *
57  *	State	< step		> step		Comments
58  *	========================================================
59  *	NSET	FREQ		step, FREQ	freq not set
60  *
61  *	FSET	SYNC		step, SYNC	freq set
62  *
63  *	FREQ	if (mu < 900)	if (mu < 900)	set freq direct
64  *		    ignore	    ignore
65  *		else		else
66  *		    freq, SYNC	    freq, step, SYNC
67  *
68  *	SYNC	SYNC		SPIK, ignore	adjust phase/freq
69  *
70  *	SPIK	SYNC		if (mu < 900)	adjust phase/freq
71  *				    ignore
72  *				step, SYNC
73  */
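/*
 * Reading the table above, for example: a cold start with no frequency
 * file (NSET) whose first offset exceeds the step threshold steps the
 * clock and enters FREQ; once the stepout threshold (clock_minstep) has
 * elapsed, the frequency is computed directly and the machine settles
 * into SYNC.
 */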
74 /*
75  * Kernel PLL/PPS state machine. This is used with the kernel PLL
76  * modifications described in the documentation.
77  *
78  * If kernel support for the ntp_adjtime() system call is available, the
79  * ntp_control flag is set. The ntp_enable and kern_enable flags can be
80  * set at configuration time or run time using ntpdc. If ntp_enable is
81  * false, the discipline loop is unlocked and no corrections of any kind
82  * are made. If both ntp_control and kern_enable are set, the kernel
83  * support is used as described above; if false, the kernel is bypassed
84  * entirely and the daemon discipline used instead.
85  *
86  * There have been three versions of the kernel discipline code. The
87  * first (microkernel), now in Solaris, disciplines the clock in
88  * microseconds. The second and third (nanokernel) discipline the clock
89  * in nanoseconds. These versions are identified if the symbol STA_PLL
90  * is present in the header file /usr/include/sys/timex.h. The third
91  * and current version includes the TAI offset and is identified by the
92  * symbol NTP_API with value 4.
93  *
94  * Each PPS time/frequency discipline can be enabled by the atom driver
95  * or another driver. If enabled, the STA_PPSTIME and STA_PPSFREQ bits are
96  * set in the kernel status word; otherwise, these bits are cleared.
97  * These bits are also cleared if the kernel reports an error.
98  *
99  * If an external clock is present, the clock driver sets STA_CLK in the
100  * status word. When the local clock driver sees this bit, it updates
101  * via this routine, which then calls ntp_adjtime() with the STA_PLL bit
102  * set to zero, in which case the system clock is not adjusted. This is
103  * also a signal for the external clock driver to discipline the system
104  * clock. Unless specified otherwise, all times are in seconds.
105  */
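/*
 * A minimal sketch of probing the kernel state described above (not part
 * of the daemon logic; ntp_adjtime() with modes set to zero is a
 * read-only query):
 *
 *	struct timex tx;
 *	memset(&tx, 0, sizeof(tx));
 *	tx.modes = 0;
 *	if (ntp_adjtime(&tx) >= 0 && (tx.status & STA_CLK))
 *		ext_enable = TRUE;	(external clock present)
 *
 * The daemon's actual probe is start_kern_loop() below, which also arms
 * a SIGSYS trap in case the syscall is missing from the kernel.
 */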
106 /*
107  * Program variables that can be tinkered.
108  */
109 double	clock_max_back = CLOCK_MAX;	/* step threshold */
110 double	clock_max_fwd =  CLOCK_MAX;	/* step threshold */
111 double	clock_minstep = CLOCK_MINSTEP; /* stepout threshold */
112 double	clock_panic = CLOCK_PANIC; /* panic threshold */
113 double	clock_phi = CLOCK_PHI;	/* dispersion rate (s/s) */
114 u_char	allan_xpt = CLOCK_ALLAN; /* Allan intercept (log2 s) */
115 
116 /*
117  * Program variables
118  */
119 static double clock_offset;	/* offset */
120 double	clock_jitter;		/* offset jitter */
121 double	drift_comp;		/* frequency (s/s) */
122 static double init_drift_comp; /* initial frequency (PPM) */
123 double	clock_stability;	/* frequency stability (wander) (s/s) */
124 double	clock_codec;		/* audio codec frequency (samples/s) */
125 static u_long clock_epoch;	/* last update */
126 u_int	sys_tai;		/* TAI offset from UTC */
127 static int loop_started;	/* TRUE after LOOP_DRIFTINIT */
128 static void rstclock (int, double); /* transition function */
129 static double direct_freq(double); /* direct set frequency */
130 static void set_freq(double);	/* set frequency */
131 #ifndef PATH_MAX
132 # define PATH_MAX MAX_PATH
133 #endif
134 static char relative_path[PATH_MAX + 1]; /* relative path per recursive make */
135 static char *this_file = NULL;
136 
137 #ifdef KERNEL_PLL
138 static struct timex ntv;	/* ntp_adjtime() parameters */
139 int	pll_status;		/* last kernel status bits */
140 #if defined(STA_NANO) && NTP_API == 4
141 static u_int loop_tai;		/* last TAI offset */
142 #endif /* STA_NANO */
143 static	void	start_kern_loop(void);
144 static	void	stop_kern_loop(void);
145 #endif /* KERNEL_PLL */
146 
147 /*
148  * Clock state machine control flags
149  */
150 int	ntp_enable = TRUE;	/* clock discipline enabled */
151 int	pll_control;		/* kernel support available */
152 int	kern_enable = TRUE;	/* kernel support enabled */
153 int	hardpps_enable;		/* kernel PPS discipline enabled */
154 int	ext_enable;		/* external clock enabled */
155 int	pps_stratum;		/* pps stratum */
156 int	kernel_status;		/* from ntp_adjtime */
157 int	force_step_once = FALSE; /* always step time once at startup (-G) */
158 int	mode_ntpdate = FALSE;	/* exit on first clock set (-q) */
159 int	freq_cnt;		/* initial frequency clamp */
160 int	freq_set;		/* initial set frequency switch */
161 
162 /*
163  * Clock state machine variables
164  */
165 int	state = 0;		/* clock discipline state */
166 u_char	sys_poll;		/* time constant/poll (log2 s) */
167 int	tc_counter;		/* jiggle counter */
168 double	last_offset;		/* last offset (s) */
169 
170 /*
171  * Huff-n'-puff filter variables
172  */
173 static double *sys_huffpuff;	/* huff-n'-puff filter */
174 static int sys_hufflen;		/* huff-n'-puff filter stages */
175 static int sys_huffptr;		/* huff-n'-puff filter pointer */
176 static double sys_mindly;	/* huff-n'-puff filter min delay */
177 
178 #if defined(KERNEL_PLL)
179 /* Emacs cc-mode goes nuts if we split the next line... */
180 #define MOD_BITS (MOD_OFFSET | MOD_MAXERROR | MOD_ESTERROR | \
181     MOD_STATUS | MOD_TIMECONST)
182 #ifdef SIGSYS
183 static void pll_trap (int);	/* configuration trap */
184 static struct sigaction sigsys;	/* current sigaction status */
185 static struct sigaction newsigsys; /* new sigaction status */
186 static sigjmp_buf env;		/* environment var. for pll_trap() */
187 #endif /* SIGSYS */
188 #endif /* KERNEL_PLL */
189 
190 static void
191 sync_status(const char *what, int ostatus, int nstatus)
192 {
193 	char obuf[256], nbuf[256], tbuf[1024];
194 #if defined(USE_SNPRINTB) && defined (STA_FMT)
195 	snprintb(obuf, sizeof(obuf), STA_FMT, ostatus);
196 	snprintb(nbuf, sizeof(nbuf), STA_FMT, nstatus);
197 #else
198 	snprintf(obuf, sizeof(obuf), "%04x", ostatus);
199 	snprintf(nbuf, sizeof(nbuf), "%04x", nstatus);
200 #endif
201 	snprintf(tbuf, sizeof(tbuf), "%s status: %s -> %s", what, obuf, nbuf);
202 	report_event(EVNT_KERN, NULL, tbuf);
203 }
204 
205 /*
206  * file_name - return pointer to non-relative portion of this C file pathname
207  */
208 static char *file_name(void)
209 {
210 	if (this_file == NULL) {
211 	    (void)strncpy(relative_path, __FILE__, PATH_MAX);
212 	    for (this_file=relative_path;
213 		*this_file && ! isalnum((unsigned char)*this_file);
214 		this_file++) ;
215 	}
216 	return this_file;
217 }
218 
219 /*
220  * init_loopfilter - initialize loop filter data
221  */
222 void
223 init_loopfilter(void)
224 {
225 	/*
226 	 * Initialize state variables.
227 	 */
228 	sys_poll = ntp_minpoll;
229 	clock_jitter = LOGTOD(sys_precision);
230 	freq_cnt = (int)clock_minstep;
231 }
232 
233 #ifdef KERNEL_PLL
234 /*
235  * ntp_adjtime_error_handler - process errors from ntp_adjtime
236  */
237 static void
238 ntp_adjtime_error_handler(
239 	const char *caller,	/* name of calling function */
240 	struct timex *ptimex,	/* pointer to struct timex */
241 	int ret,		/* return value from ntp_adjtime */
242 	int saved_errno,	/* value of errno when ntp_adjtime returned */
243 	int pps_call,		/* ntp_adjtime call was PPS-related */
244 	int tai_call,		/* ntp_adjtime call was TAI-related */
245 	int line		/* line number of ntp_adjtime call */
246 	)
247 {
248 	char des[1024] = "";	/* Decoded Error Status */
249 
250 	switch (ret) {
251 	    case -1:
252 		switch (saved_errno) {
253 		    case EFAULT:
254 			msyslog(LOG_ERR, "%s: %s line %d: invalid struct timex pointer: 0x%lx",
255 			    caller, file_name(), line,
256 			    (long)((void *)ptimex)
257 			);
258 		    break;
259 		    case EINVAL:
260 			msyslog(LOG_ERR, "%s: %s line %d: invalid struct timex \"constant\" element value: %ld",
261 			    caller, file_name(), line,
262 			    (long)(ptimex->constant)
263 			);
264 		    break;
265 		    case EPERM:
266 			if (tai_call) {
267 			    errno = saved_errno;
268 			    msyslog(LOG_ERR,
269 				"%s: ntp_adjtime(TAI) failed: %m",
270 				caller);
271 			}
272 			errno = saved_errno;
273 			msyslog(LOG_ERR, "%s: %s line %d: ntp_adjtime: %m",
274 			    caller, file_name(), line
275 			);
276 		    break;
277 		    default:
278 			msyslog(LOG_NOTICE, "%s: %s line %d: unhandled errno value %d after failed ntp_adjtime call",
279 			    caller, file_name(), line,
280 			    saved_errno
281 			);
282 		    break;
283 		}
284 	    break;
285 #ifdef TIME_OK
286 	    case TIME_OK: /* 0: synchronized, no leap second warning */
287 		/* msyslog(LOG_INFO, "kernel reports time is synchronized normally"); */
288 	    break;
289 #else
290 # warning TIME_OK is not defined
291 #endif
292 #ifdef TIME_INS
293 	    case TIME_INS: /* 1: positive leap second warning */
294 		msyslog(LOG_INFO, "kernel reports leap second insertion scheduled");
295 	    break;
296 #else
297 # warning TIME_INS is not defined
298 #endif
299 #ifdef TIME_DEL
300 	    case TIME_DEL: /* 2: negative leap second warning */
301 		msyslog(LOG_INFO, "kernel reports leap second deletion scheduled");
302 	    break;
303 #else
304 # warning TIME_DEL is not defined
305 #endif
306 #ifdef TIME_OOP
307 	    case TIME_OOP: /* 3: leap second in progress */
308 		msyslog(LOG_INFO, "kernel reports leap second in progress");
309 	    break;
310 #else
311 # warning TIME_OOP is not defined
312 #endif
313 #ifdef TIME_WAIT
314 	    case TIME_WAIT: /* 4: leap second has occurred */
315 		msyslog(LOG_INFO, "kernel reports leap second has occurred");
316 	    break;
317 #else
318 # warning TIME_WAIT is not defined
319 #endif
320 #ifdef TIME_ERROR
321 #if 0
322 
323 from the reference implementation of ntp_gettime():
324 
325 		// Hardware or software error
326         if ((time_status & (STA_UNSYNC | STA_CLOCKERR))
327 
328 	/*
329          * PPS signal lost when either time or frequency synchronization
330          * requested
331          */
332 	|| (time_status & (STA_PPSFREQ | STA_PPSTIME)
333 	    && !(time_status & STA_PPSSIGNAL))
334 
335         /*
336          * PPS jitter exceeded when time synchronization requested
337          */
338 	|| (time_status & STA_PPSTIME &&
339             time_status & STA_PPSJITTER)
340 
341         /*
342          * PPS wander exceeded or calibration error when frequency
343          * synchronization requested
344          */
345 	|| (time_status & STA_PPSFREQ &&
346             time_status & (STA_PPSWANDER | STA_PPSERROR)))
347                 return (TIME_ERROR);
348 
349 or, from ntp_adjtime():
350 
351 	if (  (time_status & (STA_UNSYNC | STA_CLOCKERR))
352 	    || (time_status & (STA_PPSFREQ | STA_PPSTIME)
353 		&& !(time_status & STA_PPSSIGNAL))
354 	    || (time_status & STA_PPSTIME
355 		&& time_status & STA_PPSJITTER)
356 	    || (time_status & STA_PPSFREQ
357 		&& time_status & (STA_PPSWANDER | STA_PPSERROR))
358 	   )
359 		return (TIME_ERROR);
360 #endif
361 
362 	    case TIME_ERROR: /* 5: unsynchronized, or loss of synchronization */
363 				/* error (see status word) */
364 
365 		if (ptimex->status & STA_UNSYNC)
366 			snprintf(des, sizeof(des), "%s%sClock Unsynchronized",
367 				des, (*des) ? "; " : "");
368 
369 		if (ptimex->status & STA_CLOCKERR)
370 			snprintf(des, sizeof(des), "%s%sClock Error",
371 				des, (*des) ? "; " : "");
372 
373 		if (!(ptimex->status & STA_PPSSIGNAL)
374 		    && ptimex->status & STA_PPSFREQ)
375 			snprintf(des, sizeof(des), "%s%sPPS Frequency Sync wanted but no PPS",
376 				des, (*des) ? "; " : "");
377 
378 		if (!(ptimex->status & STA_PPSSIGNAL)
379 		    && ptimex->status & STA_PPSTIME)
380 			snprintf(des, sizeof(des), "%s%sPPS Time Sync wanted but no PPS signal",
381 				des, (*des) ? "; " : "");
382 
383 		if (   ptimex->status & STA_PPSTIME
384 		    && ptimex->status & STA_PPSJITTER)
385 			snprintf(des, sizeof(des), "%s%sPPS Time Sync wanted but PPS Jitter exceeded",
386 				des, (*des) ? "; " : "");
387 
388 		if (   ptimex->status & STA_PPSFREQ
389 		    && ptimex->status & STA_PPSWANDER)
390 			snprintf(des, sizeof(des), "%s%sPPS Frequency Sync wanted but PPS Wander exceeded",
391 				des, (*des) ? "; " : "");
392 
393 		if (   ptimex->status & STA_PPSFREQ
394 		    && ptimex->status & STA_PPSERROR)
395 			snprintf(des, sizeof(des), "%s%sPPS Frequency Sync wanted but Calibration error detected",
396 				des, (*des) ? "; " : "");
397 
398 		if (pps_call && !(ptimex->status & STA_PPSSIGNAL))
399 			report_event(EVNT_KERN, NULL,
400 			    "no PPS signal");
401 		DPRINTF(1, ("kernel loop status %#x (%s)\n",
402 			ptimex->status, des));
403 		/*
404 		 * This code may be returned when ntp_adjtime() has just
405 		 * been called for the first time, quite a while after
406 		 * startup, when ntpd just starts to discipline the kernel
407 		 * time. In this case the occurrence of this message
408 		 * can be pretty confusing.
409 		 *
410 		 * HMS: How about a message when we begin kernel processing:
411 		 *    Determining kernel clock state...
412 		 * so an initial TIME_ERROR message is less confusing,
413 		 * or skipping the first message (ugh),
414 		 * or ???
415 		 * msyslog(LOG_INFO, "kernel reports time synchronization lost");
416 		 */
417 		msyslog(LOG_INFO, "kernel reports TIME_ERROR: %#x: %s",
418 			ptimex->status, des);
419 	    break;
420 #else
421 # warning TIME_ERROR is not defined
422 #endif
423 	    default:
424 		msyslog(LOG_NOTICE, "%s: %s line %d: unhandled return value %d from ntp_adjtime() in %s at line %d",
425 		    caller, file_name(), line,
426 		    ret,
427 		    __func__, __LINE__
428 		);
429 	    break;
430 	}
431 	return;
432 }
433 #endif
434 
435 /*
436  * local_clock - the NTP logical clock loop filter.
437  *
438  * Return codes:
439  * -1	update ignored: exceeds panic threshold
440  * 0	update ignored: popcorn or exceeds step threshold
441  * 1	clock was slewed
442  * 2	clock was stepped
443  *
444  * LOCKCLOCK: The only thing this routine does is set the
445  * sys_rootdisp variable equal to the peer dispersion.
446  */
447 int
448 local_clock(
449 	struct	peer *peer,	/* synch source peer structure */
450 	double	fp_offset	/* clock offset (s) */
451 	)
452 {
453 	int	rval;		/* return code */
454 	int	osys_poll;	/* old system poll */
455 	int	ntp_adj_ret;	/* returned by ntp_adjtime */
456 	double	mu;		/* interval since last update */
457 	double	clock_frequency; /* clock frequency */
458 	double	dtemp, etemp;	/* double temps */
459 	char	tbuf[80];	/* report buffer */
460 
461 	(void)ntp_adj_ret; /* not always used below... */
462 	/*
463 	 * If the loop is opened or the NIST LOCKCLOCK is in use,
464 	 * monitor and record the offsets anyway in order to determine
465 	 * the open-loop response and then go home.
466 	 */
467 #ifndef LOCKCLOCK
468 	if (!ntp_enable)
469 #endif /* not LOCKCLOCK */
470 	{
471 		record_loop_stats(fp_offset, drift_comp, clock_jitter,
472 		    clock_stability, sys_poll);
473 		return (0);
474 	}
475 
476 #ifndef LOCKCLOCK
477 	/*
478 	 * If the clock is way off, panic is declared. The clock_panic
479 	 * defaults to 1000 s; if set to zero, the panic will never
480 	 * occur. The allow_panic defaults to FALSE, so the first panic
481 	 * will exit. It can be set TRUE by a command line option, in
482 	 * which case the clock will be set anyway and time marches on.
483 	 * But, allow_panic will be set FALSE when the update is less
484 	 * than the step threshold; so, subsequent panics will exit.
485 	 */
486 	if (fabs(fp_offset) > clock_panic && clock_panic > 0 &&
487 	    !allow_panic) {
488 		snprintf(tbuf, sizeof(tbuf),
489 		    "%+.0f s; set clock manually within %.0f s.",
490 		    fp_offset, clock_panic);
491 		report_event(EVNT_SYSFAULT, NULL, tbuf);
492 		return (-1);
493 	}
494 
495 	allow_panic = FALSE;
496 
497 	/*
498 	 * This section simulates ntpdate. If the offset exceeds the
499 	 * step threshold (128 ms), step the clock to that time and
500 	 * exit. Otherwise, slew the clock to that time and exit. Note
501 	 * that the slew will persist and eventually complete beyond the
502 	 * life of this program. Note that while ntpdate is active, the
503 	 * terminal does not detach, so the termination message prints
504 	 * directly to the terminal.
505 	 */
506 	if (mode_ntpdate) {
507 		if (  ( fp_offset > clock_max_fwd  && clock_max_fwd  > 0)
508 		   || (-fp_offset > clock_max_back && clock_max_back > 0)) {
509 			step_systime(fp_offset);
510 			msyslog(LOG_NOTICE, "ntpd: time set %+.6f s",
511 			    fp_offset);
512 			printf("ntpd: time set %+.6fs\n", fp_offset);
513 		} else {
514 			adj_systime(fp_offset);
515 			msyslog(LOG_NOTICE, "ntpd: time slew %+.6f s",
516 			    fp_offset);
517 			printf("ntpd: time slew %+.6fs\n", fp_offset);
518 		}
519 		record_loop_stats(fp_offset, drift_comp, clock_jitter,
520 		    clock_stability, sys_poll);
521 		exit (0);
522 	}
523 
524 	/*
525 	 * The huff-n'-puff filter finds the lowest delay in the recent
526 	 * interval. This is used to correct the offset by one-half the
527 	 * difference between the sample delay and minimum delay. This
528 	 * is most effective if the delays are highly asymmetric,
529 	 * clockhopping is avoided, and the clock frequency wander is
530 	 * relatively small.
531 	 */
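	/*
	 * Worked example: with a minimum observed delay of 10 ms and a
	 * sample delay of 30 ms, half the difference is 10 ms; a
	 * positive offset is reduced by that amount and a negative
	 * offset is increased by it.
	 */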
532 	if (sys_huffpuff != NULL) {
533 		if (peer->delay < sys_huffpuff[sys_huffptr])
534 			sys_huffpuff[sys_huffptr] = peer->delay;
535 		if (peer->delay < sys_mindly)
536 			sys_mindly = peer->delay;
537 		if (fp_offset > 0)
538 			dtemp = -(peer->delay - sys_mindly) / 2;
539 		else
540 			dtemp = (peer->delay - sys_mindly) / 2;
541 		fp_offset += dtemp;
542 		DPRINTF(1, ("local_clock: size %d mindly %.6f huffpuff %.6f\n",
543 			    sys_hufflen, sys_mindly, dtemp));
544 	}
545 
546 	/*
547 	 * Clock state machine transition function which defines how the
548 	 * system reacts to large phase and frequency excursion. There
549 	 * are two main regimes: when the offset exceeds the step
550 	 * threshold (128 ms) and when it does not. Under certain
551 	 * conditions updates are suspended until the stepout threshold
552 	 * (default 300 s) is exceeded. See the documentation on how these
553 	 * thresholds interact with commands and command line options.
554 	 *
555 	 * Note the kernel is disabled if step is disabled or greater
556 	 * than 0.5 s or in ntpdate mode.
557 	 */
558 	osys_poll = sys_poll;
559 	if (sys_poll < peer->minpoll)
560 		sys_poll = peer->minpoll;
561 	if (sys_poll > peer->maxpoll)
562 		sys_poll = peer->maxpoll;
563 	mu = current_time - clock_epoch;
564 	clock_frequency = drift_comp;
565 	rval = 1;
566 	if (  ( fp_offset > clock_max_fwd  && clock_max_fwd  > 0)
567 	   || (-fp_offset > clock_max_back && clock_max_back > 0)
568 	   || force_step_once ) {
569 		if (force_step_once) {
570 			force_step_once = FALSE;  /* we want this only once after startup */
571 			msyslog(LOG_NOTICE, "Doing initial time step");
572 		}
573 
574 		switch (state) {
575 
576 		/*
577 		 * In SYNC state we ignore the first outlier and switch
578 		 * to SPIK state.
579 		 */
580 		case EVNT_SYNC:
581 			snprintf(tbuf, sizeof(tbuf), "%+.6f s",
582 			    fp_offset);
583 			report_event(EVNT_SPIK, NULL, tbuf);
584 			state = EVNT_SPIK;
585 			return (0);
586 
587 		/*
588 		 * In FREQ state we ignore outliers and inlyers. At the
589 		 * first outlier after the stepout threshold, compute
590 		 * the apparent frequency correction and step the phase.
591 		 */
592 		case EVNT_FREQ:
593 			if (mu < clock_minstep)
594 				return (0);
595 
596 			clock_frequency = direct_freq(fp_offset);
597 
598 			/* fall through to EVNT_SPIK */
599 
600 		/*
601 		 * In SPIK state we ignore succeeding outliers until
602 		 * either an inlyer is found or the stepout threshold is
603 		 * exceeded.
604 		 */
605 		case EVNT_SPIK:
606 			if (mu < clock_minstep)
607 				return (0);
608 
609 			/* fall through to default */
610 
611 		/*
612 		 * We get here by default in NSET and FSET states and
613 		 * from above in FREQ or SPIK states.
614 		 *
615 		 * In NSET state an initial frequency correction is not
616 		 * available, usually because the frequency file has not
617 		 * yet been written. Since the time is outside the step
618 		 * threshold, the clock is stepped. The frequency will
619 		 * be set directly following the stepout interval.
620 		 *
621 		 * In FSET state the initial frequency has been set from
622 		 * the frequency file. Since the time is outside the
623 		 * step threshold, the clock is stepped immediately,
624 		 * rather than after the stepout interval. Guys get
625 		 * nervous if it takes 15 minutes to set the clock for
626 		 * the first time.
627 		 *
628 		 * In FREQ and SPIK states the stepout threshold has
629 		 * expired and the phase is still above the step
630 		 * threshold. Note that a single spike greater than the
631 		 * step threshold is always suppressed, even with a
632 		 * long time constant.
633 		 */
634 		default:
635 			snprintf(tbuf, sizeof(tbuf), "%+.6f s",
636 			    fp_offset);
637 			report_event(EVNT_CLOCKRESET, NULL, tbuf);
638 			step_systime(fp_offset);
639 			reinit_timer();
640 			tc_counter = 0;
641 			clock_jitter = LOGTOD(sys_precision);
642 			rval = 2;
643 			if (state == EVNT_NSET) {
644 				rstclock(EVNT_FREQ, 0);
645 				return (rval);
646 			}
647 			break;
648 		}
649 		rstclock(EVNT_SYNC, 0);
650 	} else {
651 		/*
652 		 * The offset is less than the step threshold. Calculate
653 		 * the jitter as the exponentially weighted offset
654 		 * differences.
655 		 */
656 		etemp = SQUARE(clock_jitter);
657 		dtemp = SQUARE(max(fabs(fp_offset - last_offset),
658 		    LOGTOD(sys_precision)));
659 		clock_jitter = SQRT(etemp + (dtemp - etemp) /
660 		    CLOCK_AVG);
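		/*
		 * For example, with a previous jitter of 1 ms and a new
		 * offset difference of 5 ms, the update is
		 * sqrt(1e-6 + (25e-6 - 1e-6) / 8) = 2 ms.
		 */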
661 		switch (state) {
662 
663 		/*
664 		 * In NSET state this is the first update received and
665 		 * the frequency has not been initialized. Adjust the
666 		 * phase, but do not adjust the frequency until after
667 		 * the stepout threshold.
668 		 */
669 		case EVNT_NSET:
670 			adj_systime(fp_offset);
671 			rstclock(EVNT_FREQ, fp_offset);
672 			break;
673 
674 		/*
675 		 * In FREQ state ignore updates until the stepout
676 		 * threshold. After that, compute the new frequency, but
677 		 * do not adjust the frequency until the holdoff counter
678 		 * decrements to zero.
679 		 */
680 		case EVNT_FREQ:
681 			if (mu < clock_minstep)
682 				return (0);
683 
684 			clock_frequency = direct_freq(fp_offset);
685 			/* fall through */
686 
687 		/*
688 		 * We get here by default in FSET, SPIK and SYNC states.
689 		 * Here compute the frequency update due to PLL and FLL
690 		 * contributions. Note, we avoid frequency discipline at
691 		 * startup until the initial transient has subsided.
692 		 */
693 		default:
694 			if (freq_cnt == 0) {
695 
696 				/*
697 				 * The FLL and PLL frequency gain constants
698 				 * depend on the time constant and Allan
699 				 * intercept. The PLL is always used, but
700 				 * becomes ineffective above the Allan intercept
701 				 * where the FLL becomes effective.
702 				 */
703 				if (sys_poll >= allan_xpt)
704 					clock_frequency +=
705 					      (fp_offset - clock_offset)
706 					    / ( max(ULOGTOD(sys_poll), mu)
707 					       * CLOCK_FLL);
708 
709 				/*
710 				 * The PLL frequency gain (numerator) depends on
711 				 * the minimum of the update interval and Allan
712 				 * intercept. This reduces the PLL gain when the
713 				 * FLL becomes effective.
714 				 */
715 				etemp = min(ULOGTOD(allan_xpt), mu);
716 				dtemp = 4 * CLOCK_PLL * ULOGTOD(sys_poll);
717 				clock_frequency +=
718 				    fp_offset * etemp / (dtemp * dtemp);
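				/*
				 * Rough magnitudes, for illustration:
				 * with sys_poll = 6 (64 s) the PLL
				 * denominator is (4 * 16 * 64)^2 and,
				 * with mu = 64 s, a 10 ms offset nudges
				 * the frequency by about 3.8e-8 s/s
				 * (0.038 PPM).
				 */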
719 			}
720 			rstclock(EVNT_SYNC, fp_offset);
721 			if (fabs(fp_offset) < CLOCK_FLOOR)
722 				freq_cnt = 0;
723 			break;
724 		}
725 	}
726 
727 #ifdef KERNEL_PLL
728 	/*
729 	 * This code segment works when clock adjustments are made using
730 	 * precision time kernel support and the ntp_adjtime() system
731 	 * call. This support is available in Solaris 2.6 and later,
732 	 * Digital Unix 4.0 and later, FreeBSD, Linux and specially
733 	 * modified kernels for HP-UX 9 and Ultrix 4. In the case of the
734 	 * DECstation 5000/240 and Alpha AXP, additional kernel
735 	 * modifications provide a true microsecond clock and nanosecond
736 	 * clock, respectively.
737 	 *
738 	 * Important note: The kernel discipline is used only if the
739 	 * step threshold is less than 0.5 s, as anything higher can
740 	 * lead to overflow problems. This might occur if some misguided
741 	 * lad set the step threshold to something ridiculous.
742 	 */
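	/*
	 * (The 0.5 s figure corresponds to the kernel discipline's own
	 * 500 ms MAXPHASE limit; larger offsets would simply be clamped
	 * there, so the daemon loop is used instead.)
	 */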
743 	if (pll_control && kern_enable && freq_cnt == 0) {
744 
745 		/*
746 		 * We initialize the structure for the ntp_adjtime()
747 		 * system call. We have to convert everything to
748 		 * microseconds or nanoseconds first. Do not update the
749 		 * system variables if the ext_enable flag is set. In
750 		 * this case, the external clock driver will update the
751 		 * variables, which will be read later by the local
752 		 * clock driver. Afterwards, remember the time and
753 		 * frequency offsets for jitter and stability values and
754 		 * to update the frequency file.
755 		 */
756 		ZERO(ntv);
757 		if (ext_enable) {
758 			ntv.modes = MOD_STATUS;
759 		} else {
760 #ifdef STA_NANO
761 			ntv.modes = MOD_BITS | MOD_NANO;
762 #else /* STA_NANO */
763 			ntv.modes = MOD_BITS;
764 #endif /* STA_NANO */
765 			if (clock_offset < 0)
766 				dtemp = -.5;
767 			else
768 				dtemp = .5;
769 #ifdef STA_NANO
770 			ntv.offset = (int32)(clock_offset * 1e9 +
771 			    dtemp);
772 			ntv.constant = sys_poll;
773 #else /* STA_NANO */
774 			ntv.offset = (int32)(clock_offset * 1e6 +
775 			    dtemp);
776 			ntv.constant = sys_poll - 4;
777 #endif /* STA_NANO */
778 			if (ntv.constant < 0)
779 				ntv.constant = 0;
780 
781 			ntv.esterror = (u_int32)(clock_jitter * 1e6);
782 			ntv.maxerror = (u_int32)((sys_rootdelay / 2 +
783 			    sys_rootdisp) * 1e6);
784 			ntv.status = STA_PLL;
785 
786 			/*
787 			 * Enable/disable the PPS if requested.
788 			 */
789 			if (hardpps_enable) {
790 				ntv.status |= (STA_PPSTIME | STA_PPSFREQ);
791 				if (!(pll_status & STA_PPSTIME))
792 					sync_status("PPS enabled",
793 						pll_status,
794 						ntv.status);
795 			} else {
796 				ntv.status &= ~(STA_PPSTIME | STA_PPSFREQ);
797 				if (pll_status & STA_PPSTIME)
798 					sync_status("PPS disabled",
799 						pll_status,
800 						ntv.status);
801 			}
802 			if (sys_leap == LEAP_ADDSECOND)
803 				ntv.status |= STA_INS;
804 			else if (sys_leap == LEAP_DELSECOND)
805 				ntv.status |= STA_DEL;
806 		}
807 
808 		/*
809 		 * Pass the stuff to the kernel. If it squeals, turn off
810 		 * the pps. In any case, fetch the kernel offset,
811 		 * frequency and jitter.
812 		 */
813 		ntp_adj_ret = ntp_adjtime(&ntv);
814 		/*
815 		 * A squeal is a return status < 0, or a state change.
816 		 */
817 		if ((0 > ntp_adj_ret) || (ntp_adj_ret != kernel_status)) {
818 			kernel_status = ntp_adj_ret;
819 			ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, hardpps_enable, 0, __LINE__ - 1);
820 		}
821 		pll_status = ntv.status;
822 #ifdef STA_NANO
823 		clock_offset = ntv.offset / 1e9;
824 #else /* STA_NANO */
825 		clock_offset = ntv.offset / 1e6;
826 #endif /* STA_NANO */
827 		clock_frequency = FREQTOD(ntv.freq);
828 
829 		/*
830 		 * If the kernel PPS is lit, monitor its performance.
831 		 */
832 		if (ntv.status & STA_PPSTIME) {
833 #ifdef STA_NANO
834 			clock_jitter = ntv.jitter / 1e9;
835 #else /* STA_NANO */
836 			clock_jitter = ntv.jitter / 1e6;
837 #endif /* STA_NANO */
838 		}
839 
840 #if defined(STA_NANO) && NTP_API == 4
841 		/*
842 		 * If the TAI changes, update the kernel TAI.
843 		 */
844 		if (loop_tai != sys_tai) {
845 			loop_tai = sys_tai;
846 			ntv.modes = MOD_TAI;
847 			ntv.constant = sys_tai;
848 			if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) {
849 			    ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, 0, 1, __LINE__ - 1);
850 			}
851 		}
852 #endif /* STA_NANO */
853 	}
854 #endif /* KERNEL_PLL */
855 
856 	/*
857 	 * Clamp the frequency within the tolerance range and calculate
858 	 * the frequency difference since the last update.
859 	 */
860 	if (fabs(clock_frequency) > NTP_MAXFREQ)
861 		msyslog(LOG_NOTICE,
862 		    "frequency error %.0f PPM exceeds tolerance %.0f PPM",
863 		    clock_frequency * 1e6, NTP_MAXFREQ * 1e6);
864 	dtemp = SQUARE(clock_frequency - drift_comp);
865 	if (clock_frequency > NTP_MAXFREQ)
866 		drift_comp = NTP_MAXFREQ;
867 	else if (clock_frequency < -NTP_MAXFREQ)
868 		drift_comp = -NTP_MAXFREQ;
869 	else
870 		drift_comp = clock_frequency;
871 
872 	/*
873 	 * Calculate the wander as the exponentially weighted RMS
874 	 * frequency differences. Record the change for the frequency
875 	 * file update.
876 	 */
877 	etemp = SQUARE(clock_stability);
878 	clock_stability = SQRT(etemp + (dtemp - etemp) / CLOCK_AVG);
879 
880 	/*
881 	 * Here we adjust the time constant by comparing the current
882 	 * offset with the clock jitter. If the offset is less than the
883 	 * clock jitter times a constant, then the averaging interval is
884 	 * increased, otherwise it is decreased. A bit of hysteresis
885 	 * helps calm the dance. Works best using burst mode. Don't
886 	 * fiddle with the poll during the startup clamp period.
887 	 */
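	/*
	 * For example, with sys_poll = 6 each quiet update (offset
	 * within 4 times the jitter) adds 6 to tc_counter; once it
	 * exceeds CLOCK_LIMIT (30) the poll exponent is raised, while a
	 * noisy update drives the counter down twice as fast.
	 */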
888 	if (freq_cnt > 0) {
889 		tc_counter = 0;
890 	} else if (fabs(clock_offset) < CLOCK_PGATE * clock_jitter) {
891 		tc_counter += sys_poll;
892 		if (tc_counter > CLOCK_LIMIT) {
893 			tc_counter = CLOCK_LIMIT;
894 			if (sys_poll < peer->maxpoll) {
895 				tc_counter = 0;
896 				sys_poll++;
897 			}
898 		}
899 	} else {
900 		tc_counter -= sys_poll << 1;
901 		if (tc_counter < -CLOCK_LIMIT) {
902 			tc_counter = -CLOCK_LIMIT;
903 			if (sys_poll > peer->minpoll) {
904 				tc_counter = 0;
905 				sys_poll--;
906 			}
907 		}
908 	}
909 
910 	/*
911 	 * If the time constant has changed, update the poll variables.
912 	 */
913 	if (osys_poll != sys_poll)
914 		poll_update(peer, sys_poll);
915 
916 	/*
917 	 * Yibbidy, yibbidy, yibbidy; that's all folks.
918 	 */
919 	record_loop_stats(clock_offset, drift_comp, clock_jitter,
920 	    clock_stability, sys_poll);
921 	DPRINTF(1, ("local_clock: offset %.9f jit %.9f freq %.3f stab %.3f poll %d\n",
922 		    clock_offset, clock_jitter, drift_comp * 1e6,
923 		    clock_stability * 1e6, sys_poll));
924 	return (rval);
925 #endif /* not LOCKCLOCK */
926 }
927 
928 
929 /*
930  * adj_host_clock - Called once every second to update the local clock.
931  *
932  * LOCKCLOCK: The only thing this routine does is increment the
933  * sys_rootdisp variable.
934  */
935 void
936 adj_host_clock(
937 	void
938 	)
939 {
940 	double	offset_adj;
941 	double	freq_adj;
942 
943 	/*
944 	 * Update the dispersion since the last update. In contrast to
945 	 * NTPv3, NTPv4 does not declare unsynchronized after one day,
946 	 * since the dispersion check serves this function. Also,
947 	 * since the poll interval can exceed one day, the old test
948 	 * would be counterproductive. During the startup clamp period, the
949 	 * time constant is clamped at 2.
950 	 */
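	/*
	 * With the default CLOCK_PHI of 15e-6 this grows the root
	 * dispersion by 15 us every second, roughly 1.3 s per day in
	 * the absence of updates.
	 */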
951 	sys_rootdisp += clock_phi;
952 #ifndef LOCKCLOCK
953 	if (!ntp_enable || mode_ntpdate)
954 		return;
955 	/*
956 	 * Determine the phase adjustment. The gain factor (denominator)
957 	 * increases with poll interval, so is dominated by the FLL
958 	 * above the Allan intercept. Note the reduced time constant at
959 	 * startup.
960 	 */
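	/*
	 * For example, with sys_poll = 6 (64 s) the denominator is
	 * CLOCK_PLL * 64 = 1024, so about 1/1024 of the residual
	 * clock_offset is amortized each second; during the startup
	 * clamp the denominator is CLOCK_PLL * 2 = 32 instead.
	 */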
961 	if (state != EVNT_SYNC) {
962 		offset_adj = 0.;
963 	} else if (freq_cnt > 0) {
964 		offset_adj = clock_offset / (CLOCK_PLL * ULOGTOD(1));
965 		freq_cnt--;
966 #ifdef KERNEL_PLL
967 	} else if (pll_control && kern_enable) {
968 		offset_adj = 0.;
969 #endif /* KERNEL_PLL */
970 	} else {
971 		offset_adj = clock_offset / (CLOCK_PLL * ULOGTOD(sys_poll));
972 	}
973 
974 	/*
975 	 * If the kernel discipline is enabled the frequency correction
976 	 * drift_comp has already been engaged via ntp_adjtime() in
977 	 * set_freq().  Otherwise it is a component of the adj_systime()
978 	 * offset.
979 	 */
980 #ifdef KERNEL_PLL
981 	if (pll_control && kern_enable)
982 		freq_adj = 0.;
983 	else
984 #endif /* KERNEL_PLL */
985 		freq_adj = drift_comp;
986 
987 	/* Bound absolute value of total adjustment to NTP_MAXFREQ. */
988 	if (offset_adj + freq_adj > NTP_MAXFREQ)
989 		offset_adj = NTP_MAXFREQ - freq_adj;
990 	else if (offset_adj + freq_adj < -NTP_MAXFREQ)
991 		offset_adj = -NTP_MAXFREQ - freq_adj;
992 
993 	clock_offset -= offset_adj;
994 	/*
995 	 * Windows port adj_systime() must be called each second,
996 	 * even if the argument is zero, to ease emulation of
997 	 * adjtime() using Windows' slew API which controls the rate
998 	 * but does not automatically stop slewing when an offset
999 	 * has decayed to zero.
1000 	 */
1001 	DEBUG_INSIST(enable_panic_check == TRUE);
1002 	enable_panic_check = FALSE;
1003 	adj_systime(offset_adj + freq_adj);
1004 	enable_panic_check = TRUE;
1005 #endif /* LOCKCLOCK */
1006 }
1007 
1008 
1009 /*
1010  * Clock state machine. Enter new state and set state variables.
1011  */
1012 static void
1013 rstclock(
1014 	int	trans,		/* new state */
1015 	double	offset		/* new offset */
1016 	)
1017 {
1018 	DPRINTF(2, ("rstclock: mu %lu state %d poll %d count %d\n",
1019 		    current_time - clock_epoch, trans, sys_poll,
1020 		    tc_counter));
1021 	if (trans != state && trans != EVNT_FSET)
1022 		report_event(trans, NULL, NULL);
1023 	state = trans;
1024 	last_offset = clock_offset = offset;
1025 	clock_epoch = current_time;
1026 }
1027 
1028 
1029 /*
1030  * direct_freq - calculate frequency directly
1031  *
1032  * This is very carefully done. When the offset is first computed at the
1033  * first update, a residual frequency component results. Subsequently,
1034  * updates are suppressed until the end of the measurement interval while
1035  * the offset is amortized. At the end of the interval the frequency is
1036  * calculated from the current offset, residual offset, length of the
1037  * interval and residual frequency component. At the same time the
1038  * frequency file is armed for update at the next hourly stats.
1039  */
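/*
 * For example, an offset of +0.03 s measured 300 s after the previous
 * update implies a frequency error of 0.03 / 300 = 100e-6 s/s (100 PPM),
 * which set_freq() then installs as drift_comp.
 */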
1040 static double
1041 direct_freq(
1042 	double	fp_offset
1043 	)
1044 {
1045 	set_freq(fp_offset / (current_time - clock_epoch));
1046 
1047 	return drift_comp;
1048 }
1049 
1050 
1051 /*
1052  * set_freq - set clock frequency correction
1053  *
1054  * Used to step the frequency correction at startup, possibly again once
1055  * the frequency is measured (that is, transitioning from EVNT_NSET to
1056  * EVNT_FSET), and finally to switch between daemon and kernel loop
1057  * discipline at runtime.
1058  *
1059  * When the kernel loop discipline is available but the daemon loop is
1060  * in use, the kernel frequency correction is disabled (set to 0) to
1061  * ensure drift_comp is applied by only one of the loops.
1062  */
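/*
 * For example, loop_config(LOOP_DRIFTINIT) arrives here with the
 * driftfile value divided by 1e6, so a stored -12.34 (PPM) becomes
 * -12.34e-6 s/s and, when the kernel loop is in use, is handed to the
 * kernel as DTOFREQ(-12.34e-6).
 */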
1063 static void
1064 set_freq(
1065 	double	freq		/* frequency update */
1066 	)
1067 {
1068 	const char *	loop_desc;
1069 	int ntp_adj_ret;
1070 
1071 	(void)ntp_adj_ret; /* not always used below... */
1072 	drift_comp = freq;
1073 	loop_desc = "ntpd";
1074 #ifdef KERNEL_PLL
1075 	if (pll_control) {
1076 		ZERO(ntv);
1077 		ntv.modes = MOD_FREQUENCY;
1078 		if (kern_enable) {
1079 			loop_desc = "kernel";
1080 			ntv.freq = DTOFREQ(drift_comp);
1081 		}
1082 		if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) {
1083 		    ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, 0, 0, __LINE__ - 1);
1084 		}
1085 	}
1086 #endif /* KERNEL_PLL */
1087 	mprintf_event(EVNT_FSET, NULL, "%s %.3f PPM", loop_desc,
1088 	    drift_comp * 1e6);
1089 }
1090 
1091 
1092 #ifdef KERNEL_PLL
1093 static void
1094 start_kern_loop(void)
1095 {
1096 	static int atexit_done;
1097 	int ntp_adj_ret;
1098 
1099 	pll_control = TRUE;
1100 	ZERO(ntv);
1101 	ntv.modes = MOD_BITS;
1102 	ntv.status = STA_PLL;
1103 	ntv.maxerror = MAXDISPERSE;
1104 	ntv.esterror = MAXDISPERSE;
1105 	ntv.constant = sys_poll; /* why is it that here constant is unconditionally set to sys_poll, whereas elsewhere it is modified depending on nanosecond vs. microsecond kernel? */
1106 #ifdef SIGSYS
1107 	/*
1108 	 * Use sigsetjmp() to save state and then call ntp_adjtime(); if
1109 	 * it fails, then pll_trap() will set pll_control FALSE before
1110 	 * returning control using siglogjmp().
1111 	 * returning control using siglongjmp().
1112 	newsigsys.sa_handler = pll_trap;
1113 	newsigsys.sa_flags = 0;
1114 	if (sigaction(SIGSYS, &newsigsys, &sigsys)) {
1115 		msyslog(LOG_ERR, "sigaction() trap SIGSYS: %m");
1116 		pll_control = FALSE;
1117 	} else {
1118 		if (sigsetjmp(env, 1) == 0) {
1119 			if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) {
1120 			    ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, 0, 0, __LINE__ - 1);
1121 			}
1122 		}
1123 		if (sigaction(SIGSYS, &sigsys, NULL)) {
1124 			msyslog(LOG_ERR,
1125 			    "sigaction() restore SIGSYS: %m");
1126 			pll_control = FALSE;
1127 		}
1128 	}
1129 #else /* SIGSYS */
1130 	if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) {
1131 	    ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, 0, 0, __LINE__ - 1);
1132 	}
1133 #endif /* SIGSYS */
1134 
1135 	/*
1136 	 * Save the result status and light up an external clock
1137 	 * if available.
1138 	 */
1139 	pll_status = ntv.status;
1140 	if (pll_control) {
1141 		if (!atexit_done) {
1142 			atexit_done = TRUE;
1143 			atexit(&stop_kern_loop);
1144 		}
1145 #ifdef STA_NANO
1146 		if (pll_status & STA_CLK)
1147 			ext_enable = TRUE;
1148 #endif /* STA_NANO */
1149 		report_event(EVNT_KERN, NULL,
1150 	  	    "kernel time sync enabled");
1151 	}
1152 }
1153 #endif	/* KERNEL_PLL */
1154 
1155 
1156 #ifdef KERNEL_PLL
1157 static void
1158 stop_kern_loop(void)
1159 {
1160 	if (pll_control && kern_enable)
1161 		report_event(EVNT_KERN, NULL,
1162 		    "kernel time sync disabled");
1163 }
1164 #endif	/* KERNEL_PLL */
1165 
1166 
1167 /*
1168  * select_loop() - choose kernel or daemon loop discipline.
1169  */
1170 void
1171 select_loop(
1172 	int	use_kern_loop
1173 	)
1174 {
1175 	if (kern_enable == use_kern_loop)
1176 		return;
1177 #ifdef KERNEL_PLL
1178 	if (pll_control && !use_kern_loop)
1179 		stop_kern_loop();
1180 #endif
1181 	kern_enable = use_kern_loop;
1182 #ifdef KERNEL_PLL
1183 	if (pll_control && use_kern_loop)
1184 		start_kern_loop();
1185 #endif
1186 	/*
1187 	 * If this loop selection change occurs after initial startup,
1188 	 * call set_freq() to switch the frequency compensation to or
1189 	 * from the kernel loop.
1190 	 */
1191 #ifdef KERNEL_PLL
1192 	if (pll_control && loop_started)
1193 		set_freq(drift_comp);
1194 #endif
1195 }
1196 
1197 
1198 /*
1199  * huff-n'-puff filter
1200  */
1201 void
1202 huffpuff(void)
1203 {
1204 	int i;
1205 
1206 	if (sys_huffpuff == NULL)
1207 		return;
1208 
1209 	sys_huffptr = (sys_huffptr + 1) % sys_hufflen;
1210 	sys_huffpuff[sys_huffptr] = 1e9;
1211 	sys_mindly = 1e9;
1212 	for (i = 0; i < sys_hufflen; i++) {
1213 		if (sys_huffpuff[i] < sys_mindly)
1214 			sys_mindly = sys_huffpuff[i];
1215 	}
1216 }
1217 
1218 
1219 /*
1220  * loop_config - configure the loop filter
1221  *
1222  * LOCKCLOCK: The LOOP_DRIFTINIT and LOOP_DRIFTCOMP cases are no-ops.
1223  */
1224 void
1225 loop_config(
1226 	int	item,
1227 	double	freq
1228 	)
1229 {
1230 	int	i;
1231 	double	ftemp;
1232 
1233 	DPRINTF(2, ("loop_config: item %d freq %f\n", item, freq));
1234 	switch (item) {
1235 
1236 	/*
1237 	 * We first assume the kernel supports the ntp_adjtime()
1238 	 * syscall. If that syscall works, initialize the kernel time
1239 	 * variables. Otherwise, continue leaving no harm behind.
1240 	 */
1241 	case LOOP_DRIFTINIT:
1242 #ifndef LOCKCLOCK
1243 #ifdef KERNEL_PLL
1244 		if (mode_ntpdate)
1245 			break;
1246 
1247 		start_kern_loop();
1248 #endif /* KERNEL_PLL */
1249 
1250 		/*
1251 		 * Initialize frequency if given; otherwise, begin frequency
1252 		 * calibration phase.
1253 		 */
1254 		ftemp = init_drift_comp / 1e6;
1255 		if (ftemp > NTP_MAXFREQ)
1256 			ftemp = NTP_MAXFREQ;
1257 		else if (ftemp < -NTP_MAXFREQ)
1258 			ftemp = -NTP_MAXFREQ;
1259 		set_freq(ftemp);
1260 		if (freq_set)
1261 			rstclock(EVNT_FSET, 0);
1262 		else
1263 			rstclock(EVNT_NSET, 0);
1264 		loop_started = TRUE;
1265 #endif /* LOCKCLOCK */
1266 		break;
1267 
1268 	case LOOP_KERN_CLEAR:
1269 #if 0		/* XXX: needs more review, and how can we get here? */
1270 #ifndef LOCKCLOCK
1271 # ifdef KERNEL_PLL
1272 		if (pll_control && kern_enable) {
1273 			memset((char *)&ntv, 0, sizeof(ntv));
1274 			ntv.modes = MOD_STATUS;
1275 			ntv.status = STA_UNSYNC;
1276 			ntp_adjtime(&ntv);
1277 			sync_status("kernel time sync disabled",
1278 				pll_status,
1279 				ntv.status);
1280 		   }
1281 # endif /* KERNEL_PLL */
1282 #endif /* LOCKCLOCK */
1283 #endif
1284 		break;
1285 
1286 	/*
1287 	 * Tinker command variables for Ulrich Windl. Very dangerous.
1288 	 */
1289 	case LOOP_ALLAN:	/* Allan intercept (log2) (allan) */
1290 		allan_xpt = (u_char)freq;
1291 		break;
1292 
1293 	case LOOP_CODEC:	/* audio codec frequency (codec) */
1294 		clock_codec = freq / 1e6;
1295 		break;
1296 
1297 	case LOOP_PHI:		/* dispersion threshold (dispersion) */
1298 		clock_phi = freq / 1e6;
1299 		break;
1300 
1301 	case LOOP_FREQ:		/* initial frequency (freq) */
1302 		init_drift_comp = freq;
1303 		freq_set++;
1304 		break;
1305 
1306 	case LOOP_HUFFPUFF:	/* huff-n'-puff length (huffpuff) */
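		/*
		 * For example, "tinker huffpuff 7200" with the usual
		 * 900 s HUFFPUFF segment length yields 8 filter stages,
		 * each primed below with an enormous (1e9) delay.
		 */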
1307 		if (freq < HUFFPUFF)
1308 			freq = HUFFPUFF;
1309 		sys_hufflen = (int)(freq / HUFFPUFF);
1310 		sys_huffpuff = emalloc(sizeof(sys_huffpuff[0]) *
1311 		    sys_hufflen);
1312 		for (i = 0; i < sys_hufflen; i++)
1313 			sys_huffpuff[i] = 1e9;
1314 		sys_mindly = 1e9;
1315 		break;
1316 
1317 	case LOOP_PANIC:	/* panic threshold (panic) */
1318 		clock_panic = freq;
1319 		break;
1320 
1321 	case LOOP_MAX:		/* step threshold (step) */
1322 		clock_max_fwd = clock_max_back = freq;
1323 		if (freq == 0 || freq > 0.5)
1324 			select_loop(FALSE);
1325 		break;
1326 
1327 	case LOOP_MAX_BACK:	/* step threshold (step) */
1328 		clock_max_back = freq;
1329 		/*
1330 		 * Leave using the kernel discipline code unless both
1331 		 * limits are massive.  This assumes the reason to stop
1332 		 * using it is that it's pointless, not that it goes wrong.
1333 		 */
1334 		if (  (clock_max_back == 0 || clock_max_back > 0.5)
1335 		   || (clock_max_fwd  == 0 || clock_max_fwd  > 0.5))
1336 			select_loop(FALSE);
1337 		break;
1338 
1339 	case LOOP_MAX_FWD:	/* step threshold (step) */
1340 		clock_max_fwd = freq;
1341 		if (  (clock_max_back == 0 || clock_max_back > 0.5)
1342 		   || (clock_max_fwd  == 0 || clock_max_fwd  > 0.5))
1343 			select_loop(FALSE);
1344 		break;
1345 
1346 	case LOOP_MINSTEP:	/* stepout threshold (stepout) */
1347 		if (freq < CLOCK_MINSTEP)
1348 			clock_minstep = CLOCK_MINSTEP;
1349 		else
1350 			clock_minstep = freq;
1351 		break;
1352 
1353 	case LOOP_TICK:		/* tick increment (tick) */
1354 		set_sys_tick_precision(freq);
1355 		break;
1356 
1357 	case LOOP_LEAP:		/* not used, fall through */
1358 	default:
1359 		msyslog(LOG_NOTICE,
1360 		    "loop_config: unsupported option %d", item);
1361 	}
1362 }
1363 
1364 
1365 #if defined(KERNEL_PLL) && defined(SIGSYS)
1366 /*
1367  * pll_trap - trap processor for undefined syscalls
1368  *
1369  * This nugget is called by the kernel when the SYS_ntp_adjtime()
1370  * syscall bombs because the silly thing has not been implemented in
1371  * the kernel. In this case the phase-lock loop is emulated by
1372  * the stock adjtime() syscall and a lot of indelicate abuse.
1373  */
1374 static RETSIGTYPE
1375 pll_trap(
1376 	int arg
1377 	)
1378 {
1379 	pll_control = FALSE;
1380 	siglongjmp(env, 1);
1381 }
1382 #endif /* KERNEL_PLL && SIGSYS */
1383