1 /* $NetBSD: ntp_loopfilter.c,v 1.13 2020/05/25 20:47:25 christos Exp $ */
2
3 /*
4 * ntp_loopfilter.c - implements the NTP loop filter algorithm
5 *
6 * ATTENTION: Get approval from Dave Mills on all changes to this file!
7 *
8 */
9 #ifdef HAVE_CONFIG_H
10 # include <config.h>
11 #endif
12
13 #ifdef USE_SNPRINTB
14 # include <util.h>
15 #endif
16 #include "ntpd.h"
17 #include "ntp_io.h"
18 #include "ntp_unixtime.h"
19 #include "ntp_stdlib.h"
20 #include "timexsup.h"
21
22 #include <limits.h>
23 #include <stdio.h>
24 #include <ctype.h>
25
26 #include <signal.h>
27 #include <setjmp.h>
28
29 #ifdef KERNEL_PLL
30 #include "ntp_syscall.h"
31 #endif /* KERNEL_PLL */
32
33 /*
34 * This is an implementation of the clock discipline algorithm described
35 * in UDel TR 97-4-3, as amended. It operates as an adaptive parameter,
36 * hybrid phase/frequency-lock loop. A number of sanity checks are
37 * included to protect against timewarps, timespikes and general mayhem.
38 * All units are in s and s/s, unless noted otherwise.
39 */
40 #define CLOCK_MAX .128 /* default step threshold (s) */
41 #define CLOCK_MINSTEP 300. /* default stepout threshold (s) */
42 #define CLOCK_PANIC 1000. /* default panic threshold (s) */
43 #define CLOCK_PHI 15e-6 /* max frequency error (s/s) */
44 #define CLOCK_PLL 16. /* PLL loop gain (log2) */
45 #define CLOCK_AVG 8. /* parameter averaging constant */
46 #define CLOCK_FLL .25 /* FLL loop gain */
47 #define CLOCK_FLOOR .0005 /* startup offset floor (s) */
48 #define CLOCK_ALLAN 11 /* Allan intercept (log2 s) */
49 #define CLOCK_LIMIT 30 /* poll-adjust threshold */
50 #define CLOCK_PGATE 4. /* poll-adjust gate */
51 #define PPS_MAXAGE 120 /* kernel pps signal timeout (s) */
52 #define FREQTOD(x) ((x) / 65536e6) /* NTP to double */
53 #define DTOFREQ(x) ((int32)((x) * 65536e6)) /* double to NTP */
54
55 /*
56 * Clock discipline state machine. This is used to control the
57 * synchronization behavior during initialization and following a
58 * timewarp.
59 *
60 * State < step > step Comments
61 * ========================================================
62 * NSET FREQ step, FREQ freq not set
63 *
64 * FSET SYNC step, SYNC freq set
65 *
66 * FREQ if (mu < 900) if (mu < 900) set freq direct
67 * ignore ignore
68 * else else
69 * freq, SYNC freq, step, SYNC
70 *
71 * SYNC SYNC SPIK, ignore adjust phase/freq
72 *
73 * SPIK SYNC if (mu < 900) adjust phase/freq
74 * ignore
75 * step, SYNC
76 */
77 /*
78 * Kernel PLL/PPS state machine. This is used with the kernel PLL
79 * modifications described in the documentation.
80 *
81 * If kernel support for the ntp_adjtime() system call is available, the
82 * ntp_control flag is set. The ntp_enable and kern_enable flags can be
83 * set at configuration time or run time using ntpdc. If ntp_enable is
84 * false, the discipline loop is unlocked and no corrections of any kind
85 * are made. If both ntp_control and kern_enable are set, the kernel
86 * support is used as described above; if false, the kernel is bypassed
87 * entirely and the daemon discipline used instead.
88 *
89 * There have been three versions of the kernel discipline code. The
 * first (microkernel) now in Solaris disciplines the microseconds. The
91 * second and third (nanokernel) disciplines the clock in nanoseconds.
 * These versions are identified if the symbol STA_PLL is present in the
93 * header file /usr/include/sys/timex.h. The third and current version
94 * includes TAI offset and is identified by the symbol NTP_API with
95 * value 4.
96 *
97 * Each PPS time/frequency discipline can be enabled by the atom driver
98 * or another driver. If enabled, the STA_PPSTIME and STA_FREQ bits are
99 * set in the kernel status word; otherwise, these bits are cleared.
 * These bits are also cleared if the kernel reports an error.
101 *
102 * If an external clock is present, the clock driver sets STA_CLK in the
103 * status word. When the local clock driver sees this bit, it updates
104 * via this routine, which then calls ntp_adjtime() with the STA_PLL bit
105 * set to zero, in which case the system clock is not adjusted. This is
106 * also a signal for the external clock driver to discipline the system
107 * clock. Unless specified otherwise, all times are in seconds.
108 */
/*
 * Program variables that can be tinkered.
 */
double	clock_max_back = CLOCK_MAX;	/* step threshold */
double	clock_max_fwd = CLOCK_MAX;	/* step threshold */
double	clock_minstep = CLOCK_MINSTEP;	/* stepout threshold */
double	clock_panic = CLOCK_PANIC;	/* panic threshold */
double	clock_phi = CLOCK_PHI;		/* dispersion rate (s/s) */
u_char	allan_xpt = CLOCK_ALLAN;	/* Allan intercept (log2 s) */

/*
 * Program variables
 */
static double clock_offset;	/* offset */
double	clock_jitter;		/* offset jitter */
double	drift_comp;		/* frequency (s/s) */
static double init_drift_comp; /* initial frequency (PPM) */
double	clock_stability;	/* frequency stability (wander) (s/s) */
double	clock_codec;		/* audio codec frequency (samples/s) */
static u_long clock_epoch;	/* last update */
u_int	sys_tai;		/* TAI offset from UTC */
static int loop_started;	/* TRUE after LOOP_DRIFTINIT */
static void rstclock (int, double); /* transition function */
static double direct_freq(double); /* direct set frequency */
static void set_freq(double);	/* set frequency */
#ifndef PATH_MAX
# define PATH_MAX MAX_PATH	/* Windows fallback for the POSIX name */
#endif
static char relative_path[PATH_MAX + 1]; /* relative path per recursive make */
static char *this_file = NULL;	/* cached result of file_name() */

#ifdef KERNEL_PLL
static struct timex ntv;	/* ntp_adjtime() parameters */
int	pll_status;		/* last kernel status bits */
#if defined(STA_NANO) && NTP_API == 4
static u_int loop_tai;		/* last TAI offset */
#endif /* STA_NANO */
static	void	start_kern_loop(void);
static	void	stop_kern_loop(void);
#endif /* KERNEL_PLL */

/*
 * Clock state machine control flags
 */
int	ntp_enable = TRUE;	/* clock discipline enabled */
int	pll_control;		/* kernel support available */
int	kern_enable = TRUE;	/* kernel support enabled */
int	hardpps_enable;		/* kernel PPS discipline enabled */
int	ext_enable;		/* external clock enabled */
int	pps_stratum;		/* pps stratum */
int	kernel_status;		/* from ntp_adjtime */
int	force_step_once = FALSE; /* always step time once at startup (-G) */
int	mode_ntpdate = FALSE;	/* exit on first clock set (-q) */
int	freq_cnt;		/* initial frequency clamp */
int	freq_set;		/* initial set frequency switch */

/*
 * Clock state machine variables
 */
int	state = 0;		/* clock discipline state */
u_char	sys_poll;		/* time constant/poll (log2 s) */
int	tc_counter;		/* jiggle counter */
double	last_offset;		/* last offset (s) */

/* [Bug 3615] time gates that rate-limit poll (time constant) changes */
u_int	tc_twinlo;		/* TC step down not before this time */
u_int	tc_twinhi;		/* TC step up not before this time */

/*
 * Huff-n'-puff filter variables
 */
static double *sys_huffpuff;	/* huff-n'-puff filter */
static int sys_hufflen;		/* huff-n'-puff filter stages */
static int sys_huffptr;		/* huff-n'-puff filter pointer */
static double sys_mindly;	/* huff-n'-puff filter min delay */

#if defined(KERNEL_PLL)
/* Emacs cc-mode goes nuts if we split the next line... */
#define MOD_BITS (MOD_OFFSET | MOD_MAXERROR | MOD_ESTERROR | \
    MOD_STATUS | MOD_TIMECONST)
#ifdef SIGSYS
static	void pll_trap (int);	/* configuration trap */
static	struct sigaction sigsys; /* current sigaction status */
static	struct sigaction newsigsys; /* new sigaction status */
static	sigjmp_buf env;		/* environment var. for pll_trap() */
#endif /* SIGSYS */
#endif /* KERNEL_PLL */
195
196 static void
sync_status(const char * what,int ostatus,int nstatus)197 sync_status(const char *what, int ostatus, int nstatus)
198 {
199 char obuf[256], nbuf[256], tbuf[1024];
200 #if defined(USE_SNPRINTB) && defined (STA_FMT)
201 snprintb(obuf, sizeof(obuf), STA_FMT, ostatus);
202 snprintb(nbuf, sizeof(nbuf), STA_FMT, nstatus);
203 #else
204 snprintf(obuf, sizeof(obuf), "%04x", ostatus);
205 snprintf(nbuf, sizeof(nbuf), "%04x", nstatus);
206 #endif
207 snprintf(tbuf, sizeof(tbuf), "%s status: %s -> %s", what, obuf, nbuf);
208 report_event(EVNT_KERN, NULL, tbuf);
209 }
210
211 /*
212 * file_name - return pointer to non-relative portion of this C file pathname
213 */
file_name(void)214 static char *file_name(void)
215 {
216 if (this_file == NULL) {
217 (void)strncpy(relative_path, __FILE__, PATH_MAX);
218 for (this_file=relative_path;
219 *this_file && ! isalnum((unsigned char)*this_file);
220 this_file++) ;
221 }
222 return this_file;
223 }
224
225 /*
226 * init_loopfilter - initialize loop filter data
227 */
228 void
init_loopfilter(void)229 init_loopfilter(void)
230 {
231 /*
232 * Initialize state variables.
233 */
234 sys_poll = ntp_minpoll;
235 clock_jitter = LOGTOD(sys_precision);
236 freq_cnt = (int)clock_minstep;
237 }
238
#ifdef KERNEL_PLL
/*
 * ntp_adjtime_error_handler - process errors from ntp_adjtime
 *
 * Decodes either a failed call (ret == -1, dispatch on saved_errno) or
 * a TIME_* kernel state code returned by ntp_adjtime(), and logs a
 * human-readable explanation via msyslog()/report_event().  Reads
 * ptimex->status for the TIME_ERROR decode; modifies no state.
 */
static void
ntp_adjtime_error_handler(
	const char *caller,	/* name of calling function */
	struct timex *ptimex,	/* pointer to struct timex */
	int ret,		/* return value from ntp_adjtime */
	int saved_errno,	/* value of errno when ntp_adjtime returned */
	int pps_call,		/* ntp_adjtime call was PPS-related */
	int tai_call,		/* ntp_adjtime call was TAI-related */
	int line		/* line number of ntp_adjtime call */
	)
{
	char des[1024] = "";	/* Decoded Error Status */
	char *dbp, *ebp;	/* cursor and end of the decode buffer */

	dbp = des;
	ebp = dbp + sizeof(des);

	switch (ret) {
	case -1:
		/* the call itself failed; explain via errno */
		switch (saved_errno) {
		case EFAULT:
			/*
			 * NOTE(review): pointer is logged via a (long)
			 * cast; this truncates on LLP64 targets --
			 * confirm whether %p would be preferable here.
			 */
			msyslog(LOG_ERR, "%s: %s line %d: invalid struct timex pointer: 0x%lx",
			    caller, file_name(), line,
			    (long)((void *)ptimex)
			);
			break;
		case EINVAL:
			msyslog(LOG_ERR, "%s: %s line %d: invalid struct timex \"constant\" element value: %ld",
			    caller, file_name(), line,
			    (long)(ptimex->constant)
			);
			break;
		case EPERM:
			/* log the TAI-specific message first, then the
			 * generic one (both are emitted for TAI calls) */
			if (tai_call) {
			    errno = saved_errno;
			    msyslog(LOG_ERR,
				"%s: ntp_adjtime(TAI) failed: %m",
				caller);
			}
			errno = saved_errno;
			msyslog(LOG_ERR, "%s: %s line %d: ntp_adjtime: %m",
			    caller, file_name(), line
			);
			break;
		default:
			msyslog(LOG_NOTICE, "%s: %s line %d: unhandled errno value %d after failed ntp_adjtime call",
			    caller, file_name(), line,
			    saved_errno
			);
			break;
		}
		break;
#ifdef TIME_OK
	case TIME_OK: /* 0: synchronized, no leap second warning */
		/* msyslog(LOG_INFO, "kernel reports time is synchronized normally"); */
		break;
#else
# warning TIME_OK is not defined
#endif
#ifdef TIME_INS
	case TIME_INS: /* 1: positive leap second warning */
		msyslog(LOG_INFO, "kernel reports leap second insertion scheduled");
		break;
#else
# warning TIME_INS is not defined
#endif
#ifdef TIME_DEL
	case TIME_DEL: /* 2: negative leap second warning */
		msyslog(LOG_INFO, "kernel reports leap second deletion scheduled");
		break;
#else
# warning TIME_DEL is not defined
#endif
#ifdef TIME_OOP
	case TIME_OOP: /* 3: leap second in progress */
		msyslog(LOG_INFO, "kernel reports leap second in progress");
		break;
#else
# warning TIME_OOP is not defined
#endif
#ifdef TIME_WAIT
	case TIME_WAIT: /* 4: leap second has occurred */
		msyslog(LOG_INFO, "kernel reports leap second has occurred");
		break;
#else
# warning TIME_WAIT is not defined
#endif
#ifdef TIME_ERROR
#if 0

from the reference implementation of ntp_gettime():

		// Hardware or software error
		if ((time_status & (STA_UNSYNC | STA_CLOCKERR))

	/*
	 * PPS signal lost when either time or frequency synchronization
	 * requested
	 */
		|| (time_status & (STA_PPSFREQ | STA_PPSTIME)
		    && !(time_status & STA_PPSSIGNAL))

	/*
	 * PPS jitter exceeded when time synchronization requested
	 */
		|| (time_status & STA_PPSTIME &&
		    time_status & STA_PPSJITTER)

	/*
	 * PPS wander exceeded or calibration error when frequency
	 * synchronization requested
	 */
		|| (time_status & STA_PPSFREQ &&
		    time_status & (STA_PPSWANDER | STA_PPSERROR)))
			return (TIME_ERROR);

or, from ntp_adjtime():

	if (  (time_status & (STA_UNSYNC | STA_CLOCKERR))
	    || (time_status & (STA_PPSFREQ | STA_PPSTIME)
		&& !(time_status & STA_PPSSIGNAL))
	    || (time_status & STA_PPSTIME
		&& time_status & STA_PPSJITTER)
	    || (time_status & STA_PPSFREQ
		&& time_status & (STA_PPSWANDER | STA_PPSERROR))
	   )
		return (TIME_ERROR);
#endif

	case TIME_ERROR: /* 5: unsynchronized, or loss of synchronization */
		/* error (see status word) */

		/* accumulate ';'-separated reasons into des[] */
		if (ptimex->status & STA_UNSYNC)
			xsbprintf(&dbp, ebp, "%sClock Unsynchronized",
				(*des) ? "; " : "");

		if (ptimex->status & STA_CLOCKERR)
			xsbprintf(&dbp, ebp, "%sClock Error",
				(*des) ? "; " : "");

		if (!(ptimex->status & STA_PPSSIGNAL)
		    && ptimex->status & STA_PPSFREQ)
			xsbprintf(&dbp, ebp, "%sPPS Frequency Sync wanted but no PPS",
				(*des) ? "; " : "");

		if (!(ptimex->status & STA_PPSSIGNAL)
		    && ptimex->status & STA_PPSTIME)
			xsbprintf(&dbp, ebp, "%sPPS Time Sync wanted but no PPS signal",
				(*des) ? "; " : "");

		if (   ptimex->status & STA_PPSTIME
		    && ptimex->status & STA_PPSJITTER)
			xsbprintf(&dbp, ebp, "%sPPS Time Sync wanted but PPS Jitter exceeded",
				(*des) ? "; " : "");

		if (   ptimex->status & STA_PPSFREQ
		    && ptimex->status & STA_PPSWANDER)
			xsbprintf(&dbp, ebp, "%sPPS Frequency Sync wanted but PPS Wander exceeded",
				(*des) ? "; " : "");

		if (   ptimex->status & STA_PPSFREQ
		    && ptimex->status & STA_PPSERROR)
			xsbprintf(&dbp, ebp, "%sPPS Frequency Sync wanted but Calibration error detected",
				(*des) ? "; " : "");

		if (pps_call && !(ptimex->status & STA_PPSSIGNAL))
			report_event(EVNT_KERN, NULL,
			    "no PPS signal");
		DPRINTF(1, ("kernel loop status %#x (%s)\n",
			ptimex->status, des));
		/*
		 * This code may be returned when ntp_adjtime() has just
		 * been called for the first time, quite a while after
		 * startup, when ntpd just starts to discipline the kernel
		 * time. In this case the occurrence of this message
		 * can be pretty confusing.
		 *
		 * HMS: How about a message when we begin kernel processing:
		 *    Determining kernel clock state...
		 * so an initial TIME_ERROR message is less confusing,
		 * or skipping the first message (ugh),
		 * or ???
		 * msyslog(LOG_INFO, "kernel reports time synchronization lost");
		 */
		msyslog(LOG_INFO, "kernel reports TIME_ERROR: %#x: %s",
			ptimex->status, des);
		break;
#else
# warning TIME_ERROR is not defined
#endif
	default:
		msyslog(LOG_NOTICE, "%s: %s line %d: unhandled return value %d from ntp_adjtime() in %s at line %d",
		    caller, file_name(), line,
		    ret,
		    __func__, __LINE__
		);
		break;
	}
	return;
}
#endif
444
445 /*
446 * local_clock - the NTP logical clock loop filter.
447 *
448 * Return codes:
449 * -1 update ignored: exceeds panic threshold
450 * 0 update ignored: popcorn or exceeds step threshold
451 * 1 clock was slewed
452 * 2 clock was stepped
453 *
454 * LOCKCLOCK: The only thing this routine does is set the
455 * sys_rootdisp variable equal to the peer dispersion.
456 */
457 int
local_clock(struct peer * peer,double fp_offset)458 local_clock(
459 struct peer *peer, /* synch source peer structure */
460 double fp_offset /* clock offset (s) */
461 )
462 {
463 int rval; /* return code */
464 int osys_poll; /* old system poll */
465 int ntp_adj_ret; /* returned by ntp_adjtime */
466 double mu; /* interval since last update */
467 double clock_frequency; /* clock frequency */
468 double dtemp, etemp; /* double temps */
469 char tbuf[80]; /* report buffer */
470
471 (void)ntp_adj_ret; /* not always used below... */
472 /*
473 * If the loop is opened or the NIST LOCKCLOCK is in use,
474 * monitor and record the offsets anyway in order to determine
475 * the open-loop response and then go home.
476 */
477 #ifndef LOCKCLOCK
478 if (!ntp_enable)
479 #endif /* not LOCKCLOCK */
480 {
481 record_loop_stats(fp_offset, drift_comp, clock_jitter,
482 clock_stability, sys_poll);
483 return (0);
484 }
485
486 #ifndef LOCKCLOCK
487 /*
488 * If the clock is way off, panic is declared. The clock_panic
489 * defaults to 1000 s; if set to zero, the panic will never
490 * occur. The allow_panic defaults to FALSE, so the first panic
491 * will exit. It can be set TRUE by a command line option, in
492 * which case the clock will be set anyway and time marches on.
493 * But, allow_panic will be set FALSE when the update is less
494 * than the step threshold; so, subsequent panics will exit.
495 */
496 if (fabs(fp_offset) > clock_panic && clock_panic > 0 &&
497 !allow_panic) {
498 snprintf(tbuf, sizeof(tbuf),
499 "%+.0f s; set clock manually within %.0f s.",
500 fp_offset, clock_panic);
501 report_event(EVNT_SYSFAULT, NULL, tbuf);
502 return (-1);
503 }
504
505 allow_panic = FALSE;
506
507 /*
508 * This section simulates ntpdate. If the offset exceeds the
509 * step threshold (128 ms), step the clock to that time and
510 * exit. Otherwise, slew the clock to that time and exit. Note
511 * that the slew will persist and eventually complete beyond the
512 * life of this program. Note that while ntpdate is active, the
513 * terminal does not detach, so the termination message prints
514 * directly to the terminal.
515 */
516 if (mode_ntpdate) {
517 if ( ( fp_offset > clock_max_fwd && clock_max_fwd > 0)
518 || (-fp_offset > clock_max_back && clock_max_back > 0)) {
519 step_systime(fp_offset);
520 msyslog(LOG_NOTICE, "ntpd: time set %+.6f s",
521 fp_offset);
522 printf("ntpd: time set %+.6fs\n", fp_offset);
523 } else {
524 adj_systime(fp_offset);
525 msyslog(LOG_NOTICE, "ntpd: time slew %+.6f s",
526 fp_offset);
527 printf("ntpd: time slew %+.6fs\n", fp_offset);
528 }
529 record_loop_stats(fp_offset, drift_comp, clock_jitter,
530 clock_stability, sys_poll);
531 exit (0);
532 }
533
534 /*
535 * The huff-n'-puff filter finds the lowest delay in the recent
536 * interval. This is used to correct the offset by one-half the
537 * difference between the sample delay and minimum delay. This
538 * is most effective if the delays are highly assymetric and
539 * clockhopping is avoided and the clock frequency wander is
540 * relatively small.
541 */
542 if (sys_huffpuff != NULL) {
543 if (peer->delay < sys_huffpuff[sys_huffptr])
544 sys_huffpuff[sys_huffptr] = peer->delay;
545 if (peer->delay < sys_mindly)
546 sys_mindly = peer->delay;
547 if (fp_offset > 0)
548 dtemp = -(peer->delay - sys_mindly) / 2;
549 else
550 dtemp = (peer->delay - sys_mindly) / 2;
551 fp_offset += dtemp;
552 DPRINTF(1, ("local_clock: size %d mindly %.6f huffpuff %.6f\n",
553 sys_hufflen, sys_mindly, dtemp));
554 }
555
556 /*
557 * Clock state machine transition function which defines how the
558 * system reacts to large phase and frequency excursion. There
559 * are two main regimes: when the offset exceeds the step
560 * threshold (128 ms) and when it does not. Under certain
561 * conditions updates are suspended until the stepout theshold
562 * (900 s) is exceeded. See the documentation on how these
563 * thresholds interact with commands and command line options.
564 *
565 * Note the kernel is disabled if step is disabled or greater
566 * than 0.5 s or in ntpdate mode.
567 */
568 osys_poll = sys_poll;
569 if (sys_poll < peer->minpoll)
570 sys_poll = peer->minpoll;
571 if (sys_poll > peer->maxpoll)
572 sys_poll = peer->maxpoll;
573 mu = current_time - clock_epoch;
574 clock_frequency = drift_comp;
575 rval = 1;
576 if ( ( fp_offset > clock_max_fwd && clock_max_fwd > 0)
577 || (-fp_offset > clock_max_back && clock_max_back > 0)
578 || force_step_once ) {
579 if (force_step_once) {
580 force_step_once = FALSE; /* we want this only once after startup */
581 msyslog(LOG_NOTICE, "Doing intital time step" );
582 }
583
584 switch (state) {
585
586 /*
587 * In SYNC state we ignore the first outlier and switch
588 * to SPIK state.
589 */
590 case EVNT_SYNC:
591 snprintf(tbuf, sizeof(tbuf), "%+.6f s",
592 fp_offset);
593 report_event(EVNT_SPIK, NULL, tbuf);
594 state = EVNT_SPIK;
595 return (0);
596
597 /*
598 * In FREQ state we ignore outliers and inlyers. At the
599 * first outlier after the stepout threshold, compute
600 * the apparent frequency correction and step the phase.
601 */
602 case EVNT_FREQ:
603 if (mu < clock_minstep)
604 return (0);
605
606 clock_frequency = direct_freq(fp_offset);
607
608 /*FALLTHROUGH*/
609
610 /*
611 * In SPIK state we ignore succeeding outliers until
612 * either an inlyer is found or the stepout threshold is
613 * exceeded.
614 */
615 case EVNT_SPIK:
616 if (mu < clock_minstep)
617 return (0);
618
619 /*FALLTHROUGH*/
620
621 /*
622 * We get here by default in NSET and FSET states and
623 * from above in FREQ or SPIK states.
624 *
625 * In NSET state an initial frequency correction is not
626 * available, usually because the frequency file has not
627 * yet been written. Since the time is outside the step
628 * threshold, the clock is stepped. The frequency will
629 * be set directly following the stepout interval.
630 *
631 * In FSET state the initial frequency has been set from
632 * the frequency file. Since the time is outside the
633 * step threshold, the clock is stepped immediately,
634 * rather than after the stepout interval. Guys get
635 * nervous if it takes 15 minutes to set the clock for
636 * the first time.
637 *
638 * In FREQ and SPIK states the stepout threshold has
639 * expired and the phase is still above the step
640 * threshold. Note that a single spike greater than the
641 * step threshold is always suppressed, even with a
642 * long time constant.
643 */
644 default:
645 snprintf(tbuf, sizeof(tbuf), "%+.6f s",
646 fp_offset);
647 report_event(EVNT_CLOCKRESET, NULL, tbuf);
648 step_systime(fp_offset);
649 reinit_timer();
650 tc_counter = 0;
651 clock_jitter = LOGTOD(sys_precision);
652 rval = 2;
653 if (state == EVNT_NSET) {
654 rstclock(EVNT_FREQ, 0);
655 return (rval);
656 }
657 break;
658 }
659 rstclock(EVNT_SYNC, 0);
660 } else {
661 /*
662 * The offset is less than the step threshold. Calculate
663 * the jitter as the exponentially weighted offset
664 * differences.
665 */
666 etemp = SQUARE(clock_jitter);
667 dtemp = SQUARE(max(fabs(fp_offset - last_offset),
668 LOGTOD(sys_precision)));
669 clock_jitter = SQRT(etemp + (dtemp - etemp) /
670 CLOCK_AVG);
671 switch (state) {
672
673 /*
674 * In NSET state this is the first update received and
675 * the frequency has not been initialized. Adjust the
676 * phase, but do not adjust the frequency until after
677 * the stepout threshold.
678 */
679 case EVNT_NSET:
680 adj_systime(fp_offset);
681 rstclock(EVNT_FREQ, fp_offset);
682 break;
683
684 /*
685 * In FREQ state ignore updates until the stepout
686 * threshold. After that, compute the new frequency, but
687 * do not adjust the frequency until the holdoff counter
688 * decrements to zero.
689 */
690 case EVNT_FREQ:
691 if (mu < clock_minstep)
692 return (0);
693
694 clock_frequency = direct_freq(fp_offset);
695 /* fall through */
696
697 /*
698 * We get here by default in FSET, SPIK and SYNC states.
699 * Here compute the frequency update due to PLL and FLL
700 * contributions. Note, we avoid frequency discipline at
701 * startup until the initial transient has subsided.
702 */
703 default:
704 if (freq_cnt == 0) {
705
706 /*
707 * The FLL and PLL frequency gain constants
708 * depend on the time constant and Allan
709 * intercept. The PLL is always used, but
710 * becomes ineffective above the Allan intercept
711 * where the FLL becomes effective.
712 */
713 if (sys_poll >= allan_xpt)
714 clock_frequency +=
715 (fp_offset - clock_offset)
716 / ( max(ULOGTOD(sys_poll), mu)
717 * CLOCK_FLL);
718
719 /*
720 * The PLL frequency gain (numerator) depends on
721 * the minimum of the update interval and Allan
722 * intercept. This reduces the PLL gain when the
723 * FLL becomes effective.
724 */
725 etemp = min(ULOGTOD(allan_xpt), mu);
726 dtemp = 4 * CLOCK_PLL * ULOGTOD(sys_poll);
727 clock_frequency +=
728 fp_offset * etemp / (dtemp * dtemp);
729 }
730 rstclock(EVNT_SYNC, fp_offset);
731 if (fabs(fp_offset) < CLOCK_FLOOR)
732 freq_cnt = 0;
733 break;
734 }
735 }
736
737 #ifdef KERNEL_PLL
738 /*
739 * This code segment works when clock adjustments are made using
740 * precision time kernel support and the ntp_adjtime() system
741 * call. This support is available in Solaris 2.6 and later,
742 * Digital Unix 4.0 and later, FreeBSD, Linux and specially
743 * modified kernels for HP-UX 9 and Ultrix 4. In the case of the
744 * DECstation 5000/240 and Alpha AXP, additional kernel
745 * modifications provide a true microsecond clock and nanosecond
746 * clock, respectively.
747 *
748 * Important note: The kernel discipline is used only if the
749 * step threshold is less than 0.5 s, as anything higher can
750 * lead to overflow problems. This might occur if some misguided
751 * lad set the step threshold to something ridiculous.
752 */
753 if (pll_control && kern_enable && freq_cnt == 0) {
754
755 /*
756 * We initialize the structure for the ntp_adjtime()
757 * system call. We have to convert everything to
758 * microseconds or nanoseconds first. Do not update the
759 * system variables if the ext_enable flag is set. In
760 * this case, the external clock driver will update the
761 * variables, which will be read later by the local
762 * clock driver. Afterwards, remember the time and
763 * frequency offsets for jitter and stability values and
764 * to update the frequency file.
765 */
766 ZERO(ntv);
767 if (ext_enable) {
768 ntv.modes = MOD_STATUS;
769 } else {
770 ntv.modes = MOD_BITS;
771 ntv.offset = var_long_from_dbl(
772 clock_offset, &ntv.modes);
773 #ifdef STA_NANO
774 ntv.constant = sys_poll;
775 #else /* STA_NANO */
776 ntv.constant = sys_poll - 4;
777 #endif /* STA_NANO */
778 if (ntv.constant < 0)
779 ntv.constant = 0;
780
781 ntv.esterror = usec_long_from_dbl(
782 clock_jitter);
783 ntv.maxerror = usec_long_from_dbl(
784 sys_rootdelay / 2 + sys_rootdisp);
785 ntv.status = STA_PLL;
786
787 /*
788 * Enable/disable the PPS if requested.
789 */
790 if (hardpps_enable) {
791 ntv.status |= (STA_PPSTIME | STA_PPSFREQ);
792 if (!(pll_status & STA_PPSTIME))
793 sync_status("PPS enabled",
794 pll_status,
795 ntv.status);
796 } else {
797 ntv.status &= ~(STA_PPSTIME | STA_PPSFREQ);
798 if (pll_status & STA_PPSTIME)
799 sync_status("PPS disabled",
800 pll_status,
801 ntv.status);
802 }
803 if (sys_leap == LEAP_ADDSECOND)
804 ntv.status |= STA_INS;
805 else if (sys_leap == LEAP_DELSECOND)
806 ntv.status |= STA_DEL;
807 }
808
809 /*
810 * Pass the stuff to the kernel. If it squeals, turn off
811 * the pps. In any case, fetch the kernel offset,
812 * frequency and jitter.
813 */
814 ntp_adj_ret = ntp_adjtime(&ntv);
815 /*
816 * A squeal is a return status < 0, or a state change.
817 */
818 if ((0 > ntp_adj_ret) || (ntp_adj_ret != kernel_status)) {
819 kernel_status = ntp_adj_ret;
820 ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, hardpps_enable, 0, __LINE__ - 1);
821 }
822 pll_status = ntv.status;
823 clock_offset = dbl_from_var_long(ntv.offset, ntv.status);
824 clock_frequency = FREQTOD(ntv.freq);
825
826 /*
827 * If the kernel PPS is lit, monitor its performance.
828 */
829 if (ntv.status & STA_PPSTIME) {
830 clock_jitter = dbl_from_var_long(
831 ntv.jitter, ntv.status);
832 }
833
834 #if defined(STA_NANO) && NTP_API == 4
835 /*
836 * If the TAI changes, update the kernel TAI.
837 */
838 if (loop_tai != sys_tai) {
839 loop_tai = sys_tai;
840 ntv.modes = MOD_TAI;
841 ntv.constant = sys_tai;
842 if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) {
843 ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, 0, 1, __LINE__ - 1);
844 }
845 }
846 #endif /* STA_NANO */
847 }
848 #endif /* KERNEL_PLL */
849
850 /*
851 * Clamp the frequency within the tolerance range and calculate
852 * the frequency difference since the last update.
853 */
854 if (fabs(clock_frequency) > NTP_MAXFREQ)
855 msyslog(LOG_NOTICE,
856 "frequency error %.0f PPM exceeds tolerance %.0f PPM",
857 clock_frequency * 1e6, NTP_MAXFREQ * 1e6);
858 dtemp = SQUARE(clock_frequency - drift_comp);
859 if (clock_frequency > NTP_MAXFREQ)
860 drift_comp = NTP_MAXFREQ;
861 else if (clock_frequency < -NTP_MAXFREQ)
862 drift_comp = -NTP_MAXFREQ;
863 else
864 drift_comp = clock_frequency;
865
866 /*
867 * Calculate the wander as the exponentially weighted RMS
868 * frequency differences. Record the change for the frequency
869 * file update.
870 */
871 etemp = SQUARE(clock_stability);
872 clock_stability = SQRT(etemp + (dtemp - etemp) / CLOCK_AVG);
873
874 /*
875 * Here we adjust the time constant by comparing the current
876 * offset with the clock jitter. If the offset is less than the
877 * clock jitter times a constant, then the averaging interval is
878 * increased, otherwise it is decreased. A bit of hysteresis
879 * helps calm the dance. Works best using burst mode. Don't
880 * fiddle with the poll during the startup clamp period.
881 * [Bug 3615] also observe time gates to avoid eager stepping
882 */
883 if (freq_cnt > 0) {
884 tc_counter = 0;
885 tc_twinlo = current_time;
886 tc_twinhi = current_time;
887 } else if (fabs(clock_offset) < CLOCK_PGATE * clock_jitter) {
888 tc_counter += sys_poll;
889 if (tc_counter > CLOCK_LIMIT) {
890 tc_counter = CLOCK_LIMIT;
891 if (sys_poll < peer->maxpoll)
892 sys_poll += (current_time >= tc_twinhi);
893 }
894 } else {
895 tc_counter -= sys_poll << 1;
896 if (tc_counter < -CLOCK_LIMIT) {
897 tc_counter = -CLOCK_LIMIT;
898 if (sys_poll > peer->minpoll)
899 sys_poll -= (current_time >= tc_twinlo);
900 }
901 }
902
903 /*
904 * If the time constant has changed, update the poll variables.
905 *
906 * [bug 3615] also set new time gates
907 * The time limit for stepping down will be half the TC interval
908 * or 60 secs from now, whatever is bigger, and the step up time
909 * limit will be half the TC interval after the step down limit.
910 *
911 * The 'sys_poll' value affects the servo loop gain, and
912 * overshooting sys_poll slows it down unnecessarily. Stepping
913 * down too fast also has bad effects.
914 *
915 * The 'tc_counter' dance itself is something that *should*
916 * happen *once* every (1 << sys_poll) seconds, I think, but
917 * that's not how it works right now, and adding time guards
918 * seems the least intrusive way to handle this.
919 */
920 if (osys_poll != sys_poll) {
921 u_int deadband = 1u << (sys_poll - 1);
922 tc_counter = 0;
923 tc_twinlo = current_time + max(deadband, 60);
924 tc_twinhi = tc_twinlo + deadband;
925 poll_update(peer, sys_poll, 0);
926 }
927
928 /*
929 * Yibbidy, yibbbidy, yibbidy; that'h all folks.
930 */
931 record_loop_stats(clock_offset, drift_comp, clock_jitter,
932 clock_stability, sys_poll);
933 DPRINTF(1, ("local_clock: offset %.9f jit %.9f freq %.3f stab %.3f poll %d\n",
934 clock_offset, clock_jitter, drift_comp * 1e6,
935 clock_stability * 1e6, sys_poll));
936 return (rval);
937 #endif /* not LOCKCLOCK */
938 }
939
940
941 /*
942 * adj_host_clock - Called once every second to update the local clock.
943 *
944 * LOCKCLOCK: The only thing this routine does is increment the
945 * sys_rootdisp variable.
946 */
void
adj_host_clock(
	void
	)
{
	double	offset_adj;
	double	freq_adj;

	/*
	 * Update the dispersion since the last update. In contrast to
	 * NTPv3, NTPv4 does not declare unsynchronized after one day,
	 * since the dispersion check serves this function. Also,
	 * since the poll interval can exceed one day, the old test
	 * would be counterproductive. During the startup clamp period, the
	 * time constant is clamped at 2.
	 */
	sys_rootdisp += clock_phi;
#ifndef LOCKCLOCK
	if (!ntp_enable || mode_ntpdate)
		return;
	/*
	 * Determine the phase adjustment. The gain factor (denominator)
	 * increases with poll interval, so is dominated by the FLL
	 * above the Allan intercept. Note the reduced time constant at
	 * startup.
	 */
	if (state != EVNT_SYNC) {
		offset_adj = 0.;
	} else if (freq_cnt > 0) {
		/* startup clamp: poll exponent pinned at 1 while the
		 * frequency is still being measured */
		offset_adj = clock_offset / (CLOCK_PLL * ULOGTOD(1));
		freq_cnt--;
#ifdef KERNEL_PLL
	} else if (pll_control && kern_enable) {
		/* the kernel discipline applies the phase correction
		 * itself, so the daemon contributes none here */
		offset_adj = 0.;
#endif /* KERNEL_PLL */
	} else {
		offset_adj = clock_offset / (CLOCK_PLL * ULOGTOD(sys_poll));
	}

	/*
	 * If the kernel discipline is enabled the frequency correction
	 * drift_comp has already been engaged via ntp_adjtime() in
	 * set_freq(). Otherwise it is a component of the adj_systime()
	 * offset.
	 */
#ifdef KERNEL_PLL
	if (pll_control && kern_enable)
		freq_adj = 0.;
	else
#endif /* KERNEL_PLL */
		freq_adj = drift_comp;

	/* Bound absolute value of total adjustment to NTP_MAXFREQ. */
	if (offset_adj + freq_adj > NTP_MAXFREQ)
		offset_adj = NTP_MAXFREQ - freq_adj;
	else if (offset_adj + freq_adj < -NTP_MAXFREQ)
		offset_adj = -NTP_MAXFREQ - freq_adj;

	/* consume the slice of the residual offset handed to the slew */
	clock_offset -= offset_adj;
	/*
	 * Windows port adj_systime() must be called each second,
	 * even if the argument is zero, to ease emulation of
	 * adjtime() using Windows' slew API which controls the rate
	 * but does not automatically stop slewing when an offset
	 * has decayed to zero.
	 */
	DEBUG_INSIST(enable_panic_check == TRUE);
	enable_panic_check = FALSE;
	adj_systime(offset_adj + freq_adj);
	enable_panic_check = TRUE;
#endif /* LOCKCLOCK */
}
1019
1020
1021 /*
1022 * Clock state machine. Enter new state and set state variables.
1023 */
1024 static void
rstclock(int trans,double offset)1025 rstclock(
1026 int trans, /* new state */
1027 double offset /* new offset */
1028 )
1029 {
1030 DPRINTF(2, ("rstclock: mu %lu state %d poll %d count %d\n",
1031 current_time - clock_epoch, trans, sys_poll,
1032 tc_counter));
1033 if (trans != state && trans != EVNT_FSET)
1034 report_event(trans, NULL, NULL);
1035 state = trans;
1036 last_offset = clock_offset = offset;
1037 clock_epoch = current_time;
1038 }
1039
1040
1041 /*
1042 * calc_freq - calculate frequency directly
1043 *
1044 * This is very carefully done. When the offset is first computed at the
1045 * first update, a residual frequency component results. Subsequently,
1046 * updates are suppresed until the end of the measurement interval while
1047 * the offset is amortized. At the end of the interval the frequency is
1048 * calculated from the current offset, residual offset, length of the
1049 * interval and residual frequency component. At the same time the
1050 * frequenchy file is armed for update at the next hourly stats.
1051 */
1052 static double
direct_freq(double fp_offset)1053 direct_freq(
1054 double fp_offset
1055 )
1056 {
1057 set_freq(fp_offset / (current_time - clock_epoch));
1058
1059 return drift_comp;
1060 }
1061
1062
1063 /*
1064 * set_freq - set clock frequency correction
1065 *
1066 * Used to step the frequency correction at startup, possibly again once
1067 * the frequency is measured (that is, transitioning from EVNT_NSET to
1068 * EVNT_FSET), and finally to switch between daemon and kernel loop
1069 * discipline at runtime.
1070 *
1071 * When the kernel loop discipline is available but the daemon loop is
1072 * in use, the kernel frequency correction is disabled (set to 0) to
1073 * ensure drift_comp is applied by only one of the loops.
1074 */
static void
set_freq(
	double	freq		/* frequency update */
	)
{
	const char *	loop_desc;
	int ntp_adj_ret;

	(void)ntp_adj_ret;	/* not always used below... */
	drift_comp = freq;
	loop_desc = "ntpd";
#ifdef KERNEL_PLL
	if (pll_control) {
		ZERO(ntv);
		ntv.modes = MOD_FREQUENCY;
		if (kern_enable) {
			loop_desc = "kernel";
			ntv.freq = DTOFREQ(drift_comp);
		}
		/*
		 * With the daemon loop selected, ntv.freq stays zero,
		 * clearing the kernel frequency so that drift_comp is
		 * applied by only one of the two loops.
		 */
		if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) {
			ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, 0, 0, __LINE__ - 1);
		}
	}
#endif /* KERNEL_PLL */
	/* log the new frequency (PPM) and which loop applies it */
	mprintf_event(EVNT_FSET, NULL, "%s %.3f PPM", loop_desc,
	    drift_comp * 1e6);
}
1102
1103
#ifdef KERNEL_PLL
/*
 * start_kern_loop - attempt to enable the kernel loop discipline.
 *
 * Probes ntp_adjtime(); on success leaves pll_control TRUE and
 * arranges for stop_kern_loop() to run at process exit.
 */
static void
start_kern_loop(void)
{
	static int atexit_done;
	int ntp_adj_ret;

	pll_control = TRUE;
	ZERO(ntv);
	ntv.modes = MOD_BITS;
	ntv.status = STA_PLL | STA_UNSYNC;
	ntv.maxerror = MAXDISPERSE * 1.0e6;
	ntv.esterror = MAXDISPERSE * 1.0e6;
	ntv.constant = sys_poll;
	/* ^^^^^^^^ why is it that here constant is
	 * unconditionally set to sys_poll, whereas elsewhere it is
	 * modified depending on nanosecond vs. microsecond kernel?
	 */
#ifdef SIGSYS
	/*
	 * Use sigsetjmp() to save state and then call ntp_adjtime(); if
	 * it fails, then pll_trap() will set pll_control FALSE before
	 * returning control using siglongjmp().
	 */
	newsigsys.sa_handler = pll_trap;
	newsigsys.sa_flags = 0;
	if (sigaction(SIGSYS, &newsigsys, &sigsys)) {
		msyslog(LOG_ERR, "sigaction() trap SIGSYS: %m");
		pll_control = FALSE;
	} else {
		if (sigsetjmp(env, 1) == 0) {
			if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) {
				ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, 0, 0, __LINE__ - 1);
			}
		}
		/* restore the previous SIGSYS disposition */
		if (sigaction(SIGSYS, &sigsys, NULL)) {
			msyslog(LOG_ERR,
			    "sigaction() restore SIGSYS: %m");
			pll_control = FALSE;
		}
	}
#else /* SIGSYS */
	if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) {
		ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, 0, 0, __LINE__ - 1);
	}
#endif /* SIGSYS */

	/*
	 * Save the result status and light up an external clock
	 * if available.
	 */
	pll_status = ntv.status;
	if (pll_control) {
		if (!atexit_done) {
			atexit_done = TRUE;
			atexit(&stop_kern_loop);
		}
#ifdef STA_NANO
		if (pll_status & STA_CLK)
			ext_enable = TRUE;
#endif /* STA_NANO */
		report_event(EVNT_KERN, NULL,
		    "kernel time sync enabled");
	}
}
#endif /* KERNEL_PLL */
1170
1171
#ifdef KERNEL_PLL
/*
 * stop_kern_loop - announce that the kernel discipline is going away.
 */
static void
stop_kern_loop(void)
{
	if (!(pll_control && kern_enable))
		return;

	report_event(EVNT_KERN, NULL,
	    "kernel time sync disabled");
}
#endif /* KERNEL_PLL */
1181
1182
1183 /*
1184 * select_loop() - choose kernel or daemon loop discipline.
1185 */
void
select_loop(
	int	use_kern_loop
	)
{
	/* nothing to do when the requested discipline is already active */
	if (kern_enable == use_kern_loop)
		return;
#ifdef KERNEL_PLL
	/* switching away from the kernel loop: announce shutdown first */
	if (pll_control && !use_kern_loop)
		stop_kern_loop();
#endif
	kern_enable = use_kern_loop;
#ifdef KERNEL_PLL
	/* switching to the kernel loop: (re)initialize it */
	if (pll_control && use_kern_loop)
		start_kern_loop();
#endif
	/*
	 * If this loop selection change occurs after initial startup,
	 * call set_freq() to switch the frequency compensation to or
	 * from the kernel loop.
	 */
#ifdef KERNEL_PLL
	if (pll_control && loop_started)
		set_freq(drift_comp);
#endif
}
1212
1213
1214 /*
1215 * huff-n'-puff filter
1216 */
1217 void
huffpuff(void)1218 huffpuff(void)
1219 {
1220 int i;
1221
1222 if (sys_huffpuff == NULL)
1223 return;
1224
1225 sys_huffptr = (sys_huffptr + 1) % sys_hufflen;
1226 sys_huffpuff[sys_huffptr] = 1e9;
1227 sys_mindly = 1e9;
1228 for (i = 0; i < sys_hufflen; i++) {
1229 if (sys_huffpuff[i] < sys_mindly)
1230 sys_mindly = sys_huffpuff[i];
1231 }
1232 }
1233
1234
1235 /*
1236 * loop_config - configure the loop filter
1237 *
1238 * LOCKCLOCK: The LOOP_DRIFTINIT and LOOP_DRIFTCOMP cases are no-ops.
1239 */
void
loop_config(
	int	item,
	double	freq
	)
{
	int	i;
	double	ftemp;

	DPRINTF(2, ("loop_config: item %d freq %f\n", item, freq));
	switch (item) {

	/*
	 * We first assume the kernel supports the ntp_adjtime()
	 * syscall. If that syscall works, initialize the kernel time
	 * variables. Otherwise, continue leaving no harm behind.
	 */
	case LOOP_DRIFTINIT:
#ifndef LOCKCLOCK
#ifdef KERNEL_PLL
		if (mode_ntpdate)
			break;

		start_kern_loop();
#endif /* KERNEL_PLL */

		/*
		 * Initialize frequency if given; otherwise, begin frequency
		 * calibration phase.
		 */
		/* init_drift_comp is configured in PPM; clamp to the
		 * admissible frequency range */
		ftemp = init_drift_comp / 1e6;
		if (ftemp > NTP_MAXFREQ)
			ftemp = NTP_MAXFREQ;
		else if (ftemp < -NTP_MAXFREQ)
			ftemp = -NTP_MAXFREQ;
		set_freq(ftemp);
		if (freq_set)
			rstclock(EVNT_FSET, 0);
		else
			rstclock(EVNT_NSET, 0);
		loop_started = TRUE;
#endif /* LOCKCLOCK */
		break;

	case LOOP_KERN_CLEAR:
#if 0		/* XXX: needs more review, and how can we get here? */
#ifndef LOCKCLOCK
# ifdef KERNEL_PLL
		if (pll_control && kern_enable) {
			memset((char *)&ntv, 0, sizeof(ntv));
			ntv.modes = MOD_STATUS;
			ntv.status = STA_UNSYNC;
			ntp_adjtime(&ntv);
			sync_status("kernel time sync disabled",
				    pll_status,
				    ntv.status);
		   }
# endif /* KERNEL_PLL */
#endif /* LOCKCLOCK */
#endif
		break;

	/*
	 * Tinker command variables for Ulrich Windl. Very dangerous.
	 */
	case LOOP_ALLAN:	/* Allan intercept (log2) (allan) */
		allan_xpt = (u_char)freq;
		break;

	case LOOP_CODEC:	/* audio codec frequency (codec) */
		clock_codec = freq / 1e6;	/* PPM to s/s */
		break;

	case LOOP_PHI:		/* dispersion threshold (dispersion) */
		clock_phi = freq / 1e6;		/* PPM to s/s */
		break;

	case LOOP_FREQ:		/* initial frequency (freq) */
		init_drift_comp = freq;
		freq_set++;	/* remembered by LOOP_DRIFTINIT above */
		break;

	case LOOP_HUFFPUFF:	/* huff-n'-puff length (huffpuff) */
		if (freq < HUFFPUFF)
			freq = HUFFPUFF;
		sys_hufflen = (int)(freq / HUFFPUFF);
		/* NOTE(review): reconfiguring leaks any previously
		 * allocated sys_huffpuff array -- confirm intended */
		sys_huffpuff = eallocarray(sys_hufflen, sizeof(sys_huffpuff[0]));
		for (i = 0; i < sys_hufflen; i++)
			sys_huffpuff[i] = 1e9;
		sys_mindly = 1e9;
		break;

	case LOOP_PANIC:	/* panic threshold (panic) */
		clock_panic = freq;
		break;

	case LOOP_MAX:		/* step threshold (step) */
		clock_max_fwd = clock_max_back = freq;
		if (freq == 0 || freq > 0.5)
			select_loop(FALSE);
		break;

	case LOOP_MAX_BACK:	/* step threshold (step) */
		clock_max_back = freq;
		/*
		 * Leave using the kernel discipline code unless both
		 * limits are massive. This assumes the reason to stop
		 * using it is that it's pointless, not that it goes wrong.
		 */
		if (  (clock_max_back == 0 || clock_max_back > 0.5)
		   || (clock_max_fwd  == 0 || clock_max_fwd  > 0.5))
			select_loop(FALSE);
		break;

	case LOOP_MAX_FWD:	/* step threshold (step) */
		clock_max_fwd = freq;
		if (  (clock_max_back == 0 || clock_max_back > 0.5)
		   || (clock_max_fwd  == 0 || clock_max_fwd  > 0.5))
			select_loop(FALSE);
		break;

	case LOOP_MINSTEP:	/* stepout threshold (stepout) */
		/* values below the compiled-in floor are clamped */
		if (freq < CLOCK_MINSTEP)
			clock_minstep = CLOCK_MINSTEP;
		else
			clock_minstep = freq;
		break;

	case LOOP_TICK:		/* tick increment (tick) */
		set_sys_tick_precision(freq);
		break;

	case LOOP_LEAP:		/* not used, fall through */
	default:
		msyslog(LOG_NOTICE,
		    "loop_config: unsupported option %d", item);
	}
}
1378
1379
#if defined(KERNEL_PLL) && defined(SIGSYS)
/*
 * pll_trap - trap processor for undefined syscalls
 *
 * This nugget is called by the kernel when the SYS_ntp_adjtime()
 * syscall bombs because the silly thing has not been implemented in
 * the kernel. In this case the phase-lock loop is emulated by
 * the stock adjtime() syscall and a lot of indelicate abuse.
 */
static RETSIGTYPE
pll_trap(
	int arg			/* signal number (unused) */
	)
{
	pll_control = FALSE;	/* remember ntp_adjtime() is unusable */
	siglongjmp(env, 1);	/* resume at start_kern_loop()'s sigsetjmp() */
}
#endif /* KERNEL_PLL && SIGSYS */
1398