1 /* $OpenBSD: ntp.c,v 1.181 2024/11/21 13:38:14 claudio Exp $ */
2
3 /*
4 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
5 * Copyright (c) 2004 Alexander Guy <alexander.guy@andern.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 #include <sys/types.h>
21 #include <sys/time.h>
22 #include <sys/stat.h>
23 #include <errno.h>
24 #include <fcntl.h>
25 #include <paths.h>
26 #include <poll.h>
27 #include <pwd.h>
28 #include <signal.h>
29 #include <stdlib.h>
30 #include <string.h>
31 #include <syslog.h>
32 #include <time.h>
33 #include <unistd.h>
34 #include <err.h>
35
36 #include "ntpd.h"
37
38 #define PFD_PIPE_MAIN 0
39 #define PFD_PIPE_DNS 1
40 #define PFD_SOCK_CTL 2
41 #define PFD_MAX 3
42
43 volatile sig_atomic_t ntp_quit = 0;
44 struct imsgbuf *ibuf_main;
45 static struct imsgbuf *ibuf_dns;
46 struct ntpd_conf *conf;
47 struct ctl_conns ctl_conns;
48 u_int peer_cnt;
49 u_int sensors_cnt;
50 extern u_int constraint_cnt;
51
52 void ntp_sighdlr(int);
53 int ntp_dispatch_imsg(void);
54 int ntp_dispatch_imsg_dns(void);
55 void peer_add(struct ntp_peer *);
56 void peer_remove(struct ntp_peer *);
57 int inpool(struct sockaddr_storage *,
58 struct sockaddr_storage[MAX_SERVERS_DNS], size_t);
59
60 void
ntp_sighdlr(int sig)61 ntp_sighdlr(int sig)
62 {
63 switch (sig) {
64 case SIGINT:
65 case SIGTERM:
66 ntp_quit = 1;
67 break;
68 }
69 }
70
/*
 * Entry point of the unprivileged "ntp engine" process.  Forks the DNS
 * resolver child, chroots into the privsep user's home directory, drops
 * privileges and pledges down to "stdio inet", then runs the main poll
 * loop: scheduling client queries to peers, scanning sensors, driving
 * constraint checks, and dispatching imsgs from the parent and the DNS
 * process.  Runs until ntp_quit is set, then flushes the imsg pipes and
 * exits.
 */
void
ntp_main(struct ntpd_conf *nconf, struct passwd *pw, int argc, char **argv)
{
	int a, b, nfds, i, j, idx_peers, timeout;
	int nullfd, pipe_dns[2], idx_clients;
	int ctls;
	int fd_ctl;
	int clear_cdns;
	u_int pfd_elms = 0, idx2peer_elms = 0;
	u_int listener_cnt, new_cnt, sent_cnt, trial_cnt;
	u_int ctl_cnt;
	struct pollfd *pfd = NULL;
	struct servent *se;
	struct listen_addr *la;
	struct ntp_peer *p;
	struct ntp_peer **idx2peer = NULL;
	struct ntp_sensor *s, *next_s;
	struct constraint *cstr;
	struct timespec tp;
	struct stat stb;
	struct ctl_conn *cc;
	time_t nextaction, last_sensor_scan = 0, now;
	time_t last_action = 0, interval, last_cdns_reset = 0;
	void *newp;

	/* imsg channel towards the DNS resolver child. */
	if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, PF_UNSPEC,
	    pipe_dns) == -1)
		fatal("socketpair");

	start_child(NTPDNS_PROC_NAME, pipe_dns[1], argc, argv);

	log_init(nconf->debug ? LOG_TO_STDERR : LOG_TO_SYSLOG, nconf->verbose,
	    LOG_DAEMON);
	if (!nconf->debug && setsid() == -1)
		fatal("setsid");
	log_procinit("ntp");

	/* Resolve the NTP service port while we still can (pre-chroot). */
	if ((se = getservbyname("ntp", "udp")) == NULL)
		fatal("getservbyname");

	/* Start control socket. */
	if ((fd_ctl = control_init(CTLSOCKET)) == -1)
		fatalx("control socket init failed");
	if (control_listen(fd_ctl) == -1)
		fatalx("control socket listen failed");
	if ((nullfd = open("/dev/null", O_RDWR)) == -1)
		fatal(NULL);

	/*
	 * The privsep directory must be root-owned and not writable by
	 * group/other before we chroot into it.
	 */
	if (stat(pw->pw_dir, &stb) == -1) {
		fatal("privsep dir %s could not be opened", pw->pw_dir);
	}
	if (stb.st_uid != 0 || (stb.st_mode & (S_IWGRP|S_IWOTH)) != 0) {
		fatalx("bad privsep dir %s permissions: %o",
		    pw->pw_dir, stb.st_mode);
	}
	if (chroot(pw->pw_dir) == -1)
		fatal("chroot");
	if (chdir("/") == -1)
		fatal("chdir(\"/\")");

	/* Detach stdio from the terminal when daemonized. */
	if (!nconf->debug) {
		dup2(nullfd, STDIN_FILENO);
		dup2(nullfd, STDOUT_FILENO);
		dup2(nullfd, STDERR_FILENO);
	}
	close(nullfd);

	setproctitle("ntp engine");

	conf = nconf;
	/* Listening sockets must be opened before privileges are dropped. */
	setup_listeners(se, conf, &listener_cnt);

	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("can't drop privileges");

	endservent();

	/* The ntp process will want to open NTP client sockets -> "inet" */
	if (pledge("stdio inet", NULL) == -1)
		err(1, "pledge");

	signal(SIGTERM, ntp_sighdlr);
	signal(SIGINT, ntp_sighdlr);
	signal(SIGPIPE, SIG_IGN);
	signal(SIGHUP, SIG_IGN);
	signal(SIGCHLD, SIG_DFL);

	/* imsg buffers towards parent (PARENT_SOCK_FILENO) and DNS child. */
	if ((ibuf_main = malloc(sizeof(struct imsgbuf))) == NULL)
		fatal(NULL);
	if (imsgbuf_init(ibuf_main, PARENT_SOCK_FILENO) == -1)
		fatal(NULL);
	if ((ibuf_dns = malloc(sizeof(struct imsgbuf))) == NULL)
		fatal(NULL);
	if (imsgbuf_init(ibuf_dns, pipe_dns[0]) == -1)
		fatal(NULL);

	/* Kick off HTTPS constraint validation for each configured URL. */
	constraint_cnt = 0;
	conf->constraint_median = 0;
	conf->constraint_last = getmonotime();
	TAILQ_FOREACH(cstr, &conf->constraints, entry)
		constraint_cnt += constraint_init(cstr);

	TAILQ_FOREACH(p, &conf->ntp_peers, entry)
		client_peer_init(p);

	memset(&conf->status, 0, sizeof(conf->status));

	/* Reset the frequency-regression accumulators (see priv_adjfreq). */
	conf->freq.num = 0;
	conf->freq.samples = 0;
	conf->freq.x = 0.0;
	conf->freq.xx = 0.0;
	conf->freq.xy = 0.0;
	conf->freq.y = 0.0;
	conf->freq.overall_offset = 0.0;

	conf->status.synced = 0;
	/* Express clock resolution as a (negative) power of two, NTP style. */
	clock_getres(CLOCK_REALTIME, &tp);
	b = 1000000000 / tp.tv_nsec;	/* convert to Hz */
	for (a = 0; b > 1; a--, b >>= 1)
		;
	conf->status.precision = a;
	conf->scale = 1;

	TAILQ_INIT(&ctl_conns);
	sensor_init();

	log_info("ntp engine ready");

	ctl_cnt = 0;
	peer_cnt = 0;
	TAILQ_FOREACH(p, &conf->ntp_peers, entry)
		peer_cnt++;

	while (ntp_quit == 0) {
		/* Grow the poll-index -> peer map if peers were added. */
		if (peer_cnt > idx2peer_elms) {
			if ((newp = reallocarray(idx2peer, peer_cnt,
			    sizeof(*idx2peer))) == NULL) {
				/* panic for now */
				log_warn("could not resize idx2peer from %u -> "
				    "%u entries", idx2peer_elms, peer_cnt);
				fatalx("exiting");
			}
			idx2peer = newp;
			idx2peer_elms = peer_cnt;
		}

		/* Grow the pollfd array to fit pipes + peers + listeners + ctl. */
		new_cnt = PFD_MAX +
		    peer_cnt + listener_cnt + ctl_cnt;
		if (new_cnt > pfd_elms) {
			if ((newp = reallocarray(pfd, new_cnt,
			    sizeof(*pfd))) == NULL) {
				/* panic for now */
				log_warn("could not resize pfd from %u -> "
				    "%u entries", pfd_elms, new_cnt);
				fatalx("exiting");
			}
			pfd = newp;
			pfd_elms = new_cnt;
		}

		memset(pfd, 0, sizeof(*pfd) * pfd_elms);
		memset(idx2peer, 0, sizeof(*idx2peer) * idx2peer_elms);
		/* Default wakeup 900s out; shrunk below by pending events. */
		nextaction = getmonotime() + 900;
		pfd[PFD_PIPE_MAIN].fd = ibuf_main->fd;
		pfd[PFD_PIPE_MAIN].events = POLLIN;
		pfd[PFD_PIPE_DNS].fd = ibuf_dns->fd;
		pfd[PFD_PIPE_DNS].events = POLLIN;
		pfd[PFD_SOCK_CTL].fd = fd_ctl;
		pfd[PFD_SOCK_CTL].events = POLLIN;

		/* Server-mode listening sockets come right after the pipes. */
		i = PFD_MAX;
		TAILQ_FOREACH(la, &conf->listen_addrs, entry) {
			pfd[i].fd = la->fd;
			pfd[i].events = POLLIN;
			i++;
		}

		idx_peers = i;
		sent_cnt = trial_cnt = 0;
		TAILQ_FOREACH(p, &conf->ntp_peers, entry) {
			/*
			 * Untrusted peers wait until at least one
			 * constraint produced a median reference time.
			 */
			if (!p->trusted && constraint_cnt &&
			    conf->constraint_median == 0)
				continue;

			/* Time to send the next query to this peer? */
			if (p->next > 0 && p->next <= getmonotime()) {
				if (p->state > STATE_DNS_INPROGRESS)
					trial_cnt++;
				if (client_query(p) == 0)
					sent_cnt++;
			}
			/* Reply deadline passed: degrade trust, try next addr. */
			if (p->deadline > 0 && p->deadline <= getmonotime()) {
				timeout = 300;
				log_debug("no reply from %s received in time, "
				    "next query %ds", log_ntp_addr(p->addr),
				    timeout);
				if (p->trustlevel >= TRUSTLEVEL_BADPEER &&
				    (p->trustlevel /= 2) < TRUSTLEVEL_BADPEER)
					log_info("peer %s now invalid",
					    log_ntp_addr(p->addr));
				/* 1 == wrapped around: re-resolve the name. */
				if (client_nextaddr(p) == 1) {
					peer_addr_head_clear(p);
					client_nextaddr(p);
				}
				set_next(p, timeout);
			}
			/* Too many send failures: rotate address, back off. */
			if (p->senderrors > MAX_SEND_ERRORS) {
				log_debug("failed to send query to %s, "
				    "next query %ds", log_ntp_addr(p->addr),
				    INTERVAL_QUERY_PATHETIC);
				p->senderrors = 0;
				if (client_nextaddr(p) == 1) {
					peer_addr_head_clear(p);
					client_nextaddr(p);
				}
				set_next(p, INTERVAL_QUERY_PATHETIC);
			}
			if (p->next > 0 && p->next < nextaction)
				nextaction = p->next;
			if (p->deadline > 0 && p->deadline < nextaction)
				nextaction = p->deadline;

			/* Poll the query socket while a reply is outstanding. */
			if (p->state == STATE_QUERY_SENT &&
			    p->query.fd != -1) {
				pfd[i].fd = p->query.fd;
				pfd[i].events = POLLIN;
				idx2peer[i - idx_peers] = p;
				i++;
			}
		}
		idx_clients = i;

		/*
		 * Sensors are only consulted once constraints are satisfied
		 * (or absent, or explicitly trusted).
		 */
		if (!TAILQ_EMPTY(&conf->ntp_conf_sensors) &&
		    (conf->trusted_sensors || constraint_cnt == 0 ||
		    conf->constraint_median != 0)) {
			if (last_sensor_scan == 0 ||
			    last_sensor_scan + SENSOR_SCAN_INTERVAL <= getmonotime()) {
				sensors_cnt = sensor_scan();
				last_sensor_scan = getmonotime();
			}
			if (sensors_cnt == 0 &&
			    nextaction > last_sensor_scan + SENSOR_SCAN_INTERVAL)
				nextaction = last_sensor_scan + SENSOR_SCAN_INTERVAL;
			sensors_cnt = 0;
			TAILQ_FOREACH(s, &conf->ntp_sensors, entry) {
				/* -s mode: set clock from first sensor offset. */
				if (conf->settime && s->offsets[0].offset)
					priv_settime(s->offsets[0].offset, NULL);
				sensors_cnt++;
				if (s->next > 0 && s->next < nextaction)
					nextaction = s->next;
			}
		}

		/* -s mode with nothing usable: give up waiting for time. */
		if (conf->settime &&
		    ((trial_cnt > 0 && sent_cnt == 0) ||
		    (peer_cnt == 0 && sensors_cnt == 0)))
			priv_settime(0, "no valid peers configured");

		clear_cdns = 1;
		TAILQ_FOREACH(cstr, &conf->constraints, entry) {
			constraint_query(cstr, conf->status.synced);
			if (cstr->state <= STATE_QUERY_SENT)
				clear_cdns = 0;
		}

		/* Only ask for POLLOUT when there is queued imsg data. */
		if (imsgbuf_queuelen(ibuf_main) > 0)
			pfd[PFD_PIPE_MAIN].events |= POLLOUT;
		if (imsgbuf_queuelen(ibuf_dns) > 0)
			pfd[PFD_PIPE_DNS].events |= POLLOUT;

		/* Control connections take the remaining pollfd slots. */
		TAILQ_FOREACH(cc, &ctl_conns, entry) {
			pfd[i].fd = cc->ibuf.fd;
			pfd[i].events = POLLIN;
			if (imsgbuf_queuelen(&cc->ibuf) > 0)
				pfd[i].events |= POLLOUT;
			i++;
		}
		ctls = i;

		/*
		 * All constraints idle and no median: reset and retry the
		 * constraint machinery after a cool-down interval.
		 */
		now = getmonotime();
		if (conf->constraint_median == 0 && clear_cdns &&
		    now - last_cdns_reset > CONSTRAINT_SCAN_INTERVAL) {
			log_debug("Reset constraint info");
			constraint_reset();
			last_cdns_reset = now;
			nextaction = now + CONSTRAINT_RETRY_INTERVAL;
		}
		timeout = nextaction - now;
		if (timeout < 0)
			timeout = 0;

		/* timeout==0 still polls for 1ms so pending fds are seen. */
		if ((nfds = poll(pfd, i, timeout ? timeout * 1000 : 1)) == -1)
			if (errno != EINTR) {
				log_warn("poll error");
				ntp_quit = 1;
			}

		if (nfds > 0 && (pfd[PFD_PIPE_MAIN].revents & POLLOUT))
			if (imsgbuf_write(ibuf_main) == -1) {
				log_warn("pipe write error (to parent)");
				ntp_quit = 1;
			}

		if (nfds > 0 && pfd[PFD_PIPE_MAIN].revents & (POLLIN|POLLERR)) {
			nfds--;
			if (ntp_dispatch_imsg() == -1) {
				log_debug("pipe read error (from main)");
				ntp_quit = 1;
			}
		}

		if (nfds > 0 && (pfd[PFD_PIPE_DNS].revents & POLLOUT))
			if (imsgbuf_write(ibuf_dns) == -1) {
				log_warn("pipe write error (to dns engine)");
				ntp_quit = 1;
			}

		if (nfds > 0 && pfd[PFD_PIPE_DNS].revents & (POLLIN|POLLERR)) {
			nfds--;
			if (ntp_dispatch_imsg_dns() == -1) {
				log_warn("pipe read error (from dns engine)");
				ntp_quit = 1;
			}
		}

		if (nfds > 0 && pfd[PFD_SOCK_CTL].revents & (POLLIN|POLLERR)) {
			nfds--;
			ctl_cnt += control_accept(fd_ctl);
		}

		/* Incoming server-mode requests on the listening sockets. */
		for (j = PFD_MAX; nfds > 0 && j < idx_peers; j++)
			if (pfd[j].revents & (POLLIN|POLLERR)) {
				nfds--;
				if (server_dispatch(pfd[j].fd, conf) == -1) {
					log_warn("pipe write error (conf)");
					ntp_quit = 1;
				}
			}

		/* Replies on our own client query sockets. */
		for (; nfds > 0 && j < idx_clients; j++) {
			if (pfd[j].revents & (POLLIN|POLLERR)) {
				struct ntp_peer *pp = idx2peer[j - idx_peers];

				nfds--;
				switch (client_dispatch(pp, conf->settime,
				    conf->automatic)) {
				case -1:
					log_debug("no reply from %s "
					    "received", log_ntp_addr(pp->addr));
					if (pp->trustlevel >=
					    TRUSTLEVEL_BADPEER &&
					    (pp->trustlevel /= 2) <
					    TRUSTLEVEL_BADPEER)
						log_info("peer %s now invalid",
						    log_ntp_addr(pp->addr));
					break;
				case 0:	/* invalid replies are ignored */
					break;
				case 1:
					last_action = now;
					break;
				}
			}
		}

		for (; nfds > 0 && j < ctls; j++) {
			nfds -= control_dispatch_msg(&pfd[j], &ctl_cnt);
		}

		/* Safe iteration: sensor_query may remove entries. */
		for (s = TAILQ_FIRST(&conf->ntp_sensors); s != NULL;
		    s = next_s) {
			next_s = TAILQ_NEXT(s, entry);
			if (s->next <= now) {
				last_action = now;
				sensor_query(s);
			}
		}

		/*
		 * Compute maximum of scale_interval(INTERVAL_QUERY_NORMAL),
		 * if we did not process a time message for three times that
		 * interval, stop advertising we're synced.
		 */
		interval = INTERVAL_QUERY_NORMAL * conf->scale;
		interval += SCALE_INTERVAL(interval) - 1;
		if (conf->status.synced && last_action + 3 * interval < now) {
			log_info("clock is now unsynced due to lack of replies");
			conf->status.synced = 0;
			conf->scale = 1;
			priv_dns(IMSG_UNSYNCED, NULL, 0);
		}
	}

	/* Flush anything still queued on the pipes before exiting. */
	imsgbuf_write(ibuf_main);
	imsgbuf_clear(ibuf_main);
	free(ibuf_main);
	imsgbuf_write(ibuf_dns);
	imsgbuf_clear(ibuf_dns);
	free(ibuf_dns);

	log_info("ntp engine exiting");
	exit(0);
}
475
/*
 * Drain and handle imsgs arriving from the privileged parent process.
 * Returns 0 on success, -1 on a read/parse failure so the caller can
 * shut the engine down.
 */
int
ntp_dispatch_imsg(void)
{
	struct imsg imsg;
	int n;

	if (imsgbuf_read(ibuf_main) != 1)
		return (-1);

	for (;;) {
		if ((n = imsg_get(ibuf_main, &imsg)) == -1)
			return (-1);

		if (n == 0)	/* no complete message queued */
			break;

		switch (imsg.hdr.type) {
		case IMSG_ADJTIME:
			/*
			 * Parent reports whether the adjustment was small
			 * enough to consider the clock synced (n == 1).
			 */
			memcpy(&n, imsg.data, sizeof(n));
			if (n == 1 && !conf->status.synced) {
				log_info("clock is now synced");
				conf->status.synced = 1;
				priv_dns(IMSG_SYNCED, NULL, 0);
				constraint_reset();
			} else if (n == 0 && conf->status.synced) {
				log_info("clock is now unsynced");
				conf->status.synced = 0;
				priv_dns(IMSG_UNSYNCED, NULL, 0);
			}
			break;
		case IMSG_CONSTRAINT_RESULT:
			/* HTTPS constraint measurement from the parent. */
			constraint_msg_result(imsg.hdr.peerid,
			    imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);
			break;
		case IMSG_CONSTRAINT_CLOSE:
			constraint_msg_close(imsg.hdr.peerid,
			    imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);
			break;
		default:
			break;
		}
		imsg_free(&imsg);
	}
	return (0);
}
521
522 int
inpool(struct sockaddr_storage * a,struct sockaddr_storage old[MAX_SERVERS_DNS],size_t n)523 inpool(struct sockaddr_storage *a,
524 struct sockaddr_storage old[MAX_SERVERS_DNS], size_t n)
525 {
526 size_t i;
527
528 for (i = 0; i < n; i++) {
529 if (a->ss_family != old[i].ss_family)
530 continue;
531 if (a->ss_family == AF_INET) {
532 if (((struct sockaddr_in *)a)->sin_addr.s_addr ==
533 ((struct sockaddr_in *)&old[i])->sin_addr.s_addr)
534 return 1;
535 } else if (memcmp(&((struct sockaddr_in6 *)a)->sin6_addr,
536 &((struct sockaddr_in6 *)&old[i])->sin6_addr,
537 sizeof(struct in6_addr)) == 0) {
538 return 1;
539 }
540 }
541 return 0;
542 }
543
/*
 * Drain and handle imsgs from the DNS resolver child.  The main payload
 * is IMSG_HOST_DNS: a list of resolved addresses for a peer, each entry
 * being a struct sockaddr_storage followed by an int "notauth" flag.
 * For pool peers, one new peer is created per previously-unused address;
 * for ordinary peers the addresses are chained onto the peer itself.
 * Returns 0 on success, -1 on a read/parse failure.
 */
int
ntp_dispatch_imsg_dns(void)
{
	struct imsg imsg;
	struct sockaddr_storage existing[MAX_SERVERS_DNS];
	struct ntp_peer *peer, *npeer, *tmp;
	u_int16_t dlen;
	u_char *p;
	struct ntp_addr *h;
	size_t addrcount, peercount;
	int n;

	if (imsgbuf_read(ibuf_dns) != 1)
		return (-1);

	for (;;) {
		if ((n = imsg_get(ibuf_dns, &imsg)) == -1)
			return (-1);

		if (n == 0)	/* no complete message queued */
			break;

		switch (imsg.hdr.type) {
		case IMSG_HOST_DNS:
			/* Find the peer this resolution belongs to. */
			TAILQ_FOREACH(peer, &conf->ntp_peers, entry)
				if (peer->id == imsg.hdr.peerid)
					break;
			if (peer == NULL) {
				log_warnx("IMSG_HOST_DNS with invalid peerID");
				break;
			}
			if (peer->addr != NULL) {
				log_warnx("IMSG_HOST_DNS but addr != NULL!");
				break;
			}

			/*
			 * For a pool, collect the addresses already in use
			 * by sibling peers of the same pool into existing[]
			 * (n entries) so duplicates can be skipped below.
			 */
			if (peer->addr_head.pool) {
				n = 0;
				peercount = 0;

				TAILQ_FOREACH_SAFE(npeer, &conf->ntp_peers,
				    entry, tmp) {
					if (npeer->addr_head.pool !=
					    peer->addr_head.pool)
						continue;
					peercount++;
					if (npeer->id == peer->id)
						continue;
					if (npeer->addr != NULL)
						existing[n++] = npeer->addr->ss;
				}
			}

			dlen = imsg.hdr.len - IMSG_HEADER_SIZE;
			if (dlen == 0) {	/* no data -> temp error */
				log_debug("DNS lookup tempfail");
				peer->state = STATE_DNS_TEMPFAIL;
				/* Too many failures in -s mode: give up. */
				if (conf->tmpfail++ == TRIES_AUTO_DNSFAIL)
					priv_settime(0, "of dns failures");
				break;
			}

			p = (u_char *)imsg.data;
			/* Each record: sockaddr_storage + int notauth flag. */
			addrcount = dlen / (sizeof(struct sockaddr_storage) +
			    sizeof(int));

			while (dlen >= sizeof(struct sockaddr_storage) +
			    sizeof(int)) {
				if ((h = calloc(1, sizeof(struct ntp_addr))) ==
				    NULL)
					fatal(NULL);
				memcpy(&h->ss, p, sizeof(h->ss));
				p += sizeof(h->ss);
				dlen -= sizeof(h->ss);
				memcpy(&h->notauth, p, sizeof(int));
				p += sizeof(int);
				dlen -= sizeof(int);
				if (peer->addr_head.pool) {
					/* Pool already fully populated. */
					if (peercount > addrcount) {
						free(h);
						continue;
					}
					/* Skip addresses already in use. */
					if (inpool(&h->ss, existing,
					    n)) {
						free(h);
						continue;
					}
					log_debug("Adding address %s to %s",
					    log_ntp_addr(h), peer->addr_head.name);
					/*
					 * Clone the template peer's settings
					 * onto a fresh peer for this address.
					 */
					npeer = new_peer();
					npeer->weight = peer->weight;
					npeer->query_addr4 = peer->query_addr4;
					npeer->query_addr6 = peer->query_addr6;
					h->next = NULL;
					npeer->addr = h;
					npeer->addr_head.a = h;
					npeer->addr_head.name =
					    peer->addr_head.name;
					npeer->addr_head.pool =
					    peer->addr_head.pool;
					client_peer_init(npeer);
					npeer->state = STATE_DNS_DONE;
					peer_add(npeer);
					peercount++;
				} else {
					/* Prepend to the peer's address list. */
					h->next = peer->addr;
					peer->addr = h;
					peer->addr_head.a = peer->addr;
					peer->state = STATE_DNS_DONE;
				}
			}
			if (dlen != 0)
				fatalx("IMSG_HOST_DNS: dlen != 0");
			/* The pool template peer itself is no longer needed. */
			if (peer->addr_head.pool)
				peer_remove(peer);
			else
				client_addr_init(peer);
			break;
		case IMSG_CONSTRAINT_DNS:
			constraint_msg_dns(imsg.hdr.peerid,
			    imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);
			break;
		case IMSG_PROBE_ROOT:
			dlen = imsg.hdr.len - IMSG_HEADER_SIZE;
			if (dlen != sizeof(int))
				fatalx("IMSG_PROBE_ROOT");
			memcpy(&n, imsg.data, sizeof(int));
			/* Negative result: resolver is unusable in -s mode. */
			if (n < 0)
				priv_settime(0, "dns probe failed");
			break;
		default:
			break;
		}
		imsg_free(&imsg);
	}
	return (0);
}
681
682 void
peer_add(struct ntp_peer * p)683 peer_add(struct ntp_peer *p)
684 {
685 TAILQ_INSERT_TAIL(&conf->ntp_peers, p, entry);
686 peer_cnt++;
687 }
688
689 void
peer_remove(struct ntp_peer * p)690 peer_remove(struct ntp_peer *p)
691 {
692 TAILQ_REMOVE(&conf->ntp_peers, p, entry);
693 free(p);
694 peer_cnt--;
695 }
696
697 void
peer_addr_head_clear(struct ntp_peer * p)698 peer_addr_head_clear(struct ntp_peer *p)
699 {
700 host_dns_free(p->addr_head.a);
701 p->addr_head.a = NULL;
702 p->addr = NULL;
703 }
704
/*
 * Accumulate clock offset samples and, every FREQUENCY_SAMPLES samples,
 * estimate the clock's frequency error with a least-squares linear
 * regression of cumulative offset against time, then ask the parent to
 * adjust the frequency.  Only runs while the clock is synced.
 */
static void
priv_adjfreq(double offset)
{
	double curtime, freq;

	/* Lost sync: discard the sample window and start over. */
	if (!conf->status.synced){
		conf->freq.samples = 0;
		return;
	}

	conf->freq.samples++;

	/* NOTE(review): after the increment this looks unreachable for
	 * normal values; presumably a guard against wraparound — confirm. */
	if (conf->freq.samples <= 0)
		return;

	/* Regress the *cumulative* offset, not the per-sample offset. */
	conf->freq.overall_offset += offset;
	offset = conf->freq.overall_offset;

	/* Accumulate the regression sums: x = time, y = offset. */
	curtime = gettime_corrected();
	conf->freq.xy += offset * curtime;
	conf->freq.x += curtime;
	conf->freq.y += offset;
	conf->freq.xx += curtime * curtime;

	if (conf->freq.samples % FREQUENCY_SAMPLES != 0)
		return;

	/* Least-squares slope: covariance(x,y) / variance(x). */
	freq =
	    (conf->freq.xy - conf->freq.x * conf->freq.y / conf->freq.samples)
	    /
	    (conf->freq.xx - conf->freq.x * conf->freq.x / conf->freq.samples);

	/* Clamp to the maximum adjustment the kernel should be asked for. */
	if (freq > MAX_FREQUENCY_ADJUST)
		freq = MAX_FREQUENCY_ADJUST;
	else if (freq < -MAX_FREQUENCY_ADJUST)
		freq = -MAX_FREQUENCY_ADJUST;

	imsg_compose(ibuf_main, IMSG_ADJFREQ, 0, 0, -1, &freq, sizeof(freq));
	conf->filters |= FILTER_ADJFREQ;
	/* Reset the accumulators for the next sample window. */
	conf->freq.xy = 0.0;
	conf->freq.x = 0.0;
	conf->freq.y = 0.0;
	conf->freq.xx = 0.0;
	conf->freq.samples = 0;
	conf->freq.overall_offset = 0.0;
	conf->freq.num++;
}
752
/*
 * Pick the weighted median of all good peer and sensor offsets and ask
 * the parent to adjust the clock by it.  Each source contributes
 * "weight" copies of its offset so heavier sources pull the median.
 * Returns 0 when an adjustment was sent, 1 when not enough data is
 * available yet (some trusted peer still lacks a good update, or no
 * source qualified).
 */
int
priv_adjtime(void)
{
	struct ntp_peer *p;
	struct ntp_sensor *s;
	int offset_cnt = 0, i = 0, j;
	struct ntp_offset **offsets;
	double offset_median;

	/*
	 * Count weighted contributions; bail out early if any peer above
	 * the trust threshold has no good update yet, so we only adjust
	 * from a complete picture.
	 */
	TAILQ_FOREACH(p, &conf->ntp_peers, entry) {
		if (p->trustlevel < TRUSTLEVEL_BADPEER)
			continue;
		if (!p->update.good)
			return (1);
		offset_cnt += p->weight;
	}

	TAILQ_FOREACH(s, &conf->ntp_sensors, entry) {
		if (!s->update.good)
			continue;
		offset_cnt += s->weight;
	}

	if (offset_cnt == 0)
		return (1);

	if ((offsets = calloc(offset_cnt, sizeof(struct ntp_offset *))) == NULL)
		fatal("calloc priv_adjtime");

	/* Fill the array: each source appears "weight" times. */
	TAILQ_FOREACH(p, &conf->ntp_peers, entry) {
		if (p->trustlevel < TRUSTLEVEL_BADPEER)
			continue;
		for (j = 0; j < p->weight; j++)
			offsets[i++] = &p->update;
	}

	TAILQ_FOREACH(s, &conf->ntp_sensors, entry) {
		if (!s->update.good)
			continue;
		for (j = 0; j < s->weight; j++)
			offsets[i++] = &s->update;
	}

	qsort(offsets, offset_cnt, sizeof(struct ntp_offset *), offset_compare);

	/* Median; for an even count prefer the entry with lower delay. */
	i = offset_cnt / 2;
	if (offset_cnt % 2 == 0)
		if (offsets[i - 1]->delay < offsets[i]->delay)
			i -= 1;
	offset_median = offsets[i]->offset;
	conf->status.rootdelay = offsets[i]->delay;
	conf->status.stratum = offsets[i]->status.stratum;
	conf->status.leap = offsets[i]->status.leap;

	imsg_compose(ibuf_main, IMSG_ADJTIME, 0, 0, -1,
	    &offset_median, sizeof(offset_median));

	priv_adjfreq(offset_median);

	conf->status.reftime = gettime();
	conf->status.stratum++;	/* one more than selected peer */
	if (conf->status.stratum > NTP_MAXSTRATUM)
		conf->status.stratum = NTP_MAXSTRATUM;
	update_scale(offset_median);

	conf->status.refid = offsets[i]->status.send_refid;

	free(offsets);

	/* Rebase all stored offsets around the adjustment we just sent. */
	TAILQ_FOREACH(p, &conf->ntp_peers, entry) {
		for (i = 0; i < OFFSET_ARRAY_SIZE; i++)
			p->reply[i].offset -= offset_median;
		p->update.good = 0;
	}
	TAILQ_FOREACH(s, &conf->ntp_sensors, entry) {
		for (i = 0; i < SENSOR_OFFSETS; i++)
			s->offsets[i].offset -= offset_median;
		s->update.offset -= offset_median;
	}

	return (0);
}
835
836 int
offset_compare(const void * aa,const void * bb)837 offset_compare(const void *aa, const void *bb)
838 {
839 const struct ntp_offset * const *a;
840 const struct ntp_offset * const *b;
841
842 a = aa;
843 b = bb;
844
845 if ((*a)->offset < (*b)->offset)
846 return (-1);
847 else if ((*a)->offset > (*b)->offset)
848 return (1);
849 else
850 return (0);
851 }
852
853 void
priv_settime(double offset,char * msg)854 priv_settime(double offset, char *msg)
855 {
856 if (offset == 0)
857 log_info("cancel settime because %s", msg);
858 imsg_compose(ibuf_main, IMSG_SETTIME, 0, 0, -1,
859 &offset, sizeof(offset));
860 conf->settime = 0;
861 }
862
863 void
priv_dns(int cmd,char * name,u_int32_t peerid)864 priv_dns(int cmd, char *name, u_int32_t peerid)
865 {
866 u_int16_t dlen = 0;
867
868 if (name != NULL)
869 dlen = strlen(name) + 1;
870 imsg_compose(ibuf_dns, cmd, peerid, 0, -1, name, dlen);
871 }
872
873 void
update_scale(double offset)874 update_scale(double offset)
875 {
876 offset += getoffset();
877 if (offset < 0)
878 offset = -offset;
879
880 if (offset > QSCALE_OFF_MAX || !conf->status.synced ||
881 conf->freq.num < 3)
882 conf->scale = 1;
883 else if (offset < QSCALE_OFF_MIN)
884 conf->scale = QSCALE_OFF_MAX / QSCALE_OFF_MIN;
885 else
886 conf->scale = QSCALE_OFF_MAX / offset;
887 }
888
889 time_t
scale_interval(time_t requested)890 scale_interval(time_t requested)
891 {
892 time_t interval, r;
893
894 interval = requested * conf->scale;
895 r = arc4random_uniform(SCALE_INTERVAL(interval));
896 return (interval + r);
897 }
898
899 time_t
error_interval(void)900 error_interval(void)
901 {
902 time_t interval, r;
903
904 interval = INTERVAL_QUERY_PATHETIC * QSCALE_OFF_MAX / QSCALE_OFF_MIN;
905 r = arc4random_uniform(interval / 10);
906 return (interval + r);
907 }
908