/*	$OpenBSD: ntp.c,v 1.174 2024/02/21 03:31:28 deraadt Exp $ */

/*
 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
 * Copyright (c) 2004 Alexander Guy <alexander.guy@andern.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/time.h>
#include <sys/stat.h>
#include <errno.h>
#include <fcntl.h>
#include <paths.h>
#include <poll.h>
#include <pwd.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <syslog.h>
#include <time.h>
#include <unistd.h>
#include <err.h>

#include "ntpd.h"

#define	PFD_PIPE_MAIN	0
#define	PFD_PIPE_DNS	1
#define	PFD_SOCK_CTL	2
#define	PFD_MAX		3

volatile sig_atomic_t	 ntp_quit = 0;
struct imsgbuf		*ibuf_main;
static struct imsgbuf	*ibuf_dns;
struct ntpd_conf	*conf;
struct ctl_conns	 ctl_conns;
u_int			 peer_cnt;
u_int			 sensors_cnt;
extern u_int		 constraint_cnt;

void	ntp_sighdlr(int);
int	ntp_dispatch_imsg(void);
int	ntp_dispatch_imsg_dns(void);
void	peer_add(struct ntp_peer *);
void	peer_remove(struct ntp_peer *);
int	inpool(struct sockaddr_storage *,
	    struct sockaddr_storage[MAX_SERVERS_DNS], size_t);

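/* Signal handler: request a clean shutdown on SIGINT or SIGTERM. */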
void
ntp_sighdlr(int sig)
{
	switch (sig) {
	case SIGINT:
	case SIGTERM:
		ntp_quit = 1;
		break;
	}
}

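/*
 * The NTP engine: the unprivileged, chrooted child process that owns all
 * NTP client and server sockets and talks to the parent and DNS processes
 * over imsg pipes.
 */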
void
ntp_main(struct ntpd_conf *nconf, struct passwd *pw, int argc, char **argv)
{
	int			 a, b, nfds, i, j, idx_peers, timeout;
	int			 nullfd, pipe_dns[2], idx_clients;
	int			 ctls;
	int			 fd_ctl;
	int			 clear_cdns;
	u_int			 pfd_elms = 0, idx2peer_elms = 0;
	u_int			 listener_cnt, new_cnt, sent_cnt, trial_cnt;
	u_int			 ctl_cnt;
	struct pollfd		*pfd = NULL;
	struct servent		*se;
	struct listen_addr	*la;
	struct ntp_peer		*p;
	struct ntp_peer		**idx2peer = NULL;
	struct ntp_sensor	*s, *next_s;
	struct constraint	*cstr;
	struct timespec		 tp;
	struct stat		 stb;
	struct ctl_conn		*cc;
	time_t			 nextaction, last_sensor_scan = 0, now;
	time_t			 last_action = 0, interval, last_cdns_reset = 0;
	void			*newp;

	if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, PF_UNSPEC,
	    pipe_dns) == -1)
		fatal("socketpair");

	start_child(NTPDNS_PROC_NAME, pipe_dns[1], argc, argv);

	log_init(nconf->debug ? LOG_TO_STDERR : LOG_TO_SYSLOG, nconf->verbose,
	    LOG_DAEMON);
	if (!nconf->debug && setsid() == -1)
		fatal("setsid");
	log_procinit("ntp");

	if ((se = getservbyname("ntp", "udp")) == NULL)
		fatal("getservbyname");

	/* Start control socket. */
	if ((fd_ctl = control_init(CTLSOCKET)) == -1)
		fatalx("control socket init failed");
	if (control_listen(fd_ctl) == -1)
		fatalx("control socket listen failed");
	if ((nullfd = open("/dev/null", O_RDWR)) == -1)
		fatal(NULL);

	if (stat(pw->pw_dir, &stb) == -1) {
		fatal("privsep dir %s could not be opened", pw->pw_dir);
	}
	if (stb.st_uid != 0 || (stb.st_mode & (S_IWGRP|S_IWOTH)) != 0) {
		fatalx("bad privsep dir %s permissions: %o",
		    pw->pw_dir, stb.st_mode);
	}
	if (chroot(pw->pw_dir) == -1)
		fatal("chroot");
	if (chdir("/") == -1)
		fatal("chdir(\"/\")");

	if (!nconf->debug) {
		dup2(nullfd, STDIN_FILENO);
		dup2(nullfd, STDOUT_FILENO);
		dup2(nullfd, STDERR_FILENO);
	}
	close(nullfd);

	setproctitle("ntp engine");

	conf = nconf;
	setup_listeners(se, conf, &listener_cnt);

	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("can't drop privileges");

	endservent();

	/* The ntp process will want to open NTP client sockets -> "inet" */
	if (pledge("stdio inet", NULL) == -1)
		err(1, "pledge");

	signal(SIGTERM, ntp_sighdlr);
	signal(SIGINT, ntp_sighdlr);
	signal(SIGPIPE, SIG_IGN);
	signal(SIGHUP, SIG_IGN);
	signal(SIGCHLD, SIG_DFL);

	if ((ibuf_main = malloc(sizeof(struct imsgbuf))) == NULL)
		fatal(NULL);
	imsg_init(ibuf_main, PARENT_SOCK_FILENO);
	if ((ibuf_dns = malloc(sizeof(struct imsgbuf))) == NULL)
		fatal(NULL);
	imsg_init(ibuf_dns, pipe_dns[0]);

	constraint_cnt = 0;
	conf->constraint_median = 0;
	conf->constraint_last = getmonotime();
	TAILQ_FOREACH(cstr, &conf->constraints, entry)
		constraint_cnt += constraint_init(cstr);

	TAILQ_FOREACH(p, &conf->ntp_peers, entry)
		client_peer_init(p);

	memset(&conf->status, 0, sizeof(conf->status));

	conf->freq.num = 0;
	conf->freq.samples = 0;
	conf->freq.x = 0.0;
	conf->freq.xx = 0.0;
	conf->freq.xy = 0.0;
	conf->freq.y = 0.0;
	conf->freq.overall_offset = 0.0;

	conf->status.synced = 0;
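	/*
	 * NTP precision is (roughly) the base-2 logarithm of the clock
	 * resolution in seconds; derive it from clock_getres().
	 */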
	clock_getres(CLOCK_REALTIME, &tp);
	b = 1000000000 / tp.tv_nsec;	/* convert to Hz */
	for (a = 0; b > 1; a--, b >>= 1)
		;
	conf->status.precision = a;
	conf->scale = 1;

	TAILQ_INIT(&ctl_conns);
	sensor_init();

	log_info("ntp engine ready");

	ctl_cnt = 0;
	peer_cnt = 0;
	TAILQ_FOREACH(p, &conf->ntp_peers, entry)
		peer_cnt++;

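	/*
	 * Main event loop: (re)size the pollfd and idx2peer arrays, send
	 * due client and constraint queries, poll all descriptors, and
	 * dispatch the imsg pipes, control connections, listeners, peer
	 * replies and sensors until asked to quit.
	 */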
	while (ntp_quit == 0) {
		if (peer_cnt > idx2peer_elms) {
			if ((newp = reallocarray(idx2peer, peer_cnt,
			    sizeof(*idx2peer))) == NULL) {
				/* panic for now */
				log_warn("could not resize idx2peer from %u -> "
				    "%u entries", idx2peer_elms, peer_cnt);
				fatalx("exiting");
			}
			idx2peer = newp;
			idx2peer_elms = peer_cnt;
		}

		new_cnt = PFD_MAX +
		    peer_cnt + listener_cnt + ctl_cnt;
		if (new_cnt > pfd_elms) {
			if ((newp = reallocarray(pfd, new_cnt,
			    sizeof(*pfd))) == NULL) {
				/* panic for now */
				log_warn("could not resize pfd from %u -> "
				    "%u entries", pfd_elms, new_cnt);
				fatalx("exiting");
			}
			pfd = newp;
			pfd_elms = new_cnt;
		}

		memset(pfd, 0, sizeof(*pfd) * pfd_elms);
		memset(idx2peer, 0, sizeof(*idx2peer) * idx2peer_elms);
		nextaction = getmonotime() + 900;
		pfd[PFD_PIPE_MAIN].fd = ibuf_main->fd;
		pfd[PFD_PIPE_MAIN].events = POLLIN;
		pfd[PFD_PIPE_DNS].fd = ibuf_dns->fd;
		pfd[PFD_PIPE_DNS].events = POLLIN;
		pfd[PFD_SOCK_CTL].fd = fd_ctl;
		pfd[PFD_SOCK_CTL].events = POLLIN;

		i = PFD_MAX;
		TAILQ_FOREACH(la, &conf->listen_addrs, entry) {
			pfd[i].fd = la->fd;
			pfd[i].events = POLLIN;
			i++;
		}

		idx_peers = i;
		sent_cnt = trial_cnt = 0;
		TAILQ_FOREACH(p, &conf->ntp_peers, entry) {
			if (!p->trusted && constraint_cnt &&
			    conf->constraint_median == 0)
				continue;

			if (p->next > 0 && p->next <= getmonotime()) {
				if (p->state > STATE_DNS_INPROGRESS)
					trial_cnt++;
				if (client_query(p) == 0)
					sent_cnt++;
			}
			if (p->deadline > 0 && p->deadline <= getmonotime()) {
				timeout = 300;
				log_debug("no reply from %s received in time, "
				    "next query %ds", log_ntp_addr(p->addr),
				    timeout);
				if (p->trustlevel >= TRUSTLEVEL_BADPEER &&
				    (p->trustlevel /= 2) < TRUSTLEVEL_BADPEER)
					log_info("peer %s now invalid",
					    log_ntp_addr(p->addr));
				if (client_nextaddr(p) == 1) {
					peer_addr_head_clear(p);
					client_nextaddr(p);
				}
				set_next(p, timeout);
			}
			if (p->senderrors > MAX_SEND_ERRORS) {
				log_debug("failed to send query to %s, "
				    "next query %ds", log_ntp_addr(p->addr),
				    INTERVAL_QUERY_PATHETIC);
				p->senderrors = 0;
				if (client_nextaddr(p) == 1) {
					peer_addr_head_clear(p);
					client_nextaddr(p);
				}
				set_next(p, INTERVAL_QUERY_PATHETIC);
			}
			if (p->next > 0 && p->next < nextaction)
				nextaction = p->next;
			if (p->deadline > 0 && p->deadline < nextaction)
				nextaction = p->deadline;

			if (p->state == STATE_QUERY_SENT &&
			    p->query.fd != -1) {
				pfd[i].fd = p->query.fd;
				pfd[i].events = POLLIN;
				idx2peer[i - idx_peers] = p;
				i++;
			}
		}
		idx_clients = i;

		if (!TAILQ_EMPTY(&conf->ntp_conf_sensors) &&
		    (conf->trusted_sensors || constraint_cnt == 0 ||
		    conf->constraint_median != 0)) {
			if (last_sensor_scan == 0 ||
			    last_sensor_scan + SENSOR_SCAN_INTERVAL <= getmonotime()) {
				sensors_cnt = sensor_scan();
				last_sensor_scan = getmonotime();
			}
			if (sensors_cnt == 0 &&
			    nextaction > last_sensor_scan + SENSOR_SCAN_INTERVAL)
				nextaction = last_sensor_scan + SENSOR_SCAN_INTERVAL;
			sensors_cnt = 0;
			TAILQ_FOREACH(s, &conf->ntp_sensors, entry) {
				if (conf->settime && s->offsets[0].offset)
					priv_settime(s->offsets[0].offset, NULL);
				sensors_cnt++;
				if (s->next > 0 && s->next < nextaction)
					nextaction = s->next;
			}
		}

		if (conf->settime &&
		    ((trial_cnt > 0 && sent_cnt == 0) ||
		    (peer_cnt == 0 && sensors_cnt == 0)))
			priv_settime(0, "no valid peers configured");

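		/*
		 * Fire any due constraint queries and note whether any are
		 * still pending; if none are, stale constraint state may be
		 * reset below.
		 */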
		clear_cdns = 1;
		TAILQ_FOREACH(cstr, &conf->constraints, entry) {
			constraint_query(cstr, conf->status.synced);
			if (cstr->state <= STATE_QUERY_SENT)
				clear_cdns = 0;
		}

		if (ibuf_main->w.queued > 0)
			pfd[PFD_PIPE_MAIN].events |= POLLOUT;
		if (ibuf_dns->w.queued > 0)
			pfd[PFD_PIPE_DNS].events |= POLLOUT;

		TAILQ_FOREACH(cc, &ctl_conns, entry) {
			pfd[i].fd = cc->ibuf.fd;
			pfd[i].events = POLLIN;
			if (cc->ibuf.w.queued > 0)
				pfd[i].events |= POLLOUT;
			i++;
		}
		ctls = i;

		now = getmonotime();
		if (conf->constraint_median == 0 && clear_cdns &&
		    now - last_cdns_reset > CONSTRAINT_SCAN_INTERVAL) {
			log_debug("Reset constraint info");
			constraint_reset();
			last_cdns_reset = now;
			nextaction = now + CONSTRAINT_RETRY_INTERVAL;
		}
		timeout = nextaction - now;
		if (timeout < 0)
			timeout = 0;

		if ((nfds = poll(pfd, i, timeout ? timeout * 1000 : 1)) == -1)
			if (errno != EINTR) {
				log_warn("poll error");
				ntp_quit = 1;
			}

		if (nfds > 0 && (pfd[PFD_PIPE_MAIN].revents & POLLOUT))
			if (msgbuf_write(&ibuf_main->w) <= 0 &&
			    errno != EAGAIN) {
				log_warn("pipe write error (to parent)");
				ntp_quit = 1;
			}

		if (nfds > 0 && pfd[PFD_PIPE_MAIN].revents & (POLLIN|POLLERR)) {
			nfds--;
			if (ntp_dispatch_imsg() == -1) {
				log_debug("pipe read error (from main)");
				ntp_quit = 1;
			}
		}

		if (nfds > 0 && (pfd[PFD_PIPE_DNS].revents & POLLOUT))
			if (msgbuf_write(&ibuf_dns->w) <= 0 &&
			    errno != EAGAIN) {
				log_warn("pipe write error (to dns engine)");
				ntp_quit = 1;
			}

		if (nfds > 0 && pfd[PFD_PIPE_DNS].revents & (POLLIN|POLLERR)) {
			nfds--;
			if (ntp_dispatch_imsg_dns() == -1) {
				log_warn("pipe read error (from dns engine)");
				ntp_quit = 1;
			}
		}

		if (nfds > 0 && pfd[PFD_SOCK_CTL].revents & (POLLIN|POLLERR)) {
			nfds--;
			ctl_cnt += control_accept(fd_ctl);
		}

		for (j = PFD_MAX; nfds > 0 && j < idx_peers; j++)
			if (pfd[j].revents & (POLLIN|POLLERR)) {
				nfds--;
				if (server_dispatch(pfd[j].fd, conf) == -1) {
					log_warn("pipe write error (conf)");
					ntp_quit = 1;
				}
			}

		for (; nfds > 0 && j < idx_clients; j++) {
			if (pfd[j].revents & (POLLIN|POLLERR)) {
				struct ntp_peer *pp = idx2peer[j - idx_peers];

				nfds--;
				switch (client_dispatch(pp, conf->settime,
				    conf->automatic)) {
				case -1:
					log_debug("no reply from %s "
					    "received", log_ntp_addr(pp->addr));
					if (pp->trustlevel >=
					    TRUSTLEVEL_BADPEER &&
					    (pp->trustlevel /= 2) <
					    TRUSTLEVEL_BADPEER)
						log_info("peer %s now invalid",
						    log_ntp_addr(pp->addr));
					break;
				case 0: /* invalid replies are ignored */
					break;
				case 1:
					last_action = now;
					break;
				}
			}
		}

		for (; nfds > 0 && j < ctls; j++) {
			nfds -= control_dispatch_msg(&pfd[j], &ctl_cnt);
		}

		for (s = TAILQ_FIRST(&conf->ntp_sensors); s != NULL;
		    s = next_s) {
			next_s = TAILQ_NEXT(s, entry);
			if (s->next <= now) {
				last_action = now;
				sensor_query(s);
			}
		}

		/*
		 * Compute the maximum value scale_interval(INTERVAL_QUERY_NORMAL)
		 * can return; if we have not processed a time message for three
		 * times that interval, stop advertising that we are synced.
		 */
		interval = INTERVAL_QUERY_NORMAL * conf->scale;
		interval += SCALE_INTERVAL(interval) - 1;
		if (conf->status.synced && last_action + 3 * interval < now) {
			log_info("clock is now unsynced due to lack of replies");
			conf->status.synced = 0;
			conf->scale = 1;
			priv_dns(IMSG_UNSYNCED, NULL, 0);
		}
	}

	msgbuf_write(&ibuf_main->w);
	msgbuf_clear(&ibuf_main->w);
	free(ibuf_main);
	msgbuf_write(&ibuf_dns->w);
	msgbuf_clear(&ibuf_dns->w);
	free(ibuf_dns);

	log_info("ntp engine exiting");
	exit(0);
}

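/*
 * Handle imsgs arriving from the parent (privileged) process: adjtime
 * status updates and constraint results relayed on our behalf.
 */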
int
ntp_dispatch_imsg(void)
{
	struct imsg		 imsg;
	int			 n;

	if (((n = imsg_read(ibuf_main)) == -1 && errno != EAGAIN) || n == 0)
		return (-1);

	for (;;) {
		if ((n = imsg_get(ibuf_main, &imsg)) == -1)
			return (-1);

		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_ADJTIME:
			memcpy(&n, imsg.data, sizeof(n));
			if (n == 1 && !conf->status.synced) {
				log_info("clock is now synced");
				conf->status.synced = 1;
				priv_dns(IMSG_SYNCED, NULL, 0);
				constraint_reset();
			} else if (n == 0 && conf->status.synced) {
				log_info("clock is now unsynced");
				conf->status.synced = 0;
				priv_dns(IMSG_UNSYNCED, NULL, 0);
			}
			break;
		case IMSG_CONSTRAINT_RESULT:
			constraint_msg_result(imsg.hdr.peerid,
			    imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);
			break;
		case IMSG_CONSTRAINT_CLOSE:
			constraint_msg_close(imsg.hdr.peerid,
			    imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);
			break;
		default:
			break;
		}
		imsg_free(&imsg);
	}
	return (0);
}

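/*
 * Return 1 if address a already occurs among the first n entries of old[],
 * 0 otherwise.  Used to avoid adding duplicate addresses for a pool.
 */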
int
inpool(struct sockaddr_storage *a,
    struct sockaddr_storage old[MAX_SERVERS_DNS], size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (a->ss_family != old[i].ss_family)
			continue;
		if (a->ss_family == AF_INET) {
			if (((struct sockaddr_in *)a)->sin_addr.s_addr ==
			    ((struct sockaddr_in *)&old[i])->sin_addr.s_addr)
				return 1;
		} else if (memcmp(&((struct sockaddr_in6 *)a)->sin6_addr,
		    &((struct sockaddr_in6 *)&old[i])->sin6_addr,
		    sizeof(struct in6_addr)) == 0) {
			return 1;
		}
	}
	return 0;
}

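/*
 * Handle imsgs arriving from the DNS child process: resolved peer
 * addresses, constraint lookups and the initial root-server probe.
 */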
int
ntp_dispatch_imsg_dns(void)
{
	struct imsg		 imsg;
	struct sockaddr_storage	 existing[MAX_SERVERS_DNS];
	struct ntp_peer		*peer, *npeer, *tmp;
	u_int16_t		 dlen;
	u_char			*p;
	struct ntp_addr		*h;
	size_t			 addrcount, peercount;
	int			 n;

	if (((n = imsg_read(ibuf_dns)) == -1 && errno != EAGAIN) || n == 0)
		return (-1);

	for (;;) {
		if ((n = imsg_get(ibuf_dns, &imsg)) == -1)
			return (-1);

		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_HOST_DNS:
			TAILQ_FOREACH(peer, &conf->ntp_peers, entry)
				if (peer->id == imsg.hdr.peerid)
					break;
			if (peer == NULL) {
				log_warnx("IMSG_HOST_DNS with invalid peerID");
				break;
			}
			if (peer->addr != NULL) {
				log_warnx("IMSG_HOST_DNS but addr != NULL!");
				break;
			}

			if (peer->addr_head.pool) {
				n = 0;
				peercount = 0;

				TAILQ_FOREACH_SAFE(npeer, &conf->ntp_peers,
				    entry, tmp) {
					if (npeer->addr_head.pool !=
					    peer->addr_head.pool)
						continue;
					peercount++;
					if (npeer->id == peer->id)
						continue;
					if (npeer->addr != NULL)
						existing[n++] = npeer->addr->ss;
				}
			}

			dlen = imsg.hdr.len - IMSG_HEADER_SIZE;
			if (dlen == 0) {	/* no data -> temp error */
				log_debug("DNS lookup tempfail");
				peer->state = STATE_DNS_TEMPFAIL;
				if (conf->tmpfail++ == TRIES_AUTO_DNSFAIL)
					priv_settime(0, "of dns failures");
				break;
			}

			p = (u_char *)imsg.data;
			addrcount = dlen / (sizeof(struct sockaddr_storage) +
			    sizeof(int));

			while (dlen >= sizeof(struct sockaddr_storage) +
			    sizeof(int)) {
				if ((h = calloc(1, sizeof(struct ntp_addr))) ==
				    NULL)
					fatal(NULL);
				memcpy(&h->ss, p, sizeof(h->ss));
				p += sizeof(h->ss);
				dlen -= sizeof(h->ss);
				memcpy(&h->notauth, p, sizeof(int));
				p += sizeof(int);
				dlen -= sizeof(int);
				if (peer->addr_head.pool) {
					if (peercount > addrcount) {
						free(h);
						continue;
					}
					if (inpool(&h->ss, existing, n)) {
						free(h);
						continue;
					}
					log_debug("Adding address %s to %s",
					    log_ntp_addr(h), peer->addr_head.name);
					npeer = new_peer();
					npeer->weight = peer->weight;
					npeer->query_addr4 = peer->query_addr4;
					npeer->query_addr6 = peer->query_addr6;
					h->next = NULL;
					npeer->addr = h;
					npeer->addr_head.a = h;
					npeer->addr_head.name =
					    peer->addr_head.name;
					npeer->addr_head.pool =
					    peer->addr_head.pool;
					client_peer_init(npeer);
					npeer->state = STATE_DNS_DONE;
					peer_add(npeer);
					peercount++;
				} else {
					h->next = peer->addr;
					peer->addr = h;
					peer->addr_head.a = peer->addr;
					peer->state = STATE_DNS_DONE;
				}
			}
			if (dlen != 0)
				fatalx("IMSG_HOST_DNS: dlen != 0");
			if (peer->addr_head.pool)
				peer_remove(peer);
			else
				client_addr_init(peer);
			break;
		case IMSG_CONSTRAINT_DNS:
			constraint_msg_dns(imsg.hdr.peerid,
			    imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);
			break;
		case IMSG_PROBE_ROOT:
			dlen = imsg.hdr.len - IMSG_HEADER_SIZE;
			if (dlen != sizeof(int))
				fatalx("IMSG_PROBE_ROOT");
			memcpy(&n, imsg.data, sizeof(int));
			if (n < 0)
				priv_settime(0, "dns probe failed");
			break;
		default:
			break;
		}
		imsg_free(&imsg);
	}
	return (0);
}

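/* Bookkeeping helpers for the global peer list and its counter. */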
void
peer_add(struct ntp_peer *p)
{
	TAILQ_INSERT_TAIL(&conf->ntp_peers, p, entry);
	peer_cnt++;
}

void
peer_remove(struct ntp_peer *p)
{
	TAILQ_REMOVE(&conf->ntp_peers, p, entry);
	free(p);
	peer_cnt--;
}

void
peer_addr_head_clear(struct ntp_peer *p)
{
	host_dns_free(p->addr_head.a);
	p->addr_head.a = NULL;
	p->addr = NULL;
}

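/*
 * Accumulate clock offsets and, every FREQUENCY_SAMPLES samples, estimate
 * the frequency error by linear regression of offset against time, clamp
 * it to +/-MAX_FREQUENCY_ADJUST and hand it to the parent as IMSG_ADJFREQ.
 */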
static void
priv_adjfreq(double offset)
{
	double curtime, freq;

	if (!conf->status.synced) {
		conf->freq.samples = 0;
		return;
	}

	conf->freq.samples++;

	if (conf->freq.samples <= 0)
		return;

	conf->freq.overall_offset += offset;
	offset = conf->freq.overall_offset;

	curtime = gettime_corrected();
	conf->freq.xy += offset * curtime;
	conf->freq.x += curtime;
	conf->freq.y += offset;
	conf->freq.xx += curtime * curtime;

	if (conf->freq.samples % FREQUENCY_SAMPLES != 0)
		return;

	freq =
	    (conf->freq.xy - conf->freq.x * conf->freq.y / conf->freq.samples)
	    /
	    (conf->freq.xx - conf->freq.x * conf->freq.x / conf->freq.samples);

	if (freq > MAX_FREQUENCY_ADJUST)
		freq = MAX_FREQUENCY_ADJUST;
	else if (freq < -MAX_FREQUENCY_ADJUST)
		freq = -MAX_FREQUENCY_ADJUST;

	imsg_compose(ibuf_main, IMSG_ADJFREQ, 0, 0, -1, &freq, sizeof(freq));
	conf->filters |= FILTER_ADJFREQ;
	conf->freq.xy = 0.0;
	conf->freq.x = 0.0;
	conf->freq.y = 0.0;
	conf->freq.xx = 0.0;
	conf->freq.samples = 0;
	conf->freq.overall_offset = 0.0;
	conf->freq.num++;
}

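/*
 * Pick the weighted median offset of all valid peers and sensors and ask
 * the parent to adjtime() by it.  Returns 1 if no usable update is
 * available yet, 0 once an adjustment has been requested.
 */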
int
priv_adjtime(void)
{
	struct ntp_peer		 *p;
	struct ntp_sensor	 *s;
	int			  offset_cnt = 0, i = 0, j;
	struct ntp_offset	**offsets;
	double			  offset_median;

	TAILQ_FOREACH(p, &conf->ntp_peers, entry) {
		if (p->trustlevel < TRUSTLEVEL_BADPEER)
			continue;
		if (!p->update.good)
			return (1);
		offset_cnt += p->weight;
	}

	TAILQ_FOREACH(s, &conf->ntp_sensors, entry) {
		if (!s->update.good)
			continue;
		offset_cnt += s->weight;
	}

	if (offset_cnt == 0)
		return (1);

	if ((offsets = calloc(offset_cnt, sizeof(struct ntp_offset *))) == NULL)
		fatal("calloc priv_adjtime");

	TAILQ_FOREACH(p, &conf->ntp_peers, entry) {
		if (p->trustlevel < TRUSTLEVEL_BADPEER)
			continue;
		for (j = 0; j < p->weight; j++)
			offsets[i++] = &p->update;
	}

	TAILQ_FOREACH(s, &conf->ntp_sensors, entry) {
		if (!s->update.good)
			continue;
		for (j = 0; j < s->weight; j++)
			offsets[i++] = &s->update;
	}

	qsort(offsets, offset_cnt, sizeof(struct ntp_offset *), offset_compare);

	i = offset_cnt / 2;
	if (offset_cnt % 2 == 0)
		if (offsets[i - 1]->delay < offsets[i]->delay)
			i -= 1;
	offset_median = offsets[i]->offset;
	conf->status.rootdelay = offsets[i]->delay;
	conf->status.stratum = offsets[i]->status.stratum;
	conf->status.leap = offsets[i]->status.leap;

	imsg_compose(ibuf_main, IMSG_ADJTIME, 0, 0, -1,
	    &offset_median, sizeof(offset_median));

	priv_adjfreq(offset_median);

	conf->status.reftime = gettime();
	conf->status.stratum++;	/* one more than selected peer */
	if (conf->status.stratum > NTP_MAXSTRATUM)
		conf->status.stratum = NTP_MAXSTRATUM;
	update_scale(offset_median);

	conf->status.refid = offsets[i]->status.send_refid;

	free(offsets);

	TAILQ_FOREACH(p, &conf->ntp_peers, entry) {
		for (i = 0; i < OFFSET_ARRAY_SIZE; i++)
			p->reply[i].offset -= offset_median;
		p->update.good = 0;
	}
	TAILQ_FOREACH(s, &conf->ntp_sensors, entry) {
		for (i = 0; i < SENSOR_OFFSETS; i++)
			s->offsets[i].offset -= offset_median;
		s->update.offset -= offset_median;
	}

	return (0);
}

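/* qsort comparator: order ntp_offset pointers by ascending offset. */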
int
offset_compare(const void *aa, const void *bb)
{
	const struct ntp_offset * const *a;
	const struct ntp_offset * const *b;

	a = aa;
	b = bb;

	if ((*a)->offset < (*b)->offset)
		return (-1);
	else if ((*a)->offset > (*b)->offset)
		return (1);
	else
		return (0);
}

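/*
 * Ask the parent process to set the clock by the given offset; an offset
 * of 0 cancels the initial settime and logs the reason given in msg.
 */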
void
priv_settime(double offset, char *msg)
{
	if (offset == 0)
		log_info("cancel settime because %s", msg);
	imsg_compose(ibuf_main, IMSG_SETTIME, 0, 0, -1,
	    &offset, sizeof(offset));
	conf->settime = 0;
}

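/* Send a command (with an optional host name) to the DNS child process. */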
void
priv_dns(int cmd, char *name, u_int32_t peerid)
{
	u_int16_t	dlen = 0;

	if (name != NULL)
		dlen = strlen(name) + 1;
	imsg_compose(ibuf_dns, cmd, peerid, 0, -1, name, dlen);
}

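/*
 * Recompute the query interval scale: keep it at 1 while the clock is
 * unsynced or offsets are large, and stretch it as offsets shrink.
 */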
void
update_scale(double offset)
{
	offset += getoffset();
	if (offset < 0)
		offset = -offset;

	if (offset > QSCALE_OFF_MAX || !conf->status.synced ||
	    conf->freq.num < 3)
		conf->scale = 1;
	else if (offset < QSCALE_OFF_MIN)
		conf->scale = QSCALE_OFF_MAX / QSCALE_OFF_MIN;
	else
		conf->scale = QSCALE_OFF_MAX / offset;
}

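/* Scale the requested poll interval and add some random jitter. */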
time_t
scale_interval(time_t requested)
{
	time_t interval, r;

	interval = requested * conf->scale;
	r = arc4random_uniform(SCALE_INTERVAL(interval));
	return (interval + r);
}

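/* Back-off interval used after query errors, with some random jitter. */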
time_t
error_interval(void)
{
	time_t interval, r;

	interval = INTERVAL_QUERY_PATHETIC * QSCALE_OFF_MAX / QSCALE_OFF_MIN;
	r = arc4random_uniform(interval / 10);
	return (interval + r);
}