/*	$OpenBSD: ntp.c,v 1.168 2021/10/24 21:24:19 deraadt Exp $ */

/*
 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
 * Copyright (c) 2004 Alexander Guy <alexander.guy@andern.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/time.h>
#include <sys/stat.h>
#include <errno.h>
#include <fcntl.h>
#include <paths.h>
#include <poll.h>
#include <pwd.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <syslog.h>
#include <time.h>
#include <unistd.h>
#include <err.h>

#include "ntpd.h"

#define	PFD_PIPE_MAIN	0
#define	PFD_PIPE_DNS	1
#define	PFD_SOCK_CTL	2
#define	PFD_MAX		3

volatile sig_atomic_t	 ntp_quit = 0;
struct imsgbuf		*ibuf_main;
static struct imsgbuf	*ibuf_dns;
struct ntpd_conf	*conf;
struct ctl_conns	 ctl_conns;
u_int			 peer_cnt;
u_int			 sensors_cnt;
extern u_int		 constraint_cnt;

void	ntp_sighdlr(int);
int	ntp_dispatch_imsg(void);
int	ntp_dispatch_imsg_dns(void);
void	peer_add(struct ntp_peer *);
void	peer_remove(struct ntp_peer *);
int	inpool(struct sockaddr_storage *,
	    struct sockaddr_storage[MAX_SERVERS_DNS], size_t);

void
ntp_sighdlr(int sig)
{
	switch (sig) {
	case SIGINT:
	case SIGTERM:
		ntp_quit = 1;
		break;
	}
}

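/*
 * Main loop of the ntp engine process: drop privileges into the chroot,
 * then poll the pipes to the parent and DNS processes, the control
 * socket, the listening sockets and any outstanding client queries.
 */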
void
ntp_main(struct ntpd_conf *nconf, struct passwd *pw, int argc, char **argv)
{
	int			 a, b, nfds, i, j, idx_peers, timeout;
	int			 nullfd, pipe_dns[2], idx_clients;
	int			 ctls;
	int			 fd_ctl;
	u_int			 pfd_elms = 0, idx2peer_elms = 0;
	u_int			 listener_cnt, new_cnt, sent_cnt, trial_cnt;
	u_int			 ctl_cnt;
	struct pollfd		*pfd = NULL;
	struct servent		*se;
	struct listen_addr	*la;
	struct ntp_peer		*p;
	struct ntp_peer		**idx2peer = NULL;
	struct ntp_sensor	*s, *next_s;
	struct constraint	*cstr;
	struct timespec		 tp;
	struct stat		 stb;
	struct ctl_conn		*cc;
	time_t			 nextaction, last_sensor_scan = 0, now;
	time_t			 last_action = 0, interval;
	void			*newp;

	if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, PF_UNSPEC,
	    pipe_dns) == -1)
		fatal("socketpair");

	start_child(NTPDNS_PROC_NAME, pipe_dns[1], argc, argv);

	log_init(nconf->debug ? LOG_TO_STDERR : LOG_TO_SYSLOG, nconf->verbose,
	    LOG_DAEMON);
	if (!nconf->debug && setsid() == -1)
		fatal("setsid");
	log_procinit("ntp");

	if ((se = getservbyname("ntp", "udp")) == NULL)
		fatal("getservbyname");

	/* Start control socket. */
	if ((fd_ctl = control_init(CTLSOCKET)) == -1)
		fatalx("control socket init failed");
	if (control_listen(fd_ctl) == -1)
		fatalx("control socket listen failed");
	if ((nullfd = open("/dev/null", O_RDWR)) == -1)
		fatal(NULL);

	if (stat(pw->pw_dir, &stb) == -1) {
		fatal("privsep dir %s could not be opened", pw->pw_dir);
	}
	if (stb.st_uid != 0 || (stb.st_mode & (S_IWGRP|S_IWOTH)) != 0) {
		fatalx("bad privsep dir %s permissions: %o",
		    pw->pw_dir, stb.st_mode);
	}
	if (chroot(pw->pw_dir) == -1)
		fatal("chroot");
	if (chdir("/") == -1)
		fatal("chdir(\"/\")");

	if (!nconf->debug) {
		dup2(nullfd, STDIN_FILENO);
		dup2(nullfd, STDOUT_FILENO);
		dup2(nullfd, STDERR_FILENO);
	}
	close(nullfd);

	setproctitle("ntp engine");

	conf = nconf;
	setup_listeners(se, conf, &listener_cnt);

	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("can't drop privileges");

	endservent();

	/* The ntp process will want to open NTP client sockets -> "inet" */
	if (pledge("stdio inet", NULL) == -1)
		err(1, "pledge");

	signal(SIGTERM, ntp_sighdlr);
	signal(SIGINT, ntp_sighdlr);
	signal(SIGPIPE, SIG_IGN);
	signal(SIGHUP, SIG_IGN);
	signal(SIGCHLD, SIG_DFL);

	if ((ibuf_main = malloc(sizeof(struct imsgbuf))) == NULL)
		fatal(NULL);
	imsg_init(ibuf_main, PARENT_SOCK_FILENO);
	if ((ibuf_dns = malloc(sizeof(struct imsgbuf))) == NULL)
		fatal(NULL);
	imsg_init(ibuf_dns, pipe_dns[0]);

	constraint_cnt = 0;
	conf->constraint_median = 0;
	conf->constraint_last = getmonotime();
	TAILQ_FOREACH(cstr, &conf->constraints, entry)
		constraint_cnt += constraint_init(cstr);

	TAILQ_FOREACH(p, &conf->ntp_peers, entry)
		client_peer_init(p);

	memset(&conf->status, 0, sizeof(conf->status));

	conf->freq.num = 0;
	conf->freq.samples = 0;
	conf->freq.x = 0.0;
	conf->freq.xx = 0.0;
	conf->freq.xy = 0.0;
	conf->freq.y = 0.0;
	conf->freq.overall_offset = 0.0;

	conf->status.synced = 0;
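	/*
	 * Derive the NTP precision value: the clock resolution expressed
	 * as a (negative) power of two, i.e. roughly log2 of the
	 * resolution in seconds.
	 */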
	clock_getres(CLOCK_REALTIME, &tp);
	b = 1000000000 / tp.tv_nsec;	/* convert to Hz */
	for (a = 0; b > 1; a--, b >>= 1)
		;
	conf->status.precision = a;
	conf->scale = 1;

	TAILQ_INIT(&ctl_conns);
	sensor_init();

	log_info("ntp engine ready");

	ctl_cnt = 0;
	peer_cnt = 0;
	TAILQ_FOREACH(p, &conf->ntp_peers, entry)
		peer_cnt++;

	while (ntp_quit == 0) {
		if (peer_cnt > idx2peer_elms) {
			if ((newp = reallocarray(idx2peer, peer_cnt,
			    sizeof(*idx2peer))) == NULL) {
				/* panic for now */
				log_warn("could not resize idx2peer from %u -> "
				    "%u entries", idx2peer_elms, peer_cnt);
				fatalx("exiting");
			}
			idx2peer = newp;
			idx2peer_elms = peer_cnt;
		}

		new_cnt = PFD_MAX +
		    peer_cnt + listener_cnt + ctl_cnt;
		if (new_cnt > pfd_elms) {
			if ((newp = reallocarray(pfd, new_cnt,
			    sizeof(*pfd))) == NULL) {
				/* panic for now */
				log_warn("could not resize pfd from %u -> "
				    "%u entries", pfd_elms, new_cnt);
				fatalx("exiting");
			}
			pfd = newp;
			pfd_elms = new_cnt;
		}

		memset(pfd, 0, sizeof(*pfd) * pfd_elms);
		memset(idx2peer, 0, sizeof(*idx2peer) * idx2peer_elms);
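		/*
		 * By default wake up again in 15 minutes; peer and sensor
		 * deadlines below may pull nextaction closer.
		 */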
		nextaction = getmonotime() + 900;
		pfd[PFD_PIPE_MAIN].fd = ibuf_main->fd;
		pfd[PFD_PIPE_MAIN].events = POLLIN;
		pfd[PFD_PIPE_DNS].fd = ibuf_dns->fd;
		pfd[PFD_PIPE_DNS].events = POLLIN;
		pfd[PFD_SOCK_CTL].fd = fd_ctl;
		pfd[PFD_SOCK_CTL].events = POLLIN;

		i = PFD_MAX;
		TAILQ_FOREACH(la, &conf->listen_addrs, entry) {
			pfd[i].fd = la->fd;
			pfd[i].events = POLLIN;
			i++;
		}

		idx_peers = i;
		sent_cnt = trial_cnt = 0;
		TAILQ_FOREACH(p, &conf->ntp_peers, entry) {
			if (!p->trusted && constraint_cnt &&
			    conf->constraint_median == 0)
				continue;

			if (p->next > 0 && p->next <= getmonotime()) {
				if (p->state > STATE_DNS_INPROGRESS)
					trial_cnt++;
				if (client_query(p) == 0)
					sent_cnt++;
			}
			if (p->deadline > 0 && p->deadline <= getmonotime()) {
				timeout = 300;
				log_debug("no reply from %s received in time, "
				    "next query %ds", log_sockaddr(
				    (struct sockaddr *)&p->addr->ss), timeout);
				if (p->trustlevel >= TRUSTLEVEL_BADPEER &&
				    (p->trustlevel /= 2) < TRUSTLEVEL_BADPEER)
					log_info("peer %s now invalid",
					    log_sockaddr(
					    (struct sockaddr *)&p->addr->ss));
				if (client_nextaddr(p) == 1) {
					peer_addr_head_clear(p);
					client_nextaddr(p);
				}
				set_next(p, timeout);
			}
			if (p->senderrors > MAX_SEND_ERRORS) {
				log_debug("failed to send query to %s, "
				    "next query %ds", log_sockaddr(
				    (struct sockaddr *)&p->addr->ss),
				    INTERVAL_QUERY_PATHETIC);
				p->senderrors = 0;
				if (client_nextaddr(p) == 1) {
					peer_addr_head_clear(p);
					client_nextaddr(p);
				}
				set_next(p, INTERVAL_QUERY_PATHETIC);
			}
			if (p->next > 0 && p->next < nextaction)
				nextaction = p->next;
			if (p->deadline > 0 && p->deadline < nextaction)
				nextaction = p->deadline;

			if (p->state == STATE_QUERY_SENT &&
			    p->query->fd != -1) {
				pfd[i].fd = p->query->fd;
				pfd[i].events = POLLIN;
				idx2peer[i - idx_peers] = p;
				i++;
			}
		}
		idx_clients = i;

		if (!TAILQ_EMPTY(&conf->ntp_conf_sensors) &&
		    (conf->trusted_sensors || constraint_cnt == 0 ||
		    conf->constraint_median != 0)) {
			if (last_sensor_scan == 0 ||
			    last_sensor_scan + SENSOR_SCAN_INTERVAL <= getmonotime()) {
				sensors_cnt = sensor_scan();
				last_sensor_scan = getmonotime();
			}
			if (sensors_cnt == 0 &&
			    nextaction > last_sensor_scan + SENSOR_SCAN_INTERVAL)
				nextaction = last_sensor_scan + SENSOR_SCAN_INTERVAL;
			sensors_cnt = 0;
			TAILQ_FOREACH(s, &conf->ntp_sensors, entry) {
				if (conf->settime && s->offsets[0].offset)
					priv_settime(s->offsets[0].offset, NULL);
				sensors_cnt++;
				if (s->next > 0 && s->next < nextaction)
					nextaction = s->next;
			}
		}

		if (conf->settime &&
		    ((trial_cnt > 0 && sent_cnt == 0) ||
		    (peer_cnt == 0 && sensors_cnt == 0)))
			priv_settime(0, "no valid peers configured");

		TAILQ_FOREACH(cstr, &conf->constraints, entry) {
			if (constraint_query(cstr) == -1)
				continue;
		}

		if (ibuf_main->w.queued > 0)
			pfd[PFD_PIPE_MAIN].events |= POLLOUT;
		if (ibuf_dns->w.queued > 0)
			pfd[PFD_PIPE_DNS].events |= POLLOUT;

		TAILQ_FOREACH(cc, &ctl_conns, entry) {
			pfd[i].fd = cc->ibuf.fd;
			pfd[i].events = POLLIN;
			if (cc->ibuf.w.queued > 0)
				pfd[i].events |= POLLOUT;
			i++;
		}
		ctls = i;

		now = getmonotime();
		timeout = nextaction - now;
		if (timeout < 0)
			timeout = 0;

		if ((nfds = poll(pfd, i, timeout ? timeout * 1000 : 1)) == -1)
			if (errno != EINTR) {
				log_warn("poll error");
				ntp_quit = 1;
			}

		if (nfds > 0 && (pfd[PFD_PIPE_MAIN].revents & POLLOUT))
			if (msgbuf_write(&ibuf_main->w) <= 0 &&
			    errno != EAGAIN) {
				log_warn("pipe write error (to parent)");
				ntp_quit = 1;
			}

		if (nfds > 0 && pfd[PFD_PIPE_MAIN].revents & (POLLIN|POLLERR)) {
			nfds--;
			if (ntp_dispatch_imsg() == -1) {
				log_debug("pipe read error (from main)");
				ntp_quit = 1;
			}
		}

		if (nfds > 0 && (pfd[PFD_PIPE_DNS].revents & POLLOUT))
			if (msgbuf_write(&ibuf_dns->w) <= 0 &&
			    errno != EAGAIN) {
				log_warn("pipe write error (to dns engine)");
				ntp_quit = 1;
			}

		if (nfds > 0 && pfd[PFD_PIPE_DNS].revents & (POLLIN|POLLERR)) {
			nfds--;
			if (ntp_dispatch_imsg_dns() == -1) {
				log_warn("pipe read error (from dns engine)");
				ntp_quit = 1;
			}
		}

		if (nfds > 0 && pfd[PFD_SOCK_CTL].revents & (POLLIN|POLLERR)) {
			nfds--;
			ctl_cnt += control_accept(fd_ctl);
		}

		for (j = PFD_MAX; nfds > 0 && j < idx_peers; j++)
			if (pfd[j].revents & (POLLIN|POLLERR)) {
				nfds--;
				if (server_dispatch(pfd[j].fd, conf) == -1) {
					log_warn("pipe write error (conf)");
					ntp_quit = 1;
				}
			}

		for (; nfds > 0 && j < idx_clients; j++) {
			if (pfd[j].revents & (POLLIN|POLLERR)) {
				struct ntp_peer *pp = idx2peer[j - idx_peers];

				nfds--;
				switch (client_dispatch(pp, conf->settime,
				    conf->automatic)) {
				case -1:
					log_debug("no reply from %s "
					    "received", log_sockaddr(
					    (struct sockaddr *) &pp->addr->ss));
					if (pp->trustlevel >=
					    TRUSTLEVEL_BADPEER &&
					    (pp->trustlevel /= 2) <
					    TRUSTLEVEL_BADPEER)
						log_info("peer %s now invalid",
						    log_sockaddr(
						    (struct sockaddr *)
						    &pp->addr->ss));
					break;
				case 0: /* invalid replies are ignored */
					break;
				case 1:
					last_action = now;
					break;
				}
			}
		}

		for (; nfds > 0 && j < ctls; j++) {
			nfds -= control_dispatch_msg(&pfd[j], &ctl_cnt);
		}

		for (s = TAILQ_FIRST(&conf->ntp_sensors); s != NULL;
		    s = next_s) {
			next_s = TAILQ_NEXT(s, entry);
			if (s->next <= now) {
				last_action = now;
				sensor_query(s);
			}
		}

		/*
		 * Compute the maximum value scale_interval(INTERVAL_QUERY_NORMAL)
		 * can return; if we have not processed a time message for three
		 * times that interval, stop advertising that we are synced.
		 */
		interval = INTERVAL_QUERY_NORMAL * conf->scale;
		interval += SCALE_INTERVAL(interval) - 1;
		if (conf->status.synced && last_action + 3 * interval < now) {
			log_info("clock is now unsynced due to lack of replies");
			conf->status.synced = 0;
			conf->scale = 1;
			priv_dns(IMSG_UNSYNCED, NULL, 0);
		}
	}

	msgbuf_write(&ibuf_main->w);
	msgbuf_clear(&ibuf_main->w);
	free(ibuf_main);
	msgbuf_write(&ibuf_dns->w);
	msgbuf_clear(&ibuf_dns->w);
	free(ibuf_dns);

	log_info("ntp engine exiting");
	exit(0);
}

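/*
 * Drain imsgs from the parent (privileged) process: adjtime sync-state
 * updates and constraint results.
 */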
int
ntp_dispatch_imsg(void)
{
	struct imsg		 imsg;
	int			 n;

	if (((n = imsg_read(ibuf_main)) == -1 && errno != EAGAIN) || n == 0)
		return (-1);

	for (;;) {
		if ((n = imsg_get(ibuf_main, &imsg)) == -1)
			return (-1);

		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_ADJTIME:
			memcpy(&n, imsg.data, sizeof(n));
			if (n == 1 && !conf->status.synced) {
				log_info("clock is now synced");
				conf->status.synced = 1;
				priv_dns(IMSG_SYNCED, NULL, 0);
				constraint_reset();
			} else if (n == 0 && conf->status.synced) {
				log_info("clock is now unsynced");
				conf->status.synced = 0;
				priv_dns(IMSG_UNSYNCED, NULL, 0);
			}
			break;
		case IMSG_CONSTRAINT_RESULT:
			constraint_msg_result(imsg.hdr.peerid,
			    imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);
			break;
		case IMSG_CONSTRAINT_CLOSE:
			constraint_msg_close(imsg.hdr.peerid,
			    imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);
			break;
		default:
			break;
		}
		imsg_free(&imsg);
	}
	return (0);
}

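/*
 * Return 1 if address a matches one of the first n entries of old[],
 * so pool resolution does not add the same server twice.
 */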
int
inpool(struct sockaddr_storage *a,
    struct sockaddr_storage old[MAX_SERVERS_DNS], size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (a->ss_family != old[i].ss_family)
			continue;
		if (a->ss_family == AF_INET) {
			if (((struct sockaddr_in *)a)->sin_addr.s_addr ==
			    ((struct sockaddr_in *)&old[i])->sin_addr.s_addr)
				return 1;
		} else if (memcmp(&((struct sockaddr_in6 *)a)->sin6_addr,
		    &((struct sockaddr_in6 *)&old[i])->sin6_addr,
		    sizeof(struct sockaddr_in6)) == 0) {
			return 1;
		}
	}
	return 0;
}

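/*
 * Drain imsgs from the DNS process: resolved peer addresses, constraint
 * host lookups and DNS probe results.
 */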
int
ntp_dispatch_imsg_dns(void)
{
	struct imsg		 imsg;
	struct sockaddr_storage	 existing[MAX_SERVERS_DNS];
	struct ntp_peer		*peer, *npeer, *tmp;
	u_int16_t		 dlen;
	u_char			*p;
	struct ntp_addr		*h;
	size_t			 addrcount, peercount;
	int			 n;

	if (((n = imsg_read(ibuf_dns)) == -1 && errno != EAGAIN) || n == 0)
		return (-1);

	for (;;) {
		if ((n = imsg_get(ibuf_dns, &imsg)) == -1)
			return (-1);

		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_HOST_DNS:
			TAILQ_FOREACH(peer, &conf->ntp_peers, entry)
				if (peer->id == imsg.hdr.peerid)
					break;
			if (peer == NULL) {
				log_warnx("IMSG_HOST_DNS with invalid peerID");
				break;
			}
			if (peer->addr != NULL) {
				log_warnx("IMSG_HOST_DNS but addr != NULL!");
				break;
			}

			if (peer->addr_head.pool) {
				n = 0;
				peercount = 0;

				TAILQ_FOREACH_SAFE(npeer, &conf->ntp_peers,
				    entry, tmp) {
					if (npeer->addr_head.pool !=
					    peer->addr_head.pool)
						continue;
					peercount++;
					if (npeer->id == peer->id)
						continue;
					if (npeer->addr != NULL)
						existing[n++] = npeer->addr->ss;
				}
			}

			dlen = imsg.hdr.len - IMSG_HEADER_SIZE;
			if (dlen == 0) {	/* no data -> temp error */
				log_warnx("DNS lookup tempfail");
				peer->state = STATE_DNS_TEMPFAIL;
				if (conf->tmpfail++ == TRIES_AUTO_DNSFAIL)
					priv_settime(0, "of dns failures");
				break;
			}

			p = (u_char *)imsg.data;
			addrcount = dlen / (sizeof(struct sockaddr_storage) +
			    sizeof(int));

			while (dlen >= sizeof(struct sockaddr_storage) +
			    sizeof(int)) {
				if ((h = calloc(1, sizeof(struct ntp_addr))) ==
				    NULL)
					fatal(NULL);
				memcpy(&h->ss, p, sizeof(h->ss));
				p += sizeof(h->ss);
				dlen -= sizeof(h->ss);
				memcpy(&h->notauth, p, sizeof(int));
				p += sizeof(int);
				dlen -= sizeof(int);
				if (peer->addr_head.pool) {
					if (peercount > addrcount) {
						free(h);
						continue;
					}
					if (inpool(&h->ss, existing,
					    n)) {
						free(h);
						continue;
					}
					log_debug("Adding address %s to %s",
					    log_sockaddr((struct sockaddr *)
					    &h->ss), peer->addr_head.name);
					npeer = new_peer();
					npeer->weight = peer->weight;
					npeer->query_addr4 = peer->query_addr4;
					npeer->query_addr6 = peer->query_addr6;
					h->next = NULL;
					npeer->addr = h;
					npeer->addr_head.a = h;
					npeer->addr_head.name =
					    peer->addr_head.name;
					npeer->addr_head.pool =
					    peer->addr_head.pool;
					client_peer_init(npeer);
					npeer->state = STATE_DNS_DONE;
					peer_add(npeer);
					peercount++;
				} else {
					h->next = peer->addr;
					peer->addr = h;
					peer->addr_head.a = peer->addr;
					peer->state = STATE_DNS_DONE;
				}
			}
			if (dlen != 0)
				fatalx("IMSG_HOST_DNS: dlen != 0");
			if (peer->addr_head.pool)
				peer_remove(peer);
			else
				client_addr_init(peer);
			break;
		case IMSG_CONSTRAINT_DNS:
			constraint_msg_dns(imsg.hdr.peerid,
			    imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);
			break;
		case IMSG_PROBE_ROOT:
			dlen = imsg.hdr.len - IMSG_HEADER_SIZE;
			if (dlen != sizeof(int))
				fatalx("IMSG_PROBE_ROOT");
			memcpy(&n, imsg.data, sizeof(int));
			if (n < 0)
				priv_settime(0, "dns probe failed");
			break;
		default:
			break;
		}
		imsg_free(&imsg);
	}
	return (0);
}

void
peer_add(struct ntp_peer *p)
{
	TAILQ_INSERT_TAIL(&conf->ntp_peers, p, entry);
	peer_cnt++;
}

void
peer_remove(struct ntp_peer *p)
{
	TAILQ_REMOVE(&conf->ntp_peers, p, entry);
	free(p);
	peer_cnt--;
}

void
peer_addr_head_clear(struct ntp_peer *p)
{
	host_dns_free(p->addr_head.a);
	p->addr_head.a = NULL;
	p->addr = NULL;
}

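/*
 * Accumulate (time, offset) samples; every FREQUENCY_SAMPLES samples,
 * estimate the clock frequency error with a least-squares fit over the
 * accumulated sums and hand the clamped result to the parent process.
 */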
static void
priv_adjfreq(double offset)
{
	double curtime, freq;

	if (!conf->status.synced) {
		conf->freq.samples = 0;
		return;
	}

	conf->freq.samples++;

	if (conf->freq.samples <= 0)
		return;

	conf->freq.overall_offset += offset;
	offset = conf->freq.overall_offset;

	curtime = gettime_corrected();
	conf->freq.xy += offset * curtime;
	conf->freq.x += curtime;
	conf->freq.y += offset;
	conf->freq.xx += curtime * curtime;

	if (conf->freq.samples % FREQUENCY_SAMPLES != 0)
		return;

	freq =
	    (conf->freq.xy - conf->freq.x * conf->freq.y / conf->freq.samples)
	    /
	    (conf->freq.xx - conf->freq.x * conf->freq.x / conf->freq.samples);

	if (freq > MAX_FREQUENCY_ADJUST)
		freq = MAX_FREQUENCY_ADJUST;
	else if (freq < -MAX_FREQUENCY_ADJUST)
		freq = -MAX_FREQUENCY_ADJUST;

	imsg_compose(ibuf_main, IMSG_ADJFREQ, 0, 0, -1, &freq, sizeof(freq));
	conf->filters |= FILTER_ADJFREQ;
	conf->freq.xy = 0.0;
	conf->freq.x = 0.0;
	conf->freq.y = 0.0;
	conf->freq.xx = 0.0;
	conf->freq.samples = 0;
	conf->freq.overall_offset = 0.0;
	conf->freq.num++;
}

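/*
 * Pick the weighted median of all good peer and sensor offsets, send it
 * to the parent for adjtime(2), and update stratum, root delay, leap
 * indicator, reference id and the query interval scale accordingly.
 */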
int
priv_adjtime(void)
{
	struct ntp_peer		 *p;
	struct ntp_sensor	 *s;
	int			  offset_cnt = 0, i = 0, j;
	struct ntp_offset	**offsets;
	double			  offset_median;

	TAILQ_FOREACH(p, &conf->ntp_peers, entry) {
		if (p->trustlevel < TRUSTLEVEL_BADPEER)
			continue;
		if (!p->update.good)
			return (1);
		offset_cnt += p->weight;
	}

	TAILQ_FOREACH(s, &conf->ntp_sensors, entry) {
		if (!s->update.good)
			continue;
		offset_cnt += s->weight;
	}

	if (offset_cnt == 0)
		return (1);

	if ((offsets = calloc(offset_cnt, sizeof(struct ntp_offset *))) == NULL)
		fatal("calloc priv_adjtime");

	TAILQ_FOREACH(p, &conf->ntp_peers, entry) {
		if (p->trustlevel < TRUSTLEVEL_BADPEER)
			continue;
		for (j = 0; j < p->weight; j++)
			offsets[i++] = &p->update;
	}

	TAILQ_FOREACH(s, &conf->ntp_sensors, entry) {
		if (!s->update.good)
			continue;
		for (j = 0; j < s->weight; j++)
			offsets[i++] = &s->update;
	}

	qsort(offsets, offset_cnt, sizeof(struct ntp_offset *), offset_compare);

	i = offset_cnt / 2;
	if (offset_cnt % 2 == 0)
		if (offsets[i - 1]->delay < offsets[i]->delay)
			i -= 1;
	offset_median = offsets[i]->offset;
	conf->status.rootdelay = offsets[i]->delay;
	conf->status.stratum = offsets[i]->status.stratum;
	conf->status.leap = offsets[i]->status.leap;

	imsg_compose(ibuf_main, IMSG_ADJTIME, 0, 0, -1,
	    &offset_median, sizeof(offset_median));

	priv_adjfreq(offset_median);

	conf->status.reftime = gettime();
	conf->status.stratum++;	/* one more than selected peer */
	if (conf->status.stratum > NTP_MAXSTRATUM)
		conf->status.stratum = NTP_MAXSTRATUM;
	update_scale(offset_median);

	conf->status.refid = offsets[i]->status.send_refid;

	free(offsets);

	TAILQ_FOREACH(p, &conf->ntp_peers, entry) {
		for (i = 0; i < OFFSET_ARRAY_SIZE; i++)
			p->reply[i].offset -= offset_median;
		p->update.good = 0;
	}
	TAILQ_FOREACH(s, &conf->ntp_sensors, entry) {
		for (i = 0; i < SENSOR_OFFSETS; i++)
			s->offsets[i].offset -= offset_median;
		s->update.offset -= offset_median;
	}

	return (0);
}

int
offset_compare(const void *aa, const void *bb)
{
	const struct ntp_offset * const *a;
	const struct ntp_offset * const *b;

	a = aa;
	b = bb;

	if ((*a)->offset < (*b)->offset)
		return (-1);
	else if ((*a)->offset > (*b)->offset)
		return (1);
	else
		return (0);
}

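/*
 * Send the initial clock offset to the parent for settime; an offset of
 * 0 cancels the wait for the reason given in msg.
 */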
void
priv_settime(double offset, char *msg)
{
	if (offset == 0)
		log_info("cancel settime because %s", msg);
	imsg_compose(ibuf_main, IMSG_SETTIME, 0, 0, -1,
	    &offset, sizeof(offset));
	conf->settime = 0;
}

void
priv_dns(int cmd, char *name, u_int32_t peerid)
{
	u_int16_t	dlen = 0;

	if (name != NULL)
		dlen = strlen(name) + 1;
	imsg_compose(ibuf_dns, cmd, peerid, 0, -1, name, dlen);
}

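/*
 * Derive the query interval scale from the effective clock offset: the
 * smaller it is (once the clock is synced and a few frequency
 * adjustments have been made), the larger the scale, capped at
 * QSCALE_OFF_MAX / QSCALE_OFF_MIN.
 */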
void
update_scale(double offset)
{
	offset += getoffset();
	if (offset < 0)
		offset = -offset;

	if (offset > QSCALE_OFF_MAX || !conf->status.synced ||
	    conf->freq.num < 3)
		conf->scale = 1;
	else if (offset < QSCALE_OFF_MIN)
		conf->scale = QSCALE_OFF_MAX / QSCALE_OFF_MIN;
	else
		conf->scale = QSCALE_OFF_MAX / offset;
}

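/* Stretch a requested query interval by the current scale and add jitter. */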
time_t
scale_interval(time_t requested)
{
	time_t interval, r;

	interval = requested * conf->scale;
	r = arc4random_uniform(SCALE_INTERVAL(interval));
	return (interval + r);
}

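/* Interval to wait after an error, with roughly 10% random jitter added. */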
time_t
error_interval(void)
{
	time_t interval, r;

	interval = INTERVAL_QUERY_PATHETIC * QSCALE_OFF_MAX / QSCALE_OFF_MIN;
	r = arc4random_uniform(interval / 10);
	return (interval + r);
}
904