xref: /netbsd/external/bsd/ntp/dist/sntp/main.c (revision 9034ec65)
1 /*	$NetBSD: main.c,v 1.18 2020/05/25 20:47:32 christos Exp $	*/
2 
3 #include <config.h>
4 
5 #include <event2/util.h>
6 #include <event2/event.h>
7 
8 #include "ntp_workimpl.h"
9 #ifdef WORK_THREAD
10 # include <event2/thread.h>
11 #endif
12 
13 #ifdef HAVE_SYSEXITS_H
14 # include <sysexits.h>
15 #endif
16 
17 #include "main.h"
18 #include "ntp_libopts.h"
19 #include "kod_management.h"
20 #include "networking.h"
21 #include "utilities.h"
22 #include "log.h"
23 #include "libntp.h"
24 
25 
26 int shutting_down;
27 int time_derived;
28 int time_adjusted;
29 int n_pending_dns = 0;
30 int n_pending_ntp = 0;
31 int ai_fam_pref = AF_UNSPEC;
32 int ntpver = 4;
33 double steplimit = -1;
34 SOCKET sock4 = -1;		/* Socket for IPv4 */
35 SOCKET sock6 = -1;		/* Socket for IPv6 */
36 /*
37 ** BCAST *must* listen on port 123 (by default), so we can only
38 ** use the UCST sockets (above) if they too are using port 123
39 */
40 SOCKET bsock4 = -1;		/* Broadcast Socket for IPv4 */
41 SOCKET bsock6 = -1;		/* Broadcast Socket for IPv6 */
42 struct event_base *base;
43 struct event *ev_sock4;
44 struct event *ev_sock6;
45 struct event *ev_worker_timeout;
46 struct event *ev_xmt_timer;
47 
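/*
** dns_ctx carries one name being resolved plus the CTX_* flags below:
** CTX_BCST for a broadcast/listen lookup (-b), CTX_UCST for a unicast
** query, CTX_CONC when the name came from -c/--concurrent.  One is
** allocated per handle_lookup() call and handed to the resolver.
*/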
48 struct dns_ctx {
49 	const char *	name;
50 	int		flags;
51 #define CTX_BCST	0x0001
52 #define CTX_UCST	0x0002
53 #define CTX_xCST	0x0003
54 #define CTX_CONC	0x0004
55 #define CTX_unused	0xfffd
56 	int		key_id;
57 	struct timeval	timeout;
58 	struct key *	key;
59 };
60 
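/*
** sent_pkt records one outstanding request: the resolved address, the
** time it was sent (stime), whether it has been answered or timed out
** (done), and a copy of the transmitted packet (x_pkt) used later by
** process_pkt() to validate the reply.
*/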
61 typedef struct sent_pkt_tag sent_pkt;
62 struct sent_pkt_tag {
63 	sent_pkt *		link;
64 	struct dns_ctx *	dctx;
65 	sockaddr_u		addr;
66 	time_t			stime;
67 	int			done;
68 	struct pkt		x_pkt;
69 };
70 
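/*
** xmt_ctx is one entry in the transmit queue xmt_q: the socket to send
** on, the sent_pkt to transmit, and sched, the second at which
** xmt_timer_cb() should send it.
*/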
71 typedef struct xmt_ctx_tag xmt_ctx;
72 struct xmt_ctx_tag {
73 	xmt_ctx *		link;
74 	SOCKET			sock;
75 	time_t			sched;
76 	sent_pkt *		spkt;
77 };
78 
79 struct timeval	gap;
80 xmt_ctx *	xmt_q;
81 struct key *	keys = NULL;
82 int		response_timeout;
83 struct timeval	response_tv;
84 struct timeval	start_tv;
85 /* check the timeout at least once per second */
86 struct timeval	wakeup_tv = { 0, 888888 };
87 
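/* per-address-family lists of sent packets awaiting replies */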
88 sent_pkt *	fam_listheads[2];
89 #define v4_pkts_list	(fam_listheads[0])
90 #define v6_pkts_list	(fam_listheads[1])
91 
92 static union {
93 	struct pkt pkt;
94 	char   buf[LEN_PKT_NOMAC + NTP_MAXEXTEN + MAX_MAC_LEN];
95 } rbuf;
96 
97 #define r_pkt  rbuf.pkt
98 
99 #ifdef HAVE_DROPROOT
100 int droproot;			/* intres imports these */
101 int root_dropped;
102 #endif
103 u_long current_time;		/* libntp/authkeys.c */
104 
105 void open_sockets(void);
106 void handle_lookup(const char *name, int flags);
107 void sntp_addremove_fd(int fd, int is_pipe, int remove_it);
108 void worker_timeout(evutil_socket_t, short, void *);
109 void worker_resp_cb(evutil_socket_t, short, void *);
110 void sntp_name_resolved(int, int, void *, const char *, const char *,
111 			const struct addrinfo *,
112 			const struct addrinfo *);
113 void queue_xmt(SOCKET sock, struct dns_ctx *dctx, sent_pkt *spkt,
114 	       u_int xmt_delay);
115 void xmt_timer_cb(evutil_socket_t, short, void *ptr);
116 void xmt(xmt_ctx *xctx);
117 int  check_kod(const struct addrinfo *ai);
118 void timeout_query(sent_pkt *);
119 void timeout_queries(void);
120 void sock_cb(evutil_socket_t, short, void *);
121 void check_exit_conditions(void);
122 void sntp_libevent_log_cb(int, const char *);
123 void set_li_vn_mode(struct pkt *spkt, char leap, char version, char mode);
124 int  set_time(double offset);
125 void dec_pending_ntp(const char *, sockaddr_u *);
126 int  libevent_version_ok(void);
127 int  gettimeofday_cached(struct event_base *b, struct timeval *tv);
128 
129 
130 /*
131  * The actual main function.
132  */
133 int
134 sntp_main (
135 	int argc,
136 	char **argv,
137 	const char *sntpVersion
138 	)
139 {
140 	int			i;
141 	int			exitcode;
142 	int			optct;
143 	struct event_config *	evcfg;
144 
145 	/* Initialize logging system - sets up progname */
146 	sntp_init_logging(argv[0]);
147 
148 	if (!libevent_version_ok())
149 		exit(EX_SOFTWARE);
150 
151 	init_lib();
152 	init_auth();
153 
154 	optct = ntpOptionProcess(&sntpOptions, argc, argv);
155 	argc -= optct;
156 	argv += optct;
157 
158 
159 	debug = OPT_VALUE_SET_DEBUG_LEVEL;
160 
161 	TRACE(2, ("init_lib() done, %s%s\n",
162 		  (ipv4_works)
163 		      ? "ipv4_works "
164 		      : "",
165 		  (ipv6_works)
166 		      ? "ipv6_works "
167 		      : ""));
168 	ntpver = OPT_VALUE_NTPVERSION;
169 	steplimit = OPT_VALUE_STEPLIMIT / 1e3;
170 	gap.tv_usec = max(0, OPT_VALUE_GAP * 1000);
171 	gap.tv_usec = min(gap.tv_usec, 999999);
172 
173 	if (HAVE_OPT(LOGFILE))
174 		open_logfile(OPT_ARG(LOGFILE));
175 
176 	msyslog(LOG_INFO, "%s", sntpVersion);
177 
178 	if (0 == argc && !HAVE_OPT(BROADCAST) && !HAVE_OPT(CONCURRENT)) {
179 		printf("%s: Must supply at least one of -b hostname, -c hostname, or hostname.\n",
180 		       progname);
181 		exit(EX_USAGE);
182 	}
183 
184 
185 	/*
186 	** Eventually, we probably want:
187 	** - separate bcst and ucst timeouts (why?)
188 	** - multiple --timeout values in the commandline
189 	*/
190 
191 	response_timeout = OPT_VALUE_TIMEOUT;
192 	response_tv.tv_sec = response_timeout;
193 	response_tv.tv_usec = 0;
194 
195 	/* IPv6 available? */
196 	if (isc_net_probeipv6() != ISC_R_SUCCESS) {
197 		ai_fam_pref = AF_INET;
198 		TRACE(1, ("No ipv6 support available, forcing ipv4\n"));
199 	} else {
200 		/* Check for options -4 and -6 */
201 		if (HAVE_OPT(IPV4))
202 			ai_fam_pref = AF_INET;
203 		else if (HAVE_OPT(IPV6))
204 			ai_fam_pref = AF_INET6;
205 	}
206 
207 	/* TODO: Parse config file if declared */
208 
209 	/*
210 	** Init the KOD system.
211 	** For embedded systems with no writable filesystem,
212 	** -K /dev/null can be used to disable KoD storage.
213 	*/
214 	kod_init_kod_db(OPT_ARG(KOD), FALSE);
215 
216 	/* HMS: Check and see what happens if KEYFILE doesn't exist */
217 	auth_init(OPT_ARG(KEYFILE), &keys);
218 
219 	/*
220 	** Considering employing a variable that prevents functions from doing
221 	** anything until everything is initialized properly
222 	**
223 	** HMS: What exactly does the above mean?
224 	*/
225 	event_set_log_callback(&sntp_libevent_log_cb);
226 	if (debug > 0)
227 		event_enable_debug_mode();
228 #ifdef WORK_THREAD
229 	evthread_use_pthreads();
230 	/* we use libevent from main thread only, locks should be academic */
231 	if (debug > 0)
232 		evthread_enable_lock_debuging();
233 #endif
234 	evcfg = event_config_new();
235 	if (NULL == evcfg) {
236 		printf("%s: event_config_new() failed!\n", progname);
237 		return -1;
238 	}
239 #ifndef HAVE_SOCKETPAIR
240 	event_config_require_features(evcfg, EV_FEATURE_FDS);
241 #endif
242 	/* all libevent calls are from main thread */
243 	/* event_config_set_flag(evcfg, EVENT_BASE_FLAG_NOLOCK); */
244 	base = event_base_new_with_config(evcfg);
245 	event_config_free(evcfg);
246 	if (NULL == base) {
247 		printf("%s: event_base_new() failed!\n", progname);
248 		return -1;
249 	}
250 
251 	/* wire into intres resolver */
252 	worker_per_query = TRUE;
253 	addremove_io_fd = &sntp_addremove_fd;
254 
255 	open_sockets();
256 
257 	if (HAVE_OPT(BROADCAST)) {
258 		int		cn = STACKCT_OPT(  BROADCAST );
259 		const char **	cp = STACKLST_OPT( BROADCAST );
260 
261 		while (cn-- > 0) {
262 			handle_lookup(*cp, CTX_BCST);
263 			cp++;
264 		}
265 	}
266 
267 	if (HAVE_OPT(CONCURRENT)) {
268 		int		cn = STACKCT_OPT( CONCURRENT );
269 		const char **	cp = STACKLST_OPT( CONCURRENT );
270 
271 		while (cn-- > 0) {
272 			handle_lookup(*cp, CTX_UCST | CTX_CONC);
273 			cp++;
274 		}
275 	}
276 
277 	for (i = 0; i < argc; ++i)
278 		handle_lookup(argv[i], CTX_UCST);
279 
280 	gettimeofday_cached(base, &start_tv);
281 	event_base_dispatch(base);
282 	event_base_free(base);
283 
284 	if (!time_adjusted &&
285 	    (ENABLED_OPT(STEP) || ENABLED_OPT(SLEW)))
286 		exitcode = 1;
287 	else
288 		exitcode = 0;
289 
290 	return exitcode;
291 }
292 
293 
294 /*
295 ** open sockets and make them non-blocking
296 */
297 void
298 open_sockets(
299 	void
300 	)
301 {
302 	sockaddr_u	name;
303 
304 	if (-1 == sock4) {
305 		sock4 = socket(PF_INET, SOCK_DGRAM, 0);
306 		if (-1 == sock4) {
307 			/* error getting a socket */
308 			msyslog(LOG_ERR, "open_sockets: socket(PF_INET) failed: %m");
309 			exit(1);
310 		}
311 		/* Make it non-blocking */
312 		make_socket_nonblocking(sock4);
313 
314 		/* Let's try using a wildcard... */
315 		ZERO(name);
316 		AF(&name) = AF_INET;
317 		SET_ADDR4N(&name, INADDR_ANY);
318 		SET_PORT(&name, (HAVE_OPT(USERESERVEDPORT) ? 123 : 0));
319 
320 		if (-1 == bind(sock4, &name.sa,
321 			       SOCKLEN(&name))) {
322 			msyslog(LOG_ERR, "open_sockets: bind(sock4) failed: %m");
323 			exit(1);
324 		}
325 
326 		/* Register an NTP callback for recv/timeout */
327 		ev_sock4 = event_new(base, sock4,
328 				     EV_TIMEOUT | EV_READ | EV_PERSIST,
329 				     &sock_cb, NULL);
330 		if (NULL == ev_sock4) {
331 			msyslog(LOG_ERR,
332 				"open_sockets: event_new(base, sock4) failed!");
333 		} else {
334 			event_add(ev_sock4, &wakeup_tv);
335 		}
336 	}
337 
338 	/* We may not always have IPv6... */
339 	if (-1 == sock6 && ipv6_works) {
340 		sock6 = socket(PF_INET6, SOCK_DGRAM, 0);
341 		if (-1 == sock6 && ipv6_works) {
342 			/* error getting a socket */
343 			msyslog(LOG_ERR, "open_sockets: socket(PF_INET6) failed: %m");
344 			exit(1);
345 		}
346 		/* Make it non-blocking */
347 		make_socket_nonblocking(sock6);
348 
349 		/* Let's try using a wildcard... */
350 		ZERO(name);
351 		AF(&name) = AF_INET6;
352 		SET_ADDR6N(&name, in6addr_any);
353 		SET_PORT(&name, (HAVE_OPT(USERESERVEDPORT) ? 123 : 0));
354 
355 		if (-1 == bind(sock6, &name.sa,
356 			       SOCKLEN(&name))) {
357 			msyslog(LOG_ERR, "open_sockets: bind(sock6) failed: %m");
358 			exit(1);
359 		}
360 		/* Register an NTP callback for recv/timeout */
361 		ev_sock6 = event_new(base, sock6,
362 				     EV_TIMEOUT | EV_READ | EV_PERSIST,
363 				     &sock_cb, NULL);
364 		if (NULL == ev_sock6) {
365 			msyslog(LOG_ERR,
366 				"open_sockets: event_new(base, sock6) failed!");
367 		} else {
368 			event_add(ev_sock6, &wakeup_tv);
369 		}
370 	}
371 
372 	return;
373 }
374 
375 
376 /*
377 ** handle_lookup
378 */
379 void
380 handle_lookup(
381 	const char *name,
382 	int flags
383 	)
384 {
385 	struct addrinfo	hints;	/* Local copy is OK */
386 	struct dns_ctx *ctx;
387 	char *		name_copy;
388 	size_t		name_sz;
389 	size_t		octets;
390 
391 	TRACE(1, ("handle_lookup(%s,%#x)\n", name, flags));
392 
393 	ZERO(hints);
394 	hints.ai_family = ai_fam_pref;
395 	hints.ai_flags = AI_CANONNAME | Z_AI_NUMERICSERV;
396 	/*
397 	** Unless we specify a socktype, we'll get at least two
398 	** entries for each address: one for TCP and one for
399 	** UDP. That's not what we want.
400 	*/
401 	hints.ai_socktype = SOCK_DGRAM;
402 	hints.ai_protocol = IPPROTO_UDP;
403 
404 	name_sz = 1 + strlen(name);
405 	octets = sizeof(*ctx) + name_sz;	// Space for a ctx and the name
406 	ctx = emalloc_zero(octets);		// ctx at ctx[0]
407 	name_copy = (char *)(ctx + 1);		// Put the name at ctx[1]
408 	memcpy(name_copy, name, name_sz);	// copy the name to ctx[1]
409 	ctx->name = name_copy;			// point to it...
410 	ctx->flags = flags;
411 	ctx->timeout = response_tv;
412 	ctx->key = NULL;
413 
414 	/* The following should arguably be passed in... */
415 	if (ENABLED_OPT(AUTHENTICATION)) {
416 		ctx->key_id = OPT_VALUE_AUTHENTICATION;
417 		get_key(ctx->key_id, &ctx->key);
418 		if (NULL == ctx->key) {
419 			fprintf(stderr, "%s: Authentication with keyID %d requested, but no matching keyID found in <%s>!\n",
420 				progname, ctx->key_id, OPT_ARG(KEYFILE));
421 			exit(1);
422 		}
423 	} else {
424 		ctx->key_id = -1;
425 	}
426 
427 	++n_pending_dns;
428 	getaddrinfo_sometime(name, "123", &hints, 0,
429 			     &sntp_name_resolved, ctx);
430 }
431 
432 
433 /*
434 ** DNS Callback:
435 ** - For each IP:
436 ** - - open a socket
437 ** - - increment n_pending_ntp
438 ** - - send a request if this is a Unicast callback
439 ** - - queue wait for response
440 ** - decrement n_pending_dns
441 */
442 void
443 sntp_name_resolved(
444 	int			rescode,
445 	int			gai_errno,
446 	void *			context,
447 	const char *		name,
448 	const char *		service,
449 	const struct addrinfo *	hints,
450 	const struct addrinfo *	addr
451 	)
452 {
453 	struct dns_ctx *	dctx;
454 	sent_pkt *		spkt;
455 	const struct addrinfo *	ai;
456 	SOCKET			sock;
457 	u_int			xmt_delay_v4;
458 	u_int			xmt_delay_v6;
459 	u_int			xmt_delay;
460 	size_t			octets;
461 
462 	xmt_delay_v4 = 0;
463 	xmt_delay_v6 = 0;
464 	dctx = context;
465 	if (rescode) {
466 #ifdef EAI_SYSTEM
467 		if (EAI_SYSTEM == rescode) {
468 			errno = gai_errno;
469 			mfprintf(stderr, "%s lookup error %m\n",
470 				 dctx->name);
471 		} else
472 #endif
473 			fprintf(stderr, "%s lookup error %s\n",
474 				dctx->name, gai_strerror(rescode));
475 	} else {
476 		TRACE(3, ("%s [%s]\n", dctx->name,
477 			  (addr->ai_canonname != NULL)
478 			      ? addr->ai_canonname
479 			      : ""));
480 
481 		for (ai = addr; ai != NULL; ai = ai->ai_next) {
482 
483 			if (check_kod(ai))
484 				continue;
485 
486 			switch (ai->ai_family) {
487 
488 			case AF_INET:
489 				sock = sock4;
490 				xmt_delay = xmt_delay_v4;
491 				xmt_delay_v4++;
492 				break;
493 
494 			case AF_INET6:
495 				if (!ipv6_works)
496 					continue;
497 
498 				sock = sock6;
499 				xmt_delay = xmt_delay_v6;
500 				xmt_delay_v6++;
501 				break;
502 
503 			default:
504 				msyslog(LOG_ERR, "sntp_name_resolved: unexpected ai_family: %d",
505 					ai->ai_family);
506 				exit(1);
507 				break;
508 			}
509 
510 			/*
511 			** We're waiting for a response for either unicast
512 			** or broadcast, so...
513 			*/
514 			++n_pending_ntp;
515 
516 			/* If this is for a unicast IP, queue a request */
517 			if (dctx->flags & CTX_UCST) {
518 				spkt = emalloc_zero(sizeof(*spkt));
519 				spkt->dctx = dctx;
520 				octets = min(ai->ai_addrlen, sizeof(spkt->addr));
521 				memcpy(&spkt->addr, ai->ai_addr, octets);
522 				queue_xmt(sock, dctx, spkt, xmt_delay);
523 			}
524 		}
525 	}
526 	/* n_pending_dns really should be >0 here... */
527 	--n_pending_dns;
528 	check_exit_conditions();
529 }
530 
531 
532 /*
533 ** queue_xmt
534 */
535 void
536 queue_xmt(
537 	SOCKET			sock,
538 	struct dns_ctx *	dctx,
539 	sent_pkt *		spkt,
540 	u_int			xmt_delay
541 	)
542 {
543 	sockaddr_u *	dest;
544 	sent_pkt **	pkt_listp;
545 	sent_pkt *	match;
546 	xmt_ctx *	xctx;
547 	struct timeval	start_cb;
548 	struct timeval	delay;
549 
550 	dest = &spkt->addr;
551 	if (IS_IPV6(dest))
552 		pkt_listp = &v6_pkts_list;
553 	else
554 		pkt_listp = &v4_pkts_list;
555 
556 	/* reject attempts to add address already listed */
557 	for (match = *pkt_listp; match != NULL; match = match->link) {
558 		if (ADDR_PORT_EQ(&spkt->addr, &match->addr)) {
559 			if (strcasecmp(spkt->dctx->name,
560 				       match->dctx->name))
561 				printf("%s %s duplicate address from %s ignored.\n",
562 				       sptoa(&match->addr),
563 				       match->dctx->name,
564 				       spkt->dctx->name);
565 			else
566 				printf("%s %s, duplicate address ignored.\n",
567 				       sptoa(&match->addr),
568 				       match->dctx->name);
569 			dec_pending_ntp(spkt->dctx->name, &spkt->addr);
570 			free(spkt);
571 			return;
572 		}
573 	}
574 
575 	LINK_SLIST(*pkt_listp, spkt, link);
576 
577 	xctx = emalloc_zero(sizeof(*xctx));
578 	xctx->sock = sock;
579 	xctx->spkt = spkt;
580 	gettimeofday_cached(base, &start_cb);
581 	xctx->sched = start_cb.tv_sec + (2 * xmt_delay);
582 
583 	LINK_SORT_SLIST(xmt_q, xctx, (xctx->sched < L_S_S_CUR()->sched),
584 			link, xmt_ctx);
585 	if (xmt_q == xctx) {
586 		/*
587 		 * The new entry is the first scheduled.  The timer is
588 		 * either not active or is set for the second xmt
589 		 * context in xmt_q.
590 		 */
591 		if (NULL == ev_xmt_timer)
592 			ev_xmt_timer = event_new(base, INVALID_SOCKET,
593 						 EV_TIMEOUT,
594 						 &xmt_timer_cb, NULL);
595 		if (NULL == ev_xmt_timer) {
596 			msyslog(LOG_ERR,
597 				"queue_xmt: event_new(base, -1, EV_TIMEOUT) failed!");
598 			exit(1);
599 		}
600 		ZERO(delay);
601 		if (xctx->sched > start_cb.tv_sec)
602 			delay.tv_sec = xctx->sched - start_cb.tv_sec;
603 		event_add(ev_xmt_timer, &delay);
604 		TRACE(2, ("queue_xmt: xmt timer for %u usec\n",
605 			  (u_int)delay.tv_usec));
606 	}
607 }
608 
609 
610 /*
611 ** xmt_timer_cb
612 */
613 void
614 xmt_timer_cb(
615 	evutil_socket_t	fd,
616 	short		what,
617 	void *		ctx
618 	)
619 {
620 	struct timeval	start_cb;
621 	struct timeval	delay;
622 	xmt_ctx *	x;
623 
624 	UNUSED_ARG(fd);
625 	UNUSED_ARG(ctx);
626 	DEBUG_INSIST(EV_TIMEOUT == what);
627 
628 	if (NULL == xmt_q || shutting_down)
629 		return;
630 	gettimeofday_cached(base, &start_cb);
631 	if (xmt_q->sched <= start_cb.tv_sec) {
632 		UNLINK_HEAD_SLIST(x, xmt_q, link);
633 		TRACE(2, ("xmt_timer_cb: at .%6.6u -> %s\n",
634 			  (u_int)start_cb.tv_usec, stoa(&x->spkt->addr)));
635 		xmt(x);
636 		free(x);
637 		if (NULL == xmt_q)
638 			return;
639 	}
640 	if (xmt_q->sched <= start_cb.tv_sec) {
641 		event_add(ev_xmt_timer, &gap);
642 		TRACE(2, ("xmt_timer_cb: at .%6.6u gap %6.6u\n",
643 			  (u_int)start_cb.tv_usec,
644 			  (u_int)gap.tv_usec));
645 	} else {
646 		delay.tv_sec = xmt_q->sched - start_cb.tv_sec;
647 		delay.tv_usec = 0;
648 		event_add(ev_xmt_timer, &delay);
649 		TRACE(2, ("xmt_timer_cb: at .%6.6u next %ld seconds\n",
650 			  (u_int)start_cb.tv_usec,
651 			  (long)delay.tv_sec));
652 	}
653 }
654 
655 
656 /*
657 ** xmt()
658 */
659 void
660 xmt(
661 	xmt_ctx *	xctx
662 	)
663 {
664 	SOCKET		sock = xctx->sock;
665 	struct dns_ctx *dctx = xctx->spkt->dctx;
666 	sent_pkt *	spkt = xctx->spkt;
667 	sockaddr_u *	dst = &spkt->addr;
668 	struct timeval	tv_xmt;
669 	struct pkt	x_pkt;
670 	size_t		pkt_len;
671 	int		sent;
672 
673 	if (0 != gettimeofday(&tv_xmt, NULL)) {
674 		msyslog(LOG_ERR,
675 			"xmt: gettimeofday() failed: %m");
676 		exit(1);
677 	}
678 	tv_xmt.tv_sec += JAN_1970;
679 
680 	pkt_len = generate_pkt(&x_pkt, &tv_xmt, dctx->key_id,
681 			       dctx->key);
682 
683 	sent = sendpkt(sock, dst, &x_pkt, pkt_len);
684 	if (sent) {
685 		/* Save the packet we sent... */
686 		memcpy(&spkt->x_pkt, &x_pkt, min(sizeof(spkt->x_pkt),
687 		       pkt_len));
688 		spkt->stime = tv_xmt.tv_sec - JAN_1970;
689 
690 		TRACE(2, ("xmt: %lx.%6.6u %s %s\n", (u_long)tv_xmt.tv_sec,
691 			  (u_int)tv_xmt.tv_usec, dctx->name, stoa(dst)));
692 	} else {
693 		dec_pending_ntp(dctx->name, dst);
694 	}
695 
696 	return;
697 }
698 
699 
700 /*
701  * timeout_queries() -- give up on unrequited NTP queries
702  */
703 void
704 timeout_queries(void)
705 {
706 	struct timeval	start_cb;
707 	u_int		idx;
708 	sent_pkt *	head;
709 	sent_pkt *	spkt;
710 	sent_pkt *	spkt_next;
711 	long		age;
712 	int didsomething = 0;
713 
714 	TRACE(3, ("timeout_queries: called to check %u items\n",
715 		  (unsigned)COUNTOF(fam_listheads)));
716 
717 	gettimeofday_cached(base, &start_cb);
718 	for (idx = 0; idx < COUNTOF(fam_listheads); idx++) {
719 		head = fam_listheads[idx];
720 		for (spkt = head; spkt != NULL; spkt = spkt_next) {
721 			char xcst;
722 
723 			didsomething = 1;
724 			switch (spkt->dctx->flags & CTX_xCST) {
725 			    case CTX_BCST:
726 				xcst = 'B';
727 				break;
728 
729 			    case CTX_UCST:
730 				xcst = 'U';
731 				break;
732 
733 			    default:
734 				INSIST(!"spkt->dctx->flags neither UCST nor BCST");
735 				break;
736 			}
737 
738 			spkt_next = spkt->link;
739 			if (0 == spkt->stime || spkt->done)
740 				continue;
741 			age = start_cb.tv_sec - spkt->stime;
742 			TRACE(3, ("%s %s %cCST age %ld\n",
743 				  stoa(&spkt->addr),
744 				  spkt->dctx->name, xcst, age));
745 			if (age > response_timeout)
746 				timeout_query(spkt);
747 		}
748 	}
749 	// Do we care about didsomething?
750 	TRACE(3, ("timeout_queries: didsomething is %d, age is %ld\n",
751 		  didsomething, (long) (start_cb.tv_sec - start_tv.tv_sec)));
752 	if (start_cb.tv_sec - start_tv.tv_sec > response_timeout) {
753 		TRACE(3, ("timeout_queries: bail!\n"));
754 		event_base_loopexit(base, NULL);
755 		shutting_down = TRUE;
756 	}
757 }
758 
759 
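/*
** dec_pending_ntp() -- decrement the count of pending NTP responses,
** logging if it would go negative, and re-check the exit conditions.
*/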
760 void dec_pending_ntp(
761 	const char *	name,
762 	sockaddr_u *	server
763 	)
764 {
765 	if (n_pending_ntp > 0) {
766 		--n_pending_ntp;
767 		check_exit_conditions();
768 	} else {
769 		INSIST(0 == n_pending_ntp);
770 		TRACE(1, ("n_pending_ntp was zero before decrement for %s\n",
771 			  hostnameaddr(name, server)));
772 	}
773 }
774 
775 
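/*
** timeout_query() -- mark a single request done and log that no
** response arrived within response_timeout seconds.
*/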
776 void timeout_query(
777 	sent_pkt *	spkt
778 	)
779 {
780 	sockaddr_u *	server;
781 	char		xcst;
782 
783 
784 	switch (spkt->dctx->flags & CTX_xCST) {
785 	    case CTX_BCST:
786 		xcst = 'B';
787 		break;
788 
789 	    case CTX_UCST:
790 		xcst = 'U';
791 		break;
792 
793 	    default:
794 		INSIST(!"spkt->dctx->flags neither UCST nor BCST");
795 		break;
796 	}
797 	spkt->done = TRUE;
798 	server = &spkt->addr;
799 	msyslog(LOG_INFO, "%s no %cCST response after %d seconds",
800 		hostnameaddr(spkt->dctx->name, server), xcst,
801 		response_timeout);
802 	dec_pending_ntp(spkt->dctx->name, server);
803 	return;
804 }
805 
806 
807 /*
808 ** check_kod
809 */
810 int
811 check_kod(
812 	const struct addrinfo *	ai
813 	)
814 {
815 	char *hostname;
816 	struct kod_entry *reason;
817 
818 	/* Is there a KoD on file for this address? */
819 	hostname = addrinfo_to_str(ai);
820 	TRACE(2, ("check_kod: checking <%s>\n", hostname));
821 	if (search_entry(hostname, &reason)) {
822 		printf("prior KoD for %s, skipping.\n",
823 			hostname);
824 		free(reason);
825 		free(hostname);
826 
827 		return 1;
828 	}
829 	free(hostname);
830 
831 	return 0;
832 }
833 
834 
835 /*
836 ** Socket readable/timeout Callback:
837 ** Read in the packet
838 ** Unicast:
839 ** - close socket
840 ** - decrement n_pending_ntp
841 ** - If packet is good, set the time and "exit"
842 ** Broadcast:
843 ** - If packet is good, set the time and "exit"
844 */
845 void
846 sock_cb(
847 	evutil_socket_t fd,
848 	short what,
849 	void *ptr
850 	)
851 {
852 	sockaddr_u	sender;
853 	sockaddr_u *	psau;
854 	sent_pkt **	p_pktlist;
855 	sent_pkt *	spkt;
856 	int		rpktl;
857 	int		rc;
858 
859 	INSIST(sock4 == fd || sock6 == fd);
860 
861 	TRACE(3, ("sock_cb: event on sock%s:%s%s%s%s\n",
862 		  (fd == sock6)
863 		      ? "6"
864 		      : "4",
865 		  (what & EV_TIMEOUT) ? " timeout" : "",
866 		  (what & EV_READ)    ? " read" : "",
867 		  (what & EV_WRITE)   ? " write" : "",
868 		  (what & EV_SIGNAL)  ? " signal" : ""));
869 
870 	if (!(EV_READ & what)) {
871 		if (EV_TIMEOUT & what)
872 			timeout_queries();
873 
874 		return;
875 	}
876 
877 	/* Read in the packet */
878 	rpktl = recvdata(fd, &sender, &rbuf, sizeof(rbuf));
879 	if (rpktl < 0) {
880 		msyslog(LOG_DEBUG, "recvfrom error %m");
881 		return;
882 	}
883 
884 	if (sock6 == fd)
885 		p_pktlist = &v6_pkts_list;
886 	else
887 		p_pktlist = &v4_pkts_list;
888 
889 	for (spkt = *p_pktlist; spkt != NULL; spkt = spkt->link) {
890 		psau = &spkt->addr;
891 		if (SOCK_EQ(&sender, psau))
892 			break;
893 	}
894 	if (NULL == spkt) {
895 		msyslog(LOG_WARNING,
896 			"Packet from unexpected source %s dropped",
897 			sptoa(&sender));
898 		return;
899 	}
900 
901 	TRACE(1, ("sock_cb: %s %s\n", spkt->dctx->name,
902 		  sptoa(&sender)));
903 
904 	rpktl = process_pkt(&r_pkt, &sender, rpktl, MODE_SERVER,
905 			    &spkt->x_pkt, "sock_cb");
906 
907 	TRACE(2, ("sock_cb: process_pkt returned %d\n", rpktl));
908 
909 	/* If this is a Unicast packet, one down ... */
910 	if (!spkt->done && (CTX_UCST & spkt->dctx->flags)) {
911 		dec_pending_ntp(spkt->dctx->name, &spkt->addr);
912 		spkt->done = TRUE;
913 	}
914 
915 
916 	/* If the packet is good, set the time and we're all done */
917 	rc = handle_pkt(rpktl, &r_pkt, &spkt->addr, spkt->dctx->name);
918 	if (0 != rc)
919 		TRACE(1, ("sock_cb: handle_pkt() returned %d\n", rc));
920 	check_exit_conditions();
921 }
922 
923 
924 /*
925  * check_exit_conditions()
926  *
927  * If sntp has a reply, ask the event loop to stop after this round of
928  * callbacks, unless --wait was used.
929  */
930 void
931 check_exit_conditions(void)
932 {
933 	if ((0 == n_pending_ntp && 0 == n_pending_dns) ||
934 	    (time_derived && !HAVE_OPT(WAIT))) {
935 		event_base_loopexit(base, NULL);
936 		shutting_down = TRUE;
937 	} else {
938 		TRACE(2, ("%d NTP and %d name queries pending\n",
939 			  n_pending_ntp, n_pending_dns));
940 	}
941 }
942 
943 
944 /*
945  * sntp_addremove_fd() is invoked by the intres blocking worker code
946  * to read from a pipe, or to stop same.
947  */
948 void sntp_addremove_fd(
949 	int	fd,
950 	int	is_pipe,
951 	int	remove_it
952 	)
953 {
954 	u_int		idx;
955 	blocking_child *c;
956 	struct event *	ev;
957 
958 #ifdef HAVE_SOCKETPAIR
959 	if (is_pipe) {
960 		/* sntp only asks for EV_FEATURE_FDS without HAVE_SOCKETPAIR */
961 		msyslog(LOG_ERR, "fatal: pipes not supported on systems with socketpair()");
962 		exit(1);
963 	}
964 #endif
965 
966 	c = NULL;
967 	for (idx = 0; idx < blocking_children_alloc; idx++) {
968 		c = blocking_children[idx];
969 		if (NULL == c)
970 			continue;
971 		if (fd == c->resp_read_pipe)
972 			break;
973 	}
974 	if (idx == blocking_children_alloc)
975 		return;
976 
977 	if (remove_it) {
978 		ev = c->resp_read_ctx;
979 		c->resp_read_ctx = NULL;
980 		event_del(ev);
981 		event_free(ev);
982 
983 		return;
984 	}
985 
986 	ev = event_new(base, fd, EV_READ | EV_PERSIST,
987 		       &worker_resp_cb, c);
988 	if (NULL == ev) {
989 		msyslog(LOG_ERR,
990 			"sntp_addremove_fd: event_new(base, fd) failed!");
991 		return;
992 	}
993 	c->resp_read_ctx = ev;
994 	event_add(ev, NULL);
995 }
996 
997 
998 /* called by forked intres child to close open descriptors */
999 #ifdef WORK_FORK
1000 void
1001 kill_asyncio(
1002 	int	startfd
1003 	)
1004 {
1005 	if (INVALID_SOCKET != sock4) {
1006 		closesocket(sock4);
1007 		sock4 = INVALID_SOCKET;
1008 	}
1009 	if (INVALID_SOCKET != sock6) {
1010 		closesocket(sock6);
1011 		sock6 = INVALID_SOCKET;
1012 	}
1013 	if (INVALID_SOCKET != bsock4) {
1014 		closesocket(bsock4);
1015 		bsock4 = INVALID_SOCKET;
1016 	}
1017 	if (INVALID_SOCKET != bsock6) {
1018 		closesocket(bsock6);
1019 		bsock6 = INVALID_SOCKET;
1020 	}
1021 }
1022 #endif
1023 
1024 
1025 /*
1026  * worker_resp_cb() is invoked when resp_read_pipe is readable.
1027  */
1028 void
1029 worker_resp_cb(
1030 	evutil_socket_t	fd,
1031 	short		what,
1032 	void *		ctx	/* blocking_child * */
1033 	)
1034 {
1035 	blocking_child *	c;
1036 
1037 	DEBUG_INSIST(EV_READ & what);
1038 	c = ctx;
1039 	DEBUG_INSIST(fd == c->resp_read_pipe);
1040 	process_blocking_resp(c);
1041 }
1042 
1043 
1044 /*
1045  * intres_timeout_req(s) is invoked in the parent to schedule an idle
1046  * timeout to fire in s seconds, if not reset earlier by a call to
1047  * intres_timeout_req(0), which clears any pending timeout.  When the
1048  * timeout expires, worker_idle_timer_fired() is invoked (again, in the
1049  * parent).
1050  *
1051  * sntp and ntpd each provide implementations adapted to their timers.
1052  */
1053 void
1054 intres_timeout_req(
1055 	u_int	seconds		/* 0 cancels */
1056 	)
1057 {
1058 	struct timeval	tv_to;
1059 
1060 	if (NULL == ev_worker_timeout) {
1061 		ev_worker_timeout = event_new(base, -1,
1062 					      EV_TIMEOUT | EV_PERSIST,
1063 					      &worker_timeout, NULL);
1064 		DEBUG_INSIST(NULL != ev_worker_timeout);
1065 	} else {
1066 		event_del(ev_worker_timeout);
1067 	}
1068 	if (0 == seconds)
1069 		return;
1070 	tv_to.tv_sec = seconds;
1071 	tv_to.tv_usec = 0;
1072 	event_add(ev_worker_timeout, &tv_to);
1073 }
1074 
1075 
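/*
** worker_timeout() -- libevent callback for the intres idle timer;
** hands off to worker_idle_timer_fired().
*/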
1076 void
1077 worker_timeout(
1078 	evutil_socket_t	fd,
1079 	short		what,
1080 	void *		ctx
1081 	)
1082 {
1083 	UNUSED_ARG(fd);
1084 	UNUSED_ARG(ctx);
1085 
1086 	DEBUG_REQUIRE(EV_TIMEOUT & what);
1087 	worker_idle_timer_fired();
1088 }
1089 
1090 
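/*
** sntp_libevent_log_cb() -- map libevent log severities onto syslog
** levels and forward the message via msyslog().
*/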
1091 void
1092 sntp_libevent_log_cb(
1093 	int		severity,
1094 	const char *	msg
1095 	)
1096 {
1097 	int		level;
1098 
1099 	switch (severity) {
1100 
1101 	default:
1102 	case _EVENT_LOG_DEBUG:
1103 		level = LOG_DEBUG;
1104 		break;
1105 
1106 	case _EVENT_LOG_MSG:
1107 		level = LOG_NOTICE;
1108 		break;
1109 
1110 	case _EVENT_LOG_WARN:
1111 		level = LOG_WARNING;
1112 		break;
1113 
1114 	case _EVENT_LOG_ERR:
1115 		level = LOG_ERR;
1116 		break;
1117 	}
1118 
1119 	msyslog(level, "%s", msg);
1120 }
1121 
1122 
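/*
** generate_pkt() -- build the client request: fill in the transmit
** timestamp, stratum, poll, and li_vn_mode fields, and append a MAC
** if a key was supplied.  Returns the packet length in octets.
*/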
1123 int
1124 generate_pkt (
1125 	struct pkt *x_pkt,
1126 	const struct timeval *tv_xmt,
1127 	int key_id,
1128 	struct key *pkt_key
1129 	)
1130 {
1131 	l_fp	xmt_fp;
1132 	int	pkt_len;
1133 	int	mac_size;
1134 
1135 	pkt_len = LEN_PKT_NOMAC;
1136 	ZERO(*x_pkt);
1137 	TVTOTS(tv_xmt, &xmt_fp);
1138 	HTONL_FP(&xmt_fp, &x_pkt->xmt);
1139 	x_pkt->stratum = STRATUM_TO_PKT(STRATUM_UNSPEC);
1140 	x_pkt->ppoll = 8;
1141 	/* FIXME! Mode broadcast + address check -> broadcast packet */
1142 	set_li_vn_mode(x_pkt, LEAP_NOTINSYNC, ntpver, 3);
1143 	if (debug > 0) {
1144 		printf("generate_pkt: key_id %d, key pointer %p\n", key_id, pkt_key);
1145 	}
1146 	if (pkt_key != NULL) {
1147 		x_pkt->exten[0] = htonl(key_id);
1148 		mac_size = make_mac(x_pkt, pkt_len, MAX_MDG_LEN,
1149 				    pkt_key, (char *)&x_pkt->exten[1]);
1150 		if (mac_size > 0)
1151 			pkt_len += mac_size + KEY_MAC_LEN;
1152 #ifdef DEBUG
1153 		if (debug > 0) {
1154 			printf("generate_pkt: mac_size is %d\n", mac_size);
1155 		}
1156 #endif
1157 
1158 	}
1159 	return pkt_len;
1160 }
1161 
1162 
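/*
** handle_pkt() -- act on the result of process_pkt(): record KoD
** entries, compute and log the offset for a usable reply, and step or
** slew the clock via set_time() if requested.  SNTP_PRETEND_TIME in
** the environment overrides the destination timestamp.
*/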
1163 int
1164 handle_pkt(
1165 	int		rpktl,
1166 	struct pkt *	rpkt,
1167 	sockaddr_u *	host,
1168 	const char *	hostname
1169 	)
1170 {
1171 	char		disptxt[32];
1172 	const char *	addrtxt;
1173 	struct timeval	tv_dst;
1174 	int		cnt;
1175 	int		sw_case;
1176 	int		digits;
1177 	int		stratum;
1178 	char *		ref;
1179 	char *		ts_str;
1180 	const char *	leaptxt;
1181 	double		offset;
1182 	double		precision;
1183 	double		synch_distance;
1184 	char *		p_SNTP_PRETEND_TIME;
1185 	time_t		pretend_time;
1186 #if SIZEOF_TIME_T == 8
1187 	long long	ll;
1188 #else
1189 	long		l;
1190 #endif
1191 
1192 	ts_str = NULL;
1193 
1194 	if (rpktl > 0)
1195 		sw_case = 1;
1196 	else
1197 		sw_case = rpktl;
1198 
1199 	switch (sw_case) {
1200 
1201 	case SERVER_UNUSEABLE:
1202 		return -1;
1203 		break;
1204 
1205 	case PACKET_UNUSEABLE:
1206 		break;
1207 
1208 	case SERVER_AUTH_FAIL:
1209 		break;
1210 
1211 	case KOD_DEMOBILIZE:
1212 		/* Received a DENY or RESTR KOD packet */
1213 		addrtxt = stoa(host);
1214 		ref = (char *)&rpkt->refid;
1215 		add_entry(addrtxt, ref);
1216 		msyslog(LOG_WARNING, "KOD code %c%c%c%c from %s %s",
1217 			ref[0], ref[1], ref[2], ref[3], addrtxt, hostname);
1218 		break;
1219 
1220 	case KOD_RATE:
1221 		/*
1222 		** Hmm...
1223 		** We should probably call add_entry() with an
1224 		** expiration timestamp of several seconds in the future,
1225 		** and back-off even more if we get more RATE responses.
1226 		*/
1227 		break;
1228 
1229 	case 1:
1230 		TRACE(3, ("handle_pkt: %d bytes from %s %s\n",
1231 			  rpktl, stoa(host), hostname));
1232 
1233 		gettimeofday_cached(base, &tv_dst);
1234 
1235 		p_SNTP_PRETEND_TIME = getenv("SNTP_PRETEND_TIME");
1236 		if (p_SNTP_PRETEND_TIME) {
1237 			pretend_time = 0;
1238 #if SIZEOF_TIME_T == 4
1239 			if (1 == sscanf(p_SNTP_PRETEND_TIME, "%ld", &l))
1240 				pretend_time = (time_t)l;
1241 #elif SIZEOF_TIME_T == 8
1242 			if (1 == sscanf(p_SNTP_PRETEND_TIME, "%lld", &ll))
1243 				pretend_time = (time_t)ll;
1244 #else
1245 # include "GRONK: unexpected value for SIZEOF_TIME_T"
1246 #endif
1247 			if (0 != pretend_time)
1248 				tv_dst.tv_sec = pretend_time;
1249 		}
1250 
1251 		offset_calculation(rpkt, rpktl, &tv_dst, &offset,
1252 				   &precision, &synch_distance);
1253 		time_derived = TRUE;
1254 
1255 		for (digits = 0; (precision *= 10.) < 1.; ++digits)
1256 			/* empty */ ;
1257 		if (digits > 6)
1258 			digits = 6;
1259 
1260 		ts_str = tv_to_str(&tv_dst);
1261 		stratum = rpkt->stratum;
1262 		if (0 == stratum)
1263 				stratum = 16;
1264 
1265 		if (synch_distance > 0.) {
1266 			cnt = snprintf(disptxt, sizeof(disptxt),
1267 				       " +/- %f", synch_distance);
1268 			if ((size_t)cnt >= sizeof(disptxt))
1269 				snprintf(disptxt, sizeof(disptxt),
1270 					 "ERROR %d >= %d", cnt,
1271 					 (int)sizeof(disptxt));
1272 		} else {
1273 			disptxt[0] = '\0';
1274 		}
1275 
1276 		switch (PKT_LEAP(rpkt->li_vn_mode)) {
1277 		    case LEAP_NOWARNING:
1278 		    	leaptxt = "no-leap";
1279 			break;
1280 		    case LEAP_ADDSECOND:
1281 		    	leaptxt = "add-leap";
1282 			break;
1283 		    case LEAP_DELSECOND:
1284 		    	leaptxt = "del-leap";
1285 			break;
1286 		    case LEAP_NOTINSYNC:
1287 		    	leaptxt = "unsync";
1288 			break;
1289 		    default:
1290 		    	leaptxt = "LEAP-ERROR";
1291 			break;
1292 		}
1293 
1294 		msyslog(LOG_INFO, "%s %+.*f%s %s s%d %s%s", ts_str,
1295 			digits, offset, disptxt,
1296 			hostnameaddr(hostname, host), stratum,
1297 			leaptxt,
1298 			(time_adjusted)
1299 			    ? " [excess]"
1300 			    : "");
1301 		free(ts_str);
1302 
1303 		if (p_SNTP_PRETEND_TIME)
1304 			return 0;
1305 
1306 		if (!time_adjusted &&
1307 		    (ENABLED_OPT(STEP) || ENABLED_OPT(SLEW)))
1308 			return set_time(offset);
1309 
1310 		return EX_OK;
1311 	}
1312 
1313 	return 1;
1314 }
1315 
1316 
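/*
** offset_calculation() -- derive offset, precision, and a rough
** synchronization distance from the server reply and the local
** destination timestamp, using the usual ((t2-t1)+(t3-t4))/2 form.
*/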
1317 void
1318 offset_calculation(
1319 	struct pkt *rpkt,
1320 	int rpktl,
1321 	struct timeval *tv_dst,
1322 	double *offset,
1323 	double *precision,
1324 	double *synch_distance
1325 	)
1326 {
1327 	l_fp p_rec, p_xmt, p_ref, p_org, tmp, dst;
1328 	u_fp p_rdly, p_rdsp;
1329 	double t21, t34, delta;
1330 
1331 	/* Convert timestamps from network to host byte order */
1332 	p_rdly = NTOHS_FP(rpkt->rootdelay);
1333 	p_rdsp = NTOHS_FP(rpkt->rootdisp);
1334 	NTOHL_FP(&rpkt->reftime, &p_ref);
1335 	NTOHL_FP(&rpkt->org, &p_org);
1336 	NTOHL_FP(&rpkt->rec, &p_rec);
1337 	NTOHL_FP(&rpkt->xmt, &p_xmt);
1338 
1339 	*precision = LOGTOD(rpkt->precision);
1340 
1341 	TRACE(3, ("offset_calculation: LOGTOD(rpkt->precision): %f\n", *precision));
1342 
1343 	/* Compute offset etc. */
1344 	tmp = p_rec;
1345 	L_SUB(&tmp, &p_org);
1346 	LFPTOD(&tmp, t21);
1347 	TVTOTS(tv_dst, &dst);
1348 	dst.l_ui += JAN_1970;
1349 	tmp = p_xmt;
1350 	L_SUB(&tmp, &dst);
1351 	LFPTOD(&tmp, t34);
1352 	*offset = (t21 + t34) / 2.;
1353 	delta = t21 - t34;
1354 
1355 	// synch_distance is:
1356 	// (peer->delay + peer->rootdelay) / 2 + peer->disp
1357 	// + peer->rootdisp + clock_phi * (current_time - peer->update)
1358 	// + peer->jitter;
1359 	//
1360 	// and peer->delay = fabs(peer->offset - p_offset) * 2;
1361 	// and peer->offset needs history, so we're left with
1362 	// p_offset = (t21 + t34) / 2.;
1363 	// peer->disp = 0; (we have no history to augment this)
1364 	// clock_phi = 15e-6;
1365 	// peer->jitter = LOGTOD(sys_precision); (we have no history to augment this)
1366 	// and ntp_proto.c:set_sys_tick_precision() should get us sys_precision.
1367 	//
1368 	// so our answer seems to be:
1369 	//
1370 	// (fabs(t21 + t34) + peer->rootdelay) / 3.
1371 	// + 0 (peer->disp)
1372 	// + peer->rootdisp
1373 	// + 15e-6 (clock_phi)
1374 	// + LOGTOD(sys_precision)
1375 
1376 	INSIST( FPTOD(p_rdly) >= 0. );
1377 #if 1
1378 	*synch_distance = (fabs(t21 + t34) + FPTOD(p_rdly)) / 3.
1379 		+ 0.
1380 		+ FPTOD(p_rdsp)
1381 		+ 15e-6
1382 		+ 0.	/* LOGTOD(sys_precision) when we can get it */
1383 		;
1384 	INSIST( *synch_distance >= 0. );
1385 #else
1386 	*synch_distance = (FPTOD(p_rdly) + FPTOD(p_rdsp))/2.0;
1387 #endif
1388 
1389 #ifdef DEBUG
1390 	if (debug > 3) {
1391 		printf("sntp rootdelay: %f\n", FPTOD(p_rdly));
1392 		printf("sntp rootdisp: %f\n", FPTOD(p_rdsp));
1393 		printf("sntp syncdist: %f\n", *synch_distance);
1394 
1395 		pkt_output(rpkt, rpktl, stdout);
1396 
1397 		printf("sntp offset_calculation: rpkt->reftime:\n");
1398 		l_fp_output(&p_ref, stdout);
1399 		printf("sntp offset_calculation: rpkt->org:\n");
1400 		l_fp_output(&p_org, stdout);
1401 		printf("sntp offset_calculation: rpkt->rec:\n");
1402 		l_fp_output(&p_rec, stdout);
1403 		printf("sntp offset_calculation: rpkt->xmt:\n");
1404 		l_fp_output(&p_xmt, stdout);
1405 	}
1406 #endif
1407 
1408 	TRACE(3, ("sntp offset_calculation:\trec - org t21: %.6f\n"
1409 		  "\txmt - dst t34: %.6f\tdelta: %.6f\toffset: %.6f\n",
1410 		  t21, t34, delta, *offset));
1411 
1412 	return;
1413 }
1414 
1415 
1416 
1417 /* Compute the 8 bits for li_vn_mode */
1418 void
1419 set_li_vn_mode (
1420 	struct pkt *spkt,
1421 	char leap,
1422 	char version,
1423 	char mode
1424 	)
1425 {
1426 	if (leap > 3) {
1427 		msyslog(LOG_DEBUG, "set_li_vn_mode: leap > 3, using max. 3");
1428 		leap = 3;
1429 	}
1430 
1431 	if ((unsigned char)version > 7) {
1432 		msyslog(LOG_DEBUG, "set_li_vn_mode: version < 0 or > 7, using 4");
1433 		version = 4;
1434 	}
1435 
1436 	if (mode > 7) {
1437 		msyslog(LOG_DEBUG, "set_li_vn_mode: mode > 7, using client mode 3");
1438 		mode = 3;
1439 	}
1440 
1441 	spkt->li_vn_mode  = leap << 6;
1442 	spkt->li_vn_mode |= version << 3;
1443 	spkt->li_vn_mode |= mode;
1444 }
1445 
1446 
1447 /*
1448 ** set_time applies 'offset' to the local clock.
1449 */
1450 int
1451 set_time(
1452 	double offset
1453 	)
1454 {
1455 	int rc;
1456 
1457 	if (time_adjusted)
1458 		return EX_OK;
1459 
1460 	/*
1461 	** If we can step but we cannot slew, then step.
1462 	** If we can step or slew and |offset| > steplimit, then step.
1463 	*/
1464 	if (ENABLED_OPT(STEP) &&
1465 	    (   !ENABLED_OPT(SLEW)
1466 	     || (ENABLED_OPT(SLEW) && (fabs(offset) > steplimit))
1467 	    )) {
1468 		rc = step_systime(offset);
1469 
1470 		/* If there was a problem, can we rely on errno? */
1471 		if (1 == rc)
1472 			time_adjusted = TRUE;
1473 		return (time_adjusted)
1474 			   ? EX_OK
1475 			   : 1;
1476 		/*
1477 		** In case of error, what should we use?
1478 		** EX_UNAVAILABLE?
1479 		** EX_OSERR?
1480 		** EX_NOPERM?
1481 		*/
1482 	}
1483 
1484 	if (ENABLED_OPT(SLEW)) {
1485 		rc = adj_systime(offset);
1486 
1487 		/* If there was a problem, can we rely on errno? */
1488 		if (1 == rc)
1489 			time_adjusted = TRUE;
1490 		return (time_adjusted)
1491 			   ? EX_OK
1492 			   : 1;
1493 		/*
1494 		** In case of error, what should we use?
1495 		** EX_UNAVAILABLE?
1496 		** EX_OSERR?
1497 		** EX_NOPERM?
1498 		*/
1499 	}
1500 
1501 	return EX_SOFTWARE;
1502 }
1503 
1504 
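/*
** libevent_version_ok() -- refuse to run if the runtime libevent
** major/minor version differs from the one sntp was built against.
*/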
1505 int
1506 libevent_version_ok(void)
1507 {
1508 	ev_uint32_t v_compile_maj;
1509 	ev_uint32_t v_run_maj;
1510 
1511 	v_compile_maj = LIBEVENT_VERSION_NUMBER & 0xffff0000;
1512 	v_run_maj = event_get_version_number() & 0xffff0000;
1513 	if (v_compile_maj != v_run_maj) {
1514 		fprintf(stderr,
1515 			"Incompatible libevent versions: have %s, built with %s\n",
1516 			event_get_version(),
1517 			LIBEVENT_VERSION);
1518 		return 0;
1519 	}
1520 	return 1;
1521 }
1522 
1523 /*
1524  * gettimeofday_cached()
1525  *
1526  * Clones the event_base_gettimeofday_cached() interface but ensures the
1527  * times are always on the gettimeofday() 1970 scale.  Older libevent 2
1528  * sometimes used gettimeofday(), sometimes the since-system-start
1529  * clock_gettime(CLOCK_MONOTONIC), depending on the platform.
1530  *
1531  * It is not cleanly possible to tell which timescale older libevent is
1532  * using.
1533  *
1534  * The strategy involves 1 hour thresholds chosen to be far longer than
1535  * the duration of a round of libevent callbacks, which share a cached
1536  * start-of-round time.  First compare the last cached time with the
1537  * current gettimeofday() time.  If they are within one hour, libevent
1538  * is using the proper timescale so leave the offset 0.  Otherwise,
1539  * compare libevent's cached time and the current time on the monotonic
1540  * scale.  If they are within an hour, libevent is using the monotonic
1541  * scale so calculate the offset to add to such times to bring them to
1542  * gettimeofday()'s scale.
1543  */
1544 int
1545 gettimeofday_cached(
1546 	struct event_base *	b,
1547 	struct timeval *	caller_tv
1548 	)
1549 {
1550 #if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
1551 	static struct event_base *	cached_b;
1552 	static struct timeval		cached;
1553 	static struct timeval		adj_cached;
1554 	static struct timeval		offset;
1555 	static int			offset_ready;
1556 	struct timeval			latest;
1557 	struct timeval			systemt;
1558 	struct timespec			ts;
1559 	struct timeval			mono;
1560 	struct timeval			diff;
1561 	int				cgt_rc;
1562 	int				gtod_rc;
1563 
1564 	event_base_gettimeofday_cached(b, &latest);
1565 	if (b == cached_b &&
1566 	    !memcmp(&latest, &cached, sizeof(latest))) {
1567 		*caller_tv = adj_cached;
1568 		return 0;
1569 	}
1570 	cached = latest;
1571 	cached_b = b;
1572 	if (!offset_ready) {
1573 		cgt_rc = clock_gettime(CLOCK_MONOTONIC, &ts);
1574 		gtod_rc = gettimeofday(&systemt, NULL);
1575 		if (0 != gtod_rc) {
1576 			msyslog(LOG_ERR,
1577 				"%s: gettimeofday() error %m",
1578 				progname);
1579 			exit(1);
1580 		}
1581 		diff = sub_tval(systemt, latest);
1582 		if (debug > 1)
1583 			printf("system minus cached %+ld.%06ld\n",
1584 			       (long)diff.tv_sec, (long)diff.tv_usec);
1585 		if (0 != cgt_rc || labs((long)diff.tv_sec) < 3600) {
1586 			/*
1587 			 * Either use_monotonic == 0, or this libevent
1588 			 * has been repaired.  Leave offset at zero.
1589 			 */
1590 		} else {
1591 			mono.tv_sec = ts.tv_sec;
1592 			mono.tv_usec = ts.tv_nsec / 1000;
1593 			diff = sub_tval(latest, mono);
1594 			if (debug > 1)
1595 				printf("cached minus monotonic %+ld.%06ld\n",
1596 				       (long)diff.tv_sec, (long)diff.tv_usec);
1597 			if (labs((long)diff.tv_sec) < 3600) {
1598 				/* older libevent2 using monotonic */
1599 				offset = sub_tval(systemt, mono);
1600 				TRACE(1, ("%s: Offsetting libevent CLOCK_MONOTONIC times by %+ld.%06ld\n",
1601 					 "gettimeofday_cached",
1602 					 (long)offset.tv_sec,
1603 					 (long)offset.tv_usec));
1604 			}
1605 		}
1606 		offset_ready = TRUE;
1607 	}
1608 	adj_cached = add_tval(cached, offset);
1609 	*caller_tv = adj_cached;
1610 
1611 	return 0;
1612 #else
1613 	return event_base_gettimeofday_cached(b, caller_tv);
1614 #endif
1615 }
1616 
1617 /* Dummy function to satisfy libntp/work_fork.c */
1618 extern int set_user_group_ids(void);
1619 int set_user_group_ids(void)
1620 {
1621     return 1;
1622 }
1623