xref: /openbsd/sbin/unwind/resolver.c (revision 0e59d0d1)
1 /*	$OpenBSD: resolver.c,v 1.173 2024/11/21 13:35:20 claudio Exp $	*/
2 
3 
4 /*
5  * Copyright (c) 2018 Florian Obser <florian@openbsd.org>
6  * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org>
7  * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
8  * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
9  *
10  * Permission to use, copy, modify, and distribute this software for any
11  * purpose with or without fee is hereby granted, provided that the above
12  * copyright notice and this permission notice appear in all copies.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
15  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
16  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
17  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
18  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21  */
22 
23 #include <sys/types.h>
24 #include <sys/queue.h>
25 #include <sys/socket.h>
26 #include <sys/syslog.h>
27 #include <sys/time.h>
28 
29 #include <net/route.h>
30 
31 #include <errno.h>
32 #include <event.h>
33 #include <imsg.h>
34 #include <limits.h>
35 #include <netdb.h>
36 #include <asr.h>
37 #include <pwd.h>
38 #include <signal.h>
39 #include <stdio.h>
40 #include <stdlib.h>
41 #include <string.h>
42 #include <time.h>
43 #include <tls.h>
44 #include <unistd.h>
45 
46 #include "libunbound/config.h"
47 #include "libunbound/libunbound/context.h"
48 #include "libunbound/libunbound/libworker.h"
49 #include "libunbound/libunbound/unbound.h"
50 #include "libunbound/libunbound/unbound-event.h"
51 #include "libunbound/services/cache/rrset.h"
52 #include "libunbound/sldns/sbuffer.h"
53 #include "libunbound/sldns/rrdef.h"
54 #include "libunbound/sldns/pkthdr.h"
55 #include "libunbound/sldns/wire2str.h"
56 #include "libunbound/util/config_file.h"
57 #include "libunbound/util/module.h"
58 #include "libunbound/util/regional.h"
59 #include "libunbound/util/storage/slabhash.h"
60 #include "libunbound/validator/validator.h"
61 #include "libunbound/validator/val_kcache.h"
62 #include "libunbound/validator/val_neg.h"
63 
64 #include <openssl/crypto.h>
65 
66 #include "log.h"
67 #include "frontend.h"
68 #include "unwind.h"
69 #include "resolver.h"
70 
71 #define	TLS_DEFAULT_CA_CERT_FILE	"/etc/ssl/cert.pem"
72 #define	UB_LOG_VERBOSE			4
73 #define	UB_LOG_BRIEF			0
74 
75 /* maximum size of a libunbound forwarder definition: IP@PORT#AUTHNAME */
76 #define	FWD_MAX				(INET6_ADDRSTRLEN + NI_MAXHOST + 2 + 5)
77 
78 /*
79  * The preferred resolver type can be this many ms slower than the next
80  * best and still be picked
81  */
82 #define	PREF_RESOLVER_MEDIAN_SKEW	200		/* 200 ms */
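/* upper bound on the wait before the next resolver type is tried */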
83 #define	NEXT_RES_MAX			2000		/* 2000 ms */
84 
85 #define	DOUBT_NXDOMAIN_SEC		(5 * 60)	/* 5 minutes */
86 
87 #define	RESOLVER_CHECK_SEC		1
88 #define	RESOLVER_CHECK_MAXSEC		1024 /* ~17 minutes */
89 #define	DECAY_PERIOD			60
90 #define	DECAY_NOMINATOR			9
91 #define	DECAY_DENOMINATOR		10
92 
93 #define	TRUST_ANCHOR_RETRY_INTERVAL	8640
94 #define	TRUST_ANCHOR_QUERY_INTERVAL	43200
95 
96 /* in libworker_event_done_cb() enum sec_status gets mapped to 0, 1 and 2 */
97 #define	INSECURE	0
98 #define	BOGUS		1
99 #define	SECURE		2
100 
101 #define	WKA1_FOUND	1
102 #define	WKA2_FOUND	2
103 
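/*
 * Per-type resolver context, backed by either a libunbound ctx or an asr
 * ctx.  ref_cnt counts in-flight queries; stop marks the resolver for
 * deferred freeing once the last reference is dropped.
 */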
104 struct uw_resolver {
105 	struct event		 check_ev;
106 	struct event		 free_ev;
107 	struct ub_ctx		*ctx;
108 	void			*asr_ctx;
109 	struct timeval		 check_tv;
110 	int			 ref_cnt;
111 	int			 stop;
112 	enum uw_resolver_state	 state;
113 	enum uw_resolver_type	 type;
114 	int			 check_running;
115 	int64_t			 median;
116 	int64_t			 histogram[nitems(histogram_limits)];
117 	int64_t			 latest_histogram[nitems(histogram_limits)];
118 };
119 
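/*
 * A client query in progress: it is tried against the resolver types in
 * res_pref order, with "running" counting the resolves currently in flight.
 */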
120 struct running_query {
121 	TAILQ_ENTRY(running_query)	 entry;
122 	struct query_imsg		*query_imsg;
123 	struct event			 timer_ev;
124 	struct timespec			 tp;
125 	struct resolver_preference	 res_pref;
126 	int				 next_resolver;
127 	int				 running;
128 };
129 
130 TAILQ_HEAD(, running_query)	 running_queries;
131 
132 typedef void (*resolve_cb_t)(struct uw_resolver *, void *, int, void *, int,
133     int, char *);
134 
135 struct resolver_cb_data {
136 	resolve_cb_t		 cb;
137 	void			*data;
138 	struct uw_resolver	*res;
139 };
140 
141 __dead void		 resolver_shutdown(void);
142 void			 resolver_sig_handler(int sig, short, void *);
143 void			 resolver_dispatch_frontend(int, short, void *);
144 void			 resolver_dispatch_main(int, short, void *);
145 int			 sort_resolver_types(struct resolver_preference *);
146 void			 setup_query(struct query_imsg *);
147 struct running_query	*find_running_query(uint64_t);
148 void			 try_resolver_timo(int, short, void *);
149 int			 try_next_resolver(struct running_query *);
150 
151 int			 resolve(struct uw_resolver *, const char*, int, int,
152 			     void*, resolve_cb_t);
153 void			 resolve_done(struct uw_resolver *, void *, int, void *,
154 			     int, int, char *);
155 void			 ub_resolve_done(void *, int, void *, int, int, char *,
156 			     int);
157 void			 asr_resolve_done(struct asr_result *, void *);
158 void			 new_resolver(enum uw_resolver_type,
159 			     enum uw_resolver_state);
160 struct uw_resolver	*create_resolver(enum uw_resolver_type);
161 #ifdef UNIFIED_CACHE
162 void			 setup_unified_caches(void);
163 void			 set_unified_cache(struct uw_resolver *);
164 #endif /* UNIFIED_CACHE */
165 void			 free_resolver(struct uw_resolver *);
166 void			 set_forwarders(struct uw_resolver *,
167 			     struct uw_forwarder_head *, int);
168 void			 resolver_check_timo(int, short, void *);
169 void			 resolver_free_timo(int, short, void *);
170 void			 check_resolver(struct uw_resolver *);
171 void			 check_resolver_done(struct uw_resolver *, void *, int,
172 			     void *, int, int, char *);
173 void			 schedule_recheck_all_resolvers(void);
174 int			 check_forwarders_changed(struct uw_forwarder_head *,
175 			     struct uw_forwarder_head *);
176 void			 replace_forwarders(struct uw_forwarder_head *,
177 			     struct uw_forwarder_head *);
178 void			 resolver_ref(struct uw_resolver *);
179 void			 resolver_unref(struct uw_resolver *);
180 int			 resolver_cmp(const void *, const void *);
181 void			 restart_ub_resolvers(int);
182 void			 show_status(pid_t);
183 void			 show_autoconf(pid_t);
184 void			 show_mem(pid_t);
185 void			 send_resolver_info(struct uw_resolver *, pid_t);
186 void			 trust_anchor_resolve(void);
187 void			 trust_anchor_timo(int, short, void *);
188 void			 trust_anchor_resolve_done(struct uw_resolver *, void *,
189 			     int, void *, int, int, char *);
190 void			 replace_autoconf_forwarders(struct
191 			     imsg_rdns_proposal *);
192 int			 force_tree_cmp(struct force_tree_entry *,
193 			     struct force_tree_entry *);
194 int			 find_force(struct force_tree *, char *,
195 			     struct uw_resolver **);
196 int64_t			 histogram_median(int64_t *);
197 void			 decay_latest_histograms(int, short, void *);
198 int			 running_query_cnt(void);
199 int			*resolvers_to_restart(struct uw_conf *,
200 			     struct uw_conf *);
201 const char		*query_imsg2str(struct query_imsg *);
202 char			*gen_resolv_conf(void);
203 void			 check_dns64(void);
204 void			 check_dns64_done(struct asr_result *, void *);
205 int			 dns64_prefixlen(const struct in6_addr *,
206 			     const uint8_t *);
207 void			 add_dns64_prefix(const struct in6_addr *, int,
208 			     struct dns64_prefix *, int, int);
209 
210 struct uw_conf			*resolver_conf;
211 static struct imsgev		*iev_frontend;
212 static struct imsgev		*iev_main;
213 struct uw_forwarder_head	 autoconf_forwarder_list;
214 struct uw_resolver		*resolvers[UW_RES_NONE];
215 struct timespec			 last_network_change;
216 
217 struct event			 trust_anchor_timer;
218 struct event			 decay_timer;
219 
220 static struct trust_anchor_head	 trust_anchors, new_trust_anchors;
221 
222 struct event_base		*ev_base;
223 
224 RB_GENERATE(force_tree, force_tree_entry, entry, force_tree_cmp)
225 
226 int				 val_id = -1;
227 #ifdef UNIFIED_CACHE
228 struct slabhash			*unified_msg_cache;
229 struct rrset_cache		*unified_rrset_cache;
230 struct key_cache		*unified_key_cache;
231 struct val_neg_cache		*unified_neg_cache;
232 #endif /* UNIFIED_CACHE */
233 
234 int				 dns64_present;
235 int				 available_afs = HAVE_IPV4 | HAVE_IPV6;
236 
237 static const char * const	 forward_transparent_zones[] = {
238 	/* RFC1918 */
239 	"10.in-addr.arpa. transparent",
240 	"16.172.in-addr.arpa. transparent",
241 	"17.172.in-addr.arpa. transparent",
242 	"18.172.in-addr.arpa. transparent",
243 	"19.172.in-addr.arpa. transparent",
244 	"20.172.in-addr.arpa. transparent",
245 	"21.172.in-addr.arpa. transparent",
246 	"22.172.in-addr.arpa. transparent",
247 	"23.172.in-addr.arpa. transparent",
248 	"24.172.in-addr.arpa. transparent",
249 	"25.172.in-addr.arpa. transparent",
250 	"26.172.in-addr.arpa. transparent",
251 	"27.172.in-addr.arpa. transparent",
252 	"28.172.in-addr.arpa. transparent",
253 	"29.172.in-addr.arpa. transparent",
254 	"30.172.in-addr.arpa. transparent",
255 	"31.172.in-addr.arpa. transparent",
256 	"168.192.in-addr.arpa. transparent",
257 
258 	/* RFC3330 */
259 	"0.in-addr.arpa. transparent",
260 	"254.169.in-addr.arpa. transparent",
261 	"2.0.192.in-addr.arpa. transparent",
262 	"100.51.198.in-addr.arpa. transparent",
263 	"113.0.203.in-addr.arpa. transparent",
264 	"255.255.255.255.in-addr.arpa. transparent",
265 
266 	/* RFC6598 */
267 	"64.100.in-addr.arpa. transparent",
268 	"65.100.in-addr.arpa. transparent",
269 	"66.100.in-addr.arpa. transparent",
270 	"67.100.in-addr.arpa. transparent",
271 	"68.100.in-addr.arpa. transparent",
272 	"69.100.in-addr.arpa. transparent",
273 	"70.100.in-addr.arpa. transparent",
274 	"71.100.in-addr.arpa. transparent",
275 	"72.100.in-addr.arpa. transparent",
276 	"73.100.in-addr.arpa. transparent",
277 	"74.100.in-addr.arpa. transparent",
278 	"75.100.in-addr.arpa. transparent",
279 	"76.100.in-addr.arpa. transparent",
280 	"77.100.in-addr.arpa. transparent",
281 	"78.100.in-addr.arpa. transparent",
282 	"79.100.in-addr.arpa. transparent",
283 	"80.100.in-addr.arpa. transparent",
284 	"81.100.in-addr.arpa. transparent",
285 	"82.100.in-addr.arpa. transparent",
286 	"83.100.in-addr.arpa. transparent",
287 	"84.100.in-addr.arpa. transparent",
288 	"85.100.in-addr.arpa. transparent",
289 	"86.100.in-addr.arpa. transparent",
290 	"87.100.in-addr.arpa. transparent",
291 	"88.100.in-addr.arpa. transparent",
292 	"89.100.in-addr.arpa. transparent",
293 	"90.100.in-addr.arpa. transparent",
294 	"91.100.in-addr.arpa. transparent",
295 	"92.100.in-addr.arpa. transparent",
296 	"93.100.in-addr.arpa. transparent",
297 	"94.100.in-addr.arpa. transparent",
298 	"95.100.in-addr.arpa. transparent",
299 	"96.100.in-addr.arpa. transparent",
300 	"97.100.in-addr.arpa. transparent",
301 	"98.100.in-addr.arpa. transparent",
302 	"99.100.in-addr.arpa. transparent",
303 	"100.100.in-addr.arpa. transparent",
304 	"101.100.in-addr.arpa. transparent",
305 	"102.100.in-addr.arpa. transparent",
306 	"103.100.in-addr.arpa. transparent",
307 	"104.100.in-addr.arpa. transparent",
308 	"105.100.in-addr.arpa. transparent",
309 	"106.100.in-addr.arpa. transparent",
310 	"107.100.in-addr.arpa. transparent",
311 	"108.100.in-addr.arpa. transparent",
312 	"109.100.in-addr.arpa. transparent",
313 	"110.100.in-addr.arpa. transparent",
314 	"111.100.in-addr.arpa. transparent",
315 	"112.100.in-addr.arpa. transparent",
316 	"113.100.in-addr.arpa. transparent",
317 	"114.100.in-addr.arpa. transparent",
318 	"115.100.in-addr.arpa. transparent",
319 	"116.100.in-addr.arpa. transparent",
320 	"117.100.in-addr.arpa. transparent",
321 	"118.100.in-addr.arpa. transparent",
322 	"119.100.in-addr.arpa. transparent",
323 	"120.100.in-addr.arpa. transparent",
324 	"121.100.in-addr.arpa. transparent",
325 	"122.100.in-addr.arpa. transparent",
326 	"123.100.in-addr.arpa. transparent",
327 	"124.100.in-addr.arpa. transparent",
328 	"125.100.in-addr.arpa. transparent",
329 	"126.100.in-addr.arpa. transparent",
330 	"127.100.in-addr.arpa. transparent",
331 
332 	/* RFC4291 */
333 	"0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0."
334 	"ip6.arpa. transparent",
335 
336 	/* RFC4193 */
337 	"D.F.ip6.arpa. transparent",
338 
339 	/* RFC4291 */
340 	"8.E.F.ip6.arpa. transparent",
341 	"9.E.F.ip6.arpa. transparent",
342 	"A.E.F.ip6.arpa. transparent",
343 	"B.E.F.ip6.arpa. transparent",
344 
345 	/* RFC3849 */
346 	"8.B.D.0.1.0.0.2.ip6.arpa. transparent",
347 
348 	/* RFC8375 */
349 	"home.arpa. transparent",
350 };
351 
352 const char	 bogus_past[]	= "validation failure <. NS IN>: signature "
353 				  "expired";
354 const char	 bogus_future[]	= "validation failure <. NS IN>: signature "
355 				  "before inception date";
356 
357 void
358 resolver_sig_handler(int sig, short event, void *arg)
359 {
360 	/*
361 	 * Normal signal handler rules don't apply because libevent
362 	 * decouples for us.
363 	 */
364 
365 	switch (sig) {
366 	case SIGINT:
367 	case SIGTERM:
368 		resolver_shutdown();
369 	default:
370 		fatalx("unexpected signal");
371 	}
372 }
373 
374 void
375 resolver(int debug, int verbose)
376 {
377 	struct event		 ev_sigint, ev_sigterm;
378 	struct passwd		*pw;
379 	struct timeval		 tv = {DECAY_PERIOD, 0};
380 	struct alloc_cache	 cache_alloc_test;
381 
382 	resolver_conf = config_new_empty();
383 
384 	log_init(debug, LOG_DAEMON);
385 	log_setverbose(verbose);
386 
387 	if ((pw = getpwnam(UNWIND_USER)) == NULL)
388 		fatal("getpwnam");
389 
390 	setproctitle("%s", "resolver");
391 	log_procinit("resolver");
392 
393 	if (setgroups(1, &pw->pw_gid) ||
394 	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
395 	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
396 		fatal("can't drop privileges");
397 
398 	if (unveil(TLS_DEFAULT_CA_CERT_FILE, "r") == -1)
399 		fatal("unveil %s", TLS_DEFAULT_CA_CERT_FILE);
400 
401 	if (pledge("stdio inet dns rpath recvfd", NULL) == -1)
402 		fatal("pledge");
403 
404 	ev_base = event_init();
405 
406 	/* Setup signal handler(s). */
407 	signal_set(&ev_sigint, SIGINT, resolver_sig_handler, NULL);
408 	signal_set(&ev_sigterm, SIGTERM, resolver_sig_handler, NULL);
409 	signal_add(&ev_sigint, NULL);
410 	signal_add(&ev_sigterm, NULL);
411 	signal(SIGPIPE, SIG_IGN);
412 	signal(SIGHUP, SIG_IGN);
413 
414 	/* Setup pipe and event handler to the main process. */
415 	if ((iev_main = malloc(sizeof(struct imsgev))) == NULL)
416 		fatal(NULL);
417 
418 	if (imsgbuf_init(&iev_main->ibuf, 3) == -1)
419 		fatal(NULL);
420 	imsgbuf_allow_fdpass(&iev_main->ibuf);
421 	iev_main->handler = resolver_dispatch_main;
422 
423 	/* Setup event handlers. */
424 	iev_main->events = EV_READ;
425 	event_set(&iev_main->ev, iev_main->ibuf.fd, iev_main->events,
426 	    iev_main->handler, iev_main);
427 	event_add(&iev_main->ev, NULL);
428 
429 	evtimer_set(&trust_anchor_timer, trust_anchor_timo, NULL);
430 	evtimer_set(&decay_timer, decay_latest_histograms, NULL);
431 	evtimer_add(&decay_timer, &tv);
432 
433 	clock_gettime(CLOCK_MONOTONIC, &last_network_change);
434 
435 	alloc_init(&cache_alloc_test, NULL, 0);
436 	if (cache_alloc_test.max_reg_blocks != 10)
437 		fatalx("local libunbound/util/alloc.c diff lost");
438 	alloc_clear(&cache_alloc_test);
439 
440 #ifdef UNIFIED_CACHE
441 	setup_unified_caches();
442 #endif /* UNIFIED_CACHE */
443 
444 	TAILQ_INIT(&autoconf_forwarder_list);
445 	TAILQ_INIT(&trust_anchors);
446 	TAILQ_INIT(&new_trust_anchors);
447 	TAILQ_INIT(&running_queries);
448 
449 	event_dispatch();
450 
451 	resolver_shutdown();
452 }
453 
454 __dead void
455 resolver_shutdown(void)
456 {
457 	/* Close pipes. */
458 	imsgbuf_clear(&iev_frontend->ibuf);
459 	close(iev_frontend->ibuf.fd);
460 	imsgbuf_clear(&iev_main->ibuf);
461 	close(iev_main->ibuf.fd);
462 
463 	config_clear(resolver_conf);
464 
465 	free(iev_frontend);
466 	free(iev_main);
467 
468 	log_info("resolver exiting");
469 	exit(0);
470 }
471 
472 int
473 resolver_imsg_compose_main(int type, pid_t pid, void *data, uint16_t datalen)
474 {
475 	return (imsg_compose_event(iev_main, type, 0, pid, -1, data, datalen));
476 }
477 
478 int
479 resolver_imsg_compose_frontend(int type, pid_t pid, void *data,
480     uint16_t datalen)
481 {
482 	return (imsg_compose_event(iev_frontend, type, 0, pid, -1,
483 	    data, datalen));
484 }
485 
486 void
487 resolver_dispatch_frontend(int fd, short event, void *bula)
488 {
489 	struct imsgev		*iev = bula;
490 	struct imsgbuf		*ibuf;
491 	struct imsg		 imsg;
492 	struct query_imsg	*query_imsg;
493 	ssize_t			 n;
494 	int			 shut = 0, verbose, i, new_available_afs;
495 	char			*ta;
496 
497 	ibuf = &iev->ibuf;
498 
499 	if (event & EV_READ) {
500 		if ((n = imsgbuf_read(ibuf)) == -1)
501 			fatal("imsgbuf_read error");
502 		if (n == 0)	/* Connection closed. */
503 			shut = 1;
504 	}
505 	if (event & EV_WRITE) {
506 		if (imsgbuf_write(ibuf) == -1) {
507 			if (errno == EPIPE)	/* Connection closed. */
508 				shut = 1;
509 			else
510 				fatal("imsgbuf_write");
511 		}
512 	}
513 
514 	for (;;) {
515 		if ((n = imsg_get(ibuf, &imsg)) == -1)
516 			fatal("%s: imsg_get error", __func__);
517 		if (n == 0)	/* No more messages. */
518 			break;
519 
520 		switch (imsg.hdr.type) {
521 		case IMSG_CTL_LOG_VERBOSE:
522 			if (IMSG_DATA_SIZE(imsg) != sizeof(verbose))
523 				fatalx("%s: IMSG_CTL_LOG_VERBOSE wrong length: "
524 				    "%lu", __func__,
525 				    IMSG_DATA_SIZE(imsg));
526 			memcpy(&verbose, imsg.data, sizeof(verbose));
527 			if (log_getdebug() && (log_getverbose() & OPT_VERBOSE3)
528 			    != (verbose & OPT_VERBOSE3))
529 				restart_ub_resolvers(0);
530 			log_setverbose(verbose);
531 			break;
532 		case IMSG_QUERY:
533 			if (IMSG_DATA_SIZE(imsg) != sizeof(*query_imsg))
534 				fatalx("%s: IMSG_QUERY wrong length: %lu",
535 				    __func__, IMSG_DATA_SIZE(imsg));
536 			if ((query_imsg = malloc(sizeof(*query_imsg))) ==
537 			    NULL) {
538 				log_warn("cannot allocate query");
539 				break;
540 			}
541 			memcpy(query_imsg, imsg.data, sizeof(*query_imsg));
542 			setup_query(query_imsg);
543 			break;
544 		case IMSG_CTL_STATUS:
545 			if (IMSG_DATA_SIZE(imsg) != 0)
546 				fatalx("%s: IMSG_CTL_STATUS wrong length: %lu",
547 				    __func__, IMSG_DATA_SIZE(imsg));
548 			show_status(imsg.hdr.pid);
549 			break;
550 		case IMSG_CTL_AUTOCONF:
551 			if (IMSG_DATA_SIZE(imsg) != 0)
552 				fatalx("%s: IMSG_CTL_AUTOCONF wrong length: "
553 				    "%lu", __func__, IMSG_DATA_SIZE(imsg));
554 			show_autoconf(imsg.hdr.pid);
555 			break;
556 		case IMSG_CTL_MEM:
557 			if (IMSG_DATA_SIZE(imsg) != 0)
558 				fatalx("%s: IMSG_CTL_MEM wrong length: "
559 				    "%lu", __func__, IMSG_DATA_SIZE(imsg));
560 			show_mem(imsg.hdr.pid);
561 			break;
562 		case IMSG_NEW_TA:
563 			if (((char *)imsg.data)[IMSG_DATA_SIZE(imsg) - 1] !=
564 			    '\0')
565 				fatalx("Invalid trust anchor");
566 			ta = imsg.data;
567 			add_new_ta(&new_trust_anchors, ta);
568 			break;
569 		case IMSG_NEW_TAS_ABORT:
570 			free_tas(&new_trust_anchors);
571 			break;
572 		case IMSG_NEW_TAS_DONE:
573 			if (merge_tas(&new_trust_anchors, &trust_anchors))
574 				restart_ub_resolvers(1);
575 			break;
576 		case IMSG_NETWORK_CHANGED:
577 			clock_gettime(CLOCK_MONOTONIC, &last_network_change);
578 			schedule_recheck_all_resolvers();
579 			for (i = 0; i < UW_RES_NONE; i++) {
580 				if (resolvers[i] == NULL)
581 					continue;
582 				memset(resolvers[i]->latest_histogram, 0,
583 				    sizeof(resolvers[i]->latest_histogram));
584 				resolvers[i]->median = histogram_median(
585 				    resolvers[i]->latest_histogram);
586 			}
587 
588 			break;
589 		case IMSG_REPLACE_DNS:
590 			if (IMSG_DATA_SIZE(imsg) !=
591 			    sizeof(struct imsg_rdns_proposal))
592 				fatalx("%s: IMSG_REPLACE_DNS wrong length: %lu",
593 				    __func__, IMSG_DATA_SIZE(imsg));
594 			replace_autoconf_forwarders((struct
595 			    imsg_rdns_proposal *)imsg.data);
596 			break;
597 		case IMSG_CHANGE_AFS:
598 			if (IMSG_DATA_SIZE(imsg) !=
599 			    sizeof(new_available_afs))
600 				fatalx("%s: IMSG_CHANGE_AFS wrong length: %lu",
601 				    __func__, IMSG_DATA_SIZE(imsg));
602 			memcpy(&new_available_afs, imsg.data,
603 			    sizeof(new_available_afs));
604 			if (new_available_afs != available_afs) {
605 				available_afs = new_available_afs;
606 				restart_ub_resolvers(1);
607 			}
608 			break;
609 		default:
610 			log_debug("%s: unexpected imsg %d", __func__,
611 			    imsg.hdr.type);
612 			break;
613 		}
614 		imsg_free(&imsg);
615 	}
616 	if (!shut)
617 		imsg_event_add(iev);
618 	else {
619 		/* This pipe is dead. Remove its event handler. */
620 		event_del(&iev->ev);
621 		event_loopexit(NULL);
622 	}
623 }
624 
625 void
626 resolver_dispatch_main(int fd, short event, void *bula)
627 {
628 	static struct uw_conf	*nconf;
629 	struct imsg		 imsg;
630 	struct imsgev		*iev = bula;
631 	struct imsgbuf		*ibuf;
632 	ssize_t			 n;
633 	int			 shut = 0, i, *restart;
634 
635 	ibuf = &iev->ibuf;
636 
637 	if (event & EV_READ) {
638 		if ((n = imsgbuf_read(ibuf)) == -1)
639 			fatal("imsgbuf_read error");
640 		if (n == 0)	/* Connection closed. */
641 			shut = 1;
642 	}
643 	if (event & EV_WRITE) {
644 		if (imsgbuf_write(ibuf) == -1) {
645 			if (errno == EPIPE)	/* Connection closed. */
646 				shut = 1;
647 			else
648 				fatal("imsgbuf_write");
649 		}
650 	}
651 
652 	for (;;) {
653 		if ((n = imsg_get(ibuf, &imsg)) == -1)
654 			fatal("%s: imsg_get error", __func__);
655 		if (n == 0)	/* No more messages. */
656 			break;
657 
658 		switch (imsg.hdr.type) {
659 		case IMSG_SOCKET_IPC_FRONTEND:
660 			/*
661 			 * Setup pipe and event handler to the frontend
662 			 * process.
663 			 */
664 			if (iev_frontend)
665 				fatalx("%s: received unexpected imsg fd "
666 				    "to resolver", __func__);
667 
668 			if ((fd = imsg_get_fd(&imsg)) == -1)
669 				fatalx("%s: expected to receive imsg fd to "
670 				   "resolver but didn't receive any", __func__);
671 
672 			iev_frontend = malloc(sizeof(struct imsgev));
673 			if (iev_frontend == NULL)
674 				fatal(NULL);
675 
676 			if (imsgbuf_init(&iev_frontend->ibuf, fd) == -1)
677 				fatal(NULL);
678 			iev_frontend->handler = resolver_dispatch_frontend;
679 			iev_frontend->events = EV_READ;
680 
681 			event_set(&iev_frontend->ev, iev_frontend->ibuf.fd,
682 			    iev_frontend->events, iev_frontend->handler,
683 			    iev_frontend);
684 			event_add(&iev_frontend->ev, NULL);
685 			break;
686 
687 		case IMSG_STARTUP:
688 			if (pledge("stdio inet dns rpath", NULL) == -1)
689 				fatal("pledge");
690 			break;
691 		case IMSG_RECONF_CONF:
692 		case IMSG_RECONF_BLOCKLIST_FILE:
693 		case IMSG_RECONF_FORWARDER:
694 		case IMSG_RECONF_DOT_FORWARDER:
695 		case IMSG_RECONF_FORCE:
696 			imsg_receive_config(&imsg, &nconf);
697 			break;
698 		case IMSG_RECONF_END:
699 			if (nconf == NULL)
700 				fatalx("%s: IMSG_RECONF_END without "
701 				    "IMSG_RECONF_CONF", __func__);
702 			restart = resolvers_to_restart(resolver_conf, nconf);
703 			merge_config(resolver_conf, nconf);
704 			nconf = NULL;
705 			for (i = 0; i < UW_RES_NONE; i++)
706 				if (restart[i])
707 					new_resolver(i, UNKNOWN);
708 			break;
709 		default:
710 			log_debug("%s: unexpected imsg %d", __func__,
711 			    imsg.hdr.type);
712 			break;
713 		}
714 		imsg_free(&imsg);
715 	}
716 	if (!shut)
717 		imsg_event_add(iev);
718 	else {
719 		/* This pipe is dead. Remove its event handler. */
720 		event_del(&iev->ev);
721 		event_loopexit(NULL);
722 	}
723 }
724 
725 int
726 sort_resolver_types(struct resolver_preference *dst)
727 {
728 	memcpy(dst, &resolver_conf->res_pref, sizeof(*dst));
729 
730 	/*
731 	 * Sort by resolver quality, validating > resolving etc.
732 	 * mergesort is stable and keeps the configured preference order
733 	 */
734 	return mergesort(dst->types, dst->len, sizeof(dst->types[0]),
735 	    resolver_cmp);
736 }
737 
738 void
739 setup_query(struct query_imsg *query_imsg)
740 {
741 	struct running_query	*rq;
742 	struct uw_resolver	*res;
743 
744 	if (find_running_query(query_imsg->id) != NULL) {
745 		free(query_imsg);
746 		return;
747 	}
748 
749 	if ((rq = calloc(1, sizeof(*rq))) == NULL) {
750 		log_warn(NULL);
751 		free(query_imsg);
752 		return;
753 	}
754 
755 	clock_gettime(CLOCK_MONOTONIC, &rq->tp);
756 	rq->query_imsg = query_imsg;
757 	rq->next_resolver = 0;
758 
759 	find_force(&resolver_conf->force, query_imsg->qname, &res);
760 
761 	if (res != NULL && res->state != DEAD && res->state != UNKNOWN) {
762 		rq->res_pref.len = 1;
763 		rq->res_pref.types[0] = res->type;
764 	} else if (sort_resolver_types(&rq->res_pref) == -1) {
765 		log_warn("mergesort");
766 		free(rq->query_imsg);
767 		free(rq);
768 		return;
769 	}
770 
771 	evtimer_set(&rq->timer_ev, try_resolver_timo, rq);
772 
773 	TAILQ_INSERT_TAIL(&running_queries, rq, entry);
774 	try_next_resolver(rq);
775 }
776 
777 struct running_query *
778 find_running_query(uint64_t id)
779 {
780 	struct running_query	*rq;
781 
782 	TAILQ_FOREACH(rq, &running_queries, entry) {
783 		if (rq->query_imsg->id == id)
784 			return rq;
785 	}
786 	return NULL;
787 }
788 
789 void
790 try_resolver_timo(int fd, short events, void *arg)
791 {
792 	struct running_query	*rq = arg;
793 
794 	try_next_resolver(rq);
795 }
796 
797 int
798 try_next_resolver(struct running_query *rq)
799 {
800 	struct uw_resolver	*res = NULL;
801 	struct query_imsg	*query_imsg = NULL;
802 	struct timespec		 tp, elapsed;
803 	struct timeval		 tv = {0, 0};
804 	int64_t			 ms;
805 	int			 i;
806 
807 	while(rq->next_resolver < rq->res_pref.len &&
808 	    ((res = resolvers[rq->res_pref.types[rq->next_resolver]]) == NULL ||
809 	    res->state == DEAD || res->state == UNKNOWN))
810 		rq->next_resolver++;
811 
812 	if (res == NULL) {
813 		evtimer_del(&rq->timer_ev); /* we are not going to find one */
814 		log_debug("%s: could not find (any more) working resolvers",
815 		    __func__);
816 		goto err;
817 	}
818 
819 	rq->next_resolver++;
820 	clock_gettime(CLOCK_MONOTONIC, &tp);
821 	timespecsub(&tp, &rq->tp, &elapsed);
822 	ms = elapsed.tv_sec * 1000 + elapsed.tv_nsec / 1000000;
823 
824 	log_debug("%s[+%lldms]: %s[%s] %s", __func__, ms,
825 	    uw_resolver_type_str[res->type], uw_resolver_state_str[res->state],
826 	    query_imsg2str(rq->query_imsg));
827 
828 	if ((query_imsg = malloc(sizeof(*query_imsg))) == NULL) {
829 		log_warnx("%s", __func__);
830 		goto err;
831 	}
832 	memcpy(query_imsg, rq->query_imsg, sizeof(*query_imsg));
833 	clock_gettime(CLOCK_MONOTONIC, &query_imsg->tp);
834 
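	/*
	 * Arm the timer that tries the next resolver type: wait for this
	 * resolver's median latency (capped at NEXT_RES_MAX), plus
	 * PREF_RESOLVER_MEDIAN_SKEW if it is the most preferred type that
	 * is currently available.
	 */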
835 	ms = res->median;
836 	if (ms > NEXT_RES_MAX)
837 		ms = NEXT_RES_MAX;
838 
839 	/* skip over unavailable resolvers in preferences */
840 	for (i = 0; i < resolver_conf->res_pref.len &&
841 		 resolvers[resolver_conf->res_pref.types[i]] == NULL; i++)
842 		;
843 	if (res->type == resolver_conf->res_pref.types[i])
844 		tv.tv_usec = 1000 * (PREF_RESOLVER_MEDIAN_SKEW + ms);
845 	else
846 		tv.tv_usec = 1000 * ms;
847 
848 	while (tv.tv_usec >= 1000000) {
849 		tv.tv_sec++;
850 		tv.tv_usec -= 1000000;
851 	}
852 	evtimer_add(&rq->timer_ev, &tv);
853 
854 	rq->running++;
855 	if (resolve(res, query_imsg->qname, query_imsg->t,
856 	    query_imsg->c, query_imsg, resolve_done) != 0) {
857 		rq->running--;
858 		goto err;
859 	}
860 
861 	return 0;
862 
863  err:
864 	free(query_imsg);
865 	if (rq->running == 0) {
866 		TAILQ_REMOVE(&running_queries, rq, entry);
867 		evtimer_del(&rq->timer_ev);
868 		free(rq->query_imsg);
869 		free(rq);
870 	}
871 	return 1;
872 }
873 
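/*
 * Start an asynchronous lookup on the given resolver: via asr for
 * UW_RES_ASR, via the libunbound event API for all other types.  A
 * reference is held on the resolver while the query is in flight.
 */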
874 int
875 resolve(struct uw_resolver *res, const char* name, int rrtype, int rrclass,
876     void *mydata, resolve_cb_t cb)
877 {
878 	struct resolver_cb_data	*cb_data = NULL;
879 	struct asr_query	*aq = NULL;
880 	int			 err;
881 
882 	resolver_ref(res);
883 
884 	if ((cb_data = malloc(sizeof(*cb_data))) == NULL)
885 		goto err;
886 	cb_data->cb = cb;
887 	cb_data->data = mydata;
888 	cb_data->res = res;
889 
890 	switch(res->type) {
891 	case UW_RES_ASR:
892 		if ((aq = res_query_async(name, rrclass, rrtype, res->asr_ctx))
893 		    == NULL) {
894 			log_warn("%s: res_query_async", __func__);
895 			goto err;
896 		}
897 		if (event_asr_run(aq, asr_resolve_done, cb_data) == NULL) {
898 			log_warn("%s: event_asr_run", __func__);
899 			goto err;
900 		}
901 		break;
902 	case UW_RES_RECURSOR:
903 	case UW_RES_AUTOCONF:
904 	case UW_RES_ODOT_AUTOCONF:
905 	case UW_RES_FORWARDER:
906 	case UW_RES_ODOT_FORWARDER:
907 	case UW_RES_DOT:
908 		if ((err = ub_resolve_event(res->ctx, name,  rrtype, rrclass,
909 		    cb_data, ub_resolve_done, NULL)) != 0) {
910 			log_warn("%s: ub_resolve_event: err: %d, %s", __func__,
911 			    err, ub_strerror(err));
912 			goto err;
913 		}
914 		break;
915 	default:
916 		fatalx("unknown resolver type %d", res->type);
917 		break;
918 	}
919 
920 	return 0;
921  err:
922 	free(cb_data);
923 	free(aq);
924 	resolver_unref(res);
925 	return 1;
926 }
927 
928 void
929 resolve_done(struct uw_resolver *res, void *arg, int rcode,
930     void *answer_packet, int answer_len, int sec, char *why_bogus)
931 {
932 	struct uw_resolver	*tmp_res;
933 	struct ub_result	*result = NULL;
934 	sldns_buffer		*buf = NULL;
935 	struct regional		*region = NULL;
936 	struct query_imsg	*query_imsg;
937 	struct answer_header	*answer_header;
938 	struct running_query	*rq;
939 	struct timespec		 tp, elapsed;
940 	int64_t			 ms;
941 	size_t			 i;
942 	int			 running_res, asr_pref_pos, force_acceptbogus;
943 	char			*str;
944 	char			 rcode_buf[16];
945 	uint8_t			*p, *data;
946 	uint8_t			 answer_imsg[MAX_IMSGSIZE - IMSG_HEADER_SIZE];
947 
948 	clock_gettime(CLOCK_MONOTONIC, &tp);
949 
950 	query_imsg = (struct query_imsg *)arg;
951 
952 	answer_header = (struct answer_header *)answer_imsg;
953 	data = answer_imsg + sizeof(*answer_header);
954 	answer_header->id = query_imsg->id;
955 	answer_header->srvfail = 0;
956 	answer_header->answer_len = 0;
957 
958 	timespecsub(&tp, &query_imsg->tp, &elapsed);
959 
960 	ms = elapsed.tv_sec * 1000 + elapsed.tv_nsec / 1000000;
961 
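	/* account the measured latency in the matching histogram bucket */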
962 	for (i = 0; i < nitems(histogram_limits); i++) {
963 		if (ms < histogram_limits[i])
964 			break;
965 	}
966 	if (i == nitems(histogram_limits))
967 		log_debug("histogram bucket error");
968 	else {
969 		res->histogram[i]++;
970 		/* latest_histogram is in units of 1000 to avoid rounding
971 		   down when decaying */
972 		res->latest_histogram[i] += 1000;
973 		res->median = histogram_median(res->latest_histogram);
974 	}
975 
976 	if ((rq = find_running_query(query_imsg->id)) == NULL)
977 		goto out;
978 
979 	running_res = --rq->running;
980 
981 	if (rcode == LDNS_RCODE_SERVFAIL) {
982 		if (res->stop != 1)
983 			check_resolver(res);
984 		goto servfail;
985 	}
986 
987 	if (answer_len < LDNS_HEADER_SIZE) {
988 		log_warnx("bad packet: too short");
989 		goto servfail;
990 	}
991 
992 	if (answer_len > UINT16_MAX) {
993 		log_warnx("bad packet: too large: %d - %s", answer_len,
994 		    query_imsg2str(query_imsg));
995 		goto servfail;
996 	}
997 	answer_header->answer_len = answer_len;
998 
999 	if ((result = calloc(1, sizeof(*result))) == NULL)
1000 		goto servfail;
1001 	if ((buf = sldns_buffer_new(answer_len)) == NULL)
1002 		goto servfail;
1003 	if ((region = regional_create()) == NULL)
1004 		goto servfail;
1005 
1006 	result->rcode = LDNS_RCODE_SERVFAIL;
1007 
1008 	sldns_buffer_clear(buf);
1009 	sldns_buffer_write(buf, answer_packet, answer_len);
1010 	sldns_buffer_flip(buf);
1011 	libworker_enter_result(result, buf, region, sec);
1012 	result->answer_packet = NULL;
1013 	result->answer_len = 0;
1014 
1015 	sldns_wire2str_rcode_buf(result->rcode, rcode_buf, sizeof(rcode_buf));
1016 	log_debug("%s[%s]: %s rcode: %s[%d], elapsed: %lldms, running: %d",
1017 	    __func__, uw_resolver_type_str[res->type],
1018 	    query_imsg2str(query_imsg), rcode_buf, result->rcode, ms,
1019 	    running_query_cnt());
1020 
1021 	force_acceptbogus = find_force(&resolver_conf->force, query_imsg->qname,
1022 	    &tmp_res);
1023 	if (tmp_res != NULL && tmp_res->type != res->type)
1024 		force_acceptbogus = 0;
1025 
1026 	timespecsub(&tp, &last_network_change, &elapsed);
1027 	if (sec != SECURE && elapsed.tv_sec < DOUBT_NXDOMAIN_SEC &&
1028 	    !force_acceptbogus && res->type != UW_RES_ASR &&
1029 	    (result->rcode == LDNS_RCODE_NXDOMAIN || sec == BOGUS)) {
1030 		/*
1031 		 * Doubt NXDOMAIN or BOGUS if we just switched networks, we
1032 		 * might be behind a captive portal.
1033 		 */
1034 		log_debug("%s: doubt NXDOMAIN or BOGUS from %s, network change"
1035 		    " %llds ago", __func__, uw_resolver_type_str[res->type],
1036 		    elapsed.tv_sec);
1037 
1038 		/* search for ASR */
1039 		asr_pref_pos = -1;
1040 		for (i = 0; i < (size_t)rq->res_pref.len; i++)
1041 			if (rq->res_pref.types[i] == UW_RES_ASR) {
1042 				asr_pref_pos = i;
1043 				break;
1044 			}
1045 
1046 		if (asr_pref_pos != -1 && resolvers[UW_RES_ASR] != NULL) {
1047 			/* go to ASR if not yet scheduled */
1048 			if (asr_pref_pos >= rq->next_resolver) {
1049 				rq->next_resolver = asr_pref_pos;
1050 				try_next_resolver(rq);
1051 			}
1052 			goto out;
1053 		}
1054 		log_debug("%s: using NXDOMAIN or BOGUS, couldn't find working "
1055 		    "ASR", __func__);
1056 	}
1057 
1058 	if (log_getverbose() & OPT_VERBOSE2 && (str =
1059 	    sldns_wire2str_pkt(answer_packet, answer_len)) != NULL) {
1060 		log_debug("%s", str);
1061 		free(str);
1062 	}
1063 
1064 	if (result->rcode == LDNS_RCODE_SERVFAIL)
1065 		goto servfail;
1066 
1067 	if (sec == SECURE && res->state != VALIDATING && res->stop != -1)
1068 		check_resolver(res);
1069 
1070 	if (res->state == VALIDATING && sec == BOGUS) {
1071 		answer_header->bogus = !force_acceptbogus;
1072 		if (answer_header->bogus && why_bogus != NULL)
1073 			log_warnx("%s", why_bogus);
1074 	} else
1075 		answer_header->bogus = 0;
1076 
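	/* hand the answer to the frontend, split into imsg-sized chunks */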
1077 	p = answer_packet;
1078 	do {
1079 		int len;
1080 
1081 		if ((size_t)answer_len > sizeof(answer_imsg) -
1082 		    sizeof(*answer_header))
1083 			len = sizeof(answer_imsg) - sizeof(*answer_header);
1084 		else
1085 			len = answer_len;
1086 		memcpy(data, p, len);
1087 		if (resolver_imsg_compose_frontend(IMSG_ANSWER, 0,
1088 		    &answer_imsg, sizeof(*answer_header) + len) == -1)
1089 			fatalx("IMSG_ANSWER failed for \"%s\"",
1090 			    query_imsg2str(query_imsg));
1091 		answer_len -= len;
1092 		p += len;
1093 	} while (answer_len > 0);
1094 
1095 	TAILQ_REMOVE(&running_queries, rq, entry);
1096 	evtimer_del(&rq->timer_ev);
1097 	free(rq->query_imsg);
1098 	free(rq);
1099 	goto out;
1100 
1101  servfail:
1102 	/* try_next_resolver() might free rq */
1103 	if (try_next_resolver(rq) != 0 && running_res == 0) {
1104 		/* we are the last one, send SERVFAIL */
1105 		answer_header->srvfail = 1;
1106 		resolver_imsg_compose_frontend(IMSG_ANSWER, 0,
1107 		    answer_imsg, sizeof(*answer_header));
1108 	}
1109  out:
1110 	free(query_imsg);
1111 	sldns_buffer_free(buf);
1112 	regional_destroy(region);
1113 	ub_resolve_free(result);
1114 }
1115 
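/*
 * Replace the resolver of the given type: the old instance is freed (or
 * scheduled for freeing) and a new one is only created if the type is
 * enabled and its prerequisites (forwarders, trust anchors) are present.
 */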
1116 void
1117 new_resolver(enum uw_resolver_type type, enum uw_resolver_state state)
1118 {
1119 	free_resolver(resolvers[type]);
1120 	resolvers[type] = NULL;
1121 
1122 	if (!resolver_conf->enabled_resolvers[type])
1123 		return;
1124 
1125 	switch (type) {
1126 	case UW_RES_ASR:
1127 	case UW_RES_AUTOCONF:
1128 	case UW_RES_ODOT_AUTOCONF:
1129 		if (TAILQ_EMPTY(&autoconf_forwarder_list))
1130 			return;
1131 		break;
1132 	case UW_RES_RECURSOR:
1133 		break;
1134 	case UW_RES_FORWARDER:
1135 	case UW_RES_ODOT_FORWARDER:
1136 		if (TAILQ_EMPTY(&resolver_conf->uw_forwarder_list))
1137 			return;
1138 		break;
1139 	case UW_RES_DOT:
1140 		if (TAILQ_EMPTY(&resolver_conf->uw_dot_forwarder_list))
1141 			return;
1142 		break;
1143 	case UW_RES_NONE:
1144 		fatalx("cannot create UW_RES_NONE resolver");
1145 	}
1146 
1147 	switch (type) {
1148 	case UW_RES_RECURSOR:
1149 	case UW_RES_AUTOCONF:
1150 	case UW_RES_ODOT_AUTOCONF:
1151 	case UW_RES_FORWARDER:
1152 	case UW_RES_ODOT_FORWARDER:
1153 	case UW_RES_DOT:
1154 		if (TAILQ_EMPTY(&trust_anchors))
1155 			return;
1156 		break;
1157 	case UW_RES_ASR:
1158 		break;
1159 	case UW_RES_NONE:
1160 		fatalx("cannot create UW_RES_NONE resolver");
1161 	}
1162 
1163 	if ((resolvers[type] = create_resolver(type)) == NULL)
1164 		return;
1165 
1166 	switch (state) {
1167 	case DEAD:
1168 	case UNKNOWN:
1169 		check_resolver(resolvers[type]);
1170 		break;
1171 	case VALIDATING:
1172 #ifdef UNIFIED_CACHE
1173 		set_unified_cache(resolvers[type]);
1174 #endif /* UNIFIED_CACHE */
1175 		/* FALLTHROUGH */
1176 	case RESOLVING:
1177 		resolvers[type]->state = state;
1178 		if (type == UW_RES_ASR)
1179 			check_dns64();
1180 		break;
1181 	}
1182 }
1183 
1184 #ifdef UNIFIED_CACHE
1185 void
1186 set_unified_cache(struct uw_resolver *res)
1187 {
1188 	if (res == NULL || res->ctx == NULL)
1189 		return;
1190 
1191 	if (res->ctx->env->msg_cache != NULL) {
1192 		/* XXX we are currently not using this */
1193 		if (res->ctx->env->msg_cache != unified_msg_cache ||
1194 		    res->ctx->env->rrset_cache != unified_rrset_cache ||
1195 		    res->ctx->env->key_cache != unified_key_cache ||
1196 		    res->ctx->env->neg_cache != unified_neg_cache)
1197 			fatalx("wrong unified cache set on resolver");
1198 		else
1199 			/* we are upgrading from UNKNOWN back to VALIDATING */
1200 			return;
1201 	}
1202 
1203 	res->ctx->env->msg_cache = unified_msg_cache;
1204 	res->ctx->env->rrset_cache = unified_rrset_cache;
1205 	res->ctx->env->key_cache = unified_key_cache;
1206 	res->ctx->env->neg_cache = unified_neg_cache;
1207 
1208 	context_finalize(res->ctx);
1209 
1210 	if (res->ctx->env->msg_cache != unified_msg_cache ||
1211 	    res->ctx->env->rrset_cache != unified_rrset_cache ||
1212 	    res->ctx->env->key_cache != unified_key_cache ||
1213 	    res->ctx->env->neg_cache != unified_neg_cache)
1214 		fatalx("failed to set unified caches, libunbound/validator/"
1215 		    "validator.c diff lost");
1216 }
1217 #endif /* UNIFIED_CACHE */
1218 
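/* libunbound options applied to every unbound context unwind creates */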
1219 static const struct {
1220 	const char *name;
1221 	const char *value;
1222 } options[] = {
1223 	{ "aggressive-nsec:", "yes" },
1224 	{ "fast-server-permil:", "950" },
1225 	{ "edns-buffer-size:", "1232" },
1226 	{ "target-fetch-policy:", "0 0 0 0 0" },
1227 	{ "outgoing-range:", "64" },
1228 	{ "val-max-restart:", "0" },
1229 	{ "infra-keep-probing:", "yes" },
1230 };
1231 
1232 struct uw_resolver *
1233 create_resolver(enum uw_resolver_type type)
1234 {
1235 	struct uw_resolver	*res;
1236 	struct trust_anchor	*ta;
1237 	size_t			 i;
1238 	int			 err;
1239 	char			*resolv_conf;
1240 
1241 	if ((res = calloc(1, sizeof(*res))) == NULL) {
1242 		log_warn("%s", __func__);
1243 		return (NULL);
1244 	}
1245 
1246 	res->type = type;
1247 	res->state = UNKNOWN;
1248 	res->check_tv.tv_sec = RESOLVER_CHECK_SEC;
1249 	res->check_tv.tv_usec = arc4random() % 1000000; /* modulo bias is ok */
1250 
1251 	switch (type) {
1252 	case UW_RES_ASR:
1253 		if (TAILQ_EMPTY(&autoconf_forwarder_list)) {
1254 			free(res);
1255 			return (NULL);
1256 		}
1257 		if ((resolv_conf = gen_resolv_conf()) == NULL) {
1258 			free(res);
1259 			log_warnx("could not create asr context");
1260 			return (NULL);
1261 		}
1262 		if ((res->asr_ctx = asr_resolver_from_string(resolv_conf)) ==
1263 		    NULL) {
1264 			free(res);
1265 			free(resolv_conf);
1266 			log_warnx("could not create asr context");
1267 			return (NULL);
1268 		}
1269 		free(resolv_conf);
1270 		break;
1271 	case UW_RES_RECURSOR:
1272 	case UW_RES_AUTOCONF:
1273 	case UW_RES_ODOT_AUTOCONF:
1274 	case UW_RES_FORWARDER:
1275 	case UW_RES_ODOT_FORWARDER:
1276 	case UW_RES_DOT:
1277 		if ((res->ctx = ub_ctx_create_event(ev_base)) == NULL) {
1278 			free(res);
1279 			log_warnx("could not create unbound context");
1280 			return (NULL);
1281 		}
1282 
1283 		ub_ctx_debuglevel(res->ctx, log_getverbose() & OPT_VERBOSE3 ?
1284 		    UB_LOG_VERBOSE : UB_LOG_BRIEF);
1285 
1286 		TAILQ_FOREACH(ta, &trust_anchors, entry) {
1287 			if ((err = ub_ctx_add_ta(res->ctx, ta->ta)) != 0) {
1288 				ub_ctx_delete(res->ctx);
1289 				free(res);
1290 				log_warnx("error adding trust anchor: %s",
1291 				    ub_strerror(err));
1292 				return (NULL);
1293 			}
1294 		}
1295 
1296 		for (i = 0; i < nitems(options); i++) {
1297 			if ((err = ub_ctx_set_option(res->ctx, options[i].name,
1298 			    options[i].value)) != 0) {
1299 				ub_ctx_delete(res->ctx);
1300 				free(res);
1301 				log_warnx("error setting %s: %s: %s",
1302 				    options[i].name, options[i].value,
1303 				    ub_strerror(err));
1304 				return (NULL);
1305 			}
1306 		}
1307 
1308 		if (!(available_afs & HAVE_IPV4)) {
1309 			if((err = ub_ctx_set_option(res->ctx, "do-ip4:",
1310 			    "no")) != 0) {
1311 				ub_ctx_delete(res->ctx);
1312 				free(res);
1313 				log_warnx("error setting do-ip4: no: %s",
1314 				    ub_strerror(err));
1315 				return (NULL);
1316 			}
1317 		}
1318 
1319 		if (!(available_afs & HAVE_IPV6)) {
1320 			if((err = ub_ctx_set_option(res->ctx, "do-ip6:",
1321 			    "no")) != 0) {
1322 				ub_ctx_delete(res->ctx);
1323 				free(res);
1324 				log_warnx("error setting do-ip6: no: %s",
1325 				    ub_strerror(err));
1326 				return (NULL);
1327 			}
1328 		}
1329 
1330 		if (!log_getdebug()) {
1331 			if((err = ub_ctx_set_option(res->ctx, "use-syslog:",
1332 			    "no")) != 0) {
1333 				ub_ctx_delete(res->ctx);
1334 				free(res);
1335 				log_warnx("error setting use-syslog: no: %s",
1336 				    ub_strerror(err));
1337 				return (NULL);
1338 			}
1339 			ub_ctx_debugout(res->ctx, NULL);
1340 		}
1341 
1342 		break;
1343 	default:
1344 		fatalx("unknown resolver type %d", type);
1345 		break;
1346 	}
1347 
1348 	evtimer_set(&res->check_ev, resolver_check_timo, res);
1349 
1350 	switch(res->type) {
1351 	case UW_RES_ASR:
1352 		break;
1353 	case UW_RES_RECURSOR:
1354 		break;
1355 	case UW_RES_AUTOCONF:
1356 		set_forwarders(res, &autoconf_forwarder_list, 0);
1357 		break;
1358 	case UW_RES_ODOT_AUTOCONF:
1359 		set_forwarders(res, &autoconf_forwarder_list, 853);
1360 		ub_ctx_set_option(res->ctx, "tls-cert-bundle:",
1361 		    TLS_DEFAULT_CA_CERT_FILE);
1362 		ub_ctx_set_tls(res->ctx, 1);
1363 		break;
1364 	case UW_RES_FORWARDER:
1365 		set_forwarders(res, &resolver_conf->uw_forwarder_list, 0);
1366 		break;
1367 	case UW_RES_ODOT_FORWARDER:
1368 		set_forwarders(res, &resolver_conf->uw_forwarder_list, 853);
1369 		ub_ctx_set_option(res->ctx, "tls-cert-bundle:",
1370 		    TLS_DEFAULT_CA_CERT_FILE);
1371 		ub_ctx_set_tls(res->ctx, 1);
1372 		break;
1373 	case UW_RES_DOT:
1374 		set_forwarders(res, &resolver_conf->uw_dot_forwarder_list, 0);
1375 		ub_ctx_set_option(res->ctx, "tls-cert-bundle:",
1376 		    TLS_DEFAULT_CA_CERT_FILE);
1377 		ub_ctx_set_tls(res->ctx, 1);
1378 		break;
1379 	default:
1380 		fatalx("unknown resolver type %d", type);
1381 		break;
1382 	}
1383 
1384 	/* for the forwarder cases allow AS112 and special-use zones */
1385 	switch(res->type) {
1386 	case UW_RES_AUTOCONF:
1387 	case UW_RES_ODOT_AUTOCONF:
1388 	case UW_RES_FORWARDER:
1389 	case UW_RES_ODOT_FORWARDER:
1390 	case UW_RES_DOT:
1391 		for (i = 0; i < nitems(forward_transparent_zones); i++) {
1392 			if((err = ub_ctx_set_option(res->ctx, "local-zone:",
1393 			    forward_transparent_zones[i])) != 0) {
1394 				ub_ctx_delete(res->ctx);
1395 				free(res);
1396 				log_warnx("error setting local-zone: %s: %s",
1397 				    forward_transparent_zones[i],
1398 				    ub_strerror(err));
1399 				return (NULL);
1400 			}
1401 		}
1402 		break;
1403 	default:
1404 		break;
1405 	}
1406 
1407 	return (res);
1408 }
1409 
1410 void
1411 free_resolver(struct uw_resolver *res)
1412 {
1413 	if (res == NULL)
1414 		return;
1415 
1416 	if (res->ref_cnt > 0)
1417 		res->stop = 1;
1418 	else {
1419 		evtimer_del(&res->check_ev);
1420 #ifdef UNIFIED_CACHE
1421 		if (res->ctx != NULL) {
1422 			if (res->ctx->env->msg_cache == unified_msg_cache) {
1423 				struct val_env	*val_env;
1424 
1425 				val_env = (struct val_env*)
1426 				    res->ctx->env->modinfo[val_id];
1427 				res->ctx->env->msg_cache = NULL;
1428 				res->ctx->env->rrset_cache = NULL;
1429 				val_env->kcache = NULL;
1430 				res->ctx->env->key_cache = NULL;
1431 				val_env->neg_cache = NULL;
1432 				res->ctx->env->neg_cache = NULL;
1433 			}
1434 		}
1435 #endif /* UNIFIED_CACHE */
1436 		ub_ctx_delete(res->ctx);
1437 		asr_resolver_free(res->asr_ctx);
1438 		free(res);
1439 	}
1440 }
1441 
1442 #ifdef UNIFIED_CACHE
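/*
 * Create a temporary libunbound context only to obtain its caches, then
 * detach them so they outlive the context and can be shared by all
 * validating resolvers.
 */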
1443 void
1444 setup_unified_caches(void)
1445 {
1446 	struct ub_ctx	*ctx;
1447 	struct val_env	*val_env;
1448 	size_t		 i;
1449 	int		 err, j;
1450 
1451 	if ((ctx = ub_ctx_create_event(ev_base)) == NULL)
1452 		fatalx("could not create unbound context");
1453 
1454 	for (i = 0; i < nitems(options); i++) {
1455 		if ((err = ub_ctx_set_option(ctx, options[i].name,
1456 		    options[i].value)) != 0) {
1457 			fatalx("error setting %s: %s: %s", options[i].name,
1458 			    options[i].value, ub_strerror(err));
1459 		}
1460 	}
1461 
1462 	context_finalize(ctx);
1463 
1464 	if (ctx->env->msg_cache == NULL || ctx->env->rrset_cache == NULL ||
1465 	    ctx->env->key_cache == NULL || ctx->env->neg_cache == NULL)
1466 		fatalx("could not setup unified caches");
1467 
1468 	unified_msg_cache = ctx->env->msg_cache;
1469 	unified_rrset_cache = ctx->env->rrset_cache;
1470 	unified_key_cache = ctx->env->key_cache;
1471 	unified_neg_cache = ctx->env->neg_cache;
1472 
1473 	if (val_id == -1) {
1474 		for (j = 0; j < ctx->mods.num; j++) {
1475 			if (strcmp(ctx->mods.mod[j]->name, "validator") == 0) {
1476 				val_id = j;
1477 				break;
1478 			}
1479 		}
1480 		if (val_id == -1)
1481 			fatalx("cannot find validator module");
1482 	}
1483 
1484 	val_env = (struct val_env*)ctx->env->modinfo[val_id];
1485 	ctx->env->msg_cache = NULL;
1486 	ctx->env->rrset_cache = NULL;
1487 	ctx->env->key_cache = NULL;
1488 	val_env->kcache = NULL;
1489 	ctx->env->neg_cache = NULL;
1490 	val_env->neg_cache = NULL;
1491 	ub_ctx_delete(ctx);
1492 }
1493 #endif /* UNIFIED_CACHE */
1494 
1495 void
1496 set_forwarders(struct uw_resolver *res, struct uw_forwarder_head
1497     *uw_forwarder_list, int port_override)
1498 {
1499 	struct uw_forwarder	*uw_forwarder;
1500 	int			 ret;
1501 	char			 fwd[FWD_MAX];
1502 
1503 	TAILQ_FOREACH(uw_forwarder, uw_forwarder_list, entry) {
1504 		if (uw_forwarder->auth_name[0] != '\0')
1505 			ret = snprintf(fwd, sizeof(fwd), "%s@%d#%s",
1506 			    uw_forwarder->ip, port_override ? port_override :
1507 			    uw_forwarder->port, uw_forwarder->auth_name);
1508 		else
1509 			ret = snprintf(fwd, sizeof(fwd), "%s@%d",
1510 			    uw_forwarder->ip, port_override ? port_override :
1511 			    uw_forwarder->port);
1512 
1513 		if (ret < 0 || (size_t)ret >= sizeof(fwd)) {
1514 			log_warnx("forwarder too long");
1515 			continue;
1516 		}
1517 
1518 		ub_ctx_set_fwd(res->ctx, fwd);
1519 	}
1520 }
1521 
1522 void
1523 resolver_check_timo(int fd, short events, void *arg)
1524 {
1525 	check_resolver((struct uw_resolver *)arg);
1526 }
1527 
1528 void
1529 resolver_free_timo(int fd, short events, void *arg)
1530 {
1531 	free_resolver((struct uw_resolver *)arg);
1532 }
1533 
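/*
 * Health-check a resolver by querying the root NS records through a
 * freshly created context of the same type; check_resolver_done() updates
 * the checked resolver's state from the result.
 */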
1534 void
1535 check_resolver(struct uw_resolver *resolver_to_check)
1536 {
1537 	struct uw_resolver		*res;
1538 
1539 	if (resolver_to_check == NULL)
1540 		return;
1541 
1542 	if (resolver_to_check->check_running)
1543 		return;
1544 
1545 	if ((res = create_resolver(resolver_to_check->type)) == NULL)
1546 		return;
1547 
1548 	resolver_ref(resolver_to_check);
1549 
1550 	resolver_to_check->check_running++;
1551 	if (resolve(res, ".", LDNS_RR_TYPE_NS, LDNS_RR_CLASS_IN,
1552 	    resolver_to_check, check_resolver_done) != 0) {
1553 		resolver_to_check->check_running--;
1554 		resolver_to_check->state = UNKNOWN;
1555 		resolver_unref(resolver_to_check);
1556 		resolver_to_check->check_tv.tv_sec = RESOLVER_CHECK_SEC;
1557 		evtimer_add(&resolver_to_check->check_ev,
1558 		    &resolver_to_check->check_tv);
1559 	}
1560 }
1561 
1562 void
1563 check_resolver_done(struct uw_resolver *res, void *arg, int rcode,
1564     void *answer_packet, int answer_len, int sec, char *why_bogus)
1565 {
1566 	struct uw_resolver	*checked_resolver = arg;
1567 	struct timeval		 tv = {0, 1};
1568 	enum uw_resolver_state	 prev_state;
1569 	int			 bogus_time = 0;
1570 	char			*str;
1571 
1572 	checked_resolver->check_running--;
1573 
1574 	if (checked_resolver != resolvers[checked_resolver->type]) {
1575 		log_debug("%s: %s: ignoring late check result", __func__,
1576 		    uw_resolver_type_str[checked_resolver->type]);
1577 		goto ignore_late;
1578 	}
1579 
1580 	prev_state = checked_resolver->state;
1581 
1582 	if (rcode == LDNS_RCODE_SERVFAIL) {
1583 		log_debug("%s: %s rcode: SERVFAIL", __func__,
1584 		    uw_resolver_type_str[checked_resolver->type]);
1585 
1586 		checked_resolver->state = DEAD;
1587 		goto out;
1588 	}
1589 
1590 	if (answer_len < LDNS_HEADER_SIZE) {
1591 		checked_resolver->state = DEAD;
1592 		log_warnx("%s: bad packet: too short", __func__);
1593 		goto out;
1594 	}
1595 
1596 	if (sec == SECURE) {
1597 		if (dns64_present && (res->type == UW_RES_AUTOCONF ||
1598 		    res->type == UW_RES_ODOT_AUTOCONF)) {
1599 			/* do not upgrade to validating, DNS64 breaks DNSSEC */
1600 			if (prev_state != RESOLVING)
1601 				new_resolver(checked_resolver->type,
1602 				    RESOLVING);
1603 		} else {
1604 			if (prev_state != VALIDATING)
1605 				new_resolver(checked_resolver->type,
1606 				    VALIDATING);
1607 			if (!(evtimer_pending(&trust_anchor_timer, NULL)))
1608 				evtimer_add(&trust_anchor_timer, &tv);
1609 		}
1610 	 } else if (rcode == LDNS_RCODE_NOERROR &&
1611 	    LDNS_RCODE_WIRE((uint8_t*)answer_packet) == LDNS_RCODE_NOERROR) {
1612 		if (why_bogus) {
1613 			bogus_time = strncmp(why_bogus, bogus_past,
1614 			    sizeof(bogus_past) - 1) == 0 || strncmp(why_bogus,
1615 			    bogus_future, sizeof(bogus_future) - 1) == 0;
1616 
1617 			log_warnx("%s: %s", uw_resolver_type_str[
1618 			    checked_resolver->type], why_bogus);
1619 		}
1620 		if (prev_state != RESOLVING)
1621 			new_resolver(checked_resolver->type, RESOLVING);
1622 	} else
1623 		checked_resolver->state = DEAD; /* we know the root exists */
1624 
1625 	log_debug("%s: %s: %s", __func__,
1626 	    uw_resolver_type_str[checked_resolver->type],
1627 	    uw_resolver_state_str[checked_resolver->state]);
1628 
1629 	if (log_getverbose() & OPT_VERBOSE2 && (str =
1630 	    sldns_wire2str_pkt(answer_packet, answer_len)) != NULL) {
1631 		log_debug("%s", str);
1632 		free(str);
1633 	}
1634 
1635 out:
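	/*
	 * Reschedule the check with exponential backoff while the resolver
	 * stays dead (or keeps returning time-related bogus results),
	 * capped at RESOLVER_CHECK_MAXSEC.
	 */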
1636 	if (!checked_resolver->stop && (checked_resolver->state == DEAD ||
1637 	    bogus_time)) {
1638 		if (prev_state == DEAD || bogus_time)
1639 			checked_resolver->check_tv.tv_sec *= 2;
1640 		else
1641 			checked_resolver->check_tv.tv_sec = RESOLVER_CHECK_SEC;
1642 
1643 		if (checked_resolver->check_tv.tv_sec > RESOLVER_CHECK_MAXSEC)
1644 			checked_resolver->check_tv.tv_sec =
1645 			    RESOLVER_CHECK_MAXSEC;
1646 
1647 		evtimer_add(&checked_resolver->check_ev,
1648 		    &checked_resolver->check_tv);
1649 	}
1650 
1651 ignore_late:
1652 	resolver_unref(checked_resolver);
1653 	res->stop = 1; /* do not free in callback */
1654 }
1655 
1656 void
1657 asr_resolve_done(struct asr_result *ar, void *arg)
1658 {
1659 	struct resolver_cb_data	*cb_data = arg;
1660 	cb_data->cb(cb_data->res, cb_data->data, ar->ar_errno == 0 ?
1661 	    ar->ar_rcode : LDNS_RCODE_SERVFAIL, ar->ar_data, ar->ar_datalen, 0,
1662 	    NULL);
1663 	free(ar->ar_data);
1664 	resolver_unref(cb_data->res);
1665 	free(cb_data);
1666 }
1667 
1668 void
1669 ub_resolve_done(void *arg, int rcode, void *answer_packet, int answer_len,
1670     int sec, char *why_bogus, int was_ratelimited)
1671 {
1672 	struct resolver_cb_data	*cb_data = arg;
1673 	cb_data->cb(cb_data->res, cb_data->data, rcode, answer_packet,
1674 	    answer_len, sec, why_bogus);
1675 	resolver_unref(cb_data->res);
1676 	free(cb_data);
1677 }
1678 
1679 void
1680 schedule_recheck_all_resolvers(void)
1681 {
1682 	struct timeval	 tv;
1683 	int		 i;
1684 
1685 	tv.tv_sec = 0;
1686 
1687 	for (i = 0; i < UW_RES_NONE; i++) {
1688 		if (resolvers[i] == NULL)
1689 			continue;
1690 		tv.tv_usec = arc4random() % 1000000; /* modulo bias is ok */
1691 		resolvers[i]->state = UNKNOWN;
1692 		evtimer_add(&resolvers[i]->check_ev, &tv);
1693 	}
1694 }
1695 
1696 int
1697 check_forwarders_changed(struct uw_forwarder_head *list_a,
1698     struct uw_forwarder_head *list_b)
1699 {
1700 	struct uw_forwarder	*a, *b;
1701 
1702 	a = TAILQ_FIRST(list_a);
1703 	b = TAILQ_FIRST(list_b);
1704 
1705 	while(a != NULL && b != NULL) {
1706 		if (strcmp(a->ip, b->ip) != 0)
1707 			return 1;
1708 		if (a->port != b->port)
1709 			return 1;
1710 		if (strcmp(a->auth_name, b->auth_name) != 0)
1711 			return 1;
1712 		a = TAILQ_NEXT(a, entry);
1713 		b = TAILQ_NEXT(b, entry);
1714 	}
1715 
1716 	if (a != NULL || b != NULL)
1717 		return 1;
1718 	return 0;
1719 }
1720 
1721 void
1722 resolver_ref(struct uw_resolver *res)
1723 {
1724 	if (res->ref_cnt == INT_MAX)
1725 		fatalx("%s: INT_MAX references", __func__);
1726 	res->ref_cnt++;
1727 }
1728 
1729 void
1730 resolver_unref(struct uw_resolver *res)
1731 {
1732 	struct timeval	 tv = { 0, 1};
1733 
1734 	if (res->ref_cnt == 0)
1735 		fatalx("%s: unreferenced resolver", __func__);
1736 
1737 	res->ref_cnt--;
1738 
1739 	/*
1740 	 * Decouple from libunbound event callback.
1741 	 * If we free the ctx inside of resolve_done or check_resovler_done
1742 	 * we are cutting of the branch we are sitting on and hit a
1743 	 * user-after-free
1744 	 */
1745 	if (res->stop && res->ref_cnt == 0) {
1746 		evtimer_set(&res->free_ev, resolver_free_timo, res);
1747 		evtimer_add(&res->free_ev, &tv);
1748 	}
1749 }
1750 
1751 void
1752 replace_forwarders(struct uw_forwarder_head *new_list, struct
1753     uw_forwarder_head *old_list)
1754 {
1755 	struct uw_forwarder	*uw_forwarder;
1756 
1757 	while ((uw_forwarder =
1758 	    TAILQ_FIRST(old_list)) != NULL) {
1759 		TAILQ_REMOVE(old_list, uw_forwarder, entry);
1760 		free(uw_forwarder);
1761 	}
1762 
1763 	TAILQ_CONCAT(old_list, new_list, entry);
1764 }
1765 
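/*
 * Comparison function for sort_resolver_types(): existing resolvers sort
 * before missing ones, better states sort first, and ties are broken by
 * median latency with the configured preferred type given a
 * PREF_RESOLVER_MEDIAN_SKEW head start.
 */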
1766 int
1767 resolver_cmp(const void *_a, const void *_b)
1768 {
1769 	const enum uw_resolver_type	 a = *(const enum uw_resolver_type *)_a;
1770 	const enum uw_resolver_type	 b = *(const enum uw_resolver_type *)_b;
1771 	int64_t				 a_median, b_median;
1772 
1773 	if (resolvers[a] == NULL && resolvers[b] == NULL)
1774 		return 0;
1775 
1776 	if (resolvers[b] == NULL)
1777 		return -1;
1778 
1779 	if (resolvers[a] == NULL)
1780 		return 1;
1781 
1782 	if (resolvers[a]->state < resolvers[b]->state)
1783 		return 1;
1784 	else if (resolvers[a]->state > resolvers[b]->state)
1785 		return -1;
1786 	else {
1787 		a_median = resolvers[a]->median;
1788 		b_median = resolvers[b]->median;
1789 		if (resolvers[a]->type == resolver_conf->res_pref.types[0])
1790 			a_median -= PREF_RESOLVER_MEDIAN_SKEW;
1791 		else if (resolvers[b]->type == resolver_conf->res_pref.types[0])
1792 			b_median -= PREF_RESOLVER_MEDIAN_SKEW;
1793 		if (a_median < b_median)
1794 			return -1;
1795 		else if (a_median > b_median)
1796 			return 1;
1797 		else
1798 			return 0;
1799 	}
1800 }
1801 
1802 void
1803 restart_ub_resolvers(int recheck)
1804 {
1805 	int			 i;
1806 	enum uw_resolver_state	 state;
1807 
1808 	for (i = 0; i < UW_RES_NONE; i++) {
1809 		if (i == UW_RES_ASR)
1810 			continue;
1811 		if (recheck || resolvers[i] == NULL)
1812 			state = UNKNOWN;
1813 		else
1814 			state = resolvers[i]->state;
1815 		new_resolver(i, state);
1816 	}
1817 }
1818 
1819 void
1820 show_status(pid_t pid)
1821 {
1822 	struct resolver_preference	 res_pref;
1823 	int				 i;
1824 
1825 	if (sort_resolver_types(&res_pref) == -1)
1826 		log_warn("mergesort");
1827 
1828 	for (i = 0; i < resolver_conf->res_pref.len; i++)
1829 		send_resolver_info(resolvers[res_pref.types[i]], pid);
1830 
1831 	resolver_imsg_compose_frontend(IMSG_CTL_END, pid, NULL, 0);
1832 }
1833 
1834 void
1835 show_autoconf(pid_t pid)
1836 {
1837 	struct uw_forwarder		*uw_forwarder;
1838 	struct ctl_forwarder_info	 cfi;
1839 
1840 	TAILQ_FOREACH(uw_forwarder, &autoconf_forwarder_list, entry) {
1841 		memset(&cfi, 0, sizeof(cfi));
1842 		cfi.if_index = uw_forwarder->if_index;
1843 		cfi.src = uw_forwarder->src;
1844 		/* no truncation, structs are in sync */
1845 		memcpy(cfi.ip, uw_forwarder->ip, sizeof(cfi.ip));
1846 		resolver_imsg_compose_frontend(
1847 		    IMSG_CTL_AUTOCONF_RESOLVER_INFO,
1848 		    pid, &cfi, sizeof(cfi));
1849 	}
1850 
1851 	resolver_imsg_compose_frontend(IMSG_CTL_END, pid, NULL, 0);
1852 }
1853 
1854 void
1855 show_mem(pid_t pid)
1856 {
1857 	struct ctl_mem_info	 cmi;
1858 
1859 	memset(&cmi, 0, sizeof(cmi));
1860 #ifdef UNIFIED_CACHE
1861 	cmi.msg_cache_used = slabhash_get_mem(unified_msg_cache);
1862 	cmi.msg_cache_max = slabhash_get_size(unified_msg_cache);
1863 	cmi.rrset_cache_used = slabhash_get_mem(&unified_rrset_cache->table);
1864 	cmi.rrset_cache_max = slabhash_get_size(&unified_rrset_cache->table);
1865 	cmi.key_cache_used = slabhash_get_mem(unified_key_cache->slab);
1866 	cmi.key_cache_max = slabhash_get_size(unified_key_cache->slab);
1867 	cmi.neg_cache_used = unified_neg_cache->use;
1868 	cmi.neg_cache_max = unified_neg_cache->max;
1869 #endif /* UNIFIED_CACHE */
1870 	resolver_imsg_compose_frontend(IMSG_CTL_MEM_INFO, pid, &cmi,
1871 	    sizeof(cmi));
1872 
1873 }
1874 
1875 void
1876 send_resolver_info(struct uw_resolver *res, pid_t pid)
1877 {
1878 	struct ctl_resolver_info	 cri;
1879 	size_t				 i;
1880 
1881 	if (res == NULL)
1882 		return;
1883 
1884 	cri.state = res->state;
1885 	cri.type = res->type;
1886 	cri.median = res->median;
1887 
1888 	memcpy(cri.histogram, res->histogram, sizeof(cri.histogram));
1889 	memcpy(cri.latest_histogram, res->latest_histogram,
1890 	    sizeof(cri.latest_histogram));
1891 	for (i = 0; i < nitems(histogram_limits); i++)
1892 		cri.latest_histogram[i] =
1893 		    (cri.latest_histogram[i] + 500) / 1000;
1894 
1895 	resolver_imsg_compose_frontend(IMSG_CTL_RESOLVER_INFO, pid, &cri,
1896 	    sizeof(cri));
1897 }
1898 
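/*
 * Periodically re-query the root DNSKEY RRset through the currently best,
 * validating resolver to keep the local copy of the trust anchor fresh.
 * On any failure the query is retried after TRUST_ANCHOR_RETRY_INTERVAL
 * seconds via trust_anchor_timer.
 */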
1899 void
1900 trust_anchor_resolve(void)
1901 {
1902 	struct resolver_preference	 res_pref;
1903 	struct uw_resolver		*res;
1904 	struct timeval			 tv = {TRUST_ANCHOR_RETRY_INTERVAL, 0};
1905 
1906 	if (sort_resolver_types(&res_pref) == -1)
1907 		log_warn("mergesort");
1908 
1909 	res = resolvers[res_pref.types[0]];
1910 
1911 	if (res == NULL || res->state < VALIDATING)
1912 		goto err;
1913 
1914 	if (resolve(res, ".",  LDNS_RR_TYPE_DNSKEY, LDNS_RR_CLASS_IN, NULL,
1915 	    trust_anchor_resolve_done) != 0)
1916 		goto err;
1917 
1918 	return;
1919  err:
1920 	evtimer_add(&trust_anchor_timer, &tv);
1921 }
1922 
1923 void
1924 trust_anchor_timo(int fd, short events, void *arg)
1925 {
1926 	trust_anchor_resolve();
1927 }
1928 
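/*
 * Callback for the trust anchor query above: the wire-format answer is fed
 * through libunbound's parser and every DNSKEY that has the SEP bit set and
 * is not revoked is passed to the frontend as a zone-file style line,
 * roughly
 *
 *	.	<ROOT_DNSKEY_TTL>	IN	DNSKEY	257 3 8 AwEAAa...
 *
 * (the key data shown here is only illustrative).  IMSG_NEW_TAS_DONE or
 * IMSG_NEW_TAS_ABORT then tells the frontend whether the new set is usable.
 */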
1929 void
1930 trust_anchor_resolve_done(struct uw_resolver *res, void *arg, int rcode,
1931     void *answer_packet, int answer_len, int sec, char *why_bogus)
1932 {
1933 	struct ub_result	*result = NULL;
1934 	sldns_buffer		*buf = NULL;
1935 	struct regional		*region = NULL;
1936 	struct timeval		 tv = {TRUST_ANCHOR_RETRY_INTERVAL, 0};
1937 	int			 i, tas, n;
1938 	uint16_t		 dnskey_flags;
1939 	char			 rdata_buf[1024], *ta;
1940 
1941 	if (rcode == LDNS_RCODE_SERVFAIL) {
1942 		log_debug("%s: rcode: SERVFAIL", __func__);
1943 		goto out;
1944 	}
1945 
1946 	if (answer_len < LDNS_HEADER_SIZE) {
1947 		log_warnx("bad packet: too short");
1948 		goto out;
1949 	}
1950 
1951 	if ((result = calloc(1, sizeof(*result))) == NULL)
1952 		goto out;
1953 
1954 	if (sec != SECURE)
1955 		goto out;
1956 
1957 	if ((buf = sldns_buffer_new(answer_len)) == NULL)
1958 		goto out;
1959 	if ((region = regional_create()) == NULL)
1960 		goto out;
1961 	result->rcode = LDNS_RCODE_SERVFAIL;
1962 
1963 	sldns_buffer_clear(buf);
1964 	sldns_buffer_write(buf, answer_packet, answer_len);
1965 	sldns_buffer_flip(buf);
1966 	libworker_enter_result(result, buf, region, sec);
1967 	result->answer_packet = NULL;
1968 	result->answer_len = 0;
1969 
1970 	if (result->rcode != LDNS_RCODE_NOERROR)
1971 		goto out;
1972 
1973 	i = 0;
1974 	tas = 0;
1975 	while (result->data[i] != NULL) {
1976 		if (result->len[i] < 2) {
1977 			if (tas > 0)
1978 				resolver_imsg_compose_frontend(
1979 				    IMSG_NEW_TAS_ABORT, 0, NULL, 0);
1980 			goto out;
1981 		}
1982 		n = sldns_wire2str_rdata_buf(result->data[i], result->len[i],
1983 		    rdata_buf, sizeof(rdata_buf), LDNS_RR_TYPE_DNSKEY);
1984 
1985 		if (n < 0 || (size_t)n >= sizeof(rdata_buf)) {
1986 			log_warnx("trust anchor buffer to small");
1987 			resolver_imsg_compose_frontend(IMSG_NEW_TAS_ABORT, 0,
1988 			    NULL, 0);
1989 			goto out;
1990 		}
1991 
1992 		memcpy(&dnskey_flags, result->data[i], 2);
1993 		dnskey_flags = ntohs(dnskey_flags);
1994 		if ((dnskey_flags & LDNS_KEY_SEP_KEY) && !(dnskey_flags &
1995 		    LDNS_KEY_REVOKE_KEY)) {
1996 			asprintf(&ta, ".\t%d\tIN\tDNSKEY\t%s", ROOT_DNSKEY_TTL,
1997 			    rdata_buf);
1998 			resolver_imsg_compose_frontend(IMSG_NEW_TA, 0, ta,
1999 			    strlen(ta) + 1);
2000 			tas++;
2001 			free(ta);
2002 		}
2003 		i++;
2004 	}
2005 	if (tas > 0) {
2006 		resolver_imsg_compose_frontend(IMSG_NEW_TAS_DONE, 0, NULL, 0);
2007 		tv.tv_sec = TRUST_ANCHOR_QUERY_INTERVAL;
2008 	}
2009 out:
2010 	sldns_buffer_free(buf);
2011 	regional_destroy(region);
2012 	ub_resolve_free(result);
2013 	evtimer_add(&trust_anchor_timer, &tv);
2014 }
2015 
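/*
 * Handle a DNS proposal (RTM_PROPOSAL) forwarded by the frontend: build a
 * fresh forwarder list from the proposed nameserver addresses (skipping
 * loopback addresses and scoping link-local IPv6 addresses to the proposing
 * interface), carry over previously learned forwarders from other
 * interfaces and sources, and only restart the autoconf resolvers if the
 * resulting list actually differs from the current one.
 */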
2016 void
2017 replace_autoconf_forwarders(struct imsg_rdns_proposal *rdns_proposal)
2018 {
2019 	struct uw_forwarder_head	 new_forwarder_list;
2020 	struct uw_forwarder		*uw_forwarder, *tmp;
2021 	size_t				 addrsz;
2022 	int				 i, rdns_count, af, changed = 0;
2023 	char				 hostbuf[INET6_ADDRSTRLEN], *src;
2024 
2025 	TAILQ_INIT(&new_forwarder_list);
2026 	af = rdns_proposal->rtdns.sr_family;
2027 	src = rdns_proposal->rtdns.sr_dns;
2028 
2029 	switch (af) {
2030 	case AF_INET:
2031 		addrsz = sizeof(struct in_addr);
2032 		break;
2033 	case AF_INET6:
2034 		addrsz = sizeof(struct in6_addr);
2035 		break;
2036 	default:
2037 		log_warnx("%s: unsupported address family: %d", __func__, af);
2038 		return;
2039 	}
2040 
2041 	if ((rdns_proposal->rtdns.sr_len - 2) % addrsz != 0) {
2042 		log_warnx("ignoring invalid RTM_PROPOSAL");
2043 		return;
2044 	}
2045 	rdns_count = (rdns_proposal->rtdns.sr_len -
2046 	    offsetof(struct sockaddr_rtdns, sr_dns)) / addrsz;
2047 
2048 	for (i = 0; i < rdns_count; i++) {
2049 		struct sockaddr_storage ss;
2050 		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ss;
2051 		struct sockaddr_in *sin = (struct sockaddr_in *)&ss;
2052 		int err;
2053 
2054 		memset(&ss, 0, sizeof(ss));
2055 		ss.ss_family = af;
2056 		switch (af) {
2057 		case AF_INET:
2058 			memcpy(&sin->sin_addr, src, addrsz);
2059 			if (sin->sin_addr.s_addr == htonl(INADDR_LOOPBACK))
2060 				goto skip;
2061 			ss.ss_len = sizeof(*sin);
2062 			break;
2063 		case AF_INET6:
2064 			memcpy(&sin6->sin6_addr, src, addrsz);
2065 			if (IN6_IS_ADDR_LOOPBACK(&sin6->sin6_addr))
2066 				goto skip;
2067 			if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))
2068 				sin6->sin6_scope_id = rdns_proposal->if_index;
2069 			ss.ss_len = sizeof(*sin6);
2070 			break;
2071 		}
2072 		if ((err = getnameinfo((struct sockaddr *)&ss, ss.ss_len,
2073 		    hostbuf, sizeof(hostbuf), NULL, 0, NI_NUMERICHOST)) != 0) {
2074 			log_warnx("getnameinfo: %s", gai_strerror(err));
2075 			goto skip;
2076 		}
2077 
2078 		if ((uw_forwarder = calloc(1, sizeof(struct uw_forwarder))) ==
2079 		    NULL)
2080 			fatal(NULL);
2081 		if (strlcpy(uw_forwarder->ip, hostbuf, sizeof(uw_forwarder->ip))
2082 		    >= sizeof(uw_forwarder->ip))
2083 			fatalx("strlcpy");
2084 		uw_forwarder->port = 53;
2085 		uw_forwarder->if_index = rdns_proposal->if_index;
2086 		uw_forwarder->src = rdns_proposal->src;
2087 		TAILQ_INSERT_TAIL(&new_forwarder_list, uw_forwarder, entry);
2088 
2089 skip:
2090 		src += addrsz;
2091 	}
2092 
2093 	TAILQ_FOREACH(tmp, &autoconf_forwarder_list, entry) {
2094 		/*
2095 		 * if_index of zero signals to clear all proposals
2096 		 * src of zero signals interface gone
2097 		 */
2098 		if ((rdns_proposal->src == 0 || rdns_proposal->src ==
2099 		    tmp->src) && (rdns_proposal->if_index == 0 ||
2100 		    rdns_proposal->if_index == tmp->if_index))
2101 			continue;
2102 		if ((uw_forwarder = calloc(1, sizeof(struct uw_forwarder))) ==
2103 		    NULL)
2104 			fatal(NULL);
2105 		if (strlcpy(uw_forwarder->ip, tmp->ip,
2106 		    sizeof(uw_forwarder->ip)) >= sizeof(uw_forwarder->ip))
2107 			fatalx("strlcpy");
2108 		uw_forwarder->port = tmp->port;
2109 		uw_forwarder->src = tmp->src;
2110 		uw_forwarder->if_index = tmp->if_index;
2111 		TAILQ_INSERT_TAIL(&new_forwarder_list, uw_forwarder, entry);
2112 	}
2113 
2114 	changed = check_forwarders_changed(&new_forwarder_list,
2115 	    &autoconf_forwarder_list);
2116 
2117 	if (changed) {
2118 		replace_forwarders(&new_forwarder_list,
2119 		    &autoconf_forwarder_list);
2120 		new_resolver(UW_RES_ASR, UNKNOWN);
2121 		new_resolver(UW_RES_AUTOCONF, UNKNOWN);
2122 		new_resolver(UW_RES_ODOT_AUTOCONF, UNKNOWN);
2123 	} else {
2124 		while ((tmp = TAILQ_FIRST(&new_forwarder_list)) != NULL) {
2125 			TAILQ_REMOVE(&new_forwarder_list, tmp, entry);
2126 			free(tmp);
2127 		}
2128 	}
2129 }
2130 
2131 int
2132 force_tree_cmp(struct force_tree_entry *a, struct force_tree_entry *b)
2133 {
2134 	return strcasecmp(a->domain, b->domain);
2135 }
2136 
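/*
 * Look up qname in the force tree by walking from the most specific name
 * towards the root, e.g. for "www.example.com." the candidates are
 * "www.example.com.", "example.com.", "com." and finally ".".  On a match
 * *res is set to the forced resolver and the entry's acceptbogus flag is
 * returned; 0 is returned if nothing matches.
 */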
2137 int
2138 find_force(struct force_tree *tree, char *qname, struct uw_resolver **res)
2139 {
2140 	struct force_tree_entry	*n, e;
2141 	char 			*p;
2142 
2143 	if (res)
2144 		*res = NULL;
2145 	if (RB_EMPTY(tree))
2146 		return 0;
2147 
2148 	p = qname;
2149 	do {
2150 		if (strlcpy(e.domain, p, sizeof(e.domain)) >= sizeof(e.domain))
2151 			fatal("qname too large");
2152 		n = RB_FIND(force_tree, tree, &e);
2153 		if (n != NULL) {
2154 			log_debug("%s: %s -> %s[%s]", __func__, qname, p,
2155 			    uw_resolver_type_str[n->type]);
2156 			if (res)
2157 				*res = resolvers[n->type];
2158 			return n->acceptbogus;
2159 		}
2160 		if (*p == '.')
2161 			p++;
2162 		p = strchr(p, '.');
2163 		if (p != NULL && p[1] != '\0')
2164 			p++;
2165 	} while (p != NULL);
2166 	return 0;
2167 
2168 }
2169 
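/*
 * Approximate the median query latency from a histogram of bucket counts.
 * The first bucket (cache hits) is ignored; the result is the midpoint of
 * the bucket in which the running count crosses half of the samples (for
 * illustration, with neighbouring bucket limits of 40 and 80 a median in
 * that bucket is reported as 60).  INT64_MAX is returned when the median
 * lands in the open-ended last bucket.
 */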
2170 int64_t
2171 histogram_median(int64_t *histogram)
2172 {
2173 	size_t	 i;
2174 	int64_t	 sample_count = 0, running_count = 0;
2175 
2176 	/* skip first bucket, it contains cache hits */
2177 	for (i = 1; i < nitems(histogram_limits); i++)
2178 		sample_count += histogram[i];
2179 
2180 	if (sample_count == 0)
2181 		return 0;
2182 
2183 	for (i = 1; i < nitems(histogram_limits); i++) {
2184 		running_count += histogram[i];
2185 		if (running_count >= sample_count / 2)
2186 			break;
2187 	}
2188 
2189 	if (i >= nitems(histogram_limits) - 1)
2190 		return INT64_MAX;
2191 	return (histogram_limits[i - 1] + histogram_limits[i]) / 2;
2192 }
2193 
2194 void
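/*
 * Timer callback that ages the per-resolver latency histograms every
 * DECAY_PERIOD seconds: each bucket is scaled by
 * DECAY_NOMINATOR/DECAY_DENOMINATOR (9/10), so older measurements lose
 * weight exponentially, and the cached median is recomputed from the
 * decayed counts.
 */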
2195 decay_latest_histograms(int fd, short events, void *arg)
2196 {
2197 	enum uw_resolver_type	 i;
2198 	size_t			 j;
2199 	struct uw_resolver	*res;
2200 	struct timeval		 tv = {DECAY_PERIOD, 0};
2201 
2202 	for (i = 0; i < UW_RES_NONE; i++) {
2203 		res = resolvers[i];
2204 		if (res == NULL)
2205 			continue;
2206 		for (j = 0; j < nitems(res->latest_histogram); j++)
2207 			/* multiply then divide, avoiding truncating to 0 */
2208 			res->latest_histogram[j] = res->latest_histogram[j] *
2209 			    DECAY_NOMINATOR / DECAY_DENOMINATOR;
2210 		res->median = histogram_median(res->latest_histogram);
2211 	}
2212 	evtimer_add(&decay_timer, &tv);
2213 }
2214 
2215 int
2216 running_query_cnt(void)
2217 {
2218 	struct running_query	*e;
2219 	int			 cnt = 0;
2220 
2221 	TAILQ_FOREACH(e, &running_queries, entry)
2222 		cnt++;
2223 	return cnt;
2224 }
2225 
2226 int *
2227 resolvers_to_restart(struct uw_conf *oconf, struct uw_conf *nconf)
2228 {
2229 	static int	 restart[UW_RES_NONE];
2230 	int		 i;
2231 
2232 	memset(&restart, 0, sizeof(restart));
2233 	if (check_forwarders_changed(&oconf->uw_forwarder_list,
2234 	    &nconf->uw_forwarder_list)) {
2235 		restart[UW_RES_FORWARDER] = 1;
2236 		restart[UW_RES_ODOT_FORWARDER] = 1;
2237 	}
2238 	if (check_forwarders_changed(&oconf->uw_dot_forwarder_list,
2239 	    &nconf->uw_dot_forwarder_list)) {
2240 		restart[UW_RES_DOT] = 1;
2241 	}
2242 
2243 	for (i = 0; i < UW_RES_NONE; i++) {
2244 		if (oconf->enabled_resolvers[i] != nconf->enabled_resolvers[i])
2245 			restart[i] = 1;
2246 	}
2247 	return restart;
2248 }
2249 
2250 const char *
2251 query_imsg2str(struct query_imsg *query_imsg)
2252 {
2253 	static char	 buf[sizeof(query_imsg->qname) + 1 + 16 + 1 + 16];
2254 	char		 qclass_buf[16];
2255 	char		 qtype_buf[16];
2256 
2257 	sldns_wire2str_class_buf(query_imsg->c, qclass_buf, sizeof(qclass_buf));
2258 	sldns_wire2str_type_buf(query_imsg->t, qtype_buf, sizeof(qtype_buf));
2259 
2260 	snprintf(buf, sizeof(buf), "%s %s %s", query_imsg->qname, qclass_buf,
2261 	    qtype_buf);
2262 	return buf;
2263 }
2264 
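/*
 * Build a resolv.conf(5) style string from the learned autoconf forwarders,
 * one "nameserver <ip>\n" line per forwarder, for use with
 * asr_resolver_from_string() in check_dns64() below.  The caller frees the
 * returned string; NULL is returned if memory runs out.
 */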
2265 char *
2266 gen_resolv_conf(void)
2267 {
2268 	struct uw_forwarder	*uw_forwarder;
2269 	char			*resolv_conf = NULL, *tmp = NULL;
2270 
2271 	TAILQ_FOREACH(uw_forwarder, &autoconf_forwarder_list, entry) {
2272 		tmp = resolv_conf;
2273 		if (asprintf(&resolv_conf, "%snameserver %s\n", tmp ==
2274 		    NULL ? "" : tmp, uw_forwarder->ip) == -1) {
2275 			free(tmp);
2276 			return (NULL);
2277 		}
2278 		free(tmp);
2279 	}
2280 	return resolv_conf;
2281 }
2282 
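/*
 * RFC 7050 heuristic: ask the network-provided resolvers for the AAAA
 * record of ipv4only.arpa. via a throw-away asr context built from the
 * autoconf forwarder list.  A synthesized answer reveals the presence of a
 * DNS64/NAT64 setup; the result is evaluated in check_dns64_done().
 */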
2283 void
2284 check_dns64(void)
2285 {
2286 	struct asr_query	*aq = NULL;
2287 	char			*resolv_conf;
2288 	void			*asr_ctx;
2289 
2290 	if (TAILQ_EMPTY(&autoconf_forwarder_list))
2291 		return;
2292 
2293 	if ((resolv_conf = gen_resolv_conf()) == NULL) {
2294 		log_warnx("could not create asr context");
2295 		return;
2296 	}
2297 
2298 	if ((asr_ctx = asr_resolver_from_string(resolv_conf)) != NULL) {
2299 		if ((aq = res_query_async("ipv4only.arpa.", LDNS_RR_CLASS_IN,
2300 		    LDNS_RR_TYPE_AAAA, asr_ctx)) == NULL) {
2301 			log_warn("%s: res_query_async", __func__);
2302 			asr_resolver_free(asr_ctx);
2303 		}
2304 		if (event_asr_run(aq, check_dns64_done, asr_ctx) == NULL) {
2305 			log_warn("%s: event_asr_run", __func__);
2306 			free(aq);
2307 			asr_resolver_free(asr_ctx);
2308 		}
2309 	} else
2310 		log_warnx("%s: could not create asr context", __func__);
2311 
2312 	free(resolv_conf);
2313 }
2314 
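/*
 * Parse the ipv4only.arpa. answer with libunbound's wire parsers and derive
 * the DNS64 prefixes from any AAAA records that embed both well-known IPv4
 * addresses.  If DNS64 is in use the prefixes are pushed to the frontend
 * and the autoconf resolvers are capped at RESOLVING, since synthesized
 * AAAA records cannot validate.
 */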
2315 void
2316 check_dns64_done(struct asr_result *ar, void *arg)
2317 {
2318 	/* RFC 7050: ipv4only.arpa resolves to 192.0.0.170 and 192.0.0.171 */
2319 	const uint8_t			 wka1[] = {192, 0, 0, 170};
2320 	const uint8_t			 wka2[] = {192, 0, 0, 171};
2321 	struct query_info		 skip, qinfo;
2322 	struct reply_info		*rinfo = NULL;
2323 	struct regional			*region = NULL;
2324 	struct sldns_buffer		*buf = NULL;
2325 	struct ub_packed_rrset_key	*an_rrset = NULL;
2326 	struct packed_rrset_data	*an_rrset_data;
2327 	struct alloc_cache		 alloc;
2328 	struct edns_data		 edns;
2329 	struct dns64_prefix		*prefixes = NULL;
2330 	size_t				 i;
2331 	int				 preflen, count = 0;
2332 	void				*asr_ctx = arg;
2333 
2334 	if (ar->ar_errno != 0)
2335 		goto fail;
2336 
2337 	memset(&qinfo, 0, sizeof(qinfo));
2338 	alloc_init(&alloc, NULL, 0);
2339 
2340 	if (ar->ar_datalen < LDNS_HEADER_SIZE) {
2341 		log_warnx("%s: bad packet: too short: %d", __func__,
2342 		    ar->ar_datalen);
2343 		goto out;
2344 	}
2345 
2346 	if (ar->ar_datalen > UINT16_MAX) {
2347 		log_warnx("%s: bad packet: too large: %d", __func__,
2348 		    ar->ar_datalen);
2349 		goto out;
2350 	}
2351 
2352 	if (ar->ar_rcode == LDNS_RCODE_NXDOMAIN) {
2353 		/* XXX this means that the autoconf resolver is broken */
2354 		log_debug("%s: NXDOMAIN", __func__);
2355 		goto out;
2356 	}
2357 
2358 	if ((buf = sldns_buffer_new(ar->ar_datalen)) == NULL)
2359 		goto out;
2360 
2361 	if ((region = regional_create()) == NULL)
2362 		goto out;
2363 
2364 	sldns_buffer_write(buf, ar->ar_data, ar->ar_datalen);
2365 	sldns_buffer_flip(buf);
2366 
2367 	/* read past query section, no memory is allocated */
2368 	if (!query_info_parse(&skip, buf))
2369 		goto out;
2370 
2371 	if (reply_info_parse(buf, &alloc, &qinfo, &rinfo, region, &edns) != 0)
2372 		goto out;
2373 
2374 	if ((an_rrset = reply_find_answer_rrset(&qinfo, rinfo)) == NULL)
2375 		goto out;
2376 
2377 	an_rrset_data = (struct packed_rrset_data*)an_rrset->entry.data;
2378 
2379 	prefixes = calloc(an_rrset_data->count, sizeof(struct dns64_prefix));
2380 	if (prefixes == NULL)
2381 		goto out;
2382 
2383 	for (i = 0; i < an_rrset_data->count; i++) {
2384 		struct in6_addr	 in6;
2385 
2386 		/* check for AAAA record */
2387 		if (an_rrset_data->rr_len[i] != 18) /* 2 + 128/8 */
2388 			continue;
2389 		if (an_rrset_data->rr_data[i][0] != 0 &&
2390 		    an_rrset_data->rr_data[i][1] != 16)
2391 			continue;
2392 
2393 		memcpy(&in6, &an_rrset_data->rr_data[i][2],
2394 		    sizeof(in6));
2395 		if ((preflen = dns64_prefixlen(&in6, wka1)) != -1)
2396 			add_dns64_prefix(&in6, preflen, prefixes,
2397 			    an_rrset_data->count, WKA1_FOUND);
2398 		if ((preflen = dns64_prefixlen(&in6, wka2)) != -1)
2399 			add_dns64_prefix(&in6, preflen, prefixes,
2400 			    an_rrset_data->count, WKA2_FOUND);
2401 	}
2402 
2403 	for (i = 0; i < an_rrset_data->count && prefixes[i].flags != 0; i++)
2404 		if ((prefixes[i].flags & (WKA1_FOUND | WKA2_FOUND)) ==
2405 		    (WKA1_FOUND | WKA2_FOUND))
2406 			count++;
2407 
2408 	dns64_present = count > 0;
2409 
2410 	if (dns64_present) {
2411 		/* downgrade SLAAC resolvers, DNS64 breaks DNSSEC */
2412 		if (resolvers[UW_RES_AUTOCONF] != NULL &&
2413 		    resolvers[UW_RES_AUTOCONF]->state == VALIDATING)
2414 			new_resolver(UW_RES_AUTOCONF, RESOLVING);
2415 		if (resolvers[UW_RES_ODOT_AUTOCONF] != NULL &&
2416 		    resolvers[UW_RES_ODOT_AUTOCONF]->state == VALIDATING)
2417 			new_resolver(UW_RES_ODOT_AUTOCONF, RESOLVING);
2418 	}
2419 
2420 	resolver_imsg_compose_frontend(IMSG_NEW_DNS64_PREFIXES_START, 0,
2421 	    &count, sizeof(count));
2422 	for (i = 0; i < an_rrset_data->count && prefixes[i].flags != 0; i++) {
2423 		if ((prefixes[i].flags & (WKA1_FOUND | WKA2_FOUND)) ==
2424 		    (WKA1_FOUND | WKA2_FOUND)) {
2425 			resolver_imsg_compose_frontend(IMSG_NEW_DNS64_PREFIX,
2426 			    0, &prefixes[i], sizeof(struct dns64_prefix));
2427 		}
2428 	}
2429 	resolver_imsg_compose_frontend(IMSG_NEW_DNS64_PREFIXES_DONE, 0, NULL,
2430 	    0);
2431  out:
2432 	free(prefixes);
2433 	query_info_clear(&qinfo);
2434 	reply_info_parsedelete(rinfo, &alloc);
2435 	alloc_clear(&alloc);
2436 	regional_destroy(region);
2437 	sldns_buffer_free(buf);
2438  fail:
2439 	free(ar->ar_data);
2440 	asr_resolver_free(asr_ctx);
2441 }
2442 
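/*
 * Determine the DNS64 prefix length of in6 by locating the embedded
 * well-known IPv4 address wka at one of the positions allowed by RFC 6052,
 * section 2.2 (/32, /40, /48, /56, /64 or /96), skipping the reserved
 * u-octet at byte 8, which must be zero.  For example the well-known prefix
 * 64:ff9b::/96 maps 192.0.0.170 to 64:ff9b::c000:aa, for which this returns
 * 96.  Returns -1 if no candidate matches.
 */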
2443 int
2444 dns64_prefixlen(const struct in6_addr *in6, const uint8_t *wka)
2445 {
2446 	/* RFC 6052, 2.2 */
2447 	static const int	 possible_prefixes[] = {32, 40, 48, 56, 64, 96};
2448 	size_t			 i, j;
2449 	int			 found, pos;
2450 
2451 	for (i = 0; i < nitems(possible_prefixes); i++) {
2452 		pos = possible_prefixes[i] / 8;
2453 		found = 1;
2454 		for (j = 0; j < 4 && found; j++, pos++) {
2455 			if (pos == 8) {
2456 				if (in6->s6_addr[pos] != 0)
2457 					found = 0;
2458 				pos++;
2459 			}
2460 			if (in6->s6_addr[pos] != wka[j])
2461 				found = 0;
2462 		}
2463 		if (found)
2464 			return possible_prefixes[i];
2465 	}
2466 	return -1;
2467 }
2468 
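/*
 * Record a discovered DNS64 prefix: the host bits beyond prefixlen are
 * zeroed and the prefix is either stored in the first free slot of
 * prefixes[] or, if the same prefix and length are already present, merged
 * by OR-ing in the WKA1_FOUND/WKA2_FOUND flag.  The caller only counts a
 * prefix as confirmed once both flags are set.
 */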
2469 void
2470 add_dns64_prefix(const struct in6_addr *in6, int prefixlen,
2471     struct dns64_prefix *prefixes, int prefixes_size, int flag)
2472 {
2473 	struct in6_addr	 tmp;
2474 	int		 i;
2475 
2476 	tmp = *in6;
2477 
2478 	for (i = prefixlen / 8; i < 16; i++)
2479 		tmp.s6_addr[i] = 0;
2480 
2481 	for (i = 0; i < prefixes_size; i++) {
2482 		if (prefixes[i].flags == 0) {
2483 			prefixes[i].in6 = tmp;
2484 			prefixes[i].prefixlen = prefixlen;
2485 			prefixes[i].flags |= flag;
2486 			break;
2487 		} else if (prefixes[i].prefixlen == prefixlen &&
2488 		    memcmp(&prefixes[i].in6, &tmp, sizeof(tmp)) == 0) {
2489 			prefixes[i].flags |= flag;
2490 			break;
2491 		}
2492 	}
2493 }
2494