1 /*
2  * services/outside_network.c - implement sending of queries and wait answer.
3  *
4  * Copyright (c) 2007, NLnet Labs. All rights reserved.
5  *
6  * This software is open source.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * Redistributions of source code must retain the above copyright notice,
13  * this list of conditions and the following disclaimer.
14  *
15  * Redistributions in binary form must reproduce the above copyright notice,
16  * this list of conditions and the following disclaimer in the documentation
17  * and/or other materials provided with the distribution.
18  *
19  * Neither the name of the NLNET LABS nor the names of its contributors may
20  * be used to endorse or promote products derived from this software without
21  * specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27  * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
29  * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
30  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
31  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34  */
35 
36 /**
37  * \file
38  *
39  * This file has functions to send queries to authoritative servers and
40  * wait for the pending answer events.
41  */
42 #include "config.h"
43 #include <ctype.h>
44 #ifdef HAVE_SYS_TYPES_H
45 #  include <sys/types.h>
46 #endif
47 #include <sys/time.h>
48 #include "services/outside_network.h"
49 #include "services/listen_dnsport.h"
50 #include "services/cache/infra.h"
51 #include "iterator/iterator.h"
52 #include "util/data/msgparse.h"
53 #include "util/data/msgreply.h"
54 #include "util/data/msgencode.h"
55 #include "util/data/dname.h"
56 #include "util/netevent.h"
57 #include "util/log.h"
58 #include "util/net_help.h"
59 #include "util/random.h"
60 #include "util/fptr_wlist.h"
61 #include "util/edns.h"
62 #include "sldns/sbuffer.h"
63 #include "dnstap/dnstap.h"
64 #ifdef HAVE_OPENSSL_SSL_H
65 #include <openssl/ssl.h>
66 #endif
67 #ifdef HAVE_X509_VERIFY_PARAM_SET1_HOST
68 #include <openssl/x509v3.h>
69 #endif
70 
71 #ifdef HAVE_NETDB_H
72 #include <netdb.h>
73 #endif
74 #include <fcntl.h>
75 
76 /** number of times to retry making a random ID that is unique. */
77 #define MAX_ID_RETRY 1000
78 /** number of times to retry finding interface, port that can be opened. */
79 #define MAX_PORT_RETRY 10000
80 /** number of retries on outgoing UDP queries */
81 #define OUTBOUND_UDP_RETRY 1
82 
83 /** initiate TCP transaction for serviced query */
84 static void serviced_tcp_initiate(struct serviced_query* sq, sldns_buffer* buff);
85 /** with a fd available, randomize and send UDP */
86 static int randomize_and_send_udp(struct pending* pend, sldns_buffer* packet,
87 	int timeout);
88 
89 /** remove waiting tcp from the outnet waiting list */
90 static void waiting_list_remove(struct outside_network* outnet,
91 	struct waiting_tcp* w);
92 
93 /** select a DNS ID for a TCP stream */
94 static uint16_t tcp_select_id(struct outside_network* outnet,
95 	struct reuse_tcp* reuse);
96 
97 int
pending_cmp(const void * key1,const void * key2)98 pending_cmp(const void* key1, const void* key2)
99 {
100 	struct pending *p1 = (struct pending*)key1;
101 	struct pending *p2 = (struct pending*)key2;
102 	if(p1->id < p2->id)
103 		return -1;
104 	if(p1->id > p2->id)
105 		return 1;
106 	log_assert(p1->id == p2->id);
107 	return sockaddr_cmp(&p1->addr, p1->addrlen, &p2->addr, p2->addrlen);
108 }
109 
110 int
serviced_cmp(const void * key1,const void * key2)111 serviced_cmp(const void* key1, const void* key2)
112 {
113 	struct serviced_query* q1 = (struct serviced_query*)key1;
114 	struct serviced_query* q2 = (struct serviced_query*)key2;
115 	int r;
116 	if(q1->qbuflen < q2->qbuflen)
117 		return -1;
118 	if(q1->qbuflen > q2->qbuflen)
119 		return 1;
120 	log_assert(q1->qbuflen == q2->qbuflen);
121 	log_assert(q1->qbuflen >= 15 /* 10 header, root, type, class */);
122 	/* alternate casing of qname is still the same query */
123 	if((r = memcmp(q1->qbuf, q2->qbuf, 10)) != 0)
124 		return r;
125 	if((r = memcmp(q1->qbuf+q1->qbuflen-4, q2->qbuf+q2->qbuflen-4, 4)) != 0)
126 		return r;
127 	if(q1->dnssec != q2->dnssec) {
128 		if(q1->dnssec < q2->dnssec)
129 			return -1;
130 		return 1;
131 	}
132 	if((r = query_dname_compare(q1->qbuf+10, q2->qbuf+10)) != 0)
133 		return r;
134 	if((r = edns_opt_list_compare(q1->opt_list, q2->opt_list)) != 0)
135 		return r;
136 	return sockaddr_cmp(&q1->addr, q1->addrlen, &q2->addr, q2->addrlen);
137 }
138 
139 /** compare if the reuse element has the same address, port and same ssl-is
140  * used-for-it characteristic */
141 static int
reuse_cmp_addrportssl(const void * key1,const void * key2)142 reuse_cmp_addrportssl(const void* key1, const void* key2)
143 {
144 	struct reuse_tcp* r1 = (struct reuse_tcp*)key1;
145 	struct reuse_tcp* r2 = (struct reuse_tcp*)key2;
146 	int r;
147 	/* compare address and port */
148 	r = sockaddr_cmp(&r1->addr, r1->addrlen, &r2->addr, r2->addrlen);
149 	if(r != 0)
150 		return r;
151 
152 	/* compare if SSL-enabled */
153 	if(r1->is_ssl && !r2->is_ssl)
154 		return 1;
155 	if(!r1->is_ssl && r2->is_ssl)
156 		return -1;
157 	return 0;
158 }
159 
160 int
reuse_cmp(const void * key1,const void * key2)161 reuse_cmp(const void* key1, const void* key2)
162 {
163 	int r;
164 	r = reuse_cmp_addrportssl(key1, key2);
165 	if(r != 0)
166 		return r;
167 
168 	/* compare ptr value */
169 	if(key1 < key2) return -1;
170 	if(key1 > key2) return 1;
171 	return 0;
172 }
173 
reuse_id_cmp(const void * key1,const void * key2)174 int reuse_id_cmp(const void* key1, const void* key2)
175 {
176 	struct waiting_tcp* w1 = (struct waiting_tcp*)key1;
177 	struct waiting_tcp* w2 = (struct waiting_tcp*)key2;
178 	if(w1->id < w2->id)
179 		return -1;
180 	if(w1->id > w2->id)
181 		return 1;
182 	return 0;
183 }
184 
185 /** delete waiting_tcp entry. Does not unlink from waiting list.
186  * @param w: to delete.
187  */
188 static void
waiting_tcp_delete(struct waiting_tcp * w)189 waiting_tcp_delete(struct waiting_tcp* w)
190 {
191 	if(!w) return;
192 	if(w->timer)
193 		comm_timer_delete(w->timer);
194 	free(w);
195 }
196 
197 /**
198  * Pick random outgoing-interface of that family, and bind it.
199  * port set to 0 so OS picks a port number for us.
200  * if it is the ANY address, do not bind.
201  * @param pend: pending tcp structure, for storing the local address choice.
202  * @param w: tcp structure with destination address.
203  * @param s: socket fd.
204  * @return false on error, socket closed.
205  */
206 static int
pick_outgoing_tcp(struct pending_tcp * pend,struct waiting_tcp * w,int s)207 pick_outgoing_tcp(struct pending_tcp* pend, struct waiting_tcp* w, int s)
208 {
209 	struct port_if* pi = NULL;
210 	int num;
211 	pend->pi = NULL;
212 #ifdef INET6
213 	if(addr_is_ip6(&w->addr, w->addrlen))
214 		num = w->outnet->num_ip6;
215 	else
216 #endif
217 		num = w->outnet->num_ip4;
218 	if(num == 0) {
219 		log_err("no TCP outgoing interfaces of family");
220 		log_addr(VERB_OPS, "for addr", &w->addr, w->addrlen);
221 		sock_close(s);
222 		return 0;
223 	}
224 #ifdef INET6
225 	if(addr_is_ip6(&w->addr, w->addrlen))
226 		pi = &w->outnet->ip6_ifs[ub_random_max(w->outnet->rnd, num)];
227 	else
228 #endif
229 		pi = &w->outnet->ip4_ifs[ub_random_max(w->outnet->rnd, num)];
230 	log_assert(pi);
231 	pend->pi = pi;
232 	if(addr_is_any(&pi->addr, pi->addrlen)) {
233 		/* binding to the ANY interface is for listening sockets */
234 		return 1;
235 	}
236 	/* set port to 0 */
237 	if(addr_is_ip6(&pi->addr, pi->addrlen))
238 		((struct sockaddr_in6*)&pi->addr)->sin6_port = 0;
239 	else	((struct sockaddr_in*)&pi->addr)->sin_port = 0;
240 	if(bind(s, (struct sockaddr*)&pi->addr, pi->addrlen) != 0) {
241 #ifndef USE_WINSOCK
242 #ifdef EADDRNOTAVAIL
243 		if(!(verbosity < 4 && errno == EADDRNOTAVAIL))
244 #endif
245 #else /* USE_WINSOCK */
246 		if(!(verbosity < 4 && WSAGetLastError() == WSAEADDRNOTAVAIL))
247 #endif
248 		    log_err("outgoing tcp: bind: %s", sock_strerror(errno));
249 		sock_close(s);
250 		return 0;
251 	}
252 	log_addr(VERB_ALGO, "tcp bound to src", &pi->addr, pi->addrlen);
253 	return 1;
254 }
255 
256 /** get TCP file descriptor for address, returns -1 on failure,
257  * tcp_mss is 0 or maxseg size to set for TCP packets. */
258 int
outnet_get_tcp_fd(struct sockaddr_storage * addr,socklen_t addrlen,int tcp_mss,int dscp)259 outnet_get_tcp_fd(struct sockaddr_storage* addr, socklen_t addrlen, int tcp_mss, int dscp)
260 {
261 	int s;
262 	int af;
263 	char* err;
264 #ifdef SO_REUSEADDR
265 	int on = 1;
266 #endif
267 #ifdef INET6
268 	if(addr_is_ip6(addr, addrlen)){
269 		s = socket(PF_INET6, SOCK_STREAM, IPPROTO_TCP);
270 		af = AF_INET6;
271 	} else {
272 #else
273 	{
274 #endif
275 		af = AF_INET;
276 		s = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP);
277 	}
278 	if(s == -1) {
279 		log_err_addr("outgoing tcp: socket", sock_strerror(errno),
280 			addr, addrlen);
281 		return -1;
282 	}
283 
284 #ifdef SO_REUSEADDR
285 	if(setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (void*)&on,
286 		(socklen_t)sizeof(on)) < 0) {
287 		verbose(VERB_ALGO, "outgoing tcp:"
288 			" setsockopt(.. SO_REUSEADDR ..) failed");
289 	}
290 #endif
291 
292 	err = set_ip_dscp(s, af, dscp);
293 	if(err != NULL) {
294 		verbose(VERB_ALGO, "outgoing tcp:"
295 			"error setting IP DiffServ codepoint on socket");
296 	}
297 
298 	if(tcp_mss > 0) {
299 #if defined(IPPROTO_TCP) && defined(TCP_MAXSEG)
300 		if(setsockopt(s, IPPROTO_TCP, TCP_MAXSEG,
301 			(void*)&tcp_mss, (socklen_t)sizeof(tcp_mss)) < 0) {
302 			verbose(VERB_ALGO, "outgoing tcp:"
303 				" setsockopt(.. TCP_MAXSEG ..) failed");
304 		}
305 #else
306 		verbose(VERB_ALGO, "outgoing tcp:"
307 			" setsockopt(TCP_MAXSEG) unsupported");
308 #endif /* defined(IPPROTO_TCP) && defined(TCP_MAXSEG) */
309 	}
310 
311 	return s;
312 }
313 
314 /** connect tcp connection to addr, 0 on failure */
315 int
316 outnet_tcp_connect(int s, struct sockaddr_storage* addr, socklen_t addrlen)
317 {
318 	if(connect(s, (struct sockaddr*)addr, addrlen) == -1) {
319 #ifndef USE_WINSOCK
320 #ifdef EINPROGRESS
321 		if(errno != EINPROGRESS) {
322 #endif
323 			if(tcp_connect_errno_needs_log(
324 				(struct sockaddr*)addr, addrlen))
325 				log_err_addr("outgoing tcp: connect",
326 					strerror(errno), addr, addrlen);
327 			close(s);
328 			return 0;
329 #ifdef EINPROGRESS
330 		}
331 #endif
332 #else /* USE_WINSOCK */
333 		if(WSAGetLastError() != WSAEINPROGRESS &&
334 			WSAGetLastError() != WSAEWOULDBLOCK) {
335 			closesocket(s);
336 			return 0;
337 		}
338 #endif
339 	}
340 	return 1;
341 }
342 
343 /** log reuse item addr and ptr with message */
344 static void
345 log_reuse_tcp(enum verbosity_value v, const char* msg, struct reuse_tcp* reuse)
346 {
347 	uint16_t port;
348 	char addrbuf[128];
349 	if(verbosity < v) return;
350 	if(!reuse || !reuse->pending || !reuse->pending->c)
351 		return;
352 	addr_to_str(&reuse->addr, reuse->addrlen, addrbuf, sizeof(addrbuf));
353 	port = ntohs(((struct sockaddr_in*)&reuse->addr)->sin_port);
354 	verbose(v, "%s %s#%u fd %d", msg, addrbuf, (unsigned)port,
355 		reuse->pending->c->fd);
356 }
357 
358 /** pop the first element from the writewait list */
359 static struct waiting_tcp* reuse_write_wait_pop(struct reuse_tcp* reuse)
360 {
361 	struct waiting_tcp* w = reuse->write_wait_first;
362 	if(!w)
363 		return NULL;
364 	log_assert(w->write_wait_queued);
365 	log_assert(!w->write_wait_prev);
366 	reuse->write_wait_first = w->write_wait_next;
367 	if(w->write_wait_next)
368 		w->write_wait_next->write_wait_prev = NULL;
369 	else	reuse->write_wait_last = NULL;
370 	w->write_wait_queued = 0;
371 	w->write_wait_next = NULL;
372 	w->write_wait_prev = NULL;
373 	return w;
374 }
375 
376 /** remove the element from the writewait list */
377 static void reuse_write_wait_remove(struct reuse_tcp* reuse,
378 	struct waiting_tcp* w)
379 {
380 	log_assert(w);
381 	log_assert(w->write_wait_queued);
382 	if(!w)
383 		return;
384 	if(!w->write_wait_queued)
385 		return;
386 	if(w->write_wait_prev)
387 		w->write_wait_prev->write_wait_next = w->write_wait_next;
388 	else	reuse->write_wait_first = w->write_wait_next;
389 	log_assert(!w->write_wait_prev ||
390 		w->write_wait_prev->write_wait_next != w->write_wait_prev);
391 	if(w->write_wait_next)
392 		w->write_wait_next->write_wait_prev = w->write_wait_prev;
393 	else	reuse->write_wait_last = w->write_wait_prev;
394 	log_assert(!w->write_wait_next
395 		|| w->write_wait_next->write_wait_prev != w->write_wait_next);
396 	w->write_wait_queued = 0;
397 	w->write_wait_next = NULL;
398 	w->write_wait_prev = NULL;
399 }
400 
401 /** push the element after the last on the writewait list */
402 static void reuse_write_wait_push_back(struct reuse_tcp* reuse,
403 	struct waiting_tcp* w)
404 {
405 	if(!w) return;
406 	log_assert(!w->write_wait_queued);
407 	if(reuse->write_wait_last) {
408 		reuse->write_wait_last->write_wait_next = w;
409 		log_assert(reuse->write_wait_last->write_wait_next !=
410 			reuse->write_wait_last);
411 		w->write_wait_prev = reuse->write_wait_last;
412 	} else {
413 		reuse->write_wait_first = w;
414 	}
415 	reuse->write_wait_last = w;
416 	w->write_wait_queued = 1;
417 }
418 
419 /** insert element in tree by id */
420 void
421 reuse_tree_by_id_insert(struct reuse_tcp* reuse, struct waiting_tcp* w)
422 {
423 #ifdef UNBOUND_DEBUG
424 	rbnode_type* added;
425 #endif
426 	log_assert(w->id_node.key == NULL);
427 	w->id_node.key = w;
428 #ifdef UNBOUND_DEBUG
429 	added =
430 #else
431 	(void)
432 #endif
433 	rbtree_insert(&reuse->tree_by_id, &w->id_node);
434 	log_assert(added);  /* should have been added */
435 }
436 
437 /** find element in tree by id */
438 struct waiting_tcp*
439 reuse_tcp_by_id_find(struct reuse_tcp* reuse, uint16_t id)
440 {
441 	struct waiting_tcp key_w;
442 	rbnode_type* n;
443 	memset(&key_w, 0, sizeof(key_w));
444 	key_w.id_node.key = &key_w;
445 	key_w.id = id;
446 	n = rbtree_search(&reuse->tree_by_id, &key_w);
447 	if(!n) return NULL;
448 	return (struct waiting_tcp*)n->key;
449 }
450 
451 /** return ID value of rbnode in tree_by_id */
452 static uint16_t
453 tree_by_id_get_id(rbnode_type* node)
454 {
455 	struct waiting_tcp* w = (struct waiting_tcp*)node->key;
456 	return w->id;
457 }
458 
459 /** insert into reuse tcp tree and LRU, false on failure (duplicate) */
460 int
461 reuse_tcp_insert(struct outside_network* outnet, struct pending_tcp* pend_tcp)
462 {
463 	log_reuse_tcp(VERB_CLIENT, "reuse_tcp_insert", &pend_tcp->reuse);
464 	if(pend_tcp->reuse.item_on_lru_list) {
465 		if(!pend_tcp->reuse.node.key)
466 			log_err("internal error: reuse_tcp_insert: "
467 				"in lru list without key");
468 		return 1;
469 	}
470 	pend_tcp->reuse.node.key = &pend_tcp->reuse;
471 	pend_tcp->reuse.pending = pend_tcp;
472 	if(!rbtree_insert(&outnet->tcp_reuse, &pend_tcp->reuse.node)) {
473 		/* We are not in the LRU list but we are already in the
474 		 * tcp_reuse tree, strange.
475 		 * Continue to add ourselves to the LRU list. */
476 		log_err("internal error: reuse_tcp_insert: in lru list but "
477 			"not in the tree");
478 	}
479 	/* insert into LRU, first is newest */
480 	pend_tcp->reuse.lru_prev = NULL;
481 	if(outnet->tcp_reuse_first) {
482 		pend_tcp->reuse.lru_next = outnet->tcp_reuse_first;
483 		log_assert(pend_tcp->reuse.lru_next != &pend_tcp->reuse);
484 		outnet->tcp_reuse_first->lru_prev = &pend_tcp->reuse;
485 		log_assert(outnet->tcp_reuse_first->lru_prev !=
486 			outnet->tcp_reuse_first);
487 	} else {
488 		pend_tcp->reuse.lru_next = NULL;
489 		outnet->tcp_reuse_last = &pend_tcp->reuse;
490 	}
491 	outnet->tcp_reuse_first = &pend_tcp->reuse;
492 	pend_tcp->reuse.item_on_lru_list = 1;
493 	log_assert((!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
494 		(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
495 	log_assert(outnet->tcp_reuse_first != outnet->tcp_reuse_first->lru_next &&
496 		outnet->tcp_reuse_first != outnet->tcp_reuse_first->lru_prev);
497 	log_assert(outnet->tcp_reuse_last != outnet->tcp_reuse_last->lru_next &&
498 		outnet->tcp_reuse_last != outnet->tcp_reuse_last->lru_prev);
499 	return 1;
500 }
501 
502 /** find reuse tcp stream to destination for query, or NULL if none */
503 static struct reuse_tcp*
504 reuse_tcp_find(struct outside_network* outnet, struct sockaddr_storage* addr,
505 	socklen_t addrlen, int use_ssl)
506 {
507 	struct waiting_tcp key_w;
508 	struct pending_tcp key_p;
509 	struct comm_point c;
510 	rbnode_type* result = NULL, *prev;
511 	verbose(VERB_CLIENT, "reuse_tcp_find");
512 	memset(&key_w, 0, sizeof(key_w));
513 	memset(&key_p, 0, sizeof(key_p));
514 	memset(&c, 0, sizeof(c));
515 	key_p.query = &key_w;
516 	key_p.c = &c;
517 	key_p.reuse.pending = &key_p;
518 	key_p.reuse.node.key = &key_p.reuse;
519 	if(use_ssl)
520 		key_p.reuse.is_ssl = 1;
521 	if(addrlen > (socklen_t)sizeof(key_p.reuse.addr))
522 		return NULL;
523 	memmove(&key_p.reuse.addr, addr, addrlen);
524 	key_p.reuse.addrlen = addrlen;
525 
526 	verbose(VERB_CLIENT, "reuse_tcp_find: num reuse streams %u",
527 		(unsigned)outnet->tcp_reuse.count);
528 	if(outnet->tcp_reuse.root == NULL ||
529 		outnet->tcp_reuse.root == RBTREE_NULL)
530 		return NULL;
531 	if(rbtree_find_less_equal(&outnet->tcp_reuse, &key_p.reuse,
532 		&result)) {
533 		/* exact match */
534 		/* but the key is on stack, and ptr is compared, impossible */
535 		log_assert(&key_p.reuse != (struct reuse_tcp*)result);
536 		log_assert(&key_p != ((struct reuse_tcp*)result)->pending);
537 	}
538 	/* not found, return null */
539 	if(!result || result == RBTREE_NULL)
540 		return NULL;
541 	verbose(VERB_CLIENT, "reuse_tcp_find check inexact match");
542 	/* inexact match, find one of possibly several connections to the
543 	 * same destination address, with the correct port, ssl, and
544 	 * also less than max number of open queries, or else, fail to open
545 	 * a new one */
546 	/* rewind to start of sequence of same address,port,ssl */
547 	prev = rbtree_previous(result);
548 	while(prev && prev != RBTREE_NULL &&
549 		reuse_cmp_addrportssl(prev->key, &key_p.reuse) == 0) {
550 		result = prev;
551 		prev = rbtree_previous(result);
552 	}
553 
554 	/* loop to find first one that has correct characteristics */
555 	while(result && result != RBTREE_NULL &&
556 		reuse_cmp_addrportssl(result->key, &key_p.reuse) == 0) {
557 		if(((struct reuse_tcp*)result)->tree_by_id.count <
558 			outnet->max_reuse_tcp_queries) {
559 			/* same address, port, ssl-yes-or-no, and has
560 			 * space for another query */
561 			return (struct reuse_tcp*)result;
562 		}
563 		result = rbtree_next(result);
564 	}
565 	return NULL;
566 }
567 
568 /** use the buffer to setup writing the query */
569 static void
570 outnet_tcp_take_query_setup(int s, struct pending_tcp* pend,
571 	struct waiting_tcp* w)
572 {
573 	struct timeval tv;
574 	verbose(VERB_CLIENT, "outnet_tcp_take_query_setup: setup packet to write "
575 		"len %d timeout %d msec",
576 		(int)w->pkt_len, w->timeout);
577 	pend->c->tcp_write_pkt = w->pkt;
578 	pend->c->tcp_write_pkt_len = w->pkt_len;
579 	pend->c->tcp_write_and_read = 1;
580 	pend->c->tcp_write_byte_count = 0;
581 	pend->c->tcp_is_reading = 0;
582 	comm_point_start_listening(pend->c, s, -1);
583 	/* set timer on the waiting_tcp entry, this is the write timeout
584 	 * for the written packet.  The timer on pend->c is the timer
585 	 * for when there is no written packet and we have readtimeouts */
586 #ifndef S_SPLINT_S
587 	tv.tv_sec = w->timeout/1000;
588 	tv.tv_usec = (w->timeout%1000)*1000;
589 #endif
590 	/* if the waiting_tcp was previously waiting for a buffer in the
591 	 * outside_network.tcpwaitlist, then the timer is reset now that
592 	 * we start writing it */
593 	comm_timer_set(w->timer, &tv);
594 }
595 
596 /** use next free buffer to service a tcp query */
597 static int
598 outnet_tcp_take_into_use(struct waiting_tcp* w)
599 {
600 	struct pending_tcp* pend = w->outnet->tcp_free;
601 	int s;
602 	log_assert(pend);
603 	log_assert(w->pkt);
604 	log_assert(w->pkt_len > 0);
605 	log_assert(w->addrlen > 0);
606 	pend->c->tcp_do_toggle_rw = 0;
607 	pend->c->tcp_do_close = 0;
608 	/* open socket */
609 	s = outnet_get_tcp_fd(&w->addr, w->addrlen, w->outnet->tcp_mss, w->outnet->ip_dscp);
610 
611 	if(s == -1)
612 		return 0;
613 
614 	if(!pick_outgoing_tcp(pend, w, s))
615 		return 0;
616 
617 	fd_set_nonblock(s);
618 #ifdef USE_OSX_MSG_FASTOPEN
619 	/* API for fast open is different here. We use a connectx() function and
620 	   then writes can happen as normal even using SSL.*/
621 	/* connectx requires that the len be set in the sockaddr struct*/
622 	struct sockaddr_in *addr_in = (struct sockaddr_in *)&w->addr;
623 	addr_in->sin_len = w->addrlen;
624 	sa_endpoints_t endpoints;
625 	endpoints.sae_srcif = 0;
626 	endpoints.sae_srcaddr = NULL;
627 	endpoints.sae_srcaddrlen = 0;
628 	endpoints.sae_dstaddr = (struct sockaddr *)&w->addr;
629 	endpoints.sae_dstaddrlen = w->addrlen;
630 	if (connectx(s, &endpoints, SAE_ASSOCID_ANY,
631 	             CONNECT_DATA_IDEMPOTENT | CONNECT_RESUME_ON_READ_WRITE,
632 	             NULL, 0, NULL, NULL) == -1) {
633 		/* if fails, failover to connect for OSX 10.10 */
634 #ifdef EINPROGRESS
635 		if(errno != EINPROGRESS) {
636 #else
637 		if(1) {
638 #endif
639 			if(connect(s, (struct sockaddr*)&w->addr, w->addrlen) == -1) {
640 #else /* USE_OSX_MSG_FASTOPEN*/
641 #ifdef USE_MSG_FASTOPEN
642 	pend->c->tcp_do_fastopen = 1;
643 	/* Only do TFO for TCP in which case no connect() is required here.
644 	   Don't combine client TFO with SSL, since OpenSSL can't
645 	   currently support doing a handshake on fd that already isn't connected*/
646 	if (w->outnet->sslctx && w->ssl_upstream) {
647 		if(connect(s, (struct sockaddr*)&w->addr, w->addrlen) == -1) {
648 #else /* USE_MSG_FASTOPEN*/
649 	if(connect(s, (struct sockaddr*)&w->addr, w->addrlen) == -1) {
650 #endif /* USE_MSG_FASTOPEN*/
651 #endif /* USE_OSX_MSG_FASTOPEN*/
652 #ifndef USE_WINSOCK
653 #ifdef EINPROGRESS
654 		if(errno != EINPROGRESS) {
655 #else
656 		if(1) {
657 #endif
658 			if(tcp_connect_errno_needs_log(
659 				(struct sockaddr*)&w->addr, w->addrlen))
660 				log_err_addr("outgoing tcp: connect",
661 					strerror(errno), &w->addr, w->addrlen);
662 			close(s);
663 #else /* USE_WINSOCK */
664 		if(WSAGetLastError() != WSAEINPROGRESS &&
665 			WSAGetLastError() != WSAEWOULDBLOCK) {
666 			closesocket(s);
667 #endif
668 			return 0;
669 		}
670 	}
671 #ifdef USE_MSG_FASTOPEN
672 	}
673 #endif /* USE_MSG_FASTOPEN */
674 #ifdef USE_OSX_MSG_FASTOPEN
675 		}
676 	}
677 #endif /* USE_OSX_MSG_FASTOPEN */
678 	if(w->outnet->sslctx && w->ssl_upstream) {
679 		pend->c->ssl = outgoing_ssl_fd(w->outnet->sslctx, s);
680 		if(!pend->c->ssl) {
681 			pend->c->fd = s;
682 			comm_point_close(pend->c);
683 			return 0;
684 		}
685 		verbose(VERB_ALGO, "the query is using TLS encryption, for %s",
686 			(w->tls_auth_name?w->tls_auth_name:"an unauthenticated connection"));
687 #ifdef USE_WINSOCK
688 		comm_point_tcp_win_bio_cb(pend->c, pend->c->ssl);
689 #endif
690 		pend->c->ssl_shake_state = comm_ssl_shake_write;
691 		if(!set_auth_name_on_ssl(pend->c->ssl, w->tls_auth_name,
692 			w->outnet->tls_use_sni)) {
693 			pend->c->fd = s;
694 #ifdef HAVE_SSL
695 			SSL_free(pend->c->ssl);
696 #endif
697 			pend->c->ssl = NULL;
698 			comm_point_close(pend->c);
699 			return 0;
700 		}
701 	}
702 	w->next_waiting = (void*)pend;
703 	w->outnet->num_tcp_outgoing++;
704 	w->outnet->tcp_free = pend->next_free;
705 	pend->next_free = NULL;
706 	pend->query = w;
707 	pend->reuse.outnet = w->outnet;
708 	pend->c->repinfo.addrlen = w->addrlen;
709 	pend->c->tcp_more_read_again = &pend->reuse.cp_more_read_again;
710 	pend->c->tcp_more_write_again = &pend->reuse.cp_more_write_again;
711 	pend->reuse.cp_more_read_again = 0;
712 	pend->reuse.cp_more_write_again = 0;
713 	memcpy(&pend->c->repinfo.addr, &w->addr, w->addrlen);
714 	pend->reuse.pending = pend;
715 
716 	/* Remove from tree in case the is_ssl will be different and causes the
717 	 * identity of the reuse_tcp to change; could result in nodes not being
718 	 * deleted from the tree (because the new identity does not match the
719 	 * previous node) but their ->key would be changed to NULL. */
720 	if(pend->reuse.node.key)
721 		reuse_tcp_remove_tree_list(w->outnet, &pend->reuse);
722 
723 	if(pend->c->ssl)
724 		pend->reuse.is_ssl = 1;
725 	else	pend->reuse.is_ssl = 0;
726 	/* insert in reuse by address tree if not already inserted there */
727 	(void)reuse_tcp_insert(w->outnet, pend);
728 	reuse_tree_by_id_insert(&pend->reuse, w);
729 	outnet_tcp_take_query_setup(s, pend, w);
730 	return 1;
731 }
732 
733 /** Touch the lru of a reuse_tcp element, it is in use.
734  * This moves it to the front of the list, where it is not likely to
735  * be closed.  Items at the back of the list are closed to make space. */
736 void
737 reuse_tcp_lru_touch(struct outside_network* outnet, struct reuse_tcp* reuse)
738 {
739 	if(!reuse->item_on_lru_list) {
740 		log_err("internal error: we need to touch the lru_list but item not in list");
741 		return; /* not on the list, no lru to modify */
742 	}
743 	log_assert(reuse->lru_prev ||
744 		(!reuse->lru_prev && outnet->tcp_reuse_first == reuse));
745 	if(!reuse->lru_prev)
746 		return; /* already first in the list */
747 	/* remove at current position */
748 	/* since it is not first, there is a previous element */
749 	reuse->lru_prev->lru_next = reuse->lru_next;
750 	log_assert(reuse->lru_prev->lru_next != reuse->lru_prev);
751 	if(reuse->lru_next)
752 		reuse->lru_next->lru_prev = reuse->lru_prev;
753 	else	outnet->tcp_reuse_last = reuse->lru_prev;
754 	log_assert(!reuse->lru_next || reuse->lru_next->lru_prev != reuse->lru_next);
755 	log_assert(outnet->tcp_reuse_last != outnet->tcp_reuse_last->lru_next &&
756 		outnet->tcp_reuse_last != outnet->tcp_reuse_last->lru_prev);
757 	/* insert at the front */
758 	reuse->lru_prev = NULL;
759 	reuse->lru_next = outnet->tcp_reuse_first;
760 	if(outnet->tcp_reuse_first) {
761 		outnet->tcp_reuse_first->lru_prev = reuse;
762 	}
763 	log_assert(reuse->lru_next != reuse);
764 	/* since it is not first, it is not the only element and
765 	 * lru_next is thus not NULL and thus reuse is now not the last in
766 	 * the list, so outnet->tcp_reuse_last does not need to be modified */
767 	outnet->tcp_reuse_first = reuse;
768 	log_assert(outnet->tcp_reuse_first != outnet->tcp_reuse_first->lru_next &&
769 		outnet->tcp_reuse_first != outnet->tcp_reuse_first->lru_prev);
770 	log_assert((!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
771 		(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
772 }
773 
774 /** Snip the last reuse_tcp element off of the LRU list */
775 struct reuse_tcp*
776 reuse_tcp_lru_snip(struct outside_network* outnet)
777 {
778 	struct reuse_tcp* reuse = outnet->tcp_reuse_last;
779 	if(!reuse) return NULL;
780 	/* snip off of LRU */
781 	log_assert(reuse->lru_next == NULL);
782 	if(reuse->lru_prev) {
783 		outnet->tcp_reuse_last = reuse->lru_prev;
784 		reuse->lru_prev->lru_next = NULL;
785 	} else {
786 		outnet->tcp_reuse_last = NULL;
787 		outnet->tcp_reuse_first = NULL;
788 	}
789 	log_assert((!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
790 		(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
791 	reuse->item_on_lru_list = 0;
792 	reuse->lru_next = NULL;
793 	reuse->lru_prev = NULL;
794 	return reuse;
795 }
796 
797 /** call callback on waiting_tcp, if not NULL */
798 static void
799 waiting_tcp_callback(struct waiting_tcp* w, struct comm_point* c, int error,
800 	struct comm_reply* reply_info)
801 {
802 	if(w && w->cb) {
803 		fptr_ok(fptr_whitelist_pending_tcp(w->cb));
804 		(void)(*w->cb)(c, w->cb_arg, error, reply_info);
805 	}
806 }
807 
808 /** add waiting_tcp element to the outnet tcp waiting list */
809 static void
810 outnet_add_tcp_waiting(struct outside_network* outnet, struct waiting_tcp* w)
811 {
812 	struct timeval tv;
813 	log_assert(!w->on_tcp_waiting_list);
814 	if(w->on_tcp_waiting_list)
815 		return;
816 	w->next_waiting = NULL;
817 	if(outnet->tcp_wait_last)
818 		outnet->tcp_wait_last->next_waiting = w;
819 	else	outnet->tcp_wait_first = w;
820 	outnet->tcp_wait_last = w;
821 	w->on_tcp_waiting_list = 1;
822 #ifndef S_SPLINT_S
823 	tv.tv_sec = w->timeout/1000;
824 	tv.tv_usec = (w->timeout%1000)*1000;
825 #endif
826 	comm_timer_set(w->timer, &tv);
827 }
828 
829 /** add waiting_tcp element as first to the outnet tcp waiting list */
830 static void
831 outnet_add_tcp_waiting_first(struct outside_network* outnet,
832 	struct waiting_tcp* w, int reset_timer)
833 {
834 	struct timeval tv;
835 	log_assert(!w->on_tcp_waiting_list);
836 	if(w->on_tcp_waiting_list)
837 		return;
838 	w->next_waiting = outnet->tcp_wait_first;
839 	if(!outnet->tcp_wait_last)
840 		outnet->tcp_wait_last = w;
841 	outnet->tcp_wait_first = w;
842 	w->on_tcp_waiting_list = 1;
843 	if(reset_timer) {
844 #ifndef S_SPLINT_S
845 		tv.tv_sec = w->timeout/1000;
846 		tv.tv_usec = (w->timeout%1000)*1000;
847 #endif
848 		comm_timer_set(w->timer, &tv);
849 	}
850 	log_assert(
851 		(!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
852 		(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
853 }
854 
855 /** see if buffers can be used to service TCP queries */
856 static void
857 use_free_buffer(struct outside_network* outnet)
858 {
859 	struct waiting_tcp* w;
860 	while(outnet->tcp_wait_first && !outnet->want_to_quit) {
861 #ifdef USE_DNSTAP
862 		struct pending_tcp* pend_tcp = NULL;
863 #endif
864 		struct reuse_tcp* reuse = NULL;
865 		w = outnet->tcp_wait_first;
866 		log_assert(w->on_tcp_waiting_list);
867 		outnet->tcp_wait_first = w->next_waiting;
868 		if(outnet->tcp_wait_last == w)
869 			outnet->tcp_wait_last = NULL;
870 		log_assert(
871 			(!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
872 			(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
873 		w->on_tcp_waiting_list = 0;
874 		reuse = reuse_tcp_find(outnet, &w->addr, w->addrlen,
875 			w->ssl_upstream);
876 		/* re-select an ID when moving to a new TCP buffer */
877 		w->id = tcp_select_id(outnet, reuse);
878 		LDNS_ID_SET(w->pkt, w->id);
879 		if(reuse) {
880 			log_reuse_tcp(VERB_CLIENT, "use free buffer for waiting tcp: "
881 				"found reuse", reuse);
882 #ifdef USE_DNSTAP
883 			pend_tcp = reuse->pending;
884 #endif
885 			reuse_tcp_lru_touch(outnet, reuse);
886 			comm_timer_disable(w->timer);
887 			w->next_waiting = (void*)reuse->pending;
888 			reuse_tree_by_id_insert(reuse, w);
889 			if(reuse->pending->query) {
890 				/* on the write wait list */
891 				reuse_write_wait_push_back(reuse, w);
892 			} else {
893 				/* write straight away */
894 				/* stop the timer on read of the fd */
895 				comm_point_stop_listening(reuse->pending->c);
896 				reuse->pending->query = w;
897 				outnet_tcp_take_query_setup(
898 					reuse->pending->c->fd, reuse->pending,
899 					w);
900 			}
901 		} else if(outnet->tcp_free) {
902 			struct pending_tcp* pend = w->outnet->tcp_free;
903 			rbtree_init(&pend->reuse.tree_by_id, reuse_id_cmp);
904 			pend->reuse.pending = pend;
905 			memcpy(&pend->reuse.addr, &w->addr, w->addrlen);
906 			pend->reuse.addrlen = w->addrlen;
907 			if(!outnet_tcp_take_into_use(w)) {
908 				waiting_tcp_callback(w, NULL, NETEVENT_CLOSED,
909 					NULL);
910 				waiting_tcp_delete(w);
911 #ifdef USE_DNSTAP
912 				w = NULL;
913 #endif
914 			}
915 #ifdef USE_DNSTAP
916 			pend_tcp = pend;
917 #endif
918 		} else {
919 			/* no reuse and no free buffer, put back at the start */
920 			outnet_add_tcp_waiting_first(outnet, w, 0);
921 			break;
922 		}
923 #ifdef USE_DNSTAP
924 		if(outnet->dtenv && pend_tcp && w && w->sq &&
925 			(outnet->dtenv->log_resolver_query_messages ||
926 			outnet->dtenv->log_forwarder_query_messages)) {
927 			sldns_buffer tmp;
928 			sldns_buffer_init_frm_data(&tmp, w->pkt, w->pkt_len);
929 			dt_msg_send_outside_query(outnet->dtenv, &w->sq->addr,
930 				&pend_tcp->pi->addr, comm_tcp, w->sq->zone,
931 				w->sq->zonelen, &tmp);
932 		}
933 #endif
934 	}
935 }
936 
937 /** delete element from tree by id */
938 static void
939 reuse_tree_by_id_delete(struct reuse_tcp* reuse, struct waiting_tcp* w)
940 {
941 #ifdef UNBOUND_DEBUG
942 	rbnode_type* rem;
943 #endif
944 	log_assert(w->id_node.key != NULL);
945 #ifdef UNBOUND_DEBUG
946 	rem =
947 #else
948 	(void)
949 #endif
950 	rbtree_delete(&reuse->tree_by_id, w);
951 	log_assert(rem);  /* should have been there */
952 	w->id_node.key = NULL;
953 }
954 
955 /** move writewait list to go for another connection. */
956 static void
957 reuse_move_writewait_away(struct outside_network* outnet,
958 	struct pending_tcp* pend)
959 {
960 	/* the writewait list has not been written yet, so if the
961 	 * stream was closed, they have not actually been failed, only
962 	 * the queries written.  Other queries can get written to another
963 	 * stream.  For upstreams that do not support multiple queries
964 	 * and answers, the stream can get closed, and then the queries
965 	 * can get written on a new socket */
966 	struct waiting_tcp* w;
967 	if(pend->query && pend->query->error_count == 0 &&
968 		pend->c->tcp_write_pkt == pend->query->pkt &&
969 		pend->c->tcp_write_pkt_len == pend->query->pkt_len) {
970 		/* since the current query is not written, it can also
971 		 * move to a free buffer */
972 		if(verbosity >= VERB_CLIENT && pend->query->pkt_len > 12+2+2 &&
973 			LDNS_QDCOUNT(pend->query->pkt) > 0 &&
974 			dname_valid(pend->query->pkt+12, pend->query->pkt_len-12)) {
975 			char buf[LDNS_MAX_DOMAINLEN+1];
976 			dname_str(pend->query->pkt+12, buf);
977 			verbose(VERB_CLIENT, "reuse_move_writewait_away current %s %d bytes were written",
978 				buf, (int)pend->c->tcp_write_byte_count);
979 		}
980 		pend->c->tcp_write_pkt = NULL;
981 		pend->c->tcp_write_pkt_len = 0;
982 		pend->c->tcp_write_and_read = 0;
983 		pend->reuse.cp_more_read_again = 0;
984 		pend->reuse.cp_more_write_again = 0;
985 		pend->c->tcp_is_reading = 1;
986 		w = pend->query;
987 		pend->query = NULL;
988 		/* increase error count, so that if the next socket fails too
989 		 * the server selection is run again with this query failed
990 		 * and it can select a different server (if possible), or
991 		 * fail the query */
992 		w->error_count ++;
993 		reuse_tree_by_id_delete(&pend->reuse, w);
994 		outnet_add_tcp_waiting(outnet, w);
995 	}
996 	while((w = reuse_write_wait_pop(&pend->reuse)) != NULL) {
997 		if(verbosity >= VERB_CLIENT && w->pkt_len > 12+2+2 &&
998 			LDNS_QDCOUNT(w->pkt) > 0 &&
999 			dname_valid(w->pkt+12, w->pkt_len-12)) {
1000 			char buf[LDNS_MAX_DOMAINLEN+1];
1001 			dname_str(w->pkt+12, buf);
1002 			verbose(VERB_CLIENT, "reuse_move_writewait_away item %s", buf);
1003 		}
1004 		reuse_tree_by_id_delete(&pend->reuse, w);
1005 		outnet_add_tcp_waiting(outnet, w);
1006 	}
1007 }
1008 
1009 /** remove reused element from tree and lru list */
1010 void
1011 reuse_tcp_remove_tree_list(struct outside_network* outnet,
1012 	struct reuse_tcp* reuse)
1013 {
1014 	verbose(VERB_CLIENT, "reuse_tcp_remove_tree_list");
1015 	if(reuse->node.key) {
1016 		/* delete it from reuse tree */
1017 		if(!rbtree_delete(&outnet->tcp_reuse, reuse)) {
1018 			/* should not be possible, it should be there */
1019 			char buf[256];
1020 			addr_to_str(&reuse->addr, reuse->addrlen, buf,
1021 				sizeof(buf));
1022 			log_err("reuse tcp delete: node not present, internal error, %s ssl %d lru %d", buf, reuse->is_ssl, reuse->item_on_lru_list);
1023 		}
1024 		reuse->node.key = NULL;
1025 		/* defend against loops on broken tree by zeroing the
1026 		 * rbnode structure */
1027 		memset(&reuse->node, 0, sizeof(reuse->node));
1028 	}
1029 	/* delete from reuse list */
1030 	if(reuse->item_on_lru_list) {
1031 		if(reuse->lru_prev) {
1032 			/* assert that members of the lru list are waiting
1033 			 * and thus have a pending pointer to the struct */
1034 			log_assert(reuse->lru_prev->pending);
1035 			reuse->lru_prev->lru_next = reuse->lru_next;
1036 			log_assert(reuse->lru_prev->lru_next != reuse->lru_prev);
1037 		} else {
1038 			log_assert(!reuse->lru_next || reuse->lru_next->pending);
1039 			outnet->tcp_reuse_first = reuse->lru_next;
1040 			log_assert(!outnet->tcp_reuse_first ||
1041 				(outnet->tcp_reuse_first !=
1042 				 outnet->tcp_reuse_first->lru_next &&
1043 				 outnet->tcp_reuse_first !=
1044 				 outnet->tcp_reuse_first->lru_prev));
1045 		}
1046 		if(reuse->lru_next) {
1047 			/* assert that members of the lru list are waiting
1048 			 * and thus have a pending pointer to the struct */
1049 			log_assert(reuse->lru_next->pending);
1050 			reuse->lru_next->lru_prev = reuse->lru_prev;
1051 			log_assert(reuse->lru_next->lru_prev != reuse->lru_next);
1052 		} else {
1053 			log_assert(!reuse->lru_prev || reuse->lru_prev->pending);
1054 			outnet->tcp_reuse_last = reuse->lru_prev;
1055 			log_assert(!outnet->tcp_reuse_last ||
1056 				(outnet->tcp_reuse_last !=
1057 				 outnet->tcp_reuse_last->lru_next &&
1058 				 outnet->tcp_reuse_last !=
1059 				 outnet->tcp_reuse_last->lru_prev));
1060 		}
1061 		log_assert((!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
1062 			(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
1063 		reuse->item_on_lru_list = 0;
1064 		reuse->lru_next = NULL;
1065 		reuse->lru_prev = NULL;
1066 	}
1067 	reuse->pending = NULL;
1068 }
1069 
1070 /** helper function that deletes an element from the tree of readwait
1071  * elements in tcp reuse structure */
1072 static void reuse_del_readwait_elem(rbnode_type* node, void* ATTR_UNUSED(arg))
1073 {
1074 	struct waiting_tcp* w = (struct waiting_tcp*)node->key;
1075 	waiting_tcp_delete(w);
1076 }
1077 
1078 /** delete readwait waiting_tcp elements, deletes the elements in the list */
1079 void reuse_del_readwait(rbtree_type* tree_by_id)
1080 {
1081 	if(tree_by_id->root == NULL ||
1082 		tree_by_id->root == RBTREE_NULL)
1083 		return;
1084 	traverse_postorder(tree_by_id, &reuse_del_readwait_elem, NULL);
1085 	rbtree_init(tree_by_id, reuse_id_cmp);
1086 }
1087 
1088 /** decommission a tcp buffer, closes commpoint and frees waiting_tcp entry */
1089 static void
1090 decommission_pending_tcp(struct outside_network* outnet,
1091 	struct pending_tcp* pend)
1092 {
1093 	verbose(VERB_CLIENT, "decommission_pending_tcp");
1094 	/* A certain code path can lead here twice for the same pending_tcp
1095 	 * creating a loop in the free pending_tcp list. */
1096 	if(outnet->tcp_free != pend) {
1097 		pend->next_free = outnet->tcp_free;
1098 		outnet->tcp_free = pend;
1099 	}
1100 	if(pend->reuse.node.key) {
1101 		/* needs unlink from the reuse tree to get deleted */
1102 		reuse_tcp_remove_tree_list(outnet, &pend->reuse);
1103 	}
1104 	/* free SSL structure after remove from outnet tcp reuse tree,
1105 	 * because the c->ssl null or not is used for sorting in the tree */
1106 	if(pend->c->ssl) {
1107 #ifdef HAVE_SSL
1108 		SSL_shutdown(pend->c->ssl);
1109 		SSL_free(pend->c->ssl);
1110 		pend->c->ssl = NULL;
1111 #endif
1112 	}
1113 	comm_point_close(pend->c);
1114 	pend->reuse.cp_more_read_again = 0;
1115 	pend->reuse.cp_more_write_again = 0;
1116 	/* unlink the query and writewait list, it is part of the tree
1117 	 * nodes and is deleted */
1118 	pend->query = NULL;
1119 	pend->reuse.write_wait_first = NULL;
1120 	pend->reuse.write_wait_last = NULL;
1121 	reuse_del_readwait(&pend->reuse.tree_by_id);
1122 }
1123 
1124 /** perform failure callbacks for waiting queries in reuse read rbtree */
1125 static void reuse_cb_readwait_for_failure(rbtree_type* tree_by_id, int err)
1126 {
1127 	rbnode_type* node;
1128 	if(tree_by_id->root == NULL ||
1129 		tree_by_id->root == RBTREE_NULL)
1130 		return;
1131 	node = rbtree_first(tree_by_id);
1132 	while(node && node != RBTREE_NULL) {
1133 		struct waiting_tcp* w = (struct waiting_tcp*)node->key;
1134 		waiting_tcp_callback(w, NULL, err, NULL);
1135 		node = rbtree_next(node);
1136 	}
1137 }
1138 
1139 /** perform callbacks for failure and also decommission pending tcp.
1140  * the callbacks remove references in sq->pending to the waiting_tcp
1141  * members of the tree_by_id in the pending tcp.  The pending_tcp is
1142  * removed before the callbacks, so that the callbacks do not modify
1143  * the pending_tcp due to its reference in the outside_network reuse tree */
1144 static void reuse_cb_and_decommission(struct outside_network* outnet,
1145 	struct pending_tcp* pend, int error)
1146 {
1147 	rbtree_type store;
1148 	store = pend->reuse.tree_by_id;
1149 	pend->query = NULL;
1150 	rbtree_init(&pend->reuse.tree_by_id, reuse_id_cmp);
1151 	pend->reuse.write_wait_first = NULL;
1152 	pend->reuse.write_wait_last = NULL;
1153 	decommission_pending_tcp(outnet, pend);
1154 	reuse_cb_readwait_for_failure(&store, error);
1155 	reuse_del_readwait(&store);
1156 }
1157 
1158 /** set timeout on tcp fd and setup read event to catch incoming dns msgs */
1159 static void
1160 reuse_tcp_setup_timeout(struct pending_tcp* pend_tcp, int tcp_reuse_timeout)
1161 {
1162 	log_reuse_tcp(VERB_CLIENT, "reuse_tcp_setup_timeout", &pend_tcp->reuse);
1163 	comm_point_start_listening(pend_tcp->c, -1, tcp_reuse_timeout);
1164 }
1165 
1166 /** set timeout on tcp fd and setup read event to catch incoming dns msgs */
1167 static void
1168 reuse_tcp_setup_read_and_timeout(struct pending_tcp* pend_tcp, int tcp_reuse_timeout)
1169 {
1170 	log_reuse_tcp(VERB_CLIENT, "reuse_tcp_setup_readtimeout", &pend_tcp->reuse);
1171 	sldns_buffer_clear(pend_tcp->c->buffer);
1172 	pend_tcp->c->tcp_is_reading = 1;
1173 	pend_tcp->c->tcp_byte_count = 0;
1174 	comm_point_stop_listening(pend_tcp->c);
1175 	comm_point_start_listening(pend_tcp->c, -1, tcp_reuse_timeout);
1176 }
1177 
1178 int
1179 outnet_tcp_cb(struct comm_point* c, void* arg, int error,
1180 	struct comm_reply *reply_info)
1181 {
1182 	struct pending_tcp* pend = (struct pending_tcp*)arg;
1183 	struct outside_network* outnet = pend->reuse.outnet;
1184 	struct waiting_tcp* w = NULL;
1185 	log_assert(pend->reuse.item_on_lru_list && pend->reuse.node.key);
1186 	verbose(VERB_ALGO, "outnettcp cb");
1187 	if(error == NETEVENT_TIMEOUT) {
1188 		if(pend->c->tcp_write_and_read) {
1189 			verbose(VERB_QUERY, "outnettcp got tcp timeout "
1190 				"for read, ignored because write underway");
1191 			/* if we are writing, ignore readtimer, wait for write timer
1192 			 * or write is done */
1193 			return 0;
1194 		} else {
1195 			verbose(VERB_QUERY, "outnettcp got tcp timeout %s",
1196 				(pend->reuse.tree_by_id.count?"for reading pkt":
1197 				"for keepalive for reuse"));
1198 		}
1199 		/* must be timeout for reading or keepalive reuse,
1200 		 * close it. */
1201 		reuse_tcp_remove_tree_list(outnet, &pend->reuse);
1202 	} else if(error == NETEVENT_PKT_WRITTEN) {
1203 		/* the packet we want to write has been written. */
1204 		verbose(VERB_ALGO, "outnet tcp pkt was written event");
1205 		log_assert(c == pend->c);
1206 		log_assert(pend->query->pkt == pend->c->tcp_write_pkt);
1207 		log_assert(pend->query->pkt_len == pend->c->tcp_write_pkt_len);
1208 		pend->c->tcp_write_pkt = NULL;
1209 		pend->c->tcp_write_pkt_len = 0;
1210 		/* the pend.query is already in tree_by_id */
1211 		log_assert(pend->query->id_node.key);
1212 		pend->query = NULL;
1213 		/* setup to write next packet or setup read timeout */
1214 		if(pend->reuse.write_wait_first) {
1215 			verbose(VERB_ALGO, "outnet tcp setup next pkt");
1216 			/* we can write it straight away perhaps, set flag
1217 			 * because this callback called after a tcp write
1218 			 * succeeded and likely more buffer space is available
1219 			 * and we can write some more. */
1220 			pend->reuse.cp_more_write_again = 1;
1221 			pend->query = reuse_write_wait_pop(&pend->reuse);
1222 			comm_point_stop_listening(pend->c);
1223 			outnet_tcp_take_query_setup(pend->c->fd, pend,
1224 				pend->query);
1225 		} else {
1226 			verbose(VERB_ALGO, "outnet tcp writes done, wait");
1227 			pend->c->tcp_write_and_read = 0;
1228 			pend->reuse.cp_more_read_again = 0;
1229 			pend->reuse.cp_more_write_again = 0;
1230 			pend->c->tcp_is_reading = 1;
1231 			comm_point_stop_listening(pend->c);
1232 			reuse_tcp_setup_timeout(pend, outnet->tcp_reuse_timeout);
1233 		}
1234 		return 0;
1235 	} else if(error != NETEVENT_NOERROR) {
1236 		verbose(VERB_QUERY, "outnettcp got tcp error %d", error);
1237 		reuse_move_writewait_away(outnet, pend);
1238 		/* pass error below and exit */
1239 	} else {
1240 		/* check ID */
1241 		if(sldns_buffer_limit(c->buffer) < sizeof(uint16_t)) {
1242 			log_addr(VERB_QUERY,
1243 				"outnettcp: bad ID in reply, too short, from:",
1244 				&pend->reuse.addr, pend->reuse.addrlen);
1245 			error = NETEVENT_CLOSED;
1246 		} else {
1247 			uint16_t id = LDNS_ID_WIRE(sldns_buffer_begin(
1248 				c->buffer));
1249 			/* find the query the reply is for */
1250 			w = reuse_tcp_by_id_find(&pend->reuse, id);
1251 		}
1252 	}
1253 	if(error == NETEVENT_NOERROR && !w) {
1254 		/* no struct waiting found in tree, no reply to call */
1255 		log_addr(VERB_QUERY, "outnettcp: bad ID in reply, from:",
1256 			&pend->reuse.addr, pend->reuse.addrlen);
1257 		error = NETEVENT_CLOSED;
1258 	}
1259 	if(error == NETEVENT_NOERROR) {
1260 		/* add to reuse tree so it can be reused, if not a failure.
1261 		 * This is possible if the state machine wants to make a tcp
1262 		 * query again to the same destination. */
1263 		if(outnet->tcp_reuse.count < outnet->tcp_reuse_max) {
1264 			(void)reuse_tcp_insert(outnet, pend);
1265 		}
1266 	}
1267 	if(w) {
1268 		reuse_tree_by_id_delete(&pend->reuse, w);
1269 		verbose(VERB_CLIENT, "outnet tcp callback query err %d buflen %d",
1270 			error, (int)sldns_buffer_limit(c->buffer));
1271 		waiting_tcp_callback(w, c, error, reply_info);
1272 		waiting_tcp_delete(w);
1273 	}
1274 	verbose(VERB_CLIENT, "outnet_tcp_cb reuse after cb");
1275 	if(error == NETEVENT_NOERROR && pend->reuse.node.key) {
1276 		verbose(VERB_CLIENT, "outnet_tcp_cb reuse after cb: keep it");
1277 		/* it is in the reuse_tcp tree, with other queries, or
1278 		 * on the empty list. do not decommission it */
1279 		/* if there are more outstanding queries, we could try to
1280 		 * read again, to see if it is on the input,
1281 		 * because this callback called after a successful read
1282 		 * and there could be more bytes to read on the input */
1283 		if(pend->reuse.tree_by_id.count != 0)
1284 			pend->reuse.cp_more_read_again = 1;
1285 		reuse_tcp_setup_read_and_timeout(pend, outnet->tcp_reuse_timeout);
1286 		return 0;
1287 	}
1288 	verbose(VERB_CLIENT, "outnet_tcp_cb reuse after cb: decommission it");
1289 	/* no queries on it, no space to keep it. or timeout or closed due
1290 	 * to error.  Close it */
1291 	reuse_cb_and_decommission(outnet, pend, (error==NETEVENT_TIMEOUT?
1292 		NETEVENT_TIMEOUT:NETEVENT_CLOSED));
1293 	use_free_buffer(outnet);
1294 	return 0;
1295 }
1296 
1297 /** lower use count on pc, see if it can be closed */
1298 static void
1299 portcomm_loweruse(struct outside_network* outnet, struct port_comm* pc)
1300 {
1301 	struct port_if* pif;
1302 	pc->num_outstanding--;
1303 	if(pc->num_outstanding > 0) {
1304 		return;
1305 	}
1306 	/* close it and replace in unused list */
1307 	verbose(VERB_ALGO, "close of port %d", pc->number);
1308 	comm_point_close(pc->cp);
1309 	pif = pc->pif;
1310 	log_assert(pif->inuse > 0);
1311 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
1312 	pif->avail_ports[pif->avail_total - pif->inuse] = pc->number;
1313 #endif
1314 	pif->inuse--;
1315 	pif->out[pc->index] = pif->out[pif->inuse];
1316 	pif->out[pc->index]->index = pc->index;
1317 	pc->next = outnet->unused_fds;
1318 	outnet->unused_fds = pc;
1319 }
1320 
1321 /** try to send waiting UDP queries */
1322 static void
1323 outnet_send_wait_udp(struct outside_network* outnet)
1324 {
1325 	struct pending* pend;
1326 	/* process waiting queries */
1327 	while(outnet->udp_wait_first && outnet->unused_fds
1328 		&& !outnet->want_to_quit) {
1329 		pend = outnet->udp_wait_first;
1330 		outnet->udp_wait_first = pend->next_waiting;
1331 		if(!pend->next_waiting) outnet->udp_wait_last = NULL;
1332 		sldns_buffer_clear(outnet->udp_buff);
1333 		sldns_buffer_write(outnet->udp_buff, pend->pkt, pend->pkt_len);
1334 		sldns_buffer_flip(outnet->udp_buff);
1335 		free(pend->pkt); /* freeing now makes get_mem correct */
1336 		pend->pkt = NULL;
1337 		pend->pkt_len = 0;
1338 		if(!randomize_and_send_udp(pend, outnet->udp_buff,
1339 			pend->timeout)) {
1340 			/* callback error on pending */
1341 			if(pend->cb) {
1342 				fptr_ok(fptr_whitelist_pending_udp(pend->cb));
1343 				(void)(*pend->cb)(outnet->unused_fds->cp, pend->cb_arg,
1344 					NETEVENT_CLOSED, NULL);
1345 			}
1346 			pending_delete(outnet, pend);
1347 		}
1348 	}
1349 }
1350 
1351 int
1352 outnet_udp_cb(struct comm_point* c, void* arg, int error,
1353 	struct comm_reply *reply_info)
1354 {
1355 	struct outside_network* outnet = (struct outside_network*)arg;
1356 	struct pending key;
1357 	struct pending* p;
1358 	verbose(VERB_ALGO, "answer cb");
1359 
1360 	if(error != NETEVENT_NOERROR) {
1361 		verbose(VERB_QUERY, "outnetudp got udp error %d", error);
1362 		return 0;
1363 	}
1364 	if(sldns_buffer_limit(c->buffer) < LDNS_HEADER_SIZE) {
1365 		verbose(VERB_QUERY, "outnetudp udp too short");
1366 		return 0;
1367 	}
1368 	log_assert(reply_info);
1369 
1370 	/* setup lookup key */
1371 	key.id = (unsigned)LDNS_ID_WIRE(sldns_buffer_begin(c->buffer));
1372 	memcpy(&key.addr, &reply_info->addr, reply_info->addrlen);
1373 	key.addrlen = reply_info->addrlen;
1374 	verbose(VERB_ALGO, "Incoming reply id = %4.4x", key.id);
1375 	log_addr(VERB_ALGO, "Incoming reply addr =",
1376 		&reply_info->addr, reply_info->addrlen);
1377 
1378 	/* find it, see if this thing is a valid query response */
1379 	verbose(VERB_ALGO, "lookup size is %d entries", (int)outnet->pending->count);
1380 	p = (struct pending*)rbtree_search(outnet->pending, &key);
1381 	if(!p) {
1382 		verbose(VERB_QUERY, "received unwanted or unsolicited udp reply dropped.");
1383 		log_buf(VERB_ALGO, "dropped message", c->buffer);
1384 		outnet->unwanted_replies++;
1385 		if(outnet->unwanted_threshold && ++outnet->unwanted_total
1386 			>= outnet->unwanted_threshold) {
1387 			log_warn("unwanted reply total reached threshold (%u)"
1388 				" you may be under attack."
1389 				" defensive action: clearing the cache",
1390 				(unsigned)outnet->unwanted_threshold);
1391 			fptr_ok(fptr_whitelist_alloc_cleanup(
1392 				outnet->unwanted_action));
1393 			(*outnet->unwanted_action)(outnet->unwanted_param);
1394 			outnet->unwanted_total = 0;
1395 		}
1396 		return 0;
1397 	}
1398 
1399 	verbose(VERB_ALGO, "received udp reply.");
1400 	log_buf(VERB_ALGO, "udp message", c->buffer);
1401 	if(p->pc->cp != c) {
1402 		verbose(VERB_QUERY, "received reply id,addr on wrong port. "
1403 			"dropped.");
1404 		outnet->unwanted_replies++;
1405 		if(outnet->unwanted_threshold && ++outnet->unwanted_total
1406 			>= outnet->unwanted_threshold) {
1407 			log_warn("unwanted reply total reached threshold (%u)"
1408 				" you may be under attack."
1409 				" defensive action: clearing the cache",
1410 				(unsigned)outnet->unwanted_threshold);
1411 			fptr_ok(fptr_whitelist_alloc_cleanup(
1412 				outnet->unwanted_action));
1413 			(*outnet->unwanted_action)(outnet->unwanted_param);
1414 			outnet->unwanted_total = 0;
1415 		}
1416 		return 0;
1417 	}
1418 	comm_timer_disable(p->timer);
1419 	verbose(VERB_ALGO, "outnet handle udp reply");
1420 	/* delete from tree first in case callback creates a retry */
1421 	(void)rbtree_delete(outnet->pending, p->node.key);
1422 	if(p->cb) {
1423 		fptr_ok(fptr_whitelist_pending_udp(p->cb));
1424 		(void)(*p->cb)(p->pc->cp, p->cb_arg, NETEVENT_NOERROR, reply_info);
1425 	}
1426 	portcomm_loweruse(outnet, p->pc);
1427 	pending_delete(NULL, p);
1428 	outnet_send_wait_udp(outnet);
1429 	return 0;
1430 }
1431 
1432 /** calculate number of ip4 and ip6 interfaces*/
1433 static void
1434 calc_num46(char** ifs, int num_ifs, int do_ip4, int do_ip6,
1435 	int* num_ip4, int* num_ip6)
1436 {
1437 	int i;
1438 	*num_ip4 = 0;
1439 	*num_ip6 = 0;
1440 	if(num_ifs <= 0) {
1441 		if(do_ip4)
1442 			*num_ip4 = 1;
1443 		if(do_ip6)
1444 			*num_ip6 = 1;
1445 		return;
1446 	}
1447 	for(i=0; i<num_ifs; i++)
1448 	{
1449 		if(str_is_ip6(ifs[i])) {
1450 			if(do_ip6)
1451 				(*num_ip6)++;
1452 		} else {
1453 			if(do_ip4)
1454 				(*num_ip4)++;
1455 		}
1456 	}
1457 
1458 }
1459 
1460 void
1461 pending_udp_timer_delay_cb(void* arg)
1462 {
1463 	struct pending* p = (struct pending*)arg;
1464 	struct outside_network* outnet = p->outnet;
1465 	verbose(VERB_ALGO, "timeout udp with delay");
1466 	portcomm_loweruse(outnet, p->pc);
1467 	pending_delete(outnet, p);
1468 	outnet_send_wait_udp(outnet);
1469 }
1470 
1471 void
1472 pending_udp_timer_cb(void *arg)
1473 {
1474 	struct pending* p = (struct pending*)arg;
1475 	struct outside_network* outnet = p->outnet;
1476 	/* it timed out */
1477 	verbose(VERB_ALGO, "timeout udp");
1478 	if(p->cb) {
1479 		fptr_ok(fptr_whitelist_pending_udp(p->cb));
1480 		(void)(*p->cb)(p->pc->cp, p->cb_arg, NETEVENT_TIMEOUT, NULL);
1481 	}
1482 	/* if delayclose, keep port open for a longer time.
1483 	 * But if the udpwaitlist exists, then we are struggling to
1484 	 * keep up with demand for sockets, so do not wait, but service
1485 	 * the customer (customer service more important than portICMPs) */
1486 	if(outnet->delayclose && !outnet->udp_wait_first) {
1487 		p->cb = NULL;
1488 		p->timer->callback = &pending_udp_timer_delay_cb;
1489 		comm_timer_set(p->timer, &outnet->delay_tv);
1490 		return;
1491 	}
1492 	portcomm_loweruse(outnet, p->pc);
1493 	pending_delete(outnet, p);
1494 	outnet_send_wait_udp(outnet);
1495 }
1496 
1497 /** create pending_tcp buffers */
1498 static int
1499 create_pending_tcp(struct outside_network* outnet, size_t bufsize)
1500 {
1501 	size_t i;
1502 	if(outnet->num_tcp == 0)
1503 		return 1; /* no tcp needed, nothing to do */
1504 	if(!(outnet->tcp_conns = (struct pending_tcp **)calloc(
1505 			outnet->num_tcp, sizeof(struct pending_tcp*))))
1506 		return 0;
1507 	for(i=0; i<outnet->num_tcp; i++) {
1508 		if(!(outnet->tcp_conns[i] = (struct pending_tcp*)calloc(1,
1509 			sizeof(struct pending_tcp))))
1510 			return 0;
1511 		outnet->tcp_conns[i]->next_free = outnet->tcp_free;
1512 		outnet->tcp_free = outnet->tcp_conns[i];
1513 		outnet->tcp_conns[i]->c = comm_point_create_tcp_out(
1514 			outnet->base, bufsize, outnet_tcp_cb,
1515 			outnet->tcp_conns[i]);
1516 		if(!outnet->tcp_conns[i]->c)
1517 			return 0;
1518 	}
1519 	return 1;
1520 }
1521 
1522 /** setup an outgoing interface, ready address */
1523 static int setup_if(struct port_if* pif, const char* addrstr,
1524 	int* avail, int numavail, size_t numfd)
1525 {
1526 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
1527 	pif->avail_total = numavail;
1528 	pif->avail_ports = (int*)memdup(avail, (size_t)numavail*sizeof(int));
1529 	if(!pif->avail_ports)
1530 		return 0;
1531 #endif
1532 	if(!ipstrtoaddr(addrstr, UNBOUND_DNS_PORT, &pif->addr, &pif->addrlen) &&
1533 	   !netblockstrtoaddr(addrstr, UNBOUND_DNS_PORT,
1534 			      &pif->addr, &pif->addrlen, &pif->pfxlen))
1535 		return 0;
1536 	pif->maxout = (int)numfd;
1537 	pif->inuse = 0;
1538 	pif->out = (struct port_comm**)calloc(numfd,
1539 		sizeof(struct port_comm*));
1540 	if(!pif->out)
1541 		return 0;
1542 	return 1;
1543 }
1544 
1545 struct outside_network*
1546 outside_network_create(struct comm_base *base, size_t bufsize,
1547 	size_t num_ports, char** ifs, int num_ifs, int do_ip4,
1548 	int do_ip6, size_t num_tcp, int dscp, struct infra_cache* infra,
1549 	struct ub_randstate* rnd, int use_caps_for_id, int* availports,
1550 	int numavailports, size_t unwanted_threshold, int tcp_mss,
1551 	void (*unwanted_action)(void*), void* unwanted_param, int do_udp,
1552 	void* sslctx, int delayclose, int tls_use_sni, struct dt_env* dtenv,
1553 	int udp_connect, int max_reuse_tcp_queries, int tcp_reuse_timeout,
1554 	int tcp_auth_query_timeout)
1555 {
1556 	struct outside_network* outnet = (struct outside_network*)
1557 		calloc(1, sizeof(struct outside_network));
1558 	size_t k;
1559 	if(!outnet) {
1560 		log_err("malloc failed");
1561 		return NULL;
1562 	}
1563 	comm_base_timept(base, &outnet->now_secs, &outnet->now_tv);
1564 	outnet->base = base;
1565 	outnet->num_tcp = num_tcp;
1566 	outnet->max_reuse_tcp_queries = max_reuse_tcp_queries;
1567 	outnet->tcp_reuse_timeout = tcp_reuse_timeout;
1568 	outnet->tcp_auth_query_timeout = tcp_auth_query_timeout;
1569 	outnet->num_tcp_outgoing = 0;
1570 	outnet->infra = infra;
1571 	outnet->rnd = rnd;
1572 	outnet->sslctx = sslctx;
1573 	outnet->tls_use_sni = tls_use_sni;
1574 #ifdef USE_DNSTAP
1575 	outnet->dtenv = dtenv;
1576 #else
1577 	(void)dtenv;
1578 #endif
1579 	outnet->svcd_overhead = 0;
1580 	outnet->want_to_quit = 0;
1581 	outnet->unwanted_threshold = unwanted_threshold;
1582 	outnet->unwanted_action = unwanted_action;
1583 	outnet->unwanted_param = unwanted_param;
1584 	outnet->use_caps_for_id = use_caps_for_id;
1585 	outnet->do_udp = do_udp;
1586 	outnet->tcp_mss = tcp_mss;
1587 	outnet->ip_dscp = dscp;
1588 #ifndef S_SPLINT_S
1589 	if(delayclose) {
1590 		outnet->delayclose = 1;
1591 		outnet->delay_tv.tv_sec = delayclose/1000;
1592 		outnet->delay_tv.tv_usec = (delayclose%1000)*1000;
1593 	}
1594 #endif
1595 	if(udp_connect) {
1596 		outnet->udp_connect = 1;
1597 	}
1598 	if(numavailports == 0 || num_ports == 0) {
1599 		log_err("no outgoing ports available");
1600 		outside_network_delete(outnet);
1601 		return NULL;
1602 	}
1603 #ifndef INET6
1604 	do_ip6 = 0;
1605 #endif
1606 	calc_num46(ifs, num_ifs, do_ip4, do_ip6,
1607 		&outnet->num_ip4, &outnet->num_ip6);
1608 	if(outnet->num_ip4 != 0) {
1609 		if(!(outnet->ip4_ifs = (struct port_if*)calloc(
1610 			(size_t)outnet->num_ip4, sizeof(struct port_if)))) {
1611 			log_err("malloc failed");
1612 			outside_network_delete(outnet);
1613 			return NULL;
1614 		}
1615 	}
1616 	if(outnet->num_ip6 != 0) {
1617 		if(!(outnet->ip6_ifs = (struct port_if*)calloc(
1618 			(size_t)outnet->num_ip6, sizeof(struct port_if)))) {
1619 			log_err("malloc failed");
1620 			outside_network_delete(outnet);
1621 			return NULL;
1622 		}
1623 	}
1624 	if(	!(outnet->udp_buff = sldns_buffer_new(bufsize)) ||
1625 		!(outnet->pending = rbtree_create(pending_cmp)) ||
1626 		!(outnet->serviced = rbtree_create(serviced_cmp)) ||
1627 		!create_pending_tcp(outnet, bufsize)) {
1628 		log_err("malloc failed");
1629 		outside_network_delete(outnet);
1630 		return NULL;
1631 	}
1632 	rbtree_init(&outnet->tcp_reuse, reuse_cmp);
1633 	outnet->tcp_reuse_max = num_tcp;
1634 
1635 	/* allocate commpoints */
1636 	for(k=0; k<num_ports; k++) {
1637 		struct port_comm* pc;
1638 		pc = (struct port_comm*)calloc(1, sizeof(*pc));
1639 		if(!pc) {
1640 			log_err("malloc failed");
1641 			outside_network_delete(outnet);
1642 			return NULL;
1643 		}
1644 		pc->cp = comm_point_create_udp(outnet->base, -1,
1645 			outnet->udp_buff, outnet_udp_cb, outnet, NULL);
1646 		if(!pc->cp) {
1647 			log_err("malloc failed");
1648 			free(pc);
1649 			outside_network_delete(outnet);
1650 			return NULL;
1651 		}
1652 		pc->next = outnet->unused_fds;
1653 		outnet->unused_fds = pc;
1654 	}
1655 
1656 	/* allocate interfaces */
1657 	if(num_ifs == 0) {
1658 		if(do_ip4 && !setup_if(&outnet->ip4_ifs[0], "0.0.0.0",
1659 			availports, numavailports, num_ports)) {
1660 			log_err("malloc failed");
1661 			outside_network_delete(outnet);
1662 			return NULL;
1663 		}
1664 		if(do_ip6 && !setup_if(&outnet->ip6_ifs[0], "::",
1665 			availports, numavailports, num_ports)) {
1666 			log_err("malloc failed");
1667 			outside_network_delete(outnet);
1668 			return NULL;
1669 		}
1670 	} else {
1671 		size_t done_4 = 0, done_6 = 0;
1672 		int i;
1673 		for(i=0; i<num_ifs; i++) {
1674 			if(str_is_ip6(ifs[i]) && do_ip6) {
1675 				if(!setup_if(&outnet->ip6_ifs[done_6], ifs[i],
1676 					availports, numavailports, num_ports)){
1677 					log_err("malloc failed");
1678 					outside_network_delete(outnet);
1679 					return NULL;
1680 				}
1681 				done_6++;
1682 			}
1683 			if(!str_is_ip6(ifs[i]) && do_ip4) {
1684 				if(!setup_if(&outnet->ip4_ifs[done_4], ifs[i],
1685 					availports, numavailports, num_ports)){
1686 					log_err("malloc failed");
1687 					outside_network_delete(outnet);
1688 					return NULL;
1689 				}
1690 				done_4++;
1691 			}
1692 		}
1693 	}
1694 	return outnet;
1695 }
1696 
1697 /** helper pending delete */
1698 static void
1699 pending_node_del(rbnode_type* node, void* arg)
1700 {
1701 	struct pending* pend = (struct pending*)node;
1702 	struct outside_network* outnet = (struct outside_network*)arg;
1703 	pending_delete(outnet, pend);
1704 }
1705 
1706 /** helper serviced delete */
1707 static void
1708 serviced_node_del(rbnode_type* node, void* ATTR_UNUSED(arg))
1709 {
1710 	struct serviced_query* sq = (struct serviced_query*)node;
1711 	struct service_callback* p = sq->cblist, *np;
1712 	free(sq->qbuf);
1713 	free(sq->zone);
1714 	free(sq->tls_auth_name);
1715 	edns_opt_list_free(sq->opt_list);
1716 	while(p) {
1717 		np = p->next;
1718 		free(p);
1719 		p = np;
1720 	}
1721 	free(sq);
1722 }
1723 
1724 void
1725 outside_network_quit_prepare(struct outside_network* outnet)
1726 {
1727 	if(!outnet)
1728 		return;
1729 	/* prevent queued items from being sent */
1730 	outnet->want_to_quit = 1;
1731 }
1732 
1733 void
1734 outside_network_delete(struct outside_network* outnet)
1735 {
1736 	if(!outnet)
1737 		return;
1738 	outnet->want_to_quit = 1;
1739 	/* check every element, since we can be called on malloc error */
1740 	if(outnet->pending) {
1741 		/* free pending elements, but do not unlink from the tree. */
1742 		traverse_postorder(outnet->pending, pending_node_del, NULL);
1743 		free(outnet->pending);
1744 	}
1745 	if(outnet->serviced) {
1746 		traverse_postorder(outnet->serviced, serviced_node_del, NULL);
1747 		free(outnet->serviced);
1748 	}
1749 	if(outnet->udp_buff)
1750 		sldns_buffer_free(outnet->udp_buff);
1751 	if(outnet->unused_fds) {
1752 		struct port_comm* p = outnet->unused_fds, *np;
1753 		while(p) {
1754 			np = p->next;
1755 			comm_point_delete(p->cp);
1756 			free(p);
1757 			p = np;
1758 		}
1759 		outnet->unused_fds = NULL;
1760 	}
1761 	if(outnet->ip4_ifs) {
1762 		int i, k;
1763 		for(i=0; i<outnet->num_ip4; i++) {
1764 			for(k=0; k<outnet->ip4_ifs[i].inuse; k++) {
1765 				struct port_comm* pc = outnet->ip4_ifs[i].
1766 					out[k];
1767 				comm_point_delete(pc->cp);
1768 				free(pc);
1769 			}
1770 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
1771 			free(outnet->ip4_ifs[i].avail_ports);
1772 #endif
1773 			free(outnet->ip4_ifs[i].out);
1774 		}
1775 		free(outnet->ip4_ifs);
1776 	}
1777 	if(outnet->ip6_ifs) {
1778 		int i, k;
1779 		for(i=0; i<outnet->num_ip6; i++) {
1780 			for(k=0; k<outnet->ip6_ifs[i].inuse; k++) {
1781 				struct port_comm* pc = outnet->ip6_ifs[i].
1782 					out[k];
1783 				comm_point_delete(pc->cp);
1784 				free(pc);
1785 			}
1786 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
1787 			free(outnet->ip6_ifs[i].avail_ports);
1788 #endif
1789 			free(outnet->ip6_ifs[i].out);
1790 		}
1791 		free(outnet->ip6_ifs);
1792 	}
1793 	if(outnet->tcp_conns) {
1794 		size_t i;
1795 		for(i=0; i<outnet->num_tcp; i++)
1796 			if(outnet->tcp_conns[i]) {
1797 				struct pending_tcp* pend;
1798 				pend = outnet->tcp_conns[i];
1799 				if(pend->reuse.item_on_lru_list) {
1800 					/* delete waiting_tcp elements that
1801 					 * the tcp conn is working on */
1802 					decommission_pending_tcp(outnet, pend);
1803 				}
1804 				comm_point_delete(outnet->tcp_conns[i]->c);
1805 				free(outnet->tcp_conns[i]);
1806 				outnet->tcp_conns[i] = NULL;
1807 			}
1808 		free(outnet->tcp_conns);
1809 		outnet->tcp_conns = NULL;
1810 	}
1811 	if(outnet->tcp_wait_first) {
1812 		struct waiting_tcp* p = outnet->tcp_wait_first, *np;
1813 		while(p) {
1814 			np = p->next_waiting;
1815 			waiting_tcp_delete(p);
1816 			p = np;
1817 		}
1818 	}
1819 	/* the reuse entries were allocated in the pending_tcp structs deleted above */
1820 	rbtree_init(&outnet->tcp_reuse, reuse_cmp);
1821 	outnet->tcp_reuse_first = NULL;
1822 	outnet->tcp_reuse_last = NULL;
1823 	if(outnet->udp_wait_first) {
1824 		struct pending* p = outnet->udp_wait_first, *np;
1825 		while(p) {
1826 			np = p->next_waiting;
1827 			pending_delete(NULL, p);
1828 			p = np;
1829 		}
1830 	}
1831 	free(outnet);
1832 }
1833 
1834 void
1835 pending_delete(struct outside_network* outnet, struct pending* p)
1836 {
1837 	if(!p)
1838 		return;
1839 	if(outnet && outnet->udp_wait_first &&
1840 		(p->next_waiting || p == outnet->udp_wait_last) ) {
1841 		/* delete from waiting list, if it is in the waiting list */
1842 		struct pending* prev = NULL, *x = outnet->udp_wait_first;
1843 		while(x && x != p) {
1844 			prev = x;
1845 			x = x->next_waiting;
1846 		}
1847 		if(x) {
1848 			log_assert(x == p);
1849 			if(prev)
1850 				prev->next_waiting = p->next_waiting;
1851 			else	outnet->udp_wait_first = p->next_waiting;
1852 			if(outnet->udp_wait_last == p)
1853 				outnet->udp_wait_last = prev;
1854 		}
1855 	}
1856 	if(outnet) {
1857 		(void)rbtree_delete(outnet->pending, p->node.key);
1858 	}
1859 	if(p->timer)
1860 		comm_timer_delete(p->timer);
1861 	free(p->pkt);
1862 	free(p);
1863 }
1864 
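/** Randomize the host part of an IPv6 address: fill the whole bytes of the
 * low (128 - pfxlen) bits with random values, then the remaining host bits
 * of the byte that straddles the prefix boundary, leaving the prefix intact. */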
1865 static void
1866 sai6_putrandom(struct sockaddr_in6 *sa, int pfxlen, struct ub_randstate *rnd)
1867 {
1868 	int i, last;
1869 	if(!(pfxlen > 0 && pfxlen < 128))
1870 		return;
1871 	for(i = 0; i < (128 - pfxlen) / 8; i++) {
1872 		sa->sin6_addr.s6_addr[15-i] = (uint8_t)ub_random_max(rnd, 256);
1873 	}
1874 	last = pfxlen & 7;
1875 	if(last != 0) {
1876 		sa->sin6_addr.s6_addr[15-i] |=
1877 			((0xFF >> last) & ub_random_max(rnd, 256));
1878 	}
1879 }
1880 
1881 /**
1882  * Try to open a UDP socket for outgoing communication.
1883  * Sets socket options as needed.
1884  * @param addr: socket address.
1885  * @param addrlen: length of address.
1886  * @param pfxlen: length of network prefix (for address randomisation).
1887  * @param port: port override for addr.
1888  * @param inuse: if -1 is returned, this bool means the port was in use.
1889  * @param rnd: random state (for address randomisation).
1890  * @param dscp: DSCP to use.
1891  * @return fd or -1
1892  */
1893 static int
1894 udp_sockport(struct sockaddr_storage* addr, socklen_t addrlen, int pfxlen,
1895 	int port, int* inuse, struct ub_randstate* rnd, int dscp)
1896 {
1897 	int fd, noproto;
1898 	if(addr_is_ip6(addr, addrlen)) {
1899 		int freebind = 0;
1900 		struct sockaddr_in6 sa = *(struct sockaddr_in6*)addr;
1901 		sa.sin6_port = (in_port_t)htons((uint16_t)port);
1902 		sa.sin6_flowinfo = 0;
1903 		sa.sin6_scope_id = 0;
1904 		if(pfxlen != 0) {
1905 			freebind = 1;
1906 			sai6_putrandom(&sa, pfxlen, rnd);
1907 		}
1908 		fd = create_udp_sock(AF_INET6, SOCK_DGRAM,
1909 			(struct sockaddr*)&sa, addrlen, 1, inuse, &noproto,
1910 			0, 0, 0, NULL, 0, freebind, 0, dscp);
1911 	} else {
1912 		struct sockaddr_in* sa = (struct sockaddr_in*)addr;
1913 		sa->sin_port = (in_port_t)htons((uint16_t)port);
1914 		fd = create_udp_sock(AF_INET, SOCK_DGRAM,
1915 			(struct sockaddr*)addr, addrlen, 1, inuse, &noproto,
1916 			0, 0, 0, NULL, 0, 0, 0, dscp);
1917 	}
1918 	return fd;
1919 }
1920 
1921 /** Select random ID */
1922 static int
1923 select_id(struct outside_network* outnet, struct pending* pend,
1924 	sldns_buffer* packet)
1925 {
1926 	int id_tries = 0;
1927 	pend->id = GET_RANDOM_ID(outnet->rnd);
1928 	LDNS_ID_SET(sldns_buffer_begin(packet), pend->id);
1929 
1930 	/* insert in tree */
1931 	pend->node.key = pend;
1932 	while(!rbtree_insert(outnet->pending, &pend->node)) {
1933 		/* change ID to avoid collision */
1934 		pend->id = GET_RANDOM_ID(outnet->rnd);
1935 		LDNS_ID_SET(sldns_buffer_begin(packet), pend->id);
1936 		id_tries++;
1937 		if(id_tries == MAX_ID_RETRY) {
1938 			pend->id=99999; /* non existent ID */
1939 			log_err("failed to generate unique ID, drop msg");
1940 			return 0;
1941 		}
1942 	}
1943 	verbose(VERB_ALGO, "inserted new pending reply id=%4.4x", pend->id);
1944 	return 1;
1945 }
1946 
1947 /** return true if the UDP connect error needs to be logged */
1948 static int udp_connect_needs_log(int err)
1949 {
1950 	switch(err) {
1951 	case ECONNREFUSED:
1952 #  ifdef ENETUNREACH
1953 	case ENETUNREACH:
1954 #  endif
1955 #  ifdef EHOSTDOWN
1956 	case EHOSTDOWN:
1957 #  endif
1958 #  ifdef EHOSTUNREACH
1959 	case EHOSTUNREACH:
1960 #  endif
1961 #  ifdef ENETDOWN
1962 	case ENETDOWN:
1963 #  endif
1964 	case EPERM:
1965 	case EACCES:
1966 		if(verbosity >= VERB_ALGO)
1967 			return 1;
1968 		return 0;
1969 	default:
1970 		break;
1971 	}
1972 	return 1;
1973 }
1974 
1975 
1976 /** Select random interface and port */
1977 static int
1978 select_ifport(struct outside_network* outnet, struct pending* pend,
1979 	int num_if, struct port_if* ifs)
1980 {
1981 	int my_if, my_port, fd, portno, inuse, tries=0;
1982 	struct port_if* pif;
1983 	/* randomly select interface and port */
1984 	if(num_if == 0) {
1985 		verbose(VERB_QUERY, "Need to send query but have no "
1986 			"outgoing interfaces of that family");
1987 		return 0;
1988 	}
1989 	log_assert(outnet->unused_fds);
1990 	tries = 0;
1991 	while(1) {
1992 		my_if = ub_random_max(outnet->rnd, num_if);
1993 		pif = &ifs[my_if];
1994 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
1995 		if(outnet->udp_connect) {
1996 			/* if we connect() we cannot reuse fds for a port */
1997 			if(pif->inuse >= pif->avail_total) {
1998 				tries++;
1999 				if(tries < MAX_PORT_RETRY)
2000 					continue;
2001 				log_err("failed to find an open port, drop msg");
2002 				return 0;
2003 			}
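			/* pick a random index in the not yet used part of
			 * the port list, indices [inuse, avail_total) */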
2004 			my_port = pif->inuse + ub_random_max(outnet->rnd,
2005 				pif->avail_total - pif->inuse);
2006 		} else  {
2007 			my_port = ub_random_max(outnet->rnd, pif->avail_total);
2008 			if(my_port < pif->inuse) {
2009 				/* port already open */
2010 				pend->pc = pif->out[my_port];
2011 				verbose(VERB_ALGO, "using UDP if=%d port=%d",
2012 					my_if, pend->pc->number);
2013 				break;
2014 			}
2015 		}
2016 		/* try to open new port, if fails, loop to try again */
2017 		log_assert(pif->inuse < pif->maxout);
2018 		portno = pif->avail_ports[my_port - pif->inuse];
2019 #else
2020 		my_port = portno = 0;
2021 #endif
2022 		fd = udp_sockport(&pif->addr, pif->addrlen, pif->pfxlen,
2023 			portno, &inuse, outnet->rnd, outnet->ip_dscp);
2024 		if(fd == -1 && !inuse) {
2025 			/* nonrecoverable error making socket */
2026 			return 0;
2027 		}
2028 		if(fd != -1) {
2029 			verbose(VERB_ALGO, "opened UDP if=%d port=%d",
2030 				my_if, portno);
2031 			if(outnet->udp_connect) {
2032 				/* connect() to the destination */
2033 				if(connect(fd, (struct sockaddr*)&pend->addr,
2034 					pend->addrlen) < 0) {
2035 					if(udp_connect_needs_log(errno)) {
2036 						log_err_addr("udp connect failed",
2037 							strerror(errno), &pend->addr,
2038 							pend->addrlen);
2039 					}
2040 					sock_close(fd);
2041 					return 0;
2042 				}
2043 			}
2044 			/* grab fd */
2045 			pend->pc = outnet->unused_fds;
2046 			outnet->unused_fds = pend->pc->next;
2047 
2048 			/* setup portcomm */
2049 			pend->pc->next = NULL;
2050 			pend->pc->number = portno;
2051 			pend->pc->pif = pif;
2052 			pend->pc->index = pif->inuse;
2053 			pend->pc->num_outstanding = 0;
2054 			comm_point_start_listening(pend->pc->cp, fd, -1);
2055 
2056 			/* grab port in interface */
2057 			pif->out[pif->inuse] = pend->pc;
2058 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
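			/* remove the chosen port from the available list by
			 * overwriting its slot with the last entry of the
			 * still-available window; the window shrinks by one
			 * when inuse is incremented below */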
2059 			pif->avail_ports[my_port - pif->inuse] =
2060 				pif->avail_ports[pif->avail_total-pif->inuse-1];
2061 #endif
2062 			pif->inuse++;
2063 			break;
2064 		}
2065 		/* failed, already in use */
2066 		verbose(VERB_QUERY, "port %d in use, trying another", portno);
2067 		tries++;
2068 		if(tries == MAX_PORT_RETRY) {
2069 			log_err("failed to find an open port, drop msg");
2070 			return 0;
2071 		}
2072 	}
2073 	log_assert(pend->pc);
2074 	pend->pc->num_outstanding++;
2075 
2076 	return 1;
2077 }
2078 
2079 static int
2080 randomize_and_send_udp(struct pending* pend, sldns_buffer* packet, int timeout)
2081 {
2082 	struct timeval tv;
2083 	struct outside_network* outnet = pend->sq->outnet;
2084 
2085 	/* select id */
2086 	if(!select_id(outnet, pend, packet)) {
2087 		return 0;
2088 	}
2089 
2090 	/* select src_if, port */
2091 	if(addr_is_ip6(&pend->addr, pend->addrlen)) {
2092 		if(!select_ifport(outnet, pend,
2093 			outnet->num_ip6, outnet->ip6_ifs))
2094 			return 0;
2095 	} else {
2096 		if(!select_ifport(outnet, pend,
2097 			outnet->num_ip4, outnet->ip4_ifs))
2098 			return 0;
2099 	}
2100 	log_assert(pend->pc && pend->pc->cp);
2101 
2102 	/* send it over the commlink */
2103 	if(!comm_point_send_udp_msg(pend->pc->cp, packet,
2104 		(struct sockaddr*)&pend->addr, pend->addrlen, outnet->udp_connect)) {
2105 		portcomm_loweruse(outnet, pend->pc);
2106 		return 0;
2107 	}
2108 
2109 	/* the system calls that set the timeout are done after sending the
2110 	   UDP packet, so they do not delay the packet and add to the roundtrip. */
2111 #ifndef S_SPLINT_S
2112 	tv.tv_sec = timeout/1000;
2113 	tv.tv_usec = (timeout%1000)*1000;
2114 #endif
2115 	comm_timer_set(pend->timer, &tv);
2116 
2117 #ifdef USE_DNSTAP
2118 	/*
2119 	 * send the src (local service)/dst (upstream) addresses over DNSTAP.
2120 	 * The src (local service) addr cannot be determined if unbound is
2121 	 * not configured with specific outgoing IP addresses, so 0.0.0.0
2122 	 * (or ::) is passed as that argument to the
2123 	 * dt_msg_send_outside_query()/dt_msg_send_outside_response() calls.
2124 	 */
2125 	if(outnet->dtenv &&
2126 	   (outnet->dtenv->log_resolver_query_messages ||
2127 		outnet->dtenv->log_forwarder_query_messages)) {
2128 			log_addr(VERB_ALGO, "from local addr", &pend->pc->pif->addr, pend->pc->pif->addrlen);
2129 			log_addr(VERB_ALGO, "request to upstream", &pend->addr, pend->addrlen);
2130 			dt_msg_send_outside_query(outnet->dtenv, &pend->addr, &pend->pc->pif->addr, comm_udp,
2131 				pend->sq->zone, pend->sq->zonelen, packet);
2132 	}
2133 #endif
2134 	return 1;
2135 }
2136 
2137 struct pending*
2138 pending_udp_query(struct serviced_query* sq, struct sldns_buffer* packet,
2139 	int timeout, comm_point_callback_type* cb, void* cb_arg)
2140 {
2141 	struct pending* pend = (struct pending*)calloc(1, sizeof(*pend));
2142 	if(!pend) return NULL;
2143 	pend->outnet = sq->outnet;
2144 	pend->sq = sq;
2145 	pend->addrlen = sq->addrlen;
2146 	memmove(&pend->addr, &sq->addr, sq->addrlen);
2147 	pend->cb = cb;
2148 	pend->cb_arg = cb_arg;
2149 	pend->node.key = pend;
2150 	pend->timer = comm_timer_create(sq->outnet->base, pending_udp_timer_cb,
2151 		pend);
2152 	if(!pend->timer) {
2153 		free(pend);
2154 		return NULL;
2155 	}
2156 
2157 	if(sq->outnet->unused_fds == NULL) {
2158 		/* no unused fd, cannot create a new port (randomly) */
2159 		verbose(VERB_ALGO, "no fds available, udp query waiting");
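		/* copy the packet contents, so the query can be sent later
		 * by outnet_send_wait_udp() when a port becomes free again */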
2160 		pend->timeout = timeout;
2161 		pend->pkt_len = sldns_buffer_limit(packet);
2162 		pend->pkt = (uint8_t*)memdup(sldns_buffer_begin(packet),
2163 			pend->pkt_len);
2164 		if(!pend->pkt) {
2165 			comm_timer_delete(pend->timer);
2166 			free(pend);
2167 			return NULL;
2168 		}
2169 		/* put at end of waiting list */
2170 		if(sq->outnet->udp_wait_last)
2171 			sq->outnet->udp_wait_last->next_waiting = pend;
2172 		else
2173 			sq->outnet->udp_wait_first = pend;
2174 		sq->outnet->udp_wait_last = pend;
2175 		return pend;
2176 	}
2177 	if(!randomize_and_send_udp(pend, packet, timeout)) {
2178 		pending_delete(sq->outnet, pend);
2179 		return NULL;
2180 	}
2181 	return pend;
2182 }
2183 
2184 void
2185 outnet_tcptimer(void* arg)
2186 {
2187 	struct waiting_tcp* w = (struct waiting_tcp*)arg;
2188 	struct outside_network* outnet = w->outnet;
2189 	verbose(VERB_CLIENT, "outnet_tcptimer");
2190 	if(w->on_tcp_waiting_list) {
2191 		/* it is on the waiting list */
2192 		waiting_list_remove(outnet, w);
2193 		waiting_tcp_callback(w, NULL, NETEVENT_TIMEOUT, NULL);
2194 		waiting_tcp_delete(w);
2195 	} else {
2196 		/* it was in use */
2197 		struct pending_tcp* pend=(struct pending_tcp*)w->next_waiting;
2198 		reuse_cb_and_decommission(outnet, pend, NETEVENT_TIMEOUT);
2199 	}
2200 	use_free_buffer(outnet);
2201 }
2202 
2203 /** close the oldest reuse_tcp connection to make a fd and pending_tcp
2204  * struct available for a new stream connection */
2205 static void
2206 reuse_tcp_close_oldest(struct outside_network* outnet)
2207 {
2208 	struct reuse_tcp* reuse;
2209 	verbose(VERB_CLIENT, "reuse_tcp_close_oldest");
2210 	reuse = reuse_tcp_lru_snip(outnet);
2211 	if(!reuse) return;
2212 	/* free up */
2213 	reuse_cb_and_decommission(outnet, reuse->pending, NETEVENT_CLOSED);
2214 }
2215 
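/** Select an ID for a TCP query: if it joins an existing reuse stream, pick
 * an ID that does not collide with queries already on that stream;
 * otherwise any random ID will do. */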
2216 static uint16_t
2217 tcp_select_id(struct outside_network* outnet, struct reuse_tcp* reuse)
2218 {
2219 	if(reuse)
2220 		return reuse_tcp_select_id(reuse, outnet);
2221 	return GET_RANDOM_ID(outnet->rnd);
2222 }
2223 
2224 /** find spare ID value for reuse tcp stream.  That is random and also does
2225  * not collide with an existing query ID that is in use or waiting */
2226 uint16_t
2227 reuse_tcp_select_id(struct reuse_tcp* reuse, struct outside_network* outnet)
2228 {
2229 	uint16_t id = 0, curid, nextid;
2230 	const int try_random = 2000;
2231 	int i;
2232 	unsigned select, count, space;
2233 	rbnode_type* node;
2234 
2235 	/* make really sure the tree is not empty */
2236 	if(reuse->tree_by_id.count == 0) {
2237 		id = GET_RANDOM_ID(outnet->rnd);
2238 		return id;
2239 	}
2240 
2241 	/* try to find random empty spots by picking them */
2242 	for(i = 0; i<try_random; i++) {
2243 		id = GET_RANDOM_ID(outnet->rnd);
2244 		if(!reuse_tcp_by_id_find(reuse, id)) {
2245 			return id;
2246 		}
2247 	}
2248 
2249 	/* otherwise pick, equally likely, one of the ID values that are
2250 	 * not in the tree.  Pick the n-th unused ID number, then loop
2251 	 * over the empty spaces in the tree to find it */
2252 	log_assert(reuse->tree_by_id.count < 0xffff);
2253 	select = ub_random_max(outnet->rnd, 0xffff - reuse->tree_by_id.count);
2254 	/* select value now in 0 .. num free - 1 */
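	/* A sketch of the walk below, for example with in-use IDs {5, 9}:
	 * the free slots are counted in ID order, so free index 0..4 maps to
	 * IDs 0..4 (before the first node), free index 5..7 maps to IDs 6..8
	 * (the gap between the nodes), and larger indexes map to IDs 10 and
	 * up (after the last node).  'select' is such a free index and the
	 * code below translates it into the corresponding unused ID. */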
2255 
2256 	count = 0; /* number of free spaces passed by */
2257 	node = rbtree_first(&reuse->tree_by_id);
2258 	log_assert(node && node != RBTREE_NULL); /* tree not empty */
2259 	/* see if select is before first node */
2260 	if(select < tree_by_id_get_id(node))
2261 		return select;
2262 	count += tree_by_id_get_id(node);
2263 	/* perhaps select is between nodes */
2264 	while(node && node != RBTREE_NULL) {
2265 		rbnode_type* next = rbtree_next(node);
2266 		if(next && next != RBTREE_NULL) {
2267 			curid = tree_by_id_get_id(node);
2268 			nextid = tree_by_id_get_id(next);
2269 			log_assert(curid < nextid);
2270 			if(curid != 0xffff && curid + 1 < nextid) {
2271 				/* space between nodes */
2272 				space = nextid - curid - 1;
2273 				log_assert(select >= count);
2274 				if(select < count + space) {
2275 					/* here it is */
2276 					return curid + 1 + (select - count);
2277 				}
2278 				count += space;
2279 			}
2280 		}
2281 		node = next;
2282 	}
2283 
2284 	/* select is after the last node */
2285 	/* count is the number of free positions before the nodes in the
2286 	 * tree */
2287 	node = rbtree_last(&reuse->tree_by_id);
2288 	log_assert(node && node != RBTREE_NULL); /* tree not empty */
2289 	curid = tree_by_id_get_id(node);
2290 	log_assert(count + (0xffff-curid) + reuse->tree_by_id.count == 0xffff);
2291 	return curid + 1 + (select - count);
2292 }
2293 
2294 struct waiting_tcp*
2295 pending_tcp_query(struct serviced_query* sq, sldns_buffer* packet,
2296 	int timeout, comm_point_callback_type* callback, void* callback_arg)
2297 {
2298 	struct pending_tcp* pend = sq->outnet->tcp_free;
2299 	struct reuse_tcp* reuse = NULL;
2300 	struct waiting_tcp* w;
2301 
2302 	verbose(VERB_CLIENT, "pending_tcp_query");
2303 	if(sldns_buffer_limit(packet) < sizeof(uint16_t)) {
2304 		verbose(VERB_ALGO, "pending tcp query with too short buffer < 2");
2305 		return NULL;
2306 	}
2307 
2308 	/* find out if a reused stream to the target exists */
2309 	/* if so, take it into use */
2310 	reuse = reuse_tcp_find(sq->outnet, &sq->addr, sq->addrlen,
2311 		sq->ssl_upstream);
2312 	if(reuse) {
2313 		log_reuse_tcp(VERB_CLIENT, "pending_tcp_query: found reuse", reuse);
2314 		log_assert(reuse->pending);
2315 		pend = reuse->pending;
2316 		reuse_tcp_lru_touch(sq->outnet, reuse);
2317 	}
2318 
2319 	log_assert(!reuse || (reuse && pend));
2320 	/* if !pend but we have reuse streams, close a reuse stream
2321 	 * to be able to open a new one to this target; there is no use
2322 	 * waiting to reuse a file descriptor while another query needs
2323 	 * to use that buffer and file descriptor now. */
2324 	if(!pend) {
2325 		reuse_tcp_close_oldest(sq->outnet);
2326 		pend = sq->outnet->tcp_free;
2327 		log_assert(!reuse || (pend == reuse->pending));
2328 	}
2329 
2330 	/* allocate space to store query */
2331 	w = (struct waiting_tcp*)malloc(sizeof(struct waiting_tcp)
2332 		+ sldns_buffer_limit(packet));
2333 	if(!w) {
2334 		return NULL;
2335 	}
2336 	if(!(w->timer = comm_timer_create(sq->outnet->base, outnet_tcptimer, w))) {
2337 		free(w);
2338 		return NULL;
2339 	}
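	/* the query packet is stored in the same allocation, directly
	 * after the waiting_tcp struct */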
2340 	w->pkt = (uint8_t*)w + sizeof(struct waiting_tcp);
2341 	w->pkt_len = sldns_buffer_limit(packet);
2342 	memmove(w->pkt, sldns_buffer_begin(packet), w->pkt_len);
2343 	w->id = tcp_select_id(sq->outnet, reuse);
2344 	LDNS_ID_SET(w->pkt, w->id);
2345 	memcpy(&w->addr, &sq->addr, sq->addrlen);
2346 	w->addrlen = sq->addrlen;
2347 	w->outnet = sq->outnet;
2348 	w->on_tcp_waiting_list = 0;
2349 	w->next_waiting = NULL;
2350 	w->cb = callback;
2351 	w->cb_arg = callback_arg;
2352 	w->ssl_upstream = sq->ssl_upstream;
2353 	w->tls_auth_name = sq->tls_auth_name;
2354 	w->timeout = timeout;
2355 	w->id_node.key = NULL;
2356 	w->write_wait_prev = NULL;
2357 	w->write_wait_next = NULL;
2358 	w->write_wait_queued = 0;
2359 	w->error_count = 0;
2360 #ifdef USE_DNSTAP
2361 	w->sq = NULL;
2362 #endif
2363 	if(pend) {
2364 		/* we have a buffer available right now */
2365 		if(reuse) {
2366 			log_assert(reuse == &pend->reuse);
2367 			/* reuse existing fd, write query and continue */
2368 			/* store query in tree by id */
2369 			verbose(VERB_CLIENT, "pending_tcp_query: reuse, store");
2370 			w->next_waiting = (void*)pend;
2371 			reuse_tree_by_id_insert(&pend->reuse, w);
2372 			/* can we write right now? */
2373 			if(pend->query == NULL) {
2374 				/* write straight away */
2375 				/* stop the timer on read of the fd */
2376 				comm_point_stop_listening(pend->c);
2377 				pend->query = w;
2378 				outnet_tcp_take_query_setup(pend->c->fd, pend,
2379 					w);
2380 			} else {
2381 				/* put it in the waiting list for
2382 				 * this stream */
2383 				reuse_write_wait_push_back(&pend->reuse, w);
2384 			}
2385 		} else {
2386 			/* create new fd and connect to addr, setup to
2387 			 * write query */
2388 			verbose(VERB_CLIENT, "pending_tcp_query: new fd, connect");
2389 			rbtree_init(&pend->reuse.tree_by_id, reuse_id_cmp);
2390 			pend->reuse.pending = pend;
2391 			memcpy(&pend->reuse.addr, &sq->addr, sq->addrlen);
2392 			pend->reuse.addrlen = sq->addrlen;
2393 			if(!outnet_tcp_take_into_use(w)) {
2394 				waiting_tcp_delete(w);
2395 				return NULL;
2396 			}
2397 		}
2398 #ifdef USE_DNSTAP
2399 		if(sq->outnet->dtenv &&
2400 		   (sq->outnet->dtenv->log_resolver_query_messages ||
2401 		    sq->outnet->dtenv->log_forwarder_query_messages)) {
2402 			/* use w->pkt, because it has the ID value */
2403 			sldns_buffer tmp;
2404 			sldns_buffer_init_frm_data(&tmp, w->pkt, w->pkt_len);
2405 			dt_msg_send_outside_query(sq->outnet->dtenv, &sq->addr,
2406 				&pend->pi->addr, comm_tcp, sq->zone,
2407 				sq->zonelen, &tmp);
2408 		}
2409 #endif
2410 	} else {
2411 		/* queue up */
2412 		/* waiting for a buffer on the outside network buffer wait
2413 		 * list */
2414 		verbose(VERB_CLIENT, "pending_tcp_query: queue to wait");
2415 #ifdef USE_DNSTAP
2416 		w->sq = sq;
2417 #endif
2418 		outnet_add_tcp_waiting(sq->outnet, w);
2419 	}
2420 	return w;
2421 }
2422 
2423 /** create query for serviced queries */
2424 static void
2425 serviced_gen_query(sldns_buffer* buff, uint8_t* qname, size_t qnamelen,
2426 	uint16_t qtype, uint16_t qclass, uint16_t flags)
2427 {
2428 	sldns_buffer_clear(buff);
2429 	/* skip id */
2430 	sldns_buffer_write_u16(buff, flags);
2431 	sldns_buffer_write_u16(buff, 1); /* qdcount */
2432 	sldns_buffer_write_u16(buff, 0); /* ancount */
2433 	sldns_buffer_write_u16(buff, 0); /* nscount */
2434 	sldns_buffer_write_u16(buff, 0); /* arcount */
2435 	sldns_buffer_write(buff, qname, qnamelen);
2436 	sldns_buffer_write_u16(buff, qtype);
2437 	sldns_buffer_write_u16(buff, qclass);
2438 	sldns_buffer_flip(buff);
2439 }
2440 
2441 /** lookup serviced query in serviced query rbtree */
2442 static struct serviced_query*
2443 lookup_serviced(struct outside_network* outnet, sldns_buffer* buff, int dnssec,
2444 	struct sockaddr_storage* addr, socklen_t addrlen,
2445 	struct edns_option* opt_list)
2446 {
2447 	struct serviced_query key;
2448 	key.node.key = &key;
2449 	key.qbuf = sldns_buffer_begin(buff);
2450 	key.qbuflen = sldns_buffer_limit(buff);
2451 	key.dnssec = dnssec;
2452 	memcpy(&key.addr, addr, addrlen);
2453 	key.addrlen = addrlen;
2454 	key.outnet = outnet;
2455 	key.opt_list = opt_list;
2456 	return (struct serviced_query*)rbtree_search(outnet->serviced, &key);
2457 }
2458 
2459 /** Create new serviced entry */
2460 static struct serviced_query*
2461 serviced_create(struct outside_network* outnet, sldns_buffer* buff, int dnssec,
2462 	int want_dnssec, int nocaps, int tcp_upstream, int ssl_upstream,
2463 	char* tls_auth_name, struct sockaddr_storage* addr, socklen_t addrlen,
2464 	uint8_t* zone, size_t zonelen, int qtype, struct edns_option* opt_list,
2465 	size_t pad_queries_block_size)
2466 {
2467 	struct serviced_query* sq = (struct serviced_query*)malloc(sizeof(*sq));
2468 #ifdef UNBOUND_DEBUG
2469 	rbnode_type* ins;
2470 #endif
2471 	if(!sq)
2472 		return NULL;
2473 	sq->node.key = sq;
2474 	sq->qbuf = memdup(sldns_buffer_begin(buff), sldns_buffer_limit(buff));
2475 	if(!sq->qbuf) {
2476 		free(sq);
2477 		return NULL;
2478 	}
2479 	sq->qbuflen = sldns_buffer_limit(buff);
2480 	sq->zone = memdup(zone, zonelen);
2481 	if(!sq->zone) {
2482 		free(sq->qbuf);
2483 		free(sq);
2484 		return NULL;
2485 	}
2486 	sq->zonelen = zonelen;
2487 	sq->qtype = qtype;
2488 	sq->dnssec = dnssec;
2489 	sq->want_dnssec = want_dnssec;
2490 	sq->nocaps = nocaps;
2491 	sq->tcp_upstream = tcp_upstream;
2492 	sq->ssl_upstream = ssl_upstream;
2493 	if(tls_auth_name) {
2494 		sq->tls_auth_name = strdup(tls_auth_name);
2495 		if(!sq->tls_auth_name) {
2496 			free(sq->zone);
2497 			free(sq->qbuf);
2498 			free(sq);
2499 			return NULL;
2500 		}
2501 	} else {
2502 		sq->tls_auth_name = NULL;
2503 	}
2504 	memcpy(&sq->addr, addr, addrlen);
2505 	sq->addrlen = addrlen;
2506 	sq->opt_list = NULL;
2507 	if(opt_list) {
2508 		sq->opt_list = edns_opt_copy_alloc(opt_list);
2509 		if(!sq->opt_list) {
2510 			free(sq->tls_auth_name);
2511 			free(sq->zone);
2512 			free(sq->qbuf);
2513 			free(sq);
2514 			return NULL;
2515 		}
2516 	}
2517 	sq->outnet = outnet;
2518 	sq->cblist = NULL;
2519 	sq->pending = NULL;
2520 	sq->status = serviced_initial;
2521 	sq->retry = 0;
2522 	sq->to_be_deleted = 0;
2523 	sq->padding_block_size = pad_queries_block_size;
2524 #ifdef UNBOUND_DEBUG
2525 	ins =
2526 #else
2527 	(void)
2528 #endif
2529 	rbtree_insert(outnet->serviced, &sq->node);
2530 	log_assert(ins != NULL); /* must not be already present */
2531 	return sq;
2532 }
2533 
2534 /** remove waiting tcp from the outnet waiting list */
2535 static void
2536 waiting_list_remove(struct outside_network* outnet, struct waiting_tcp* w)
2537 {
2538 	struct waiting_tcp* p = outnet->tcp_wait_first, *prev = NULL;
2539 	w->on_tcp_waiting_list = 0;
2540 	while(p) {
2541 		if(p == w) {
2542 			/* remove w */
2543 			if(prev)
2544 				prev->next_waiting = w->next_waiting;
2545 			else	outnet->tcp_wait_first = w->next_waiting;
2546 			if(outnet->tcp_wait_last == w)
2547 				outnet->tcp_wait_last = prev;
2548 			return;
2549 		}
2550 		prev = p;
2551 		p = p->next_waiting;
2552 	}
2553 	/* waiting_list_remove is currently called only with items that are
2554 	 * already in the waiting list. */
2555 	log_assert(0);
2556 }
2557 
2558 /** reuse tcp stream, remove serviced query from stream,
2559  * return true if the stream is kept, false if it is to be closed */
2560 static int
2561 reuse_tcp_remove_serviced_keep(struct waiting_tcp* w,
2562 	struct serviced_query* sq)
2563 {
2564 	struct pending_tcp* pend_tcp = (struct pending_tcp*)w->next_waiting;
2565 	verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep");
2566 	/* remove the callback. Let the query continue to write, so the
2567 	 * stream itself is not cancelled.  Also keep it as an entry in the
2568 	 * tree_by_id, in case the (no longer wanted) answer still returns,
2569 	 * and so that the same ID number cannot be picked in the meantime */
2570 	w->cb = NULL;
2571 	/* see if it can be entered in the reuse tree;
2572 	 * for that the FD has to be valid (not -1) */
2573 	if(pend_tcp->c->fd == -1) {
2574 		verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep: -1 fd");
2575 		return 0;
2576 	}
2577 	/* if in tree and used by other queries */
2578 	if(pend_tcp->reuse.node.key) {
2579 		verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep: in use by other queries");
2580 		/* do not reset the keepalive timer, for that
2581 		 * we'd need traffic, and this is where the serviced is
2582 		 * removed due to state machine internal reasons,
2583 		 * eg. iterator no longer interested in this query */
2584 		return 1;
2585 	}
2586 	/* if still open and want to keep it open */
2587 	if(pend_tcp->c->fd != -1 && sq->outnet->tcp_reuse.count <
2588 		sq->outnet->tcp_reuse_max) {
2589 		verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep: keep open");
2590 		/* set a keepalive timer on it */
2591 		if(!reuse_tcp_insert(sq->outnet, pend_tcp)) {
2592 			return 0;
2593 		}
2594 		reuse_tcp_setup_timeout(pend_tcp, sq->outnet->tcp_reuse_timeout);
2595 		return 1;
2596 	}
2597 	return 0;
2598 }
2599 
2600 /** cleanup serviced query entry */
2601 static void
2602 serviced_delete(struct serviced_query* sq)
2603 {
2604 	verbose(VERB_CLIENT, "serviced_delete");
2605 	if(sq->pending) {
2606 		/* clear up the pending query */
2607 		if(sq->status == serviced_query_UDP_EDNS ||
2608 			sq->status == serviced_query_UDP ||
2609 			sq->status == serviced_query_UDP_EDNS_FRAG ||
2610 			sq->status == serviced_query_UDP_EDNS_fallback) {
2611 			struct pending* p = (struct pending*)sq->pending;
2612 			verbose(VERB_CLIENT, "serviced_delete: UDP");
2613 			if(p->pc)
2614 				portcomm_loweruse(sq->outnet, p->pc);
2615 			pending_delete(sq->outnet, p);
2616 			/* this call can cause reentrant calls back into the
2617 			 * mesh */
2618 			outnet_send_wait_udp(sq->outnet);
2619 		} else {
2620 			struct waiting_tcp* w = (struct waiting_tcp*)
2621 				sq->pending;
2622 			verbose(VERB_CLIENT, "serviced_delete: TCP");
2623 			/* if on stream-write-waiting list then
2624 			 * remove from waiting list and waiting_tcp_delete */
2625 			if(w->write_wait_queued) {
2626 				struct pending_tcp* pend =
2627 					(struct pending_tcp*)w->next_waiting;
2628 				verbose(VERB_CLIENT, "serviced_delete: writewait");
2629 				reuse_tree_by_id_delete(&pend->reuse, w);
2630 				reuse_write_wait_remove(&pend->reuse, w);
2631 				waiting_tcp_delete(w);
2632 			} else if(!w->on_tcp_waiting_list) {
2633 				struct pending_tcp* pend =
2634 					(struct pending_tcp*)w->next_waiting;
2635 				verbose(VERB_CLIENT, "serviced_delete: tcpreusekeep");
2636 				if(!reuse_tcp_remove_serviced_keep(w, sq)) {
2637 					reuse_cb_and_decommission(sq->outnet,
2638 						pend, NETEVENT_CLOSED);
2639 					use_free_buffer(sq->outnet);
2640 				}
2641 				sq->pending = NULL;
2642 			} else {
2643 				verbose(VERB_CLIENT, "serviced_delete: tcpwait");
2644 				waiting_list_remove(sq->outnet, w);
2645 				waiting_tcp_delete(w);
2646 			}
2647 		}
2648 	}
2649 	/* does not delete from tree, caller has to do that */
2650 	serviced_node_del(&sq->node, NULL);
2651 }
2652 
2653 /** perturb a dname capitalization randomly */
2654 static void
2655 serviced_perturb_qname(struct ub_randstate* rnd, uint8_t* qbuf, size_t len)
2656 {
2657 	uint8_t lablen;
2658 	uint8_t* d = qbuf + 10;
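	/* sq->qbuf has no 2-byte ID, so offset 10 (2 flags + 8 section
	 * counts) is the start of the qname */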
2659 	long int random = 0;
2660 	int bits = 0;
2661 	log_assert(len >= 10 + 5 /* offset qname, root, qtype, qclass */);
2662 	(void)len;
2663 	lablen = *d++;
2664 	while(lablen) {
2665 		while(lablen--) {
2666 			/* only perturb A-Z, a-z */
2667 			if(isalpha((unsigned char)*d)) {
2668 				/* get a random bit */
2669 				if(bits == 0) {
2670 					random = ub_random(rnd);
2671 					bits = 30;
2672 				}
2673 				if(random & 0x1) {
2674 					*d = (uint8_t)toupper((unsigned char)*d);
2675 				} else {
2676 					*d = (uint8_t)tolower((unsigned char)*d);
2677 				}
2678 				random >>= 1;
2679 				bits--;
2680 			}
2681 			d++;
2682 		}
2683 		lablen = *d++;
2684 	}
2685 	if(verbosity >= VERB_ALGO) {
2686 		char buf[LDNS_MAX_DOMAINLEN+1];
2687 		dname_str(qbuf+10, buf);
2688 		verbose(VERB_ALGO, "qname perturbed to %s", buf);
2689 	}
2690 }
2691 
2692 /** put serviced query into a buffer */
2693 static void
2694 serviced_encode(struct serviced_query* sq, sldns_buffer* buff, int with_edns)
2695 {
2696 	/* if we are using 0x20 bits for ID randomness, perturb them */
2697 	if(sq->outnet->use_caps_for_id && !sq->nocaps) {
2698 		serviced_perturb_qname(sq->outnet->rnd, sq->qbuf, sq->qbuflen);
2699 	}
2700 	/* generate query */
2701 	sldns_buffer_clear(buff);
2702 	sldns_buffer_write_u16(buff, 0); /* id placeholder */
2703 	sldns_buffer_write(buff, sq->qbuf, sq->qbuflen);
2704 	sldns_buffer_flip(buff);
2705 	if(with_edns) {
2706 		/* add edns section */
2707 		struct edns_data edns;
2708 		struct edns_option padding_option;
2709 		edns.edns_present = 1;
2710 		edns.ext_rcode = 0;
2711 		edns.edns_version = EDNS_ADVERTISED_VERSION;
2712 		edns.opt_list_in = NULL;
2713 		edns.opt_list_out = sq->opt_list;
2714 		edns.opt_list_inplace_cb_out = NULL;
2715 		if(sq->status == serviced_query_UDP_EDNS_FRAG) {
2716 			if(addr_is_ip6(&sq->addr, sq->addrlen)) {
2717 				if(EDNS_FRAG_SIZE_IP6 < EDNS_ADVERTISED_SIZE)
2718 					edns.udp_size = EDNS_FRAG_SIZE_IP6;
2719 				else	edns.udp_size = EDNS_ADVERTISED_SIZE;
2720 			} else {
2721 				if(EDNS_FRAG_SIZE_IP4 < EDNS_ADVERTISED_SIZE)
2722 					edns.udp_size = EDNS_FRAG_SIZE_IP4;
2723 				else	edns.udp_size = EDNS_ADVERTISED_SIZE;
2724 			}
2725 		} else {
2726 			edns.udp_size = EDNS_ADVERTISED_SIZE;
2727 		}
2728 		edns.bits = 0;
2729 		if(sq->dnssec & EDNS_DO)
2730 			edns.bits = EDNS_DO;
2731 		if(sq->dnssec & BIT_CD)
2732 			LDNS_CD_SET(sldns_buffer_begin(buff));
2733 		if(sq->ssl_upstream && sq->padding_block_size) {
2734 			padding_option.opt_code = LDNS_EDNS_PADDING;
2735 			padding_option.opt_len = 0;
2736 			padding_option.opt_data = NULL;
2737 			padding_option.next = edns.opt_list_out;
2738 			edns.opt_list_out = &padding_option;
2739 			edns.padding_block_size = sq->padding_block_size;
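			/* padding_option is a stack variable; it is only
			 * linked into the outgoing option list for the
			 * attach_edns_record() call below */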
2740 		}
2741 		attach_edns_record(buff, &edns);
2742 	}
2743 }
2744 
2745 /**
2746  * Perform serviced query UDP sending operation.
2747  * Sends UDP with EDNS, unless infra host marked non EDNS.
2748  * @param sq: query to send.
2749  * @param buff: buffer scratch space.
2750  * @return 0 on error.
2751  */
2752 static int
2753 serviced_udp_send(struct serviced_query* sq, sldns_buffer* buff)
2754 {
2755 	int rtt, vs;
2756 	uint8_t edns_lame_known;
2757 	time_t now = *sq->outnet->now_secs;
2758 
2759 	if(!infra_host(sq->outnet->infra, &sq->addr, sq->addrlen, sq->zone,
2760 		sq->zonelen, now, &vs, &edns_lame_known, &rtt))
2761 		return 0;
2762 	sq->last_rtt = rtt;
2763 	verbose(VERB_ALGO, "EDNS lookup known=%d vs=%d", edns_lame_known, vs);
2764 	if(sq->status == serviced_initial) {
2765 		if(vs != -1) {
2766 			sq->status = serviced_query_UDP_EDNS;
2767 		} else {
2768 			sq->status = serviced_query_UDP;
2769 		}
2770 	}
2771 	serviced_encode(sq, buff, (sq->status == serviced_query_UDP_EDNS) ||
2772 		(sq->status == serviced_query_UDP_EDNS_FRAG));
2773 	sq->last_sent_time = *sq->outnet->now_tv;
2774 	sq->edns_lame_known = (int)edns_lame_known;
2775 	verbose(VERB_ALGO, "serviced query UDP timeout=%d msec", rtt);
2776 	sq->pending = pending_udp_query(sq, buff, rtt,
2777 		serviced_udp_callback, sq);
2778 	if(!sq->pending)
2779 		return 0;
2780 	return 1;
2781 }
2782 
2783 /** check that perturbed qname is identical */
2784 static int
2785 serviced_check_qname(sldns_buffer* pkt, uint8_t* qbuf, size_t qbuflen)
2786 {
2787 	uint8_t* d1 = sldns_buffer_begin(pkt)+12;
2788 	uint8_t* d2 = qbuf+10;
2789 	uint8_t len1, len2;
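	/* d1 is the qname in the reply, after its full 12-byte header;
	 * d2 is the qname in the stored query (sq->qbuf), which has no
	 * 2-byte ID, so its header is only 10 bytes */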
2790 	int count = 0;
2791 	if(sldns_buffer_limit(pkt) < 12+1+4) /* packet too small for qname */
2792 		return 0;
2793 	log_assert(qbuflen >= 15 /* 10 header, root, type, class */);
2794 	len1 = *d1++;
2795 	len2 = *d2++;
2796 	while(len1 != 0 || len2 != 0) {
2797 		if(LABEL_IS_PTR(len1)) {
2798 			/* check if we can read *d1, the rest of the compression ptr */
2799 			if(d1 >= sldns_buffer_at(pkt, sldns_buffer_limit(pkt)))
2800 				return 0;
2801 			d1 = sldns_buffer_begin(pkt)+PTR_OFFSET(len1, *d1);
2802 			/* check if we can read the destination *d1 */
2803 			if(d1 >= sldns_buffer_at(pkt, sldns_buffer_limit(pkt)))
2804 				return 0;
2805 			len1 = *d1++;
2806 			if(count++ > MAX_COMPRESS_PTRS)
2807 				return 0;
2808 			continue;
2809 		}
2810 		if(d2 > qbuf+qbuflen)
2811 			return 0;
2812 		if(len1 != len2)
2813 			return 0;
2814 		if(len1 > LDNS_MAX_LABELLEN)
2815 			return 0;
2816 		/* check len1 + 1(next length) are okay to read */
2817 		if(d1+len1 >= sldns_buffer_at(pkt, sldns_buffer_limit(pkt)))
2818 			return 0;
2819 		log_assert(len1 <= LDNS_MAX_LABELLEN);
2820 		log_assert(len2 <= LDNS_MAX_LABELLEN);
2821 		log_assert(len1 == len2 && len1 != 0);
2822 		/* compare the labels - bitwise identical */
2823 		if(memcmp(d1, d2, len1) != 0)
2824 			return 0;
2825 		d1 += len1;
2826 		d2 += len2;
2827 		len1 = *d1++;
2828 		len2 = *d2++;
2829 	}
2830 	return 1;
2831 }
2832 
2833 /** call the callbacks for a serviced query */
2834 static void
2835 serviced_callbacks(struct serviced_query* sq, int error, struct comm_point* c,
2836 	struct comm_reply* rep)
2837 {
2838 	struct service_callback* p;
2839 	int dobackup = (sq->cblist && sq->cblist->next); /* >1 cb*/
2840 	uint8_t *backup_p = NULL;
2841 	size_t backlen = 0;
2842 #ifdef UNBOUND_DEBUG
2843 	rbnode_type* rem =
2844 #else
2845 	(void)
2846 #endif
2847 	/* remove from tree, and schedule for deletion, so that callbacks
2848 	 * can safely deregister themselves and even create new serviced
2849 	 * queries that are identical to this one. */
2850 	rbtree_delete(sq->outnet->serviced, sq);
2851 	log_assert(rem); /* should have been present */
2852 	sq->to_be_deleted = 1;
2853 	verbose(VERB_ALGO, "svcd callbacks start");
2854 	if(sq->outnet->use_caps_for_id && error == NETEVENT_NOERROR && c &&
2855 		!sq->nocaps && sq->qtype != LDNS_RR_TYPE_PTR) {
2856 		/* for type PTR do not check the perturbed name in the answer,
2857 		 * for compatibility with cisco dns guard boxes that mess up
2858 		 * the 0x20 contents of reverse queries */
2859 		/* noerror and nxdomain must have a qname in reply */
2860 		if(sldns_buffer_read_u16_at(c->buffer, 4) == 0 &&
2861 			(LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer))
2862 				== LDNS_RCODE_NOERROR ||
2863 			 LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer))
2864 				== LDNS_RCODE_NXDOMAIN)) {
2865 			verbose(VERB_DETAIL, "no qname in reply to check 0x20ID");
2866 			log_addr(VERB_DETAIL, "from server",
2867 				&sq->addr, sq->addrlen);
2868 			log_buf(VERB_DETAIL, "for packet", c->buffer);
2869 			error = NETEVENT_CLOSED;
2870 			c = NULL;
2871 		} else if(sldns_buffer_read_u16_at(c->buffer, 4) > 0 &&
2872 			!serviced_check_qname(c->buffer, sq->qbuf,
2873 			sq->qbuflen)) {
2874 			verbose(VERB_DETAIL, "wrong 0x20-ID in reply qname");
2875 			log_addr(VERB_DETAIL, "from server",
2876 				&sq->addr, sq->addrlen);
2877 			log_buf(VERB_DETAIL, "for packet", c->buffer);
2878 			error = NETEVENT_CAPSFAIL;
2879 			/* and cleanup too */
2880 			pkt_dname_tolower(c->buffer,
2881 				sldns_buffer_at(c->buffer, 12));
2882 		} else {
2883 			verbose(VERB_ALGO, "good 0x20-ID in reply qname");
2884 			/* cleanup caps, prettier cache contents. */
2885 			pkt_dname_tolower(c->buffer,
2886 				sldns_buffer_at(c->buffer, 12));
2887 		}
2888 	}
2889 	if(dobackup && c) {
2890 		/* make a backup of the query, since the querystate processing
2891 		 * may send outgoing queries that overwrite the buffer.
2892 		 * use secondary buffer to store the query.
2893 		 * This is a data copy, but faster than packet to server */
2894 		backlen = sldns_buffer_limit(c->buffer);
2895 		backup_p = memdup(sldns_buffer_begin(c->buffer), backlen);
2896 		if(!backup_p) {
2897 			log_err("malloc failure in serviced query callbacks");
2898 			error = NETEVENT_CLOSED;
2899 			c = NULL;
2900 		}
2901 		sq->outnet->svcd_overhead = backlen;
2902 	}
2903 	/* test the actual sq->cblist, because the next elem could be deleted*/
2904 	while((p=sq->cblist) != NULL) {
2905 		sq->cblist = p->next; /* remove this element */
2906 		if(dobackup && c) {
2907 			sldns_buffer_clear(c->buffer);
2908 			sldns_buffer_write(c->buffer, backup_p, backlen);
2909 			sldns_buffer_flip(c->buffer);
2910 		}
2911 		fptr_ok(fptr_whitelist_serviced_query(p->cb));
2912 		(void)(*p->cb)(c, p->cb_arg, error, rep);
2913 		free(p);
2914 	}
2915 	if(backup_p) {
2916 		free(backup_p);
2917 		sq->outnet->svcd_overhead = 0;
2918 	}
2919 	verbose(VERB_ALGO, "svcd callbacks end");
2920 	log_assert(sq->cblist == NULL);
2921 	serviced_delete(sq);
2922 }
2923 
2924 int
2925 serviced_tcp_callback(struct comm_point* c, void* arg, int error,
2926         struct comm_reply* rep)
2927 {
2928 	struct serviced_query* sq = (struct serviced_query*)arg;
2929 	struct comm_reply r2;
2930 #ifdef USE_DNSTAP
2931 	struct waiting_tcp* w = (struct waiting_tcp*)sq->pending;
2932 	struct pending_tcp* pend_tcp = NULL;
2933 	struct port_if* pi = NULL;
2934 	if(!w->on_tcp_waiting_list && w->next_waiting) {
2935 		pend_tcp = (struct pending_tcp*)w->next_waiting;
2936 		pi = pend_tcp->pi;
2937 	}
2938 #endif
2939 	sq->pending = NULL; /* removed after this callback */
2940 	if(error != NETEVENT_NOERROR)
2941 		log_addr(VERB_QUERY, "tcp error for address",
2942 			&sq->addr, sq->addrlen);
2943 	if(error==NETEVENT_NOERROR)
2944 		infra_update_tcp_works(sq->outnet->infra, &sq->addr,
2945 			sq->addrlen, sq->zone, sq->zonelen);
2946 #ifdef USE_DNSTAP
2947 	/*
2948 	 * sending src (local service)/dst (upstream) addresses over DNSTAP
2949 	 */
2950 	if(error==NETEVENT_NOERROR && pi && sq->outnet->dtenv &&
2951 	   (sq->outnet->dtenv->log_resolver_response_messages ||
2952 	    sq->outnet->dtenv->log_forwarder_response_messages)) {
2953 		log_addr(VERB_ALGO, "response from upstream", &sq->addr, sq->addrlen);
2954 		log_addr(VERB_ALGO, "to local addr", &pi->addr, pi->addrlen);
2955 		dt_msg_send_outside_response(sq->outnet->dtenv, &sq->addr,
2956 			&pi->addr, c->type, sq->zone, sq->zonelen, sq->qbuf,
2957 			sq->qbuflen, &sq->last_sent_time, sq->outnet->now_tv,
2958 			c->buffer);
2959 	}
2960 #endif
2961 	if(error==NETEVENT_NOERROR && sq->status == serviced_query_TCP_EDNS &&
2962 		(LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) ==
2963 		LDNS_RCODE_FORMERR || LDNS_RCODE_WIRE(sldns_buffer_begin(
2964 		c->buffer)) == LDNS_RCODE_NOTIMPL) ) {
2965 		/* attempt to fallback to nonEDNS */
2966 		sq->status = serviced_query_TCP_EDNS_fallback;
2967 		serviced_tcp_initiate(sq, c->buffer);
2968 		return 0;
2969 	} else if(error==NETEVENT_NOERROR &&
2970 		sq->status == serviced_query_TCP_EDNS_fallback &&
2971 			(LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) ==
2972 			LDNS_RCODE_NOERROR || LDNS_RCODE_WIRE(
2973 			sldns_buffer_begin(c->buffer)) == LDNS_RCODE_NXDOMAIN
2974 			|| LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer))
2975 			== LDNS_RCODE_YXDOMAIN)) {
2976 		/* the fallback produced a result that looks promising, note
2977 		 * that this server should be approached without EDNS */
2978 		/* only store noEDNS in cache if domain is noDNSSEC */
2979 		if(!sq->want_dnssec)
2980 		  if(!infra_edns_update(sq->outnet->infra, &sq->addr,
2981 			sq->addrlen, sq->zone, sq->zonelen, -1,
2982 			*sq->outnet->now_secs))
2983 			log_err("Out of memory caching no edns for host");
2984 		sq->status = serviced_query_TCP;
2985 	}
2986 	if(sq->tcp_upstream || sq->ssl_upstream) {
2987 	    struct timeval now = *sq->outnet->now_tv;
2988 	    if(error!=NETEVENT_NOERROR) {
2989 	        if(!infra_rtt_update(sq->outnet->infra, &sq->addr,
2990 		    sq->addrlen, sq->zone, sq->zonelen, sq->qtype,
2991 		    -1, sq->last_rtt, (time_t)now.tv_sec))
2992 		    log_err("out of memory in TCP exponential backoff.");
2993 	    } else if(now.tv_sec > sq->last_sent_time.tv_sec ||
2994 		(now.tv_sec == sq->last_sent_time.tv_sec &&
2995 		now.tv_usec > sq->last_sent_time.tv_usec)) {
2996 		/* compute the round trip time in milliseconds from the sec/usec difference */
2997 		int roundtime = ((int)(now.tv_sec - sq->last_sent_time.tv_sec))*1000
2998 		  + ((int)now.tv_usec - (int)sq->last_sent_time.tv_usec)/1000;
2999 		verbose(VERB_ALGO, "measured TCP-time at %d msec", roundtime);
3000 		log_assert(roundtime >= 0);
3001 		/* only store if less than AUTH_TIMEOUT seconds; it could be
3002 		 * huge if the system hibernated and we just woke up */
3003 		if(roundtime < 60000) {
3004 		    if(!infra_rtt_update(sq->outnet->infra, &sq->addr,
3005 			sq->addrlen, sq->zone, sq->zonelen, sq->qtype,
3006 			roundtime, sq->last_rtt, (time_t)now.tv_sec))
3007 			log_err("out of memory noting rtt.");
3008 		}
3009 	    }
3010 	}
3011 	/* insert address into reply info */
3012 	if(!rep) {
3013 		/* create one if there isn't (on errors) */
3014 		rep = &r2;
3015 		r2.c = c;
3016 	}
3017 	memcpy(&rep->addr, &sq->addr, sq->addrlen);
3018 	rep->addrlen = sq->addrlen;
3019 	serviced_callbacks(sq, error, c, rep);
3020 	return 0;
3021 }
3022 
3023 static void
3024 serviced_tcp_initiate(struct serviced_query* sq, sldns_buffer* buff)
3025 {
3026 	verbose(VERB_ALGO, "initiate TCP query %s",
3027 		sq->status==serviced_query_TCP_EDNS?"EDNS":"");
3028 	serviced_encode(sq, buff, sq->status == serviced_query_TCP_EDNS);
3029 	sq->last_sent_time = *sq->outnet->now_tv;
3030 	sq->pending = pending_tcp_query(sq, buff, sq->outnet->tcp_auth_query_timeout,
3031 		serviced_tcp_callback, sq);
3032 	if(!sq->pending) {
3033 		/* delete from tree so that a retry by above layer does not
3034 		 * clash with this entry */
3035 		verbose(VERB_ALGO, "serviced_tcp_initiate: failed to send tcp query");
3036 		serviced_callbacks(sq, NETEVENT_CLOSED, NULL, NULL);
3037 	}
3038 }
3039 
3040 /** Send serviced query over TCP; return false on initial failure */
3041 static int
3042 serviced_tcp_send(struct serviced_query* sq, sldns_buffer* buff)
3043 {
3044 	int vs, rtt, timeout;
3045 	uint8_t edns_lame_known;
3046 	if(!infra_host(sq->outnet->infra, &sq->addr, sq->addrlen, sq->zone,
3047 		sq->zonelen, *sq->outnet->now_secs, &vs, &edns_lame_known,
3048 		&rtt))
3049 		return 0;
3050 	sq->last_rtt = rtt;
3051 	if(vs != -1)
3052 		sq->status = serviced_query_TCP_EDNS;
3053 	else 	sq->status = serviced_query_TCP;
3054 	serviced_encode(sq, buff, sq->status == serviced_query_TCP_EDNS);
3055 	sq->last_sent_time = *sq->outnet->now_tv;
3056 	if(sq->tcp_upstream || sq->ssl_upstream) {
3057 		timeout = rtt;
3058 		if(rtt >= UNKNOWN_SERVER_NICENESS && rtt < sq->outnet->tcp_auth_query_timeout)
3059 			timeout = sq->outnet->tcp_auth_query_timeout;
3060 	} else {
3061 		timeout = sq->outnet->tcp_auth_query_timeout;
3062 	}
3063 	sq->pending = pending_tcp_query(sq, buff, timeout,
3064 		serviced_tcp_callback, sq);
3065 	return sq->pending != NULL;
3066 }
3067 
3068 /** See if the packet is EDNS-malformed; it has zeroes at the start.
3069  * This comes from servers that return malformed packets to EDNS0 queries,
3070  * but return good packets for non-EDNS0 queries.
3071  * We try to detect their output without resorting to a full parse or
3072  * a check for too many bytes after the end of the packet. */
3073 static int
3074 packet_edns_malformed(struct sldns_buffer* buf, int qtype)
3075 {
3076 	size_t len;
3077 	if(sldns_buffer_limit(buf) < LDNS_HEADER_SIZE)
3078 		return 1; /* malformed */
3079 	/* they have NOERROR rcode, 1 answer. */
3080 	if(LDNS_RCODE_WIRE(sldns_buffer_begin(buf)) != LDNS_RCODE_NOERROR)
3081 		return 0;
3082 	/* one query (to skip) and answer records */
3083 	if(LDNS_QDCOUNT(sldns_buffer_begin(buf)) != 1 ||
3084 		LDNS_ANCOUNT(sldns_buffer_begin(buf)) == 0)
3085 		return 0;
3086 	/* skip qname */
3087 	len = dname_valid(sldns_buffer_at(buf, LDNS_HEADER_SIZE),
3088 		sldns_buffer_limit(buf)-LDNS_HEADER_SIZE);
3089 	if(len == 0)
3090 		return 0;
3091 	if(len == 1 && qtype == 0)
3092 		return 0; /* we asked for '.' and type 0 */
3093 	/* and then 4 bytes (type and class of query) */
3094 	if(sldns_buffer_limit(buf) < LDNS_HEADER_SIZE + len + 4 + 3)
3095 		return 0;
3096 
3097 	/* and start with 11 zeroes as the answer RR */
3098 	/* so check the qtype of the answer record, qname=0, type=0 */
3099 	if(sldns_buffer_at(buf, LDNS_HEADER_SIZE+len+4)[0] == 0 &&
3100 	   sldns_buffer_at(buf, LDNS_HEADER_SIZE+len+4)[1] == 0 &&
3101 	   sldns_buffer_at(buf, LDNS_HEADER_SIZE+len+4)[2] == 0)
3102 		return 1;
3103 	return 0;
3104 }
3105 
3106 int
3107 serviced_udp_callback(struct comm_point* c, void* arg, int error,
3108         struct comm_reply* rep)
3109 {
3110 	struct serviced_query* sq = (struct serviced_query*)arg;
3111 	struct outside_network* outnet = sq->outnet;
3112 	struct timeval now = *sq->outnet->now_tv;
3113 #ifdef USE_DNSTAP
3114 	struct pending* p = (struct pending*)sq->pending;
3115 	struct port_if* pi = p->pc->pif;
3116 #endif
3117 
3118 	sq->pending = NULL; /* removed after callback */
3119 	if(error == NETEVENT_TIMEOUT) {
3120 		if(sq->status == serviced_query_UDP_EDNS && sq->last_rtt < 5000) {
3121 			/* fallback to 1480/1280 */
3122 			sq->status = serviced_query_UDP_EDNS_FRAG;
3123 			log_name_addr(VERB_ALGO, "try edns1xx0", sq->qbuf+10,
3124 				&sq->addr, sq->addrlen);
3125 			if(!serviced_udp_send(sq, c->buffer)) {
3126 				serviced_callbacks(sq, NETEVENT_CLOSED, c, rep);
3127 			}
3128 			return 0;
3129 		}
3130 		if(sq->status == serviced_query_UDP_EDNS_FRAG) {
3131 			/* fragmentation size did not fix it */
3132 			sq->status = serviced_query_UDP_EDNS;
3133 		}
3134 		sq->retry++;
3135 		if(!infra_rtt_update(outnet->infra, &sq->addr, sq->addrlen,
3136 			sq->zone, sq->zonelen, sq->qtype, -1, sq->last_rtt,
3137 			(time_t)now.tv_sec))
3138 			log_err("out of memory in UDP exponential backoff");
3139 		if(sq->retry < OUTBOUND_UDP_RETRY) {
3140 			log_name_addr(VERB_ALGO, "retry query", sq->qbuf+10,
3141 				&sq->addr, sq->addrlen);
3142 			if(!serviced_udp_send(sq, c->buffer)) {
3143 				serviced_callbacks(sq, NETEVENT_CLOSED, c, rep);
3144 			}
3145 			return 0;
3146 		}
3147 	}
3148 	if(error != NETEVENT_NOERROR) {
3149 		/* udp returns error (due to no ID or interface available) */
3150 		serviced_callbacks(sq, error, c, rep);
3151 		return 0;
3152 	}
3153 #ifdef USE_DNSTAP
3154 	/*
3155 	 * sending src (local service)/dst (upstream) addresses over DNSTAP
3156 	 */
3157 	if(error == NETEVENT_NOERROR && outnet->dtenv &&
3158 	   (outnet->dtenv->log_resolver_response_messages ||
3159 	    outnet->dtenv->log_forwarder_response_messages)) {
3160 		log_addr(VERB_ALGO, "response from upstream", &sq->addr, sq->addrlen);
3161 		log_addr(VERB_ALGO, "to local addr", &pi->addr, pi->addrlen);
3162 		dt_msg_send_outside_response(outnet->dtenv, &sq->addr, &pi->addr, c->type,
3163 		  sq->zone, sq->zonelen, sq->qbuf, sq->qbuflen,
3164 		  &sq->last_sent_time, sq->outnet->now_tv, c->buffer);
3165 	}
3166 #endif
3167 	if( (sq->status == serviced_query_UDP_EDNS
3168 		||sq->status == serviced_query_UDP_EDNS_FRAG)
3169 		&& (LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer))
3170 			== LDNS_RCODE_FORMERR || LDNS_RCODE_WIRE(
3171 			sldns_buffer_begin(c->buffer)) == LDNS_RCODE_NOTIMPL
3172 		    || packet_edns_malformed(c->buffer, sq->qtype)
3173 			)) {
3174 		/* try to get an answer by falling back without EDNS */
3175 		verbose(VERB_ALGO, "serviced query: attempt without EDNS");
3176 		sq->status = serviced_query_UDP_EDNS_fallback;
3177 		sq->retry = 0;
3178 		if(!serviced_udp_send(sq, c->buffer)) {
3179 			serviced_callbacks(sq, NETEVENT_CLOSED, c, rep);
3180 		}
3181 		return 0;
3182 	} else if(sq->status == serviced_query_UDP_EDNS &&
3183 		!sq->edns_lame_known) {
3184 		/* now we know that EDNS queries receive answers; store that */
3185 		log_addr(VERB_ALGO, "serviced query: EDNS works for",
3186 			&sq->addr, sq->addrlen);
3187 		if(!infra_edns_update(outnet->infra, &sq->addr, sq->addrlen,
3188 			sq->zone, sq->zonelen, 0, (time_t)now.tv_sec)) {
3189 			log_err("Out of memory caching edns works");
3190 		}
3191 		sq->edns_lame_known = 1;
3192 	} else if(sq->status == serviced_query_UDP_EDNS_fallback &&
3193 		!sq->edns_lame_known && (LDNS_RCODE_WIRE(
3194 		sldns_buffer_begin(c->buffer)) == LDNS_RCODE_NOERROR ||
3195 		LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) ==
3196 		LDNS_RCODE_NXDOMAIN || LDNS_RCODE_WIRE(sldns_buffer_begin(
3197 		c->buffer)) == LDNS_RCODE_YXDOMAIN)) {
3198 		/* the fallback produced a result that looks promising, note
3199 		 * that this server should be approached without EDNS */
3200 		/* only store noEDNS in cache if domain is noDNSSEC */
3201 		if(!sq->want_dnssec) {
3202 		  log_addr(VERB_ALGO, "serviced query: EDNS fails for",
3203 			&sq->addr, sq->addrlen);
3204 		  if(!infra_edns_update(outnet->infra, &sq->addr, sq->addrlen,
3205 			sq->zone, sq->zonelen, -1, (time_t)now.tv_sec)) {
3206 			log_err("Out of memory caching no edns for host");
3207 		  }
3208 		} else {
3209 		  log_addr(VERB_ALGO, "serviced query: EDNS fails, but "
3210 			"not stored because need DNSSEC for", &sq->addr,
3211 			sq->addrlen);
3212 		}
3213 		sq->status = serviced_query_UDP;
3214 	}
3215 	if(now.tv_sec > sq->last_sent_time.tv_sec ||
3216 		(now.tv_sec == sq->last_sent_time.tv_sec &&
3217 		now.tv_usec > sq->last_sent_time.tv_usec)) {
3218 		/* compute the roundtrip time in milliseconds */
3219 		int roundtime = ((int)(now.tv_sec - sq->last_sent_time.tv_sec))*1000
3220 		  + ((int)now.tv_usec - (int)sq->last_sent_time.tv_usec)/1000;
3221 		verbose(VERB_ALGO, "measured roundtrip at %d msec", roundtime);
3222 		log_assert(roundtime >= 0);
3223 		/* in case the system hibernated, do not enter a huge value;
3224 		 * values above this limit give trouble with server selection */
3225 		if(roundtime < 60000) {
3226 		    if(!infra_rtt_update(outnet->infra, &sq->addr, sq->addrlen,
3227 			sq->zone, sq->zonelen, sq->qtype, roundtime,
3228 			sq->last_rtt, (time_t)now.tv_sec))
3229 			log_err("out of memory noting rtt.");
3230 		}
3231 	}
3232 	/* perform TC flag check and TCP fallback after updating our
3233 	 * cache entries for EDNS status and RTT times */
3234 	if(LDNS_TC_WIRE(sldns_buffer_begin(c->buffer))) {
3235 		/* fallback to TCP */
3236 		/* this discards partial UDP contents */
3237 		if(sq->status == serviced_query_UDP_EDNS ||
3238 			sq->status == serviced_query_UDP_EDNS_FRAG ||
3239 			sq->status == serviced_query_UDP_EDNS_fallback)
3240 			/* if we have unfinished EDNS_fallback, start again */
3241 			sq->status = serviced_query_TCP_EDNS;
3242 		else	sq->status = serviced_query_TCP;
3243 		serviced_tcp_initiate(sq, c->buffer);
3244 		return 0;
3245 	}
3246 	/* yay! an answer */
3247 	serviced_callbacks(sq, error, c, rep);
3248 	return 0;
3249 }
3250 
3251 struct serviced_query*
3252 outnet_serviced_query(struct outside_network* outnet,
3253 	struct query_info* qinfo, uint16_t flags, int dnssec, int want_dnssec,
3254 	int nocaps, int tcp_upstream, int ssl_upstream, char* tls_auth_name,
3255 	struct sockaddr_storage* addr, socklen_t addrlen, uint8_t* zone,
3256 	size_t zonelen, struct module_qstate* qstate,
3257 	comm_point_callback_type* callback, void* callback_arg, sldns_buffer* buff,
3258 	struct module_env* env)
3259 {
3260 	struct serviced_query* sq;
3261 	struct service_callback* cb;
3262 	struct edns_string_addr* client_string_addr;
3263 
3264 	if(!inplace_cb_query_call(env, qinfo, flags, addr, addrlen, zone, zonelen,
3265 		qstate, qstate->region))
3266 			return NULL;
3267 
3268 	if((client_string_addr = edns_string_addr_lookup(
3269 		&env->edns_strings->client_strings, addr, addrlen))) {
3270 		edns_opt_list_append(&qstate->edns_opts_back_out,
3271 			env->edns_strings->client_string_opcode,
3272 			client_string_addr->string_len,
3273 			client_string_addr->string, qstate->region);
3274 	}
3275 
3276 	serviced_gen_query(buff, qinfo->qname, qinfo->qname_len, qinfo->qtype,
3277 		qinfo->qclass, flags);
3278 	sq = lookup_serviced(outnet, buff, dnssec, addr, addrlen,
3279 		qstate->edns_opts_back_out);
3280 	/* duplicate entries are included in the callback list, because
3281 	 * the caller keeps a counterpart registration for each of them
3282 	 * that needs to be removed one-for-one (with callbacks perhaps). */
3283 	if(!(cb = (struct service_callback*)malloc(sizeof(*cb))))
3284 		return NULL;
3285 	if(!sq) {
3286 		/* make new serviced query entry */
3287 		sq = serviced_create(outnet, buff, dnssec, want_dnssec, nocaps,
3288 			tcp_upstream, ssl_upstream, tls_auth_name, addr,
3289 			addrlen, zone, zonelen, (int)qinfo->qtype,
3290 			qstate->edns_opts_back_out,
3291 			( ssl_upstream && env->cfg->pad_queries
3292 			? env->cfg->pad_queries_block_size : 0 ));
3293 		if(!sq) {
3294 			free(cb);
3295 			return NULL;
3296 		}
3297 		/* perform first network action */
3298 		if(outnet->do_udp && !(tcp_upstream || ssl_upstream)) {
3299 			if(!serviced_udp_send(sq, buff)) {
3300 				(void)rbtree_delete(outnet->serviced, sq);
3301 				serviced_node_del(&sq->node, NULL);
3302 				free(cb);
3303 				return NULL;
3304 			}
3305 		} else {
3306 			if(!serviced_tcp_send(sq, buff)) {
3307 				(void)rbtree_delete(outnet->serviced, sq);
3308 				serviced_node_del(&sq->node, NULL);
3309 				free(cb);
3310 				return NULL;
3311 			}
3312 		}
3313 	}
3314 	/* add callback to list of callbacks */
3315 	cb->cb = callback;
3316 	cb->cb_arg = callback_arg;
3317 	cb->next = sq->cblist;
3318 	sq->cblist = cb;
3319 	return sq;
3320 }
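/*
 * Usage sketch for outnet_serviced_query (illustration only; the callback
 * and state names below are hypothetical, not taken from this code base):
 *
 *	struct serviced_query* sq = outnet_serviced_query(outnet, &qinfo,
 *		flags, 0, 0, 0, 0, 0, NULL, &addr, addrlen, zone, zonelen,
 *		qstate, my_udp_callback, my_module_state,
 *		env->scratch_buffer, env);
 *	if(!sq)
 *		return 0;
 *	...
 *	outnet_serviced_query_stop(sq, my_module_state);
 *
 * The callback argument doubles as the handle that is later given to
 * outnet_serviced_query_stop to remove the registration again.
 */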
3321 
3322 /** remove callback from list */
3323 static void
3324 callback_list_remove(struct serviced_query* sq, void* cb_arg)
3325 {
3326 	struct service_callback** pp = &sq->cblist;
3327 	while(*pp) {
3328 		if((*pp)->cb_arg == cb_arg) {
3329 			struct service_callback* del = *pp;
3330 			*pp = del->next;
3331 			free(del);
3332 			return;
3333 		}
3334 		pp = &(*pp)->next;
3335 	}
3336 }
3337 
3338 void outnet_serviced_query_stop(struct serviced_query* sq, void* cb_arg)
3339 {
3340 	if(!sq)
3341 		return;
3342 	callback_list_remove(sq, cb_arg);
3343 	/* if callbacks() routine scheduled deletion, let it do that */
3344 	if(!sq->cblist && !sq->to_be_deleted) {
3345 		(void)rbtree_delete(sq->outnet->serviced, sq);
3346 		serviced_delete(sq);
3347 	}
3348 }
3349 
3350 /** create fd to send to this destination */
3351 static int
3352 fd_for_dest(struct outside_network* outnet, struct sockaddr_storage* to_addr,
3353 	socklen_t to_addrlen)
3354 {
3355 	struct sockaddr_storage* addr;
3356 	socklen_t addrlen;
3357 	int i, try, pnum, dscp;
3358 	struct port_if* pif;
3359 
3360 	/* create fd */
3361 	dscp = outnet->ip_dscp;
3362 	for(try = 0; try<1000; try++) {
3363 		int port = 0;
3364 		int freebind = 0;
3365 		int noproto = 0;
3366 		int inuse = 0;
3367 		int fd = -1;
3368 
3369 		/* select interface */
3370 		if(addr_is_ip6(to_addr, to_addrlen)) {
3371 			if(outnet->num_ip6 == 0) {
3372 				char to[64];
3373 				addr_to_str(to_addr, to_addrlen, to, sizeof(to));
3374 				verbose(VERB_QUERY, "need ipv6 to send, but no ipv6 outgoing interfaces, for %s", to);
3375 				return -1;
3376 			}
3377 			i = ub_random_max(outnet->rnd, outnet->num_ip6);
3378 			pif = &outnet->ip6_ifs[i];
3379 		} else {
3380 			if(outnet->num_ip4 == 0) {
3381 				char to[64];
3382 				addr_to_str(to_addr, to_addrlen, to, sizeof(to));
3383 				verbose(VERB_QUERY, "need ipv4 to send, but no ipv4 outgoing interfaces, for %s", to);
3384 				return -1;
3385 			}
3386 			i = ub_random_max(outnet->rnd, outnet->num_ip4);
3387 			pif = &outnet->ip4_ifs[i];
3388 		}
3389 		addr = &pif->addr;
3390 		addrlen = pif->addrlen;
3391 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
3392 		pnum = ub_random_max(outnet->rnd, pif->avail_total);
3393 		if(pnum < pif->inuse) {
3394 			/* port already open */
3395 			port = pif->out[pnum]->number;
3396 		} else {
3397 			/* unused ports are at the start of the avail_ports array */
3398 			port = pif->avail_ports[pnum - pif->inuse];
3399 		}
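		/* for illustration: with avail_total=256 and inuse=3,
		 * pnum is drawn from 0..255; pnum=1 reuses the port
		 * number of out[1], while pnum=10 picks avail_ports[7],
		 * a port number that is not currently open */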
3400 #else
3401 		pnum = port = 0;
3402 #endif
3403 		if(addr_is_ip6(to_addr, to_addrlen)) {
3404 			struct sockaddr_in6 sa = *(struct sockaddr_in6*)addr;
3405 			sa.sin6_port = (in_port_t)htons((uint16_t)port);
3406 			fd = create_udp_sock(AF_INET6, SOCK_DGRAM,
3407 				(struct sockaddr*)&sa, addrlen, 1, &inuse, &noproto,
3408 				0, 0, 0, NULL, 0, freebind, 0, dscp);
3409 		} else {
3410 			struct sockaddr_in* sa = (struct sockaddr_in*)addr;
3411 			sa->sin_port = (in_port_t)htons((uint16_t)port);
3412 			fd = create_udp_sock(AF_INET, SOCK_DGRAM,
3413 				(struct sockaddr*)addr, addrlen, 1, &inuse, &noproto,
3414 				0, 0, 0, NULL, 0, freebind, 0, dscp);
3415 		}
3416 		if(fd != -1) {
3417 			return fd;
3418 		}
3419 		if(!inuse) {
3420 			return -1;
3421 		}
3422 	}
3423 	/* too many tries */
3424 	log_err("cannot send probe, ports are in use");
3425 	return -1;
3426 }
3427 
3428 struct comm_point*
3429 outnet_comm_point_for_udp(struct outside_network* outnet,
3430 	comm_point_callback_type* cb, void* cb_arg,
3431 	struct sockaddr_storage* to_addr, socklen_t to_addrlen)
3432 {
3433 	struct comm_point* cp;
3434 	int fd = fd_for_dest(outnet, to_addr, to_addrlen);
3435 	if(fd == -1) {
3436 		return NULL;
3437 	}
3438 	cp = comm_point_create_udp(outnet->base, fd, outnet->udp_buff,
3439 		cb, cb_arg, NULL);
3440 	if(!cp) {
3441 		log_err("malloc failure");
3442 		close(fd);
3443 		return NULL;
3444 	}
3445 	return cp;
3446 }
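/*
 * Usage sketch (illustration; the callback and state names are
 * hypothetical):
 *
 *	struct comm_point* cp = outnet_comm_point_for_udp(outnet,
 *		my_probe_callback, my_probe_state, &dest_addr, dest_addrlen);
 *	if(!cp)
 *		return 0;
 *	... send the probe on cp; replies arrive via my_probe_callback ...
 *	comm_point_delete(cp);
 */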
3447 
3448 /** setup SSL for comm point */
3449 static int
3450 setup_comm_ssl(struct comm_point* cp, struct outside_network* outnet,
3451 	int fd, char* host)
3452 {
3453 	cp->ssl = outgoing_ssl_fd(outnet->sslctx, fd);
3454 	if(!cp->ssl) {
3455 		log_err("cannot create SSL object");
3456 		return 0;
3457 	}
3458 #ifdef USE_WINSOCK
3459 	comm_point_tcp_win_bio_cb(cp, cp->ssl);
3460 #endif
3461 	cp->ssl_shake_state = comm_ssl_shake_write;
3462 	/* https verification */
3463 #ifdef HAVE_SSL
3464 	if(outnet->tls_use_sni) {
3465 		(void)SSL_set_tlsext_host_name(cp->ssl, host);
3466 	}
3467 #endif
3468 #ifdef HAVE_SSL_SET1_HOST
3469 	if((SSL_CTX_get_verify_mode(outnet->sslctx)&SSL_VERIFY_PEER)) {
3470 		/* because we set SSL_VERIFY_PEER, the ssl_handshake code
3471 		 * in netevent will check whether the certificate
3472 		 * verification has succeeded */
3473 		/* SSL_VERIFY_PEER is set on the sslctx */
3474 		/* and the certificates to verify with are loaded into
3475 		 * it with SSL_load_verify_locations or
3476 		 * SSL_CTX_set_default_verify_paths */
3477 		/* setting the hostname makes openssl verify the
3478 		 * host name in the x509 certificate presented on the
3479 		 * SSL connection */
3480 		if(!SSL_set1_host(cp->ssl, host)) {
3481 			log_err("SSL_set1_host failed");
3482 			return 0;
3483 		}
3484 	}
3485 #elif defined(HAVE_X509_VERIFY_PARAM_SET1_HOST)
3486 	/* openssl 1.0.2 has this function that can be used for
3487 	 * SSL_set1_host-like verification */
3488 	if((SSL_CTX_get_verify_mode(outnet->sslctx)&SSL_VERIFY_PEER)) {
3489 		X509_VERIFY_PARAM* param = SSL_get0_param(cp->ssl);
3490 #  ifdef X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS
3491 		X509_VERIFY_PARAM_set_hostflags(param, X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS);
3492 #  endif
3493 		if(!X509_VERIFY_PARAM_set1_host(param, host, strlen(host))) {
3494 			log_err("X509_VERIFY_PARAM_set1_host failed");
3495 			return 0;
3496 		}
3497 	}
3498 #else
3499 	(void)host;
3500 #endif /* HAVE_SSL_SET1_HOST */
3501 	return 1;
3502 }
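/*
 * The routine above only verifies the host name when SSL_VERIFY_PEER is
 * already set on the outgoing sslctx.  A minimal sketch of such a context
 * setup with plain OpenSSL calls (assumes OpenSSL 1.1 or later; the real
 * outgoing context is created elsewhere in the code base):
 *
 *	SSL_CTX* ctx = SSL_CTX_new(TLS_client_method());
 *	SSL_CTX_set_verify(ctx, SSL_VERIFY_PEER, NULL);
 *	if(!SSL_CTX_set_default_verify_paths(ctx))
 *		log_err("could not load system CA certificates");
 */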
3503 
3504 struct comm_point*
3505 outnet_comm_point_for_tcp(struct outside_network* outnet,
3506 	comm_point_callback_type* cb, void* cb_arg,
3507 	struct sockaddr_storage* to_addr, socklen_t to_addrlen,
3508 	sldns_buffer* query, int timeout, int ssl, char* host)
3509 {
3510 	struct comm_point* cp;
3511 	int fd = outnet_get_tcp_fd(to_addr, to_addrlen, outnet->tcp_mss, outnet->ip_dscp);
3512 	if(fd == -1) {
3513 		return 0;
3514 	}
3515 	fd_set_nonblock(fd);
3516 	if(!outnet_tcp_connect(fd, to_addr, to_addrlen)) {
3517 		/* outnet_tcp_connect has closed fd on error for us */
3518 		return 0;
3519 	}
3520 	cp = comm_point_create_tcp_out(outnet->base, 65552, cb, cb_arg);
3521 	if(!cp) {
3522 		log_err("malloc failure");
3523 		close(fd);
3524 		return 0;
3525 	}
3526 	cp->repinfo.addrlen = to_addrlen;
3527 	memcpy(&cp->repinfo.addr, to_addr, to_addrlen);
3528 
3529 	/* setup for SSL (if needed) */
3530 	if(ssl) {
3531 		if(!setup_comm_ssl(cp, outnet, fd, host)) {
3532 			log_err("cannot setup XoT");
3533 			comm_point_delete(cp);
3534 			return NULL;
3535 		}
3536 	}
3537 
3538 	/* set timeout on TCP connection */
3539 	comm_point_start_listening(cp, fd, timeout);
3540 	/* copy scratch buffer to cp->buffer */
3541 	sldns_buffer_copy(cp->buffer, query);
3542 	return cp;
3543 }
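/*
 * Usage sketch (illustration; callback, state and host name are
 * hypothetical): send the query in buf over TLS with a 5 second timeout,
 * verifying the certificate name "ns.example.com":
 *
 *	struct comm_point* cp = outnet_comm_point_for_tcp(outnet,
 *		my_tcp_callback, my_state, &addr, addrlen, buf,
 *		5000, 1, "ns.example.com");
 *	if(!cp)
 *		return 0;
 *
 * The timeout is in milliseconds and the query is copied into cp->buffer
 * by the routine above.
 */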
3544 
3545 /** setup the User-Agent HTTP header based on http-user-agent configuration */
3546 static void
3547 setup_http_user_agent(sldns_buffer* buf, struct config_file* cfg)
3548 {
3549 	if(cfg->hide_http_user_agent) return;
3550 	if(cfg->http_user_agent==NULL || cfg->http_user_agent[0] == 0) {
3551 		sldns_buffer_printf(buf, "User-Agent: %s/%s\r\n", PACKAGE_NAME,
3552 			PACKAGE_VERSION);
3553 	} else {
3554 		sldns_buffer_printf(buf, "User-Agent: %s\r\n", cfg->http_user_agent);
3555 	}
3556 }
3557 
3558 /** setup http request headers in buffer for sending query to destination */
3559 static int
3560 setup_http_request(sldns_buffer* buf, char* host, char* path,
3561 	struct config_file* cfg)
3562 {
3563 	sldns_buffer_clear(buf);
3564 	sldns_buffer_printf(buf, "GET /%s HTTP/1.1\r\n", path);
3565 	sldns_buffer_printf(buf, "Host: %s\r\n", host);
3566 	setup_http_user_agent(buf, cfg);
3567 	/* We only send a single query per connection, so a
3568 	 * Connection: close header is not needed either; for reference:
3569 	 * sldns_buffer_printf(buf, "Connection: close\r\n") */
3570 	sldns_buffer_printf(buf, "\r\n");
3571 	if(sldns_buffer_position(buf)+10 > sldns_buffer_capacity(buf))
3572 		return 0; /* somehow the buffer is too short, but it is about
3573 		60K and the request is only a couple hundred bytes long. */
3574 	sldns_buffer_flip(buf);
3575 	return 1;
3576 }
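/*
 * For example, with host "ns.example.net", path "example.com.zone" and the
 * default User-Agent, the buffer built above contains (host and path are
 * placeholder values):
 *
 *	GET /example.com.zone HTTP/1.1\r\n
 *	Host: ns.example.net\r\n
 *	User-Agent: <PACKAGE_NAME>/<PACKAGE_VERSION>\r\n
 *	\r\n
 */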
3577 
3578 struct comm_point*
3579 outnet_comm_point_for_http(struct outside_network* outnet,
3580 	comm_point_callback_type* cb, void* cb_arg,
3581 	struct sockaddr_storage* to_addr, socklen_t to_addrlen, int timeout,
3582 	int ssl, char* host, char* path, struct config_file* cfg)
3583 {
3584 	/* cp calls cb with err=NETEVENT_DONE when transfer is done */
3585 	struct comm_point* cp;
3586 	int fd = outnet_get_tcp_fd(to_addr, to_addrlen, outnet->tcp_mss, outnet->ip_dscp);
3587 	if(fd == -1) {
3588 		return 0;
3589 	}
3590 	fd_set_nonblock(fd);
3591 	if(!outnet_tcp_connect(fd, to_addr, to_addrlen)) {
3592 		/* outnet_tcp_connect has closed fd on error for us */
3593 		return 0;
3594 	}
3595 	cp = comm_point_create_http_out(outnet->base, 65552, cb, cb_arg,
3596 		outnet->udp_buff);
3597 	if(!cp) {
3598 		log_err("malloc failure");
3599 		close(fd);
3600 		return 0;
3601 	}
3602 	cp->repinfo.addrlen = to_addrlen;
3603 	memcpy(&cp->repinfo.addr, to_addr, to_addrlen);
3604 
3605 	/* setup for SSL (if needed) */
3606 	if(ssl) {
3607 		if(!setup_comm_ssl(cp, outnet, fd, host)) {
3608 			log_err("cannot setup https");
3609 			comm_point_delete(cp);
3610 			return NULL;
3611 		}
3612 	}
3613 
3614 	/* set timeout on TCP connection */
3615 	comm_point_start_listening(cp, fd, timeout);
3616 
3617 	/* setup http request in cp->buffer */
3618 	if(!setup_http_request(cp->buffer, host, path, cfg)) {
3619 		log_err("error setting up http request");
3620 		comm_point_delete(cp);
3621 		return NULL;
3622 	}
3623 	return cp;
3624 }
3625 
3626 /** get memory used by waiting tcp entry (in use or not) */
3627 static size_t
3628 waiting_tcp_get_mem(struct waiting_tcp* w)
3629 {
3630 	size_t s;
3631 	if(!w) return 0;
3632 	s = sizeof(*w) + w->pkt_len;
3633 	if(w->timer)
3634 		s += comm_timer_get_mem(w->timer);
3635 	return s;
3636 }
3637 
3638 /** get memory used by port if */
3639 static size_t
3640 if_get_mem(struct port_if* pif)
3641 {
3642 	size_t s;
3643 	int i;
3644 	s = sizeof(*pif) +
3645 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
3646 	    sizeof(int)*pif->avail_total +
3647 #endif
3648 		sizeof(struct port_comm*)*pif->maxout;
3649 	for(i=0; i<pif->inuse; i++)
3650 		s += sizeof(*pif->out[i]) +
3651 			comm_point_get_mem(pif->out[i]->cp);
3652 	return s;
3653 }
3654 
3655 /** get memory used by waiting udp */
3656 static size_t
3657 waiting_udp_get_mem(struct pending* w)
3658 {
3659 	size_t s;
3660 	s = sizeof(*w) + comm_timer_get_mem(w->timer) + w->pkt_len;
3661 	return s;
3662 }
3663 
3664 size_t outnet_get_mem(struct outside_network* outnet)
3665 {
3666 	size_t i;
3667 	int k;
3668 	struct waiting_tcp* w;
3669 	struct pending* u;
3670 	struct serviced_query* sq;
3671 	struct service_callback* sb;
3672 	struct port_comm* pc;
3673 	size_t s = sizeof(*outnet) + sizeof(*outnet->base) +
3674 		sizeof(*outnet->udp_buff) +
3675 		sldns_buffer_capacity(outnet->udp_buff);
3676 	/* second buffer is not ours */
3677 	for(pc = outnet->unused_fds; pc; pc = pc->next) {
3678 		s += sizeof(*pc) + comm_point_get_mem(pc->cp);
3679 	}
3680 	for(k=0; k<outnet->num_ip4; k++)
3681 		s += if_get_mem(&outnet->ip4_ifs[k]);
3682 	for(k=0; k<outnet->num_ip6; k++)
3683 		s += if_get_mem(&outnet->ip6_ifs[k]);
3684 	for(u=outnet->udp_wait_first; u; u=u->next_waiting)
3685 		s += waiting_udp_get_mem(u);
3686 
3687 	s += sizeof(struct pending_tcp*)*outnet->num_tcp;
3688 	for(i=0; i<outnet->num_tcp; i++) {
3689 		s += sizeof(struct pending_tcp);
3690 		s += comm_point_get_mem(outnet->tcp_conns[i]->c);
3691 		if(outnet->tcp_conns[i]->query)
3692 			s += waiting_tcp_get_mem(outnet->tcp_conns[i]->query);
3693 	}
3694 	for(w=outnet->tcp_wait_first; w; w = w->next_waiting)
3695 		s += waiting_tcp_get_mem(w);
3696 	s += sizeof(*outnet->pending);
3697 	s += (sizeof(struct pending) + comm_timer_get_mem(NULL)) *
3698 		outnet->pending->count;
3699 	s += sizeof(*outnet->serviced);
3700 	s += outnet->svcd_overhead;
3701 	RBTREE_FOR(sq, struct serviced_query*, outnet->serviced) {
3702 		s += sizeof(*sq) + sq->qbuflen;
3703 		for(sb = sq->cblist; sb; sb = sb->next)
3704 			s += sizeof(*sb);
3705 	}
3706 	return s;
3707 }
3708 
3709 size_t
3710 serviced_get_mem(struct serviced_query* sq)
3711 {
3712 	struct service_callback* sb;
3713 	size_t s;
3714 	s = sizeof(*sq) + sq->qbuflen;
3715 	for(sb = sq->cblist; sb; sb = sb->next)
3716 		s += sizeof(*sb);
3717 	if(sq->status == serviced_query_UDP_EDNS ||
3718 		sq->status == serviced_query_UDP ||
3719 		sq->status == serviced_query_UDP_EDNS_FRAG ||
3720 		sq->status == serviced_query_UDP_EDNS_fallback) {
3721 		s += sizeof(struct pending);
3722 		s += comm_timer_get_mem(NULL);
3723 	} else {
3724 		/* does not have size of the pkt pointer */
3725 		/* always has a timer except on malloc failures */
3726 
3727 		/* these sizes are part of the main outside network mem */
3728 		/*
3729 		s += sizeof(struct waiting_tcp);
3730 		s += comm_timer_get_mem(NULL);
3731 		*/
3732 	}
3733 	return s;
3734 }
3735 
3736