1 /*
2  * libunbound/worker.c - worker thread or process that resolves
3  *
4  * Copyright (c) 2007, NLnet Labs. All rights reserved.
5  *
6  * This software is open source.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * Redistributions of source code must retain the above copyright notice,
13  * this list of conditions and the following disclaimer.
14  *
15  * Redistributions in binary form must reproduce the above copyright notice,
16  * this list of conditions and the following disclaimer in the documentation
17  * and/or other materials provided with the distribution.
18  *
19  * Neither the name of the NLNET LABS nor the names of its contributors may
20  * be used to endorse or promote products derived from this software without
21  * specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27  * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
29  * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
30  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
31  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34  */
35 
36 /**
37  * \file
38  *
 * This file contains the worker process or thread that performs
 * the DNS resolution and validation. The worker is started from a
 * procedure call; when it runs in the background it keeps running until
 * it is told to exit, and when it runs in the foreground the procedure
 * returns once the work is done.
43  */
44 #include "config.h"
45 #ifdef HAVE_SSL
46 #include <openssl/ssl.h>
47 #endif
48 #include "libunbound/libworker.h"
49 #include "libunbound/context.h"
50 #include "libunbound/unbound.h"
51 #include "libunbound/worker.h"
52 #include "libunbound/unbound-event.h"
53 #include "services/outside_network.h"
54 #include "services/mesh.h"
55 #include "services/localzone.h"
56 #include "services/cache/rrset.h"
57 #include "services/outbound_list.h"
58 #include "services/authzone.h"
59 #include "util/fptr_wlist.h"
60 #include "util/module.h"
61 #include "util/regional.h"
62 #include "util/random.h"
63 #include "util/config_file.h"
64 #include "util/netevent.h"
65 #include "util/storage/lookup3.h"
66 #include "util/storage/slabhash.h"
67 #include "util/net_help.h"
68 #include "util/data/dname.h"
69 #include "util/data/msgreply.h"
70 #include "util/data/msgencode.h"
71 #include "util/tube.h"
72 #include "iterator/iter_fwd.h"
73 #include "iterator/iter_hints.h"
74 #include "sldns/sbuffer.h"
75 #include "sldns/str2wire.h"
76 
77 /** handle new query command for bg worker */
78 static void handle_newq(struct libworker* w, uint8_t* buf, uint32_t len);
79 
80 /** delete libworker env */
81 static void
82 libworker_delete_env(struct libworker* w)
83 {
84 	if(w->env) {
85 		outside_network_quit_prepare(w->back);
86 		mesh_delete(w->env->mesh);
87 		context_release_alloc(w->ctx, w->env->alloc,
88 			!w->is_bg || w->is_bg_thread);
89 		sldns_buffer_free(w->env->scratch_buffer);
90 		regional_destroy(w->env->scratch);
91 		forwards_delete(w->env->fwds);
92 		hints_delete(w->env->hints);
93 		ub_randfree(w->env->rnd);
94 		free(w->env);
95 	}
96 #ifdef HAVE_SSL
97 	SSL_CTX_free(w->sslctx);
98 #endif
99 	outside_network_delete(w->back);
100 }
101 
102 /** delete libworker struct */
103 static void
104 libworker_delete(struct libworker* w)
105 {
106 	if(!w) return;
107 	libworker_delete_env(w);
108 	comm_base_delete(w->base);
109 	free(w);
110 }
111 
112 void
113 libworker_delete_event(struct libworker* w)
114 {
115 	if(!w) return;
116 	libworker_delete_env(w);
117 	comm_base_delete_no_base(w->base);
118 	free(w);
119 }
120 
121 /** setup fresh libworker struct */
122 static struct libworker*
123 libworker_setup(struct ub_ctx* ctx, int is_bg, struct ub_event_base* eb)
124 {
125 	unsigned int seed;
126 	struct libworker* w = (struct libworker*)calloc(1, sizeof(*w));
127 	struct config_file* cfg = ctx->env->cfg;
128 	int* ports;
129 	int numports;
130 	if(!w) return NULL;
131 	w->is_bg = is_bg;
132 	w->ctx = ctx;
133 	w->env = (struct module_env*)malloc(sizeof(*w->env));
134 	if(!w->env) {
135 		free(w);
136 		return NULL;
137 	}
138 	*w->env = *ctx->env;
139 	w->env->alloc = context_obtain_alloc(ctx, !w->is_bg || w->is_bg_thread);
140 	if(!w->env->alloc) {
141 		libworker_delete(w);
142 		return NULL;
143 	}
144 	w->thread_num = w->env->alloc->thread_num;
145 	alloc_set_id_cleanup(w->env->alloc, &libworker_alloc_cleanup, w);
146 	if(!w->is_bg || w->is_bg_thread) {
147 		lock_basic_lock(&ctx->cfglock);
148 	}
149 	w->env->scratch = regional_create_custom(cfg->msg_buffer_size);
150 	w->env->scratch_buffer = sldns_buffer_new(cfg->msg_buffer_size);
151 	w->env->fwds = forwards_create();
152 	if(w->env->fwds && !forwards_apply_cfg(w->env->fwds, cfg)) {
153 		forwards_delete(w->env->fwds);
154 		w->env->fwds = NULL;
155 	}
156 	w->env->hints = hints_create();
157 	if(w->env->hints && !hints_apply_cfg(w->env->hints, cfg)) {
158 		hints_delete(w->env->hints);
159 		w->env->hints = NULL;
160 	}
161 	if(cfg->ssl_upstream || (cfg->tls_cert_bundle && cfg->tls_cert_bundle[0]) || cfg->tls_win_cert) {
162 		w->sslctx = connect_sslctx_create(NULL, NULL,
163 			cfg->tls_cert_bundle, cfg->tls_win_cert);
164 		if(!w->sslctx) {
165 			/* to make the setup fail after unlock */
166 			hints_delete(w->env->hints);
167 			w->env->hints = NULL;
168 		}
169 	}
170 	if(!w->is_bg || w->is_bg_thread) {
171 		lock_basic_unlock(&ctx->cfglock);
172 	}
173 	if(!w->env->scratch || !w->env->scratch_buffer || !w->env->fwds ||
174 		!w->env->hints) {
175 		libworker_delete(w);
176 		return NULL;
177 	}
178 	w->env->worker = (struct worker*)w;
179 	w->env->probe_timer = NULL;
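	/* seed the per-worker random state from the time, pid, thread
	 * number and alloc id; the seed variable is wiped after use */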
180 	seed = (unsigned int)time(NULL) ^ (unsigned int)getpid() ^
181 		(((unsigned int)w->thread_num)<<17);
182 	seed ^= (unsigned int)w->env->alloc->next_id;
183 	if(!w->is_bg || w->is_bg_thread) {
184 		lock_basic_lock(&ctx->cfglock);
185 	}
186 	if(!(w->env->rnd = ub_initstate(seed, ctx->seed_rnd))) {
187 		if(!w->is_bg || w->is_bg_thread) {
188 			lock_basic_unlock(&ctx->cfglock);
189 		}
190 		explicit_bzero(&seed, sizeof(seed));
191 		libworker_delete(w);
192 		return NULL;
193 	}
194 	if(!w->is_bg || w->is_bg_thread) {
195 		lock_basic_unlock(&ctx->cfglock);
196 	}
197 	if(1) {
		/* primitive lockout for threading: if one thread overwrites
		 * the value set by another, the effect is like wiping the
		 * cache (which is likely still empty at startup) */
		/* note that we hold the ctx lock in the normal threaded
		 * case, so that is handled properly; only many ctx objects
		 * in different threads may clash here */
204 		static int done_raninit = 0;
205 		if(!done_raninit) {
206 			done_raninit = 1;
207 			hash_set_raninit((uint32_t)ub_random(w->env->rnd));
208 		}
209 	}
210 	explicit_bzero(&seed, sizeof(seed));
211 
212 	if(eb)
213 		w->base = comm_base_create_event(eb);
214 	else	w->base = comm_base_create(0);
215 	if(!w->base) {
216 		libworker_delete(w);
217 		return NULL;
218 	}
219 	w->env->worker_base = w->base;
220 	if(!w->is_bg || w->is_bg_thread) {
221 		lock_basic_lock(&ctx->cfglock);
222 	}
223 	numports = cfg_condense_ports(cfg, &ports);
224 	if(numports == 0) {
225 		int locked = !w->is_bg || w->is_bg_thread;
226 		libworker_delete(w);
227 		if(locked) {
228 			lock_basic_unlock(&ctx->cfglock);
229 		}
230 		return NULL;
231 	}
232 	w->back = outside_network_create(w->base, cfg->msg_buffer_size,
233 		(size_t)cfg->outgoing_num_ports, cfg->out_ifs,
234 		cfg->num_out_ifs, cfg->do_ip4, cfg->do_ip6,
235 		cfg->do_tcp?cfg->outgoing_num_tcp:0,
236 		w->env->infra_cache, w->env->rnd, cfg->use_caps_bits_for_id,
237 		ports, numports, cfg->unwanted_threshold,
238 		cfg->outgoing_tcp_mss, &libworker_alloc_cleanup, w,
239 		cfg->do_udp || cfg->udp_upstream_without_downstream, w->sslctx,
240 		cfg->delay_close, NULL);
241 	w->env->outnet = w->back;
242 	if(!w->is_bg || w->is_bg_thread) {
243 		lock_basic_unlock(&ctx->cfglock);
244 	}
245 	free(ports);
246 	if(!w->back) {
247 		libworker_delete(w);
248 		return NULL;
249 	}
250 	w->env->mesh = mesh_create(&ctx->mods, w->env);
251 	if(!w->env->mesh) {
252 		libworker_delete(w);
253 		return NULL;
254 	}
255 	w->env->send_query = &libworker_send_query;
256 	w->env->detach_subs = &mesh_detach_subs;
257 	w->env->attach_sub = &mesh_attach_sub;
258 	w->env->add_sub = &mesh_add_sub;
259 	w->env->kill_sub = &mesh_state_delete;
260 	w->env->detect_cycle = &mesh_detect_cycle;
261 	comm_base_timept(w->base, &w->env->now, &w->env->now_tv);
262 	return w;
263 }
264 
265 struct libworker* libworker_create_event(struct ub_ctx* ctx,
266 	struct ub_event_base* eb)
267 {
268 	return libworker_setup(ctx, 0, eb);
269 }
270 
271 /** handle cancel command for bg worker */
272 static void
273 handle_cancel(struct libworker* w, uint8_t* buf, uint32_t len)
274 {
275 	struct ctx_query* q;
276 	if(w->is_bg_thread) {
277 		lock_basic_lock(&w->ctx->cfglock);
278 		q = context_deserialize_cancel(w->ctx, buf, len);
279 		lock_basic_unlock(&w->ctx->cfglock);
280 	} else {
281 		q = context_deserialize_cancel(w->ctx, buf, len);
282 	}
	if(!q) {
		/* the lookup most likely failed, i.e. the query had already
		 * been processed and answered before the cancel arrived */
		free(buf);
		return;
	}
288 	q->cancelled = 1;
289 	free(buf);
290 }
291 
292 /** do control command coming into bg server */
293 static void
294 libworker_do_cmd(struct libworker* w, uint8_t* msg, uint32_t len)
295 {
296 	switch(context_serial_getcmd(msg, len)) {
297 		default:
298 		case UB_LIBCMD_ANSWER:
299 			log_err("unknown command for bg worker %d",
300 				(int)context_serial_getcmd(msg, len));
301 			/* and fall through to quit */
302 			/* fallthrough */
303 		case UB_LIBCMD_QUIT:
304 			free(msg);
305 			comm_base_exit(w->base);
306 			break;
307 		case UB_LIBCMD_NEWQUERY:
308 			handle_newq(w, msg, len);
309 			break;
310 		case UB_LIBCMD_CANCEL:
311 			handle_cancel(w, msg, len);
312 			break;
313 	}
314 }
315 
316 /** handle control command coming into server */
317 void
318 libworker_handle_control_cmd(struct tube* ATTR_UNUSED(tube),
319 	uint8_t* msg, size_t len, int err, void* arg)
320 {
321 	struct libworker* w = (struct libworker*)arg;
322 
323 	if(err != 0) {
324 		free(msg);
325 		/* it is of no use to go on, exit */
326 		comm_base_exit(w->base);
327 		return;
328 	}
329 	libworker_do_cmd(w, msg, len); /* also frees the buf */
330 }
331 
332 /** the background thread func */
333 static void*
334 libworker_dobg(void* arg)
335 {
336 	/* setup */
337 	uint32_t m;
338 	struct libworker* w = (struct libworker*)arg;
339 	struct ub_ctx* ctx;
340 	if(!w) {
341 		log_err("libunbound bg worker init failed, nomem");
342 		return NULL;
343 	}
344 	ctx = w->ctx;
345 	log_thread_set(&w->thread_num);
346 #ifdef THREADS_DISABLED
347 	/* we are forked */
348 	w->is_bg_thread = 0;
349 	/* close non-used parts of the pipes */
350 	tube_close_write(ctx->qq_pipe);
351 	tube_close_read(ctx->rr_pipe);
352 #endif
353 	if(!tube_setup_bg_listen(ctx->qq_pipe, w->base,
354 		libworker_handle_control_cmd, w)) {
355 		log_err("libunbound bg worker init failed, no bglisten");
356 		return NULL;
357 	}
358 	if(!tube_setup_bg_write(ctx->rr_pipe, w->base)) {
359 		log_err("libunbound bg worker init failed, no bgwrite");
360 		return NULL;
361 	}
362 
363 	/* do the work */
364 	comm_base_dispatch(w->base);
365 
366 	/* cleanup */
367 	m = UB_LIBCMD_QUIT;
368 	w->want_quit = 1;
369 	tube_remove_bg_listen(w->ctx->qq_pipe);
370 	tube_remove_bg_write(w->ctx->rr_pipe);
371 	libworker_delete(w);
372 	(void)tube_write_msg(ctx->rr_pipe, (uint8_t*)&m,
373 		(uint32_t)sizeof(m), 0);
374 #ifdef THREADS_DISABLED
375 	/* close pipes from forked process before exit */
376 	tube_close_read(ctx->qq_pipe);
377 	tube_close_write(ctx->rr_pipe);
378 #endif
379 	return NULL;
380 }
381 
382 int libworker_bg(struct ub_ctx* ctx)
383 {
384 	struct libworker* w;
385 	/* fork or threadcreate */
386 	lock_basic_lock(&ctx->cfglock);
387 	if(ctx->dothread) {
388 		lock_basic_unlock(&ctx->cfglock);
389 		w = libworker_setup(ctx, 1, NULL);
390 		if(!w) return UB_NOMEM;
391 		w->is_bg_thread = 1;
392 #ifdef ENABLE_LOCK_CHECKS
393 		w->thread_num = 1; /* for nicer DEBUG checklocks */
394 #endif
395 		ub_thread_create(&ctx->bg_tid, libworker_dobg, w);
396 	} else {
397 		lock_basic_unlock(&ctx->cfglock);
398 #ifndef HAVE_FORK
399 		/* no fork on windows */
400 		return UB_FORKFAIL;
401 #else /* HAVE_FORK */
402 		switch((ctx->bg_pid=fork())) {
403 			case 0:
404 				w = libworker_setup(ctx, 1, NULL);
405 				if(!w) fatal_exit("out of memory");
406 				/* close non-used parts of the pipes */
407 				tube_close_write(ctx->qq_pipe);
408 				tube_close_read(ctx->rr_pipe);
409 				(void)libworker_dobg(w);
410 				exit(0);
411 				break;
412 			case -1:
413 				return UB_FORKFAIL;
414 			default:
				/* close non-used parts, so that the worker
				 * bg process gets 'pipe closed' when the
				 * main process exits */
418 				tube_close_read(ctx->qq_pipe);
419 				tube_close_write(ctx->rr_pipe);
420 				break;
421 		}
422 #endif /* HAVE_FORK */
423 	}
424 	return UB_NOERROR;
425 }
426 
427 /** insert canonname */
428 static int
429 fill_canon(struct ub_result* res, uint8_t* s)
430 {
431 	char buf[255+2];
432 	dname_str(s, buf);
433 	res->canonname = strdup(buf);
434 	return res->canonname != 0;
435 }
436 
437 /** fill data into result */
438 static int
439 fill_res(struct ub_result* res, struct ub_packed_rrset_key* answer,
440 	uint8_t* finalcname, struct query_info* rq, struct reply_info* rep)
441 {
442 	size_t i;
443 	struct packed_rrset_data* data;
444 	res->ttl = 0;
445 	if(!answer) {
446 		if(finalcname) {
447 			if(!fill_canon(res, finalcname))
448 				return 0; /* out of memory */
449 		}
450 		if(rep->rrset_count != 0)
451 			res->ttl = (int)rep->ttl;
452 		res->data = (char**)calloc(1, sizeof(char*));
453 		res->len = (int*)calloc(1, sizeof(int));
454 		return (res->data && res->len);
455 	}
456 	data = (struct packed_rrset_data*)answer->entry.data;
457 	if(query_dname_compare(rq->qname, answer->rk.dname) != 0) {
458 		if(!fill_canon(res, answer->rk.dname))
459 			return 0; /* out of memory */
460 	} else	res->canonname = NULL;
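	/* allocate NULL-terminated arrays for the rdata pointers and
	 * their lengths */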
461 	res->data = (char**)calloc(data->count+1, sizeof(char*));
462 	res->len = (int*)calloc(data->count+1, sizeof(int));
463 	if(!res->data || !res->len)
464 		return 0; /* out of memory */
465 	for(i=0; i<data->count; i++) {
466 		/* remove rdlength from rdata */
467 		res->len[i] = (int)(data->rr_len[i] - 2);
468 		res->data[i] = memdup(data->rr_data[i]+2, (size_t)res->len[i]);
469 		if(!res->data[i])
470 			return 0; /* out of memory */
471 	}
472 	/* ttl for positive answers, from CNAME and answer RRs */
473 	if(data->count != 0) {
474 		size_t j;
475 		res->ttl = (int)data->ttl;
476 		for(j=0; j<rep->an_numrrsets; j++) {
477 			struct packed_rrset_data* d =
478 				(struct packed_rrset_data*)rep->rrsets[j]->
479 				entry.data;
480 			if((int)d->ttl < res->ttl)
481 				res->ttl = (int)d->ttl;
482 		}
483 	}
484 	/* ttl for negative answers */
485 	if(data->count == 0 && rep->rrset_count != 0)
486 		res->ttl = (int)rep->ttl;
487 	res->data[data->count] = NULL;
488 	res->len[data->count] = 0;
489 	return 1;
490 }
491 
492 /** fill result from parsed message, on error fills servfail */
493 void
494 libworker_enter_result(struct ub_result* res, sldns_buffer* buf,
495 	struct regional* temp, enum sec_status msg_security)
496 {
497 	struct query_info rq;
498 	struct reply_info* rep;
499 	res->rcode = LDNS_RCODE_SERVFAIL;
500 	rep = parse_reply_in_temp_region(buf, temp, &rq);
501 	if(!rep) {
502 		log_err("cannot parse buf");
503 		return; /* error parsing buf, or out of memory */
504 	}
505 	if(!fill_res(res, reply_find_answer_rrset(&rq, rep),
506 		reply_find_final_cname_target(&rq, rep), &rq, rep))
507 		return; /* out of memory */
508 	/* rcode, havedata, nxdomain, secure, bogus */
509 	res->rcode = (int)FLAGS_GET_RCODE(rep->flags);
510 	if(res->data && res->data[0])
511 		res->havedata = 1;
512 	if(res->rcode == LDNS_RCODE_NXDOMAIN)
513 		res->nxdomain = 1;
514 	if(msg_security == sec_status_secure)
515 		res->secure = 1;
516 	if(msg_security == sec_status_bogus ||
517 		msg_security == sec_status_secure_sentinel_fail)
518 		res->bogus = 1;
519 }
520 
/** fill up the fg results */
522 static void
523 libworker_fillup_fg(struct ctx_query* q, int rcode, sldns_buffer* buf,
524 	enum sec_status s, char* why_bogus, int was_ratelimited)
525 {
526 	q->res->was_ratelimited = was_ratelimited;
527 	if(why_bogus)
528 		q->res->why_bogus = strdup(why_bogus);
529 	if(rcode != 0) {
530 		q->res->rcode = rcode;
531 		q->msg_security = s;
532 		return;
533 	}
534 
535 	q->res->rcode = LDNS_RCODE_SERVFAIL;
536 	q->msg_security = 0;
537 	q->msg = memdup(sldns_buffer_begin(buf), sldns_buffer_limit(buf));
538 	q->msg_len = sldns_buffer_limit(buf);
539 	if(!q->msg) {
540 		return; /* the error is in the rcode */
541 	}
542 
543 	/* canonname and results */
544 	q->msg_security = s;
545 	libworker_enter_result(q->res, buf, q->w->env->scratch, s);
546 }
547 
548 void
549 libworker_fg_done_cb(void* arg, int rcode, sldns_buffer* buf, enum sec_status s,
550 	char* why_bogus, int was_ratelimited)
551 {
552 	struct ctx_query* q = (struct ctx_query*)arg;
553 	/* fg query is done; exit comm base */
554 	comm_base_exit(q->w->base);
555 
556 	libworker_fillup_fg(q, rcode, buf, s, why_bogus, was_ratelimited);
557 }
558 
559 /** setup qinfo and edns */
560 static int
561 setup_qinfo_edns(struct libworker* w, struct ctx_query* q,
562 	struct query_info* qinfo, struct edns_data* edns)
563 {
564 	qinfo->qtype = (uint16_t)q->res->qtype;
565 	qinfo->qclass = (uint16_t)q->res->qclass;
566 	qinfo->local_alias = NULL;
567 	qinfo->qname = sldns_str2wire_dname(q->res->qname, &qinfo->qname_len);
568 	if(!qinfo->qname) {
569 		return 0;
570 	}
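	/* advertise EDNS with the DO bit set so that DNSSEC records are
	 * returned; clamp the advertised UDP size to the buffer capacity */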
572 	edns->edns_present = 1;
573 	edns->ext_rcode = 0;
574 	edns->edns_version = 0;
575 	edns->bits = EDNS_DO;
576 	edns->opt_list = NULL;
577 	if(sldns_buffer_capacity(w->back->udp_buff) < 65535)
578 		edns->udp_size = (uint16_t)sldns_buffer_capacity(
579 			w->back->udp_buff);
580 	else	edns->udp_size = 65535;
581 	return 1;
582 }
583 
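/** run a query in the foreground: create a private worker, try local
 * and auth zone data for a fixed answer, and otherwise dispatch the
 * comm base until the callback has filled in the result */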
584 int libworker_fg(struct ub_ctx* ctx, struct ctx_query* q)
585 {
586 	struct libworker* w = libworker_setup(ctx, 0, NULL);
587 	uint16_t qflags, qid;
588 	struct query_info qinfo;
589 	struct edns_data edns;
590 	if(!w)
591 		return UB_INITFAIL;
592 	if(!setup_qinfo_edns(w, q, &qinfo, &edns)) {
593 		libworker_delete(w);
594 		return UB_SYNTAX;
595 	}
596 	qid = 0;
597 	qflags = BIT_RD;
598 	q->w = w;
599 	/* see if there is a fixed answer */
600 	sldns_buffer_write_u16_at(w->back->udp_buff, 0, qid);
601 	sldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags);
602 	if(local_zones_answer(ctx->local_zones, w->env, &qinfo, &edns,
603 		w->back->udp_buff, w->env->scratch, NULL, NULL, 0, NULL, 0,
604 		NULL, 0, NULL, 0, NULL)) {
605 		regional_free_all(w->env->scratch);
606 		libworker_fillup_fg(q, LDNS_RCODE_NOERROR,
607 			w->back->udp_buff, sec_status_insecure, NULL, 0);
608 		libworker_delete(w);
609 		free(qinfo.qname);
610 		return UB_NOERROR;
611 	}
612 	if(ctx->env->auth_zones && auth_zones_answer(ctx->env->auth_zones,
613 		w->env, &qinfo, &edns, NULL, w->back->udp_buff, w->env->scratch)) {
614 		regional_free_all(w->env->scratch);
615 		libworker_fillup_fg(q, LDNS_RCODE_NOERROR,
616 			w->back->udp_buff, sec_status_insecure, NULL, 0);
617 		libworker_delete(w);
618 		free(qinfo.qname);
619 		return UB_NOERROR;
620 	}
621 	/* process new query */
622 	if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns,
623 		w->back->udp_buff, qid, libworker_fg_done_cb, q)) {
624 		free(qinfo.qname);
625 		return UB_NOMEM;
626 	}
627 	free(qinfo.qname);
628 
629 	/* wait for reply */
630 	comm_base_dispatch(w->base);
631 
632 	libworker_delete(w);
633 	return UB_NOERROR;
634 }
635 
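/** callback for event-based queries: remove the query from the context
 * and, unless it was cancelled, hand the wire answer to the user
 * callback */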
636 void
637 libworker_event_done_cb(void* arg, int rcode, sldns_buffer* buf,
638 	enum sec_status s, char* why_bogus, int was_ratelimited)
639 {
640 	struct ctx_query* q = (struct ctx_query*)arg;
641 	ub_event_callback_type cb = q->cb_event;
642 	void* cb_arg = q->cb_arg;
643 	int cancelled = q->cancelled;
644 
645 	/* delete it now */
646 	struct ub_ctx* ctx = q->w->ctx;
647 	lock_basic_lock(&ctx->cfglock);
648 	(void)rbtree_delete(&ctx->queries, q->node.key);
649 	ctx->num_async--;
650 	context_query_delete(q);
651 	lock_basic_unlock(&ctx->cfglock);
652 
653 	if(!cancelled) {
654 		/* call callback */
655 		int sec = 0;
656 		if(s == sec_status_bogus)
657 			sec = 1;
658 		else if(s == sec_status_secure)
659 			sec = 2;
660 		(*cb)(cb_arg, rcode, (void*)sldns_buffer_begin(buf),
661 			(int)sldns_buffer_limit(buf), sec, why_bogus, was_ratelimited);
662 	}
663 }
664 
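/** attach a query from the event API to the shared event worker; fixed
 * answers from local or auth zones are delivered at once, other queries
 * go through the mesh and are answered in libworker_event_done_cb */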
665 int libworker_attach_mesh(struct ub_ctx* ctx, struct ctx_query* q,
666 	int* async_id)
667 {
668 	struct libworker* w = ctx->event_worker;
669 	uint16_t qflags, qid;
670 	struct query_info qinfo;
671 	struct edns_data edns;
672 	if(!w)
673 		return UB_INITFAIL;
674 	if(!setup_qinfo_edns(w, q, &qinfo, &edns))
675 		return UB_SYNTAX;
676 	qid = 0;
677 	qflags = BIT_RD;
678 	q->w = w;
679 	/* see if there is a fixed answer */
680 	sldns_buffer_write_u16_at(w->back->udp_buff, 0, qid);
681 	sldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags);
682 	if(local_zones_answer(ctx->local_zones, w->env, &qinfo, &edns,
683 		w->back->udp_buff, w->env->scratch, NULL, NULL, 0, NULL, 0,
684 		NULL, 0, NULL, 0, NULL)) {
685 		regional_free_all(w->env->scratch);
686 		free(qinfo.qname);
687 		libworker_event_done_cb(q, LDNS_RCODE_NOERROR,
688 			w->back->udp_buff, sec_status_insecure, NULL, 0);
689 		return UB_NOERROR;
690 	}
691 	if(ctx->env->auth_zones && auth_zones_answer(ctx->env->auth_zones,
692 		w->env, &qinfo, &edns, NULL, w->back->udp_buff, w->env->scratch)) {
693 		regional_free_all(w->env->scratch);
694 		free(qinfo.qname);
695 		libworker_event_done_cb(q, LDNS_RCODE_NOERROR,
696 			w->back->udp_buff, sec_status_insecure, NULL, 0);
697 		return UB_NOERROR;
698 	}
699 	/* process new query */
700 	if(async_id)
701 		*async_id = q->querynum;
702 	if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns,
703 		w->back->udp_buff, qid, libworker_event_done_cb, q)) {
704 		free(qinfo.qname);
705 		return UB_NOMEM;
706 	}
707 	free(qinfo.qname);
708 	return UB_NOERROR;
709 }
710 
711 /** add result to the bg worker result queue */
712 static void
713 add_bg_result(struct libworker* w, struct ctx_query* q, sldns_buffer* pkt,
714 	int err, char* reason, int was_ratelimited)
715 {
716 	uint8_t* msg = NULL;
717 	uint32_t len = 0;
718 
719 	if(w->want_quit) {
720 		context_query_delete(q);
721 		return;
722 	}
723 	/* serialize and delete unneeded q */
724 	if(w->is_bg_thread) {
725 		lock_basic_lock(&w->ctx->cfglock);
726 		if(reason)
727 			q->res->why_bogus = strdup(reason);
728 		q->res->was_ratelimited = was_ratelimited;
729 		if(pkt) {
730 			q->msg_len = sldns_buffer_remaining(pkt);
731 			q->msg = memdup(sldns_buffer_begin(pkt), q->msg_len);
732 			if(!q->msg) {
733 				msg = context_serialize_answer(q, UB_NOMEM, NULL, &len);
734 			} else {
735 				msg = context_serialize_answer(q, err, NULL, &len);
736 			}
737 		} else {
738 			msg = context_serialize_answer(q, err, NULL, &len);
739 		}
740 		lock_basic_unlock(&w->ctx->cfglock);
741 	} else {
742 		if(reason)
743 			q->res->why_bogus = strdup(reason);
744 		q->res->was_ratelimited = was_ratelimited;
745 		msg = context_serialize_answer(q, err, pkt, &len);
746 		(void)rbtree_delete(&w->ctx->queries, q->node.key);
747 		w->ctx->num_async--;
748 		context_query_delete(q);
749 	}
750 
751 	if(!msg) {
752 		log_err("out of memory for async answer");
753 		return;
754 	}
755 	if(!tube_queue_item(w->ctx->rr_pipe, msg, len)) {
756 		log_err("out of memory for async answer");
757 		return;
758 	}
759 }
760 
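/** callback for background worker queries: drop cancelled queries,
 * encode error rcodes into a reply and queue the serialized answer for
 * the frontend */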
761 void
762 libworker_bg_done_cb(void* arg, int rcode, sldns_buffer* buf, enum sec_status s,
763 	char* why_bogus, int was_ratelimited)
764 {
765 	struct ctx_query* q = (struct ctx_query*)arg;
766 
767 	if(q->cancelled || q->w->back->want_to_quit) {
768 		if(q->w->is_bg_thread) {
769 			/* delete it now */
770 			struct ub_ctx* ctx = q->w->ctx;
771 			lock_basic_lock(&ctx->cfglock);
772 			(void)rbtree_delete(&ctx->queries, q->node.key);
773 			ctx->num_async--;
774 			context_query_delete(q);
775 			lock_basic_unlock(&ctx->cfglock);
776 		}
777 		/* cancelled, do not give answer */
778 		return;
779 	}
780 	q->msg_security = s;
781 	if(!buf) {
782 		buf = q->w->env->scratch_buffer;
783 	}
784 	if(rcode != 0) {
785 		error_encode(buf, rcode, NULL, 0, BIT_RD, NULL);
786 	}
787 	add_bg_result(q->w, q, buf, UB_NOERROR, why_bogus, was_ratelimited);
788 }
789 
790 
791 /** handle new query command for bg worker */
792 static void
793 handle_newq(struct libworker* w, uint8_t* buf, uint32_t len)
794 {
795 	uint16_t qflags, qid;
796 	struct query_info qinfo;
797 	struct edns_data edns;
798 	struct ctx_query* q;
799 	if(w->is_bg_thread) {
800 		lock_basic_lock(&w->ctx->cfglock);
801 		q = context_lookup_new_query(w->ctx, buf, len);
802 		lock_basic_unlock(&w->ctx->cfglock);
803 	} else {
804 		q = context_deserialize_new_query(w->ctx, buf, len);
805 	}
806 	free(buf);
807 	if(!q) {
808 		log_err("failed to deserialize newq");
809 		return;
810 	}
811 	if(!setup_qinfo_edns(w, q, &qinfo, &edns)) {
812 		add_bg_result(w, q, NULL, UB_SYNTAX, NULL, 0);
813 		return;
814 	}
815 	qid = 0;
816 	qflags = BIT_RD;
817 	/* see if there is a fixed answer */
818 	sldns_buffer_write_u16_at(w->back->udp_buff, 0, qid);
819 	sldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags);
820 	if(local_zones_answer(w->ctx->local_zones, w->env, &qinfo, &edns,
821 		w->back->udp_buff, w->env->scratch, NULL, NULL, 0, NULL, 0,
822 		NULL, 0, NULL, 0, NULL)) {
823 		regional_free_all(w->env->scratch);
824 		q->msg_security = sec_status_insecure;
825 		add_bg_result(w, q, w->back->udp_buff, UB_NOERROR, NULL, 0);
826 		free(qinfo.qname);
827 		return;
828 	}
829 	if(w->ctx->env->auth_zones && auth_zones_answer(w->ctx->env->auth_zones,
830 		w->env, &qinfo, &edns, NULL, w->back->udp_buff, w->env->scratch)) {
831 		regional_free_all(w->env->scratch);
832 		q->msg_security = sec_status_insecure;
833 		add_bg_result(w, q, w->back->udp_buff, UB_NOERROR, NULL, 0);
834 		free(qinfo.qname);
835 		return;
836 	}
837 	q->w = w;
838 	/* process new query */
839 	if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns,
840 		w->back->udp_buff, qid, libworker_bg_done_cb, q)) {
841 		add_bg_result(w, q, NULL, UB_NOMEM, NULL, 0);
842 	}
843 	free(qinfo.qname);
844 }
845 
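/** alloc cleanup callback: clears the rrset and message caches; it is
 * registered with alloc_set_id_cleanup() and the outside network above */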
846 void libworker_alloc_cleanup(void* arg)
847 {
848 	struct libworker* w = (struct libworker*)arg;
849 	slabhash_clear(&w->env->rrset_cache->table);
	slabhash_clear(w->env->msg_cache);
851 }
852 
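/** send a query for a module: allocate an outbound entry in the query
 * region and hand the query to the serviced query code of the outside
 * network */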
853 struct outbound_entry* libworker_send_query(struct query_info* qinfo,
854 	uint16_t flags, int dnssec, int want_dnssec, int nocaps,
855 	struct sockaddr_storage* addr, socklen_t addrlen, uint8_t* zone,
856 	size_t zonelen, int ssl_upstream, char* tls_auth_name,
857 	struct module_qstate* q)
858 {
859 	struct libworker* w = (struct libworker*)q->env->worker;
860 	struct outbound_entry* e = (struct outbound_entry*)regional_alloc(
861 		q->region, sizeof(*e));
862 	if(!e)
863 		return NULL;
864 	e->qstate = q;
865 	e->qsent = outnet_serviced_query(w->back, qinfo, flags, dnssec,
866 		want_dnssec, nocaps, q->env->cfg->tcp_upstream, ssl_upstream,
867 		tls_auth_name, addr, addrlen, zone, zonelen, q,
868 		libworker_handle_service_reply, e, w->back->udp_buff, q->env);
869 	if(!e->qsent) {
870 		return NULL;
871 	}
872 	return e;
873 }
874 
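/** handle a reply that arrives directly on an outgoing comm point;
 * sanity check the packet and report the result to the mesh */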
875 int
876 libworker_handle_reply(struct comm_point* c, void* arg, int error,
877         struct comm_reply* reply_info)
878 {
879 	struct module_qstate* q = (struct module_qstate*)arg;
880 	struct libworker* lw = (struct libworker*)q->env->worker;
881 	struct outbound_entry e;
882 	e.qstate = q;
883 	e.qsent = NULL;
884 
885 	if(error != 0) {
886 		mesh_report_reply(lw->env->mesh, &e, reply_info, error);
887 		return 0;
888 	}
889 	/* sanity check. */
890 	if(!LDNS_QR_WIRE(sldns_buffer_begin(c->buffer))
891 		|| LDNS_OPCODE_WIRE(sldns_buffer_begin(c->buffer)) !=
892 			LDNS_PACKET_QUERY
893 		|| LDNS_QDCOUNT(sldns_buffer_begin(c->buffer)) > 1) {
894 		/* error becomes timeout for the module as if this reply
895 		 * never arrived. */
896 		mesh_report_reply(lw->env->mesh, &e, reply_info,
897 			NETEVENT_TIMEOUT);
898 		return 0;
899 	}
900 	mesh_report_reply(lw->env->mesh, &e, reply_info, NETEVENT_NOERROR);
901 	return 0;
902 }
903 
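/** handle a reply for a serviced (shared) outgoing query; sanity check
 * the packet and report the result to the mesh */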
904 int
905 libworker_handle_service_reply(struct comm_point* c, void* arg, int error,
906         struct comm_reply* reply_info)
907 {
908 	struct outbound_entry* e = (struct outbound_entry*)arg;
909 	struct libworker* lw = (struct libworker*)e->qstate->env->worker;
910 
911 	if(error != 0) {
912 		mesh_report_reply(lw->env->mesh, e, reply_info, error);
913 		return 0;
914 	}
915 	/* sanity check. */
916 	if(!LDNS_QR_WIRE(sldns_buffer_begin(c->buffer))
917 		|| LDNS_OPCODE_WIRE(sldns_buffer_begin(c->buffer)) !=
918 			LDNS_PACKET_QUERY
919 		|| LDNS_QDCOUNT(sldns_buffer_begin(c->buffer)) > 1) {
920 		/* error becomes timeout for the module as if this reply
921 		 * never arrived. */
922 		mesh_report_reply(lw->env->mesh, e, reply_info,
923 			NETEVENT_TIMEOUT);
924 		return 0;
925 	}
	mesh_report_reply(lw->env->mesh, e, reply_info, NETEVENT_NOERROR);
927 	return 0;
928 }
929 
930 /* --- fake callbacks for fptr_wlist to work --- */
931 void worker_handle_control_cmd(struct tube* ATTR_UNUSED(tube),
932 	uint8_t* ATTR_UNUSED(buffer), size_t ATTR_UNUSED(len),
933 	int ATTR_UNUSED(error), void* ATTR_UNUSED(arg))
934 {
935 	log_assert(0);
936 }
937 
938 int worker_handle_request(struct comm_point* ATTR_UNUSED(c),
939 	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
940         struct comm_reply* ATTR_UNUSED(repinfo))
941 {
942 	log_assert(0);
943 	return 0;
944 }
945 
946 int worker_handle_reply(struct comm_point* ATTR_UNUSED(c),
947 	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
948         struct comm_reply* ATTR_UNUSED(reply_info))
949 {
950 	log_assert(0);
951 	return 0;
952 }
953 
954 int worker_handle_service_reply(struct comm_point* ATTR_UNUSED(c),
955 	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
956         struct comm_reply* ATTR_UNUSED(reply_info))
957 {
958 	log_assert(0);
959 	return 0;
960 }
961 
962 int remote_accept_callback(struct comm_point* ATTR_UNUSED(c),
963 	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
964         struct comm_reply* ATTR_UNUSED(repinfo))
965 {
966 	log_assert(0);
967 	return 0;
968 }
969 
970 int remote_control_callback(struct comm_point* ATTR_UNUSED(c),
971 	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
972         struct comm_reply* ATTR_UNUSED(repinfo))
973 {
974 	log_assert(0);
975 	return 0;
976 }
977 
978 void worker_sighandler(int ATTR_UNUSED(sig), void* ATTR_UNUSED(arg))
979 {
980 	log_assert(0);
981 }
982 
983 struct outbound_entry* worker_send_query(struct query_info* ATTR_UNUSED(qinfo),
984 	uint16_t ATTR_UNUSED(flags), int ATTR_UNUSED(dnssec),
985 	int ATTR_UNUSED(want_dnssec), int ATTR_UNUSED(nocaps),
986 	struct sockaddr_storage* ATTR_UNUSED(addr), socklen_t ATTR_UNUSED(addrlen),
987 	uint8_t* ATTR_UNUSED(zone), size_t ATTR_UNUSED(zonelen),
988 	int ATTR_UNUSED(ssl_upstream), char* ATTR_UNUSED(tls_auth_name),
989 	struct module_qstate* ATTR_UNUSED(q))
990 {
991 	log_assert(0);
992 	return 0;
993 }
994 
995 void
996 worker_alloc_cleanup(void* ATTR_UNUSED(arg))
997 {
998 	log_assert(0);
999 }
1000 
1001 void worker_stat_timer_cb(void* ATTR_UNUSED(arg))
1002 {
1003 	log_assert(0);
1004 }
1005 
1006 void worker_probe_timer_cb(void* ATTR_UNUSED(arg))
1007 {
1008 	log_assert(0);
1009 }
1010 
1011 void worker_start_accept(void* ATTR_UNUSED(arg))
1012 {
1013 	log_assert(0);
1014 }
1015 
1016 void worker_stop_accept(void* ATTR_UNUSED(arg))
1017 {
1018 	log_assert(0);
1019 }
1020 
1021 int order_lock_cmp(const void* ATTR_UNUSED(e1), const void* ATTR_UNUSED(e2))
1022 {
1023 	log_assert(0);
1024 	return 0;
1025 }
1026 
1027 int
1028 codeline_cmp(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b))
1029 {
1030 	log_assert(0);
1031 	return 0;
1032 }
1033 
1034 int replay_var_compare(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b))
1035 {
	log_assert(0);
	return 0;
1038 }
1039 
1040 void remote_get_opt_ssl(char* ATTR_UNUSED(str), void* ATTR_UNUSED(arg))
1041 {
	log_assert(0);
1043 }
1044 
1045 #ifdef UB_ON_WINDOWS
1046 void
worker_win_stop_cb(int ATTR_UNUSED(fd), short ATTR_UNUSED(ev), void*
	ATTR_UNUSED(arg))
{
	log_assert(0);
1050 }
1051 
1052 void
1053 wsvc_cron_cb(void* ATTR_UNUSED(arg))
1054 {
	log_assert(0);
1056 }
1057 #endif /* UB_ON_WINDOWS */
1058