/*
 * services/mesh.c - deal with mesh of query states and handle events for that.
 *
 * Copyright (c) 2007, NLnet Labs. All rights reserved.
 *
 * This software is open source.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the NLNET LABS nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * \file
 *
 * This file contains functions to assist in dealing with a mesh of
 * query states. This mesh is supposed to be thread-specific.
 * It consists of query states (per qname, qtype, qclass), the connections
 * between query states and their super- and sub-query states, and the
 * replies to be sent back to clients.
 */
#include "config.h"
#include "services/mesh.h"
#include "services/outbound_list.h"
#include "services/cache/dns.h"
#include "services/cache/rrset.h"
#include "util/log.h"
#include "util/net_help.h"
#include "util/module.h"
#include "util/regional.h"
#include "util/data/msgencode.h"
#include "util/timehist.h"
#include "util/fptr_wlist.h"
#include "util/alloc.h"
#include "util/config_file.h"
#include "util/edns.h"
#include "sldns/sbuffer.h"
#include "sldns/wire2str.h"
#include "services/localzone.h"
#include "util/data/dname.h"
#include "respip/respip.h"
#include "services/listen_dnsport.h"

#ifdef CLIENT_SUBNET
#include "edns-subnet/subnetmod.h"
#include "edns-subnet/edns-subnet.h"
#endif

/** subtract timevals; borrow from the seconds so that the microseconds
 * value does not overflow or become negative */
static void
timeval_subtract(struct timeval* d, const struct timeval* end,
	const struct timeval* start)
{
#ifndef S_SPLINT_S
	time_t end_usec = end->tv_usec;
	d->tv_sec = end->tv_sec - start->tv_sec;
	if(end_usec < start->tv_usec) {
		end_usec += 1000000;
		d->tv_sec--;
	}
	d->tv_usec = end_usec - start->tv_usec;
#endif
}

/** add timevals; carry into the seconds so that the microseconds value
 * does not overflow */
static void
timeval_add(struct timeval* d, const struct timeval* add)
{
#ifndef S_SPLINT_S
	d->tv_sec += add->tv_sec;
	d->tv_usec += add->tv_usec;
	if(d->tv_usec >= 1000000) {
		d->tv_usec -= 1000000;
		d->tv_sec++;
	}
#endif
}

/** divide sum of timers to get average */
static void
timeval_divide(struct timeval* avg, const struct timeval* sum, size_t d)
{
#ifndef S_SPLINT_S
	size_t leftover;
	if(d <= 0) {
		avg->tv_sec = 0;
		avg->tv_usec = 0;
		return;
	}
	avg->tv_sec = sum->tv_sec / d;
	avg->tv_usec = sum->tv_usec / d;
	/* handle fraction from seconds divide */
	leftover = sum->tv_sec - avg->tv_sec*d;
	if(leftover <= 0)
		leftover = 0;
	avg->tv_usec += (((long long)leftover)*((long long)1000000))/d;
	if(avg->tv_sec < 0)
		avg->tv_sec = 0;
	if(avg->tv_usec < 0)
		avg->tv_usec = 0;
#endif
}
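
/**
 * Example (illustrative only, not part of the build): averaging two
 * hypothetical reply durations with the helpers above.
 *
 *	struct timeval a = {1, 500000};		// 1.5 sec
 *	struct timeval b = {0, 700000};		// 0.7 sec
 *	struct timeval sum = {0, 0}, avg;
 *	timeval_add(&sum, &a);
 *	timeval_add(&sum, &b);			// sum is now {2, 200000}
 *	timeval_divide(&avg, &sum, 2);		// avg is {1, 100000}: 1.1 sec
 */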

/** compare of time values for the histogram; returns true if x <= y */
static int
timeval_smaller(const struct timeval* x, const struct timeval* y)
{
#ifndef S_SPLINT_S
	if(x->tv_sec < y->tv_sec)
		return 1;
	else if(x->tv_sec == y->tv_sec) {
		if(x->tv_usec <= y->tv_usec)
			return 1;
		else return 0;
	}
	else return 0;
#endif
}

/**
 * Compare two response-ip client info entries for the purpose of mesh state
 * comparison. It returns 0 if ci_a and ci_b are considered equal; otherwise
 * 1 or -1 (meaning ci_a is larger or smaller than ci_b, respectively; in
 * practice the result is only used to tell whether they differ).
 * We cannot share the mesh state for two queries if different response-ip
 * actions can apply in the end, even if those queries are otherwise identical.
 * For this purpose we compare tag lists and tag action lists; they should be
 * identical to share the same state.
 * For tag data, we don't look into the data content, as that can be
 * expensive; unless tag data are not defined for both, or they point to the
 * exact same data in memory (i.e., they come from the same ACL entry), we
 * consider the data different.
 * Likewise, if the client info is associated with views, we don't look into
 * the views: they are considered different unless they are the exact same
 * view object, even when two views differ only in their names.
 */
static int
client_info_compare(const struct respip_client_info* ci_a,
	const struct respip_client_info* ci_b)
{
	int cmp;

	if(!ci_a && !ci_b)
		return 0;
	if(ci_a && !ci_b)
		return -1;
	if(!ci_a && ci_b)
		return 1;
	if(ci_a->taglen != ci_b->taglen)
		return (ci_a->taglen < ci_b->taglen) ? -1 : 1;
	if(ci_a->taglist && !ci_b->taglist)
		return -1;
	if(!ci_a->taglist && ci_b->taglist)
		return 1;
	if(ci_a->taglist && ci_b->taglist) {
		cmp = memcmp(ci_a->taglist, ci_b->taglist, ci_a->taglen);
		if(cmp != 0)
			return cmp;
	}
	if(ci_a->tag_actions_size != ci_b->tag_actions_size)
		return (ci_a->tag_actions_size < ci_b->tag_actions_size) ?
			-1 : 1;
	if(ci_a->tag_actions && !ci_b->tag_actions)
		return -1;
	if(!ci_a->tag_actions && ci_b->tag_actions)
		return 1;
	if(ci_a->tag_actions && ci_b->tag_actions) {
		cmp = memcmp(ci_a->tag_actions, ci_b->tag_actions,
			ci_a->tag_actions_size);
		if(cmp != 0)
			return cmp;
	}
	if(ci_a->tag_datas != ci_b->tag_datas)
		return ci_a->tag_datas < ci_b->tag_datas ? -1 : 1;
	if(ci_a->view != ci_b->view)
		return ci_a->view < ci_b->view ? -1 : 1;
	/* For the unbound daemon these should be non-NULL and identical,
	 * but we check that just in case. */
	if(ci_a->respip_set != ci_b->respip_set)
		return ci_a->respip_set < ci_b->respip_set ? -1 : 1;
	return 0;
}
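
/**
 * Example (illustrative only, hypothetical values): two clients that are
 * identical except for their tag lists compare as different here, so their
 * otherwise identical queries get separate mesh states.
 *
 *	uint8_t tags_a[] = {0x01}, tags_b[] = {0x02};
 *	struct respip_client_info ci_a, ci_b;
 *	memset(&ci_a, 0, sizeof(ci_a));
 *	memset(&ci_b, 0, sizeof(ci_b));
 *	ci_a.taglist = tags_a; ci_a.taglen = sizeof(tags_a);
 *	ci_b.taglist = tags_b; ci_b.taglen = sizeof(tags_b);
 *	// client_info_compare(&ci_a, &ci_b) != 0: no shared mesh state.
 */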

int
mesh_state_compare(const void* ap, const void* bp)
{
	struct mesh_state* a = (struct mesh_state*)ap;
	struct mesh_state* b = (struct mesh_state*)bp;
	int cmp;

	if(a->unique < b->unique)
		return -1;
	if(a->unique > b->unique)
		return 1;

	if(a->s.is_priming && !b->s.is_priming)
		return -1;
	if(!a->s.is_priming && b->s.is_priming)
		return 1;

	if(a->s.is_valrec && !b->s.is_valrec)
		return -1;
	if(!a->s.is_valrec && b->s.is_valrec)
		return 1;

	if((a->s.query_flags&BIT_RD) && !(b->s.query_flags&BIT_RD))
		return -1;
	if(!(a->s.query_flags&BIT_RD) && (b->s.query_flags&BIT_RD))
		return 1;

	if((a->s.query_flags&BIT_CD) && !(b->s.query_flags&BIT_CD))
		return -1;
	if(!(a->s.query_flags&BIT_CD) && (b->s.query_flags&BIT_CD))
		return 1;

	cmp = query_info_compare(&a->s.qinfo, &b->s.qinfo);
	if(cmp != 0)
		return cmp;
	return client_info_compare(a->s.client_info, b->s.client_info);
}

int
mesh_state_ref_compare(const void* ap, const void* bp)
{
	struct mesh_state_ref* a = (struct mesh_state_ref*)ap;
	struct mesh_state_ref* b = (struct mesh_state_ref*)bp;
	return mesh_state_compare(a->s, b->s);
}

struct mesh_area*
mesh_create(struct module_stack* stack, struct module_env* env)
{
	struct mesh_area* mesh = calloc(1, sizeof(struct mesh_area));
	if(!mesh) {
		log_err("mesh area alloc: out of memory");
		return NULL;
	}
	mesh->histogram = timehist_setup();
	mesh->qbuf_bak = sldns_buffer_new(env->cfg->msg_buffer_size);
	if(!mesh->histogram || !mesh->qbuf_bak) {
		/* free whichever of the two was allocated; both
		 * deleters are NULL-safe */
		timehist_delete(mesh->histogram);
		sldns_buffer_free(mesh->qbuf_bak);
		free(mesh);
		log_err("mesh area alloc: out of memory");
		return NULL;
	}
	mesh->mods = *stack;
	mesh->env = env;
	rbtree_init(&mesh->run, &mesh_state_compare);
	rbtree_init(&mesh->all, &mesh_state_compare);
	mesh->num_reply_addrs = 0;
	mesh->num_reply_states = 0;
	mesh->num_detached_states = 0;
	mesh->num_forever_states = 0;
	mesh->stats_jostled = 0;
	mesh->stats_dropped = 0;
	mesh->ans_expired = 0;
	mesh->max_reply_states = env->cfg->num_queries_per_thread;
	mesh->max_forever_states = (mesh->max_reply_states+1)/2;
#ifndef S_SPLINT_S
	mesh->jostle_max.tv_sec = (time_t)(env->cfg->jostle_time / 1000);
	mesh->jostle_max.tv_usec = (time_t)((env->cfg->jostle_time % 1000)
		*1000);
#endif
	return mesh;
}

/** helper for mesh_delete; deletes mesh states */
static void
mesh_delete_helper(rbnode_type* n)
{
	struct mesh_state* mstate = (struct mesh_state*)n->key;
	/* perform a full delete, not only the 'cleanup' routine, because
	 * other callbacks expect a clean state in the mesh; this also
	 * keeps 're-entrant' calls safe. */
	mesh_state_delete(&mstate->s);
	/* but because these delete the items from the tree, postorder
	 * traversal and rbtree rebalancing do not work together; hence
	 * the callers repeatedly delete the root instead. */
}

void
mesh_delete(struct mesh_area* mesh)
{
	if(!mesh)
		return;
	/* free all query states */
	while(mesh->all.count)
		mesh_delete_helper(mesh->all.root);
	timehist_delete(mesh->histogram);
	sldns_buffer_free(mesh->qbuf_bak);
	free(mesh);
}

void
mesh_delete_all(struct mesh_area* mesh)
{
	/* free all query states */
	while(mesh->all.count)
		mesh_delete_helper(mesh->all.root);
	mesh->stats_dropped += mesh->num_reply_addrs;
	/* clear mesh area references */
	rbtree_init(&mesh->run, &mesh_state_compare);
	rbtree_init(&mesh->all, &mesh_state_compare);
	mesh->num_reply_addrs = 0;
	mesh->num_reply_states = 0;
	mesh->num_detached_states = 0;
	mesh->num_forever_states = 0;
	mesh->forever_first = NULL;
	mesh->forever_last = NULL;
	mesh->jostle_first = NULL;
	mesh->jostle_last = NULL;
}

int mesh_make_new_space(struct mesh_area* mesh, sldns_buffer* qbuf)
{
	struct mesh_state* m = mesh->jostle_first;
	/* free space is available */
	if(mesh->num_reply_states < mesh->max_reply_states)
		return 1;
	/* try to kick out a jostle-list item */
	if(m && m->reply_list && m->list_select == mesh_jostle_list) {
		/* how old is it? */
		struct timeval age;
		timeval_subtract(&age, mesh->env->now_tv,
			&m->reply_list->start_time);
		if(timeval_smaller(&mesh->jostle_max, &age)) {
			/* it's a goner */
			log_nametypeclass(VERB_ALGO, "query jostled out to "
				"make space for a new one",
				m->s.qinfo.qname, m->s.qinfo.qtype,
				m->s.qinfo.qclass);
			/* backup the query */
			if(qbuf) sldns_buffer_copy(mesh->qbuf_bak, qbuf);
			/* notify supers */
			if(m->super_set.count > 0) {
				verbose(VERB_ALGO, "notify supers of failure");
				m->s.return_msg = NULL;
				m->s.return_rcode = LDNS_RCODE_SERVFAIL;
				mesh_walk_supers(mesh, m);
			}
			mesh->stats_jostled ++;
			mesh_state_delete(&m->s);
			/* restore the query - note that the qinfo ptr to
			 * the querybuffer is then correct again. */
			if(qbuf) sldns_buffer_copy(qbuf, mesh->qbuf_bak);
			return 1;
		}
	}
	/* no space for new item */
	return 0;
}
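
/**
 * Worked example (hypothetical values): with jostle-time: 200 in the
 * config, mesh_create() above stores jostle_max = {0, 200000}. A query
 * on the jostle list that started 300 msec ago then loses its slot:
 *
 *	struct timeval age = {0, 300000};	// from timeval_subtract()
 *	// timeval_smaller(&mesh->jostle_max, &age) returns 1, so the
 *	// old query is jostled out and the new query is admitted.
 */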

struct dns_msg*
mesh_serve_expired_lookup(struct module_qstate* qstate,
	struct query_info* lookup_qinfo)
{
	hashvalue_type h;
	struct lruhash_entry* e;
	struct dns_msg* msg;
	struct reply_info* data;
	struct msgreply_entry* key;
	time_t timenow = *qstate->env->now;
	int must_validate = (!(qstate->query_flags&BIT_CD)
		|| qstate->env->cfg->ignore_cd) && qstate->env->need_to_validate;
	/* Lookup cache */
	h = query_info_hash(lookup_qinfo, qstate->query_flags);
	e = slabhash_lookup(qstate->env->msg_cache, h, lookup_qinfo, 0);
	if(!e) return NULL;

	key = (struct msgreply_entry*)e->key;
	data = (struct reply_info*)e->data;
	msg = tomsg(qstate->env, &key->key, data, qstate->region, timenow,
		qstate->env->cfg->serve_expired, qstate->env->scratch);
	if(!msg)
		goto bail_out;

	/* Check CNAME chain (if any)
	 * This is part of tomsg above; no need to check now. */

	/* Check security status of the cached answer.
	 * tomsg above has a subset of these checks, so we are leaving
	 * these as is.
	 * In case of bogus or revalidation we don't care to reply here. */
	if(must_validate && (msg->rep->security == sec_status_bogus ||
		msg->rep->security == sec_status_secure_sentinel_fail)) {
		verbose(VERB_ALGO, "Serve expired: bogus answer found in cache");
		goto bail_out;
	} else if(msg->rep->security == sec_status_unchecked && must_validate) {
		verbose(VERB_ALGO, "Serve expired: unchecked entry needs "
			"validation");
		goto bail_out; /* need to validate cache entry first */
	} else if(msg->rep->security == sec_status_secure &&
		!reply_all_rrsets_secure(msg->rep) && must_validate) {
		verbose(VERB_ALGO, "Serve expired: secure entry"
			" changed status");
		goto bail_out; /* rrset changed, re-verify */
	}

	lock_rw_unlock(&e->lock);
	return msg;

bail_out:
	lock_rw_unlock(&e->lock);
	return NULL;
}
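
/**
 * Sketch (an assumption for illustration; no such hook is installed in
 * this file): a module can supply its own serve-expired cache lookup by
 * setting serve_expired_data->get_cached_answer before the timer fires;
 * mesh_serve_expired_init() below keeps an existing value. The name
 * my_expired_lookup is hypothetical.
 *
 *	static struct dns_msg*
 *	my_expired_lookup(struct module_qstate* qstate,
 *		struct query_info* lookup_qinfo)
 *	{
 *		// e.g. rewrite lookup_qinfo first, then fall back to
 *		// the default lookup.
 *		return mesh_serve_expired_lookup(qstate, lookup_qinfo);
 *	}
 */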


/** Init the serve expired data structure */
static int
mesh_serve_expired_init(struct mesh_state* mstate, int timeout)
{
	struct timeval t;

	/* Create serve_expired_data if not there yet */
	if(!mstate->s.serve_expired_data) {
		mstate->s.serve_expired_data = (struct serve_expired_data*)
			regional_alloc_zero(
				mstate->s.region, sizeof(struct serve_expired_data));
		if(!mstate->s.serve_expired_data)
			return 0;
	}

	/* Don't overwrite the function if already set */
	mstate->s.serve_expired_data->get_cached_answer =
		mstate->s.serve_expired_data->get_cached_answer?
		mstate->s.serve_expired_data->get_cached_answer:
		&mesh_serve_expired_lookup;

	/* In case this timer already popped, start it again */
	if(!mstate->s.serve_expired_data->timer) {
		mstate->s.serve_expired_data->timer = comm_timer_create(
			mstate->s.env->worker_base, mesh_serve_expired_callback, mstate);
		if(!mstate->s.serve_expired_data->timer)
			return 0;
#ifndef S_SPLINT_S
		t.tv_sec = timeout/1000;
		t.tv_usec = (timeout%1000)*1000;
#endif
		comm_timer_set(mstate->s.serve_expired_data->timer, &t);
	}
	return 1;
}

void mesh_new_client(struct mesh_area* mesh, struct query_info* qinfo,
	struct respip_client_info* cinfo, uint16_t qflags,
	struct edns_data* edns, struct comm_reply* rep, uint16_t qid,
	int rpz_passthru)
{
	struct mesh_state* s = NULL;
	int unique = unique_mesh_state(edns->opt_list_in, mesh->env);
	int was_detached = 0;
	int was_noreply = 0;
	int added = 0;
	int timeout = mesh->env->cfg->serve_expired?
		mesh->env->cfg->serve_expired_client_timeout:0;
	struct sldns_buffer* r_buffer = rep->c->buffer;
	if(rep->c->tcp_req_info) {
		r_buffer = rep->c->tcp_req_info->spool_buffer;
	}
	if(!unique)
		s = mesh_area_find(mesh, cinfo, qinfo, qflags&(BIT_RD|BIT_CD), 0, 0);
	/* does this create a new reply state? */
	if(!s || s->list_select == mesh_no_list) {
		if(!mesh_make_new_space(mesh, rep->c->buffer)) {
			verbose(VERB_ALGO, "Too many queries. dropping "
				"incoming query.");
			comm_point_drop_reply(rep);
			mesh->stats_dropped++;
			return;
		}
		/* for this new reply state, the reply address is free,
		 * so the limit of reply addresses does not stop reply states*/
	} else {
		/* protect our memory usage from storing reply addresses */
		if(mesh->num_reply_addrs > mesh->max_reply_states*16) {
			verbose(VERB_ALGO, "Too many requests queued. "
				"dropping incoming query.");
			comm_point_drop_reply(rep);
			mesh->stats_dropped++;
			return;
		}
	}
	/* see if it already exists, if not, create one */
	if(!s) {
#ifdef UNBOUND_DEBUG
		struct rbnode_type* n;
#endif
		s = mesh_state_create(mesh->env, qinfo, cinfo,
			qflags&(BIT_RD|BIT_CD), 0, 0);
		if(!s) {
			log_err("mesh_state_create: out of memory; SERVFAIL");
			if(!inplace_cb_reply_servfail_call(mesh->env, qinfo, NULL, NULL,
				LDNS_RCODE_SERVFAIL, edns, rep, mesh->env->scratch, mesh->env->now_tv))
				edns->opt_list_inplace_cb_out = NULL;
			error_encode(r_buffer, LDNS_RCODE_SERVFAIL,
				qinfo, qid, qflags, edns);
			comm_point_send_reply(rep);
			return;
		}
		if(unique)
			mesh_state_make_unique(s);
		s->s.rpz_passthru = rpz_passthru;
		/* copy the edns options we got from the front */
		if(edns->opt_list_in) {
			s->s.edns_opts_front_in = edns_opt_copy_region(edns->opt_list_in,
				s->s.region);
			if(!s->s.edns_opts_front_in) {
				log_err("mesh_state_create: out of memory; SERVFAIL");
				if(!inplace_cb_reply_servfail_call(mesh->env, qinfo, NULL,
					NULL, LDNS_RCODE_SERVFAIL, edns, rep, mesh->env->scratch, mesh->env->now_tv))
					edns->opt_list_inplace_cb_out = NULL;
				error_encode(r_buffer, LDNS_RCODE_SERVFAIL,
					qinfo, qid, qflags, edns);
				comm_point_send_reply(rep);
				return;
			}
		}

#ifdef UNBOUND_DEBUG
		n =
#else
		(void)
#endif
		rbtree_insert(&mesh->all, &s->node);
		log_assert(n != NULL);
		/* set detached (it is now) */
		mesh->num_detached_states++;
		added = 1;
	}
	if(!s->reply_list && !s->cb_list) {
		was_noreply = 1;
		if(s->super_set.count == 0) {
			was_detached = 1;
		}
	}
	/* add reply to s */
	if(!mesh_state_add_reply(s, edns, rep, qid, qflags, qinfo)) {
		log_err("mesh_new_client: out of memory; SERVFAIL");
		goto servfail_mem;
	}
	if(rep->c->tcp_req_info) {
		if(!tcp_req_info_add_meshstate(rep->c->tcp_req_info, mesh, s)) {
			log_err("mesh_new_client: out of memory add tcpreqinfo");
			goto servfail_mem;
		}
	}
	if(rep->c->use_h2) {
		http2_stream_add_meshstate(rep->c->h2_stream, mesh, s);
	}
	/* add serve expired timer if required and not already there */
	if(timeout && !mesh_serve_expired_init(s, timeout)) {
		log_err("mesh_new_client: out of memory initializing serve expired");
		goto servfail_mem;
	}
	/* update statistics */
	if(was_detached) {
		log_assert(mesh->num_detached_states > 0);
		mesh->num_detached_states--;
	}
	if(was_noreply) {
		mesh->num_reply_states ++;
	}
	mesh->num_reply_addrs++;
	if(s->list_select == mesh_no_list) {
		/* move to either the forever or the jostle_list */
		if(mesh->num_forever_states < mesh->max_forever_states) {
			mesh->num_forever_states ++;
			mesh_list_insert(s, &mesh->forever_first,
				&mesh->forever_last);
			s->list_select = mesh_forever_list;
		} else {
			mesh_list_insert(s, &mesh->jostle_first,
				&mesh->jostle_last);
			s->list_select = mesh_jostle_list;
		}
	}
	if(added)
		mesh_run(mesh, s, module_event_new, NULL);
	return;

servfail_mem:
	if(!inplace_cb_reply_servfail_call(mesh->env, qinfo, &s->s,
		NULL, LDNS_RCODE_SERVFAIL, edns, rep, mesh->env->scratch, mesh->env->now_tv))
		edns->opt_list_inplace_cb_out = NULL;
	error_encode(r_buffer, LDNS_RCODE_SERVFAIL,
		qinfo, qid, qflags, edns);
	comm_point_send_reply(rep);
	if(added)
		mesh_state_delete(&s->s);
	return;
}

int
mesh_new_callback(struct mesh_area* mesh, struct query_info* qinfo,
	uint16_t qflags, struct edns_data* edns, sldns_buffer* buf,
	uint16_t qid, mesh_cb_func_type cb, void* cb_arg, int rpz_passthru)
{
	struct mesh_state* s = NULL;
	int unique = unique_mesh_state(edns->opt_list_in, mesh->env);
	int timeout = mesh->env->cfg->serve_expired?
		mesh->env->cfg->serve_expired_client_timeout:0;
	int was_detached = 0;
	int was_noreply = 0;
	int added = 0;
	if(!unique)
		s = mesh_area_find(mesh, NULL, qinfo, qflags&(BIT_RD|BIT_CD), 0, 0);

	/* there are no limits on the number of callbacks */

	/* see if it already exists, if not, create one */
	if(!s) {
#ifdef UNBOUND_DEBUG
		struct rbnode_type* n;
#endif
		s = mesh_state_create(mesh->env, qinfo, NULL,
			qflags&(BIT_RD|BIT_CD), 0, 0);
		if(!s) {
			return 0;
		}
		if(unique)
			mesh_state_make_unique(s);
		s->s.rpz_passthru = rpz_passthru;
		if(edns->opt_list_in) {
			s->s.edns_opts_front_in = edns_opt_copy_region(edns->opt_list_in,
				s->s.region);
			if(!s->s.edns_opts_front_in) {
				return 0;
			}
		}
#ifdef UNBOUND_DEBUG
		n =
#else
		(void)
#endif
		rbtree_insert(&mesh->all, &s->node);
		log_assert(n != NULL);
		/* set detached (it is now) */
		mesh->num_detached_states++;
		added = 1;
	}
	if(!s->reply_list && !s->cb_list) {
		was_noreply = 1;
		if(s->super_set.count == 0) {
			was_detached = 1;
		}
	}
	/* add reply to s */
	if(!mesh_state_add_cb(s, edns, buf, cb, cb_arg, qid, qflags)) {
		if(added)
			mesh_state_delete(&s->s);
		return 0;
	}
	/* add serve expired timer if not already there */
	if(timeout && !mesh_serve_expired_init(s, timeout)) {
		return 0;
	}
	/* update statistics */
	if(was_detached) {
		log_assert(mesh->num_detached_states > 0);
		mesh->num_detached_states--;
	}
	if(was_noreply) {
		mesh->num_reply_states ++;
	}
	mesh->num_reply_addrs++;
	if(added)
		mesh_run(mesh, s, module_event_new, NULL);
	return 1;
}
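
/**
 * Usage sketch (hypothetical caller, e.g. a library frontend): a callback
 * matching mesh_cb_func_type receives the result when the mesh state
 * finishes. The parameter order follows the (*r->cb)(...) invocations in
 * this file; my_result_cb and my_arg are made-up names.
 *
 *	static void
 *	my_result_cb(void* arg, int rcode, sldns_buffer* buf,
 *		enum sec_status sec, char* why_bogus, int was_ratelimited)
 *	{
 *		// rcode 0 (NOERROR) means buf holds the encoded answer
 *		(void)arg; (void)buf; (void)sec;
 *		(void)why_bogus; (void)was_ratelimited;
 *	}
 *
 *	// registration, with qinfo/edns/buf/qid prepared by the caller:
 *	// mesh_new_callback(mesh, &qinfo, BIT_RD, &edns, buf, qid,
 *	//	my_result_cb, my_arg, 0);
 */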

/* Internal backend routine of mesh_new_prefetch(). It takes one additional
 * parameter, 'run', which controls whether to run the prefetch state
 * immediately. When this function is called internally 'run' can be
 * 0 (false), in which case the new state is only made runnable, so that
 * it will not be run recursively on top of the current state. */
static void mesh_schedule_prefetch(struct mesh_area* mesh,
	struct query_info* qinfo, uint16_t qflags, time_t leeway, int run,
	int rpz_passthru)
{
	struct mesh_state* s = mesh_area_find(mesh, NULL, qinfo,
		qflags&(BIT_RD|BIT_CD), 0, 0);
#ifdef UNBOUND_DEBUG
	struct rbnode_type* n;
#endif
	/* already exists, and for a different purpose perhaps.
	 * if mesh_no_list, keep it that way. */
	if(s) {
		/* make it ignore the cache from now on */
		if(!s->s.blacklist)
			sock_list_insert(&s->s.blacklist, NULL, 0, s->s.region);
		if(s->s.prefetch_leeway < leeway)
			s->s.prefetch_leeway = leeway;
		return;
	}
	if(!mesh_make_new_space(mesh, NULL)) {
		verbose(VERB_ALGO, "Too many queries. dropped prefetch.");
		mesh->stats_dropped ++;
		return;
	}

	s = mesh_state_create(mesh->env, qinfo, NULL,
		qflags&(BIT_RD|BIT_CD), 0, 0);
	if(!s) {
		log_err("prefetch mesh_state_create: out of memory");
		return;
	}
#ifdef UNBOUND_DEBUG
	n =
#else
	(void)
#endif
	rbtree_insert(&mesh->all, &s->node);
	log_assert(n != NULL);
	/* set detached (it is now) */
	mesh->num_detached_states++;
	/* make it ignore the cache */
	sock_list_insert(&s->s.blacklist, NULL, 0, s->s.region);
	s->s.prefetch_leeway = leeway;

	if(s->list_select == mesh_no_list) {
		/* move to either the forever or the jostle_list */
		if(mesh->num_forever_states < mesh->max_forever_states) {
			mesh->num_forever_states ++;
			mesh_list_insert(s, &mesh->forever_first,
				&mesh->forever_last);
			s->list_select = mesh_forever_list;
		} else {
			mesh_list_insert(s, &mesh->jostle_first,
				&mesh->jostle_last);
			s->list_select = mesh_jostle_list;
		}
	}
	s->s.rpz_passthru = rpz_passthru;

	if(!run) {
#ifdef UNBOUND_DEBUG
		n =
#else
		(void)
#endif
		rbtree_insert(&mesh->run, &s->run_node);
		log_assert(n != NULL);
		return;
	}

	mesh_run(mesh, s, module_event_new, NULL);
}

#ifdef CLIENT_SUBNET
/* Same logic as mesh_schedule_prefetch, but tailored to the subnet module
 * logic, such as passing along the comm_reply info. That info is faked
 * into an EDNS option for processing by the subnet module if the client
 * has not already attached its own ECS data. */
static void mesh_schedule_prefetch_subnet(struct mesh_area* mesh,
	struct query_info* qinfo, uint16_t qflags, time_t leeway, int run,
	int rpz_passthru, struct comm_reply* rep, struct edns_option* edns_list)
{
	struct mesh_state* s = NULL;
	struct edns_option* opt = NULL;
#ifdef UNBOUND_DEBUG
	struct rbnode_type* n;
#endif
	if(!mesh_make_new_space(mesh, NULL)) {
		verbose(VERB_ALGO, "Too many queries. dropped prefetch.");
		mesh->stats_dropped ++;
		return;
	}

	s = mesh_state_create(mesh->env, qinfo, NULL,
		qflags&(BIT_RD|BIT_CD), 0, 0);
	if(!s) {
		log_err("prefetch_subnet mesh_state_create: out of memory");
		return;
	}
	mesh_state_make_unique(s);

	opt = edns_opt_list_find(edns_list, mesh->env->cfg->client_subnet_opcode);
	if(opt) {
		/* Use the client's ECS data */
		if(!edns_opt_list_append(&s->s.edns_opts_front_in, opt->opt_code,
			opt->opt_len, opt->opt_data, s->s.region)) {
			log_err("prefetch_subnet edns_opt_list_append: out of memory");
			return;
		}
	} else {
		/* Fake the ECS data from the client's IP */
		struct ecs_data ecs;
		memset(&ecs, 0, sizeof(ecs));
		subnet_option_from_ss(&rep->addr, &ecs, mesh->env->cfg);
		if(ecs.subnet_validdata == 0) {
			log_err("prefetch_subnet subnet_option_from_ss: invalid data");
			return;
		}
		subnet_ecs_opt_list_append(&ecs, &s->s.edns_opts_front_in,
			&s->s, s->s.region);
		if(!s->s.edns_opts_front_in) {
			log_err("prefetch_subnet subnet_ecs_opt_list_append: out of memory");
			return;
		}
	}
#ifdef UNBOUND_DEBUG
	n =
#else
	(void)
#endif
	rbtree_insert(&mesh->all, &s->node);
	log_assert(n != NULL);
	/* set detached (it is now) */
	mesh->num_detached_states++;
	/* make it ignore the cache */
	sock_list_insert(&s->s.blacklist, NULL, 0, s->s.region);
	s->s.prefetch_leeway = leeway;

	if(s->list_select == mesh_no_list) {
		/* move to either the forever or the jostle_list */
		if(mesh->num_forever_states < mesh->max_forever_states) {
			mesh->num_forever_states ++;
			mesh_list_insert(s, &mesh->forever_first,
				&mesh->forever_last);
			s->list_select = mesh_forever_list;
		} else {
			mesh_list_insert(s, &mesh->jostle_first,
				&mesh->jostle_last);
			s->list_select = mesh_jostle_list;
		}
	}
	s->s.rpz_passthru = rpz_passthru;

	if(!run) {
#ifdef UNBOUND_DEBUG
		n =
#else
		(void)
#endif
		rbtree_insert(&mesh->run, &s->run_node);
		log_assert(n != NULL);
		return;
	}

	mesh_run(mesh, s, module_event_new, NULL);
}
#endif /* CLIENT_SUBNET */

void mesh_new_prefetch(struct mesh_area* mesh, struct query_info* qinfo,
	uint16_t qflags, time_t leeway, int rpz_passthru,
	struct comm_reply* rep, struct edns_option* opt_list)
{
	(void)opt_list;
	(void)rep;
#ifdef CLIENT_SUBNET
	if(rep)
		mesh_schedule_prefetch_subnet(mesh, qinfo, qflags, leeway, 1,
			rpz_passthru, rep, opt_list);
	else
#endif
		mesh_schedule_prefetch(mesh, qinfo, qflags, leeway, 1,
			rpz_passthru);
}

void mesh_report_reply(struct mesh_area* mesh, struct outbound_entry* e,
	struct comm_reply* reply, int what)
{
	enum module_ev event = module_event_reply;
	e->qstate->reply = reply;
	if(what != NETEVENT_NOERROR) {
		event = module_event_noreply;
		if(what == NETEVENT_CAPSFAIL)
			event = module_event_capsfail;
	}
	mesh_run(mesh, e->qstate->mesh_info, event, e);
}

struct mesh_state*
mesh_state_create(struct module_env* env, struct query_info* qinfo,
	struct respip_client_info* cinfo, uint16_t qflags, int prime,
	int valrec)
{
	struct regional* region = alloc_reg_obtain(env->alloc);
	struct mesh_state* mstate;
	int i;
	if(!region)
		return NULL;
	mstate = (struct mesh_state*)regional_alloc(region,
		sizeof(struct mesh_state));
	if(!mstate) {
		alloc_reg_release(env->alloc, region);
		return NULL;
	}
	memset(mstate, 0, sizeof(*mstate));
	mstate->node = *RBTREE_NULL;
	mstate->run_node = *RBTREE_NULL;
	mstate->node.key = mstate;
	mstate->run_node.key = mstate;
	mstate->reply_list = NULL;
	mstate->list_select = mesh_no_list;
	mstate->replies_sent = 0;
	rbtree_init(&mstate->super_set, &mesh_state_ref_compare);
	rbtree_init(&mstate->sub_set, &mesh_state_ref_compare);
	mstate->num_activated = 0;
	mstate->unique = NULL;
	/* init module qstate */
	mstate->s.qinfo.qtype = qinfo->qtype;
	mstate->s.qinfo.qclass = qinfo->qclass;
	mstate->s.qinfo.local_alias = NULL;
	mstate->s.qinfo.qname_len = qinfo->qname_len;
	mstate->s.qinfo.qname = regional_alloc_init(region, qinfo->qname,
		qinfo->qname_len);
	if(!mstate->s.qinfo.qname) {
		alloc_reg_release(env->alloc, region);
		return NULL;
	}
	if(cinfo) {
		mstate->s.client_info = regional_alloc_init(region, cinfo,
			sizeof(*cinfo));
		if(!mstate->s.client_info) {
			alloc_reg_release(env->alloc, region);
			return NULL;
		}
	}
	/* remove all weird bits from qflags */
	mstate->s.query_flags = (qflags & (BIT_RD|BIT_CD));
	mstate->s.is_priming = prime;
	mstate->s.is_valrec = valrec;
	mstate->s.reply = NULL;
	mstate->s.region = region;
	mstate->s.curmod = 0;
	mstate->s.return_msg = 0;
	mstate->s.return_rcode = LDNS_RCODE_NOERROR;
	mstate->s.env = env;
	mstate->s.mesh_info = mstate;
	mstate->s.prefetch_leeway = 0;
	mstate->s.serve_expired_data = NULL;
	mstate->s.no_cache_lookup = 0;
	mstate->s.no_cache_store = 0;
	mstate->s.need_refetch = 0;
	mstate->s.was_ratelimited = 0;
	mstate->s.qstarttime = *env->now;

	/* init modules */
	for(i=0; i<env->mesh->mods.num; i++) {
		mstate->s.minfo[i] = NULL;
		mstate->s.ext_state[i] = module_state_initial;
	}
	/* init edns option lists */
	mstate->s.edns_opts_front_in = NULL;
	mstate->s.edns_opts_back_out = NULL;
	mstate->s.edns_opts_back_in = NULL;
	mstate->s.edns_opts_front_out = NULL;

	return mstate;
}

int
mesh_state_is_unique(struct mesh_state* mstate)
{
	return mstate->unique != NULL;
}

void
mesh_state_make_unique(struct mesh_state* mstate)
{
	mstate->unique = mstate;
}

void
mesh_state_cleanup(struct mesh_state* mstate)
{
	struct mesh_area* mesh;
	int i;
	if(!mstate)
		return;
	mesh = mstate->s.env->mesh;
	/* Stop and delete the serve expired timer */
	if(mstate->s.serve_expired_data && mstate->s.serve_expired_data->timer) {
		comm_timer_delete(mstate->s.serve_expired_data->timer);
		mstate->s.serve_expired_data->timer = NULL;
	}
	/* drop unsent replies */
	if(!mstate->replies_sent) {
		struct mesh_reply* rep = mstate->reply_list;
		struct mesh_cb* cb;
		/* in tcp_req_info, the mstates linked are removed, but
		 * the reply_list is now NULL, so the remove-from-empty-list
		 * takes no time and also it does not do the mesh accounting */
		mstate->reply_list = NULL;
		for(; rep; rep=rep->next) {
			comm_point_drop_reply(&rep->query_reply);
			log_assert(mesh->num_reply_addrs > 0);
			mesh->num_reply_addrs--;
		}
		while((cb = mstate->cb_list)!=NULL) {
			mstate->cb_list = cb->next;
			fptr_ok(fptr_whitelist_mesh_cb(cb->cb));
			(*cb->cb)(cb->cb_arg, LDNS_RCODE_SERVFAIL, NULL,
				sec_status_unchecked, NULL, 0);
			log_assert(mesh->num_reply_addrs > 0);
			mesh->num_reply_addrs--;
		}
	}

	/* de-init modules */
	for(i=0; i<mesh->mods.num; i++) {
		fptr_ok(fptr_whitelist_mod_clear(mesh->mods.mod[i]->clear));
		(*mesh->mods.mod[i]->clear)(&mstate->s, i);
		mstate->s.minfo[i] = NULL;
		mstate->s.ext_state[i] = module_finished;
	}
	alloc_reg_release(mstate->s.env->alloc, mstate->s.region);
}

void
mesh_state_delete(struct module_qstate* qstate)
{
	struct mesh_area* mesh;
	struct mesh_state_ref* super, ref;
	struct mesh_state* mstate;
	if(!qstate)
		return;
	mstate = qstate->mesh_info;
	mesh = mstate->s.env->mesh;
	mesh_detach_subs(&mstate->s);
	if(mstate->list_select == mesh_forever_list) {
		mesh->num_forever_states --;
		mesh_list_remove(mstate, &mesh->forever_first,
			&mesh->forever_last);
	} else if(mstate->list_select == mesh_jostle_list) {
		mesh_list_remove(mstate, &mesh->jostle_first,
			&mesh->jostle_last);
	}
	if(!mstate->reply_list && !mstate->cb_list
		&& mstate->super_set.count == 0) {
		log_assert(mesh->num_detached_states > 0);
		mesh->num_detached_states--;
	}
	if(mstate->reply_list || mstate->cb_list) {
		log_assert(mesh->num_reply_states > 0);
		mesh->num_reply_states--;
	}
	ref.node.key = &ref;
	ref.s = mstate;
	RBTREE_FOR(super, struct mesh_state_ref*, &mstate->super_set) {
		(void)rbtree_delete(&super->s->sub_set, &ref);
	}
	(void)rbtree_delete(&mesh->run, mstate);
	(void)rbtree_delete(&mesh->all, mstate);
	mesh_state_cleanup(mstate);
}

/** helper recursive rbtree find routine */
static int
find_in_subsub(struct mesh_state* m, struct mesh_state* tofind, size_t *c)
{
	struct mesh_state_ref* r;
	if((*c)++ > MESH_MAX_SUBSUB)
		return 1;
	RBTREE_FOR(r, struct mesh_state_ref*, &m->sub_set) {
		if(r->s == tofind || find_in_subsub(r->s, tofind, c))
			return 1;
	}
	return 0;
}

/** find cycle for already looked up mesh_state */
static int
mesh_detect_cycle_found(struct module_qstate* qstate, struct mesh_state* dep_m)
{
	struct mesh_state* cyc_m = qstate->mesh_info;
	size_t counter = 0;
	if(!dep_m)
		return 0;
	if(dep_m == cyc_m || find_in_subsub(dep_m, cyc_m, &counter)) {
		if(counter > MESH_MAX_SUBSUB)
			return 2;
		return 1;
	}
	return 0;
}
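
/**
 * Example (illustrative): suppose state A wants to attach sub B, while A
 * already sits somewhere below B. With cyc_m == A and dep_m == B,
 * find_in_subsub(B, A, &counter) walks B's sub_set recursively, finds A,
 * and mesh_detect_cycle_found() returns 1, so mesh_add_sub() refuses the
 * attach. If the walk visits more than MESH_MAX_SUBSUB states it stops
 * and 2 is returned, which callers also treat as a cycle.
 */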

void mesh_detach_subs(struct module_qstate* qstate)
{
	struct mesh_area* mesh = qstate->env->mesh;
	struct mesh_state_ref* ref, lookup;
#ifdef UNBOUND_DEBUG
	struct rbnode_type* n;
#endif
	lookup.node.key = &lookup;
	lookup.s = qstate->mesh_info;
	RBTREE_FOR(ref, struct mesh_state_ref*, &qstate->mesh_info->sub_set) {
#ifdef UNBOUND_DEBUG
		n =
#else
		(void)
#endif
		rbtree_delete(&ref->s->super_set, &lookup);
		log_assert(n != NULL); /* must have been present */
		if(!ref->s->reply_list && !ref->s->cb_list
			&& ref->s->super_set.count == 0) {
			mesh->num_detached_states++;
			log_assert(mesh->num_detached_states +
				mesh->num_reply_states <= mesh->all.count);
		}
	}
	rbtree_init(&qstate->mesh_info->sub_set, &mesh_state_ref_compare);
}

int mesh_add_sub(struct module_qstate* qstate, struct query_info* qinfo,
	uint16_t qflags, int prime, int valrec, struct module_qstate** newq,
	struct mesh_state** sub)
{
	/* find it, if not, create it */
	struct mesh_area* mesh = qstate->env->mesh;
	*sub = mesh_area_find(mesh, NULL, qinfo, qflags,
		prime, valrec);
	if(mesh_detect_cycle_found(qstate, *sub)) {
		verbose(VERB_ALGO, "attach failed, cycle detected");
		return 0;
	}
	if(!*sub) {
#ifdef UNBOUND_DEBUG
		struct rbnode_type* n;
#endif
		/* create a new one */
		*sub = mesh_state_create(qstate->env, qinfo, NULL, qflags, prime,
			valrec);
		if(!*sub) {
			log_err("mesh_attach_sub: out of memory");
			return 0;
		}
#ifdef UNBOUND_DEBUG
		n =
#else
		(void)
#endif
		rbtree_insert(&mesh->all, &(*sub)->node);
		log_assert(n != NULL);
		/* set detached (it is now) */
		mesh->num_detached_states++;
		/* set new query state to run */
#ifdef UNBOUND_DEBUG
		n =
#else
		(void)
#endif
		rbtree_insert(&mesh->run, &(*sub)->run_node);
		log_assert(n != NULL);
		*newq = &(*sub)->s;
	} else
		*newq = NULL;
	return 1;
}

int mesh_attach_sub(struct module_qstate* qstate, struct query_info* qinfo,
	uint16_t qflags, int prime, int valrec, struct module_qstate** newq)
{
	struct mesh_area* mesh = qstate->env->mesh;
	struct mesh_state* sub = NULL;
	int was_detached;
	if(!mesh_add_sub(qstate, qinfo, qflags, prime, valrec, newq, &sub))
		return 0;
	was_detached = (sub->super_set.count == 0);
	if(!mesh_state_attachment(qstate->mesh_info, sub))
		return 0;
	/* if it was a duplicate attachment, the count was not zero before */
	if(!sub->reply_list && !sub->cb_list && was_detached &&
		sub->super_set.count == 1) {
		/* it used to be detached, before this one got added */
		log_assert(mesh->num_detached_states > 0);
		mesh->num_detached_states--;
	}
	/* *newq will be run when inited after the current module stops */
	return 1;
}

int mesh_state_attachment(struct mesh_state* super, struct mesh_state* sub)
{
#ifdef UNBOUND_DEBUG
	struct rbnode_type* n;
#endif
	struct mesh_state_ref* subref; /* points to sub, inserted in super */
	struct mesh_state_ref* superref; /* points to super, inserted in sub */
	if( !(subref = regional_alloc(super->s.region,
		sizeof(struct mesh_state_ref))) ||
		!(superref = regional_alloc(sub->s.region,
		sizeof(struct mesh_state_ref))) ) {
		log_err("mesh_state_attachment: out of memory");
		return 0;
	}
	superref->node.key = superref;
	superref->s = super;
	subref->node.key = subref;
	subref->s = sub;
	if(!rbtree_insert(&sub->super_set, &superref->node)) {
		/* this should not happen, iterator and validator do not
		 * attach subqueries that are identical. */
		/* already attached, we are done, nothing to do.
		 * since superref and subref are already allocated in the
		 * region, we cannot free them */
		return 1;
	}
#ifdef UNBOUND_DEBUG
	n =
#else
	(void)
#endif
	rbtree_insert(&super->sub_set, &subref->node);
	log_assert(n != NULL); /* we checked the above if statement; the
		reverse administration should not fail now, unless the two
		trees are out of sync */
	return 1;
}
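
/**
 * Usage sketch (hypothetical subquery): modules normally attach through
 * mesh_attach_sub(), which calls mesh_state_attachment() above and leaves
 * the two rbtrees mirrored: super->sub_set holds a ref to sub and
 * sub->super_set holds a ref to super. The qinfo here is made up.
 *
 *	struct module_qstate* newq = NULL;
 *	if(mesh_attach_sub(qstate, &sub_qinfo, BIT_RD, 0, 0, &newq)) {
 *		// newq != NULL: a fresh state was created and will run
 *		// after the current module step returns.
 *	}
 */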

/**
 * callback results to mesh cb entry
 * @param m: mesh state to send it for.
 * @param rcode: if not 0, error code.
 * @param rep: reply to send (or NULL if rcode is set).
 * @param r: callback entry
 * @param start_time: the time to pass to callback functions, it is 0 or
 *	a value from one of the packets if the mesh state had packets.
 */
static void
mesh_do_callback(struct mesh_state* m, int rcode, struct reply_info* rep,
	struct mesh_cb* r, struct timeval* start_time)
{
	int secure;
	char* reason = NULL;
	int was_ratelimited = m->s.was_ratelimited;
	/* bogus messages are not made into servfail, sec_status passed
	 * to the callback function */
	if(rep && rep->security == sec_status_secure)
		secure = 1;
	else secure = 0;
	if(!rep && rcode == LDNS_RCODE_NOERROR)
		rcode = LDNS_RCODE_SERVFAIL;
	if(!rcode && (rep->security == sec_status_bogus ||
		rep->security == sec_status_secure_sentinel_fail)) {
		if(!(reason = errinf_to_str_bogus(&m->s)))
			rcode = LDNS_RCODE_SERVFAIL;
	}
	/* send the reply */
	if(rcode) {
		if(rcode == LDNS_RCODE_SERVFAIL) {
			if(!inplace_cb_reply_servfail_call(m->s.env, &m->s.qinfo, &m->s,
				rep, rcode, &r->edns, NULL, m->s.region, start_time))
				r->edns.opt_list_inplace_cb_out = NULL;
		} else {
			if(!inplace_cb_reply_call(m->s.env, &m->s.qinfo, &m->s, rep, rcode,
				&r->edns, NULL, m->s.region, start_time))
				r->edns.opt_list_inplace_cb_out = NULL;
		}
		fptr_ok(fptr_whitelist_mesh_cb(r->cb));
		(*r->cb)(r->cb_arg, rcode, r->buf, sec_status_unchecked, NULL,
			was_ratelimited);
	} else {
		size_t udp_size = r->edns.udp_size;
		sldns_buffer_clear(r->buf);
		r->edns.edns_version = EDNS_ADVERTISED_VERSION;
		r->edns.udp_size = EDNS_ADVERTISED_SIZE;
		r->edns.ext_rcode = 0;
		r->edns.bits &= EDNS_DO;

		if(!inplace_cb_reply_call(m->s.env, &m->s.qinfo, &m->s, rep,
			LDNS_RCODE_NOERROR, &r->edns, NULL, m->s.region, start_time) ||
			!reply_info_answer_encode(&m->s.qinfo, rep, r->qid,
			r->qflags, r->buf, 0, 1,
			m->s.env->scratch, udp_size, &r->edns,
			(int)(r->edns.bits & EDNS_DO), secure))
		{
			fptr_ok(fptr_whitelist_mesh_cb(r->cb));
			(*r->cb)(r->cb_arg, LDNS_RCODE_SERVFAIL, r->buf,
				sec_status_unchecked, NULL, 0);
		} else {
			fptr_ok(fptr_whitelist_mesh_cb(r->cb));
			(*r->cb)(r->cb_arg, LDNS_RCODE_NOERROR, r->buf,
				rep->security, reason, was_ratelimited);
		}
	}
	free(reason);
	log_assert(m->s.env->mesh->num_reply_addrs > 0);
	m->s.env->mesh->num_reply_addrs--;
}

static inline int
mesh_is_rpz_respip_tcponly_action(struct mesh_state const* m)
{
	struct respip_action_info const* respip_info = m->s.respip_action_info;
	return respip_info == NULL
		? 0
		: (respip_info->rpz_used
		&& !respip_info->rpz_disabled
		&& respip_info->action == respip_truncate);
}

static inline int
mesh_is_udp(struct mesh_reply const* r) {
	return r->query_reply.c->type == comm_udp;
}

/**
 * Send reply to mesh reply entry
 * @param m: mesh state to send it for.
 * @param rcode: if not 0, error code.
 * @param rep: reply to send (or NULL if rcode is set).
 * @param r: reply entry
 * @param r_buffer: buffer to use for reply entry.
 * @param prev: previous reply, already has its answer encoded in buffer.
 * @param prev_buffer: buffer for previous reply.
 */
static void
mesh_send_reply(struct mesh_state* m, int rcode, struct reply_info* rep,
	struct mesh_reply* r, struct sldns_buffer* r_buffer,
	struct mesh_reply* prev, struct sldns_buffer* prev_buffer)
{
	struct timeval end_time;
	struct timeval duration;
	int secure;
	/* briefly set the replylist to null in case the
	 * meshsendreply calls tcpreqinfo sendreply that
	 * comm_point_drops because of size, and then the
	 * null stops the mesh state remove and thus
	 * reply_list modification and accounting */
	struct mesh_reply* rlist = m->reply_list;

	/* rpz: apply actions */
	rcode = mesh_is_udp(r) && mesh_is_rpz_respip_tcponly_action(m)
		? (rcode|BIT_TC) : rcode;

	/* examine security status */
	if(m->s.env->need_to_validate && (!(r->qflags&BIT_CD) ||
		m->s.env->cfg->ignore_cd) && rep &&
		(rep->security <= sec_status_bogus ||
		rep->security == sec_status_secure_sentinel_fail)) {
		rcode = LDNS_RCODE_SERVFAIL;
		if(m->s.env->cfg->stat_extended)
			m->s.env->mesh->ans_bogus++;
	}
	if(rep && rep->security == sec_status_secure)
		secure = 1;
	else secure = 0;
	if(!rep && rcode == LDNS_RCODE_NOERROR)
		rcode = LDNS_RCODE_SERVFAIL;
	if(r->query_reply.c->use_h2) {
		r->query_reply.c->h2_stream = r->h2_stream;
		/* The mesh reply won't exist for long anymore. Make it
		 * impossible for the HTTP/2 stream to refer to the mesh
		 * state, in case the connection gets cleaned up before
		 * the HTTP/2 stream closes. */
		r->h2_stream->mesh_state = NULL;
	}
	/* send the reply */
	/* We don't reuse the encoded answer if:
	 * - either the previous or current response has a local alias. We could
	 *   compare the alias records and still reuse the previous answer if they
	 *   are the same, but that would be complicated and error prone for the
	 *   relatively minor case. So we err on the side of safety.
	 * - there are registered callback functions for the given rcode, as these
	 *   need to be called for each reply. */
	if(((rcode != LDNS_RCODE_SERVFAIL &&
			!m->s.env->inplace_cb_lists[inplace_cb_reply]) ||
		(rcode == LDNS_RCODE_SERVFAIL &&
			!m->s.env->inplace_cb_lists[inplace_cb_reply_servfail])) &&
		prev && prev_buffer && prev->qflags == r->qflags &&
		!prev->local_alias && !r->local_alias &&
		prev->edns.edns_present == r->edns.edns_present &&
		prev->edns.bits == r->edns.bits &&
		prev->edns.udp_size == r->edns.udp_size &&
		edns_opt_list_compare(prev->edns.opt_list_out, r->edns.opt_list_out) == 0 &&
		edns_opt_list_compare(prev->edns.opt_list_inplace_cb_out, r->edns.opt_list_inplace_cb_out) == 0
		) {
		/* if the previous reply is identical to this one, fix ID */
		if(prev_buffer != r_buffer)
			sldns_buffer_copy(r_buffer, prev_buffer);
		sldns_buffer_write_at(r_buffer, 0, &r->qid, sizeof(uint16_t));
		sldns_buffer_write_at(r_buffer, 12, r->qname,
			m->s.qinfo.qname_len);
		m->reply_list = NULL;
		comm_point_send_reply(&r->query_reply);
		m->reply_list = rlist;
	} else if(rcode) {
		m->s.qinfo.qname = r->qname;
		m->s.qinfo.local_alias = r->local_alias;
		if(rcode == LDNS_RCODE_SERVFAIL) {
			if(!inplace_cb_reply_servfail_call(m->s.env, &m->s.qinfo, &m->s,
				rep, rcode, &r->edns, &r->query_reply, m->s.region, &r->start_time))
				r->edns.opt_list_inplace_cb_out = NULL;
		} else {
			if(!inplace_cb_reply_call(m->s.env, &m->s.qinfo, &m->s, rep, rcode,
				&r->edns, &r->query_reply, m->s.region, &r->start_time))
				r->edns.opt_list_inplace_cb_out = NULL;
		}
		/* Send along the EDE BOGUS EDNS0 option when the answer is
		 * bogus */
		if(m->s.env->cfg->ede && rcode == LDNS_RCODE_SERVFAIL &&
			m->s.env->need_to_validate && (!(r->qflags&BIT_CD) ||
			m->s.env->cfg->ignore_cd) && rep &&
			(rep->security <= sec_status_bogus ||
			rep->security == sec_status_secure_sentinel_fail)) {
			char *reason = m->s.env->cfg->val_log_level >= 2
				? errinf_to_str_bogus(&m->s) : NULL;

			/* During validation the EDE code can be received via
			 * two code paths. One code path fills the reply_info
			 * EDE, and the other fills it in the errinf_strlist.
			 * These paths intersect at some points, but where is
			 * opaque due to the complexity of the validator. At
			 * the time of writing we make the choice to prefer
			 * the EDE from errinf_strlist, but a compelling
			 * reason to do otherwise is just as valid. */
			sldns_ede_code reason_bogus = errinf_to_reason_bogus(&m->s);
			if ((reason_bogus == LDNS_EDE_DNSSEC_BOGUS &&
				rep->reason_bogus != LDNS_EDE_NONE) ||
				reason_bogus == LDNS_EDE_NONE) {
				reason_bogus = rep->reason_bogus;
			}

			if(reason_bogus != LDNS_EDE_NONE) {
				edns_opt_list_append_ede(&r->edns.opt_list_out,
					m->s.region, reason_bogus, reason);
			}
			free(reason);
		}
		error_encode(r_buffer, rcode, &m->s.qinfo, r->qid,
			r->qflags, &r->edns);
		m->reply_list = NULL;
		comm_point_send_reply(&r->query_reply);
		m->reply_list = rlist;
	} else {
		size_t udp_size = r->edns.udp_size;
		r->edns.edns_version = EDNS_ADVERTISED_VERSION;
		r->edns.udp_size = EDNS_ADVERTISED_SIZE;
		r->edns.ext_rcode = 0;
		r->edns.bits &= EDNS_DO;
		m->s.qinfo.qname = r->qname;
		m->s.qinfo.local_alias = r->local_alias;
		if(!inplace_cb_reply_call(m->s.env, &m->s.qinfo, &m->s, rep,
			LDNS_RCODE_NOERROR, &r->edns, &r->query_reply, m->s.region, &r->start_time) ||
			!reply_info_answer_encode(&m->s.qinfo, rep, r->qid,
			r->qflags, r_buffer, 0, 1, m->s.env->scratch,
			udp_size, &r->edns, (int)(r->edns.bits & EDNS_DO),
			secure))
		{
			if(!inplace_cb_reply_servfail_call(m->s.env, &m->s.qinfo, &m->s,
				rep, LDNS_RCODE_SERVFAIL, &r->edns, &r->query_reply, m->s.region, &r->start_time))
				r->edns.opt_list_inplace_cb_out = NULL;
			/* internal server error (probably malloc failure) so no
			 * EDE (RFC8914) needed */
			error_encode(r_buffer, LDNS_RCODE_SERVFAIL,
				&m->s.qinfo, r->qid, r->qflags, &r->edns);
		}
		m->reply_list = NULL;
		comm_point_send_reply(&r->query_reply);
		m->reply_list = rlist;
	}
	/* account */
	log_assert(m->s.env->mesh->num_reply_addrs > 0);
	m->s.env->mesh->num_reply_addrs--;
	end_time = *m->s.env->now_tv;
	timeval_subtract(&duration, &end_time, &r->start_time);
	verbose(VERB_ALGO, "query took " ARG_LL "d.%6.6d sec",
		(long long)duration.tv_sec, (int)duration.tv_usec);
	m->s.env->mesh->replies_sent++;
	timeval_add(&m->s.env->mesh->replies_sum_wait, &duration);
	timehist_insert(m->s.env->mesh->histogram, &duration);
	if(m->s.env->cfg->stat_extended) {
		uint16_t rc = FLAGS_GET_RCODE(sldns_buffer_read_u16_at(
			r_buffer, 2));
		if(secure) m->s.env->mesh->ans_secure++;
		m->s.env->mesh->ans_rcode[ rc ] ++;
		if(rc == 0 && LDNS_ANCOUNT(sldns_buffer_begin(r_buffer)) == 0)
			m->s.env->mesh->ans_nodata++;
	}
	/* Log reply sent */
	if(m->s.env->cfg->log_replies) {
		log_reply_info(NO_VERBOSE, &m->s.qinfo, &r->query_reply.addr,
			r->query_reply.addrlen, duration, 0, r_buffer);
	}
}
1495
mesh_query_done(struct mesh_state * mstate)1496 void mesh_query_done(struct mesh_state* mstate)
1497 {
1498 struct mesh_reply* r;
1499 struct mesh_reply* prev = NULL;
1500 struct sldns_buffer* prev_buffer = NULL;
1501 struct mesh_cb* c;
1502 struct reply_info* rep = (mstate->s.return_msg?
1503 mstate->s.return_msg->rep:NULL);
1504 struct timeval tv = {0, 0};
1505 /* No need for the serve expired timer anymore; we are going to reply. */
1506 if(mstate->s.serve_expired_data) {
1507 comm_timer_delete(mstate->s.serve_expired_data->timer);
1508 mstate->s.serve_expired_data->timer = NULL;
1509 }
1510 if(mstate->s.return_rcode == LDNS_RCODE_SERVFAIL ||
1511 (rep && FLAGS_GET_RCODE(rep->flags) == LDNS_RCODE_SERVFAIL)) {
1512 /* we are SERVFAILing; check for expired answer here */
1513 mesh_serve_expired_callback(mstate);
1514 if((mstate->reply_list || mstate->cb_list)
1515 && mstate->s.env->cfg->log_servfail
1516 && !mstate->s.env->cfg->val_log_squelch) {
1517 char* err = errinf_to_str_servfail(&mstate->s);
1518 if(err)
1519 log_err("%s", err);
1520 free(err);
1521 }
1522 }
1523 for(r = mstate->reply_list; r; r = r->next) {
1524 tv = r->start_time;
1525
1526 /* if a response-ip address block has been stored the
1527 * information should be logged for each client. */
1528 if(mstate->s.respip_action_info &&
1529 mstate->s.respip_action_info->addrinfo) {
1530 respip_inform_print(mstate->s.respip_action_info,
1531 r->qname, mstate->s.qinfo.qtype,
1532 mstate->s.qinfo.qclass, r->local_alias,
1533 &r->query_reply);
1534 if(mstate->s.env->cfg->stat_extended &&
1535 mstate->s.respip_action_info->rpz_used) {
1536 if(mstate->s.respip_action_info->rpz_disabled)
1537 mstate->s.env->mesh->rpz_action[RPZ_DISABLED_ACTION]++;
1538 if(mstate->s.respip_action_info->rpz_cname_override)
1539 mstate->s.env->mesh->rpz_action[RPZ_CNAME_OVERRIDE_ACTION]++;
1540 else
1541 mstate->s.env->mesh->rpz_action[respip_action_to_rpz_action(
1542 mstate->s.respip_action_info->action)]++;
1543 }
1544 }
1545
1546 /* if this query is determined to be dropped during the
1547 * mesh processing, this is the point to take that action. */
1548 if(mstate->s.is_drop) {
1549 /* briefly set the reply_list to NULL, so that the
1550 * tcp req info cleanup routine that calls the mesh
1551 * to deregister the meshstate for it is not done
1552 * because the list is NULL and also accounting is not
1553 * done there, but instead we do that here. */
			struct mesh_reply* reply_list = mstate->reply_list;
			mstate->reply_list = NULL;
			comm_point_drop_reply(&r->query_reply);
			mstate->reply_list = reply_list;
		} else {
			struct sldns_buffer* r_buffer = r->query_reply.c->buffer;
			if(r->query_reply.c->tcp_req_info) {
				r_buffer = r->query_reply.c->tcp_req_info->spool_buffer;
				prev_buffer = NULL;
			}
			mesh_send_reply(mstate, mstate->s.return_rcode, rep,
				r, r_buffer, prev, prev_buffer);
			if(r->query_reply.c->tcp_req_info) {
				tcp_req_info_remove_mesh_state(r->query_reply.c->tcp_req_info, mstate);
				r_buffer = NULL;
			}
			prev = r;
			prev_buffer = r_buffer;
		}
	}
	if(mstate->reply_list) {
		mstate->reply_list = NULL;
		if(!mstate->reply_list && !mstate->cb_list) {
			/* was a reply state, not anymore */
			log_assert(mstate->s.env->mesh->num_reply_states > 0);
			mstate->s.env->mesh->num_reply_states--;
		}
		if(!mstate->reply_list && !mstate->cb_list &&
			mstate->super_set.count == 0)
			mstate->s.env->mesh->num_detached_states++;
	}
	mstate->replies_sent = 1;
	while((c = mstate->cb_list) != NULL) {
		/* take this cb off the list, so that the list can be
		 * changed, e.g. by adds from the callback routine */
		if(!mstate->reply_list && mstate->cb_list && !c->next) {
			/* was a reply state, not anymore */
			log_assert(mstate->s.env->mesh->num_reply_states > 0);
			mstate->s.env->mesh->num_reply_states--;
		}
		mstate->cb_list = c->next;
		if(!mstate->reply_list && !mstate->cb_list &&
			mstate->super_set.count == 0)
			mstate->s.env->mesh->num_detached_states++;
		mesh_do_callback(mstate, mstate->s.return_rcode, rep, c, &tv);
	}
}

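/* mesh_walk_supers below makes every super state that waits on this
 * (now finished) state runnable again. Order matters: inform_super is
 * called before copy_state_to_super, so the super's current module sees
 * the sub state's result before the always-relevant state is merged
 * into the super. */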
void mesh_walk_supers(struct mesh_area* mesh, struct mesh_state* mstate)
{
	struct mesh_state_ref* ref;
	RBTREE_FOR(ref, struct mesh_state_ref*, &mstate->super_set)
	{
		/* make super runnable */
		(void)rbtree_insert(&mesh->run, &ref->s->run_node);
		/* callback the function to inform super of result */
		fptr_ok(fptr_whitelist_mod_inform_super(
			mesh->mods.mod[ref->s->s.curmod]->inform_super));
		(*mesh->mods.mod[ref->s->s.curmod]->inform_super)(&mstate->s,
			ref->s->s.curmod, &ref->s->s);
		/* copy state that is always relevant to super */
		copy_state_to_super(&mstate->s, ref->s->s.curmod, &ref->s->s);
	}
}

struct mesh_state* mesh_area_find(struct mesh_area* mesh,
	struct respip_client_info* cinfo, struct query_info* qinfo,
	uint16_t qflags, int prime, int valrec)
{
	struct mesh_state key;
	struct mesh_state* result;

	key.node.key = &key;
	key.s.is_priming = prime;
	key.s.is_valrec = valrec;
	key.s.qinfo = *qinfo;
	key.s.query_flags = qflags;
	/* We are searching for a similar mesh state when we DO want to
	 * aggregate the state. Thus unique is set to NULL (the default
	 * when we desire aggregation). */
	key.unique = NULL;
	key.s.client_info = cinfo;

	result = (struct mesh_state*)rbtree_search(&mesh->all, &key);
	return result;
}
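
/* Usage sketch (illustrative only, not a caller in this file): to attach
 * to an in-progress query instead of creating a duplicate state, one
 * would do roughly:
 *	s = mesh_area_find(mesh, cinfo, &qinfo, qflags, 0, 0);
 *	if(s) { ... add a reply or callback to the existing state ... }
 *	else { ... create a new mesh state ... }
 * Note that prime and valrec are part of the key, so a priming query
 * never aggregates with a normal query for the same name. */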

int mesh_state_add_cb(struct mesh_state* s, struct edns_data* edns,
	sldns_buffer* buf, mesh_cb_func_type cb, void* cb_arg,
	uint16_t qid, uint16_t qflags)
{
	struct mesh_cb* r = regional_alloc(s->s.region,
		sizeof(struct mesh_cb));
	if(!r)
		return 0;
	r->buf = buf;
	log_assert(fptr_whitelist_mesh_cb(cb)); /* early failure if missing */
	r->cb = cb;
	r->cb_arg = cb_arg;
	r->edns = *edns;
	if(edns->opt_list_in && !(r->edns.opt_list_in =
		edns_opt_copy_region(edns->opt_list_in, s->s.region)))
		return 0;
	if(edns->opt_list_out && !(r->edns.opt_list_out =
		edns_opt_copy_region(edns->opt_list_out, s->s.region)))
		return 0;
	if(edns->opt_list_inplace_cb_out && !(r->edns.opt_list_inplace_cb_out =
		edns_opt_copy_region(edns->opt_list_inplace_cb_out, s->s.region)))
		return 0;
	r->qid = qid;
	r->qflags = qflags;
	r->next = s->cb_list;
	s->cb_list = r;
	return 1;
}
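
/* Usage sketch (illustrative; my_cb and my_arg are hypothetical names):
 *	if(!mesh_state_add_cb(s, edns, buf, my_cb, my_arg, qid, qflags))
 *		... out of memory, fail the request ...
 * The callback my_cb must be a mesh_cb_func_type that is present in the
 * fptr_wlist whitelist, or the log_assert above fails early. */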

int mesh_state_add_reply(struct mesh_state* s, struct edns_data* edns,
	struct comm_reply* rep, uint16_t qid, uint16_t qflags,
	const struct query_info* qinfo)
{
	struct mesh_reply* r = regional_alloc(s->s.region,
		sizeof(struct mesh_reply));
	if(!r)
		return 0;
	r->query_reply = *rep;
	r->edns = *edns;
	if(edns->opt_list_in && !(r->edns.opt_list_in =
		edns_opt_copy_region(edns->opt_list_in, s->s.region)))
		return 0;
	if(edns->opt_list_out && !(r->edns.opt_list_out =
		edns_opt_copy_region(edns->opt_list_out, s->s.region)))
		return 0;
	if(edns->opt_list_inplace_cb_out && !(r->edns.opt_list_inplace_cb_out =
		edns_opt_copy_region(edns->opt_list_inplace_cb_out, s->s.region)))
		return 0;
	r->qid = qid;
	r->qflags = qflags;
	r->start_time = *s->s.env->now_tv;
	r->next = s->reply_list;
	r->qname = regional_alloc_init(s->s.region, qinfo->qname,
		s->s.qinfo.qname_len);
	if(!r->qname)
		return 0;
	if(rep->c->use_h2)
		r->h2_stream = rep->c->h2_stream;

	/* Data related to local alias stored in 'qinfo' (if any) is ephemeral
	 * and can be different for different original queries (even if the
	 * replaced query name is the same). So we need to make a deep copy
	 * and store the copy for each reply info. */
	if(qinfo->local_alias) {
		struct packed_rrset_data* d;
		struct packed_rrset_data* dsrc;
		r->local_alias = regional_alloc_zero(s->s.region,
			sizeof(*qinfo->local_alias));
		if(!r->local_alias)
			return 0;
		r->local_alias->rrset = regional_alloc_init(s->s.region,
			qinfo->local_alias->rrset,
			sizeof(*qinfo->local_alias->rrset));
		if(!r->local_alias->rrset)
			return 0;
		dsrc = qinfo->local_alias->rrset->entry.data;

		/* In the current implementation, a local alias must be
		 * a single CNAME RR (see worker_handle_request()). */
		log_assert(!qinfo->local_alias->next && dsrc->count == 1 &&
			qinfo->local_alias->rrset->rk.type ==
			htons(LDNS_RR_TYPE_CNAME));
		/* we should make a local copy for the owner name of
		 * the RRset */
		r->local_alias->rrset->rk.dname_len =
			qinfo->local_alias->rrset->rk.dname_len;
		r->local_alias->rrset->rk.dname = regional_alloc_init(
			s->s.region, qinfo->local_alias->rrset->rk.dname,
			qinfo->local_alias->rrset->rk.dname_len);
		if(!r->local_alias->rrset->rk.dname)
			return 0;

		/* the rrset is not packed, like in the cache, but it is
		 * individually allocated with an allocator from localzone. */
		d = regional_alloc_zero(s->s.region, sizeof(*d));
		if(!d)
			return 0;
		r->local_alias->rrset->entry.data = d;
		if(!rrset_insert_rr(s->s.region, d, dsrc->rr_data[0],
			dsrc->rr_len[0], dsrc->rr_ttl[0], "CNAME local alias"))
			return 0;
	} else
		r->local_alias = NULL;

	s->reply_list = r;
	return 1;
}
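
/* Usage sketch (illustrative; assumes the caller does the accounting
 * that mirrors mesh_state_remove_reply()):
 *	if(!mesh_state_add_reply(s, edns, rep, qid, qflags, qinfo))
 *		... out of memory, servfail the client ...
 *	else
 *		s->s.env->mesh->num_reply_addrs++;
 * On the failure returns above, the partially filled mesh_reply is
 * simply abandoned; it lives in the state's region and is freed with
 * it. */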

/* Extract the query info and flags from 'mstate' into '*qinfop' and
 * '*qflags'. Since this is only used for an internal refetch of an
 * otherwise-expired answer, we simply ignore the rare failure mode when
 * memory allocation fails. */
static void
mesh_copy_qinfo(struct mesh_state* mstate, struct query_info** qinfop,
	uint16_t* qflags)
{
	struct regional* region = mstate->s.env->scratch;
	struct query_info* qinfo;

	qinfo = regional_alloc_init(region, &mstate->s.qinfo, sizeof(*qinfo));
	if(!qinfo)
		return;
	qinfo->qname = regional_alloc_init(region, qinfo->qname,
		qinfo->qname_len);
	if(!qinfo->qname)
		return;
	*qinfop = qinfo;
	*qflags = mstate->s.query_flags;
}

/**
 * Continue processing the mesh state at another module.
 * Handles module-to-module transfer of control.
 * Handles module finished.
 * @param mesh: the mesh area.
 * @param mstate: currently active mesh state.
 *	Deleted if finished, calls _done and _supers to
 *	send replies to clients and inform other mesh states.
 *	This in turn may create additional runnable mesh states.
 * @param s: state at which the current module exited.
 * @param ev: the event sent to the module.
 *	Returned is the event to send to the next module.
 * @return true if processing continues at the new module.
 *	false if no further processing is needed.
 */
static int
mesh_continue(struct mesh_area* mesh, struct mesh_state* mstate,
	enum module_ext_state s, enum module_ev* ev)
{
	mstate->num_activated++;
	if(mstate->num_activated > MESH_MAX_ACTIVATION) {
		/* module is looping. Stop it. */
		log_err("internal error: looping module (%s) stopped",
			mesh->mods.mod[mstate->s.curmod]->name);
		log_query_info(NO_VERBOSE, "pass error for qstate",
			&mstate->s.qinfo);
		s = module_error;
	}
	if(s == module_wait_module || s == module_restart_next) {
		/* start next module */
		mstate->s.curmod++;
		if(mesh->mods.num == mstate->s.curmod) {
			log_err("Cannot pass to next module; at last module");
			log_query_info(VERB_QUERY, "pass error for qstate",
				&mstate->s.qinfo);
			mstate->s.curmod--;
			return mesh_continue(mesh, mstate, module_error, ev);
		}
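		/* For module_restart_next, wipe the per-module state of
		 * the next module (curmod has just been incremented) and
		 * of every later module, so that they start from a clean
		 * slate if control passes to them again. */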
		if(s == module_restart_next) {
			int curmod = mstate->s.curmod;
			for(; mstate->s.curmod < mesh->mods.num;
				mstate->s.curmod++) {
				fptr_ok(fptr_whitelist_mod_clear(
					mesh->mods.mod[mstate->s.curmod]->clear));
				(*mesh->mods.mod[mstate->s.curmod]->clear)
					(&mstate->s, mstate->s.curmod);
				mstate->s.minfo[mstate->s.curmod] = NULL;
			}
			mstate->s.curmod = curmod;
		}
		*ev = module_event_pass;
		return 1;
	}
	if(s == module_wait_subquery && mstate->sub_set.count == 0) {
		log_err("module cannot wait for subquery, subquery list empty");
		log_query_info(VERB_QUERY, "pass error for qstate",
			&mstate->s.qinfo);
		s = module_error;
	}
	if(s == module_error && mstate->s.return_rcode == LDNS_RCODE_NOERROR) {
		/* error is bad, handle pass back up below */
		mstate->s.return_rcode = LDNS_RCODE_SERVFAIL;
	}
	if(s == module_error) {
		mesh_query_done(mstate);
		mesh_walk_supers(mesh, mstate);
		mesh_state_delete(&mstate->s);
		return 0;
	}
	if(s == module_finished) {
		if(mstate->s.curmod == 0) {
			struct query_info* qinfo = NULL;
			uint16_t qflags;
			int rpz_p = 0;

			mesh_query_done(mstate);
			mesh_walk_supers(mesh, mstate);

			/* If the answer to the query needs to be refetched
			 * from an external DNS server, we'll need to schedule
			 * a prefetch after removing the current state, so
			 * we need to make a copy of the query info here. */
			if(mstate->s.need_refetch) {
				mesh_copy_qinfo(mstate, &qinfo, &qflags);
				rpz_p = mstate->s.rpz_passthru;
			}

			mesh_state_delete(&mstate->s);
			if(qinfo) {
				mesh_schedule_prefetch(mesh, qinfo, qflags,
					0, 1, rpz_p);
			}
			return 0;
		}
		/* pass along the locus of control */
		mstate->s.curmod--;
		*ev = module_event_moddone;
		return 1;
	}
	return 0;
}

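/* The driver loop in mesh_run below repeatedly runs the current state's
 * module, then lets mesh_continue() decide whether to keep going in the
 * same state; when that state needs no more work, it pops another
 * runnable state off the run tree until none remain. Note that the
 * outbound entry e is only passed to the first module activation; it is
 * cleared afterwards so later activations do not see a stale pointer. */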
void mesh_run(struct mesh_area* mesh, struct mesh_state* mstate,
	enum module_ev ev, struct outbound_entry* e)
{
	enum module_ext_state s;
	verbose(VERB_ALGO, "mesh_run: start");
	while(mstate) {
		/* run the module */
		fptr_ok(fptr_whitelist_mod_operate(
			mesh->mods.mod[mstate->s.curmod]->operate));
		(*mesh->mods.mod[mstate->s.curmod]->operate)
			(&mstate->s, ev, mstate->s.curmod, e);

		/* examine results */
		mstate->s.reply = NULL;
		regional_free_all(mstate->s.env->scratch);
		s = mstate->s.ext_state[mstate->s.curmod];
		verbose(VERB_ALGO, "mesh_run: %s module exit state is %s",
			mesh->mods.mod[mstate->s.curmod]->name, strextstate(s));
		e = NULL;
		if(mesh_continue(mesh, mstate, s, &ev))
			continue;

		/* run more modules */
		ev = module_event_pass;
		if(mesh->run.count > 0) {
			/* pop random element off the runnable tree */
			mstate = (struct mesh_state*)mesh->run.root->key;
			(void)rbtree_delete(&mesh->run, mstate);
		} else mstate = NULL;
	}
	if(verbosity >= VERB_ALGO) {
		mesh_stats(mesh, "mesh_run: end");
		mesh_log_list(mesh);
	}
}

void
mesh_log_list(struct mesh_area* mesh)
{
	char buf[30];
	struct mesh_state* m;
	int num = 0;
	RBTREE_FOR(m, struct mesh_state*, &mesh->all) {
		snprintf(buf, sizeof(buf), "%d%s%s%s%s%s%s mod%d %s%s",
			num++, (m->s.is_priming)?"p":"", /* prime */
			(m->s.is_valrec)?"v":"", /* valrec */
			(m->s.query_flags&BIT_RD)?"RD":"",
			(m->s.query_flags&BIT_CD)?"CD":"",
			(m->super_set.count==0)?"d":"", /* detached */
			(m->sub_set.count!=0)?"c":"", /* children */
			m->s.curmod, (m->reply_list)?"rep":"", /* has reply */
			(m->cb_list)?"cb":"" /* callbacks */
			);
		log_query_info(VERB_ALGO, buf, &m->s.qinfo);
	}
}

void
mesh_stats(struct mesh_area* mesh, const char* str)
{
	verbose(VERB_DETAIL, "%s %u recursion states (%u with reply, "
		"%u detached), %u waiting replies, %u recursion replies "
		"sent, %u replies dropped, %u states jostled out",
		str, (unsigned)mesh->all.count,
		(unsigned)mesh->num_reply_states,
		(unsigned)mesh->num_detached_states,
		(unsigned)mesh->num_reply_addrs,
		(unsigned)mesh->replies_sent,
		(unsigned)mesh->stats_dropped,
		(unsigned)mesh->stats_jostled);
	if(mesh->replies_sent > 0) {
		struct timeval avg;
		timeval_divide(&avg, &mesh->replies_sum_wait,
			mesh->replies_sent);
		log_info("average recursion processing time "
			ARG_LL "d.%6.6d sec",
			(long long)avg.tv_sec, (int)avg.tv_usec);
		log_info("histogram of recursion processing times");
		timehist_log(mesh->histogram, "recursions");
	}
}

void
mesh_stats_clear(struct mesh_area* mesh)
{
	if(!mesh)
		return;
	mesh->replies_sent = 0;
	mesh->replies_sum_wait.tv_sec = 0;
	mesh->replies_sum_wait.tv_usec = 0;
	mesh->stats_jostled = 0;
	mesh->stats_dropped = 0;
	timehist_clear(mesh->histogram);
	mesh->ans_secure = 0;
	mesh->ans_bogus = 0;
	mesh->ans_expired = 0;
	memset(&mesh->ans_rcode[0], 0, sizeof(size_t)*UB_STATS_RCODE_NUM);
	memset(&mesh->rpz_action[0], 0, sizeof(size_t)*UB_STATS_RPZ_ACTION_NUM);
	mesh->ans_nodata = 0;
}

size_t
mesh_get_mem(struct mesh_area* mesh)
{
	struct mesh_state* m;
	size_t s = sizeof(*mesh) + sizeof(struct timehist) +
		sizeof(struct th_buck)*mesh->histogram->num +
		sizeof(sldns_buffer) + sldns_buffer_capacity(mesh->qbuf_bak);
	RBTREE_FOR(m, struct mesh_state*, &mesh->all) {
		/* everything, including m itself, is allocated in the
		 * qstate region */
		s += regional_get_mem(m->s.region);
	}
	return s;
}

int
mesh_detect_cycle(struct module_qstate* qstate, struct query_info* qinfo,
	uint16_t flags, int prime, int valrec)
{
	struct mesh_area* mesh = qstate->env->mesh;
	struct mesh_state* dep_m = NULL;
	dep_m = mesh_area_find(mesh, NULL, qinfo, flags, prime, valrec);
	return mesh_detect_cycle_found(qstate, dep_m);
}

void mesh_list_insert(struct mesh_state* m, struct mesh_state** fp,
	struct mesh_state** lp)
{
	/* insert as last element */
	m->prev = *lp;
	m->next = NULL;
	if(*lp)
		(*lp)->next = m;
	else *fp = m;
	*lp = m;
}

void mesh_list_remove(struct mesh_state* m, struct mesh_state** fp,
	struct mesh_state** lp)
{
	if(m->next)
		m->next->prev = m->prev;
	else *lp = m->prev;
	if(m->prev)
		m->prev->next = m->next;
	else *fp = m->next;
}

void mesh_state_remove_reply(struct mesh_area* mesh, struct mesh_state* m,
	struct comm_point* cp)
{
	struct mesh_reply* n, *prev = NULL;
	n = m->reply_list;
	/* during mesh state cleanup the reply_list is set to NULL, so
	 * that the accounting is not done twice */
	if(!n) return; /* nothing to remove, also no accounting needed */
	while(n) {
		if(n->query_reply.c == cp) {
			/* unlink it */
			if(prev) prev->next = n->next;
			else m->reply_list = n->next;
			/* delete it, but allocated in m region */
			log_assert(mesh->num_reply_addrs > 0);
			mesh->num_reply_addrs--;

			/* prev = prev; */
			n = n->next;
			continue;
		}
		prev = n;
		n = n->next;
	}
	/* it was not detached (because it had a reply list), could be now */
	if(!m->reply_list && !m->cb_list
		&& m->super_set.count == 0) {
		mesh->num_detached_states++;
	}
	/* if there are no more replies in the mstate, it is no longer a
	 * reply_state */
	if(!m->reply_list && !m->cb_list) {
		log_assert(mesh->num_reply_states > 0);
		mesh->num_reply_states--;
	}
}

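/** Apply the response-ip (and RPZ) action, if any, to the reply about
 * to be encoded, for address query types. Returns 0 on failure. On
 * success *encode_repp holds the reply to encode; it is set to NULL
 * when a deny action means the reply must be dropped. */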
static int
apply_respip_action(struct module_qstate* qstate,
	const struct query_info* qinfo, struct respip_client_info* cinfo,
	struct respip_action_info* actinfo, struct reply_info* rep,
	struct ub_packed_rrset_key** alias_rrset,
	struct reply_info** encode_repp, struct auth_zones* az)
{
	if(qinfo->qtype != LDNS_RR_TYPE_A &&
		qinfo->qtype != LDNS_RR_TYPE_AAAA &&
		qinfo->qtype != LDNS_RR_TYPE_ANY)
		return 1;

	if(!respip_rewrite_reply(qinfo, cinfo, rep, encode_repp, actinfo,
		alias_rrset, 0, qstate->region, az, NULL))
		return 0;

	/* xxx_deny actions mean dropping the reply, unless the original reply
	 * was redirected to response-ip data. */
	if((actinfo->action == respip_deny ||
		actinfo->action == respip_inform_deny) &&
		*encode_repp == rep)
		*encode_repp = NULL;

	return 1;
}

void
mesh_serve_expired_callback(void* arg)
{
	struct mesh_state* mstate = (struct mesh_state*) arg;
	struct module_qstate* qstate = &mstate->s;
	struct mesh_reply* r;
	struct mesh_area* mesh = qstate->env->mesh;
	struct dns_msg* msg;
	struct mesh_cb* c;
	struct mesh_reply* prev = NULL;
	struct sldns_buffer* prev_buffer = NULL;
	struct sldns_buffer* r_buffer = NULL;
	struct reply_info* partial_rep = NULL;
	struct ub_packed_rrset_key* alias_rrset = NULL;
	struct reply_info* encode_rep = NULL;
	struct respip_action_info actinfo;
	struct query_info* lookup_qinfo = &qstate->qinfo;
	struct query_info qinfo_tmp;
	struct timeval tv = {0, 0};
	int must_validate = (!(qstate->query_flags&BIT_CD)
		|| qstate->env->cfg->ignore_cd) && qstate->env->need_to_validate;
	if(!qstate->serve_expired_data) return;
	verbose(VERB_ALGO, "Serve expired: Trying to reply with expired data");
	comm_timer_delete(qstate->serve_expired_data->timer);
	qstate->serve_expired_data->timer = NULL;
	/* If is_drop is set, or no_cache_lookup is set (by modules that
	 * handle their own cache, e.g. subnetmod), ignore stale data from
	 * the main cache. */
	if(qstate->no_cache_lookup || qstate->is_drop) {
		verbose(VERB_ALGO,
			"Serve expired: Not allowed to look into cache for stale");
		return;
	}
	/* The following while loop is used instead of the
	 * `goto lookup_cache` construct used in the worker. */
	while(1) {
		fptr_ok(fptr_whitelist_serve_expired_lookup(
			qstate->serve_expired_data->get_cached_answer));
		msg = (*qstate->serve_expired_data->get_cached_answer)(qstate,
			lookup_qinfo);
		if(!msg)
			return;
		/* Reset these in case we pass a second time from here. */
		encode_rep = msg->rep;
		memset(&actinfo, 0, sizeof(actinfo));
		actinfo.action = respip_none;
		alias_rrset = NULL;
		if((mesh->use_response_ip || mesh->use_rpz) &&
			!partial_rep && !apply_respip_action(qstate, &qstate->qinfo,
			qstate->client_info, &actinfo, msg->rep, &alias_rrset, &encode_rep,
			qstate->env->auth_zones)) {
			return;
		} else if(partial_rep &&
			!respip_merge_cname(partial_rep, &qstate->qinfo, msg->rep,
			qstate->client_info, must_validate, &encode_rep, qstate->region,
			qstate->env->auth_zones)) {
			return;
		}
		if(!encode_rep || alias_rrset) {
			if(!encode_rep) {
				/* Needs drop */
				return;
			} else {
				/* A partial CNAME chain is found. */
				partial_rep = encode_rep;
			}
		}
		/* We've found a partial reply ending with an
		 * alias. Replace the lookup qinfo for the
		 * alias target and lookup the cache again to
		 * (possibly) complete the reply. As we're
		 * passing the "base" reply, there will be no
		 * more alias chasing. */
		if(partial_rep) {
			memset(&qinfo_tmp, 0, sizeof(qinfo_tmp));
			get_cname_target(alias_rrset, &qinfo_tmp.qname,
				&qinfo_tmp.qname_len);
			if(!qinfo_tmp.qname) {
				log_err("Serve expired: unexpected: invalid answer alias");
				return;
			}
			qinfo_tmp.qtype = qstate->qinfo.qtype;
			qinfo_tmp.qclass = qstate->qinfo.qclass;
			lookup_qinfo = &qinfo_tmp;
			continue;
		}
		break;
	}

	if(verbosity >= VERB_ALGO)
		log_dns_msg("Serve expired lookup", &qstate->qinfo, msg->rep);

	for(r = mstate->reply_list; r; r = r->next) {
		tv = r->start_time;

		/* If address info is returned, it means the action should be an
		 * 'inform' variant and the information should be logged. */
		if(actinfo.addrinfo) {
			respip_inform_print(&actinfo, r->qname,
				qstate->qinfo.qtype, qstate->qinfo.qclass,
				r->local_alias, &r->query_reply);

			if(qstate->env->cfg->stat_extended && actinfo.rpz_used) {
				if(actinfo.rpz_disabled)
					qstate->env->mesh->rpz_action[RPZ_DISABLED_ACTION]++;
				if(actinfo.rpz_cname_override)
					qstate->env->mesh->rpz_action[RPZ_CNAME_OVERRIDE_ACTION]++;
				else
					qstate->env->mesh->rpz_action[
						respip_action_to_rpz_action(actinfo.action)]++;
			}
		}

		/* Add EDE Stale Answer (RFC 8914). Ignore the global ede
		 * setting, as this is a warning instead of an error. */
		if(r->edns.edns_present && qstate->env->cfg->ede_serve_expired &&
			qstate->env->cfg->ede) {
			edns_opt_list_append_ede(&r->edns.opt_list_out,
				mstate->s.region, LDNS_EDE_STALE_ANSWER, NULL);
		}

		r_buffer = r->query_reply.c->buffer;
		if(r->query_reply.c->tcp_req_info)
			r_buffer = r->query_reply.c->tcp_req_info->spool_buffer;
		mesh_send_reply(mstate, LDNS_RCODE_NOERROR, msg->rep,
			r, r_buffer, prev, prev_buffer);
		if(r->query_reply.c->tcp_req_info)
			tcp_req_info_remove_mesh_state(r->query_reply.c->tcp_req_info, mstate);
		prev = r;
		prev_buffer = r_buffer;

		/* Account for each reply sent. */
		mesh->ans_expired++;
	}
	if(mstate->reply_list) {
		mstate->reply_list = NULL;
		if(!mstate->reply_list && !mstate->cb_list) {
			log_assert(mesh->num_reply_states > 0);
			mesh->num_reply_states--;
			if(mstate->super_set.count == 0) {
				mesh->num_detached_states++;
			}
		}
	}
	while((c = mstate->cb_list) != NULL) {
		/* take this cb off the list, so that the list can be
		 * changed, e.g. by adds from the callback routine */
		if(!mstate->reply_list && mstate->cb_list && !c->next) {
			/* was a reply state, not anymore */
			log_assert(qstate->env->mesh->num_reply_states > 0);
			qstate->env->mesh->num_reply_states--;
		}
		mstate->cb_list = c->next;
		if(!mstate->reply_list && !mstate->cb_list &&
			mstate->super_set.count == 0)
			qstate->env->mesh->num_detached_states++;
		mesh_do_callback(mstate, LDNS_RCODE_NOERROR, msg->rep, c, &tv);
	}
}

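/* Note (descriptive, based on how the count is used): the mesh is
 * considered full for jostling purposes once the number of states
 * reaches max_reply_states; this predicate only reports that the
 * configured capacity has been reached, and the decision to jostle out
 * (replace) an old state or drop the new query is made by the caller. */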
int mesh_jostle_exceeded(struct mesh_area* mesh)
{
	if(mesh->all.count < mesh->max_reply_states)
		return 0;
	return 1;
}