1 /*
2 * iterator/iter_utils.c - iterative resolver module utility functions.
3 *
4 * Copyright (c) 2007, NLnet Labs. All rights reserved.
5 *
6 * This software is open source.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 *
12 * Redistributions of source code must retain the above copyright notice,
13 * this list of conditions and the following disclaimer.
14 *
15 * Redistributions in binary form must reproduce the above copyright notice,
16 * this list of conditions and the following disclaimer in the documentation
17 * and/or other materials provided with the distribution.
18 *
19 * Neither the name of the NLNET LABS nor the names of its contributors may
20 * be used to endorse or promote products derived from this software without
21 * specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
29 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
30 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
31 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 */
35
36 /**
37 * \file
38 *
39 * This file contains functions to assist the iterator module.
40 * Configuration options. Forward zones.
41 */
42 #include "config.h"
43 #include "iterator/iter_utils.h"
44 #include "iterator/iterator.h"
45 #include "iterator/iter_hints.h"
46 #include "iterator/iter_fwd.h"
47 #include "iterator/iter_donotq.h"
48 #include "iterator/iter_delegpt.h"
49 #include "iterator/iter_priv.h"
50 #include "services/cache/infra.h"
51 #include "services/cache/dns.h"
52 #include "services/cache/rrset.h"
53 #include "services/outside_network.h"
54 #include "util/net_help.h"
55 #include "util/module.h"
56 #include "util/log.h"
57 #include "util/config_file.h"
58 #include "util/regional.h"
59 #include "util/data/msgparse.h"
60 #include "util/data/dname.h"
61 #include "util/random.h"
62 #include "util/fptr_wlist.h"
63 #include "validator/val_anchor.h"
64 #include "validator/val_kcache.h"
65 #include "validator/val_kentry.h"
66 #include "validator/val_utils.h"
67 #include "validator/val_sigcrypt.h"
68 #include "sldns/sbuffer.h"
69 #include "sldns/str2wire.h"
70
71 /** time when nameserver glue is said to be 'recent' */
72 #define SUSPICION_RECENT_EXPIRY 86400
73
74 /** fillup fetch policy array */
75 static void
76 fetch_fill(struct iter_env* ie, const char* str)
77 {
78 char* s = (char*)str, *e;
79 int i;
80 for(i=0; i<ie->max_dependency_depth+1; i++) {
81 ie->target_fetch_policy[i] = strtol(s, &e, 10);
82 if(s == e)
83 fatal_exit("cannot parse fetch policy number %s", s);
84 s = e;
85 }
86 }
87
88 /** Read config string that represents the target fetch policy */
89 static int
90 read_fetch_policy(struct iter_env* ie, const char* str)
91 {
92 int count = cfg_count_numbers(str);
93 if(count < 1) {
94 log_err("Cannot parse target fetch policy: \"%s\"", str);
95 return 0;
96 }
97 ie->max_dependency_depth = count - 1;
98 ie->target_fetch_policy = (int*)calloc(
99 (size_t)ie->max_dependency_depth+1, sizeof(int));
100 if(!ie->target_fetch_policy) {
101 log_err("alloc fetch policy: out of memory");
102 return 0;
103 }
104 fetch_fill(ie, str);
105 return 1;
106 }
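
/*
 * Worked example (illustrative): with the documented default
 * target-fetch-policy string "3 2 1 0 0", cfg_count_numbers() returns 5,
 * so max_dependency_depth becomes 4 and target_fetch_policy is filled as
 * {3, 2, 1, 0, 0}: the original query may spawn up to 3 pre-emptive
 * nameserver-address fetches, a query one dependency level deeper up to 2,
 * and levels configured with 0 fetch target addresses only when needed.
 */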
107
108 /** apply config caps whitelist items to name tree */
109 static int
110 caps_white_apply_cfg(rbtree_type* ntree, struct config_file* cfg)
111 {
112 struct config_strlist* p;
113 for(p=cfg->caps_whitelist; p; p=p->next) {
114 struct name_tree_node* n;
115 size_t len;
116 uint8_t* nm = sldns_str2wire_dname(p->str, &len);
117 if(!nm) {
118 log_err("could not parse %s", p->str);
119 return 0;
120 }
121 n = (struct name_tree_node*)calloc(1, sizeof(*n));
122 if(!n) {
123 log_err("out of memory");
124 free(nm);
125 return 0;
126 }
127 n->node.key = n;
128 n->name = nm;
129 n->len = len;
130 n->labs = dname_count_labels(nm);
131 n->dclass = LDNS_RR_CLASS_IN;
132 if(!name_tree_insert(ntree, n, nm, len, n->labs, n->dclass)) {
133 /* duplicate element ignored, idempotent */
134 free(n->name);
135 free(n);
136 }
137 }
138 name_tree_init_parents(ntree);
139 return 1;
140 }
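
/*
 * A minimal lookup sketch (illustrative, not called from this file): it
 * assumes the name_tree_lookup() helper from util/storage/dnstree.h
 * returns the closest enclosing whitelist entry, or NULL when no entry
 * covers the name.
 */
#if 0
static int
caps_white_covers(rbtree_type* ntree, uint8_t* qname, size_t qname_len)
{
	return ntree && name_tree_lookup(ntree, qname, qname_len,
		dname_count_labels(qname), LDNS_RR_CLASS_IN) != NULL;
}
#endif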
141
142 int
143 iter_apply_cfg(struct iter_env* iter_env, struct config_file* cfg)
144 {
145 int i;
146 /* target fetch policy */
147 if(!read_fetch_policy(iter_env, cfg->target_fetch_policy))
148 return 0;
149 for(i=0; i<iter_env->max_dependency_depth+1; i++)
150 verbose(VERB_QUERY, "target fetch policy for level %d is %d",
151 i, iter_env->target_fetch_policy[i]);
152
153 if(!iter_env->donotq)
154 iter_env->donotq = donotq_create();
155 if(!iter_env->donotq || !donotq_apply_cfg(iter_env->donotq, cfg)) {
156 log_err("Could not set donotqueryaddresses");
157 return 0;
158 }
159 if(!iter_env->priv)
160 iter_env->priv = priv_create();
161 if(!iter_env->priv || !priv_apply_cfg(iter_env->priv, cfg)) {
162 log_err("Could not set private addresses");
163 return 0;
164 }
165 if(cfg->caps_whitelist) {
166 if(!iter_env->caps_white)
167 iter_env->caps_white = rbtree_create(name_tree_compare);
168 if(!iter_env->caps_white || !caps_white_apply_cfg(
169 iter_env->caps_white, cfg)) {
170 log_err("Could not set capsforid whitelist");
171 return 0;
172 }
173
174 }
175 iter_env->supports_ipv6 = cfg->do_ip6;
176 iter_env->supports_ipv4 = cfg->do_ip4;
177 iter_env->outbound_msg_retry = cfg->outbound_msg_retry;
178 return 1;
179 }
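
/*
 * The fields applied above roughly correspond to unbound.conf settings
 * like the following (a hedged sketch; option names as commonly
 * documented, availability depends on the unbound version):
 *
 *   server:
 *     target-fetch-policy: "3 2 1 0 0"
 *     do-not-query-address: 192.0.2.1
 *     private-address: 10.0.0.0/8
 *     caps-whitelist: "example.com"
 *     do-ip4: yes
 *     do-ip6: yes
 *     outbound-msg-retry: 5
 */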
180
181 /** filter out unsuitable targets
182 * @param iter_env: iterator environment with ipv6-support flag.
183 * @param env: module environment with infra cache.
184 * @param name: zone name
185 * @param namelen: length of name
186 * @param qtype: query type (host order).
187 * @param now: current time
188 * @param a: address in delegation point we are examining.
189 * @return an integer that signals the target suitability.
190 * as follows:
191 * -1: The address should be omitted from the list.
192 * Because:
193 * o The address is bogus (DNSSEC validation failure).
194 * o Listed as donotquery
195 * o is ipv6 but no ipv6 support (in operating system).
196 * o is ipv4 but no ipv4 support (in operating system).
197 * o is lame
198 * Otherwise, an rtt in milliseconds.
199 * 0 .. USEFUL_SERVER_TOP_TIMEOUT-1
200 * The roundtrip time timeout estimate, less than 2 minutes.
201 * Note that util/rtt.c has a MIN_TIMEOUT of 50 msec, thus
202 * values 0 .. 49 are not used, unless that is changed.
203 * USEFUL_SERVER_TOP_TIMEOUT
204 * This exact value is given for unresponsive, blacklisted servers.
205 * USEFUL_SERVER_TOP_TIMEOUT+1
206 * For non-blacklisted servers: huge timeout, but has traffic.
207 * USEFUL_SERVER_TOP_TIMEOUT*1 ..
208 * parent-side lame servers get this penalty. A dispreferential
209 * server. (lame in delegpt).
210 * USEFUL_SERVER_TOP_TIMEOUT*2 ..
211 * dnsseclame servers get penalty
212 * USEFUL_SERVER_TOP_TIMEOUT*3 ..
213 * recursion lame servers get penalty
214 * UNKNOWN_SERVER_NICENESS
215 * If no information is known about the server, this is
216 * returned. 376 msec or so.
217 * +BLACKLIST_PENALTY (USEFUL_SERVER_TOP_TIMEOUT*4) for dnssec failed IPs.
218 *
219 * When the final value chosen is dnsseclame, dnssec-lameness checking
220 * is turned off (so we do not discard the reply).
221 * When the final value chosen is recursionlame, the RD bit is set on the query.
222 * Because of the numbering, recursionlame servers also have dnssec-lameness
223 * checking turned off.
224 */
225 static int
226 iter_filter_unsuitable(struct iter_env* iter_env, struct module_env* env,
227 uint8_t* name, size_t namelen, uint16_t qtype, time_t now,
228 struct delegpt_addr* a)
229 {
230 int rtt, lame, reclame, dnsseclame;
231 if(a->bogus)
232 return -1; /* address of server is bogus */
233 if(donotq_lookup(iter_env->donotq, &a->addr, a->addrlen)) {
234 log_addr(VERB_ALGO, "skip addr on the donotquery list",
235 &a->addr, a->addrlen);
236 return -1; /* server is on the donotquery list */
237 }
238 if(!iter_env->supports_ipv6 && addr_is_ip6(&a->addr, a->addrlen)) {
239 return -1; /* there is no ip6 available */
240 }
241 if(!iter_env->supports_ipv4 && !addr_is_ip6(&a->addr, a->addrlen)) {
242 return -1; /* there is no ip4 available */
243 }
244 /* check lameness - need zone , class info */
245 if(infra_get_lame_rtt(env->infra_cache, &a->addr, a->addrlen,
246 name, namelen, qtype, &lame, &dnsseclame, &reclame,
247 &rtt, now)) {
248 log_addr(VERB_ALGO, "servselect", &a->addr, a->addrlen);
249 verbose(VERB_ALGO, " rtt=%d%s%s%s%s", rtt,
250 lame?" LAME":"",
251 dnsseclame?" DNSSEC_LAME":"",
252 reclame?" REC_LAME":"",
253 a->lame?" ADDR_LAME":"");
254 if(lame)
255 return -1; /* server is lame */
256 else if(rtt >= USEFUL_SERVER_TOP_TIMEOUT)
257 /* server is unresponsive;
258 * we used to return TOP_TIMEOUT, but that was fairly
259 * useless, because a value == TOP_TIMEOUT gets dropped
260 * later as blacklisted; instead, remove it here, so
261 * other choices (that are not blacklisted) can be
262 * tried */
263 return -1;
264 /* select remainder from worst to best */
265 else if(reclame)
266 return rtt+USEFUL_SERVER_TOP_TIMEOUT*3; /* nonpref */
267 else if(dnsseclame || a->dnsseclame)
268 return rtt+USEFUL_SERVER_TOP_TIMEOUT*2; /* nonpref */
269 else if(a->lame)
270 return rtt+USEFUL_SERVER_TOP_TIMEOUT+1; /* nonpref */
271 else return rtt;
272 }
273 /* no server information present */
274 if(a->dnsseclame)
275 return UNKNOWN_SERVER_NICENESS+USEFUL_SERVER_TOP_TIMEOUT*2; /* nonpref */
276 else if(a->lame)
277 return USEFUL_SERVER_TOP_TIMEOUT+1+UNKNOWN_SERVER_NICENESS; /* nonpref */
278 return UNKNOWN_SERVER_NICENESS;
279 }
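
/*
 * Worked example of the bands above (values derived from this function):
 * an address with a cached rtt of 80 msec and no lameness yields 80; the
 * same address marked dnsseclame yields 80 + USEFUL_SERVER_TOP_TIMEOUT*2;
 * marked recursionlame it yields 80 + USEFUL_SERVER_TOP_TIMEOUT*3; an
 * address with no infra cache entry at all yields UNKNOWN_SERVER_NICENESS;
 * bogus, donotquery and unsupported address family entries are removed (-1).
 */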
280
281 /** lookup RTT information, and also store fastest rtt (if any) */
282 static int
283 iter_fill_rtt(struct iter_env* iter_env, struct module_env* env,
284 uint8_t* name, size_t namelen, uint16_t qtype, time_t now,
285 struct delegpt* dp, int* best_rtt, struct sock_list* blacklist,
286 size_t* num_suitable_results)
287 {
288 int got_it = 0;
289 struct delegpt_addr* a;
290 *num_suitable_results = 0;
291
292 if(dp->bogus)
293 return 0; /* NS bogus, all bogus, nothing found */
294 for(a=dp->result_list; a; a = a->next_result) {
295 a->sel_rtt = iter_filter_unsuitable(iter_env, env,
296 name, namelen, qtype, now, a);
297 if(a->sel_rtt != -1) {
298 if(sock_list_find(blacklist, &a->addr, a->addrlen))
299 a->sel_rtt += BLACKLIST_PENALTY;
300
301 if(!got_it) {
302 *best_rtt = a->sel_rtt;
303 got_it = 1;
304 } else if(a->sel_rtt < *best_rtt) {
305 *best_rtt = a->sel_rtt;
306 }
307 (*num_suitable_results)++;
308 }
309 }
310 return got_it;
311 }
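
/*
 * Example (derived from the loop above): a result list with sel_rtt values
 * {376, 100, -1, 200} gives *best_rtt = 100 and *num_suitable_results = 3;
 * a blacklisted address keeps competing, but only after BLACKLIST_PENALTY
 * has been added to its sel_rtt.
 */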
312
313 /** compare two rtts, return -1, 0 or 1 */
314 static int
315 rtt_compare(const void* x, const void* y)
316 {
317 if(*(int*)x == *(int*)y)
318 return 0;
319 if(*(int*)x > *(int*)y)
320 return 1;
321 return -1;
322 }
323
324 /** get RTT for the Nth fastest server */
325 static int
326 nth_rtt(struct delegpt_addr* result_list, size_t num_results, size_t n)
327 {
328 int rtt_band;
329 size_t i;
330 int* rtt_list, *rtt_index;
331
332 if(num_results < 1 || n >= num_results) {
333 return -1;
334 }
335
336 rtt_list = calloc(num_results, sizeof(int));
337 if(!rtt_list) {
338 log_err("malloc failure: allocating rtt_list");
339 return -1;
340 }
341 rtt_index = rtt_list;
342
343 for(i=0; i<num_results && result_list; i++) {
344 if(result_list->sel_rtt != -1) {
345 *rtt_index = result_list->sel_rtt;
346 rtt_index++;
347 }
348 result_list=result_list->next_result;
349 }
350 qsort(rtt_list, num_results, sizeof(*rtt_list), rtt_compare);
351
352 log_assert(n > 0);
353 rtt_band = rtt_list[n-1];
354 free(rtt_list);
355
356 return rtt_band;
357 }
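
/*
 * Example (derived from the code above): for suitable rtts {120, 50, 376}
 * and n = 2, the sorted list is {50, 120, 376} and 120 is returned; the
 * caller then uses 120 - low_rtt as the (possibly clamped) rtt band.
 */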
358
359 /** filter the address list, putting best targets at front,
360 * returns number of best targets (or 0, no suitable targets) */
361 static int
362 iter_filter_order(struct iter_env* iter_env, struct module_env* env,
363 uint8_t* name, size_t namelen, uint16_t qtype, time_t now,
364 struct delegpt* dp, int* selected_rtt, int open_target,
365 struct sock_list* blacklist, time_t prefetch)
366 {
367 int got_num = 0, low_rtt = 0, swap_to_front, rtt_band = RTT_BAND, nth;
368 int alllame = 0;
369 size_t num_results;
370 struct delegpt_addr* a, *n, *prev=NULL;
371
372 /* fillup sel_rtt and find best rtt in the bunch */
373 got_num = iter_fill_rtt(iter_env, env, name, namelen, qtype, now, dp,
374 &low_rtt, blacklist, &num_results);
375 if(got_num == 0)
376 return 0;
377 if(low_rtt >= USEFUL_SERVER_TOP_TIMEOUT &&
378 /* If all missing (or not fully resolved) targets are lame,
379 * then use the remaining lame address. */
380 ((delegpt_count_missing_targets(dp, &alllame) > 0 && !alllame) ||
381 open_target > 0)) {
382 verbose(VERB_ALGO, "Bad choices, trying to get more choice");
383 return 0; /* we want more choice. The best choice is a bad one.
384 return 0 to force the caller to fetch more */
385 }
386
387 if(env->cfg->fast_server_permil != 0 && prefetch == 0 &&
388 num_results > env->cfg->fast_server_num &&
389 ub_random_max(env->rnd, 1000) < env->cfg->fast_server_permil) {
390 /* the query is not prefetch, but for a downstream client,
391 * there are more servers available than the fastest N we want
392 * to choose from. Limit our choice to the fastest servers. */
393 nth = nth_rtt(dp->result_list, num_results,
394 env->cfg->fast_server_num);
395 if(nth > 0) {
396 rtt_band = nth - low_rtt;
397 if(rtt_band > RTT_BAND)
398 rtt_band = RTT_BAND;
399 }
400 }
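
/* Example (hedged; fast-server-num and fast-server-permil are the
 * unbound.conf options behind these cfg fields): with fast-server-num: 3
 * and fast-server-permil: 500, roughly half of the non-prefetch queries
 * narrow the rtt band so only the three fastest addresses compete below. */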
401
402 got_num = 0;
403 a = dp->result_list;
404 while(a) {
405 /* skip unsuitable targets */
406 if(a->sel_rtt == -1) {
407 prev = a;
408 a = a->next_result;
409 continue;
410 }
411 /* classify the server address and determine what to do */
412 swap_to_front = 0;
413 if(a->sel_rtt >= low_rtt && a->sel_rtt - low_rtt <= rtt_band) {
414 got_num++;
415 swap_to_front = 1;
416 } else if(a->sel_rtt<low_rtt && low_rtt-a->sel_rtt<=rtt_band) {
417 got_num++;
418 swap_to_front = 1;
419 }
420 /* swap to front if necessary, or move to next result */
421 if(swap_to_front && prev) {
422 n = a->next_result;
423 prev->next_result = n;
424 a->next_result = dp->result_list;
425 dp->result_list = a;
426 a = n;
427 } else {
428 prev = a;
429 a = a->next_result;
430 }
431 }
432 *selected_rtt = low_rtt;
433
434 if (env->cfg->prefer_ip6) {
435 int got_num6 = 0;
436 int low_rtt6 = 0;
437 int i;
438 int attempt = -1; /* filter to make sure addresses have
439 fewer attempts on them than the first, to force round
440 robin when all the IPv6 addresses fail */
441 int num4ok = 0; /* number ip4 at low attempt count */
442 int num4_lowrtt = 0;
443 prev = NULL;
444 a = dp->result_list;
445 for(i = 0; i < got_num; i++) {
446 if(!a) break; /* robustness */
447 swap_to_front = 0;
448 if(a->addr.ss_family != AF_INET6 && attempt == -1) {
449 /* if we only have ip4 at low attempt count,
450 * then ip6 is failing, and we need to
451 * select one of the remaining IPv4 addrs */
452 attempt = a->attempts;
453 num4ok++;
454 num4_lowrtt = a->sel_rtt;
455 } else if(a->addr.ss_family != AF_INET6 && attempt == a->attempts) {
456 num4ok++;
457 if(num4_lowrtt == 0 || a->sel_rtt < num4_lowrtt) {
458 num4_lowrtt = a->sel_rtt;
459 }
460 }
461 if(a->addr.ss_family == AF_INET6) {
462 if(attempt == -1) {
463 attempt = a->attempts;
464 } else if(a->attempts > attempt) {
465 break;
466 }
467 got_num6++;
468 swap_to_front = 1;
469 if(low_rtt6 == 0 || a->sel_rtt < low_rtt6) {
470 low_rtt6 = a->sel_rtt;
471 }
472 }
473 /* swap to front if IPv6, or move to next result */
474 if(swap_to_front && prev) {
475 n = a->next_result;
476 prev->next_result = n;
477 a->next_result = dp->result_list;
478 dp->result_list = a;
479 a = n;
480 } else {
481 prev = a;
482 a = a->next_result;
483 }
484 }
485 if(got_num6 > 0) {
486 got_num = got_num6;
487 *selected_rtt = low_rtt6;
488 } else if(num4ok > 0) {
489 got_num = num4ok;
490 *selected_rtt = num4_lowrtt;
491 }
492 } else if (env->cfg->prefer_ip4) {
493 int got_num4 = 0;
494 int low_rtt4 = 0;
495 int i;
496 int attempt = -1; /* filter to make sure addresses have
497 fewer attempts on them than the first, to force round
498 robin when all the IPv4 addresses fail */
499 int num6ok = 0; /* number ip6 at low attempt count */
500 int num6_lowrtt = 0;
501 prev = NULL;
502 a = dp->result_list;
503 for(i = 0; i < got_num; i++) {
504 if(!a) break; /* robustness */
505 swap_to_front = 0;
506 if(a->addr.ss_family != AF_INET && attempt == -1) {
507 /* if we only have ip6 at low attempt count,
508 * then ip4 is failing, and we need to
509 * select one of the remaining IPv6 addrs */
510 attempt = a->attempts;
511 num6ok++;
512 num6_lowrtt = a->sel_rtt;
513 } else if(a->addr.ss_family != AF_INET && attempt == a->attempts) {
514 num6ok++;
515 if(num6_lowrtt == 0 || a->sel_rtt < num6_lowrtt) {
516 num6_lowrtt = a->sel_rtt;
517 }
518 }
519 if(a->addr.ss_family == AF_INET) {
520 if(attempt == -1) {
521 attempt = a->attempts;
522 } else if(a->attempts > attempt) {
523 break;
524 }
525 got_num4++;
526 swap_to_front = 1;
527 if(low_rtt4 == 0 || a->sel_rtt < low_rtt4) {
528 low_rtt4 = a->sel_rtt;
529 }
530 }
531 /* swap to front if IPv4, or move to next result */
532 if(swap_to_front && prev) {
533 n = a->next_result;
534 prev->next_result = n;
535 a->next_result = dp->result_list;
536 dp->result_list = a;
537 a = n;
538 } else {
539 prev = a;
540 a = a->next_result;
541 }
542 }
543 if(got_num4 > 0) {
544 got_num = got_num4;
545 *selected_rtt = low_rtt4;
546 } else if(num6ok > 0) {
547 got_num = num6ok;
548 *selected_rtt = num6_lowrtt;
549 }
550 }
551 return got_num;
552 }
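
/*
 * Worked example for the ordering above: with sel_rtt values
 * {50, 120, 500, 376} and low_rtt = 50, every address within the rtt band
 * of the fastest (this example assumes the default RTT_BAND of 400 msec
 * from iterator.h) is swapped to the front, so {50, 120, 376} become the
 * selectable set and got_num is 3, while the 500 msec address stays behind
 * them. With prefer-ip6 (or prefer-ip4) set, a second pass then moves the
 * preferred address family to the front of that set.
 */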
553
554 struct delegpt_addr*
555 iter_server_selection(struct iter_env* iter_env,
556 struct module_env* env, struct delegpt* dp,
557 uint8_t* name, size_t namelen, uint16_t qtype, int* dnssec_lame,
558 int* chase_to_rd, int open_target, struct sock_list* blacklist,
559 time_t prefetch)
560 {
561 int sel;
562 int selrtt;
563 struct delegpt_addr* a, *prev;
564 int num = iter_filter_order(iter_env, env, name, namelen, qtype,
565 *env->now, dp, &selrtt, open_target, blacklist, prefetch);
566
567 if(num == 0)
568 return NULL;
569 verbose(VERB_ALGO, "selrtt %d", selrtt);
570 if(selrtt > BLACKLIST_PENALTY) {
571 if(selrtt-BLACKLIST_PENALTY > USEFUL_SERVER_TOP_TIMEOUT*3) {
572 verbose(VERB_ALGO, "chase to "
573 "blacklisted recursion lame server");
574 *chase_to_rd = 1;
575 }
576 if(selrtt-BLACKLIST_PENALTY > USEFUL_SERVER_TOP_TIMEOUT*2) {
577 verbose(VERB_ALGO, "chase to "
578 "blacklisted dnssec lame server");
579 *dnssec_lame = 1;
580 }
581 } else {
582 if(selrtt > USEFUL_SERVER_TOP_TIMEOUT*3) {
583 verbose(VERB_ALGO, "chase to recursion lame server");
584 *chase_to_rd = 1;
585 }
586 if(selrtt > USEFUL_SERVER_TOP_TIMEOUT*2) {
587 verbose(VERB_ALGO, "chase to dnssec lame server");
588 *dnssec_lame = 1;
589 }
590 if(selrtt == USEFUL_SERVER_TOP_TIMEOUT) {
591 verbose(VERB_ALGO, "chase to blacklisted lame server");
592 return NULL;
593 }
594 }
595
596 if(num == 1) {
597 a = dp->result_list;
598 if(++a->attempts < iter_env->outbound_msg_retry)
599 return a;
600 dp->result_list = a->next_result;
601 return a;
602 }
603
604 /* randomly select a target from the list */
605 log_assert(num > 1);
606 /* grab secure random number, to pick unexpected server.
607 * also we need it to be threadsafe. */
608 sel = ub_random_max(env->rnd, num);
609 a = dp->result_list;
610 prev = NULL;
611 while(sel > 0 && a) {
612 prev = a;
613 a = a->next_result;
614 sel--;
615 }
616 if(!a) /* robustness */
617 return NULL;
618 if(++a->attempts < iter_env->outbound_msg_retry)
619 return a;
620 /* remove it from the delegation point result list */
621 if(prev)
622 prev->next_result = a->next_result;
623 else dp->result_list = a->next_result;
624 return a;
625 }
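
/*
 * Caller sketch (hypothetical variable names; it mirrors the parameters of
 * iter_server_selection() above, not the exact code in iterator.c):
 */
#if 0
int dnssec_lame = 0, chase_to_rd = 0;
struct delegpt_addr* target = iter_server_selection(ie, env, dp,
	dp->name, dp->namelen, qtype, &dnssec_lame, &chase_to_rd,
	open_targets, blacklist, prefetch_time);
if(!target) {
	/* no suitable address right now: resolve more NS targets,
	 * or give up if nothing further can be fetched */
}
#endif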
626
627 struct dns_msg*
628 dns_alloc_msg(sldns_buffer* pkt, struct msg_parse* msg,
629 struct regional* region)
630 {
631 struct dns_msg* m = (struct dns_msg*)regional_alloc(region,
632 sizeof(struct dns_msg));
633 if(!m)
634 return NULL;
635 memset(m, 0, sizeof(*m));
636 if(!parse_create_msg(pkt, msg, NULL, &m->qinfo, &m->rep, region)) {
637 log_err("malloc failure: allocating incoming dns_msg");
638 return NULL;
639 }
640 return m;
641 }
642
643 struct dns_msg*
644 dns_copy_msg(struct dns_msg* from, struct regional* region)
645 {
646 struct dns_msg* m = (struct dns_msg*)regional_alloc(region,
647 sizeof(struct dns_msg));
648 if(!m)
649 return NULL;
650 m->qinfo = from->qinfo;
651 if(!(m->qinfo.qname = regional_alloc_init(region, from->qinfo.qname,
652 from->qinfo.qname_len)))
653 return NULL;
654 if(!(m->rep = reply_info_copy(from->rep, NULL, region)))
655 return NULL;
656 return m;
657 }
658
659 void
660 iter_dns_store(struct module_env* env, struct query_info* msgqinf,
661 struct reply_info* msgrep, int is_referral, time_t leeway, int pside,
662 struct regional* region, uint16_t flags, time_t qstarttime)
663 {
664 if(!dns_cache_store(env, msgqinf, msgrep, is_referral, leeway,
665 pside, region, flags, qstarttime))
666 log_err("out of memory: cannot store data in cache");
667 }
668
669 int
670 iter_ns_probability(struct ub_randstate* rnd, int n, int m)
671 {
672 int sel;
673 if(n == m) /* 100% chance */
674 return 1;
675 /* we do not need secure random numbers here, but
676 * we do need it to be threadsafe, so we use this */
677 sel = ub_random_max(rnd, m);
678 return (sel < n);
679 }
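
/*
 * Example: iter_ns_probability(rnd, 3, 10) returns true with probability
 * 3/10, and iter_ns_probability(rnd, 5, 5) always returns true; callers
 * use this to act on a random n-out-of-m fraction of cases.
 */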
680
681 /** detect dependency cycle for query and target */
682 static int
683 causes_cycle(struct module_qstate* qstate, uint8_t* name, size_t namelen,
684 uint16_t t, uint16_t c)
685 {
686 struct query_info qinf;
687 qinf.qname = name;
688 qinf.qname_len = namelen;
689 qinf.qtype = t;
690 qinf.qclass = c;
691 qinf.local_alias = NULL;
692 fptr_ok(fptr_whitelist_modenv_detect_cycle(
693 qstate->env->detect_cycle));
694 return (*qstate->env->detect_cycle)(qstate, &qinf,
695 (uint16_t)(BIT_RD|BIT_CD), qstate->is_priming,
696 qstate->is_valrec);
697 }
698
699 void
700 iter_mark_cycle_targets(struct module_qstate* qstate, struct delegpt* dp)
701 {
702 struct delegpt_ns* ns;
703 for(ns = dp->nslist; ns; ns = ns->next) {
704 if(ns->resolved)
705 continue;
706 /* see if this ns as target causes dependency cycle */
707 if(causes_cycle(qstate, ns->name, ns->namelen,
708 LDNS_RR_TYPE_AAAA, qstate->qinfo.qclass) ||
709 causes_cycle(qstate, ns->name, ns->namelen,
710 LDNS_RR_TYPE_A, qstate->qinfo.qclass)) {
711 log_nametypeclass(VERB_QUERY, "skipping target due "
712 "to dependency cycle (harden-glue: no may "
713 "fix some of the cycles)",
714 ns->name, LDNS_RR_TYPE_A,
715 qstate->qinfo.qclass);
716 ns->resolved = 1;
717 }
718 }
719 }
720
721 void
722 iter_mark_pside_cycle_targets(struct module_qstate* qstate, struct delegpt* dp)
723 {
724 struct delegpt_ns* ns;
725 for(ns = dp->nslist; ns; ns = ns->next) {
726 if(ns->done_pside4 && ns->done_pside6)
727 continue;
728 /* see if this ns as target causes dependency cycle */
729 if(causes_cycle(qstate, ns->name, ns->namelen,
730 LDNS_RR_TYPE_A, qstate->qinfo.qclass)) {
731 log_nametypeclass(VERB_QUERY, "skipping target due "
732 "to dependency cycle", ns->name,
733 LDNS_RR_TYPE_A, qstate->qinfo.qclass);
734 ns->done_pside4 = 1;
735 }
736 if(causes_cycle(qstate, ns->name, ns->namelen,
737 LDNS_RR_TYPE_AAAA, qstate->qinfo.qclass)) {
738 log_nametypeclass(VERB_QUERY, "skipping target due "
739 "to dependency cycle", ns->name,
740 LDNS_RR_TYPE_AAAA, qstate->qinfo.qclass);
741 ns->done_pside6 = 1;
742 }
743 }
744 }
745
746 int
747 iter_dp_is_useless(struct query_info* qinfo, uint16_t qflags,
748 struct delegpt* dp, int supports_ipv4, int supports_ipv6)
749 {
750 struct delegpt_ns* ns;
751 struct delegpt_addr* a;
752 /* check:
753 * o RD qflag is on.
754 * o no addresses are provided.
755 * o all NS items are required glue.
756 * OR
757 * o RD qflag is on.
758 * o no addresses are provided.
759 * o the query is for one of the nameservers in dp,
760 * and that nameserver is a glue-name for this dp.
761 */
762 if(!(qflags&BIT_RD))
763 return 0;
764 /* if either available or unused targets exist,
765 * the dp is not useless. */
766 for(a = dp->usable_list; a; a = a->next_usable) {
767 if(!addr_is_ip6(&a->addr, a->addrlen) && supports_ipv4)
768 return 0;
769 else if(addr_is_ip6(&a->addr, a->addrlen) && supports_ipv6)
770 return 0;
771 }
772 for(a = dp->result_list; a; a = a->next_result) {
773 if(!addr_is_ip6(&a->addr, a->addrlen) && supports_ipv4)
774 return 0;
775 else if(addr_is_ip6(&a->addr, a->addrlen) && supports_ipv6)
776 return 0;
777 }
778
779 /* see if query is for one of the nameservers, which is glue */
780 if( ((qinfo->qtype == LDNS_RR_TYPE_A && supports_ipv4) ||
781 (qinfo->qtype == LDNS_RR_TYPE_AAAA && supports_ipv6)) &&
782 dname_subdomain_c(qinfo->qname, dp->name) &&
783 delegpt_find_ns(dp, qinfo->qname, qinfo->qname_len))
784 return 1;
785
786 for(ns = dp->nslist; ns; ns = ns->next) {
787 if(ns->resolved) /* skip failed targets */
788 continue;
789 if(!dname_subdomain_c(ns->name, dp->name))
790 return 0; /* one address is not required glue */
791 }
792 return 1;
793 }
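
/*
 * Example (follows the checks above): an RD query for www.example.com
 * against a delegation point for example.com whose only nameserver is
 * ns1.example.com, with no address in the usable or result lists, is
 * useless: the NS name is required glue below the delegation, so nothing
 * can be queried without first obtaining that glue elsewhere.
 */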
794
795 int
796 iter_qname_indicates_dnssec(struct module_env* env, struct query_info *qinfo)
797 {
798 struct trust_anchor* a;
799 if(!env || !env->anchors || !qinfo || !qinfo->qname)
800 return 0;
801 /* a trust anchor exists above the name? */
802 if((a=anchors_lookup(env->anchors, qinfo->qname, qinfo->qname_len,
803 qinfo->qclass))) {
804 if(a->numDS == 0 && a->numDNSKEY == 0) {
805 /* insecure trust point */
806 lock_basic_unlock(&a->lock);
807 return 0;
808 }
809 lock_basic_unlock(&a->lock);
810 return 1;
811 }
812 /* no trust anchor above it. */
813 return 0;
814 }
815
816 int
817 iter_indicates_dnssec(struct module_env* env, struct delegpt* dp,
818 struct dns_msg* msg, uint16_t dclass)
819 {
820 struct trust_anchor* a;
821 /* information not available, !env->anchors can be common */
822 if(!env || !env->anchors || !dp || !dp->name)
823 return 0;
824 /* a trust anchor exists with this name, RRSIGs expected */
825 if((a=anchor_find(env->anchors, dp->name, dp->namelabs, dp->namelen,
826 dclass))) {
827 if(a->numDS == 0 && a->numDNSKEY == 0) {
828 /* insecure trust point */
829 lock_basic_unlock(&a->lock);
830 return 0;
831 }
832 lock_basic_unlock(&a->lock);
833 return 1;
834 }
835 /* see if DS rrset was given, in AUTH section */
836 if(msg && msg->rep &&
837 reply_find_rrset_section_ns(msg->rep, dp->name, dp->namelen,
838 LDNS_RR_TYPE_DS, dclass))
839 return 1;
840 /* look in key cache */
841 if(env->key_cache) {
842 struct key_entry_key* kk = key_cache_obtain(env->key_cache,
843 dp->name, dp->namelen, dclass, env->scratch, *env->now);
844 if(kk) {
845 if(query_dname_compare(kk->name, dp->name) == 0) {
846 if(key_entry_isgood(kk) || key_entry_isbad(kk)) {
847 regional_free_all(env->scratch);
848 return 1;
849 } else if(key_entry_isnull(kk)) {
850 regional_free_all(env->scratch);
851 return 0;
852 }
853 }
854 regional_free_all(env->scratch);
855 }
856 }
857 return 0;
858 }
859
860 int
861 iter_msg_has_dnssec(struct dns_msg* msg)
862 {
863 size_t i;
864 if(!msg || !msg->rep)
865 return 0;
866 for(i=0; i<msg->rep->an_numrrsets + msg->rep->ns_numrrsets; i++) {
867 if(((struct packed_rrset_data*)msg->rep->rrsets[i]->
868 entry.data)->rrsig_count > 0)
869 return 1;
870 }
871 /* empty message has no DNSSEC info, with DNSSEC the reply is
872 * not empty (NSEC) */
873 return 0;
874 }
875
876 int iter_msg_from_zone(struct dns_msg* msg, struct delegpt* dp,
877 enum response_type type, uint16_t dclass)
878 {
879 if(!msg || !dp || !msg->rep || !dp->name)
880 return 0;
881 /* SOA RRset - always from reply zone */
882 if(reply_find_rrset_section_an(msg->rep, dp->name, dp->namelen,
883 LDNS_RR_TYPE_SOA, dclass) ||
884 reply_find_rrset_section_ns(msg->rep, dp->name, dp->namelen,
885 LDNS_RR_TYPE_SOA, dclass))
886 return 1;
887 if(type == RESPONSE_TYPE_REFERRAL) {
888 size_t i;
889 /* if it adds a single label, i.e. we expect .com,
890 * and referral to example.com. NS ... , then origin zone
891 * is .com. For a referral to sub.example.com. NS ... then
892 * we do not know, since example.com. may be in between. */
893 for(i=0; i<msg->rep->an_numrrsets+msg->rep->ns_numrrsets;
894 i++) {
895 struct ub_packed_rrset_key* s = msg->rep->rrsets[i];
896 if(ntohs(s->rk.type) == LDNS_RR_TYPE_NS &&
897 ntohs(s->rk.rrset_class) == dclass) {
898 int l = dname_count_labels(s->rk.dname);
899 if(l == dp->namelabs + 1 &&
900 dname_strict_subdomain(s->rk.dname,
901 l, dp->name, dp->namelabs))
902 return 1;
903 }
904 }
905 return 0;
906 }
907 log_assert(type==RESPONSE_TYPE_ANSWER || type==RESPONSE_TYPE_CNAME);
908 /* not a referral, and not lame delegation (upwards), so,
909 * any NS rrset must be from the zone itself */
910 if(reply_find_rrset_section_an(msg->rep, dp->name, dp->namelen,
911 LDNS_RR_TYPE_NS, dclass) ||
912 reply_find_rrset_section_ns(msg->rep, dp->name, dp->namelen,
913 LDNS_RR_TYPE_NS, dclass))
914 return 1;
915 /* a DNSKEY set is expected at the zone apex as well */
916 /* this is for 'minimal responses' for DNSKEYs */
917 if(reply_find_rrset_section_an(msg->rep, dp->name, dp->namelen,
918 LDNS_RR_TYPE_DNSKEY, dclass))
919 return 1;
920 return 0;
921 }
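
/*
 * Example for the referral case above: with dp->name "com." (2 labels,
 * counting the root) a referral containing "example.com. NS ..." has an
 * NS owner of 3 labels, exactly dp->namelabs + 1, so the message is judged
 * to come from the com zone; a referral to "sub.example.com. NS ..." is
 * not, because an intermediate zone cut at example.com may exist.
 */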
922
923 /**
924 * check equality of two rrsets
925 * @param k1: rrset
926 * @param k2: rrset
927 * @return true if equal
928 */
929 static int
930 rrset_equal(struct ub_packed_rrset_key* k1, struct ub_packed_rrset_key* k2)
931 {
932 struct packed_rrset_data* d1 = (struct packed_rrset_data*)
933 k1->entry.data;
934 struct packed_rrset_data* d2 = (struct packed_rrset_data*)
935 k2->entry.data;
936 size_t i, t;
937 if(k1->rk.dname_len != k2->rk.dname_len ||
938 k1->rk.flags != k2->rk.flags ||
939 k1->rk.type != k2->rk.type ||
940 k1->rk.rrset_class != k2->rk.rrset_class ||
941 query_dname_compare(k1->rk.dname, k2->rk.dname) != 0)
942 return 0;
943 if( /* do not check ttl: d1->ttl != d2->ttl || */
944 d1->count != d2->count ||
945 d1->rrsig_count != d2->rrsig_count ||
946 d1->trust != d2->trust ||
947 d1->security != d2->security)
948 return 0;
949 t = d1->count + d1->rrsig_count;
950 for(i=0; i<t; i++) {
951 if(d1->rr_len[i] != d2->rr_len[i] ||
952 /* no ttl check: d1->rr_ttl[i] != d2->rr_ttl[i] ||*/
953 memcmp(d1->rr_data[i], d2->rr_data[i],
954 d1->rr_len[i]) != 0)
955 return 0;
956 }
957 return 1;
958 }
959
960 /** compare rrsets and sort canonically. Compares rrset name, type, class.
961 * return 0 if equal, +1 if x > y, and -1 if x < y.
962 */
963 static int
964 rrset_canonical_sort_cmp(const void* x, const void* y)
965 {
966 struct ub_packed_rrset_key* rrx = *(struct ub_packed_rrset_key**)x;
967 struct ub_packed_rrset_key* rry = *(struct ub_packed_rrset_key**)y;
968 int r = dname_canonical_compare(rrx->rk.dname, rry->rk.dname);
969 if(r != 0)
970 return r;
971 if(rrx->rk.type != rry->rk.type) {
972 if(ntohs(rrx->rk.type) > ntohs(rry->rk.type))
973 return 1;
974 else return -1;
975 }
976 if(rrx->rk.rrset_class != rry->rk.rrset_class) {
977 if(ntohs(rrx->rk.rrset_class) > ntohs(rry->rk.rrset_class))
978 return 1;
979 else return -1;
980 }
981 return 0;
982 }
983
984 int
985 reply_equal(struct reply_info* p, struct reply_info* q, struct regional* region)
986 {
987 size_t i;
988 struct ub_packed_rrset_key** sorted_p, **sorted_q;
989 if(p->flags != q->flags ||
990 p->qdcount != q->qdcount ||
991 /* do not check TTL, this may differ */
992 /*
993 p->ttl != q->ttl ||
994 p->prefetch_ttl != q->prefetch_ttl ||
995 */
996 p->security != q->security ||
997 p->an_numrrsets != q->an_numrrsets ||
998 p->ns_numrrsets != q->ns_numrrsets ||
999 p->ar_numrrsets != q->ar_numrrsets ||
1000 p->rrset_count != q->rrset_count)
1001 return 0;
1002 /* sort the rrsets in the authority and additional sections before
1003 * compare, the query and answer sections are ordered in the sequence
1004 * they should have (eg. one after the other for aliases). */
1005 sorted_p = (struct ub_packed_rrset_key**)regional_alloc_init(
1006 region, p->rrsets, sizeof(*sorted_p)*p->rrset_count);
1007 if(!sorted_p) return 0;
1008 log_assert(p->an_numrrsets + p->ns_numrrsets + p->ar_numrrsets <=
1009 p->rrset_count);
1010 qsort(sorted_p + p->an_numrrsets, p->ns_numrrsets,
1011 sizeof(*sorted_p), rrset_canonical_sort_cmp);
1012 qsort(sorted_p + p->an_numrrsets + p->ns_numrrsets, p->ar_numrrsets,
1013 sizeof(*sorted_p), rrset_canonical_sort_cmp);
1014
1015 sorted_q = (struct ub_packed_rrset_key**)regional_alloc_init(
1016 region, q->rrsets, sizeof(*sorted_q)*q->rrset_count);
1017 if(!sorted_q) {
1018 regional_free_all(region);
1019 return 0;
1020 }
1021 log_assert(q->an_numrrsets + q->ns_numrrsets + q->ar_numrrsets <=
1022 q->rrset_count);
1023 qsort(sorted_q + q->an_numrrsets, q->ns_numrrsets,
1024 sizeof(*sorted_q), rrset_canonical_sort_cmp);
1025 qsort(sorted_q + q->an_numrrsets + q->ns_numrrsets, q->ar_numrrsets,
1026 sizeof(*sorted_q), rrset_canonical_sort_cmp);
1027
1028 /* compare the rrsets */
1029 for(i=0; i<p->rrset_count; i++) {
1030 if(!rrset_equal(sorted_p[i], sorted_q[i])) {
1031 if(!rrset_canonical_equal(region, sorted_p[i],
1032 sorted_q[i])) {
1033 regional_free_all(region);
1034 return 0;
1035 }
1036 }
1037 }
1038 regional_free_all(region);
1039 return 1;
1040 }
1041
1042 void
1043 caps_strip_reply(struct reply_info* rep)
1044 {
1045 size_t i;
1046 if(!rep) return;
1047 /* see if message is a referral, in which case the additional and
1048 * NS record cannot be removed */
1049 /* referrals have the AA flag unset (strict check, not elsewhere in
1050 * unbound, but for 0x20 this is very convenient). */
1051 if(!(rep->flags&BIT_AA))
1052 return;
1053 /* remove the additional section from the reply */
1054 if(rep->ar_numrrsets != 0) {
1055 verbose(VERB_ALGO, "caps fallback: removing additional section");
1056 rep->rrset_count -= rep->ar_numrrsets;
1057 rep->ar_numrrsets = 0;
1058 }
1059 /* is there an NS set in the authority section to remove? */
1060 /* the failure case (Cisco firewalls) only has one rrset in authsec */
1061 for(i=rep->an_numrrsets; i<rep->an_numrrsets+rep->ns_numrrsets; i++) {
1062 struct ub_packed_rrset_key* s = rep->rrsets[i];
1063 if(ntohs(s->rk.type) == LDNS_RR_TYPE_NS) {
1064 /* remove NS rrset and break from loop (loop limits
1065 * have changed) */
1066 /* move last rrset into this position (there is no
1067 * additional section any more) */
1068 verbose(VERB_ALGO, "caps fallback: removing NS rrset");
1069 if(i < rep->rrset_count-1)
1070 rep->rrsets[i]=rep->rrsets[rep->rrset_count-1];
1071 rep->rrset_count --;
1072 rep->ns_numrrsets --;
1073 break;
1074 }
1075 }
1076 }
1077
1078 int caps_failed_rcode(struct reply_info* rep)
1079 {
1080 return !(FLAGS_GET_RCODE(rep->flags) == LDNS_RCODE_NOERROR ||
1081 FLAGS_GET_RCODE(rep->flags) == LDNS_RCODE_NXDOMAIN);
1082 }
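
/*
 * How these two helpers combine with reply_equal() during 0x20
 * (caps-for-id) fallback, as a hedged sketch with hypothetical variable
 * names, not the literal iterator.c code:
 */
#if 0
caps_strip_reply(new_rep);
if(caps_failed_rcode(new_rep) && !caps_failed_rcode(stored_rep)) {
	/* the new upstream answer failed (not NOERROR/NXDOMAIN) while
	 * the stored one did not: count it as a mismatch */
} else if(!reply_equal(new_rep, stored_rep, env->scratch)) {
	/* the stripped answers still differ: fall back to queries
	 * without 0x20 mixed-case protection */
}
#endif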
1083
1084 void
1085 iter_store_parentside_rrset(struct module_env* env,
1086 struct ub_packed_rrset_key* rrset)
1087 {
1088 struct rrset_ref ref;
1089 rrset = packed_rrset_copy_alloc(rrset, env->alloc, *env->now);
1090 if(!rrset) {
1091 log_err("malloc failure in store_parentside_rrset");
1092 return;
1093 }
1094 rrset->rk.flags |= PACKED_RRSET_PARENT_SIDE;
1095 rrset->entry.hash = rrset_key_hash(&rrset->rk);
1096 ref.key = rrset;
1097 ref.id = rrset->id;
1098 /* ignore ret: if it was in the cache, ref updated */
1099 (void)rrset_cache_update(env->rrset_cache, &ref, env->alloc, *env->now);
1100 }
1101
1102 /** fetch NS record from reply, if any */
1103 static struct ub_packed_rrset_key*
1104 reply_get_NS_rrset(struct reply_info* rep)
1105 {
1106 size_t i;
1107 for(i=0; i<rep->rrset_count; i++) {
1108 if(rep->rrsets[i]->rk.type == htons(LDNS_RR_TYPE_NS)) {
1109 return rep->rrsets[i];
1110 }
1111 }
1112 return NULL;
1113 }
1114
1115 void
1116 iter_store_parentside_NS(struct module_env* env, struct reply_info* rep)
1117 {
1118 struct ub_packed_rrset_key* rrset = reply_get_NS_rrset(rep);
1119 if(rrset) {
1120 log_rrset_key(VERB_ALGO, "store parent-side NS", rrset);
1121 iter_store_parentside_rrset(env, rrset);
1122 }
1123 }
1124
1125 void iter_store_parentside_neg(struct module_env* env,
1126 struct query_info* qinfo, struct reply_info* rep)
1127 {
1128 /* TTL: NS from referral in iq->deleg_msg,
1129 * or first RR from iq->response,
1130 * or servfail5secs if !iq->response */
1131 time_t ttl = NORR_TTL;
1132 struct ub_packed_rrset_key* neg;
1133 struct packed_rrset_data* newd;
1134 if(rep) {
1135 struct ub_packed_rrset_key* rrset = reply_get_NS_rrset(rep);
1136 if(!rrset && rep->rrset_count != 0) rrset = rep->rrsets[0];
1137 if(rrset) ttl = ub_packed_rrset_ttl(rrset);
1138 }
1139 /* create empty rrset to store */
1140 neg = (struct ub_packed_rrset_key*)regional_alloc(env->scratch,
1141 sizeof(struct ub_packed_rrset_key));
1142 if(!neg) {
1143 log_err("out of memory in store_parentside_neg");
1144 return;
1145 }
1146 memset(&neg->entry, 0, sizeof(neg->entry));
1147 neg->entry.key = neg;
1148 neg->rk.type = htons(qinfo->qtype);
1149 neg->rk.rrset_class = htons(qinfo->qclass);
1150 neg->rk.flags = 0;
1151 neg->rk.dname = regional_alloc_init(env->scratch, qinfo->qname,
1152 qinfo->qname_len);
1153 if(!neg->rk.dname) {
1154 log_err("out of memory in store_parentside_neg");
1155 return;
1156 }
1157 neg->rk.dname_len = qinfo->qname_len;
1158 neg->entry.hash = rrset_key_hash(&neg->rk);
1159 newd = (struct packed_rrset_data*)regional_alloc_zero(env->scratch,
1160 sizeof(struct packed_rrset_data) + sizeof(size_t) +
1161 sizeof(uint8_t*) + sizeof(time_t) + sizeof(uint16_t));
1162 if(!newd) {
1163 log_err("out of memory in store_parentside_neg");
1164 return;
1165 }
1166 neg->entry.data = newd;
1167 newd->ttl = ttl;
1168 /* entry must have one RR, otherwise not valid in cache.
1169 * put in one RR with empty rdata: those are ignored as nameserver */
1170 newd->count = 1;
1171 newd->rrsig_count = 0;
1172 newd->trust = rrset_trust_ans_noAA;
1173 newd->rr_len = (size_t*)((uint8_t*)newd +
1174 sizeof(struct packed_rrset_data));
1175 newd->rr_len[0] = 0 /* zero len rdata */ + sizeof(uint16_t);
1176 packed_rrset_ptr_fixup(newd);
1177 newd->rr_ttl[0] = newd->ttl;
1178 sldns_write_uint16(newd->rr_data[0], 0 /* zero len rdata */);
1179 /* store it */
1180 log_rrset_key(VERB_ALGO, "store parent-side negative", neg);
1181 iter_store_parentside_rrset(env, neg);
1182 }
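
/*
 * Note on the allocation above: the single regional_alloc_zero() block
 * holds struct packed_rrset_data followed by one rr_len (size_t), one
 * rr_data pointer (uint8_t*), one rr_ttl (time_t) and the 2 bytes of
 * rdata carrying the zero rdata length; packed_rrset_ptr_fixup() then
 * points the rr_len, rr_data and rr_ttl members into that same block.
 */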
1183
1184 int
1185 iter_lookup_parent_NS_from_cache(struct module_env* env, struct delegpt* dp,
1186 struct regional* region, struct query_info* qinfo)
1187 {
1188 struct ub_packed_rrset_key* akey;
1189 akey = rrset_cache_lookup(env->rrset_cache, dp->name,
1190 dp->namelen, LDNS_RR_TYPE_NS, qinfo->qclass,
1191 PACKED_RRSET_PARENT_SIDE, *env->now, 0);
1192 if(akey) {
1193 log_rrset_key(VERB_ALGO, "found parent-side NS in cache", akey);
1194 dp->has_parent_side_NS = 1;
1195 /* and mark the new names as lame */
1196 if(!delegpt_rrset_add_ns(dp, region, akey, 1)) {
1197 lock_rw_unlock(&akey->entry.lock);
1198 return 0;
1199 }
1200 lock_rw_unlock(&akey->entry.lock);
1201 }
1202 return 1;
1203 }
1204
1205 int iter_lookup_parent_glue_from_cache(struct module_env* env,
1206 struct delegpt* dp, struct regional* region, struct query_info* qinfo)
1207 {
1208 struct ub_packed_rrset_key* akey;
1209 struct delegpt_ns* ns;
1210 size_t num = delegpt_count_targets(dp);
1211 for(ns = dp->nslist; ns; ns = ns->next) {
1212 if(ns->cache_lookup_count > ITERATOR_NAME_CACHELOOKUP_MAX_PSIDE)
1213 continue;
1214 ns->cache_lookup_count++;
1215 /* get cached parentside A */
1216 akey = rrset_cache_lookup(env->rrset_cache, ns->name,
1217 ns->namelen, LDNS_RR_TYPE_A, qinfo->qclass,
1218 PACKED_RRSET_PARENT_SIDE, *env->now, 0);
1219 if(akey) {
1220 log_rrset_key(VERB_ALGO, "found parent-side", akey);
1221 ns->done_pside4 = 1;
1222 /* a negative-cache-element has no addresses it adds */
1223 if(!delegpt_add_rrset_A(dp, region, akey, 1, NULL))
1224 log_err("malloc failure in lookup_parent_glue");
1225 lock_rw_unlock(&akey->entry.lock);
1226 }
1227 /* get cached parentside AAAA */
1228 akey = rrset_cache_lookup(env->rrset_cache, ns->name,
1229 ns->namelen, LDNS_RR_TYPE_AAAA, qinfo->qclass,
1230 PACKED_RRSET_PARENT_SIDE, *env->now, 0);
1231 if(akey) {
1232 log_rrset_key(VERB_ALGO, "found parent-side", akey);
1233 ns->done_pside6 = 1;
1234 /* a negative-cache-element has no addresses it adds */
1235 if(!delegpt_add_rrset_AAAA(dp, region, akey, 1, NULL))
1236 log_err("malloc failure in lookup_parent_glue");
1237 lock_rw_unlock(&akey->entry.lock);
1238 }
1239 }
1240 /* see if new (but lame) addresses have become available */
1241 return delegpt_count_targets(dp) != num;
1242 }
1243
1244 int
1245 iter_get_next_root(struct iter_hints* hints, struct iter_forwards* fwd,
1246 uint16_t* c)
1247 {
1248 uint16_t c1 = *c, c2 = *c;
1249 int r1 = hints_next_root(hints, &c1);
1250 int r2 = forwards_next_root(fwd, &c2);
1251 if(!r1 && !r2) /* got none, end of list */
1252 return 0;
1253 else if(!r1) /* got one, return that */
1254 *c = c2;
1255 else if(!r2)
1256 *c = c1;
1257 else if(c1 < c2) /* got both take smallest */
1258 *c = c1;
1259 else *c = c2;
1260 return 1;
1261 }
1262
1263 void
1264 iter_scrub_ds(struct dns_msg* msg, struct ub_packed_rrset_key* ns, uint8_t* z)
1265 {
1266 /* Only the DS record for the delegation itself is expected.
1267 * We allow DS for everything between the bailiwick and the
1268 * zonecut, thus DS records must be at or above the zonecut.
1269 * And the DS records must be below the server authority zone.
1270 * The answer section is already scrubbed. */
1271 size_t i = msg->rep->an_numrrsets;
1272 while(i < (msg->rep->an_numrrsets + msg->rep->ns_numrrsets)) {
1273 struct ub_packed_rrset_key* s = msg->rep->rrsets[i];
1274 if(ntohs(s->rk.type) == LDNS_RR_TYPE_DS &&
1275 (!ns || !dname_subdomain_c(ns->rk.dname, s->rk.dname)
1276 || query_dname_compare(z, s->rk.dname) == 0)) {
1277 log_nametypeclass(VERB_ALGO, "removing irrelevant DS",
1278 s->rk.dname, ntohs(s->rk.type),
1279 ntohs(s->rk.rrset_class));
1280 memmove(msg->rep->rrsets+i, msg->rep->rrsets+i+1,
1281 sizeof(struct ub_packed_rrset_key*) *
1282 (msg->rep->rrset_count-i-1));
1283 msg->rep->ns_numrrsets--;
1284 msg->rep->rrset_count--;
1285 /* stay at same i, but new record */
1286 continue;
1287 }
1288 i++;
1289 }
1290 }
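
/*
 * Example of the rule above: for a referral from the com servers
 * (z = "com.") delegating to example.com (NS owner "example.com."), a DS
 * rrset for example.com is kept, a DS for com. itself is removed because
 * it equals the server zone, and a DS for sub.example.com is removed
 * because it lies below the delegation.
 */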
1291
1292 void
1293 iter_scrub_nxdomain(struct dns_msg* msg)
1294 {
1295 if(msg->rep->an_numrrsets == 0)
1296 return;
1297
1298 memmove(msg->rep->rrsets, msg->rep->rrsets+msg->rep->an_numrrsets,
1299 sizeof(struct ub_packed_rrset_key*) *
1300 (msg->rep->rrset_count-msg->rep->an_numrrsets));
1301 msg->rep->rrset_count -= msg->rep->an_numrrsets;
1302 msg->rep->an_numrrsets = 0;
1303 }
1304
1305 void iter_dec_attempts(struct delegpt* dp, int d, int outbound_msg_retry)
1306 {
1307 struct delegpt_addr* a;
1308 for(a=dp->target_list; a; a = a->next_target) {
1309 if(a->attempts >= outbound_msg_retry) {
1310 /* add back to result list */
1311 a->next_result = dp->result_list;
1312 dp->result_list = a;
1313 }
1314 if(a->attempts > d)
1315 a->attempts -= d;
1316 else a->attempts = 0;
1317 }
1318 }
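
/*
 * Example: with outbound_msg_retry = 5 and d = 3, an address that had
 * used up its 5 attempts is linked back onto the result list and its
 * counter drops to 2, so it becomes eligible for selection again.
 */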
1319
1320 void iter_merge_retry_counts(struct delegpt* dp, struct delegpt* old,
1321 int outbound_msg_retry)
1322 {
1323 struct delegpt_addr* a, *o, *prev;
1324 for(a=dp->target_list; a; a = a->next_target) {
1325 o = delegpt_find_addr(old, &a->addr, a->addrlen);
1326 if(o) {
1327 log_addr(VERB_ALGO, "copy attempt count previous dp",
1328 &a->addr, a->addrlen);
1329 a->attempts = o->attempts;
1330 }
1331 }
1332 prev = NULL;
1333 a = dp->usable_list;
1334 while(a) {
1335 if(a->attempts >= outbound_msg_retry) {
1336 log_addr(VERB_ALGO, "remove from usable list dp",
1337 &a->addr, a->addrlen);
1338 /* remove from result list */
1339 if(prev)
1340 prev->next_usable = a->next_usable;
1341 else dp->usable_list = a->next_usable;
1342 /* prev stays the same */
1343 a = a->next_usable;
1344 continue;
1345 }
1346 prev = a;
1347 a = a->next_usable;
1348 }
1349 }
1350
1351 int
1352 iter_ds_toolow(struct dns_msg* msg, struct delegpt* dp)
1353 {
1354 /* if for query example.com, there is example.com SOA or a subdomain
1355 * of example.com, then we are too low and need to fetch NS. */
1356 size_t i;
1357 /* if we have a DNAME or CNAME we are probably wrong */
1358 /* if we have a qtype DS in the answer section, its fine */
1359 for(i=0; i < msg->rep->an_numrrsets; i++) {
1360 struct ub_packed_rrset_key* s = msg->rep->rrsets[i];
1361 if(ntohs(s->rk.type) == LDNS_RR_TYPE_DNAME ||
1362 ntohs(s->rk.type) == LDNS_RR_TYPE_CNAME) {
1363 /* not the right answer, maybe too low, check the
1364 * RRSIG signer name (if there is any) for a hint
1365 * that it is from the dp zone anyway */
1366 uint8_t* sname;
1367 size_t slen;
1368 val_find_rrset_signer(s, &sname, &slen);
1369 if(sname && query_dname_compare(dp->name, sname)==0)
1370 return 0; /* it is fine, from the right dp */
1371 return 1;
1372 }
1373 if(ntohs(s->rk.type) == LDNS_RR_TYPE_DS)
1374 return 0; /* fine, we have a DS record */
1375 }
1376 for(i=msg->rep->an_numrrsets;
1377 i < msg->rep->an_numrrsets + msg->rep->ns_numrrsets; i++) {
1378 struct ub_packed_rrset_key* s = msg->rep->rrsets[i];
1379 if(ntohs(s->rk.type) == LDNS_RR_TYPE_SOA) {
1380 if(dname_subdomain_c(s->rk.dname, msg->qinfo.qname))
1381 return 1; /* point is too low */
1382 if(query_dname_compare(s->rk.dname, dp->name)==0)
1383 return 0; /* right dp */
1384 }
1385 if(ntohs(s->rk.type) == LDNS_RR_TYPE_NSEC ||
1386 ntohs(s->rk.type) == LDNS_RR_TYPE_NSEC3) {
1387 uint8_t* sname;
1388 size_t slen;
1389 val_find_rrset_signer(s, &sname, &slen);
1390 if(sname && query_dname_compare(dp->name, sname)==0)
1391 return 0; /* it is fine, from the right dp */
1392 return 1;
1393 }
1394 }
1395 /* we do not know */
1396 return 1;
1397 }
1398
1399 int iter_dp_cangodown(struct query_info* qinfo, struct delegpt* dp)
1400 {
1401 /* no delegation point; we cannot go down further.
1402 * Robustness check, the dp should really exist */
1403 if(!dp) return 0;
1404
1405 /* see if dp equals the qname, then we cannot go down further */
1406 if(query_dname_compare(qinfo->qname, dp->name) == 0)
1407 return 0;
1408 /* if dp is one label above the name we also cannot go down further */
1409 if(dname_count_labels(qinfo->qname) == dp->namelabs+1)
1410 return 0;
1411 return 1;
1412 }
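
/*
 * Example: for qname www.example.com. and a delegation point at
 * example.com. (one label above the qname) this returns 0, since an
 * intermediate zone cut is impossible; with the delegation point at com.
 * it returns 1, because example.com. could still be a separate zone.
 */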
1413
1414 int
1415 iter_stub_fwd_no_cache(struct module_qstate *qstate, struct query_info *qinf,
1416 uint8_t** retdpname, size_t* retdpnamelen)
1417 {
1418 struct iter_hints_stub *stub;
1419 struct delegpt *dp;
1420
1421 /* Check for stub. */
1422 stub = hints_lookup_stub(qstate->env->hints, qinf->qname,
1423 qinf->qclass, NULL);
1424 dp = forwards_lookup(qstate->env->fwds, qinf->qname, qinf->qclass);
1425
1426 /* see if forward or stub is more pertinent */
1427 if(stub && stub->dp && dp) {
1428 if(dname_strict_subdomain(dp->name, dp->namelabs,
1429 stub->dp->name, stub->dp->namelabs)) {
1430 stub = NULL; /* ignore stub, forward is lower */
1431 } else {
1432 dp = NULL; /* ignore forward, stub is lower */
1433 }
1434 }
1435
1436 /* check stub */
1437 if (stub != NULL && stub->dp != NULL) {
1438 if(stub->dp->no_cache) {
1439 char qname[255+1];
1440 char dpname[255+1];
1441 dname_str(qinf->qname, qname);
1442 dname_str(stub->dp->name, dpname);
1443 verbose(VERB_ALGO, "stub for %s %s has no_cache", qname, dpname);
1444 }
1445 if(retdpname) {
1446 *retdpname = stub->dp->name;
1447 *retdpnamelen = stub->dp->namelen;
1448 }
1449 return (stub->dp->no_cache);
1450 }
1451
1452 /* Check for forward. */
1453 if (dp) {
1454 if(dp->no_cache) {
1455 char qname[255+1];
1456 char dpname[255+1];
1457 dname_str(qinf->qname, qname);
1458 dname_str(dp->name, dpname);
1459 verbose(VERB_ALGO, "forward for %s %s has no_cache", qname, dpname);
1460 }
1461 if(retdpname) {
1462 *retdpname = dp->name;
1463 *retdpnamelen = dp->namelen;
1464 }
1465 return (dp->no_cache);
1466 }
1467 if(retdpname) {
1468 *retdpname = NULL;
1469 *retdpnamelen = 0;
1470 }
1471 return 0;
1472 }
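
/*
 * The no_cache flags tested above correspond to unbound.conf zone options
 * (a hedged sketch; names as documented for stub and forward zones):
 *
 *   forward-zone:
 *     name: "example.org"
 *     forward-addr: 192.0.2.53
 *     forward-no-cache: yes
 *
 *   stub-zone:
 *     name: "example.net"
 *     stub-addr: 192.0.2.54
 *     stub-no-cache: yes
 */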
1473
1474 void iterator_set_ip46_support(struct module_stack* mods,
1475 struct module_env* env, struct outside_network* outnet)
1476 {
1477 int m = modstack_find(mods, "iterator");
1478 struct iter_env* ie = NULL;
1479 if(m == -1)
1480 return;
1481 ie = (struct iter_env*)env->modinfo[m];
1482 if(outnet->pending == NULL)
1483 return; /* we are in testbound, no rbtree for UDP */
1484 if(outnet->num_ip4 == 0)
1485 ie->supports_ipv4 = 0;
1486 if(outnet->num_ip6 == 0)
1487 ie->supports_ipv6 = 0;
1488 }
1489