/*  Copyright (C) 2014-2017 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
 *  SPDX-License-Identifier: GPL-3.0-or-later
 */

#include <ctype.h>
#include <inttypes.h>
#include <stdio.h>
#include <fcntl.h>
#include <arpa/inet.h>
#include <libknot/rrtype/rdname.h>
#include <libknot/descriptor.h>
#include <ucw/mempool.h>
#include <sys/socket.h>
#include "kresconfig.h"
#include "lib/resolve.h"
#include "lib/layer.h"
#include "lib/rplan.h"
#include "lib/layer/iterate.h"
#include "lib/dnssec/ta.h"
#include "lib/dnssec.h"
#if ENABLE_COOKIES
#include "lib/cookies/control.h"
#include "lib/cookies/helper.h"
#include "lib/cookies/nonce.h"
#else /* Define compatibility macros */
#define KNOT_EDNS_OPTION_COOKIE 10
#endif /* ENABLE_COOKIES */

#define VERBOSE_MSG(qry, ...) QRVERBOSE((qry), RESOLVER, __VA_ARGS__)

bool kr_rank_check(uint8_t rank)
{
	switch (rank & ~KR_RANK_AUTH) {
	case KR_RANK_INITIAL:
	case KR_RANK_OMIT:
	case KR_RANK_TRY:
	case KR_RANK_INDET:
	case KR_RANK_BOGUS:
	case KR_RANK_MISMATCH:
	case KR_RANK_MISSING:
	case KR_RANK_INSECURE:
	case KR_RANK_SECURE:
		return true;
	default:
		return false;
	}
}

bool kr_rank_test(uint8_t rank, uint8_t kr_flag)
{
	if (kr_fails_assert(kr_rank_check(rank) && kr_rank_check(kr_flag)))
		return false;
	if (kr_flag == KR_RANK_AUTH) {
		return rank & KR_RANK_AUTH;
	}
	if (kr_fails_assert(!(kr_flag & KR_RANK_AUTH)))
		return false;
	/* The rest are exclusive values - exactly one has to be set. */
	return (rank & ~KR_RANK_AUTH) == kr_flag;
}
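
/* Usage sketch (editor's note, not in the original file): a rank combines
 * exactly one exclusive value with an optional KR_RANK_AUTH bit, so with the
 * enum values from lib/resolve.h:
 *
 *   kr_rank_check(KR_RANK_SECURE | KR_RANK_AUTH);              // true: one value + AUTH
 *   kr_rank_check(KR_RANK_SECURE | KR_RANK_BOGUS);             // false: two exclusive values
 *   kr_rank_test(KR_RANK_SECURE | KR_RANK_AUTH, KR_RANK_SECURE); // true: exclusive part matches
 *   kr_rank_test(KR_RANK_SECURE | KR_RANK_AUTH, KR_RANK_AUTH);   // true: tests the AUTH bit only
 */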

/** @internal Set the `yielded` flag on all RR entries with matching @a qry_uid. */
static void set_yield(ranked_rr_array_t *array, const uint32_t qry_uid, const bool yielded)
{
	for (unsigned i = 0; i < array->len; ++i) {
		ranked_rr_array_entry_t *entry = array->at[i];
		if (entry->qry_uid == qry_uid) {
			entry->yielded = yielded;
		}
	}
}

/**
 * @internal Defer execution of the current query.
 * The current layer state and input are pushed to a stack and resumed on the next iteration.
 */
static int consume_yield(kr_layer_t *ctx, knot_pkt_t *pkt)
{
	struct kr_request *req = ctx->req;
	size_t pkt_size = pkt->size;
	if (knot_pkt_has_tsig(pkt)) {
		pkt_size += pkt->tsig_wire.len;
	}
	knot_pkt_t *pkt_copy = knot_pkt_new(NULL, pkt_size, &req->pool);
	struct kr_layer_pickle *pickle = mm_alloc(&req->pool, sizeof(*pickle));
	if (pickle && pkt_copy && knot_pkt_copy(pkt_copy, pkt) == 0) {
		struct kr_query *qry = req->current_query;
		pickle->api = ctx->api;
		pickle->state = ctx->state;
		pickle->pkt = pkt_copy;
		pickle->next = qry->deferred;
		qry->deferred = pickle;
		set_yield(&req->answ_selected, qry->uid, true);
		set_yield(&req->auth_selected, qry->uid, true);
		return kr_ok();
	}
	return kr_error(ENOMEM);
}
static int begin_yield(kr_layer_t *ctx) { return kr_ok(); }
static int reset_yield(kr_layer_t *ctx) { return kr_ok(); }
static int finish_yield(kr_layer_t *ctx) { return kr_ok(); }
static int produce_yield(kr_layer_t *ctx, knot_pkt_t *pkt) { return kr_ok(); }
static int checkout_yield(kr_layer_t *ctx, knot_pkt_t *packet, struct sockaddr *dst, int type) { return kr_ok(); }
static int answer_finalize_yield(kr_layer_t *ctx) { return kr_ok(); }

/** @internal Macro for iterating module layers. */
#define RESUME_LAYERS(from, r, qry, func, ...) \
    (r)->current_query = (qry); \
	for (size_t i = (from); i < (r)->ctx->modules->len; ++i) { \
		struct kr_module *mod = (r)->ctx->modules->at[i]; \
		if (mod->layer) { \
			struct kr_layer layer = {.state = (r)->state, .api = mod->layer, .req = (r)}; \
			if (layer.api && layer.api->func) { \
				(r)->state = layer.api->func(&layer, ##__VA_ARGS__); \
				/* It's an easy mistake to return an error code, for example. */ \
				/* (though we could allow such an overload later) */ \
				if (kr_fails_assert(kr_state_consistent((r)->state))) { \
					(r)->state = KR_STATE_FAIL; \
				} else \
				if ((r)->state == KR_STATE_YIELD) { \
					func ## _yield(&layer, ##__VA_ARGS__); \
					break; \
				} \
			} \
		} \
	} /* Invalidate current query. */ \
	(r)->current_query = NULL

/** @internal Macro for starting module iteration. */
#define ITERATE_LAYERS(req, qry, func, ...) RESUME_LAYERS(0, req, qry, func, ##__VA_ARGS__)
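
/* For illustration (editor's note, not in the original): a call like
 *   ITERATE_LAYERS(request, qry, consume, packet);
 * runs each loaded module's .consume callback in order, threading the state
 * returned by one layer into the next, and stops early if a layer yields
 * (KR_STATE_YIELD), in which case the matching *_yield handler above
 * pickles the packet for later resumption. */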

/** @internal Find layer id matching API. */
static inline size_t layer_id(struct kr_request *req, const struct kr_layer_api *api) {
	module_array_t *modules = req->ctx->modules;
	for (size_t i = 0; i < modules->len; ++i) {
		if (modules->at[i]->layer == api) {
			return i;
		}
	}
	return 0; /* Not found, try all. */
}

/* @internal We don't need to deal with locale here */
KR_CONST static inline bool isletter(unsigned chr)
{ return (chr | 0x20 /* tolower */) - 'a' <= 'z' - 'a'; }
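
/* Why this works (explanatory note, not in the original): OR-ing with 0x20
 * maps 'A'..'Z' onto 'a'..'z'; for any byte outside 'a'..'z' the unsigned
 * subtraction wraps around to a huge value and fails the <= comparison.
 * E.g. a label length byte 0x03 becomes 0x23 ('#'), and 0x23 - 'a' wraps. */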

/* Randomize QNAME letter case.
 * This adds 32 bits of randomness at maximum, but that's more than an average domain name length.
 * https://tools.ietf.org/html/draft-vixie-dnsext-dns0x20-00
 */
static void randomized_qname_case(knot_dname_t * restrict qname, uint32_t secret)
{
	if (secret == 0)
		return;
	if (kr_fails_assert(qname))
		return;
	const int len = knot_dname_size(qname) - 2; /* Skip the first length byte and the terminating root byte. */
	for (int i = 0; i < len; ++i) {
		/* Note: this relies on the fact that correct label lengths
		 * can't pass the isletter() test (by "luck"). */
		if (isletter(*++qname)) {
			*qname ^= ((secret >> (i & 31)) & 1) * 0x20;
		}
	}
}
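
/* Worked example (illustrative, not in the original): take "example.com" in
 * wire form \x07example\x03com\x00 and secret = 0x5 (bits ...0101). Bit i of
 * the secret decides whether byte i gets its case flipped via XOR 0x20:
 * 'e' (i=0) and 'a' (i=2) flip, the interior length byte \x03 is rejected by
 * isletter(), so the query goes out as "ExAmple.com". An honest upstream
 * echoes the exact casing, which kr_resolve_consume() derandomizes back. */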

/** This turns off QNAME minimisation if there is a non-terminal between the current zone cut and the name target.
 *  It saves several minimization steps, as the zone cut is likely the final one.
 */
static void check_empty_nonterms(struct kr_query *qry, knot_pkt_t *pkt, struct kr_cache *cache, uint32_t timestamp)
{
	// FIXME cleanup, etc.
#if 0
	if (qry->flags.NO_MINIMIZE) {
		return;
	}

	const knot_dname_t *target = qry->sname;
	const knot_dname_t *cut_name = qry->zone_cut.name;
	if (!target || !cut_name)
		return;

	struct kr_cache_entry *entry = NULL;
	/* @note: The non-terminal must be a direct child of the zone cut (i.e. label distance <= 2),
	 *        otherwise this would risk leaking information to the parent if the NODATA TTD > zone cut TTD. */
	int labels = knot_dname_labels(target, NULL) - knot_dname_labels(cut_name, NULL);
	while (target[0] && labels > 2) {
		target = knot_wire_next_label(target, NULL);
		--labels;
	}
	for (int i = 0; i < labels; ++i) {
		int ret = kr_cache_peek(cache, KR_CACHE_PKT, target, KNOT_RRTYPE_NS, &entry, &timestamp);
		if (ret == 0) { /* Either NXDOMAIN or NODATA, start here. */
			/* @todo We could stop resolution here for NXDOMAIN, but we can't because of broken CDNs */
			qry->flags.NO_MINIMIZE = true;
			kr_make_query(qry, pkt);
			break;
		}
		kr_assert(target[0]);
		target = knot_wire_next_label(target, NULL);
	}
	kr_cache_commit(cache);
#endif
}

static int ns_fetch_cut(struct kr_query *qry, const knot_dname_t *requested_name,
			struct kr_request *req, knot_pkt_t *pkt)
{
	/* It can happen that the parent query already has a provably
	 * insecure zone cut which is not in the cache yet. */
	struct kr_qflags pflags;
	if (qry->parent) {
		pflags = qry->parent->flags;
	}
	const bool is_insecure = qry->parent != NULL
		&& !(pflags.AWAIT_IPV4 || pflags.AWAIT_IPV6)
		&& (pflags.DNSSEC_INSECURE || pflags.DNSSEC_NODS);

	/* Want DNSSEC if it's possible to secure this name
	 * (e.g. is covered by any TA) */
	if (is_insecure) {
		/* If the parent is insecure we don't want DNSSEC
		 * even if the cut name is covered by a TA. */
		qry->flags.DNSSEC_WANT = false;
		qry->flags.DNSSEC_INSECURE = true;
		VERBOSE_MSG(qry, "=> going insecure because parent query is insecure\n");
	} else if (kr_ta_closest(req->ctx, qry->zone_cut.name, KNOT_RRTYPE_NS)) {
		qry->flags.DNSSEC_WANT = true;
	} else {
		qry->flags.DNSSEC_WANT = false;
		VERBOSE_MSG(qry, "=> going insecure because there's no covering TA\n");
	}

	struct kr_zonecut cut_found;
	kr_zonecut_init(&cut_found, requested_name, req->rplan.pool);
	/* The cut that is found can differ from the cut that was requested.
	 * So if not already insecure, try to fetch the TA & keys
	 * even if the initial cut name is not covered by a TA. */
	bool secure = !is_insecure;
	int ret = kr_zonecut_find_cached(req->ctx, &cut_found, requested_name,
					 qry, &secure);
	if (ret == kr_error(ENOENT)) {
		/* No cached cut found, start from SBELT
		 * and issue a priming query. */
		kr_zonecut_deinit(&cut_found);
		ret = kr_zonecut_set_sbelt(req->ctx, &qry->zone_cut);
		if (ret != 0) {
			return KR_STATE_FAIL;
		}
		VERBOSE_MSG(qry, "=> using root hints\n");
		qry->flags.AWAIT_CUT = false;
		return KR_STATE_DONE;
	} else if (ret != kr_ok()) {
		kr_zonecut_deinit(&cut_found);
		return KR_STATE_FAIL;
	}

	/* Find out the security status.
	 * Go insecure if the zone cut is provably insecure. */
	if ((qry->flags.DNSSEC_WANT) && !secure) {
		VERBOSE_MSG(qry, "=> NS is provably without DS, going insecure\n");
		qry->flags.DNSSEC_WANT = false;
		qry->flags.DNSSEC_INSECURE = true;
	}
	/* The zone cut name can change, so check it again
	 * to prevent unnecessary DS & DNSKEY queries. */
	if (!(qry->flags.DNSSEC_INSECURE) &&
	    kr_ta_closest(req->ctx, cut_found.name, KNOT_RRTYPE_NS)) {
		qry->flags.DNSSEC_WANT = true;
	} else {
		qry->flags.DNSSEC_WANT = false;
	}
	/* Check if any DNSKEY was found for the cached cut. */
	if (qry->flags.DNSSEC_WANT && cut_found.key == NULL &&
	    kr_zonecut_is_empty(&cut_found)) {
		/* A cut was found and there are no proofs of zone insecurity,
		 * but no DNSKEY was found and no glue was fetched.
		 * We have a circular dependency - we must fetch A/AAAA
		 * from an authoritative, but we have no key to verify it. */
		kr_zonecut_deinit(&cut_found);
		if (requested_name[0] != '\0') {
			/* If not root - try the next label. */
			return KR_STATE_CONSUME;
		}
		/* No cached cut & keys found, start from SBELT. */
		ret = kr_zonecut_set_sbelt(req->ctx, &qry->zone_cut);
		if (ret != 0) {
			return KR_STATE_FAIL;
		}
		VERBOSE_MSG(qry, "=> using root hints\n");
		qry->flags.AWAIT_CUT = false;
		return KR_STATE_DONE;
	}
	/* Use the found zone cut. */
	kr_zonecut_move(&qry->zone_cut, &cut_found);
	/* Check if there's a non-terminal between the target and the current cut. */
	struct kr_cache *cache = &req->ctx->cache;
	check_empty_nonterms(qry, pkt, cache, qry->timestamp.tv_sec);
	/* Cut found. */
	return KR_STATE_PRODUCE;
}

static int edns_put(knot_pkt_t *pkt, bool reclaim)
{
	if (!pkt->opt_rr) {
		return kr_ok();
	}
	if (reclaim) {
		/* Reclaim reserved size. */
		int ret = knot_pkt_reclaim(pkt, knot_edns_wire_size(pkt->opt_rr));
		if (ret != 0) {
			return ret;
		}
	}
	/* Write to packet. */
	if (kr_fails_assert(pkt->current == KNOT_ADDITIONAL))
		return kr_error(EINVAL);
	return knot_pkt_put(pkt, KNOT_COMPR_HINT_NONE, pkt->opt_rr, KNOT_PF_FREE);
}

/** Removes the last EDNS OPT RR written to the packet. */
static int edns_erase_and_reserve(knot_pkt_t *pkt)
{
	/* Nothing to be done. */
	if (!pkt || !pkt->opt_rr) {
		return 0;
	}

	/* Fail if the data are located elsewhere than at the end of the packet. */
	if (pkt->current != KNOT_ADDITIONAL ||
	    pkt->opt_rr != &pkt->rr[pkt->rrset_count - 1]) {
		return -1;
	}

	size_t len = knot_rrset_size(pkt->opt_rr);
	int16_t rr_removed = pkt->opt_rr->rrs.count;
	/* Decrease rrset counters. */
	pkt->rrset_count -= 1;
	pkt->sections[pkt->current].count -= 1;
	pkt->size -= len;
	knot_wire_add_arcount(pkt->wire, -rr_removed); /* ADDITIONAL */

	pkt->opt_rr = NULL;

	/* Reserve the freed space. */
	return knot_pkt_reserve(pkt, len);
}

static int edns_create(knot_pkt_t *pkt, const struct kr_request *req)
{
	pkt->opt_rr = knot_rrset_copy(req->ctx->upstream_opt_rr, &pkt->mm);
	size_t wire_size = knot_edns_wire_size(pkt->opt_rr);
#if ENABLE_COOKIES
	if (req->ctx->cookie_ctx.clnt.enabled ||
	    req->ctx->cookie_ctx.srvr.enabled) {
		wire_size += KR_COOKIE_OPT_MAX_LEN;
	}
#endif /* ENABLE_COOKIES */
	if (req->qsource.flags.tls) {
		if (req->ctx->tls_padding == -1)
			/* FIXME: we do not know how to reserve space for the
			 * default padding policy, since we can't predict what
			 * it will select. So I'm just guessing :/ */
			wire_size += KNOT_EDNS_OPTION_HDRLEN + 512;
		if (req->ctx->tls_padding >= 2)
			wire_size += KNOT_EDNS_OPTION_HDRLEN + req->ctx->tls_padding;
	}
	return knot_pkt_reserve(pkt, wire_size);
}

/**
 * @param all_secure optionally &&-combine the security of written RRs into its value
 *		     (i.e. if you pass a pointer to false, it will always remain false).
 * @param all_cname optionally output whether all written RRs are CNAMEs and RRSIGs of CNAMEs.
 * @return error code, ignoring if forced to truncate the packet.
 */
static int write_extra_ranked_records(const ranked_rr_array_t *arr, uint16_t reorder,
				      knot_pkt_t *answer, bool *all_secure, bool *all_cname)
{
	const bool has_dnssec = knot_pkt_has_dnssec(answer);
	bool all_sec = true;
	bool all_cn = (all_cname != NULL); /* optim.: init as false if not needed */
	int err = kr_ok();

	for (size_t i = 0; i < arr->len; ++i) {
		ranked_rr_array_entry_t * entry = arr->at[i];
		kr_assert(!entry->in_progress);
		if (!entry->to_wire) {
			continue;
		}
		knot_rrset_t *rr = entry->rr;
		if (!has_dnssec) {
			if (rr->type != knot_pkt_qtype(answer) && knot_rrtype_is_dnssec(rr->type)) {
				continue;
			}
		}
		err = knot_pkt_put_rotate(answer, 0, rr, reorder, 0);
		if (err != KNOT_EOK) {
			if (err == KNOT_ESPACE) {
				err = kr_ok();
			}
			break;
		}

		if (rr->type != KNOT_RRTYPE_RRSIG) {
			all_sec = all_sec && kr_rank_test(entry->rank, KR_RANK_SECURE);
		}
		all_cn = all_cn && kr_rrset_type_maysig(entry->rr) == KNOT_RRTYPE_CNAME;
	}

	if (all_secure) {
		*all_secure = *all_secure && all_sec;
	}
	if (all_cname) {
		*all_cname = all_cn;
	}
	return err;
}

/** @internal Add an EDNS padding RR into the answer if requested and required. */
static int answer_padding(struct kr_request *request)
{
	if (kr_fails_assert(request && request->answer && request->ctx))
		return kr_error(EINVAL);
	if (!request->qsource.flags.tls) {
		/* Not meaningful to pad without encryption. */
		return kr_ok();
	}
	int32_t padding = request->ctx->tls_padding;
	knot_pkt_t *answer = request->answer;
	knot_rrset_t *opt_rr = answer->opt_rr;
	int32_t pad_bytes = -1;

	if (padding == -1) { /* use the default padding policy from libknot */
		pad_bytes = knot_pkt_default_padding_size(answer, opt_rr);
	}
	if (padding >= 2) {
		int32_t max_pad_bytes = knot_edns_get_payload(opt_rr) - (answer->size + knot_rrset_size(opt_rr));
		pad_bytes = MIN(knot_edns_alignment_size(answer->size, knot_rrset_size(opt_rr), padding),
				max_pad_bytes);
	}

	if (pad_bytes >= 0) {
		uint8_t zeros[MAX(1, pad_bytes)];
		memset(zeros, 0, sizeof(zeros));
		int r = knot_edns_add_option(opt_rr, KNOT_EDNS_OPTION_PADDING,
					     pad_bytes, zeros, &answer->mm);
		if (r != KNOT_EOK) {
			knot_rrset_clear(opt_rr, &answer->mm);
			return kr_error(r);
		}
	}
	return kr_ok();
}
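
/* Illustrative note (editor's addition, not in the original): with an
 * explicit tls_padding block size, knot_edns_alignment_size() is used to
 * pick the number of zero bytes so that the final message size lands on a
 * multiple of that block, capped by the client's EDNS payload. E.g.
 * assuming a 468-byte block (the response block size recommended by
 * RFC 8467), a ~300-byte answer would be padded up to 468 bytes, hiding its
 * exact length from an observer of the encrypted stream. */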

/* Make a clean SERVFAIL answer. */
static void answer_fail(struct kr_request *request)
{
	/* Note: OPT in SERVFAIL response is still useful for cookies/additional info. */
	if (kr_log_is_debug(RESOLVER, request))  /* logging optimization */
		kr_log_req(request, 0, 0, RESOLVER,
			"request failed, answering with empty SERVFAIL\n");
	knot_pkt_t *answer = request->answer;
	knot_rrset_t *opt_rr = answer->opt_rr; /* it gets NULLed below */
	int ret = kr_pkt_clear_payload(answer);
	knot_wire_clear_ad(answer->wire);
	knot_wire_clear_aa(answer->wire);
	knot_wire_set_rcode(answer->wire, KNOT_RCODE_SERVFAIL);
	if (ret == 0 && opt_rr) {
		knot_pkt_begin(answer, KNOT_ADDITIONAL);
		answer->opt_rr = opt_rr;
		answer_padding(request); /* Ignore failed padding in SERVFAIL answer. */
		edns_put(answer, false);
	}
}

/* Append EDNS records into the answer. */
static int answer_append_edns(struct kr_request *request)
{
	knot_pkt_t *answer = request->answer;
	if (!answer->opt_rr)
		return kr_ok();
	int ret = answer_padding(request);
	if (!ret) ret = knot_pkt_begin(answer, KNOT_ADDITIONAL);
	if (!ret) ret = knot_pkt_put(answer, KNOT_COMPR_HINT_NONE,
				     answer->opt_rr, KNOT_PF_FREE);
	return ret;
}

static void answer_finalize(struct kr_request *request)
{
	struct kr_rplan *rplan = &request->rplan;
	knot_pkt_t *answer = request->answer;
	const uint8_t *q_wire = request->qsource.packet->wire;

	if (answer->rrset_count != 0) {
		/* Non-standard: we assume the answer had been constructed.
		 * Let's check we don't have a "collision". */
		const ranked_rr_array_t *selected[] = kr_request_selected(request);
		for (int psec = KNOT_ANSWER; psec <= KNOT_ADDITIONAL; ++psec) {
			const ranked_rr_array_t *arr = selected[psec];
			for (ssize_t i = 0; i < arr->len; ++i) {
				if (kr_fails_assert(!arr->at[i]->to_wire)) {
					answer_fail(request);
					return;
				}
			}
		}
		/* We only add EDNS, and we even assume AD bit was correct. */
		if (answer_append_edns(request)) {
			answer_fail(request);
			return;
		}
		return;
	}

	struct kr_query *const last =
		rplan->resolved.len > 0 ? array_tail(rplan->resolved) : NULL;
		/* TODO  ^^^^ this is slightly fragile */

	if (!last) {
		/* Suspicious: no kr_query got resolved (not even from cache),
		 * so let's (defensively) SERVFAIL the request.
		 * ATM many checks below depend on `last` anyway,
		 * so this helps to avoid surprises. */
		answer_fail(request);
		return;
	}
	/* TODO: clean this up in !660 or followup, and it isn't foolproof anyway. */
	if (last->flags.DNSSEC_BOGUS
	    || (rplan->pending.len > 0 && array_tail(rplan->pending)->flags.DNSSEC_BOGUS)) {
		if (!knot_wire_get_cd(q_wire)) {
			answer_fail(request);
			return;
		}
	}

	/* AD flag.  We can only change `secure` from true to false.
	 * Be conservative.  Primary approach: check ranks of all RRs in wire.
	 * Only "negative answers" need special handling. */
	bool secure = last != NULL && request->state == KR_STATE_DONE /*< suspicious otherwise */
		&& knot_pkt_qtype(answer) != KNOT_RRTYPE_RRSIG;
	if (last && (last->flags.STUB)) {
		secure = false; /* don't trust forwarding for now */
	}
	if (last && (last->flags.DNSSEC_OPTOUT)) {
		VERBOSE_MSG(last, "insecure because of opt-out\n");
		secure = false; /* the last answer is insecure due to opt-out */
	}

	/* Write all RRsets meant for the answer. */
	const uint16_t reorder = last ? last->reorder : 0;
	bool answ_all_cnames = false/*arbitrary*/;
	if (knot_pkt_begin(answer, KNOT_ANSWER)
	    || write_extra_ranked_records(&request->answ_selected, reorder,
					answer, &secure, &answ_all_cnames)
	    || knot_pkt_begin(answer, KNOT_AUTHORITY)
	    || write_extra_ranked_records(&request->auth_selected, reorder,
					answer, &secure, NULL)
	    || knot_pkt_begin(answer, KNOT_ADDITIONAL)
	    || write_extra_ranked_records(&request->add_selected, reorder,
					answer, NULL/*not relevant to AD*/, NULL)
	    || answer_append_edns(request)
	   )
	{
		answer_fail(request);
		return;
	}

	if (!last) secure = false; /*< should be no-op, mostly documentation */
	/* AD: "negative answers" need more handling. */
	if (kr_response_classify(answer) != PKT_NOERROR
	    /* Additionally check for CNAME chains that "end in NODATA",
	     * as those would also be PKT_NOERROR. */
	    || (answ_all_cnames && knot_pkt_qtype(answer) != KNOT_RRTYPE_CNAME)) {

		secure = secure && last->flags.DNSSEC_WANT
			&& !last->flags.DNSSEC_BOGUS && !last->flags.DNSSEC_INSECURE;
	}

	if (secure) {
		struct kr_query *cname_parent = last->cname_parent;
		while (cname_parent != NULL) {
			if (cname_parent->flags.DNSSEC_OPTOUT) {
				secure = false;
				break;
			}
			cname_parent = cname_parent->cname_parent;
		}
	}

	/* No detailed analysis ATM, just _SECURE or not.
	 * LATER: request->rank might better be computed in validator's finish phase. */
	VERBOSE_MSG(last, "AD: request%s classified as SECURE\n", secure ? "" : " NOT");
	request->rank = secure ? KR_RANK_SECURE : KR_RANK_INITIAL;

	/* Set AD if secure and AD bit "was requested". */
	if (secure && !knot_wire_get_cd(q_wire)
	    && (knot_pkt_has_dnssec(answer) || knot_wire_get_ad(q_wire))) {
		knot_wire_set_ad(answer->wire);
	}
}

static int query_finalize(struct kr_request *request, struct kr_query *qry, knot_pkt_t *pkt)
{
	knot_pkt_begin(pkt, KNOT_ADDITIONAL);
	if (qry->flags.NO_EDNS)
		return kr_ok();
	/* Remove any EDNS records from any previous iteration. */
	int ret = edns_erase_and_reserve(pkt);
	if (ret) return ret;
	ret = edns_create(pkt, request);
	if (ret) return ret;
	if (qry->flags.STUB) {
		/* Stub resolution (ask for +rd and +do) */
		knot_wire_set_rd(pkt->wire);
		if (knot_pkt_has_dnssec(request->qsource.packet)) {
			knot_edns_set_do(pkt->opt_rr);
		}
		if (knot_wire_get_cd(request->qsource.packet->wire)) {
			knot_wire_set_cd(pkt->wire);
		}
	} else {
		/* Full resolution (ask for +cd and +do) */
		knot_edns_set_do(pkt->opt_rr);
		knot_wire_set_cd(pkt->wire);
		if (qry->flags.FORWARD) {
			knot_wire_set_rd(pkt->wire);
		}
	}
	return kr_ok();
}

int kr_resolve_begin(struct kr_request *request, struct kr_context *ctx)
{
	/* Initialize request */
	request->ctx = ctx;
	request->answer = NULL;
	request->options = ctx->options;
	request->state = KR_STATE_CONSUME;
	request->current_query = NULL;
	array_init(request->answ_selected);
	array_init(request->auth_selected);
	array_init(request->add_selected);
	request->answ_validated = false;
	request->auth_validated = false;
	request->rank = KR_RANK_INITIAL;
	request->trace_log = NULL;
	request->trace_finish = NULL;

	/* Expect first query */
	kr_rplan_init(&request->rplan, request, &request->pool);
	return KR_STATE_CONSUME;
}
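
/* Condensed sketch (editor's note, not in the original) of how a driver is
 * expected to turn the kr_resolve_* calls in this file into a full
 * resolution; I/O, allocation and error handling are omitted and the
 * variable names are illustrative:
 *
 *   kr_resolve_begin(&request, ctx);
 *   int state = kr_resolve_consume(&request, NULL, incoming_query);
 *   while (state == KR_STATE_PRODUCE) {
 *       state = kr_resolve_produce(&request, &transport, out_pkt);
 *       while (state == KR_STATE_CONSUME) {
 *           kr_resolve_checkout(&request, src_addr, transport, out_pkt);
 *           // ... send out_pkt upstream and wait for reply_pkt ...
 *           state = kr_resolve_consume(&request, &transport, reply_pkt);
 *       }
 *   }
 *   kr_resolve_finish(&request, state); // state is DONE or FAIL here
 */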

static int resolve_query(struct kr_request *request, const knot_pkt_t *packet)
{
	struct kr_rplan *rplan = &request->rplan;
	const knot_dname_t *qname = knot_pkt_qname(packet);
	uint16_t qclass = knot_pkt_qclass(packet);
	uint16_t qtype = knot_pkt_qtype(packet);
	struct kr_query *qry = NULL;
	struct kr_context *ctx = request->ctx;
	struct kr_cookie_ctx *cookie_ctx = ctx ? &ctx->cookie_ctx : NULL;

	if (qname != NULL) {
		qry = kr_rplan_push(rplan, NULL, qname, qclass, qtype);
	} else if (cookie_ctx && cookie_ctx->srvr.enabled &&
		   knot_wire_get_qdcount(packet->wire) == 0 &&
		   knot_pkt_has_edns(packet) &&
		   knot_pkt_edns_option(packet, KNOT_EDNS_OPTION_COOKIE)) {
		/* Plan empty query only for cookies. */
		qry = kr_rplan_push_empty(rplan, NULL);
	}
	if (!qry) {
		return KR_STATE_FAIL;
	}

	if (qname != NULL) {
		/* Deferred zone cut lookup for this query. */
		qry->flags.AWAIT_CUT = true;
		/* Want DNSSEC if it's possible to secure this name (e.g. is covered by any TA) */
		if ((knot_wire_get_ad(packet->wire) || knot_pkt_has_dnssec(packet)) &&
		    kr_ta_closest(request->ctx, qry->sname, qtype)) {
			qry->flags.DNSSEC_WANT = true;
		}
	}

	/* Expect answer, pop if satisfied immediately */
	ITERATE_LAYERS(request, qry, begin);
	if ((request->state & KR_STATE_DONE) != 0) {
		kr_rplan_pop(rplan, qry);
	} else if (qname == NULL) {
		/* It is an empty query which must be resolved by the
		   `begin` layer of the cookie module.
		   If the query wasn't resolved, fail. */
		request->state = KR_STATE_FAIL;
	}
	return request->state;
}

knot_rrset_t* kr_request_ensure_edns(struct kr_request *request)
{
	kr_require(request && request->answer && request->qsource.packet && request->ctx);
	knot_pkt_t* answer = request->answer;
	bool want_edns = knot_pkt_has_edns(request->qsource.packet);
	if (!want_edns) {
		kr_assert(!answer->opt_rr);
		return answer->opt_rr;
	} else if (answer->opt_rr) {
		return answer->opt_rr;
	}

	kr_assert(request->ctx->downstream_opt_rr);
	answer->opt_rr = knot_rrset_copy(request->ctx->downstream_opt_rr, &answer->mm);
	if (!answer->opt_rr)
		return NULL;
	if (knot_pkt_has_dnssec(request->qsource.packet))
		knot_edns_set_do(answer->opt_rr);
	return answer->opt_rr;
}

knot_pkt_t *kr_request_ensure_answer(struct kr_request *request)
{
	if (request->answer)
		return request->answer;

	const knot_pkt_t *qs_pkt = request->qsource.packet;
	if (kr_fails_assert(qs_pkt))
		goto fail;
	// Find answer_max: limit on DNS wire length.
	uint16_t answer_max;
	const struct kr_request_qsource_flags *qs_flags = &request->qsource.flags;
	if (kr_fails_assert((qs_flags->tls || qs_flags->http) ? qs_flags->tcp : true))
		goto fail;
	if (!request->qsource.addr || qs_flags->tcp) {
		// not on UDP
		answer_max = KNOT_WIRE_MAX_PKTSIZE;
	} else if (knot_pkt_has_edns(qs_pkt)) {
		// UDP with EDNS
		answer_max = MIN(knot_edns_get_payload(qs_pkt->opt_rr),
				 knot_edns_get_payload(request->ctx->downstream_opt_rr));
		answer_max = MAX(answer_max, KNOT_WIRE_MIN_PKTSIZE);
	} else {
		// UDP without EDNS
		answer_max = KNOT_WIRE_MIN_PKTSIZE;
	}

	// Allocate the packet.
	uint8_t *wire = NULL;
	if (request->alloc_wire_cb) {
		wire = request->alloc_wire_cb(request, &answer_max);
		if (!wire)
			goto enomem;
	}
	knot_pkt_t *answer = request->answer =
		knot_pkt_new(wire, answer_max, &request->pool);
	if (!answer || knot_pkt_init_response(answer, qs_pkt) != 0) {
		kr_assert(!answer); // otherwise we messed something up
		goto enomem;
	}
	if (!wire)
		wire = answer->wire;

	// Much was done by knot_pkt_init_response()
	knot_wire_set_ra(wire);
	knot_wire_set_rcode(wire, KNOT_RCODE_NOERROR);
	if (knot_wire_get_cd(qs_pkt->wire)) {
		knot_wire_set_cd(wire);
	}

	// Prepare EDNS if required.
	if (knot_pkt_has_edns(qs_pkt) && kr_fails_assert(kr_request_ensure_edns(request)))
		goto enomem; // answer is on mempool, so "leak" is OK

	return request->answer;
enomem:
fail:
	request->state = KR_STATE_FAIL; // TODO: really combine with another flag?
	return request->answer = NULL;
}

static bool resolution_time_exceeded(struct kr_query *qry, uint64_t now)
{
	uint64_t resolving_time = now - qry->creation_time_mono;
	if (resolving_time > KR_RESOLVE_TIME_LIMIT) {
		VERBOSE_MSG(qry, "query resolution time limit exceeded\n");
		return true;
	}
	return false;
}

int kr_resolve_consume(struct kr_request *request, struct kr_transport **transport, knot_pkt_t *packet)
{
	struct kr_rplan *rplan = &request->rplan;

	/* Empty resolution plan, push packet as the new query */
	if (packet && kr_rplan_empty(rplan)) {
		return resolve_query(request, packet);
	}

	/* Different processing for network error */
	struct kr_query *qry = array_tail(rplan->pending);
	/* Check overall resolution time */
	if (resolution_time_exceeded(qry, kr_now())) {
		return KR_STATE_FAIL;
	}
	bool tried_tcp = (qry->flags.TCP);
	if (!packet || packet->size == 0) {
		return KR_STATE_PRODUCE;
	} else {
		/* Packet cleared, derandomize QNAME. */
		knot_dname_t *qname_raw = knot_pkt_qname(packet);
		if (qname_raw && qry->secret != 0) {
			randomized_qname_case(qname_raw, qry->secret);
		}
		request->state = KR_STATE_CONSUME;
		if (qry->flags.CACHED) {
			ITERATE_LAYERS(request, qry, consume, packet);
		} else {
			/* Fill in source and latency information. */
			request->upstream.rtt = kr_now() - qry->timestamp_mono;
			request->upstream.transport = transport ? *transport : NULL;
			ITERATE_LAYERS(request, qry, consume, packet);
			/* Clear temporary information */
			request->upstream.transport = NULL;
			request->upstream.rtt = 0;
		}
	}

	if (transport && !qry->flags.CACHED) {
		if (!(request->state & KR_STATE_FAIL)) {
			/* Do not complete NS address resolution on soft-fail. */
			const int rcode = packet ? knot_wire_get_rcode(packet->wire) : 0;
			if (rcode != KNOT_RCODE_SERVFAIL && rcode != KNOT_RCODE_REFUSED) {
				qry->flags.AWAIT_IPV6 = false;
				qry->flags.AWAIT_IPV4 = false;
			}
		}
	}

	if (request->state & KR_STATE_FAIL) {
		qry->flags.RESOLVED = false;
	}

	if (!qry->flags.CACHED) {
		if (request->state & KR_STATE_FAIL) {
			if (++request->count_fail_row > KR_CONSUME_FAIL_ROW_LIMIT) {
				if (kr_log_is_debug(RESOLVER, request)) {  /* logging optimization */
					kr_log_req(request, 0, 2, RESOLVER,
						"=> too many failures in a row, "
						"bail out (mitigation for NXNSAttack "
						"CVE-2020-12667)\n");
				}
				if (!qry->flags.NO_NS_FOUND) {
					qry->flags.NO_NS_FOUND = true;
					return KR_STATE_PRODUCE;
				}
				return KR_STATE_FAIL;
			}
		} else {
			request->count_fail_row = 0;
		}
	}

	/* Pop query if resolved. */
	if (request->state == KR_STATE_YIELD) {
		return KR_STATE_PRODUCE; /* Requery */
	} else if (qry->flags.RESOLVED) {
		kr_rplan_pop(rplan, qry);
	} else if (!tried_tcp && (qry->flags.TCP)) {
		return KR_STATE_PRODUCE; /* Requery over TCP */
	} else { /* Clear query flags for next attempt */
		qry->flags.CACHED = false;
		if (!request->options.TCP) {
			qry->flags.TCP = false;
		}
	}

	ITERATE_LAYERS(request, qry, reset);

	/* Do not finish with bogus answer. */
	if (qry->flags.DNSSEC_BOGUS)  {
		if (qry->flags.FORWARD || qry->flags.STUB) {
			return KR_STATE_FAIL;
		}
		/* Other servers might not have broken DNSSEC. */
		qry->flags.DNSSEC_BOGUS = false;
		return KR_STATE_PRODUCE;
	}

	return kr_rplan_empty(&request->rplan) ? KR_STATE_DONE : KR_STATE_PRODUCE;
}

/** @internal Spawn subrequest in current zone cut (no minimization or lookup). */
static struct kr_query *zone_cut_subreq(struct kr_rplan *rplan, struct kr_query *parent,
                           const knot_dname_t *qname, uint16_t qtype)
{
	struct kr_query *next = kr_rplan_push(rplan, parent, qname, parent->sclass, qtype);
	if (!next) {
		return NULL;
	}
	kr_zonecut_set(&next->zone_cut, parent->zone_cut.name);
	if (kr_zonecut_copy(&next->zone_cut, &parent->zone_cut) != 0 ||
	    kr_zonecut_copy_trust(&next->zone_cut, &parent->zone_cut) != 0) {
		return NULL;
	}
	next->flags.NO_MINIMIZE = true;
	if (parent->flags.DNSSEC_WANT) {
		next->flags.DNSSEC_WANT = true;
	}
	return next;
}

static int forward_trust_chain_check(struct kr_request *request, struct kr_query *qry, bool resume)
{
	struct kr_rplan *rplan = &request->rplan;
	map_t *trust_anchors = &request->ctx->trust_anchors;
	map_t *negative_anchors = &request->ctx->negative_anchors;

	if (qry->parent != NULL &&
	    !(qry->forward_flags.CNAME) &&
	    !(qry->flags.DNS64_MARK) &&
	    knot_dname_in_bailiwick(qry->zone_cut.name, qry->parent->zone_cut.name) >= 0) {
		return KR_STATE_PRODUCE;
	}

	if (kr_fails_assert(qry->flags.FORWARD))
		return KR_STATE_FAIL;

	if (!trust_anchors) {
		qry->flags.AWAIT_CUT = false;
		return KR_STATE_PRODUCE;
	}

	if (qry->flags.DNSSEC_INSECURE) {
		qry->flags.AWAIT_CUT = false;
		return KR_STATE_PRODUCE;
	}

	if (qry->forward_flags.NO_MINIMIZE) {
		qry->flags.AWAIT_CUT = false;
		return KR_STATE_PRODUCE;
	}

	const knot_dname_t *start_name = qry->sname;
	if ((qry->flags.AWAIT_CUT) && !resume) {
		qry->flags.AWAIT_CUT = false;
		const knot_dname_t *longest_ta = kr_ta_closest(request->ctx, qry->sname, qry->stype);
		if (longest_ta) {
			start_name = longest_ta;
			qry->zone_cut.name = knot_dname_copy(start_name, qry->zone_cut.pool);
			qry->flags.DNSSEC_WANT = true;
		} else {
			qry->flags.DNSSEC_WANT = false;
			return KR_STATE_PRODUCE;
		}
	}

	bool has_ta = (qry->zone_cut.trust_anchor != NULL);
	knot_dname_t *ta_name = (has_ta ? qry->zone_cut.trust_anchor->owner : NULL);
	bool refetch_ta = (!has_ta || !knot_dname_is_equal(qry->zone_cut.name, ta_name));
	bool is_dnskey_subreq = kr_rplan_satisfies(qry, ta_name, KNOT_CLASS_IN, KNOT_RRTYPE_DNSKEY);
	bool refetch_key = has_ta && (!qry->zone_cut.key || !knot_dname_is_equal(ta_name, qry->zone_cut.key->owner));
	if (refetch_key && !is_dnskey_subreq) {
		struct kr_query *next = zone_cut_subreq(rplan, qry, ta_name, KNOT_RRTYPE_DNSKEY);
		if (!next) {
			return KR_STATE_FAIL;
		}
		return KR_STATE_DONE;
	}

	int name_offset = 1;
	const knot_dname_t *wanted_name;
	bool nods, ds_req, ns_req, minimized, ns_exist;
	do {
		wanted_name = start_name;
		ds_req = false;
		ns_req = false;
		ns_exist = true;

		int cut_labels = knot_dname_labels(qry->zone_cut.name, NULL);
		int wanted_name_labels = knot_dname_labels(wanted_name, NULL);
		while (wanted_name[0] && wanted_name_labels > cut_labels + name_offset) {
			wanted_name = knot_wire_next_label(wanted_name, NULL);
			wanted_name_labels -= 1;
		}
		minimized = (wanted_name != qry->sname);

		for (int i = 0; i < request->rplan.resolved.len; ++i) {
			struct kr_query *q = request->rplan.resolved.at[i];
			if (q->parent == qry &&
			    q->sclass == qry->sclass &&
			    (q->stype == KNOT_RRTYPE_DS || q->stype == KNOT_RRTYPE_NS) &&
			    knot_dname_is_equal(q->sname, wanted_name)) {
				if (q->stype == KNOT_RRTYPE_DS) {
					ds_req = true;
					if (q->flags.CNAME) {
						ns_exist = false;
					} else if (!(q->flags.DNSSEC_OPTOUT)) {
						int ret = kr_dnssec_matches_name_and_type(&request->auth_selected, q->uid,
											  wanted_name, KNOT_RRTYPE_NS);
						ns_exist = (ret == kr_ok());
					}
				} else {
					if (q->flags.CNAME) {
						ns_exist = false;
					}
					ns_req = true;
				}
			}
		}

		if (ds_req && ns_exist && !ns_req && (minimized || resume)) {
			struct kr_query *next = zone_cut_subreq(rplan, qry, wanted_name,
								KNOT_RRTYPE_NS);
			if (!next) {
				return KR_STATE_FAIL;
			}
			return KR_STATE_DONE;
		}

		if (qry->parent == NULL && (qry->flags.CNAME) &&
		    ds_req && ns_req) {
			return KR_STATE_PRODUCE;
		}

		/* set `nods` */
		if ((qry->stype == KNOT_RRTYPE_DS) &&
		    knot_dname_is_equal(wanted_name, qry->sname)) {
			nods = true;
		} else if (resume && !ds_req) {
			nods = false;
		} else if (!minimized && qry->stype != KNOT_RRTYPE_DNSKEY) {
			nods = true;
		} else {
			nods = ds_req;
		}
		name_offset += 1;
	} while (ds_req && (ns_req || !ns_exist) && minimized);

	/* Disable DNSSEC if it enters NTA. */
	if (kr_ta_get(negative_anchors, wanted_name)){
		VERBOSE_MSG(qry, ">< negative TA, going insecure\n");
		qry->flags.DNSSEC_WANT = false;
	}

	/* Enable DNSSEC if it enters a new island of trust. */
	bool want_secure = (qry->flags.DNSSEC_WANT) &&
			    !knot_wire_get_cd(request->qsource.packet->wire);
	if (!(qry->flags.DNSSEC_WANT) &&
	    !knot_wire_get_cd(request->qsource.packet->wire) &&
	    kr_ta_get(trust_anchors, wanted_name)) {
		qry->flags.DNSSEC_WANT = true;
		want_secure = true;
		if (kr_log_is_debug_qry(RESOLVER, qry)) {
			KR_DNAME_GET_STR(qname_str, wanted_name);
			VERBOSE_MSG(qry, ">< TA: '%s'\n", qname_str);
		}
	}

	if (want_secure && !qry->zone_cut.trust_anchor) {
		knot_rrset_t *ta_rr = kr_ta_get(trust_anchors, wanted_name);
		if (!ta_rr) {
			char name[] = "\0";
			ta_rr = kr_ta_get(trust_anchors, (knot_dname_t*)name);
		}
		if (ta_rr) {
			qry->zone_cut.trust_anchor = knot_rrset_copy(ta_rr, qry->zone_cut.pool);
		}
	}

	has_ta = (qry->zone_cut.trust_anchor != NULL);
	ta_name = (has_ta ? qry->zone_cut.trust_anchor->owner : NULL);
	refetch_ta = (!has_ta || !knot_dname_is_equal(wanted_name, ta_name));
	if (!nods && want_secure && refetch_ta) {
		struct kr_query *next = zone_cut_subreq(rplan, qry, wanted_name,
							KNOT_RRTYPE_DS);
		if (!next) {
			return KR_STATE_FAIL;
		}
		return KR_STATE_DONE;
	}

	/* Try to fetch missing DNSKEY.
	 * Do not fetch if this is a DNSKEY subrequest to avoid circular dependency. */
	is_dnskey_subreq = kr_rplan_satisfies(qry, ta_name, KNOT_CLASS_IN, KNOT_RRTYPE_DNSKEY);
	refetch_key = has_ta && (!qry->zone_cut.key || !knot_dname_is_equal(ta_name, qry->zone_cut.key->owner));
	if (want_secure && refetch_key && !is_dnskey_subreq) {
		struct kr_query *next = zone_cut_subreq(rplan, qry, ta_name, KNOT_RRTYPE_DNSKEY);
		if (!next) {
			return KR_STATE_FAIL;
		}
		return KR_STATE_DONE;
	}

	return KR_STATE_PRODUCE;
}

/* @todo: Validator refactoring, keep this in driver for now. */
static int trust_chain_check(struct kr_request *request, struct kr_query *qry)
{
	struct kr_rplan *rplan = &request->rplan;
	map_t *trust_anchors = &request->ctx->trust_anchors;
	map_t *negative_anchors = &request->ctx->negative_anchors;

	/* Disable DNSSEC if it enters NTA. */
	if (kr_ta_get(negative_anchors, qry->zone_cut.name)){
		VERBOSE_MSG(qry, ">< negative TA, going insecure\n");
		qry->flags.DNSSEC_WANT = false;
		qry->flags.DNSSEC_INSECURE = true;
	}
	if (qry->flags.DNSSEC_NODS) {
		/* This is the next query iteration with a minimized qname.
		 * In the previous iteration DS non-existence was proven. */
		VERBOSE_MSG(qry, "<= DS doesn't exist, going insecure\n");
		qry->flags.DNSSEC_NODS = false;
		qry->flags.DNSSEC_WANT = false;
		qry->flags.DNSSEC_INSECURE = true;
	}
	/* Enable DNSSEC if entering a new (or different) island of trust,
	 * and update the TA RRset if required. */
	const bool has_cd = knot_wire_get_cd(request->qsource.packet->wire);
	knot_rrset_t *ta_rr = kr_ta_get(trust_anchors, qry->zone_cut.name);
	if (!has_cd && ta_rr) {
		qry->flags.DNSSEC_WANT = true;
		if (qry->zone_cut.trust_anchor == NULL
		    || !knot_dname_is_equal(qry->zone_cut.trust_anchor->owner, qry->zone_cut.name)) {
			mm_free(qry->zone_cut.pool, qry->zone_cut.trust_anchor);
			qry->zone_cut.trust_anchor = knot_rrset_copy(ta_rr, qry->zone_cut.pool);

			if (kr_log_is_debug_qry(RESOLVER, qry)) {
				KR_DNAME_GET_STR(qname_str, ta_rr->owner);
				VERBOSE_MSG(qry, ">< TA: '%s'\n", qname_str);
			}
		}
	}

	/* Try to fetch missing DS (from above the cut). */
	const bool has_ta = (qry->zone_cut.trust_anchor != NULL);
	const knot_dname_t *ta_name = (has_ta ? qry->zone_cut.trust_anchor->owner : NULL);
	const bool refetch_ta = !has_ta || !knot_dname_is_equal(qry->zone_cut.name, ta_name);
	const bool want_secure = qry->flags.DNSSEC_WANT && !has_cd;
	if (want_secure && refetch_ta) {
		/* @todo we could fetch the information from the parent cut, but we don't remember that now */
		struct kr_query *next = kr_rplan_push(rplan, qry, qry->zone_cut.name, qry->sclass, KNOT_RRTYPE_DS);
		if (!next) {
			return KR_STATE_FAIL;
		}
		next->flags.AWAIT_CUT = true;
		next->flags.DNSSEC_WANT = true;
		return KR_STATE_DONE;
	}
	/* Try to fetch missing DNSKEY (either missing or above current cut).
	 * Do not fetch if this is a DNSKEY subrequest to avoid circular dependency. */
	const bool is_dnskey_subreq = kr_rplan_satisfies(qry, ta_name, KNOT_CLASS_IN, KNOT_RRTYPE_DNSKEY);
	const bool refetch_key = has_ta && (!qry->zone_cut.key || !knot_dname_is_equal(ta_name, qry->zone_cut.key->owner));
	if (want_secure && refetch_key && !is_dnskey_subreq) {
		struct kr_query *next = zone_cut_subreq(rplan, qry, ta_name, KNOT_RRTYPE_DNSKEY);
		if (!next) {
			return KR_STATE_FAIL;
		}
		return KR_STATE_DONE;
	}

	return KR_STATE_PRODUCE;
}

/** @internal Check current zone cut status and credibility, spawn subrequests if needed. */
static int zone_cut_check(struct kr_request *request, struct kr_query *qry, knot_pkt_t *packet)
/* TODO: using cache on this point in this way just isn't nice; remove in time */
{
	/* Stub mode, just forward and do not solve the cut. */
	if (qry->flags.STUB) {
		return KR_STATE_PRODUCE;
	}

	/* Forwarding to upstream resolver mode.
	 * Since forwarding targets are already in qry->ns,
	 * cut fetching is not needed. */
	if (qry->flags.FORWARD) {
		return forward_trust_chain_check(request, qry, false);
	}
	if (!(qry->flags.AWAIT_CUT)) {
		/* The query was resolved from cache.
		 * Spawn DS / DNSKEY requests if needed and exit. */
		return trust_chain_check(request, qry);
	}

	/* The query wasn't resolved from cache,
	 * now it's the time to look up the closest zone cut from cache. */
	struct kr_cache *cache = &request->ctx->cache;
	if (!kr_cache_is_open(cache)) {
		int ret = kr_zonecut_set_sbelt(request->ctx, &qry->zone_cut);
		if (ret != 0) {
			return KR_STATE_FAIL;
		}
		VERBOSE_MSG(qry, "=> no cache open, using root hints\n");
		qry->flags.AWAIT_CUT = false;
		return KR_STATE_DONE;
	}

	const knot_dname_t *requested_name = qry->sname;
	/* If at/subdomain of parent zone cut, start from its encloser.
	 * This is for the case when we get to a dead end
	 * (and need glue from the parent), or a DS refetch. */
	if (qry->parent) {
		const knot_dname_t *parent = qry->parent->zone_cut.name;
		if (parent[0] != '\0'
		    && knot_dname_in_bailiwick(qry->sname, parent) >= 0) {
			requested_name = knot_wire_next_label(parent, NULL);
		}
	} else if ((qry->stype == KNOT_RRTYPE_DS) && (qry->sname[0] != '\0')) {
		/* If this is an explicit DS query, start from the encloser too. */
		requested_name = knot_wire_next_label(requested_name, NULL);
	}

	int state = KR_STATE_FAIL;
	do {
		state = ns_fetch_cut(qry, requested_name, request, packet);
		if (state == KR_STATE_DONE || (state & KR_STATE_FAIL)) {
			return state;
		} else if (state == KR_STATE_CONSUME) {
			requested_name = knot_wire_next_label(requested_name, NULL);
		}
	} while (state == KR_STATE_CONSUME);

	/* Update the minimized QNAME if the zone cut changed. */
	if (qry->zone_cut.name && qry->zone_cut.name[0] != '\0' && !(qry->flags.NO_MINIMIZE)) {
		if (kr_make_query(qry, packet) != 0) {
			return KR_STATE_FAIL;
		}
	}
	qry->flags.AWAIT_CUT = false;

	/* Check the trust chain. */
	return trust_chain_check(request, qry);
}

static int ns_resolve_addr(struct kr_query *qry, struct kr_request *param, struct kr_transport *transport, uint16_t next_type)
{
	struct kr_rplan *rplan = &param->rplan;
	struct kr_context *ctx = param->ctx;

	/* Start NS queries from root, to avoid certain cases
	 * where a NS drops out of cache and the rest is unavailable,
	 * this would lead to a dependency loop in the current zone cut.
	 */

	/* Bail out if the query is already pending or a dependency loop. */
	if (!next_type || kr_rplan_satisfies(qry->parent, transport->ns_name, KNOT_CLASS_IN, next_type)) {
		/* Fall back to SBELT if the root server query fails. */
		if (!next_type && qry->zone_cut.name[0] == '\0') {
			VERBOSE_MSG(qry, "=> fallback to root hints\n");
			kr_zonecut_set_sbelt(ctx, &qry->zone_cut);
			return kr_error(EAGAIN);
		}
		/* No IPv4 nor IPv6, flag the server as unusable. */
		VERBOSE_MSG(qry, "=> unresolvable NS address, bailing out\n");
		kr_zonecut_del_all(&qry->zone_cut, transport->ns_name);
		return kr_error(EHOSTUNREACH);
	}
	/* Push a new query to the resolution plan. */
	struct kr_query *next =
		kr_rplan_push(rplan, qry, transport->ns_name, KNOT_CLASS_IN, next_type);
	if (!next) {
		return kr_error(ENOMEM);
	}
	next->flags.NONAUTH = true;

	/* At the root level with no NS addresses, add SBELT subrequest. */
	int ret = 0;
	if (qry->zone_cut.name[0] == '\0') {
		ret = kr_zonecut_set_sbelt(ctx, &next->zone_cut);
		if (ret == 0) { /* Copy TA and key since it's the same cut to avoid lookup. */
			kr_zonecut_copy_trust(&next->zone_cut, &qry->zone_cut);
			kr_zonecut_set_sbelt(ctx, &qry->zone_cut); /* Add SBELT to parent in case query fails. */
		}
	} else {
		next->flags.AWAIT_CUT = true;
	}

	if (ret == 0) {
		if (next_type == KNOT_RRTYPE_AAAA) {
			qry->flags.AWAIT_IPV6 = true;
		} else {
			qry->flags.AWAIT_IPV4 = true;
		}
	}

	return ret;
}

int kr_resolve_produce(struct kr_request *request, struct kr_transport **transport, knot_pkt_t *packet)
{
	struct kr_rplan *rplan = &request->rplan;

	/* No query left for resolution */
	if (kr_rplan_empty(rplan)) {
		return KR_STATE_FAIL;
	}

	struct kr_query *qry = array_tail(rplan->pending);

	/* Initialize server selection */
	if (!qry->server_selection.initialized) {
		kr_server_selection_init(qry);
	}

	/* If we have deferred answers, resume them. */
	if (qry->deferred != NULL) {
		/* @todo: Refactoring validator, check trust chain before resuming. */
		int state = 0;
		if (((qry->flags.FORWARD) == 0) ||
		    ((qry->stype == KNOT_RRTYPE_DS) && (qry->flags.CNAME))) {
			state = trust_chain_check(request, qry);
		} else {
			state = forward_trust_chain_check(request, qry, true);
		}

		switch(state) {
		case KR_STATE_FAIL: return KR_STATE_FAIL;
		case KR_STATE_DONE: return KR_STATE_PRODUCE;
		default: break;
		}
		VERBOSE_MSG(qry, "=> resuming yielded answer\n");
		struct kr_layer_pickle *pickle = qry->deferred;
		request->state = KR_STATE_YIELD;
		set_yield(&request->answ_selected, qry->uid, false);
		set_yield(&request->auth_selected, qry->uid, false);
		RESUME_LAYERS(layer_id(request, pickle->api), request, qry, consume, pickle->pkt);
		if (request->state != KR_STATE_YIELD) {
			/* No new deferred answers, take the next */
			qry->deferred = pickle->next;
		}
	} else {
		/* The caller is interested in always tracking a zone cut, even if the answer is cached.
		 * This is normally not required and incurs extra cache lookups for cached answers. */
		if (qry->flags.ALWAYS_CUT) {
			if (!(qry->flags.STUB)) {
				switch(zone_cut_check(request, qry, packet)) {
				case KR_STATE_FAIL: return KR_STATE_FAIL;
				case KR_STATE_DONE: return KR_STATE_PRODUCE;
				default: break;
				}
			}
		}
		/* Resolve current query and produce dependent or finish */
		request->state = KR_STATE_PRODUCE;
		ITERATE_LAYERS(request, qry, produce, packet);
		if (!(request->state & KR_STATE_FAIL) && knot_wire_get_qr(packet->wire)) {
			/* Produced an answer from cache, consume it. */
			qry->secret = 0;
			request->state = KR_STATE_CONSUME;
			ITERATE_LAYERS(request, qry, consume, packet);
		}
	}
	switch(request->state) {
	case KR_STATE_FAIL: return request->state;
	case KR_STATE_CONSUME: break;
	case KR_STATE_DONE:
	default: /* Current query is done */
		if (qry->flags.RESOLVED && request->state != KR_STATE_YIELD) {
			kr_rplan_pop(rplan, qry);
		}
		ITERATE_LAYERS(request, qry, reset);
		return kr_rplan_empty(rplan) ? KR_STATE_DONE : KR_STATE_PRODUCE;
	}

	/* This query has RD=0 or is ANY, stop here. */
	if (qry->stype == KNOT_RRTYPE_ANY ||
	    !knot_wire_get_rd(request->qsource.packet->wire)) {
		VERBOSE_MSG(qry, "=> qtype is ANY or RD=0, bail out\n");
		return KR_STATE_FAIL;
	}

	/* Update zone cut, spawn new subrequests. */
	if (!(qry->flags.STUB)) {
		int state = zone_cut_check(request, qry, packet);
		switch(state) {
		case KR_STATE_FAIL: return KR_STATE_FAIL;
		case KR_STATE_DONE: return KR_STATE_PRODUCE;
		default: break;
		}
	}

	const struct kr_qflags qflg = qry->flags;
	const bool retry = qflg.TCP || qflg.BADCOOKIE_AGAIN;
	if (!qflg.FORWARD && !qflg.STUB && !retry) { /* Keep NS when requerying/stub/badcookie. */
		/* Root DNSKEY must be fetched from the hints to avoid chicken and egg problem. */
		if (qry->sname[0] == '\0' && qry->stype == KNOT_RRTYPE_DNSKEY) {
			kr_zonecut_set_sbelt(request->ctx, &qry->zone_cut);
		}
	}

	qry->server_selection.choose_transport(qry, transport);

	if (*transport == NULL) {
		/* Properly signal to the serve_stale module. */
		if (qry->flags.NO_NS_FOUND) {
			ITERATE_LAYERS(request, qry, reset);
			kr_rplan_pop(rplan, qry);
			return KR_STATE_FAIL;
		} else {
			/* FIXME: This is probably quite inefficient:
			 * we go through the whole qr_task_step loop just because of the serve_stale
			 * module which might not even be loaded. */
			qry->flags.NO_NS_FOUND = true;
			return KR_STATE_PRODUCE;
		}
	}

	if ((*transport)->protocol == KR_TRANSPORT_RESOLVE_A || (*transport)->protocol == KR_TRANSPORT_RESOLVE_AAAA) {
		uint16_t type = (*transport)->protocol == KR_TRANSPORT_RESOLVE_A ? KNOT_RRTYPE_A : KNOT_RRTYPE_AAAA;
		ns_resolve_addr(qry, qry->request, *transport, type);
		ITERATE_LAYERS(request, qry, reset);
		return KR_STATE_PRODUCE;
	}

	/* Randomize query case (unless 0x20 randomization is turned off). */
	qry->secret = qry->flags.NO_0X20 ? 0 : kr_rand_bytes(sizeof(qry->secret));
	knot_dname_t *qname_raw = knot_pkt_qname(packet);
	randomized_qname_case(qname_raw, qry->secret);

	/*
	 * The additional query is going to be finalized when calling
	 * kr_resolve_checkout().
	 */
	qry->timestamp_mono = kr_now();
	return request->state;
}

#if ENABLE_COOKIES
/** Update DNS cookie data in packet. */
static bool outbound_request_update_cookies(struct kr_request *req,
                                            const struct sockaddr *src,
                                            const struct sockaddr *dst)
{
	if (kr_fails_assert(req))
		return false;

	/* RFC 7873 4.1 strongly requires the server address. */
	if (!dst)
		return false;

	struct kr_cookie_settings *clnt_sett = &req->ctx->cookie_ctx.clnt;

	/* Cookies disabled or packet has no EDNS section. */
	if (!clnt_sett->enabled)
		return true;

	/*
	 * RFC 7873 4.1 recommends using also the client address. The matter is
	 * also discussed in section 6.
	 */

	kr_request_put_cookie(&clnt_sett->current, req->ctx->cache_cookie,
	                      src, dst, req);

	return true;
}
#endif /* ENABLE_COOKIES */

int kr_resolve_checkout(struct kr_request *request, const struct sockaddr *src,
                        struct kr_transport *transport, knot_pkt_t *packet)
{
	/* @todo: Update documentation if this function becomes approved. */

	struct kr_rplan *rplan = &request->rplan;

	if (knot_wire_get_qr(packet->wire) != 0) {
		return kr_ok();
	}

	/* No query left for resolution */
	if (kr_rplan_empty(rplan)) {
		return kr_error(EINVAL);
	}
	struct kr_query *qry = array_tail(rplan->pending);

	/* Map the transport protocol to a socket type first;
	 * it is needed by both the cookie update and the checkout layers. */
	int type = -1;
	switch(transport->protocol) {
	case KR_TRANSPORT_UDP:
		type = SOCK_DGRAM;
		break;
	case KR_TRANSPORT_TCP:
	case KR_TRANSPORT_TLS:
		type = SOCK_STREAM;
		break;
	default:
		kr_assert(false);
	}

#if ENABLE_COOKIES
	/* Update DNS cookies in request. */
	if (type == SOCK_DGRAM) { /* @todo: Add cookies also over TCP? */
		/*
		 * The actual server IP address is needed before generating the
		 * actual cookie. If we don't know the server address then we
		 * also don't know the actual cookie size.
		 */
		if (!outbound_request_update_cookies(request, src, &transport->address.ip)) {
			return kr_error(EINVAL);
		}
	}
#endif /* ENABLE_COOKIES */

	int ret = query_finalize(request, qry, packet);
	if (ret != 0) {
		return kr_error(EINVAL);
	}

	/* Track changes in minimization secret to enable/disable minimization */
	uint32_t old_minimization_secret = qry->secret;

	/* Run the checkout layers and cancel on failure.
	 * The checkout layer doesn't persist the state, so canceled subrequests
	 * don't affect the resolution or rest of the processing. */
	int state = request->state;
	ITERATE_LAYERS(request, qry, checkout, packet, &transport->address.ip, type);
	if (request->state & KR_STATE_FAIL) {
		request->state = state; /* Restore */
		return kr_error(ECANCELED);
	}

	/* Randomize query case (if secret changed) */
	knot_dname_t *qname = knot_pkt_qname(packet);
	if (qry->secret != old_minimization_secret) {
		randomized_qname_case(qname, qry->secret);
	}

	/* Write down OPT unless in safemode */
	if (!(qry->flags.NO_EDNS)) {
		ret = edns_put(packet, true);
		if (ret != 0) {
			return kr_error(EINVAL);
		}
	}

	if (kr_log_is_debug_qry(RESOLVER, qry)) {
		KR_DNAME_GET_STR(qname_str, knot_pkt_qname(packet));
		KR_DNAME_GET_STR(ns_name, transport->ns_name);
		KR_DNAME_GET_STR(zonecut_str, qry->zone_cut.name);
		KR_RRTYPE_GET_STR(type_str, knot_pkt_qtype(packet));
		const char *ns_str = kr_straddr(&transport->address.ip);

		VERBOSE_MSG(qry,
			"=> id: '%05u' querying: '%s'@'%s' zone cut: '%s' "
			"qname: '%s' qtype: '%s' proto: '%s'\n",
			qry->id, ns_name, ns_str ? ns_str : "", zonecut_str,
			qname_str, type_str, (qry->flags.TCP) ? "tcp" : "udp");
	}

	return kr_ok();
}

int kr_resolve_finish(struct kr_request *request, int state)
{
	request->state = state;
	/* Finalize answer and construct whole wire-format (unless dropping). */
	knot_pkt_t *answer = kr_request_ensure_answer(request);
	if (answer) {
		ITERATE_LAYERS(request, NULL, answer_finalize);
		answer_finalize(request);

		/* Defensive style, in case someone has forgotten.
		 * Beware: non-empty answers do make sense even in the SERVFAIL case, etc. */
		if (request->state != KR_STATE_DONE) {
			uint8_t *wire = answer->wire;
			switch (knot_wire_get_rcode(wire)) {
			case KNOT_RCODE_NOERROR:
			case KNOT_RCODE_NXDOMAIN:
				knot_wire_clear_ad(wire);
				knot_wire_clear_aa(wire);
				knot_wire_set_rcode(wire, KNOT_RCODE_SERVFAIL);
			}
		}
	}

	ITERATE_LAYERS(request, NULL, finish);

	struct kr_rplan *rplan = &request->rplan;
	struct kr_query *last = kr_rplan_last(rplan);
	VERBOSE_MSG(last, "finished in state: %d, queries: %zu, mempool: %zu B\n",
		  request->state, rplan->resolved.len, (size_t) mp_total_size(request->pool.ctx));

	/* Trace request finish */
	if (request->trace_finish) {
		request->trace_finish(request);
	}

	/* Uninstall all tracepoints */
	request->trace_finish = NULL;
	request->trace_log = NULL;

	return KR_STATE_DONE;
}

struct kr_rplan *kr_resolve_plan(struct kr_request *request)
{
	if (request) {
		return &request->rplan;
	}
	return NULL;
}

knot_mm_t *kr_resolve_pool(struct kr_request *request)
{
	if (request) {
		return &request->pool;
	}
	return NULL;
}

#undef VERBOSE_MSG