/*  Copyright (C) 2014-2017 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
 *  SPDX-License-Identifier: GPL-3.0-or-later
 */

#include "lib/zonecut.h"

#include "contrib/cleanup.h"
#include "lib/defines.h"
#include "lib/generic/pack.h"
#include "lib/layer.h"
#include "lib/resolve.h"
#include "lib/rplan.h"

#include <libknot/descriptor.h>
#include <libknot/packet/wire.h>
#include <libknot/rrtype/rdname.h>

#define VERBOSE_MSG(qry, ...) QRVERBOSE(qry, ZCUT, __VA_ARGS__)

/** Information for one NS name + address type. */
typedef enum {
	AI_UNINITED = 0,
	AI_REPUT,	/**< Don't use this addrset, due to: cache_rep, NO_IPV6, ...
			 * cache_rep approximates various problems when fetching the RRset. */
	AI_CYCLED,	/**< Skipped due to cycle detection; see implementation for details. */
	AI_LAST_BAD = AI_CYCLED, /**< bad states: <= AI_LAST_BAD */
	AI_UNKNOWN,	/**< Don't know status of this RRset; various reasons. */
	AI_EMPTY,	/**< No usable address (may mean e.g. just NODATA). */
	AI_OK,		/**< At least one usable address.
			 * LATER: we might be interested whether it's only glue. */
} addrset_info_t;


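/** Replace the cut's name by a copy of the given name (no-op if equal). */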
static void update_cut_name(struct kr_zonecut *cut, const knot_dname_t *name)
{
	if (knot_dname_is_equal(name, cut->name)) {
		return;
	}
	knot_dname_t *next_name = knot_dname_copy(name, cut->pool);
	mm_free(cut->pool, cut->name);
	cut->name = next_name;
}

int kr_zonecut_init(struct kr_zonecut *cut, const knot_dname_t *name, knot_mm_t *pool)
{
	if (!cut || !name) {
		return kr_error(EINVAL);
	}

	memset(cut, 0, sizeof(*cut));
	cut->name = knot_dname_copy(name, pool);
	cut->pool = pool;
	cut->nsset = trie_create(pool);
	return cut->name && cut->nsset ? kr_ok() : kr_error(ENOMEM);
}

/** Completely free a pack_t. */
static inline void free_addr_set(pack_t *pack, knot_mm_t *pool)
{
	if (kr_fails_assert(pack)) {
		/* promised we don't store NULL packs */
		return;
	}
	pack_clear_mm(*pack, mm_free, pool);
	mm_free(pool, pack);
}
/** Trivial wrapper for use in trie_apply, due to ugly casting. */
static int free_addr_set_cb(trie_val_t *v, void *pool)
{
	free_addr_set(*v, pool);
	return kr_ok();
}

void kr_zonecut_deinit(struct kr_zonecut *cut)
{
	if (!cut) {
		return;
	}
	mm_free(cut->pool, cut->name);
	if (cut->nsset) {
		trie_apply(cut->nsset, free_addr_set_cb, cut->pool);
		trie_free(cut->nsset);
	}
	knot_rrset_free(cut->key, cut->pool);
	knot_rrset_free(cut->trust_anchor, cut->pool);
}
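
/* A minimal lifecycle sketch (illustrative comment only, not compiled;
 * "pool" stands for any knot_mm_t the caller owns):
 *
 *   struct kr_zonecut cut;
 *   if (kr_zonecut_init(&cut, (const uint8_t *)"", pool) == 0) {
 *           // ... populate via kr_zonecut_add(), query via kr_zonecut_find()
 *           kr_zonecut_deinit(&cut);  // frees name, nsset and trust data
 *   }
 *
 * kr_zonecut_deinit() leaves the struct itself allocated but dangling;
 * re-run kr_zonecut_init() before reusing it. */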

void kr_zonecut_move(struct kr_zonecut *to, const struct kr_zonecut *from)
{
	if (!to || !from) abort();
	kr_zonecut_deinit(to);
	memcpy(to, from, sizeof(*to));
}

void kr_zonecut_set(struct kr_zonecut *cut, const knot_dname_t *name)
{
	if (!cut || !name) {
		return;
	}
	knot_rrset_t *key, *ta;
	key = cut->key; cut->key = NULL;
	ta = cut->trust_anchor; cut->trust_anchor = NULL;
	kr_zonecut_deinit(cut);
	kr_zonecut_init(cut, name, cut->pool);
	cut->key = key;
	cut->trust_anchor = ta;
}

int kr_zonecut_copy(struct kr_zonecut *dst, const struct kr_zonecut *src)
{
	if (!dst || !src) {
		return kr_error(EINVAL);
	}
	if (!dst->nsset) {
		dst->nsset = trie_create(dst->pool);
	}
	/* Copy the contents, one by one. */
	int ret = kr_ok();
	trie_it_t *it;
	for (it = trie_it_begin(src->nsset); !trie_it_finished(it); trie_it_next(it)) {
		size_t klen;
		const char * const k = trie_it_key(it, &klen);
		pack_t **new_pack = (pack_t **)trie_get_ins(dst->nsset, k, klen);
		if (!new_pack) {
			ret = kr_error(ENOMEM);
			break;
		}
		const pack_t *old_pack = *trie_it_val(it);
		ret = pack_clone(new_pack, old_pack, dst->pool);
		if (ret) break;
	}
	trie_it_free(it);
	return ret;
}

int kr_zonecut_copy_trust(struct kr_zonecut *dst, const struct kr_zonecut *src)
{
	knot_rrset_t *key_copy = NULL;
	knot_rrset_t *ta_copy = NULL;

	if (src->key) {
		key_copy = knot_rrset_copy(src->key, dst->pool);
		if (!key_copy) {
			return kr_error(ENOMEM);
		}
	}

	if (src->trust_anchor) {
		ta_copy = knot_rrset_copy(src->trust_anchor, dst->pool);
		if (!ta_copy) {
			knot_rrset_free(key_copy, dst->pool);
			return kr_error(ENOMEM);
		}
	}

	knot_rrset_free(dst->key, dst->pool);
	dst->key = key_copy;
	knot_rrset_free(dst->trust_anchor, dst->pool);
	dst->trust_anchor = ta_copy;

	return kr_ok();
}

int kr_zonecut_add(struct kr_zonecut *cut, const knot_dname_t *ns, const void *data, int len)
{
	if (kr_fails_assert(cut && ns && cut->nsset && (!data || len > 0)))
		return kr_error(EINVAL);
	/* Disabled; add_reverse_pair() misuses this for domain name in rdata. */
	if (false && data && len != sizeof(struct in_addr)
		  && len != sizeof(struct in6_addr)) {
		kr_assert(!EINVAL);
		return kr_error(EINVAL);
	}

	/* Get a pack_t for the ns. */
	pack_t **pack = (pack_t **)trie_get_ins(cut->nsset, (const char *)ns, knot_dname_size(ns));
	if (!pack) return kr_error(ENOMEM);
	if (*pack == NULL) {
		*pack = mm_alloc(cut->pool, sizeof(pack_t));
		if (*pack == NULL) return kr_error(ENOMEM);
		pack_init(**pack);
	}
	/* Insert data (if it has any). */
	if (data == NULL) {
		return kr_ok();
	}
	/* Check for duplicates */
	if (pack_obj_find(*pack, data, len)) {
		return kr_ok();
	}
	/* Push new address */
	int ret = pack_reserve_mm(**pack, 1, len, kr_memreserve, cut->pool);
	if (ret != 0) {
		return kr_error(ENOMEM);
	}
	return pack_obj_push(*pack, data, len);
}
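
/* Usage sketch (comment only): register ns0.eu. with one IPv4 address.
 * The dname is in wire format; the address bytes are copied into the pack.
 * inet_pton() would need <arpa/inet.h>.
 *
 *   struct in_addr a4;
 *   inet_pton(AF_INET, "192.0.2.1", &a4);
 *   kr_zonecut_add(cut, (const uint8_t *)"\x03ns0\x02eu", &a4, sizeof(a4));
 *
 * Passing data == NULL just registers the NS name with an empty address set. */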

int kr_zonecut_del(struct kr_zonecut *cut, const knot_dname_t *ns, const void *data, int len)
{
	if (!cut || !ns || (data && len <= 0)) {
		return kr_error(EINVAL);
	}

	/* Find the address list. */
	int ret = kr_ok();
	pack_t *pack = kr_zonecut_find(cut, ns);
	if (pack == NULL) {
		return kr_error(ENOENT);
	}
	/* Remove address from the pack. */
	if (data) {
		ret = pack_obj_del(pack, data, len);
	}
	/* No servers left, remove NS from the set. */
	if (pack->len == 0) {
		free_addr_set(pack, cut->pool);
		ret = trie_del(cut->nsset, (const char *)ns, knot_dname_size(ns), NULL);
		if (kr_fails_assert(ret == 0)) /* only KNOT_ENOENT and that *can't* happen */
			return kr_error(ret);
		return kr_ok();
	}

	return ret;
}

int kr_zonecut_del_all(struct kr_zonecut *cut, const knot_dname_t *ns)
{
	if (!cut || !ns) {
		return kr_error(EINVAL);
	}

	/* Find the address list; then free and remove it. */
	pack_t *pack;
	int ret = trie_del(cut->nsset, (const char *)ns, knot_dname_size(ns),
			   (trie_val_t *)&pack);
	if (ret) { /* deletion failed */
		kr_assert(ret == KNOT_ENOENT);
		return kr_error(ENOENT);
	}
	free_addr_set(pack, cut->pool);
	return kr_ok();
}

pack_t *kr_zonecut_find(struct kr_zonecut *cut, const knot_dname_t *ns)
{
	if (!cut || !ns) {
		return NULL;
	}
	trie_val_t *val = trie_get_try(cut->nsset, (const char *)ns, knot_dname_size(ns));
	/* we get a pointer to the pack_t pointer */
	return val ? (pack_t *)*val : NULL;
}
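
/* Iteration sketch (comment only) over a pack returned by kr_zonecut_find();
 * addresses are stored as raw 4- or 16-byte objects, and the pack_* helpers
 * below come from lib/generic/pack.h:
 *
 *   pack_t *pack = kr_zonecut_find(cut, ns_name);
 *   if (pack) {
 *           for (uint8_t *obj = pack_head(*pack); obj != pack_tail(*pack);
 *                obj = pack_obj_next(obj)) {
 *                   const void *addr = pack_obj_val(obj);
 *                   size_t len = pack_obj_len(obj);  // 4 for IPv4, 16 for IPv6
 *                   // ... use (addr, len)
 *           }
 *   }
 */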

static int has_address(trie_val_t *v, void *baton_)
{
	const pack_t *pack = *v;
	const bool found = pack != NULL && pack->len != 0;
	return found;
}

bool kr_zonecut_is_empty(struct kr_zonecut *cut)
{
	if (kr_fails_assert(cut && cut->nsset))
		return true;
	return !trie_apply(cut->nsset, has_address, NULL);
}

int kr_zonecut_set_sbelt(struct kr_context *ctx, struct kr_zonecut *cut)
{
	if (!ctx || !cut || !ctx->root_hints.nsset) {
		return kr_error(EINVAL);
	}

	trie_apply(cut->nsset, free_addr_set_cb, cut->pool);
	trie_clear(cut->nsset);

	const uint8_t *const dname_root = (const uint8_t *)/*sign-cast*/("");
	update_cut_name(cut, dname_root);
	/* Copy root hints from resolution context. */
	return kr_zonecut_copy(cut, &ctx->root_hints);
}

/** Fetch address for zone cut.  Any rank is accepted (i.e. glue as well). */
static addrset_info_t fetch_addr(pack_t *addrs, const knot_dname_t *ns, uint16_t rrtype,
				 knot_mm_t *mm_pool, const struct kr_query *qry)
// LATER(optim.): excessive data copying
{
	int rdlen;
	switch (rrtype) {
	case KNOT_RRTYPE_A:
		rdlen = 4;
		break;
	case KNOT_RRTYPE_AAAA:
		rdlen = 16;
		break;
	default:
		kr_assert(!EINVAL);
		return AI_UNKNOWN;
	}

	struct kr_context *ctx = qry->request->ctx;
	struct kr_cache_p peek;
	if (kr_cache_peek_exact(&ctx->cache, ns, rrtype, &peek) != 0) {
		return AI_UNKNOWN;
	}
	int32_t new_ttl = kr_cache_ttl(&peek, qry, ns, rrtype);
	if (new_ttl < 0) {
		return AI_UNKNOWN;
	}

	knot_rrset_t cached_rr;
	knot_rrset_init(&cached_rr, /*const-cast*/(knot_dname_t *)ns, rrtype,
			KNOT_CLASS_IN, new_ttl);
	if (kr_cache_materialize(&cached_rr.rrs, &peek, mm_pool) < 0) {
		return AI_UNKNOWN;
	}

	/* Reserve memory in *addrs.  Implementation detail:
	 * pack_t cares for lengths, so we don't store those in the data. */
	const size_t pack_extra_size = cached_rr.rrs.size
		- cached_rr.rrs.count * offsetof(knot_rdata_t, data);
	int ret = pack_reserve_mm(*addrs, cached_rr.rrs.count, pack_extra_size,
				  kr_memreserve, mm_pool);
	if (ret) abort(); /* ENOMEM "probably" */
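	/* Worked example of the arithmetic above: two cached A records occupy
	 * 2 * (2 + 4) = 12 bytes in rrs.size (each knot_rdata_t is a uint16_t
	 * length header followed by the data, and 4- and 16-byte lengths need
	 * no alignment padding), so pack_extra_size = 12 - 2 * 2 = 8, exactly
	 * the address bytes pushed below. */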

	int usable_cnt = 0;
	addrset_info_t result = AI_EMPTY;
	knot_rdata_t *rd = cached_rr.rrs.rdata;
	for (uint16_t i = 0; i < cached_rr.rrs.count; ++i, rd = knot_rdataset_next(rd)) {
		if (unlikely(rd->len != rdlen)) {
			VERBOSE_MSG(qry, "bad NS address length %d for rrtype %d, skipping\n",
					(int)rd->len, (int)rrtype);
			continue;
		}
		result = AI_OK;
		++usable_cnt;

		ret = pack_obj_push(addrs, rd->data, rd->len);
		kr_assert(!ret); /* can only fail if the reservation above was too small */
		/* LATER: for now we lose quite some information here,
		 * as keeping it would need substantial changes in other places,
		 * and it turned out to be premature optimization (most likely).
		 * We might e.g. skip adding unusable addresses,
		 * and either keep some rtt information associated
		 * or even finish up choosing the set to send packets to.
		 * Overall there's some overlap with nsrep.c functionality.
		 */
	}
	if (usable_cnt != cached_rr.rrs.count) {
		VERBOSE_MSG(qry, "usable NS addresses: %d/%d\n",
				usable_cnt, cached_rr.rrs.count);
	}
	return result;
}

/** Fetch best NS for zone cut. */
static int fetch_ns(struct kr_context *ctx, struct kr_zonecut *cut,
		    const knot_dname_t *name, const struct kr_query *qry,
		    uint8_t * restrict rank)
{
	struct kr_cache_p peek;
	int ret = kr_cache_peek_exact(&ctx->cache, name, KNOT_RRTYPE_NS, &peek);
	if (ret != 0) {
		return ret;
	}
	/* Note: we accept *any* rank from the cache.  We assume that nothing
	 * completely untrustworthy could get into the cache, e.g. out-of-bailiwick
	 * records that weren't validated.
	 */
	*rank = peek.rank;

	int32_t new_ttl = kr_cache_ttl(&peek, qry, name, KNOT_RRTYPE_NS);
	if (new_ttl < 0) {
		return kr_error(ESTALE);
	}
	/* Materialize the rdataset temporarily, for simplicity. */
	knot_rdataset_t ns_rds = { 0 };
	ret = kr_cache_materialize(&ns_rds, &peek, cut->pool);
	if (ret < 0) {
		return ret;
	}

	/* Insert name servers for this zone cut, addresses will be looked up
	 * on-demand (either from cache or iteratively) */
	bool all_bad = true; /**< All NSs (seen so far) are in a bad state. */
	knot_rdata_t *rdata_i = ns_rds.rdata;
	for (unsigned i = 0; i < ns_rds.count;
			++i, rdata_i = knot_rdataset_next(rdata_i)) {
		const knot_dname_t *ns_name = knot_ns_name(rdata_i);
		const size_t ns_size = knot_dname_size(ns_name);

		/* Get a new pack within the nsset. */
		pack_t **pack = (pack_t **)trie_get_ins(cut->nsset,
					(const char *)ns_name, ns_size);
		if (!pack) return kr_error(ENOMEM);
		kr_assert(!*pack); /* not critical, really */
		*pack = mm_alloc(cut->pool, sizeof(pack_t));
		if (!*pack) return kr_error(ENOMEM);
		pack_init(**pack);

		addrset_info_t infos[2];

		/* Fetch the cached A/AAAA addresses for this NS, if any. */
		infos[0] = fetch_addr(*pack, ns_name, KNOT_RRTYPE_A, cut->pool, qry);
		infos[1] = fetch_addr(*pack, ns_name, KNOT_RRTYPE_AAAA, cut->pool, qry);

		#if 0 /* rather unlikely to be useful unless changing some zcut code */
		if (kr_log_is_debug_qry(ZCUT, qry)) {
			auto_free char *ns_name_txt = kr_dname_text(ns_name);
			VERBOSE_MSG(qry, "NS %s infos: %d, %d\n",
					ns_name_txt, (int)infos[0], (int)infos[1]);
		}
		#endif

		/* AI_CYCLED checks.
		 * If an ancestor query has its zone cut in the state that
		 * it's looking for name or address(es) of some NS(s),
		 * we want to avoid doing so with a NS that lies under its cut.
		 * Instead we need to consider such names unusable in the cut (for now). */
		if (infos[0] != AI_UNKNOWN && infos[1] != AI_UNKNOWN) {
			/* Optimization: the following loop would be pointless. */
			all_bad = false;
			continue;
		}
		for (const struct kr_query *aq = qry; aq->parent; aq = aq->parent) {
			const struct kr_qflags *aqpf = &aq->parent->flags;
			if (   (aqpf->AWAIT_CUT  && aq->stype == KNOT_RRTYPE_NS)
			    || (aqpf->AWAIT_IPV4 && aq->stype == KNOT_RRTYPE_A)
			    || (aqpf->AWAIT_IPV6 && aq->stype == KNOT_RRTYPE_AAAA)) {
				if (knot_dname_in_bailiwick(ns_name,
							aq->parent->zone_cut.name)) {
					for (int j = 0; j < 2; ++j)
						if (infos[j] == AI_UNKNOWN)
							infos[j] = AI_CYCLED;
					break;
				}
			} else {
				/* This ancestor is waiting for a reason other than
				 * an NS name or address, so we're out of a direct cycle. */
				break;
			}
		}
		all_bad = all_bad && infos[0] <= AI_LAST_BAD && infos[1] <= AI_LAST_BAD;
	}

	if (all_bad && kr_log_is_debug_qry(ZCUT, qry)) {
		auto_free char *name_txt = kr_dname_text(name);
		VERBOSE_MSG(qry, "cut %s: all NSs bad, count = %d\n",
				name_txt, (int)ns_rds.count);
	}
	knot_rdataset_clear(&ns_rds, cut->pool);
	return all_bad ? ELOOP : kr_ok();
}

/**
 * Fetch secure RRSet of given type.
 */
static int fetch_secure_rrset(knot_rrset_t **rr, struct kr_cache *cache,
	const knot_dname_t *owner, uint16_t type, knot_mm_t *pool,
	const struct kr_query *qry)
{
	if (kr_fails_assert(rr))
		return kr_error(EINVAL);
	/* peek, check rank and TTL */
	struct kr_cache_p peek;
	int ret = kr_cache_peek_exact(cache, owner, type, &peek);
	if (ret != 0)
		return ret;
	if (!kr_rank_test(peek.rank, KR_RANK_SECURE))
		return kr_error(ENOENT);
	int32_t new_ttl = kr_cache_ttl(&peek, qry, owner, type);
	if (new_ttl < 0)
		return kr_error(ESTALE);
	/* materialize a new RRset */
	knot_rrset_free(*rr, pool);
	*rr = mm_alloc(pool, sizeof(knot_rrset_t));
	if (*rr == NULL)
		return kr_error(ENOMEM);
	owner = knot_dname_copy(/*const-cast*/(knot_dname_t *)owner, pool);
	if (!owner) {
		mm_free(pool, *rr);
		*rr = NULL;
		return kr_error(ENOMEM);
	}
	knot_rrset_init(*rr, /*const-cast*/(knot_dname_t *)owner, type,
			KNOT_CLASS_IN, new_ttl);
	ret = kr_cache_materialize(&(*rr)->rrs, &peek, pool);
	if (ret < 0) {
		knot_rrset_free(*rr, pool);
		*rr = NULL;
		return ret;
	}

	return kr_ok();
}

int kr_zonecut_find_cached(struct kr_context *ctx, struct kr_zonecut *cut,
			   const knot_dname_t *name, const struct kr_query *qry,
			   bool * restrict secured)
{
	if (!ctx || !cut || !name)
		return kr_error(EINVAL);
	/* I'm not sure whether the caller always passes a clean state;
	 * mixing doesn't seem to make sense in any case, so let's clear it.
	 * We don't bother freeing the packs, as they're on mempool. */
	trie_clear(cut->nsset);
	/* Copy name as it may overlap with the cut name that is to be replaced. */
	knot_dname_t *qname = knot_dname_copy(name, cut->pool);
	if (!qname) {
		return kr_error(ENOMEM);
	}
	/* Start at QNAME. */
	int ret;
	const knot_dname_t *label = qname;
	while (true) {
		/* Fetch NS first and see if it's insecure. */
		uint8_t rank = 0;
		const bool is_root = (label[0] == '\0');
		ret = fetch_ns(ctx, cut, label, qry, &rank);
		if (ret == 0) {
			/* Flag as insecure if cached as this */
			if (kr_rank_test(rank, KR_RANK_INSECURE)) {
				*secured = false;
			}
			/* Fetch DS and DNSKEY if the caller wants a secure zone cut. */
			int ret_ds = 1, ret_dnskey = 1;
			if (*secured || is_root) {
				ret_ds = fetch_secure_rrset(&cut->trust_anchor, &ctx->cache,
						label, KNOT_RRTYPE_DS, cut->pool, qry);
				ret_dnskey = fetch_secure_rrset(&cut->key, &ctx->cache,
						label, KNOT_RRTYPE_DNSKEY, cut->pool, qry);
			}
			update_cut_name(cut, label);
			if (kr_log_is_debug_qry(ZCUT, qry)) {
				auto_free char *label_str = kr_dname_text(label);
				VERBOSE_MSG(qry,
					"found cut: %s (rank 0%.2o return codes: DS %d, DNSKEY %d)\n",
					label_str, rank, ret_ds, ret_dnskey);
			}
			ret = kr_ok();
			break;
		} /* else */

		trie_clear(cut->nsset);
		/* Strip one leading label from QNAME. */
		if (!is_root) {
			label = knot_wire_next_label(label, NULL);
		} else {
			ret = kr_error(ENOENT);
			break;
		}
	}

	kr_cache_commit(&ctx->cache);
	mm_free(cut->pool, qname);
	return ret;
}
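
/* Typical call pattern (comment-only sketch, mirroring how the resolver
 * establishes a query's zone cut from cache, with root hints as fallback):
 *
 *   bool secured = true;
 *   int ret = kr_zonecut_find_cached(ctx, &qry->zone_cut, qry->sname,
 *                                    qry, &secured);
 *   if (ret != 0)  // nothing usable cached on the path to the root
 *           ret = kr_zonecut_set_sbelt(ctx, &qry->zone_cut);
 */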