1 /*  Copyright (C) 2021 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
2 
3     This program is free software: you can redistribute it and/or modify
4     it under the terms of the GNU General Public License as published by
5     the Free Software Foundation, either version 3 of the License, or
6     (at your option) any later version.
7 
8     This program is distributed in the hope that it will be useful,
9     but WITHOUT ANY WARRANTY; without even the implied warranty of
10     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11     GNU General Public License for more details.
12 
13     You should have received a copy of the GNU General Public License
14     along with this program.  If not, see <https://www.gnu.org/licenses/>.
15  */
16 
17 #include "knot/zone/adjust.h"
18 #include "knot/common/log.h"
19 #include "knot/dnssec/zone-nsec.h"
20 #include "knot/zone/adds_tree.h"
21 #include "knot/zone/measure.h"
22 #include "libdnssec/error.h"
23 
node_non_dnssec_exists(const zone_node_t * node)24 static bool node_non_dnssec_exists(const zone_node_t *node)
25 {
26 	assert(node);
27 
28 	for (uint16_t i = 0; i < node->rrset_count; ++i) {
29 		switch (node->rrs[i].type) {
30 		case KNOT_RRTYPE_NSEC:
31 		case KNOT_RRTYPE_NSEC3:
32 		case KNOT_RRTYPE_RRSIG:
33 			continue;
34 		default:
35 			return true;
36 		}
37 	}
38 
39 	return false;
40 }
41 
/*!
 * \brief Recompute delegation/authority flags of a node.
 *
 * Clears and re-derives DELEG/NONAUTH and the hierarchical SUBTREE_AUTH /
 * SUBTREE_DATA flags; records the node into ctx->changed_nodes if any flag
 * actually changed. Requires the parent to be adjusted beforehand.
 */
int adjust_cb_flags(zone_node_t *node, adjust_ctx_t *ctx)
{
	assert(!(node->flags & NODE_FLAGS_DELETED));

	zone_node_t *up = node_parent(node);
	uint16_t old_flags = node->flags;
	bool carries_data = node_non_dnssec_exists(node);
	bool subtree_auth = false;

	node->flags &= ~(NODE_FLAGS_DELEG | NODE_FLAGS_NONAUTH |
	                 NODE_FLAGS_SUBTREE_AUTH | NODE_FLAGS_SUBTREE_DATA);

	if (up != NULL && (up->flags & (NODE_FLAGS_DELEG | NODE_FLAGS_NONAUTH))) {
		// anything below a zone cut is non-authoritative
		node->flags |= NODE_FLAGS_NONAUTH;
	} else if (node != ctx->zone->apex && node_rrtype_exists(node, KNOT_RRTYPE_NS)) {
		// non-apex NS RRset makes this a delegation point;
		// a DS record keeps the subtree authoritatively "secure"
		node->flags |= NODE_FLAGS_DELEG;
		subtree_auth = node_rrtype_exists(node, KNOT_RRTYPE_DS);
	} else {
		subtree_auth = carries_data;
	}

	if (subtree_auth) {
		node_set_flag_hierarch(node, NODE_FLAGS_SUBTREE_AUTH);
	}
	if (carries_data) {
		node_set_flag_hierarch(node, NODE_FLAGS_SUBTREE_DATA);
	}

	if (ctx->changed_nodes != NULL && node->flags != old_flags) {
		return zone_tree_insert(ctx->changed_nodes, &node);
	}

	return KNOT_EOK;
}
77 
/*! \brief Replace the NSEC3 node pointer with an owned copy of its owner name. */
int unadjust_cb_point_to_nsec3(zone_node_t *node, adjust_ctx_t *ctx)
{
	assert(ctx->changed_nodes == NULL);

	if ((node->flags & NODE_FLAGS_NSEC3_NODE) != 0) {
		// downgrade the NSEC3 node pointer to a plain NSEC3 dname
		node->nsec3_hash = knot_dname_copy(node->nsec3_node->owner, NULL);
		node->flags &= ~NODE_FLAGS_NSEC3_NODE;
	}

	return KNOT_EOK;
}
88 
/*!
 * \brief Ensure node->nsec3_wildcard_name holds the NSEC3 hash of "*.<owner>".
 *
 * With NSEC3 disabled the cached name is reset; with changed NSEC3 parameters
 * it is recomputed. Unlike before, errors from zone_tree_insert() are now
 * propagated instead of being silently dropped (every sibling callback in
 * this file already does so).
 */
int adjust_cb_wildcard_nsec3(zone_node_t *node, adjust_ctx_t *ctx)
{
	if (!knot_is_nsec3_enabled(ctx->zone)) {
		int ret = KNOT_EOK;
		if (node->nsec3_wildcard_name != NULL && ctx->changed_nodes != NULL) {
			ret = zone_tree_insert(ctx->changed_nodes, &node);
		}
		// NOTE(review): pointer is dropped without free() — presumably the
		// allocation is owned/released via the binode counterpart; confirm
		// before changing this to a free().
		node->nsec3_wildcard_name = NULL;
		return ret;
	}

	if (ctx->nsec3_param_changed) {
		// old hash computed with different parameters is useless
		node->nsec3_wildcard_name = NULL;
	}

	if (node->nsec3_wildcard_name != NULL) {
		return KNOT_EOK; // already computed and still valid
	}

	size_t wildcard_size = knot_dname_size(node->owner) + 2;
	size_t wildcard_nsec3 = zone_nsec3_name_len(ctx->zone);
	if (wildcard_size > KNOT_DNAME_MAXLEN) {
		// "*.<owner>" would not be a valid dname — nothing to precompute
		return KNOT_EOK;
	}

	node->nsec3_wildcard_name = malloc(wildcard_nsec3);
	if (node->nsec3_wildcard_name == NULL) {
		return KNOT_ENOMEM;
	}

	if (ctx->changed_nodes != NULL) {
		int ret = zone_tree_insert(ctx->changed_nodes, &node);
		if (ret != KNOT_EOK) {
			free(node->nsec3_wildcard_name);
			node->nsec3_wildcard_name = NULL;
			return ret;
		}
	}

	// construct "\x01*" + owner and hash it into the preallocated buffer
	knot_dname_t wildcard[wildcard_size];
	assert(wildcard_size > 2);
	memcpy(wildcard, "\x01""*", 2);
	memcpy(wildcard + 2, node->owner, wildcard_size - 2);
	return knot_create_nsec3_owner(node->nsec3_wildcard_name, wildcard_nsec3,
	                               wildcard, ctx->zone->apex->owner, &ctx->zone->nsec3_params);
}
129 
nsec3_params_match(const knot_rdataset_t * rrs,const dnssec_nsec3_params_t * params,size_t rdata_pos)130 static bool nsec3_params_match(const knot_rdataset_t *rrs,
131                                const dnssec_nsec3_params_t *params,
132                                size_t rdata_pos)
133 {
134 	assert(rrs != NULL);
135 	assert(params != NULL);
136 
137 	knot_rdata_t *rdata = knot_rdataset_at(rrs, rdata_pos);
138 
139 	return (knot_nsec3_alg(rdata) == params->algorithm
140 	        && knot_nsec3_iters(rdata) == params->iterations
141 	        && knot_nsec3_salt_len(rdata) == params->salt.size
142 	        && memcmp(knot_nsec3_salt(rdata), params->salt.data,
143 	                  params->salt.size) == 0);
144 }
145 
/*!
 * \brief Mark an NSEC3 node as part of the active chain if any of its NSEC3
 *        records matches the zone's current NSEC3 parameters.
 */
int adjust_cb_nsec3_flags(zone_node_t *node, adjust_ctx_t *ctx)
{
	const uint16_t prev_flags = node->flags;

	node->flags &= ~NODE_FLAGS_IN_NSEC3_CHAIN;
	const knot_rdataset_t *rrs = node_rdataset(node, KNOT_RRTYPE_NSEC3);
	if (rrs != NULL) {
		for (uint16_t i = 0; i < rrs->count; i++) {
			if (nsec3_params_match(rrs, &ctx->zone->nsec3_params, i)) {
				node->flags |= NODE_FLAGS_IN_NSEC3_CHAIN;
			}
		}
	}

	if (ctx->changed_nodes != NULL && node->flags != prev_flags) {
		return zone_tree_insert(ctx->changed_nodes, &node);
	}

	return KNOT_EOK;
}
165 
/*!
 * \brief Refresh the node's reference to its NSEC3 node.
 *
 * If the NSEC3 parameters changed, the cached hash is discarded and the NSEC3
 * node is looked up from scratch; otherwise only the existing binode pointer
 * is fixed up. Nodes whose flags or NSEC3 pointer changed are recorded into
 * ctx->changed_nodes.
 */
int adjust_cb_nsec3_pointer(zone_node_t *node, adjust_ctx_t *ctx)
{
	uint16_t flags_orig = node->flags;
	zone_node_t *ptr_orig = node->nsec3_node;
	int ret = KNOT_EOK;
	if (ctx->nsec3_param_changed) {
		// free the stale hash only when this binode half owns it exclusively
		// (the counterpart may share the same pointer — avoid double-free)
		if (!(node->flags & NODE_FLAGS_NSEC3_NODE) &&
		    node->nsec3_hash != binode_counterpart(node)->nsec3_hash) {
			free(node->nsec3_hash);
		}
		node->nsec3_hash = NULL;
		node->flags &= ~NODE_FLAGS_NSEC3_NODE;
		// re-resolve the NSEC3 node; failure is tolerated here on purpose
		(void)node_nsec3_node(node, ctx->zone);
	} else {
		ret = binode_fix_nsec3_pointer(node, ctx->zone);
	}
	// track the node if either its flags or its NSEC3 pointer changed
	if (ret == KNOT_EOK && ctx->changed_nodes != NULL &&
	    (flags_orig != node->flags || ptr_orig != node->nsec3_node)) {
		ret = zone_tree_insert(ctx->changed_nodes, &node);
	}
	return ret;
}
188 
/*!
 * \brief Link pointers to additional nodes for this RRSet.
 *
 * Scans the rdata of adjn->rrs[rr_at], collects glue candidates (mandatory
 * in-bailiwick glue for NS under a delegation, optional otherwise), and if
 * the resulting set differs from the stored one, replaces it via a binode
 * shallow copy. Ownership of the newly built additional_t transfers to the
 * node on success.
 */
static int discover_additionals(zone_node_t *adjn, uint16_t rr_at,
                                adjust_ctx_t *ctx)
{
	struct rr_data *rr_data = &adjn->rrs[rr_at];
	assert(rr_data != NULL);

	const knot_rdataset_t *rrs = &rr_data->rrs;
	knot_rdata_t *rdata = knot_rdataset_at(rrs, 0);
	uint16_t rdcount = rrs->count;

	uint16_t mandatory_count = 0;
	uint16_t others_count = 0;
	// VLAs sized by rdcount: each rdata contributes at most one glue entry
	glue_t mandatory[rdcount];
	glue_t others[rdcount];

	/* Scan new additional nodes. */
	for (uint16_t i = 0; i < rdcount; i++) {
		const knot_dname_t *dname = knot_rdata_name(rdata, rr_data->type);
		const zone_node_t *node = NULL;

		// skip names that do not resolve inside this zone
		if (!zone_contents_find_node_or_wildcard(ctx->zone, dname, &node)) {
			rdata = knot_rdataset_next(rdata);
			continue;
		}

		// in-bailiwick glue below a zone cut for an NS RRset is mandatory,
		// everything else is optional additional data
		glue_t *glue;
		if ((node->flags & (NODE_FLAGS_DELEG | NODE_FLAGS_NONAUTH)) &&
		    rr_data->type == KNOT_RRTYPE_NS &&
		    knot_dname_in_bailiwick(node->owner, adjn->owner) >= 0) {
			glue = &mandatory[mandatory_count++];
			glue->optional = false;
		} else {
			glue = &others[others_count++];
			glue->optional = true;
		}
		glue->node = node;
		glue->ns_pos = i;
		rdata = knot_rdataset_next(rdata);
	}

	/* Store sorted additionals by the type, mandatory first. */
	size_t total_count = mandatory_count + others_count;
	additional_t *new_addit = NULL;
	if (total_count > 0) {
		new_addit = malloc(sizeof(additional_t));
		if (new_addit == NULL) {
			return KNOT_ENOMEM;
		}
		new_addit->count = total_count;

		size_t size = total_count * sizeof(glue_t);
		new_addit->glues = malloc(size);
		if (new_addit->glues == NULL) {
			free(new_addit);
			return KNOT_ENOMEM;
		}

		// concatenate: mandatory glues first, then the optional ones
		size_t mandatory_size = mandatory_count * sizeof(glue_t);
		memcpy(new_addit->glues, mandatory, mandatory_size);
		memcpy(new_addit->glues + mandatory_count, others,
		       size - mandatory_size);
	}

	/* If the result differs, shallow copy node and store additionals. */
	if (!additional_equal(rr_data->additional, new_addit)) {
		if (ctx->changed_nodes != NULL) {
			zone_tree_insert(ctx->changed_nodes, &adjn);
		}

		if (!binode_additional_shared(adjn, adjn->rrs[rr_at].type)) {
			// this happens when additionals are adjusted twice during one update, e.g. IXFR-from-diff
			additional_clear(adjn->rrs[rr_at].additional);
		}

		int ret = binode_prepare_change(adjn, NULL);
		if (ret != KNOT_EOK) {
			return ret;
		}
		// binode_prepare_change may have swapped rrs — re-fetch the slot
		rr_data = &adjn->rrs[rr_at];

		rr_data->additional = new_addit;
	} else {
		// unchanged — discard the freshly built copy
		additional_clear(new_addit);
	}

	return KNOT_EOK;
}
277 
/*! \brief Recompute additional-record pointers for every RRset that needs them. */
int adjust_cb_additionals(zone_node_t *node, adjust_ctx_t *ctx)
{
	for (uint16_t i = 0; i < node->rrset_count; ++i) {
		if (!knot_rrtype_additional_needed(node->rrs[i].type)) {
			continue;
		}
		int ret = discover_additionals(node, i, ctx);
		if (ret != KNOT_EOK) {
			return ret;
		}
	}
	return KNOT_EOK;
}
292 
/*! \brief Combined callback: node flags first, then the NSEC3 pointer. */
int adjust_cb_flags_and_nsec3(zone_node_t *node, adjust_ctx_t *ctx)
{
	int ret = adjust_cb_flags(node, ctx);
	return (ret != KNOT_EOK) ? ret : adjust_cb_nsec3_pointer(node, ctx);
}
301 
/*! \brief Combined callback: NSEC3 pointer, wildcard NSEC3 name, additionals. */
int adjust_cb_nsec3_and_additionals(zone_node_t *node, adjust_ctx_t *ctx)
{
	int ret = adjust_cb_nsec3_pointer(node, ctx);
	if (ret != KNOT_EOK) {
		return ret;
	}
	ret = adjust_cb_wildcard_nsec3(node, ctx);
	if (ret != KNOT_EOK) {
		return ret;
	}
	return adjust_cb_additionals(node, ctx);
}
313 
/*! \brief Combined callback: wildcard NSEC3 name first, then the NSEC3 pointer. */
int adjust_cb_nsec3_and_wildcard(zone_node_t *node, adjust_ctx_t *ctx)
{
	int ret = adjust_cb_wildcard_nsec3(node, ctx);
	return (ret != KNOT_EOK) ? ret : adjust_cb_nsec3_pointer(node, ctx);
}
322 
/*! \brief No-op adjust callback, used when a tree needs traversal but no per-node work. */
int adjust_cb_void(_unused_ zone_node_t *node, _unused_ adjust_ctx_t *ctx)
{
	return KNOT_EOK;
}
327 
/*! \brief Per-traversal state shared between adjust_single() invocations. */
typedef struct {
	zone_node_t *first_node;    // first non-deleted node seen (tail of the prev-chain)
	adjust_ctx_t ctx;           // zone + changed-nodes tracking passed to the callback
	zone_node_t *previous_node; // last authoritative node with data (prev-pointer source)
	adjust_cb_t adjust_cb;      // per-node callback to execute
	bool adjust_prevs;          // whether to maintain the node->prev chain
	measure_t *m;               // optional zone-size measuring context

	// just for parallel
	unsigned threads;           // total worker count (<= 1 means sequential)
	unsigned thr_id;            // this worker's index for the round-robin split
	size_t i;                   // running node counter used by the split
	pthread_t thread;           // worker thread handle
	int ret;                    // worker's zone_tree_apply() result
	zone_tree_t *tree;          // tree this worker iterates
} zone_adjust_arg_t;
344 
/*!
 * \brief Per-node worker for zone_tree_apply(): handles the parallel split,
 *        optional measuring, prev-pointer maintenance and dispatches to the
 *        configured adjust callback.
 */
static int adjust_single(zone_node_t *node, void *data)
{
	assert(node != NULL);
	assert(data != NULL);

	zone_adjust_arg_t *args = (zone_adjust_arg_t *)data;

	// parallel adjust support
	if (args->threads > 1) {
		// round-robin split: this worker only handles every threads-th node
		if (args->i++ % args->threads != args->thr_id) {
			return KNOT_EOK;
		}
	}

	if (args->m != NULL) {
		knot_measure_node(node, args->m);
	}

	// deleted nodes are measured (above) but otherwise skipped
	if ((node->flags & NODE_FLAGS_DELETED)) {
		return KNOT_EOK;
	}

	// remember first node
	if (args->first_node == NULL) {
		args->first_node = node;
	}

	// set pointer to previous node
	// (also accept the binode counterpart as an already-correct prev)
	if (args->adjust_prevs && args->previous_node != NULL &&
	    node->prev != args->previous_node &&
	    node->prev != binode_counterpart(args->previous_node)) {
		zone_tree_insert(args->ctx.changed_nodes, &node);
		node->prev = args->previous_node;
	}

	// update remembered previous pointer only if authoritative
	if (!(node->flags & NODE_FLAGS_NONAUTH) && node->rrset_count > 0) {
		args->previous_node = node;
	}

	return args->adjust_cb(node, &args->ctx);
}
387 
/*! \brief Sequentially adjust one zone tree with the given callback. */
static int zone_adjust_tree(zone_tree_t *tree, adjust_ctx_t *ctx, adjust_cb_t adjust_cb,
                            bool adjust_prevs, measure_t *measure_ctx)
{
	if (zone_tree_is_empty(tree)) {
		return KNOT_EOK;
	}

	zone_adjust_arg_t arg = {
		.ctx = *ctx,
		.adjust_cb = adjust_cb,
		.adjust_prevs = adjust_prevs,
		.m = measure_ctx,
	};

	int ret = zone_tree_apply(tree, adjust_single, &arg);
	if (ret != KNOT_EOK) {
		return ret;
	}

	// close the prev-pointer cycle: first node points back to the last one
	if (adjust_prevs && arg.first_node != NULL) {
		zone_tree_insert(ctx->changed_nodes, &arg.first_node);
		arg.first_node->prev = arg.previous_node;
	}

	return KNOT_EOK;
}
413 
adjust_tree_thread(void * ctx)414 static void *adjust_tree_thread(void *ctx)
415 {
416 	zone_adjust_arg_t *arg = ctx;
417 
418 	arg->ret = zone_tree_apply(arg->tree, adjust_single, ctx);
419 
420 	return NULL;
421 }
422 
/*!
 * \brief Adjust a zone tree with multiple worker threads.
 *
 * Each worker traverses the whole tree but only processes every threads-th
 * node (round-robin, see adjust_single()). Changed nodes are collected into
 * per-thread trees and merged into ctx->changed_nodes after joining.
 *
 * Fix: thread create/join status is now kept in a local array instead of
 * reusing args[i].ret. Previously the main thread read and overwrote that
 * field unsynchronized while the worker wrote its zone_tree_apply() result
 * into it (a data race), and on the success path pthread_join() clobbered
 * the worker's result, losing any traversal error.
 */
static int zone_adjust_tree_parallel(zone_tree_t *tree, adjust_ctx_t *ctx,
                                     adjust_cb_t adjust_cb, unsigned threads)
{
	if (zone_tree_is_empty(tree)) {
		return KNOT_EOK;
	}

	zone_adjust_arg_t args[threads];
	memset(args, 0, sizeof(args));
	int thr_status[threads]; // pthread_create()/pthread_join() status, per worker
	int ret = KNOT_EOK;

	// prepare per-thread arguments, incl. private changed-nodes trees
	for (unsigned i = 0; i < threads; i++) {
		args[i].first_node = NULL;
		args[i].ctx = *ctx;
		args[i].adjust_cb = adjust_cb;
		args[i].adjust_prevs = false; // prev-pointers can't be fixed out of order
		args[i].m = NULL;
		args[i].tree = tree;
		args[i].threads = threads;
		args[i].i = 0;
		args[i].thr_id = i;
		args[i].ret = -1;
		if (ctx->changed_nodes != NULL) {
			args[i].ctx.changed_nodes = zone_tree_create(true);
			if (args[i].ctx.changed_nodes == NULL) {
				ret = KNOT_ENOMEM;
				break;
			}
			args[i].ctx.changed_nodes->flags = tree->flags;
		}
	}
	if (ret != KNOT_EOK) {
		// allocation failed part-way: release whatever was created
		for (unsigned i = 0; i < threads; i++) {
			zone_tree_free(&args[i].ctx.changed_nodes);
		}
		return ret;
	}

	for (unsigned i = 0; i < threads; i++) {
		thr_status[i] = pthread_create(&args[i].thread, NULL, adjust_tree_thread, &args[i]);
	}

	// join all workers, then merge their results; any error wins over EOK
	for (unsigned i = 0; i < threads; i++) {
		if (thr_status[i] == 0) {
			thr_status[i] = pthread_join(args[i].thread, NULL);
		}
		if (thr_status[i] != 0) {
			ret = knot_map_errno_code(thr_status[i]);
		} else if (args[i].ret != KNOT_EOK) {
			// worker's zone_tree_apply() result, safe to read after join
			ret = args[i].ret;
		}
		if (ret == KNOT_EOK && ctx->changed_nodes != NULL) {
			ret = zone_tree_merge(ctx->changed_nodes, args[i].ctx.changed_nodes);
		}
		zone_tree_free(&args[i].ctx.changed_nodes);
	}

	return ret;
}
480 
/*!
 * \brief Adjust both trees of a zone (NSEC3 tree first, then regular nodes).
 *
 * Loads NSEC3 parameters, refreshes the zone's DNSSEC flag, then runs the
 * given callbacks over the trees — sequentially or in parallel — optionally
 * measuring zone size and maintaining prev-pointers. Changed nodes are
 * collected into add_changed (may be NULL).
 */
int zone_adjust_contents(zone_contents_t *zone, adjust_cb_t nodes_cb, adjust_cb_t nsec3_cb,
                         bool measure_zone, bool adjust_prevs, unsigned threads,
                         zone_tree_t *add_changed)
{
	int ret = zone_contents_load_nsec3param(zone);
	if (ret != KNOT_EOK) {
		log_zone_error(zone->apex->owner,
		               "failed to load NSEC3 parameters (%s)",
		               knot_strerror(ret));
		return ret;
	}
	zone->dnssec = node_rrtype_is_signed(zone->apex, KNOT_RRTYPE_SOA);

	measure_t m = knot_measure_init(measure_zone, false);
	adjust_ctx_t ctx = { zone, add_changed, true };

	const bool parallel = (threads > 1);
	if (parallel) {
		// adjust_cb_flags needs parents adjusted before children, which
		// forces sequential processing (the same holds for
		// adjust_cb_flags_and_nsec3); measuring and prev-pointers are
		// equally order-dependent.
		assert(nodes_cb != adjust_cb_flags);
		assert(!measure_zone);
		assert(!adjust_prevs);
	}

	if (nsec3_cb != NULL) {
		ret = parallel
		    ? zone_adjust_tree_parallel(zone->nsec3_nodes, &ctx, nsec3_cb, threads)
		    : zone_adjust_tree(zone->nsec3_nodes, &ctx, nsec3_cb, adjust_prevs, &m);
	}
	if (ret == KNOT_EOK && nodes_cb != NULL) {
		ret = parallel
		    ? zone_adjust_tree_parallel(zone->nodes, &ctx, nodes_cb, threads)
		    : zone_adjust_tree(zone->nodes, &ctx, nodes_cb, adjust_prevs, &m);
	}

	if (ret == KNOT_EOK && measure_zone && nodes_cb != NULL && nsec3_cb != NULL) {
		knot_measure_finish_zone(&m, zone);
	}
	return ret;
}
523 
/*!
 * \brief Adjust only the nodes touched by an update (node_ptrs / nsec3_ptrs),
 *        optionally measuring the size difference.
 */
int zone_adjust_update(zone_update_t *update, adjust_cb_t nodes_cb, adjust_cb_t nsec3_cb, bool measure_diff)
{
	measure_t diff_m = knot_measure_init(false, measure_diff);
	adjust_ctx_t ctx = { update->new_cont, update->a_ctx->adjust_ptrs, zone_update_changed_nsec3param(update) };
	int ret = KNOT_EOK;

	if (nsec3_cb != NULL) {
		ret = zone_adjust_tree(update->a_ctx->nsec3_ptrs, &ctx, nsec3_cb, false, &diff_m);
	}
	if (ret == KNOT_EOK && nodes_cb != NULL) {
		ret = zone_adjust_tree(update->a_ctx->node_ptrs, &ctx, nodes_cb, false, &diff_m);
	}
	if (ret == KNOT_EOK && measure_diff && nodes_cb != NULL && nsec3_cb != NULL) {
		knot_measure_finish_update(&diff_m, update);
	}
	return ret;
}
541 
/*! \brief Fully (re-)adjust a zone: flags, NSEC3 links, additionals, adds-tree. */
int zone_adjust_full(zone_contents_t *zone, unsigned threads)
{
	// pass 1 (sequential): flags require parents to be adjusted before children
	int ret = zone_adjust_contents(zone, adjust_cb_flags, adjust_cb_nsec3_flags,
	                               true, true, 1, NULL);
	if (ret != KNOT_EOK) {
		return ret;
	}

	// pass 2 (parallelizable): NSEC3 pointers, wildcard hashes, additionals
	ret = zone_adjust_contents(zone, adjust_cb_nsec3_and_additionals, NULL,
	                           false, false, threads, NULL);
	if (ret != KNOT_EOK) {
		return ret;
	}

	// rebuild the reverse-additionals index from scratch
	additionals_tree_free(zone->adds_tree);
	return additionals_tree_from_zone(&zone->adds_tree, zone);
}
556 
/*! \brief Reverse-apply helper: re-discover additionals on the live node. */
static int adjust_additionals_cb(zone_node_t *node, void *ctx)
{
	adjust_ctx_t *actx = ctx;
	return adjust_cb_additionals(zone_tree_fix_get(node, actx->zone->nodes), actx);
}
563 
/*! \brief Reverse-apply helper: refresh the NSEC3 pointer on the live node. */
static int adjust_point_to_nsec3_cb(zone_node_t *node, void *ctx)
{
	adjust_ctx_t *actx = ctx;
	return adjust_cb_nsec3_pointer(zone_tree_fix_get(node, actx->zone->nodes), actx);
}
570 
/*!
 * \brief Adjust a zone after an incremental update.
 *
 * Runs the sequential flags pass over the whole zone, then — depending on
 * whether the NSEC3 parameters changed — either recomputes NSEC3 data for
 * the whole zone or only for the changed nodes, refreshes the additionals
 * index and re-adjusts nodes whose additionals point at changed nodes.
 */
int zone_adjust_incremental_update(zone_update_t *update, unsigned threads)
{
	int ret = zone_contents_load_nsec3param(update->new_cont);
	if (ret != KNOT_EOK) {
		return ret;
	}
	bool nsec3change = zone_update_changed_nsec3param(update);
	adjust_ctx_t ctx = { update->new_cont, update->a_ctx->adjust_ptrs, nsec3change };

	// flags pass must be sequential (parent before child) over the full zone
	ret = zone_adjust_contents(update->new_cont, adjust_cb_flags, adjust_cb_nsec3_flags,
	                           false, true, 1, update->a_ctx->adjust_ptrs);
	if (ret == KNOT_EOK) {
		if (nsec3change) {
			// NSEC3 parameters changed: recompute for every node in the zone
			ret = zone_adjust_contents(update->new_cont, adjust_cb_nsec3_and_wildcard, NULL,
			                           false, false, threads, update->a_ctx->adjust_ptrs);
			if (ret == KNOT_EOK) {
				// just measure zone size
				ret = zone_adjust_update(update, adjust_cb_void, adjust_cb_void, true);
			}
		} else {
			// parameters unchanged: only the touched nodes need new wildcards
			ret = zone_adjust_update(update, adjust_cb_wildcard_nsec3, adjust_cb_void, true);
		}
	}
	if (ret == KNOT_EOK) {
		if (update->new_cont->adds_tree != NULL && !nsec3change) {
			// incremental refresh of the reverse-additionals index
			ret = additionals_tree_update_from_binodes(
				update->new_cont->adds_tree,
				update->a_ctx->node_ptrs,
				update->new_cont
			);
		} else {
			// no usable index (or NSEC3 change invalidated it): rebuild
			additionals_tree_free(update->new_cont->adds_tree);
			ret = additionals_tree_from_zone(&update->new_cont->adds_tree, update->new_cont);
		}
	}
	if (ret == KNOT_EOK) {
		// nodes whose additionals reference changed nodes must be re-adjusted
		ret = additionals_reverse_apply_multi(
			update->new_cont->adds_tree,
			update->a_ctx->node_ptrs,
			adjust_additionals_cb,
			&ctx
		);
	}
	if (ret == KNOT_EOK) {
		ret = zone_adjust_update(update, adjust_cb_additionals, adjust_cb_void, false);
	}
	if (ret == KNOT_EOK) {
		if (!nsec3change) {
			// fix NSEC3 pointers of nodes referencing changed NSEC3 nodes
			ret = additionals_reverse_apply_multi(
				update->new_cont->adds_tree,
				update->a_ctx->nsec3_ptrs,
				adjust_point_to_nsec3_cb,
				&ctx
			);
		}
	}
	return ret;
}
629