1 /* Copyright © 2012 Brandon L Black <blblack@gmail.com> and Jay Reitz <jreitz@gmail.com>
2 *
3 * This file is part of gdnsd.
4 *
5 * gdnsd is free software: you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation, either version 3 of the License, or
8 * (at your option) any later version.
9 *
10 * gdnsd is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with gdnsd. If not, see <http://www.gnu.org/licenses/>.
17 *
18 */
19
20 #include <config.h>
21 #include "ltree.h"
22
23 #include "conf.h"
24 #include "dnspacket.h"
25 #include "ltarena.h"
26
27 #include <gdnsd/alloc.h>
28 #include <gdnsd/dname.h>
29 #include <gdnsd/log.h>
30
31 #include <string.h>
32 #include <stdlib.h>
33 #include <fcntl.h>
34 #include <unistd.h>
35 #include <limits.h>
36
// special label used to hide out-of-zone glue
// inside zone root node child lists
static const uint8_t ooz_glue_label[1] = { 0 };

// Log an error and abort processing of the current record by
// returning true ("failed") from the enclosing function.
#define log_zfatal(...)\
    do {\
        log_err(__VA_ARGS__);\
        return true;\
    } while(0)

// Log a warning about questionable zone data; if the zones_strict_data
// option is set, the warning is promoted to a fatal error (returns true
// from the enclosing function).
// Note: no semicolon after while(0) -- the caller supplies it.  A stray
// trailing semicolon here would defeat the do/while(0) idiom and break
// uses like "if(x) log_zwarn(...); else ...".
#define log_zwarn(...)\
    do {\
        if(gcfg->zones_strict_data) {\
            log_err(__VA_ARGS__);\
            return true;\
        }\
        else {\
            log_warn(__VA_ARGS__);\
        }\
    } while(0)
57
// don't use this directly, use macro below
// this logs the lstack labels as a partial domainname (possibly empty),
// intended to be completed with the zone name via the macro below
static const char* _logf_lstack(const uint8_t** lstack, unsigned depth) {
    // 1024 bytes is sufficient: a dname carries at most ~254 label+data
    // bytes, each data byte expands to at most 4 output chars ("\NNN"),
    // plus one '.' per label and the terminating NUL.
    char* dnbuf = dmn_fmtbuf_alloc(1024);
    char* dnptr = dnbuf;

    while(depth--) {
        const uint8_t llen = *(lstack[depth]);
        for(unsigned i = 1; i <= llen; i++) {
            // Work with the unsigned byte value: a cast to (possibly
            // signed) char made bytes >= 0x80 negative, producing bogus
            // decimal escapes (e.g. 0xFF rendered as "\00/").
            const unsigned x = lstack[depth][i];
            if(x > 0x20U && x < 0x7FU) {
                // printable, non-space ASCII passes through as-is
                *dnptr++ = (char)x;
            }
            else {
                // everything else becomes a 3-digit decimal escape \DDD
                *dnptr++ = '\\';
                *dnptr++ = (char)('0' + (x / 100U));
                *dnptr++ = (char)('0' + ((x / 10U) % 10U));
                *dnptr++ = (char)('0' + (x % 10U));
            }
        }
        *dnptr++ = '.';
    }

    *dnptr = '\0';
    return dnbuf;
}

// expands to two printf arguments: the partial name and the zone name
#define logf_lstack(_lstack, _depth, _zdname) \
    _logf_lstack(_lstack, _depth), logf_dname(_zdname)
88
89 #ifndef HAVE_BUILTIN_CLZ
90
91 F_CONST
count2mask(uint32_t x)92 static uint32_t count2mask(uint32_t x) {
93 x |= 1U;
94 x |= x >> 1U;
95 x |= x >> 2U;
96 x |= x >> 4U;
97 x |= x >> 8U;
98 x |= x >> 16U;
99 return x;
100 }
101
102 #else
103
104 F_CONST
count2mask(const uint32_t x)105 static uint32_t count2mask(const uint32_t x) {
106 // This variant is about twice as fast as the above, but
107 // only available w/ GCC 3.4 and above.
108 return ((1U << (31U - (unsigned)__builtin_clz(x|1U))) << 1U) - 1U;
109 }
110
111 #endif
112
113 F_NONNULL
ltree_childtable_grow(ltree_node_t * node)114 static void ltree_childtable_grow(ltree_node_t* node) {
115 const uint32_t old_max_slot = count2mask(node->child_hash_mask);
116 const uint32_t new_hash_mask = (old_max_slot << 1) | 1;
117 ltree_node_t** new_table = xcalloc(new_hash_mask + 1, sizeof(ltree_node_t*));
118 for(uint32_t i = 0; i <= old_max_slot; i++) {
119 ltree_node_t* entry = node->child_table[i];
120 while(entry) {
121 ltree_node_t* next_entry = entry->next;
122 entry->next = NULL;
123
124 const uint32_t child_hash = ltree_hash(entry->label, new_hash_mask);
125 ltree_node_t* slot = new_table[child_hash];
126
127 if(slot) {
128 while(slot->next)
129 slot = slot->next;
130 slot->next = entry;
131 }
132 else {
133 new_table[child_hash] = entry;
134 }
135
136 entry = next_entry;
137 }
138 }
139
140 free(node->child_table);
141
142 node->child_table = new_table;
143 }
144
145 F_NONNULL F_PURE
ltree_node_find_child(const ltree_node_t * node,const uint8_t * child_label)146 static ltree_node_t* ltree_node_find_child(const ltree_node_t* node, const uint8_t* child_label) {
147 ltree_node_t* rv = NULL;
148
149 if(node->child_table) {
150 const uint32_t child_mask = count2mask(node->child_hash_mask);
151 const uint32_t child_hash = ltree_hash(child_label, child_mask);
152 ltree_node_t* child = node->child_table[child_hash];
153 while(child) {
154 if(!gdnsd_label_cmp(child_label, child->label)) {
155 rv = child;
156 break;
157 }
158 child = child->next;
159 }
160 }
161
162 return rv;
163 }
164
165 // Creates a new, disconnected node
166 F_NONNULLX(1)
ltree_node_new(ltarena_t * arena,const uint8_t * label,const uint32_t flags)167 static ltree_node_t* ltree_node_new(ltarena_t* arena, const uint8_t* label, const uint32_t flags) {
168 ltree_node_t* rv = xcalloc(1, sizeof(ltree_node_t));
169 if(label)
170 rv->label = lta_labeldup(arena, label);
171 rv->flags = flags;
172 return rv;
173 }
174
// Find the child of "node" named "child_label", creating it if absent.
// NOTE: node->child_hash_mask actually stores the child *count*;
// count2mask() converts that count into the current table mask.
F_NONNULL
static ltree_node_t* ltree_node_find_or_add_child(ltarena_t* arena, ltree_node_t* node, const uint8_t* child_label) {
    const uint32_t child_mask = count2mask(node->child_hash_mask);
    const uint32_t child_hash = ltree_hash(child_label, child_mask);

    // lazily allocate the initial 2-slot table on first child
    if(!node->child_table) {
        dmn_assert(!node->child_hash_mask);
        node->child_table = xcalloc(2, sizeof(ltree_node_t*));
    }

    // return an existing matching child, if any
    ltree_node_t* child = node->child_table[child_hash];
    while(child) {
        if(!gdnsd_label_cmp(child_label, child->label))
            return child;
        child = child->next;
    }

    // no match: prepend a new child to this hash chain
    child = ltree_node_new(arena, child_label, 0);
    child->next = node->child_table[child_hash];
    node->child_table[child_hash] = child;

    // grow (rehashing everything, including the node just inserted)
    // when the count reaches the mask, then bump the stored count
    if(node->child_hash_mask == child_mask)
        ltree_childtable_grow(node);
    node->child_hash_mask++;

    return child;
}
202
203 // "dname" should be an FQDN format-wise, but:
204 // (a) Must be in-zone for the given zone
205 // (b) Must have the zone portion cut off the end,
206 // e.g. for zone "example.com.", the dname normally
207 // known as "www.example.com." should be just "www."
208 F_NONNULL
ltree_find_or_add_dname(const zone_t * zone,const uint8_t * dname)209 static ltree_node_t* ltree_find_or_add_dname(const zone_t* zone, const uint8_t* dname) {
210 dmn_assert(zone->root); dmn_assert(zone->dname);
211 dmn_assert(dname_status(dname) == DNAME_VALID);
212
213 // Construct a label stack from dname
214 const uint8_t* lstack[127];
215 unsigned lcount = dname_to_lstack(dname, lstack);
216
217 ltree_node_t* current = zone->root;
218 while(lcount--)
219 current = ltree_node_find_or_add_child(zone->arena, current, lstack[lcount]);
220
221 return current;
222 }
223
// Generates a typed "getter": scans the node's rrset list for an rrset
// whose wire type is _dtyp and returns it as the _typ-specific union
// member, or NULL if the node has no rrset of that type.
#define MK_RRSET_GET(_typ, _nam, _dtyp) \
F_NONNULL F_PURE \
static ltree_rrset_ ## _typ ## _t* ltree_node_get_rrset_ ## _nam (const ltree_node_t* node) {\
    ltree_rrset_t* rrsets = node->rrsets;\
    while(rrsets) {\
        if(rrsets->gen.type == _dtyp)\
            return &(rrsets)-> _typ;\
        rrsets = rrsets->gen.next;\
    }\
    return NULL;\
}

MK_RRSET_GET(addr, addr, DNS_TYPE_A)
MK_RRSET_GET(soa, soa, DNS_TYPE_SOA)
F_UNUSED
MK_RRSET_GET(cname, cname, DNS_TYPE_CNAME)
F_UNUSED
MK_RRSET_GET(dync, dync, DNS_TYPE_DYNC)
MK_RRSET_GET(ns, ns, DNS_TYPE_NS)
MK_RRSET_GET(ptr, ptr, DNS_TYPE_PTR)
MK_RRSET_GET(mx, mx, DNS_TYPE_MX)
MK_RRSET_GET(srv, srv, DNS_TYPE_SRV)
MK_RRSET_GET(naptr, naptr, DNS_TYPE_NAPTR)
MK_RRSET_GET(txt, txt, DNS_TYPE_TXT)
248
249 #define MK_RRSET_ADD(_typ, _nam, _dtyp) \
250 F_NONNULL \
251 static ltree_rrset_ ## _typ ## _t* ltree_node_add_rrset_ ## _nam (ltree_node_t* node) {\
252 ltree_rrset_t** store_at = &node->rrsets;\
253 while(*store_at)\
254 store_at = &(*store_at)->gen.next;\
255 ltree_rrset_ ## _typ ## _t* nrr = xcalloc(1, sizeof(ltree_rrset_ ## _typ ## _t));\
256 *store_at = (ltree_rrset_t*)nrr;\
257 (*store_at)->gen.type = _dtyp;\
258 return nrr;\
259 }
260
261 MK_RRSET_ADD(addr, addr, DNS_TYPE_A)
262 MK_RRSET_ADD(soa, soa, DNS_TYPE_SOA)
263 MK_RRSET_ADD(cname, cname, DNS_TYPE_CNAME)
264 MK_RRSET_ADD(dync, dync, DNS_TYPE_DYNC)
265 MK_RRSET_ADD(ns, ns, DNS_TYPE_NS)
266 MK_RRSET_ADD(ptr, ptr, DNS_TYPE_PTR)
267 MK_RRSET_ADD(mx, mx, DNS_TYPE_MX)
268 MK_RRSET_ADD(srv, srv, DNS_TYPE_SRV)
269 MK_RRSET_ADD(naptr, naptr, DNS_TYPE_NAPTR)
270 MK_RRSET_ADD(txt, txt, DNS_TYPE_TXT)
271
// standard chunk for clamping TTLs in ltree_add_rec_*
// Clamps the local variable "ttl" into [gcfg->min_ttl, gcfg->max_ttl].
// Assumes "ttl", "dname", and "zone" are in scope.  Beware: log_zwarn()
// may "return true" out of the enclosing function under strict data.
#define CLAMP_TTL(_t) \
    if(ttl > gcfg->max_ttl) {\
        log_zwarn("Name '%s%s': %s TTL %u too large, clamped to max_ttl setting of %u", logf_dname(dname), logf_dname(zone->dname), _t, ttl, gcfg->max_ttl);\
        ttl = gcfg->max_ttl;\
    }\
    else if(ttl < gcfg->min_ttl) {\
        log_zwarn("Name '%s%s': %s TTL %u too small, clamped to min_ttl setting of %u", logf_dname(dname), logf_dname(zone->dname), _t, ttl, gcfg->min_ttl);\
        ttl = gcfg->min_ttl;\
    }
282
// Add one A record at "dname".  "ooz" stores the record as out-of-zone
// glue under a hidden child of the zone root rather than in normal
// authoritative space.  Returns true on fatal zone-data error (possibly
// via CLAMP_TTL/log_zwarn when zones_strict_data is set).
bool ltree_add_rec_a(const zone_t* zone, const uint8_t* dname, const uint32_t addr, unsigned ttl, const unsigned limit_v4, const bool ooz) {
    ltree_node_t* node;
    if(ooz) {
        // hidden zero-length label under the root holds out-of-zone glue
        ltree_node_t* ooz_node = ltree_node_find_or_add_child(zone->arena, zone->root, ooz_glue_label);
        node = ltree_node_find_or_add_child(zone->arena, ooz_node, dname);
    }
    else {
        node = ltree_find_or_add_dname(zone, dname);
    }

    ltree_rrset_addr_t* rrset = ltree_node_get_rrset_addr(node);
    if(!rrset) {
        // first address record at this name: small v4-only sets are
        // stored inline in v4a[] rather than heap-allocated
        CLAMP_TTL("A")
        rrset = ltree_node_add_rrset_addr(node);
        rrset->gen.count = 1;
        rrset->gen.ttl = htonl(ttl);
        rrset->limit_v4 = limit_v4;
        rrset->v4a[0] = addr;
    }
    else {
        // both counts zero marks a DYNA rrset at this name
        if(!(rrset->gen.count | rrset->count_v6)) // DYNA here already
            log_zfatal("Name '%s%s': DYNA cannot co-exist at the same name as A and/or AAAA", logf_dname(dname), logf_dname(zone->dname));
        if(ntohl(rrset->gen.ttl) != ttl)
            log_zwarn("Name '%s%s': All TTLs for A and/or AAAA records at the same name should agree (using %u)", logf_dname(dname), logf_dname(zone->dname), ntohl(rrset->gen.ttl));
        if(rrset->gen.count == UINT16_MAX)
            log_zfatal("Name '%s%s': Too many RRs of type A", logf_dname(dname), logf_dname(zone->dname));
        if(rrset->gen.count > 0) {
            if(rrset->limit_v4 != limit_v4)
                log_zwarn("Name '%s%s': All $ADDR_LIMIT_4 for A-records at the same name should agree (using %u)", logf_dname(dname), logf_dname(zone->dname), rrset->limit_v4);
        }
        else {
            rrset->limit_v4 = limit_v4;
        }

        // Inline v4a[] storage is only legal while there are no AAAAs
        // and at most LTREE_V4A_SIZE addresses; crossing that boundary
        // migrates the existing addresses to a heap array.
        if(!rrset->count_v6 && rrset->gen.count <= LTREE_V4A_SIZE) {
            if(rrset->gen.count == LTREE_V4A_SIZE) { // upgrade to addrs, copy old addrs
                uint32_t* new_v4 = xmalloc(sizeof(uint32_t) * (LTREE_V4A_SIZE + 1));
                memcpy(new_v4, rrset->v4a, sizeof(uint32_t) * LTREE_V4A_SIZE);
                new_v4[LTREE_V4A_SIZE] = addr;
                rrset->addrs.v4 = new_v4;
                rrset->addrs.v6 = NULL;
                rrset->gen.count = LTREE_V4A_SIZE + 1;
            }
            else {
                rrset->v4a[rrset->gen.count++] = addr;
            }
        }
        else {
            rrset->addrs.v4 = xrealloc(rrset->addrs.v4, sizeof(uint32_t) * (1U + rrset->gen.count));
            rrset->addrs.v4[rrset->gen.count++] = addr;
        }
    }

    return false;
}
338
ltree_add_rec_aaaa(const zone_t * zone,const uint8_t * dname,const uint8_t * addr,unsigned ttl,const unsigned limit_v6,const bool ooz)339 bool ltree_add_rec_aaaa(const zone_t* zone, const uint8_t* dname, const uint8_t* addr, unsigned ttl, const unsigned limit_v6, const bool ooz) {
340 ltree_node_t* node;
341 if(ooz) {
342 ltree_node_t* ooz_node = ltree_node_find_or_add_child(zone->arena, zone->root, ooz_glue_label);
343 node = ltree_node_find_or_add_child(zone->arena, ooz_node, dname);
344 }
345 else {
346 node = ltree_find_or_add_dname(zone, dname);
347 }
348
349 ltree_rrset_addr_t* rrset = ltree_node_get_rrset_addr(node);
350 if(!rrset) {
351 CLAMP_TTL("AAAA")
352 rrset = ltree_node_add_rrset_addr(node);
353 rrset->addrs.v6 = xmalloc(16);
354 memcpy(rrset->addrs.v6, addr, 16);
355 rrset->count_v6 = 1;
356 rrset->gen.ttl = htonl(ttl);
357 rrset->limit_v6 = limit_v6;
358 }
359 else {
360 if(!(rrset->gen.count | rrset->count_v6)) // DYNA here already
361 log_zfatal("Name '%s%s': DYNA cannot co-exist at the same name as A and/or AAAA", logf_dname(dname), logf_dname(zone->dname));
362 if(ntohl(rrset->gen.ttl) != ttl)
363 log_zwarn("Name '%s%s': All TTLs for A and/or AAAA records at the same name should agree (using %u)", logf_dname(dname), logf_dname(zone->dname), ntohl(rrset->gen.ttl));
364 if(rrset->count_v6 == UINT16_MAX)
365 log_zfatal("Name '%s%s': Too many RRs of type AAAA", logf_dname(dname), logf_dname(zone->dname));
366 if(rrset->count_v6 > 0) {
367 if(rrset->limit_v6 != limit_v6)
368 log_zwarn("Name '%s%s': All $ADDR_LIMIT_6 for AAAA-records at the same name should agree (using %u)", logf_dname(dname), logf_dname(zone->dname), rrset->limit_v6);
369 }
370 else {
371 rrset->limit_v6 = limit_v6;
372 }
373
374 if(!rrset->count_v6 && rrset->gen.count <= LTREE_V4A_SIZE) {
375 // was v4a-style, convert to addrs
376 uint32_t* new_v4 = xmalloc(sizeof(uint32_t) * rrset->gen.count);
377 memcpy(new_v4, rrset->v4a, sizeof(uint32_t) * rrset->gen.count);
378 rrset->addrs.v4 = new_v4;
379 rrset->addrs.v6 = NULL;
380 }
381 rrset->addrs.v6 = xrealloc(rrset->addrs.v6, 16 * (1U + rrset->count_v6));
382 memcpy(rrset->addrs.v6 + (rrset->count_v6++ * 16), addr, 16);
383 }
384
385 return false;
386 }
387
ltree_add_rec_dynaddr(const zone_t * zone,const uint8_t * dname,const char * rhs,unsigned ttl,unsigned ttl_min,const unsigned limit_v4,const unsigned limit_v6,const bool ooz)388 bool ltree_add_rec_dynaddr(const zone_t* zone, const uint8_t* dname, const char* rhs, unsigned ttl, unsigned ttl_min, const unsigned limit_v4, const unsigned limit_v6, const bool ooz) {
389 ltree_node_t* node;
390 if(ooz) {
391 ltree_node_t* ooz_node = ltree_node_find_or_add_child(zone->arena, zone->root, ooz_glue_label);
392 node = ltree_node_find_or_add_child(zone->arena, ooz_node, dname);
393 }
394 else {
395 node = ltree_find_or_add_dname(zone, dname);
396 }
397
398 ltree_rrset_addr_t* rrset;
399 if((rrset = ltree_node_get_rrset_addr(node))) {
400 if(rrset->gen.count | rrset->count_v6)
401 log_zfatal("Name '%s%s': DYNA cannot co-exist at the same name as A and/or AAAA", logf_dname(dname), logf_dname(zone->dname));
402 log_zfatal("Name '%s%s': DYNA defined twice for the same name", logf_dname(dname), logf_dname(zone->dname));
403 }
404
405 CLAMP_TTL("DYNA")
406 if(ttl_min < gcfg->min_ttl) {
407 log_zwarn("Name '%s%s': DYNA Min-TTL /%u too small, clamped to min_ttl setting of %u", logf_dname(dname), logf_dname(zone->dname), ttl_min, gcfg->min_ttl);
408 ttl_min = gcfg->min_ttl;
409 }
410 if(ttl_min > ttl) {
411 log_zwarn("Name '%s%s': DYNA Min-TTL /%u larger than Max-TTL %u, clamping to Max-TTL", logf_dname(dname), logf_dname(zone->dname), ttl_min, ttl);
412 ttl_min = ttl;
413 }
414
415 rrset = ltree_node_add_rrset_addr(node);
416 rrset->gen.ttl = htonl(ttl);
417 rrset->dyn.ttl_min = ttl_min;
418 rrset->limit_v4 = limit_v4;
419 rrset->limit_v6 = limit_v6;
420
421 const unsigned rhs_size = strlen(rhs) + 1;
422 char plugin_name[rhs_size];
423 memcpy(plugin_name, rhs, rhs_size);
424 char* resource_name;
425 if((resource_name = strchr(plugin_name, '!')))
426 *resource_name++ = '\0';
427
428 const plugin_t* const p = gdnsd_plugin_find(plugin_name);
429 if(likely(p)) {
430 if(!p->resolve)
431 log_zfatal("Name '%s%s': DYNA RR refers to a non-resolver plugin", logf_dname(dname), logf_dname(zone->dname));
432 rrset->dyn.func = p->resolve;
433 rrset->dyn.resource = 0;
434 if(p->map_res) {
435 const int res = p->map_res(resource_name, NULL);
436 if(res < 0)
437 log_zfatal("Name '%s%s': resolver plugin '%s' rejected resource name '%s'", logf_dname(dname), logf_dname(zone->dname), plugin_name, resource_name);
438 else
439 rrset->dyn.resource = (unsigned)res;
440 }
441 return false;
442 }
443
444 log_zfatal("Name '%s%s': DYNA RR refers to plugin '%s', which is not loaded", logf_dname(dname), logf_dname(zone->dname), plugin_name);
445 }
446
ltree_add_rec_cname(const zone_t * zone,const uint8_t * dname,const uint8_t * rhs,unsigned ttl)447 bool ltree_add_rec_cname(const zone_t* zone, const uint8_t* dname, const uint8_t* rhs, unsigned ttl) {
448 CLAMP_TTL("CNAME")
449
450 ltree_node_t* node = ltree_find_or_add_dname(zone, dname);
451 ltree_rrset_cname_t* rrset = ltree_node_add_rrset_cname(node);
452 rrset->dname = lta_dnamedup(zone->arena, rhs);
453 rrset->gen.ttl = htonl(ttl);
454 rrset->gen.count = 1;
455
456 return false;
457 }
458
ltree_add_rec_dync(const zone_t * zone,const uint8_t * dname,const char * rhs,const uint8_t * origin,unsigned ttl,unsigned ttl_min,const unsigned limit_v4,const unsigned limit_v6)459 bool ltree_add_rec_dync(const zone_t* zone, const uint8_t* dname, const char* rhs, const uint8_t* origin, unsigned ttl, unsigned ttl_min, const unsigned limit_v4, const unsigned limit_v6) {
460 CLAMP_TTL("DYNC")
461
462 if(ttl_min < gcfg->min_ttl) {
463 log_zwarn("Name '%s%s': DYNC Min-TTL /%u too small, clamped to min_ttl setting of %u", logf_dname(dname), logf_dname(zone->dname), ttl_min, gcfg->min_ttl);
464 ttl_min = gcfg->min_ttl;
465 }
466 if(ttl_min > ttl) {
467 log_zwarn("Name '%s%s': DYNC Min-TTL /%u larger than Max-TTL %u, clamping to Max-TTL", logf_dname(dname), logf_dname(zone->dname), ttl_min, ttl);
468 ttl_min = ttl;
469 }
470
471 ltree_node_t* node = ltree_find_or_add_dname(zone, dname);
472 ltree_rrset_dync_t* rrset = ltree_node_add_rrset_dync(node);
473 rrset->origin = lta_dnamedup(zone->arena, origin);
474 rrset->gen.ttl = htonl(ttl);
475 rrset->ttl_min = ttl_min;
476 rrset->limit_v4 = limit_v4;
477 rrset->limit_v6 = limit_v6;
478
479 const unsigned rhs_size = strlen(rhs) + 1;
480 char plugin_name[rhs_size];
481 memcpy(plugin_name, rhs, rhs_size);
482 char* resource_name;
483 if((resource_name = strchr(plugin_name, '!')))
484 *resource_name++ = '\0';
485
486 const plugin_t* const p = gdnsd_plugin_find(plugin_name);
487 if(!p)
488 log_zfatal("Name '%s%s': DYNC refers to plugin '%s', which is not loaded", logf_dname(dname), logf_dname(zone->dname), plugin_name);
489 if(!p->resolve)
490 log_zfatal("Name '%s%s': DYNC RR refers to a non-resolver plugin", logf_dname(dname), logf_dname(zone->dname));
491 rrset->func = p->resolve;
492
493 // we pass rrset->origin instead of origin here, in case the plugin author saves the pointer
494 // (which he probably shouldn't, but can't hurt to make life easier)
495 rrset->resource = 0;
496 if(p->map_res) {
497 const int res = p->map_res(resource_name, rrset->origin);
498 if(res < 0)
499 log_zfatal("Name '%s%s': plugin '%s' rejected DYNC resource '%s' at origin '%s'", logf_dname(dname), logf_dname(zone->dname), plugin_name, resource_name, rrset->origin);
500 rrset->resource = (unsigned)res;
501 }
502
503 return false;
504 }
505
// It's like C++ templating, but sadly even uglier ...
// This macro assumes "ltree_node_t* node" and "uint8_t* dname" in
// the current context, and creates "rrset" and "new_rdata" of
// the appropriate types
// _szassume is a size assumption. If we expect 2+ to be the common
// case for the rrset's count, set it to 2, otherwise 1.
// Beware: CLAMP_TTL/log_zwarn/log_zfatal inside may "return true" out
// of the enclosing function.  On success, new_rdata points at a fresh,
// uninitialized rdata slot that the caller must fully populate.
#define INSERT_NEXT_RR(_typ, _nam, _pnam, _szassume) \
    ltree_rdata_ ## _typ ## _t* new_rdata;\
    ltree_rrset_ ## _typ ## _t* rrset = ltree_node_get_rrset_ ## _nam (node);\
    {\
        if(!rrset) {\
            CLAMP_TTL(_pnam) \
            rrset = ltree_node_add_rrset_ ## _nam (node);\
            rrset->gen.count = 1;\
            rrset->gen.ttl = htonl(ttl);\
            new_rdata = rrset->rdata = xmalloc(sizeof(ltree_rdata_ ## _typ ## _t) * _szassume);\
        }\
        else {\
            if(ntohl(rrset->gen.ttl) != ttl)\
                log_zwarn("Name '%s%s': All TTLs for type %s should match (using %u)", logf_dname(dname), logf_dname(zone->dname), _pnam, ntohl(rrset->gen.ttl));\
            if(rrset->gen.count == UINT16_MAX)\
                log_zfatal("Name '%s%s': Too many RRs of type %s", logf_dname(dname), logf_dname(zone->dname), _pnam);\
            if(_szassume == 1 || rrset->gen.count >= _szassume) \
                rrset->rdata = xrealloc(rrset->rdata, (1U + rrset->gen.count) * sizeof(ltree_rdata_ ## _typ ## _t));\
            new_rdata = &rrset->rdata[rrset->gen.count++];\
        }\
    }
533
ltree_add_rec_ptr(const zone_t * zone,const uint8_t * dname,const uint8_t * rhs,unsigned ttl)534 bool ltree_add_rec_ptr(const zone_t* zone, const uint8_t* dname, const uint8_t* rhs, unsigned ttl) {
535 ltree_node_t* node = ltree_find_or_add_dname(zone, dname);
536
537 INSERT_NEXT_RR(ptr, ptr, "PTR", 1);
538 new_rdata->dname = lta_dnamedup(zone->arena, rhs);
539 if(dname_isinzone(zone->dname, rhs))
540 log_zwarn("Name '%s%s': PTR record points to same-zone name '%s', which is usually a mistake (missing terminal dot?)", logf_dname(dname), logf_dname(zone->dname), logf_dname(rhs));
541 return false;
542 }
543
ltree_add_rec_ns(const zone_t * zone,const uint8_t * dname,const uint8_t * rhs,unsigned ttl)544 bool ltree_add_rec_ns(const zone_t* zone, const uint8_t* dname, const uint8_t* rhs, unsigned ttl) {
545 ltree_node_t* node = ltree_find_or_add_dname(zone, dname);
546
547 // If this is a delegation by definition, (NS rec not at zone root), flag it
548 // and check for wildcard. Zone root is quickly identified by lack of a label.
549 if(node->label) {
550 node->flags |= LTNFLAG_DELEG;
551 if(node->label[0] == 1 && node->label[1] == '*')
552 log_zfatal("Name '%s%s': Cannot delegate via wildcards", logf_dname(dname), logf_dname(zone->dname));
553 }
554
555 INSERT_NEXT_RR(ns, ns, "NS", 2)
556 new_rdata->dname = lta_dnamedup(zone->arena, rhs);
557 new_rdata->ad = NULL;
558 return false;
559 }
560
ltree_add_rec_mx(const zone_t * zone,const uint8_t * dname,const uint8_t * rhs,unsigned ttl,const unsigned pref)561 bool ltree_add_rec_mx(const zone_t* zone, const uint8_t* dname, const uint8_t* rhs, unsigned ttl, const unsigned pref) {
562 if(pref > 65535U)
563 log_zfatal("Name '%s%s': MX preference value %u too large", logf_dname(dname), logf_dname(zone->dname), pref);
564
565 ltree_node_t* node = ltree_find_or_add_dname(zone, dname);
566
567 INSERT_NEXT_RR(mx, mx, "MX", 2)
568 new_rdata->dname = lta_dnamedup(zone->arena, rhs);
569 new_rdata->pref = htons(pref);
570 new_rdata->ad = NULL;
571 return false;
572 }
573
ltree_add_rec_srv(const zone_t * zone,const uint8_t * dname,const uint8_t * rhs,unsigned ttl,const unsigned priority,const unsigned weight,const unsigned port)574 bool ltree_add_rec_srv(const zone_t* zone, const uint8_t* dname, const uint8_t* rhs, unsigned ttl, const unsigned priority, const unsigned weight, const unsigned port) {
575 if(priority > 65535U)
576 log_zfatal("Name '%s%s': SRV priority value %u too large", logf_dname(dname), logf_dname(zone->dname), priority);
577 if(weight > 65535U)
578 log_zfatal("Name '%s%s': SRV weight value %u too large", logf_dname(dname), logf_dname(zone->dname), weight);
579 if(port > 65535U)
580 log_zfatal("Name '%s%s': SRV port value %u too large", logf_dname(dname), logf_dname(zone->dname), port);
581
582 ltree_node_t* node = ltree_find_or_add_dname(zone, dname);
583
584 INSERT_NEXT_RR(srv, srv, "SRV", 1)
585 new_rdata->dname = lta_dnamedup(zone->arena, rhs);
586 new_rdata->priority = htons(priority);
587 new_rdata->weight = htons(weight);
588 new_rdata->port = htons(port);
589 new_rdata->ad = NULL;
590 return false;
591 }
592
593 /* RFC 2195 was obsoleted by RFC 3403 for defining the NAPTR RR
594 * As 3403 is much looser about the contents of the 3 text fields,
595 * there's not much validation we can do on them.
596 *
597 * All we can really say for sure anymore is:
598 * 1) Flags must be [0-9A-Za-z]*
599 * 2) Regexp (the final text field) and Replacement (the RHS domainname)
600 * are apparently mutually exclusive as of RFC3403, and it is an error
601 * to define both in one NAPTR RR. The "undefined" value for Regexp is the empty
602 * string, and the "undefined" value for Replacement is the root of DNS ('\0').
603 */
604 F_NONNULL
naptr_validate_flags(const uint8_t * zone_dname,const uint8_t * dname,const uint8_t * flags)605 static bool naptr_validate_flags(const uint8_t* zone_dname, const uint8_t* dname, const uint8_t* flags) {
606 unsigned len = *flags++;
607 while(len--) {
608 unsigned c = *flags++;
609 if((c > 0x7AU) // > 'Z'
610 || (c > 0x5BU && c < 0x61U) // > 'z' && < 'A'
611 || (c > 0x39U && c < 0x41U) // > '9' && < 'a'
612 || (c < 0x30U)) // < '0'
613 log_zwarn("Name '%s%s': NAPTR has illegal flag char '%c'", logf_dname(dname), logf_dname(zone_dname), (int)c);
614 }
615
616 return false;
617 }
618
ltree_add_rec_naptr(const zone_t * zone,const uint8_t * dname,const uint8_t * rhs,unsigned ttl,const unsigned order,const unsigned pref,const unsigned num_texts V_UNUSED,uint8_t ** texts)619 bool ltree_add_rec_naptr(const zone_t* zone, const uint8_t* dname, const uint8_t* rhs, unsigned ttl, const unsigned order, const unsigned pref, const unsigned num_texts V_UNUSED, uint8_t** texts) {
620 dmn_assert(num_texts == 3);
621
622 if(order > 65535U)
623 log_zfatal("Name '%s%s': NAPTR order value %u too large", logf_dname(dname), logf_dname(zone->dname), order);
624 if(pref > 65535U)
625 log_zfatal("Name '%s%s': NAPTR preference value %u too large", logf_dname(dname), logf_dname(zone->dname), pref);
626 if(naptr_validate_flags(zone->dname, dname, texts[NAPTR_TEXTS_FLAGS]))
627 return true;
628
629 if(rhs[1] != 0 && texts[NAPTR_TEXTS_REGEXP][0])
630 log_zwarn("Name '%s%s': NAPTR does not allow defining both Regexp and Replacement in a single RR", logf_dname(dname), logf_dname(zone->dname));
631
632 ltree_node_t* node = ltree_find_or_add_dname(zone, dname);
633
634 INSERT_NEXT_RR(naptr, naptr, "NAPTR", 1)
635 new_rdata->dname = lta_dnamedup(zone->arena, rhs);
636 new_rdata->order = htons(order);
637 new_rdata->pref = htons(pref);
638 memcpy(new_rdata->texts, texts, sizeof(new_rdata->texts));
639 new_rdata->ad = NULL;
640 return false;
641 }
642
// We copy the array of pointers, but alias the actual data (which is malloc'd for
// us per call in the parser).
// Note the copy loop runs i <= num_texts, i.e. num_texts + 1 slots:
// it also copies texts[num_texts], presumably a terminating NULL
// sentinel supplied by the parser -- TODO confirm against caller.
bool ltree_add_rec_txt(const zone_t* zone, const uint8_t* dname, const unsigned num_texts, uint8_t** texts, unsigned ttl) {
    dmn_assert(num_texts);

    ltree_node_t* node = ltree_find_or_add_dname(zone, dname);

    // creates locals "rrset" and "new_rdata"; may return true
    INSERT_NEXT_RR(txt, txt, "TXT", 1)
    ltree_rdata_txt_t new_rd = *new_rdata = xmalloc((num_texts + 1) * sizeof(uint8_t*));
    for(unsigned i = 0; i <= num_texts; i++)
        new_rd[i] = texts[i];
    return false;
}
656
ltree_add_rec_soa(const zone_t * zone,const uint8_t * dname,const uint8_t * master,const uint8_t * email,unsigned ttl,const unsigned serial,const unsigned refresh,const unsigned retry,const unsigned expire,unsigned ncache)657 bool ltree_add_rec_soa(const zone_t* zone, const uint8_t* dname, const uint8_t* master, const uint8_t* email, unsigned ttl, const unsigned serial, const unsigned refresh, const unsigned retry, const unsigned expire, unsigned ncache) {
658 if(ncache > gcfg->max_ncache_ttl) {
659 log_zwarn("Zone '%s': SOA negative-cache field %u too large, clamped to max_ncache_ttl setting of %u", logf_dname(dname), ncache, gcfg->max_ncache_ttl);
660 ncache = gcfg->max_ncache_ttl;
661 }
662 else if(ncache < gcfg->min_ttl) {
663 log_zwarn("Zone '%s': SOA negative-cache field %u too small, clamped to min_ttl setting of %u", logf_dname(dname), ncache, gcfg->min_ttl);
664 ncache = gcfg->min_ttl;
665 }
666
667 ltree_node_t* node = ltree_find_or_add_dname(zone, dname);
668
669 if(ltree_node_get_rrset_soa(node))
670 log_zfatal("Zone '%s': SOA defined twice", logf_dname(dname));
671
672 ltree_rrset_soa_t* soa = ltree_node_add_rrset_soa(node);
673 soa->email = lta_dnamedup(zone->arena, email);
674 soa->master = lta_dnamedup(zone->arena, master);
675
676 soa->gen.ttl = htonl(ttl);
677 soa->times[0] = htonl(serial);
678 soa->times[1] = htonl(refresh);
679 soa->times[2] = htonl(retry);
680 soa->times[3] = htonl(expire);
681 soa->times[4] = htonl(ncache);
682 soa->neg_ttl = htonl(ttl < ncache ? ttl : ncache);
683
684 return false;
685 }
686
687 // It is critical that get/add_rrset_rfc3597 are not called with
688 // rrtype set to the number of other known, explicitly supported types...
689 F_NONNULL F_PURE
ltree_node_get_rrset_rfc3597(const ltree_node_t * node,const unsigned rrtype)690 static ltree_rrset_rfc3597_t* ltree_node_get_rrset_rfc3597(const ltree_node_t* node, const unsigned rrtype) {
691 ltree_rrset_t* rrsets = node->rrsets;
692 while(rrsets) {
693 if(rrsets->gen.type == rrtype)
694 return &(rrsets)->rfc3597;
695 rrsets = rrsets->gen.next;
696 }
697 return NULL;
698 }
699
700 F_NONNULL
ltree_node_add_rrset_rfc3597(ltree_node_t * node,const unsigned rrtype)701 static ltree_rrset_rfc3597_t* ltree_node_add_rrset_rfc3597(ltree_node_t* node, const unsigned rrtype) {
702 ltree_rrset_t** store_at = &node->rrsets;
703 while(*store_at)
704 store_at = &(*store_at)->gen.next;
705 ltree_rrset_rfc3597_t* nrr = xcalloc(1, sizeof(ltree_rrset_rfc3597_t));
706 *store_at = (ltree_rrset_t*)nrr;
707 (*store_at)->gen.type = rrtype;
708 return nrr;
709 }
710
ltree_add_rec_rfc3597(const zone_t * zone,const uint8_t * dname,const unsigned rrtype,unsigned ttl,const unsigned rdlen,uint8_t * rd)711 bool ltree_add_rec_rfc3597(const zone_t* zone, const uint8_t* dname, const unsigned rrtype, unsigned ttl, const unsigned rdlen, uint8_t* rd) {
712 ltree_node_t* node = ltree_find_or_add_dname(zone, dname);
713
714 if(rrtype == DNS_TYPE_A
715 || rrtype == DNS_TYPE_AAAA
716 || rrtype == DNS_TYPE_SOA
717 || rrtype == DNS_TYPE_CNAME
718 || rrtype == DNS_TYPE_NS
719 || rrtype == DNS_TYPE_PTR
720 || rrtype == DNS_TYPE_MX
721 || rrtype == DNS_TYPE_SRV
722 || rrtype == DNS_TYPE_NAPTR
723 || rrtype == DNS_TYPE_TXT)
724 log_zfatal("Name '%s%s': RFC3597 TYPE%u not allowed, please use the explicit support built in for this RR type", logf_dname(dname), logf_dname(zone->dname), rrtype);
725
726 if(rrtype == DNS_TYPE_AXFR
727 || rrtype == DNS_TYPE_DYNC
728 || rrtype == DNS_TYPE_IXFR
729 || rrtype == DNS_TYPE_ANY)
730 log_zfatal("Name '%s%s': RFC3597 TYPE%u not allowed", logf_dname(dname), logf_dname(zone->dname), rrtype);
731
732 ltree_rrset_rfc3597_t* rrset = ltree_node_get_rrset_rfc3597(node, rrtype);
733
734 ltree_rdata_rfc3597_t* new_rdata;
735
736 if(!rrset) {
737 rrset = ltree_node_add_rrset_rfc3597(node, rrtype);
738 rrset->gen.count = 1;
739 rrset->gen.ttl = htonl(ttl);
740 new_rdata = rrset->rdata = xmalloc(sizeof(ltree_rdata_rfc3597_t));
741 }
742 else {
743 if(ntohl(rrset->gen.ttl) != ttl)
744 log_zwarn("Name '%s%s': All TTLs for type RFC3597 TYPE%u should match (using %u)", logf_dname(dname), logf_dname(zone->dname), rrtype, ntohl(rrset->gen.ttl));
745 if(rrset->gen.count == UINT16_MAX)
746 log_zfatal("Name '%s%s': Too many RFC3597 RRs of type TYPE%u", logf_dname(dname), logf_dname(zone->dname), rrtype);
747 rrset->rdata = xrealloc(rrset->rdata, (1U + rrset->gen.count) * sizeof(ltree_rdata_rfc3597_t));
748 new_rdata = &rrset->rdata[rrset->gen.count++];
749 }
750
751 new_rdata->rdlen = rdlen;
752 new_rdata->rd = rd;
753 return false;
754 }
755
// Search "zone" for "dname" (a full FQDN, not zone-relative).
// Returns DNAME_NOAUTH when dname is outside the zone, DNAME_DELEG when
// the path crosses a delegation point, DNAME_AUTH otherwise.  On exact
// (or wildcard) match, *node_out is set to the matching node; otherwise
// it is set to NULL.
F_NONNULL
static ltree_dname_status_t ltree_search_dname_zone(const uint8_t* dname, const zone_t* zone, ltree_node_t** node_out) {
    dmn_assert(*dname != 0); dmn_assert(*dname != 2); // these are always illegal dnames

    ltree_dname_status_t rval = DNAME_NOAUTH;
    ltree_node_t* rv_node = NULL;
    if(dname_isinzone(zone->dname, dname)) {
        rval = DNAME_AUTH;
        // strip the zone suffix, leaving only the in-zone labels
        uint8_t local_dname[256];
        gdnsd_dname_copy(local_dname, dname);
        gdnsd_dname_drop_zone(local_dname, zone->dname);

        // construct label ptr stack
        const uint8_t* lstack[127];
        unsigned lcount = dname_to_lstack(local_dname, lstack);

        ltree_node_t* current = zone->root;

        // Descend one label per iteration; the goto re-enters the loop
        // after a successful child match, while falling off the chain
        // scan (or exhausting labels/children) breaks out of the
        // do{}while(0) wrapper.
        do {
          top_loop:;
            // any delegation node on the path downgrades AUTH to DELEG
            if(current->flags & LTNFLAG_DELEG)
                rval = DNAME_DELEG;

            if(!lcount || !current->child_table) {
                if(!lcount) rv_node = current; // all labels consumed: exact match
                break;
            }

            lcount--;
            const uint8_t* child_label = lstack[lcount];
            ltree_node_t* entry = current->child_table[ltree_hash(child_label, current->child_hash_mask)];

            while(entry) {
                if(!gdnsd_label_cmp(child_label, entry->label)) {
                    current = entry;
                    goto top_loop;
                }
                entry = entry->next;
            }
        } while(0);

        // If in auth space with no match, and we still have a child_table, check for wildcard
        if(!rv_node && rval == DNAME_AUTH && current->child_table) {
            static const uint8_t label_wild[2] = { '\001', '*' };
            ltree_node_t* entry = current->child_table[ltree_hash(label_wild, current->child_hash_mask)];
            while(entry) {
                if(entry->label[0] == '\001' && entry->label[1] == '*') {
                    rv_node = entry;
                    break;
                }
                entry = entry->next;
            }
        }
    }

    *node_out = rv_node;
    return rval;
}
814
815 // retval: true, all is well (although we didn't necessarily set an address)
816 // false, the target points at an authoritative name in the same zone which doesn't exist
817 F_NONNULL
set_valid_addr(const uint8_t * dname,const zone_t * zone,ltree_rrset_addr_t ** addr_out)818 static bool set_valid_addr(const uint8_t* dname, const zone_t* zone, ltree_rrset_addr_t** addr_out) {
819 dmn_assert(*dname);
820
821 ltree_node_t* node;
822 const ltree_dname_status_t status = ltree_search_dname_zone(dname, zone, &node);
823
824 *addr_out = NULL;
825 if(status == DNAME_AUTH)
826 if(!node || !(*addr_out = ltree_node_get_rrset_addr(node)))
827 return false;
828
829 return true;
830 }
831
832 // Input must be a binstr (first byte is len, rest is the data),
833 // "c" must be an uppercase ASCII character.
834 // retval indicates whether the string contains this character
835 // (in upper or lower case form).
836 F_NONNULL F_PURE
binstr_hasichr(const uint8_t * bstr,const uint8_t c)837 static bool binstr_hasichr(const uint8_t* bstr, const uint8_t c) {
838 dmn_assert(c > 0x40 && c < 0x5B);
839 unsigned len = *bstr++;
840 while(len--) {
841 if(((*bstr++) & (~0x20)) == c)
842 return true;
843 }
844 return false;
845 }
846
847 // For static addresses, if no limit was specified, set it
848 // to the count for simplicity. If limit is greater than
849 // count, limit limit to the count. This is done at runtime
850 // for DYNA.
fix_addr_limits(ltree_rrset_addr_t * node_addr)851 static void fix_addr_limits(ltree_rrset_addr_t* node_addr) {
852 if(!node_addr->limit_v4 || node_addr->limit_v4 > node_addr->gen.count)
853 node_addr->limit_v4 = node_addr->gen.count;
854 if(!node_addr->limit_v6 || node_addr->limit_v6 > node_addr->count_v6)
855 node_addr->limit_v6 = node_addr->count_v6;
856 }
857
858 F_WUNUSED F_NONNULL
p1_proc_cname(const zone_t * zone,const ltree_rrset_cname_t * node_cname,const uint8_t ** lstack,const unsigned depth)859 static bool p1_proc_cname(const zone_t* zone, const ltree_rrset_cname_t* node_cname, const uint8_t** lstack, const unsigned depth) {
860 ltree_node_t* cn_target;
861 ltree_dname_status_t cnstat = ltree_search_dname_zone(node_cname->dname, zone, &cn_target);
862 if(cnstat == DNAME_AUTH) {
863 if(!cn_target) {
864 log_zwarn("CNAME '%s%s' points to known same-zone NXDOMAIN '%s'",
865 logf_lstack(lstack, depth, zone->dname), logf_dname(node_cname->dname));
866 }
867 else if(!cn_target->rrsets) {
868 log_zwarn("CNAME '%s%s' points to '%s' in the same zone, which has no data",
869 logf_lstack(lstack, depth, zone->dname), logf_dname(node_cname->dname));
870 }
871 }
872
873 unsigned cn_depth = 1;
874 while(cn_target && cnstat == DNAME_AUTH && cn_target->rrsets && cn_target->rrsets->gen.type == DNS_TYPE_CNAME) {
875 if(++cn_depth > gcfg->max_cname_depth) {
876 log_zfatal("CNAME '%s%s' leads to a CNAME chain longer than %u (max_cname_depth)", logf_lstack(lstack, depth, zone->dname), gcfg->max_cname_depth);
877 break;
878 }
879 ltree_rrset_cname_t* cur_cname = &cn_target->rrsets->cname;
880 cnstat = ltree_search_dname_zone(cur_cname->dname, zone, &cn_target);
881 }
882
883 return false;
884 }
885
// Phase-1 processing for one NS rdata entry: resolve the NS target name to
// an address rrset and store it as "additional data" (this_ns->ad), using
// either normal in-zone lookup or the hidden out-of-zone glue list stored
// under the zone root's ooz_glue_label child.
// retval true: hard failure (missing required glue address).
F_WUNUSED F_NONNULL
static bool p1_proc_ns(const zone_t* zone, const bool in_deleg, ltree_rdata_ns_t* this_ns, const uint8_t** lstack, const unsigned depth) {
    dmn_assert(!this_ns->ad); // additional-data pointer must not be set yet

    ltree_node_t* ns_target = NULL;
    ltree_rrset_addr_t* target_addr = NULL;
    ltree_dname_status_t ns_status = ltree_search_dname_zone(this_ns->dname, zone, &ns_target);

    // if NOAUTH, look for explicit out-of-zone glue
    if(ns_status == DNAME_NOAUTH) {
        ltree_node_t* ooz = ltree_node_find_child(zone->root, ooz_glue_label);
        if(ooz) {
            // ooz children are looked up by the full glue dname as the "label"
            ns_target = ltree_node_find_child(ooz, this_ns->dname);
            if(ns_target) {
                // ooz glue nodes hold exactly one address rrset and no children
                dmn_assert(!ns_target->child_table);
                dmn_assert(ns_target->rrsets);
                dmn_assert(ns_target->rrsets->gen.type == DNS_TYPE_A);
                target_addr = &ns_target->rrsets->addr;
            }
        }
    }
    else {
        // if !NOAUTH, target must be in auth or deleg space for this
        // same zone, and we *must* have a legal address for it
        dmn_assert(ns_status == DNAME_AUTH || ns_status == DNAME_DELEG);
        if(!ns_target || !(target_addr = ltree_node_get_rrset_addr(ns_target)))
            log_zfatal("Missing A and/or AAAA records for target nameserver in '%s%s NS %s'",
                logf_lstack(lstack, depth, zone->dname), logf_dname(this_ns->dname));
    }

    // use target_addr found via either path above
    if(target_addr) {
        dmn_assert(ns_target);
        this_ns->ad = target_addr;
        // treat as glue if NS for delegation, and addr is in delegation or ooz
        if(ns_status != DNAME_AUTH) {
            if(in_deleg)
                AD_SET_GLUE(this_ns->ad);
            ns_target->flags |= LTNFLAG_GUSED; // consumed: phase-2 warns on unused glue
        }
    }

    return false;
}
930
// Phase 1:
// Walks the entire ltree, accomplishing two things in a single pass:
// 1) Sanity-check of referential and structural things
//    that could not be checked as records were being added.
// 2) Setting various inter-node pointers for the dnspacket code (and
//    Phase 2) to chase later.
// retval true: hard failure (also possible from log_zwarn under
// zones_strict_data, which turns warnings into failures).
F_WUNUSED F_NONNULL
static bool ltree_postproc_phase1(const uint8_t** lstack, const ltree_node_t* node, const zone_t* zone, const unsigned depth, const bool in_deleg) {
    // Classify every rrset present at this node by type, one pointer each
    bool node_has_rfc3597 = false;
    ltree_rrset_addr_t* node_addr = NULL;
    ltree_rrset_cname_t* node_cname = NULL;
    ltree_rrset_dync_t* node_dync = NULL;
    ltree_rrset_ns_t* node_ns = NULL;
    ltree_rrset_ptr_t* node_ptr = NULL;
    ltree_rrset_mx_t* node_mx = NULL;
    ltree_rrset_srv_t* node_srv = NULL;
    ltree_rrset_naptr_t* node_naptr = NULL;
    ltree_rrset_txt_t* node_txt = NULL;

    {
        ltree_rrset_t* rrset = node->rrsets;
        while(rrset) {
            switch(rrset->gen.type) {
                case DNS_TYPE_A: node_addr = &rrset->addr; break;
                case DNS_TYPE_SOA: /* phase1 doesn't use SOA */ break;
                case DNS_TYPE_CNAME: node_cname = &rrset->cname; break;
                case DNS_TYPE_DYNC: node_dync = &rrset->dync; break;
                case DNS_TYPE_NS: node_ns = &rrset->ns; break;
                case DNS_TYPE_PTR: node_ptr = &rrset->ptr; break;
                case DNS_TYPE_MX: node_mx = &rrset->mx; break;
                case DNS_TYPE_SRV: node_srv = &rrset->srv; break;
                case DNS_TYPE_NAPTR: node_naptr = &rrset->naptr; break;
                case DNS_TYPE_TXT: node_txt = &rrset->txt; break;
                default: node_has_rfc3597 = true; break;
            }
            rrset = rrset->gen.next;
        }
    }

    // Inside a delegation cut only NS and address records are legal,
    // and wildcard names are not allowed at all
    if(in_deleg) {
        dmn_assert(depth > 0);
        if(lstack[depth - 1][0] == 1 && lstack[depth - 1][1] == '*')
            log_zfatal("Domainname '%s%s': Wildcards not allowed for delegation/glue data", logf_lstack(lstack, depth, zone->dname));

        if(node_cname
           || node_dync
           || node_ptr
           || node_mx
           || node_srv
           || node_naptr
           || node_txt
           || (node_ns && !(node->flags & LTNFLAG_DELEG))
           || node_has_rfc3597)
            log_zfatal("Delegated sub-zone '%s%s' can only have NS and/or address records as appropriate", logf_lstack(lstack, depth, zone->dname));
    }

    if(node_cname) {
        if(node->rrsets->gen.next) // basically "if first RR for this node has a link to a second RR"
            log_zfatal("CNAME not allowed alongside other data at domainname '%s%s'", logf_lstack(lstack, depth, zone->dname));
        if(p1_proc_cname(zone, node_cname, lstack, depth))
            return true;
        return false; // CNAME can't co-exist with others, so we're done here
    }

    if(node_dync) {
        if(node->rrsets->gen.next) // basically "if first RR for this node has a link to a second RR"
            log_zfatal("DYNC not allowed alongside other data at domainname '%s%s'", logf_lstack(lstack, depth, zone->dname));
        return false; // DYNC can't co-exist with others, so we're done here
    }

    // bitwise-OR: "either the v4 or v6 count is nonzero"
    if(node_addr && (node_addr->gen.count | node_addr->count_v6))
        fix_addr_limits(node_addr);

    // Wire up additional-data pointers (and glue flags) for each NS entry
    if(node_ns)
        for(unsigned i = 0; i < node_ns->gen.count; i++)
            if(p1_proc_ns(zone, in_deleg, &(node_ns->rdata[i]), lstack, depth))
                return true;

    // MX/SRV/NAPTR: attach additional-data addresses for same-zone targets,
    // warning (fatal under strict) when an auth target has no addresses
    if(node_mx)
        for(unsigned i = 0; i < node_mx->gen.count; i++)
            if(!set_valid_addr(node_mx->rdata[i].dname, zone, &(node_mx->rdata[i].ad)))
                log_zwarn("In rrset '%s%s MX', same-zone target '%s' has no addresses", logf_lstack(lstack, depth, zone->dname), logf_dname(node_mx->rdata[i].dname));

    if(node_srv)
        for(unsigned i = 0; i < node_srv->gen.count; i++)
            if(!set_valid_addr(node_srv->rdata[i].dname, zone, &(node_srv->rdata[i].ad)))
                log_zwarn("In rrset '%s%s SRV', same-zone target '%s' has no addresses", logf_lstack(lstack, depth, zone->dname), logf_dname(node_srv->rdata[i].dname));

    // NAPTR: only entries whose Flags field contains 'A'/'a' name an
    // address target worth resolving here
    if(node_naptr) {
        for(unsigned i = 0; i < node_naptr->gen.count; i++) {
            if(binstr_hasichr(node_naptr->rdata[i].texts[NAPTR_TEXTS_FLAGS], 'A')) {
                if(!set_valid_addr(node_naptr->rdata[i].dname, zone, &(node_naptr->rdata[i].ad)))
                    log_zwarn("In rrset '%s%s NAPTR', same-zone A-target '%s' has no A or AAAA records", logf_lstack(lstack, depth, zone->dname), logf_dname(node_naptr->rdata[i].dname));
            }
        }
    }

    return false;
}
1030
// Phase 2:
// Checks on unused glue RRs underneath delegations
// Checks the total count of glue RRs per delegation
// (NOTE(review): an older version of this comment also claimed TTL matching
//  between NS and glue RRs, but no such check appears in this function.)
F_WUNUSED F_NONNULL
static bool ltree_postproc_phase2(const uint8_t** lstack, const ltree_node_t* node, const zone_t* zone, const unsigned depth, const bool in_deleg) {
    if(in_deleg) {
        // phase1 already rejected CNAME/DYNC inside delegations
        dmn_assert(!ltree_node_get_rrset_cname(node));
        dmn_assert(!ltree_node_get_rrset_dync(node));
        // address data in a delegation that no NS entry consumed is dead weight
        if(ltree_node_get_rrset_addr(node) && !(node->flags & LTNFLAG_GUSED))
            log_zwarn("Delegation glue address(es) at domainname '%s%s' are unused and ignored", logf_lstack(lstack, depth, zone->dname));
        if(node->flags & LTNFLAG_DELEG) {
            ltree_rrset_ns_t* ns = ltree_node_get_rrset_ns(node);
            dmn_assert(ns);
            const unsigned nsct = ns->gen.count;
            ltree_rdata_ns_t* nsrd = ns->rdata;
            dmn_assert(nsct);
            dmn_assert(nsrd);
            // count how many NS entries were flagged as glue by phase1
            unsigned num_glue = 0;
            for(unsigned i = 0; i < nsct; i++) {
                if(AD_IS_GLUE(nsrd[i].ad))
                    num_glue++;
            }
            if(num_glue > gcfg->max_addtl_rrsets)
                log_zfatal("Delegation point '%s%s' has '%u' glued NS RRs, which is greater than the configured max_addtl_rrsets (%u)", logf_lstack(lstack, depth, zone->dname), num_glue, gcfg->max_addtl_rrsets);
        }
    }

    return false;
}
1061
1062 F_WUNUSED F_NONNULLX(1, 2, 3)
_ltree_proc_inner(bool (* fn)(const uint8_t **,const ltree_node_t *,const zone_t *,const unsigned,const bool),const uint8_t ** lstack,ltree_node_t * node,const zone_t * zone,const unsigned depth,bool in_deleg)1063 static bool _ltree_proc_inner(bool (*fn)(const uint8_t**, const ltree_node_t*, const zone_t*, const unsigned, const bool), const uint8_t** lstack, ltree_node_t* node, const zone_t* zone, const unsigned depth, bool in_deleg) {
1064 if(node->flags & LTNFLAG_DELEG) {
1065 dmn_assert(node->label);
1066 if(in_deleg)
1067 log_zfatal("Delegation '%s%s' is within another delegation", logf_lstack(lstack, depth, zone->dname));
1068 in_deleg = true;
1069 }
1070
1071 if(unlikely(fn(lstack, node, zone, depth, in_deleg)))
1072 return true;
1073
1074 // Recurse into children
1075 if(node->child_table) {
1076 const uint32_t cmask = node->child_hash_mask;
1077 for(uint32_t i = 0; i <= cmask; i++) {
1078 ltree_node_t* child = node->child_table[i];
1079 while(child) {
1080 lstack[depth] = child->label;
1081 if(unlikely(_ltree_proc_inner(fn, lstack, child, zone, depth + 1, in_deleg)))
1082 return true;
1083 child = child->next;
1084 }
1085 }
1086 }
1087
1088 return false;
1089 }
1090
1091 F_WUNUSED F_NONNULL
ltree_postproc(const zone_t * zone,bool (* fn)(const uint8_t **,const ltree_node_t *,const zone_t *,const unsigned,const bool))1092 static bool ltree_postproc(const zone_t* zone, bool (*fn)(const uint8_t**, const ltree_node_t*, const zone_t*, const unsigned, const bool)) {
1093 // label stack:
1094 // used to reconstruct full domainnames
1095 // for error/warning message output
1096 const uint8_t* lstack[127];
1097
1098 return _ltree_proc_inner(fn, lstack, zone->root, zone, 0, false);
1099 }
1100
1101 F_WUNUSED F_NONNULL
ltree_postproc_zroot_phase1(zone_t * zone)1102 static bool ltree_postproc_zroot_phase1(zone_t* zone) {
1103 ltree_node_t* zroot = zone->root;
1104 ltree_rrset_soa_t* zroot_soa = NULL;
1105 ltree_rrset_ns_t* zroot_ns = NULL;
1106
1107 {
1108 ltree_rrset_t* rrset = zroot->rrsets;
1109 while(rrset) {
1110 switch(rrset->gen.type) {
1111 case DNS_TYPE_SOA: zroot_soa = &rrset->soa; break;
1112 case DNS_TYPE_NS: zroot_ns = &rrset->ns; break;
1113 default: break;
1114 }
1115 rrset = rrset->gen.next;
1116 }
1117 }
1118
1119 dmn_assert(!zroot->label); // zone roots don't get a label
1120 if(!zroot_soa)
1121 log_zfatal("Zone '%s' has no SOA record", logf_dname(zone->dname));
1122 if(!zroot_ns)
1123 log_zfatal("Zone '%s' has no NS records", logf_dname(zone->dname));
1124 bool ok = false;
1125 dmn_assert(zroot_ns->gen.count);
1126 if(zroot_ns->gen.count < 2)
1127 log_zwarn("Zone '%s' only has one NS record, this is (probably) bad practice", logf_dname(zone->dname));
1128 for(unsigned i = 0; i < zroot_ns->gen.count; i++) {
1129 if(!gdnsd_dname_cmp(zroot_soa->master, zroot_ns->rdata[i].dname)) {
1130 ok = true;
1131 break;
1132 }
1133 }
1134 if(!ok)
1135 log_zwarn("Zone '%s': SOA Master does not match any NS records for this zone", logf_dname(zone->dname));
1136
1137 // copy SOA Serial field up to zone_t for easy comparisons
1138 zone->serial = ntohl(zroot_soa->times[0]);
1139 return false;
1140 }
1141
1142 F_NONNULL
ltree_postproc_zroot_phase2(const zone_t * zone)1143 static bool ltree_postproc_zroot_phase2(const zone_t* zone) {
1144 ltree_node_t* ooz = ltree_node_find_child(zone->root, ooz_glue_label);
1145 if(ooz) {
1146 for(unsigned i = 0; i <= ooz->child_hash_mask; i++) {
1147 ltree_node_t* ooz_node = ooz->child_table[i];
1148 while(ooz_node) {
1149 dmn_assert(ooz_node->rrsets);
1150 dmn_assert(ooz_node->rrsets->gen.type == DNS_TYPE_A);
1151 dmn_assert(!ooz_node->rrsets->gen.next);
1152 fix_addr_limits(&ooz_node->rrsets->addr);
1153 if(!(ooz_node->flags & LTNFLAG_GUSED))
1154 log_zwarn("In zone '%s', explicit out-of-zone glue address(es) at domainname '%s' are unused and ignored", logf_dname(zone->dname), logf_dname(ooz_node->label));
1155 ooz_node = ooz_node->next;
1156 }
1157 }
1158 }
1159
1160 return false;
1161 }
1162
1163 F_NONNULL
ltree_fix_masks(ltree_node_t * node)1164 static void ltree_fix_masks(ltree_node_t* node) {
1165 const uint32_t cmask = count2mask(node->child_hash_mask);
1166 node->child_hash_mask = cmask;
1167 if(node->child_table) {
1168 for(uint32_t i = 0; i <= cmask; i++) {
1169 ltree_node_t* child = node->child_table[i];
1170 while(child) {
1171 ltree_fix_masks(child);
1172 child = child->next;
1173 }
1174 }
1175 }
1176 }
1177
1178 // common processing for zones
ltree_init_zone(zone_t * zone)1179 void ltree_init_zone(zone_t* zone) {
1180 dmn_assert(zone->dname);
1181 dmn_assert(zone->arena);
1182 dmn_assert(!zone->root);
1183
1184 zone->root = ltree_node_new(zone->arena, NULL, 0);
1185 }
1186
ltree_postproc_zone(zone_t * zone)1187 bool ltree_postproc_zone(zone_t* zone) {
1188 dmn_assert(zone->dname);
1189 dmn_assert(zone->arena);
1190 dmn_assert(zone->root);
1191
1192 ltree_fix_masks(zone->root);
1193
1194 // zroot phase1 is a readonly check of zone basics
1195 // (e.g. NS/SOA existence), also sets zone->serial
1196 if(unlikely(ltree_postproc_zroot_phase1(zone)))
1197 return true;
1198 // tree phase1 does a ton of readonly per-node checks
1199 // (e.g. junk inside delegations, CNAME depth, CNAME
1200 // and DYNC do not have partner rrsets)
1201 // It also sets additional-data pointers from various
1202 // other RR-types -> address rrsets, including
1203 // flagging glue in the glue-address cases and
1204 // marking it as used. Ditto for additional data
1205 // for local CNAME targets.
1206 if(unlikely(ltree_postproc(zone, ltree_postproc_phase1)))
1207 return true;
1208
1209 // zroot phase2 checks for unused out-of-zone glue addresses,
1210 // and also does the standard address limit>count fixups on them
1211 if(unlikely(ltree_postproc_zroot_phase2(zone)))
1212 return true;
1213
1214 // tree phase2 looks for unused delegation glue addresses,
1215 // and delegation glue address sets that exceed max_addtl_rrsets
1216 if(unlikely(ltree_postproc(zone, ltree_postproc_phase2)))
1217 return true;
1218 return false;
1219 }
1220
ltree_destroy(ltree_node_t * node)1221 void ltree_destroy(ltree_node_t* node) {
1222 ltree_rrset_t* rrset = node->rrsets;
1223 while(rrset) {
1224 ltree_rrset_t* next = rrset->gen.next;
1225 switch(rrset->gen.type) {
1226 case DNS_TYPE_A:
1227 if(rrset->addr.count_v6) {
1228 dmn_assert(rrset->addr.addrs.v6);
1229 free(rrset->addr.addrs.v6);
1230 if(rrset->addr.addrs.v4)
1231 free(rrset->addr.addrs.v4);
1232 }
1233 else if(rrset->gen.count && rrset->gen.count > LTREE_V4A_SIZE) {
1234 dmn_assert(!rrset->addr.addrs.v6);
1235 dmn_assert(rrset->addr.addrs.v4);
1236 free(rrset->addr.addrs.v4);
1237 }
1238 break;
1239
1240 case DNS_TYPE_NAPTR:
1241 for(unsigned i = 0; i < rrset->gen.count; i++) {
1242 free(rrset->naptr.rdata[i].texts[NAPTR_TEXTS_REGEXP]);
1243 free(rrset->naptr.rdata[i].texts[NAPTR_TEXTS_SERVICES]);
1244 free(rrset->naptr.rdata[i].texts[NAPTR_TEXTS_FLAGS]);
1245 }
1246 free(rrset->naptr.rdata);
1247 break;
1248 case DNS_TYPE_TXT:
1249 for(unsigned i = 0; i < rrset->gen.count; i++) {
1250 uint8_t** tptr = rrset->txt.rdata[i];
1251 uint8_t* t;
1252 while((t = *tptr++))
1253 free(t);
1254 free(rrset->txt.rdata[i]);
1255 }
1256 free(rrset->txt.rdata);
1257 break;
1258 case DNS_TYPE_NS:
1259 free(rrset->ns.rdata);
1260 break;
1261 case DNS_TYPE_MX:
1262 free(rrset->mx.rdata);
1263 break;
1264 case DNS_TYPE_PTR:
1265 free(rrset->ptr.rdata);
1266 break;
1267 case DNS_TYPE_SRV:
1268 free(rrset->srv.rdata);
1269 break;
1270 case DNS_TYPE_SOA:
1271 case DNS_TYPE_CNAME:
1272 case DNS_TYPE_DYNC:
1273 break;
1274 default:
1275 for(unsigned i = 0; i < rrset->gen.count; i++)
1276 free(rrset->rfc3597.rdata[i].rd);
1277 free(rrset->rfc3597.rdata);
1278 break;
1279 }
1280 free(rrset);
1281 rrset = next;
1282 }
1283
1284 if(node->child_table) {
1285 const uint32_t cmask = count2mask(node->child_hash_mask);
1286 for(unsigned i = 0; i <= cmask; i++) {
1287 ltree_node_t* child = node->child_table[i];
1288 while(child) {
1289 ltree_node_t* next = child->next;
1290 ltree_destroy(child);
1291 child = next;
1292 }
1293 }
1294 }
1295
1296 free(node->child_table);
1297 free(node);
1298 }
1299