1 /*
2 * ntp_restrict.c - determine host restrictions
3 */
4
5 #include "config.h"
6
7 #include <stdio.h>
8 #include <sys/types.h>
9
10 #include "ntpd.h"
11 #include "ntp_lists.h"
12 #include "ntp_stdlib.h"
13 #include "ntp_assert.h"
14
15 /*
16 * This code keeps a simple address-and-mask list of hosts we want
17 * to place restrictions on (or remove them from). The restrictions
18 * are implemented as a set of flags which tell you what the host
19 * can't do. There is a subroutine entry to return the flags. The
20 * list is kept sorted to reduce the average number of comparisons
21 * and make sure you get the set of restrictions most specific to
22 * the address.
23 *
24 * The algorithm is that, when looking up a host, it is first assumed
25 * that the default set of restrictions will apply. It then searches
26 * down through the list. Whenever it finds a match it adopts the
27 * match's flags instead. When you hit the point where the sorted
28 * address is greater than the target, you return with the last set of
29 * flags you found. Because of the ordering of the list, the most
30 * specific match will provide the final set of flags.
31 *
32 * This was originally intended to restrict you from sync'ing to your
33 * own broadcasts when you are doing that, by restricting yourself from
34 * your own interfaces. It was also thought it would sometimes be useful
35 * to keep a misbehaving host or two from abusing your primary clock. It
36 * has been expanded, however, to suit the needs of those with more
37 * restrictive access policies.
38 */
39 /*
40 * We will use two lists, one for IPv4 addresses and one for IPv6
41 * addresses. This is not protocol-independent but for now I can't
42 * find a way to respect this. We'll check this later... JFB 07/2001
43 */
/*
 * MASK_IPV6_ADDR(dst, src, msk) - byte-wise AND of an IPv6 address
 * with a netmask.  dst, src, and msk are struct in6_addr pointers;
 * dst receives src & msk.  Multi-statement macro wrapped in
 * do { } while (0) so it behaves as a single statement.
 */
#define MASK_IPV6_ADDR(dst, src, msk)					\
	do {								\
		int idx;						\
		for (idx = 0; idx < (int)COUNTOF((dst)->s6_addr); idx++) { \
			(dst)->s6_addr[idx] = (src)->s6_addr[idx]	\
					      & (msk)->s6_addr[idx];	\
		}							\
	} while (0)

/*
 * We allocate INC_RESLIST{4|6} entries to the free list whenever empty.
 * Auto-tune these to be just less than 1KB (leaving at least 16 bytes
 * for allocator overhead).
 */
#define	INC_RESLIST4	((1024 - 16) / V4_SIZEOF_RESTRICT_U)
#define	INC_RESLIST6	((1024 - 16) / V6_SIZEOF_RESTRICT_U)
60
/*
 * The restriction list.  Only the RES_LIMITED rate-limit parameters
 * are initialized here; the list heads themselves are populated by
 * init_restrict() and remaining members are zero-initialized.
 */
struct restriction_data rstrct = {
	/*
	 * (MOVED FROM ntp_monitor.c)
	 * Parameters of the RES_LIMITED restriction option. We define headway
	 * as the idle time between packets. A packet is discarded if the
	 * headway is less than the minimum, as well as if the average headway
	 * is less than eight times the increment.
	 */
	.ntp_minpkt = NTP_MINPKT,	/* minimum (log 2 s) */
	.ntp_minpoll = NTP_MINPOLL,	/* increment (log 2 s) */
};
static int restrictcount;	/* total entries across both restrict lists */

/*
 * The free list and associated counters.  Also some uninteresting
 * stat counters.
 */
static restrict_u *resfree4;	/* available entries (free list) */
static restrict_u *resfree6;

static unsigned long res_calls;		/* total restrictions() lookups */
static unsigned long res_found;		/* lookups matching a non-default entry */
static unsigned long res_not_found;	/* lookups falling through to the default */

/*
 * Count number of restriction entries referring to RES_LIMITED, to
 * control implicit activation/deactivation of the MRU monlist.
 */
static unsigned long res_limited_refcnt;

/*
 * Our default entries.  These terminate each list (address and mask
 * both zero match everything) and are never freed.
 */
static restrict_u restrict_def4;
static restrict_u restrict_def6;

/*
 * "restrict source ..." enabled knob and restriction bits.
 */
static bool restrict_source_enabled = false;
static unsigned short restrict_source_flags;
static unsigned short restrict_source_mflags;

/*
 * private functions
 */
static restrict_u *	alloc_res4(void);
static restrict_u *	alloc_res6(void);
static void		free_res(restrict_u *, bool);
static void		inc_res_limited(void);
static void		dec_res_limited(void);
static restrict_u *	match_restrict4_addr(uint32_t, unsigned short);
static restrict_u *	match_restrict6_addr(const struct in6_addr *,
					     unsigned short);
static restrict_u *	match_restrict_entry(const restrict_u *, int);
static int		res_sorts_before4(restrict_u *, restrict_u *);
static int		res_sorts_before6(restrict_u *, restrict_u *);
121
122
/*
 * init_restrict - initialize the restriction data structures
 *
 * Installs the two default ("match everything") entries, one per
 * address family, and primes the RES_LIMITED refcount accordingly.
 */
void
init_restrict(void)
{
	/*
	 * The restriction lists begin with a default entry with address
	 * and mask 0, which will match any entry.  The lists are kept
	 * sorted by descending address followed by descending mask:
	 *
	 *   address	  mask
	 *   192.168.0.0  255.255.255.0	kod limited noquery nopeer
	 *   192.168.0.0  255.255.0.0	kod limited
	 *   0.0.0.0	  0.0.0.0	kod limited noquery
	 *
	 * The first entry which matches an address is used.  With the
	 * example restrictions above, 192.168.0.0/24 matches the first
	 * entry, the rest of 192.168.0.0/16 matches the second, and
	 * everything else matches the third (default).
	 *
	 * Note this achieves the same result a little more efficiently
	 * than the documented behavior, which is to keep the lists
	 * sorted by ascending address followed by ascending mask, with
	 * the _last_ matching entry used.
	 *
	 * An additional wrinkle is we may have multiple entries with
	 * the same address and mask but differing match flags (mflags).
	 * At present there is only one, RESM_NTPONLY.  Entries with
	 * RESM_NTPONLY are sorted earlier so they take precedence over
	 * any otherwise similar entry without.  Again, this is the same
	 * behavior as but reversed implementation compared to the docs.
	 *
	 */

	LINK_SLIST(rstrct.restrictlist4, &restrict_def4, link);
	LINK_SLIST(rstrct.restrictlist6, &restrict_def6, link);
	restrict_def4.flags = RES_Default;
	restrict_def6.flags = RES_Default;
	if (RES_Default & RES_LIMITED) {
		/* both default entries carry RES_LIMITED: count each */
		inc_res_limited();
		inc_res_limited();
	}
	restrictcount = 2;	/* the two default entries */
}
168
169
170 static restrict_u *
alloc_res4(void)171 alloc_res4(void)
172 {
173 const size_t cb = V4_SIZEOF_RESTRICT_U;
174 const size_t count = INC_RESLIST4;
175 restrict_u * rl;
176 restrict_u * res;
177
178 UNLINK_HEAD_SLIST(res, resfree4, link);
179 if (res != NULL)
180 return res;
181
182 rl = emalloc_zero(count * cb);
183 /* link all but the first onto free list */
184 res = (void *)((char *)rl + (count - 1) * cb);
185 for (int i = count - 1; i > 0; i--) {
186 LINK_SLIST(resfree4, res, link);
187 res = (void *)((char *)res - cb);
188 }
189 INSIST(rl == res);
190 /* allocate the first */
191 return res;
192 }
193
194
195 static restrict_u *
alloc_res6(void)196 alloc_res6(void)
197 {
198 const size_t cb = V6_SIZEOF_RESTRICT_U;
199 const size_t count = INC_RESLIST6;
200 restrict_u * rl;
201 restrict_u * res;
202
203 UNLINK_HEAD_SLIST(res, resfree6, link);
204 if (res != NULL)
205 return res;
206
207 rl = emalloc_zero(count * cb);
208 /* link all but the first onto free list */
209 res = (void *)((char *)rl + (count - 1) * cb);
210 for (int i = count - 1; i > 0; i--) {
211 LINK_SLIST(resfree6, res, link);
212 res = (void *)((char *)res - cb);
213 }
214 INSIST(rl == res);
215 /* allocate the first */
216 return res;
217 }
218
219
220 static void
free_res(restrict_u * res,bool v6)221 free_res(
222 restrict_u * res,
223 bool v6
224 )
225 {
226 restrict_u ** plisthead;
227 restrict_u * unlinked;
228
229 restrictcount--;
230 if (RES_LIMITED & res->flags)
231 dec_res_limited();
232
233 if (v6)
234 plisthead = &rstrct.restrictlist6;
235 else
236 plisthead = &rstrct.restrictlist4;
237 UNLINK_SLIST(unlinked, *plisthead, res, link, restrict_u);
238 INSIST(unlinked == res);
239
240 if (v6) {
241 memset(res, '\0', V6_SIZEOF_RESTRICT_U);
242 plisthead = &resfree6;
243 } else {
244 memset(res, '\0', V4_SIZEOF_RESTRICT_U);
245 plisthead = &resfree4;
246 }
247 LINK_SLIST(*plisthead, res, link);
248 }
249
250
251 static void
inc_res_limited(void)252 inc_res_limited(void)
253 {
254 if (!res_limited_refcnt)
255 mon_setup(MON_RES);
256 res_limited_refcnt++;
257 }
258
259
260 static void
dec_res_limited(void)261 dec_res_limited(void)
262 {
263 res_limited_refcnt--;
264 if (!res_limited_refcnt)
265 mon_setdown(MON_RES);
266 }
267
268
269 static restrict_u *
match_restrict4_addr(uint32_t addr,unsigned short port)270 match_restrict4_addr(
271 uint32_t addr,
272 unsigned short port
273 )
274 {
275 restrict_u * res;
276 restrict_u * next;
277
278 for (res = rstrct.restrictlist4; res != NULL; res = next) {
279 next = res->link;
280 if (res->u.v4.addr == (addr & res->u.v4.mask)
281 && (!(RESM_NTPONLY & res->mflags)
282 || NTP_PORT == port))
283 break;
284 }
285 return res;
286 }
287
288
289 static restrict_u *
match_restrict6_addr(const struct in6_addr * addr,unsigned short port)290 match_restrict6_addr(
291 const struct in6_addr * addr,
292 unsigned short port
293 )
294 {
295 restrict_u * res;
296 restrict_u * next;
297 struct in6_addr masked;
298
299 for (res = rstrct.restrictlist6; res != NULL; res = next) {
300 next = res->link;
301 INSIST(next != res);
302 MASK_IPV6_ADDR(&masked, addr, &res->u.v6.mask);
303 if (ADDR6_EQ(&masked, &res->u.v6.addr)
304 && (!(RESM_NTPONLY & res->mflags)
305 || NTP_PORT == (int)port))
306 break;
307 }
308 return res;
309 }
310
311
312 /*
313 * match_restrict_entry - find an exact match on a restrict list.
314 *
315 * Exact match is addr, mask, and mflags all equal.
316 * In order to use more common code for IPv4 and IPv6, this routine
317 * requires the caller to populate a restrict_u with mflags and either
318 * the v4 or v6 address and mask as appropriate. Other fields in the
319 * input restrict_u are ignored.
320 */
321 static restrict_u *
match_restrict_entry(const restrict_u * pmatch,int v6)322 match_restrict_entry(
323 const restrict_u * pmatch,
324 int v6
325 )
326 {
327 restrict_u *res;
328 restrict_u *rlist;
329 size_t cb;
330
331 if (v6) {
332 rlist = rstrct.restrictlist6;
333 cb = sizeof(pmatch->u.v6);
334 } else {
335 rlist = rstrct.restrictlist4;
336 cb = sizeof(pmatch->u.v4);
337 }
338
339 for (res = rlist; res != NULL; res = res->link)
340 if (res->mflags == pmatch->mflags &&
341 !memcmp(&res->u, &pmatch->u, cb))
342 break;
343 return res;
344 }
345
346
347 /*
348 * res_sorts_before4 - compare two restrict4 entries
349 *
350 * Returns nonzero if r1 sorts before r2. We sort by descending
351 * address, then descending mask, then descending mflags, so sorting
352 * before means having a higher value.
353 */
354 static int
res_sorts_before4(restrict_u * r1,restrict_u * r2)355 res_sorts_before4(
356 restrict_u *r1,
357 restrict_u *r2
358 )
359 {
360 int r1_before_r2;
361
362 if (r1->u.v4.addr > r2->u.v4.addr)
363 r1_before_r2 = 1;
364 else if (r1->u.v4.addr < r2->u.v4.addr)
365 r1_before_r2 = 0;
366 else if (r1->u.v4.mask > r2->u.v4.mask)
367 r1_before_r2 = 1;
368 else if (r1->u.v4.mask < r2->u.v4.mask)
369 r1_before_r2 = 0;
370 else if (r1->mflags > r2->mflags)
371 r1_before_r2 = 1;
372 else
373 r1_before_r2 = 0;
374
375 return r1_before_r2;
376 }
377
378
379 /*
380 * res_sorts_before6 - compare two restrict6 entries
381 *
382 * Returns nonzero if r1 sorts before r2. We sort by descending
383 * address, then descending mask, then descending mflags, so sorting
384 * before means having a higher value.
385 */
386 static int
res_sorts_before6(restrict_u * r1,restrict_u * r2)387 res_sorts_before6(
388 restrict_u *r1,
389 restrict_u *r2
390 )
391 {
392 int r1_before_r2;
393 int cmp;
394
395 cmp = ADDR6_CMP(&r1->u.v6.addr, &r2->u.v6.addr);
396 if (cmp > 0) { /* r1->addr > r2->addr */
397 r1_before_r2 = 1;
398 } else if (cmp < 0) { /* r2->addr > r1->addr */
399 r1_before_r2 = 0;
400 } else {
401 cmp = ADDR6_CMP(&r1->u.v6.mask, &r2->u.v6.mask);
402 if (cmp > 0) { /* r1->mask > r2->mask*/
403 r1_before_r2 = 1;
404 } else if (cmp < 0) { /* r2->mask > r1->mask */
405 r1_before_r2 = 0;
406 } else if (r1->mflags > r2->mflags)
407 r1_before_r2 = 1;
408 else
409 r1_before_r2 = 0;
410 }
411
412 return r1_before_r2;
413 }
414
415
416 /*
417 * restrictions - return restrictions for this host
418 */
419 unsigned short
restrictions(sockaddr_u * srcadr)420 restrictions(
421 sockaddr_u *srcadr
422 )
423 {
424 restrict_u *match;
425 struct in6_addr *pin6;
426 unsigned short flags;
427
428 res_calls++;
429 flags = 0;
430 /* IPv4 source address */
431 if (IS_IPV4(srcadr)) {
432 /*
433 * Ignore any packets with a multicast source address
434 * (this should be done early in the receive process,
435 * not later!)
436 */
437 if (IN_CLASSD(SRCADR(srcadr)))
438 return (int)RES_IGNORE;
439
440 match = match_restrict4_addr(SRCADR(srcadr),
441 SRCPORT(srcadr));
442 match->hitcount++;
443 /*
444 * res_not_found counts only use of the final default
445 * entry, not any "restrict default ntpport ...", which
446 * would be just before the final default.
447 */
448 if (&restrict_def4 == match)
449 res_not_found++;
450 else
451 res_found++;
452 flags = match->flags;
453 }
454
455 /* IPv6 source address */
456 if (IS_IPV6(srcadr)) {
457 pin6 = PSOCK_ADDR6(srcadr);
458
459 /*
460 * Ignore any packets with a multicast source address
461 * (this should be done early in the receive process,
462 * not later!)
463 */
464 if (IN6_IS_ADDR_MULTICAST(pin6))
465 return (int)RES_IGNORE;
466
467 match = match_restrict6_addr(pin6, SRCPORT(srcadr));
468 match->hitcount++;
469 if (&restrict_def6 == match)
470 res_not_found++;
471 else
472 res_found++;
473 flags = match->flags;
474 }
475 return (flags);
476 }
477
478
/*
 * hack_restrict - add/subtract/manipulate entries on the restrict list
 *
 * op:      RESTRICT_FLAGS (add flag bits, creating the entry if new),
 *          RESTRICT_UNFLAG (clear flag bits), RESTRICT_REMOVE or
 *          RESTRICT_REMOVEIF (delete the entry)
 * resaddr: address of the entry to operate on; NULL selects the
 *          "restrict source" configuration path (no list change)
 * resmask: netmask paired with resaddr (NULL when resaddr is NULL)
 * mflags:  match flags (e.g. RESM_NTPONLY) identifying the entry
 * flags:   restriction flag bits to add or remove
 */
void
hack_restrict(
	int		op,
	sockaddr_u *	resaddr,
	sockaddr_u *	resmask,
	unsigned short	mflags,
	unsigned short	flags
	)
{
	bool		v6;
	restrict_u	match;
	restrict_u *	res;
	restrict_u **	plisthead;

	DPRINT(1, ("restrict: op %d addr %s mask %s mflags %08x flags %08x\n",
		   op, socktoa(resaddr), socktoa(resmask), mflags, flags));

	if (NULL == resaddr) {
		/* restrict source: just record flags for later pokes */
		REQUIRE(NULL == resmask);
		REQUIRE(RESTRICT_FLAGS == op);
		restrict_source_flags = flags;
		restrict_source_mflags = mflags;
		restrict_source_enabled = true;
		return;
	}

	ZERO(match);
	/* silence VC9 potentially uninit warnings */
	res = NULL;
	v6 = false;

	if (IS_IPV4(resaddr)) {
		v6 = false;
		/*
		 * Get address and mask in host byte order for easy
		 * comparison as uint32_t
		 */
		match.u.v4.addr = SRCADR(resaddr);
		match.u.v4.mask = SRCADR(resmask);
		match.u.v4.addr &= match.u.v4.mask;

	} else if (IS_IPV6(resaddr)) {
		v6 = true;
		/*
		 * Get address and mask in network byte order for easy
		 * comparison as byte sequences (e.g. memcmp())
		 */
		match.u.v6.mask = SOCK_ADDR6(resmask);
		MASK_IPV6_ADDR(&match.u.v6.addr, PSOCK_ADDR6(resaddr),
			       &match.u.v6.mask);

	} else	/* not IPv4 nor IPv6 */
		REQUIRE(0);

	match.flags = flags;
	match.mflags = mflags;
	res = match_restrict_entry(&match, v6);

	switch (op) {

	case RESTRICT_FLAGS:
		/*
		 * Here we add bits to the flags. If this is a
		 * new restriction add it.
		 */
		if (NULL == res) {
			if (v6) {
				res = alloc_res6();
				memcpy(res, &match,
				       V6_SIZEOF_RESTRICT_U);
				plisthead = &rstrct.restrictlist6;
			} else {
				res = alloc_res4();
				memcpy(res, &match,
				       V4_SIZEOF_RESTRICT_U);
				plisthead = &rstrct.restrictlist4;
			}
			/* insert in sorted (most-specific-first) order */
			LINK_SORT_SLIST(
				*plisthead, res,
				(v6)
				    ? res_sorts_before6(res, L_S_S_CUR())
				    : res_sorts_before4(res, L_S_S_CUR()),
				link, restrict_u);
			restrictcount++;
			if (RES_LIMITED & flags)
				inc_res_limited();
		} else {
			/* existing entry: accumulate the new flag bits */
			if ((RES_LIMITED & flags) &&
			    !(RES_LIMITED & res->flags))
				inc_res_limited();
			res->flags |= flags;
		}
		break;

	case RESTRICT_UNFLAG:
		/*
		 * Remove some bits from the flags. If we didn't
		 * find this one, just return.
		 */
		if (res != NULL) {
			if ((RES_LIMITED & res->flags)
			    && (RES_LIMITED & flags))
				dec_res_limited();
			res->flags &= ~flags;
		}
		break;

	case RESTRICT_REMOVE:
	case RESTRICT_REMOVEIF:
		/*
		 * Remove an entry from the table entirely if we
		 * found one. Don't remove the default entry and
		 * don't remove an interface entry.
		 */
		if (res != NULL
		    && (RESTRICT_REMOVEIF == op
			|| !(RESM_INTERFACE & res->mflags))
		    && res != &restrict_def4
		    && res != &restrict_def6)
			free_res(res, v6);
		break;

	default:	/* unknown op */
		INSIST(0);
		break;
	}

}
611
612
/* restrict_source - poke hole in restrictions if needed
 * requires "restrict source <flags|NULL>"
 * Called in 3 cases:
 *   newpeer when allocating a slot with IP Address
 *   dns_check/dns_take_server when DNS assigns an IP Address
 *   nts_check/dns_take_server when NTS assigns an IP Address
 *
 * Holes created have RESM_SOURCE in mflags
 * Restrictions must be initialized before adding servers
 */
void
restrict_source(
	struct peer *	peer
	)
{
	sockaddr_u *	addr = &peer->srcadr;
	sockaddr_u	onesmask;
	restrict_u *	res;
	bool		found_specific = false;
	bool		need_poke = false;
	bool		auth, nts;

	REQUIRE(AF_INET == AF(addr) || AF_INET6 == AF(addr));

	SET_HOSTMASK(&onesmask, AF(addr));

	/*
	 * If there is a specific entry for this address, hands
	 * off, as it is considered more specific than "restrict
	 * server ...".
	 */
	if (IS_IPV4(addr)) {
		res = match_restrict4_addr(SRCADR(addr), SRCPORT(addr));
		/* "specific" means an exact host mask (all-ones) */
		found_specific = (SRCADR(&onesmask) == res->u.v4.mask);
	} else {
		res = match_restrict6_addr(&SOCK_ADDR6(addr),
					   SRCPORT(addr));
		found_specific = ADDR6_EQ(&res->u.v6.mask,
					  &SOCK_ADDR6(&onesmask));
	}

	/* a hole is needed if packets from this address are ignored... */
	if (RES_IGNORE & res->flags) {
		need_poke = true;
	}
	auth = (0 != peer->cfg.peerkey);
	nts = peer->cfg.flags & FLAG_NTS;
	if (RES_DONTTRUST & res->flags && !auth && !nts) {
		/* needs authentication, but this slot doesn't have any */
		need_poke = true;
	}
	if (!need_poke) {
		/* works without a hole */
		return;
	}
	if (found_specific) {
		/* can't override an explicit host-specific restriction */
		msyslog(LOG_ERR, "RESTRICT: Specific restriction will break %s",
			socktoa(addr));
		return;
	}
	if (!restrict_source_enabled) {
		msyslog(LOG_ERR, "RESTRICT: Can't poke hole in restrictions for %s - need \"restrict source <flags>\"",
			socktoa(addr));
		return;
	}

	msyslog(LOG_INFO, "RESTRICT: Poking hole in restrictions for %s",
		socktoa(addr));

	/* add a host-specific entry with the configured source flags */
	hack_restrict(RESTRICT_FLAGS, addr, &onesmask,
		      restrict_source_mflags, restrict_source_flags);
}
684
685 /* unrestrict_source - remove hole poked in restrictions
686 */
687 void
unrestrict_source(struct peer * peer)688 unrestrict_source(
689 struct peer * peer
690 )
691 {
692 sockaddr_u * addr = &peer->srcadr;
693 sockaddr_u onesmask;
694 restrict_u * res;
695
696 if (IS_IPV4(addr)) {
697 res = match_restrict4_addr(SRCADR(addr), SRCPORT(addr));
698 } else {
699 res = match_restrict6_addr(&SOCK_ADDR6(addr),
700 SRCPORT(addr));
701 }
702 if (!(res->mflags & RESM_SOURCE)) {
703 return; /* nothing to cleanup */
704 }
705
706 msyslog(LOG_INFO, "RESTRICT: Removing hole in restrictions for %s",
707 socktoa(addr));
708
709 SET_HOSTMASK(&onesmask, AF(addr));
710 hack_restrict(RESTRICT_REMOVE, addr, &onesmask, 0, 0);
711
712 }
713
714
715