xref: /freebsd/contrib/ntp/ntpd/ntp_restrict.c (revision f5f40dd6)
1 /*
2  * ntp_restrict.c - determine host restrictions
3  */
4 #ifdef HAVE_CONFIG_H
5 #include <config.h>
6 #endif
7 
8 #include <stdio.h>
9 #include <sys/types.h>
10 
11 #include "ntpd.h"
12 #include "ntp_if.h"
13 #include "ntp_lists.h"
14 #include "ntp_stdlib.h"
15 #include "ntp_assert.h"
16 
17 /*
18  * This code keeps a simple address-and-mask list of addresses we want
19  * to place restrictions on (or remove them from). The restrictions are
20  * implemented as a set of flags which tell you what matching addresses
21  * can't do.  The list is sorted to retrieve the restrictions most
22  * specific to the address.
23  *
24  * This was originally intended to restrict you from sync'ing to your
25  * own broadcasts when you are doing that, by restricting yourself from
26  * your own interfaces. It was also thought it would sometimes be useful
27  * to keep a misbehaving host or two from abusing your primary clock. It
28  * has been expanded, however, to suit the needs of those with more
29  * restrictive access policies.
30  */
31 #define MASK_IPV6_ADDR(dst, src, msk)					\
32 	do {								\
33 		int x;							\
34 									\
35 		for (x = 0; x < (int)COUNTOF((dst)->s6_addr); x++) {	\
36 			(dst)->s6_addr[x] =   (src)->s6_addr[x]		\
37 					    & (msk)->s6_addr[x];	\
38 		}							\
39 	} while (FALSE)
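
/*
 * Example: MASK_IPV6_ADDR() masks byte for byte, so masking the host
 * address 2001:db8::1 with ffff:ffff:ffff:ffff:: (a /64 mask) yields
 * the prefix 2001:db8::.  A minimal sketch, assuming inet_pton() is
 * available in this context:
 */
#if 0	/* illustrative only */
	struct in6_addr addr, mask, net;

	inet_pton(AF_INET6, "2001:db8::1", &addr);
	inet_pton(AF_INET6, "ffff:ffff:ffff:ffff::", &mask);
	MASK_IPV6_ADDR(&net, &addr, &mask);	/* net is now 2001:db8:: */
#endif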
40 
41 /*
42  * We allocate INC_RESLIST{4|6} entries to the free list whenever empty.
43  * Auto-tune these to be just less than 1KB (leaving at least 32 bytes
44  * for allocator overhead).
45  */
46 #define	INC_RESLIST4	((1024 - 32) / V4_SIZEOF_RESTRICT_U)
47 #define	INC_RESLIST6	((1024 - 32) / V6_SIZEOF_RESTRICT_U)
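
/*
 * For example, if V4_SIZEOF_RESTRICT_U happened to be 40 bytes on a
 * given platform (the real size is platform-dependent), INC_RESLIST4
 * would be (1024 - 32) / 40 = 24 entries per allocation.
 */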
48 
49 /*
50  * The restriction list
51  */
52 restrict_u *restrictlist4;
53 restrict_u *restrictlist6;
54 static int restrictcount;	/* count in the restrict lists */
55 
56 /*
57  * The free list and associated counters.  Also some uninteresting
58  * stat counters.
59  */
60 static restrict_u *resfree4;	/* available entries (free list) */
61 static restrict_u *resfree6;
62 
63 static u_long res_calls;
64 static u_long res_found;
65 static u_long res_not_found;
66 
67 /*
68  * Count number of restriction entries referring to RES_LIMITED, to
69  * control implicit activation/deactivation of the MRU monlist.
70  */
71 static	u_long res_limited_refcnt;
72 
73 /*
74  * Our default entries.
75  *
76  * We can make this cleaner with c99 support: see init_restrict().
77  */
78 static	restrict_u	restrict_def4;
79 static	restrict_u	restrict_def6;
80 
81 /*
82  * "restrict source ..." enabled knob and restriction bits.
83  */
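/*
 * These hold the prototype from a "restrict source ..." configuration
 * line (e.g. "restrict source kod limited").  hack_restrict() saves
 * the flags here when called with a NULL resaddr; restrict_source()
 * below applies them to each peer address as associations come and go.
 */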
84 static	int		restrict_source_enabled;
85 static	u_int32		restrict_source_rflags;
86 static	u_short		restrict_source_mflags;
87 static	short		restrict_source_ippeerlimit;
88 
89 /*
90  * private functions
91  */
92 static	restrict_u *	alloc_res4(void);
93 static	restrict_u *	alloc_res6(void);
94 static	void		free_res(restrict_u *, int);
95 static	inline void	inc_res_limited(void);
96 static	inline void	dec_res_limited(void);
97 static	restrict_u *	match_restrict4_addr(u_int32, u_short);
98 static	restrict_u *	match_restrict6_addr(const struct in6_addr *,
99 					     u_short);
100 static	restrict_u *	match_restrict_entry(const restrict_u *, int);
101 static inline int/*BOOL*/	mflags_sorts_before(u_short, u_short);
102 static	int/*BOOL*/	res_sorts_before4(restrict_u *, restrict_u *);
103 static	int/*BOOL*/	res_sorts_before6(restrict_u *, restrict_u *);
104 
105 typedef int (*res_sort_fn)(restrict_u *, restrict_u *);
106 
107 
108 /* dump_restrict() & dump_restricts() are DEBUG-only */
109 #ifdef DEBUG
110 static void		dump_restrict(restrict_u *, int);
111 
112 
113 /*
114  * dump_restrict - spit out a single restriction entry
115  */
116 static void
117 dump_restrict(
118 	restrict_u *	res,
119 	int		is_ipv6
120 )
121 {
122 	char as[INET6_ADDRSTRLEN];
123 	char ms[INET6_ADDRSTRLEN];
124 
125 	if (is_ipv6) {
126 		inet_ntop(AF_INET6, &res->u.v6.addr, as, sizeof as);
127 		inet_ntop(AF_INET6, &res->u.v6.mask, ms, sizeof ms);
128 	} else {
129 		struct in_addr	sia, sim;
130 
131 		sia.s_addr = htonl(res->u.v4.addr);
132 		sim.s_addr = htonl(res->u.v4.mask);
133 		inet_ntop(AF_INET, &sia, as, sizeof as);
134 		inet_ntop(AF_INET, &sim, ms, sizeof ms);
135 	}
136 	printf("%s/%s: hits %u ippeerlimit %hd mflags %s rflags %s",
137 		as, ms, res->count, res->ippeerlimit,
138 		mflags_str(res->mflags),
139 		rflags_str(res->rflags));
140 	if (res->expire > 0) {
141 		printf(" expire %u\n", res->expire);
142 	} else {
143 		printf("\n");
144 	}
145 }
146 
147 
148 /*
149  * dump_restricts - spit out the 'restrict' entries
150  */
151 void
152 dump_restricts(void)
153 {
154 	restrict_u *	res;
155 
156 	/* Spit out the IPv4 list */
157 	printf("dump_restricts: restrictlist4: %p\n", restrictlist4);
158 	for (res = restrictlist4; res != NULL; res = res->link) {
159 		dump_restrict(res, 0);
160 	}
161 
162 	/* Spit out the IPv6 list */
163 	printf("dump_restricts: restrictlist6: %p\n", restrictlist6);
164 	for (res = restrictlist6; res != NULL; res = res->link) {
165 		dump_restrict(res, 1);
166 	}
167 }
168 #endif /* DEBUG - dump_restrict() / dump_restricts() */
169 
170 
171 /*
172  * init_restrict - initialize the restriction data structures
173  */
174 void
175 init_restrict(void)
176 {
177 	/*
178 	 * The restriction lists end with a default entry with address
179 	 * and mask 0, which will match any entry.  The lists are kept
180 	 * sorted by descending address followed by descending mask:
181 	 *
182 	 *   address	  mask
183 	 * 192.168.0.0	255.255.255.0	kod limited noquery nopeer
184 	 * 192.168.0.0	255.255.0.0	kod limited
185 	 * 0.0.0.0	0.0.0.0		kod limited noquery
186 	 *
187 	 * The first entry which matches an address is used.  With the
188 	 * example restrictions above, 192.168.0.0/24 matches the first
189 	 * entry, the rest of 192.168.0.0/16 matches the second, and
190 	 * everything else matches the third (default).
191 	 *
192 	 * Note this achieves the same result a little more efficiently
193 	 * than the documented behavior, which is to keep the lists
194 	 * sorted by ascending address followed by ascending mask, with
195 	 * the _last_ matching entry used.
196 	 *
197 	 * An additional wrinkle is we may have multiple entries with
198 	 * the same address and mask but differing match flags (mflags).
199 	 * We want to never talk to ourself, so RES_IGNORE entries for
200 	 * each local address are added by ntp_io.c with a host mask and
201 	 * both RESM_INTERFACE and RESM_NTPONLY set.  We sort those
202 	 * entries before entries without those flags to achieve this.
203 	 * The remaining match flag is RESM_SOURCE, used to dynamically
204 	 * set restrictions for each peer based on the prototype set by
205 	 * "restrict source" in the configuration.  We want those entries
206 	 * to be considered only when there is not a static host
207 	 * restriction for the address in the configuration, to allow
208 	 * operators to blacklist pool and manycast servers at runtime as
209 	 * desired using ntpq runtime configuration.  Such static entries
210 	 * have no RESM_ bits set, so the sort order for mflags is first
211 	 * RESM_INTERFACE, then entries without RESM_SOURCE, finally the
212 	 * remaining.
213 	 */
214 
215 	restrict_def4.ippeerlimit = -1;		/* Cleaner if we have C99 */
216 	restrict_def6.ippeerlimit = -1;		/* Cleaner if we have C99 */
217 
218 	LINK_SLIST(restrictlist4, &restrict_def4, link);
219 	LINK_SLIST(restrictlist6, &restrict_def6, link);
220 	restrictcount = 2;
221 }
222 
223 
224 static restrict_u *
225 alloc_res4(void)
226 {
227 	const size_t	cb = V4_SIZEOF_RESTRICT_U;
228 	const size_t	count = INC_RESLIST4;
229 	restrict_u*	rl;
230 	restrict_u*	res;
231 	size_t		i;
232 
233 	UNLINK_HEAD_SLIST(res, resfree4, link);
234 	if (res != NULL) {
235 		return res;
236 	}
237 	rl = eallocarray(count, cb);
238 	/* link all but the first onto free list */
239 	res = (void *)((char *)rl + (count - 1) * cb);
240 	for (i = count - 1; i > 0; i--) {
241 		LINK_SLIST(resfree4, res, link);
242 		res = (void *)((char *)res - cb);
243 	}
244 	DEBUG_INSIST(rl == res);
245 	/* allocate the first */
246 	return res;
247 }
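
/*
 * alloc_res4() above and alloc_res6() below carve a new block back to
 * front: with count == 4, entries [0][1][2][3] are pushed onto the
 * free list in the order 3, 2, 1 (leaving the list 1 -> 2 -> 3), and
 * entry 0 -- which rl still points at -- is returned to the caller.
 */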
248 
249 
250 static restrict_u *
251 alloc_res6(void)
252 {
253 	const size_t	cb = V6_SIZEOF_RESTRICT_U;
254 	const size_t	count = INC_RESLIST6;
255 	restrict_u *	rl;
256 	restrict_u *	res;
257 	size_t		i;
258 
259 	UNLINK_HEAD_SLIST(res, resfree6, link);
260 	if (res != NULL) {
261 		return res;
262 	}
263 	rl = eallocarray(count, cb);
264 	/* link all but the first onto free list */
265 	res = (void *)((char *)rl + (count - 1) * cb);
266 	for (i = count - 1; i > 0; i--) {
267 		LINK_SLIST(resfree6, res, link);
268 		res = (void *)((char *)res - cb);
269 	}
270 	DEBUG_INSIST(rl == res);
271 	/* allocate the first */
272 	return res;
273 }
274 
275 
276 static void
277 free_res(
278 	restrict_u *	res,
279 	int		v6
280 	)
281 {
282 	restrict_u **	rlisthead_ptr;
283 	restrict_u **	flisthead_ptr;
284 	restrict_u *	unlinked;
285 	size_t		sz;
286 
287 	restrictcount--;
288 	if (RES_LIMITED & res->rflags) {
289 		dec_res_limited();
290 	}
291 	if (v6) {
292 		rlisthead_ptr = &restrictlist6;
293 		flisthead_ptr = &resfree6;
294 		sz = V6_SIZEOF_RESTRICT_U;
295 	} else {
296 		rlisthead_ptr = &restrictlist4;
297 		flisthead_ptr = &resfree4;
298 		sz = V4_SIZEOF_RESTRICT_U;
299 	}
300 	UNLINK_SLIST(unlinked, *rlisthead_ptr, res, link, restrict_u);
301 	INSIST(unlinked == res);
302 	zero_mem(res, sz);
303 	LINK_SLIST(*flisthead_ptr, res, link);
304 }
305 
306 
307 static inline void
308 inc_res_limited(void)
309 {
310 	if (0 == res_limited_refcnt) {
311 		mon_start(MON_RES);
312 	}
313 	res_limited_refcnt++;
314 }
315 
316 
317 static inline void
318 dec_res_limited(void)
319 {
320 	res_limited_refcnt--;
321 	if (0 == res_limited_refcnt) {
322 		mon_stop(MON_RES);
323 	}
324 }
325 
326 
327 static restrict_u *
328 match_restrict4_addr(
329 	u_int32	addr,
330 	u_short	port
331 	)
332 {
333 	const int	v6 = FALSE;
334 	restrict_u *	res;
335 	restrict_u *	next;
336 
337 	for (res = restrictlist4; res != NULL; res = next) {
338 		next = res->link;
339 		if (res->expire && res->expire <= current_time) {
340 			free_res(res, v6);	/* zeroes the contents */
341 		}
342 		if (   res->u.v4.addr == (addr & res->u.v4.mask)
343 		    && (   !(RESM_NTPONLY & res->mflags)
344 			|| NTP_PORT == port)) {
345 
346 			break;
347 		}
348 	}
349 	return res;
350 }
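
/*
 * Example: in match_restrict4_addr() above, a packet from 192.168.0.5
 * matches an entry for 192.168.0.0 mask 255.255.255.0 because
 *
 *	0xc0a80000 == (0xc0a80005 & 0xffffff00)
 *
 * and, if the entry is flagged RESM_NTPONLY, only when the source
 * port is NTP_PORT (123).
 */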
351 
352 
353 static restrict_u *
354 match_restrict6_addr(
355 	const struct in6_addr *	addr,
356 	u_short			port
357 	)
358 {
359 	const int	v6 = TRUE;
360 	restrict_u *	res;
361 	restrict_u *	next;
362 	struct in6_addr	masked;
363 
364 	for (res = restrictlist6; res != NULL; res = next) {
365 		next = res->link;
366 		if (res->expire && res->expire <= current_time) {
367 			free_res(res, v6);
368 		}
369 		MASK_IPV6_ADDR(&masked, addr, &res->u.v6.mask);
370 		if (ADDR6_EQ(&masked, &res->u.v6.addr)
371 		    && (   !(RESM_NTPONLY & res->mflags)
372 			|| NTP_PORT == (int)port)) {
373 
374 			break;
375 		}
376 	}
377 	return res;
378 }
379 
380 
381 /*
382  * match_restrict_entry - find an exact match on a restrict list.
383  *
384  * Exact match is addr, mask, and mflags all equal.
385  * In order to use more common code for IPv4 and IPv6, this routine
386  * requires the caller to populate a restrict_u with mflags and either
387  * the v4 or v6 address and mask as appropriate.  Other fields in the
388  * input restrict_u are ignored.
389  */
390 static restrict_u *
391 match_restrict_entry(
392 	const restrict_u *	pmatch,
393 	int			v6
394 	)
395 {
396 	restrict_u *res;
397 	restrict_u *rlist;
398 	size_t cb;
399 
400 	if (v6) {
401 		rlist = restrictlist6;
402 		cb = sizeof(pmatch->u.v6);
403 	} else {
404 		rlist = restrictlist4;
405 		cb = sizeof(pmatch->u.v4);
406 	}
407 
408 	for (res = rlist; res != NULL; res = res->link) {
409 		if (res->mflags == pmatch->mflags &&
410 		    !memcmp(&res->u, &pmatch->u, cb)) {
411 			break;
412 		}
413 	}
414 	return res;
415 }
416 
417 
418 /*
419  * mflags_sorts_before - common mflags sorting code
420  *
421  * See block comment in init_restrict() above for rationale.
422  */
423 static inline int/*BOOL*/
424 mflags_sorts_before(
425 	u_short	m1,
426 	u_short	m2
427 	)
428 {
429 	if (    (RESM_INTERFACE & m1)
430 	    && !(RESM_INTERFACE & m2)) {
431 		return TRUE;
432 	} else if (   !(RESM_SOURCE & m1)
433 		   &&  (RESM_SOURCE & m2)) {
434 		return TRUE;
435 	} else {
436 		return FALSE;
437 	}
438 }
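
/*
 * For entries with equal address and mask the above yields, e.g.:
 *
 *	mflags_sorts_before(RESM_INTERFACE, 0)	-> TRUE
 *	mflags_sorts_before(0, RESM_SOURCE)	-> TRUE
 *	mflags_sorts_before(RESM_SOURCE, 0)	-> FALSE
 *
 * i.e. interface entries sort first, then static entries, then the
 * dynamic "restrict source" entries.
 */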
439 
440 
441 /*
442  * res_sorts_before4 - compare IPv4 restriction entries
443  *
444  * Returns nonzero if r1 sorts before r2.  We sort by descending
445  * address, then descending mask, then an intricate mflags sort
446  * order explained in a block comment near the top of this file.
447  */
448 static int/*BOOL*/
449 res_sorts_before4(
450 	restrict_u *r1,
451 	restrict_u *r2
452 	)
453 {
454 	int r1_before_r2;
455 
456 	if (r1->u.v4.addr > r2->u.v4.addr) {
457 		r1_before_r2 = TRUE;
458 	} else if (r1->u.v4.addr < r2->u.v4.addr) {
459 		r1_before_r2 = FALSE;
460 	} else if (r1->u.v4.mask > r2->u.v4.mask) {
461 		r1_before_r2 = TRUE;
462 	} else if (r1->u.v4.mask < r2->u.v4.mask) {
463 		r1_before_r2 = FALSE;
464 	} else {
465 		r1_before_r2 = mflags_sorts_before(r1->mflags, r2->mflags);
466 	}
467 
468 	return r1_before_r2;
469 }
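
/*
 * With the example entries from init_restrict()'s comment, 192.168.0.0
 * mask 255.255.255.0 sorts before 192.168.0.0 mask 255.255.0.0 because
 * the addresses are equal and 0xffffff00 > 0xffff0000; both sort
 * before the 0.0.0.0/0.0.0.0 default.
 */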
470 
471 
472 /*
473  * res_sorts_before6 - compare IPv6 restriction entries
474  *
475  * Returns nonzero if r1 sorts before r2.  We sort by descending
476  * address, then descending mask, then an intricate mflags sort
477  * order explained in a block comment near the top of this file.
478  */
479 static int/*BOOL*/
480 res_sorts_before6(
481 	restrict_u* r1,
482 	restrict_u* r2
483 )
484 {
485 	int r1_before_r2;
486 	int cmp;
487 
488 	cmp = ADDR6_CMP(&r1->u.v6.addr, &r2->u.v6.addr);
489 	if (cmp > 0) {		/* r1->addr > r2->addr */
490 		r1_before_r2 = TRUE;
491 	} else if (cmp < 0) {	/* r2->addr > r1->addr */
492 		r1_before_r2 = FALSE;
493 	} else {
494 		cmp = ADDR6_CMP(&r1->u.v6.mask, &r2->u.v6.mask);
495 		if (cmp > 0) {		/* r1->mask > r2->mask */
496 			r1_before_r2 = TRUE;
497 		} else if (cmp < 0) {	/* r2->mask > r1->mask */
498 			r1_before_r2 = FALSE;
499 		} else {
500 			r1_before_r2 = mflags_sorts_before(r1->mflags,
501 							   r2->mflags);
502 		}
503 	}
504 
505 	return r1_before_r2;
506 }
507 
508 
509 /*
510  * restrictions - return restrictions for this host in *r4a
511  */
512 void
513 restrictions(
514 	sockaddr_u *srcadr,
515 	r4addr *r4a
516 	)
517 {
518 	restrict_u *match;
519 	struct in6_addr *pin6;
520 
521 	DEBUG_REQUIRE(NULL != r4a);
522 
523 	res_calls++;
524 
525 	if (IS_IPV4(srcadr)) {
526 		/*
527 		 * Ignore any packets with a multicast source address
528 		 * (this should be done early in the receive process,
529 		 * not later!)
530 		 */
531 		if (IN_CLASSD(SRCADR(srcadr))) {
532 			goto multicast;
533 		}
534 
535 		match = match_restrict4_addr(SRCADR(srcadr),
536 					     SRCPORT(srcadr));
537 		DEBUG_INSIST(match != NULL);
538 		match->count++;
539 		/*
540 		 * res_not_found counts only use of the final default
541 		 * entry, not any "restrict default ntpport ...", which
542 		 * would be just before the final default.
543 		 */
544 		if (&restrict_def4 == match)
545 			res_not_found++;
546 		else
547 			res_found++;
548 		r4a->rflags = match->rflags;
549 		r4a->ippeerlimit = match->ippeerlimit;
550 	} else {
551 		DEBUG_REQUIRE(IS_IPV6(srcadr));
552 
553 		pin6 = PSOCK_ADDR6(srcadr);
554 
555 		/*
556 		 * Ignore any packets with a multicast source address
557 		 * (this should be done early in the receive process,
558 		 * not later!)
559 		 */
560 		if (IN6_IS_ADDR_MULTICAST(pin6)) {
561 			goto multicast;
562 		}
563 		match = match_restrict6_addr(pin6, SRCPORT(srcadr));
564 		DEBUG_INSIST(match != NULL);
565 		match->count++;
566 		if (&restrict_def6 == match)
567 			res_not_found++;
568 		else
569 			res_found++;
570 		r4a->rflags = match->rflags;
571 		r4a->ippeerlimit = match->ippeerlimit;
572 	}
573 
574 	return;
575 
576     multicast:
577 	r4a->rflags = RES_IGNORE;
578 	r4a->ippeerlimit = 0;
579 }
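
/*
 * Typical use of restrictions() in the receive path (a minimal sketch;
 * the caller and its variable names are illustrative, not taken from
 * this file):
 */
#if 0	/* illustrative only */
	r4addr	r4a;

	restrictions(&rbufp->recv_srcadr, &r4a);
	if (RES_IGNORE & r4a.rflags)
		return;		/* matching entry says: do not respond */
#endif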
580 
581 
582 #ifdef DEBUG
583 /* display string for restrict_op */
584 const char *
585 resop_str(restrict_op op)
586 {
587 	switch (op) {
588 	    case RESTRICT_FLAGS:	return "RESTRICT_FLAGS";
589 	    case RESTRICT_UNFLAG:	return "RESTRICT_UNFLAG";
590 	    case RESTRICT_REMOVE:	return "RESTRICT_REMOVE";
591 	    case RESTRICT_REMOVEIF:	return "RESTRICT_REMOVEIF";
592 	}
593 	DEBUG_INVARIANT(!"bad restrict_op in resop_str");
594 	return "";	/* silence not all paths return value warning */
595 }
596 #endif	/* DEBUG */
597 
598 
599 /*
600  * hack_restrict - add/subtract/manipulate entries on the restrict list
601  */
602 int/*BOOL*/
603 hack_restrict(
604 	restrict_op	op,
605 	sockaddr_u *	resaddr,
606 	sockaddr_u *	resmask,
607 	short		ippeerlimit,
608 	u_short		mflags,
609 	u_short		rflags,
610 	u_int32		expire
611 	)
612 {
613 	int		v6;
614 	int		bump_res_limited = FALSE;
615 	restrict_u	match;
616 	restrict_u *	res;
617 	restrict_u **	plisthead;
618 	res_sort_fn	pfn_sort;
619 
620 #ifdef DEBUG
621 	if (debug > 0) {
622 		printf("hack_restrict: op %s addr %s mask %s",
623 			resop_str(op), stoa(resaddr), stoa(resmask));
624 		if (ippeerlimit >= 0) {
625 			printf(" ippeerlimit %d", ippeerlimit);
626 		}
627 		printf(" mflags %s rflags %s", mflags_str(mflags),
628 		       rflags_str(rflags));
629 		if (expire) {
630 			printf(" lifetime %u\n",
631 			       expire - (u_int32)current_time);
632 		} else {
633 			printf("\n");
634 		}
635 	}
636 #endif
637 
638 	if (NULL == resaddr) {
639 		DEBUG_REQUIRE(NULL == resmask);
640 		DEBUG_REQUIRE(RESTRICT_FLAGS == op);
641 		DEBUG_REQUIRE(RESM_SOURCE & mflags);
642 		restrict_source_rflags = rflags;
643 		restrict_source_mflags = mflags;
644 		restrict_source_ippeerlimit = ippeerlimit;
645 		restrict_source_enabled = TRUE;
646 		DPRINTF(1, ("restrict source template saved\n"));
647 		return TRUE;
648 	}
649 
650 	ZERO(match);
651 
652 	if (IS_IPV4(resaddr)) {
653 		DEBUG_INVARIANT(IS_IPV4(resmask));
654 		v6 = FALSE;
655 		/*
656 		 * Get address and mask in host byte order for easy
657 		 * comparison as u_int32
658 		 */
659 		match.u.v4.addr = SRCADR(resaddr);
660 		match.u.v4.mask = SRCADR(resmask);
661 		match.u.v4.addr &= match.u.v4.mask;
662 	} else {
663 		DEBUG_INVARIANT(IS_IPV6(resaddr));
664 		DEBUG_INVARIANT(IS_IPV6(resmask));
665 		v6 = TRUE;
666 		/*
667 		 * Get address and mask in network byte order for easy
668 		 * comparison as byte sequences (e.g. memcmp())
669 		 */
670 		match.u.v6.mask = SOCK_ADDR6(resmask);
671 		MASK_IPV6_ADDR(&match.u.v6.addr, PSOCK_ADDR6(resaddr),
672 			       &match.u.v6.mask);
673 	}
674 
675 	match.mflags = mflags;
676 	res = match_restrict_entry(&match, v6);
677 
678 	switch (op) {
679 
680 	case RESTRICT_FLAGS:
681 		/*
682 		 * Here we add bits to the rflags. If we already have
683 		 * this restriction modify it.
684 		 */
685 		if (NULL != res) {
686 			if (    (RES_LIMITED & rflags)
687 			    && !(RES_LIMITED & res->rflags)) {
688 
689 				bump_res_limited = TRUE;
690 			}
691 			res->rflags |= rflags;
692 			res->expire = expire;
693 		} else {
694 			match.rflags = rflags;
695 			match.expire = expire;
696 			match.ippeerlimit = ippeerlimit;
697 			if (v6) {
698 				res = alloc_res6();
699 				memcpy(res, &match, V6_SIZEOF_RESTRICT_U);
700 				plisthead = &restrictlist6;
701 				pfn_sort = &res_sorts_before6;
702 			} else {
703 				res = alloc_res4();
704 				memcpy(res, &match, V4_SIZEOF_RESTRICT_U);
705 				plisthead = &restrictlist4;
706 				pfn_sort = &res_sorts_before4;
707 			}
708 			LINK_SORT_SLIST(
709 				*plisthead, res,
710 				(*pfn_sort)(res, L_S_S_CUR()),
711 				link, restrict_u);
712 			restrictcount++;
713 			if (RES_LIMITED & rflags) {
714 				bump_res_limited = TRUE;
715 			}
716 		}
717 		if (bump_res_limited) {
718 			inc_res_limited();
719 		}
720 		return TRUE;
721 
722 	case RESTRICT_UNFLAG:
723 		/*
724 		 * Remove some bits from the rflags. If we didn't
725 		 * find this one, just return.
726 		 */
727 		if (NULL == res) {
728 			DPRINTF(1, ("No match for %s %s removing rflags %s\n",
729 				    stoa(resaddr), stoa(resmask),
730 				    rflags_str(rflags)));
731 			return FALSE;
732 		}
733 		if (   (RES_LIMITED & res->rflags)
734 		    && (RES_LIMITED & rflags)) {
735 			dec_res_limited();
736 		}
737 		res->rflags &= ~rflags;
738 		return TRUE;
739 
740 	case RESTRICT_REMOVE:
741 	case RESTRICT_REMOVEIF:
742 		/*
743 		 * Remove an entry from the table entirely if we
744 		 * found one. Don't remove the default entry and
745 		 * don't remove an interface entry unless asked.
746 		 */
747 		if (   res != NULL
748 		    && (   RESTRICT_REMOVEIF == op
749 			|| !(RESM_INTERFACE & res->mflags))
750 		    && res != &restrict_def4
751 		    && res != &restrict_def6) {
752 
753 			free_res(res, v6);
754 			return TRUE;
755 		}
756 		DPRINTF(1, ("No match removing %s %s restriction\n",
757 			    stoa(resaddr), stoa(resmask)));
758 		return FALSE;
759 	}
760 	/* notreached */
761 	return FALSE;
762 }
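
/*
 * For example, a configuration line such as
 *
 *	restrict 192.168.0.0 mask 255.255.0.0 kod limited
 *
 * reaches hack_restrict() roughly as sketched below (a sketch only;
 * the real call is made from the configuration parser):
 */
#if 0	/* illustrative only */
	sockaddr_u	resaddr, resmask;	/* filled in by the parser */

	hack_restrict(RESTRICT_FLAGS, &resaddr, &resmask,
		      -1 /* ippeerlimit, as in the default entries */,
		      0 /* mflags */, RES_KOD | RES_LIMITED,
		      0 /* no expiration */);
#endif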
763 
764 
765 /*
766  * restrict_source - maintains dynamic "restrict source ..." entries as
767  *		     peers come and go.
768  */
769 void
770 restrict_source(
771 	sockaddr_u *	addr,
772 	int		farewell,	/* TRUE to remove */
773 	u_int32		lifetime	/* seconds, 0 forever */
774 	)
775 {
776 	sockaddr_u	onesmask;
777 	int/*BOOL*/	success;
778 
779 	if (   !restrict_source_enabled || SOCK_UNSPEC(addr)
780 	    || IS_MCAST(addr) || ISREFCLOCKADR(addr)) {
781 		return;
782 	}
783 
784 	REQUIRE(AF_INET == AF(addr) || AF_INET6 == AF(addr));
785 
786 	SET_HOSTMASK(&onesmask, AF(addr));
787 	if (farewell) {
788 		success = hack_restrict(RESTRICT_REMOVE, addr, &onesmask,
789 					0, RESM_SOURCE, 0, 0);
790 		if (success) {
791 			DPRINTF(1, ("%s %s removed\n", __func__,
792 				    stoa(addr)));
793 		} else {
794 			msyslog(LOG_ERR, "%s remove %s failed",
795 					 __func__, stoa(addr));
796 		}
797 		return;
798 	}
799 
800 	success = hack_restrict(RESTRICT_FLAGS, addr, &onesmask,
801 				restrict_source_ippeerlimit,
802 				restrict_source_mflags,
803 				restrict_source_rflags,
804 				lifetime > 0
805 				    ? lifetime + current_time
806 				    : 0);
807 	if (success) {
808 		DPRINTF(1, ("%s %s add/upd\n", __func__,
809 			    stoa(addr)));
810 	} else {
811 		msyslog(LOG_ERR, "%s %s failed", __func__, stoa(addr));
812 	}
813 }
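
/*
 * Sketch of how restrict_source() is intended to be used (the callers
 * live outside this file; the peer variable below is illustrative):
 */
#if 0	/* illustrative only */
	restrict_source(&peer->srcadr, FALSE, 0);	/* association comes up */
	restrict_source(&peer->srcadr, TRUE, 0);	/* association goes away */
#endif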
814 
815 
816 #ifdef DEBUG
817 /* Convert restriction RES_ flag bits into a display string */
818 const char *
819 rflags_str(
820 	u_short rflags
821 	)
822 {
823 	const size_t	sz = LIB_BUFLENGTH;
824 	char *		rfs;
825 
826 	LIB_GETBUF(rfs);
827 	rfs[0] = '\0';
828 
829 	if (rflags & RES_FLAKE) {
830 		CLEAR_BIT_IF_DEBUG(RES_FLAKE, rflags);
831 		append_flagstr(rfs, sz, "flake");
832 	}
833 
834 	if (rflags & RES_IGNORE) {
835 		CLEAR_BIT_IF_DEBUG(RES_IGNORE, rflags);
836 		append_flagstr(rfs, sz, "ignore");
837 	}
838 
839 	if (rflags & RES_KOD) {
840 		CLEAR_BIT_IF_DEBUG(RES_KOD, rflags);
841 		append_flagstr(rfs, sz, "kod");
842 	}
843 
844 	if (rflags & RES_MSSNTP) {
845 		CLEAR_BIT_IF_DEBUG(RES_MSSNTP, rflags);
846 		append_flagstr(rfs, sz, "mssntp");
847 	}
848 
849 	if (rflags & RES_LIMITED) {
850 		CLEAR_BIT_IF_DEBUG(RES_LIMITED, rflags);
851 		append_flagstr(rfs, sz, "limited");
852 	}
853 
854 	if (rflags & RES_LPTRAP) {
855 		CLEAR_BIT_IF_DEBUG(RES_LPTRAP, rflags);
856 		append_flagstr(rfs, sz, "lptrap");
857 	}
858 
859 	if (rflags & RES_NOMODIFY) {
860 		CLEAR_BIT_IF_DEBUG(RES_NOMODIFY, rflags);
861 		append_flagstr(rfs, sz, "nomodify");
862 	}
863 
864 	if (rflags & RES_NOMRULIST) {
865 		CLEAR_BIT_IF_DEBUG(RES_NOMRULIST, rflags);
866 		append_flagstr(rfs, sz, "nomrulist");
867 	}
868 
869 	if (rflags & RES_NOEPEER) {
870 		CLEAR_BIT_IF_DEBUG(RES_NOEPEER, rflags);
871 		append_flagstr(rfs, sz, "noepeer");
872 	}
873 
874 	if (rflags & RES_NOPEER) {
875 		CLEAR_BIT_IF_DEBUG(RES_NOPEER, rflags);
876 		append_flagstr(rfs, sz, "nopeer");
877 	}
878 
879 	if (rflags & RES_NOQUERY) {
880 		CLEAR_BIT_IF_DEBUG(RES_NOQUERY, rflags);
881 		append_flagstr(rfs, sz, "noquery");
882 	}
883 
884 	if (rflags & RES_DONTSERVE) {
885 		CLEAR_BIT_IF_DEBUG(RES_DONTSERVE, rflags);
886 		append_flagstr(rfs, sz, "dontserve");
887 	}
888 
889 	if (rflags & RES_NOTRAP) {
890 		CLEAR_BIT_IF_DEBUG(RES_NOTRAP, rflags);
891 		append_flagstr(rfs, sz, "notrap");
892 	}
893 
894 	if (rflags & RES_DONTTRUST) {
895 		CLEAR_BIT_IF_DEBUG(RES_DONTTRUST, rflags);
896 		append_flagstr(rfs, sz, "notrust");
897 	}
898 
899 	if (rflags & RES_SRVRSPFUZ) {
900 		CLEAR_BIT_IF_DEBUG(RES_SRVRSPFUZ, rflags);
901 		append_flagstr(rfs, sz, "srvrspfuz");
902 	}
903 
904 	if (rflags & RES_VERSION) {
905 		CLEAR_BIT_IF_DEBUG(RES_VERSION, rflags);
906 		append_flagstr(rfs, sz, "version");
907 	}
908 
909 	DEBUG_INVARIANT(!rflags);
910 
911 	if ('\0' == rfs[0]) {
912 		append_flagstr(rfs, sz, "(none)");
913 	}
914 
915 	return rfs;
916 }
917 
918 
919 /* Convert restriction match RESM_ flag bits into a display string */
920 const char *
921 mflags_str(
922 	u_short mflags
923 	)
924 {
925 	const size_t	sz = LIB_BUFLENGTH;
926 	char *		mfs;
927 
928 	LIB_GETBUF(mfs);
929 	mfs[0] = '\0';
930 
931 	if (mflags & RESM_NTPONLY) {
932 		CLEAR_BIT_IF_DEBUG(RESM_NTPONLY, mflags);
933 		append_flagstr(mfs, sz, "ntponly");
934 	}
935 
936 	if (mflags & RESM_SOURCE) {
937 		CLEAR_BIT_IF_DEBUG(RESM_SOURCE, mflags);
938 		append_flagstr(mfs, sz, "source");
939 	}
940 
941 	if (mflags & RESM_INTERFACE) {
942 		CLEAR_BIT_IF_DEBUG(RESM_INTERFACE, mflags);
943 		append_flagstr(mfs, sz, "interface");
944 	}
945 
946 	DEBUG_INVARIANT(!mflags);
947 
948 	return mfs;
949 }
950 #endif	/* DEBUG */
951