/*
 * ntp_restrict.c - determine host restrictions
 */
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <sys/types.h>

#include "ntpd.h"
#include "ntp_if.h"
#include "ntp_lists.h"
#include "ntp_stdlib.h"
#include "ntp_assert.h"

/*
 * This code keeps a simple address-and-mask list of hosts we want
 * to place restrictions on (or remove them from). The restrictions
 * are implemented as a set of flags which tell you what the host
 * can't do. There is a subroutine entry to return the flags. The
 * list is kept sorted to reduce the average number of comparisons
 * and make sure you get the set of restrictions most specific to
 * the address.
 *
 * The algorithm is that, when looking up a host, it is first assumed
 * that the default set of restrictions will apply. The list is then
 * searched; whenever a match is found, the match's flags are adopted
 * instead. When the point is reached where the sorted address is
 * greater than the target, the search stops with the last set of
 * flags found. Because of the ordering of the list, the most
 * specific match provides the final set of flags.
 *
 * This was originally intended to keep you from sync'ing to your own
 * broadcasts when you are broadcasting, by restricting yourself from
 * your own interfaces. It was also thought it would sometimes be useful
 * to keep a misbehaving host or two from abusing your primary clock. It
 * has been expanded, however, to suit the needs of those with more
 * restrictive access policies.
 */
/*
 * We will use two lists, one for IPv4 addresses and one for IPv6
 * addresses. This is not protocol-independent, but for now I can't
 * find a way around it. We'll revisit this later... JFB 07/2001
 */
#define MASK_IPV6_ADDR(dst, src, msk)					\
	do {								\
		int idx;						\
		for (idx = 0; idx < (int)COUNTOF((dst)->s6_addr); idx++) { \
			(dst)->s6_addr[idx] = (src)->s6_addr[idx]	\
					      & (msk)->s6_addr[idx];	\
		}							\
	} while (0)
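
/*
 * For example, masking 2001:db8::1 with ffff:ffff:: yields 2001:db8::;
 * each of the 16 address bytes is simply ANDed with the matching mask
 * byte.
 */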

/*
 * We allocate INC_RESLIST{4|6} entries to the free list whenever empty.
 * Auto-tune these to be just less than 1KB (leaving at least 16 bytes
 * for allocator overhead).
 */
#define	INC_RESLIST4	((1024 - 16) / V4_SIZEOF_RESTRICT_U)
#define	INC_RESLIST6	((1024 - 16) / V6_SIZEOF_RESTRICT_U)
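
/*
 * Illustration only (the real sizes depend on the build): if
 * V4_SIZEOF_RESTRICT_U were 48 bytes, INC_RESLIST4 would come out to
 * (1024 - 16) / 48 = 21 entries, so each batch allocation stays just
 * under 1KB including allocator overhead.
 */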

/*
 * The restriction list
 */
restrict_u *restrictlist4;
restrict_u *restrictlist6;
static int restrictcount;	/* count of entries in the restrict lists */

/*
 * The free list and associated counters.  Also some uninteresting
 * stat counters.
 */
static restrict_u *resfree4;	/* available entries (free list) */
static restrict_u *resfree6;

static u_long res_calls;	/* total restriction lookups */
static u_long res_found;	/* lookups matching a non-default entry */
static u_long res_not_found;	/* lookups matching only the default entry */

/*
 * Count number of restriction entries referring to RES_LIMITED, to
 * control implicit activation/deactivation of the MRU monlist.
 */
static	u_long res_limited_refcnt;

/*
 * Our default entries.
 *
 * We can make this cleaner with C99 support: see init_restrict().
 */
static	restrict_u	restrict_def4;
static	restrict_u	restrict_def6;

/*
 * "restrict source ..." enabled knob and restriction bits.
 */
static	int		restrict_source_enabled;
static	u_int32		restrict_source_rflags;
static	u_short		restrict_source_mflags;
static	short		restrict_source_ippeerlimit;

/*
 * private functions
 */
static restrict_u *	alloc_res4(void);
static restrict_u *	alloc_res6(void);
static void		free_res(restrict_u *, int);
static void		inc_res_limited(void);
static void		dec_res_limited(void);
static restrict_u *	match_restrict4_addr(u_int32, u_short);
static restrict_u *	match_restrict6_addr(const struct in6_addr *,
					     u_short);
static restrict_u *	match_restrict_entry(const restrict_u *, int);
static int		res_sorts_before4(restrict_u *, restrict_u *);
static int		res_sorts_before6(restrict_u *, restrict_u *);
static char *		roptoa(restrict_op op);


void	dump_restricts(void);

/*
 * dump_restrict - spit out a restrict_u
 */
static void
dump_restrict(
	restrict_u *	res,
	int		is_ipv6
	)
{
	char as[INET6_ADDRSTRLEN];
	char ms[INET6_ADDRSTRLEN];

	if (is_ipv6) {
		inet_ntop(AF_INET6, &res->u.v6.addr, as, sizeof as);
		inet_ntop(AF_INET6, &res->u.v6.mask, ms, sizeof ms);
	} else {
		struct in_addr	sia = { htonl(res->u.v4.addr) };
		struct in_addr	sim = { htonl(res->u.v4.mask) };

		inet_ntop(AF_INET, &sia, as, sizeof as);
		inet_ntop(AF_INET, &sim, ms, sizeof ms);
	}
	mprintf("restrict node at %p: %s/%s count %d, rflags %08x, mflags %04x, ippeerlimit %d, expire %lu, next %p\n",
		res, as, ms, res->count, res->rflags, res->mflags,
		res->ippeerlimit, res->expire, res->link);
	return;
}


/*
 * dump_restricts - spit out the 'restrict' lines
 */
void
dump_restricts(void)
{
	restrict_u *	res;
	restrict_u *	next;

	mprintf("dump_restrict: restrict_def4: %p\n", &restrict_def4);
	/* Spit out 'restrict {,-4,-6} default ...' lines, if needed */
	for (res = &restrict_def4; res != NULL; res = next) {
		dump_restrict(res, 0);
		next = res->link;
	}

	mprintf("dump_restrict: restrict_def6: %p\n", &restrict_def6);
	for (res = &restrict_def6; res != NULL; res = next) {
		dump_restrict(res, 1);
		next = res->link;
	}

	/* Spit out the IPv4 list */
	mprintf("dump_restrict: restrictlist4: %p\n", &restrictlist4);
	for (res = restrictlist4; res != NULL; res = next) {
		dump_restrict(res, 0);
		next = res->link;
	}

	/* Spit out the IPv6 list */
	mprintf("dump_restrict: restrictlist6: %p\n", &restrictlist6);
	for (res = restrictlist6; res != NULL; res = next) {
		dump_restrict(res, 1);
		next = res->link;
	}

	return;
}

/*
 * init_restrict - initialize the restriction data structures
 */
void
init_restrict(void)
{
	/*
	 * The restriction lists begin with a default entry with address
	 * and mask 0, which will match any address.  The lists are kept
	 * sorted by descending address followed by descending mask:
	 *
	 *   address	  mask
	 * 192.168.0.0	255.255.255.0	kod limited noquery nopeer
	 * 192.168.0.0	255.255.0.0	kod limited
	 * 0.0.0.0	0.0.0.0		kod limited noquery
	 *
	 * The first entry which matches an address is used.  With the
	 * example restrictions above, 192.168.0.0/24 matches the first
	 * entry, the rest of 192.168.0.0/16 matches the second, and
	 * everything else matches the third (default).
	 *
	 * Note this achieves the same result a little more efficiently
	 * than the documented behavior, which is to keep the lists
	 * sorted by ascending address followed by ascending mask, with
	 * the _last_ matching entry used.
	 *
	 * An additional wrinkle is we may have multiple entries with
	 * the same address and mask but differing match flags (mflags).
	 * At present there is only one, RESM_NTPONLY.  Entries with
	 * RESM_NTPONLY are sorted earlier so they take precedence over
	 * any otherwise similar entry without it.  Again, this is the
	 * documented behavior with the implementation order reversed.
	 */

	restrict_def4.ippeerlimit = -1;		/* Cleaner if we have C99 */
	restrict_def6.ippeerlimit = -1;		/* Cleaner if we have C99 */

	LINK_SLIST(restrictlist4, &restrict_def4, link);
	LINK_SLIST(restrictlist6, &restrict_def6, link);
	restrictcount = 2;
}


static restrict_u *
alloc_res4(void)
{
	const size_t	cb = V4_SIZEOF_RESTRICT_U;
	const size_t	count = INC_RESLIST4;
	restrict_u *	rl;
	restrict_u *	res;
	size_t		i;

	UNLINK_HEAD_SLIST(res, resfree4, link);
	if (res != NULL)
		return res;

	rl = eallocarray(count, cb);
	/* link all but the first onto free list */
	res = (void *)((char *)rl + (count - 1) * cb);
	for (i = count - 1; i > 0; i--) {
		LINK_SLIST(resfree4, res, link);
		res = (void *)((char *)res - cb);
	}
	INSIST(rl == res);
	/* allocate the first */
	return res;
}


static restrict_u *
alloc_res6(void)
{
	const size_t	cb = V6_SIZEOF_RESTRICT_U;
	const size_t	count = INC_RESLIST6;
	restrict_u *	rl;
	restrict_u *	res;
	size_t		i;

	UNLINK_HEAD_SLIST(res, resfree6, link);
	if (res != NULL)
		return res;

	rl = eallocarray(count, cb);
	/* link all but the first onto free list */
	res = (void *)((char *)rl + (count - 1) * cb);
	for (i = count - 1; i > 0; i--) {
		LINK_SLIST(resfree6, res, link);
		res = (void *)((char *)res - cb);
	}
	INSIST(rl == res);
	/* allocate the first */
	return res;
}


static void
free_res(
	restrict_u *	res,
	int		v6
	)
{
	restrict_u **	plisthead;
	restrict_u *	unlinked;

	restrictcount--;
	if (RES_LIMITED & res->rflags)
		dec_res_limited();

	if (v6)
		plisthead = &restrictlist6;
	else
		plisthead = &restrictlist4;
	UNLINK_SLIST(unlinked, *plisthead, res, link, restrict_u);
	INSIST(unlinked == res);

	if (v6) {
		zero_mem(res, V6_SIZEOF_RESTRICT_U);
		plisthead = &resfree6;
	} else {
		zero_mem(res, V4_SIZEOF_RESTRICT_U);
		plisthead = &resfree4;
	}
	LINK_SLIST(*plisthead, res, link);
}


static void
inc_res_limited(void)
{
	if (!res_limited_refcnt)
		mon_start(MON_RES);
	res_limited_refcnt++;
}


static void
dec_res_limited(void)
{
	res_limited_refcnt--;
	if (!res_limited_refcnt)
		mon_stop(MON_RES);
}


static restrict_u *
match_restrict4_addr(
	u_int32	addr,
	u_short	port
	)
{
	const int	v6 = 0;
	restrict_u *	res;
	restrict_u *	next;

	for (res = restrictlist4; res != NULL; res = next) {
#ifdef DEBUG
		struct in_addr	sia = { htonl(res->u.v4.addr) };
#endif

		next = res->link;
		DPRINTF(2, ("match_restrict4_addr: Checking %s, port %d ... ",
			    inet_ntoa(sia), port));
		if (   res->expire
		    && res->expire <= current_time)
			free_res(res, v6);	/* zeroes the contents */
		if (   res->u.v4.addr == (addr & res->u.v4.mask)
		    && (   !(RESM_NTPONLY & res->mflags)
			|| NTP_PORT == port)) {
			DPRINTF(2, ("MATCH: ippeerlimit %d\n", res->ippeerlimit));
			break;
		}
		DPRINTF(2, ("doesn't match: ippeerlimit %d\n", res->ippeerlimit));
	}
	return res;
}


static restrict_u *
match_restrict6_addr(
	const struct in6_addr *	addr,
	u_short			port
	)
{
	const int	v6 = 1;
	restrict_u *	res;
	restrict_u *	next;
	struct in6_addr	masked;

	for (res = restrictlist6; res != NULL; res = next) {
		next = res->link;
		INSIST(next != res);
		if (res->expire &&
		    res->expire <= current_time)
			free_res(res, v6);
		MASK_IPV6_ADDR(&masked, addr, &res->u.v6.mask);
		if (ADDR6_EQ(&masked, &res->u.v6.addr)
		    && (!(RESM_NTPONLY & res->mflags)
			|| NTP_PORT == (int)port))
			break;
	}
	return res;
}


/*
 * match_restrict_entry - find an exact match on a restrict list.
 *
 * Exact match is addr, mask, and mflags all equal.
 * In order to use more common code for IPv4 and IPv6, this routine
 * requires the caller to populate a restrict_u with mflags and either
 * the v4 or v6 address and mask as appropriate.  Other fields in the
 * input restrict_u are ignored.
 */
static restrict_u *
match_restrict_entry(
	const restrict_u *	pmatch,
	int			v6
	)
{
	restrict_u *res;
	restrict_u *rlist;
	size_t cb;

	if (v6) {
		rlist = restrictlist6;
		cb = sizeof(pmatch->u.v6);
	} else {
		rlist = restrictlist4;
		cb = sizeof(pmatch->u.v4);
	}

	for (res = rlist; res != NULL; res = res->link)
		if (res->mflags == pmatch->mflags &&
		    !memcmp(&res->u, &pmatch->u, cb))
			break;
	return res;
}
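
/*
 * (hack_restrict() below is the canonical caller: it zeroes a local
 * restrict_u, fills in the address, mask and mflags, and hands it to
 * this routine.)
 */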


/*
 * res_sorts_before4 - compare two restrict4 entries
 *
 * Returns nonzero if r1 sorts before r2.  We sort by descending
 * address, then descending mask, then descending mflags, so sorting
 * before means having a higher value.
 */
static int
res_sorts_before4(
	restrict_u *r1,
	restrict_u *r2
	)
{
	int r1_before_r2;

	if (r1->u.v4.addr > r2->u.v4.addr)
		r1_before_r2 = 1;
	else if (r1->u.v4.addr < r2->u.v4.addr)
		r1_before_r2 = 0;
	else if (r1->u.v4.mask > r2->u.v4.mask)
		r1_before_r2 = 1;
	else if (r1->u.v4.mask < r2->u.v4.mask)
		r1_before_r2 = 0;
	else if (r1->mflags > r2->mflags)
		r1_before_r2 = 1;
	else
		r1_before_r2 = 0;

	return r1_before_r2;
}
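
/*
 * For instance, with the entries shown in init_restrict(),
 * 192.168.0.0/255.255.255.0 sorts before 192.168.0.0/255.255.0.0: the
 * addresses tie, and the longer mask is the numerically larger u_int32.
 */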


/*
 * res_sorts_before6 - compare two restrict6 entries
 *
 * Returns nonzero if r1 sorts before r2.  We sort by descending
 * address, then descending mask, then descending mflags, so sorting
 * before means having a higher value.
 */
static int
res_sorts_before6(
	restrict_u *r1,
	restrict_u *r2
	)
{
	int r1_before_r2;
	int cmp;

	cmp = ADDR6_CMP(&r1->u.v6.addr, &r2->u.v6.addr);
	if (cmp > 0)		/* r1->addr > r2->addr */
		r1_before_r2 = 1;
	else if (cmp < 0)	/* r2->addr > r1->addr */
		r1_before_r2 = 0;
	else {
		cmp = ADDR6_CMP(&r1->u.v6.mask, &r2->u.v6.mask);
		if (cmp > 0)		/* r1->mask > r2->mask */
			r1_before_r2 = 1;
		else if (cmp < 0)	/* r2->mask > r1->mask */
			r1_before_r2 = 0;
		else if (r1->mflags > r2->mflags)
			r1_before_r2 = 1;
		else
			r1_before_r2 = 0;
	}

	return r1_before_r2;
}


/*
 * restrictions - return restrictions for this host in *r4a
 */
void
restrictions(
	sockaddr_u *srcadr,
	r4addr *r4a
	)
{
	restrict_u *match;
	struct in6_addr *pin6;

	REQUIRE(NULL != r4a);

	res_calls++;
	r4a->rflags = RES_IGNORE;
	r4a->ippeerlimit = 0;

	DPRINTF(1, ("restrictions: looking up %s\n", stoa(srcadr)));

	/* IPv4 source address */
	if (IS_IPV4(srcadr)) {
		/*
		 * Ignore any packets with a multicast source address
		 * (this should be done early in the receive process,
		 * not later!)
		 */
		if (IN_CLASSD(SRCADR(srcadr))) {
			DPRINTF(1, ("restrictions: srcadr %s is multicast\n", stoa(srcadr)));
			r4a->ippeerlimit = 2;	/* XXX: we should use a better value */
			return;
		}

		match = match_restrict4_addr(SRCADR(srcadr),
					     SRCPORT(srcadr));

		INSIST(match != NULL);

		match->count++;
		/*
		 * res_not_found counts only use of the final default
		 * entry, not any "restrict default ntpport ...", which
		 * would be just before the final default.
		 */
		if (&restrict_def4 == match)
			res_not_found++;
		else
			res_found++;
		r4a->rflags = match->rflags;
		r4a->ippeerlimit = match->ippeerlimit;
	}

	/* IPv6 source address */
	if (IS_IPV6(srcadr)) {
		pin6 = PSOCK_ADDR6(srcadr);

		/*
		 * Ignore any packets with a multicast source address
		 * (this should be done early in the receive process,
		 * not later!)
		 */
		if (IN6_IS_ADDR_MULTICAST(pin6))
			return;

		match = match_restrict6_addr(pin6, SRCPORT(srcadr));
		INSIST(match != NULL);
		match->count++;
		if (&restrict_def6 == match)
			res_not_found++;
		else
			res_found++;
		r4a->rflags = match->rflags;
		r4a->ippeerlimit = match->ippeerlimit;
	}

	return;
}
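
/*
 * Typical use (sketch only; 'rbufp' and the early-return policy are
 * illustrative, not part of this file): the packet receive path fills
 * an r4addr and then tests individual RES_* bits, e.g.
 *
 *	r4addr	r4a;
 *
 *	restrictions(&rbufp->recv_srcadr, &r4a);
 *	if (RES_IGNORE & r4a.rflags)
 *		return;			(silently drop the packet)
 */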


/*
 * roptoa - convert a restrict_op to a string
 */
char *
roptoa(restrict_op op) {
	static char sb[30];

	switch(op) {
	    case RESTRICT_FLAGS:	return "RESTRICT_FLAGS";
	    case RESTRICT_UNFLAG:	return "RESTRICT_UNFLAG";
	    case RESTRICT_REMOVE:	return "RESTRICT_REMOVE";
	    case RESTRICT_REMOVEIF:	return "RESTRICT_REMOVEIF";
	    default:
		snprintf(sb, sizeof sb, "**RESTRICT_#%d**", op);
		return sb;
	}
}


/*
 * hack_restrict - add/subtract/manipulate entries on the restrict list
 */
void
hack_restrict(
	restrict_op	op,
	sockaddr_u *	resaddr,
	sockaddr_u *	resmask,
	short		ippeerlimit,
	u_short		mflags,
	u_short		rflags,
	u_long		expire
	)
{
	int		v6;
	restrict_u	match;
	restrict_u *	res;
	restrict_u **	plisthead;

	DPRINTF(1, ("hack_restrict: op %s addr %s mask %s ippeerlimit %d mflags %08x rflags %08x\n",
		    roptoa(op), stoa(resaddr), stoa(resmask), ippeerlimit, mflags, rflags));

	if (NULL == resaddr) {
		REQUIRE(NULL == resmask);
		REQUIRE(RESTRICT_FLAGS == op);
		restrict_source_rflags = rflags;
		restrict_source_mflags = mflags;
		restrict_source_ippeerlimit = ippeerlimit;
		restrict_source_enabled = 1;
		return;
	}

	ZERO(match);

#if 0
	/* silence VC9 potentially uninit warnings */
	// HMS: let's use a compiler-specific "enable" for this.
	res = NULL;
	v6 = 0;
#endif

	if (IS_IPV4(resaddr)) {
		v6 = 0;
		/*
		 * Get address and mask in host byte order for easy
		 * comparison as u_int32
		 */
		match.u.v4.addr = SRCADR(resaddr);
		match.u.v4.mask = SRCADR(resmask);
		match.u.v4.addr &= match.u.v4.mask;

	} else if (IS_IPV6(resaddr)) {
		v6 = 1;
		/*
		 * Get address and mask in network byte order for easy
		 * comparison as byte sequences (e.g. memcmp())
		 */
		match.u.v6.mask = SOCK_ADDR6(resmask);
		MASK_IPV6_ADDR(&match.u.v6.addr, PSOCK_ADDR6(resaddr),
			       &match.u.v6.mask);

	} else	/* not IPv4 nor IPv6 */
		REQUIRE(0);

	match.rflags = rflags;
	match.mflags = mflags;
	match.ippeerlimit = ippeerlimit;
	match.expire = expire;
	res = match_restrict_entry(&match, v6);

	switch (op) {

	case RESTRICT_FLAGS:
		/*
		 * Here we add bits to the rflags. If this is a
		 * new restriction add it.
		 */
		if (NULL == res) {
			if (v6) {
				res = alloc_res6();
				memcpy(res, &match,
				       V6_SIZEOF_RESTRICT_U);
				plisthead = &restrictlist6;
			} else {
				res = alloc_res4();
				memcpy(res, &match,
				       V4_SIZEOF_RESTRICT_U);
				plisthead = &restrictlist4;
			}
			LINK_SORT_SLIST(
				*plisthead, res,
				(v6)
				  ? res_sorts_before6(res, L_S_S_CUR())
				  : res_sorts_before4(res, L_S_S_CUR()),
				link, restrict_u);
			restrictcount++;
			if (RES_LIMITED & rflags)
				inc_res_limited();
		} else {
			if (   (RES_LIMITED & rflags)
			    && !(RES_LIMITED & res->rflags))
				inc_res_limited();
			res->rflags |= rflags;
		}

		res->ippeerlimit = match.ippeerlimit;

		break;

	case RESTRICT_UNFLAG:
		/*
		 * Remove some bits from the rflags. If we didn't
		 * find this one, just return.
		 */
		if (res != NULL) {
			if (   (RES_LIMITED & res->rflags)
			    && (RES_LIMITED & rflags))
				dec_res_limited();
			res->rflags &= ~rflags;
		}
		break;

	case RESTRICT_REMOVE:
	case RESTRICT_REMOVEIF:
		/*
		 * Remove an entry from the table entirely if we
		 * found one. Don't remove the default entry, and
		 * don't remove an interface entry unless asked to
		 * with RESTRICT_REMOVEIF.
		 */
		if (res != NULL
		    && (RESTRICT_REMOVEIF == op
			|| !(RESM_INTERFACE & res->mflags))
		    && res != &restrict_def4
		    && res != &restrict_def6)
			free_res(res, v6);
		break;

	default:	/* unknown op */
		INSIST(0);
		break;
	}

}
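
/*
 * Example (illustrative sketch, not a call made in this file): the
 * configuration code might translate "restrict 192.168.0.0 mask
 * 255.255.0.0 limited" into something like
 *
 *	hack_restrict(RESTRICT_FLAGS, &addr, &mask, -1,
 *		      0, RES_LIMITED, 0);
 *
 * where addr and mask are sockaddr_u values already parsed from the
 * configuration, -1 mirrors the ippeerlimit used for the default
 * entries, and expire 0 means the entry never times out.
 */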


/*
 * restrict_source - maintains dynamic "restrict source ..." entries as
 *		     peers come and go.
 */
void
restrict_source(
	sockaddr_u *	addr,
	int		farewell,	/* 0 to add, 1 to remove */
	u_long		expire		/* 0 is infinite, valid until */
	)
{
	sockaddr_u	onesmask;
	restrict_u *	res;
	int		found_specific;

	if (!restrict_source_enabled || SOCK_UNSPEC(addr) ||
	    IS_MCAST(addr) || ISREFCLOCKADR(addr))
		return;

	REQUIRE(AF_INET == AF(addr) || AF_INET6 == AF(addr));

	SET_HOSTMASK(&onesmask, AF(addr));
	if (farewell) {
		hack_restrict(RESTRICT_REMOVE, addr, &onesmask,
			      -2, 0, 0, 0);
		DPRINTF(1, ("restrict_source: %s removed\n", stoa(addr)));
		return;
	}

	/*
	 * If there is a specific entry for this address, hands
	 * off, as it is considered more specific than "restrict
	 * source ...".
	 * However, if the specific entry found is a fleeting one
	 * added by pool_xmit() before soliciting, replace it
	 * immediately regardless of the expire value to make way
	 * for the more persistent entry.
	 */
	if (IS_IPV4(addr)) {
		res = match_restrict4_addr(SRCADR(addr), SRCPORT(addr));
		INSIST(res != NULL);
		found_specific = (SRCADR(&onesmask) == res->u.v4.mask);
	} else {
		res = match_restrict6_addr(&SOCK_ADDR6(addr),
					   SRCPORT(addr));
		INSIST(res != NULL);
		found_specific = ADDR6_EQ(&res->u.v6.mask,
					  &SOCK_ADDR6(&onesmask));
	}
	if (!expire && found_specific && res->expire) {
		found_specific = 0;
		free_res(res, IS_IPV6(addr));
	}
	if (found_specific)
		return;

	hack_restrict(RESTRICT_FLAGS, addr, &onesmask,
		      restrict_source_ippeerlimit,
		      restrict_source_mflags, restrict_source_rflags, expire);
	DPRINTF(1, ("restrict_source: %s host restriction added\n",
		    stoa(addr)));
}
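
/*
 * Example (illustrative; the peer management code lives outside this
 * file): callers typically pair the two modes as
 *
 *	restrict_source(&peer->srcadr, 0, 0);	when a peer is mobilized
 *	restrict_source(&peer->srcadr, 1, 0);	when that peer goes away
 *
 * The second call removes the per-host entry the first one added.
 */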