/*	$OpenBSD: pf_table.c,v 1.78 2008/06/14 03:50:14 art Exp $	*/

/*
 * Copyright (c) 2010 The DragonFly Project.  All rights reserved.
 *
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/thread2.h>

#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#include <net/pf/pfvar.h>

#define ACCEPT_FLAGS(flags, oklist)		\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)

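/*
 * COPYIN/COPYOUT move table and address arrays across the user/kernel
 * boundary with copyin(9)/copyout(9) when the request originates from a
 * userland ioctl (PFR_FLAG_USERIOCTL); for in-kernel callers the data
 * already lives in kernel space, so a plain bcopy() suffices.
 */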
#define COPYIN(from, to, size, flags)		\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyin((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

#define COPYOUT(from, to, size, flags)		\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyout((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

#define	FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define	FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

#define SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

#define SUNION2PF(su, af) (((af)==AF_INET) ?	\
    (struct pf_addr *)&(su)->sin.sin_addr :	\
    (struct pf_addr *)&(su)->sin6.sin6_addr)

#define	AF_BITS(af)		(((af)==AF_INET)?32:128)
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
#define KENTRY_RNF_ROOT(ke) \
		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define NO_ADDRESSES		(-1)
#define ENQUEUE_UNMARKED_ONLY	(1)
#define INVERT_NEG_FLAG		(1)

static MALLOC_DEFINE(M_PFRKTABLEPL, "pfrktable", "pf radix table pool list");
static MALLOC_DEFINE(M_PFRKENTRYPL, "pfrkentry", "pf radix entry pool list");
static MALLOC_DEFINE(M_PFRKENTRYPL2, "pfrkentry2", "pf radix entry 2 pool list");
static MALLOC_DEFINE(M_PFRKCOUNTERSPL, "pfrkcounters", "pf radix counters");

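/*
 * A pfr_walktree carries one operation code plus its per-op payload
 * through rnh_walktree() callbacks: marking/sweeping entries, enqueueing
 * them on a work queue, copying addresses or stats out, picking the
 * n-th positive entry for a pool, or refreshing a dynamic address.
 */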
struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS,
		PFRW_POOL_GET,
		PFRW_DYNADDR_UPDATE
	}	 pfrw_op;
	union {
		struct pfr_addr		*pfrw1_addr;
		struct pfr_astats	*pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
		struct pfr_kentry	*pfrw1_kentry;
		struct pfi_dynaddr	*pfrw1_dyn;
	}	 pfrw_1;
	int	 pfrw_free;
	int	 pfrw_flags;
};
#define pfrw_addr	pfrw_1.pfrw1_addr
#define pfrw_astats	pfrw_1.pfrw1_astats
#define pfrw_workq	pfrw_1.pfrw1_workq
#define pfrw_kentry	pfrw_1.pfrw1_kentry
#define pfrw_dyn	pfrw_1.pfrw1_dyn
#define pfrw_cnt	pfrw_free

#define senderr(e)	do { rv = (e); goto _bad; } while (0)
struct malloc_type	*pfr_ktable_pl;
struct malloc_type	*pfr_kentry_pl;
struct malloc_type	*pfr_kentry_pl2;
struct sockaddr_in	 pfr_sin;
struct sockaddr_in6	 pfr_sin6;
union sockaddr_union	 pfr_mask;
struct pf_addr		 pfr_ffaddr;

void			 pfr_copyout_addr(struct pfr_addr *,
			    struct pfr_kentry *ke);
int			 pfr_validate_addr(struct pfr_addr *);
void			 pfr_enqueue_addrs(struct pfr_ktable *,
			    struct pfr_kentryworkq *, int *, int);
void			 pfr_mark_addrs(struct pfr_ktable *);
struct pfr_kentry	*pfr_lookup_addr(struct pfr_ktable *,
			    struct pfr_addr *, int);
struct pfr_kentry	*pfr_create_kentry(struct pfr_addr *, int);
void			 pfr_destroy_kentries(struct pfr_kentryworkq *);
void			 pfr_destroy_kentry(struct pfr_kentry *);
void			 pfr_insert_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *, long);
void			 pfr_remove_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
void			 pfr_clstats_kentries(struct pfr_kentryworkq *, long,
			    int);
void			 pfr_reset_feedback(struct pfr_addr *, int, int);
void			 pfr_prepare_network(union sockaddr_union *, int, int);
int			 pfr_route_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_unroute_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_walktree(struct radix_node *, void *);
int			 pfr_validate_table(struct pfr_table *, int, int);
int			 pfr_fix_anchor(char *);
void			 pfr_commit_ktable(struct pfr_ktable *, long);
void			 pfr_insert_ktables(struct pfr_ktableworkq *);
void			 pfr_insert_ktable(struct pfr_ktable *);
void			 pfr_setflags_ktables(struct pfr_ktableworkq *);
void			 pfr_setflags_ktable(struct pfr_ktable *, int);
void			 pfr_clstats_ktables(struct pfr_ktableworkq *, long,
			    int);
void			 pfr_clstats_ktable(struct pfr_ktable *, long, int);
struct pfr_ktable	*pfr_create_ktable(struct pfr_table *, long, int);
void			 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
void			 pfr_destroy_ktable(struct pfr_ktable *, int);
int			 pfr_ktable_compare(struct pfr_ktable *,
			    struct pfr_ktable *);
struct pfr_ktable	*pfr_lookup_table(struct pfr_table *);
void			 pfr_clean_node_mask(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
int			 pfr_table_count(struct pfr_table *, int);
int			 pfr_skip_table(struct pfr_table *,
			    struct pfr_ktable *, int);
struct pfr_kentry	*pfr_kentry_byidx(struct pfr_ktable *, int, int);

RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

struct pfr_ktablehead	 pfr_ktables;
struct pfr_table	 pfr_nulltable;
int			 pfr_ktable_cnt;

void
pfr_initialize(void)
{
	pfr_sin.sin_len = sizeof(pfr_sin);
	pfr_sin.sin_family = AF_INET;
	pfr_sin6.sin6_len = sizeof(pfr_sin6);
	pfr_sin6.sin6_family = AF_INET6;

	memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
}

int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
		if (kt->pfrkt_cnt) {
			kprintf("pfr_clr_addrs: corruption detected (%d).\n",
			    kt->pfrkt_cnt);
			kt->pfrkt_cnt = 0;
		}
	}
	return (0);
}

int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		q = pfr_lookup_addr(tmpkt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad.pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else
				ad.pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(&ad,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_insert_kentries(kt, &workq, tzero);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	} else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, xdel = 0, log = 1;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * There are two algorithms to choose from here, with:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * One is O(N) and is better for large 'n',
	 * one is O(n*LOG(N)) and is better for small 'n'.
	 *
	 * The following code tries to decide which one is best.
	 */
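	/*
	 * Worked example (computed, not from the original source): for a
	 * table of 4096 entries the loop below leaves log = 14, so the
	 * O(N) full-table mark is chosen once more than 4096/14 (~292)
	 * addresses are to be deleted.
	 */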
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
		log++;
	if (size > kt->pfrkt_cnt/log) {
		/* full table scan */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete */
		for (i = 0; i < size; i++) {
			if (COPYIN(addr+i, &ad, sizeof(ad), flags))
				return (EFAULT);
			if (pfr_validate_addr(&ad))
				return (EINVAL);
			p = pfr_lookup_addr(kt, &ad, 1);
			if (p != NULL)
				p->pfrke_mark = 0;
		}
	}
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad.pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else
				ad.pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL && p->pfrke_not == ad.pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}

int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags,
    u_int32_t ignore_pfrt_flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, xadd = 0, xdel = 0, xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
	    PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			if (COPYOUT(&ad, addr+size+i, sizeof(ad), flags))
				senderr(EFAULT);
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
	int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, xmatch = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			return (EFAULT);
		if (pfr_validate_addr(&ad))
			return (EINVAL);
		if (ADDR_NETWORK(&ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, &ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(&ad, p);
		ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not)
			xmatch++;
		if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
			return (EFAULT);
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}

int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
	int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		kprintf("pfr_get_addrs: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
	int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv;
	long			 tzero = time_second;

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	if (flags & PFR_FLAG_ATOMIC)
		crit_enter();
	rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (flags & PFR_FLAG_ATOMIC)
		crit_exit();
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		kprintf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, xzero = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad.pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_clstats_kentries(&workq, 0, 0);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}

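/*
 * Validate a pfr_addr: the prefix length must fit the address family,
 * all address bits beyond the prefix must be zero (e.g. 10.0.0.1/24 is
 * rejected while 10.0.0.0/24 is accepted), and the not/fback fields
 * must be sane.
 */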
int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
#ifdef INET
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
#endif /* INET6 */
	default:
		return (-1);
	}
	if (ad->pfra_net < 128 &&
		(((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
			return (-1);
	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
		if (((caddr_t)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback)
		return (-1);
	return (0);
}

void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
	int *naddr, int sweep)
{
	struct pfr_walktree	w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
			kprintf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
		if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
			kprintf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}

void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
	if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
		kprintf("pfr_mark_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
		kprintf("pfr_mark_addrs: IPv6 walktree failed.\n");
}


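/*
 * Look up an address in the table's radix tree: network entries use an
 * exact rn_lookup() with the prefix mask, host addresses use rn_match();
 * with 'exact' set, a host query refuses a covering network entry.
 */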
struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_node_head	*head = NULL;
	struct pfr_kentry	*ke;

	bzero(&sa, sizeof(sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = kt->pfrkt_ip4;
	} else if (ad->pfra_af == AF_INET6) {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = kt->pfrkt_ip6;
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		crit_enter(); /* rn_lookup makes use of globals */
		ke = (struct pfr_kentry *)rn_lookup((char *)&sa, (char *)&mask,
		    head);
		crit_exit();
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match((char *)&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}

struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad, int intr)
{
	struct pfr_kentry	*ke;

	if (intr)
		ke = kmalloc(sizeof(struct pfr_kentry), M_PFRKENTRYPL2,
		    M_NOWAIT|M_ZERO);
	else
		ke = kmalloc(sizeof(struct pfr_kentry), M_PFRKENTRYPL,
		    M_NOWAIT|M_ZERO|M_NULLOK);
	if (ke == NULL)
		return (NULL);

	if (ad->pfra_af == AF_INET)
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
	else if (ad->pfra_af == AF_INET6)
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	ke->pfrke_not = ad->pfra_not;
	ke->pfrke_intrpool = intr;
	return (ke);
}

void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p, *q;

	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
		q = SLIST_NEXT(p, pfrke_workq);
		pfr_destroy_kentry(p);
	}
}

void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	if (ke->pfrke_counters)
		kfree(ke->pfrke_counters, M_PFRKCOUNTERSPL);
	if (ke->pfrke_intrpool)
		kfree(ke, M_PFRKENTRYPL2);
	else
		kfree(ke, M_PFRKENTRYPL);
}

void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			kprintf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}

int
pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv;

	p = pfr_lookup_addr(kt, ad, 1);
	if (p != NULL)
		return (0);
	p = pfr_create_kentry(ad, 1);
	if (p == NULL)
		return (EINVAL);

	rv = pfr_route_kentry(kt, p);
	if (rv)
		return (rv);

	p->pfrke_tzero = tzero;
	kt->pfrkt_cnt++;

	return (0);
}

void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;
	int			 n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		n++;
	}
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);
}

void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq)
		pfr_unroute_kentry(kt, p);
}

void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		crit_enter();
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		if (p->pfrke_counters) {
			kfree(p->pfrke_counters, M_PFRKCOUNTERSPL);
			p->pfrke_counters = NULL;
		}
		crit_exit();
		p->pfrke_tzero = tzero;
	}
}

void
pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
{
	struct pfr_addr	ad;
	int		i;

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			break;
		ad.pfra_fback = PFR_FB_NONE;
		if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
			break;
	}
}

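/*
 * Build a netmask sockaddr for a prefix length; e.g. AF_INET with
 * net = 20 yields 255.255.240.0. For AF_INET6 the mask is filled one
 * 32-bit word at a time.
 */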
void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
	int	i;

	bzero(sa, sizeof(*sa));
	if (af == AF_INET) {
		sa->sin.sin_len = sizeof(sa->sin);
		sa->sin.sin_family = AF_INET;
		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
	} else if (af == AF_INET6) {
		sa->sin6.sin6_len = sizeof(sa->sin6);
		sa->sin6.sin6_family = AF_INET6;
		for (i = 0; i < 4; i++) {
			if (net <= 32) {
				sa->sin6.sin6_addr.s6_addr32[i] =
				    net ? htonl(-1 << (32-net)) : 0;
				break;
			}
			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
			net -= 32;
		}
	}
}

int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head = NULL;

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	crit_enter();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute((char *)&ke->pfrke_sa, (char *)&mask, head,
		    ke->pfrke_node);
	} else
		rn = rn_addroute((char *)&ke->pfrke_sa, NULL, head,
		    ke->pfrke_node);
	crit_exit();

	return (rn == NULL ? -1 : 0);
}

int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head = NULL;

	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	crit_enter();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete((char *)&ke->pfrke_sa, (char *)&mask, head);
	} else
		rn = rn_delete((char *)&ke->pfrke_sa, NULL, head);
	crit_exit();

	if (rn == NULL) {
		kprintf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}

void
pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
{
	bzero(ad, sizeof(*ad));
	if (ke == NULL)
		return;
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	ad->pfra_not = ke->pfrke_not;
	if (ad->pfra_af == AF_INET)
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
	else if (ad->pfra_af == AF_INET6)
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
}

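/*
 * Radix tree walker callback; dispatches on the operation selected in
 * the pfr_walktree argument (see the enum above). Returning nonzero
 * aborts the walk.
 */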
int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;
	int			 flags = w->pfrw_flags;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_mark)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		if (w->pfrw_free-- > 0) {
			struct pfr_addr ad;

			pfr_copyout_addr(&ad, ke);
			if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
				return (EFAULT);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_addr(&as.pfras_a, ke);

			crit_enter();
			if (ke->pfrke_counters) {
				bcopy(ke->pfrke_counters->pfrkc_packets,
				    as.pfras_packets, sizeof(as.pfras_packets));
				bcopy(ke->pfrke_counters->pfrkc_bytes,
				    as.pfras_bytes, sizeof(as.pfras_bytes));
			} else {
				bzero(as.pfras_packets, sizeof(as.pfras_packets));
				bzero(as.pfras_bytes, sizeof(as.pfras_bytes));
				as.pfras_a.pfra_fback = PFR_FB_NOCOUNT;
			}
			crit_exit();
			as.pfras_tzero = ke->pfrke_tzero;

			if (COPYOUT(&as, w->pfrw_astats, sizeof(as), flags))
				return (EFAULT);
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not)
			break; /* negative entries are ignored */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
		if (ke->pfrke_af == AF_INET) {
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
			    &pfr_mask, AF_INET);
		} else if (ke->pfrke_af == AF_INET6) {
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
			    &pfr_mask, AF_INET6);
		}
		break;
	}
	return (0);
}

int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			senderr(EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	} else
		pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}

int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl), flags))
			return (EFAULT);
	}
	if (n) {
		kprintf("pfr_get_tables: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 n, nn;
	long			 tzero = time_second;

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	if (flags & PFR_FLAG_ATOMIC)
		crit_enter();
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (!(flags & PFR_FLAG_ATOMIC))
			crit_enter();
		if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl), flags)) {
			crit_exit();
			return (EFAULT);
		}
		if (!(flags & PFR_FLAG_ATOMIC))
			crit_exit();
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	if (flags & PFR_FLAG_ATOMIC)
		crit_exit();
	if (n) {
		kprintf("pfr_get_tstats: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, xzero = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}

int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
	int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	}
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(&ad, 0);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}

int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p, *q;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 xadd = 0, xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
			q = SLIST_NEXT(p, pfrkt_workq);
			pfr_commit_ktable(p, tzero);
		}
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}

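/*
 * Swap a table's inactive shadow into place: a shadow defined without an
 * address list (NO_ADDRESSES) only clears statistics; a populated shadow
 * is merged entry by entry into an active table; an inactive table simply
 * exchanges its radix heads and count with the shadow.
 */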
void
pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q, *next;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
		& ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}

int
pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
{
	int i;

	if (!tbl->pfrt_name[0])
		return (-1);
	if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
		return (-1);
	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
		return (-1);
	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
		if (tbl->pfrt_name[i])
			return (-1);
	if (pfr_fix_anchor(tbl->pfrt_anchor))
		return (-1);
	if (tbl->pfrt_flags & ~allowedflags)
		return (-1);
	return (0);
}

/*
 * Rewrite anchors referenced by tables to remove slashes
 * and check for validity.
 */
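/*
 * Example: a leading-slash anchor such as "/foo/bar" is rewritten in
 * place to "foo/bar"; any nonzero byte after the terminating NUL within
 * MAXPATHLEN bytes makes the anchor invalid.
 */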
int
pfr_fix_anchor(char *anchor)
{
	size_t siz = MAXPATHLEN;
	int i;

	if (anchor[0] == '/') {
		char *path;
		int off;

		path = anchor;
		off = 1;
		while (*++path == '/')
			off++;
		bcopy(path, anchor, siz - off);
		memset(anchor + siz - off, 0, off);
	}
	if (anchor[siz - 1])
		return (-1);
	for (i = strlen(anchor); i < siz; i++)
		if (anchor[i])
			return (-1);
	return (0);
}

int
pfr_table_count(struct pfr_table *filter, int flags)
{
	struct pf_ruleset *rs;

	if (flags & PFR_FLAG_ALLRSETS)
		return (pfr_ktable_cnt);
	if (filter->pfrt_anchor[0]) {
		rs = pf_find_ruleset(filter->pfrt_anchor);
		return ((rs != NULL) ? rs->tables : -1);
	}
	return (pf_main_ruleset.tables);
}

int
pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
{
	if (flags & PFR_FLAG_ALLRSETS)
		return (0);
	if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
		return (1);
	return (0);
}

void
pfr_insert_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_insert_ktable(p);
}

void
pfr_insert_ktable(struct pfr_ktable *kt)
{
	RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
	pfr_ktable_cnt++;
	if (kt->pfrkt_root != NULL)
		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
			pfr_setflags_ktable(kt->pfrkt_root,
			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
}

void
pfr_setflags_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_setflags_ktable(p, p->pfrkt_nflags);
	}
}

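/*
 * Apply a table's new flag set: a table that is neither REFERENCED nor
 * PERSIST loses ACTIVE (and with it the user flags); once no SETMASK
 * flag remains the table is unlinked from the RB tree and destroyed,
 * dropping the anchor refcount on its root table.
 */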
1835 void
1836 pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
1837 {
1838 	struct pfr_kentryworkq	addrq;
1839 
1840 	if (!(newf & PFR_TFLAG_REFERENCED) &&
1841 	    !(newf & PFR_TFLAG_PERSIST))
1842 		newf &= ~PFR_TFLAG_ACTIVE;
1843 	if (!(newf & PFR_TFLAG_ACTIVE))
1844 		newf &= ~PFR_TFLAG_USRMASK;
1845 	if (!(newf & PFR_TFLAG_SETMASK)) {
1846 		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
1847 		if (kt->pfrkt_root != NULL)
1848 			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
1849 				pfr_setflags_ktable(kt->pfrkt_root,
1850 				    kt->pfrkt_root->pfrkt_flags &
1851 					~PFR_TFLAG_REFDANCHOR);
1852 		pfr_destroy_ktable(kt, 1);
1853 		pfr_ktable_cnt--;
1854 		return;
1855 	}
1856 	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
1857 		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1858 		pfr_remove_kentries(kt, &addrq);
1859 	}
1860 	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
1861 		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1862 		kt->pfrkt_shadow = NULL;
1863 	}
1864 	kt->pfrkt_flags = newf;
1865 }
1866 
1867 void
1868 pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
1869 {
1870 	struct pfr_ktable	*p;
1871 
1872 	SLIST_FOREACH(p, workq, pfrkt_workq)
1873 		pfr_clstats_ktable(p, tzero, recurse);
1874 }
1875 
1876 void
1877 pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
1878 {
1879 	struct pfr_kentryworkq	 addrq;
1880 
1881 	if (recurse) {
1882 		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1883 		pfr_clstats_kentries(&addrq, tzero, 0);
1884 	}
1885 	crit_enter();
1886 	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
1887 	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
1888 	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
1889 	crit_exit();
1890 	kt->pfrkt_tzero = tzero;
1891 }
1892 
1893 struct pfr_ktable *
1894 pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
1895 {
1896 	struct pfr_ktable	*kt;
1897 	struct pf_ruleset	*rs;
1898 
1899 	kt = kmalloc(sizeof(struct pfr_ktable), M_PFRKTABLEPL, M_NOWAIT|M_ZERO|M_NULLOK);
1900 	if (kt == NULL)
1901 		return (NULL);
1902 	kt->pfrkt_t = *tbl;
1903 
1904 	if (attachruleset) {
1905 		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
1906 		if (!rs) {
1907 			pfr_destroy_ktable(kt, 0);
1908 			return (NULL);
1909 		}
1910 		kt->pfrkt_rs = rs;
1911 		rs->tables++;
1912 	}
1913 
1914 	KKASSERT(pf_maskhead != NULL);
1915 	if (!rn_inithead((void **)&kt->pfrkt_ip4, pf_maskhead,
1916 	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
1917 	    !rn_inithead((void **)&kt->pfrkt_ip6, pf_maskhead,
1918 	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
1919 		pfr_destroy_ktable(kt, 0);
1920 		return (NULL);
1921 	}
1922 	kt->pfrkt_tzero = tzero;
1923 
1924 	return (kt);
1925 }
1926 
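/* Destroy every table on the workq, optionally flushing their addresses. */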
1927 void
1928 pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
1929 {
1930 	struct pfr_ktable	*p, *q;
1931 
1932 	for (p = SLIST_FIRST(workq); p; p = q) {
1933 		q = SLIST_NEXT(p, pfrkt_workq);
1934 		pfr_destroy_ktable(p, flushaddr);
1935 	}
1936 }
1937 
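/*
 * Tear down a table: optionally destroy its entries, free both radix
 * heads, recurse into any shadow table, drop the ruleset's table count
 * (removing the ruleset if it became empty), and free the table itself.
 */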
1938 void
1939 pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
1940 {
1941 	struct pfr_kentryworkq	 addrq;
1942 
1943 	if (flushaddr) {
1944 		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1945 		pfr_clean_node_mask(kt, &addrq);
1946 		pfr_destroy_kentries(&addrq);
1947 	}
1948 	if (kt->pfrkt_ip4 != NULL)
1949 		kfree((caddr_t)kt->pfrkt_ip4, M_RTABLE);
1950 
1951 	if (kt->pfrkt_ip6 != NULL)
1952 		kfree((caddr_t)kt->pfrkt_ip6, M_RTABLE);
1953 	if (kt->pfrkt_shadow != NULL)
1954 		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
1955 	if (kt->pfrkt_rs != NULL) {
1956 		kt->pfrkt_rs->tables--;
1957 		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
1958 	}
1959 	kfree(kt, M_PFRKTABLEPL);
1960 }
1961 
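/* RB tree ordering: by table name first, then by anchor path. */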
1962 int
1963 pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
1964 {
1965 	int d;
1966 
1967 	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
1968 		return (d);
1969 	return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
1970 }
1971 
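/* Find a kernel table by name and anchor. */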
1972 struct pfr_ktable *
1973 pfr_lookup_table(struct pfr_table *tbl)
1974 {
1975 	/* a struct pfr_ktable starts like a struct pfr_table */
1976 	return (RB_FIND(pfr_ktablehead, &pfr_ktables,
1977 	    (struct pfr_ktable *)tbl));
1978 }
1979 
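/*
 * Look up an address in a table, falling back to the table's root when
 * the table itself is inactive.  Performs a radix match for the given
 * address family, discards the RNF_ROOT sentinel nodes, honors negated
 * entries, and updates the table's match/nomatch counters.
 */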
1980 int
1981 pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
1982 {
1983 	struct pfr_kentry	*ke = NULL;
1984 	int			 match;
1985 
1986 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
1987 		kt = kt->pfrkt_root;
1988 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
1989 		return (0);
1990 
1991 	switch (af) {
1992 #ifdef INET
1993 	case AF_INET:
1994 		pfr_sin.sin_addr.s_addr = a->addr32[0];
1995 		ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin,
1996 		    kt->pfrkt_ip4);
1997 		if (ke && KENTRY_RNF_ROOT(ke))
1998 			ke = NULL;
1999 		break;
2000 #endif /* INET */
2001 #ifdef INET6
2002 	case AF_INET6:
2003 		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
2004 		ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin6,
2005 		    kt->pfrkt_ip6);
2006 		if (ke && KENTRY_RNF_ROOT(ke))
2007 			ke = NULL;
2008 		break;
2009 #endif /* INET6 */
2010 	}
2011 	match = (ke && !ke->pfrke_not);
2012 	if (match)
2013 		kt->pfrkt_match++;
2014 	else
2015 		kt->pfrkt_nomatch++;
2016 	return (match);
2017 }
2018 
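/*
 * Account a packet against a table: look up the entry the address
 * matched, cross-check the result against what the rule expected
 * (downgrading op_pass to PFR_OP_XPASS on a mismatch), then update the
 * table's packet/byte counters and, if PFR_TFLAG_COUNTERS is set, the
 * per-entry counters, allocating them on first use.
 */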
2019 void
2020 pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
2021     u_int64_t len, int dir_out, int op_pass, int notrule)
2022 {
2023 	struct pfr_kentry	*ke = NULL;
2024 
2025 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2026 		kt = kt->pfrkt_root;
2027 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2028 		return;
2029 
2030 	switch (af) {
2031 #ifdef INET
2032 	case AF_INET:
2033 		pfr_sin.sin_addr.s_addr = a->addr32[0];
2034 		ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin,
2035 		    kt->pfrkt_ip4);
2036 		if (ke && KENTRY_RNF_ROOT(ke))
2037 			ke = NULL;
2038 		break;
2039 #endif /* INET */
2040 #ifdef INET6
2041 	case AF_INET6:
2042 		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
2043 		ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin6,
2044 		    kt->pfrkt_ip6);
2045 		if (ke && KENTRY_RNF_ROOT(ke))
2046 			ke = NULL;
2047 		break;
2048 #endif /* INET6 */
2049 	default:
2050 		;
2051 	}
2052 	if ((ke == NULL || ke->pfrke_not) != notrule) {
2053 		if (op_pass != PFR_OP_PASS)
2054 			kprintf("pfr_update_stats: assertion failed.\n");
2055 		op_pass = PFR_OP_XPASS;
2056 	}
2057 	kt->pfrkt_packets[dir_out][op_pass]++;
2058 	kt->pfrkt_bytes[dir_out][op_pass] += len;
2059 	if (ke != NULL && op_pass != PFR_OP_XPASS &&
2060 	    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
2061 		if (ke->pfrke_counters == NULL)
2062 			ke->pfrke_counters = kmalloc(sizeof(struct pfr_kcounters),
2063 			    M_PFRKCOUNTERSPL, M_NOWAIT|M_ZERO);
2064 		if (ke->pfrke_counters != NULL) {
2065 			ke->pfrke_counters->pfrkc_packets[dir_out][op_pass]++;
2066 			ke->pfrke_counters->pfrkc_bytes[dir_out][op_pass] += len;
2067 		}
2068 	}
2069 }
2070 
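/*
 * Resolve (or create) the table a rule refers to.  A table inside an
 * anchor also gets a root table of the same name in the main ruleset.
 * The rule reference count is bumped; the first reference marks the
 * table as referenced.
 */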
2071 struct pfr_ktable *
2072 pfr_attach_table(struct pf_ruleset *rs, char *name)
2073 {
2074 	struct pfr_ktable	*kt, *rt;
2075 	struct pfr_table	 tbl;
2076 	struct pf_anchor	*ac = rs->anchor;
2077 
2078 	bzero(&tbl, sizeof(tbl));
2079 	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
2080 	if (ac != NULL)
2081 		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
2082 	kt = pfr_lookup_table(&tbl);
2083 	if (kt == NULL) {
2084 		kt = pfr_create_ktable(&tbl, time_second, 1);
2085 		if (kt == NULL)
2086 			return (NULL);
2087 		if (ac != NULL) {
2088 			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
2089 			rt = pfr_lookup_table(&tbl);
2090 			if (rt == NULL) {
2091 				rt = pfr_create_ktable(&tbl, 0, 1);
2092 				if (rt == NULL) {
2093 					pfr_destroy_ktable(kt, 0);
2094 					return (NULL);
2095 				}
2096 				pfr_insert_ktable(rt);
2097 			}
2098 			kt->pfrkt_root = rt;
2099 		}
2100 		pfr_insert_ktable(kt);
2101 	}
2102 	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
2103 		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
2104 	return (kt);
2105 }
2106 
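/*
 * Drop a rule's reference on a table; when the last reference goes away
 * the REFERENCED flag is cleared, which may deactivate the table.
 */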
2107 void
2108 pfr_detach_table(struct pfr_ktable *kt)
2109 {
2110 	if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
2111 		kprintf("pfr_detach_table: refcount = %d.\n",
2112 		    kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
2113 	else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
2114 		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
2115 }
2116 
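/*
 * Round-robin address selection for pools backed by a table: *pidx is
 * the index of the current block and counter holds the address last
 * handed out.  Walks the blocks in order, advancing the counter past
 * nested (more specific) blocks, and returns the selected block's
 * address/mask pair through raddr/rmask.  Returns 0 on success, 1 when
 * no further block exists, and -1 when no active table can be resolved.
 */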
2117 int
2118 pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
2119     struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
2120 {
2121 	struct pfr_kentry	*ke, *ke2 = NULL;
2122 	struct pf_addr		*addr = NULL;
2123 	union sockaddr_union	 mask;
2124 	int			 idx = -1, use_counter = 0;
2125 
2126 	if (af == AF_INET)
2127 		addr = (struct pf_addr *)&pfr_sin.sin_addr;
2128 	else if (af == AF_INET6)
2129 		addr = (struct pf_addr *)&pfr_sin6.sin6_addr;
2130 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2131 		kt = kt->pfrkt_root;
2132 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2133 		return (-1);
2134 
2135 	if (pidx != NULL)
2136 		idx = *pidx;
2137 	if (counter != NULL && idx >= 0)
2138 		use_counter = 1;
2139 	if (idx < 0)
2140 		idx = 0;
2141 
2142 _next_block:
2143 	ke = pfr_kentry_byidx(kt, idx, af);
2144 	if (ke == NULL) {
2145 		kt->pfrkt_nomatch++;
2146 		return (1);
2147 	}
2148 	pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
2149 	*raddr = SUNION2PF(&ke->pfrke_sa, af);
2150 	*rmask = SUNION2PF(&pfr_mask, af);
2151 
2152 	if (use_counter) {
2153 		/* is supplied address within block? */
2154 		if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
2155 			/* no, go to next block in table */
2156 			idx++;
2157 			use_counter = 0;
2158 			goto _next_block;
2159 		}
2160 		PF_ACPY(addr, counter, af);
2161 	} else {
2162 		/* use first address of block */
2163 		PF_ACPY(addr, *raddr, af);
2164 	}
2165 
2166 	if (!KENTRY_NETWORK(ke)) {
2167 		/* this is a single IP address - no possible nested block */
2168 		PF_ACPY(counter, addr, af);
2169 		*pidx = idx;
2170 		kt->pfrkt_match++;
2171 		return (0);
2172 	}
2173 	for (;;) {
2174 		/* we don't want to use a nested block */
2175 		if (af == AF_INET)
2176 			ke2 = (struct pfr_kentry *)rn_match((char *)&pfr_sin,
2177 			    kt->pfrkt_ip4);
2178 		else if (af == AF_INET6)
2179 			ke2 = (struct pfr_kentry *)rn_match((char *)&pfr_sin6,
2180 			    kt->pfrkt_ip6);
2181 		/* no need to check KENTRY_RNF_ROOT() here */
2182 		if (ke2 == ke) {
2183 			/* lookup returned the same block - perfect */
2184 			PF_ACPY(counter, addr, af);
2185 			*pidx = idx;
2186 			kt->pfrkt_match++;
2187 			return (0);
2188 		}
2189 
2190 		/* we need to increase the counter past the nested block */
2191 		pfr_prepare_network(&mask, af, ke2->pfrke_net);
2192 		PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
2193 		PF_AINC(addr, af);
2194 		if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
2195 			/* ok, we reached the end of our main block */
2196 			/* go to next block in table */
2197 			idx++;
2198 			use_counter = 0;
2199 			goto _next_block;
2200 		}
2201 	}
2202 }
2203 
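/* Return the idx-th entry of the given family via a PFRW_POOL_GET walk. */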
2204 struct pfr_kentry *
2205 pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
2206 {
2207 	struct pfr_walktree	w;
2208 
2209 	bzero(&w, sizeof(w));
2210 	w.pfrw_op = PFRW_POOL_GET;
2211 	w.pfrw_cnt = idx;
2212 
2213 	switch (af) {
2214 #ifdef INET
2215 	case AF_INET:
2216 		kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2217 		return (w.pfrw_kentry);
2218 #endif /* INET */
2219 #ifdef INET6
2220 	case AF_INET6:
2221 		kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2222 		return (w.pfrw_kentry);
2223 #endif /* INET6 */
2224 	default:
2225 		return (NULL);
2226 	}
2227 }
2228 
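/*
 * Refresh a table-backed dynamic address: reset its IPv4/IPv6 address
 * counts and re-walk the appropriate radix trees with the
 * PFRW_DYNADDR_UPDATE operation, inside a critical section.
 */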
2229 void
2230 pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
2231 {
2232 	struct pfr_walktree	w;
2233 
2234 	bzero(&w, sizeof(w));
2235 	w.pfrw_op = PFRW_DYNADDR_UPDATE;
2236 	w.pfrw_dyn = dyn;
2237 
2238 	crit_enter();
2239 	dyn->pfid_acnt4 = 0;
2240 	dyn->pfid_acnt6 = 0;
2241 	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
2242 		kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2243 	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
2244 		kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2245 	crit_exit();
2246 }
2247