/*	$OpenBSD: pf_table.c,v 1.83 2010/02/24 15:04:40 henning Exp $	*/

/*
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/syslog.h>

#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/ip_ipsp.h>
#include <net/pfvar.h>

#define ACCEPT_FLAGS(flags, oklist)		\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)
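
/*
 * Example: pfr_clr_addrs() below passes an oklist of PFR_FLAG_ATOMIC |
 * PFR_FLAG_DUMMY, so handing it any other PFR_FLAG_ALLMASK bit (such as
 * PFR_FLAG_FEEDBACK) fails with EINVAL, while bits outside the mask
 * (like the internal PFR_FLAG_USERIOCTL) pass through unchecked.
 */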

#define COPYIN(from, to, size, flags)		\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyin((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

#define COPYOUT(from, to, size, flags)		\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyout((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))
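
/*
 * Table requests can originate from userland ioctls or from within the
 * kernel.  PFR_FLAG_USERIOCTL selects the faultable copyin()/copyout()
 * path; kernel-internal callers fall through to plain bcopy(), which
 * cannot fail (hence the constant 0 result).
 */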

#define	FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define	FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

#define SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

#define SUNION2PF(su, af) (((af)==AF_INET) ?	\
    (struct pf_addr *)&(su)->sin.sin_addr :	\
    (struct pf_addr *)&(su)->sin6.sin6_addr)

#define	AF_BITS(af)		(((af)==AF_INET)?32:128)
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
#define KENTRY_RNF_ROOT(ke) \
		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define NO_ADDRESSES		(-1)
#define ENQUEUE_UNMARKED_ONLY	(1)
#define INVERT_NEG_FLAG		(1)
struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS,
		PFRW_POOL_GET,
		PFRW_DYNADDR_UPDATE
	}	 pfrw_op;
	union {
		struct pfr_addr		*pfrw1_addr;
		struct pfr_astats	*pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
		struct pfr_kentry	*pfrw1_kentry;
		struct pfi_dynaddr	*pfrw1_dyn;
	}	 pfrw_1;
	int	 pfrw_free;
	int	 pfrw_flags;
};
#define pfrw_addr	pfrw_1.pfrw1_addr
#define pfrw_astats	pfrw_1.pfrw1_astats
#define pfrw_workq	pfrw_1.pfrw1_workq
#define pfrw_kentry	pfrw_1.pfrw1_kentry
#define pfrw_dyn	pfrw_1.pfrw1_dyn
#define pfrw_cnt	pfrw_free

#define senderr(e)	do { rv = (e); goto _bad; } while (0)

struct pool		 pfr_ktable_pl;
struct pool		 pfr_kentry_pl[PFRKE_MAX];
struct pool		 pfr_kcounters_pl;
struct sockaddr_in	 pfr_sin;
struct sockaddr_in6	 pfr_sin6;
union sockaddr_union	 pfr_mask;
struct pf_addr		 pfr_ffaddr;

void			 pfr_copyout_addr(struct pfr_addr *,
			    struct pfr_kentry *ke);
int			 pfr_validate_addr(struct pfr_addr *);
void			 pfr_enqueue_addrs(struct pfr_ktable *,
			    struct pfr_kentryworkq *, int *, int);
void			 pfr_mark_addrs(struct pfr_ktable *);
struct pfr_kentry	*pfr_lookup_addr(struct pfr_ktable *,
			    struct pfr_addr *, int);
struct pfr_kentry	*pfr_create_kentry(struct pfr_addr *);
void			 pfr_destroy_kentries(struct pfr_kentryworkq *);
void			 pfr_destroy_kentry(struct pfr_kentry *);
void			 pfr_insert_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *, long);
void			 pfr_remove_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
void			 pfr_clstats_kentries(struct pfr_kentryworkq *, long,
			    int);
void			 pfr_reset_feedback(struct pfr_addr *, int, int);
void			 pfr_prepare_network(union sockaddr_union *, int, int);
int			 pfr_route_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_unroute_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_walktree(struct radix_node *, void *);
int			 pfr_validate_table(struct pfr_table *, int, int);
int			 pfr_fix_anchor(char *);
void			 pfr_commit_ktable(struct pfr_ktable *, long);
void			 pfr_insert_ktables(struct pfr_ktableworkq *);
void			 pfr_insert_ktable(struct pfr_ktable *);
void			 pfr_setflags_ktables(struct pfr_ktableworkq *);
void			 pfr_setflags_ktable(struct pfr_ktable *, int);
void			 pfr_clstats_ktables(struct pfr_ktableworkq *, long,
			    int);
void			 pfr_clstats_ktable(struct pfr_ktable *, long, int);
struct pfr_ktable	*pfr_create_ktable(struct pfr_table *, long, int, int);
void			 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
void			 pfr_destroy_ktable(struct pfr_ktable *, int);
int			 pfr_ktable_compare(struct pfr_ktable *,
			    struct pfr_ktable *);
struct pfr_ktable	*pfr_lookup_table(struct pfr_table *);
void			 pfr_clean_node_mask(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
int			 pfr_table_count(struct pfr_table *, int);
int			 pfr_skip_table(struct pfr_table *,
			    struct pfr_ktable *, int);
struct pfr_kentry	*pfr_kentry_byidx(struct pfr_ktable *, int, int);

RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

struct pfr_ktablehead	 pfr_ktables;
struct pfr_table	 pfr_nulltable;
int			 pfr_ktable_cnt;

void
pfr_initialize(void)
{
	pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0,
	    "pfrktable", NULL);
	pool_init(&pfr_kentry_pl[PFRKE_PLAIN], sizeof(struct pfr_kentry),
	    0, 0, 0, "pfrke_plain", NULL);
	pool_init(&pfr_kentry_pl[PFRKE_ROUTE], sizeof(struct pfr_kentry_route),
	    0, 0, 0, "pfrke_route", NULL);

	pool_init(&pfr_kcounters_pl, sizeof(struct pfr_kcounters),
	    0, 0, 0, "pfrkcounters", NULL);

	pfr_sin.sin_len = sizeof(pfr_sin);
	pfr_sin.sin_family = AF_INET;
	pfr_sin6.sin6_len = sizeof(pfr_sin6);
	pfr_sin6.sin6_family = AF_INET6;

	memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
}

int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	int			 s;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		if (kt->pfrkt_cnt) {
			printf("pfr_clr_addrs: corruption detected (%d).\n",
			    kt->pfrkt_cnt);
			kt->pfrkt_cnt = 0;
		}
	}
	return (0);
}

int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0,
	    !(flags & PFR_FLAG_USERIOCTL));
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		q = pfr_lookup_addr(tmpkt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad.pfra_fback = PFR_FB_ADDED;
			else if ((p->pfrke_flags & PFRKE_FLAG_NOT) !=
			    ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else
				ad.pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(&ad);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &workq, tzero);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s, xdel = 0, log = 1;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * There are two algorithms to choose from here, with:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * one is O(N) and is better for large 'n',
	 * one is O(n*LOG(N)) and is better for small 'n'.
	 *
	 * The following code tries to decide which one is best.
	 */
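	/*
	 * Rough example: a table holding N = 65536 addresses leaves
	 * log = 18 after the loop below (one more than the number of
	 * halvings of N), so a request deleting more than
	 * 65536/18 ~= 3640 addresses takes the full-scan branch, while
	 * smaller requests do individual lookups instead.
	 */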
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
		log++;
	if (size > kt->pfrkt_cnt/log) {
		/* full table scan */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete */
		for (i = 0; i < size; i++) {
			if (COPYIN(addr+i, &ad, sizeof(ad), flags))
				return (EFAULT);
			if (pfr_validate_addr(&ad))
				return (EINVAL);
			p = pfr_lookup_addr(kt, &ad, 1);
			if (p != NULL)
				p->pfrke_flags &= ~PFRKE_FLAG_MARK;
		}
	}
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad.pfra_fback = PFR_FB_NONE;
			else if ((p->pfrke_flags & PFRKE_FLAG_NOT) !=
			    ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_flags & PFRKE_FLAG_MARK)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else
				ad.pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL &&
		    (p->pfrke_flags & PFRKE_FLAG_NOT) == ad.pfra_not &&
		    !(p->pfrke_flags & PFRKE_FLAG_MARK)) {
			p->pfrke_flags |= PFRKE_FLAG_MARK;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}

int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags,
    u_int32_t ignore_pfrt_flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s, xadd = 0, xdel = 0, xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
	    PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0,
	    !(flags & PFR_FLAG_USERIOCTL));
	if (tmpkt == NULL)
		return (ENOMEM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_flags & PFRKE_FLAG_MARK) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_flags |= PFRKE_FLAG_MARK;
			if ((p->pfrke_flags & PFRKE_FLAG_NOT) != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			if (COPYOUT(&ad, addr+size+i, sizeof(ad), flags))
				senderr(EFAULT);
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
	int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, xmatch = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			return (EFAULT);
		if (pfr_validate_addr(&ad))
			return (EINVAL);
		if (ADDR_NETWORK(&ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, &ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(&ad, p);
		ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    ((p->pfrke_flags & PFRKE_FLAG_NOT) ?
		    PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !(p->pfrke_flags & PFRKE_FLAG_NOT))
			xmatch++;
		if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
			return (EFAULT);
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}

int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
	int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_addrs: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
	int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv, s;
	long			 tzero = time_second;

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s, xzero = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad.pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_kentries(&workq, 0, 0);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}

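/*
 * Sanity-check a pfr_addr: the prefix length must fit the address
 * family and all bits past the prefix must be zero, so e.g. 10.0.0.0/24
 * passes while 10.0.0.1/24 or 10.0.0.0/40 is rejected.
 */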
int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
#ifdef INET
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
#endif /* INET6 */
	default:
		return (-1);
	}
	if (ad->pfra_net < 128 &&
		(((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
			return (-1);
	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
		if (((caddr_t)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback)
		return (-1);
	return (0);
}

void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
	int *naddr, int sweep)
{
	struct pfr_walktree	w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
		if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}

void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
	if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
	if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
}

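/*
 * Look up an address in a table.  Network entries are looked up with an
 * explicit mask via rn_lookup(); host entries use longest-prefix
 * rn_match(), where "exact" additionally rejects a covering network
 * entry so that only a true host match is returned.
 */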
struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_node_head	*head;
	struct pfr_kentry	*ke;
	int			 s;

	bzero(&sa, sizeof(sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = kt->pfrkt_ip4;
	} else if (ad->pfra_af == AF_INET6) {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = kt->pfrkt_ip6;
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		s = splsoftnet(); /* rn_lookup makes use of globals */
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
		splx(s);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}

struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad)
{
	struct pfr_kentry_all	*ke;

	ke = pool_get(&pfr_kentry_pl[ad->pfra_type], PR_NOWAIT | PR_ZERO);
	if (ke == NULL)
		return (NULL);

	ke->pfrke_type = ad->pfra_type;

	switch (ke->pfrke_type) {
	case PFRKE_PLAIN:
		break;
	case PFRKE_ROUTE:
		ke->pfrke_rkif = pfi_kif_get(ad->pfra_ifname);
		if (ke->pfrke_rkif)
			pfi_kif_ref(ke->pfrke_rkif, PFI_KIF_REF_ROUTE);
		break;
	default:
		panic("unknown pfrke_type %d\n", ke->pfrke_type);
		break;
	}

	if (ad->pfra_af == AF_INET)
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
	else if (ad->pfra_af == AF_INET6)
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	if (ad->pfra_not)
		ke->pfrke_flags |= PFRKE_FLAG_NOT;
	return ((struct pfr_kentry *)ke);
}

void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p, *q;

	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
		q = SLIST_NEXT(p, pfrke_workq);
		pfr_destroy_kentry(p);
	}
}

void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	if (ke->pfrke_counters)
		pool_put(&pfr_kcounters_pl, ke->pfrke_counters);
	pool_put(&pfr_kentry_pl[ke->pfrke_type], ke);
}

void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			printf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}

int
pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv;

	p = pfr_lookup_addr(kt, ad, 1);
	if (p != NULL)
		return (0);
	p = pfr_create_kentry(ad);
	if (p == NULL)
		return (EINVAL);

	rv = pfr_route_kentry(kt, p);
	if (rv)
		return (rv);

	p->pfrke_tzero = tzero;
	kt->pfrkt_cnt++;

	return (0);
}

void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;
	int			 n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		n++;
	}
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);
}

void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq)
		pfr_unroute_kentry(kt, p);
}

void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
{
	struct pfr_kentry	*p;
	int			 s;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		s = splsoftnet();
		if (negchange)
			p->pfrke_flags ^= PFRKE_FLAG_NOT;
		if (p->pfrke_counters) {
			pool_put(&pfr_kcounters_pl, p->pfrke_counters);
			p->pfrke_counters = NULL;
		}
		splx(s);
		p->pfrke_tzero = tzero;
	}
}

void
pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
{
	struct pfr_addr	ad;
	int		i;

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			break;
		ad.pfra_fback = PFR_FB_NONE;
		if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
			break;
	}
}

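/*
 * Build a netmask sockaddr for the given prefix length: e.g. net = 20
 * on AF_INET yields htonl(-1 << 12), i.e. 255.255.240.0; for AF_INET6
 * the mask is filled in 32 bits at a time.
 */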
void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
	int	i;

	bzero(sa, sizeof(*sa));
	if (af == AF_INET) {
		sa->sin.sin_len = sizeof(sa->sin);
		sa->sin.sin_family = AF_INET;
		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
	} else if (af == AF_INET6) {
		sa->sin6.sin6_len = sizeof(sa->sin6);
		sa->sin6.sin6_family = AF_INET6;
		for (i = 0; i < 4; i++) {
			if (net <= 32) {
				sa->sin6.sin6_addr.s6_addr32[i] =
				    net ? htonl(-1 << (32-net)) : 0;
				break;
			}
			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
			net -= 32;
		}
	}
}

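/*
 * Attach a kentry to the table's radix tree.  The pair of radix_node
 * slots lives inside the kentry itself (pfrke_node), so no allocation
 * happens here; a NULL return from rn_addroute() (typically a
 * duplicate key) is reported as failure.
 */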
int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head;
	int			 s;

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	s = splsoftnet();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node, 0);
	} else
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node, 0);
	splx(s);

	return (rn == NULL ? -1 : 0);
}

int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head;
	int			 s;

	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	s = splsoftnet();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete(&ke->pfrke_sa, &mask, head, NULL);
	} else
		rn = rn_delete(&ke->pfrke_sa, NULL, head, NULL);
	splx(s);

	if (rn == NULL) {
		printf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}

void
pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
{
	bzero(ad, sizeof(*ad));
	if (ke == NULL)
		return;
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	if (ke->pfrke_flags & PFRKE_FLAG_NOT)
		ad->pfra_not = 1;
	if (ad->pfra_af == AF_INET)
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
	else if (ad->pfra_af == AF_INET6)
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
	if (ke->pfrke_type == PFRKE_ROUTE &&
	    ((struct pfr_kentry_route *)ke)->kif != NULL)
		strlcpy(ad->pfra_ifname,
		    ((struct pfr_kentry_route *)ke)->kif->pfik_name,
		    IFNAMSIZ);
}

int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;
	int			 s, flags = w->pfrw_flags;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_flags &= ~PFRKE_FLAG_MARK;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_flags & PFRKE_FLAG_MARK)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		if (w->pfrw_free-- > 0) {
			struct pfr_addr ad;

			pfr_copyout_addr(&ad, ke);
			if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
				return (EFAULT);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_addr(&as.pfras_a, ke);

			s = splsoftnet();
			if (ke->pfrke_counters) {
				bcopy(ke->pfrke_counters->pfrkc_packets,
				    as.pfras_packets, sizeof(as.pfras_packets));
				bcopy(ke->pfrke_counters->pfrkc_bytes,
				    as.pfras_bytes, sizeof(as.pfras_bytes));
			} else {
				bzero(as.pfras_packets, sizeof(as.pfras_packets));
				bzero(as.pfras_bytes, sizeof(as.pfras_bytes));
				as.pfras_a.pfra_fback = PFR_FB_NOCOUNT;
			}
			splx(s);
			as.pfras_tzero = ke->pfrke_tzero;

			if (COPYOUT(&as, w->pfrw_astats, sizeof(as), flags))
				return (EFAULT);
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_flags & PFRKE_FLAG_NOT)
			break; /* negative entries are ignored */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
		if (ke->pfrke_af == AF_INET) {
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
			    &pfr_mask, AF_INET);
		} else if (ke->pfrke_af == AF_INET6) {
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
			    &pfr_mask, AF_INET6);
		}
		break;
	}
	return (0);
}

int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 s, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, s, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			senderr(EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (p == NULL)
				senderr(ENOMEM);
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}

int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl), flags))
			return (EFAULT);
	}
	if (n) {
		printf("pfr_get_tables: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 s, n, nn;
	long			 tzero = time_second;

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (!(flags & PFR_FLAG_ATOMIC))
			s = splsoftnet();
		if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl), flags)) {
			splx(s);
			return (EFAULT);
		}
		if (!(flags & PFR_FLAG_ATOMIC))
			splx(s);
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (n) {
		printf("pfr_get_tstats: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, s, xzero = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}

int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
	int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

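/*
 * Second step of a two-phase table load: under a valid ticket,
 * addresses are staged into a detached "shadow" ktable hung off
 * kt->pfrkt_shadow; nothing becomes visible to pf until
 * pfr_ina_commit() folds the shadow in (or pfr_ina_rollback() drops it).
 */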
int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1,
		    !(flags & PFR_FLAG_USERIOCTL));
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1,
		    !(flags & PFR_FLAG_USERIOCTL));
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	shadow = pfr_create_ktable(tbl, 0, 0, !(flags & PFR_FLAG_USERIOCTL));
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(&ad);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}

int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p, *q;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 s, xadd = 0, xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
			q = SLIST_NEXT(p, pfrkt_workq);
			pfr_commit_ktable(p, tzero);
		}
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}

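/*
 * Fold a shadow table into its live counterpart: a shadow defined
 * without addresses (NO_ADDRESSES) only resets statistics, a shadow for
 * an ACTIVE table is merged entry by entry, and for an inactive table
 * the radix heads are simply swapped in wholesale.
 */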
void
pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q, *next;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				if ((q->pfrke_flags & PFRKE_FLAG_NOT) !=
				   (p->pfrke_flags & PFRKE_FLAG_NOT))
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_flags |= PFRKE_FLAG_MARK;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
		& ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}

int
pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
{
	int i;

	if (!tbl->pfrt_name[0])
		return (-1);
	if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
		return (-1);
	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
		return (-1);
	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
		if (tbl->pfrt_name[i])
			return (-1);
	if (pfr_fix_anchor(tbl->pfrt_anchor))
		return (-1);
	if (tbl->pfrt_flags & ~allowedflags)
		return (-1);
	return (0);
}

/*
 * Rewrite anchors referenced by tables to remove slashes
 * and check for validity.
 */
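/*
 * e.g. "/foo/bar" is shifted in place to "foo/bar"; anchors that are
 * not NUL-terminated within MAXPATHLEN are rejected.
 */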
1783 int
1784 pfr_fix_anchor(char *anchor)
1785 {
1786 	size_t siz = MAXPATHLEN;
1787 	int i;
1788 
1789 	if (anchor[0] == '/') {
1790 		char *path;
1791 		int off;
1792 
1793 		path = anchor;
1794 		off = 1;
1795 		while (*++path == '/')
1796 			off++;
1797 		bcopy(path, anchor, siz - off);
1798 		memset(anchor + siz - off, 0, off);
1799 	}
1800 	if (anchor[siz - 1])
1801 		return (-1);
1802 	for (i = strlen(anchor); i < siz; i++)
1803 		if (anchor[i])
1804 			return (-1);
1805 	return (0);
1806 }
1807 
1808 int
1809 pfr_table_count(struct pfr_table *filter, int flags)
1810 {
1811 	struct pf_ruleset *rs;
1812 
1813 	if (flags & PFR_FLAG_ALLRSETS)
1814 		return (pfr_ktable_cnt);
1815 	if (filter->pfrt_anchor[0]) {
1816 		rs = pf_find_ruleset(filter->pfrt_anchor);
1817 		return ((rs != NULL) ? rs->tables : -1);
1818 	}
1819 	return (pf_main_ruleset.tables);
1820 }
1821 
1822 int
1823 pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
1824 {
1825 	if (flags & PFR_FLAG_ALLRSETS)
1826 		return (0);
1827 	if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
1828 		return (1);
1829 	return (0);
1830 }
1831 
1832 void
1833 pfr_insert_ktables(struct pfr_ktableworkq *workq)
1834 {
1835 	struct pfr_ktable	*p;
1836 
1837 	SLIST_FOREACH(p, workq, pfrkt_workq)
1838 		pfr_insert_ktable(p);
1839 }
1840 
1841 void
1842 pfr_insert_ktable(struct pfr_ktable *kt)
1843 {
1844 	RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
1845 	pfr_ktable_cnt++;
1846 	if (kt->pfrkt_root != NULL)
1847 		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
1848 			pfr_setflags_ktable(kt->pfrkt_root,
1849 			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
1850 }
1851 
1852 void
1853 pfr_setflags_ktables(struct pfr_ktableworkq *workq)
1854 {
1855 	struct pfr_ktable	*p, *q;
1856 
1857 	for (p = SLIST_FIRST(workq); p; p = q) {
1858 		q = SLIST_NEXT(p, pfrkt_workq);
1859 		pfr_setflags_ktable(p, p->pfrkt_nflags);
1860 	}
1861 }
1862 
1863 void
1864 pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
1865 {
1866 	struct pfr_kentryworkq	addrq;
1867 
1868 	if (!(newf & PFR_TFLAG_REFERENCED) &&
1869 	    !(newf & PFR_TFLAG_PERSIST))
1870 		newf &= ~PFR_TFLAG_ACTIVE;
1871 	if (!(newf & PFR_TFLAG_ACTIVE))
1872 		newf &= ~PFR_TFLAG_USRMASK;
1873 	if (!(newf & PFR_TFLAG_SETMASK)) {
1874 		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
1875 		if (kt->pfrkt_root != NULL)
1876 			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
1877 				pfr_setflags_ktable(kt->pfrkt_root,
1878 				    kt->pfrkt_root->pfrkt_flags &
1879 					~PFR_TFLAG_REFDANCHOR);
1880 		pfr_destroy_ktable(kt, 1);
1881 		pfr_ktable_cnt--;
1882 		return;
1883 	}
1884 	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
1885 		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1886 		pfr_remove_kentries(kt, &addrq);
1887 	}
1888 	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
1889 		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1890 		kt->pfrkt_shadow = NULL;
1891 	}
1892 	kt->pfrkt_flags = newf;
1893 }
1894 
1895 void
1896 pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
1897 {
1898 	struct pfr_ktable	*p;
1899 
1900 	SLIST_FOREACH(p, workq, pfrkt_workq)
1901 		pfr_clstats_ktable(p, tzero, recurse);
1902 }
1903 
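/*
 * Clear a table's statistics and, with "recurse", those of all its
 * addresses.  The counters are zeroed at splsoftnet() so the reset
 * is atomic with respect to the packet path.
 */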
1904 void
1905 pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
1906 {
1907 	struct pfr_kentryworkq	 addrq;
1908 	int			 s;
1909 
1910 	if (recurse) {
1911 		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1912 		pfr_clstats_kentries(&addrq, tzero, 0);
1913 	}
1914 	s = splsoftnet();
1915 	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
1916 	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
1917 	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
1918 	splx(s);
1919 	kt->pfrkt_tzero = tzero;
1920 }
1921 
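/*
 * Allocate and initialize a new ktable.  "intr" selects a
 * non-sleeping pool allocation for callers that must not sleep;
 * with "attachruleset" the table is bound to (and counted in) its
 * ruleset.  Returns NULL on allocation failure.
 */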
1922 struct pfr_ktable *
1923 pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset,
1924     int intr)
1925 {
1926 	struct pfr_ktable	*kt;
1927 	struct pf_ruleset	*rs;
1928 
1929 	if (intr)
1930 		kt = pool_get(&pfr_ktable_pl, PR_NOWAIT|PR_ZERO|PR_LIMITFAIL);
1931 	else
1932 		kt = pool_get(&pfr_ktable_pl, PR_WAITOK|PR_ZERO|PR_LIMITFAIL);
1933 	if (kt == NULL)
1934 		return (NULL);
1935 	kt->pfrkt_t = *tbl;
1936 
1937 	if (attachruleset) {
1938 		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
1939 		if (!rs) {
1940 			pfr_destroy_ktable(kt, 0);
1941 			return (NULL);
1942 		}
1943 		kt->pfrkt_rs = rs;
1944 		rs->tables++;
1945 	}
1946 
1947 	if (!rn_inithead((void **)&kt->pfrkt_ip4,
1948 	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
1949 	    !rn_inithead((void **)&kt->pfrkt_ip6,
1950 	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
1951 		pfr_destroy_ktable(kt, 0);
1952 		return (NULL);
1953 	}
1954 	kt->pfrkt_tzero = tzero;
1955 
1956 	return (kt);
1957 }
1958 
1959 void
1960 pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
1961 {
1962 	struct pfr_ktable	*p, *q;
1963 
1964 	for (p = SLIST_FIRST(workq); p; p = q) {
1965 		q = SLIST_NEXT(p, pfrkt_workq);
1966 		pfr_destroy_ktable(p, flushaddr);
1967 	}
1968 }
1969 
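/*
 * Tear down a ktable: optionally flush its addresses, free both
 * radix heads, recursively destroy any shadow table, and release
 * the ruleset reference before returning the table to its pool.
 */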
1970 void
1971 pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
1972 {
1973 	struct pfr_kentryworkq	 addrq;
1974 
1975 	if (flushaddr) {
1976 		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1977 		pfr_clean_node_mask(kt, &addrq);
1978 		pfr_destroy_kentries(&addrq);
1979 	}
1980 	if (kt->pfrkt_ip4 != NULL)
1981 		free((caddr_t)kt->pfrkt_ip4, M_RTABLE);
1982 	if (kt->pfrkt_ip6 != NULL)
1983 		free((caddr_t)kt->pfrkt_ip6, M_RTABLE);
1984 	if (kt->pfrkt_shadow != NULL)
1985 		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
1986 	if (kt->pfrkt_rs != NULL) {
1987 		kt->pfrkt_rs->tables--;
1988 		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
1989 	}
1990 	pool_put(&pfr_ktable_pl, kt);
1991 }
1992 
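/*
 * Ordering function for the ktable red-black tree: compare by
 * table name first, then by anchor path.
 */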
1993 int
1994 pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
1995 {
1996 	int d;
1997 
1998 	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
1999 		return (d);
2000 	return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
2001 }
2002 
2003 struct pfr_ktable *
2004 pfr_lookup_table(struct pfr_table *tbl)
2005 {
2006 	/* struct pfr_ktable starts with a struct pfr_table, so the cast is safe */
2007 	return (RB_FIND(pfr_ktablehead, &pfr_ktables,
2008 	    (struct pfr_ktable *)tbl));
2009 }
2010 
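/*
 * Test whether an address matches a table, falling back to the
 * root table of an anchor if the attached table itself is not
 * active.  Negated entries (PFRKE_FLAG_NOT) count as non-matches;
 * the match/nomatch statistics are updated either way.
 */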
2011 int
2012 pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
2013 {
2014 	struct pfr_kentry	*ke = NULL;
2015 	int			 match;
2016 
2017 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2018 		kt = kt->pfrkt_root;
2019 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2020 		return (0);
2021 
2022 	switch (af) {
2023 #ifdef INET
2024 	case AF_INET:
2025 		pfr_sin.sin_addr.s_addr = a->addr32[0];
2026 		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
2027 		if (ke && KENTRY_RNF_ROOT(ke))
2028 			ke = NULL;
2029 		break;
2030 #endif /* INET */
2031 #ifdef INET6
2032 	case AF_INET6:
2033 		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
2034 		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
2035 		if (ke && KENTRY_RNF_ROOT(ke))
2036 			ke = NULL;
2037 		break;
2038 #endif /* INET6 */
2039 	}
2040 	match = (ke && !(ke->pfrke_flags & PFRKE_FLAG_NOT));
2041 	if (match)
2042 		kt->pfrkt_match++;
2043 	else
2044 		kt->pfrkt_nomatch++;
2045 	return (match);
2046 }
2047 
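/*
 * Account a packet against a table's direction/operation counters
 * and, when PFR_TFLAG_COUNTERS is set, against the matching entry's
 * own counters (allocated lazily; skipped if the pool is exhausted).
 * A mismatch between the lookup result and the rule's "notrule"
 * expectation is recorded under PFR_OP_XPASS.
 */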
2048 void
2049 pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
2050     u_int64_t len, int dir_out, int op_pass, int notrule)
2051 {
2052 	struct pfr_kentry	*ke = NULL;
2053 
2054 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2055 		kt = kt->pfrkt_root;
2056 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2057 		return;
2058 
2059 	switch (af) {
2060 #ifdef INET
2061 	case AF_INET:
2062 		pfr_sin.sin_addr.s_addr = a->addr32[0];
2063 		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
2064 		if (ke && KENTRY_RNF_ROOT(ke))
2065 			ke = NULL;
2066 		break;
2067 #endif /* INET */
2068 #ifdef INET6
2069 	case AF_INET6:
2070 		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
2071 		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
2072 		if (ke && KENTRY_RNF_ROOT(ke))
2073 			ke = NULL;
2074 		break;
2075 #endif /* INET6 */
2076 	default:
2077 		;
2078 	}
2079 	if ((ke == NULL || (ke->pfrke_flags & PFRKE_FLAG_NOT)) != notrule) {
2080 		if (op_pass != PFR_OP_PASS)
2081 			printf("pfr_update_stats: assertion failed.\n");
2082 		op_pass = PFR_OP_XPASS;
2083 	}
2084 	kt->pfrkt_packets[dir_out][op_pass]++;
2085 	kt->pfrkt_bytes[dir_out][op_pass] += len;
2086 	if (ke != NULL && op_pass != PFR_OP_XPASS &&
2087 	    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
2088 		if (ke->pfrke_counters == NULL)
2089 			ke->pfrke_counters = pool_get(&pfr_kcounters_pl,
2090 			    PR_NOWAIT | PR_ZERO);
2091 		if (ke->pfrke_counters != NULL) {
2092 			ke->pfrke_counters->pfrkc_packets[dir_out][op_pass]++;
2093 			ke->pfrke_counters->pfrkc_bytes[dir_out][op_pass] += len;
2094 		}
2095 	}
2096 }
2097 
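/*
 * Find or create the ktable behind a table reference in a rule.
 * Tables inside an anchor additionally get a root table in the main
 * ruleset (created on demand) so lookups can fall back to it.  The
 * rule reference count is bumped and the table marked REFERENCED.
 */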
2098 struct pfr_ktable *
2099 pfr_attach_table(struct pf_ruleset *rs, char *name, int intr)
2100 {
2101 	struct pfr_ktable	*kt, *rt;
2102 	struct pfr_table	 tbl;
2103 	struct pf_anchor	*ac = rs->anchor;
2104 
2105 	bzero(&tbl, sizeof(tbl));
2106 	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
2107 	if (ac != NULL)
2108 		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
2109 	kt = pfr_lookup_table(&tbl);
2110 	if (kt == NULL) {
2111 		kt = pfr_create_ktable(&tbl, time_second, 1, intr);
2112 		if (kt == NULL)
2113 			return (NULL);
2114 		if (ac != NULL) {
2115 			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
2116 			rt = pfr_lookup_table(&tbl);
2117 			if (rt == NULL) {
2118 				rt = pfr_create_ktable(&tbl, 0, 1, intr);
2119 				if (rt == NULL) {
2120 					pfr_destroy_ktable(kt, 0);
2121 					return (NULL);
2122 				}
2123 				pfr_insert_ktable(rt);
2124 			}
2125 			kt->pfrkt_root = rt;
2126 		}
2127 		pfr_insert_ktable(kt);
2128 	}
2129 	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
2130 		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
2131 	return (kt);
2132 }
2133 
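/*
 * Drop a rule's reference on a table; the last reference clears
 * PFR_TFLAG_REFERENCED, which may deactivate or destroy the table
 * via pfr_setflags_ktable().  A non-positive reference count is
 * reported rather than decremented.
 */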
2134 void
2135 pfr_detach_table(struct pfr_ktable *kt)
2136 {
2137 	if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
2138 		printf("pfr_detach_table: refcount = %d.\n",
2139 		    kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
2140 	else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
2141 		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
2142 }
2143 
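/*
 * Select an address from the table for a round-robin address pool.
 * "*pidx" and "counter" carry the caller's position between calls,
 * letting the search resume where it left off; nested blocks are
 * stepped over so that only the enclosing network is used, and the
 * index wraps around at the end of the table.  Returns 0 on
 * success, 1 if the table is empty and -1 if it is not usable.
 */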
2144 int
2145 pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
2146     struct pf_addr **raddr, struct pf_addr **rmask, struct pfi_kif **kif,
2147     sa_family_t af)
2148 {
2149 	struct pfr_kentry	*ke, *ke2;
2150 	struct pf_addr		*addr;
2151 	union sockaddr_union	 mask;
2152 	int			 idx = -1, use_counter = 0;
2153 
2154 	if (af == AF_INET)
2155 		addr = (struct pf_addr *)&pfr_sin.sin_addr;
2156 	else if (af == AF_INET6)
2157 		addr = (struct pf_addr *)&pfr_sin6.sin6_addr;
	else
		return (-1);	/* unsupported af: addr would be used uninitialized */
2158 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2159 		kt = kt->pfrkt_root;
2160 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2161 		return (-1);
2162 
2163 	if (pidx != NULL)
2164 		idx = *pidx;
2165 	if (counter != NULL && idx >= 0)
2166 		use_counter = 1;
2167 	if (idx < 0)
2168 		idx = 0;
2169 
2170 _next_block:
2171 	ke = pfr_kentry_byidx(kt, idx, af);
2172 	if (ke == NULL) {
2173 		/* no entry at this idx - wrap around to the start */
2174 		idx = 0;
2175 		ke = pfr_kentry_byidx(kt, idx, af);
2176 		if (ke == NULL) {
2177 			kt->pfrkt_nomatch++;
2178 			return (1);
2179 		}
2180 	}
2181 	pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
2182 	*raddr = SUNION2PF(&ke->pfrke_sa, af);
2183 	*rmask = SUNION2PF(&pfr_mask, af);
2184 
2185 	if (use_counter) {
2186 		/* is supplied address within block? */
2187 		if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
2188 			/* no, go to next block in table */
2189 			idx++;
2190 			use_counter = 0;
2191 			goto _next_block;
2192 		}
2193 		PF_ACPY(addr, counter, af);
2194 	} else {
2195 		/* use first address of block */
2196 		PF_ACPY(addr, *raddr, af);
2197 	}
2198 
2199 	if (!KENTRY_NETWORK(ke)) {
2200 		/* this is a single IP address - no possible nested block */
2201 		PF_ACPY(counter, addr, af);
2202 		*pidx = idx;
2203 		kt->pfrkt_match++;
2204 		if (ke->pfrke_type == PFRKE_ROUTE)
2205 			*kif = ((struct pfr_kentry_route *)ke)->kif;
2206 		return (0);
2207 	}
2208 	for (;;) {
2209 		/* we don't want to use a nested block */
2210 		if (af == AF_INET)
2211 			ke2 = (struct pfr_kentry *)rn_match(&pfr_sin,
2212 			    kt->pfrkt_ip4);
2213 		else if (af == AF_INET6)
2214 			ke2 = (struct pfr_kentry *)rn_match(&pfr_sin6,
2215 			    kt->pfrkt_ip6);
2216 		/* no need to check KENTRY_RNF_ROOT() here */
2217 		if (ke2 == ke) {
2218 			/* lookup returned the same block - perfect */
2219 			PF_ACPY(counter, addr, af);
2220 			*pidx = idx;
2221 			kt->pfrkt_match++;
2222 			if (ke->pfrke_type == PFRKE_ROUTE)
2223 				*kif = ((struct pfr_kentry_route *)ke)->kif;
2224 			return (0);
2225 		}
2226 
2227 		/* we need to increase the counter past the nested block */
2228 		pfr_prepare_network(&mask, af, ke2->pfrke_net);
2229 		PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
2230 		PF_AINC(addr, af);
2231 		if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
2232 			/* ok, we reached the end of our main block */
2233 			/* go to next block in table */
2234 			idx++;
2235 			use_counter = 0;
2236 			goto _next_block;
2237 		}
2238 	}
2239 }
2240 
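/*
 * Return the idx-th entry of the given address family, found by
 * walking the corresponding radix tree with a PFRW_POOL_GET
 * walker; NULL for unsupported families.
 */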
2241 struct pfr_kentry *
2242 pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
2243 {
2244 	struct pfr_walktree	w;
2245 
2246 	bzero(&w, sizeof(w));
2247 	w.pfrw_op = PFRW_POOL_GET;
2248 	w.pfrw_cnt = idx;
2249 
2250 	switch (af) {
2251 #ifdef INET
2252 	case AF_INET:
2253 		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2254 		return (w.pfrw_kentry);
2255 #endif /* INET */
2256 #ifdef INET6
2257 	case AF_INET6:
2258 		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2259 		return (w.pfrw_kentry);
2260 #endif /* INET6 */
2261 	default:
2262 		return (NULL);
2263 	}
2264 }
2265 
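/*
 * Re-evaluate a dynamic address (struct pfi_dynaddr) against the
 * table: the v4/v6 address counts are reset and both radix trees
 * are walked, filtered by the dynaddr's address family, at
 * splsoftnet().
 */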
2266 void
2267 pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
2268 {
2269 	struct pfr_walktree	w;
2270 	int			s;
2271 
2272 	bzero(&w, sizeof(w));
2273 	w.pfrw_op = PFRW_DYNADDR_UPDATE;
2274 	w.pfrw_dyn = dyn;
2275 
2276 	s = splsoftnet();
2277 	dyn->pfid_acnt4 = 0;
2278 	dyn->pfid_acnt6 = 0;
2279 	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
2280 		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2281 	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
2282 		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2283 	splx(s);
2284 }
2285