/*	$OpenBSD: pf_table.c,v 1.38 2003/06/24 13:52:50 henning Exp $	*/

/*
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>

#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/ip_ipsp.h>
#include <net/pfvar.h>

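/*
 * Convenience macros.  ACCEPT_FLAGS() fails an ioctl with EINVAL when
 * any recognized flag outside the per-operation allowed set was passed;
 * FILLIN_SIN()/FILLIN_SIN6() populate a sockaddr for the radix-tree
 * routines.
 */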
#define ACCEPT_FLAGS(oklist)			\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)

#define	FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define	FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

#define SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

#define	AF_BITS(af)		(((af)==AF_INET)?32:128)
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
#define KENTRY_RNF_ROOT(ke) \
		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define NO_ADDRESSES		(-1)
#define ENQUEUE_UNMARKED_ONLY	(1)
#define INVERT_NEG_FLAG		(1)

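/*
 * Context handed to pfr_walktree() while traversing a table's radix
 * trees: pfrw_op selects the action, the pfrw_1 union carries the
 * op-specific cursor, and pfrw_free counts either the remaining
 * userland buffer slots or (as pfrw_cnt) the entries visited.
 */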
struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS
	}	 pfrw_op;
	union {
		struct pfr_addr		*pfrw1_addr;
		struct pfr_astats	*pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
	}	 pfrw_1;
	int	 pfrw_free;
};
#define pfrw_addr	pfrw_1.pfrw1_addr
#define pfrw_astats	pfrw_1.pfrw1_astats
#define pfrw_workq	pfrw_1.pfrw1_workq
#define pfrw_cnt	pfrw_free

#define senderr(e)	do { rv = (e); goto _bad; } while (0)

struct pool		 pfr_ktable_pl;
struct pool		 pfr_kentry_pl;
struct sockaddr_in	 pfr_sin;
struct sockaddr_in6	 pfr_sin6;

void			 pfr_copyout_addr(struct pfr_addr *,
			    struct pfr_kentry *ke);
int			 pfr_validate_addr(struct pfr_addr *);
void			 pfr_enqueue_addrs(struct pfr_ktable *,
			    struct pfr_kentryworkq *, int *, int);
void			 pfr_mark_addrs(struct pfr_ktable *);
struct pfr_kentry	*pfr_lookup_addr(struct pfr_ktable *,
			    struct pfr_addr *, int);
struct pfr_kentry	*pfr_create_kentry(struct pfr_addr *);
void			 pfr_destroy_kentries(struct pfr_kentryworkq *);
void			 pfr_destroy_kentry(struct pfr_kentry *);
void			 pfr_insert_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *, long);
void			 pfr_remove_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
void			 pfr_clstats_kentries(struct pfr_kentryworkq *, long,
			    int);
void			 pfr_reset_feedback(struct pfr_addr *, int);
void			 pfr_prepare_network(union sockaddr_union *, int, int);
int			 pfr_route_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_unroute_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_walktree(struct radix_node *, void *);
int			 pfr_validate_table(struct pfr_table *, int);
void			 pfr_commit_ktable(struct pfr_ktable *, long);
void			 pfr_insert_ktables(struct pfr_ktableworkq *);
void			 pfr_insert_ktable(struct pfr_ktable *);
void			 pfr_setflags_ktables(struct pfr_ktableworkq *);
void			 pfr_setflags_ktable(struct pfr_ktable *, int);
void			 pfr_clstats_ktables(struct pfr_ktableworkq *, long,
			    int);
void			 pfr_clstats_ktable(struct pfr_ktable *, long, int);
struct pfr_ktable	*pfr_create_ktable(struct pfr_table *, long, int);
void			 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
void			 pfr_destroy_ktable(struct pfr_ktable *, int);
int			 pfr_ktable_compare(struct pfr_ktable *,
			    struct pfr_ktable *);
struct pfr_ktable	*pfr_lookup_table(struct pfr_table *);
void			 pfr_clean_node_mask(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
int			 pfr_table_count(struct pfr_table *, int);
int			 pfr_skip_table(struct pfr_table *,
			    struct pfr_ktable *, int);

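/*
 * All tables are kept in a global red-black tree, sorted by name,
 * anchor and ruleset (see pfr_ktable_compare()).
 */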
RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

struct pfr_ktablehead	 pfr_ktables;
struct pfr_table	 pfr_nulltable;
int			 pfr_ktable_cnt;
int			 pfr_ticket;

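/*
 * Called once at pf initialization: sets up the table/entry pools and
 * the preformatted sockaddrs reused for radix lookups in the hot path.
 */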
void
pfr_initialize(void)
{
	pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0,
	    "pfrktable", NULL);
	pool_init(&pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry", NULL);

	pfr_sin.sin_len = sizeof(pfr_sin);
	pfr_sin.sin_family = AF_INET;
	pfr_sin6.sin6_len = sizeof(pfr_sin6);
	pfr_sin6.sin6_family = AF_INET6;

	pfr_ticket = 100;
}

int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	int			 s;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC|PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		if (kt->pfrkt_cnt) {
			printf("pfr_clr_addrs: corruption detected (%d).\n",
			    kt->pfrkt_cnt);
			kt->pfrkt_cnt = 0;
		}
	}
	return (0);
}

int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s, xadd = 0;
	long			 tzero = time.tv_sec;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC|PFR_FLAG_DUMMY|PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (copyin(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		q = pfr_lookup_addr(tmpkt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad.pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else
				ad.pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(&ad);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (copyout(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &workq, tzero);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC|PFR_FLAG_DUMMY|PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (copyin(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad.pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else
				ad.pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL && p->pfrke_not == ad.pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (copyout(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	return (rv);
}

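/*
 * Replace the contents of a table: existing entries are marked, those
 * also present in the new list survive (entries whose negation changed
 * go on changeq), genuinely new addresses are staged in a scratch
 * table, and whatever is still unmarked afterwards gets deleted.
 */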
int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s, xadd = 0, xdel = 0, xchange = 0;
	long			 tzero = time.tv_sec;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC|PFR_FLAG_DUMMY|PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (copyin(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			if (copyout(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			if (copyout(&ad, addr+size+i, sizeof(ad)))
				senderr(EFAULT);
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && *size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, xmatch = 0;

	ACCEPT_FLAGS(PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0; i < size; i++) {
		if (copyin(addr+i, &ad, sizeof(ad)))
			return (EFAULT);
		if (pfr_validate_addr(&ad))
			return (EINVAL);
		if (ADDR_NETWORK(&ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, &ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(&ad, p);
		ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not)
			xmatch++;
		if (copyout(&ad, addr+i, sizeof(ad)))
			return (EFAULT);
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}

int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	ACCEPT_FLAGS(0);
	if (pfr_validate_table(tbl, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_addrs: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv, s;
	long			 tzero = time.tv_sec;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC); /* XXX PFR_FLAG_CLSTATS disabled */
	if (pfr_validate_table(tbl, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s, xzero = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC|PFR_FLAG_DUMMY|PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (copyin(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad.pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
			if (copyout(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_kentries(&workq, 0, 0);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	return (rv);
}

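/*
 * Sanity-check an address from userland: known af, prefix length in
 * range, no host bits set beyond the prefix, and clean not/fback
 * fields.
 */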
int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
	default:
		return (-1);
	}
	if (ad->pfra_net < 128 &&
	    (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
		return (-1);
	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
		if (((caddr_t)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback)
		return (-1);
	return (0);
}

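/*
 * Collect a table's entries on a work queue; in sweep mode only the
 * entries left unmarked by pfr_mark_addrs() are picked up.
 */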
void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
    int *naddr, int sweep)
{
	struct pfr_walktree	w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
		if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}

void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
	if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
	if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
}

struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_node_head	*head;
	struct pfr_kentry	*ke;
	int			 s;

	bzero(&sa, sizeof(sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = kt->pfrkt_ip4;
	} else {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = kt->pfrkt_ip6;
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		s = splsoftnet(); /* rn_lookup makes use of globals */
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
		splx(s);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}

struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad)
{
	struct pfr_kentry	*ke;

	ke = pool_get(&pfr_kentry_pl, PR_NOWAIT);
	if (ke == NULL)
		return (NULL);
	bzero(ke, sizeof(*ke));

	if (ad->pfra_af == AF_INET)
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
	else
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	ke->pfrke_not = ad->pfra_not;
	return (ke);
}

void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p, *q;

	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
		q = SLIST_NEXT(p, pfrke_workq);
		pfr_destroy_kentry(p);
	}
}

void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	pool_put(&pfr_kentry_pl, ke);
}

void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			printf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}

void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;
	int			 n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		n++;
	}
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);
}

void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq)
		pfr_unroute_kentry(kt, p);
}

void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
{
	struct pfr_kentry	*p;
	int			 s;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		s = splsoftnet();
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		bzero(p->pfrke_packets, sizeof(p->pfrke_packets));
		bzero(p->pfrke_bytes, sizeof(p->pfrke_bytes));
		splx(s);
		p->pfrke_tzero = tzero;
	}
}

void
pfr_reset_feedback(struct pfr_addr *addr, int size)
{
	struct pfr_addr	ad;
	int		i;

	for (i = 0; i < size; i++) {
		if (copyin(addr+i, &ad, sizeof(ad)))
			break;
		ad.pfra_fback = PFR_FB_NONE;
		if (copyout(&ad, addr+i, sizeof(ad)))
			break;
	}
}

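/*
 * Build the netmask sockaddr for an af/prefix-length pair, suitable as
 * the mask argument of the radix-tree routines.
 */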
void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
	int	i;

	bzero(sa, sizeof(*sa));
	if (af == AF_INET) {
		sa->sin.sin_len = sizeof(sa->sin);
		sa->sin.sin_family = AF_INET;
		/* net == 0 would shift by 32 bits, which is undefined */
		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
	} else {
		sa->sin6.sin6_len = sizeof(sa->sin6);
		sa->sin6.sin6_family = AF_INET6;
		for (i = 0; i < 4; i++) {
			if (net <= 32) {
				sa->sin6.sin6_addr.s6_addr32[i] =
				    net ? htonl(-1 << (32-net)) : 0;
				break;
			}
			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
			net -= 32;
		}
	}
}

int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head;
	int			 s;

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else
		head = kt->pfrkt_ip6;

	s = splsoftnet();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
	} else
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
	splx(s);

	return (rn == NULL ? -1 : 0);
}

int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head;
	int			 s;

	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else
		head = kt->pfrkt_ip6;

	s = splsoftnet();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete(&ke->pfrke_sa, &mask, head);
	} else
		rn = rn_delete(&ke->pfrke_sa, NULL, head);
	splx(s);

	if (rn == NULL) {
		printf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}

void
pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
{
	bzero(ad, sizeof(*ad));
	if (ke == NULL)
		return;
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	ad->pfra_not = ke->pfrke_not;
	if (ad->pfra_af == AF_INET)
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
	else
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
}

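/*
 * rn_walktree() callback, dispatching on pfrw_op (see struct
 * pfr_walktree above); returning nonzero aborts the walk.
 */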
int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;
	int			 s;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_mark)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		if (w->pfrw_free-- > 0) {
			struct pfr_addr ad;

			pfr_copyout_addr(&ad, ke);
			if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
				return (EFAULT);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_addr(&as.pfras_a, ke);

			s = splsoftnet();
			bcopy(ke->pfrke_packets, as.pfras_packets,
			    sizeof(as.pfras_packets));
			bcopy(ke->pfrke_bytes, as.pfras_bytes,
			    sizeof(as.pfras_bytes));
			splx(s);
			as.pfras_tzero = ke->pfrke_tzero;

			if (copyout(&as, w->pfrw_astats, sizeof(as)))
				return (EFAULT);
			w->pfrw_astats++;
		}
		break;
	}
	return (0);
}

int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 s, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC|PFR_FLAG_DUMMY|PFR_FLAG_ALLRSETS);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, s, xadd = 0;
	long			 tzero = time.tv_sec;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC|PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (copyin(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			senderr(EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			bzero(key.pfrkt_ruleset, sizeof(key.pfrkt_ruleset));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}

int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC|PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (copyin(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
    int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	ACCEPT_FLAGS(PFR_FLAG_ALLRSETS);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (copyout(&p->pfrkt_t, tbl++, sizeof(*tbl)))
			return (EFAULT);
	}
	if (n) {
		printf("pfr_get_tables: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
    int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 s, n, nn;
	long			 tzero = time.tv_sec;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC|PFR_FLAG_ALLRSETS);
					/* XXX PFR_FLAG_CLSTATS disabled */
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (!(flags & PFR_FLAG_ATOMIC))
			s = splsoftnet();
		if (copyout(&p->pfrkt_ts, tbl++, sizeof(*tbl))) {
			splx(s);
			return (EFAULT);
		}
		if (!(flags & PFR_FLAG_ATOMIC))
			splx(s);
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (n) {
		printf("pfr_get_tstats: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, s, xzero = 0;
	long			 tzero = time.tv_sec;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC|PFR_FLAG_DUMMY|PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (copyin(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}

int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
    int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC|PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (copyin(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_begin(int *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_setflags_ktables(&workq);
	if (ndel != NULL)
		*ndel = xdel;
	if (ticket != NULL && !(flags & PFR_FLAG_DUMMY))
		*ticket = ++pfr_ticket;
	return (0);
}

int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, int ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, xadd = 0, xaddr = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY|PFR_FLAG_ADDRSTOO);
	if (ticket != pfr_ticket)
		return (EBUSY);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK))
		return (EINVAL);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++) {
		if (copyin(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(&ad);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}

int
pfr_ina_commit(int ticket, int *nadd, int *nchange, int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 s, xadd = 0, xchange = 0;
	long			 tzero = time.tv_sec;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC|PFR_FLAG_DUMMY);
	if (ticket != pfr_ticket)
		return (EBUSY);
	pfr_ticket++;

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		SLIST_FOREACH(p, &workq, pfrkt_workq)
			pfr_commit_ktable(p, tzero);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}

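/*
 * Make the shadow table built by pfr_ina_define() the active version
 * of kt.  A shadow without addresses only clears statistics; if kt
 * already holds addresses the two sets are merged entry by entry,
 * otherwise the radix heads are simply swapped.
 */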
void
pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q, *next;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
	    & ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}

int
pfr_validate_table(struct pfr_table *tbl, int allowedflags)
{
	int i;

	if (!tbl->pfrt_name[0])
		return (-1);
	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
		return (-1);
	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
		if (tbl->pfrt_name[i])
			return (-1);
	if (tbl->pfrt_flags & ~allowedflags)
		return (-1);
	return (0);
}

int
pfr_table_count(struct pfr_table *filter, int flags)
{
	struct pf_ruleset *rs;
	struct pf_anchor *ac;

	if (flags & PFR_FLAG_ALLRSETS)
		return (pfr_ktable_cnt);
	if (filter->pfrt_ruleset[0]) {
		rs = pf_find_ruleset(filter->pfrt_anchor,
		    filter->pfrt_ruleset);
		return ((rs != NULL) ? rs->tables : -1);
	}
	if (filter->pfrt_anchor[0]) {
		ac = pf_find_anchor(filter->pfrt_anchor);
		return ((ac != NULL) ? ac->tables : -1);
	}
	return (pf_main_ruleset.tables);
}

int
pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
{
	if (flags & PFR_FLAG_ALLRSETS)
		return (0);
	if (strncmp(filter->pfrt_anchor, kt->pfrkt_anchor,
	    PF_ANCHOR_NAME_SIZE))
		return (1);
	if (!filter->pfrt_ruleset[0])
		return (0);
	if (strncmp(filter->pfrt_ruleset, kt->pfrkt_ruleset,
	    PF_RULESET_NAME_SIZE))
		return (1);
	return (0);
}

void
pfr_insert_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_insert_ktable(p);
}

void
pfr_insert_ktable(struct pfr_ktable *kt)
{
	RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
	pfr_ktable_cnt++;
	if (kt->pfrkt_root != NULL)
		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
			pfr_setflags_ktable(kt->pfrkt_root,
			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
}

void
pfr_setflags_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_setflags_ktable(p, p->pfrkt_nflags);
}

void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq	addrq;

	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
		if (kt->pfrkt_root != NULL)
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
				    ~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		pfr_ktable_cnt--;
		return;
	}
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}

void
pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_clstats_ktable(p, tzero, recurse);
}

void
pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
{
	struct pfr_kentryworkq	 addrq;
	int			 s;

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(&addrq, tzero, 0);
	}
	s = splsoftnet();
	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
	splx(s);
	kt->pfrkt_tzero = tzero;
}

struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
{
	struct pfr_ktable	*kt;
	struct pf_ruleset	*rs;

	kt = pool_get(&pfr_ktable_pl, PR_NOWAIT);
	if (kt == NULL)
		return (NULL);
	bzero(kt, sizeof(*kt));
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor,
		    tbl->pfrt_ruleset);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
		if (rs->anchor != NULL)
			rs->anchor->tables++;
	}

	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;

	return (kt);
}

void
pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_destroy_ktable(p, flushaddr);
	}
}

void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq	 addrq;

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		free((caddr_t)kt->pfrkt_ip4, M_RTABLE);
	if (kt->pfrkt_ip6 != NULL)
		free((caddr_t)kt->pfrkt_ip6, M_RTABLE);
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		if (kt->pfrkt_rs->anchor != NULL)
			kt->pfrkt_rs->anchor->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	pool_put(&pfr_ktable_pl, kt);
}

int
pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
{
	int d;

	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
		return (d);
	if ((d = strncmp(p->pfrkt_anchor, q->pfrkt_anchor,
	    PF_ANCHOR_NAME_SIZE)))
		return (d);
	return (strncmp(p->pfrkt_ruleset, q->pfrkt_ruleset,
	    PF_RULESET_NAME_SIZE));
}

struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
	/* struct pfr_ktable starts like a struct pfr_table */
	return (RB_FIND(pfr_ktablehead, &pfr_ktables,
	    (struct pfr_ktable *)tbl));
}

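/*
 * Packet-matching entry point used by pf(4): returns 1 when the
 * address matches a non-negated entry of the table and updates the
 * match/nomatch counters.
 */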
int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry	*ke = NULL;
	int			 match;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (0);

	switch (af) {
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	}
	match = (ke && !ke->pfrke_not);
	if (match)
		kt->pfrkt_match++;
	else
		kt->pfrkt_nomatch++;
	return (match);
}

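/*
 * Account a packet against the table and the entry it matched.
 * notrule carries the rule's view of the lookup; on disagreement the
 * traffic is counted under PFR_OP_XPASS instead of op_pass.
 */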
void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry	*ke = NULL;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	}
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			printf("pfr_update_stats: assertion failed.\n");
		op_pass = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_out][op_pass]++;
	kt->pfrkt_bytes[dir_out][op_pass] += len;
	if (ke != NULL && op_pass != PFR_OP_XPASS) {
		ke->pfrke_packets[dir_out][op_pass]++;
		ke->pfrke_bytes[dir_out][op_pass] += len;
	}
}

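/*
 * Find or create the table referenced by a rule and take a rule
 * reference on it; inside an anchor, the root table of the same name
 * is attached as well.
 */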
struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
	struct pfr_ktable	*kt, *rt;
	struct pfr_table	 tbl;
	struct pf_anchor	*ac = rs->anchor;

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL) {
		strlcpy(tbl.pfrt_anchor, ac->name, sizeof(tbl.pfrt_anchor));
		strlcpy(tbl.pfrt_ruleset, rs->name, sizeof(tbl.pfrt_ruleset));
	}
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time.tv_sec, 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			bzero(tbl.pfrt_ruleset, sizeof(tbl.pfrt_ruleset));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}

void
pfr_detach_table(struct pfr_ktable *kt)
{
	if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
		printf("pfr_detach_table: refcount = %d.\n",
		    kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
	else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
}
1887