xref: /dragonfly/sys/net/pf/pf_ioctl.c (revision 71126e33)
1 /*	$FreeBSD: src/sys/contrib/pf/net/pf_ioctl.c,v 1.12 2004/08/12 14:15:42 mlaier Exp $	*/
2 /*	$OpenBSD: pf_ioctl.c,v 1.112.2.2 2004/07/24 18:28:12 brad Exp $ */
3 /*	$DragonFly: src/sys/net/pf/pf_ioctl.c,v 1.3 2004/09/21 21:20:58 joerg Exp $ */
4 
5 /*
6  * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
7  *
8  * Copyright (c) 2001 Daniel Hartmeier
9  * Copyright (c) 2002,2003 Henning Brauer
10  * All rights reserved.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  *
16  *    - Redistributions of source code must retain the above copyright
17  *      notice, this list of conditions and the following disclaimer.
18  *    - Redistributions in binary form must reproduce the above
19  *      copyright notice, this list of conditions and the following
20  *      disclaimer in the documentation and/or other materials provided
21  *      with the distribution.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
27  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
29  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
31  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
33  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  *
36  * Effort sponsored in part by the Defense Advanced Research Projects
37  * Agency (DARPA) and Air Force Research Laboratory, Air Force
38  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
39  *
40  */
41 
42 #include "opt_inet.h"
43 #include "opt_inet6.h"
44 #include "use_pfsync.h"
45 
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/mbuf.h>
49 #include <sys/filio.h>
50 #include <sys/fcntl.h>
51 #include <sys/socket.h>
52 #include <sys/socketvar.h>
53 #include <sys/kernel.h>
54 #include <sys/time.h>
55 #include <sys/malloc.h>
56 #include <sys/module.h>
57 #include <sys/conf.h>
58 #include <vm/vm_zone.h>
59 
60 #include <net/if.h>
61 #include <net/if_types.h>
62 #include <net/route.h>
63 
64 #include <netinet/in.h>
65 #include <netinet/in_var.h>
66 #include <netinet/in_systm.h>
67 #include <netinet/ip.h>
68 #include <netinet/ip_var.h>
69 #include <netinet/ip_icmp.h>
70 
71 #include <net/pf/pfvar.h>
72 
73 #if NPFSYNC > 0
74 #include <net/pf/if_pfsync.h>
75 #endif /* NPFSYNC > 0 */
76 
77 #ifdef INET6
78 #include <netinet/ip6.h>
79 #include <netinet/in_pcb.h>
80 #endif /* INET6 */
81 
82 #ifdef ALTQ
83 #include <altq/altq.h>
84 #endif
85 
86 #include <machine/limits.h>
87 #include <net/pfil.h>
88 void			 init_zone_var(void);
89 void			 cleanup_pf_zone(void);
90 int			 pfattach(void);
91 int			 pfopen(dev_t, int, int, struct thread *);
92 int			 pfclose(dev_t, int, int, struct thread *);
93 struct pf_pool		*pf_get_pool(char *, char *, u_int32_t,
94 			    u_int8_t, u_int32_t, u_int8_t, u_int8_t, u_int8_t);
95 int			 pf_get_ruleset_number(u_int8_t);
96 void			 pf_init_ruleset(struct pf_ruleset *);
97 void			 pf_mv_pool(struct pf_palist *, struct pf_palist *);
98 void			 pf_empty_pool(struct pf_palist *);
99 int			 pfioctl(dev_t, u_long, caddr_t, int, struct thread *);
100 #ifdef ALTQ
101 int			 pf_begin_altq(u_int32_t *);
102 int			 pf_rollback_altq(u_int32_t);
103 int			 pf_commit_altq(u_int32_t);
104 #endif /* ALTQ */
105 int			 pf_begin_rules(u_int32_t *, int, char *, char *);
106 int			 pf_rollback_rules(u_int32_t, int, char *, char *);
107 int			 pf_commit_rules(u_int32_t, int, char *, char *);
108 
109 extern struct callout	 pf_expire_to;
110 
111 struct pf_rule		 pf_default_rule;
112 
113 #define	TAGID_MAX	 50000
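/*
 * Name <-> id mappings: pf_tags holds rule tag names, pf_qids the ALTQ
 * queue names.  Both lists are managed by the shared tagname2tag()
 * allocator below, which is why PF_QNAME_SIZE has to equal
 * PF_TAG_NAME_SIZE.
 */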
114 TAILQ_HEAD(pf_tags, pf_tagname)	pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
115 				pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);
116 
117 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
118 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
119 #endif
120 static u_int16_t	 tagname2tag(struct pf_tags *, char *);
121 static void		 tag2tagname(struct pf_tags *, u_int16_t, char *);
122 static void		 tag_unref(struct pf_tags *, u_int16_t);
123 
124 #define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x
125 
126 static dev_t	pf_dev;
127 
128 /*
129  * XXX - These are new and need to be checked when moving to a new version
130  */
131 static void		 pf_clear_states(void);
132 static int		 pf_clear_tables(void);
133 static void		 pf_clear_srcnodes(void);
134 /*
135  * XXX - These are new and need to be checked when moving to a new version
136  */
137 
138 /*
139  * Wrapper functions for pfil(9) hooks
140  */
141 static int pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp,
142 		int dir);
143 static int pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp,
144 		int dir);
145 #ifdef INET6
146 static int pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp,
147 		int dir);
148 static int pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp,
149 		int dir);
150 #endif
151 
152 static int 		 hook_pf(void);
153 static int 		 dehook_pf(void);
154 static int 		 shutdown_pf(void);
155 static int 		 pf_load(void);
156 static int 		 pf_unload(void);
157 
158 static struct cdevsw pf_cdevsw = {	    /* XXX convert to port model */
159 	.d_name =	PF_NAME,
160 	.d_maj =	73,		    /* XXX */
161 	.old_open =	pfopen,
162 	.old_close =	pfclose,
163 	.old_ioctl =	pfioctl
164 };
165 
166 static volatile int pf_pfil_hooked = 0;
167 
168 void
169 init_zone_var(void)
170 {
171 	pf_src_tree_pl = pf_rule_pl = NULL;
172 	pf_state_pl = pf_altq_pl = pf_pooladdr_pl = NULL;
173 	pf_frent_pl = pf_frag_pl = pf_cache_pl = pf_cent_pl = NULL;
174 	pf_state_scrub_pl = NULL;
175 	pfr_ktable_pl = pfr_kentry_pl = NULL;
176 }
177 
178 void
179 cleanup_pf_zone(void)
180 {
181 	ZONE_DESTROY(pf_src_tree_pl);
182 	ZONE_DESTROY(pf_rule_pl);
183 	ZONE_DESTROY(pf_state_pl);
184 	ZONE_DESTROY(pf_altq_pl);
185 	ZONE_DESTROY(pf_pooladdr_pl);
186 	ZONE_DESTROY(pf_frent_pl);
187 	ZONE_DESTROY(pf_frag_pl);
188 	ZONE_DESTROY(pf_cache_pl);
189 	ZONE_DESTROY(pf_cent_pl);
190 	ZONE_DESTROY(pfr_ktable_pl);
191 	ZONE_DESTROY(pfr_kentry_pl);
192 	ZONE_DESTROY(pf_state_scrub_pl);
193 	ZONE_DESTROY(pfi_addr_pl);
194 }
195 
196 int
197 pfattach(void)
198 {
199 	u_int32_t *my_timeout = pf_default_rule.timeout;
200 	int error = 1;
201 
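	/*
	 * Assumption: ZONE_CREATE() breaks out of this do/while (0) block
	 * when an allocation fails, leaving error at 1 so the zones are
	 * torn down again below.
	 */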
202 	do {
203 		ZONE_CREATE(pf_src_tree_pl,struct pf_src_node, "pfsrctrpl");
204 		ZONE_CREATE(pf_rule_pl,    struct pf_rule, "pfrulepl");
205 		ZONE_CREATE(pf_state_pl,   struct pf_state, "pfstatepl");
206 		ZONE_CREATE(pf_altq_pl,    struct pf_altq, "pfaltqpl");
207 		ZONE_CREATE(pf_pooladdr_pl,struct pf_pooladdr, "pfpooladdrpl");
208 		ZONE_CREATE(pfr_ktable_pl, struct pfr_ktable, "pfrktable");
209 		ZONE_CREATE(pfr_kentry_pl, struct pfr_kentry, "pfrkentry");
210 		ZONE_CREATE(pf_frent_pl,   struct pf_frent, "pffrent");
211 		ZONE_CREATE(pf_frag_pl,    struct pf_fragment, "pffrag");
212 		ZONE_CREATE(pf_cache_pl,   struct pf_fragment, "pffrcache");
213 		ZONE_CREATE(pf_cent_pl,    struct pf_frcache, "pffrcent");
214 		ZONE_CREATE(pf_state_scrub_pl, struct pf_state_scrub,
215 		    "pfstatescrub");
216 		ZONE_CREATE(pfi_addr_pl,   struct pfi_dynaddr, "pfiaddrpl");
217 		error = 0;
218 	} while (0);
219 	if (error) {
220 		cleanup_pf_zone();
221 		return (error);
222 	}
223 	pfr_initialize();
224 	pfi_initialize();
225 	error = pf_osfp_initialize();
226 	if (error) {
227 		cleanup_pf_zone();
228 		pf_osfp_cleanup();
229 		return (error);
230 	}
231 
232 	pf_pool_limits[PF_LIMIT_STATES].pp = pf_state_pl;
233 	pf_pool_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
234 	pf_pool_limits[PF_LIMIT_FRAGS].pp = pf_frent_pl;
235 	pf_pool_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
236 	/* XXX uma_zone_set_max(pf_pool_limits[PF_LIMIT_STATES].pp,
237 		pf_pool_limits[PF_LIMIT_STATES].limit);
238 	*/
239 
240 	RB_INIT(&tree_src_tracking);
241 	TAILQ_INIT(&pf_anchors);
242 	pf_init_ruleset(&pf_main_ruleset);
243 	TAILQ_INIT(&pf_altqs[0]);
244 	TAILQ_INIT(&pf_altqs[1]);
245 	TAILQ_INIT(&pf_pabuf);
246 	pf_altqs_active = &pf_altqs[0];
247 	pf_altqs_inactive = &pf_altqs[1];
248 	TAILQ_INIT(&state_updates);
249 
250 	/* default rule should never be garbage collected */
251 	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
252 	pf_default_rule.action = PF_PASS;
253 	pf_default_rule.nr = (uint32_t)(-1);
254 
255 	/* initialize default timeouts */
256 	my_timeout[PFTM_TCP_FIRST_PACKET] = 120;	/* First TCP packet */
257 	my_timeout[PFTM_TCP_OPENING] = 30; 		/* No response yet */
258 	my_timeout[PFTM_TCP_ESTABLISHED] = 24*60*60;	/* Established */
259 	my_timeout[PFTM_TCP_CLOSING] = 15 * 60;		/* Half closed */
260 	my_timeout[PFTM_TCP_FIN_WAIT] = 45;		/* Got both FINs */
261 	my_timeout[PFTM_TCP_CLOSED] = 90;		/* Got a RST */
262 	my_timeout[PFTM_UDP_FIRST_PACKET] = 60;		/* First UDP packet */
263 	my_timeout[PFTM_UDP_SINGLE] = 30;		/* Unidirectional */
264 	my_timeout[PFTM_UDP_MULTIPLE] = 60;		/* Bidirectional */
265 	my_timeout[PFTM_ICMP_FIRST_PACKET] = 20;	/* First ICMP packet */
266 	my_timeout[PFTM_ICMP_ERROR_REPLY] = 10;		/* Got error response */
267 	my_timeout[PFTM_OTHER_FIRST_PACKET] = 60;	/* First packet */
268 	my_timeout[PFTM_OTHER_SINGLE] = 30;		/* Unidirectional */
269 	my_timeout[PFTM_OTHER_MULTIPLE] = 60;		/* Bidirectional */
270 	my_timeout[PFTM_FRAG] = 30;			/* Fragment expire */
271 	my_timeout[PFTM_INTERVAL] = 10;			/* Expire interval */
272 
273 	callout_init(&pf_expire_to);
274 	callout_reset(&pf_expire_to, my_timeout[PFTM_INTERVAL] * hz,
275 	    pf_purge_timeout, &pf_expire_to);
276 
277 	pf_normalize_init();
278 	bzero(&pf_status, sizeof(pf_status));
279 	pf_status.debug = PF_DEBUG_URGENT;
280 	pf_pfil_hooked = 0;
281 
282 	/* XXX do our best to avoid a conflict */
283 	pf_status.hostid = arc4random();
284 
285 	return (error);
286 }
287 
288 int
289 pfopen(dev_t dev, int flags, int devtype, struct thread *td)
290 {
291 	if (minor(dev) >= 1)
292 		return (ENXIO);
293 	return (0);
294 }
295 
296 int
297 pfclose(dev_t dev, int flags, int fmt, struct thread *td)
298 {
299 	if (minor(dev) >= 1)
300 		return (ENXIO);
301 	return (0);
302 }
303 
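/*
 * Return the address pool of a single rule.  The rule is located by
 * anchor/ruleset name, rule action (mapped to a ruleset number) and rule
 * number; r_last selects the last rule in the queue instead, active picks
 * the active or inactive list, and check_ticket optionally verifies the
 * caller's ticket against that list first.
 */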
304 struct pf_pool *
305 pf_get_pool(char *anchorname, char *rulesetname, u_int32_t ticket,
306     u_int8_t rule_action, u_int32_t rule_number, u_int8_t r_last,
307     u_int8_t active, u_int8_t check_ticket)
308 {
309 	struct pf_ruleset	*ruleset;
310 	struct pf_rule		*rule;
311 	int			 rs_num;
312 
313 	ruleset = pf_find_ruleset(anchorname, rulesetname);
314 	if (ruleset == NULL)
315 		return (NULL);
316 	rs_num = pf_get_ruleset_number(rule_action);
317 	if (rs_num >= PF_RULESET_MAX)
318 		return (NULL);
319 	if (active) {
320 		if (check_ticket && ticket !=
321 		    ruleset->rules[rs_num].active.ticket)
322 			return (NULL);
323 		if (r_last)
324 			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
325 			    pf_rulequeue);
326 		else
327 			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
328 	} else {
329 		if (check_ticket && ticket !=
330 		    ruleset->rules[rs_num].inactive.ticket)
331 			return (NULL);
332 		if (r_last)
333 			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
334 			    pf_rulequeue);
335 		else
336 			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
337 	}
338 	if (!r_last) {
339 		while ((rule != NULL) && (rule->nr != rule_number))
340 			rule = TAILQ_NEXT(rule, entries);
341 	}
342 	if (rule == NULL)
343 		return (NULL);
344 
345 	return (&rule->rpool);
346 }
347 
348 int
349 pf_get_ruleset_number(u_int8_t action)
350 {
351 	switch (action) {
352 	case PF_SCRUB:
353 		return (PF_RULESET_SCRUB);
354 		break;
355 	case PF_PASS:
356 	case PF_DROP:
357 		return (PF_RULESET_FILTER);
358 		break;
359 	case PF_NAT:
360 	case PF_NONAT:
361 		return (PF_RULESET_NAT);
362 		break;
363 	case PF_BINAT:
364 	case PF_NOBINAT:
365 		return (PF_RULESET_BINAT);
366 		break;
367 	case PF_RDR:
368 	case PF_NORDR:
369 		return (PF_RULESET_RDR);
370 		break;
371 	default:
372 		return (PF_RULESET_MAX);
373 		break;
374 	}
375 }
376 
377 void
378 pf_init_ruleset(struct pf_ruleset *ruleset)
379 {
380 	int	i;
381 
382 	memset(ruleset, 0, sizeof(struct pf_ruleset));
383 	for (i = 0; i < PF_RULESET_MAX; i++) {
384 		TAILQ_INIT(&ruleset->rules[i].queues[0]);
385 		TAILQ_INIT(&ruleset->rules[i].queues[1]);
386 		ruleset->rules[i].active.ptr = &ruleset->rules[i].queues[0];
387 		ruleset->rules[i].inactive.ptr = &ruleset->rules[i].queues[1];
388 	}
389 }
390 
391 struct pf_anchor *
392 pf_find_anchor(const char *anchorname)
393 {
394 	struct pf_anchor	*anchor;
395 	int			 n = -1;
396 
397 	anchor = TAILQ_FIRST(&pf_anchors);
398 	while (anchor != NULL && (n = strcmp(anchor->name, anchorname)) < 0)
399 		anchor = TAILQ_NEXT(anchor, entries);
400 	if (n == 0)
401 		return (anchor);
402 	else
403 		return (NULL);
404 }
405 
406 struct pf_ruleset *
407 pf_find_ruleset(char *anchorname, char *rulesetname)
408 {
409 	struct pf_anchor	*anchor;
410 	struct pf_ruleset	*ruleset;
411 
412 	if (!anchorname[0] && !rulesetname[0])
413 		return (&pf_main_ruleset);
414 	if (!anchorname[0] || !rulesetname[0])
415 		return (NULL);
416 	anchorname[PF_ANCHOR_NAME_SIZE-1] = 0;
417 	rulesetname[PF_RULESET_NAME_SIZE-1] = 0;
418 	anchor = pf_find_anchor(anchorname);
419 	if (anchor == NULL)
420 		return (NULL);
421 	ruleset = TAILQ_FIRST(&anchor->rulesets);
422 	while (ruleset != NULL && strcmp(ruleset->name, rulesetname) < 0)
423 		ruleset = TAILQ_NEXT(ruleset, entries);
424 	if (ruleset != NULL && !strcmp(ruleset->name, rulesetname))
425 		return (ruleset);
426 	else
427 		return (NULL);
428 }
429 
430 struct pf_ruleset *
431 pf_find_or_create_ruleset(char anchorname[PF_ANCHOR_NAME_SIZE],
432     char rulesetname[PF_RULESET_NAME_SIZE])
433 {
434 	struct pf_anchor	*anchor, *a;
435 	struct pf_ruleset	*ruleset, *r;
436 
437 	if (!anchorname[0] && !rulesetname[0])
438 		return (&pf_main_ruleset);
439 	if (!anchorname[0] || !rulesetname[0])
440 		return (NULL);
441 	anchorname[PF_ANCHOR_NAME_SIZE-1] = 0;
442 	rulesetname[PF_RULESET_NAME_SIZE-1] = 0;
443 	a = TAILQ_FIRST(&pf_anchors);
444 	while (a != NULL && strcmp(a->name, anchorname) < 0)
445 		a = TAILQ_NEXT(a, entries);
446 	if (a != NULL && !strcmp(a->name, anchorname))
447 		anchor = a;
448 	else {
449 		anchor = (struct pf_anchor *)malloc(sizeof(struct pf_anchor),
450 		    M_TEMP, M_NOWAIT);
451 		if (anchor == NULL)
452 			return (NULL);
453 		memset(anchor, 0, sizeof(struct pf_anchor));
454 		bcopy(anchorname, anchor->name, sizeof(anchor->name));
455 		TAILQ_INIT(&anchor->rulesets);
456 		if (a != NULL)
457 			TAILQ_INSERT_BEFORE(a, anchor, entries);
458 		else
459 			TAILQ_INSERT_TAIL(&pf_anchors, anchor, entries);
460 	}
461 	r = TAILQ_FIRST(&anchor->rulesets);
462 	while (r != NULL && strcmp(r->name, rulesetname) < 0)
463 		r = TAILQ_NEXT(r, entries);
464 	if (r != NULL && !strcmp(r->name, rulesetname))
465 		return (r);
466 	ruleset = (struct pf_ruleset *)malloc(sizeof(struct pf_ruleset),
467 	    M_TEMP, M_NOWAIT);
468 	if (ruleset != NULL) {
469 		pf_init_ruleset(ruleset);
470 		bcopy(rulesetname, ruleset->name, sizeof(ruleset->name));
471 		ruleset->anchor = anchor;
472 		if (r != NULL)
473 			TAILQ_INSERT_BEFORE(r, ruleset, entries);
474 		else
475 			TAILQ_INSERT_TAIL(&anchor->rulesets, ruleset, entries);
476 	}
477 	return (ruleset);
478 }
479 
480 void
481 pf_remove_if_empty_ruleset(struct pf_ruleset *ruleset)
482 {
483 	struct pf_anchor	*anchor;
484 	int			 i;
485 
486 	if (ruleset == NULL || ruleset->anchor == NULL || ruleset->tables > 0 ||
487 	    ruleset->topen)
488 		return;
489 	for (i = 0; i < PF_RULESET_MAX; ++i)
490 		if (!TAILQ_EMPTY(ruleset->rules[i].active.ptr) ||
491 		    !TAILQ_EMPTY(ruleset->rules[i].inactive.ptr) ||
492 		    ruleset->rules[i].inactive.open)
493 			return;
494 
495 	anchor = ruleset->anchor;
496 	TAILQ_REMOVE(&anchor->rulesets, ruleset, entries);
497 	free(ruleset, M_TEMP);
498 
499 	if (TAILQ_EMPTY(&anchor->rulesets)) {
500 		TAILQ_REMOVE(&pf_anchors, anchor, entries);
501 		free(anchor, M_TEMP);
502 		pf_update_anchor_rules();
503 	}
504 }
505 
506 void
507 pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
508 {
509 	struct pf_pooladdr	*mv_pool_pa;
510 
511 	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
512 		TAILQ_REMOVE(poola, mv_pool_pa, entries);
513 		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
514 	}
515 }
516 
517 void
518 pf_empty_pool(struct pf_palist *poola)
519 {
520 	struct pf_pooladdr	*empty_pool_pa;
521 
522 	while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
523 		pfi_dynaddr_remove(&empty_pool_pa->addr);
524 		pf_tbladdr_remove(&empty_pool_pa->addr);
525 		pfi_detach_rule(empty_pool_pa->kif);
526 		TAILQ_REMOVE(poola, empty_pool_pa, entries);
527 		pool_put(&pf_pooladdr_pl, empty_pool_pa);
528 	}
529 }
530 
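/*
 * Unlink a rule from rulequeue (if given) and free it once nothing else
 * references it: a rule that still has states or source nodes attached is
 * only unlinked here and reclaimed later, when the last reference is gone.
 */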
531 void
532 pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
533 {
534 	if (rulequeue != NULL) {
535 		if (rule->states <= 0) {
536 			/*
537 			 * XXX - we need to remove the table *before* detaching
538 			 * the rule to make sure the table code does not delete
539 			 * the anchor under our feet.
540 			 */
541 			pf_tbladdr_remove(&rule->src.addr);
542 			pf_tbladdr_remove(&rule->dst.addr);
543 		}
544 		TAILQ_REMOVE(rulequeue, rule, entries);
545 		rule->entries.tqe_prev = NULL;
546 		rule->nr = (uint32_t)(-1);
547 	}
548 
549 	if (rule->states > 0 || rule->src_nodes > 0 ||
550 	    rule->entries.tqe_prev != NULL)
551 		return;
552 	pf_tag_unref(rule->tag);
553 	pf_tag_unref(rule->match_tag);
554 #ifdef ALTQ
555 	if (rule->pqid != rule->qid)
556 		pf_qid_unref(rule->pqid);
557 	pf_qid_unref(rule->qid);
558 #endif
559 	pfi_dynaddr_remove(&rule->src.addr);
560 	pfi_dynaddr_remove(&rule->dst.addr);
561 	if (rulequeue == NULL) {
562 		pf_tbladdr_remove(&rule->src.addr);
563 		pf_tbladdr_remove(&rule->dst.addr);
564 	}
565 	pfi_detach_rule(rule->kif);
566 	pf_empty_pool(&rule->rpool.list);
567 	pool_put(&pf_rule_pl, rule);
568 }
569 
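/*
 * Reference-counted tag name allocator shared by rule tags and ALTQ queue
 * ids.  Valid ids start at 1 and run up to TAGID_MAX; 0 means "no tag"
 * and doubles as the error return.
 */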
570 static	u_int16_t
571 tagname2tag(struct pf_tags *head, char *tagname)
572 {
573 	struct pf_tagname	*tag, *p = NULL;
574 	u_int16_t		 new_tagid = 1;
575 
576 	TAILQ_FOREACH(tag, head, entries)
577 		if (strcmp(tagname, tag->name) == 0) {
578 			tag->ref++;
579 			return (tag->tag);
580 		}
581 
582 	/*
583 	 * to avoid fragmentation, we do a linear search from the beginning
584 	 * and take the first free slot we find. if there is none or the list
585 	 * is empty, append a new entry at the end.
586 	 */
587 
588 	/* new entry */
589 	if (!TAILQ_EMPTY(head))
590 		for (p = TAILQ_FIRST(head); p != NULL &&
591 		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
592 			new_tagid = p->tag + 1;
593 
594 	if (new_tagid > TAGID_MAX)
595 		return (0);
596 
597 	/* allocate and fill new struct pf_tagname */
598 	tag = (struct pf_tagname *)malloc(sizeof(struct pf_tagname),
599 	    M_TEMP, M_NOWAIT);
600 	if (tag == NULL)
601 		return (0);
602 	bzero(tag, sizeof(struct pf_tagname));
603 	strlcpy(tag->name, tagname, sizeof(tag->name));
604 	tag->tag = new_tagid;
605 	tag->ref++;
606 
607 	if (p != NULL)	/* insert new entry before p */
608 		TAILQ_INSERT_BEFORE(p, tag, entries);
609 	else	/* either list empty or no free slot in between */
610 		TAILQ_INSERT_TAIL(head, tag, entries);
611 
612 	return (tag->tag);
613 }
614 
615 static	void
616 tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
617 {
618 	struct pf_tagname	*tag;
619 
620 	TAILQ_FOREACH(tag, head, entries)
621 		if (tag->tag == tagid) {
622 			strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
623 			return;
624 		}
625 }
626 
627 static	void
628 tag_unref(struct pf_tags *head, u_int16_t tag)
629 {
630 	struct pf_tagname	*p, *next;
631 
632 	if (tag == 0)
633 		return;
634 
635 	for (p = TAILQ_FIRST(head); p != NULL; p = next) {
636 		next = TAILQ_NEXT(p, entries);
637 		if (tag == p->tag) {
638 			if (--p->ref == 0) {
639 				TAILQ_REMOVE(head, p, entries);
640 				free(p, M_TEMP);
641 			}
642 			break;
643 		}
644 	}
645 }
646 
647 u_int16_t
648 pf_tagname2tag(char *tagname)
649 {
650 	return (tagname2tag(&pf_tags, tagname));
651 }
652 
653 void
654 pf_tag2tagname(u_int16_t tagid, char *p)
655 {
656 	return (tag2tagname(&pf_tags, tagid, p));
657 }
658 
659 void
660 pf_tag_unref(u_int16_t tag)
661 {
662 	return (tag_unref(&pf_tags, tag));
663 }
664 
665 #ifdef ALTQ
666 u_int32_t
667 pf_qname2qid(char *qname)
668 {
669 	return ((u_int32_t)tagname2tag(&pf_qids, qname));
670 }
671 
672 void
673 pf_qid2qname(u_int32_t qid, char *p)
674 {
675 	return (tag2tagname(&pf_qids, (u_int16_t)qid, p));
676 }
677 
678 void
679 pf_qid_unref(u_int32_t qid)
680 {
681 	return (tag_unref(&pf_qids, (u_int16_t)qid));
682 }
683 
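/*
 * ALTQ configuration uses the same ticket scheme as rulesets: queues are
 * staged on pf_altqs_inactive under a ticket handed out by pf_begin_altq()
 * and made live by pf_commit_altq(), which swaps the lists, attaches the
 * new disciplines and tears down the old ones.  pf_rollback_altq() simply
 * discards the staged queues.
 */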
684 int
685 pf_begin_altq(u_int32_t *ticket)
686 {
687 	struct pf_altq	*altq;
688 	int		 error = 0;
689 
690 	/* Purge the old altq list */
691 	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
692 		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
693 		if (altq->qname[0] == 0) {
694 			/* detach and destroy the discipline */
695 			error = altq_remove(altq);
696 		} else
697 			pf_qid_unref(altq->qid);
698 		pool_put(&pf_altq_pl, altq);
699 	}
700 	if (error)
701 		return (error);
702 	*ticket = ++ticket_altqs_inactive;
703 	altqs_inactive_open = 1;
704 	return (0);
705 }
706 
707 int
708 pf_rollback_altq(u_int32_t ticket)
709 {
710 	struct pf_altq	*altq;
711 	int		 error = 0;
712 
713 	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
714 		return (0);
715 	/* Purge the old altq list */
716 	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
717 		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
718 		if (altq->qname[0] == 0) {
719 			/* detach and destroy the discipline */
720 			error = altq_remove(altq);
721 		} else
722 			pf_qid_unref(altq->qid);
723 		pool_put(&pf_altq_pl, altq);
724 	}
725 	altqs_inactive_open = 0;
726 	return (error);
727 }
728 
729 int
730 pf_commit_altq(u_int32_t ticket)
731 {
732 	struct pf_altqqueue	*old_altqs;
733 	struct pf_altq		*altq;
734 	int			 s, err, error = 0;
735 
736 	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
737 		return (EBUSY);
738 
739 	/* swap altqs, keep the old. */
740 	s = splsoftnet();
741 	old_altqs = pf_altqs_active;
742 	pf_altqs_active = pf_altqs_inactive;
743 	pf_altqs_inactive = old_altqs;
744 	ticket_altqs_active = ticket_altqs_inactive;
745 
746 	/* Attach new disciplines */
747 	TAILQ_FOREACH(altq, pf_altqs_active, entries) {
748 		if (altq->qname[0] == 0) {
749 			/* attach the discipline */
750 			error = altq_pfattach(altq);
751 			if (error) {
752 				splx(s);
753 				return (error);
754 			}
755 		}
756 	}
757 
758 	/* Purge the old altq list */
759 	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
760 		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
761 		if (altq->qname[0] == 0) {
762 			/* detach and destroy the discipline */
763 			err = altq_pfdetach(altq);
764 			if (err != 0 && error == 0)
765 				error = err;
766 			err = altq_remove(altq);
767 			if (err != 0 && error == 0)
768 				error = err;
769 		} else
770 			pf_qid_unref(altq->qid);
771 		pool_put(&pf_altq_pl, altq);
772 	}
773 	splx(s);
774 
775 	altqs_inactive_open = 0;
776 	return (error);
777 }
778 #endif /* ALTQ */
779 
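/*
 * Ruleset loading is transactional: pf_begin_rules() flushes the inactive
 * list and hands out a ticket, DIOCADDRULE queues new rules on that
 * inactive list, and pf_commit_rules() swaps the inactive and active lists
 * at splsoftnet() so the switch is atomic with respect to the packet path.
 * pf_rollback_rules() throws the staged rules away if the ticket matches.
 */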
780 int
781 pf_begin_rules(u_int32_t *ticket, int rs_num, char *anchor, char *ruleset)
782 {
783 	struct pf_ruleset	*rs;
784 	struct pf_rule		*rule;
785 
786 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
787 		return (EINVAL);
788 	rs = pf_find_or_create_ruleset(anchor, ruleset);
789 	if (rs == NULL)
790 		return (EINVAL);
791 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL)
792 		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
793 	*ticket = ++rs->rules[rs_num].inactive.ticket;
794 	rs->rules[rs_num].inactive.open = 1;
795 	return (0);
796 }
797 
798 int
799 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor, char *ruleset)
800 {
801 	struct pf_ruleset	*rs;
802 	struct pf_rule		*rule;
803 
804 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
805 		return (EINVAL);
806 	rs = pf_find_ruleset(anchor, ruleset);
807 	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
808 	    rs->rules[rs_num].inactive.ticket != ticket)
809 		return (0);
810 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL)
811 		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
812 	rs->rules[rs_num].inactive.open = 0;
813 	return (0);
814 }
815 
816 int
817 pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor, char *ruleset)
818 {
819 	struct pf_ruleset	*rs;
820 	struct pf_rule		*rule;
821 	struct pf_rulequeue	*old_rules;
822 	int			 s;
823 
824 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
825 		return (EINVAL);
826 	rs = pf_find_ruleset(anchor, ruleset);
827 	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
828 	    ticket != rs->rules[rs_num].inactive.ticket)
829 		return (EBUSY);
830 
831 	/* Swap rules, keep the old. */
832 	s = splsoftnet();
833 	old_rules = rs->rules[rs_num].active.ptr;
834 	rs->rules[rs_num].active.ptr =
835 	    rs->rules[rs_num].inactive.ptr;
836 	rs->rules[rs_num].inactive.ptr = old_rules;
837 	rs->rules[rs_num].active.ticket =
838 	    rs->rules[rs_num].inactive.ticket;
839 	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);
840 
841 	/* Purge the old rule list. */
842 	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
843 		pf_rm_rule(old_rules, rule);
844 	rs->rules[rs_num].inactive.open = 0;
845 	pf_remove_if_empty_ruleset(rs);
846 	pf_update_anchor_rules();
847 	splx(s);
848 	return (0);
849 }
850 
851 int
852 pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
853 {
854 	struct pf_pooladdr	*pa = NULL;
855 	struct pf_pool		*pool = NULL;
856 	int			 s;
857 	int			 error = 0;
858 
859 	/* XXX keep in sync with switch() below */
860 	if (securelevel > 1)
861 		switch (cmd) {
862 		case DIOCGETRULES:
863 		case DIOCGETRULE:
864 		case DIOCGETADDRS:
865 		case DIOCGETADDR:
866 		case DIOCGETSTATE:
867 		case DIOCSETSTATUSIF:
868 		case DIOCGETSTATUS:
869 		case DIOCCLRSTATUS:
870 		case DIOCNATLOOK:
871 		case DIOCSETDEBUG:
872 		case DIOCGETSTATES:
873 		case DIOCGETTIMEOUT:
874 		case DIOCCLRRULECTRS:
875 		case DIOCGETLIMIT:
876 		case DIOCGETALTQS:
877 		case DIOCGETALTQ:
878 		case DIOCGETQSTATS:
879 		case DIOCGETANCHORS:
880 		case DIOCGETANCHOR:
881 		case DIOCGETRULESETS:
882 		case DIOCGETRULESET:
883 		case DIOCRGETTABLES:
884 		case DIOCRGETTSTATS:
885 		case DIOCRCLRTSTATS:
886 		case DIOCRCLRADDRS:
887 		case DIOCRADDADDRS:
888 		case DIOCRDELADDRS:
889 		case DIOCRSETADDRS:
890 		case DIOCRGETADDRS:
891 		case DIOCRGETASTATS:
892 		case DIOCRCLRASTATS:
893 		case DIOCRTSTADDRS:
894 		case DIOCOSFPGET:
895 		case DIOCGETSRCNODES:
896 		case DIOCCLRSRCNODES:
897 		case DIOCIGETIFACES:
898 		case DIOCICLRISTATS:
899 		case DIOCGIFSPEED:
900 			break;
901 		case DIOCRCLRTABLES:
902 		case DIOCRADDTABLES:
903 		case DIOCRDELTABLES:
904 		case DIOCRSETTFLAGS:
905 			if (((struct pfioc_table *)addr)->pfrio_flags &
906 			    PFR_FLAG_DUMMY)
907 				break; /* dummy operation ok */
908 			return (EPERM);
909 		default:
910 			return (EPERM);
911 		}
912 
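	/*
	 * Descriptors opened without FWRITE may only issue the read-only
	 * commands below (table operations are still allowed when flagged
	 * PFR_FLAG_DUMMY).
	 */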
913 	if (!(flags & FWRITE))
914 		switch (cmd) {
915 		case DIOCGETRULES:
916 		case DIOCGETRULE:
917 		case DIOCGETADDRS:
918 		case DIOCGETADDR:
919 		case DIOCGETSTATE:
920 		case DIOCGETSTATUS:
921 		case DIOCGETSTATES:
922 		case DIOCGETTIMEOUT:
923 		case DIOCGETLIMIT:
924 		case DIOCGETALTQS:
925 		case DIOCGETALTQ:
926 		case DIOCGETQSTATS:
927 		case DIOCGETANCHORS:
928 		case DIOCGETANCHOR:
929 		case DIOCGETRULESETS:
930 		case DIOCGETRULESET:
931 		case DIOCRGETTABLES:
932 		case DIOCRGETTSTATS:
933 		case DIOCRGETADDRS:
934 		case DIOCRGETASTATS:
935 		case DIOCRTSTADDRS:
936 		case DIOCOSFPGET:
937 		case DIOCGETSRCNODES:
938 		case DIOCIGETIFACES:
939 		case DIOCGIFSPEED:
940 			break;
941 		case DIOCRCLRTABLES:
942 		case DIOCRADDTABLES:
943 		case DIOCRDELTABLES:
944 		case DIOCRCLRTSTATS:
945 		case DIOCRCLRADDRS:
946 		case DIOCRADDADDRS:
947 		case DIOCRDELADDRS:
948 		case DIOCRSETADDRS:
949 		case DIOCRSETTFLAGS:
950 			if (((struct pfioc_table *)addr)->pfrio_flags &
951 			    PFR_FLAG_DUMMY)
952 				break; /* dummy operation ok */
953 			return (EACCES);
954 		default:
955 			return (EACCES);
956 		}
957 
958 	switch (cmd) {
959 
960 	case DIOCSTART:
961 		if (pf_status.running)
962 			error = EEXIST;
963 		else {
964 			error = hook_pf();
965 			if (error) {
966 				DPFPRINTF(PF_DEBUG_MISC,
967 				    ("pf: pfil registration failed\n"));
968 				break;
969 			}
970 			pf_status.running = 1;
971 			pf_status.since = time_second;
972 			if (pf_status.stateid == 0) {
973 				pf_status.stateid = time_second;
974 				pf_status.stateid = pf_status.stateid << 32;
975 			}
976 			DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
977 		}
978 		break;
979 
980 	case DIOCSTOP:
981 		if (!pf_status.running)
982 			error = ENOENT;
983 		else {
984 			pf_status.running = 0;
985 			error = dehook_pf();
986 			if (error) {
987 				pf_status.running = 1;
988 				DPFPRINTF(PF_DEBUG_MISC,
989 					("pf: pfil unregistration failed\n"));
990 			}
991 			pf_status.since = time_second;
992 			DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
993 		}
994 		break;
995 
996 	case DIOCBEGINRULES: {
997 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
998 
999 		error = pf_begin_rules(&pr->ticket, pf_get_ruleset_number(
1000 		    pr->rule.action), pr->anchor, pr->ruleset);
1001 		break;
1002 	}
1003 
1004 	case DIOCADDRULE: {
1005 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
1006 		struct pf_ruleset	*ruleset;
1007 		struct pf_rule		*rule, *tail;
1008 		struct pf_pooladdr	*pa;
1009 		int			 rs_num;
1010 
1011 		ruleset = pf_find_ruleset(pr->anchor, pr->ruleset);
1012 		if (ruleset == NULL) {
1013 			error = EINVAL;
1014 			break;
1015 		}
1016 		rs_num = pf_get_ruleset_number(pr->rule.action);
1017 		if (rs_num >= PF_RULESET_MAX) {
1018 			error = EINVAL;
1019 			break;
1020 		}
1021 		if (pr->rule.anchorname[0] && ruleset != &pf_main_ruleset) {
1022 			error = EINVAL;
1023 			break;
1024 		}
1025 		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1026 			error = EINVAL;
1027 			break;
1028 		}
1029 		if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
1030 			error = EBUSY;
1031 			break;
1032 		}
1033 		if (pr->pool_ticket != ticket_pabuf) {
1034 			error = EBUSY;
1035 			break;
1036 		}
1037 		rule = pool_get(&pf_rule_pl, PR_NOWAIT);
1038 		if (rule == NULL) {
1039 			error = ENOMEM;
1040 			break;
1041 		}
1042 		bcopy(&pr->rule, rule, sizeof(struct pf_rule));
1043 		rule->anchor = NULL;
1044 		rule->kif = NULL;
1045 		TAILQ_INIT(&rule->rpool.list);
1046 		/* initialize refcounting */
1047 		rule->states = 0;
1048 		rule->src_nodes = 0;
1049 		rule->entries.tqe_prev = NULL;
1050 #ifndef INET
1051 		if (rule->af == AF_INET) {
1052 			pool_put(&pf_rule_pl, rule);
1053 			error = EAFNOSUPPORT;
1054 			break;
1055 		}
1056 #endif /* INET */
1057 #ifndef INET6
1058 		if (rule->af == AF_INET6) {
1059 			pool_put(&pf_rule_pl, rule);
1060 			error = EAFNOSUPPORT;
1061 			break;
1062 		}
1063 #endif /* INET6 */
1064 		tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
1065 		    pf_rulequeue);
1066 		if (tail)
1067 			rule->nr = tail->nr + 1;
1068 		else
1069 			rule->nr = 0;
1070 		if (rule->ifname[0]) {
1071 			rule->kif = pfi_attach_rule(rule->ifname);
1072 			if (rule->kif == NULL) {
1073 				pool_put(&pf_rule_pl, rule);
1074 				error = EINVAL;
1075 				break;
1076 			}
1077 		}
1078 
1079 #ifdef ALTQ
1080 		/* set queue IDs */
1081 		if (rule->qname[0] != 0) {
1082 			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
1083 				error = EBUSY;
1084 			else if (rule->pqname[0] != 0) {
1085 				if ((rule->pqid =
1086 				    pf_qname2qid(rule->pqname)) == 0)
1087 					error = EBUSY;
1088 			} else
1089 				rule->pqid = rule->qid;
1090 		}
1091 #endif
1092 		if (rule->tagname[0])
1093 			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
1094 				error = EBUSY;
1095 		if (rule->match_tagname[0])
1096 			if ((rule->match_tag =
1097 			    pf_tagname2tag(rule->match_tagname)) == 0)
1098 				error = EBUSY;
1099 		if (rule->rt && !rule->direction)
1100 			error = EINVAL;
1101 		if (pfi_dynaddr_setup(&rule->src.addr, rule->af))
1102 			error = EINVAL;
1103 		if (pfi_dynaddr_setup(&rule->dst.addr, rule->af))
1104 			error = EINVAL;
1105 		if (pf_tbladdr_setup(ruleset, &rule->src.addr))
1106 			error = EINVAL;
1107 		if (pf_tbladdr_setup(ruleset, &rule->dst.addr))
1108 			error = EINVAL;
1109 		TAILQ_FOREACH(pa, &pf_pabuf, entries)
1110 			if (pf_tbladdr_setup(ruleset, &pa->addr))
1111 				error = EINVAL;
1112 
1113 		pf_mv_pool(&pf_pabuf, &rule->rpool.list);
1114 		if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
1115 		    (rule->action == PF_BINAT)) && !rule->anchorname[0]) ||
1116 		    (rule->rt > PF_FASTROUTE)) &&
1117 		    (TAILQ_FIRST(&rule->rpool.list) == NULL))
1118 			error = EINVAL;
1119 
1120 		if (error) {
1121 			pf_rm_rule(NULL, rule);
1122 			break;
1123 		}
1124 		rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
1125 		rule->evaluations = rule->packets = rule->bytes = 0;
1126 		TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
1127 		    rule, entries);
1128 		break;
1129 	}
1130 
1131 	case DIOCCOMMITRULES: {
1132 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
1133 
1134 		error = pf_commit_rules(pr->ticket, pf_get_ruleset_number(
1135 		    pr->rule.action), pr->anchor, pr->ruleset);
1136 		break;
1137 	}
1138 
1139 	case DIOCGETRULES: {
1140 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
1141 		struct pf_ruleset	*ruleset;
1142 		struct pf_rule		*tail;
1143 		int			 rs_num;
1144 
1145 		ruleset = pf_find_ruleset(pr->anchor, pr->ruleset);
1146 		if (ruleset == NULL) {
1147 			error = EINVAL;
1148 			break;
1149 		}
1150 		rs_num = pf_get_ruleset_number(pr->rule.action);
1151 		if (rs_num >= PF_RULESET_MAX) {
1152 			error = EINVAL;
1153 			break;
1154 		}
1155 		s = splsoftnet();
1156 		tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
1157 		    pf_rulequeue);
1158 		if (tail)
1159 			pr->nr = tail->nr + 1;
1160 		else
1161 			pr->nr = 0;
1162 		pr->ticket = ruleset->rules[rs_num].active.ticket;
1163 		splx(s);
1164 		break;
1165 	}
1166 
1167 	case DIOCGETRULE: {
1168 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
1169 		struct pf_ruleset	*ruleset;
1170 		struct pf_rule		*rule;
1171 		int			 rs_num, i;
1172 
1173 		ruleset = pf_find_ruleset(pr->anchor, pr->ruleset);
1174 		if (ruleset == NULL) {
1175 			error = EINVAL;
1176 			break;
1177 		}
1178 		rs_num = pf_get_ruleset_number(pr->rule.action);
1179 		if (rs_num >= PF_RULESET_MAX) {
1180 			error = EINVAL;
1181 			break;
1182 		}
1183 		if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
1184 			error = EBUSY;
1185 			break;
1186 		}
1187 		s = splsoftnet();
1188 		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
1189 		while ((rule != NULL) && (rule->nr != pr->nr))
1190 			rule = TAILQ_NEXT(rule, entries);
1191 		if (rule == NULL) {
1192 			error = EBUSY;
1193 			splx(s);
1194 			break;
1195 		}
1196 		bcopy(rule, &pr->rule, sizeof(struct pf_rule));
1197 		pfi_dynaddr_copyout(&pr->rule.src.addr);
1198 		pfi_dynaddr_copyout(&pr->rule.dst.addr);
1199 		pf_tbladdr_copyout(&pr->rule.src.addr);
1200 		pf_tbladdr_copyout(&pr->rule.dst.addr);
1201 		for (i = 0; i < PF_SKIP_COUNT; ++i)
1202 			if (rule->skip[i].ptr == NULL)
1203 				pr->rule.skip[i].nr = (uint32_t)(-1);
1204 			else
1205 				pr->rule.skip[i].nr =
1206 				    rule->skip[i].ptr->nr;
1207 		splx(s);
1208 		break;
1209 	}
1210 
1211 	case DIOCCHANGERULE: {
1212 		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
1213 		struct pf_ruleset	*ruleset;
1214 		struct pf_rule		*oldrule = NULL, *newrule = NULL;
1215 		u_int32_t		 nr = 0;
1216 		int			 rs_num;
1217 
1218 		if (!(pcr->action == PF_CHANGE_REMOVE ||
1219 		    pcr->action == PF_CHANGE_GET_TICKET) &&
1220 		    pcr->pool_ticket != ticket_pabuf) {
1221 			error = EBUSY;
1222 			break;
1223 		}
1224 
1225 		if (pcr->action < PF_CHANGE_ADD_HEAD ||
1226 		    pcr->action > PF_CHANGE_GET_TICKET) {
1227 			error = EINVAL;
1228 			break;
1229 		}
1230 		ruleset = pf_find_ruleset(pcr->anchor, pcr->ruleset);
1231 		if (ruleset == NULL) {
1232 			error = EINVAL;
1233 			break;
1234 		}
1235 		rs_num = pf_get_ruleset_number(pcr->rule.action);
1236 		if (rs_num >= PF_RULESET_MAX) {
1237 			error = EINVAL;
1238 			break;
1239 		}
1240 
1241 		if (pcr->action == PF_CHANGE_GET_TICKET) {
1242 			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
1243 			break;
1244 		} else {
1245 			if (pcr->ticket !=
1246 			    ruleset->rules[rs_num].active.ticket) {
1247 				error = EINVAL;
1248 				break;
1249 			}
1250 			if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1251 				error = EINVAL;
1252 				break;
1253 			}
1254 		}
1255 
1256 		if (pcr->action != PF_CHANGE_REMOVE) {
1257 			newrule = pool_get(&pf_rule_pl, PR_NOWAIT);
1258 			if (newrule == NULL) {
1259 				error = ENOMEM;
1260 				break;
1261 			}
1262 			bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
1263 			TAILQ_INIT(&newrule->rpool.list);
1264 			/* initialize refcounting */
1265 			newrule->states = 0;
1266 			newrule->entries.tqe_prev = NULL;
1267 #ifndef INET
1268 			if (newrule->af == AF_INET) {
1269 				pool_put(&pf_rule_pl, newrule);
1270 				error = EAFNOSUPPORT;
1271 				break;
1272 			}
1273 #endif /* INET */
1274 #ifndef INET6
1275 			if (newrule->af == AF_INET6) {
1276 				pool_put(&pf_rule_pl, newrule);
1277 				error = EAFNOSUPPORT;
1278 				break;
1279 			}
1280 #endif /* INET6 */
1281 			if (newrule->ifname[0]) {
1282 				newrule->kif = pfi_attach_rule(newrule->ifname);
1283 				if (newrule->kif == NULL) {
1284 					pool_put(&pf_rule_pl, newrule);
1285 					error = EINVAL;
1286 					break;
1287 				}
1288 			} else
1289 				newrule->kif = NULL;
1290 
1291 #ifdef ALTQ
1292 			/* set queue IDs */
1293 			if (newrule->qname[0] != 0) {
1294 				if ((newrule->qid =
1295 				    pf_qname2qid(newrule->qname)) == 0)
1296 					error = EBUSY;
1297 				else if (newrule->pqname[0] != 0) {
1298 					if ((newrule->pqid =
1299 					    pf_qname2qid(newrule->pqname)) == 0)
1300 						error = EBUSY;
1301 				} else
1302 					newrule->pqid = newrule->qid;
1303 			}
1304 #endif
1305 			if (newrule->tagname[0])
1306 				if ((newrule->tag =
1307 				    pf_tagname2tag(newrule->tagname)) == 0)
1308 					error = EBUSY;
1309 			if (newrule->match_tagname[0])
1310 				if ((newrule->match_tag = pf_tagname2tag(
1311 				    newrule->match_tagname)) == 0)
1312 					error = EBUSY;
1313 
1314 			if (newrule->rt && !newrule->direction)
1315 				error = EINVAL;
1316 			if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af))
1317 				error = EINVAL;
1318 			if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af))
1319 				error = EINVAL;
1320 			if (pf_tbladdr_setup(ruleset, &newrule->src.addr))
1321 				error = EINVAL;
1322 			if (pf_tbladdr_setup(ruleset, &newrule->dst.addr))
1323 				error = EINVAL;
1324 
1325 			pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
1326 			if (((((newrule->action == PF_NAT) ||
1327 			    (newrule->action == PF_RDR) ||
1328 			    (newrule->action == PF_BINAT) ||
1329 			    (newrule->rt > PF_FASTROUTE)) &&
1330 			    !newrule->anchorname[0])) &&
1331 			    (TAILQ_FIRST(&newrule->rpool.list) == NULL))
1332 				error = EINVAL;
1333 
1334 			if (error) {
1335 				pf_rm_rule(NULL, newrule);
1336 				break;
1337 			}
1338 			newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
1339 			newrule->evaluations = newrule->packets = 0;
1340 			newrule->bytes = 0;
1341 		}
1342 		pf_empty_pool(&pf_pabuf);
1343 
1344 		s = splsoftnet();
1345 
1346 		if (pcr->action == PF_CHANGE_ADD_HEAD)
1347 			oldrule = TAILQ_FIRST(
1348 			    ruleset->rules[rs_num].active.ptr);
1349 		else if (pcr->action == PF_CHANGE_ADD_TAIL)
1350 			oldrule = TAILQ_LAST(
1351 			    ruleset->rules[rs_num].active.ptr, pf_rulequeue);
1352 		else {
1353 			oldrule = TAILQ_FIRST(
1354 			    ruleset->rules[rs_num].active.ptr);
1355 			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
1356 				oldrule = TAILQ_NEXT(oldrule, entries);
1357 			if (oldrule == NULL) {
1358 				pf_rm_rule(NULL, newrule);
1359 				error = EINVAL;
1360 				splx(s);
1361 				break;
1362 			}
1363 		}
1364 
1365 		if (pcr->action == PF_CHANGE_REMOVE)
1366 			pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
1367 		else {
1368 			if (oldrule == NULL)
1369 				TAILQ_INSERT_TAIL(
1370 				    ruleset->rules[rs_num].active.ptr,
1371 				    newrule, entries);
1372 			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
1373 			    pcr->action == PF_CHANGE_ADD_BEFORE)
1374 				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
1375 			else
1376 				TAILQ_INSERT_AFTER(
1377 				    ruleset->rules[rs_num].active.ptr,
1378 				    oldrule, newrule, entries);
1379 		}
1380 
1381 		nr = 0;
1382 		TAILQ_FOREACH(oldrule,
1383 		    ruleset->rules[rs_num].active.ptr, entries)
1384 			oldrule->nr = nr++;
1385 
1386 		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
1387 		pf_remove_if_empty_ruleset(ruleset);
1388 		pf_update_anchor_rules();
1389 
1390 		ruleset->rules[rs_num].active.ticket++;
1391 		splx(s);
1392 		break;
1393 	}
1394 
1395 	case DIOCCLRSTATES: {
1396 		struct pf_state		*state;
1397 		struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1398 		int			 killed = 0;
1399 
1400 		s = splsoftnet();
1401 		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1402 			if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1403 			    state->u.s.kif->pfik_name)) {
1404 				state->timeout = PFTM_PURGE;
1405 #if NPFSYNC
1406 				/* don't send out individual delete messages */
1407 				state->sync_flags = PFSTATE_NOSYNC;
1408 #endif
1409 				killed++;
1410 			}
1411 		}
1412 		pf_purge_expired_states();
1413 		pf_status.states = 0;
1414 		psk->psk_af = killed;
1415 #if NPFSYNC
1416 		pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
1417 #endif
1418 		splx(s);
1419 		break;
1420 	}
1421 
1422 	case DIOCKILLSTATES: {
1423 		struct pf_state		*state;
1424 		struct pfioc_state_kill	*psk = (struct pfioc_state_kill *)addr;
1425 		int			 killed = 0;
1426 
1427 		s = splsoftnet();
1428 		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1429 			if ((!psk->psk_af || state->af == psk->psk_af)
1430 			    && (!psk->psk_proto || psk->psk_proto ==
1431 			    state->proto) &&
1432 			    PF_MATCHA(psk->psk_src.not,
1433 			    &psk->psk_src.addr.v.a.addr,
1434 			    &psk->psk_src.addr.v.a.mask,
1435 			    &state->lan.addr, state->af) &&
1436 			    PF_MATCHA(psk->psk_dst.not,
1437 			    &psk->psk_dst.addr.v.a.addr,
1438 			    &psk->psk_dst.addr.v.a.mask,
1439 			    &state->ext.addr, state->af) &&
1440 			    (psk->psk_src.port_op == 0 ||
1441 			    pf_match_port(psk->psk_src.port_op,
1442 			    psk->psk_src.port[0], psk->psk_src.port[1],
1443 			    state->lan.port)) &&
1444 			    (psk->psk_dst.port_op == 0 ||
1445 			    pf_match_port(psk->psk_dst.port_op,
1446 			    psk->psk_dst.port[0], psk->psk_dst.port[1],
1447 			    state->ext.port)) &&
1448 			    (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1449 			    state->u.s.kif->pfik_name))) {
1450 				state->timeout = PFTM_PURGE;
1451 				killed++;
1452 			}
1453 		}
1454 		pf_purge_expired_states();
1455 		splx(s);
1456 		psk->psk_af = killed;
1457 		break;
1458 	}
1459 
1460 	case DIOCADDSTATE: {
1461 		struct pfioc_state	*ps = (struct pfioc_state *)addr;
1462 		struct pf_state		*state;
1463 		struct pfi_kif		*kif;
1464 
1465 		if (ps->state.timeout >= PFTM_MAX &&
1466 		    ps->state.timeout != PFTM_UNTIL_PACKET) {
1467 			error = EINVAL;
1468 			break;
1469 		}
1470 		state = pool_get(&pf_state_pl, PR_NOWAIT);
1471 		if (state == NULL) {
1472 			error = ENOMEM;
1473 			break;
1474 		}
1475 		s = splsoftnet();
1476 		kif = pfi_lookup_create(ps->state.u.ifname);
1477 		if (kif == NULL) {
1478 			pool_put(&pf_state_pl, state);
1479 			error = ENOENT;
1480 			splx(s);
1481 			break;
1482 		}
1483 		bcopy(&ps->state, state, sizeof(struct pf_state));
1484 		bzero(&state->u, sizeof(state->u));
1485 		state->rule.ptr = &pf_default_rule;
1486 		state->nat_rule.ptr = NULL;
1487 		state->anchor.ptr = NULL;
1488 		state->rt_kif = NULL;
1489 		state->creation = time_second;
1490 		state->pfsync_time = 0;
1491 		state->packets[0] = state->packets[1] = 0;
1492 		state->bytes[0] = state->bytes[1] = 0;
1493 
1494 		if (pf_insert_state(kif, state)) {
1495 			pfi_maybe_destroy(kif);
1496 			pool_put(&pf_state_pl, state);
1497 			error = ENOMEM;
1498 		}
1499 		splx(s);
1500 		break;
1501 	}
1502 
1503 	case DIOCGETSTATE: {
1504 		struct pfioc_state	*ps = (struct pfioc_state *)addr;
1505 		struct pf_state		*state;
1506 		u_int32_t		 nr;
1507 
1508 		nr = 0;
1509 		s = splsoftnet();
1510 		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1511 			if (nr >= ps->nr)
1512 				break;
1513 			nr++;
1514 		}
1515 		if (state == NULL) {
1516 			error = EBUSY;
1517 			splx(s);
1518 			break;
1519 		}
1520 		bcopy(state, &ps->state, sizeof(struct pf_state));
1521 		ps->state.rule.nr = state->rule.ptr->nr;
1522 		ps->state.nat_rule.nr = (state->nat_rule.ptr == NULL) ?
1523 		    (uint32_t)(-1) : state->nat_rule.ptr->nr;
1524 		ps->state.anchor.nr = (state->anchor.ptr == NULL) ?
1525 		    (uint32_t)(-1) : state->anchor.ptr->nr;
1526 		splx(s);
1527 		ps->state.expire = pf_state_expires(state);
1528 		if (ps->state.expire > time_second)
1529 			ps->state.expire -= time_second;
1530 		else
1531 			ps->state.expire = 0;
1532 		break;
1533 	}
1534 
1535 	case DIOCGETSTATES: {
1536 		struct pfioc_states	*ps = (struct pfioc_states *)addr;
1537 		struct pf_state		*state;
1538 		struct pf_state		*p, pstore;
1539 		struct pfi_kif		*kif;
1540 		u_int32_t		 nr = 0;
1541 		int			 space = ps->ps_len;
1542 
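		/*
		 * A zero-length buffer is a request for the size needed to
		 * hold every state; report it and return without copying.
		 */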
1543 		if (space == 0) {
1544 			s = splsoftnet();
1545 			TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states)
1546 				nr += kif->pfik_states;
1547 			splx(s);
1548 			ps->ps_len = sizeof(struct pf_state) * nr;
1549 			return (0);
1550 		}
1551 
1552 		s = splsoftnet();
1553 		p = ps->ps_states;
1554 		TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states)
1555 			RB_FOREACH(state, pf_state_tree_ext_gwy,
1556 			    &kif->pfik_ext_gwy) {
1557 				int	secs = time_second;
1558 
1559 				if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len)
1560 					break;
1561 
1562 				bcopy(state, &pstore, sizeof(pstore));
1563 				strlcpy(pstore.u.ifname, kif->pfik_name,
1564 				    sizeof(pstore.u.ifname));
1565 				pstore.rule.nr = state->rule.ptr->nr;
1566 				pstore.nat_rule.nr = (state->nat_rule.ptr ==
1567 				    NULL) ? (uint32_t)(-1)
1568 					  : state->nat_rule.ptr->nr;
1569 				pstore.anchor.nr = (state->anchor.ptr ==
1570 				    NULL) ? (uint32_t)(-1)
1571 					  : state->anchor.ptr->nr;
1572 				pstore.creation = secs - pstore.creation;
1573 				pstore.expire = pf_state_expires(state);
1574 				if (pstore.expire > secs)
1575 					pstore.expire -= secs;
1576 				else
1577 					pstore.expire = 0;
1578 				error = copyout(&pstore, p, sizeof(*p));
1579 				if (error) {
1580 					splx(s);
1581 					goto fail;
1582 				}
1583 				p++;
1584 				nr++;
1585 			}
1586 		ps->ps_len = sizeof(struct pf_state) * nr;
1587 		splx(s);
1588 		break;
1589 	}
1590 
1591 	case DIOCGETSTATUS: {
1592 		struct pf_status *s = (struct pf_status *)addr;
1593 		bcopy(&pf_status, s, sizeof(struct pf_status));
1594 		pfi_fill_oldstatus(s);
1595 		break;
1596 	}
1597 
1598 	case DIOCSETSTATUSIF: {
1599 		struct pfioc_if	*pi = (struct pfioc_if *)addr;
1600 
1601 		if (pi->ifname[0] == 0) {
1602 			bzero(pf_status.ifname, IFNAMSIZ);
1603 			break;
1604 		}
1605 		if (ifunit(pi->ifname) == NULL) {
1606 			error = EINVAL;
1607 			break;
1608 		}
1609 		strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
1610 		break;
1611 	}
1612 
1613 	case DIOCCLRSTATUS: {
1614 		bzero(pf_status.counters, sizeof(pf_status.counters));
1615 		bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
1616 		bzero(pf_status.scounters, sizeof(pf_status.scounters));
1617 		if (*pf_status.ifname)
1618 			pfi_clr_istats(pf_status.ifname, NULL,
1619 			    PFI_FLAG_INSTANCE);
1620 		break;
1621 	}
1622 
1623 	case DIOCNATLOOK: {
1624 		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
1625 		struct pf_state		*state;
1626 		struct pf_state		 key;
1627 		int			 m = 0, direction = pnl->direction;
1628 
1629 		key.af = pnl->af;
1630 		key.proto = pnl->proto;
1631 
1632 		if (!pnl->proto ||
1633 		    PF_AZERO(&pnl->saddr, pnl->af) ||
1634 		    PF_AZERO(&pnl->daddr, pnl->af) ||
1635 		    !pnl->dport || !pnl->sport)
1636 			error = EINVAL;
1637 		else {
1638 			s = splsoftnet();
1639 
1640 			/*
1641 			 * userland gives us source and dest of connection,
1642 			 * reverse the lookup so we ask for what happens with
1643 			 * the return traffic, enabling us to find it in the
1644 			 * state tree.
1645 			 */
1646 			if (direction == PF_IN) {
1647 				PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af);
1648 				key.ext.port = pnl->dport;
1649 				PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
1650 				key.gwy.port = pnl->sport;
1651 				state = pf_find_state_all(&key, PF_EXT_GWY, &m);
1652 			} else {
1653 				PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
1654 				key.lan.port = pnl->dport;
1655 				PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af);
1656 				key.ext.port = pnl->sport;
1657 				state = pf_find_state_all(&key, PF_LAN_EXT, &m);
1658 			}
1659 			if (m > 1)
1660 				error = E2BIG;	/* more than one state */
1661 			else if (state != NULL) {
1662 				if (direction == PF_IN) {
1663 					PF_ACPY(&pnl->rsaddr, &state->lan.addr,
1664 					    state->af);
1665 					pnl->rsport = state->lan.port;
1666 					PF_ACPY(&pnl->rdaddr, &pnl->daddr,
1667 					    pnl->af);
1668 					pnl->rdport = pnl->dport;
1669 				} else {
1670 					PF_ACPY(&pnl->rdaddr, &state->gwy.addr,
1671 					    state->af);
1672 					pnl->rdport = state->gwy.port;
1673 					PF_ACPY(&pnl->rsaddr, &pnl->saddr,
1674 					    pnl->af);
1675 					pnl->rsport = pnl->sport;
1676 				}
1677 			} else
1678 				error = ENOENT;
1679 			splx(s);
1680 		}
1681 		break;
1682 	}
1683 
1684 	case DIOCSETTIMEOUT: {
1685 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
1686 		int		 old;
1687 
1688 		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
1689 		    pt->seconds < 0) {
1690 			error = EINVAL;
1691 			goto fail;
1692 		}
1693 		old = pf_default_rule.timeout[pt->timeout];
1694 		pf_default_rule.timeout[pt->timeout] = pt->seconds;
1695 		pt->seconds = old;
1696 		break;
1697 	}
1698 
1699 	case DIOCGETTIMEOUT: {
1700 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
1701 
1702 		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
1703 			error = EINVAL;
1704 			goto fail;
1705 		}
1706 		pt->seconds = pf_default_rule.timeout[pt->timeout];
1707 		break;
1708 	}
1709 
1710 	case DIOCGETLIMIT: {
1711 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
1712 
1713 		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
1714 			error = EINVAL;
1715 			goto fail;
1716 		}
1717 		pl->limit = pf_pool_limits[pl->index].limit;
1718 		break;
1719 	}
1720 
1721 	case DIOCSETLIMIT: {
1722 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
1723 		int			 old_limit;
1724 
1725 		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
1726 		    pf_pool_limits[pl->index].pp == NULL) {
1727 			error = EINVAL;
1728 			goto fail;
1729 		}
1730 
1731 		/* XXX Get an API to set limits on the zone/pool */
1732 		old_limit = pf_pool_limits[pl->index].limit;
1733 		pf_pool_limits[pl->index].limit = pl->limit;
1734 		pl->limit = old_limit;
1735 		break;
1736 	}
1737 
1738 	case DIOCSETDEBUG: {
1739 		u_int32_t	*level = (u_int32_t *)addr;
1740 
1741 		pf_status.debug = *level;
1742 		break;
1743 	}
1744 
1745 	case DIOCCLRRULECTRS: {
1746 		struct pf_ruleset	*ruleset = &pf_main_ruleset;
1747 		struct pf_rule		*rule;
1748 
1749 		s = splsoftnet();
1750 		TAILQ_FOREACH(rule,
1751 		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries)
1752 			rule->evaluations = rule->packets =
1753 			    rule->bytes = 0;
1754 		splx(s);
1755 		break;
1756 	}
1757 
1758 	case DIOCGIFSPEED: {
1759 		struct pf_ifspeed	*psp = (struct pf_ifspeed *)addr;
1760 		struct pf_ifspeed	ps;
1761 		struct ifnet		*ifp;
1762 
1763 		if (psp->ifname[0] != 0) {
1764 			/* Can we completely trust user-land? */
1765 			strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
1766 			ifp = ifunit(ps.ifname);
1767 			if (ifp)
1768 				psp->baudrate = ifp->if_baudrate;
1769 			else
1770 				error = EINVAL;
1771 		} else
1772 			error = EINVAL;
1773 		break;
1774 	}
1775 #ifdef ALTQ
1776 	case DIOCSTARTALTQ: {
1777 		struct pf_altq		*altq;
1778 		struct ifnet		*ifp;
1779 		struct tb_profile	 tb;
1780 
1781 		/* enable all altq interfaces on active list */
1782 		s = splsoftnet();
1783 		TAILQ_FOREACH(altq, pf_altqs_active, entries) {
1784 			if (altq->qname[0] == 0) {
1785 				if ((ifp = ifunit(altq->ifname)) == NULL) {
1786 					error = EINVAL;
1787 					break;
1788 				}
1789 				if (ifp->if_snd.altq_type != ALTQT_NONE)
1790 					error = altq_enable(&ifp->if_snd);
1791 				if (error != 0)
1792 					break;
1793 				/* set tokenbucket regulator */
1794 				tb.rate = altq->ifbandwidth;
1795 				tb.depth = altq->tbrsize;
1796 				error = tbr_set(&ifp->if_snd, &tb);
1797 				if (error != 0)
1798 					break;
1799 			}
1800 		}
1801 		splx(s);
1802 		DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
1803 		break;
1804 	}
1805 
1806 	case DIOCSTOPALTQ: {
1807 		struct pf_altq		*altq;
1808 		struct ifnet		*ifp;
1809 		struct tb_profile	 tb;
1810 		int			 err;
1811 
1812 		/* disable all altq interfaces on active list */
1813 		s = splsoftnet();
1814 		TAILQ_FOREACH(altq, pf_altqs_active, entries) {
1815 			if (altq->qname[0] == 0) {
1816 				if ((ifp = ifunit(altq->ifname)) == NULL) {
1817 					error = EINVAL;
1818 					break;
1819 				}
1820 				if (ifp->if_snd.altq_type != ALTQT_NONE) {
1821 					err = altq_disable(&ifp->if_snd);
1822 					if (err != 0 && error == 0)
1823 						error = err;
1824 				}
1825 				/* clear tokenbucket regulator */
1826 				tb.rate = 0;
1827 				err = tbr_set(&ifp->if_snd, &tb);
1828 				if (err != 0 && error == 0)
1829 					error = err;
1830 			}
1831 		}
1832 		splx(s);
1833 		DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
1834 		break;
1835 	}
1836 
1837 	case DIOCBEGINALTQS: {
1838 		u_int32_t	*ticket = (u_int32_t *)addr;
1839 
1840 		error = pf_begin_altq(ticket);
1841 		break;
1842 	}
1843 
1844 	case DIOCADDALTQ: {
1845 		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
1846 		struct pf_altq		*altq, *a;
1847 
1848 		if (pa->ticket != ticket_altqs_inactive) {
1849 			error = EBUSY;
1850 			break;
1851 		}
1852 		altq = pool_get(&pf_altq_pl, PR_NOWAIT);
1853 		if (altq == NULL) {
1854 			error = ENOMEM;
1855 			break;
1856 		}
1857 		bcopy(&pa->altq, altq, sizeof(struct pf_altq));
1858 
1859 		/*
1860 		 * if this is for a queue, find the discipline and
1861 		 * copy the necessary fields
1862 		 */
1863 		if (altq->qname[0] != 0) {
1864 			if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
1865 				error = EBUSY;
1866 				pool_put(&pf_altq_pl, altq);
1867 				break;
1868 			}
1869 			TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
1870 				if (strncmp(a->ifname, altq->ifname,
1871 				    IFNAMSIZ) == 0 && a->qname[0] == 0) {
1872 					altq->altq_disc = a->altq_disc;
1873 					break;
1874 				}
1875 			}
1876 		}
1877 
1878 		error = altq_add(altq);
1879 		if (error) {
1880 			pool_put(&pf_altq_pl, altq);
1881 			break;
1882 		}
1883 
1884 		TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
1885 		bcopy(altq, &pa->altq, sizeof(struct pf_altq));
1886 		break;
1887 	}
1888 
1889 	case DIOCCOMMITALTQS: {
1890 		u_int32_t		ticket = *(u_int32_t *)addr;
1891 
1892 		error = pf_commit_altq(ticket);
1893 		break;
1894 	}
1895 
1896 	case DIOCGETALTQS: {
1897 		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
1898 		struct pf_altq		*altq;
1899 
1900 		pa->nr = 0;
1901 		s = splsoftnet();
1902 		TAILQ_FOREACH(altq, pf_altqs_active, entries)
1903 			pa->nr++;
1904 		pa->ticket = ticket_altqs_active;
1905 		splx(s);
1906 		break;
1907 	}
1908 
1909 	case DIOCGETALTQ: {
1910 		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
1911 		struct pf_altq		*altq;
1912 		u_int32_t		 nr;
1913 
1914 		if (pa->ticket != ticket_altqs_active) {
1915 			error = EBUSY;
1916 			break;
1917 		}
1918 		nr = 0;
1919 		s = splsoftnet();
1920 		altq = TAILQ_FIRST(pf_altqs_active);
1921 		while ((altq != NULL) && (nr < pa->nr)) {
1922 			altq = TAILQ_NEXT(altq, entries);
1923 			nr++;
1924 		}
1925 		if (altq == NULL) {
1926 			error = EBUSY;
1927 			splx(s);
1928 			break;
1929 		}
1930 		bcopy(altq, &pa->altq, sizeof(struct pf_altq));
1931 		splx(s);
1932 		break;
1933 	}
1934 
1935 	case DIOCCHANGEALTQ:
1936 		/* CHANGEALTQ not supported yet! */
1937 		error = ENODEV;
1938 		break;
1939 
1940 	case DIOCGETQSTATS: {
1941 		struct pfioc_qstats	*pq = (struct pfioc_qstats *)addr;
1942 		struct pf_altq		*altq;
1943 		u_int32_t		 nr;
1944 		int			 nbytes;
1945 
1946 		if (pq->ticket != ticket_altqs_active) {
1947 			error = EBUSY;
1948 			break;
1949 		}
1950 		nbytes = pq->nbytes;
1951 		nr = 0;
1952 		s = splsoftnet();
1953 		altq = TAILQ_FIRST(pf_altqs_active);
1954 		while ((altq != NULL) && (nr < pq->nr)) {
1955 			altq = TAILQ_NEXT(altq, entries);
1956 			nr++;
1957 		}
1958 		if (altq == NULL) {
1959 			error = EBUSY;
1960 			splx(s);
1961 			break;
1962 		}
1963 		error = altq_getqstats(altq, pq->buf, &nbytes);
1964 		splx(s);
1965 		if (error == 0) {
1966 			pq->scheduler = altq->scheduler;
1967 			pq->nbytes = nbytes;
1968 		}
1969 		break;
1970 	}
1971 #endif /* ALTQ */
1972 
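	/*
	 * Pool addresses are staged in pf_pabuf under ticket_pabuf:
	 * DIOCBEGINADDRS empties the buffer and bumps the ticket,
	 * DIOCADDADDR appends a validated entry, and the collected
	 * addresses are moved onto the rule when it is later loaded.
	 */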
1973 	case DIOCBEGINADDRS: {
1974 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
1975 
1976 		pf_empty_pool(&pf_pabuf);
1977 		pp->ticket = ++ticket_pabuf;
1978 		break;
1979 	}
1980 
1981 	case DIOCADDADDR: {
1982 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
1983 
1984 #ifndef INET
1985 		if (pp->af == AF_INET) {
1986 			error = EAFNOSUPPORT;
1987 			break;
1988 		}
1989 #endif /* INET */
1990 #ifndef INET6
1991 		if (pp->af == AF_INET6) {
1992 			error = EAFNOSUPPORT;
1993 			break;
1994 		}
1995 #endif /* INET6 */
1996 		if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
1997 		    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
1998 		    pp->addr.addr.type != PF_ADDR_TABLE) {
1999 			error = EINVAL;
2000 			break;
2001 		}
2002 		pa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
2003 		if (pa == NULL) {
2004 			error = ENOMEM;
2005 			break;
2006 		}
2007 		bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr));
2008 		if (pa->ifname[0]) {
2009 			pa->kif = pfi_attach_rule(pa->ifname);
2010 			if (pa->kif == NULL) {
2011 				pool_put(&pf_pooladdr_pl, pa);
2012 				error = EINVAL;
2013 				break;
2014 			}
2015 		}
2016 		if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
2017 			pfi_dynaddr_remove(&pa->addr);
2018 			pfi_detach_rule(pa->kif);
2019 			pool_put(&pf_pooladdr_pl, pa);
2020 			error = EINVAL;
2021 			break;
2022 		}
2023 		TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
2024 		break;
2025 	}
2026 
2027 	case DIOCGETADDRS: {
2028 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
2029 
2030 		pp->nr = 0;
2031 		s = splsoftnet();
2032 		pool = pf_get_pool(pp->anchor, pp->ruleset, pp->ticket,
2033 		    pp->r_action, pp->r_num, 0, 1, 0);
2034 		if (pool == NULL) {
2035 			error = EBUSY;
2036 			splx(s);
2037 			break;
2038 		}
2039 		TAILQ_FOREACH(pa, &pool->list, entries)
2040 			pp->nr++;
2041 		splx(s);
2042 		break;
2043 	}
2044 
2045 	case DIOCGETADDR: {
2046 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
2047 		u_int32_t		 nr = 0;
2048 
2049 		s = splsoftnet();
2050 		pool = pf_get_pool(pp->anchor, pp->ruleset, pp->ticket,
2051 		    pp->r_action, pp->r_num, 0, 1, 1);
2052 		if (pool == NULL) {
2053 			error = EBUSY;
2054 			splx(s);
2055 			break;
2056 		}
2057 		pa = TAILQ_FIRST(&pool->list);
2058 		while ((pa != NULL) && (nr < pp->nr)) {
2059 			pa = TAILQ_NEXT(pa, entries);
2060 			nr++;
2061 		}
2062 		if (pa == NULL) {
2063 			error = EBUSY;
2064 			splx(s);
2065 			break;
2066 		}
2067 		bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr));
2068 		pfi_dynaddr_copyout(&pp->addr.addr);
2069 		pf_tbladdr_copyout(&pp->addr.addr);
2070 		splx(s);
2071 		break;
2072 	}
2073 
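	/*
	 * DIOCCHANGEADDR inserts or removes a single pool address in the
	 * pool selected by ticket/r_action/r_num; the position is chosen
	 * by pca->action and pca->nr, and pool->cur is reset afterwards.
	 */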
2074 	case DIOCCHANGEADDR: {
2075 		struct pfioc_pooladdr	*pca = (struct pfioc_pooladdr *)addr;
2076 		struct pf_pooladdr	*oldpa = NULL, *newpa = NULL;
2077 		struct pf_ruleset	*ruleset;
2078 
2079 		if (pca->action < PF_CHANGE_ADD_HEAD ||
2080 		    pca->action > PF_CHANGE_REMOVE) {
2081 			error = EINVAL;
2082 			break;
2083 		}
2084 		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
2085 		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
2086 		    pca->addr.addr.type != PF_ADDR_TABLE) {
2087 			error = EINVAL;
2088 			break;
2089 		}
2090 
2091 		ruleset = pf_find_ruleset(pca->anchor, pca->ruleset);
2092 		if (ruleset == NULL) {
2093 			error = EBUSY;
2094 			break;
2095 		}
2096 		pool = pf_get_pool(pca->anchor, pca->ruleset, pca->ticket,
2097 		    pca->r_action, pca->r_num, pca->r_last, 1, 1);
2098 		if (pool == NULL) {
2099 			error = EBUSY;
2100 			break;
2101 		}
2102 		if (pca->action != PF_CHANGE_REMOVE) {
2103 			newpa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
2104 			if (newpa == NULL) {
2105 				error = ENOMEM;
2106 				break;
2107 			}
2108 			bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
2109 #ifndef INET
2110 			if (pca->af == AF_INET) {
2111 				pool_put(&pf_pooladdr_pl, newpa);
2112 				error = EAFNOSUPPORT;
2113 				break;
2114 			}
2115 #endif /* INET */
2116 #ifndef INET6
2117 			if (pca->af == AF_INET6) {
2118 				pool_put(&pf_pooladdr_pl, newpa);
2119 				error = EAFNOSUPPORT;
2120 				break;
2121 			}
2122 #endif /* INET6 */
2123 			if (newpa->ifname[0]) {
2124 				newpa->kif = pfi_attach_rule(newpa->ifname);
2125 				if (newpa->kif == NULL) {
2126 					pool_put(&pf_pooladdr_pl, newpa);
2127 					error = EINVAL;
2128 					break;
2129 				}
2130 			} else
2131 				newpa->kif = NULL;
2132 			if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
2133 			    pf_tbladdr_setup(ruleset, &newpa->addr)) {
2134 				pfi_dynaddr_remove(&newpa->addr);
2135 				pfi_detach_rule(newpa->kif);
2136 				pool_put(&pf_pooladdr_pl, newpa);
2137 				error = EINVAL;
2138 				break;
2139 			}
2140 		}
2141 
2142 		s = splsoftnet();
2143 
2144 		if (pca->action == PF_CHANGE_ADD_HEAD)
2145 			oldpa = TAILQ_FIRST(&pool->list);
2146 		else if (pca->action == PF_CHANGE_ADD_TAIL)
2147 			oldpa = TAILQ_LAST(&pool->list, pf_palist);
2148 		else {
2149 			int	i = 0;
2150 
2151 			oldpa = TAILQ_FIRST(&pool->list);
2152 			while ((oldpa != NULL) && (i < pca->nr)) {
2153 				oldpa = TAILQ_NEXT(oldpa, entries);
2154 				i++;
2155 			}
2156 			if (oldpa == NULL) {
2157 				error = EINVAL;
2158 				splx(s);
2159 				break;
2160 			}
2161 		}
2162 
2163 		if (pca->action == PF_CHANGE_REMOVE) {
2164 			TAILQ_REMOVE(&pool->list, oldpa, entries);
2165 			pfi_dynaddr_remove(&oldpa->addr);
2166 			pf_tbladdr_remove(&oldpa->addr);
2167 			pfi_detach_rule(oldpa->kif);
2168 			pool_put(&pf_pooladdr_pl, oldpa);
2169 		} else {
2170 			if (oldpa == NULL)
2171 				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
2172 			else if (pca->action == PF_CHANGE_ADD_HEAD ||
2173 			    pca->action == PF_CHANGE_ADD_BEFORE)
2174 				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
2175 			else
2176 				TAILQ_INSERT_AFTER(&pool->list, oldpa,
2177 				    newpa, entries);
2178 		}
2179 
2180 		pool->cur = TAILQ_FIRST(&pool->list);
2181 		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
2182 		    pca->af);
2183 		splx(s);
2184 		break;
2185 	}
2186 
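	/*
	 * Anchors and the rulesets within them are enumerated by index:
	 * DIOCGETANCHORS/DIOCGETRULESETS report the count, while
	 * DIOCGETANCHOR/DIOCGETRULESET return the name of the nr'th entry.
	 */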
2187 	case DIOCGETANCHORS: {
2188 		struct pfioc_anchor	*pa = (struct pfioc_anchor *)addr;
2189 		struct pf_anchor	*anchor;
2190 
2191 		pa->nr = 0;
2192 		TAILQ_FOREACH(anchor, &pf_anchors, entries)
2193 			pa->nr++;
2194 		break;
2195 	}
2196 
2197 	case DIOCGETANCHOR: {
2198 		struct pfioc_anchor	*pa = (struct pfioc_anchor *)addr;
2199 		struct pf_anchor	*anchor;
2200 		u_int32_t		 nr = 0;
2201 
2202 		anchor = TAILQ_FIRST(&pf_anchors);
2203 		while (anchor != NULL && nr < pa->nr) {
2204 			anchor = TAILQ_NEXT(anchor, entries);
2205 			nr++;
2206 		}
2207 		if (anchor == NULL)
2208 			error = EBUSY;
2209 		else
2210 			bcopy(anchor->name, pa->name, sizeof(pa->name));
2211 		break;
2212 	}
2213 
2214 	case DIOCGETRULESETS: {
2215 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
2216 		struct pf_anchor	*anchor;
2217 		struct pf_ruleset	*ruleset;
2218 
2219 		pr->anchor[PF_ANCHOR_NAME_SIZE-1] = 0;
2220 		if ((anchor = pf_find_anchor(pr->anchor)) == NULL) {
2221 			error = EINVAL;
2222 			break;
2223 		}
2224 		pr->nr = 0;
2225 		TAILQ_FOREACH(ruleset, &anchor->rulesets, entries)
2226 			pr->nr++;
2227 		break;
2228 	}
2229 
2230 	case DIOCGETRULESET: {
2231 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
2232 		struct pf_anchor	*anchor;
2233 		struct pf_ruleset	*ruleset;
2234 		u_int32_t		 nr = 0;
2235 
2236 		if ((anchor = pf_find_anchor(pr->anchor)) == NULL) {
2237 			error = EINVAL;
2238 			break;
2239 		}
2240 		ruleset = TAILQ_FIRST(&anchor->rulesets);
2241 		while (ruleset != NULL && nr < pr->nr) {
2242 			ruleset = TAILQ_NEXT(ruleset, entries);
2243 			nr++;
2244 		}
2245 		if (ruleset == NULL)
2246 			error = EBUSY;
2247 		else
2248 			bcopy(ruleset->name, pr->name, sizeof(pr->name));
2249 		break;
2250 	}
2251 
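	/*
	 * The DIOCR* table ioctls validate pfrio_esize against the expected
	 * element size and pass the request on to the pfr_* table routines
	 * with PFR_FLAG_USERIOCTL set.
	 */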
2252 	case DIOCRCLRTABLES: {
2253 		struct pfioc_table *io = (struct pfioc_table *)addr;
2254 
2255 		if (io->pfrio_esize != 0) {
2256 			error = ENODEV;
2257 			break;
2258 		}
2259 		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
2260 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
2261 		break;
2262 	}
2263 
2264 	case DIOCRADDTABLES: {
2265 		struct pfioc_table *io = (struct pfioc_table *)addr;
2266 
2267 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
2268 			error = ENODEV;
2269 			break;
2270 		}
2271 		error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
2272 		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2273 		break;
2274 	}
2275 
2276 	case DIOCRDELTABLES: {
2277 		struct pfioc_table *io = (struct pfioc_table *)addr;
2278 
2279 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
2280 			error = ENODEV;
2281 			break;
2282 		}
2283 		error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
2284 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2285 		break;
2286 	}
2287 
2288 	case DIOCRGETTABLES: {
2289 		struct pfioc_table *io = (struct pfioc_table *)addr;
2290 
2291 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
2292 			error = ENODEV;
2293 			break;
2294 		}
2295 		error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
2296 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2297 		break;
2298 	}
2299 
2300 	case DIOCRGETTSTATS: {
2301 		struct pfioc_table *io = (struct pfioc_table *)addr;
2302 
2303 		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
2304 			error = ENODEV;
2305 			break;
2306 		}
2307 		error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
2308 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2309 		break;
2310 	}
2311 
2312 	case DIOCRCLRTSTATS: {
2313 		struct pfioc_table *io = (struct pfioc_table *)addr;
2314 
2315 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
2316 			error = ENODEV;
2317 			break;
2318 		}
2319 		error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
2320 		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2321 		break;
2322 	}
2323 
2324 	case DIOCRSETTFLAGS: {
2325 		struct pfioc_table *io = (struct pfioc_table *)addr;
2326 
2327 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
2328 			error = ENODEV;
2329 			break;
2330 		}
2331 		error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
2332 		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
2333 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2334 		break;
2335 	}
2336 
2337 	case DIOCRCLRADDRS: {
2338 		struct pfioc_table *io = (struct pfioc_table *)addr;
2339 
2340 		if (io->pfrio_esize != 0) {
2341 			error = ENODEV;
2342 			break;
2343 		}
2344 		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
2345 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
2346 		break;
2347 	}
2348 
2349 	case DIOCRADDADDRS: {
2350 		struct pfioc_table *io = (struct pfioc_table *)addr;
2351 
2352 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2353 			error = ENODEV;
2354 			break;
2355 		}
2356 		error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
2357 		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
2358 		    PFR_FLAG_USERIOCTL);
2359 		break;
2360 	}
2361 
2362 	case DIOCRDELADDRS: {
2363 		struct pfioc_table *io = (struct pfioc_table *)addr;
2364 
2365 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2366 			error = ENODEV;
2367 			break;
2368 		}
2369 		error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
2370 		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
2371 		    PFR_FLAG_USERIOCTL);
2372 		break;
2373 	}
2374 
2375 	case DIOCRSETADDRS: {
2376 		struct pfioc_table *io = (struct pfioc_table *)addr;
2377 
2378 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2379 			error = ENODEV;
2380 			break;
2381 		}
2382 		error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
2383 		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
2384 		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
2385 		    PFR_FLAG_USERIOCTL);
2386 		break;
2387 	}
2388 
2389 	case DIOCRGETADDRS: {
2390 		struct pfioc_table *io = (struct pfioc_table *)addr;
2391 
2392 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2393 			error = ENODEV;
2394 			break;
2395 		}
2396 		error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
2397 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2398 		break;
2399 	}
2400 
2401 	case DIOCRGETASTATS: {
2402 		struct pfioc_table *io = (struct pfioc_table *)addr;
2403 
2404 		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
2405 			error = ENODEV;
2406 			break;
2407 		}
2408 		error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
2409 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2410 		break;
2411 	}
2412 
2413 	case DIOCRCLRASTATS: {
2414 		struct pfioc_table *io = (struct pfioc_table *)addr;
2415 
2416 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2417 			error = ENODEV;
2418 			break;
2419 		}
2420 		error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
2421 		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
2422 		    PFR_FLAG_USERIOCTL);
2423 		break;
2424 	}
2425 
2426 	case DIOCRTSTADDRS: {
2427 		struct pfioc_table *io = (struct pfioc_table *)addr;
2428 
2429 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2430 			error = ENODEV;
2431 			break;
2432 		}
2433 		error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
2434 		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
2435 		    PFR_FLAG_USERIOCTL);
2436 		break;
2437 	}
2438 
2439 	case DIOCRINABEGIN: {
2440 		struct pfioc_table *io = (struct pfioc_table *)addr;
2441 
2442 		if (io->pfrio_esize != 0) {
2443 			error = ENODEV;
2444 			break;
2445 		}
2446 		error = pfr_ina_begin(&io->pfrio_table, &io->pfrio_ticket,
2447 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2448 		break;
2449 	}
2450 
2451 	case DIOCRINACOMMIT: {
2452 		struct pfioc_table *io = (struct pfioc_table *)addr;
2453 
2454 		if (io->pfrio_esize != 0) {
2455 			error = ENODEV;
2456 			break;
2457 		}
2458 		error = pfr_ina_commit(&io->pfrio_table, io->pfrio_ticket,
2459 		    &io->pfrio_nadd, &io->pfrio_nchange, io->pfrio_flags |
2460 		    PFR_FLAG_USERIOCTL);
2461 		break;
2462 	}
2463 
2464 	case DIOCRINADEFINE: {
2465 		struct pfioc_table *io = (struct pfioc_table *)addr;
2466 
2467 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2468 			error = ENODEV;
2469 			break;
2470 		}
2471 		error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
2472 		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
2473 		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2474 		break;
2475 	}
2476 
2477 	case DIOCOSFPADD: {
2478 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2479 		s = splsoftnet();
2480 		error = pf_osfp_add(io);
2481 		splx(s);
2482 		break;
2483 	}
2484 
2485 	case DIOCOSFPGET: {
2486 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2487 		s = splsoftnet();
2488 		error = pf_osfp_get(io);
2489 		splx(s);
2490 		break;
2491 	}
2492 
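	/*
	 * DIOCXBEGIN, DIOCXROLLBACK and DIOCXCOMMIT operate on an array of
	 * pfioc_trans_e elements so that rule, table and (with ALTQ) queue
	 * changes can be begun, undone or committed together; each element
	 * carries its own ticket.
	 */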
2493 	case DIOCXBEGIN: {
2494 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
2495 		struct pfioc_trans_e	 ioe;
2496 		struct pfr_table	 table;
2497 		int			 i;
2498 
2499 		if (io->esize != sizeof(ioe)) {
2500 			error = ENODEV;
2501 			goto fail;
2502 		}
2503 		for (i = 0; i < io->size; i++) {
2504 			if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2505 				error = EFAULT;
2506 				goto fail;
2507 			}
2508 			switch (ioe.rs_num) {
2509 #ifdef ALTQ
2510 			case PF_RULESET_ALTQ:
2511 				if (ioe.anchor[0] || ioe.ruleset[0]) {
2512 					error = EINVAL;
2513 					goto fail;
2514 				}
2515 				if ((error = pf_begin_altq(&ioe.ticket)))
2516 					goto fail;
2517 				break;
2518 #endif /* ALTQ */
2519 			case PF_RULESET_TABLE:
2520 				bzero(&table, sizeof(table));
2521 				strlcpy(table.pfrt_anchor, ioe.anchor,
2522 				    sizeof(table.pfrt_anchor));
2523 				strlcpy(table.pfrt_ruleset, ioe.ruleset,
2524 				    sizeof(table.pfrt_ruleset));
2525 				if ((error = pfr_ina_begin(&table,
2526 				    &ioe.ticket, NULL, 0)))
2527 					goto fail;
2528 				break;
2529 			default:
2530 				if ((error = pf_begin_rules(&ioe.ticket,
2531 				    ioe.rs_num, ioe.anchor, ioe.ruleset)))
2532 					goto fail;
2533 				break;
2534 			}
2535 			if (copyout(&ioe, io->array+i, sizeof(io->array[i]))) {
2536 				error = EFAULT;
2537 				goto fail;
2538 			}
2539 		}
2540 		break;
2541 	}
2542 
2543 	case DIOCXROLLBACK: {
2544 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
2545 		struct pfioc_trans_e	 ioe;
2546 		struct pfr_table	 table;
2547 		int			 i;
2548 
2549 		if (io->esize != sizeof(ioe)) {
2550 			error = ENODEV;
2551 			goto fail;
2552 		}
2553 		for (i = 0; i < io->size; i++) {
2554 			if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2555 				error = EFAULT;
2556 				goto fail;
2557 			}
2558 			switch (ioe.rs_num) {
2559 #ifdef ALTQ
2560 			case PF_RULESET_ALTQ:
2561 				if (ioe.anchor[0] || ioe.ruleset[0]) {
2562 					error = EINVAL;
2563 					goto fail;
2564 				}
2565 				if ((error = pf_rollback_altq(ioe.ticket)))
2566 					goto fail; /* really bad */
2567 				break;
2568 #endif /* ALTQ */
2569 			case PF_RULESET_TABLE:
2570 				bzero(&table, sizeof(table));
2571 				strlcpy(table.pfrt_anchor, ioe.anchor,
2572 				    sizeof(table.pfrt_anchor));
2573 				strlcpy(table.pfrt_ruleset, ioe.ruleset,
2574 				    sizeof(table.pfrt_ruleset));
2575 				if ((error = pfr_ina_rollback(&table,
2576 				    ioe.ticket, NULL, 0)))
2577 					goto fail; /* really bad */
2578 				break;
2579 			default:
2580 				if ((error = pf_rollback_rules(ioe.ticket,
2581 				    ioe.rs_num, ioe.anchor, ioe.ruleset)))
2582 					goto fail; /* really bad */
2583 				break;
2584 			}
2585 		}
2586 		break;
2587 	}
2588 
2589 	case DIOCXCOMMIT: {
2590 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
2591 		struct pfioc_trans_e	 ioe;
2592 		struct pfr_table	 table;
2593 		struct pf_ruleset	*rs;
2594 		int			 i;
2595 
2596 		if (io->esize != sizeof(ioe)) {
2597 			error = ENODEV;
2598 			goto fail;
2599 		}
2600 		/* first make sure everything will succeed */
2601 		for (i = 0; i < io->size; i++) {
2602 			if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2603 				error = EFAULT;
2604 				goto fail;
2605 			}
2606 			switch (ioe.rs_num) {
2607 #ifdef ALTQ
2608 			case PF_RULESET_ALTQ:
2609 				if (ioe.anchor[0] || ioe.ruleset[0]) {
2610 					error = EINVAL;
2611 					goto fail;
2612 				}
2613 				if (!altqs_inactive_open || ioe.ticket !=
2614 				    ticket_altqs_inactive) {
2615 					error = EBUSY;
2616 					goto fail;
2617 				}
2618 				break;
2619 #endif /* ALTQ */
2620 			case PF_RULESET_TABLE:
2621 				rs = pf_find_ruleset(ioe.anchor, ioe.ruleset);
2622 				if (rs == NULL || !rs->topen || ioe.ticket !=
2623 				     rs->tticket) {
2624 					error = EBUSY;
2625 					goto fail;
2626 				}
2627 				break;
2628 			default:
2629 				if (ioe.rs_num < 0 || ioe.rs_num >=
2630 				    PF_RULESET_MAX) {
2631 					error = EINVAL;
2632 					goto fail;
2633 				}
2634 				rs = pf_find_ruleset(ioe.anchor, ioe.ruleset);
2635 				if (rs == NULL ||
2636 				    !rs->rules[ioe.rs_num].inactive.open ||
2637 				    rs->rules[ioe.rs_num].inactive.ticket !=
2638 				    ioe.ticket) {
2639 					error = EBUSY;
2640 					goto fail;
2641 				}
2642 				break;
2643 			}
2644 		}
2645 		/* now do the commit - no errors should happen here */
2646 		for (i = 0; i < io->size; i++) {
2647 			if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2648 				error = EFAULT;
2649 				goto fail;
2650 			}
2651 			switch (ioe.rs_num) {
2652 #ifdef ALTQ
2653 			case PF_RULESET_ALTQ:
2654 				if ((error = pf_commit_altq(ioe.ticket)))
2655 					goto fail; /* really bad */
2656 				break;
2657 #endif /* ALTQ */
2658 			case PF_RULESET_TABLE:
2659 				bzero(&table, sizeof(table));
2660 				strlcpy(table.pfrt_anchor, ioe.anchor,
2661 				    sizeof(table.pfrt_anchor));
2662 				strlcpy(table.pfrt_ruleset, ioe.ruleset,
2663 				    sizeof(table.pfrt_ruleset));
2664 				if ((error = pfr_ina_commit(&table, ioe.ticket,
2665 				    NULL, NULL, 0)))
2666 					goto fail; /* really bad */
2667 				break;
2668 			default:
2669 				if ((error = pf_commit_rules(ioe.ticket,
2670 				    ioe.rs_num, ioe.anchor, ioe.ruleset)))
2671 					goto fail; /* really bad */
2672 				break;
2673 			}
2674 		}
2675 		break;
2676 	}
2677 
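	/*
	 * Copy the source tracking nodes out to userland; a call with
	 * psn_len == 0 only reports the buffer size that is required.
	 */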
2678 	case DIOCGETSRCNODES: {
2679 		struct pfioc_src_nodes	*psn = (struct pfioc_src_nodes *)addr;
2680 		struct pf_src_node	*n;
2681 		struct pf_src_node *p, pstore;
2682 		u_int32_t		 nr = 0;
2683 		int			 space = psn->psn_len;
2684 
2685 		if (space == 0) {
2686 			s = splsoftnet();
2687 			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
2688 				nr++;
2689 			splx(s);
2690 			psn->psn_len = sizeof(struct pf_src_node) * nr;
2691 			return (0);
2692 		}
2693 
2694 		s = splsoftnet();
2695 		p = psn->psn_src_nodes;
2696 		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
2697 			int	secs = time_second;
2698 
2699 			if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
2700 				break;
2701 
2702 			bcopy(n, &pstore, sizeof(pstore));
2703 			if (n->rule.ptr != NULL)
2704 				pstore.rule.nr = n->rule.ptr->nr;
2705 			pstore.creation = secs - pstore.creation;
2706 			if (pstore.expire > secs)
2707 				pstore.expire -= secs;
2708 			else
2709 				pstore.expire = 0;
2710 			error = copyout(&pstore, p, sizeof(*p));
2711 			if (error) {
2712 				splx(s);
2713 				goto fail;
2714 			}
2715 			p++;
2716 			nr++;
2717 		}
2718 		psn->psn_len = sizeof(struct pf_src_node) * nr;
2719 		splx(s);
2720 		break;
2721 	}
2722 
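	/*
	 * Detach all states from their source nodes, mark every node as
	 * expired and purge them immediately.
	 */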
2723 	case DIOCCLRSRCNODES: {
2724 		struct pf_src_node	*n;
2725 		struct pf_state		*state;
2726 
2727 		s = splsoftnet();
2728 		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
2729 			state->src_node = NULL;
2730 			state->nat_src_node = NULL;
2731 		}
2732 		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
2733 			n->expire = 1;
2734 			n->states = 0;
2735 		}
2736 		pf_purge_expired_src_nodes();
2737 		pf_status.src_nodes = 0;
2738 		splx(s);
2739 		break;
2740 	}
2741 
2742 	case DIOCSETHOSTID: {
2743 		u_int32_t	*hostid = (u_int32_t *)addr;
2744 
2745 		if (*hostid == 0) {
2746 			error = EINVAL;
2747 			goto fail;
2748 		}
2749 		pf_status.hostid = *hostid;
2750 		break;
2751 	}
2752 
2753 	case DIOCOSFPFLUSH:
2754 		s = splsoftnet();
2755 		pf_osfp_flush();
2756 		splx(s);
2757 		break;
2758 
2759 	case DIOCIGETIFACES: {
2760 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
2761 
2762 		if (io->pfiio_esize != sizeof(struct pfi_if)) {
2763 			error = ENODEV;
2764 			break;
2765 		}
2766 		error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
2767 		    &io->pfiio_size, io->pfiio_flags);
2768 		break;
2769 	}
2770 
2771 	case DIOCICLRISTATS: {
2772 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
2773 
2774 		error = pfi_clr_istats(io->pfiio_name, &io->pfiio_nzero,
2775 		    io->pfiio_flags);
2776 		break;
2777 	}
2778 
2779 	default:
2780 		error = ENODEV;
2781 		break;
2782 	}
2783 fail:
2784 	return (error);
2785 }
2786 
2787 /*
2788  * XXX - Check for version mismatch!!!
2789  */
2790 static void
2791 pf_clear_states(void)
2792 {
2793 	struct pf_state		*state;
2794 
2795 	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
2796 		state->timeout = PFTM_PURGE;
2797 #if NPFSYNC
2798 		/* don't send out individual delete messages */
2799 		state->sync_flags = PFSTATE_NOSYNC;
2800 #endif
2801 	}
2802 	pf_purge_expired_states();
2803 	pf_status.states = 0;
2804 #if 0 /* NPFSYNC */
2805 /*
2806  * XXX This is called on module unload; we do not want to sync that over?
2807  */
2808 	pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
2809 #endif
2810 }
2811 
2812 static int
2813 pf_clear_tables(void)
2814 {
2815 	struct pfioc_table io;
2816 	int error;
2817 
2818 	bzero(&io, sizeof(io));
2819 
2820 	error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
2821 	    io.pfrio_flags);
2822 
2823 	return (error);
2824 }
2825 
2826 static void
2827 pf_clear_srcnodes(void)
2828 {
2829 	struct pf_src_node	*n;
2830 	struct pf_state		*state;
2831 
2832 	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
2833 		state->src_node = NULL;
2834 		state->nat_src_node = NULL;
2835 	}
2836 	RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
2837 		n->expire = 1;
2838 		n->states = 0;
2839 	}
2840 	pf_purge_expired_src_nodes();
2841 	pf_status.src_nodes = 0;
2842 }
2843 /*
2844  * XXX - Check for version missmatch!!!
2845  * XXX - Check for version mismatch!!!
2846 
2847 /*
2848  * Duplicate pfctl -Fa operation to get rid of as much as we can.
2849  */
2850 static int
2851 shutdown_pf(void)
2852 {
2853 	int error = 0;
2854 	u_int32_t t[5];
2855 	char nn = '\0';
2856 
2857 	callout_stop(&pf_expire_to);
2858 
2859 	pf_status.running = 0;
2860 	do {
2861 		if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn,
2862 		    &nn)) != 0) {
2863 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
2864 			break;
2865 		}
2866 		if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn,
2867 		    &nn)) != 0) {
2868 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
2869 			break;		/* XXX: rollback? */
2870 		}
2871 		if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn, &nn))
2872 		    != 0) {
2873 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
2874 			break;		/* XXX: rollback? */
2875 		}
2876 		if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn, &nn))
2877 		    != 0) {
2878 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
2879 			break;		/* XXX: rollback? */
2880 		}
2881 		if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn, &nn))
2882 		    != 0) {
2883 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
2884 			break;		/* XXX: rollback? */
2885 		}
2886 
2887 		/* XXX: these should always succeed here */
2888 		pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn, &nn);
2889 		pf_commit_rules(t[1], PF_RULESET_FILTER, &nn, &nn);
2890 		pf_commit_rules(t[2], PF_RULESET_NAT, &nn, &nn);
2891 		pf_commit_rules(t[3], PF_RULESET_BINAT, &nn, &nn);
2892 		pf_commit_rules(t[4], PF_RULESET_RDR, &nn, &nn);
2893 
2894 		if ((error = pf_clear_tables()) != 0)
2895 			break;
2896 
2897 #ifdef ALTQ
2898 		if ((error = pf_begin_altq(&t[0])) != 0) {
2899 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
2900 			break;
2901 		}
2902 		pf_commit_altq(t[0]);
2903 #endif
2904 
2905 		pf_clear_states();
2906 
2907 		pf_clear_srcnodes();
2908 
2909 		/* status does not use malloc'ed memory, so no cleanup is needed */
2910 		/* fingerprints and interfaces have their own cleanup code */
2911 	} while (0);
2912 
2913 	return (error);
2914 }
2915 
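/*
 * pf_check_in()/pf_check_out() (and their INET6 counterparts below) are the
 * pfil(9) hook entry points: they run the packet through pf_test()/pf_test6()
 * and free the mbuf if the packet was blocked.
 */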
2916 static int
2917 pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir)
2918 {
2919 	/*
2920 	 * DragonFly's version of pf uses FreeBSD's native host byte ordering
2921 	 * for ip_len/ip_off. This is why we don't have to change byte order
2922 	 * like the FreeBSD-5 version does.
2923 	 */
2924 	int chk;
2925 
2926 	chk = pf_test(PF_IN, ifp, m);
2927 	if (chk && *m) {
2928 		m_freem(*m);
2929 		*m = NULL;
2930 	}
2931 	return chk;
2932 }
2933 
2934 static int
2935 pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir)
2936 {
2937 	/*
2938 	 * DragonFly's version of pf uses FreeBSD's native host byte ordering
2939 	 * for ip_len/ip_off. This is why we don't have to change byte order
2940 	 * like the FreeBSD-5 version does.
2941 	 */
2942 	int chk;
2943 
2944 	/* We need a proper checksum before we start (see OpenBSD ip_output) */
2945 	if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
2946 		in_delayed_cksum(*m);
2947 		(*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
2948 	}
2949 	chk = pf_test(PF_OUT, ifp, m);
2950 	if (chk && *m) {
2951 		m_freem(*m);
2952 		*m = NULL;
2953 	}
2954 	return chk;
2955 }
2956 
2957 #ifdef INET6
2958 static int
2959 pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir)
2960 {
2961 	/*
2962 	 * IPv6 is not affected by ip_len/ip_off byte order changes.
2963 	 */
2964 	int chk;
2965 
2966 	chk = pf_test6(PF_IN, ifp, m);
2967 	if (chk && *m) {
2968 		m_freem(*m);
2969 		*m = NULL;
2970 	}
2971 	return chk;
2972 }
2973 
2974 static int
2975 pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir)
2976 {
2977 	/*
2978 	 * IPv6 is not affected by ip_len/ip_off byte order changes.
2979 	 */
2980 	int chk;
2981 
2982 	/* We need a proper checksum before we start (see OpenBSD ip_output) */
2983 	if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
2984 		in_delayed_cksum(*m);
2985 		(*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
2986 	}
2987 	chk = pf_test6(PF_OUT, ifp, m);
2988 	if (chk && *m) {
2989 		m_freem(*m);
2990 		*m = NULL;
2991 	}
2992 	return chk;
2993 }
2994 #endif /* INET6 */
2995 
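/*
 * Register/unregister the pf pfil hooks for AF_INET and, when INET6 is
 * compiled in, AF_INET6.  Both functions return early based on
 * pf_pfil_hooked, so repeated calls are harmless.
 */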
2996 static int
2997 hook_pf(void)
2998 {
2999 	struct pfil_head *pfh_inet;
3000 #ifdef INET6
3001 	struct pfil_head *pfh_inet6;
3002 #endif
3003 
3004 	if (pf_pfil_hooked)
3005 		return (0);
3006 
3007 	pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
3008 	if (pfh_inet == NULL)
3009 		return (ENODEV);
3010 	pfil_add_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet);
3011 	pfil_add_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet);
3012 #ifdef INET6
3013 	pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
3014 	if (pfh_inet6 == NULL) {
3015 		pfil_remove_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
3016 		    pfh_inet);
3017 		pfil_remove_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
3018 		    pfh_inet);
3019 		return (ENODEV);
3020 	}
3021 	pfil_add_hook(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet6);
3022 	pfil_add_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet6);
3023 #endif
3024 
3025 	pf_pfil_hooked = 1;
3026 	return (0);
3027 }
3028 
3029 static int
3030 dehook_pf(void)
3031 {
3032 	struct pfil_head *pfh_inet;
3033 #ifdef INET6
3034 	struct pfil_head *pfh_inet6;
3035 #endif
3036 
3037 	if (pf_pfil_hooked == 0)
3038 		return (0);
3039 
3040 	pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
3041 	if (pfh_inet == NULL)
3042 		return (ENODEV);
3043 	pfil_remove_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
3044 	    pfh_inet);
3045 	pfil_remove_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
3046 	    pfh_inet);
3047 #ifdef INET6
3048 	pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
3049 	if (pfh_inet6 == NULL)
3050 		return (ENODEV);
3051 	pfil_remove_hook(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK,
3052 	    pfh_inet6);
3053 	pfil_remove_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK,
3054 	    pfh_inet6);
3055 #endif
3056 
3057 	pf_pfil_hooked = 0;
3058 	return (0);
3059 }
3060 
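/*
 * Module load/unload glue: pf_load() registers the control device and calls
 * pfattach(), pf_unload() unhooks pfil, shuts pf down and releases the
 * remaining resources.
 */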
3061 static int
3062 pf_load(void)
3063 {
3064 	int error;
3065 
3066 	init_zone_var();
3067 	error = cdevsw_add(&pf_cdevsw, 0, 0);
3068 	if (error)
3069 		return (error);
3070 	pf_dev = make_dev(&pf_cdevsw, 0, 0, 0, 0600, PF_NAME);
3071 	error = pfattach();
3072 	if (error) {
3073 		cdevsw_remove(&pf_cdevsw, 0, 0);
3074 		return (error);
3075 	}
3076 	return (0);
3077 }
3078 
3079 static int
3080 pf_unload(void)
3081 {
3082 	int error;
3083 
3084 	pf_status.running = 0;
3085 	error = dehook_pf();
3086 	if (error) {
3087 		/*
3088 		 * Should not happen!
3089 		 * XXX Due to error code ESRCH, kldunload will show
3090 		 * a message like 'No such process'.
3091 		 */
3092 		printf("pfil unregistration failed\n");
3093 		return error;
3094 	}
3095 	shutdown_pf();
3096 	pfi_cleanup();
3097 	pf_osfp_flush();
3098 	pf_osfp_cleanup();
3099 	cleanup_pf_zone();
3100 	cdevsw_remove(&pf_cdevsw, 0, 0);
3101 	return 0;
3102 }
3103 
3104 static int
3105 pf_modevent(module_t mod, int type, void *data)
3106 {
3107 	int error = 0;
3108 
3109 	switch (type) {
3110 	case MOD_LOAD:
3111 		error = pf_load();
3112 		break;
3113 
3114 	case MOD_UNLOAD:
3115 		error = pf_unload();
3116 		break;
3117 	default:
3118 		error = EINVAL;
3119 		break;
3120 	}
3121 	return error;
3122 }
3123 
3124 static moduledata_t pf_mod = {
3125 	"pf",
3126 	pf_modevent,
3127 	0
3128 };
3129 
3130 DECLARE_MODULE(pf, pf_mod, SI_SUB_PSEUDO, SI_ORDER_FIRST);
3131 MODULE_VERSION(pf, PF_MODVER);
3132