xref: /freebsd/sys/netpfil/pf/pf_ioctl.c (revision 2b833162)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2001 Daniel Hartmeier
5  * Copyright (c) 2002,2003 Henning Brauer
6  * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  *    - Redistributions of source code must retain the above copyright
14  *      notice, this list of conditions and the following disclaimer.
15  *    - Redistributions in binary form must reproduce the above
16  *      copyright notice, this list of conditions and the following
17  *      disclaimer in the documentation and/or other materials provided
18  *      with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  *
33  * Effort sponsored in part by the Defense Advanced Research Projects
34  * Agency (DARPA) and Air Force Research Laboratory, Air Force
35  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
36  *
37  *	$OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
38  */
39 
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
42 
43 #include "opt_inet.h"
44 #include "opt_inet6.h"
45 #include "opt_bpf.h"
46 #include "opt_pf.h"
47 
48 #include <sys/param.h>
49 #include <sys/_bitset.h>
50 #include <sys/bitset.h>
51 #include <sys/bus.h>
52 #include <sys/conf.h>
53 #include <sys/endian.h>
54 #include <sys/fcntl.h>
55 #include <sys/filio.h>
56 #include <sys/hash.h>
57 #include <sys/interrupt.h>
58 #include <sys/jail.h>
59 #include <sys/kernel.h>
60 #include <sys/kthread.h>
61 #include <sys/lock.h>
62 #include <sys/mbuf.h>
63 #include <sys/module.h>
64 #include <sys/nv.h>
65 #include <sys/proc.h>
66 #include <sys/sdt.h>
67 #include <sys/smp.h>
68 #include <sys/socket.h>
69 #include <sys/sysctl.h>
70 #include <sys/md5.h>
71 #include <sys/ucred.h>
72 
73 #include <net/if.h>
74 #include <net/if_var.h>
75 #include <net/if_private.h>
76 #include <net/vnet.h>
77 #include <net/route.h>
78 #include <net/pfil.h>
79 #include <net/pfvar.h>
80 #include <net/if_pfsync.h>
81 #include <net/if_pflog.h>
82 
83 #include <netinet/in.h>
84 #include <netinet/ip.h>
85 #include <netinet/ip_var.h>
86 #include <netinet6/ip6_var.h>
87 #include <netinet/ip_icmp.h>
88 #include <netpfil/pf/pf_nv.h>
89 
90 #ifdef INET6
91 #include <netinet/ip6.h>
92 #endif /* INET6 */
93 
94 #ifdef ALTQ
95 #include <net/altq/altq.h>
96 #endif
97 
98 SDT_PROBE_DEFINE3(pf, ioctl, ioctl, error, "int", "int", "int");
99 SDT_PROBE_DEFINE3(pf, ioctl, function, error, "char *", "int", "int");
100 SDT_PROBE_DEFINE2(pf, ioctl, addrule, error, "int", "int");
101 SDT_PROBE_DEFINE2(pf, ioctl, nvchk, error, "int", "int");
102 
103 static struct pf_kpool	*pf_get_kpool(const char *, u_int32_t, u_int8_t,
104 			    u_int32_t, u_int8_t, u_int8_t, u_int8_t);
105 
106 static void		 pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *);
107 static void		 pf_empty_kpool(struct pf_kpalist *);
108 static int		 pfioctl(struct cdev *, u_long, caddr_t, int,
109 			    struct thread *);
110 static int		 pf_begin_eth(uint32_t *, const char *);
111 static void		 pf_rollback_eth_cb(struct epoch_context *);
112 static int		 pf_rollback_eth(uint32_t, const char *);
113 static int		 pf_commit_eth(uint32_t, const char *);
114 static void		 pf_free_eth_rule(struct pf_keth_rule *);
115 #ifdef ALTQ
116 static int		 pf_begin_altq(u_int32_t *);
117 static int		 pf_rollback_altq(u_int32_t);
118 static int		 pf_commit_altq(u_int32_t);
119 static int		 pf_enable_altq(struct pf_altq *);
120 static int		 pf_disable_altq(struct pf_altq *);
121 static uint16_t		 pf_qname2qid(const char *);
122 static void		 pf_qid_unref(uint16_t);
123 #endif /* ALTQ */
124 static int		 pf_begin_rules(u_int32_t *, int, const char *);
125 static int		 pf_rollback_rules(u_int32_t, int, char *);
126 static int		 pf_setup_pfsync_matching(struct pf_kruleset *);
127 static void		 pf_hash_rule_rolling(MD5_CTX *, struct pf_krule *);
128 static void		 pf_hash_rule(struct pf_krule *);
129 static void		 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
130 static int		 pf_commit_rules(u_int32_t, int, char *);
131 static int		 pf_addr_setup(struct pf_kruleset *,
132 			    struct pf_addr_wrap *, sa_family_t);
133 static void		 pf_addr_copyout(struct pf_addr_wrap *);
134 static void		 pf_src_node_copy(const struct pf_ksrc_node *,
135 			    struct pf_src_node *);
136 #ifdef ALTQ
137 static int		 pf_export_kaltq(struct pf_altq *,
138 			    struct pfioc_altq_v1 *, size_t);
139 static int		 pf_import_kaltq(struct pfioc_altq_v1 *,
140 			    struct pf_altq *, size_t);
141 #endif /* ALTQ */
142 
143 VNET_DEFINE(struct pf_krule,	pf_default_rule);
144 
145 static __inline int             pf_krule_compare(struct pf_krule *,
146 				    struct pf_krule *);
147 
148 RB_GENERATE(pf_krule_global, pf_krule, entry_global, pf_krule_compare);
149 
150 #ifdef ALTQ
151 VNET_DEFINE_STATIC(int,		pf_altq_running);
152 #define	V_pf_altq_running	VNET(pf_altq_running)
153 #endif
154 
155 #define	TAGID_MAX	 50000
156 struct pf_tagname {
157 	TAILQ_ENTRY(pf_tagname)	namehash_entries;
158 	TAILQ_ENTRY(pf_tagname)	taghash_entries;
159 	char			name[PF_TAG_NAME_SIZE];
160 	uint16_t		tag;
161 	int			ref;
162 };
163 
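/*
 * A tagset maps tag names to numeric tag ids and back.  Each pf_tagname
 * entry is linked into two hash tables, one keyed by name and one keyed by
 * tag id, and the "avail" bitset tracks which of the TAGID_MAX ids are
 * still free.
 */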
164 struct pf_tagset {
165 	TAILQ_HEAD(, pf_tagname)	*namehash;
166 	TAILQ_HEAD(, pf_tagname)	*taghash;
167 	unsigned int			 mask;
168 	uint32_t			 seed;
169 	BITSET_DEFINE(, TAGID_MAX)	 avail;
170 };
171 
172 VNET_DEFINE(struct pf_tagset, pf_tags);
173 #define	V_pf_tags	VNET(pf_tags)
174 static unsigned int	pf_rule_tag_hashsize;
175 #define	PF_RULE_TAG_HASH_SIZE_DEFAULT	128
176 SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN,
177     &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT,
178     "Size of pf(4) rule tag hashtable");
179 
180 #ifdef ALTQ
181 VNET_DEFINE(struct pf_tagset, pf_qids);
182 #define	V_pf_qids	VNET(pf_qids)
183 static unsigned int	pf_queue_tag_hashsize;
184 #define	PF_QUEUE_TAG_HASH_SIZE_DEFAULT	128
185 SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN,
186     &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT,
187     "Size of pf(4) queue tag hashtable");
188 #endif
189 VNET_DEFINE(uma_zone_t,	 pf_tag_z);
190 #define	V_pf_tag_z		 VNET(pf_tag_z)
191 static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
192 static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");
193 
194 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
195 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
196 #endif
197 
198 static void		 pf_init_tagset(struct pf_tagset *, unsigned int *,
199 			    unsigned int);
200 static void		 pf_cleanup_tagset(struct pf_tagset *);
201 static uint16_t		 tagname2hashindex(const struct pf_tagset *, const char *);
202 static uint16_t		 tag2hashindex(const struct pf_tagset *, uint16_t);
203 static u_int16_t	 tagname2tag(struct pf_tagset *, const char *);
204 static u_int16_t	 pf_tagname2tag(const char *);
205 static void		 tag_unref(struct pf_tagset *, u_int16_t);
206 
207 #define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x
208 
209 struct cdev *pf_dev;
210 
211 /*
212  * XXX - These are new and need to be checked when moving to a new version
213  */
214 static void		 pf_clear_all_states(void);
215 static unsigned int	 pf_clear_states(const struct pf_kstate_kill *);
216 static void		 pf_killstates(struct pf_kstate_kill *,
217 			    unsigned int *);
218 static int		 pf_killstates_row(struct pf_kstate_kill *,
219 			    struct pf_idhash *);
220 static int		 pf_killstates_nv(struct pfioc_nv *);
221 static int		 pf_clearstates_nv(struct pfioc_nv *);
222 static int		 pf_getstate(struct pfioc_nv *);
223 static int		 pf_getstatus(struct pfioc_nv *);
224 static int		 pf_clear_tables(void);
225 static void		 pf_clear_srcnodes(struct pf_ksrc_node *);
226 static void		 pf_kill_srcnodes(struct pfioc_src_node_kill *);
227 static int		 pf_keepcounters(struct pfioc_nv *);
228 static void		 pf_tbladdr_copyout(struct pf_addr_wrap *);
229 
230 /*
231  * Wrapper functions for pfil(9) hooks
232  */
233 static pfil_return_t pf_eth_check_in(struct mbuf **m, struct ifnet *ifp,
234     int flags, void *ruleset __unused, struct inpcb *inp);
235 static pfil_return_t pf_eth_check_out(struct mbuf **m, struct ifnet *ifp,
236     int flags, void *ruleset __unused, struct inpcb *inp);
237 #ifdef INET
238 static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp,
239     int flags, void *ruleset __unused, struct inpcb *inp);
240 static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp,
241     int flags, void *ruleset __unused, struct inpcb *inp);
242 #endif
243 #ifdef INET6
244 static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp,
245     int flags, void *ruleset __unused, struct inpcb *inp);
246 static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp,
247     int flags, void *ruleset __unused, struct inpcb *inp);
248 #endif
249 
250 static void		hook_pf_eth(void);
251 static void		hook_pf(void);
252 static void		dehook_pf_eth(void);
253 static void		dehook_pf(void);
254 static int		shutdown_pf(void);
255 static int		pf_load(void);
256 static void		pf_unload(void);
257 
258 static struct cdevsw pf_cdevsw = {
259 	.d_ioctl =	pfioctl,
260 	.d_name =	PF_NAME,
261 	.d_version =	D_VERSION,
262 };
263 
264 VNET_DEFINE_STATIC(bool, pf_pfil_hooked);
265 #define V_pf_pfil_hooked	VNET(pf_pfil_hooked)
266 VNET_DEFINE_STATIC(bool, pf_pfil_eth_hooked);
267 #define V_pf_pfil_eth_hooked	VNET(pf_pfil_eth_hooked)
268 
269 /*
270  * We need a flag that is neither hooked nor running to know when
271  * the VNET is "valid".  We primarily need this to control (global)
272  * external events, e.g., eventhandlers.
273  */
274 VNET_DEFINE(int, pf_vnet_active);
275 #define V_pf_vnet_active	VNET(pf_vnet_active)
276 
277 int pf_end_threads;
278 struct proc *pf_purge_proc;
279 
280 struct rmlock			pf_rules_lock;
281 struct sx			pf_ioctl_lock;
282 struct sx			pf_end_lock;
283 
284 /* pfsync */
285 VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
286 VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
287 VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
288 VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
289 VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
290 VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
291 pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;
292 
293 /* pflog */
294 pflog_packet_t			*pflog_packet_ptr = NULL;
295 
296 /*
297  * Copy a user-provided string, returning an error if truncation would occur.
298  * Avoid scanning past "sz" bytes in the source string since there's no
299  * guarantee that it's nul-terminated.
300  */
301 static int
302 pf_user_strcpy(char *dst, const char *src, size_t sz)
303 {
304 	if (strnlen(src, sz) == sz)
305 		return (EINVAL);
306 	(void)strlcpy(dst, src, sz);
307 	return (0);
308 }
309 
310 static void
311 pfattach_vnet(void)
312 {
313 	u_int32_t *my_timeout = V_pf_default_rule.timeout;
314 
315 	bzero(&V_pf_status, sizeof(V_pf_status));
316 
317 	pf_initialize();
318 	pfr_initialize();
319 	pfi_initialize_vnet();
320 	pf_normalize_init();
321 	pf_syncookies_init();
322 
323 	V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
324 	V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;
325 
326 	RB_INIT(&V_pf_anchors);
327 	pf_init_kruleset(&pf_main_ruleset);
328 
329 	pf_init_keth(V_pf_keth);
330 
331 	/* default rule should never be garbage collected */
332 	V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
333 #ifdef PF_DEFAULT_TO_DROP
334 	V_pf_default_rule.action = PF_DROP;
335 #else
336 	V_pf_default_rule.action = PF_PASS;
337 #endif
338 	V_pf_default_rule.nr = -1;
339 	V_pf_default_rule.rtableid = -1;
340 
341 	pf_counter_u64_init(&V_pf_default_rule.evaluations, M_WAITOK);
342 	for (int i = 0; i < 2; i++) {
343 		pf_counter_u64_init(&V_pf_default_rule.packets[i], M_WAITOK);
344 		pf_counter_u64_init(&V_pf_default_rule.bytes[i], M_WAITOK);
345 	}
346 	V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
347 	V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
348 	V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK);
349 
350 	V_pf_default_rule.timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
351 	    M_WAITOK | M_ZERO);
352 
353 #ifdef PF_WANT_32_TO_64_COUNTER
354 	V_pf_kifmarker = malloc(sizeof(*V_pf_kifmarker), PFI_MTYPE, M_WAITOK | M_ZERO);
355 	V_pf_rulemarker = malloc(sizeof(*V_pf_rulemarker), M_PFRULE, M_WAITOK | M_ZERO);
356 	PF_RULES_WLOCK();
357 	LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist);
358 	LIST_INSERT_HEAD(&V_pf_allrulelist, &V_pf_default_rule, allrulelist);
359 	V_pf_allrulecount++;
360 	LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist);
361 	PF_RULES_WUNLOCK();
362 #endif
363 
364 	/* initialize default timeouts */
365 	my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
366 	my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
367 	my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
368 	my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
369 	my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
370 	my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
371 	my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
372 	my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
373 	my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
374 	my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
375 	my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
376 	my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
377 	my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
378 	my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
379 	my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
380 	my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
381 	my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
382 	my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
383 	my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
384 	my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;
385 
386 	V_pf_status.debug = PF_DEBUG_URGENT;
387 	/*
388 	 * XXX This is different from OpenBSD, where reassembly is enabled by
389 	 * default. In FreeBSD we expect people to still use scrub rules and
390 	 * switch to the new syntax later. Only when they switch must they
391 	 * explicitly enable reassembly. We could change the default once the
392 	 * scrub rule functionality is hopefully removed some day in the future.
393 	 */
394 	V_pf_status.reass = 0;
395 
396 	V_pf_pfil_hooked = false;
397 	V_pf_pfil_eth_hooked = false;
398 
399 	/* XXX do our best to avoid a conflict */
400 	V_pf_status.hostid = arc4random();
401 
402 	for (int i = 0; i < PFRES_MAX; i++)
403 		V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
404 	for (int i = 0; i < KLCNT_MAX; i++)
405 		V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
406 	for (int i = 0; i < FCNT_MAX; i++)
407 		pf_counter_u64_init(&V_pf_status.fcounters[i], M_WAITOK);
408 	for (int i = 0; i < SCNT_MAX; i++)
409 		V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);
410 
411 	if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET,
412 	    INTR_MPSAFE, &V_pf_swi_cookie) != 0)
413 		/* XXXGL: leaked all above. */
414 		return;
415 }
416 
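/*
 * Look up the redirection pool of a rule.  The rule is identified by
 * anchor path, ruleset (derived from rule_action), rule number and
 * active/inactive list.  If check_ticket is set, the caller's ticket must
 * match the ruleset's current ticket; if r_last is set, the last rule in
 * the queue is used instead of searching by rule number.  Returns NULL if
 * no matching rule is found.
 */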
417 static struct pf_kpool *
418 pf_get_kpool(const char *anchor, u_int32_t ticket, u_int8_t rule_action,
419     u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
420     u_int8_t check_ticket)
421 {
422 	struct pf_kruleset	*ruleset;
423 	struct pf_krule		*rule;
424 	int			 rs_num;
425 
426 	ruleset = pf_find_kruleset(anchor);
427 	if (ruleset == NULL)
428 		return (NULL);
429 	rs_num = pf_get_ruleset_number(rule_action);
430 	if (rs_num >= PF_RULESET_MAX)
431 		return (NULL);
432 	if (active) {
433 		if (check_ticket && ticket !=
434 		    ruleset->rules[rs_num].active.ticket)
435 			return (NULL);
436 		if (r_last)
437 			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
438 			    pf_krulequeue);
439 		else
440 			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
441 	} else {
442 		if (check_ticket && ticket !=
443 		    ruleset->rules[rs_num].inactive.ticket)
444 			return (NULL);
445 		if (r_last)
446 			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
447 			    pf_krulequeue);
448 		else
449 			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
450 	}
451 	if (!r_last) {
452 		while ((rule != NULL) && (rule->nr != rule_number))
453 			rule = TAILQ_NEXT(rule, entries);
454 	}
455 	if (rule == NULL)
456 		return (NULL);
457 
458 	return (&rule->rpool);
459 }
460 
461 static void
462 pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb)
463 {
464 	struct pf_kpooladdr	*mv_pool_pa;
465 
466 	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
467 		TAILQ_REMOVE(poola, mv_pool_pa, entries);
468 		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
469 	}
470 }
471 
472 static void
473 pf_empty_kpool(struct pf_kpalist *poola)
474 {
475 	struct pf_kpooladdr *pa;
476 
477 	while ((pa = TAILQ_FIRST(poola)) != NULL) {
478 		switch (pa->addr.type) {
479 		case PF_ADDR_DYNIFTL:
480 			pfi_dynaddr_remove(pa->addr.p.dyn);
481 			break;
482 		case PF_ADDR_TABLE:
483 			/* XXX: this could be unfinished pooladdr on pabuf */
484 			if (pa->addr.p.tbl != NULL)
485 				pfr_detach_table(pa->addr.p.tbl);
486 			break;
487 		}
488 		if (pa->kif)
489 			pfi_kkif_unref(pa->kif);
490 		TAILQ_REMOVE(poola, pa, entries);
491 		free(pa, M_PFRULE);
492 	}
493 }
494 
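/*
 * Unlinking a rule does not free it immediately: the rule is tagged with
 * PFRULE_REFS and moved to V_pf_unlinked_rules, from where the purge
 * thread releases it once it is safe to do so.
 */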
495 static void
496 pf_unlink_rule_locked(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
497 {
498 
499 	PF_RULES_WASSERT();
500 	PF_UNLNKDRULES_ASSERT();
501 
502 	TAILQ_REMOVE(rulequeue, rule, entries);
503 
504 	rule->rule_ref |= PFRULE_REFS;
505 	TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
506 }
507 
508 static void
509 pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
510 {
511 
512 	PF_RULES_WASSERT();
513 
514 	PF_UNLNKDRULES_LOCK();
515 	pf_unlink_rule_locked(rulequeue, rule);
516 	PF_UNLNKDRULES_UNLOCK();
517 }
518 
519 static void
520 pf_free_eth_rule(struct pf_keth_rule *rule)
521 {
522 	PF_RULES_WASSERT();
523 
524 	if (rule == NULL)
525 		return;
526 
527 	if (rule->tag)
528 		tag_unref(&V_pf_tags, rule->tag);
529 	if (rule->match_tag)
530 		tag_unref(&V_pf_tags, rule->match_tag);
531 #ifdef ALTQ
532 	pf_qid_unref(rule->qid);
533 #endif
534 
535 	if (rule->bridge_to)
536 		pfi_kkif_unref(rule->bridge_to);
537 	if (rule->kif)
538 		pfi_kkif_unref(rule->kif);
539 
540 	if (rule->ipsrc.addr.type == PF_ADDR_TABLE)
541 		pfr_detach_table(rule->ipsrc.addr.p.tbl);
542 	if (rule->ipdst.addr.type == PF_ADDR_TABLE)
543 		pfr_detach_table(rule->ipdst.addr.p.tbl);
544 
545 	counter_u64_free(rule->evaluations);
546 	for (int i = 0; i < 2; i++) {
547 		counter_u64_free(rule->packets[i]);
548 		counter_u64_free(rule->bytes[i]);
549 	}
550 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
551 	pf_keth_anchor_remove(rule);
552 
553 	free(rule, M_PFRULE);
554 }
555 
556 void
557 pf_free_rule(struct pf_krule *rule)
558 {
559 
560 	PF_RULES_WASSERT();
561 	PF_CONFIG_ASSERT();
562 
563 	if (rule->tag)
564 		tag_unref(&V_pf_tags, rule->tag);
565 	if (rule->match_tag)
566 		tag_unref(&V_pf_tags, rule->match_tag);
567 #ifdef ALTQ
568 	if (rule->pqid != rule->qid)
569 		pf_qid_unref(rule->pqid);
570 	pf_qid_unref(rule->qid);
571 #endif
572 	switch (rule->src.addr.type) {
573 	case PF_ADDR_DYNIFTL:
574 		pfi_dynaddr_remove(rule->src.addr.p.dyn);
575 		break;
576 	case PF_ADDR_TABLE:
577 		pfr_detach_table(rule->src.addr.p.tbl);
578 		break;
579 	}
580 	switch (rule->dst.addr.type) {
581 	case PF_ADDR_DYNIFTL:
582 		pfi_dynaddr_remove(rule->dst.addr.p.dyn);
583 		break;
584 	case PF_ADDR_TABLE:
585 		pfr_detach_table(rule->dst.addr.p.tbl);
586 		break;
587 	}
588 	if (rule->overload_tbl)
589 		pfr_detach_table(rule->overload_tbl);
590 	if (rule->kif)
591 		pfi_kkif_unref(rule->kif);
592 	pf_kanchor_remove(rule);
593 	pf_empty_kpool(&rule->rpool.list);
594 
595 	pf_krule_free(rule);
596 }
597 
598 static void
599 pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size,
600     unsigned int default_size)
601 {
602 	unsigned int i;
603 	unsigned int hashsize;
604 
605 	if (*tunable_size == 0 || !powerof2(*tunable_size))
606 		*tunable_size = default_size;
607 
608 	hashsize = *tunable_size;
609 	ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH,
610 	    M_WAITOK);
611 	ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH,
612 	    M_WAITOK);
613 	ts->mask = hashsize - 1;
614 	ts->seed = arc4random();
615 	for (i = 0; i < hashsize; i++) {
616 		TAILQ_INIT(&ts->namehash[i]);
617 		TAILQ_INIT(&ts->taghash[i]);
618 	}
619 	BIT_FILL(TAGID_MAX, &ts->avail);
620 }
621 
622 static void
623 pf_cleanup_tagset(struct pf_tagset *ts)
624 {
625 	unsigned int i;
626 	unsigned int hashsize;
627 	struct pf_tagname *t, *tmp;
628 
629 	/*
630 	 * Only need to clean up one of the hashes as each tag is hashed
631 	 * into each table.
632 	 */
633 	hashsize = ts->mask + 1;
634 	for (i = 0; i < hashsize; i++)
635 		TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp)
636 			uma_zfree(V_pf_tag_z, t);
637 
638 	free(ts->namehash, M_PFHASH);
639 	free(ts->taghash, M_PFHASH);
640 }
641 
642 static uint16_t
643 tagname2hashindex(const struct pf_tagset *ts, const char *tagname)
644 {
645 	size_t len;
646 
647 	len = strnlen(tagname, PF_TAG_NAME_SIZE - 1);
648 	return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask);
649 }
650 
651 static uint16_t
652 tag2hashindex(const struct pf_tagset *ts, uint16_t tag)
653 {
654 
655 	return (tag & ts->mask);
656 }
657 
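/*
 * Return the tag id for the given name, allocating a new id if the name is
 * not yet known.  Entries are reference counted; 0 is returned when no id
 * is available or the entry cannot be allocated.
 */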
658 static u_int16_t
659 tagname2tag(struct pf_tagset *ts, const char *tagname)
660 {
661 	struct pf_tagname	*tag;
662 	u_int32_t		 index;
663 	u_int16_t		 new_tagid;
664 
665 	PF_RULES_WASSERT();
666 
667 	index = tagname2hashindex(ts, tagname);
668 	TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
669 		if (strcmp(tagname, tag->name) == 0) {
670 			tag->ref++;
671 			return (tag->tag);
672 		}
673 
674 	/*
675 	 * new entry
676 	 *
677 	 * to avoid fragmentation, we do a linear search from the beginning
678 	 * and take the first free slot we find.
679 	 */
680 	new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);
681 	/*
682 	 * Tags are 1-based, with valid tags in the range [1..TAGID_MAX].
683 	 * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits
684 	 * set.  It may also return a bit number greater than TAGID_MAX due
685 	 * to rounding of the number of bits in the vector up to a multiple
686 	 * of the vector word size at declaration/allocation time.
687 	 */
688 	if ((new_tagid == 0) || (new_tagid > TAGID_MAX))
689 		return (0);
690 
691 	/* Mark the tag as in use.  Bits are 0-based for BIT_CLR() */
692 	BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);
693 
694 	/* allocate and fill new struct pf_tagname */
695 	tag = uma_zalloc(V_pf_tag_z, M_NOWAIT);
696 	if (tag == NULL)
697 		return (0);
698 	strlcpy(tag->name, tagname, sizeof(tag->name));
699 	tag->tag = new_tagid;
700 	tag->ref = 1;
701 
702 	/* Insert into namehash */
703 	TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries);
704 
705 	/* Insert into taghash */
706 	index = tag2hashindex(ts, new_tagid);
707 	TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries);
708 
709 	return (tag->tag);
710 }
711 
712 static void
713 tag_unref(struct pf_tagset *ts, u_int16_t tag)
714 {
715 	struct pf_tagname	*t;
716 	uint16_t		 index;
717 
718 	PF_RULES_WASSERT();
719 
720 	index = tag2hashindex(ts, tag);
721 	TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
722 		if (tag == t->tag) {
723 			if (--t->ref == 0) {
724 				TAILQ_REMOVE(&ts->taghash[index], t,
725 				    taghash_entries);
726 				index = tagname2hashindex(ts, t->name);
727 				TAILQ_REMOVE(&ts->namehash[index], t,
728 				    namehash_entries);
729 				/* Bits are 0-based for BIT_SET() */
730 				BIT_SET(TAGID_MAX, tag - 1, &ts->avail);
731 				uma_zfree(V_pf_tag_z, t);
732 			}
733 			break;
734 		}
735 }
736 
737 static uint16_t
738 pf_tagname2tag(const char *tagname)
739 {
740 	return (tagname2tag(&V_pf_tags, tagname));
741 }
742 
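/*
 * Ethernet rules are loaded with the same three-step transaction as the
 * other rulesets: pf_begin_eth() empties the inactive list and hands out a
 * ticket, new rules are added to the inactive list under that ticket, and
 * pf_commit_eth() swaps the inactive and active lists (pf_rollback_eth()
 * discards the inactive list instead).
 */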
743 static int
744 pf_begin_eth(uint32_t *ticket, const char *anchor)
745 {
746 	struct pf_keth_rule *rule, *tmp;
747 	struct pf_keth_ruleset *rs;
748 
749 	PF_RULES_WASSERT();
750 
751 	rs = pf_find_or_create_keth_ruleset(anchor);
752 	if (rs == NULL)
753 		return (EINVAL);
754 
755 	/* Purge old inactive rules. */
756 	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
757 	    tmp) {
758 		TAILQ_REMOVE(rs->inactive.rules, rule,
759 		    entries);
760 		pf_free_eth_rule(rule);
761 	}
762 
763 	*ticket = ++rs->inactive.ticket;
764 	rs->inactive.open = 1;
765 
766 	return (0);
767 }
768 
769 static void
770 pf_rollback_eth_cb(struct epoch_context *ctx)
771 {
772 	struct pf_keth_ruleset *rs;
773 
774 	rs = __containerof(ctx, struct pf_keth_ruleset, epoch_ctx);
775 
776 	CURVNET_SET(rs->vnet);
777 
778 	PF_RULES_WLOCK();
779 	pf_rollback_eth(rs->inactive.ticket,
780 	    rs->anchor ? rs->anchor->path : "");
781 	PF_RULES_WUNLOCK();
782 
783 	CURVNET_RESTORE();
784 }
785 
786 static int
787 pf_rollback_eth(uint32_t ticket, const char *anchor)
788 {
789 	struct pf_keth_rule *rule, *tmp;
790 	struct pf_keth_ruleset *rs;
791 
792 	PF_RULES_WASSERT();
793 
794 	rs = pf_find_keth_ruleset(anchor);
795 	if (rs == NULL)
796 		return (EINVAL);
797 
798 	if (!rs->inactive.open ||
799 	    ticket != rs->inactive.ticket)
800 		return (0);
801 
802 	/* Purge old inactive rules. */
803 	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
804 	    tmp) {
805 		TAILQ_REMOVE(rs->inactive.rules, rule, entries);
806 		pf_free_eth_rule(rule);
807 	}
808 
809 	rs->inactive.open = 0;
810 
811 	pf_remove_if_empty_keth_ruleset(rs);
812 
813 	return (0);
814 }
815 
816 #define	PF_SET_SKIP_STEPS(i)					\
817 	do {							\
818 		while (head[i] != cur) {			\
819 			head[i]->skip[i].ptr = cur;		\
820 			head[i] = TAILQ_NEXT(head[i], entries);	\
821 		}						\
822 	} while (0)
823 
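/*
 * Precompute "skip steps" for an Ethernet ruleset: for each of the
 * PFE_SKIP_COUNT compared fields, every rule gets a pointer to the next
 * rule that differs in that field, so evaluation can skip over runs of
 * rules that would fail for the same reason.
 */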
824 static void
825 pf_eth_calc_skip_steps(struct pf_keth_ruleq *rules)
826 {
827 	struct pf_keth_rule *cur, *prev, *head[PFE_SKIP_COUNT];
828 	int i;
829 
830 	cur = TAILQ_FIRST(rules);
831 	prev = cur;
832 	for (i = 0; i < PFE_SKIP_COUNT; ++i)
833 		head[i] = cur;
834 	while (cur != NULL) {
835 		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
836 			PF_SET_SKIP_STEPS(PFE_SKIP_IFP);
837 		if (cur->direction != prev->direction)
838 			PF_SET_SKIP_STEPS(PFE_SKIP_DIR);
839 		if (cur->proto != prev->proto)
840 			PF_SET_SKIP_STEPS(PFE_SKIP_PROTO);
841 		if (memcmp(&cur->src, &prev->src, sizeof(cur->src)) != 0)
842 			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_ADDR);
843 		if (memcmp(&cur->dst, &prev->dst, sizeof(cur->dst)) != 0)
844 			PF_SET_SKIP_STEPS(PFE_SKIP_DST_ADDR);
845 
846 		prev = cur;
847 		cur = TAILQ_NEXT(cur, entries);
848 	}
849 	for (i = 0; i < PFE_SKIP_COUNT; ++i)
850 		PF_SET_SKIP_STEPS(i);
851 }
852 
853 static int
854 pf_commit_eth(uint32_t ticket, const char *anchor)
855 {
856 	struct pf_keth_ruleq *rules;
857 	struct pf_keth_ruleset *rs;
858 
859 	rs = pf_find_keth_ruleset(anchor);
860 	if (rs == NULL) {
861 		return (EINVAL);
862 	}
863 
864 	if (!rs->inactive.open ||
865 	    ticket != rs->inactive.ticket)
866 		return (EBUSY);
867 
868 	PF_RULES_WASSERT();
869 
870 	pf_eth_calc_skip_steps(rs->inactive.rules);
871 
872 	rules = rs->active.rules;
873 	ck_pr_store_ptr(&rs->active.rules, rs->inactive.rules);
874 	rs->inactive.rules = rules;
875 	rs->inactive.ticket = rs->active.ticket;
876 
877 	/* Clean up inactive rules (i.e. previously active rules), only when
878 	 * we're sure they're no longer used. */
879 	NET_EPOCH_CALL(pf_rollback_eth_cb, &rs->epoch_ctx);
880 
881 	return (0);
882 }
883 
884 #ifdef ALTQ
885 static uint16_t
886 pf_qname2qid(const char *qname)
887 {
888 	return (tagname2tag(&V_pf_qids, qname));
889 }
890 
891 static void
892 pf_qid_unref(uint16_t qid)
893 {
894 	tag_unref(&V_pf_qids, qid);
895 }
896 
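/*
 * ALTQ configuration uses the same ticket-based transaction as rulesets:
 * pf_begin_altq() clears the inactive queue lists and returns a ticket,
 * pf_commit_altq() swaps the inactive and active lists and attaches the
 * new disciplines, and pf_rollback_altq() throws the inactive lists away.
 */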
897 static int
898 pf_begin_altq(u_int32_t *ticket)
899 {
900 	struct pf_altq	*altq, *tmp;
901 	int		 error = 0;
902 
903 	PF_RULES_WASSERT();
904 
905 	/* Purge the old altq lists */
906 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
907 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
908 			/* detach and destroy the discipline */
909 			error = altq_remove(altq);
910 		}
911 		free(altq, M_PFALTQ);
912 	}
913 	TAILQ_INIT(V_pf_altq_ifs_inactive);
914 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
915 		pf_qid_unref(altq->qid);
916 		free(altq, M_PFALTQ);
917 	}
918 	TAILQ_INIT(V_pf_altqs_inactive);
919 	if (error)
920 		return (error);
921 	*ticket = ++V_ticket_altqs_inactive;
922 	V_altqs_inactive_open = 1;
923 	return (0);
924 }
925 
926 static int
927 pf_rollback_altq(u_int32_t ticket)
928 {
929 	struct pf_altq	*altq, *tmp;
930 	int		 error = 0;
931 
932 	PF_RULES_WASSERT();
933 
934 	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
935 		return (0);
936 	/* Purge the old altq lists */
937 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
938 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
939 			/* detach and destroy the discipline */
940 			error = altq_remove(altq);
941 		}
942 		free(altq, M_PFALTQ);
943 	}
944 	TAILQ_INIT(V_pf_altq_ifs_inactive);
945 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
946 		pf_qid_unref(altq->qid);
947 		free(altq, M_PFALTQ);
948 	}
949 	TAILQ_INIT(V_pf_altqs_inactive);
950 	V_altqs_inactive_open = 0;
951 	return (error);
952 }
953 
954 static int
955 pf_commit_altq(u_int32_t ticket)
956 {
957 	struct pf_altqqueue	*old_altqs, *old_altq_ifs;
958 	struct pf_altq		*altq, *tmp;
959 	int			 err, error = 0;
960 
961 	PF_RULES_WASSERT();
962 
963 	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
964 		return (EBUSY);
965 
966 	/* swap altqs, keep the old. */
967 	old_altqs = V_pf_altqs_active;
968 	old_altq_ifs = V_pf_altq_ifs_active;
969 	V_pf_altqs_active = V_pf_altqs_inactive;
970 	V_pf_altq_ifs_active = V_pf_altq_ifs_inactive;
971 	V_pf_altqs_inactive = old_altqs;
972 	V_pf_altq_ifs_inactive = old_altq_ifs;
973 	V_ticket_altqs_active = V_ticket_altqs_inactive;
974 
975 	/* Attach new disciplines */
976 	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
977 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
978 			/* attach the discipline */
979 			error = altq_pfattach(altq);
980 			if (error == 0 && V_pf_altq_running)
981 				error = pf_enable_altq(altq);
982 			if (error != 0)
983 				return (error);
984 		}
985 	}
986 
987 	/* Purge the old altq lists */
988 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
989 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
990 			/* detach and destroy the discipline */
991 			if (V_pf_altq_running)
992 				error = pf_disable_altq(altq);
993 			err = altq_pfdetach(altq);
994 			if (err != 0 && error == 0)
995 				error = err;
996 			err = altq_remove(altq);
997 			if (err != 0 && error == 0)
998 				error = err;
999 		}
1000 		free(altq, M_PFALTQ);
1001 	}
1002 	TAILQ_INIT(V_pf_altq_ifs_inactive);
1003 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
1004 		pf_qid_unref(altq->qid);
1005 		free(altq, M_PFALTQ);
1006 	}
1007 	TAILQ_INIT(V_pf_altqs_inactive);
1008 
1009 	V_altqs_inactive_open = 0;
1010 	return (error);
1011 }
1012 
1013 static int
1014 pf_enable_altq(struct pf_altq *altq)
1015 {
1016 	struct ifnet		*ifp;
1017 	struct tb_profile	 tb;
1018 	int			 error = 0;
1019 
1020 	if ((ifp = ifunit(altq->ifname)) == NULL)
1021 		return (EINVAL);
1022 
1023 	if (ifp->if_snd.altq_type != ALTQT_NONE)
1024 		error = altq_enable(&ifp->if_snd);
1025 
1026 	/* set tokenbucket regulator */
1027 	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
1028 		tb.rate = altq->ifbandwidth;
1029 		tb.depth = altq->tbrsize;
1030 		error = tbr_set(&ifp->if_snd, &tb);
1031 	}
1032 
1033 	return (error);
1034 }
1035 
1036 static int
1037 pf_disable_altq(struct pf_altq *altq)
1038 {
1039 	struct ifnet		*ifp;
1040 	struct tb_profile	 tb;
1041 	int			 error;
1042 
1043 	if ((ifp = ifunit(altq->ifname)) == NULL)
1044 		return (EINVAL);
1045 
1046 	/*
1047 	 * When the discipline is no longer referenced, it has been overridden
1048 	 * by a new one.  If so, just return.
1049 	 */
1050 	if (altq->altq_disc != ifp->if_snd.altq_disc)
1051 		return (0);
1052 
1053 	error = altq_disable(&ifp->if_snd);
1054 
1055 	if (error == 0) {
1056 		/* clear tokenbucket regulator */
1057 		tb.rate = 0;
1058 		error = tbr_set(&ifp->if_snd, &tb);
1059 	}
1060 
1061 	return (error);
1062 }
1063 
1064 static int
1065 pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket,
1066     struct pf_altq *altq)
1067 {
1068 	struct ifnet	*ifp1;
1069 	int		 error = 0;
1070 
1071 	/* Deactivate the interface in question */
1072 	altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
1073 	if ((ifp1 = ifunit(altq->ifname)) == NULL ||
1074 	    (remove && ifp1 == ifp)) {
1075 		altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
1076 	} else {
1077 		error = altq_add(ifp1, altq);
1078 
1079 		if (ticket != V_ticket_altqs_inactive)
1080 			error = EBUSY;
1081 
1082 		if (error)
1083 			free(altq, M_PFALTQ);
1084 	}
1085 
1086 	return (error);
1087 }
1088 
1089 void
1090 pf_altq_ifnet_event(struct ifnet *ifp, int remove)
1091 {
1092 	struct pf_altq	*a1, *a2, *a3;
1093 	u_int32_t	 ticket;
1094 	int		 error = 0;
1095 
1096 	/*
1097 	 * No need to re-evaluate the configuration for events on interfaces
1098 	 * that do not support ALTQ, as it's not possible for such
1099 	 * interfaces to be part of the configuration.
1100 	 */
1101 	if (!ALTQ_IS_READY(&ifp->if_snd))
1102 		return;
1103 
1104 	/* Interrupt userland queue modifications */
1105 	if (V_altqs_inactive_open)
1106 		pf_rollback_altq(V_ticket_altqs_inactive);
1107 
1108 	/* Start new altq ruleset */
1109 	if (pf_begin_altq(&ticket))
1110 		return;
1111 
1112 	/* Copy the current active set */
1113 	TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) {
1114 		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
1115 		if (a2 == NULL) {
1116 			error = ENOMEM;
1117 			break;
1118 		}
1119 		bcopy(a1, a2, sizeof(struct pf_altq));
1120 
1121 		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
1122 		if (error)
1123 			break;
1124 
1125 		TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries);
1126 	}
1127 	if (error)
1128 		goto out;
1129 	TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
1130 		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
1131 		if (a2 == NULL) {
1132 			error = ENOMEM;
1133 			break;
1134 		}
1135 		bcopy(a1, a2, sizeof(struct pf_altq));
1136 
1137 		if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
1138 			error = EBUSY;
1139 			free(a2, M_PFALTQ);
1140 			break;
1141 		}
1142 		a2->altq_disc = NULL;
1143 		TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) {
1144 			if (strncmp(a3->ifname, a2->ifname,
1145 				IFNAMSIZ) == 0) {
1146 				a2->altq_disc = a3->altq_disc;
1147 				break;
1148 			}
1149 		}
1150 		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
1151 		if (error)
1152 			break;
1153 
1154 		TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
1155 	}
1156 
1157 out:
1158 	if (error != 0)
1159 		pf_rollback_altq(ticket);
1160 	else
1161 		pf_commit_altq(ticket);
1162 }
1163 #endif /* ALTQ */
1164 
1165 static struct pf_krule_global *
1166 pf_rule_tree_alloc(int flags)
1167 {
1168 	struct pf_krule_global *tree;
1169 
1170 	tree = malloc(sizeof(struct pf_krule_global), M_TEMP, flags);
1171 	if (tree == NULL)
1172 		return (NULL);
1173 	RB_INIT(tree);
1174 	return (tree);
1175 }
1176 
1177 static void
1178 pf_rule_tree_free(struct pf_krule_global *tree)
1179 {
1180 
1181 	free(tree, M_TEMP);
1182 }
1183 
1184 static int
1185 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
1186 {
1187 	struct pf_krule_global *tree;
1188 	struct pf_kruleset	*rs;
1189 	struct pf_krule		*rule;
1190 
1191 	PF_RULES_WASSERT();
1192 
1193 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1194 		return (EINVAL);
1195 	tree = pf_rule_tree_alloc(M_NOWAIT);
1196 	if (tree == NULL)
1197 		return (ENOMEM);
1198 	rs = pf_find_or_create_kruleset(anchor);
1199 	if (rs == NULL) {
1200 		free(tree, M_TEMP);
1201 		return (EINVAL);
1202 	}
1203 	pf_rule_tree_free(rs->rules[rs_num].inactive.tree);
1204 	rs->rules[rs_num].inactive.tree = tree;
1205 
1206 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
1207 		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
1208 		rs->rules[rs_num].inactive.rcount--;
1209 	}
1210 	*ticket = ++rs->rules[rs_num].inactive.ticket;
1211 	rs->rules[rs_num].inactive.open = 1;
1212 	return (0);
1213 }
1214 
1215 static int
1216 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
1217 {
1218 	struct pf_kruleset	*rs;
1219 	struct pf_krule		*rule;
1220 
1221 	PF_RULES_WASSERT();
1222 
1223 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1224 		return (EINVAL);
1225 	rs = pf_find_kruleset(anchor);
1226 	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1227 	    rs->rules[rs_num].inactive.ticket != ticket)
1228 		return (0);
1229 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
1230 		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
1231 		rs->rules[rs_num].inactive.rcount--;
1232 	}
1233 	rs->rules[rs_num].inactive.open = 0;
1234 	return (0);
1235 }
1236 
1237 #define PF_MD5_UPD(st, elm)						\
1238 		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))
1239 
1240 #define PF_MD5_UPD_STR(st, elm)						\
1241 		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))
1242 
1243 #define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
1244 		(stor) = htonl((st)->elm);				\
1245 		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
1246 } while (0)
1247 
1248 #define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
1249 		(stor) = htons((st)->elm);				\
1250 		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
1251 } while (0)
1252 
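/*
 * The MD5 digests built with the PF_MD5_UPD*() macros serve two purposes:
 * the rolling per-ruleset hash becomes the checksum exported for pfsync
 * (see pf_setup_pfsync_matching()), and the per-rule digest is the key of
 * the pf_krule_global tree used to match old and new rules when counters
 * are preserved across a commit.
 */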
1253 static void
1254 pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
1255 {
1256 	PF_MD5_UPD(pfr, addr.type);
1257 	switch (pfr->addr.type) {
1258 		case PF_ADDR_DYNIFTL:
1259 			PF_MD5_UPD(pfr, addr.v.ifname);
1260 			PF_MD5_UPD(pfr, addr.iflags);
1261 			break;
1262 		case PF_ADDR_TABLE:
1263 			PF_MD5_UPD(pfr, addr.v.tblname);
1264 			break;
1265 		case PF_ADDR_ADDRMASK:
1266 			/* XXX ignore af? */
1267 			PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
1268 			PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
1269 			break;
1270 	}
1271 
1272 	PF_MD5_UPD(pfr, port[0]);
1273 	PF_MD5_UPD(pfr, port[1]);
1274 	PF_MD5_UPD(pfr, neg);
1275 	PF_MD5_UPD(pfr, port_op);
1276 }
1277 
1278 static void
1279 pf_hash_rule_rolling(MD5_CTX *ctx, struct pf_krule *rule)
1280 {
1281 	u_int16_t x;
1282 	u_int32_t y;
1283 
1284 	pf_hash_rule_addr(ctx, &rule->src);
1285 	pf_hash_rule_addr(ctx, &rule->dst);
1286 	for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++)
1287 		PF_MD5_UPD_STR(rule, label[i]);
1288 	PF_MD5_UPD_STR(rule, ifname);
1289 	PF_MD5_UPD_STR(rule, match_tagname);
1290 	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
1291 	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
1292 	PF_MD5_UPD_HTONL(rule, prob, y);
1293 	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
1294 	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
1295 	PF_MD5_UPD(rule, uid.op);
1296 	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
1297 	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
1298 	PF_MD5_UPD(rule, gid.op);
1299 	PF_MD5_UPD_HTONL(rule, rule_flag, y);
1300 	PF_MD5_UPD(rule, action);
1301 	PF_MD5_UPD(rule, direction);
1302 	PF_MD5_UPD(rule, af);
1303 	PF_MD5_UPD(rule, quick);
1304 	PF_MD5_UPD(rule, ifnot);
1305 	PF_MD5_UPD(rule, match_tag_not);
1306 	PF_MD5_UPD(rule, natpass);
1307 	PF_MD5_UPD(rule, keep_state);
1308 	PF_MD5_UPD(rule, proto);
1309 	PF_MD5_UPD(rule, type);
1310 	PF_MD5_UPD(rule, code);
1311 	PF_MD5_UPD(rule, flags);
1312 	PF_MD5_UPD(rule, flagset);
1313 	PF_MD5_UPD(rule, allow_opts);
1314 	PF_MD5_UPD(rule, rt);
1315 	PF_MD5_UPD(rule, tos);
1316 	PF_MD5_UPD(rule, scrub_flags);
1317 	PF_MD5_UPD(rule, min_ttl);
1318 	PF_MD5_UPD(rule, set_tos);
1319 	if (rule->anchor != NULL)
1320 		PF_MD5_UPD_STR(rule, anchor->path);
1321 }
1322 
1323 static void
1324 pf_hash_rule(struct pf_krule *rule)
1325 {
1326 	MD5_CTX		ctx;
1327 
1328 	MD5Init(&ctx);
1329 	pf_hash_rule_rolling(&ctx, rule);
1330 	MD5Final(rule->md5sum, &ctx);
1331 }
1332 
1333 static int
1334 pf_krule_compare(struct pf_krule *a, struct pf_krule *b)
1335 {
1336 
1337 	return (memcmp(a->md5sum, b->md5sum, PF_MD5_DIGEST_LENGTH));
1338 }
1339 
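/*
 * Make the inactive ruleset the active one: verify the caller's ticket,
 * swap the active and inactive rule queues, carry over counters for rules
 * that also existed in the old ruleset (matched via their MD5 digest),
 * recalculate skip steps and queue the old rules for deferred release.
 */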
1340 static int
1341 pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
1342 {
1343 	struct pf_kruleset	*rs;
1344 	struct pf_krule		*rule, **old_array, *old_rule;
1345 	struct pf_krulequeue	*old_rules;
1346 	struct pf_krule_global  *old_tree;
1347 	int			 error;
1348 	u_int32_t		 old_rcount;
1349 
1350 	PF_RULES_WASSERT();
1351 
1352 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1353 		return (EINVAL);
1354 	rs = pf_find_kruleset(anchor);
1355 	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1356 	    ticket != rs->rules[rs_num].inactive.ticket)
1357 		return (EBUSY);
1358 
1359 	/* Calculate checksum for the main ruleset */
1360 	if (rs == &pf_main_ruleset) {
1361 		error = pf_setup_pfsync_matching(rs);
1362 		if (error != 0)
1363 			return (error);
1364 	}
1365 
1366 	/* Swap rules, keep the old. */
1367 	old_rules = rs->rules[rs_num].active.ptr;
1368 	old_rcount = rs->rules[rs_num].active.rcount;
1369 	old_array = rs->rules[rs_num].active.ptr_array;
1370 	old_tree = rs->rules[rs_num].active.tree;
1371 
1372 	rs->rules[rs_num].active.ptr =
1373 	    rs->rules[rs_num].inactive.ptr;
1374 	rs->rules[rs_num].active.ptr_array =
1375 	    rs->rules[rs_num].inactive.ptr_array;
1376 	rs->rules[rs_num].active.tree =
1377 	    rs->rules[rs_num].inactive.tree;
1378 	rs->rules[rs_num].active.rcount =
1379 	    rs->rules[rs_num].inactive.rcount;
1380 
1381 	/* Attempt to preserve counter information. */
1382 	if (V_pf_status.keep_counters && old_tree != NULL) {
1383 		TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr,
1384 		    entries) {
1385 			old_rule = RB_FIND(pf_krule_global, old_tree, rule);
1386 			if (old_rule == NULL) {
1387 				continue;
1388 			}
1389 			pf_counter_u64_critical_enter();
1390 			pf_counter_u64_add_protected(&rule->evaluations,
1391 			    pf_counter_u64_fetch(&old_rule->evaluations));
1392 			pf_counter_u64_add_protected(&rule->packets[0],
1393 			    pf_counter_u64_fetch(&old_rule->packets[0]));
1394 			pf_counter_u64_add_protected(&rule->packets[1],
1395 			    pf_counter_u64_fetch(&old_rule->packets[1]));
1396 			pf_counter_u64_add_protected(&rule->bytes[0],
1397 			    pf_counter_u64_fetch(&old_rule->bytes[0]));
1398 			pf_counter_u64_add_protected(&rule->bytes[1],
1399 			    pf_counter_u64_fetch(&old_rule->bytes[1]));
1400 			pf_counter_u64_critical_exit();
1401 		}
1402 	}
1403 
1404 	rs->rules[rs_num].inactive.ptr = old_rules;
1405 	rs->rules[rs_num].inactive.ptr_array = old_array;
1406 	rs->rules[rs_num].inactive.tree = NULL; /* important for pf_ioctl_addrule */
1407 	rs->rules[rs_num].inactive.rcount = old_rcount;
1408 
1409 	rs->rules[rs_num].active.ticket =
1410 	    rs->rules[rs_num].inactive.ticket;
1411 	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);
1412 
1413 	/* Purge the old rule list. */
1414 	PF_UNLNKDRULES_LOCK();
1415 	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
1416 		pf_unlink_rule_locked(old_rules, rule);
1417 	PF_UNLNKDRULES_UNLOCK();
1418 	if (rs->rules[rs_num].inactive.ptr_array)
1419 		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
1420 	rs->rules[rs_num].inactive.ptr_array = NULL;
1421 	rs->rules[rs_num].inactive.rcount = 0;
1422 	rs->rules[rs_num].inactive.open = 0;
1423 	pf_remove_if_empty_kruleset(rs);
1424 	free(old_tree, M_TEMP);
1425 
1426 	return (0);
1427 }
1428 
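/*
 * Compute the ruleset checksum used by pfsync to verify that both peers
 * run the same ruleset: the inactive (about to be committed) rules of
 * every ruleset type except scrub are folded into one MD5 digest stored
 * in V_pf_status.pf_chksum.  The per-ruleset ptr_array, indexed by rule
 * number, is rebuilt as a side effect.
 */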
1429 static int
1430 pf_setup_pfsync_matching(struct pf_kruleset *rs)
1431 {
1432 	MD5_CTX			 ctx;
1433 	struct pf_krule		*rule;
1434 	int			 rs_cnt;
1435 	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];
1436 
1437 	MD5Init(&ctx);
1438 	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
1439 		/* XXX PF_RULESET_SCRUB as well? */
1440 		if (rs_cnt == PF_RULESET_SCRUB)
1441 			continue;
1442 
1443 		if (rs->rules[rs_cnt].inactive.ptr_array)
1444 			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
1445 		rs->rules[rs_cnt].inactive.ptr_array = NULL;
1446 
1447 		if (rs->rules[rs_cnt].inactive.rcount) {
1448 			rs->rules[rs_cnt].inactive.ptr_array =
1449 			    malloc(sizeof(caddr_t) *
1450 			    rs->rules[rs_cnt].inactive.rcount,
1451 			    M_TEMP, M_NOWAIT);
1452 
1453 			if (!rs->rules[rs_cnt].inactive.ptr_array)
1454 				return (ENOMEM);
1455 		}
1456 
1457 		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
1458 		    entries) {
1459 			pf_hash_rule_rolling(&ctx, rule);
1460 			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
1461 		}
1462 	}
1463 
1464 	MD5Final(digest, &ctx);
1465 	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
1466 	return (0);
1467 }
1468 
1469 static int
1470 pf_eth_addr_setup(struct pf_keth_ruleset *ruleset, struct pf_addr_wrap *addr)
1471 {
1472 	int error = 0;
1473 
1474 	switch (addr->type) {
1475 	case PF_ADDR_TABLE:
1476 		addr->p.tbl = pfr_eth_attach_table(ruleset, addr->v.tblname);
1477 		if (addr->p.tbl == NULL)
1478 			error = ENOMEM;
1479 		break;
1480 	default:
1481 		error = EINVAL;
1482 	}
1483 
1484 	return (error);
1485 }
1486 
1487 static int
1488 pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr,
1489     sa_family_t af)
1490 {
1491 	int error = 0;
1492 
1493 	switch (addr->type) {
1494 	case PF_ADDR_TABLE:
1495 		addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
1496 		if (addr->p.tbl == NULL)
1497 			error = ENOMEM;
1498 		break;
1499 	case PF_ADDR_DYNIFTL:
1500 		error = pfi_dynaddr_setup(addr, af);
1501 		break;
1502 	}
1503 
1504 	return (error);
1505 }
1506 
1507 static void
1508 pf_addr_copyout(struct pf_addr_wrap *addr)
1509 {
1510 
1511 	switch (addr->type) {
1512 	case PF_ADDR_DYNIFTL:
1513 		pfi_dynaddr_copyout(addr);
1514 		break;
1515 	case PF_ADDR_TABLE:
1516 		pf_tbladdr_copyout(addr);
1517 		break;
1518 	}
1519 }
1520 
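/*
 * Export a kernel source node to the structure shared with userland,
 * converting counters to plain integers and absolute times to relative
 * ones (age and remaining lifetime in seconds).
 */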
1521 static void
1522 pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out)
1523 {
1524 	int	secs = time_uptime, diff;
1525 
1526 	bzero(out, sizeof(struct pf_src_node));
1527 
1528 	bcopy(&in->addr, &out->addr, sizeof(struct pf_addr));
1529 	bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr));
1530 
1531 	if (in->rule.ptr != NULL)
1532 		out->rule.nr = in->rule.ptr->nr;
1533 
1534 	for (int i = 0; i < 2; i++) {
1535 		out->bytes[i] = counter_u64_fetch(in->bytes[i]);
1536 		out->packets[i] = counter_u64_fetch(in->packets[i]);
1537 	}
1538 
1539 	out->states = in->states;
1540 	out->conn = in->conn;
1541 	out->af = in->af;
1542 	out->ruletype = in->ruletype;
1543 
1544 	out->creation = secs - in->creation;
1545 	if (in->expire > secs)
1546 		out->expire = in->expire - secs;
1547 	else
1548 		out->expire = 0;
1549 
1550 	/* Adjust the connection rate estimate. */
1551 	diff = secs - in->conn_rate.last;
1552 	if (diff >= in->conn_rate.seconds)
1553 		out->conn_rate.count = 0;
1554 	else
1555 		out->conn_rate.count = in->conn_rate.count -
1556 		    in->conn_rate.count * diff /
1557 		    in->conn_rate.seconds;
1558 }
1559 
1560 #ifdef ALTQ
1561 /*
1562  * Handle export of struct pf_kaltq to user binaries that may be using any
1563  * version of struct pf_altq.
1564  */
1565 static int
1566 pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size)
1567 {
1568 	u_int32_t version;
1569 
1570 	if (ioc_size == sizeof(struct pfioc_altq_v0))
1571 		version = 0;
1572 	else
1573 		version = pa->version;
1574 
1575 	if (version > PFIOC_ALTQ_VERSION)
1576 		return (EINVAL);
1577 
1578 #define ASSIGN(x) exported_q->x = q->x
1579 #define COPY(x) \
1580 	bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x)))
1581 #define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX)
1582 #define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX)
1583 
1584 	switch (version) {
1585 	case 0: {
1586 		struct pf_altq_v0 *exported_q =
1587 		    &((struct pfioc_altq_v0 *)pa)->altq;
1588 
1589 		COPY(ifname);
1590 
1591 		ASSIGN(scheduler);
1592 		ASSIGN(tbrsize);
1593 		exported_q->tbrsize = SATU16(q->tbrsize);
1594 		exported_q->ifbandwidth = SATU32(q->ifbandwidth);
1595 
1596 		COPY(qname);
1597 		COPY(parent);
1598 		ASSIGN(parent_qid);
1599 		exported_q->bandwidth = SATU32(q->bandwidth);
1600 		ASSIGN(priority);
1601 		ASSIGN(local_flags);
1602 
1603 		ASSIGN(qlimit);
1604 		ASSIGN(flags);
1605 
1606 		if (q->scheduler == ALTQT_HFSC) {
1607 #define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x
1608 #define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \
1609 			    SATU32(q->pq_u.hfsc_opts.x)
1610 
1611 			ASSIGN_OPT_SATU32(rtsc_m1);
1612 			ASSIGN_OPT(rtsc_d);
1613 			ASSIGN_OPT_SATU32(rtsc_m2);
1614 
1615 			ASSIGN_OPT_SATU32(lssc_m1);
1616 			ASSIGN_OPT(lssc_d);
1617 			ASSIGN_OPT_SATU32(lssc_m2);
1618 
1619 			ASSIGN_OPT_SATU32(ulsc_m1);
1620 			ASSIGN_OPT(ulsc_d);
1621 			ASSIGN_OPT_SATU32(ulsc_m2);
1622 
1623 			ASSIGN_OPT(flags);
1624 
1625 #undef ASSIGN_OPT
1626 #undef ASSIGN_OPT_SATU32
1627 		} else
1628 			COPY(pq_u);
1629 
1630 		ASSIGN(qid);
1631 		break;
1632 	}
1633 	case 1:	{
1634 		struct pf_altq_v1 *exported_q =
1635 		    &((struct pfioc_altq_v1 *)pa)->altq;
1636 
1637 		COPY(ifname);
1638 
1639 		ASSIGN(scheduler);
1640 		ASSIGN(tbrsize);
1641 		ASSIGN(ifbandwidth);
1642 
1643 		COPY(qname);
1644 		COPY(parent);
1645 		ASSIGN(parent_qid);
1646 		ASSIGN(bandwidth);
1647 		ASSIGN(priority);
1648 		ASSIGN(local_flags);
1649 
1650 		ASSIGN(qlimit);
1651 		ASSIGN(flags);
1652 		COPY(pq_u);
1653 
1654 		ASSIGN(qid);
1655 		break;
1656 	}
1657 	default:
1658 		panic("%s: unhandled struct pfioc_altq version", __func__);
1659 		break;
1660 	}
1661 
1662 #undef ASSIGN
1663 #undef COPY
1664 #undef SATU16
1665 #undef SATU32
1666 
1667 	return (0);
1668 }
1669 
1670 /*
1671  * Handle import to struct pf_kaltq of struct pf_altq from user binaries
1672  * that may be using any version of it.
1673  */
1674 static int
1675 pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size)
1676 {
1677 	u_int32_t version;
1678 
1679 	if (ioc_size == sizeof(struct pfioc_altq_v0))
1680 		version = 0;
1681 	else
1682 		version = pa->version;
1683 
1684 	if (version > PFIOC_ALTQ_VERSION)
1685 		return (EINVAL);
1686 
1687 #define ASSIGN(x) q->x = imported_q->x
1688 #define COPY(x) \
1689 	bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x)))
1690 
1691 	switch (version) {
1692 	case 0: {
1693 		struct pf_altq_v0 *imported_q =
1694 		    &((struct pfioc_altq_v0 *)pa)->altq;
1695 
1696 		COPY(ifname);
1697 
1698 		ASSIGN(scheduler);
1699 		ASSIGN(tbrsize); /* 16-bit -> 32-bit */
1700 		ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */
1701 
1702 		COPY(qname);
1703 		COPY(parent);
1704 		ASSIGN(parent_qid);
1705 		ASSIGN(bandwidth); /* 32-bit -> 64-bit */
1706 		ASSIGN(priority);
1707 		ASSIGN(local_flags);
1708 
1709 		ASSIGN(qlimit);
1710 		ASSIGN(flags);
1711 
1712 		if (imported_q->scheduler == ALTQT_HFSC) {
1713 #define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x
1714 
1715 			/*
1716 			 * The m1 and m2 parameters are being copied from
1717 			 * 32-bit to 64-bit.
1718 			 */
1719 			ASSIGN_OPT(rtsc_m1);
1720 			ASSIGN_OPT(rtsc_d);
1721 			ASSIGN_OPT(rtsc_m2);
1722 
1723 			ASSIGN_OPT(lssc_m1);
1724 			ASSIGN_OPT(lssc_d);
1725 			ASSIGN_OPT(lssc_m2);
1726 
1727 			ASSIGN_OPT(ulsc_m1);
1728 			ASSIGN_OPT(ulsc_d);
1729 			ASSIGN_OPT(ulsc_m2);
1730 
1731 			ASSIGN_OPT(flags);
1732 
1733 #undef ASSIGN_OPT
1734 		} else
1735 			COPY(pq_u);
1736 
1737 		ASSIGN(qid);
1738 		break;
1739 	}
1740 	case 1: {
1741 		struct pf_altq_v1 *imported_q =
1742 		    &((struct pfioc_altq_v1 *)pa)->altq;
1743 
1744 		COPY(ifname);
1745 
1746 		ASSIGN(scheduler);
1747 		ASSIGN(tbrsize);
1748 		ASSIGN(ifbandwidth);
1749 
1750 		COPY(qname);
1751 		COPY(parent);
1752 		ASSIGN(parent_qid);
1753 		ASSIGN(bandwidth);
1754 		ASSIGN(priority);
1755 		ASSIGN(local_flags);
1756 
1757 		ASSIGN(qlimit);
1758 		ASSIGN(flags);
1759 		COPY(pq_u);
1760 
1761 		ASSIGN(qid);
1762 		break;
1763 	}
1764 	default:
1765 		panic("%s: unhandled struct pfioc_altq version", __func__);
1766 		break;
1767 	}
1768 
1769 #undef ASSIGN
1770 #undef COPY
1771 
1772 	return (0);
1773 }
1774 
1775 static struct pf_altq *
1776 pf_altq_get_nth_active(u_int32_t n)
1777 {
1778 	struct pf_altq		*altq;
1779 	u_int32_t		 nr;
1780 
1781 	nr = 0;
1782 	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
1783 		if (nr == n)
1784 			return (altq);
1785 		nr++;
1786 	}
1787 
1788 	TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
1789 		if (nr == n)
1790 			return (altq);
1791 		nr++;
1792 	}
1793 
1794 	return (NULL);
1795 }
1796 #endif /* ALTQ */
1797 
1798 struct pf_krule *
1799 pf_krule_alloc(void)
1800 {
1801 	struct pf_krule *rule;
1802 
1803 	rule = malloc(sizeof(struct pf_krule), M_PFRULE, M_WAITOK | M_ZERO);
1804 	mtx_init(&rule->rpool.mtx, "pf_krule_pool", NULL, MTX_DEF);
1805 	rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
1806 	    M_WAITOK | M_ZERO);
1807 	return (rule);
1808 }
1809 
1810 void
1811 pf_krule_free(struct pf_krule *rule)
1812 {
1813 #ifdef PF_WANT_32_TO_64_COUNTER
1814 	bool wowned;
1815 #endif
1816 
1817 	if (rule == NULL)
1818 		return;
1819 
1820 #ifdef PF_WANT_32_TO_64_COUNTER
1821 	if (rule->allrulelinked) {
1822 		wowned = PF_RULES_WOWNED();
1823 		if (!wowned)
1824 			PF_RULES_WLOCK();
1825 		LIST_REMOVE(rule, allrulelist);
1826 		V_pf_allrulecount--;
1827 		if (!wowned)
1828 			PF_RULES_WUNLOCK();
1829 	}
1830 #endif
1831 
1832 	pf_counter_u64_deinit(&rule->evaluations);
1833 	for (int i = 0; i < 2; i++) {
1834 		pf_counter_u64_deinit(&rule->packets[i]);
1835 		pf_counter_u64_deinit(&rule->bytes[i]);
1836 	}
1837 	counter_u64_free(rule->states_cur);
1838 	counter_u64_free(rule->states_tot);
1839 	counter_u64_free(rule->src_nodes);
1840 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
1841 
1842 	mtx_destroy(&rule->rpool.mtx);
1843 	free(rule, M_PFRULE);
1844 }
1845 
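/*
 * Conversion helpers between the kernel-only representations (pf_kpooladdr,
 * pf_kpool, pf_krule) and the corresponding structures exchanged with
 * userland through the ioctl interface.
 */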
1846 static void
1847 pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool,
1848     struct pf_pooladdr *pool)
1849 {
1850 
1851 	bzero(pool, sizeof(*pool));
1852 	bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr));
1853 	strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname));
1854 }
1855 
1856 static int
1857 pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool,
1858     struct pf_kpooladdr *kpool)
1859 {
1860 	int ret;
1861 
1862 	bzero(kpool, sizeof(*kpool));
1863 	bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr));
1864 	ret = pf_user_strcpy(kpool->ifname, pool->ifname,
1865 	    sizeof(kpool->ifname));
1866 	return (ret);
1867 }
1868 
1869 static void
1870 pf_kpool_to_pool(const struct pf_kpool *kpool, struct pf_pool *pool)
1871 {
1872 	bzero(pool, sizeof(*pool));
1873 
1874 	bcopy(&kpool->key, &pool->key, sizeof(pool->key));
1875 	bcopy(&kpool->counter, &pool->counter, sizeof(pool->counter));
1876 
1877 	pool->tblidx = kpool->tblidx;
1878 	pool->proxy_port[0] = kpool->proxy_port[0];
1879 	pool->proxy_port[1] = kpool->proxy_port[1];
1880 	pool->opts = kpool->opts;
1881 }
1882 
1883 static void
1884 pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool)
1885 {
1886 	_Static_assert(sizeof(pool->key) == sizeof(kpool->key), "");
1887 	_Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), "");
1888 
1889 	bcopy(&pool->key, &kpool->key, sizeof(kpool->key));
1890 	bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter));
1891 
1892 	kpool->tblidx = pool->tblidx;
1893 	kpool->proxy_port[0] = pool->proxy_port[0];
1894 	kpool->proxy_port[1] = pool->proxy_port[1];
1895 	kpool->opts = pool->opts;
1896 }
1897 
1898 static void
1899 pf_krule_to_rule(const struct pf_krule *krule, struct pf_rule *rule)
1900 {
1901 
1902 	bzero(rule, sizeof(*rule));
1903 
1904 	bcopy(&krule->src, &rule->src, sizeof(rule->src));
1905 	bcopy(&krule->dst, &rule->dst, sizeof(rule->dst));
1906 
1907 	for (int i = 0; i < PF_SKIP_COUNT; ++i) {
1908 		if (krule->skip[i].ptr == NULL)
1909 			rule->skip[i].nr = -1;
1910 		else
1911 			rule->skip[i].nr = krule->skip[i].ptr->nr;
1912 	}
1913 
1914 	strlcpy(rule->label, krule->label[0], sizeof(rule->label));
1915 	strlcpy(rule->ifname, krule->ifname, sizeof(rule->ifname));
1916 	strlcpy(rule->qname, krule->qname, sizeof(rule->qname));
1917 	strlcpy(rule->pqname, krule->pqname, sizeof(rule->pqname));
1918 	strlcpy(rule->tagname, krule->tagname, sizeof(rule->tagname));
1919 	strlcpy(rule->match_tagname, krule->match_tagname,
1920 	    sizeof(rule->match_tagname));
1921 	strlcpy(rule->overload_tblname, krule->overload_tblname,
1922 	    sizeof(rule->overload_tblname));
1923 
1924 	pf_kpool_to_pool(&krule->rpool, &rule->rpool);
1925 
1926 	rule->evaluations = pf_counter_u64_fetch(&krule->evaluations);
1927 	for (int i = 0; i < 2; i++) {
1928 		rule->packets[i] = pf_counter_u64_fetch(&krule->packets[i]);
1929 		rule->bytes[i] = pf_counter_u64_fetch(&krule->bytes[i]);
1930 	}
1931 
1932 	/* kif, anchor, overload_tbl are not copied over. */
1933 
1934 	rule->os_fingerprint = krule->os_fingerprint;
1935 
1936 	rule->rtableid = krule->rtableid;
1937 	bcopy(krule->timeout, rule->timeout, sizeof(krule->timeout));
1938 	rule->max_states = krule->max_states;
1939 	rule->max_src_nodes = krule->max_src_nodes;
1940 	rule->max_src_states = krule->max_src_states;
1941 	rule->max_src_conn = krule->max_src_conn;
1942 	rule->max_src_conn_rate.limit = krule->max_src_conn_rate.limit;
1943 	rule->max_src_conn_rate.seconds = krule->max_src_conn_rate.seconds;
1944 	rule->qid = krule->qid;
1945 	rule->pqid = krule->pqid;
1946 	rule->nr = krule->nr;
1947 	rule->prob = krule->prob;
1948 	rule->cuid = krule->cuid;
1949 	rule->cpid = krule->cpid;
1950 
1951 	rule->return_icmp = krule->return_icmp;
1952 	rule->return_icmp6 = krule->return_icmp6;
1953 	rule->max_mss = krule->max_mss;
1954 	rule->tag = krule->tag;
1955 	rule->match_tag = krule->match_tag;
1956 	rule->scrub_flags = krule->scrub_flags;
1957 
1958 	bcopy(&krule->uid, &rule->uid, sizeof(krule->uid));
1959 	bcopy(&krule->gid, &rule->gid, sizeof(krule->gid));
1960 
1961 	rule->rule_flag = krule->rule_flag;
1962 	rule->action = krule->action;
1963 	rule->direction = krule->direction;
1964 	rule->log = krule->log;
1965 	rule->logif = krule->logif;
1966 	rule->quick = krule->quick;
1967 	rule->ifnot = krule->ifnot;
1968 	rule->match_tag_not = krule->match_tag_not;
1969 	rule->natpass = krule->natpass;
1970 
1971 	rule->keep_state = krule->keep_state;
1972 	rule->af = krule->af;
1973 	rule->proto = krule->proto;
1974 	rule->type = krule->type;
1975 	rule->code = krule->code;
1976 	rule->flags = krule->flags;
1977 	rule->flagset = krule->flagset;
1978 	rule->min_ttl = krule->min_ttl;
1979 	rule->allow_opts = krule->allow_opts;
1980 	rule->rt = krule->rt;
1981 	rule->return_ttl = krule->return_ttl;
1982 	rule->tos = krule->tos;
1983 	rule->set_tos = krule->set_tos;
1984 	rule->anchor_relative = krule->anchor_relative;
1985 	rule->anchor_wildcard = krule->anchor_wildcard;
1986 
1987 	rule->flush = krule->flush;
1988 	rule->prio = krule->prio;
1989 	rule->set_prio[0] = krule->set_prio[0];
1990 	rule->set_prio[1] = krule->set_prio[1];
1991 
1992 	bcopy(&krule->divert, &rule->divert, sizeof(krule->divert));
1993 
1994 	rule->u_states_cur = counter_u64_fetch(krule->states_cur);
1995 	rule->u_states_tot = counter_u64_fetch(krule->states_tot);
1996 	rule->u_src_nodes = counter_u64_fetch(krule->src_nodes);
1997 }
1998 
1999 static int
2000 pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule)
2001 {
2002 	int ret;
2003 
2004 #ifndef INET
2005 	if (rule->af == AF_INET) {
2006 		return (EAFNOSUPPORT);
2007 	}
2008 #endif /* INET */
2009 #ifndef INET6
2010 	if (rule->af == AF_INET6) {
2011 		return (EAFNOSUPPORT);
2012 	}
2013 #endif /* INET6 */
2014 
2015 	ret = pf_check_rule_addr(&rule->src);
2016 	if (ret != 0)
2017 		return (ret);
2018 	ret = pf_check_rule_addr(&rule->dst);
2019 	if (ret != 0)
2020 		return (ret);
2021 
2022 	bcopy(&rule->src, &krule->src, sizeof(rule->src));
2023 	bcopy(&rule->dst, &krule->dst, sizeof(rule->dst));
2024 
2025 	ret = pf_user_strcpy(krule->label[0], rule->label, sizeof(rule->label));
2026 	if (ret != 0)
2027 		return (ret);
2028 	ret = pf_user_strcpy(krule->ifname, rule->ifname, sizeof(rule->ifname));
2029 	if (ret != 0)
2030 		return (ret);
2031 	ret = pf_user_strcpy(krule->qname, rule->qname, sizeof(rule->qname));
2032 	if (ret != 0)
2033 		return (ret);
2034 	ret = pf_user_strcpy(krule->pqname, rule->pqname, sizeof(rule->pqname));
2035 	if (ret != 0)
2036 		return (ret);
2037 	ret = pf_user_strcpy(krule->tagname, rule->tagname,
2038 	    sizeof(rule->tagname));
2039 	if (ret != 0)
2040 		return (ret);
2041 	ret = pf_user_strcpy(krule->match_tagname, rule->match_tagname,
2042 	    sizeof(rule->match_tagname));
2043 	if (ret != 0)
2044 		return (ret);
2045 	ret = pf_user_strcpy(krule->overload_tblname, rule->overload_tblname,
2046 	    sizeof(rule->overload_tblname));
2047 	if (ret != 0)
2048 		return (ret);
2049 
2050 	pf_pool_to_kpool(&rule->rpool, &krule->rpool);
2051 
2052 	/* Don't allow userspace to set evaluations, packets or bytes. */
2053 	/* kif, anchor, overload_tbl are not copied over. */
2054 
2055 	krule->os_fingerprint = rule->os_fingerprint;
2056 
2057 	krule->rtableid = rule->rtableid;
2058 	bcopy(rule->timeout, krule->timeout, sizeof(krule->timeout));
2059 	krule->max_states = rule->max_states;
2060 	krule->max_src_nodes = rule->max_src_nodes;
2061 	krule->max_src_states = rule->max_src_states;
2062 	krule->max_src_conn = rule->max_src_conn;
2063 	krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit;
2064 	krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds;
2065 	krule->qid = rule->qid;
2066 	krule->pqid = rule->pqid;
2067 	krule->nr = rule->nr;
2068 	krule->prob = rule->prob;
2069 	krule->cuid = rule->cuid;
2070 	krule->cpid = rule->cpid;
2071 
2072 	krule->return_icmp = rule->return_icmp;
2073 	krule->return_icmp6 = rule->return_icmp6;
2074 	krule->max_mss = rule->max_mss;
2075 	krule->tag = rule->tag;
2076 	krule->match_tag = rule->match_tag;
2077 	krule->scrub_flags = rule->scrub_flags;
2078 
2079 	bcopy(&rule->uid, &krule->uid, sizeof(krule->uid));
2080 	bcopy(&rule->gid, &krule->gid, sizeof(krule->gid));
2081 
2082 	krule->rule_flag = rule->rule_flag;
2083 	krule->action = rule->action;
2084 	krule->direction = rule->direction;
2085 	krule->log = rule->log;
2086 	krule->logif = rule->logif;
2087 	krule->quick = rule->quick;
2088 	krule->ifnot = rule->ifnot;
2089 	krule->match_tag_not = rule->match_tag_not;
2090 	krule->natpass = rule->natpass;
2091 
2092 	krule->keep_state = rule->keep_state;
2093 	krule->af = rule->af;
2094 	krule->proto = rule->proto;
2095 	krule->type = rule->type;
2096 	krule->code = rule->code;
2097 	krule->flags = rule->flags;
2098 	krule->flagset = rule->flagset;
2099 	krule->min_ttl = rule->min_ttl;
2100 	krule->allow_opts = rule->allow_opts;
2101 	krule->rt = rule->rt;
2102 	krule->return_ttl = rule->return_ttl;
2103 	krule->tos = rule->tos;
2104 	krule->set_tos = rule->set_tos;
2105 
2106 	krule->flush = rule->flush;
2107 	krule->prio = rule->prio;
2108 	krule->set_prio[0] = rule->set_prio[0];
2109 	krule->set_prio[1] = rule->set_prio[1];
2110 
2111 	bcopy(&rule->divert, &krule->divert, sizeof(krule->divert));
2112 
2113 	return (0);
2114 }
2115 
2116 static int
2117 pf_state_kill_to_kstate_kill(const struct pfioc_state_kill *psk,
2118     struct pf_kstate_kill *kill)
2119 {
2120 	int ret;
2121 
2122 	bzero(kill, sizeof(*kill));
2123 
2124 	bcopy(&psk->psk_pfcmp, &kill->psk_pfcmp, sizeof(kill->psk_pfcmp));
2125 	kill->psk_af = psk->psk_af;
2126 	kill->psk_proto = psk->psk_proto;
2127 	bcopy(&psk->psk_src, &kill->psk_src, sizeof(kill->psk_src));
2128 	bcopy(&psk->psk_dst, &kill->psk_dst, sizeof(kill->psk_dst));
2129 	ret = pf_user_strcpy(kill->psk_ifname, psk->psk_ifname,
2130 	    sizeof(kill->psk_ifname));
2131 	if (ret != 0)
2132 		return (ret);
2133 	ret = pf_user_strcpy(kill->psk_label, psk->psk_label,
2134 	    sizeof(kill->psk_label));
2135 	if (ret != 0)
2136 		return (ret);
2137 
2138 	return (0);
2139 }
2140 
2141 static int
2142 pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket,
2143     uint32_t pool_ticket, const char *anchor, const char *anchor_call,
2144     struct thread *td)
2145 {
2146 	struct pf_kruleset	*ruleset;
2147 	struct pf_krule		*tail;
2148 	struct pf_kpooladdr	*pa;
2149 	struct pfi_kkif		*kif = NULL;
2150 	int			 rs_num;
2151 	int			 error = 0;
2152 
2153 	if ((rule->return_icmp >> 8) > ICMP_MAXTYPE) {
2154 		error = EINVAL;
2155 		goto errout_unlocked;
2156 	}
2157 
2158 #define	ERROUT(x)	ERROUT_FUNCTION(errout, x)
2159 
2160 	if (rule->ifname[0])
2161 		kif = pf_kkif_create(M_WAITOK);
2162 	pf_counter_u64_init(&rule->evaluations, M_WAITOK);
2163 	for (int i = 0; i < 2; i++) {
2164 		pf_counter_u64_init(&rule->packets[i], M_WAITOK);
2165 		pf_counter_u64_init(&rule->bytes[i], M_WAITOK);
2166 	}
2167 	rule->states_cur = counter_u64_alloc(M_WAITOK);
2168 	rule->states_tot = counter_u64_alloc(M_WAITOK);
2169 	rule->src_nodes = counter_u64_alloc(M_WAITOK);
2170 	rule->cuid = td->td_ucred->cr_ruid;
2171 	rule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
2172 	TAILQ_INIT(&rule->rpool.list);
2173 
2174 	PF_CONFIG_LOCK();
2175 	PF_RULES_WLOCK();
2176 #ifdef PF_WANT_32_TO_64_COUNTER
2177 	LIST_INSERT_HEAD(&V_pf_allrulelist, rule, allrulelist);
2178 	MPASS(!rule->allrulelinked);
2179 	rule->allrulelinked = true;
2180 	V_pf_allrulecount++;
2181 #endif
2182 	ruleset = pf_find_kruleset(anchor);
2183 	if (ruleset == NULL)
2184 		ERROUT(EINVAL);
2185 	rs_num = pf_get_ruleset_number(rule->action);
2186 	if (rs_num >= PF_RULESET_MAX)
2187 		ERROUT(EINVAL);
2188 	if (ticket != ruleset->rules[rs_num].inactive.ticket) {
2189 		DPFPRINTF(PF_DEBUG_MISC,
2190 		    ("ticket: %d != [%d]%d\n", ticket, rs_num,
2191 		    ruleset->rules[rs_num].inactive.ticket));
2192 		ERROUT(EBUSY);
2193 	}
2194 	if (pool_ticket != V_ticket_pabuf) {
2195 		DPFPRINTF(PF_DEBUG_MISC,
2196 		    ("pool_ticket: %d != %d\n", pool_ticket,
2197 		    V_ticket_pabuf));
2198 		ERROUT(EBUSY);
2199 	}
2200 	/*
2201 	 * XXXMJG hack: there is no mechanism to ensure the caller actually
2202 	 * started the transaction. The ticket checked above may happen to
2203 	 * match by accident, even if nobody called DIOCXBEGIN, let alone
2204 	 * this process. Partially work around it by checking whether the
2205 	 * RB tree was allocated, see pf_begin_rules().
2206 	 */
2207 	if (ruleset->rules[rs_num].inactive.tree == NULL) {
2208 		ERROUT(EINVAL);
2209 	}
2210 
2211 	tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
2212 	    pf_krulequeue);
2213 	if (tail)
2214 		rule->nr = tail->nr + 1;
2215 	else
2216 		rule->nr = 0;
2217 	if (rule->ifname[0]) {
2218 		rule->kif = pfi_kkif_attach(kif, rule->ifname);
2219 		kif = NULL;
2220 		pfi_kkif_ref(rule->kif);
2221 	} else
2222 		rule->kif = NULL;
2223 
2224 	if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs)
2225 		error = EBUSY;
2226 
2227 #ifdef ALTQ
2228 	/* set queue IDs */
2229 	if (rule->qname[0] != 0) {
2230 		if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
2231 			error = EBUSY;
2232 		else if (rule->pqname[0] != 0) {
2233 			if ((rule->pqid =
2234 			    pf_qname2qid(rule->pqname)) == 0)
2235 				error = EBUSY;
2236 		} else
2237 			rule->pqid = rule->qid;
2238 	}
2239 #endif
2240 	if (rule->tagname[0])
2241 		if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
2242 			error = EBUSY;
2243 	if (rule->match_tagname[0])
2244 		if ((rule->match_tag =
2245 		    pf_tagname2tag(rule->match_tagname)) == 0)
2246 			error = EBUSY;
2247 	if (rule->rt && !rule->direction)
2248 		error = EINVAL;
2249 	if (!rule->log)
2250 		rule->logif = 0;
2251 	if (rule->logif >= PFLOGIFS_MAX)
2252 		error = EINVAL;
2253 	if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
2254 		error = ENOMEM;
2255 	if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
2256 		error = ENOMEM;
2257 	if (pf_kanchor_setup(rule, ruleset, anchor_call))
2258 		error = EINVAL;
2259 	if (rule->scrub_flags & PFSTATE_SETPRIO &&
2260 	    (rule->set_prio[0] > PF_PRIO_MAX ||
2261 	    rule->set_prio[1] > PF_PRIO_MAX))
2262 		error = EINVAL;
2263 	TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
2264 		if (pa->addr.type == PF_ADDR_TABLE) {
2265 			pa->addr.p.tbl = pfr_attach_table(ruleset,
2266 			    pa->addr.v.tblname);
2267 			if (pa->addr.p.tbl == NULL)
2268 				error = ENOMEM;
2269 		}
2270 
2271 	rule->overload_tbl = NULL;
2272 	if (rule->overload_tblname[0]) {
2273 		if ((rule->overload_tbl = pfr_attach_table(ruleset,
2274 		    rule->overload_tblname)) == NULL)
2275 			error = EINVAL;
2276 		else
2277 			rule->overload_tbl->pfrkt_flags |=
2278 			    PFR_TFLAG_ACTIVE;
2279 	}
2280 
2281 	pf_mv_kpool(&V_pf_pabuf, &rule->rpool.list);
2282 	if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
2283 	    (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
2284 	    (rule->rt > PF_NOPFROUTE)) &&
2285 	    (TAILQ_FIRST(&rule->rpool.list) == NULL))
2286 		error = EINVAL;
2287 
2288 	if (error) {
2289 		pf_free_rule(rule);
2290 		rule = NULL;
2291 		ERROUT(error);
2292 	}
2293 
2294 	rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
2295 	TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
2296 	    rule, entries);
2297 	ruleset->rules[rs_num].inactive.rcount++;
2298 
2299 	PF_RULES_WUNLOCK();
2300 	pf_hash_rule(rule);
2301 	if (RB_INSERT(pf_krule_global, ruleset->rules[rs_num].inactive.tree, rule) != NULL) {
2302 		PF_RULES_WLOCK();
2303 		TAILQ_REMOVE(ruleset->rules[rs_num].inactive.ptr, rule, entries);
2304 		ruleset->rules[rs_num].inactive.rcount--;
2305 		pf_free_rule(rule);
2306 		rule = NULL;
2307 		ERROUT(EEXIST);
2308 	}
2309 	PF_CONFIG_UNLOCK();
2310 
2311 	return (0);
2312 
2313 #undef ERROUT
2314 errout:
2315 	PF_RULES_WUNLOCK();
2316 	PF_CONFIG_UNLOCK();
2317 errout_unlocked:
2318 	pf_kkif_free(kif);
2319 	pf_krule_free(rule);
2320 	return (error);
2321 }
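
/*
 * Illustrative userspace sketch (not compiled): the two tickets
 * validated above are obtained with DIOCXBEGIN (ruleset transaction)
 * and DIOCBEGINADDRS (address pool buffer) before DIOCADDRULE is
 * issued.  Structure and constant names follow pfvar.h; headers, error
 * handling and the actual rule contents are elided.
 */
#if 0
static void
example_add_rule(int dev)
{
	struct pfioc_trans_e te = { .rs_num = PF_RULESET_FILTER };
	struct pfioc_trans t = { .size = 1, .esize = sizeof(te),
	    .array = &te };
	struct pfioc_pooladdr pp = { 0 };
	struct pfioc_rule pr = { 0 };

	ioctl(dev, DIOCXBEGIN, &t);		/* sets te.ticket */
	ioctl(dev, DIOCBEGINADDRS, &pp);	/* sets pp.ticket */

	pr.ticket = te.ticket;
	pr.pool_ticket = pp.ticket;
	pr.rule.action = PF_PASS;
	ioctl(dev, DIOCADDRULE, &pr);	/* lands in pf_ioctl_addrule() */

	ioctl(dev, DIOCXCOMMIT, &t);	/* swap inactive -> active */
}
#endif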
2322 
2323 static bool
2324 pf_label_match(const struct pf_krule *rule, const char *label)
2325 {
2326 	int i = 0;
2327 
2328 	while (i < PF_RULE_MAX_LABEL_COUNT && *rule->label[i]) {
2329 		if (strcmp(rule->label[i], label) == 0)
2330 			return (true);
2331 		i++;
2332 	}
2333 
2334 	return (false);
2335 }
2336 
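/*
 * Kill the state matching 'key' in direction 'dir'.  If the lookup is
 * ambiguous (pf_find_state_all() reports additional matches), kill
 * nothing: the caller derived the key from one specific state and only
 * wants that state's counterpart removed.
 */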
2337 static unsigned int
2338 pf_kill_matching_state(struct pf_state_key_cmp *key, int dir)
2339 {
2340 	struct pf_kstate *s;
2341 	int more = 0;
2342 
2343 	s = pf_find_state_all(key, dir, &more);
2344 	if (s == NULL)
2345 		return (0);
2346 
2347 	if (more) {
2348 		PF_STATE_UNLOCK(s);
2349 		return (0);
2350 	}
2351 
2352 	pf_unlink_state(s);
2353 	return (1);
2354 }
2355 
2356 static int
2357 pf_killstates_row(struct pf_kstate_kill *psk, struct pf_idhash *ih)
2358 {
2359 	struct pf_kstate	*s;
2360 	struct pf_state_key	*sk;
2361 	struct pf_addr		*srcaddr, *dstaddr;
2362 	struct pf_state_key_cmp	 match_key;
2363 	int			 idx, killed = 0;
2364 	unsigned int		 dir;
2365 	u_int16_t		 srcport, dstport;
2366 	struct pfi_kkif		*kif;
2367 
2368 relock_DIOCKILLSTATES:
2369 	PF_HASHROW_LOCK(ih);
2370 	LIST_FOREACH(s, &ih->states, entry) {
2371 		/* For floating states look at the original kif. */
2372 		kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;
2373 
2374 		sk = s->key[PF_SK_WIRE];
2375 		if (s->direction == PF_OUT) {
2376 			srcaddr = &sk->addr[1];
2377 			dstaddr = &sk->addr[0];
2378 			srcport = sk->port[1];
2379 			dstport = sk->port[0];
2380 		} else {
2381 			srcaddr = &sk->addr[0];
2382 			dstaddr = &sk->addr[1];
2383 			srcport = sk->port[0];
2384 			dstport = sk->port[1];
2385 		}
2386 
2387 		if (psk->psk_af && sk->af != psk->psk_af)
2388 			continue;
2389 
2390 		if (psk->psk_proto && psk->psk_proto != sk->proto)
2391 			continue;
2392 
2393 		if (! PF_MATCHA(psk->psk_src.neg, &psk->psk_src.addr.v.a.addr,
2394 		    &psk->psk_src.addr.v.a.mask, srcaddr, sk->af))
2395 			continue;
2396 
2397 		if (! PF_MATCHA(psk->psk_dst.neg, &psk->psk_dst.addr.v.a.addr,
2398 		    &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af))
2399 			continue;
2400 
2401 		if (! PF_MATCHA(psk->psk_rt_addr.neg,
2402 		    &psk->psk_rt_addr.addr.v.a.addr,
2403 		    &psk->psk_rt_addr.addr.v.a.mask,
2404 		    &s->rt_addr, sk->af))
2405 			continue;
2406 
2407 		if (psk->psk_src.port_op != 0 &&
2408 		    ! pf_match_port(psk->psk_src.port_op,
2409 		    psk->psk_src.port[0], psk->psk_src.port[1], srcport))
2410 			continue;
2411 
2412 		if (psk->psk_dst.port_op != 0 &&
2413 		    ! pf_match_port(psk->psk_dst.port_op,
2414 		    psk->psk_dst.port[0], psk->psk_dst.port[1], dstport))
2415 			continue;
2416 
2417 		if (psk->psk_label[0] &&
2418 		    ! pf_label_match(s->rule.ptr, psk->psk_label))
2419 			continue;
2420 
2421 		if (psk->psk_ifname[0] && strcmp(psk->psk_ifname,
2422 		    kif->pfik_name))
2423 			continue;
2424 
2425 		if (psk->psk_kill_match) {
2426 			/* Create the key to find matching states, with lock
2427 			 * held. */
2428 
2429 			bzero(&match_key, sizeof(match_key));
2430 
2431 			if (s->direction == PF_OUT) {
2432 				dir = PF_IN;
2433 				idx = PF_SK_STACK;
2434 			} else {
2435 				dir = PF_OUT;
2436 				idx = PF_SK_WIRE;
2437 			}
2438 
2439 			match_key.af = s->key[idx]->af;
2440 			match_key.proto = s->key[idx]->proto;
2441 			PF_ACPY(&match_key.addr[0],
2442 			    &s->key[idx]->addr[1], match_key.af);
2443 			match_key.port[0] = s->key[idx]->port[1];
2444 			PF_ACPY(&match_key.addr[1],
2445 			    &s->key[idx]->addr[0], match_key.af);
2446 			match_key.port[1] = s->key[idx]->port[0];
2447 		}
2448 
2449 		pf_unlink_state(s);
2450 		killed++;
2451 
2452 		if (psk->psk_kill_match)
2453 			killed += pf_kill_matching_state(&match_key, dir);
2454 
2455 		goto relock_DIOCKILLSTATES;
2456 	}
2457 	PF_HASHROW_UNLOCK(ih);
2458 
2459 	return (killed);
2460 }
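
/*
 * Illustrative userspace sketch (not compiled): the filter above treats
 * zeroed fields as wildcards, so a caller narrows only what it cares
 * about.  The interface name is an arbitrary example; field names
 * follow pfvar.h.
 */
#if 0
static void
example_kill_states(int dev)
{
	struct pfioc_state_kill psk;

	memset(&psk, 0, sizeof(psk));
	psk.psk_af = AF_INET;
	strlcpy(psk.psk_ifname, "em0", sizeof(psk.psk_ifname));
	ioctl(dev, DIOCKILLSTATES, &psk);
	printf("killed %u states\n", psk.psk_killed);
}
#endif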
2461 
2462 static int
2463 pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
2464 {
2465 	int			 error = 0;
2466 	PF_RULES_RLOCK_TRACKER;
2467 
2468 #define	ERROUT_IOCTL(target, x)					\
2469     do {								\
2470 	    error = (x);						\
2471 	    SDT_PROBE3(pf, ioctl, ioctl, error, cmd, error, __LINE__);	\
2472 	    goto target;						\
2473     } while (0)
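
	/*
	 * Failures raised through ERROUT_IOCTL() fire the
	 * pf:ioctl:ioctl:error SDT probe with the command, the error and
	 * the source line, so rejected requests can be pinpointed with
	 * DTrace, e.g. (sketch):
	 *
	 * dtrace -n 'pf:ioctl:ioctl:error { printf("cmd %x err %d line %d",
	 *     arg0, arg1, arg2); }'
	 */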
2474 
2475 
2476 	/* XXX keep in sync with switch() below */
2477 	if (securelevel_gt(td->td_ucred, 2))
2478 		switch (cmd) {
2479 		case DIOCGETRULES:
2480 		case DIOCGETRULE:
2481 		case DIOCGETRULENV:
2482 		case DIOCGETADDRS:
2483 		case DIOCGETADDR:
2484 		case DIOCGETSTATE:
2485 		case DIOCGETSTATENV:
2486 		case DIOCSETSTATUSIF:
2487 		case DIOCGETSTATUS:
2488 		case DIOCGETSTATUSNV:
2489 		case DIOCCLRSTATUS:
2490 		case DIOCNATLOOK:
2491 		case DIOCSETDEBUG:
2492 		case DIOCGETSTATES:
2493 		case DIOCGETSTATESV2:
2494 		case DIOCGETTIMEOUT:
2495 		case DIOCCLRRULECTRS:
2496 		case DIOCGETLIMIT:
2497 		case DIOCGETALTQSV0:
2498 		case DIOCGETALTQSV1:
2499 		case DIOCGETALTQV0:
2500 		case DIOCGETALTQV1:
2501 		case DIOCGETQSTATSV0:
2502 		case DIOCGETQSTATSV1:
2503 		case DIOCGETRULESETS:
2504 		case DIOCGETRULESET:
2505 		case DIOCRGETTABLES:
2506 		case DIOCRGETTSTATS:
2507 		case DIOCRCLRTSTATS:
2508 		case DIOCRCLRADDRS:
2509 		case DIOCRADDADDRS:
2510 		case DIOCRDELADDRS:
2511 		case DIOCRSETADDRS:
2512 		case DIOCRGETADDRS:
2513 		case DIOCRGETASTATS:
2514 		case DIOCRCLRASTATS:
2515 		case DIOCRTSTADDRS:
2516 		case DIOCOSFPGET:
2517 		case DIOCGETSRCNODES:
2518 		case DIOCCLRSRCNODES:
2519 		case DIOCGETSYNCOOKIES:
2520 		case DIOCIGETIFACES:
2521 		case DIOCGIFSPEEDV0:
2522 		case DIOCGIFSPEEDV1:
2523 		case DIOCSETIFFLAG:
2524 		case DIOCCLRIFFLAG:
2525 		case DIOCGETETHRULES:
2526 		case DIOCGETETHRULE:
2527 		case DIOCGETETHRULESETS:
2528 		case DIOCGETETHRULESET:
2529 			break;
2530 		case DIOCRCLRTABLES:
2531 		case DIOCRADDTABLES:
2532 		case DIOCRDELTABLES:
2533 		case DIOCRSETTFLAGS:
2534 			if (((struct pfioc_table *)addr)->pfrio_flags &
2535 			    PFR_FLAG_DUMMY)
2536 				break; /* dummy operation ok */
2537 			return (EPERM);
2538 		default:
2539 			return (EPERM);
2540 		}
2541 
2542 	if (!(flags & FWRITE))
2543 		switch (cmd) {
2544 		case DIOCGETRULES:
2545 		case DIOCGETADDRS:
2546 		case DIOCGETADDR:
2547 		case DIOCGETSTATE:
2548 		case DIOCGETSTATENV:
2549 		case DIOCGETSTATUS:
2550 		case DIOCGETSTATUSNV:
2551 		case DIOCGETSTATES:
2552 		case DIOCGETSTATESV2:
2553 		case DIOCGETTIMEOUT:
2554 		case DIOCGETLIMIT:
2555 		case DIOCGETALTQSV0:
2556 		case DIOCGETALTQSV1:
2557 		case DIOCGETALTQV0:
2558 		case DIOCGETALTQV1:
2559 		case DIOCGETQSTATSV0:
2560 		case DIOCGETQSTATSV1:
2561 		case DIOCGETRULESETS:
2562 		case DIOCGETRULESET:
2563 		case DIOCNATLOOK:
2564 		case DIOCRGETTABLES:
2565 		case DIOCRGETTSTATS:
2566 		case DIOCRGETADDRS:
2567 		case DIOCRGETASTATS:
2568 		case DIOCRTSTADDRS:
2569 		case DIOCOSFPGET:
2570 		case DIOCGETSRCNODES:
2571 		case DIOCGETSYNCOOKIES:
2572 		case DIOCIGETIFACES:
2573 		case DIOCGIFSPEEDV1:
2574 		case DIOCGIFSPEEDV0:
2575 		case DIOCGETRULENV:
2576 		case DIOCGETETHRULES:
2577 		case DIOCGETETHRULE:
2578 		case DIOCGETETHRULESETS:
2579 		case DIOCGETETHRULESET:
2580 			break;
2581 		case DIOCRCLRTABLES:
2582 		case DIOCRADDTABLES:
2583 		case DIOCRDELTABLES:
2584 		case DIOCRCLRTSTATS:
2585 		case DIOCRCLRADDRS:
2586 		case DIOCRADDADDRS:
2587 		case DIOCRDELADDRS:
2588 		case DIOCRSETADDRS:
2589 		case DIOCRSETTFLAGS:
2590 			if (((struct pfioc_table *)addr)->pfrio_flags &
2591 			    PFR_FLAG_DUMMY) {
2592 				flags |= FWRITE; /* need write lock for dummy */
2593 				break; /* dummy operation ok */
2594 			}
2595 			return (EACCES);
2596 		case DIOCGETRULE:
2597 			if (((struct pfioc_rule *)addr)->action ==
2598 			    PF_GET_CLR_CNTR)
2599 				return (EACCES);
2600 			break;
2601 		default:
2602 			return (EACCES);
2603 		}
2604 
2605 	CURVNET_SET(TD_TO_VNET(td));
2606 
2607 	switch (cmd) {
2608 	case DIOCSTART:
2609 		sx_xlock(&pf_ioctl_lock);
2610 		if (V_pf_status.running)
2611 			error = EEXIST;
2612 		else {
2613 			hook_pf();
2614 			if (! TAILQ_EMPTY(V_pf_keth->active.rules))
2615 				hook_pf_eth();
2616 			V_pf_status.running = 1;
2617 			V_pf_status.since = time_second;
2618 			new_unrhdr64(&V_pf_stateid, time_second);
2619 
2620 			DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
2621 		}
2622 		break;
2623 
2624 	case DIOCSTOP:
2625 		sx_xlock(&pf_ioctl_lock);
2626 		if (!V_pf_status.running)
2627 			error = ENOENT;
2628 		else {
2629 			V_pf_status.running = 0;
2630 			dehook_pf();
2631 			dehook_pf_eth();
2632 			V_pf_status.since = time_second;
2633 			DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
2634 		}
2635 		break;
2636 
2637 	case DIOCGETETHRULES: {
2638 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
2639 		nvlist_t		*nvl;
2640 		void			*packed;
2641 		struct pf_keth_rule	*tail;
2642 		struct pf_keth_ruleset	*rs;
2643 		u_int32_t		 ticket, nr;
2644 		const char		*anchor = "";
2645 
2646 		nvl = NULL;
2647 		packed = NULL;
2648 
2649 #define	ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULES_error, x)
2650 
2651 		if (nv->len > pf_ioctl_maxcount)
2652 			ERROUT(ENOMEM);
2653 
2654 		/* Copy the request in */
2655 		packed = malloc(nv->len, M_NVLIST, M_WAITOK);
2656 		if (packed == NULL)
2657 			ERROUT(ENOMEM);
2658 
2659 		error = copyin(nv->data, packed, nv->len);
2660 		if (error)
2661 			ERROUT(error);
2662 
2663 		nvl = nvlist_unpack(packed, nv->len, 0);
2664 		if (nvl == NULL)
2665 			ERROUT(EBADMSG);
2666 
2667 		if (! nvlist_exists_string(nvl, "anchor"))
2668 			ERROUT(EBADMSG);
2669 
2670 		anchor = nvlist_get_string(nvl, "anchor");
2671 
2672 		rs = pf_find_keth_ruleset(anchor);
2673 
2674 		nvlist_destroy(nvl);
2675 		nvl = NULL;
2676 		free(packed, M_NVLIST);
2677 		packed = NULL;
2678 
2679 		if (rs == NULL)
2680 			ERROUT(ENOENT);
2681 
2682 		/* Reply */
2683 		nvl = nvlist_create(0);
2684 		if (nvl == NULL)
2685 			ERROUT(ENOMEM);
2686 
2687 		PF_RULES_RLOCK();
2688 
2689 		ticket = rs->active.ticket;
2690 		tail = TAILQ_LAST(rs->active.rules, pf_keth_ruleq);
2691 		if (tail)
2692 			nr = tail->nr + 1;
2693 		else
2694 			nr = 0;
2695 
2696 		PF_RULES_RUNLOCK();
2697 
2698 		nvlist_add_number(nvl, "ticket", ticket);
2699 		nvlist_add_number(nvl, "nr", nr);
2700 
2701 		packed = nvlist_pack(nvl, &nv->len);
2702 		if (packed == NULL)
2703 			ERROUT(ENOMEM);
2704 
2705 		if (nv->size == 0)
2706 			ERROUT(0);
2707 		else if (nv->size < nv->len)
2708 			ERROUT(ENOSPC);
2709 
2710 		error = copyout(packed, nv->data, nv->len);
2711 
2712 #undef ERROUT
2713 DIOCGETETHRULES_error:
2714 		free(packed, M_NVLIST);
2715 		nvlist_destroy(nvl);
2716 		break;
2717 	}
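
	/*
	 * Illustrative userspace sketch (not compiled): every nvlist-based
	 * ioctl in this switch uses the same envelope.  nv.data/nv.len
	 * carry the packed request in, nv.size gives the reply capacity,
	 * and on return nv.len is the packed reply length (reported
	 * without a copyout when nv.size == 0, so callers can size a
	 * buffer first).  Names follow sys/nv.h; the buffer size is an
	 * arbitrary example, cleanup and error handling are elided, and
	 * pfdev is an open /dev/pf descriptor.
	 */
#if 0
	struct pfioc_nv nv;
	nvlist_t *req, *resp;
	void *packed;
	size_t len;
	static char buf[4096];

	req = nvlist_create(0);
	nvlist_add_string(req, "anchor", "");
	packed = nvlist_pack(req, &len);
	memcpy(buf, packed, len);

	nv.data = buf;
	nv.len = len;
	nv.size = sizeof(buf);
	ioctl(pfdev, DIOCGETETHRULES, &nv);

	resp = nvlist_unpack(buf, nv.len, 0);
	printf("ticket %ju, %ju rules\n",
	    (uintmax_t)nvlist_get_number(resp, "ticket"),
	    (uintmax_t)nvlist_get_number(resp, "nr"));
#endif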
2718 
2719 	case DIOCGETETHRULE: {
2720 		struct epoch_tracker	 et;
2721 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
2722 		nvlist_t		*nvl = NULL;
2723 		void			*nvlpacked = NULL;
2724 		struct pf_keth_rule	*rule = NULL;
2725 		struct pf_keth_ruleset	*rs;
2726 		u_int32_t		 ticket, nr;
2727 		bool			 clear = false;
2728 		const char		*anchor;
2729 
2730 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULE_error, x)
2731 
2732 		if (nv->len > pf_ioctl_maxcount)
2733 			ERROUT(ENOMEM);
2734 
2735 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
2736 		if (nvlpacked == NULL)
2737 			ERROUT(ENOMEM);
2738 
2739 		error = copyin(nv->data, nvlpacked, nv->len);
2740 		if (error)
2741 			ERROUT(error);
2742 
2743 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
2744 		if (nvl == NULL)
2745 			ERROUT(EBADMSG);
2746 		if (! nvlist_exists_number(nvl, "ticket"))
2747 			ERROUT(EBADMSG);
2748 		ticket = nvlist_get_number(nvl, "ticket");
2749 		if (! nvlist_exists_string(nvl, "anchor"))
2750 			ERROUT(EBADMSG);
2751 		anchor = nvlist_get_string(nvl, "anchor");
2752 
2753 		if (nvlist_exists_bool(nvl, "clear"))
2754 			clear = nvlist_get_bool(nvl, "clear");
2755 
2756 		if (clear && !(flags & FWRITE))
2757 			ERROUT(EACCES);
2758 
2759 		if (! nvlist_exists_number(nvl, "nr"))
2760 			ERROUT(EBADMSG);
2761 		nr = nvlist_get_number(nvl, "nr");
2762 
2763 		PF_RULES_RLOCK();
2764 		rs = pf_find_keth_ruleset(anchor);
2765 		if (rs == NULL) {
2766 			PF_RULES_RUNLOCK();
2767 			ERROUT(ENOENT);
2768 		}
2769 		if (ticket != rs->active.ticket) {
2770 			PF_RULES_RUNLOCK();
2771 			ERROUT(EBUSY);
2772 		}
2773 
2774 		nvlist_destroy(nvl);
2775 		nvl = NULL;
2776 		free(nvlpacked, M_NVLIST);
2777 		nvlpacked = NULL;
2778 
2779 		rule = TAILQ_FIRST(rs->active.rules);
2780 		while ((rule != NULL) && (rule->nr != nr))
2781 			rule = TAILQ_NEXT(rule, entries);
2782 		if (rule == NULL) {
2783 			PF_RULES_RUNLOCK();
2784 			ERROUT(ENOENT);
2785 		}
2786 		/* Make sure rule can't go away. */
2787 		NET_EPOCH_ENTER(et);
2788 		PF_RULES_RUNLOCK();
2789 		nvl = pf_keth_rule_to_nveth_rule(rule);
2790 		if (nvl == NULL || pf_keth_anchor_nvcopyout(rs, rule, nvl)) {
2791 			NET_EPOCH_EXIT(et);
2792 			ERROUT(nvl == NULL ? ENOMEM : EBUSY);
2793 		}
2794 		NET_EPOCH_EXIT(et);
2795 
2796 		nvlpacked = nvlist_pack(nvl, &nv->len);
2797 		if (nvlpacked == NULL)
2798 			ERROUT(ENOMEM);
2799 
2800 		if (nv->size == 0)
2801 			ERROUT(0);
2802 		else if (nv->size < nv->len)
2803 			ERROUT(ENOSPC);
2804 
2805 		error = copyout(nvlpacked, nv->data, nv->len);
2806 		if (error == 0 && clear) {
2807 			counter_u64_zero(rule->evaluations);
2808 			for (int i = 0; i < 2; i++) {
2809 				counter_u64_zero(rule->packets[i]);
2810 				counter_u64_zero(rule->bytes[i]);
2811 			}
2812 		}
2813 
2814 #undef ERROUT
2815 DIOCGETETHRULE_error:
2816 		free(nvlpacked, M_NVLIST);
2817 		nvlist_destroy(nvl);
2818 		break;
2819 	}
2820 
2821 	case DIOCADDETHRULE: {
2822 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
2823 		nvlist_t		*nvl = NULL;
2824 		void			*nvlpacked = NULL;
2825 		struct pf_keth_rule	*rule = NULL, *tail = NULL;
2826 		struct pf_keth_ruleset	*ruleset = NULL;
2827 		struct pfi_kkif		*kif = NULL, *bridge_to_kif = NULL;
2828 		const char		*anchor = "", *anchor_call = "";
2829 
2830 #define ERROUT(x)	ERROUT_IOCTL(DIOCADDETHRULE_error, x)
2831 
2832 		if (nv->len > pf_ioctl_maxcount)
2833 			ERROUT(ENOMEM);
2834 
2835 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
2836 		if (nvlpacked == NULL)
2837 			ERROUT(ENOMEM);
2838 
2839 		error = copyin(nv->data, nvlpacked, nv->len);
2840 		if (error)
2841 			ERROUT(error);
2842 
2843 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
2844 		if (nvl == NULL)
2845 			ERROUT(EBADMSG);
2846 
2847 		if (! nvlist_exists_number(nvl, "ticket"))
2848 			ERROUT(EBADMSG);
2849 
2850 		if (nvlist_exists_string(nvl, "anchor"))
2851 			anchor = nvlist_get_string(nvl, "anchor");
2852 		if (nvlist_exists_string(nvl, "anchor_call"))
2853 			anchor_call = nvlist_get_string(nvl, "anchor_call");
2854 
2855 		ruleset = pf_find_keth_ruleset(anchor);
2856 		if (ruleset == NULL)
2857 			ERROUT(EINVAL);
2858 
2859 		if (nvlist_get_number(nvl, "ticket") !=
2860 		    ruleset->inactive.ticket) {
2861 			DPFPRINTF(PF_DEBUG_MISC,
2862 			    ("ticket: %d != %d\n",
2863 			    (u_int32_t)nvlist_get_number(nvl, "ticket"),
2864 			    ruleset->inactive.ticket));
2865 			ERROUT(EBUSY);
2866 		}
2867 
2868 		rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
2869 		if (rule == NULL)
2870 			ERROUT(ENOMEM);
2871 		rule->timestamp = NULL;
2872 
2873 		error = pf_nveth_rule_to_keth_rule(nvl, rule);
2874 		if (error != 0)
2875 			ERROUT(error);
2876 
2877 		if (rule->ifname[0])
2878 			kif = pf_kkif_create(M_WAITOK);
2879 		if (rule->bridge_to_name[0])
2880 			bridge_to_kif = pf_kkif_create(M_WAITOK);
2881 		rule->evaluations = counter_u64_alloc(M_WAITOK);
2882 		for (int i = 0; i < 2; i++) {
2883 			rule->packets[i] = counter_u64_alloc(M_WAITOK);
2884 			rule->bytes[i] = counter_u64_alloc(M_WAITOK);
2885 		}
2886 		rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
2887 		    M_WAITOK | M_ZERO);
2888 
2889 		PF_RULES_WLOCK();
2890 
2891 		if (rule->ifname[0]) {
2892 			rule->kif = pfi_kkif_attach(kif, rule->ifname);
2893 			pfi_kkif_ref(rule->kif);
2894 		} else
2895 			rule->kif = NULL;
2896 		if (rule->bridge_to_name[0]) {
2897 			rule->bridge_to = pfi_kkif_attach(bridge_to_kif,
2898 			    rule->bridge_to_name);
2899 			pfi_kkif_ref(rule->bridge_to);
2900 		} else
2901 			rule->bridge_to = NULL;
2902 
2903 #ifdef ALTQ
2904 		/* set queue IDs */
2905 		if (rule->qname[0] != 0) {
2906 			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
2907 				error = EBUSY;
2910 		}
2911 #endif
2912 		if (rule->tagname[0])
2913 			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
2914 				error = EBUSY;
2915 		if (rule->match_tagname[0])
2916 			if ((rule->match_tag = pf_tagname2tag(
2917 			    rule->match_tagname)) == 0)
2918 				error = EBUSY;
2919 
2920 		if (error == 0 && rule->ipdst.addr.type == PF_ADDR_TABLE)
2921 			error = pf_eth_addr_setup(ruleset, &rule->ipdst.addr);
2922 		if (error == 0 && rule->ipsrc.addr.type == PF_ADDR_TABLE)
2923 			error = pf_eth_addr_setup(ruleset, &rule->ipsrc.addr);
2924 
2925 		if (error) {
2926 			pf_free_eth_rule(rule);
2927 			PF_RULES_WUNLOCK();
2928 			ERROUT(error);
2929 		}
2930 
2931 		if (pf_keth_anchor_setup(rule, ruleset, anchor_call)) {
2932 			pf_free_eth_rule(rule);
2933 			PF_RULES_WUNLOCK();
2934 			ERROUT(EINVAL);
2935 		}
2936 
2937 		tail = TAILQ_LAST(ruleset->inactive.rules, pf_keth_ruleq);
2938 		if (tail)
2939 			rule->nr = tail->nr + 1;
2940 		else
2941 			rule->nr = 0;
2942 
2943 		TAILQ_INSERT_TAIL(ruleset->inactive.rules, rule, entries);
2944 
2945 		PF_RULES_WUNLOCK();
2946 
2947 #undef ERROUT
2948 DIOCADDETHRULE_error:
2949 		nvlist_destroy(nvl);
2950 		free(nvlpacked, M_NVLIST);
2951 		break;
2952 	}
2953 
2954 	case DIOCGETETHRULESETS: {
2955 		struct epoch_tracker	 et;
2956 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
2957 		nvlist_t		*nvl = NULL;
2958 		void			*nvlpacked = NULL;
2959 		struct pf_keth_ruleset	*ruleset;
2960 		struct pf_keth_anchor	*anchor;
2961 		int			 nr = 0;
2962 
2963 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULESETS_error, x)
2964 
2965 		if (nv->len > pf_ioctl_maxcount)
2966 			ERROUT(ENOMEM);
2967 
2968 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
2969 		if (nvlpacked == NULL)
2970 			ERROUT(ENOMEM);
2971 
2972 		error = copyin(nv->data, nvlpacked, nv->len);
2973 		if (error)
2974 			ERROUT(error);
2975 
2976 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
2977 		if (nvl == NULL)
2978 			ERROUT(EBADMSG);
2979 		if (! nvlist_exists_string(nvl, "path"))
2980 			ERROUT(EBADMSG);
2981 
2982 		NET_EPOCH_ENTER(et);
2983 
2984 		if ((ruleset = pf_find_keth_ruleset(
2985 		    nvlist_get_string(nvl, "path"))) == NULL) {
2986 			NET_EPOCH_EXIT(et);
2987 			ERROUT(ENOENT);
2988 		}
2989 
2990 		if (ruleset->anchor == NULL) {
2991 			RB_FOREACH(anchor, pf_keth_anchor_global, &V_pf_keth_anchors)
2992 				if (anchor->parent == NULL)
2993 					nr++;
2994 		} else {
2995 			RB_FOREACH(anchor, pf_keth_anchor_node,
2996 			    &ruleset->anchor->children)
2997 				nr++;
2998 		}
2999 
3000 		NET_EPOCH_EXIT(et);
3001 
3002 		nvlist_destroy(nvl);
3003 		nvl = NULL;
3004 		free(nvlpacked, M_NVLIST);
3005 		nvlpacked = NULL;
3006 
3007 		nvl = nvlist_create(0);
3008 		if (nvl == NULL)
3009 			ERROUT(ENOMEM);
3010 
3011 		nvlist_add_number(nvl, "nr", nr);
3012 
3013 		nvlpacked = nvlist_pack(nvl, &nv->len);
3014 		if (nvlpacked == NULL)
3015 			ERROUT(ENOMEM);
3016 
3017 		if (nv->size == 0)
3018 			ERROUT(0);
3019 		else if (nv->size < nv->len)
3020 			ERROUT(ENOSPC);
3021 
3022 		error = copyout(nvlpacked, nv->data, nv->len);
3023 
3024 #undef ERROUT
3025 DIOCGETETHRULESETS_error:
3026 		free(nvlpacked, M_NVLIST);
3027 		nvlist_destroy(nvl);
3028 		break;
3029 	}
3030 
3031 	case DIOCGETETHRULESET: {
3032 		struct epoch_tracker	 et;
3033 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3034 		nvlist_t		*nvl = NULL;
3035 		void			*nvlpacked = NULL;
3036 		struct pf_keth_ruleset	*ruleset;
3037 		struct pf_keth_anchor	*anchor;
3038 		int			 nr = 0, req_nr = 0;
3039 		bool			 found = false;
3040 
3041 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULESET_error, x)
3042 
3043 		if (nv->len > pf_ioctl_maxcount)
3044 			ERROUT(ENOMEM);
3045 
3046 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3047 		if (nvlpacked == NULL)
3048 			ERROUT(ENOMEM);
3049 
3050 		error = copyin(nv->data, nvlpacked, nv->len);
3051 		if (error)
3052 			ERROUT(error);
3053 
3054 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3055 		if (nvl == NULL)
3056 			ERROUT(EBADMSG);
3057 		if (! nvlist_exists_string(nvl, "path"))
3058 			ERROUT(EBADMSG);
3059 		if (! nvlist_exists_number(nvl, "nr"))
3060 			ERROUT(EBADMSG);
3061 
3062 		req_nr = nvlist_get_number(nvl, "nr");
3063 
3064 		NET_EPOCH_ENTER(et);
3065 
3066 		if ((ruleset = pf_find_keth_ruleset(
3067 		    nvlist_get_string(nvl, "path"))) == NULL) {
3068 			NET_EPOCH_EXIT(et);
3069 			ERROUT(ENOENT);
3070 		}
3071 
3072 		nvlist_destroy(nvl);
3073 		nvl = NULL;
3074 		free(nvlpacked, M_NVLIST);
3075 		nvlpacked = NULL;
3076 
3077 		nvl = nvlist_create(0);
3078 		if (nvl == NULL) {
3079 			NET_EPOCH_EXIT(et);
3080 			ERROUT(ENOMEM);
3081 		}
3082 
3083 		if (ruleset->anchor == NULL) {
3084 			RB_FOREACH(anchor, pf_keth_anchor_global,
3085 			    &V_pf_keth_anchors) {
3086 				if (anchor->parent == NULL && nr++ == req_nr) {
3087 					found = true;
3088 					break;
3089 				}
3090 			}
3091 		} else {
3092 			RB_FOREACH(anchor, pf_keth_anchor_node,
3093 			     &ruleset->anchor->children) {
3094 				if (nr++ == req_nr) {
3095 					found = true;
3096 					break;
3097 				}
3098 			}
3099 		}
3100 
3101 		NET_EPOCH_EXIT(et);
3102 		if (found) {
3103 			nvlist_add_number(nvl, "nr", nr);
3104 			nvlist_add_string(nvl, "name", anchor->name);
3105 			if (ruleset->anchor)
3106 				nvlist_add_string(nvl, "path",
3107 				    ruleset->anchor->path);
3108 			else
3109 				nvlist_add_string(nvl, "path", "");
3110 		} else {
3111 			ERROUT(EBUSY);
3112 		}
3113 
3114 		nvlpacked = nvlist_pack(nvl, &nv->len);
3115 		if (nvlpacked == NULL)
3116 			ERROUT(ENOMEM);
3117 
3118 		if (nv->size == 0)
3119 			ERROUT(0);
3120 		else if (nv->size < nv->len)
3121 			ERROUT(ENOSPC);
3122 
3123 		error = copyout(nvlpacked, nv->data, nv->len);
3124 
3125 #undef ERROUT
3126 DIOCGETETHRULESET_error:
3127 		free(nvlpacked, M_NVLIST);
3128 		nvlist_destroy(nvl);
3129 		break;
3130 	}
3131 
3132 	case DIOCADDRULENV: {
3133 		struct pfioc_nv	*nv = (struct pfioc_nv *)addr;
3134 		nvlist_t	*nvl = NULL;
3135 		void		*nvlpacked = NULL;
3136 		struct pf_krule	*rule = NULL;
3137 		const char	*anchor = "", *anchor_call = "";
3138 		uint32_t	 ticket = 0, pool_ticket = 0;
3139 
3140 #define	ERROUT(x)	ERROUT_IOCTL(DIOCADDRULENV_error, x)
3141 
3142 		if (nv->len > pf_ioctl_maxcount)
3143 			ERROUT(ENOMEM);
3144 
3145 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3146 		error = copyin(nv->data, nvlpacked, nv->len);
3147 		if (error)
3148 			ERROUT(error);
3149 
3150 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3151 		if (nvl == NULL)
3152 			ERROUT(EBADMSG);
3153 
3154 		if (! nvlist_exists_number(nvl, "ticket"))
3155 			ERROUT(EINVAL);
3156 		ticket = nvlist_get_number(nvl, "ticket");
3157 
3158 		if (! nvlist_exists_number(nvl, "pool_ticket"))
3159 			ERROUT(EINVAL);
3160 		pool_ticket = nvlist_get_number(nvl, "pool_ticket");
3161 
3162 		if (! nvlist_exists_nvlist(nvl, "rule"))
3163 			ERROUT(EINVAL);
3164 
3165 		rule = pf_krule_alloc();
3166 		error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"),
3167 		    rule);
3168 		if (error)
3169 			ERROUT(error);
3170 
3171 		if (nvlist_exists_string(nvl, "anchor"))
3172 			anchor = nvlist_get_string(nvl, "anchor");
3173 		if (nvlist_exists_string(nvl, "anchor_call"))
3174 			anchor_call = nvlist_get_string(nvl, "anchor_call");
3175 
3176 		if ((error = nvlist_error(nvl)))
3177 			ERROUT(error);
3178 
3179 		/* Frees rule on error */
3180 		error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor,
3181 		    anchor_call, td);
3182 
3183 		nvlist_destroy(nvl);
3184 		free(nvlpacked, M_NVLIST);
3185 		break;
3186 #undef ERROUT
3187 DIOCADDRULENV_error:
3188 		pf_krule_free(rule);
3189 		nvlist_destroy(nvl);
3190 		free(nvlpacked, M_NVLIST);
3191 
3192 		break;
3193 	}
3194 	case DIOCADDRULE: {
3195 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
3196 		struct pf_krule		*rule;
3197 
3198 		rule = pf_krule_alloc();
3199 		error = pf_rule_to_krule(&pr->rule, rule);
3200 		if (error != 0) {
3201 			pf_krule_free(rule);
3202 			break;
3203 		}
3204 
3205 		pr->anchor[sizeof(pr->anchor) - 1] = 0;
3206 
3207 		/* Frees rule on error */
3208 		error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket,
3209 		    pr->anchor, pr->anchor_call, td);
3210 		break;
3211 	}
3212 
3213 	case DIOCGETRULES: {
3214 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
3215 		struct pf_kruleset	*ruleset;
3216 		struct pf_krule		*tail;
3217 		int			 rs_num;
3218 
3219 		pr->anchor[sizeof(pr->anchor) - 1] = 0;
3220 
3221 		PF_RULES_WLOCK();
3222 		ruleset = pf_find_kruleset(pr->anchor);
3223 		if (ruleset == NULL) {
3224 			PF_RULES_WUNLOCK();
3225 			error = EINVAL;
3226 			break;
3227 		}
3228 		rs_num = pf_get_ruleset_number(pr->rule.action);
3229 		if (rs_num >= PF_RULESET_MAX) {
3230 			PF_RULES_WUNLOCK();
3231 			error = EINVAL;
3232 			break;
3233 		}
3234 		tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
3235 		    pf_krulequeue);
3236 		if (tail)
3237 			pr->nr = tail->nr + 1;
3238 		else
3239 			pr->nr = 0;
3240 		pr->ticket = ruleset->rules[rs_num].active.ticket;
3241 		PF_RULES_WUNLOCK();
3242 		break;
3243 	}
3244 
3245 	case DIOCGETRULE: {
3246 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
3247 		struct pf_kruleset	*ruleset;
3248 		struct pf_krule		*rule;
3249 		int			 rs_num;
3250 
3251 		pr->anchor[sizeof(pr->anchor) - 1] = 0;
3252 
3253 		PF_RULES_WLOCK();
3254 		ruleset = pf_find_kruleset(pr->anchor);
3255 		if (ruleset == NULL) {
3256 			PF_RULES_WUNLOCK();
3257 			error = EINVAL;
3258 			break;
3259 		}
3260 		rs_num = pf_get_ruleset_number(pr->rule.action);
3261 		if (rs_num >= PF_RULESET_MAX) {
3262 			PF_RULES_WUNLOCK();
3263 			error = EINVAL;
3264 			break;
3265 		}
3266 		if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
3267 			PF_RULES_WUNLOCK();
3268 			error = EBUSY;
3269 			break;
3270 		}
3271 		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3272 		while ((rule != NULL) && (rule->nr != pr->nr))
3273 			rule = TAILQ_NEXT(rule, entries);
3274 		if (rule == NULL) {
3275 			PF_RULES_WUNLOCK();
3276 			error = EBUSY;
3277 			break;
3278 		}
3279 
3280 		pf_krule_to_rule(rule, &pr->rule);
3281 
3282 		if (pf_kanchor_copyout(ruleset, rule, pr)) {
3283 			PF_RULES_WUNLOCK();
3284 			error = EBUSY;
3285 			break;
3286 		}
3287 		pf_addr_copyout(&pr->rule.src.addr);
3288 		pf_addr_copyout(&pr->rule.dst.addr);
3289 
3290 		if (pr->action == PF_GET_CLR_CNTR) {
3291 			pf_counter_u64_zero(&rule->evaluations);
3292 			for (int i = 0; i < 2; i++) {
3293 				pf_counter_u64_zero(&rule->packets[i]);
3294 				pf_counter_u64_zero(&rule->bytes[i]);
3295 			}
3296 			counter_u64_zero(rule->states_tot);
3297 		}
3298 		PF_RULES_WUNLOCK();
3299 		break;
3300 	}
3301 
3302 	case DIOCGETRULENV: {
3303 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3304 		nvlist_t		*nvrule = NULL;
3305 		nvlist_t		*nvl = NULL;
3306 		struct pf_kruleset	*ruleset;
3307 		struct pf_krule		*rule;
3308 		void			*nvlpacked = NULL;
3309 		int			 rs_num, nr;
3310 		bool			 clear_counter = false;
3311 
3312 #define	ERROUT(x)	ERROUT_IOCTL(DIOCGETRULENV_error, x)
3313 
3314 		if (nv->len > pf_ioctl_maxcount)
3315 			ERROUT(ENOMEM);
3316 
3317 		/* Copy the request in */
3318 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3319 		if (nvlpacked == NULL)
3320 			ERROUT(ENOMEM);
3321 
3322 		error = copyin(nv->data, nvlpacked, nv->len);
3323 		if (error)
3324 			ERROUT(error);
3325 
3326 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3327 		if (nvl == NULL)
3328 			ERROUT(EBADMSG);
3329 
3330 		if (! nvlist_exists_string(nvl, "anchor"))
3331 			ERROUT(EBADMSG);
3332 		if (! nvlist_exists_number(nvl, "ruleset"))
3333 			ERROUT(EBADMSG);
3334 		if (! nvlist_exists_number(nvl, "ticket"))
3335 			ERROUT(EBADMSG);
3336 		if (! nvlist_exists_number(nvl, "nr"))
3337 			ERROUT(EBADMSG);
3338 
3339 		if (nvlist_exists_bool(nvl, "clear_counter"))
3340 			clear_counter = nvlist_get_bool(nvl, "clear_counter");
3341 
3342 		if (clear_counter && !(flags & FWRITE))
3343 			ERROUT(EACCES);
3344 
3345 		nr = nvlist_get_number(nvl, "nr");
3346 
3347 		PF_RULES_WLOCK();
3348 		ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor"));
3349 		if (ruleset == NULL) {
3350 			PF_RULES_WUNLOCK();
3351 			ERROUT(ENOENT);
3352 		}
3353 
3354 		rs_num = pf_get_ruleset_number(nvlist_get_number(nvl, "ruleset"));
3355 		if (rs_num >= PF_RULESET_MAX) {
3356 			PF_RULES_WUNLOCK();
3357 			ERROUT(EINVAL);
3358 		}
3359 
3360 		if (nvlist_get_number(nvl, "ticket") !=
3361 		    ruleset->rules[rs_num].active.ticket) {
3362 			PF_RULES_WUNLOCK();
3363 			ERROUT(EBUSY);
3364 		}
3365 
3366 		if ((error = nvlist_error(nvl))) {
3367 			PF_RULES_WUNLOCK();
3368 			ERROUT(error);
3369 		}
3370 
3371 		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3372 		while ((rule != NULL) && (rule->nr != nr))
3373 			rule = TAILQ_NEXT(rule, entries);
3374 		if (rule == NULL) {
3375 			PF_RULES_WUNLOCK();
3376 			ERROUT(EBUSY);
3377 		}
3378 
3379 		nvrule = pf_krule_to_nvrule(rule);
3380 
3381 		nvlist_destroy(nvl);
3382 		nvl = nvlist_create(0);
3383 		if (nvl == NULL) {
3384 			PF_RULES_WUNLOCK();
3385 			ERROUT(ENOMEM);
3386 		}
3387 		nvlist_add_number(nvl, "nr", nr);
3388 		nvlist_add_nvlist(nvl, "rule", nvrule);
3389 		nvlist_destroy(nvrule);
3390 		nvrule = NULL;
3391 		if (pf_kanchor_nvcopyout(ruleset, rule, nvl)) {
3392 			PF_RULES_WUNLOCK();
3393 			ERROUT(EBUSY);
3394 		}
3395 
3396 		free(nvlpacked, M_NVLIST);
3397 		nvlpacked = nvlist_pack(nvl, &nv->len);
3398 		if (nvlpacked == NULL) {
3399 			PF_RULES_WUNLOCK();
3400 			ERROUT(ENOMEM);
3401 		}
3402 
3403 		if (nv->size == 0) {
3404 			PF_RULES_WUNLOCK();
3405 			ERROUT(0);
3406 		} else if (nv->size < nv->len) {
3408 			PF_RULES_WUNLOCK();
3409 			ERROUT(ENOSPC);
3410 		}
3411 
3412 		if (clear_counter) {
3413 			pf_counter_u64_zero(&rule->evaluations);
3414 			for (int i = 0; i < 2; i++) {
3415 				pf_counter_u64_zero(&rule->packets[i]);
3416 				pf_counter_u64_zero(&rule->bytes[i]);
3417 			}
3418 			counter_u64_zero(rule->states_tot);
3419 		}
3420 		PF_RULES_WUNLOCK();
3421 
3422 		error = copyout(nvlpacked, nv->data, nv->len);
3423 
3424 #undef ERROUT
3425 DIOCGETRULENV_error:
3426 		free(nvlpacked, M_NVLIST);
3427 		nvlist_destroy(nvrule);
3428 		nvlist_destroy(nvl);
3429 
3430 		break;
3431 	}
3432 
3433 	case DIOCCHANGERULE: {
3434 		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
3435 		struct pf_kruleset	*ruleset;
3436 		struct pf_krule		*oldrule = NULL, *newrule = NULL;
3437 		struct pfi_kkif		*kif = NULL;
3438 		struct pf_kpooladdr	*pa;
3439 		u_int32_t		 nr = 0;
3440 		int			 rs_num;
3441 
3442 		pcr->anchor[sizeof(pcr->anchor) - 1] = 0;
3443 
3444 		if (pcr->action < PF_CHANGE_ADD_HEAD ||
3445 		    pcr->action > PF_CHANGE_GET_TICKET) {
3446 			error = EINVAL;
3447 			break;
3448 		}
3449 		if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3450 			error = EINVAL;
3451 			break;
3452 		}
3453 
3454 		if (pcr->action != PF_CHANGE_REMOVE) {
3455 			newrule = pf_krule_alloc();
3456 			error = pf_rule_to_krule(&pcr->rule, newrule);
3457 			if (error != 0) {
3458 				pf_krule_free(newrule);
3459 				break;
3460 			}
3461 
3462 			if (newrule->ifname[0])
3463 				kif = pf_kkif_create(M_WAITOK);
3464 			pf_counter_u64_init(&newrule->evaluations, M_WAITOK);
3465 			for (int i = 0; i < 2; i++) {
3466 				pf_counter_u64_init(&newrule->packets[i], M_WAITOK);
3467 				pf_counter_u64_init(&newrule->bytes[i], M_WAITOK);
3468 			}
3469 			newrule->states_cur = counter_u64_alloc(M_WAITOK);
3470 			newrule->states_tot = counter_u64_alloc(M_WAITOK);
3471 			newrule->src_nodes = counter_u64_alloc(M_WAITOK);
3472 			newrule->cuid = td->td_ucred->cr_ruid;
3473 			newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
3474 			TAILQ_INIT(&newrule->rpool.list);
3475 		}
3476 #define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGERULE_error, x)
3477 
3478 		PF_CONFIG_LOCK();
3479 		PF_RULES_WLOCK();
3480 #ifdef PF_WANT_32_TO_64_COUNTER
3481 		if (newrule != NULL) {
3482 			LIST_INSERT_HEAD(&V_pf_allrulelist, newrule, allrulelist);
3483 			newrule->allrulelinked = true;
3484 			V_pf_allrulecount++;
3485 		}
3486 #endif
3487 
3488 		if (!(pcr->action == PF_CHANGE_REMOVE ||
3489 		    pcr->action == PF_CHANGE_GET_TICKET) &&
3490 		    pcr->pool_ticket != V_ticket_pabuf)
3491 			ERROUT(EBUSY);
3492 
3493 		ruleset = pf_find_kruleset(pcr->anchor);
3494 		if (ruleset == NULL)
3495 			ERROUT(EINVAL);
3496 
3497 		rs_num = pf_get_ruleset_number(pcr->rule.action);
3498 		if (rs_num >= PF_RULESET_MAX)
3499 			ERROUT(EINVAL);
3500 
3501 		/*
3502 		 * XXXMJG: there is no guarantee that the ruleset was
3503 		 * created by the usual route of calling DIOCXBEGIN.
3504 		 * As a result it is possible the rule tree will not
3505 		 * be allocated yet. Hack around it by doing it here.
3506 		 * Note it is fine to let the tree persist in case of
3507 		 * error as it will be freed down the road on future
3508 		 * updates (if need be).
3509 		 */
3510 		if (ruleset->rules[rs_num].active.tree == NULL) {
3511 			ruleset->rules[rs_num].active.tree = pf_rule_tree_alloc(M_NOWAIT);
3512 			if (ruleset->rules[rs_num].active.tree == NULL) {
3513 				ERROUT(ENOMEM);
3514 			}
3515 		}
3516 
3517 		if (pcr->action == PF_CHANGE_GET_TICKET) {
3518 			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
3519 			ERROUT(0);
3520 		} else if (pcr->ticket !=
3521 			    ruleset->rules[rs_num].active.ticket)
3522 				ERROUT(EINVAL);
3523 
3524 		if (pcr->action != PF_CHANGE_REMOVE) {
3525 			if (newrule->ifname[0]) {
3526 				newrule->kif = pfi_kkif_attach(kif,
3527 				    newrule->ifname);
3528 				kif = NULL;
3529 				pfi_kkif_ref(newrule->kif);
3530 			} else
3531 				newrule->kif = NULL;
3532 
3533 			if (newrule->rtableid > 0 &&
3534 			    newrule->rtableid >= rt_numfibs)
3535 				error = EBUSY;
3536 
3537 #ifdef ALTQ
3538 			/* set queue IDs */
3539 			if (newrule->qname[0] != 0) {
3540 				if ((newrule->qid =
3541 				    pf_qname2qid(newrule->qname)) == 0)
3542 					error = EBUSY;
3543 				else if (newrule->pqname[0] != 0) {
3544 					if ((newrule->pqid =
3545 					    pf_qname2qid(newrule->pqname)) == 0)
3546 						error = EBUSY;
3547 				} else
3548 					newrule->pqid = newrule->qid;
3549 			}
3550 #endif /* ALTQ */
3551 			if (newrule->tagname[0])
3552 				if ((newrule->tag =
3553 				    pf_tagname2tag(newrule->tagname)) == 0)
3554 					error = EBUSY;
3555 			if (newrule->match_tagname[0])
3556 				if ((newrule->match_tag = pf_tagname2tag(
3557 				    newrule->match_tagname)) == 0)
3558 					error = EBUSY;
3559 			if (newrule->rt && !newrule->direction)
3560 				error = EINVAL;
3561 			if (!newrule->log)
3562 				newrule->logif = 0;
3563 			if (newrule->logif >= PFLOGIFS_MAX)
3564 				error = EINVAL;
3565 			if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
3566 				error = ENOMEM;
3567 			if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
3568 				error = ENOMEM;
3569 			if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call))
3570 				error = EINVAL;
3571 			TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
3572 				if (pa->addr.type == PF_ADDR_TABLE) {
3573 					pa->addr.p.tbl =
3574 					    pfr_attach_table(ruleset,
3575 					    pa->addr.v.tblname);
3576 					if (pa->addr.p.tbl == NULL)
3577 						error = ENOMEM;
3578 				}
3579 
3580 			newrule->overload_tbl = NULL;
3581 			if (newrule->overload_tblname[0]) {
3582 				if ((newrule->overload_tbl = pfr_attach_table(
3583 				    ruleset, newrule->overload_tblname)) ==
3584 				    NULL)
3585 					error = EINVAL;
3586 				else
3587 					newrule->overload_tbl->pfrkt_flags |=
3588 					    PFR_TFLAG_ACTIVE;
3589 			}
3590 
3591 			pf_mv_kpool(&V_pf_pabuf, &newrule->rpool.list);
3592 			if (((((newrule->action == PF_NAT) ||
3593 			    (newrule->action == PF_RDR) ||
3594 			    (newrule->action == PF_BINAT) ||
3595 			    (newrule->rt > PF_NOPFROUTE)) &&
3596 			    !newrule->anchor)) &&
3597 			    (TAILQ_FIRST(&newrule->rpool.list) == NULL))
3598 				error = EINVAL;
3599 
3600 			if (error) {
3601 				pf_free_rule(newrule);
3602 				PF_RULES_WUNLOCK();
3603 				PF_CONFIG_UNLOCK();
3604 				break;
3605 			}
3606 
3607 			newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
3608 		}
3609 		pf_empty_kpool(&V_pf_pabuf);
3610 
3611 		if (pcr->action == PF_CHANGE_ADD_HEAD)
3612 			oldrule = TAILQ_FIRST(
3613 			    ruleset->rules[rs_num].active.ptr);
3614 		else if (pcr->action == PF_CHANGE_ADD_TAIL)
3615 			oldrule = TAILQ_LAST(
3616 			    ruleset->rules[rs_num].active.ptr, pf_krulequeue);
3617 		else {
3618 			oldrule = TAILQ_FIRST(
3619 			    ruleset->rules[rs_num].active.ptr);
3620 			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
3621 				oldrule = TAILQ_NEXT(oldrule, entries);
3622 			if (oldrule == NULL) {
3623 				if (newrule != NULL)
3624 					pf_free_rule(newrule);
3625 				PF_RULES_WUNLOCK();
3626 				PF_CONFIG_UNLOCK();
3627 				error = EINVAL;
3628 				break;
3629 			}
3630 		}
3631 
3632 		if (pcr->action == PF_CHANGE_REMOVE) {
3633 			pf_unlink_rule(ruleset->rules[rs_num].active.ptr,
3634 			    oldrule);
3635 			RB_REMOVE(pf_krule_global,
3636 			    ruleset->rules[rs_num].active.tree, oldrule);
3637 			ruleset->rules[rs_num].active.rcount--;
3638 		} else {
3639 			pf_hash_rule(newrule);
3640 			if (RB_INSERT(pf_krule_global,
3641 			    ruleset->rules[rs_num].active.tree, newrule) != NULL) {
3642 				pf_free_rule(newrule);
3643 				PF_RULES_WUNLOCK();
3644 				PF_CONFIG_UNLOCK();
3645 				error = EEXIST;
3646 				break;
3647 			}
3648 
3649 			if (oldrule == NULL)
3650 				TAILQ_INSERT_TAIL(
3651 				    ruleset->rules[rs_num].active.ptr,
3652 				    newrule, entries);
3653 			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
3654 			    pcr->action == PF_CHANGE_ADD_BEFORE)
3655 				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
3656 			else
3657 				TAILQ_INSERT_AFTER(
3658 				    ruleset->rules[rs_num].active.ptr,
3659 				    oldrule, newrule, entries);
3660 			ruleset->rules[rs_num].active.rcount++;
3661 		}
3662 
3663 		nr = 0;
3664 		TAILQ_FOREACH(oldrule,
3665 		    ruleset->rules[rs_num].active.ptr, entries)
3666 			oldrule->nr = nr++;
3667 
3668 		ruleset->rules[rs_num].active.ticket++;
3669 
3670 		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
3671 		pf_remove_if_empty_kruleset(ruleset);
3672 
3673 		PF_RULES_WUNLOCK();
3674 		PF_CONFIG_UNLOCK();
3675 		break;
3676 
3677 #undef ERROUT
3678 DIOCCHANGERULE_error:
3679 		PF_RULES_WUNLOCK();
3680 		PF_CONFIG_UNLOCK();
3681 		pf_krule_free(newrule);
3682 		pf_kkif_free(kif);
3683 		break;
3684 	}
3685 
3686 	case DIOCCLRSTATES: {
3687 		struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
3688 		struct pf_kstate_kill	 kill;
3689 
3690 		error = pf_state_kill_to_kstate_kill(psk, &kill);
3691 		if (error)
3692 			break;
3693 
3694 		psk->psk_killed = pf_clear_states(&kill);
3695 		break;
3696 	}
3697 
3698 	case DIOCCLRSTATESNV: {
3699 		error = pf_clearstates_nv((struct pfioc_nv *)addr);
3700 		break;
3701 	}
3702 
3703 	case DIOCKILLSTATES: {
3704 		struct pfioc_state_kill	*psk = (struct pfioc_state_kill *)addr;
3705 		struct pf_kstate_kill	 kill;
3706 
3707 		error = pf_state_kill_to_kstate_kill(psk, &kill);
3708 		if (error)
3709 			break;
3710 
3711 		psk->psk_killed = 0;
3712 		pf_killstates(&kill, &psk->psk_killed);
3713 		break;
3714 	}
3715 
3716 	case DIOCKILLSTATESNV: {
3717 		error = pf_killstates_nv((struct pfioc_nv *)addr);
3718 		break;
3719 	}
3720 
3721 	case DIOCADDSTATE: {
3722 		struct pfioc_state	*ps = (struct pfioc_state *)addr;
3723 		struct pfsync_state	*sp = &ps->state;
3724 
3725 		if (sp->timeout >= PFTM_MAX) {
3726 			error = EINVAL;
3727 			break;
3728 		}
3729 		if (V_pfsync_state_import_ptr != NULL) {
3730 			PF_RULES_RLOCK();
3731 			error = V_pfsync_state_import_ptr(sp, PFSYNC_SI_IOCTL);
3732 			PF_RULES_RUNLOCK();
3733 		} else
3734 			error = EOPNOTSUPP;
3735 		break;
3736 	}
3737 
3738 	case DIOCGETSTATE: {
3739 		struct pfioc_state	*ps = (struct pfioc_state *)addr;
3740 		struct pf_kstate	*s;
3741 
3742 		s = pf_find_state_byid(ps->state.id, ps->state.creatorid);
3743 		if (s == NULL) {
3744 			error = ENOENT;
3745 			break;
3746 		}
3747 
3748 		pfsync_state_export(&ps->state, s);
3749 		PF_STATE_UNLOCK(s);
3750 		break;
3751 	}
3752 
3753 	case DIOCGETSTATENV: {
3754 		error = pf_getstate((struct pfioc_nv *)addr);
3755 		break;
3756 	}
3757 
3758 	case DIOCGETSTATES: {
3759 		struct pfioc_states	*ps = (struct pfioc_states *)addr;
3760 		struct pf_kstate	*s;
3761 		struct pfsync_state	*pstore, *p;
3762 		int			 i, nr;
3763 		size_t			 slice_count = 16, count;
3764 		void			*out;
3765 
3766 		if (ps->ps_len <= 0) {
3767 			nr = uma_zone_get_cur(V_pf_state_z);
3768 			ps->ps_len = sizeof(struct pfsync_state) * nr;
3769 			break;
3770 		}
3771 
3772 		out = ps->ps_states;
3773 		pstore = mallocarray(slice_count,
3774 		    sizeof(struct pfsync_state), M_TEMP, M_WAITOK | M_ZERO);
3775 		nr = 0;
3776 
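		/*
		 * Copy the table out one hash row at a time.  The slice
		 * buffer is grown to the largest row seen so far; when a
		 * row outgrows it, the row lock is dropped, the buffer is
		 * reallocated and the row is rescanned from scratch.
		 */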
3777 		for (i = 0; i <= pf_hashmask; i++) {
3778 			struct pf_idhash *ih = &V_pf_idhash[i];
3779 
3780 DIOCGETSTATES_retry:
3781 			p = pstore;
3782 
3783 			if (LIST_EMPTY(&ih->states))
3784 				continue;
3785 
3786 			PF_HASHROW_LOCK(ih);
3787 			count = 0;
3788 			LIST_FOREACH(s, &ih->states, entry) {
3789 				if (s->timeout == PFTM_UNLINKED)
3790 					continue;
3791 				count++;
3792 			}
3793 
3794 			if (count > slice_count) {
3795 				PF_HASHROW_UNLOCK(ih);
3796 				free(pstore, M_TEMP);
3797 				slice_count = count * 2;
3798 				pstore = mallocarray(slice_count,
3799 				    sizeof(struct pfsync_state), M_TEMP,
3800 				    M_WAITOK | M_ZERO);
3801 				goto DIOCGETSTATES_retry;
3802 			}
3803 
3804 			if ((nr+count) * sizeof(*p) > ps->ps_len) {
3805 				PF_HASHROW_UNLOCK(ih);
3806 				goto DIOCGETSTATES_full;
3807 			}
3808 
3809 			LIST_FOREACH(s, &ih->states, entry) {
3810 				if (s->timeout == PFTM_UNLINKED)
3811 					continue;
3812 
3813 				pfsync_state_export(p, s);
3814 				p++;
3815 				nr++;
3816 			}
3817 			PF_HASHROW_UNLOCK(ih);
3818 			error = copyout(pstore, out,
3819 			    sizeof(struct pfsync_state) * count);
3820 			if (error)
3821 				break;
3822 			out = ps->ps_states + nr;
3823 		}
3824 DIOCGETSTATES_full:
3825 		ps->ps_len = sizeof(struct pfsync_state) * nr;
3826 		free(pstore, M_TEMP);
3827 
3828 		break;
3829 	}
3830 
3831 	case DIOCGETSTATESV2: {
3832 		struct pfioc_states_v2	*ps = (struct pfioc_states_v2 *)addr;
3833 		struct pf_kstate	*s;
3834 		struct pf_state_export	*pstore, *p;
3835 		int i, nr;
3836 		size_t slice_count = 16, count;
3837 		void *out;
3838 
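		/* Refuse requests for an export format newer than this
		 * kernel supports. */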
3839 		if (ps->ps_req_version > PF_STATE_VERSION) {
3840 			error = ENOTSUP;
3841 			break;
3842 		}
3843 
3844 		if (ps->ps_len <= 0) {
3845 			nr = uma_zone_get_cur(V_pf_state_z);
3846 			ps->ps_len = sizeof(struct pf_state_export) * nr;
3847 			break;
3848 		}
3849 
3850 		out = ps->ps_states;
3851 		pstore = mallocarray(slice_count,
3852 		    sizeof(struct pf_state_export), M_TEMP, M_WAITOK | M_ZERO);
3853 		nr = 0;
3854 
3855 		for (i = 0; i <= pf_hashmask; i++) {
3856 			struct pf_idhash *ih = &V_pf_idhash[i];
3857 
3858 DIOCGETSTATESV2_retry:
3859 			p = pstore;
3860 
3861 			if (LIST_EMPTY(&ih->states))
3862 				continue;
3863 
3864 			PF_HASHROW_LOCK(ih);
3865 			count = 0;
3866 			LIST_FOREACH(s, &ih->states, entry) {
3867 				if (s->timeout == PFTM_UNLINKED)
3868 					continue;
3869 				count++;
3870 			}
3871 
3872 			if (count > slice_count) {
3873 				PF_HASHROW_UNLOCK(ih);
3874 				free(pstore, M_TEMP);
3875 				slice_count = count * 2;
3876 				pstore = mallocarray(slice_count,
3877 				    sizeof(struct pf_state_export), M_TEMP,
3878 				    M_WAITOK | M_ZERO);
3879 				goto DIOCGETSTATESV2_retry;
3880 			}
3881 
3882 			if ((nr+count) * sizeof(*p) > ps->ps_len) {
3883 				PF_HASHROW_UNLOCK(ih);
3884 				goto DIOCGETSTATESV2_full;
3885 			}
3886 
3887 			LIST_FOREACH(s, &ih->states, entry) {
3888 				if (s->timeout == PFTM_UNLINKED)
3889 					continue;
3890 
3891 				pf_state_export(p, s);
3892 				p++;
3893 				nr++;
3894 			}
3895 			PF_HASHROW_UNLOCK(ih);
3896 			error = copyout(pstore, out,
3897 			    sizeof(struct pf_state_export) * count);
3898 			if (error)
3899 				break;
3900 			out = ps->ps_states + nr;
3901 		}
3902 DIOCGETSTATESV2_full:
3903 		ps->ps_len = nr * sizeof(struct pf_state_export);
3904 		free(pstore, M_TEMP);
3905 
3906 		break;
3907 	}
3908 
3909 	case DIOCGETSTATUS: {
3910 		struct pf_status *s = (struct pf_status *)addr;
3911 
3912 		PF_RULES_RLOCK();
3913 		s->running = V_pf_status.running;
3914 		s->since   = V_pf_status.since;
3915 		s->debug   = V_pf_status.debug;
3916 		s->hostid  = V_pf_status.hostid;
3917 		s->states  = V_pf_status.states;
3918 		s->src_nodes = V_pf_status.src_nodes;
3919 
3920 		for (int i = 0; i < PFRES_MAX; i++)
3921 			s->counters[i] =
3922 			    counter_u64_fetch(V_pf_status.counters[i]);
3923 		for (int i = 0; i < LCNT_MAX; i++)
3924 			s->lcounters[i] =
3925 			    counter_u64_fetch(V_pf_status.lcounters[i]);
3926 		for (int i = 0; i < FCNT_MAX; i++)
3927 			s->fcounters[i] =
3928 			    pf_counter_u64_fetch(&V_pf_status.fcounters[i]);
3929 		for (int i = 0; i < SCNT_MAX; i++)
3930 			s->scounters[i] =
3931 			    counter_u64_fetch(V_pf_status.scounters[i]);
3932 
3933 		bcopy(V_pf_status.ifname, s->ifname, IFNAMSIZ);
3934 		bcopy(V_pf_status.pf_chksum, s->pf_chksum,
3935 		    PF_MD5_DIGEST_LENGTH);
3936 
3937 		pfi_update_status(s->ifname, s);
3938 		PF_RULES_RUNLOCK();
3939 		break;
3940 	}
3941 
3942 	case DIOCGETSTATUSNV: {
3943 		error = pf_getstatus((struct pfioc_nv *)addr);
3944 		break;
3945 	}
3946 
3947 	case DIOCSETSTATUSIF: {
3948 		struct pfioc_if	*pi = (struct pfioc_if *)addr;
3949 
3950 		if (pi->ifname[0] == 0) {
3951 			bzero(V_pf_status.ifname, IFNAMSIZ);
3952 			break;
3953 		}
3954 		PF_RULES_WLOCK();
3955 		error = pf_user_strcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ);
3956 		PF_RULES_WUNLOCK();
3957 		break;
3958 	}
3959 
3960 	case DIOCCLRSTATUS: {
3961 		PF_RULES_WLOCK();
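		/* Zero all status counters and reset the "since" timestamp;
		 * the enabled/disabled state itself is left untouched. */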
3962 		for (int i = 0; i < PFRES_MAX; i++)
3963 			counter_u64_zero(V_pf_status.counters[i]);
3964 		for (int i = 0; i < FCNT_MAX; i++)
3965 			pf_counter_u64_zero(&V_pf_status.fcounters[i]);
3966 		for (int i = 0; i < SCNT_MAX; i++)
3967 			counter_u64_zero(V_pf_status.scounters[i]);
3968 		for (int i = 0; i < KLCNT_MAX; i++)
3969 			counter_u64_zero(V_pf_status.lcounters[i]);
3970 		V_pf_status.since = time_second;
3971 		if (*V_pf_status.ifname)
3972 			pfi_update_status(V_pf_status.ifname, NULL);
3973 		PF_RULES_WUNLOCK();
3974 		break;
3975 	}
3976 
3977 	case DIOCNATLOOK: {
3978 		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
3979 		struct pf_state_key	*sk;
3980 		struct pf_kstate	*state;
3981 		struct pf_state_key_cmp	 key;
3982 		int			 m = 0, direction = pnl->direction;
3983 		int			 sidx, didx;
3984 
3985 		/* NATLOOK src and dst are reversed, so reverse sidx/didx */
3986 		sidx = (direction == PF_IN) ? 1 : 0;
3987 		didx = (direction == PF_IN) ? 0 : 1;
3988 
3989 		if (!pnl->proto ||
3990 		    PF_AZERO(&pnl->saddr, pnl->af) ||
3991 		    PF_AZERO(&pnl->daddr, pnl->af) ||
3992 		    ((pnl->proto == IPPROTO_TCP ||
3993 		    pnl->proto == IPPROTO_UDP) &&
3994 		    (!pnl->dport || !pnl->sport)))
3995 			error = EINVAL;
3996 		else {
3997 			bzero(&key, sizeof(key));
3998 			key.af = pnl->af;
3999 			key.proto = pnl->proto;
4000 			PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
4001 			key.port[sidx] = pnl->sport;
4002 			PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
4003 			key.port[didx] = pnl->dport;
4004 
4005 			state = pf_find_state_all(&key, direction, &m);
4006 			if (state == NULL) {
4007 				error = ENOENT;
4008 			} else {
4009 				if (m > 1) {
4010 					PF_STATE_UNLOCK(state);
4011 					error = E2BIG;	/* more than one state */
4012 				} else {
4013 					sk = state->key[sidx];
4014 					PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
4015 					pnl->rsport = sk->port[sidx];
4016 					PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
4017 					pnl->rdport = sk->port[didx];
4018 					PF_STATE_UNLOCK(state);
4019 				}
4020 			}
4021 		}
4022 		break;
4023 	}
4024 
4025 	case DIOCSETTIMEOUT: {
4026 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
4027 		int		 old;
4028 
4029 		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
4030 		    pt->seconds < 0) {
4031 			error = EINVAL;
4032 			break;
4033 		}
4034 		PF_RULES_WLOCK();
4035 		old = V_pf_default_rule.timeout[pt->timeout];
4036 		if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
4037 			pt->seconds = 1;
4038 		V_pf_default_rule.timeout[pt->timeout] = pt->seconds;
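		/* If the purge interval was shortened, wake the purge thread
		 * so the new value takes effect immediately; the previous
		 * value is handed back to the caller. */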
4039 		if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
4040 			wakeup(pf_purge_thread);
4041 		pt->seconds = old;
4042 		PF_RULES_WUNLOCK();
4043 		break;
4044 	}
4045 
4046 	case DIOCGETTIMEOUT: {
4047 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
4048 
4049 		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
4050 			error = EINVAL;
4051 			break;
4052 		}
4053 		PF_RULES_RLOCK();
4054 		pt->seconds = V_pf_default_rule.timeout[pt->timeout];
4055 		PF_RULES_RUNLOCK();
4056 		break;
4057 	}
4058 
4059 	case DIOCGETLIMIT: {
4060 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
4061 
4062 		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
4063 			error = EINVAL;
4064 			break;
4065 		}
4066 		PF_RULES_RLOCK();
4067 		pl->limit = V_pf_limits[pl->index].limit;
4068 		PF_RULES_RUNLOCK();
4069 		break;
4070 	}
4071 
4072 	case DIOCSETLIMIT: {
4073 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
4074 		int			 old_limit;
4075 
4076 		PF_RULES_WLOCK();
4077 		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
4078 		    V_pf_limits[pl->index].zone == NULL) {
4079 			PF_RULES_WUNLOCK();
4080 			error = EINVAL;
4081 			break;
4082 		}
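		/* Apply the new cap to the backing UMA zone and return the
		 * previous limit to the caller. */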
4083 		uma_zone_set_max(V_pf_limits[pl->index].zone, pl->limit);
4084 		old_limit = V_pf_limits[pl->index].limit;
4085 		V_pf_limits[pl->index].limit = pl->limit;
4086 		pl->limit = old_limit;
4087 		PF_RULES_WUNLOCK();
4088 		break;
4089 	}
4090 
4091 	case DIOCSETDEBUG: {
4092 		u_int32_t	*level = (u_int32_t *)addr;
4093 
4094 		PF_RULES_WLOCK();
4095 		V_pf_status.debug = *level;
4096 		PF_RULES_WUNLOCK();
4097 		break;
4098 	}
4099 
4100 	case DIOCCLRRULECTRS: {
4101 		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
4102 		struct pf_kruleset	*ruleset = &pf_main_ruleset;
4103 		struct pf_krule		*rule;
4104 
4105 		PF_RULES_WLOCK();
4106 		TAILQ_FOREACH(rule,
4107 		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
4108 			pf_counter_u64_zero(&rule->evaluations);
4109 			for (int i = 0; i < 2; i++) {
4110 				pf_counter_u64_zero(&rule->packets[i]);
4111 				pf_counter_u64_zero(&rule->bytes[i]);
4112 			}
4113 		}
4114 		PF_RULES_WUNLOCK();
4115 		break;
4116 	}
4117 
4118 	case DIOCGIFSPEEDV0:
4119 	case DIOCGIFSPEEDV1: {
4120 		struct pf_ifspeed_v1	*psp = (struct pf_ifspeed_v1 *)addr;
4121 		struct pf_ifspeed_v1	ps;
4122 		struct ifnet		*ifp;
4123 
4124 		if (psp->ifname[0] == '\0') {
4125 			error = EINVAL;
4126 			break;
4127 		}
4128 
4129 		error = pf_user_strcpy(ps.ifname, psp->ifname, IFNAMSIZ);
4130 		if (error != 0)
4131 			break;
4132 		ifp = ifunit(ps.ifname);
4133 		if (ifp != NULL) {
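			/* The v0 ioctl only carries a 32-bit baudrate field,
			 * so clamp it; v1 callers also get the full value. */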
4134 			psp->baudrate32 =
4135 			    (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX);
4136 			if (cmd == DIOCGIFSPEEDV1)
4137 				psp->baudrate = ifp->if_baudrate;
4138 		} else {
4139 			error = EINVAL;
4140 		}
4141 		break;
4142 	}
4143 
4144 #ifdef ALTQ
4145 	case DIOCSTARTALTQ: {
4146 		struct pf_altq		*altq;
4147 
4148 		PF_RULES_WLOCK();
4149 		/* enable all altq interfaces on active list */
4150 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
4151 			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
4152 				error = pf_enable_altq(altq);
4153 				if (error != 0)
4154 					break;
4155 			}
4156 		}
4157 		if (error == 0)
4158 			V_pf_altq_running = 1;
4159 		PF_RULES_WUNLOCK();
4160 		DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
4161 		break;
4162 	}
4163 
4164 	case DIOCSTOPALTQ: {
4165 		struct pf_altq		*altq;
4166 
4167 		PF_RULES_WLOCK();
4168 		/* disable all altq interfaces on active list */
4169 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
4170 			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
4171 				error = pf_disable_altq(altq);
4172 				if (error != 0)
4173 					break;
4174 			}
4175 		}
4176 		if (error == 0)
4177 			V_pf_altq_running = 0;
4178 		PF_RULES_WUNLOCK();
4179 		DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
4180 		break;
4181 	}
4182 
4183 	case DIOCADDALTQV0:
4184 	case DIOCADDALTQV1: {
4185 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4186 		struct pf_altq		*altq, *a;
4187 		struct ifnet		*ifp;
4188 
4189 		altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO);
4190 		error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd));
4191 		if (error)
4192 			break;
4193 		altq->local_flags = 0;
4194 
4195 		PF_RULES_WLOCK();
4196 		if (pa->ticket != V_ticket_altqs_inactive) {
4197 			PF_RULES_WUNLOCK();
4198 			free(altq, M_PFALTQ);
4199 			error = EBUSY;
4200 			break;
4201 		}
4202 
4203 		/*
4204 		 * if this is for a queue, find the discipline and
4205 		 * copy the necessary fields
4206 		 */
4207 		if (altq->qname[0] != 0) {
4208 			if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
4209 				PF_RULES_WUNLOCK();
4210 				error = EBUSY;
4211 				free(altq, M_PFALTQ);
4212 				break;
4213 			}
4214 			altq->altq_disc = NULL;
4215 			TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) {
4216 				if (strncmp(a->ifname, altq->ifname,
4217 				    IFNAMSIZ) == 0) {
4218 					altq->altq_disc = a->altq_disc;
4219 					break;
4220 				}
4221 			}
4222 		}
4223 
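		/* If the interface does not exist, keep the queue but mark
		 * it removed instead of failing; otherwise attach the
		 * discipline to the interface. */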
4224 		if ((ifp = ifunit(altq->ifname)) == NULL)
4225 			altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
4226 		else
4227 			error = altq_add(ifp, altq);
4228 
4229 		if (error) {
4230 			PF_RULES_WUNLOCK();
4231 			free(altq, M_PFALTQ);
4232 			break;
4233 		}
4234 
4235 		if (altq->qname[0] != 0)
4236 			TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries);
4237 		else
4238 			TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries);
4239 		/* version error check done on import above */
4240 		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
4241 		PF_RULES_WUNLOCK();
4242 		break;
4243 	}
4244 
4245 	case DIOCGETALTQSV0:
4246 	case DIOCGETALTQSV1: {
4247 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4248 		struct pf_altq		*altq;
4249 
4250 		PF_RULES_RLOCK();
4251 		pa->nr = 0;
4252 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries)
4253 			pa->nr++;
4254 		TAILQ_FOREACH(altq, V_pf_altqs_active, entries)
4255 			pa->nr++;
4256 		pa->ticket = V_ticket_altqs_active;
4257 		PF_RULES_RUNLOCK();
4258 		break;
4259 	}
4260 
4261 	case DIOCGETALTQV0:
4262 	case DIOCGETALTQV1: {
4263 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4264 		struct pf_altq		*altq;
4265 
4266 		PF_RULES_RLOCK();
4267 		if (pa->ticket != V_ticket_altqs_active) {
4268 			PF_RULES_RUNLOCK();
4269 			error = EBUSY;
4270 			break;
4271 		}
4272 		altq = pf_altq_get_nth_active(pa->nr);
4273 		if (altq == NULL) {
4274 			PF_RULES_RUNLOCK();
4275 			error = EBUSY;
4276 			break;
4277 		}
4278 		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
4279 		PF_RULES_RUNLOCK();
4280 		break;
4281 	}
4282 
4283 	case DIOCCHANGEALTQV0:
4284 	case DIOCCHANGEALTQV1:
4285 		/* CHANGEALTQ not supported yet! */
4286 		error = ENODEV;
4287 		break;
4288 
4289 	case DIOCGETQSTATSV0:
4290 	case DIOCGETQSTATSV1: {
4291 		struct pfioc_qstats_v1	*pq = (struct pfioc_qstats_v1 *)addr;
4292 		struct pf_altq		*altq;
4293 		int			 nbytes;
4294 		u_int32_t		 version;
4295 
4296 		PF_RULES_RLOCK();
4297 		if (pq->ticket != V_ticket_altqs_active) {
4298 			PF_RULES_RUNLOCK();
4299 			error = EBUSY;
4300 			break;
4301 		}
4302 		nbytes = pq->nbytes;
4303 		altq = pf_altq_get_nth_active(pq->nr);
4304 		if (altq == NULL) {
4305 			PF_RULES_RUNLOCK();
4306 			error = EBUSY;
4307 			break;
4308 		}
4309 
4310 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
4311 			PF_RULES_RUNLOCK();
4312 			error = ENXIO;
4313 			break;
4314 		}
4315 		PF_RULES_RUNLOCK();
4316 		if (cmd == DIOCGETQSTATSV0)
4317 			version = 0;  /* DIOCGETQSTATSV0 means stats struct v0 */
4318 		else
4319 			version = pq->version;
4320 		error = altq_getqstats(altq, pq->buf, &nbytes, version);
4321 		if (error == 0) {
4322 			pq->scheduler = altq->scheduler;
4323 			pq->nbytes = nbytes;
4324 		}
4325 		break;
4326 	}
4327 #endif /* ALTQ */
4328 
4329 	case DIOCBEGINADDRS: {
4330 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4331 
4332 		PF_RULES_WLOCK();
4333 		pf_empty_kpool(&V_pf_pabuf);
4334 		pp->ticket = ++V_ticket_pabuf;
4335 		PF_RULES_WUNLOCK();
4336 		break;
4337 	}
4338 
4339 	case DIOCADDADDR: {
4340 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4341 		struct pf_kpooladdr	*pa;
4342 		struct pfi_kkif		*kif = NULL;
4343 
4344 #ifndef INET
4345 		if (pp->af == AF_INET) {
4346 			error = EAFNOSUPPORT;
4347 			break;
4348 		}
4349 #endif /* INET */
4350 #ifndef INET6
4351 		if (pp->af == AF_INET6) {
4352 			error = EAFNOSUPPORT;
4353 			break;
4354 		}
4355 #endif /* INET6 */
4356 		if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
4357 		    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
4358 		    pp->addr.addr.type != PF_ADDR_TABLE) {
4359 			error = EINVAL;
4360 			break;
4361 		}
4362 		if (pp->addr.addr.p.dyn != NULL) {
4363 			error = EINVAL;
4364 			break;
4365 		}
4366 		pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK);
4367 		error = pf_pooladdr_to_kpooladdr(&pp->addr, pa);
4368 		if (error != 0)
4369 			break;
4370 		if (pa->ifname[0])
4371 			kif = pf_kkif_create(M_WAITOK);
4372 		PF_RULES_WLOCK();
4373 		if (pp->ticket != V_ticket_pabuf) {
4374 			PF_RULES_WUNLOCK();
4375 			if (pa->ifname[0])
4376 				pf_kkif_free(kif);
4377 			free(pa, M_PFRULE);
4378 			error = EBUSY;
4379 			break;
4380 		}
4381 		if (pa->ifname[0]) {
4382 			pa->kif = pfi_kkif_attach(kif, pa->ifname);
4383 			kif = NULL;
4384 			pfi_kkif_ref(pa->kif);
4385 		} else
4386 			pa->kif = NULL;
4387 		if (pa->addr.type == PF_ADDR_DYNIFTL && ((error =
4388 		    pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) {
4389 			if (pa->ifname[0])
4390 				pfi_kkif_unref(pa->kif);
4391 			PF_RULES_WUNLOCK();
4392 			free(pa, M_PFRULE);
4393 			break;
4394 		}
4395 		TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries);
4396 		PF_RULES_WUNLOCK();
4397 		break;
4398 	}
4399 
4400 	case DIOCGETADDRS: {
4401 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4402 		struct pf_kpool		*pool;
4403 		struct pf_kpooladdr	*pa;
4404 
4405 		pp->anchor[sizeof(pp->anchor) - 1] = 0;
4406 		pp->nr = 0;
4407 
4408 		PF_RULES_RLOCK();
4409 		pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
4410 		    pp->r_num, 0, 1, 0);
4411 		if (pool == NULL) {
4412 			PF_RULES_RUNLOCK();
4413 			error = EBUSY;
4414 			break;
4415 		}
4416 		TAILQ_FOREACH(pa, &pool->list, entries)
4417 			pp->nr++;
4418 		PF_RULES_RUNLOCK();
4419 		break;
4420 	}
4421 
4422 	case DIOCGETADDR: {
4423 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4424 		struct pf_kpool		*pool;
4425 		struct pf_kpooladdr	*pa;
4426 		u_int32_t		 nr = 0;
4427 
4428 		pp->anchor[sizeof(pp->anchor) - 1] = 0;
4429 
4430 		PF_RULES_RLOCK();
4431 		pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
4432 		    pp->r_num, 0, 1, 1);
4433 		if (pool == NULL) {
4434 			PF_RULES_RUNLOCK();
4435 			error = EBUSY;
4436 			break;
4437 		}
4438 		pa = TAILQ_FIRST(&pool->list);
4439 		while ((pa != NULL) && (nr < pp->nr)) {
4440 			pa = TAILQ_NEXT(pa, entries);
4441 			nr++;
4442 		}
4443 		if (pa == NULL) {
4444 			PF_RULES_RUNLOCK();
4445 			error = EBUSY;
4446 			break;
4447 		}
4448 		pf_kpooladdr_to_pooladdr(pa, &pp->addr);
4449 		pf_addr_copyout(&pp->addr.addr);
4450 		PF_RULES_RUNLOCK();
4451 		break;
4452 	}
4453 
4454 	case DIOCCHANGEADDR: {
4455 		struct pfioc_pooladdr	*pca = (struct pfioc_pooladdr *)addr;
4456 		struct pf_kpool		*pool;
4457 		struct pf_kpooladdr	*oldpa = NULL, *newpa = NULL;
4458 		struct pf_kruleset	*ruleset;
4459 		struct pfi_kkif		*kif = NULL;
4460 
4461 		pca->anchor[sizeof(pca->anchor) - 1] = 0;
4462 
4463 		if (pca->action < PF_CHANGE_ADD_HEAD ||
4464 		    pca->action > PF_CHANGE_REMOVE) {
4465 			error = EINVAL;
4466 			break;
4467 		}
4468 		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
4469 		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
4470 		    pca->addr.addr.type != PF_ADDR_TABLE) {
4471 			error = EINVAL;
4472 			break;
4473 		}
4474 		if (pca->addr.addr.p.dyn != NULL) {
4475 			error = EINVAL;
4476 			break;
4477 		}
4478 
4479 		if (pca->action != PF_CHANGE_REMOVE) {
4480 #ifndef INET
4481 			if (pca->af == AF_INET) {
4482 				error = EAFNOSUPPORT;
4483 				break;
4484 			}
4485 #endif /* INET */
4486 #ifndef INET6
4487 			if (pca->af == AF_INET6) {
4488 				error = EAFNOSUPPORT;
4489 				break;
4490 			}
4491 #endif /* INET6 */
4492 			newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK);
4493 			bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
4494 			if (newpa->ifname[0])
4495 				kif = pf_kkif_create(M_WAITOK);
4496 			newpa->kif = NULL;
4497 		}
4498 #define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGEADDR_error, x)
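		/* From here on, errors unwind through DIOCCHANGEADDR_error,
		 * which frees newpa and kif before breaking out. */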
4499 		PF_RULES_WLOCK();
4500 		ruleset = pf_find_kruleset(pca->anchor);
4501 		if (ruleset == NULL)
4502 			ERROUT(EBUSY);
4503 
4504 		pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action,
4505 		    pca->r_num, pca->r_last, 1, 1);
4506 		if (pool == NULL)
4507 			ERROUT(EBUSY);
4508 
4509 		if (pca->action != PF_CHANGE_REMOVE) {
4510 			if (newpa->ifname[0]) {
4511 				newpa->kif = pfi_kkif_attach(kif, newpa->ifname);
4512 				pfi_kkif_ref(newpa->kif);
4513 				kif = NULL;
4514 			}
4515 
4516 			switch (newpa->addr.type) {
4517 			case PF_ADDR_DYNIFTL:
4518 				error = pfi_dynaddr_setup(&newpa->addr,
4519 				    pca->af);
4520 				break;
4521 			case PF_ADDR_TABLE:
4522 				newpa->addr.p.tbl = pfr_attach_table(ruleset,
4523 				    newpa->addr.v.tblname);
4524 				if (newpa->addr.p.tbl == NULL)
4525 					error = ENOMEM;
4526 				break;
4527 			}
4528 			if (error)
4529 				goto DIOCCHANGEADDR_error;
4530 		}
4531 
4532 		switch (pca->action) {
4533 		case PF_CHANGE_ADD_HEAD:
4534 			oldpa = TAILQ_FIRST(&pool->list);
4535 			break;
4536 		case PF_CHANGE_ADD_TAIL:
4537 			oldpa = TAILQ_LAST(&pool->list, pf_kpalist);
4538 			break;
4539 		default:
4540 			oldpa = TAILQ_FIRST(&pool->list);
4541 			for (int i = 0; oldpa && i < pca->nr; i++)
4542 				oldpa = TAILQ_NEXT(oldpa, entries);
4543 
4544 			if (oldpa == NULL)
4545 				ERROUT(EINVAL);
4546 		}
4547 
4548 		if (pca->action == PF_CHANGE_REMOVE) {
4549 			TAILQ_REMOVE(&pool->list, oldpa, entries);
4550 			switch (oldpa->addr.type) {
4551 			case PF_ADDR_DYNIFTL:
4552 				pfi_dynaddr_remove(oldpa->addr.p.dyn);
4553 				break;
4554 			case PF_ADDR_TABLE:
4555 				pfr_detach_table(oldpa->addr.p.tbl);
4556 				break;
4557 			}
4558 			if (oldpa->kif)
4559 				pfi_kkif_unref(oldpa->kif);
4560 			free(oldpa, M_PFRULE);
4561 		} else {
4562 			if (oldpa == NULL)
4563 				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
4564 			else if (pca->action == PF_CHANGE_ADD_HEAD ||
4565 			    pca->action == PF_CHANGE_ADD_BEFORE)
4566 				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
4567 			else
4568 				TAILQ_INSERT_AFTER(&pool->list, oldpa,
4569 				    newpa, entries);
4570 		}
4571 
4572 		pool->cur = TAILQ_FIRST(&pool->list);
4573 		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af);
4574 		PF_RULES_WUNLOCK();
4575 		break;
4576 
4577 #undef ERROUT
4578 DIOCCHANGEADDR_error:
4579 		if (newpa != NULL) {
4580 			if (newpa->kif)
4581 				pfi_kkif_unref(newpa->kif);
4582 			free(newpa, M_PFRULE);
4583 		}
4584 		PF_RULES_WUNLOCK();
4585 		pf_kkif_free(kif);
4586 		break;
4587 	}
4588 
4589 	case DIOCGETRULESETS: {
4590 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
4591 		struct pf_kruleset	*ruleset;
4592 		struct pf_kanchor	*anchor;
4593 
4594 		pr->path[sizeof(pr->path) - 1] = 0;
4595 
4596 		PF_RULES_RLOCK();
4597 		if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
4598 			PF_RULES_RUNLOCK();
4599 			error = ENOENT;
4600 			break;
4601 		}
4602 		pr->nr = 0;
4603 		if (ruleset->anchor == NULL) {
4604 			/* XXX kludge for pf_main_ruleset */
4605 			RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
4606 				if (anchor->parent == NULL)
4607 					pr->nr++;
4608 		} else {
4609 			RB_FOREACH(anchor, pf_kanchor_node,
4610 			    &ruleset->anchor->children)
4611 				pr->nr++;
4612 		}
4613 		PF_RULES_RUNLOCK();
4614 		break;
4615 	}
4616 
4617 	case DIOCGETRULESET: {
4618 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
4619 		struct pf_kruleset	*ruleset;
4620 		struct pf_kanchor	*anchor;
4621 		u_int32_t		 nr = 0;
4622 
4623 		pr->path[sizeof(pr->path) - 1] = 0;
4624 
4625 		PF_RULES_RLOCK();
4626 		if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
4627 			PF_RULES_RUNLOCK();
4628 			error = ENOENT;
4629 			break;
4630 		}
4631 		pr->name[0] = 0;
4632 		if (ruleset->anchor == NULL) {
4633 			/* XXX kludge for pf_main_ruleset */
4634 			RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
4635 				if (anchor->parent == NULL && nr++ == pr->nr) {
4636 					strlcpy(pr->name, anchor->name,
4637 					    sizeof(pr->name));
4638 					break;
4639 				}
4640 		} else {
4641 			RB_FOREACH(anchor, pf_kanchor_node,
4642 			    &ruleset->anchor->children)
4643 				if (nr++ == pr->nr) {
4644 					strlcpy(pr->name, anchor->name,
4645 					    sizeof(pr->name));
4646 					break;
4647 				}
4648 		}
4649 		if (!pr->name[0])
4650 			error = EBUSY;
4651 		PF_RULES_RUNLOCK();
4652 		break;
4653 	}
4654 
4655 	case DIOCRCLRTABLES: {
4656 		struct pfioc_table *io = (struct pfioc_table *)addr;
4657 
4658 		if (io->pfrio_esize != 0) {
4659 			error = ENODEV;
4660 			break;
4661 		}
4662 		PF_RULES_WLOCK();
4663 		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
4664 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
4665 		PF_RULES_WUNLOCK();
4666 		break;
4667 	}
4668 
4669 	case DIOCRADDTABLES: {
4670 		struct pfioc_table *io = (struct pfioc_table *)addr;
4671 		struct pfr_table *pfrts;
4672 		size_t totlen;
4673 
4674 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4675 			error = ENODEV;
4676 			break;
4677 		}
4678 
4679 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4680 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4681 			error = ENOMEM;
4682 			break;
4683 		}
4684 
4685 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4686 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4687 		    M_TEMP, M_WAITOK);
4688 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4689 		if (error) {
4690 			free(pfrts, M_TEMP);
4691 			break;
4692 		}
4693 		PF_RULES_WLOCK();
4694 		error = pfr_add_tables(pfrts, io->pfrio_size,
4695 		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4696 		PF_RULES_WUNLOCK();
4697 		free(pfrts, M_TEMP);
4698 		break;
4699 	}
4700 
4701 	case DIOCRDELTABLES: {
4702 		struct pfioc_table *io = (struct pfioc_table *)addr;
4703 		struct pfr_table *pfrts;
4704 		size_t totlen;
4705 
4706 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4707 			error = ENODEV;
4708 			break;
4709 		}
4710 
4711 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4712 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4713 			error = ENOMEM;
4714 			break;
4715 		}
4716 
4717 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4718 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4719 		    M_TEMP, M_WAITOK);
4720 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4721 		if (error) {
4722 			free(pfrts, M_TEMP);
4723 			break;
4724 		}
4725 		PF_RULES_WLOCK();
4726 		error = pfr_del_tables(pfrts, io->pfrio_size,
4727 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4728 		PF_RULES_WUNLOCK();
4729 		free(pfrts, M_TEMP);
4730 		break;
4731 	}
4732 
4733 	case DIOCRGETTABLES: {
4734 		struct pfioc_table *io = (struct pfioc_table *)addr;
4735 		struct pfr_table *pfrts;
4736 		size_t totlen;
4737 		int n;
4738 
4739 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4740 			error = ENODEV;
4741 			break;
4742 		}
4743 		PF_RULES_RLOCK();
4744 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4745 		if (n < 0) {
4746 			PF_RULES_RUNLOCK();
4747 			error = EINVAL;
4748 			break;
4749 		}
4750 		io->pfrio_size = min(io->pfrio_size, n);
4751 
4752 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4753 
4754 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4755 		    M_TEMP, M_NOWAIT | M_ZERO);
4756 		if (pfrts == NULL) {
4757 			error = ENOMEM;
4758 			PF_RULES_RUNLOCK();
4759 			break;
4760 		}
4761 		error = pfr_get_tables(&io->pfrio_table, pfrts,
4762 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4763 		PF_RULES_RUNLOCK();
4764 		if (error == 0)
4765 			error = copyout(pfrts, io->pfrio_buffer, totlen);
4766 		free(pfrts, M_TEMP);
4767 		break;
4768 	}
4769 
4770 	case DIOCRGETTSTATS: {
4771 		struct pfioc_table *io = (struct pfioc_table *)addr;
4772 		struct pfr_tstats *pfrtstats;
4773 		size_t totlen;
4774 		int n;
4775 
4776 		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
4777 			error = ENODEV;
4778 			break;
4779 		}
4780 		PF_TABLE_STATS_LOCK();
4781 		PF_RULES_RLOCK();
4782 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4783 		if (n < 0) {
4784 			PF_RULES_RUNLOCK();
4785 			PF_TABLE_STATS_UNLOCK();
4786 			error = EINVAL;
4787 			break;
4788 		}
4789 		io->pfrio_size = min(io->pfrio_size, n);
4790 
4791 		totlen = io->pfrio_size * sizeof(struct pfr_tstats);
4792 		pfrtstats = mallocarray(io->pfrio_size,
4793 		    sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT | M_ZERO);
4794 		if (pfrtstats == NULL) {
4795 			error = ENOMEM;
4796 			PF_RULES_RUNLOCK();
4797 			PF_TABLE_STATS_UNLOCK();
4798 			break;
4799 		}
4800 		error = pfr_get_tstats(&io->pfrio_table, pfrtstats,
4801 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4802 		PF_RULES_RUNLOCK();
4803 		PF_TABLE_STATS_UNLOCK();
4804 		if (error == 0)
4805 			error = copyout(pfrtstats, io->pfrio_buffer, totlen);
4806 		free(pfrtstats, M_TEMP);
4807 		break;
4808 	}
4809 
4810 	case DIOCRCLRTSTATS: {
4811 		struct pfioc_table *io = (struct pfioc_table *)addr;
4812 		struct pfr_table *pfrts;
4813 		size_t totlen;
4814 
4815 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4816 			error = ENODEV;
4817 			break;
4818 		}
4819 
4820 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4821 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4822 			/* We used to count tables and use the minimum required
4823 			 * size, so we didn't fail on overly large requests.
4824 			 * Keep doing so. */
4825 			io->pfrio_size = pf_ioctl_maxcount;
4826 			break;
4827 		}
4828 
4829 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4830 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4831 		    M_TEMP, M_WAITOK);
4832 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4833 		if (error) {
4834 			free(pfrts, M_TEMP);
4835 			break;
4836 		}
4837 
4838 		PF_TABLE_STATS_LOCK();
4839 		PF_RULES_RLOCK();
4840 		error = pfr_clr_tstats(pfrts, io->pfrio_size,
4841 		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4842 		PF_RULES_RUNLOCK();
4843 		PF_TABLE_STATS_UNLOCK();
4844 		free(pfrts, M_TEMP);
4845 		break;
4846 	}
4847 
4848 	case DIOCRSETTFLAGS: {
4849 		struct pfioc_table *io = (struct pfioc_table *)addr;
4850 		struct pfr_table *pfrts;
4851 		size_t totlen;
4852 		int n;
4853 
4854 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4855 			error = ENODEV;
4856 			break;
4857 		}
4858 
4859 		PF_RULES_RLOCK();
4860 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4861 		if (n < 0) {
4862 			PF_RULES_RUNLOCK();
4863 			error = EINVAL;
4864 			break;
4865 		}
4866 
4867 		io->pfrio_size = min(io->pfrio_size, n);
4868 		PF_RULES_RUNLOCK();
4869 
4870 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4871 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4872 		    M_TEMP, M_WAITOK);
4873 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4874 		if (error) {
4875 			free(pfrts, M_TEMP);
4876 			break;
4877 		}
4878 		PF_RULES_WLOCK();
4879 		error = pfr_set_tflags(pfrts, io->pfrio_size,
4880 		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
4881 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4882 		PF_RULES_WUNLOCK();
4883 		free(pfrts, M_TEMP);
4884 		break;
4885 	}
4886 
4887 	case DIOCRCLRADDRS: {
4888 		struct pfioc_table *io = (struct pfioc_table *)addr;
4889 
4890 		if (io->pfrio_esize != 0) {
4891 			error = ENODEV;
4892 			break;
4893 		}
4894 		PF_RULES_WLOCK();
4895 		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
4896 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
4897 		PF_RULES_WUNLOCK();
4898 		break;
4899 	}
4900 
4901 	case DIOCRADDADDRS: {
4902 		struct pfioc_table *io = (struct pfioc_table *)addr;
4903 		struct pfr_addr *pfras;
4904 		size_t totlen;
4905 
4906 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4907 			error = ENODEV;
4908 			break;
4909 		}
4910 		if (io->pfrio_size < 0 ||
4911 		    io->pfrio_size > pf_ioctl_maxcount ||
4912 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4913 			error = EINVAL;
4914 			break;
4915 		}
4916 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4917 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4918 		    M_TEMP, M_WAITOK);
4919 		error = copyin(io->pfrio_buffer, pfras, totlen);
4920 		if (error) {
4921 			free(pfras, M_TEMP);
4922 			break;
4923 		}
4924 		PF_RULES_WLOCK();
4925 		error = pfr_add_addrs(&io->pfrio_table, pfras,
4926 		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
4927 		    PFR_FLAG_USERIOCTL);
4928 		PF_RULES_WUNLOCK();
4929 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4930 			error = copyout(pfras, io->pfrio_buffer, totlen);
4931 		free(pfras, M_TEMP);
4932 		break;
4933 	}
4934 
4935 	case DIOCRDELADDRS: {
4936 		struct pfioc_table *io = (struct pfioc_table *)addr;
4937 		struct pfr_addr *pfras;
4938 		size_t totlen;
4939 
4940 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4941 			error = ENODEV;
4942 			break;
4943 		}
4944 		if (io->pfrio_size < 0 ||
4945 		    io->pfrio_size > pf_ioctl_maxcount ||
4946 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4947 			error = EINVAL;
4948 			break;
4949 		}
4950 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4951 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4952 		    M_TEMP, M_WAITOK);
4953 		error = copyin(io->pfrio_buffer, pfras, totlen);
4954 		if (error) {
4955 			free(pfras, M_TEMP);
4956 			break;
4957 		}
4958 		PF_RULES_WLOCK();
4959 		error = pfr_del_addrs(&io->pfrio_table, pfras,
4960 		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
4961 		    PFR_FLAG_USERIOCTL);
4962 		PF_RULES_WUNLOCK();
4963 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4964 			error = copyout(pfras, io->pfrio_buffer, totlen);
4965 		free(pfras, M_TEMP);
4966 		break;
4967 	}
4968 
4969 	case DIOCRSETADDRS: {
4970 		struct pfioc_table *io = (struct pfioc_table *)addr;
4971 		struct pfr_addr *pfras;
4972 		size_t totlen, count;
4973 
4974 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4975 			error = ENODEV;
4976 			break;
4977 		}
4978 		if (io->pfrio_size < 0 || io->pfrio_size2 < 0) {
4979 			error = EINVAL;
4980 			break;
4981 		}
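		/* The feedback path may return more entries than were passed
		 * in, so size the buffer for the larger of the two counts. */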
4982 		count = max(io->pfrio_size, io->pfrio_size2);
4983 		if (count > pf_ioctl_maxcount ||
4984 		    WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) {
4985 			error = EINVAL;
4986 			break;
4987 		}
4988 		totlen = count * sizeof(struct pfr_addr);
4989 		pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP,
4990 		    M_WAITOK);
4991 		error = copyin(io->pfrio_buffer, pfras, totlen);
4992 		if (error) {
4993 			free(pfras, M_TEMP);
4994 			break;
4995 		}
4996 		PF_RULES_WLOCK();
4997 		error = pfr_set_addrs(&io->pfrio_table, pfras,
4998 		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
4999 		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
5000 		    PFR_FLAG_USERIOCTL, 0);
5001 		PF_RULES_WUNLOCK();
5002 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
5003 			error = copyout(pfras, io->pfrio_buffer, totlen);
5004 		free(pfras, M_TEMP);
5005 		break;
5006 	}
5007 
5008 	case DIOCRGETADDRS: {
5009 		struct pfioc_table *io = (struct pfioc_table *)addr;
5010 		struct pfr_addr *pfras;
5011 		size_t totlen;
5012 
5013 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5014 			error = ENODEV;
5015 			break;
5016 		}
5017 		if (io->pfrio_size < 0 ||
5018 		    io->pfrio_size > pf_ioctl_maxcount ||
5019 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5020 			error = EINVAL;
5021 			break;
5022 		}
5023 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5024 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5025 		    M_TEMP, M_WAITOK | M_ZERO);
5026 		PF_RULES_RLOCK();
5027 		error = pfr_get_addrs(&io->pfrio_table, pfras,
5028 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
5029 		PF_RULES_RUNLOCK();
5030 		if (error == 0)
5031 			error = copyout(pfras, io->pfrio_buffer, totlen);
5032 		free(pfras, M_TEMP);
5033 		break;
5034 	}
5035 
5036 	case DIOCRGETASTATS: {
5037 		struct pfioc_table *io = (struct pfioc_table *)addr;
5038 		struct pfr_astats *pfrastats;
5039 		size_t totlen;
5040 
5041 		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
5042 			error = ENODEV;
5043 			break;
5044 		}
5045 		if (io->pfrio_size < 0 ||
5046 		    io->pfrio_size > pf_ioctl_maxcount ||
5047 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) {
5048 			error = EINVAL;
5049 			break;
5050 		}
5051 		totlen = io->pfrio_size * sizeof(struct pfr_astats);
5052 		pfrastats = mallocarray(io->pfrio_size,
5053 		    sizeof(struct pfr_astats), M_TEMP, M_WAITOK | M_ZERO);
5054 		PF_RULES_RLOCK();
5055 		error = pfr_get_astats(&io->pfrio_table, pfrastats,
5056 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
5057 		PF_RULES_RUNLOCK();
5058 		if (error == 0)
5059 			error = copyout(pfrastats, io->pfrio_buffer, totlen);
5060 		free(pfrastats, M_TEMP);
5061 		break;
5062 	}
5063 
5064 	case DIOCRCLRASTATS: {
5065 		struct pfioc_table *io = (struct pfioc_table *)addr;
5066 		struct pfr_addr *pfras;
5067 		size_t totlen;
5068 
5069 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5070 			error = ENODEV;
5071 			break;
5072 		}
5073 		if (io->pfrio_size < 0 ||
5074 		    io->pfrio_size > pf_ioctl_maxcount ||
5075 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5076 			error = EINVAL;
5077 			break;
5078 		}
5079 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5080 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5081 		    M_TEMP, M_WAITOK);
5082 		error = copyin(io->pfrio_buffer, pfras, totlen);
5083 		if (error) {
5084 			free(pfras, M_TEMP);
5085 			break;
5086 		}
5087 		PF_RULES_WLOCK();
5088 		error = pfr_clr_astats(&io->pfrio_table, pfras,
5089 		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
5090 		    PFR_FLAG_USERIOCTL);
5091 		PF_RULES_WUNLOCK();
5092 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
5093 			error = copyout(pfras, io->pfrio_buffer, totlen);
5094 		free(pfras, M_TEMP);
5095 		break;
5096 	}
5097 
5098 	case DIOCRTSTADDRS: {
5099 		struct pfioc_table *io = (struct pfioc_table *)addr;
5100 		struct pfr_addr *pfras;
5101 		size_t totlen;
5102 
5103 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5104 			error = ENODEV;
5105 			break;
5106 		}
5107 		if (io->pfrio_size < 0 ||
5108 		    io->pfrio_size > pf_ioctl_maxcount ||
5109 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5110 			error = EINVAL;
5111 			break;
5112 		}
5113 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5114 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5115 		    M_TEMP, M_WAITOK);
5116 		error = copyin(io->pfrio_buffer, pfras, totlen);
5117 		if (error) {
5118 			free(pfras, M_TEMP);
5119 			break;
5120 		}
5121 		PF_RULES_RLOCK();
5122 		error = pfr_tst_addrs(&io->pfrio_table, pfras,
5123 		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
5124 		    PFR_FLAG_USERIOCTL);
5125 		PF_RULES_RUNLOCK();
5126 		if (error == 0)
5127 			error = copyout(pfras, io->pfrio_buffer, totlen);
5128 		free(pfras, M_TEMP);
5129 		break;
5130 	}
5131 
5132 	case DIOCRINADEFINE: {
5133 		struct pfioc_table *io = (struct pfioc_table *)addr;
5134 		struct pfr_addr *pfras;
5135 		size_t totlen;
5136 
5137 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5138 			error = ENODEV;
5139 			break;
5140 		}
5141 		if (io->pfrio_size < 0 ||
5142 		    io->pfrio_size > pf_ioctl_maxcount ||
5143 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5144 			error = EINVAL;
5145 			break;
5146 		}
5147 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5148 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5149 		    M_TEMP, M_WAITOK);
5150 		error = copyin(io->pfrio_buffer, pfras, totlen);
5151 		if (error) {
5152 			free(pfras, M_TEMP);
5153 			break;
5154 		}
5155 		PF_RULES_WLOCK();
5156 		error = pfr_ina_define(&io->pfrio_table, pfras,
5157 		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
5158 		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
5159 		PF_RULES_WUNLOCK();
5160 		free(pfras, M_TEMP);
5161 		break;
5162 	}
5163 
5164 	case DIOCOSFPADD: {
5165 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
5166 		PF_RULES_WLOCK();
5167 		error = pf_osfp_add(io);
5168 		PF_RULES_WUNLOCK();
5169 		break;
5170 	}
5171 
5172 	case DIOCOSFPGET: {
5173 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
5174 		PF_RULES_RLOCK();
5175 		error = pf_osfp_get(io);
5176 		PF_RULES_RUNLOCK();
5177 		break;
5178 	}
5179 
5180 	case DIOCXBEGIN: {
5181 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5182 		struct pfioc_trans_e	*ioes, *ioe;
5183 		size_t			 totlen;
5184 		int			 i;
5185 
5186 		if (io->esize != sizeof(*ioe)) {
5187 			error = ENODEV;
5188 			break;
5189 		}
5190 		if (io->size < 0 ||
5191 		    io->size > pf_ioctl_maxcount ||
5192 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5193 			error = EINVAL;
5194 			break;
5195 		}
5196 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5197 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5198 		    M_TEMP, M_WAITOK);
5199 		error = copyin(io->array, ioes, totlen);
5200 		if (error) {
5201 			free(ioes, M_TEMP);
5202 			break;
5203 		}
5204 		/* Ensure there are no more Ethernet rules to clean up. */
5205 		NET_EPOCH_DRAIN_CALLBACKS();
5206 		PF_RULES_WLOCK();
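		/* Open an inactive ruleset and hand back a ticket for each
		 * element of the transaction. */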
5207 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5208 			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
5209 			switch (ioe->rs_num) {
5210 			case PF_RULESET_ETH:
5211 				if ((error = pf_begin_eth(&ioe->ticket, ioe->anchor))) {
5212 					PF_RULES_WUNLOCK();
5213 					free(ioes, M_TEMP);
5214 					goto fail;
5215 				}
5216 				break;
5217 #ifdef ALTQ
5218 			case PF_RULESET_ALTQ:
5219 				if (ioe->anchor[0]) {
5220 					PF_RULES_WUNLOCK();
5221 					free(ioes, M_TEMP);
5222 					error = EINVAL;
5223 					goto fail;
5224 				}
5225 				if ((error = pf_begin_altq(&ioe->ticket))) {
5226 					PF_RULES_WUNLOCK();
5227 					free(ioes, M_TEMP);
5228 					goto fail;
5229 				}
5230 				break;
5231 #endif /* ALTQ */
5232 			case PF_RULESET_TABLE:
5233 			    {
5234 				struct pfr_table table;
5235 
5236 				bzero(&table, sizeof(table));
5237 				strlcpy(table.pfrt_anchor, ioe->anchor,
5238 				    sizeof(table.pfrt_anchor));
5239 				if ((error = pfr_ina_begin(&table,
5240 				    &ioe->ticket, NULL, 0))) {
5241 					PF_RULES_WUNLOCK();
5242 					free(ioes, M_TEMP);
5243 					goto fail;
5244 				}
5245 				break;
5246 			    }
5247 			default:
5248 				if ((error = pf_begin_rules(&ioe->ticket,
5249 				    ioe->rs_num, ioe->anchor))) {
5250 					PF_RULES_WUNLOCK();
5251 					free(ioes, M_TEMP);
5252 					goto fail;
5253 				}
5254 				break;
5255 			}
5256 		}
5257 		PF_RULES_WUNLOCK();
5258 		error = copyout(ioes, io->array, totlen);
5259 		free(ioes, M_TEMP);
5260 		break;
5261 	}
5262 
5263 	case DIOCXROLLBACK: {
5264 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5265 		struct pfioc_trans_e	*ioe, *ioes;
5266 		size_t			 totlen;
5267 		int			 i;
5268 
5269 		if (io->esize != sizeof(*ioe)) {
5270 			error = ENODEV;
5271 			break;
5272 		}
5273 		if (io->size < 0 ||
5274 		    io->size > pf_ioctl_maxcount ||
5275 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5276 			error = EINVAL;
5277 			break;
5278 		}
5279 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5280 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5281 		    M_TEMP, M_WAITOK);
5282 		error = copyin(io->array, ioes, totlen);
5283 		if (error) {
5284 			free(ioes, M_TEMP);
5285 			break;
5286 		}
5287 		PF_RULES_WLOCK();
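		/* Discard the inactive ruleset identified by each ticket. */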
5288 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5289 			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
5290 			switch (ioe->rs_num) {
5291 			case PF_RULESET_ETH:
5292 				if ((error = pf_rollback_eth(ioe->ticket,
5293 				    ioe->anchor))) {
5294 					PF_RULES_WUNLOCK();
5295 					free(ioes, M_TEMP);
5296 					goto fail; /* really bad */
5297 				}
5298 				break;
5299 #ifdef ALTQ
5300 			case PF_RULESET_ALTQ:
5301 				if (ioe->anchor[0]) {
5302 					PF_RULES_WUNLOCK();
5303 					free(ioes, M_TEMP);
5304 					error = EINVAL;
5305 					goto fail;
5306 				}
5307 				if ((error = pf_rollback_altq(ioe->ticket))) {
5308 					PF_RULES_WUNLOCK();
5309 					free(ioes, M_TEMP);
5310 					goto fail; /* really bad */
5311 				}
5312 				break;
5313 #endif /* ALTQ */
5314 			case PF_RULESET_TABLE:
5315 			    {
5316 				struct pfr_table table;
5317 
5318 				bzero(&table, sizeof(table));
5319 				strlcpy(table.pfrt_anchor, ioe->anchor,
5320 				    sizeof(table.pfrt_anchor));
5321 				if ((error = pfr_ina_rollback(&table,
5322 				    ioe->ticket, NULL, 0))) {
5323 					PF_RULES_WUNLOCK();
5324 					free(ioes, M_TEMP);
5325 					goto fail; /* really bad */
5326 				}
5327 				break;
5328 			    }
5329 			default:
5330 				if ((error = pf_rollback_rules(ioe->ticket,
5331 				    ioe->rs_num, ioe->anchor))) {
5332 					PF_RULES_WUNLOCK();
5333 					free(ioes, M_TEMP);
5334 					goto fail; /* really bad */
5335 				}
5336 				break;
5337 			}
5338 		}
5339 		PF_RULES_WUNLOCK();
5340 		free(ioes, M_TEMP);
5341 		break;
5342 	}
5343 
5344 	case DIOCXCOMMIT: {
5345 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5346 		struct pfioc_trans_e	*ioe, *ioes;
5347 		struct pf_kruleset	*rs;
5348 		struct pf_keth_ruleset	*ers;
5349 		size_t			 totlen;
5350 		int			 i;
5351 
5352 		if (io->esize != sizeof(*ioe)) {
5353 			error = ENODEV;
5354 			break;
5355 		}
5356 
5357 		if (io->size < 0 ||
5358 		    io->size > pf_ioctl_maxcount ||
5359 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5360 			error = EINVAL;
5361 			break;
5362 		}
5363 
5364 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5365 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5366 		    M_TEMP, M_WAITOK);
5367 		error = copyin(io->array, ioes, totlen);
5368 		if (error) {
5369 			free(ioes, M_TEMP);
5370 			break;
5371 		}
5372 		PF_RULES_WLOCK();
5373 		/* First make sure everything will succeed. */
5374 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5375 			ioe->anchor[sizeof(ioe->anchor) - 1] = 0;
5376 			switch (ioe->rs_num) {
5377 			case PF_RULESET_ETH:
5378 				ers = pf_find_keth_ruleset(ioe->anchor);
5379 				if (ers == NULL || ioe->ticket == 0 ||
5380 				    ioe->ticket != ers->inactive.ticket) {
5381 					PF_RULES_WUNLOCK();
5382 					free(ioes, M_TEMP);
5383 					error = EINVAL;
5384 					goto fail;
5385 				}
5386 				break;
5387 #ifdef ALTQ
5388 			case PF_RULESET_ALTQ:
5389 				if (ioe->anchor[0]) {
5390 					PF_RULES_WUNLOCK();
5391 					free(ioes, M_TEMP);
5392 					error = EINVAL;
5393 					goto fail;
5394 				}
5395 				if (!V_altqs_inactive_open || ioe->ticket !=
5396 				    V_ticket_altqs_inactive) {
5397 					PF_RULES_WUNLOCK();
5398 					free(ioes, M_TEMP);
5399 					error = EBUSY;
5400 					goto fail;
5401 				}
5402 				break;
5403 #endif /* ALTQ */
5404 			case PF_RULESET_TABLE:
5405 				rs = pf_find_kruleset(ioe->anchor);
5406 				if (rs == NULL || !rs->topen || ioe->ticket !=
5407 				    rs->tticket) {
5408 					PF_RULES_WUNLOCK();
5409 					free(ioes, M_TEMP);
5410 					error = EBUSY;
5411 					goto fail;
5412 				}
5413 				break;
5414 			default:
5415 				if (ioe->rs_num < 0 || ioe->rs_num >=
5416 				    PF_RULESET_MAX) {
5417 					PF_RULES_WUNLOCK();
5418 					free(ioes, M_TEMP);
5419 					error = EINVAL;
5420 					goto fail;
5421 				}
5422 				rs = pf_find_kruleset(ioe->anchor);
5423 				if (rs == NULL ||
5424 				    !rs->rules[ioe->rs_num].inactive.open ||
5425 				    rs->rules[ioe->rs_num].inactive.ticket !=
5426 				    ioe->ticket) {
5427 					PF_RULES_WUNLOCK();
5428 					free(ioes, M_TEMP);
5429 					error = EBUSY;
5430 					goto fail;
5431 				}
5432 				break;
5433 			}
5434 		}
5435 		/* Now do the commit - no errors should happen here. */
5436 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5437 			switch (ioe->rs_num) {
5438 			case PF_RULESET_ETH:
5439 				if ((error = pf_commit_eth(ioe->ticket, ioe->anchor))) {
5440 					PF_RULES_WUNLOCK();
5441 					free(ioes, M_TEMP);
5442 					goto fail; /* really bad */
5443 				}
5444 				break;
5445 #ifdef ALTQ
5446 			case PF_RULESET_ALTQ:
5447 				if ((error = pf_commit_altq(ioe->ticket))) {
5448 					PF_RULES_WUNLOCK();
5449 					free(ioes, M_TEMP);
5450 					goto fail; /* really bad */
5451 				}
5452 				break;
5453 #endif /* ALTQ */
5454 			case PF_RULESET_TABLE:
5455 			    {
5456 				struct pfr_table table;
5457 
5458 				bzero(&table, sizeof(table));
5459 				(void)strlcpy(table.pfrt_anchor, ioe->anchor,
5460 				    sizeof(table.pfrt_anchor));
5461 				if ((error = pfr_ina_commit(&table,
5462 				    ioe->ticket, NULL, NULL, 0))) {
5463 					PF_RULES_WUNLOCK();
5464 					free(ioes, M_TEMP);
5465 					goto fail; /* really bad */
5466 				}
5467 				break;
5468 			    }
5469 			default:
5470 				if ((error = pf_commit_rules(ioe->ticket,
5471 				    ioe->rs_num, ioe->anchor))) {
5472 					PF_RULES_WUNLOCK();
5473 					free(ioes, M_TEMP);
5474 					goto fail; /* really bad */
5475 				}
5476 				break;
5477 			}
5478 		}
5479 		PF_RULES_WUNLOCK();
5480 
5481 		/* Only hook into Ethernet traffic if we've got rules for it. */
5482 		if (! TAILQ_EMPTY(V_pf_keth->active.rules))
5483 			hook_pf_eth();
5484 		else
5485 			dehook_pf_eth();
5486 
5487 		free(ioes, M_TEMP);
5488 		break;
5489 	}
5490 
5491 	case DIOCGETSRCNODES: {
5492 		struct pfioc_src_nodes	*psn = (struct pfioc_src_nodes *)addr;
5493 		struct pf_srchash	*sh;
5494 		struct pf_ksrc_node	*n;
5495 		struct pf_src_node	*p, *pstore;
5496 		uint32_t		 i, nr = 0;
5497 
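		/* First pass: count all source nodes so the required buffer
		 * size can be reported back. */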
5498 		for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
5499 				i++, sh++) {
5500 			PF_HASHROW_LOCK(sh);
5501 			LIST_FOREACH(n, &sh->nodes, entry)
5502 				nr++;
5503 			PF_HASHROW_UNLOCK(sh);
5504 		}
5505 
5506 		psn->psn_len = min(psn->psn_len,
5507 		    sizeof(struct pf_src_node) * nr);
5508 
5509 		if (psn->psn_len == 0) {
5510 			psn->psn_len = sizeof(struct pf_src_node) * nr;
5511 			break;
5512 		}
5513 
5514 		nr = 0;
5515 
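		/* Second pass: copy as many nodes as fit into a temporary
		 * buffer, then push it to userland in one copyout(). */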
5516 		p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO);
5517 		for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
5518 		    i++, sh++) {
5519 			PF_HASHROW_LOCK(sh);
5520 			LIST_FOREACH(n, &sh->nodes, entry) {
5521 
5522 				if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
5523 					break;
5524 
5525 				pf_src_node_copy(n, p);
5526 
5527 				p++;
5528 				nr++;
5529 			}
5530 			PF_HASHROW_UNLOCK(sh);
5531 		}
5532 		error = copyout(pstore, psn->psn_src_nodes,
5533 		    sizeof(struct pf_src_node) * nr);
5534 		if (error) {
5535 			free(pstore, M_TEMP);
5536 			break;
5537 		}
5538 		psn->psn_len = sizeof(struct pf_src_node) * nr;
5539 		free(pstore, M_TEMP);
5540 		break;
5541 	}
5542 
5543 	case DIOCCLRSRCNODES: {
5544 		pf_clear_srcnodes(NULL);
5545 		pf_purge_expired_src_nodes();
5546 		break;
5547 	}
5548 
5549 	case DIOCKILLSRCNODES:
5550 		pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
5551 		break;
5552 
5553 #ifdef COMPAT_FREEBSD13
5554 	case DIOCKEEPCOUNTERS_FREEBSD13:
5555 #endif
5556 	case DIOCKEEPCOUNTERS:
5557 		error = pf_keepcounters((struct pfioc_nv *)addr);
5558 		break;
5559 
5560 	case DIOCGETSYNCOOKIES:
5561 		error = pf_get_syncookies((struct pfioc_nv *)addr);
5562 		break;
5563 
5564 	case DIOCSETSYNCOOKIES:
5565 		error = pf_set_syncookies((struct pfioc_nv *)addr);
5566 		break;
5567 
5568 	case DIOCSETHOSTID: {
5569 		u_int32_t	*hostid = (u_int32_t *)addr;
5570 
5571 		PF_RULES_WLOCK();
5572 		if (*hostid == 0)
5573 			V_pf_status.hostid = arc4random();
5574 		else
5575 			V_pf_status.hostid = *hostid;
5576 		PF_RULES_WUNLOCK();
5577 		break;
5578 	}
5579 
5580 	case DIOCOSFPFLUSH:
5581 		PF_RULES_WLOCK();
5582 		pf_osfp_flush();
5583 		PF_RULES_WUNLOCK();
5584 		break;
5585 
5586 	case DIOCIGETIFACES: {
5587 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5588 		struct pfi_kif *ifstore;
5589 		size_t bufsiz;
5590 
5591 		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
5592 			error = ENODEV;
5593 			break;
5594 		}
5595 
5596 		if (io->pfiio_size < 0 ||
5597 		    io->pfiio_size > pf_ioctl_maxcount ||
5598 		    WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) {
5599 			error = EINVAL;
5600 			break;
5601 		}
5602 
5603 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5604 
5605 		bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
5606 		ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif),
5607 		    M_TEMP, M_WAITOK | M_ZERO);
5608 
5609 		PF_RULES_RLOCK();
5610 		pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
5611 		PF_RULES_RUNLOCK();
5612 		error = copyout(ifstore, io->pfiio_buffer, bufsiz);
5613 		free(ifstore, M_TEMP);
5614 		break;
5615 	}
5616 
5617 	case DIOCSETIFFLAG: {
5618 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5619 
5620 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5621 
5622 		PF_RULES_WLOCK();
5623 		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
5624 		PF_RULES_WUNLOCK();
5625 		break;
5626 	}
5627 
5628 	case DIOCCLRIFFLAG: {
5629 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5630 
5631 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5632 
5633 		PF_RULES_WLOCK();
5634 		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
5635 		PF_RULES_WUNLOCK();
5636 		break;
5637 	}
5638 
5639 	case DIOCSETREASS: {
5640 		u_int32_t	*reass = (u_int32_t *)addr;
5641 
5642 		V_pf_status.reass = *reass & (PF_REASS_ENABLED|PF_REASS_NODF);
5643 		/* Clearing the DF flag without reassembly enabled is not a
5644 		 * valid combination, so disable reassembly in that case. */
5645 		if (!(V_pf_status.reass & PF_REASS_ENABLED))
5646 			V_pf_status.reass = 0;
5647 		break;
5648 	}
5649 
5650 	default:
5651 		error = ENODEV;
5652 		break;
5653 	}
5654 fail:
5655 	if (sx_xlocked(&pf_ioctl_lock))
5656 		sx_xunlock(&pf_ioctl_lock);
5657 	CURVNET_RESTORE();
5658 
5659 #undef ERROUT_IOCTL
5660 
5661 	return (error);
5662 }
5663 
5664 void
5665 pfsync_state_export(struct pfsync_state *sp, struct pf_kstate *st)
5666 {
5667 	bzero(sp, sizeof(struct pfsync_state));
5668 
5669 	/* copy from state key */
5670 	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
5671 	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
5672 	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
5673 	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
5674 	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
5675 	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
5676 	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
5677 	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
5678 	sp->proto = st->key[PF_SK_WIRE]->proto;
5679 	sp->af = st->key[PF_SK_WIRE]->af;
5680 
5681 	/* copy from state */
5682 	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
5683 	bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
5684 	sp->creation = htonl(time_uptime - st->creation);
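	/* Convert the absolute expiry time to seconds remaining, clamped at
	 * zero, in network byte order. */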
5685 	sp->expire = pf_state_expires(st);
5686 	if (sp->expire <= time_uptime)
5687 		sp->expire = htonl(0);
5688 	else
5689 		sp->expire = htonl(sp->expire - time_uptime);
5690 
5691 	sp->direction = st->direction;
5692 	sp->log = st->log;
5693 	sp->timeout = st->timeout;
5694 	sp->state_flags_compat = st->state_flags;
5695 	sp->state_flags = htons(st->state_flags);
5696 	if (st->src_node)
5697 		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
5698 	if (st->nat_src_node)
5699 		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
5700 
5701 	sp->id = st->id;
5702 	sp->creatorid = st->creatorid;
5703 	pf_state_peer_hton(&st->src, &sp->src);
5704 	pf_state_peer_hton(&st->dst, &sp->dst);
5705 
5706 	if (st->rule.ptr == NULL)
5707 		sp->rule = htonl(-1);
5708 	else
5709 		sp->rule = htonl(st->rule.ptr->nr);
5710 	if (st->anchor.ptr == NULL)
5711 		sp->anchor = htonl(-1);
5712 	else
5713 		sp->anchor = htonl(st->anchor.ptr->nr);
5714 	if (st->nat_rule.ptr == NULL)
5715 		sp->nat_rule = htonl(-1);
5716 	else
5717 		sp->nat_rule = htonl(st->nat_rule.ptr->nr);
5718 
5719 	pf_state_counter_hton(st->packets[0], sp->packets[0]);
5720 	pf_state_counter_hton(st->packets[1], sp->packets[1]);
5721 	pf_state_counter_hton(st->bytes[0], sp->bytes[0]);
5722 	pf_state_counter_hton(st->bytes[1], sp->bytes[1]);
5723 }
5724 
5725 void
5726 pf_state_export(struct pf_state_export *sp, struct pf_kstate *st)
5727 {
5728 	bzero(sp, sizeof(*sp));
5729 
5730 	sp->version = PF_STATE_VERSION;
5731 
5732 	/* copy from state key */
5733 	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
5734 	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
5735 	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
5736 	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
5737 	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
5738 	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
5739 	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
5740 	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
5741 	sp->proto = st->key[PF_SK_WIRE]->proto;
5742 	sp->af = st->key[PF_SK_WIRE]->af;
5743 
5744 	/* copy from state */
5745 	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
5746 	strlcpy(sp->orig_ifname, st->orig_kif->pfik_name,
5747 	    sizeof(sp->orig_ifname));
5748 	bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
5749 	sp->creation = htonl(time_uptime - st->creation);
5750 	sp->expire = pf_state_expires(st);
5751 	if (sp->expire <= time_uptime)
5752 		sp->expire = htonl(0);
5753 	else
5754 		sp->expire = htonl(sp->expire - time_uptime);
5755 
5756 	sp->direction = st->direction;
5757 	sp->log = st->log;
5758 	sp->timeout = st->timeout;
5759 	/* 8 bits for old peers, 16 bits for new peers */
5760 	sp->state_flags_compat = st->state_flags;
5761 	sp->state_flags = st->state_flags;
5762 	if (st->src_node)
5763 		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
5764 	if (st->nat_src_node)
5765 		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
5766 
5767 	sp->id = st->id;
5768 	sp->creatorid = st->creatorid;
5769 	pf_state_peer_hton(&st->src, &sp->src);
5770 	pf_state_peer_hton(&st->dst, &sp->dst);
5771 
5772 	if (st->rule.ptr == NULL)
5773 		sp->rule = htonl(-1);
5774 	else
5775 		sp->rule = htonl(st->rule.ptr->nr);
5776 	if (st->anchor.ptr == NULL)
5777 		sp->anchor = htonl(-1);
5778 	else
5779 		sp->anchor = htonl(st->anchor.ptr->nr);
5780 	if (st->nat_rule.ptr == NULL)
5781 		sp->nat_rule = htonl(-1);
5782 	else
5783 		sp->nat_rule = htonl(st->nat_rule.ptr->nr);
5784 
5785 	sp->packets[0] = st->packets[0];
5786 	sp->packets[1] = st->packets[1];
5787 	sp->bytes[0] = st->bytes[0];
5788 	sp->bytes[1] = st->bytes[1];
5789 }
5790 
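/*
 * Prepare a table address for copyout: drop the kernel table pointer and
 * report the address count of the active table, or -1 if it is inactive.
 */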
5791 static void
5792 pf_tbladdr_copyout(struct pf_addr_wrap *aw)
5793 {
5794 	struct pfr_ktable *kt;
5795 
5796 	KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type));
5797 
5798 	kt = aw->p.tbl;
5799 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
5800 		kt = kt->pfrkt_root;
5801 	aw->p.tbl = NULL;
5802 	aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
5803 		kt->pfrkt_cnt : -1;
5804 }
5805 
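/*
 * Pack an array of counters into a nested nvlist with parallel
 * "counters", "names" and "ids" arrays and attach it to nvl under the
 * given name.
 */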
5806 static int
5807 pf_add_status_counters(nvlist_t *nvl, const char *name, counter_u64_t *counters,
5808     size_t number, char **names)
5809 {
5810 	nvlist_t        *nvc;
5811 
5812 	nvc = nvlist_create(0);
5813 	if (nvc == NULL)
5814 		return (ENOMEM);
5815 
5816 	for (int i = 0; i < number; i++) {
5817 		nvlist_append_number_array(nvc, "counters",
5818 		    counter_u64_fetch(counters[i]));
5819 		nvlist_append_string_array(nvc, "names",
5820 		    names[i]);
5821 		nvlist_append_number_array(nvc, "ids",
5822 		    i);
5823 	}
5824 	nvlist_add_nvlist(nvl, name, nvc);
5825 	nvlist_destroy(nvc);
5826 
5827 	return (0);
5828 }
5829 
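/*
 * Export the pf status (settings, counters, interface statistics and
 * ruleset checksum) to userland as a packed nvlist.
 */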
5830 static int
5831 pf_getstatus(struct pfioc_nv *nv)
5832 {
5833 	nvlist_t        *nvl = NULL, *nvc = NULL;
5834 	void            *nvlpacked = NULL;
5835 	int              error;
5836 	struct pf_status s;
5837 	char *pf_reasons[PFRES_MAX+1] = PFRES_NAMES;
5838 	char *pf_lcounter[KLCNT_MAX+1] = KLCNT_NAMES;
5839 	char *pf_fcounter[FCNT_MAX+1] = FCNT_NAMES;
5840 	PF_RULES_RLOCK_TRACKER;
5841 
5842 #define ERROUT(x)      ERROUT_FUNCTION(errout, x)
5843 
5844 	PF_RULES_RLOCK();
5845 
5846 	nvl = nvlist_create(0);
5847 	if (nvl == NULL)
5848 		ERROUT(ENOMEM);
5849 
5850 	nvlist_add_bool(nvl, "running", V_pf_status.running);
5851 	nvlist_add_number(nvl, "since", V_pf_status.since);
5852 	nvlist_add_number(nvl, "debug", V_pf_status.debug);
5853 	nvlist_add_number(nvl, "hostid", V_pf_status.hostid);
5854 	nvlist_add_number(nvl, "states", V_pf_status.states);
5855 	nvlist_add_number(nvl, "src_nodes", V_pf_status.src_nodes);
5856 	nvlist_add_number(nvl, "reass", V_pf_status.reass);
5857 	nvlist_add_bool(nvl, "syncookies_active",
5858 	    V_pf_status.syncookies_active);
5859 
5860 	/* counters */
5861 	error = pf_add_status_counters(nvl, "counters", V_pf_status.counters,
5862 	    PFRES_MAX, pf_reasons);
5863 	if (error != 0)
5864 		ERROUT(error);
5865 
5866 	/* lcounters */
5867 	error = pf_add_status_counters(nvl, "lcounters", V_pf_status.lcounters,
5868 	    KLCNT_MAX, pf_lcounter);
5869 	if (error != 0)
5870 		ERROUT(error);
5871 
5872 	/* fcounters */
5873 	nvc = nvlist_create(0);
5874 	if (nvc == NULL)
5875 		ERROUT(ENOMEM);
5876 
5877 	for (int i = 0; i < FCNT_MAX; i++) {
5878 		nvlist_append_number_array(nvc, "counters",
5879 		    pf_counter_u64_fetch(&V_pf_status.fcounters[i]));
5880 		nvlist_append_string_array(nvc, "names",
5881 		    pf_fcounter[i]);
5882 		nvlist_append_number_array(nvc, "ids",
5883 		    i);
5884 	}
5885 	nvlist_add_nvlist(nvl, "fcounters", nvc);
5886 	nvlist_destroy(nvc);
5887 	nvc = NULL;
5888 
5889 	/* scounters */
5890 	error = pf_add_status_counters(nvl, "scounters", V_pf_status.scounters,
5891 	    SCNT_MAX, pf_fcounter);
5892 	if (error != 0)
5893 		ERROUT(error);
5894 
5895 	nvlist_add_string(nvl, "ifname", V_pf_status.ifname);
5896 	nvlist_add_binary(nvl, "chksum", V_pf_status.pf_chksum,
5897 	    PF_MD5_DIGEST_LENGTH);
5898 
5899 	pfi_update_status(V_pf_status.ifname, &s);
5900 
5901 	/* pcounters / bcounters */
5902 	for (int i = 0; i < 2; i++) {
5903 		for (int j = 0; j < 2; j++) {
5904 			for (int k = 0; k < 2; k++) {
5905 				nvlist_append_number_array(nvl, "pcounters",
5906 				    s.pcounters[i][j][k]);
5907 			}
5908 			nvlist_append_number_array(nvl, "bcounters",
5909 			    s.bcounters[i][j]);
5910 		}
5911 	}
5912 
5913 	nvlpacked = nvlist_pack(nvl, &nv->len);
5914 	if (nvlpacked == NULL)
5915 		ERROUT(ENOMEM);
5916 
5917 	if (nv->size == 0)
5918 		ERROUT(0);
5919 	else if (nv->size < nv->len)
5920 		ERROUT(ENOSPC);
5921 
5922 	PF_RULES_RUNLOCK();
5923 	error = copyout(nvlpacked, nv->data, nv->len);
5924 	goto done;
5925 
5926 #undef ERROUT
5927 errout:
5928 	PF_RULES_RUNLOCK();
5929 done:
5930 	free(nvlpacked, M_NVLIST);
5931 	nvlist_destroy(nvc);
5932 	nvlist_destroy(nvl);
5933 
5934 	return (error);
5935 }
5936 
5937 /*
5938  * XXX - Check for version mismatch!!!
5939  */
5940 static void
5941 pf_clear_all_states(void)
5942 {
5943 	struct pf_kstate	*s;
5944 	u_int i;
5945 
5946 	for (i = 0; i <= pf_hashmask; i++) {
5947 		struct pf_idhash *ih = &V_pf_idhash[i];
5948 relock:
5949 		PF_HASHROW_LOCK(ih);
5950 		LIST_FOREACH(s, &ih->states, entry) {
5951 			s->timeout = PFTM_PURGE;
5952 			/* Don't send out individual delete messages. */
5953 			s->state_flags |= PFSTATE_NOSYNC;
5954 			pf_unlink_state(s);
5955 			goto relock;
5956 		}
5957 		PF_HASHROW_UNLOCK(ih);
5958 	}
5959 }
5960 
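/*
 * Flush all tables by way of pfr_clr_tables() with an empty filter.
 */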
5961 static int
5962 pf_clear_tables(void)
5963 {
5964 	struct pfioc_table io;
5965 	int error;
5966 
5967 	bzero(&io, sizeof(io));
5968 
5969 	error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
5970 	    io.pfrio_flags);
5971 
5972 	return (error);
5973 }
5974 
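/*
 * Drop state references to the given source node (or to all source nodes
 * if n is NULL) and mark the node(s) for immediate expiry.
 */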
5975 static void
5976 pf_clear_srcnodes(struct pf_ksrc_node *n)
5977 {
5978 	struct pf_kstate *s;
5979 	int i;
5980 
5981 	for (i = 0; i <= pf_hashmask; i++) {
5982 		struct pf_idhash *ih = &V_pf_idhash[i];
5983 
5984 		PF_HASHROW_LOCK(ih);
5985 		LIST_FOREACH(s, &ih->states, entry) {
5986 			if (n == NULL || n == s->src_node)
5987 				s->src_node = NULL;
5988 			if (n == NULL || n == s->nat_src_node)
5989 				s->nat_src_node = NULL;
5990 		}
5991 		PF_HASHROW_UNLOCK(ih);
5992 	}
5993 
5994 	if (n == NULL) {
5995 		struct pf_srchash *sh;
5996 
5997 		for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
5998 		    i++, sh++) {
5999 			PF_HASHROW_LOCK(sh);
6000 			LIST_FOREACH(n, &sh->nodes, entry) {
6001 				n->expire = 1;
6002 				n->states = 0;
6003 			}
6004 			PF_HASHROW_UNLOCK(sh);
6005 		}
6006 	} else {
6007 		/* XXX: hash slot should already be locked here. */
6008 		n->expire = 1;
6009 		n->states = 0;
6010 	}
6011 }
6012 
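/*
 * Unlink all source nodes matching the src/dst filter in psnk, clear the
 * references held by states and report the number of nodes freed in
 * psnk->psnk_killed.
 */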
6013 static void
6014 pf_kill_srcnodes(struct pfioc_src_node_kill *psnk)
6015 {
6016 	struct pf_ksrc_node_list	 kill;
6017 
6018 	LIST_INIT(&kill);
6019 	for (int i = 0; i <= pf_srchashmask; i++) {
6020 		struct pf_srchash *sh = &V_pf_srchash[i];
6021 		struct pf_ksrc_node *sn, *tmp;
6022 
6023 		PF_HASHROW_LOCK(sh);
6024 		LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp)
6025 			if (PF_MATCHA(psnk->psnk_src.neg,
6026 			      &psnk->psnk_src.addr.v.a.addr,
6027 			      &psnk->psnk_src.addr.v.a.mask,
6028 			      &sn->addr, sn->af) &&
6029 			    PF_MATCHA(psnk->psnk_dst.neg,
6030 			      &psnk->psnk_dst.addr.v.a.addr,
6031 			      &psnk->psnk_dst.addr.v.a.mask,
6032 			      &sn->raddr, sn->af)) {
6033 				pf_unlink_src_node(sn);
6034 				LIST_INSERT_HEAD(&kill, sn, entry);
6035 				sn->expire = 1;
6036 			}
6037 		PF_HASHROW_UNLOCK(sh);
6038 	}
6039 
6040 	for (int i = 0; i <= pf_hashmask; i++) {
6041 		struct pf_idhash *ih = &V_pf_idhash[i];
6042 		struct pf_kstate *s;
6043 
6044 		PF_HASHROW_LOCK(ih);
6045 		LIST_FOREACH(s, &ih->states, entry) {
6046 			if (s->src_node && s->src_node->expire == 1)
6047 				s->src_node = NULL;
6048 			if (s->nat_src_node && s->nat_src_node->expire == 1)
6049 				s->nat_src_node = NULL;
6050 		}
6051 		PF_HASHROW_UNLOCK(ih);
6052 	}
6053 
6054 	psnk->psnk_killed = pf_free_src_nodes(&kill);
6055 }
6056 
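/*
 * Parse the "keep_counters" boolean from a userland-supplied nvlist and
 * store it in V_pf_status.
 */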
6057 static int
6058 pf_keepcounters(struct pfioc_nv *nv)
6059 {
6060 	nvlist_t	*nvl = NULL;
6061 	void		*nvlpacked = NULL;
6062 	int		 error = 0;
6063 
6064 #define	ERROUT(x)	ERROUT_FUNCTION(on_error, x)
6065 
6066 	if (nv->len > pf_ioctl_maxcount)
6067 		ERROUT(ENOMEM);
6068 
6069 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6070 	if (nvlpacked == NULL)
6071 		ERROUT(ENOMEM);
6072 
6073 	error = copyin(nv->data, nvlpacked, nv->len);
6074 	if (error)
6075 		ERROUT(error);
6076 
6077 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6078 	if (nvl == NULL)
6079 		ERROUT(EBADMSG);
6080 
6081 	if (! nvlist_exists_bool(nvl, "keep_counters"))
6082 		ERROUT(EBADMSG);
6083 
6084 	V_pf_status.keep_counters = nvlist_get_bool(nvl, "keep_counters");
6085 
6086 on_error:
6087 	nvlist_destroy(nvl);
6088 	free(nvlpacked, M_NVLIST);
6089 	return (error);
6090 }
6091 
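/*
 * Unlink all states matching the interface filter in kill (and, if
 * requested, the mirrored states as well) and return the number removed.
 * Individual pfsync delete messages are suppressed; pfsync is asked to
 * clear states in bulk afterwards.
 */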
6092 static unsigned int
6093 pf_clear_states(const struct pf_kstate_kill *kill)
6094 {
6095 	struct pf_state_key_cmp	 match_key;
6096 	struct pf_kstate	*s;
6097 	struct pfi_kkif	*kif;
6098 	int		 idx;
6099 	unsigned int	 killed = 0, dir;
6100 
6101 	for (unsigned int i = 0; i <= pf_hashmask; i++) {
6102 		struct pf_idhash *ih = &V_pf_idhash[i];
6103 
6104 relock_DIOCCLRSTATES:
6105 		PF_HASHROW_LOCK(ih);
6106 		LIST_FOREACH(s, &ih->states, entry) {
6107 			/* For floating states look at the original kif. */
6108 			kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;
6109 
6110 			if (kill->psk_ifname[0] &&
6111 			    strcmp(kill->psk_ifname,
6112 			    kif->pfik_name))
6113 				continue;
6114 
6115 			if (kill->psk_kill_match) {
6116 				bzero(&match_key, sizeof(match_key));
6117 
6118 				if (s->direction == PF_OUT) {
6119 					dir = PF_IN;
6120 					idx = PF_SK_STACK;
6121 				} else {
6122 					dir = PF_OUT;
6123 					idx = PF_SK_WIRE;
6124 				}
6125 
6126 				match_key.af = s->key[idx]->af;
6127 				match_key.proto = s->key[idx]->proto;
6128 				PF_ACPY(&match_key.addr[0],
6129 				    &s->key[idx]->addr[1], match_key.af);
6130 				match_key.port[0] = s->key[idx]->port[1];
6131 				PF_ACPY(&match_key.addr[1],
6132 				    &s->key[idx]->addr[0], match_key.af);
6133 				match_key.port[1] = s->key[idx]->port[0];
6134 			}
6135 
6136 			/*
6137 			 * Don't send out individual
6138 			 * delete messages.
6139 			 */
6140 			s->state_flags |= PFSTATE_NOSYNC;
6141 			pf_unlink_state(s);
6142 			killed++;
6143 
6144 			if (kill->psk_kill_match)
6145 				killed += pf_kill_matching_state(&match_key,
6146 				    dir);
6147 
6148 			goto relock_DIOCCLRSTATES;
6149 		}
6150 		PF_HASHROW_UNLOCK(ih);
6151 	}
6152 
6153 	if (V_pfsync_clear_states_ptr != NULL)
6154 		V_pfsync_clear_states_ptr(V_pf_status.hostid, kill->psk_ifname);
6155 
6156 	return (killed);
6157 }
6158 
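/*
 * Kill either a single state identified by id/creatorid or all states
 * matching the filter, adding the number removed to *killed.
 */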
6159 static void
6160 pf_killstates(struct pf_kstate_kill *kill, unsigned int *killed)
6161 {
6162 	struct pf_kstate	*s;
6163 
6164 	if (kill->psk_pfcmp.id) {
6165 		if (kill->psk_pfcmp.creatorid == 0)
6166 			kill->psk_pfcmp.creatorid = V_pf_status.hostid;
6167 		if ((s = pf_find_state_byid(kill->psk_pfcmp.id,
6168 		    kill->psk_pfcmp.creatorid))) {
6169 			pf_unlink_state(s);
6170 			*killed = 1;
6171 		}
6172 		return;
6173 	}
6174 
6175 	for (unsigned int i = 0; i <= pf_hashmask; i++)
6176 		*killed += pf_killstates_row(kill, &V_pf_idhash[i]);
6177 
6178 	return;
6179 }
6180 
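/*
 * nvlist-based state killing: unpack the filter supplied by userland,
 * kill the matching states and return the "killed" count as a packed
 * nvlist.
 */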
6181 static int
6182 pf_killstates_nv(struct pfioc_nv *nv)
6183 {
6184 	struct pf_kstate_kill	 kill;
6185 	nvlist_t		*nvl = NULL;
6186 	void			*nvlpacked = NULL;
6187 	int			 error = 0;
6188 	unsigned int		 killed = 0;
6189 
6190 #define ERROUT(x)	ERROUT_FUNCTION(on_error, x)
6191 
6192 	if (nv->len > pf_ioctl_maxcount)
6193 		ERROUT(ENOMEM);
6194 
6195 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6196 	if (nvlpacked == NULL)
6197 		ERROUT(ENOMEM);
6198 
6199 	error = copyin(nv->data, nvlpacked, nv->len);
6200 	if (error)
6201 		ERROUT(error);
6202 
6203 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6204 	if (nvl == NULL)
6205 		ERROUT(EBADMSG);
6206 
6207 	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
6208 	if (error)
6209 		ERROUT(error);
6210 
6211 	pf_killstates(&kill, &killed);
6212 
6213 	free(nvlpacked, M_NVLIST);
6214 	nvlpacked = NULL;
6215 	nvlist_destroy(nvl);
6216 	nvl = nvlist_create(0);
6217 	if (nvl == NULL)
6218 		ERROUT(ENOMEM);
6219 
6220 	nvlist_add_number(nvl, "killed", killed);
6221 
6222 	nvlpacked = nvlist_pack(nvl, &nv->len);
6223 	if (nvlpacked == NULL)
6224 		ERROUT(ENOMEM);
6225 
6226 	if (nv->size == 0)
6227 		ERROUT(0);
6228 	else if (nv->size < nv->len)
6229 		ERROUT(ENOSPC);
6230 
6231 	error = copyout(nvlpacked, nv->data, nv->len);
6232 
6233 on_error:
6234 	nvlist_destroy(nvl);
6235 	free(nvlpacked, M_NVLIST);
6236 	return (error);
6237 }
6238 
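/*
 * nvlist-based variant of clearing states; like pf_killstates_nv() but
 * backed by pf_clear_states().
 */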
6239 static int
6240 pf_clearstates_nv(struct pfioc_nv *nv)
6241 {
6242 	struct pf_kstate_kill	 kill;
6243 	nvlist_t		*nvl = NULL;
6244 	void			*nvlpacked = NULL;
6245 	int			 error = 0;
6246 	unsigned int		 killed;
6247 
6248 #define ERROUT(x)	ERROUT_FUNCTION(on_error, x)
6249 
6250 	if (nv->len > pf_ioctl_maxcount)
6251 		ERROUT(ENOMEM);
6252 
6253 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6254 	if (nvlpacked == NULL)
6255 		ERROUT(ENOMEM);
6256 
6257 	error = copyin(nv->data, nvlpacked, nv->len);
6258 	if (error)
6259 		ERROUT(error);
6260 
6261 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6262 	if (nvl == NULL)
6263 		ERROUT(EBADMSG);
6264 
6265 	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
6266 	if (error)
6267 		ERROUT(error);
6268 
6269 	killed = pf_clear_states(&kill);
6270 
6271 	free(nvlpacked, M_NVLIST);
6272 	nvlpacked = NULL;
6273 	nvlist_destroy(nvl);
6274 	nvl = nvlist_create(0);
6275 	if (nvl == NULL)
6276 		ERROUT(ENOMEM);
6277 
6278 	nvlist_add_number(nvl, "killed", killed);
6279 
6280 	nvlpacked = nvlist_pack(nvl, &nv->len);
6281 	if (nvlpacked == NULL)
6282 		ERROUT(ENOMEM);
6283 
6284 	if (nv->size == 0)
6285 		ERROUT(0);
6286 	else if (nv->size < nv->len)
6287 		ERROUT(ENOSPC);
6288 
6289 	error = copyout(nvlpacked, nv->data, nv->len);
6290 
6291 #undef ERROUT
6292 on_error:
6293 	nvlist_destroy(nvl);
6294 	free(nvlpacked, M_NVLIST);
6295 	return (error);
6296 }
6297 
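/*
 * Look up a single state by id/creatorid and return it to userland as a
 * packed nvlist.
 */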
6298 static int
6299 pf_getstate(struct pfioc_nv *nv)
6300 {
6301 	nvlist_t		*nvl = NULL, *nvls;
6302 	void			*nvlpacked = NULL;
6303 	struct pf_kstate	*s = NULL;
6304 	int			 error = 0;
6305 	uint64_t		 id, creatorid;
6306 
6307 #define ERROUT(x)	ERROUT_FUNCTION(errout, x)
6308 
6309 	if (nv->len > pf_ioctl_maxcount)
6310 		ERROUT(ENOMEM);
6311 
6312 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6313 	if (nvlpacked == NULL)
6314 		ERROUT(ENOMEM);
6315 
6316 	error = copyin(nv->data, nvlpacked, nv->len);
6317 	if (error)
6318 		ERROUT(error);
6319 
6320 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6321 	if (nvl == NULL)
6322 		ERROUT(EBADMSG);
6323 
6324 	PFNV_CHK(pf_nvuint64(nvl, "id", &id));
6325 	PFNV_CHK(pf_nvuint64(nvl, "creatorid", &creatorid));
6326 
6327 	s = pf_find_state_byid(id, creatorid);
6328 	if (s == NULL)
6329 		ERROUT(ENOENT);
6330 
6331 	free(nvlpacked, M_NVLIST);
6332 	nvlpacked = NULL;
6333 	nvlist_destroy(nvl);
6334 	nvl = nvlist_create(0);
6335 	if (nvl == NULL)
6336 		ERROUT(ENOMEM);
6337 
6338 	nvls = pf_state_to_nvstate(s);
6339 	if (nvls == NULL)
6340 		ERROUT(ENOMEM);
6341 
6342 	nvlist_add_nvlist(nvl, "state", nvls);
6343 	nvlist_destroy(nvls);
6344 
6345 	nvlpacked = nvlist_pack(nvl, &nv->len);
6346 	if (nvlpacked == NULL)
6347 		ERROUT(ENOMEM);
6348 
6349 	if (nv->size == 0)
6350 		ERROUT(0);
6351 	else if (nv->size < nv->len)
6352 		ERROUT(ENOSPC);
6353 
6354 	error = copyout(nvlpacked, nv->data, nv->len);
6355 
6356 #undef ERROUT
6357 errout:
6358 	if (s != NULL)
6359 		PF_STATE_UNLOCK(s);
6360 	free(nvlpacked, M_NVLIST);
6361 	nvlist_destroy(nvl);
6362 	return (error);
6363 }
6364 
6365 /*
6366  * XXX - Check for version mismatch!!!
6367  */
6368 
6369 /*
6370  * Duplicate pfctl -Fa operation to get rid of as much as we can.
6371  */
6372 static int
6373 shutdown_pf(void)
6374 {
6375 	int error = 0;
6376 	u_int32_t t[5];
6377 	char nn = '\0';
6378 
6379 	do {
6380 		if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
6381 		    != 0) {
6382 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
6383 			break;
6384 		}
6385 		if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
6386 		    != 0) {
6387 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
6388 			break;		/* XXX: rollback? */
6389 		}
6390 		if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
6391 		    != 0) {
6392 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
6393 			break;		/* XXX: rollback? */
6394 		}
6395 		if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
6396 		    != 0) {
6397 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
6398 			break;		/* XXX: rollback? */
6399 		}
6400 		if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
6401 		    != 0) {
6402 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
6403 			break;		/* XXX: rollback? */
6404 		}
6405 
6406 		/* XXX: these should always succeed here */
6407 		pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
6408 		pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
6409 		pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
6410 		pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
6411 		pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
6412 
6413 		if ((error = pf_clear_tables()) != 0)
6414 			break;
6415 
6416 		if ((error = pf_begin_eth(&t[0], &nn)) != 0) {
6417 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: eth\n"));
6418 			break;
6419 		}
6420 		pf_commit_eth(t[0], &nn);
6421 
6422 #ifdef ALTQ
6423 		if ((error = pf_begin_altq(&t[0])) != 0) {
6424 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
6425 			break;
6426 		}
6427 		pf_commit_altq(t[0]);
6428 #endif
6429 
6430 		pf_clear_all_states();
6431 
6432 		pf_clear_srcnodes(NULL);
6433 
6434 		/* status does not use malloc'ed memory, so no cleanup is needed */
6435 		/* fingerprints and interfaces have their own cleanup code */
6436 	} while (0);
6437 
6438 	return (error);
6439 }
6440 
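/*
 * Translate a pf_test*() verdict into a pfil(9) return value.  The mbuf
 * is freed on any verdict other than PF_PASS.
 */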
6441 static pfil_return_t
6442 pf_check_return(int chk, struct mbuf **m)
6443 {
6444 
6445 	switch (chk) {
6446 	case PF_PASS:
6447 		if (*m == NULL)
6448 			return (PFIL_CONSUMED);
6449 		else
6450 			return (PFIL_PASS);
6451 		break;
6452 	default:
6453 		if (*m != NULL) {
6454 			m_freem(*m);
6455 			*m = NULL;
6456 		}
6457 		return (PFIL_DROPPED);
6458 	}
6459 }
6460 
6461 static pfil_return_t
6462 pf_eth_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
6463     void *ruleset __unused, struct inpcb *inp)
6464 {
6465 	int chk;
6466 
6467 	chk = pf_test_eth(PF_IN, flags, ifp, m, inp);
6468 
6469 	return (pf_check_return(chk, m));
6470 }
6471 
6472 static pfil_return_t
6473 pf_eth_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
6474     void *ruleset __unused, struct inpcb *inp)
6475 {
6476 	int chk;
6477 
6478 	chk = pf_test_eth(PF_OUT, flags, ifp, m, inp);
6479 
6480 	return (pf_check_return(chk, m));
6481 }
6482 
6483 #ifdef INET
6484 static pfil_return_t
6485 pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
6486     void *ruleset __unused, struct inpcb *inp)
6487 {
6488 	int chk;
6489 
6490 	chk = pf_test(PF_IN, flags, ifp, m, inp);
6491 
6492 	return (pf_check_return(chk, m));
6493 }
6494 
6495 static pfil_return_t
6496 pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
6497     void *ruleset __unused, struct inpcb *inp)
6498 {
6499 	int chk;
6500 
6501 	chk = pf_test(PF_OUT, flags, ifp, m, inp);
6502 
6503 	return (pf_check_return(chk, m));
6504 }
6505 #endif
6506 
6507 #ifdef INET6
6508 static pfil_return_t
6509 pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags,
6510     void *ruleset __unused, struct inpcb *inp)
6511 {
6512 	int chk;
6513 
6514 	/*
6515 	 * For loopback traffic IPv6 uses the real interface in order to
6516 	 * support scoped addresses.  To support stateful filtering we have
6517 	 * to change this to lo0, as is the case for IPv4.
6518 	 */
6519 	CURVNET_SET(ifp->if_vnet);
6520 	chk = pf_test6(PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp, m, inp);
6521 	CURVNET_RESTORE();
6522 
6523 	return (pf_check_return(chk, m));
6524 }
6525 
6526 static pfil_return_t
6527 pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags,
6528     void *ruleset __unused, struct inpcb *inp)
6529 {
6530 	int chk;
6531 
6532 	CURVNET_SET(ifp->if_vnet);
6533 	chk = pf_test6(PF_OUT, flags, ifp, m, inp);
6534 	CURVNET_RESTORE();
6535 
6536 	return (pf_check_return(chk, m));
6537 }
6538 #endif /* INET6 */
6539 
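/*
 * pfil hook handles, one in/out pair per layer and address family, per
 * vnet.
 */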
6540 VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_in_hook);
6541 VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_out_hook);
6542 #define	V_pf_eth_in_hook	VNET(pf_eth_in_hook)
6543 #define	V_pf_eth_out_hook	VNET(pf_eth_out_hook)
6544 
6545 #ifdef INET
6546 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_in_hook);
6547 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_out_hook);
6548 #define	V_pf_ip4_in_hook	VNET(pf_ip4_in_hook)
6549 #define	V_pf_ip4_out_hook	VNET(pf_ip4_out_hook)
6550 #endif
6551 #ifdef INET6
6552 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_in_hook);
6553 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_out_hook);
6554 #define	V_pf_ip6_in_hook	VNET(pf_ip6_in_hook)
6555 #define	V_pf_ip6_out_hook	VNET(pf_ip6_out_hook)
6556 #endif
6557 
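/*
 * Register and link the Ethernet (layer 2) pfil hooks for the current
 * vnet.
 */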
6558 static void
6559 hook_pf_eth(void)
6560 {
6561 	struct pfil_hook_args pha = {
6562 		.pa_version = PFIL_VERSION,
6563 		.pa_modname = "pf",
6564 		.pa_type = PFIL_TYPE_ETHERNET,
6565 	};
6566 	struct pfil_link_args pla = {
6567 		.pa_version = PFIL_VERSION,
6568 	};
6569 	int ret __diagused;
6570 
6571 	if (atomic_load_bool(&V_pf_pfil_eth_hooked))
6572 		return;
6573 
6574 	pha.pa_mbuf_chk = pf_eth_check_in;
6575 	pha.pa_flags = PFIL_IN;
6576 	pha.pa_rulname = "eth-in";
6577 	V_pf_eth_in_hook = pfil_add_hook(&pha);
6578 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6579 	pla.pa_head = V_link_pfil_head;
6580 	pla.pa_hook = V_pf_eth_in_hook;
6581 	ret = pfil_link(&pla);
6582 	MPASS(ret == 0);
6583 	pha.pa_mbuf_chk = pf_eth_check_out;
6584 	pha.pa_flags = PFIL_OUT;
6585 	pha.pa_rulname = "eth-out";
6586 	V_pf_eth_out_hook = pfil_add_hook(&pha);
6587 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6588 	pla.pa_head = V_link_pfil_head;
6589 	pla.pa_hook = V_pf_eth_out_hook;
6590 	ret = pfil_link(&pla);
6591 	MPASS(ret == 0);
6592 
6593 	atomic_store_bool(&V_pf_pfil_eth_hooked, true);
6594 }
6595 
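/*
 * Register and link the IPv4 and IPv6 pfil hooks for the current vnet.
 */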
6596 static void
6597 hook_pf(void)
6598 {
6599 	struct pfil_hook_args pha = {
6600 		.pa_version = PFIL_VERSION,
6601 		.pa_modname = "pf",
6602 	};
6603 	struct pfil_link_args pla = {
6604 		.pa_version = PFIL_VERSION,
6605 	};
6606 	int ret __diagused;
6607 
6608 	if (atomic_load_bool(&V_pf_pfil_hooked))
6609 		return;
6610 
6611 #ifdef INET
6612 	pha.pa_type = PFIL_TYPE_IP4;
6613 	pha.pa_mbuf_chk = pf_check_in;
6614 	pha.pa_flags = PFIL_IN;
6615 	pha.pa_rulname = "default-in";
6616 	V_pf_ip4_in_hook = pfil_add_hook(&pha);
6617 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6618 	pla.pa_head = V_inet_pfil_head;
6619 	pla.pa_hook = V_pf_ip4_in_hook;
6620 	ret = pfil_link(&pla);
6621 	MPASS(ret == 0);
6622 	pha.pa_mbuf_chk = pf_check_out;
6623 	pha.pa_flags = PFIL_OUT;
6624 	pha.pa_rulname = "default-out";
6625 	V_pf_ip4_out_hook = pfil_add_hook(&pha);
6626 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6627 	pla.pa_head = V_inet_pfil_head;
6628 	pla.pa_hook = V_pf_ip4_out_hook;
6629 	ret = pfil_link(&pla);
6630 	MPASS(ret == 0);
6631 #endif
6632 #ifdef INET6
6633 	pha.pa_type = PFIL_TYPE_IP6;
6634 	pha.pa_mbuf_chk = pf_check6_in;
6635 	pha.pa_flags = PFIL_IN;
6636 	pha.pa_rulname = "default-in6";
6637 	V_pf_ip6_in_hook = pfil_add_hook(&pha);
6638 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6639 	pla.pa_head = V_inet6_pfil_head;
6640 	pla.pa_hook = V_pf_ip6_in_hook;
6641 	ret = pfil_link(&pla);
6642 	MPASS(ret == 0);
6643 	pha.pa_mbuf_chk = pf_check6_out;
6644 	pha.pa_rulname = "default-out6";
6645 	pha.pa_flags = PFIL_OUT;
6646 	V_pf_ip6_out_hook = pfil_add_hook(&pha);
6647 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6648 	pla.pa_head = V_inet6_pfil_head;
6649 	pla.pa_hook = V_pf_ip6_out_hook;
6650 	ret = pfil_link(&pla);
6651 	MPASS(ret == 0);
6652 #endif
6653 
6654 	atomic_store_bool(&V_pf_pfil_hooked, true);
6655 }
6656 
6657 static void
6658 dehook_pf_eth(void)
6659 {
6660 
6661 	if (!atomic_load_bool(&V_pf_pfil_eth_hooked))
6662 		return;
6663 
6664 	pfil_remove_hook(V_pf_eth_in_hook);
6665 	pfil_remove_hook(V_pf_eth_out_hook);
6666 
6667 	atomic_store_bool(&V_pf_pfil_eth_hooked, false);
6668 }
6669 
6670 static void
6671 dehook_pf(void)
6672 {
6673 
6674 	if (!atomic_load_bool(&V_pf_pfil_hooked))
6675 		return;
6676 
6677 #ifdef INET
6678 	pfil_remove_hook(V_pf_ip4_in_hook);
6679 	pfil_remove_hook(V_pf_ip4_out_hook);
6680 #endif
6681 #ifdef INET6
6682 	pfil_remove_hook(V_pf_ip6_in_hook);
6683 	pfil_remove_hook(V_pf_ip6_out_hook);
6684 #endif
6685 
6686 	atomic_store_bool(&V_pf_pfil_hooked, false);
6687 }
6688 
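/*
 * Per-vnet initialization: create the tag zone, set up the tag hashes
 * and attach pf to the vnet.
 */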
6689 static void
6690 pf_load_vnet(void)
6691 {
6692 	V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname),
6693 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
6694 
6695 	pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
6696 	    PF_RULE_TAG_HASH_SIZE_DEFAULT);
6697 #ifdef ALTQ
6698 	pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize,
6699 	    PF_QUEUE_TAG_HASH_SIZE_DEFAULT);
6700 #endif
6701 
6702 	V_pf_keth = &V_pf_main_keth_anchor.ruleset;
6703 
6704 	pfattach_vnet();
6705 	V_pf_vnet_active = 1;
6706 }
6707 
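/*
 * One-time (global) initialization: locks, /dev/pf, the purge thread and
 * the interface layer.
 */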
6708 static int
6709 pf_load(void)
6710 {
6711 	int error;
6712 
6713 	rm_init_flags(&pf_rules_lock, "pf rulesets", RM_RECURSE);
6714 	sx_init(&pf_ioctl_lock, "pf ioctl");
6715 	sx_init(&pf_end_lock, "pf end thread");
6716 
6717 	pf_mtag_initialize();
6718 
6719 	pf_dev = make_dev(&pf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME);
6720 	if (pf_dev == NULL)
6721 		return (ENOMEM);
6722 
6723 	pf_end_threads = 0;
6724 	error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0, "pf purge");
6725 	if (error != 0)
6726 		return (error);
6727 
6728 	pfi_initialize();
6729 
6730 	return (0);
6731 }
6732 
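/*
 * Per-vnet teardown: unhook from pfil, shut pf down and release all
 * per-vnet resources.
 */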
6733 static void
6734 pf_unload_vnet(void)
6735 {
6736 	int ret __diagused;
6737 
6738 	V_pf_vnet_active = 0;
6739 	V_pf_status.running = 0;
6740 	dehook_pf();
6741 	dehook_pf_eth();
6742 
6743 	PF_RULES_WLOCK();
6744 	pf_syncookies_cleanup();
6745 	shutdown_pf();
6746 	PF_RULES_WUNLOCK();
6747 
6748 	/* Make sure we've cleaned up ethernet rules before we continue. */
6749 	NET_EPOCH_DRAIN_CALLBACKS();
6750 
6751 	ret = swi_remove(V_pf_swi_cookie);
6752 	MPASS(ret == 0);
6753 	ret = intr_event_destroy(V_pf_swi_ie);
6754 	MPASS(ret == 0);
6755 
6756 	pf_unload_vnet_purge();
6757 
6758 	pf_normalize_cleanup();
6759 	PF_RULES_WLOCK();
6760 	pfi_cleanup_vnet();
6761 	PF_RULES_WUNLOCK();
6762 	pfr_cleanup();
6763 	pf_osfp_flush();
6764 	pf_cleanup();
6765 	if (IS_DEFAULT_VNET(curvnet))
6766 		pf_mtag_cleanup();
6767 
6768 	pf_cleanup_tagset(&V_pf_tags);
6769 #ifdef ALTQ
6770 	pf_cleanup_tagset(&V_pf_qids);
6771 #endif
6772 	uma_zdestroy(V_pf_tag_z);
6773 
6774 #ifdef PF_WANT_32_TO_64_COUNTER
6775 	PF_RULES_WLOCK();
6776 	LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist);
6777 
6778 	MPASS(LIST_EMPTY(&V_pf_allkiflist));
6779 	MPASS(V_pf_allkifcount == 0);
6780 
6781 	LIST_REMOVE(&V_pf_default_rule, allrulelist);
6782 	V_pf_allrulecount--;
6783 	LIST_REMOVE(V_pf_rulemarker, allrulelist);
6784 
6785 	/*
6786 	 * There are known pf rule leaks when running the test suite.
6787 	 */
6788 #ifdef notyet
6789 	MPASS(LIST_EMPTY(&V_pf_allrulelist));
6790 	MPASS(V_pf_allrulecount == 0);
6791 #endif
6792 
6793 	PF_RULES_WUNLOCK();
6794 
6795 	free(V_pf_kifmarker, PFI_MTYPE);
6796 	free(V_pf_rulemarker, M_PFRULE);
6797 #endif
6798 
6799 	/* Free counters last as we updated them during shutdown. */
6800 	pf_counter_u64_deinit(&V_pf_default_rule.evaluations);
6801 	for (int i = 0; i < 2; i++) {
6802 		pf_counter_u64_deinit(&V_pf_default_rule.packets[i]);
6803 		pf_counter_u64_deinit(&V_pf_default_rule.bytes[i]);
6804 	}
6805 	counter_u64_free(V_pf_default_rule.states_cur);
6806 	counter_u64_free(V_pf_default_rule.states_tot);
6807 	counter_u64_free(V_pf_default_rule.src_nodes);
6808 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, V_pf_default_rule.timestamp);
6809 
6810 	for (int i = 0; i < PFRES_MAX; i++)
6811 		counter_u64_free(V_pf_status.counters[i]);
6812 	for (int i = 0; i < KLCNT_MAX; i++)
6813 		counter_u64_free(V_pf_status.lcounters[i]);
6814 	for (int i = 0; i < FCNT_MAX; i++)
6815 		pf_counter_u64_deinit(&V_pf_status.fcounters[i]);
6816 	for (int i = 0; i < SCNT_MAX; i++)
6817 		counter_u64_free(V_pf_status.scounters[i]);
6818 }
6819 
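/*
 * Global teardown: stop the purge thread, destroy /dev/pf and the global
 * locks.
 */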
6820 static void
6821 pf_unload(void)
6822 {
6823 
6824 	sx_xlock(&pf_end_lock);
6825 	pf_end_threads = 1;
6826 	while (pf_end_threads < 2) {
6827 		wakeup_one(pf_purge_thread);
6828 		sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0);
6829 	}
6830 	sx_xunlock(&pf_end_lock);
6831 
6832 	if (pf_dev != NULL)
6833 		destroy_dev(pf_dev);
6834 
6835 	pfi_cleanup();
6836 
6837 	rm_destroy(&pf_rules_lock);
6838 	sx_destroy(&pf_ioctl_lock);
6839 	sx_destroy(&pf_end_lock);
6840 }
6841 
6842 static void
6843 vnet_pf_init(void *unused __unused)
6844 {
6845 
6846 	pf_load_vnet();
6847 }
6848 VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
6849     vnet_pf_init, NULL);
6850 
6851 static void
6852 vnet_pf_uninit(const void *unused __unused)
6853 {
6854 
6855 	pf_unload_vnet();
6856 }
6857 SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL);
6858 VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
6859     vnet_pf_uninit, NULL);
6860 
6861 static int
6862 pf_modevent(module_t mod, int type, void *data)
6863 {
6864 	int error = 0;
6865 
6866 	switch (type) {
6867 	case MOD_LOAD:
6868 		error = pf_load();
6869 		break;
6870 	case MOD_UNLOAD:
6871 		/* Handled in SYSUNINIT(pf_unload) to ensure it's done after
6872 		 * the vnet_pf_uninit()s */
6873 		break;
6874 	default:
6875 		error = EINVAL;
6876 		break;
6877 	}
6878 
6879 	return (error);
6880 }
6881 
6882 static moduledata_t pf_mod = {
6883 	"pf",
6884 	pf_modevent,
6885 	0
6886 };
6887 
6888 DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
6889 MODULE_VERSION(pf, PF_MODVER);
6890