xref: /freebsd/sys/netpfil/pf/pf_ioctl.c (revision 4b9d6057)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2001 Daniel Hartmeier
5  * Copyright (c) 2002,2003 Henning Brauer
6  * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  *    - Redistributions of source code must retain the above copyright
14  *      notice, this list of conditions and the following disclaimer.
15  *    - Redistributions in binary form must reproduce the above
16  *      copyright notice, this list of conditions and the following
17  *      disclaimer in the documentation and/or other materials provided
18  *      with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  *
33  * Effort sponsored in part by the Defense Advanced Research Projects
34  * Agency (DARPA) and Air Force Research Laboratory, Air Force
35  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
36  *
37  *	$OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
38  */
39 
40 #include <sys/cdefs.h>
41 #include "opt_inet.h"
42 #include "opt_inet6.h"
43 #include "opt_bpf.h"
44 #include "opt_pf.h"
45 
46 #include <sys/param.h>
47 #include <sys/_bitset.h>
48 #include <sys/bitset.h>
49 #include <sys/bus.h>
50 #include <sys/conf.h>
51 #include <sys/endian.h>
52 #include <sys/fcntl.h>
53 #include <sys/filio.h>
54 #include <sys/hash.h>
55 #include <sys/interrupt.h>
56 #include <sys/jail.h>
57 #include <sys/kernel.h>
58 #include <sys/kthread.h>
59 #include <sys/lock.h>
60 #include <sys/mbuf.h>
61 #include <sys/module.h>
62 #include <sys/nv.h>
63 #include <sys/proc.h>
64 #include <sys/sdt.h>
65 #include <sys/smp.h>
66 #include <sys/socket.h>
67 #include <sys/sysctl.h>
68 #include <sys/md5.h>
69 #include <sys/ucred.h>
70 
71 #include <net/if.h>
72 #include <net/if_var.h>
73 #include <net/if_private.h>
74 #include <net/vnet.h>
75 #include <net/route.h>
76 #include <net/pfil.h>
77 #include <net/pfvar.h>
78 #include <net/if_pfsync.h>
79 #include <net/if_pflog.h>
80 
81 #include <netinet/in.h>
82 #include <netinet/ip.h>
83 #include <netinet/ip_var.h>
84 #include <netinet6/ip6_var.h>
85 #include <netinet/ip_icmp.h>
86 #include <netpfil/pf/pf_nl.h>
87 #include <netpfil/pf/pf_nv.h>
88 
89 #ifdef INET6
90 #include <netinet/ip6.h>
91 #endif /* INET6 */
92 
93 #ifdef ALTQ
94 #include <net/altq/altq.h>
95 #endif
96 
97 SDT_PROBE_DEFINE3(pf, ioctl, ioctl, error, "int", "int", "int");
98 SDT_PROBE_DEFINE3(pf, ioctl, function, error, "char *", "int", "int");
99 SDT_PROBE_DEFINE2(pf, ioctl, addrule, error, "int", "int");
100 SDT_PROBE_DEFINE2(pf, ioctl, nvchk, error, "int", "int");
101 
102 static struct pf_kpool	*pf_get_kpool(const char *, u_int32_t, u_int8_t,
103 			    u_int32_t, u_int8_t, u_int8_t, u_int8_t);
104 
105 static void		 pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *);
106 static void		 pf_empty_kpool(struct pf_kpalist *);
107 static int		 pfioctl(struct cdev *, u_long, caddr_t, int,
108 			    struct thread *);
109 static int		 pf_begin_eth(uint32_t *, const char *);
110 static void		 pf_rollback_eth_cb(struct epoch_context *);
111 static int		 pf_rollback_eth(uint32_t, const char *);
112 static int		 pf_commit_eth(uint32_t, const char *);
113 static void		 pf_free_eth_rule(struct pf_keth_rule *);
114 #ifdef ALTQ
115 static int		 pf_begin_altq(u_int32_t *);
116 static int		 pf_rollback_altq(u_int32_t);
117 static int		 pf_commit_altq(u_int32_t);
118 static int		 pf_enable_altq(struct pf_altq *);
119 static int		 pf_disable_altq(struct pf_altq *);
120 static uint16_t		 pf_qname2qid(const char *);
121 static void		 pf_qid_unref(uint16_t);
122 #endif /* ALTQ */
123 static int		 pf_begin_rules(u_int32_t *, int, const char *);
124 static int		 pf_rollback_rules(u_int32_t, int, char *);
125 static int		 pf_setup_pfsync_matching(struct pf_kruleset *);
126 static void		 pf_hash_rule_rolling(MD5_CTX *, struct pf_krule *);
127 static void		 pf_hash_rule(struct pf_krule *);
128 static void		 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
129 static int		 pf_commit_rules(u_int32_t, int, char *);
130 static int		 pf_addr_setup(struct pf_kruleset *,
131 			    struct pf_addr_wrap *, sa_family_t);
132 static void		 pf_addr_copyout(struct pf_addr_wrap *);
133 static void		 pf_src_node_copy(const struct pf_ksrc_node *,
134 			    struct pf_src_node *);
135 #ifdef ALTQ
136 static int		 pf_export_kaltq(struct pf_altq *,
137 			    struct pfioc_altq_v1 *, size_t);
138 static int		 pf_import_kaltq(struct pfioc_altq_v1 *,
139 			    struct pf_altq *, size_t);
140 #endif /* ALTQ */
141 
142 VNET_DEFINE(struct pf_krule,	pf_default_rule);
143 
144 static __inline int             pf_krule_compare(struct pf_krule *,
145 				    struct pf_krule *);
146 
147 RB_GENERATE(pf_krule_global, pf_krule, entry_global, pf_krule_compare);
148 
149 #ifdef ALTQ
150 VNET_DEFINE_STATIC(int,		pf_altq_running);
151 #define	V_pf_altq_running	VNET(pf_altq_running)
152 #endif
153 
154 #define	TAGID_MAX	 50000
155 struct pf_tagname {
156 	TAILQ_ENTRY(pf_tagname)	namehash_entries;
157 	TAILQ_ENTRY(pf_tagname)	taghash_entries;
158 	char			name[PF_TAG_NAME_SIZE];
159 	uint16_t		tag;
160 	int			ref;
161 };
162 
163 struct pf_tagset {
164 	TAILQ_HEAD(, pf_tagname)	*namehash;
165 	TAILQ_HEAD(, pf_tagname)	*taghash;
166 	unsigned int			 mask;
167 	uint32_t			 seed;
168 	BITSET_DEFINE(, TAGID_MAX)	 avail;
169 };
170 
171 VNET_DEFINE(struct pf_tagset, pf_tags);
172 #define	V_pf_tags	VNET(pf_tags)
173 static unsigned int	pf_rule_tag_hashsize;
174 #define	PF_RULE_TAG_HASH_SIZE_DEFAULT	128
175 SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN,
176     &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT,
177     "Size of pf(4) rule tag hashtable");
178 
179 #ifdef ALTQ
180 VNET_DEFINE(struct pf_tagset, pf_qids);
181 #define	V_pf_qids	VNET(pf_qids)
182 static unsigned int	pf_queue_tag_hashsize;
183 #define	PF_QUEUE_TAG_HASH_SIZE_DEFAULT	128
184 SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN,
185     &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT,
186     "Size of pf(4) queue tag hashtable");
187 #endif
188 VNET_DEFINE(uma_zone_t,	 pf_tag_z);
189 #define	V_pf_tag_z		 VNET(pf_tag_z)
190 static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
191 static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");
192 
193 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
194 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
195 #endif
196 
197 VNET_DEFINE_STATIC(bool, pf_filter_local) = false;
198 #define V_pf_filter_local	VNET(pf_filter_local)
199 SYSCTL_BOOL(_net_pf, OID_AUTO, filter_local, CTLFLAG_VNET | CTLFLAG_RW,
200     &VNET_NAME(pf_filter_local), false,
201     "Enable filtering for packets delivered to local network stack");
202 
203 #ifdef PF_DEFAULT_TO_DROP
204 VNET_DEFINE_STATIC(bool, default_to_drop) = true;
205 #else
206 VNET_DEFINE_STATIC(bool, default_to_drop);
207 #endif
208 #define	V_default_to_drop VNET(default_to_drop)
209 SYSCTL_BOOL(_net_pf, OID_AUTO, default_to_drop, CTLFLAG_RDTUN | CTLFLAG_VNET,
210     &VNET_NAME(default_to_drop), false,
211     "Make the default rule drop all packets.");
212 
213 static void		 pf_init_tagset(struct pf_tagset *, unsigned int *,
214 			    unsigned int);
215 static void		 pf_cleanup_tagset(struct pf_tagset *);
216 static uint16_t		 tagname2hashindex(const struct pf_tagset *, const char *);
217 static uint16_t		 tag2hashindex(const struct pf_tagset *, uint16_t);
218 static u_int16_t	 tagname2tag(struct pf_tagset *, const char *);
219 static u_int16_t	 pf_tagname2tag(const char *);
220 static void		 tag_unref(struct pf_tagset *, u_int16_t);
221 
222 #define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x
223 
224 struct cdev *pf_dev;
225 
226 /*
227  * XXX - These are new and need to be checked when moving to a new version.
228  */
229 static void		 pf_clear_all_states(void);
230 static unsigned int	 pf_clear_states(const struct pf_kstate_kill *);
231 static void		 pf_killstates(struct pf_kstate_kill *,
232 			    unsigned int *);
233 static int		 pf_killstates_row(struct pf_kstate_kill *,
234 			    struct pf_idhash *);
235 static int		 pf_killstates_nv(struct pfioc_nv *);
236 static int		 pf_clearstates_nv(struct pfioc_nv *);
237 static int		 pf_getstate(struct pfioc_nv *);
238 static int		 pf_getstatus(struct pfioc_nv *);
239 static int		 pf_clear_tables(void);
240 static void		 pf_clear_srcnodes(struct pf_ksrc_node *);
241 static void		 pf_kill_srcnodes(struct pfioc_src_node_kill *);
242 static int		 pf_keepcounters(struct pfioc_nv *);
243 static void		 pf_tbladdr_copyout(struct pf_addr_wrap *);
244 
245 /*
246  * Wrapper functions for pfil(9) hooks
247  */
248 static pfil_return_t pf_eth_check_in(struct mbuf **m, struct ifnet *ifp,
249     int flags, void *ruleset __unused, struct inpcb *inp);
250 static pfil_return_t pf_eth_check_out(struct mbuf **m, struct ifnet *ifp,
251     int flags, void *ruleset __unused, struct inpcb *inp);
252 #ifdef INET
253 static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp,
254     int flags, void *ruleset __unused, struct inpcb *inp);
255 static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp,
256     int flags, void *ruleset __unused, struct inpcb *inp);
257 #endif
258 #ifdef INET6
259 static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp,
260     int flags, void *ruleset __unused, struct inpcb *inp);
261 static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp,
262     int flags, void *ruleset __unused, struct inpcb *inp);
263 #endif
264 
265 static void		hook_pf_eth(void);
266 static void		hook_pf(void);
267 static void		dehook_pf_eth(void);
268 static void		dehook_pf(void);
269 static int		shutdown_pf(void);
270 static int		pf_load(void);
271 static void		pf_unload(void);
272 
273 static struct cdevsw pf_cdevsw = {
274 	.d_ioctl =	pfioctl,
275 	.d_name =	PF_NAME,
276 	.d_version =	D_VERSION,
277 };
278 
279 VNET_DEFINE_STATIC(bool, pf_pfil_hooked);
280 #define V_pf_pfil_hooked	VNET(pf_pfil_hooked)
281 VNET_DEFINE_STATIC(bool, pf_pfil_eth_hooked);
282 #define V_pf_pfil_eth_hooked	VNET(pf_pfil_eth_hooked)
283 
284 /*
285  * We need a flag that is neither hooked nor running to know when
286  * the VNET is "valid".  We primarily need this to control (global)
287  * external events, e.g., eventhandlers.
288  */
289 VNET_DEFINE(int, pf_vnet_active);
290 #define V_pf_vnet_active	VNET(pf_vnet_active)
291 
292 int pf_end_threads;
293 struct proc *pf_purge_proc;
294 
295 VNET_DEFINE(struct rmlock, pf_rules_lock);
296 VNET_DEFINE_STATIC(struct sx, pf_ioctl_lock);
297 #define	V_pf_ioctl_lock		VNET(pf_ioctl_lock)
298 struct sx			pf_end_lock;
299 
300 /* pfsync */
301 VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
302 VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
303 VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
304 VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
305 VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
306 VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
307 pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;
308 
309 /* pflog */
310 pflog_packet_t			*pflog_packet_ptr = NULL;
311 
312 /*
313  * Copy a user-provided string, returning an error if truncation would occur.
314  * Avoid scanning past "sz" bytes in the source string since there's no
315  * guarantee that it's nul-terminated.
316  */
317 static int
318 pf_user_strcpy(char *dst, const char *src, size_t sz)
319 {
320 	if (strnlen(src, sz) == sz)
321 		return (EINVAL);
322 	(void)strlcpy(dst, src, sz);
323 	return (0);
324 }
325 
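/*
 * Per-VNET attach: initialize the pf subsystems for this network stack
 * instance and set up the default rule, limits, timeouts and status
 * counters.
 */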
326 static void
327 pfattach_vnet(void)
328 {
329 	u_int32_t *my_timeout = V_pf_default_rule.timeout;
330 
331 	bzero(&V_pf_status, sizeof(V_pf_status));
332 
333 	pf_initialize();
334 	pfr_initialize();
335 	pfi_initialize_vnet();
336 	pf_normalize_init();
337 	pf_syncookies_init();
338 
339 	V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
340 	V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;
341 
342 	RB_INIT(&V_pf_anchors);
343 	pf_init_kruleset(&pf_main_ruleset);
344 
345 	pf_init_keth(V_pf_keth);
346 
347 	/* default rule should never be garbage collected */
348 	V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
349 	V_pf_default_rule.action = V_default_to_drop ? PF_DROP : PF_PASS;
350 	V_pf_default_rule.nr = -1;
351 	V_pf_default_rule.rtableid = -1;
352 
353 	pf_counter_u64_init(&V_pf_default_rule.evaluations, M_WAITOK);
354 	for (int i = 0; i < 2; i++) {
355 		pf_counter_u64_init(&V_pf_default_rule.packets[i], M_WAITOK);
356 		pf_counter_u64_init(&V_pf_default_rule.bytes[i], M_WAITOK);
357 	}
358 	V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
359 	V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
360 	V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK);
361 
362 	V_pf_default_rule.timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
363 	    M_WAITOK | M_ZERO);
364 
365 #ifdef PF_WANT_32_TO_64_COUNTER
366 	V_pf_kifmarker = malloc(sizeof(*V_pf_kifmarker), PFI_MTYPE, M_WAITOK | M_ZERO);
367 	V_pf_rulemarker = malloc(sizeof(*V_pf_rulemarker), M_PFRULE, M_WAITOK | M_ZERO);
368 	PF_RULES_WLOCK();
369 	LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist);
370 	LIST_INSERT_HEAD(&V_pf_allrulelist, &V_pf_default_rule, allrulelist);
371 	V_pf_allrulecount++;
372 	LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist);
373 	PF_RULES_WUNLOCK();
374 #endif
375 
376 	/* initialize default timeouts */
377 	my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
378 	my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
379 	my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
380 	my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
381 	my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
382 	my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
383 	my_timeout[PFTM_SCTP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
384 	my_timeout[PFTM_SCTP_OPENING] = PFTM_TCP_OPENING_VAL;
385 	my_timeout[PFTM_SCTP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
386 	my_timeout[PFTM_SCTP_CLOSING] = PFTM_TCP_CLOSING_VAL;
387 	my_timeout[PFTM_SCTP_CLOSED] = PFTM_TCP_CLOSED_VAL;
388 	my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
389 	my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
390 	my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
391 	my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
392 	my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
393 	my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
394 	my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
395 	my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
396 	my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
397 	my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
398 	my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
399 	my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
400 	my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
401 	my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;
402 
403 	V_pf_status.debug = PF_DEBUG_URGENT;
404 	/*
405 	 * XXX This is different from OpenBSD, where reassembly is enabled by
406 	 * default. In FreeBSD we expect people to still use scrub rules and
407 	 * switch to the new syntax later. Only when they switch must they
408 	 * explicitly enable reassembly. We could change the default once the
409 	 * scrub rule functionality is hopefully removed some day in the future.
410 	 */
411 	V_pf_status.reass = 0;
412 
413 	V_pf_pfil_hooked = false;
414 	V_pf_pfil_eth_hooked = false;
415 
416 	/* XXX do our best to avoid a conflict */
417 	V_pf_status.hostid = arc4random();
418 
419 	for (int i = 0; i < PFRES_MAX; i++)
420 		V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
421 	for (int i = 0; i < KLCNT_MAX; i++)
422 		V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
423 	for (int i = 0; i < FCNT_MAX; i++)
424 		pf_counter_u64_init(&V_pf_status.fcounters[i], M_WAITOK);
425 	for (int i = 0; i < SCNT_MAX; i++)
426 		V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);
427 
428 	if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET,
429 	    INTR_MPSAFE, &V_pf_swi_cookie) != 0)
430 		/* XXXGL: leaked all above. */
431 		return;
432 }
433 
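/*
 * Find the address pool of the rule identified by anchor path, ruleset
 * (derived from the rule action), rule number and ticket.  Returns NULL
 * if no such rule exists or, when check_ticket is set, if the ticket
 * does not match the ruleset's active/inactive ticket.
 */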
434 static struct pf_kpool *
435 pf_get_kpool(const char *anchor, u_int32_t ticket, u_int8_t rule_action,
436     u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
437     u_int8_t check_ticket)
438 {
439 	struct pf_kruleset	*ruleset;
440 	struct pf_krule		*rule;
441 	int			 rs_num;
442 
443 	ruleset = pf_find_kruleset(anchor);
444 	if (ruleset == NULL)
445 		return (NULL);
446 	rs_num = pf_get_ruleset_number(rule_action);
447 	if (rs_num >= PF_RULESET_MAX)
448 		return (NULL);
449 	if (active) {
450 		if (check_ticket && ticket !=
451 		    ruleset->rules[rs_num].active.ticket)
452 			return (NULL);
453 		if (r_last)
454 			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
455 			    pf_krulequeue);
456 		else
457 			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
458 	} else {
459 		if (check_ticket && ticket !=
460 		    ruleset->rules[rs_num].inactive.ticket)
461 			return (NULL);
462 		if (r_last)
463 			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
464 			    pf_krulequeue);
465 		else
466 			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
467 	}
468 	if (!r_last) {
469 		while ((rule != NULL) && (rule->nr != rule_number))
470 			rule = TAILQ_NEXT(rule, entries);
471 	}
472 	if (rule == NULL)
473 		return (NULL);
474 
475 	return (&rule->rpool);
476 }
477 
478 static void
479 pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb)
480 {
481 	struct pf_kpooladdr	*mv_pool_pa;
482 
483 	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
484 		TAILQ_REMOVE(poola, mv_pool_pa, entries);
485 		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
486 	}
487 }
488 
489 static void
490 pf_empty_kpool(struct pf_kpalist *poola)
491 {
492 	struct pf_kpooladdr *pa;
493 
494 	while ((pa = TAILQ_FIRST(poola)) != NULL) {
495 		switch (pa->addr.type) {
496 		case PF_ADDR_DYNIFTL:
497 			pfi_dynaddr_remove(pa->addr.p.dyn);
498 			break;
499 		case PF_ADDR_TABLE:
500 			/* XXX: this could be unfinished pooladdr on pabuf */
501 			if (pa->addr.p.tbl != NULL)
502 				pfr_detach_table(pa->addr.p.tbl);
503 			break;
504 		}
505 		if (pa->kif)
506 			pfi_kkif_unref(pa->kif);
507 		TAILQ_REMOVE(poola, pa, entries);
508 		free(pa, M_PFRULE);
509 	}
510 }
511 
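/*
 * Unlink a rule from its queue onto the global V_pf_unlinked_rules list;
 * the actual destruction is deferred until nothing references the rule.
 */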
512 static void
513 pf_unlink_rule_locked(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
514 {
515 
516 	PF_RULES_WASSERT();
517 	PF_UNLNKDRULES_ASSERT();
518 
519 	TAILQ_REMOVE(rulequeue, rule, entries);
520 
521 	rule->rule_ref |= PFRULE_REFS;
522 	TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
523 }
524 
525 static void
526 pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
527 {
528 
529 	PF_RULES_WASSERT();
530 
531 	PF_UNLNKDRULES_LOCK();
532 	pf_unlink_rule_locked(rulequeue, rule);
533 	PF_UNLNKDRULES_UNLOCK();
534 }
535 
536 static void
537 pf_free_eth_rule(struct pf_keth_rule *rule)
538 {
539 	PF_RULES_WASSERT();
540 
541 	if (rule == NULL)
542 		return;
543 
544 	if (rule->tag)
545 		tag_unref(&V_pf_tags, rule->tag);
546 	if (rule->match_tag)
547 		tag_unref(&V_pf_tags, rule->match_tag);
548 #ifdef ALTQ
549 	pf_qid_unref(rule->qid);
550 #endif
551 
552 	if (rule->bridge_to)
553 		pfi_kkif_unref(rule->bridge_to);
554 	if (rule->kif)
555 		pfi_kkif_unref(rule->kif);
556 
557 	if (rule->ipsrc.addr.type == PF_ADDR_TABLE)
558 		pfr_detach_table(rule->ipsrc.addr.p.tbl);
559 	if (rule->ipdst.addr.type == PF_ADDR_TABLE)
560 		pfr_detach_table(rule->ipdst.addr.p.tbl);
561 
562 	counter_u64_free(rule->evaluations);
563 	for (int i = 0; i < 2; i++) {
564 		counter_u64_free(rule->packets[i]);
565 		counter_u64_free(rule->bytes[i]);
566 	}
567 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
568 	pf_keth_anchor_remove(rule);
569 
570 	free(rule, M_PFRULE);
571 }
572 
573 void
574 pf_free_rule(struct pf_krule *rule)
575 {
576 
577 	PF_RULES_WASSERT();
578 	PF_CONFIG_ASSERT();
579 
580 	if (rule->tag)
581 		tag_unref(&V_pf_tags, rule->tag);
582 	if (rule->match_tag)
583 		tag_unref(&V_pf_tags, rule->match_tag);
584 #ifdef ALTQ
585 	if (rule->pqid != rule->qid)
586 		pf_qid_unref(rule->pqid);
587 	pf_qid_unref(rule->qid);
588 #endif
589 	switch (rule->src.addr.type) {
590 	case PF_ADDR_DYNIFTL:
591 		pfi_dynaddr_remove(rule->src.addr.p.dyn);
592 		break;
593 	case PF_ADDR_TABLE:
594 		pfr_detach_table(rule->src.addr.p.tbl);
595 		break;
596 	}
597 	switch (rule->dst.addr.type) {
598 	case PF_ADDR_DYNIFTL:
599 		pfi_dynaddr_remove(rule->dst.addr.p.dyn);
600 		break;
601 	case PF_ADDR_TABLE:
602 		pfr_detach_table(rule->dst.addr.p.tbl);
603 		break;
604 	}
605 	if (rule->overload_tbl)
606 		pfr_detach_table(rule->overload_tbl);
607 	if (rule->kif)
608 		pfi_kkif_unref(rule->kif);
609 	pf_kanchor_remove(rule);
610 	pf_empty_kpool(&rule->rpool.list);
611 
612 	pf_krule_free(rule);
613 }
614 
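/*
 * Initialize a tag set: two hash tables (keyed by name and by tag value)
 * sized by the tunable (forced to a power of two), plus a bit vector
 * tracking which tag IDs are still available.
 */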
615 static void
616 pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size,
617     unsigned int default_size)
618 {
619 	unsigned int i;
620 	unsigned int hashsize;
621 
622 	if (*tunable_size == 0 || !powerof2(*tunable_size))
623 		*tunable_size = default_size;
624 
625 	hashsize = *tunable_size;
626 	ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH,
627 	    M_WAITOK);
628 	ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH,
629 	    M_WAITOK);
630 	ts->mask = hashsize - 1;
631 	ts->seed = arc4random();
632 	for (i = 0; i < hashsize; i++) {
633 		TAILQ_INIT(&ts->namehash[i]);
634 		TAILQ_INIT(&ts->taghash[i]);
635 	}
636 	BIT_FILL(TAGID_MAX, &ts->avail);
637 }
638 
639 static void
640 pf_cleanup_tagset(struct pf_tagset *ts)
641 {
642 	unsigned int i;
643 	unsigned int hashsize;
644 	struct pf_tagname *t, *tmp;
645 
646 	/*
647 	 * Only need to clean up one of the hashes as each tag is hashed
648 	 * into each table.
649 	 */
650 	hashsize = ts->mask + 1;
651 	for (i = 0; i < hashsize; i++)
652 		TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp)
653 			uma_zfree(V_pf_tag_z, t);
654 
655 	free(ts->namehash, M_PFHASH);
656 	free(ts->taghash, M_PFHASH);
657 }
658 
659 static uint16_t
660 tagname2hashindex(const struct pf_tagset *ts, const char *tagname)
661 {
662 	size_t len;
663 
664 	len = strnlen(tagname, PF_TAG_NAME_SIZE - 1);
665 	return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask);
666 }
667 
668 static uint16_t
669 tag2hashindex(const struct pf_tagset *ts, uint16_t tag)
670 {
671 
672 	return (tag & ts->mask);
673 }
674 
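/*
 * Map a tag name to its tag ID, allocating a new reference-counted entry
 * if the name is not yet known.  Returns 0 if no ID is available or the
 * allocation fails.
 */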
675 static u_int16_t
676 tagname2tag(struct pf_tagset *ts, const char *tagname)
677 {
678 	struct pf_tagname	*tag;
679 	u_int32_t		 index;
680 	u_int16_t		 new_tagid;
681 
682 	PF_RULES_WASSERT();
683 
684 	index = tagname2hashindex(ts, tagname);
685 	TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
686 		if (strcmp(tagname, tag->name) == 0) {
687 			tag->ref++;
688 			return (tag->tag);
689 		}
690 
691 	/*
692 	 * New entry.
693 	 *
694 	 * To avoid fragmentation, allocate the lowest-numbered free tag ID,
695 	 * i.e., the first free slot found in the availability bit vector.
696 	 */
697 	new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);
698 	/*
699 	 * Tags are 1-based, with valid tags in the range [1..TAGID_MAX].
700 	 * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits
701 	 * set.  It may also return a bit number greater than TAGID_MAX due
702 	 * to rounding of the number of bits in the vector up to a multiple
703 	 * of the vector word size at declaration/allocation time.
704 	 */
705 	if ((new_tagid == 0) || (new_tagid > TAGID_MAX))
706 		return (0);
707 
708 	/* Mark the tag as in use.  Bits are 0-based for BIT_CLR() */
709 	BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);
710 
711 	/* allocate and fill new struct pf_tagname */
712 	tag = uma_zalloc(V_pf_tag_z, M_NOWAIT);
713 	if (tag == NULL)
714 		return (0);
715 	strlcpy(tag->name, tagname, sizeof(tag->name));
716 	tag->tag = new_tagid;
717 	tag->ref = 1;
718 
719 	/* Insert into namehash */
720 	TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries);
721 
722 	/* Insert into taghash */
723 	index = tag2hashindex(ts, new_tagid);
724 	TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries);
725 
726 	return (tag->tag);
727 }
728 
729 static void
730 tag_unref(struct pf_tagset *ts, u_int16_t tag)
731 {
732 	struct pf_tagname	*t;
733 	uint16_t		 index;
734 
735 	PF_RULES_WASSERT();
736 
737 	index = tag2hashindex(ts, tag);
738 	TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
739 		if (tag == t->tag) {
740 			if (--t->ref == 0) {
741 				TAILQ_REMOVE(&ts->taghash[index], t,
742 				    taghash_entries);
743 				index = tagname2hashindex(ts, t->name);
744 				TAILQ_REMOVE(&ts->namehash[index], t,
745 				    namehash_entries);
746 				/* Bits are 0-based for BIT_SET() */
747 				BIT_SET(TAGID_MAX, tag - 1, &ts->avail);
748 				uma_zfree(V_pf_tag_z, t);
749 			}
750 			break;
751 		}
752 }
753 
754 static uint16_t
755 pf_tagname2tag(const char *tagname)
756 {
757 	return (tagname2tag(&V_pf_tags, tagname));
758 }
759 
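/*
 * Begin a transaction on an anchor's inactive Ethernet ruleset: purge any
 * leftover inactive rules and hand out a ticket that must be presented
 * again on commit or rollback.
 */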
760 static int
761 pf_begin_eth(uint32_t *ticket, const char *anchor)
762 {
763 	struct pf_keth_rule *rule, *tmp;
764 	struct pf_keth_ruleset *rs;
765 
766 	PF_RULES_WASSERT();
767 
768 	rs = pf_find_or_create_keth_ruleset(anchor);
769 	if (rs == NULL)
770 		return (EINVAL);
771 
772 	/* Purge old inactive rules. */
773 	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
774 	    tmp) {
775 		TAILQ_REMOVE(rs->inactive.rules, rule,
776 		    entries);
777 		pf_free_eth_rule(rule);
778 	}
779 
780 	*ticket = ++rs->inactive.ticket;
781 	rs->inactive.open = 1;
782 
783 	return (0);
784 }
785 
786 static void
787 pf_rollback_eth_cb(struct epoch_context *ctx)
788 {
789 	struct pf_keth_ruleset *rs;
790 
791 	rs = __containerof(ctx, struct pf_keth_ruleset, epoch_ctx);
792 
793 	CURVNET_SET(rs->vnet);
794 
795 	PF_RULES_WLOCK();
796 	pf_rollback_eth(rs->inactive.ticket,
797 	    rs->anchor ? rs->anchor->path : "");
798 	PF_RULES_WUNLOCK();
799 
800 	CURVNET_RESTORE();
801 }
802 
803 static int
804 pf_rollback_eth(uint32_t ticket, const char *anchor)
805 {
806 	struct pf_keth_rule *rule, *tmp;
807 	struct pf_keth_ruleset *rs;
808 
809 	PF_RULES_WASSERT();
810 
811 	rs = pf_find_keth_ruleset(anchor);
812 	if (rs == NULL)
813 		return (EINVAL);
814 
815 	if (!rs->inactive.open ||
816 	    ticket != rs->inactive.ticket)
817 		return (0);
818 
819 	/* Purge old inactive rules. */
820 	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
821 	    tmp) {
822 		TAILQ_REMOVE(rs->inactive.rules, rule, entries);
823 		pf_free_eth_rule(rule);
824 	}
825 
826 	rs->inactive.open = 0;
827 
828 	pf_remove_if_empty_keth_ruleset(rs);
829 
830 	return (0);
831 }
832 
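/*
 * Skip steps accelerate rule evaluation: for each field (interface,
 * direction, protocol, addresses), every rule points at the next rule
 * where that field's value actually changes, so runs of rules sharing a
 * value can be skipped in one step.
 */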
833 #define	PF_SET_SKIP_STEPS(i)					\
834 	do {							\
835 		while (head[i] != cur) {			\
836 			head[i]->skip[i].ptr = cur;		\
837 			head[i] = TAILQ_NEXT(head[i], entries);	\
838 		}						\
839 	} while (0)
840 
841 static void
842 pf_eth_calc_skip_steps(struct pf_keth_ruleq *rules)
843 {
844 	struct pf_keth_rule *cur, *prev, *head[PFE_SKIP_COUNT];
845 	int i;
846 
847 	cur = TAILQ_FIRST(rules);
848 	prev = cur;
849 	for (i = 0; i < PFE_SKIP_COUNT; ++i)
850 		head[i] = cur;
851 	while (cur != NULL) {
852 		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
853 			PF_SET_SKIP_STEPS(PFE_SKIP_IFP);
854 		if (cur->direction != prev->direction)
855 			PF_SET_SKIP_STEPS(PFE_SKIP_DIR);
856 		if (cur->proto != prev->proto)
857 			PF_SET_SKIP_STEPS(PFE_SKIP_PROTO);
858 		if (memcmp(&cur->src, &prev->src, sizeof(cur->src)) != 0)
859 			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_ADDR);
860 		if (memcmp(&cur->dst, &prev->dst, sizeof(cur->dst)) != 0)
861 			PF_SET_SKIP_STEPS(PFE_SKIP_DST_ADDR);
862 		if (cur->ipsrc.neg != prev->ipsrc.neg ||
863 		    pf_addr_wrap_neq(&cur->ipsrc.addr, &prev->ipsrc.addr))
864 			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_IP_ADDR);
865 		if (cur->ipdst.neg != prev->ipdst.neg ||
866 		    pf_addr_wrap_neq(&cur->ipdst.addr, &prev->ipdst.addr))
867 			PF_SET_SKIP_STEPS(PFE_SKIP_DST_IP_ADDR);
868 
869 		prev = cur;
870 		cur = TAILQ_NEXT(cur, entries);
871 	}
872 	for (i = 0; i < PFE_SKIP_COUNT; ++i)
873 		PF_SET_SKIP_STEPS(i);
874 }
875 
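/*
 * Commit an Ethernet ruleset transaction: swap the active and inactive
 * rule queues and free the previously active rules from an epoch
 * callback, once no packet can still be traversing them.
 */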
876 static int
877 pf_commit_eth(uint32_t ticket, const char *anchor)
878 {
879 	struct pf_keth_ruleq *rules;
880 	struct pf_keth_ruleset *rs;
881 
882 	rs = pf_find_keth_ruleset(anchor);
883 	if (rs == NULL) {
884 		return (EINVAL);
885 	}
886 
887 	if (!rs->inactive.open ||
888 	    ticket != rs->inactive.ticket)
889 		return (EBUSY);
890 
891 	PF_RULES_WASSERT();
892 
893 	pf_eth_calc_skip_steps(rs->inactive.rules);
894 
895 	rules = rs->active.rules;
896 	ck_pr_store_ptr(&rs->active.rules, rs->inactive.rules);
897 	rs->inactive.rules = rules;
898 	rs->inactive.ticket = rs->active.ticket;
899 
900 	/* Clean up inactive rules (i.e., previously active rules) only when
901 	 * we're sure they're no longer used. */
902 	NET_EPOCH_CALL(pf_rollback_eth_cb, &rs->epoch_ctx);
903 
904 	return (0);
905 }
906 
907 #ifdef ALTQ
908 static uint16_t
909 pf_qname2qid(const char *qname)
910 {
911 	return (tagname2tag(&V_pf_qids, qname));
912 }
913 
914 static void
915 pf_qid_unref(uint16_t qid)
916 {
917 	tag_unref(&V_pf_qids, qid);
918 }
919 
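/*
 * Begin an ALTQ transaction: flush both inactive ALTQ lists, removing any
 * disciplines still attached, and issue a fresh ticket.
 */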
920 static int
921 pf_begin_altq(u_int32_t *ticket)
922 {
923 	struct pf_altq	*altq, *tmp;
924 	int		 error = 0;
925 
926 	PF_RULES_WASSERT();
927 
928 	/* Purge the old altq lists */
929 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
930 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
931 			/* detach and destroy the discipline */
932 			error = altq_remove(altq);
933 		}
934 		free(altq, M_PFALTQ);
935 	}
936 	TAILQ_INIT(V_pf_altq_ifs_inactive);
937 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
938 		pf_qid_unref(altq->qid);
939 		free(altq, M_PFALTQ);
940 	}
941 	TAILQ_INIT(V_pf_altqs_inactive);
942 	if (error)
943 		return (error);
944 	*ticket = ++V_ticket_altqs_inactive;
945 	V_altqs_inactive_open = 1;
946 	return (0);
947 }
948 
949 static int
950 pf_rollback_altq(u_int32_t ticket)
951 {
952 	struct pf_altq	*altq, *tmp;
953 	int		 error = 0;
954 
955 	PF_RULES_WASSERT();
956 
957 	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
958 		return (0);
959 	/* Purge the old altq lists */
960 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
961 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
962 			/* detach and destroy the discipline */
963 			error = altq_remove(altq);
964 		}
965 		free(altq, M_PFALTQ);
966 	}
967 	TAILQ_INIT(V_pf_altq_ifs_inactive);
968 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
969 		pf_qid_unref(altq->qid);
970 		free(altq, M_PFALTQ);
971 	}
972 	TAILQ_INIT(V_pf_altqs_inactive);
973 	V_altqs_inactive_open = 0;
974 	return (error);
975 }
976 
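/*
 * Commit an ALTQ transaction: swap the active and inactive lists, attach
 * and (if ALTQ is running) enable the new disciplines, then detach,
 * disable and free the old ones.
 */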
977 static int
978 pf_commit_altq(u_int32_t ticket)
979 {
980 	struct pf_altqqueue	*old_altqs, *old_altq_ifs;
981 	struct pf_altq		*altq, *tmp;
982 	int			 err, error = 0;
983 
984 	PF_RULES_WASSERT();
985 
986 	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
987 		return (EBUSY);
988 
989 	/* swap altqs, keep the old. */
990 	old_altqs = V_pf_altqs_active;
991 	old_altq_ifs = V_pf_altq_ifs_active;
992 	V_pf_altqs_active = V_pf_altqs_inactive;
993 	V_pf_altq_ifs_active = V_pf_altq_ifs_inactive;
994 	V_pf_altqs_inactive = old_altqs;
995 	V_pf_altq_ifs_inactive = old_altq_ifs;
996 	V_ticket_altqs_active = V_ticket_altqs_inactive;
997 
998 	/* Attach new disciplines */
999 	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
1000 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
1001 			/* attach the discipline */
1002 			error = altq_pfattach(altq);
1003 			if (error == 0 && V_pf_altq_running)
1004 				error = pf_enable_altq(altq);
1005 			if (error != 0)
1006 				return (error);
1007 		}
1008 	}
1009 
1010 	/* Purge the old altq lists */
1011 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
1012 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
1013 			/* detach and destroy the discipline */
1014 			if (V_pf_altq_running)
1015 				error = pf_disable_altq(altq);
1016 			err = altq_pfdetach(altq);
1017 			if (err != 0 && error == 0)
1018 				error = err;
1019 			err = altq_remove(altq);
1020 			if (err != 0 && error == 0)
1021 				error = err;
1022 		}
1023 		free(altq, M_PFALTQ);
1024 	}
1025 	TAILQ_INIT(V_pf_altq_ifs_inactive);
1026 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
1027 		pf_qid_unref(altq->qid);
1028 		free(altq, M_PFALTQ);
1029 	}
1030 	TAILQ_INIT(V_pf_altqs_inactive);
1031 
1032 	V_altqs_inactive_open = 0;
1033 	return (error);
1034 }
1035 
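/*
 * Enable ALTQ on the interface named by the queue and program the token
 * bucket regulator from the configured interface bandwidth and burst
 * size.
 */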
1036 static int
1037 pf_enable_altq(struct pf_altq *altq)
1038 {
1039 	struct ifnet		*ifp;
1040 	struct tb_profile	 tb;
1041 	int			 error = 0;
1042 
1043 	if ((ifp = ifunit(altq->ifname)) == NULL)
1044 		return (EINVAL);
1045 
1046 	if (ifp->if_snd.altq_type != ALTQT_NONE)
1047 		error = altq_enable(&ifp->if_snd);
1048 
1049 	/* set tokenbucket regulator */
1050 	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
1051 		tb.rate = altq->ifbandwidth;
1052 		tb.depth = altq->tbrsize;
1053 		error = tbr_set(&ifp->if_snd, &tb);
1054 	}
1055 
1056 	return (error);
1057 }
1058 
1059 static int
1060 pf_disable_altq(struct pf_altq *altq)
1061 {
1062 	struct ifnet		*ifp;
1063 	struct tb_profile	 tb;
1064 	int			 error;
1065 
1066 	if ((ifp = ifunit(altq->ifname)) == NULL)
1067 		return (EINVAL);
1068 
1069 	/*
1070 	 * If the discipline is no longer referenced, it has been overridden
1071 	 * by a new one; in that case, just return.
1072 	 */
1073 	if (altq->altq_disc != ifp->if_snd.altq_disc)
1074 		return (0);
1075 
1076 	error = altq_disable(&ifp->if_snd);
1077 
1078 	if (error == 0) {
1079 		/* clear tokenbucket regulator */
1080 		tb.rate = 0;
1081 		error = tbr_set(&ifp->if_snd, &tb);
1082 	}
1083 
1084 	return (error);
1085 }
1086 
1087 static int
1088 pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket,
1089     struct pf_altq *altq)
1090 {
1091 	struct ifnet	*ifp1;
1092 	int		 error = 0;
1093 
1094 	/* Deactivate the interface in question */
1095 	altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
1096 	if ((ifp1 = ifunit(altq->ifname)) == NULL ||
1097 	    (remove && ifp1 == ifp)) {
1098 		altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
1099 	} else {
1100 		error = altq_add(ifp1, altq);
1101 
1102 		if (ticket != V_ticket_altqs_inactive)
1103 			error = EBUSY;
1104 
1105 		if (error)
1106 			free(altq, M_PFALTQ);
1107 	}
1108 
1109 	return (error);
1110 }
1111 
1112 void
1113 pf_altq_ifnet_event(struct ifnet *ifp, int remove)
1114 {
1115 	struct pf_altq	*a1, *a2, *a3;
1116 	u_int32_t	 ticket;
1117 	int		 error = 0;
1118 
1119 	/*
1120 	 * No need to re-evaluate the configuration for events on interfaces
1121 	 * that do not support ALTQ, as it's not possible for such
1122 	 * interfaces to be part of the configuration.
1123 	 */
1124 	if (!ALTQ_IS_READY(&ifp->if_snd))
1125 		return;
1126 
1127 	/* Interrupt userland queue modifications */
1128 	if (V_altqs_inactive_open)
1129 		pf_rollback_altq(V_ticket_altqs_inactive);
1130 
1131 	/* Start new altq ruleset */
1132 	if (pf_begin_altq(&ticket))
1133 		return;
1134 
1135 	/* Copy the current active set */
1136 	TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) {
1137 		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
1138 		if (a2 == NULL) {
1139 			error = ENOMEM;
1140 			break;
1141 		}
1142 		bcopy(a1, a2, sizeof(struct pf_altq));
1143 
1144 		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
1145 		if (error)
1146 			break;
1147 
1148 		TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries);
1149 	}
1150 	if (error)
1151 		goto out;
1152 	TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
1153 		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
1154 		if (a2 == NULL) {
1155 			error = ENOMEM;
1156 			break;
1157 		}
1158 		bcopy(a1, a2, sizeof(struct pf_altq));
1159 
1160 		if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
1161 			error = EBUSY;
1162 			free(a2, M_PFALTQ);
1163 			break;
1164 		}
1165 		a2->altq_disc = NULL;
1166 		TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) {
1167 			if (strncmp(a3->ifname, a2->ifname,
1168 				IFNAMSIZ) == 0) {
1169 				a2->altq_disc = a3->altq_disc;
1170 				break;
1171 			}
1172 		}
1173 		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
1174 		if (error)
1175 			break;
1176 
1177 		TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
1178 	}
1179 
1180 out:
1181 	if (error != 0)
1182 		pf_rollback_altq(ticket);
1183 	else
1184 		pf_commit_altq(ticket);
1185 }
1186 #endif /* ALTQ */
1187 
1188 static struct pf_krule_global *
1189 pf_rule_tree_alloc(int flags)
1190 {
1191 	struct pf_krule_global *tree;
1192 
1193 	tree = malloc(sizeof(struct pf_krule_global), M_TEMP, flags);
1194 	if (tree == NULL)
1195 		return (NULL);
1196 	RB_INIT(tree);
1197 	return (tree);
1198 }
1199 
1200 static void
1201 pf_rule_tree_free(struct pf_krule_global *tree)
1202 {
1203 
1204 	free(tree, M_TEMP);
1205 }
1206 
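/*
 * Begin a rules transaction on one ruleset of an anchor: install a fresh
 * inactive rule tree, unlink any leftover inactive rules and return a new
 * ticket.
 */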
1207 static int
1208 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
1209 {
1210 	struct pf_krule_global *tree;
1211 	struct pf_kruleset	*rs;
1212 	struct pf_krule		*rule;
1213 
1214 	PF_RULES_WASSERT();
1215 
1216 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1217 		return (EINVAL);
1218 	tree = pf_rule_tree_alloc(M_NOWAIT);
1219 	if (tree == NULL)
1220 		return (ENOMEM);
1221 	rs = pf_find_or_create_kruleset(anchor);
1222 	if (rs == NULL) {
1223 		free(tree, M_TEMP);
1224 		return (EINVAL);
1225 	}
1226 	pf_rule_tree_free(rs->rules[rs_num].inactive.tree);
1227 	rs->rules[rs_num].inactive.tree = tree;
1228 
1229 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
1230 		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
1231 		rs->rules[rs_num].inactive.rcount--;
1232 	}
1233 	*ticket = ++rs->rules[rs_num].inactive.ticket;
1234 	rs->rules[rs_num].inactive.open = 1;
1235 	return (0);
1236 }
1237 
1238 static int
1239 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
1240 {
1241 	struct pf_kruleset	*rs;
1242 	struct pf_krule		*rule;
1243 
1244 	PF_RULES_WASSERT();
1245 
1246 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1247 		return (EINVAL);
1248 	rs = pf_find_kruleset(anchor);
1249 	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1250 	    rs->rules[rs_num].inactive.ticket != ticket)
1251 		return (0);
1252 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
1253 		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
1254 		rs->rules[rs_num].inactive.rcount--;
1255 	}
1256 	rs->rules[rs_num].inactive.open = 0;
1257 	return (0);
1258 }
1259 
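/*
 * The PF_MD5_* helpers below feed rule fields into an MD5 digest;
 * multi-byte integers are converted to network byte order first so the
 * resulting checksum is comparable between hosts.
 * pf_setup_pfsync_matching() uses this to compute the ruleset checksum
 * that lets pfsync peers detect whether their rulesets are identical.
 */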
1260 #define PF_MD5_UPD(st, elm)						\
1261 		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))
1262 
1263 #define PF_MD5_UPD_STR(st, elm)						\
1264 		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))
1265 
1266 #define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
1267 		(stor) = htonl((st)->elm);				\
1268 		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
1269 } while (0)
1270 
1271 #define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
1272 		(stor) = htons((st)->elm);				\
1273 		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
1274 } while (0)
1275 
1276 static void
1277 pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
1278 {
1279 	PF_MD5_UPD(pfr, addr.type);
1280 	switch (pfr->addr.type) {
1281 		case PF_ADDR_DYNIFTL:
1282 			PF_MD5_UPD(pfr, addr.v.ifname);
1283 			PF_MD5_UPD(pfr, addr.iflags);
1284 			break;
1285 		case PF_ADDR_TABLE:
1286 			PF_MD5_UPD(pfr, addr.v.tblname);
1287 			break;
1288 		case PF_ADDR_ADDRMASK:
1289 			/* XXX ignore af? */
1290 			PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
1291 			PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
1292 			break;
1293 	}
1294 
1295 	PF_MD5_UPD(pfr, port[0]);
1296 	PF_MD5_UPD(pfr, port[1]);
1297 	PF_MD5_UPD(pfr, neg);
1298 	PF_MD5_UPD(pfr, port_op);
1299 }
1300 
1301 static void
1302 pf_hash_rule_rolling(MD5_CTX *ctx, struct pf_krule *rule)
1303 {
1304 	u_int16_t x;
1305 	u_int32_t y;
1306 
1307 	pf_hash_rule_addr(ctx, &rule->src);
1308 	pf_hash_rule_addr(ctx, &rule->dst);
1309 	for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++)
1310 		PF_MD5_UPD_STR(rule, label[i]);
1311 	PF_MD5_UPD_STR(rule, ifname);
1312 	PF_MD5_UPD_STR(rule, match_tagname);
1313 	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
1314 	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
1315 	PF_MD5_UPD_HTONL(rule, prob, y);
1316 	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
1317 	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
1318 	PF_MD5_UPD(rule, uid.op);
1319 	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
1320 	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
1321 	PF_MD5_UPD(rule, gid.op);
1322 	PF_MD5_UPD_HTONL(rule, rule_flag, y);
1323 	PF_MD5_UPD(rule, action);
1324 	PF_MD5_UPD(rule, direction);
1325 	PF_MD5_UPD(rule, af);
1326 	PF_MD5_UPD(rule, quick);
1327 	PF_MD5_UPD(rule, ifnot);
1328 	PF_MD5_UPD(rule, match_tag_not);
1329 	PF_MD5_UPD(rule, natpass);
1330 	PF_MD5_UPD(rule, keep_state);
1331 	PF_MD5_UPD(rule, proto);
1332 	PF_MD5_UPD(rule, type);
1333 	PF_MD5_UPD(rule, code);
1334 	PF_MD5_UPD(rule, flags);
1335 	PF_MD5_UPD(rule, flagset);
1336 	PF_MD5_UPD(rule, allow_opts);
1337 	PF_MD5_UPD(rule, rt);
1338 	PF_MD5_UPD(rule, tos);
1339 	PF_MD5_UPD(rule, scrub_flags);
1340 	PF_MD5_UPD(rule, min_ttl);
1341 	PF_MD5_UPD(rule, set_tos);
1342 	if (rule->anchor != NULL)
1343 		PF_MD5_UPD_STR(rule, anchor->path);
1344 }
1345 
1346 static void
1347 pf_hash_rule(struct pf_krule *rule)
1348 {
1349 	MD5_CTX		ctx;
1350 
1351 	MD5Init(&ctx);
1352 	pf_hash_rule_rolling(&ctx, rule);
1353 	MD5Final(rule->md5sum, &ctx);
1354 }
1355 
1356 static int
1357 pf_krule_compare(struct pf_krule *a, struct pf_krule *b)
1358 {
1359 
1360 	return (memcmp(a->md5sum, b->md5sum, PF_MD5_DIGEST_LENGTH));
1361 }
1362 
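/*
 * Commit a rules transaction: swap the active and inactive rule queues,
 * carry counters over from matching old rules when keep_counters is set,
 * recompute skip steps and purge the previously active rules.
 */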
1363 static int
1364 pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
1365 {
1366 	struct pf_kruleset	*rs;
1367 	struct pf_krule		*rule, **old_array, *old_rule;
1368 	struct pf_krulequeue	*old_rules;
1369 	struct pf_krule_global  *old_tree;
1370 	int			 error;
1371 	u_int32_t		 old_rcount;
1372 
1373 	PF_RULES_WASSERT();
1374 
1375 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1376 		return (EINVAL);
1377 	rs = pf_find_kruleset(anchor);
1378 	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1379 	    ticket != rs->rules[rs_num].inactive.ticket)
1380 		return (EBUSY);
1381 
1382 	/* Calculate checksum for the main ruleset */
1383 	if (rs == &pf_main_ruleset) {
1384 		error = pf_setup_pfsync_matching(rs);
1385 		if (error != 0)
1386 			return (error);
1387 	}
1388 
1389 	/* Swap rules, keep the old. */
1390 	old_rules = rs->rules[rs_num].active.ptr;
1391 	old_rcount = rs->rules[rs_num].active.rcount;
1392 	old_array = rs->rules[rs_num].active.ptr_array;
1393 	old_tree = rs->rules[rs_num].active.tree;
1394 
1395 	rs->rules[rs_num].active.ptr =
1396 	    rs->rules[rs_num].inactive.ptr;
1397 	rs->rules[rs_num].active.ptr_array =
1398 	    rs->rules[rs_num].inactive.ptr_array;
1399 	rs->rules[rs_num].active.tree =
1400 	    rs->rules[rs_num].inactive.tree;
1401 	rs->rules[rs_num].active.rcount =
1402 	    rs->rules[rs_num].inactive.rcount;
1403 
1404 	/* Attempt to preserve counter information. */
1405 	if (V_pf_status.keep_counters && old_tree != NULL) {
1406 		TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr,
1407 		    entries) {
1408 			old_rule = RB_FIND(pf_krule_global, old_tree, rule);
1409 			if (old_rule == NULL) {
1410 				continue;
1411 			}
1412 			pf_counter_u64_critical_enter();
1413 			pf_counter_u64_add_protected(&rule->evaluations,
1414 			    pf_counter_u64_fetch(&old_rule->evaluations));
1415 			pf_counter_u64_add_protected(&rule->packets[0],
1416 			    pf_counter_u64_fetch(&old_rule->packets[0]));
1417 			pf_counter_u64_add_protected(&rule->packets[1],
1418 			    pf_counter_u64_fetch(&old_rule->packets[1]));
1419 			pf_counter_u64_add_protected(&rule->bytes[0],
1420 			    pf_counter_u64_fetch(&old_rule->bytes[0]));
1421 			pf_counter_u64_add_protected(&rule->bytes[1],
1422 			    pf_counter_u64_fetch(&old_rule->bytes[1]));
1423 			pf_counter_u64_critical_exit();
1424 		}
1425 	}
1426 
1427 	rs->rules[rs_num].inactive.ptr = old_rules;
1428 	rs->rules[rs_num].inactive.ptr_array = old_array;
1429 	rs->rules[rs_num].inactive.tree = NULL; /* important for pf_ioctl_addrule */
1430 	rs->rules[rs_num].inactive.rcount = old_rcount;
1431 
1432 	rs->rules[rs_num].active.ticket =
1433 	    rs->rules[rs_num].inactive.ticket;
1434 	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);
1435 
1436 	/* Purge the old rule list. */
1437 	PF_UNLNKDRULES_LOCK();
1438 	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
1439 		pf_unlink_rule_locked(old_rules, rule);
1440 	PF_UNLNKDRULES_UNLOCK();
1441 	if (rs->rules[rs_num].inactive.ptr_array)
1442 		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
1443 	rs->rules[rs_num].inactive.ptr_array = NULL;
1444 	rs->rules[rs_num].inactive.rcount = 0;
1445 	rs->rules[rs_num].inactive.open = 0;
1446 	pf_remove_if_empty_kruleset(rs);
1447 	free(old_tree, M_TEMP);
1448 
1449 	return (0);
1450 }
1451 
1452 static int
1453 pf_setup_pfsync_matching(struct pf_kruleset *rs)
1454 {
1455 	MD5_CTX			 ctx;
1456 	struct pf_krule		*rule;
1457 	int			 rs_cnt;
1458 	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];
1459 
1460 	MD5Init(&ctx);
1461 	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
1462 		/* XXX PF_RULESET_SCRUB as well? */
1463 		if (rs_cnt == PF_RULESET_SCRUB)
1464 			continue;
1465 
1466 		if (rs->rules[rs_cnt].inactive.ptr_array)
1467 			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
1468 		rs->rules[rs_cnt].inactive.ptr_array = NULL;
1469 
1470 		if (rs->rules[rs_cnt].inactive.rcount) {
1471 			rs->rules[rs_cnt].inactive.ptr_array =
1472 			    mallocarray(rs->rules[rs_cnt].inactive.rcount,
1473 			    sizeof(struct pf_rule **),
1474 			    M_TEMP, M_NOWAIT);
1475 
1476 			if (!rs->rules[rs_cnt].inactive.ptr_array)
1477 				return (ENOMEM);
1478 		}
1479 
1480 		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
1481 		    entries) {
1482 			pf_hash_rule_rolling(&ctx, rule);
1483 			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
1484 		}
1485 	}
1486 
1487 	MD5Final(digest, &ctx);
1488 	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
1489 	return (0);
1490 }
1491 
1492 static int
1493 pf_eth_addr_setup(struct pf_keth_ruleset *ruleset, struct pf_addr_wrap *addr)
1494 {
1495 	int error = 0;
1496 
1497 	switch (addr->type) {
1498 	case PF_ADDR_TABLE:
1499 		addr->p.tbl = pfr_eth_attach_table(ruleset, addr->v.tblname);
1500 		if (addr->p.tbl == NULL)
1501 			error = ENOMEM;
1502 		break;
1503 	default:
1504 		error = EINVAL;
1505 	}
1506 
1507 	return (error);
1508 }
1509 
1510 static int
1511 pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr,
1512     sa_family_t af)
1513 {
1514 	int error = 0;
1515 
1516 	switch (addr->type) {
1517 	case PF_ADDR_TABLE:
1518 		addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
1519 		if (addr->p.tbl == NULL)
1520 			error = ENOMEM;
1521 		break;
1522 	case PF_ADDR_DYNIFTL:
1523 		error = pfi_dynaddr_setup(addr, af);
1524 		break;
1525 	}
1526 
1527 	return (error);
1528 }
1529 
1530 static void
1531 pf_addr_copyout(struct pf_addr_wrap *addr)
1532 {
1533 
1534 	switch (addr->type) {
1535 	case PF_ADDR_DYNIFTL:
1536 		pfi_dynaddr_copyout(addr);
1537 		break;
1538 	case PF_ADDR_TABLE:
1539 		pf_tbladdr_copyout(addr);
1540 		break;
1541 	}
1542 }
1543 
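/*
 * Export a kernel source node to its userland representation, fetching
 * the per-CPU counters and converting absolute uptime-based timestamps
 * into relative values.
 */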
1544 static void
1545 pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out)
1546 {
1547 	int	secs = time_uptime, diff;
1548 
1549 	bzero(out, sizeof(struct pf_src_node));
1550 
1551 	bcopy(&in->addr, &out->addr, sizeof(struct pf_addr));
1552 	bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr));
1553 
1554 	if (in->rule.ptr != NULL)
1555 		out->rule.nr = in->rule.ptr->nr;
1556 
1557 	for (int i = 0; i < 2; i++) {
1558 		out->bytes[i] = counter_u64_fetch(in->bytes[i]);
1559 		out->packets[i] = counter_u64_fetch(in->packets[i]);
1560 	}
1561 
1562 	out->states = in->states;
1563 	out->conn = in->conn;
1564 	out->af = in->af;
1565 	out->ruletype = in->ruletype;
1566 
1567 	out->creation = secs - in->creation;
1568 	if (out->expire > secs)
1569 		out->expire -= secs;
1570 	else
1571 		out->expire = 0;
1572 
1573 	/* Adjust the connection rate estimate. */
1574 	diff = secs - in->conn_rate.last;
1575 	if (diff >= in->conn_rate.seconds)
1576 		out->conn_rate.count = 0;
1577 	else
1578 		out->conn_rate.count -=
1579 		    in->conn_rate.count * diff /
1580 		    in->conn_rate.seconds;
1581 }
1582 
1583 #ifdef ALTQ
1584 /*
1585  * Handle export of struct pf_kaltq to user binaries that may be using any
1586  * version of struct pf_altq.
1587  */
1588 static int
1589 pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size)
1590 {
1591 	u_int32_t version;
1592 
1593 	if (ioc_size == sizeof(struct pfioc_altq_v0))
1594 		version = 0;
1595 	else
1596 		version = pa->version;
1597 
1598 	if (version > PFIOC_ALTQ_VERSION)
1599 		return (EINVAL);
1600 
1601 #define ASSIGN(x) exported_q->x = q->x
1602 #define COPY(x) \
1603 	bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x)))
1604 #define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX)
1605 #define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX)
1606 
1607 	switch (version) {
1608 	case 0: {
1609 		struct pf_altq_v0 *exported_q =
1610 		    &((struct pfioc_altq_v0 *)pa)->altq;
1611 
1612 		COPY(ifname);
1613 
1614 		ASSIGN(scheduler);
1615 		ASSIGN(tbrsize);
1616 		exported_q->tbrsize = SATU16(q->tbrsize);
1617 		exported_q->ifbandwidth = SATU32(q->ifbandwidth);
1618 
1619 		COPY(qname);
1620 		COPY(parent);
1621 		ASSIGN(parent_qid);
1622 		exported_q->bandwidth = SATU32(q->bandwidth);
1623 		ASSIGN(priority);
1624 		ASSIGN(local_flags);
1625 
1626 		ASSIGN(qlimit);
1627 		ASSIGN(flags);
1628 
1629 		if (q->scheduler == ALTQT_HFSC) {
1630 #define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x
1631 #define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \
1632 			    SATU32(q->pq_u.hfsc_opts.x)
1633 
1634 			ASSIGN_OPT_SATU32(rtsc_m1);
1635 			ASSIGN_OPT(rtsc_d);
1636 			ASSIGN_OPT_SATU32(rtsc_m2);
1637 
1638 			ASSIGN_OPT_SATU32(lssc_m1);
1639 			ASSIGN_OPT(lssc_d);
1640 			ASSIGN_OPT_SATU32(lssc_m2);
1641 
1642 			ASSIGN_OPT_SATU32(ulsc_m1);
1643 			ASSIGN_OPT(ulsc_d);
1644 			ASSIGN_OPT_SATU32(ulsc_m2);
1645 
1646 			ASSIGN_OPT(flags);
1647 
1648 #undef ASSIGN_OPT
1649 #undef ASSIGN_OPT_SATU32
1650 		} else
1651 			COPY(pq_u);
1652 
1653 		ASSIGN(qid);
1654 		break;
1655 	}
1656 	case 1:	{
1657 		struct pf_altq_v1 *exported_q =
1658 		    &((struct pfioc_altq_v1 *)pa)->altq;
1659 
1660 		COPY(ifname);
1661 
1662 		ASSIGN(scheduler);
1663 		ASSIGN(tbrsize);
1664 		ASSIGN(ifbandwidth);
1665 
1666 		COPY(qname);
1667 		COPY(parent);
1668 		ASSIGN(parent_qid);
1669 		ASSIGN(bandwidth);
1670 		ASSIGN(priority);
1671 		ASSIGN(local_flags);
1672 
1673 		ASSIGN(qlimit);
1674 		ASSIGN(flags);
1675 		COPY(pq_u);
1676 
1677 		ASSIGN(qid);
1678 		break;
1679 	}
1680 	default:
1681 		panic("%s: unhandled struct pfioc_altq version", __func__);
1682 		break;
1683 	}
1684 
1685 #undef ASSIGN
1686 #undef COPY
1687 #undef SATU16
1688 #undef SATU32
1689 
1690 	return (0);
1691 }
1692 
1693 /*
1694  * Handle import to struct pf_kaltq of struct pf_altq from user binaries
1695  * that may be using any version of it.
1696  */
1697 static int
1698 pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size)
1699 {
1700 	u_int32_t version;
1701 
1702 	if (ioc_size == sizeof(struct pfioc_altq_v0))
1703 		version = 0;
1704 	else
1705 		version = pa->version;
1706 
1707 	if (version > PFIOC_ALTQ_VERSION)
1708 		return (EINVAL);
1709 
1710 #define ASSIGN(x) q->x = imported_q->x
1711 #define COPY(x) \
1712 	bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x)))
1713 
1714 	switch (version) {
1715 	case 0: {
1716 		struct pf_altq_v0 *imported_q =
1717 		    &((struct pfioc_altq_v0 *)pa)->altq;
1718 
1719 		COPY(ifname);
1720 
1721 		ASSIGN(scheduler);
1722 		ASSIGN(tbrsize); /* 16-bit -> 32-bit */
1723 		ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */
1724 
1725 		COPY(qname);
1726 		COPY(parent);
1727 		ASSIGN(parent_qid);
1728 		ASSIGN(bandwidth); /* 32-bit -> 64-bit */
1729 		ASSIGN(priority);
1730 		ASSIGN(local_flags);
1731 
1732 		ASSIGN(qlimit);
1733 		ASSIGN(flags);
1734 
1735 		if (imported_q->scheduler == ALTQT_HFSC) {
1736 #define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x
1737 
1738 			/*
1739 			 * The m1 and m2 parameters are being copied from
1740 			 * 32-bit to 64-bit.
1741 			 */
1742 			ASSIGN_OPT(rtsc_m1);
1743 			ASSIGN_OPT(rtsc_d);
1744 			ASSIGN_OPT(rtsc_m2);
1745 
1746 			ASSIGN_OPT(lssc_m1);
1747 			ASSIGN_OPT(lssc_d);
1748 			ASSIGN_OPT(lssc_m2);
1749 
1750 			ASSIGN_OPT(ulsc_m1);
1751 			ASSIGN_OPT(ulsc_d);
1752 			ASSIGN_OPT(ulsc_m2);
1753 
1754 			ASSIGN_OPT(flags);
1755 
1756 #undef ASSIGN_OPT
1757 		} else
1758 			COPY(pq_u);
1759 
1760 		ASSIGN(qid);
1761 		break;
1762 	}
1763 	case 1: {
1764 		struct pf_altq_v1 *imported_q =
1765 		    &((struct pfioc_altq_v1 *)pa)->altq;
1766 
1767 		COPY(ifname);
1768 
1769 		ASSIGN(scheduler);
1770 		ASSIGN(tbrsize);
1771 		ASSIGN(ifbandwidth);
1772 
1773 		COPY(qname);
1774 		COPY(parent);
1775 		ASSIGN(parent_qid);
1776 		ASSIGN(bandwidth);
1777 		ASSIGN(priority);
1778 		ASSIGN(local_flags);
1779 
1780 		ASSIGN(qlimit);
1781 		ASSIGN(flags);
1782 		COPY(pq_u);
1783 
1784 		ASSIGN(qid);
1785 		break;
1786 	}
1787 	default:
1788 		panic("%s: unhandled struct pfioc_altq version", __func__);
1789 		break;
1790 	}
1791 
1792 #undef ASSIGN
1793 #undef COPY
1794 
1795 	return (0);
1796 }
1797 
1798 static struct pf_altq *
1799 pf_altq_get_nth_active(u_int32_t n)
1800 {
1801 	struct pf_altq		*altq;
1802 	u_int32_t		 nr;
1803 
1804 	nr = 0;
1805 	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
1806 		if (nr == n)
1807 			return (altq);
1808 		nr++;
1809 	}
1810 
1811 	TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
1812 		if (nr == n)
1813 			return (altq);
1814 		nr++;
1815 	}
1816 
1817 	return (NULL);
1818 }
1819 #endif /* ALTQ */
1820 
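/*
 * Allocate a zeroed rule with its pool mutex and per-CPU timestamp
 * storage initialized; pf_krule_free() releases all of it again.
 */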
1821 struct pf_krule *
1822 pf_krule_alloc(void)
1823 {
1824 	struct pf_krule *rule;
1825 
1826 	rule = malloc(sizeof(struct pf_krule), M_PFRULE, M_WAITOK | M_ZERO);
1827 	mtx_init(&rule->rpool.mtx, "pf_krule_pool", NULL, MTX_DEF);
1828 	rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
1829 	    M_WAITOK | M_ZERO);
1830 	return (rule);
1831 }
1832 
1833 void
1834 pf_krule_free(struct pf_krule *rule)
1835 {
1836 #ifdef PF_WANT_32_TO_64_COUNTER
1837 	bool wowned;
1838 #endif
1839 
1840 	if (rule == NULL)
1841 		return;
1842 
1843 #ifdef PF_WANT_32_TO_64_COUNTER
1844 	if (rule->allrulelinked) {
1845 		wowned = PF_RULES_WOWNED();
1846 		if (!wowned)
1847 			PF_RULES_WLOCK();
1848 		LIST_REMOVE(rule, allrulelist);
1849 		V_pf_allrulecount--;
1850 		if (!wowned)
1851 			PF_RULES_WUNLOCK();
1852 	}
1853 #endif
1854 
1855 	pf_counter_u64_deinit(&rule->evaluations);
1856 	for (int i = 0; i < 2; i++) {
1857 		pf_counter_u64_deinit(&rule->packets[i]);
1858 		pf_counter_u64_deinit(&rule->bytes[i]);
1859 	}
1860 	counter_u64_free(rule->states_cur);
1861 	counter_u64_free(rule->states_tot);
1862 	counter_u64_free(rule->src_nodes);
1863 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
1864 
1865 	mtx_destroy(&rule->rpool.mtx);
1866 	free(rule, M_PFRULE);
1867 }
1868 
1869 static void
1870 pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool,
1871     struct pf_pooladdr *pool)
1872 {
1873 
1874 	bzero(pool, sizeof(*pool));
1875 	bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr));
1876 	strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname));
1877 }
1878 
1879 static int
1880 pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool,
1881     struct pf_kpooladdr *kpool)
1882 {
1883 	int ret;
1884 
1885 	bzero(kpool, sizeof(*kpool));
1886 	bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr));
1887 	ret = pf_user_strcpy(kpool->ifname, pool->ifname,
1888 	    sizeof(kpool->ifname));
1889 	return (ret);
1890 }
1891 
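/*
 * Import the user-visible pf_pool fields into a kernel pf_kpool.
 * The address list, current member and mutex are managed separately.
 */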
1892 static void
1893 pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool)
1894 {
1895 	_Static_assert(sizeof(pool->key) == sizeof(kpool->key), "");
1896 	_Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), "");
1897 
1898 	bcopy(&pool->key, &kpool->key, sizeof(kpool->key));
1899 	bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter));
1900 
1901 	kpool->tblidx = pool->tblidx;
1902 	kpool->proxy_port[0] = pool->proxy_port[0];
1903 	kpool->proxy_port[1] = pool->proxy_port[1];
1904 	kpool->opts = pool->opts;
1905 }
1906 
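/*
 * Import a rule from userspace into its kernel representation.
 * Address families and rule addresses are validated, and all strings
 * are copied via pf_user_strcpy() so unterminated input is rejected.
 * Counters and the kif, anchor and overload table pointers are
 * deliberately not taken from userspace.
 */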
1907 static int
1908 pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule)
1909 {
1910 	int ret;
1911 
1912 #ifndef INET
1913 	if (rule->af == AF_INET) {
1914 		return (EAFNOSUPPORT);
1915 	}
1916 #endif /* INET */
1917 #ifndef INET6
1918 	if (rule->af == AF_INET6) {
1919 		return (EAFNOSUPPORT);
1920 	}
1921 #endif /* INET6 */
1922 
1923 	ret = pf_check_rule_addr(&rule->src);
1924 	if (ret != 0)
1925 		return (ret);
1926 	ret = pf_check_rule_addr(&rule->dst);
1927 	if (ret != 0)
1928 		return (ret);
1929 
1930 	bcopy(&rule->src, &krule->src, sizeof(rule->src));
1931 	bcopy(&rule->dst, &krule->dst, sizeof(rule->dst));
1932 
1933 	ret = pf_user_strcpy(krule->label[0], rule->label, sizeof(rule->label));
1934 	if (ret != 0)
1935 		return (ret);
1936 	ret = pf_user_strcpy(krule->ifname, rule->ifname, sizeof(rule->ifname));
1937 	if (ret != 0)
1938 		return (ret);
1939 	ret = pf_user_strcpy(krule->qname, rule->qname, sizeof(rule->qname));
1940 	if (ret != 0)
1941 		return (ret);
1942 	ret = pf_user_strcpy(krule->pqname, rule->pqname, sizeof(rule->pqname));
1943 	if (ret != 0)
1944 		return (ret);
1945 	ret = pf_user_strcpy(krule->tagname, rule->tagname,
1946 	    sizeof(rule->tagname));
1947 	if (ret != 0)
1948 		return (ret);
1949 	ret = pf_user_strcpy(krule->match_tagname, rule->match_tagname,
1950 	    sizeof(rule->match_tagname));
1951 	if (ret != 0)
1952 		return (ret);
1953 	ret = pf_user_strcpy(krule->overload_tblname, rule->overload_tblname,
1954 	    sizeof(rule->overload_tblname));
1955 	if (ret != 0)
1956 		return (ret);
1957 
1958 	pf_pool_to_kpool(&rule->rpool, &krule->rpool);
1959 
1960 	/* Don't allow userspace to set evaluations, packets or bytes. */
1961 	/* kif, anchor, overload_tbl are not copied over. */
1962 
1963 	krule->os_fingerprint = rule->os_fingerprint;
1964 
1965 	krule->rtableid = rule->rtableid;
1966 	bcopy(rule->timeout, krule->timeout, sizeof(krule->timeout));
1967 	krule->max_states = rule->max_states;
1968 	krule->max_src_nodes = rule->max_src_nodes;
1969 	krule->max_src_states = rule->max_src_states;
1970 	krule->max_src_conn = rule->max_src_conn;
1971 	krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit;
1972 	krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds;
1973 	krule->qid = rule->qid;
1974 	krule->pqid = rule->pqid;
1975 	krule->nr = rule->nr;
1976 	krule->prob = rule->prob;
1977 	krule->cuid = rule->cuid;
1978 	krule->cpid = rule->cpid;
1979 
1980 	krule->return_icmp = rule->return_icmp;
1981 	krule->return_icmp6 = rule->return_icmp6;
1982 	krule->max_mss = rule->max_mss;
1983 	krule->tag = rule->tag;
1984 	krule->match_tag = rule->match_tag;
1985 	krule->scrub_flags = rule->scrub_flags;
1986 
1987 	bcopy(&rule->uid, &krule->uid, sizeof(krule->uid));
1988 	bcopy(&rule->gid, &krule->gid, sizeof(krule->gid));
1989 
1990 	krule->rule_flag = rule->rule_flag;
1991 	krule->action = rule->action;
1992 	krule->direction = rule->direction;
1993 	krule->log = rule->log;
1994 	krule->logif = rule->logif;
1995 	krule->quick = rule->quick;
1996 	krule->ifnot = rule->ifnot;
1997 	krule->match_tag_not = rule->match_tag_not;
1998 	krule->natpass = rule->natpass;
1999 
2000 	krule->keep_state = rule->keep_state;
2001 	krule->af = rule->af;
2002 	krule->proto = rule->proto;
2003 	krule->type = rule->type;
2004 	krule->code = rule->code;
2005 	krule->flags = rule->flags;
2006 	krule->flagset = rule->flagset;
2007 	krule->min_ttl = rule->min_ttl;
2008 	krule->allow_opts = rule->allow_opts;
2009 	krule->rt = rule->rt;
2010 	krule->return_ttl = rule->return_ttl;
2011 	krule->tos = rule->tos;
2012 	krule->set_tos = rule->set_tos;
2013 
2014 	krule->flush = rule->flush;
2015 	krule->prio = rule->prio;
2016 	krule->set_prio[0] = rule->set_prio[0];
2017 	krule->set_prio[1] = rule->set_prio[1];
2018 
2019 	bcopy(&rule->divert, &krule->divert, sizeof(krule->divert));
2020 
2021 	return (0);
2022 }
2023 
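/*
 * DIOCGETRULES handler: report the number of rules in the requested
 * ruleset along with its active ticket.
 */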
2024 int
2025 pf_ioctl_getrules(struct pfioc_rule *pr)
2026 {
2027 	struct pf_kruleset	*ruleset;
2028 	struct pf_krule		*tail;
2029 	int			 rs_num;
2030 
2031 	PF_RULES_WLOCK();
2032 	ruleset = pf_find_kruleset(pr->anchor);
2033 	if (ruleset == NULL) {
2034 		PF_RULES_WUNLOCK();
2035 		return (EINVAL);
2036 	}
2037 	rs_num = pf_get_ruleset_number(pr->rule.action);
2038 	if (rs_num >= PF_RULESET_MAX) {
2039 		PF_RULES_WUNLOCK();
2040 		return (EINVAL);
2041 	}
2042 	tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
2043 	    pf_krulequeue);
2044 	if (tail)
2045 		pr->nr = tail->nr + 1;
2046 	else
2047 		pr->nr = 0;
2048 	pr->ticket = ruleset->rules[rs_num].active.ticket;
2049 	PF_RULES_WUNLOCK();
2050 
2051 	return (0);
2052 }
2053 
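/*
 * Link a fully constructed rule into the inactive (pending) ruleset
 * identified by the anchor and tickets.  The rule is consumed: it is
 * freed here on error.
 */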
2054 int
2055 pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket,
2056     uint32_t pool_ticket, const char *anchor, const char *anchor_call,
2057     uid_t uid, pid_t pid)
2058 {
2059 	struct pf_kruleset	*ruleset;
2060 	struct pf_krule		*tail;
2061 	struct pf_kpooladdr	*pa;
2062 	struct pfi_kkif		*kif = NULL;
2063 	int			 rs_num;
2064 	int			 error = 0;
2065 
2066 	if ((rule->return_icmp >> 8) > ICMP_MAXTYPE) {
2067 		error = EINVAL;
2068 		goto errout_unlocked;
2069 	}
2070 
2071 #define	ERROUT(x)	ERROUT_FUNCTION(errout, x)
2072 
2073 	if (rule->ifname[0])
2074 		kif = pf_kkif_create(M_WAITOK);
2075 	pf_counter_u64_init(&rule->evaluations, M_WAITOK);
2076 	for (int i = 0; i < 2; i++) {
2077 		pf_counter_u64_init(&rule->packets[i], M_WAITOK);
2078 		pf_counter_u64_init(&rule->bytes[i], M_WAITOK);
2079 	}
2080 	rule->states_cur = counter_u64_alloc(M_WAITOK);
2081 	rule->states_tot = counter_u64_alloc(M_WAITOK);
2082 	rule->src_nodes = counter_u64_alloc(M_WAITOK);
2083 	rule->cuid = uid;
2084 	rule->cpid = pid;
2085 	TAILQ_INIT(&rule->rpool.list);
2086 
2087 	PF_CONFIG_LOCK();
2088 	PF_RULES_WLOCK();
2089 #ifdef PF_WANT_32_TO_64_COUNTER
2090 	LIST_INSERT_HEAD(&V_pf_allrulelist, rule, allrulelist);
2091 	MPASS(!rule->allrulelinked);
2092 	rule->allrulelinked = true;
2093 	V_pf_allrulecount++;
2094 #endif
2095 	ruleset = pf_find_kruleset(anchor);
2096 	if (ruleset == NULL)
2097 		ERROUT(EINVAL);
2098 	rs_num = pf_get_ruleset_number(rule->action);
2099 	if (rs_num >= PF_RULESET_MAX)
2100 		ERROUT(EINVAL);
2101 	if (ticket != ruleset->rules[rs_num].inactive.ticket) {
2102 		DPFPRINTF(PF_DEBUG_MISC,
2103 		    ("ticket: %d != [%d]%d\n", ticket, rs_num,
2104 		    ruleset->rules[rs_num].inactive.ticket));
2105 		ERROUT(EBUSY);
2106 	}
2107 	if (pool_ticket != V_ticket_pabuf) {
2108 		DPFPRINTF(PF_DEBUG_MISC,
2109 		    ("pool_ticket: %d != %d\n", pool_ticket,
2110 		    V_ticket_pabuf));
2111 		ERROUT(EBUSY);
2112 	}
	/*
	 * XXXMJG hack: there is no mechanism to ensure the caller started
	 * the transaction.  The ticket checked above may happen to match by
	 * accident, even if nobody called DIOCXBEGIN, let alone this
	 * process.  Partially work around it by checking whether the RB
	 * tree got allocated; see pf_begin_rules.
	 */
2120 	if (ruleset->rules[rs_num].inactive.tree == NULL) {
2121 		ERROUT(EINVAL);
2122 	}
2123 
2124 	tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
2125 	    pf_krulequeue);
2126 	if (tail)
2127 		rule->nr = tail->nr + 1;
2128 	else
2129 		rule->nr = 0;
2130 	if (rule->ifname[0]) {
2131 		rule->kif = pfi_kkif_attach(kif, rule->ifname);
2132 		kif = NULL;
2133 		pfi_kkif_ref(rule->kif);
2134 	} else
2135 		rule->kif = NULL;
2136 
2137 	if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs)
2138 		error = EBUSY;
2139 
2140 #ifdef ALTQ
2141 	/* set queue IDs */
2142 	if (rule->qname[0] != 0) {
2143 		if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
2144 			error = EBUSY;
2145 		else if (rule->pqname[0] != 0) {
2146 			if ((rule->pqid =
2147 			    pf_qname2qid(rule->pqname)) == 0)
2148 				error = EBUSY;
2149 		} else
2150 			rule->pqid = rule->qid;
2151 	}
2152 #endif
2153 	if (rule->tagname[0])
2154 		if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
2155 			error = EBUSY;
2156 	if (rule->match_tagname[0])
2157 		if ((rule->match_tag =
2158 		    pf_tagname2tag(rule->match_tagname)) == 0)
2159 			error = EBUSY;
2160 	if (rule->rt && !rule->direction)
2161 		error = EINVAL;
2162 	if (!rule->log)
2163 		rule->logif = 0;
2164 	if (rule->logif >= PFLOGIFS_MAX)
2165 		error = EINVAL;
2166 	if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
2167 		error = ENOMEM;
2168 	if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
2169 		error = ENOMEM;
2170 	if (pf_kanchor_setup(rule, ruleset, anchor_call))
2171 		error = EINVAL;
2172 	if (rule->scrub_flags & PFSTATE_SETPRIO &&
2173 	    (rule->set_prio[0] > PF_PRIO_MAX ||
2174 	    rule->set_prio[1] > PF_PRIO_MAX))
2175 		error = EINVAL;
2176 	TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
2177 		if (pa->addr.type == PF_ADDR_TABLE) {
2178 			pa->addr.p.tbl = pfr_attach_table(ruleset,
2179 			    pa->addr.v.tblname);
2180 			if (pa->addr.p.tbl == NULL)
2181 				error = ENOMEM;
2182 		}
2183 
2184 	rule->overload_tbl = NULL;
2185 	if (rule->overload_tblname[0]) {
2186 		if ((rule->overload_tbl = pfr_attach_table(ruleset,
2187 		    rule->overload_tblname)) == NULL)
2188 			error = EINVAL;
2189 		else
2190 			rule->overload_tbl->pfrkt_flags |=
2191 			    PFR_TFLAG_ACTIVE;
2192 	}
2193 
2194 	pf_mv_kpool(&V_pf_pabuf, &rule->rpool.list);
2195 	if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
2196 	    (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
2197 	    (rule->rt > PF_NOPFROUTE)) &&
2198 	    (TAILQ_FIRST(&rule->rpool.list) == NULL))
2199 		error = EINVAL;
2200 
2201 	if (error) {
2202 		pf_free_rule(rule);
2203 		rule = NULL;
2204 		ERROUT(error);
2205 	}
2206 
2207 	rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
2208 	TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
2209 	    rule, entries);
2210 	ruleset->rules[rs_num].inactive.rcount++;
2211 
2212 	PF_RULES_WUNLOCK();
2213 	pf_hash_rule(rule);
2214 	if (RB_INSERT(pf_krule_global, ruleset->rules[rs_num].inactive.tree, rule) != NULL) {
2215 		PF_RULES_WLOCK();
2216 		TAILQ_REMOVE(ruleset->rules[rs_num].inactive.ptr, rule, entries);
2217 		ruleset->rules[rs_num].inactive.rcount--;
2218 		pf_free_rule(rule);
2219 		rule = NULL;
2220 		ERROUT(EEXIST);
2221 	}
2222 	PF_CONFIG_UNLOCK();
2223 
2224 	return (0);
2225 
2226 #undef ERROUT
2227 errout:
2228 	PF_RULES_WUNLOCK();
2229 	PF_CONFIG_UNLOCK();
2230 errout_unlocked:
2231 	pf_kkif_free(kif);
2232 	pf_krule_free(rule);
2233 	return (error);
2234 }
2235 
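/*
 * Return true if any of the rule's labels matches the given label
 * exactly.
 */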
2236 static bool
2237 pf_label_match(const struct pf_krule *rule, const char *label)
2238 {
2239 	int i = 0;
2240 
2241 	while (*rule->label[i]) {
2242 		if (strcmp(rule->label[i], label) == 0)
2243 			return (true);
2244 		i++;
2245 	}
2246 
2247 	return (false);
2248 }
2249 
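/*
 * Kill the state matching the given key and direction, but only if
 * it is the sole match.  Returns the number of states killed, 0 or 1.
 */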
2250 static unsigned int
2251 pf_kill_matching_state(struct pf_state_key_cmp *key, int dir)
2252 {
2253 	struct pf_kstate *s;
2254 	int more = 0;
2255 
2256 	s = pf_find_state_all(key, dir, &more);
2257 	if (s == NULL)
2258 		return (0);
2259 
2260 	if (more) {
2261 		PF_STATE_UNLOCK(s);
2262 		return (0);
2263 	}
2264 
2265 	pf_unlink_state(s);
2266 	return (1);
2267 }
2268 
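/*
 * Kill all states in one ID-hash row that match the criteria in psk.
 * Since unlinking a state drops the row lock, the row is rescanned
 * from the start after every kill.  Returns the number of states
 * killed.
 */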
2269 static int
2270 pf_killstates_row(struct pf_kstate_kill *psk, struct pf_idhash *ih)
2271 {
2272 	struct pf_kstate	*s;
2273 	struct pf_state_key	*sk;
2274 	struct pf_addr		*srcaddr, *dstaddr;
2275 	struct pf_state_key_cmp	 match_key;
2276 	int			 idx, killed = 0;
2277 	unsigned int		 dir;
2278 	u_int16_t		 srcport, dstport;
2279 	struct pfi_kkif		*kif;
2280 
2281 relock_DIOCKILLSTATES:
2282 	PF_HASHROW_LOCK(ih);
2283 	LIST_FOREACH(s, &ih->states, entry) {
2284 		/* For floating states look at the original kif. */
2285 		kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;
2286 
2287 		sk = s->key[psk->psk_nat ? PF_SK_STACK : PF_SK_WIRE];
2288 		if (s->direction == PF_OUT) {
2289 			srcaddr = &sk->addr[1];
2290 			dstaddr = &sk->addr[0];
2291 			srcport = sk->port[1];
2292 			dstport = sk->port[0];
2293 		} else {
2294 			srcaddr = &sk->addr[0];
2295 			dstaddr = &sk->addr[1];
2296 			srcport = sk->port[0];
2297 			dstport = sk->port[1];
2298 		}
2299 
2300 		if (psk->psk_af && sk->af != psk->psk_af)
2301 			continue;
2302 
2303 		if (psk->psk_proto && psk->psk_proto != sk->proto)
2304 			continue;
2305 
2306 		if (! PF_MATCHA(psk->psk_src.neg, &psk->psk_src.addr.v.a.addr,
2307 		    &psk->psk_src.addr.v.a.mask, srcaddr, sk->af))
2308 			continue;
2309 
2310 		if (! PF_MATCHA(psk->psk_dst.neg, &psk->psk_dst.addr.v.a.addr,
2311 		    &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af))
2312 			continue;
2313 
		if (! PF_MATCHA(psk->psk_rt_addr.neg,
2315 		    &psk->psk_rt_addr.addr.v.a.addr,
2316 		    &psk->psk_rt_addr.addr.v.a.mask,
2317 		    &s->rt_addr, sk->af))
2318 			continue;
2319 
2320 		if (psk->psk_src.port_op != 0 &&
2321 		    ! pf_match_port(psk->psk_src.port_op,
2322 		    psk->psk_src.port[0], psk->psk_src.port[1], srcport))
2323 			continue;
2324 
2325 		if (psk->psk_dst.port_op != 0 &&
2326 		    ! pf_match_port(psk->psk_dst.port_op,
2327 		    psk->psk_dst.port[0], psk->psk_dst.port[1], dstport))
2328 			continue;
2329 
2330 		if (psk->psk_label[0] &&
2331 		    ! pf_label_match(s->rule.ptr, psk->psk_label))
2332 			continue;
2333 
2334 		if (psk->psk_ifname[0] && strcmp(psk->psk_ifname,
2335 		    kif->pfik_name))
2336 			continue;
2337 
2338 		if (psk->psk_kill_match) {
			/*
			 * Create the key to find matching states, with the
			 * hash row lock held.
			 */
2341 
2342 			bzero(&match_key, sizeof(match_key));
2343 
2344 			if (s->direction == PF_OUT) {
2345 				dir = PF_IN;
2346 				idx = psk->psk_nat ? PF_SK_WIRE : PF_SK_STACK;
2347 			} else {
2348 				dir = PF_OUT;
2349 				idx = psk->psk_nat ? PF_SK_STACK : PF_SK_WIRE;
2350 			}
2351 
2352 			match_key.af = s->key[idx]->af;
2353 			match_key.proto = s->key[idx]->proto;
2354 			PF_ACPY(&match_key.addr[0],
2355 			    &s->key[idx]->addr[1], match_key.af);
2356 			match_key.port[0] = s->key[idx]->port[1];
2357 			PF_ACPY(&match_key.addr[1],
2358 			    &s->key[idx]->addr[0], match_key.af);
2359 			match_key.port[1] = s->key[idx]->port[0];
2360 		}
2361 
2362 		pf_unlink_state(s);
2363 		killed++;
2364 
2365 		if (psk->psk_kill_match)
2366 			killed += pf_kill_matching_state(&match_key, dir);
2367 
2368 		goto relock_DIOCKILLSTATES;
2369 	}
2370 	PF_HASHROW_UNLOCK(ih);
2371 
2372 	return (killed);
2373 }
2374 
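/*
 * Enable pf: attach the pfil hooks and mark the status as running.
 * Returns EEXIST if pf was already running.
 */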
2375 int
2376 pf_start(void)
2377 {
2378 	int error = 0;
2379 
2380 	sx_xlock(&V_pf_ioctl_lock);
2381 	if (V_pf_status.running)
2382 		error = EEXIST;
2383 	else {
2384 		hook_pf();
2385 		if (! TAILQ_EMPTY(V_pf_keth->active.rules))
2386 			hook_pf_eth();
2387 		V_pf_status.running = 1;
2388 		V_pf_status.since = time_second;
2389 		new_unrhdr64(&V_pf_stateid, time_second);
2390 
2391 		DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
2392 	}
2393 	sx_xunlock(&V_pf_ioctl_lock);
2394 
2395 	return (error);
2396 }
2397 
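/*
 * Disable pf: detach the pfil hooks and mark the status as stopped.
 * Returns ENOENT if pf was not running.
 */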
2398 int
2399 pf_stop(void)
2400 {
2401 	int error = 0;
2402 
2403 	sx_xlock(&V_pf_ioctl_lock);
2404 	if (!V_pf_status.running)
2405 		error = ENOENT;
2406 	else {
2407 		V_pf_status.running = 0;
2408 		dehook_pf();
2409 		dehook_pf_eth();
2410 		V_pf_status.since = time_second;
2411 		DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
2412 	}
2413 	sx_xunlock(&V_pf_ioctl_lock);
2414 
2415 	return (error);
2416 }
2417 
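/*
 * Main ioctl handler for the pf(4) character device.  Commands are
 * screened against the securelevel and the descriptor's FWRITE flag
 * before being dispatched with the caller's vnet set.
 */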
2418 static int
2419 pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
2420 {
2421 	int			 error = 0;
2422 	PF_RULES_RLOCK_TRACKER;
2423 
2424 #define	ERROUT_IOCTL(target, x)					\
2425     do {								\
2426 	    error = (x);						\
2427 	    SDT_PROBE3(pf, ioctl, ioctl, error, cmd, error, __LINE__);	\
2428 	    goto target;						\
2429     } while (0)
2430 
2431 
2432 	/* XXX keep in sync with switch() below */
2433 	if (securelevel_gt(td->td_ucred, 2))
2434 		switch (cmd) {
2435 		case DIOCGETRULES:
2436 		case DIOCGETRULENV:
2437 		case DIOCGETADDRS:
2438 		case DIOCGETADDR:
2439 		case DIOCGETSTATE:
2440 		case DIOCGETSTATENV:
2441 		case DIOCSETSTATUSIF:
2442 		case DIOCGETSTATUSNV:
2443 		case DIOCCLRSTATUS:
2444 		case DIOCNATLOOK:
2445 		case DIOCSETDEBUG:
2446 #ifdef COMPAT_FREEBSD14
2447 		case DIOCGETSTATES:
2448 		case DIOCGETSTATESV2:
2449 #endif
2450 		case DIOCGETTIMEOUT:
2451 		case DIOCCLRRULECTRS:
2452 		case DIOCGETLIMIT:
2453 		case DIOCGETALTQSV0:
2454 		case DIOCGETALTQSV1:
2455 		case DIOCGETALTQV0:
2456 		case DIOCGETALTQV1:
2457 		case DIOCGETQSTATSV0:
2458 		case DIOCGETQSTATSV1:
2459 		case DIOCGETRULESETS:
2460 		case DIOCGETRULESET:
2461 		case DIOCRGETTABLES:
2462 		case DIOCRGETTSTATS:
2463 		case DIOCRCLRTSTATS:
2464 		case DIOCRCLRADDRS:
2465 		case DIOCRADDADDRS:
2466 		case DIOCRDELADDRS:
2467 		case DIOCRSETADDRS:
2468 		case DIOCRGETADDRS:
2469 		case DIOCRGETASTATS:
2470 		case DIOCRCLRASTATS:
2471 		case DIOCRTSTADDRS:
2472 		case DIOCOSFPGET:
2473 		case DIOCGETSRCNODES:
2474 		case DIOCCLRSRCNODES:
2475 		case DIOCGETSYNCOOKIES:
2476 		case DIOCIGETIFACES:
2477 		case DIOCGIFSPEEDV0:
2478 		case DIOCGIFSPEEDV1:
2479 		case DIOCSETIFFLAG:
2480 		case DIOCCLRIFFLAG:
2481 		case DIOCGETETHRULES:
2482 		case DIOCGETETHRULE:
2483 		case DIOCGETETHRULESETS:
2484 		case DIOCGETETHRULESET:
2485 			break;
2486 		case DIOCRCLRTABLES:
2487 		case DIOCRADDTABLES:
2488 		case DIOCRDELTABLES:
2489 		case DIOCRSETTFLAGS:
2490 			if (((struct pfioc_table *)addr)->pfrio_flags &
2491 			    PFR_FLAG_DUMMY)
2492 				break; /* dummy operation ok */
2493 			return (EPERM);
2494 		default:
2495 			return (EPERM);
2496 		}
2497 
2498 	if (!(flags & FWRITE))
2499 		switch (cmd) {
2500 		case DIOCGETRULES:
2501 		case DIOCGETADDRS:
2502 		case DIOCGETADDR:
2503 		case DIOCGETSTATE:
2504 		case DIOCGETSTATENV:
2505 		case DIOCGETSTATUSNV:
2506 #ifdef COMPAT_FREEBSD14
2507 		case DIOCGETSTATES:
2508 		case DIOCGETSTATESV2:
2509 #endif
2510 		case DIOCGETTIMEOUT:
2511 		case DIOCGETLIMIT:
2512 		case DIOCGETALTQSV0:
2513 		case DIOCGETALTQSV1:
2514 		case DIOCGETALTQV0:
2515 		case DIOCGETALTQV1:
2516 		case DIOCGETQSTATSV0:
2517 		case DIOCGETQSTATSV1:
2518 		case DIOCGETRULESETS:
2519 		case DIOCGETRULESET:
2520 		case DIOCNATLOOK:
2521 		case DIOCRGETTABLES:
2522 		case DIOCRGETTSTATS:
2523 		case DIOCRGETADDRS:
2524 		case DIOCRGETASTATS:
2525 		case DIOCRTSTADDRS:
2526 		case DIOCOSFPGET:
2527 		case DIOCGETSRCNODES:
2528 		case DIOCGETSYNCOOKIES:
2529 		case DIOCIGETIFACES:
2530 		case DIOCGIFSPEEDV1:
2531 		case DIOCGIFSPEEDV0:
2532 		case DIOCGETRULENV:
2533 		case DIOCGETETHRULES:
2534 		case DIOCGETETHRULE:
2535 		case DIOCGETETHRULESETS:
2536 		case DIOCGETETHRULESET:
2537 			break;
2538 		case DIOCRCLRTABLES:
2539 		case DIOCRADDTABLES:
2540 		case DIOCRDELTABLES:
2541 		case DIOCRCLRTSTATS:
2542 		case DIOCRCLRADDRS:
2543 		case DIOCRADDADDRS:
2544 		case DIOCRDELADDRS:
2545 		case DIOCRSETADDRS:
2546 		case DIOCRSETTFLAGS:
2547 			if (((struct pfioc_table *)addr)->pfrio_flags &
2548 			    PFR_FLAG_DUMMY) {
2549 				flags |= FWRITE; /* need write lock for dummy */
2550 				break; /* dummy operation ok */
2551 			}
2552 			return (EACCES);
2553 		default:
2554 			return (EACCES);
2555 		}
2556 
2557 	CURVNET_SET(TD_TO_VNET(td));
2558 
2559 	switch (cmd) {
2560 #ifdef COMPAT_FREEBSD14
2561 	case DIOCSTART:
2562 		error = pf_start();
2563 		break;
2564 
2565 	case DIOCSTOP:
2566 		error = pf_stop();
2567 		break;
2568 #endif
2569 
2570 	case DIOCGETETHRULES: {
2571 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
2572 		nvlist_t		*nvl;
2573 		void			*packed;
2574 		struct pf_keth_rule	*tail;
2575 		struct pf_keth_ruleset	*rs;
2576 		u_int32_t		 ticket, nr;
2577 		const char		*anchor = "";
2578 
2579 		nvl = NULL;
2580 		packed = NULL;
2581 
2582 #define	ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULES_error, x)
2583 
2584 		if (nv->len > pf_ioctl_maxcount)
2585 			ERROUT(ENOMEM);
2586 
2587 		/* Copy the request in */
2588 		packed = malloc(nv->len, M_NVLIST, M_WAITOK);
2589 		if (packed == NULL)
2590 			ERROUT(ENOMEM);
2591 
2592 		error = copyin(nv->data, packed, nv->len);
2593 		if (error)
2594 			ERROUT(error);
2595 
2596 		nvl = nvlist_unpack(packed, nv->len, 0);
2597 		if (nvl == NULL)
2598 			ERROUT(EBADMSG);
2599 
2600 		if (! nvlist_exists_string(nvl, "anchor"))
2601 			ERROUT(EBADMSG);
2602 
2603 		anchor = nvlist_get_string(nvl, "anchor");
2604 
2605 		rs = pf_find_keth_ruleset(anchor);
2606 
2607 		nvlist_destroy(nvl);
2608 		nvl = NULL;
2609 		free(packed, M_NVLIST);
2610 		packed = NULL;
2611 
2612 		if (rs == NULL)
2613 			ERROUT(ENOENT);
2614 
2615 		/* Reply */
2616 		nvl = nvlist_create(0);
2617 		if (nvl == NULL)
2618 			ERROUT(ENOMEM);
2619 
2620 		PF_RULES_RLOCK();
2621 
2622 		ticket = rs->active.ticket;
2623 		tail = TAILQ_LAST(rs->active.rules, pf_keth_ruleq);
2624 		if (tail)
2625 			nr = tail->nr + 1;
2626 		else
2627 			nr = 0;
2628 
2629 		PF_RULES_RUNLOCK();
2630 
2631 		nvlist_add_number(nvl, "ticket", ticket);
2632 		nvlist_add_number(nvl, "nr", nr);
2633 
2634 		packed = nvlist_pack(nvl, &nv->len);
2635 		if (packed == NULL)
2636 			ERROUT(ENOMEM);
2637 
2638 		if (nv->size == 0)
2639 			ERROUT(0);
2640 		else if (nv->size < nv->len)
2641 			ERROUT(ENOSPC);
2642 
2643 		error = copyout(packed, nv->data, nv->len);
2644 
2645 #undef ERROUT
2646 DIOCGETETHRULES_error:
2647 		free(packed, M_NVLIST);
2648 		nvlist_destroy(nvl);
2649 		break;
2650 	}
2651 
2652 	case DIOCGETETHRULE: {
2653 		struct epoch_tracker	 et;
2654 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
2655 		nvlist_t		*nvl = NULL;
2656 		void			*nvlpacked = NULL;
2657 		struct pf_keth_rule	*rule = NULL;
2658 		struct pf_keth_ruleset	*rs;
2659 		u_int32_t		 ticket, nr;
2660 		bool			 clear = false;
2661 		const char		*anchor;
2662 
2663 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULE_error, x)
2664 
2665 		if (nv->len > pf_ioctl_maxcount)
2666 			ERROUT(ENOMEM);
2667 
2668 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
2669 		if (nvlpacked == NULL)
2670 			ERROUT(ENOMEM);
2671 
2672 		error = copyin(nv->data, nvlpacked, nv->len);
2673 		if (error)
2674 			ERROUT(error);
2675 
2676 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
2677 		if (nvl == NULL)
2678 			ERROUT(EBADMSG);
2679 		if (! nvlist_exists_number(nvl, "ticket"))
2680 			ERROUT(EBADMSG);
2681 		ticket = nvlist_get_number(nvl, "ticket");
2682 		if (! nvlist_exists_string(nvl, "anchor"))
2683 			ERROUT(EBADMSG);
2684 		anchor = nvlist_get_string(nvl, "anchor");
2685 
2686 		if (nvlist_exists_bool(nvl, "clear"))
2687 			clear = nvlist_get_bool(nvl, "clear");
2688 
2689 		if (clear && !(flags & FWRITE))
2690 			ERROUT(EACCES);
2691 
2692 		if (! nvlist_exists_number(nvl, "nr"))
2693 			ERROUT(EBADMSG);
2694 		nr = nvlist_get_number(nvl, "nr");
2695 
2696 		PF_RULES_RLOCK();
2697 		rs = pf_find_keth_ruleset(anchor);
2698 		if (rs == NULL) {
2699 			PF_RULES_RUNLOCK();
2700 			ERROUT(ENOENT);
2701 		}
2702 		if (ticket != rs->active.ticket) {
2703 			PF_RULES_RUNLOCK();
2704 			ERROUT(EBUSY);
2705 		}
2706 
2707 		nvlist_destroy(nvl);
2708 		nvl = NULL;
2709 		free(nvlpacked, M_NVLIST);
2710 		nvlpacked = NULL;
2711 
2712 		rule = TAILQ_FIRST(rs->active.rules);
2713 		while ((rule != NULL) && (rule->nr != nr))
2714 			rule = TAILQ_NEXT(rule, entries);
2715 		if (rule == NULL) {
2716 			PF_RULES_RUNLOCK();
2717 			ERROUT(ENOENT);
2718 		}
2719 		/* Make sure rule can't go away. */
2720 		NET_EPOCH_ENTER(et);
2721 		PF_RULES_RUNLOCK();
		nvl = pf_keth_rule_to_nveth_rule(rule);
		if (nvl == NULL) {
			NET_EPOCH_EXIT(et);
			ERROUT(ENOMEM);
		}
		if (pf_keth_anchor_nvcopyout(rs, rule, nvl)) {
			NET_EPOCH_EXIT(et);
			ERROUT(EBUSY);
		}
		NET_EPOCH_EXIT(et);
2728 
2729 		nvlpacked = nvlist_pack(nvl, &nv->len);
2730 		if (nvlpacked == NULL)
2731 			ERROUT(ENOMEM);
2732 
2733 		if (nv->size == 0)
2734 			ERROUT(0);
2735 		else if (nv->size < nv->len)
2736 			ERROUT(ENOSPC);
2737 
2738 		error = copyout(nvlpacked, nv->data, nv->len);
2739 		if (error == 0 && clear) {
2740 			counter_u64_zero(rule->evaluations);
2741 			for (int i = 0; i < 2; i++) {
2742 				counter_u64_zero(rule->packets[i]);
2743 				counter_u64_zero(rule->bytes[i]);
2744 			}
2745 		}
2746 
2747 #undef ERROUT
2748 DIOCGETETHRULE_error:
2749 		free(nvlpacked, M_NVLIST);
2750 		nvlist_destroy(nvl);
2751 		break;
2752 	}
2753 
2754 	case DIOCADDETHRULE: {
2755 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
2756 		nvlist_t		*nvl = NULL;
2757 		void			*nvlpacked = NULL;
2758 		struct pf_keth_rule	*rule = NULL, *tail = NULL;
2759 		struct pf_keth_ruleset	*ruleset = NULL;
2760 		struct pfi_kkif		*kif = NULL, *bridge_to_kif = NULL;
2761 		const char		*anchor = "", *anchor_call = "";
2762 
2763 #define ERROUT(x)	ERROUT_IOCTL(DIOCADDETHRULE_error, x)
2764 
2765 		if (nv->len > pf_ioctl_maxcount)
2766 			ERROUT(ENOMEM);
2767 
2768 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
2769 		if (nvlpacked == NULL)
2770 			ERROUT(ENOMEM);
2771 
2772 		error = copyin(nv->data, nvlpacked, nv->len);
2773 		if (error)
2774 			ERROUT(error);
2775 
2776 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
2777 		if (nvl == NULL)
2778 			ERROUT(EBADMSG);
2779 
2780 		if (! nvlist_exists_number(nvl, "ticket"))
2781 			ERROUT(EBADMSG);
2782 
2783 		if (nvlist_exists_string(nvl, "anchor"))
2784 			anchor = nvlist_get_string(nvl, "anchor");
2785 		if (nvlist_exists_string(nvl, "anchor_call"))
2786 			anchor_call = nvlist_get_string(nvl, "anchor_call");
2787 
2788 		ruleset = pf_find_keth_ruleset(anchor);
2789 		if (ruleset == NULL)
2790 			ERROUT(EINVAL);
2791 
2792 		if (nvlist_get_number(nvl, "ticket") !=
2793 		    ruleset->inactive.ticket) {
2794 			DPFPRINTF(PF_DEBUG_MISC,
2795 			    ("ticket: %d != %d\n",
2796 			    (u_int32_t)nvlist_get_number(nvl, "ticket"),
2797 			    ruleset->inactive.ticket));
2798 			ERROUT(EBUSY);
2799 		}
2800 
2801 		rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
2802 		if (rule == NULL)
2803 			ERROUT(ENOMEM);
2804 		rule->timestamp = NULL;
2805 
2806 		error = pf_nveth_rule_to_keth_rule(nvl, rule);
2807 		if (error != 0)
2808 			ERROUT(error);
2809 
2810 		if (rule->ifname[0])
2811 			kif = pf_kkif_create(M_WAITOK);
2812 		if (rule->bridge_to_name[0])
2813 			bridge_to_kif = pf_kkif_create(M_WAITOK);
2814 		rule->evaluations = counter_u64_alloc(M_WAITOK);
2815 		for (int i = 0; i < 2; i++) {
2816 			rule->packets[i] = counter_u64_alloc(M_WAITOK);
2817 			rule->bytes[i] = counter_u64_alloc(M_WAITOK);
2818 		}
2819 		rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
2820 		    M_WAITOK | M_ZERO);
2821 
2822 		PF_RULES_WLOCK();
2823 
2824 		if (rule->ifname[0]) {
2825 			rule->kif = pfi_kkif_attach(kif, rule->ifname);
2826 			pfi_kkif_ref(rule->kif);
2827 		} else
2828 			rule->kif = NULL;
2829 		if (rule->bridge_to_name[0]) {
2830 			rule->bridge_to = pfi_kkif_attach(bridge_to_kif,
2831 			    rule->bridge_to_name);
2832 			pfi_kkif_ref(rule->bridge_to);
2833 		} else
2834 			rule->bridge_to = NULL;
2835 
2836 #ifdef ALTQ
2837 		/* set queue IDs */
2838 		if (rule->qname[0] != 0) {
2839 			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
2840 				error = EBUSY;
2841 			else
2844 #endif
2845 		if (rule->tagname[0])
2846 			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
2847 				error = EBUSY;
2848 		if (rule->match_tagname[0])
2849 			if ((rule->match_tag = pf_tagname2tag(
2850 			    rule->match_tagname)) == 0)
2851 				error = EBUSY;
2852 
2853 		if (error == 0 && rule->ipdst.addr.type == PF_ADDR_TABLE)
2854 			error = pf_eth_addr_setup(ruleset, &rule->ipdst.addr);
2855 		if (error == 0 && rule->ipsrc.addr.type == PF_ADDR_TABLE)
2856 			error = pf_eth_addr_setup(ruleset, &rule->ipsrc.addr);
2857 
2858 		if (error) {
2859 			pf_free_eth_rule(rule);
2860 			PF_RULES_WUNLOCK();
2861 			ERROUT(error);
2862 		}
2863 
2864 		if (pf_keth_anchor_setup(rule, ruleset, anchor_call)) {
2865 			pf_free_eth_rule(rule);
2866 			PF_RULES_WUNLOCK();
2867 			ERROUT(EINVAL);
2868 		}
2869 
2870 		tail = TAILQ_LAST(ruleset->inactive.rules, pf_keth_ruleq);
2871 		if (tail)
2872 			rule->nr = tail->nr + 1;
2873 		else
2874 			rule->nr = 0;
2875 
2876 		TAILQ_INSERT_TAIL(ruleset->inactive.rules, rule, entries);
2877 
2878 		PF_RULES_WUNLOCK();
2879 
2880 #undef ERROUT
2881 DIOCADDETHRULE_error:
2882 		nvlist_destroy(nvl);
2883 		free(nvlpacked, M_NVLIST);
2884 		break;
2885 	}
2886 
2887 	case DIOCGETETHRULESETS: {
2888 		struct epoch_tracker	 et;
2889 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
2890 		nvlist_t		*nvl = NULL;
2891 		void			*nvlpacked = NULL;
2892 		struct pf_keth_ruleset	*ruleset;
2893 		struct pf_keth_anchor	*anchor;
2894 		int			 nr = 0;
2895 
2896 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULESETS_error, x)
2897 
2898 		if (nv->len > pf_ioctl_maxcount)
2899 			ERROUT(ENOMEM);
2900 
2901 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
2902 		if (nvlpacked == NULL)
2903 			ERROUT(ENOMEM);
2904 
2905 		error = copyin(nv->data, nvlpacked, nv->len);
2906 		if (error)
2907 			ERROUT(error);
2908 
2909 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
2910 		if (nvl == NULL)
2911 			ERROUT(EBADMSG);
2912 		if (! nvlist_exists_string(nvl, "path"))
2913 			ERROUT(EBADMSG);
2914 
2915 		NET_EPOCH_ENTER(et);
2916 
2917 		if ((ruleset = pf_find_keth_ruleset(
2918 		    nvlist_get_string(nvl, "path"))) == NULL) {
2919 			NET_EPOCH_EXIT(et);
2920 			ERROUT(ENOENT);
2921 		}
2922 
2923 		if (ruleset->anchor == NULL) {
2924 			RB_FOREACH(anchor, pf_keth_anchor_global, &V_pf_keth_anchors)
2925 				if (anchor->parent == NULL)
2926 					nr++;
2927 		} else {
2928 			RB_FOREACH(anchor, pf_keth_anchor_node,
2929 			    &ruleset->anchor->children)
2930 				nr++;
2931 		}
2932 
2933 		NET_EPOCH_EXIT(et);
2934 
2935 		nvlist_destroy(nvl);
2936 		nvl = NULL;
2937 		free(nvlpacked, M_NVLIST);
2938 		nvlpacked = NULL;
2939 
2940 		nvl = nvlist_create(0);
2941 		if (nvl == NULL)
2942 			ERROUT(ENOMEM);
2943 
2944 		nvlist_add_number(nvl, "nr", nr);
2945 
2946 		nvlpacked = nvlist_pack(nvl, &nv->len);
2947 		if (nvlpacked == NULL)
2948 			ERROUT(ENOMEM);
2949 
2950 		if (nv->size == 0)
2951 			ERROUT(0);
2952 		else if (nv->size < nv->len)
2953 			ERROUT(ENOSPC);
2954 
2955 		error = copyout(nvlpacked, nv->data, nv->len);
2956 
2957 #undef ERROUT
2958 DIOCGETETHRULESETS_error:
2959 		free(nvlpacked, M_NVLIST);
2960 		nvlist_destroy(nvl);
2961 		break;
2962 	}
2963 
2964 	case DIOCGETETHRULESET: {
2965 		struct epoch_tracker	 et;
2966 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
2967 		nvlist_t		*nvl = NULL;
2968 		void			*nvlpacked = NULL;
2969 		struct pf_keth_ruleset	*ruleset;
2970 		struct pf_keth_anchor	*anchor;
2971 		int			 nr = 0, req_nr = 0;
2972 		bool			 found = false;
2973 
2974 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULESET_error, x)
2975 
2976 		if (nv->len > pf_ioctl_maxcount)
2977 			ERROUT(ENOMEM);
2978 
2979 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
2980 		if (nvlpacked == NULL)
2981 			ERROUT(ENOMEM);
2982 
2983 		error = copyin(nv->data, nvlpacked, nv->len);
2984 		if (error)
2985 			ERROUT(error);
2986 
2987 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
2988 		if (nvl == NULL)
2989 			ERROUT(EBADMSG);
2990 		if (! nvlist_exists_string(nvl, "path"))
2991 			ERROUT(EBADMSG);
2992 		if (! nvlist_exists_number(nvl, "nr"))
2993 			ERROUT(EBADMSG);
2994 
2995 		req_nr = nvlist_get_number(nvl, "nr");
2996 
2997 		NET_EPOCH_ENTER(et);
2998 
2999 		if ((ruleset = pf_find_keth_ruleset(
3000 		    nvlist_get_string(nvl, "path"))) == NULL) {
3001 			NET_EPOCH_EXIT(et);
3002 			ERROUT(ENOENT);
3003 		}
3004 
3005 		nvlist_destroy(nvl);
3006 		nvl = NULL;
3007 		free(nvlpacked, M_NVLIST);
3008 		nvlpacked = NULL;
3009 
3010 		nvl = nvlist_create(0);
3011 		if (nvl == NULL) {
3012 			NET_EPOCH_EXIT(et);
3013 			ERROUT(ENOMEM);
3014 		}
3015 
3016 		if (ruleset->anchor == NULL) {
3017 			RB_FOREACH(anchor, pf_keth_anchor_global,
3018 			    &V_pf_keth_anchors) {
3019 				if (anchor->parent == NULL && nr++ == req_nr) {
3020 					found = true;
3021 					break;
3022 				}
3023 			}
3024 		} else {
3025 			RB_FOREACH(anchor, pf_keth_anchor_node,
3026 			     &ruleset->anchor->children) {
3027 				if (nr++ == req_nr) {
3028 					found = true;
3029 					break;
3030 				}
3031 			}
3032 		}
3033 
3034 		NET_EPOCH_EXIT(et);
3035 		if (found) {
3036 			nvlist_add_number(nvl, "nr", nr);
3037 			nvlist_add_string(nvl, "name", anchor->name);
3038 			if (ruleset->anchor)
3039 				nvlist_add_string(nvl, "path",
3040 				    ruleset->anchor->path);
3041 			else
3042 				nvlist_add_string(nvl, "path", "");
3043 		} else {
3044 			ERROUT(EBUSY);
3045 		}
3046 
3047 		nvlpacked = nvlist_pack(nvl, &nv->len);
3048 		if (nvlpacked == NULL)
3049 			ERROUT(ENOMEM);
3050 
3051 		if (nv->size == 0)
3052 			ERROUT(0);
3053 		else if (nv->size < nv->len)
3054 			ERROUT(ENOSPC);
3055 
3056 		error = copyout(nvlpacked, nv->data, nv->len);
3057 
3058 #undef ERROUT
3059 DIOCGETETHRULESET_error:
3060 		free(nvlpacked, M_NVLIST);
3061 		nvlist_destroy(nvl);
3062 		break;
3063 	}
3064 
3065 	case DIOCADDRULENV: {
3066 		struct pfioc_nv	*nv = (struct pfioc_nv *)addr;
3067 		nvlist_t	*nvl = NULL;
3068 		void		*nvlpacked = NULL;
3069 		struct pf_krule	*rule = NULL;
3070 		const char	*anchor = "", *anchor_call = "";
3071 		uint32_t	 ticket = 0, pool_ticket = 0;
3072 
3073 #define	ERROUT(x)	ERROUT_IOCTL(DIOCADDRULENV_error, x)
3074 
3075 		if (nv->len > pf_ioctl_maxcount)
3076 			ERROUT(ENOMEM);
3077 
3078 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3079 		error = copyin(nv->data, nvlpacked, nv->len);
3080 		if (error)
3081 			ERROUT(error);
3082 
3083 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3084 		if (nvl == NULL)
3085 			ERROUT(EBADMSG);
3086 
3087 		if (! nvlist_exists_number(nvl, "ticket"))
3088 			ERROUT(EINVAL);
3089 		ticket = nvlist_get_number(nvl, "ticket");
3090 
3091 		if (! nvlist_exists_number(nvl, "pool_ticket"))
3092 			ERROUT(EINVAL);
3093 		pool_ticket = nvlist_get_number(nvl, "pool_ticket");
3094 
3095 		if (! nvlist_exists_nvlist(nvl, "rule"))
3096 			ERROUT(EINVAL);
3097 
3098 		rule = pf_krule_alloc();
3099 		error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"),
3100 		    rule);
3101 		if (error)
3102 			ERROUT(error);
3103 
3104 		if (nvlist_exists_string(nvl, "anchor"))
3105 			anchor = nvlist_get_string(nvl, "anchor");
3106 		if (nvlist_exists_string(nvl, "anchor_call"))
3107 			anchor_call = nvlist_get_string(nvl, "anchor_call");
3108 
3109 		if ((error = nvlist_error(nvl)))
3110 			ERROUT(error);
3111 
3112 		/* Frees rule on error */
3113 		error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor,
3114 		    anchor_call, td->td_ucred->cr_ruid,
3115 		    td->td_proc ? td->td_proc->p_pid : 0);
3116 
3117 		nvlist_destroy(nvl);
3118 		free(nvlpacked, M_NVLIST);
3119 		break;
3120 #undef ERROUT
3121 DIOCADDRULENV_error:
3122 		pf_krule_free(rule);
3123 		nvlist_destroy(nvl);
3124 		free(nvlpacked, M_NVLIST);
3125 
3126 		break;
3127 	}
3128 	case DIOCADDRULE: {
3129 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
3130 		struct pf_krule		*rule;
3131 
3132 		rule = pf_krule_alloc();
3133 		error = pf_rule_to_krule(&pr->rule, rule);
3134 		if (error != 0) {
3135 			pf_krule_free(rule);
3136 			break;
3137 		}
3138 
3139 		pr->anchor[sizeof(pr->anchor) - 1] = 0;
3140 
3141 		/* Frees rule on error */
3142 		error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket,
3143 		    pr->anchor, pr->anchor_call, td->td_ucred->cr_ruid,
3144 		    td->td_proc ? td->td_proc->p_pid : 0);
3145 		break;
3146 	}
3147 
3148 	case DIOCGETRULES: {
3149 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
3150 
3151 		pr->anchor[sizeof(pr->anchor) - 1] = 0;
3152 
3153 		error = pf_ioctl_getrules(pr);
3154 
3155 		break;
3156 	}
3157 
3158 	case DIOCGETRULENV: {
3159 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3160 		nvlist_t		*nvrule = NULL;
3161 		nvlist_t		*nvl = NULL;
3162 		struct pf_kruleset	*ruleset;
3163 		struct pf_krule		*rule;
3164 		void			*nvlpacked = NULL;
3165 		int			 rs_num, nr;
3166 		bool			 clear_counter = false;
3167 
3168 #define	ERROUT(x)	ERROUT_IOCTL(DIOCGETRULENV_error, x)
3169 
3170 		if (nv->len > pf_ioctl_maxcount)
3171 			ERROUT(ENOMEM);
3172 
3173 		/* Copy the request in */
3174 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3175 		if (nvlpacked == NULL)
3176 			ERROUT(ENOMEM);
3177 
3178 		error = copyin(nv->data, nvlpacked, nv->len);
3179 		if (error)
3180 			ERROUT(error);
3181 
3182 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3183 		if (nvl == NULL)
3184 			ERROUT(EBADMSG);
3185 
3186 		if (! nvlist_exists_string(nvl, "anchor"))
3187 			ERROUT(EBADMSG);
3188 		if (! nvlist_exists_number(nvl, "ruleset"))
3189 			ERROUT(EBADMSG);
3190 		if (! nvlist_exists_number(nvl, "ticket"))
3191 			ERROUT(EBADMSG);
3192 		if (! nvlist_exists_number(nvl, "nr"))
3193 			ERROUT(EBADMSG);
3194 
3195 		if (nvlist_exists_bool(nvl, "clear_counter"))
3196 			clear_counter = nvlist_get_bool(nvl, "clear_counter");
3197 
3198 		if (clear_counter && !(flags & FWRITE))
3199 			ERROUT(EACCES);
3200 
3201 		nr = nvlist_get_number(nvl, "nr");
3202 
3203 		PF_RULES_WLOCK();
3204 		ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor"));
3205 		if (ruleset == NULL) {
3206 			PF_RULES_WUNLOCK();
3207 			ERROUT(ENOENT);
3208 		}
3209 
3210 		rs_num = pf_get_ruleset_number(nvlist_get_number(nvl, "ruleset"));
3211 		if (rs_num >= PF_RULESET_MAX) {
3212 			PF_RULES_WUNLOCK();
3213 			ERROUT(EINVAL);
3214 		}
3215 
3216 		if (nvlist_get_number(nvl, "ticket") !=
3217 		    ruleset->rules[rs_num].active.ticket) {
3218 			PF_RULES_WUNLOCK();
3219 			ERROUT(EBUSY);
3220 		}
3221 
3222 		if ((error = nvlist_error(nvl))) {
3223 			PF_RULES_WUNLOCK();
3224 			ERROUT(error);
3225 		}
3226 
3227 		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3228 		while ((rule != NULL) && (rule->nr != nr))
3229 			rule = TAILQ_NEXT(rule, entries);
3230 		if (rule == NULL) {
3231 			PF_RULES_WUNLOCK();
3232 			ERROUT(EBUSY);
3233 		}
3234 
3235 		nvrule = pf_krule_to_nvrule(rule);
3236 
3237 		nvlist_destroy(nvl);
3238 		nvl = nvlist_create(0);
3239 		if (nvl == NULL) {
3240 			PF_RULES_WUNLOCK();
3241 			ERROUT(ENOMEM);
3242 		}
3243 		nvlist_add_number(nvl, "nr", nr);
3244 		nvlist_add_nvlist(nvl, "rule", nvrule);
3245 		nvlist_destroy(nvrule);
3246 		nvrule = NULL;
3247 		if (pf_kanchor_nvcopyout(ruleset, rule, nvl)) {
3248 			PF_RULES_WUNLOCK();
3249 			ERROUT(EBUSY);
3250 		}
3251 
3252 		free(nvlpacked, M_NVLIST);
3253 		nvlpacked = nvlist_pack(nvl, &nv->len);
3254 		if (nvlpacked == NULL) {
3255 			PF_RULES_WUNLOCK();
3256 			ERROUT(ENOMEM);
3257 		}
3258 
3259 		if (nv->size == 0) {
3260 			PF_RULES_WUNLOCK();
3261 			ERROUT(0);
		} else if (nv->size < nv->len) {
3264 			PF_RULES_WUNLOCK();
3265 			ERROUT(ENOSPC);
3266 		}
3267 
3268 		if (clear_counter) {
3269 			pf_counter_u64_zero(&rule->evaluations);
3270 			for (int i = 0; i < 2; i++) {
3271 				pf_counter_u64_zero(&rule->packets[i]);
3272 				pf_counter_u64_zero(&rule->bytes[i]);
3273 			}
3274 			counter_u64_zero(rule->states_tot);
3275 		}
3276 		PF_RULES_WUNLOCK();
3277 
3278 		error = copyout(nvlpacked, nv->data, nv->len);
3279 
3280 #undef ERROUT
3281 DIOCGETRULENV_error:
3282 		free(nvlpacked, M_NVLIST);
3283 		nvlist_destroy(nvrule);
3284 		nvlist_destroy(nvl);
3285 
3286 		break;
3287 	}
3288 
3289 	case DIOCCHANGERULE: {
3290 		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
3291 		struct pf_kruleset	*ruleset;
3292 		struct pf_krule		*oldrule = NULL, *newrule = NULL;
3293 		struct pfi_kkif		*kif = NULL;
3294 		struct pf_kpooladdr	*pa;
3295 		u_int32_t		 nr = 0;
3296 		int			 rs_num;
3297 
3298 		pcr->anchor[sizeof(pcr->anchor) - 1] = 0;
3299 
3300 		if (pcr->action < PF_CHANGE_ADD_HEAD ||
3301 		    pcr->action > PF_CHANGE_GET_TICKET) {
3302 			error = EINVAL;
3303 			break;
3304 		}
3305 		if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3306 			error = EINVAL;
3307 			break;
3308 		}
3309 
3310 		if (pcr->action != PF_CHANGE_REMOVE) {
3311 			newrule = pf_krule_alloc();
3312 			error = pf_rule_to_krule(&pcr->rule, newrule);
3313 			if (error != 0) {
3314 				pf_krule_free(newrule);
3315 				break;
3316 			}
3317 
3318 			if (newrule->ifname[0])
3319 				kif = pf_kkif_create(M_WAITOK);
3320 			pf_counter_u64_init(&newrule->evaluations, M_WAITOK);
3321 			for (int i = 0; i < 2; i++) {
3322 				pf_counter_u64_init(&newrule->packets[i], M_WAITOK);
3323 				pf_counter_u64_init(&newrule->bytes[i], M_WAITOK);
3324 			}
3325 			newrule->states_cur = counter_u64_alloc(M_WAITOK);
3326 			newrule->states_tot = counter_u64_alloc(M_WAITOK);
3327 			newrule->src_nodes = counter_u64_alloc(M_WAITOK);
3328 			newrule->cuid = td->td_ucred->cr_ruid;
3329 			newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
3330 			TAILQ_INIT(&newrule->rpool.list);
3331 		}
3332 #define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGERULE_error, x)
3333 
3334 		PF_CONFIG_LOCK();
3335 		PF_RULES_WLOCK();
3336 #ifdef PF_WANT_32_TO_64_COUNTER
3337 		if (newrule != NULL) {
3338 			LIST_INSERT_HEAD(&V_pf_allrulelist, newrule, allrulelist);
3339 			newrule->allrulelinked = true;
3340 			V_pf_allrulecount++;
3341 		}
3342 #endif
3343 
3344 		if (!(pcr->action == PF_CHANGE_REMOVE ||
3345 		    pcr->action == PF_CHANGE_GET_TICKET) &&
3346 		    pcr->pool_ticket != V_ticket_pabuf)
3347 			ERROUT(EBUSY);
3348 
3349 		ruleset = pf_find_kruleset(pcr->anchor);
3350 		if (ruleset == NULL)
3351 			ERROUT(EINVAL);
3352 
3353 		rs_num = pf_get_ruleset_number(pcr->rule.action);
3354 		if (rs_num >= PF_RULESET_MAX)
3355 			ERROUT(EINVAL);
3356 
3357 		/*
3358 		 * XXXMJG: there is no guarantee that the ruleset was
3359 		 * created by the usual route of calling DIOCXBEGIN.
3360 		 * As a result it is possible the rule tree will not
3361 		 * be allocated yet. Hack around it by doing it here.
3362 		 * Note it is fine to let the tree persist in case of
3363 		 * error as it will be freed down the road on future
3364 		 * updates (if need be).
3365 		 */
3366 		if (ruleset->rules[rs_num].active.tree == NULL) {
3367 			ruleset->rules[rs_num].active.tree = pf_rule_tree_alloc(M_NOWAIT);
3368 			if (ruleset->rules[rs_num].active.tree == NULL) {
3369 				ERROUT(ENOMEM);
3370 			}
3371 		}
3372 
3373 		if (pcr->action == PF_CHANGE_GET_TICKET) {
3374 			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
3375 			ERROUT(0);
3376 		} else if (pcr->ticket !=
3377 			    ruleset->rules[rs_num].active.ticket)
3378 				ERROUT(EINVAL);
3379 
3380 		if (pcr->action != PF_CHANGE_REMOVE) {
3381 			if (newrule->ifname[0]) {
3382 				newrule->kif = pfi_kkif_attach(kif,
3383 				    newrule->ifname);
3384 				kif = NULL;
3385 				pfi_kkif_ref(newrule->kif);
3386 			} else
3387 				newrule->kif = NULL;
3388 
3389 			if (newrule->rtableid > 0 &&
3390 			    newrule->rtableid >= rt_numfibs)
3391 				error = EBUSY;
3392 
3393 #ifdef ALTQ
3394 			/* set queue IDs */
3395 			if (newrule->qname[0] != 0) {
3396 				if ((newrule->qid =
3397 				    pf_qname2qid(newrule->qname)) == 0)
3398 					error = EBUSY;
3399 				else if (newrule->pqname[0] != 0) {
3400 					if ((newrule->pqid =
3401 					    pf_qname2qid(newrule->pqname)) == 0)
3402 						error = EBUSY;
3403 				} else
3404 					newrule->pqid = newrule->qid;
3405 			}
3406 #endif /* ALTQ */
3407 			if (newrule->tagname[0])
3408 				if ((newrule->tag =
3409 				    pf_tagname2tag(newrule->tagname)) == 0)
3410 					error = EBUSY;
3411 			if (newrule->match_tagname[0])
3412 				if ((newrule->match_tag = pf_tagname2tag(
3413 				    newrule->match_tagname)) == 0)
3414 					error = EBUSY;
3415 			if (newrule->rt && !newrule->direction)
3416 				error = EINVAL;
3417 			if (!newrule->log)
3418 				newrule->logif = 0;
3419 			if (newrule->logif >= PFLOGIFS_MAX)
3420 				error = EINVAL;
3421 			if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
3422 				error = ENOMEM;
3423 			if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
3424 				error = ENOMEM;
3425 			if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call))
3426 				error = EINVAL;
3427 			TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
3428 				if (pa->addr.type == PF_ADDR_TABLE) {
3429 					pa->addr.p.tbl =
3430 					    pfr_attach_table(ruleset,
3431 					    pa->addr.v.tblname);
3432 					if (pa->addr.p.tbl == NULL)
3433 						error = ENOMEM;
3434 				}
3435 
3436 			newrule->overload_tbl = NULL;
3437 			if (newrule->overload_tblname[0]) {
3438 				if ((newrule->overload_tbl = pfr_attach_table(
3439 				    ruleset, newrule->overload_tblname)) ==
3440 				    NULL)
3441 					error = EINVAL;
3442 				else
3443 					newrule->overload_tbl->pfrkt_flags |=
3444 					    PFR_TFLAG_ACTIVE;
3445 			}
3446 
3447 			pf_mv_kpool(&V_pf_pabuf, &newrule->rpool.list);
3448 			if (((((newrule->action == PF_NAT) ||
3449 			    (newrule->action == PF_RDR) ||
3450 			    (newrule->action == PF_BINAT) ||
3451 			    (newrule->rt > PF_NOPFROUTE)) &&
3452 			    !newrule->anchor)) &&
3453 			    (TAILQ_FIRST(&newrule->rpool.list) == NULL))
3454 				error = EINVAL;
3455 
3456 			if (error) {
3457 				pf_free_rule(newrule);
3458 				PF_RULES_WUNLOCK();
3459 				PF_CONFIG_UNLOCK();
3460 				break;
3461 			}
3462 
3463 			newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
3464 		}
3465 		pf_empty_kpool(&V_pf_pabuf);
3466 
3467 		if (pcr->action == PF_CHANGE_ADD_HEAD)
3468 			oldrule = TAILQ_FIRST(
3469 			    ruleset->rules[rs_num].active.ptr);
3470 		else if (pcr->action == PF_CHANGE_ADD_TAIL)
3471 			oldrule = TAILQ_LAST(
3472 			    ruleset->rules[rs_num].active.ptr, pf_krulequeue);
3473 		else {
3474 			oldrule = TAILQ_FIRST(
3475 			    ruleset->rules[rs_num].active.ptr);
3476 			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
3477 				oldrule = TAILQ_NEXT(oldrule, entries);
3478 			if (oldrule == NULL) {
3479 				if (newrule != NULL)
3480 					pf_free_rule(newrule);
3481 				PF_RULES_WUNLOCK();
3482 				PF_CONFIG_UNLOCK();
3483 				error = EINVAL;
3484 				break;
3485 			}
3486 		}
3487 
3488 		if (pcr->action == PF_CHANGE_REMOVE) {
3489 			pf_unlink_rule(ruleset->rules[rs_num].active.ptr,
3490 			    oldrule);
3491 			RB_REMOVE(pf_krule_global,
3492 			    ruleset->rules[rs_num].active.tree, oldrule);
3493 			ruleset->rules[rs_num].active.rcount--;
3494 		} else {
3495 			pf_hash_rule(newrule);
3496 			if (RB_INSERT(pf_krule_global,
3497 			    ruleset->rules[rs_num].active.tree, newrule) != NULL) {
3498 				pf_free_rule(newrule);
3499 				PF_RULES_WUNLOCK();
3500 				PF_CONFIG_UNLOCK();
3501 				error = EEXIST;
3502 				break;
3503 			}
3504 
3505 			if (oldrule == NULL)
3506 				TAILQ_INSERT_TAIL(
3507 				    ruleset->rules[rs_num].active.ptr,
3508 				    newrule, entries);
3509 			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
3510 			    pcr->action == PF_CHANGE_ADD_BEFORE)
3511 				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
3512 			else
3513 				TAILQ_INSERT_AFTER(
3514 				    ruleset->rules[rs_num].active.ptr,
3515 				    oldrule, newrule, entries);
3516 			ruleset->rules[rs_num].active.rcount++;
3517 		}
3518 
3519 		nr = 0;
3520 		TAILQ_FOREACH(oldrule,
3521 		    ruleset->rules[rs_num].active.ptr, entries)
3522 			oldrule->nr = nr++;
3523 
3524 		ruleset->rules[rs_num].active.ticket++;
3525 
3526 		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
3527 		pf_remove_if_empty_kruleset(ruleset);
3528 
3529 		PF_RULES_WUNLOCK();
3530 		PF_CONFIG_UNLOCK();
3531 		break;
3532 
3533 #undef ERROUT
3534 DIOCCHANGERULE_error:
3535 		PF_RULES_WUNLOCK();
3536 		PF_CONFIG_UNLOCK();
3537 		pf_krule_free(newrule);
3538 		pf_kkif_free(kif);
3539 		break;
3540 	}
3541 
3542 	case DIOCCLRSTATESNV: {
3543 		error = pf_clearstates_nv((struct pfioc_nv *)addr);
3544 		break;
3545 	}
3546 
3547 	case DIOCKILLSTATESNV: {
3548 		error = pf_killstates_nv((struct pfioc_nv *)addr);
3549 		break;
3550 	}
3551 
3552 	case DIOCADDSTATE: {
3553 		struct pfioc_state		*ps = (struct pfioc_state *)addr;
3554 		struct pfsync_state_1301	*sp = &ps->state;
3555 
3556 		if (sp->timeout >= PFTM_MAX) {
3557 			error = EINVAL;
3558 			break;
3559 		}
3560 		if (V_pfsync_state_import_ptr != NULL) {
3561 			PF_RULES_RLOCK();
3562 			error = V_pfsync_state_import_ptr(
3563 			    (union pfsync_state_union *)sp, PFSYNC_SI_IOCTL,
3564 			    PFSYNC_MSG_VERSION_1301);
3565 			PF_RULES_RUNLOCK();
3566 		} else
3567 			error = EOPNOTSUPP;
3568 		break;
3569 	}
3570 
3571 	case DIOCGETSTATE: {
3572 		struct pfioc_state	*ps = (struct pfioc_state *)addr;
3573 		struct pf_kstate	*s;
3574 
3575 		s = pf_find_state_byid(ps->state.id, ps->state.creatorid);
3576 		if (s == NULL) {
3577 			error = ENOENT;
3578 			break;
3579 		}
3580 
3581 		pfsync_state_export((union pfsync_state_union*)&ps->state,
3582 		    s, PFSYNC_MSG_VERSION_1301);
3583 		PF_STATE_UNLOCK(s);
3584 		break;
3585 	}
3586 
3587 	case DIOCGETSTATENV: {
3588 		error = pf_getstate((struct pfioc_nv *)addr);
3589 		break;
3590 	}
3591 
3592 #ifdef COMPAT_FREEBSD14
3593 	case DIOCGETSTATES: {
3594 		struct pfioc_states	*ps = (struct pfioc_states *)addr;
3595 		struct pf_kstate	*s;
3596 		struct pfsync_state_1301	*pstore, *p;
3597 		int			 i, nr;
3598 		size_t			 slice_count = 16, count;
3599 		void			*out;
3600 
3601 		if (ps->ps_len <= 0) {
3602 			nr = uma_zone_get_cur(V_pf_state_z);
3603 			ps->ps_len = sizeof(struct pfsync_state_1301) * nr;
3604 			break;
3605 		}
3606 
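		/*
		 * Export states one hash row at a time into a temporary
		 * buffer.  If a row holds more states than the buffer
		 * fits, grow the buffer and scan the row again.
		 */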
3607 		out = ps->ps_states;
3608 		pstore = mallocarray(slice_count,
3609 		    sizeof(struct pfsync_state_1301), M_TEMP, M_WAITOK | M_ZERO);
3610 		nr = 0;
3611 
3612 		for (i = 0; i <= pf_hashmask; i++) {
3613 			struct pf_idhash *ih = &V_pf_idhash[i];
3614 
3615 DIOCGETSTATES_retry:
3616 			p = pstore;
3617 
3618 			if (LIST_EMPTY(&ih->states))
3619 				continue;
3620 
3621 			PF_HASHROW_LOCK(ih);
3622 			count = 0;
3623 			LIST_FOREACH(s, &ih->states, entry) {
3624 				if (s->timeout == PFTM_UNLINKED)
3625 					continue;
3626 				count++;
3627 			}
3628 
3629 			if (count > slice_count) {
3630 				PF_HASHROW_UNLOCK(ih);
3631 				free(pstore, M_TEMP);
3632 				slice_count = count * 2;
3633 				pstore = mallocarray(slice_count,
3634 				    sizeof(struct pfsync_state_1301), M_TEMP,
3635 				    M_WAITOK | M_ZERO);
3636 				goto DIOCGETSTATES_retry;
3637 			}
3638 
3639 			if ((nr+count) * sizeof(*p) > ps->ps_len) {
3640 				PF_HASHROW_UNLOCK(ih);
3641 				goto DIOCGETSTATES_full;
3642 			}
3643 
3644 			LIST_FOREACH(s, &ih->states, entry) {
3645 				if (s->timeout == PFTM_UNLINKED)
3646 					continue;
3647 
3648 				pfsync_state_export((union pfsync_state_union*)p,
3649 				    s, PFSYNC_MSG_VERSION_1301);
3650 				p++;
3651 				nr++;
3652 			}
3653 			PF_HASHROW_UNLOCK(ih);
3654 			error = copyout(pstore, out,
3655 			    sizeof(struct pfsync_state_1301) * count);
3656 			if (error)
3657 				break;
3658 			out = ps->ps_states + nr;
3659 		}
3660 DIOCGETSTATES_full:
3661 		ps->ps_len = sizeof(struct pfsync_state_1301) * nr;
3662 		free(pstore, M_TEMP);
3663 
3664 		break;
3665 	}
3666 
3667 	case DIOCGETSTATESV2: {
3668 		struct pfioc_states_v2	*ps = (struct pfioc_states_v2 *)addr;
3669 		struct pf_kstate	*s;
3670 		struct pf_state_export	*pstore, *p;
3671 		int i, nr;
3672 		size_t slice_count = 16, count;
3673 		void *out;
3674 
3675 		if (ps->ps_req_version > PF_STATE_VERSION) {
3676 			error = ENOTSUP;
3677 			break;
3678 		}
3679 
3680 		if (ps->ps_len <= 0) {
3681 			nr = uma_zone_get_cur(V_pf_state_z);
3682 			ps->ps_len = sizeof(struct pf_state_export) * nr;
3683 			break;
3684 		}
3685 
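		/*
		 * Same row-by-row export strategy as DIOCGETSTATES above,
		 * but using the versioned pf_state_export format.
		 */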
3686 		out = ps->ps_states;
3687 		pstore = mallocarray(slice_count,
3688 		    sizeof(struct pf_state_export), M_TEMP, M_WAITOK | M_ZERO);
3689 		nr = 0;
3690 
3691 		for (i = 0; i <= pf_hashmask; i++) {
3692 			struct pf_idhash *ih = &V_pf_idhash[i];
3693 
3694 DIOCGETSTATESV2_retry:
3695 			p = pstore;
3696 
3697 			if (LIST_EMPTY(&ih->states))
3698 				continue;
3699 
3700 			PF_HASHROW_LOCK(ih);
3701 			count = 0;
3702 			LIST_FOREACH(s, &ih->states, entry) {
3703 				if (s->timeout == PFTM_UNLINKED)
3704 					continue;
3705 				count++;
3706 			}
3707 
3708 			if (count > slice_count) {
3709 				PF_HASHROW_UNLOCK(ih);
3710 				free(pstore, M_TEMP);
3711 				slice_count = count * 2;
3712 				pstore = mallocarray(slice_count,
3713 				    sizeof(struct pf_state_export), M_TEMP,
3714 				    M_WAITOK | M_ZERO);
3715 				goto DIOCGETSTATESV2_retry;
3716 			}
3717 
3718 			if ((nr+count) * sizeof(*p) > ps->ps_len) {
3719 				PF_HASHROW_UNLOCK(ih);
3720 				goto DIOCGETSTATESV2_full;
3721 			}
3722 
3723 			LIST_FOREACH(s, &ih->states, entry) {
3724 				if (s->timeout == PFTM_UNLINKED)
3725 					continue;
3726 
3727 				pf_state_export(p, s);
3728 				p++;
3729 				nr++;
3730 			}
3731 			PF_HASHROW_UNLOCK(ih);
3732 			error = copyout(pstore, out,
3733 			    sizeof(struct pf_state_export) * count);
3734 			if (error)
3735 				break;
3736 			out = ps->ps_states + nr;
3737 		}
3738 DIOCGETSTATESV2_full:
3739 		ps->ps_len = nr * sizeof(struct pf_state_export);
3740 		free(pstore, M_TEMP);
3741 
3742 		break;
3743 	}
3744 #endif
3745 	case DIOCGETSTATUSNV: {
3746 		error = pf_getstatus((struct pfioc_nv *)addr);
3747 		break;
3748 	}
3749 
3750 	case DIOCSETSTATUSIF: {
3751 		struct pfioc_if	*pi = (struct pfioc_if *)addr;
3752 
3753 		if (pi->ifname[0] == 0) {
3754 			bzero(V_pf_status.ifname, IFNAMSIZ);
3755 			break;
3756 		}
3757 		PF_RULES_WLOCK();
3758 		error = pf_user_strcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ);
3759 		PF_RULES_WUNLOCK();
3760 		break;
3761 	}
3762 
3763 	case DIOCCLRSTATUS: {
3764 		PF_RULES_WLOCK();
3765 		for (int i = 0; i < PFRES_MAX; i++)
3766 			counter_u64_zero(V_pf_status.counters[i]);
3767 		for (int i = 0; i < FCNT_MAX; i++)
3768 			pf_counter_u64_zero(&V_pf_status.fcounters[i]);
3769 		for (int i = 0; i < SCNT_MAX; i++)
3770 			counter_u64_zero(V_pf_status.scounters[i]);
3771 		for (int i = 0; i < KLCNT_MAX; i++)
3772 			counter_u64_zero(V_pf_status.lcounters[i]);
3773 		V_pf_status.since = time_second;
3774 		if (*V_pf_status.ifname)
3775 			pfi_update_status(V_pf_status.ifname, NULL);
3776 		PF_RULES_WUNLOCK();
3777 		break;
3778 	}
3779 
3780 	case DIOCNATLOOK: {
3781 		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
3782 		struct pf_state_key	*sk;
3783 		struct pf_kstate	*state;
3784 		struct pf_state_key_cmp	 key;
3785 		int			 m = 0, direction = pnl->direction;
3786 		int			 sidx, didx;
3787 
3788 		/* NATLOOK src and dst are reversed, so reverse sidx/didx */
3789 		sidx = (direction == PF_IN) ? 1 : 0;
3790 		didx = (direction == PF_IN) ? 0 : 1;
3791 
3792 		if (!pnl->proto ||
3793 		    PF_AZERO(&pnl->saddr, pnl->af) ||
3794 		    PF_AZERO(&pnl->daddr, pnl->af) ||
3795 		    ((pnl->proto == IPPROTO_TCP ||
3796 		    pnl->proto == IPPROTO_UDP) &&
3797 		    (!pnl->dport || !pnl->sport)))
3798 			error = EINVAL;
3799 		else {
3800 			bzero(&key, sizeof(key));
3801 			key.af = pnl->af;
3802 			key.proto = pnl->proto;
3803 			PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
3804 			key.port[sidx] = pnl->sport;
3805 			PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
3806 			key.port[didx] = pnl->dport;
3807 
3808 			state = pf_find_state_all(&key, direction, &m);
3809 			if (state == NULL) {
3810 				error = ENOENT;
3811 			} else {
3812 				if (m > 1) {
3813 					PF_STATE_UNLOCK(state);
3814 					error = E2BIG;	/* more than one state */
3815 				} else {
3816 					sk = state->key[sidx];
3817 					PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
3818 					pnl->rsport = sk->port[sidx];
3819 					PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
3820 					pnl->rdport = sk->port[didx];
3821 					PF_STATE_UNLOCK(state);
3822 				}
3823 			}
3824 		}
3825 		break;
3826 	}
3827 
3828 	case DIOCSETTIMEOUT: {
3829 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
3830 		int		 old;
3831 
3832 		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
3833 		    pt->seconds < 0) {
3834 			error = EINVAL;
3835 			break;
3836 		}
3837 		PF_RULES_WLOCK();
3838 		old = V_pf_default_rule.timeout[pt->timeout];
3839 		if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
3840 			pt->seconds = 1;
3841 		V_pf_default_rule.timeout[pt->timeout] = pt->seconds;
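		/*
		 * If the purge interval was shortened, wake the purge
		 * thread now so the new interval takes effect immediately
		 * instead of after the old (longer) one elapses.
		 */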
3842 		if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
3843 			wakeup(pf_purge_thread);
3844 		pt->seconds = old;
3845 		PF_RULES_WUNLOCK();
3846 		break;
3847 	}
3848 
3849 	case DIOCGETTIMEOUT: {
3850 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
3851 
3852 		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
3853 			error = EINVAL;
3854 			break;
3855 		}
3856 		PF_RULES_RLOCK();
3857 		pt->seconds = V_pf_default_rule.timeout[pt->timeout];
3858 		PF_RULES_RUNLOCK();
3859 		break;
3860 	}
3861 
3862 	case DIOCGETLIMIT: {
3863 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
3864 
3865 		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
3866 			error = EINVAL;
3867 			break;
3868 		}
3869 		PF_RULES_RLOCK();
3870 		pl->limit = V_pf_limits[pl->index].limit;
3871 		PF_RULES_RUNLOCK();
3872 		break;
3873 	}
3874 
3875 	case DIOCSETLIMIT: {
3876 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
3877 		int			 old_limit;
3878 
3879 		PF_RULES_WLOCK();
3880 		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
3881 		    V_pf_limits[pl->index].zone == NULL) {
3882 			PF_RULES_WUNLOCK();
3883 			error = EINVAL;
3884 			break;
3885 		}
3886 		uma_zone_set_max(V_pf_limits[pl->index].zone, pl->limit);
3887 		old_limit = V_pf_limits[pl->index].limit;
3888 		V_pf_limits[pl->index].limit = pl->limit;
3889 		pl->limit = old_limit;
3890 		PF_RULES_WUNLOCK();
3891 		break;
3892 	}
3893 
3894 	case DIOCSETDEBUG: {
3895 		u_int32_t	*level = (u_int32_t *)addr;
3896 
3897 		PF_RULES_WLOCK();
3898 		V_pf_status.debug = *level;
3899 		PF_RULES_WUNLOCK();
3900 		break;
3901 	}
3902 
3903 	case DIOCCLRRULECTRS: {
3904 		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
3905 		struct pf_kruleset	*ruleset = &pf_main_ruleset;
3906 		struct pf_krule		*rule;
3907 
3908 		PF_RULES_WLOCK();
3909 		TAILQ_FOREACH(rule,
3910 		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
3911 			pf_counter_u64_zero(&rule->evaluations);
3912 			for (int i = 0; i < 2; i++) {
3913 				pf_counter_u64_zero(&rule->packets[i]);
3914 				pf_counter_u64_zero(&rule->bytes[i]);
3915 			}
3916 		}
3917 		PF_RULES_WUNLOCK();
3918 		break;
3919 	}
3920 
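	/*
	 * DIOCGIFSPEEDV0/V1: report an interface's link speed.  The v0
	 * layout only carries a 32-bit field, so the baudrate is clamped
	 * to UINT_MAX; v1 additionally returns the full 64-bit value.
	 */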
3921 	case DIOCGIFSPEEDV0:
3922 	case DIOCGIFSPEEDV1: {
3923 		struct pf_ifspeed_v1	*psp = (struct pf_ifspeed_v1 *)addr;
3924 		struct pf_ifspeed_v1	ps;
3925 		struct ifnet		*ifp;
3926 
3927 		if (psp->ifname[0] == '\0') {
3928 			error = EINVAL;
3929 			break;
3930 		}
3931 
3932 		error = pf_user_strcpy(ps.ifname, psp->ifname, IFNAMSIZ);
3933 		if (error != 0)
3934 			break;
3935 		ifp = ifunit(ps.ifname);
3936 		if (ifp != NULL) {
3937 			psp->baudrate32 =
3938 			    (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX);
3939 			if (cmd == DIOCGIFSPEEDV1)
3940 				psp->baudrate = ifp->if_baudrate;
3941 		} else {
3942 			error = EINVAL;
3943 		}
3944 		break;
3945 	}
3946 
3947 #ifdef ALTQ
3948 	case DIOCSTARTALTQ: {
3949 		struct pf_altq		*altq;
3950 
3951 		PF_RULES_WLOCK();
3952 		/* enable all altq interfaces on active list */
3953 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
3954 			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
3955 				error = pf_enable_altq(altq);
3956 				if (error != 0)
3957 					break;
3958 			}
3959 		}
3960 		if (error == 0)
3961 			V_pf_altq_running = 1;
3962 		PF_RULES_WUNLOCK();
3963 		DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
3964 		break;
3965 	}
3966 
3967 	case DIOCSTOPALTQ: {
3968 		struct pf_altq		*altq;
3969 
3970 		PF_RULES_WLOCK();
3971 		/* disable all altq interfaces on active list */
3972 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
3973 			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
3974 				error = pf_disable_altq(altq);
3975 				if (error != 0)
3976 					break;
3977 			}
3978 		}
3979 		if (error == 0)
3980 			V_pf_altq_running = 0;
3981 		PF_RULES_WUNLOCK();
3982 		DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
3983 		break;
3984 	}
3985 
3986 	case DIOCADDALTQV0:
3987 	case DIOCADDALTQV1: {
3988 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
3989 		struct pf_altq		*altq, *a;
3990 		struct ifnet		*ifp;
3991 
3992 		altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO);
3993 		error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd));
3994 		if (error)
3995 			break;
3996 		altq->local_flags = 0;
3997 
3998 		PF_RULES_WLOCK();
3999 		if (pa->ticket != V_ticket_altqs_inactive) {
4000 			PF_RULES_WUNLOCK();
4001 			free(altq, M_PFALTQ);
4002 			error = EBUSY;
4003 			break;
4004 		}
4005 
4006 		/*
4007 		 * if this is for a queue, find the discipline and
4008 		 * copy the necessary fields
4009 		 */
4010 		if (altq->qname[0] != 0) {
4011 			if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
4012 				PF_RULES_WUNLOCK();
4013 				error = EBUSY;
4014 				free(altq, M_PFALTQ);
4015 				break;
4016 			}
4017 			altq->altq_disc = NULL;
4018 			TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) {
4019 				if (strncmp(a->ifname, altq->ifname,
4020 				    IFNAMSIZ) == 0) {
4021 					altq->altq_disc = a->altq_disc;
4022 					break;
4023 				}
4024 			}
4025 		}
4026 
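		/*
		 * A missing interface is not an error when loading: the
		 * entry is kept but flagged as removed, and flagged
		 * entries are skipped when ALTQ is started.
		 */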
4027 		if ((ifp = ifunit(altq->ifname)) == NULL)
4028 			altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
4029 		else
4030 			error = altq_add(ifp, altq);
4031 
4032 		if (error) {
4033 			PF_RULES_WUNLOCK();
4034 			free(altq, M_PFALTQ);
4035 			break;
4036 		}
4037 
4038 		if (altq->qname[0] != 0)
4039 			TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries);
4040 		else
4041 			TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries);
4042 		/* version error check done on import above */
4043 		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
4044 		PF_RULES_WUNLOCK();
4045 		break;
4046 	}
4047 
4048 	case DIOCGETALTQSV0:
4049 	case DIOCGETALTQSV1: {
4050 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4051 		struct pf_altq		*altq;
4052 
4053 		PF_RULES_RLOCK();
4054 		pa->nr = 0;
4055 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries)
4056 			pa->nr++;
4057 		TAILQ_FOREACH(altq, V_pf_altqs_active, entries)
4058 			pa->nr++;
4059 		pa->ticket = V_ticket_altqs_active;
4060 		PF_RULES_RUNLOCK();
4061 		break;
4062 	}
4063 
4064 	case DIOCGETALTQV0:
4065 	case DIOCGETALTQV1: {
4066 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4067 		struct pf_altq		*altq;
4068 
4069 		PF_RULES_RLOCK();
4070 		if (pa->ticket != V_ticket_altqs_active) {
4071 			PF_RULES_RUNLOCK();
4072 			error = EBUSY;
4073 			break;
4074 		}
4075 		altq = pf_altq_get_nth_active(pa->nr);
4076 		if (altq == NULL) {
4077 			PF_RULES_RUNLOCK();
4078 			error = EBUSY;
4079 			break;
4080 		}
4081 		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
4082 		PF_RULES_RUNLOCK();
4083 		break;
4084 	}
4085 
4086 	case DIOCCHANGEALTQV0:
4087 	case DIOCCHANGEALTQV1:
4088 		/* CHANGEALTQ not supported yet! */
4089 		error = ENODEV;
4090 		break;
4091 
4092 	case DIOCGETQSTATSV0:
4093 	case DIOCGETQSTATSV1: {
4094 		struct pfioc_qstats_v1	*pq = (struct pfioc_qstats_v1 *)addr;
4095 		struct pf_altq		*altq;
4096 		int			 nbytes;
4097 		u_int32_t		 version;
4098 
4099 		PF_RULES_RLOCK();
4100 		if (pq->ticket != V_ticket_altqs_active) {
4101 			PF_RULES_RUNLOCK();
4102 			error = EBUSY;
4103 			break;
4104 		}
4105 		nbytes = pq->nbytes;
4106 		altq = pf_altq_get_nth_active(pq->nr);
4107 		if (altq == NULL) {
4108 			PF_RULES_RUNLOCK();
4109 			error = EBUSY;
4110 			break;
4111 		}
4112 
4113 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
4114 			PF_RULES_RUNLOCK();
4115 			error = ENXIO;
4116 			break;
4117 		}
4118 		PF_RULES_RUNLOCK();
4119 		if (cmd == DIOCGETQSTATSV0)
4120 			version = 0;  /* DIOCGETQSTATSV0 means stats struct v0 */
4121 		else
4122 			version = pq->version;
4123 		error = altq_getqstats(altq, pq->buf, &nbytes, version);
4124 		if (error == 0) {
4125 			pq->scheduler = altq->scheduler;
4126 			pq->nbytes = nbytes;
4127 		}
4128 		break;
4129 	}
4130 #endif /* ALTQ */
4131 
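	/*
	 * The pool address ioctls follow a ticket protocol: DIOCBEGINADDRS
	 * empties the staging buffer and hands out a fresh ticket, and
	 * every subsequent DIOCADDADDR must present that ticket, so a
	 * concurrent "begin" invalidates a half-built pool instead of
	 * silently mixing entries.
	 */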
4132 	case DIOCBEGINADDRS: {
4133 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4134 
4135 		PF_RULES_WLOCK();
4136 		pf_empty_kpool(&V_pf_pabuf);
4137 		pp->ticket = ++V_ticket_pabuf;
4138 		PF_RULES_WUNLOCK();
4139 		break;
4140 	}
4141 
4142 	case DIOCADDADDR: {
4143 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4144 		struct pf_kpooladdr	*pa;
4145 		struct pfi_kkif		*kif = NULL;
4146 
4147 #ifndef INET
4148 		if (pp->af == AF_INET) {
4149 			error = EAFNOSUPPORT;
4150 			break;
4151 		}
4152 #endif /* INET */
4153 #ifndef INET6
4154 		if (pp->af == AF_INET6) {
4155 			error = EAFNOSUPPORT;
4156 			break;
4157 		}
4158 #endif /* INET6 */
4159 		if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
4160 		    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
4161 		    pp->addr.addr.type != PF_ADDR_TABLE) {
4162 			error = EINVAL;
4163 			break;
4164 		}
4165 		if (pp->addr.addr.p.dyn != NULL) {
4166 			error = EINVAL;
4167 			break;
4168 		}
4169 		pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK);
4170 		error = pf_pooladdr_to_kpooladdr(&pp->addr, pa);
4171 		if (error != 0)
4172 			break;
4173 		if (pa->ifname[0])
4174 			kif = pf_kkif_create(M_WAITOK);
4175 		PF_RULES_WLOCK();
4176 		if (pp->ticket != V_ticket_pabuf) {
4177 			PF_RULES_WUNLOCK();
4178 			if (pa->ifname[0])
4179 				pf_kkif_free(kif);
4180 			free(pa, M_PFRULE);
4181 			error = EBUSY;
4182 			break;
4183 		}
4184 		if (pa->ifname[0]) {
4185 			pa->kif = pfi_kkif_attach(kif, pa->ifname);
4186 			kif = NULL;
4187 			pfi_kkif_ref(pa->kif);
4188 		} else
4189 			pa->kif = NULL;
4190 		if (pa->addr.type == PF_ADDR_DYNIFTL && ((error =
4191 		    pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) {
4192 			if (pa->ifname[0])
4193 				pfi_kkif_unref(pa->kif);
4194 			PF_RULES_WUNLOCK();
4195 			free(pa, M_PFRULE);
4196 			break;
4197 		}
4198 		TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries);
4199 		PF_RULES_WUNLOCK();
4200 		break;
4201 	}
4202 
4203 	case DIOCGETADDRS: {
4204 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4205 		struct pf_kpool		*pool;
4206 		struct pf_kpooladdr	*pa;
4207 
4208 		pp->anchor[sizeof(pp->anchor) - 1] = 0;
4209 		pp->nr = 0;
4210 
4211 		PF_RULES_RLOCK();
4212 		pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
4213 		    pp->r_num, 0, 1, 0);
4214 		if (pool == NULL) {
4215 			PF_RULES_RUNLOCK();
4216 			error = EBUSY;
4217 			break;
4218 		}
4219 		TAILQ_FOREACH(pa, &pool->list, entries)
4220 			pp->nr++;
4221 		PF_RULES_RUNLOCK();
4222 		break;
4223 	}
4224 
4225 	case DIOCGETADDR: {
4226 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4227 		struct pf_kpool		*pool;
4228 		struct pf_kpooladdr	*pa;
4229 		u_int32_t		 nr = 0;
4230 
4231 		pp->anchor[sizeof(pp->anchor) - 1] = 0;
4232 
4233 		PF_RULES_RLOCK();
4234 		pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
4235 		    pp->r_num, 0, 1, 1);
4236 		if (pool == NULL) {
4237 			PF_RULES_RUNLOCK();
4238 			error = EBUSY;
4239 			break;
4240 		}
4241 		pa = TAILQ_FIRST(&pool->list);
4242 		while ((pa != NULL) && (nr < pp->nr)) {
4243 			pa = TAILQ_NEXT(pa, entries);
4244 			nr++;
4245 		}
4246 		if (pa == NULL) {
4247 			PF_RULES_RUNLOCK();
4248 			error = EBUSY;
4249 			break;
4250 		}
4251 		pf_kpooladdr_to_pooladdr(pa, &pp->addr);
4252 		pf_addr_copyout(&pp->addr.addr);
4253 		PF_RULES_RUNLOCK();
4254 		break;
4255 	}
4256 
4257 	case DIOCCHANGEADDR: {
4258 		struct pfioc_pooladdr	*pca = (struct pfioc_pooladdr *)addr;
4259 		struct pf_kpool		*pool;
4260 		struct pf_kpooladdr	*oldpa = NULL, *newpa = NULL;
4261 		struct pf_kruleset	*ruleset;
4262 		struct pfi_kkif		*kif = NULL;
4263 
4264 		pca->anchor[sizeof(pca->anchor) - 1] = 0;
4265 
4266 		if (pca->action < PF_CHANGE_ADD_HEAD ||
4267 		    pca->action > PF_CHANGE_REMOVE) {
4268 			error = EINVAL;
4269 			break;
4270 		}
4271 		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
4272 		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
4273 		    pca->addr.addr.type != PF_ADDR_TABLE) {
4274 			error = EINVAL;
4275 			break;
4276 		}
4277 		if (pca->addr.addr.p.dyn != NULL) {
4278 			error = EINVAL;
4279 			break;
4280 		}
4281 
4282 		if (pca->action != PF_CHANGE_REMOVE) {
4283 #ifndef INET
4284 			if (pca->af == AF_INET) {
4285 				error = EAFNOSUPPORT;
4286 				break;
4287 			}
4288 #endif /* INET */
4289 #ifndef INET6
4290 			if (pca->af == AF_INET6) {
4291 				error = EAFNOSUPPORT;
4292 				break;
4293 			}
4294 #endif /* INET6 */
4295 			newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK);
4296 			bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
4297 			if (newpa->ifname[0])
4298 				kif = pf_kkif_create(M_WAITOK);
4299 			newpa->kif = NULL;
4300 		}
4301 #define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGEADDR_error, x)
4302 		PF_RULES_WLOCK();
4303 		ruleset = pf_find_kruleset(pca->anchor);
4304 		if (ruleset == NULL)
4305 			ERROUT(EBUSY);
4306 
4307 		pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action,
4308 		    pca->r_num, pca->r_last, 1, 1);
4309 		if (pool == NULL)
4310 			ERROUT(EBUSY);
4311 
4312 		if (pca->action != PF_CHANGE_REMOVE) {
4313 			if (newpa->ifname[0]) {
4314 				newpa->kif = pfi_kkif_attach(kif, newpa->ifname);
4315 				pfi_kkif_ref(newpa->kif);
4316 				kif = NULL;
4317 			}
4318 
4319 			switch (newpa->addr.type) {
4320 			case PF_ADDR_DYNIFTL:
4321 				error = pfi_dynaddr_setup(&newpa->addr,
4322 				    pca->af);
4323 				break;
4324 			case PF_ADDR_TABLE:
4325 				newpa->addr.p.tbl = pfr_attach_table(ruleset,
4326 				    newpa->addr.v.tblname);
4327 				if (newpa->addr.p.tbl == NULL)
4328 					error = ENOMEM;
4329 				break;
4330 			}
4331 			if (error)
4332 				goto DIOCCHANGEADDR_error;
4333 		}
4334 
4335 		switch (pca->action) {
4336 		case PF_CHANGE_ADD_HEAD:
4337 			oldpa = TAILQ_FIRST(&pool->list);
4338 			break;
4339 		case PF_CHANGE_ADD_TAIL:
4340 			oldpa = TAILQ_LAST(&pool->list, pf_kpalist);
4341 			break;
4342 		default:
4343 			oldpa = TAILQ_FIRST(&pool->list);
4344 			for (int i = 0; oldpa && i < pca->nr; i++)
4345 				oldpa = TAILQ_NEXT(oldpa, entries);
4346 
4347 			if (oldpa == NULL)
4348 				ERROUT(EINVAL);
4349 		}
4350 
4351 		if (pca->action == PF_CHANGE_REMOVE) {
4352 			TAILQ_REMOVE(&pool->list, oldpa, entries);
4353 			switch (oldpa->addr.type) {
4354 			case PF_ADDR_DYNIFTL:
4355 				pfi_dynaddr_remove(oldpa->addr.p.dyn);
4356 				break;
4357 			case PF_ADDR_TABLE:
4358 				pfr_detach_table(oldpa->addr.p.tbl);
4359 				break;
4360 			}
4361 			if (oldpa->kif)
4362 				pfi_kkif_unref(oldpa->kif);
4363 			free(oldpa, M_PFRULE);
4364 		} else {
4365 			if (oldpa == NULL)
4366 				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
4367 			else if (pca->action == PF_CHANGE_ADD_HEAD ||
4368 			    pca->action == PF_CHANGE_ADD_BEFORE)
4369 				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
4370 			else
4371 				TAILQ_INSERT_AFTER(&pool->list, oldpa,
4372 				    newpa, entries);
4373 		}
4374 
4375 		pool->cur = TAILQ_FIRST(&pool->list);
4376 		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af);
4377 		PF_RULES_WUNLOCK();
4378 		break;
4379 
4380 #undef ERROUT
4381 DIOCCHANGEADDR_error:
4382 		if (newpa != NULL) {
4383 			if (newpa->kif)
4384 				pfi_kkif_unref(newpa->kif);
4385 			free(newpa, M_PFRULE);
4386 		}
4387 		PF_RULES_WUNLOCK();
4388 		pf_kkif_free(kif);
4389 		break;
4390 	}
4391 
4392 	case DIOCGETRULESETS: {
4393 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
4394 		struct pf_kruleset	*ruleset;
4395 		struct pf_kanchor	*anchor;
4396 
4397 		pr->path[sizeof(pr->path) - 1] = 0;
4398 
4399 		PF_RULES_RLOCK();
4400 		if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
4401 			PF_RULES_RUNLOCK();
4402 			error = ENOENT;
4403 			break;
4404 		}
4405 		pr->nr = 0;
4406 		if (ruleset->anchor == NULL) {
4407 			/* XXX kludge for pf_main_ruleset */
4408 			RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
4409 				if (anchor->parent == NULL)
4410 					pr->nr++;
4411 		} else {
4412 			RB_FOREACH(anchor, pf_kanchor_node,
4413 			    &ruleset->anchor->children)
4414 				pr->nr++;
4415 		}
4416 		PF_RULES_RUNLOCK();
4417 		break;
4418 	}
4419 
4420 	case DIOCGETRULESET: {
4421 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
4422 		struct pf_kruleset	*ruleset;
4423 		struct pf_kanchor	*anchor;
4424 		u_int32_t		 nr = 0;
4425 
4426 		pr->path[sizeof(pr->path) - 1] = 0;
4427 
4428 		PF_RULES_RLOCK();
4429 		if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
4430 			PF_RULES_RUNLOCK();
4431 			error = ENOENT;
4432 			break;
4433 		}
4434 		pr->name[0] = 0;
4435 		if (ruleset->anchor == NULL) {
4436 			/* XXX kludge for pf_main_ruleset */
4437 			RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
4438 				if (anchor->parent == NULL && nr++ == pr->nr) {
4439 					strlcpy(pr->name, anchor->name,
4440 					    sizeof(pr->name));
4441 					break;
4442 				}
4443 		} else {
4444 			RB_FOREACH(anchor, pf_kanchor_node,
4445 			    &ruleset->anchor->children)
4446 				if (nr++ == pr->nr) {
4447 					strlcpy(pr->name, anchor->name,
4448 					    sizeof(pr->name));
4449 					break;
4450 				}
4451 		}
4452 		if (!pr->name[0])
4453 			error = EBUSY;
4454 		PF_RULES_RUNLOCK();
4455 		break;
4456 	}
4457 
4458 	case DIOCRCLRTABLES: {
4459 		struct pfioc_table *io = (struct pfioc_table *)addr;
4460 
4461 		if (io->pfrio_esize != 0) {
4462 			error = ENODEV;
4463 			break;
4464 		}
4465 		PF_RULES_WLOCK();
4466 		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
4467 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
4468 		PF_RULES_WUNLOCK();
4469 		break;
4470 	}
4471 
4472 	case DIOCRADDTABLES: {
4473 		struct pfioc_table *io = (struct pfioc_table *)addr;
4474 		struct pfr_table *pfrts;
4475 		size_t totlen;
4476 
4477 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4478 			error = ENODEV;
4479 			break;
4480 		}
4481 
4482 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4483 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4484 			error = ENOMEM;
4485 			break;
4486 		}
4487 
4488 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4489 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4490 		    M_TEMP, M_WAITOK);
4491 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4492 		if (error) {
4493 			free(pfrts, M_TEMP);
4494 			break;
4495 		}
4496 		PF_RULES_WLOCK();
4497 		error = pfr_add_tables(pfrts, io->pfrio_size,
4498 		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4499 		PF_RULES_WUNLOCK();
4500 		free(pfrts, M_TEMP);
4501 		break;
4502 	}
4503 
4504 	case DIOCRDELTABLES: {
4505 		struct pfioc_table *io = (struct pfioc_table *)addr;
4506 		struct pfr_table *pfrts;
4507 		size_t totlen;
4508 
4509 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4510 			error = ENODEV;
4511 			break;
4512 		}
4513 
4514 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4515 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4516 			error = ENOMEM;
4517 			break;
4518 		}
4519 
4520 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4521 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4522 		    M_TEMP, M_WAITOK);
4523 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4524 		if (error) {
4525 			free(pfrts, M_TEMP);
4526 			break;
4527 		}
4528 		PF_RULES_WLOCK();
4529 		error = pfr_del_tables(pfrts, io->pfrio_size,
4530 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4531 		PF_RULES_WUNLOCK();
4532 		free(pfrts, M_TEMP);
4533 		break;
4534 	}
4535 
4536 	case DIOCRGETTABLES: {
4537 		struct pfioc_table *io = (struct pfioc_table *)addr;
4538 		struct pfr_table *pfrts;
4539 		size_t totlen;
4540 		int n;
4541 
4542 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4543 			error = ENODEV;
4544 			break;
4545 		}
4546 		PF_RULES_RLOCK();
4547 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4548 		if (n < 0) {
4549 			PF_RULES_RUNLOCK();
4550 			error = EINVAL;
4551 			break;
4552 		}
4553 		io->pfrio_size = min(io->pfrio_size, n);
4554 
4555 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4556 
4557 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4558 		    M_TEMP, M_NOWAIT | M_ZERO);
4559 		if (pfrts == NULL) {
4560 			error = ENOMEM;
4561 			PF_RULES_RUNLOCK();
4562 			break;
4563 		}
4564 		error = pfr_get_tables(&io->pfrio_table, pfrts,
4565 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4566 		PF_RULES_RUNLOCK();
4567 		if (error == 0)
4568 			error = copyout(pfrts, io->pfrio_buffer, totlen);
4569 		free(pfrts, M_TEMP);
4570 		break;
4571 	}
4572 
4573 	case DIOCRGETTSTATS: {
4574 		struct pfioc_table *io = (struct pfioc_table *)addr;
4575 		struct pfr_tstats *pfrtstats;
4576 		size_t totlen;
4577 		int n;
4578 
4579 		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
4580 			error = ENODEV;
4581 			break;
4582 		}
4583 		PF_TABLE_STATS_LOCK();
4584 		PF_RULES_RLOCK();
4585 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4586 		if (n < 0) {
4587 			PF_RULES_RUNLOCK();
4588 			PF_TABLE_STATS_UNLOCK();
4589 			error = EINVAL;
4590 			break;
4591 		}
4592 		io->pfrio_size = min(io->pfrio_size, n);
4593 
4594 		totlen = io->pfrio_size * sizeof(struct pfr_tstats);
4595 		pfrtstats = mallocarray(io->pfrio_size,
4596 		    sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT | M_ZERO);
4597 		if (pfrtstats == NULL) {
4598 			error = ENOMEM;
4599 			PF_RULES_RUNLOCK();
4600 			PF_TABLE_STATS_UNLOCK();
4601 			break;
4602 		}
4603 		error = pfr_get_tstats(&io->pfrio_table, pfrtstats,
4604 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4605 		PF_RULES_RUNLOCK();
4606 		PF_TABLE_STATS_UNLOCK();
4607 		if (error == 0)
4608 			error = copyout(pfrtstats, io->pfrio_buffer, totlen);
4609 		free(pfrtstats, M_TEMP);
4610 		break;
4611 	}
4612 
4613 	case DIOCRCLRTSTATS: {
4614 		struct pfioc_table *io = (struct pfioc_table *)addr;
4615 		struct pfr_table *pfrts;
4616 		size_t totlen;
4617 
4618 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4619 			error = ENODEV;
4620 			break;
4621 		}
4622 
4623 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4624 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4625 			/* We used to count tables and use the minimum required
4626 			 * size, so we didn't fail on overly large requests.
4627 			 * Keep doing so. */
4628 			io->pfrio_size = pf_ioctl_maxcount;
4629 			break;
4630 		}
4631 
4632 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4633 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4634 		    M_TEMP, M_WAITOK);
4635 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4636 		if (error) {
4637 			free(pfrts, M_TEMP);
4638 			break;
4639 		}
4640 
4641 		PF_TABLE_STATS_LOCK();
4642 		PF_RULES_RLOCK();
4643 		error = pfr_clr_tstats(pfrts, io->pfrio_size,
4644 		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4645 		PF_RULES_RUNLOCK();
4646 		PF_TABLE_STATS_UNLOCK();
4647 		free(pfrts, M_TEMP);
4648 		break;
4649 	}
4650 
4651 	case DIOCRSETTFLAGS: {
4652 		struct pfioc_table *io = (struct pfioc_table *)addr;
4653 		struct pfr_table *pfrts;
4654 		size_t totlen;
4655 		int n;
4656 
4657 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4658 			error = ENODEV;
4659 			break;
4660 		}
4661 
4662 		PF_RULES_RLOCK();
4663 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4664 		if (n < 0) {
4665 			PF_RULES_RUNLOCK();
4666 			error = EINVAL;
4667 			break;
4668 		}
4669 
4670 		io->pfrio_size = min(io->pfrio_size, n);
4671 		PF_RULES_RUNLOCK();
4672 
4673 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4674 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4675 		    M_TEMP, M_WAITOK);
4676 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4677 		if (error) {
4678 			free(pfrts, M_TEMP);
4679 			break;
4680 		}
4681 		PF_RULES_WLOCK();
4682 		error = pfr_set_tflags(pfrts, io->pfrio_size,
4683 		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
4684 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4685 		PF_RULES_WUNLOCK();
4686 		free(pfrts, M_TEMP);
4687 		break;
4688 	}
4689 
4690 	case DIOCRCLRADDRS: {
4691 		struct pfioc_table *io = (struct pfioc_table *)addr;
4692 
4693 		if (io->pfrio_esize != 0) {
4694 			error = ENODEV;
4695 			break;
4696 		}
4697 		PF_RULES_WLOCK();
4698 		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
4699 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
4700 		PF_RULES_WUNLOCK();
4701 		break;
4702 	}
4703 
4704 	case DIOCRADDADDRS: {
4705 		struct pfioc_table *io = (struct pfioc_table *)addr;
4706 		struct pfr_addr *pfras;
4707 		size_t totlen;
4708 
4709 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4710 			error = ENODEV;
4711 			break;
4712 		}
4713 		if (io->pfrio_size < 0 ||
4714 		    io->pfrio_size > pf_ioctl_maxcount ||
4715 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4716 			error = EINVAL;
4717 			break;
4718 		}
4719 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4720 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4721 		    M_TEMP, M_WAITOK);
4722 		error = copyin(io->pfrio_buffer, pfras, totlen);
4723 		if (error) {
4724 			free(pfras, M_TEMP);
4725 			break;
4726 		}
4727 		PF_RULES_WLOCK();
4728 		error = pfr_add_addrs(&io->pfrio_table, pfras,
4729 		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
4730 		    PFR_FLAG_USERIOCTL);
4731 		PF_RULES_WUNLOCK();
4732 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4733 			error = copyout(pfras, io->pfrio_buffer, totlen);
4734 		free(pfras, M_TEMP);
4735 		break;
4736 	}
4737 
4738 	case DIOCRDELADDRS: {
4739 		struct pfioc_table *io = (struct pfioc_table *)addr;
4740 		struct pfr_addr *pfras;
4741 		size_t totlen;
4742 
4743 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4744 			error = ENODEV;
4745 			break;
4746 		}
4747 		if (io->pfrio_size < 0 ||
4748 		    io->pfrio_size > pf_ioctl_maxcount ||
4749 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4750 			error = EINVAL;
4751 			break;
4752 		}
4753 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4754 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4755 		    M_TEMP, M_WAITOK);
4756 		error = copyin(io->pfrio_buffer, pfras, totlen);
4757 		if (error) {
4758 			free(pfras, M_TEMP);
4759 			break;
4760 		}
4761 		PF_RULES_WLOCK();
4762 		error = pfr_del_addrs(&io->pfrio_table, pfras,
4763 		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
4764 		    PFR_FLAG_USERIOCTL);
4765 		PF_RULES_WUNLOCK();
4766 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4767 			error = copyout(pfras, io->pfrio_buffer, totlen);
4768 		free(pfras, M_TEMP);
4769 		break;
4770 	}
4771 
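	/*
	 * DIOCRSETADDRS replaces a table's contents: pfrio_size counts
	 * the addresses supplied, while pfrio_size2 describes the
	 * (possibly larger) caller buffer used for feedback entries, so
	 * the kernel copy is sized for the larger of the two.
	 */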
4772 	case DIOCRSETADDRS: {
4773 		struct pfioc_table *io = (struct pfioc_table *)addr;
4774 		struct pfr_addr *pfras;
4775 		size_t totlen, count;
4776 
4777 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4778 			error = ENODEV;
4779 			break;
4780 		}
4781 		if (io->pfrio_size < 0 || io->pfrio_size2 < 0) {
4782 			error = EINVAL;
4783 			break;
4784 		}
4785 		count = max(io->pfrio_size, io->pfrio_size2);
4786 		if (count > pf_ioctl_maxcount ||
4787 		    WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) {
4788 			error = EINVAL;
4789 			break;
4790 		}
4791 		totlen = count * sizeof(struct pfr_addr);
4792 		pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP,
4793 		    M_WAITOK);
4794 		error = copyin(io->pfrio_buffer, pfras, totlen);
4795 		if (error) {
4796 			free(pfras, M_TEMP);
4797 			break;
4798 		}
4799 		PF_RULES_WLOCK();
4800 		error = pfr_set_addrs(&io->pfrio_table, pfras,
4801 		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
4802 		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
4803 		    PFR_FLAG_USERIOCTL, 0);
4804 		PF_RULES_WUNLOCK();
4805 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4806 			error = copyout(pfras, io->pfrio_buffer, totlen);
4807 		free(pfras, M_TEMP);
4808 		break;
4809 	}
4810 
4811 	case DIOCRGETADDRS: {
4812 		struct pfioc_table *io = (struct pfioc_table *)addr;
4813 		struct pfr_addr *pfras;
4814 		size_t totlen;
4815 
4816 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4817 			error = ENODEV;
4818 			break;
4819 		}
4820 		if (io->pfrio_size < 0 ||
4821 		    io->pfrio_size > pf_ioctl_maxcount ||
4822 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4823 			error = EINVAL;
4824 			break;
4825 		}
4826 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4827 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4828 		    M_TEMP, M_WAITOK | M_ZERO);
4829 		PF_RULES_RLOCK();
4830 		error = pfr_get_addrs(&io->pfrio_table, pfras,
4831 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4832 		PF_RULES_RUNLOCK();
4833 		if (error == 0)
4834 			error = copyout(pfras, io->pfrio_buffer, totlen);
4835 		free(pfras, M_TEMP);
4836 		break;
4837 	}
4838 
4839 	case DIOCRGETASTATS: {
4840 		struct pfioc_table *io = (struct pfioc_table *)addr;
4841 		struct pfr_astats *pfrastats;
4842 		size_t totlen;
4843 
4844 		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
4845 			error = ENODEV;
4846 			break;
4847 		}
4848 		if (io->pfrio_size < 0 ||
4849 		    io->pfrio_size > pf_ioctl_maxcount ||
4850 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) {
4851 			error = EINVAL;
4852 			break;
4853 		}
4854 		totlen = io->pfrio_size * sizeof(struct pfr_astats);
4855 		pfrastats = mallocarray(io->pfrio_size,
4856 		    sizeof(struct pfr_astats), M_TEMP, M_WAITOK | M_ZERO);
4857 		PF_RULES_RLOCK();
4858 		error = pfr_get_astats(&io->pfrio_table, pfrastats,
4859 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4860 		PF_RULES_RUNLOCK();
4861 		if (error == 0)
4862 			error = copyout(pfrastats, io->pfrio_buffer, totlen);
4863 		free(pfrastats, M_TEMP);
4864 		break;
4865 	}
4866 
4867 	case DIOCRCLRASTATS: {
4868 		struct pfioc_table *io = (struct pfioc_table *)addr;
4869 		struct pfr_addr *pfras;
4870 		size_t totlen;
4871 
4872 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4873 			error = ENODEV;
4874 			break;
4875 		}
4876 		if (io->pfrio_size < 0 ||
4877 		    io->pfrio_size > pf_ioctl_maxcount ||
4878 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4879 			error = EINVAL;
4880 			break;
4881 		}
4882 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4883 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4884 		    M_TEMP, M_WAITOK);
4885 		error = copyin(io->pfrio_buffer, pfras, totlen);
4886 		if (error) {
4887 			free(pfras, M_TEMP);
4888 			break;
4889 		}
4890 		PF_RULES_WLOCK();
4891 		error = pfr_clr_astats(&io->pfrio_table, pfras,
4892 		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
4893 		    PFR_FLAG_USERIOCTL);
4894 		PF_RULES_WUNLOCK();
4895 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4896 			error = copyout(pfras, io->pfrio_buffer, totlen);
4897 		free(pfras, M_TEMP);
4898 		break;
4899 	}
4900 
4901 	case DIOCRTSTADDRS: {
4902 		struct pfioc_table *io = (struct pfioc_table *)addr;
4903 		struct pfr_addr *pfras;
4904 		size_t totlen;
4905 
4906 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4907 			error = ENODEV;
4908 			break;
4909 		}
4910 		if (io->pfrio_size < 0 ||
4911 		    io->pfrio_size > pf_ioctl_maxcount ||
4912 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4913 			error = EINVAL;
4914 			break;
4915 		}
4916 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4917 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4918 		    M_TEMP, M_WAITOK);
4919 		error = copyin(io->pfrio_buffer, pfras, totlen);
4920 		if (error) {
4921 			free(pfras, M_TEMP);
4922 			break;
4923 		}
4924 		PF_RULES_RLOCK();
4925 		error = pfr_tst_addrs(&io->pfrio_table, pfras,
4926 		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
4927 		    PFR_FLAG_USERIOCTL);
4928 		PF_RULES_RUNLOCK();
4929 		if (error == 0)
4930 			error = copyout(pfras, io->pfrio_buffer, totlen);
4931 		free(pfras, M_TEMP);
4932 		break;
4933 	}
4934 
4935 	case DIOCRINADEFINE: {
4936 		struct pfioc_table *io = (struct pfioc_table *)addr;
4937 		struct pfr_addr *pfras;
4938 		size_t totlen;
4939 
4940 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4941 			error = ENODEV;
4942 			break;
4943 		}
4944 		if (io->pfrio_size < 0 ||
4945 		    io->pfrio_size > pf_ioctl_maxcount ||
4946 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4947 			error = EINVAL;
4948 			break;
4949 		}
4950 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4951 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4952 		    M_TEMP, M_WAITOK);
4953 		error = copyin(io->pfrio_buffer, pfras, totlen);
4954 		if (error) {
4955 			free(pfras, M_TEMP);
4956 			break;
4957 		}
4958 		PF_RULES_WLOCK();
4959 		error = pfr_ina_define(&io->pfrio_table, pfras,
4960 		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
4961 		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4962 		PF_RULES_WUNLOCK();
4963 		free(pfras, M_TEMP);
4964 		break;
4965 	}
4966 
4967 	case DIOCOSFPADD: {
4968 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
4969 		PF_RULES_WLOCK();
4970 		error = pf_osfp_add(io);
4971 		PF_RULES_WUNLOCK();
4972 		break;
4973 	}
4974 
4975 	case DIOCOSFPGET: {
4976 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
4977 		PF_RULES_RLOCK();
4978 		error = pf_osfp_get(io);
4979 		PF_RULES_RUNLOCK();
4980 		break;
4981 	}
4982 
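	/*
	 * DIOCXBEGIN/DIOCXROLLBACK/DIOCXCOMMIT implement atomic ruleset
	 * loads: "begin" opens an inactive copy of every listed ruleset
	 * and returns one ticket per element, and "commit" swaps them all
	 * in (or "rollback" discards them) as a unit.  A hedged sketch of
	 * the userland calling sequence (error handling omitted):
	 *
	 *	struct pfioc_trans_e es[1] = {
	 *		{ .rs_num = PF_RULESET_FILTER, .anchor = "" },
	 *	};
	 *	struct pfioc_trans t = {
	 *		.size = 1, .esize = sizeof(es[0]), .array = es,
	 *	};
	 *	ioctl(dev, DIOCXBEGIN, &t);
	 *	... load rules against es[0].ticket ...
	 *	ioctl(dev, DIOCXCOMMIT, &t);
	 */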
4983 	case DIOCXBEGIN: {
4984 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
4985 		struct pfioc_trans_e	*ioes, *ioe;
4986 		size_t			 totlen;
4987 		int			 i;
4988 
4989 		if (io->esize != sizeof(*ioe)) {
4990 			error = ENODEV;
4991 			break;
4992 		}
4993 		if (io->size < 0 ||
4994 		    io->size > pf_ioctl_maxcount ||
4995 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
4996 			error = EINVAL;
4997 			break;
4998 		}
4999 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5000 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5001 		    M_TEMP, M_WAITOK);
5002 		error = copyin(io->array, ioes, totlen);
5003 		if (error) {
5004 			free(ioes, M_TEMP);
5005 			break;
5006 		}
5007 		/* Ensure there are no more Ethernet rules to clean up. */
5008 		NET_EPOCH_DRAIN_CALLBACKS();
5009 		PF_RULES_WLOCK();
5010 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5011 			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
5012 			switch (ioe->rs_num) {
5013 			case PF_RULESET_ETH:
5014 				if ((error = pf_begin_eth(&ioe->ticket, ioe->anchor))) {
5015 					PF_RULES_WUNLOCK();
5016 					free(ioes, M_TEMP);
5017 					goto fail;
5018 				}
5019 				break;
5020 #ifdef ALTQ
5021 			case PF_RULESET_ALTQ:
5022 				if (ioe->anchor[0]) {
5023 					PF_RULES_WUNLOCK();
5024 					free(ioes, M_TEMP);
5025 					error = EINVAL;
5026 					goto fail;
5027 				}
5028 				if ((error = pf_begin_altq(&ioe->ticket))) {
5029 					PF_RULES_WUNLOCK();
5030 					free(ioes, M_TEMP);
5031 					goto fail;
5032 				}
5033 				break;
5034 #endif /* ALTQ */
5035 			case PF_RULESET_TABLE:
5036 			    {
5037 				struct pfr_table table;
5038 
5039 				bzero(&table, sizeof(table));
5040 				strlcpy(table.pfrt_anchor, ioe->anchor,
5041 				    sizeof(table.pfrt_anchor));
5042 				if ((error = pfr_ina_begin(&table,
5043 				    &ioe->ticket, NULL, 0))) {
5044 					PF_RULES_WUNLOCK();
5045 					free(ioes, M_TEMP);
5046 					goto fail;
5047 				}
5048 				break;
5049 			    }
5050 			default:
5051 				if ((error = pf_begin_rules(&ioe->ticket,
5052 				    ioe->rs_num, ioe->anchor))) {
5053 					PF_RULES_WUNLOCK();
5054 					free(ioes, M_TEMP);
5055 					goto fail;
5056 				}
5057 				break;
5058 			}
5059 		}
5060 		PF_RULES_WUNLOCK();
5061 		error = copyout(ioes, io->array, totlen);
5062 		free(ioes, M_TEMP);
5063 		break;
5064 	}
5065 
5066 	case DIOCXROLLBACK: {
5067 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5068 		struct pfioc_trans_e	*ioe, *ioes;
5069 		size_t			 totlen;
5070 		int			 i;
5071 
5072 		if (io->esize != sizeof(*ioe)) {
5073 			error = ENODEV;
5074 			break;
5075 		}
5076 		if (io->size < 0 ||
5077 		    io->size > pf_ioctl_maxcount ||
5078 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5079 			error = EINVAL;
5080 			break;
5081 		}
5082 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5083 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5084 		    M_TEMP, M_WAITOK);
5085 		error = copyin(io->array, ioes, totlen);
5086 		if (error) {
5087 			free(ioes, M_TEMP);
5088 			break;
5089 		}
5090 		PF_RULES_WLOCK();
5091 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5092 			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
5093 			switch (ioe->rs_num) {
5094 			case PF_RULESET_ETH:
5095 				if ((error = pf_rollback_eth(ioe->ticket,
5096 				    ioe->anchor))) {
5097 					PF_RULES_WUNLOCK();
5098 					free(ioes, M_TEMP);
5099 					goto fail; /* really bad */
5100 				}
5101 				break;
5102 #ifdef ALTQ
5103 			case PF_RULESET_ALTQ:
5104 				if (ioe->anchor[0]) {
5105 					PF_RULES_WUNLOCK();
5106 					free(ioes, M_TEMP);
5107 					error = EINVAL;
5108 					goto fail;
5109 				}
5110 				if ((error = pf_rollback_altq(ioe->ticket))) {
5111 					PF_RULES_WUNLOCK();
5112 					free(ioes, M_TEMP);
5113 					goto fail; /* really bad */
5114 				}
5115 				break;
5116 #endif /* ALTQ */
5117 			case PF_RULESET_TABLE:
5118 			    {
5119 				struct pfr_table table;
5120 
5121 				bzero(&table, sizeof(table));
5122 				strlcpy(table.pfrt_anchor, ioe->anchor,
5123 				    sizeof(table.pfrt_anchor));
5124 				if ((error = pfr_ina_rollback(&table,
5125 				    ioe->ticket, NULL, 0))) {
5126 					PF_RULES_WUNLOCK();
5127 					free(ioes, M_TEMP);
5128 					goto fail; /* really bad */
5129 				}
5130 				break;
5131 			    }
5132 			default:
5133 				if ((error = pf_rollback_rules(ioe->ticket,
5134 				    ioe->rs_num, ioe->anchor))) {
5135 					PF_RULES_WUNLOCK();
5136 					free(ioes, M_TEMP);
5137 					goto fail; /* really bad */
5138 				}
5139 				break;
5140 			}
5141 		}
5142 		PF_RULES_WUNLOCK();
5143 		free(ioes, M_TEMP);
5144 		break;
5145 	}
5146 
5147 	case DIOCXCOMMIT: {
5148 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5149 		struct pfioc_trans_e	*ioe, *ioes;
5150 		struct pf_kruleset	*rs;
5151 		struct pf_keth_ruleset	*ers;
5152 		size_t			 totlen;
5153 		int			 i;
5154 
5155 		if (io->esize != sizeof(*ioe)) {
5156 			error = ENODEV;
5157 			break;
5158 		}
5159 
5160 		if (io->size < 0 ||
5161 		    io->size > pf_ioctl_maxcount ||
5162 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5163 			error = EINVAL;
5164 			break;
5165 		}
5166 
5167 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5168 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5169 		    M_TEMP, M_WAITOK);
5170 		error = copyin(io->array, ioes, totlen);
5171 		if (error) {
5172 			free(ioes, M_TEMP);
5173 			break;
5174 		}
5175 		PF_RULES_WLOCK();
5176 		/* First make sure everything will succeed. */
5177 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5178 			ioe->anchor[sizeof(ioe->anchor) - 1] = 0;
5179 			switch (ioe->rs_num) {
5180 			case PF_RULESET_ETH:
5181 				ers = pf_find_keth_ruleset(ioe->anchor);
5182 				if (ers == NULL || ioe->ticket == 0 ||
5183 				    ioe->ticket != ers->inactive.ticket) {
5184 					PF_RULES_WUNLOCK();
5185 					free(ioes, M_TEMP);
5186 					error = EINVAL;
5187 					goto fail;
5188 				}
5189 				break;
5190 #ifdef ALTQ
5191 			case PF_RULESET_ALTQ:
5192 				if (ioe->anchor[0]) {
5193 					PF_RULES_WUNLOCK();
5194 					free(ioes, M_TEMP);
5195 					error = EINVAL;
5196 					goto fail;
5197 				}
5198 				if (!V_altqs_inactive_open || ioe->ticket !=
5199 				    V_ticket_altqs_inactive) {
5200 					PF_RULES_WUNLOCK();
5201 					free(ioes, M_TEMP);
5202 					error = EBUSY;
5203 					goto fail;
5204 				}
5205 				break;
5206 #endif /* ALTQ */
5207 			case PF_RULESET_TABLE:
5208 				rs = pf_find_kruleset(ioe->anchor);
5209 				if (rs == NULL || !rs->topen || ioe->ticket !=
5210 				    rs->tticket) {
5211 					PF_RULES_WUNLOCK();
5212 					free(ioes, M_TEMP);
5213 					error = EBUSY;
5214 					goto fail;
5215 				}
5216 				break;
5217 			default:
5218 				if (ioe->rs_num < 0 || ioe->rs_num >=
5219 				    PF_RULESET_MAX) {
5220 					PF_RULES_WUNLOCK();
5221 					free(ioes, M_TEMP);
5222 					error = EINVAL;
5223 					goto fail;
5224 				}
5225 				rs = pf_find_kruleset(ioe->anchor);
5226 				if (rs == NULL ||
5227 				    !rs->rules[ioe->rs_num].inactive.open ||
5228 				    rs->rules[ioe->rs_num].inactive.ticket !=
5229 				    ioe->ticket) {
5230 					PF_RULES_WUNLOCK();
5231 					free(ioes, M_TEMP);
5232 					error = EBUSY;
5233 					goto fail;
5234 				}
5235 				break;
5236 			}
5237 		}
5238 		/* Now do the commit - no errors should happen here. */
5239 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5240 			switch (ioe->rs_num) {
5241 			case PF_RULESET_ETH:
5242 				if ((error = pf_commit_eth(ioe->ticket, ioe->anchor))) {
5243 					PF_RULES_WUNLOCK();
5244 					free(ioes, M_TEMP);
5245 					goto fail; /* really bad */
5246 				}
5247 				break;
5248 #ifdef ALTQ
5249 			case PF_RULESET_ALTQ:
5250 				if ((error = pf_commit_altq(ioe->ticket))) {
5251 					PF_RULES_WUNLOCK();
5252 					free(ioes, M_TEMP);
5253 					goto fail; /* really bad */
5254 				}
5255 				break;
5256 #endif /* ALTQ */
5257 			case PF_RULESET_TABLE:
5258 			    {
5259 				struct pfr_table table;
5260 
5261 				bzero(&table, sizeof(table));
5262 				(void)strlcpy(table.pfrt_anchor, ioe->anchor,
5263 				    sizeof(table.pfrt_anchor));
5264 				if ((error = pfr_ina_commit(&table,
5265 				    ioe->ticket, NULL, NULL, 0))) {
5266 					PF_RULES_WUNLOCK();
5267 					free(ioes, M_TEMP);
5268 					goto fail; /* really bad */
5269 				}
5270 				break;
5271 			    }
5272 			default:
5273 				if ((error = pf_commit_rules(ioe->ticket,
5274 				    ioe->rs_num, ioe->anchor))) {
5275 					PF_RULES_WUNLOCK();
5276 					free(ioes, M_TEMP);
5277 					goto fail; /* really bad */
5278 				}
5279 				break;
5280 			}
5281 		}
5282 		PF_RULES_WUNLOCK();
5283 
5284 		/* Only hook into Ethernet traffic if we've got rules for it. */
5285 		if (! TAILQ_EMPTY(V_pf_keth->active.rules))
5286 			hook_pf_eth();
5287 		else
5288 			dehook_pf_eth();
5289 
5290 		free(ioes, M_TEMP);
5291 		break;
5292 	}
5293 
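	/*
	 * DIOCGETSRCNODES uses the usual two-pass pattern: count the
	 * source nodes first to size the request, then copy out as many
	 * as fit.  Passing psn_len == 0 acts as a "how much do I need"
	 * probe; the required length is returned without copying.
	 */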
5294 	case DIOCGETSRCNODES: {
5295 		struct pfioc_src_nodes	*psn = (struct pfioc_src_nodes *)addr;
5296 		struct pf_srchash	*sh;
5297 		struct pf_ksrc_node	*n;
5298 		struct pf_src_node	*p, *pstore;
5299 		uint32_t		 i, nr = 0;
5300 
5301 		for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
5302 				i++, sh++) {
5303 			PF_HASHROW_LOCK(sh);
5304 			LIST_FOREACH(n, &sh->nodes, entry)
5305 				nr++;
5306 			PF_HASHROW_UNLOCK(sh);
5307 		}
5308 
5309 		psn->psn_len = min(psn->psn_len,
5310 		    sizeof(struct pf_src_node) * nr);
5311 
5312 		if (psn->psn_len == 0) {
5313 			psn->psn_len = sizeof(struct pf_src_node) * nr;
5314 			break;
5315 		}
5316 
5317 		nr = 0;
5318 
5319 		p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO);
5320 		for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
5321 		    i++, sh++) {
5322 		    PF_HASHROW_LOCK(sh);
5323 		    LIST_FOREACH(n, &sh->nodes, entry) {
5325 			if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
5326 				break;
5327 
5328 			pf_src_node_copy(n, p);
5329 
5330 			p++;
5331 			nr++;
5332 		    }
5333 		    PF_HASHROW_UNLOCK(sh);
5334 		}
5335 		error = copyout(pstore, psn->psn_src_nodes,
5336 		    sizeof(struct pf_src_node) * nr);
5337 		if (error) {
5338 			free(pstore, M_TEMP);
5339 			break;
5340 		}
5341 		psn->psn_len = sizeof(struct pf_src_node) * nr;
5342 		free(pstore, M_TEMP);
5343 		break;
5344 	}
5345 
5346 	case DIOCCLRSRCNODES: {
5347 		pf_clear_srcnodes(NULL);
5348 		pf_purge_expired_src_nodes();
5349 		break;
5350 	}
5351 
5352 	case DIOCKILLSRCNODES:
5353 		pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
5354 		break;
5355 
5356 #ifdef COMPAT_FREEBSD13
5357 	case DIOCKEEPCOUNTERS_FREEBSD13:
5358 #endif
5359 	case DIOCKEEPCOUNTERS:
5360 		error = pf_keepcounters((struct pfioc_nv *)addr);
5361 		break;
5362 
5363 	case DIOCGETSYNCOOKIES:
5364 		error = pf_get_syncookies((struct pfioc_nv *)addr);
5365 		break;
5366 
5367 	case DIOCSETSYNCOOKIES:
5368 		error = pf_set_syncookies((struct pfioc_nv *)addr);
5369 		break;
5370 
5371 	case DIOCSETHOSTID: {
5372 		u_int32_t	*hostid = (u_int32_t *)addr;
5373 
5374 		PF_RULES_WLOCK();
5375 		if (*hostid == 0)
5376 			V_pf_status.hostid = arc4random();
5377 		else
5378 			V_pf_status.hostid = *hostid;
5379 		PF_RULES_WUNLOCK();
5380 		break;
5381 	}
5382 
5383 	case DIOCOSFPFLUSH:
5384 		PF_RULES_WLOCK();
5385 		pf_osfp_flush();
5386 		PF_RULES_WUNLOCK();
5387 		break;
5388 
5389 	case DIOCIGETIFACES: {
5390 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5391 		struct pfi_kif *ifstore;
5392 		size_t bufsiz;
5393 
5394 		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
5395 			error = ENODEV;
5396 			break;
5397 		}
5398 
5399 		if (io->pfiio_size < 0 ||
5400 		    io->pfiio_size > pf_ioctl_maxcount ||
5401 		    WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) {
5402 			error = EINVAL;
5403 			break;
5404 		}
5405 
5406 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5407 
5408 		bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
5409 		ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif),
5410 		    M_TEMP, M_WAITOK | M_ZERO);
5411 
5412 		PF_RULES_RLOCK();
5413 		pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
5414 		PF_RULES_RUNLOCK();
5415 		error = copyout(ifstore, io->pfiio_buffer, bufsiz);
5416 		free(ifstore, M_TEMP);
5417 		break;
5418 	}
5419 
5420 	case DIOCSETIFFLAG: {
5421 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5422 
5423 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5424 
5425 		PF_RULES_WLOCK();
5426 		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
5427 		PF_RULES_WUNLOCK();
5428 		break;
5429 	}
5430 
5431 	case DIOCCLRIFFLAG: {
5432 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5433 
5434 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5435 
5436 		PF_RULES_WLOCK();
5437 		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
5438 		PF_RULES_WUNLOCK();
5439 		break;
5440 	}
5441 
5442 	case DIOCSETREASS: {
5443 		u_int32_t	*reass = (u_int32_t *)addr;
5444 
5445 		V_pf_status.reass = *reass & (PF_REASS_ENABLED|PF_REASS_NODF);
5446 		/* Clearing the DF flag without reassembly enabled is not a
5447 		 * valid combination; disable reassembly in that case. */
5448 		if (!(V_pf_status.reass & PF_REASS_ENABLED))
5449 			V_pf_status.reass = 0;
5450 		break;
5451 	}
5452 
5453 	default:
5454 		error = ENODEV;
5455 		break;
5456 	}
5457 fail:
5458 	CURVNET_RESTORE();
5459 
5460 #undef ERROUT_IOCTL
5461 
5462 	return (error);
5463 }
5464 
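/*
 * Export a state in the pfsync wire format.  The union carries two wire
 * layouts (pfs_1301 and pfs_1400); the 1400 layout additionally carries
 * queueing and route-to attributes.  Fields common to both versions
 * overlap, which is why they are written through the pfs_1301 view
 * regardless of msg_version.
 */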
5465 void
5466 pfsync_state_export(union pfsync_state_union *sp, struct pf_kstate *st, int msg_version)
5467 {
5468 	bzero(sp, sizeof(union pfsync_state_union));
5469 
5470 	/* copy from state key */
5471 	sp->pfs_1301.key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
5472 	sp->pfs_1301.key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
5473 	sp->pfs_1301.key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
5474 	sp->pfs_1301.key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
5475 	sp->pfs_1301.key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
5476 	sp->pfs_1301.key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
5477 	sp->pfs_1301.key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
5478 	sp->pfs_1301.key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
5479 	sp->pfs_1301.proto = st->key[PF_SK_WIRE]->proto;
5480 	sp->pfs_1301.af = st->key[PF_SK_WIRE]->af;
5481 
5482 	/* copy from state */
5483 	strlcpy(sp->pfs_1301.ifname, st->kif->pfik_name, sizeof(sp->pfs_1301.ifname));
5484 	bcopy(&st->rt_addr, &sp->pfs_1301.rt_addr, sizeof(sp->pfs_1301.rt_addr));
5485 	sp->pfs_1301.creation = htonl(time_uptime - st->creation);
5486 	sp->pfs_1301.expire = pf_state_expires(st);
5487 	if (sp->pfs_1301.expire <= time_uptime)
5488 		sp->pfs_1301.expire = htonl(0);
5489 	else
5490 		sp->pfs_1301.expire = htonl(sp->pfs_1301.expire - time_uptime);
5491 
5492 	sp->pfs_1301.direction = st->direction;
5493 	sp->pfs_1301.log = st->act.log;
5494 	sp->pfs_1301.timeout = st->timeout;
5495 
5496 	switch (msg_version) {
5497 		case PFSYNC_MSG_VERSION_1301:
5498 			sp->pfs_1301.state_flags = st->state_flags;
5499 			break;
5500 		case PFSYNC_MSG_VERSION_1400:
5501 			sp->pfs_1400.state_flags = htons(st->state_flags);
5502 			sp->pfs_1400.qid = htons(st->act.qid);
5503 			sp->pfs_1400.pqid = htons(st->act.pqid);
5504 			sp->pfs_1400.dnpipe = htons(st->act.dnpipe);
5505 			sp->pfs_1400.dnrpipe = htons(st->act.dnrpipe);
5506 			sp->pfs_1400.rtableid = htonl(st->act.rtableid);
5507 			sp->pfs_1400.min_ttl = st->act.min_ttl;
5508 			sp->pfs_1400.set_tos = st->act.set_tos;
5509 			sp->pfs_1400.max_mss = htons(st->act.max_mss);
5510 			sp->pfs_1400.set_prio[0] = st->act.set_prio[0];
5511 			sp->pfs_1400.set_prio[1] = st->act.set_prio[1];
5512 			sp->pfs_1400.rt = st->rt;
5513 			if (st->rt_kif)
5514 				strlcpy(sp->pfs_1400.rt_ifname,
5515 				    st->rt_kif->pfik_name,
5516 				    sizeof(sp->pfs_1400.rt_ifname));
5517 			break;
5518 		default:
5519 			panic("%s: Unsupported pfsync_msg_version %d",
5520 			    __func__, msg_version);
5521 	}
5522 
5523 	if (st->src_node)
5524 		sp->pfs_1301.sync_flags |= PFSYNC_FLAG_SRCNODE;
5525 	if (st->nat_src_node)
5526 		sp->pfs_1301.sync_flags |= PFSYNC_FLAG_NATSRCNODE;
5527 
5528 	sp->pfs_1301.id = st->id;
5529 	sp->pfs_1301.creatorid = st->creatorid;
5530 	pf_state_peer_hton(&st->src, &sp->pfs_1301.src);
5531 	pf_state_peer_hton(&st->dst, &sp->pfs_1301.dst);
5532 
5533 	if (st->rule.ptr == NULL)
5534 		sp->pfs_1301.rule = htonl(-1);
5535 	else
5536 		sp->pfs_1301.rule = htonl(st->rule.ptr->nr);
5537 	if (st->anchor.ptr == NULL)
5538 		sp->pfs_1301.anchor = htonl(-1);
5539 	else
5540 		sp->pfs_1301.anchor = htonl(st->anchor.ptr->nr);
5541 	if (st->nat_rule.ptr == NULL)
5542 		sp->pfs_1301.nat_rule = htonl(-1);
5543 	else
5544 		sp->pfs_1301.nat_rule = htonl(st->nat_rule.ptr->nr);
5545 
5546 	pf_state_counter_hton(st->packets[0], sp->pfs_1301.packets[0]);
5547 	pf_state_counter_hton(st->packets[1], sp->pfs_1301.packets[1]);
5548 	pf_state_counter_hton(st->bytes[0], sp->pfs_1301.bytes[0]);
5549 	pf_state_counter_hton(st->bytes[1], sp->pfs_1301.bytes[1]);
5550 }
5551 
5552 void
5553 pf_state_export(struct pf_state_export *sp, struct pf_kstate *st)
5554 {
5555 	bzero(sp, sizeof(*sp));
5556 
5557 	sp->version = PF_STATE_VERSION;
5558 
5559 	/* copy from state key */
5560 	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
5561 	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
5562 	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
5563 	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
5564 	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
5565 	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
5566 	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
5567 	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
5568 	sp->proto = st->key[PF_SK_WIRE]->proto;
5569 	sp->af = st->key[PF_SK_WIRE]->af;
5570 
5571 	/* copy from state */
5572 	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
5573 	strlcpy(sp->orig_ifname, st->orig_kif->pfik_name,
5574 	    sizeof(sp->orig_ifname));
5575 	bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
5576 	sp->creation = htonl(time_uptime - st->creation);
5577 	sp->expire = pf_state_expires(st);
5578 	if (sp->expire <= time_uptime)
5579 		sp->expire = htonl(0);
5580 	else
5581 		sp->expire = htonl(sp->expire - time_uptime);
5582 
5583 	sp->direction = st->direction;
5584 	sp->log = st->act.log;
5585 	sp->timeout = st->timeout;
5586 	/* 8 bits for the old libpfctl, 16 bits for the new libpfctl */
5587 	sp->state_flags_compat = st->state_flags;
5588 	sp->state_flags = htons(st->state_flags);
5589 	if (st->src_node)
5590 		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
5591 	if (st->nat_src_node)
5592 		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
5593 
5594 	sp->id = st->id;
5595 	sp->creatorid = st->creatorid;
5596 	pf_state_peer_hton(&st->src, &sp->src);
5597 	pf_state_peer_hton(&st->dst, &sp->dst);
5598 
5599 	if (st->rule.ptr == NULL)
5600 		sp->rule = htonl(-1);
5601 	else
5602 		sp->rule = htonl(st->rule.ptr->nr);
5603 	if (st->anchor.ptr == NULL)
5604 		sp->anchor = htonl(-1);
5605 	else
5606 		sp->anchor = htonl(st->anchor.ptr->nr);
5607 	if (st->nat_rule.ptr == NULL)
5608 		sp->nat_rule = htonl(-1);
5609 	else
5610 		sp->nat_rule = htonl(st->nat_rule.ptr->nr);
5611 
5612 	sp->packets[0] = st->packets[0];
5613 	sp->packets[1] = st->packets[1];
5614 	sp->bytes[0] = st->bytes[0];
5615 	sp->bytes[1] = st->bytes[1];
5616 
5617 	sp->qid = htons(st->act.qid);
5618 	sp->pqid = htons(st->act.pqid);
5619 	sp->dnpipe = htons(st->act.dnpipe);
5620 	sp->dnrpipe = htons(st->act.dnrpipe);
5621 	sp->rtableid = htonl(st->act.rtableid);
5622 	sp->min_ttl = st->act.min_ttl;
5623 	sp->set_tos = st->act.set_tos;
5624 	sp->max_mss = htons(st->act.max_mss);
5625 	sp->rt = st->rt;
5626 	if (st->rt_kif)
5627 		strlcpy(sp->rt_ifname, st->rt_kif->pfik_name,
5628 		    sizeof(sp->rt_ifname));
5629 	sp->set_prio[0] = st->act.set_prio[0];
5630 	sp->set_prio[1] = st->act.set_prio[1];
5632 }
5633 
5634 static void
5635 pf_tbladdr_copyout(struct pf_addr_wrap *aw)
5636 {
5637 	struct pfr_ktable *kt;
5638 
5639 	KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type));
5640 
5641 	kt = aw->p.tbl;
5642 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
5643 		kt = kt->pfrkt_root;
5644 	aw->p.tbl = NULL;
5645 	aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
5646 		kt->pfrkt_cnt : -1;
5647 }
5648 
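/*
 * Pack an array of counters into a child nvlist as three parallel
 * arrays ("counters", "names" and "ids") so userland can match values
 * to names without hardcoding indices.
 */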
5649 static int
5650 pf_add_status_counters(nvlist_t *nvl, const char *name, counter_u64_t *counters,
5651     size_t number, char **names)
5652 {
5653 	nvlist_t        *nvc;
5654 
5655 	nvc = nvlist_create(0);
5656 	if (nvc == NULL)
5657 		return (ENOMEM);
5658 
5659 	for (int i = 0; i < number; i++) {
5660 		nvlist_append_number_array(nvc, "counters",
5661 		    counter_u64_fetch(counters[i]));
5662 		nvlist_append_string_array(nvc, "names",
5663 		    names[i]);
5664 		nvlist_append_number_array(nvc, "ids",
5665 		    i);
5666 	}
5667 	nvlist_add_nvlist(nvl, name, nvc);
5668 	nvlist_destroy(nvc);
5669 
5670 	return (0);
5671 }
5672 
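/*
 * DIOCGETSTATUSNV handler: export the global pf status as a packed
 * nvlist.  The buffer protocol is two-step: nv->size == 0 requests only
 * the required length (returned in nv->len), and a buffer smaller than
 * the packed nvlist fails with ENOSPC so the caller can resize and
 * retry.
 */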
5673 static int
5674 pf_getstatus(struct pfioc_nv *nv)
5675 {
5676 	nvlist_t        *nvl = NULL, *nvc = NULL;
5677 	void            *nvlpacked = NULL;
5678 	int              error;
5679 	struct pf_status s;
5680 	char *pf_reasons[PFRES_MAX+1] = PFRES_NAMES;
5681 	char *pf_lcounter[KLCNT_MAX+1] = KLCNT_NAMES;
5682 	char *pf_fcounter[FCNT_MAX+1] = FCNT_NAMES;
5683 	PF_RULES_RLOCK_TRACKER;
5684 
5685 #define ERROUT(x)      ERROUT_FUNCTION(errout, x)
5686 
5687 	PF_RULES_RLOCK();
5688 
5689 	nvl = nvlist_create(0);
5690 	if (nvl == NULL)
5691 		ERROUT(ENOMEM);
5692 
5693 	nvlist_add_bool(nvl, "running", V_pf_status.running);
5694 	nvlist_add_number(nvl, "since", V_pf_status.since);
5695 	nvlist_add_number(nvl, "debug", V_pf_status.debug);
5696 	nvlist_add_number(nvl, "hostid", V_pf_status.hostid);
5697 	nvlist_add_number(nvl, "states", V_pf_status.states);
5698 	nvlist_add_number(nvl, "src_nodes", V_pf_status.src_nodes);
5699 	nvlist_add_number(nvl, "reass", V_pf_status.reass);
5700 	nvlist_add_bool(nvl, "syncookies_active",
5701 	    V_pf_status.syncookies_active);
5702 	nvlist_add_number(nvl, "halfopen_states", V_pf_status.states_halfopen);
5703 
5704 	/* counters */
5705 	error = pf_add_status_counters(nvl, "counters", V_pf_status.counters,
5706 	    PFRES_MAX, pf_reasons);
5707 	if (error != 0)
5708 		ERROUT(error);
5709 
5710 	/* lcounters */
5711 	error = pf_add_status_counters(nvl, "lcounters", V_pf_status.lcounters,
5712 	    KLCNT_MAX, pf_lcounter);
5713 	if (error != 0)
5714 		ERROUT(error);
5715 
5716 	/* fcounters */
5717 	nvc = nvlist_create(0);
5718 	if (nvc == NULL)
5719 		ERROUT(ENOMEM);
5720 
5721 	for (int i = 0; i < FCNT_MAX; i++) {
5722 		nvlist_append_number_array(nvc, "counters",
5723 		    pf_counter_u64_fetch(&V_pf_status.fcounters[i]));
5724 		nvlist_append_string_array(nvc, "names",
5725 		    pf_fcounter[i]);
5726 		nvlist_append_number_array(nvc, "ids",
5727 		    i);
5728 	}
5729 	nvlist_add_nvlist(nvl, "fcounters", nvc);
5730 	nvlist_destroy(nvc);
5731 	nvc = NULL;
5732 
5733 	/* scounters */
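	/* Source-node counters reuse the fcounter name strings
	 * ("searches", "inserts", "removals"), hence pf_fcounter is
	 * passed again here. */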
5734 	error = pf_add_status_counters(nvl, "scounters", V_pf_status.scounters,
5735 	    SCNT_MAX, pf_fcounter);
5736 	if (error != 0)
5737 		ERROUT(error);
5738 
5739 	nvlist_add_string(nvl, "ifname", V_pf_status.ifname);
5740 	nvlist_add_binary(nvl, "chksum", V_pf_status.pf_chksum,
5741 	    PF_MD5_DIGEST_LENGTH);
5742 
5743 	pfi_update_status(V_pf_status.ifname, &s);
5744 
5745 	/* pcounters / bcounters */
5746 	for (int i = 0; i < 2; i++) {
5747 		for (int j = 0; j < 2; j++) {
5748 			for (int k = 0; k < 2; k++) {
5749 				nvlist_append_number_array(nvl, "pcounters",
5750 				    s.pcounters[i][j][k]);
5751 			}
5752 			nvlist_append_number_array(nvl, "bcounters",
5753 			    s.bcounters[i][j]);
5754 		}
5755 	}
5756 
5757 	nvlpacked = nvlist_pack(nvl, &nv->len);
5758 	if (nvlpacked == NULL)
5759 		ERROUT(ENOMEM);
5760 
5761 	if (nv->size == 0)
5762 		ERROUT(0);
5763 	else if (nv->size < nv->len)
5764 		ERROUT(ENOSPC);
5765 
5766 	PF_RULES_RUNLOCK();
5767 	error = copyout(nvlpacked, nv->data, nv->len);
5768 	goto done;
5769 
5770 #undef ERROUT
5771 errout:
5772 	PF_RULES_RUNLOCK();
5773 done:
5774 	free(nvlpacked, M_NVLIST);
5775 	nvlist_destroy(nvc);
5776 	nvlist_destroy(nvl);
5777 
5778 	return (error);
5779 }
5780 
5781 /*
5782  * XXX - Check for version mismatch!!!
5783  */
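/*
 * Expire and unlink every state.  pf_unlink_state() releases the hash
 * row lock, so the walk restarts from the top of the row after every
 * unlink (the relock dance below).  PFSTATE_NOSYNC keeps pfsync from
 * sending a delete message for each individual state.
 */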
5784 static void
5785 pf_clear_all_states(void)
5786 {
5787 	struct pf_kstate	*s;
5788 	u_int i;
5789 
5790 	for (i = 0; i <= pf_hashmask; i++) {
5791 		struct pf_idhash *ih = &V_pf_idhash[i];
5792 relock:
5793 		PF_HASHROW_LOCK(ih);
5794 		LIST_FOREACH(s, &ih->states, entry) {
5795 			s->timeout = PFTM_PURGE;
5796 			/* Don't send out individual delete messages. */
5797 			s->state_flags |= PFSTATE_NOSYNC;
5798 			pf_unlink_state(s);
5799 			goto relock;
5800 		}
5801 		PF_HASHROW_UNLOCK(ih);
5802 	}
5803 }
5804 
5805 static int
5806 pf_clear_tables(void)
5807 {
5808 	struct pfioc_table io;
5809 	int error;
5810 
5811 	bzero(&io, sizeof(io));
5812 	io.pfrio_flags |= PFR_FLAG_ALLRSETS;
5813 
5814 	error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
5815 	    io.pfrio_flags);
5816 
5817 	return (error);
5818 }
5819 
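/*
 * Disconnect states from their source nodes and mark the nodes for
 * purging.  With n == NULL all source nodes are cleared; otherwise only
 * the given node, whose hash slot the caller is expected to hold
 * locked.  Setting expire to 1 makes the node look long expired so the
 * purge scan frees it.
 */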
5820 static void
5821 pf_clear_srcnodes(struct pf_ksrc_node *n)
5822 {
5823 	struct pf_kstate *s;
5824 	int i;
5825 
5826 	for (i = 0; i <= pf_hashmask; i++) {
5827 		struct pf_idhash *ih = &V_pf_idhash[i];
5828 
5829 		PF_HASHROW_LOCK(ih);
5830 		LIST_FOREACH(s, &ih->states, entry) {
5831 			if (n == NULL || n == s->src_node)
5832 				s->src_node = NULL;
5833 			if (n == NULL || n == s->nat_src_node)
5834 				s->nat_src_node = NULL;
5835 		}
5836 		PF_HASHROW_UNLOCK(ih);
5837 	}
5838 
5839 	if (n == NULL) {
5840 		struct pf_srchash *sh;
5841 
5842 		for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
5843 		    i++, sh++) {
5844 			PF_HASHROW_LOCK(sh);
5845 			LIST_FOREACH(n, &sh->nodes, entry) {
5846 				n->expire = 1;
5847 				n->states = 0;
5848 			}
5849 			PF_HASHROW_UNLOCK(sh);
5850 		}
5851 	} else {
5852 		/* XXX: hash slot should already be locked here. */
5853 		n->expire = 1;
5854 		n->states = 0;
5855 	}
5856 }
5857 
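/*
 * Kill the source nodes matching the src/dst filter in psnk: matching
 * nodes are unlinked onto a local kill list and flagged via expire == 1,
 * states still pointing at a flagged node are detached, and the list is
 * finally freed with the number of nodes killed reported back.
 */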
5858 static void
5859 pf_kill_srcnodes(struct pfioc_src_node_kill *psnk)
5860 {
5861 	struct pf_ksrc_node_list	 kill;
5862 
5863 	LIST_INIT(&kill);
5864 	for (int i = 0; i <= pf_srchashmask; i++) {
5865 		struct pf_srchash *sh = &V_pf_srchash[i];
5866 		struct pf_ksrc_node *sn, *tmp;
5867 
5868 		PF_HASHROW_LOCK(sh);
5869 		LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp)
5870 			if (PF_MATCHA(psnk->psnk_src.neg,
5871 			      &psnk->psnk_src.addr.v.a.addr,
5872 			      &psnk->psnk_src.addr.v.a.mask,
5873 			      &sn->addr, sn->af) &&
5874 			    PF_MATCHA(psnk->psnk_dst.neg,
5875 			      &psnk->psnk_dst.addr.v.a.addr,
5876 			      &psnk->psnk_dst.addr.v.a.mask,
5877 			      &sn->raddr, sn->af)) {
5878 				pf_unlink_src_node(sn);
5879 				LIST_INSERT_HEAD(&kill, sn, entry);
5880 				sn->expire = 1;
5881 			}
5882 		PF_HASHROW_UNLOCK(sh);
5883 	}
5884 
5885 	for (int i = 0; i <= pf_hashmask; i++) {
5886 		struct pf_idhash *ih = &V_pf_idhash[i];
5887 		struct pf_kstate *s;
5888 
5889 		PF_HASHROW_LOCK(ih);
5890 		LIST_FOREACH(s, &ih->states, entry) {
5891 			if (s->src_node && s->src_node->expire == 1)
5892 				s->src_node = NULL;
5893 			if (s->nat_src_node && s->nat_src_node->expire == 1)
5894 				s->nat_src_node = NULL;
5895 		}
5896 		PF_HASHROW_UNLOCK(ih);
5897 	}
5898 
5899 	psnk->psnk_killed = pf_free_src_nodes(&kill);
5900 }
5901 
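/*
 * Parse a { "keep_counters": bool } nvlist from userland and update
 * V_pf_status.keep_counters accordingly (whether rule counters survive
 * a ruleset replacement).
 */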
5902 static int
5903 pf_keepcounters(struct pfioc_nv *nv)
5904 {
5905 	nvlist_t	*nvl = NULL;
5906 	void		*nvlpacked = NULL;
5907 	int		 error = 0;
5908 
5909 #define	ERROUT(x)	ERROUT_FUNCTION(on_error, x)
5910 
5911 	if (nv->len > pf_ioctl_maxcount)
5912 		ERROUT(ENOMEM);
5913 
5914 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
5915 	if (nvlpacked == NULL)
5916 		ERROUT(ENOMEM);
5917 
5918 	error = copyin(nv->data, nvlpacked, nv->len);
5919 	if (error)
5920 		ERROUT(error);
5921 
5922 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
5923 	if (nvl == NULL)
5924 		ERROUT(EBADMSG);
5925 
5926 	if (!nvlist_exists_bool(nvl, "keep_counters"))
5927 		ERROUT(EBADMSG);
5928 
5929 	V_pf_status.keep_counters = nvlist_get_bool(nvl, "keep_counters");
5930 
5931 on_error:
5932 	nvlist_destroy(nvl);
5933 	free(nvlpacked, M_NVLIST);
5934 	return (error);
5935 }
5936 
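/*
 * Unlink states, optionally restricted to one interface.  When
 * psk_kill_match is set, a reversed key (addresses and ports swapped,
 * direction flipped) is built for each state and handed to
 * pf_kill_matching_state() so any counterpart state (e.g. the NATed
 * side) is killed as well.  Returns the number of states killed;
 * pfsync is told about the bulk clear once at the end rather than per
 * state.
 */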
5937 static unsigned int
5938 pf_clear_states(const struct pf_kstate_kill *kill)
5939 {
5940 	struct pf_state_key_cmp	 match_key;
5941 	struct pf_kstate	*s;
5942 	struct pfi_kkif	*kif;
5943 	int		 idx;
5944 	unsigned int	 killed = 0, dir;
5945 
5946 	for (unsigned int i = 0; i <= pf_hashmask; i++) {
5947 		struct pf_idhash *ih = &V_pf_idhash[i];
5948 
5949 relock_DIOCCLRSTATES:
5950 		PF_HASHROW_LOCK(ih);
5951 		LIST_FOREACH(s, &ih->states, entry) {
5952 			/* For floating states look at the original kif. */
5953 			kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;
5954 
5955 			if (kill->psk_ifname[0] &&
5956 			    strcmp(kill->psk_ifname,
5957 			    kif->pfik_name))
5958 				continue;
5959 
5960 			if (kill->psk_kill_match) {
5961 				bzero(&match_key, sizeof(match_key));
5962 
5963 				if (s->direction == PF_OUT) {
5964 					dir = PF_IN;
5965 					idx = PF_SK_STACK;
5966 				} else {
5967 					dir = PF_OUT;
5968 					idx = PF_SK_WIRE;
5969 				}
5970 
5971 				match_key.af = s->key[idx]->af;
5972 				match_key.proto = s->key[idx]->proto;
5973 				PF_ACPY(&match_key.addr[0],
5974 				    &s->key[idx]->addr[1], match_key.af);
5975 				match_key.port[0] = s->key[idx]->port[1];
5976 				PF_ACPY(&match_key.addr[1],
5977 				    &s->key[idx]->addr[0], match_key.af);
5978 				match_key.port[1] = s->key[idx]->port[0];
5979 			}
5980 
5981 			/* Don't send out individual delete messages. */
5985 			s->state_flags |= PFSTATE_NOSYNC;
5986 			pf_unlink_state(s);
5987 			killed++;
5988 
5989 			if (kill->psk_kill_match)
5990 				killed += pf_kill_matching_state(&match_key,
5991 				    dir);
5992 
5993 			goto relock_DIOCCLRSTATES;
5994 		}
5995 		PF_HASHROW_UNLOCK(ih);
5996 	}
5997 
5998 	if (V_pfsync_clear_states_ptr != NULL)
5999 		V_pfsync_clear_states_ptr(V_pf_status.hostid, kill->psk_ifname);
6000 
6001 	return (killed);
6002 }
6003 
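/*
 * Kill states by the given criteria: a non-zero state ID selects
 * exactly one state (creatorid defaults to our hostid), otherwise
 * every hash row is walked and pf_killstates_row() applies the full
 * match criteria row by row.
 */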
6004 static void
6005 pf_killstates(struct pf_kstate_kill *kill, unsigned int *killed)
6006 {
6007 	struct pf_kstate	*s;
6008 
6009 	if (kill->psk_pfcmp.id) {
6010 		if (kill->psk_pfcmp.creatorid == 0)
6011 			kill->psk_pfcmp.creatorid = V_pf_status.hostid;
6012 		if ((s = pf_find_state_byid(kill->psk_pfcmp.id,
6013 		    kill->psk_pfcmp.creatorid))) {
6014 			pf_unlink_state(s);
6015 			*killed = 1;
6016 		}
6017 		return;
6018 	}
6019 
6020 	for (unsigned int i = 0; i <= pf_hashmask; i++)
6021 		*killed += pf_killstates_row(kill, &V_pf_idhash[i]);
6024 }
6025 
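/*
 * Nvlist wrapper around pf_killstates(): unpack the kill criteria from
 * userland, perform the kill, and return the "killed" count using the
 * same two-pass size negotiation as pf_getstatus() above.
 */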
6026 static int
6027 pf_killstates_nv(struct pfioc_nv *nv)
6028 {
6029 	struct pf_kstate_kill	 kill;
6030 	nvlist_t		*nvl = NULL;
6031 	void			*nvlpacked = NULL;
6032 	int			 error = 0;
6033 	unsigned int		 killed = 0;
6034 
6035 #define ERROUT(x)	ERROUT_FUNCTION(on_error, x)
6036 
6037 	if (nv->len > pf_ioctl_maxcount)
6038 		ERROUT(ENOMEM);
6039 
6040 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6041 	if (nvlpacked == NULL)
6042 		ERROUT(ENOMEM);
6043 
6044 	error = copyin(nv->data, nvlpacked, nv->len);
6045 	if (error)
6046 		ERROUT(error);
6047 
6048 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6049 	if (nvl == NULL)
6050 		ERROUT(EBADMSG);
6051 
6052 	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
6053 	if (error)
6054 		ERROUT(error);
6055 
6056 	pf_killstates(&kill, &killed);
6057 
6058 	free(nvlpacked, M_NVLIST);
6059 	nvlpacked = NULL;
6060 	nvlist_destroy(nvl);
6061 	nvl = nvlist_create(0);
6062 	if (nvl == NULL)
6063 		ERROUT(ENOMEM);
6064 
6065 	nvlist_add_number(nvl, "killed", killed);
6066 
6067 	nvlpacked = nvlist_pack(nvl, &nv->len);
6068 	if (nvlpacked == NULL)
6069 		ERROUT(ENOMEM);
6070 
6071 	if (nv->size == 0)
6072 		ERROUT(0);
6073 	else if (nv->size < nv->len)
6074 		ERROUT(ENOSPC);
6075 
6076 	error = copyout(nvlpacked, nv->data, nv->len);
6077 
6078 on_error:
6079 	nvlist_destroy(nvl);
6080 	free(nvlpacked, M_NVLIST);
6081 	return (error);
6082 }
6083 
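/*
 * Nvlist wrapper around pf_clear_states(); differs from
 * pf_killstates_nv() in that the clear path also notifies pfsync of
 * the bulk removal.
 */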
6084 static int
6085 pf_clearstates_nv(struct pfioc_nv *nv)
6086 {
6087 	struct pf_kstate_kill	 kill;
6088 	nvlist_t		*nvl = NULL;
6089 	void			*nvlpacked = NULL;
6090 	int			 error = 0;
6091 	unsigned int		 killed;
6092 
6093 #define ERROUT(x)	ERROUT_FUNCTION(on_error, x)
6094 
6095 	if (nv->len > pf_ioctl_maxcount)
6096 		ERROUT(ENOMEM);
6097 
6098 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6099 	if (nvlpacked == NULL)
6100 		ERROUT(ENOMEM);
6101 
6102 	error = copyin(nv->data, nvlpacked, nv->len);
6103 	if (error)
6104 		ERROUT(error);
6105 
6106 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6107 	if (nvl == NULL)
6108 		ERROUT(EBADMSG);
6109 
6110 	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
6111 	if (error)
6112 		ERROUT(error);
6113 
6114 	killed = pf_clear_states(&kill);
6115 
6116 	free(nvlpacked, M_NVLIST);
6117 	nvlpacked = NULL;
6118 	nvlist_destroy(nvl);
6119 	nvl = nvlist_create(0);
6120 	if (nvl == NULL)
6121 		ERROUT(ENOMEM);
6122 
6123 	nvlist_add_number(nvl, "killed", killed);
6124 
6125 	nvlpacked = nvlist_pack(nvl, &nv->len);
6126 	if (nvlpacked == NULL)
6127 		ERROUT(ENOMEM);
6128 
6129 	if (nv->size == 0)
6130 		ERROUT(0);
6131 	else if (nv->size < nv->len)
6132 		ERROUT(ENOSPC);
6133 
6134 	error = copyout(nvlpacked, nv->data, nv->len);
6135 
6136 #undef ERROUT
6137 on_error:
6138 	nvlist_destroy(nvl);
6139 	free(nvlpacked, M_NVLIST);
6140 	return (error);
6141 }
6142 
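/*
 * Look up a single state by (id, creatorid) and return it to userland
 * as a packed nvlist.  The state comes back locked from the lookup and
 * stays locked until the copyout has finished.
 */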
6143 static int
6144 pf_getstate(struct pfioc_nv *nv)
6145 {
6146 	nvlist_t		*nvl = NULL, *nvls;
6147 	void			*nvlpacked = NULL;
6148 	struct pf_kstate	*s = NULL;
6149 	int			 error = 0;
6150 	uint64_t		 id, creatorid;
6151 
6152 #define ERROUT(x)	ERROUT_FUNCTION(errout, x)
6153 
6154 	if (nv->len > pf_ioctl_maxcount)
6155 		ERROUT(ENOMEM);
6156 
6157 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6158 	if (nvlpacked == NULL)
6159 		ERROUT(ENOMEM);
6160 
6161 	error = copyin(nv->data, nvlpacked, nv->len);
6162 	if (error)
6163 		ERROUT(error);
6164 
6165 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6166 	if (nvl == NULL)
6167 		ERROUT(EBADMSG);
6168 
6169 	PFNV_CHK(pf_nvuint64(nvl, "id", &id));
6170 	PFNV_CHK(pf_nvuint64(nvl, "creatorid", &creatorid));
6171 
6172 	s = pf_find_state_byid(id, creatorid);
6173 	if (s == NULL)
6174 		ERROUT(ENOENT);
6175 
6176 	free(nvlpacked, M_NVLIST);
6177 	nvlpacked = NULL;
6178 	nvlist_destroy(nvl);
6179 	nvl = nvlist_create(0);
6180 	if (nvl == NULL)
6181 		ERROUT(ENOMEM);
6182 
6183 	nvls = pf_state_to_nvstate(s);
6184 	if (nvls == NULL)
6185 		ERROUT(ENOMEM);
6186 
6187 	nvlist_add_nvlist(nvl, "state", nvls);
6188 	nvlist_destroy(nvls);
6189 
6190 	nvlpacked = nvlist_pack(nvl, &nv->len);
6191 	if (nvlpacked == NULL)
6192 		ERROUT(ENOMEM);
6193 
6194 	if (nv->size == 0)
6195 		ERROUT(0);
6196 	else if (nv->size < nv->len)
6197 		ERROUT(ENOSPC);
6198 
6199 	error = copyout(nvlpacked, nv->data, nv->len);
6200 
6201 #undef ERROUT
6202 errout:
6203 	if (s != NULL)
6204 		PF_STATE_UNLOCK(s);
6205 	free(nvlpacked, M_NVLIST);
6206 	nvlist_destroy(nvl);
6207 	return (error);
6208 }
6209 
6210 /*
6211  * XXX - Check for version mismatch!!!
6212  *
6213  * Duplicate pfctl -Fa operation to get rid of as much as we can.
6214  */
6217 static int
6218 shutdown_pf(void)
6219 {
6220 	int error = 0;
6221 	u_int32_t t[5];
6222 	char nn = '\0';
6223 	struct pf_kanchor *anchor;
6224 	struct pf_keth_anchor *eth_anchor;
6225 	int rs_num;
6226 
6227 	do {
6228 		/* Unlink rules of all user-defined anchors */
6229 		RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors) {
6230 			/* Wildcard-based anchors may lack an explicit anchor
6231 			 * rule, or may have been left without any rules at
6232 			 * all.  Either way anchor->refcnt ends up at 0, which
6233 			 * the rest of the logic does not expect. */
6234 			if (anchor->refcnt == 0)
6235 				anchor->refcnt = 1;
6236 			for (rs_num = 0; rs_num < PF_RULESET_MAX; ++rs_num) {
6237 				if ((error = pf_begin_rules(&t[rs_num], rs_num,
6238 				    anchor->path)) != 0) {
6239 					DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: "
6240 					    "anchor.path=%s rs_num=%d\n",
6241 					    anchor->path, rs_num));
6242 					goto error;	/* XXX: rollback? */
6243 				}
6244 			}
6245 			for (rs_num = 0; rs_num < PF_RULESET_MAX; ++rs_num) {
6246 				error = pf_commit_rules(t[rs_num], rs_num,
6247 				    anchor->path);
6248 				MPASS(error == 0);
6249 			}
6250 		}
6251 
6252 		/* Unlink rules of all user-defined ether anchors */
6253 		RB_FOREACH(eth_anchor, pf_keth_anchor_global,
6254 		    &V_pf_keth_anchors) {
6255 			/* Wildcard-based anchors may lack an explicit anchor
6256 			 * rule, or may have been left without any rules at
6257 			 * all.  Either way anchor->refcnt ends up at 0, which
6258 			 * the rest of the logic does not expect. */
6259 			if (eth_anchor->refcnt == 0)
6260 				eth_anchor->refcnt = 1;
6261 			if ((error = pf_begin_eth(&t[0], eth_anchor->path))
6262 			    != 0) {
6263 				DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: eth "
6264 				    "anchor.path=%s\n", eth_anchor->path));
6265 				goto error;
6266 			}
6267 			error = pf_commit_eth(t[0], eth_anchor->path);
6268 			MPASS(error == 0);
6269 		}
6270 
6271 		if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
6272 		    != 0) {
6273 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
6274 			break;
6275 		}
6276 		if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
6277 		    != 0) {
6278 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
6279 			break;		/* XXX: rollback? */
6280 		}
6281 		if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
6282 		    != 0) {
6283 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
6284 			break;		/* XXX: rollback? */
6285 		}
6286 		if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
6287 		    != 0) {
6288 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
6289 			break;		/* XXX: rollback? */
6290 		}
6291 		if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
6292 		    != 0) {
6293 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
6294 			break;		/* XXX: rollback? */
6295 		}
6296 
6297 		error = pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
6298 		MPASS(error == 0);
6299 		error = pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
6300 		MPASS(error == 0);
6301 		error = pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
6302 		MPASS(error == 0);
6303 		error = pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
6304 		MPASS(error == 0);
6305 		error = pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
6306 		MPASS(error == 0);
6307 
6308 		if ((error = pf_clear_tables()) != 0)
6309 			break;
6310 
6311 		if ((error = pf_begin_eth(&t[0], &nn)) != 0) {
6312 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: eth\n"));
6313 			break;
6314 		}
6315 		error = pf_commit_eth(t[0], &nn);
6316 		MPASS(error == 0);
6317 
6318 #ifdef ALTQ
6319 		if ((error = pf_begin_altq(&t[0])) != 0) {
6320 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
6321 			break;
6322 		}
6323 		pf_commit_altq(t[0]);
6324 #endif
6325 
6326 		pf_clear_all_states();
6327 
6328 		pf_clear_srcnodes(NULL);
6329 
6330 		/* Status does not use malloc'ed memory so no cleanup is needed;
6331 		 * fingerprints and interfaces have their own cleanup code. */
6332 	} while (0);
6333 
6334 error:
6335 	return (error);
6336 }
6337 
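/*
 * Map a pf verdict onto a pfil return value: PF_PASS with the mbuf
 * gone means pf consumed the packet (e.g. dummynet or route-to took
 * it), PF_PASS with the mbuf intact lets the packet continue, and any
 * other verdict drops it, freeing the mbuf if pf has not already done
 * so.
 */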
6338 static pfil_return_t
6339 pf_check_return(int chk, struct mbuf **m)
6340 {
6341 
6342 	switch (chk) {
6343 	case PF_PASS:
6344 		if (*m == NULL)
6345 			return (PFIL_CONSUMED);
6346 		else
6347 			return (PFIL_PASS);
6349 	default:
6350 		if (*m != NULL) {
6351 			m_freem(*m);
6352 			*m = NULL;
6353 		}
6354 		return (PFIL_DROPPED);
6355 	}
6356 }
6357 
6358 static pfil_return_t
6359 pf_eth_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
6360     void *ruleset __unused, struct inpcb *inp)
6361 {
6362 	int chk;
6363 
6364 	chk = pf_test_eth(PF_IN, flags, ifp, m, inp);
6365 
6366 	return (pf_check_return(chk, m));
6367 }
6368 
6369 static pfil_return_t
6370 pf_eth_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
6371     void *ruleset __unused, struct inpcb *inp)
6372 {
6373 	int chk;
6374 
6375 	chk = pf_test_eth(PF_OUT, flags, ifp, m, inp);
6376 
6377 	return (pf_check_return(chk, m));
6378 }
6379 
6380 #ifdef INET
6381 static pfil_return_t
6382 pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
6383     void *ruleset __unused, struct inpcb *inp)
6384 {
6385 	int chk;
6386 
6387 	chk = pf_test(PF_IN, flags, ifp, m, inp, NULL);
6388 
6389 	return (pf_check_return(chk, m));
6390 }
6391 
6392 static pfil_return_t
6393 pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
6394     void *ruleset __unused,  struct inpcb *inp)
6395 {
6396 	int chk;
6397 
6398 	chk = pf_test(PF_OUT, flags, ifp, m, inp, NULL);
6399 
6400 	return (pf_check_return(chk, m));
6401 }
6402 #endif
6403 
6404 #ifdef INET6
6405 static pfil_return_t
6406 pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags,
6407     void *ruleset __unused,  struct inpcb *inp)
6408 {
6409 	int chk;
6410 
6411 	/*
6412 	 * For loopback traffic IPv6 uses the real interface in order to
6413 	 * support scoped addresses.  To support stateful filtering we have
6414 	 * to change this to lo0, as is already the case for IPv4.
6415 	 */
6416 	CURVNET_SET(ifp->if_vnet);
6417 	chk = pf_test6(PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp,
6418 	    m, inp, NULL);
6419 	CURVNET_RESTORE();
6420 
6421 	return (pf_check_return(chk, m));
6422 }
6423 
6424 static pfil_return_t
6425 pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags,
6426     void *ruleset __unused,  struct inpcb *inp)
6427 {
6428 	int chk;
6429 
6430 	CURVNET_SET(ifp->if_vnet);
6431 	chk = pf_test6(PF_OUT, flags, ifp, m, inp, NULL);
6432 	CURVNET_RESTORE();
6433 
6434 	return (pf_check_return(chk, m));
6435 }
6436 #endif /* INET6 */
6437 
6438 VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_in_hook);
6439 VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_out_hook);
6440 #define	V_pf_eth_in_hook	VNET(pf_eth_in_hook)
6441 #define	V_pf_eth_out_hook	VNET(pf_eth_out_hook)
6442 
6443 #ifdef INET
6444 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_in_hook);
6445 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_out_hook);
6446 #define	V_pf_ip4_in_hook	VNET(pf_ip4_in_hook)
6447 #define	V_pf_ip4_out_hook	VNET(pf_ip4_out_hook)
6448 #endif
6449 #ifdef INET6
6450 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_in_hook);
6451 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_out_hook);
6452 #define	V_pf_ip6_in_hook	VNET(pf_ip6_in_hook)
6453 #define	V_pf_ip6_out_hook	VNET(pf_ip6_out_hook)
6454 #endif
6455 
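/*
 * Register the Ethernet pf hooks and link them into the link-layer
 * pfil head.  The V_pf_pfil_eth_hooked flag makes this idempotent, so
 * a repeated start request does not register the hooks twice.
 */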
6456 static void
6457 hook_pf_eth(void)
6458 {
6459 	struct pfil_hook_args pha = {
6460 		.pa_version = PFIL_VERSION,
6461 		.pa_modname = "pf",
6462 		.pa_type = PFIL_TYPE_ETHERNET,
6463 	};
6464 	struct pfil_link_args pla = {
6465 		.pa_version = PFIL_VERSION,
6466 	};
6467 	int ret __diagused;
6468 
6469 	if (atomic_load_bool(&V_pf_pfil_eth_hooked))
6470 		return;
6471 
6472 	pha.pa_mbuf_chk = pf_eth_check_in;
6473 	pha.pa_flags = PFIL_IN;
6474 	pha.pa_rulname = "eth-in";
6475 	V_pf_eth_in_hook = pfil_add_hook(&pha);
6476 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6477 	pla.pa_head = V_link_pfil_head;
6478 	pla.pa_hook = V_pf_eth_in_hook;
6479 	ret = pfil_link(&pla);
6480 	MPASS(ret == 0);
6481 	pha.pa_mbuf_chk = pf_eth_check_out;
6482 	pha.pa_flags = PFIL_OUT;
6483 	pha.pa_rulname = "eth-out";
6484 	V_pf_eth_out_hook = pfil_add_hook(&pha);
6485 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6486 	pla.pa_head = V_link_pfil_head;
6487 	pla.pa_hook = V_pf_eth_out_hook;
6488 	ret = pfil_link(&pla);
6489 	MPASS(ret == 0);
6490 
6491 	atomic_store_bool(&V_pf_pfil_eth_hooked, true);
6492 }
6493 
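/*
 * Register the IPv4 and IPv6 pf hooks and link them into the inet and
 * inet6 pfil heads.  With V_pf_filter_local set, the output hooks are
 * additionally linked into the corresponding "local" heads, giving pf
 * an extra pass over local traffic.  Idempotent via V_pf_pfil_hooked.
 */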
6494 static void
6495 hook_pf(void)
6496 {
6497 	struct pfil_hook_args pha = {
6498 		.pa_version = PFIL_VERSION,
6499 		.pa_modname = "pf",
6500 	};
6501 	struct pfil_link_args pla = {
6502 		.pa_version = PFIL_VERSION,
6503 	};
6504 	int ret __diagused;
6505 
6506 	if (atomic_load_bool(&V_pf_pfil_hooked))
6507 		return;
6508 
6509 #ifdef INET
6510 	pha.pa_type = PFIL_TYPE_IP4;
6511 	pha.pa_mbuf_chk = pf_check_in;
6512 	pha.pa_flags = PFIL_IN;
6513 	pha.pa_rulname = "default-in";
6514 	V_pf_ip4_in_hook = pfil_add_hook(&pha);
6515 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6516 	pla.pa_head = V_inet_pfil_head;
6517 	pla.pa_hook = V_pf_ip4_in_hook;
6518 	ret = pfil_link(&pla);
6519 	MPASS(ret == 0);
6520 	pha.pa_mbuf_chk = pf_check_out;
6521 	pha.pa_flags = PFIL_OUT;
6522 	pha.pa_rulname = "default-out";
6523 	V_pf_ip4_out_hook = pfil_add_hook(&pha);
6524 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6525 	pla.pa_head = V_inet_pfil_head;
6526 	pla.pa_hook = V_pf_ip4_out_hook;
6527 	ret = pfil_link(&pla);
6528 	MPASS(ret == 0);
6529 	if (V_pf_filter_local) {
6530 		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6531 		pla.pa_head = V_inet_local_pfil_head;
6532 		pla.pa_hook = V_pf_ip4_out_hook;
6533 		ret = pfil_link(&pla);
6534 		MPASS(ret == 0);
6535 	}
6536 #endif
6537 #ifdef INET6
6538 	pha.pa_type = PFIL_TYPE_IP6;
6539 	pha.pa_mbuf_chk = pf_check6_in;
6540 	pha.pa_flags = PFIL_IN;
6541 	pha.pa_rulname = "default-in6";
6542 	V_pf_ip6_in_hook = pfil_add_hook(&pha);
6543 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6544 	pla.pa_head = V_inet6_pfil_head;
6545 	pla.pa_hook = V_pf_ip6_in_hook;
6546 	ret = pfil_link(&pla);
6547 	MPASS(ret == 0);
6548 	pha.pa_mbuf_chk = pf_check6_out;
6549 	pha.pa_rulname = "default-out6";
6550 	pha.pa_flags = PFIL_OUT;
6551 	V_pf_ip6_out_hook = pfil_add_hook(&pha);
6552 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6553 	pla.pa_head = V_inet6_pfil_head;
6554 	pla.pa_hook = V_pf_ip6_out_hook;
6555 	ret = pfil_link(&pla);
6556 	MPASS(ret == 0);
6557 	if (V_pf_filter_local) {
6558 		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6559 		pla.pa_head = V_inet6_local_pfil_head;
6560 		pla.pa_hook = V_pf_ip6_out_hook;
6561 		ret = pfil_link(&pla);
6562 		MPASS(ret == 0);
6563 	}
6564 #endif
6565 
6566 	atomic_store_bool(&V_pf_pfil_hooked, true);
6567 }
6568 
6569 static void
6570 dehook_pf_eth(void)
6571 {
6572 
6573 	if (!atomic_load_bool(&V_pf_pfil_eth_hooked))
6574 		return;
6575 
6576 	pfil_remove_hook(V_pf_eth_in_hook);
6577 	pfil_remove_hook(V_pf_eth_out_hook);
6578 
6579 	atomic_store_bool(&V_pf_pfil_eth_hooked, false);
6580 }
6581 
6582 static void
6583 dehook_pf(void)
6584 {
6585 
6586 	if (!atomic_load_bool(&V_pf_pfil_hooked))
6587 		return;
6588 
6589 #ifdef INET
6590 	pfil_remove_hook(V_pf_ip4_in_hook);
6591 	pfil_remove_hook(V_pf_ip4_out_hook);
6592 #endif
6593 #ifdef INET6
6594 	pfil_remove_hook(V_pf_ip6_in_hook);
6595 	pfil_remove_hook(V_pf_ip6_out_hook);
6596 #endif
6597 
6598 	atomic_store_bool(&V_pf_pfil_hooked, false);
6599 }
6600 
6601 static void
6602 pf_load_vnet(void)
6603 {
6604 	V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname),
6605 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
6606 
6607 	rm_init_flags(&V_pf_rules_lock, "pf rulesets", RM_RECURSE);
6608 	sx_init(&V_pf_ioctl_lock, "pf ioctl");
6609 
6610 	pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
6611 	    PF_RULE_TAG_HASH_SIZE_DEFAULT);
6612 #ifdef ALTQ
6613 	pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize,
6614 	    PF_QUEUE_TAG_HASH_SIZE_DEFAULT);
6615 #endif
6616 
6617 	V_pf_keth = &V_pf_main_keth_anchor.ruleset;
6618 
6619 	pfattach_vnet();
6620 	V_pf_vnet_active = 1;
6621 }
6622 
6623 static int
6624 pf_load(void)
6625 {
6626 	int error;
6627 
6628 	sx_init(&pf_end_lock, "pf end thread");
6629 
6630 	pf_mtag_initialize();
6631 
6632 	pf_dev = make_dev(&pf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME);
6633 	if (pf_dev == NULL)
6634 		return (ENOMEM);
6635 
6636 	pf_end_threads = 0;
6637 	error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0, "pf purge");
6638 	if (error != 0)
6639 		return (error);
6640 
6641 	pfi_initialize();
6642 
6643 	return (0);
6644 }
6645 
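/*
 * Tear down the per-vnet pf instance: stop filtering, flush the
 * rulesets via shutdown_pf(), drain pending epoch callbacks, then
 * destroy the software interrupt and the remaining per-vnet resources.
 * Counters are freed last because the shutdown path still updates
 * them.
 */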
6646 static void
6647 pf_unload_vnet(void)
6648 {
6649 	int ret __diagused;
6650 
6651 	V_pf_vnet_active = 0;
6652 	V_pf_status.running = 0;
6653 	dehook_pf();
6654 	dehook_pf_eth();
6655 
6656 	PF_RULES_WLOCK();
6657 	pf_syncookies_cleanup();
6658 	shutdown_pf();
6659 	PF_RULES_WUNLOCK();
6660 
6661 	/* Make sure we've cleaned up ethernet rules before we continue. */
6662 	NET_EPOCH_DRAIN_CALLBACKS();
6663 
6664 	ret = swi_remove(V_pf_swi_cookie);
6665 	MPASS(ret == 0);
6666 	ret = intr_event_destroy(V_pf_swi_ie);
6667 	MPASS(ret == 0);
6668 
6669 	pf_unload_vnet_purge();
6670 
6671 	pf_normalize_cleanup();
6672 	PF_RULES_WLOCK();
6673 	pfi_cleanup_vnet();
6674 	PF_RULES_WUNLOCK();
6675 	pfr_cleanup();
6676 	pf_osfp_flush();
6677 	pf_cleanup();
6678 	if (IS_DEFAULT_VNET(curvnet))
6679 		pf_mtag_cleanup();
6680 
6681 	pf_cleanup_tagset(&V_pf_tags);
6682 #ifdef ALTQ
6683 	pf_cleanup_tagset(&V_pf_qids);
6684 #endif
6685 	uma_zdestroy(V_pf_tag_z);
6686 
6687 #ifdef PF_WANT_32_TO_64_COUNTER
6688 	PF_RULES_WLOCK();
6689 	LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist);
6690 
6691 	MPASS(LIST_EMPTY(&V_pf_allkiflist));
6692 	MPASS(V_pf_allkifcount == 0);
6693 
6694 	LIST_REMOVE(&V_pf_default_rule, allrulelist);
6695 	V_pf_allrulecount--;
6696 	LIST_REMOVE(V_pf_rulemarker, allrulelist);
6697 
6698 	/*
6699 	 * There are known pf rule leaks when running the test suite.
6700 	 */
6701 #ifdef notyet
6702 	MPASS(LIST_EMPTY(&V_pf_allrulelist));
6703 	MPASS(V_pf_allrulecount == 0);
6704 #endif
6705 
6706 	PF_RULES_WUNLOCK();
6707 
6708 	free(V_pf_kifmarker, PFI_MTYPE);
6709 	free(V_pf_rulemarker, M_PFRULE);
6710 #endif
6711 
6712 	/* Free counters last as we updated them during shutdown. */
6713 	pf_counter_u64_deinit(&V_pf_default_rule.evaluations);
6714 	for (int i = 0; i < 2; i++) {
6715 		pf_counter_u64_deinit(&V_pf_default_rule.packets[i]);
6716 		pf_counter_u64_deinit(&V_pf_default_rule.bytes[i]);
6717 	}
6718 	counter_u64_free(V_pf_default_rule.states_cur);
6719 	counter_u64_free(V_pf_default_rule.states_tot);
6720 	counter_u64_free(V_pf_default_rule.src_nodes);
6721 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, V_pf_default_rule.timestamp);
6722 
6723 	for (int i = 0; i < PFRES_MAX; i++)
6724 		counter_u64_free(V_pf_status.counters[i]);
6725 	for (int i = 0; i < KLCNT_MAX; i++)
6726 		counter_u64_free(V_pf_status.lcounters[i]);
6727 	for (int i = 0; i < FCNT_MAX; i++)
6728 		pf_counter_u64_deinit(&V_pf_status.fcounters[i]);
6729 	for (int i = 0; i < SCNT_MAX; i++)
6730 		counter_u64_free(V_pf_status.scounters[i]);
6731 
6732 	rm_destroy(&V_pf_rules_lock);
6733 	sx_destroy(&V_pf_ioctl_lock);
6734 }
6735 
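/*
 * Module-wide teardown: signal the purge thread by setting
 * pf_end_threads to 1 and sleep until it acknowledges by bumping the
 * count to 2, then release the global resources.
 */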
6736 static void
6737 pf_unload(void)
6738 {
6739 
6740 	sx_xlock(&pf_end_lock);
6741 	pf_end_threads = 1;
6742 	while (pf_end_threads < 2) {
6743 		wakeup_one(pf_purge_thread);
6744 		sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0);
6745 	}
6746 	sx_xunlock(&pf_end_lock);
6747 
6748 	pf_nl_unregister();
6749 
6750 	if (pf_dev != NULL)
6751 		destroy_dev(pf_dev);
6752 
6753 	pfi_cleanup();
6754 
6755 	sx_destroy(&pf_end_lock);
6756 }
6757 
6758 static void
6759 vnet_pf_init(void *unused __unused)
6760 {
6761 
6762 	pf_load_vnet();
6763 }
6764 VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
6765     vnet_pf_init, NULL);
6766 
6767 static void
6768 vnet_pf_uninit(const void *unused __unused)
6769 {
6770 
6771 	pf_unload_vnet();
6772 }
6773 SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL);
6774 VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
6775     vnet_pf_uninit, NULL);
6776 
6777 static int
6778 pf_modevent(module_t mod, int type, void *data)
6779 {
6780 	int error = 0;
6781 
6782 	switch (type) {
6783 	case MOD_LOAD:
6784 		error = pf_load();
6785 		pf_nl_register();
6786 		break;
6787 	case MOD_UNLOAD:
6788 		/* Handled in SYSUNINIT(pf_unload) to ensure it runs after
6789 		 * all of the vnet_pf_uninit()s. */
6790 		break;
6791 	default:
6792 		error = EINVAL;
6793 		break;
6794 	}
6795 
6796 	return (error);
6797 }
6798 
6799 static moduledata_t pf_mod = {
6800 	"pf",
6801 	pf_modevent,
6802 	0
6803 };
6804 
6805 DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
6806 MODULE_DEPEND(pf, netlink, 1, 1, 1);
6807 MODULE_VERSION(pf, PF_MODVER);
6808