xref: /freebsd/sys/netpfil/pf/pf_ioctl.c (revision 9ae91f59)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2001 Daniel Hartmeier
5  * Copyright (c) 2002,2003 Henning Brauer
6  * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  *    - Redistributions of source code must retain the above copyright
14  *      notice, this list of conditions and the following disclaimer.
15  *    - Redistributions in binary form must reproduce the above
16  *      copyright notice, this list of conditions and the following
17  *      disclaimer in the documentation and/or other materials provided
18  *      with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  *
33  * Effort sponsored in part by the Defense Advanced Research Projects
34  * Agency (DARPA) and Air Force Research Laboratory, Air Force
35  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
36  *
37  *	$OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
38  */
39 
40 #include <sys/cdefs.h>
41 #include "opt_inet.h"
42 #include "opt_inet6.h"
43 #include "opt_bpf.h"
44 #include "opt_pf.h"
45 
46 #include <sys/param.h>
47 #include <sys/_bitset.h>
48 #include <sys/bitset.h>
49 #include <sys/bus.h>
50 #include <sys/conf.h>
51 #include <sys/endian.h>
52 #include <sys/fcntl.h>
53 #include <sys/filio.h>
54 #include <sys/hash.h>
55 #include <sys/interrupt.h>
56 #include <sys/jail.h>
57 #include <sys/kernel.h>
58 #include <sys/kthread.h>
59 #include <sys/lock.h>
60 #include <sys/mbuf.h>
61 #include <sys/module.h>
62 #include <sys/nv.h>
63 #include <sys/proc.h>
64 #include <sys/sdt.h>
65 #include <sys/smp.h>
66 #include <sys/socket.h>
67 #include <sys/sysctl.h>
68 #include <sys/md5.h>
69 #include <sys/ucred.h>
70 
71 #include <net/if.h>
72 #include <net/if_var.h>
73 #include <net/if_private.h>
74 #include <net/vnet.h>
75 #include <net/route.h>
76 #include <net/pfil.h>
77 #include <net/pfvar.h>
78 #include <net/if_pfsync.h>
79 #include <net/if_pflog.h>
80 
81 #include <netinet/in.h>
82 #include <netinet/ip.h>
83 #include <netinet/ip_var.h>
84 #include <netinet6/ip6_var.h>
85 #include <netinet/ip_icmp.h>
86 #include <netpfil/pf/pf_nl.h>
87 #include <netpfil/pf/pf_nv.h>
88 
89 #ifdef INET6
90 #include <netinet/ip6.h>
91 #endif /* INET6 */
92 
93 #ifdef ALTQ
94 #include <net/altq/altq.h>
95 #endif
96 
97 SDT_PROBE_DEFINE3(pf, ioctl, ioctl, error, "int", "int", "int");
98 SDT_PROBE_DEFINE3(pf, ioctl, function, error, "char *", "int", "int");
99 SDT_PROBE_DEFINE2(pf, ioctl, addrule, error, "int", "int");
100 SDT_PROBE_DEFINE2(pf, ioctl, nvchk, error, "int", "int");
101 
102 static struct pf_kpool	*pf_get_kpool(const char *, u_int32_t, u_int8_t,
103 			    u_int32_t, u_int8_t, u_int8_t, u_int8_t);
104 
105 static void		 pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *);
106 static void		 pf_empty_kpool(struct pf_kpalist *);
107 static int		 pfioctl(struct cdev *, u_long, caddr_t, int,
108 			    struct thread *);
109 static int		 pf_begin_eth(uint32_t *, const char *);
110 static void		 pf_rollback_eth_cb(struct epoch_context *);
111 static int		 pf_rollback_eth(uint32_t, const char *);
112 static int		 pf_commit_eth(uint32_t, const char *);
113 static void		 pf_free_eth_rule(struct pf_keth_rule *);
114 #ifdef ALTQ
115 static int		 pf_begin_altq(u_int32_t *);
116 static int		 pf_rollback_altq(u_int32_t);
117 static int		 pf_commit_altq(u_int32_t);
118 static int		 pf_enable_altq(struct pf_altq *);
119 static int		 pf_disable_altq(struct pf_altq *);
120 static uint16_t		 pf_qname2qid(const char *);
121 static void		 pf_qid_unref(uint16_t);
122 #endif /* ALTQ */
123 static int		 pf_begin_rules(u_int32_t *, int, const char *);
124 static int		 pf_rollback_rules(u_int32_t, int, char *);
125 static int		 pf_setup_pfsync_matching(struct pf_kruleset *);
126 static void		 pf_hash_rule_rolling(MD5_CTX *, struct pf_krule *);
127 static void		 pf_hash_rule(struct pf_krule *);
128 static void		 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
129 static int		 pf_commit_rules(u_int32_t, int, char *);
130 static int		 pf_addr_setup(struct pf_kruleset *,
131 			    struct pf_addr_wrap *, sa_family_t);
132 static void		 pf_addr_copyout(struct pf_addr_wrap *);
133 static void		 pf_src_node_copy(const struct pf_ksrc_node *,
134 			    struct pf_src_node *);
135 #ifdef ALTQ
136 static int		 pf_export_kaltq(struct pf_altq *,
137 			    struct pfioc_altq_v1 *, size_t);
138 static int		 pf_import_kaltq(struct pfioc_altq_v1 *,
139 			    struct pf_altq *, size_t);
140 #endif /* ALTQ */
141 
142 VNET_DEFINE(struct pf_krule,	pf_default_rule);
143 
144 static __inline int             pf_krule_compare(struct pf_krule *,
145 				    struct pf_krule *);
146 
147 RB_GENERATE(pf_krule_global, pf_krule, entry_global, pf_krule_compare);
148 
149 #ifdef ALTQ
150 VNET_DEFINE_STATIC(int,		pf_altq_running);
151 #define	V_pf_altq_running	VNET(pf_altq_running)
152 #endif
153 
154 #define	TAGID_MAX	 50000
155 struct pf_tagname {
156 	TAILQ_ENTRY(pf_tagname)	namehash_entries;
157 	TAILQ_ENTRY(pf_tagname)	taghash_entries;
158 	char			name[PF_TAG_NAME_SIZE];
159 	uint16_t		tag;
160 	int			ref;
161 };
162 
163 struct pf_tagset {
164 	TAILQ_HEAD(, pf_tagname)	*namehash;
165 	TAILQ_HEAD(, pf_tagname)	*taghash;
166 	unsigned int			 mask;
167 	uint32_t			 seed;
168 	BITSET_DEFINE(, TAGID_MAX)	 avail;
169 };
170 
171 VNET_DEFINE(struct pf_tagset, pf_tags);
172 #define	V_pf_tags	VNET(pf_tags)
173 static unsigned int	pf_rule_tag_hashsize;
174 #define	PF_RULE_TAG_HASH_SIZE_DEFAULT	128
175 SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN,
176     &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT,
177     "Size of pf(4) rule tag hashtable");
178 
179 #ifdef ALTQ
180 VNET_DEFINE(struct pf_tagset, pf_qids);
181 #define	V_pf_qids	VNET(pf_qids)
182 static unsigned int	pf_queue_tag_hashsize;
183 #define	PF_QUEUE_TAG_HASH_SIZE_DEFAULT	128
184 SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN,
185     &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT,
186     "Size of pf(4) queue tag hashtable");
187 #endif
188 VNET_DEFINE(uma_zone_t,	 pf_tag_z);
189 #define	V_pf_tag_z		 VNET(pf_tag_z)
190 static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
191 static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");
192 
193 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
194 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
195 #endif
196 
197 VNET_DEFINE_STATIC(bool, pf_filter_local) = false;
198 #define V_pf_filter_local	VNET(pf_filter_local)
199 SYSCTL_BOOL(_net_pf, OID_AUTO, filter_local, CTLFLAG_VNET | CTLFLAG_RW,
200     &VNET_NAME(pf_filter_local), false,
201     "Enable filtering for packets delivered to local network stack");
202 
203 #ifdef PF_DEFAULT_TO_DROP
204 VNET_DEFINE_STATIC(bool, default_to_drop) = true;
205 #else
206 VNET_DEFINE_STATIC(bool, default_to_drop);
207 #endif
208 #define	V_default_to_drop VNET(default_to_drop)
209 SYSCTL_BOOL(_net_pf, OID_AUTO, default_to_drop, CTLFLAG_RDTUN | CTLFLAG_VNET,
210     &VNET_NAME(default_to_drop), false,
211     "Make the default rule drop all packets.");
212 
213 static void		 pf_init_tagset(struct pf_tagset *, unsigned int *,
214 			    unsigned int);
215 static void		 pf_cleanup_tagset(struct pf_tagset *);
216 static uint16_t		 tagname2hashindex(const struct pf_tagset *, const char *);
217 static uint16_t		 tag2hashindex(const struct pf_tagset *, uint16_t);
218 static u_int16_t	 tagname2tag(struct pf_tagset *, const char *);
219 static u_int16_t	 pf_tagname2tag(const char *);
220 static void		 tag_unref(struct pf_tagset *, u_int16_t);
221 
222 #define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x
223 
224 struct cdev *pf_dev;
225 
226 /*
227  * XXX - These are new and need to be checked when moving to a new version
228  */
229 static void		 pf_clear_all_states(void);
230 static int		 pf_killstates_row(struct pf_kstate_kill *,
231 			    struct pf_idhash *);
232 static int		 pf_killstates_nv(struct pfioc_nv *);
233 static int		 pf_clearstates_nv(struct pfioc_nv *);
234 static int		 pf_getstate(struct pfioc_nv *);
235 static int		 pf_getstatus(struct pfioc_nv *);
236 static int		 pf_clear_tables(void);
237 static void		 pf_clear_srcnodes(struct pf_ksrc_node *);
238 static void		 pf_kill_srcnodes(struct pfioc_src_node_kill *);
239 static int		 pf_keepcounters(struct pfioc_nv *);
240 static void		 pf_tbladdr_copyout(struct pf_addr_wrap *);
241 
242 /*
243  * Wrapper functions for pfil(9) hooks
244  */
245 static pfil_return_t pf_eth_check_in(struct mbuf **m, struct ifnet *ifp,
246     int flags, void *ruleset __unused, struct inpcb *inp);
247 static pfil_return_t pf_eth_check_out(struct mbuf **m, struct ifnet *ifp,
248     int flags, void *ruleset __unused, struct inpcb *inp);
249 #ifdef INET
250 static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp,
251     int flags, void *ruleset __unused, struct inpcb *inp);
252 static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp,
253     int flags, void *ruleset __unused, struct inpcb *inp);
254 #endif
255 #ifdef INET6
256 static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp,
257     int flags, void *ruleset __unused, struct inpcb *inp);
258 static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp,
259     int flags, void *ruleset __unused, struct inpcb *inp);
260 #endif
261 
262 static void		hook_pf_eth(void);
263 static void		hook_pf(void);
264 static void		dehook_pf_eth(void);
265 static void		dehook_pf(void);
266 static int		shutdown_pf(void);
267 static int		pf_load(void);
268 static void		pf_unload(void);
269 
270 static struct cdevsw pf_cdevsw = {
271 	.d_ioctl =	pfioctl,
272 	.d_name =	PF_NAME,
273 	.d_version =	D_VERSION,
274 };
275 
276 VNET_DEFINE_STATIC(bool, pf_pfil_hooked);
277 #define V_pf_pfil_hooked	VNET(pf_pfil_hooked)
278 VNET_DEFINE_STATIC(bool, pf_pfil_eth_hooked);
279 #define V_pf_pfil_eth_hooked	VNET(pf_pfil_eth_hooked)
280 
281 /*
282  * We need a flag that is neither hooked nor running to know when
283  * the VNET is "valid".  We primarily need this to control (global)
284  * external events, e.g., eventhandlers.
285  */
286 VNET_DEFINE(int, pf_vnet_active);
287 #define V_pf_vnet_active	VNET(pf_vnet_active)
288 
289 int pf_end_threads;
290 struct proc *pf_purge_proc;
291 
292 VNET_DEFINE(struct rmlock, pf_rules_lock);
293 VNET_DEFINE_STATIC(struct sx, pf_ioctl_lock);
294 #define	V_pf_ioctl_lock		VNET(pf_ioctl_lock)
295 struct sx			pf_end_lock;
296 
297 /* pfsync */
298 VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
299 VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
300 VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
301 VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
302 VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
303 VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
304 VNET_DEFINE(pflow_export_state_t *, pflow_export_state_ptr);
305 pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;
306 
307 /* pflog */
308 pflog_packet_t			*pflog_packet_ptr = NULL;
309 
310 /*
311  * Copy a user-provided string, returning an error if truncation would occur.
312  * Avoid scanning past "sz" bytes in the source string since there's no
313  * guarantee that it's nul-terminated.
314  */
315 static int
316 pf_user_strcpy(char *dst, const char *src, size_t sz)
317 {
318 	if (strnlen(src, sz) == sz)
319 		return (EINVAL);
320 	(void)strlcpy(dst, src, sz);
321 	return (0);
322 }
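
/*
 * Illustrative sketch (not part of the build): how the truncation check
 * above behaves for a hypothetical 4-byte destination buffer.
 *
 *	char dst[4];
 *
 *	pf_user_strcpy(dst, "ab", sizeof(dst));	-> 0, dst = "ab"
 *	pf_user_strcpy(dst, "abc", sizeof(dst));	-> 0, dst = "abc"
 *	pf_user_strcpy(dst, "abcd", sizeof(dst));	-> EINVAL, would truncate
 *
 * strnlen(src, sz) == sz means no NUL terminator was found within sz
 * bytes, so a copy would either truncate or read past an unterminated
 * user-supplied string; the caller gets EINVAL instead.
 */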
323 
324 static void
325 pfattach_vnet(void)
326 {
327 	u_int32_t *my_timeout = V_pf_default_rule.timeout;
328 
329 	bzero(&V_pf_status, sizeof(V_pf_status));
330 
331 	pf_initialize();
332 	pfr_initialize();
333 	pfi_initialize_vnet();
334 	pf_normalize_init();
335 	pf_syncookies_init();
336 
337 	V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
338 	V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;
339 
340 	RB_INIT(&V_pf_anchors);
341 	pf_init_kruleset(&pf_main_ruleset);
342 
343 	pf_init_keth(V_pf_keth);
344 
345 	/* default rule should never be garbage collected */
346 	V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
347 	V_pf_default_rule.action = V_default_to_drop ? PF_DROP : PF_PASS;
348 	V_pf_default_rule.nr = -1;
349 	V_pf_default_rule.rtableid = -1;
350 
351 	pf_counter_u64_init(&V_pf_default_rule.evaluations, M_WAITOK);
352 	for (int i = 0; i < 2; i++) {
353 		pf_counter_u64_init(&V_pf_default_rule.packets[i], M_WAITOK);
354 		pf_counter_u64_init(&V_pf_default_rule.bytes[i], M_WAITOK);
355 	}
356 	V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
357 	V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
358 	V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK);
359 
360 	V_pf_default_rule.timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
361 	    M_WAITOK | M_ZERO);
362 
363 #ifdef PF_WANT_32_TO_64_COUNTER
364 	V_pf_kifmarker = malloc(sizeof(*V_pf_kifmarker), PFI_MTYPE, M_WAITOK | M_ZERO);
365 	V_pf_rulemarker = malloc(sizeof(*V_pf_rulemarker), M_PFRULE, M_WAITOK | M_ZERO);
366 	PF_RULES_WLOCK();
367 	LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist);
368 	LIST_INSERT_HEAD(&V_pf_allrulelist, &V_pf_default_rule, allrulelist);
369 	V_pf_allrulecount++;
370 	LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist);
371 	PF_RULES_WUNLOCK();
372 #endif
373 
374 	/* initialize default timeouts */
375 	my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
376 	my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
377 	my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
378 	my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
379 	my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
380 	my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
381 	my_timeout[PFTM_SCTP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
382 	my_timeout[PFTM_SCTP_OPENING] = PFTM_TCP_OPENING_VAL;
383 	my_timeout[PFTM_SCTP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
384 	my_timeout[PFTM_SCTP_CLOSING] = PFTM_TCP_CLOSING_VAL;
385 	my_timeout[PFTM_SCTP_CLOSED] = PFTM_TCP_CLOSED_VAL;
386 	my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
387 	my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
388 	my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
389 	my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
390 	my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
391 	my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
392 	my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
393 	my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
394 	my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
395 	my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
396 	my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
397 	my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
398 	my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
399 	my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;
400 
401 	V_pf_status.debug = PF_DEBUG_URGENT;
402 	/*
403 	 * XXX This is different from OpenBSD, where reassembly is enabled by
404 	 * default. In FreeBSD we expect people to keep using scrub rules and
405 	 * switch to the new syntax later. Only when they switch must they
406 	 * explicitly enable reassembly. We could change the default once the
407 	 * scrub rule functionality is (hopefully) removed some day in the future.
408 	 */
409 	V_pf_status.reass = 0;
410 
411 	V_pf_pfil_hooked = false;
412 	V_pf_pfil_eth_hooked = false;
413 
414 	/* XXX do our best to avoid a conflict */
415 	V_pf_status.hostid = arc4random();
416 
417 	for (int i = 0; i < PFRES_MAX; i++)
418 		V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
419 	for (int i = 0; i < KLCNT_MAX; i++)
420 		V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
421 	for (int i = 0; i < FCNT_MAX; i++)
422 		pf_counter_u64_init(&V_pf_status.fcounters[i], M_WAITOK);
423 	for (int i = 0; i < SCNT_MAX; i++)
424 		V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);
425 
426 	if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET,
427 	    INTR_MPSAFE, &V_pf_swi_cookie) != 0)
428 		/* XXXGL: leaked all above. */
429 		return;
430 }
431 
432 static struct pf_kpool *
433 pf_get_kpool(const char *anchor, u_int32_t ticket, u_int8_t rule_action,
434     u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
435     u_int8_t check_ticket)
436 {
437 	struct pf_kruleset	*ruleset;
438 	struct pf_krule		*rule;
439 	int			 rs_num;
440 
441 	ruleset = pf_find_kruleset(anchor);
442 	if (ruleset == NULL)
443 		return (NULL);
444 	rs_num = pf_get_ruleset_number(rule_action);
445 	if (rs_num >= PF_RULESET_MAX)
446 		return (NULL);
447 	if (active) {
448 		if (check_ticket && ticket !=
449 		    ruleset->rules[rs_num].active.ticket)
450 			return (NULL);
451 		if (r_last)
452 			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
453 			    pf_krulequeue);
454 		else
455 			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
456 	} else {
457 		if (check_ticket && ticket !=
458 		    ruleset->rules[rs_num].inactive.ticket)
459 			return (NULL);
460 		if (r_last)
461 			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
462 			    pf_krulequeue);
463 		else
464 			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
465 	}
466 	if (!r_last) {
467 		while ((rule != NULL) && (rule->nr != rule_number))
468 			rule = TAILQ_NEXT(rule, entries);
469 	}
470 	if (rule == NULL)
471 		return (NULL);
472 
473 	return (&rule->rpool);
474 }
475 
476 static void
477 pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb)
478 {
479 	struct pf_kpooladdr	*mv_pool_pa;
480 
481 	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
482 		TAILQ_REMOVE(poola, mv_pool_pa, entries);
483 		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
484 	}
485 }
486 
487 static void
488 pf_empty_kpool(struct pf_kpalist *poola)
489 {
490 	struct pf_kpooladdr *pa;
491 
492 	while ((pa = TAILQ_FIRST(poola)) != NULL) {
493 		switch (pa->addr.type) {
494 		case PF_ADDR_DYNIFTL:
495 			pfi_dynaddr_remove(pa->addr.p.dyn);
496 			break;
497 		case PF_ADDR_TABLE:
498 			/* XXX: this could be an unfinished pooladdr on the pabuf */
499 			if (pa->addr.p.tbl != NULL)
500 				pfr_detach_table(pa->addr.p.tbl);
501 			break;
502 		}
503 		if (pa->kif)
504 			pfi_kkif_unref(pa->kif);
505 		TAILQ_REMOVE(poola, pa, entries);
506 		free(pa, M_PFRULE);
507 	}
508 }
509 
510 static void
511 pf_unlink_rule_locked(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
512 {
513 
514 	PF_RULES_WASSERT();
515 	PF_UNLNKDRULES_ASSERT();
516 
517 	TAILQ_REMOVE(rulequeue, rule, entries);
518 
519 	rule->rule_ref |= PFRULE_REFS;
520 	TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
521 }
522 
523 static void
524 pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
525 {
526 
527 	PF_RULES_WASSERT();
528 
529 	PF_UNLNKDRULES_LOCK();
530 	pf_unlink_rule_locked(rulequeue, rule);
531 	PF_UNLNKDRULES_UNLOCK();
532 }
533 
534 static void
535 pf_free_eth_rule(struct pf_keth_rule *rule)
536 {
537 	PF_RULES_WASSERT();
538 
539 	if (rule == NULL)
540 		return;
541 
542 	if (rule->tag)
543 		tag_unref(&V_pf_tags, rule->tag);
544 	if (rule->match_tag)
545 		tag_unref(&V_pf_tags, rule->match_tag);
546 #ifdef ALTQ
547 	pf_qid_unref(rule->qid);
548 #endif
549 
550 	if (rule->bridge_to)
551 		pfi_kkif_unref(rule->bridge_to);
552 	if (rule->kif)
553 		pfi_kkif_unref(rule->kif);
554 
555 	if (rule->ipsrc.addr.type == PF_ADDR_TABLE)
556 		pfr_detach_table(rule->ipsrc.addr.p.tbl);
557 	if (rule->ipdst.addr.type == PF_ADDR_TABLE)
558 		pfr_detach_table(rule->ipdst.addr.p.tbl);
559 
560 	counter_u64_free(rule->evaluations);
561 	for (int i = 0; i < 2; i++) {
562 		counter_u64_free(rule->packets[i]);
563 		counter_u64_free(rule->bytes[i]);
564 	}
565 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
566 	pf_keth_anchor_remove(rule);
567 
568 	free(rule, M_PFRULE);
569 }
570 
571 void
572 pf_free_rule(struct pf_krule *rule)
573 {
574 
575 	PF_RULES_WASSERT();
576 	PF_CONFIG_ASSERT();
577 
578 	if (rule->tag)
579 		tag_unref(&V_pf_tags, rule->tag);
580 	if (rule->match_tag)
581 		tag_unref(&V_pf_tags, rule->match_tag);
582 #ifdef ALTQ
583 	if (rule->pqid != rule->qid)
584 		pf_qid_unref(rule->pqid);
585 	pf_qid_unref(rule->qid);
586 #endif
587 	switch (rule->src.addr.type) {
588 	case PF_ADDR_DYNIFTL:
589 		pfi_dynaddr_remove(rule->src.addr.p.dyn);
590 		break;
591 	case PF_ADDR_TABLE:
592 		pfr_detach_table(rule->src.addr.p.tbl);
593 		break;
594 	}
595 	switch (rule->dst.addr.type) {
596 	case PF_ADDR_DYNIFTL:
597 		pfi_dynaddr_remove(rule->dst.addr.p.dyn);
598 		break;
599 	case PF_ADDR_TABLE:
600 		pfr_detach_table(rule->dst.addr.p.tbl);
601 		break;
602 	}
603 	if (rule->overload_tbl)
604 		pfr_detach_table(rule->overload_tbl);
605 	if (rule->kif)
606 		pfi_kkif_unref(rule->kif);
607 	pf_kanchor_remove(rule);
608 	pf_empty_kpool(&rule->rpool.list);
609 
610 	pf_krule_free(rule);
611 }
612 
613 static void
614 pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size,
615     unsigned int default_size)
616 {
617 	unsigned int i;
618 	unsigned int hashsize;
619 
620 	if (*tunable_size == 0 || !powerof2(*tunable_size))
621 		*tunable_size = default_size;
622 
623 	hashsize = *tunable_size;
624 	ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH,
625 	    M_WAITOK);
626 	ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH,
627 	    M_WAITOK);
628 	ts->mask = hashsize - 1;
629 	ts->seed = arc4random();
630 	for (i = 0; i < hashsize; i++) {
631 		TAILQ_INIT(&ts->namehash[i]);
632 		TAILQ_INIT(&ts->taghash[i]);
633 	}
634 	BIT_FILL(TAGID_MAX, &ts->avail);
635 }
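
/*
 * Illustrative sketch (not part of the build): because the hash size is
 * forced to a power of two above, "& ts->mask" picks a bucket without a
 * modulo.  With the default rule tag hash size of 128:
 *
 *	mask = 128 - 1 = 0x7f;
 *	idx  = murmur3_32_hash(tagname, len, ts->seed) & 0x7f;	-> 0..127
 *
 * A zero or non-power-of-two tunable falls back to default_size, which
 * keeps the mask trick valid.
 */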
636 
637 static void
638 pf_cleanup_tagset(struct pf_tagset *ts)
639 {
640 	unsigned int i;
641 	unsigned int hashsize;
642 	struct pf_tagname *t, *tmp;
643 
644 	/*
645 	 * Only need to clean up one of the hashes as each tag is hashed
646 	 * into each table.
647 	 */
648 	hashsize = ts->mask + 1;
649 	for (i = 0; i < hashsize; i++)
650 		TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp)
651 			uma_zfree(V_pf_tag_z, t);
652 
653 	free(ts->namehash, M_PFHASH);
654 	free(ts->taghash, M_PFHASH);
655 }
656 
657 static uint16_t
658 tagname2hashindex(const struct pf_tagset *ts, const char *tagname)
659 {
660 	size_t len;
661 
662 	len = strnlen(tagname, PF_TAG_NAME_SIZE - 1);
663 	return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask);
664 }
665 
666 static uint16_t
667 tag2hashindex(const struct pf_tagset *ts, uint16_t tag)
668 {
669 
670 	return (tag & ts->mask);
671 }
672 
673 static u_int16_t
674 tagname2tag(struct pf_tagset *ts, const char *tagname)
675 {
676 	struct pf_tagname	*tag;
677 	u_int32_t		 index;
678 	u_int16_t		 new_tagid;
679 
680 	PF_RULES_WASSERT();
681 
682 	index = tagname2hashindex(ts, tagname);
683 	TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
684 		if (strcmp(tagname, tag->name) == 0) {
685 			tag->ref++;
686 			return (tag->tag);
687 		}
688 
689 	/*
690 	 * new entry
691 	 *
692 	 * to avoid fragmentation, we do a linear search from the beginning
693 	 * and take the first free slot we find.
694 	 */
695 	new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);
696 	/*
697 	 * Tags are 1-based, with valid tags in the range [1..TAGID_MAX].
698 	 * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits
699 	 * set.  It may also return a bit number greater than TAGID_MAX due
700 	 * to rounding of the number of bits in the vector up to a multiple
701 	 * of the vector word size at declaration/allocation time.
702 	 */
703 	if ((new_tagid == 0) || (new_tagid > TAGID_MAX))
704 		return (0);
705 
706 	/* Mark the tag as in use.  Bits are 0-based for BIT_CLR() */
707 	BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);
708 
709 	/* allocate and fill new struct pf_tagname */
710 	tag = uma_zalloc(V_pf_tag_z, M_NOWAIT);
711 	if (tag == NULL)
712 		return (0);
713 	strlcpy(tag->name, tagname, sizeof(tag->name));
714 	tag->tag = new_tagid;
715 	tag->ref = 1;
716 
717 	/* Insert into namehash */
718 	TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries);
719 
720 	/* Insert into taghash */
721 	index = tag2hashindex(ts, new_tagid);
722 	TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries);
723 
724 	return (tag->tag);
725 }
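
/*
 * Illustrative sketch (not part of the build): tag ids are 1-based while
 * the "avail" bitset is 0-based, so a freshly initialized tagset hands
 * out ids as follows:
 *
 *	new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);	-> 1 (bit 0 set)
 *	BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);	-> clears bit 0
 *	(the next call finds bit 1 and returns tag 2, and so on)
 *
 * tag_unref() below sets bit (tag - 1) again once the last reference is
 * dropped, so freed ids are reused lowest-first.
 */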
726 
727 static void
728 tag_unref(struct pf_tagset *ts, u_int16_t tag)
729 {
730 	struct pf_tagname	*t;
731 	uint16_t		 index;
732 
733 	PF_RULES_WASSERT();
734 
735 	index = tag2hashindex(ts, tag);
736 	TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
737 		if (tag == t->tag) {
738 			if (--t->ref == 0) {
739 				TAILQ_REMOVE(&ts->taghash[index], t,
740 				    taghash_entries);
741 				index = tagname2hashindex(ts, t->name);
742 				TAILQ_REMOVE(&ts->namehash[index], t,
743 				    namehash_entries);
744 				/* Bits are 0-based for BIT_SET() */
745 				BIT_SET(TAGID_MAX, tag - 1, &ts->avail);
746 				uma_zfree(V_pf_tag_z, t);
747 			}
748 			break;
749 		}
750 }
751 
752 static uint16_t
753 pf_tagname2tag(const char *tagname)
754 {
755 	return (tagname2tag(&V_pf_tags, tagname));
756 }
757 
758 static int
759 pf_begin_eth(uint32_t *ticket, const char *anchor)
760 {
761 	struct pf_keth_rule *rule, *tmp;
762 	struct pf_keth_ruleset *rs;
763 
764 	PF_RULES_WASSERT();
765 
766 	rs = pf_find_or_create_keth_ruleset(anchor);
767 	if (rs == NULL)
768 		return (EINVAL);
769 
770 	/* Purge old inactive rules. */
771 	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
772 	    tmp) {
773 		TAILQ_REMOVE(rs->inactive.rules, rule,
774 		    entries);
775 		pf_free_eth_rule(rule);
776 	}
777 
778 	*ticket = ++rs->inactive.ticket;
779 	rs->inactive.open = 1;
780 
781 	return (0);
782 }
783 
784 static void
785 pf_rollback_eth_cb(struct epoch_context *ctx)
786 {
787 	struct pf_keth_ruleset *rs;
788 
789 	rs = __containerof(ctx, struct pf_keth_ruleset, epoch_ctx);
790 
791 	CURVNET_SET(rs->vnet);
792 
793 	PF_RULES_WLOCK();
794 	pf_rollback_eth(rs->inactive.ticket,
795 	    rs->anchor ? rs->anchor->path : "");
796 	PF_RULES_WUNLOCK();
797 
798 	CURVNET_RESTORE();
799 }
800 
801 static int
802 pf_rollback_eth(uint32_t ticket, const char *anchor)
803 {
804 	struct pf_keth_rule *rule, *tmp;
805 	struct pf_keth_ruleset *rs;
806 
807 	PF_RULES_WASSERT();
808 
809 	rs = pf_find_keth_ruleset(anchor);
810 	if (rs == NULL)
811 		return (EINVAL);
812 
813 	if (!rs->inactive.open ||
814 	    ticket != rs->inactive.ticket)
815 		return (0);
816 
817 	/* Purge old inactive rules. */
818 	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
819 	    tmp) {
820 		TAILQ_REMOVE(rs->inactive.rules, rule, entries);
821 		pf_free_eth_rule(rule);
822 	}
823 
824 	rs->inactive.open = 0;
825 
826 	pf_remove_if_empty_keth_ruleset(rs);
827 
828 	return (0);
829 }
830 
831 #define	PF_SET_SKIP_STEPS(i)					\
832 	do {							\
833 		while (head[i] != cur) {			\
834 			head[i]->skip[i].ptr = cur;		\
835 			head[i] = TAILQ_NEXT(head[i], entries);	\
836 		}						\
837 	} while (0)
838 
839 static void
840 pf_eth_calc_skip_steps(struct pf_keth_ruleq *rules)
841 {
842 	struct pf_keth_rule *cur, *prev, *head[PFE_SKIP_COUNT];
843 	int i;
844 
845 	cur = TAILQ_FIRST(rules);
846 	prev = cur;
847 	for (i = 0; i < PFE_SKIP_COUNT; ++i)
848 		head[i] = cur;
849 	while (cur != NULL) {
850 		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
851 			PF_SET_SKIP_STEPS(PFE_SKIP_IFP);
852 		if (cur->direction != prev->direction)
853 			PF_SET_SKIP_STEPS(PFE_SKIP_DIR);
854 		if (cur->proto != prev->proto)
855 			PF_SET_SKIP_STEPS(PFE_SKIP_PROTO);
856 		if (memcmp(&cur->src, &prev->src, sizeof(cur->src)) != 0)
857 			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_ADDR);
858 		if (memcmp(&cur->dst, &prev->dst, sizeof(cur->dst)) != 0)
859 			PF_SET_SKIP_STEPS(PFE_SKIP_DST_ADDR);
860 		if (cur->ipsrc.neg != prev->ipsrc.neg ||
861 		    pf_addr_wrap_neq(&cur->ipsrc.addr, &prev->ipsrc.addr))
862 			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_IP_ADDR);
863 		if (cur->ipdst.neg != prev->ipdst.neg ||
864 		    pf_addr_wrap_neq(&cur->ipdst.addr, &prev->ipdst.addr))
865 			PF_SET_SKIP_STEPS(PFE_SKIP_DST_IP_ADDR);
866 
867 		prev = cur;
868 		cur = TAILQ_NEXT(cur, entries);
869 	}
870 	for (i = 0; i < PFE_SKIP_COUNT; ++i)
871 		PF_SET_SKIP_STEPS(i);
872 }
873 
874 static int
875 pf_commit_eth(uint32_t ticket, const char *anchor)
876 {
877 	struct pf_keth_ruleq *rules;
878 	struct pf_keth_ruleset *rs;
879 
880 	rs = pf_find_keth_ruleset(anchor);
881 	if (rs == NULL) {
882 		return (EINVAL);
883 	}
884 
885 	if (!rs->inactive.open ||
886 	    ticket != rs->inactive.ticket)
887 		return (EBUSY);
888 
889 	PF_RULES_WASSERT();
890 
891 	pf_eth_calc_skip_steps(rs->inactive.rules);
892 
893 	rules = rs->active.rules;
894 	ck_pr_store_ptr(&rs->active.rules, rs->inactive.rules);
895 	rs->inactive.rules = rules;
896 	rs->inactive.ticket = rs->active.ticket;
897 
898 	/* Clean up inactive rules (i.e. previously active rules), only when
899 	 * we're sure they're no longer used. */
900 	NET_EPOCH_CALL(pf_rollback_eth_cb, &rs->epoch_ctx);
901 
902 	return (0);
903 }
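
/*
 * Illustrative sketch (not part of the build): the inactive/active pair
 * above forms a ticket-based transaction.  A hypothetical caller holding
 * PF_RULES_WLOCK() would drive it roughly as:
 *
 *	uint32_t ticket;
 *	int error;
 *
 *	if ((error = pf_begin_eth(&ticket, "")) != 0)
 *		return (error);
 *	(append new rules to rs->inactive.rules here)
 *	if ((error = pf_commit_eth(ticket, "")) != 0)
 *		(void)pf_rollback_eth(ticket, "");
 *
 * Commit swaps the rule list pointers with ck_pr_store_ptr() and defers
 * cleanup of the previously active rules to pf_rollback_eth_cb() via
 * NET_EPOCH_CALL(), so readers inside the network epoch never observe a
 * torn list.
 */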
904 
905 #ifdef ALTQ
906 static uint16_t
907 pf_qname2qid(const char *qname)
908 {
909 	return (tagname2tag(&V_pf_qids, qname));
910 }
911 
912 static void
913 pf_qid_unref(uint16_t qid)
914 {
915 	tag_unref(&V_pf_qids, qid);
916 }
917 
918 static int
919 pf_begin_altq(u_int32_t *ticket)
920 {
921 	struct pf_altq	*altq, *tmp;
922 	int		 error = 0;
923 
924 	PF_RULES_WASSERT();
925 
926 	/* Purge the old altq lists */
927 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
928 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
929 			/* detach and destroy the discipline */
930 			error = altq_remove(altq);
931 		}
932 		free(altq, M_PFALTQ);
933 	}
934 	TAILQ_INIT(V_pf_altq_ifs_inactive);
935 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
936 		pf_qid_unref(altq->qid);
937 		free(altq, M_PFALTQ);
938 	}
939 	TAILQ_INIT(V_pf_altqs_inactive);
940 	if (error)
941 		return (error);
942 	*ticket = ++V_ticket_altqs_inactive;
943 	V_altqs_inactive_open = 1;
944 	return (0);
945 }
946 
947 static int
948 pf_rollback_altq(u_int32_t ticket)
949 {
950 	struct pf_altq	*altq, *tmp;
951 	int		 error = 0;
952 
953 	PF_RULES_WASSERT();
954 
955 	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
956 		return (0);
957 	/* Purge the old altq lists */
958 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
959 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
960 			/* detach and destroy the discipline */
961 			error = altq_remove(altq);
962 		}
963 		free(altq, M_PFALTQ);
964 	}
965 	TAILQ_INIT(V_pf_altq_ifs_inactive);
966 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
967 		pf_qid_unref(altq->qid);
968 		free(altq, M_PFALTQ);
969 	}
970 	TAILQ_INIT(V_pf_altqs_inactive);
971 	V_altqs_inactive_open = 0;
972 	return (error);
973 }
974 
975 static int
976 pf_commit_altq(u_int32_t ticket)
977 {
978 	struct pf_altqqueue	*old_altqs, *old_altq_ifs;
979 	struct pf_altq		*altq, *tmp;
980 	int			 err, error = 0;
981 
982 	PF_RULES_WASSERT();
983 
984 	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
985 		return (EBUSY);
986 
987 	/* swap altqs, keep the old. */
988 	old_altqs = V_pf_altqs_active;
989 	old_altq_ifs = V_pf_altq_ifs_active;
990 	V_pf_altqs_active = V_pf_altqs_inactive;
991 	V_pf_altq_ifs_active = V_pf_altq_ifs_inactive;
992 	V_pf_altqs_inactive = old_altqs;
993 	V_pf_altq_ifs_inactive = old_altq_ifs;
994 	V_ticket_altqs_active = V_ticket_altqs_inactive;
995 
996 	/* Attach new disciplines */
997 	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
998 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
999 			/* attach the discipline */
1000 			error = altq_pfattach(altq);
1001 			if (error == 0 && V_pf_altq_running)
1002 				error = pf_enable_altq(altq);
1003 			if (error != 0)
1004 				return (error);
1005 		}
1006 	}
1007 
1008 	/* Purge the old altq lists */
1009 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
1010 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
1011 			/* detach and destroy the discipline */
1012 			if (V_pf_altq_running)
1013 				error = pf_disable_altq(altq);
1014 			err = altq_pfdetach(altq);
1015 			if (err != 0 && error == 0)
1016 				error = err;
1017 			err = altq_remove(altq);
1018 			if (err != 0 && error == 0)
1019 				error = err;
1020 		}
1021 		free(altq, M_PFALTQ);
1022 	}
1023 	TAILQ_INIT(V_pf_altq_ifs_inactive);
1024 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
1025 		pf_qid_unref(altq->qid);
1026 		free(altq, M_PFALTQ);
1027 	}
1028 	TAILQ_INIT(V_pf_altqs_inactive);
1029 
1030 	V_altqs_inactive_open = 0;
1031 	return (error);
1032 }
1033 
1034 static int
1035 pf_enable_altq(struct pf_altq *altq)
1036 {
1037 	struct ifnet		*ifp;
1038 	struct tb_profile	 tb;
1039 	int			 error = 0;
1040 
1041 	if ((ifp = ifunit(altq->ifname)) == NULL)
1042 		return (EINVAL);
1043 
1044 	if (ifp->if_snd.altq_type != ALTQT_NONE)
1045 		error = altq_enable(&ifp->if_snd);
1046 
1047 	/* set tokenbucket regulator */
1048 	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
1049 		tb.rate = altq->ifbandwidth;
1050 		tb.depth = altq->tbrsize;
1051 		error = tbr_set(&ifp->if_snd, &tb);
1052 	}
1053 
1054 	return (error);
1055 }
1056 
1057 static int
1058 pf_disable_altq(struct pf_altq *altq)
1059 {
1060 	struct ifnet		*ifp;
1061 	struct tb_profile	 tb;
1062 	int			 error;
1063 
1064 	if ((ifp = ifunit(altq->ifname)) == NULL)
1065 		return (EINVAL);
1066 
1067 	/*
1068 	 * If the discipline is no longer referenced, it was overridden
1069 	 * by a new one; in that case, just return.
1070 	 */
1071 	if (altq->altq_disc != ifp->if_snd.altq_disc)
1072 		return (0);
1073 
1074 	error = altq_disable(&ifp->if_snd);
1075 
1076 	if (error == 0) {
1077 		/* clear tokenbucket regulator */
1078 		tb.rate = 0;
1079 		error = tbr_set(&ifp->if_snd, &tb);
1080 	}
1081 
1082 	return (error);
1083 }
1084 
1085 static int
1086 pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket,
1087     struct pf_altq *altq)
1088 {
1089 	struct ifnet	*ifp1;
1090 	int		 error = 0;
1091 
1092 	/* Deactivate the interface in question */
1093 	altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
1094 	if ((ifp1 = ifunit(altq->ifname)) == NULL ||
1095 	    (remove && ifp1 == ifp)) {
1096 		altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
1097 	} else {
1098 		error = altq_add(ifp1, altq);
1099 
1100 		if (ticket != V_ticket_altqs_inactive)
1101 			error = EBUSY;
1102 
1103 		if (error)
1104 			free(altq, M_PFALTQ);
1105 	}
1106 
1107 	return (error);
1108 }
1109 
1110 void
1111 pf_altq_ifnet_event(struct ifnet *ifp, int remove)
1112 {
1113 	struct pf_altq	*a1, *a2, *a3;
1114 	u_int32_t	 ticket;
1115 	int		 error = 0;
1116 
1117 	/*
1118 	 * No need to re-evaluate the configuration for events on interfaces
1119 	 * that do not support ALTQ, as it's not possible for such
1120 	 * interfaces to be part of the configuration.
1121 	 */
1122 	if (!ALTQ_IS_READY(&ifp->if_snd))
1123 		return;
1124 
1125 	/* Interrupt userland queue modifications */
1126 	if (V_altqs_inactive_open)
1127 		pf_rollback_altq(V_ticket_altqs_inactive);
1128 
1129 	/* Start new altq ruleset */
1130 	if (pf_begin_altq(&ticket))
1131 		return;
1132 
1133 	/* Copy the current active set */
1134 	TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) {
1135 		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
1136 		if (a2 == NULL) {
1137 			error = ENOMEM;
1138 			break;
1139 		}
1140 		bcopy(a1, a2, sizeof(struct pf_altq));
1141 
1142 		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
1143 		if (error)
1144 			break;
1145 
1146 		TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries);
1147 	}
1148 	if (error)
1149 		goto out;
1150 	TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
1151 		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
1152 		if (a2 == NULL) {
1153 			error = ENOMEM;
1154 			break;
1155 		}
1156 		bcopy(a1, a2, sizeof(struct pf_altq));
1157 
1158 		if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
1159 			error = EBUSY;
1160 			free(a2, M_PFALTQ);
1161 			break;
1162 		}
1163 		a2->altq_disc = NULL;
1164 		TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) {
1165 			if (strncmp(a3->ifname, a2->ifname,
1166 				IFNAMSIZ) == 0) {
1167 				a2->altq_disc = a3->altq_disc;
1168 				break;
1169 			}
1170 		}
1171 		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
1172 		if (error)
1173 			break;
1174 
1175 		TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
1176 	}
1177 
1178 out:
1179 	if (error != 0)
1180 		pf_rollback_altq(ticket);
1181 	else
1182 		pf_commit_altq(ticket);
1183 }
1184 #endif /* ALTQ */
1185 
1186 static struct pf_krule_global *
1187 pf_rule_tree_alloc(int flags)
1188 {
1189 	struct pf_krule_global *tree;
1190 
1191 	tree = malloc(sizeof(struct pf_krule_global), M_TEMP, flags);
1192 	if (tree == NULL)
1193 		return (NULL);
1194 	RB_INIT(tree);
1195 	return (tree);
1196 }
1197 
1198 static void
1199 pf_rule_tree_free(struct pf_krule_global *tree)
1200 {
1201 
1202 	free(tree, M_TEMP);
1203 }
1204 
1205 static int
1206 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
1207 {
1208 	struct pf_krule_global *tree;
1209 	struct pf_kruleset	*rs;
1210 	struct pf_krule		*rule;
1211 
1212 	PF_RULES_WASSERT();
1213 
1214 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1215 		return (EINVAL);
1216 	tree = pf_rule_tree_alloc(M_NOWAIT);
1217 	if (tree == NULL)
1218 		return (ENOMEM);
1219 	rs = pf_find_or_create_kruleset(anchor);
1220 	if (rs == NULL) {
1221 		free(tree, M_TEMP);
1222 		return (EINVAL);
1223 	}
1224 	pf_rule_tree_free(rs->rules[rs_num].inactive.tree);
1225 	rs->rules[rs_num].inactive.tree = tree;
1226 
1227 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
1228 		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
1229 		rs->rules[rs_num].inactive.rcount--;
1230 	}
1231 	*ticket = ++rs->rules[rs_num].inactive.ticket;
1232 	rs->rules[rs_num].inactive.open = 1;
1233 	return (0);
1234 }
1235 
1236 static int
1237 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
1238 {
1239 	struct pf_kruleset	*rs;
1240 	struct pf_krule		*rule;
1241 
1242 	PF_RULES_WASSERT();
1243 
1244 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1245 		return (EINVAL);
1246 	rs = pf_find_kruleset(anchor);
1247 	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1248 	    rs->rules[rs_num].inactive.ticket != ticket)
1249 		return (0);
1250 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
1251 		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
1252 		rs->rules[rs_num].inactive.rcount--;
1253 	}
1254 	rs->rules[rs_num].inactive.open = 0;
1255 	return (0);
1256 }
1257 
1258 #define PF_MD5_UPD(st, elm)						\
1259 		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))
1260 
1261 #define PF_MD5_UPD_STR(st, elm)						\
1262 		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))
1263 
1264 #define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
1265 		(stor) = htonl((st)->elm);				\
1266 		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
1267 } while (0)
1268 
1269 #define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
1270 		(stor) = htons((st)->elm);				\
1271 		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
1272 } while (0)
1273 
1274 static void
1275 pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
1276 {
1277 	PF_MD5_UPD(pfr, addr.type);
1278 	switch (pfr->addr.type) {
1279 		case PF_ADDR_DYNIFTL:
1280 			PF_MD5_UPD(pfr, addr.v.ifname);
1281 			PF_MD5_UPD(pfr, addr.iflags);
1282 			break;
1283 		case PF_ADDR_TABLE:
1284 			PF_MD5_UPD(pfr, addr.v.tblname);
1285 			break;
1286 		case PF_ADDR_ADDRMASK:
1287 			/* XXX ignore af? */
1288 			PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
1289 			PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
1290 			break;
1291 	}
1292 
1293 	PF_MD5_UPD(pfr, port[0]);
1294 	PF_MD5_UPD(pfr, port[1]);
1295 	PF_MD5_UPD(pfr, neg);
1296 	PF_MD5_UPD(pfr, port_op);
1297 }
1298 
1299 static void
1300 pf_hash_rule_rolling(MD5_CTX *ctx, struct pf_krule *rule)
1301 {
1302 	u_int16_t x;
1303 	u_int32_t y;
1304 
1305 	pf_hash_rule_addr(ctx, &rule->src);
1306 	pf_hash_rule_addr(ctx, &rule->dst);
1307 	for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++)
1308 		PF_MD5_UPD_STR(rule, label[i]);
1309 	PF_MD5_UPD_STR(rule, ifname);
1310 	PF_MD5_UPD_STR(rule, match_tagname);
1311 	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
1312 	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
1313 	PF_MD5_UPD_HTONL(rule, prob, y);
1314 	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
1315 	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
1316 	PF_MD5_UPD(rule, uid.op);
1317 	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
1318 	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
1319 	PF_MD5_UPD(rule, gid.op);
1320 	PF_MD5_UPD_HTONL(rule, rule_flag, y);
1321 	PF_MD5_UPD(rule, action);
1322 	PF_MD5_UPD(rule, direction);
1323 	PF_MD5_UPD(rule, af);
1324 	PF_MD5_UPD(rule, quick);
1325 	PF_MD5_UPD(rule, ifnot);
1326 	PF_MD5_UPD(rule, match_tag_not);
1327 	PF_MD5_UPD(rule, natpass);
1328 	PF_MD5_UPD(rule, keep_state);
1329 	PF_MD5_UPD(rule, proto);
1330 	PF_MD5_UPD(rule, type);
1331 	PF_MD5_UPD(rule, code);
1332 	PF_MD5_UPD(rule, flags);
1333 	PF_MD5_UPD(rule, flagset);
1334 	PF_MD5_UPD(rule, allow_opts);
1335 	PF_MD5_UPD(rule, rt);
1336 	PF_MD5_UPD(rule, tos);
1337 	PF_MD5_UPD(rule, scrub_flags);
1338 	PF_MD5_UPD(rule, min_ttl);
1339 	PF_MD5_UPD(rule, set_tos);
1340 	if (rule->anchor != NULL)
1341 		PF_MD5_UPD_STR(rule, anchor->path);
1342 }
1343 
1344 static void
1345 pf_hash_rule(struct pf_krule *rule)
1346 {
1347 	MD5_CTX		ctx;
1348 
1349 	MD5Init(&ctx);
1350 	pf_hash_rule_rolling(&ctx, rule);
1351 	MD5Final(rule->md5sum, &ctx);
1352 }
1353 
1354 static int
1355 pf_krule_compare(struct pf_krule *a, struct pf_krule *b)
1356 {
1357 
1358 	return (memcmp(a->md5sum, b->md5sum, PF_MD5_DIGEST_LENGTH));
1359 }
1360 
1361 static int
1362 pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
1363 {
1364 	struct pf_kruleset	*rs;
1365 	struct pf_krule		*rule, **old_array, *old_rule;
1366 	struct pf_krulequeue	*old_rules;
1367 	struct pf_krule_global  *old_tree;
1368 	int			 error;
1369 	u_int32_t		 old_rcount;
1370 
1371 	PF_RULES_WASSERT();
1372 
1373 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1374 		return (EINVAL);
1375 	rs = pf_find_kruleset(anchor);
1376 	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1377 	    ticket != rs->rules[rs_num].inactive.ticket)
1378 		return (EBUSY);
1379 
1380 	/* Calculate checksum for the main ruleset */
1381 	if (rs == &pf_main_ruleset) {
1382 		error = pf_setup_pfsync_matching(rs);
1383 		if (error != 0)
1384 			return (error);
1385 	}
1386 
1387 	/* Swap rules, keep the old. */
1388 	old_rules = rs->rules[rs_num].active.ptr;
1389 	old_rcount = rs->rules[rs_num].active.rcount;
1390 	old_array = rs->rules[rs_num].active.ptr_array;
1391 	old_tree = rs->rules[rs_num].active.tree;
1392 
1393 	rs->rules[rs_num].active.ptr =
1394 	    rs->rules[rs_num].inactive.ptr;
1395 	rs->rules[rs_num].active.ptr_array =
1396 	    rs->rules[rs_num].inactive.ptr_array;
1397 	rs->rules[rs_num].active.tree =
1398 	    rs->rules[rs_num].inactive.tree;
1399 	rs->rules[rs_num].active.rcount =
1400 	    rs->rules[rs_num].inactive.rcount;
1401 
1402 	/* Attempt to preserve counter information. */
1403 	if (V_pf_status.keep_counters && old_tree != NULL) {
1404 		TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr,
1405 		    entries) {
1406 			old_rule = RB_FIND(pf_krule_global, old_tree, rule);
1407 			if (old_rule == NULL) {
1408 				continue;
1409 			}
1410 			pf_counter_u64_critical_enter();
1411 			pf_counter_u64_rollup_protected(&rule->evaluations,
1412 			    pf_counter_u64_fetch(&old_rule->evaluations));
1413 			pf_counter_u64_rollup_protected(&rule->packets[0],
1414 			    pf_counter_u64_fetch(&old_rule->packets[0]));
1415 			pf_counter_u64_rollup_protected(&rule->packets[1],
1416 			    pf_counter_u64_fetch(&old_rule->packets[1]));
1417 			pf_counter_u64_rollup_protected(&rule->bytes[0],
1418 			    pf_counter_u64_fetch(&old_rule->bytes[0]));
1419 			pf_counter_u64_rollup_protected(&rule->bytes[1],
1420 			    pf_counter_u64_fetch(&old_rule->bytes[1]));
1421 			pf_counter_u64_critical_exit();
1422 		}
1423 	}
1424 
1425 	rs->rules[rs_num].inactive.ptr = old_rules;
1426 	rs->rules[rs_num].inactive.ptr_array = old_array;
1427 	rs->rules[rs_num].inactive.tree = NULL; /* important for pf_ioctl_addrule */
1428 	rs->rules[rs_num].inactive.rcount = old_rcount;
1429 
1430 	rs->rules[rs_num].active.ticket =
1431 	    rs->rules[rs_num].inactive.ticket;
1432 	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);
1433 
1434 	/* Purge the old rule list. */
1435 	PF_UNLNKDRULES_LOCK();
1436 	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
1437 		pf_unlink_rule_locked(old_rules, rule);
1438 	PF_UNLNKDRULES_UNLOCK();
1439 	if (rs->rules[rs_num].inactive.ptr_array)
1440 		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
1441 	rs->rules[rs_num].inactive.ptr_array = NULL;
1442 	rs->rules[rs_num].inactive.rcount = 0;
1443 	rs->rules[rs_num].inactive.open = 0;
1444 	pf_remove_if_empty_kruleset(rs);
1445 	free(old_tree, M_TEMP);
1446 
1447 	return (0);
1448 }
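
/*
 * Illustrative sketch (not part of the build): a regular ruleset swap
 * follows the same begin/commit pattern, e.g. for the filter rules of
 * the main anchor, again with PF_RULES_WLOCK() held:
 *
 *	u_int32_t ticket;
 *	int error;
 *
 *	if ((error = pf_begin_rules(&ticket, PF_RULESET_FILTER, "")) != 0)
 *		return (error);
 *	(link new rules onto rs->rules[PF_RULESET_FILTER].inactive.ptr)
 *	error = pf_commit_rules(ticket, PF_RULESET_FILTER, "");
 *
 * When keep_counters is enabled, the commit path looks up each new rule
 * by its MD5 hash in the old tree and rolls the previous evaluation,
 * packet and byte counters into the matching new rule.
 */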
1449 
1450 static int
1451 pf_setup_pfsync_matching(struct pf_kruleset *rs)
1452 {
1453 	MD5_CTX			 ctx;
1454 	struct pf_krule		*rule;
1455 	int			 rs_cnt;
1456 	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];
1457 
1458 	MD5Init(&ctx);
1459 	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
1460 		/* XXX PF_RULESET_SCRUB as well? */
1461 		if (rs_cnt == PF_RULESET_SCRUB)
1462 			continue;
1463 
1464 		if (rs->rules[rs_cnt].inactive.ptr_array)
1465 			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
1466 		rs->rules[rs_cnt].inactive.ptr_array = NULL;
1467 
1468 		if (rs->rules[rs_cnt].inactive.rcount) {
1469 			rs->rules[rs_cnt].inactive.ptr_array =
1470 			    mallocarray(rs->rules[rs_cnt].inactive.rcount,
1471 			    sizeof(struct pf_rule **),
1472 			    M_TEMP, M_NOWAIT);
1473 
1474 			if (!rs->rules[rs_cnt].inactive.ptr_array)
1475 				return (ENOMEM);
1476 		}
1477 
1478 		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
1479 		    entries) {
1480 			pf_hash_rule_rolling(&ctx, rule);
1481 			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
1482 		}
1483 	}
1484 
1485 	MD5Final(digest, &ctx);
1486 	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
1487 	return (0);
1488 }
1489 
1490 static int
1491 pf_eth_addr_setup(struct pf_keth_ruleset *ruleset, struct pf_addr_wrap *addr)
1492 {
1493 	int error = 0;
1494 
1495 	switch (addr->type) {
1496 	case PF_ADDR_TABLE:
1497 		addr->p.tbl = pfr_eth_attach_table(ruleset, addr->v.tblname);
1498 		if (addr->p.tbl == NULL)
1499 			error = ENOMEM;
1500 		break;
1501 	default:
1502 		error = EINVAL;
1503 	}
1504 
1505 	return (error);
1506 }
1507 
1508 static int
1509 pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr,
1510     sa_family_t af)
1511 {
1512 	int error = 0;
1513 
1514 	switch (addr->type) {
1515 	case PF_ADDR_TABLE:
1516 		addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
1517 		if (addr->p.tbl == NULL)
1518 			error = ENOMEM;
1519 		break;
1520 	case PF_ADDR_DYNIFTL:
1521 		error = pfi_dynaddr_setup(addr, af);
1522 		break;
1523 	}
1524 
1525 	return (error);
1526 }
1527 
1528 static void
1529 pf_addr_copyout(struct pf_addr_wrap *addr)
1530 {
1531 
1532 	switch (addr->type) {
1533 	case PF_ADDR_DYNIFTL:
1534 		pfi_dynaddr_copyout(addr);
1535 		break;
1536 	case PF_ADDR_TABLE:
1537 		pf_tbladdr_copyout(addr);
1538 		break;
1539 	}
1540 }
1541 
1542 static void
1543 pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out)
1544 {
1545 	int	secs = time_uptime, diff;
1546 
1547 	bzero(out, sizeof(struct pf_src_node));
1548 
1549 	bcopy(&in->addr, &out->addr, sizeof(struct pf_addr));
1550 	bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr));
1551 
1552 	if (in->rule.ptr != NULL)
1553 		out->rule.nr = in->rule.ptr->nr;
1554 
1555 	for (int i = 0; i < 2; i++) {
1556 		out->bytes[i] = counter_u64_fetch(in->bytes[i]);
1557 		out->packets[i] = counter_u64_fetch(in->packets[i]);
1558 	}
1559 
1560 	out->states = in->states;
1561 	out->conn = in->conn;
1562 	out->af = in->af;
1563 	out->ruletype = in->ruletype;
1564 
1565 	out->creation = secs - in->creation;
1566 	if (out->expire > secs)
1567 		out->expire -= secs;
1568 	else
1569 		out->expire = 0;
1570 
1571 	/* Adjust the connection rate estimate. */
1572 	diff = secs - in->conn_rate.last;
1573 	if (diff >= in->conn_rate.seconds)
1574 		out->conn_rate.count = 0;
1575 	else
1576 		out->conn_rate.count -=
1577 		    in->conn_rate.count * diff /
1578 		    in->conn_rate.seconds;
1579 }
1580 
1581 #ifdef ALTQ
1582 /*
1583  * Handle export of struct pf_kaltq to user binaries that may be using any
1584  * version of struct pf_altq.
1585  */
1586 static int
1587 pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size)
1588 {
1589 	u_int32_t version;
1590 
1591 	if (ioc_size == sizeof(struct pfioc_altq_v0))
1592 		version = 0;
1593 	else
1594 		version = pa->version;
1595 
1596 	if (version > PFIOC_ALTQ_VERSION)
1597 		return (EINVAL);
1598 
1599 #define ASSIGN(x) exported_q->x = q->x
1600 #define COPY(x) \
1601 	bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x)))
1602 #define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX)
1603 #define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX)
1604 
1605 	switch (version) {
1606 	case 0: {
1607 		struct pf_altq_v0 *exported_q =
1608 		    &((struct pfioc_altq_v0 *)pa)->altq;
1609 
1610 		COPY(ifname);
1611 
1612 		ASSIGN(scheduler);
1613 		ASSIGN(tbrsize);
1614 		exported_q->tbrsize = SATU16(q->tbrsize);
1615 		exported_q->ifbandwidth = SATU32(q->ifbandwidth);
1616 
1617 		COPY(qname);
1618 		COPY(parent);
1619 		ASSIGN(parent_qid);
1620 		exported_q->bandwidth = SATU32(q->bandwidth);
1621 		ASSIGN(priority);
1622 		ASSIGN(local_flags);
1623 
1624 		ASSIGN(qlimit);
1625 		ASSIGN(flags);
1626 
1627 		if (q->scheduler == ALTQT_HFSC) {
1628 #define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x
1629 #define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \
1630 			    SATU32(q->pq_u.hfsc_opts.x)
1631 
1632 			ASSIGN_OPT_SATU32(rtsc_m1);
1633 			ASSIGN_OPT(rtsc_d);
1634 			ASSIGN_OPT_SATU32(rtsc_m2);
1635 
1636 			ASSIGN_OPT_SATU32(lssc_m1);
1637 			ASSIGN_OPT(lssc_d);
1638 			ASSIGN_OPT_SATU32(lssc_m2);
1639 
1640 			ASSIGN_OPT_SATU32(ulsc_m1);
1641 			ASSIGN_OPT(ulsc_d);
1642 			ASSIGN_OPT_SATU32(ulsc_m2);
1643 
1644 			ASSIGN_OPT(flags);
1645 
1646 #undef ASSIGN_OPT
1647 #undef ASSIGN_OPT_SATU32
1648 		} else
1649 			COPY(pq_u);
1650 
1651 		ASSIGN(qid);
1652 		break;
1653 	}
1654 	case 1:	{
1655 		struct pf_altq_v1 *exported_q =
1656 		    &((struct pfioc_altq_v1 *)pa)->altq;
1657 
1658 		COPY(ifname);
1659 
1660 		ASSIGN(scheduler);
1661 		ASSIGN(tbrsize);
1662 		ASSIGN(ifbandwidth);
1663 
1664 		COPY(qname);
1665 		COPY(parent);
1666 		ASSIGN(parent_qid);
1667 		ASSIGN(bandwidth);
1668 		ASSIGN(priority);
1669 		ASSIGN(local_flags);
1670 
1671 		ASSIGN(qlimit);
1672 		ASSIGN(flags);
1673 		COPY(pq_u);
1674 
1675 		ASSIGN(qid);
1676 		break;
1677 	}
1678 	default:
1679 		panic("%s: unhandled struct pfioc_altq version", __func__);
1680 		break;
1681 	}
1682 
1683 #undef ASSIGN
1684 #undef COPY
1685 #undef SATU16
1686 #undef SATU32
1687 
1688 	return (0);
1689 }
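
/*
 * Illustrative sketch (not part of the build): exporting to the v0 ABI
 * narrows 64-bit fields, so values are saturated rather than truncated.
 * For a hypothetical interface bandwidth:
 *
 *	q->ifbandwidth = 2000000000ULL;	-> exported unchanged
 *	q->ifbandwidth = 6000000000ULL;	-> exceeds UINT_MAX, exported as
 *					   4294967295
 *
 * The v1 layout carries 64-bit bandwidth fields, so no clamping is
 * applied there.
 */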
1690 
1691 /*
1692  * Handle import to struct pf_kaltq of struct pf_altq from user binaries
1693  * that may be using any version of it.
1694  */
1695 static int
1696 pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size)
1697 {
1698 	u_int32_t version;
1699 
1700 	if (ioc_size == sizeof(struct pfioc_altq_v0))
1701 		version = 0;
1702 	else
1703 		version = pa->version;
1704 
1705 	if (version > PFIOC_ALTQ_VERSION)
1706 		return (EINVAL);
1707 
1708 #define ASSIGN(x) q->x = imported_q->x
1709 #define COPY(x) \
1710 	bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x)))
1711 
1712 	switch (version) {
1713 	case 0: {
1714 		struct pf_altq_v0 *imported_q =
1715 		    &((struct pfioc_altq_v0 *)pa)->altq;
1716 
1717 		COPY(ifname);
1718 
1719 		ASSIGN(scheduler);
1720 		ASSIGN(tbrsize); /* 16-bit -> 32-bit */
1721 		ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */
1722 
1723 		COPY(qname);
1724 		COPY(parent);
1725 		ASSIGN(parent_qid);
1726 		ASSIGN(bandwidth); /* 32-bit -> 64-bit */
1727 		ASSIGN(priority);
1728 		ASSIGN(local_flags);
1729 
1730 		ASSIGN(qlimit);
1731 		ASSIGN(flags);
1732 
1733 		if (imported_q->scheduler == ALTQT_HFSC) {
1734 #define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x
1735 
1736 			/*
1737 			 * The m1 and m2 parameters are being copied from
1738 			 * 32-bit to 64-bit.
1739 			 */
1740 			ASSIGN_OPT(rtsc_m1);
1741 			ASSIGN_OPT(rtsc_d);
1742 			ASSIGN_OPT(rtsc_m2);
1743 
1744 			ASSIGN_OPT(lssc_m1);
1745 			ASSIGN_OPT(lssc_d);
1746 			ASSIGN_OPT(lssc_m2);
1747 
1748 			ASSIGN_OPT(ulsc_m1);
1749 			ASSIGN_OPT(ulsc_d);
1750 			ASSIGN_OPT(ulsc_m2);
1751 
1752 			ASSIGN_OPT(flags);
1753 
1754 #undef ASSIGN_OPT
1755 		} else
1756 			COPY(pq_u);
1757 
1758 		ASSIGN(qid);
1759 		break;
1760 	}
1761 	case 1: {
1762 		struct pf_altq_v1 *imported_q =
1763 		    &((struct pfioc_altq_v1 *)pa)->altq;
1764 
1765 		COPY(ifname);
1766 
1767 		ASSIGN(scheduler);
1768 		ASSIGN(tbrsize);
1769 		ASSIGN(ifbandwidth);
1770 
1771 		COPY(qname);
1772 		COPY(parent);
1773 		ASSIGN(parent_qid);
1774 		ASSIGN(bandwidth);
1775 		ASSIGN(priority);
1776 		ASSIGN(local_flags);
1777 
1778 		ASSIGN(qlimit);
1779 		ASSIGN(flags);
1780 		COPY(pq_u);
1781 
1782 		ASSIGN(qid);
1783 		break;
1784 	}
1785 	default:
1786 		panic("%s: unhandled struct pfioc_altq version", __func__);
1787 		break;
1788 	}
1789 
1790 #undef ASSIGN
1791 #undef COPY
1792 
1793 	return (0);
1794 }
1795 
1796 static struct pf_altq *
1797 pf_altq_get_nth_active(u_int32_t n)
1798 {
1799 	struct pf_altq		*altq;
1800 	u_int32_t		 nr;
1801 
1802 	nr = 0;
1803 	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
1804 		if (nr == n)
1805 			return (altq);
1806 		nr++;
1807 	}
1808 
1809 	TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
1810 		if (nr == n)
1811 			return (altq);
1812 		nr++;
1813 	}
1814 
1815 	return (NULL);
1816 }
1817 #endif /* ALTQ */
1818 
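/*
 * Rules allocated here are either linked into a ruleset or released again
 * with pf_krule_free(); note that pf_ioctl_addrule() takes ownership and
 * frees the rule itself on failure (see the "Frees rule on error" callers
 * further down).
 */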
1819 struct pf_krule *
1820 pf_krule_alloc(void)
1821 {
1822 	struct pf_krule *rule;
1823 
1824 	rule = malloc(sizeof(struct pf_krule), M_PFRULE, M_WAITOK | M_ZERO);
1825 	mtx_init(&rule->rpool.mtx, "pf_krule_pool", NULL, MTX_DEF);
1826 	rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
1827 	    M_WAITOK | M_ZERO);
1828 	return (rule);
1829 }
1830 
1831 void
1832 pf_krule_free(struct pf_krule *rule)
1833 {
1834 #ifdef PF_WANT_32_TO_64_COUNTER
1835 	bool wowned;
1836 #endif
1837 
1838 	if (rule == NULL)
1839 		return;
1840 
1841 #ifdef PF_WANT_32_TO_64_COUNTER
1842 	if (rule->allrulelinked) {
1843 		wowned = PF_RULES_WOWNED();
1844 		if (!wowned)
1845 			PF_RULES_WLOCK();
1846 		LIST_REMOVE(rule, allrulelist);
1847 		V_pf_allrulecount--;
1848 		if (!wowned)
1849 			PF_RULES_WUNLOCK();
1850 	}
1851 #endif
1852 
1853 	pf_counter_u64_deinit(&rule->evaluations);
1854 	for (int i = 0; i < 2; i++) {
1855 		pf_counter_u64_deinit(&rule->packets[i]);
1856 		pf_counter_u64_deinit(&rule->bytes[i]);
1857 	}
1858 	counter_u64_free(rule->states_cur);
1859 	counter_u64_free(rule->states_tot);
1860 	counter_u64_free(rule->src_nodes);
1861 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
1862 
1863 	mtx_destroy(&rule->rpool.mtx);
1864 	free(rule, M_PFRULE);
1865 }
1866 
1867 void
1868 pf_krule_clear_counters(struct pf_krule *rule)
1869 {
1870 	pf_counter_u64_zero(&rule->evaluations);
1871 	for (int i = 0; i < 2; i++) {
1872 		pf_counter_u64_zero(&rule->packets[i]);
1873 		pf_counter_u64_zero(&rule->bytes[i]);
1874 	}
1875 	counter_u64_zero(rule->states_tot);
1876 }
1877 
1878 static void
1879 pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool,
1880     struct pf_pooladdr *pool)
1881 {
1882 
1883 	bzero(pool, sizeof(*pool));
1884 	bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr));
1885 	strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname));
1886 }
1887 
1888 static int
1889 pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool,
1890     struct pf_kpooladdr *kpool)
1891 {
1892 	int ret;
1893 
1894 	bzero(kpool, sizeof(*kpool));
1895 	bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr));
1896 	ret = pf_user_strcpy(kpool->ifname, pool->ifname,
1897 	    sizeof(kpool->ifname));
1898 	return (ret);
1899 }
1900 
1901 static void
1902 pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool)
1903 {
1904 	_Static_assert(sizeof(pool->key) == sizeof(kpool->key), "");
1905 	_Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), "");
1906 
1907 	bcopy(&pool->key, &kpool->key, sizeof(kpool->key));
1908 	bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter));
1909 
1910 	kpool->tblidx = pool->tblidx;
1911 	kpool->proxy_port[0] = pool->proxy_port[0];
1912 	kpool->proxy_port[1] = pool->proxy_port[1];
1913 	kpool->opts = pool->opts;
1914 }
1915 
1916 static int
1917 pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule)
1918 {
1919 	int ret;
1920 
1921 #ifndef INET
1922 	if (rule->af == AF_INET) {
1923 		return (EAFNOSUPPORT);
1924 	}
1925 #endif /* INET */
1926 #ifndef INET6
1927 	if (rule->af == AF_INET6) {
1928 		return (EAFNOSUPPORT);
1929 	}
1930 #endif /* INET6 */
1931 
1932 	ret = pf_check_rule_addr(&rule->src);
1933 	if (ret != 0)
1934 		return (ret);
1935 	ret = pf_check_rule_addr(&rule->dst);
1936 	if (ret != 0)
1937 		return (ret);
1938 
1939 	bcopy(&rule->src, &krule->src, sizeof(rule->src));
1940 	bcopy(&rule->dst, &krule->dst, sizeof(rule->dst));
1941 
1942 	ret = pf_user_strcpy(krule->label[0], rule->label, sizeof(rule->label));
1943 	if (ret != 0)
1944 		return (ret);
1945 	ret = pf_user_strcpy(krule->ifname, rule->ifname, sizeof(rule->ifname));
1946 	if (ret != 0)
1947 		return (ret);
1948 	ret = pf_user_strcpy(krule->qname, rule->qname, sizeof(rule->qname));
1949 	if (ret != 0)
1950 		return (ret);
1951 	ret = pf_user_strcpy(krule->pqname, rule->pqname, sizeof(rule->pqname));
1952 	if (ret != 0)
1953 		return (ret);
1954 	ret = pf_user_strcpy(krule->tagname, rule->tagname,
1955 	    sizeof(rule->tagname));
1956 	if (ret != 0)
1957 		return (ret);
1958 	ret = pf_user_strcpy(krule->match_tagname, rule->match_tagname,
1959 	    sizeof(rule->match_tagname));
1960 	if (ret != 0)
1961 		return (ret);
1962 	ret = pf_user_strcpy(krule->overload_tblname, rule->overload_tblname,
1963 	    sizeof(rule->overload_tblname));
1964 	if (ret != 0)
1965 		return (ret);
1966 
1967 	pf_pool_to_kpool(&rule->rpool, &krule->rpool);
1968 
1969 	/* Don't allow userspace to set evaluations, packets or bytes. */
1970 	/* kif, anchor, overload_tbl are not copied over. */
1971 
1972 	krule->os_fingerprint = rule->os_fingerprint;
1973 
1974 	krule->rtableid = rule->rtableid;
1975 	/* pf_rule->timeout is smaller than pf_krule->timeout */
1976 	bcopy(rule->timeout, krule->timeout, sizeof(rule->timeout));
1977 	krule->max_states = rule->max_states;
1978 	krule->max_src_nodes = rule->max_src_nodes;
1979 	krule->max_src_states = rule->max_src_states;
1980 	krule->max_src_conn = rule->max_src_conn;
1981 	krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit;
1982 	krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds;
1983 	krule->qid = rule->qid;
1984 	krule->pqid = rule->pqid;
1985 	krule->nr = rule->nr;
1986 	krule->prob = rule->prob;
1987 	krule->cuid = rule->cuid;
1988 	krule->cpid = rule->cpid;
1989 
1990 	krule->return_icmp = rule->return_icmp;
1991 	krule->return_icmp6 = rule->return_icmp6;
1992 	krule->max_mss = rule->max_mss;
1993 	krule->tag = rule->tag;
1994 	krule->match_tag = rule->match_tag;
1995 	krule->scrub_flags = rule->scrub_flags;
1996 
1997 	bcopy(&rule->uid, &krule->uid, sizeof(krule->uid));
1998 	bcopy(&rule->gid, &krule->gid, sizeof(krule->gid));
1999 
2000 	krule->rule_flag = rule->rule_flag;
2001 	krule->action = rule->action;
2002 	krule->direction = rule->direction;
2003 	krule->log = rule->log;
2004 	krule->logif = rule->logif;
2005 	krule->quick = rule->quick;
2006 	krule->ifnot = rule->ifnot;
2007 	krule->match_tag_not = rule->match_tag_not;
2008 	krule->natpass = rule->natpass;
2009 
2010 	krule->keep_state = rule->keep_state;
2011 	krule->af = rule->af;
2012 	krule->proto = rule->proto;
2013 	krule->type = rule->type;
2014 	krule->code = rule->code;
2015 	krule->flags = rule->flags;
2016 	krule->flagset = rule->flagset;
2017 	krule->min_ttl = rule->min_ttl;
2018 	krule->allow_opts = rule->allow_opts;
2019 	krule->rt = rule->rt;
2020 	krule->return_ttl = rule->return_ttl;
2021 	krule->tos = rule->tos;
2022 	krule->set_tos = rule->set_tos;
2023 
2024 	krule->flush = rule->flush;
2025 	krule->prio = rule->prio;
2026 	krule->set_prio[0] = rule->set_prio[0];
2027 	krule->set_prio[1] = rule->set_prio[1];
2028 
2029 	bcopy(&rule->divert, &krule->divert, sizeof(krule->divert));
2030 
2031 	return (0);
2032 }
2033 
2034 int
2035 pf_ioctl_getrules(struct pfioc_rule *pr)
2036 {
2037 	struct pf_kruleset	*ruleset;
2038 	struct pf_krule		*tail;
2039 	int			 rs_num;
2040 
2041 	PF_RULES_WLOCK();
2042 	ruleset = pf_find_kruleset(pr->anchor);
2043 	if (ruleset == NULL) {
2044 		PF_RULES_WUNLOCK();
2045 		return (EINVAL);
2046 	}
2047 	rs_num = pf_get_ruleset_number(pr->rule.action);
2048 	if (rs_num >= PF_RULESET_MAX) {
2049 		PF_RULES_WUNLOCK();
2050 		return (EINVAL);
2051 	}
2052 	tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
2053 	    pf_krulequeue);
2054 	if (tail)
2055 		pr->nr = tail->nr + 1;
2056 	else
2057 		pr->nr = 0;
2058 	pr->ticket = ruleset->rules[rs_num].active.ticket;
2059 	PF_RULES_WUNLOCK();
2060 
2061 	return (0);
2062 }
2063 
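/*
 * Rough sketch of the caller sequence pf_ioctl_addrule() assumes, inferred
 * from the ticket checks below rather than from any formal contract:
 *
 *	DIOCXBEGIN	allocate the inactive tree, hand out 'ticket'
 *	DIOCBEGINADDRS	reset V_pf_pabuf, hand out 'pool_ticket'
 *	DIOCADDADDR	queue pool addresses onto V_pf_pabuf (0..n times)
 *	DIOCADDRULE(NV)	end up here with both tickets
 *	DIOCXCOMMIT	swap the inactive ruleset in as the active one
 *
 * A stale ticket or pool_ticket fails with EBUSY.
 */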
2064 int
2065 pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket,
2066     uint32_t pool_ticket, const char *anchor, const char *anchor_call,
2067     uid_t uid, pid_t pid)
2068 {
2069 	struct pf_kruleset	*ruleset;
2070 	struct pf_krule		*tail;
2071 	struct pf_kpooladdr	*pa;
2072 	struct pfi_kkif		*kif = NULL;
2073 	int			 rs_num;
2074 	int			 error = 0;
2075 
2076 	if ((rule->return_icmp >> 8) > ICMP_MAXTYPE) {
2077 		error = EINVAL;
2078 		goto errout_unlocked;
2079 	}
2080 
2081 #define	ERROUT(x)	ERROUT_FUNCTION(errout, x)
2082 
2083 	if (rule->ifname[0])
2084 		kif = pf_kkif_create(M_WAITOK);
2085 	pf_counter_u64_init(&rule->evaluations, M_WAITOK);
2086 	for (int i = 0; i < 2; i++) {
2087 		pf_counter_u64_init(&rule->packets[i], M_WAITOK);
2088 		pf_counter_u64_init(&rule->bytes[i], M_WAITOK);
2089 	}
2090 	rule->states_cur = counter_u64_alloc(M_WAITOK);
2091 	rule->states_tot = counter_u64_alloc(M_WAITOK);
2092 	rule->src_nodes = counter_u64_alloc(M_WAITOK);
2093 	rule->cuid = uid;
2094 	rule->cpid = pid;
2095 	TAILQ_INIT(&rule->rpool.list);
2096 
2097 	PF_CONFIG_LOCK();
2098 	PF_RULES_WLOCK();
2099 #ifdef PF_WANT_32_TO_64_COUNTER
2100 	LIST_INSERT_HEAD(&V_pf_allrulelist, rule, allrulelist);
2101 	MPASS(!rule->allrulelinked);
2102 	rule->allrulelinked = true;
2103 	V_pf_allrulecount++;
2104 #endif
2105 	ruleset = pf_find_kruleset(anchor);
2106 	if (ruleset == NULL)
2107 		ERROUT(EINVAL);
2108 	rs_num = pf_get_ruleset_number(rule->action);
2109 	if (rs_num >= PF_RULESET_MAX)
2110 		ERROUT(EINVAL);
2111 	if (ticket != ruleset->rules[rs_num].inactive.ticket) {
2112 		DPFPRINTF(PF_DEBUG_MISC,
2113 		    ("ticket: %d != [%d]%d\n", ticket, rs_num,
2114 		    ruleset->rules[rs_num].inactive.ticket));
2115 		ERROUT(EBUSY);
2116 	}
2117 	if (pool_ticket != V_ticket_pabuf) {
2118 		DPFPRINTF(PF_DEBUG_MISC,
2119 		    ("pool_ticket: %d != %d\n", pool_ticket,
2120 		    V_ticket_pabuf));
2121 		ERROUT(EBUSY);
2122 	}
2123 	/*
2124 	 * XXXMJG hack: there is no mechanism to ensure the caller actually
2125 	 * started the transaction. The ticket checked above may happen to
2126 	 * match by accident, even if nobody called DIOCXBEGIN, let alone this
2127 	 * process. Partially work around it by checking whether the RB tree
2128 	 * got allocated, see pf_begin_rules().
2129 	 */
2130 	if (ruleset->rules[rs_num].inactive.tree == NULL) {
2131 		ERROUT(EINVAL);
2132 	}
2133 
2134 	tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
2135 	    pf_krulequeue);
2136 	if (tail)
2137 		rule->nr = tail->nr + 1;
2138 	else
2139 		rule->nr = 0;
2140 	if (rule->ifname[0]) {
2141 		rule->kif = pfi_kkif_attach(kif, rule->ifname);
2142 		kif = NULL;
2143 		pfi_kkif_ref(rule->kif);
2144 	} else
2145 		rule->kif = NULL;
2146 
2147 	if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs)
2148 		error = EBUSY;
2149 
2150 #ifdef ALTQ
2151 	/* set queue IDs */
2152 	if (rule->qname[0] != 0) {
2153 		if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
2154 			error = EBUSY;
2155 		else if (rule->pqname[0] != 0) {
2156 			if ((rule->pqid =
2157 			    pf_qname2qid(rule->pqname)) == 0)
2158 				error = EBUSY;
2159 		} else
2160 			rule->pqid = rule->qid;
2161 	}
2162 #endif
2163 	if (rule->tagname[0])
2164 		if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
2165 			error = EBUSY;
2166 	if (rule->match_tagname[0])
2167 		if ((rule->match_tag =
2168 		    pf_tagname2tag(rule->match_tagname)) == 0)
2169 			error = EBUSY;
2170 	if (rule->rt && !rule->direction)
2171 		error = EINVAL;
2172 	if (!rule->log)
2173 		rule->logif = 0;
2174 	if (rule->logif >= PFLOGIFS_MAX)
2175 		error = EINVAL;
2176 	if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
2177 		error = ENOMEM;
2178 	if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
2179 		error = ENOMEM;
2180 	if (pf_kanchor_setup(rule, ruleset, anchor_call))
2181 		error = EINVAL;
2182 	if (rule->scrub_flags & PFSTATE_SETPRIO &&
2183 	    (rule->set_prio[0] > PF_PRIO_MAX ||
2184 	    rule->set_prio[1] > PF_PRIO_MAX))
2185 		error = EINVAL;
2186 	TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
2187 		if (pa->addr.type == PF_ADDR_TABLE) {
2188 			pa->addr.p.tbl = pfr_attach_table(ruleset,
2189 			    pa->addr.v.tblname);
2190 			if (pa->addr.p.tbl == NULL)
2191 				error = ENOMEM;
2192 		}
2193 
2194 	rule->overload_tbl = NULL;
2195 	if (rule->overload_tblname[0]) {
2196 		if ((rule->overload_tbl = pfr_attach_table(ruleset,
2197 		    rule->overload_tblname)) == NULL)
2198 			error = EINVAL;
2199 		else
2200 			rule->overload_tbl->pfrkt_flags |=
2201 			    PFR_TFLAG_ACTIVE;
2202 	}
2203 
2204 	pf_mv_kpool(&V_pf_pabuf, &rule->rpool.list);
2205 	if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
2206 	    (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
2207 	    (rule->rt > PF_NOPFROUTE)) &&
2208 	    (TAILQ_FIRST(&rule->rpool.list) == NULL))
2209 		error = EINVAL;
2210 
2211 	if (error) {
2212 		pf_free_rule(rule);
2213 		rule = NULL;
2214 		ERROUT(error);
2215 	}
2216 
2217 	rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
2218 	TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
2219 	    rule, entries);
2220 	ruleset->rules[rs_num].inactive.rcount++;
2221 
2222 	PF_RULES_WUNLOCK();
2223 	pf_hash_rule(rule);
2224 	if (RB_INSERT(pf_krule_global, ruleset->rules[rs_num].inactive.tree, rule) != NULL) {
2225 		PF_RULES_WLOCK();
2226 		TAILQ_REMOVE(ruleset->rules[rs_num].inactive.ptr, rule, entries);
2227 		ruleset->rules[rs_num].inactive.rcount--;
2228 		pf_free_rule(rule);
2229 		rule = NULL;
2230 		ERROUT(EEXIST);
2231 	}
2232 	PF_CONFIG_UNLOCK();
2233 
2234 	return (0);
2235 
2236 #undef ERROUT
2237 errout:
2238 	PF_RULES_WUNLOCK();
2239 	PF_CONFIG_UNLOCK();
2240 errout_unlocked:
2241 	pf_kkif_free(kif);
2242 	pf_krule_free(rule);
2243 	return (error);
2244 }
2245 
2246 static bool
2247 pf_label_match(const struct pf_krule *rule, const char *label)
2248 {
2249 	int i = 0;
2250 
2251 	while (*rule->label[i]) {
2252 		if (strcmp(rule->label[i], label) == 0)
2253 			return (true);
2254 		i++;
2255 	}
2256 
2257 	return (false);
2258 }
2259 
2260 static unsigned int
2261 pf_kill_matching_state(struct pf_state_key_cmp *key, int dir)
2262 {
2263 	struct pf_kstate *s;
2264 	int more = 0;
2265 
2266 	s = pf_find_state_all(key, dir, &more);
2267 	if (s == NULL)
2268 		return (0);
2269 
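	/*
	 * More than one state matched the key; the request is ambiguous,
	 * so do not kill anything.
	 */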
2270 	if (more) {
2271 		PF_STATE_UNLOCK(s);
2272 		return (0);
2273 	}
2274 
2275 	pf_unlink_state(s);
2276 	return (1);
2277 }
2278 
2279 static int
2280 pf_killstates_row(struct pf_kstate_kill *psk, struct pf_idhash *ih)
2281 {
2282 	struct pf_kstate	*s;
2283 	struct pf_state_key	*sk;
2284 	struct pf_addr		*srcaddr, *dstaddr;
2285 	struct pf_state_key_cmp	 match_key;
2286 	int			 idx, killed = 0;
2287 	unsigned int		 dir;
2288 	u_int16_t		 srcport, dstport;
2289 	struct pfi_kkif		*kif;
2290 
2291 relock_DIOCKILLSTATES:
2292 	PF_HASHROW_LOCK(ih);
2293 	LIST_FOREACH(s, &ih->states, entry) {
2294 		/* For floating states look at the original kif. */
2295 		kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;
2296 
2297 		sk = s->key[psk->psk_nat ? PF_SK_STACK : PF_SK_WIRE];
2298 		if (s->direction == PF_OUT) {
2299 			srcaddr = &sk->addr[1];
2300 			dstaddr = &sk->addr[0];
2301 			srcport = sk->port[1];
2302 			dstport = sk->port[0];
2303 		} else {
2304 			srcaddr = &sk->addr[0];
2305 			dstaddr = &sk->addr[1];
2306 			srcport = sk->port[0];
2307 			dstport = sk->port[1];
2308 		}
2309 
2310 		if (psk->psk_af && sk->af != psk->psk_af)
2311 			continue;
2312 
2313 		if (psk->psk_proto && psk->psk_proto != sk->proto)
2314 			continue;
2315 
2316 		if (! PF_MATCHA(psk->psk_src.neg, &psk->psk_src.addr.v.a.addr,
2317 		    &psk->psk_src.addr.v.a.mask, srcaddr, sk->af))
2318 			continue;
2319 
2320 		if (! PF_MATCHA(psk->psk_dst.neg, &psk->psk_dst.addr.v.a.addr,
2321 		    &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af))
2322 			continue;
2323 
2324 		if (!  PF_MATCHA(psk->psk_rt_addr.neg,
2325 		    &psk->psk_rt_addr.addr.v.a.addr,
2326 		    &psk->psk_rt_addr.addr.v.a.mask,
2327 		    &s->rt_addr, sk->af))
2328 			continue;
2329 
2330 		if (psk->psk_src.port_op != 0 &&
2331 		    ! pf_match_port(psk->psk_src.port_op,
2332 		    psk->psk_src.port[0], psk->psk_src.port[1], srcport))
2333 			continue;
2334 
2335 		if (psk->psk_dst.port_op != 0 &&
2336 		    ! pf_match_port(psk->psk_dst.port_op,
2337 		    psk->psk_dst.port[0], psk->psk_dst.port[1], dstport))
2338 			continue;
2339 
2340 		if (psk->psk_label[0] &&
2341 		    ! pf_label_match(s->rule.ptr, psk->psk_label))
2342 			continue;
2343 
2344 		if (psk->psk_ifname[0] && strcmp(psk->psk_ifname,
2345 		    kif->pfik_name))
2346 			continue;
2347 
2348 		if (psk->psk_kill_match) {
2349 			/* Create the key to find matching states, with lock
2350 			 * held. */
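			/*
			 * The key is built with addresses and ports swapped
			 * so that it matches the state created for the
			 * opposite direction (e.g. the translated side of a
			 * NAT'ed connection).
			 */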
2351 
2352 			bzero(&match_key, sizeof(match_key));
2353 
2354 			if (s->direction == PF_OUT) {
2355 				dir = PF_IN;
2356 				idx = psk->psk_nat ? PF_SK_WIRE : PF_SK_STACK;
2357 			} else {
2358 				dir = PF_OUT;
2359 				idx = psk->psk_nat ? PF_SK_STACK : PF_SK_WIRE;
2360 			}
2361 
2362 			match_key.af = s->key[idx]->af;
2363 			match_key.proto = s->key[idx]->proto;
2364 			PF_ACPY(&match_key.addr[0],
2365 			    &s->key[idx]->addr[1], match_key.af);
2366 			match_key.port[0] = s->key[idx]->port[1];
2367 			PF_ACPY(&match_key.addr[1],
2368 			    &s->key[idx]->addr[0], match_key.af);
2369 			match_key.port[1] = s->key[idx]->port[0];
2370 		}
2371 
2372 		pf_unlink_state(s);
2373 		killed++;
2374 
2375 		if (psk->psk_kill_match)
2376 			killed += pf_kill_matching_state(&match_key, dir);
2377 
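		/*
		 * pf_unlink_state() drops the hash row lock, so the
		 * LIST_FOREACH() cannot be continued safely; rescan this
		 * row from the top.
		 */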
2378 		goto relock_DIOCKILLSTATES;
2379 	}
2380 	PF_HASHROW_UNLOCK(ih);
2381 
2382 	return (killed);
2383 }
2384 
2385 int
2386 pf_start(void)
2387 {
2388 	int error = 0;
2389 
2390 	sx_xlock(&V_pf_ioctl_lock);
2391 	if (V_pf_status.running)
2392 		error = EEXIST;
2393 	else {
2394 		hook_pf();
2395 		if (! TAILQ_EMPTY(V_pf_keth->active.rules))
2396 			hook_pf_eth();
2397 		V_pf_status.running = 1;
2398 		V_pf_status.since = time_second;
2399 		new_unrhdr64(&V_pf_stateid, time_second);
2400 
2401 		DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
2402 	}
2403 	sx_xunlock(&V_pf_ioctl_lock);
2404 
2405 	return (error);
2406 }
2407 
2408 int
2409 pf_stop(void)
2410 {
2411 	int error = 0;
2412 
2413 	sx_xlock(&V_pf_ioctl_lock);
2414 	if (!V_pf_status.running)
2415 		error = ENOENT;
2416 	else {
2417 		V_pf_status.running = 0;
2418 		dehook_pf();
2419 		dehook_pf_eth();
2420 		V_pf_status.since = time_second;
2421 		DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
2422 	}
2423 	sx_xunlock(&V_pf_ioctl_lock);
2424 
2425 	return (error);
2426 }
2427 
2428 void
2429 pf_ioctl_clear_status(void)
2430 {
2431 	PF_RULES_WLOCK();
2432 	for (int i = 0; i < PFRES_MAX; i++)
2433 		counter_u64_zero(V_pf_status.counters[i]);
2434 	for (int i = 0; i < FCNT_MAX; i++)
2435 		pf_counter_u64_zero(&V_pf_status.fcounters[i]);
2436 	for (int i = 0; i < SCNT_MAX; i++)
2437 		counter_u64_zero(V_pf_status.scounters[i]);
2438 	for (int i = 0; i < KLCNT_MAX; i++)
2439 		counter_u64_zero(V_pf_status.lcounters[i]);
2440 	V_pf_status.since = time_second;
2441 	if (*V_pf_status.ifname)
2442 		pfi_update_status(V_pf_status.ifname, NULL);
2443 	PF_RULES_WUNLOCK();
2444 }
2445 
2446 int
2447 pf_ioctl_set_timeout(int timeout, int seconds, int *prev_seconds)
2448 {
2449 	uint32_t old;
2450 
2451 	if (timeout < 0 || timeout >= PFTM_MAX ||
2452 	    seconds < 0)
2453 		return (EINVAL);
2454 
2455 	PF_RULES_WLOCK();
2456 	old = V_pf_default_rule.timeout[timeout];
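	/*
	 * The purge interval may never be zero, and shrinking it wakes the
	 * purge thread so the shorter interval takes effect immediately.
	 */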
2457 	if (timeout == PFTM_INTERVAL && seconds == 0)
2458 		seconds = 1;
2459 	V_pf_default_rule.timeout[timeout] = seconds;
2460 	if (timeout == PFTM_INTERVAL && seconds < old)
2461 		wakeup(pf_purge_thread);
2462 
2463 	if (prev_seconds != NULL)
2464 		*prev_seconds = old;
2465 
2466 	PF_RULES_WUNLOCK();
2467 
2468 	return (0);
2469 }
2470 
2471 int
2472 pf_ioctl_get_timeout(int timeout, int *seconds)
2473 {
2474 	PF_RULES_RLOCK_TRACKER;
2475 
2476 	if (timeout < 0 || timeout >= PFTM_MAX)
2477 		return (EINVAL);
2478 
2479 	PF_RULES_RLOCK();
2480 	*seconds = V_pf_default_rule.timeout[timeout];
2481 	PF_RULES_RUNLOCK();
2482 
2483 	return (0);
2484 }
2485 
2486 int
2487 pf_ioctl_set_limit(int index, unsigned int limit, unsigned int *old_limit)
2488 {
2489 
2490 	PF_RULES_WLOCK();
2491 	if (index < 0 || index >= PF_LIMIT_MAX ||
2492 	    V_pf_limits[index].zone == NULL) {
2493 		PF_RULES_WUNLOCK();
2494 		return (EINVAL);
2495 	}
2496 	uma_zone_set_max(V_pf_limits[index].zone, limit);
2497 	if (old_limit != NULL)
2498 		*old_limit = V_pf_limits[index].limit;
2499 	V_pf_limits[index].limit = limit;
2500 	PF_RULES_WUNLOCK();
2501 
2502 	return (0);
2503 }
2504 
2505 int
2506 pf_ioctl_get_limit(int index, unsigned int *limit)
2507 {
2508 	PF_RULES_RLOCK_TRACKER;
2509 
2510 	if (index < 0 || index >= PF_LIMIT_MAX)
2511 		return (EINVAL);
2512 
2513 	PF_RULES_RLOCK();
2514 	*limit = V_pf_limits[index].limit;
2515 	PF_RULES_RUNLOCK();
2516 
2517 	return (0);
2518 }
2519 
2520 int
2521 pf_ioctl_begin_addrs(uint32_t *ticket)
2522 {
2523 	PF_RULES_WLOCK();
2524 	pf_empty_kpool(&V_pf_pabuf);
2525 	*ticket = ++V_ticket_pabuf;
2526 	PF_RULES_WUNLOCK();
2527 
2528 	return (0);
2529 }
2530 
2531 int
2532 pf_ioctl_add_addr(struct pfioc_pooladdr *pp)
2533 {
2534 	struct pf_kpooladdr	*pa = NULL;
2535 	struct pfi_kkif		*kif = NULL;
2536 	int error;
2537 
2538 #ifndef INET
2539 	if (pp->af == AF_INET)
2540 		return (EAFNOSUPPORT);
2541 #endif /* INET */
2542 #ifndef INET6
2543 	if (pp->af == AF_INET6)
2544 		return (EAFNOSUPPORT);
2545 #endif /* INET6 */
2546 
2547 	if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
2548 	    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
2549 	    pp->addr.addr.type != PF_ADDR_TABLE)
2550 		return (EINVAL);
2551 
2552 	if (pp->addr.addr.p.dyn != NULL)
2553 		return (EINVAL);
2554 
2555 	pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK);
2556 	error = pf_pooladdr_to_kpooladdr(&pp->addr, pa);
2557 	if (error != 0)
2558 		goto out;
2559 	if (pa->ifname[0])
2560 		kif = pf_kkif_create(M_WAITOK);
2561 	PF_RULES_WLOCK();
2562 	if (pp->ticket != V_ticket_pabuf) {
2563 		PF_RULES_WUNLOCK();
2564 		if (pa->ifname[0])
2565 			pf_kkif_free(kif);
2566 		error = EBUSY;
2567 		goto out;
2568 	}
2569 	if (pa->ifname[0]) {
2570 		pa->kif = pfi_kkif_attach(kif, pa->ifname);
2571 		kif = NULL;
2572 		pfi_kkif_ref(pa->kif);
2573 	} else
2574 		pa->kif = NULL;
2575 	if (pa->addr.type == PF_ADDR_DYNIFTL && ((error =
2576 	    pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) {
2577 		if (pa->ifname[0])
2578 			pfi_kkif_unref(pa->kif);
2579 		PF_RULES_WUNLOCK();
2580 		goto out;
2581 	}
2582 	TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries);
2583 	PF_RULES_WUNLOCK();
2584 
2585 	return (0);
2586 
2587 out:
2588 	free(pa, M_PFRULE);
2589 	return (error);
2590 }
2591 
2592 int
2593 pf_ioctl_get_addrs(struct pfioc_pooladdr *pp)
2594 {
2595 	struct pf_kpool		*pool;
2596 	struct pf_kpooladdr	*pa;
2597 
2598 	PF_RULES_RLOCK_TRACKER;
2599 
2600 	pp->anchor[sizeof(pp->anchor) - 1] = 0;
2601 	pp->nr = 0;
2602 
2603 	PF_RULES_RLOCK();
2604 	pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
2605 	    pp->r_num, 0, 1, 0);
2606 	if (pool == NULL) {
2607 		PF_RULES_RUNLOCK();
2608 		return (EBUSY);
2609 	}
2610 	TAILQ_FOREACH(pa, &pool->list, entries)
2611 		pp->nr++;
2612 	PF_RULES_RUNLOCK();
2613 
2614 	return (0);
2615 }
2616 
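/*
 * Callers are expected to learn the pool size via DIOCGETADDRS
 * (pf_ioctl_get_addrs() above) and then fetch entries one at a time with
 * pp->nr set to 0..count-1; an out-of-range index fails with EBUSY.
 */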
2617 int
2618 pf_ioctl_get_addr(struct pfioc_pooladdr *pp)
2619 {
2620 	struct pf_kpool		*pool;
2621 	struct pf_kpooladdr	*pa;
2622 	u_int32_t		 nr = 0;
2623 
2624 	PF_RULES_RLOCK_TRACKER;
2625 
2626 	pp->anchor[sizeof(pp->anchor) - 1] = 0;
2627 
2628 	PF_RULES_RLOCK();
2629 	pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
2630 	    pp->r_num, 0, 1, 1);
2631 	if (pool == NULL) {
2632 		PF_RULES_RUNLOCK();
2633 		return (EBUSY);
2634 	}
2635 	pa = TAILQ_FIRST(&pool->list);
2636 	while ((pa != NULL) && (nr < pp->nr)) {
2637 		pa = TAILQ_NEXT(pa, entries);
2638 		nr++;
2639 	}
2640 	if (pa == NULL) {
2641 		PF_RULES_RUNLOCK();
2642 		return (EBUSY);
2643 	}
2644 	pf_kpooladdr_to_pooladdr(pa, &pp->addr);
2645 	pf_addr_copyout(&pp->addr.addr);
2646 	PF_RULES_RUNLOCK();
2647 
2648 	return (0);
2649 }
2650 
2651 static int
2652 pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
2653 {
2654 	int			 error = 0;
2655 	PF_RULES_RLOCK_TRACKER;
2656 
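	/*
	 * Record the command, the error and the source line through the
	 * pf:ioctl:ioctl:error SDT probe, then jump to the per-command
	 * cleanup label.
	 */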
2657 #define	ERROUT_IOCTL(target, x)					\
2658     do {								\
2659 	    error = (x);						\
2660 	    SDT_PROBE3(pf, ioctl, ioctl, error, cmd, error, __LINE__);	\
2661 	    goto target;						\
2662     } while (0)
2663 
2664 
2665 	/* XXX keep in sync with switch() below */
2666 	if (securelevel_gt(td->td_ucred, 2))
2667 		switch (cmd) {
2668 		case DIOCGETRULES:
2669 		case DIOCGETRULENV:
2670 		case DIOCGETADDRS:
2671 		case DIOCGETADDR:
2672 		case DIOCGETSTATE:
2673 		case DIOCGETSTATENV:
2674 		case DIOCSETSTATUSIF:
2675 		case DIOCGETSTATUSNV:
2676 		case DIOCCLRSTATUS:
2677 		case DIOCNATLOOK:
2678 		case DIOCSETDEBUG:
2679 #ifdef COMPAT_FREEBSD14
2680 		case DIOCGETSTATES:
2681 		case DIOCGETSTATESV2:
2682 #endif
2683 		case DIOCGETTIMEOUT:
2684 		case DIOCCLRRULECTRS:
2685 		case DIOCGETLIMIT:
2686 		case DIOCGETALTQSV0:
2687 		case DIOCGETALTQSV1:
2688 		case DIOCGETALTQV0:
2689 		case DIOCGETALTQV1:
2690 		case DIOCGETQSTATSV0:
2691 		case DIOCGETQSTATSV1:
2692 		case DIOCGETRULESETS:
2693 		case DIOCGETRULESET:
2694 		case DIOCRGETTABLES:
2695 		case DIOCRGETTSTATS:
2696 		case DIOCRCLRTSTATS:
2697 		case DIOCRCLRADDRS:
2698 		case DIOCRADDADDRS:
2699 		case DIOCRDELADDRS:
2700 		case DIOCRSETADDRS:
2701 		case DIOCRGETADDRS:
2702 		case DIOCRGETASTATS:
2703 		case DIOCRCLRASTATS:
2704 		case DIOCRTSTADDRS:
2705 		case DIOCOSFPGET:
2706 		case DIOCGETSRCNODES:
2707 		case DIOCCLRSRCNODES:
2708 		case DIOCGETSYNCOOKIES:
2709 		case DIOCIGETIFACES:
2710 		case DIOCGIFSPEEDV0:
2711 		case DIOCGIFSPEEDV1:
2712 		case DIOCSETIFFLAG:
2713 		case DIOCCLRIFFLAG:
2714 		case DIOCGETETHRULES:
2715 		case DIOCGETETHRULE:
2716 		case DIOCGETETHRULESETS:
2717 		case DIOCGETETHRULESET:
2718 			break;
2719 		case DIOCRCLRTABLES:
2720 		case DIOCRADDTABLES:
2721 		case DIOCRDELTABLES:
2722 		case DIOCRSETTFLAGS:
2723 			if (((struct pfioc_table *)addr)->pfrio_flags &
2724 			    PFR_FLAG_DUMMY)
2725 				break; /* dummy operation ok */
2726 			return (EPERM);
2727 		default:
2728 			return (EPERM);
2729 		}
2730 
2731 	if (!(flags & FWRITE))
2732 		switch (cmd) {
2733 		case DIOCGETRULES:
2734 		case DIOCGETADDRS:
2735 		case DIOCGETADDR:
2736 		case DIOCGETSTATE:
2737 		case DIOCGETSTATENV:
2738 		case DIOCGETSTATUSNV:
2739 #ifdef COMPAT_FREEBSD14
2740 		case DIOCGETSTATES:
2741 		case DIOCGETSTATESV2:
2742 #endif
2743 		case DIOCGETTIMEOUT:
2744 		case DIOCGETLIMIT:
2745 		case DIOCGETALTQSV0:
2746 		case DIOCGETALTQSV1:
2747 		case DIOCGETALTQV0:
2748 		case DIOCGETALTQV1:
2749 		case DIOCGETQSTATSV0:
2750 		case DIOCGETQSTATSV1:
2751 		case DIOCGETRULESETS:
2752 		case DIOCGETRULESET:
2753 		case DIOCNATLOOK:
2754 		case DIOCRGETTABLES:
2755 		case DIOCRGETTSTATS:
2756 		case DIOCRGETADDRS:
2757 		case DIOCRGETASTATS:
2758 		case DIOCRTSTADDRS:
2759 		case DIOCOSFPGET:
2760 		case DIOCGETSRCNODES:
2761 		case DIOCGETSYNCOOKIES:
2762 		case DIOCIGETIFACES:
2763 		case DIOCGIFSPEEDV1:
2764 		case DIOCGIFSPEEDV0:
2765 		case DIOCGETRULENV:
2766 		case DIOCGETETHRULES:
2767 		case DIOCGETETHRULE:
2768 		case DIOCGETETHRULESETS:
2769 		case DIOCGETETHRULESET:
2770 			break;
2771 		case DIOCRCLRTABLES:
2772 		case DIOCRADDTABLES:
2773 		case DIOCRDELTABLES:
2774 		case DIOCRCLRTSTATS:
2775 		case DIOCRCLRADDRS:
2776 		case DIOCRADDADDRS:
2777 		case DIOCRDELADDRS:
2778 		case DIOCRSETADDRS:
2779 		case DIOCRSETTFLAGS:
2780 			if (((struct pfioc_table *)addr)->pfrio_flags &
2781 			    PFR_FLAG_DUMMY) {
2782 				flags |= FWRITE; /* need write lock for dummy */
2783 				break; /* dummy operation ok */
2784 			}
2785 			return (EACCES);
2786 		default:
2787 			return (EACCES);
2788 		}
2789 
2790 	CURVNET_SET(TD_TO_VNET(td));
2791 
2792 	switch (cmd) {
2793 #ifdef COMPAT_FREEBSD14
2794 	case DIOCSTART:
2795 		error = pf_start();
2796 		break;
2797 
2798 	case DIOCSTOP:
2799 		error = pf_stop();
2800 		break;
2801 #endif
2802 
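	/*
	 * The nvlist-based ioctls below all follow the same shape, sketched
	 * here for reference: copy in a packed nvlist of at most
	 * pf_ioctl_maxcount bytes, unpack it, act on it, then pack the reply
	 * back into nv->data.  A caller that passes nv->size == 0 only probes
	 * for the required length (returned in nv->len); a non-zero size that
	 * is too small fails with ENOSPC.
	 */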
2803 	case DIOCGETETHRULES: {
2804 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
2805 		nvlist_t		*nvl;
2806 		void			*packed;
2807 		struct pf_keth_rule	*tail;
2808 		struct pf_keth_ruleset	*rs;
2809 		u_int32_t		 ticket, nr;
2810 		const char		*anchor = "";
2811 
2812 		nvl = NULL;
2813 		packed = NULL;
2814 
2815 #define	ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULES_error, x)
2816 
2817 		if (nv->len > pf_ioctl_maxcount)
2818 			ERROUT(ENOMEM);
2819 
2820 		/* Copy the request in */
2821 		packed = malloc(nv->len, M_NVLIST, M_WAITOK);
2822 		if (packed == NULL)
2823 			ERROUT(ENOMEM);
2824 
2825 		error = copyin(nv->data, packed, nv->len);
2826 		if (error)
2827 			ERROUT(error);
2828 
2829 		nvl = nvlist_unpack(packed, nv->len, 0);
2830 		if (nvl == NULL)
2831 			ERROUT(EBADMSG);
2832 
2833 		if (! nvlist_exists_string(nvl, "anchor"))
2834 			ERROUT(EBADMSG);
2835 
2836 		anchor = nvlist_get_string(nvl, "anchor");
2837 
2838 		rs = pf_find_keth_ruleset(anchor);
2839 
2840 		nvlist_destroy(nvl);
2841 		nvl = NULL;
2842 		free(packed, M_NVLIST);
2843 		packed = NULL;
2844 
2845 		if (rs == NULL)
2846 			ERROUT(ENOENT);
2847 
2848 		/* Reply */
2849 		nvl = nvlist_create(0);
2850 		if (nvl == NULL)
2851 			ERROUT(ENOMEM);
2852 
2853 		PF_RULES_RLOCK();
2854 
2855 		ticket = rs->active.ticket;
2856 		tail = TAILQ_LAST(rs->active.rules, pf_keth_ruleq);
2857 		if (tail)
2858 			nr = tail->nr + 1;
2859 		else
2860 			nr = 0;
2861 
2862 		PF_RULES_RUNLOCK();
2863 
2864 		nvlist_add_number(nvl, "ticket", ticket);
2865 		nvlist_add_number(nvl, "nr", nr);
2866 
2867 		packed = nvlist_pack(nvl, &nv->len);
2868 		if (packed == NULL)
2869 			ERROUT(ENOMEM);
2870 
2871 		if (nv->size == 0)
2872 			ERROUT(0);
2873 		else if (nv->size < nv->len)
2874 			ERROUT(ENOSPC);
2875 
2876 		error = copyout(packed, nv->data, nv->len);
2877 
2878 #undef ERROUT
2879 DIOCGETETHRULES_error:
2880 		free(packed, M_NVLIST);
2881 		nvlist_destroy(nvl);
2882 		break;
2883 	}
2884 
2885 	case DIOCGETETHRULE: {
2886 		struct epoch_tracker	 et;
2887 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
2888 		nvlist_t		*nvl = NULL;
2889 		void			*nvlpacked = NULL;
2890 		struct pf_keth_rule	*rule = NULL;
2891 		struct pf_keth_ruleset	*rs;
2892 		u_int32_t		 ticket, nr;
2893 		bool			 clear = false;
2894 		const char		*anchor;
2895 
2896 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULE_error, x)
2897 
2898 		if (nv->len > pf_ioctl_maxcount)
2899 			ERROUT(ENOMEM);
2900 
2901 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
2902 		if (nvlpacked == NULL)
2903 			ERROUT(ENOMEM);
2904 
2905 		error = copyin(nv->data, nvlpacked, nv->len);
2906 		if (error)
2907 			ERROUT(error);
2908 
2909 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
2910 		if (nvl == NULL)
2911 			ERROUT(EBADMSG);
2912 		if (! nvlist_exists_number(nvl, "ticket"))
2913 			ERROUT(EBADMSG);
2914 		ticket = nvlist_get_number(nvl, "ticket");
2915 		if (! nvlist_exists_string(nvl, "anchor"))
2916 			ERROUT(EBADMSG);
2917 		anchor = nvlist_get_string(nvl, "anchor");
2918 
2919 		if (nvlist_exists_bool(nvl, "clear"))
2920 			clear = nvlist_get_bool(nvl, "clear");
2921 
2922 		if (clear && !(flags & FWRITE))
2923 			ERROUT(EACCES);
2924 
2925 		if (! nvlist_exists_number(nvl, "nr"))
2926 			ERROUT(EBADMSG);
2927 		nr = nvlist_get_number(nvl, "nr");
2928 
2929 		PF_RULES_RLOCK();
2930 		rs = pf_find_keth_ruleset(anchor);
2931 		if (rs == NULL) {
2932 			PF_RULES_RUNLOCK();
2933 			ERROUT(ENOENT);
2934 		}
2935 		if (ticket != rs->active.ticket) {
2936 			PF_RULES_RUNLOCK();
2937 			ERROUT(EBUSY);
2938 		}
2939 
2940 		nvlist_destroy(nvl);
2941 		nvl = NULL;
2942 		free(nvlpacked, M_NVLIST);
2943 		nvlpacked = NULL;
2944 
2945 		rule = TAILQ_FIRST(rs->active.rules);
2946 		while ((rule != NULL) && (rule->nr != nr))
2947 			rule = TAILQ_NEXT(rule, entries);
2948 		if (rule == NULL) {
2949 			PF_RULES_RUNLOCK();
2950 			ERROUT(ENOENT);
2951 		}
2952 		/* Make sure rule can't go away. */
2953 		NET_EPOCH_ENTER(et);
2954 		PF_RULES_RUNLOCK();
2955 		nvl = pf_keth_rule_to_nveth_rule(rule);
2956 		if (pf_keth_anchor_nvcopyout(rs, rule, nvl))
2957 			ERROUT(EBUSY);
2958 		NET_EPOCH_EXIT(et);
2959 		if (nvl == NULL)
2960 			ERROUT(ENOMEM);
2961 
2962 		nvlpacked = nvlist_pack(nvl, &nv->len);
2963 		if (nvlpacked == NULL)
2964 			ERROUT(ENOMEM);
2965 
2966 		if (nv->size == 0)
2967 			ERROUT(0);
2968 		else if (nv->size < nv->len)
2969 			ERROUT(ENOSPC);
2970 
2971 		error = copyout(nvlpacked, nv->data, nv->len);
2972 		if (error == 0 && clear) {
2973 			counter_u64_zero(rule->evaluations);
2974 			for (int i = 0; i < 2; i++) {
2975 				counter_u64_zero(rule->packets[i]);
2976 				counter_u64_zero(rule->bytes[i]);
2977 			}
2978 		}
2979 
2980 #undef ERROUT
2981 DIOCGETETHRULE_error:
2982 		free(nvlpacked, M_NVLIST);
2983 		nvlist_destroy(nvl);
2984 		break;
2985 	}
2986 
2987 	case DIOCADDETHRULE: {
2988 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
2989 		nvlist_t		*nvl = NULL;
2990 		void			*nvlpacked = NULL;
2991 		struct pf_keth_rule	*rule = NULL, *tail = NULL;
2992 		struct pf_keth_ruleset	*ruleset = NULL;
2993 		struct pfi_kkif		*kif = NULL, *bridge_to_kif = NULL;
2994 		const char		*anchor = "", *anchor_call = "";
2995 
2996 #define ERROUT(x)	ERROUT_IOCTL(DIOCADDETHRULE_error, x)
2997 
2998 		if (nv->len > pf_ioctl_maxcount)
2999 			ERROUT(ENOMEM);
3000 
3001 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3002 		if (nvlpacked == NULL)
3003 			ERROUT(ENOMEM);
3004 
3005 		error = copyin(nv->data, nvlpacked, nv->len);
3006 		if (error)
3007 			ERROUT(error);
3008 
3009 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3010 		if (nvl == NULL)
3011 			ERROUT(EBADMSG);
3012 
3013 		if (! nvlist_exists_number(nvl, "ticket"))
3014 			ERROUT(EBADMSG);
3015 
3016 		if (nvlist_exists_string(nvl, "anchor"))
3017 			anchor = nvlist_get_string(nvl, "anchor");
3018 		if (nvlist_exists_string(nvl, "anchor_call"))
3019 			anchor_call = nvlist_get_string(nvl, "anchor_call");
3020 
3021 		ruleset = pf_find_keth_ruleset(anchor);
3022 		if (ruleset == NULL)
3023 			ERROUT(EINVAL);
3024 
3025 		if (nvlist_get_number(nvl, "ticket") !=
3026 		    ruleset->inactive.ticket) {
3027 			DPFPRINTF(PF_DEBUG_MISC,
3028 			    ("ticket: %d != %d\n",
3029 			    (u_int32_t)nvlist_get_number(nvl, "ticket"),
3030 			    ruleset->inactive.ticket));
3031 			ERROUT(EBUSY);
3032 		}
3033 
3034 		rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
3035 		if (rule == NULL)
3036 			ERROUT(ENOMEM);
3037 		rule->timestamp = NULL;
3038 
3039 		error = pf_nveth_rule_to_keth_rule(nvl, rule);
3040 		if (error != 0)
3041 			ERROUT(error);
3042 
3043 		if (rule->ifname[0])
3044 			kif = pf_kkif_create(M_WAITOK);
3045 		if (rule->bridge_to_name[0])
3046 			bridge_to_kif = pf_kkif_create(M_WAITOK);
3047 		rule->evaluations = counter_u64_alloc(M_WAITOK);
3048 		for (int i = 0; i < 2; i++) {
3049 			rule->packets[i] = counter_u64_alloc(M_WAITOK);
3050 			rule->bytes[i] = counter_u64_alloc(M_WAITOK);
3051 		}
3052 		rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
3053 		    M_WAITOK | M_ZERO);
3054 
3055 		PF_RULES_WLOCK();
3056 
3057 		if (rule->ifname[0]) {
3058 			rule->kif = pfi_kkif_attach(kif, rule->ifname);
3059 			pfi_kkif_ref(rule->kif);
3060 		} else
3061 			rule->kif = NULL;
3062 		if (rule->bridge_to_name[0]) {
3063 			rule->bridge_to = pfi_kkif_attach(bridge_to_kif,
3064 			    rule->bridge_to_name);
3065 			pfi_kkif_ref(rule->bridge_to);
3066 		} else
3067 			rule->bridge_to = NULL;
3068 
3069 #ifdef ALTQ
3070 		/* set queue IDs */
3071 		if (rule->qname[0] != 0) {
3072 			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
3073 				error = EBUSY;
3074 			else
3075 				rule->qid = rule->qid;
3076 		}
3077 #endif
3078 		if (rule->tagname[0])
3079 			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
3080 				error = EBUSY;
3081 		if (rule->match_tagname[0])
3082 			if ((rule->match_tag = pf_tagname2tag(
3083 			    rule->match_tagname)) == 0)
3084 				error = EBUSY;
3085 
3086 		if (error == 0 && rule->ipdst.addr.type == PF_ADDR_TABLE)
3087 			error = pf_eth_addr_setup(ruleset, &rule->ipdst.addr);
3088 		if (error == 0 && rule->ipsrc.addr.type == PF_ADDR_TABLE)
3089 			error = pf_eth_addr_setup(ruleset, &rule->ipsrc.addr);
3090 
3091 		if (error) {
3092 			pf_free_eth_rule(rule);
3093 			PF_RULES_WUNLOCK();
3094 			ERROUT(error);
3095 		}
3096 
3097 		if (pf_keth_anchor_setup(rule, ruleset, anchor_call)) {
3098 			pf_free_eth_rule(rule);
3099 			PF_RULES_WUNLOCK();
3100 			ERROUT(EINVAL);
3101 		}
3102 
3103 		tail = TAILQ_LAST(ruleset->inactive.rules, pf_keth_ruleq);
3104 		if (tail)
3105 			rule->nr = tail->nr + 1;
3106 		else
3107 			rule->nr = 0;
3108 
3109 		TAILQ_INSERT_TAIL(ruleset->inactive.rules, rule, entries);
3110 
3111 		PF_RULES_WUNLOCK();
3112 
3113 #undef ERROUT
3114 DIOCADDETHRULE_error:
3115 		nvlist_destroy(nvl);
3116 		free(nvlpacked, M_NVLIST);
3117 		break;
3118 	}
3119 
3120 	case DIOCGETETHRULESETS: {
3121 		struct epoch_tracker	 et;
3122 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3123 		nvlist_t		*nvl = NULL;
3124 		void			*nvlpacked = NULL;
3125 		struct pf_keth_ruleset	*ruleset;
3126 		struct pf_keth_anchor	*anchor;
3127 		int			 nr = 0;
3128 
3129 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULESETS_error, x)
3130 
3131 		if (nv->len > pf_ioctl_maxcount)
3132 			ERROUT(ENOMEM);
3133 
3134 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3135 		if (nvlpacked == NULL)
3136 			ERROUT(ENOMEM);
3137 
3138 		error = copyin(nv->data, nvlpacked, nv->len);
3139 		if (error)
3140 			ERROUT(error);
3141 
3142 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3143 		if (nvl == NULL)
3144 			ERROUT(EBADMSG);
3145 		if (! nvlist_exists_string(nvl, "path"))
3146 			ERROUT(EBADMSG);
3147 
3148 		NET_EPOCH_ENTER(et);
3149 
3150 		if ((ruleset = pf_find_keth_ruleset(
3151 		    nvlist_get_string(nvl, "path"))) == NULL) {
3152 			NET_EPOCH_EXIT(et);
3153 			ERROUT(ENOENT);
3154 		}
3155 
3156 		if (ruleset->anchor == NULL) {
3157 			RB_FOREACH(anchor, pf_keth_anchor_global, &V_pf_keth_anchors)
3158 				if (anchor->parent == NULL)
3159 					nr++;
3160 		} else {
3161 			RB_FOREACH(anchor, pf_keth_anchor_node,
3162 			    &ruleset->anchor->children)
3163 				nr++;
3164 		}
3165 
3166 		NET_EPOCH_EXIT(et);
3167 
3168 		nvlist_destroy(nvl);
3169 		nvl = NULL;
3170 		free(nvlpacked, M_NVLIST);
3171 		nvlpacked = NULL;
3172 
3173 		nvl = nvlist_create(0);
3174 		if (nvl == NULL)
3175 			ERROUT(ENOMEM);
3176 
3177 		nvlist_add_number(nvl, "nr", nr);
3178 
3179 		nvlpacked = nvlist_pack(nvl, &nv->len);
3180 		if (nvlpacked == NULL)
3181 			ERROUT(ENOMEM);
3182 
3183 		if (nv->size == 0)
3184 			ERROUT(0);
3185 		else if (nv->size < nv->len)
3186 			ERROUT(ENOSPC);
3187 
3188 		error = copyout(nvlpacked, nv->data, nv->len);
3189 
3190 #undef ERROUT
3191 DIOCGETETHRULESETS_error:
3192 		free(nvlpacked, M_NVLIST);
3193 		nvlist_destroy(nvl);
3194 		break;
3195 	}
3196 
3197 	case DIOCGETETHRULESET: {
3198 		struct epoch_tracker	 et;
3199 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3200 		nvlist_t		*nvl = NULL;
3201 		void			*nvlpacked = NULL;
3202 		struct pf_keth_ruleset	*ruleset;
3203 		struct pf_keth_anchor	*anchor;
3204 		int			 nr = 0, req_nr = 0;
3205 		bool			 found = false;
3206 
3207 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULESET_error, x)
3208 
3209 		if (nv->len > pf_ioctl_maxcount)
3210 			ERROUT(ENOMEM);
3211 
3212 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3213 		if (nvlpacked == NULL)
3214 			ERROUT(ENOMEM);
3215 
3216 		error = copyin(nv->data, nvlpacked, nv->len);
3217 		if (error)
3218 			ERROUT(error);
3219 
3220 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3221 		if (nvl == NULL)
3222 			ERROUT(EBADMSG);
3223 		if (! nvlist_exists_string(nvl, "path"))
3224 			ERROUT(EBADMSG);
3225 		if (! nvlist_exists_number(nvl, "nr"))
3226 			ERROUT(EBADMSG);
3227 
3228 		req_nr = nvlist_get_number(nvl, "nr");
3229 
3230 		NET_EPOCH_ENTER(et);
3231 
3232 		if ((ruleset = pf_find_keth_ruleset(
3233 		    nvlist_get_string(nvl, "path"))) == NULL) {
3234 			NET_EPOCH_EXIT(et);
3235 			ERROUT(ENOENT);
3236 		}
3237 
3238 		nvlist_destroy(nvl);
3239 		nvl = NULL;
3240 		free(nvlpacked, M_NVLIST);
3241 		nvlpacked = NULL;
3242 
3243 		nvl = nvlist_create(0);
3244 		if (nvl == NULL) {
3245 			NET_EPOCH_EXIT(et);
3246 			ERROUT(ENOMEM);
3247 		}
3248 
3249 		if (ruleset->anchor == NULL) {
3250 			RB_FOREACH(anchor, pf_keth_anchor_global,
3251 			    &V_pf_keth_anchors) {
3252 				if (anchor->parent == NULL && nr++ == req_nr) {
3253 					found = true;
3254 					break;
3255 				}
3256 			}
3257 		} else {
3258 			RB_FOREACH(anchor, pf_keth_anchor_node,
3259 			     &ruleset->anchor->children) {
3260 				if (nr++ == req_nr) {
3261 					found = true;
3262 					break;
3263 				}
3264 			}
3265 		}
3266 
3267 		NET_EPOCH_EXIT(et);
3268 		if (found) {
3269 			nvlist_add_number(nvl, "nr", nr);
3270 			nvlist_add_string(nvl, "name", anchor->name);
3271 			if (ruleset->anchor)
3272 				nvlist_add_string(nvl, "path",
3273 				    ruleset->anchor->path);
3274 			else
3275 				nvlist_add_string(nvl, "path", "");
3276 		} else {
3277 			ERROUT(EBUSY);
3278 		}
3279 
3280 		nvlpacked = nvlist_pack(nvl, &nv->len);
3281 		if (nvlpacked == NULL)
3282 			ERROUT(ENOMEM);
3283 
3284 		if (nv->size == 0)
3285 			ERROUT(0);
3286 		else if (nv->size < nv->len)
3287 			ERROUT(ENOSPC);
3288 
3289 		error = copyout(nvlpacked, nv->data, nv->len);
3290 
3291 #undef ERROUT
3292 DIOCGETETHRULESET_error:
3293 		free(nvlpacked, M_NVLIST);
3294 		nvlist_destroy(nvl);
3295 		break;
3296 	}
3297 
3298 	case DIOCADDRULENV: {
3299 		struct pfioc_nv	*nv = (struct pfioc_nv *)addr;
3300 		nvlist_t	*nvl = NULL;
3301 		void		*nvlpacked = NULL;
3302 		struct pf_krule	*rule = NULL;
3303 		const char	*anchor = "", *anchor_call = "";
3304 		uint32_t	 ticket = 0, pool_ticket = 0;
3305 
3306 #define	ERROUT(x)	ERROUT_IOCTL(DIOCADDRULENV_error, x)
3307 
3308 		if (nv->len > pf_ioctl_maxcount)
3309 			ERROUT(ENOMEM);
3310 
3311 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3312 		error = copyin(nv->data, nvlpacked, nv->len);
3313 		if (error)
3314 			ERROUT(error);
3315 
3316 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3317 		if (nvl == NULL)
3318 			ERROUT(EBADMSG);
3319 
3320 		if (! nvlist_exists_number(nvl, "ticket"))
3321 			ERROUT(EINVAL);
3322 		ticket = nvlist_get_number(nvl, "ticket");
3323 
3324 		if (! nvlist_exists_number(nvl, "pool_ticket"))
3325 			ERROUT(EINVAL);
3326 		pool_ticket = nvlist_get_number(nvl, "pool_ticket");
3327 
3328 		if (! nvlist_exists_nvlist(nvl, "rule"))
3329 			ERROUT(EINVAL);
3330 
3331 		rule = pf_krule_alloc();
3332 		error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"),
3333 		    rule);
3334 		if (error)
3335 			ERROUT(error);
3336 
3337 		if (nvlist_exists_string(nvl, "anchor"))
3338 			anchor = nvlist_get_string(nvl, "anchor");
3339 		if (nvlist_exists_string(nvl, "anchor_call"))
3340 			anchor_call = nvlist_get_string(nvl, "anchor_call");
3341 
3342 		if ((error = nvlist_error(nvl)))
3343 			ERROUT(error);
3344 
3345 		/* Frees rule on error */
3346 		error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor,
3347 		    anchor_call, td->td_ucred->cr_ruid,
3348 		    td->td_proc ? td->td_proc->p_pid : 0);
3349 
3350 		nvlist_destroy(nvl);
3351 		free(nvlpacked, M_NVLIST);
3352 		break;
3353 #undef ERROUT
3354 DIOCADDRULENV_error:
3355 		pf_krule_free(rule);
3356 		nvlist_destroy(nvl);
3357 		free(nvlpacked, M_NVLIST);
3358 
3359 		break;
3360 	}
3361 	case DIOCADDRULE: {
3362 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
3363 		struct pf_krule		*rule;
3364 
3365 		rule = pf_krule_alloc();
3366 		error = pf_rule_to_krule(&pr->rule, rule);
3367 		if (error != 0) {
3368 			pf_krule_free(rule);
3369 			break;
3370 		}
3371 
3372 		pr->anchor[sizeof(pr->anchor) - 1] = 0;
3373 
3374 		/* Frees rule on error */
3375 		error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket,
3376 		    pr->anchor, pr->anchor_call, td->td_ucred->cr_ruid,
3377 		    td->td_proc ? td->td_proc->p_pid : 0);
3378 		break;
3379 	}
3380 
3381 	case DIOCGETRULES: {
3382 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
3383 
3384 		pr->anchor[sizeof(pr->anchor) - 1] = 0;
3385 
3386 		error = pf_ioctl_getrules(pr);
3387 
3388 		break;
3389 	}
3390 
3391 	case DIOCGETRULENV: {
3392 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3393 		nvlist_t		*nvrule = NULL;
3394 		nvlist_t		*nvl = NULL;
3395 		struct pf_kruleset	*ruleset;
3396 		struct pf_krule		*rule;
3397 		void			*nvlpacked = NULL;
3398 		int			 rs_num, nr;
3399 		bool			 clear_counter = false;
3400 
3401 #define	ERROUT(x)	ERROUT_IOCTL(DIOCGETRULENV_error, x)
3402 
3403 		if (nv->len > pf_ioctl_maxcount)
3404 			ERROUT(ENOMEM);
3405 
3406 		/* Copy the request in */
3407 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3408 		if (nvlpacked == NULL)
3409 			ERROUT(ENOMEM);
3410 
3411 		error = copyin(nv->data, nvlpacked, nv->len);
3412 		if (error)
3413 			ERROUT(error);
3414 
3415 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3416 		if (nvl == NULL)
3417 			ERROUT(EBADMSG);
3418 
3419 		if (! nvlist_exists_string(nvl, "anchor"))
3420 			ERROUT(EBADMSG);
3421 		if (! nvlist_exists_number(nvl, "ruleset"))
3422 			ERROUT(EBADMSG);
3423 		if (! nvlist_exists_number(nvl, "ticket"))
3424 			ERROUT(EBADMSG);
3425 		if (! nvlist_exists_number(nvl, "nr"))
3426 			ERROUT(EBADMSG);
3427 
3428 		if (nvlist_exists_bool(nvl, "clear_counter"))
3429 			clear_counter = nvlist_get_bool(nvl, "clear_counter");
3430 
3431 		if (clear_counter && !(flags & FWRITE))
3432 			ERROUT(EACCES);
3433 
3434 		nr = nvlist_get_number(nvl, "nr");
3435 
3436 		PF_RULES_WLOCK();
3437 		ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor"));
3438 		if (ruleset == NULL) {
3439 			PF_RULES_WUNLOCK();
3440 			ERROUT(ENOENT);
3441 		}
3442 
3443 		rs_num = pf_get_ruleset_number(nvlist_get_number(nvl, "ruleset"));
3444 		if (rs_num >= PF_RULESET_MAX) {
3445 			PF_RULES_WUNLOCK();
3446 			ERROUT(EINVAL);
3447 		}
3448 
3449 		if (nvlist_get_number(nvl, "ticket") !=
3450 		    ruleset->rules[rs_num].active.ticket) {
3451 			PF_RULES_WUNLOCK();
3452 			ERROUT(EBUSY);
3453 		}
3454 
3455 		if ((error = nvlist_error(nvl))) {
3456 			PF_RULES_WUNLOCK();
3457 			ERROUT(error);
3458 		}
3459 
3460 		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3461 		while ((rule != NULL) && (rule->nr != nr))
3462 			rule = TAILQ_NEXT(rule, entries);
3463 		if (rule == NULL) {
3464 			PF_RULES_WUNLOCK();
3465 			ERROUT(EBUSY);
3466 		}
3467 
3468 		nvrule = pf_krule_to_nvrule(rule);
3469 
3470 		nvlist_destroy(nvl);
3471 		nvl = nvlist_create(0);
3472 		if (nvl == NULL) {
3473 			PF_RULES_WUNLOCK();
3474 			ERROUT(ENOMEM);
3475 		}
3476 		nvlist_add_number(nvl, "nr", nr);
3477 		nvlist_add_nvlist(nvl, "rule", nvrule);
3478 		nvlist_destroy(nvrule);
3479 		nvrule = NULL;
3480 		if (pf_kanchor_nvcopyout(ruleset, rule, nvl)) {
3481 			PF_RULES_WUNLOCK();
3482 			ERROUT(EBUSY);
3483 		}
3484 
3485 		free(nvlpacked, M_NVLIST);
3486 		nvlpacked = nvlist_pack(nvl, &nv->len);
3487 		if (nvlpacked == NULL) {
3488 			PF_RULES_WUNLOCK();
3489 			ERROUT(ENOMEM);
3490 		}
3491 
3492 		if (nv->size == 0) {
3493 			PF_RULES_WUNLOCK();
3494 			ERROUT(0);
3495 		}
3496 		else if (nv->size < nv->len) {
3497 			PF_RULES_WUNLOCK();
3498 			ERROUT(ENOSPC);
3499 		}
3500 
3501 		if (clear_counter)
3502 			pf_krule_clear_counters(rule);
3503 
3504 		PF_RULES_WUNLOCK();
3505 
3506 		error = copyout(nvlpacked, nv->data, nv->len);
3507 
3508 #undef ERROUT
3509 DIOCGETRULENV_error:
3510 		free(nvlpacked, M_NVLIST);
3511 		nvlist_destroy(nvrule);
3512 		nvlist_destroy(nvl);
3513 
3514 		break;
3515 	}
3516 
3517 	case DIOCCHANGERULE: {
3518 		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
3519 		struct pf_kruleset	*ruleset;
3520 		struct pf_krule		*oldrule = NULL, *newrule = NULL;
3521 		struct pfi_kkif		*kif = NULL;
3522 		struct pf_kpooladdr	*pa;
3523 		u_int32_t		 nr = 0;
3524 		int			 rs_num;
3525 
3526 		pcr->anchor[sizeof(pcr->anchor) - 1] = 0;
3527 
3528 		if (pcr->action < PF_CHANGE_ADD_HEAD ||
3529 		    pcr->action > PF_CHANGE_GET_TICKET) {
3530 			error = EINVAL;
3531 			break;
3532 		}
3533 		if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3534 			error = EINVAL;
3535 			break;
3536 		}
3537 
3538 		if (pcr->action != PF_CHANGE_REMOVE) {
3539 			newrule = pf_krule_alloc();
3540 			error = pf_rule_to_krule(&pcr->rule, newrule);
3541 			if (error != 0) {
3542 				pf_krule_free(newrule);
3543 				break;
3544 			}
3545 
3546 			if (newrule->ifname[0])
3547 				kif = pf_kkif_create(M_WAITOK);
3548 			pf_counter_u64_init(&newrule->evaluations, M_WAITOK);
3549 			for (int i = 0; i < 2; i++) {
3550 				pf_counter_u64_init(&newrule->packets[i], M_WAITOK);
3551 				pf_counter_u64_init(&newrule->bytes[i], M_WAITOK);
3552 			}
3553 			newrule->states_cur = counter_u64_alloc(M_WAITOK);
3554 			newrule->states_tot = counter_u64_alloc(M_WAITOK);
3555 			newrule->src_nodes = counter_u64_alloc(M_WAITOK);
3556 			newrule->cuid = td->td_ucred->cr_ruid;
3557 			newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
3558 			TAILQ_INIT(&newrule->rpool.list);
3559 		}
3560 #define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGERULE_error, x)
3561 
3562 		PF_CONFIG_LOCK();
3563 		PF_RULES_WLOCK();
3564 #ifdef PF_WANT_32_TO_64_COUNTER
3565 		if (newrule != NULL) {
3566 			LIST_INSERT_HEAD(&V_pf_allrulelist, newrule, allrulelist);
3567 			newrule->allrulelinked = true;
3568 			V_pf_allrulecount++;
3569 		}
3570 #endif
3571 
3572 		if (!(pcr->action == PF_CHANGE_REMOVE ||
3573 		    pcr->action == PF_CHANGE_GET_TICKET) &&
3574 		    pcr->pool_ticket != V_ticket_pabuf)
3575 			ERROUT(EBUSY);
3576 
3577 		ruleset = pf_find_kruleset(pcr->anchor);
3578 		if (ruleset == NULL)
3579 			ERROUT(EINVAL);
3580 
3581 		rs_num = pf_get_ruleset_number(pcr->rule.action);
3582 		if (rs_num >= PF_RULESET_MAX)
3583 			ERROUT(EINVAL);
3584 
3585 		/*
3586 		 * XXXMJG: there is no guarantee that the ruleset was
3587 		 * created by the usual route of calling DIOCXBEGIN.
3588 		 * As a result it is possible that the rule tree will not
3589 		 * be allocated yet. Hack around it by doing it here.
3590 		 * Note that it is fine to let the tree persist on
3591 		 * error, as it will be freed down the road on future
3592 		 * updates (if need be).
3593 		 */
3594 		if (ruleset->rules[rs_num].active.tree == NULL) {
3595 			ruleset->rules[rs_num].active.tree = pf_rule_tree_alloc(M_NOWAIT);
3596 			if (ruleset->rules[rs_num].active.tree == NULL) {
3597 				ERROUT(ENOMEM);
3598 			}
3599 		}
3600 
3601 		if (pcr->action == PF_CHANGE_GET_TICKET) {
3602 			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
3603 			ERROUT(0);
3604 		} else if (pcr->ticket !=
3605 			    ruleset->rules[rs_num].active.ticket)
3606 				ERROUT(EINVAL);
3607 
3608 		if (pcr->action != PF_CHANGE_REMOVE) {
3609 			if (newrule->ifname[0]) {
3610 				newrule->kif = pfi_kkif_attach(kif,
3611 				    newrule->ifname);
3612 				kif = NULL;
3613 				pfi_kkif_ref(newrule->kif);
3614 			} else
3615 				newrule->kif = NULL;
3616 
3617 			if (newrule->rtableid > 0 &&
3618 			    newrule->rtableid >= rt_numfibs)
3619 				error = EBUSY;
3620 
3621 #ifdef ALTQ
3622 			/* set queue IDs */
3623 			if (newrule->qname[0] != 0) {
3624 				if ((newrule->qid =
3625 				    pf_qname2qid(newrule->qname)) == 0)
3626 					error = EBUSY;
3627 				else if (newrule->pqname[0] != 0) {
3628 					if ((newrule->pqid =
3629 					    pf_qname2qid(newrule->pqname)) == 0)
3630 						error = EBUSY;
3631 				} else
3632 					newrule->pqid = newrule->qid;
3633 			}
3634 #endif /* ALTQ */
3635 			if (newrule->tagname[0])
3636 				if ((newrule->tag =
3637 				    pf_tagname2tag(newrule->tagname)) == 0)
3638 					error = EBUSY;
3639 			if (newrule->match_tagname[0])
3640 				if ((newrule->match_tag = pf_tagname2tag(
3641 				    newrule->match_tagname)) == 0)
3642 					error = EBUSY;
3643 			if (newrule->rt && !newrule->direction)
3644 				error = EINVAL;
3645 			if (!newrule->log)
3646 				newrule->logif = 0;
3647 			if (newrule->logif >= PFLOGIFS_MAX)
3648 				error = EINVAL;
3649 			if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
3650 				error = ENOMEM;
3651 			if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
3652 				error = ENOMEM;
3653 			if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call))
3654 				error = EINVAL;
3655 			TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
3656 				if (pa->addr.type == PF_ADDR_TABLE) {
3657 					pa->addr.p.tbl =
3658 					    pfr_attach_table(ruleset,
3659 					    pa->addr.v.tblname);
3660 					if (pa->addr.p.tbl == NULL)
3661 						error = ENOMEM;
3662 				}
3663 
3664 			newrule->overload_tbl = NULL;
3665 			if (newrule->overload_tblname[0]) {
3666 				if ((newrule->overload_tbl = pfr_attach_table(
3667 				    ruleset, newrule->overload_tblname)) ==
3668 				    NULL)
3669 					error = EINVAL;
3670 				else
3671 					newrule->overload_tbl->pfrkt_flags |=
3672 					    PFR_TFLAG_ACTIVE;
3673 			}
3674 
3675 			pf_mv_kpool(&V_pf_pabuf, &newrule->rpool.list);
3676 			if (((((newrule->action == PF_NAT) ||
3677 			    (newrule->action == PF_RDR) ||
3678 			    (newrule->action == PF_BINAT) ||
3679 			    (newrule->rt > PF_NOPFROUTE)) &&
3680 			    !newrule->anchor)) &&
3681 			    (TAILQ_FIRST(&newrule->rpool.list) == NULL))
3682 				error = EINVAL;
3683 
3684 			if (error) {
3685 				pf_free_rule(newrule);
3686 				PF_RULES_WUNLOCK();
3687 				PF_CONFIG_UNLOCK();
3688 				break;
3689 			}
3690 
3691 			newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
3692 		}
3693 		pf_empty_kpool(&V_pf_pabuf);
3694 
3695 		if (pcr->action == PF_CHANGE_ADD_HEAD)
3696 			oldrule = TAILQ_FIRST(
3697 			    ruleset->rules[rs_num].active.ptr);
3698 		else if (pcr->action == PF_CHANGE_ADD_TAIL)
3699 			oldrule = TAILQ_LAST(
3700 			    ruleset->rules[rs_num].active.ptr, pf_krulequeue);
3701 		else {
3702 			oldrule = TAILQ_FIRST(
3703 			    ruleset->rules[rs_num].active.ptr);
3704 			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
3705 				oldrule = TAILQ_NEXT(oldrule, entries);
3706 			if (oldrule == NULL) {
3707 				if (newrule != NULL)
3708 					pf_free_rule(newrule);
3709 				PF_RULES_WUNLOCK();
3710 				PF_CONFIG_UNLOCK();
3711 				error = EINVAL;
3712 				break;
3713 			}
3714 		}
3715 
3716 		if (pcr->action == PF_CHANGE_REMOVE) {
3717 			pf_unlink_rule(ruleset->rules[rs_num].active.ptr,
3718 			    oldrule);
3719 			RB_REMOVE(pf_krule_global,
3720 			    ruleset->rules[rs_num].active.tree, oldrule);
3721 			ruleset->rules[rs_num].active.rcount--;
3722 		} else {
3723 			pf_hash_rule(newrule);
3724 			if (RB_INSERT(pf_krule_global,
3725 			    ruleset->rules[rs_num].active.tree, newrule) != NULL) {
3726 				pf_free_rule(newrule);
3727 				PF_RULES_WUNLOCK();
3728 				PF_CONFIG_UNLOCK();
3729 				error = EEXIST;
3730 				break;
3731 			}
3732 
3733 			if (oldrule == NULL)
3734 				TAILQ_INSERT_TAIL(
3735 				    ruleset->rules[rs_num].active.ptr,
3736 				    newrule, entries);
3737 			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
3738 			    pcr->action == PF_CHANGE_ADD_BEFORE)
3739 				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
3740 			else
3741 				TAILQ_INSERT_AFTER(
3742 				    ruleset->rules[rs_num].active.ptr,
3743 				    oldrule, newrule, entries);
3744 			ruleset->rules[rs_num].active.rcount++;
3745 		}
3746 
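		/*
		 * Renumber the remaining rules, bump the ruleset ticket and
		 * recompute the skip steps before the locks are dropped.
		 */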
3747 		nr = 0;
3748 		TAILQ_FOREACH(oldrule,
3749 		    ruleset->rules[rs_num].active.ptr, entries)
3750 			oldrule->nr = nr++;
3751 
3752 		ruleset->rules[rs_num].active.ticket++;
3753 
3754 		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
3755 		pf_remove_if_empty_kruleset(ruleset);
3756 
3757 		PF_RULES_WUNLOCK();
3758 		PF_CONFIG_UNLOCK();
3759 		break;
3760 
3761 #undef ERROUT
3762 DIOCCHANGERULE_error:
3763 		PF_RULES_WUNLOCK();
3764 		PF_CONFIG_UNLOCK();
3765 		pf_krule_free(newrule);
3766 		pf_kkif_free(kif);
3767 		break;
3768 	}
3769 
3770 	case DIOCCLRSTATESNV: {
3771 		error = pf_clearstates_nv((struct pfioc_nv *)addr);
3772 		break;
3773 	}
3774 
3775 	case DIOCKILLSTATESNV: {
3776 		error = pf_killstates_nv((struct pfioc_nv *)addr);
3777 		break;
3778 	}
3779 
3780 	case DIOCADDSTATE: {
3781 		struct pfioc_state		*ps = (struct pfioc_state *)addr;
3782 		struct pfsync_state_1301	*sp = &ps->state;
3783 
3784 		if (sp->timeout >= PFTM_MAX) {
3785 			error = EINVAL;
3786 			break;
3787 		}
3788 		if (V_pfsync_state_import_ptr != NULL) {
3789 			PF_RULES_RLOCK();
3790 			error = V_pfsync_state_import_ptr(
3791 			    (union pfsync_state_union *)sp, PFSYNC_SI_IOCTL,
3792 			    PFSYNC_MSG_VERSION_1301);
3793 			PF_RULES_RUNLOCK();
3794 		} else
3795 			error = EOPNOTSUPP;
3796 		break;
3797 	}
3798 
3799 	case DIOCGETSTATE: {
3800 		struct pfioc_state	*ps = (struct pfioc_state *)addr;
3801 		struct pf_kstate	*s;
3802 
3803 		s = pf_find_state_byid(ps->state.id, ps->state.creatorid);
3804 		if (s == NULL) {
3805 			error = ENOENT;
3806 			break;
3807 		}
3808 
3809 		pfsync_state_export((union pfsync_state_union*)&ps->state,
3810 		    s, PFSYNC_MSG_VERSION_1301);
3811 		PF_STATE_UNLOCK(s);
3812 		break;
3813 	}
3814 
3815 	case DIOCGETSTATENV: {
3816 		error = pf_getstate((struct pfioc_nv *)addr);
3817 		break;
3818 	}
3819 
3820 #ifdef COMPAT_FREEBSD14
3821 	case DIOCGETSTATES: {
3822 		struct pfioc_states	*ps = (struct pfioc_states *)addr;
3823 		struct pf_kstate	*s;
3824 		struct pfsync_state_1301	*pstore, *p;
3825 		int			 i, nr;
3826 		size_t			 slice_count = 16, count;
3827 		void			*out;
3828 
3829 		if (ps->ps_len <= 0) {
3830 			nr = uma_zone_get_cur(V_pf_state_z);
3831 			ps->ps_len = sizeof(struct pfsync_state_1301) * nr;
3832 			break;
3833 		}
3834 
3835 		out = ps->ps_states;
3836 		pstore = mallocarray(slice_count,
3837 		    sizeof(struct pfsync_state_1301), M_TEMP, M_WAITOK | M_ZERO);
3838 		nr = 0;
3839 
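		/*
		 * Walk the state hash row by row: count the states in the row,
		 * grow the temporary slice and retry if it is too small, then
		 * export the row and copy it out to userland.
		 */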
3840 		for (i = 0; i <= V_pf_hashmask; i++) {
3841 			struct pf_idhash *ih = &V_pf_idhash[i];
3842 
3843 DIOCGETSTATES_retry:
3844 			p = pstore;
3845 
3846 			if (LIST_EMPTY(&ih->states))
3847 				continue;
3848 
3849 			PF_HASHROW_LOCK(ih);
3850 			count = 0;
3851 			LIST_FOREACH(s, &ih->states, entry) {
3852 				if (s->timeout == PFTM_UNLINKED)
3853 					continue;
3854 				count++;
3855 			}
3856 
3857 			if (count > slice_count) {
3858 				PF_HASHROW_UNLOCK(ih);
3859 				free(pstore, M_TEMP);
3860 				slice_count = count * 2;
3861 				pstore = mallocarray(slice_count,
3862 				    sizeof(struct pfsync_state_1301), M_TEMP,
3863 				    M_WAITOK | M_ZERO);
3864 				goto DIOCGETSTATES_retry;
3865 			}
3866 
3867 			if ((nr+count) * sizeof(*p) > ps->ps_len) {
3868 				PF_HASHROW_UNLOCK(ih);
3869 				goto DIOCGETSTATES_full;
3870 			}
3871 
3872 			LIST_FOREACH(s, &ih->states, entry) {
3873 				if (s->timeout == PFTM_UNLINKED)
3874 					continue;
3875 
3876 				pfsync_state_export((union pfsync_state_union*)p,
3877 				    s, PFSYNC_MSG_VERSION_1301);
3878 				p++;
3879 				nr++;
3880 			}
3881 			PF_HASHROW_UNLOCK(ih);
3882 			error = copyout(pstore, out,
3883 			    sizeof(struct pfsync_state_1301) * count);
3884 			if (error)
3885 				break;
3886 			out = ps->ps_states + nr;
3887 		}
3888 DIOCGETSTATES_full:
3889 		ps->ps_len = sizeof(struct pfsync_state_1301) * nr;
3890 		free(pstore, M_TEMP);
3891 
3892 		break;
3893 	}
3894 
3895 	case DIOCGETSTATESV2: {
3896 		struct pfioc_states_v2	*ps = (struct pfioc_states_v2 *)addr;
3897 		struct pf_kstate	*s;
3898 		struct pf_state_export	*pstore, *p;
3899 		int i, nr;
3900 		size_t slice_count = 16, count;
3901 		void *out;
3902 
3903 		if (ps->ps_req_version > PF_STATE_VERSION) {
3904 			error = ENOTSUP;
3905 			break;
3906 		}
3907 
3908 		if (ps->ps_len <= 0) {
3909 			nr = uma_zone_get_cur(V_pf_state_z);
3910 			ps->ps_len = sizeof(struct pf_state_export) * nr;
3911 			break;
3912 		}
3913 
3914 		out = ps->ps_states;
3915 		pstore = mallocarray(slice_count,
3916 		    sizeof(struct pf_state_export), M_TEMP, M_WAITOK | M_ZERO);
3917 		nr = 0;
3918 
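		/* Same per-row count, export and copyout scheme as DIOCGETSTATES. */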
3919 		for (i = 0; i <= V_pf_hashmask; i++) {
3920 			struct pf_idhash *ih = &V_pf_idhash[i];
3921 
3922 DIOCGETSTATESV2_retry:
3923 			p = pstore;
3924 
3925 			if (LIST_EMPTY(&ih->states))
3926 				continue;
3927 
3928 			PF_HASHROW_LOCK(ih);
3929 			count = 0;
3930 			LIST_FOREACH(s, &ih->states, entry) {
3931 				if (s->timeout == PFTM_UNLINKED)
3932 					continue;
3933 				count++;
3934 			}
3935 
3936 			if (count > slice_count) {
3937 				PF_HASHROW_UNLOCK(ih);
3938 				free(pstore, M_TEMP);
3939 				slice_count = count * 2;
3940 				pstore = mallocarray(slice_count,
3941 				    sizeof(struct pf_state_export), M_TEMP,
3942 				    M_WAITOK | M_ZERO);
3943 				goto DIOCGETSTATESV2_retry;
3944 			}
3945 
3946 			if ((nr+count) * sizeof(*p) > ps->ps_len) {
3947 				PF_HASHROW_UNLOCK(ih);
3948 				goto DIOCGETSTATESV2_full;
3949 			}
3950 
3951 			LIST_FOREACH(s, &ih->states, entry) {
3952 				if (s->timeout == PFTM_UNLINKED)
3953 					continue;
3954 
3955 				pf_state_export(p, s);
3956 				p++;
3957 				nr++;
3958 			}
3959 			PF_HASHROW_UNLOCK(ih);
3960 			error = copyout(pstore, out,
3961 			    sizeof(struct pf_state_export) * count);
3962 			if (error)
3963 				break;
3964 			out = ps->ps_states + nr;
3965 		}
3966 DIOCGETSTATESV2_full:
3967 		ps->ps_len = nr * sizeof(struct pf_state_export);
3968 		free(pstore, M_TEMP);
3969 
3970 		break;
3971 	}
3972 #endif
3973 	case DIOCGETSTATUSNV: {
3974 		error = pf_getstatus((struct pfioc_nv *)addr);
3975 		break;
3976 	}
3977 
3978 	case DIOCSETSTATUSIF: {
3979 		struct pfioc_if	*pi = (struct pfioc_if *)addr;
3980 
3981 		if (pi->ifname[0] == 0) {
3982 			bzero(V_pf_status.ifname, IFNAMSIZ);
3983 			break;
3984 		}
3985 		PF_RULES_WLOCK();
3986 		error = pf_user_strcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ);
3987 		PF_RULES_WUNLOCK();
3988 		break;
3989 	}
3990 
3991 	case DIOCCLRSTATUS: {
3992 		pf_ioctl_clear_status();
3993 		break;
3994 	}
3995 
3996 	case DIOCNATLOOK: {
3997 		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
3998 		struct pf_state_key	*sk;
3999 		struct pf_kstate	*state;
4000 		struct pf_state_key_cmp	 key;
4001 		int			 m = 0, direction = pnl->direction;
4002 		int			 sidx, didx;
4003 
4004 		/* NATLOOK src and dst are reversed, so reverse sidx/didx */
4005 		sidx = (direction == PF_IN) ? 1 : 0;
4006 		didx = (direction == PF_IN) ? 0 : 1;
4007 
4008 		if (!pnl->proto ||
4009 		    PF_AZERO(&pnl->saddr, pnl->af) ||
4010 		    PF_AZERO(&pnl->daddr, pnl->af) ||
4011 		    ((pnl->proto == IPPROTO_TCP ||
4012 		    pnl->proto == IPPROTO_UDP) &&
4013 		    (!pnl->dport || !pnl->sport)))
4014 			error = EINVAL;
4015 		else {
4016 			bzero(&key, sizeof(key));
4017 			key.af = pnl->af;
4018 			key.proto = pnl->proto;
4019 			PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
4020 			key.port[sidx] = pnl->sport;
4021 			PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
4022 			key.port[didx] = pnl->dport;
4023 
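			/*
			 * A single matching state yields the translated
			 * addresses and ports; more than one match is
			 * reported as E2BIG.
			 */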
4024 			state = pf_find_state_all(&key, direction, &m);
4025 			if (state == NULL) {
4026 				error = ENOENT;
4027 			} else {
4028 				if (m > 1) {
4029 					PF_STATE_UNLOCK(state);
4030 					error = E2BIG;	/* more than one state */
4031 				} else {
4032 					sk = state->key[sidx];
4033 					PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
4034 					pnl->rsport = sk->port[sidx];
4035 					PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
4036 					pnl->rdport = sk->port[didx];
4037 					PF_STATE_UNLOCK(state);
4038 				}
4039 			}
4040 		}
4041 		break;
4042 	}
4043 
4044 	case DIOCSETTIMEOUT: {
4045 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
4046 
4047 		error = pf_ioctl_set_timeout(pt->timeout, pt->seconds,
4048 		    &pt->seconds);
4049 		break;
4050 	}
4051 
4052 	case DIOCGETTIMEOUT: {
4053 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
4054 
4055 		error = pf_ioctl_get_timeout(pt->timeout, &pt->seconds);
4056 		break;
4057 	}
4058 
4059 	case DIOCGETLIMIT: {
4060 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
4061 
4062 		error = pf_ioctl_get_limit(pl->index, &pl->limit);
4063 		break;
4064 	}
4065 
4066 	case DIOCSETLIMIT: {
4067 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
4068 		unsigned int old_limit;
4069 
4070 		error = pf_ioctl_set_limit(pl->index, pl->limit, &old_limit);
4071 		pl->limit = old_limit;
4072 		break;
4073 	}
4074 
4075 	case DIOCSETDEBUG: {
4076 		u_int32_t	*level = (u_int32_t *)addr;
4077 
4078 		PF_RULES_WLOCK();
4079 		V_pf_status.debug = *level;
4080 		PF_RULES_WUNLOCK();
4081 		break;
4082 	}
4083 
4084 	case DIOCCLRRULECTRS: {
4085 		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
4086 		struct pf_kruleset	*ruleset = &pf_main_ruleset;
4087 		struct pf_krule		*rule;
4088 
4089 		PF_RULES_WLOCK();
4090 		TAILQ_FOREACH(rule,
4091 		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
4092 			pf_counter_u64_zero(&rule->evaluations);
4093 			for (int i = 0; i < 2; i++) {
4094 				pf_counter_u64_zero(&rule->packets[i]);
4095 				pf_counter_u64_zero(&rule->bytes[i]);
4096 			}
4097 		}
4098 		PF_RULES_WUNLOCK();
4099 		break;
4100 	}
4101 
4102 	case DIOCGIFSPEEDV0:
4103 	case DIOCGIFSPEEDV1: {
4104 		struct pf_ifspeed_v1	*psp = (struct pf_ifspeed_v1 *)addr;
4105 		struct pf_ifspeed_v1	ps;
4106 		struct ifnet		*ifp;
4107 
4108 		if (psp->ifname[0] == '\0') {
4109 			error = EINVAL;
4110 			break;
4111 		}
4112 
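		/*
		 * Copy the name in, look up the interface and report its
		 * baudrate; the v0 ABI only has a 32 bit field, so clamp
		 * the value to UINT_MAX there.
		 */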
4113 		error = pf_user_strcpy(ps.ifname, psp->ifname, IFNAMSIZ);
4114 		if (error != 0)
4115 			break;
4116 		ifp = ifunit(ps.ifname);
4117 		if (ifp != NULL) {
4118 			psp->baudrate32 =
4119 			    (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX);
4120 			if (cmd == DIOCGIFSPEEDV1)
4121 				psp->baudrate = ifp->if_baudrate;
4122 		} else {
4123 			error = EINVAL;
4124 		}
4125 		break;
4126 	}
4127 
4128 #ifdef ALTQ
4129 	case DIOCSTARTALTQ: {
4130 		struct pf_altq		*altq;
4131 
4132 		PF_RULES_WLOCK();
4133 		/* enable all altq interfaces on active list */
4134 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
4135 			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
4136 				error = pf_enable_altq(altq);
4137 				if (error != 0)
4138 					break;
4139 			}
4140 		}
4141 		if (error == 0)
4142 			V_pf_altq_running = 1;
4143 		PF_RULES_WUNLOCK();
4144 		DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
4145 		break;
4146 	}
4147 
4148 	case DIOCSTOPALTQ: {
4149 		struct pf_altq		*altq;
4150 
4151 		PF_RULES_WLOCK();
4152 		/* disable all altq interfaces on active list */
4153 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
4154 			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
4155 				error = pf_disable_altq(altq);
4156 				if (error != 0)
4157 					break;
4158 			}
4159 		}
4160 		if (error == 0)
4161 			V_pf_altq_running = 0;
4162 		PF_RULES_WUNLOCK();
4163 		DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
4164 		break;
4165 	}
4166 
4167 	case DIOCADDALTQV0:
4168 	case DIOCADDALTQV1: {
4169 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4170 		struct pf_altq		*altq, *a;
4171 		struct ifnet		*ifp;
4172 
4173 		altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO);
4174 		error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd));
4175 		if (error)
4176 			break;
4177 		altq->local_flags = 0;
4178 
4179 		PF_RULES_WLOCK();
4180 		if (pa->ticket != V_ticket_altqs_inactive) {
4181 			PF_RULES_WUNLOCK();
4182 			free(altq, M_PFALTQ);
4183 			error = EBUSY;
4184 			break;
4185 		}
4186 
4187 		/*
4188 		 * If this is for a queue, find the discipline and
4189 		 * copy the necessary fields.
4190 		 */
4191 		if (altq->qname[0] != 0) {
4192 			if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
4193 				PF_RULES_WUNLOCK();
4194 				error = EBUSY;
4195 				free(altq, M_PFALTQ);
4196 				break;
4197 			}
4198 			altq->altq_disc = NULL;
4199 			TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) {
4200 				if (strncmp(a->ifname, altq->ifname,
4201 				    IFNAMSIZ) == 0) {
4202 					altq->altq_disc = a->altq_disc;
4203 					break;
4204 				}
4205 			}
4206 		}
4207 
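		/*
		 * If the interface does not currently exist, mark the queue
		 * as removed instead of failing; otherwise attach it via
		 * altq_add().
		 */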
4208 		if ((ifp = ifunit(altq->ifname)) == NULL)
4209 			altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
4210 		else
4211 			error = altq_add(ifp, altq);
4212 
4213 		if (error) {
4214 			PF_RULES_WUNLOCK();
4215 			free(altq, M_PFALTQ);
4216 			break;
4217 		}
4218 
4219 		if (altq->qname[0] != 0)
4220 			TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries);
4221 		else
4222 			TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries);
4223 		/* version error check done on import above */
4224 		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
4225 		PF_RULES_WUNLOCK();
4226 		break;
4227 	}
4228 
4229 	case DIOCGETALTQSV0:
4230 	case DIOCGETALTQSV1: {
4231 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4232 		struct pf_altq		*altq;
4233 
4234 		PF_RULES_RLOCK();
4235 		pa->nr = 0;
4236 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries)
4237 			pa->nr++;
4238 		TAILQ_FOREACH(altq, V_pf_altqs_active, entries)
4239 			pa->nr++;
4240 		pa->ticket = V_ticket_altqs_active;
4241 		PF_RULES_RUNLOCK();
4242 		break;
4243 	}
4244 
4245 	case DIOCGETALTQV0:
4246 	case DIOCGETALTQV1: {
4247 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4248 		struct pf_altq		*altq;
4249 
4250 		PF_RULES_RLOCK();
4251 		if (pa->ticket != V_ticket_altqs_active) {
4252 			PF_RULES_RUNLOCK();
4253 			error = EBUSY;
4254 			break;
4255 		}
4256 		altq = pf_altq_get_nth_active(pa->nr);
4257 		if (altq == NULL) {
4258 			PF_RULES_RUNLOCK();
4259 			error = EBUSY;
4260 			break;
4261 		}
4262 		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
4263 		PF_RULES_RUNLOCK();
4264 		break;
4265 	}
4266 
4267 	case DIOCCHANGEALTQV0:
4268 	case DIOCCHANGEALTQV1:
4269 		/* CHANGEALTQ not supported yet! */
4270 		error = ENODEV;
4271 		break;
4272 
4273 	case DIOCGETQSTATSV0:
4274 	case DIOCGETQSTATSV1: {
4275 		struct pfioc_qstats_v1	*pq = (struct pfioc_qstats_v1 *)addr;
4276 		struct pf_altq		*altq;
4277 		int			 nbytes;
4278 		u_int32_t		 version;
4279 
4280 		PF_RULES_RLOCK();
4281 		if (pq->ticket != V_ticket_altqs_active) {
4282 			PF_RULES_RUNLOCK();
4283 			error = EBUSY;
4284 			break;
4285 		}
4286 		nbytes = pq->nbytes;
4287 		altq = pf_altq_get_nth_active(pq->nr);
4288 		if (altq == NULL) {
4289 			PF_RULES_RUNLOCK();
4290 			error = EBUSY;
4291 			break;
4292 		}
4293 
4294 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
4295 			PF_RULES_RUNLOCK();
4296 			error = ENXIO;
4297 			break;
4298 		}
4299 		PF_RULES_RUNLOCK();
4300 		if (cmd == DIOCGETQSTATSV0)
4301 			version = 0;  /* DIOCGETQSTATSV0 means stats struct v0 */
4302 		else
4303 			version = pq->version;
4304 		error = altq_getqstats(altq, pq->buf, &nbytes, version);
4305 		if (error == 0) {
4306 			pq->scheduler = altq->scheduler;
4307 			pq->nbytes = nbytes;
4308 		}
4309 		break;
4310 	}
4311 #endif /* ALTQ */
4312 
4313 	case DIOCBEGINADDRS: {
4314 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4315 
4316 		error = pf_ioctl_begin_addrs(&pp->ticket);
4317 		break;
4318 	}
4319 
4320 	case DIOCADDADDR: {
4321 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4322 
4323 		error = pf_ioctl_add_addr(pp);
4324 		break;
4325 	}
4326 
4327 	case DIOCGETADDRS: {
4328 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4329 
4330 		error = pf_ioctl_get_addrs(pp);
4331 		break;
4332 	}
4333 
4334 	case DIOCGETADDR: {
4335 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4336 
4337 		error = pf_ioctl_get_addr(pp);
4338 		break;
4339 	}
4340 
4341 	case DIOCCHANGEADDR: {
4342 		struct pfioc_pooladdr	*pca = (struct pfioc_pooladdr *)addr;
4343 		struct pf_kpool		*pool;
4344 		struct pf_kpooladdr	*oldpa = NULL, *newpa = NULL;
4345 		struct pf_kruleset	*ruleset;
4346 		struct pfi_kkif		*kif = NULL;
4347 
4348 		pca->anchor[sizeof(pca->anchor) - 1] = 0;
4349 
4350 		if (pca->action < PF_CHANGE_ADD_HEAD ||
4351 		    pca->action > PF_CHANGE_REMOVE) {
4352 			error = EINVAL;
4353 			break;
4354 		}
4355 		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
4356 		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
4357 		    pca->addr.addr.type != PF_ADDR_TABLE) {
4358 			error = EINVAL;
4359 			break;
4360 		}
4361 		if (pca->addr.addr.p.dyn != NULL) {
4362 			error = EINVAL;
4363 			break;
4364 		}
4365 
4366 		if (pca->action != PF_CHANGE_REMOVE) {
4367 #ifndef INET
4368 			if (pca->af == AF_INET) {
4369 				error = EAFNOSUPPORT;
4370 				break;
4371 			}
4372 #endif /* INET */
4373 #ifndef INET6
4374 			if (pca->af == AF_INET6) {
4375 				error = EAFNOSUPPORT;
4376 				break;
4377 			}
4378 #endif /* INET6 */
4379 			newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK);
4380 			bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
4381 			if (newpa->ifname[0])
4382 				kif = pf_kkif_create(M_WAITOK);
4383 			newpa->kif = NULL;
4384 		}
4385 #define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGEADDR_error, x)
4386 		PF_RULES_WLOCK();
4387 		ruleset = pf_find_kruleset(pca->anchor);
4388 		if (ruleset == NULL)
4389 			ERROUT(EBUSY);
4390 
4391 		pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action,
4392 		    pca->r_num, pca->r_last, 1, 1);
4393 		if (pool == NULL)
4394 			ERROUT(EBUSY);
4395 
4396 		if (pca->action != PF_CHANGE_REMOVE) {
4397 			if (newpa->ifname[0]) {
4398 				newpa->kif = pfi_kkif_attach(kif, newpa->ifname);
4399 				pfi_kkif_ref(newpa->kif);
4400 				kif = NULL;
4401 			}
4402 
4403 			switch (newpa->addr.type) {
4404 			case PF_ADDR_DYNIFTL:
4405 				error = pfi_dynaddr_setup(&newpa->addr,
4406 				    pca->af);
4407 				break;
4408 			case PF_ADDR_TABLE:
4409 				newpa->addr.p.tbl = pfr_attach_table(ruleset,
4410 				    newpa->addr.v.tblname);
4411 				if (newpa->addr.p.tbl == NULL)
4412 					error = ENOMEM;
4413 				break;
4414 			}
4415 			if (error)
4416 				goto DIOCCHANGEADDR_error;
4417 		}
4418 
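		/* Locate the existing pool entry the change is relative to. */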
4419 		switch (pca->action) {
4420 		case PF_CHANGE_ADD_HEAD:
4421 			oldpa = TAILQ_FIRST(&pool->list);
4422 			break;
4423 		case PF_CHANGE_ADD_TAIL:
4424 			oldpa = TAILQ_LAST(&pool->list, pf_kpalist);
4425 			break;
4426 		default:
4427 			oldpa = TAILQ_FIRST(&pool->list);
4428 			for (int i = 0; oldpa && i < pca->nr; i++)
4429 				oldpa = TAILQ_NEXT(oldpa, entries);
4430 
4431 			if (oldpa == NULL)
4432 				ERROUT(EINVAL);
4433 		}
4434 
4435 		if (pca->action == PF_CHANGE_REMOVE) {
4436 			TAILQ_REMOVE(&pool->list, oldpa, entries);
4437 			switch (oldpa->addr.type) {
4438 			case PF_ADDR_DYNIFTL:
4439 				pfi_dynaddr_remove(oldpa->addr.p.dyn);
4440 				break;
4441 			case PF_ADDR_TABLE:
4442 				pfr_detach_table(oldpa->addr.p.tbl);
4443 				break;
4444 			}
4445 			if (oldpa->kif)
4446 				pfi_kkif_unref(oldpa->kif);
4447 			free(oldpa, M_PFRULE);
4448 		} else {
4449 			if (oldpa == NULL)
4450 				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
4451 			else if (pca->action == PF_CHANGE_ADD_HEAD ||
4452 			    pca->action == PF_CHANGE_ADD_BEFORE)
4453 				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
4454 			else
4455 				TAILQ_INSERT_AFTER(&pool->list, oldpa,
4456 				    newpa, entries);
4457 		}
4458 
4459 		pool->cur = TAILQ_FIRST(&pool->list);
4460 		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af);
4461 		PF_RULES_WUNLOCK();
4462 		break;
4463 
4464 #undef ERROUT
4465 DIOCCHANGEADDR_error:
4466 		if (newpa != NULL) {
4467 			if (newpa->kif)
4468 				pfi_kkif_unref(newpa->kif);
4469 			free(newpa, M_PFRULE);
4470 		}
4471 		PF_RULES_WUNLOCK();
4472 		pf_kkif_free(kif);
4473 		break;
4474 	}
4475 
4476 	case DIOCGETRULESETS: {
4477 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
4478 		struct pf_kruleset	*ruleset;
4479 		struct pf_kanchor	*anchor;
4480 
4481 		pr->path[sizeof(pr->path) - 1] = 0;
4482 
4483 		PF_RULES_RLOCK();
4484 		if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
4485 			PF_RULES_RUNLOCK();
4486 			error = ENOENT;
4487 			break;
4488 		}
4489 		pr->nr = 0;
4490 		if (ruleset->anchor == NULL) {
4491 			/* XXX kludge for pf_main_ruleset */
4492 			RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
4493 				if (anchor->parent == NULL)
4494 					pr->nr++;
4495 		} else {
4496 			RB_FOREACH(anchor, pf_kanchor_node,
4497 			    &ruleset->anchor->children)
4498 				pr->nr++;
4499 		}
4500 		PF_RULES_RUNLOCK();
4501 		break;
4502 	}
4503 
4504 	case DIOCGETRULESET: {
4505 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
4506 		struct pf_kruleset	*ruleset;
4507 		struct pf_kanchor	*anchor;
4508 		u_int32_t		 nr = 0;
4509 
4510 		pr->path[sizeof(pr->path) - 1] = 0;
4511 
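		/* Return the name of the pr->nr'th child anchor of the ruleset. */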
4512 		PF_RULES_RLOCK();
4513 		if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
4514 			PF_RULES_RUNLOCK();
4515 			error = ENOENT;
4516 			break;
4517 		}
4518 		pr->name[0] = 0;
4519 		if (ruleset->anchor == NULL) {
4520 			/* XXX kludge for pf_main_ruleset */
4521 			RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
4522 				if (anchor->parent == NULL && nr++ == pr->nr) {
4523 					strlcpy(pr->name, anchor->name,
4524 					    sizeof(pr->name));
4525 					break;
4526 				}
4527 		} else {
4528 			RB_FOREACH(anchor, pf_kanchor_node,
4529 			    &ruleset->anchor->children)
4530 				if (nr++ == pr->nr) {
4531 					strlcpy(pr->name, anchor->name,
4532 					    sizeof(pr->name));
4533 					break;
4534 				}
4535 		}
4536 		if (!pr->name[0])
4537 			error = EBUSY;
4538 		PF_RULES_RUNLOCK();
4539 		break;
4540 	}
4541 
4542 	case DIOCRCLRTABLES: {
4543 		struct pfioc_table *io = (struct pfioc_table *)addr;
4544 
4545 		if (io->pfrio_esize != 0) {
4546 			error = ENODEV;
4547 			break;
4548 		}
4549 		PF_RULES_WLOCK();
4550 		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
4551 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
4552 		PF_RULES_WUNLOCK();
4553 		break;
4554 	}
4555 
4556 	case DIOCRADDTABLES: {
4557 		struct pfioc_table *io = (struct pfioc_table *)addr;
4558 		struct pfr_table *pfrts;
4559 		size_t totlen;
4560 
4561 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4562 			error = ENODEV;
4563 			break;
4564 		}
4565 
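		/*
		 * Bound the request size, copy the table array in from
		 * userland and add the tables under the rules write lock.
		 */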
4566 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4567 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4568 			error = ENOMEM;
4569 			break;
4570 		}
4571 
4572 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4573 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4574 		    M_TEMP, M_WAITOK);
4575 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4576 		if (error) {
4577 			free(pfrts, M_TEMP);
4578 			break;
4579 		}
4580 		PF_RULES_WLOCK();
4581 		error = pfr_add_tables(pfrts, io->pfrio_size,
4582 		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4583 		PF_RULES_WUNLOCK();
4584 		free(pfrts, M_TEMP);
4585 		break;
4586 	}
4587 
4588 	case DIOCRDELTABLES: {
4589 		struct pfioc_table *io = (struct pfioc_table *)addr;
4590 		struct pfr_table *pfrts;
4591 		size_t totlen;
4592 
4593 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4594 			error = ENODEV;
4595 			break;
4596 		}
4597 
4598 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4599 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4600 			error = ENOMEM;
4601 			break;
4602 		}
4603 
4604 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4605 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4606 		    M_TEMP, M_WAITOK);
4607 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4608 		if (error) {
4609 			free(pfrts, M_TEMP);
4610 			break;
4611 		}
4612 		PF_RULES_WLOCK();
4613 		error = pfr_del_tables(pfrts, io->pfrio_size,
4614 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4615 		PF_RULES_WUNLOCK();
4616 		free(pfrts, M_TEMP);
4617 		break;
4618 	}
4619 
4620 	case DIOCRGETTABLES: {
4621 		struct pfioc_table *io = (struct pfioc_table *)addr;
4622 		struct pfr_table *pfrts;
4623 		size_t totlen;
4624 		int n;
4625 
4626 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4627 			error = ENODEV;
4628 			break;
4629 		}
4630 		PF_RULES_RLOCK();
4631 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4632 		if (n < 0) {
4633 			PF_RULES_RUNLOCK();
4634 			error = EINVAL;
4635 			break;
4636 		}
4637 		io->pfrio_size = min(io->pfrio_size, n);
4638 
4639 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4640 
4641 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4642 		    M_TEMP, M_NOWAIT | M_ZERO);
4643 		if (pfrts == NULL) {
4644 			error = ENOMEM;
4645 			PF_RULES_RUNLOCK();
4646 			break;
4647 		}
4648 		error = pfr_get_tables(&io->pfrio_table, pfrts,
4649 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4650 		PF_RULES_RUNLOCK();
4651 		if (error == 0)
4652 			error = copyout(pfrts, io->pfrio_buffer, totlen);
4653 		free(pfrts, M_TEMP);
4654 		break;
4655 	}
4656 
4657 	case DIOCRGETTSTATS: {
4658 		struct pfioc_table *io = (struct pfioc_table *)addr;
4659 		struct pfr_tstats *pfrtstats;
4660 		size_t totlen;
4661 		int n;
4662 
4663 		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
4664 			error = ENODEV;
4665 			break;
4666 		}
4667 		PF_TABLE_STATS_LOCK();
4668 		PF_RULES_RLOCK();
4669 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4670 		if (n < 0) {
4671 			PF_RULES_RUNLOCK();
4672 			PF_TABLE_STATS_UNLOCK();
4673 			error = EINVAL;
4674 			break;
4675 		}
4676 		io->pfrio_size = min(io->pfrio_size, n);
4677 
4678 		totlen = io->pfrio_size * sizeof(struct pfr_tstats);
4679 		pfrtstats = mallocarray(io->pfrio_size,
4680 		    sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT | M_ZERO);
4681 		if (pfrtstats == NULL) {
4682 			error = ENOMEM;
4683 			PF_RULES_RUNLOCK();
4684 			PF_TABLE_STATS_UNLOCK();
4685 			break;
4686 		}
4687 		error = pfr_get_tstats(&io->pfrio_table, pfrtstats,
4688 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4689 		PF_RULES_RUNLOCK();
4690 		PF_TABLE_STATS_UNLOCK();
4691 		if (error == 0)
4692 			error = copyout(pfrtstats, io->pfrio_buffer, totlen);
4693 		free(pfrtstats, M_TEMP);
4694 		break;
4695 	}
4696 
4697 	case DIOCRCLRTSTATS: {
4698 		struct pfioc_table *io = (struct pfioc_table *)addr;
4699 		struct pfr_table *pfrts;
4700 		size_t totlen;
4701 
4702 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4703 			error = ENODEV;
4704 			break;
4705 		}
4706 
4707 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4708 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4709 			/* We used to count tables and use the minimum required
4710 			 * size, so we didn't fail on overly large requests.
4711 			 * Keep doing so. */
4712 			io->pfrio_size = pf_ioctl_maxcount;
4713 			break;
4714 		}
4715 
4716 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4717 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4718 		    M_TEMP, M_WAITOK);
4719 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4720 		if (error) {
4721 			free(pfrts, M_TEMP);
4722 			break;
4723 		}
4724 
4725 		PF_TABLE_STATS_LOCK();
4726 		PF_RULES_RLOCK();
4727 		error = pfr_clr_tstats(pfrts, io->pfrio_size,
4728 		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4729 		PF_RULES_RUNLOCK();
4730 		PF_TABLE_STATS_UNLOCK();
4731 		free(pfrts, M_TEMP);
4732 		break;
4733 	}
4734 
4735 	case DIOCRSETTFLAGS: {
4736 		struct pfioc_table *io = (struct pfioc_table *)addr;
4737 		struct pfr_table *pfrts;
4738 		size_t totlen;
4739 		int n;
4740 
4741 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4742 			error = ENODEV;
4743 			break;
4744 		}
4745 
4746 		PF_RULES_RLOCK();
4747 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4748 		if (n < 0) {
4749 			PF_RULES_RUNLOCK();
4750 			error = EINVAL;
4751 			break;
4752 		}
4753 
4754 		io->pfrio_size = min(io->pfrio_size, n);
4755 		PF_RULES_RUNLOCK();
4756 
4757 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4758 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4759 		    M_TEMP, M_WAITOK);
4760 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4761 		if (error) {
4762 			free(pfrts, M_TEMP);
4763 			break;
4764 		}
4765 		PF_RULES_WLOCK();
4766 		error = pfr_set_tflags(pfrts, io->pfrio_size,
4767 		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
4768 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4769 		PF_RULES_WUNLOCK();
4770 		free(pfrts, M_TEMP);
4771 		break;
4772 	}
4773 
4774 	case DIOCRCLRADDRS: {
4775 		struct pfioc_table *io = (struct pfioc_table *)addr;
4776 
4777 		if (io->pfrio_esize != 0) {
4778 			error = ENODEV;
4779 			break;
4780 		}
4781 		PF_RULES_WLOCK();
4782 		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
4783 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
4784 		PF_RULES_WUNLOCK();
4785 		break;
4786 	}
4787 
4788 	case DIOCRADDADDRS: {
4789 		struct pfioc_table *io = (struct pfioc_table *)addr;
4790 		struct pfr_addr *pfras;
4791 		size_t totlen;
4792 
4793 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4794 			error = ENODEV;
4795 			break;
4796 		}
4797 		if (io->pfrio_size < 0 ||
4798 		    io->pfrio_size > pf_ioctl_maxcount ||
4799 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4800 			error = EINVAL;
4801 			break;
4802 		}
4803 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4804 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4805 		    M_TEMP, M_WAITOK);
4806 		error = copyin(io->pfrio_buffer, pfras, totlen);
4807 		if (error) {
4808 			free(pfras, M_TEMP);
4809 			break;
4810 		}
4811 		PF_RULES_WLOCK();
4812 		error = pfr_add_addrs(&io->pfrio_table, pfras,
4813 		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
4814 		    PFR_FLAG_USERIOCTL);
4815 		PF_RULES_WUNLOCK();
4816 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4817 			error = copyout(pfras, io->pfrio_buffer, totlen);
4818 		free(pfras, M_TEMP);
4819 		break;
4820 	}
4821 
4822 	case DIOCRDELADDRS: {
4823 		struct pfioc_table *io = (struct pfioc_table *)addr;
4824 		struct pfr_addr *pfras;
4825 		size_t totlen;
4826 
4827 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4828 			error = ENODEV;
4829 			break;
4830 		}
4831 		if (io->pfrio_size < 0 ||
4832 		    io->pfrio_size > pf_ioctl_maxcount ||
4833 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4834 			error = EINVAL;
4835 			break;
4836 		}
4837 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4838 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4839 		    M_TEMP, M_WAITOK);
4840 		error = copyin(io->pfrio_buffer, pfras, totlen);
4841 		if (error) {
4842 			free(pfras, M_TEMP);
4843 			break;
4844 		}
4845 		PF_RULES_WLOCK();
4846 		error = pfr_del_addrs(&io->pfrio_table, pfras,
4847 		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
4848 		    PFR_FLAG_USERIOCTL);
4849 		PF_RULES_WUNLOCK();
4850 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4851 			error = copyout(pfras, io->pfrio_buffer, totlen);
4852 		free(pfras, M_TEMP);
4853 		break;
4854 	}
4855 
4856 	case DIOCRSETADDRS: {
4857 		struct pfioc_table *io = (struct pfioc_table *)addr;
4858 		struct pfr_addr *pfras;
4859 		size_t totlen, count;
4860 
4861 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4862 			error = ENODEV;
4863 			break;
4864 		}
4865 		if (io->pfrio_size < 0 || io->pfrio_size2 < 0) {
4866 			error = EINVAL;
4867 			break;
4868 		}
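		/*
		 * pfrio_size2 reserves room for feedback entries, so the
		 * temporary buffer must hold whichever count is larger.
		 */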
4869 		count = max(io->pfrio_size, io->pfrio_size2);
4870 		if (count > pf_ioctl_maxcount ||
4871 		    WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) {
4872 			error = EINVAL;
4873 			break;
4874 		}
4875 		totlen = count * sizeof(struct pfr_addr);
4876 		pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP,
4877 		    M_WAITOK);
4878 		error = copyin(io->pfrio_buffer, pfras, totlen);
4879 		if (error) {
4880 			free(pfras, M_TEMP);
4881 			break;
4882 		}
4883 		PF_RULES_WLOCK();
4884 		error = pfr_set_addrs(&io->pfrio_table, pfras,
4885 		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
4886 		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
4887 		    PFR_FLAG_USERIOCTL, 0);
4888 		PF_RULES_WUNLOCK();
4889 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4890 			error = copyout(pfras, io->pfrio_buffer, totlen);
4891 		free(pfras, M_TEMP);
4892 		break;
4893 	}
4894 
4895 	case DIOCRGETADDRS: {
4896 		struct pfioc_table *io = (struct pfioc_table *)addr;
4897 		struct pfr_addr *pfras;
4898 		size_t totlen;
4899 
4900 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4901 			error = ENODEV;
4902 			break;
4903 		}
4904 		if (io->pfrio_size < 0 ||
4905 		    io->pfrio_size > pf_ioctl_maxcount ||
4906 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4907 			error = EINVAL;
4908 			break;
4909 		}
4910 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4911 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4912 		    M_TEMP, M_WAITOK | M_ZERO);
4913 		PF_RULES_RLOCK();
4914 		error = pfr_get_addrs(&io->pfrio_table, pfras,
4915 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4916 		PF_RULES_RUNLOCK();
4917 		if (error == 0)
4918 			error = copyout(pfras, io->pfrio_buffer, totlen);
4919 		free(pfras, M_TEMP);
4920 		break;
4921 	}
4922 
4923 	case DIOCRGETASTATS: {
4924 		struct pfioc_table *io = (struct pfioc_table *)addr;
4925 		struct pfr_astats *pfrastats;
4926 		size_t totlen;
4927 
4928 		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
4929 			error = ENODEV;
4930 			break;
4931 		}
4932 		if (io->pfrio_size < 0 ||
4933 		    io->pfrio_size > pf_ioctl_maxcount ||
4934 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) {
4935 			error = EINVAL;
4936 			break;
4937 		}
4938 		totlen = io->pfrio_size * sizeof(struct pfr_astats);
4939 		pfrastats = mallocarray(io->pfrio_size,
4940 		    sizeof(struct pfr_astats), M_TEMP, M_WAITOK | M_ZERO);
4941 		PF_RULES_RLOCK();
4942 		error = pfr_get_astats(&io->pfrio_table, pfrastats,
4943 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4944 		PF_RULES_RUNLOCK();
4945 		if (error == 0)
4946 			error = copyout(pfrastats, io->pfrio_buffer, totlen);
4947 		free(pfrastats, M_TEMP);
4948 		break;
4949 	}
4950 
4951 	case DIOCRCLRASTATS: {
4952 		struct pfioc_table *io = (struct pfioc_table *)addr;
4953 		struct pfr_addr *pfras;
4954 		size_t totlen;
4955 
4956 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4957 			error = ENODEV;
4958 			break;
4959 		}
4960 		if (io->pfrio_size < 0 ||
4961 		    io->pfrio_size > pf_ioctl_maxcount ||
4962 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4963 			error = EINVAL;
4964 			break;
4965 		}
4966 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4967 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4968 		    M_TEMP, M_WAITOK);
4969 		error = copyin(io->pfrio_buffer, pfras, totlen);
4970 		if (error) {
4971 			free(pfras, M_TEMP);
4972 			break;
4973 		}
4974 		PF_RULES_WLOCK();
4975 		error = pfr_clr_astats(&io->pfrio_table, pfras,
4976 		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
4977 		    PFR_FLAG_USERIOCTL);
4978 		PF_RULES_WUNLOCK();
4979 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4980 			error = copyout(pfras, io->pfrio_buffer, totlen);
4981 		free(pfras, M_TEMP);
4982 		break;
4983 	}
4984 
4985 	case DIOCRTSTADDRS: {
4986 		struct pfioc_table *io = (struct pfioc_table *)addr;
4987 		struct pfr_addr *pfras;
4988 		size_t totlen;
4989 
4990 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4991 			error = ENODEV;
4992 			break;
4993 		}
4994 		if (io->pfrio_size < 0 ||
4995 		    io->pfrio_size > pf_ioctl_maxcount ||
4996 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4997 			error = EINVAL;
4998 			break;
4999 		}
5000 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5001 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5002 		    M_TEMP, M_WAITOK);
5003 		error = copyin(io->pfrio_buffer, pfras, totlen);
5004 		if (error) {
5005 			free(pfras, M_TEMP);
5006 			break;
5007 		}
5008 		PF_RULES_RLOCK();
5009 		error = pfr_tst_addrs(&io->pfrio_table, pfras,
5010 		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
5011 		    PFR_FLAG_USERIOCTL);
5012 		PF_RULES_RUNLOCK();
5013 		if (error == 0)
5014 			error = copyout(pfras, io->pfrio_buffer, totlen);
5015 		free(pfras, M_TEMP);
5016 		break;
5017 	}
5018 
5019 	case DIOCRINADEFINE: {
5020 		struct pfioc_table *io = (struct pfioc_table *)addr;
5021 		struct pfr_addr *pfras;
5022 		size_t totlen;
5023 
5024 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5025 			error = ENODEV;
5026 			break;
5027 		}
5028 		if (io->pfrio_size < 0 ||
5029 		    io->pfrio_size > pf_ioctl_maxcount ||
5030 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5031 			error = EINVAL;
5032 			break;
5033 		}
5034 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5035 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5036 		    M_TEMP, M_WAITOK);
5037 		error = copyin(io->pfrio_buffer, pfras, totlen);
5038 		if (error) {
5039 			free(pfras, M_TEMP);
5040 			break;
5041 		}
5042 		PF_RULES_WLOCK();
5043 		error = pfr_ina_define(&io->pfrio_table, pfras,
5044 		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
5045 		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
5046 		PF_RULES_WUNLOCK();
5047 		free(pfras, M_TEMP);
5048 		break;
5049 	}
5050 
5051 	case DIOCOSFPADD: {
5052 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
5053 		PF_RULES_WLOCK();
5054 		error = pf_osfp_add(io);
5055 		PF_RULES_WUNLOCK();
5056 		break;
5057 	}
5058 
5059 	case DIOCOSFPGET: {
5060 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
5061 		PF_RULES_RLOCK();
5062 		error = pf_osfp_get(io);
5063 		PF_RULES_RUNLOCK();
5064 		break;
5065 	}
5066 
5067 	case DIOCXBEGIN: {
5068 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5069 		struct pfioc_trans_e	*ioes, *ioe;
5070 		size_t			 totlen;
5071 		int			 i;
5072 
5073 		if (io->esize != sizeof(*ioe)) {
5074 			error = ENODEV;
5075 			break;
5076 		}
5077 		if (io->size < 0 ||
5078 		    io->size > pf_ioctl_maxcount ||
5079 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5080 			error = EINVAL;
5081 			break;
5082 		}
5083 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5084 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5085 		    M_TEMP, M_WAITOK);
5086 		error = copyin(io->array, ioes, totlen);
5087 		if (error) {
5088 			free(ioes, M_TEMP);
5089 			break;
5090 		}
5091 		/* Ensure there are no more Ethernet rules to clean up. */
5092 		NET_EPOCH_DRAIN_CALLBACKS();
5093 		PF_RULES_WLOCK();
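		/*
		 * Open an inactive ruleset of the appropriate type (eth, altq,
		 * table or rules) for every transaction element and hand the
		 * ticket back for the later commit or rollback.
		 */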
5094 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5095 			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
5096 			switch (ioe->rs_num) {
5097 			case PF_RULESET_ETH:
5098 				if ((error = pf_begin_eth(&ioe->ticket, ioe->anchor))) {
5099 					PF_RULES_WUNLOCK();
5100 					free(ioes, M_TEMP);
5101 					goto fail;
5102 				}
5103 				break;
5104 #ifdef ALTQ
5105 			case PF_RULESET_ALTQ:
5106 				if (ioe->anchor[0]) {
5107 					PF_RULES_WUNLOCK();
5108 					free(ioes, M_TEMP);
5109 					error = EINVAL;
5110 					goto fail;
5111 				}
5112 				if ((error = pf_begin_altq(&ioe->ticket))) {
5113 					PF_RULES_WUNLOCK();
5114 					free(ioes, M_TEMP);
5115 					goto fail;
5116 				}
5117 				break;
5118 #endif /* ALTQ */
5119 			case PF_RULESET_TABLE:
5120 			    {
5121 				struct pfr_table table;
5122 
5123 				bzero(&table, sizeof(table));
5124 				strlcpy(table.pfrt_anchor, ioe->anchor,
5125 				    sizeof(table.pfrt_anchor));
5126 				if ((error = pfr_ina_begin(&table,
5127 				    &ioe->ticket, NULL, 0))) {
5128 					PF_RULES_WUNLOCK();
5129 					free(ioes, M_TEMP);
5130 					goto fail;
5131 				}
5132 				break;
5133 			    }
5134 			default:
5135 				if ((error = pf_begin_rules(&ioe->ticket,
5136 				    ioe->rs_num, ioe->anchor))) {
5137 					PF_RULES_WUNLOCK();
5138 					free(ioes, M_TEMP);
5139 					goto fail;
5140 				}
5141 				break;
5142 			}
5143 		}
5144 		PF_RULES_WUNLOCK();
5145 		error = copyout(ioes, io->array, totlen);
5146 		free(ioes, M_TEMP);
5147 		break;
5148 	}
5149 
5150 	case DIOCXROLLBACK: {
5151 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5152 		struct pfioc_trans_e	*ioe, *ioes;
5153 		size_t			 totlen;
5154 		int			 i;
5155 
5156 		if (io->esize != sizeof(*ioe)) {
5157 			error = ENODEV;
5158 			break;
5159 		}
5160 		if (io->size < 0 ||
5161 		    io->size > pf_ioctl_maxcount ||
5162 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5163 			error = EINVAL;
5164 			break;
5165 		}
5166 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5167 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5168 		    M_TEMP, M_WAITOK);
5169 		error = copyin(io->array, ioes, totlen);
5170 		if (error) {
5171 			free(ioes, M_TEMP);
5172 			break;
5173 		}
5174 		PF_RULES_WLOCK();
5175 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5176 			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
5177 			switch (ioe->rs_num) {
5178 			case PF_RULESET_ETH:
5179 				if ((error = pf_rollback_eth(ioe->ticket,
5180 				    ioe->anchor))) {
5181 					PF_RULES_WUNLOCK();
5182 					free(ioes, M_TEMP);
5183 					goto fail; /* really bad */
5184 				}
5185 				break;
5186 #ifdef ALTQ
5187 			case PF_RULESET_ALTQ:
5188 				if (ioe->anchor[0]) {
5189 					PF_RULES_WUNLOCK();
5190 					free(ioes, M_TEMP);
5191 					error = EINVAL;
5192 					goto fail;
5193 				}
5194 				if ((error = pf_rollback_altq(ioe->ticket))) {
5195 					PF_RULES_WUNLOCK();
5196 					free(ioes, M_TEMP);
5197 					goto fail; /* really bad */
5198 				}
5199 				break;
5200 #endif /* ALTQ */
5201 			case PF_RULESET_TABLE:
5202 			    {
5203 				struct pfr_table table;
5204 
5205 				bzero(&table, sizeof(table));
5206 				strlcpy(table.pfrt_anchor, ioe->anchor,
5207 				    sizeof(table.pfrt_anchor));
5208 				if ((error = pfr_ina_rollback(&table,
5209 				    ioe->ticket, NULL, 0))) {
5210 					PF_RULES_WUNLOCK();
5211 					free(ioes, M_TEMP);
5212 					goto fail; /* really bad */
5213 				}
5214 				break;
5215 			    }
5216 			default:
5217 				if ((error = pf_rollback_rules(ioe->ticket,
5218 				    ioe->rs_num, ioe->anchor))) {
5219 					PF_RULES_WUNLOCK();
5220 					free(ioes, M_TEMP);
5221 					goto fail; /* really bad */
5222 				}
5223 				break;
5224 			}
5225 		}
5226 		PF_RULES_WUNLOCK();
5227 		free(ioes, M_TEMP);
5228 		break;
5229 	}
5230 
5231 	case DIOCXCOMMIT: {
5232 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5233 		struct pfioc_trans_e	*ioe, *ioes;
5234 		struct pf_kruleset	*rs;
5235 		struct pf_keth_ruleset	*ers;
5236 		size_t			 totlen;
5237 		int			 i;
5238 
5239 		if (io->esize != sizeof(*ioe)) {
5240 			error = ENODEV;
5241 			break;
5242 		}
5243 
5244 		if (io->size < 0 ||
5245 		    io->size > pf_ioctl_maxcount ||
5246 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5247 			error = EINVAL;
5248 			break;
5249 		}
5250 
5251 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5252 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5253 		    M_TEMP, M_WAITOK);
5254 		error = copyin(io->array, ioes, totlen);
5255 		if (error) {
5256 			free(ioes, M_TEMP);
5257 			break;
5258 		}
5259 		PF_RULES_WLOCK();
5260 		/* First make sure everything will succeed. */
5261 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5262 			ioe->anchor[sizeof(ioe->anchor) - 1] = 0;
5263 			switch (ioe->rs_num) {
5264 			case PF_RULESET_ETH:
5265 				ers = pf_find_keth_ruleset(ioe->anchor);
5266 				if (ers == NULL || ioe->ticket == 0 ||
5267 				    ioe->ticket != ers->inactive.ticket) {
5268 					PF_RULES_WUNLOCK();
5269 					free(ioes, M_TEMP);
5270 					error = EINVAL;
5271 					goto fail;
5272 				}
5273 				break;
5274 #ifdef ALTQ
5275 			case PF_RULESET_ALTQ:
5276 				if (ioe->anchor[0]) {
5277 					PF_RULES_WUNLOCK();
5278 					free(ioes, M_TEMP);
5279 					error = EINVAL;
5280 					goto fail;
5281 				}
5282 				if (!V_altqs_inactive_open || ioe->ticket !=
5283 				    V_ticket_altqs_inactive) {
5284 					PF_RULES_WUNLOCK();
5285 					free(ioes, M_TEMP);
5286 					error = EBUSY;
5287 					goto fail;
5288 				}
5289 				break;
5290 #endif /* ALTQ */
5291 			case PF_RULESET_TABLE:
5292 				rs = pf_find_kruleset(ioe->anchor);
5293 				if (rs == NULL || !rs->topen || ioe->ticket !=
5294 				    rs->tticket) {
5295 					PF_RULES_WUNLOCK();
5296 					free(ioes, M_TEMP);
5297 					error = EBUSY;
5298 					goto fail;
5299 				}
5300 				break;
5301 			default:
5302 				if (ioe->rs_num < 0 || ioe->rs_num >=
5303 				    PF_RULESET_MAX) {
5304 					PF_RULES_WUNLOCK();
5305 					free(ioes, M_TEMP);
5306 					error = EINVAL;
5307 					goto fail;
5308 				}
5309 				rs = pf_find_kruleset(ioe->anchor);
5310 				if (rs == NULL ||
5311 				    !rs->rules[ioe->rs_num].inactive.open ||
5312 				    rs->rules[ioe->rs_num].inactive.ticket !=
5313 				    ioe->ticket) {
5314 					PF_RULES_WUNLOCK();
5315 					free(ioes, M_TEMP);
5316 					error = EBUSY;
5317 					goto fail;
5318 				}
5319 				break;
5320 			}
5321 		}
5322 		/* Now do the commit - no errors should happen here. */
5323 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5324 			switch (ioe->rs_num) {
5325 			case PF_RULESET_ETH:
5326 				if ((error = pf_commit_eth(ioe->ticket, ioe->anchor))) {
5327 					PF_RULES_WUNLOCK();
5328 					free(ioes, M_TEMP);
5329 					goto fail; /* really bad */
5330 				}
5331 				break;
5332 #ifdef ALTQ
5333 			case PF_RULESET_ALTQ:
5334 				if ((error = pf_commit_altq(ioe->ticket))) {
5335 					PF_RULES_WUNLOCK();
5336 					free(ioes, M_TEMP);
5337 					goto fail; /* really bad */
5338 				}
5339 				break;
5340 #endif /* ALTQ */
5341 			case PF_RULESET_TABLE:
5342 			    {
5343 				struct pfr_table table;
5344 
5345 				bzero(&table, sizeof(table));
5346 				(void)strlcpy(table.pfrt_anchor, ioe->anchor,
5347 				    sizeof(table.pfrt_anchor));
5348 				if ((error = pfr_ina_commit(&table,
5349 				    ioe->ticket, NULL, NULL, 0))) {
5350 					PF_RULES_WUNLOCK();
5351 					free(ioes, M_TEMP);
5352 					goto fail; /* really bad */
5353 				}
5354 				break;
5355 			    }
5356 			default:
5357 				if ((error = pf_commit_rules(ioe->ticket,
5358 				    ioe->rs_num, ioe->anchor))) {
5359 					PF_RULES_WUNLOCK();
5360 					free(ioes, M_TEMP);
5361 					goto fail; /* really bad */
5362 				}
5363 				break;
5364 			}
5365 		}
5366 		PF_RULES_WUNLOCK();
5367 
5368 		/* Only hook into Ethernet traffic if we've got rules for it. */
5369 		if (! TAILQ_EMPTY(V_pf_keth->active.rules))
5370 			hook_pf_eth();
5371 		else
5372 			dehook_pf_eth();
5373 
5374 		free(ioes, M_TEMP);
5375 		break;
5376 	}
5377 
5378 	case DIOCGETSRCNODES: {
5379 		struct pfioc_src_nodes	*psn = (struct pfioc_src_nodes *)addr;
5380 		struct pf_srchash	*sh;
5381 		struct pf_ksrc_node	*n;
5382 		struct pf_src_node	*p, *pstore;
5383 		uint32_t		 i, nr = 0;
5384 
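		/* First pass: count the source nodes to size the reply. */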
5385 		for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask;
5386 				i++, sh++) {
5387 			PF_HASHROW_LOCK(sh);
5388 			LIST_FOREACH(n, &sh->nodes, entry)
5389 				nr++;
5390 			PF_HASHROW_UNLOCK(sh);
5391 		}
5392 
5393 		psn->psn_len = min(psn->psn_len,
5394 		    sizeof(struct pf_src_node) * nr);
5395 
5396 		if (psn->psn_len == 0) {
5397 			psn->psn_len = sizeof(struct pf_src_node) * nr;
5398 			break;
5399 		}
5400 
5401 		nr = 0;
5402 
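		/* Second pass: copy out as many nodes as fit into the buffer. */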
5403 		p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO);
5404 		for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask;
5405 		    i++, sh++) {
5406 		    PF_HASHROW_LOCK(sh);
5407 		    LIST_FOREACH(n, &sh->nodes, entry) {
5408 
5409 			if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
5410 				break;
5411 
5412 			pf_src_node_copy(n, p);
5413 
5414 			p++;
5415 			nr++;
5416 		    }
5417 		    PF_HASHROW_UNLOCK(sh);
5418 		}
5419 		error = copyout(pstore, psn->psn_src_nodes,
5420 		    sizeof(struct pf_src_node) * nr);
5421 		if (error) {
5422 			free(pstore, M_TEMP);
5423 			break;
5424 		}
5425 		psn->psn_len = sizeof(struct pf_src_node) * nr;
5426 		free(pstore, M_TEMP);
5427 		break;
5428 	}
5429 
5430 	case DIOCCLRSRCNODES: {
5431 		pf_clear_srcnodes(NULL);
5432 		pf_purge_expired_src_nodes();
5433 		break;
5434 	}
5435 
5436 	case DIOCKILLSRCNODES:
5437 		pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
5438 		break;
5439 
5440 #ifdef COMPAT_FREEBSD13
5441 	case DIOCKEEPCOUNTERS_FREEBSD13:
5442 #endif
5443 	case DIOCKEEPCOUNTERS:
5444 		error = pf_keepcounters((struct pfioc_nv *)addr);
5445 		break;
5446 
5447 	case DIOCGETSYNCOOKIES:
5448 		error = pf_get_syncookies((struct pfioc_nv *)addr);
5449 		break;
5450 
5451 	case DIOCSETSYNCOOKIES:
5452 		error = pf_set_syncookies((struct pfioc_nv *)addr);
5453 		break;
5454 
5455 	case DIOCSETHOSTID: {
5456 		u_int32_t	*hostid = (u_int32_t *)addr;
5457 
5458 		PF_RULES_WLOCK();
5459 		if (*hostid == 0)
5460 			V_pf_status.hostid = arc4random();
5461 		else
5462 			V_pf_status.hostid = *hostid;
5463 		PF_RULES_WUNLOCK();
5464 		break;
5465 	}
5466 
5467 	case DIOCOSFPFLUSH:
5468 		PF_RULES_WLOCK();
5469 		pf_osfp_flush();
5470 		PF_RULES_WUNLOCK();
5471 		break;
5472 
5473 	case DIOCIGETIFACES: {
5474 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5475 		struct pfi_kif *ifstore;
5476 		size_t bufsiz;
5477 
5478 		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
5479 			error = ENODEV;
5480 			break;
5481 		}
5482 
5483 		if (io->pfiio_size < 0 ||
5484 		    io->pfiio_size > pf_ioctl_maxcount ||
5485 		    WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) {
5486 			error = EINVAL;
5487 			break;
5488 		}
5489 
5490 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5491 
5492 		bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
5493 		ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif),
5494 		    M_TEMP, M_WAITOK | M_ZERO);
5495 
5496 		PF_RULES_RLOCK();
5497 		pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
5498 		PF_RULES_RUNLOCK();
5499 		error = copyout(ifstore, io->pfiio_buffer, bufsiz);
5500 		free(ifstore, M_TEMP);
5501 		break;
5502 	}
5503 
5504 	case DIOCSETIFFLAG: {
5505 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5506 
5507 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5508 
5509 		PF_RULES_WLOCK();
5510 		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
5511 		PF_RULES_WUNLOCK();
5512 		break;
5513 	}
5514 
5515 	case DIOCCLRIFFLAG: {
5516 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5517 
5518 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5519 
5520 		PF_RULES_WLOCK();
5521 		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
5522 		PF_RULES_WUNLOCK();
5523 		break;
5524 	}
5525 
5526 	case DIOCSETREASS: {
5527 		u_int32_t	*reass = (u_int32_t *)addr;
5528 
5529 		V_pf_status.reass = *reass & (PF_REASS_ENABLED|PF_REASS_NODF);
5530 		/* Removal of the DF flag without reassembly enabled is not a
5531 		 * valid combination. Disable reassembly in that case. */
5532 		if (!(V_pf_status.reass & PF_REASS_ENABLED))
5533 			V_pf_status.reass = 0;
5534 		break;
5535 	}
5536 
5537 	default:
5538 		error = ENODEV;
5539 		break;
5540 	}
5541 fail:
5542 	CURVNET_RESTORE();
5543 
5544 #undef ERROUT_IOCTL
5545 
5546 	return (error);
5547 }
5548 
5549 void
5550 pfsync_state_export(union pfsync_state_union *sp, struct pf_kstate *st, int msg_version)
5551 {
5552 	bzero(sp, sizeof(union pfsync_state_union));
5553 
5554 	/* copy from state key */
5555 	sp->pfs_1301.key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
5556 	sp->pfs_1301.key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
5557 	sp->pfs_1301.key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
5558 	sp->pfs_1301.key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
5559 	sp->pfs_1301.key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
5560 	sp->pfs_1301.key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
5561 	sp->pfs_1301.key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
5562 	sp->pfs_1301.key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
5563 	sp->pfs_1301.proto = st->key[PF_SK_WIRE]->proto;
5564 	sp->pfs_1301.af = st->key[PF_SK_WIRE]->af;
5565 
5566 	/* copy from state */
5567 	strlcpy(sp->pfs_1301.ifname, st->kif->pfik_name, sizeof(sp->pfs_1301.ifname));
5568 	bcopy(&st->rt_addr, &sp->pfs_1301.rt_addr, sizeof(sp->pfs_1301.rt_addr));
5569 	sp->pfs_1301.creation = htonl(time_uptime - (st->creation / 1000));
5570 	sp->pfs_1301.expire = pf_state_expires(st);
5571 	if (sp->pfs_1301.expire <= time_uptime)
5572 		sp->pfs_1301.expire = htonl(0);
5573 	else
5574 		sp->pfs_1301.expire = htonl(sp->pfs_1301.expire - time_uptime);
5575 
5576 	sp->pfs_1301.direction = st->direction;
5577 	sp->pfs_1301.log = st->act.log;
5578 	sp->pfs_1301.timeout = st->timeout;
5579 
5580 	switch (msg_version) {
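	/*
	 * The 13.01 layout only carries the 8 bit state_flags; the 14.00
	 * layout adds the queue, dummynet and route-to related fields.
	 */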
5581 		case PFSYNC_MSG_VERSION_1301:
5582 			sp->pfs_1301.state_flags = st->state_flags;
5583 			break;
5584 		case PFSYNC_MSG_VERSION_1400:
5585 			sp->pfs_1400.state_flags = htons(st->state_flags);
5586 			sp->pfs_1400.qid = htons(st->act.qid);
5587 			sp->pfs_1400.pqid = htons(st->act.pqid);
5588 			sp->pfs_1400.dnpipe = htons(st->act.dnpipe);
5589 			sp->pfs_1400.dnrpipe = htons(st->act.dnrpipe);
5590 			sp->pfs_1400.rtableid = htonl(st->act.rtableid);
5591 			sp->pfs_1400.min_ttl = st->act.min_ttl;
5592 			sp->pfs_1400.set_tos = st->act.set_tos;
5593 			sp->pfs_1400.max_mss = htons(st->act.max_mss);
5594 			sp->pfs_1400.set_prio[0] = st->act.set_prio[0];
5595 			sp->pfs_1400.set_prio[1] = st->act.set_prio[1];
5596 			sp->pfs_1400.rt = st->rt;
5597 			if (st->rt_kif)
5598 				strlcpy(sp->pfs_1400.rt_ifname,
5599 				    st->rt_kif->pfik_name,
5600 				    sizeof(sp->pfs_1400.rt_ifname));
5601 			break;
5602 		default:
5603 			panic("%s: Unsupported pfsync_msg_version %d",
5604 			    __func__, msg_version);
5605 	}
5606 
5607 	if (st->src_node)
5608 		sp->pfs_1301.sync_flags |= PFSYNC_FLAG_SRCNODE;
5609 	if (st->nat_src_node)
5610 		sp->pfs_1301.sync_flags |= PFSYNC_FLAG_NATSRCNODE;
5611 
5612 	sp->pfs_1301.id = st->id;
5613 	sp->pfs_1301.creatorid = st->creatorid;
5614 	pf_state_peer_hton(&st->src, &sp->pfs_1301.src);
5615 	pf_state_peer_hton(&st->dst, &sp->pfs_1301.dst);
5616 
5617 	if (st->rule.ptr == NULL)
5618 		sp->pfs_1301.rule = htonl(-1);
5619 	else
5620 		sp->pfs_1301.rule = htonl(st->rule.ptr->nr);
5621 	if (st->anchor.ptr == NULL)
5622 		sp->pfs_1301.anchor = htonl(-1);
5623 	else
5624 		sp->pfs_1301.anchor = htonl(st->anchor.ptr->nr);
5625 	if (st->nat_rule.ptr == NULL)
5626 		sp->pfs_1301.nat_rule = htonl(-1);
5627 	else
5628 		sp->pfs_1301.nat_rule = htonl(st->nat_rule.ptr->nr);
5629 
5630 	pf_state_counter_hton(st->packets[0], sp->pfs_1301.packets[0]);
5631 	pf_state_counter_hton(st->packets[1], sp->pfs_1301.packets[1]);
5632 	pf_state_counter_hton(st->bytes[0], sp->pfs_1301.bytes[0]);
5633 	pf_state_counter_hton(st->bytes[1], sp->pfs_1301.bytes[1]);
5634 }
5635 
5636 void
5637 pf_state_export(struct pf_state_export *sp, struct pf_kstate *st)
5638 {
5639 	bzero(sp, sizeof(*sp));
5640 
5641 	sp->version = PF_STATE_VERSION;
5642 
5643 	/* copy from state key */
5644 	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
5645 	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
5646 	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
5647 	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
5648 	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
5649 	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
5650 	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
5651 	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
5652 	sp->proto = st->key[PF_SK_WIRE]->proto;
5653 	sp->af = st->key[PF_SK_WIRE]->af;
5654 
5655 	/* copy from state */
5656 	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
5657 	strlcpy(sp->orig_ifname, st->orig_kif->pfik_name,
5658 	    sizeof(sp->orig_ifname));
5659 	bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
5660 	sp->creation = htonl(time_uptime - (st->creation / 1000));
5661 	sp->expire = pf_state_expires(st);
5662 	if (sp->expire <= time_uptime)
5663 		sp->expire = htonl(0);
5664 	else
5665 		sp->expire = htonl(sp->expire - time_uptime);
5666 
5667 	sp->direction = st->direction;
5668 	sp->log = st->act.log;
5669 	sp->timeout = st->timeout;
5670 	/* 8 bits for the old libpfctl, 16 bits for the new libpfctl */
5671 	sp->state_flags_compat = st->state_flags;
5672 	sp->state_flags = htons(st->state_flags);
5673 	if (st->src_node)
5674 		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
5675 	if (st->nat_src_node)
5676 		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
5677 
5678 	sp->id = st->id;
5679 	sp->creatorid = st->creatorid;
5680 	pf_state_peer_hton(&st->src, &sp->src);
5681 	pf_state_peer_hton(&st->dst, &sp->dst);
5682 
5683 	if (st->rule.ptr == NULL)
5684 		sp->rule = htonl(-1);
5685 	else
5686 		sp->rule = htonl(st->rule.ptr->nr);
5687 	if (st->anchor.ptr == NULL)
5688 		sp->anchor = htonl(-1);
5689 	else
5690 		sp->anchor = htonl(st->anchor.ptr->nr);
5691 	if (st->nat_rule.ptr == NULL)
5692 		sp->nat_rule = htonl(-1);
5693 	else
5694 		sp->nat_rule = htonl(st->nat_rule.ptr->nr);
5695 
5696 	sp->packets[0] = st->packets[0];
5697 	sp->packets[1] = st->packets[1];
5698 	sp->bytes[0] = st->bytes[0];
5699 	sp->bytes[1] = st->bytes[1];
5700 
5701 	sp->qid = htons(st->act.qid);
5702 	sp->pqid = htons(st->act.pqid);
5703 	sp->dnpipe = htons(st->act.dnpipe);
5704 	sp->dnrpipe = htons(st->act.dnrpipe);
5705 	sp->rtableid = htonl(st->act.rtableid);
5706 	sp->min_ttl = st->act.min_ttl;
5707 	sp->set_tos = st->act.set_tos;
5708 	sp->max_mss = htons(st->act.max_mss);
5709 	sp->rt = st->rt;
5710 	if (st->rt_kif)
5711 		strlcpy(sp->rt_ifname, st->rt_kif->pfik_name,
5712 		    sizeof(sp->rt_ifname));
5713 	sp->set_prio[0] = st->act.set_prio[0];
5714 	sp->set_prio[1] = st->act.set_prio[1];
5715 
5716 }
5717 
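/*
 * Prepare a table address wrapper for copyout: drop the kernel table
 * pointer and report the address count of the active table instead, or -1
 * if no active table is attached.
 */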
5718 static void
5719 pf_tbladdr_copyout(struct pf_addr_wrap *aw)
5720 {
5721 	struct pfr_ktable *kt;
5722 
5723 	KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type));
5724 
5725 	kt = aw->p.tbl;
5726 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
5727 		kt = kt->pfrkt_root;
5728 	aw->p.tbl = NULL;
5729 	aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
5730 		kt->pfrkt_cnt : -1;
5731 }
5732 
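/*
 * Pack an array of counter(9) values into a nested nvlist of parallel
 * "counters"/"names"/"ids" arrays and attach it to nvl under the given
 * name.  Returns ENOMEM if the nested nvlist cannot be allocated.
 */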
5733 static int
5734 pf_add_status_counters(nvlist_t *nvl, const char *name, counter_u64_t *counters,
5735     size_t number, char **names)
5736 {
5737 	nvlist_t        *nvc;
5738 
5739 	nvc = nvlist_create(0);
5740 	if (nvc == NULL)
5741 		return (ENOMEM);
5742 
5743 	for (int i = 0; i < number; i++) {
5744 		nvlist_append_number_array(nvc, "counters",
5745 		    counter_u64_fetch(counters[i]));
5746 		nvlist_append_string_array(nvc, "names",
5747 		    names[i]);
5748 		nvlist_append_number_array(nvc, "ids",
5749 		    i);
5750 	}
5751 	nvlist_add_nvlist(nvl, name, nvc);
5752 	nvlist_destroy(nvc);
5753 
5754 	return (0);
5755 }
5756 
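/*
 * nvlist-based status export (the DIOCGETSTATUSNV path).  The handler
 * packs the running flag, global counters, interface statistics and the
 * ruleset checksum into an nvlist.  Size negotiation is two-phase: with
 * nv->size == 0 only the required length is reported back in nv->len, and
 * ENOSPC is returned if the supplied buffer is too small.
 *
 * A rough userland sketch (buf, bufsz and pf_fd are illustrative only):
 *
 *	struct pfioc_nv nv = { .data = buf, .size = bufsz };
 *	if (ioctl(pf_fd, DIOCGETSTATUSNV, &nv) == 0) {
 *		nvlist_t *nvl = nvlist_unpack(buf, nv.len, 0);
 *		bool running = nvlist_get_bool(nvl, "running");
 *		uint64_t states = nvlist_get_number(nvl, "states");
 *		nvlist_destroy(nvl);
 *	}
 */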
5757 static int
5758 pf_getstatus(struct pfioc_nv *nv)
5759 {
5760 	nvlist_t        *nvl = NULL, *nvc = NULL;
5761 	void            *nvlpacked = NULL;
5762 	int              error;
5763 	struct pf_status s;
5764 	char *pf_reasons[PFRES_MAX+1] = PFRES_NAMES;
5765 	char *pf_lcounter[KLCNT_MAX+1] = KLCNT_NAMES;
5766 	char *pf_fcounter[FCNT_MAX+1] = FCNT_NAMES;
5767 	PF_RULES_RLOCK_TRACKER;
5768 
5769 #define ERROUT(x)      ERROUT_FUNCTION(errout, x)
5770 
5771 	PF_RULES_RLOCK();
5772 
5773 	nvl = nvlist_create(0);
5774 	if (nvl == NULL)
5775 		ERROUT(ENOMEM);
5776 
5777 	nvlist_add_bool(nvl, "running", V_pf_status.running);
5778 	nvlist_add_number(nvl, "since", V_pf_status.since);
5779 	nvlist_add_number(nvl, "debug", V_pf_status.debug);
5780 	nvlist_add_number(nvl, "hostid", V_pf_status.hostid);
5781 	nvlist_add_number(nvl, "states", V_pf_status.states);
5782 	nvlist_add_number(nvl, "src_nodes", V_pf_status.src_nodes);
5783 	nvlist_add_number(nvl, "reass", V_pf_status.reass);
5784 	nvlist_add_bool(nvl, "syncookies_active",
5785 	    V_pf_status.syncookies_active);
5786 	nvlist_add_number(nvl, "halfopen_states", V_pf_status.states_halfopen);
5787 
5788 	/* counters */
5789 	error = pf_add_status_counters(nvl, "counters", V_pf_status.counters,
5790 	    PFRES_MAX, pf_reasons);
5791 	if (error != 0)
5792 		ERROUT(error);
5793 
5794 	/* lcounters */
5795 	error = pf_add_status_counters(nvl, "lcounters", V_pf_status.lcounters,
5796 	    KLCNT_MAX, pf_lcounter);
5797 	if (error != 0)
5798 		ERROUT(error);
5799 
5800 	/* fcounters */
5801 	nvc = nvlist_create(0);
5802 	if (nvc == NULL)
5803 		ERROUT(ENOMEM);
5804 
5805 	for (int i = 0; i < FCNT_MAX; i++) {
5806 		nvlist_append_number_array(nvc, "counters",
5807 		    pf_counter_u64_fetch(&V_pf_status.fcounters[i]));
5808 		nvlist_append_string_array(nvc, "names",
5809 		    pf_fcounter[i]);
5810 		nvlist_append_number_array(nvc, "ids",
5811 		    i);
5812 	}
5813 	nvlist_add_nvlist(nvl, "fcounters", nvc);
5814 	nvlist_destroy(nvc);
5815 	nvc = NULL;
5816 
5817 	/* scounters */
5818 	error = pf_add_status_counters(nvl, "scounters", V_pf_status.scounters,
5819 	    SCNT_MAX, pf_fcounter);
5820 	if (error != 0)
5821 		ERROUT(error);
5822 
5823 	nvlist_add_string(nvl, "ifname", V_pf_status.ifname);
5824 	nvlist_add_binary(nvl, "chksum", V_pf_status.pf_chksum,
5825 	    PF_MD5_DIGEST_LENGTH);
5826 
5827 	pfi_update_status(V_pf_status.ifname, &s);
5828 
5829 	/* pcounters / bcounters */
5830 	for (int i = 0; i < 2; i++) {
5831 		for (int j = 0; j < 2; j++) {
5832 			for (int k = 0; k < 2; k++) {
5833 				nvlist_append_number_array(nvl, "pcounters",
5834 				    s.pcounters[i][j][k]);
5835 			}
5836 			nvlist_append_number_array(nvl, "bcounters",
5837 			    s.bcounters[i][j]);
5838 		}
5839 	}
5840 
5841 	nvlpacked = nvlist_pack(nvl, &nv->len);
5842 	if (nvlpacked == NULL)
5843 		ERROUT(ENOMEM);
5844 
5845 	if (nv->size == 0)
5846 		ERROUT(0);
5847 	else if (nv->size < nv->len)
5848 		ERROUT(ENOSPC);
5849 
5850 	PF_RULES_RUNLOCK();
5851 	error = copyout(nvlpacked, nv->data, nv->len);
5852 	goto done;
5853 
5854 #undef ERROUT
5855 errout:
5856 	PF_RULES_RUNLOCK();
5857 done:
5858 	free(nvlpacked, M_NVLIST);
5859 	nvlist_destroy(nvc);
5860 	nvlist_destroy(nvl);
5861 
5862 	return (error);
5863 }
5864 
5865 /*
5866  * XXX - Check for version mismatch!!!
5867  */
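/*
 * Flush the entire state table.  Each state is marked PFSTATE_NOSYNC so
 * pfsync does not emit a delete message per state; pf_unlink_state()
 * drops the hash row lock, hence the relock-and-rescan loop.
 */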
5868 static void
5869 pf_clear_all_states(void)
5870 {
5871 	struct epoch_tracker	 et;
5872 	struct pf_kstate	*s;
5873 	u_int i;
5874 
5875 	NET_EPOCH_ENTER(et);
5876 	for (i = 0; i <= V_pf_hashmask; i++) {
5877 		struct pf_idhash *ih = &V_pf_idhash[i];
5878 relock:
5879 		PF_HASHROW_LOCK(ih);
5880 		LIST_FOREACH(s, &ih->states, entry) {
5881 			s->timeout = PFTM_PURGE;
5882 			/* Don't send out individual delete messages. */
5883 			s->state_flags |= PFSTATE_NOSYNC;
5884 			pf_unlink_state(s);
5885 			goto relock;
5886 		}
5887 		PF_HASHROW_UNLOCK(ih);
5888 	}
5889 	NET_EPOCH_EXIT(et);
5890 }
5891 
5892 static int
5893 pf_clear_tables(void)
5894 {
5895 	struct pfioc_table io;
5896 	int error;
5897 
5898 	bzero(&io, sizeof(io));
5899 	io.pfrio_flags |= PFR_FLAG_ALLRSETS;
5900 
5901 	error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
5902 	    io.pfrio_flags);
5903 
5904 	return (error);
5905 }
5906 
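/*
 * Detach states from their source node(s) and mark the node(s) for expiry.
 * With n == NULL every source node is cleared; otherwise only the given
 * node is, in which case its hash slot is expected to be locked already.
 */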
5907 static void
5908 pf_clear_srcnodes(struct pf_ksrc_node *n)
5909 {
5910 	struct pf_kstate *s;
5911 	int i;
5912 
5913 	for (i = 0; i <= V_pf_hashmask; i++) {
5914 		struct pf_idhash *ih = &V_pf_idhash[i];
5915 
5916 		PF_HASHROW_LOCK(ih);
5917 		LIST_FOREACH(s, &ih->states, entry) {
5918 			if (n == NULL || n == s->src_node)
5919 				s->src_node = NULL;
5920 			if (n == NULL || n == s->nat_src_node)
5921 				s->nat_src_node = NULL;
5922 		}
5923 		PF_HASHROW_UNLOCK(ih);
5924 	}
5925 
5926 	if (n == NULL) {
5927 		struct pf_srchash *sh;
5928 
5929 		for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask;
5930 		    i++, sh++) {
5931 			PF_HASHROW_LOCK(sh);
5932 			LIST_FOREACH(n, &sh->nodes, entry) {
5933 				n->expire = 1;
5934 				n->states = 0;
5935 			}
5936 			PF_HASHROW_UNLOCK(sh);
5937 		}
5938 	} else {
5939 		/* XXX: hash slot should already be locked here. */
5940 		n->expire = 1;
5941 		n->states = 0;
5942 	}
5943 }
5944 
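/*
 * Kill all source nodes matching the src/dst address masks in psnk.
 * Matching nodes are unlinked onto a local list under the source hash row
 * locks, state back-pointers to them are cleared, and the nodes are freed;
 * the number of freed nodes is returned in psnk_killed.
 */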
5945 static void
5946 pf_kill_srcnodes(struct pfioc_src_node_kill *psnk)
5947 {
5948 	struct pf_ksrc_node_list	 kill;
5949 
5950 	LIST_INIT(&kill);
5951 	for (int i = 0; i <= V_pf_srchashmask; i++) {
5952 		struct pf_srchash *sh = &V_pf_srchash[i];
5953 		struct pf_ksrc_node *sn, *tmp;
5954 
5955 		PF_HASHROW_LOCK(sh);
5956 		LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp)
5957 			if (PF_MATCHA(psnk->psnk_src.neg,
5958 			      &psnk->psnk_src.addr.v.a.addr,
5959 			      &psnk->psnk_src.addr.v.a.mask,
5960 			      &sn->addr, sn->af) &&
5961 			    PF_MATCHA(psnk->psnk_dst.neg,
5962 			      &psnk->psnk_dst.addr.v.a.addr,
5963 			      &psnk->psnk_dst.addr.v.a.mask,
5964 			      &sn->raddr, sn->af)) {
5965 				pf_unlink_src_node(sn);
5966 				LIST_INSERT_HEAD(&kill, sn, entry);
5967 				sn->expire = 1;
5968 			}
5969 		PF_HASHROW_UNLOCK(sh);
5970 	}
5971 
5972 	for (int i = 0; i <= V_pf_hashmask; i++) {
5973 		struct pf_idhash *ih = &V_pf_idhash[i];
5974 		struct pf_kstate *s;
5975 
5976 		PF_HASHROW_LOCK(ih);
5977 		LIST_FOREACH(s, &ih->states, entry) {
5978 			if (s->src_node && s->src_node->expire == 1)
5979 				s->src_node = NULL;
5980 			if (s->nat_src_node && s->nat_src_node->expire == 1)
5981 				s->nat_src_node = NULL;
5982 		}
5983 		PF_HASHROW_UNLOCK(ih);
5984 	}
5985 
5986 	psnk->psnk_killed = pf_free_src_nodes(&kill);
5987 }
5988 
5989 static int
5990 pf_keepcounters(struct pfioc_nv *nv)
5991 {
5992 	nvlist_t	*nvl = NULL;
5993 	void		*nvlpacked = NULL;
5994 	int		 error = 0;
5995 
5996 #define	ERROUT(x)	ERROUT_FUNCTION(on_error, x)
5997 
5998 	if (nv->len > pf_ioctl_maxcount)
5999 		ERROUT(ENOMEM);
6000 
6001 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6002 	if (nvlpacked == NULL)
6003 		ERROUT(ENOMEM);
6004 
6005 	error = copyin(nv->data, nvlpacked, nv->len);
6006 	if (error)
6007 		ERROUT(error);
6008 
6009 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6010 	if (nvl == NULL)
6011 		ERROUT(EBADMSG);
6012 
6013 	if (! nvlist_exists_bool(nvl, "keep_counters"))
6014 		ERROUT(EBADMSG);
6015 
6016 	V_pf_status.keep_counters = nvlist_get_bool(nvl, "keep_counters");
6017 
6018 on_error:
6019 	nvlist_destroy(nvl);
6020 	free(nvlpacked, M_NVLIST);
6021 	return (error);
6022 }
6023 
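/*
 * Unlink all states, optionally restricted to the interface named in
 * kill->psk_ifname.  States are flagged PFSTATE_NOSYNC so no per-state
 * pfsync delete is sent; instead a single bulk clear is announced through
 * V_pfsync_clear_states_ptr at the end.  With psk_kill_match set, the
 * reversed key is used to also kill states in the opposite direction.
 * Returns the number of killed states.
 */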
6024 unsigned int
6025 pf_clear_states(const struct pf_kstate_kill *kill)
6026 {
6027 	struct pf_state_key_cmp	 match_key;
6028 	struct pf_kstate	*s;
6029 	struct pfi_kkif	*kif;
6030 	int		 idx;
6031 	unsigned int	 killed = 0, dir;
6032 
6033 	NET_EPOCH_ASSERT();
6034 
6035 	for (unsigned int i = 0; i <= V_pf_hashmask; i++) {
6036 		struct pf_idhash *ih = &V_pf_idhash[i];
6037 
6038 relock_DIOCCLRSTATES:
6039 		PF_HASHROW_LOCK(ih);
6040 		LIST_FOREACH(s, &ih->states, entry) {
6041 			/* For floating states look at the original kif. */
6042 			kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;
6043 
6044 			if (kill->psk_ifname[0] &&
6045 			    strcmp(kill->psk_ifname,
6046 			    kif->pfik_name))
6047 				continue;
6048 
6049 			if (kill->psk_kill_match) {
6050 				bzero(&match_key, sizeof(match_key));
6051 
6052 				if (s->direction == PF_OUT) {
6053 					dir = PF_IN;
6054 					idx = PF_SK_STACK;
6055 				} else {
6056 					dir = PF_OUT;
6057 					idx = PF_SK_WIRE;
6058 				}
6059 
6060 				match_key.af = s->key[idx]->af;
6061 				match_key.proto = s->key[idx]->proto;
6062 				PF_ACPY(&match_key.addr[0],
6063 				    &s->key[idx]->addr[1], match_key.af);
6064 				match_key.port[0] = s->key[idx]->port[1];
6065 				PF_ACPY(&match_key.addr[1],
6066 				    &s->key[idx]->addr[0], match_key.af);
6067 				match_key.port[1] = s->key[idx]->port[0];
6068 			}
6069 
6070 			/*
6071 			 * Don't send out individual
6072 			 * delete messages.
6073 			 */
6074 			s->state_flags |= PFSTATE_NOSYNC;
6075 			pf_unlink_state(s);
6076 			killed++;
6077 
6078 			if (kill->psk_kill_match)
6079 				killed += pf_kill_matching_state(&match_key,
6080 				    dir);
6081 
6082 			goto relock_DIOCCLRSTATES;
6083 		}
6084 		PF_HASHROW_UNLOCK(ih);
6085 	}
6086 
6087 	if (V_pfsync_clear_states_ptr != NULL)
6088 		V_pfsync_clear_states_ptr(V_pf_status.hostid, kill->psk_ifname);
6089 
6090 	return (killed);
6091 }
6092 
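/*
 * Kill states selected by the filter in "kill".  A non-zero state id kills
 * exactly that state (defaulting the creator id to our hostid); otherwise
 * every hash row is walked via pf_killstates_row().
 */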
6093 void
6094 pf_killstates(struct pf_kstate_kill *kill, unsigned int *killed)
6095 {
6096 	struct pf_kstate	*s;
6097 
6098 	NET_EPOCH_ASSERT();
6099 	if (kill->psk_pfcmp.id) {
6100 		if (kill->psk_pfcmp.creatorid == 0)
6101 			kill->psk_pfcmp.creatorid = V_pf_status.hostid;
6102 		if ((s = pf_find_state_byid(kill->psk_pfcmp.id,
6103 		    kill->psk_pfcmp.creatorid))) {
6104 			pf_unlink_state(s);
6105 			*killed = 1;
6106 		}
6107 		return;
6108 	}
6109 
6110 	for (unsigned int i = 0; i <= V_pf_hashmask; i++)
6111 		*killed += pf_killstates_row(kill, &V_pf_idhash[i]);
6112 }
6113 
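/*
 * nvlist wrappers for the state-kill operations: the packed request is
 * copied in, converted to a struct pf_kstate_kill, executed inside the
 * network epoch, and the number of killed states is packed into the reply.
 * pf_clearstates_nv() below follows the same pattern around
 * pf_clear_states().
 */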
6114 static int
6115 pf_killstates_nv(struct pfioc_nv *nv)
6116 {
6117 	struct pf_kstate_kill	 kill;
6118 	struct epoch_tracker	 et;
6119 	nvlist_t		*nvl = NULL;
6120 	void			*nvlpacked = NULL;
6121 	int			 error = 0;
6122 	unsigned int		 killed = 0;
6123 
6124 #define ERROUT(x)	ERROUT_FUNCTION(on_error, x)
6125 
6126 	if (nv->len > pf_ioctl_maxcount)
6127 		ERROUT(ENOMEM);
6128 
6129 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6130 	if (nvlpacked == NULL)
6131 		ERROUT(ENOMEM);
6132 
6133 	error = copyin(nv->data, nvlpacked, nv->len);
6134 	if (error)
6135 		ERROUT(error);
6136 
6137 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6138 	if (nvl == NULL)
6139 		ERROUT(EBADMSG);
6140 
6141 	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
6142 	if (error)
6143 		ERROUT(error);
6144 
6145 	NET_EPOCH_ENTER(et);
6146 	pf_killstates(&kill, &killed);
6147 	NET_EPOCH_EXIT(et);
6148 
6149 	free(nvlpacked, M_NVLIST);
6150 	nvlpacked = NULL;
6151 	nvlist_destroy(nvl);
6152 	nvl = nvlist_create(0);
6153 	if (nvl == NULL)
6154 		ERROUT(ENOMEM);
6155 
6156 	nvlist_add_number(nvl, "killed", killed);
6157 
6158 	nvlpacked = nvlist_pack(nvl, &nv->len);
6159 	if (nvlpacked == NULL)
6160 		ERROUT(ENOMEM);
6161 
6162 	if (nv->size == 0)
6163 		ERROUT(0);
6164 	else if (nv->size < nv->len)
6165 		ERROUT(ENOSPC);
6166 
6167 	error = copyout(nvlpacked, nv->data, nv->len);
6168 
6169 on_error:
6170 	nvlist_destroy(nvl);
6171 	free(nvlpacked, M_NVLIST);
6172 	return (error);
6173 }
6174 
6175 static int
6176 pf_clearstates_nv(struct pfioc_nv *nv)
6177 {
6178 	struct pf_kstate_kill	 kill;
6179 	struct epoch_tracker	 et;
6180 	nvlist_t		*nvl = NULL;
6181 	void			*nvlpacked = NULL;
6182 	int			 error = 0;
6183 	unsigned int		 killed;
6184 
6185 #define ERROUT(x)	ERROUT_FUNCTION(on_error, x)
6186 
6187 	if (nv->len > pf_ioctl_maxcount)
6188 		ERROUT(ENOMEM);
6189 
6190 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6191 	if (nvlpacked == NULL)
6192 		ERROUT(ENOMEM);
6193 
6194 	error = copyin(nv->data, nvlpacked, nv->len);
6195 	if (error)
6196 		ERROUT(error);
6197 
6198 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6199 	if (nvl == NULL)
6200 		ERROUT(EBADMSG);
6201 
6202 	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
6203 	if (error)
6204 		ERROUT(error);
6205 
6206 	NET_EPOCH_ENTER(et);
6207 	killed = pf_clear_states(&kill);
6208 	NET_EPOCH_EXIT(et);
6209 
6210 	free(nvlpacked, M_NVLIST);
6211 	nvlpacked = NULL;
6212 	nvlist_destroy(nvl);
6213 	nvl = nvlist_create(0);
6214 	if (nvl == NULL)
6215 		ERROUT(ENOMEM);
6216 
6217 	nvlist_add_number(nvl, "killed", killed);
6218 
6219 	nvlpacked = nvlist_pack(nvl, &nv->len);
6220 	if (nvlpacked == NULL)
6221 		ERROUT(ENOMEM);
6222 
6223 	if (nv->size == 0)
6224 		ERROUT(0);
6225 	else if (nv->size < nv->len)
6226 		ERROUT(ENOSPC);
6227 
6228 	error = copyout(nvlpacked, nv->data, nv->len);
6229 
6230 #undef ERROUT
6231 on_error:
6232 	nvlist_destroy(nvl);
6233 	free(nvlpacked, M_NVLIST);
6234 	return (error);
6235 }
6236 
6237 static int
6238 pf_getstate(struct pfioc_nv *nv)
6239 {
6240 	nvlist_t		*nvl = NULL, *nvls;
6241 	void			*nvlpacked = NULL;
6242 	struct pf_kstate	*s = NULL;
6243 	int			 error = 0;
6244 	uint64_t		 id, creatorid;
6245 
6246 #define ERROUT(x)	ERROUT_FUNCTION(errout, x)
6247 
6248 	if (nv->len > pf_ioctl_maxcount)
6249 		ERROUT(ENOMEM);
6250 
6251 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6252 	if (nvlpacked == NULL)
6253 		ERROUT(ENOMEM);
6254 
6255 	error = copyin(nv->data, nvlpacked, nv->len);
6256 	if (error)
6257 		ERROUT(error);
6258 
6259 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6260 	if (nvl == NULL)
6261 		ERROUT(EBADMSG);
6262 
6263 	PFNV_CHK(pf_nvuint64(nvl, "id", &id));
6264 	PFNV_CHK(pf_nvuint64(nvl, "creatorid", &creatorid));
6265 
6266 	s = pf_find_state_byid(id, creatorid);
6267 	if (s == NULL)
6268 		ERROUT(ENOENT);
6269 
6270 	free(nvlpacked, M_NVLIST);
6271 	nvlpacked = NULL;
6272 	nvlist_destroy(nvl);
6273 	nvl = nvlist_create(0);
6274 	if (nvl == NULL)
6275 		ERROUT(ENOMEM);
6276 
6277 	nvls = pf_state_to_nvstate(s);
6278 	if (nvls == NULL)
6279 		ERROUT(ENOMEM);
6280 
6281 	nvlist_add_nvlist(nvl, "state", nvls);
6282 	nvlist_destroy(nvls);
6283 
6284 	nvlpacked = nvlist_pack(nvl, &nv->len);
6285 	if (nvlpacked == NULL)
6286 		ERROUT(ENOMEM);
6287 
6288 	if (nv->size == 0)
6289 		ERROUT(0);
6290 	else if (nv->size < nv->len)
6291 		ERROUT(ENOSPC);
6292 
6293 	error = copyout(nvlpacked, nv->data, nv->len);
6294 
6295 #undef ERROUT
6296 errout:
6297 	if (s != NULL)
6298 		PF_STATE_UNLOCK(s);
6299 	free(nvlpacked, M_NVLIST);
6300 	nvlist_destroy(nvl);
6301 	return (error);
6302 }
6303 
6304 /*
6305  * XXX - Check for version mismatch!!!
6306  */
6307 
6308 /*
6309  * Duplicate pfctl -Fa operation to get rid of as much as we can.
6310  */
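/*
 * Flushing a ruleset here mirrors what pfctl does: pf_begin_rules() opens
 * a transaction with an empty inactive ruleset and returns a ticket, and
 * pf_commit_rules() immediately swaps that empty set in, releasing the old
 * rules.  The same scheme is applied per user-defined anchor above.
 */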
6311 static int
6312 shutdown_pf(void)
6313 {
6314 	int error = 0;
6315 	u_int32_t t[5];
6316 	char nn = '\0';
6317 	struct pf_kanchor *anchor;
6318 	struct pf_keth_anchor *eth_anchor;
6319 	int rs_num;
6320 
6321 	do {
6322 		/* Unlink rules of all user defined anchors */
6323 		RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors) {
6324 			/* Wildcard-based anchors may not have a corresponding
6325 			 * explicit anchor rule, or they may have been left empty
6326 			 * without rules. Either way anchor.refcnt ends up at 0,
6327 			 * which the rest of the logic does not expect. */
6328 			if (anchor->refcnt == 0)
6329 				anchor->refcnt = 1;
6330 			for (rs_num = 0; rs_num < PF_RULESET_MAX; ++rs_num) {
6331 				if ((error = pf_begin_rules(&t[rs_num], rs_num,
6332 				    anchor->path)) != 0) {
6333 					DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: "
6334 					    "anchor.path=%s rs_num=%d\n",
6335 					    anchor->path, rs_num));
6336 					goto error;	/* XXX: rollback? */
6337 				}
6338 			}
6339 			for (rs_num = 0; rs_num < PF_RULESET_MAX; ++rs_num) {
6340 				error = pf_commit_rules(t[rs_num], rs_num,
6341 				    anchor->path);
6342 				MPASS(error == 0);
6343 			}
6344 		}
6345 
6346 		/* Unlink rules of all user defined ether anchors */
6347 		RB_FOREACH(eth_anchor, pf_keth_anchor_global,
6348 		    &V_pf_keth_anchors) {
6349 			/* Wildcard-based anchors may not have a corresponding
6350 			 * explicit anchor rule, or they may have been left empty
6351 			 * without rules. Either way anchor.refcnt ends up at 0,
6352 			 * which the rest of the logic does not expect. */
6353 			if (eth_anchor->refcnt == 0)
6354 				eth_anchor->refcnt = 1;
6355 			if ((error = pf_begin_eth(&t[0], eth_anchor->path))
6356 			    != 0) {
6357 				DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: eth "
6358 				    "anchor.path=%s\n", eth_anchor->path));
6359 				goto error;
6360 			}
6361 			error = pf_commit_eth(t[0], eth_anchor->path);
6362 			MPASS(error == 0);
6363 		}
6364 
6365 		if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
6366 		    != 0) {
6367 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
6368 			break;
6369 		}
6370 		if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
6371 		    != 0) {
6372 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
6373 			break;		/* XXX: rollback? */
6374 		}
6375 		if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
6376 		    != 0) {
6377 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
6378 			break;		/* XXX: rollback? */
6379 		}
6380 		if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
6381 		    != 0) {
6382 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
6383 			break;		/* XXX: rollback? */
6384 		}
6385 		if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
6386 		    != 0) {
6387 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
6388 			break;		/* XXX: rollback? */
6389 		}
6390 
6391 		error = pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
6392 		MPASS(error == 0);
6393 		error = pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
6394 		MPASS(error == 0);
6395 		error = pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
6396 		MPASS(error == 0);
6397 		error = pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
6398 		MPASS(error == 0);
6399 		error = pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
6400 		MPASS(error == 0);
6401 
6402 		if ((error = pf_clear_tables()) != 0)
6403 			break;
6404 
6405 		if ((error = pf_begin_eth(&t[0], &nn)) != 0) {
6406 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: eth\n"));
6407 			break;
6408 		}
6409 		error = pf_commit_eth(t[0], &nn);
6410 		MPASS(error == 0);
6411 
6412 #ifdef ALTQ
6413 		if ((error = pf_begin_altq(&t[0])) != 0) {
6414 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
6415 			break;
6416 		}
6417 		pf_commit_altq(t[0]);
6418 #endif
6419 
6420 		pf_clear_all_states();
6421 
6422 		pf_clear_srcnodes(NULL);
6423 
6424 		/* status does not use malloc'ed memory, so no cleanup is needed */
6425 		/* fingerprints and interfaces have their own cleanup code */
6426 	} while(0);
6427 
6428 error:
6429 	return (error);
6430 }
6431 
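/*
 * Translate a pf_test*() verdict into a pfil(9) return value: PF_PASS maps
 * to PFIL_PASS (or PFIL_CONSUMED if pf took over the mbuf), anything else
 * frees the mbuf and is reported as PFIL_DROPPED.
 */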
6432 static pfil_return_t
6433 pf_check_return(int chk, struct mbuf **m)
6434 {
6435 
6436 	switch (chk) {
6437 	case PF_PASS:
6438 		if (*m == NULL)
6439 			return (PFIL_CONSUMED);
6440 		else
6441 			return (PFIL_PASS);
6442 		break;
6443 	default:
6444 		if (*m != NULL) {
6445 			m_freem(*m);
6446 			*m = NULL;
6447 		}
6448 		return (PFIL_DROPPED);
6449 	}
6450 }
6451 
6452 static pfil_return_t
6453 pf_eth_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
6454     void *ruleset __unused, struct inpcb *inp)
6455 {
6456 	int chk;
6457 
6458 	chk = pf_test_eth(PF_IN, flags, ifp, m, inp);
6459 
6460 	return (pf_check_return(chk, m));
6461 }
6462 
6463 static pfil_return_t
6464 pf_eth_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
6465     void *ruleset __unused, struct inpcb *inp)
6466 {
6467 	int chk;
6468 
6469 	chk = pf_test_eth(PF_OUT, flags, ifp, m, inp);
6470 
6471 	return (pf_check_return(chk, m));
6472 }
6473 
6474 #ifdef INET
6475 static pfil_return_t
6476 pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
6477     void *ruleset __unused, struct inpcb *inp)
6478 {
6479 	int chk;
6480 
6481 	chk = pf_test(PF_IN, flags, ifp, m, inp, NULL);
6482 
6483 	return (pf_check_return(chk, m));
6484 }
6485 
6486 static pfil_return_t
6487 pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
6488     void *ruleset __unused,  struct inpcb *inp)
6489 {
6490 	int chk;
6491 
6492 	chk = pf_test(PF_OUT, flags, ifp, m, inp, NULL);
6493 
6494 	return (pf_check_return(chk, m));
6495 }
6496 #endif
6497 
6498 #ifdef INET6
6499 static pfil_return_t
6500 pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags,
6501     void *ruleset __unused,  struct inpcb *inp)
6502 {
6503 	int chk;
6504 
6505 	/*
6506 	 * In case of loopback traffic IPv6 uses the real interface in
6507 	 * order to support scoped addresses. To support stateful filtering
6508 	 * we have to change this to lo0, as is the case for IPv4.
6509 	 */
6510 	CURVNET_SET(ifp->if_vnet);
6511 	chk = pf_test6(PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp,
6512 	    m, inp, NULL);
6513 	CURVNET_RESTORE();
6514 
6515 	return (pf_check_return(chk, m));
6516 }
6517 
6518 static pfil_return_t
6519 pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags,
6520     void *ruleset __unused,  struct inpcb *inp)
6521 {
6522 	int chk;
6523 
6524 	CURVNET_SET(ifp->if_vnet);
6525 	chk = pf_test6(PF_OUT, flags, ifp, m, inp, NULL);
6526 	CURVNET_RESTORE();
6527 
6528 	return (pf_check_return(chk, m));
6529 }
6530 #endif /* INET6 */
6531 
6532 VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_in_hook);
6533 VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_out_hook);
6534 #define	V_pf_eth_in_hook	VNET(pf_eth_in_hook)
6535 #define	V_pf_eth_out_hook	VNET(pf_eth_out_hook)
6536 
6537 #ifdef INET
6538 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_in_hook);
6539 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_out_hook);
6540 #define	V_pf_ip4_in_hook	VNET(pf_ip4_in_hook)
6541 #define	V_pf_ip4_out_hook	VNET(pf_ip4_out_hook)
6542 #endif
6543 #ifdef INET6
6544 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_in_hook);
6545 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_out_hook);
6546 #define	V_pf_ip6_in_hook	VNET(pf_ip6_in_hook)
6547 #define	V_pf_ip6_out_hook	VNET(pf_ip6_out_hook)
6548 #endif
6549 
6550 static void
6551 hook_pf_eth(void)
6552 {
6553 	struct pfil_hook_args pha = {
6554 		.pa_version = PFIL_VERSION,
6555 		.pa_modname = "pf",
6556 		.pa_type = PFIL_TYPE_ETHERNET,
6557 	};
6558 	struct pfil_link_args pla = {
6559 		.pa_version = PFIL_VERSION,
6560 	};
6561 	int ret __diagused;
6562 
6563 	if (atomic_load_bool(&V_pf_pfil_eth_hooked))
6564 		return;
6565 
6566 	pha.pa_mbuf_chk = pf_eth_check_in;
6567 	pha.pa_flags = PFIL_IN;
6568 	pha.pa_rulname = "eth-in";
6569 	V_pf_eth_in_hook = pfil_add_hook(&pha);
6570 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6571 	pla.pa_head = V_link_pfil_head;
6572 	pla.pa_hook = V_pf_eth_in_hook;
6573 	ret = pfil_link(&pla);
6574 	MPASS(ret == 0);
6575 	pha.pa_mbuf_chk = pf_eth_check_out;
6576 	pha.pa_flags = PFIL_OUT;
6577 	pha.pa_rulname = "eth-out";
6578 	V_pf_eth_out_hook = pfil_add_hook(&pha);
6579 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6580 	pla.pa_head = V_link_pfil_head;
6581 	pla.pa_hook = V_pf_eth_out_hook;
6582 	ret = pfil_link(&pla);
6583 	MPASS(ret == 0);
6584 
6585 	atomic_store_bool(&V_pf_pfil_eth_hooked, true);
6586 }
6587 
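/*
 * Register pf's IPv4/IPv6 pfil(9) hooks.  For each protocol an input and
 * an output hook are created and linked to the respective pfil head; when
 * V_pf_filter_local is set the output hook is additionally linked to the
 * local-delivery pfil heads so locally destined packets are filtered on
 * output as well.  The function is idempotent, guarded by V_pf_pfil_hooked.
 */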
6588 static void
6589 hook_pf(void)
6590 {
6591 	struct pfil_hook_args pha = {
6592 		.pa_version = PFIL_VERSION,
6593 		.pa_modname = "pf",
6594 	};
6595 	struct pfil_link_args pla = {
6596 		.pa_version = PFIL_VERSION,
6597 	};
6598 	int ret __diagused;
6599 
6600 	if (atomic_load_bool(&V_pf_pfil_hooked))
6601 		return;
6602 
6603 #ifdef INET
6604 	pha.pa_type = PFIL_TYPE_IP4;
6605 	pha.pa_mbuf_chk = pf_check_in;
6606 	pha.pa_flags = PFIL_IN;
6607 	pha.pa_rulname = "default-in";
6608 	V_pf_ip4_in_hook = pfil_add_hook(&pha);
6609 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6610 	pla.pa_head = V_inet_pfil_head;
6611 	pla.pa_hook = V_pf_ip4_in_hook;
6612 	ret = pfil_link(&pla);
6613 	MPASS(ret == 0);
6614 	pha.pa_mbuf_chk = pf_check_out;
6615 	pha.pa_flags = PFIL_OUT;
6616 	pha.pa_rulname = "default-out";
6617 	V_pf_ip4_out_hook = pfil_add_hook(&pha);
6618 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6619 	pla.pa_head = V_inet_pfil_head;
6620 	pla.pa_hook = V_pf_ip4_out_hook;
6621 	ret = pfil_link(&pla);
6622 	MPASS(ret == 0);
6623 	if (V_pf_filter_local) {
6624 		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6625 		pla.pa_head = V_inet_local_pfil_head;
6626 		pla.pa_hook = V_pf_ip4_out_hook;
6627 		ret = pfil_link(&pla);
6628 		MPASS(ret == 0);
6629 	}
6630 #endif
6631 #ifdef INET6
6632 	pha.pa_type = PFIL_TYPE_IP6;
6633 	pha.pa_mbuf_chk = pf_check6_in;
6634 	pha.pa_flags = PFIL_IN;
6635 	pha.pa_rulname = "default-in6";
6636 	V_pf_ip6_in_hook = pfil_add_hook(&pha);
6637 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6638 	pla.pa_head = V_inet6_pfil_head;
6639 	pla.pa_hook = V_pf_ip6_in_hook;
6640 	ret = pfil_link(&pla);
6641 	MPASS(ret == 0);
6642 	pha.pa_mbuf_chk = pf_check6_out;
6643 	pha.pa_rulname = "default-out6";
6644 	pha.pa_flags = PFIL_OUT;
6645 	V_pf_ip6_out_hook = pfil_add_hook(&pha);
6646 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6647 	pla.pa_head = V_inet6_pfil_head;
6648 	pla.pa_hook = V_pf_ip6_out_hook;
6649 	ret = pfil_link(&pla);
6650 	MPASS(ret == 0);
6651 	if (V_pf_filter_local) {
6652 		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6653 		pla.pa_head = V_inet6_local_pfil_head;
6654 		pla.pa_hook = V_pf_ip6_out_hook;
6655 		ret = pfil_link(&pla);
6656 		MPASS(ret == 0);
6657 	}
6658 #endif
6659 
6660 	atomic_store_bool(&V_pf_pfil_hooked, true);
6661 }
6662 
6663 static void
6664 dehook_pf_eth(void)
6665 {
6666 
6667 	if (!atomic_load_bool(&V_pf_pfil_eth_hooked))
6668 		return;
6669 
6670 	pfil_remove_hook(V_pf_eth_in_hook);
6671 	pfil_remove_hook(V_pf_eth_out_hook);
6672 
6673 	atomic_store_bool(&V_pf_pfil_eth_hooked, false);
6674 }
6675 
6676 static void
6677 dehook_pf(void)
6678 {
6679 
6680 	if (!atomic_load_bool(&V_pf_pfil_hooked))
6681 		return;
6682 
6683 #ifdef INET
6684 	pfil_remove_hook(V_pf_ip4_in_hook);
6685 	pfil_remove_hook(V_pf_ip4_out_hook);
6686 #endif
6687 #ifdef INET6
6688 	pfil_remove_hook(V_pf_ip6_in_hook);
6689 	pfil_remove_hook(V_pf_ip6_out_hook);
6690 #endif
6691 
6692 	atomic_store_bool(&V_pf_pfil_hooked, false);
6693 }
6694 
6695 static void
6696 pf_load_vnet(void)
6697 {
6698 	V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname),
6699 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
6700 
6701 	rm_init_flags(&V_pf_rules_lock, "pf rulesets", RM_RECURSE);
6702 	sx_init(&V_pf_ioctl_lock, "pf ioctl");
6703 
6704 	pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
6705 	    PF_RULE_TAG_HASH_SIZE_DEFAULT);
6706 #ifdef ALTQ
6707 	pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize,
6708 	    PF_QUEUE_TAG_HASH_SIZE_DEFAULT);
6709 #endif
6710 
6711 	V_pf_keth = &V_pf_main_keth_anchor.ruleset;
6712 
6713 	pfattach_vnet();
6714 	V_pf_vnet_active = 1;
6715 }
6716 
6717 static int
6718 pf_load(void)
6719 {
6720 	int error;
6721 
6722 	sx_init(&pf_end_lock, "pf end thread");
6723 
6724 	pf_mtag_initialize();
6725 
6726 	pf_dev = make_dev(&pf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME);
6727 	if (pf_dev == NULL)
6728 		return (ENOMEM);
6729 
6730 	pf_end_threads = 0;
6731 	error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0, "pf purge");
6732 	if (error != 0)
6733 		return (error);
6734 
6735 	pfi_initialize();
6736 
6737 	return (0);
6738 }
6739 
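/*
 * Per-vnet teardown.  Order matters: unhook from pfil first so no new
 * packets enter pf, flush rules and states via shutdown_pf() under the
 * rules write lock, drain epoch callbacks, tear down the purge machinery,
 * and only then destroy the locks and free the counters (which are still
 * updated during shutdown, as noted below).
 */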
6740 static void
6741 pf_unload_vnet(void)
6742 {
6743 	int ret __diagused;
6744 
6745 	V_pf_vnet_active = 0;
6746 	V_pf_status.running = 0;
6747 	dehook_pf();
6748 	dehook_pf_eth();
6749 
6750 	PF_RULES_WLOCK();
6751 	pf_syncookies_cleanup();
6752 	shutdown_pf();
6753 	PF_RULES_WUNLOCK();
6754 
6755 	/* Make sure we've cleaned up ethernet rules before we continue. */
6756 	NET_EPOCH_DRAIN_CALLBACKS();
6757 
6758 	ret = swi_remove(V_pf_swi_cookie);
6759 	MPASS(ret == 0);
6760 	ret = intr_event_destroy(V_pf_swi_ie);
6761 	MPASS(ret == 0);
6762 
6763 	pf_unload_vnet_purge();
6764 
6765 	pf_normalize_cleanup();
6766 	PF_RULES_WLOCK();
6767 	pfi_cleanup_vnet();
6768 	PF_RULES_WUNLOCK();
6769 	pfr_cleanup();
6770 	pf_osfp_flush();
6771 	pf_cleanup();
6772 	if (IS_DEFAULT_VNET(curvnet))
6773 		pf_mtag_cleanup();
6774 
6775 	pf_cleanup_tagset(&V_pf_tags);
6776 #ifdef ALTQ
6777 	pf_cleanup_tagset(&V_pf_qids);
6778 #endif
6779 	uma_zdestroy(V_pf_tag_z);
6780 
6781 #ifdef PF_WANT_32_TO_64_COUNTER
6782 	PF_RULES_WLOCK();
6783 	LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist);
6784 
6785 	MPASS(LIST_EMPTY(&V_pf_allkiflist));
6786 	MPASS(V_pf_allkifcount == 0);
6787 
6788 	LIST_REMOVE(&V_pf_default_rule, allrulelist);
6789 	V_pf_allrulecount--;
6790 	LIST_REMOVE(V_pf_rulemarker, allrulelist);
6791 
6792 	MPASS(LIST_EMPTY(&V_pf_allrulelist));
6793 	MPASS(V_pf_allrulecount == 0);
6794 
6795 	PF_RULES_WUNLOCK();
6796 
6797 	free(V_pf_kifmarker, PFI_MTYPE);
6798 	free(V_pf_rulemarker, M_PFRULE);
6799 #endif
6800 
6801 	/* Free counters last as we updated them during shutdown. */
6802 	pf_counter_u64_deinit(&V_pf_default_rule.evaluations);
6803 	for (int i = 0; i < 2; i++) {
6804 		pf_counter_u64_deinit(&V_pf_default_rule.packets[i]);
6805 		pf_counter_u64_deinit(&V_pf_default_rule.bytes[i]);
6806 	}
6807 	counter_u64_free(V_pf_default_rule.states_cur);
6808 	counter_u64_free(V_pf_default_rule.states_tot);
6809 	counter_u64_free(V_pf_default_rule.src_nodes);
6810 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, V_pf_default_rule.timestamp);
6811 
6812 	for (int i = 0; i < PFRES_MAX; i++)
6813 		counter_u64_free(V_pf_status.counters[i]);
6814 	for (int i = 0; i < KLCNT_MAX; i++)
6815 		counter_u64_free(V_pf_status.lcounters[i]);
6816 	for (int i = 0; i < FCNT_MAX; i++)
6817 		pf_counter_u64_deinit(&V_pf_status.fcounters[i]);
6818 	for (int i = 0; i < SCNT_MAX; i++)
6819 		counter_u64_free(V_pf_status.scounters[i]);
6820 
6821 	rm_destroy(&V_pf_rules_lock);
6822 	sx_destroy(&V_pf_ioctl_lock);
6823 }
6824 
6825 static void
6826 pf_unload(void)
6827 {
6828 
6829 	sx_xlock(&pf_end_lock);
6830 	pf_end_threads = 1;
6831 	while (pf_end_threads < 2) {
6832 		wakeup_one(pf_purge_thread);
6833 		sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0);
6834 	}
6835 	sx_xunlock(&pf_end_lock);
6836 
6837 	pf_nl_unregister();
6838 
6839 	if (pf_dev != NULL)
6840 		destroy_dev(pf_dev);
6841 
6842 	pfi_cleanup();
6843 
6844 	sx_destroy(&pf_end_lock);
6845 }
6846 
6847 static void
6848 vnet_pf_init(void *unused __unused)
6849 {
6850 
6851 	pf_load_vnet();
6852 }
6853 VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
6854     vnet_pf_init, NULL);
6855 
6856 static void
6857 vnet_pf_uninit(const void *unused __unused)
6858 {
6859 
6860 	pf_unload_vnet();
6861 }
6862 SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL);
6863 VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
6864     vnet_pf_uninit, NULL);
6865 
6866 static int
6867 pf_modevent(module_t mod, int type, void *data)
6868 {
6869 	int error = 0;
6870 
6871 	switch(type) {
6872 	case MOD_LOAD:
6873 		error = pf_load();
6874 		pf_nl_register();
6875 		break;
6876 	case MOD_UNLOAD:
6877 		/* Handled in SYSUNINIT(pf_unload) to ensure it's done after
6878 		 * the vnet_pf_uninit()s */
6879 		break;
6880 	default:
6881 		error = EINVAL;
6882 		break;
6883 	}
6884 
6885 	return (error);
6886 }
6887 
6888 static moduledata_t pf_mod = {
6889 	"pf",
6890 	pf_modevent,
6891 	0
6892 };
6893 
6894 DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
6895 MODULE_DEPEND(pf, netlink, 1, 1, 1);
6896 MODULE_VERSION(pf, PF_MODVER);
6897