xref: /dragonfly/sys/net/ipfw3/ip_fw3.c (revision cae2835b)
1 /*
2  * Copyright (c) 1993 Daniel Boulet
3  * Copyright (c) 1994 Ugen J.S.Antsilevich
4  * Copyright (c) 2002 Luigi Rizzo, Universita` di Pisa
5  * Copyright (c) 2015 - 2016 The DragonFly Project.  All rights reserved.
6  *
7  * This code is derived from software contributed to The DragonFly Project
8  * by Bill Yuan <bycn82@dragonflybsd.org>
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  *
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in
18  *    the documentation and/or other materials provided with the
19  *    distribution.
20  * 3. Neither the name of The DragonFly Project nor the names of its
21  *    contributors may be used to endorse or promote products derived
22  *    from this software without specific, prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
28  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  *
37  */
38 
39 #include "opt_ipfw.h"
40 #include "opt_inet.h"
41 #ifndef INET
42 #error IPFIREWALL3 requires INET.
43 #endif /* INET */
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/malloc.h>
48 #include <sys/mbuf.h>
49 #include <sys/kernel.h>
50 #include <sys/proc.h>
51 #include <sys/socket.h>
52 #include <sys/socketvar.h>
53 #include <sys/sysctl.h>
54 #include <sys/syslog.h>
55 #include <sys/ucred.h>
56 #include <sys/in_cksum.h>
57 #include <sys/lock.h>
58 #include <sys/thread2.h>
59 #include <sys/mplock2.h>
60 
61 #include <netinet/in.h>
62 #include <netinet/in_systm.h>
63 #include <netinet/in_var.h>
64 #include <netinet/in_pcb.h>
65 #include <netinet/ip.h>
66 #include <netinet/ip_var.h>
67 #include <netinet/ip_icmp.h>
68 #include <netinet/tcp.h>
69 #include <netinet/tcp_timer.h>
70 #include <netinet/tcp_var.h>
71 #include <netinet/tcpip.h>
72 #include <netinet/udp.h>
73 #include <netinet/udp_var.h>
74 #include <netinet/ip_divert.h>
75 #include <netinet/if_ether.h>
76 
77 #include <net/if.h>
78 #include <net/radix.h>
79 #include <net/route.h>
80 #include <net/pfil.h>
81 #include <net/netmsg2.h>
82 
83 #include <net/ipfw3/ip_fw.h>
84 #include <net/ipfw3/ip_fw3_log.h>
85 #include <net/ipfw3/ip_fw3_table.h>
86 #include <net/ipfw3/ip_fw3_sync.h>
87 #include <net/ipfw3_basic/ip_fw3_basic.h>
88 #include <net/ipfw3_nat/ip_fw3_nat.h>
89 #include <net/dummynet3/ip_dummynet3.h>
90 
91 MALLOC_DEFINE(M_IPFW3, "IPFW3", "ip_fw3 default module");
92 
93 #ifdef IPFIREWALL_DEBUG
94 #define DPRINTF(fmt, ...)			\
95 do { 						\
96 	if (fw_debug > 0) 			\
97 		kprintf(fmt, __VA_ARGS__); 	\
98 } while (0)
99 #else
100 #define DPRINTF(fmt, ...)	((void)0)
101 #endif
102 
103 #define MAX_MODULE		10
104 #define MAX_OPCODE_PER_MODULE	100
105 
106 #define IPFW_AUTOINC_STEP_MIN	1
107 #define IPFW_AUTOINC_STEP_MAX	1000
108 #define IPFW_AUTOINC_STEP_DEF	100
109 
110 
111 struct netmsg_ipfw {
112 	struct netmsg_base base;
113 	const struct ipfw_ioc_rule *ioc_rule;
114 	struct ip_fw	*rule;
115 	struct ip_fw	*next_rule;
116 	struct ip_fw	*prev_rule;
117 	struct ip_fw	*sibling;	/* sibling on the previous CPU */
118 };
119 
120 struct netmsg_del {
121 	struct netmsg_base base;
122 	struct ip_fw	*rule;
123 	struct ip_fw	*start_rule;
124 	struct ip_fw	*prev_rule;
125 	struct ipfw_ioc_state *ioc_state;
126 	uint16_t	rulenum;
127 	uint8_t		from_set;
128 	uint8_t		to_set;
129 };
130 
131 struct netmsg_zent {
132 	struct netmsg_base base;
133 	struct ip_fw	*start_rule;
134 	uint16_t	rulenum;
135 	uint16_t	log_only;
136 };
137 
138 ip_fw_ctl_t *ipfw_ctl_nat_ptr = NULL;
139 
140 /* handlers implemented in the ipfw_basic module */
141 ipfw_basic_delete_state_t *ipfw_basic_flush_state_prt = NULL;
142 ipfw_basic_append_state_t *ipfw_basic_append_state_prt = NULL;
143 
144 extern int ip_fw_loaded;
145 static uint32_t static_count;	/* # of static rules */
146 static uint32_t static_ioc_len;	/* bytes of static rules */
147 static int ipfw_flushing;
148 int fw_verbose = 0;
149 static int fw_debug;
150 static int autoinc_step = IPFW_AUTOINC_STEP_DEF;
151 
152 static int	ipfw_sysctl_enable(SYSCTL_HANDLER_ARGS);
153 static int	ipfw_sysctl_autoinc_step(SYSCTL_HANDLER_ARGS);
154 
155 SYSCTL_NODE(_net_inet_ip, OID_AUTO, fw3, CTLFLAG_RW, 0, "Firewall");
156 SYSCTL_PROC(_net_inet_ip_fw3, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
157 	&fw3_enable, 0, ipfw_sysctl_enable, "I", "Enable ipfw");
158 SYSCTL_PROC(_net_inet_ip_fw3, OID_AUTO, autoinc_step, CTLTYPE_INT | CTLFLAG_RW,
159 	&autoinc_step, 0, ipfw_sysctl_autoinc_step, "I",
160 	"Rule number autincrement step");
161 SYSCTL_INT(_net_inet_ip_fw3, OID_AUTO, one_pass, CTLFLAG_RW,
162 	&fw3_one_pass, 0,
163 	"Only do a single pass through ipfw when using dummynet(4)");
164 SYSCTL_INT(_net_inet_ip_fw3, OID_AUTO, debug, CTLFLAG_RW,
165 	&fw_debug, 0, "Enable printing of debug ip_fw statements");
166 SYSCTL_INT(_net_inet_ip_fw3, OID_AUTO, verbose, CTLFLAG_RW,
167 	&fw_verbose, 0, "Log matches to ipfw rules");
168 SYSCTL_INT(_net_inet_ip_fw3, OID_AUTO, static_count, CTLFLAG_RD,
169 	&static_count, 0, "Number of static rules");
170 
171 filter_func filter_funcs[MAX_MODULE][MAX_OPCODE_PER_MODULE];
172 struct ipfw_module ipfw_modules[MAX_MODULE];
173 struct ipfw_context *ipfw_ctx[MAXCPU];
174 struct ipfw_sync_context sync_ctx;
175 static int ipfw_ctl(struct sockopt *sopt);
176 
177 
178 void
179 check_accept(int *cmd_ctl, int *cmd_val, struct ip_fw_args **args,
180 		struct ip_fw **f, ipfw_insn *cmd, uint16_t ip_len);
181 void
182 check_deny(int *cmd_ctl, int *cmd_val, struct ip_fw_args **args,
183 		struct ip_fw **f, ipfw_insn *cmd, uint16_t ip_len);
184 void init_module(void);
185 
186 
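/*
 * Record the module id and name in the first free slot of
 * ipfw_modules[], so ipfw_ctl_get_modules() can list it later.
 */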
187 void
188 register_ipfw_module(int module_id, char *module_name)
189 {
190 	struct ipfw_module *tmp;
191 	int i;
192 
193 	tmp = ipfw_modules;
194 	for (i = 0; i < MAX_MODULE; i++) {
195 		if (tmp->type == 0) {
196 			tmp->type = 1;
197 			tmp->id = module_id;
198 			strlcpy(tmp->name, module_name, sizeof(tmp->name));
199 			break;
200 		}
201 		tmp++;
202 	}
203 	kprintf("ipfw3 module %s loaded\n", module_name);
204 }
205 
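/*
 * A module may only be unregistered while no rule on this CPU still
 * uses one of its opcodes.  Scan the rule chain first; if the module
 * is unused, release its slot and clear its entries in filter_funcs[].
 * Returns 1 when the module is still referenced, 0 on success.
 */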
206 int
207 unregister_ipfw_module(int module_id)
208 {
209 	struct ipfw_module *tmp;
210 	struct ip_fw *fw;
211 	ipfw_insn *cmd;
212 	int i, len, cmdlen, found;
213 
214 	found = 0;
215 	tmp = ipfw_modules;
216 	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
217 	fw = ctx->ipfw_rule_chain;
218 	for (; fw; fw = fw->next) {
219 		for (len = fw->cmd_len, cmd = fw->cmd; len > 0;
220 			len -= cmdlen,
221 			cmd = (ipfw_insn *)((uint32_t *)cmd + cmdlen)) {
222 			cmdlen = F_LEN(cmd);
223 			if (cmd->module == 0 &&
224 				(cmd->opcode == 0 || cmd->opcode == 1)) {
225 				/* action is accept or deny */
226 			} else if (cmd->module == module_id) {
227 				found = 1;
228 				goto decide;
229 			}
230 		}
231 	}
232 decide:
233 	if (found) {
234 		return 1;
235 	} else {
236 		for (i = 0; i < MAX_MODULE; i++) {
237 			if (tmp->type == 1 && tmp->id == module_id) {
238 				tmp->type = 0;
239 				kprintf("ipfw3 module %s unloaded\n",
240 						tmp->name);
241 				break;
242 			}
243 			tmp++;
244 		}
245 
246 		for (i = 0; i < MAX_OPCODE_PER_MODULE; i++) {
247 			if (module_id == 0) {
248 				if (i == 0 || i == 1) {
249 					continue;
250 				}
251 			}
252 			filter_funcs[module_id][i] = NULL;
253 		}
254 		return 0;
255 	}
256 }
257 
258 void
259 register_ipfw_filter_funcs(int module, int opcode, filter_func func)
260 {
261 	filter_funcs[module][opcode] = func;
262 }
263 
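/*
 * Built-in accept/deny actions.  Like every filter_func they report
 * the verdict through *cmd_val, end the rule scan by setting *cmd_ctl
 * to IP_FW_CTL_DONE, and log the match when arg3 is set.
 */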
264 void
265 check_accept(int *cmd_ctl, int *cmd_val, struct ip_fw_args **args,
266 		struct ip_fw **f, ipfw_insn *cmd, uint16_t ip_len)
267 {
268 	*cmd_val = IP_FW_PASS;
269 	*cmd_ctl = IP_FW_CTL_DONE;
270 	if (cmd->arg3) {
271 		ipfw_log((*args)->m, (*args)->eh, cmd->arg1);
272 	}
273 }
274 
275 void
276 check_deny(int *cmd_ctl, int *cmd_val, struct ip_fw_args **args,
277 		struct ip_fw **f, ipfw_insn *cmd, uint16_t ip_len)
278 {
279 	*cmd_val = IP_FW_DENY;
280 	*cmd_ctl = IP_FW_CTL_DONE;
281 	if (cmd->arg3) {
282 		ipfw_log((*args)->m, (*args)->eh, cmd->arg1);
283 	}
284 }
285 
286 void
287 init_module(void)
288 {
289 	memset(ipfw_modules, 0, sizeof(struct ipfw_module) * MAX_MODULE);
290 	memset(filter_funcs, 0, sizeof(filter_func) *
291 			MAX_OPCODE_PER_MODULE * MAX_MODULE);
292 	register_ipfw_filter_funcs(0, O_BASIC_ACCEPT,
293 			(filter_func)check_accept);
294 	register_ipfw_filter_funcs(0, O_BASIC_DENY, (filter_func)check_deny);
295 }
296 
297 static __inline int
298 ipfw_free_rule(struct ip_fw *rule)
299 {
300 	kfree(rule, M_IPFW3);
301 	rule = NULL;
302 	return 1;
303 }
304 
305 static struct ip_fw *
306 lookup_next_rule(struct ip_fw *me)
307 {
308 	struct ip_fw *rule = NULL;
309 	ipfw_insn *cmd;
310 
311 	/* look for action, in case it is a skipto */
312 	cmd = ACTION_PTR(me);
313 	if ((int)cmd->module == MODULE_BASIC_ID &&
314 		(int)cmd->opcode == O_BASIC_SKIPTO) {
315 		for (rule = me->next; rule; rule = rule->next) {
316 			if (rule->rulenum >= cmd->arg1)
317 				break;
318 		}
319 	}
320 	if (rule == NULL) {	/* failure or not a skipto */
321 		rule = me->next;
322 	}
323 	me->next_rule = rule;
324 	return rule;
325 }
326 
327 /*
328  * Rules are stored in ctx->ipfw_rule_chain, and each rule is a
329  * combination of multiple commands (ipfw_insn).  Within a rule the
330  * filter commands come first and the action commands come last.
331  * The outer/inner loops below walk the rules/commands and invoke
332  * the function registered for each command's module id and opcode,
333  * then continue according to the returned control value.
334  */
335 static int
336 ipfw_chk(struct ip_fw_args *args)
337 {
338 	struct mbuf *m = args->m;
339 	struct ip *ip = mtod(m, struct ip *);
340 	struct ip_fw *f = NULL;		/* matching rule */
341 	int cmd_val = IP_FW_PASS;
342 	struct m_tag *mtag;
343 	struct divert_info *divinfo;
344 
345 	/*
346 	 * hlen	The length of the IPv4 header.
347 	 *	hlen >0 means we have an IPv4 packet.
348 	 */
349 	u_int hlen = 0;		/* hlen >0 means we have an IP pkt */
350 
351 	/*
352 	 * offset	The offset of a fragment. offset != 0 means that
353 	 *	we have a fragment at this offset of an IPv4 packet.
354 	 *	offset == 0 means that (if this is an IPv4 packet)
355 	 *	this is the first or only fragment.
356 	 */
357 	u_short offset = 0;
358 
359 	uint8_t proto;
360 	uint16_t src_port = 0, dst_port = 0;	/* NOTE: host format	*/
361 	struct in_addr src_ip, dst_ip;		/* NOTE: network format	*/
362 	uint16_t ip_len = 0;
363 	uint8_t prev_module = -1, prev_opcode = -1; /* previous module & opcode */
364 	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
365 
366 	if (m->m_pkthdr.fw_flags & IPFW_MBUF_GENERATED)
367 		return IP_FW_PASS;	/* accept */
368 
369 	if (args->eh == NULL ||		/* layer 3 packet */
370 		(m->m_pkthdr.len >= sizeof(struct ip) &&
371 		 ntohs(args->eh->ether_type) == ETHERTYPE_IP))
372 		hlen = ip->ip_hl << 2;
373 
374 	/*
375 	 * Collect parameters into local variables for faster matching.
376 	 */
377 	if (hlen == 0) {	/* do not grab addresses for non-ip pkts */
378 		proto = args->f_id.proto = 0;	/* mark f_id invalid */
379 		goto after_ip_checks;
380 	}
381 
382 	proto = args->f_id.proto = ip->ip_p;
383 	src_ip = ip->ip_src;
384 	dst_ip = ip->ip_dst;
385 	if (args->eh != NULL) { /* layer 2 packets are as on the wire */
386 		offset = ntohs(ip->ip_off) & IP_OFFMASK;
387 		ip_len = ntohs(ip->ip_len);
388 	} else {
389 		offset = ip->ip_off & IP_OFFMASK;
390 		ip_len = ip->ip_len;
391 	}
392 
393 #define PULLUP_TO(len)					\
394 do {							\
395 	if (m->m_len < (len)) {				\
396 		args->m = m = m_pullup(m, (len));	\
397 		if (m == NULL)				\
398 			goto pullup_failed;		\
399 		ip = mtod(m, struct ip *);		\
400 	}						\
401 } while (0)
402 
403 	if (offset == 0) {
404 		switch (proto) {
405 			case IPPROTO_TCP:
406 				{
407 					struct tcphdr *tcp;
408 
409 					PULLUP_TO(hlen + sizeof(struct tcphdr));
410 					tcp = L3HDR(struct tcphdr, ip);
411 					dst_port = tcp->th_dport;
412 					src_port = tcp->th_sport;
413 					args->f_id.flags = tcp->th_flags;
414 				}
415 				break;
416 
417 			case IPPROTO_UDP:
418 				{
419 					struct udphdr *udp;
420 
421 					PULLUP_TO(hlen + sizeof(struct udphdr));
422 					udp = L3HDR(struct udphdr, ip);
423 					dst_port = udp->uh_dport;
424 					src_port = udp->uh_sport;
425 				}
426 				break;
427 
428 			case IPPROTO_ICMP:
429 				PULLUP_TO(hlen + 4);
430 				args->f_id.flags =
431 					L3HDR(struct icmp, ip)->icmp_type;
432 				break;
433 
434 			default:
435 				break;
436 		}
437 	}
438 
439 #undef PULLUP_TO
440 
441 	args->f_id.src_ip = ntohl(src_ip.s_addr);
442 	args->f_id.dst_ip = ntohl(dst_ip.s_addr);
443 	args->f_id.src_port = src_port = ntohs(src_port);
444 	args->f_id.dst_port = dst_port = ntohs(dst_port);
445 
446 after_ip_checks:
447 	if (args->rule) {
448 		/*
449 		 * Packet has already been tagged. Look for the next rule
450 		 * to restart processing.
451 		 *
452 		 * If fw3_one_pass != 0 then just accept it.
453 		 * XXX should not happen here, but optimized out in
454 		 * the caller.
455 		 */
456 		if (fw3_one_pass)
457 			return IP_FW_PASS;
458 
459 		/* This rule is being/has been flushed */
460 		if (ipfw_flushing)
461 			return IP_FW_DENY;
462 
463 		f = args->rule->next_rule;
464 		if (f == NULL)
465 			f = lookup_next_rule(args->rule);
466 	} else {
467 		/*
468 		 * Find the starting rule. It can be either the first
469 		 * one, or the one after divert_rule if asked so.
470 		 */
471 		int skipto;
472 
473 		mtag = m_tag_find(m, PACKET_TAG_IPFW_DIVERT, NULL);
474 		if (mtag != NULL) {
475 			divinfo = m_tag_data(mtag);
476 			skipto = divinfo->skipto;
477 		} else {
478 			skipto = 0;
479 		}
480 
481 		f = ctx->ipfw_rule_chain;
482 		if (args->eh == NULL && skipto != 0) {
483 			/* No skipto during rule flushing */
484 			if (ipfw_flushing) {
485 				return IP_FW_DENY;
486 			}
487 			if (skipto >= IPFW_DEFAULT_RULE) {
488 				return IP_FW_DENY; /* invalid */
489 			}
490 			while (f && f->rulenum <= skipto) {
491 				f = f->next;
492 			}
493 			if (f == NULL) {	/* drop packet */
494 				return IP_FW_DENY;
495 			}
496 		} else if (ipfw_flushing) {
497 			/* Rules are being flushed; skip to default rule */
498 			f = ctx->ipfw_default_rule;
499 		}
500 	}
501 	if ((mtag = m_tag_find(m, PACKET_TAG_IPFW_DIVERT, NULL)) != NULL) {
502 		m_tag_delete(m, mtag);
503 	}
504 
505 	/*
506 	 * Now scan the rules, and parse microinstructions for each rule.
507 	 */
508 	int prev_val;	/*  previous result of 'or' filter */
509 	int l, cmdlen;
510 	ipfw_insn *cmd;
511 	int cmd_ctl;
512 	/* foreach rule in chain */
513 	for (; f; f = f->next) {
514 again:  /* check the rule again*/
515 		if (ctx->ipfw_set_disable & (1 << f->set)) {
516 			continue;
517 		}
518 
519 		prev_val = -1;
520 		 /* foreach cmd in rule */
521 		for (l = f->cmd_len, cmd = f->cmd; l > 0; l -= cmdlen,
522 			cmd = (ipfw_insn *)((uint32_t *)cmd + cmdlen)) {
523 			cmdlen = F_LEN(cmd);
524 
525 			/* skip 'or' filter when already match */
526 			if (cmd->len & F_OR &&
527 				cmd->module == prev_module &&
528 				cmd->opcode == prev_opcode &&
529 				prev_val == 1) {
530 				goto next_cmd;
531 			}
532 
533 check_body: /* check the body of the rule again.*/
534 			(filter_funcs[cmd->module][cmd->opcode])
535 				(&cmd_ctl, &cmd_val, &args, &f, cmd, ip_len);
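			/*
			 * cmd_ctl steers the scan: DONE ends it with the
			 * current verdict, AGAIN re-checks the same rule,
			 * NEXT moves on to the next rule, NAT remembers the
			 * matching rule in args->rule, and CHK_STATE jumps
			 * straight to the rule's action on a state match.
			 */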
536 			switch (cmd_ctl) {
537 				case IP_FW_CTL_DONE:
538 					if (prev_val == 0) /* but 'or' failed */
539 						goto next_rule;
540 					goto done;
541 				case IP_FW_CTL_AGAIN:
542 					goto again;
543 				case IP_FW_CTL_NEXT:
544 					goto next_rule;
545 				case IP_FW_CTL_NAT:
546 					args->rule = f;
547 					goto done;
548 				case IP_FW_CTL_CHK_STATE:
549 					/* update the cmd and l */
550 					cmd = ACTION_PTR(f);
551 					l = f->cmd_len - f->act_ofs;
552 					goto check_body;
553 			}
554 			if (cmd->len & F_NOT)
555 				cmd_val = !cmd_val;
556 
557 			if (cmd->len & F_OR) {	/* has 'or' */
558 				if (!cmd_val) {	/* not matched */
559 					if (prev_val == -1) {	/* first 'or' */
560 						prev_val = 0;
561 						prev_module = cmd->module;
562 						prev_opcode = cmd->opcode;
563 					} else if (prev_module == cmd->module &&
564 						prev_opcode == cmd->opcode) {
565 						/* continuous 'or' filter */
566 					} else if (prev_module != cmd->module ||
567 						prev_opcode != cmd->opcode) {
568 						/* 'or' filter changed */
569 						if (prev_val == 0) {
570 							goto next_rule;
571 						} else {
572 							prev_val = 0;
573 							prev_module = cmd->module;
574 							prev_opcode = cmd->opcode;
575 						}
576 					}
577 				} else { /* has 'or' and matched */
578 					prev_val = 1;
579 					prev_module = cmd->module;
580 					prev_opcode = cmd->opcode;
581 				}
582 			} else { /* no or */
583 				if (!cmd_val) {	/* not matched */
584 					goto next_rule;
585 				} else {
586 					if (prev_val == 0) {
587 						/* previous 'or' not matched */
588 						goto next_rule;
589 					} else {
590 						prev_val = -1;
591 					}
592 				}
593 			}
594 next_cmd:;
595 		}	/* end of inner for, scan opcodes */
596 next_rule:;		/* try next rule		*/
597 	}		/* end of outer for, scan rules */
598 	kprintf("+++ ipfw: ouch!, skip past end of rules, denying packet\n");
599 	return IP_FW_DENY;
600 
601 done:
602 	/* Update statistics */
603 	f->pcnt++;
604 	f->bcnt += ip_len;
605 	f->timestamp = time_second;
606 	return cmd_val;
607 
608 pullup_failed:
609 	if (fw_verbose)
610 		kprintf("pullup failed\n");
611 	return IP_FW_DENY;
612 }
613 
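/*
 * Hand the packet to dummynet(4): prepend a PACKET_TAG_DUMMYNET tag
 * that carries the flow id, the matching rule and the target
 * pipe/queue number, and mark the mbuf as dummynet-tagged so the
 * pfil hooks recognize it when it is re-injected.
 */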
614 static void
615 ipfw_dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa)
616 {
617 	struct m_tag *mtag;
618 	struct dn_pkt *pkt;
619 	ipfw_insn *cmd;
620 	const struct ipfw_flow_id *id;
621 	struct dn_flow_id *fid;
622 
623 	M_ASSERTPKTHDR(m);
624 
625 	mtag = m_tag_get(PACKET_TAG_DUMMYNET, sizeof(*pkt), M_NOWAIT);
626 	if (mtag == NULL) {
627 		m_freem(m);
628 		return;
629 	}
630 	m_tag_prepend(m, mtag);
631 
632 	pkt = m_tag_data(mtag);
633 	bzero(pkt, sizeof(*pkt));
634 
635 	cmd = (ipfw_insn *)((uint32_t *)fwa->rule->cmd + fwa->rule->act_ofs);
636 	KASSERT(cmd->opcode == O_DUMMYNET_PIPE ||
637 			cmd->opcode == O_DUMMYNET_QUEUE,
638 			("Rule is not PIPE or QUEUE, opcode %d", cmd->opcode));
639 
640 	pkt->dn_m = m;
641 	pkt->dn_flags = (dir & DN_FLAGS_DIR_MASK);
642 	pkt->ifp = fwa->oif;
643 	pkt->pipe_nr = pipe_nr;
644 
645 	pkt->cpuid = mycpuid;
646 	pkt->msgport = netisr_curport();
647 
648 	id = &fwa->f_id;
649 	fid = &pkt->id;
650 	fid->fid_dst_ip = id->dst_ip;
651 	fid->fid_src_ip = id->src_ip;
652 	fid->fid_dst_port = id->dst_port;
653 	fid->fid_src_port = id->src_port;
654 	fid->fid_proto = id->proto;
655 	fid->fid_flags = id->flags;
656 
657 	pkt->dn_priv = fwa->rule;
658 
659 	if ((int)cmd->opcode == O_DUMMYNET_PIPE)
660 		pkt->dn_flags |= DN_FLAGS_IS_PIPE;
661 
662 	m->m_pkthdr.fw_flags |= DUMMYNET_MBUF_TAGGED;
663 }
664 
665 static __inline void
666 ipfw_inc_static_count(struct ip_fw *rule)
667 {
668 	/* Static rule's counts are updated only on CPU0 */
669 	KKASSERT(mycpuid == 0);
670 
671 	static_count++;
672 	static_ioc_len += IOC_RULESIZE(rule);
673 }
674 
675 static __inline void
676 ipfw_dec_static_count(struct ip_fw *rule)
677 {
678 	int l = IOC_RULESIZE(rule);
679 
680 	/* Static rule's counts are updated only on CPU0 */
681 	KKASSERT(mycpuid == 0);
682 
683 	KASSERT(static_count > 0, ("invalid static count %u", static_count));
684 	static_count--;
685 
686 	KASSERT(static_ioc_len >= l,
687 			("invalid static len %u", static_ioc_len));
688 	static_ioc_len -= l;
689 }
690 
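/*
 * Per-CPU part of rule addition.  Every CPU allocates its own copy of
 * the rule, inserts it into the local chain in rule-number order and
 * links it to the copy created on the previous CPU through ->sibling,
 * then forwards the netmsg to the next CPU.
 */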
691 static void
692 ipfw_add_rule_dispatch(netmsg_t nmsg)
693 {
694 	struct netmsg_ipfw *fwmsg = (struct netmsg_ipfw *)nmsg;
695 	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
696 	struct ip_fw *rule, *prev, *next;
697 	const struct ipfw_ioc_rule *ioc_rule;
698 
699 	ioc_rule = fwmsg->ioc_rule;
700 	/* create the rule from the ioc_rule */
701 	rule = kmalloc(RULESIZE(ioc_rule), M_IPFW3, M_WAITOK | M_ZERO);
702 	rule->act_ofs = ioc_rule->act_ofs;
703 	rule->cmd_len = ioc_rule->cmd_len;
704 	rule->rulenum = ioc_rule->rulenum;
705 	rule->set = ioc_rule->set;
706 	bcopy(ioc_rule->cmd, rule->cmd, rule->cmd_len * 4);
707 
708 	for (prev = NULL, next = ctx->ipfw_rule_chain;
709 		next; prev = next, next = next->next) {
710 		if (next->rulenum > ioc_rule->rulenum) {
711 			break;
712 		}
713 	}
714 	KASSERT(next != NULL, ("no default rule?!"));
715 
716 	/*
717 	 * Insert rule into the pre-determined position
718 	 */
719 	if (prev != NULL) {
720 		rule->next = next;
721 		prev->next = rule;
722 	} else {
723 		rule->next = ctx->ipfw_rule_chain;
724 		ctx->ipfw_rule_chain = rule;
725 	}
726 
727 	/*
728 	 * If a sibling rule was created on the previous CPU,
729 	 * link it to the rule just created here.
730 	 */
731 	if (fwmsg->sibling != NULL) {
732 		fwmsg->sibling->sibling = rule;
733 	}
734 	/* prepare for next CPU */
735 	fwmsg->sibling = rule;
736 
737 	if (mycpuid == 0) {
738 		/* Statistics only need to be updated once */
739 		ipfw_inc_static_count(rule);
740 	}
741 	ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
742 }
743 
744 /*
745  * Confirm the rule number,
746  * call the dispatch function to add the rule into every CPU's list,
747  * and update the statistics.
748  */
749 static void
750 ipfw_add_rule(struct ipfw_ioc_rule *ioc_rule)
751 {
752 	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
753 	struct netmsg_ipfw fwmsg;
754 	struct netmsg_base *nmsg;
755 	struct ip_fw *f;
756 
757 	IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
758 
759 	/*
760 	 * If rulenum is 0, find highest numbered rule before the
761 	 * default rule, and add rule number incremental step.
762 	 */
763 	if (ioc_rule->rulenum == 0) {
764 		int step = autoinc_step;
765 
766 		KKASSERT(step >= IPFW_AUTOINC_STEP_MIN &&
767 				step <= IPFW_AUTOINC_STEP_MAX);
768 
769 		/*
770 		 * Locate the highest numbered rule before default
771 		 */
772 		for (f = ctx->ipfw_rule_chain; f; f = f->next) {
773 			if (f->rulenum == IPFW_DEFAULT_RULE)
774 				break;
775 			ioc_rule->rulenum = f->rulenum;
776 		}
777 		if (ioc_rule->rulenum < IPFW_DEFAULT_RULE - step)
778 			ioc_rule->rulenum += step;
779 	}
780 	KASSERT(ioc_rule->rulenum != IPFW_DEFAULT_RULE &&
781 			ioc_rule->rulenum != 0,
782 			("invalid rule num %d", ioc_rule->rulenum));
783 
784 	bzero(&fwmsg, sizeof(fwmsg));
785 	nmsg = &fwmsg.base;
786 	netmsg_init(nmsg, NULL, &curthread->td_msgport,
787 			0, ipfw_add_rule_dispatch);
788 	fwmsg.ioc_rule = ioc_rule;
789 
790 	ifnet_domsg(&nmsg->lmsg, 0);
791 
792 	DPRINTF("++ installed rule %d, static count now %d\n",
793 			ioc_rule->rulenum, static_count);
794 }
795 
796 /**
797  * Free storage associated with a static rule (including derived
798  * dynamic rules).
799  * The caller is in charge of clearing rule pointers to avoid
800  * dangling pointers.
801  * @return a pointer to the next entry.
802  * Arguments are not checked, so they better be correct.
803  * Must be called at splimp().
804  */
805 static struct ip_fw *
806 ipfw_delete_rule(struct ipfw_context *ctx,
807 		 struct ip_fw *prev, struct ip_fw *rule)
808 {
809 	if (prev == NULL)
810 		ctx->ipfw_rule_chain = rule->next;
811 	else
812 		prev->next = rule->next;
813 
814 	if (mycpuid == IPFW_CFGCPUID)
815 		ipfw_dec_static_count(rule);
816 
817 	kfree(rule, M_IPFW3);
818 	rule = NULL;
819 	return NULL;
820 }
821 
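/*
 * Per-CPU flush: walk the local chain and free every rule, keeping
 * the default rule unless the caller asked for it to be killed too.
 */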
822 static void
823 ipfw_flush_rule_dispatch(netmsg_t nmsg)
824 {
825 	struct lwkt_msg *lmsg = &nmsg->lmsg;
826 	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
827 	struct ip_fw *rule, *the_rule;
828 	int kill_default = lmsg->u.ms_result;
829 
830 	rule = ctx->ipfw_rule_chain;
831 	while (rule != NULL) {
832 		if (rule->rulenum == IPFW_DEFAULT_RULE && kill_default == 0) {
833 			ctx->ipfw_rule_chain = rule;
834 			break;
835 		}
836 		the_rule = rule;
837 		rule = rule->next;
838 		if (mycpuid == IPFW_CFGCPUID)
839 			ipfw_dec_static_count(the_rule);
840 
841 		kfree(the_rule, M_IPFW3);
842 	}
843 
844 	ifnet_forwardmsg(lmsg, mycpuid + 1);
845 }
846 
847 static void
848 ipfw_append_state_dispatch(netmsg_t nmsg)
849 {
850 	struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
851 	struct ipfw_ioc_state *ioc_state = dmsg->ioc_state;
852 	(*ipfw_basic_append_state_prt)(ioc_state);
853 	ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
854 }
855 
856 static void
857 ipfw_delete_state_dispatch(netmsg_t nmsg)
858 {
859 	struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
860 	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
861 	struct ip_fw *rule = ctx->ipfw_rule_chain;
862 	while (rule != NULL) {
863 		if (rule->rulenum == dmsg->rulenum) {
864 			break;
865 		}
866 		rule = rule->next;
867 	}
868 
869 	(*ipfw_basic_flush_state_prt)(rule);
870 	ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
871 }
872 
873 /*
874  * Deletes all rules from a chain (including the default rule
875  * if 'kill_default' is set).
876  * Must be called at splimp().
877  */
878 static void
879 ipfw_ctl_flush_rule(int kill_default)
880 {
881 	struct netmsg_del dmsg;
882 	struct netmsg_base nmsg;
883 	struct lwkt_msg *lmsg;
884 
885 	IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
886 
887 	/*
888 	 * If 'kill_default' then caller has done the necessary
889 	 * msgport syncing; unnecessary to do it again.
890 	 */
891 	if (!kill_default) {
892 		/*
893 		 * Let ipfw_chk() know the rules are going to
894 		 * be flushed, so it could jump directly to
895 		 * the default rule.
896 		 */
897 		ipfw_flushing = 1;
898 		netmsg_service_sync();
899 	}
900 
901 	/*
902 	 * If the ipfw_basic module registered a flush handler,
903 	 * flush all states on all CPUs first.
904 	 */
905 	if (ipfw_basic_flush_state_prt != NULL) {
906 		bzero(&dmsg, sizeof(dmsg));
907 		netmsg_init(&dmsg.base, NULL, &curthread->td_msgport,
908 				0, ipfw_delete_state_dispatch);
909 		ifnet_domsg(&dmsg.base.lmsg, 0);
910 	}
911 	/*
912 	 * Press the 'flush' button
913 	 */
914 	bzero(&nmsg, sizeof(nmsg));
915 	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
916 			0, ipfw_flush_rule_dispatch);
917 	lmsg = &nmsg.lmsg;
918 	lmsg->u.ms_result = kill_default;
919 	ifnet_domsg(lmsg, 0);
920 
921 	if (kill_default) {
922 		KASSERT(static_count == 0,
923 				("%u static rules remain", static_count));
924 		KASSERT(static_ioc_len == 0,
925 				("%u bytes of static rules remain", static_ioc_len));
926 	}
927 
928 	/* Flush is done */
929 	ipfw_flushing = 0;
930 }
931 
932 static void
933 ipfw_delete_rule_dispatch(netmsg_t nmsg)
934 {
935 	struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
936 	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
937 	struct ip_fw *rule, *prev = NULL;
938 
939 	rule = ctx->ipfw_rule_chain;
940 	while (rule != NULL) {
941 		if (rule->rulenum == dmsg->rulenum) {
942 			ipfw_delete_rule(ctx, prev, rule);
943 			break;
944 		}
945 		prev = rule;
946 		rule = rule->next;
947 	}
948 
949 	ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
950 }
951 
952 static int
953 ipfw_alt_delete_rule(uint16_t rulenum)
954 {
955 	struct netmsg_del dmsg;
956 	struct netmsg_base *nmsg;
957 
958 	/*
959 	 * Delete the states whose stub is the rule with this
960 	 * rule number, on every CPU.
961 	 */
962 	bzero(&dmsg, sizeof(dmsg));
963 	nmsg = &dmsg.base;
964 	netmsg_init(nmsg, NULL, &curthread->td_msgport,
965 			0, ipfw_delete_state_dispatch);
966 	dmsg.rulenum = rulenum;
967 	ifnet_domsg(&nmsg->lmsg, 0);
968 
969 	/*
970 	 * Get rid of the rule duplications on all CPUs
971 	 */
972 	bzero(&dmsg, sizeof(dmsg));
973 	nmsg = &dmsg.base;
974 	netmsg_init(nmsg, NULL, &curthread->td_msgport,
975 			0, ipfw_delete_rule_dispatch);
976 	dmsg.rulenum = rulenum;
977 	ifnet_domsg(&nmsg->lmsg, 0);
978 	return 0;
979 }
980 
981 static void
982 ipfw_alt_delete_ruleset_dispatch(netmsg_t nmsg)
983 {
984 	struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
985 	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
986 	struct ip_fw *prev, *rule;
987 #ifdef INVARIANTS
988 	int del = 0;
989 #endif
990 
991 	prev = NULL;
992 	rule = ctx->ipfw_rule_chain;
993 	while (rule != NULL) {
994 		if (rule->set == dmsg->from_set) {
995 			rule = ipfw_delete_rule(ctx, prev, rule);
996 #ifdef INVARIANTS
997 			del = 1;
998 #endif
999 		} else {
1000 			prev = rule;
1001 			rule = rule->next;
1002 		}
1003 	}
1004 	KASSERT(del, ("no match set?!"));
1005 
1006 	ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
1007 }
1008 
1009 static void
1010 ipfw_disable_ruleset_state_dispatch(netmsg_t nmsg)
1011 {
1012 	struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
1013 	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1014 	struct ip_fw *rule;
1015 #ifdef INVARIANTS
1016 	int cleared = 0;
1017 #endif
1018 
1019 	for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
1020 		if (rule->set == dmsg->from_set) {
1021 #ifdef INVARIANTS
1022 			cleared = 1;
1023 #endif
1024 		}
1025 	}
1026 	KASSERT(cleared, ("no match set?!"));
1027 
1028 	ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
1029 }
1030 
1031 static int
1032 ipfw_alt_delete_ruleset(uint8_t set)
1033 {
1034 	struct netmsg_del dmsg;
1035 	struct netmsg_base *nmsg;
1036 	int state, del;
1037 	struct ip_fw *rule;
1038 	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1039 
1040 	/*
1041 	 * Check whether the 'set' exists.  If it exists,
1042 	 * then check whether any rules within the set will
1043 	 * try to create states.
1044 	 */
1045 	state = 0;
1046 	del = 0;
1047 	for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
1048 		if (rule->set == set) {
1049 			del = 1;
1050 		}
1051 	}
1052 	if (!del)
1053 		return 0; /* XXX EINVAL? */
1054 
1055 	if (state) {
1056 		/*
1057 		 * Clear the STATE flag, so no more states will be
1058 		 * created based on the rules in this set.
1059 		 */
1060 		bzero(&dmsg, sizeof(dmsg));
1061 		nmsg = &dmsg.base;
1062 		netmsg_init(nmsg, NULL, &curthread->td_msgport,
1063 				0, ipfw_disable_ruleset_state_dispatch);
1064 		dmsg.from_set = set;
1065 
1066 		ifnet_domsg(&nmsg->lmsg, 0);
1067 	}
1068 
1069 	/*
1070 	 * Delete this set
1071 	 */
1072 	bzero(&dmsg, sizeof(dmsg));
1073 	nmsg = &dmsg.base;
1074 	netmsg_init(nmsg, NULL, &curthread->td_msgport,
1075 			0, ipfw_alt_delete_ruleset_dispatch);
1076 	dmsg.from_set = set;
1077 
1078 	ifnet_domsg(&nmsg->lmsg, 0);
1079 	return 0;
1080 }
1081 
1082 static void
1083 ipfw_alt_move_rule_dispatch(netmsg_t nmsg)
1084 {
1085 	struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
1086 	struct ip_fw *rule;
1087 
1088 	rule = dmsg->start_rule;
1089 
1090 	/*
1091 	 * Move to the position on the next CPU
1092 	 * before the msg is forwarded.
1093 	 */
1094 
1095 	while (rule && rule->rulenum <= dmsg->rulenum) {
1096 		if (rule->rulenum == dmsg->rulenum)
1097 			rule->set = dmsg->to_set;
1098 		rule = rule->next;
1099 	}
1100 	ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
1101 }
1102 
1103 static int
1104 ipfw_alt_move_rule(uint16_t rulenum, uint8_t set)
1105 {
1106 	struct netmsg_del dmsg;
1107 	struct netmsg_base *nmsg;
1108 	struct ip_fw *rule;
1109 	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1110 
1111 	/*
1112 	 * Locate first rule to move
1113 	 */
1114 	for (rule = ctx->ipfw_rule_chain;
1115 		rule && rule->rulenum <= rulenum; rule = rule->next) {
1116 		if (rule->rulenum == rulenum && rule->set != set)
1117 			break;
1118 	}
1119 	if (rule == NULL || rule->rulenum > rulenum)
1120 		return 0; /* XXX error? */
1121 
1122 	bzero(&dmsg, sizeof(dmsg));
1123 	nmsg = &dmsg.base;
1124 	netmsg_init(nmsg, NULL, &curthread->td_msgport,
1125 			0, ipfw_alt_move_rule_dispatch);
1126 	dmsg.start_rule = rule;
1127 	dmsg.rulenum = rulenum;
1128 	dmsg.to_set = set;
1129 
1130 	ifnet_domsg(&nmsg->lmsg, 0);
1131 	KKASSERT(dmsg.start_rule == NULL);
1132 	return 0;
1133 }
1134 
1135 static void
1136 ipfw_alt_move_ruleset_dispatch(netmsg_t nmsg)
1137 {
1138 	struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
1139 	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1140 	struct ip_fw *rule;
1141 
1142 	for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
1143 		if (rule->set == dmsg->from_set)
1144 			rule->set = dmsg->to_set;
1145 	}
1146 	ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
1147 }
1148 
1149 static int
1150 ipfw_alt_move_ruleset(uint8_t from_set, uint8_t to_set)
1151 {
1152 	struct netmsg_del dmsg;
1153 	struct netmsg_base *nmsg;
1154 
1155 	bzero(&dmsg, sizeof(dmsg));
1156 	nmsg = &dmsg.base;
1157 	netmsg_init(nmsg, NULL, &curthread->td_msgport,
1158 			0, ipfw_alt_move_ruleset_dispatch);
1159 	dmsg.from_set = from_set;
1160 	dmsg.to_set = to_set;
1161 
1162 	ifnet_domsg(&nmsg->lmsg, 0);
1163 	return 0;
1164 }
1165 
1166 static void
1167 ipfw_alt_swap_ruleset_dispatch(netmsg_t nmsg)
1168 {
1169 	struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
1170 	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1171 	struct ip_fw *rule;
1172 
1173 	for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
1174 		if (rule->set == dmsg->from_set)
1175 			rule->set = dmsg->to_set;
1176 		else if (rule->set == dmsg->to_set)
1177 			rule->set = dmsg->from_set;
1178 	}
1179 	ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
1180 }
1181 
1182 static int
1183 ipfw_alt_swap_ruleset(uint8_t set1, uint8_t set2)
1184 {
1185 	struct netmsg_del dmsg;
1186 	struct netmsg_base *nmsg;
1187 
1188 	bzero(&dmsg, sizeof(dmsg));
1189 	nmsg = &dmsg.base;
1190 	netmsg_init(nmsg, NULL, &curthread->td_msgport,
1191 			0, ipfw_alt_swap_ruleset_dispatch);
1192 	dmsg.from_set = set1;
1193 	dmsg.to_set = set2;
1194 
1195 	ifnet_domsg(&nmsg->lmsg, 0);
1196 	return 0;
1197 }
1198 
1199 
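/*
 * Decode the packed argument of IP_FW_DEL: bits 0-15 hold the rule
 * (or set) number, bits 16-23 the target set and bits 24-31 the
 * sub-command (delete rule, delete set, move rule, move set, swap).
 */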
1200 static int
1201 ipfw_ctl_alter(uint32_t arg)
1202 {
1203 	uint16_t rulenum;
1204 	uint8_t cmd, new_set;
1205 	int error = 0;
1206 
1207 	rulenum = arg & 0xffff;
1208 	cmd = (arg >> 24) & 0xff;
1209 	new_set = (arg >> 16) & 0xff;
1210 
1211 	if (cmd > 4)
1212 		return EINVAL;
1213 	if (new_set >= IPFW_DEFAULT_SET)
1214 		return EINVAL;
1215 	if (cmd == 0 || cmd == 2) {
1216 		if (rulenum == IPFW_DEFAULT_RULE)
1217 			return EINVAL;
1218 	} else {
1219 		if (rulenum >= IPFW_DEFAULT_SET)
1220 			return EINVAL;
1221 	}
1222 
1223 	switch (cmd) {
1224 	case 0:	/* delete rules with given number */
1225 		error = ipfw_alt_delete_rule(rulenum);
1226 		break;
1227 
1228 	case 1:	/* delete all rules with given set number */
1229 		error = ipfw_alt_delete_ruleset(rulenum);
1230 		break;
1231 
1232 	case 2:	/* move rules with given number to new set */
1233 		error = ipfw_alt_move_rule(rulenum, new_set);
1234 		break;
1235 
1236 	case 3: /* move rules with given set number to new set */
1237 		error = ipfw_alt_move_ruleset(rulenum, new_set);
1238 		break;
1239 
1240 	case 4: /* swap two sets */
1241 		error = ipfw_alt_swap_ruleset(rulenum, new_set);
1242 		break;
1243 	}
1244 	return error;
1245 }
1246 
1247 /*
1248  * Clear counters for a specific rule.
1249  */
1250 static void
1251 clear_counters(struct ip_fw *rule)
1252 {
1253 	rule->bcnt = rule->pcnt = 0;
1254 	rule->timestamp = 0;
1255 }
1256 
1257 static void
1258 ipfw_zero_entry_dispatch(netmsg_t nmsg)
1259 {
1260 	struct netmsg_zent *zmsg = (struct netmsg_zent *)nmsg;
1261 	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1262 	struct ip_fw *rule;
1263 
1264 	if (zmsg->rulenum == 0) {
1265 		for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
1266 			clear_counters(rule);
1267 		}
1268 	} else {
1269 		for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
1270 			if (rule->rulenum == zmsg->rulenum) {
1271 				clear_counters(rule);
1272 			}
1273 		}
1274 	}
1275 	ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
1276 }
1277 
1278 /**
1279  * Reset some or all counters on firewall rules.
1280  * @arg rulenum is 0 to clear all entries, or a specific
1281  * rule number.
1282  * @arg log_only is 1 if we only want to reset logs, zero otherwise.
1283  */
1284 static int
1285 ipfw_ctl_zero_entry(int rulenum, int log_only)
1286 {
1287 	struct netmsg_zent zmsg;
1288 	struct netmsg_base *nmsg;
1289 	const char *msg;
1290 	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1291 
1292 	bzero(&zmsg, sizeof(zmsg));
1293 	nmsg = &zmsg.base;
1294 	netmsg_init(nmsg, NULL, &curthread->td_msgport,
1295 			0, ipfw_zero_entry_dispatch);
1296 	zmsg.log_only = log_only;
1297 
1298 	if (rulenum == 0) {
1299 		msg = log_only ? "ipfw: All logging counts reset.\n"
1300 				   : "ipfw: Accounting cleared.\n";
1301 	} else {
1302 		struct ip_fw *rule;
1303 
1304 		/*
1305 		 * Locate the first rule with 'rulenum'
1306 		 */
1307 		for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
1308 			if (rule->rulenum == rulenum)
1309 				break;
1310 		}
1311 		if (rule == NULL) /* we did not find any matching rules */
1312 			return (EINVAL);
1313 		zmsg.start_rule = rule;
1314 		zmsg.rulenum = rulenum;
1315 
1316 		msg = log_only ? "ipfw: Entry %d logging count reset.\n"
1317 				   : "ipfw: Entry %d cleared.\n";
1318 	}
1319 	ifnet_domsg(&nmsg->lmsg, 0);
1320 	KKASSERT(zmsg.start_rule == NULL);
1321 
1322 	if (fw_verbose)
1323 		log(LOG_SECURITY | LOG_NOTICE, msg, rulenum);
1324 	return (0);
1325 }
1326 
1327 static int
1328 ipfw_ctl_add_state(struct sockopt *sopt)
1329 {
1330 	struct ipfw_ioc_state *ioc_state;
1331 	ioc_state = sopt->sopt_val;
1332 	if (ipfw_basic_append_state_prt != NULL) {
1333 		struct netmsg_del dmsg;
1334 		bzero(&dmsg, sizeof(dmsg));
1335 		netmsg_init(&dmsg.base, NULL, &curthread->td_msgport,
1336 			0, ipfw_append_state_dispatch);
1337 		dmsg.ioc_state = ioc_state;
1338 		ifnet_domsg(&dmsg.base.lmsg, 0);
1339 	}
1340 	return 0;
1341 }
1342 
1343 static int
1344 ipfw_ctl_delete_state(struct sockopt *sopt)
1345 {
1346 	int rulenum = 0, error;
1347 	if (sopt->sopt_valsize != 0) {
1348 		error = soopt_to_kbuf(sopt, &rulenum, sizeof(int), sizeof(int));
1349 		if (error) {
1350 			return -1;
1351 		}
1352 	}
1353 	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1354 	struct ip_fw *rule = ctx->ipfw_rule_chain;
1355 
1356 	while (rule != NULL) {
1357 		if (rule->rulenum == rulenum) {
1358 			break;
1359 		}
1360 		rule = rule->next;
1361 	}
1362 	if (rule == NULL) {
1363 		return -1;
1364 	}
1365 
1366 	struct netmsg_del dmsg;
1367 	struct netmsg_base *nmsg;
1368 	/*
1369 	 * Delete the states whose stub is the rule with this
1370 	 * rule number, on every CPU.
1371 	 */
1372 	bzero(&dmsg, sizeof(dmsg));
1373 	nmsg = &dmsg.base;
1374 	netmsg_init(nmsg, NULL, &curthread->td_msgport,
1375 			0, ipfw_delete_state_dispatch);
1376 	dmsg.rulenum = rulenum;
1377 	ifnet_domsg(&nmsg->lmsg, 0);
1378 	return 0;
1379 }
1380 
1381 static int
1382 ipfw_ctl_flush_state(struct sockopt *sopt)
1383 {
1384 	struct netmsg_del dmsg;
1385 	struct netmsg_base *nmsg;
1386 	/*
1387 	 * Flush all states on every CPU; rulenum 0 means no
1388 	 * particular rule is targeted.
1389 	 */
1390 	bzero(&dmsg, sizeof(dmsg));
1391 	nmsg = &dmsg.base;
1392 	netmsg_init(nmsg, NULL, &curthread->td_msgport,
1393 			0, ipfw_delete_state_dispatch);
1394 	dmsg.rulenum = 0;
1395 	ifnet_domsg(&nmsg->lmsg, 0);
1396 	return 0;
1397 }
1398 
1399 /*
1400  * Get the ioc_rule from the sopt
1401  * call ipfw_add_rule to add the rule
1402  */
1403 static int
1404 ipfw_ctl_add_rule(struct sockopt *sopt)
1405 {
1406 	struct ipfw_ioc_rule *ioc_rule;
1407 	size_t size;
1408 
1409 	size = sopt->sopt_valsize;
1410 	if (size > (sizeof(uint32_t) * IPFW_RULE_SIZE_MAX) ||
1411 			size < sizeof(*ioc_rule)) {
1412 		return EINVAL;
1413 	}
1414 	if (size != (sizeof(uint32_t) * IPFW_RULE_SIZE_MAX)) {
1415 		sopt->sopt_val = krealloc(sopt->sopt_val, sizeof(uint32_t) *
1416 				IPFW_RULE_SIZE_MAX, M_TEMP, M_WAITOK);
1417 	}
1418 	ioc_rule = sopt->sopt_val;
1419 
1420 	ipfw_add_rule(ioc_rule);
1421 	return 0;
1422 }
1423 
1424 static void *
1425 ipfw_copy_state(struct ip_fw_state *state, struct ipfw_ioc_state *ioc_state, int cpuid)
1426 {
1427 	ioc_state->pcnt = state->pcnt;
1428 	ioc_state->bcnt = state->bcnt;
1429 	ioc_state->lifetime = state->lifetime;
1430 	ioc_state->timestamp = state->timestamp;
1431 	ioc_state->cpuid = cpuid;
1432 	ioc_state->expiry = state->expiry;
1433 	ioc_state->rulenum = state->stub->rulenum;
1434 
1435 	bcopy(&state->flow_id, &ioc_state->flow_id, sizeof(struct ipfw_flow_id));
1436 	return ioc_state + 1;
1437 }
1438 
1439 static void *
1440 ipfw_copy_rule(const struct ip_fw *rule, struct ipfw_ioc_rule *ioc_rule)
1441 {
1442 	const struct ip_fw *sibling;
1443 #ifdef INVARIANTS
1444 	int i;
1445 #endif
1446 
1447 	ioc_rule->act_ofs = rule->act_ofs;
1448 	ioc_rule->cmd_len = rule->cmd_len;
1449 	ioc_rule->rulenum = rule->rulenum;
1450 	ioc_rule->set = rule->set;
1451 
1452 	ioc_rule->set_disable = ipfw_ctx[mycpuid]->ipfw_set_disable;
1453 	ioc_rule->static_count = static_count;
1454 	ioc_rule->static_len = static_ioc_len;
1455 
1456 	ioc_rule->pcnt = 1;
1457 	ioc_rule->bcnt = 0;
1458 	ioc_rule->timestamp = 0;
1459 
1460 #ifdef INVARIANTS
1461 	i = 0;
1462 #endif
1463 	ioc_rule->pcnt = 0;
1464 	ioc_rule->bcnt = 0;
1465 	ioc_rule->timestamp = 0;
1466 	for (sibling = rule; sibling != NULL; sibling = sibling->sibling) {
1467 		ioc_rule->pcnt += sibling->pcnt;
1468 		ioc_rule->bcnt += sibling->bcnt;
1469 		if (sibling->timestamp > ioc_rule->timestamp)
1470 			ioc_rule->timestamp = sibling->timestamp;
1471 #ifdef INVARIANTS
1472 		++i;
1473 #endif
1474 	}
1475 
1476 	KASSERT(i == ncpus, ("static rule is not duplicated on every cpu"));
1477 
1478 	bcopy(rule->cmd, ioc_rule->cmd, ioc_rule->cmd_len * 4 /* XXX */);
1479 
1480 	return ((uint8_t *)ioc_rule + IOC_RULESIZE(ioc_rule));
1481 }
1482 
1483 static int
1484 ipfw_ctl_get_modules(struct sockopt *sopt)
1485 {
1486 	int i;
1487 	struct ipfw_module *mod;
1488 	char module_str[1024];
1489 	memset(module_str, 0, sizeof(module_str));
1490 	for (i = 0, mod = ipfw_modules; i < MAX_MODULE; i++, mod++) {
1491 		if (mod->type != 0) {
1492 			if (i > 0)
1493 				strcat(module_str, ",");
1494 			strcat(module_str, mod->name);
1495 		}
1496 	}
1497 	bzero(sopt->sopt_val, sopt->sopt_valsize);
1498 	bcopy(module_str, sopt->sopt_val, strlen(module_str));
1499 	sopt->sopt_valsize = strlen(module_str);
1500 	return 0;
1501 }
1502 
1503 /*
1504  * Copy out all static rules and the states from all CPUs.
1505  */
1506 static int
1507 ipfw_ctl_get_rules(struct sockopt *sopt)
1508 {
1509 	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1510 	struct ipfw_state_context *state_ctx;
1511 	struct ip_fw *rule;
1512 	struct ip_fw_state *state;
1513 	void *bp;
1514 	size_t size;
1515 	int i, j, state_count = 0;
1516 
1517 	size = static_ioc_len;
1518 	for (i = 0; i < ncpus; i++) {
1519 		for (j = 0; j < ctx->state_hash_size; j++) {
1520 			state_ctx = &ipfw_ctx[i]->state_ctx[j];
1521 			state_count += state_ctx->count;
1522 		}
1523 	}
1524 	if (state_count > 0) {
1525 		size += state_count * sizeof(struct ipfw_ioc_state);
1526 	}
1527 
1528 	if (sopt->sopt_valsize < size) {
1529 		/* XXX TODO sopt_val is not big enough */
1530 		bzero(sopt->sopt_val, sopt->sopt_valsize);
1531 		return 0;
1532 	}
1533 
1534 	sopt->sopt_valsize = size;
1535 	bp = sopt->sopt_val;
1536 
1537 	for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
1538 		bp = ipfw_copy_rule(rule, bp);
1539 	}
1540 	if (state_count > 0) {
1541 		for (i = 0; i < ncpus; i++) {
1542 			for (j = 0; j < ctx->state_hash_size; j++) {
1543 				state_ctx = &ipfw_ctx[i]->state_ctx[j];
1544 				state = state_ctx->state;
1545 				while (state != NULL) {
1546 					bp = ipfw_copy_state(state, bp, i);
1547 					state = state->next;
1548 				}
1549 			}
1550 		}
1551 	}
1552 	return 0;
1553 }
1554 
1555 static void
1556 ipfw_set_disable_dispatch(netmsg_t nmsg)
1557 {
1558 	struct lwkt_msg *lmsg = &nmsg->lmsg;
1559 	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1560 
1561 	ctx->ipfw_set_disable = lmsg->u.ms_result32;
1562 
1563 	ifnet_forwardmsg(lmsg, mycpuid + 1);
1564 }
1565 
1566 static void
1567 ipfw_ctl_set_disable(uint32_t disable, uint32_t enable)
1568 {
1569 	struct netmsg_base nmsg;
1570 	struct lwkt_msg *lmsg;
1571 	uint32_t set_disable;
1572 
1573 	/* IPFW_DEFAULT_SET is always enabled */
1574 	enable |= (1 << IPFW_DEFAULT_SET);
1575 	set_disable = (ipfw_ctx[mycpuid]->ipfw_set_disable | disable) & ~enable;
1576 
1577 	bzero(&nmsg, sizeof(nmsg));
1578 	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
1579 			0, ipfw_set_disable_dispatch);
1580 	lmsg = &nmsg.lmsg;
1581 	lmsg->u.ms_result32 = set_disable;
1582 
1583 	ifnet_domsg(lmsg, 0);
1584 }
1585 
1586 
1587 /*
1588  * ipfw_ctl_x - extended version of ipfw_ctl
1589  * Remove the x_header and adjust sopt_name, sopt_val and sopt_valsize.
1590  */
1591 int
1592 ipfw_ctl_x(struct sockopt *sopt)
1593 {
1594 	ip_fw_x_header *x_header;
1595 	x_header = (ip_fw_x_header *)(sopt->sopt_val);
1596 	sopt->sopt_name = x_header->opcode;
1597 	sopt->sopt_valsize -= sizeof(ip_fw_x_header);
1598 	bcopy(++x_header, sopt->sopt_val, sopt->sopt_valsize);
1599 	return ipfw_ctl(sopt);
1600 }
1601 
1602 
1603 /**
1604  * {set|get}sockopt parser.
1605  */
1606 static int
1607 ipfw_ctl(struct sockopt *sopt)
1608 {
1609 	int error, rulenum;
1610 	uint32_t *masks;
1611 	size_t size;
1612 
1613 	error = 0;
1614 	switch (sopt->sopt_name) {
1615 		case IP_FW_X:
1616 			ipfw_ctl_x(sopt);
1617 			break;
1618 		case IP_FW_GET:
1619 			error = ipfw_ctl_get_rules(sopt);
1620 			break;
1621 		case IP_FW_MODULE:
1622 			error = ipfw_ctl_get_modules(sopt);
1623 			break;
1624 
1625 		case IP_FW_FLUSH:
1626 			ipfw_ctl_flush_rule(0);
1627 			break;
1628 
1629 		case IP_FW_ADD:
1630 			error = ipfw_ctl_add_rule(sopt);
1631 			break;
1632 
1633 		case IP_FW_DEL:
1634 			/*
1635 			 * IP_FW_DEL is used for deleting single rules or sets,
1636 			 * and (ab)used to atomically manipulate sets.
1637 			 * Argument size is used to distinguish between the two:
1638 			 *	sizeof(uint32_t)
1639 			 *	delete single rule or set of rules,
1640 			 *	or reassign rules (or sets) to a different set.
1641 			 *	2 * sizeof(uint32_t)
1642 			 *	atomic disable/enable sets.
1643 			 *	first uint32_t contains sets to be disabled,
1644 			 *	second uint32_t contains sets to be enabled.
1645 			 */
1646 			masks = sopt->sopt_val;
1647 			size = sopt->sopt_valsize;
1648 			if (size == sizeof(*masks)) {
1649 				/*
1650 				 * Delete or reassign static rule
1651 				 */
1652 				error = ipfw_ctl_alter(masks[0]);
1653 			} else if (size == (2 * sizeof(*masks))) {
1654 				/*
1655 				 * Set enable/disable
1656 				 */
1657 				ipfw_ctl_set_disable(masks[0], masks[1]);
1658 			} else {
1659 				error = EINVAL;
1660 			}
1661 			break;
1662 		case IP_FW_ZERO:
1663 		case IP_FW_RESETLOG: /* argument is an int, the rule number */
1664 			rulenum = 0;
1665 			if (sopt->sopt_valsize != 0) {
1666 				error = soopt_to_kbuf(sopt, &rulenum,
1667 						sizeof(int), sizeof(int));
1668 				if (error) {
1669 					break;
1670 				}
1671 			}
1672 			error = ipfw_ctl_zero_entry(rulenum,
1673 					sopt->sopt_name == IP_FW_RESETLOG);
1674 			break;
1675 		case IP_FW_NAT_ADD:
1676 		case IP_FW_NAT_DEL:
1677 		case IP_FW_NAT_FLUSH:
1678 		case IP_FW_NAT_GET:
1679 		case IP_FW_NAT_GET_RECORD:
1680 			if (ipfw_ctl_nat_ptr != NULL) {
1681 				error = ipfw_ctl_nat_ptr(sopt);
1682 			}
1683 			break;
1684 		case IP_DUMMYNET_GET:
1685 		case IP_DUMMYNET_CONFIGURE:
1686 		case IP_DUMMYNET_DEL:
1687 		case IP_DUMMYNET_FLUSH:
1688 			error = ip_dn_sockopt(sopt);
1689 			break;
1690 		case IP_FW_STATE_ADD:
1691 			error = ipfw_ctl_add_state(sopt);
1692 			break;
1693 		case IP_FW_STATE_DEL:
1694 			error = ipfw_ctl_delete_state(sopt);
1695 			break;
1696 		case IP_FW_STATE_FLUSH:
1697 			error = ipfw_ctl_flush_state(sopt);
1698 			break;
1699 		case IP_FW_TABLE_CREATE:
1700 		case IP_FW_TABLE_DELETE:
1701 		case IP_FW_TABLE_APPEND:
1702 		case IP_FW_TABLE_REMOVE:
1703 		case IP_FW_TABLE_LIST:
1704 		case IP_FW_TABLE_FLUSH:
1705 		case IP_FW_TABLE_SHOW:
1706 		case IP_FW_TABLE_TEST:
1707 		case IP_FW_TABLE_RENAME:
1708 			error = ipfw_ctl_table_sockopt(sopt);
1709 			break;
1710 		case IP_FW_SYNC_SHOW_CONF:
1711 		case IP_FW_SYNC_SHOW_STATUS:
1712 		case IP_FW_SYNC_EDGE_CONF:
1713 		case IP_FW_SYNC_EDGE_START:
1714 		case IP_FW_SYNC_EDGE_STOP:
1715 		case IP_FW_SYNC_EDGE_TEST:
1716 		case IP_FW_SYNC_EDGE_CLEAR:
1717 		case IP_FW_SYNC_CENTRE_CONF:
1718 		case IP_FW_SYNC_CENTRE_START:
1719 		case IP_FW_SYNC_CENTRE_STOP:
1720 		case IP_FW_SYNC_CENTRE_TEST:
1721 		case IP_FW_SYNC_CENTRE_CLEAR:
1722 			error = ipfw_ctl_sync_sockopt(sopt);
1723 			break;
1724 		default:
1725 			kprintf("ipfw_ctl invalid option %d\n",
1726 				sopt->sopt_name);
1727 			error = EINVAL;
1728 	}
1729 	return error;
1730 }
1731 
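/*
 * pfil(9) hook for inbound IPv4 packets: run ipfw_chk() and translate
 * its verdict, freeing the mbuf and returning EACCES on deny, or
 * handing the packet to dummynet/divert when the rule asks for it.
 */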
1732 static int
1733 ipfw_check_in(void *arg, struct mbuf **m0, struct ifnet *ifp, int dir)
1734 {
1735 	struct ip_fw_args args;
1736 	struct mbuf *m = *m0;
1737 	struct m_tag *mtag;
1738 	int tee = 0, error = 0, ret;
1739 	// again:
1740 	if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED) {
1741 		/* Extract info from dummynet tag */
1742 		mtag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL);
1743 		KKASSERT(mtag != NULL);
1744 		args.rule = ((struct dn_pkt *)m_tag_data(mtag))->dn_priv;
1745 		KKASSERT(args.rule != NULL);
1746 
1747 		m_tag_delete(m, mtag);
1748 		m->m_pkthdr.fw_flags &= ~DUMMYNET_MBUF_TAGGED;
1749 	} else {
1750 		args.rule = NULL;
1751 	}
1752 
1753 	args.eh = NULL;
1754 	args.oif = NULL;
1755 	args.m = m;
1756 	ret = ipfw_chk(&args);
1757 	m = args.m;
1758 
1759 	if (m == NULL) {
1760 		error = EACCES;
1761 		goto back;
1762 	}
1763 	switch (ret) {
1764 		case IP_FW_PASS:
1765 			break;
1766 
1767 		case IP_FW_DENY:
1768 			m_freem(m);
1769 			m = NULL;
1770 			error = EACCES;
1771 			break;
1772 
1773 		case IP_FW_DUMMYNET:
1774 			/* Send packet to the appropriate pipe */
1775 			ipfw_dummynet_io(m, args.cookie, DN_TO_IP_IN, &args);
1776 			break;
1777 
1778 		case IP_FW_TEE:
1779 			tee = 1;
1780 			/* FALL THROUGH */
1781 
1782 		case IP_FW_DIVERT:
1783 			/*
1784 			 * Must clear bridge tag when changing
1785 			 */
1786 			m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED;
1787 			if (ip_divert_p != NULL) {
1788 				m = ip_divert_p(m, tee, 1);
1789 			} else {
1790 				m_freem(m);
1791 				m = NULL;
1792 				/* not sure this is the right error msg */
1793 				error = EACCES;
1794 			}
1795 			break;
1796 
1797 		case IP_FW_NAT:
1798 			break;
1799 		case IP_FW_ROUTE:
1800 			break;
1801 		default:
1802 			panic("unknown ipfw return value: %d", ret);
1803 	}
1804 back:
1805 	*m0 = m;
1806 	return error;
1807 }
1808 
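/*
 * pfil(9) hook for outbound IPv4 packets; same contract as
 * ipfw_check_in() but with args.oif set to the output interface.
 */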
1809 static int
1810 ipfw_check_out(void *arg, struct mbuf **m0, struct ifnet *ifp, int dir)
1811 {
1812 	struct ip_fw_args args;
1813 	struct mbuf *m = *m0;
1814 	struct m_tag *mtag;
1815 	int tee = 0, error = 0, ret;
1816 	// again:
1817 	if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED) {
1818 		/* Extract info from dummynet tag */
1819 		mtag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL);
1820 		KKASSERT(mtag != NULL);
1821 		args.rule = ((struct dn_pkt *)m_tag_data(mtag))->dn_priv;
1822 		KKASSERT(args.rule != NULL);
1823 
1824 		m_tag_delete(m, mtag);
1825 		m->m_pkthdr.fw_flags &= ~DUMMYNET_MBUF_TAGGED;
1826 	} else {
1827 		args.rule = NULL;
1828 	}
1829 
1830 	args.eh = NULL;
1831 	args.m = m;
1832 	args.oif = ifp;
1833 	ret = ipfw_chk(&args);
1834 	m = args.m;
1835 
1836 	if (m == NULL) {
1837 		error = EACCES;
1838 		goto back;
1839 	}
1840 
1841 	switch (ret) {
1842 		case IP_FW_PASS:
1843 			break;
1844 
1845 		case IP_FW_DENY:
1846 			m_freem(m);
1847 			m = NULL;
1848 			error = EACCES;
1849 			break;
1850 
1851 		case IP_FW_DUMMYNET:
1852 			ipfw_dummynet_io(m, args.cookie, DN_TO_IP_OUT, &args);
1853 			break;
1854 
1855 		case IP_FW_TEE:
1856 			tee = 1;
1857 			/* FALL THROUGH */
1858 
1859 		case IP_FW_DIVERT:
1860 			if (ip_divert_p != NULL) {
1861 				m = ip_divert_p(m, tee, 0);
1862 			} else {
1863 				m_freem(m);
1864 				m = NULL;
1865 				/* not sure this is the right error msg */
1866 				error = EACCES;
1867 			}
1868 			break;
1869 
1870 		case IP_FW_NAT:
1871 			break;
1872 		case IP_FW_ROUTE:
1873 			break;
1874 		default:
1875 			panic("unknown ipfw return value: %d", ret);
1876 	}
1877 back:
1878 	*m0 = m;
1879 	return error;
1880 }
1881 
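/*
 * Attach the two checker functions to the AF_INET pfil head; called
 * when the firewall is enabled.
 */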
1882 static void
1883 ipfw_hook(void)
1884 {
1885 	struct pfil_head *pfh;
1886 	IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
1887 
1888 	pfh = pfil_head_get(PFIL_TYPE_AF, AF_INET);
1889 	if (pfh == NULL)
1890 		return;
1891 
1892 	pfil_add_hook(ipfw_check_in, NULL, PFIL_IN | PFIL_MPSAFE, pfh);
1893 	pfil_add_hook(ipfw_check_out, NULL, PFIL_OUT | PFIL_MPSAFE, pfh);
1894 }
1895 
1896 static void
1897 ipfw_dehook(void)
1898 {
1899 	struct pfil_head *pfh;
1900 
1901 	IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
1902 
1903 	pfh = pfil_head_get(PFIL_TYPE_AF, AF_INET);
1904 	if (pfh == NULL)
1905 		return;
1906 
1907 	pfil_remove_hook(ipfw_check_in, NULL, PFIL_IN, pfh);
1908 	pfil_remove_hook(ipfw_check_out, NULL, PFIL_OUT, pfh);
1909 }
1910 
1911 static void
1912 ipfw_sysctl_enable_dispatch(netmsg_t nmsg)
1913 {
1914 	struct lwkt_msg *lmsg = &nmsg->lmsg;
1915 	int enable = lmsg->u.ms_result;
1916 
1917 	if (fw3_enable == enable)
1918 		goto reply;
1919 
1920 	fw3_enable = enable;
1921 	if (fw3_enable)
1922 		ipfw_hook();
1923 	else
1924 		ipfw_dehook();
1925 
1926 reply:
1927 	lwkt_replymsg(lmsg, 0);
1928 }
1929 
1930 static int
1931 ipfw_sysctl_enable(SYSCTL_HANDLER_ARGS)
1932 {
1933 	struct netmsg_base nmsg;
1934 	struct lwkt_msg *lmsg;
1935 	int enable, error;
1936 
1937 	enable = fw3_enable;
1938 	error = sysctl_handle_int(oidp, &enable, 0, req);
1939 	if (error || req->newptr == NULL)
1940 		return error;
1941 
1942 	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
1943 			0, ipfw_sysctl_enable_dispatch);
1944 	lmsg = &nmsg.lmsg;
1945 	lmsg->u.ms_result = enable;
1946 
1947 	return lwkt_domsg(IPFW_CFGPORT, lmsg, 0);
1948 }
1949 
1950 static int
1951 ipfw_sysctl_autoinc_step(SYSCTL_HANDLER_ARGS)
1952 {
1953 	return sysctl_int_range(oidp, arg1, arg2, req,
1954 			IPFW_AUTOINC_STEP_MIN, IPFW_AUTOINC_STEP_MAX);
1955 }
1956 
1957 
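/*
 * Per-CPU initialization: allocate the ipfw context and install the
 * default rule (IPFW_DEFAULT_RULE), which accepts or denies everything
 * depending on IPFIREWALL_DEFAULT_TO_ACCEPT or the
 * filters_default_to_accept tunable.
 */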
1958 static void
1959 ipfw_ctx_init_dispatch(netmsg_t nmsg)
1960 {
1961 	struct netmsg_ipfw *fwmsg = (struct netmsg_ipfw *)nmsg;
1962 	struct ipfw_context *ctx;
1963 	struct ip_fw *def_rule;
1964 
1965 	ctx = kmalloc(sizeof(struct ipfw_context), M_IPFW3, M_WAITOK | M_ZERO);
1966 	ipfw_ctx[mycpuid] = ctx;
1967 
1968 	def_rule = kmalloc(sizeof(struct ip_fw), M_IPFW3, M_WAITOK | M_ZERO);
1969 	def_rule->act_ofs = 0;
1970 	def_rule->rulenum = IPFW_DEFAULT_RULE;
1971 	def_rule->cmd_len = 2;
1972 	def_rule->set = IPFW_DEFAULT_SET;
1973 
1974 	def_rule->cmd[0].len = LEN_OF_IPFWINSN;
1975 	def_rule->cmd[0].module = MODULE_BASIC_ID;
1976 #ifdef IPFIREWALL_DEFAULT_TO_ACCEPT
1977 	def_rule->cmd[0].opcode = O_BASIC_ACCEPT;
1978 #else
1979 	if (filters_default_to_accept)
1980 		def_rule->cmd[0].opcode = O_BASIC_ACCEPT;
1981 	else
1982 		def_rule->cmd[0].opcode = O_BASIC_DENY;
1983 #endif
1984 
1985 	/* Install the default rule */
1986 	ctx->ipfw_default_rule = def_rule;
1987 	ctx->ipfw_rule_chain = def_rule;
1988 
1989 	/*
1990 	 * If a sibling rule was created on the previous CPU,
1991 	 * link it to the default rule just created here.
1992 	 */
1993 	if (fwmsg->sibling != NULL) {
1994 		fwmsg->sibling->sibling = def_rule;
1995 	}
1996 	/* prepare for next CPU */
1997 	fwmsg->sibling = def_rule;
1998 
1999 	/* Statistics only need to be updated once */
2000 	if (mycpuid == 0)
2001 		ipfw_inc_static_count(def_rule);
2002 
2003 	ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
2004 }
2005 
2006 static void
2007 ipfw_init_dispatch(netmsg_t nmsg)
2008 {
2009 	struct netmsg_ipfw fwmsg;
2010 	int error = 0;
2011 	if (IPFW3_LOADED) {
2012 		kprintf("ipfw3 already loaded\n");
2013 		error = EEXIST;
2014 		goto reply;
2015 	}
2016 
2017 	bzero(&fwmsg, sizeof(fwmsg));
2018 	netmsg_init(&fwmsg.base, NULL, &curthread->td_msgport,
2019 			0, ipfw_ctx_init_dispatch);
2020 	ifnet_domsg(&fwmsg.base.lmsg, 0);
2021 
2022 	ip_fw_chk_ptr = ipfw_chk;
2023 	ip_fw_ctl_x_ptr = ipfw_ctl_x;
2024 	ip_fw_dn_io_ptr = ipfw_dummynet_io;
2025 
2026 	kprintf("ipfw3 initialized, default to %s\n",
2027 			filters_default_to_accept ? "accept" : "deny");
2028 
2029 	ip_fw3_loaded = 1;
2030 	if (fw3_enable)
2031 		ipfw_hook();
2032 reply:
2033 	lwkt_replymsg(&nmsg->lmsg, error);
2034 }
2035 
2036 static int
2037 ipfw3_init(void)
2038 {
2039 	struct netmsg_base smsg;
2040 	int error;
2041 
2042 	ipfw3_log_modevent(MOD_LOAD);
2043 	ipfw3_sync_modevent(MOD_LOAD);
2044 
2045 	init_module();
2046 	netmsg_init(&smsg, NULL, &curthread->td_msgport,
2047 			0, ipfw_init_dispatch);
2048 	error = lwkt_domsg(IPFW_CFGPORT, &smsg.lmsg, 0);
2049 	netmsg_init(&smsg, NULL, &curthread->td_msgport,
2050 			0, table_init_dispatch);
2051 	error = lwkt_domsg(IPFW_CFGPORT, &smsg.lmsg, 0);
2052 	return error;
2053 }
2054 
2055 #ifdef KLD_MODULE
2056 
2057 static void
2058 ipfw_fini_dispatch(netmsg_t nmsg)
2059 {
2060 	int error = 0, cpu;
2061 
2062 	ip_fw3_loaded = 0;
2063 
2064 	ipfw_dehook();
2065 	netmsg_service_sync();
2066 	ip_fw_chk_ptr = NULL;
2067 	ip_fw_ctl_x_ptr = NULL;
2068 	ip_fw_dn_io_ptr = NULL;
2069 	ipfw_ctl_flush_rule(1 /* kill default rule */);
2070 	table_fini();
2071 	/* Free pre-cpu context */
2072 	for (cpu = 0; cpu < ncpus; ++cpu) {
2073 		if (ipfw_ctx[cpu] != NULL) {
2074 			kfree(ipfw_ctx[cpu], M_IPFW3);
2075 			ipfw_ctx[cpu] = NULL;
2076 		}
2077 	}
2078 	kprintf("ipfw3 unloaded\n");
2079 
2080 	lwkt_replymsg(&nmsg->lmsg, error);
2081 }
2082 
2083 static int
2084 ipfw3_fini(void)
2085 {
2086 	struct netmsg_base smsg;
2087 
2088 	ipfw3_log_modevent(MOD_UNLOAD);
2089 	ipfw3_sync_modevent(MOD_UNLOAD);
2090 
2091 	netmsg_init(&smsg, NULL, &curthread->td_msgport,
2092 			0, ipfw_fini_dispatch);
2093 	return lwkt_domsg(IPFW_CFGPORT, &smsg.lmsg, 0);
2094 }
2095 
2096 #endif	/* KLD_MODULE */
2097 
2098 static int
2099 ipfw3_modevent(module_t mod, int type, void *unused)
2100 {
2101 	int err = 0;
2102 
2103 	switch (type) {
2104 		case MOD_LOAD:
2105 			err = ipfw3_init();
2106 			break;
2107 
2108 		case MOD_UNLOAD:
2109 
2110 #ifndef KLD_MODULE
2111 			kprintf("ipfw statically compiled, cannot unload\n");
2112 			err = EBUSY;
2113 #else
2114 			err = ipfw3_fini();
2115 #endif
2116 			break;
2117 		default:
2118 			break;
2119 	}
2120 	return err;
2121 }
2122 
2123 static moduledata_t ipfw3mod = {
2124 	"ipfw3",
2125 	ipfw3_modevent,
2126 	0
2127 };
2128 DECLARE_MODULE(ipfw3, ipfw3mod, SI_SUB_PROTO_END, SI_ORDER_ANY);
2129 MODULE_VERSION(ipfw3, 1);
2130