xref: /dragonfly/sys/net/ipfw3/ip_fw3.c (revision 745703c7)
1 /*
2  * Copyright (c) 1993 Daniel Boulet
3  * Copyright (c) 1994 Ugen J.S.Antsilevich
4  * Copyright (c) 2002 Luigi Rizzo, Universita` di Pisa
5  * Copyright (c) 2015 The DragonFly Project.  All rights reserved.
6  *
7  * This code is derived from software contributed to The DragonFly Project
8  * by Bill Yuan <bycn82@gmail.com>
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  *
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in
18  *    the documentation and/or other materials provided with the
19  *    distribution.
20  * 3. Neither the name of The DragonFly Project nor the names of its
21  *    contributors may be used to endorse or promote products derived
22  *    from this software without specific, prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
28  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  *
37  */
38 
39 #include "opt_ipfw.h"
40 #include "opt_inet.h"
41 #ifndef INET
42 #error IPFIREWALL3 requires INET.
43 #endif /* INET */
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/malloc.h>
48 #include <sys/mbuf.h>
49 #include <sys/kernel.h>
50 #include <sys/proc.h>
51 #include <sys/socket.h>
52 #include <sys/socketvar.h>
53 #include <sys/sysctl.h>
54 #include <sys/syslog.h>
55 #include <sys/ucred.h>
56 #include <sys/in_cksum.h>
57 #include <sys/lock.h>
58 #include <sys/thread2.h>
59 #include <sys/mplock2.h>
60 
61 #include <netinet/in.h>
62 #include <netinet/in_systm.h>
63 #include <netinet/in_var.h>
64 #include <netinet/in_pcb.h>
65 #include <netinet/ip.h>
66 #include <netinet/ip_var.h>
67 #include <netinet/ip_icmp.h>
68 #include <netinet/tcp.h>
69 #include <netinet/tcp_timer.h>
70 #include <netinet/tcp_var.h>
71 #include <netinet/tcpip.h>
72 #include <netinet/udp.h>
73 #include <netinet/udp_var.h>
74 #include <netinet/ip_divert.h>
75 #include <netinet/if_ether.h>
76 
77 #include <net/if.h>
78 #include <net/route.h>
79 #include <net/pfil.h>
80 #include <net/netmsg2.h>
81 
82 #include <net/ipfw3/ip_fw.h>
83 #include <net/ipfw3_basic/ip_fw3_basic.h>
84 #include <net/ipfw3_nat/ip_fw3_nat.h>
85 #include <net/dummynet3/ip_dummynet3.h>
86 
87 MALLOC_DEFINE(M_IPFW3, "IPFW3", "ip_fw3 default module");
88 
89 #ifdef IPFIREWALL_DEBUG
90 #define DPRINTF(fmt, ...)			\
91 do { 						\
92 	if (fw_debug > 0) 			\
93 		kprintf(fmt, __VA_ARGS__); 	\
94 } while (0)
95 #else
96 #define DPRINTF(fmt, ...)	((void)0)
97 #endif
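/*
 * Illustrative use of the DPRINTF() macro above (example only, not part
 * of the original sources): output is produced only in kernels built
 * with IPFIREWALL_DEBUG and with net.inet.ip.fw3.debug set non-zero,
 * e.g.
 *
 *	DPRINTF("ipfw3: rule %d matched\n", f->rulenum);
 */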
98 
99 #define MAX_MODULE		10
100 #define MAX_OPCODE_PER_MODULE	100
101 
102 #define IPFW_AUTOINC_STEP_MIN	1
103 #define IPFW_AUTOINC_STEP_MAX	1000
104 #define IPFW_AUTOINC_STEP_DEF	100
105 
106 
107 struct netmsg_ipfw {
108 	struct netmsg_base base;
109 	const struct ipfw_ioc_rule *ioc_rule;
110 	struct ip_fw	*rule;
111 	struct ip_fw	*next_rule;
112 	struct ip_fw	*prev_rule;
113 	struct ip_fw	*sibling;	/* sibling on the previous CPU */
114 };
115 
116 struct netmsg_del {
117 	struct netmsg_base base;
118 	struct ip_fw	*rule;
119 	struct ip_fw	*start_rule;
120 	struct ip_fw	*prev_rule;
121 	struct ipfw_ioc_state *ioc_state;
122 	uint16_t	rulenum;
123 	uint8_t		from_set;
124 	uint8_t		to_set;
125 };
126 
127 struct netmsg_zent {
128 	struct netmsg_base base;
129 	struct ip_fw	*start_rule;
130 	uint16_t	rulenum;
131 	uint16_t	log_only;
132 };
133 
134 ipfw_nat_cfg_t *ipfw_nat_cfg_ptr;
135 ipfw_nat_cfg_t *ipfw_nat_del_ptr;
136 ipfw_nat_cfg_t *ipfw_nat_flush_ptr;
137 ipfw_nat_cfg_t *ipfw_nat_get_cfg_ptr;
138 ipfw_nat_cfg_t *ipfw_nat_get_log_ptr;
139 
140 /* handlers implemented in the ipfw3_basic module */
141 ipfw_basic_delete_state_t *ipfw_basic_flush_state_prt = NULL;
142 ipfw_basic_append_state_t *ipfw_basic_append_state_prt = NULL;
143 
144 static struct ipfw_context	*ipfw_ctx[MAXCPU];
145 static struct ipfw_nat_context *ipfw_nat_ctx;
146 
147 extern int ip_fw_loaded;
148 static uint32_t static_count;	/* # of static rules */
149 static uint32_t static_ioc_len;	/* bytes of static rules */
150 static int ipfw_flushing;
151 static int fw_verbose;
152 static int verbose_limit;
153 static int fw_debug;
154 static int autoinc_step = IPFW_AUTOINC_STEP_DEF;
155 
156 static int	ipfw_sysctl_enable(SYSCTL_HANDLER_ARGS);
157 static int	ipfw_sysctl_autoinc_step(SYSCTL_HANDLER_ARGS);
158 
159 SYSCTL_NODE(_net_inet_ip, OID_AUTO, fw3, CTLFLAG_RW, 0, "Firewall");
160 SYSCTL_PROC(_net_inet_ip_fw3, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
161 	&fw3_enable, 0, ipfw_sysctl_enable, "I", "Enable ipfw");
162 SYSCTL_PROC(_net_inet_ip_fw3, OID_AUTO, autoinc_step, CTLTYPE_INT | CTLFLAG_RW,
163 	&autoinc_step, 0, ipfw_sysctl_autoinc_step, "I",
164 	"Rule number auto-increment step");
165 SYSCTL_INT(_net_inet_ip_fw3, OID_AUTO, one_pass, CTLFLAG_RW,
166 	&fw3_one_pass, 0,
167 	"Only do a single pass through ipfw when using dummynet(4)");
168 SYSCTL_INT(_net_inet_ip_fw3, OID_AUTO, debug, CTLFLAG_RW,
169 	&fw_debug, 0, "Enable printing of debug ip_fw statements");
170 SYSCTL_INT(_net_inet_ip_fw3, OID_AUTO, verbose, CTLFLAG_RW,
171 	&fw_verbose, 0, "Log matches to ipfw rules");
172 SYSCTL_INT(_net_inet_ip_fw3, OID_AUTO, verbose_limit, CTLFLAG_RW,
173 	&verbose_limit, 0, "Set upper limit of matches of ipfw rules logged");
174 SYSCTL_INT(_net_inet_ip_fw3, OID_AUTO, static_count, CTLFLAG_RD,
175 	&static_count, 0, "Number of static rules");
176 
177 filter_func filter_funcs[MAX_MODULE][MAX_OPCODE_PER_MODULE];
178 struct ipfw_module ipfw_modules[MAX_MODULE];
179 static int ipfw_ctl(struct sockopt *sopt);
180 
181 
182 void
183 check_accept(int *cmd_ctl, int *cmd_val, struct ip_fw_args **args,
184 		struct ip_fw **f, ipfw_insn *cmd, uint16_t ip_len);
185 void
186 check_deny(int *cmd_ctl, int *cmd_val, struct ip_fw_args **args,
187 		struct ip_fw **f, ipfw_insn *cmd, uint16_t ip_len);
188 void init_module(void);
189 
190 
191 void
192 register_ipfw_module(int module_id, char *module_name)
193 {
194 	struct ipfw_module *tmp;
195 	int i;
196 
197 	tmp = ipfw_modules;
198 	for (i = 0; i < MAX_MODULE; i++) {
199 		if (tmp->type == 0) {
200 			tmp->type = 1;
201 			tmp->id = module_id;
202 			strlcpy(tmp->name, module_name, sizeof(tmp->name));
203 			break;
204 		}
205 		tmp++;
206 	}
207 	kprintf("ipfw3 module %s loaded\n", module_name);
208 }
209 
210 int
211 unregister_ipfw_module(int module_id)
212 {
213 	struct ipfw_module *tmp;
214 	struct ip_fw *fw;
215 	ipfw_insn *cmd;
216 	int i, len, cmdlen, found;
217 
218 	found = 0;
219 	tmp = ipfw_modules;
220 	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
221 	fw = ctx->ipfw_rule_chain;
222 	for (; fw; fw = fw->next) {
223 		for (len = fw->cmd_len, cmd = fw->cmd; len > 0;
224 			len -= cmdlen,
225 			cmd = (ipfw_insn *)((uint32_t *)cmd + cmdlen)) {
226 			cmdlen = F_LEN(cmd);
227 			if (cmd->module == 0 &&
228 				(cmd->opcode == 0 || cmd->opcode == 1)) {
229 				/* action accept or deny */
230 			} else if (cmd->module == module_id) {
231 				found = 1;
232 				goto decide;
233 			}
234 		}
235 	}
236 decide:
237 	if (found) {
238 		return 1;
239 	} else {
240 		for (i = 0; i < MAX_MODULE; i++) {
241 			if (tmp->type == 1 && tmp->id == module_id) {
242 				tmp->type = 0;
243 				kprintf("ipfw3 module %s unloaded\n", tmp->name);
244 				break;
245 			}
246 			tmp++;
247 		}
248 
249 		for (i = 0; i < MAX_OPCODE_PER_MODULE; i++) {
250 			if (module_id == 0) {
251 				if (i == 0 || i == 1) {
252 					continue;
253 				}
254 			}
255 			filter_funcs[module_id][i] = NULL;
256 		}
257 		return 0;
258 	}
259 }
260 
261 void
262 register_ipfw_filter_funcs(int module, int opcode, filter_func func)
263 {
264 	filter_funcs[module][opcode] = func;
265 }
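/*
 * Sketch of how a filter module is expected to use the two registration
 * hooks above from its MOD_LOAD handler.  The module name string and the
 * opcode chosen here are illustrative assumptions, not the exact
 * ipfw3_basic code:
 *
 *	register_ipfw_module(MODULE_BASIC_ID, "basic");
 *	register_ipfw_filter_funcs(MODULE_BASIC_ID, O_BASIC_ACCEPT,
 *			(filter_func)check_accept);
 *
 * unregister_ipfw_module() refuses to unregister while any installed
 * rule still references one of the module's opcodes.
 */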
266 
267 void
268 check_accept(int *cmd_ctl, int *cmd_val, struct ip_fw_args **args,
269 		struct ip_fw **f, ipfw_insn *cmd, uint16_t ip_len)
270 {
271 	*cmd_val = IP_FW_PASS;
272 	*cmd_ctl = IP_FW_CTL_DONE;
273 }
274 
275 void
276 check_deny(int *cmd_ctl, int *cmd_val, struct ip_fw_args **args,
277 		struct ip_fw **f, ipfw_insn *cmd, uint16_t ip_len)
278 {
279 	*cmd_val = IP_FW_DENY;
280 	*cmd_ctl = IP_FW_CTL_DONE;
281 }
282 
283 void
284 init_module(void)
285 {
286 	memset(ipfw_modules, 0, sizeof(struct ipfw_module) * MAX_MODULE);
287 	memset(filter_funcs, 0, sizeof(filter_func) *
288 			MAX_OPCODE_PER_MODULE * MAX_MODULE);
289 	register_ipfw_filter_funcs(0, O_BASIC_ACCEPT,
290 			(filter_func)check_accept);
291 	register_ipfw_filter_funcs(0, O_BASIC_DENY, (filter_func)check_deny);
292 }
293 
294 static __inline int
295 ipfw_free_rule(struct ip_fw *rule)
296 {
297 	kfree(rule, M_IPFW3);
298 	rule = NULL;
299 	return 1;
300 }
301 
302 static struct ip_fw *
303 lookup_next_rule(struct ip_fw *me)
304 {
305 	struct ip_fw *rule = NULL;
306 	ipfw_insn *cmd;
307 
308 	/* look for action, in case it is a skipto */
309 	cmd = ACTION_PTR(me);
310 	if ((int)cmd->module == MODULE_BASIC_ID &&
311 		(int)cmd->opcode == O_BASIC_SKIPTO) {
312 		for (rule = me->next; rule; rule = rule->next) {
313 			if (rule->rulenum >= cmd->arg1)
314 				break;
315 		}
316 	}
317 	if (rule == NULL) {	/* failure or not a skipto */
318 		rule = me->next;
319 	}
320 	me->next_rule = rule;
321 	return rule;
322 }
323 
324 /*
325  * Rules are stored in ctx->ipfw_rule_chain.  Each rule is a sequence of
326  * micro-instructions (ipfw_insn): it starts with filter cmds and ends
327  * with action cmds.  The outer loop below walks the rules, the inner
328  * loop walks the cmds of each rule.  For every cmd the filter function
329  * registered for its module id and opcode is invoked, and processing
330  * continues according to the control value it returns.
331  */
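/*
 * Illustrative layout of a single rule (a sketch for documentation, not
 * a dump of real chain contents): the cmd[] area holds zero or more
 * filter instructions followed by the action instruction, which starts
 * act_ofs 32-bit words into the array (see ACTION_PTR()):
 *
 *	filter insn(s)	module id / opcode pairs, each F_LEN() words long
 *	action insn	e.g. MODULE_BASIC_ID / O_BASIC_ACCEPT or O_BASIC_DENY
 *
 * Every instruction is dispatched through
 * filter_funcs[cmd->module][cmd->opcode].
 */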
332 static int
333 ipfw_chk(struct ip_fw_args *args)
334 {
335 	struct mbuf *m = args->m;
336 	struct ip *ip = mtod(m, struct ip *);
337 	struct ip_fw *f = NULL;		/* matching rule */
338 	int cmd_val = IP_FW_PASS;
339 	struct m_tag *mtag;
340 	struct divert_info *divinfo;
341 
342 	/*
343 	 * hlen	The length of the IPv4 header.
344 	 *	hlen >0 means we have an IPv4 packet.
345 	 */
346 	u_int hlen = 0;		/* hlen >0 means we have an IP pkt */
347 
348 	/*
349 	 * offset	The offset of a fragment. offset != 0 means that
350 	 *	we have a fragment at this offset of an IPv4 packet.
351 	 *	offset == 0 means that (if this is an IPv4 packet)
352 	 *	this is the first or only fragment.
353 	 */
354 	u_short offset = 0;
355 
356 	uint8_t proto;
357 	uint16_t src_port = 0, dst_port = 0;	/* NOTE: host format	*/
358 	struct in_addr src_ip, dst_ip;		/* NOTE: network format	*/
359 	uint16_t ip_len = 0;
360 	uint8_t prev_module = -1, prev_opcode = -1; /* previous module & opcode */
361 	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
362 
363 	if (m->m_pkthdr.fw_flags & IPFW_MBUF_GENERATED)
364 		return IP_FW_PASS;	/* accept */
365 
366 	if (args->eh == NULL ||		/* layer 3 packet */
367 		(m->m_pkthdr.len >= sizeof(struct ip) &&
368 		 ntohs(args->eh->ether_type) == ETHERTYPE_IP))
369 		hlen = ip->ip_hl << 2;
370 
371 	/*
372 	 * Collect parameters into local variables for faster matching.
373 	 */
374 	if (hlen == 0) {	/* do not grab addresses for non-ip pkts */
375 		proto = args->f_id.proto = 0;	/* mark f_id invalid */
376 		goto after_ip_checks;
377 	}
378 
379 	proto = args->f_id.proto = ip->ip_p;
380 	src_ip = ip->ip_src;
381 	dst_ip = ip->ip_dst;
382 	if (args->eh != NULL) { /* layer 2 packets are as on the wire */
383 		offset = ntohs(ip->ip_off) & IP_OFFMASK;
384 		ip_len = ntohs(ip->ip_len);
385 	} else {
386 		offset = ip->ip_off & IP_OFFMASK;
387 		ip_len = ip->ip_len;
388 	}
389 
390 #define PULLUP_TO(len)					\
391 do {							\
392 	if (m->m_len < (len)) {				\
393 		args->m = m = m_pullup(m, (len));	\
394 			if (m == NULL)			\
395 				goto pullup_failed;	\
396 		ip = mtod(m, struct ip *);		\
397 	}						\
398 } while (0)
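/*
 * What PULLUP_TO() guarantees (descriptive note): after, say,
 * PULLUP_TO(hlen + sizeof(struct tcphdr)), the first mbuf holds the
 * complete IP and TCP headers, so the L3HDR() accesses below are safe;
 * if m_pullup() fails the packet is dropped via pullup_failed.
 */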
399 
400 	if (offset == 0) {
401 		switch (proto) {
402 			case IPPROTO_TCP:
403 				{
404 					struct tcphdr *tcp;
405 
406 					PULLUP_TO(hlen + sizeof(struct tcphdr));
407 					tcp = L3HDR(struct tcphdr, ip);
408 					dst_port = tcp->th_dport;
409 					src_port = tcp->th_sport;
410 					args->f_id.flags = tcp->th_flags;
411 				}
412 				break;
413 
414 			case IPPROTO_UDP:
415 				{
416 					struct udphdr *udp;
417 
418 					PULLUP_TO(hlen + sizeof(struct udphdr));
419 					udp = L3HDR(struct udphdr, ip);
420 					dst_port = udp->uh_dport;
421 					src_port = udp->uh_sport;
422 				}
423 				break;
424 
425 			case IPPROTO_ICMP:
426 				PULLUP_TO(hlen + 4);
427 				args->f_id.flags =
428 					L3HDR(struct icmp, ip)->icmp_type;
429 				break;
430 
431 			default:
432 				break;
433 		}
434 	}
435 
436 #undef PULLUP_TO
437 
438 	args->f_id.src_ip = ntohl(src_ip.s_addr);
439 	args->f_id.dst_ip = ntohl(dst_ip.s_addr);
440 	args->f_id.src_port = src_port = ntohs(src_port);
441 	args->f_id.dst_port = dst_port = ntohs(dst_port);
442 
443 after_ip_checks:
444 	if (args->rule) {
445 		/*
446 		 * Packet has already been tagged. Look for the next rule
447 		 * to restart processing.
448 		 *
449 		 * If fw3_one_pass != 0 then just accept it.
450 		 * XXX should not happen here, but optimized out in
451 		 * the caller.
452 		 */
453 		if (fw3_one_pass)
454 			return IP_FW_PASS;
455 
456 		/* This rule is being/has been flushed */
457 		if (ipfw_flushing)
458 			return IP_FW_DENY;
459 
460 		f = args->rule->next_rule;
461 		if (f == NULL)
462 			f = lookup_next_rule(args->rule);
463 	} else {
464 		/*
465 		 * Find the starting rule. It can be either the first
466 		 * one, or the one after divert_rule if asked so.
467 		 */
468 		int skipto;
469 
470 		mtag = m_tag_find(m, PACKET_TAG_IPFW_DIVERT, NULL);
471 		if (mtag != NULL) {
472 			divinfo = m_tag_data(mtag);
473 			skipto = divinfo->skipto;
474 		} else {
475 			skipto = 0;
476 		}
477 
478 		f = ctx->ipfw_rule_chain;
479 		if (args->eh == NULL && skipto != 0) {
480 			/* No skipto during rule flushing */
481 			if (ipfw_flushing) {
482 				return IP_FW_DENY;
483 			}
484 			if (skipto >= IPFW_DEFAULT_RULE) {
485 				return IP_FW_DENY; /* invalid */
486 			}
487 			while (f && f->rulenum <= skipto) {
488 				f = f->next;
489 			}
490 			if (f == NULL) {	/* drop packet */
491 				return IP_FW_DENY;
492 			}
493 		} else if (ipfw_flushing) {
494 			/* Rules are being flushed; skip to default rule */
495 			f = ctx->ipfw_default_rule;
496 		}
497 	}
498 	if ((mtag = m_tag_find(m, PACKET_TAG_IPFW_DIVERT, NULL)) != NULL) {
499 		m_tag_delete(m, mtag);
500 	}
501 
502 	/*
503 	 * Now scan the rules, and parse microinstructions for each rule.
504 	 */
505 	int prev_val;	/*  previous result of 'or' filter */
506 	int l, cmdlen;
507 	ipfw_insn *cmd;
508 	int cmd_ctl;
509 	/* foreach rule in chain */
510 	for (; f; f = f->next) {
511 again:  /* check the rule again */
512 		if (ctx->ipfw_set_disable & (1 << f->set)) {
513 			continue;
514 		}
515 
516 		prev_val = -1;
517 		 /* foreach cmd in rule */
518 		for (l = f->cmd_len, cmd = f->cmd; l > 0; l -= cmdlen,
519 			cmd = (ipfw_insn *)((uint32_t *)cmd + cmdlen)) {
520 			cmdlen = F_LEN(cmd);
521 
522 			/* skip 'or' filter when already match */
523 			if (cmd->len & F_OR &&
524 				cmd->module == prev_module &&
525 				cmd->opcode == prev_opcode &&
526 				prev_val == 1) {
527 				goto next_cmd;
528 			}
529 
530 check_body: /* check the body of the rule again. */
531 			(filter_funcs[cmd->module][cmd->opcode])
532 				(&cmd_ctl, &cmd_val, &args, &f, cmd, ip_len);
533 			switch(cmd_ctl) {
534 				case IP_FW_CTL_DONE:
535 					if (prev_val == 0) /* but 'or' failed */
536 						goto next_rule;
537 					goto done;
538 				case IP_FW_CTL_AGAIN:
539 					goto again;
540 				case IP_FW_CTL_NEXT:
541 					goto next_rule;
542 				case IP_FW_CTL_NAT:
543 					args->rule = f;
544 					goto done;
545 				case IP_FW_CTL_CHK_STATE:
546 					/* update the cmd and l */
547 					cmd = ACTION_PTR(f);
548 					l = f->cmd_len - f->act_ofs;
549 					goto check_body;
550 			}
551 			if (cmd->len & F_NOT)
552 				cmd_val = !cmd_val;
553 
554 			if (cmd->len & F_OR) {	/* has 'or' */
555 				if (!cmd_val) {	/* not matched */
556 					if (prev_val == -1) {	/* first 'or' */
557 						prev_val = 0;
558 						prev_module = cmd->module;
559 						prev_opcode = cmd->opcode;
560 					} else if (prev_module == cmd->module &&
561 						prev_opcode == cmd->opcode) {
562 						/* continuous 'or' filter */
563 					} else if (prev_module != cmd->module ||
564 						prev_opcode != cmd->opcode) {
565 						/* 'or' filter changed */
566 						if (prev_val == 0) {
567 							goto next_rule;
568 						} else {
569 							prev_val = 0;
570 							prev_module = cmd->module;
571 							prev_opcode = cmd->opcode;
572 						}
573 					}
574 				} else { /* has 'or' and matched */
575 					prev_val = 1;
576 					prev_module = cmd->module;
577 					prev_opcode = cmd->opcode;
578 				}
579 			} else { /* no or */
580 				if (!cmd_val) {	/* not matched */
581 					goto next_rule;
582 				} else {
583 					if (prev_val == 0) {
584 						/* previous 'or' not matched */
585 						goto next_rule;
586 					} else {
587 						prev_val = -1;
588 					}
589 				}
590 			}
591 next_cmd:;
592 		}	/* end of inner for, scan opcodes */
593 next_rule:;		/* try next rule		*/
594 	}		/* end of outer for, scan rules */
595 	kprintf("+++ ipfw: ouch!, skip past end of rules, denying packet\n");
596 	return IP_FW_DENY;
597 
598 done:
599 	/* Update statistics */
600 	f->pcnt++;
601 	f->bcnt += ip_len;
602 	f->timestamp = time_second;
603 	return cmd_val;
604 
605 pullup_failed:
606 	if (fw_verbose)
607 		kprintf("pullup failed\n");
608 	return IP_FW_DENY;
609 }
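/*
 * Summary of the cmd_ctl values a filter function may hand back to the
 * dispatch loop in ipfw_chk() above (derived from the switch statement):
 *
 *	IP_FW_CTL_DONE		stop scanning; cmd_val is the verdict,
 *				unless a pending 'or' block already failed
 *	IP_FW_CTL_AGAIN		re-evaluate the current rule from its start
 *	IP_FW_CTL_NEXT		give up on this rule and try the next one
 *	IP_FW_CTL_NAT		record the rule in args->rule and stop
 *	IP_FW_CTL_CHK_STATE	jump straight to the rule's action part
 *
 * Any other value means no control action: cmd_val is treated as the
 * match result of this single filter cmd.
 */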
610 
611 static void
612 ipfw_dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa)
613 {
614 	struct m_tag *mtag;
615 	struct dn_pkt *pkt;
616 	ipfw_insn *cmd;
617 	const struct ipfw_flow_id *id;
618 	struct dn_flow_id *fid;
619 
620 	M_ASSERTPKTHDR(m);
621 
622 	mtag = m_tag_get(PACKET_TAG_DUMMYNET, sizeof(*pkt), M_NOWAIT);
623 	if (mtag == NULL) {
624 		m_freem(m);
625 		return;
626 	}
627 	m_tag_prepend(m, mtag);
628 
629 	pkt = m_tag_data(mtag);
630 	bzero(pkt, sizeof(*pkt));
631 
632 	cmd = fwa->rule->cmd + fwa->rule->act_ofs;
633 	KASSERT(cmd->opcode == O_DUMMYNET_PIPE ||
634 			cmd->opcode == O_DUMMYNET_QUEUE,
635 			("Rule is not PIPE or QUEUE, opcode %d", cmd->opcode));
636 
637 	pkt->dn_m = m;
638 	pkt->dn_flags = (dir & DN_FLAGS_DIR_MASK);
639 	pkt->ifp = fwa->oif;
640 	pkt->pipe_nr = pipe_nr;
641 
642 	pkt->msgport = netisr_curport();
643 
644 	id = &fwa->f_id;
645 	fid = &pkt->id;
646 	fid->fid_dst_ip = id->dst_ip;
647 	fid->fid_src_ip = id->src_ip;
648 	fid->fid_dst_port = id->dst_port;
649 	fid->fid_src_port = id->src_port;
650 	fid->fid_proto = id->proto;
651 	fid->fid_flags = id->flags;
652 
653 	pkt->dn_priv = fwa->rule;
654 
655 	if ((int)cmd->opcode == O_DUMMYNET_PIPE)
656 		pkt->dn_flags |= DN_FLAGS_IS_PIPE;
657 
658 	m->m_pkthdr.fw_flags |= DUMMYNET_MBUF_TAGGED;
659 }
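/*
 * Note on the dummynet round trip (descriptive): ipfw_dummynet_io()
 * above attaches a PACKET_TAG_DUMMYNET mbuf tag carrying a struct dn_pkt
 * whose dn_priv points at the matching rule.  When dummynet(4) re-injects
 * the packet, ipfw_check_in()/ipfw_check_out() find the tag
 * (DUMMYNET_MBUF_TAGGED), recover args.rule from dn_priv, and ipfw_chk()
 * resumes matching at the rule following the pipe/queue rule (unless
 * net.inet.ip.fw3.one_pass is set, in which case the packet is simply
 * accepted).
 */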
660 
661 static __inline void
662 ipfw_inc_static_count(struct ip_fw *rule)
663 {
664 	/* Static rule's counts are updated only on CPU0 */
665 	KKASSERT(mycpuid == 0);
666 
667 	static_count++;
668 	static_ioc_len += IOC_RULESIZE(rule);
669 }
670 
671 static __inline void
672 ipfw_dec_static_count(struct ip_fw *rule)
673 {
674 	int l = IOC_RULESIZE(rule);
675 
676 	/* Static rule's counts are updated only on CPU0 */
677 	KKASSERT(mycpuid == 0);
678 
679 	KASSERT(static_count > 0, ("invalid static count %u", static_count));
680 	static_count--;
681 
682 	KASSERT(static_ioc_len >= l,
683 			("invalid static len %u", static_ioc_len));
684 	static_ioc_len -= l;
685 }
686 
687 static void
688 ipfw_add_rule_dispatch(netmsg_t nmsg)
689 {
690 	struct netmsg_ipfw *fwmsg = (struct netmsg_ipfw *)nmsg;
691 	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
692 	struct ip_fw *rule, *prev,*next;
693 	const struct ipfw_ioc_rule *ioc_rule;
694 
695 	ioc_rule = fwmsg->ioc_rule;
696 	/* create the new rule from ioc_rule */
697 	rule = kmalloc(RULESIZE(ioc_rule), M_IPFW3, M_WAITOK | M_ZERO);
698 	rule->act_ofs = ioc_rule->act_ofs;
699 	rule->cmd_len = ioc_rule->cmd_len;
700 	rule->rulenum = ioc_rule->rulenum;
701 	rule->set = ioc_rule->set;
702 	bcopy(ioc_rule->cmd, rule->cmd, rule->cmd_len * 4);
703 
704 	for (prev = NULL, next = ctx->ipfw_rule_chain;
705 		next; prev = next, next = next->next) {
706 		if (next->rulenum > ioc_rule->rulenum) {
707 			break;
708 		}
709 	}
710 	KASSERT(next != NULL, ("no default rule?!"));
711 
712 	/*
713 	 * Insert rule into the pre-determined position
714 	 */
715 	if (prev != NULL) {
716 		rule->next = next;
717 		prev->next = rule;
718 	} else {
719 		rule->next = ctx->ipfw_rule_chain;
720 		ctx->ipfw_rule_chain = rule;
721 	}
722 
723 	/*
724 	 * If a sibling rule exists on the previous CPU,
725 	 * link its sibling pointer to the current rule.
726 	 */
727 	if (fwmsg->sibling != NULL) {
728 		fwmsg->sibling->sibling = rule;
729 	}
730 	/* prepare for next CPU */
731 	fwmsg->sibling = rule;
732 
733 	if (mycpuid == 0) {
734 		/* Statistics only need to be updated once */
735 		ipfw_inc_static_count(rule);
736 	}
737 	ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
738 }
739 
740 /*
741  * Confirm the rule number (auto-assign one if it is 0),
742  * call the dispatch function to add the rule into each CPU's list,
743  * and update the statistics.
744  */
745 static void
746 ipfw_add_rule(struct ipfw_ioc_rule *ioc_rule)
747 {
748 	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
749 	struct netmsg_ipfw fwmsg;
750 	struct netmsg_base *nmsg;
751 	struct ip_fw *f;
752 
753 	IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
754 
755 	/*
756 	 * If rulenum is 0, find highest numbered rule before the
757 	 * default rule, and add the auto-increment step to it.
758 	 */
759 	if (ioc_rule->rulenum == 0) {
760 		int step = autoinc_step;
761 
762 		KKASSERT(step >= IPFW_AUTOINC_STEP_MIN &&
763 				step <= IPFW_AUTOINC_STEP_MAX);
764 
765 		/*
766 		 * Locate the highest numbered rule before default
767 		 */
768 		for (f = ctx->ipfw_rule_chain; f; f = f->next) {
769 			if (f->rulenum == IPFW_DEFAULT_RULE)
770 				break;
771 			ioc_rule->rulenum = f->rulenum;
772 		}
773 		if (ioc_rule->rulenum < IPFW_DEFAULT_RULE - step)
774 			ioc_rule->rulenum += step;
775 	}
776 	KASSERT(ioc_rule->rulenum != IPFW_DEFAULT_RULE &&
777 			ioc_rule->rulenum != 0,
778 			("invalid rule num %d", ioc_rule->rulenum));
779 
780 	bzero(&fwmsg, sizeof(fwmsg));
781 	nmsg = &fwmsg.base;
782 	netmsg_init(nmsg, NULL, &curthread->td_msgport,
783 			0, ipfw_add_rule_dispatch);
784 	fwmsg.ioc_rule = ioc_rule;
785 
786 	ifnet_domsg(&nmsg->lmsg, 0);
787 
788 	DPRINTF("++ installed rule %d, static count now %d\n",
789 			ioc_rule->rulenum, static_count);
790 }
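/*
 * Worked example of the auto-increment logic above (illustrative): with
 * net.inet.ip.fw3.autoinc_step at its default of 100, if the highest
 * numbered rule before the default rule is 1200, a rule submitted with
 * rulenum 0 is installed as rule 1300.
 */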
791 
792 /**
793  * Free storage associated with a static rule (including derived
794  * dynamic rules).
795  * The caller is in charge of clearing rule pointers to avoid
796  * dangling pointers.
797  * @return a pointer to the next entry.
798  * Arguments are not checked, so they better be correct.
799  * Must be called at splimp().
800  */
801 static struct ip_fw *
802 ipfw_delete_rule(struct ipfw_context *ctx,
803 		 struct ip_fw *prev, struct ip_fw *rule)
804 {
805 	if (prev == NULL)
806 		ctx->ipfw_rule_chain = rule->next;
807 	else
808 		prev->next = rule->next;
809 
810 	if (mycpuid == IPFW_CFGCPUID)
811 		ipfw_dec_static_count(rule);
812 
813 	struct ip_fw *next = rule->next;
814 	kfree(rule, M_IPFW3);
815 	return next;
816 }
817 
818 static void
819 ipfw_flush_rule_dispatch(netmsg_t nmsg)
820 {
821 	struct lwkt_msg *lmsg = &nmsg->lmsg;
822 	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
823 	struct ip_fw *rule, *the_rule;
824 	int kill_default = lmsg->u.ms_result;
825 
826 	rule = ctx->ipfw_rule_chain;
827 	while (rule != NULL) {
828 		if (rule->rulenum == IPFW_DEFAULT_RULE && kill_default == 0) {
829 			ctx->ipfw_rule_chain = rule;
830 			break;
831 		}
832 		the_rule = rule;
833 		rule = rule->next;
834 		if (mycpuid == IPFW_CFGCPUID)
835 			ipfw_dec_static_count(the_rule);
836 
837 		kfree(the_rule, M_IPFW3);
838 	}
839 
840 	ifnet_forwardmsg(lmsg, mycpuid + 1);
841 }
842 
843 static void
844 ipfw_append_state_dispatch(netmsg_t nmsg)
845 {
846 	struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
847 	struct ipfw_ioc_state *ioc_state = dmsg->ioc_state;
848 	(*ipfw_basic_append_state_prt)(ioc_state);
849 	ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
850 }
851 
852 static void
853 ipfw_delete_state_dispatch(netmsg_t nmsg)
854 {
855 	struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
856 	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
857 	struct ip_fw *rule = ctx->ipfw_rule_chain;
858 	while (rule != NULL) {
859 		if (rule->rulenum == dmsg->rulenum) {
860 			break;
861 		}
862 		rule = rule->next;
863 	}
864 
865 	(*ipfw_basic_flush_state_prt)(rule);
866 	ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
867 }
868 
869 /*
870  * Deletes all rules from a chain (including the default rule
871  * if the second argument is set).
872  * Must be called at splimp().
873  */
874 static void
875 ipfw_ctl_flush_rule(int kill_default)
876 {
877 	struct netmsg_del dmsg;
878 	struct netmsg_base nmsg;
879 	struct lwkt_msg *lmsg;
880 
881 	IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
882 
883 	/*
884 	 * If 'kill_default' then caller has done the necessary
885 	 * msgport syncing; unnecessary to do it again.
886 	 */
887 	if (!kill_default) {
888 		/*
889 		 * Let ipfw_chk() know the rules are going to
890 		 * be flushed, so it could jump directly to
891 		 * the default rule.
892 		 */
893 		ipfw_flushing = 1;
894 		netmsg_service_sync();
895 	}
896 
897 	/*
898 	 * If a state flush handler is registered (ipfw_basic_flush_state_prt),
899 	 * flush all states on all CPUs.
900 	 */
901 	if (ipfw_basic_flush_state_prt != NULL) {
902 		bzero(&dmsg, sizeof(dmsg));
903 		netmsg_init(&dmsg.base, NULL, &curthread->td_msgport,
904 				0, ipfw_delete_state_dispatch);
905 		ifnet_domsg(&dmsg.base.lmsg, 0);
906 	}
907 	/*
908 	 * Press the 'flush' button
909 	 */
910 	bzero(&nmsg, sizeof(nmsg));
911 	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
912 			0, ipfw_flush_rule_dispatch);
913 	lmsg = &nmsg.lmsg;
914 	lmsg->u.ms_result = kill_default;
915 	ifnet_domsg(lmsg, 0);
916 
917 	if (kill_default) {
918 		KASSERT(static_count == 0,
919 				("%u static rules remain", static_count));
920 		KASSERT(static_ioc_len == 0,
921 				("%u bytes of static rules remain", static_ioc_len));
922 	}
923 
924 	/* Flush is done */
925 	ipfw_flushing = 0;
926 }
927 
928 static void
929 ipfw_delete_rule_dispatch(netmsg_t nmsg)
930 {
931 	struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
932 	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
933 	struct ip_fw *rule, *prev = NULL;
934 
935 	rule = ctx->ipfw_rule_chain;
936 	while (rule != NULL) {
937 		if (rule->rulenum == dmsg->rulenum) {
938 			ipfw_delete_rule(ctx, prev, rule);
939 			break;
940 		}
941 		prev = rule;
942 		rule = rule->next;
943 	}
944 
945 	ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
946 }
947 
948 static int
949 ipfw_alt_delete_rule(uint16_t rulenum)
950 {
951 	struct netmsg_del dmsg;
952 	struct netmsg_base *nmsg;
953 
954 	/*
955 	 * Delete the states whose stub is the rule with this
956 	 * rulenum, on every CPU.
957 	 */
958 	bzero(&dmsg, sizeof(dmsg));
959 	nmsg = &dmsg.base;
960 	netmsg_init(nmsg, NULL, &curthread->td_msgport,
961 			0, ipfw_delete_state_dispatch);
962 	dmsg.rulenum = rulenum;
963 	ifnet_domsg(&nmsg->lmsg, 0);
964 
965 	/*
966 	 * Get rid of the rule's duplicates on all CPUs
967 	 */
968 	bzero(&dmsg, sizeof(dmsg));
969 	nmsg = &dmsg.base;
970 	netmsg_init(nmsg, NULL, &curthread->td_msgport,
971 			0, ipfw_delete_rule_dispatch);
972 	dmsg.rulenum = rulenum;
973 	ifnet_domsg(&nmsg->lmsg, 0);
974 	return 0;
975 }
976 
977 static void
978 ipfw_alt_delete_ruleset_dispatch(netmsg_t nmsg)
979 {
980 	struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
981 	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
982 	struct ip_fw *prev, *rule;
983 #ifdef INVARIANTS
984 	int del = 0;
985 #endif
986 
987 	prev = NULL;
988 	rule = ctx->ipfw_rule_chain;
989 	while (rule != NULL) {
990 		if (rule->set == dmsg->from_set) {
991 			rule = ipfw_delete_rule(ctx, prev, rule);
992 #ifdef INVARIANTS
993 			del = 1;
994 #endif
995 		} else {
996 			prev = rule;
997 			rule = rule->next;
998 		}
999 	}
1000 	KASSERT(del, ("no match set?!"));
1001 
1002 	ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
1003 }
1004 
1005 static void
1006 ipfw_disable_ruleset_state_dispatch(netmsg_t nmsg)
1007 {
1008 	struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
1009 	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1010 	struct ip_fw *rule;
1011 #ifdef INVARIANTS
1012 	int cleared = 0;
1013 #endif
1014 
1015 	for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
1016 		if (rule->set == dmsg->from_set) {
1017 #ifdef INVARIANTS
1018 			cleared = 1;
1019 #endif
1020 		}
1021 	}
1022 	KASSERT(cleared, ("no match set?!"));
1023 
1024 	ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
1025 }
1026 
1027 static int
1028 ipfw_alt_delete_ruleset(uint8_t set)
1029 {
1030 	struct netmsg_del dmsg;
1031 	struct netmsg_base *nmsg;
1032 	int state, del;
1033 	struct ip_fw *rule;
1034 	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1035 
1036 	/*
1037 	 * Check whether the 'set' exists.  If it exists,
1038 	 * then check whether any rules within the set will
1039 	 * try to create states.
1040 	 */
1041 	state = 0;
1042 	del = 0;
1043 	for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
1044 		if (rule->set == set) {
1045 			del = 1;
1046 		}
1047 	}
1048 	if (!del)
1049 		return 0; /* XXX EINVAL? */
1050 
1051 	if (state) {
1052 		/*
1053 		 * Clear the STATE flag, so no more states will be
1054 		 * created based on the rules in this set.
1055 		 */
1056 		bzero(&dmsg, sizeof(dmsg));
1057 		nmsg = &dmsg.base;
1058 		netmsg_init(nmsg, NULL, &curthread->td_msgport,
1059 				0, ipfw_disable_ruleset_state_dispatch);
1060 		dmsg.from_set = set;
1061 
1062 		ifnet_domsg(&nmsg->lmsg, 0);
1063 	}
1064 
1065 	/*
1066 	 * Delete this set
1067 	 */
1068 	bzero(&dmsg, sizeof(dmsg));
1069 	nmsg = &dmsg.base;
1070 	netmsg_init(nmsg, NULL, &curthread->td_msgport,
1071 			0, ipfw_alt_delete_ruleset_dispatch);
1072 	dmsg.from_set = set;
1073 
1074 	ifnet_domsg(&nmsg->lmsg, 0);
1075 	return 0;
1076 }
1077 
1078 static void
1079 ipfw_alt_move_rule_dispatch(netmsg_t nmsg)
1080 {
1081 	struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
1082 	struct ip_fw *rule;
1083 
1084 	rule = dmsg->start_rule;
1085 
1086 	/*
1087 	 * Advance start_rule to this rule's sibling on the next CPU
1088 	 * before the msg is forwarded.
1089 	 */
1090 	dmsg->start_rule = rule->sibling;
1091 	while (rule && rule->rulenum <= dmsg->rulenum) {
1092 		if (rule->rulenum == dmsg->rulenum)
1093 			rule->set = dmsg->to_set;
1094 		rule = rule->next;
1095 	}
1096 	ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
1097 }
1098 
1099 static int
1100 ipfw_alt_move_rule(uint16_t rulenum, uint8_t set)
1101 {
1102 	struct netmsg_del dmsg;
1103 	struct netmsg_base *nmsg;
1104 	struct ip_fw *rule;
1105 	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1106 
1107 	/*
1108 	 * Locate first rule to move
1109 	 */
1110 	for (rule = ctx->ipfw_rule_chain;
1111 		rule && rule->rulenum <= rulenum; rule = rule->next) {
1112 		if (rule->rulenum == rulenum && rule->set != set)
1113 			break;
1114 	}
1115 	if (rule == NULL || rule->rulenum > rulenum)
1116 		return 0; /* XXX error? */
1117 
1118 	bzero(&dmsg, sizeof(dmsg));
1119 	nmsg = &dmsg.base;
1120 	netmsg_init(nmsg, NULL, &curthread->td_msgport,
1121 			0, ipfw_alt_move_rule_dispatch);
1122 	dmsg.start_rule = rule;
1123 	dmsg.rulenum = rulenum;
1124 	dmsg.to_set = set;
1125 
1126 	ifnet_domsg(&nmsg->lmsg, 0);
1127 	KKASSERT(dmsg.start_rule == NULL);
1128 	return 0;
1129 }
1130 
1131 static void
1132 ipfw_alt_move_ruleset_dispatch(netmsg_t nmsg)
1133 {
1134 	struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
1135 	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1136 	struct ip_fw *rule;
1137 
1138 	for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
1139 		if (rule->set == dmsg->from_set)
1140 			rule->set = dmsg->to_set;
1141 	}
1142 	ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
1143 }
1144 
1145 static int
1146 ipfw_alt_move_ruleset(uint8_t from_set, uint8_t to_set)
1147 {
1148 	struct netmsg_del dmsg;
1149 	struct netmsg_base *nmsg;
1150 
1151 	bzero(&dmsg, sizeof(dmsg));
1152 	nmsg = &dmsg.base;
1153 	netmsg_init(nmsg, NULL, &curthread->td_msgport,
1154 			0, ipfw_alt_move_ruleset_dispatch);
1155 	dmsg.from_set = from_set;
1156 	dmsg.to_set = to_set;
1157 
1158 	ifnet_domsg(&nmsg->lmsg, 0);
1159 	return 0;
1160 }
1161 
1162 static void
1163 ipfw_alt_swap_ruleset_dispatch(netmsg_t nmsg)
1164 {
1165 	struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
1166 	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1167 	struct ip_fw *rule;
1168 
1169 	for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
1170 		if (rule->set == dmsg->from_set)
1171 			rule->set = dmsg->to_set;
1172 		else if (rule->set == dmsg->to_set)
1173 			rule->set = dmsg->from_set;
1174 	}
1175 	ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
1176 }
1177 
1178 static int
1179 ipfw_alt_swap_ruleset(uint8_t set1, uint8_t set2)
1180 {
1181 	struct netmsg_del dmsg;
1182 	struct netmsg_base *nmsg;
1183 
1184 	bzero(&dmsg, sizeof(dmsg));
1185 	nmsg = &dmsg.base;
1186 	netmsg_init(nmsg, NULL, &curthread->td_msgport,
1187 			0, ipfw_alt_swap_ruleset_dispatch);
1188 	dmsg.from_set = set1;
1189 	dmsg.to_set = set2;
1190 
1191 	ifnet_domsg(&nmsg->lmsg, 0);
1192 	return 0;
1193 }
1194 
1195 
1196 static int
1197 ipfw_ctl_alter(uint32_t arg)
1198 {
1199 	uint16_t rulenum;
1200 	uint8_t cmd, new_set;
1201 	int error = 0;
1202 
1203 	rulenum = arg & 0xffff;
1204 	cmd = (arg >> 24) & 0xff;
1205 	new_set = (arg >> 16) & 0xff;
1206 
1207 	if (cmd > 4)
1208 		return EINVAL;
1209 	if (new_set >= IPFW_DEFAULT_SET)
1210 		return EINVAL;
1211 	if (cmd == 0 || cmd == 2) {
1212 		if (rulenum == IPFW_DEFAULT_RULE)
1213 			return EINVAL;
1214 	} else {
1215 		if (rulenum >= IPFW_DEFAULT_SET)
1216 			return EINVAL;
1217 	}
1218 
1219 	switch (cmd) {
1220 	case 0:	/* delete rules with given number */
1221 		error = ipfw_alt_delete_rule(rulenum);
1222 		break;
1223 
1224 	case 1:	/* delete all rules with given set number */
1225 		error = ipfw_alt_delete_ruleset(rulenum);
1226 		break;
1227 
1228 	case 2:	/* move rules with given number to new set */
1229 		error = ipfw_alt_move_rule(rulenum, new_set);
1230 		break;
1231 
1232 	case 3: /* move rules with given set number to new set */
1233 		error = ipfw_alt_move_ruleset(rulenum, new_set);
1234 		break;
1235 
1236 	case 4: /* swap two sets */
1237 		error = ipfw_alt_swap_ruleset(rulenum, new_set);
1238 		break;
1239 	}
1240 	return error;
1241 }
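/*
 * Worked example of the 32-bit argument decoded above (illustrative):
 * the layout is (cmd << 24) | (new_set << 16) | rulenum, so moving rule
 * 100 to set 3 (cmd 2) is encoded as
 *
 *	(2 << 24) | (3 << 16) | 100 == 0x02030064
 */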
1242 
1243 /*
1244  * Clear counters for a specific rule.
1245  */
1246 static void
1247 clear_counters(struct ip_fw *rule)
1248 {
1249 	rule->bcnt = rule->pcnt = 0;
1250 	rule->timestamp = 0;
1251 }
1252 
1253 static void
1254 ipfw_zero_entry_dispatch(netmsg_t nmsg)
1255 {
1256 	struct netmsg_zent *zmsg = (struct netmsg_zent *)nmsg;
1257 	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1258 	struct ip_fw *rule;
1259 
1260 	if (zmsg->rulenum == 0) {
1261 		for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
1262 			clear_counters(rule);
1263 		}
1264 	} else {
1265 		for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
1266 			if (rule->rulenum == zmsg->rulenum) {
1267 				clear_counters(rule);
1268 			}
1269 		}
1270 	}
1271 	ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
1272 }
1273 
1274 /**
1275  * Reset some or all counters on firewall rules.
1276  * @arg frwl is null to clear all entries, or contains a specific
1277  * rule number.
1278  * @arg log_only is 1 if we only want to reset logs, zero otherwise.
1279  */
1280 static int
1281 ipfw_ctl_zero_entry(int rulenum, int log_only)
1282 {
1283 	struct netmsg_zent zmsg;
1284 	struct netmsg_base *nmsg;
1285 	const char *msg;
1286 	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1287 
1288 	bzero(&zmsg, sizeof(zmsg));
1289 	nmsg = &zmsg.base;
1290 	netmsg_init(nmsg, NULL, &curthread->td_msgport,
1291 			0, ipfw_zero_entry_dispatch);
1292 	zmsg.log_only = log_only;
1293 
1294 	if (rulenum == 0) {
1295 		msg = log_only ? "ipfw: All logging counts reset.\n"
1296 				   : "ipfw: Accounting cleared.\n";
1297 	} else {
1298 		struct ip_fw *rule;
1299 
1300 		/*
1301 		 * Locate the first rule with 'rulenum'
1302 		 */
1303 		for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
1304 			if (rule->rulenum == rulenum)
1305 				break;
1306 		}
1307 		if (rule == NULL) /* we did not find any matching rules */
1308 			return (EINVAL);
1309 		zmsg.start_rule = rule;
1310 		zmsg.rulenum = rulenum;
1311 
1312 		msg = log_only ? "ipfw: Entry %d logging count reset.\n"
1313 				   : "ipfw: Entry %d cleared.\n";
1314 	}
1315 	ifnet_domsg(&nmsg->lmsg, 0);
1316 	KKASSERT(zmsg.start_rule == NULL);
1317 
1318 	if (fw_verbose)
1319 		log(LOG_SECURITY | LOG_NOTICE, msg, rulenum);
1320 	return (0);
1321 }
1322 
1323 static int
1324 ipfw_ctl_add_state(struct sockopt *sopt)
1325 {
1326 	struct ipfw_ioc_state *ioc_state;
1327 	ioc_state = sopt->sopt_val;
1328 	if (ipfw_basic_append_state_prt != NULL) {
1329 		struct netmsg_del dmsg;
1330 		bzero(&dmsg, sizeof(dmsg));
1331 		netmsg_init(&dmsg.base, NULL, &curthread->td_msgport,
1332 			0, ipfw_append_state_dispatch);
1333 		dmsg.ioc_state = ioc_state;
1334 		ifnet_domsg(&dmsg.base.lmsg, 0);
1335 	}
1336 	return 0;
1337 }
1338 
1339 static int
1340 ipfw_ctl_delete_state(struct sockopt *sopt)
1341 {
1342 	int rulenum = 0, error;
1343 	if (sopt->sopt_valsize != 0) {
1344 		error = soopt_to_kbuf(sopt, &rulenum, sizeof(int), sizeof(int));
1345 		if (error) {
1346 			return -1;
1347 		}
1348 	}
1349 	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1350 	struct ip_fw *rule = ctx->ipfw_rule_chain;
1351 
1352 	while (rule != NULL) {
1353 		if (rule->rulenum == rulenum) {
1354 			break;
1355 		}
1356 		rule = rule->next;
1357 	}
1358 	if (rule == NULL) {
1359 		return -1;
1360 	}
1361 
1362 	struct netmsg_del dmsg;
1363 	struct netmsg_base *nmsg;
1364 	/*
1365 	 * Delete the states whose stub is the rule with this
1366 	 * rulenum, on every CPU.
1367 	 */
1368 	bzero(&dmsg, sizeof(dmsg));
1369 	nmsg = &dmsg.base;
1370 	netmsg_init(nmsg, NULL, &curthread->td_msgport,
1371 			0, ipfw_delete_state_dispatch);
1372 	dmsg.rulenum = rulenum;
1373 	ifnet_domsg(&nmsg->lmsg, 0);
1374 	return 0;
1375 }
1376 
1377 static int
1378 ipfw_ctl_flush_state(struct sockopt *sopt)
1379 {
1380 	struct netmsg_del dmsg;
1381 	struct netmsg_base *nmsg;
1382 	/*
1383 	 * Flush all states on every CPU (rulenum 0 does not
1384 	 * select any specific stub rule).
1385 	 */
1386 	bzero(&dmsg, sizeof(dmsg));
1387 	nmsg = &dmsg.base;
1388 	netmsg_init(nmsg, NULL, &curthread->td_msgport,
1389 			0, ipfw_delete_state_dispatch);
1390 	dmsg.rulenum = 0;
1391 	ifnet_domsg(&nmsg->lmsg, 0);
1392 	return 0;
1393 }
1394 
1395 /*
1396  * Get the ioc_rule from the sopt
1397  * call ipfw_add_rule to add the rule
1398  */
1399 static int
1400 ipfw_ctl_add_rule(struct sockopt *sopt)
1401 {
1402 	struct ipfw_ioc_rule *ioc_rule;
1403 	size_t size;
1404 
1405 	size = sopt->sopt_valsize;
1406 	if (size > (sizeof(uint32_t) * IPFW_RULE_SIZE_MAX) ||
1407 			size < sizeof(*ioc_rule)) {
1408 		return EINVAL;
1409 	}
1410 	if (size != (sizeof(uint32_t) * IPFW_RULE_SIZE_MAX)) {
1411 		sopt->sopt_val = krealloc(sopt->sopt_val, sizeof(uint32_t) *
1412 				IPFW_RULE_SIZE_MAX, M_TEMP, M_WAITOK);
1413 	}
1414 	ioc_rule = sopt->sopt_val;
1415 
1416 	ipfw_add_rule(ioc_rule);
1417 	return 0;
1418 }
1419 
1420 static void *
1421 ipfw_copy_state(struct ip_fw_state *state, struct ipfw_ioc_state *ioc_state, int cpuid)
1422 {
1423 	ioc_state->pcnt = state->pcnt;
1424 	ioc_state->bcnt = state->bcnt;
1425 	ioc_state->lifetime = state->lifetime;
1426 	ioc_state->timestamp = state->timestamp;
1427 	ioc_state->cpuid = cpuid;
1428 	ioc_state->expiry = state->expiry;
1429 	ioc_state->rulenum = state->stub->rulenum;
1430 
1431 	bcopy(&state->flow_id, &ioc_state->flow_id, sizeof(struct ipfw_flow_id));
1432 	return ioc_state + 1;
1433 }
1434 
1435 static void *
1436 ipfw_copy_rule(const struct ip_fw *rule, struct ipfw_ioc_rule *ioc_rule)
1437 {
1438 	const struct ip_fw *sibling;
1439 #ifdef INVARIANTS
1440 	int i;
1441 #endif
1442 
1443 	ioc_rule->act_ofs = rule->act_ofs;
1444 	ioc_rule->cmd_len = rule->cmd_len;
1445 	ioc_rule->rulenum = rule->rulenum;
1446 	ioc_rule->set = rule->set;
1447 
1448 	ioc_rule->set_disable = ipfw_ctx[mycpuid]->ipfw_set_disable;
1449 	ioc_rule->static_count = static_count;
1450 	ioc_rule->static_len = static_ioc_len;
1451 
1456 #ifdef INVARIANTS
1457 	i = 0;
1458 #endif
1459 	ioc_rule->pcnt = 0;
1460 	ioc_rule->bcnt = 0;
1461 	ioc_rule->timestamp = 0;
1462 	for (sibling = rule; sibling != NULL; sibling = sibling->sibling) {
1463 		ioc_rule->pcnt += sibling->pcnt;
1464 		ioc_rule->bcnt += sibling->bcnt;
1465 		if (sibling->timestamp > ioc_rule->timestamp)
1466 			ioc_rule->timestamp = sibling->timestamp;
1467 #ifdef INVARIANTS
1468 		++i;
1469 #endif
1470 	}
1471 
1472 	KASSERT(i == ncpus, ("static rule is not duplicated on every cpu"));
1473 
1474 	bcopy(rule->cmd, ioc_rule->cmd, ioc_rule->cmd_len * 4 /* XXX */);
1475 
1476 	return ((uint8_t *)ioc_rule + IOC_RULESIZE(ioc_rule));
1477 }
1478 
1479 static int
1480 ipfw_ctl_get_modules(struct sockopt *sopt)
1481 {
1482 	int i;
1483 	struct ipfw_module *mod;
1484 	char module_str[1024];
1485 	memset(module_str, 0, sizeof(module_str));
1486 	for (i = 0, mod = ipfw_modules; i < MAX_MODULE; i++, mod++) {
1487 		if (mod->type != 0) {
1488 			if (module_str[0] != '\0')
1489 				strcat(module_str, ",");
1490 			strcat(module_str, mod->name);
1491 		}
1492 	}
1493 	bzero(sopt->sopt_val, sopt->sopt_valsize);
1494 	bcopy(module_str, sopt->sopt_val, strlen(module_str));
1495 	sopt->sopt_valsize = strlen(module_str);
1496 	return 0;
1497 }
1498 
1499 /*
1500  * Copy out all static rules and the states from every CPU
1501  */
1502 static int
1503 ipfw_ctl_get_rules(struct sockopt *sopt)
1504 {
1505 	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1506 	struct ipfw_state_context *state_ctx;
1507 	struct ip_fw *rule;
1508 	struct ip_fw_state *state;
1509 	void *bp;
1510 	size_t size;
1511 	int i, j, state_count = 0;
1512 
1513 	size = static_ioc_len;
1514 	for (i = 0; i < ncpus; i++) {
1515 		for (j = 0; j < ctx->state_hash_size; j++) {
1516 			state_ctx = &ipfw_ctx[i]->state_ctx[j];
1517 			state_count += state_ctx->count;
1518 		}
1519 	}
1520 	if (state_count > 0) {
1521 		size += state_count * sizeof(struct ipfw_ioc_state);
1522 	}
1523 
1524 	if (sopt->sopt_valsize < size) {
1525 		/* XXX TODO sopt_val is not big enough */
1526 		bzero(sopt->sopt_val, sopt->sopt_valsize);
1527 		return 0;
1528 	}
1529 
1530 	sopt->sopt_valsize = size;
1531 	bp = sopt->sopt_val;
1532 
1533 	for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
1534 		bp = ipfw_copy_rule(rule, bp);
1535 	}
1536 	if (state_count > 0) {
1537 		for (i = 0; i < ncpus; i++) {
1538 			for (j = 0; j < ctx->state_hash_size; j++) {
1539 				state_ctx = &ipfw_ctx[i]->state_ctx[j];
1540 				state = state_ctx->state;
1541 				while (state != NULL) {
1542 					bp = ipfw_copy_state(state, bp, i);
1543 					state = state->next;
1544 				}
1545 			}
1546 		}
1547 	}
1548 	return 0;
1549 }
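/*
 * Layout of the buffer returned by ipfw_ctl_get_rules() above
 * (descriptive): first every static rule as a struct ipfw_ioc_rule of
 * IOC_RULESIZE() bytes, then one struct ipfw_ioc_state for each state
 * found on any CPU.  If the caller's buffer is too small it is merely
 * zeroed (see the XXX TODO above).
 */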
1550 
1551 static void
1552 ipfw_set_disable_dispatch(netmsg_t nmsg)
1553 {
1554 	struct lwkt_msg *lmsg = &nmsg->lmsg;
1555 	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1556 
1557 	ctx->ipfw_set_disable = lmsg->u.ms_result32;
1558 
1559 	ifnet_forwardmsg(lmsg, mycpuid + 1);
1560 }
1561 
1562 static void
1563 ipfw_ctl_set_disable(uint32_t disable, uint32_t enable)
1564 {
1565 	struct netmsg_base nmsg;
1566 	struct lwkt_msg *lmsg;
1567 	uint32_t set_disable;
1568 
1569 	/* IPFW_DEFAULT_SET is always enabled */
1570 	enable |= (1 << IPFW_DEFAULT_SET);
1571 	set_disable = (ipfw_ctx[mycpuid]->ipfw_set_disable | disable) & ~enable;
1572 
1573 	bzero(&nmsg, sizeof(nmsg));
1574 	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
1575 			0, ipfw_set_disable_dispatch);
1576 	lmsg = &nmsg.lmsg;
1577 	lmsg->u.ms_result32 = set_disable;
1578 
1579 	ifnet_domsg(lmsg, 0);
1580 }
1581 
1582 
1583 /*
1584  * ipfw_ctl_x - extended version of ipfw_ctl
1585  * Remove the x_header and adjust sopt_name, sopt_val and sopt_valsize.
1586  */
1587 int
1588 ipfw_ctl_x(struct sockopt *sopt)
1589 {
1590 	ip_fw_x_header *x_header;
1591 	x_header = (ip_fw_x_header *)(sopt->sopt_val);
1592 	sopt->sopt_name = x_header->opcode;
1593 	sopt->sopt_valsize -= sizeof(ip_fw_x_header);
1594 	bcopy(++x_header, sopt->sopt_val, sopt->sopt_valsize);
1595 	return ipfw_ctl(sopt);
1596 }
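/*
 * Sketch of the IP_FW_X framing consumed above (the full ip_fw_x_header
 * layout lives in the header; only the opcode field is relied upon
 * here).  The caller is assumed to send
 *
 *	[ ip_fw_x_header { opcode = <real sockopt, e.g. IP_FW_GET> } ]
 *	[ payload for that sockopt ... ]
 *
 * and ipfw_ctl_x() strips the header, shifts the payload down and
 * re-enters ipfw_ctl() with the embedded opcode.
 */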
1597 
1598 
1599 /**
1600  * {set|get}sockopt parser.
1601  */
1602 static int
1603 ipfw_ctl(struct sockopt *sopt)
1604 {
1605 	int error, rulenum;
1606 	uint32_t *masks;
1607 	size_t size;
1608 
1609 	error = 0;
1610 	switch (sopt->sopt_name) {
1611 		case IP_FW_X:
1612 			error = ipfw_ctl_x(sopt);
1613 			break;
1614 		case IP_FW_GET:
1615 			error = ipfw_ctl_get_rules(sopt);
1616 			break;
1617 		case IP_FW_MODULE:
1618 			error = ipfw_ctl_get_modules(sopt);
1619 			break;
1620 
1621 		case IP_FW_FLUSH:
1622 			ipfw_ctl_flush_rule(0);
1623 			break;
1624 
1625 		case IP_FW_ADD:
1626 			error = ipfw_ctl_add_rule(sopt);
1627 			break;
1628 
1629 		case IP_FW_DEL:
1630 			/*
1631 			 * IP_FW_DEL is used for deleting single rules or sets,
1632 			 * and (ab)used to atomically manipulate sets.
1633 			 * Argument size is used to distinguish between the two:
1634 			 *	sizeof(uint32_t)
1635 			 *	delete single rule or set of rules,
1636 			 *	or reassign rules (or sets) to a different set.
1637 			 *	2 * sizeof(uint32_t)
1638 			 *	atomic disable/enable sets.
1639 			 *	first uint32_t contains sets to be disabled,
1640 			 *	second uint32_t contains sets to be enabled.
1641 			 */
1642 			masks = sopt->sopt_val;
1643 			size = sopt->sopt_valsize;
1644 			if (size == sizeof(*masks)) {
1645 				/*
1646 				 * Delete or reassign static rule
1647 				 */
1648 				error = ipfw_ctl_alter(masks[0]);
1649 			} else if (size == (2 * sizeof(*masks))) {
1650 				/*
1651 				 * Set enable/disable
1652 				 */
1653 				ipfw_ctl_set_disable(masks[0], masks[1]);
1654 			} else {
1655 				error = EINVAL;
1656 			}
1657 			break;
1658 		case IP_FW_ZERO:
1659 		case IP_FW_RESETLOG: /* argument is an int, the rule number */
1660 			rulenum = 0;
1661 			if (sopt->sopt_valsize != 0) {
1662 				error = soopt_to_kbuf(sopt, &rulenum,
1663 						sizeof(int), sizeof(int));
1664 				if (error) {
1665 					break;
1666 				}
1667 			}
1668 			error = ipfw_ctl_zero_entry(rulenum,
1669 					sopt->sopt_name == IP_FW_RESETLOG);
1670 			break;
1671 		case IP_FW_NAT_CFG:
1672 			error = ipfw_nat_cfg_ptr(sopt);
1673 			break;
1674 		case IP_FW_NAT_DEL:
1675 			error = ipfw_nat_del_ptr(sopt);
1676 			break;
1677 		case IP_FW_NAT_FLUSH:
1678 			error = ipfw_nat_flush_ptr(sopt);
1679 			break;
1680 		case IP_FW_NAT_GET:
1681 			error = ipfw_nat_get_cfg_ptr(sopt);
1682 			break;
1683 		case IP_FW_NAT_LOG:
1684 			error = ipfw_nat_get_log_ptr(sopt);
1685 			break;
1686 		case IP_DUMMYNET_GET:
1687 		case IP_DUMMYNET_CONFIGURE:
1688 		case IP_DUMMYNET_DEL:
1689 		case IP_DUMMYNET_FLUSH:
1690 			error = ip_dn_sockopt(sopt);
1691 			break;
1692 		case IP_FW_STATE_ADD:
1693 			error = ipfw_ctl_add_state(sopt);
1694 			break;
1695 		case IP_FW_STATE_DEL:
1696 			error = ipfw_ctl_delete_state(sopt);
1697 			break;
1698 		case IP_FW_STATE_FLUSH:
1699 			error = ipfw_ctl_flush_state(sopt);
1700 			break;
1701 		default:
1702 			kprintf("ipfw_ctl invalid option %d\n",
1703 				sopt->sopt_name);
1704 			error = EINVAL;
1705 	}
1706 	return error;
1707 }
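/*
 * Hedged sketch of how a userland tool might reach this {set|get}sockopt
 * parser; the socket level and buffer handling are assumptions, only the
 * option names come from the switch above:
 *
 *	int s = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
 *	socklen_t len = sizeof(buf);
 *	setsockopt(s, IPPROTO_IP, IP_FW_FLUSH, NULL, 0);
 *	getsockopt(s, IPPROTO_IP, IP_FW_GET, buf, &len);
 */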
1708 
1709 static int
1710 ipfw_check_in(void *arg, struct mbuf **m0, struct ifnet *ifp, int dir)
1711 {
1712 	struct ip_fw_args args;
1713 	struct mbuf *m = *m0;
1714 	struct m_tag *mtag;
1715 	int tee = 0, error = 0, ret;
1716 	// again:
1717 	if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED) {
1718 		/* Extract info from dummynet tag */
1719 		mtag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL);
1720 		KKASSERT(mtag != NULL);
1721 		args.rule = ((struct dn_pkt *)m_tag_data(mtag))->dn_priv;
1722 		KKASSERT(args.rule != NULL);
1723 
1724 		m_tag_delete(m, mtag);
1725 		m->m_pkthdr.fw_flags &= ~DUMMYNET_MBUF_TAGGED;
1726 	} else {
1727 		args.rule = NULL;
1728 	}
1729 
1730 	args.eh = NULL;
1731 	args.oif = NULL;
1732 	args.m = m;
1733 	ret = ipfw_chk(&args);
1734 	m = args.m;
1735 
1736 	if (m == NULL) {
1737 		error = EACCES;
1738 		goto back;
1739 	}
1740 	switch (ret) {
1741 		case IP_FW_PASS:
1742 			break;
1743 
1744 		case IP_FW_DENY:
1745 			m_freem(m);
1746 			m = NULL;
1747 			error = EACCES;
1748 			break;
1749 
1750 		case IP_FW_DUMMYNET:
1751 			/* Send packet to the appropriate pipe */
1752 			ipfw_dummynet_io(m, args.cookie, DN_TO_IP_IN, &args);
1753 			break;
1754 
1755 		case IP_FW_TEE:
1756 			tee = 1;
1757 			/* FALL THROUGH */
1758 
1759 		case IP_FW_DIVERT:
1760 			/*
1761 			 * Must clear bridge tag when changing
1762 			 */
1763 			m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED;
1764 			if (ip_divert_p != NULL) {
1765 				m = ip_divert_p(m, tee, 1);
1766 			} else {
1767 				m_freem(m);
1768 				m = NULL;
1769 				/* not sure this is the right error msg */
1770 				error = EACCES;
1771 			}
1772 			break;
1773 
1774 		case IP_FW_NAT:
1775 			break;
1776 		case IP_FW_ROUTE:
1777 			break;
1778 		default:
1779 			panic("unknown ipfw return value: %d", ret);
1780 	}
1781 back:
1782 	*m0 = m;
1783 	return error;
1784 }
1785 
1786 static int
1787 ipfw_check_out(void *arg, struct mbuf **m0, struct ifnet *ifp, int dir)
1788 {
1789 	struct ip_fw_args args;
1790 	struct mbuf *m = *m0;
1791 	struct m_tag *mtag;
1792 	int tee = 0, error = 0, ret;
1793 	// again:
1794 	if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED) {
1795 		/* Extract info from dummynet tag */
1796 		mtag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL);
1797 		KKASSERT(mtag != NULL);
1798 		args.rule = ((struct dn_pkt *)m_tag_data(mtag))->dn_priv;
1799 		KKASSERT(args.rule != NULL);
1800 
1801 		m_tag_delete(m, mtag);
1802 		m->m_pkthdr.fw_flags &= ~DUMMYNET_MBUF_TAGGED;
1803 	} else {
1804 		args.rule = NULL;
1805 	}
1806 
1807 	args.eh = NULL;
1808 	args.m = m;
1809 	args.oif = ifp;
1810 	ret = ipfw_chk(&args);
1811 	m = args.m;
1812 
1813 	if (m == NULL) {
1814 		error = EACCES;
1815 		goto back;
1816 	}
1817 
1818 	switch (ret) {
1819 		case IP_FW_PASS:
1820 			break;
1821 
1822 		case IP_FW_DENY:
1823 			m_freem(m);
1824 			m = NULL;
1825 			error = EACCES;
1826 			break;
1827 
1828 		case IP_FW_DUMMYNET:
1829 			ipfw_dummynet_io(m, args.cookie, DN_TO_IP_OUT, &args);
1830 			break;
1831 
1832 		case IP_FW_TEE:
1833 			tee = 1;
1834 			/* FALL THROUGH */
1835 
1836 		case IP_FW_DIVERT:
1837 			if (ip_divert_p != NULL) {
1838 				m = ip_divert_p(m, tee, 0);
1839 			} else {
1840 				m_freem(m);
1841 				m = NULL;
1842 				/* not sure this is the right error msg */
1843 				error = EACCES;
1844 			}
1845 			break;
1846 
1847 		case IP_FW_NAT:
1848 			break;
1849 		case IP_FW_ROUTE:
1850 			break;
1851 		default:
1852 			panic("unknown ipfw return value: %d", ret);
1853 	}
1854 back:
1855 	*m0 = m;
1856 	return error;
1857 }
1858 
1859 static void
1860 ipfw_hook(void)
1861 {
1862 	struct pfil_head *pfh;
1863 	IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
1864 
1865 	pfh = pfil_head_get(PFIL_TYPE_AF, AF_INET);
1866 	if (pfh == NULL)
1867 		return;
1868 
1869 	pfil_add_hook(ipfw_check_in, NULL, PFIL_IN | PFIL_MPSAFE, pfh);
1870 	pfil_add_hook(ipfw_check_out, NULL, PFIL_OUT | PFIL_MPSAFE, pfh);
1871 }
1872 
1873 static void
1874 ipfw_dehook(void)
1875 {
1876 	struct pfil_head *pfh;
1877 
1878 	IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
1879 
1880 	pfh = pfil_head_get(PFIL_TYPE_AF, AF_INET);
1881 	if (pfh == NULL)
1882 		return;
1883 
1884 	pfil_remove_hook(ipfw_check_in, NULL, PFIL_IN, pfh);
1885 	pfil_remove_hook(ipfw_check_out, NULL, PFIL_OUT, pfh);
1886 }
1887 
1888 static void
1889 ipfw_sysctl_enable_dispatch(netmsg_t nmsg)
1890 {
1891 	struct lwkt_msg *lmsg = &nmsg->lmsg;
1892 	int enable = lmsg->u.ms_result;
1893 
1894 	if (fw3_enable == enable)
1895 		goto reply;
1896 
1897 	fw3_enable = enable;
1898 	if (fw3_enable)
1899 		ipfw_hook();
1900 	else
1901 		ipfw_dehook();
1902 
1903 reply:
1904 	lwkt_replymsg(lmsg, 0);
1905 }
1906 
1907 static int
1908 ipfw_sysctl_enable(SYSCTL_HANDLER_ARGS)
1909 {
1910 	struct netmsg_base nmsg;
1911 	struct lwkt_msg *lmsg;
1912 	int enable, error;
1913 
1914 	enable = fw3_enable;
1915 	error = sysctl_handle_int(oidp, &enable, 0, req);
1916 	if (error || req->newptr == NULL)
1917 		return error;
1918 
1919 	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
1920 			0, ipfw_sysctl_enable_dispatch);
1921 	lmsg = &nmsg.lmsg;
1922 	lmsg->u.ms_result = enable;
1923 
1924 	return lwkt_domsg(IPFW_CFGPORT, lmsg, 0);
1925 }
1926 
1927 static int
1928 ipfw_sysctl_autoinc_step(SYSCTL_HANDLER_ARGS)
1929 {
1930 	return sysctl_int_range(oidp, arg1, arg2, req,
1931 			IPFW_AUTOINC_STEP_MIN, IPFW_AUTOINC_STEP_MAX);
1932 }
1933 
1934 
1935 static void
1936 ipfw_ctx_init_dispatch(netmsg_t nmsg)
1937 {
1938 	struct netmsg_ipfw *fwmsg = (struct netmsg_ipfw *)nmsg;
1939 	struct ipfw_context *ctx;
1940 	struct ip_fw *def_rule;
1941 
1942 	if (mycpuid == 0 ) {
1943 		ipfw_nat_ctx = kmalloc(sizeof(struct ipfw_nat_context),
1944 				M_IPFW3, M_WAITOK | M_ZERO);
1945 	}
1946 
1947 	ctx = kmalloc(sizeof(struct ipfw_context), M_IPFW3, M_WAITOK | M_ZERO);
1948 	ipfw_ctx[mycpuid] = ctx;
1949 
1950 	def_rule = kmalloc(sizeof(struct ip_fw), M_IPFW3, M_WAITOK | M_ZERO);
1951 	def_rule->act_ofs = 0;
1952 	def_rule->rulenum = IPFW_DEFAULT_RULE;
1953 	def_rule->cmd_len = 2;
1954 	def_rule->set = IPFW_DEFAULT_SET;
1955 
1956 	def_rule->cmd[0].len = LEN_OF_IPFWINSN;
1957 	def_rule->cmd[0].module = MODULE_BASIC_ID;
1958 #ifdef IPFIREWALL_DEFAULT_TO_ACCEPT
1959 	def_rule->cmd[0].opcode = O_BASIC_ACCEPT;
1960 #else
1961 	if (filters_default_to_accept)
1962 		def_rule->cmd[0].opcode = O_BASIC_ACCEPT;
1963 	else
1964 		def_rule->cmd[0].opcode = O_BASIC_DENY;
1965 #endif
1966 
1967 	/* Install the default rule */
1968 	ctx->ipfw_default_rule = def_rule;
1969 	ctx->ipfw_rule_chain = def_rule;
1970 
1971 	/*
1972 	 * If a sibling rule exists on the previous CPU,
1973 	 * link its sibling pointer to this CPU's default rule.
1974 	 */
1975 	if (fwmsg->sibling != NULL) {
1976 		fwmsg->sibling->sibling = def_rule;
1977 	}
1978 	/* prepare for next CPU */
1979 	fwmsg->sibling = def_rule;
1980 
1981 	/* Statistics only need to be updated once */
1982 	if (mycpuid == 0)
1983 		ipfw_inc_static_count(def_rule);
1984 
1985 	ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
1986 }
1987 
1988 static void
1989 ipfw_init_dispatch(netmsg_t nmsg)
1990 {
1991 	struct netmsg_ipfw fwmsg;
1992 	int error = 0;
1993 	if (IPFW3_LOADED) {
1994 		kprintf("IP firewall already loaded\n");
1995 		error = EEXIST;
1996 		goto reply;
1997 	}
1998 
1999 	bzero(&fwmsg, sizeof(fwmsg));
2000 	netmsg_init(&fwmsg.base, NULL, &curthread->td_msgport,
2001 			0, ipfw_ctx_init_dispatch);
2002 	ifnet_domsg(&fwmsg.base.lmsg, 0);
2003 
2004 	ip_fw_chk_ptr = ipfw_chk;
2005 	ip_fw_ctl_x_ptr = ipfw_ctl_x;
2006 	ip_fw_dn_io_ptr = ipfw_dummynet_io;
2007 
2008 	kprintf("ipfw3 initialized, default to %s, logging ",
2009 		(int)(ipfw_ctx[mycpuid]->ipfw_default_rule->cmd[0].opcode) ==
2010 		O_BASIC_ACCEPT ? "accept" : "deny");
2011 
2012 #ifdef IPFIREWALL_VERBOSE
2013 	fw_verbose = 1;
2014 #endif
2015 #ifdef IPFIREWALL_VERBOSE_LIMIT
2016 	verbose_limit = IPFIREWALL_VERBOSE_LIMIT;
2017 #endif
2018 	if (fw_verbose == 0) {
2019 		kprintf("disabled ");
2020 	} else if (verbose_limit == 0) {
2021 		kprintf("unlimited ");
2022 	} else {
2023 		kprintf("limited to %d packets/entry by default ",
2024 				verbose_limit);
2025 	}
2026 	kprintf("\n");
2027 	ip_fw3_loaded = 1;
2028 	if (fw3_enable)
2029 		ipfw_hook();
2030 reply:
2031 	lwkt_replymsg(&nmsg->lmsg, error);
2032 }
2033 
2034 static int
2035 ipfw3_init(void)
2036 {
2037 	struct netmsg_base smsg;
2038 	init_module();
2039 	netmsg_init(&smsg, NULL, &curthread->td_msgport,
2040 			0, ipfw_init_dispatch);
2041 	return lwkt_domsg(IPFW_CFGPORT, &smsg.lmsg, 0);
2042 }
2043 
2044 #ifdef KLD_MODULE
2045 
2046 static void
2047 ipfw_fini_dispatch(netmsg_t nmsg)
2048 {
2049 	int error = 0, cpu;
2050 
2051 	ip_fw3_loaded = 0;
2052 
2053 	ipfw_dehook();
2054 	netmsg_service_sync();
2055 	ip_fw_chk_ptr = NULL;
2056 	ip_fw_ctl_x_ptr = NULL;
2057 	ip_fw_dn_io_ptr = NULL;
2058 	ipfw_ctl_flush_rule(1 /* kill default rule */);
2059 	/* Free pre-cpu context */
2060 	for (cpu = 0; cpu < ncpus; ++cpu) {
2061 		if (ipfw_ctx[cpu] != NULL) {
2062 			kfree(ipfw_ctx[cpu], M_IPFW3);
2063 			ipfw_ctx[cpu] = NULL;
2064 		}
2065 	}
2066 	kfree(ipfw_nat_ctx, M_IPFW3);
2067 	ipfw_nat_ctx = NULL;
2068 	kprintf("IP firewall unloaded\n");
2069 
2070 	lwkt_replymsg(&nmsg->lmsg, error);
2071 }
2072 
2073 static int
2074 ipfw3_fini(void)
2075 {
2076 	struct netmsg_base smsg;
2077 	netmsg_init(&smsg, NULL, &curthread->td_msgport,
2078 			0, ipfw_fini_dispatch);
2079 	return lwkt_domsg(IPFW_CFGPORT, &smsg.lmsg, 0);
2080 }
2081 
2082 #endif	/* KLD_MODULE */
2083 
2084 static int
2085 ipfw3_modevent(module_t mod, int type, void *unused)
2086 {
2087 	int err = 0;
2088 
2089 	switch (type) {
2090 		case MOD_LOAD:
2091 			err = ipfw3_init();
2092 			break;
2093 
2094 		case MOD_UNLOAD:
2095 
2096 #ifndef KLD_MODULE
2097 			kprintf("ipfw statically compiled, cannot unload\n");
2098 			err = EBUSY;
2099 #else
2100 			err = ipfw3_fini();
2101 #endif
2102 			break;
2103 		default:
2104 			break;
2105 	}
2106 	return err;
2107 }
2108 
2109 static moduledata_t ipfw3mod = {
2110 	"ipfw3",
2111 	ipfw3_modevent,
2112 	0
2113 };
2114 DECLARE_MODULE(ipfw3, ipfw3mod, SI_SUB_PROTO_END, SI_ORDER_ANY);
2115 MODULE_VERSION(ipfw3, 1);
2116