/*
 * Copyright (c) 2002 Luigi Rizzo, Universita` di Pisa
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/netinet/ip_fw2.c,v 1.6.2.12 2003/04/08 10:42:32 maxim Exp $
 * $DragonFly: src/sys/net/ipfw/ip_fw2.c,v 1.100 2008/11/22 11:03:35 sephe Exp $
 */

/*
 * Implement IP packet firewall (new version)
 */

#include "opt_ipfw.h"
#include "opt_inet.h"
#ifndef INET
#error IPFIREWALL requires INET.
#endif /* INET */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/ucred.h>
#include <sys/in_cksum.h>
#include <sys/lock.h>

#include <net/if.h>
#include <net/route.h>
#include <net/pfil.h>
#include <net/dummynet/ip_dummynet.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>
#include <net/netmsg2.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_icmp.h>
#include <netinet/tcp.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <netinet/ip_divert.h>
#include <netinet/if_ether.h>	/* XXX for ETHERTYPE_IP */

#include <net/ipfw/ip_fw2.h>

#ifdef IPFIREWALL_DEBUG
#define DPRINTF(fmt, ...)			\
do {						\
	if (fw_debug > 0)			\
		kprintf(fmt, __VA_ARGS__);	\
} while (0)
#else
#define DPRINTF(fmt, ...)	((void)0)
#endif

/*
 * Description of per-CPU rule duplication:
 *
 * Module loading/unloading and all ioctl operations are serialized
 * by netisr0, so we don't have any ordering or locking problems.
 *
 * The following graph shows how operations on the per-CPU rule list
 * are performed [2 CPU case]:
 *
 *    CPU0                                      CPU1
 *
 * netisr0 <------------------------------------+
 *  domsg                                       |
 *    |                                         |
 *    | netmsg                                  |
 *    |                                         |
 *    V                                         |
 *  ifnet0                                      |
 *    :                                         | netmsg
 *    :(delete/add...)                          |
 *    :                                         |
 *    :        netmsg                           |
 *    forwardmsg---------->ifnet1               |
 *                           :                  |
 *                           :(delete/add...)   |
 *                           :                  |
 *                           :                  |
 *                           replymsg-----------+
 *
 *
 *
 * Rules which will not create states (non-dyn rules) [2 CPU case]
 *
 *    CPU0                       CPU1
 * layer3_chain             layer3_chain
 *     |                         |
 *     V                         V
 * +-------+ sibling         +-------+ sibling
 * | rule1 |---------------->| rule1 |--------->NULL
 * +-------+                 +-------+
 *     |                         |
 *     |next                     |next
 *     V                         V
 * +-------+ sibling         +-------+ sibling
 * | rule2 |---------------->| rule2 |--------->NULL
 * +-------+                 +-------+
 *
 * ip_fw.sibling:
 * 1) Eases statistics calculation during IP_FW_GET.  We only need to
 *    iterate layer3_chain on CPU0; the current rule's duplications on
 *    the other CPUs can safely be accessed read-only through
 *    ip_fw.sibling.
 * 2) Accelerates rule insertion and deletion, e.g. rule insertion:
 *    a) In netisr0 (on CPU0) rule3 is determined to be inserted between
 *       rule1 and rule2.  To make this decision we need to iterate the
 *       layer3_chain on CPU0.  The netmsg, which is used to insert the
 *       rule, will contain rule1 on CPU0 as prev_rule and rule2 on CPU0
 *       as next_rule.
 *    b) After the insertion on CPU0 is done, we will move on to CPU1.
 *       But instead of relocating rule3's position on CPU1 by
 *       iterating the layer3_chain on CPU1, we set the netmsg's
 *       prev_rule to rule1->sibling and next_rule to rule2->sibling
 *       before the netmsg is forwarded to CPU1 from CPU0.
 *
 *
 *
 * Rules which will create states (dyn rules) [2 CPU case]
 * (unnecessary parts are omitted; they are the same as in the
 *  previous figure)
 *
 *    CPU0                                 CPU1
 *
 * +-------+                            +-------+
 * | rule1 |                            | rule1 |
 * +-------+                            +-------+
 *   ^   |                                |   ^
 *   |   |stub                        stub|   |
 *   |   |                                |   |
 *   |   +---+                 +----------+   |
 *   |       |                 |              |
 *   |       V                 V              |
 *   |     +--------------------+             |
 *   |     |     rule_stub      |             |
 *   |     | (read-only shared) |             |
 *   |     |                    |             |
 *   |     | back pointer array |             |
 *   |     | (indexed by cpuid) |             |
 *   |     |                    |             |
 *   +-----|---------[0]        |             |
 *         |         [1]--------|-------------+
 *         |                    |
 *         +--------------------+
 *                ^          ^
 *                |          |
 *  ..............|..........|..............
 *  :             |          |             :
 *  :             |stub      |stub         :
 *  :             |          |             :
 *  :        +---------+  +---------+      :
 *  :        | state1a |  | state1b | .... :
 *  :        +---------+  +---------+      :
 *  :                                      :
 *  :             states table             :
 *  :               (shared)               :
 *  :       (protected by dyn_lock)        :
 *  ........................................
 *
 * [state1a and state1b are states created by rule1]
 *
 * ip_fw_stub:
 * This structure is introduced so that the shared (locked) state table
 * can work with per-CPU (duplicated) static rules.  It mainly bridges
 * states and static rules, and serves as the static rule's placeholder
 * (a read-only shared part of the duplicated rules) from the states'
 * point of view.
 *
 * IPFW_RULE_F_STATE (only for rules which create states):
 * o During rule installation, this flag is turned on after the rule's
 *   duplications reach all CPUs, to avoid at least the following race:
 *   1) rule1 is duplicated on CPU0 and is not duplicated on CPU1 yet
 *   2) rule1 creates state1
 *   3) state1 is located on CPU1 by check-state
 *   But rule1 is not duplicated on CPU1 yet
 * o During rule deletion, this flag is turned off before deleting the
 *   states created by the rule and before deleting the rule itself, so
 *   no more states will be created by the to-be-deleted rule even when
 *   its duplications on certain CPUs have not been eliminated yet.
 */
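/*
 * Illustrative sketch of the insertion walk described above; this is
 * not compiled code and the helper names are for exposition only, but
 * the fields are those of struct netmsg_ipfw defined below:
 *
 *	handle_insert(struct netmsg_ipfw *msg)
 *	{
 *		rule = duplicate(msg->ioc_rule);
 *		link_between(msg->prev_rule, msg->next_rule, rule);
 *		if (mycpuid != ncpus - 1) {
 *			msg->prev_rule = msg->prev_rule->sibling;
 *			msg->next_rule = msg->next_rule->sibling;
 *			forwardmsg(msg, mycpuid + 1);
 *		} else {
 *			replymsg(msg);	// back to netisr0
 *		}
 *	}
 */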
#define IPFW_AUTOINC_STEP_MIN	1
#define IPFW_AUTOINC_STEP_MAX	1000
#define IPFW_AUTOINC_STEP_DEF	100

#define IPFW_DEFAULT_RULE	65535	/* rulenum for the default rule */
#define IPFW_DEFAULT_SET	31	/* set number for the default rule */

struct netmsg_ipfw {
	struct netmsg_base	base;
	const struct ipfw_ioc_rule *ioc_rule;
	struct ip_fw		*next_rule;
	struct ip_fw		*prev_rule;
	struct ip_fw		*sibling;
	struct ip_fw_stub	*stub;
};

struct netmsg_del {
	struct netmsg_base	base;
	struct ip_fw		*start_rule;
	struct ip_fw		*prev_rule;
	uint16_t		rulenum;
	uint8_t			from_set;
	uint8_t			to_set;
};

struct netmsg_zent {
	struct netmsg_base	base;
	struct ip_fw		*start_rule;
	uint16_t		rulenum;
	uint16_t		log_only;
};

struct ipfw_context {
	struct ip_fw	*ipfw_layer3_chain;	/* list of rules for layer3 */
	struct ip_fw	*ipfw_default_rule;	/* default rule */
	uint64_t	ipfw_norule_counter;	/* counter for ipfw_log(NULL) */

	/*
	 * ipfw_set_disable contains one bit per set value (0..31).
	 * If the bit is set, all rules with the corresponding set
	 * are disabled.  Set IPFW_DEFAULT_SET is reserved for the
	 * default rule and CANNOT be disabled.
	 */
	uint32_t	ipfw_set_disable;
	uint32_t	ipfw_gen;		/* generation of rule list */
};

static struct ipfw_context	*ipfw_ctx[MAXCPU];

#ifdef KLD_MODULE
/*
 * The module cannot be unloaded if there are references to
 * certain rules of ipfw(4), e.g. from dummynet(4).
 */
static int ipfw_refcnt;
#endif

MALLOC_DEFINE(M_IPFW, "IpFw/IpAcct", "IpFw/IpAcct chains");

/*
 * The following two global variables are accessed and
 * updated only on CPU0.
 */
static uint32_t static_count;	/* # of static rules */
static uint32_t static_ioc_len;	/* bytes of static rules */

/*
 * If 1, then ipfw static rules are being flushed and
 * ipfw_chk() will skip to the default rule.
 */
static int ipfw_flushing;
static int fw_verbose;
static int verbose_limit;

static int fw_debug;
static int autoinc_step = IPFW_AUTOINC_STEP_DEF;

static int	ipfw_sysctl_enable(SYSCTL_HANDLER_ARGS);
static int	ipfw_sysctl_autoinc_step(SYSCTL_HANDLER_ARGS);
static int	ipfw_sysctl_dyn_buckets(SYSCTL_HANDLER_ARGS);
static int	ipfw_sysctl_dyn_fin(SYSCTL_HANDLER_ARGS);
static int	ipfw_sysctl_dyn_rst(SYSCTL_HANDLER_ARGS);

SYSCTL_NODE(_net_inet_ip, OID_AUTO, fw, CTLFLAG_RW, 0, "Firewall");
SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
    &fw_enable, 0, ipfw_sysctl_enable, "I", "Enable ipfw");
SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, autoinc_step, CTLTYPE_INT | CTLFLAG_RW,
    &autoinc_step, 0, ipfw_sysctl_autoinc_step, "I",
    "Rule number autoincrement step");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, one_pass, CTLFLAG_RW,
    &fw_one_pass, 0,
    "Only do a single pass through ipfw when using dummynet(4)");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, debug, CTLFLAG_RW,
    &fw_debug, 0, "Enable printing of debug ip_fw statements");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose, CTLFLAG_RW,
    &fw_verbose, 0, "Log matches to ipfw rules");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose_limit, CTLFLAG_RW,
    &verbose_limit, 0, "Set upper limit of matches of ipfw rules logged");

/*
 * Description of dynamic rules.
 *
 * Dynamic rules are stored in lists accessed through a hash table
 * (ipfw_dyn_v) whose size is curr_dyn_buckets.  This value can
 * be modified through the sysctl variable dyn_buckets, which is
 * applied when the table becomes empty.
 *
 * XXX currently there is only one list, ipfw_dyn.
 *
 * When a packet is received, its address fields are first masked
 * with the mask defined for the rule, then hashed, then matched
 * against the entries in the corresponding list.
 * Dynamic rules can be used for different purposes:
 *  + stateful rules;
 *  + enforcing limits on the number of sessions;
 *  + in-kernel NAT (not implemented yet)
 *
 * The lifetime of dynamic rules is regulated by dyn_*_lifetime,
 * measured in seconds and depending on the flags.
 *
 * The total number of dynamic rules is stored in dyn_count.
 * The max number of dynamic rules is dyn_max.  When we reach
 * the maximum number of rules we do not create any more.  This is
 * done to avoid consuming too much memory, but also too much
 * time when searching on each packet (ideally, we should try instead
 * to put a limit on the length of the list on each bucket...).
 *
 * Each dynamic rule holds a pointer to the parent ipfw rule so
 * we know what action to perform.  Dynamic rules are removed when
 * the parent rule is deleted.  XXX we should make them survive.
 *
 * There are some limitations with dynamic rules -- we do not
 * obey the 'randomized match', and we do not do multiple
 * passes through the firewall.  XXX check the latter!!!
 *
 * NOTE about the SHARED LOCKMGR LOCK during dynamic rule lookup:
 * Only a TCP state transition will change a dynamic rule's state and
 * ack sequences, and all packets of one TCP connection go through the
 * same TCP thread, so it is safe to use the shared lockmgr lock during
 * dynamic rule lookup.  The keepalive callout uses the exclusive
 * lockmgr lock when it tries to find suitable dynamic rules to send
 * keepalives for, so it will not see half-updated state and ack
 * sequences.  Though updating of the expire field looks racy for other
 * protocols, the one-second resolution of the expire field makes this
 * kind of race harmless.
 * XXX statistics' updating is _not_ MPsafe!!!
 * XXX once the UDP output path is fixed, we could use a lockless
 * dynamic rule hash table
 */
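/*
 * Example of the lifetime handling described above, using the default
 * values below: an established TCP flow has its expire time refreshed
 * to time_second + dyn_ack_lifetime (300s) on each matching segment,
 * a UDP flow gets dyn_udp_lifetime (10s), and a TCP flow that has seen
 * FINs in both directions is cut down to dyn_fin_lifetime (1s).
 */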
static ipfw_dyn_rule **ipfw_dyn_v = NULL;
static uint32_t dyn_buckets = 256;	/* must be power of 2 */
static uint32_t curr_dyn_buckets = 256;	/* must be power of 2 */
static uint32_t dyn_buckets_gen;	/* generation of dyn buckets array */
static struct lock dyn_lock;		/* dynamic rules' hash table lock */

static struct netmsg_base ipfw_timeout_netmsg;	/* schedule ipfw timeout */
static struct callout ipfw_timeout_h;

/*
 * Timeouts for various events in handling dynamic rules.
 */
static uint32_t dyn_ack_lifetime = 300;
static uint32_t dyn_syn_lifetime = 20;
static uint32_t dyn_fin_lifetime = 1;
static uint32_t dyn_rst_lifetime = 1;
static uint32_t dyn_udp_lifetime = 10;
static uint32_t dyn_short_lifetime = 5;

/*
 * Keepalives are sent if dyn_keepalive is set.  They are sent every
 * dyn_keepalive_period seconds, in the last dyn_keepalive_interval
 * seconds of lifetime of a rule.
 * dyn_rst_lifetime and dyn_fin_lifetime should be strictly lower
 * than dyn_keepalive_period.
 */
static uint32_t dyn_keepalive_interval = 20;
static uint32_t dyn_keepalive_period = 5;
static uint32_t dyn_keepalive = 1;	/* do send keepalives */

static uint32_t dyn_count;		/* # of dynamic rules */
static uint32_t dyn_max = 4096;		/* max # of dynamic rules */

SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_buckets, CTLTYPE_INT | CTLFLAG_RW,
    &dyn_buckets, 0, ipfw_sysctl_dyn_buckets, "I", "Number of dyn. buckets");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, curr_dyn_buckets, CTLFLAG_RD,
    &curr_dyn_buckets, 0, "Current number of dyn. buckets");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_count, CTLFLAG_RD,
    &dyn_count, 0, "Number of dyn. rules");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_max, CTLFLAG_RW,
    &dyn_max, 0, "Max number of dyn. rules");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, static_count, CTLFLAG_RD,
    &static_count, 0, "Number of static rules");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_ack_lifetime, CTLFLAG_RW,
    &dyn_ack_lifetime, 0, "Lifetime of dyn. rules for acks");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_syn_lifetime, CTLFLAG_RW,
    &dyn_syn_lifetime, 0, "Lifetime of dyn. rules for syn");
SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_fin_lifetime,
    CTLTYPE_INT | CTLFLAG_RW, &dyn_fin_lifetime, 0, ipfw_sysctl_dyn_fin, "I",
    "Lifetime of dyn. rules for fin");
SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_rst_lifetime,
    CTLTYPE_INT | CTLFLAG_RW, &dyn_rst_lifetime, 0, ipfw_sysctl_dyn_rst, "I",
    "Lifetime of dyn. rules for rst");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_udp_lifetime, CTLFLAG_RW,
    &dyn_udp_lifetime, 0, "Lifetime of dyn. rules for UDP");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_short_lifetime, CTLFLAG_RW,
    &dyn_short_lifetime, 0, "Lifetime of dyn. rules for other situations");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_keepalive, CTLFLAG_RW,
    &dyn_keepalive, 0, "Enable keepalives for dyn. rules");
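/*
 * With the defaults above, a dynamic rule expiring at time T is probed
 * every dyn_keepalive_period (5) seconds during the last
 * dyn_keepalive_interval (20) seconds of its lifetime, i.e. roughly at
 * T-20, T-15, T-10 and T-5; any answered probe refreshes the rule's
 * expire time.
 */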
rules"); 427 428 static ip_fw_chk_t ipfw_chk; 429 static void ipfw_tick(void *); 430 431 static __inline int 432 ipfw_free_rule(struct ip_fw *rule) 433 { 434 KASSERT(rule->cpuid == mycpuid, ("rule freed on cpu%d\n", mycpuid)); 435 KASSERT(rule->refcnt > 0, ("invalid refcnt %u\n", rule->refcnt)); 436 rule->refcnt--; 437 if (rule->refcnt == 0) { 438 kfree(rule, M_IPFW); 439 return 1; 440 } 441 return 0; 442 } 443 444 static void 445 ipfw_unref_rule(void *priv) 446 { 447 ipfw_free_rule(priv); 448 #ifdef KLD_MODULE 449 atomic_subtract_int(&ipfw_refcnt, 1); 450 #endif 451 } 452 453 static __inline void 454 ipfw_ref_rule(struct ip_fw *rule) 455 { 456 KASSERT(rule->cpuid == mycpuid, ("rule used on cpu%d\n", mycpuid)); 457 #ifdef KLD_MODULE 458 atomic_add_int(&ipfw_refcnt, 1); 459 #endif 460 rule->refcnt++; 461 } 462 463 /* 464 * This macro maps an ip pointer into a layer3 header pointer of type T 465 */ 466 #define L3HDR(T, ip) ((T *)((uint32_t *)(ip) + (ip)->ip_hl)) 467 468 static __inline int 469 icmptype_match(struct ip *ip, ipfw_insn_u32 *cmd) 470 { 471 int type = L3HDR(struct icmp,ip)->icmp_type; 472 473 return (type <= ICMP_MAXTYPE && (cmd->d[0] & (1 << type))); 474 } 475 476 #define TT ((1 << ICMP_ECHO) | \ 477 (1 << ICMP_ROUTERSOLICIT) | \ 478 (1 << ICMP_TSTAMP) | \ 479 (1 << ICMP_IREQ) | \ 480 (1 << ICMP_MASKREQ)) 481 482 static int 483 is_icmp_query(struct ip *ip) 484 { 485 int type = L3HDR(struct icmp, ip)->icmp_type; 486 487 return (type <= ICMP_MAXTYPE && (TT & (1 << type))); 488 } 489 490 #undef TT 491 492 /* 493 * The following checks use two arrays of 8 or 16 bits to store the 494 * bits that we want set or clear, respectively. They are in the 495 * low and high half of cmd->arg1 or cmd->d[0]. 496 * 497 * We scan options and store the bits we find set. We succeed if 498 * 499 * (want_set & ~bits) == 0 && (want_clear & ~bits) == want_clear 500 * 501 * The code is sometimes optimized not to store additional variables. 
static int
flags_match(ipfw_insn *cmd, uint8_t bits)
{
	u_char want_clear;
	bits = ~bits;

	if (((cmd->arg1 & 0xff) & bits) != 0)
		return 0;	/* some bits we want set were clear */

	want_clear = (cmd->arg1 >> 8) & 0xff;
	if ((want_clear & bits) != want_clear)
		return 0;	/* some bits we want clear were set */
	return 1;
}

static int
ipopts_match(struct ip *ip, ipfw_insn *cmd)
{
	int optlen, bits = 0;
	u_char *cp = (u_char *)(ip + 1);
	int x = (ip->ip_hl << 2) - sizeof(struct ip);

	for (; x > 0; x -= optlen, cp += optlen) {
		int opt = cp[IPOPT_OPTVAL];

		if (opt == IPOPT_EOL)
			break;

		if (opt == IPOPT_NOP) {
			optlen = 1;
		} else {
			optlen = cp[IPOPT_OLEN];
			if (optlen <= 0 || optlen > x)
				return 0;	/* invalid or truncated */
		}

		switch (opt) {
		case IPOPT_LSRR:
			bits |= IP_FW_IPOPT_LSRR;
			break;

		case IPOPT_SSRR:
			bits |= IP_FW_IPOPT_SSRR;
			break;

		case IPOPT_RR:
			bits |= IP_FW_IPOPT_RR;
			break;

		case IPOPT_TS:
			bits |= IP_FW_IPOPT_TS;
			break;

		default:
			break;
		}
	}
	return (flags_match(cmd, bits));
}

static int
tcpopts_match(struct ip *ip, ipfw_insn *cmd)
{
	int optlen, bits = 0;
	struct tcphdr *tcp = L3HDR(struct tcphdr, ip);
	u_char *cp = (u_char *)(tcp + 1);
	int x = (tcp->th_off << 2) - sizeof(struct tcphdr);

	for (; x > 0; x -= optlen, cp += optlen) {
		int opt = cp[0];

		if (opt == TCPOPT_EOL)
			break;

		if (opt == TCPOPT_NOP) {
			optlen = 1;
		} else {
			optlen = cp[1];
			if (optlen <= 0)
				break;
		}

		switch (opt) {
		case TCPOPT_MAXSEG:
			bits |= IP_FW_TCPOPT_MSS;
			break;

		case TCPOPT_WINDOW:
			bits |= IP_FW_TCPOPT_WINDOW;
			break;

		case TCPOPT_SACK_PERMITTED:
		case TCPOPT_SACK:
			bits |= IP_FW_TCPOPT_SACK;
			break;

		case TCPOPT_TIMESTAMP:
			bits |= IP_FW_TCPOPT_TS;
			break;

		case TCPOPT_CC:
		case TCPOPT_CCNEW:
		case TCPOPT_CCECHO:
			bits |= IP_FW_TCPOPT_CC;
			break;

		default:
			break;
		}
	}
	return (flags_match(cmd, bits));
}

static int
iface_match(struct ifnet *ifp, ipfw_insn_if *cmd)
{
	if (ifp == NULL)	/* no iface with this packet, match fails */
		return 0;

	/* Check by name or by IP address */
	if (cmd->name[0] != '\0') {	/* match by name */
		/* Check name */
		if (cmd->p.glob) {
			if (kfnmatch(cmd->name, ifp->if_xname, 0) == 0)
				return(1);
		} else {
			if (strncmp(ifp->if_xname, cmd->name, IFNAMSIZ) == 0)
				return(1);
		}
	} else {
		struct ifaddr_container *ifac;

		TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
			struct ifaddr *ia = ifac->ifa;

			if (ia->ifa_addr == NULL)
				continue;
			if (ia->ifa_addr->sa_family != AF_INET)
				continue;
			if (cmd->p.ip.s_addr == ((struct sockaddr_in *)
			    (ia->ifa_addr))->sin_addr.s_addr)
				return(1);	/* match */
		}
	}
	return(0);	/* no match, fail ... */
}

#define SNPARGS(buf, len) buf + len, sizeof(buf) > len ? sizeof(buf) - len : 0
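/*
 * SNPARGS expands to the (buffer + offset, remaining-space) argument
 * pair expected by ksnprintf(), clamping the space to 0 once the
 * buffer is full.  It is used below to build log lines piecewise, e.g.
 *
 *	len = ksnprintf(SNPARGS(proto, 0), "TCP %s", ...);
 *	ksnprintf(SNPARGS(proto, len), ":%d", port);
 */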
/*
 * We enter here when we have a rule with O_LOG.
 * XXX this function alone takes about 2Kbytes of code!
 */
static void
ipfw_log(struct ip_fw *f, u_int hlen, struct ether_header *eh,
	 struct mbuf *m, struct ifnet *oif)
{
	char *action;
	int limit_reached = 0;
	char action2[40], proto[48], fragment[28];

	fragment[0] = '\0';
	proto[0] = '\0';

	if (f == NULL) {	/* bogus pkt */
		struct ipfw_context *ctx = ipfw_ctx[mycpuid];

		if (verbose_limit != 0 &&
		    ctx->ipfw_norule_counter >= verbose_limit)
			return;
		ctx->ipfw_norule_counter++;
		if (ctx->ipfw_norule_counter == verbose_limit)
			limit_reached = verbose_limit;
		action = "Refuse";
	} else {	/* O_LOG is the first action, find the real one */
		ipfw_insn *cmd = ACTION_PTR(f);
		ipfw_insn_log *l = (ipfw_insn_log *)cmd;

		if (l->max_log != 0 && l->log_left == 0)
			return;
		l->log_left--;
		if (l->log_left == 0)
			limit_reached = l->max_log;
		cmd += F_LEN(cmd);	/* point to first action */
		if (cmd->opcode == O_PROB)
			cmd += F_LEN(cmd);

		action = action2;
		switch (cmd->opcode) {
		case O_DENY:
			action = "Deny";
			break;

		case O_REJECT:
			if (cmd->arg1 == ICMP_REJECT_RST) {
				action = "Reset";
			} else if (cmd->arg1 == ICMP_UNREACH_HOST) {
				action = "Reject";
			} else {
				ksnprintf(SNPARGS(action2, 0), "Unreach %d",
					  cmd->arg1);
			}
			break;

		case O_ACCEPT:
			action = "Accept";
			break;

		case O_COUNT:
			action = "Count";
			break;

		case O_DIVERT:
			ksnprintf(SNPARGS(action2, 0), "Divert %d", cmd->arg1);
			break;

		case O_TEE:
			ksnprintf(SNPARGS(action2, 0), "Tee %d", cmd->arg1);
			break;

		case O_SKIPTO:
			ksnprintf(SNPARGS(action2, 0), "SkipTo %d", cmd->arg1);
			break;

		case O_PIPE:
			ksnprintf(SNPARGS(action2, 0), "Pipe %d", cmd->arg1);
			break;

		case O_QUEUE:
			ksnprintf(SNPARGS(action2, 0), "Queue %d", cmd->arg1);
			break;

		case O_FORWARD_IP:
		{
			ipfw_insn_sa *sa = (ipfw_insn_sa *)cmd;
			int len;

			len = ksnprintf(SNPARGS(action2, 0),
					"Forward to %s",
					inet_ntoa(sa->sa.sin_addr));
			if (sa->sa.sin_port) {
				ksnprintf(SNPARGS(action2, len), ":%d",
					  sa->sa.sin_port);
			}
		}
			break;

		default:
			action = "UNKNOWN";
			break;
		}
	}

	if (hlen == 0) {	/* non-ip */
		ksnprintf(SNPARGS(proto, 0), "MAC");
	} else {
		struct ip *ip = mtod(m, struct ip *);
		/* these three are all aliases to the same thing */
		struct icmp *const icmp = L3HDR(struct icmp, ip);
		struct tcphdr *const tcp = (struct tcphdr *)icmp;
		struct udphdr *const udp = (struct udphdr *)icmp;

		int ip_off, offset, ip_len;
		int len;

		if (eh != NULL) { /* layer 2 packets are as on the wire */
			ip_off = ntohs(ip->ip_off);
			ip_len = ntohs(ip->ip_len);
		} else {
			ip_off = ip->ip_off;
			ip_len = ip->ip_len;
		}
		offset = ip_off & IP_OFFMASK;
		switch (ip->ip_p) {
		case IPPROTO_TCP:
			len = ksnprintf(SNPARGS(proto, 0), "TCP %s",
					inet_ntoa(ip->ip_src));
			if (offset == 0) {
				ksnprintf(SNPARGS(proto, len), ":%d %s:%d",
					  ntohs(tcp->th_sport),
					  inet_ntoa(ip->ip_dst),
					  ntohs(tcp->th_dport));
			} else {
				ksnprintf(SNPARGS(proto, len), " %s",
					  inet_ntoa(ip->ip_dst));
			}
			break;

		case IPPROTO_UDP:
			len = ksnprintf(SNPARGS(proto, 0), "UDP %s",
					inet_ntoa(ip->ip_src));
			if (offset == 0) {
				ksnprintf(SNPARGS(proto, len), ":%d %s:%d",
					  ntohs(udp->uh_sport),
					  inet_ntoa(ip->ip_dst),
					  ntohs(udp->uh_dport));
			} else {
				ksnprintf(SNPARGS(proto, len), " %s",
					  inet_ntoa(ip->ip_dst));
			}
			break;

		case IPPROTO_ICMP:
			if (offset == 0) {
				len = ksnprintf(SNPARGS(proto, 0),
						"ICMP:%u.%u ",
						icmp->icmp_type,
						icmp->icmp_code);
			} else {
				len = ksnprintf(SNPARGS(proto, 0), "ICMP ");
			}
			len += ksnprintf(SNPARGS(proto, len), "%s",
					 inet_ntoa(ip->ip_src));
			ksnprintf(SNPARGS(proto, len), " %s",
				  inet_ntoa(ip->ip_dst));
			break;

		default:
			len = ksnprintf(SNPARGS(proto, 0), "P:%d %s", ip->ip_p,
					inet_ntoa(ip->ip_src));
			ksnprintf(SNPARGS(proto, len), " %s",
				  inet_ntoa(ip->ip_dst));
			break;
		}

		if (ip_off & (IP_MF | IP_OFFMASK)) {
			ksnprintf(SNPARGS(fragment, 0), " (frag %d:%d@%d%s)",
				  ntohs(ip->ip_id), ip_len - (ip->ip_hl << 2),
				  offset << 3, (ip_off & IP_MF) ? "+" : "");
		}
	}

	if (oif || m->m_pkthdr.rcvif) {
		log(LOG_SECURITY | LOG_INFO,
		    "ipfw: %d %s %s %s via %s%s\n",
		    f ? f->rulenum : -1,
		    action, proto, oif ? "out" : "in",
		    oif ? oif->if_xname : m->m_pkthdr.rcvif->if_xname,
		    fragment);
	} else {
		log(LOG_SECURITY | LOG_INFO,
		    "ipfw: %d %s %s [no if info]%s\n",
		    f ? f->rulenum : -1,
		    action, proto, fragment);
	}

	if (limit_reached) {
		log(LOG_SECURITY | LOG_NOTICE,
		    "ipfw: limit %d reached on entry %d\n",
		    limit_reached, f ? f->rulenum : -1);
	}
}

#undef SNPARGS

/*
 * IMPORTANT: the hash function for dynamic rules must be commutative
 * in source and destination (ip, port), because rules are bidirectional
 * and we want to find both in the same bucket.
 */
static __inline int
hash_packet(struct ipfw_flow_id *id)
{
	uint32_t i;

	i = (id->dst_ip) ^ (id->src_ip) ^ (id->dst_port) ^ (id->src_port);
	i &= (curr_dyn_buckets - 1);
	return i;
}
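/*
 * Commutativity example: XOR is symmetric in its operands, so a packet
 * (src 10.0.0.1:12345, dst 10.0.0.2:80) and its reply
 * (src 10.0.0.2:80, dst 10.0.0.1:12345) produce the same value above,
 * and both directions of a session land in the same bucket.
 */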
/*
 * Unlink a dynamic rule from a chain.  prev is a pointer to
 * the previous one, q is a pointer to the rule to delete,
 * head is a pointer to the head of the queue.
 * Modifies q and potentially also head.
 */
#define UNLINK_DYN_RULE(prev, head, q)					\
do {									\
	ipfw_dyn_rule *old_q = q;					\
									\
	/* remove a refcount to the parent */				\
	if (q->dyn_type == O_LIMIT)					\
		q->parent->count--;					\
	DPRINTF("-- unlink entry 0x%08x %d -> 0x%08x %d, %d left\n",	\
		q->id.src_ip, q->id.src_port,				\
		q->id.dst_ip, q->id.dst_port, dyn_count - 1);		\
	if (prev != NULL)						\
		prev->next = q = q->next;				\
	else								\
		head = q = q->next;					\
	KASSERT(dyn_count > 0, ("invalid dyn count %u\n", dyn_count));	\
	dyn_count--;							\
	kfree(old_q, M_IPFW);						\
} while (0)

#define TIME_LEQ(a, b)	((int)((a) - (b)) <= 0)

/*
 * Remove dynamic rules pointing to "rule", or all of them if rule == NULL.
 *
 * If keep_me == NULL, rules are deleted even if not expired,
 * otherwise only expired rules are removed.
 *
 * The value of the second parameter is also used to identify
 * a rule we absolutely do not want to remove (e.g. because we are
 * holding a reference to it -- this is the case with O_LIMIT_PARENT
 * rules).  The pointer is only used for comparison, so any non-null
 * value will do.
 */
static void
remove_dyn_rule_locked(struct ip_fw *rule, ipfw_dyn_rule *keep_me)
{
	static uint32_t last_remove = 0; /* XXX */

#define FORCE	(keep_me == NULL)

	ipfw_dyn_rule *prev, *q;
	int i, pass = 0, max_pass = 0, unlinked = 0;

	if (ipfw_dyn_v == NULL || dyn_count == 0)
		return;
	/* do not expire more than once per second, it is useless */
	if (!FORCE && last_remove == time_second)
		return;
	last_remove = time_second;

	/*
	 * Because O_LIMIT rules refer to parent rules, during the first
	 * pass we only remove child rules and note whether any pending
	 * O_LIMIT_PARENT rules exist; those are removed in a second pass.
	 */
next_pass:
	for (i = 0; i < curr_dyn_buckets; i++) {
		for (prev = NULL, q = ipfw_dyn_v[i]; q;) {
			/*
			 * Logic can become complex here, so we split tests.
			 */
			if (q == keep_me)
				goto next;
			if (rule != NULL && rule->stub != q->stub)
				goto next; /* not the one we are looking for */
			if (q->dyn_type == O_LIMIT_PARENT) {
				/*
				 * Handle parent in the second pass,
				 * record that we need one.
				 */
				max_pass = 1;
				if (pass == 0)
					goto next;
				if (FORCE && q->count != 0) {
					/* XXX should not happen! */
					kprintf("OUCH! cannot remove rule, "
						"count %d\n", q->count);
				}
			} else {
				if (!FORCE &&
				    !TIME_LEQ(q->expire, time_second))
					goto next;
			}
			unlinked = 1;
			UNLINK_DYN_RULE(prev, ipfw_dyn_v[i], q);
			continue;
next:
			prev = q;
			q = q->next;
		}
	}
	if (pass++ < max_pass)
		goto next_pass;

	if (unlinked)
		++dyn_buckets_gen;

#undef FORCE
}

/*
 * Look up a dynamic rule.
 */
static ipfw_dyn_rule *
lookup_dyn_rule(struct ipfw_flow_id *pkt, int *match_direction,
		struct tcphdr *tcp)
{
	/*
	 * Stateful ipfw extensions.
	 * Lookup into the dynamic session queue.
	 */
#define MATCH_REVERSE	0
#define MATCH_FORWARD	1
#define MATCH_NONE	2
#define MATCH_UNKNOWN	3
	int i, dir = MATCH_NONE;
	ipfw_dyn_rule *prev, *q = NULL;

	if (ipfw_dyn_v == NULL)
		goto done;	/* not found */

	i = hash_packet(pkt);
	for (prev = NULL, q = ipfw_dyn_v[i]; q != NULL;) {
		if (q->dyn_type == O_LIMIT_PARENT)
			goto next;

		if (TIME_LEQ(q->expire, time_second)) {
			/*
			 * Entry expired; skip.
			 * Let ipfw_tick() take care of it
			 */
			goto next;
		}

		if (pkt->proto == q->id.proto) {
			if (pkt->src_ip == q->id.src_ip &&
			    pkt->dst_ip == q->id.dst_ip &&
			    pkt->src_port == q->id.src_port &&
			    pkt->dst_port == q->id.dst_port) {
				dir = MATCH_FORWARD;
				break;
			}
			if (pkt->src_ip == q->id.dst_ip &&
			    pkt->dst_ip == q->id.src_ip &&
			    pkt->src_port == q->id.dst_port &&
			    pkt->dst_port == q->id.src_port) {
				dir = MATCH_REVERSE;
				break;
			}
		}
next:
		prev = q;
		q = q->next;
	}
	if (q == NULL)
		goto done;	/* q = NULL, not found */

	if (pkt->proto == IPPROTO_TCP) { /* update state according to flags */
		u_char flags = pkt->flags & (TH_FIN | TH_SYN | TH_RST);

#define BOTH_SYN	(TH_SYN | (TH_SYN << 8))
#define BOTH_FIN	(TH_FIN | (TH_FIN << 8))

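		/*
		 * q->state packs the forward-direction TCP flags in its
		 * low byte and the reverse-direction flags in its high
		 * byte, so e.g. BOTH_SYN means a SYN has been seen in
		 * each direction and the connection is established.
		 */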
		q->state |= (dir == MATCH_FORWARD) ? flags : (flags << 8);
		switch (q->state) {
		case TH_SYN:	/* opening */
			q->expire = time_second + dyn_syn_lifetime;
			break;

		case BOTH_SYN:			/* move to established */
		case BOTH_SYN | TH_FIN:		/* one side tries to close */
		case BOTH_SYN | (TH_FIN << 8):
			if (tcp) {
				uint32_t ack = ntohl(tcp->th_ack);

#define _SEQ_GE(a, b)	((int)(a) - (int)(b) >= 0)

				if (dir == MATCH_FORWARD) {
					if (q->ack_fwd == 0 ||
					    _SEQ_GE(ack, q->ack_fwd))
						q->ack_fwd = ack;
					else /* ignore out-of-sequence */
						break;
				} else {
					if (q->ack_rev == 0 ||
					    _SEQ_GE(ack, q->ack_rev))
						q->ack_rev = ack;
					else /* ignore out-of-sequence */
						break;
				}
#undef _SEQ_GE
			}
			q->expire = time_second + dyn_ack_lifetime;
			break;

		case BOTH_SYN | BOTH_FIN:	/* both sides closed */
			KKASSERT(dyn_fin_lifetime < dyn_keepalive_period);
			q->expire = time_second + dyn_fin_lifetime;
			break;

		default:
#if 0
			/*
			 * reset or some invalid combination, but can also
			 * occur if we use keep-state the wrong way.
			 */
			if ((q->state & ((TH_RST << 8) | TH_RST)) == 0)
				kprintf("invalid state: 0x%x\n", q->state);
#endif
			KKASSERT(dyn_rst_lifetime < dyn_keepalive_period);
			q->expire = time_second + dyn_rst_lifetime;
			break;
		}
	} else if (pkt->proto == IPPROTO_UDP) {
		q->expire = time_second + dyn_udp_lifetime;
	} else {
		/* other protocols */
		q->expire = time_second + dyn_short_lifetime;
	}
done:
	if (match_direction)
		*match_direction = dir;
	return q;
}

static struct ip_fw *
lookup_rule(struct ipfw_flow_id *pkt, int *match_direction, struct tcphdr *tcp,
	    uint16_t len, int *deny)
{
	struct ip_fw *rule = NULL;
	ipfw_dyn_rule *q;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	uint32_t gen;

	*deny = 0;
	gen = ctx->ipfw_gen;

	lockmgr(&dyn_lock, LK_SHARED);

	if (ctx->ipfw_gen != gen) {
		/*
		 * Static rules were changed while we were waiting
		 * for the dynamic hash table lock; deny this packet,
		 * since it is _not_ known whether it is safe to keep
		 * iterating the static rules.
		 */
		*deny = 1;
		goto back;
	}

	q = lookup_dyn_rule(pkt, match_direction, tcp);
	if (q == NULL) {
		rule = NULL;
	} else {
		rule = q->stub->rule[mycpuid];
		KKASSERT(rule->stub == q->stub && rule->cpuid == mycpuid);

		/* XXX */
		q->pcnt++;
		q->bcnt += len;
	}
back:
	lockmgr(&dyn_lock, LK_RELEASE);
	return rule;
}

static void
realloc_dynamic_table(void)
{
	ipfw_dyn_rule **old_dyn_v;
	uint32_t old_curr_dyn_buckets;

	KASSERT(dyn_buckets <= 65536 && (dyn_buckets & (dyn_buckets - 1)) == 0,
		("invalid dyn_buckets %d\n", dyn_buckets));

	/* Save the current buckets array for later error recovery */
	old_dyn_v = ipfw_dyn_v;
	old_curr_dyn_buckets = curr_dyn_buckets;

	curr_dyn_buckets = dyn_buckets;
	for (;;) {
		ipfw_dyn_v = kmalloc(curr_dyn_buckets * sizeof(ipfw_dyn_rule *),
				     M_IPFW, M_NOWAIT | M_ZERO);
		if (ipfw_dyn_v != NULL || curr_dyn_buckets <= 2)
			break;

		curr_dyn_buckets /= 2;
		if (curr_dyn_buckets <= old_curr_dyn_buckets &&
		    old_dyn_v != NULL) {
			/*
			 * Don't try allocating a smaller buckets array;
			 * reuse the old one, which already contains
			 * enough buckets.
			 */
			break;
		}
	}

	if (ipfw_dyn_v != NULL) {
		if (old_dyn_v != NULL)
			kfree(old_dyn_v, M_IPFW);
	} else {
		/* Allocation failed, restore old buckets array */
		ipfw_dyn_v = old_dyn_v;
		curr_dyn_buckets = old_curr_dyn_buckets;
	}

	if (ipfw_dyn_v != NULL)
		++dyn_buckets_gen;
}

/*
 * Install state of type 'type' for a dynamic session.
 * The hash table contains the following types of rules:
 * - regular rules (O_KEEP_STATE);
 * - rules for sessions with a limited number of sessions per user
 *   (O_LIMIT).  When they are created, the parent's count is
 *   increased by 1, and decreased on delete.  In this case,
 *   the third parameter is the parent rule and not the chain;
 * - "parent" rules for the above (O_LIMIT_PARENT).
 */
static ipfw_dyn_rule *
add_dyn_rule(struct ipfw_flow_id *id, uint8_t dyn_type, struct ip_fw *rule)
{
	ipfw_dyn_rule *r;
	int i;

	if (ipfw_dyn_v == NULL ||
	    (dyn_count == 0 && dyn_buckets != curr_dyn_buckets)) {
		realloc_dynamic_table();
		if (ipfw_dyn_v == NULL)
			return NULL;	/* failed! */
	}
	i = hash_packet(id);

	r = kmalloc(sizeof(*r), M_IPFW, M_NOWAIT | M_ZERO);
	if (r == NULL) {
		kprintf("sorry cannot allocate state\n");
		return NULL;
	}

	/* increase refcount on parent, and set pointer */
	if (dyn_type == O_LIMIT) {
		ipfw_dyn_rule *parent = (ipfw_dyn_rule *)rule;

		if (parent->dyn_type != O_LIMIT_PARENT)
			panic("invalid parent");
		parent->count++;
		r->parent = parent;
		rule = parent->stub->rule[mycpuid];
		KKASSERT(rule->stub == parent->stub);
	}
	KKASSERT(rule->cpuid == mycpuid && rule->stub != NULL);

	r->id = *id;
	r->expire = time_second + dyn_syn_lifetime;
	r->stub = rule->stub;
	r->dyn_type = dyn_type;
	r->pcnt = r->bcnt = 0;
	r->count = 0;

	r->bucket = i;
	r->next = ipfw_dyn_v[i];
	ipfw_dyn_v[i] = r;
	dyn_count++;
	dyn_buckets_gen++;
	DPRINTF("-- add dyn entry ty %d 0x%08x %d -> 0x%08x %d, total %d\n",
		dyn_type,
		r->id.src_ip, r->id.src_port,
		r->id.dst_ip, r->id.dst_port, dyn_count);
	return r;
}

/*
 * Look up a dynamic parent rule using pkt and rule as search keys.
 * If the lookup fails, then install one.
 */
static ipfw_dyn_rule *
lookup_dyn_parent(struct ipfw_flow_id *pkt, struct ip_fw *rule)
{
	ipfw_dyn_rule *q;
	int i;

	if (ipfw_dyn_v) {
		i = hash_packet(pkt);
		for (q = ipfw_dyn_v[i]; q != NULL; q = q->next) {
			if (q->dyn_type == O_LIMIT_PARENT &&
			    rule->stub == q->stub &&
			    pkt->proto == q->id.proto &&
			    pkt->src_ip == q->id.src_ip &&
			    pkt->dst_ip == q->id.dst_ip &&
			    pkt->src_port == q->id.src_port &&
			    pkt->dst_port == q->id.dst_port) {
				q->expire = time_second + dyn_short_lifetime;
				DPRINTF("lookup_dyn_parent found 0x%p\n", q);
				return q;
			}
		}
	}
	return add_dyn_rule(pkt, O_LIMIT_PARENT, rule);
}
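/*
 * Example of the O_LIMIT keying used below: for "limit src-addr 10"
 * only DYN_SRC_ADDR is set in the limit mask, so the parent flow id
 * keeps just the source address.  Each distinct source then owns one
 * O_LIMIT_PARENT whose ->count caps its concurrent O_LIMIT sessions
 * at conn_limit.
 */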
/*
 * Install dynamic state for rule type cmd->o.opcode
 *
 * Returns 1 (failure) if state is not installed because of errors or because
 * session limitations are enforced.
 */
static int
install_state_locked(struct ip_fw *rule, ipfw_insn_limit *cmd,
		     struct ip_fw_args *args)
{
	static int last_log; /* XXX */

	ipfw_dyn_rule *q;

	DPRINTF("-- install state type %d 0x%08x %u -> 0x%08x %u\n",
		cmd->o.opcode,
		args->f_id.src_ip, args->f_id.src_port,
		args->f_id.dst_ip, args->f_id.dst_port);

	q = lookup_dyn_rule(&args->f_id, NULL, NULL);
	if (q != NULL) {	/* should never occur */
		if (last_log != time_second) {
			last_log = time_second;
			kprintf(" install_state: entry already present, done\n");
		}
		return 0;
	}

	if (dyn_count >= dyn_max) {
		/*
		 * Run out of slots, try to remove any expired rule.
		 */
		remove_dyn_rule_locked(NULL, (ipfw_dyn_rule *)1);
		if (dyn_count >= dyn_max) {
			if (last_log != time_second) {
				last_log = time_second;
				kprintf("install_state: "
					"Too many dynamic rules\n");
			}
			return 1; /* cannot install, notify caller */
		}
	}

	switch (cmd->o.opcode) {
	case O_KEEP_STATE: /* bidir rule */
		if (add_dyn_rule(&args->f_id, O_KEEP_STATE, rule) == NULL)
			return 1;
		break;

	case O_LIMIT: /* limit number of sessions */
	{
		uint16_t limit_mask = cmd->limit_mask;
		struct ipfw_flow_id id;
		ipfw_dyn_rule *parent;

		DPRINTF("installing dyn-limit rule %d\n", cmd->conn_limit);

		id.dst_ip = id.src_ip = 0;
		id.dst_port = id.src_port = 0;
		id.proto = args->f_id.proto;

		if (limit_mask & DYN_SRC_ADDR)
			id.src_ip = args->f_id.src_ip;
		if (limit_mask & DYN_DST_ADDR)
			id.dst_ip = args->f_id.dst_ip;
		if (limit_mask & DYN_SRC_PORT)
			id.src_port = args->f_id.src_port;
		if (limit_mask & DYN_DST_PORT)
			id.dst_port = args->f_id.dst_port;

		parent = lookup_dyn_parent(&id, rule);
		if (parent == NULL) {
			kprintf("add parent failed\n");
			return 1;
		}

		if (parent->count >= cmd->conn_limit) {
			/*
			 * See if we can remove some expired rule.
			 */
			remove_dyn_rule_locked(rule, parent);
			if (parent->count >= cmd->conn_limit) {
				if (fw_verbose &&
				    last_log != time_second) {
					last_log = time_second;
					log(LOG_SECURITY | LOG_DEBUG,
					    "drop session, "
					    "too many entries\n");
				}
				return 1;
			}
		}
		if (add_dyn_rule(&args->f_id, O_LIMIT,
				 (struct ip_fw *)parent) == NULL)
			return 1;
	}
		break;

	default:
		kprintf("unknown dynamic rule type %u\n", cmd->o.opcode);
		return 1;
	}
	lookup_dyn_rule(&args->f_id, NULL, NULL); /* XXX just set lifetime */
	return 0;
}

static int
install_state(struct ip_fw *rule, ipfw_insn_limit *cmd,
	      struct ip_fw_args *args, int *deny)
{
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	uint32_t gen;
	int ret = 0;

	*deny = 0;
	gen = ctx->ipfw_gen;

	lockmgr(&dyn_lock, LK_EXCLUSIVE);
	if (ctx->ipfw_gen != gen) {
		/* See the comment in lookup_rule() */
		*deny = 1;
	} else {
		ret = install_state_locked(rule, cmd, args);
	}
	lockmgr(&dyn_lock, LK_RELEASE);

	return ret;
}

/*
 * Transmit a TCP packet, containing either a RST or a keepalive.
 * When flags & TH_RST, we are sending a RST packet because a
 * "reset" action matched the packet.
 * Otherwise we are sending a keepalive, and flags & TH_SYN determines
 * its direction.
 */
static void
send_pkt(struct ipfw_flow_id *id, uint32_t seq, uint32_t ack, int flags)
{
	struct mbuf *m;
	struct ip *ip;
	struct tcphdr *tcp;
	struct route sro;	/* fake route */

	MGETHDR(m, MB_DONTWAIT, MT_HEADER);
	if (m == NULL)
		return;
	m->m_pkthdr.rcvif = NULL;
	m->m_pkthdr.len = m->m_len = sizeof(struct ip) + sizeof(struct tcphdr);
	m->m_data += max_linkhdr;

	ip = mtod(m, struct ip *);
	bzero(ip, m->m_len);
	tcp = (struct tcphdr *)(ip + 1); /* no IP options */
	ip->ip_p = IPPROTO_TCP;
	tcp->th_off = 5;

	/*
	 * Assume we are sending a RST (or a keepalive in the reverse
	 * direction), swap src and destination addresses and ports.
	 */
	ip->ip_src.s_addr = htonl(id->dst_ip);
	ip->ip_dst.s_addr = htonl(id->src_ip);
	tcp->th_sport = htons(id->dst_port);
	tcp->th_dport = htons(id->src_port);
	if (flags & TH_RST) {	/* we are sending a RST */
		if (flags & TH_ACK) {
			tcp->th_seq = htonl(ack);
			tcp->th_ack = htonl(0);
			tcp->th_flags = TH_RST;
		} else {
			if (flags & TH_SYN)
				seq++;
			tcp->th_seq = htonl(0);
			tcp->th_ack = htonl(seq);
			tcp->th_flags = TH_RST | TH_ACK;
		}
	} else {
		/*
		 * We are sending a keepalive.  flags & TH_SYN determines
		 * the direction, forward if set, reverse if clear.
		 * NOTE: seq and ack are always assumed to be correct
		 * as set by the caller.  This may be confusing...
		 */
		if (flags & TH_SYN) {
			/*
			 * we have to rewrite the correct addresses!
			 */
			ip->ip_dst.s_addr = htonl(id->dst_ip);
			ip->ip_src.s_addr = htonl(id->src_ip);
			tcp->th_dport = htons(id->dst_port);
			tcp->th_sport = htons(id->src_port);
		}
		tcp->th_seq = htonl(seq);
		tcp->th_ack = htonl(ack);
		tcp->th_flags = TH_ACK;
	}

	/*
	 * Set ip_len to the payload size so we can compute
	 * the tcp checksum on the pseudoheader.
	 * XXX check this, could save a couple of words?
	 */
	ip->ip_len = htons(sizeof(struct tcphdr));
	tcp->th_sum = in_cksum(m, m->m_pkthdr.len);

	/*
	 * now fill fields left out earlier
	 */
	ip->ip_ttl = ip_defttl;
	ip->ip_len = m->m_pkthdr.len;

	bzero(&sro, sizeof(sro));
	ip_rtaddr(ip->ip_dst, &sro);

	m->m_pkthdr.fw_flags |= IPFW_MBUF_GENERATED;
	ip_output(m, NULL, &sro, 0, NULL, NULL);
	if (sro.ro_rt)
		RTFREE(sro.ro_rt);
}

/*
 * Send a reject message, consuming the mbuf passed as an argument.
 */
static void
send_reject(struct ip_fw_args *args, int code, int offset, int ip_len)
{
	if (code != ICMP_REJECT_RST) { /* Send an ICMP unreach */
		/* We need the IP header in host order for icmp_error(). */
		if (args->eh != NULL) {
			struct ip *ip = mtod(args->m, struct ip *);

			ip->ip_len = ntohs(ip->ip_len);
			ip->ip_off = ntohs(ip->ip_off);
		}
		icmp_error(args->m, ICMP_UNREACH, code, 0L, 0);
	} else if (offset == 0 && args->f_id.proto == IPPROTO_TCP) {
		struct tcphdr *const tcp =
		    L3HDR(struct tcphdr, mtod(args->m, struct ip *));

		if ((tcp->th_flags & TH_RST) == 0) {
			send_pkt(&args->f_id, ntohl(tcp->th_seq),
				 ntohl(tcp->th_ack), tcp->th_flags | TH_RST);
		}
		m_freem(args->m);
	} else {
		m_freem(args->m);
	}
	args->m = NULL;
}

/*
 * Given an ip_fw *, lookup_next_rule will return a pointer
 * to the next rule, which can be either the jump
 * target (for skipto instructions) or the next one in the list (in
 * all other cases including a missing jump target).
 * The result is also written to the "next_rule" field of the rule.
 * Backward jumps are not allowed, so we start looking from the next
 * rule...
 *
 * This never returns NULL -- in case we do not have an exact match,
 * the next rule is returned.  When the ruleset is changed,
 * the pointers are flushed so we are always correct.
 */
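/*
 * Example: if rule 500 carries "skipto 1000", the first packet that
 * reaches it walks forward from rule 500 to the first rule with
 * rulenum >= 1000 and caches that pointer in ->next_rule; subsequent
 * packets resume there directly until the ruleset changes.
 */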
static struct ip_fw *
lookup_next_rule(struct ip_fw *me)
{
	struct ip_fw *rule = NULL;
	ipfw_insn *cmd;

	/* look for action, in case it is a skipto */
	cmd = ACTION_PTR(me);
	if (cmd->opcode == O_LOG)
		cmd += F_LEN(cmd);
	if (cmd->opcode == O_SKIPTO) {
		for (rule = me->next; rule; rule = rule->next) {
			if (rule->rulenum >= cmd->arg1)
				break;
		}
	}
	if (rule == NULL)	/* failure or not a skipto */
		rule = me->next;
	me->next_rule = rule;
	return rule;
}

static int
_ipfw_match_uid(const struct ipfw_flow_id *fid, struct ifnet *oif,
		enum ipfw_opcodes opcode, uid_t uid)
{
	struct in_addr src_ip, dst_ip;
	struct inpcbinfo *pi;
	int wildcard;
	struct inpcb *pcb;

	if (fid->proto == IPPROTO_TCP) {
		wildcard = 0;
		pi = &tcbinfo[mycpuid];
	} else if (fid->proto == IPPROTO_UDP) {
		wildcard = 1;
		pi = &udbinfo;
	} else {
		return 0;
	}

	/*
	 * Values in 'fid' are in host byte order
	 */
	dst_ip.s_addr = htonl(fid->dst_ip);
	src_ip.s_addr = htonl(fid->src_ip);
	if (oif) {
		pcb = in_pcblookup_hash(pi,
			dst_ip, htons(fid->dst_port),
			src_ip, htons(fid->src_port),
			wildcard, oif);
	} else {
		pcb = in_pcblookup_hash(pi,
			src_ip, htons(fid->src_port),
			dst_ip, htons(fid->dst_port),
			wildcard, NULL);
	}
	if (pcb == NULL || pcb->inp_socket == NULL)
		return 0;

	if (opcode == O_UID) {
#define socheckuid(a, b)	((a)->so_cred->cr_uid != (b))
		return !socheckuid(pcb->inp_socket, uid);
#undef socheckuid
	} else {
		return groupmember(uid, pcb->inp_socket->so_cred);
	}
}

static int
ipfw_match_uid(const struct ipfw_flow_id *fid, struct ifnet *oif,
	       enum ipfw_opcodes opcode, uid_t uid, int *deny)
{
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	uint32_t gen;
	int match = 0;

	*deny = 0;
	gen = ctx->ipfw_gen;

	get_mplock();
	if (gen != ctx->ipfw_gen) {
		/* See the comment in lookup_rule() */
		*deny = 1;
	} else {
		match = _ipfw_match_uid(fid, oif, opcode, uid);
	}
	rel_mplock();
	return match;
}

/*
 * The main check routine for the firewall.
 *
 * All arguments are in args so we can modify them and return them
 * back to the caller.
 *
 * Parameters:
 *
 *	args->m		(in/out) The packet; we set to NULL when/if we
 *			nuke it.  Starts with the IP header.
 *	args->eh	(in) MAC header if present, or NULL for layer-3
 *			packet.
 *	args->oif	Outgoing interface, or NULL if packet is incoming.
 *			The incoming interface is in the mbuf. (in)
 *	args->rule	Pointer to the last matching rule (in/out)
 *	args->f_id	Addresses grabbed from the packet (out)
 *
 * Return value:
 *
 * If the packet was denied/rejected and has been dropped, *m is equal
 * to NULL upon return.
 *
 *	IP_FW_DENY	the packet must be dropped.
 *	IP_FW_PASS	The packet is to be accepted and routed normally.
 *	IP_FW_DIVERT	Divert the packet to port (args->cookie)
 *	IP_FW_TEE	Tee the packet to port (args->cookie)
 *	IP_FW_DUMMYNET	Send the packet to pipe/queue (args->cookie)
 */
static int
ipfw_chk(struct ip_fw_args *args)
{
1670 * 1671 * IMPORTANT NOTE: to speed up the processing of rules, there 1672 * are some assumption on the values of the variables, which 1673 * are documented here. Should you change them, please check 1674 * the implementation of the various instructions to make sure 1675 * that they still work. 1676 * 1677 * args->eh The MAC header. It is non-null for a layer2 1678 * packet, it is NULL for a layer-3 packet. 1679 * 1680 * m | args->m Pointer to the mbuf, as received from the caller. 1681 * It may change if ipfw_chk() does an m_pullup, or if it 1682 * consumes the packet because it calls send_reject(). 1683 * XXX This has to change, so that ipfw_chk() never modifies 1684 * or consumes the buffer. 1685 * ip is simply an alias of the value of m, and it is kept 1686 * in sync with it (the packet is supposed to start with 1687 * the ip header). 1688 */ 1689 struct mbuf *m = args->m; 1690 struct ip *ip = mtod(m, struct ip *); 1691 1692 /* 1693 * oif | args->oif If NULL, ipfw_chk has been called on the 1694 * inbound path (ether_input, ip_input). 1695 * If non-NULL, ipfw_chk has been called on the outbound path 1696 * (ether_output, ip_output). 1697 */ 1698 struct ifnet *oif = args->oif; 1699 1700 struct ip_fw *f = NULL; /* matching rule */ 1701 int retval = IP_FW_PASS; 1702 struct m_tag *mtag; 1703 struct divert_info *divinfo; 1704 1705 /* 1706 * hlen The length of the IPv4 header. 1707 * hlen >0 means we have an IPv4 packet. 1708 */ 1709 u_int hlen = 0; /* hlen >0 means we have an IP pkt */ 1710 1711 /* 1712 * offset The offset of a fragment. offset != 0 means that 1713 * we have a fragment at this offset of an IPv4 packet. 1714 * offset == 0 means that (if this is an IPv4 packet) 1715 * this is the first or only fragment. 1716 */ 1717 u_short offset = 0; 1718 1719 /* 1720 * Local copies of addresses. They are only valid if we have 1721 * an IP packet. 1722 * 1723 * proto The protocol. Set to 0 for non-ip packets, 1724 * or to the protocol read from the packet otherwise. 1725 * proto != 0 means that we have an IPv4 packet. 1726 * 1727 * src_port, dst_port port numbers, in HOST format. Only 1728 * valid for TCP and UDP packets. 1729 * 1730 * src_ip, dst_ip ip addresses, in NETWORK format. 1731 * Only valid for IPv4 packets. 1732 */ 1733 uint8_t proto; 1734 uint16_t src_port = 0, dst_port = 0; /* NOTE: host format */ 1735 struct in_addr src_ip, dst_ip; /* NOTE: network format */ 1736 uint16_t ip_len = 0; 1737 1738 /* 1739 * dyn_dir = MATCH_UNKNOWN when rules unchecked, 1740 * MATCH_NONE when checked and not matched (dyn_f = NULL), 1741 * MATCH_FORWARD or MATCH_REVERSE otherwise (dyn_f != NULL) 1742 */ 1743 int dyn_dir = MATCH_UNKNOWN; 1744 struct ip_fw *dyn_f = NULL; 1745 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 1746 1747 if (m->m_pkthdr.fw_flags & IPFW_MBUF_GENERATED) 1748 return IP_FW_PASS; /* accept */ 1749 1750 if (args->eh == NULL || /* layer 3 packet */ 1751 (m->m_pkthdr.len >= sizeof(struct ip) && 1752 ntohs(args->eh->ether_type) == ETHERTYPE_IP)) 1753 hlen = ip->ip_hl << 2; 1754 1755 /* 1756 * Collect parameters into local variables for faster matching. 
1757 */ 1758 if (hlen == 0) { /* do not grab addresses for non-ip pkts */ 1759 proto = args->f_id.proto = 0; /* mark f_id invalid */ 1760 goto after_ip_checks; 1761 } 1762 1763 proto = args->f_id.proto = ip->ip_p; 1764 src_ip = ip->ip_src; 1765 dst_ip = ip->ip_dst; 1766 if (args->eh != NULL) { /* layer 2 packets are as on the wire */ 1767 offset = ntohs(ip->ip_off) & IP_OFFMASK; 1768 ip_len = ntohs(ip->ip_len); 1769 } else { 1770 offset = ip->ip_off & IP_OFFMASK; 1771 ip_len = ip->ip_len; 1772 } 1773 1774 #define PULLUP_TO(len) \ 1775 do { \ 1776 if (m->m_len < (len)) { \ 1777 args->m = m = m_pullup(m, (len));\ 1778 if (m == NULL) \ 1779 goto pullup_failed; \ 1780 ip = mtod(m, struct ip *); \ 1781 } \ 1782 } while (0) 1783 1784 if (offset == 0) { 1785 switch (proto) { 1786 case IPPROTO_TCP: 1787 { 1788 struct tcphdr *tcp; 1789 1790 PULLUP_TO(hlen + sizeof(struct tcphdr)); 1791 tcp = L3HDR(struct tcphdr, ip); 1792 dst_port = tcp->th_dport; 1793 src_port = tcp->th_sport; 1794 args->f_id.flags = tcp->th_flags; 1795 } 1796 break; 1797 1798 case IPPROTO_UDP: 1799 { 1800 struct udphdr *udp; 1801 1802 PULLUP_TO(hlen + sizeof(struct udphdr)); 1803 udp = L3HDR(struct udphdr, ip); 1804 dst_port = udp->uh_dport; 1805 src_port = udp->uh_sport; 1806 } 1807 break; 1808 1809 case IPPROTO_ICMP: 1810 PULLUP_TO(hlen + 4); /* type, code and checksum. */ 1811 args->f_id.flags = L3HDR(struct icmp, ip)->icmp_type; 1812 break; 1813 1814 default: 1815 break; 1816 } 1817 } 1818 1819 #undef PULLUP_TO 1820 1821 args->f_id.src_ip = ntohl(src_ip.s_addr); 1822 args->f_id.dst_ip = ntohl(dst_ip.s_addr); 1823 args->f_id.src_port = src_port = ntohs(src_port); 1824 args->f_id.dst_port = dst_port = ntohs(dst_port); 1825 1826 after_ip_checks: 1827 if (args->rule) { 1828 /* 1829 * Packet has already been tagged. Look for the next rule 1830 * to restart processing. 1831 * 1832 * If fw_one_pass != 0 then just accept it. 1833 * XXX should not happen here, but optimized out in 1834 * the caller. 1835 */ 1836 if (fw_one_pass) 1837 return IP_FW_PASS; 1838 1839 /* This rule is being/has been flushed */ 1840 if (ipfw_flushing) 1841 return IP_FW_DENY; 1842 1843 KASSERT(args->rule->cpuid == mycpuid, 1844 ("rule used on cpu%d\n", mycpuid)); 1845 1846 /* This rule was deleted */ 1847 if (args->rule->rule_flags & IPFW_RULE_F_INVALID) 1848 return IP_FW_DENY; 1849 1850 f = args->rule->next_rule; 1851 if (f == NULL) 1852 f = lookup_next_rule(args->rule); 1853 } else { 1854 /* 1855 * Find the starting rule. It can be either the first 1856 * one, or the one after divert_rule if asked so. 1857 */ 1858 int skipto; 1859 1860 mtag = m_tag_find(m, PACKET_TAG_IPFW_DIVERT, NULL); 1861 if (mtag != NULL) { 1862 divinfo = m_tag_data(mtag); 1863 skipto = divinfo->skipto; 1864 } else { 1865 skipto = 0; 1866 } 1867 1868 f = ctx->ipfw_layer3_chain; 1869 if (args->eh == NULL && skipto != 0) { 1870 /* No skipto during rule flushing */ 1871 if (ipfw_flushing) 1872 return IP_FW_DENY; 1873 1874 if (skipto >= IPFW_DEFAULT_RULE) 1875 return IP_FW_DENY; /* invalid */ 1876 1877 while (f && f->rulenum <= skipto) 1878 f = f->next; 1879 if (f == NULL) /* drop packet */ 1880 return IP_FW_DENY; 1881 } else if (ipfw_flushing) { 1882 /* Rules are being flushed; skip to default rule */ 1883 f = ctx->ipfw_default_rule; 1884 } 1885 } 1886 if ((mtag = m_tag_find(m, PACKET_TAG_IPFW_DIVERT, NULL)) != NULL) 1887 m_tag_delete(m, mtag); 1888 1889 /* 1890 * Now scan the rules, and parse microinstructions for each rule. 
1891 */ 1892 for (; f; f = f->next) { 1893 int l, cmdlen; 1894 ipfw_insn *cmd; 1895 int skip_or; /* skip rest of OR block */ 1896 1897 again: 1898 if (ctx->ipfw_set_disable & (1 << f->set)) 1899 continue; 1900 1901 skip_or = 0; 1902 for (l = f->cmd_len, cmd = f->cmd; l > 0; 1903 l -= cmdlen, cmd += cmdlen) { 1904 int match, deny; 1905 1906 /* 1907 * check_body is a jump target used when we find a 1908 * CHECK_STATE, and need to jump to the body of 1909 * the target rule. 1910 */ 1911 1912 check_body: 1913 cmdlen = F_LEN(cmd); 1914 /* 1915 * An OR block (insn_1 || .. || insn_n) has the 1916 * F_OR bit set in all but the last instruction. 1917 * The first match will set "skip_or", and cause 1918 * the following instructions to be skipped until 1919 * past the one with the F_OR bit clear. 1920 */ 1921 if (skip_or) { /* skip this instruction */ 1922 if ((cmd->len & F_OR) == 0) 1923 skip_or = 0; /* next one is good */ 1924 continue; 1925 } 1926 match = 0; /* set to 1 if we succeed */ 1927 1928 switch (cmd->opcode) { 1929 /* 1930 * The first set of opcodes compares the packet's 1931 * fields with some pattern, setting 'match' if a 1932 * match is found. At the end of the loop there is 1933 * logic to deal with F_NOT and F_OR flags associated 1934 * with the opcode. 1935 */ 1936 case O_NOP: 1937 match = 1; 1938 break; 1939 1940 case O_FORWARD_MAC: 1941 kprintf("ipfw: opcode %d unimplemented\n", 1942 cmd->opcode); 1943 break; 1944 1945 case O_GID: 1946 case O_UID: 1947 /* 1948 * We only check offset == 0 && proto != 0, 1949 * as this ensures that we have an IPv4 1950 * packet with the ports info. 1951 */ 1952 if (offset!=0) 1953 break; 1954 1955 match = ipfw_match_uid(&args->f_id, oif, 1956 cmd->opcode, 1957 (uid_t)((ipfw_insn_u32 *)cmd)->d[0], 1958 &deny); 1959 if (deny) 1960 return IP_FW_DENY; 1961 break; 1962 1963 case O_RECV: 1964 match = iface_match(m->m_pkthdr.rcvif, 1965 (ipfw_insn_if *)cmd); 1966 break; 1967 1968 case O_XMIT: 1969 match = iface_match(oif, (ipfw_insn_if *)cmd); 1970 break; 1971 1972 case O_VIA: 1973 match = iface_match(oif ? oif : 1974 m->m_pkthdr.rcvif, (ipfw_insn_if *)cmd); 1975 break; 1976 1977 case O_MACADDR2: 1978 if (args->eh != NULL) { /* have MAC header */ 1979 uint32_t *want = (uint32_t *) 1980 ((ipfw_insn_mac *)cmd)->addr; 1981 uint32_t *mask = (uint32_t *) 1982 ((ipfw_insn_mac *)cmd)->mask; 1983 uint32_t *hdr = (uint32_t *)args->eh; 1984 1985 match = 1986 (want[0] == (hdr[0] & mask[0]) && 1987 want[1] == (hdr[1] & mask[1]) && 1988 want[2] == (hdr[2] & mask[2])); 1989 } 1990 break; 1991 1992 case O_MAC_TYPE: 1993 if (args->eh != NULL) { 1994 uint16_t t = 1995 ntohs(args->eh->ether_type); 1996 uint16_t *p = 1997 ((ipfw_insn_u16 *)cmd)->ports; 1998 int i; 1999 2000 /* Special vlan handling */ 2001 if (m->m_flags & M_VLANTAG) 2002 t = ETHERTYPE_VLAN; 2003 2004 for (i = cmdlen - 1; !match && i > 0; 2005 i--, p += 2) { 2006 match = 2007 (t >= p[0] && t <= p[1]); 2008 } 2009 } 2010 break; 2011 2012 case O_FRAG: 2013 match = (hlen > 0 && offset != 0); 2014 break; 2015 2016 case O_IN: /* "out" is "not in" */ 2017 match = (oif == NULL); 2018 break; 2019 2020 case O_LAYER2: 2021 match = (args->eh != NULL); 2022 break; 2023 2024 case O_PROTO: 2025 /* 2026 * We do not allow an arg of 0 so the 2027 * check of "proto" only suffices. 
2028 */ 2029 match = (proto == cmd->arg1); 2030 break; 2031 2032 case O_IP_SRC: 2033 match = (hlen > 0 && 2034 ((ipfw_insn_ip *)cmd)->addr.s_addr == 2035 src_ip.s_addr); 2036 break; 2037 2038 case O_IP_SRC_MASK: 2039 match = (hlen > 0 && 2040 ((ipfw_insn_ip *)cmd)->addr.s_addr == 2041 (src_ip.s_addr & 2042 ((ipfw_insn_ip *)cmd)->mask.s_addr)); 2043 break; 2044 2045 case O_IP_SRC_ME: 2046 if (hlen > 0) { 2047 struct ifnet *tif; 2048 2049 tif = INADDR_TO_IFP(&src_ip); 2050 match = (tif != NULL); 2051 } 2052 break; 2053 2054 case O_IP_DST_SET: 2055 case O_IP_SRC_SET: 2056 if (hlen > 0) { 2057 uint32_t *d = (uint32_t *)(cmd + 1); 2058 uint32_t addr = 2059 cmd->opcode == O_IP_DST_SET ? 2060 args->f_id.dst_ip : 2061 args->f_id.src_ip; 2062 2063 if (addr < d[0]) 2064 break; 2065 addr -= d[0]; /* subtract base */ 2066 match = 2067 (addr < cmd->arg1) && 2068 (d[1 + (addr >> 5)] & 2069 (1 << (addr & 0x1f))); 2070 } 2071 break; 2072 2073 case O_IP_DST: 2074 match = (hlen > 0 && 2075 ((ipfw_insn_ip *)cmd)->addr.s_addr == 2076 dst_ip.s_addr); 2077 break; 2078 2079 case O_IP_DST_MASK: 2080 match = (hlen > 0) && 2081 (((ipfw_insn_ip *)cmd)->addr.s_addr == 2082 (dst_ip.s_addr & 2083 ((ipfw_insn_ip *)cmd)->mask.s_addr)); 2084 break; 2085 2086 case O_IP_DST_ME: 2087 if (hlen > 0) { 2088 struct ifnet *tif; 2089 2090 tif = INADDR_TO_IFP(&dst_ip); 2091 match = (tif != NULL); 2092 } 2093 break; 2094 2095 case O_IP_SRCPORT: 2096 case O_IP_DSTPORT: 2097 /* 2098 * offset == 0 && proto != 0 is enough 2099 * to guarantee that we have an IPv4 2100 * packet with port info. 2101 */ 2102 if ((proto==IPPROTO_UDP || proto==IPPROTO_TCP) 2103 && offset == 0) { 2104 uint16_t x = 2105 (cmd->opcode == O_IP_SRCPORT) ? 2106 src_port : dst_port ; 2107 uint16_t *p = 2108 ((ipfw_insn_u16 *)cmd)->ports; 2109 int i; 2110 2111 for (i = cmdlen - 1; !match && i > 0; 2112 i--, p += 2) { 2113 match = 2114 (x >= p[0] && x <= p[1]); 2115 } 2116 } 2117 break; 2118 2119 case O_ICMPTYPE: 2120 match = (offset == 0 && proto==IPPROTO_ICMP && 2121 icmptype_match(ip, (ipfw_insn_u32 *)cmd)); 2122 break; 2123 2124 case O_IPOPT: 2125 match = (hlen > 0 && ipopts_match(ip, cmd)); 2126 break; 2127 2128 case O_IPVER: 2129 match = (hlen > 0 && cmd->arg1 == ip->ip_v); 2130 break; 2131 2132 case O_IPTTL: 2133 match = (hlen > 0 && cmd->arg1 == ip->ip_ttl); 2134 break; 2135 2136 case O_IPID: 2137 match = (hlen > 0 && 2138 cmd->arg1 == ntohs(ip->ip_id)); 2139 break; 2140 2141 case O_IPLEN: 2142 match = (hlen > 0 && cmd->arg1 == ip_len); 2143 break; 2144 2145 case O_IPPRECEDENCE: 2146 match = (hlen > 0 && 2147 (cmd->arg1 == (ip->ip_tos & 0xe0))); 2148 break; 2149 2150 case O_IPTOS: 2151 match = (hlen > 0 && 2152 flags_match(cmd, ip->ip_tos)); 2153 break; 2154 2155 case O_TCPFLAGS: 2156 match = (proto == IPPROTO_TCP && offset == 0 && 2157 flags_match(cmd, 2158 L3HDR(struct tcphdr,ip)->th_flags)); 2159 break; 2160 2161 case O_TCPOPTS: 2162 match = (proto == IPPROTO_TCP && offset == 0 && 2163 tcpopts_match(ip, cmd)); 2164 break; 2165 2166 case O_TCPSEQ: 2167 match = (proto == IPPROTO_TCP && offset == 0 && 2168 ((ipfw_insn_u32 *)cmd)->d[0] == 2169 L3HDR(struct tcphdr,ip)->th_seq); 2170 break; 2171 2172 case O_TCPACK: 2173 match = (proto == IPPROTO_TCP && offset == 0 && 2174 ((ipfw_insn_u32 *)cmd)->d[0] == 2175 L3HDR(struct tcphdr,ip)->th_ack); 2176 break; 2177 2178 case O_TCPWIN: 2179 match = (proto == IPPROTO_TCP && offset == 0 && 2180 cmd->arg1 == 2181 L3HDR(struct tcphdr,ip)->th_win); 2182 break; 2183 2184 case O_ESTAB: 2185 /* reject packets which have SYN only 
*/ 2186 /* XXX should I also check for TH_ACK ? */ 2187 match = (proto == IPPROTO_TCP && offset == 0 && 2188 (L3HDR(struct tcphdr,ip)->th_flags & 2189 (TH_RST | TH_ACK | TH_SYN)) != TH_SYN); 2190 break; 2191 2192 case O_LOG: 2193 if (fw_verbose) 2194 ipfw_log(f, hlen, args->eh, m, oif); 2195 match = 1; 2196 break; 2197 2198 case O_PROB: 2199 match = (krandom() < 2200 ((ipfw_insn_u32 *)cmd)->d[0]); 2201 break; 2202 2203 /* 2204 * The second set of opcodes represents 'actions', 2205 * i.e. the terminal part of a rule once the packet 2206 * matches all previous patterns. 2207 * Typically there is only one action for each rule, 2208 * and the opcode is stored at the end of the rule 2209 * (but there are exceptions -- see below). 2210 * 2211 * In general, here we set retval and terminate the 2212 * outer loop (would be a 'break 3' in some languages, 2213 * but we need to do a 'goto done'). 2214 * 2215 * Exceptions: 2216 * O_COUNT and O_SKIPTO actions: 2217 * instead of terminating, we jump to the next rule 2218 * ('goto next_rule', equivalent to a 'break 2'), 2219 * or to the SKIPTO target ('goto again' after 2220 * having set f, cmd and l), respectively. 2221 * 2222 * O_LIMIT and O_KEEP_STATE: these opcodes are 2223 * not real 'actions', and are stored right 2224 * before the 'action' part of the rule. 2225 * These opcodes try to install an entry in the 2226 * state tables; if successful, we continue with 2227 * the next opcode (match=1; break;), otherwise 2228 * the packet must be dropped ('goto done' after 2229 * setting retval). If static rules are changed 2230 * during the state installation, the packet will 2231 * be dropped and the rule's stats will not be updated 2232 * ('return IP_FW_DENY'). 2233 * 2234 * O_PROBE_STATE and O_CHECK_STATE: these opcodes 2235 * cause a lookup of the state table, and a jump 2236 * to the 'action' part of the parent rule 2237 * ('goto check_body') if an entry is found, or 2238 * (CHECK_STATE only) a jump to the next rule if 2239 * the entry is not found ('goto next_rule'). 2240 * The result of the lookup is cached, so that 2241 * further instances of these opcodes become 2242 * effectively NOPs. If static rules are changed 2243 * during the state lookup, the packet will 2244 * be dropped and the rule's stats will not be updated 2245 * ('return IP_FW_DENY'). 2246 */ 2247 case O_LIMIT: 2248 case O_KEEP_STATE: 2249 if (!(f->rule_flags & IPFW_RULE_F_STATE)) { 2250 kprintf("%s rule (%d) is not ready " 2251 "on cpu%d\n", 2252 cmd->opcode == O_LIMIT ? 2253 "limit" : "keep state", 2254 f->rulenum, f->cpuid); 2255 goto next_rule; 2256 } 2257 if (install_state(f, 2258 (ipfw_insn_limit *)cmd, args, &deny)) { 2259 if (deny) 2260 return IP_FW_DENY; 2261 2262 retval = IP_FW_DENY; 2263 goto done; /* error/limit violation */ 2264 } 2265 if (deny) 2266 return IP_FW_DENY; 2267 match = 1; 2268 break; 2269 2270 case O_PROBE_STATE: 2271 case O_CHECK_STATE: 2272 /* 2273 * dynamic rules are checked at the first 2274 * keep-state or check-state occurrence, 2275 * with the result being stored in dyn_dir. 2276 * The compiler introduces a PROBE_STATE 2277 * instruction for us when we have a 2278 * KEEP_STATE (because PROBE_STATE needs 2279 * to be run first). 2280 */ 2281 if (dyn_dir == MATCH_UNKNOWN) { 2282 dyn_f = lookup_rule(&args->f_id, 2283 &dyn_dir, 2284 proto == IPPROTO_TCP ?
2285 L3HDR(struct tcphdr, ip) : NULL, 2286 ip_len, &deny); 2287 if (deny) 2288 return IP_FW_DENY; 2289 if (dyn_f != NULL) { 2290 /* 2291 * Found a rule from a dynamic 2292 * entry; jump to the 'action' 2293 * part of the rule. 2294 */ 2295 f = dyn_f; 2296 cmd = ACTION_PTR(f); 2297 l = f->cmd_len - f->act_ofs; 2298 goto check_body; 2299 } 2300 } 2301 /* 2302 * Dynamic entry not found. If CHECK_STATE, 2303 * skip to next rule, if PROBE_STATE just 2304 * ignore and continue with next opcode. 2305 */ 2306 if (cmd->opcode == O_CHECK_STATE) 2307 goto next_rule; 2308 else if (!(f->rule_flags & IPFW_RULE_F_STATE)) 2309 goto next_rule; /* not ready yet */ 2310 match = 1; 2311 break; 2312 2313 case O_ACCEPT: 2314 retval = IP_FW_PASS; /* accept */ 2315 goto done; 2316 2317 case O_PIPE: 2318 case O_QUEUE: 2319 args->rule = f; /* report matching rule */ 2320 args->cookie = cmd->arg1; 2321 retval = IP_FW_DUMMYNET; 2322 goto done; 2323 2324 case O_DIVERT: 2325 case O_TEE: 2326 if (args->eh) /* not on layer 2 */ 2327 break; 2328 2329 mtag = m_tag_get(PACKET_TAG_IPFW_DIVERT, 2330 sizeof(*divinfo), MB_DONTWAIT); 2331 if (mtag == NULL) { 2332 retval = IP_FW_DENY; 2333 goto done; 2334 } 2335 divinfo = m_tag_data(mtag); 2336 2337 divinfo->skipto = f->rulenum; 2338 divinfo->port = cmd->arg1; 2339 divinfo->tee = (cmd->opcode == O_TEE); 2340 m_tag_prepend(m, mtag); 2341 2342 args->cookie = cmd->arg1; 2343 retval = (cmd->opcode == O_DIVERT) ? 2344 IP_FW_DIVERT : IP_FW_TEE; 2345 goto done; 2346 2347 case O_COUNT: 2348 case O_SKIPTO: 2349 f->pcnt++; /* update stats */ 2350 f->bcnt += ip_len; 2351 f->timestamp = time_second; 2352 if (cmd->opcode == O_COUNT) 2353 goto next_rule; 2354 /* handle skipto */ 2355 if (f->next_rule == NULL) 2356 lookup_next_rule(f); 2357 f = f->next_rule; 2358 goto again; 2359 2360 case O_REJECT: 2361 /* 2362 * Drop the packet and send a reject notice 2363 * if the packet is not ICMP (or is an ICMP 2364 * query), and it is not multicast/broadcast. 2365 */ 2366 if (hlen > 0 && 2367 (proto != IPPROTO_ICMP || 2368 is_icmp_query(ip)) && 2369 !(m->m_flags & (M_BCAST|M_MCAST)) && 2370 !IN_MULTICAST(ntohl(dst_ip.s_addr))) { 2371 /* 2372 * Update statistics before the possible 2373 * blocking 'send_reject' 2374 */ 2375 f->pcnt++; 2376 f->bcnt += ip_len; 2377 f->timestamp = time_second; 2378 2379 send_reject(args, cmd->arg1, 2380 offset,ip_len); 2381 m = args->m; 2382 2383 /* 2384 * Return directly here, rule stats 2385 * have been updated above. 
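 * (returning through 'done' would update them a second time)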
2386 */ 2387 return IP_FW_DENY; 2388 } 2389 /* FALLTHROUGH */ 2390 case O_DENY: 2391 retval = IP_FW_DENY; 2392 goto done; 2393 2394 case O_FORWARD_IP: 2395 if (args->eh) /* not valid on layer2 pkts */ 2396 break; 2397 if (!dyn_f || dyn_dir == MATCH_FORWARD) { 2398 struct sockaddr_in *sin; 2399 2400 mtag = m_tag_get(PACKET_TAG_IPFORWARD, 2401 sizeof(*sin), MB_DONTWAIT); 2402 if (mtag == NULL) { 2403 retval = IP_FW_DENY; 2404 goto done; 2405 } 2406 sin = m_tag_data(mtag); 2407 2408 /* Structure copy */ 2409 *sin = ((ipfw_insn_sa *)cmd)->sa; 2410 2411 m_tag_prepend(m, mtag); 2412 m->m_pkthdr.fw_flags |= 2413 IPFORWARD_MBUF_TAGGED; 2414 m->m_pkthdr.fw_flags &= 2415 ~BRIDGE_MBUF_TAGGED; 2416 } 2417 retval = IP_FW_PASS; 2418 goto done; 2419 2420 default: 2421 panic("-- unknown opcode %d\n", cmd->opcode); 2422 } /* end of switch() on opcodes */ 2423 2424 if (cmd->len & F_NOT) 2425 match = !match; 2426 2427 if (match) { 2428 if (cmd->len & F_OR) 2429 skip_or = 1; 2430 } else { 2431 if (!(cmd->len & F_OR)) /* not an OR block, */ 2432 break; /* try next rule */ 2433 } 2434 2435 } /* end of inner for, scan opcodes */ 2436 2437 next_rule:; /* try next rule */ 2438 2439 } /* end of outer for, scan rules */ 2440 kprintf("+++ ipfw: ouch!, skip past end of rules, denying packet\n"); 2441 return IP_FW_DENY; 2442 2443 done: 2444 /* Update statistics */ 2445 f->pcnt++; 2446 f->bcnt += ip_len; 2447 f->timestamp = time_second; 2448 return retval; 2449 2450 pullup_failed: 2451 if (fw_verbose) 2452 kprintf("pullup failed\n"); 2453 return IP_FW_DENY; 2454 } 2455 2456 static void 2457 ipfw_dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa) 2458 { 2459 struct m_tag *mtag; 2460 struct dn_pkt *pkt; 2461 ipfw_insn *cmd; 2462 const struct ipfw_flow_id *id; 2463 struct dn_flow_id *fid; 2464 2465 M_ASSERTPKTHDR(m); 2466 2467 mtag = m_tag_get(PACKET_TAG_DUMMYNET, sizeof(*pkt), MB_DONTWAIT); 2468 if (mtag == NULL) { 2469 m_freem(m); 2470 return; 2471 } 2472 m_tag_prepend(m, mtag); 2473 2474 pkt = m_tag_data(mtag); 2475 bzero(pkt, sizeof(*pkt)); 2476 2477 cmd = fwa->rule->cmd + fwa->rule->act_ofs; 2478 if (cmd->opcode == O_LOG) 2479 cmd += F_LEN(cmd); 2480 KASSERT(cmd->opcode == O_PIPE || cmd->opcode == O_QUEUE, 2481 ("Rule is not PIPE or QUEUE, opcode %d\n", cmd->opcode)); 2482 2483 pkt->dn_m = m; 2484 pkt->dn_flags = (dir & DN_FLAGS_DIR_MASK); 2485 pkt->ifp = fwa->oif; 2486 pkt->pipe_nr = pipe_nr; 2487 2488 pkt->cpuid = mycpuid; 2489 pkt->msgport = cur_netport(); 2490 2491 id = &fwa->f_id; 2492 fid = &pkt->id; 2493 fid->fid_dst_ip = id->dst_ip; 2494 fid->fid_src_ip = id->src_ip; 2495 fid->fid_dst_port = id->dst_port; 2496 fid->fid_src_port = id->src_port; 2497 fid->fid_proto = id->proto; 2498 fid->fid_flags = id->flags; 2499 2500 ipfw_ref_rule(fwa->rule); 2501 pkt->dn_priv = fwa->rule; 2502 pkt->dn_unref_priv = ipfw_unref_rule; 2503 2504 if (cmd->opcode == O_PIPE) 2505 pkt->dn_flags |= DN_FLAGS_IS_PIPE; 2506 2507 m->m_pkthdr.fw_flags |= DUMMYNET_MBUF_TAGGED; 2508 } 2509 2510 /* 2511 * When a rule is added/deleted, clear the next_rule pointers in all rules. 2512 * These will be reconstructed on the fly as packets are matched. 2513 * Must be called at splimp(). 
2514 */ 2515 static void 2516 ipfw_flush_rule_ptrs(struct ipfw_context *ctx) 2517 { 2518 struct ip_fw *rule; 2519 2520 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) 2521 rule->next_rule = NULL; 2522 } 2523 2524 static __inline void 2525 ipfw_inc_static_count(struct ip_fw *rule) 2526 { 2527 /* Static rule's counts are updated only on CPU0 */ 2528 KKASSERT(mycpuid == 0); 2529 2530 static_count++; 2531 static_ioc_len += IOC_RULESIZE(rule); 2532 } 2533 2534 static __inline void 2535 ipfw_dec_static_count(struct ip_fw *rule) 2536 { 2537 int l = IOC_RULESIZE(rule); 2538 2539 /* Static rule's counts are updated only on CPU0 */ 2540 KKASSERT(mycpuid == 0); 2541 2542 KASSERT(static_count > 0, ("invalid static count %u\n", static_count)); 2543 static_count--; 2544 2545 KASSERT(static_ioc_len >= l, 2546 ("invalid static len %u\n", static_ioc_len)); 2547 static_ioc_len -= l; 2548 } 2549 2550 static void 2551 ipfw_link_sibling(struct netmsg_ipfw *fwmsg, struct ip_fw *rule) 2552 { 2553 if (fwmsg->sibling != NULL) { 2554 KKASSERT(mycpuid > 0 && fwmsg->sibling->cpuid == mycpuid - 1); 2555 fwmsg->sibling->sibling = rule; 2556 } 2557 fwmsg->sibling = rule; 2558 } 2559 2560 static struct ip_fw * 2561 ipfw_create_rule(const struct ipfw_ioc_rule *ioc_rule, struct ip_fw_stub *stub) 2562 { 2563 struct ip_fw *rule; 2564 2565 rule = kmalloc(RULESIZE(ioc_rule), M_IPFW, M_WAITOK | M_ZERO); 2566 2567 rule->act_ofs = ioc_rule->act_ofs; 2568 rule->cmd_len = ioc_rule->cmd_len; 2569 rule->rulenum = ioc_rule->rulenum; 2570 rule->set = ioc_rule->set; 2571 rule->usr_flags = ioc_rule->usr_flags; 2572 2573 bcopy(ioc_rule->cmd, rule->cmd, rule->cmd_len * 4 /* XXX */); 2574 2575 rule->refcnt = 1; 2576 rule->cpuid = mycpuid; 2577 2578 rule->stub = stub; 2579 if (stub != NULL) 2580 stub->rule[mycpuid] = rule; 2581 2582 return rule; 2583 } 2584 2585 static void 2586 ipfw_add_rule_dispatch(netmsg_t nmsg) 2587 { 2588 struct netmsg_ipfw *fwmsg = (struct netmsg_ipfw *)nmsg; 2589 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 2590 struct ip_fw *rule; 2591 2592 rule = ipfw_create_rule(fwmsg->ioc_rule, fwmsg->stub); 2593 2594 /* 2595 * Bump generation after ipfw_create_rule(), 2596 * since this function is blocking 2597 */ 2598 ctx->ipfw_gen++; 2599 2600 /* 2601 * Insert rule into the pre-determined position 2602 */ 2603 if (fwmsg->prev_rule != NULL) { 2604 struct ip_fw *prev, *next; 2605 2606 prev = fwmsg->prev_rule; 2607 KKASSERT(prev->cpuid == mycpuid); 2608 2609 next = fwmsg->next_rule; 2610 KKASSERT(next->cpuid == mycpuid); 2611 2612 rule->next = next; 2613 prev->next = rule; 2614 2615 /* 2616 * Move to the position on the next CPU 2617 * before the msg is forwarded. 
2618 */ 2619 fwmsg->prev_rule = prev->sibling; 2620 fwmsg->next_rule = next->sibling; 2621 } else { 2622 KKASSERT(fwmsg->next_rule == NULL); 2623 rule->next = ctx->ipfw_layer3_chain; 2624 ctx->ipfw_layer3_chain = rule; 2625 } 2626 2627 /* Link rule CPU sibling */ 2628 ipfw_link_sibling(fwmsg, rule); 2629 2630 ipfw_flush_rule_ptrs(ctx); 2631 2632 if (mycpuid == 0) { 2633 /* Statistics only need to be updated once */ 2634 ipfw_inc_static_count(rule); 2635 2636 /* Return the rule on CPU0 */ 2637 nmsg->lmsg.u.ms_resultp = rule; 2638 } 2639 2640 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1); 2641 } 2642 2643 static void 2644 ipfw_enable_state_dispatch(netmsg_t nmsg) 2645 { 2646 struct lwkt_msg *lmsg = &nmsg->lmsg; 2647 struct ip_fw *rule = lmsg->u.ms_resultp; 2648 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 2649 2650 ctx->ipfw_gen++; 2651 2652 KKASSERT(rule->cpuid == mycpuid); 2653 KKASSERT(rule->stub != NULL && rule->stub->rule[mycpuid] == rule); 2654 KKASSERT(!(rule->rule_flags & IPFW_RULE_F_STATE)); 2655 rule->rule_flags |= IPFW_RULE_F_STATE; 2656 lmsg->u.ms_resultp = rule->sibling; 2657 2658 ifnet_forwardmsg(lmsg, mycpuid + 1); 2659 } 2660 2661 /* 2662 * Add a new rule to the list. Copy the rule into a malloc'ed area, 2663 * then possibly create a rule number and add the rule to the list. 2664 * Update the rule_number in the input struct so the caller knows 2665 * it as well. 2666 */ 2667 static void 2668 ipfw_add_rule(struct ipfw_ioc_rule *ioc_rule, uint32_t rule_flags) 2669 { 2670 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 2671 struct netmsg_ipfw fwmsg; 2672 struct netmsg_base *nmsg; 2673 struct ip_fw *f, *prev, *rule; 2674 struct ip_fw_stub *stub; 2675 2676 IPFW_ASSERT_CFGPORT(&curthread->td_msgport); 2677 2678 /* 2679 * If rulenum is 0, find highest numbered rule before the 2680 * default rule, and add rule number incremental step. 2681 */ 2682 if (ioc_rule->rulenum == 0) { 2683 int step = autoinc_step; 2684 2685 KKASSERT(step >= IPFW_AUTOINC_STEP_MIN && 2686 step <= IPFW_AUTOINC_STEP_MAX); 2687 2688 /* 2689 * Locate the highest numbered rule before default 2690 */ 2691 for (f = ctx->ipfw_layer3_chain; f; f = f->next) { 2692 if (f->rulenum == IPFW_DEFAULT_RULE) 2693 break; 2694 ioc_rule->rulenum = f->rulenum; 2695 } 2696 if (ioc_rule->rulenum < IPFW_DEFAULT_RULE - step) 2697 ioc_rule->rulenum += step; 2698 } 2699 KASSERT(ioc_rule->rulenum != IPFW_DEFAULT_RULE && 2700 ioc_rule->rulenum != 0, 2701 ("invalid rule num %d\n", ioc_rule->rulenum)); 2702 2703 /* 2704 * Now find the right place for the new rule in the sorted list. 2705 */ 2706 for (prev = NULL, f = ctx->ipfw_layer3_chain; f; 2707 prev = f, f = f->next) { 2708 if (f->rulenum > ioc_rule->rulenum) { 2709 /* Found the location */ 2710 break; 2711 } 2712 } 2713 KASSERT(f != NULL, ("no default rule?!\n")); 2714 2715 if (rule_flags & IPFW_RULE_F_STATE) { 2716 int size; 2717 2718 /* 2719 * If the new rule will create states, then allocate 2720 * a rule stub, which will be referenced by states 2721 * (dyn rules) 2722 */ 2723 size = sizeof(*stub) + ((ncpus - 1) * sizeof(struct ip_fw *)); 2724 stub = kmalloc(size, M_IPFW, M_WAITOK | M_ZERO); 2725 } else { 2726 stub = NULL; 2727 } 2728 2729 /* 2730 * Duplicate the rule onto each CPU. 2731 * The rule duplicated on CPU0 will be returned. 2732 */ 2733 bzero(&fwmsg, sizeof(fwmsg)); 2734 nmsg = &fwmsg.base; 2735 netmsg_init(nmsg, NULL, &curthread->td_msgport, 2736 0, ipfw_add_rule_dispatch); 2737 fwmsg.ioc_rule = ioc_rule; 2738 fwmsg.prev_rule = prev; 2739 fwmsg.next_rule = prev == NULL ? 
NULL : f; 2740 fwmsg.stub = stub; 2741 2742 ifnet_domsg(&nmsg->lmsg, 0); 2743 KKASSERT(fwmsg.prev_rule == NULL && fwmsg.next_rule == NULL); 2744 2745 rule = nmsg->lmsg.u.ms_resultp; 2746 KKASSERT(rule != NULL && rule->cpuid == mycpuid); 2747 2748 if (rule_flags & IPFW_RULE_F_STATE) { 2749 /* 2750 * Turn on the state flag, _after_ everything on all 2751 * CPUs has been set up. 2752 */ 2753 bzero(nmsg, sizeof(*nmsg)); 2754 netmsg_init(nmsg, NULL, &curthread->td_msgport, 2755 0, ipfw_enable_state_dispatch); 2756 nmsg->lmsg.u.ms_resultp = rule; 2757 2758 ifnet_domsg(&nmsg->lmsg, 0); 2759 KKASSERT(nmsg->lmsg.u.ms_resultp == NULL); 2760 } 2761 2762 DPRINTF("++ installed rule %d, static count now %d\n", 2763 rule->rulenum, static_count); 2764 } 2765 2766 /** 2767 * Free storage associated with a static rule (including derived 2768 * dynamic rules). 2769 * The caller is in charge of clearing rule pointers to avoid 2770 * dangling pointers. 2771 * @return a pointer to the next entry. 2772 * Arguments are not checked, so they had better be correct. 2773 * Must be called at splimp(). 2774 */ 2775 static struct ip_fw * 2776 ipfw_delete_rule(struct ipfw_context *ctx, 2777 struct ip_fw *prev, struct ip_fw *rule) 2778 { 2779 struct ip_fw *n; 2780 struct ip_fw_stub *stub; 2781 2782 ctx->ipfw_gen++; 2783 2784 /* STATE flag should have been cleared before we reach here */ 2785 KKASSERT((rule->rule_flags & IPFW_RULE_F_STATE) == 0); 2786 2787 stub = rule->stub; 2788 n = rule->next; 2789 if (prev == NULL) 2790 ctx->ipfw_layer3_chain = n; 2791 else 2792 prev->next = n; 2793 2794 /* Mark the rule as invalid */ 2795 rule->rule_flags |= IPFW_RULE_F_INVALID; 2796 rule->next_rule = NULL; 2797 rule->sibling = NULL; 2798 rule->stub = NULL; 2799 #ifdef foo 2800 /* Don't reset cpuid here; keep various assertions working */ 2801 rule->cpuid = -1; 2802 #endif 2803 2804 /* Statistics only need to be updated once */ 2805 if (mycpuid == 0) 2806 ipfw_dec_static_count(rule); 2807 2808 /* Free 'stub' on the last CPU */ 2809 if (stub != NULL && mycpuid == ncpus - 1) 2810 kfree(stub, M_IPFW); 2811 2812 /* Try to free this rule */ 2813 ipfw_free_rule(rule); 2814 2815 /* Return the next rule */ 2816 return n; 2817 } 2818 2819 static void 2820 ipfw_flush_dispatch(netmsg_t nmsg) 2821 { 2822 struct lwkt_msg *lmsg = &nmsg->lmsg; 2823 int kill_default = lmsg->u.ms_result; 2824 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 2825 struct ip_fw *rule; 2826 2827 ipfw_flush_rule_ptrs(ctx); /* more efficient to do outside the loop */ 2828 2829 while ((rule = ctx->ipfw_layer3_chain) != NULL && 2830 (kill_default || rule->rulenum != IPFW_DEFAULT_RULE)) 2831 ipfw_delete_rule(ctx, NULL, rule); 2832 2833 ifnet_forwardmsg(lmsg, mycpuid + 1); 2834 } 2835 2836 static void 2837 ipfw_disable_rule_state_dispatch(netmsg_t nmsg) 2838 { 2839 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg; 2840 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 2841 struct ip_fw *rule; 2842 2843 ctx->ipfw_gen++; 2844 2845 rule = dmsg->start_rule; 2846 if (rule != NULL) { 2847 KKASSERT(rule->cpuid == mycpuid); 2848 2849 /* 2850 * Move to the position on the next CPU 2851 * before the msg is forwarded.
2852 */ 2853 dmsg->start_rule = rule->sibling; 2854 } else { 2855 KKASSERT(dmsg->rulenum == 0); 2856 rule = ctx->ipfw_layer3_chain; 2857 } 2858 2859 while (rule != NULL) { 2860 if (dmsg->rulenum && rule->rulenum != dmsg->rulenum) 2861 break; 2862 rule->rule_flags &= ~IPFW_RULE_F_STATE; 2863 rule = rule->next; 2864 } 2865 2866 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1); 2867 } 2868 2869 /* 2870 * Deletes all rules from a chain (including the default rule 2871 * if the second argument is set). 2872 * Must be called at splimp(). 2873 */ 2874 static void 2875 ipfw_flush(int kill_default) 2876 { 2877 struct netmsg_del dmsg; 2878 struct netmsg_base nmsg; 2879 struct lwkt_msg *lmsg; 2880 struct ip_fw *rule; 2881 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 2882 2883 IPFW_ASSERT_CFGPORT(&curthread->td_msgport); 2884 2885 /* 2886 * If 'kill_default' then caller has done the necessary 2887 * msgport syncing; unnecessary to do it again. 2888 */ 2889 if (!kill_default) { 2890 /* 2891 * Let ipfw_chk() know the rules are going to 2892 * be flushed, so it could jump directly to 2893 * the default rule. 2894 */ 2895 ipfw_flushing = 1; 2896 netmsg_service_sync(); 2897 } 2898 2899 /* 2900 * Clear STATE flag on rules, so no more states (dyn rules) 2901 * will be created. 2902 */ 2903 bzero(&dmsg, sizeof(dmsg)); 2904 netmsg_init(&dmsg.base, NULL, &curthread->td_msgport, 2905 0, ipfw_disable_rule_state_dispatch); 2906 ifnet_domsg(&dmsg.base.lmsg, 0); 2907 2908 /* 2909 * This actually nukes all states (dyn rules) 2910 */ 2911 lockmgr(&dyn_lock, LK_EXCLUSIVE); 2912 for (rule = ctx->ipfw_layer3_chain; rule != NULL; rule = rule->next) { 2913 /* 2914 * Can't check IPFW_RULE_F_STATE here, 2915 * since it has been cleared previously. 2916 * Check 'stub' instead. 2917 */ 2918 if (rule->stub != NULL) { 2919 /* Force removal */ 2920 remove_dyn_rule_locked(rule, NULL); 2921 } 2922 } 2923 lockmgr(&dyn_lock, LK_RELEASE); 2924 2925 /* 2926 * Press the 'flush' button 2927 */ 2928 bzero(&nmsg, sizeof(nmsg)); 2929 netmsg_init(&nmsg, NULL, &curthread->td_msgport, 2930 0, ipfw_flush_dispatch); 2931 lmsg = &nmsg.lmsg; 2932 lmsg->u.ms_result = kill_default; 2933 ifnet_domsg(lmsg, 0); 2934 2935 KASSERT(dyn_count == 0, ("%u dyn rule remains\n", dyn_count)); 2936 2937 if (kill_default) { 2938 if (ipfw_dyn_v != NULL) { 2939 /* 2940 * Free dynamic rules(state) hash table 2941 */ 2942 kfree(ipfw_dyn_v, M_IPFW); 2943 ipfw_dyn_v = NULL; 2944 } 2945 2946 KASSERT(static_count == 0, 2947 ("%u static rules remains\n", static_count)); 2948 KASSERT(static_ioc_len == 0, 2949 ("%u bytes of static rules remains\n", static_ioc_len)); 2950 } else { 2951 KASSERT(static_count == 1, 2952 ("%u static rules remains\n", static_count)); 2953 KASSERT(static_ioc_len == IOC_RULESIZE(ctx->ipfw_default_rule), 2954 ("%u bytes of static rules remains, should be %lu\n", 2955 static_ioc_len, 2956 (u_long)IOC_RULESIZE(ctx->ipfw_default_rule))); 2957 } 2958 2959 /* Flush is done */ 2960 ipfw_flushing = 0; 2961 } 2962 2963 static void 2964 ipfw_alt_delete_rule_dispatch(netmsg_t nmsg) 2965 { 2966 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg; 2967 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 2968 struct ip_fw *rule, *prev; 2969 2970 rule = dmsg->start_rule; 2971 KKASSERT(rule->cpuid == mycpuid); 2972 dmsg->start_rule = rule->sibling; 2973 2974 prev = dmsg->prev_rule; 2975 if (prev != NULL) { 2976 KKASSERT(prev->cpuid == mycpuid); 2977 2978 /* 2979 * Move to the position on the next CPU 2980 * before the msg is forwarded. 
2981 */ 2982 dmsg->prev_rule = prev->sibling; 2983 } 2984 2985 /* 2986 * flush pointers outside the loop, then delete all matching 2987 * rules. 'prev' remains the same throughout the cycle. 2988 */ 2989 ipfw_flush_rule_ptrs(ctx); 2990 while (rule && rule->rulenum == dmsg->rulenum) 2991 rule = ipfw_delete_rule(ctx, prev, rule); 2992 2993 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1); 2994 } 2995 2996 static int 2997 ipfw_alt_delete_rule(uint16_t rulenum) 2998 { 2999 struct ip_fw *prev, *rule, *f; 3000 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 3001 struct netmsg_del dmsg; 3002 struct netmsg_base *nmsg; 3003 int state; 3004 3005 /* 3006 * Locate first rule to delete 3007 */ 3008 for (prev = NULL, rule = ctx->ipfw_layer3_chain; 3009 rule && rule->rulenum < rulenum; 3010 prev = rule, rule = rule->next) 3011 ; /* EMPTY */ 3012 if (rule->rulenum != rulenum) 3013 return EINVAL; 3014 3015 /* 3016 * Check whether any rules with the given number will 3017 * create states. 3018 */ 3019 state = 0; 3020 for (f = rule; f && f->rulenum == rulenum; f = f->next) { 3021 if (f->rule_flags & IPFW_RULE_F_STATE) { 3022 state = 1; 3023 break; 3024 } 3025 } 3026 3027 if (state) { 3028 /* 3029 * Clear the STATE flag, so no more states will be 3030 * created based the rules numbered 'rulenum'. 3031 */ 3032 bzero(&dmsg, sizeof(dmsg)); 3033 nmsg = &dmsg.base; 3034 netmsg_init(nmsg, NULL, &curthread->td_msgport, 3035 0, ipfw_disable_rule_state_dispatch); 3036 dmsg.start_rule = rule; 3037 dmsg.rulenum = rulenum; 3038 3039 ifnet_domsg(&nmsg->lmsg, 0); 3040 KKASSERT(dmsg.start_rule == NULL); 3041 3042 /* 3043 * Nuke all related states 3044 */ 3045 lockmgr(&dyn_lock, LK_EXCLUSIVE); 3046 for (f = rule; f && f->rulenum == rulenum; f = f->next) { 3047 /* 3048 * Can't check IPFW_RULE_F_STATE here, 3049 * since it has been cleared previously. 3050 * Check 'stub' instead. 
3051 */ 3052 if (f->stub != NULL) { 3053 /* Force removal */ 3054 remove_dyn_rule_locked(f, NULL); 3055 } 3056 } 3057 lockmgr(&dyn_lock, LK_RELEASE); 3058 } 3059 3060 /* 3061 * Get rid of the rule duplications on all CPUs 3062 */ 3063 bzero(&dmsg, sizeof(dmsg)); 3064 nmsg = &dmsg.base; 3065 netmsg_init(nmsg, NULL, &curthread->td_msgport, 3066 0, ipfw_alt_delete_rule_dispatch); 3067 dmsg.prev_rule = prev; 3068 dmsg.start_rule = rule; 3069 dmsg.rulenum = rulenum; 3070 3071 ifnet_domsg(&nmsg->lmsg, 0); 3072 KKASSERT(dmsg.prev_rule == NULL && dmsg.start_rule == NULL); 3073 return 0; 3074 } 3075 3076 static void 3077 ipfw_alt_delete_ruleset_dispatch(netmsg_t nmsg) 3078 { 3079 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg; 3080 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 3081 struct ip_fw *prev, *rule; 3082 #ifdef INVARIANTS 3083 int del = 0; 3084 #endif 3085 3086 ipfw_flush_rule_ptrs(ctx); 3087 3088 prev = NULL; 3089 rule = ctx->ipfw_layer3_chain; 3090 while (rule != NULL) { 3091 if (rule->set == dmsg->from_set) { 3092 rule = ipfw_delete_rule(ctx, prev, rule); 3093 #ifdef INVARIANTS 3094 del = 1; 3095 #endif 3096 } else { 3097 prev = rule; 3098 rule = rule->next; 3099 } 3100 } 3101 KASSERT(del, ("no match set?!\n")); 3102 3103 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1); 3104 } 3105 3106 static void 3107 ipfw_disable_ruleset_state_dispatch(netmsg_t nmsg) 3108 { 3109 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg; 3110 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 3111 struct ip_fw *rule; 3112 #ifdef INVARIANTS 3113 int cleared = 0; 3114 #endif 3115 3116 ctx->ipfw_gen++; 3117 3118 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) { 3119 if (rule->set == dmsg->from_set) { 3120 #ifdef INVARIANTS 3121 cleared = 1; 3122 #endif 3123 rule->rule_flags &= ~IPFW_RULE_F_STATE; 3124 } 3125 } 3126 KASSERT(cleared, ("no match set?!\n")); 3127 3128 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1); 3129 } 3130 3131 static int 3132 ipfw_alt_delete_ruleset(uint8_t set) 3133 { 3134 struct netmsg_del dmsg; 3135 struct netmsg_base *nmsg; 3136 int state, del; 3137 struct ip_fw *rule; 3138 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 3139 3140 /* 3141 * Check whether the 'set' exists. If it exists, 3142 * then check whether any rules within the set will 3143 * try to create states. 3144 */ 3145 state = 0; 3146 del = 0; 3147 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) { 3148 if (rule->set == set) { 3149 del = 1; 3150 if (rule->rule_flags & IPFW_RULE_F_STATE) { 3151 state = 1; 3152 break; 3153 } 3154 } 3155 } 3156 if (!del) 3157 return 0; /* XXX EINVAL? */ 3158 3159 if (state) { 3160 /* 3161 * Clear the STATE flag, so no more states will be 3162 * created based the rules in this set. 3163 */ 3164 bzero(&dmsg, sizeof(dmsg)); 3165 nmsg = &dmsg.base; 3166 netmsg_init(nmsg, NULL, &curthread->td_msgport, 3167 0, ipfw_disable_ruleset_state_dispatch); 3168 dmsg.from_set = set; 3169 3170 ifnet_domsg(&nmsg->lmsg, 0); 3171 3172 /* 3173 * Nuke all related states 3174 */ 3175 lockmgr(&dyn_lock, LK_EXCLUSIVE); 3176 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) { 3177 if (rule->set != set) 3178 continue; 3179 3180 /* 3181 * Can't check IPFW_RULE_F_STATE here, 3182 * since it has been cleared previously. 3183 * Check 'stub' instead. 
3184 */ 3185 if (rule->stub != NULL) { 3186 /* Force removal */ 3187 remove_dyn_rule_locked(rule, NULL); 3188 } 3189 } 3190 lockmgr(&dyn_lock, LK_RELEASE); 3191 } 3192 3193 /* 3194 * Delete this set 3195 */ 3196 bzero(&dmsg, sizeof(dmsg)); 3197 nmsg = &dmsg.base; 3198 netmsg_init(nmsg, NULL, &curthread->td_msgport, 3199 0, ipfw_alt_delete_ruleset_dispatch); 3200 dmsg.from_set = set; 3201 3202 ifnet_domsg(&nmsg->lmsg, 0); 3203 return 0; 3204 } 3205 3206 static void 3207 ipfw_alt_move_rule_dispatch(netmsg_t nmsg) 3208 { 3209 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg; 3210 struct ip_fw *rule; 3211 3212 rule = dmsg->start_rule; 3213 KKASSERT(rule->cpuid == mycpuid); 3214 3215 /* 3216 * Move to the position on the next CPU 3217 * before the msg is forwarded. 3218 */ 3219 dmsg->start_rule = rule->sibling; 3220 3221 while (rule && rule->rulenum <= dmsg->rulenum) { 3222 if (rule->rulenum == dmsg->rulenum) 3223 rule->set = dmsg->to_set; 3224 rule = rule->next; 3225 } 3226 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1); 3227 } 3228 3229 static int 3230 ipfw_alt_move_rule(uint16_t rulenum, uint8_t set) 3231 { 3232 struct netmsg_del dmsg; 3233 struct netmsg_base *nmsg; 3234 struct ip_fw *rule; 3235 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 3236 3237 /* 3238 * Locate first rule to move 3239 */ 3240 for (rule = ctx->ipfw_layer3_chain; rule && rule->rulenum <= rulenum; 3241 rule = rule->next) { 3242 if (rule->rulenum == rulenum && rule->set != set) 3243 break; 3244 } 3245 if (rule == NULL || rule->rulenum > rulenum) 3246 return 0; /* XXX error? */ 3247 3248 bzero(&dmsg, sizeof(dmsg)); 3249 nmsg = &dmsg.base; 3250 netmsg_init(nmsg, NULL, &curthread->td_msgport, 3251 0, ipfw_alt_move_rule_dispatch); 3252 dmsg.start_rule = rule; 3253 dmsg.rulenum = rulenum; 3254 dmsg.to_set = set; 3255 3256 ifnet_domsg(&nmsg->lmsg, 0); 3257 KKASSERT(dmsg.start_rule == NULL); 3258 return 0; 3259 } 3260 3261 static void 3262 ipfw_alt_move_ruleset_dispatch(netmsg_t nmsg) 3263 { 3264 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg; 3265 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 3266 struct ip_fw *rule; 3267 3268 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) { 3269 if (rule->set == dmsg->from_set) 3270 rule->set = dmsg->to_set; 3271 } 3272 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1); 3273 } 3274 3275 static int 3276 ipfw_alt_move_ruleset(uint8_t from_set, uint8_t to_set) 3277 { 3278 struct netmsg_del dmsg; 3279 struct netmsg_base *nmsg; 3280 3281 bzero(&dmsg, sizeof(dmsg)); 3282 nmsg = &dmsg.base; 3283 netmsg_init(nmsg, NULL, &curthread->td_msgport, 3284 0, ipfw_alt_move_ruleset_dispatch); 3285 dmsg.from_set = from_set; 3286 dmsg.to_set = to_set; 3287 3288 ifnet_domsg(&nmsg->lmsg, 0); 3289 return 0; 3290 } 3291 3292 static void 3293 ipfw_alt_swap_ruleset_dispatch(netmsg_t nmsg) 3294 { 3295 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg; 3296 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 3297 struct ip_fw *rule; 3298 3299 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) { 3300 if (rule->set == dmsg->from_set) 3301 rule->set = dmsg->to_set; 3302 else if (rule->set == dmsg->to_set) 3303 rule->set = dmsg->from_set; 3304 } 3305 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1); 3306 } 3307 3308 static int 3309 ipfw_alt_swap_ruleset(uint8_t set1, uint8_t set2) 3310 { 3311 struct netmsg_del dmsg; 3312 struct netmsg_base *nmsg; 3313 3314 bzero(&dmsg, sizeof(dmsg)); 3315 nmsg = &dmsg.base; 3316 netmsg_init(nmsg, NULL, &curthread->td_msgport, 3317 0, ipfw_alt_swap_ruleset_dispatch); 3318 
dmsg.from_set = set1; 3319 dmsg.to_set = set2; 3320 3321 ifnet_domsg(&nmsg->lmsg, 0); 3322 return 0; 3323 } 3324 3325 /** 3326 * Remove all rules with given number, and also do set manipulation. 3327 * 3328 * The argument is a uint32_t. The low 16 bits are the rule or set number, 3329 * the next 8 bits are the new set, the top 8 bits are the command: 3330 * 3331 * 0 delete rules with given number 3332 * 1 delete rules with given set number 3333 * 2 move rules with given number to new set 3334 * 3 move rules with given set number to new set 3335 * 4 swap sets with given numbers 3336 * 3337 * e.g. arg = (2 << 24) | (5 << 16) | 100 moves the rules numbered 100 3338 * into set 5. 3339 */ 3340 static int 3341 ipfw_ctl_alter(uint32_t arg) 3342 { 3343 uint16_t rulenum; 3344 uint8_t cmd, new_set; 3345 int error = 0; 3346 3347 rulenum = arg & 0xffff; 3348 cmd = (arg >> 24) & 0xff; 3349 new_set = (arg >> 16) & 0xff; 3350 3351 if (cmd > 4) 3352 return EINVAL; 3353 if (new_set >= IPFW_DEFAULT_SET) 3354 return EINVAL; 3355 if (cmd == 0 || cmd == 2) { 3356 if (rulenum == IPFW_DEFAULT_RULE) 3357 return EINVAL; 3358 } else { 3359 if (rulenum >= IPFW_DEFAULT_SET) 3360 return EINVAL; 3361 } 3362 3363 switch (cmd) { 3364 case 0: /* delete rules with given number */ 3365 error = ipfw_alt_delete_rule(rulenum); 3366 break; 3367 3368 case 1: /* delete all rules with given set number */ 3369 error = ipfw_alt_delete_ruleset(rulenum); 3370 break; 3371 3372 case 2: /* move rules with given number to new set */ 3373 error = ipfw_alt_move_rule(rulenum, new_set); 3374 break; 3375 3376 case 3: /* move rules with given set number to new set */ 3377 error = ipfw_alt_move_ruleset(rulenum, new_set); 3378 break; 3379 3380 case 4: /* swap two sets */ 3381 error = ipfw_alt_swap_ruleset(rulenum, new_set); 3382 break; 3383 } 3384 return error; 3385 } 3386 3387 /* 3388 * Clear counters for a specific rule. 3389 */ 3390 static void 3391 clear_counters(struct ip_fw *rule, int log_only) 3392 { 3393 ipfw_insn_log *l = (ipfw_insn_log *)ACTION_PTR(rule); 3394 3395 if (log_only == 0) { 3396 rule->bcnt = rule->pcnt = 0; 3397 rule->timestamp = 0; 3398 } 3399 if (l->o.opcode == O_LOG) 3400 l->log_left = l->max_log; 3401 } 3402 3403 static void 3404 ipfw_zero_entry_dispatch(netmsg_t nmsg) 3405 { 3406 struct netmsg_zent *zmsg = (struct netmsg_zent *)nmsg; 3407 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 3408 struct ip_fw *rule; 3409 3410 if (zmsg->rulenum == 0) { 3411 KKASSERT(zmsg->start_rule == NULL); 3412 3413 ctx->ipfw_norule_counter = 0; 3414 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) 3415 clear_counters(rule, zmsg->log_only); 3416 } else { 3417 struct ip_fw *start = zmsg->start_rule; 3418 3419 KKASSERT(start->cpuid == mycpuid); 3420 KKASSERT(start->rulenum == zmsg->rulenum); 3421 3422 /* 3423 * We can have multiple rules with the same number, so we 3424 * need to clear them all. 3425 */ 3426 for (rule = start; rule && rule->rulenum == zmsg->rulenum; 3427 rule = rule->next) 3428 clear_counters(rule, zmsg->log_only); 3429 3430 /* 3431 * Move to the position on the next CPU 3432 * before the msg is forwarded. 3433 */ 3434 zmsg->start_rule = start->sibling; 3435 } 3436 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1); 3437 } 3438 3439 /** 3440 * Reset some or all counters on firewall rules. 3441 * @arg rulenum is 0 to clear all entries, or contains a specific 3442 * rule number. 3443 * @arg log_only is 1 if we only want to reset logs, zero otherwise.
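 * The request is dispatched via ifnet_domsg(), so the counters are
 * cleared in every CPU's duplicate of the matching rule(s).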
3441 */ 3442 static int 3443 ipfw_ctl_zero_entry(int rulenum, int log_only) 3444 { 3445 struct netmsg_zent zmsg; 3446 struct netmsg_base *nmsg; 3447 const char *msg; 3448 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 3449 3450 bzero(&zmsg, sizeof(zmsg)); 3451 nmsg = &zmsg.base; 3452 netmsg_init(nmsg, NULL, &curthread->td_msgport, 3453 0, ipfw_zero_entry_dispatch); 3454 zmsg.log_only = log_only; 3455 3456 if (rulenum == 0) { 3457 msg = log_only ? "ipfw: All logging counts reset.\n" 3458 : "ipfw: Accounting cleared.\n"; 3459 } else { 3460 struct ip_fw *rule; 3461 3462 /* 3463 * Locate the first rule with 'rulenum' 3464 */ 3465 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) { 3466 if (rule->rulenum == rulenum) 3467 break; 3468 } 3469 if (rule == NULL) /* we did not find any matching rules */ 3470 return (EINVAL); 3471 zmsg.start_rule = rule; 3472 zmsg.rulenum = rulenum; 3473 3474 msg = log_only ? "ipfw: Entry %d logging count reset.\n" 3475 : "ipfw: Entry %d cleared.\n"; 3476 } 3477 ifnet_domsg(&nmsg->lmsg, 0); 3478 KKASSERT(zmsg.start_rule == NULL); 3479 3480 if (fw_verbose) 3481 log(LOG_SECURITY | LOG_NOTICE, msg, rulenum); 3482 return (0); 3483 } 3484 3485 /* 3486 * Check validity of the structure before insert. 3487 * Fortunately rules are simple, so this mostly need to check rule sizes. 3488 */ 3489 static int 3490 ipfw_check_ioc_rule(struct ipfw_ioc_rule *rule, int size, uint32_t *rule_flags) 3491 { 3492 int l, cmdlen = 0; 3493 int have_action = 0; 3494 ipfw_insn *cmd; 3495 3496 *rule_flags = 0; 3497 3498 /* Check for valid size */ 3499 if (size < sizeof(*rule)) { 3500 kprintf("ipfw: rule too short\n"); 3501 return EINVAL; 3502 } 3503 l = IOC_RULESIZE(rule); 3504 if (l != size) { 3505 kprintf("ipfw: size mismatch (have %d want %d)\n", size, l); 3506 return EINVAL; 3507 } 3508 3509 /* Check rule number */ 3510 if (rule->rulenum == IPFW_DEFAULT_RULE) { 3511 kprintf("ipfw: invalid rule number\n"); 3512 return EINVAL; 3513 } 3514 3515 /* 3516 * Now go for the individual checks. Very simple ones, basically only 3517 * instruction sizes. 
3518 */ 3519 for (l = rule->cmd_len, cmd = rule->cmd; l > 0; 3520 l -= cmdlen, cmd += cmdlen) { 3521 cmdlen = F_LEN(cmd); 3522 if (cmdlen > l) { 3523 kprintf("ipfw: opcode %d size truncated\n", 3524 cmd->opcode); 3525 return EINVAL; 3526 } 3527 3528 DPRINTF("ipfw: opcode %d\n", cmd->opcode); 3529 3530 if (cmd->opcode == O_KEEP_STATE || cmd->opcode == O_LIMIT) { 3531 /* This rule will create states */ 3532 *rule_flags |= IPFW_RULE_F_STATE; 3533 } 3534 3535 switch (cmd->opcode) { 3536 case O_NOP: 3537 case O_PROBE_STATE: 3538 case O_KEEP_STATE: 3539 case O_PROTO: 3540 case O_IP_SRC_ME: 3541 case O_IP_DST_ME: 3542 case O_LAYER2: 3543 case O_IN: 3544 case O_FRAG: 3545 case O_IPOPT: 3546 case O_IPLEN: 3547 case O_IPID: 3548 case O_IPTOS: 3549 case O_IPPRECEDENCE: 3550 case O_IPTTL: 3551 case O_IPVER: 3552 case O_TCPWIN: 3553 case O_TCPFLAGS: 3554 case O_TCPOPTS: 3555 case O_ESTAB: 3556 if (cmdlen != F_INSN_SIZE(ipfw_insn)) 3557 goto bad_size; 3558 break; 3559 3560 case O_UID: 3561 case O_GID: 3562 case O_IP_SRC: 3563 case O_IP_DST: 3564 case O_TCPSEQ: 3565 case O_TCPACK: 3566 case O_PROB: 3567 case O_ICMPTYPE: 3568 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32)) 3569 goto bad_size; 3570 break; 3571 3572 case O_LIMIT: 3573 if (cmdlen != F_INSN_SIZE(ipfw_insn_limit)) 3574 goto bad_size; 3575 break; 3576 3577 case O_LOG: 3578 if (cmdlen != F_INSN_SIZE(ipfw_insn_log)) 3579 goto bad_size; 3580 3581 ((ipfw_insn_log *)cmd)->log_left = 3582 ((ipfw_insn_log *)cmd)->max_log; 3583 3584 break; 3585 3586 case O_IP_SRC_MASK: 3587 case O_IP_DST_MASK: 3588 if (cmdlen != F_INSN_SIZE(ipfw_insn_ip)) 3589 goto bad_size; 3590 if (((ipfw_insn_ip *)cmd)->mask.s_addr == 0) { 3591 kprintf("ipfw: opcode %d, useless rule\n", 3592 cmd->opcode); 3593 return EINVAL; 3594 } 3595 break; 3596 3597 case O_IP_SRC_SET: 3598 case O_IP_DST_SET: 3599 if (cmd->arg1 == 0 || cmd->arg1 > 256) { 3600 kprintf("ipfw: invalid set size %d\n", 3601 cmd->arg1); 3602 return EINVAL; 3603 } 3604 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) + 3605 (cmd->arg1+31)/32 ) 3606 goto bad_size; 3607 break; 3608 3609 case O_MACADDR2: 3610 if (cmdlen != F_INSN_SIZE(ipfw_insn_mac)) 3611 goto bad_size; 3612 break; 3613 3614 case O_MAC_TYPE: 3615 case O_IP_SRCPORT: 3616 case O_IP_DSTPORT: /* XXX artificial limit, 30 port pairs */ 3617 if (cmdlen < 2 || cmdlen > 31) 3618 goto bad_size; 3619 break; 3620 3621 case O_RECV: 3622 case O_XMIT: 3623 case O_VIA: 3624 if (cmdlen != F_INSN_SIZE(ipfw_insn_if)) 3625 goto bad_size; 3626 break; 3627 3628 case O_PIPE: 3629 case O_QUEUE: 3630 if (cmdlen != F_INSN_SIZE(ipfw_insn_pipe)) 3631 goto bad_size; 3632 goto check_action; 3633 3634 case O_FORWARD_IP: 3635 if (cmdlen != F_INSN_SIZE(ipfw_insn_sa)) { 3636 goto bad_size; 3637 } else { 3638 in_addr_t fwd_addr; 3639 3640 fwd_addr = ((ipfw_insn_sa *)cmd)-> 3641 sa.sin_addr.s_addr; 3642 if (IN_MULTICAST(ntohl(fwd_addr))) { 3643 kprintf("ipfw: try forwarding to " 3644 "multicast address\n"); 3645 return EINVAL; 3646 } 3647 } 3648 goto check_action; 3649 3650 case O_FORWARD_MAC: /* XXX not implemented yet */ 3651 case O_CHECK_STATE: 3652 case O_COUNT: 3653 case O_ACCEPT: 3654 case O_DENY: 3655 case O_REJECT: 3656 case O_SKIPTO: 3657 case O_DIVERT: 3658 case O_TEE: 3659 if (cmdlen != F_INSN_SIZE(ipfw_insn)) 3660 goto bad_size; 3661 check_action: 3662 if (have_action) { 3663 kprintf("ipfw: opcode %d, multiple actions" 3664 " not allowed\n", 3665 cmd->opcode); 3666 return EINVAL; 3667 } 3668 have_action = 1; 3669 if (l != cmdlen) { 3670 kprintf("ipfw: opcode %d, action must be" 3671 " last 
opcode\n", 3672 cmd->opcode); 3673 return EINVAL; 3674 } 3675 break; 3676 default: 3677 kprintf("ipfw: opcode %d, unknown opcode\n", 3678 cmd->opcode); 3679 return EINVAL; 3680 } 3681 } 3682 if (have_action == 0) { 3683 kprintf("ipfw: missing action\n"); 3684 return EINVAL; 3685 } 3686 return 0; 3687 3688 bad_size: 3689 kprintf("ipfw: opcode %d size %d wrong\n", 3690 cmd->opcode, cmdlen); 3691 return EINVAL; 3692 } 3693 3694 static int 3695 ipfw_ctl_add_rule(struct sockopt *sopt) 3696 { 3697 struct ipfw_ioc_rule *ioc_rule; 3698 size_t size; 3699 uint32_t rule_flags; 3700 int error; 3701 3702 size = sopt->sopt_valsize; 3703 if (size > (sizeof(uint32_t) * IPFW_RULE_SIZE_MAX) || 3704 size < sizeof(*ioc_rule)) { 3705 return EINVAL; 3706 } 3707 if (size != (sizeof(uint32_t) * IPFW_RULE_SIZE_MAX)) { 3708 sopt->sopt_val = krealloc(sopt->sopt_val, sizeof(uint32_t) * 3709 IPFW_RULE_SIZE_MAX, M_TEMP, M_WAITOK); 3710 } 3711 ioc_rule = sopt->sopt_val; 3712 3713 error = ipfw_check_ioc_rule(ioc_rule, size, &rule_flags); 3714 if (error) 3715 return error; 3716 3717 ipfw_add_rule(ioc_rule, rule_flags); 3718 3719 if (sopt->sopt_dir == SOPT_GET) 3720 sopt->sopt_valsize = IOC_RULESIZE(ioc_rule); 3721 return 0; 3722 } 3723 3724 static void * 3725 ipfw_copy_rule(const struct ip_fw *rule, struct ipfw_ioc_rule *ioc_rule) 3726 { 3727 const struct ip_fw *sibling; 3728 #ifdef INVARIANTS 3729 int i; 3730 #endif 3731 3732 KKASSERT(rule->cpuid == IPFW_CFGCPUID); 3733 3734 ioc_rule->act_ofs = rule->act_ofs; 3735 ioc_rule->cmd_len = rule->cmd_len; 3736 ioc_rule->rulenum = rule->rulenum; 3737 ioc_rule->set = rule->set; 3738 ioc_rule->usr_flags = rule->usr_flags; 3739 3740 ioc_rule->set_disable = ipfw_ctx[mycpuid]->ipfw_set_disable; 3741 ioc_rule->static_count = static_count; 3742 ioc_rule->static_len = static_ioc_len; 3743 3744 /* 3745 * Visit (read-only) all of the rule's duplications to get 3746 * the necessary statistics 3747 */ 3748 #ifdef INVARIANTS 3749 i = 0; 3750 #endif 3751 ioc_rule->pcnt = 0; 3752 ioc_rule->bcnt = 0; 3753 ioc_rule->timestamp = 0; 3754 for (sibling = rule; sibling != NULL; sibling = sibling->sibling) { 3755 ioc_rule->pcnt += sibling->pcnt; 3756 ioc_rule->bcnt += sibling->bcnt; 3757 if (sibling->timestamp > ioc_rule->timestamp) 3758 ioc_rule->timestamp = sibling->timestamp; 3759 #ifdef INVARIANTS 3760 ++i; 3761 #endif 3762 } 3763 KASSERT(i == ncpus, ("static rule is not duplicated on every cpu\n")); 3764 3765 bcopy(rule->cmd, ioc_rule->cmd, ioc_rule->cmd_len * 4 /* XXX */); 3766 3767 return ((uint8_t *)ioc_rule + IOC_RULESIZE(ioc_rule)); 3768 } 3769 3770 static void 3771 ipfw_copy_state(const ipfw_dyn_rule *dyn_rule, 3772 struct ipfw_ioc_state *ioc_state) 3773 { 3774 const struct ipfw_flow_id *id; 3775 struct ipfw_ioc_flowid *ioc_id; 3776 3777 ioc_state->expire = TIME_LEQ(dyn_rule->expire, time_second) ? 
3778 0 : dyn_rule->expire - time_second; 3779 ioc_state->pcnt = dyn_rule->pcnt; 3780 ioc_state->bcnt = dyn_rule->bcnt; 3781 3782 ioc_state->dyn_type = dyn_rule->dyn_type; 3783 ioc_state->count = dyn_rule->count; 3784 3785 ioc_state->rulenum = dyn_rule->stub->rule[mycpuid]->rulenum; 3786 3787 id = &dyn_rule->id; 3788 ioc_id = &ioc_state->id; 3789 3790 ioc_id->type = ETHERTYPE_IP; 3791 ioc_id->u.ip.dst_ip = id->dst_ip; 3792 ioc_id->u.ip.src_ip = id->src_ip; 3793 ioc_id->u.ip.dst_port = id->dst_port; 3794 ioc_id->u.ip.src_port = id->src_port; 3795 ioc_id->u.ip.proto = id->proto; 3796 } 3797 3798 static int 3799 ipfw_ctl_get_rules(struct sockopt *sopt) 3800 { 3801 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 3802 struct ip_fw *rule; 3803 void *bp; 3804 size_t size; 3805 uint32_t dcount = 0; 3806 3807 /* 3808 * Pass up a copy of the current rules. Static rules 3809 * come first (the last of which has number IPFW_DEFAULT_RULE), 3810 * followed by a possibly empty list of dynamic rules. 3811 */ 3812 3813 size = static_ioc_len; /* size of static rules */ 3814 if (ipfw_dyn_v) { /* add size of dyn.rules */ 3815 dcount = dyn_count; 3816 size += dcount * sizeof(struct ipfw_ioc_state); 3817 } 3818 3819 if (sopt->sopt_valsize < size) { 3820 /* short length, no need to return incomplete rules */ 3821 /* XXX: if superuser, no need to zero buffer */ 3822 bzero(sopt->sopt_val, sopt->sopt_valsize); 3823 return 0; 3824 } 3825 bp = sopt->sopt_val; 3826 3827 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) 3828 bp = ipfw_copy_rule(rule, bp); 3829 3830 if (ipfw_dyn_v && dcount != 0) { 3831 struct ipfw_ioc_state *ioc_state = bp; 3832 uint32_t dcount2 = 0; 3833 #ifdef INVARIANTS 3834 size_t old_size = size; 3835 #endif 3836 int i; 3837 3838 lockmgr(&dyn_lock, LK_SHARED); 3839 3840 /* Check 'ipfw_dyn_v' again with lock held */ 3841 if (ipfw_dyn_v == NULL) 3842 goto skip; 3843 3844 for (i = 0; i < curr_dyn_buckets; i++) { 3845 ipfw_dyn_rule *p; 3846 3847 /* 3848 * The # of dynamic rules may have grown after the 3849 * snapshot of 'dyn_count' was taken, so we will have 3850 * to check 'dcount' (snapshot of dyn_count) here to 3851 * make sure that we don't overflow the pre-allocated 3852 * buffer. 3853 */ 3854 for (p = ipfw_dyn_v[i]; p != NULL && dcount != 0; 3855 p = p->next, ioc_state++, dcount--, dcount2++) 3856 ipfw_copy_state(p, ioc_state); 3857 } 3858 skip: 3859 lockmgr(&dyn_lock, LK_RELEASE); 3860 3861 /* 3862 * The # of dynamic rules may have shrunk after the 3863 * snapshot of 'dyn_count' was taken. To give the user a 3864 * correct dynamic rule count, we use the 'dcount2' 3865 * calculated above (with shared lockmgr lock held).
3866 */ 3867 size = static_ioc_len + 3868 (dcount2 * sizeof(struct ipfw_ioc_state)); 3869 KKASSERT(size <= old_size); 3870 } 3871 3872 sopt->sopt_valsize = size; 3873 return 0; 3874 } 3875 3876 static void 3877 ipfw_set_disable_dispatch(netmsg_t nmsg) 3878 { 3879 struct lwkt_msg *lmsg = &nmsg->lmsg; 3880 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 3881 3882 ctx->ipfw_gen++; 3883 ctx->ipfw_set_disable = lmsg->u.ms_result32; 3884 3885 ifnet_forwardmsg(lmsg, mycpuid + 1); 3886 } 3887 3888 static void 3889 ipfw_ctl_set_disable(uint32_t disable, uint32_t enable) 3890 { 3891 struct netmsg_base nmsg; 3892 struct lwkt_msg *lmsg; 3893 uint32_t set_disable; 3894 3895 /* IPFW_DEFAULT_SET is always enabled */ 3896 enable |= (1 << IPFW_DEFAULT_SET); 3897 set_disable = (ipfw_ctx[mycpuid]->ipfw_set_disable | disable) & ~enable; 3898 3899 bzero(&nmsg, sizeof(nmsg)); 3900 netmsg_init(&nmsg, NULL, &curthread->td_msgport, 3901 0, ipfw_set_disable_dispatch); 3902 lmsg = &nmsg.lmsg; 3903 lmsg->u.ms_result32 = set_disable; 3904 3905 ifnet_domsg(lmsg, 0); 3906 } 3907 3908 /** 3909 * {set|get}sockopt parser. 3910 */ 3911 static int 3912 ipfw_ctl(struct sockopt *sopt) 3913 { 3914 int error, rulenum; 3915 uint32_t *masks; 3916 size_t size; 3917 3918 error = 0; 3919 3920 switch (sopt->sopt_name) { 3921 case IP_FW_GET: 3922 error = ipfw_ctl_get_rules(sopt); 3923 break; 3924 3925 case IP_FW_FLUSH: 3926 ipfw_flush(0 /* keep default rule */); 3927 break; 3928 3929 case IP_FW_ADD: 3930 error = ipfw_ctl_add_rule(sopt); 3931 break; 3932 3933 case IP_FW_DEL: 3934 /* 3935 * IP_FW_DEL is used for deleting single rules or sets, 3936 * and (ab)used to atomically manipulate sets. 3937 * Argument size is used to distinguish between the two: 3938 * sizeof(uint32_t) 3939 * delete single rule or set of rules, 3940 * or reassign rules (or sets) to a different set. 3941 * 2 * sizeof(uint32_t) 3942 * atomic disable/enable sets. 3943 * first uint32_t contains sets to be disabled, 3944 * second uint32_t contains sets to be enabled. 3945 */ 3946 masks = sopt->sopt_val; 3947 size = sopt->sopt_valsize; 3948 if (size == sizeof(*masks)) { 3949 /* 3950 * Delete or reassign static rule 3951 */ 3952 error = ipfw_ctl_alter(masks[0]); 3953 } else if (size == (2 * sizeof(*masks))) { 3954 /* 3955 * Set enable/disable 3956 */ 3957 ipfw_ctl_set_disable(masks[0], masks[1]); 3958 } else { 3959 error = EINVAL; 3960 } 3961 break; 3962 3963 case IP_FW_ZERO: 3964 case IP_FW_RESETLOG: /* argument is an int, the rule number */ 3965 rulenum = 0; 3966 3967 if (sopt->sopt_val != 0) { 3968 error = soopt_to_kbuf(sopt, &rulenum, 3969 sizeof(int), sizeof(int)); 3970 if (error) 3971 break; 3972 } 3973 error = ipfw_ctl_zero_entry(rulenum, 3974 sopt->sopt_name == IP_FW_RESETLOG); 3975 break; 3976 3977 default: 3978 kprintf("ipfw_ctl invalid option %d\n", sopt->sopt_name); 3979 error = EINVAL; 3980 } 3981 return error; 3982 } 3983 3984 /* 3985 * This procedure is only used to handle keepalives. 
It is invoked 3986 * every dyn_keepalive_period 3987 */ 3988 static void 3989 ipfw_tick_dispatch(netmsg_t nmsg) 3990 { 3991 time_t keep_alive; 3992 uint32_t gen; 3993 int i; 3994 3995 IPFW_ASSERT_CFGPORT(&curthread->td_msgport); 3996 KKASSERT(IPFW_LOADED); 3997 3998 /* Reply ASAP */ 3999 crit_enter(); 4000 lwkt_replymsg(&nmsg->lmsg, 0); 4001 crit_exit(); 4002 4003 if (ipfw_dyn_v == NULL || dyn_count == 0) 4004 goto done; 4005 4006 keep_alive = time_second; 4007 4008 lockmgr(&dyn_lock, LK_EXCLUSIVE); 4009 again: 4010 if (ipfw_dyn_v == NULL || dyn_count == 0) { 4011 lockmgr(&dyn_lock, LK_RELEASE); 4012 goto done; 4013 } 4014 gen = dyn_buckets_gen; 4015 4016 for (i = 0; i < curr_dyn_buckets; i++) { 4017 ipfw_dyn_rule *q, *prev; 4018 4019 for (prev = NULL, q = ipfw_dyn_v[i]; q != NULL;) { 4020 uint32_t ack_rev, ack_fwd; 4021 struct ipfw_flow_id id; 4022 4023 if (q->dyn_type == O_LIMIT_PARENT) 4024 goto next; 4025 4026 if (TIME_LEQ(q->expire, time_second)) { 4027 /* State expired */ 4028 UNLINK_DYN_RULE(prev, ipfw_dyn_v[i], q); 4029 continue; 4030 } 4031 4032 /* 4033 * Keep alive processing 4034 */ 4035 4036 if (!dyn_keepalive) 4037 goto next; 4038 if (q->id.proto != IPPROTO_TCP) 4039 goto next; 4040 if ((q->state & BOTH_SYN) != BOTH_SYN) 4041 goto next; 4042 if (TIME_LEQ(time_second + dyn_keepalive_interval, 4043 q->expire)) 4044 goto next; /* too early */ 4045 if (q->keep_alive == keep_alive) 4046 goto next; /* already done */ 4047 4048 /* 4049 * Save the necessary information, so that it can 4050 * survive possible blocking in send_pkt() 4051 */ 4052 id = q->id; 4053 ack_rev = q->ack_rev; 4054 ack_fwd = q->ack_fwd; 4055 4056 /* Sending has been started */ 4057 q->keep_alive = keep_alive; 4058 4059 /* Release lock to avoid a possible deadlock */ 4060 lockmgr(&dyn_lock, LK_RELEASE); 4061 send_pkt(&id, ack_rev - 1, ack_fwd, TH_SYN); 4062 send_pkt(&id, ack_fwd - 1, ack_rev, 0); 4063 lockmgr(&dyn_lock, LK_EXCLUSIVE); 4064 4065 if (gen != dyn_buckets_gen) { 4066 /* 4067 * Dyn bucket array has been changed during 4068 * the two sends above; reiterate. 4069 */ 4070 goto again; 4071 } 4072 next: 4073 prev = q; 4074 q = q->next; 4075 } 4076 } 4077 lockmgr(&dyn_lock, LK_RELEASE); 4078 done: 4079 callout_reset(&ipfw_timeout_h, dyn_keepalive_period * hz, 4080 ipfw_tick, NULL); 4081 } 4082 4083 /* 4084 * This procedure is only used to handle keepalives.
It is invoked 4085 * every dyn_keepalive_period 4086 */ 4087 static void 4088 ipfw_tick(void *dummy __unused) 4089 { 4090 struct lwkt_msg *lmsg = &ipfw_timeout_netmsg.lmsg; 4091 4092 KKASSERT(mycpuid == IPFW_CFGCPUID); 4093 4094 crit_enter(); 4095 4096 KKASSERT(lmsg->ms_flags & MSGF_DONE); 4097 if (IPFW_LOADED) { 4098 lwkt_sendmsg(IPFW_CFGPORT, lmsg); 4099 /* ipfw_timeout_netmsg's handler reset this callout */ 4100 } 4101 4102 crit_exit(); 4103 } 4104 4105 static int 4106 ipfw_check_in(void *arg, struct mbuf **m0, struct ifnet *ifp, int dir) 4107 { 4108 struct ip_fw_args args; 4109 struct mbuf *m = *m0; 4110 struct m_tag *mtag; 4111 int tee = 0, error = 0, ret; 4112 4113 if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED) { 4114 /* Extract info from dummynet tag */ 4115 mtag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL); 4116 KKASSERT(mtag != NULL); 4117 args.rule = ((struct dn_pkt *)m_tag_data(mtag))->dn_priv; 4118 KKASSERT(args.rule != NULL); 4119 4120 m_tag_delete(m, mtag); 4121 m->m_pkthdr.fw_flags &= ~DUMMYNET_MBUF_TAGGED; 4122 } else { 4123 args.rule = NULL; 4124 } 4125 4126 args.eh = NULL; 4127 args.oif = NULL; 4128 args.m = m; 4129 ret = ipfw_chk(&args); 4130 m = args.m; 4131 4132 if (m == NULL) { 4133 error = EACCES; 4134 goto back; 4135 } 4136 4137 switch (ret) { 4138 case IP_FW_PASS: 4139 break; 4140 4141 case IP_FW_DENY: 4142 m_freem(m); 4143 m = NULL; 4144 error = EACCES; 4145 break; 4146 4147 case IP_FW_DUMMYNET: 4148 /* Send packet to the appropriate pipe */ 4149 ipfw_dummynet_io(m, args.cookie, DN_TO_IP_IN, &args); 4150 break; 4151 4152 case IP_FW_TEE: 4153 tee = 1; 4154 /* FALL THROUGH */ 4155 4156 case IP_FW_DIVERT: 4157 /* 4158 * Must clear bridge tag when changing 4159 */ 4160 m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED; 4161 if (ip_divert_p != NULL) { 4162 m = ip_divert_p(m, tee, 1); 4163 } else { 4164 m_freem(m); 4165 m = NULL; 4166 /* not sure this is the right error msg */ 4167 error = EACCES; 4168 } 4169 break; 4170 4171 default: 4172 panic("unknown ipfw return value: %d\n", ret); 4173 } 4174 back: 4175 *m0 = m; 4176 return error; 4177 } 4178 4179 static int 4180 ipfw_check_out(void *arg, struct mbuf **m0, struct ifnet *ifp, int dir) 4181 { 4182 struct ip_fw_args args; 4183 struct mbuf *m = *m0; 4184 struct m_tag *mtag; 4185 int tee = 0, error = 0, ret; 4186 4187 if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED) { 4188 /* Extract info from dummynet tag */ 4189 mtag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL); 4190 KKASSERT(mtag != NULL); 4191 args.rule = ((struct dn_pkt *)m_tag_data(mtag))->dn_priv; 4192 KKASSERT(args.rule != NULL); 4193 4194 m_tag_delete(m, mtag); 4195 m->m_pkthdr.fw_flags &= ~DUMMYNET_MBUF_TAGGED; 4196 } else { 4197 args.rule = NULL; 4198 } 4199 4200 args.eh = NULL; 4201 args.m = m; 4202 args.oif = ifp; 4203 ret = ipfw_chk(&args); 4204 m = args.m; 4205 4206 if (m == NULL) { 4207 error = EACCES; 4208 goto back; 4209 } 4210 4211 switch (ret) { 4212 case IP_FW_PASS: 4213 break; 4214 4215 case IP_FW_DENY: 4216 m_freem(m); 4217 m = NULL; 4218 error = EACCES; 4219 break; 4220 4221 case IP_FW_DUMMYNET: 4222 ipfw_dummynet_io(m, args.cookie, DN_TO_IP_OUT, &args); 4223 break; 4224 4225 case IP_FW_TEE: 4226 tee = 1; 4227 /* FALL THROUGH */ 4228 4229 case IP_FW_DIVERT: 4230 if (ip_divert_p != NULL) { 4231 m = ip_divert_p(m, tee, 0); 4232 } else { 4233 m_freem(m); 4234 m = NULL; 4235 /* not sure this is the right error msg */ 4236 error = EACCES; 4237 } 4238 break; 4239 4240 default: 4241 panic("unknown ipfw return value: %d\n", ret); 4242 } 4243 back: 4244 *m0 = m; 
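/* Hand back the mbuf, which may have been replaced or freed above */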
static void
ipfw_hook(void)
{
        struct pfil_head *pfh;

        IPFW_ASSERT_CFGPORT(&curthread->td_msgport);

        pfh = pfil_head_get(PFIL_TYPE_AF, AF_INET);
        if (pfh == NULL)
                return;

        pfil_add_hook(ipfw_check_in, NULL, PFIL_IN | PFIL_MPSAFE, pfh);
        pfil_add_hook(ipfw_check_out, NULL, PFIL_OUT | PFIL_MPSAFE, pfh);
}

static void
ipfw_dehook(void)
{
        struct pfil_head *pfh;

        IPFW_ASSERT_CFGPORT(&curthread->td_msgport);

        pfh = pfil_head_get(PFIL_TYPE_AF, AF_INET);
        if (pfh == NULL)
                return;

        pfil_remove_hook(ipfw_check_in, NULL, PFIL_IN, pfh);
        pfil_remove_hook(ipfw_check_out, NULL, PFIL_OUT, pfh);
}

static void
ipfw_sysctl_enable_dispatch(netmsg_t nmsg)
{
        struct lwkt_msg *lmsg = &nmsg->lmsg;
        int enable = lmsg->u.ms_result;

        if (fw_enable == enable)
                goto reply;

        fw_enable = enable;
        if (fw_enable)
                ipfw_hook();
        else
                ipfw_dehook();
reply:
        lwkt_replymsg(lmsg, 0);
}

static int
ipfw_sysctl_enable(SYSCTL_HANDLER_ARGS)
{
        struct netmsg_base nmsg;
        struct lwkt_msg *lmsg;
        int enable, error;

        enable = fw_enable;
        error = sysctl_handle_int(oidp, &enable, 0, req);
        if (error || req->newptr == NULL)
                return error;

        netmsg_init(&nmsg, NULL, &curthread->td_msgport,
                    0, ipfw_sysctl_enable_dispatch);
        lmsg = &nmsg.lmsg;
        lmsg->u.ms_result = enable;

        return lwkt_domsg(IPFW_CFGPORT, lmsg, 0);
}

static int
ipfw_sysctl_autoinc_step(SYSCTL_HANDLER_ARGS)
{
        return sysctl_int_range(oidp, arg1, arg2, req,
                                IPFW_AUTOINC_STEP_MIN, IPFW_AUTOINC_STEP_MAX);
}
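/*
 * Note on the validation in ipfw_sysctl_dyn_buckets() below
 * (explanatory only): the bucket count must be a power of 2,
 * presumably so a state hash can be folded into a bucket index with a
 * cheap mask instead of a modulo.  The test (value & (value - 1)) != 0
 * rejects non-powers-of-2: subtracting 1 from a power of 2 flips
 * exactly the bits below its single set bit, so the AND is zero only
 * for powers of 2.  Worked examples:
 *
 *	value = 256 (0x100):  0x100 & 0x0ff = 0	     -> accepted
 *	value = 300 (0x12c):  0x12c & 0x12b = 0x128  -> rejected
 */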
static int
ipfw_sysctl_dyn_buckets(SYSCTL_HANDLER_ARGS)
{
        int error, value;

        lockmgr(&dyn_lock, LK_EXCLUSIVE);

        value = dyn_buckets;
        error = sysctl_handle_int(oidp, &value, 0, req);
        if (error || !req->newptr)
                goto back;

        /*
         * Make sure we have a power of 2 and
         * do not allow more than 64k entries.
         */
        error = EINVAL;
        if (value <= 1 || value > 65536)
                goto back;
        if ((value & (value - 1)) != 0)
                goto back;

        error = 0;
        dyn_buckets = value;
back:
        lockmgr(&dyn_lock, LK_RELEASE);
        return error;
}

static int
ipfw_sysctl_dyn_fin(SYSCTL_HANDLER_ARGS)
{
        return sysctl_int_range(oidp, arg1, arg2, req,
                                1, dyn_keepalive_period - 1);
}

static int
ipfw_sysctl_dyn_rst(SYSCTL_HANDLER_ARGS)
{
        return sysctl_int_range(oidp, arg1, arg2, req,
                                1, dyn_keepalive_period - 1);
}

static void
ipfw_ctx_init_dispatch(netmsg_t nmsg)
{
        struct netmsg_ipfw *fwmsg = (struct netmsg_ipfw *)nmsg;
        struct ipfw_context *ctx;
        struct ip_fw *def_rule;

        ctx = kmalloc(sizeof(*ctx), M_IPFW, M_WAITOK | M_ZERO);
        ipfw_ctx[mycpuid] = ctx;

        def_rule = kmalloc(sizeof(*def_rule), M_IPFW, M_WAITOK | M_ZERO);

        def_rule->act_ofs = 0;
        def_rule->rulenum = IPFW_DEFAULT_RULE;
        def_rule->cmd_len = 1;
        def_rule->set = IPFW_DEFAULT_SET;

        def_rule->cmd[0].len = 1;
#ifdef IPFIREWALL_DEFAULT_TO_ACCEPT
        def_rule->cmd[0].opcode = O_ACCEPT;
#else
        def_rule->cmd[0].opcode = O_DENY;
#endif

        def_rule->refcnt = 1;
        def_rule->cpuid = mycpuid;

        /* Install the default rule */
        ctx->ipfw_default_rule = def_rule;
        ctx->ipfw_layer3_chain = def_rule;

        /* Link this rule into the per-CPU sibling list */
        ipfw_link_sibling(fwmsg, def_rule);

        /* Statistics only need to be updated once */
        if (mycpuid == 0)
                ipfw_inc_static_count(def_rule);

        ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
}
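/*
 * Message flow for per-CPU context creation, sketched for an N-CPU
 * system (a simplified picture of what ifnet_domsg() and
 * ifnet_forwardmsg() are expected to do for ipfw_ctx_init_dispatch()
 * above):
 *
 *	ipfw_init_dispatch
 *	    |domsg
 *	    V
 *	  CPU0 --forwardmsg--> CPU1 --forwardmsg--> ... --> CPU(N-1)
 *	    ^                                                  |
 *	    +---------------------replymsg---------------------+
 *
 * Each hop runs ipfw_ctx_init_dispatch() on its own CPU, so every CPU
 * allocates and links its own ipfw_context and default rule without
 * locking; once mycpuid + 1 passes the last CPU, the message is
 * replied to the original sender blocked in ifnet_domsg().
 */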
"accept" : "deny"); 4431 4432 #ifdef IPFIREWALL_VERBOSE 4433 fw_verbose = 1; 4434 #endif 4435 #ifdef IPFIREWALL_VERBOSE_LIMIT 4436 verbose_limit = IPFIREWALL_VERBOSE_LIMIT; 4437 #endif 4438 if (fw_verbose == 0) { 4439 kprintf("disabled\n"); 4440 } else if (verbose_limit == 0) { 4441 kprintf("unlimited\n"); 4442 } else { 4443 kprintf("limited to %d packets/entry by default\n", 4444 verbose_limit); 4445 } 4446 4447 callout_init_mp(&ipfw_timeout_h); 4448 netmsg_init(&ipfw_timeout_netmsg, NULL, &netisr_adone_rport, 4449 MSGF_DROPABLE | MSGF_PRIORITY, 4450 ipfw_tick_dispatch); 4451 lockinit(&dyn_lock, "ipfw_dyn", 0, 0); 4452 4453 ip_fw_loaded = 1; 4454 callout_reset(&ipfw_timeout_h, hz, ipfw_tick, NULL); 4455 4456 if (fw_enable) 4457 ipfw_hook(); 4458 reply: 4459 lwkt_replymsg(&nmsg->lmsg, error); 4460 } 4461 4462 static int 4463 ipfw_init(void) 4464 { 4465 struct netmsg_base smsg; 4466 4467 netmsg_init(&smsg, NULL, &curthread->td_msgport, 4468 0, ipfw_init_dispatch); 4469 return lwkt_domsg(IPFW_CFGPORT, &smsg.lmsg, 0); 4470 } 4471 4472 #ifdef KLD_MODULE 4473 4474 static void 4475 ipfw_fini_dispatch(netmsg_t nmsg) 4476 { 4477 int error = 0, cpu; 4478 4479 if (ipfw_refcnt != 0) { 4480 error = EBUSY; 4481 goto reply; 4482 } 4483 4484 ip_fw_loaded = 0; 4485 4486 ipfw_dehook(); 4487 callout_stop(&ipfw_timeout_h); 4488 4489 netmsg_service_sync(); 4490 4491 crit_enter(); 4492 if ((ipfw_timeout_netmsg.lmsg.ms_flags & MSGF_DONE) == 0) { 4493 /* 4494 * Callout message is pending; drop it 4495 */ 4496 lwkt_dropmsg(&ipfw_timeout_netmsg.lmsg); 4497 } 4498 crit_exit(); 4499 4500 ip_fw_chk_ptr = NULL; 4501 ip_fw_ctl_ptr = NULL; 4502 ip_fw_dn_io_ptr = NULL; 4503 ipfw_flush(1 /* kill default rule */); 4504 4505 /* Free pre-cpu context */ 4506 for (cpu = 0; cpu < ncpus; ++cpu) 4507 kfree(ipfw_ctx[cpu], M_IPFW); 4508 4509 kprintf("IP firewall unloaded\n"); 4510 reply: 4511 lwkt_replymsg(&nmsg->lmsg, error); 4512 } 4513 4514 static int 4515 ipfw_fini(void) 4516 { 4517 struct netmsg_base smsg; 4518 4519 netmsg_init(&smsg, NULL, &curthread->td_msgport, 4520 0, ipfw_fini_dispatch); 4521 return lwkt_domsg(IPFW_CFGPORT, &smsg.lmsg, 0); 4522 } 4523 4524 #endif /* KLD_MODULE */ 4525 4526 static int 4527 ipfw_modevent(module_t mod, int type, void *unused) 4528 { 4529 int err = 0; 4530 4531 switch (type) { 4532 case MOD_LOAD: 4533 err = ipfw_init(); 4534 break; 4535 4536 case MOD_UNLOAD: 4537 #ifndef KLD_MODULE 4538 kprintf("ipfw statically compiled, cannot unload\n"); 4539 err = EBUSY; 4540 #else 4541 err = ipfw_fini(); 4542 #endif 4543 break; 4544 default: 4545 break; 4546 } 4547 return err; 4548 } 4549 4550 static moduledata_t ipfwmod = { 4551 "ipfw", 4552 ipfw_modevent, 4553 0 4554 }; 4555 DECLARE_MODULE(ipfw, ipfwmod, SI_SUB_PROTO_END, SI_ORDER_ANY); 4556 MODULE_VERSION(ipfw, 1); 4557