/*
 * Copyright (c) 2002 Luigi Rizzo, Universita` di Pisa
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/netinet/ip_fw2.c,v 1.6.2.12 2003/04/08 10:42:32 maxim Exp $
 */

/*
 * Implement IP packet firewall (new version)
 */

#include "opt_ipfw.h"
#include "opt_inet.h"
#ifndef INET
#error IPFIREWALL requires INET.
#endif /* INET */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/ucred.h>
#include <sys/in_cksum.h>
#include <sys/lock.h>

#include <net/if.h>
#include <net/route.h>
#include <net/pfil.h>
#include <net/dummynet/ip_dummynet.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>
#include <net/netmsg2.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_icmp.h>
#include <netinet/tcp.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <netinet/ip_divert.h>
#include <netinet/if_ether.h>	/* XXX for ETHERTYPE_IP */

#include <net/ipfw/ip_fw2.h>

#ifdef IPFIREWALL_DEBUG
#define DPRINTF(fmt, ...) \
do { \
	if (fw_debug > 0) \
		kprintf(fmt, __VA_ARGS__); \
} while (0)
#else
#define DPRINTF(fmt, ...)	((void)0)
#endif

/*
 * Description of the per-CPU rule duplication:
 *
 * Module loading/unloading and all ioctl operations are serialized
 * by netisr0, so we don't have any ordering or locking problems.
 *
 * The following graph shows how an operation on the per-CPU rule
 * lists is performed [2 CPU case]:
 *
 *    CPU0                      CPU1
 *
 *    netisr0 <------------------------------------+
 *      domsg                                      |
 *        |                                        |
 *        | netmsg                                 |
 *        |                                        |
 *        V                                        |
 *    ifnet0                                       |
 *        :                                        | netmsg
 *        :(delete/add...)                         |
 *        :                                        |
 *        :         netmsg                         |
 *        forwardmsg---------->ifnet1              |
 *                                :                |
 *                                :(delete/add...) |
 *                                :                |
 *                                :                |
 *                                replymsg---------+
 *
 *
 *
 *
 * Rules which will not create states (dyn rules) [2 CPU case]
 *
 *    CPU0               CPU1
 * layer3_chain       layer3_chain
 *     |                  |
 *     V                  V
 * +-------+ sibling  +-------+ sibling
 * | rule1 |--------->| rule1 |--------->NULL
 * +-------+          +-------+
 *     |                  |
 *     |next              |next
 *     V                  V
 * +-------+ sibling  +-------+ sibling
 * | rule2 |--------->| rule2 |--------->NULL
 * +-------+          +-------+
 *
 * ip_fw.sibling:
 * 1) Ease statistics calculation during IP_FW_GET.  We only need to
 *    iterate layer3_chain on CPU0; the current rule's duplication on
 *    the other CPUs can safely be accessed read-only through
 *    ip_fw.sibling.
 * 2) Accelerate rule insertion and deletion, e.g. rule insertion:
 *    a) In netisr0 (on CPU0) rule3 is determined to be inserted between
 *       rule1 and rule2.  To make this decision we need to iterate the
 *       layer3_chain on CPU0.  The netmsg, which is used to insert the
 *       rule, will contain rule1 on CPU0 as prev_rule and rule2 on CPU0
 *       as next_rule.
 *    b) After the insertion on CPU0 is done, we move on to CPU1.  But
 *       instead of relocating rule3's position on CPU1 by iterating the
 *       layer3_chain on CPU1, we set the netmsg's prev_rule to
 *       rule1->sibling and next_rule to rule2->sibling before the
 *       netmsg is forwarded from CPU0 to CPU1.
 *
 *
 *
 * Rules which will create states (dyn rules) [2 CPU case]
 * (unnecessary parts are omitted; they are the same as in the previous figure)
 *
 *    CPU0                       CPU1
 *
 * +-------+                 +-------+
 * | rule1 |                 | rule1 |
 * +-------+                 +-------+
 *   ^   |                     |   ^
 *   |   |stub             stub|   |
 *   |   |                     |   |
 *   |   +----+           +----+   |
 *   |        |           |        |
 *   |        V           V        |
 *   |    +--------------------+   |
 *   |    |      rule_stub     |   |
 *   |    | (read-only shared) |   |
 *   |    |                    |   |
 *   |    | back pointer array |   |
 *   |    | (indexed by cpuid) |   |
 *   |    |                    |   |
 *   +----|---------[0]        |   |
 *        |         [1]--------|---+
 *        |                    |
 *        +--------------------+
 *          ^                ^
 *          |                |
 *  ........|................|........
 *  :       |                |       :
 *  :       |stub            |stub   :
 *  :       |                |       :
 *  :  +---------+      +---------+  :
 *  :  | state1a |      | state1b |  : ....
 *  :  +---------+      +---------+  :
 *  :                                :
 *  :          states table          :
 *  :            (shared)            :
 *  :     (protected by dyn_lock)    :
 *  ..................................
 *
 * [state1a and state1b are states created by rule1]
 *
 * ip_fw_stub:
 * This structure is introduced so that the shared (locked) state table
 * can work with the per-CPU (duplicated) static rules.  It mainly
 * bridges states and static rules, and serves as the static rule's
 * placeholder (a read-only shared part of the duplicated rules) from
 * the states' point of view.
 *
 * IPFW_RULE_F_STATE (only for rules which create states):
 * o  During rule installation, this flag is turned on after the rule's
 *    duplications reach all CPUs, to avoid at least the following race:
 *    1) rule1 is duplicated on CPU0 and is not duplicated on CPU1 yet
 *    2) rule1 creates state1
 *    3) state1 is located on CPU1 by check-state
 *    But rule1 is not duplicated on CPU1 yet.
 * o  During rule deletion, this flag is turned off before deleting the
 *    states created by the rule and before deleting the rule itself, so
 *    no more states will be created by the to-be-deleted rule, even when
 *    its duplications on certain CPUs have not been eliminated yet.
 */

#define IPFW_AUTOINC_STEP_MIN	1
#define IPFW_AUTOINC_STEP_MAX	1000
#define IPFW_AUTOINC_STEP_DEF	100

#define IPFW_DEFAULT_RULE	65535	/* rulenum for the default rule */
#define IPFW_DEFAULT_SET	31	/* set number for the default rule */

struct netmsg_ipfw {
	struct netmsg_base base;
	const struct ipfw_ioc_rule *ioc_rule;
	struct ip_fw *next_rule;
	struct ip_fw *prev_rule;
	struct ip_fw *sibling;
	struct ip_fw_stub *stub;
};

struct netmsg_del {
	struct netmsg_base base;
	struct ip_fw *start_rule;
	struct ip_fw *prev_rule;
	uint16_t rulenum;
	uint8_t from_set;
	uint8_t to_set;
};

struct netmsg_zent {
	struct netmsg_base base;
	struct ip_fw *start_rule;
	uint16_t rulenum;
	uint16_t log_only;
};

struct ipfw_context {
	struct ip_fw *ipfw_layer3_chain;	/* list of rules for layer3 */
	struct ip_fw *ipfw_default_rule;	/* default rule */
	uint64_t ipfw_norule_counter;		/* counter for ipfw_log(NULL) */

	/*
	 * ipfw_set_disable contains one bit per set value (0..31).
	 * If the bit is set, all rules with the corresponding set
	 * are disabled.  Set IPFW_DEFAULT_SET is reserved for the
	 * default rule and CANNOT be disabled.
	 */
	uint32_t ipfw_set_disable;
	uint32_t ipfw_gen;			/* generation of rule list */
};

static struct ipfw_context *ipfw_ctx[MAXCPU];

#ifdef KLD_MODULE
/*
 * The module can not be unloaded if there are references to
 * certain rules of ipfw(4), e.g. from dummynet(4).
 */
static int ipfw_refcnt;
#endif

MALLOC_DEFINE(M_IPFW, "IpFw/IpAcct", "IpFw/IpAcct chain's");

/*
 * The following two global variables are accessed and
 * updated only on CPU0.
 */
static uint32_t static_count;	/* # of static rules */
static uint32_t static_ioc_len;	/* bytes of static rules */

/*
 * If 1, then ipfw static rules are being flushed,
 * ipfw_chk() will skip to the default rule.
 */
static int ipfw_flushing;

static int fw_verbose;
static int verbose_limit;

static int fw_debug;
static int autoinc_step = IPFW_AUTOINC_STEP_DEF;

static int	ipfw_sysctl_enable(SYSCTL_HANDLER_ARGS);
static int	ipfw_sysctl_autoinc_step(SYSCTL_HANDLER_ARGS);
static int	ipfw_sysctl_dyn_buckets(SYSCTL_HANDLER_ARGS);
static int	ipfw_sysctl_dyn_fin(SYSCTL_HANDLER_ARGS);
static int	ipfw_sysctl_dyn_rst(SYSCTL_HANDLER_ARGS);

SYSCTL_NODE(_net_inet_ip, OID_AUTO, fw, CTLFLAG_RW, 0, "Firewall");
SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
    &fw_enable, 0, ipfw_sysctl_enable, "I", "Enable ipfw");
SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, autoinc_step, CTLTYPE_INT | CTLFLAG_RW,
    &autoinc_step, 0, ipfw_sysctl_autoinc_step, "I",
    "Rule number autoincrement step");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, one_pass, CTLFLAG_RW,
    &fw_one_pass, 0,
    "Only do a single pass through ipfw when using dummynet(4)");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, debug, CTLFLAG_RW,
    &fw_debug, 0, "Enable printing of debug ip_fw statements");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose, CTLFLAG_RW,
    &fw_verbose, 0, "Log matches to ipfw rules");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose_limit, CTLFLAG_RW,
    &verbose_limit, 0, "Set upper limit of matches of ipfw rules logged");

/*
 * Description of dynamic rules.
 *
 * Dynamic rules are stored in lists accessed through a hash table
 * (ipfw_dyn_v) whose size is curr_dyn_buckets.  This value can
 * be modified through the sysctl variable dyn_buckets; the new
 * size takes effect when the table becomes empty.
 *
 * XXX currently there is only one list, ipfw_dyn.
 *
 * When a packet is received, its address fields are first masked
 * with the mask defined for the rule, then hashed, then matched
 * against the entries in the corresponding list.
 * Dynamic rules can be used for different purposes:
 *  + stateful rules;
 *  + enforcing limits on the number of sessions;
 *  + in-kernel NAT (not implemented yet)
 *
 * The lifetime of dynamic rules is regulated by dyn_*_lifetime,
 * measured in seconds and depending on the flags.
 *
 * The total number of dynamic rules is stored in dyn_count.
 * The max number of dynamic rules is dyn_max.  When we reach
 * the maximum number of rules we do not create any more.  This is
 * done to avoid consuming too much memory, but also too much
 * time when searching on each packet (ideally, we should try instead
 * to put a limit on the length of the list on each bucket...).
 *
 * Each dynamic rule holds a pointer to the parent ipfw rule so
 * we know what action to perform.  Dynamic rules are removed when
 * the parent rule is deleted.  XXX we should make them survive.
 *
 * There are some limitations with dynamic rules -- we do not
 * obey the 'randomized match', and we do not do multiple
 * passes through the firewall.  XXX check the latter!!!
 *
 * NOTE about the SHARED LOCKMGR LOCK during dynamic rule lookup:
 * Only a TCP state transition will change a dynamic rule's state and
 * ack sequences, and all packets of one TCP connection go through the
 * same TCP thread, so it is safe to use a shared lockmgr lock during
 * dynamic rule lookup.  The keepalive callout takes the lockmgr lock
 * exclusively when it tries to find suitable dynamic rules to send
 * keepalives, so it will not see half-updated state and ack sequences.
 * Though updating the expire field looks racy for other protocols, the
 * resolution (seconds) of the expire field makes this kind of race
 * harmless.
 * XXX statistics' updating is _not_ MPsafe!!!
 * XXX once the UDP output path is fixed, we could use a lockless
 * dynamic rule hash table
 */
static ipfw_dyn_rule **ipfw_dyn_v = NULL;
static uint32_t dyn_buckets = 256;	/* must be power of 2 */
static uint32_t curr_dyn_buckets = 256;	/* must be power of 2 */
static uint32_t dyn_buckets_gen;	/* generation of dyn buckets array */
static struct lock dyn_lock;		/* dynamic rules' hash table lock */

static struct netmsg_base ipfw_timeout_netmsg;	/* schedule ipfw timeout */
static struct callout ipfw_timeout_h;

/*
 * Timeouts for various events in handling dynamic rules.
 */
static uint32_t dyn_ack_lifetime = 300;
static uint32_t dyn_syn_lifetime = 20;
static uint32_t dyn_fin_lifetime = 1;
static uint32_t dyn_rst_lifetime = 1;
static uint32_t dyn_udp_lifetime = 10;
static uint32_t dyn_short_lifetime = 5;

/*
 * Keepalives are sent if dyn_keepalive is set.  They are sent every
 * dyn_keepalive_period seconds, in the last dyn_keepalive_interval
 * seconds of lifetime of a rule.
 * dyn_rst_lifetime and dyn_fin_lifetime should be strictly lower
 * than dyn_keepalive_period.
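 *
 * For example, with the default values below (dyn_keepalive_period = 5,
 * dyn_keepalive_interval = 20), an idle dynamic TCP rule is probed
 * roughly every 5 seconds during the last 20 seconds of its lifetime.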
 */

static uint32_t dyn_keepalive_interval = 20;
static uint32_t dyn_keepalive_period = 5;
static uint32_t dyn_keepalive = 1;	/* do send keepalives */

static uint32_t dyn_count;		/* # of dynamic rules */
static uint32_t dyn_max = 4096;		/* max # of dynamic rules */

SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_buckets, CTLTYPE_INT | CTLFLAG_RW,
    &dyn_buckets, 0, ipfw_sysctl_dyn_buckets, "I", "Number of dyn. buckets");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, curr_dyn_buckets, CTLFLAG_RD,
    &curr_dyn_buckets, 0, "Current Number of dyn. buckets");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_count, CTLFLAG_RD,
    &dyn_count, 0, "Number of dyn. rules");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_max, CTLFLAG_RW,
    &dyn_max, 0, "Max number of dyn. rules");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, static_count, CTLFLAG_RD,
    &static_count, 0, "Number of static rules");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_ack_lifetime, CTLFLAG_RW,
    &dyn_ack_lifetime, 0, "Lifetime of dyn. rules for acks");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_syn_lifetime, CTLFLAG_RW,
    &dyn_syn_lifetime, 0, "Lifetime of dyn. rules for syn");
SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_fin_lifetime,
    CTLTYPE_INT | CTLFLAG_RW, &dyn_fin_lifetime, 0, ipfw_sysctl_dyn_fin, "I",
    "Lifetime of dyn. rules for fin");
SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_rst_lifetime,
    CTLTYPE_INT | CTLFLAG_RW, &dyn_rst_lifetime, 0, ipfw_sysctl_dyn_rst, "I",
    "Lifetime of dyn. rules for rst");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_udp_lifetime, CTLFLAG_RW,
    &dyn_udp_lifetime, 0, "Lifetime of dyn. rules for UDP");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_short_lifetime, CTLFLAG_RW,
    &dyn_short_lifetime, 0, "Lifetime of dyn. rules for other situations");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_keepalive, CTLFLAG_RW,
    &dyn_keepalive, 0, "Enable keepalives for dyn. rules");

static ip_fw_chk_t	ipfw_chk;
static void		ipfw_tick(void *);

static __inline int
ipfw_free_rule(struct ip_fw *rule)
{
	KASSERT(rule->cpuid == mycpuid, ("rule freed on cpu%d", mycpuid));
	KASSERT(rule->refcnt > 0, ("invalid refcnt %u", rule->refcnt));
	rule->refcnt--;
	if (rule->refcnt == 0) {
		kfree(rule, M_IPFW);
		return 1;
	}
	return 0;
}

static void
ipfw_unref_rule(void *priv)
{
	ipfw_free_rule(priv);
#ifdef KLD_MODULE
	atomic_subtract_int(&ipfw_refcnt, 1);
#endif
}

static __inline void
ipfw_ref_rule(struct ip_fw *rule)
{
	KASSERT(rule->cpuid == mycpuid, ("rule used on cpu%d", mycpuid));
#ifdef KLD_MODULE
	atomic_add_int(&ipfw_refcnt, 1);
#endif
	rule->refcnt++;
}

/*
 * This macro maps an ip pointer into a layer3 header pointer of type T
 */
#define L3HDR(T, ip) ((T *)((uint32_t *)(ip) + (ip)->ip_hl))

static __inline int
icmptype_match(struct ip *ip, ipfw_insn_u32 *cmd)
{
	int type = L3HDR(struct icmp, ip)->icmp_type;

	return (type <= ICMP_MAXTYPE && (cmd->d[0] & (1 << type)));
}

#define TT	((1 << ICMP_ECHO) | \
		 (1 << ICMP_ROUTERSOLICIT) | \
		 (1 << ICMP_TSTAMP) | \
		 (1 << ICMP_IREQ) | \
		 (1 << ICMP_MASKREQ))

static int
is_icmp_query(struct ip *ip)
{
	int type = L3HDR(struct icmp, ip)->icmp_type;

	return (type <= ICMP_MAXTYPE && (TT & (1 << type)));
}

#undef TT

/*
 * The following checks use two arrays of 8 or 16 bits to store the
 * bits that we want set or clear, respectively.  They are in the
 * low and high half of cmd->arg1 or cmd->d[0].
 *
 * We scan options and store the bits we find set.  We succeed if
 *
 *	(want_set & ~bits) == 0 && (want_clear & ~bits) == want_clear
 *
 * The code is sometimes optimized not to store additional variables.
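 *
 * For instance, assuming the usual ipfw(8) encoding of "tcpflags syn,!ack":
 * the low byte of cmd->arg1 holds TH_SYN (wanted set) and the high byte
 * holds TH_ACK (wanted clear).  A segment carrying only SYN gives
 * bits == TH_SYN, both conditions above hold, and flags_match() below
 * returns 1.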
 */

static int
flags_match(ipfw_insn *cmd, uint8_t bits)
{
	u_char want_clear;
	bits = ~bits;

	if (((cmd->arg1 & 0xff) & bits) != 0)
		return 0;	/* some bits we want set were clear */

	want_clear = (cmd->arg1 >> 8) & 0xff;
	if ((want_clear & bits) != want_clear)
		return 0;	/* some bits we want clear were set */
	return 1;
}

static int
ipopts_match(struct ip *ip, ipfw_insn *cmd)
{
	int optlen, bits = 0;
	u_char *cp = (u_char *)(ip + 1);
	int x = (ip->ip_hl << 2) - sizeof(struct ip);

	for (; x > 0; x -= optlen, cp += optlen) {
		int opt = cp[IPOPT_OPTVAL];

		if (opt == IPOPT_EOL)
			break;

		if (opt == IPOPT_NOP) {
			optlen = 1;
		} else {
			optlen = cp[IPOPT_OLEN];
			if (optlen <= 0 || optlen > x)
				return 0;	/* invalid or truncated */
		}

		switch (opt) {
		case IPOPT_LSRR:
			bits |= IP_FW_IPOPT_LSRR;
			break;

		case IPOPT_SSRR:
			bits |= IP_FW_IPOPT_SSRR;
			break;

		case IPOPT_RR:
			bits |= IP_FW_IPOPT_RR;
			break;

		case IPOPT_TS:
			bits |= IP_FW_IPOPT_TS;
			break;

		default:
			break;
		}
	}
	return (flags_match(cmd, bits));
}

static int
tcpopts_match(struct ip *ip, ipfw_insn *cmd)
{
	int optlen, bits = 0;
	struct tcphdr *tcp = L3HDR(struct tcphdr, ip);
	u_char *cp = (u_char *)(tcp + 1);
	int x = (tcp->th_off << 2) - sizeof(struct tcphdr);

	for (; x > 0; x -= optlen, cp += optlen) {
		int opt = cp[0];

		if (opt == TCPOPT_EOL)
			break;

		if (opt == TCPOPT_NOP) {
			optlen = 1;
		} else {
			optlen = cp[1];
			if (optlen <= 0)
				break;
		}

		switch (opt) {
		case TCPOPT_MAXSEG:
			bits |= IP_FW_TCPOPT_MSS;
			break;

		case TCPOPT_WINDOW:
			bits |= IP_FW_TCPOPT_WINDOW;
			break;

		case TCPOPT_SACK_PERMITTED:
		case TCPOPT_SACK:
			bits |= IP_FW_TCPOPT_SACK;
			break;

		case TCPOPT_TIMESTAMP:
			bits |= IP_FW_TCPOPT_TS;
			break;

		case TCPOPT_CC:
		case TCPOPT_CCNEW:
		case TCPOPT_CCECHO:
			bits |= IP_FW_TCPOPT_CC;
			break;

		default:
			break;
		}
	}
	return (flags_match(cmd, bits));
}

static int
iface_match(struct ifnet *ifp, ipfw_insn_if *cmd)
{
	if (ifp == NULL)	/* no iface with this packet, match fails */
		return 0;

	/* Check by name or by IP address */
	if (cmd->name[0] != '\0') {	/* match by name */
		/* Check name */
		if (cmd->p.glob) {
			if (kfnmatch(cmd->name, ifp->if_xname, 0) == 0)
				return(1);
		} else {
			if (strncmp(ifp->if_xname, cmd->name, IFNAMSIZ) == 0)
				return(1);
		}
	} else {
		struct ifaddr_container *ifac;

		TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
			struct ifaddr *ia = ifac->ifa;

			if (ia->ifa_addr == NULL)
				continue;
			if (ia->ifa_addr->sa_family != AF_INET)
				continue;
			if (cmd->p.ip.s_addr == ((struct sockaddr_in *)
			    (ia->ifa_addr))->sin_addr.s_addr)
				return(1);	/* match */
		}
	}
	return(0);	/* no match, fail ... */
}

#define SNPARGS(buf, len) buf + len, sizeof(buf) > len ? sizeof(buf) - len : 0

/*
 * We enter here when we have a rule with O_LOG.
 * XXX this function alone takes about 2Kbytes of code!
 */
static void
ipfw_log(struct ip_fw *f, u_int hlen, struct ether_header *eh,
	 struct mbuf *m, struct ifnet *oif)
{
	char *action;
	int limit_reached = 0;
	char action2[40], proto[48], fragment[28];

	fragment[0] = '\0';
	proto[0] = '\0';

	if (f == NULL) {	/* bogus pkt */
		struct ipfw_context *ctx = ipfw_ctx[mycpuid];

		if (verbose_limit != 0 &&
		    ctx->ipfw_norule_counter >= verbose_limit)
			return;
		ctx->ipfw_norule_counter++;
		if (ctx->ipfw_norule_counter == verbose_limit)
			limit_reached = verbose_limit;
		action = "Refuse";
	} else {	/* O_LOG is the first action, find the real one */
		ipfw_insn *cmd = ACTION_PTR(f);
		ipfw_insn_log *l = (ipfw_insn_log *)cmd;

		if (l->max_log != 0 && l->log_left == 0)
			return;
		l->log_left--;
		if (l->log_left == 0)
			limit_reached = l->max_log;
		cmd += F_LEN(cmd);	/* point to first action */
		if (cmd->opcode == O_PROB)
			cmd += F_LEN(cmd);

		action = action2;
		switch (cmd->opcode) {
		case O_DENY:
			action = "Deny";
			break;

		case O_REJECT:
			if (cmd->arg1 == ICMP_REJECT_RST) {
				action = "Reset";
			} else if (cmd->arg1 == ICMP_UNREACH_HOST) {
				action = "Reject";
			} else {
				ksnprintf(SNPARGS(action2, 0), "Unreach %d",
					  cmd->arg1);
			}
			break;

		case O_ACCEPT:
			action = "Accept";
			break;

		case O_COUNT:
			action = "Count";
			break;

		case O_DIVERT:
			ksnprintf(SNPARGS(action2, 0), "Divert %d", cmd->arg1);
			break;

		case O_TEE:
			ksnprintf(SNPARGS(action2, 0), "Tee %d", cmd->arg1);
			break;

		case O_SKIPTO:
			ksnprintf(SNPARGS(action2, 0), "SkipTo %d", cmd->arg1);
			break;

		case O_PIPE:
			ksnprintf(SNPARGS(action2, 0), "Pipe %d", cmd->arg1);
			break;

		case O_QUEUE:
			ksnprintf(SNPARGS(action2, 0), "Queue %d", cmd->arg1);
			break;

		case O_FORWARD_IP:
		{
			ipfw_insn_sa *sa = (ipfw_insn_sa *)cmd;
			int len;

			len = ksnprintf(SNPARGS(action2, 0),
					"Forward to %s",
					inet_ntoa(sa->sa.sin_addr));
			if (sa->sa.sin_port) {
				ksnprintf(SNPARGS(action2, len), ":%d",
					  sa->sa.sin_port);
			}
		}
			break;

		default:
			action = "UNKNOWN";
			break;
		}
	}

	if (hlen == 0) {	/* non-ip */
		ksnprintf(SNPARGS(proto, 0), "MAC");
	} else {
		struct ip *ip = mtod(m, struct ip *);
		/* these three are all aliases to the same thing */
		struct icmp *const icmp = L3HDR(struct icmp, ip);
		struct tcphdr *const tcp = (struct tcphdr *)icmp;
		struct udphdr *const udp = (struct udphdr *)icmp;

		int ip_off, offset, ip_len;
		int len;

		if (eh != NULL) {	/* layer 2 packets are as on the wire */
			ip_off = ntohs(ip->ip_off);
			ip_len = ntohs(ip->ip_len);
		} else {
			ip_off = ip->ip_off;
			ip_len = ip->ip_len;
		}
		offset = ip_off & IP_OFFMASK;
		switch (ip->ip_p) {
		case IPPROTO_TCP:
			len = ksnprintf(SNPARGS(proto, 0), "TCP %s",
					inet_ntoa(ip->ip_src));
			if (offset == 0) {
				ksnprintf(SNPARGS(proto, len), ":%d %s:%d",
					  ntohs(tcp->th_sport),
					  inet_ntoa(ip->ip_dst),
					  ntohs(tcp->th_dport));
			} else {
				ksnprintf(SNPARGS(proto, len), " %s",
					  inet_ntoa(ip->ip_dst));
			}
			break;

		case IPPROTO_UDP:
			len = ksnprintf(SNPARGS(proto, 0), "UDP %s",
					inet_ntoa(ip->ip_src));
			if (offset == 0) {
				ksnprintf(SNPARGS(proto, len), ":%d %s:%d",
					  ntohs(udp->uh_sport),
					  inet_ntoa(ip->ip_dst),
					  ntohs(udp->uh_dport));
			} else {
				ksnprintf(SNPARGS(proto, len), " %s",
					  inet_ntoa(ip->ip_dst));
			}
			break;

		case IPPROTO_ICMP:
			if (offset == 0) {
				len = ksnprintf(SNPARGS(proto, 0),
						"ICMP:%u.%u ",
						icmp->icmp_type,
						icmp->icmp_code);
			} else {
				len = ksnprintf(SNPARGS(proto, 0), "ICMP ");
			}
			len += ksnprintf(SNPARGS(proto, len), "%s",
					 inet_ntoa(ip->ip_src));
			ksnprintf(SNPARGS(proto, len), " %s",
				  inet_ntoa(ip->ip_dst));
			break;

		default:
			len = ksnprintf(SNPARGS(proto, 0), "P:%d %s", ip->ip_p,
					inet_ntoa(ip->ip_src));
			ksnprintf(SNPARGS(proto, len), " %s",
				  inet_ntoa(ip->ip_dst));
			break;
		}

		if (ip_off & (IP_MF | IP_OFFMASK)) {
			ksnprintf(SNPARGS(fragment, 0), " (frag %d:%d@%d%s)",
				  ntohs(ip->ip_id), ip_len - (ip->ip_hl << 2),
				  offset << 3, (ip_off & IP_MF) ? "+" : "");
		}
	}

	if (oif || m->m_pkthdr.rcvif) {
		log(LOG_SECURITY | LOG_INFO,
		    "ipfw: %d %s %s %s via %s%s\n",
		    f ? f->rulenum : -1,
		    action, proto, oif ? "out" : "in",
		    oif ? oif->if_xname : m->m_pkthdr.rcvif->if_xname,
		    fragment);
	} else {
		log(LOG_SECURITY | LOG_INFO,
		    "ipfw: %d %s %s [no if info]%s\n",
		    f ? f->rulenum : -1,
		    action, proto, fragment);
	}

	if (limit_reached) {
		log(LOG_SECURITY | LOG_NOTICE,
		    "ipfw: limit %d reached on entry %d\n",
		    limit_reached, f ? f->rulenum : -1);
	}
}

#undef SNPARGS

/*
 * IMPORTANT: the hash function for dynamic rules must be commutative
 * in source and destination (ip, port), because rules are bidirectional
 * and we want to find both in the same bucket.
 */
static __inline int
hash_packet(struct ipfw_flow_id *id)
{
	uint32_t i;

	i = (id->dst_ip) ^ (id->src_ip) ^ (id->dst_port) ^ (id->src_port);
	i &= (curr_dyn_buckets - 1);
	return i;
}

/**
 * Unlink a dynamic rule from a chain.  prev is a pointer to
 * the previous one, q is a pointer to the rule to delete,
 * head is a pointer to the head of the queue.
 * Modifies q and potentially also head.
 */
#define UNLINK_DYN_RULE(prev, head, q) \
do { \
	ipfw_dyn_rule *old_q = q; \
\
	/* remove a refcount to the parent */ \
	if (q->dyn_type == O_LIMIT) \
		q->parent->count--; \
	DPRINTF("-- unlink entry 0x%08x %d -> 0x%08x %d, %d left\n", \
		q->id.src_ip, q->id.src_port, \
		q->id.dst_ip, q->id.dst_port, dyn_count - 1); \
	if (prev != NULL) \
		prev->next = q = q->next; \
	else \
		head = q = q->next; \
	KASSERT(dyn_count > 0, ("invalid dyn count %u", dyn_count)); \
	dyn_count--; \
	kfree(old_q, M_IPFW); \
} while (0)

#define TIME_LEQ(a, b)	((int)((a) - (b)) <= 0)

/**
 * Remove dynamic rules pointing to "rule", or all of them if rule == NULL.
 *
 * If keep_me == NULL, rules are deleted even if not expired,
 * otherwise only expired rules are removed.
 *
 * The value of the second parameter is also used to identify
 * a rule we absolutely do not want to remove (e.g. because we are
 * holding a reference to it -- this is the case with O_LIMIT_PARENT
 * rules).  The pointer is only used for comparison, so any non-null
 * value will do.
 */
static void
remove_dyn_rule_locked(struct ip_fw *rule, ipfw_dyn_rule *keep_me)
{
	static uint32_t last_remove = 0; /* XXX */

#define FORCE	(keep_me == NULL)

	ipfw_dyn_rule *prev, *q;
	int i, pass = 0, max_pass = 0, unlinked = 0;

	if (ipfw_dyn_v == NULL || dyn_count == 0)
		return;
	/* do not expire more than once per second, it is useless */
	if (!FORCE && last_remove == time_second)
		return;
	last_remove = time_second;

	/*
	 * Because O_LIMIT rules refer to parent rules, during the first
	 * pass we only remove child rules and mark any pending
	 * O_LIMIT_PARENT rules; those are removed in a second pass.
	 */
next_pass:
	for (i = 0; i < curr_dyn_buckets; i++) {
		for (prev = NULL, q = ipfw_dyn_v[i]; q;) {
			/*
			 * Logic can become complex here, so we split tests.
			 */
			if (q == keep_me)
				goto next;
			if (rule != NULL && rule->stub != q->stub)
				goto next; /* not the one we are looking for */
			if (q->dyn_type == O_LIMIT_PARENT) {
				/*
				 * Handle parents in the second pass;
				 * record that we need one.
				 */
				max_pass = 1;
				if (pass == 0)
					goto next;
				if (FORCE && q->count != 0) {
					/* XXX should not happen! */
					kprintf("OUCH! cannot remove rule, "
						"count %d\n", q->count);
				}
			} else {
				if (!FORCE &&
				    !TIME_LEQ(q->expire, time_second))
					goto next;
			}
			unlinked = 1;
			UNLINK_DYN_RULE(prev, ipfw_dyn_v[i], q);
			continue;
next:
			prev = q;
			q = q->next;
		}
	}
	if (pass++ < max_pass)
		goto next_pass;

	if (unlinked)
		++dyn_buckets_gen;

#undef FORCE
}

/**
 * Lookup a dynamic rule.
 */
static ipfw_dyn_rule *
lookup_dyn_rule(struct ipfw_flow_id *pkt, int *match_direction,
		struct tcphdr *tcp)
{
	/*
	 * Stateful ipfw extensions.
	 * Lookup into the dynamic session queue.
	 */
#define MATCH_REVERSE	0
#define MATCH_FORWARD	1
#define MATCH_NONE	2
#define MATCH_UNKNOWN	3
	int i, dir = MATCH_NONE;
	ipfw_dyn_rule *prev, *q = NULL;

	if (ipfw_dyn_v == NULL)
		goto done;	/* not found */

	i = hash_packet(pkt);
	for (prev = NULL, q = ipfw_dyn_v[i]; q != NULL;) {
		if (q->dyn_type == O_LIMIT_PARENT)
			goto next;

		if (TIME_LEQ(q->expire, time_second)) {
			/*
			 * Entry expired; skip.
			 * Let ipfw_tick() take care of it
			 */
			goto next;
		}

		if (pkt->proto == q->id.proto) {
			if (pkt->src_ip == q->id.src_ip &&
			    pkt->dst_ip == q->id.dst_ip &&
			    pkt->src_port == q->id.src_port &&
			    pkt->dst_port == q->id.dst_port) {
				dir = MATCH_FORWARD;
				break;
			}
			if (pkt->src_ip == q->id.dst_ip &&
			    pkt->dst_ip == q->id.src_ip &&
			    pkt->src_port == q->id.dst_port &&
			    pkt->dst_port == q->id.src_port) {
				dir = MATCH_REVERSE;
				break;
			}
		}
next:
		prev = q;
		q = q->next;
	}
	if (q == NULL)
		goto done;	/* q = NULL, not found */

	if (pkt->proto == IPPROTO_TCP) { /* update state according to flags */
		u_char flags = pkt->flags & (TH_FIN | TH_SYN | TH_RST);

#define BOTH_SYN	(TH_SYN | (TH_SYN << 8))
#define BOTH_FIN	(TH_FIN | (TH_FIN << 8))

		q->state |= (dir == MATCH_FORWARD) ? flags : (flags << 8);
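		/*
		 * q->state accumulates the TCP flags seen in the forward
		 * direction in its low byte and those seen in the reverse
		 * direction in its high byte, so e.g. BOTH_SYN means a SYN
		 * has been observed in both directions.
		 */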
		switch (q->state) {
		case TH_SYN:				/* opening */
			q->expire = time_second + dyn_syn_lifetime;
			break;

		case BOTH_SYN:			/* move to established */
		case BOTH_SYN | TH_FIN:		/* one side tries to close */
		case BOTH_SYN | (TH_FIN << 8):
			if (tcp) {
				uint32_t ack = ntohl(tcp->th_ack);

#define _SEQ_GE(a, b)	((int)(a) - (int)(b) >= 0)

				if (dir == MATCH_FORWARD) {
					if (q->ack_fwd == 0 ||
					    _SEQ_GE(ack, q->ack_fwd))
						q->ack_fwd = ack;
					else /* ignore out-of-sequence */
						break;
				} else {
					if (q->ack_rev == 0 ||
					    _SEQ_GE(ack, q->ack_rev))
						q->ack_rev = ack;
					else /* ignore out-of-sequence */
						break;
				}
#undef _SEQ_GE
			}
			q->expire = time_second + dyn_ack_lifetime;
			break;

		case BOTH_SYN | BOTH_FIN:	/* both sides closed */
			KKASSERT(dyn_fin_lifetime < dyn_keepalive_period);
			q->expire = time_second + dyn_fin_lifetime;
			break;

		default:
#if 0
			/*
			 * reset or some invalid combination, but can also
			 * occur if we use keep-state the wrong way.
			 */
			if ((q->state & ((TH_RST << 8) | TH_RST)) == 0)
				kprintf("invalid state: 0x%x\n", q->state);
#endif
			KKASSERT(dyn_rst_lifetime < dyn_keepalive_period);
			q->expire = time_second + dyn_rst_lifetime;
			break;
		}
	} else if (pkt->proto == IPPROTO_UDP) {
		q->expire = time_second + dyn_udp_lifetime;
	} else {
		/* other protocols */
		q->expire = time_second + dyn_short_lifetime;
	}
done:
	if (match_direction)
		*match_direction = dir;
	return q;
}

static struct ip_fw *
lookup_rule(struct ipfw_flow_id *pkt, int *match_direction, struct tcphdr *tcp,
	    uint16_t len, int *deny)
{
	struct ip_fw *rule = NULL;
	ipfw_dyn_rule *q;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	uint32_t gen;

	*deny = 0;
	gen = ctx->ipfw_gen;

	lockmgr(&dyn_lock, LK_SHARED);

	if (ctx->ipfw_gen != gen) {
		/*
		 * Static rules have been changed while we were waiting
		 * for the dynamic hash table lock; deny this packet,
		 * since it is _not_ known whether it is safe to keep
		 * iterating the static rules.
		 */
		*deny = 1;
		goto back;
	}

	q = lookup_dyn_rule(pkt, match_direction, tcp);
	if (q == NULL) {
		rule = NULL;
	} else {
		rule = q->stub->rule[mycpuid];
		KKASSERT(rule->stub == q->stub && rule->cpuid == mycpuid);

		/* XXX */
		q->pcnt++;
		q->bcnt += len;
	}
back:
	lockmgr(&dyn_lock, LK_RELEASE);
	return rule;
}

static void
realloc_dynamic_table(void)
{
	ipfw_dyn_rule **old_dyn_v;
	uint32_t old_curr_dyn_buckets;

	KASSERT(dyn_buckets <= 65536 && (dyn_buckets & (dyn_buckets - 1)) == 0,
		("invalid dyn_buckets %d", dyn_buckets));

	/* Save the current buckets array for later error recovery */
	old_dyn_v = ipfw_dyn_v;
	old_curr_dyn_buckets = curr_dyn_buckets;

	curr_dyn_buckets = dyn_buckets;
	for (;;) {
		ipfw_dyn_v = kmalloc(curr_dyn_buckets * sizeof(ipfw_dyn_rule *),
				     M_IPFW, M_NOWAIT | M_ZERO);
		if (ipfw_dyn_v != NULL || curr_dyn_buckets <= 2)
			break;

		curr_dyn_buckets /= 2;
		if (curr_dyn_buckets <= old_curr_dyn_buckets &&
		    old_dyn_v != NULL) {
			/*
			 * Don't try allocating a smaller buckets array;
			 * reuse the old one, which already contains
			 * enough buckets.
			 */
			break;
		}
	}

	if (ipfw_dyn_v != NULL) {
		if (old_dyn_v != NULL)
			kfree(old_dyn_v, M_IPFW);
	} else {
		/* Allocation failed, restore old buckets array */
		ipfw_dyn_v = old_dyn_v;
		curr_dyn_buckets = old_curr_dyn_buckets;
	}

	if (ipfw_dyn_v != NULL)
		++dyn_buckets_gen;
}

/**
 * Install state of type 'type' for a dynamic session.
 * The hash table contains three types of rules:
 * - regular rules (O_KEEP_STATE)
 * - rules for sessions with a limited number of sessions per user
 *   (O_LIMIT).  When they are created, the parent's count is
 *   increased by 1, and decreased on delete.  In this case,
 *   the third parameter is the parent rule and not the chain.
 * - "parent" rules for the above (O_LIMIT_PARENT).
 */
static ipfw_dyn_rule *
add_dyn_rule(struct ipfw_flow_id *id, uint8_t dyn_type, struct ip_fw *rule)
{
	ipfw_dyn_rule *r;
	int i;

	if (ipfw_dyn_v == NULL ||
	    (dyn_count == 0 && dyn_buckets != curr_dyn_buckets)) {
		realloc_dynamic_table();
		if (ipfw_dyn_v == NULL)
			return NULL;	/* failed! */
	}
	i = hash_packet(id);

	r = kmalloc(sizeof(*r), M_IPFW, M_NOWAIT | M_ZERO);
	if (r == NULL) {
		kprintf("sorry cannot allocate state\n");
		return NULL;
	}

	/* increase refcount on parent, and set pointer */
	if (dyn_type == O_LIMIT) {
		ipfw_dyn_rule *parent = (ipfw_dyn_rule *)rule;

		if (parent->dyn_type != O_LIMIT_PARENT)
			panic("invalid parent");
		parent->count++;
		r->parent = parent;
		rule = parent->stub->rule[mycpuid];
		KKASSERT(rule->stub == parent->stub);
	}
	KKASSERT(rule->cpuid == mycpuid && rule->stub != NULL);

	r->id = *id;
	r->expire = time_second + dyn_syn_lifetime;
	r->stub = rule->stub;
	r->dyn_type = dyn_type;
	r->pcnt = r->bcnt = 0;
	r->count = 0;

	r->bucket = i;
	r->next = ipfw_dyn_v[i];
	ipfw_dyn_v[i] = r;
	dyn_count++;
	dyn_buckets_gen++;
	DPRINTF("-- add dyn entry ty %d 0x%08x %d -> 0x%08x %d, total %d\n",
		dyn_type,
		r->id.src_ip, r->id.src_port,
		r->id.dst_ip, r->id.dst_port, dyn_count);
	return r;
}

/**
 * Lookup a dynamic parent rule using pkt and rule as search keys.
 * If the lookup fails, then install one.
 */
static ipfw_dyn_rule *
lookup_dyn_parent(struct ipfw_flow_id *pkt, struct ip_fw *rule)
{
	ipfw_dyn_rule *q;
	int i;

	if (ipfw_dyn_v) {
		i = hash_packet(pkt);
		for (q = ipfw_dyn_v[i]; q != NULL; q = q->next) {
			if (q->dyn_type == O_LIMIT_PARENT &&
			    rule->stub == q->stub &&
			    pkt->proto == q->id.proto &&
			    pkt->src_ip == q->id.src_ip &&
			    pkt->dst_ip == q->id.dst_ip &&
			    pkt->src_port == q->id.src_port &&
			    pkt->dst_port == q->id.dst_port) {
				q->expire = time_second + dyn_short_lifetime;
				DPRINTF("lookup_dyn_parent found 0x%p\n", q);
				return q;
			}
		}
	}
	return add_dyn_rule(pkt, O_LIMIT_PARENT, rule);
}

/**
 * Install dynamic state for rule type cmd->o.opcode
 *
 * Returns 1 (failure) if state is not installed because of errors or because
 * session limitations are enforced.
 */
static int
install_state_locked(struct ip_fw *rule, ipfw_insn_limit *cmd,
		     struct ip_fw_args *args)
{
	static int last_log; /* XXX */

	ipfw_dyn_rule *q;

	DPRINTF("-- install state type %d 0x%08x %u -> 0x%08x %u\n",
		cmd->o.opcode,
		args->f_id.src_ip, args->f_id.src_port,
		args->f_id.dst_ip, args->f_id.dst_port);

	q = lookup_dyn_rule(&args->f_id, NULL, NULL);
	if (q != NULL) {	/* should never occur */
		if (last_log != time_second) {
			last_log = time_second;
			kprintf(" install_state: entry already present, done\n");
		}
		return 0;
	}

	if (dyn_count >= dyn_max) {
		/*
		 * Run out of slots; try to remove any expired rule.
		 */
		remove_dyn_rule_locked(NULL, (ipfw_dyn_rule *)1);
		if (dyn_count >= dyn_max) {
			if (last_log != time_second) {
				last_log = time_second;
				kprintf("install_state: "
					"Too many dynamic rules\n");
			}
			return 1;	/* cannot install, notify caller */
		}
	}

	switch (cmd->o.opcode) {
	case O_KEEP_STATE:	/* bidir rule */
		if (add_dyn_rule(&args->f_id, O_KEEP_STATE, rule) == NULL)
			return 1;
		break;

	case O_LIMIT:		/* limit number of sessions */
	{
		uint16_t limit_mask = cmd->limit_mask;
		struct ipfw_flow_id id;
		ipfw_dyn_rule *parent;

		DPRINTF("installing dyn-limit rule %d\n",
			cmd->conn_limit);

		id.dst_ip = id.src_ip = 0;
		id.dst_port = id.src_port = 0;
		id.proto = args->f_id.proto;

		if (limit_mask & DYN_SRC_ADDR)
			id.src_ip = args->f_id.src_ip;
		if (limit_mask & DYN_DST_ADDR)
			id.dst_ip = args->f_id.dst_ip;
		if (limit_mask & DYN_SRC_PORT)
			id.src_port = args->f_id.src_port;
		if (limit_mask & DYN_DST_PORT)
			id.dst_port = args->f_id.dst_port;

		parent = lookup_dyn_parent(&id, rule);
		if (parent == NULL) {
			kprintf("add parent failed\n");
			return 1;
		}

		if (parent->count >= cmd->conn_limit) {
			/*
			 * See if we can remove some expired rule.
			 */
			remove_dyn_rule_locked(rule, parent);
			if (parent->count >= cmd->conn_limit) {
				if (fw_verbose &&
				    last_log != time_second) {
					last_log = time_second;
					log(LOG_SECURITY | LOG_DEBUG,
					    "drop session, "
					    "too many entries\n");
				}
				return 1;
			}
		}
		if (add_dyn_rule(&args->f_id, O_LIMIT,
				 (struct ip_fw *)parent) == NULL)
			return 1;
	}
		break;

	default:
		kprintf("unknown dynamic rule type %u\n", cmd->o.opcode);
		return 1;
	}
	lookup_dyn_rule(&args->f_id, NULL, NULL); /* XXX just set lifetime */
	return 0;
}

static int
install_state(struct ip_fw *rule, ipfw_insn_limit *cmd,
	      struct ip_fw_args *args, int *deny)
{
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	uint32_t gen;
	int ret = 0;

	*deny = 0;
	gen = ctx->ipfw_gen;

	lockmgr(&dyn_lock, LK_EXCLUSIVE);
	if (ctx->ipfw_gen != gen) {
		/* See the comment in lookup_rule() */
		*deny = 1;
	} else {
		ret = install_state_locked(rule, cmd, args);
	}
	lockmgr(&dyn_lock, LK_RELEASE);

	return ret;
}

/*
 * Transmit a TCP packet, containing either a RST or a keepalive.
 * When flags & TH_RST is set, we are sending a RST packet because a
 * "reset" action matched the packet.
 * Otherwise we are sending a keepalive, and flags & TH_SYN determines
 * the direction (forward if set, reverse if clear; see below).
 */
static void
send_pkt(struct ipfw_flow_id *id, uint32_t seq, uint32_t ack, int flags)
{
	struct mbuf *m;
	struct ip *ip;
	struct tcphdr *tcp;
	struct route sro;	/* fake route */

	MGETHDR(m, MB_DONTWAIT, MT_HEADER);
	if (m == NULL)
		return;
	m->m_pkthdr.rcvif = NULL;
	m->m_pkthdr.len = m->m_len = sizeof(struct ip) + sizeof(struct tcphdr);
	m->m_data += max_linkhdr;

	ip = mtod(m, struct ip *);
	bzero(ip, m->m_len);
	tcp = (struct tcphdr *)(ip + 1);	/* no IP options */
	ip->ip_p = IPPROTO_TCP;
	tcp->th_off = 5;

	/*
	 * Assume we are sending a RST (or a keepalive in the reverse
	 * direction), swap src and destination addresses and ports.
	 */
	ip->ip_src.s_addr = htonl(id->dst_ip);
	ip->ip_dst.s_addr = htonl(id->src_ip);
	tcp->th_sport = htons(id->dst_port);
	tcp->th_dport = htons(id->src_port);
	if (flags & TH_RST) {	/* we are sending a RST */
		if (flags & TH_ACK) {
			tcp->th_seq = htonl(ack);
			tcp->th_ack = htonl(0);
			tcp->th_flags = TH_RST;
		} else {
			if (flags & TH_SYN)
				seq++;
			tcp->th_seq = htonl(0);
			tcp->th_ack = htonl(seq);
			tcp->th_flags = TH_RST | TH_ACK;
		}
	} else {
		/*
		 * We are sending a keepalive.  flags & TH_SYN determines
		 * the direction, forward if set, reverse if clear.
		 * NOTE: seq and ack are always assumed to be correct
		 * as set by the caller.  This may be confusing...
		 */
		if (flags & TH_SYN) {
			/*
			 * we have to rewrite the correct addresses!
			 */
			ip->ip_dst.s_addr = htonl(id->dst_ip);
			ip->ip_src.s_addr = htonl(id->src_ip);
			tcp->th_dport = htons(id->dst_port);
			tcp->th_sport = htons(id->src_port);
		}
		tcp->th_seq = htonl(seq);
		tcp->th_ack = htonl(ack);
		tcp->th_flags = TH_ACK;
	}

	/*
	 * Set ip_len to the payload size so we can compute
	 * the tcp checksum on the pseudoheader.
	 * XXX check this, could save a couple of words ?
	 */
	ip->ip_len = htons(sizeof(struct tcphdr));
	tcp->th_sum = in_cksum(m, m->m_pkthdr.len);

	/*
	 * now fill fields left out earlier
	 */
	ip->ip_ttl = ip_defttl;
	ip->ip_len = m->m_pkthdr.len;

	bzero(&sro, sizeof(sro));
	ip_rtaddr(ip->ip_dst, &sro);

	m->m_pkthdr.fw_flags |= IPFW_MBUF_GENERATED;
	ip_output(m, NULL, &sro, 0, NULL, NULL);
	if (sro.ro_rt)
		RTFREE(sro.ro_rt);
}

/*
 * Send a reject message, consuming the mbuf passed as an argument.
 */
static void
send_reject(struct ip_fw_args *args, int code, int offset, int ip_len)
{
	if (code != ICMP_REJECT_RST) {	/* Send an ICMP unreach */
		/* We need the IP header in host order for icmp_error(). */
		if (args->eh != NULL) {
			struct ip *ip = mtod(args->m, struct ip *);

			ip->ip_len = ntohs(ip->ip_len);
			ip->ip_off = ntohs(ip->ip_off);
		}
		icmp_error(args->m, ICMP_UNREACH, code, 0L, 0);
	} else if (offset == 0 && args->f_id.proto == IPPROTO_TCP) {
		struct tcphdr *const tcp =
		    L3HDR(struct tcphdr, mtod(args->m, struct ip *));

		if ((tcp->th_flags & TH_RST) == 0) {
			send_pkt(&args->f_id, ntohl(tcp->th_seq),
				 ntohl(tcp->th_ack), tcp->th_flags | TH_RST);
		}
		m_freem(args->m);
	} else {
		m_freem(args->m);
	}
	args->m = NULL;
}

/**
 *
 * Given an ip_fw *, lookup_next_rule will return a pointer
 * to the next rule, which can be either the jump
 * target (for skipto instructions) or the next one in the list (in
 * all other cases including a missing jump target).
 * The result is also written in the "next_rule" field of the rule.
 * Backward jumps are not allowed, so start looking from the next
 * rule...
 *
 * This never returns NULL -- in case we do not have an exact match,
 * the next rule is returned.  When the ruleset is changed,
 * pointers are flushed so we are always correct.
 */

static struct ip_fw *
lookup_next_rule(struct ip_fw *me)
{
	struct ip_fw *rule = NULL;
	ipfw_insn *cmd;

	/* look for action, in case it is a skipto */
	cmd = ACTION_PTR(me);
	if (cmd->opcode == O_LOG)
		cmd += F_LEN(cmd);
	if (cmd->opcode == O_SKIPTO) {
		for (rule = me->next; rule; rule = rule->next) {
			if (rule->rulenum >= cmd->arg1)
				break;
		}
	}
	if (rule == NULL)	/* failure or not a skipto */
		rule = me->next;
	me->next_rule = rule;
	return rule;
}

static int
_ipfw_match_uid(const struct ipfw_flow_id *fid, struct ifnet *oif,
		enum ipfw_opcodes opcode, uid_t uid)
{
	struct in_addr src_ip, dst_ip;
	struct inpcbinfo *pi;
	int wildcard;
	struct inpcb *pcb;

	if (fid->proto == IPPROTO_TCP) {
		wildcard = 0;
		pi = &tcbinfo[mycpuid];
	} else if (fid->proto == IPPROTO_UDP) {
		wildcard = 1;
		pi = &udbinfo;
	} else {
		return 0;
	}

	/*
	 * Values in 'fid' are in host byte order
	 */
	dst_ip.s_addr = htonl(fid->dst_ip);
	src_ip.s_addr = htonl(fid->src_ip);
	if (oif) {
		pcb = in_pcblookup_hash(pi,
			dst_ip, htons(fid->dst_port),
			src_ip, htons(fid->src_port),
			wildcard, oif);
	} else {
		pcb = in_pcblookup_hash(pi,
			src_ip, htons(fid->src_port),
			dst_ip, htons(fid->dst_port),
			wildcard, NULL);
	}
	if (pcb == NULL || pcb->inp_socket == NULL)
		return 0;

	if (opcode == O_UID) {
#define socheckuid(a, b)	((a)->so_cred->cr_uid != (b))
		return !socheckuid(pcb->inp_socket, uid);
#undef socheckuid
	} else {
		return groupmember(uid, pcb->inp_socket->so_cred);
	}
}

static int
ipfw_match_uid(const struct ipfw_flow_id *fid, struct ifnet *oif,
	       enum ipfw_opcodes opcode, uid_t uid, int *deny)
{
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	uint32_t gen;
	int match = 0;

	*deny = 0;
	gen = ctx->ipfw_gen;

	get_mplock();
	if (gen != ctx->ipfw_gen) {
		/* See the comment in lookup_rule() */
		*deny = 1;
	} else {
		match = _ipfw_match_uid(fid, oif, opcode, uid);
	}
	rel_mplock();
	return match;
}

/*
 * The main check routine for the firewall.
 *
 * All arguments are in args so we can modify them and return them
 * back to the caller.
 *
 * Parameters:
 *
 *	args->m	(in/out)	The packet; we set to NULL when/if we nuke it.
 *				Starts with the IP header.
 *	args->eh (in)		Mac header if present, or NULL for layer3 packet.
 *	args->oif		Outgoing interface, or NULL if packet is incoming.
 *				The incoming interface is in the mbuf. (in)
 *
 *	args->rule		Pointer to the last matching rule (in/out)
 *	args->f_id		Addresses grabbed from the packet (out)
 *
 * Return value:
 *
 *	If the packet was denied/rejected and has been dropped, *m is equal
 *	to NULL upon return.
 *
 *	IP_FW_DENY	the packet must be dropped.
 *	IP_FW_PASS	The packet is to be accepted and routed normally.
 *	IP_FW_DIVERT	Divert the packet to port (args->cookie)
 *	IP_FW_TEE	Tee the packet to port (args->cookie)
 *	IP_FW_DUMMYNET	Send the packet to pipe/queue (args->cookie)
 */

static int
ipfw_chk(struct ip_fw_args *args)
{
	/*
	 * Local variables hold state during the processing of a packet.
	 *
	 * IMPORTANT NOTE: to speed up the processing of rules, there
	 * are some assumptions on the values of the variables, which
	 * are documented here.  Should you change them, please check
	 * the implementation of the various instructions to make sure
	 * that they still work.
	 *
	 * args->eh	The MAC header.  It is non-NULL for a layer-2
	 *	packet and NULL for a layer-3 packet.
	 *
	 * m | args->m	Pointer to the mbuf, as received from the caller.
	 *	It may change if ipfw_chk() does an m_pullup, or if it
	 *	consumes the packet because it calls send_reject().
	 *	XXX This has to change, so that ipfw_chk() never modifies
	 *	or consumes the buffer.
	 *	ip is simply an alias of the value of m, and it is kept
	 *	in sync with it (the packet is supposed to start with
	 *	the ip header).
	 */
	struct mbuf *m = args->m;
	struct ip *ip = mtod(m, struct ip *);

	/*
	 * oif | args->oif	If NULL, ipfw_chk has been called on the
	 *	inbound path (ether_input, ip_input).
	 *	If non-NULL, ipfw_chk has been called on the outbound path
	 *	(ether_output, ip_output).
	 */
	struct ifnet *oif = args->oif;

	struct ip_fw *f = NULL;		/* matching rule */
	int retval = IP_FW_PASS;
	struct m_tag *mtag;
	struct divert_info *divinfo;

	/*
	 * hlen	The length of the IPv4 header.
	 *	hlen > 0 means we have an IPv4 packet.
	 */
	u_int hlen = 0;		/* hlen > 0 means we have an IP pkt */

	/*
	 * offset	The offset of a fragment.  offset != 0 means that
	 *	we have a fragment at this offset of an IPv4 packet.
	 *	offset == 0 means that (if this is an IPv4 packet)
	 *	this is the first or only fragment.
	 */
	u_short offset = 0;

	/*
	 * Local copies of addresses.  They are only valid if we have
	 * an IP packet.
	 *
	 * proto	The protocol.  Set to 0 for non-ip packets,
	 *	or to the protocol read from the packet otherwise.
	 *	proto != 0 means that we have an IPv4 packet.
	 *
	 * src_port, dst_port	port numbers, in HOST format.  Only
	 *	valid for TCP and UDP packets.
	 *
	 * src_ip, dst_ip	ip addresses, in NETWORK format.
	 *	Only valid for IPv4 packets.
	 */
	uint8_t proto;
	uint16_t src_port = 0, dst_port = 0;	/* NOTE: host format */
	struct in_addr src_ip, dst_ip;		/* NOTE: network format */
	uint16_t ip_len = 0;

	/*
	 * dyn_dir = MATCH_UNKNOWN when rules unchecked,
	 *	MATCH_NONE when checked and not matched (dyn_f = NULL),
	 *	MATCH_FORWARD or MATCH_REVERSE otherwise (dyn_f != NULL)
	 */
	int dyn_dir = MATCH_UNKNOWN;
	struct ip_fw *dyn_f = NULL;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];

	if (m->m_pkthdr.fw_flags & IPFW_MBUF_GENERATED)
		return IP_FW_PASS;	/* accept */

	if (args->eh == NULL ||		/* layer 3 packet */
	    (m->m_pkthdr.len >= sizeof(struct ip) &&
	     ntohs(args->eh->ether_type) == ETHERTYPE_IP))
		hlen = ip->ip_hl << 2;

	/*
	 * Collect parameters into local variables for faster matching.
	 */
	if (hlen == 0) {	/* do not grab addresses for non-ip pkts */
		proto = args->f_id.proto = 0;	/* mark f_id invalid */
		goto after_ip_checks;
	}

	proto = args->f_id.proto = ip->ip_p;
	src_ip = ip->ip_src;
	dst_ip = ip->ip_dst;
	if (args->eh != NULL) {	/* layer 2 packets are as on the wire */
		offset = ntohs(ip->ip_off) & IP_OFFMASK;
		ip_len = ntohs(ip->ip_len);
	} else {
		offset = ip->ip_off & IP_OFFMASK;
		ip_len = ip->ip_len;
	}

#define PULLUP_TO(len) \
do { \
	if (m->m_len < (len)) { \
		args->m = m = m_pullup(m, (len)); \
		if (m == NULL) \
			goto pullup_failed; \
		ip = mtod(m, struct ip *); \
	} \
} while (0)

	if (offset == 0) {
		switch (proto) {
		case IPPROTO_TCP:
		{
			struct tcphdr *tcp;

			PULLUP_TO(hlen + sizeof(struct tcphdr));
			tcp = L3HDR(struct tcphdr, ip);
			dst_port = tcp->th_dport;
			src_port = tcp->th_sport;
			args->f_id.flags = tcp->th_flags;
		}
			break;

		case IPPROTO_UDP:
		{
			struct udphdr *udp;

			PULLUP_TO(hlen + sizeof(struct udphdr));
			udp = L3HDR(struct udphdr, ip);
			dst_port = udp->uh_dport;
			src_port = udp->uh_sport;
		}
			break;

		case IPPROTO_ICMP:
			PULLUP_TO(hlen + 4);	/* type, code and checksum. */
			args->f_id.flags = L3HDR(struct icmp, ip)->icmp_type;
			break;

		default:
			break;
		}
	}

#undef PULLUP_TO

	args->f_id.src_ip = ntohl(src_ip.s_addr);
	args->f_id.dst_ip = ntohl(dst_ip.s_addr);
	args->f_id.src_port = src_port = ntohs(src_port);
	args->f_id.dst_port = dst_port = ntohs(dst_port);

after_ip_checks:
	if (args->rule) {
		/*
		 * Packet has already been tagged.  Look for the next rule
		 * to restart processing.
		 *
		 * If fw_one_pass != 0 then just accept it.
		 * XXX should not happen here, but optimized out in
		 * the caller.
		 */
		if (fw_one_pass)
			return IP_FW_PASS;

		/* This rule is being/has been flushed */
		if (ipfw_flushing)
			return IP_FW_DENY;

		KASSERT(args->rule->cpuid == mycpuid,
			("rule used on cpu%d", mycpuid));

		/* This rule was deleted */
		if (args->rule->rule_flags & IPFW_RULE_F_INVALID)
			return IP_FW_DENY;

		f = args->rule->next_rule;
		if (f == NULL)
			f = lookup_next_rule(args->rule);
	} else {
		/*
		 * Find the starting rule.  It can be either the first
		 * one, or the one after divert_rule if asked so.
		 */
		int skipto;

		mtag = m_tag_find(m, PACKET_TAG_IPFW_DIVERT, NULL);
		if (mtag != NULL) {
			divinfo = m_tag_data(mtag);
			skipto = divinfo->skipto;
		} else {
			skipto = 0;
		}

		f = ctx->ipfw_layer3_chain;
		if (args->eh == NULL && skipto != 0) {
			/* No skipto during rule flushing */
			if (ipfw_flushing)
				return IP_FW_DENY;

			if (skipto >= IPFW_DEFAULT_RULE)
				return IP_FW_DENY;	/* invalid */

			while (f && f->rulenum <= skipto)
				f = f->next;
			if (f == NULL)	/* drop packet */
				return IP_FW_DENY;
		} else if (ipfw_flushing) {
			/* Rules are being flushed; skip to default rule */
			f = ctx->ipfw_default_rule;
		}
	}
	if ((mtag = m_tag_find(m, PACKET_TAG_IPFW_DIVERT, NULL)) != NULL)
		m_tag_delete(m, mtag);

	/*
	 * Now scan the rules, and parse microinstructions for each rule.
	 */
	for (; f; f = f->next) {
		int l, cmdlen;
		ipfw_insn *cmd;
		int skip_or;	/* skip rest of OR block */

again:
		if (ctx->ipfw_set_disable & (1 << f->set))
			continue;

		skip_or = 0;
		for (l = f->cmd_len, cmd = f->cmd; l > 0;
		     l -= cmdlen, cmd += cmdlen) {
			int match, deny;

			/*
			 * check_body is a jump target used when we find a
			 * CHECK_STATE, and need to jump to the body of
			 * the target rule.
			 */

check_body:
			cmdlen = F_LEN(cmd);
			/*
			 * An OR block (insn_1 || .. || insn_n) has the
			 * F_OR bit set in all but the last instruction.
			 * The first match will set "skip_or", and cause
			 * the following instructions to be skipped until
			 * past the one with the F_OR bit clear.
			 */
			if (skip_or) {		/* skip this instruction */
				if ((cmd->len & F_OR) == 0)
					skip_or = 0;	/* next one is good */
				continue;
			}
			match = 0;	/* set to 1 if we succeed */

			switch (cmd->opcode) {
			/*
			 * The first set of opcodes compares the packet's
			 * fields with some pattern, setting 'match' if a
			 * match is found.  At the end of the loop there is
			 * logic to deal with F_NOT and F_OR flags associated
			 * with the opcode.
			 */
			case O_NOP:
				match = 1;
				break;

			case O_FORWARD_MAC:
				kprintf("ipfw: opcode %d unimplemented\n",
					cmd->opcode);
				break;

			case O_GID:
			case O_UID:
				/*
				 * We only check offset == 0 && proto != 0,
				 * as this ensures that we have an IPv4
				 * packet with the ports info.
				 */
				if (offset != 0)
					break;

				match = ipfw_match_uid(&args->f_id, oif,
					cmd->opcode,
					(uid_t)((ipfw_insn_u32 *)cmd)->d[0],
					&deny);
				if (deny)
					return IP_FW_DENY;
				break;

			case O_RECV:
				match = iface_match(m->m_pkthdr.rcvif,
						    (ipfw_insn_if *)cmd);
				break;

			case O_XMIT:
				match = iface_match(oif, (ipfw_insn_if *)cmd);
				break;

			case O_VIA:
				match = iface_match(oif ? oif :
				    m->m_pkthdr.rcvif, (ipfw_insn_if *)cmd);
				break;

			case O_MACADDR2:
				if (args->eh != NULL) {	/* have MAC header */
					uint32_t *want = (uint32_t *)
						((ipfw_insn_mac *)cmd)->addr;
					uint32_t *mask = (uint32_t *)
						((ipfw_insn_mac *)cmd)->mask;
					uint32_t *hdr = (uint32_t *)args->eh;

					match =
					    (want[0] == (hdr[0] & mask[0]) &&
					     want[1] == (hdr[1] & mask[1]) &&
					     want[2] == (hdr[2] & mask[2]));
				}
				break;

			case O_MAC_TYPE:
				if (args->eh != NULL) {
					uint16_t t =
					    ntohs(args->eh->ether_type);
					uint16_t *p =
					    ((ipfw_insn_u16 *)cmd)->ports;
					int i;

					/* Special vlan handling */
					if (m->m_flags & M_VLANTAG)
						t = ETHERTYPE_VLAN;

					for (i = cmdlen - 1; !match && i > 0;
					     i--, p += 2) {
						match =
						    (t >= p[0] && t <= p[1]);
					}
				}
				break;

			case O_FRAG:
				match = (hlen > 0 && offset != 0);
				break;

			case O_IN:	/* "out" is "not in" */
				match = (oif == NULL);
				break;

			case O_LAYER2:
				match = (args->eh != NULL);
				break;

			case O_PROTO:
				/*
				 * We do not allow an arg of 0 so the
				 * check of "proto" only suffices.
2027 */ 2028 match = (proto == cmd->arg1); 2029 break; 2030 2031 case O_IP_SRC: 2032 match = (hlen > 0 && 2033 ((ipfw_insn_ip *)cmd)->addr.s_addr == 2034 src_ip.s_addr); 2035 break; 2036 2037 case O_IP_SRC_MASK: 2038 match = (hlen > 0 && 2039 ((ipfw_insn_ip *)cmd)->addr.s_addr == 2040 (src_ip.s_addr & 2041 ((ipfw_insn_ip *)cmd)->mask.s_addr)); 2042 break; 2043 2044 case O_IP_SRC_ME: 2045 if (hlen > 0) { 2046 struct ifnet *tif; 2047 2048 tif = INADDR_TO_IFP(&src_ip); 2049 match = (tif != NULL); 2050 } 2051 break; 2052 2053 case O_IP_DST_SET: 2054 case O_IP_SRC_SET: 2055 if (hlen > 0) { 2056 uint32_t *d = (uint32_t *)(cmd + 1); 2057 uint32_t addr = 2058 cmd->opcode == O_IP_DST_SET ? 2059 args->f_id.dst_ip : 2060 args->f_id.src_ip; 2061 2062 if (addr < d[0]) 2063 break; 2064 addr -= d[0]; /* subtract base */ 2065 match = 2066 (addr < cmd->arg1) && 2067 (d[1 + (addr >> 5)] & 2068 (1 << (addr & 0x1f))); 2069 } 2070 break; 2071 2072 case O_IP_DST: 2073 match = (hlen > 0 && 2074 ((ipfw_insn_ip *)cmd)->addr.s_addr == 2075 dst_ip.s_addr); 2076 break; 2077 2078 case O_IP_DST_MASK: 2079 match = (hlen > 0) && 2080 (((ipfw_insn_ip *)cmd)->addr.s_addr == 2081 (dst_ip.s_addr & 2082 ((ipfw_insn_ip *)cmd)->mask.s_addr)); 2083 break; 2084 2085 case O_IP_DST_ME: 2086 if (hlen > 0) { 2087 struct ifnet *tif; 2088 2089 tif = INADDR_TO_IFP(&dst_ip); 2090 match = (tif != NULL); 2091 } 2092 break; 2093 2094 case O_IP_SRCPORT: 2095 case O_IP_DSTPORT: 2096 /* 2097 * offset == 0 && proto != 0 is enough 2098 * to guarantee that we have an IPv4 2099 * packet with port info. 2100 */ 2101 if ((proto==IPPROTO_UDP || proto==IPPROTO_TCP) 2102 && offset == 0) { 2103 uint16_t x = 2104 (cmd->opcode == O_IP_SRCPORT) ? 2105 src_port : dst_port ; 2106 uint16_t *p = 2107 ((ipfw_insn_u16 *)cmd)->ports; 2108 int i; 2109 2110 for (i = cmdlen - 1; !match && i > 0; 2111 i--, p += 2) { 2112 match = 2113 (x >= p[0] && x <= p[1]); 2114 } 2115 } 2116 break; 2117 2118 case O_ICMPTYPE: 2119 match = (offset == 0 && proto==IPPROTO_ICMP && 2120 icmptype_match(ip, (ipfw_insn_u32 *)cmd)); 2121 break; 2122 2123 case O_IPOPT: 2124 match = (hlen > 0 && ipopts_match(ip, cmd)); 2125 break; 2126 2127 case O_IPVER: 2128 match = (hlen > 0 && cmd->arg1 == ip->ip_v); 2129 break; 2130 2131 case O_IPTTL: 2132 match = (hlen > 0 && cmd->arg1 == ip->ip_ttl); 2133 break; 2134 2135 case O_IPID: 2136 match = (hlen > 0 && 2137 cmd->arg1 == ntohs(ip->ip_id)); 2138 break; 2139 2140 case O_IPLEN: 2141 match = (hlen > 0 && cmd->arg1 == ip_len); 2142 break; 2143 2144 case O_IPPRECEDENCE: 2145 match = (hlen > 0 && 2146 (cmd->arg1 == (ip->ip_tos & 0xe0))); 2147 break; 2148 2149 case O_IPTOS: 2150 match = (hlen > 0 && 2151 flags_match(cmd, ip->ip_tos)); 2152 break; 2153 2154 case O_TCPFLAGS: 2155 match = (proto == IPPROTO_TCP && offset == 0 && 2156 flags_match(cmd, 2157 L3HDR(struct tcphdr,ip)->th_flags)); 2158 break; 2159 2160 case O_TCPOPTS: 2161 match = (proto == IPPROTO_TCP && offset == 0 && 2162 tcpopts_match(ip, cmd)); 2163 break; 2164 2165 case O_TCPSEQ: 2166 match = (proto == IPPROTO_TCP && offset == 0 && 2167 ((ipfw_insn_u32 *)cmd)->d[0] == 2168 L3HDR(struct tcphdr,ip)->th_seq); 2169 break; 2170 2171 case O_TCPACK: 2172 match = (proto == IPPROTO_TCP && offset == 0 && 2173 ((ipfw_insn_u32 *)cmd)->d[0] == 2174 L3HDR(struct tcphdr,ip)->th_ack); 2175 break; 2176 2177 case O_TCPWIN: 2178 match = (proto == IPPROTO_TCP && offset == 0 && 2179 cmd->arg1 == 2180 L3HDR(struct tcphdr,ip)->th_win); 2181 break; 2182 2183 case O_ESTAB: 2184 /* reject packets which have SYN only 
*/ 2185 /* XXX should I also check for TH_ACK ? */
2186 match = (proto == IPPROTO_TCP && offset == 0 && 2187 (L3HDR(struct tcphdr,ip)->th_flags & 2188 (TH_RST | TH_ACK | TH_SYN)) != TH_SYN); 2189 break; 2190
2191 case O_LOG: 2192 if (fw_verbose) 2193 ipfw_log(f, hlen, args->eh, m, oif); 2194 match = 1; 2195 break; 2196
2197 case O_PROB: 2198 match = (krandom() < 2199 ((ipfw_insn_u32 *)cmd)->d[0]); 2200 break; 2201
2202 /* 2203 * The second set of opcodes represents 'actions', 2204 * i.e. the terminal part of a rule once the packet 2205 * matches all previous patterns. 2206 * Typically there is only one action for each rule, 2207 * and the opcode is stored at the end of the rule 2208 * (but there are exceptions -- see below). 2209 *
2210 * In general, here we set retval and terminate the 2211 * outer loop (would be a 'break 3' in some languages, 2212 * but we need to do a 'goto done'). 2213 *
2214 * Exceptions: 2215 * O_COUNT and O_SKIPTO actions: 2216 * instead of terminating, we jump to the next rule 2217 * ('goto next_rule', equivalent to a 'break 2'), 2218 * or to the SKIPTO target ('goto again' after 2219 * having set f, cmd and l), respectively. 2220 *
2221 * O_LIMIT and O_KEEP_STATE: these opcodes are 2222 * not real 'actions', and are stored right 2223 * before the 'action' part of the rule. 2224 * These opcodes try to install an entry in the 2225 * state tables; if successful, we continue with 2226 * the next opcode (match=1; break;), otherwise 2227 * the packet must be dropped ('goto done' after 2228 * setting retval). If static rules are changed 2229 * during the state installation, the packet will 2230 * be dropped and rule's stats will not be updated 2231 * ('return IP_FW_DENY'). 2232 *
2233 * O_PROBE_STATE and O_CHECK_STATE: these opcodes 2234 * cause a lookup of the state table, and a jump 2235 * to the 'action' part of the parent rule 2236 * ('goto check_body') if an entry is found, or 2237 * (CHECK_STATE only) a jump to the next rule if 2238 * the entry is not found ('goto next_rule'). 2239 * The result of the lookup is cached so that 2240 * further instances of these opcodes become 2241 * effectively NOPs. If static rules are changed 2242 * during the state lookup, the packet will 2243 * be dropped and rule's stats will not be updated 2244 * ('return IP_FW_DENY'). 2245 */
2246 case O_LIMIT: 2247 case O_KEEP_STATE: 2248 if (!(f->rule_flags & IPFW_RULE_F_STATE)) { 2249 kprintf("%s rule (%d) is not ready " 2250 "on cpu%d\n", 2251 cmd->opcode == O_LIMIT ? 2252 "limit" : "keep state", 2253 f->rulenum, f->cpuid); 2254 goto next_rule; 2255 }
2256 if (install_state(f, 2257 (ipfw_insn_limit *)cmd, args, &deny)) { 2258 if (deny) 2259 return IP_FW_DENY; 2260
2261 retval = IP_FW_DENY; 2262 goto done; /* error/limit violation */ 2263 }
2264 if (deny) 2265 return IP_FW_DENY; 2266 match = 1; 2267 break; 2268
2269 case O_PROBE_STATE: 2270 case O_CHECK_STATE: 2271 /* 2272 * dynamic rules are checked at the first 2273 * keep-state or check-state occurrence, 2274 * with the result being stored in dyn_dir. 2275 * The compiler introduces a PROBE_STATE 2276 * instruction for us when we have a 2277 * KEEP_STATE (because PROBE_STATE needs 2278 * to be run first). 2279 */
2280 if (dyn_dir == MATCH_UNKNOWN) { 2281 dyn_f = lookup_rule(&args->f_id, 2282 &dyn_dir, 2283 proto == IPPROTO_TCP ?
2284 L3HDR(struct tcphdr, ip) : NULL, 2285 ip_len, &deny); 2286 if (deny) 2287 return IP_FW_DENY; 2288 if (dyn_f != NULL) { 2289 /* 2290 * Found a rule from a dynamic 2291 * entry; jump to the 'action' 2292 * part of the rule. 2293 */ 2294 f = dyn_f; 2295 cmd = ACTION_PTR(f); 2296 l = f->cmd_len - f->act_ofs; 2297 goto check_body; 2298 } 2299 } 2300 /* 2301 * Dynamic entry not found. If CHECK_STATE, 2302 * skip to next rule, if PROBE_STATE just 2303 * ignore and continue with next opcode. 2304 */ 2305 if (cmd->opcode == O_CHECK_STATE) 2306 goto next_rule; 2307 else if (!(f->rule_flags & IPFW_RULE_F_STATE)) 2308 goto next_rule; /* not ready yet */ 2309 match = 1; 2310 break; 2311 2312 case O_ACCEPT: 2313 retval = IP_FW_PASS; /* accept */ 2314 goto done; 2315 2316 case O_PIPE: 2317 case O_QUEUE: 2318 args->rule = f; /* report matching rule */ 2319 args->cookie = cmd->arg1; 2320 retval = IP_FW_DUMMYNET; 2321 goto done; 2322 2323 case O_DIVERT: 2324 case O_TEE: 2325 if (args->eh) /* not on layer 2 */ 2326 break; 2327 2328 mtag = m_tag_get(PACKET_TAG_IPFW_DIVERT, 2329 sizeof(*divinfo), MB_DONTWAIT); 2330 if (mtag == NULL) { 2331 retval = IP_FW_DENY; 2332 goto done; 2333 } 2334 divinfo = m_tag_data(mtag); 2335 2336 divinfo->skipto = f->rulenum; 2337 divinfo->port = cmd->arg1; 2338 divinfo->tee = (cmd->opcode == O_TEE); 2339 m_tag_prepend(m, mtag); 2340 2341 args->cookie = cmd->arg1; 2342 retval = (cmd->opcode == O_DIVERT) ? 2343 IP_FW_DIVERT : IP_FW_TEE; 2344 goto done; 2345 2346 case O_COUNT: 2347 case O_SKIPTO: 2348 f->pcnt++; /* update stats */ 2349 f->bcnt += ip_len; 2350 f->timestamp = time_second; 2351 if (cmd->opcode == O_COUNT) 2352 goto next_rule; 2353 /* handle skipto */ 2354 if (f->next_rule == NULL) 2355 lookup_next_rule(f); 2356 f = f->next_rule; 2357 goto again; 2358 2359 case O_REJECT: 2360 /* 2361 * Drop the packet and send a reject notice 2362 * if the packet is not ICMP (or is an ICMP 2363 * query), and it is not multicast/broadcast. 2364 */ 2365 if (hlen > 0 && 2366 (proto != IPPROTO_ICMP || 2367 is_icmp_query(ip)) && 2368 !(m->m_flags & (M_BCAST|M_MCAST)) && 2369 !IN_MULTICAST(ntohl(dst_ip.s_addr))) { 2370 /* 2371 * Update statistics before the possible 2372 * blocking 'send_reject' 2373 */ 2374 f->pcnt++; 2375 f->bcnt += ip_len; 2376 f->timestamp = time_second; 2377 2378 send_reject(args, cmd->arg1, 2379 offset,ip_len); 2380 m = args->m; 2381 2382 /* 2383 * Return directly here, rule stats 2384 * have been updated above. 
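* Note that send_reject() may replace or free the mbuf, which is
* why args->m is reloaded into 'm' right after the call.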
2385 */ 2386 return IP_FW_DENY; 2387 } 2388 /* FALLTHROUGH */ 2389 case O_DENY: 2390 retval = IP_FW_DENY; 2391 goto done; 2392 2393 case O_FORWARD_IP: 2394 if (args->eh) /* not valid on layer2 pkts */ 2395 break; 2396 if (!dyn_f || dyn_dir == MATCH_FORWARD) { 2397 struct sockaddr_in *sin; 2398 2399 mtag = m_tag_get(PACKET_TAG_IPFORWARD, 2400 sizeof(*sin), MB_DONTWAIT); 2401 if (mtag == NULL) { 2402 retval = IP_FW_DENY; 2403 goto done; 2404 } 2405 sin = m_tag_data(mtag); 2406 2407 /* Structure copy */ 2408 *sin = ((ipfw_insn_sa *)cmd)->sa; 2409 2410 m_tag_prepend(m, mtag); 2411 m->m_pkthdr.fw_flags |= 2412 IPFORWARD_MBUF_TAGGED; 2413 m->m_pkthdr.fw_flags &= 2414 ~BRIDGE_MBUF_TAGGED; 2415 } 2416 retval = IP_FW_PASS; 2417 goto done; 2418 2419 default: 2420 panic("-- unknown opcode %d", cmd->opcode); 2421 } /* end of switch() on opcodes */ 2422 2423 if (cmd->len & F_NOT) 2424 match = !match; 2425 2426 if (match) { 2427 if (cmd->len & F_OR) 2428 skip_or = 1; 2429 } else { 2430 if (!(cmd->len & F_OR)) /* not an OR block, */ 2431 break; /* try next rule */ 2432 } 2433 2434 } /* end of inner for, scan opcodes */ 2435 2436 next_rule:; /* try next rule */ 2437 2438 } /* end of outer for, scan rules */ 2439 kprintf("+++ ipfw: ouch!, skip past end of rules, denying packet\n"); 2440 return IP_FW_DENY; 2441 2442 done: 2443 /* Update statistics */ 2444 f->pcnt++; 2445 f->bcnt += ip_len; 2446 f->timestamp = time_second; 2447 return retval; 2448 2449 pullup_failed: 2450 if (fw_verbose) 2451 kprintf("pullup failed\n"); 2452 return IP_FW_DENY; 2453 } 2454 2455 static void 2456 ipfw_dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa) 2457 { 2458 struct m_tag *mtag; 2459 struct dn_pkt *pkt; 2460 ipfw_insn *cmd; 2461 const struct ipfw_flow_id *id; 2462 struct dn_flow_id *fid; 2463 2464 M_ASSERTPKTHDR(m); 2465 2466 mtag = m_tag_get(PACKET_TAG_DUMMYNET, sizeof(*pkt), MB_DONTWAIT); 2467 if (mtag == NULL) { 2468 m_freem(m); 2469 return; 2470 } 2471 m_tag_prepend(m, mtag); 2472 2473 pkt = m_tag_data(mtag); 2474 bzero(pkt, sizeof(*pkt)); 2475 2476 cmd = fwa->rule->cmd + fwa->rule->act_ofs; 2477 if (cmd->opcode == O_LOG) 2478 cmd += F_LEN(cmd); 2479 KASSERT(cmd->opcode == O_PIPE || cmd->opcode == O_QUEUE, 2480 ("Rule is not PIPE or QUEUE, opcode %d", cmd->opcode)); 2481 2482 pkt->dn_m = m; 2483 pkt->dn_flags = (dir & DN_FLAGS_DIR_MASK); 2484 pkt->ifp = fwa->oif; 2485 pkt->pipe_nr = pipe_nr; 2486 2487 pkt->cpuid = mycpuid; 2488 pkt->msgport = cur_netport(); 2489 2490 id = &fwa->f_id; 2491 fid = &pkt->id; 2492 fid->fid_dst_ip = id->dst_ip; 2493 fid->fid_src_ip = id->src_ip; 2494 fid->fid_dst_port = id->dst_port; 2495 fid->fid_src_port = id->src_port; 2496 fid->fid_proto = id->proto; 2497 fid->fid_flags = id->flags; 2498 2499 ipfw_ref_rule(fwa->rule); 2500 pkt->dn_priv = fwa->rule; 2501 pkt->dn_unref_priv = ipfw_unref_rule; 2502 2503 if (cmd->opcode == O_PIPE) 2504 pkt->dn_flags |= DN_FLAGS_IS_PIPE; 2505 2506 m->m_pkthdr.fw_flags |= DUMMYNET_MBUF_TAGGED; 2507 } 2508 2509 /* 2510 * When a rule is added/deleted, clear the next_rule pointers in all rules. 2511 * These will be reconstructed on the fly as packets are matched. 2512 * Must be called at splimp(). 
2513 */ 2514 static void 2515 ipfw_flush_rule_ptrs(struct ipfw_context *ctx) 2516 { 2517 struct ip_fw *rule; 2518 2519 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) 2520 rule->next_rule = NULL; 2521 } 2522 2523 static __inline void 2524 ipfw_inc_static_count(struct ip_fw *rule) 2525 { 2526 /* Static rule's counts are updated only on CPU0 */ 2527 KKASSERT(mycpuid == 0); 2528 2529 static_count++; 2530 static_ioc_len += IOC_RULESIZE(rule); 2531 } 2532 2533 static __inline void 2534 ipfw_dec_static_count(struct ip_fw *rule) 2535 { 2536 int l = IOC_RULESIZE(rule); 2537 2538 /* Static rule's counts are updated only on CPU0 */ 2539 KKASSERT(mycpuid == 0); 2540 2541 KASSERT(static_count > 0, ("invalid static count %u", static_count)); 2542 static_count--; 2543 2544 KASSERT(static_ioc_len >= l, 2545 ("invalid static len %u", static_ioc_len)); 2546 static_ioc_len -= l; 2547 } 2548 2549 static void 2550 ipfw_link_sibling(struct netmsg_ipfw *fwmsg, struct ip_fw *rule) 2551 { 2552 if (fwmsg->sibling != NULL) { 2553 KKASSERT(mycpuid > 0 && fwmsg->sibling->cpuid == mycpuid - 1); 2554 fwmsg->sibling->sibling = rule; 2555 } 2556 fwmsg->sibling = rule; 2557 } 2558 2559 static struct ip_fw * 2560 ipfw_create_rule(const struct ipfw_ioc_rule *ioc_rule, struct ip_fw_stub *stub) 2561 { 2562 struct ip_fw *rule; 2563 2564 rule = kmalloc(RULESIZE(ioc_rule), M_IPFW, M_WAITOK | M_ZERO); 2565 2566 rule->act_ofs = ioc_rule->act_ofs; 2567 rule->cmd_len = ioc_rule->cmd_len; 2568 rule->rulenum = ioc_rule->rulenum; 2569 rule->set = ioc_rule->set; 2570 rule->usr_flags = ioc_rule->usr_flags; 2571 2572 bcopy(ioc_rule->cmd, rule->cmd, rule->cmd_len * 4 /* XXX */); 2573 2574 rule->refcnt = 1; 2575 rule->cpuid = mycpuid; 2576 2577 rule->stub = stub; 2578 if (stub != NULL) 2579 stub->rule[mycpuid] = rule; 2580 2581 return rule; 2582 } 2583 2584 static void 2585 ipfw_add_rule_dispatch(netmsg_t nmsg) 2586 { 2587 struct netmsg_ipfw *fwmsg = (struct netmsg_ipfw *)nmsg; 2588 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 2589 struct ip_fw *rule; 2590 2591 rule = ipfw_create_rule(fwmsg->ioc_rule, fwmsg->stub); 2592 2593 /* 2594 * Bump generation after ipfw_create_rule(), 2595 * since this function is blocking 2596 */ 2597 ctx->ipfw_gen++; 2598 2599 /* 2600 * Insert rule into the pre-determined position 2601 */ 2602 if (fwmsg->prev_rule != NULL) { 2603 struct ip_fw *prev, *next; 2604 2605 prev = fwmsg->prev_rule; 2606 KKASSERT(prev->cpuid == mycpuid); 2607 2608 next = fwmsg->next_rule; 2609 KKASSERT(next->cpuid == mycpuid); 2610 2611 rule->next = next; 2612 prev->next = rule; 2613 2614 /* 2615 * Move to the position on the next CPU 2616 * before the msg is forwarded. 
2617 */ 2618 fwmsg->prev_rule = prev->sibling; 2619 fwmsg->next_rule = next->sibling; 2620 } else { 2621 KKASSERT(fwmsg->next_rule == NULL); 2622 rule->next = ctx->ipfw_layer3_chain; 2623 ctx->ipfw_layer3_chain = rule; 2624 } 2625 2626 /* Link rule CPU sibling */ 2627 ipfw_link_sibling(fwmsg, rule); 2628 2629 ipfw_flush_rule_ptrs(ctx); 2630 2631 if (mycpuid == 0) { 2632 /* Statistics only need to be updated once */ 2633 ipfw_inc_static_count(rule); 2634 2635 /* Return the rule on CPU0 */ 2636 nmsg->lmsg.u.ms_resultp = rule; 2637 } 2638 2639 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1); 2640 } 2641 2642 static void 2643 ipfw_enable_state_dispatch(netmsg_t nmsg) 2644 { 2645 struct lwkt_msg *lmsg = &nmsg->lmsg; 2646 struct ip_fw *rule = lmsg->u.ms_resultp; 2647 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 2648 2649 ctx->ipfw_gen++; 2650 2651 KKASSERT(rule->cpuid == mycpuid); 2652 KKASSERT(rule->stub != NULL && rule->stub->rule[mycpuid] == rule); 2653 KKASSERT(!(rule->rule_flags & IPFW_RULE_F_STATE)); 2654 rule->rule_flags |= IPFW_RULE_F_STATE; 2655 lmsg->u.ms_resultp = rule->sibling; 2656 2657 ifnet_forwardmsg(lmsg, mycpuid + 1); 2658 } 2659 2660 /* 2661 * Add a new rule to the list. Copy the rule into a malloc'ed area, 2662 * then possibly create a rule number and add the rule to the list. 2663 * Update the rule_number in the input struct so the caller knows 2664 * it as well. 2665 */ 2666 static void 2667 ipfw_add_rule(struct ipfw_ioc_rule *ioc_rule, uint32_t rule_flags) 2668 { 2669 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 2670 struct netmsg_ipfw fwmsg; 2671 struct netmsg_base *nmsg; 2672 struct ip_fw *f, *prev, *rule; 2673 struct ip_fw_stub *stub; 2674 2675 IPFW_ASSERT_CFGPORT(&curthread->td_msgport); 2676 2677 /* 2678 * If rulenum is 0, find highest numbered rule before the 2679 * default rule, and add rule number incremental step. 2680 */ 2681 if (ioc_rule->rulenum == 0) { 2682 int step = autoinc_step; 2683 2684 KKASSERT(step >= IPFW_AUTOINC_STEP_MIN && 2685 step <= IPFW_AUTOINC_STEP_MAX); 2686 2687 /* 2688 * Locate the highest numbered rule before default 2689 */ 2690 for (f = ctx->ipfw_layer3_chain; f; f = f->next) { 2691 if (f->rulenum == IPFW_DEFAULT_RULE) 2692 break; 2693 ioc_rule->rulenum = f->rulenum; 2694 } 2695 if (ioc_rule->rulenum < IPFW_DEFAULT_RULE - step) 2696 ioc_rule->rulenum += step; 2697 } 2698 KASSERT(ioc_rule->rulenum != IPFW_DEFAULT_RULE && 2699 ioc_rule->rulenum != 0, 2700 ("invalid rule num %d", ioc_rule->rulenum)); 2701 2702 /* 2703 * Now find the right place for the new rule in the sorted list. 2704 */ 2705 for (prev = NULL, f = ctx->ipfw_layer3_chain; f; 2706 prev = f, f = f->next) { 2707 if (f->rulenum > ioc_rule->rulenum) { 2708 /* Found the location */ 2709 break; 2710 } 2711 } 2712 KASSERT(f != NULL, ("no default rule?!")); 2713 2714 if (rule_flags & IPFW_RULE_F_STATE) { 2715 int size; 2716 2717 /* 2718 * If the new rule will create states, then allocate 2719 * a rule stub, which will be referenced by states 2720 * (dyn rules) 2721 */ 2722 size = sizeof(*stub) + ((ncpus - 1) * sizeof(struct ip_fw *)); 2723 stub = kmalloc(size, M_IPFW, M_WAITOK | M_ZERO); 2724 } else { 2725 stub = NULL; 2726 } 2727 2728 /* 2729 * Duplicate the rule onto each CPU. 2730 * The rule duplicated on CPU0 will be returned. 2731 */ 2732 bzero(&fwmsg, sizeof(fwmsg)); 2733 nmsg = &fwmsg.base; 2734 netmsg_init(nmsg, NULL, &curthread->td_msgport, 2735 0, ipfw_add_rule_dispatch); 2736 fwmsg.ioc_rule = ioc_rule; 2737 fwmsg.prev_rule = prev; 2738 fwmsg.next_rule = prev == NULL ? 
NULL : f; 2739 fwmsg.stub = stub; 2740 2741 ifnet_domsg(&nmsg->lmsg, 0); 2742 KKASSERT(fwmsg.prev_rule == NULL && fwmsg.next_rule == NULL); 2743 2744 rule = nmsg->lmsg.u.ms_resultp; 2745 KKASSERT(rule != NULL && rule->cpuid == mycpuid); 2746 2747 if (rule_flags & IPFW_RULE_F_STATE) { 2748 /* 2749 * Turn on state flag, _after_ everything on all 2750 * CPUs have been setup. 2751 */ 2752 bzero(nmsg, sizeof(*nmsg)); 2753 netmsg_init(nmsg, NULL, &curthread->td_msgport, 2754 0, ipfw_enable_state_dispatch); 2755 nmsg->lmsg.u.ms_resultp = rule; 2756 2757 ifnet_domsg(&nmsg->lmsg, 0); 2758 KKASSERT(nmsg->lmsg.u.ms_resultp == NULL); 2759 } 2760 2761 DPRINTF("++ installed rule %d, static count now %d\n", 2762 rule->rulenum, static_count); 2763 } 2764 2765 /** 2766 * Free storage associated with a static rule (including derived 2767 * dynamic rules). 2768 * The caller is in charge of clearing rule pointers to avoid 2769 * dangling pointers. 2770 * @return a pointer to the next entry. 2771 * Arguments are not checked, so they better be correct. 2772 * Must be called at splimp(). 2773 */ 2774 static struct ip_fw * 2775 ipfw_delete_rule(struct ipfw_context *ctx, 2776 struct ip_fw *prev, struct ip_fw *rule) 2777 { 2778 struct ip_fw *n; 2779 struct ip_fw_stub *stub; 2780 2781 ctx->ipfw_gen++; 2782 2783 /* STATE flag should have been cleared before we reach here */ 2784 KKASSERT((rule->rule_flags & IPFW_RULE_F_STATE) == 0); 2785 2786 stub = rule->stub; 2787 n = rule->next; 2788 if (prev == NULL) 2789 ctx->ipfw_layer3_chain = n; 2790 else 2791 prev->next = n; 2792 2793 /* Mark the rule as invalid */ 2794 rule->rule_flags |= IPFW_RULE_F_INVALID; 2795 rule->next_rule = NULL; 2796 rule->sibling = NULL; 2797 rule->stub = NULL; 2798 #ifdef foo 2799 /* Don't reset cpuid here; keep various assertion working */ 2800 rule->cpuid = -1; 2801 #endif 2802 2803 /* Statistics only need to be updated once */ 2804 if (mycpuid == 0) 2805 ipfw_dec_static_count(rule); 2806 2807 /* Free 'stub' on the last CPU */ 2808 if (stub != NULL && mycpuid == ncpus - 1) 2809 kfree(stub, M_IPFW); 2810 2811 /* Try to free this rule */ 2812 ipfw_free_rule(rule); 2813 2814 /* Return the next rule */ 2815 return n; 2816 } 2817 2818 static void 2819 ipfw_flush_dispatch(netmsg_t nmsg) 2820 { 2821 struct lwkt_msg *lmsg = &nmsg->lmsg; 2822 int kill_default = lmsg->u.ms_result; 2823 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 2824 struct ip_fw *rule; 2825 2826 ipfw_flush_rule_ptrs(ctx); /* more efficient to do outside the loop */ 2827 2828 while ((rule = ctx->ipfw_layer3_chain) != NULL && 2829 (kill_default || rule->rulenum != IPFW_DEFAULT_RULE)) 2830 ipfw_delete_rule(ctx, NULL, rule); 2831 2832 ifnet_forwardmsg(lmsg, mycpuid + 1); 2833 } 2834 2835 static void 2836 ipfw_disable_rule_state_dispatch(netmsg_t nmsg) 2837 { 2838 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg; 2839 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 2840 struct ip_fw *rule; 2841 2842 ctx->ipfw_gen++; 2843 2844 rule = dmsg->start_rule; 2845 if (rule != NULL) { 2846 KKASSERT(rule->cpuid == mycpuid); 2847 2848 /* 2849 * Move to the position on the next CPU 2850 * before the msg is forwarded. 
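* A NULL start_rule (with rulenum == 0) means the whole chain is
* being processed, as in the flush path; otherwise we start at this
* CPU's duplicate of the first rule carrying the target number.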
2851 */ 2852 dmsg->start_rule = rule->sibling; 2853 } else { 2854 KKASSERT(dmsg->rulenum == 0); 2855 rule = ctx->ipfw_layer3_chain; 2856 } 2857 2858 while (rule != NULL) { 2859 if (dmsg->rulenum && rule->rulenum != dmsg->rulenum) 2860 break; 2861 rule->rule_flags &= ~IPFW_RULE_F_STATE; 2862 rule = rule->next; 2863 } 2864 2865 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1); 2866 } 2867 2868 /* 2869 * Deletes all rules from a chain (including the default rule 2870 * if the second argument is set). 2871 * Must be called at splimp(). 2872 */ 2873 static void 2874 ipfw_flush(int kill_default) 2875 { 2876 struct netmsg_del dmsg; 2877 struct netmsg_base nmsg; 2878 struct lwkt_msg *lmsg; 2879 struct ip_fw *rule; 2880 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 2881 2882 IPFW_ASSERT_CFGPORT(&curthread->td_msgport); 2883 2884 /* 2885 * If 'kill_default' then caller has done the necessary 2886 * msgport syncing; unnecessary to do it again. 2887 */ 2888 if (!kill_default) { 2889 /* 2890 * Let ipfw_chk() know the rules are going to 2891 * be flushed, so it could jump directly to 2892 * the default rule. 2893 */ 2894 ipfw_flushing = 1; 2895 netmsg_service_sync(); 2896 } 2897 2898 /* 2899 * Clear STATE flag on rules, so no more states (dyn rules) 2900 * will be created. 2901 */ 2902 bzero(&dmsg, sizeof(dmsg)); 2903 netmsg_init(&dmsg.base, NULL, &curthread->td_msgport, 2904 0, ipfw_disable_rule_state_dispatch); 2905 ifnet_domsg(&dmsg.base.lmsg, 0); 2906 2907 /* 2908 * This actually nukes all states (dyn rules) 2909 */ 2910 lockmgr(&dyn_lock, LK_EXCLUSIVE); 2911 for (rule = ctx->ipfw_layer3_chain; rule != NULL; rule = rule->next) { 2912 /* 2913 * Can't check IPFW_RULE_F_STATE here, 2914 * since it has been cleared previously. 2915 * Check 'stub' instead. 2916 */ 2917 if (rule->stub != NULL) { 2918 /* Force removal */ 2919 remove_dyn_rule_locked(rule, NULL); 2920 } 2921 } 2922 lockmgr(&dyn_lock, LK_RELEASE); 2923 2924 /* 2925 * Press the 'flush' button 2926 */ 2927 bzero(&nmsg, sizeof(nmsg)); 2928 netmsg_init(&nmsg, NULL, &curthread->td_msgport, 2929 0, ipfw_flush_dispatch); 2930 lmsg = &nmsg.lmsg; 2931 lmsg->u.ms_result = kill_default; 2932 ifnet_domsg(lmsg, 0); 2933 2934 KASSERT(dyn_count == 0, ("%u dyn rule remains", dyn_count)); 2935 2936 if (kill_default) { 2937 if (ipfw_dyn_v != NULL) { 2938 /* 2939 * Free dynamic rules(state) hash table 2940 */ 2941 kfree(ipfw_dyn_v, M_IPFW); 2942 ipfw_dyn_v = NULL; 2943 } 2944 2945 KASSERT(static_count == 0, 2946 ("%u static rules remain", static_count)); 2947 KASSERT(static_ioc_len == 0, 2948 ("%u bytes of static rules remain", static_ioc_len)); 2949 } else { 2950 KASSERT(static_count == 1, 2951 ("%u static rules remain", static_count)); 2952 KASSERT(static_ioc_len == IOC_RULESIZE(ctx->ipfw_default_rule), 2953 ("%u bytes of static rules remain, should be %lu", 2954 static_ioc_len, 2955 (u_long)IOC_RULESIZE(ctx->ipfw_default_rule))); 2956 } 2957 2958 /* Flush is done */ 2959 ipfw_flushing = 0; 2960 } 2961 2962 static void 2963 ipfw_alt_delete_rule_dispatch(netmsg_t nmsg) 2964 { 2965 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg; 2966 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 2967 struct ip_fw *rule, *prev; 2968 2969 rule = dmsg->start_rule; 2970 KKASSERT(rule->cpuid == mycpuid); 2971 dmsg->start_rule = rule->sibling; 2972 2973 prev = dmsg->prev_rule; 2974 if (prev != NULL) { 2975 KKASSERT(prev->cpuid == mycpuid); 2976 2977 /* 2978 * Move to the position on the next CPU 2979 * before the msg is forwarded. 
2980 */ 2981 dmsg->prev_rule = prev->sibling; 2982 } 2983 2984 /* 2985 * flush pointers outside the loop, then delete all matching 2986 * rules. 'prev' remains the same throughout the cycle. 2987 */ 2988 ipfw_flush_rule_ptrs(ctx); 2989 while (rule && rule->rulenum == dmsg->rulenum) 2990 rule = ipfw_delete_rule(ctx, prev, rule); 2991 2992 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1); 2993 } 2994 2995 static int 2996 ipfw_alt_delete_rule(uint16_t rulenum) 2997 { 2998 struct ip_fw *prev, *rule, *f; 2999 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 3000 struct netmsg_del dmsg; 3001 struct netmsg_base *nmsg; 3002 int state; 3003 3004 /* 3005 * Locate first rule to delete 3006 */ 3007 for (prev = NULL, rule = ctx->ipfw_layer3_chain; 3008 rule && rule->rulenum < rulenum; 3009 prev = rule, rule = rule->next) 3010 ; /* EMPTY */ 3011 if (rule->rulenum != rulenum) 3012 return EINVAL; 3013 3014 /* 3015 * Check whether any rules with the given number will 3016 * create states. 3017 */ 3018 state = 0; 3019 for (f = rule; f && f->rulenum == rulenum; f = f->next) { 3020 if (f->rule_flags & IPFW_RULE_F_STATE) { 3021 state = 1; 3022 break; 3023 } 3024 } 3025 3026 if (state) { 3027 /* 3028 * Clear the STATE flag, so no more states will be 3029 * created based the rules numbered 'rulenum'. 3030 */ 3031 bzero(&dmsg, sizeof(dmsg)); 3032 nmsg = &dmsg.base; 3033 netmsg_init(nmsg, NULL, &curthread->td_msgport, 3034 0, ipfw_disable_rule_state_dispatch); 3035 dmsg.start_rule = rule; 3036 dmsg.rulenum = rulenum; 3037 3038 ifnet_domsg(&nmsg->lmsg, 0); 3039 KKASSERT(dmsg.start_rule == NULL); 3040 3041 /* 3042 * Nuke all related states 3043 */ 3044 lockmgr(&dyn_lock, LK_EXCLUSIVE); 3045 for (f = rule; f && f->rulenum == rulenum; f = f->next) { 3046 /* 3047 * Can't check IPFW_RULE_F_STATE here, 3048 * since it has been cleared previously. 3049 * Check 'stub' instead. 
3050 */ 3051 if (f->stub != NULL) { 3052 /* Force removal */ 3053 remove_dyn_rule_locked(f, NULL); 3054 } 3055 } 3056 lockmgr(&dyn_lock, LK_RELEASE); 3057 } 3058 3059 /* 3060 * Get rid of the rule duplications on all CPUs 3061 */ 3062 bzero(&dmsg, sizeof(dmsg)); 3063 nmsg = &dmsg.base; 3064 netmsg_init(nmsg, NULL, &curthread->td_msgport, 3065 0, ipfw_alt_delete_rule_dispatch); 3066 dmsg.prev_rule = prev; 3067 dmsg.start_rule = rule; 3068 dmsg.rulenum = rulenum; 3069 3070 ifnet_domsg(&nmsg->lmsg, 0); 3071 KKASSERT(dmsg.prev_rule == NULL && dmsg.start_rule == NULL); 3072 return 0; 3073 } 3074 3075 static void 3076 ipfw_alt_delete_ruleset_dispatch(netmsg_t nmsg) 3077 { 3078 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg; 3079 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 3080 struct ip_fw *prev, *rule; 3081 #ifdef INVARIANTS 3082 int del = 0; 3083 #endif 3084 3085 ipfw_flush_rule_ptrs(ctx); 3086 3087 prev = NULL; 3088 rule = ctx->ipfw_layer3_chain; 3089 while (rule != NULL) { 3090 if (rule->set == dmsg->from_set) { 3091 rule = ipfw_delete_rule(ctx, prev, rule); 3092 #ifdef INVARIANTS 3093 del = 1; 3094 #endif 3095 } else { 3096 prev = rule; 3097 rule = rule->next; 3098 } 3099 } 3100 KASSERT(del, ("no match set?!")); 3101 3102 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1); 3103 } 3104 3105 static void 3106 ipfw_disable_ruleset_state_dispatch(netmsg_t nmsg) 3107 { 3108 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg; 3109 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 3110 struct ip_fw *rule; 3111 #ifdef INVARIANTS 3112 int cleared = 0; 3113 #endif 3114 3115 ctx->ipfw_gen++; 3116 3117 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) { 3118 if (rule->set == dmsg->from_set) { 3119 #ifdef INVARIANTS 3120 cleared = 1; 3121 #endif 3122 rule->rule_flags &= ~IPFW_RULE_F_STATE; 3123 } 3124 } 3125 KASSERT(cleared, ("no match set?!")); 3126 3127 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1); 3128 } 3129 3130 static int 3131 ipfw_alt_delete_ruleset(uint8_t set) 3132 { 3133 struct netmsg_del dmsg; 3134 struct netmsg_base *nmsg; 3135 int state, del; 3136 struct ip_fw *rule; 3137 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 3138 3139 /* 3140 * Check whether the 'set' exists. If it exists, 3141 * then check whether any rules within the set will 3142 * try to create states. 3143 */ 3144 state = 0; 3145 del = 0; 3146 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) { 3147 if (rule->set == set) { 3148 del = 1; 3149 if (rule->rule_flags & IPFW_RULE_F_STATE) { 3150 state = 1; 3151 break; 3152 } 3153 } 3154 } 3155 if (!del) 3156 return 0; /* XXX EINVAL? */ 3157 3158 if (state) { 3159 /* 3160 * Clear the STATE flag, so no more states will be 3161 * created based the rules in this set. 3162 */ 3163 bzero(&dmsg, sizeof(dmsg)); 3164 nmsg = &dmsg.base; 3165 netmsg_init(nmsg, NULL, &curthread->td_msgport, 3166 0, ipfw_disable_ruleset_state_dispatch); 3167 dmsg.from_set = set; 3168 3169 ifnet_domsg(&nmsg->lmsg, 0); 3170 3171 /* 3172 * Nuke all related states 3173 */ 3174 lockmgr(&dyn_lock, LK_EXCLUSIVE); 3175 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) { 3176 if (rule->set != set) 3177 continue; 3178 3179 /* 3180 * Can't check IPFW_RULE_F_STATE here, 3181 * since it has been cleared previously. 3182 * Check 'stub' instead. 
3183 */ 3184 if (rule->stub != NULL) { 3185 /* Force removal */ 3186 remove_dyn_rule_locked(rule, NULL); 3187 } 3188 } 3189 lockmgr(&dyn_lock, LK_RELEASE); 3190 } 3191 3192 /* 3193 * Delete this set 3194 */ 3195 bzero(&dmsg, sizeof(dmsg)); 3196 nmsg = &dmsg.base; 3197 netmsg_init(nmsg, NULL, &curthread->td_msgport, 3198 0, ipfw_alt_delete_ruleset_dispatch); 3199 dmsg.from_set = set; 3200 3201 ifnet_domsg(&nmsg->lmsg, 0); 3202 return 0; 3203 } 3204 3205 static void 3206 ipfw_alt_move_rule_dispatch(netmsg_t nmsg) 3207 { 3208 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg; 3209 struct ip_fw *rule; 3210 3211 rule = dmsg->start_rule; 3212 KKASSERT(rule->cpuid == mycpuid); 3213 3214 /* 3215 * Move to the position on the next CPU 3216 * before the msg is forwarded. 3217 */ 3218 dmsg->start_rule = rule->sibling; 3219 3220 while (rule && rule->rulenum <= dmsg->rulenum) { 3221 if (rule->rulenum == dmsg->rulenum) 3222 rule->set = dmsg->to_set; 3223 rule = rule->next; 3224 } 3225 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1); 3226 } 3227 3228 static int 3229 ipfw_alt_move_rule(uint16_t rulenum, uint8_t set) 3230 { 3231 struct netmsg_del dmsg; 3232 struct netmsg_base *nmsg; 3233 struct ip_fw *rule; 3234 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 3235 3236 /* 3237 * Locate first rule to move 3238 */ 3239 for (rule = ctx->ipfw_layer3_chain; rule && rule->rulenum <= rulenum; 3240 rule = rule->next) { 3241 if (rule->rulenum == rulenum && rule->set != set) 3242 break; 3243 } 3244 if (rule == NULL || rule->rulenum > rulenum) 3245 return 0; /* XXX error? */ 3246 3247 bzero(&dmsg, sizeof(dmsg)); 3248 nmsg = &dmsg.base; 3249 netmsg_init(nmsg, NULL, &curthread->td_msgport, 3250 0, ipfw_alt_move_rule_dispatch); 3251 dmsg.start_rule = rule; 3252 dmsg.rulenum = rulenum; 3253 dmsg.to_set = set; 3254 3255 ifnet_domsg(&nmsg->lmsg, 0); 3256 KKASSERT(dmsg.start_rule == NULL); 3257 return 0; 3258 } 3259 3260 static void 3261 ipfw_alt_move_ruleset_dispatch(netmsg_t nmsg) 3262 { 3263 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg; 3264 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 3265 struct ip_fw *rule; 3266 3267 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) { 3268 if (rule->set == dmsg->from_set) 3269 rule->set = dmsg->to_set; 3270 } 3271 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1); 3272 } 3273 3274 static int 3275 ipfw_alt_move_ruleset(uint8_t from_set, uint8_t to_set) 3276 { 3277 struct netmsg_del dmsg; 3278 struct netmsg_base *nmsg; 3279 3280 bzero(&dmsg, sizeof(dmsg)); 3281 nmsg = &dmsg.base; 3282 netmsg_init(nmsg, NULL, &curthread->td_msgport, 3283 0, ipfw_alt_move_ruleset_dispatch); 3284 dmsg.from_set = from_set; 3285 dmsg.to_set = to_set; 3286 3287 ifnet_domsg(&nmsg->lmsg, 0); 3288 return 0; 3289 } 3290 3291 static void 3292 ipfw_alt_swap_ruleset_dispatch(netmsg_t nmsg) 3293 { 3294 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg; 3295 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 3296 struct ip_fw *rule; 3297 3298 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) { 3299 if (rule->set == dmsg->from_set) 3300 rule->set = dmsg->to_set; 3301 else if (rule->set == dmsg->to_set) 3302 rule->set = dmsg->from_set; 3303 } 3304 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1); 3305 } 3306 3307 static int 3308 ipfw_alt_swap_ruleset(uint8_t set1, uint8_t set2) 3309 { 3310 struct netmsg_del dmsg; 3311 struct netmsg_base *nmsg; 3312 3313 bzero(&dmsg, sizeof(dmsg)); 3314 nmsg = &dmsg.base; 3315 netmsg_init(nmsg, NULL, &curthread->td_msgport, 3316 0, ipfw_alt_swap_ruleset_dispatch); 3317 
dmsg.from_set = set1; 3318 dmsg.to_set = set2; 3319 3320 ifnet_domsg(&nmsg->lmsg, 0); 3321 return 0; 3322 } 3323 3324 /** 3325 * Remove all rules with given number, and also do set manipulation. 3326 * 3327 * The argument is an uint32_t. The low 16 bit are the rule or set number, 3328 * the next 8 bits are the new set, the top 8 bits are the command: 3329 * 3330 * 0 delete rules with given number 3331 * 1 delete rules with given set number 3332 * 2 move rules with given number to new set 3333 * 3 move rules with given set number to new set 3334 * 4 swap sets with given numbers 3335 */ 3336 static int 3337 ipfw_ctl_alter(uint32_t arg) 3338 { 3339 uint16_t rulenum; 3340 uint8_t cmd, new_set; 3341 int error = 0; 3342 3343 rulenum = arg & 0xffff; 3344 cmd = (arg >> 24) & 0xff; 3345 new_set = (arg >> 16) & 0xff; 3346 3347 if (cmd > 4) 3348 return EINVAL; 3349 if (new_set >= IPFW_DEFAULT_SET) 3350 return EINVAL; 3351 if (cmd == 0 || cmd == 2) { 3352 if (rulenum == IPFW_DEFAULT_RULE) 3353 return EINVAL; 3354 } else { 3355 if (rulenum >= IPFW_DEFAULT_SET) 3356 return EINVAL; 3357 } 3358 3359 switch (cmd) { 3360 case 0: /* delete rules with given number */ 3361 error = ipfw_alt_delete_rule(rulenum); 3362 break; 3363 3364 case 1: /* delete all rules with given set number */ 3365 error = ipfw_alt_delete_ruleset(rulenum); 3366 break; 3367 3368 case 2: /* move rules with given number to new set */ 3369 error = ipfw_alt_move_rule(rulenum, new_set); 3370 break; 3371 3372 case 3: /* move rules with given set number to new set */ 3373 error = ipfw_alt_move_ruleset(rulenum, new_set); 3374 break; 3375 3376 case 4: /* swap two sets */ 3377 error = ipfw_alt_swap_ruleset(rulenum, new_set); 3378 break; 3379 } 3380 return error; 3381 } 3382 3383 /* 3384 * Clear counters for a specific rule. 3385 */ 3386 static void 3387 clear_counters(struct ip_fw *rule, int log_only) 3388 { 3389 ipfw_insn_log *l = (ipfw_insn_log *)ACTION_PTR(rule); 3390 3391 if (log_only == 0) { 3392 rule->bcnt = rule->pcnt = 0; 3393 rule->timestamp = 0; 3394 } 3395 if (l->o.opcode == O_LOG) 3396 l->log_left = l->max_log; 3397 } 3398 3399 static void 3400 ipfw_zero_entry_dispatch(netmsg_t nmsg) 3401 { 3402 struct netmsg_zent *zmsg = (struct netmsg_zent *)nmsg; 3403 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 3404 struct ip_fw *rule; 3405 3406 if (zmsg->rulenum == 0) { 3407 KKASSERT(zmsg->start_rule == NULL); 3408 3409 ctx->ipfw_norule_counter = 0; 3410 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) 3411 clear_counters(rule, zmsg->log_only); 3412 } else { 3413 struct ip_fw *start = zmsg->start_rule; 3414 3415 KKASSERT(start->cpuid == mycpuid); 3416 KKASSERT(start->rulenum == zmsg->rulenum); 3417 3418 /* 3419 * We can have multiple rules with the same number, so we 3420 * need to clear them all. 3421 */ 3422 for (rule = start; rule && rule->rulenum == zmsg->rulenum; 3423 rule = rule->next) 3424 clear_counters(rule, zmsg->log_only); 3425 3426 /* 3427 * Move to the position on the next CPU 3428 * before the msg is forwarded. 3429 */ 3430 zmsg->start_rule = start->sibling; 3431 } 3432 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1); 3433 } 3434 3435 /** 3436 * Reset some or all counters on firewall rules. 3437 * @arg frwl is null to clear all entries, or contains a specific 3438 * rule number. 3439 * @arg log_only is 1 if we only want to reset logs, zero otherwise. 
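* The clearing is replicated across all CPUs by
* ipfw_zero_entry_dispatch(), so the per-CPU duplicates of a rule
* keep consistent counters.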
3440 */ 3441 static int 3442 ipfw_ctl_zero_entry(int rulenum, int log_only) 3443 { 3444 struct netmsg_zent zmsg; 3445 struct netmsg_base *nmsg; 3446 const char *msg; 3447 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 3448 3449 bzero(&zmsg, sizeof(zmsg)); 3450 nmsg = &zmsg.base; 3451 netmsg_init(nmsg, NULL, &curthread->td_msgport, 3452 0, ipfw_zero_entry_dispatch); 3453 zmsg.log_only = log_only; 3454 3455 if (rulenum == 0) { 3456 msg = log_only ? "ipfw: All logging counts reset.\n" 3457 : "ipfw: Accounting cleared.\n"; 3458 } else { 3459 struct ip_fw *rule; 3460 3461 /* 3462 * Locate the first rule with 'rulenum' 3463 */ 3464 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) { 3465 if (rule->rulenum == rulenum) 3466 break; 3467 } 3468 if (rule == NULL) /* we did not find any matching rules */ 3469 return (EINVAL); 3470 zmsg.start_rule = rule; 3471 zmsg.rulenum = rulenum; 3472 3473 msg = log_only ? "ipfw: Entry %d logging count reset.\n" 3474 : "ipfw: Entry %d cleared.\n"; 3475 } 3476 ifnet_domsg(&nmsg->lmsg, 0); 3477 KKASSERT(zmsg.start_rule == NULL); 3478 3479 if (fw_verbose) 3480 log(LOG_SECURITY | LOG_NOTICE, msg, rulenum); 3481 return (0); 3482 } 3483 3484 /* 3485 * Check validity of the structure before insert. 3486 * Fortunately rules are simple, so this mostly need to check rule sizes. 3487 */ 3488 static int 3489 ipfw_check_ioc_rule(struct ipfw_ioc_rule *rule, int size, uint32_t *rule_flags) 3490 { 3491 int l, cmdlen = 0; 3492 int have_action = 0; 3493 ipfw_insn *cmd; 3494 3495 *rule_flags = 0; 3496 3497 /* Check for valid size */ 3498 if (size < sizeof(*rule)) { 3499 kprintf("ipfw: rule too short\n"); 3500 return EINVAL; 3501 } 3502 l = IOC_RULESIZE(rule); 3503 if (l != size) { 3504 kprintf("ipfw: size mismatch (have %d want %d)\n", size, l); 3505 return EINVAL; 3506 } 3507 3508 /* Check rule number */ 3509 if (rule->rulenum == IPFW_DEFAULT_RULE) { 3510 kprintf("ipfw: invalid rule number\n"); 3511 return EINVAL; 3512 } 3513 3514 /* 3515 * Now go for the individual checks. Very simple ones, basically only 3516 * instruction sizes. 
3517 */ 3518 for (l = rule->cmd_len, cmd = rule->cmd; l > 0; 3519 l -= cmdlen, cmd += cmdlen) { 3520 cmdlen = F_LEN(cmd); 3521 if (cmdlen > l) { 3522 kprintf("ipfw: opcode %d size truncated\n", 3523 cmd->opcode); 3524 return EINVAL; 3525 } 3526 3527 DPRINTF("ipfw: opcode %d\n", cmd->opcode); 3528 3529 if (cmd->opcode == O_KEEP_STATE || cmd->opcode == O_LIMIT) { 3530 /* This rule will create states */ 3531 *rule_flags |= IPFW_RULE_F_STATE; 3532 } 3533 3534 switch (cmd->opcode) { 3535 case O_NOP: 3536 case O_PROBE_STATE: 3537 case O_KEEP_STATE: 3538 case O_PROTO: 3539 case O_IP_SRC_ME: 3540 case O_IP_DST_ME: 3541 case O_LAYER2: 3542 case O_IN: 3543 case O_FRAG: 3544 case O_IPOPT: 3545 case O_IPLEN: 3546 case O_IPID: 3547 case O_IPTOS: 3548 case O_IPPRECEDENCE: 3549 case O_IPTTL: 3550 case O_IPVER: 3551 case O_TCPWIN: 3552 case O_TCPFLAGS: 3553 case O_TCPOPTS: 3554 case O_ESTAB: 3555 if (cmdlen != F_INSN_SIZE(ipfw_insn)) 3556 goto bad_size; 3557 break; 3558 3559 case O_UID: 3560 case O_GID: 3561 case O_IP_SRC: 3562 case O_IP_DST: 3563 case O_TCPSEQ: 3564 case O_TCPACK: 3565 case O_PROB: 3566 case O_ICMPTYPE: 3567 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32)) 3568 goto bad_size; 3569 break; 3570 3571 case O_LIMIT: 3572 if (cmdlen != F_INSN_SIZE(ipfw_insn_limit)) 3573 goto bad_size; 3574 break; 3575 3576 case O_LOG: 3577 if (cmdlen != F_INSN_SIZE(ipfw_insn_log)) 3578 goto bad_size; 3579 3580 ((ipfw_insn_log *)cmd)->log_left = 3581 ((ipfw_insn_log *)cmd)->max_log; 3582 3583 break; 3584 3585 case O_IP_SRC_MASK: 3586 case O_IP_DST_MASK: 3587 if (cmdlen != F_INSN_SIZE(ipfw_insn_ip)) 3588 goto bad_size; 3589 if (((ipfw_insn_ip *)cmd)->mask.s_addr == 0) { 3590 kprintf("ipfw: opcode %d, useless rule\n", 3591 cmd->opcode); 3592 return EINVAL; 3593 } 3594 break; 3595 3596 case O_IP_SRC_SET: 3597 case O_IP_DST_SET: 3598 if (cmd->arg1 == 0 || cmd->arg1 > 256) { 3599 kprintf("ipfw: invalid set size %d\n", 3600 cmd->arg1); 3601 return EINVAL; 3602 } 3603 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) + 3604 (cmd->arg1+31)/32 ) 3605 goto bad_size; 3606 break; 3607 3608 case O_MACADDR2: 3609 if (cmdlen != F_INSN_SIZE(ipfw_insn_mac)) 3610 goto bad_size; 3611 break; 3612 3613 case O_MAC_TYPE: 3614 case O_IP_SRCPORT: 3615 case O_IP_DSTPORT: /* XXX artificial limit, 30 port pairs */ 3616 if (cmdlen < 2 || cmdlen > 31) 3617 goto bad_size; 3618 break; 3619 3620 case O_RECV: 3621 case O_XMIT: 3622 case O_VIA: 3623 if (cmdlen != F_INSN_SIZE(ipfw_insn_if)) 3624 goto bad_size; 3625 break; 3626 3627 case O_PIPE: 3628 case O_QUEUE: 3629 if (cmdlen != F_INSN_SIZE(ipfw_insn_pipe)) 3630 goto bad_size; 3631 goto check_action; 3632 3633 case O_FORWARD_IP: 3634 if (cmdlen != F_INSN_SIZE(ipfw_insn_sa)) { 3635 goto bad_size; 3636 } else { 3637 in_addr_t fwd_addr; 3638 3639 fwd_addr = ((ipfw_insn_sa *)cmd)-> 3640 sa.sin_addr.s_addr; 3641 if (IN_MULTICAST(ntohl(fwd_addr))) { 3642 kprintf("ipfw: try forwarding to " 3643 "multicast address\n"); 3644 return EINVAL; 3645 } 3646 } 3647 goto check_action; 3648 3649 case O_FORWARD_MAC: /* XXX not implemented yet */ 3650 case O_CHECK_STATE: 3651 case O_COUNT: 3652 case O_ACCEPT: 3653 case O_DENY: 3654 case O_REJECT: 3655 case O_SKIPTO: 3656 case O_DIVERT: 3657 case O_TEE: 3658 if (cmdlen != F_INSN_SIZE(ipfw_insn)) 3659 goto bad_size; 3660 check_action: 3661 if (have_action) { 3662 kprintf("ipfw: opcode %d, multiple actions" 3663 " not allowed\n", 3664 cmd->opcode); 3665 return EINVAL; 3666 } 3667 have_action = 1; 3668 if (l != cmdlen) { 3669 kprintf("ipfw: opcode %d, action must be" 3670 " last 
opcode\n", 3671 cmd->opcode); 3672 return EINVAL; 3673 } 3674 break; 3675 default: 3676 kprintf("ipfw: opcode %d, unknown opcode\n", 3677 cmd->opcode); 3678 return EINVAL; 3679 } 3680 } 3681 if (have_action == 0) { 3682 kprintf("ipfw: missing action\n"); 3683 return EINVAL; 3684 } 3685 return 0; 3686 3687 bad_size: 3688 kprintf("ipfw: opcode %d size %d wrong\n", 3689 cmd->opcode, cmdlen); 3690 return EINVAL; 3691 } 3692 3693 static int 3694 ipfw_ctl_add_rule(struct sockopt *sopt) 3695 { 3696 struct ipfw_ioc_rule *ioc_rule; 3697 size_t size; 3698 uint32_t rule_flags; 3699 int error; 3700 3701 size = sopt->sopt_valsize; 3702 if (size > (sizeof(uint32_t) * IPFW_RULE_SIZE_MAX) || 3703 size < sizeof(*ioc_rule)) { 3704 return EINVAL; 3705 } 3706 if (size != (sizeof(uint32_t) * IPFW_RULE_SIZE_MAX)) { 3707 sopt->sopt_val = krealloc(sopt->sopt_val, sizeof(uint32_t) * 3708 IPFW_RULE_SIZE_MAX, M_TEMP, M_WAITOK); 3709 } 3710 ioc_rule = sopt->sopt_val; 3711 3712 error = ipfw_check_ioc_rule(ioc_rule, size, &rule_flags); 3713 if (error) 3714 return error; 3715 3716 ipfw_add_rule(ioc_rule, rule_flags); 3717 3718 if (sopt->sopt_dir == SOPT_GET) 3719 sopt->sopt_valsize = IOC_RULESIZE(ioc_rule); 3720 return 0; 3721 } 3722 3723 static void * 3724 ipfw_copy_rule(const struct ip_fw *rule, struct ipfw_ioc_rule *ioc_rule) 3725 { 3726 const struct ip_fw *sibling; 3727 #ifdef INVARIANTS 3728 int i; 3729 #endif 3730 3731 KKASSERT(rule->cpuid == IPFW_CFGCPUID); 3732 3733 ioc_rule->act_ofs = rule->act_ofs; 3734 ioc_rule->cmd_len = rule->cmd_len; 3735 ioc_rule->rulenum = rule->rulenum; 3736 ioc_rule->set = rule->set; 3737 ioc_rule->usr_flags = rule->usr_flags; 3738 3739 ioc_rule->set_disable = ipfw_ctx[mycpuid]->ipfw_set_disable; 3740 ioc_rule->static_count = static_count; 3741 ioc_rule->static_len = static_ioc_len; 3742 3743 /* 3744 * Visit (read-only) all of the rule's duplications to get 3745 * the necessary statistics 3746 */ 3747 #ifdef INVARIANTS 3748 i = 0; 3749 #endif 3750 ioc_rule->pcnt = 0; 3751 ioc_rule->bcnt = 0; 3752 ioc_rule->timestamp = 0; 3753 for (sibling = rule; sibling != NULL; sibling = sibling->sibling) { 3754 ioc_rule->pcnt += sibling->pcnt; 3755 ioc_rule->bcnt += sibling->bcnt; 3756 if (sibling->timestamp > ioc_rule->timestamp) 3757 ioc_rule->timestamp = sibling->timestamp; 3758 #ifdef INVARIANTS 3759 ++i; 3760 #endif 3761 } 3762 KASSERT(i == ncpus, ("static rule is not duplicated on every cpu")); 3763 3764 bcopy(rule->cmd, ioc_rule->cmd, ioc_rule->cmd_len * 4 /* XXX */); 3765 3766 return ((uint8_t *)ioc_rule + IOC_RULESIZE(ioc_rule)); 3767 } 3768 3769 static void 3770 ipfw_copy_state(const ipfw_dyn_rule *dyn_rule, 3771 struct ipfw_ioc_state *ioc_state) 3772 { 3773 const struct ipfw_flow_id *id; 3774 struct ipfw_ioc_flowid *ioc_id; 3775 3776 ioc_state->expire = TIME_LEQ(dyn_rule->expire, time_second) ? 
3777 0 : dyn_rule->expire - time_second; 3778 ioc_state->pcnt = dyn_rule->pcnt; 3779 ioc_state->bcnt = dyn_rule->bcnt; 3780 3781 ioc_state->dyn_type = dyn_rule->dyn_type; 3782 ioc_state->count = dyn_rule->count; 3783 3784 ioc_state->rulenum = dyn_rule->stub->rule[mycpuid]->rulenum; 3785 3786 id = &dyn_rule->id; 3787 ioc_id = &ioc_state->id; 3788 3789 ioc_id->type = ETHERTYPE_IP; 3790 ioc_id->u.ip.dst_ip = id->dst_ip; 3791 ioc_id->u.ip.src_ip = id->src_ip; 3792 ioc_id->u.ip.dst_port = id->dst_port; 3793 ioc_id->u.ip.src_port = id->src_port; 3794 ioc_id->u.ip.proto = id->proto; 3795 } 3796 3797 static int 3798 ipfw_ctl_get_rules(struct sockopt *sopt) 3799 { 3800 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 3801 struct ip_fw *rule; 3802 void *bp; 3803 size_t size; 3804 uint32_t dcount = 0; 3805 3806 /* 3807 * pass up a copy of the current rules. Static rules 3808 * come first (the last of which has number IPFW_DEFAULT_RULE), 3809 * followed by a possibly empty list of dynamic rule. 3810 */ 3811 3812 size = static_ioc_len; /* size of static rules */ 3813 if (ipfw_dyn_v) { /* add size of dyn.rules */ 3814 dcount = dyn_count; 3815 size += dcount * sizeof(struct ipfw_ioc_state); 3816 } 3817 3818 if (sopt->sopt_valsize < size) { 3819 /* short length, no need to return incomplete rules */ 3820 /* XXX: if superuser, no need to zero buffer */ 3821 bzero(sopt->sopt_val, sopt->sopt_valsize); 3822 return 0; 3823 } 3824 bp = sopt->sopt_val; 3825 3826 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) 3827 bp = ipfw_copy_rule(rule, bp); 3828 3829 if (ipfw_dyn_v && dcount != 0) { 3830 struct ipfw_ioc_state *ioc_state = bp; 3831 uint32_t dcount2 = 0; 3832 #ifdef INVARIANTS 3833 size_t old_size = size; 3834 #endif 3835 int i; 3836 3837 lockmgr(&dyn_lock, LK_SHARED); 3838 3839 /* Check 'ipfw_dyn_v' again with lock held */ 3840 if (ipfw_dyn_v == NULL) 3841 goto skip; 3842 3843 for (i = 0; i < curr_dyn_buckets; i++) { 3844 ipfw_dyn_rule *p; 3845 3846 /* 3847 * The # of dynamic rules may have grown after the 3848 * snapshot of 'dyn_count' was taken, so we will have 3849 * to check 'dcount' (snapshot of dyn_count) here to 3850 * make sure that we don't overflow the pre-allocated 3851 * buffer. 3852 */ 3853 for (p = ipfw_dyn_v[i]; p != NULL && dcount != 0; 3854 p = p->next, ioc_state++, dcount--, dcount2++) 3855 ipfw_copy_state(p, ioc_state); 3856 } 3857 skip: 3858 lockmgr(&dyn_lock, LK_RELEASE); 3859 3860 /* 3861 * The # of dynamic rules may be shrinked after the 3862 * snapshot of 'dyn_count' was taken. To give user a 3863 * correct dynamic rule count, we use the 'dcount2' 3864 * calculated above (with shared lockmgr lock held). 
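* The recomputed size is what is reported back through sopt_valsize
* below, so the caller sees exactly the number of bytes written.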
3865 */ 3866 size = static_ioc_len + 3867 (dcount2 * sizeof(struct ipfw_ioc_state)); 3868 KKASSERT(size <= old_size); 3869 } 3870 3871 sopt->sopt_valsize = size; 3872 return 0; 3873 } 3874 3875 static void 3876 ipfw_set_disable_dispatch(netmsg_t nmsg) 3877 { 3878 struct lwkt_msg *lmsg = &nmsg->lmsg; 3879 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 3880 3881 ctx->ipfw_gen++; 3882 ctx->ipfw_set_disable = lmsg->u.ms_result32; 3883 3884 ifnet_forwardmsg(lmsg, mycpuid + 1); 3885 } 3886 3887 static void 3888 ipfw_ctl_set_disable(uint32_t disable, uint32_t enable) 3889 { 3890 struct netmsg_base nmsg; 3891 struct lwkt_msg *lmsg; 3892 uint32_t set_disable; 3893 3894 /* IPFW_DEFAULT_SET is always enabled */ 3895 enable |= (1 << IPFW_DEFAULT_SET); 3896 set_disable = (ipfw_ctx[mycpuid]->ipfw_set_disable | disable) & ~enable; 3897 3898 bzero(&nmsg, sizeof(nmsg)); 3899 netmsg_init(&nmsg, NULL, &curthread->td_msgport, 3900 0, ipfw_set_disable_dispatch); 3901 lmsg = &nmsg.lmsg; 3902 lmsg->u.ms_result32 = set_disable; 3903 3904 ifnet_domsg(lmsg, 0); 3905 } 3906 3907 /** 3908 * {set|get}sockopt parser. 3909 */ 3910 static int 3911 ipfw_ctl(struct sockopt *sopt) 3912 { 3913 int error, rulenum; 3914 uint32_t *masks; 3915 size_t size; 3916 3917 error = 0; 3918 3919 switch (sopt->sopt_name) { 3920 case IP_FW_GET: 3921 error = ipfw_ctl_get_rules(sopt); 3922 break; 3923 3924 case IP_FW_FLUSH: 3925 ipfw_flush(0 /* keep default rule */); 3926 break; 3927 3928 case IP_FW_ADD: 3929 error = ipfw_ctl_add_rule(sopt); 3930 break; 3931 3932 case IP_FW_DEL: 3933 /* 3934 * IP_FW_DEL is used for deleting single rules or sets, 3935 * and (ab)used to atomically manipulate sets. 3936 * Argument size is used to distinguish between the two: 3937 * sizeof(uint32_t) 3938 * delete single rule or set of rules, 3939 * or reassign rules (or sets) to a different set. 3940 * 2 * sizeof(uint32_t) 3941 * atomic disable/enable sets. 3942 * first uint32_t contains sets to be disabled, 3943 * second uint32_t contains sets to be enabled. 3944 */ 3945 masks = sopt->sopt_val; 3946 size = sopt->sopt_valsize; 3947 if (size == sizeof(*masks)) { 3948 /* 3949 * Delete or reassign static rule 3950 */ 3951 error = ipfw_ctl_alter(masks[0]); 3952 } else if (size == (2 * sizeof(*masks))) { 3953 /* 3954 * Set enable/disable 3955 */ 3956 ipfw_ctl_set_disable(masks[0], masks[1]); 3957 } else { 3958 error = EINVAL; 3959 } 3960 break; 3961 3962 case IP_FW_ZERO: 3963 case IP_FW_RESETLOG: /* argument is an int, the rule number */ 3964 rulenum = 0; 3965 3966 if (sopt->sopt_val != 0) { 3967 error = soopt_to_kbuf(sopt, &rulenum, 3968 sizeof(int), sizeof(int)); 3969 if (error) 3970 break; 3971 } 3972 error = ipfw_ctl_zero_entry(rulenum, 3973 sopt->sopt_name == IP_FW_RESETLOG); 3974 break; 3975 3976 default: 3977 kprintf("ipfw_ctl invalid option %d\n", sopt->sopt_name); 3978 error = EINVAL; 3979 } 3980 return error; 3981 } 3982 3983 /* 3984 * This procedure is only used to handle keepalives. 
It is invoked 3985 * every dyn_keepalive_period 3986 */ 3987 static void 3988 ipfw_tick_dispatch(netmsg_t nmsg) 3989 { 3990 time_t keep_alive; 3991 uint32_t gen; 3992 int i; 3993 3994 IPFW_ASSERT_CFGPORT(&curthread->td_msgport); 3995 KKASSERT(IPFW_LOADED); 3996 3997 /* Reply ASAP */ 3998 crit_enter(); 3999 lwkt_replymsg(&nmsg->lmsg, 0); 4000 crit_exit(); 4001 4002 if (ipfw_dyn_v == NULL || dyn_count == 0) 4003 goto done; 4004 4005 keep_alive = time_second; 4006 4007 lockmgr(&dyn_lock, LK_EXCLUSIVE); 4008 again: 4009 if (ipfw_dyn_v == NULL || dyn_count == 0) { 4010 lockmgr(&dyn_lock, LK_RELEASE); 4011 goto done; 4012 } 4013 gen = dyn_buckets_gen; 4014 4015 for (i = 0; i < curr_dyn_buckets; i++) { 4016 ipfw_dyn_rule *q, *prev; 4017 4018 for (prev = NULL, q = ipfw_dyn_v[i]; q != NULL;) { 4019 uint32_t ack_rev, ack_fwd; 4020 struct ipfw_flow_id id; 4021 4022 if (q->dyn_type == O_LIMIT_PARENT) 4023 goto next; 4024 4025 if (TIME_LEQ(q->expire, time_second)) { 4026 /* State expired */ 4027 UNLINK_DYN_RULE(prev, ipfw_dyn_v[i], q); 4028 continue; 4029 } 4030 4031 /* 4032 * Keep alive processing 4033 */ 4034 4035 if (!dyn_keepalive) 4036 goto next; 4037 if (q->id.proto != IPPROTO_TCP) 4038 goto next; 4039 if ((q->state & BOTH_SYN) != BOTH_SYN) 4040 goto next; 4041 if (TIME_LEQ(time_second + dyn_keepalive_interval, 4042 q->expire)) 4043 goto next; /* too early */ 4044 if (q->keep_alive == keep_alive) 4045 goto next; /* alreay done */ 4046 4047 /* 4048 * Save necessary information, so that they could 4049 * survive after possible blocking in send_pkt() 4050 */ 4051 id = q->id; 4052 ack_rev = q->ack_rev; 4053 ack_fwd = q->ack_fwd; 4054 4055 /* Sending has been started */ 4056 q->keep_alive = keep_alive; 4057 4058 /* Release lock to avoid possible dead lock */ 4059 lockmgr(&dyn_lock, LK_RELEASE); 4060 send_pkt(&id, ack_rev - 1, ack_fwd, TH_SYN); 4061 send_pkt(&id, ack_fwd - 1, ack_rev, 0); 4062 lockmgr(&dyn_lock, LK_EXCLUSIVE); 4063 4064 if (gen != dyn_buckets_gen) { 4065 /* 4066 * Dyn bucket array has been changed during 4067 * the above two sending; reiterate. 4068 */ 4069 goto again; 4070 } 4071 next: 4072 prev = q; 4073 q = q->next; 4074 } 4075 } 4076 lockmgr(&dyn_lock, LK_RELEASE); 4077 done: 4078 callout_reset(&ipfw_timeout_h, dyn_keepalive_period * hz, 4079 ipfw_tick, NULL); 4080 } 4081 4082 /* 4083 * This procedure is only used to handle keepalives. 
It is invoked 4084 * every dyn_keepalive_period 4085 */ 4086 static void 4087 ipfw_tick(void *dummy __unused) 4088 { 4089 struct lwkt_msg *lmsg = &ipfw_timeout_netmsg.lmsg; 4090 4091 KKASSERT(mycpuid == IPFW_CFGCPUID); 4092 4093 crit_enter(); 4094 4095 KKASSERT(lmsg->ms_flags & MSGF_DONE); 4096 if (IPFW_LOADED) { 4097 lwkt_sendmsg(IPFW_CFGPORT, lmsg); 4098 /* ipfw_timeout_netmsg's handler reset this callout */ 4099 } 4100 4101 crit_exit(); 4102 } 4103 4104 static int 4105 ipfw_check_in(void *arg, struct mbuf **m0, struct ifnet *ifp, int dir) 4106 { 4107 struct ip_fw_args args; 4108 struct mbuf *m = *m0; 4109 struct m_tag *mtag; 4110 int tee = 0, error = 0, ret; 4111 4112 if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED) { 4113 /* Extract info from dummynet tag */ 4114 mtag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL); 4115 KKASSERT(mtag != NULL); 4116 args.rule = ((struct dn_pkt *)m_tag_data(mtag))->dn_priv; 4117 KKASSERT(args.rule != NULL); 4118 4119 m_tag_delete(m, mtag); 4120 m->m_pkthdr.fw_flags &= ~DUMMYNET_MBUF_TAGGED; 4121 } else { 4122 args.rule = NULL; 4123 } 4124 4125 args.eh = NULL; 4126 args.oif = NULL; 4127 args.m = m; 4128 ret = ipfw_chk(&args); 4129 m = args.m; 4130 4131 if (m == NULL) { 4132 error = EACCES; 4133 goto back; 4134 } 4135 4136 switch (ret) { 4137 case IP_FW_PASS: 4138 break; 4139 4140 case IP_FW_DENY: 4141 m_freem(m); 4142 m = NULL; 4143 error = EACCES; 4144 break; 4145 4146 case IP_FW_DUMMYNET: 4147 /* Send packet to the appropriate pipe */ 4148 ipfw_dummynet_io(m, args.cookie, DN_TO_IP_IN, &args); 4149 break; 4150 4151 case IP_FW_TEE: 4152 tee = 1; 4153 /* FALL THROUGH */ 4154 4155 case IP_FW_DIVERT: 4156 /* 4157 * Must clear bridge tag when changing 4158 */ 4159 m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED; 4160 if (ip_divert_p != NULL) { 4161 m = ip_divert_p(m, tee, 1); 4162 } else { 4163 m_freem(m); 4164 m = NULL; 4165 /* not sure this is the right error msg */ 4166 error = EACCES; 4167 } 4168 break; 4169 4170 default: 4171 panic("unknown ipfw return value: %d", ret); 4172 } 4173 back: 4174 *m0 = m; 4175 return error; 4176 } 4177 4178 static int 4179 ipfw_check_out(void *arg, struct mbuf **m0, struct ifnet *ifp, int dir) 4180 { 4181 struct ip_fw_args args; 4182 struct mbuf *m = *m0; 4183 struct m_tag *mtag; 4184 int tee = 0, error = 0, ret; 4185 4186 if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED) { 4187 /* Extract info from dummynet tag */ 4188 mtag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL); 4189 KKASSERT(mtag != NULL); 4190 args.rule = ((struct dn_pkt *)m_tag_data(mtag))->dn_priv; 4191 KKASSERT(args.rule != NULL); 4192 4193 m_tag_delete(m, mtag); 4194 m->m_pkthdr.fw_flags &= ~DUMMYNET_MBUF_TAGGED; 4195 } else { 4196 args.rule = NULL; 4197 } 4198 4199 args.eh = NULL; 4200 args.m = m; 4201 args.oif = ifp; 4202 ret = ipfw_chk(&args); 4203 m = args.m; 4204 4205 if (m == NULL) { 4206 error = EACCES; 4207 goto back; 4208 } 4209 4210 switch (ret) { 4211 case IP_FW_PASS: 4212 break; 4213 4214 case IP_FW_DENY: 4215 m_freem(m); 4216 m = NULL; 4217 error = EACCES; 4218 break; 4219 4220 case IP_FW_DUMMYNET: 4221 ipfw_dummynet_io(m, args.cookie, DN_TO_IP_OUT, &args); 4222 break; 4223 4224 case IP_FW_TEE: 4225 tee = 1; 4226 /* FALL THROUGH */ 4227 4228 case IP_FW_DIVERT: 4229 if (ip_divert_p != NULL) { 4230 m = ip_divert_p(m, tee, 0); 4231 } else { 4232 m_freem(m); 4233 m = NULL; 4234 /* not sure this is the right error msg */ 4235 error = EACCES; 4236 } 4237 break; 4238 4239 default: 4240 panic("unknown ipfw return value: %d", ret); 4241 } 4242 back: 4243 *m0 = m; 4244 
	return error;
}

static void
ipfw_hook(void)
{
	struct pfil_head *pfh;

	IPFW_ASSERT_CFGPORT(&curthread->td_msgport);

	pfh = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (pfh == NULL)
		return;

	pfil_add_hook(ipfw_check_in, NULL, PFIL_IN | PFIL_MPSAFE, pfh);
	pfil_add_hook(ipfw_check_out, NULL, PFIL_OUT | PFIL_MPSAFE, pfh);
}

static void
ipfw_dehook(void)
{
	struct pfil_head *pfh;

	IPFW_ASSERT_CFGPORT(&curthread->td_msgport);

	pfh = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (pfh == NULL)
		return;

	pfil_remove_hook(ipfw_check_in, NULL, PFIL_IN, pfh);
	pfil_remove_hook(ipfw_check_out, NULL, PFIL_OUT, pfh);
}

static void
ipfw_sysctl_enable_dispatch(netmsg_t nmsg)
{
	struct lwkt_msg *lmsg = &nmsg->lmsg;
	int enable = lmsg->u.ms_result;

	if (fw_enable == enable)
		goto reply;

	fw_enable = enable;
	if (fw_enable)
		ipfw_hook();
	else
		ipfw_dehook();
reply:
	lwkt_replymsg(lmsg, 0);
}

static int
ipfw_sysctl_enable(SYSCTL_HANDLER_ARGS)
{
	struct netmsg_base nmsg;
	struct lwkt_msg *lmsg;
	int enable, error;

	enable = fw_enable;
	error = sysctl_handle_int(oidp, &enable, 0, req);
	if (error || req->newptr == NULL)
		return error;

	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
		    0, ipfw_sysctl_enable_dispatch);
	lmsg = &nmsg.lmsg;
	lmsg->u.ms_result = enable;

	return lwkt_domsg(IPFW_CFGPORT, lmsg, 0);
}

static int
ipfw_sysctl_autoinc_step(SYSCTL_HANDLER_ARGS)
{
	return sysctl_int_range(oidp, arg1, arg2, req,
				IPFW_AUTOINC_STEP_MIN, IPFW_AUTOINC_STEP_MAX);
}

static int
ipfw_sysctl_dyn_buckets(SYSCTL_HANDLER_ARGS)
{
	int error, value;

	lockmgr(&dyn_lock, LK_EXCLUSIVE);

	value = dyn_buckets;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || !req->newptr)
		goto back;

	/*
	 * Make sure we have a power of 2 and
	 * do not allow more than 64k entries.
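	 * A power of 2 is detected with the usual bit trick below:
	 * value is a power of 2 iff (value & (value - 1)) == 0, e.g.
	 * 4096 & 4095 == 0, whereas 4100 & 4099 != 0.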
	 */
	error = EINVAL;
	if (value <= 1 || value > 65536)
		goto back;
	if ((value & (value - 1)) != 0)
		goto back;

	error = 0;
	dyn_buckets = value;
back:
	lockmgr(&dyn_lock, LK_RELEASE);
	return error;
}

static int
ipfw_sysctl_dyn_fin(SYSCTL_HANDLER_ARGS)
{
	return sysctl_int_range(oidp, arg1, arg2, req,
				1, dyn_keepalive_period - 1);
}

static int
ipfw_sysctl_dyn_rst(SYSCTL_HANDLER_ARGS)
{
	return sysctl_int_range(oidp, arg1, arg2, req,
				1, dyn_keepalive_period - 1);
}

static void
ipfw_ctx_init_dispatch(netmsg_t nmsg)
{
	struct netmsg_ipfw *fwmsg = (struct netmsg_ipfw *)nmsg;
	struct ipfw_context *ctx;
	struct ip_fw *def_rule;

	ctx = kmalloc(sizeof(*ctx), M_IPFW, M_WAITOK | M_ZERO);
	ipfw_ctx[mycpuid] = ctx;

	def_rule = kmalloc(sizeof(*def_rule), M_IPFW, M_WAITOK | M_ZERO);

	def_rule->act_ofs = 0;
	def_rule->rulenum = IPFW_DEFAULT_RULE;
	def_rule->cmd_len = 1;
	def_rule->set = IPFW_DEFAULT_SET;

	def_rule->cmd[0].len = 1;
#ifdef IPFIREWALL_DEFAULT_TO_ACCEPT
	def_rule->cmd[0].opcode = O_ACCEPT;
#else
	def_rule->cmd[0].opcode = O_DENY;
#endif

	def_rule->refcnt = 1;
	def_rule->cpuid = mycpuid;

	/* Install the default rule */
	ctx->ipfw_default_rule = def_rule;
	ctx->ipfw_layer3_chain = def_rule;

	/* Link rule CPU sibling */
	ipfw_link_sibling(fwmsg, def_rule);

	/* Statistics only need to be updated once */
	if (mycpuid == 0)
		ipfw_inc_static_count(def_rule);

	ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
}

static void
ipfw_init_dispatch(netmsg_t nmsg)
{
	struct netmsg_ipfw fwmsg;
	int error = 0;

	if (IPFW_LOADED) {
		kprintf("IP firewall already loaded\n");
		error = EEXIST;
		goto reply;
	}

	bzero(&fwmsg, sizeof(fwmsg));
	netmsg_init(&fwmsg.base, NULL, &curthread->td_msgport,
		    0, ipfw_ctx_init_dispatch);
	ifnet_domsg(&fwmsg.base.lmsg, 0);

	ip_fw_chk_ptr = ipfw_chk;
	ip_fw_ctl_ptr = ipfw_ctl;
	ip_fw_dn_io_ptr = ipfw_dummynet_io;

	kprintf("ipfw2 initialized, default to %s, logging ",
		ipfw_ctx[mycpuid]->ipfw_default_rule->cmd[0].opcode ==
		O_ACCEPT ?
"accept" : "deny"); 4430 4431 #ifdef IPFIREWALL_VERBOSE 4432 fw_verbose = 1; 4433 #endif 4434 #ifdef IPFIREWALL_VERBOSE_LIMIT 4435 verbose_limit = IPFIREWALL_VERBOSE_LIMIT; 4436 #endif 4437 if (fw_verbose == 0) { 4438 kprintf("disabled\n"); 4439 } else if (verbose_limit == 0) { 4440 kprintf("unlimited\n"); 4441 } else { 4442 kprintf("limited to %d packets/entry by default\n", 4443 verbose_limit); 4444 } 4445 4446 callout_init_mp(&ipfw_timeout_h); 4447 netmsg_init(&ipfw_timeout_netmsg, NULL, &netisr_adone_rport, 4448 MSGF_DROPABLE | MSGF_PRIORITY, 4449 ipfw_tick_dispatch); 4450 lockinit(&dyn_lock, "ipfw_dyn", 0, 0); 4451 4452 ip_fw_loaded = 1; 4453 callout_reset(&ipfw_timeout_h, hz, ipfw_tick, NULL); 4454 4455 if (fw_enable) 4456 ipfw_hook(); 4457 reply: 4458 lwkt_replymsg(&nmsg->lmsg, error); 4459 } 4460 4461 static int 4462 ipfw_init(void) 4463 { 4464 struct netmsg_base smsg; 4465 4466 netmsg_init(&smsg, NULL, &curthread->td_msgport, 4467 0, ipfw_init_dispatch); 4468 return lwkt_domsg(IPFW_CFGPORT, &smsg.lmsg, 0); 4469 } 4470 4471 #ifdef KLD_MODULE 4472 4473 static void 4474 ipfw_fini_dispatch(netmsg_t nmsg) 4475 { 4476 int error = 0, cpu; 4477 4478 if (ipfw_refcnt != 0) { 4479 error = EBUSY; 4480 goto reply; 4481 } 4482 4483 ip_fw_loaded = 0; 4484 4485 ipfw_dehook(); 4486 callout_stop(&ipfw_timeout_h); 4487 4488 netmsg_service_sync(); 4489 4490 crit_enter(); 4491 if ((ipfw_timeout_netmsg.lmsg.ms_flags & MSGF_DONE) == 0) { 4492 /* 4493 * Callout message is pending; drop it 4494 */ 4495 lwkt_dropmsg(&ipfw_timeout_netmsg.lmsg); 4496 } 4497 crit_exit(); 4498 4499 ip_fw_chk_ptr = NULL; 4500 ip_fw_ctl_ptr = NULL; 4501 ip_fw_dn_io_ptr = NULL; 4502 ipfw_flush(1 /* kill default rule */); 4503 4504 /* Free pre-cpu context */ 4505 for (cpu = 0; cpu < ncpus; ++cpu) 4506 kfree(ipfw_ctx[cpu], M_IPFW); 4507 4508 kprintf("IP firewall unloaded\n"); 4509 reply: 4510 lwkt_replymsg(&nmsg->lmsg, error); 4511 } 4512 4513 static int 4514 ipfw_fini(void) 4515 { 4516 struct netmsg_base smsg; 4517 4518 netmsg_init(&smsg, NULL, &curthread->td_msgport, 4519 0, ipfw_fini_dispatch); 4520 return lwkt_domsg(IPFW_CFGPORT, &smsg.lmsg, 0); 4521 } 4522 4523 #endif /* KLD_MODULE */ 4524 4525 static int 4526 ipfw_modevent(module_t mod, int type, void *unused) 4527 { 4528 int err = 0; 4529 4530 switch (type) { 4531 case MOD_LOAD: 4532 err = ipfw_init(); 4533 break; 4534 4535 case MOD_UNLOAD: 4536 #ifndef KLD_MODULE 4537 kprintf("ipfw statically compiled, cannot unload\n"); 4538 err = EBUSY; 4539 #else 4540 err = ipfw_fini(); 4541 #endif 4542 break; 4543 default: 4544 break; 4545 } 4546 return err; 4547 } 4548 4549 static moduledata_t ipfwmod = { 4550 "ipfw", 4551 ipfw_modevent, 4552 0 4553 }; 4554 DECLARE_MODULE(ipfw, ipfwmod, SI_SUB_PROTO_END, SI_ORDER_ANY); 4555 MODULE_VERSION(ipfw, 1); 4556