1 /* 2 * Copyright (c) 2002 Luigi Rizzo, Universita` di Pisa 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 24 * 25 * $FreeBSD: src/sys/netinet/ip_fw2.c,v 1.6.2.12 2003/04/08 10:42:32 maxim Exp $ 26 * $DragonFly: src/sys/net/ipfw/ip_fw2.c,v 1.100 2008/11/22 11:03:35 sephe Exp $ 27 */ 28 29 /* 30 * Implement IP packet firewall (new version) 31 */ 32 33 #include "opt_ipfw.h" 34 #include "opt_inet.h" 35 #ifndef INET 36 #error IPFIREWALL requires INET. 37 #endif /* INET */ 38 39 #include <sys/param.h> 40 #include <sys/systm.h> 41 #include <sys/malloc.h> 42 #include <sys/mbuf.h> 43 #include <sys/kernel.h> 44 #include <sys/proc.h> 45 #include <sys/socket.h> 46 #include <sys/socketvar.h> 47 #include <sys/sysctl.h> 48 #include <sys/syslog.h> 49 #include <sys/ucred.h> 50 #include <sys/in_cksum.h> 51 #include <sys/lock.h> 52 53 #include <net/if.h> 54 #include <net/route.h> 55 #include <net/pfil.h> 56 #include <net/dummynet/ip_dummynet.h> 57 58 #include <sys/thread2.h> 59 #include <sys/mplock2.h> 60 #include <net/netmsg2.h> 61 62 #include <netinet/in.h> 63 #include <netinet/in_systm.h> 64 #include <netinet/in_var.h> 65 #include <netinet/in_pcb.h> 66 #include <netinet/ip.h> 67 #include <netinet/ip_var.h> 68 #include <netinet/ip_icmp.h> 69 #include <netinet/tcp.h> 70 #include <netinet/tcp_timer.h> 71 #include <netinet/tcp_var.h> 72 #include <netinet/tcpip.h> 73 #include <netinet/udp.h> 74 #include <netinet/udp_var.h> 75 #include <netinet/ip_divert.h> 76 #include <netinet/if_ether.h> /* XXX for ETHERTYPE_IP */ 77 78 #include <net/ipfw/ip_fw2.h> 79 80 #ifdef IPFIREWALL_DEBUG 81 #define DPRINTF(fmt, ...) \ 82 do { \ 83 if (fw_debug > 0) \ 84 kprintf(fmt, __VA_ARGS__); \ 85 } while (0) 86 #else 87 #define DPRINTF(fmt, ...) ((void)0) 88 #endif 89 90 /* 91 * Description about per-CPU rule duplication: 92 * 93 * Module loading/unloading and all ioctl operations are serialized 94 * by netisr0, so we don't have any ordering or locking problems. 95 * 96 * Following graph shows how operation on per-CPU rule list is 97 * performed [2 CPU case]: 98 * 99 * CPU0 CPU1 100 * 101 * netisr0 <------------------------------------+ 102 * domsg | 103 * | | 104 * | netmsg | 105 * | | 106 * V | 107 * ifnet0 | 108 * : | netmsg 109 * :(delete/add...) 
| 110 * : | 111 * : netmsg | 112 * forwardmsg---------->ifnet1 | 113 * : | 114 * :(delete/add...) | 115 * : | 116 * : | 117 * replymsg--------------+ 118 * 119 * 120 * 121 * 122 * Rules which will not create states (dyn rules) [2 CPU case] 123 * 124 * CPU0 CPU1 125 * layer3_chain layer3_chain 126 * | | 127 * V V 128 * +-------+ sibling +-------+ sibling 129 * | rule1 |--------->| rule1 |--------->NULL 130 * +-------+ +-------+ 131 * | | 132 * |next |next 133 * V V 134 * +-------+ sibling +-------+ sibling 135 * | rule2 |--------->| rule2 |--------->NULL 136 * +-------+ +-------+ 137 * 138 * ip_fw.sibling: 139 * 1) Ease statistics calculation during IP_FW_GET. We only need to 140 * iterate layer3_chain on CPU0; the current rule's duplication on 141 * the other CPUs could safely be read-only accessed by using 142 * ip_fw.sibling 143 * 2) Accelerate rule insertion and deletion, e.g. rule insertion: 144 * a) In netisr0 (on CPU0) rule3 is determined to be inserted between 145 * rule1 and rule2. To make this decision we need to iterate the 146 * layer3_chain on CPU0. The netmsg, which is used to insert the 147 * rule, will contain rule1 on CPU0 as prev_rule and rule2 on CPU0 148 * as next_rule 149 * b) After the insertion on CPU0 is done, we will move on to CPU1. 150 * But instead of relocating the rule3's position on CPU1 by 151 * iterating the layer3_chain on CPU1, we set the netmsg's prev_rule 152 * to rule1->sibling and next_rule to rule2->sibling before the 153 * netmsg is forwarded to CPU1 from CPU0 154 * 155 * 156 * 157 * Rules which will create states (dyn rules) [2 CPU case] 158 * (unnecessary parts are omitted; they are same as in the previous figure) 159 * 160 * CPU0 CPU1 161 * 162 * +-------+ +-------+ 163 * | rule1 | | rule1 | 164 * +-------+ +-------+ 165 * ^ | | ^ 166 * | |stub stub| | 167 * | | | | 168 * | +----+ +----+ | 169 * | | | | 170 * | V V | 171 * | +--------------------+ | 172 * | | rule_stub | | 173 * | | (read-only shared) | | 174 * | | | | 175 * | | back pointer array | | 176 * | | (indexed by cpuid) | | 177 * | | | | 178 * +----|---------[0] | | 179 * | [1]--------|----+ 180 * | | 181 * +--------------------+ 182 * ^ ^ 183 * | | 184 * ........|............|............ 185 * : | | : 186 * : |stub |stub : 187 * : | | : 188 * : +---------+ +---------+ : 189 * : | state1a | | state1b | .... : 190 * : +---------+ +---------+ : 191 * : : 192 * : states table : 193 * : (shared) : 194 * : (protected by dyn_lock) : 195 * .................................. 196 * 197 * [state1a and state1b are states created by rule1] 198 * 199 * ip_fw_stub: 200 * This structure is introduced so that shared (locked) state table could 201 * work with per-CPU (duplicated) static rules. It mainly bridges states 202 * and static rules and serves as static rule's place holder (a read-only 203 * shared part of duplicated rules) from states point of view. 
 *
 * IPFW_RULE_F_STATE (only for rules which create states):
 * o During rule installation, this flag is turned on after the rule's
 *   duplications reach all CPUs, to avoid at least the following race:
 *   1) rule1 is duplicated on CPU0 but is not duplicated on CPU1 yet
 *   2) rule1 creates state1
 *   3) state1 is located on CPU1 by check-state,
 *      but rule1 is not duplicated on CPU1 yet
 * o During rule deletion, this flag is turned off before deleting the
 *   states created by the rule and before deleting the rule itself, so
 *   no more states will be created by the to-be-deleted rule even when
 *   its duplications on certain CPUs have not been eliminated yet.
 */

#define IPFW_AUTOINC_STEP_MIN	1
#define IPFW_AUTOINC_STEP_MAX	1000
#define IPFW_AUTOINC_STEP_DEF	100

#define IPFW_DEFAULT_RULE	65535	/* rulenum for the default rule */
#define IPFW_DEFAULT_SET	31	/* set number for the default rule */

struct netmsg_ipfw {
	struct netmsg	nmsg;
	const struct ipfw_ioc_rule *ioc_rule;
	struct ip_fw	*next_rule;
	struct ip_fw	*prev_rule;
	struct ip_fw	*sibling;
	struct ip_fw_stub *stub;
};

struct netmsg_del {
	struct netmsg	nmsg;
	struct ip_fw	*start_rule;
	struct ip_fw	*prev_rule;
	uint16_t	rulenum;
	uint8_t		from_set;
	uint8_t		to_set;
};

struct netmsg_zent {
	struct netmsg	nmsg;
	struct ip_fw	*start_rule;
	uint16_t	rulenum;
	uint16_t	log_only;
};

struct ipfw_context {
	struct ip_fw	*ipfw_layer3_chain;	/* list of rules for layer3 */
	struct ip_fw	*ipfw_default_rule;	/* default rule */
	uint64_t	ipfw_norule_counter;	/* counter for ipfw_log(NULL) */

	/*
	 * ipfw_set_disable contains one bit per set value (0..31).
	 * If the bit is set, all rules with the corresponding set
	 * are disabled.  Set IPFW_DEFAULT_SET is reserved for the
	 * default rule and CANNOT be disabled.
	 */
	uint32_t	ipfw_set_disable;
	uint32_t	ipfw_gen;		/* generation of rule list */
};

static struct ipfw_context	*ipfw_ctx[MAXCPU];

#ifdef KLD_MODULE
/*
 * The module can not be unloaded if there are references to certain
 * rules of ipfw(4) held elsewhere, e.g. by dummynet(4).
 */
static int ipfw_refcnt;
#endif

MALLOC_DEFINE(M_IPFW, "IpFw/IpAcct", "IpFw/IpAcct chain's");

/*
 * The following two global variables are accessed and
 * updated only on CPU0.
 */
static uint32_t static_count;	/* # of static rules */
static uint32_t static_ioc_len;	/* bytes of static rules */
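/*
 * Illustrative sketch (not part of the original source): the per-CPU rule
 * duplication described above means that a rule-insertion netmsg carries
 * per-CPU anchors.  Before the message is forwarded from one CPU to the
 * next, the anchors are remapped through the sibling pointers, so the
 * insertion position never has to be searched again on the other CPUs.
 * The handler name below is hypothetical.
 */
#if 0
static void
ipfw_add_rule_dispatch_sketch(struct netmsg_ipfw *fwmsg)
{
	/* ... duplicate fwmsg->ioc_rule between prev_rule and next_rule ... */

	/* Remap the anchors for the next CPU before forwarding the netmsg. */
	if (fwmsg->prev_rule != NULL)
		fwmsg->prev_rule = fwmsg->prev_rule->sibling;
	if (fwmsg->next_rule != NULL)
		fwmsg->next_rule = fwmsg->next_rule->sibling;
	/* ... forward the netmsg to the next CPU, or reply to netisr0 ... */
}
#endif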
/*
 * If 1, ipfw static rules are being flushed and
 * ipfw_chk() will skip to the default rule.
 */
static int ipfw_flushing;

static int fw_verbose;
static int verbose_limit;

static int fw_debug;
static int autoinc_step = IPFW_AUTOINC_STEP_DEF;

static int	ipfw_sysctl_enable(SYSCTL_HANDLER_ARGS);
static int	ipfw_sysctl_autoinc_step(SYSCTL_HANDLER_ARGS);
static int	ipfw_sysctl_dyn_buckets(SYSCTL_HANDLER_ARGS);
static int	ipfw_sysctl_dyn_fin(SYSCTL_HANDLER_ARGS);
static int	ipfw_sysctl_dyn_rst(SYSCTL_HANDLER_ARGS);

SYSCTL_NODE(_net_inet_ip, OID_AUTO, fw, CTLFLAG_RW, 0, "Firewall");
SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
    &fw_enable, 0, ipfw_sysctl_enable, "I", "Enable ipfw");
SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, autoinc_step, CTLTYPE_INT | CTLFLAG_RW,
    &autoinc_step, 0, ipfw_sysctl_autoinc_step, "I",
    "Rule number autoincrement step");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, one_pass, CTLFLAG_RW,
    &fw_one_pass, 0,
    "Only do a single pass through ipfw when using dummynet(4)");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, debug, CTLFLAG_RW,
    &fw_debug, 0, "Enable printing of debug ip_fw statements");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose, CTLFLAG_RW,
    &fw_verbose, 0, "Log matches to ipfw rules");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose_limit, CTLFLAG_RW,
    &verbose_limit, 0, "Set upper limit of matches of ipfw rules logged");

/*
 * Description of dynamic rules.
 *
 * Dynamic rules are stored in lists accessed through a hash table
 * (ipfw_dyn_v) whose size is curr_dyn_buckets.  This value can
 * be modified through the sysctl variable dyn_buckets, which is
 * applied when the table becomes empty.
 *
 * XXX currently there is only one list, ipfw_dyn.
 *
 * When a packet is received, its address fields are first masked
 * with the mask defined for the rule, then hashed, then matched
 * against the entries in the corresponding list.
 * Dynamic rules can be used for different purposes:
 * + stateful rules;
 * + enforcing limits on the number of sessions;
 * + in-kernel NAT (not implemented yet)
 *
 * The lifetime of dynamic rules is regulated by dyn_*_lifetime,
 * measured in seconds and depending on the flags.
 *
 * The total number of dynamic rules is stored in dyn_count.
 * The maximum number of dynamic rules is dyn_max.  When we reach
 * the maximum number of rules we do not create any more.  This is
 * done to avoid consuming too much memory, but also too much
 * time when searching on each packet (ideally, we should try instead
 * to put a limit on the length of the list on each bucket...).
 *
 * Each dynamic rule holds a pointer to the parent ipfw rule so
 * we know what action to perform.  Dynamic rules are removed when
 * the parent rule is deleted.  XXX we should make them survive.
 *
 * There are some limitations with dynamic rules -- we do not
 * obey the 'randomized match', and we do not do multiple
 * passes through the firewall.  XXX check the latter!!!
 *
 * NOTE about the SHARED LOCKMGR LOCK during dynamic rule lookup:
 * Only a TCP state transition will change a dynamic rule's state and ack
 * sequences, and all packets of one TCP connection go through the same
 * TCP thread, so it is safe to hold the lockmgr lock shared during
 * dynamic rule lookup.  The keepalive callout takes the lockmgr lock
 * exclusively when it searches for dynamic rules that need a keepalive,
 * so it will not see half-updated state and ack sequences.  Though the
 * updating of the expire field looks racy for other protocols, the
 * resolution (seconds) of the expire field makes this kind of race
 * harmless.
 * XXX statistics updating is _not_ MPsafe!!!
 * XXX once the UDP output path is fixed, we could use a lockless dynamic
 * rule hash table
 */
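/*
 * Illustrative sketch (not part of the original source) of the "mask,
 * then hash, then match" sequence described above, for a hypothetical
 * rule that only keys on addresses and protocol.  hash_packet() and
 * ipfw_dyn_v are defined later in this file.
 */
#if 0
static ipfw_dyn_rule *
dyn_lookup_sketch(const struct ipfw_flow_id *pkt)
{
	struct ipfw_flow_id id = *pkt;
	ipfw_dyn_rule *q;

	id.src_port = id.dst_port = 0;		/* mask out unused fields */
	for (q = ipfw_dyn_v[hash_packet(&id)]; q != NULL; q = q->next) {
		if (id.proto == q->id.proto &&
		    id.src_ip == q->id.src_ip &&
		    id.dst_ip == q->id.dst_ip)
			return q;		/* matched */
	}
	return NULL;
}
#endif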
static ipfw_dyn_rule **ipfw_dyn_v = NULL;
static uint32_t dyn_buckets = 256;	/* must be power of 2 */
static uint32_t curr_dyn_buckets = 256;	/* must be power of 2 */
static uint32_t dyn_buckets_gen;	/* generation of dyn buckets array */
static struct lock dyn_lock;		/* dynamic rules' hash table lock */

static struct netmsg ipfw_timeout_netmsg; /* schedule ipfw timeout */
static struct callout ipfw_timeout_h;

/*
 * Timeouts for various events in handling dynamic rules.
 */
static uint32_t dyn_ack_lifetime = 300;
static uint32_t dyn_syn_lifetime = 20;
static uint32_t dyn_fin_lifetime = 1;
static uint32_t dyn_rst_lifetime = 1;
static uint32_t dyn_udp_lifetime = 10;
static uint32_t dyn_short_lifetime = 5;

/*
 * Keepalives are sent if dyn_keepalive is set.  They are sent every
 * dyn_keepalive_period seconds, in the last dyn_keepalive_interval
 * seconds of the lifetime of a rule.
 * dyn_rst_lifetime and dyn_fin_lifetime should be strictly lower
 * than dyn_keepalive_period.
 */

static uint32_t dyn_keepalive_interval = 20;
static uint32_t dyn_keepalive_period = 5;
static uint32_t dyn_keepalive = 1;	/* do send keepalives */

static uint32_t dyn_count;		/* # of dynamic rules */
static uint32_t dyn_max = 4096;		/* max # of dynamic rules */

SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_buckets, CTLTYPE_INT | CTLFLAG_RW,
    &dyn_buckets, 0, ipfw_sysctl_dyn_buckets, "I", "Number of dyn. buckets");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, curr_dyn_buckets, CTLFLAG_RD,
    &curr_dyn_buckets, 0, "Current number of dyn. buckets");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_count, CTLFLAG_RD,
    &dyn_count, 0, "Number of dyn. rules");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_max, CTLFLAG_RW,
    &dyn_max, 0, "Max number of dyn. rules");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, static_count, CTLFLAG_RD,
    &static_count, 0, "Number of static rules");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_ack_lifetime, CTLFLAG_RW,
    &dyn_ack_lifetime, 0, "Lifetime of dyn. rules for acks");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_syn_lifetime, CTLFLAG_RW,
    &dyn_syn_lifetime, 0, "Lifetime of dyn. rules for syn");
SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_fin_lifetime,
    CTLTYPE_INT | CTLFLAG_RW, &dyn_fin_lifetime, 0, ipfw_sysctl_dyn_fin, "I",
    "Lifetime of dyn. rules for fin");
SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_rst_lifetime,
    CTLTYPE_INT | CTLFLAG_RW, &dyn_rst_lifetime, 0, ipfw_sysctl_dyn_rst, "I",
    "Lifetime of dyn. rules for rst");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_udp_lifetime, CTLFLAG_RW,
    &dyn_udp_lifetime, 0, "Lifetime of dyn. rules for UDP");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_short_lifetime, CTLFLAG_RW,
    &dyn_short_lifetime, 0, "Lifetime of dyn. rules for other situations");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_keepalive, CTLFLAG_RW,
    &dyn_keepalive, 0, "Enable keepalives for dyn. rules");
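/*
 * Illustrative sketch (not part of the original source): the keepalive
 * machinery relies on dyn_fin_lifetime and dyn_rst_lifetime staying below
 * dyn_keepalive_period (see the KKASSERTs in lookup_dyn_rule()), so the
 * handlers behind the dyn_fin_lifetime/dyn_rst_lifetime sysctls are
 * expected to reject larger values.  This is only one plausible shape for
 * such a handler, not the actual implementation.
 */
#if 0
static int
ipfw_sysctl_dyn_fin_sketch(SYSCTL_HANDLER_ARGS)
{
	int error, value = dyn_fin_lifetime;

	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (value < 0 || value >= dyn_keepalive_period)
		return EINVAL;
	dyn_fin_lifetime = value;
	return 0;
}
#endif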
static ip_fw_chk_t	ipfw_chk;
static void		ipfw_tick(void *);

static __inline int
ipfw_free_rule(struct ip_fw *rule)
{
	KASSERT(rule->cpuid == mycpuid, ("rule freed on cpu%d\n", mycpuid));
	KASSERT(rule->refcnt > 0, ("invalid refcnt %u\n", rule->refcnt));
	rule->refcnt--;
	if (rule->refcnt == 0) {
		kfree(rule, M_IPFW);
		return 1;
	}
	return 0;
}

static void
ipfw_unref_rule(void *priv)
{
	ipfw_free_rule(priv);
#ifdef KLD_MODULE
	atomic_subtract_int(&ipfw_refcnt, 1);
#endif
}

static __inline void
ipfw_ref_rule(struct ip_fw *rule)
{
	KASSERT(rule->cpuid == mycpuid, ("rule used on cpu%d\n", mycpuid));
#ifdef KLD_MODULE
	atomic_add_int(&ipfw_refcnt, 1);
#endif
	rule->refcnt++;
}

/*
 * This macro maps an ip pointer into a layer3 header pointer of type T
 */
#define L3HDR(T, ip)	((T *)((uint32_t *)(ip) + (ip)->ip_hl))

static __inline int
icmptype_match(struct ip *ip, ipfw_insn_u32 *cmd)
{
	int type = L3HDR(struct icmp, ip)->icmp_type;

	return (type <= ICMP_MAXTYPE && (cmd->d[0] & (1 << type)));
}

#define TT	((1 << ICMP_ECHO) | \
		 (1 << ICMP_ROUTERSOLICIT) | \
		 (1 << ICMP_TSTAMP) | \
		 (1 << ICMP_IREQ) | \
		 (1 << ICMP_MASKREQ))

static int
is_icmp_query(struct ip *ip)
{
	int type = L3HDR(struct icmp, ip)->icmp_type;

	return (type <= ICMP_MAXTYPE && (TT & (1 << type)));
}

#undef TT

/*
 * The following checks use two arrays of 8 or 16 bits to store the
 * bits that we want set or clear, respectively.  They are in the
 * low and high half of cmd->arg1 or cmd->d[0].
 *
 * We scan options and store the bits we find set.  We succeed if
 *
 *	(want_set & ~bits) == 0 && (want_clear & ~bits) == want_clear
 *
 * The code is sometimes optimized not to store additional variables.
 */
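/*
 * Worked example (not part of the original source): for a rule like
 * "tcpflags syn,!ack" the low byte of cmd->arg1 holds TH_SYN (bits that
 * must be set) and the high byte holds TH_ACK (bits that must be clear).
 * A packet whose TCP flags are just TH_SYN yields bits == TH_SYN, so
 * (TH_SYN & ~bits) == 0 and (TH_ACK & ~bits) == TH_ACK, and flags_match()
 * below returns 1; a SYN|ACK packet fails the second test and returns 0.
 */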
static int
flags_match(ipfw_insn *cmd, uint8_t bits)
{
	u_char want_clear;
	bits = ~bits;

	if (((cmd->arg1 & 0xff) & bits) != 0)
		return 0;	/* some bits we want set were clear */

	want_clear = (cmd->arg1 >> 8) & 0xff;
	if ((want_clear & bits) != want_clear)
		return 0;	/* some bits we want clear were set */
	return 1;
}

static int
ipopts_match(struct ip *ip, ipfw_insn *cmd)
{
	int optlen, bits = 0;
	u_char *cp = (u_char *)(ip + 1);
	int x = (ip->ip_hl << 2) - sizeof(struct ip);

	for (; x > 0; x -= optlen, cp += optlen) {
		int opt = cp[IPOPT_OPTVAL];

		if (opt == IPOPT_EOL)
			break;

		if (opt == IPOPT_NOP) {
			optlen = 1;
		} else {
			optlen = cp[IPOPT_OLEN];
			if (optlen <= 0 || optlen > x)
				return 0;	/* invalid or truncated */
		}

		switch (opt) {
		case IPOPT_LSRR:
			bits |= IP_FW_IPOPT_LSRR;
			break;

		case IPOPT_SSRR:
			bits |= IP_FW_IPOPT_SSRR;
			break;

		case IPOPT_RR:
			bits |= IP_FW_IPOPT_RR;
			break;

		case IPOPT_TS:
			bits |= IP_FW_IPOPT_TS;
			break;

		default:
			break;
		}
	}
	return (flags_match(cmd, bits));
}

static int
tcpopts_match(struct ip *ip, ipfw_insn *cmd)
{
	int optlen, bits = 0;
	struct tcphdr *tcp = L3HDR(struct tcphdr, ip);
	u_char *cp = (u_char *)(tcp + 1);
	int x = (tcp->th_off << 2) - sizeof(struct tcphdr);

	for (; x > 0; x -= optlen, cp += optlen) {
		int opt = cp[0];

		if (opt == TCPOPT_EOL)
			break;

		if (opt == TCPOPT_NOP) {
			optlen = 1;
		} else {
			optlen = cp[1];
			if (optlen <= 0)
				break;
		}

		switch (opt) {
		case TCPOPT_MAXSEG:
			bits |= IP_FW_TCPOPT_MSS;
			break;

		case TCPOPT_WINDOW:
			bits |= IP_FW_TCPOPT_WINDOW;
			break;

		case TCPOPT_SACK_PERMITTED:
		case TCPOPT_SACK:
			bits |= IP_FW_TCPOPT_SACK;
			break;

		case TCPOPT_TIMESTAMP:
			bits |= IP_FW_TCPOPT_TS;
			break;

		case TCPOPT_CC:
		case TCPOPT_CCNEW:
		case TCPOPT_CCECHO:
			bits |= IP_FW_TCPOPT_CC;
			break;

		default:
			break;
		}
	}
	return (flags_match(cmd, bits));
}

static int
iface_match(struct ifnet *ifp, ipfw_insn_if *cmd)
{
	if (ifp == NULL)	/* no iface with this packet, match fails */
		return 0;

	/* Check by name or by IP address */
	if (cmd->name[0] != '\0') {	/* match by name */
		/* Check name */
		if (cmd->p.glob) {
			if (kfnmatch(cmd->name, ifp->if_xname, 0) == 0)
				return(1);
		} else {
			if (strncmp(ifp->if_xname, cmd->name, IFNAMSIZ) == 0)
				return(1);
		}
	} else {
		struct ifaddr_container *ifac;

		TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
			struct ifaddr *ia = ifac->ifa;

			if (ia->ifa_addr == NULL)
				continue;
			if (ia->ifa_addr->sa_family != AF_INET)
				continue;
			if (cmd->p.ip.s_addr == ((struct sockaddr_in *)
			    (ia->ifa_addr))->sin_addr.s_addr)
				return(1);	/* match */
		}
	}
	return(0);	/* no match, fail ... */
}

#define SNPARGS(buf, len) buf + len, sizeof(buf) > len ? sizeof(buf) - len : 0

/*
 * We enter here when we have a rule with O_LOG.
 * XXX this function alone takes about 2Kbytes of code!
656 */ 657 static void 658 ipfw_log(struct ip_fw *f, u_int hlen, struct ether_header *eh, 659 struct mbuf *m, struct ifnet *oif) 660 { 661 char *action; 662 int limit_reached = 0; 663 char action2[40], proto[48], fragment[28]; 664 665 fragment[0] = '\0'; 666 proto[0] = '\0'; 667 668 if (f == NULL) { /* bogus pkt */ 669 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 670 671 if (verbose_limit != 0 && 672 ctx->ipfw_norule_counter >= verbose_limit) 673 return; 674 ctx->ipfw_norule_counter++; 675 if (ctx->ipfw_norule_counter == verbose_limit) 676 limit_reached = verbose_limit; 677 action = "Refuse"; 678 } else { /* O_LOG is the first action, find the real one */ 679 ipfw_insn *cmd = ACTION_PTR(f); 680 ipfw_insn_log *l = (ipfw_insn_log *)cmd; 681 682 if (l->max_log != 0 && l->log_left == 0) 683 return; 684 l->log_left--; 685 if (l->log_left == 0) 686 limit_reached = l->max_log; 687 cmd += F_LEN(cmd); /* point to first action */ 688 if (cmd->opcode == O_PROB) 689 cmd += F_LEN(cmd); 690 691 action = action2; 692 switch (cmd->opcode) { 693 case O_DENY: 694 action = "Deny"; 695 break; 696 697 case O_REJECT: 698 if (cmd->arg1==ICMP_REJECT_RST) { 699 action = "Reset"; 700 } else if (cmd->arg1==ICMP_UNREACH_HOST) { 701 action = "Reject"; 702 } else { 703 ksnprintf(SNPARGS(action2, 0), "Unreach %d", 704 cmd->arg1); 705 } 706 break; 707 708 case O_ACCEPT: 709 action = "Accept"; 710 break; 711 712 case O_COUNT: 713 action = "Count"; 714 break; 715 716 case O_DIVERT: 717 ksnprintf(SNPARGS(action2, 0), "Divert %d", cmd->arg1); 718 break; 719 720 case O_TEE: 721 ksnprintf(SNPARGS(action2, 0), "Tee %d", cmd->arg1); 722 break; 723 724 case O_SKIPTO: 725 ksnprintf(SNPARGS(action2, 0), "SkipTo %d", cmd->arg1); 726 break; 727 728 case O_PIPE: 729 ksnprintf(SNPARGS(action2, 0), "Pipe %d", cmd->arg1); 730 break; 731 732 case O_QUEUE: 733 ksnprintf(SNPARGS(action2, 0), "Queue %d", cmd->arg1); 734 break; 735 736 case O_FORWARD_IP: 737 { 738 ipfw_insn_sa *sa = (ipfw_insn_sa *)cmd; 739 int len; 740 741 len = ksnprintf(SNPARGS(action2, 0), 742 "Forward to %s", 743 inet_ntoa(sa->sa.sin_addr)); 744 if (sa->sa.sin_port) { 745 ksnprintf(SNPARGS(action2, len), ":%d", 746 sa->sa.sin_port); 747 } 748 } 749 break; 750 751 default: 752 action = "UNKNOWN"; 753 break; 754 } 755 } 756 757 if (hlen == 0) { /* non-ip */ 758 ksnprintf(SNPARGS(proto, 0), "MAC"); 759 } else { 760 struct ip *ip = mtod(m, struct ip *); 761 /* these three are all aliases to the same thing */ 762 struct icmp *const icmp = L3HDR(struct icmp, ip); 763 struct tcphdr *const tcp = (struct tcphdr *)icmp; 764 struct udphdr *const udp = (struct udphdr *)icmp; 765 766 int ip_off, offset, ip_len; 767 int len; 768 769 if (eh != NULL) { /* layer 2 packets are as on the wire */ 770 ip_off = ntohs(ip->ip_off); 771 ip_len = ntohs(ip->ip_len); 772 } else { 773 ip_off = ip->ip_off; 774 ip_len = ip->ip_len; 775 } 776 offset = ip_off & IP_OFFMASK; 777 switch (ip->ip_p) { 778 case IPPROTO_TCP: 779 len = ksnprintf(SNPARGS(proto, 0), "TCP %s", 780 inet_ntoa(ip->ip_src)); 781 if (offset == 0) { 782 ksnprintf(SNPARGS(proto, len), ":%d %s:%d", 783 ntohs(tcp->th_sport), 784 inet_ntoa(ip->ip_dst), 785 ntohs(tcp->th_dport)); 786 } else { 787 ksnprintf(SNPARGS(proto, len), " %s", 788 inet_ntoa(ip->ip_dst)); 789 } 790 break; 791 792 case IPPROTO_UDP: 793 len = ksnprintf(SNPARGS(proto, 0), "UDP %s", 794 inet_ntoa(ip->ip_src)); 795 if (offset == 0) { 796 ksnprintf(SNPARGS(proto, len), ":%d %s:%d", 797 ntohs(udp->uh_sport), 798 inet_ntoa(ip->ip_dst), 799 ntohs(udp->uh_dport)); 800 } else { 
801 ksnprintf(SNPARGS(proto, len), " %s", 802 inet_ntoa(ip->ip_dst)); 803 } 804 break; 805 806 case IPPROTO_ICMP: 807 if (offset == 0) { 808 len = ksnprintf(SNPARGS(proto, 0), 809 "ICMP:%u.%u ", 810 icmp->icmp_type, 811 icmp->icmp_code); 812 } else { 813 len = ksnprintf(SNPARGS(proto, 0), "ICMP "); 814 } 815 len += ksnprintf(SNPARGS(proto, len), "%s", 816 inet_ntoa(ip->ip_src)); 817 ksnprintf(SNPARGS(proto, len), " %s", 818 inet_ntoa(ip->ip_dst)); 819 break; 820 821 default: 822 len = ksnprintf(SNPARGS(proto, 0), "P:%d %s", ip->ip_p, 823 inet_ntoa(ip->ip_src)); 824 ksnprintf(SNPARGS(proto, len), " %s", 825 inet_ntoa(ip->ip_dst)); 826 break; 827 } 828 829 if (ip_off & (IP_MF | IP_OFFMASK)) { 830 ksnprintf(SNPARGS(fragment, 0), " (frag %d:%d@%d%s)", 831 ntohs(ip->ip_id), ip_len - (ip->ip_hl << 2), 832 offset << 3, (ip_off & IP_MF) ? "+" : ""); 833 } 834 } 835 836 if (oif || m->m_pkthdr.rcvif) { 837 log(LOG_SECURITY | LOG_INFO, 838 "ipfw: %d %s %s %s via %s%s\n", 839 f ? f->rulenum : -1, 840 action, proto, oif ? "out" : "in", 841 oif ? oif->if_xname : m->m_pkthdr.rcvif->if_xname, 842 fragment); 843 } else { 844 log(LOG_SECURITY | LOG_INFO, 845 "ipfw: %d %s %s [no if info]%s\n", 846 f ? f->rulenum : -1, 847 action, proto, fragment); 848 } 849 850 if (limit_reached) { 851 log(LOG_SECURITY | LOG_NOTICE, 852 "ipfw: limit %d reached on entry %d\n", 853 limit_reached, f ? f->rulenum : -1); 854 } 855 } 856 857 #undef SNPARGS 858 859 /* 860 * IMPORTANT: the hash function for dynamic rules must be commutative 861 * in source and destination (ip,port), because rules are bidirectional 862 * and we want to find both in the same bucket. 863 */ 864 static __inline int 865 hash_packet(struct ipfw_flow_id *id) 866 { 867 uint32_t i; 868 869 i = (id->dst_ip) ^ (id->src_ip) ^ (id->dst_port) ^ (id->src_port); 870 i &= (curr_dyn_buckets - 1); 871 return i; 872 } 873 874 /** 875 * unlink a dynamic rule from a chain. prev is a pointer to 876 * the previous one, q is a pointer to the rule to delete, 877 * head is a pointer to the head of the queue. 878 * Modifies q and potentially also head. 879 */ 880 #define UNLINK_DYN_RULE(prev, head, q) \ 881 do { \ 882 ipfw_dyn_rule *old_q = q; \ 883 \ 884 /* remove a refcount to the parent */ \ 885 if (q->dyn_type == O_LIMIT) \ 886 q->parent->count--; \ 887 DPRINTF("-- unlink entry 0x%08x %d -> 0x%08x %d, %d left\n", \ 888 q->id.src_ip, q->id.src_port, \ 889 q->id.dst_ip, q->id.dst_port, dyn_count - 1); \ 890 if (prev != NULL) \ 891 prev->next = q = q->next; \ 892 else \ 893 head = q = q->next; \ 894 KASSERT(dyn_count > 0, ("invalid dyn count %u\n", dyn_count)); \ 895 dyn_count--; \ 896 kfree(old_q, M_IPFW); \ 897 } while (0) 898 899 #define TIME_LEQ(a, b) ((int)((a) - (b)) <= 0) 900 901 /** 902 * Remove dynamic rules pointing to "rule", or all of them if rule == NULL. 903 * 904 * If keep_me == NULL, rules are deleted even if not expired, 905 * otherwise only expired rules are removed. 906 * 907 * The value of the second parameter is also used to point to identify 908 * a rule we absolutely do not want to remove (e.g. because we are 909 * holding a reference to it -- this is the case with O_LIMIT_PARENT 910 * rules). The pointer is only used for comparison, so any non-null 911 * value will do. 
912 */ 913 static void 914 remove_dyn_rule_locked(struct ip_fw *rule, ipfw_dyn_rule *keep_me) 915 { 916 static uint32_t last_remove = 0; /* XXX */ 917 918 #define FORCE (keep_me == NULL) 919 920 ipfw_dyn_rule *prev, *q; 921 int i, pass = 0, max_pass = 0, unlinked = 0; 922 923 if (ipfw_dyn_v == NULL || dyn_count == 0) 924 return; 925 /* do not expire more than once per second, it is useless */ 926 if (!FORCE && last_remove == time_second) 927 return; 928 last_remove = time_second; 929 930 /* 931 * because O_LIMIT refer to parent rules, during the first pass only 932 * remove child and mark any pending LIMIT_PARENT, and remove 933 * them in a second pass. 934 */ 935 next_pass: 936 for (i = 0; i < curr_dyn_buckets; i++) { 937 for (prev = NULL, q = ipfw_dyn_v[i]; q;) { 938 /* 939 * Logic can become complex here, so we split tests. 940 */ 941 if (q == keep_me) 942 goto next; 943 if (rule != NULL && rule->stub != q->stub) 944 goto next; /* not the one we are looking for */ 945 if (q->dyn_type == O_LIMIT_PARENT) { 946 /* 947 * handle parent in the second pass, 948 * record we need one. 949 */ 950 max_pass = 1; 951 if (pass == 0) 952 goto next; 953 if (FORCE && q->count != 0) { 954 /* XXX should not happen! */ 955 kprintf("OUCH! cannot remove rule, " 956 "count %d\n", q->count); 957 } 958 } else { 959 if (!FORCE && !TIME_LEQ(q->expire, time_second)) 960 goto next; 961 } 962 unlinked = 1; 963 UNLINK_DYN_RULE(prev, ipfw_dyn_v[i], q); 964 continue; 965 next: 966 prev = q; 967 q = q->next; 968 } 969 } 970 if (pass++ < max_pass) 971 goto next_pass; 972 973 if (unlinked) 974 ++dyn_buckets_gen; 975 976 #undef FORCE 977 } 978 979 /** 980 * lookup a dynamic rule. 981 */ 982 static ipfw_dyn_rule * 983 lookup_dyn_rule(struct ipfw_flow_id *pkt, int *match_direction, 984 struct tcphdr *tcp) 985 { 986 /* 987 * stateful ipfw extensions. 988 * Lookup into dynamic session queue 989 */ 990 #define MATCH_REVERSE 0 991 #define MATCH_FORWARD 1 992 #define MATCH_NONE 2 993 #define MATCH_UNKNOWN 3 994 int i, dir = MATCH_NONE; 995 ipfw_dyn_rule *prev, *q=NULL; 996 997 if (ipfw_dyn_v == NULL) 998 goto done; /* not found */ 999 1000 i = hash_packet(pkt); 1001 for (prev = NULL, q = ipfw_dyn_v[i]; q != NULL;) { 1002 if (q->dyn_type == O_LIMIT_PARENT) 1003 goto next; 1004 1005 if (TIME_LEQ(q->expire, time_second)) { 1006 /* 1007 * Entry expired; skip. 1008 * Let ipfw_tick() take care of it 1009 */ 1010 goto next; 1011 } 1012 1013 if (pkt->proto == q->id.proto) { 1014 if (pkt->src_ip == q->id.src_ip && 1015 pkt->dst_ip == q->id.dst_ip && 1016 pkt->src_port == q->id.src_port && 1017 pkt->dst_port == q->id.dst_port) { 1018 dir = MATCH_FORWARD; 1019 break; 1020 } 1021 if (pkt->src_ip == q->id.dst_ip && 1022 pkt->dst_ip == q->id.src_ip && 1023 pkt->src_port == q->id.dst_port && 1024 pkt->dst_port == q->id.src_port) { 1025 dir = MATCH_REVERSE; 1026 break; 1027 } 1028 } 1029 next: 1030 prev = q; 1031 q = q->next; 1032 } 1033 if (q == NULL) 1034 goto done; /* q = NULL, not found */ 1035 1036 if (pkt->proto == IPPROTO_TCP) { /* update state according to flags */ 1037 u_char flags = pkt->flags & (TH_FIN|TH_SYN|TH_RST); 1038 1039 #define BOTH_SYN (TH_SYN | (TH_SYN << 8)) 1040 #define BOTH_FIN (TH_FIN | (TH_FIN << 8)) 1041 1042 q->state |= (dir == MATCH_FORWARD ) ? 
			    flags : (flags << 8);
		switch (q->state) {
		case TH_SYN:			/* opening */
			q->expire = time_second + dyn_syn_lifetime;
			break;

		case BOTH_SYN:			/* move to established */
		case BOTH_SYN | TH_FIN:		/* one side tries to close */
		case BOTH_SYN | (TH_FIN << 8):
			if (tcp) {
				uint32_t ack = ntohl(tcp->th_ack);

#define _SEQ_GE(a, b)	((int)(a) - (int)(b) >= 0)

				if (dir == MATCH_FORWARD) {
					if (q->ack_fwd == 0 ||
					    _SEQ_GE(ack, q->ack_fwd))
						q->ack_fwd = ack;
					else	/* ignore out-of-sequence */
						break;
				} else {
					if (q->ack_rev == 0 ||
					    _SEQ_GE(ack, q->ack_rev))
						q->ack_rev = ack;
					else	/* ignore out-of-sequence */
						break;
				}
#undef _SEQ_GE
			}
			q->expire = time_second + dyn_ack_lifetime;
			break;

		case BOTH_SYN | BOTH_FIN:	/* both sides closed */
			KKASSERT(dyn_fin_lifetime < dyn_keepalive_period);
			q->expire = time_second + dyn_fin_lifetime;
			break;

		default:
#if 0
			/*
			 * reset or some invalid combination, but can also
			 * occur if we use keep-state the wrong way.
			 */
			if ((q->state & ((TH_RST << 8) | TH_RST)) == 0)
				kprintf("invalid state: 0x%x\n", q->state);
#endif
			KKASSERT(dyn_rst_lifetime < dyn_keepalive_period);
			q->expire = time_second + dyn_rst_lifetime;
			break;
		}
	} else if (pkt->proto == IPPROTO_UDP) {
		q->expire = time_second + dyn_udp_lifetime;
	} else {
		/* other protocols */
		q->expire = time_second + dyn_short_lifetime;
	}
done:
	if (match_direction)
		*match_direction = dir;
	return q;
}
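/*
 * Worked example (not part of the original source) of the state encoding
 * used above: TCP flags seen in the forward direction accumulate in the
 * low byte of q->state, flags seen in the reverse direction in the high
 * byte.  After the initial SYN (forward) and the SYN|ACK reply (reverse,
 * with TH_ACK masked off), q->state == TH_SYN | (TH_SYN << 8) == BOTH_SYN,
 * i.e. the "established" case; a later FIN from the reverse side moves it
 * to BOTH_SYN | (TH_FIN << 8).
 */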
static struct ip_fw *
lookup_rule(struct ipfw_flow_id *pkt, int *match_direction, struct tcphdr *tcp,
	    uint16_t len, int *deny)
{
	struct ip_fw *rule = NULL;
	ipfw_dyn_rule *q;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	uint32_t gen;

	*deny = 0;
	gen = ctx->ipfw_gen;

	lockmgr(&dyn_lock, LK_SHARED);

	if (ctx->ipfw_gen != gen) {
		/*
		 * The static rules were changed while we were waiting
		 * for the dynamic hash table lock; deny this packet,
		 * since it is _not_ known whether it is safe to keep
		 * iterating the static rules.
		 */
		*deny = 1;
		goto back;
	}

	q = lookup_dyn_rule(pkt, match_direction, tcp);
	if (q == NULL) {
		rule = NULL;
	} else {
		rule = q->stub->rule[mycpuid];
		KKASSERT(rule->stub == q->stub && rule->cpuid == mycpuid);

		/* XXX */
		q->pcnt++;
		q->bcnt += len;
	}
back:
	lockmgr(&dyn_lock, LK_RELEASE);
	return rule;
}

static void
realloc_dynamic_table(void)
{
	ipfw_dyn_rule **old_dyn_v;
	uint32_t old_curr_dyn_buckets;

	KASSERT(dyn_buckets <= 65536 && (dyn_buckets & (dyn_buckets - 1)) == 0,
		("invalid dyn_buckets %d\n", dyn_buckets));

	/* Save the current buckets array for later error recovery */
	old_dyn_v = ipfw_dyn_v;
	old_curr_dyn_buckets = curr_dyn_buckets;

	curr_dyn_buckets = dyn_buckets;
	for (;;) {
		ipfw_dyn_v = kmalloc(curr_dyn_buckets * sizeof(ipfw_dyn_rule *),
				     M_IPFW, M_NOWAIT | M_ZERO);
		if (ipfw_dyn_v != NULL || curr_dyn_buckets <= 2)
			break;

		curr_dyn_buckets /= 2;
		if (curr_dyn_buckets <= old_curr_dyn_buckets &&
		    old_dyn_v != NULL) {
			/*
			 * Don't try allocating a smaller buckets array;
			 * reuse the old one, which already contains
			 * enough buckets.
			 */
			break;
		}
	}

	if (ipfw_dyn_v != NULL) {
		if (old_dyn_v != NULL)
			kfree(old_dyn_v, M_IPFW);
	} else {
		/* Allocation failed, restore old buckets array */
		ipfw_dyn_v = old_dyn_v;
		curr_dyn_buckets = old_curr_dyn_buckets;
	}

	if (ipfw_dyn_v != NULL)
		++dyn_buckets_gen;
}
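/*
 * Worked example (not part of the original source): realloc_dynamic_table()
 * only ever installs a power-of-2 bucket count (see the KASSERT above),
 * which is what allows hash_packet() to reduce the hash with
 * "& (curr_dyn_buckets - 1)" instead of a modulo.  With the default of
 * 256 buckets the mask is 0xff, and halving on allocation failure
 * (256 -> 128 -> 64 ...) preserves the power-of-2 property.
 */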
/**
 * Install state of type 'type' for a dynamic session.
 * The hash table contains the following types of rules:
 * - regular rules (O_KEEP_STATE)
 * - rules for sessions with a limited number of sessions per user
 *   (O_LIMIT).  When they are created, the parent's count is
 *   increased by 1, and decreased on delete.  In this case,
 *   the third parameter is the parent rule and not the chain.
 * - "parent" rules for the above (O_LIMIT_PARENT).
 */
static ipfw_dyn_rule *
add_dyn_rule(struct ipfw_flow_id *id, uint8_t dyn_type, struct ip_fw *rule)
{
	ipfw_dyn_rule *r;
	int i;

	if (ipfw_dyn_v == NULL ||
	    (dyn_count == 0 && dyn_buckets != curr_dyn_buckets)) {
		realloc_dynamic_table();
		if (ipfw_dyn_v == NULL)
			return NULL;	/* failed ! */
	}
	i = hash_packet(id);

	r = kmalloc(sizeof(*r), M_IPFW, M_NOWAIT | M_ZERO);
	if (r == NULL) {
		kprintf("sorry cannot allocate state\n");
		return NULL;
	}

	/* increase refcount on parent, and set pointer */
	if (dyn_type == O_LIMIT) {
		ipfw_dyn_rule *parent = (ipfw_dyn_rule *)rule;

		if (parent->dyn_type != O_LIMIT_PARENT)
			panic("invalid parent");
		parent->count++;
		r->parent = parent;
		rule = parent->stub->rule[mycpuid];
		KKASSERT(rule->stub == parent->stub);
	}
	KKASSERT(rule->cpuid == mycpuid && rule->stub != NULL);

	r->id = *id;
	r->expire = time_second + dyn_syn_lifetime;
	r->stub = rule->stub;
	r->dyn_type = dyn_type;
	r->pcnt = r->bcnt = 0;
	r->count = 0;

	r->bucket = i;
	r->next = ipfw_dyn_v[i];
	ipfw_dyn_v[i] = r;
	dyn_count++;
	dyn_buckets_gen++;
	DPRINTF("-- add dyn entry ty %d 0x%08x %d -> 0x%08x %d, total %d\n",
		dyn_type,
		r->id.src_ip, r->id.src_port,
		r->id.dst_ip, r->id.dst_port, dyn_count);
	return r;
}

/**
 * Lookup dynamic parent rule using pkt and rule as search keys.
 * If the lookup fails, then install one.
 */
static ipfw_dyn_rule *
lookup_dyn_parent(struct ipfw_flow_id *pkt, struct ip_fw *rule)
{
	ipfw_dyn_rule *q;
	int i;

	if (ipfw_dyn_v) {
		i = hash_packet(pkt);
		for (q = ipfw_dyn_v[i]; q != NULL; q = q->next) {
			if (q->dyn_type == O_LIMIT_PARENT &&
			    rule->stub == q->stub &&
			    pkt->proto == q->id.proto &&
			    pkt->src_ip == q->id.src_ip &&
			    pkt->dst_ip == q->id.dst_ip &&
			    pkt->src_port == q->id.src_port &&
			    pkt->dst_port == q->id.dst_port) {
				q->expire = time_second + dyn_short_lifetime;
				DPRINTF("lookup_dyn_parent found 0x%p\n", q);
				return q;
			}
		}
	}
	return add_dyn_rule(pkt, O_LIMIT_PARENT, rule);
}

/**
 * Install dynamic state for rule type cmd->o.opcode
 *
 * Returns 1 (failure) if state is not installed because of errors or because
 * session limitations are enforced.
 */
static int
install_state_locked(struct ip_fw *rule, ipfw_insn_limit *cmd,
		     struct ip_fw_args *args)
{
	static int last_log;	/* XXX */

	ipfw_dyn_rule *q;

	DPRINTF("-- install state type %d 0x%08x %u -> 0x%08x %u\n",
		cmd->o.opcode,
		args->f_id.src_ip, args->f_id.src_port,
		args->f_id.dst_ip, args->f_id.dst_port);

	q = lookup_dyn_rule(&args->f_id, NULL, NULL);
	if (q != NULL) {	/* should never occur */
		if (last_log != time_second) {
			last_log = time_second;
			kprintf(" install_state: entry already present, done\n");
		}
		return 0;
	}

	if (dyn_count >= dyn_max) {
		/*
		 * Run out of slots, try to remove any expired rule.
1311 */ 1312 remove_dyn_rule_locked(NULL, (ipfw_dyn_rule *)1); 1313 if (dyn_count >= dyn_max) { 1314 if (last_log != time_second) { 1315 last_log = time_second; 1316 kprintf("install_state: " 1317 "Too many dynamic rules\n"); 1318 } 1319 return 1; /* cannot install, notify caller */ 1320 } 1321 } 1322 1323 switch (cmd->o.opcode) { 1324 case O_KEEP_STATE: /* bidir rule */ 1325 if (add_dyn_rule(&args->f_id, O_KEEP_STATE, rule) == NULL) 1326 return 1; 1327 break; 1328 1329 case O_LIMIT: /* limit number of sessions */ 1330 { 1331 uint16_t limit_mask = cmd->limit_mask; 1332 struct ipfw_flow_id id; 1333 ipfw_dyn_rule *parent; 1334 1335 DPRINTF("installing dyn-limit rule %d\n", 1336 cmd->conn_limit); 1337 1338 id.dst_ip = id.src_ip = 0; 1339 id.dst_port = id.src_port = 0; 1340 id.proto = args->f_id.proto; 1341 1342 if (limit_mask & DYN_SRC_ADDR) 1343 id.src_ip = args->f_id.src_ip; 1344 if (limit_mask & DYN_DST_ADDR) 1345 id.dst_ip = args->f_id.dst_ip; 1346 if (limit_mask & DYN_SRC_PORT) 1347 id.src_port = args->f_id.src_port; 1348 if (limit_mask & DYN_DST_PORT) 1349 id.dst_port = args->f_id.dst_port; 1350 1351 parent = lookup_dyn_parent(&id, rule); 1352 if (parent == NULL) { 1353 kprintf("add parent failed\n"); 1354 return 1; 1355 } 1356 1357 if (parent->count >= cmd->conn_limit) { 1358 /* 1359 * See if we can remove some expired rule. 1360 */ 1361 remove_dyn_rule_locked(rule, parent); 1362 if (parent->count >= cmd->conn_limit) { 1363 if (fw_verbose && 1364 last_log != time_second) { 1365 last_log = time_second; 1366 log(LOG_SECURITY | LOG_DEBUG, 1367 "drop session, " 1368 "too many entries\n"); 1369 } 1370 return 1; 1371 } 1372 } 1373 if (add_dyn_rule(&args->f_id, O_LIMIT, 1374 (struct ip_fw *)parent) == NULL) 1375 return 1; 1376 } 1377 break; 1378 default: 1379 kprintf("unknown dynamic rule type %u\n", cmd->o.opcode); 1380 return 1; 1381 } 1382 lookup_dyn_rule(&args->f_id, NULL, NULL); /* XXX just set lifetime */ 1383 return 0; 1384 } 1385 1386 static int 1387 install_state(struct ip_fw *rule, ipfw_insn_limit *cmd, 1388 struct ip_fw_args *args, int *deny) 1389 { 1390 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 1391 uint32_t gen; 1392 int ret = 0; 1393 1394 *deny = 0; 1395 gen = ctx->ipfw_gen; 1396 1397 lockmgr(&dyn_lock, LK_EXCLUSIVE); 1398 if (ctx->ipfw_gen != gen) { 1399 /* See the comment in lookup_rule() */ 1400 *deny = 1; 1401 } else { 1402 ret = install_state_locked(rule, cmd, args); 1403 } 1404 lockmgr(&dyn_lock, LK_RELEASE); 1405 1406 return ret; 1407 } 1408 1409 /* 1410 * Transmit a TCP packet, containing either a RST or a keepalive. 1411 * When flags & TH_RST, we are sending a RST packet, because of a 1412 * "reset" action matched the packet. 1413 * Otherwise we are sending a keepalive, and flags & TH_ 1414 */ 1415 static void 1416 send_pkt(struct ipfw_flow_id *id, uint32_t seq, uint32_t ack, int flags) 1417 { 1418 struct mbuf *m; 1419 struct ip *ip; 1420 struct tcphdr *tcp; 1421 struct route sro; /* fake route */ 1422 1423 MGETHDR(m, MB_DONTWAIT, MT_HEADER); 1424 if (m == NULL) 1425 return; 1426 m->m_pkthdr.rcvif = NULL; 1427 m->m_pkthdr.len = m->m_len = sizeof(struct ip) + sizeof(struct tcphdr); 1428 m->m_data += max_linkhdr; 1429 1430 ip = mtod(m, struct ip *); 1431 bzero(ip, m->m_len); 1432 tcp = (struct tcphdr *)(ip + 1); /* no IP options */ 1433 ip->ip_p = IPPROTO_TCP; 1434 tcp->th_off = 5; 1435 1436 /* 1437 * Assume we are sending a RST (or a keepalive in the reverse 1438 * direction), swap src and destination addresses and ports. 
1439 */ 1440 ip->ip_src.s_addr = htonl(id->dst_ip); 1441 ip->ip_dst.s_addr = htonl(id->src_ip); 1442 tcp->th_sport = htons(id->dst_port); 1443 tcp->th_dport = htons(id->src_port); 1444 if (flags & TH_RST) { /* we are sending a RST */ 1445 if (flags & TH_ACK) { 1446 tcp->th_seq = htonl(ack); 1447 tcp->th_ack = htonl(0); 1448 tcp->th_flags = TH_RST; 1449 } else { 1450 if (flags & TH_SYN) 1451 seq++; 1452 tcp->th_seq = htonl(0); 1453 tcp->th_ack = htonl(seq); 1454 tcp->th_flags = TH_RST | TH_ACK; 1455 } 1456 } else { 1457 /* 1458 * We are sending a keepalive. flags & TH_SYN determines 1459 * the direction, forward if set, reverse if clear. 1460 * NOTE: seq and ack are always assumed to be correct 1461 * as set by the caller. This may be confusing... 1462 */ 1463 if (flags & TH_SYN) { 1464 /* 1465 * we have to rewrite the correct addresses! 1466 */ 1467 ip->ip_dst.s_addr = htonl(id->dst_ip); 1468 ip->ip_src.s_addr = htonl(id->src_ip); 1469 tcp->th_dport = htons(id->dst_port); 1470 tcp->th_sport = htons(id->src_port); 1471 } 1472 tcp->th_seq = htonl(seq); 1473 tcp->th_ack = htonl(ack); 1474 tcp->th_flags = TH_ACK; 1475 } 1476 1477 /* 1478 * set ip_len to the payload size so we can compute 1479 * the tcp checksum on the pseudoheader 1480 * XXX check this, could save a couple of words ? 1481 */ 1482 ip->ip_len = htons(sizeof(struct tcphdr)); 1483 tcp->th_sum = in_cksum(m, m->m_pkthdr.len); 1484 1485 /* 1486 * now fill fields left out earlier 1487 */ 1488 ip->ip_ttl = ip_defttl; 1489 ip->ip_len = m->m_pkthdr.len; 1490 1491 bzero(&sro, sizeof(sro)); 1492 ip_rtaddr(ip->ip_dst, &sro); 1493 1494 m->m_pkthdr.fw_flags |= IPFW_MBUF_GENERATED; 1495 ip_output(m, NULL, &sro, 0, NULL, NULL); 1496 if (sro.ro_rt) 1497 RTFREE(sro.ro_rt); 1498 } 1499 1500 /* 1501 * sends a reject message, consuming the mbuf passed as an argument. 1502 */ 1503 static void 1504 send_reject(struct ip_fw_args *args, int code, int offset, int ip_len) 1505 { 1506 if (code != ICMP_REJECT_RST) { /* Send an ICMP unreach */ 1507 /* We need the IP header in host order for icmp_error(). */ 1508 if (args->eh != NULL) { 1509 struct ip *ip = mtod(args->m, struct ip *); 1510 1511 ip->ip_len = ntohs(ip->ip_len); 1512 ip->ip_off = ntohs(ip->ip_off); 1513 } 1514 icmp_error(args->m, ICMP_UNREACH, code, 0L, 0); 1515 } else if (offset == 0 && args->f_id.proto == IPPROTO_TCP) { 1516 struct tcphdr *const tcp = 1517 L3HDR(struct tcphdr, mtod(args->m, struct ip *)); 1518 1519 if ((tcp->th_flags & TH_RST) == 0) { 1520 send_pkt(&args->f_id, ntohl(tcp->th_seq), 1521 ntohl(tcp->th_ack), tcp->th_flags | TH_RST); 1522 } 1523 m_freem(args->m); 1524 } else { 1525 m_freem(args->m); 1526 } 1527 args->m = NULL; 1528 } 1529 1530 /** 1531 * 1532 * Given an ip_fw *, lookup_next_rule will return a pointer 1533 * to the next rule, which can be either the jump 1534 * target (for skipto instructions) or the next one in the list (in 1535 * all other cases including a missing jump target). 1536 * The result is also written in the "next_rule" field of the rule. 1537 * Backward jumps are not allowed, so start looking from the next 1538 * rule... 1539 * 1540 * This never returns NULL -- in case we do not have an exact match, 1541 * the next rule is returned. When the ruleset is changed, 1542 * pointers are flushed so we are always correct. 
1543 */ 1544 1545 static struct ip_fw * 1546 lookup_next_rule(struct ip_fw *me) 1547 { 1548 struct ip_fw *rule = NULL; 1549 ipfw_insn *cmd; 1550 1551 /* look for action, in case it is a skipto */ 1552 cmd = ACTION_PTR(me); 1553 if (cmd->opcode == O_LOG) 1554 cmd += F_LEN(cmd); 1555 if (cmd->opcode == O_SKIPTO) { 1556 for (rule = me->next; rule; rule = rule->next) { 1557 if (rule->rulenum >= cmd->arg1) 1558 break; 1559 } 1560 } 1561 if (rule == NULL) /* failure or not a skipto */ 1562 rule = me->next; 1563 me->next_rule = rule; 1564 return rule; 1565 } 1566 1567 static int 1568 _ipfw_match_uid(const struct ipfw_flow_id *fid, struct ifnet *oif, 1569 enum ipfw_opcodes opcode, uid_t uid) 1570 { 1571 struct in_addr src_ip, dst_ip; 1572 struct inpcbinfo *pi; 1573 int wildcard; 1574 struct inpcb *pcb; 1575 1576 if (fid->proto == IPPROTO_TCP) { 1577 wildcard = 0; 1578 pi = &tcbinfo[mycpuid]; 1579 } else if (fid->proto == IPPROTO_UDP) { 1580 wildcard = 1; 1581 pi = &udbinfo; 1582 } else { 1583 return 0; 1584 } 1585 1586 /* 1587 * Values in 'fid' are in host byte order 1588 */ 1589 dst_ip.s_addr = htonl(fid->dst_ip); 1590 src_ip.s_addr = htonl(fid->src_ip); 1591 if (oif) { 1592 pcb = in_pcblookup_hash(pi, 1593 dst_ip, htons(fid->dst_port), 1594 src_ip, htons(fid->src_port), 1595 wildcard, oif); 1596 } else { 1597 pcb = in_pcblookup_hash(pi, 1598 src_ip, htons(fid->src_port), 1599 dst_ip, htons(fid->dst_port), 1600 wildcard, NULL); 1601 } 1602 if (pcb == NULL || pcb->inp_socket == NULL) 1603 return 0; 1604 1605 if (opcode == O_UID) { 1606 #define socheckuid(a,b) ((a)->so_cred->cr_uid != (b)) 1607 return !socheckuid(pcb->inp_socket, uid); 1608 #undef socheckuid 1609 } else { 1610 return groupmember(uid, pcb->inp_socket->so_cred); 1611 } 1612 } 1613 1614 static int 1615 ipfw_match_uid(const struct ipfw_flow_id *fid, struct ifnet *oif, 1616 enum ipfw_opcodes opcode, uid_t uid, int *deny) 1617 { 1618 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 1619 uint32_t gen; 1620 int match = 0; 1621 1622 *deny = 0; 1623 gen = ctx->ipfw_gen; 1624 1625 get_mplock(); 1626 if (gen != ctx->ipfw_gen) { 1627 /* See the comment in lookup_rule() */ 1628 *deny = 1; 1629 } else { 1630 match = _ipfw_match_uid(fid, oif, opcode, uid); 1631 } 1632 rel_mplock(); 1633 return match; 1634 } 1635 1636 /* 1637 * The main check routine for the firewall. 1638 * 1639 * All arguments are in args so we can modify them and return them 1640 * back to the caller. 1641 * 1642 * Parameters: 1643 * 1644 * args->m (in/out) The packet; we set to NULL when/if we nuke it. 1645 * Starts with the IP header. 1646 * args->eh (in) Mac header if present, or NULL for layer3 packet. 1647 * args->oif Outgoing interface, or NULL if packet is incoming. 1648 * The incoming interface is in the mbuf. (in) 1649 * 1650 * args->rule Pointer to the last matching rule (in/out) 1651 * args->f_id Addresses grabbed from the packet (out) 1652 * 1653 * Return value: 1654 * 1655 * If the packet was denied/rejected and has been dropped, *m is equal 1656 * to NULL upon return. 1657 * 1658 * IP_FW_DENY the packet must be dropped. 1659 * IP_FW_PASS The packet is to be accepted and routed normally. 1660 * IP_FW_DIVERT Divert the packet to port (args->cookie) 1661 * IP_FW_TEE Tee the packet to port (args->cookie) 1662 * IP_FW_DUMMYNET Send the packet to pipe/queue (args->cookie) 1663 */ 1664 1665 static int 1666 ipfw_chk(struct ip_fw_args *args) 1667 { 1668 /* 1669 * Local variables hold state during the processing of a packet. 
1670 * 1671 * IMPORTANT NOTE: to speed up the processing of rules, there 1672 * are some assumption on the values of the variables, which 1673 * are documented here. Should you change them, please check 1674 * the implementation of the various instructions to make sure 1675 * that they still work. 1676 * 1677 * args->eh The MAC header. It is non-null for a layer2 1678 * packet, it is NULL for a layer-3 packet. 1679 * 1680 * m | args->m Pointer to the mbuf, as received from the caller. 1681 * It may change if ipfw_chk() does an m_pullup, or if it 1682 * consumes the packet because it calls send_reject(). 1683 * XXX This has to change, so that ipfw_chk() never modifies 1684 * or consumes the buffer. 1685 * ip is simply an alias of the value of m, and it is kept 1686 * in sync with it (the packet is supposed to start with 1687 * the ip header). 1688 */ 1689 struct mbuf *m = args->m; 1690 struct ip *ip = mtod(m, struct ip *); 1691 1692 /* 1693 * oif | args->oif If NULL, ipfw_chk has been called on the 1694 * inbound path (ether_input, ip_input). 1695 * If non-NULL, ipfw_chk has been called on the outbound path 1696 * (ether_output, ip_output). 1697 */ 1698 struct ifnet *oif = args->oif; 1699 1700 struct ip_fw *f = NULL; /* matching rule */ 1701 int retval = IP_FW_PASS; 1702 struct m_tag *mtag; 1703 struct divert_info *divinfo; 1704 1705 /* 1706 * hlen The length of the IPv4 header. 1707 * hlen >0 means we have an IPv4 packet. 1708 */ 1709 u_int hlen = 0; /* hlen >0 means we have an IP pkt */ 1710 1711 /* 1712 * offset The offset of a fragment. offset != 0 means that 1713 * we have a fragment at this offset of an IPv4 packet. 1714 * offset == 0 means that (if this is an IPv4 packet) 1715 * this is the first or only fragment. 1716 */ 1717 u_short offset = 0; 1718 1719 /* 1720 * Local copies of addresses. They are only valid if we have 1721 * an IP packet. 1722 * 1723 * proto The protocol. Set to 0 for non-ip packets, 1724 * or to the protocol read from the packet otherwise. 1725 * proto != 0 means that we have an IPv4 packet. 1726 * 1727 * src_port, dst_port port numbers, in HOST format. Only 1728 * valid for TCP and UDP packets. 1729 * 1730 * src_ip, dst_ip ip addresses, in NETWORK format. 1731 * Only valid for IPv4 packets. 1732 */ 1733 uint8_t proto; 1734 uint16_t src_port = 0, dst_port = 0; /* NOTE: host format */ 1735 struct in_addr src_ip, dst_ip; /* NOTE: network format */ 1736 uint16_t ip_len = 0; 1737 1738 /* 1739 * dyn_dir = MATCH_UNKNOWN when rules unchecked, 1740 * MATCH_NONE when checked and not matched (dyn_f = NULL), 1741 * MATCH_FORWARD or MATCH_REVERSE otherwise (dyn_f != NULL) 1742 */ 1743 int dyn_dir = MATCH_UNKNOWN; 1744 struct ip_fw *dyn_f = NULL; 1745 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 1746 1747 if (m->m_pkthdr.fw_flags & IPFW_MBUF_GENERATED) 1748 return IP_FW_PASS; /* accept */ 1749 1750 if (args->eh == NULL || /* layer 3 packet */ 1751 (m->m_pkthdr.len >= sizeof(struct ip) && 1752 ntohs(args->eh->ether_type) == ETHERTYPE_IP)) 1753 hlen = ip->ip_hl << 2; 1754 1755 /* 1756 * Collect parameters into local variables for faster matching. 
1757 */ 1758 if (hlen == 0) { /* do not grab addresses for non-ip pkts */ 1759 proto = args->f_id.proto = 0; /* mark f_id invalid */ 1760 goto after_ip_checks; 1761 } 1762 1763 proto = args->f_id.proto = ip->ip_p; 1764 src_ip = ip->ip_src; 1765 dst_ip = ip->ip_dst; 1766 if (args->eh != NULL) { /* layer 2 packets are as on the wire */ 1767 offset = ntohs(ip->ip_off) & IP_OFFMASK; 1768 ip_len = ntohs(ip->ip_len); 1769 } else { 1770 offset = ip->ip_off & IP_OFFMASK; 1771 ip_len = ip->ip_len; 1772 } 1773 1774 #define PULLUP_TO(len) \ 1775 do { \ 1776 if (m->m_len < (len)) { \ 1777 args->m = m = m_pullup(m, (len));\ 1778 if (m == NULL) \ 1779 goto pullup_failed; \ 1780 ip = mtod(m, struct ip *); \ 1781 } \ 1782 } while (0) 1783 1784 if (offset == 0) { 1785 switch (proto) { 1786 case IPPROTO_TCP: 1787 { 1788 struct tcphdr *tcp; 1789 1790 PULLUP_TO(hlen + sizeof(struct tcphdr)); 1791 tcp = L3HDR(struct tcphdr, ip); 1792 dst_port = tcp->th_dport; 1793 src_port = tcp->th_sport; 1794 args->f_id.flags = tcp->th_flags; 1795 } 1796 break; 1797 1798 case IPPROTO_UDP: 1799 { 1800 struct udphdr *udp; 1801 1802 PULLUP_TO(hlen + sizeof(struct udphdr)); 1803 udp = L3HDR(struct udphdr, ip); 1804 dst_port = udp->uh_dport; 1805 src_port = udp->uh_sport; 1806 } 1807 break; 1808 1809 case IPPROTO_ICMP: 1810 PULLUP_TO(hlen + 4); /* type, code and checksum. */ 1811 args->f_id.flags = L3HDR(struct icmp, ip)->icmp_type; 1812 break; 1813 1814 default: 1815 break; 1816 } 1817 } 1818 1819 #undef PULLUP_TO 1820 1821 args->f_id.src_ip = ntohl(src_ip.s_addr); 1822 args->f_id.dst_ip = ntohl(dst_ip.s_addr); 1823 args->f_id.src_port = src_port = ntohs(src_port); 1824 args->f_id.dst_port = dst_port = ntohs(dst_port); 1825 1826 after_ip_checks: 1827 if (args->rule) { 1828 /* 1829 * Packet has already been tagged. Look for the next rule 1830 * to restart processing. 1831 * 1832 * If fw_one_pass != 0 then just accept it. 1833 * XXX should not happen here, but optimized out in 1834 * the caller. 1835 */ 1836 if (fw_one_pass) 1837 return IP_FW_PASS; 1838 1839 /* This rule is being/has been flushed */ 1840 if (ipfw_flushing) 1841 return IP_FW_DENY; 1842 1843 KASSERT(args->rule->cpuid == mycpuid, 1844 ("rule used on cpu%d\n", mycpuid)); 1845 1846 /* This rule was deleted */ 1847 if (args->rule->rule_flags & IPFW_RULE_F_INVALID) 1848 return IP_FW_DENY; 1849 1850 f = args->rule->next_rule; 1851 if (f == NULL) 1852 f = lookup_next_rule(args->rule); 1853 } else { 1854 /* 1855 * Find the starting rule. It can be either the first 1856 * one, or the one after divert_rule if asked so. 1857 */ 1858 int skipto; 1859 1860 mtag = m_tag_find(m, PACKET_TAG_IPFW_DIVERT, NULL); 1861 if (mtag != NULL) { 1862 divinfo = m_tag_data(mtag); 1863 skipto = divinfo->skipto; 1864 } else { 1865 skipto = 0; 1866 } 1867 1868 f = ctx->ipfw_layer3_chain; 1869 if (args->eh == NULL && skipto != 0) { 1870 /* No skipto during rule flushing */ 1871 if (ipfw_flushing) 1872 return IP_FW_DENY; 1873 1874 if (skipto >= IPFW_DEFAULT_RULE) 1875 return IP_FW_DENY; /* invalid */ 1876 1877 while (f && f->rulenum <= skipto) 1878 f = f->next; 1879 if (f == NULL) /* drop packet */ 1880 return IP_FW_DENY; 1881 } else if (ipfw_flushing) { 1882 /* Rules are being flushed; skip to default rule */ 1883 f = ctx->ipfw_default_rule; 1884 } 1885 } 1886 if ((mtag = m_tag_find(m, PACKET_TAG_IPFW_DIVERT, NULL)) != NULL) 1887 m_tag_delete(m, mtag); 1888 1889 /* 1890 * Now scan the rules, and parse microinstructions for each rule. 
1891 */ 1892 for (; f; f = f->next) { 1893 int l, cmdlen; 1894 ipfw_insn *cmd; 1895 int skip_or; /* skip rest of OR block */ 1896 1897 again: 1898 if (ctx->ipfw_set_disable & (1 << f->set)) 1899 continue; 1900 1901 skip_or = 0; 1902 for (l = f->cmd_len, cmd = f->cmd; l > 0; 1903 l -= cmdlen, cmd += cmdlen) { 1904 int match, deny; 1905 1906 /* 1907 * check_body is a jump target used when we find a 1908 * CHECK_STATE, and need to jump to the body of 1909 * the target rule. 1910 */ 1911 1912 check_body: 1913 cmdlen = F_LEN(cmd); 1914 /* 1915 * An OR block (insn_1 || .. || insn_n) has the 1916 * F_OR bit set in all but the last instruction. 1917 * The first match will set "skip_or", and cause 1918 * the following instructions to be skipped until 1919 * past the one with the F_OR bit clear. 1920 */ 1921 if (skip_or) { /* skip this instruction */ 1922 if ((cmd->len & F_OR) == 0) 1923 skip_or = 0; /* next one is good */ 1924 continue; 1925 } 1926 match = 0; /* set to 1 if we succeed */ 1927 1928 switch (cmd->opcode) { 1929 /* 1930 * The first set of opcodes compares the packet's 1931 * fields with some pattern, setting 'match' if a 1932 * match is found. At the end of the loop there is 1933 * logic to deal with F_NOT and F_OR flags associated 1934 * with the opcode. 1935 */ 1936 case O_NOP: 1937 match = 1; 1938 break; 1939 1940 case O_FORWARD_MAC: 1941 kprintf("ipfw: opcode %d unimplemented\n", 1942 cmd->opcode); 1943 break; 1944 1945 case O_GID: 1946 case O_UID: 1947 /* 1948 * We only check offset == 0 && proto != 0, 1949 * as this ensures that we have an IPv4 1950 * packet with the ports info. 1951 */ 1952 if (offset!=0) 1953 break; 1954 1955 match = ipfw_match_uid(&args->f_id, oif, 1956 cmd->opcode, 1957 (uid_t)((ipfw_insn_u32 *)cmd)->d[0], 1958 &deny); 1959 if (deny) 1960 return IP_FW_DENY; 1961 break; 1962 1963 case O_RECV: 1964 match = iface_match(m->m_pkthdr.rcvif, 1965 (ipfw_insn_if *)cmd); 1966 break; 1967 1968 case O_XMIT: 1969 match = iface_match(oif, (ipfw_insn_if *)cmd); 1970 break; 1971 1972 case O_VIA: 1973 match = iface_match(oif ? oif : 1974 m->m_pkthdr.rcvif, (ipfw_insn_if *)cmd); 1975 break; 1976 1977 case O_MACADDR2: 1978 if (args->eh != NULL) { /* have MAC header */ 1979 uint32_t *want = (uint32_t *) 1980 ((ipfw_insn_mac *)cmd)->addr; 1981 uint32_t *mask = (uint32_t *) 1982 ((ipfw_insn_mac *)cmd)->mask; 1983 uint32_t *hdr = (uint32_t *)args->eh; 1984 1985 match = 1986 (want[0] == (hdr[0] & mask[0]) && 1987 want[1] == (hdr[1] & mask[1]) && 1988 want[2] == (hdr[2] & mask[2])); 1989 } 1990 break; 1991 1992 case O_MAC_TYPE: 1993 if (args->eh != NULL) { 1994 uint16_t t = 1995 ntohs(args->eh->ether_type); 1996 uint16_t *p = 1997 ((ipfw_insn_u16 *)cmd)->ports; 1998 int i; 1999 2000 /* Special vlan handling */ 2001 if (m->m_flags & M_VLANTAG) 2002 t = ETHERTYPE_VLAN; 2003 2004 for (i = cmdlen - 1; !match && i > 0; 2005 i--, p += 2) { 2006 match = 2007 (t >= p[0] && t <= p[1]); 2008 } 2009 } 2010 break; 2011 2012 case O_FRAG: 2013 match = (hlen > 0 && offset != 0); 2014 break; 2015 2016 case O_IN: /* "out" is "not in" */ 2017 match = (oif == NULL); 2018 break; 2019 2020 case O_LAYER2: 2021 match = (args->eh != NULL); 2022 break; 2023 2024 case O_PROTO: 2025 /* 2026 * We do not allow an arg of 0 so the 2027 * check of "proto" only suffices. 
2028 */ 2029 match = (proto == cmd->arg1); 2030 break; 2031 2032 case O_IP_SRC: 2033 match = (hlen > 0 && 2034 ((ipfw_insn_ip *)cmd)->addr.s_addr == 2035 src_ip.s_addr); 2036 break; 2037 2038 case O_IP_SRC_MASK: 2039 match = (hlen > 0 && 2040 ((ipfw_insn_ip *)cmd)->addr.s_addr == 2041 (src_ip.s_addr & 2042 ((ipfw_insn_ip *)cmd)->mask.s_addr)); 2043 break; 2044 2045 case O_IP_SRC_ME: 2046 if (hlen > 0) { 2047 struct ifnet *tif; 2048 2049 tif = INADDR_TO_IFP(&src_ip); 2050 match = (tif != NULL); 2051 } 2052 break; 2053 2054 case O_IP_DST_SET: 2055 case O_IP_SRC_SET: 2056 if (hlen > 0) { 2057 uint32_t *d = (uint32_t *)(cmd + 1); 2058 uint32_t addr = 2059 cmd->opcode == O_IP_DST_SET ? 2060 args->f_id.dst_ip : 2061 args->f_id.src_ip; 2062 2063 if (addr < d[0]) 2064 break; 2065 addr -= d[0]; /* subtract base */ 2066 match = 2067 (addr < cmd->arg1) && 2068 (d[1 + (addr >> 5)] & 2069 (1 << (addr & 0x1f))); 2070 } 2071 break; 2072 2073 case O_IP_DST: 2074 match = (hlen > 0 && 2075 ((ipfw_insn_ip *)cmd)->addr.s_addr == 2076 dst_ip.s_addr); 2077 break; 2078 2079 case O_IP_DST_MASK: 2080 match = (hlen > 0) && 2081 (((ipfw_insn_ip *)cmd)->addr.s_addr == 2082 (dst_ip.s_addr & 2083 ((ipfw_insn_ip *)cmd)->mask.s_addr)); 2084 break; 2085 2086 case O_IP_DST_ME: 2087 if (hlen > 0) { 2088 struct ifnet *tif; 2089 2090 tif = INADDR_TO_IFP(&dst_ip); 2091 match = (tif != NULL); 2092 } 2093 break; 2094 2095 case O_IP_SRCPORT: 2096 case O_IP_DSTPORT: 2097 /* 2098 * offset == 0 && proto != 0 is enough 2099 * to guarantee that we have an IPv4 2100 * packet with port info. 2101 */ 2102 if ((proto==IPPROTO_UDP || proto==IPPROTO_TCP) 2103 && offset == 0) { 2104 uint16_t x = 2105 (cmd->opcode == O_IP_SRCPORT) ? 2106 src_port : dst_port ; 2107 uint16_t *p = 2108 ((ipfw_insn_u16 *)cmd)->ports; 2109 int i; 2110 2111 for (i = cmdlen - 1; !match && i > 0; 2112 i--, p += 2) { 2113 match = 2114 (x >= p[0] && x <= p[1]); 2115 } 2116 } 2117 break; 2118 2119 case O_ICMPTYPE: 2120 match = (offset == 0 && proto==IPPROTO_ICMP && 2121 icmptype_match(ip, (ipfw_insn_u32 *)cmd)); 2122 break; 2123 2124 case O_IPOPT: 2125 match = (hlen > 0 && ipopts_match(ip, cmd)); 2126 break; 2127 2128 case O_IPVER: 2129 match = (hlen > 0 && cmd->arg1 == ip->ip_v); 2130 break; 2131 2132 case O_IPTTL: 2133 match = (hlen > 0 && cmd->arg1 == ip->ip_ttl); 2134 break; 2135 2136 case O_IPID: 2137 match = (hlen > 0 && 2138 cmd->arg1 == ntohs(ip->ip_id)); 2139 break; 2140 2141 case O_IPLEN: 2142 match = (hlen > 0 && cmd->arg1 == ip_len); 2143 break; 2144 2145 case O_IPPRECEDENCE: 2146 match = (hlen > 0 && 2147 (cmd->arg1 == (ip->ip_tos & 0xe0))); 2148 break; 2149 2150 case O_IPTOS: 2151 match = (hlen > 0 && 2152 flags_match(cmd, ip->ip_tos)); 2153 break; 2154 2155 case O_TCPFLAGS: 2156 match = (proto == IPPROTO_TCP && offset == 0 && 2157 flags_match(cmd, 2158 L3HDR(struct tcphdr,ip)->th_flags)); 2159 break; 2160 2161 case O_TCPOPTS: 2162 match = (proto == IPPROTO_TCP && offset == 0 && 2163 tcpopts_match(ip, cmd)); 2164 break; 2165 2166 case O_TCPSEQ: 2167 match = (proto == IPPROTO_TCP && offset == 0 && 2168 ((ipfw_insn_u32 *)cmd)->d[0] == 2169 L3HDR(struct tcphdr,ip)->th_seq); 2170 break; 2171 2172 case O_TCPACK: 2173 match = (proto == IPPROTO_TCP && offset == 0 && 2174 ((ipfw_insn_u32 *)cmd)->d[0] == 2175 L3HDR(struct tcphdr,ip)->th_ack); 2176 break; 2177 2178 case O_TCPWIN: 2179 match = (proto == IPPROTO_TCP && offset == 0 && 2180 cmd->arg1 == 2181 L3HDR(struct tcphdr,ip)->th_win); 2182 break; 2183 2184 case O_ESTAB: 2185 /* reject packets which have SYN only 
*/
2186 /* XXX should I also check for TH_ACK? */
2187 match = (proto == IPPROTO_TCP && offset == 0 &&
2188 (L3HDR(struct tcphdr,ip)->th_flags &
2189 (TH_RST | TH_ACK | TH_SYN)) != TH_SYN);
2190 break;
2191
2192 case O_LOG:
2193 if (fw_verbose)
2194 ipfw_log(f, hlen, args->eh, m, oif);
2195 match = 1;
2196 break;
2197
2198 case O_PROB:
2199 match = (krandom() <
2200 ((ipfw_insn_u32 *)cmd)->d[0]);
2201 break;
2202
2203 /*
2204 * The second set of opcodes represents 'actions',
2205 * i.e. the terminal part of a rule once the packet
2206 * matches all previous patterns.
2207 * Typically there is only one action for each rule,
2208 * and the opcode is stored at the end of the rule
2209 * (but there are exceptions -- see below).
2210 *
2211 * In general, here we set retval and terminate the
2212 * outer loop (would be a 'break 3' in some languages,
2213 * but we need to do a 'goto done').
2214 *
2215 * Exceptions:
2216 * O_COUNT and O_SKIPTO actions:
2217 * instead of terminating, we jump to the next rule
2218 * ('goto next_rule', equivalent to a 'break 2'),
2219 * or to the SKIPTO target ('goto again' after
2220 * having set f, cmd and l), respectively.
2221 *
2222 * O_LIMIT and O_KEEP_STATE: these opcodes are
2223 * not real 'actions', and are stored right
2224 * before the 'action' part of the rule.
2225 * These opcodes try to install an entry in the
2226 * state tables; if successful, we continue with
2227 * the next opcode (match=1; break;), otherwise
2228 * the packet must be dropped ('goto done' after
2229 * setting retval). If static rules are changed
2230 * during the state installation, the packet will
2231 * be dropped and the rule's stats will not be updated
2232 * ('return IP_FW_DENY').
2233 *
2234 * O_PROBE_STATE and O_CHECK_STATE: these opcodes
2235 * cause a lookup of the state table, and a jump
2236 * to the 'action' part of the parent rule
2237 * ('goto check_body') if an entry is found, or
2238 * (CHECK_STATE only) a jump to the next rule if
2239 * the entry is not found ('goto next_rule').
2240 * The result of the lookup is cached so that
2241 * further instances of these opcodes are
2242 * effectively NOPs. If static rules are changed
2243 * during the state lookup, the packet will
2244 * be dropped and the rule's stats will not be updated
2245 * ('return IP_FW_DENY').
2246 */
2247 case O_LIMIT:
2248 case O_KEEP_STATE:
2249 if (!(f->rule_flags & IPFW_RULE_F_STATE)) {
2250 kprintf("%s rule (%d) is not ready "
2251 "on cpu%d\n",
2252 cmd->opcode == O_LIMIT ?
2253 "limit" : "keep state",
2254 f->rulenum, f->cpuid);
2255 goto next_rule;
2256 }
2257 if (install_state(f,
2258 (ipfw_insn_limit *)cmd, args, &deny)) {
2259 if (deny)
2260 return IP_FW_DENY;
2261
2262 retval = IP_FW_DENY;
2263 goto done; /* error/limit violation */
2264 }
2265 if (deny)
2266 return IP_FW_DENY;
2267 match = 1;
2268 break;
2269
2270 case O_PROBE_STATE:
2271 case O_CHECK_STATE:
2272 /*
2273 * dynamic rules are checked at the first
2274 * keep-state or check-state occurrence,
2275 * with the result being stored in dyn_dir.
2276 * The compiler introduces a PROBE_STATE
2277 * instruction for us when we have a
2278 * KEEP_STATE (because PROBE_STATE needs
2279 * to be run first).
2280 */
2281 if (dyn_dir == MATCH_UNKNOWN) {
2282 dyn_f = lookup_rule(&args->f_id,
2283 &dyn_dir,
2284 proto == IPPROTO_TCP ?
2285 L3HDR(struct tcphdr, ip) : NULL, 2286 ip_len, &deny); 2287 if (deny) 2288 return IP_FW_DENY; 2289 if (dyn_f != NULL) { 2290 /* 2291 * Found a rule from a dynamic 2292 * entry; jump to the 'action' 2293 * part of the rule. 2294 */ 2295 f = dyn_f; 2296 cmd = ACTION_PTR(f); 2297 l = f->cmd_len - f->act_ofs; 2298 goto check_body; 2299 } 2300 } 2301 /* 2302 * Dynamic entry not found. If CHECK_STATE, 2303 * skip to next rule, if PROBE_STATE just 2304 * ignore and continue with next opcode. 2305 */ 2306 if (cmd->opcode == O_CHECK_STATE) 2307 goto next_rule; 2308 else if (!(f->rule_flags & IPFW_RULE_F_STATE)) 2309 goto next_rule; /* not ready yet */ 2310 match = 1; 2311 break; 2312 2313 case O_ACCEPT: 2314 retval = IP_FW_PASS; /* accept */ 2315 goto done; 2316 2317 case O_PIPE: 2318 case O_QUEUE: 2319 args->rule = f; /* report matching rule */ 2320 args->cookie = cmd->arg1; 2321 retval = IP_FW_DUMMYNET; 2322 goto done; 2323 2324 case O_DIVERT: 2325 case O_TEE: 2326 if (args->eh) /* not on layer 2 */ 2327 break; 2328 2329 mtag = m_tag_get(PACKET_TAG_IPFW_DIVERT, 2330 sizeof(*divinfo), MB_DONTWAIT); 2331 if (mtag == NULL) { 2332 retval = IP_FW_DENY; 2333 goto done; 2334 } 2335 divinfo = m_tag_data(mtag); 2336 2337 divinfo->skipto = f->rulenum; 2338 divinfo->port = cmd->arg1; 2339 divinfo->tee = (cmd->opcode == O_TEE); 2340 m_tag_prepend(m, mtag); 2341 2342 args->cookie = cmd->arg1; 2343 retval = (cmd->opcode == O_DIVERT) ? 2344 IP_FW_DIVERT : IP_FW_TEE; 2345 goto done; 2346 2347 case O_COUNT: 2348 case O_SKIPTO: 2349 f->pcnt++; /* update stats */ 2350 f->bcnt += ip_len; 2351 f->timestamp = time_second; 2352 if (cmd->opcode == O_COUNT) 2353 goto next_rule; 2354 /* handle skipto */ 2355 if (f->next_rule == NULL) 2356 lookup_next_rule(f); 2357 f = f->next_rule; 2358 goto again; 2359 2360 case O_REJECT: 2361 /* 2362 * Drop the packet and send a reject notice 2363 * if the packet is not ICMP (or is an ICMP 2364 * query), and it is not multicast/broadcast. 2365 */ 2366 if (hlen > 0 && 2367 (proto != IPPROTO_ICMP || 2368 is_icmp_query(ip)) && 2369 !(m->m_flags & (M_BCAST|M_MCAST)) && 2370 !IN_MULTICAST(ntohl(dst_ip.s_addr))) { 2371 /* 2372 * Update statistics before the possible 2373 * blocking 'send_reject' 2374 */ 2375 f->pcnt++; 2376 f->bcnt += ip_len; 2377 f->timestamp = time_second; 2378 2379 send_reject(args, cmd->arg1, 2380 offset,ip_len); 2381 m = args->m; 2382 2383 /* 2384 * Return directly here, rule stats 2385 * have been updated above. 
2386 */ 2387 return IP_FW_DENY; 2388 } 2389 /* FALLTHROUGH */ 2390 case O_DENY: 2391 retval = IP_FW_DENY; 2392 goto done; 2393 2394 case O_FORWARD_IP: 2395 if (args->eh) /* not valid on layer2 pkts */ 2396 break; 2397 if (!dyn_f || dyn_dir == MATCH_FORWARD) { 2398 struct sockaddr_in *sin; 2399 2400 mtag = m_tag_get(PACKET_TAG_IPFORWARD, 2401 sizeof(*sin), MB_DONTWAIT); 2402 if (mtag == NULL) { 2403 retval = IP_FW_DENY; 2404 goto done; 2405 } 2406 sin = m_tag_data(mtag); 2407 2408 /* Structure copy */ 2409 *sin = ((ipfw_insn_sa *)cmd)->sa; 2410 2411 m_tag_prepend(m, mtag); 2412 m->m_pkthdr.fw_flags |= 2413 IPFORWARD_MBUF_TAGGED; 2414 } 2415 retval = IP_FW_PASS; 2416 goto done; 2417 2418 default: 2419 panic("-- unknown opcode %d\n", cmd->opcode); 2420 } /* end of switch() on opcodes */ 2421 2422 if (cmd->len & F_NOT) 2423 match = !match; 2424 2425 if (match) { 2426 if (cmd->len & F_OR) 2427 skip_or = 1; 2428 } else { 2429 if (!(cmd->len & F_OR)) /* not an OR block, */ 2430 break; /* try next rule */ 2431 } 2432 2433 } /* end of inner for, scan opcodes */ 2434 2435 next_rule:; /* try next rule */ 2436 2437 } /* end of outer for, scan rules */ 2438 kprintf("+++ ipfw: ouch!, skip past end of rules, denying packet\n"); 2439 return IP_FW_DENY; 2440 2441 done: 2442 /* Update statistics */ 2443 f->pcnt++; 2444 f->bcnt += ip_len; 2445 f->timestamp = time_second; 2446 return retval; 2447 2448 pullup_failed: 2449 if (fw_verbose) 2450 kprintf("pullup failed\n"); 2451 return IP_FW_DENY; 2452 } 2453 2454 static void 2455 ipfw_dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa) 2456 { 2457 struct m_tag *mtag; 2458 struct dn_pkt *pkt; 2459 ipfw_insn *cmd; 2460 const struct ipfw_flow_id *id; 2461 struct dn_flow_id *fid; 2462 2463 M_ASSERTPKTHDR(m); 2464 2465 mtag = m_tag_get(PACKET_TAG_DUMMYNET, sizeof(*pkt), MB_DONTWAIT); 2466 if (mtag == NULL) { 2467 m_freem(m); 2468 return; 2469 } 2470 m_tag_prepend(m, mtag); 2471 2472 pkt = m_tag_data(mtag); 2473 bzero(pkt, sizeof(*pkt)); 2474 2475 cmd = fwa->rule->cmd + fwa->rule->act_ofs; 2476 if (cmd->opcode == O_LOG) 2477 cmd += F_LEN(cmd); 2478 KASSERT(cmd->opcode == O_PIPE || cmd->opcode == O_QUEUE, 2479 ("Rule is not PIPE or QUEUE, opcode %d\n", cmd->opcode)); 2480 2481 pkt->dn_m = m; 2482 pkt->dn_flags = (dir & DN_FLAGS_DIR_MASK); 2483 pkt->ifp = fwa->oif; 2484 pkt->pipe_nr = pipe_nr; 2485 2486 pkt->cpuid = mycpuid; 2487 pkt->msgport = curnetport; 2488 2489 id = &fwa->f_id; 2490 fid = &pkt->id; 2491 fid->fid_dst_ip = id->dst_ip; 2492 fid->fid_src_ip = id->src_ip; 2493 fid->fid_dst_port = id->dst_port; 2494 fid->fid_src_port = id->src_port; 2495 fid->fid_proto = id->proto; 2496 fid->fid_flags = id->flags; 2497 2498 ipfw_ref_rule(fwa->rule); 2499 pkt->dn_priv = fwa->rule; 2500 pkt->dn_unref_priv = ipfw_unref_rule; 2501 2502 if (cmd->opcode == O_PIPE) 2503 pkt->dn_flags |= DN_FLAGS_IS_PIPE; 2504 2505 m->m_pkthdr.fw_flags |= DUMMYNET_MBUF_TAGGED; 2506 } 2507 2508 /* 2509 * When a rule is added/deleted, clear the next_rule pointers in all rules. 2510 * These will be reconstructed on the fly as packets are matched. 2511 * Must be called at splimp(). 
2512 */ 2513 static void 2514 ipfw_flush_rule_ptrs(struct ipfw_context *ctx) 2515 { 2516 struct ip_fw *rule; 2517 2518 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) 2519 rule->next_rule = NULL; 2520 } 2521 2522 static __inline void 2523 ipfw_inc_static_count(struct ip_fw *rule) 2524 { 2525 /* Static rule's counts are updated only on CPU0 */ 2526 KKASSERT(mycpuid == 0); 2527 2528 static_count++; 2529 static_ioc_len += IOC_RULESIZE(rule); 2530 } 2531 2532 static __inline void 2533 ipfw_dec_static_count(struct ip_fw *rule) 2534 { 2535 int l = IOC_RULESIZE(rule); 2536 2537 /* Static rule's counts are updated only on CPU0 */ 2538 KKASSERT(mycpuid == 0); 2539 2540 KASSERT(static_count > 0, ("invalid static count %u\n", static_count)); 2541 static_count--; 2542 2543 KASSERT(static_ioc_len >= l, 2544 ("invalid static len %u\n", static_ioc_len)); 2545 static_ioc_len -= l; 2546 } 2547 2548 static void 2549 ipfw_link_sibling(struct netmsg_ipfw *fwmsg, struct ip_fw *rule) 2550 { 2551 if (fwmsg->sibling != NULL) { 2552 KKASSERT(mycpuid > 0 && fwmsg->sibling->cpuid == mycpuid - 1); 2553 fwmsg->sibling->sibling = rule; 2554 } 2555 fwmsg->sibling = rule; 2556 } 2557 2558 static struct ip_fw * 2559 ipfw_create_rule(const struct ipfw_ioc_rule *ioc_rule, struct ip_fw_stub *stub) 2560 { 2561 struct ip_fw *rule; 2562 2563 rule = kmalloc(RULESIZE(ioc_rule), M_IPFW, M_WAITOK | M_ZERO); 2564 2565 rule->act_ofs = ioc_rule->act_ofs; 2566 rule->cmd_len = ioc_rule->cmd_len; 2567 rule->rulenum = ioc_rule->rulenum; 2568 rule->set = ioc_rule->set; 2569 rule->usr_flags = ioc_rule->usr_flags; 2570 2571 bcopy(ioc_rule->cmd, rule->cmd, rule->cmd_len * 4 /* XXX */); 2572 2573 rule->refcnt = 1; 2574 rule->cpuid = mycpuid; 2575 2576 rule->stub = stub; 2577 if (stub != NULL) 2578 stub->rule[mycpuid] = rule; 2579 2580 return rule; 2581 } 2582 2583 static void 2584 ipfw_add_rule_dispatch(struct netmsg *nmsg) 2585 { 2586 struct netmsg_ipfw *fwmsg = (struct netmsg_ipfw *)nmsg; 2587 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 2588 struct ip_fw *rule; 2589 2590 rule = ipfw_create_rule(fwmsg->ioc_rule, fwmsg->stub); 2591 2592 /* 2593 * Bump generation after ipfw_create_rule(), 2594 * since this function is blocking 2595 */ 2596 ctx->ipfw_gen++; 2597 2598 /* 2599 * Insert rule into the pre-determined position 2600 */ 2601 if (fwmsg->prev_rule != NULL) { 2602 struct ip_fw *prev, *next; 2603 2604 prev = fwmsg->prev_rule; 2605 KKASSERT(prev->cpuid == mycpuid); 2606 2607 next = fwmsg->next_rule; 2608 KKASSERT(next->cpuid == mycpuid); 2609 2610 rule->next = next; 2611 prev->next = rule; 2612 2613 /* 2614 * Move to the position on the next CPU 2615 * before the msg is forwarded. 
2616 */ 2617 fwmsg->prev_rule = prev->sibling; 2618 fwmsg->next_rule = next->sibling; 2619 } else { 2620 KKASSERT(fwmsg->next_rule == NULL); 2621 rule->next = ctx->ipfw_layer3_chain; 2622 ctx->ipfw_layer3_chain = rule; 2623 } 2624 2625 /* Link rule CPU sibling */ 2626 ipfw_link_sibling(fwmsg, rule); 2627 2628 ipfw_flush_rule_ptrs(ctx); 2629 2630 if (mycpuid == 0) { 2631 /* Statistics only need to be updated once */ 2632 ipfw_inc_static_count(rule); 2633 2634 /* Return the rule on CPU0 */ 2635 nmsg->nm_lmsg.u.ms_resultp = rule; 2636 } 2637 2638 ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1); 2639 } 2640 2641 static void 2642 ipfw_enable_state_dispatch(struct netmsg *nmsg) 2643 { 2644 struct lwkt_msg *lmsg = &nmsg->nm_lmsg; 2645 struct ip_fw *rule = lmsg->u.ms_resultp; 2646 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 2647 2648 ctx->ipfw_gen++; 2649 2650 KKASSERT(rule->cpuid == mycpuid); 2651 KKASSERT(rule->stub != NULL && rule->stub->rule[mycpuid] == rule); 2652 KKASSERT(!(rule->rule_flags & IPFW_RULE_F_STATE)); 2653 rule->rule_flags |= IPFW_RULE_F_STATE; 2654 lmsg->u.ms_resultp = rule->sibling; 2655 2656 ifnet_forwardmsg(lmsg, mycpuid + 1); 2657 } 2658 2659 /* 2660 * Add a new rule to the list. Copy the rule into a malloc'ed area, 2661 * then possibly create a rule number and add the rule to the list. 2662 * Update the rule_number in the input struct so the caller knows 2663 * it as well. 2664 */ 2665 static void 2666 ipfw_add_rule(struct ipfw_ioc_rule *ioc_rule, uint32_t rule_flags) 2667 { 2668 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 2669 struct netmsg_ipfw fwmsg; 2670 struct netmsg *nmsg; 2671 struct ip_fw *f, *prev, *rule; 2672 struct ip_fw_stub *stub; 2673 2674 IPFW_ASSERT_CFGPORT(&curthread->td_msgport); 2675 2676 /* 2677 * If rulenum is 0, find highest numbered rule before the 2678 * default rule, and add rule number incremental step. 2679 */ 2680 if (ioc_rule->rulenum == 0) { 2681 int step = autoinc_step; 2682 2683 KKASSERT(step >= IPFW_AUTOINC_STEP_MIN && 2684 step <= IPFW_AUTOINC_STEP_MAX); 2685 2686 /* 2687 * Locate the highest numbered rule before default 2688 */ 2689 for (f = ctx->ipfw_layer3_chain; f; f = f->next) { 2690 if (f->rulenum == IPFW_DEFAULT_RULE) 2691 break; 2692 ioc_rule->rulenum = f->rulenum; 2693 } 2694 if (ioc_rule->rulenum < IPFW_DEFAULT_RULE - step) 2695 ioc_rule->rulenum += step; 2696 } 2697 KASSERT(ioc_rule->rulenum != IPFW_DEFAULT_RULE && 2698 ioc_rule->rulenum != 0, 2699 ("invalid rule num %d\n", ioc_rule->rulenum)); 2700 2701 /* 2702 * Now find the right place for the new rule in the sorted list. 2703 */ 2704 for (prev = NULL, f = ctx->ipfw_layer3_chain; f; 2705 prev = f, f = f->next) { 2706 if (f->rulenum > ioc_rule->rulenum) { 2707 /* Found the location */ 2708 break; 2709 } 2710 } 2711 KASSERT(f != NULL, ("no default rule?!\n")); 2712 2713 if (rule_flags & IPFW_RULE_F_STATE) { 2714 int size; 2715 2716 /* 2717 * If the new rule will create states, then allocate 2718 * a rule stub, which will be referenced by states 2719 * (dyn rules) 2720 */ 2721 size = sizeof(*stub) + ((ncpus - 1) * sizeof(struct ip_fw *)); 2722 stub = kmalloc(size, M_IPFW, M_WAITOK | M_ZERO); 2723 } else { 2724 stub = NULL; 2725 } 2726 2727 /* 2728 * Duplicate the rule onto each CPU. 2729 * The rule duplicated on CPU0 will be returned. 
2730 */ 2731 bzero(&fwmsg, sizeof(fwmsg)); 2732 nmsg = &fwmsg.nmsg; 2733 netmsg_init(nmsg, NULL, &curthread->td_msgport, 2734 0, ipfw_add_rule_dispatch); 2735 fwmsg.ioc_rule = ioc_rule; 2736 fwmsg.prev_rule = prev; 2737 fwmsg.next_rule = prev == NULL ? NULL : f; 2738 fwmsg.stub = stub; 2739 2740 ifnet_domsg(&nmsg->nm_lmsg, 0); 2741 KKASSERT(fwmsg.prev_rule == NULL && fwmsg.next_rule == NULL); 2742 2743 rule = nmsg->nm_lmsg.u.ms_resultp; 2744 KKASSERT(rule != NULL && rule->cpuid == mycpuid); 2745 2746 if (rule_flags & IPFW_RULE_F_STATE) { 2747 /* 2748 * Turn on state flag, _after_ everything on all 2749 * CPUs have been setup. 2750 */ 2751 bzero(nmsg, sizeof(*nmsg)); 2752 netmsg_init(nmsg, NULL, &curthread->td_msgport, 2753 0, ipfw_enable_state_dispatch); 2754 nmsg->nm_lmsg.u.ms_resultp = rule; 2755 2756 ifnet_domsg(&nmsg->nm_lmsg, 0); 2757 KKASSERT(nmsg->nm_lmsg.u.ms_resultp == NULL); 2758 } 2759 2760 DPRINTF("++ installed rule %d, static count now %d\n", 2761 rule->rulenum, static_count); 2762 } 2763 2764 /** 2765 * Free storage associated with a static rule (including derived 2766 * dynamic rules). 2767 * The caller is in charge of clearing rule pointers to avoid 2768 * dangling pointers. 2769 * @return a pointer to the next entry. 2770 * Arguments are not checked, so they better be correct. 2771 * Must be called at splimp(). 2772 */ 2773 static struct ip_fw * 2774 ipfw_delete_rule(struct ipfw_context *ctx, 2775 struct ip_fw *prev, struct ip_fw *rule) 2776 { 2777 struct ip_fw *n; 2778 struct ip_fw_stub *stub; 2779 2780 ctx->ipfw_gen++; 2781 2782 /* STATE flag should have been cleared before we reach here */ 2783 KKASSERT((rule->rule_flags & IPFW_RULE_F_STATE) == 0); 2784 2785 stub = rule->stub; 2786 n = rule->next; 2787 if (prev == NULL) 2788 ctx->ipfw_layer3_chain = n; 2789 else 2790 prev->next = n; 2791 2792 /* Mark the rule as invalid */ 2793 rule->rule_flags |= IPFW_RULE_F_INVALID; 2794 rule->next_rule = NULL; 2795 rule->sibling = NULL; 2796 rule->stub = NULL; 2797 #ifdef foo 2798 /* Don't reset cpuid here; keep various assertion working */ 2799 rule->cpuid = -1; 2800 #endif 2801 2802 /* Statistics only need to be updated once */ 2803 if (mycpuid == 0) 2804 ipfw_dec_static_count(rule); 2805 2806 /* Free 'stub' on the last CPU */ 2807 if (stub != NULL && mycpuid == ncpus - 1) 2808 kfree(stub, M_IPFW); 2809 2810 /* Try to free this rule */ 2811 ipfw_free_rule(rule); 2812 2813 /* Return the next rule */ 2814 return n; 2815 } 2816 2817 static void 2818 ipfw_flush_dispatch(struct netmsg *nmsg) 2819 { 2820 struct lwkt_msg *lmsg = &nmsg->nm_lmsg; 2821 int kill_default = lmsg->u.ms_result; 2822 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 2823 struct ip_fw *rule; 2824 2825 ipfw_flush_rule_ptrs(ctx); /* more efficient to do outside the loop */ 2826 2827 while ((rule = ctx->ipfw_layer3_chain) != NULL && 2828 (kill_default || rule->rulenum != IPFW_DEFAULT_RULE)) 2829 ipfw_delete_rule(ctx, NULL, rule); 2830 2831 ifnet_forwardmsg(lmsg, mycpuid + 1); 2832 } 2833 2834 static void 2835 ipfw_disable_rule_state_dispatch(struct netmsg *nmsg) 2836 { 2837 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg; 2838 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 2839 struct ip_fw *rule; 2840 2841 ctx->ipfw_gen++; 2842 2843 rule = dmsg->start_rule; 2844 if (rule != NULL) { 2845 KKASSERT(rule->cpuid == mycpuid); 2846 2847 /* 2848 * Move to the position on the next CPU 2849 * before the msg is forwarded. 
2850 */ 2851 dmsg->start_rule = rule->sibling; 2852 } else { 2853 KKASSERT(dmsg->rulenum == 0); 2854 rule = ctx->ipfw_layer3_chain; 2855 } 2856 2857 while (rule != NULL) { 2858 if (dmsg->rulenum && rule->rulenum != dmsg->rulenum) 2859 break; 2860 rule->rule_flags &= ~IPFW_RULE_F_STATE; 2861 rule = rule->next; 2862 } 2863 2864 ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1); 2865 } 2866 2867 /* 2868 * Deletes all rules from a chain (including the default rule 2869 * if the second argument is set). 2870 * Must be called at splimp(). 2871 */ 2872 static void 2873 ipfw_flush(int kill_default) 2874 { 2875 struct netmsg_del dmsg; 2876 struct netmsg nmsg; 2877 struct lwkt_msg *lmsg; 2878 struct ip_fw *rule; 2879 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 2880 2881 IPFW_ASSERT_CFGPORT(&curthread->td_msgport); 2882 2883 /* 2884 * If 'kill_default' then caller has done the necessary 2885 * msgport syncing; unnecessary to do it again. 2886 */ 2887 if (!kill_default) { 2888 /* 2889 * Let ipfw_chk() know the rules are going to 2890 * be flushed, so it could jump directly to 2891 * the default rule. 2892 */ 2893 ipfw_flushing = 1; 2894 netmsg_service_sync(); 2895 } 2896 2897 /* 2898 * Clear STATE flag on rules, so no more states (dyn rules) 2899 * will be created. 2900 */ 2901 bzero(&dmsg, sizeof(dmsg)); 2902 netmsg_init(&dmsg.nmsg, NULL, &curthread->td_msgport, 2903 0, ipfw_disable_rule_state_dispatch); 2904 ifnet_domsg(&dmsg.nmsg.nm_lmsg, 0); 2905 2906 /* 2907 * This actually nukes all states (dyn rules) 2908 */ 2909 lockmgr(&dyn_lock, LK_EXCLUSIVE); 2910 for (rule = ctx->ipfw_layer3_chain; rule != NULL; rule = rule->next) { 2911 /* 2912 * Can't check IPFW_RULE_F_STATE here, 2913 * since it has been cleared previously. 2914 * Check 'stub' instead. 2915 */ 2916 if (rule->stub != NULL) { 2917 /* Force removal */ 2918 remove_dyn_rule_locked(rule, NULL); 2919 } 2920 } 2921 lockmgr(&dyn_lock, LK_RELEASE); 2922 2923 /* 2924 * Press the 'flush' button 2925 */ 2926 bzero(&nmsg, sizeof(nmsg)); 2927 netmsg_init(&nmsg, NULL, &curthread->td_msgport, 2928 0, ipfw_flush_dispatch); 2929 lmsg = &nmsg.nm_lmsg; 2930 lmsg->u.ms_result = kill_default; 2931 ifnet_domsg(lmsg, 0); 2932 2933 KASSERT(dyn_count == 0, ("%u dyn rule remains\n", dyn_count)); 2934 2935 if (kill_default) { 2936 if (ipfw_dyn_v != NULL) { 2937 /* 2938 * Free dynamic rules(state) hash table 2939 */ 2940 kfree(ipfw_dyn_v, M_IPFW); 2941 ipfw_dyn_v = NULL; 2942 } 2943 2944 KASSERT(static_count == 0, 2945 ("%u static rules remains\n", static_count)); 2946 KASSERT(static_ioc_len == 0, 2947 ("%u bytes of static rules remains\n", static_ioc_len)); 2948 } else { 2949 KASSERT(static_count == 1, 2950 ("%u static rules remains\n", static_count)); 2951 KASSERT(static_ioc_len == IOC_RULESIZE(ctx->ipfw_default_rule), 2952 ("%u bytes of static rules remains, should be %lu\n", 2953 static_ioc_len, 2954 (u_long)IOC_RULESIZE(ctx->ipfw_default_rule))); 2955 } 2956 2957 /* Flush is done */ 2958 ipfw_flushing = 0; 2959 } 2960 2961 static void 2962 ipfw_alt_delete_rule_dispatch(struct netmsg *nmsg) 2963 { 2964 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg; 2965 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 2966 struct ip_fw *rule, *prev; 2967 2968 rule = dmsg->start_rule; 2969 KKASSERT(rule->cpuid == mycpuid); 2970 dmsg->start_rule = rule->sibling; 2971 2972 prev = dmsg->prev_rule; 2973 if (prev != NULL) { 2974 KKASSERT(prev->cpuid == mycpuid); 2975 2976 /* 2977 * Move to the position on the next CPU 2978 * before the msg is forwarded. 
2979 */ 2980 dmsg->prev_rule = prev->sibling; 2981 } 2982 2983 /* 2984 * flush pointers outside the loop, then delete all matching 2985 * rules. 'prev' remains the same throughout the cycle. 2986 */ 2987 ipfw_flush_rule_ptrs(ctx); 2988 while (rule && rule->rulenum == dmsg->rulenum) 2989 rule = ipfw_delete_rule(ctx, prev, rule); 2990 2991 ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1); 2992 } 2993 2994 static int 2995 ipfw_alt_delete_rule(uint16_t rulenum) 2996 { 2997 struct ip_fw *prev, *rule, *f; 2998 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 2999 struct netmsg_del dmsg; 3000 struct netmsg *nmsg; 3001 int state; 3002 3003 /* 3004 * Locate first rule to delete 3005 */ 3006 for (prev = NULL, rule = ctx->ipfw_layer3_chain; 3007 rule && rule->rulenum < rulenum; 3008 prev = rule, rule = rule->next) 3009 ; /* EMPTY */ 3010 if (rule->rulenum != rulenum) 3011 return EINVAL; 3012 3013 /* 3014 * Check whether any rules with the given number will 3015 * create states. 3016 */ 3017 state = 0; 3018 for (f = rule; f && f->rulenum == rulenum; f = f->next) { 3019 if (f->rule_flags & IPFW_RULE_F_STATE) { 3020 state = 1; 3021 break; 3022 } 3023 } 3024 3025 if (state) { 3026 /* 3027 * Clear the STATE flag, so no more states will be 3028 * created based the rules numbered 'rulenum'. 3029 */ 3030 bzero(&dmsg, sizeof(dmsg)); 3031 nmsg = &dmsg.nmsg; 3032 netmsg_init(nmsg, NULL, &curthread->td_msgport, 3033 0, ipfw_disable_rule_state_dispatch); 3034 dmsg.start_rule = rule; 3035 dmsg.rulenum = rulenum; 3036 3037 ifnet_domsg(&nmsg->nm_lmsg, 0); 3038 KKASSERT(dmsg.start_rule == NULL); 3039 3040 /* 3041 * Nuke all related states 3042 */ 3043 lockmgr(&dyn_lock, LK_EXCLUSIVE); 3044 for (f = rule; f && f->rulenum == rulenum; f = f->next) { 3045 /* 3046 * Can't check IPFW_RULE_F_STATE here, 3047 * since it has been cleared previously. 3048 * Check 'stub' instead. 
3049 */ 3050 if (f->stub != NULL) { 3051 /* Force removal */ 3052 remove_dyn_rule_locked(f, NULL); 3053 } 3054 } 3055 lockmgr(&dyn_lock, LK_RELEASE); 3056 } 3057 3058 /* 3059 * Get rid of the rule duplications on all CPUs 3060 */ 3061 bzero(&dmsg, sizeof(dmsg)); 3062 nmsg = &dmsg.nmsg; 3063 netmsg_init(nmsg, NULL, &curthread->td_msgport, 3064 0, ipfw_alt_delete_rule_dispatch); 3065 dmsg.prev_rule = prev; 3066 dmsg.start_rule = rule; 3067 dmsg.rulenum = rulenum; 3068 3069 ifnet_domsg(&nmsg->nm_lmsg, 0); 3070 KKASSERT(dmsg.prev_rule == NULL && dmsg.start_rule == NULL); 3071 return 0; 3072 } 3073 3074 static void 3075 ipfw_alt_delete_ruleset_dispatch(struct netmsg *nmsg) 3076 { 3077 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg; 3078 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 3079 struct ip_fw *prev, *rule; 3080 #ifdef INVARIANTS 3081 int del = 0; 3082 #endif 3083 3084 ipfw_flush_rule_ptrs(ctx); 3085 3086 prev = NULL; 3087 rule = ctx->ipfw_layer3_chain; 3088 while (rule != NULL) { 3089 if (rule->set == dmsg->from_set) { 3090 rule = ipfw_delete_rule(ctx, prev, rule); 3091 #ifdef INVARIANTS 3092 del = 1; 3093 #endif 3094 } else { 3095 prev = rule; 3096 rule = rule->next; 3097 } 3098 } 3099 KASSERT(del, ("no match set?!\n")); 3100 3101 ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1); 3102 } 3103 3104 static void 3105 ipfw_disable_ruleset_state_dispatch(struct netmsg *nmsg) 3106 { 3107 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg; 3108 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 3109 struct ip_fw *rule; 3110 #ifdef INVARIANTS 3111 int cleared = 0; 3112 #endif 3113 3114 ctx->ipfw_gen++; 3115 3116 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) { 3117 if (rule->set == dmsg->from_set) { 3118 #ifdef INVARIANTS 3119 cleared = 1; 3120 #endif 3121 rule->rule_flags &= ~IPFW_RULE_F_STATE; 3122 } 3123 } 3124 KASSERT(cleared, ("no match set?!\n")); 3125 3126 ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1); 3127 } 3128 3129 static int 3130 ipfw_alt_delete_ruleset(uint8_t set) 3131 { 3132 struct netmsg_del dmsg; 3133 struct netmsg *nmsg; 3134 int state, del; 3135 struct ip_fw *rule; 3136 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 3137 3138 /* 3139 * Check whether the 'set' exists. If it exists, 3140 * then check whether any rules within the set will 3141 * try to create states. 3142 */ 3143 state = 0; 3144 del = 0; 3145 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) { 3146 if (rule->set == set) { 3147 del = 1; 3148 if (rule->rule_flags & IPFW_RULE_F_STATE) { 3149 state = 1; 3150 break; 3151 } 3152 } 3153 } 3154 if (!del) 3155 return 0; /* XXX EINVAL? */ 3156 3157 if (state) { 3158 /* 3159 * Clear the STATE flag, so no more states will be 3160 * created based the rules in this set. 3161 */ 3162 bzero(&dmsg, sizeof(dmsg)); 3163 nmsg = &dmsg.nmsg; 3164 netmsg_init(nmsg, NULL, &curthread->td_msgport, 3165 0, ipfw_disable_ruleset_state_dispatch); 3166 dmsg.from_set = set; 3167 3168 ifnet_domsg(&nmsg->nm_lmsg, 0); 3169 3170 /* 3171 * Nuke all related states 3172 */ 3173 lockmgr(&dyn_lock, LK_EXCLUSIVE); 3174 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) { 3175 if (rule->set != set) 3176 continue; 3177 3178 /* 3179 * Can't check IPFW_RULE_F_STATE here, 3180 * since it has been cleared previously. 3181 * Check 'stub' instead. 
3182 */ 3183 if (rule->stub != NULL) { 3184 /* Force removal */ 3185 remove_dyn_rule_locked(rule, NULL); 3186 } 3187 } 3188 lockmgr(&dyn_lock, LK_RELEASE); 3189 } 3190 3191 /* 3192 * Delete this set 3193 */ 3194 bzero(&dmsg, sizeof(dmsg)); 3195 nmsg = &dmsg.nmsg; 3196 netmsg_init(nmsg, NULL, &curthread->td_msgport, 3197 0, ipfw_alt_delete_ruleset_dispatch); 3198 dmsg.from_set = set; 3199 3200 ifnet_domsg(&nmsg->nm_lmsg, 0); 3201 return 0; 3202 } 3203 3204 static void 3205 ipfw_alt_move_rule_dispatch(struct netmsg *nmsg) 3206 { 3207 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg; 3208 struct ip_fw *rule; 3209 3210 rule = dmsg->start_rule; 3211 KKASSERT(rule->cpuid == mycpuid); 3212 3213 /* 3214 * Move to the position on the next CPU 3215 * before the msg is forwarded. 3216 */ 3217 dmsg->start_rule = rule->sibling; 3218 3219 while (rule && rule->rulenum <= dmsg->rulenum) { 3220 if (rule->rulenum == dmsg->rulenum) 3221 rule->set = dmsg->to_set; 3222 rule = rule->next; 3223 } 3224 ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1); 3225 } 3226 3227 static int 3228 ipfw_alt_move_rule(uint16_t rulenum, uint8_t set) 3229 { 3230 struct netmsg_del dmsg; 3231 struct netmsg *nmsg; 3232 struct ip_fw *rule; 3233 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 3234 3235 /* 3236 * Locate first rule to move 3237 */ 3238 for (rule = ctx->ipfw_layer3_chain; rule && rule->rulenum <= rulenum; 3239 rule = rule->next) { 3240 if (rule->rulenum == rulenum && rule->set != set) 3241 break; 3242 } 3243 if (rule == NULL || rule->rulenum > rulenum) 3244 return 0; /* XXX error? */ 3245 3246 bzero(&dmsg, sizeof(dmsg)); 3247 nmsg = &dmsg.nmsg; 3248 netmsg_init(nmsg, NULL, &curthread->td_msgport, 3249 0, ipfw_alt_move_rule_dispatch); 3250 dmsg.start_rule = rule; 3251 dmsg.rulenum = rulenum; 3252 dmsg.to_set = set; 3253 3254 ifnet_domsg(&nmsg->nm_lmsg, 0); 3255 KKASSERT(dmsg.start_rule == NULL); 3256 return 0; 3257 } 3258 3259 static void 3260 ipfw_alt_move_ruleset_dispatch(struct netmsg *nmsg) 3261 { 3262 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg; 3263 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 3264 struct ip_fw *rule; 3265 3266 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) { 3267 if (rule->set == dmsg->from_set) 3268 rule->set = dmsg->to_set; 3269 } 3270 ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1); 3271 } 3272 3273 static int 3274 ipfw_alt_move_ruleset(uint8_t from_set, uint8_t to_set) 3275 { 3276 struct netmsg_del dmsg; 3277 struct netmsg *nmsg; 3278 3279 bzero(&dmsg, sizeof(dmsg)); 3280 nmsg = &dmsg.nmsg; 3281 netmsg_init(nmsg, NULL, &curthread->td_msgport, 3282 0, ipfw_alt_move_ruleset_dispatch); 3283 dmsg.from_set = from_set; 3284 dmsg.to_set = to_set; 3285 3286 ifnet_domsg(&nmsg->nm_lmsg, 0); 3287 return 0; 3288 } 3289 3290 static void 3291 ipfw_alt_swap_ruleset_dispatch(struct netmsg *nmsg) 3292 { 3293 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg; 3294 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 3295 struct ip_fw *rule; 3296 3297 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) { 3298 if (rule->set == dmsg->from_set) 3299 rule->set = dmsg->to_set; 3300 else if (rule->set == dmsg->to_set) 3301 rule->set = dmsg->from_set; 3302 } 3303 ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1); 3304 } 3305 3306 static int 3307 ipfw_alt_swap_ruleset(uint8_t set1, uint8_t set2) 3308 { 3309 struct netmsg_del dmsg; 3310 struct netmsg *nmsg; 3311 3312 bzero(&dmsg, sizeof(dmsg)); 3313 nmsg = &dmsg.nmsg; 3314 netmsg_init(nmsg, NULL, &curthread->td_msgport, 3315 0, 
ipfw_alt_swap_ruleset_dispatch);
3316 dmsg.from_set = set1;
3317 dmsg.to_set = set2;
3318
3319 ifnet_domsg(&nmsg->nm_lmsg, 0);
3320 return 0;
3321 }
3322
3323 /**
3324 * Remove all rules with given number, and also do set manipulation.
3325 *
3326 * The argument is a uint32_t. The low 16 bits are the rule or set number,
3327 * the next 8 bits are the new set, the top 8 bits are the command:
3328 *
3329 * 0 delete rules with given number
3330 * 1 delete rules with given set number
3331 * 2 move rules with given number to new set
3332 * 3 move rules with given set number to new set
3333 * 4 swap sets with given numbers
 * For example, arg = (2 << 24) | (5 << 16) | 100 moves rule 100 into set 5.
3334 */
3335 static int
3336 ipfw_ctl_alter(uint32_t arg)
3337 {
3338 uint16_t rulenum;
3339 uint8_t cmd, new_set;
3340 int error = 0;
3341
3342 rulenum = arg & 0xffff;
3343 cmd = (arg >> 24) & 0xff;
3344 new_set = (arg >> 16) & 0xff;
3345
3346 if (cmd > 4)
3347 return EINVAL;
3348 if (new_set >= IPFW_DEFAULT_SET)
3349 return EINVAL;
3350 if (cmd == 0 || cmd == 2) {
3351 if (rulenum == IPFW_DEFAULT_RULE)
3352 return EINVAL;
3353 } else {
3354 if (rulenum >= IPFW_DEFAULT_SET)
3355 return EINVAL;
3356 }
3357
3358 switch (cmd) {
3359 case 0: /* delete rules with given number */
3360 error = ipfw_alt_delete_rule(rulenum);
3361 break;
3362
3363 case 1: /* delete all rules with given set number */
3364 error = ipfw_alt_delete_ruleset(rulenum);
3365 break;
3366
3367 case 2: /* move rules with given number to new set */
3368 error = ipfw_alt_move_rule(rulenum, new_set);
3369 break;
3370
3371 case 3: /* move rules with given set number to new set */
3372 error = ipfw_alt_move_ruleset(rulenum, new_set);
3373 break;
3374
3375 case 4: /* swap two sets */
3376 error = ipfw_alt_swap_ruleset(rulenum, new_set);
3377 break;
3378 }
3379 return error;
3380 }
3381
3382 /*
3383 * Clear counters for a specific rule.
3384 */
3385 static void
3386 clear_counters(struct ip_fw *rule, int log_only)
3387 {
3388 ipfw_insn_log *l = (ipfw_insn_log *)ACTION_PTR(rule);
3389
3390 if (log_only == 0) {
3391 rule->bcnt = rule->pcnt = 0;
3392 rule->timestamp = 0;
3393 }
3394 if (l->o.opcode == O_LOG)
3395 l->log_left = l->max_log;
3396 }
3397
3398 static void
3399 ipfw_zero_entry_dispatch(struct netmsg *nmsg)
3400 {
3401 struct netmsg_zent *zmsg = (struct netmsg_zent *)nmsg;
3402 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3403 struct ip_fw *rule;
3404
3405 if (zmsg->rulenum == 0) {
3406 KKASSERT(zmsg->start_rule == NULL);
3407
3408 ctx->ipfw_norule_counter = 0;
3409 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next)
3410 clear_counters(rule, zmsg->log_only);
3411 } else {
3412 struct ip_fw *start = zmsg->start_rule;
3413
3414 KKASSERT(start->cpuid == mycpuid);
3415 KKASSERT(start->rulenum == zmsg->rulenum);
3416
3417 /*
3418 * We can have multiple rules with the same number, so we
3419 * need to clear them all.
3420 */
3421 for (rule = start; rule && rule->rulenum == zmsg->rulenum;
3422 rule = rule->next)
3423 clear_counters(rule, zmsg->log_only);
3424
3425 /*
3426 * Move to the position on the next CPU
3427 * before the msg is forwarded.
3428 */
3429 zmsg->start_rule = start->sibling;
3430 }
3431 ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
3432 }
3433
3434 /**
3435 * Reset some or all counters on firewall rules.
3436 * @arg rulenum is 0 to clear all entries, or contains a specific
3437 * rule number.
3438 * @arg log_only is 1 if we only want to reset logs, zero otherwise.
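 * The request is broadcast to every CPU by ipfw_zero_entry_dispatch().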
3439 */ 3440 static int 3441 ipfw_ctl_zero_entry(int rulenum, int log_only) 3442 { 3443 struct netmsg_zent zmsg; 3444 struct netmsg *nmsg; 3445 const char *msg; 3446 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 3447 3448 bzero(&zmsg, sizeof(zmsg)); 3449 nmsg = &zmsg.nmsg; 3450 netmsg_init(nmsg, NULL, &curthread->td_msgport, 3451 0, ipfw_zero_entry_dispatch); 3452 zmsg.log_only = log_only; 3453 3454 if (rulenum == 0) { 3455 msg = log_only ? "ipfw: All logging counts reset.\n" 3456 : "ipfw: Accounting cleared.\n"; 3457 } else { 3458 struct ip_fw *rule; 3459 3460 /* 3461 * Locate the first rule with 'rulenum' 3462 */ 3463 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) { 3464 if (rule->rulenum == rulenum) 3465 break; 3466 } 3467 if (rule == NULL) /* we did not find any matching rules */ 3468 return (EINVAL); 3469 zmsg.start_rule = rule; 3470 zmsg.rulenum = rulenum; 3471 3472 msg = log_only ? "ipfw: Entry %d logging count reset.\n" 3473 : "ipfw: Entry %d cleared.\n"; 3474 } 3475 ifnet_domsg(&nmsg->nm_lmsg, 0); 3476 KKASSERT(zmsg.start_rule == NULL); 3477 3478 if (fw_verbose) 3479 log(LOG_SECURITY | LOG_NOTICE, msg, rulenum); 3480 return (0); 3481 } 3482 3483 /* 3484 * Check validity of the structure before insert. 3485 * Fortunately rules are simple, so this mostly need to check rule sizes. 3486 */ 3487 static int 3488 ipfw_check_ioc_rule(struct ipfw_ioc_rule *rule, int size, uint32_t *rule_flags) 3489 { 3490 int l, cmdlen = 0; 3491 int have_action = 0; 3492 ipfw_insn *cmd; 3493 3494 *rule_flags = 0; 3495 3496 /* Check for valid size */ 3497 if (size < sizeof(*rule)) { 3498 kprintf("ipfw: rule too short\n"); 3499 return EINVAL; 3500 } 3501 l = IOC_RULESIZE(rule); 3502 if (l != size) { 3503 kprintf("ipfw: size mismatch (have %d want %d)\n", size, l); 3504 return EINVAL; 3505 } 3506 3507 /* Check rule number */ 3508 if (rule->rulenum == IPFW_DEFAULT_RULE) { 3509 kprintf("ipfw: invalid rule number\n"); 3510 return EINVAL; 3511 } 3512 3513 /* 3514 * Now go for the individual checks. Very simple ones, basically only 3515 * instruction sizes. 
3516 */ 3517 for (l = rule->cmd_len, cmd = rule->cmd; l > 0; 3518 l -= cmdlen, cmd += cmdlen) { 3519 cmdlen = F_LEN(cmd); 3520 if (cmdlen > l) { 3521 kprintf("ipfw: opcode %d size truncated\n", 3522 cmd->opcode); 3523 return EINVAL; 3524 } 3525 3526 DPRINTF("ipfw: opcode %d\n", cmd->opcode); 3527 3528 if (cmd->opcode == O_KEEP_STATE || cmd->opcode == O_LIMIT) { 3529 /* This rule will create states */ 3530 *rule_flags |= IPFW_RULE_F_STATE; 3531 } 3532 3533 switch (cmd->opcode) { 3534 case O_NOP: 3535 case O_PROBE_STATE: 3536 case O_KEEP_STATE: 3537 case O_PROTO: 3538 case O_IP_SRC_ME: 3539 case O_IP_DST_ME: 3540 case O_LAYER2: 3541 case O_IN: 3542 case O_FRAG: 3543 case O_IPOPT: 3544 case O_IPLEN: 3545 case O_IPID: 3546 case O_IPTOS: 3547 case O_IPPRECEDENCE: 3548 case O_IPTTL: 3549 case O_IPVER: 3550 case O_TCPWIN: 3551 case O_TCPFLAGS: 3552 case O_TCPOPTS: 3553 case O_ESTAB: 3554 if (cmdlen != F_INSN_SIZE(ipfw_insn)) 3555 goto bad_size; 3556 break; 3557 3558 case O_UID: 3559 case O_GID: 3560 case O_IP_SRC: 3561 case O_IP_DST: 3562 case O_TCPSEQ: 3563 case O_TCPACK: 3564 case O_PROB: 3565 case O_ICMPTYPE: 3566 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32)) 3567 goto bad_size; 3568 break; 3569 3570 case O_LIMIT: 3571 if (cmdlen != F_INSN_SIZE(ipfw_insn_limit)) 3572 goto bad_size; 3573 break; 3574 3575 case O_LOG: 3576 if (cmdlen != F_INSN_SIZE(ipfw_insn_log)) 3577 goto bad_size; 3578 3579 ((ipfw_insn_log *)cmd)->log_left = 3580 ((ipfw_insn_log *)cmd)->max_log; 3581 3582 break; 3583 3584 case O_IP_SRC_MASK: 3585 case O_IP_DST_MASK: 3586 if (cmdlen != F_INSN_SIZE(ipfw_insn_ip)) 3587 goto bad_size; 3588 if (((ipfw_insn_ip *)cmd)->mask.s_addr == 0) { 3589 kprintf("ipfw: opcode %d, useless rule\n", 3590 cmd->opcode); 3591 return EINVAL; 3592 } 3593 break; 3594 3595 case O_IP_SRC_SET: 3596 case O_IP_DST_SET: 3597 if (cmd->arg1 == 0 || cmd->arg1 > 256) { 3598 kprintf("ipfw: invalid set size %d\n", 3599 cmd->arg1); 3600 return EINVAL; 3601 } 3602 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) + 3603 (cmd->arg1+31)/32 ) 3604 goto bad_size; 3605 break; 3606 3607 case O_MACADDR2: 3608 if (cmdlen != F_INSN_SIZE(ipfw_insn_mac)) 3609 goto bad_size; 3610 break; 3611 3612 case O_MAC_TYPE: 3613 case O_IP_SRCPORT: 3614 case O_IP_DSTPORT: /* XXX artificial limit, 30 port pairs */ 3615 if (cmdlen < 2 || cmdlen > 31) 3616 goto bad_size; 3617 break; 3618 3619 case O_RECV: 3620 case O_XMIT: 3621 case O_VIA: 3622 if (cmdlen != F_INSN_SIZE(ipfw_insn_if)) 3623 goto bad_size; 3624 break; 3625 3626 case O_PIPE: 3627 case O_QUEUE: 3628 if (cmdlen != F_INSN_SIZE(ipfw_insn_pipe)) 3629 goto bad_size; 3630 goto check_action; 3631 3632 case O_FORWARD_IP: 3633 if (cmdlen != F_INSN_SIZE(ipfw_insn_sa)) { 3634 goto bad_size; 3635 } else { 3636 in_addr_t fwd_addr; 3637 3638 fwd_addr = ((ipfw_insn_sa *)cmd)-> 3639 sa.sin_addr.s_addr; 3640 if (IN_MULTICAST(ntohl(fwd_addr))) { 3641 kprintf("ipfw: try forwarding to " 3642 "multicast address\n"); 3643 return EINVAL; 3644 } 3645 } 3646 goto check_action; 3647 3648 case O_FORWARD_MAC: /* XXX not implemented yet */ 3649 case O_CHECK_STATE: 3650 case O_COUNT: 3651 case O_ACCEPT: 3652 case O_DENY: 3653 case O_REJECT: 3654 case O_SKIPTO: 3655 case O_DIVERT: 3656 case O_TEE: 3657 if (cmdlen != F_INSN_SIZE(ipfw_insn)) 3658 goto bad_size; 3659 check_action: 3660 if (have_action) { 3661 kprintf("ipfw: opcode %d, multiple actions" 3662 " not allowed\n", 3663 cmd->opcode); 3664 return EINVAL; 3665 } 3666 have_action = 1; 3667 if (l != cmdlen) { 3668 kprintf("ipfw: opcode %d, action must be" 3669 " last 
opcode\n", 3670 cmd->opcode); 3671 return EINVAL; 3672 } 3673 break; 3674 default: 3675 kprintf("ipfw: opcode %d, unknown opcode\n", 3676 cmd->opcode); 3677 return EINVAL; 3678 } 3679 } 3680 if (have_action == 0) { 3681 kprintf("ipfw: missing action\n"); 3682 return EINVAL; 3683 } 3684 return 0; 3685 3686 bad_size: 3687 kprintf("ipfw: opcode %d size %d wrong\n", 3688 cmd->opcode, cmdlen); 3689 return EINVAL; 3690 } 3691 3692 static int 3693 ipfw_ctl_add_rule(struct sockopt *sopt) 3694 { 3695 struct ipfw_ioc_rule *ioc_rule; 3696 size_t size; 3697 uint32_t rule_flags; 3698 int error; 3699 3700 size = sopt->sopt_valsize; 3701 if (size > (sizeof(uint32_t) * IPFW_RULE_SIZE_MAX) || 3702 size < sizeof(*ioc_rule)) { 3703 return EINVAL; 3704 } 3705 if (size != (sizeof(uint32_t) * IPFW_RULE_SIZE_MAX)) { 3706 sopt->sopt_val = krealloc(sopt->sopt_val, sizeof(uint32_t) * 3707 IPFW_RULE_SIZE_MAX, M_TEMP, M_WAITOK); 3708 } 3709 ioc_rule = sopt->sopt_val; 3710 3711 error = ipfw_check_ioc_rule(ioc_rule, size, &rule_flags); 3712 if (error) 3713 return error; 3714 3715 ipfw_add_rule(ioc_rule, rule_flags); 3716 3717 if (sopt->sopt_dir == SOPT_GET) 3718 sopt->sopt_valsize = IOC_RULESIZE(ioc_rule); 3719 return 0; 3720 } 3721 3722 static void * 3723 ipfw_copy_rule(const struct ip_fw *rule, struct ipfw_ioc_rule *ioc_rule) 3724 { 3725 const struct ip_fw *sibling; 3726 #ifdef INVARIANTS 3727 int i; 3728 #endif 3729 3730 KKASSERT(rule->cpuid == IPFW_CFGCPUID); 3731 3732 ioc_rule->act_ofs = rule->act_ofs; 3733 ioc_rule->cmd_len = rule->cmd_len; 3734 ioc_rule->rulenum = rule->rulenum; 3735 ioc_rule->set = rule->set; 3736 ioc_rule->usr_flags = rule->usr_flags; 3737 3738 ioc_rule->set_disable = ipfw_ctx[mycpuid]->ipfw_set_disable; 3739 ioc_rule->static_count = static_count; 3740 ioc_rule->static_len = static_ioc_len; 3741 3742 /* 3743 * Visit (read-only) all of the rule's duplications to get 3744 * the necessary statistics 3745 */ 3746 #ifdef INVARIANTS 3747 i = 0; 3748 #endif 3749 ioc_rule->pcnt = 0; 3750 ioc_rule->bcnt = 0; 3751 ioc_rule->timestamp = 0; 3752 for (sibling = rule; sibling != NULL; sibling = sibling->sibling) { 3753 ioc_rule->pcnt += sibling->pcnt; 3754 ioc_rule->bcnt += sibling->bcnt; 3755 if (sibling->timestamp > ioc_rule->timestamp) 3756 ioc_rule->timestamp = sibling->timestamp; 3757 #ifdef INVARIANTS 3758 ++i; 3759 #endif 3760 } 3761 KASSERT(i == ncpus, ("static rule is not duplicated on every cpu\n")); 3762 3763 bcopy(rule->cmd, ioc_rule->cmd, ioc_rule->cmd_len * 4 /* XXX */); 3764 3765 return ((uint8_t *)ioc_rule + IOC_RULESIZE(ioc_rule)); 3766 } 3767 3768 static void 3769 ipfw_copy_state(const ipfw_dyn_rule *dyn_rule, 3770 struct ipfw_ioc_state *ioc_state) 3771 { 3772 const struct ipfw_flow_id *id; 3773 struct ipfw_ioc_flowid *ioc_id; 3774 3775 ioc_state->expire = TIME_LEQ(dyn_rule->expire, time_second) ? 
3776 0 : dyn_rule->expire - time_second;
3777 ioc_state->pcnt = dyn_rule->pcnt;
3778 ioc_state->bcnt = dyn_rule->bcnt;
3779
3780 ioc_state->dyn_type = dyn_rule->dyn_type;
3781 ioc_state->count = dyn_rule->count;
3782
3783 ioc_state->rulenum = dyn_rule->stub->rule[mycpuid]->rulenum;
3784
3785 id = &dyn_rule->id;
3786 ioc_id = &ioc_state->id;
3787
3788 ioc_id->type = ETHERTYPE_IP;
3789 ioc_id->u.ip.dst_ip = id->dst_ip;
3790 ioc_id->u.ip.src_ip = id->src_ip;
3791 ioc_id->u.ip.dst_port = id->dst_port;
3792 ioc_id->u.ip.src_port = id->src_port;
3793 ioc_id->u.ip.proto = id->proto;
3794 }
3795
3796 static int
3797 ipfw_ctl_get_rules(struct sockopt *sopt)
3798 {
3799 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3800 struct ip_fw *rule;
3801 void *bp;
3802 size_t size;
3803 uint32_t dcount = 0;
3804
3805 /*
3806 * Pass up a copy of the current rules. Static rules
3807 * come first (the last of which has number IPFW_DEFAULT_RULE),
3808 * followed by a possibly empty list of dynamic rules.
3809 */
3810
3811 size = static_ioc_len; /* size of static rules */
3812 if (ipfw_dyn_v) { /* add size of dyn.rules */
3813 dcount = dyn_count;
3814 size += dcount * sizeof(struct ipfw_ioc_state);
3815 }
3816
3817 if (sopt->sopt_valsize < size) {
3818 /* short length, no need to return incomplete rules */
3819 /* XXX: if superuser, no need to zero buffer */
3820 bzero(sopt->sopt_val, sopt->sopt_valsize);
3821 return 0;
3822 }
3823 bp = sopt->sopt_val;
3824
3825 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next)
3826 bp = ipfw_copy_rule(rule, bp);
3827
3828 if (ipfw_dyn_v && dcount != 0) {
3829 struct ipfw_ioc_state *ioc_state = bp;
3830 uint32_t dcount2 = 0;
3831 #ifdef INVARIANTS
3832 size_t old_size = size;
3833 #endif
3834 int i;
3835
3836 lockmgr(&dyn_lock, LK_SHARED);
3837
3838 /* Check 'ipfw_dyn_v' again with lock held */
3839 if (ipfw_dyn_v == NULL)
3840 goto skip;
3841
3842 for (i = 0; i < curr_dyn_buckets; i++) {
3843 ipfw_dyn_rule *p;
3844
3845 /*
3846 * The # of dynamic rules may have grown after the
3847 * snapshot of 'dyn_count' was taken, so we will have
3848 * to check 'dcount' (snapshot of dyn_count) here to
3849 * make sure that we don't overflow the pre-allocated
3850 * buffer.
3851 */
3852 for (p = ipfw_dyn_v[i]; p != NULL && dcount != 0;
3853 p = p->next, ioc_state++, dcount--, dcount2++)
3854 ipfw_copy_state(p, ioc_state);
3855 }
3856 skip:
3857 lockmgr(&dyn_lock, LK_RELEASE);
3858
3859 /*
3860 * The # of dynamic rules may have shrunk after the
3861 * snapshot of 'dyn_count' was taken. To give the user a
3862 * correct dynamic rule count, we use the 'dcount2'
3863 * calculated above (with shared lockmgr lock held).
3864 */ 3865 size = static_ioc_len + 3866 (dcount2 * sizeof(struct ipfw_ioc_state)); 3867 KKASSERT(size <= old_size); 3868 } 3869 3870 sopt->sopt_valsize = size; 3871 return 0; 3872 } 3873 3874 static void 3875 ipfw_set_disable_dispatch(struct netmsg *nmsg) 3876 { 3877 struct lwkt_msg *lmsg = &nmsg->nm_lmsg; 3878 struct ipfw_context *ctx = ipfw_ctx[mycpuid]; 3879 3880 ctx->ipfw_gen++; 3881 ctx->ipfw_set_disable = lmsg->u.ms_result32; 3882 3883 ifnet_forwardmsg(lmsg, mycpuid + 1); 3884 } 3885 3886 static void 3887 ipfw_ctl_set_disable(uint32_t disable, uint32_t enable) 3888 { 3889 struct netmsg nmsg; 3890 struct lwkt_msg *lmsg; 3891 uint32_t set_disable; 3892 3893 /* IPFW_DEFAULT_SET is always enabled */ 3894 enable |= (1 << IPFW_DEFAULT_SET); 3895 set_disable = (ipfw_ctx[mycpuid]->ipfw_set_disable | disable) & ~enable; 3896 3897 bzero(&nmsg, sizeof(nmsg)); 3898 netmsg_init(&nmsg, NULL, &curthread->td_msgport, 3899 0, ipfw_set_disable_dispatch); 3900 lmsg = &nmsg.nm_lmsg; 3901 lmsg->u.ms_result32 = set_disable; 3902 3903 ifnet_domsg(lmsg, 0); 3904 } 3905 3906 /** 3907 * {set|get}sockopt parser. 3908 */ 3909 static int 3910 ipfw_ctl(struct sockopt *sopt) 3911 { 3912 int error, rulenum; 3913 uint32_t *masks; 3914 size_t size; 3915 3916 error = 0; 3917 3918 switch (sopt->sopt_name) { 3919 case IP_FW_GET: 3920 error = ipfw_ctl_get_rules(sopt); 3921 break; 3922 3923 case IP_FW_FLUSH: 3924 ipfw_flush(0 /* keep default rule */); 3925 break; 3926 3927 case IP_FW_ADD: 3928 error = ipfw_ctl_add_rule(sopt); 3929 break; 3930 3931 case IP_FW_DEL: 3932 /* 3933 * IP_FW_DEL is used for deleting single rules or sets, 3934 * and (ab)used to atomically manipulate sets. 3935 * Argument size is used to distinguish between the two: 3936 * sizeof(uint32_t) 3937 * delete single rule or set of rules, 3938 * or reassign rules (or sets) to a different set. 3939 * 2 * sizeof(uint32_t) 3940 * atomic disable/enable sets. 3941 * first uint32_t contains sets to be disabled, 3942 * second uint32_t contains sets to be enabled. 3943 */ 3944 masks = sopt->sopt_val; 3945 size = sopt->sopt_valsize; 3946 if (size == sizeof(*masks)) { 3947 /* 3948 * Delete or reassign static rule 3949 */ 3950 error = ipfw_ctl_alter(masks[0]); 3951 } else if (size == (2 * sizeof(*masks))) { 3952 /* 3953 * Set enable/disable 3954 */ 3955 ipfw_ctl_set_disable(masks[0], masks[1]); 3956 } else { 3957 error = EINVAL; 3958 } 3959 break; 3960 3961 case IP_FW_ZERO: 3962 case IP_FW_RESETLOG: /* argument is an int, the rule number */ 3963 rulenum = 0; 3964 3965 if (sopt->sopt_val != 0) { 3966 error = soopt_to_kbuf(sopt, &rulenum, 3967 sizeof(int), sizeof(int)); 3968 if (error) 3969 break; 3970 } 3971 error = ipfw_ctl_zero_entry(rulenum, 3972 sopt->sopt_name == IP_FW_RESETLOG); 3973 break; 3974 3975 default: 3976 kprintf("ipfw_ctl invalid option %d\n", sopt->sopt_name); 3977 error = EINVAL; 3978 } 3979 return error; 3980 } 3981 3982 /* 3983 * This procedure is only used to handle keepalives. 
It is invoked
3984 * every dyn_keepalive_period
3985 */
3986 static void
3987 ipfw_tick_dispatch(struct netmsg *nmsg)
3988 {
3989 time_t keep_alive;
3990 uint32_t gen;
3991 int i;
3992
3993 IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
3994 KKASSERT(IPFW_LOADED);
3995
3996 /* Reply ASAP */
3997 crit_enter();
3998 lwkt_replymsg(&nmsg->nm_lmsg, 0);
3999 crit_exit();
4000
4001 if (ipfw_dyn_v == NULL || dyn_count == 0)
4002 goto done;
4003
4004 keep_alive = time_second;
4005
4006 lockmgr(&dyn_lock, LK_EXCLUSIVE);
4007 again:
4008 if (ipfw_dyn_v == NULL || dyn_count == 0) {
4009 lockmgr(&dyn_lock, LK_RELEASE);
4010 goto done;
4011 }
4012 gen = dyn_buckets_gen;
4013
4014 for (i = 0; i < curr_dyn_buckets; i++) {
4015 ipfw_dyn_rule *q, *prev;
4016
4017 for (prev = NULL, q = ipfw_dyn_v[i]; q != NULL;) {
4018 uint32_t ack_rev, ack_fwd;
4019 struct ipfw_flow_id id;
4020
4021 if (q->dyn_type == O_LIMIT_PARENT)
4022 goto next;
4023
4024 if (TIME_LEQ(q->expire, time_second)) {
4025 /* State expired */
4026 UNLINK_DYN_RULE(prev, ipfw_dyn_v[i], q);
4027 continue;
4028 }
4029
4030 /*
4031 * Keep alive processing
4032 */
4033
4034 if (!dyn_keepalive)
4035 goto next;
4036 if (q->id.proto != IPPROTO_TCP)
4037 goto next;
4038 if ((q->state & BOTH_SYN) != BOTH_SYN)
4039 goto next;
4040 if (TIME_LEQ(time_second + dyn_keepalive_interval,
4041 q->expire))
4042 goto next; /* too early */
4043 if (q->keep_alive == keep_alive)
4044 goto next; /* already done */
4045
4046 /*
4047 * Save the necessary information, so that it
4048 * survives possible blocking in send_pkt()
4049 */
4050 id = q->id;
4051 ack_rev = q->ack_rev;
4052 ack_fwd = q->ack_fwd;
4053
4054 /* Sending has been started */
4055 q->keep_alive = keep_alive;
4056
4057 /* Release lock to avoid a possible deadlock */
4058 lockmgr(&dyn_lock, LK_RELEASE);
4059 send_pkt(&id, ack_rev - 1, ack_fwd, TH_SYN);
4060 send_pkt(&id, ack_fwd - 1, ack_rev, 0);
4061 lockmgr(&dyn_lock, LK_EXCLUSIVE);
4062
4063 if (gen != dyn_buckets_gen) {
4064 /*
4065 * Dyn bucket array has been changed while
4066 * the two packets above were sent; reiterate.
4067 */
4068 goto again;
4069 }
4070 next:
4071 prev = q;
4072 q = q->next;
4073 }
4074 }
4075 lockmgr(&dyn_lock, LK_RELEASE);
4076 done:
4077 callout_reset(&ipfw_timeout_h, dyn_keepalive_period * hz,
4078 ipfw_tick, NULL);
4079 }
4080
4081 /*
4082 * This procedure is only used to handle keepalives.
It is invoked 4083 * every dyn_keepalive_period 4084 */ 4085 static void 4086 ipfw_tick(void *dummy __unused) 4087 { 4088 struct lwkt_msg *lmsg = &ipfw_timeout_netmsg.nm_lmsg; 4089 4090 KKASSERT(mycpuid == IPFW_CFGCPUID); 4091 4092 crit_enter(); 4093 4094 KKASSERT(lmsg->ms_flags & MSGF_DONE); 4095 if (IPFW_LOADED) { 4096 lwkt_sendmsg(IPFW_CFGPORT, lmsg); 4097 /* ipfw_timeout_netmsg's handler reset this callout */ 4098 } 4099 4100 crit_exit(); 4101 } 4102 4103 static int 4104 ipfw_check_in(void *arg, struct mbuf **m0, struct ifnet *ifp, int dir) 4105 { 4106 struct ip_fw_args args; 4107 struct mbuf *m = *m0; 4108 struct m_tag *mtag; 4109 int tee = 0, error = 0, ret; 4110 4111 if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED) { 4112 /* Extract info from dummynet tag */ 4113 mtag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL); 4114 KKASSERT(mtag != NULL); 4115 args.rule = ((struct dn_pkt *)m_tag_data(mtag))->dn_priv; 4116 KKASSERT(args.rule != NULL); 4117 4118 m_tag_delete(m, mtag); 4119 m->m_pkthdr.fw_flags &= ~DUMMYNET_MBUF_TAGGED; 4120 } else { 4121 args.rule = NULL; 4122 } 4123 4124 args.eh = NULL; 4125 args.oif = NULL; 4126 args.m = m; 4127 ret = ipfw_chk(&args); 4128 m = args.m; 4129 4130 if (m == NULL) { 4131 error = EACCES; 4132 goto back; 4133 } 4134 4135 switch (ret) { 4136 case IP_FW_PASS: 4137 break; 4138 4139 case IP_FW_DENY: 4140 m_freem(m); 4141 m = NULL; 4142 error = EACCES; 4143 break; 4144 4145 case IP_FW_DUMMYNET: 4146 /* Send packet to the appropriate pipe */ 4147 ipfw_dummynet_io(m, args.cookie, DN_TO_IP_IN, &args); 4148 break; 4149 4150 case IP_FW_TEE: 4151 tee = 1; 4152 /* FALL THROUGH */ 4153 4154 case IP_FW_DIVERT: 4155 if (ip_divert_p != NULL) { 4156 m = ip_divert_p(m, tee, 1); 4157 } else { 4158 m_freem(m); 4159 m = NULL; 4160 /* not sure this is the right error msg */ 4161 error = EACCES; 4162 } 4163 break; 4164 4165 default: 4166 panic("unknown ipfw return value: %d\n", ret); 4167 } 4168 back: 4169 *m0 = m; 4170 return error; 4171 } 4172 4173 static int 4174 ipfw_check_out(void *arg, struct mbuf **m0, struct ifnet *ifp, int dir) 4175 { 4176 struct ip_fw_args args; 4177 struct mbuf *m = *m0; 4178 struct m_tag *mtag; 4179 int tee = 0, error = 0, ret; 4180 4181 if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED) { 4182 /* Extract info from dummynet tag */ 4183 mtag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL); 4184 KKASSERT(mtag != NULL); 4185 args.rule = ((struct dn_pkt *)m_tag_data(mtag))->dn_priv; 4186 KKASSERT(args.rule != NULL); 4187 4188 m_tag_delete(m, mtag); 4189 m->m_pkthdr.fw_flags &= ~DUMMYNET_MBUF_TAGGED; 4190 } else { 4191 args.rule = NULL; 4192 } 4193 4194 args.eh = NULL; 4195 args.m = m; 4196 args.oif = ifp; 4197 ret = ipfw_chk(&args); 4198 m = args.m; 4199 4200 if (m == NULL) { 4201 error = EACCES; 4202 goto back; 4203 } 4204 4205 switch (ret) { 4206 case IP_FW_PASS: 4207 break; 4208 4209 case IP_FW_DENY: 4210 m_freem(m); 4211 m = NULL; 4212 error = EACCES; 4213 break; 4214 4215 case IP_FW_DUMMYNET: 4216 ipfw_dummynet_io(m, args.cookie, DN_TO_IP_OUT, &args); 4217 break; 4218 4219 case IP_FW_TEE: 4220 tee = 1; 4221 /* FALL THROUGH */ 4222 4223 case IP_FW_DIVERT: 4224 if (ip_divert_p != NULL) { 4225 m = ip_divert_p(m, tee, 0); 4226 } else { 4227 m_freem(m); 4228 m = NULL; 4229 /* not sure this is the right error msg */ 4230 error = EACCES; 4231 } 4232 break; 4233 4234 default: 4235 panic("unknown ipfw return value: %d\n", ret); 4236 } 4237 back: 4238 *m0 = m; 4239 return error; 4240 } 4241 4242 static void 4243 ipfw_hook(void) 4244 { 4245 struct pfil_head *pfh; 
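/* Register the ipfw input/output hooks with the pfil framework for AF_INET. */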

static void
ipfw_hook(void)
{
        struct pfil_head *pfh;

        IPFW_ASSERT_CFGPORT(&curthread->td_msgport);

        pfh = pfil_head_get(PFIL_TYPE_AF, AF_INET);
        if (pfh == NULL)
                return;

        pfil_add_hook(ipfw_check_in, NULL, PFIL_IN | PFIL_MPSAFE, pfh);
        pfil_add_hook(ipfw_check_out, NULL, PFIL_OUT | PFIL_MPSAFE, pfh);
}

static void
ipfw_dehook(void)
{
        struct pfil_head *pfh;

        IPFW_ASSERT_CFGPORT(&curthread->td_msgport);

        pfh = pfil_head_get(PFIL_TYPE_AF, AF_INET);
        if (pfh == NULL)
                return;

        pfil_remove_hook(ipfw_check_in, NULL, PFIL_IN, pfh);
        pfil_remove_hook(ipfw_check_out, NULL, PFIL_OUT, pfh);
}

static void
ipfw_sysctl_enable_dispatch(struct netmsg *nmsg)
{
        struct lwkt_msg *lmsg = &nmsg->nm_lmsg;
        int enable = lmsg->u.ms_result;

        if (fw_enable == enable)
                goto reply;

        fw_enable = enable;
        if (fw_enable)
                ipfw_hook();
        else
                ipfw_dehook();
reply:
        lwkt_replymsg(lmsg, 0);
}

static int
ipfw_sysctl_enable(SYSCTL_HANDLER_ARGS)
{
        struct netmsg nmsg;
        struct lwkt_msg *lmsg;
        int enable, error;

        enable = fw_enable;
        error = sysctl_handle_int(oidp, &enable, 0, req);
        if (error || req->newptr == NULL)
                return error;

        netmsg_init(&nmsg, NULL, &curthread->td_msgport,
            0, ipfw_sysctl_enable_dispatch);
        lmsg = &nmsg.nm_lmsg;
        lmsg->u.ms_result = enable;

        return lwkt_domsg(IPFW_CFGPORT, lmsg, 0);
}

static int
ipfw_sysctl_autoinc_step(SYSCTL_HANDLER_ARGS)
{
        return sysctl_int_range(oidp, arg1, arg2, req,
            IPFW_AUTOINC_STEP_MIN, IPFW_AUTOINC_STEP_MAX);
}

static int
ipfw_sysctl_dyn_buckets(SYSCTL_HANDLER_ARGS)
{
        int error, value;

        lockmgr(&dyn_lock, LK_EXCLUSIVE);

        value = dyn_buckets;
        error = sysctl_handle_int(oidp, &value, 0, req);
        if (error || !req->newptr)
                goto back;

        /*
         * Make sure we have a power of 2 and
         * do not allow more than 64k entries.
         */
        error = EINVAL;
        if (value <= 1 || value > 65536)
                goto back;
        if ((value & (value - 1)) != 0)
                goto back;

        error = 0;
        dyn_buckets = value;
back:
        lockmgr(&dyn_lock, LK_RELEASE);
        return error;
}

static int
ipfw_sysctl_dyn_fin(SYSCTL_HANDLER_ARGS)
{
        return sysctl_int_range(oidp, arg1, arg2, req,
            1, dyn_keepalive_period - 1);
}

static int
ipfw_sysctl_dyn_rst(SYSCTL_HANDLER_ARGS)
{
        return sysctl_int_range(oidp, arg1, arg2, req,
            1, dyn_keepalive_period - 1);
}

static void
ipfw_ctx_init_dispatch(struct netmsg *nmsg)
{
        struct netmsg_ipfw *fwmsg = (struct netmsg_ipfw *)nmsg;
        struct ipfw_context *ctx;
        struct ip_fw *def_rule;

        ctx = kmalloc(sizeof(*ctx), M_IPFW, M_WAITOK | M_ZERO);
        ipfw_ctx[mycpuid] = ctx;

        def_rule = kmalloc(sizeof(*def_rule), M_IPFW, M_WAITOK | M_ZERO);

        def_rule->act_ofs = 0;
        def_rule->rulenum = IPFW_DEFAULT_RULE;
        def_rule->cmd_len = 1;
        def_rule->set = IPFW_DEFAULT_SET;

        def_rule->cmd[0].len = 1;
#ifdef IPFIREWALL_DEFAULT_TO_ACCEPT
        def_rule->cmd[0].opcode = O_ACCEPT;
#else
        def_rule->cmd[0].opcode = O_DENY;
#endif

        def_rule->refcnt = 1;
        def_rule->cpuid = mycpuid;

        /* Install the default rule */
        ctx->ipfw_default_rule = def_rule;
        ctx->ipfw_layer3_chain = def_rule;

        /* Link rule CPU sibling */
        ipfw_link_sibling(fwmsg, def_rule);

        /* Statistics only need to be updated once */
        if (mycpuid == 0)
                ipfw_inc_static_count(def_rule);

        ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
}

static void
ipfw_init_dispatch(struct netmsg *nmsg)
{
        struct netmsg_ipfw fwmsg;
        int error = 0;

        if (IPFW_LOADED) {
                kprintf("IP firewall already loaded\n");
                error = EEXIST;
                goto reply;
        }

        bzero(&fwmsg, sizeof(fwmsg));
        netmsg_init(&fwmsg.nmsg, NULL, &curthread->td_msgport,
            0, ipfw_ctx_init_dispatch);
        ifnet_domsg(&fwmsg.nmsg.nm_lmsg, 0);

        ip_fw_chk_ptr = ipfw_chk;
        ip_fw_ctl_ptr = ipfw_ctl;
        ip_fw_dn_io_ptr = ipfw_dummynet_io;

        kprintf("ipfw2 initialized, default to %s, logging ",
            ipfw_ctx[mycpuid]->ipfw_default_rule->cmd[0].opcode == O_ACCEPT ?
"accept" : "deny"); 4425 4426 #ifdef IPFIREWALL_VERBOSE 4427 fw_verbose = 1; 4428 #endif 4429 #ifdef IPFIREWALL_VERBOSE_LIMIT 4430 verbose_limit = IPFIREWALL_VERBOSE_LIMIT; 4431 #endif 4432 if (fw_verbose == 0) { 4433 kprintf("disabled\n"); 4434 } else if (verbose_limit == 0) { 4435 kprintf("unlimited\n"); 4436 } else { 4437 kprintf("limited to %d packets/entry by default\n", 4438 verbose_limit); 4439 } 4440 4441 callout_init_mp(&ipfw_timeout_h); 4442 netmsg_init(&ipfw_timeout_netmsg, NULL, &netisr_adone_rport, 4443 MSGF_MPSAFE | MSGF_DROPABLE | MSGF_PRIORITY, 4444 ipfw_tick_dispatch); 4445 lockinit(&dyn_lock, "ipfw_dyn", 0, 0); 4446 4447 ip_fw_loaded = 1; 4448 callout_reset(&ipfw_timeout_h, hz, ipfw_tick, NULL); 4449 4450 if (fw_enable) 4451 ipfw_hook(); 4452 reply: 4453 lwkt_replymsg(&nmsg->nm_lmsg, error); 4454 } 4455 4456 static int 4457 ipfw_init(void) 4458 { 4459 struct netmsg smsg; 4460 4461 netmsg_init(&smsg, NULL, &curthread->td_msgport, 4462 0, ipfw_init_dispatch); 4463 return lwkt_domsg(IPFW_CFGPORT, &smsg.nm_lmsg, 0); 4464 } 4465 4466 #ifdef KLD_MODULE 4467 4468 static void 4469 ipfw_fini_dispatch(struct netmsg *nmsg) 4470 { 4471 int error = 0, cpu; 4472 4473 if (ipfw_refcnt != 0) { 4474 error = EBUSY; 4475 goto reply; 4476 } 4477 4478 ip_fw_loaded = 0; 4479 4480 ipfw_dehook(); 4481 callout_stop(&ipfw_timeout_h); 4482 4483 netmsg_service_sync(); 4484 4485 crit_enter(); 4486 if ((ipfw_timeout_netmsg.nm_lmsg.ms_flags & MSGF_DONE) == 0) { 4487 /* 4488 * Callout message is pending; drop it 4489 */ 4490 lwkt_dropmsg(&ipfw_timeout_netmsg.nm_lmsg); 4491 } 4492 crit_exit(); 4493 4494 ip_fw_chk_ptr = NULL; 4495 ip_fw_ctl_ptr = NULL; 4496 ip_fw_dn_io_ptr = NULL; 4497 ipfw_flush(1 /* kill default rule */); 4498 4499 /* Free pre-cpu context */ 4500 for (cpu = 0; cpu < ncpus; ++cpu) 4501 kfree(ipfw_ctx[cpu], M_IPFW); 4502 4503 kprintf("IP firewall unloaded\n"); 4504 reply: 4505 lwkt_replymsg(&nmsg->nm_lmsg, error); 4506 } 4507 4508 static int 4509 ipfw_fini(void) 4510 { 4511 struct netmsg smsg; 4512 4513 netmsg_init(&smsg, NULL, &curthread->td_msgport, 4514 0, ipfw_fini_dispatch); 4515 return lwkt_domsg(IPFW_CFGPORT, &smsg.nm_lmsg, 0); 4516 } 4517 4518 #endif /* KLD_MODULE */ 4519 4520 static int 4521 ipfw_modevent(module_t mod, int type, void *unused) 4522 { 4523 int err = 0; 4524 4525 switch (type) { 4526 case MOD_LOAD: 4527 err = ipfw_init(); 4528 break; 4529 4530 case MOD_UNLOAD: 4531 #ifndef KLD_MODULE 4532 kprintf("ipfw statically compiled, cannot unload\n"); 4533 err = EBUSY; 4534 #else 4535 err = ipfw_fini(); 4536 #endif 4537 break; 4538 default: 4539 break; 4540 } 4541 return err; 4542 } 4543 4544 static moduledata_t ipfwmod = { 4545 "ipfw", 4546 ipfw_modevent, 4547 0 4548 }; 4549 DECLARE_MODULE(ipfw, ipfwmod, SI_SUB_PROTO_END, SI_ORDER_ANY); 4550 MODULE_VERSION(ipfw, 1); 4551