/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002 - 2008 Henning Brauer
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 */

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/filio.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/spinlock.h>

#include <sys/md5.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/bpf.h>
#include <net/netisr2.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/udp.h>
#include <netinet/ip_icmp.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/udp_var.h>
#include <netinet/icmp_var.h>
#include <netinet/if_ether.h>

#include <net/pf/pfvar.h>
#include <net/pf/if_pflog.h>

#include <net/pf/if_pfsync.h>

#ifdef INET6
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#endif /* INET6 */

#include <sys/in_cksum.h>
#include <sys/ucred.h>
#include <machine/limits.h>
#include <sys/msgport2.h>
#include <sys/spinlock2.h>
#include <net/netmsg2.h>
#include <net/toeplitz2.h>

extern int ip_optcopy(struct ip *, struct ip *);
extern int debug_pfugidhack;

/*
 * pf_token - shared lock for cpu-localized operations,
 *	      exclusive lock otherwise.
 *
 * pf_gtoken - exclusive lock used for initialization.
 *
 * pf_spin - only used to atomically fetch and increment stateid
 *	     on 32-bit systems.
 */
struct lwkt_token pf_token = LWKT_TOKEN_INITIALIZER(pf_token);
struct lwkt_token pf_gtoken = LWKT_TOKEN_INITIALIZER(pf_gtoken);
#if __SIZEOF_LONG__ != 8
struct spinlock pf_spin = SPINLOCK_INITIALIZER(pf_spin, "pf_spin");
#endif
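/*
 * Editor's illustrative sketch (not part of the original source): a
 * typical cpu-localized packet path would hold pf_token shared, while
 * configuration and purge paths hold it exclusively, e.g.:
 *
 *	lwkt_gettoken_shared(&pf_token);
 *	... per-cpu state table lookup for this packet ...
 *	lwkt_reltoken(&pf_token);
 */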

#define DPFPRINTF(n, x)	if (pf_status.debug >= (n)) kprintf x

#define FAIL(code)	{ error = (code); goto done; }

/*
 * Global variables
 */

/* mask radix tree */
struct radix_node_head	*pf_maskhead;

/* state tables */
struct pf_state_tree	*pf_statetbl;		/* incls one global table */
struct pf_state		**purge_cur;
struct pf_altqqueue	pf_altqs[2];
struct pf_palist	pf_pabuf;
struct pf_altqqueue	*pf_altqs_active;
struct pf_altqqueue	*pf_altqs_inactive;
struct pf_status	pf_status;

u_int32_t		ticket_altqs_active;
u_int32_t		ticket_altqs_inactive;
int			altqs_inactive_open;
u_int32_t		ticket_pabuf;

MD5_CTX			pf_tcp_secret_ctx;
u_char			pf_tcp_secret[16];
int			pf_tcp_secret_init;
int			pf_tcp_iss_off;

struct pf_anchor_stackframe {
	struct pf_ruleset	*rs;
	struct pf_rule		*r;
	struct pf_anchor_node	*parent;
	struct pf_anchor	*child;
} pf_anchor_stack[64];

struct malloc_type	*pf_src_tree_pl, *pf_rule_pl, *pf_pooladdr_pl;
struct malloc_type	*pf_state_pl, *pf_state_key_pl, *pf_state_item_pl;
struct malloc_type	*pf_altq_pl;

void	pf_print_host(struct pf_addr *, u_int16_t, u_int8_t);

void	pf_init_threshold(struct pf_threshold *, u_int32_t, u_int32_t);
void	pf_add_threshold(struct pf_threshold *);
int	pf_check_threshold(struct pf_threshold *);

void	pf_change_ap(struct pf_addr *, u_int16_t *, u_int16_t *,
	    u_int16_t *, struct pf_addr *, u_int16_t, u_int8_t, sa_family_t);
int	pf_modulate_sack(struct mbuf *, int, struct pf_pdesc *,
	    struct tcphdr *, struct pf_state_peer *);
#ifdef INET6
void	pf_change_a6(struct pf_addr *, u_int16_t *, struct pf_addr *,
	    u_int8_t);
#endif /* INET6 */
void	pf_change_icmp(struct pf_addr *, u_int16_t *, struct pf_addr *,
	    struct pf_addr *, u_int16_t, u_int16_t *, u_int16_t *,
	    u_int16_t *, u_int16_t *, u_int8_t, sa_family_t);
void	pf_send_tcp(const struct pf_rule *, sa_family_t,
	    const struct pf_addr *, const struct pf_addr *,
	    u_int16_t, u_int16_t, u_int32_t, u_int32_t,
	    u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
	    u_int16_t, struct ether_header *, struct ifnet *);
void	pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t, sa_family_t,
	    struct pf_rule *);
struct pf_rule	*pf_match_translation(struct pf_pdesc *, struct mbuf *,
		    int, int, struct pfi_kif *, struct pf_addr *,
		    u_int16_t, struct pf_addr *, u_int16_t, int);
struct pf_rule	*pf_get_translation(struct pf_pdesc *, struct mbuf *,
		    int, int, struct pfi_kif *, struct pf_src_node **,
		    struct pf_state_key **, struct pf_state_key **,
		    struct pf_state_key **, struct pf_state_key **,
		    struct pf_addr *, struct pf_addr *,
		    u_int16_t, u_int16_t);
void	pf_detach_state(struct pf_state *);
int	pf_state_key_setup(struct pf_pdesc *, struct pf_rule *,
	    struct pf_state_key **, struct pf_state_key **,
	    struct pf_state_key **, struct pf_state_key **,
	    struct pf_addr *, struct pf_addr *, u_int16_t, u_int16_t);
void	pf_state_key_detach(struct pf_state *, int);
u_int32_t	pf_tcp_iss(struct pf_pdesc *);
int	pf_test_rule(struct pf_rule **, struct pf_state **, int,
	    struct pfi_kif *, struct mbuf *, int, void *,
	    struct pf_pdesc *, struct pf_rule **, struct pf_ruleset **,
	    struct ifqueue *, struct inpcb *);
static __inline int	pf_create_state(struct pf_rule *, struct pf_rule *,
			    struct pf_rule *, struct pf_pdesc *,
			    struct pf_src_node *, struct pf_state_key *,
			    struct pf_state_key *, struct pf_state_key *,
			    struct pf_state_key *, struct mbuf *, int,
			    u_int16_t, u_int16_t, int *, struct pfi_kif *,
			    struct pf_state **, int, u_int16_t, u_int16_t,
			    int);
int	pf_test_fragment(struct pf_rule **, int, struct pfi_kif *,
	    struct mbuf *, void *, struct pf_pdesc *, struct pf_rule **,
	    struct pf_ruleset **);
int	pf_tcp_track_full(struct pf_state_peer *, struct pf_state_peer *,
	    struct pf_state **, struct pfi_kif *, struct mbuf *, int,
	    struct pf_pdesc *, u_short *, int *);
int	pf_tcp_track_sloppy(struct pf_state_peer *, struct pf_state_peer *,
	    struct pf_state **, struct pf_pdesc *, u_short *);
int	pf_test_state_tcp(struct pf_state **, int, struct pfi_kif *,
	    struct mbuf *, int, void *, struct pf_pdesc *, u_short *);
int	pf_test_state_udp(struct pf_state **, int, struct pfi_kif *,
	    struct mbuf *, int, void *, struct pf_pdesc *);
int	pf_test_state_icmp(struct pf_state **, int, struct pfi_kif *,
	    struct mbuf *, int, void *, struct pf_pdesc *, u_short *);
int	pf_test_state_other(struct pf_state **, int, struct pfi_kif *,
	    struct mbuf *, struct pf_pdesc *);
void	pf_step_into_anchor(int *, struct pf_ruleset **, int,
	    struct pf_rule **, struct pf_rule **, int *);
int	pf_step_out_of_anchor(int *, struct pf_ruleset **, int,
	    struct pf_rule **, struct pf_rule **, int *);
void	pf_hash(struct pf_addr *, struct pf_addr *,
	    struct pf_poolhashkey *, sa_family_t);
int	pf_map_addr(u_int8_t, struct pf_rule *, struct pf_addr *,
	    struct pf_addr *, struct pf_addr *, struct pf_src_node **);
int	pf_get_sport(struct pf_pdesc *, sa_family_t, u_int8_t,
	    struct pf_rule *, struct pf_addr *, struct pf_addr *,
	    u_int16_t, u_int16_t, struct pf_addr *, u_int16_t *,
	    u_int16_t, u_int16_t, struct pf_src_node **);
void	pf_route(struct mbuf **, struct pf_rule *, int,
	    struct ifnet *, struct pf_state *, struct pf_pdesc *);
void	pf_route6(struct mbuf **, struct pf_rule *, int,
	    struct ifnet *, struct pf_state *, struct pf_pdesc *);
u_int8_t	pf_get_wscale(struct mbuf *, int, u_int16_t, sa_family_t);
u_int16_t	pf_get_mss(struct mbuf *, int, u_int16_t, sa_family_t);
u_int16_t	pf_calc_mss(struct pf_addr *, sa_family_t, u_int16_t);
void	pf_set_rt_ifp(struct pf_state *, struct pf_addr *);
int	pf_check_proto_cksum(struct mbuf *, int, int, u_int8_t,
	    sa_family_t);
struct pf_divert	*pf_get_divert(struct mbuf *);
void	pf_print_state_parts(struct pf_state *,
	    struct pf_state_key *, struct pf_state_key *);
int	pf_addr_wrap_neq(struct pf_addr_wrap *, struct pf_addr_wrap *);
struct pf_state	*pf_find_state(struct pfi_kif *,
		    struct pf_state_key_cmp *, u_int, struct mbuf *);
int	pf_src_connlimit(struct pf_state *);
int	pf_check_congestion(struct ifqueue *);

extern int pf_end_threads;

struct pf_pool_limit pf_pool_limits[PF_LIMIT_MAX] = {
	{ &pf_state_pl,		PFSTATE_HIWAT },
	{ &pf_src_tree_pl,	PFSNODE_HIWAT },
	{ &pf_frent_pl,		PFFRAG_FRENT_HIWAT },
	{ &pfr_ktable_pl,	PFR_KTABLE_HIWAT },
	{ &pfr_kentry_pl,	PFR_KENTRY_HIWAT }
};

/*
 * If route-to and direction is out we match with no further processing
 *	(rt_kif must be assigned and not equal to the out interface)
 * If reply-to and direction is in we match with no further processing
 *	(rt_kif must be assigned and not equal to the in interface)
 */
#define STATE_LOOKUP(i, k, d, s, m)					\
	do {								\
		s = pf_find_state(i, k, d, m);				\
		if (s == NULL || (s)->timeout == PFTM_PURGE)		\
			return (PF_DROP);				\
		if (d == PF_OUT &&					\
		    (((s)->rule.ptr->rt == PF_ROUTETO &&		\
		    (s)->rule.ptr->direction == PF_OUT) ||		\
		    ((s)->rule.ptr->rt == PF_REPLYTO &&			\
		    (s)->rule.ptr->direction == PF_IN)) &&		\
		    (s)->rt_kif != NULL &&				\
		    (s)->rt_kif != i)					\
			return (PF_PASS);				\
	} while (0)

#define BOUND_IFACE(r, k) \
	((r)->rule_flag & PFRULE_IFBOUND) ? (k) : pfi_all

#define STATE_INC_COUNTERS(s)						\
	do {								\
		atomic_add_int(&s->rule.ptr->states_cur, 1);		\
		s->rule.ptr->states_tot++;				\
		if (s->anchor.ptr != NULL) {				\
			atomic_add_int(&s->anchor.ptr->states_cur, 1);	\
			s->anchor.ptr->states_tot++;			\
		}							\
		if (s->nat_rule.ptr != NULL) {				\
			atomic_add_int(&s->nat_rule.ptr->states_cur, 1); \
			s->nat_rule.ptr->states_tot++;			\
		}							\
	} while (0)

#define STATE_DEC_COUNTERS(s)						\
	do {								\
		if (s->nat_rule.ptr != NULL)				\
			atomic_add_int(&s->nat_rule.ptr->states_cur, -1); \
		if (s->anchor.ptr != NULL)				\
			atomic_add_int(&s->anchor.ptr->states_cur, -1);	\
		atomic_add_int(&s->rule.ptr->states_cur, -1);		\
	} while (0)

static MALLOC_DEFINE(M_PFSTATEPL, "pfstatepl", "pf state pool list");
static MALLOC_DEFINE(M_PFSRCTREEPL, "pfsrctpl", "pf source tree pool list");
static MALLOC_DEFINE(M_PFSTATEKEYPL, "pfstatekeypl", "pf state key pool list");
static MALLOC_DEFINE(M_PFSTATEITEMPL, "pfstateitempl",
		     "pf state item pool list");

static __inline int pf_src_compare(struct pf_src_node *,
			struct pf_src_node *);
static __inline int pf_state_compare_key(struct pf_state_key *,
			struct pf_state_key *);
static __inline int pf_state_compare_rkey(struct pf_state_key *,
			struct pf_state_key *);
static __inline int pf_state_compare_id(struct pf_state *,
			struct pf_state *);

struct pf_src_tree	*tree_src_tracking;
struct pf_state_tree_id	*tree_id;
struct pf_state_queue	*state_list;
struct pf_counters	*pf_counters;

RB_GENERATE(pf_src_tree, pf_src_node, entry, pf_src_compare);
RB_GENERATE(pf_state_tree, pf_state_key, entry, pf_state_compare_key);
RB_GENERATE(pf_state_rtree, pf_state_key, entry, pf_state_compare_rkey);
RB_GENERATE(pf_state_tree_id, pf_state, entry_id, pf_state_compare_id);
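/*
 * Editor's note (illustrative, not in the original): the RB_GENERATE()d
 * trees are searched with stack-constructed compare keys, as
 * pf_insert_src_node() does below:
 *
 *	struct pf_src_node k = { .af = af };
 *	PF_ACPY(&k.addr, src, af);
 *	sn = RB_FIND(pf_src_tree, &tree_src_tracking[cpu], &k);
 */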

static __inline int
pf_src_compare(struct pf_src_node *a, struct pf_src_node *b)
{
	int	diff;

	if (a->rule.ptr > b->rule.ptr)
		return (1);
	if (a->rule.ptr < b->rule.ptr)
		return (-1);
	if ((diff = a->af - b->af) != 0)
		return (diff);
	switch (a->af) {
#ifdef INET
	case AF_INET:
		if (a->addr.addr32[0] > b->addr.addr32[0])
			return (1);
		if (a->addr.addr32[0] < b->addr.addr32[0])
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (a->addr.addr32[3] > b->addr.addr32[3])
			return (1);
		if (a->addr.addr32[3] < b->addr.addr32[3])
			return (-1);
		if (a->addr.addr32[2] > b->addr.addr32[2])
			return (1);
		if (a->addr.addr32[2] < b->addr.addr32[2])
			return (-1);
		if (a->addr.addr32[1] > b->addr.addr32[1])
			return (1);
		if (a->addr.addr32[1] < b->addr.addr32[1])
			return (-1);
		if (a->addr.addr32[0] > b->addr.addr32[0])
			return (1);
		if (a->addr.addr32[0] < b->addr.addr32[0])
			return (-1);
		break;
#endif /* INET6 */
	}
	return (0);
}

u_int32_t
pf_state_hash(struct pf_state_key *sk)
{
	u_int32_t hv = (u_int32_t)(((intptr_t)sk >> 6) ^ ((intptr_t)sk >> 15));

	if (hv == 0)	/* disallow 0 */
		hv = 1;
	return(hv);
}

#ifdef INET6
void
pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
{
	switch (af) {
#ifdef INET
	case AF_INET:
		dst->addr32[0] = src->addr32[0];
		break;
#endif /* INET */
	case AF_INET6:
		dst->addr32[0] = src->addr32[0];
		dst->addr32[1] = src->addr32[1];
		dst->addr32[2] = src->addr32[2];
		dst->addr32[3] = src->addr32[3];
		break;
	}
}
#endif /* INET6 */

void
pf_init_threshold(struct pf_threshold *threshold,
    u_int32_t limit, u_int32_t seconds)
{
	threshold->limit = limit * PF_THRESHOLD_MULT;
	threshold->seconds = seconds;
	threshold->count = 0;
	threshold->last = time_second;
}

void
pf_add_threshold(struct pf_threshold *threshold)
{
	u_int32_t t = time_second, diff = t - threshold->last;

	if (diff >= threshold->seconds)
		threshold->count = 0;
	else
		threshold->count -= threshold->count * diff /
		    threshold->seconds;
	threshold->count += PF_THRESHOLD_MULT;
	threshold->last = t;
}

int
pf_check_threshold(struct pf_threshold *threshold)
{
	return (threshold->count > threshold->limit);
}
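/*
 * Editor's worked example (illustrative): for "max-src-conn-rate 100/10"
 * the limit becomes 100 * PF_THRESHOLD_MULT and each new connection adds
 * PF_THRESHOLD_MULT to count.  Between updates the count decays linearly
 * over the 10-second window; e.g. 5 seconds after the last update, half
 * of the accumulated count has aged away (count -= count * 5 / 10).
 */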

int
pf_src_connlimit(struct pf_state *state)
{
	int bad = 0;
	int cpu = mycpu->gd_cpuid;

	atomic_add_int(&state->src_node->conn, 1);
	state->src.tcp_est = 1;
	pf_add_threshold(&state->src_node->conn_rate);

	if (state->rule.ptr->max_src_conn &&
	    state->rule.ptr->max_src_conn < state->src_node->conn) {
		PF_INC_LCOUNTER(LCNT_SRCCONN);
		bad++;
	}

	if (state->rule.ptr->max_src_conn_rate.limit &&
	    pf_check_threshold(&state->src_node->conn_rate)) {
		PF_INC_LCOUNTER(LCNT_SRCCONNRATE);
		bad++;
	}

	if (!bad)
		return 0;

	if (state->rule.ptr->overload_tbl) {
		struct pfr_addr p;
		u_int32_t	killed = 0;

		PF_INC_LCOUNTER(LCNT_OVERLOAD_TABLE);
		if (pf_status.debug >= PF_DEBUG_MISC) {
			kprintf("pf_src_connlimit: blocking address ");
			pf_print_host(&state->src_node->addr, 0,
			    state->key[PF_SK_WIRE]->af);
		}

		bzero(&p, sizeof(p));
		p.pfra_af = state->key[PF_SK_WIRE]->af;
		switch (state->key[PF_SK_WIRE]->af) {
#ifdef INET
		case AF_INET:
			p.pfra_net = 32;
			p.pfra_ip4addr = state->src_node->addr.v4;
			break;
#endif /* INET */
#ifdef INET6
		case AF_INET6:
			p.pfra_net = 128;
			p.pfra_ip6addr = state->src_node->addr.v6;
			break;
#endif /* INET6 */
		}

		pfr_insert_kentry(state->rule.ptr->overload_tbl,
		    &p, time_second);

		/* kill existing states if that's required. */
		if (state->rule.ptr->flush) {
			struct pf_state_key *sk;
			struct pf_state *st;

			PF_INC_LCOUNTER(LCNT_OVERLOAD_FLUSH);
			RB_FOREACH(st, pf_state_tree_id, &tree_id[cpu]) {
				sk = st->key[PF_SK_WIRE];
				/*
				 * Kill states from this source.  (Only those
				 * from the same rule if PF_FLUSH_GLOBAL is
				 * not set).  (Only on current cpu).
				 */
				if (sk->af ==
				    state->key[PF_SK_WIRE]->af &&
				    ((state->direction == PF_OUT &&
				    PF_AEQ(&state->src_node->addr,
					&sk->addr[0], sk->af)) ||
				    (state->direction == PF_IN &&
				    PF_AEQ(&state->src_node->addr,
					&sk->addr[1], sk->af))) &&
				    (state->rule.ptr->flush &
				    PF_FLUSH_GLOBAL ||
				    state->rule.ptr == st->rule.ptr)) {
					st->timeout = PFTM_PURGE;
					st->src.state = st->dst.state =
					    TCPS_CLOSED;
					killed++;
				}
			}
			if (pf_status.debug >= PF_DEBUG_MISC)
				kprintf(", %u states killed", killed);
		}
		if (pf_status.debug >= PF_DEBUG_MISC)
			kprintf("\n");
	}

	/* kill this state */
	state->timeout = PFTM_PURGE;
	state->src.state = state->dst.state = TCPS_CLOSED;

	return 1;
}

int
pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule,
    struct pf_addr *src, sa_family_t af)
{
	struct pf_src_node	k;
	int cpu = mycpu->gd_cpuid;

	bzero(&k, sizeof(k));	/* avoid gcc warnings */
	if (*sn == NULL) {
		k.af = af;
		PF_ACPY(&k.addr, src, af);
		if (rule->rule_flag & PFRULE_RULESRCTRACK ||
		    rule->rpool.opts & PF_POOL_STICKYADDR)
			k.rule.ptr = rule;
		else
			k.rule.ptr = NULL;
		PF_INC_SCOUNTER(SCNT_SRC_NODE_SEARCH);
		*sn = RB_FIND(pf_src_tree, &tree_src_tracking[cpu], &k);
	}
	if (*sn == NULL) {
		if (!rule->max_src_nodes ||
		    rule->src_nodes < rule->max_src_nodes)
			(*sn) = kmalloc(sizeof(struct pf_src_node),
					M_PFSRCTREEPL, M_NOWAIT|M_ZERO);
		else
			PF_INC_LCOUNTER(LCNT_SRCNODES);
		if ((*sn) == NULL)
			return (-1);

		pf_init_threshold(&(*sn)->conn_rate,
		    rule->max_src_conn_rate.limit,
		    rule->max_src_conn_rate.seconds);

		(*sn)->af = af;
		if (rule->rule_flag & PFRULE_RULESRCTRACK ||
		    rule->rpool.opts & PF_POOL_STICKYADDR)
			(*sn)->rule.ptr = rule;
		else
			(*sn)->rule.ptr = NULL;
		PF_ACPY(&(*sn)->addr, src, af);
		if (RB_INSERT(pf_src_tree,
			      &tree_src_tracking[cpu], *sn) != NULL) {
			if (pf_status.debug >= PF_DEBUG_MISC) {
				kprintf("pf: src_tree insert failed: ");
				pf_print_host(&(*sn)->addr, 0, af);
				kprintf("\n");
			}
			kfree(*sn, M_PFSRCTREEPL);
			return (-1);
		}

		/*
		 * Atomic op required to increment src_nodes in the rule
		 * because we hold a shared token here (decrements will use
		 * an exclusive token).
		 */
		(*sn)->creation = time_second;
		(*sn)->ruletype = rule->action;
		if ((*sn)->rule.ptr != NULL)
			atomic_add_int(&(*sn)->rule.ptr->src_nodes, 1);
		PF_INC_SCOUNTER(SCNT_SRC_NODE_INSERT);
		atomic_add_int(&pf_status.src_nodes, 1);
	} else {
		if (rule->max_src_states &&
		    (*sn)->states >= rule->max_src_states) {
			PF_INC_LCOUNTER(LCNT_SRCSTATES);
			return (-1);
		}
	}
	return (0);
}

/*
 * state table (indexed by the pf_state_key structure), normal RBTREE
 * comparison.
 */
static __inline int
pf_state_compare_key(struct pf_state_key *a, struct pf_state_key *b)
{
	int	diff;

	if ((diff = a->proto - b->proto) != 0)
		return (diff);
	if ((diff = a->af - b->af) != 0)
		return (diff);
	switch (a->af) {
#ifdef INET
	case AF_INET:
		if (a->addr[0].addr32[0] > b->addr[0].addr32[0])
			return (1);
		if (a->addr[0].addr32[0] < b->addr[0].addr32[0])
			return (-1);
		if (a->addr[1].addr32[0] > b->addr[1].addr32[0])
			return (1);
		if (a->addr[1].addr32[0] < b->addr[1].addr32[0])
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (a->addr[0].addr32[3] > b->addr[0].addr32[3])
			return (1);
		if (a->addr[0].addr32[3] < b->addr[0].addr32[3])
			return (-1);
		if (a->addr[1].addr32[3] > b->addr[1].addr32[3])
			return (1);
		if (a->addr[1].addr32[3] < b->addr[1].addr32[3])
			return (-1);
		if (a->addr[0].addr32[2] > b->addr[0].addr32[2])
			return (1);
		if (a->addr[0].addr32[2] < b->addr[0].addr32[2])
			return (-1);
		if (a->addr[1].addr32[2] > b->addr[1].addr32[2])
			return (1);
		if (a->addr[1].addr32[2] < b->addr[1].addr32[2])
			return (-1);
		if (a->addr[0].addr32[1] > b->addr[0].addr32[1])
			return (1);
		if (a->addr[0].addr32[1] < b->addr[0].addr32[1])
			return (-1);
		if (a->addr[1].addr32[1] > b->addr[1].addr32[1])
			return (1);
		if (a->addr[1].addr32[1] < b->addr[1].addr32[1])
			return (-1);
		if (a->addr[0].addr32[0] > b->addr[0].addr32[0])
			return (1);
		if (a->addr[0].addr32[0] < b->addr[0].addr32[0])
			return (-1);
		if (a->addr[1].addr32[0] > b->addr[1].addr32[0])
			return (1);
		if (a->addr[1].addr32[0] < b->addr[1].addr32[0])
			return (-1);
		break;
#endif /* INET6 */
	}

	if ((diff = a->port[0] - b->port[0]) != 0)
		return (diff);
	if ((diff = a->port[1] - b->port[1]) != 0)
		return (diff);

	return (0);
}

/*
 * Used for RB_FIND only, compare in the reverse direction.  The
 * element to be reversed is always (a), since we obviously can't
 * reverse the state tree depicted by (b).
 */
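/*
 * Editor's example (illustrative): looking up the forward key
 * {A:a -> B:b} in reverse matches a tree element keyed {B:b -> A:a},
 * which is why a->addr[1]/a->port[1] are compared against
 * b->addr[0]/b->port[0] below, and vice versa.
 */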
static __inline int
pf_state_compare_rkey(struct pf_state_key *a, struct pf_state_key *b)
{
	int	diff;

	if ((diff = a->proto - b->proto) != 0)
		return (diff);
	if ((diff = a->af - b->af) != 0)
		return (diff);
	switch (a->af) {
#ifdef INET
	case AF_INET:
		if (a->addr[1].addr32[0] > b->addr[0].addr32[0])
			return (1);
		if (a->addr[1].addr32[0] < b->addr[0].addr32[0])
			return (-1);
		if (a->addr[0].addr32[0] > b->addr[1].addr32[0])
			return (1);
		if (a->addr[0].addr32[0] < b->addr[1].addr32[0])
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (a->addr[1].addr32[3] > b->addr[0].addr32[3])
			return (1);
		if (a->addr[1].addr32[3] < b->addr[0].addr32[3])
			return (-1);
		if (a->addr[0].addr32[3] > b->addr[1].addr32[3])
			return (1);
		if (a->addr[0].addr32[3] < b->addr[1].addr32[3])
			return (-1);
		if (a->addr[1].addr32[2] > b->addr[0].addr32[2])
			return (1);
		if (a->addr[1].addr32[2] < b->addr[0].addr32[2])
			return (-1);
		if (a->addr[0].addr32[2] > b->addr[1].addr32[2])
			return (1);
		if (a->addr[0].addr32[2] < b->addr[1].addr32[2])
			return (-1);
		if (a->addr[1].addr32[1] > b->addr[0].addr32[1])
			return (1);
		if (a->addr[1].addr32[1] < b->addr[0].addr32[1])
			return (-1);
		if (a->addr[0].addr32[1] > b->addr[1].addr32[1])
			return (1);
		if (a->addr[0].addr32[1] < b->addr[1].addr32[1])
			return (-1);
		if (a->addr[1].addr32[0] > b->addr[0].addr32[0])
			return (1);
		if (a->addr[1].addr32[0] < b->addr[0].addr32[0])
			return (-1);
		if (a->addr[0].addr32[0] > b->addr[1].addr32[0])
			return (1);
		if (a->addr[0].addr32[0] < b->addr[1].addr32[0])
			return (-1);
		break;
#endif /* INET6 */
	}

	if ((diff = a->port[1] - b->port[0]) != 0)
		return (diff);
	if ((diff = a->port[0] - b->port[1]) != 0)
		return (diff);

	return (0);
}

static __inline int
pf_state_compare_id(struct pf_state *a, struct pf_state *b)
{
	if (a->id > b->id)
		return (1);
	if (a->id < b->id)
		return (-1);
	if (a->creatorid > b->creatorid)
		return (1);
	if (a->creatorid < b->creatorid)
		return (-1);

	return (0);
}

int
pf_state_key_attach(struct pf_state_key *sk, struct pf_state *s, int idx)
{
	struct pf_state_item	*si;
	struct pf_state_key	*cur;
	int cpu;
	int error;

	/*
	 * PFSTATE_STACK_GLOBAL is set when the state might not hash to the
	 * current cpu.  The keys are managed on the global statetbl tree
	 * for this case.  Only translations (RDR, NAT) can cause this.
	 *
	 * When this flag is not set we must still check the global statetbl
	 * for a collision, and if we find one we set the HALF_DUPLEX flag
	 * in the state.
	 */
	if (s->state_flags & PFSTATE_STACK_GLOBAL) {
		cpu = ncpus;
		lockmgr(&pf_global_statetbl_lock, LK_EXCLUSIVE);
	} else {
		cpu = mycpu->gd_cpuid;
		lockmgr(&pf_global_statetbl_lock, LK_SHARED);
	}
	KKASSERT(s->key[idx] == NULL);	/* XXX handle this? */

	if (pf_status.debug >= PF_DEBUG_MISC) {
		kprintf("state_key attach cpu %d (%08x:%d) %s (%08x:%d)\n",
			cpu,
			ntohl(sk->addr[0].addr32[0]), ntohs(sk->port[0]),
			(idx == PF_SK_WIRE ? "->" : "<-"),
			ntohl(sk->addr[1].addr32[0]), ntohs(sk->port[1]));
	}

	/*
	 * Check whether (e.g.) a PASS rule being put on a per-cpu tree
	 * collides with a translation rule on the global tree.  This is
	 * NOT an error.  We *WANT* to establish state for this case so the
	 * packet path is short-cutted and doesn't need to scan the ruleset
	 * on every packet.  But the established state will only see one
	 * side of a two-way packet conversation.  To prevent this from
	 * causing problems (e.g. generating a RST), we force PFSTATE_SLOPPY
	 * to be set on the established state.
	 *
	 * A collision against RDR state can only occur with a PASS IN in the
	 * opposite direction or a PASS OUT in the forwards direction.  This
	 * is because RDRs are processed on the input side.
	 *
	 * A collision against NAT state can only occur with a PASS IN in the
	 * forwards direction or a PASS OUT in the opposite direction.  This
	 * is because NATs are processed on the output side.
	 *
	 * In both situations we need to do a reverse addr/port test because
	 * the PASS IN or PASS OUT only establishes if it doesn't match the
	 * established RDR state in the forwards direction.  The direction
	 * flag has to be ignored (it will be one way for a PASS IN and the
	 * other way for a PASS OUT).
	 *
	 * pf_global_statetbl_lock will be locked shared when testing and
	 * not entering into the global state table.
	 */
	if (cpu != ncpus &&
	    (cur = RB_FIND(pf_state_rtree,
			   (struct pf_state_rtree *)&pf_statetbl[ncpus],
			   sk)) != NULL) {
		TAILQ_FOREACH(si, &cur->states, entry) {
			/*
			 * NOTE: We must ignore direction mismatches.
			 */
			if (si->s->kif == s->kif) {
				s->state_flags |= PFSTATE_HALF_DUPLEX |
						  PFSTATE_SLOPPY;
				if (pf_status.debug >= PF_DEBUG_MISC) {
					kprintf(
					    "pf: %s key attach collision "
					    "on %s: ",
					    (idx == PF_SK_WIRE) ?
						"wire" : "stack",
					    s->kif->pfik_name);
					pf_print_state_parts(s,
					    (idx == PF_SK_WIRE) ? sk : NULL,
					    (idx == PF_SK_STACK) ? sk : NULL);
					kprintf("\n");
				}
				break;
			}
		}
	}

	/*
	 * Enter into either the per-cpu or the global state table.
	 *
	 * pf_global_statetbl_lock will be locked exclusively when entering
	 * into the global state table.
	 */
	if ((cur = RB_INSERT(pf_state_tree, &pf_statetbl[cpu], sk)) != NULL) {
		/* key exists. check for same kif, if none, add to key */
		TAILQ_FOREACH(si, &cur->states, entry) {
			if (si->s->kif == s->kif &&
			    si->s->direction == s->direction) {
				if (pf_status.debug >= PF_DEBUG_MISC) {
					kprintf(
					    "pf: %s key attach "
					    "failed on %s: ",
					    (idx == PF_SK_WIRE) ?
						"wire" : "stack",
					    s->kif->pfik_name);
					pf_print_state_parts(s,
					    (idx == PF_SK_WIRE) ? sk : NULL,
					    (idx == PF_SK_STACK) ? sk : NULL);
					kprintf("\n");
				}
				kfree(sk, M_PFSTATEKEYPL);
				error = -1;
				goto failed;	/* collision! */
			}
		}
		kfree(sk, M_PFSTATEKEYPL);

		s->key[idx] = cur;
	} else {
		s->key[idx] = sk;
	}

	if ((si = kmalloc(sizeof(struct pf_state_item),
			  M_PFSTATEITEMPL, M_NOWAIT)) == NULL) {
		pf_state_key_detach(s, idx);
		error = -1;
		goto failed;	/* allocation failure */
	}
	si->s = s;

	/* list is sorted, if-bound states before floating */
	if (s->kif == pfi_all)
		TAILQ_INSERT_TAIL(&s->key[idx]->states, si, entry);
	else
		TAILQ_INSERT_HEAD(&s->key[idx]->states, si, entry);

	error = 0;
failed:
	lockmgr(&pf_global_statetbl_lock, LK_RELEASE);
	return error;
}
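/*
 * Editor's note (illustrative): with a translation rule the wire and
 * stack keys differ; e.g. for a hypothetical
 * "rdr ... 203.0.113.5 port 80 -> 10.0.0.5" the wire key would be
 * {client -> 203.0.113.5:80} while the stack key is
 * {client -> 10.0.0.5:80}, and the wire key may land on the global
 * table when it does not hash to the local cpu.
 */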

/*
 * NOTE: Can only be called indirectly via the purge thread with pf_token
 *	 exclusively locked.
 */
void
pf_detach_state(struct pf_state *s)
{
	if (s->key[PF_SK_WIRE] == s->key[PF_SK_STACK])
		s->key[PF_SK_WIRE] = NULL;

	if (s->key[PF_SK_STACK] != NULL)
		pf_state_key_detach(s, PF_SK_STACK);

	if (s->key[PF_SK_WIRE] != NULL)
		pf_state_key_detach(s, PF_SK_WIRE);
}

/*
 * NOTE: Can only be called indirectly via the purge thread with pf_token
 *	 exclusively locked.
 */
void
pf_state_key_detach(struct pf_state *s, int idx)
{
	struct pf_state_item	*si;
	int cpu;

	/*
	 * PFSTATE_STACK_GLOBAL is set for translations when the translated
	 * address/port is not localized to the same cpu that the untranslated
	 * address/port is on.  The wire pf_state_key is managed on the global
	 * statetbl tree for this case.
	 */
	if (s->state_flags & PFSTATE_STACK_GLOBAL) {
		cpu = ncpus;
		lockmgr(&pf_global_statetbl_lock, LK_EXCLUSIVE);
	} else {
		cpu = mycpu->gd_cpuid;
	}

	si = TAILQ_FIRST(&s->key[idx]->states);
	while (si && si->s != s)
		si = TAILQ_NEXT(si, entry);

	if (si) {
		TAILQ_REMOVE(&s->key[idx]->states, si, entry);
		kfree(si, M_PFSTATEITEMPL);
	}

	if (TAILQ_EMPTY(&s->key[idx]->states)) {
		RB_REMOVE(pf_state_tree, &pf_statetbl[cpu], s->key[idx]);
		if (s->key[idx]->reverse)
			s->key[idx]->reverse->reverse = NULL;
		if (s->key[idx]->inp)
			s->key[idx]->inp->inp_pf_sk = NULL;
		kfree(s->key[idx], M_PFSTATEKEYPL);
	}
	s->key[idx] = NULL;

	if (s->state_flags & PFSTATE_STACK_GLOBAL)
		lockmgr(&pf_global_statetbl_lock, LK_RELEASE);
}

struct pf_state_key *
pf_alloc_state_key(int pool_flags)
{
	struct pf_state_key	*sk;

	sk = kmalloc(sizeof(struct pf_state_key), M_PFSTATEKEYPL, pool_flags);
	if (sk) {
		TAILQ_INIT(&sk->states);
	}
	return (sk);
}

int
pf_state_key_setup(struct pf_pdesc *pd, struct pf_rule *nr,
    struct pf_state_key **skw, struct pf_state_key **sks,
    struct pf_state_key **skp, struct pf_state_key **nkp,
    struct pf_addr *saddr, struct pf_addr *daddr,
    u_int16_t sport, u_int16_t dport)
{
	KKASSERT((*skp == NULL && *nkp == NULL));

	if ((*skp = pf_alloc_state_key(M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	PF_ACPY(&(*skp)->addr[pd->sidx], saddr, pd->af);
	PF_ACPY(&(*skp)->addr[pd->didx], daddr, pd->af);
	(*skp)->port[pd->sidx] = sport;
	(*skp)->port[pd->didx] = dport;
	(*skp)->proto = pd->proto;
	(*skp)->af = pd->af;

	if (nr != NULL) {
		if ((*nkp = pf_alloc_state_key(M_NOWAIT | M_ZERO)) == NULL)
			return (ENOMEM);	/* caller must handle cleanup */

		/* XXX maybe just bcopy and TAILQ_INIT(&(*nkp)->states) */
		PF_ACPY(&(*nkp)->addr[0], &(*skp)->addr[0], pd->af);
		PF_ACPY(&(*nkp)->addr[1], &(*skp)->addr[1], pd->af);
		(*nkp)->port[0] = (*skp)->port[0];
		(*nkp)->port[1] = (*skp)->port[1];
		(*nkp)->proto = pd->proto;
		(*nkp)->af = pd->af;
	} else {
		*nkp = *skp;
	}

	if (pd->dir == PF_IN) {
		*skw = *skp;
		*sks = *nkp;
	} else {
		*sks = *skp;
		*skw = *nkp;
	}
	return (0);
}
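/*
 * Editor's worked example (illustrative): for an inbound packet
 * (pd->dir == PF_IN) the wire key is the untranslated *skp and the
 * stack key is the (possibly translated) *nkp; for an outbound packet
 * the roles are swapped, matching the assignment above.
 */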

/*
 * Insert pf_state with one or two state keys (allowing a reverse path lookup
 * which is used by NAT).  In the NAT case skw is the initiator (?) and
 * sks is the target.
 */
int
pf_state_insert(struct pfi_kif *kif, struct pf_state_key *skw,
    struct pf_state_key *sks, struct pf_state *s)
{
	int cpu = mycpu->gd_cpuid;

	s->kif = kif;
	s->cpuid = cpu;

	if (skw == sks) {
		if (pf_state_key_attach(skw, s, PF_SK_WIRE))
			return (-1);
		s->key[PF_SK_STACK] = s->key[PF_SK_WIRE];
	} else {
		/*
		skw->reverse = sks;
		sks->reverse = skw;
		*/
		if (pf_state_key_attach(skw, s, PF_SK_WIRE)) {
			kfree(sks, M_PFSTATEKEYPL);
			return (-1);
		}
		if (pf_state_key_attach(sks, s, PF_SK_STACK)) {
			pf_state_key_detach(s, PF_SK_WIRE);
			return (-1);
		}
	}

	if (s->id == 0 && s->creatorid == 0) {
		u_int64_t sid;

#if __SIZEOF_LONG__ == 8
		sid = atomic_fetchadd_long(&pf_status.stateid, 1);
#else
		spin_lock(&pf_spin);
		sid = pf_status.stateid++;
		spin_unlock(&pf_spin);
#endif
		s->id = htobe64(sid);
		s->creatorid = pf_status.hostid;
	}

	/*
	 * Calculate hash code for altq
	 */
	s->hash = crc32(s->key[PF_SK_WIRE], PF_STATE_KEY_HASH_LENGTH);

	if (RB_INSERT(pf_state_tree_id, &tree_id[cpu], s) != NULL) {
		if (pf_status.debug >= PF_DEBUG_MISC) {
			kprintf("pf: state insert failed: "
				"id: %016jx creatorid: %08x",
				(uintmax_t)be64toh(s->id),
				ntohl(s->creatorid));
			if (s->sync_flags & PFSTATE_FROMSYNC)
				kprintf(" (from sync)");
			kprintf("\n");
		}
		pf_detach_state(s);
		return (-1);
	}
	TAILQ_INSERT_TAIL(&state_list[cpu], s, entry_list);
	PF_INC_FCOUNTER(FCNT_STATE_INSERT);
	atomic_add_int(&pf_status.states, 1);
	pfi_kif_ref(kif, PFI_KIF_REF_STATE);
	pfsync_insert_state(s);
	return (0);
}

struct pf_state *
pf_find_state_byid(struct pf_state_cmp *key)
{
	int cpu = mycpu->gd_cpuid;

	PF_INC_FCOUNTER(FCNT_STATE_SEARCH);

	return (RB_FIND(pf_state_tree_id, &tree_id[cpu],
			(struct pf_state *)key));
}

/*
 * WARNING! May return a state structure that was localized to another cpu,
 *	    destruction is typically protected by the caller's pf_token.
 *	    The element can only be destroyed by the purge thread, from
 *	    which our (shared) pf_token protects us.
 */
struct pf_state *
pf_find_state(struct pfi_kif *kif, struct pf_state_key_cmp *key, u_int dir,
    struct mbuf *m)
{
	struct pf_state_key	*skey = (void *)key;
	struct pf_state_key	*sk;
	struct pf_state_item	*si;
	struct pf_state		*s;
	int cpu = mycpu->gd_cpuid;
	int globalstl = 0;

	PF_INC_FCOUNTER(FCNT_STATE_SEARCH);

	if (dir == PF_OUT && m->m_pkthdr.pf.statekey &&
	    ((struct pf_state_key *)m->m_pkthdr.pf.statekey)->reverse) {
		sk = ((struct pf_state_key *)m->m_pkthdr.pf.statekey)->reverse;
	} else {
		sk = RB_FIND(pf_state_tree, &pf_statetbl[cpu], skey);
		if (sk == NULL) {
			lockmgr(&pf_global_statetbl_lock, LK_SHARED);
			sk = RB_FIND(pf_state_tree, &pf_statetbl[ncpus], skey);
			if (sk == NULL) {
				lockmgr(&pf_global_statetbl_lock, LK_RELEASE);
				return (NULL);
			}
			globalstl = 1;
		}
		if (dir == PF_OUT && m->m_pkthdr.pf.statekey) {
			((struct pf_state_key *)
			    m->m_pkthdr.pf.statekey)->reverse = sk;
			sk->reverse = m->m_pkthdr.pf.statekey;
		}
	}
	if (dir == PF_OUT)
		m->m_pkthdr.pf.statekey = NULL;

	/* list is sorted, if-bound states before floating ones */
	TAILQ_FOREACH(si, &sk->states, entry) {
		if ((si->s->kif == pfi_all || si->s->kif == kif) &&
		    sk == (dir == PF_IN ? si->s->key[PF_SK_WIRE] :
			   si->s->key[PF_SK_STACK])) {
			break;
		}
	}

	/*
	 * Extract state before potentially releasing the global statetbl
	 * lock.  Ignore the state if the create is still in-progress as
	 * it can be deleted out from under us by the owning localized cpu.
	 * However, if CREATEINPROG is not set, state can only be deleted
	 * by the purge thread which we are protected from via our shared
	 * pf_token.
	 */
	if (si) {
		s = si->s;
		if (s && (s->state_flags & PFSTATE_CREATEINPROG))
			s = NULL;
	} else {
		s = NULL;
	}
	if (globalstl)
		lockmgr(&pf_global_statetbl_lock, LK_RELEASE);
	return s;
}
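/*
 * Editor's note (illustrative): on the output path the mbuf may carry
 * m->m_pkthdr.pf.statekey from the stack side; when that key's
 * ->reverse pointer is already cached, the RB_FIND() above is skipped
 * entirely, which is the common fast path for established connections.
 */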

/*
 * WARNING! May return a state structure that was localized to another cpu,
 *	    destruction is typically protected by the caller's pf_token.
 */
struct pf_state *
pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more)
{
	struct pf_state_key	*skey = (void *)key;
	struct pf_state_key	*sk;
	struct pf_state_item	*si, *ret = NULL;
	struct pf_state		*s;
	int cpu = mycpu->gd_cpuid;
	int globalstl = 0;

	PF_INC_FCOUNTER(FCNT_STATE_SEARCH);

	sk = RB_FIND(pf_state_tree, &pf_statetbl[cpu], skey);
	if (sk == NULL) {
		lockmgr(&pf_global_statetbl_lock, LK_SHARED);
		sk = RB_FIND(pf_state_tree, &pf_statetbl[ncpus], skey);
		globalstl = 1;
	}
	if (sk != NULL) {
		TAILQ_FOREACH(si, &sk->states, entry) {
			if (dir == PF_INOUT ||
			    (sk == (dir == PF_IN ? si->s->key[PF_SK_WIRE] :
				    si->s->key[PF_SK_STACK]))) {
				if (more == NULL) {
					ret = si;
					break;
				}
				if (ret)
					(*more)++;
				else
					ret = si;
			}
		}
	}

	/*
	 * Extract state before potentially releasing the global statetbl
	 * lock.  Ignore the state if the create is still in-progress as
	 * it can be deleted out from under us by the owning localized cpu.
	 * However, if CREATEINPROG is not set, state can only be deleted
	 * by the purge thread which we are protected from via our shared
	 * pf_token.
	 */
	if (ret) {
		s = ret->s;
		if (s && (s->state_flags & PFSTATE_CREATEINPROG))
			s = NULL;
	} else {
		s = NULL;
	}
	if (globalstl)
		lockmgr(&pf_global_statetbl_lock, LK_RELEASE);
	return s;
}

/* END state table stuff */

void
pf_purge_thread(void *v)
{
	globaldata_t save_gd = mycpu;
	int nloops = 0;
	int locked = 0;
	int nn;
	int endingit;

	for (;;) {
		tsleep(pf_purge_thread, PWAIT, "pftm", 1 * hz);

		endingit = pf_end_threads;

		for (nn = 0; nn < ncpus; ++nn) {
			lwkt_setcpu_self(globaldata_find(nn));

			lwkt_gettoken(&pf_token);
			lockmgr(&pf_consistency_lock, LK_EXCLUSIVE);
			crit_enter();

			/*
			 * process a fraction of the state table every second
			 */
			if (!pf_purge_expired_states(
				1 + (pf_status.states /
				     pf_default_rule.timeout[PFTM_INTERVAL]),
				0)) {
				pf_purge_expired_states(
				    1 + (pf_status.states /
					 pf_default_rule.timeout[
						PFTM_INTERVAL]), 1);
			}

			/*
			 * purge other expired types every PFTM_INTERVAL
			 * seconds
			 */
			if (++nloops >=
			    pf_default_rule.timeout[PFTM_INTERVAL]) {
				pf_purge_expired_fragments();
				if (!pf_purge_expired_src_nodes(locked)) {
					pf_purge_expired_src_nodes(1);
				}
				nloops = 0;
			}

			/*
			 * If terminating the thread, clean everything out
			 * (on all cpus).
			 */
			if (endingit) {
				pf_purge_expired_states(pf_status.states, 0);
				pf_purge_expired_fragments();
				pf_purge_expired_src_nodes(1);
			}

			crit_exit();
			lockmgr(&pf_consistency_lock, LK_RELEASE);
			lwkt_reltoken(&pf_token);
		}
		lwkt_setcpu_self(save_gd);
		if (endingit)
			break;
	}

	/*
	 * Thread termination
	 */
	pf_end_threads++;
	wakeup(pf_purge_thread);
	kthread_exit();
}

u_int32_t
pf_state_expires(const struct pf_state *state)
{
	u_int32_t	timeout;
	u_int32_t	start;
	u_int32_t	end;
	u_int32_t	states;

	/* handle all PFTM_* > PFTM_MAX here */
	if (state->timeout == PFTM_PURGE)
		return (time_second);
	if (state->timeout == PFTM_UNTIL_PACKET)
		return (0);
	KKASSERT(state->timeout != PFTM_UNLINKED);
	KKASSERT(state->timeout < PFTM_MAX);
	timeout = state->rule.ptr->timeout[state->timeout];
	if (!timeout)
		timeout = pf_default_rule.timeout[state->timeout];
	start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
	if (start) {
		end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
		states = state->rule.ptr->states_cur;
	} else {
		start = pf_default_rule.timeout[PFTM_ADAPTIVE_START];
		end = pf_default_rule.timeout[PFTM_ADAPTIVE_END];
		states = pf_status.states;
	}
	if (end && states > start && start < end) {
		if (states < end)
			return (state->expire + timeout * (end - states) /
				(end - start));
		else
			return (time_second);
	}
	return (state->expire + timeout);
}
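/*
 * Editor's worked example (illustrative): with timeout 3600,
 * adaptive.start 6000, adaptive.end 12000 and 9000 current states, the
 * effective timeout is 3600 * (12000 - 9000) / (12000 - 6000) = 1800
 * seconds; at or beyond adaptive.end the state expires immediately.
 */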

/*
 * (called with exclusive pf_token)
 */
int
pf_purge_expired_src_nodes(int waslocked)
{
	struct pf_src_node *cur, *next;
	int locked = waslocked;
	int cpu = mycpu->gd_cpuid;

	for (cur = RB_MIN(pf_src_tree, &tree_src_tracking[cpu]);
	     cur;
	     cur = next) {
		next = RB_NEXT(pf_src_tree, &tree_src_tracking[cpu], cur);

		if (cur->states <= 0 && cur->expire <= time_second) {
			if (!locked) {
				lockmgr(&pf_consistency_lock, LK_EXCLUSIVE);
				next = RB_NEXT(pf_src_tree,
					       &tree_src_tracking[cpu], cur);
				locked = 1;
			}
			if (cur->rule.ptr != NULL) {
				/*
				 * decrements in rule should be ok, token is
				 * held exclusively in this code path.
				 */
				atomic_add_int(&cur->rule.ptr->src_nodes, -1);
				if (cur->rule.ptr->states_cur <= 0 &&
				    cur->rule.ptr->max_src_nodes <= 0)
					pf_rm_rule(NULL, cur->rule.ptr);
			}
			RB_REMOVE(pf_src_tree, &tree_src_tracking[cpu], cur);
			PF_INC_SCOUNTER(SCNT_SRC_NODE_REMOVALS);
			atomic_add_int(&pf_status.src_nodes, -1);
			kfree(cur, M_PFSRCTREEPL);
		}
	}
	if (locked && !waslocked)
		lockmgr(&pf_consistency_lock, LK_RELEASE);
	return(1);
}

void
pf_src_tree_remove_state(struct pf_state *s)
{
	u_int32_t timeout;

	if (s->src_node != NULL) {
		if (s->src.tcp_est)
			atomic_add_int(&s->src_node->conn, -1);
		if (--s->src_node->states <= 0) {
			timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
			if (!timeout) {
				timeout =
				    pf_default_rule.timeout[PFTM_SRC_NODE];
			}
			s->src_node->expire = time_second + timeout;
		}
	}
	if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) {
		if (--s->nat_src_node->states <= 0) {
			timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
			if (!timeout)
				timeout =
				    pf_default_rule.timeout[PFTM_SRC_NODE];
			s->nat_src_node->expire = time_second + timeout;
		}
	}
	s->src_node = s->nat_src_node = NULL;
}

/* callers should be at crit_enter() */
void
pf_unlink_state(struct pf_state *cur)
{
	int cpu = mycpu->gd_cpuid;

	if (cur->src.state == PF_TCPS_PROXY_DST) {
		/* XXX wire key the right one? */
		pf_send_tcp(cur->rule.ptr, cur->key[PF_SK_WIRE]->af,
		    &cur->key[PF_SK_WIRE]->addr[1],
		    &cur->key[PF_SK_WIRE]->addr[0],
		    cur->key[PF_SK_WIRE]->port[1],
		    cur->key[PF_SK_WIRE]->port[0],
		    cur->src.seqhi, cur->src.seqlo + 1,
		    TH_RST|TH_ACK, 0, 0, 0, 1, cur->tag, NULL, NULL);
	}
	RB_REMOVE(pf_state_tree_id, &tree_id[cpu], cur);
	if (cur->creatorid == pf_status.hostid)
		pfsync_delete_state(cur);
	cur->timeout = PFTM_UNLINKED;
	pf_src_tree_remove_state(cur);
	pf_detach_state(cur);
}

/*
 * callers should be at crit_enter() and hold pf_consistency_lock exclusively.
 * pf_token must also be held exclusively.
 */
void
pf_free_state(struct pf_state *cur)
{
	int cpu = mycpu->gd_cpuid;

	KKASSERT(cur->cpuid == cpu);

	if (pfsyncif != NULL &&
	    (pfsyncif->sc_bulk_send_next == cur ||
	     pfsyncif->sc_bulk_terminator == cur))
		return;
	KKASSERT(cur->timeout == PFTM_UNLINKED);
	/*
	 * decrements in rule should be ok, token is
	 * held exclusively in this code path.
	 */
	if (--cur->rule.ptr->states_cur <= 0 &&
	    cur->rule.ptr->src_nodes <= 0)
		pf_rm_rule(NULL, cur->rule.ptr);
	if (cur->nat_rule.ptr != NULL) {
		if (--cur->nat_rule.ptr->states_cur <= 0 &&
		    cur->nat_rule.ptr->src_nodes <= 0) {
			pf_rm_rule(NULL, cur->nat_rule.ptr);
		}
	}
	if (cur->anchor.ptr != NULL) {
		if (--cur->anchor.ptr->states_cur <= 0)
			pf_rm_rule(NULL, cur->anchor.ptr);
	}
	pf_normalize_tcp_cleanup(cur);
	pfi_kif_unref(cur->kif, PFI_KIF_REF_STATE);

	/*
	 * We may be freeing pf_purge_expired_states()'s saved scan entry,
	 * adjust it if necessary.
	 */
	if (purge_cur[cpu] == cur) {
		kprintf("PURGE CONFLICT\n");
		purge_cur[cpu] = TAILQ_NEXT(purge_cur[cpu], entry_list);
	}
	TAILQ_REMOVE(&state_list[cpu], cur, entry_list);
	if (cur->tag)
		pf_tag_unref(cur->tag);
	kfree(cur, M_PFSTATEPL);
	PF_INC_FCOUNTER(FCNT_STATE_REMOVALS);
	atomic_add_int(&pf_status.states, -1);
}

int
pf_purge_expired_states(u_int32_t maxcheck, int waslocked)
{
	struct pf_state		*cur;
	int locked = waslocked;
	int cpu = mycpu->gd_cpuid;

	while (maxcheck--) {
		/*
		 * Wrap to start of list when we hit the end
		 */
		cur = purge_cur[cpu];
		if (cur == NULL) {
			cur = TAILQ_FIRST(&state_list[cpu]);
			if (cur == NULL)
				break;	/* list empty */
		}

		/*
		 * Setup next (purge_cur) while we process this one.  If
		 * we block and something else deletes purge_cur,
		 * pf_free_state() will adjust it further ahead.
		 */
		purge_cur[cpu] = TAILQ_NEXT(cur, entry_list);

		if (cur->timeout == PFTM_UNLINKED) {
			/* free unlinked state */
			if (!locked) {
				lockmgr(&pf_consistency_lock, LK_EXCLUSIVE);
				locked = 1;
			}
			pf_free_state(cur);
		} else if (pf_state_expires(cur) <= time_second) {
			/* unlink and free expired state */
			pf_unlink_state(cur);
			if (!locked) {
				if (!lockmgr(&pf_consistency_lock,
					     LK_EXCLUSIVE))
					return (0);
				locked = 1;
			}
			pf_free_state(cur);
		}
	}

	if (locked)
		lockmgr(&pf_consistency_lock, LK_RELEASE);
	return (1);
}

int
pf_tbladdr_setup(struct pf_ruleset *rs, struct pf_addr_wrap *aw)
{
	if (aw->type != PF_ADDR_TABLE)
		return (0);
	if ((aw->p.tbl = pfr_attach_table(rs, aw->v.tblname)) == NULL)
		return (1);
	return (0);
}

void
pf_tbladdr_remove(struct pf_addr_wrap *aw)
{
	if (aw->type != PF_ADDR_TABLE || aw->p.tbl == NULL)
		return;
	pfr_detach_table(aw->p.tbl);
	aw->p.tbl = NULL;
}

void
pf_tbladdr_copyout(struct pf_addr_wrap *aw)
{
	struct pfr_ktable *kt = aw->p.tbl;

	if (aw->type != PF_ADDR_TABLE || kt == NULL)
		return;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	aw->p.tbl = NULL;
	aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
	    kt->pfrkt_cnt : -1;
}

void
pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
{
	switch (af) {
#ifdef INET
	case AF_INET: {
		u_int32_t a = ntohl(addr->addr32[0]);
		kprintf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,
		    (a>>8)&255, a&255);
		if (p) {
			p = ntohs(p);
			kprintf(":%u", p);
		}
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		u_int16_t b;
		u_int8_t i, curstart, curend, maxstart, maxend;
		curstart = curend = maxstart = maxend = 255;
		for (i = 0; i < 8; i++) {
			if (!addr->addr16[i]) {
				if (curstart == 255)
					curstart = i;
				curend = i;
			} else {
				if ((curend - curstart) >
				    (maxend - maxstart)) {
					maxstart = curstart;
					maxend = curend;
				}
				curstart = curend = 255;
			}
		}
		if ((curend - curstart) >
		    (maxend - maxstart)) {
			maxstart = curstart;
			maxend = curend;
		}
		for (i = 0; i < 8; i++) {
			if (i >= maxstart && i <= maxend) {
				if (i == 0)
					kprintf(":");
				if (i == maxend)
					kprintf(":");
			} else {
				b = ntohs(addr->addr16[i]);
				kprintf("%x", b);
				if (i < 7)
					kprintf(":");
			}
		}
		if (p) {
			p = ntohs(p);
			kprintf("[%u]", p);
		}
		break;
	}
#endif /* INET6 */
	}
}

void
pf_print_state(struct pf_state *s)
{
	pf_print_state_parts(s, NULL, NULL);
}

void
pf_print_state_parts(struct pf_state *s,
    struct pf_state_key *skwp, struct pf_state_key *sksp)
{
	struct pf_state_key *skw, *sks;
	u_int8_t proto, dir;

	/* Do our best to fill these, but they're skipped if NULL */
	skw = skwp ? skwp : (s ? s->key[PF_SK_WIRE] : NULL);
	sks = sksp ? sksp : (s ? s->key[PF_SK_STACK] : NULL);
	proto = skw ? skw->proto : (sks ? sks->proto : 0);
	dir = s ? s->direction : 0;

	switch (proto) {
	case IPPROTO_TCP:
		kprintf("TCP ");
		break;
	case IPPROTO_UDP:
		kprintf("UDP ");
		break;
	case IPPROTO_ICMP:
		kprintf("ICMP ");
		break;
	case IPPROTO_ICMPV6:
		kprintf("ICMPV6 ");
		break;
	default:
		kprintf("%u ", skw->proto);
		break;
	}
	switch (dir) {
	case PF_IN:
		kprintf(" in");
		break;
	case PF_OUT:
		kprintf(" out");
		break;
	}
	if (skw) {
		kprintf(" wire: ");
		pf_print_host(&skw->addr[0], skw->port[0], skw->af);
		kprintf(" ");
		pf_print_host(&skw->addr[1], skw->port[1], skw->af);
	}
	if (sks) {
		kprintf(" stack: ");
		if (sks != skw) {
			pf_print_host(&sks->addr[0], sks->port[0], sks->af);
			kprintf(" ");
			pf_print_host(&sks->addr[1], sks->port[1], sks->af);
		} else
			kprintf("-");
	}
	if (s) {
		if (proto == IPPROTO_TCP) {
			kprintf(" [lo=%u high=%u win=%u modulator=%u",
			    s->src.seqlo, s->src.seqhi,
			    s->src.max_win, s->src.seqdiff);
			if (s->src.wscale && s->dst.wscale)
				kprintf(" wscale=%u",
				    s->src.wscale & PF_WSCALE_MASK);
			kprintf("]");
			kprintf(" [lo=%u high=%u win=%u modulator=%u",
			    s->dst.seqlo, s->dst.seqhi,
			    s->dst.max_win, s->dst.seqdiff);
			if (s->src.wscale && s->dst.wscale)
				kprintf(" wscale=%u",
				    s->dst.wscale & PF_WSCALE_MASK);
			kprintf("]");
		}
		kprintf(" %u:%u", s->src.state, s->dst.state);
	}
}

void
pf_print_flags(u_int8_t f)
{
	if (f)
		kprintf(" ");
	if (f & TH_FIN)
		kprintf("F");
	if (f & TH_SYN)
		kprintf("S");
	if (f & TH_RST)
		kprintf("R");
	if (f & TH_PUSH)
		kprintf("P");
	if (f & TH_ACK)
		kprintf("A");
	if (f & TH_URG)
		kprintf("U");
	if (f & TH_ECE)
		kprintf("E");
	if (f & TH_CWR)
		kprintf("W");
}

#define	PF_SET_SKIP_STEPS(i)					\
	do {							\
		while (head[i] != cur) {			\
			head[i]->skip[i].ptr = cur;		\
			head[i] = TAILQ_NEXT(head[i], entries);	\
		}						\
	} while (0)

void
pf_calc_skip_steps(struct pf_rulequeue *rules)
{
	struct pf_rule *cur, *prev, *head[PF_SKIP_COUNT];
	int i;

	cur = TAILQ_FIRST(rules);
	prev = cur;
	for (i = 0; i < PF_SKIP_COUNT; ++i)
		head[i] = cur;
	while (cur != NULL) {
		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
			PF_SET_SKIP_STEPS(PF_SKIP_IFP);
		if (cur->direction != prev->direction)
			PF_SET_SKIP_STEPS(PF_SKIP_DIR);
		if (cur->af != prev->af)
			PF_SET_SKIP_STEPS(PF_SKIP_AF);
		if (cur->proto != prev->proto)
			PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
		if (cur->src.neg != prev->src.neg ||
		    pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr))
			PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
		if (cur->src.port[0] != prev->src.port[0] ||
		    cur->src.port[1] != prev->src.port[1] ||
		    cur->src.port_op != prev->src.port_op)
			PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
		if (cur->dst.neg != prev->dst.neg ||
		    pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr))
			PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
		if (cur->dst.port[0] != prev->dst.port[0] ||
		    cur->dst.port[1] != prev->dst.port[1] ||
		    cur->dst.port_op != prev->dst.port_op)
			PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);

		prev = cur;
		cur = TAILQ_NEXT(cur, entries);
	}
	for (i = 0; i < PF_SKIP_COUNT; ++i)
		PF_SET_SKIP_STEPS(i);
}
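/*
 * Editor's note (illustrative): skip steps let the ruleset evaluator
 * jump over runs of rules that cannot match; e.g. if 50 consecutive
 * rules all specify the same (hypothetical) interface "em0", a packet
 * arriving on a different interface follows the PF_SKIP_IFP pointer
 * past all 50 rules in one step.
 */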

int
pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
{
	if (aw1->type != aw2->type)
		return (1);
	switch (aw1->type) {
	case PF_ADDR_ADDRMASK:
	case PF_ADDR_RANGE:
		if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, 0))
			return (1);
		if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, 0))
			return (1);
		return (0);
	case PF_ADDR_DYNIFTL:
		return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt);
	case PF_ADDR_NOROUTE:
	case PF_ADDR_URPFFAILED:
		return (0);
	case PF_ADDR_TABLE:
		return (aw1->p.tbl != aw2->p.tbl);
	case PF_ADDR_RTLABEL:
		return (aw1->v.rtlabel != aw2->v.rtlabel);
	default:
		kprintf("invalid address type: %d\n", aw1->type);
		return (1);
	}
}

u_int16_t
pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
{
	u_int32_t	l;

	if (udp && !cksum)
		return (0x0000);
	l = cksum + old - new;
	l = (l >> 16) + (l & 65535);
	l = l & 65535;
	if (udp && !l)
		return (0xFFFF);
	return (l);
}
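/*
 * Editor's worked example (illustrative, RFC 1624 style): replacing old
 * word 0x1234 with new word 0x1235 in a packet whose checksum field is
 * 0xaaaa gives l = 0xaaaa + 0x1234 - 0x1235 = 0xaaa9, with no carry to
 * fold, so the updated checksum is 0xaaa9.  The udp flag preserves the
 * special meaning of 0x0000 (no checksum) for UDP.
 */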

void
pf_change_ap(struct pf_addr *a, u_int16_t *p, u_int16_t *ic, u_int16_t *pc,
    struct pf_addr *an, u_int16_t pn, u_int8_t u, sa_family_t af)
{
	struct pf_addr	ao;
	u_int16_t	po = *p;

	PF_ACPY(&ao, a, af);
	PF_ACPY(a, an, af);

	*p = pn;

	switch (af) {
#ifdef INET
	case AF_INET:
		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
		    ao.addr16[0], an->addr16[0], 0),
		    ao.addr16[1], an->addr16[1], 0);
		*p = pn;
		*pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
		    ao.addr16[0], an->addr16[0], u),
		    ao.addr16[1], an->addr16[1], u),
		    po, pn, u);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		*pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
		    ao.addr16[0], an->addr16[0], u),
		    ao.addr16[1], an->addr16[1], u),
		    ao.addr16[2], an->addr16[2], u),
		    ao.addr16[3], an->addr16[3], u),
		    ao.addr16[4], an->addr16[4], u),
		    ao.addr16[5], an->addr16[5], u),
		    ao.addr16[6], an->addr16[6], u),
		    ao.addr16[7], an->addr16[7], u),
		    po, pn, u);
		break;
#endif /* INET6 */
	}
}

/* Changes a u_int32_t.  Uses a void * so there are no align restrictions */
void
pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u)
{
	u_int32_t	ao;

	memcpy(&ao, a, sizeof(ao));
	memcpy(a, &an, sizeof(u_int32_t));
	*c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u),
	    ao % 65536, an % 65536, u);
}

#ifdef INET6
void
pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u)
{
	struct pf_addr	ao;

	PF_ACPY(&ao, a, AF_INET6);
	PF_ACPY(a, an, AF_INET6);

	*c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	    pf_cksum_fixup(pf_cksum_fixup(*c,
	    ao.addr16[0], an->addr16[0], u),
	    ao.addr16[1], an->addr16[1], u),
	    ao.addr16[2], an->addr16[2], u),
	    ao.addr16[3], an->addr16[3], u),
	    ao.addr16[4], an->addr16[4], u),
	    ao.addr16[5], an->addr16[5], u),
	    ao.addr16[6], an->addr16[6], u),
	    ao.addr16[7], an->addr16[7], u);
}
#endif /* INET6 */

void
pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa,
    struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c,
    u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af)
{
	struct pf_addr	oia, ooa;

	PF_ACPY(&oia, ia, af);
	if (oa)
		PF_ACPY(&ooa, oa, af);

	/* Change inner protocol port, fix inner protocol checksum. */
	if (ip != NULL) {
		u_int16_t	oip = *ip;
		u_int32_t	opc = 0;

		if (pc != NULL)
			opc = *pc;
		*ip = np;
		if (pc != NULL)
			*pc = pf_cksum_fixup(*pc, oip, *ip, u);
		*ic = pf_cksum_fixup(*ic, oip, *ip, 0);
		if (pc != NULL)
			*ic = pf_cksum_fixup(*ic, opc, *pc, 0);
	}
	/* Change inner ip address, fix inner ip and icmp checksums. */
	PF_ACPY(ia, na, af);
	switch (af) {
#ifdef INET
	case AF_INET: {
		u_int32_t	oh2c = *h2c;

		*h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c,
		    oia.addr16[0], ia->addr16[0], 0),
		    oia.addr16[1], ia->addr16[1], 0);
		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
		    oia.addr16[0], ia->addr16[0], 0),
		    oia.addr16[1], ia->addr16[1], 0);
		*ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0);
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(*ic,
		    oia.addr16[0], ia->addr16[0], u),
		    oia.addr16[1], ia->addr16[1], u),
		    oia.addr16[2], ia->addr16[2], u),
		    oia.addr16[3], ia->addr16[3], u),
		    oia.addr16[4], ia->addr16[4], u),
		    oia.addr16[5], ia->addr16[5], u),
		    oia.addr16[6], ia->addr16[6], u),
		    oia.addr16[7], ia->addr16[7], u);
		break;
#endif /* INET6 */
	}
	/* Outer ip address, fix outer ip or icmpv6 checksum, if necessary. */

void
pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa,
    struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c,
    u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af)
{
	struct pf_addr oia, ooa;

	PF_ACPY(&oia, ia, af);
	if (oa)
		PF_ACPY(&ooa, oa, af);

	/* Change inner protocol port, fix inner protocol checksum. */
	if (ip != NULL) {
		u_int16_t oip = *ip;
		u_int32_t opc = 0;

		if (pc != NULL)
			opc = *pc;
		*ip = np;
		if (pc != NULL)
			*pc = pf_cksum_fixup(*pc, oip, *ip, u);
		*ic = pf_cksum_fixup(*ic, oip, *ip, 0);
		if (pc != NULL)
			*ic = pf_cksum_fixup(*ic, opc, *pc, 0);
	}
	/* Change inner ip address, fix inner ip and icmp checksums. */
	PF_ACPY(ia, na, af);
	switch (af) {
#ifdef INET
	case AF_INET: {
		u_int32_t oh2c = *h2c;

		*h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c,
		    oia.addr16[0], ia->addr16[0], 0),
		    oia.addr16[1], ia->addr16[1], 0);
		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
		    oia.addr16[0], ia->addr16[0], 0),
		    oia.addr16[1], ia->addr16[1], 0);
		*ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0);
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(*ic,
		    oia.addr16[0], ia->addr16[0], u),
		    oia.addr16[1], ia->addr16[1], u),
		    oia.addr16[2], ia->addr16[2], u),
		    oia.addr16[3], ia->addr16[3], u),
		    oia.addr16[4], ia->addr16[4], u),
		    oia.addr16[5], ia->addr16[5], u),
		    oia.addr16[6], ia->addr16[6], u),
		    oia.addr16[7], ia->addr16[7], u);
		break;
#endif /* INET6 */
	}
	/* Outer ip address, fix outer ip or icmpv6 checksum, if necessary. */
	if (oa) {
		PF_ACPY(oa, na, af);
		switch (af) {
#ifdef INET
		case AF_INET:
			*hc = pf_cksum_fixup(pf_cksum_fixup(*hc,
			    ooa.addr16[0], oa->addr16[0], 0),
			    ooa.addr16[1], oa->addr16[1], 0);
			break;
#endif /* INET */
#ifdef INET6
		case AF_INET6:
			*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(*ic,
			    ooa.addr16[0], oa->addr16[0], u),
			    ooa.addr16[1], oa->addr16[1], u),
			    ooa.addr16[2], oa->addr16[2], u),
			    ooa.addr16[3], oa->addr16[3], u),
			    ooa.addr16[4], oa->addr16[4], u),
			    ooa.addr16[5], oa->addr16[5], u),
			    ooa.addr16[6], oa->addr16[6], u),
			    ooa.addr16[7], oa->addr16[7], u);
			break;
#endif /* INET6 */
		}
	}
}


/*
 * Need to modulate the sequence numbers in the TCP SACK option
 * (credits to Krzysztof Pfaff for report and patch)
 */
int
pf_modulate_sack(struct mbuf *m, int off, struct pf_pdesc *pd,
    struct tcphdr *th, struct pf_state_peer *dst)
{
	int hlen = (th->th_off << 2) - sizeof(*th), thoptlen = hlen;
	u_int8_t opts[TCP_MAXOLEN], *opt = opts;
	int copyback = 0, i, olen;
	struct raw_sackblock sack;

#define TCPOLEN_SACKLEN	(TCPOLEN_SACK + 2)
	if (hlen < TCPOLEN_SACKLEN ||
	    !pf_pull_hdr(m, off + sizeof(*th), opts, hlen, NULL, NULL, pd->af))
		return 0;

	while (hlen >= TCPOLEN_SACKLEN) {
		olen = opt[1];
		switch (*opt) {
		case TCPOPT_EOL:	/* FALLTHROUGH */
		case TCPOPT_NOP:
			opt++;
			hlen--;
			break;
		case TCPOPT_SACK:
			if (olen > hlen)
				olen = hlen;
			if (olen >= TCPOLEN_SACKLEN) {
				for (i = 2; i + TCPOLEN_SACK <= olen;
				    i += TCPOLEN_SACK) {
					memcpy(&sack, &opt[i], sizeof(sack));
					pf_change_a(&sack.rblk_start,
					    &th->th_sum,
					    htonl(ntohl(sack.rblk_start) -
					    dst->seqdiff), 0);
					pf_change_a(&sack.rblk_end,
					    &th->th_sum,
					    htonl(ntohl(sack.rblk_end) -
					    dst->seqdiff), 0);
					memcpy(&opt[i], &sack, sizeof(sack));
				}
				copyback = 1;
			}
			/* FALLTHROUGH */
		default:
			if (olen < 2)
				olen = 2;
			hlen -= olen;
			opt += olen;
		}
	}

	if (copyback)
		m_copyback(m, off + sizeof(*th), thoptlen, opts);
	return (copyback);
}
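
/*
 * Illustration (not part of the original source): if a state modulates
 * the peer's sequence space by seqdiff = 1000, an incoming SACK block
 * covering [5000, 6000] refers to modulated numbers and is rewritten
 * above to [4000, 5000], with th_sum patched via pf_change_a() for each
 * 32-bit edge that changes.
 */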
2142 */ 2143 m = m_gethdr(M_NOWAIT, MT_HEADER); 2144 if (m == NULL) { 2145 return; 2146 } 2147 if (tag) 2148 m->m_pkthdr.fw_flags |= PF_MBUF_TAGGED; 2149 m->m_pkthdr.pf.flags = 0; 2150 m->m_pkthdr.pf.tag = rtag; 2151 /* XXX Recheck when upgrading to > 4.4 */ 2152 m->m_pkthdr.pf.statekey = NULL; 2153 if (r != NULL && r->rtableid >= 0) 2154 m->m_pkthdr.pf.rtableid = r->rtableid; 2155 2156 #ifdef ALTQ 2157 if (r != NULL && r->qid) { 2158 m->m_pkthdr.fw_flags |= PF_MBUF_STRUCTURE; 2159 m->m_pkthdr.pf.qid = r->qid; 2160 m->m_pkthdr.pf.ecn_af = af; 2161 m->m_pkthdr.pf.hdr = mtod(m, struct ip *); 2162 } 2163 #endif /* ALTQ */ 2164 m->m_data += max_linkhdr; 2165 m->m_pkthdr.len = m->m_len = len; 2166 m->m_pkthdr.rcvif = NULL; 2167 bzero(m->m_data, len); 2168 switch (af) { 2169 #ifdef INET 2170 case AF_INET: 2171 h = mtod(m, struct ip *); 2172 2173 /* IP header fields included in the TCP checksum */ 2174 h->ip_p = IPPROTO_TCP; 2175 h->ip_len = tlen; 2176 h->ip_src.s_addr = saddr->v4.s_addr; 2177 h->ip_dst.s_addr = daddr->v4.s_addr; 2178 2179 th = (struct tcphdr *)((caddr_t)h + sizeof(struct ip)); 2180 break; 2181 #endif /* INET */ 2182 #ifdef INET6 2183 case AF_INET6: 2184 h6 = mtod(m, struct ip6_hdr *); 2185 2186 /* IP header fields included in the TCP checksum */ 2187 h6->ip6_nxt = IPPROTO_TCP; 2188 h6->ip6_plen = htons(tlen); 2189 memcpy(&h6->ip6_src, &saddr->v6, sizeof(struct in6_addr)); 2190 memcpy(&h6->ip6_dst, &daddr->v6, sizeof(struct in6_addr)); 2191 2192 th = (struct tcphdr *)((caddr_t)h6 + sizeof(struct ip6_hdr)); 2193 break; 2194 #endif /* INET6 */ 2195 } 2196 2197 /* TCP header */ 2198 th->th_sport = sport; 2199 th->th_dport = dport; 2200 th->th_seq = htonl(seq); 2201 th->th_ack = htonl(ack); 2202 th->th_off = tlen >> 2; 2203 th->th_flags = flags; 2204 th->th_win = htons(win); 2205 2206 if (mss) { 2207 opt = (char *)(th + 1); 2208 opt[0] = TCPOPT_MAXSEG; 2209 opt[1] = 4; 2210 mss = htons(mss); 2211 bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2); 2212 } 2213 2214 switch (af) { 2215 #ifdef INET 2216 case AF_INET: 2217 /* TCP checksum */ 2218 th->th_sum = in_cksum(m, len); 2219 2220 /* Finish the IP header */ 2221 h->ip_v = 4; 2222 h->ip_hl = sizeof(*h) >> 2; 2223 h->ip_tos = IPTOS_LOWDELAY; 2224 h->ip_len = len; 2225 h->ip_off = path_mtu_discovery ? IP_DF : 0; 2226 h->ip_ttl = ttl ? 
	switch (af) {
#ifdef INET
	case AF_INET:
		/* TCP checksum */
		th->th_sum = in_cksum(m, len);

		/* Finish the IP header */
		h->ip_v = 4;
		h->ip_hl = sizeof(*h) >> 2;
		h->ip_tos = IPTOS_LOWDELAY;
		h->ip_len = len;
		h->ip_off = path_mtu_discovery ? IP_DF : 0;
		h->ip_ttl = ttl ? ttl : ip_defttl;
		h->ip_sum = 0;
		if (eh == NULL) {
			lwkt_reltoken(&pf_token);
			ip_output(m, NULL, NULL, 0, NULL, NULL);
			lwkt_gettoken(&pf_token);
		} else {
			struct route ro;
			struct rtentry rt;
			struct ether_header *e = (void *)ro.ro_dst.sa_data;

			if (ifp == NULL) {
				m_freem(m);
				return;
			}
			rt.rt_ifp = ifp;
			ro.ro_rt = &rt;
			ro.ro_dst.sa_len = sizeof(ro.ro_dst);
			ro.ro_dst.sa_family = pseudo_AF_HDRCMPLT;
			bcopy(eh->ether_dhost, e->ether_shost, ETHER_ADDR_LEN);
			bcopy(eh->ether_shost, e->ether_dhost, ETHER_ADDR_LEN);
			e->ether_type = eh->ether_type;
			/* XXX_IMPORT: later */
			lwkt_reltoken(&pf_token);
			ip_output(m, NULL, &ro, 0, NULL, NULL);
			lwkt_gettoken(&pf_token);
		}
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		/* TCP checksum */
		th->th_sum = in6_cksum(m, IPPROTO_TCP,
		    sizeof(struct ip6_hdr), tlen);

		h6->ip6_vfc |= IPV6_VERSION;
		h6->ip6_hlim = IPV6_DEFHLIM;

		lwkt_reltoken(&pf_token);
		ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
		lwkt_gettoken(&pf_token);
		break;
#endif /* INET6 */
	}
}

void
pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af,
    struct pf_rule *r)
{
	struct mbuf *m0;

	/*
	 * DragonFly doesn't zero the auxiliary pkthdr fields, only fw_flags,
	 * so make sure pf.flags is clear.
	 */
	if ((m0 = m_copy(m, 0, M_COPYALL)) == NULL)
		return;

	m0->m_pkthdr.fw_flags |= PF_MBUF_TAGGED;
	m0->m_pkthdr.pf.flags = 0;
	/* XXX Recheck when upgrading to > 4.4 */
	m0->m_pkthdr.pf.statekey = NULL;

	if (r->rtableid >= 0)
		m0->m_pkthdr.pf.rtableid = r->rtableid;

#ifdef ALTQ
	if (r->qid) {
		m0->m_pkthdr.fw_flags |= PF_MBUF_STRUCTURE;
		m0->m_pkthdr.pf.qid = r->qid;
		m0->m_pkthdr.pf.ecn_af = af;
		m0->m_pkthdr.pf.hdr = mtod(m0, struct ip *);
	}
#endif /* ALTQ */

	switch (af) {
#ifdef INET
	case AF_INET:
		icmp_error(m0, type, code, 0, 0);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		icmp6_error(m0, type, code, 0);
		break;
#endif /* INET6 */
	}
}

/*
 * Return 1 if the addresses a and b match (with mask m), otherwise return 0.
 * If n is 0, they match if they are equal.  If n is != 0, they match if they
 * are different.
 */
int
pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
    struct pf_addr *b, sa_family_t af)
{
	int match = 0;

	switch (af) {
#ifdef INET
	case AF_INET:
		if ((a->addr32[0] & m->addr32[0]) ==
		    (b->addr32[0] & m->addr32[0]))
			match++;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (((a->addr32[0] & m->addr32[0]) ==
		     (b->addr32[0] & m->addr32[0])) &&
		    ((a->addr32[1] & m->addr32[1]) ==
		     (b->addr32[1] & m->addr32[1])) &&
		    ((a->addr32[2] & m->addr32[2]) ==
		     (b->addr32[2] & m->addr32[2])) &&
		    ((a->addr32[3] & m->addr32[3]) ==
		     (b->addr32[3] & m->addr32[3])))
			match++;
		break;
#endif /* INET6 */
	}
	if (match) {
		if (n)
			return (0);
		else
			return (1);
	} else {
		if (n)
			return (1);
		else
			return (0);
	}
}
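
/*
 * Example (not part of the original source): with af = AF_INET,
 * a = 10.0.0.5, m = 255.255.255.0 and b = 10.0.0.99, both sides mask
 * to 10.0.0.0, so pf_match_addr() returns 1 for n == 0 ("match if
 * equal") and 0 for n != 0 ("match if different").
 */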
2364 */ 2365 int 2366 pf_match_addr_range(struct pf_addr *b, struct pf_addr *e, 2367 struct pf_addr *a, sa_family_t af) 2368 { 2369 switch (af) { 2370 #ifdef INET 2371 case AF_INET: 2372 if ((a->addr32[0] < b->addr32[0]) || 2373 (a->addr32[0] > e->addr32[0])) 2374 return (0); 2375 break; 2376 #endif /* INET */ 2377 #ifdef INET6 2378 case AF_INET6: { 2379 int i; 2380 2381 /* check a >= b */ 2382 for (i = 0; i < 4; ++i) 2383 if (a->addr32[i] > b->addr32[i]) 2384 break; 2385 else if (a->addr32[i] < b->addr32[i]) 2386 return (0); 2387 /* check a <= e */ 2388 for (i = 0; i < 4; ++i) 2389 if (a->addr32[i] < e->addr32[i]) 2390 break; 2391 else if (a->addr32[i] > e->addr32[i]) 2392 return (0); 2393 break; 2394 } 2395 #endif /* INET6 */ 2396 } 2397 return (1); 2398 } 2399 2400 int 2401 pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p) 2402 { 2403 switch (op) { 2404 case PF_OP_IRG: 2405 return ((p > a1) && (p < a2)); 2406 case PF_OP_XRG: 2407 return ((p < a1) || (p > a2)); 2408 case PF_OP_RRG: 2409 return ((p >= a1) && (p <= a2)); 2410 case PF_OP_EQ: 2411 return (p == a1); 2412 case PF_OP_NE: 2413 return (p != a1); 2414 case PF_OP_LT: 2415 return (p < a1); 2416 case PF_OP_LE: 2417 return (p <= a1); 2418 case PF_OP_GT: 2419 return (p > a1); 2420 case PF_OP_GE: 2421 return (p >= a1); 2422 } 2423 return (0); /* never reached */ 2424 } 2425 2426 int 2427 pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p) 2428 { 2429 a1 = ntohs(a1); 2430 a2 = ntohs(a2); 2431 p = ntohs(p); 2432 return (pf_match(op, a1, a2, p)); 2433 } 2434 2435 int 2436 pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u) 2437 { 2438 if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE) 2439 return (0); 2440 return (pf_match(op, a1, a2, u)); 2441 } 2442 2443 int 2444 pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g) 2445 { 2446 if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE) 2447 return (0); 2448 return (pf_match(op, a1, a2, g)); 2449 } 2450 2451 int 2452 pf_match_tag(struct mbuf *m, struct pf_rule *r, int *tag) 2453 { 2454 if (*tag == -1) 2455 *tag = m->m_pkthdr.pf.tag; 2456 2457 return ((!r->match_tag_not && r->match_tag == *tag) || 2458 (r->match_tag_not && r->match_tag != *tag)); 2459 } 2460 2461 int 2462 pf_tag_packet(struct mbuf *m, int tag, int rtableid) 2463 { 2464 if (tag <= 0 && rtableid < 0) 2465 return (0); 2466 2467 if (tag > 0) 2468 m->m_pkthdr.pf.tag = tag; 2469 if (rtableid >= 0) 2470 m->m_pkthdr.pf.rtableid = rtableid; 2471 2472 return (0); 2473 } 2474 2475 void 2476 pf_step_into_anchor(int *depth, struct pf_ruleset **rs, int n, 2477 struct pf_rule **r, struct pf_rule **a, int *match) 2478 { 2479 struct pf_anchor_stackframe *f; 2480 2481 (*r)->anchor->match = 0; 2482 if (match) 2483 *match = 0; 2484 if (*depth >= NELEM(pf_anchor_stack)) { 2485 kprintf("pf_step_into_anchor: stack overflow\n"); 2486 *r = TAILQ_NEXT(*r, entries); 2487 return; 2488 } else if (*depth == 0 && a != NULL) 2489 *a = *r; 2490 f = pf_anchor_stack + (*depth)++; 2491 f->rs = *rs; 2492 f->r = *r; 2493 if ((*r)->anchor_wildcard) { 2494 f->parent = &(*r)->anchor->children; 2495 if ((f->child = RB_MIN(pf_anchor_node, f->parent)) == 2496 NULL) { 2497 *r = NULL; 2498 return; 2499 } 2500 *rs = &f->child->ruleset; 2501 } else { 2502 f->parent = NULL; 2503 f->child = NULL; 2504 *rs = &(*r)->anchor->ruleset; 2505 } 2506 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr); 2507 } 2508 2509 int 2510 pf_step_out_of_anchor(int *depth, struct pf_ruleset **rs, int n, 2511 struct pf_rule **r, struct pf_rule **a, int *match) 2512 { 

int
pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
{
	a1 = ntohs(a1);
	a2 = ntohs(a2);
	p = ntohs(p);
	return (pf_match(op, a1, a2, p));
}

int
pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
{
	if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
		return (0);
	return (pf_match(op, a1, a2, u));
}

int
pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
{
	if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
		return (0);
	return (pf_match(op, a1, a2, g));
}

int
pf_match_tag(struct mbuf *m, struct pf_rule *r, int *tag)
{
	if (*tag == -1)
		*tag = m->m_pkthdr.pf.tag;

	return ((!r->match_tag_not && r->match_tag == *tag) ||
	    (r->match_tag_not && r->match_tag != *tag));
}

int
pf_tag_packet(struct mbuf *m, int tag, int rtableid)
{
	if (tag <= 0 && rtableid < 0)
		return (0);

	if (tag > 0)
		m->m_pkthdr.pf.tag = tag;
	if (rtableid >= 0)
		m->m_pkthdr.pf.rtableid = rtableid;

	return (0);
}

void
pf_step_into_anchor(int *depth, struct pf_ruleset **rs, int n,
    struct pf_rule **r, struct pf_rule **a, int *match)
{
	struct pf_anchor_stackframe *f;

	(*r)->anchor->match = 0;
	if (match)
		*match = 0;
	if (*depth >= NELEM(pf_anchor_stack)) {
		kprintf("pf_step_into_anchor: stack overflow\n");
		*r = TAILQ_NEXT(*r, entries);
		return;
	} else if (*depth == 0 && a != NULL)
		*a = *r;
	f = pf_anchor_stack + (*depth)++;
	f->rs = *rs;
	f->r = *r;
	if ((*r)->anchor_wildcard) {
		f->parent = &(*r)->anchor->children;
		if ((f->child = RB_MIN(pf_anchor_node, f->parent)) == NULL) {
			*r = NULL;
			return;
		}
		*rs = &f->child->ruleset;
	} else {
		f->parent = NULL;
		f->child = NULL;
		*rs = &(*r)->anchor->ruleset;
	}
	*r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
}

int
pf_step_out_of_anchor(int *depth, struct pf_ruleset **rs, int n,
    struct pf_rule **r, struct pf_rule **a, int *match)
{
	struct pf_anchor_stackframe *f;
	int quick = 0;

	do {
		if (*depth <= 0)
			break;
		f = pf_anchor_stack + *depth - 1;
		if (f->parent != NULL && f->child != NULL) {
			if (f->child->match ||
			    (match != NULL && *match)) {
				f->r->anchor->match = 1;
				*match = 0;
			}
			f->child = RB_NEXT(pf_anchor_node, f->parent, f->child);
			if (f->child != NULL) {
				*rs = &f->child->ruleset;
				*r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
				if (*r == NULL)
					continue;
				else
					break;
			}
		}
		(*depth)--;
		if (*depth == 0 && a != NULL)
			*a = NULL;
		*rs = f->rs;
		if (f->r->anchor->match || (match != NULL && *match))
			quick = f->r->quick;
		*r = TAILQ_NEXT(f->r, entries);
	} while (*r == NULL);

	return (quick);
}

#ifdef INET6
void
pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr,
    struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af)
{
	switch (af) {
#ifdef INET
	case AF_INET:
		naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
		    ((rmask->addr32[0] ^ 0xffffffff) & saddr->addr32[0]);
		break;
#endif /* INET */
	case AF_INET6:
		naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
		    ((rmask->addr32[0] ^ 0xffffffff) & saddr->addr32[0]);
		naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) |
		    ((rmask->addr32[1] ^ 0xffffffff) & saddr->addr32[1]);
		naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) |
		    ((rmask->addr32[2] ^ 0xffffffff) & saddr->addr32[2]);
		naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) |
		    ((rmask->addr32[3] ^ 0xffffffff) & saddr->addr32[3]);
		break;
	}
}

void
pf_addr_inc(struct pf_addr *addr, sa_family_t af)
{
	switch (af) {
#ifdef INET
	case AF_INET:
		addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1);
		break;
#endif /* INET */
	case AF_INET6:
		if (addr->addr32[3] == 0xffffffff) {
			addr->addr32[3] = 0;
			if (addr->addr32[2] == 0xffffffff) {
				addr->addr32[2] = 0;
				if (addr->addr32[1] == 0xffffffff) {
					addr->addr32[1] = 0;
					addr->addr32[0] =
					    htonl(ntohl(addr->addr32[0]) + 1);
				} else
					addr->addr32[1] =
					    htonl(ntohl(addr->addr32[1]) + 1);
			} else
				addr->addr32[2] =
				    htonl(ntohl(addr->addr32[2]) + 1);
		} else
			addr->addr32[3] =
			    htonl(ntohl(addr->addr32[3]) + 1);
		break;
	}
}
#endif /* INET6 */
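
/*
 * Example (not part of the original source): pf_addr_inc() treats an
 * IPv6 address as a 128-bit big-endian counter, carrying into the next
 * 32-bit word on overflow:
 *
 *	2001:db8::ffff:ffff + 1  ->  2001:db8::1:0:0
 */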

#define mix(a,b,c) \
	do {					\
		a -= b; a -= c; a ^= (c >> 13);	\
		b -= c; b -= a; b ^= (a << 8);	\
		c -= a; c -= b; c ^= (b >> 13);	\
		a -= b; a -= c; a ^= (c >> 12);	\
		b -= c; b -= a; b ^= (a << 16);	\
		c -= a; c -= b; c ^= (b >> 5);	\
		a -= b; a -= c; a ^= (c >> 3);	\
		b -= c; b -= a; b ^= (a << 10);	\
		c -= a; c -= b; c ^= (b >> 15);	\
	} while (0)

/*
 * hash function based on bridge_hash in if_bridge.c
 */
void
pf_hash(struct pf_addr *inaddr, struct pf_addr *hash,
    struct pf_poolhashkey *key, sa_family_t af)
{
	u_int32_t a = 0x9e3779b9, b = 0x9e3779b9, c = key->key32[0];

	switch (af) {
#ifdef INET
	case AF_INET:
		a += inaddr->addr32[0];
		b += key->key32[1];
		mix(a, b, c);
		hash->addr32[0] = c + key->key32[2];
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		a += inaddr->addr32[0];
		b += inaddr->addr32[2];
		mix(a, b, c);
		hash->addr32[0] = c;
		a += inaddr->addr32[1];
		b += inaddr->addr32[3];
		c += key->key32[1];
		mix(a, b, c);
		hash->addr32[1] = c;
		a += inaddr->addr32[2];
		b += inaddr->addr32[1];
		c += key->key32[2];
		mix(a, b, c);
		hash->addr32[2] = c;
		a += inaddr->addr32[3];
		b += inaddr->addr32[0];
		c += key->key32[3];
		mix(a, b, c);
		hash->addr32[3] = c;
		break;
#endif /* INET6 */
	}
}

int
pf_map_addr(sa_family_t af, struct pf_rule *r, struct pf_addr *saddr,
    struct pf_addr *naddr, struct pf_addr *init_addr, struct pf_src_node **sn)
{
	unsigned char hash[16];
	struct pf_pool *rpool = &r->rpool;
	struct pf_pooladdr *acur = rpool->cur;
	struct pf_pooladdr *cur;
	struct pf_addr *raddr;
	struct pf_addr *rmask;
	struct pf_addr counter;
	struct pf_src_node k;
	int cpu = mycpu->gd_cpuid;
	int tblidx;

	bzero(hash, sizeof(hash));	/* avoid gcc warnings */

	/*
	 * NOTE! rpool->cur and rpool->tblidx can be iterators and thus
	 *	 may represent a SMP race due to the shared nature of the
	 *	 rpool structure.  We allow the race and ensure that updates
	 *	 do not create a fatal condition.
	 */
	cpu_ccfence();
	cur = acur;
	raddr = &cur->addr.v.a.addr;
	rmask = &cur->addr.v.a.mask;

	if (*sn == NULL && r->rpool.opts & PF_POOL_STICKYADDR &&
	    (r->rpool.opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) {
		k.af = af;
		PF_ACPY(&k.addr, saddr, af);
		if (r->rule_flag & PFRULE_RULESRCTRACK ||
		    r->rpool.opts & PF_POOL_STICKYADDR)
			k.rule.ptr = r;
		else
			k.rule.ptr = NULL;
		PF_INC_SCOUNTER(SCNT_SRC_NODE_SEARCH);
		*sn = RB_FIND(pf_src_tree, &tree_src_tracking[cpu], &k);
		if (*sn != NULL && !PF_AZERO(&(*sn)->raddr, af)) {
			PF_ACPY(naddr, &(*sn)->raddr, af);
			if (pf_status.debug >= PF_DEBUG_MISC) {
				kprintf("pf_map_addr: src tracking maps ");
				pf_print_host(&k.addr, 0, af);
				kprintf(" to ");
				pf_print_host(naddr, 0, af);
				kprintf("\n");
			}
			return (0);
		}
	}

	if (cur->addr.type == PF_ADDR_NOROUTE)
		return (1);
	if (cur->addr.type == PF_ADDR_DYNIFTL) {
		switch (af) {
#ifdef INET
		case AF_INET:
			if (cur->addr.p.dyn->pfid_acnt4 < 1 &&
			    (rpool->opts & PF_POOL_TYPEMASK) !=
			    PF_POOL_ROUNDROBIN)
				return (1);
			raddr = &cur->addr.p.dyn->pfid_addr4;
			rmask = &cur->addr.p.dyn->pfid_mask4;
			break;
#endif /* INET */
#ifdef INET6
		case AF_INET6:
			if (cur->addr.p.dyn->pfid_acnt6 < 1 &&
			    (rpool->opts & PF_POOL_TYPEMASK) !=
			    PF_POOL_ROUNDROBIN)
				return (1);
			raddr = &cur->addr.p.dyn->pfid_addr6;
			rmask = &cur->addr.p.dyn->pfid_mask6;
			break;
#endif /* INET6 */
		}
	} else if (cur->addr.type == PF_ADDR_TABLE) {
		if ((rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_ROUNDROBIN)
			return (1);	/* unsupported */
	} else {
		raddr = &cur->addr.v.a.addr;
		rmask = &cur->addr.v.a.mask;
	}

	switch (rpool->opts & PF_POOL_TYPEMASK) {
	case PF_POOL_NONE:
		PF_ACPY(naddr, raddr, af);
		break;
	case PF_POOL_BITMASK:
		PF_POOLMASK(naddr, raddr, rmask, saddr, af);
		break;
	case PF_POOL_RANDOM:
		if (init_addr != NULL && PF_AZERO(init_addr, af)) {
			switch (af) {
#ifdef INET
			case AF_INET:
				counter.addr32[0] = htonl(karc4random());
				break;
#endif /* INET */
#ifdef INET6
			case AF_INET6:
				if (rmask->addr32[3] != 0xffffffff)
					counter.addr32[3] =
					    htonl(karc4random());
				else
					break;
				if (rmask->addr32[2] != 0xffffffff)
					counter.addr32[2] =
					    htonl(karc4random());
				else
					break;
				if (rmask->addr32[1] != 0xffffffff)
					counter.addr32[1] =
					    htonl(karc4random());
				else
					break;
				if (rmask->addr32[0] != 0xffffffff)
					counter.addr32[0] =
					    htonl(karc4random());
				break;
#endif /* INET6 */
			}
			PF_POOLMASK(naddr, raddr, rmask, &counter, af);
			PF_ACPY(init_addr, naddr, af);
		} else {
			counter = rpool->counter;
			cpu_ccfence();
			PF_AINC(&counter, af);
			PF_POOLMASK(naddr, raddr, rmask, &counter, af);
			rpool->counter = counter;
		}
		break;
	case PF_POOL_SRCHASH:
		pf_hash(saddr, (struct pf_addr *)&hash, &rpool->key, af);
		PF_POOLMASK(naddr, raddr, rmask, (struct pf_addr *)&hash, af);
		break;
	case PF_POOL_ROUNDROBIN:
		tblidx = rpool->tblidx;
		counter = rpool->counter;
		if (cur->addr.type == PF_ADDR_TABLE) {
			if (!pfr_pool_get(cur->addr.p.tbl,
			    &tblidx, &counter,
			    &raddr, &rmask, af)) {
				goto get_addr;
			}
		} else if (cur->addr.type == PF_ADDR_DYNIFTL) {
			if (!pfr_pool_get(cur->addr.p.dyn->pfid_kt,
			    &tblidx, &counter,
			    &raddr, &rmask, af)) {
				goto get_addr;
			}
		} else if (pf_match_addr(0, raddr, rmask, &counter, af)) {
			goto get_addr;
		}

	try_next:
		if ((cur = TAILQ_NEXT(cur, entries)) == NULL)
			cur = TAILQ_FIRST(&rpool->list);
		if (cur->addr.type == PF_ADDR_TABLE) {
			tblidx = -1;
			if (pfr_pool_get(cur->addr.p.tbl,
			    &tblidx, &counter,
			    &raddr, &rmask, af)) {
				/* table contains no address of type 'af' */
				if (cur != acur)
					goto try_next;
				return (1);
			}
		} else if (cur->addr.type == PF_ADDR_DYNIFTL) {
			tblidx = -1;
			if (pfr_pool_get(cur->addr.p.dyn->pfid_kt,
			    &tblidx, &counter,
			    &raddr, &rmask, af)) {
				/* table contains no address of type 'af' */
				if (cur != acur)
					goto try_next;
				return (1);
			}
		} else {
			raddr = &cur->addr.v.a.addr;
			rmask = &cur->addr.v.a.mask;
			PF_ACPY(&counter, raddr, af);
		}

	get_addr:
		rpool->cur = cur;
		rpool->tblidx = tblidx;
		PF_ACPY(naddr, &counter, af);
		if (init_addr != NULL && PF_AZERO(init_addr, af))
			PF_ACPY(init_addr, naddr, af);
		PF_AINC(&counter, af);
		rpool->counter = counter;
		break;
	}
	if (*sn != NULL)
		PF_ACPY(&(*sn)->raddr, naddr, af);

	if (pf_status.debug >= PF_DEBUG_MISC &&
	    (rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) {
		kprintf("pf_map_addr: selected address ");
		pf_print_host(naddr, 0, af);
		kprintf("\n");
	}

	return (0);
}
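
/*
 * Example (not part of the original source): for PF_POOL_BITMASK with
 * raddr = 192.0.2.0, rmask = 255.255.255.0 and saddr = 10.1.2.3,
 * PF_POOLMASK() combines the network bits of the pool address with the
 * host bits of the source:
 *
 *	naddr = (192.0.2.0 & 255.255.255.0) | (10.1.2.3 & 0.0.0.255)
 *	      = 192.0.2.3
 *
 * so a given source address always maps to the same translated address.
 */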

int
pf_get_sport(struct pf_pdesc *pd, sa_family_t af,
    u_int8_t proto, struct pf_rule *r,
    struct pf_addr *saddr, struct pf_addr *daddr,
    u_int16_t sport, u_int16_t dport,
    struct pf_addr *naddr, u_int16_t *nport,
    u_int16_t low, u_int16_t high, struct pf_src_node **sn)
{
	struct pf_state_key_cmp key;
	struct pf_addr init_addr;
	u_int16_t cut;
	u_int32_t hash_base = 0;
	int do_hash = 0;

	bzero(&init_addr, sizeof(init_addr));
	if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn))
		return (1);

	if (proto == IPPROTO_ICMP) {
		low = 1;
		high = 65535;
	}

	bzero(&key, sizeof(key));
	key.af = af;
	key.proto = proto;
	key.port[0] = dport;
	PF_ACPY(&key.addr[0], daddr, key.af);

	do {
		PF_ACPY(&key.addr[1], naddr, key.af);

		/*
		 * We want to select a port that calculates to a toeplitz hash
		 * that masks to the same cpu, otherwise the response may
		 * not see the new state.
		 *
		 * We can still do this even if the kernel is disregarding
		 * the hash and vectoring the packets to a specific cpu,
		 * but it will reduce the number of ports we can use.
		 */
		switch (af) {
		case AF_INET:
			if (proto == IPPROTO_TCP) {
				do_hash = 1;
				hash_base = toeplitz_piecemeal_port(dport) ^
				    toeplitz_piecemeal_addr(daddr->v4.s_addr) ^
				    toeplitz_piecemeal_addr(naddr->v4.s_addr);
			}
			break;
		case AF_INET6:
			/* XXX TODO XXX */
		default:
			/* XXX TODO XXX */
			break;
		}

		/*
		 * Port search; start random, then step, similar to the
		 * port loop in in_pcbbind().
		 *
		 * WARNING! We try to match such that the kernel will
		 *	    dispatch the translated host/port to the same
		 *	    cpu, but this might not be possible.
		 *
		 *	    In the case where the port is fixed, or for the
		 *	    UDP case (whose toeplitz hash does not
		 *	    incorporate the port), we set not_cpu_localized,
		 *	    which ultimately causes the pf_state_tree element
		 *	    to be marked PFSTATE_STACK_GLOBAL.
		 *
		 * XXX fixed ports present a problem for cpu localization.
		 */
2969 */ 2970 key.port[1] = htons(low); 2971 if (pf_find_state_all(&key, PF_IN, NULL) == NULL) { 2972 *nport = htons(low); 2973 pd->not_cpu_localized = 1; 2974 return (0); 2975 } 2976 } else { 2977 /* 2978 * normal dynamic port 2979 */ 2980 u_int16_t tmp; 2981 2982 if (low > high) { 2983 tmp = low; 2984 low = high; 2985 high = tmp; 2986 } 2987 /* low < high */ 2988 cut = htonl(karc4random()) % (1 + high - low) + low; 2989 /* low <= cut <= high */ 2990 for (tmp = cut; tmp <= high; ++(tmp)) { 2991 key.port[1] = htons(tmp); 2992 if (do_hash) { 2993 uint32_t hash; 2994 2995 hash = hash_base ^ 2996 toeplitz_piecemeal_port(key.port[1]); 2997 if (netisr_hashcpu(hash) != mycpuid) 2998 continue; 2999 } 3000 if (pf_find_state_all(&key, PF_IN, NULL) == 3001 NULL && !in_baddynamic(tmp, proto)) { 3002 if (proto == IPPROTO_UDP) 3003 pd->not_cpu_localized = 1; 3004 *nport = htons(tmp); 3005 return (0); 3006 } 3007 } 3008 for (tmp = cut - 1; tmp >= low; --(tmp)) { 3009 key.port[1] = htons(tmp); 3010 if (do_hash) { 3011 uint32_t hash; 3012 3013 hash = hash_base ^ 3014 toeplitz_piecemeal_port(key.port[1]); 3015 if (netisr_hashcpu(hash) != mycpuid) 3016 continue; 3017 } 3018 if (pf_find_state_all(&key, PF_IN, NULL) == 3019 NULL && !in_baddynamic(tmp, proto)) { 3020 if (proto == IPPROTO_UDP) 3021 pd->not_cpu_localized = 1; 3022 *nport = htons(tmp); 3023 return (0); 3024 } 3025 } 3026 } 3027 3028 /* 3029 * Next address 3030 */ 3031 switch (r->rpool.opts & PF_POOL_TYPEMASK) { 3032 case PF_POOL_RANDOM: 3033 case PF_POOL_ROUNDROBIN: 3034 if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn)) 3035 return (1); 3036 break; 3037 case PF_POOL_NONE: 3038 case PF_POOL_SRCHASH: 3039 case PF_POOL_BITMASK: 3040 default: 3041 return (1); 3042 } 3043 } while (! PF_AEQ(&init_addr, naddr, af) ); 3044 return (1); /* none available */ 3045 } 3046 3047 struct pf_rule * 3048 pf_match_translation(struct pf_pdesc *pd, struct mbuf *m, int off, 3049 int direction, struct pfi_kif *kif, struct pf_addr *saddr, u_int16_t sport, 3050 struct pf_addr *daddr, u_int16_t dport, int rs_num) 3051 { 3052 struct pf_rule *r, *rm = NULL; 3053 struct pf_ruleset *ruleset = NULL; 3054 int tag = -1; 3055 int rtableid = -1; 3056 int asd = 0; 3057 3058 r = TAILQ_FIRST(pf_main_ruleset.rules[rs_num].active.ptr); 3059 while (r && rm == NULL) { 3060 struct pf_rule_addr *src = NULL, *dst = NULL; 3061 struct pf_addr_wrap *xdst = NULL; 3062 struct pf_pooladdr *cur; 3063 3064 if (r->action == PF_BINAT && direction == PF_IN) { 3065 src = &r->dst; 3066 cur = r->rpool.cur; /* SMP race possible */ 3067 cpu_ccfence(); 3068 if (cur) 3069 xdst = &cur->addr; 3070 } else { 3071 src = &r->src; 3072 dst = &r->dst; 3073 } 3074 3075 r->evaluations++; 3076 if (pfi_kif_match(r->kif, kif) == r->ifnot) 3077 r = r->skip[PF_SKIP_IFP].ptr; 3078 else if (r->direction && r->direction != direction) 3079 r = r->skip[PF_SKIP_DIR].ptr; 3080 else if (r->af && r->af != pd->af) 3081 r = r->skip[PF_SKIP_AF].ptr; 3082 else if (r->proto && r->proto != pd->proto) 3083 r = r->skip[PF_SKIP_PROTO].ptr; 3084 else if (PF_MISMATCHAW(&src->addr, saddr, pd->af, 3085 src->neg, kif)) 3086 r = r->skip[src == &r->src ? PF_SKIP_SRC_ADDR : 3087 PF_SKIP_DST_ADDR].ptr; 3088 else if (src->port_op && !pf_match_port(src->port_op, 3089 src->port[0], src->port[1], sport)) 3090 r = r->skip[src == &r->src ? 

struct pf_rule *
pf_match_translation(struct pf_pdesc *pd, struct mbuf *m, int off,
    int direction, struct pfi_kif *kif, struct pf_addr *saddr, u_int16_t sport,
    struct pf_addr *daddr, u_int16_t dport, int rs_num)
{
	struct pf_rule *r, *rm = NULL;
	struct pf_ruleset *ruleset = NULL;
	int tag = -1;
	int rtableid = -1;
	int asd = 0;

	r = TAILQ_FIRST(pf_main_ruleset.rules[rs_num].active.ptr);
	while (r && rm == NULL) {
		struct pf_rule_addr *src = NULL, *dst = NULL;
		struct pf_addr_wrap *xdst = NULL;
		struct pf_pooladdr *cur;

		if (r->action == PF_BINAT && direction == PF_IN) {
			src = &r->dst;
			cur = r->rpool.cur;	/* SMP race possible */
			cpu_ccfence();
			if (cur)
				xdst = &cur->addr;
		} else {
			src = &r->src;
			dst = &r->dst;
		}

		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != direction)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != pd->af)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&src->addr, saddr, pd->af,
		    src->neg, kif))
			r = r->skip[src == &r->src ? PF_SKIP_SRC_ADDR :
			    PF_SKIP_DST_ADDR].ptr;
		else if (src->port_op && !pf_match_port(src->port_op,
		    src->port[0], src->port[1], sport))
			r = r->skip[src == &r->src ? PF_SKIP_SRC_PORT :
			    PF_SKIP_DST_PORT].ptr;
		else if (dst != NULL &&
		    PF_MISMATCHAW(&dst->addr, daddr, pd->af, dst->neg, NULL))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else if (xdst != NULL && PF_MISMATCHAW(xdst, daddr, pd->af,
		    0, NULL))
			r = TAILQ_NEXT(r, entries);
		else if (dst != NULL && dst->port_op &&
		    !pf_match_port(dst->port_op, dst->port[0],
		    dst->port[1], dport))
			r = r->skip[PF_SKIP_DST_PORT].ptr;
		else if (r->match_tag && !pf_match_tag(m, r, &tag))
			r = TAILQ_NEXT(r, entries);
		else if (r->os_fingerprint != PF_OSFP_ANY && (pd->proto !=
		    IPPROTO_TCP || !pf_osfp_match(pf_osfp_fingerprint(pd, m,
		    off, pd->hdr.tcp), r->os_fingerprint)))
			r = TAILQ_NEXT(r, entries);
		else {
			if (r->tag)
				tag = r->tag;
			if (r->rtableid >= 0)
				rtableid = r->rtableid;
			if (r->anchor == NULL) {
				rm = r;
			} else
				pf_step_into_anchor(&asd, &ruleset, rs_num,
				    &r, NULL, NULL);
		}
		if (r == NULL)
			pf_step_out_of_anchor(&asd, &ruleset, rs_num, &r,
			    NULL, NULL);
	}
	if (pf_tag_packet(m, tag, rtableid))
		return (NULL);
	if (rm != NULL && (rm->action == PF_NONAT ||
	    rm->action == PF_NORDR || rm->action == PF_NOBINAT))
		return (NULL);
	return (rm);
}
3171 */ 3172 switch (r->action) { 3173 case PF_NONAT: 3174 case PF_NOBINAT: 3175 case PF_NORDR: 3176 return (NULL); 3177 case PF_NAT: 3178 m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED; 3179 if (pf_get_sport(pd, pd->af, pd->proto, r, 3180 saddr, daddr, sport, dport, 3181 naddr, nport, r->rpool.proxy_port[0], 3182 r->rpool.proxy_port[1], sn)) { 3183 DPFPRINTF(PF_DEBUG_MISC, 3184 ("pf: NAT proxy port allocation " 3185 "(%u-%u) failed\n", 3186 r->rpool.proxy_port[0], 3187 r->rpool.proxy_port[1])); 3188 return (NULL); 3189 } 3190 break; 3191 case PF_BINAT: 3192 m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED; 3193 switch (direction) { 3194 case PF_OUT: 3195 if (r->rpool.cur->addr.type == PF_ADDR_DYNIFTL){ 3196 switch (pd->af) { 3197 #ifdef INET 3198 case AF_INET: 3199 if (r->rpool.cur->addr.p.dyn-> 3200 pfid_acnt4 < 1) 3201 return (NULL); 3202 PF_POOLMASK(naddr, 3203 &r->rpool.cur->addr.p.dyn-> 3204 pfid_addr4, 3205 &r->rpool.cur->addr.p.dyn-> 3206 pfid_mask4, 3207 saddr, AF_INET); 3208 break; 3209 #endif /* INET */ 3210 #ifdef INET6 3211 case AF_INET6: 3212 if (r->rpool.cur->addr.p.dyn-> 3213 pfid_acnt6 < 1) 3214 return (NULL); 3215 PF_POOLMASK(naddr, 3216 &r->rpool.cur->addr.p.dyn-> 3217 pfid_addr6, 3218 &r->rpool.cur->addr.p.dyn-> 3219 pfid_mask6, 3220 saddr, AF_INET6); 3221 break; 3222 #endif /* INET6 */ 3223 } 3224 } else 3225 PF_POOLMASK(naddr, 3226 &r->rpool.cur->addr.v.a.addr, 3227 &r->rpool.cur->addr.v.a.mask, 3228 saddr, pd->af); 3229 break; 3230 case PF_IN: 3231 if (r->src.addr.type == PF_ADDR_DYNIFTL) { 3232 switch (pd->af) { 3233 #ifdef INET 3234 case AF_INET: 3235 if (r->src.addr.p.dyn-> 3236 pfid_acnt4 < 1) 3237 return (NULL); 3238 PF_POOLMASK(naddr, 3239 &r->src.addr.p.dyn-> 3240 pfid_addr4, 3241 &r->src.addr.p.dyn-> 3242 pfid_mask4, 3243 daddr, AF_INET); 3244 break; 3245 #endif /* INET */ 3246 #ifdef INET6 3247 case AF_INET6: 3248 if (r->src.addr.p.dyn-> 3249 pfid_acnt6 < 1) 3250 return (NULL); 3251 PF_POOLMASK(naddr, 3252 &r->src.addr.p.dyn-> 3253 pfid_addr6, 3254 &r->src.addr.p.dyn-> 3255 pfid_mask6, 3256 daddr, AF_INET6); 3257 break; 3258 #endif /* INET6 */ 3259 } 3260 } else 3261 PF_POOLMASK(naddr, 3262 &r->src.addr.v.a.addr, 3263 &r->src.addr.v.a.mask, daddr, 3264 pd->af); 3265 break; 3266 } 3267 break; 3268 case PF_RDR: { 3269 m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED; 3270 if (pf_map_addr(pd->af, r, saddr, naddr, NULL, sn)) 3271 return (NULL); 3272 if ((r->rpool.opts & PF_POOL_TYPEMASK) == 3273 PF_POOL_BITMASK) 3274 PF_POOLMASK(naddr, naddr, 3275 &r->rpool.cur->addr.v.a.mask, daddr, 3276 pd->af); 3277 3278 if (r->rpool.proxy_port[1]) { 3279 u_int32_t tmp_nport; 3280 3281 tmp_nport = ((ntohs(dport) - 3282 ntohs(r->dst.port[0])) % 3283 (r->rpool.proxy_port[1] - 3284 r->rpool.proxy_port[0] + 1)) + 3285 r->rpool.proxy_port[0]; 3286 3287 /* wrap around if necessary */ 3288 if (tmp_nport > 65535) 3289 tmp_nport -= 65535; 3290 *nport = htons((u_int16_t)tmp_nport); 3291 } else if (r->rpool.proxy_port[0]) { 3292 *nport = htons(r->rpool.proxy_port[0]); 3293 } 3294 pd->not_cpu_localized = 1; 3295 break; 3296 } 3297 default: 3298 return (NULL); 3299 } 3300 } 3301 3302 return (r); 3303 } 3304 3305 struct netmsg_hashlookup { 3306 struct netmsg_base base; 3307 struct inpcb **nm_pinp; 3308 struct inpcbinfo *nm_pcbinfo; 3309 struct pf_addr *nm_saddr; 3310 struct pf_addr *nm_daddr; 3311 uint16_t nm_sport; 3312 uint16_t nm_dport; 3313 sa_family_t nm_af; 3314 }; 3315 3316 #ifdef PF_SOCKET_LOOKUP_DOMSG 3317 static void 3318 in_pcblookup_hash_handler(netmsg_t msg) 3319 { 3320 struct 

struct netmsg_hashlookup {
	struct netmsg_base	base;
	struct inpcb		**nm_pinp;
	struct inpcbinfo	*nm_pcbinfo;
	struct pf_addr		*nm_saddr;
	struct pf_addr		*nm_daddr;
	uint16_t		nm_sport;
	uint16_t		nm_dport;
	sa_family_t		nm_af;
};

#ifdef PF_SOCKET_LOOKUP_DOMSG
static void
in_pcblookup_hash_handler(netmsg_t msg)
{
	struct netmsg_hashlookup *rmsg = (struct netmsg_hashlookup *)msg;

	if (rmsg->nm_af == AF_INET)
		*rmsg->nm_pinp = in_pcblookup_hash(rmsg->nm_pcbinfo,
		    rmsg->nm_saddr->v4, rmsg->nm_sport, rmsg->nm_daddr->v4,
		    rmsg->nm_dport, INPLOOKUP_WILDCARD, NULL);
#ifdef INET6
	else
		*rmsg->nm_pinp = in6_pcblookup_hash(rmsg->nm_pcbinfo,
		    &rmsg->nm_saddr->v6, rmsg->nm_sport, &rmsg->nm_daddr->v6,
		    rmsg->nm_dport, INPLOOKUP_WILDCARD, NULL);
#endif /* INET6 */
	lwkt_replymsg(&rmsg->base.lmsg, 0);
}
#endif /* PF_SOCKET_LOOKUP_DOMSG */
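
/*
 * Note (not part of the original source): in_pcblookup_hash_handler()
 * above is the remote half of a split lookup.  The per-cpu tcbinfo
 * tables may only be read from their owning netisr, so when
 * PF_SOCKET_LOOKUP_DOMSG is defined, pf_socket_lookup() below
 * dispatches this handler to the owning cpu via lwkt_domsg() and
 * blocks until lwkt_replymsg() delivers the answer.
 */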
3443 */ 3444 if (msg == NULL) { 3445 inp = in6_pcblookup_hash(pi, &saddr->v6, sport, 3446 &daddr->v6, dport, INPLOOKUP_WILDCARD, NULL); 3447 3448 if (inp == NULL) 3449 return (-1); 3450 break; 3451 } 3452 /* FALLTHROUGH if SMP and on other CPU */ 3453 #endif /* INET6 */ 3454 case AF_INET: 3455 if (msg != NULL) { 3456 lwkt_domsg(netisr_cpuport(pi_cpu), 3457 &msg->base.lmsg, 0); 3458 } else 3459 { 3460 inp = in_pcblookup_hash(pi, saddr->v4, sport, daddr->v4, 3461 dport, INPLOOKUP_WILDCARD, NULL); 3462 } 3463 if (inp == NULL) 3464 return (-1); 3465 break; 3466 3467 default: 3468 return (-1); 3469 } 3470 pd->lookup.uid = inp->inp_socket->so_cred->cr_uid; 3471 pd->lookup.gid = inp->inp_socket->so_cred->cr_groups[0]; 3472 return (1); 3473 } 3474 3475 u_int8_t 3476 pf_get_wscale(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af) 3477 { 3478 int hlen; 3479 u_int8_t hdr[60]; 3480 u_int8_t *opt, optlen; 3481 u_int8_t wscale = 0; 3482 3483 hlen = th_off << 2; /* hlen <= sizeof(hdr) */ 3484 if (hlen <= sizeof(struct tcphdr)) 3485 return (0); 3486 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af)) 3487 return (0); 3488 opt = hdr + sizeof(struct tcphdr); 3489 hlen -= sizeof(struct tcphdr); 3490 while (hlen >= 3) { 3491 switch (*opt) { 3492 case TCPOPT_EOL: 3493 case TCPOPT_NOP: 3494 ++opt; 3495 --hlen; 3496 break; 3497 case TCPOPT_WINDOW: 3498 wscale = opt[2]; 3499 if (wscale > TCP_MAX_WINSHIFT) 3500 wscale = TCP_MAX_WINSHIFT; 3501 wscale |= PF_WSCALE_FLAG; 3502 /* FALLTHROUGH */ 3503 default: 3504 optlen = opt[1]; 3505 if (optlen < 2) 3506 optlen = 2; 3507 hlen -= optlen; 3508 opt += optlen; 3509 break; 3510 } 3511 } 3512 return (wscale); 3513 } 3514 3515 u_int16_t 3516 pf_get_mss(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af) 3517 { 3518 int hlen; 3519 u_int8_t hdr[60]; 3520 u_int8_t *opt, optlen; 3521 u_int16_t mss = tcp_mssdflt; 3522 3523 hlen = th_off << 2; /* hlen <= sizeof(hdr) */ 3524 if (hlen <= sizeof(struct tcphdr)) 3525 return (0); 3526 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af)) 3527 return (0); 3528 opt = hdr + sizeof(struct tcphdr); 3529 hlen -= sizeof(struct tcphdr); 3530 while (hlen >= TCPOLEN_MAXSEG) { 3531 switch (*opt) { 3532 case TCPOPT_EOL: 3533 case TCPOPT_NOP: 3534 ++opt; 3535 --hlen; 3536 break; 3537 case TCPOPT_MAXSEG: 3538 bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2); 3539 /* FALLTHROUGH */ 3540 default: 3541 optlen = opt[1]; 3542 if (optlen < 2) 3543 optlen = 2; 3544 hlen -= optlen; 3545 opt += optlen; 3546 break; 3547 } 3548 } 3549 return (mss); 3550 } 3551 3552 u_int16_t 3553 pf_calc_mss(struct pf_addr *addr, sa_family_t af, u_int16_t offer) 3554 { 3555 #ifdef INET 3556 struct sockaddr_in *dst; 3557 struct route ro; 3558 #endif /* INET */ 3559 #ifdef INET6 3560 struct sockaddr_in6 *dst6; 3561 struct route_in6 ro6; 3562 #endif /* INET6 */ 3563 struct rtentry *rt = NULL; 3564 int hlen = 0; 3565 u_int16_t mss = tcp_mssdflt; 3566 3567 switch (af) { 3568 #ifdef INET 3569 case AF_INET: 3570 hlen = sizeof(struct ip); 3571 bzero(&ro, sizeof(ro)); 3572 dst = (struct sockaddr_in *)&ro.ro_dst; 3573 dst->sin_family = AF_INET; 3574 dst->sin_len = sizeof(*dst); 3575 dst->sin_addr = addr->v4; 3576 rtalloc_ign(&ro, (RTF_CLONING | RTF_PRCLONING)); 3577 rt = ro.ro_rt; 3578 break; 3579 #endif /* INET */ 3580 #ifdef INET6 3581 case AF_INET6: 3582 hlen = sizeof(struct ip6_hdr); 3583 bzero(&ro6, sizeof(ro6)); 3584 dst6 = (struct sockaddr_in6 *)&ro6.ro_dst; 3585 dst6->sin6_family = AF_INET6; 3586 dst6->sin6_len = sizeof(*dst6); 3587 dst6->sin6_addr = addr->v6; 3588 

u_int16_t
pf_calc_mss(struct pf_addr *addr, sa_family_t af, u_int16_t offer)
{
#ifdef INET
	struct sockaddr_in *dst;
	struct route ro;
#endif /* INET */
#ifdef INET6
	struct sockaddr_in6 *dst6;
	struct route_in6 ro6;
#endif /* INET6 */
	struct rtentry *rt = NULL;
	int hlen = 0;
	u_int16_t mss = tcp_mssdflt;

	switch (af) {
#ifdef INET
	case AF_INET:
		hlen = sizeof(struct ip);
		bzero(&ro, sizeof(ro));
		dst = (struct sockaddr_in *)&ro.ro_dst;
		dst->sin_family = AF_INET;
		dst->sin_len = sizeof(*dst);
		dst->sin_addr = addr->v4;
		rtalloc_ign(&ro, (RTF_CLONING | RTF_PRCLONING));
		rt = ro.ro_rt;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		hlen = sizeof(struct ip6_hdr);
		bzero(&ro6, sizeof(ro6));
		dst6 = (struct sockaddr_in6 *)&ro6.ro_dst;
		dst6->sin6_family = AF_INET6;
		dst6->sin6_len = sizeof(*dst6);
		dst6->sin6_addr = addr->v6;
		rtalloc_ign((struct route *)&ro6,
		    (RTF_CLONING | RTF_PRCLONING));
		rt = ro6.ro_rt;
		break;
#endif /* INET6 */
	}

	if (rt && rt->rt_ifp) {
		mss = rt->rt_ifp->if_mtu - hlen - sizeof(struct tcphdr);
		mss = max(tcp_mssdflt, mss);
		RTFREE(rt);
	}
	mss = min(mss, offer);
	mss = max(mss, 64);	/* sanity - at least max opt space */
	return (mss);
}

void
pf_set_rt_ifp(struct pf_state *s, struct pf_addr *saddr)
{
	struct pf_rule *r = s->rule.ptr;

	s->rt_kif = NULL;
	if (!r->rt || r->rt == PF_FASTROUTE)
		return;
	switch (s->key[PF_SK_WIRE]->af) {
#ifdef INET
	case AF_INET:
		pf_map_addr(AF_INET, r, saddr, &s->rt_addr, NULL,
		    &s->nat_src_node);
		s->rt_kif = r->rpool.cur->kif;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		pf_map_addr(AF_INET6, r, saddr, &s->rt_addr, NULL,
		    &s->nat_src_node);
		s->rt_kif = r->rpool.cur->kif;
		break;
#endif /* INET6 */
	}
}

u_int32_t
pf_tcp_iss(struct pf_pdesc *pd)
{
	MD5_CTX ctx;
	u_int32_t digest[4];

	if (pf_tcp_secret_init == 0) {
		lwkt_gettoken(&pf_gtoken);
		if (pf_tcp_secret_init == 0) {
			karc4rand(pf_tcp_secret, sizeof(pf_tcp_secret));
			MD5Init(&pf_tcp_secret_ctx);
			MD5Update(&pf_tcp_secret_ctx, pf_tcp_secret,
			    sizeof(pf_tcp_secret));
			pf_tcp_secret_init = 1;
		}
		lwkt_reltoken(&pf_gtoken);
	}
	ctx = pf_tcp_secret_ctx;

	MD5Update(&ctx, (char *)&pd->hdr.tcp->th_sport, sizeof(u_short));
	MD5Update(&ctx, (char *)&pd->hdr.tcp->th_dport, sizeof(u_short));
	if (pd->af == AF_INET6) {
		MD5Update(&ctx, (char *)&pd->src->v6, sizeof(struct in6_addr));
		MD5Update(&ctx, (char *)&pd->dst->v6, sizeof(struct in6_addr));
	} else {
		MD5Update(&ctx, (char *)&pd->src->v4, sizeof(struct in_addr));
		MD5Update(&ctx, (char *)&pd->dst->v4, sizeof(struct in_addr));
	}
	MD5Final((u_char *)digest, &ctx);
	pf_tcp_iss_off += 4096;

	return (digest[0] + pd->hdr.tcp->th_seq + pf_tcp_iss_off);
}
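
/*
 * Note (not part of the original source): pf_tcp_iss() hashes the
 * connection 4-tuple with a boot-time random secret, an RFC 1948-style
 * defense against initial sequence number prediction, used by both the
 * sequence number modulator and the synproxy.  The global 4096 step
 * added per call keeps successive ISNs advancing.
 */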

int
pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction,
    struct pfi_kif *kif, struct mbuf *m, int off, void *h,
    struct pf_pdesc *pd, struct pf_rule **am, struct pf_ruleset **rsm,
    struct ifqueue *ifq, struct inpcb *inp)
{
	struct pf_rule *nr = NULL;
	struct pf_addr *saddr = pd->src, *daddr = pd->dst;
	sa_family_t af = pd->af;
	struct pf_rule *r, *a = NULL;
	struct pf_ruleset *ruleset = NULL;
	struct pf_src_node *nsn = NULL;
	struct tcphdr *th = pd->hdr.tcp;
	struct pf_state_key *skw = NULL, *sks = NULL;
	struct pf_state_key *sk = NULL, *nk = NULL;
	u_short reason;
	int rewrite = 0, hdrlen = 0;
	int tag = -1, rtableid = -1;
	int asd = 0;
	int match = 0;
	int state_icmp = 0;
	u_int16_t sport = 0, dport = 0;
	u_int16_t bproto_sum = 0, bip_sum = 0;
	u_int8_t icmptype = 0, icmpcode = 0;

	if (direction == PF_IN && pf_check_congestion(ifq)) {
		REASON_SET(&reason, PFRES_CONGEST);
		return (PF_DROP);
	}

	if (inp != NULL)
		pd->lookup.done = pf_socket_lookup(direction, pd);
	else if (debug_pfugidhack) {
		DPFPRINTF(PF_DEBUG_MISC, ("pf: unlocked lookup\n"));
		pd->lookup.done = pf_socket_lookup(direction, pd);
	}

	switch (pd->proto) {
	case IPPROTO_TCP:
		sport = th->th_sport;
		dport = th->th_dport;
		hdrlen = sizeof(*th);
		break;
	case IPPROTO_UDP:
		sport = pd->hdr.udp->uh_sport;
		dport = pd->hdr.udp->uh_dport;
		hdrlen = sizeof(*pd->hdr.udp);
		break;
#ifdef INET
	case IPPROTO_ICMP:
		if (pd->af != AF_INET)
			break;
		sport = dport = pd->hdr.icmp->icmp_id;
		hdrlen = sizeof(*pd->hdr.icmp);
		icmptype = pd->hdr.icmp->icmp_type;
		icmpcode = pd->hdr.icmp->icmp_code;

		if (icmptype == ICMP_UNREACH ||
		    icmptype == ICMP_SOURCEQUENCH ||
		    icmptype == ICMP_REDIRECT ||
		    icmptype == ICMP_TIMXCEED ||
		    icmptype == ICMP_PARAMPROB)
			state_icmp++;
		break;
#endif /* INET */
#ifdef INET6
	case IPPROTO_ICMPV6:
		if (af != AF_INET6)
			break;
		sport = dport = pd->hdr.icmp6->icmp6_id;
		hdrlen = sizeof(*pd->hdr.icmp6);
		icmptype = pd->hdr.icmp6->icmp6_type;
		icmpcode = pd->hdr.icmp6->icmp6_code;

		if (icmptype == ICMP6_DST_UNREACH ||
		    icmptype == ICMP6_PACKET_TOO_BIG ||
		    icmptype == ICMP6_TIME_EXCEEDED ||
		    icmptype == ICMP6_PARAM_PROB)
			state_icmp++;
		break;
#endif /* INET6 */
	default:
		sport = dport = hdrlen = 0;
		break;
	}

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);

	/* check packet for BINAT/NAT/RDR */
	if ((nr = pf_get_translation(pd, m, off, direction, kif, &nsn,
	    &skw, &sks, &sk, &nk, saddr, daddr, sport, dport)) != NULL) {
		if (nk == NULL || sk == NULL) {
			REASON_SET(&reason, PFRES_MEMORY);
			goto cleanup;
		}

		if (pd->ip_sum)
			bip_sum = *pd->ip_sum;

		m->m_flags &= ~M_HASH;
		switch (pd->proto) {
		case IPPROTO_TCP:
			bproto_sum = th->th_sum;
			pd->proto_sum = &th->th_sum;

			if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
			    nk->port[pd->sidx] != sport) {
				pf_change_ap(saddr, &th->th_sport, pd->ip_sum,
				    &th->th_sum, &nk->addr[pd->sidx],
				    nk->port[pd->sidx], 0, af);
				pd->sport = &th->th_sport;
				sport = th->th_sport;
			}

			if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
			    nk->port[pd->didx] != dport) {
				pf_change_ap(daddr, &th->th_dport, pd->ip_sum,
				    &th->th_sum, &nk->addr[pd->didx],
				    nk->port[pd->didx], 0, af);
				dport = th->th_dport;
				pd->dport = &th->th_dport;
			}
			rewrite++;
			break;
		case IPPROTO_UDP:
			bproto_sum = pd->hdr.udp->uh_sum;
			pd->proto_sum = &pd->hdr.udp->uh_sum;

			if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
			    nk->port[pd->sidx] != sport) {
				pf_change_ap(saddr, &pd->hdr.udp->uh_sport,
				    pd->ip_sum, &pd->hdr.udp->uh_sum,
				    &nk->addr[pd->sidx],
				    nk->port[pd->sidx], 1, af);
				sport = pd->hdr.udp->uh_sport;
				pd->sport = &pd->hdr.udp->uh_sport;
			}

			if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
			    nk->port[pd->didx] != dport) {
				pf_change_ap(daddr, &pd->hdr.udp->uh_dport,
				    pd->ip_sum, &pd->hdr.udp->uh_sum,
				    &nk->addr[pd->didx],
				    nk->port[pd->didx], 1, af);
				dport = pd->hdr.udp->uh_dport;
				pd->dport = &pd->hdr.udp->uh_dport;
			}
			rewrite++;
			break;
#ifdef INET
		case IPPROTO_ICMP:
			nk->port[0] = nk->port[1];
			if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET))
				pf_change_a(&saddr->v4.s_addr, pd->ip_sum,
				    nk->addr[pd->sidx].v4.s_addr, 0);

			if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET))
				pf_change_a(&daddr->v4.s_addr, pd->ip_sum,
				    nk->addr[pd->didx].v4.s_addr, 0);

			if (nk->port[1] != pd->hdr.icmp->icmp_id) {
				pd->hdr.icmp->icmp_cksum = pf_cksum_fixup(
				    pd->hdr.icmp->icmp_cksum, sport,
				    nk->port[1], 0);
				pd->hdr.icmp->icmp_id = nk->port[1];
				pd->sport = &pd->hdr.icmp->icmp_id;
			}
			m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
			break;
#endif /* INET */
#ifdef INET6
		case IPPROTO_ICMPV6:
			nk->port[0] = nk->port[1];
			if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET6))
				pf_change_a6(saddr,
				    &pd->hdr.icmp6->icmp6_cksum,
				    &nk->addr[pd->sidx], 0);

			if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET6))
				pf_change_a6(daddr,
				    &pd->hdr.icmp6->icmp6_cksum,
				    &nk->addr[pd->didx], 0);
			rewrite++;
			break;
#endif /* INET6 */
		default:
			switch (af) {
#ifdef INET
			case AF_INET:
				if (PF_ANEQ(saddr,
				    &nk->addr[pd->sidx], AF_INET))
					pf_change_a(&saddr->v4.s_addr,
					    pd->ip_sum,
					    nk->addr[pd->sidx].v4.s_addr, 0);

				if (PF_ANEQ(daddr,
				    &nk->addr[pd->didx], AF_INET))
					pf_change_a(&daddr->v4.s_addr,
					    pd->ip_sum,
					    nk->addr[pd->didx].v4.s_addr, 0);
				break;
#endif /* INET */
#ifdef INET6
			case AF_INET6:
				if (PF_ANEQ(saddr,
				    &nk->addr[pd->sidx], AF_INET6))
					PF_ACPY(saddr, &nk->addr[pd->sidx], af);

				if (PF_ANEQ(daddr,
				    &nk->addr[pd->didx], AF_INET6))
					PF_ACPY(daddr, &nk->addr[pd->didx], af);
				break;
#endif /* INET6 */
			}
			break;
		}
		if (nr->natpass)
			r = NULL;
		pd->nat_rule = nr;
	}

	while (r != NULL) {
		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != direction)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != af)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr, saddr, af,
		    r->src.neg, kif))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		/* tcp/udp only. port_op always 0 in other cases */
		else if (r->src.port_op && !pf_match_port(r->src.port_op,
		    r->src.port[0], r->src.port[1], sport))
			r = r->skip[PF_SKIP_SRC_PORT].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr, daddr, af,
		    r->dst.neg, NULL))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		/* tcp/udp only. port_op always 0 in other cases */
		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
		    r->dst.port[0], r->dst.port[1], dport))
			r = r->skip[PF_SKIP_DST_PORT].ptr;
		/* icmp only. type always 0 in other cases */
		else if (r->type && r->type != icmptype + 1)
			r = TAILQ_NEXT(r, entries);
		/* icmp only. code always 0 in other cases */
		else if (r->code && r->code != icmpcode + 1)
			r = TAILQ_NEXT(r, entries);
		else if (r->tos && !(r->tos == pd->tos))
			r = TAILQ_NEXT(r, entries);
		else if (r->rule_flag & PFRULE_FRAGMENT)
			r = TAILQ_NEXT(r, entries);
		else if (pd->proto == IPPROTO_TCP &&
		    (r->flagset & th->th_flags) != r->flags)
			r = TAILQ_NEXT(r, entries);
		/* tcp/udp only. uid.op always 0 in other cases */
		else if (r->uid.op && (pd->lookup.done || (pd->lookup.done =
		    pf_socket_lookup(direction, pd), 1)) &&
		    !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1],
		    pd->lookup.uid))
			r = TAILQ_NEXT(r, entries);
		/* tcp/udp only. gid.op always 0 in other cases */
		else if (r->gid.op && (pd->lookup.done || (pd->lookup.done =
		    pf_socket_lookup(direction, pd), 1)) &&
		    !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1],
		    pd->lookup.gid))
			r = TAILQ_NEXT(r, entries);
		else if (r->prob &&
		    r->prob <= karc4random())
			r = TAILQ_NEXT(r, entries);
		else if (r->match_tag && !pf_match_tag(m, r, &tag))
			r = TAILQ_NEXT(r, entries);
		else if (r->os_fingerprint != PF_OSFP_ANY &&
		    (pd->proto != IPPROTO_TCP || !pf_osfp_match(
		    pf_osfp_fingerprint(pd, m, off, th),
		    r->os_fingerprint)))
			r = TAILQ_NEXT(r, entries);
		else {
			if (r->tag)
				tag = r->tag;
			if (r->rtableid >= 0)
				rtableid = r->rtableid;
			if (r->anchor == NULL) {
				match = 1;
				*rm = r;
				*am = a;
				*rsm = ruleset;
				if ((*rm)->quick)
					break;
				r = TAILQ_NEXT(r, entries);
			} else
				pf_step_into_anchor(&asd, &ruleset,
				    PF_RULESET_FILTER, &r, &a, &match);
		}
		if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset,
		    PF_RULESET_FILTER, &r, &a, &match))
			break;
	}
	r = *rm;
	a = *am;
	ruleset = *rsm;

	REASON_SET(&reason, PFRES_MATCH);

	if (r->log || (nr != NULL && nr->log)) {
		if (rewrite)
			m_copyback(m, off, hdrlen, pd->hdr.any);
		PFLOG_PACKET(kif, h, m, af, direction, reason, r->log ? r : nr,
		    a, ruleset, pd);
	}

	if ((r->action == PF_DROP) &&
	    ((r->rule_flag & PFRULE_RETURNRST) ||
	    (r->rule_flag & PFRULE_RETURNICMP) ||
	    (r->rule_flag & PFRULE_RETURN))) {
		/* undo NAT changes, if they have taken place */
		if (nr != NULL) {
			PF_ACPY(saddr, &sk->addr[pd->sidx], af);
			PF_ACPY(daddr, &sk->addr[pd->didx], af);
			if (pd->sport)
				*pd->sport = sk->port[pd->sidx];
			if (pd->dport)
				*pd->dport = sk->port[pd->didx];
			if (pd->proto_sum)
				*pd->proto_sum = bproto_sum;
			if (pd->ip_sum)
				*pd->ip_sum = bip_sum;
			m_copyback(m, off, hdrlen, pd->hdr.any);
		}
		if (pd->proto == IPPROTO_TCP &&
		    ((r->rule_flag & PFRULE_RETURNRST) ||
		    (r->rule_flag & PFRULE_RETURN)) &&
		    !(th->th_flags & TH_RST)) {
			u_int32_t ack = ntohl(th->th_seq) + pd->p_len;
			int len = 0;
			struct ip *h4;
#ifdef INET6
			struct ip6_hdr *h6;
#endif
			switch (af) {
			case AF_INET:
				h4 = mtod(m, struct ip *);
				len = h4->ip_len - off;
				break;
#ifdef INET6
			case AF_INET6:
				h6 = mtod(m, struct ip6_hdr *);
				len = h6->ip6_plen - (off - sizeof(*h6));
				break;
#endif
			}

			if (pf_check_proto_cksum(m, off, len, IPPROTO_TCP, af))
				REASON_SET(&reason, PFRES_PROTCKSUM);
			else {
				if (th->th_flags & TH_SYN)
					ack++;
				if (th->th_flags & TH_FIN)
					ack++;
				pf_send_tcp(r, af, pd->dst,
				    pd->src, th->th_dport, th->th_sport,
				    ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0,
				    r->return_ttl, 1, 0, pd->eh, kif->pfik_ifp);
			}
		} else if (pd->proto != IPPROTO_ICMP && af == AF_INET &&
		    r->return_icmp)
			pf_send_icmp(m, r->return_icmp >> 8,
			    r->return_icmp & 255, af, r);
		else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 &&
		    r->return_icmp6)
			pf_send_icmp(m, r->return_icmp6 >> 8,
			    r->return_icmp6 & 255, af, r);
	}

	if (r->action == PF_DROP)
		goto cleanup;

	if (pf_tag_packet(m, tag, rtableid)) {
		REASON_SET(&reason, PFRES_MEMORY);
		goto cleanup;
	}
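
	/*
	 * Note (not part of the original source): at this point the packet
	 * has passed filtering.  A state is created below not only for
	 * explicit keep-state rules but also whenever a translation rule
	 * matched (nr != NULL), since NAT mappings are themselves tracked
	 * as states; stateless passes skip pf_create_state() entirely.
	 */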
	if (!state_icmp && (r->keep_state || nr != NULL ||
	    (pd->flags & PFDESC_TCP_NORM))) {
		int action;

		action = pf_create_state(r, nr, a, pd, nsn, skw, sks, nk, sk,
		    m, off, sport, dport, &rewrite, kif, sm, tag, bproto_sum,
		    bip_sum, hdrlen);
		if (action != PF_PASS)
			return (action);
	}

	/* copy back packet headers if we performed NAT operations */
	if (rewrite)
		m_copyback(m, off, hdrlen, pd->hdr.any);

	return (PF_PASS);

cleanup:
	if (sk != NULL)
		kfree(sk, M_PFSTATEKEYPL);
	if (nk != NULL)
		kfree(nk, M_PFSTATEKEYPL);
	return (PF_DROP);
}

static __inline int
pf_create_state(struct pf_rule *r, struct pf_rule *nr, struct pf_rule *a,
    struct pf_pdesc *pd, struct pf_src_node *nsn, struct pf_state_key *skw,
    struct pf_state_key *sks, struct pf_state_key *nk, struct pf_state_key *sk,
    struct mbuf *m, int off, u_int16_t sport, u_int16_t dport, int *rewrite,
    struct pfi_kif *kif, struct pf_state **sm, int tag, u_int16_t bproto_sum,
    u_int16_t bip_sum, int hdrlen)
{
	struct pf_state *s = NULL;
	struct pf_src_node *sn = NULL;
	struct tcphdr *th = pd->hdr.tcp;
	u_int16_t mss = tcp_mssdflt;
	u_short reason;
	int cpu = mycpu->gd_cpuid;

	/* check maximums */
	if (r->max_states && (r->states_cur >= r->max_states)) {
		PF_INC_LCOUNTER(LCNT_STATES);
		REASON_SET(&reason, PFRES_MAXSTATES);
		return (PF_DROP);
	}
	/* src node for filter rule */
	if ((r->rule_flag & PFRULE_SRCTRACK ||
	    r->rpool.opts & PF_POOL_STICKYADDR) &&
	    pf_insert_src_node(&sn, r, pd->src, pd->af) != 0) {
		REASON_SET(&reason, PFRES_SRCLIMIT);
		goto csfailed;
	}
	/* src node for translation rule */
	if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
	    pf_insert_src_node(&nsn, nr, &sk->addr[pd->sidx], pd->af)) {
		REASON_SET(&reason, PFRES_SRCLIMIT);
		goto csfailed;
	}
	s = kmalloc(sizeof(struct pf_state), M_PFSTATEPL, M_NOWAIT|M_ZERO);
	if (s == NULL) {
		REASON_SET(&reason, PFRES_MEMORY);
		goto csfailed;
	}
	lockinit(&s->lk, "pfstlk", 0, 0);
not in OpenBSD */ 4113 s->creatorid = 0; 4114 s->rule.ptr = r; 4115 s->nat_rule.ptr = nr; 4116 s->anchor.ptr = a; 4117 s->state_flags = PFSTATE_CREATEINPROG; 4118 STATE_INC_COUNTERS(s); 4119 if (r->allow_opts) 4120 s->state_flags |= PFSTATE_ALLOWOPTS; 4121 if (r->rule_flag & PFRULE_STATESLOPPY) 4122 s->state_flags |= PFSTATE_SLOPPY; 4123 if (pd->not_cpu_localized) 4124 s->state_flags |= PFSTATE_STACK_GLOBAL; 4125 4126 s->log = r->log & PF_LOG_ALL; 4127 if (nr != NULL) 4128 s->log |= nr->log & PF_LOG_ALL; 4129 switch (pd->proto) { 4130 case IPPROTO_TCP: 4131 s->src.seqlo = ntohl(th->th_seq); 4132 s->src.seqhi = s->src.seqlo + pd->p_len + 1; 4133 if ((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN && 4134 r->keep_state == PF_STATE_MODULATE) { 4135 /* Generate sequence number modulator */ 4136 if ((s->src.seqdiff = pf_tcp_iss(pd) - s->src.seqlo) == 4137 0) 4138 s->src.seqdiff = 1; 4139 pf_change_a(&th->th_seq, &th->th_sum, 4140 htonl(s->src.seqlo + s->src.seqdiff), 0); 4141 *rewrite = 1; 4142 } else 4143 s->src.seqdiff = 0; 4144 if (th->th_flags & TH_SYN) { 4145 s->src.seqhi++; 4146 s->src.wscale = pf_get_wscale(m, off, 4147 th->th_off, pd->af); 4148 } 4149 s->src.max_win = MAX(ntohs(th->th_win), 1); 4150 if (s->src.wscale & PF_WSCALE_MASK) { 4151 /* Remove scale factor from initial window */ 4152 int win = s->src.max_win; 4153 win += 1 << (s->src.wscale & PF_WSCALE_MASK); 4154 s->src.max_win = (win - 1) >> 4155 (s->src.wscale & PF_WSCALE_MASK); 4156 } 4157 if (th->th_flags & TH_FIN) 4158 s->src.seqhi++; 4159 s->dst.seqhi = 1; 4160 s->dst.max_win = 1; 4161 s->src.state = TCPS_SYN_SENT; 4162 s->dst.state = TCPS_CLOSED; 4163 s->timeout = PFTM_TCP_FIRST_PACKET; 4164 break; 4165 case IPPROTO_UDP: 4166 s->src.state = PFUDPS_SINGLE; 4167 s->dst.state = PFUDPS_NO_TRAFFIC; 4168 s->timeout = PFTM_UDP_FIRST_PACKET; 4169 break; 4170 case IPPROTO_ICMP: 4171 #ifdef INET6 4172 case IPPROTO_ICMPV6: 4173 #endif 4174 s->timeout = PFTM_ICMP_FIRST_PACKET; 4175 break; 4176 default: 4177 s->src.state = PFOTHERS_SINGLE; 4178 s->dst.state = PFOTHERS_NO_TRAFFIC; 4179 s->timeout = PFTM_OTHER_FIRST_PACKET; 4180 } 4181 4182 s->creation = time_second; 4183 s->expire = time_second; 4184 4185 if (sn != NULL) { 4186 s->src_node = sn; 4187 s->src_node->states++; 4188 } 4189 if (nsn != NULL) { 4190 /* XXX We only modify one side for now. */ 4191 PF_ACPY(&nsn->raddr, &nk->addr[1], pd->af); 4192 s->nat_src_node = nsn; 4193 s->nat_src_node->states++; 4194 } 4195 if (pd->proto == IPPROTO_TCP) { 4196 if ((pd->flags & PFDESC_TCP_NORM) && pf_normalize_tcp_init(m, 4197 off, pd, th, &s->src, &s->dst)) { 4198 REASON_SET(&reason, PFRES_MEMORY); 4199 pf_src_tree_remove_state(s); 4200 STATE_DEC_COUNTERS(s); 4201 kfree(s, M_PFSTATEPL); 4202 return (PF_DROP); 4203 } 4204 if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub && 4205 pf_normalize_tcp_stateful(m, off, pd, &reason, th, s, 4206 &s->src, &s->dst, rewrite)) { 4207 /* This really shouldn't happen!!! 
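* pf_normalize_tcp_init() was called just above, so the scrub state is freshly initialized for this very first packet.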
*/ 4208 DPFPRINTF(PF_DEBUG_URGENT, 4209 ("pf_normalize_tcp_stateful failed on first pkt")); 4210 pf_normalize_tcp_cleanup(s); 4211 pf_src_tree_remove_state(s); 4212 STATE_DEC_COUNTERS(s); 4213 kfree(s, M_PFSTATEPL); 4214 return (PF_DROP); 4215 } 4216 } 4217 s->direction = pd->dir; 4218 4219 if (sk == NULL && pf_state_key_setup(pd, nr, &skw, &sks, &sk, &nk, 4220 pd->src, pd->dst, sport, dport)) { 4221 REASON_SET(&reason, PFRES_MEMORY); 4222 goto csfailed; 4223 } 4224 4225 if (pf_state_insert(BOUND_IFACE(r, kif), skw, sks, s)) { 4226 if (pd->proto == IPPROTO_TCP) 4227 pf_normalize_tcp_cleanup(s); 4228 REASON_SET(&reason, PFRES_STATEINS); 4229 pf_src_tree_remove_state(s); 4230 STATE_DEC_COUNTERS(s); 4231 kfree(s, M_PFSTATEPL); 4232 return (PF_DROP); 4233 } else 4234 *sm = s; 4235 4236 pf_set_rt_ifp(s, pd->src); /* needs s->state_key set */ 4237 if (tag > 0) { 4238 pf_tag_ref(tag); 4239 s->tag = tag; 4240 } 4241 if (pd->proto == IPPROTO_TCP && (th->th_flags & (TH_SYN|TH_ACK)) == 4242 TH_SYN && r->keep_state == PF_STATE_SYNPROXY) { 4243 s->src.state = PF_TCPS_PROXY_SRC; 4244 /* undo NAT changes, if they have taken place */ 4245 if (nr != NULL) { 4246 struct pf_state_key *skt = s->key[PF_SK_WIRE]; 4247 if (pd->dir == PF_OUT) 4248 skt = s->key[PF_SK_STACK]; 4249 PF_ACPY(pd->src, &skt->addr[pd->sidx], pd->af); 4250 PF_ACPY(pd->dst, &skt->addr[pd->didx], pd->af); 4251 if (pd->sport) 4252 *pd->sport = skt->port[pd->sidx]; 4253 if (pd->dport) 4254 *pd->dport = skt->port[pd->didx]; 4255 if (pd->proto_sum) 4256 *pd->proto_sum = bproto_sum; 4257 if (pd->ip_sum) 4258 *pd->ip_sum = bip_sum; 4259 m->m_flags &= ~M_HASH; 4260 m_copyback(m, off, hdrlen, pd->hdr.any); 4261 } 4262 s->src.seqhi = htonl(karc4random()); 4263 /* Find mss option */ 4264 mss = pf_get_mss(m, off, th->th_off, pd->af); 4265 mss = pf_calc_mss(pd->src, pd->af, mss); 4266 mss = pf_calc_mss(pd->dst, pd->af, mss); 4267 s->src.mss = mss; 4268 s->state_flags &= ~PFSTATE_CREATEINPROG; 4269 pf_send_tcp(r, pd->af, pd->dst, pd->src, th->th_dport, 4270 th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1, 4271 TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0, NULL, NULL); 4272 REASON_SET(&reason, PFRES_SYNPROXY); 4273 return (PF_SYNPROXY_DROP); 4274 } 4275 4276 s->state_flags &= ~PFSTATE_CREATEINPROG; 4277 return (PF_PASS); 4278 4279 csfailed: 4280 if (sk != NULL) 4281 kfree(sk, M_PFSTATEKEYPL); 4282 if (nk != NULL) 4283 kfree(nk, M_PFSTATEKEYPL); 4284 4285 if (sn != NULL && sn->states == 0 && sn->expire == 0) { 4286 RB_REMOVE(pf_src_tree, &tree_src_tracking[cpu], sn); 4287 PF_INC_SCOUNTER(SCNT_SRC_NODE_REMOVALS); 4288 atomic_add_int(&pf_status.src_nodes, -1); 4289 kfree(sn, M_PFSRCTREEPL); 4290 } 4291 if (nsn != sn && nsn != NULL && nsn->states == 0 && nsn->expire == 0) { 4292 RB_REMOVE(pf_src_tree, &tree_src_tracking[cpu], nsn); 4293 PF_INC_SCOUNTER(SCNT_SRC_NODE_REMOVALS); 4294 atomic_add_int(&pf_status.src_nodes, -1); 4295 kfree(nsn, M_PFSRCTREEPL); 4296 } 4297 if (s) { 4298 pf_src_tree_remove_state(s); 4299 STATE_DEC_COUNTERS(s); 4300 kfree(s, M_PFSTATEPL); 4301 } 4302 4303 return (PF_DROP); 4304 } 4305 4306 int 4307 pf_test_fragment(struct pf_rule **rm, int direction, struct pfi_kif *kif, 4308 struct mbuf *m, void *h, struct pf_pdesc *pd, struct pf_rule **am, 4309 struct pf_ruleset **rsm) 4310 { 4311 struct pf_rule *r, *a = NULL; 4312 struct pf_ruleset *ruleset = NULL; 4313 sa_family_t af = pd->af; 4314 u_short reason; 4315 int tag = -1; 4316 int asd = 0; 4317 int match = 0; 4318 4319 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr); 
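/* Walk the active filter ruleset. Fields that cannot match are skipped in O(1) via the precomputed r->skip[] chains; anchor rules recurse through pf_step_into_anchor() and pf_step_out_of_anchor(). */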
4320 while (r != NULL) { 4321 r->evaluations++; 4322 if (pfi_kif_match(r->kif, kif) == r->ifnot) 4323 r = r->skip[PF_SKIP_IFP].ptr; 4324 else if (r->direction && r->direction != direction) 4325 r = r->skip[PF_SKIP_DIR].ptr; 4326 else if (r->af && r->af != af) 4327 r = r->skip[PF_SKIP_AF].ptr; 4328 else if (r->proto && r->proto != pd->proto) 4329 r = r->skip[PF_SKIP_PROTO].ptr; 4330 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af, 4331 r->src.neg, kif)) 4332 r = r->skip[PF_SKIP_SRC_ADDR].ptr; 4333 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af, 4334 r->dst.neg, NULL)) 4335 r = r->skip[PF_SKIP_DST_ADDR].ptr; 4336 else if (r->tos && !(r->tos == pd->tos)) 4337 r = TAILQ_NEXT(r, entries); 4338 else if (r->os_fingerprint != PF_OSFP_ANY) 4339 r = TAILQ_NEXT(r, entries); 4340 else if (pd->proto == IPPROTO_UDP && 4341 (r->src.port_op || r->dst.port_op)) 4342 r = TAILQ_NEXT(r, entries); 4343 else if (pd->proto == IPPROTO_TCP && 4344 (r->src.port_op || r->dst.port_op || r->flagset)) 4345 r = TAILQ_NEXT(r, entries); 4346 else if ((pd->proto == IPPROTO_ICMP || 4347 pd->proto == IPPROTO_ICMPV6) && 4348 (r->type || r->code)) 4349 r = TAILQ_NEXT(r, entries); 4350 else if (r->prob && r->prob <= karc4random()) 4351 r = TAILQ_NEXT(r, entries); 4352 else if (r->match_tag && !pf_match_tag(m, r, &tag)) 4353 r = TAILQ_NEXT(r, entries); 4354 else { 4355 if (r->anchor == NULL) { 4356 match = 1; 4357 *rm = r; 4358 *am = a; 4359 *rsm = ruleset; 4360 if ((*rm)->quick) 4361 break; 4362 r = TAILQ_NEXT(r, entries); 4363 } else 4364 pf_step_into_anchor(&asd, &ruleset, 4365 PF_RULESET_FILTER, &r, &a, &match); 4366 } 4367 if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset, 4368 PF_RULESET_FILTER, &r, &a, &match)) 4369 break; 4370 } 4371 r = *rm; 4372 a = *am; 4373 ruleset = *rsm; 4374 4375 REASON_SET(&reason, PFRES_MATCH); 4376 4377 if (r->log) 4378 PFLOG_PACKET(kif, h, m, af, direction, reason, r, a, ruleset, 4379 pd); 4380 4381 if (r->action != PF_PASS) 4382 return (PF_DROP); 4383 4384 if (pf_tag_packet(m, tag, -1)) { 4385 REASON_SET(&reason, PFRES_MEMORY); 4386 return (PF_DROP); 4387 } 4388 4389 return (PF_PASS); 4390 } 4391 4392 /* 4393 * Called with state locked 4394 */ 4395 int 4396 pf_tcp_track_full(struct pf_state_peer *src, struct pf_state_peer *dst, 4397 struct pf_state **state, struct pfi_kif *kif, struct mbuf *m, int off, 4398 struct pf_pdesc *pd, u_short *reason, int *copyback) 4399 { 4400 struct tcphdr *th = pd->hdr.tcp; 4401 u_int16_t win = ntohs(th->th_win); 4402 u_int32_t ack, end, seq, orig_seq; 4403 u_int8_t sws, dws; 4404 int ackskew; 4405 4406 if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) { 4407 sws = src->wscale & PF_WSCALE_MASK; 4408 dws = dst->wscale & PF_WSCALE_MASK; 4409 } else { 4410 sws = dws = 0; 4411 } 4412 4413 /* 4414 * Sequence tracking algorithm from Guido van Rooij's paper: 4415 * http://www.madison-gurkha.com/publications/tcp_filtering/ 4416 * tcp_filtering.ps 4417 */ 4418 4419 orig_seq = seq = ntohl(th->th_seq); 4420 if (src->seqlo == 0) { 4421 /* First packet from this end. 
Set its state */ 4422 4423 if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) && 4424 src->scrub == NULL) { 4425 if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) { 4426 REASON_SET(reason, PFRES_MEMORY); 4427 return (PF_DROP); 4428 } 4429 } 4430 4431 /* Deferred generation of sequence number modulator */ 4432 if (dst->seqdiff && !src->seqdiff) { 4433 /* use random iss for the TCP server */ 4434 while ((src->seqdiff = karc4random() - seq) == 0) 4435 ; 4436 ack = ntohl(th->th_ack) - dst->seqdiff; 4437 pf_change_a(&th->th_seq, &th->th_sum, htonl(seq + 4438 src->seqdiff), 0); 4439 pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0); 4440 *copyback = 1; 4441 } else { 4442 ack = ntohl(th->th_ack); 4443 } 4444 4445 end = seq + pd->p_len; 4446 if (th->th_flags & TH_SYN) { 4447 end++; 4448 (*state)->sync_flags |= PFSTATE_GOT_SYN2; 4449 if (dst->wscale & PF_WSCALE_FLAG) { 4450 src->wscale = pf_get_wscale(m, off, th->th_off, 4451 pd->af); 4452 if (src->wscale & PF_WSCALE_FLAG) { 4453 /* Remove scale factor from initial 4454 * window */ 4455 sws = src->wscale & PF_WSCALE_MASK; 4456 win = ((u_int32_t)win + (1 << sws) - 1) 4457 >> sws; 4458 dws = dst->wscale & PF_WSCALE_MASK; 4459 } else { 4460 /* fixup other window */ 4461 dst->max_win <<= dst->wscale & 4462 PF_WSCALE_MASK; 4463 /* in case of a retrans SYN|ACK */ 4464 dst->wscale = 0; 4465 } 4466 } 4467 } 4468 if (th->th_flags & TH_FIN) 4469 end++; 4470 4471 src->seqlo = seq; 4472 if (src->state < TCPS_SYN_SENT) 4473 src->state = TCPS_SYN_SENT; 4474 4475 /* 4476 * May need to slide the window (seqhi may have been set by 4477 * the crappy stack check or if we picked up the connection 4478 * after establishment) 4479 */ 4480 if (src->seqhi == 1 || 4481 SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi)) 4482 src->seqhi = end + MAX(1, dst->max_win << dws); 4483 if (win > src->max_win) 4484 src->max_win = win; 4485 4486 } else { 4487 ack = ntohl(th->th_ack) - dst->seqdiff; 4488 if (src->seqdiff) { 4489 /* Modulate sequence numbers */ 4490 pf_change_a(&th->th_seq, &th->th_sum, htonl(seq + 4491 src->seqdiff), 0); 4492 pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0); 4493 *copyback = 1; 4494 } 4495 end = seq + pd->p_len; 4496 if (th->th_flags & TH_SYN) 4497 end++; 4498 if (th->th_flags & TH_FIN) 4499 end++; 4500 } 4501 4502 if ((th->th_flags & TH_ACK) == 0) { 4503 /* Let it pass through the ack skew check */ 4504 ack = dst->seqlo; 4505 } else if ((ack == 0 && 4506 (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) || 4507 /* broken tcp stacks do not set ack */ 4508 (dst->state < TCPS_SYN_SENT)) { 4509 /* 4510 * Many stacks (ours included) will set the ACK number in a 4511 * FIN|ACK if the SYN times out -- no sequence to ACK. 4512 */ 4513 ack = dst->seqlo; 4514 } 4515 4516 if (seq == end) { 4517 /* Ease sequencing restrictions on no data packets */ 4518 seq = src->seqlo; 4519 end = seq; 4520 } 4521 4522 ackskew = dst->seqlo - ack; 4523 4524 4525 /* 4526 * Need to demodulate the sequence numbers in any TCP SACK options 4527 * (Selective ACK). We could optionally validate the SACK values 4528 * against the current ACK window, either forwards or backwards, but 4529 * I'm not confident that SACK has been implemented properly 4530 * everywhere. It wouldn't surprise me if several stacks accidentally 4531 * SACK too far backwards of previously ACKed data. There really aren't 4532 * any security implications of bad SACKing unless the target stack 4533 * doesn't validate the option length correctly.
Someone trying to 4534 * spoof into a TCP connection won't bother blindly sending SACK 4535 * options anyway. 4536 */ 4537 if (dst->seqdiff && (th->th_off << 2) > sizeof(struct tcphdr)) { 4538 if (pf_modulate_sack(m, off, pd, th, dst)) 4539 *copyback = 1; 4540 } 4541 4542 4543 #define MAXACKWINDOW (0xffff + 1500) /* 1500 is an arbitrary fudge factor */ 4544 if (SEQ_GEQ(src->seqhi, end) && 4545 /* Last octet inside other's window space */ 4546 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) && 4547 /* Retrans: not more than one window back */ 4548 (ackskew >= -MAXACKWINDOW) && 4549 /* Acking not more than one reassembled fragment backwards */ 4550 (ackskew <= (MAXACKWINDOW << sws)) && 4551 /* Acking not more than one window forward */ 4552 ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo || 4553 (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo) || 4554 (pd->flags & PFDESC_IP_REAS) == 0)) { 4555 /* Require an exact/+1 sequence match on resets when possible */ 4556 4557 if (dst->scrub || src->scrub) { 4558 if (pf_normalize_tcp_stateful(m, off, pd, reason, th, 4559 *state, src, dst, copyback)) 4560 return (PF_DROP); 4561 } 4562 4563 /* update max window */ 4564 if (src->max_win < win) 4565 src->max_win = win; 4566 /* synchronize sequencing */ 4567 if (SEQ_GT(end, src->seqlo)) 4568 src->seqlo = end; 4569 /* slide the window of what the other end can send */ 4570 if (SEQ_GEQ(ack + (win << sws), dst->seqhi)) 4571 dst->seqhi = ack + MAX((win << sws), 1); 4572 4573 4574 /* update states */ 4575 if (th->th_flags & TH_SYN) 4576 if (src->state < TCPS_SYN_SENT) 4577 src->state = TCPS_SYN_SENT; 4578 if (th->th_flags & TH_FIN) 4579 if (src->state < TCPS_CLOSING) 4580 src->state = TCPS_CLOSING; 4581 if (th->th_flags & TH_ACK) { 4582 if (dst->state == TCPS_SYN_SENT) { 4583 dst->state = TCPS_ESTABLISHED; 4584 if (src->state == TCPS_ESTABLISHED && 4585 (*state)->src_node != NULL && 4586 pf_src_connlimit(*state)) { 4587 REASON_SET(reason, PFRES_SRCLIMIT); 4588 return (PF_DROP); 4589 } 4590 } else if (dst->state == TCPS_CLOSING) 4591 dst->state = TCPS_FIN_WAIT_2; 4592 } 4593 if (th->th_flags & TH_RST) 4594 src->state = dst->state = TCPS_TIME_WAIT; 4595 4596 /* update expire time */ 4597 (*state)->expire = time_second; 4598 if (src->state >= TCPS_FIN_WAIT_2 && 4599 dst->state >= TCPS_FIN_WAIT_2) 4600 (*state)->timeout = PFTM_TCP_CLOSED; 4601 else if (src->state >= TCPS_CLOSING && 4602 dst->state >= TCPS_CLOSING) 4603 (*state)->timeout = PFTM_TCP_FIN_WAIT; 4604 else if (src->state < TCPS_ESTABLISHED || 4605 dst->state < TCPS_ESTABLISHED) 4606 (*state)->timeout = PFTM_TCP_OPENING; 4607 else if (src->state >= TCPS_CLOSING || 4608 dst->state >= TCPS_CLOSING) 4609 (*state)->timeout = PFTM_TCP_CLOSING; 4610 else 4611 (*state)->timeout = PFTM_TCP_ESTABLISHED; 4612 4613 /* Fall through to PASS packet */ 4614 4615 } else if ((dst->state < TCPS_SYN_SENT || 4616 dst->state >= TCPS_FIN_WAIT_2 || 4617 src->state >= TCPS_FIN_WAIT_2) && 4618 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) && 4619 /* Within a window forward of the originating packet */ 4620 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) { 4621 /* Within a window backward of the originating packet */ 4622 4623 /* 4624 * This currently handles three situations: 4625 * 1) Stupid stacks will shotgun SYNs before their peer 4626 * replies. 4627 * 2) When PF catches an already established stream (the 4628 * firewall rebooted, the state table was flushed, routes 4629 * changed...) 
4630 * 3) Packets get funky immediately after the connection 4631 * closes (this should catch Solaris spurious ACK|FINs 4632 * that web servers like to spew after a close) 4633 * 4634 * This must be a little more careful than the above code 4635 * since packet floods will also be caught here. We don't 4636 * update the TTL here to mitigate the damage of a packet 4637 * flood and so the same code can handle awkward establishment 4638 * and a loosened connection close. 4639 * In the establishment case, a correct peer response will 4640 * validate the connection, go through the normal state code 4641 * and keep updating the state TTL. 4642 */ 4643 4644 if (pf_status.debug >= PF_DEBUG_MISC) { 4645 kprintf("pf: loose state match: "); 4646 pf_print_state(*state); 4647 pf_print_flags(th->th_flags); 4648 kprintf(" seq=%u (%u) ack=%u len=%u ackskew=%d " 4649 "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack, pd->p_len, 4650 ackskew, (unsigned long long)(*state)->packets[0], 4651 (unsigned long long)(*state)->packets[1], 4652 pd->dir == PF_IN ? "in" : "out", 4653 pd->dir == (*state)->direction ? "fwd" : "rev"); 4654 } 4655 4656 if (dst->scrub || src->scrub) { 4657 if (pf_normalize_tcp_stateful(m, off, pd, reason, th, 4658 *state, src, dst, copyback)) 4659 return (PF_DROP); 4660 } 4661 4662 /* update max window */ 4663 if (src->max_win < win) 4664 src->max_win = win; 4665 /* synchronize sequencing */ 4666 if (SEQ_GT(end, src->seqlo)) 4667 src->seqlo = end; 4668 /* slide the window of what the other end can send */ 4669 if (SEQ_GEQ(ack + (win << sws), dst->seqhi)) 4670 dst->seqhi = ack + MAX((win << sws), 1); 4671 4672 /* 4673 * Cannot set dst->seqhi here since this could be a shotgunned 4674 * SYN and not an already established connection. 4675 */ 4676 4677 if (th->th_flags & TH_FIN) 4678 if (src->state < TCPS_CLOSING) 4679 src->state = TCPS_CLOSING; 4680 if (th->th_flags & TH_RST) 4681 src->state = dst->state = TCPS_TIME_WAIT; 4682 4683 /* Fall through to PASS packet */ 4684 4685 } else if ((*state)->pickup_mode == PF_PICKUPS_HASHONLY || 4686 ((*state)->pickup_mode == PF_PICKUPS_ENABLED && 4687 ((*state)->sync_flags & PFSTATE_GOT_SYN_MASK) != 4688 PFSTATE_GOT_SYN_MASK)) { 4689 /* 4690 * If pickup mode is hash only, do not fail on sequence checks. 4691 * 4692 * If pickup mode is enabled and we did not see the SYN in 4693 * both direction, do not fail on sequence checks because 4694 * we do not have complete information on window scale. 4695 * 4696 * Adjust expiration and fall through to PASS packet. 4697 * XXX Add a FIN check to reduce timeout? 4698 */ 4699 (*state)->expire = time_second; 4700 } else { 4701 /* 4702 * Failure processing 4703 */ 4704 if ((*state)->dst.state == TCPS_SYN_SENT && 4705 (*state)->src.state == TCPS_SYN_SENT) { 4706 /* Send RST for state mismatches during handshake */ 4707 if (!(th->th_flags & TH_RST)) 4708 pf_send_tcp((*state)->rule.ptr, pd->af, 4709 pd->dst, pd->src, th->th_dport, 4710 th->th_sport, ntohl(th->th_ack), 0, 4711 TH_RST, 0, 0, 4712 (*state)->rule.ptr->return_ttl, 1, 0, 4713 pd->eh, kif->pfik_ifp); 4714 src->seqlo = 0; 4715 src->seqhi = 1; 4716 src->max_win = 1; 4717 } else if (pf_status.debug >= PF_DEBUG_MISC) { 4718 kprintf("pf: BAD state: "); 4719 pf_print_state(*state); 4720 pf_print_flags(th->th_flags); 4721 kprintf(" seq=%u (%u) ack=%u len=%u ackskew=%d " 4722 "pkts=%llu:%llu dir=%s,%s\n", 4723 seq, orig_seq, ack, pd->p_len, ackskew, 4724 (unsigned long long)(*state)->packets[0], 4725 (unsigned long long)(*state)->packets[1], 4726 pd->dir == PF_IN ? 
"in" : "out", 4727 pd->dir == (*state)->direction ? "fwd" : "rev"); 4728 kprintf("pf: State failure on: %c %c %c %c | %c %c\n", 4729 SEQ_GEQ(src->seqhi, end) ? ' ' : '1', 4730 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ? 4731 ' ': '2', 4732 (ackskew >= -MAXACKWINDOW) ? ' ' : '3', 4733 (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4', 4734 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ?' ' :'5', 4735 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' ' :'6'); 4736 } 4737 REASON_SET(reason, PFRES_BADSTATE); 4738 return (PF_DROP); 4739 } 4740 4741 return (PF_PASS); 4742 } 4743 4744 /* 4745 * Called with state locked 4746 */ 4747 int 4748 pf_tcp_track_sloppy(struct pf_state_peer *src, struct pf_state_peer *dst, 4749 struct pf_state **state, struct pf_pdesc *pd, u_short *reason) 4750 { 4751 struct tcphdr *th = pd->hdr.tcp; 4752 4753 if (th->th_flags & TH_SYN) 4754 if (src->state < TCPS_SYN_SENT) 4755 src->state = TCPS_SYN_SENT; 4756 if (th->th_flags & TH_FIN) 4757 if (src->state < TCPS_CLOSING) 4758 src->state = TCPS_CLOSING; 4759 if (th->th_flags & TH_ACK) { 4760 if (dst->state == TCPS_SYN_SENT) { 4761 dst->state = TCPS_ESTABLISHED; 4762 if (src->state == TCPS_ESTABLISHED && 4763 (*state)->src_node != NULL && 4764 pf_src_connlimit(*state)) { 4765 REASON_SET(reason, PFRES_SRCLIMIT); 4766 return (PF_DROP); 4767 } 4768 } else if (dst->state == TCPS_CLOSING) { 4769 dst->state = TCPS_FIN_WAIT_2; 4770 } else if (src->state == TCPS_SYN_SENT && 4771 dst->state < TCPS_SYN_SENT) { 4772 /* 4773 * Handle a special sloppy case where we only see one 4774 * half of the connection. If there is a ACK after 4775 * the initial SYN without ever seeing a packet from 4776 * the destination, set the connection to established. 4777 */ 4778 dst->state = src->state = TCPS_ESTABLISHED; 4779 if ((*state)->src_node != NULL && 4780 pf_src_connlimit(*state)) { 4781 REASON_SET(reason, PFRES_SRCLIMIT); 4782 return (PF_DROP); 4783 } 4784 } else if (src->state == TCPS_CLOSING && 4785 dst->state == TCPS_ESTABLISHED && 4786 dst->seqlo == 0) { 4787 /* 4788 * Handle the closing of half connections where we 4789 * don't see the full bidirectional FIN/ACK+ACK 4790 * handshake. 4791 */ 4792 dst->state = TCPS_CLOSING; 4793 } 4794 } 4795 if (th->th_flags & TH_RST) 4796 src->state = dst->state = TCPS_TIME_WAIT; 4797 4798 /* update expire time */ 4799 (*state)->expire = time_second; 4800 if (src->state >= TCPS_FIN_WAIT_2 && 4801 dst->state >= TCPS_FIN_WAIT_2) 4802 (*state)->timeout = PFTM_TCP_CLOSED; 4803 else if (src->state >= TCPS_CLOSING && 4804 dst->state >= TCPS_CLOSING) 4805 (*state)->timeout = PFTM_TCP_FIN_WAIT; 4806 else if (src->state < TCPS_ESTABLISHED || 4807 dst->state < TCPS_ESTABLISHED) 4808 (*state)->timeout = PFTM_TCP_OPENING; 4809 else if (src->state >= TCPS_CLOSING || 4810 dst->state >= TCPS_CLOSING) 4811 (*state)->timeout = PFTM_TCP_CLOSING; 4812 else 4813 (*state)->timeout = PFTM_TCP_ESTABLISHED; 4814 4815 return (PF_PASS); 4816 } 4817 4818 /* 4819 * Test TCP connection state. Caller must hold the state locked. 
4820 */ 4821 int 4822 pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif, 4823 struct mbuf *m, int off, void *h, struct pf_pdesc *pd, 4824 u_short *reason) 4825 { 4826 struct pf_state_key_cmp key; 4827 struct tcphdr *th = pd->hdr.tcp; 4828 int copyback = 0; 4829 int error; 4830 struct pf_state_peer *src, *dst; 4831 struct pf_state_key *sk; 4832 4833 bzero(&key, sizeof(key)); 4834 key.af = pd->af; 4835 key.proto = IPPROTO_TCP; 4836 if (direction == PF_IN) { /* wire side, straight */ 4837 PF_ACPY(&key.addr[0], pd->src, key.af); 4838 PF_ACPY(&key.addr[1], pd->dst, key.af); 4839 key.port[0] = th->th_sport; 4840 key.port[1] = th->th_dport; 4841 if (pf_status.debug >= PF_DEBUG_MISC) { 4842 kprintf("test-tcp IN (%08x:%d) -> (%08x:%d)\n", 4843 ntohl(key.addr[0].addr32[0]), 4844 ntohs(key.port[0]), 4845 ntohl(key.addr[1].addr32[0]), 4846 ntohs(key.port[1])); 4847 } 4848 } else { /* stack side, reverse */ 4849 PF_ACPY(&key.addr[1], pd->src, key.af); 4850 PF_ACPY(&key.addr[0], pd->dst, key.af); 4851 key.port[1] = th->th_sport; 4852 key.port[0] = th->th_dport; 4853 if (pf_status.debug >= PF_DEBUG_MISC) { 4854 kprintf("test-tcp OUT (%08x:%d) <- (%08x:%d)\n", 4855 ntohl(key.addr[0].addr32[0]), 4856 ntohs(key.port[0]), 4857 ntohl(key.addr[1].addr32[0]), 4858 ntohs(key.port[1])); 4859 } 4860 } 4861 4862 STATE_LOOKUP(kif, &key, direction, *state, m); 4863 lockmgr(&(*state)->lk, LK_EXCLUSIVE); 4864 4865 if (direction == (*state)->direction) { 4866 src = &(*state)->src; 4867 dst = &(*state)->dst; 4868 } else { 4869 src = &(*state)->dst; 4870 dst = &(*state)->src; 4871 } 4872 4873 sk = (*state)->key[pd->didx]; 4874 4875 if ((*state)->src.state == PF_TCPS_PROXY_SRC) { 4876 if (direction != (*state)->direction) { 4877 REASON_SET(reason, PFRES_SYNPROXY); 4878 FAIL (PF_SYNPROXY_DROP); 4879 } 4880 if (th->th_flags & TH_SYN) { 4881 if (ntohl(th->th_seq) != (*state)->src.seqlo) { 4882 REASON_SET(reason, PFRES_SYNPROXY); 4883 FAIL (PF_DROP); 4884 } 4885 pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst, 4886 pd->src, th->th_dport, th->th_sport, 4887 (*state)->src.seqhi, ntohl(th->th_seq) + 1, 4888 TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1, 4889 0, NULL, NULL); 4890 REASON_SET(reason, PFRES_SYNPROXY); 4891 FAIL (PF_SYNPROXY_DROP); 4892 } else if (!(th->th_flags & TH_ACK) || 4893 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) || 4894 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) { 4895 REASON_SET(reason, PFRES_SYNPROXY); 4896 FAIL (PF_DROP); 4897 } else if ((*state)->src_node != NULL && 4898 pf_src_connlimit(*state)) { 4899 REASON_SET(reason, PFRES_SRCLIMIT); 4900 FAIL (PF_DROP); 4901 } else 4902 (*state)->src.state = PF_TCPS_PROXY_DST; 4903 } 4904 if ((*state)->src.state == PF_TCPS_PROXY_DST) { 4905 if (direction == (*state)->direction) { 4906 if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) || 4907 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) || 4908 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) { 4909 REASON_SET(reason, PFRES_SYNPROXY); 4910 FAIL (PF_DROP); 4911 } 4912 (*state)->src.max_win = MAX(ntohs(th->th_win), 1); 4913 if ((*state)->dst.seqhi == 1) 4914 (*state)->dst.seqhi = htonl(karc4random()); 4915 pf_send_tcp((*state)->rule.ptr, pd->af, 4916 &sk->addr[pd->sidx], &sk->addr[pd->didx], 4917 sk->port[pd->sidx], sk->port[pd->didx], 4918 (*state)->dst.seqhi, 0, TH_SYN, 0, 4919 (*state)->src.mss, 0, 0, (*state)->tag, NULL, NULL); 4920 REASON_SET(reason, PFRES_SYNPROXY); 4921 FAIL (PF_SYNPROXY_DROP); 4922 } else if (((th->th_flags & (TH_SYN|TH_ACK)) != 4923 (TH_SYN|TH_ACK)) || 4924 
(ntohl(th->th_ack) != (*state)->dst.seqhi + 1)) { 4925 REASON_SET(reason, PFRES_SYNPROXY); 4926 FAIL (PF_DROP); 4927 } else { 4928 (*state)->dst.max_win = MAX(ntohs(th->th_win), 1); 4929 (*state)->dst.seqlo = ntohl(th->th_seq); 4930 pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst, 4931 pd->src, th->th_dport, th->th_sport, 4932 ntohl(th->th_ack), ntohl(th->th_seq) + 1, 4933 TH_ACK, (*state)->src.max_win, 0, 0, 0, 4934 (*state)->tag, NULL, NULL); 4935 pf_send_tcp((*state)->rule.ptr, pd->af, 4936 &sk->addr[pd->sidx], &sk->addr[pd->didx], 4937 sk->port[pd->sidx], sk->port[pd->didx], 4938 (*state)->src.seqhi + 1, (*state)->src.seqlo + 1, 4939 TH_ACK, (*state)->dst.max_win, 0, 0, 1, 4940 0, NULL, NULL); 4941 (*state)->src.seqdiff = (*state)->dst.seqhi - 4942 (*state)->src.seqlo; 4943 (*state)->dst.seqdiff = (*state)->src.seqhi - 4944 (*state)->dst.seqlo; 4945 (*state)->src.seqhi = (*state)->src.seqlo + 4946 (*state)->dst.max_win; 4947 (*state)->dst.seqhi = (*state)->dst.seqlo + 4948 (*state)->src.max_win; 4949 (*state)->src.wscale = (*state)->dst.wscale = 0; 4950 (*state)->src.state = (*state)->dst.state = 4951 TCPS_ESTABLISHED; 4952 REASON_SET(reason, PFRES_SYNPROXY); 4953 FAIL (PF_SYNPROXY_DROP); 4954 } 4955 } 4956 4957 /* 4958 * Check for connection (addr+port pair) reuse. We can't actually 4959 * unlink the state if we don't own it. 4960 */ 4961 if (((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN) && 4962 dst->state >= TCPS_FIN_WAIT_2 && 4963 src->state >= TCPS_FIN_WAIT_2) { 4964 if (pf_status.debug >= PF_DEBUG_MISC) { 4965 kprintf("pf: state reuse "); 4966 pf_print_state(*state); 4967 pf_print_flags(th->th_flags); 4968 kprintf("\n"); 4969 } 4970 /* XXX make sure it's the same direction ?? */ 4971 (*state)->src.state = (*state)->dst.state = TCPS_CLOSED; 4972 if ((*state)->cpuid == mycpu->gd_cpuid) { 4973 pf_unlink_state(*state); 4974 *state = NULL; 4975 } else { 4976 (*state)->timeout = PFTM_PURGE; 4977 } 4978 FAIL (PF_DROP); 4979 } 4980 4981 if ((*state)->state_flags & PFSTATE_SLOPPY) { 4982 if (pf_tcp_track_sloppy(src, dst, state, pd, 4983 reason) == PF_DROP) { 4984 FAIL (PF_DROP); 4985 } 4986 } else { 4987 if (pf_tcp_track_full(src, dst, state, kif, m, off, pd, 4988 reason, &copyback) == PF_DROP) { 4989 FAIL (PF_DROP); 4990 } 4991 } 4992 4993 /* translate source/destination address, if necessary */ 4994 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { 4995 struct pf_state_key *nk = (*state)->key[pd->didx]; 4996 4997 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) || 4998 nk->port[pd->sidx] != th->th_sport) { 4999 /* 5000 * The translated source address may be completely 5001 * unrelated to the saved link header; make sure 5002 * a bridge doesn't try to use it. 5003 */ 5004 m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED; 5005 pf_change_ap(pd->src, &th->th_sport, pd->ip_sum, 5006 &th->th_sum, &nk->addr[pd->sidx], 5007 nk->port[pd->sidx], 0, pd->af); 5008 } 5009 5010 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) || 5011 nk->port[pd->didx] != th->th_dport) { 5012 /* 5013 * If we don't redispatch, the packet will go into 5014 * the protocol stack on the wrong cpu for the 5015 * post-translated address.
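* Clearing M_HASH on copyback below forces the hash to be recomputed so the packet is rescheduled onto the cpu owning the translated tuple.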
5016 */ 5017 pf_change_ap(pd->dst, &th->th_dport, pd->ip_sum, 5018 &th->th_sum, &nk->addr[pd->didx], 5019 nk->port[pd->didx], 0, pd->af); 5020 } 5021 copyback = 1; 5022 } 5023 5024 /* Copyback sequence modulation or stateful scrub changes if needed */ 5025 if (copyback) { 5026 m->m_flags &= ~M_HASH; 5027 m_copyback(m, off, sizeof(*th), (caddr_t)th); 5028 } 5029 5030 pfsync_update_state(*state); 5031 error = PF_PASS; 5032 done: 5033 if (*state) 5034 lockmgr(&(*state)->lk, LK_RELEASE); 5035 return (error); 5036 } 5037 5038 /* 5039 * Test UDP connection state. Caller must hold the state locked. 5040 */ 5041 int 5042 pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif, 5043 struct mbuf *m, int off, void *h, struct pf_pdesc *pd) 5044 { 5045 struct pf_state_peer *src, *dst; 5046 struct pf_state_key_cmp key; 5047 struct udphdr *uh = pd->hdr.udp; 5048 5049 bzero(&key, sizeof(key)); 5050 key.af = pd->af; 5051 key.proto = IPPROTO_UDP; 5052 if (direction == PF_IN) { /* wire side, straight */ 5053 PF_ACPY(&key.addr[0], pd->src, key.af); 5054 PF_ACPY(&key.addr[1], pd->dst, key.af); 5055 key.port[0] = uh->uh_sport; 5056 key.port[1] = uh->uh_dport; 5057 } else { /* stack side, reverse */ 5058 PF_ACPY(&key.addr[1], pd->src, key.af); 5059 PF_ACPY(&key.addr[0], pd->dst, key.af); 5060 key.port[1] = uh->uh_sport; 5061 key.port[0] = uh->uh_dport; 5062 } 5063 5064 STATE_LOOKUP(kif, &key, direction, *state, m); 5065 lockmgr(&(*state)->lk, LK_EXCLUSIVE); 5066 5067 if (direction == (*state)->direction) { 5068 src = &(*state)->src; 5069 dst = &(*state)->dst; 5070 } else { 5071 src = &(*state)->dst; 5072 dst = &(*state)->src; 5073 } 5074 5075 /* update states */ 5076 if (src->state < PFUDPS_SINGLE) 5077 src->state = PFUDPS_SINGLE; 5078 if (dst->state == PFUDPS_SINGLE) 5079 dst->state = PFUDPS_MULTIPLE; 5080 5081 /* update expire time */ 5082 (*state)->expire = time_second; 5083 if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE) 5084 (*state)->timeout = PFTM_UDP_MULTIPLE; 5085 else 5086 (*state)->timeout = PFTM_UDP_SINGLE; 5087 5088 /* translate source/destination address, if necessary */ 5089 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { 5090 struct pf_state_key *nk = (*state)->key[pd->didx]; 5091 5092 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) || 5093 nk->port[pd->sidx] != uh->uh_sport) { 5094 /* 5095 * The translated source address may be completely 5096 * unrelated to the saved link header; make sure 5097 * a bridge doesn't try to use it. 5098 */ 5099 m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED; 5100 m->m_flags &= ~M_HASH; 5101 pf_change_ap(pd->src, &uh->uh_sport, pd->ip_sum, 5102 &uh->uh_sum, &nk->addr[pd->sidx], 5103 nk->port[pd->sidx], 1, pd->af); 5104 } 5105 5106 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) || 5107 nk->port[pd->didx] != uh->uh_dport) { 5108 /* 5109 * If we don't redispatch, the packet will go into 5110 * the protocol stack on the wrong cpu for the 5111 * post-translated address. 5112 */ 5113 m->m_flags &= ~M_HASH; 5114 pf_change_ap(pd->dst, &uh->uh_dport, pd->ip_sum, 5115 &uh->uh_sum, &nk->addr[pd->didx], 5116 nk->port[pd->didx], 1, pd->af); 5117 } 5118 m_copyback(m, off, sizeof(*uh), (caddr_t)uh); 5119 } 5120 5121 pfsync_update_state(*state); 5122 lockmgr(&(*state)->lk, LK_RELEASE); 5123 return (PF_PASS); 5124 } 5125 5126 /* 5127 * Test ICMP connection state. Caller must hold the state locked.
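* ICMP queries/replies are matched directly by icmp id; ICMP error messages are matched via the state of the embedded packet that they quote.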
5128 */ 5129 int 5130 pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, 5131 struct mbuf *m, int off, void *h, struct pf_pdesc *pd, 5132 u_short *reason) 5133 { 5134 struct pf_addr *saddr = pd->src, *daddr = pd->dst; 5135 u_int16_t icmpid = 0, *icmpsum = NULL; 5136 u_int8_t icmptype = 0; 5137 int state_icmp = 0; 5138 int error; 5139 struct pf_state_key_cmp key; 5140 5141 bzero(&key, sizeof(key)); 5142 5143 switch (pd->proto) { 5144 #ifdef INET 5145 case IPPROTO_ICMP: 5146 icmptype = pd->hdr.icmp->icmp_type; 5147 icmpid = pd->hdr.icmp->icmp_id; 5148 icmpsum = &pd->hdr.icmp->icmp_cksum; 5149 5150 if (icmptype == ICMP_UNREACH || 5151 icmptype == ICMP_SOURCEQUENCH || 5152 icmptype == ICMP_REDIRECT || 5153 icmptype == ICMP_TIMXCEED || 5154 icmptype == ICMP_PARAMPROB) 5155 state_icmp++; 5156 break; 5157 #endif /* INET */ 5158 #ifdef INET6 5159 case IPPROTO_ICMPV6: 5160 icmptype = pd->hdr.icmp6->icmp6_type; 5161 icmpid = pd->hdr.icmp6->icmp6_id; 5162 icmpsum = &pd->hdr.icmp6->icmp6_cksum; 5163 5164 if (icmptype == ICMP6_DST_UNREACH || 5165 icmptype == ICMP6_PACKET_TOO_BIG || 5166 icmptype == ICMP6_TIME_EXCEEDED || 5167 icmptype == ICMP6_PARAM_PROB) 5168 state_icmp++; 5169 break; 5170 #endif /* INET6 */ 5171 } 5172 5173 if (!state_icmp) { 5174 5175 /* 5176 * ICMP query/reply message not related to a TCP/UDP packet. 5177 * Search for an ICMP state. 5178 */ 5179 key.af = pd->af; 5180 key.proto = pd->proto; 5181 key.port[0] = key.port[1] = icmpid; 5182 if (direction == PF_IN) { /* wire side, straight */ 5183 PF_ACPY(&key.addr[0], pd->src, key.af); 5184 PF_ACPY(&key.addr[1], pd->dst, key.af); 5185 } else { /* stack side, reverse */ 5186 PF_ACPY(&key.addr[1], pd->src, key.af); 5187 PF_ACPY(&key.addr[0], pd->dst, key.af); 5188 } 5189 5190 STATE_LOOKUP(kif, &key, direction, *state, m); 5191 lockmgr(&(*state)->lk, LK_EXCLUSIVE); 5192 5193 (*state)->expire = time_second; 5194 (*state)->timeout = PFTM_ICMP_ERROR_REPLY; 5195 5196 /* translate source/destination address, if necessary */ 5197 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { 5198 struct pf_state_key *nk = (*state)->key[pd->didx]; 5199 5200 switch (pd->af) { 5201 #ifdef INET 5202 case AF_INET: 5203 if (PF_ANEQ(pd->src, 5204 &nk->addr[pd->sidx], AF_INET)) 5205 pf_change_a(&saddr->v4.s_addr, 5206 pd->ip_sum, 5207 nk->addr[pd->sidx].v4.s_addr, 0); 5208 5209 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], 5210 AF_INET)) 5211 pf_change_a(&daddr->v4.s_addr, 5212 pd->ip_sum, 5213 nk->addr[pd->didx].v4.s_addr, 0); 5214 5215 if (nk->port[0] != 5216 pd->hdr.icmp->icmp_id) { 5217 pd->hdr.icmp->icmp_cksum = 5218 pf_cksum_fixup( 5219 pd->hdr.icmp->icmp_cksum, icmpid, 5220 nk->port[pd->sidx], 0); 5221 pd->hdr.icmp->icmp_id = 5222 nk->port[pd->sidx]; 5223 } 5224 5225 m->m_flags &= ~M_HASH; 5226 m_copyback(m, off, ICMP_MINLEN, 5227 (caddr_t)pd->hdr.icmp); 5228 break; 5229 #endif /* INET */ 5230 #ifdef INET6 5231 case AF_INET6: 5232 if (PF_ANEQ(pd->src, 5233 &nk->addr[pd->sidx], AF_INET6)) 5234 pf_change_a6(saddr, 5235 &pd->hdr.icmp6->icmp6_cksum, 5236 &nk->addr[pd->sidx], 0); 5237 5238 if (PF_ANEQ(pd->dst, 5239 &nk->addr[pd->didx], AF_INET6)) 5240 pf_change_a6(daddr, 5241 &pd->hdr.icmp6->icmp6_cksum, 5242 &nk->addr[pd->didx], 0); 5243 5244 m->m_flags &= ~M_HASH; 5245 m_copyback(m, off, 5246 sizeof(struct icmp6_hdr), 5247 (caddr_t)pd->hdr.icmp6); 5248 break; 5249 #endif /* INET6 */ 5250 } 5251 } 5252 } else { 5253 /* 5254 * ICMP error message in response to a TCP/UDP packet. 
* Extract the inner TCP/UDP header and search for that state. 5256 */ 5257 5258 struct pf_pdesc pd2; 5259 #ifdef INET 5260 struct ip h2; 5261 #endif /* INET */ 5262 #ifdef INET6 5263 struct ip6_hdr h2_6; 5264 int terminal = 0; 5265 #endif /* INET6 */ 5266 int ipoff2; 5267 int off2; 5268 5269 pd2.not_cpu_localized = 1; 5270 pd2.af = pd->af; 5271 /* Payload packet is from the opposite direction. */ 5272 pd2.sidx = (direction == PF_IN) ? 1 : 0; 5273 pd2.didx = (direction == PF_IN) ? 0 : 1; 5274 switch (pd->af) { 5275 #ifdef INET 5276 case AF_INET: 5277 /* offset of h2 in mbuf chain */ 5278 ipoff2 = off + ICMP_MINLEN; 5279 5280 if (!pf_pull_hdr(m, ipoff2, &h2, sizeof(h2), 5281 NULL, reason, pd2.af)) { 5282 DPFPRINTF(PF_DEBUG_MISC, 5283 ("pf: ICMP error message too short " 5284 "(ip)\n")); 5285 FAIL (PF_DROP); 5286 } 5287 /* 5288 * ICMP error messages don't refer to non-first 5289 * fragments 5290 */ 5291 if (h2.ip_off & htons(IP_OFFMASK)) { 5292 REASON_SET(reason, PFRES_FRAG); 5293 FAIL (PF_DROP); 5294 } 5295 5296 /* offset of protocol header that follows h2 */ 5297 off2 = ipoff2 + (h2.ip_hl << 2); 5298 5299 pd2.proto = h2.ip_p; 5300 pd2.src = (struct pf_addr *)&h2.ip_src; 5301 pd2.dst = (struct pf_addr *)&h2.ip_dst; 5302 pd2.ip_sum = &h2.ip_sum; 5303 break; 5304 #endif /* INET */ 5305 #ifdef INET6 5306 case AF_INET6: 5307 ipoff2 = off + sizeof(struct icmp6_hdr); 5308 5309 if (!pf_pull_hdr(m, ipoff2, &h2_6, sizeof(h2_6), 5310 NULL, reason, pd2.af)) { 5311 DPFPRINTF(PF_DEBUG_MISC, 5312 ("pf: ICMP error message too short " 5313 "(ip6)\n")); 5314 FAIL (PF_DROP); 5315 } 5316 pd2.proto = h2_6.ip6_nxt; 5317 pd2.src = (struct pf_addr *)&h2_6.ip6_src; 5318 pd2.dst = (struct pf_addr *)&h2_6.ip6_dst; 5319 pd2.ip_sum = NULL; 5320 off2 = ipoff2 + sizeof(h2_6); 5321 do { 5322 switch (pd2.proto) { 5323 case IPPROTO_FRAGMENT: 5324 /* 5325 * ICMPv6 error messages for 5326 * non-first fragments 5327 */ 5328 REASON_SET(reason, PFRES_FRAG); 5329 FAIL (PF_DROP); 5330 case IPPROTO_AH: 5331 case IPPROTO_HOPOPTS: 5332 case IPPROTO_ROUTING: 5333 case IPPROTO_DSTOPTS: { 5334 /* get next header and header length */ 5335 struct ip6_ext opt6; 5336 5337 if (!pf_pull_hdr(m, off2, &opt6, 5338 sizeof(opt6), NULL, reason, 5339 pd2.af)) { 5340 DPFPRINTF(PF_DEBUG_MISC, 5341 ("pf: ICMPv6 short opt\n")); 5342 FAIL (PF_DROP); 5343 } 5344 if (pd2.proto == IPPROTO_AH) 5345 off2 += (opt6.ip6e_len + 2) * 4; 5346 else 5347 off2 += (opt6.ip6e_len + 1) * 8; 5348 pd2.proto = opt6.ip6e_nxt; 5349 /* go to the next header */ 5350 break; 5351 } 5352 default: 5353 terminal++; 5354 break; 5355 } 5356 } while (!terminal); 5357 break; 5358 #endif /* INET6 */ 5359 default: 5360 DPFPRINTF(PF_DEBUG_MISC, 5361 ("pf: ICMP AF %d unknown (ip6)\n", pd->af)); 5362 FAIL (PF_DROP); 5363 break; 5364 } 5365 5366 switch (pd2.proto) { 5367 case IPPROTO_TCP: { 5368 struct tcphdr th; 5369 u_int32_t seq; 5370 struct pf_state_peer *src, *dst; 5371 u_int8_t dws; 5372 int copyback = 0; 5373 5374 /* 5375 * Only the first 8 bytes of the TCP header can be 5376 * expected. Don't access any TCP header fields after 5377 * th_seq; an ackskew test is not possible.
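* (An ICMP error is only required to quote the offending IP header plus the first 8 data octets -- RFC 792.)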
5378 */ 5379 if (!pf_pull_hdr(m, off2, &th, 8, NULL, reason, 5380 pd2.af)) { 5381 DPFPRINTF(PF_DEBUG_MISC, 5382 ("pf: ICMP error message too short " 5383 "(tcp)\n")); 5384 FAIL (PF_DROP); 5385 } 5386 5387 key.af = pd2.af; 5388 key.proto = IPPROTO_TCP; 5389 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); 5390 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); 5391 key.port[pd2.sidx] = th.th_sport; 5392 key.port[pd2.didx] = th.th_dport; 5393 5394 STATE_LOOKUP(kif, &key, direction, *state, m); 5395 lockmgr(&(*state)->lk, LK_EXCLUSIVE); 5396 5397 if (direction == (*state)->direction) { 5398 src = &(*state)->dst; 5399 dst = &(*state)->src; 5400 } else { 5401 src = &(*state)->src; 5402 dst = &(*state)->dst; 5403 } 5404 5405 if (src->wscale && dst->wscale) 5406 dws = dst->wscale & PF_WSCALE_MASK; 5407 else 5408 dws = 0; 5409 5410 /* Demodulate sequence number */ 5411 seq = ntohl(th.th_seq) - src->seqdiff; 5412 if (src->seqdiff) { 5413 pf_change_a(&th.th_seq, icmpsum, 5414 htonl(seq), 0); 5415 copyback = 1; 5416 } 5417 5418 if (!((*state)->state_flags & PFSTATE_SLOPPY) && 5419 (!SEQ_GEQ(src->seqhi, seq) || 5420 !SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)))) { 5421 if (pf_status.debug >= PF_DEBUG_MISC) { 5422 kprintf("pf: BAD ICMP %d:%d ", 5423 icmptype, pd->hdr.icmp->icmp_code); 5424 pf_print_host(pd->src, 0, pd->af); 5425 kprintf(" -> "); 5426 pf_print_host(pd->dst, 0, pd->af); 5427 kprintf(" state: "); 5428 pf_print_state(*state); 5429 kprintf(" seq=%u\n", seq); 5430 } 5431 REASON_SET(reason, PFRES_BADSTATE); 5432 FAIL (PF_DROP); 5433 } else { 5434 if (pf_status.debug >= PF_DEBUG_MISC) { 5435 kprintf("pf: OK ICMP %d:%d ", 5436 icmptype, pd->hdr.icmp->icmp_code); 5437 pf_print_host(pd->src, 0, pd->af); 5438 kprintf(" -> "); 5439 pf_print_host(pd->dst, 0, pd->af); 5440 kprintf(" state: "); 5441 pf_print_state(*state); 5442 kprintf(" seq=%u\n", seq); 5443 } 5444 } 5445 5446 /* translate source/destination address, if necessary */ 5447 if ((*state)->key[PF_SK_WIRE] != 5448 (*state)->key[PF_SK_STACK]) { 5449 struct pf_state_key *nk = 5450 (*state)->key[pd->didx]; 5451 5452 if (PF_ANEQ(pd2.src, 5453 &nk->addr[pd2.sidx], pd2.af) || 5454 nk->port[pd2.sidx] != th.th_sport) 5455 pf_change_icmp(pd2.src, &th.th_sport, 5456 daddr, &nk->addr[pd2.sidx], 5457 nk->port[pd2.sidx], NULL, 5458 pd2.ip_sum, icmpsum, 5459 pd->ip_sum, 0, pd2.af); 5460 5461 if (PF_ANEQ(pd2.dst, 5462 &nk->addr[pd2.didx], pd2.af) || 5463 nk->port[pd2.didx] != th.th_dport) 5464 pf_change_icmp(pd2.dst, &th.th_dport, 5465 NULL, /* XXX Inbound NAT? 
*/ 5466 &nk->addr[pd2.didx], 5467 nk->port[pd2.didx], NULL, 5468 pd2.ip_sum, icmpsum, 5469 pd->ip_sum, 0, pd2.af); 5470 copyback = 1; 5471 } 5472 5473 if (copyback) { 5474 switch (pd2.af) { 5475 #ifdef INET 5476 case AF_INET: 5477 m_copyback(m, off, ICMP_MINLEN, 5478 (caddr_t)pd->hdr.icmp); 5479 m_copyback(m, ipoff2, sizeof(h2), 5480 (caddr_t)&h2); 5481 break; 5482 #endif /* INET */ 5483 #ifdef INET6 5484 case AF_INET6: 5485 m_copyback(m, off, 5486 sizeof(struct icmp6_hdr), 5487 (caddr_t)pd->hdr.icmp6); 5488 m_copyback(m, ipoff2, sizeof(h2_6), 5489 (caddr_t)&h2_6); 5490 break; 5491 #endif /* INET6 */ 5492 } 5493 m->m_flags &= ~M_HASH; 5494 m_copyback(m, off2, 8, (caddr_t)&th); 5495 } 5496 break; 5497 } 5498 case IPPROTO_UDP: { 5499 struct udphdr uh; 5500 5501 if (!pf_pull_hdr(m, off2, &uh, sizeof(uh), 5502 NULL, reason, pd2.af)) { 5503 DPFPRINTF(PF_DEBUG_MISC, 5504 ("pf: ICMP error message too short " 5505 "(udp)\n")); 5506 return (PF_DROP); 5507 } 5508 5509 key.af = pd2.af; 5510 key.proto = IPPROTO_UDP; 5511 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); 5512 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); 5513 key.port[pd2.sidx] = uh.uh_sport; 5514 key.port[pd2.didx] = uh.uh_dport; 5515 5516 STATE_LOOKUP(kif, &key, direction, *state, m); 5517 lockmgr(&(*state)->lk, LK_EXCLUSIVE); 5518 5519 /* translate source/destination address, if necessary */ 5520 if ((*state)->key[PF_SK_WIRE] != 5521 (*state)->key[PF_SK_STACK]) { 5522 struct pf_state_key *nk = 5523 (*state)->key[pd->didx]; 5524 5525 if (PF_ANEQ(pd2.src, 5526 &nk->addr[pd2.sidx], pd2.af) || 5527 nk->port[pd2.sidx] != uh.uh_sport) 5528 pf_change_icmp(pd2.src, &uh.uh_sport, 5529 daddr, &nk->addr[pd2.sidx], 5530 nk->port[pd2.sidx], &uh.uh_sum, 5531 pd2.ip_sum, icmpsum, 5532 pd->ip_sum, 1, pd2.af); 5533 5534 if (PF_ANEQ(pd2.dst, 5535 &nk->addr[pd2.didx], pd2.af) || 5536 nk->port[pd2.didx] != uh.uh_dport) 5537 pf_change_icmp(pd2.dst, &uh.uh_dport, 5538 NULL, /* XXX Inbound NAT? 
*/ 5539 &nk->addr[pd2.didx], 5540 nk->port[pd2.didx], &uh.uh_sum, 5541 pd2.ip_sum, icmpsum, 5542 pd->ip_sum, 1, pd2.af); 5543 5544 switch (pd2.af) { 5545 #ifdef INET 5546 case AF_INET: 5547 m_copyback(m, off, ICMP_MINLEN, 5548 (caddr_t)pd->hdr.icmp); 5549 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2); 5550 break; 5551 #endif /* INET */ 5552 #ifdef INET6 5553 case AF_INET6: 5554 m_copyback(m, off, 5555 sizeof(struct icmp6_hdr), 5556 (caddr_t)pd->hdr.icmp6); 5557 m_copyback(m, ipoff2, sizeof(h2_6), 5558 (caddr_t)&h2_6); 5559 break; 5560 #endif /* INET6 */ 5561 } 5562 m->m_flags &= ~M_HASH; 5563 m_copyback(m, off2, sizeof(uh), (caddr_t)&uh); 5564 } 5565 break; 5566 } 5567 #ifdef INET 5568 case IPPROTO_ICMP: { 5569 struct icmp iih; 5570 5571 if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN, 5572 NULL, reason, pd2.af)) { 5573 DPFPRINTF(PF_DEBUG_MISC, 5574 ("pf: ICMP error message too short " 5575 "(icmp)\n")); 5576 return (PF_DROP); 5577 } 5578 5579 key.af = pd2.af; 5580 key.proto = IPPROTO_ICMP; 5581 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); 5582 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); 5583 key.port[0] = key.port[1] = iih.icmp_id; 5584 5585 STATE_LOOKUP(kif, &key, direction, *state, m); 5586 lockmgr(&(*state)->lk, LK_EXCLUSIVE); 5587 5588 /* translate source/destination address, if necessary */ 5589 if ((*state)->key[PF_SK_WIRE] != 5590 (*state)->key[PF_SK_STACK]) { 5591 struct pf_state_key *nk = 5592 (*state)->key[pd->didx]; 5593 5594 if (PF_ANEQ(pd2.src, 5595 &nk->addr[pd2.sidx], pd2.af) || 5596 nk->port[pd2.sidx] != iih.icmp_id) 5597 pf_change_icmp(pd2.src, &iih.icmp_id, 5598 daddr, &nk->addr[pd2.sidx], 5599 nk->port[pd2.sidx], NULL, 5600 pd2.ip_sum, icmpsum, 5601 pd->ip_sum, 0, AF_INET); 5602 5603 if (PF_ANEQ(pd2.dst, 5604 &nk->addr[pd2.didx], pd2.af) || 5605 nk->port[pd2.didx] != iih.icmp_id) 5606 pf_change_icmp(pd2.dst, &iih.icmp_id, 5607 NULL, /* XXX Inbound NAT? */ 5608 &nk->addr[pd2.didx], 5609 nk->port[pd2.didx], NULL, 5610 pd2.ip_sum, icmpsum, 5611 pd->ip_sum, 0, AF_INET); 5612 5613 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp); 5614 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2); 5615 m_copyback(m, off2, ICMP_MINLEN, (caddr_t)&iih); 5616 m->m_flags &= ~M_HASH; 5617 } 5618 break; 5619 } 5620 #endif /* INET */ 5621 #ifdef INET6 5622 case IPPROTO_ICMPV6: { 5623 struct icmp6_hdr iih; 5624 5625 if (!pf_pull_hdr(m, off2, &iih, 5626 sizeof(struct icmp6_hdr), NULL, reason, pd2.af)) { 5627 DPFPRINTF(PF_DEBUG_MISC, 5628 ("pf: ICMP error message too short " 5629 "(icmp6)\n")); 5630 FAIL (PF_DROP); 5631 } 5632 5633 key.af = pd2.af; 5634 key.proto = IPPROTO_ICMPV6; 5635 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); 5636 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); 5637 key.port[0] = key.port[1] = iih.icmp6_id; 5638 5639 STATE_LOOKUP(kif, &key, direction, *state, m); 5640 lockmgr(&(*state)->lk, LK_EXCLUSIVE); 5641 5642 /* translate source/destination address, if necessary */ 5643 if ((*state)->key[PF_SK_WIRE] != 5644 (*state)->key[PF_SK_STACK]) { 5645 struct pf_state_key *nk = 5646 (*state)->key[pd->didx]; 5647 5648 if (PF_ANEQ(pd2.src, 5649 &nk->addr[pd2.sidx], pd2.af) || 5650 nk->port[pd2.sidx] != iih.icmp6_id) 5651 pf_change_icmp(pd2.src, &iih.icmp6_id, 5652 daddr, &nk->addr[pd2.sidx], 5653 nk->port[pd2.sidx], NULL, 5654 pd2.ip_sum, icmpsum, 5655 pd->ip_sum, 0, AF_INET6); 5656 5657 if (PF_ANEQ(pd2.dst, 5658 &nk->addr[pd2.didx], pd2.af) || 5659 nk->port[pd2.didx] != iih.icmp6_id) 5660 pf_change_icmp(pd2.dst, &iih.icmp6_id, 5661 NULL, /* XXX Inbound NAT?
*/ 5662 &nk->addr[pd2.didx], 5663 nk->port[pd2.didx], NULL, 5664 pd2.ip_sum, icmpsum, 5665 pd->ip_sum, 0, AF_INET6); 5666 5667 m_copyback(m, off, sizeof(struct icmp6_hdr), 5668 (caddr_t)pd->hdr.icmp6); 5669 m_copyback(m, ipoff2, sizeof(h2_6), (caddr_t)&h2_6); 5670 m_copyback(m, off2, sizeof(struct icmp6_hdr), 5671 (caddr_t)&iih); 5672 m->m_flags &= ~M_HASH; 5673 } 5674 break; 5675 } 5676 #endif /* INET6 */ 5677 default: { 5678 key.af = pd2.af; 5679 key.proto = pd2.proto; 5680 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); 5681 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); 5682 key.port[0] = key.port[1] = 0; 5683 5684 STATE_LOOKUP(kif, &key, direction, *state, m); 5685 lockmgr(&(*state)->lk, LK_EXCLUSIVE); 5686 5687 /* translate source/destination address, if necessary */ 5688 if ((*state)->key[PF_SK_WIRE] != 5689 (*state)->key[PF_SK_STACK]) { 5690 struct pf_state_key *nk = 5691 (*state)->key[pd->didx]; 5692 5693 if (PF_ANEQ(pd2.src, 5694 &nk->addr[pd2.sidx], pd2.af)) 5695 pf_change_icmp(pd2.src, NULL, daddr, 5696 &nk->addr[pd2.sidx], 0, NULL, 5697 pd2.ip_sum, icmpsum, 5698 pd->ip_sum, 0, pd2.af); 5699 5700 if (PF_ANEQ(pd2.dst, 5701 &nk->addr[pd2.didx], pd2.af)) 5702 pf_change_icmp(pd2.dst, NULL, 5703 NULL, /* XXX Inbound NAT? */ 5704 &nk->addr[pd2.didx], 0, NULL, 5705 pd2.ip_sum, icmpsum, 5706 pd->ip_sum, 0, pd2.af); 5707 5708 switch (pd2.af) { 5709 #ifdef INET 5710 case AF_INET: 5711 m_copyback(m, off, ICMP_MINLEN, 5712 (caddr_t)pd->hdr.icmp); 5713 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2); 5714 m->m_flags &= ~M_HASH; 5715 break; 5716 #endif /* INET */ 5717 #ifdef INET6 5718 case AF_INET6: 5719 m_copyback(m, off, 5720 sizeof(struct icmp6_hdr), 5721 (caddr_t)pd->hdr.icmp6); 5722 m_copyback(m, ipoff2, sizeof(h2_6), 5723 (caddr_t)&h2_6); 5724 m->m_flags &= ~M_HASH; 5725 break; 5726 #endif /* INET6 */ 5727 } 5728 } 5729 break; 5730 } 5731 } 5732 } 5733 5734 pfsync_update_state(*state); 5735 error = PF_PASS; 5736 done: 5737 if (*state) 5738 lockmgr(&(*state)->lk, LK_RELEASE); 5739 return (error); 5740 } 5741 5742 /* 5743 * Test other connection state. Caller must hold the state locked.
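* Covers protocols other than TCP/UDP/ICMP: states are keyed on the address pair alone (both key ports are zero) and tracked with the simple SINGLE/MULTIPLE scheme.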
*/ 5745 int 5746 pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif, 5747 struct mbuf *m, struct pf_pdesc *pd) 5748 { 5749 struct pf_state_peer *src, *dst; 5750 struct pf_state_key_cmp key; 5751 5752 bzero(&key, sizeof(key)); 5753 key.af = pd->af; 5754 key.proto = pd->proto; 5755 if (direction == PF_IN) { 5756 PF_ACPY(&key.addr[0], pd->src, key.af); 5757 PF_ACPY(&key.addr[1], pd->dst, key.af); 5758 key.port[0] = key.port[1] = 0; 5759 } else { 5760 PF_ACPY(&key.addr[1], pd->src, key.af); 5761 PF_ACPY(&key.addr[0], pd->dst, key.af); 5762 key.port[1] = key.port[0] = 0; 5763 } 5764 5765 STATE_LOOKUP(kif, &key, direction, *state, m); 5766 lockmgr(&(*state)->lk, LK_EXCLUSIVE); 5767 5768 if (direction == (*state)->direction) { 5769 src = &(*state)->src; 5770 dst = &(*state)->dst; 5771 } else { 5772 src = &(*state)->dst; 5773 dst = &(*state)->src; 5774 } 5775 5776 /* update states */ 5777 if (src->state < PFOTHERS_SINGLE) 5778 src->state = PFOTHERS_SINGLE; 5779 if (dst->state == PFOTHERS_SINGLE) 5780 dst->state = PFOTHERS_MULTIPLE; 5781 5782 /* update expire time */ 5783 (*state)->expire = time_second; 5784 if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE) 5785 (*state)->timeout = PFTM_OTHER_MULTIPLE; 5786 else 5787 (*state)->timeout = PFTM_OTHER_SINGLE; 5788 5789 /* translate source/destination address, if necessary */ 5790 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { 5791 struct pf_state_key *nk = (*state)->key[pd->didx]; 5792 5793 KKASSERT(nk); 5794 KKASSERT(pd); 5795 KKASSERT(pd->src); 5796 KKASSERT(pd->dst); 5797 switch (pd->af) { 5798 #ifdef INET 5799 case AF_INET: 5800 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET)) 5801 pf_change_a(&pd->src->v4.s_addr, 5802 pd->ip_sum, 5803 nk->addr[pd->sidx].v4.s_addr, 5804 0); 5805 5806 5807 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET)) 5808 pf_change_a(&pd->dst->v4.s_addr, 5809 pd->ip_sum, 5810 nk->addr[pd->didx].v4.s_addr, 5811 0); 5812 5813 break; 5814 #endif /* INET */ 5815 #ifdef INET6 5816 case AF_INET6: 5817 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET6)) 5818 PF_ACPY(pd->src, &nk->addr[pd->sidx], pd->af); 5819 5820 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET6)) 5821 PF_ACPY(pd->dst, &nk->addr[pd->didx], pd->af); 5822 #endif /* INET6 */ 5823 } 5824 } 5825 5826 pfsync_update_state(*state); 5827 lockmgr(&(*state)->lk, LK_RELEASE); 5828 return (PF_PASS); 5829 } 5830 5831 /* 5832 * ipoff and off are measured from the start of the mbuf chain. 5833 * h must be at "ipoff" on the mbuf chain.
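* On success, len bytes at off are copied into the caller-supplied buffer p and p is returned; on failure NULL is returned and *actionp/*reasonp are set.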
5834 */ 5835 void * 5836 pf_pull_hdr(struct mbuf *m, int off, void *p, int len, 5837 u_short *actionp, u_short *reasonp, sa_family_t af) 5838 { 5839 switch (af) { 5840 #ifdef INET 5841 case AF_INET: { 5842 struct ip *h = mtod(m, struct ip *); 5843 u_int16_t fragoff = (h->ip_off & IP_OFFMASK) << 3; 5844 5845 if (fragoff) { 5846 if (fragoff >= len) 5847 ACTION_SET(actionp, PF_PASS); 5848 else { 5849 ACTION_SET(actionp, PF_DROP); 5850 REASON_SET(reasonp, PFRES_FRAG); 5851 } 5852 return (NULL); 5853 } 5854 if (m->m_pkthdr.len < off + len || 5855 h->ip_len < off + len) { 5856 ACTION_SET(actionp, PF_DROP); 5857 REASON_SET(reasonp, PFRES_SHORT); 5858 return (NULL); 5859 } 5860 break; 5861 } 5862 #endif /* INET */ 5863 #ifdef INET6 5864 case AF_INET6: { 5865 struct ip6_hdr *h = mtod(m, struct ip6_hdr *); 5866 5867 if (m->m_pkthdr.len < off + len || 5868 (ntohs(h->ip6_plen) + sizeof(struct ip6_hdr)) < 5869 (unsigned)(off + len)) { 5870 ACTION_SET(actionp, PF_DROP); 5871 REASON_SET(reasonp, PFRES_SHORT); 5872 return (NULL); 5873 } 5874 break; 5875 } 5876 #endif /* INET6 */ 5877 } 5878 m_copydata(m, off, len, p); 5879 return (p); 5880 } 5881 5882 int 5883 pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif) 5884 { 5885 struct sockaddr_in *dst; 5886 int ret = 1; 5887 int check_mpath; 5888 #ifdef INET6 5889 struct sockaddr_in6 *dst6; 5890 struct route_in6 ro; 5891 #else 5892 struct route ro; 5893 #endif 5894 struct radix_node *rn; 5895 struct rtentry *rt; 5896 struct ifnet *ifp; 5897 5898 check_mpath = 0; 5899 bzero(&ro, sizeof(ro)); 5900 switch (af) { 5901 case AF_INET: 5902 dst = satosin(&ro.ro_dst); 5903 dst->sin_family = AF_INET; 5904 dst->sin_len = sizeof(*dst); 5905 dst->sin_addr = addr->v4; 5906 break; 5907 #ifdef INET6 5908 case AF_INET6: 5909 /* 5910 * Skip check for addresses with embedded interface scope, 5911 * as they would always match anyway. 
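* (KAME-derived stacks embed the scope zone id inside the address itself for such addresses, so a route lookup would be meaningless here.)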
5912 */ 5913 if (IN6_IS_SCOPE_EMBED(&addr->v6)) 5914 goto out; 5915 dst6 = (struct sockaddr_in6 *)&ro.ro_dst; 5916 dst6->sin6_family = AF_INET6; 5917 dst6->sin6_len = sizeof(*dst6); 5918 dst6->sin6_addr = addr->v6; 5919 break; 5920 #endif /* INET6 */ 5921 default: 5922 return (0); 5923 } 5924 5925 /* Skip checks for ipsec interfaces */ 5926 if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC) 5927 goto out; 5928 5929 rtalloc_ign((struct route *)&ro, 0); 5930 5931 if (ro.ro_rt != NULL) { 5932 /* No interface given, this is a no-route check */ 5933 if (kif == NULL) 5934 goto out; 5935 5936 if (kif->pfik_ifp == NULL) { 5937 ret = 0; 5938 goto out; 5939 } 5940 5941 /* Perform uRPF check if passed input interface */ 5942 ret = 0; 5943 rn = (struct radix_node *)ro.ro_rt; 5944 do { 5945 rt = (struct rtentry *)rn; 5946 ifp = rt->rt_ifp; 5947 5948 if (kif->pfik_ifp == ifp) 5949 ret = 1; 5950 rn = NULL; 5951 } while (check_mpath == 1 && rn != NULL && ret == 0); 5952 } else 5953 ret = 0; 5954 out: 5955 if (ro.ro_rt != NULL) 5956 RTFREE(ro.ro_rt); 5957 return (ret); 5958 } 5959 5960 int 5961 pf_rtlabel_match(struct pf_addr *addr, sa_family_t af, struct pf_addr_wrap *aw) 5962 { 5963 struct sockaddr_in *dst; 5964 #ifdef INET6 5965 struct sockaddr_in6 *dst6; 5966 struct route_in6 ro; 5967 #else 5968 struct route ro; 5969 #endif 5970 int ret = 0; 5971 5972 ASSERT_LWKT_TOKEN_HELD(&pf_token); 5973 5974 bzero(&ro, sizeof(ro)); 5975 switch (af) { 5976 case AF_INET: 5977 dst = satosin(&ro.ro_dst); 5978 dst->sin_family = AF_INET; 5979 dst->sin_len = sizeof(*dst); 5980 dst->sin_addr = addr->v4; 5981 break; 5982 #ifdef INET6 5983 case AF_INET6: 5984 dst6 = (struct sockaddr_in6 *)&ro.ro_dst; 5985 dst6->sin6_family = AF_INET6; 5986 dst6->sin6_len = sizeof(*dst6); 5987 dst6->sin6_addr = addr->v6; 5988 break; 5989 #endif /* INET6 */ 5990 default: 5991 return (0); 5992 } 5993 5994 rtalloc_ign((struct route *)&ro, (RTF_CLONING | RTF_PRCLONING)); 5995 5996 if (ro.ro_rt != NULL) { 5997 RTFREE(ro.ro_rt); 5998 } 5999 6000 return (ret); 6001 } 6002 6003 #ifdef INET 6004 void 6005 pf_route(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp, 6006 struct pf_state *s, struct pf_pdesc *pd) 6007 { 6008 struct mbuf *m0, *m1; 6009 struct route iproute; 6010 struct route *ro = NULL; 6011 struct sockaddr_in *dst; 6012 struct ip *ip; 6013 struct ifnet *ifp = NULL; 6014 struct pf_addr naddr; 6015 struct pf_src_node *sn = NULL; 6016 int error = 0; 6017 int sw_csum; 6018 6019 ASSERT_LWKT_TOKEN_HELD(&pf_token); 6020 6021 if (m == NULL || *m == NULL || r == NULL || 6022 (dir != PF_IN && dir != PF_OUT) || oifp == NULL) 6023 panic("pf_route: invalid parameters"); 6024 6025 if (((*m)->m_pkthdr.fw_flags & PF_MBUF_ROUTED) == 0) { 6026 (*m)->m_pkthdr.fw_flags |= PF_MBUF_ROUTED; 6027 (*m)->m_pkthdr.pf.routed = 1; 6028 } else { 6029 if ((*m)->m_pkthdr.pf.routed++ > 3) { 6030 m0 = *m; 6031 *m = NULL; 6032 goto bad; 6033 } 6034 } 6035 6036 if (r->rt == PF_DUPTO) { 6037 if ((m0 = m_dup(*m, M_NOWAIT)) == NULL) { 6038 return; 6039 } 6040 } else { 6041 if ((r->rt == PF_REPLYTO) == (r->direction == dir)) { 6042 return; 6043 } 6044 m0 = *m; 6045 } 6046 6047 if (m0->m_len < sizeof(struct ip)) { 6048 DPFPRINTF(PF_DEBUG_URGENT, 6049 ("pf_route: m0->m_len < sizeof(struct ip)\n")); 6050 goto bad; 6051 } 6052 6053 ip = mtod(m0, struct ip *); 6054 6055 ro = &iproute; 6056 bzero((caddr_t)ro, sizeof(*ro)); 6057 dst = satosin(&ro->ro_dst); 6058 dst->sin_family = AF_INET; 6059 dst->sin_len = sizeof(*dst); 6060 dst->sin_addr = ip->ip_dst; 6061 6062 if 
(r->rt == PF_FASTROUTE) { 6063 rtalloc(ro); 6064 if (ro->ro_rt == 0) { 6065 ipstat.ips_noroute++; 6066 goto bad; 6067 } 6068 6069 ifp = ro->ro_rt->rt_ifp; 6070 ro->ro_rt->rt_use++; 6071 6072 if (ro->ro_rt->rt_flags & RTF_GATEWAY) 6073 dst = satosin(ro->ro_rt->rt_gateway); 6074 } else { 6075 if (TAILQ_EMPTY(&r->rpool.list)) { 6076 DPFPRINTF(PF_DEBUG_URGENT, 6077 ("pf_route: TAILQ_EMPTY(&r->rpool.list)\n")); 6078 goto bad; 6079 } 6080 if (s == NULL) { 6081 pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src, 6082 &naddr, NULL, &sn); 6083 if (!PF_AZERO(&naddr, AF_INET)) 6084 dst->sin_addr.s_addr = naddr.v4.s_addr; 6085 ifp = r->rpool.cur->kif ? 6086 r->rpool.cur->kif->pfik_ifp : NULL; 6087 } else { 6088 if (!PF_AZERO(&s->rt_addr, AF_INET)) 6089 dst->sin_addr.s_addr = 6090 s->rt_addr.v4.s_addr; 6091 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL; 6092 } 6093 } 6094 if (ifp == NULL) 6095 goto bad; 6096 6097 if (oifp != ifp) { 6098 if (pf_test(PF_OUT, ifp, &m0, NULL, NULL) != PF_PASS) { 6099 goto bad; 6100 } else if (m0 == NULL) { 6101 goto done; 6102 } 6103 if (m0->m_len < sizeof(struct ip)) { 6104 DPFPRINTF(PF_DEBUG_URGENT, 6105 ("pf_route: m0->m_len < sizeof(struct ip)\n")); 6106 goto bad; 6107 } 6108 ip = mtod(m0, struct ip *); 6109 } 6110 6111 /* Copied from FreeBSD 5.1-CURRENT ip_output. */ 6112 m0->m_pkthdr.csum_flags |= CSUM_IP; 6113 sw_csum = m0->m_pkthdr.csum_flags & ~ifp->if_hwassist; 6114 if (sw_csum & CSUM_DELAY_DATA) { 6115 in_delayed_cksum(m0); 6116 sw_csum &= ~CSUM_DELAY_DATA; 6117 } 6118 m0->m_pkthdr.csum_flags &= ifp->if_hwassist; 6119 m0->m_pkthdr.csum_iphlen = (ip->ip_hl << 2); 6120 6121 /* 6122 * WARNING! We cannot fragment if the packet was modified from an 6123 * original which expected to be using TSO. In this 6124 * situation we pray that the target interface is 6125 * compatible with the originating interface. 6126 */ 6127 if (ip->ip_len <= ifp->if_mtu || 6128 (m0->m_pkthdr.csum_flags & CSUM_TSO) || 6129 ((ifp->if_hwassist & CSUM_FRAGMENT) && 6130 (ip->ip_off & IP_DF) == 0)) { 6131 ip->ip_len = htons(ip->ip_len); 6132 ip->ip_off = htons(ip->ip_off); 6133 ip->ip_sum = 0; 6134 if (sw_csum & CSUM_DELAY_IP) { 6135 /* From KAME */ 6136 if (ip->ip_v == IPVERSION && 6137 (ip->ip_hl << 2) == sizeof(*ip)) { 6138 ip->ip_sum = in_cksum_hdr(ip); 6139 } else { 6140 ip->ip_sum = in_cksum(m0, ip->ip_hl << 2); 6141 } 6142 } 6143 lwkt_reltoken(&pf_token); 6144 error = ifp->if_output(ifp, m0, sintosa(dst), ro->ro_rt); 6145 lwkt_gettoken(&pf_token); 6146 goto done; 6147 } 6148 6149 /* 6150 * Too large for interface; fragment if possible. 6151 * Must be able to put at least 8 bytes per fragment. 
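 * (Fragment offsets are expressed in units of 8 octets, so every fragment except the last must carry a multiple of 8 payload bytes.)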
6152 */ 6153 if (ip->ip_off & IP_DF) { 6154 ipstat.ips_cantfrag++; 6155 if (r->rt != PF_DUPTO) { 6156 icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0, 6157 ifp->if_mtu); 6158 goto done; 6159 } else 6160 goto bad; 6161 } 6162 6163 m1 = m0; 6164 error = ip_fragment(ip, &m0, ifp->if_mtu, ifp->if_hwassist, sw_csum); 6165 if (error) { 6166 goto bad; 6167 } 6168 6169 for (m0 = m1; m0; m0 = m1) { 6170 m1 = m0->m_nextpkt; 6171 m0->m_nextpkt = 0; 6172 if (error == 0) { 6173 lwkt_reltoken(&pf_token); 6174 error = (*ifp->if_output)(ifp, m0, sintosa(dst), 6175 NULL); 6176 lwkt_gettoken(&pf_token); 6177 } else 6178 m_freem(m0); 6179 } 6180 6181 if (error == 0) 6182 ipstat.ips_fragmented++; 6183 6184 done: 6185 if (r->rt != PF_DUPTO) 6186 *m = NULL; 6187 if (ro == &iproute && ro->ro_rt) 6188 RTFREE(ro->ro_rt); 6189 return; 6190 6191 bad: 6192 m_freem(m0); 6193 goto done; 6194 } 6195 #endif /* INET */ 6196 6197 #ifdef INET6 6198 void 6199 pf_route6(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp, 6200 struct pf_state *s, struct pf_pdesc *pd) 6201 { 6202 struct mbuf *m0; 6203 struct route_in6 ip6route; 6204 struct route_in6 *ro; 6205 struct sockaddr_in6 *dst; 6206 struct ip6_hdr *ip6; 6207 struct ifnet *ifp = NULL; 6208 struct pf_addr naddr; 6209 struct pf_src_node *sn = NULL; 6210 6211 if (m == NULL || *m == NULL || r == NULL || 6212 (dir != PF_IN && dir != PF_OUT) || oifp == NULL) 6213 panic("pf_route6: invalid parameters"); 6214 6215 if (((*m)->m_pkthdr.fw_flags & PF_MBUF_ROUTED) == 0) { 6216 (*m)->m_pkthdr.fw_flags |= PF_MBUF_ROUTED; 6217 (*m)->m_pkthdr.pf.routed = 1; 6218 } else { 6219 if ((*m)->m_pkthdr.pf.routed++ > 3) { 6220 m0 = *m; 6221 *m = NULL; 6222 goto bad; 6223 } 6224 } 6225 6226 if (r->rt == PF_DUPTO) { 6227 if ((m0 = m_dup(*m, M_NOWAIT)) == NULL) 6228 return; 6229 } else { 6230 if ((r->rt == PF_REPLYTO) == (r->direction == dir)) 6231 return; 6232 m0 = *m; 6233 } 6234 6235 if (m0->m_len < sizeof(struct ip6_hdr)) { 6236 DPFPRINTF(PF_DEBUG_URGENT, 6237 ("pf_route6: m0->m_len < sizeof(struct ip6_hdr)\n")); 6238 goto bad; 6239 } 6240 ip6 = mtod(m0, struct ip6_hdr *); 6241 6242 ro = &ip6route; 6243 bzero((caddr_t)ro, sizeof(*ro)); 6244 dst = (struct sockaddr_in6 *)&ro->ro_dst; 6245 dst->sin6_family = AF_INET6; 6246 dst->sin6_len = sizeof(*dst); 6247 dst->sin6_addr = ip6->ip6_dst; 6248 6249 /* 6250 * DragonFly doesn't zero the auxiliary pkthdr fields, only fw_flags, 6251 * so make sure pf.flags is clear. 6252 * 6253 * Cheat. XXX why only in the v6 case??? 6254 */ 6255 if (r->rt == PF_FASTROUTE) { 6256 m0->m_pkthdr.fw_flags |= PF_MBUF_TAGGED; 6257 m0->m_pkthdr.pf.flags = 0; 6258 /* XXX Re-Check when Upgrading to > 4.4 */ 6259 m0->m_pkthdr.pf.statekey = NULL; 6260 ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL); 6261 return; 6262 } 6263 6264 if (TAILQ_EMPTY(&r->rpool.list)) { 6265 DPFPRINTF(PF_DEBUG_URGENT, 6266 ("pf_route6: TAILQ_EMPTY(&r->rpool.list)\n")); 6267 goto bad; 6268 } 6269 if (s == NULL) { 6270 pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src, 6271 &naddr, NULL, &sn); 6272 if (!PF_AZERO(&naddr, AF_INET6)) 6273 PF_ACPY((struct pf_addr *)&dst->sin6_addr, 6274 &naddr, AF_INET6); 6275 ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL; 6276 } else { 6277 if (!PF_AZERO(&s->rt_addr, AF_INET6)) 6278 PF_ACPY((struct pf_addr *)&dst->sin6_addr, 6279 &s->rt_addr, AF_INET6); 6280 ifp = s->rt_kif ?
s->rt_kif->pfik_ifp : NULL; 6281 } 6282 if (ifp == NULL) 6283 goto bad; 6284 6285 if (oifp != ifp) { 6286 if (pf_test6(PF_OUT, ifp, &m0, NULL, NULL) != PF_PASS) { 6287 goto bad; 6288 } else if (m0 == NULL) { 6289 goto done; 6290 } 6291 if (m0->m_len < sizeof(struct ip6_hdr)) { 6292 DPFPRINTF(PF_DEBUG_URGENT, 6293 ("pf_route6: m0->m_len < sizeof(struct ip6_hdr)\n")); 6294 goto bad; 6295 } 6296 ip6 = mtod(m0, struct ip6_hdr *); 6297 } 6298 6299 /* 6300 * If the packet is too large for the outgoing interface, 6301 * send back an icmp6 error. 6302 */ 6303 if (IN6_IS_SCOPE_EMBED(&dst->sin6_addr)) 6304 dst->sin6_addr.s6_addr16[1] = htons(ifp->if_index); 6305 if ((u_long)m0->m_pkthdr.len <= ifp->if_mtu) { 6306 nd6_output(ifp, ifp, m0, dst, NULL); 6307 } else { 6308 in6_ifstat_inc(ifp, ifs6_in_toobig); 6309 if (r->rt != PF_DUPTO) 6310 icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu); 6311 else 6312 goto bad; 6313 } 6314 6315 done: 6316 if (r->rt != PF_DUPTO) 6317 *m = NULL; 6318 return; 6319 6320 bad: 6321 m_freem(m0); 6322 goto done; 6323 } 6324 #endif /* INET6 */ 6325 6326 6327 /* 6328 * check protocol (tcp/udp/icmp/icmp6) checksum and set mbuf flag 6329 * off is the offset where the protocol header starts 6330 * len is the total length of protocol header plus payload 6331 * returns 0 when the checksum is valid, otherwise returns 1. 6332 */ 6333 /* 6334 * XXX 6335 * FreeBSD supports cksum offload for the following drivers. 6336 * em(4), gx(4), lge(4), nge(4), ti(4), xl(4) 6337 * If we could make full use of it we would outperform ipfw/ipfilter in 6338 * very heavy traffic. 6339 * I have not tested this because I don't have NICs that support cksum offload. 6340 * (There might be problems. Typical phenomena would be 6341 * 1. No route message for UDP packet. 6342 * 2. No connection acceptance from external hosts regardless of rule set.) 6343 */ 6344 int 6345 pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p, 6346 sa_family_t af) 6347 { 6348 u_int16_t sum = 0; 6349 int hw_assist = 0; 6350 struct ip *ip; 6351 6352 if (off < sizeof(struct ip) || len < sizeof(struct udphdr)) 6353 return (1); 6354 if (m->m_pkthdr.len < off + len) 6355 return (1); 6356 6357 switch (p) { 6358 case IPPROTO_TCP: 6359 case IPPROTO_UDP: 6360 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) { 6361 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) { 6362 sum = m->m_pkthdr.csum_data; 6363 } else { 6364 ip = mtod(m, struct ip *); 6365 sum = in_pseudo(ip->ip_src.s_addr, 6366 ip->ip_dst.s_addr, htonl((u_short)len + 6367 m->m_pkthdr.csum_data + p)); 6368 } 6369 sum ^= 0xffff; 6370 ++hw_assist; 6371 } 6372 break; 6373 case IPPROTO_ICMP: 6374 #ifdef INET6 6375 case IPPROTO_ICMPV6: 6376 #endif /* INET6 */ 6377 break; 6378 default: 6379 return (1); 6380 } 6381 6382 if (!hw_assist) { 6383 switch (af) { 6384 case AF_INET: 6385 if (p == IPPROTO_ICMP) { 6386 if (m->m_len < off) 6387 return (1); 6388 m->m_data += off; 6389 m->m_len -= off; 6390 sum = in_cksum(m, len); 6391 m->m_data -= off; 6392 m->m_len += off; 6393 } else { 6394 if (m->m_len < sizeof(struct ip)) 6395 return (1); 6396 sum = in_cksum_range(m, p, off, len); 6397 if (sum == 0) { 6398 m->m_pkthdr.csum_flags |= 6399 (CSUM_DATA_VALID | 6400 CSUM_PSEUDO_HDR); 6401 m->m_pkthdr.csum_data = 0xffff; 6402 } 6403 } 6404 break; 6405 #ifdef INET6 6406 case AF_INET6: 6407 if (m->m_len < sizeof(struct ip6_hdr)) 6408 return (1); 6409 sum = in6_cksum(m, p, off, len); 6410 /* 6411 * XXX 6412 * IPv6 H/W cksum off-load not supported yet!
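 * If hardware validation is ever added, the verified result could be cached in the mbuf just as the IPv4 path above does: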
6413 * 6414 * if (sum == 0) { 6415 * m->m_pkthdr.csum_flags |= 6416 * (CSUM_DATA_VALID|CSUM_PSEUDO_HDR); 6417 * m->m_pkthdr.csum_data = 0xffff; 6418 *} 6419 */ 6420 break; 6421 #endif /* INET6 */ 6422 default: 6423 return (1); 6424 } 6425 } 6426 if (sum) { 6427 switch (p) { 6428 case IPPROTO_TCP: 6429 tcpstat.tcps_rcvbadsum++; 6430 break; 6431 case IPPROTO_UDP: 6432 udp_stat.udps_badsum++; 6433 break; 6434 case IPPROTO_ICMP: 6435 icmpstat.icps_checksum++; 6436 break; 6437 #ifdef INET6 6438 case IPPROTO_ICMPV6: 6439 icmp6stat.icp6s_checksum++; 6440 break; 6441 #endif /* INET6 */ 6442 } 6443 return (1); 6444 } 6445 return (0); 6446 } 6447 6448 struct pf_divert * 6449 pf_find_divert(struct mbuf *m) 6450 { 6451 struct m_tag *mtag; 6452 6453 if ((mtag = m_tag_find(m, PACKET_TAG_PF_DIVERT, NULL)) == NULL) 6454 return (NULL); 6455 6456 return ((struct pf_divert *)(mtag + 1)); 6457 } 6458 6459 struct pf_divert * 6460 pf_get_divert(struct mbuf *m) 6461 { 6462 struct m_tag *mtag; 6463 6464 if ((mtag = m_tag_find(m, PACKET_TAG_PF_DIVERT, NULL)) == NULL) { 6465 mtag = m_tag_get(PACKET_TAG_PF_DIVERT, sizeof(struct pf_divert), 6466 M_NOWAIT); 6467 if (mtag == NULL) 6468 return (NULL); 6469 bzero(mtag + 1, sizeof(struct pf_divert)); 6470 m_tag_prepend(m, mtag); 6471 } 6472 6473 return ((struct pf_divert *)(mtag + 1)); 6474 } 6475 6476 #ifdef INET 6477 6478 /* 6479 * WARNING: pf_token held shared on entry, THIS IS CPU LOCALIZED CODE 6480 */ 6481 int 6482 pf_test(int dir, struct ifnet *ifp, struct mbuf **m0, 6483 struct ether_header *eh, struct inpcb *inp) 6484 { 6485 struct pfi_kif *kif; 6486 u_short action, reason = 0, log = 0; 6487 struct mbuf *m = *m0; 6488 struct ip *h = NULL; 6489 struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr; 6490 struct pf_state *s = NULL; 6491 struct pf_ruleset *ruleset = NULL; 6492 struct pf_pdesc pd; 6493 int off, dirndx; 6494 #ifdef ALTQ 6495 int pqid = 0; 6496 #endif 6497 6498 if (m->m_pkthdr.fw_flags & IPFW_MBUF_CONTINUE) { 6499 /* Skip us; continue in ipfw. */ 6500 return (PF_PASS); 6501 } 6502 6503 if (!pf_status.running) 6504 return (PF_PASS); 6505 6506 memset(&pd, 0, sizeof(pd)); 6507 #ifdef foo 6508 if (ifp->if_type == IFT_CARP && ifp->if_carpdev) 6509 kif = (struct pfi_kif *)ifp->if_carpdev->if_pf_kif; 6510 else 6511 #endif 6512 kif = (struct pfi_kif *)ifp->if_pf_kif; 6513 6514 if (kif == NULL) { 6515 DPFPRINTF(PF_DEBUG_URGENT, 6516 ("pf_test: kif == NULL, if_xname %s\n", ifp->if_xname)); 6517 return (PF_DROP); 6518 } 6519 if (kif->pfik_flags & PFI_IFLAG_SKIP) 6520 return (PF_PASS); 6521 6522 #ifdef DIAGNOSTIC 6523 if ((m->m_flags & M_PKTHDR) == 0) 6524 panic("non-M_PKTHDR is passed to pf_test"); 6525 #endif /* DIAGNOSTIC */ 6526 6527 if (m->m_pkthdr.len < (int)sizeof(*h)) { 6528 action = PF_DROP; 6529 REASON_SET(&reason, PFRES_SHORT); 6530 log = 1; 6531 goto done; 6532 } 6533 6534 /* 6535 * DragonFly doesn't zero the auxiliary pkthdr fields, only fw_flags, 6536 * so make sure pf.flags is clear.
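 * A packet already carrying PF_MBUF_TAGGED has been through the filter before (the PF_FASTROUTE path in pf_route6() tags packets it re-queues via ip6_output()), so it is passed through here without another evaluation.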
6537 */ 6538 if (m->m_pkthdr.fw_flags & PF_MBUF_TAGGED) 6539 return (PF_PASS); 6540 m->m_pkthdr.pf.flags = 0; 6541 /* Re-Check when updating to > 4.4 */ 6542 m->m_pkthdr.pf.statekey = NULL; 6543 6544 /* We do IP header normalization and packet reassembly here */ 6545 if (pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) { 6546 action = PF_DROP; 6547 goto done; 6548 } 6549 m = *m0; /* pf_normalize messes with m0 */ 6550 h = mtod(m, struct ip *); 6551 6552 off = h->ip_hl << 2; 6553 if (off < (int)sizeof(*h)) { 6554 action = PF_DROP; 6555 REASON_SET(&reason, PFRES_SHORT); 6556 log = 1; 6557 goto done; 6558 } 6559 6560 pd.src = (struct pf_addr *)&h->ip_src; 6561 pd.dst = (struct pf_addr *)&h->ip_dst; 6562 pd.sport = pd.dport = NULL; 6563 pd.ip_sum = &h->ip_sum; 6564 pd.proto_sum = NULL; 6565 pd.proto = h->ip_p; 6566 pd.dir = dir; 6567 pd.sidx = (dir == PF_IN) ? 0 : 1; 6568 pd.didx = (dir == PF_IN) ? 1 : 0; 6569 pd.af = AF_INET; 6570 pd.tos = h->ip_tos; 6571 pd.tot_len = h->ip_len; 6572 pd.eh = eh; 6573 6574 /* handle fragments that didn't get reassembled by normalization */ 6575 if (h->ip_off & (IP_MF | IP_OFFMASK)) { 6576 action = pf_test_fragment(&r, dir, kif, m, h, 6577 &pd, &a, &ruleset); 6578 goto done; 6579 } 6580 6581 switch (h->ip_p) { 6582 6583 case IPPROTO_TCP: { 6584 struct tcphdr th; 6585 6586 pd.hdr.tcp = &th; 6587 if (!pf_pull_hdr(m, off, &th, sizeof(th), 6588 &action, &reason, AF_INET)) { 6589 log = action != PF_PASS; 6590 goto done; 6591 } 6592 pd.p_len = pd.tot_len - off - (th.th_off << 2); 6593 #ifdef ALTQ 6594 if ((th.th_flags & TH_ACK) && pd.p_len == 0) 6595 pqid = 1; 6596 #endif 6597 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd); 6598 if (action == PF_DROP) 6599 goto done; 6600 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd, 6601 &reason); 6602 if (action == PF_PASS) { 6603 r = s->rule.ptr; 6604 a = s->anchor.ptr; 6605 log = s->log; 6606 } else if (s == NULL) { 6607 action = pf_test_rule(&r, &s, dir, kif, 6608 m, off, h, &pd, &a, 6609 &ruleset, NULL, inp); 6610 } 6611 break; 6612 } 6613 6614 case IPPROTO_UDP: { 6615 struct udphdr uh; 6616 6617 pd.hdr.udp = &uh; 6618 if (!pf_pull_hdr(m, off, &uh, sizeof(uh), 6619 &action, &reason, AF_INET)) { 6620 log = action != PF_PASS; 6621 goto done; 6622 } 6623 if (uh.uh_dport == 0 || 6624 ntohs(uh.uh_ulen) > m->m_pkthdr.len - off || 6625 ntohs(uh.uh_ulen) < sizeof(struct udphdr)) { 6626 action = PF_DROP; 6627 REASON_SET(&reason, PFRES_SHORT); 6628 goto done; 6629 } 6630 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd); 6631 if (action == PF_PASS) { 6632 r = s->rule.ptr; 6633 a = s->anchor.ptr; 6634 log = s->log; 6635 } else if (s == NULL) { 6636 action = pf_test_rule(&r, &s, dir, kif, 6637 m, off, h, &pd, &a, 6638 &ruleset, NULL, inp); 6639 } 6640 break; 6641 } 6642 6643 case IPPROTO_ICMP: { 6644 struct icmp ih; 6645 6646 pd.hdr.icmp = &ih; 6647 if (!pf_pull_hdr(m, off, &ih, ICMP_MINLEN, 6648 &action, &reason, AF_INET)) { 6649 log = action != PF_PASS; 6650 goto done; 6651 } 6652 action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd, 6653 &reason); 6654 if (action == PF_PASS) { 6655 r = s->rule.ptr; 6656 a = s->anchor.ptr; 6657 log = s->log; 6658 } else if (s == NULL) { 6659 action = pf_test_rule(&r, &s, dir, kif, 6660 m, off, h, &pd, &a, 6661 &ruleset, NULL, inp); 6662 } 6663 break; 6664 } 6665 6666 default: 6667 action = pf_test_state_other(&s, dir, kif, m, &pd); 6668 if (action == PF_PASS) { 6669 r = s->rule.ptr; 6670 a = s->anchor.ptr; 6671 log = s->log; 6672 } else if (s == NULL) { 6673 action = 
pf_test_rule(&r, &s, dir, kif, m, off, h, 6674 &pd, &a, &ruleset, NULL, inp); 6675 } 6676 break; 6677 } 6678 6679 done: 6680 if (action == PF_PASS && h->ip_hl > 5 && 6681 !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) { 6682 action = PF_DROP; 6683 REASON_SET(&reason, PFRES_IPOPTIONS); 6684 log = 1; 6685 DPFPRINTF(PF_DEBUG_MISC, 6686 ("pf: dropping packet with ip options\n")); 6687 } 6688 6689 if ((s && s->tag) || r->rtableid) 6690 pf_tag_packet(m, s ? s->tag : 0, r->rtableid); 6691 6692 #if 0 6693 if (dir == PF_IN && s && s->key[PF_SK_STACK]) 6694 m->m_pkthdr.pf.statekey = s->key[PF_SK_STACK]; 6695 #endif 6696 6697 #ifdef ALTQ 6698 /* 6699 * Generate a hash code and qid request for ALTQ. A qid of 0 6700 * is allowed and will cause altq to select the default queue. 6701 */ 6702 if (action == PF_PASS) { 6703 m->m_pkthdr.fw_flags |= PF_MBUF_STRUCTURE; 6704 if (pqid || (pd.tos & IPTOS_LOWDELAY)) 6705 m->m_pkthdr.pf.qid = r->pqid; 6706 else 6707 m->m_pkthdr.pf.qid = r->qid; 6708 m->m_pkthdr.pf.ecn_af = AF_INET; 6709 m->m_pkthdr.pf.hdr = h; 6710 /* add connection hash for fairq */ 6711 if (s) { 6712 /* for fairq */ 6713 m->m_pkthdr.pf.state_hash = s->hash; 6714 m->m_pkthdr.pf.flags |= PF_TAG_STATE_HASHED; 6715 } 6716 } 6717 #endif /* ALTQ */ 6718 6719 /* 6720 * connections redirected to loopback should not match sockets 6721 * bound specifically to loopback due to security implications, 6722 * see tcp_input() and in_pcblookup_listen(). 6723 */ 6724 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP || 6725 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL && 6726 (s->nat_rule.ptr->action == PF_RDR || 6727 s->nat_rule.ptr->action == PF_BINAT) && 6728 (ntohl(pd.dst->v4.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) 6729 m->m_pkthdr.pf.flags |= PF_TAG_TRANSLATE_LOCALHOST; 6730 6731 if (dir == PF_IN && action == PF_PASS && r->divert.port) { 6732 struct pf_divert *divert; 6733 6734 if ((divert = pf_get_divert(m))) { 6735 m->m_pkthdr.pf.flags |= PF_TAG_DIVERTED; 6736 divert->port = r->divert.port; 6737 divert->addr.ipv4 = r->divert.addr.v4; 6738 } 6739 } 6740 6741 if (log) { 6742 struct pf_rule *lr; 6743 6744 if (s != NULL && s->nat_rule.ptr != NULL && 6745 s->nat_rule.ptr->log & PF_LOG_ALL) 6746 lr = s->nat_rule.ptr; 6747 else 6748 lr = r; 6749 PFLOG_PACKET(kif, h, m, AF_INET, dir, reason, lr, a, ruleset, 6750 &pd); 6751 } 6752 6753 kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS] += pd.tot_len; 6754 kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS]++; 6755 6756 if (action == PF_PASS || r->action == PF_DROP) { 6757 dirndx = (dir == PF_OUT); 6758 r->packets[dirndx]++; 6759 r->bytes[dirndx] += pd.tot_len; 6760 if (a != NULL) { 6761 a->packets[dirndx]++; 6762 a->bytes[dirndx] += pd.tot_len; 6763 } 6764 if (s != NULL) { 6765 if (s->nat_rule.ptr != NULL) { 6766 s->nat_rule.ptr->packets[dirndx]++; 6767 s->nat_rule.ptr->bytes[dirndx] += pd.tot_len; 6768 } 6769 if (s->src_node != NULL) { 6770 s->src_node->packets[dirndx]++; 6771 s->src_node->bytes[dirndx] += pd.tot_len; 6772 } 6773 if (s->nat_src_node != NULL) { 6774 s->nat_src_node->packets[dirndx]++; 6775 s->nat_src_node->bytes[dirndx] += pd.tot_len; 6776 } 6777 dirndx = (dir == s->direction) ? 0 : 1; 6778 s->packets[dirndx]++; 6779 s->bytes[dirndx] += pd.tot_len; 6780 } 6781 tr = r; 6782 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule; 6783 if (nr != NULL && r == &pf_default_rule) 6784 tr = nr; 6785 if (tr->src.addr.type == PF_ADDR_TABLE) 6786 pfr_update_stats(tr->src.addr.p.tbl, 6787 (s == NULL) ? 
pd.src : 6788 &s->key[(s->direction == PF_IN)]-> 6789 addr[(s->direction == PF_OUT)], 6790 pd.af, pd.tot_len, dir == PF_OUT, 6791 r->action == PF_PASS, tr->src.neg); 6792 if (tr->dst.addr.type == PF_ADDR_TABLE) 6793 pfr_update_stats(tr->dst.addr.p.tbl, 6794 (s == NULL) ? pd.dst : 6795 &s->key[(s->direction == PF_IN)]-> 6796 addr[(s->direction == PF_IN)], 6797 pd.af, pd.tot_len, dir == PF_OUT, 6798 r->action == PF_PASS, tr->dst.neg); 6799 } 6800 6801 6802 if (action == PF_SYNPROXY_DROP) { 6803 m_freem(*m0); 6804 *m0 = NULL; 6805 action = PF_PASS; 6806 } else if (r->rt) { 6807 /* pf_route can free the mbuf causing *m0 to become NULL */ 6808 pf_route(m0, r, dir, kif->pfik_ifp, s, &pd); 6809 } 6810 6811 return (action); 6812 } 6813 #endif /* INET */ 6814 6815 #ifdef INET6 6816 6817 /* 6818 * WARNING: pf_token held shared on entry, THIS IS CPU LOCALIZED CODE 6819 */ 6820 int 6821 pf_test6(int dir, struct ifnet *ifp, struct mbuf **m0, 6822 struct ether_header *eh, struct inpcb *inp) 6823 { 6824 struct pfi_kif *kif; 6825 u_short action, reason = 0, log = 0; 6826 struct mbuf *m = *m0, *n = NULL; 6827 struct ip6_hdr *h = NULL; 6828 struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr; 6829 struct pf_state *s = NULL; 6830 struct pf_ruleset *ruleset = NULL; 6831 struct pf_pdesc pd; 6832 int off, terminal = 0, dirndx, rh_cnt = 0; 6833 6834 if (!pf_status.running) 6835 return (PF_PASS); 6836 6837 memset(&pd, 0, sizeof(pd)); 6838 #ifdef foo 6839 if (ifp->if_type == IFT_CARP && ifp->if_carpdev) 6840 kif = (struct pfi_kif *)ifp->if_carpdev->if_pf_kif; 6841 else 6842 #endif 6843 kif = (struct pfi_kif *)ifp->if_pf_kif; 6844 6845 if (kif == NULL) { 6846 DPFPRINTF(PF_DEBUG_URGENT, 6847 ("pf_test6: kif == NULL, if_xname %s\n", ifp->if_xname)); 6848 return (PF_DROP); 6849 } 6850 if (kif->pfik_flags & PFI_IFLAG_SKIP) 6851 return (PF_PASS); 6852 6853 #ifdef DIAGNOSTIC 6854 if ((m->m_flags & M_PKTHDR) == 0) 6855 panic("non-M_PKTHDR is passed to pf_test6"); 6856 #endif /* DIAGNOSTIC */ 6857 6858 if (m->m_pkthdr.len < (int)sizeof(*h)) { 6859 action = PF_DROP; 6860 REASON_SET(&reason, PFRES_SHORT); 6861 log = 1; 6862 goto done; 6863 } 6864 6865 /* 6866 * DragonFly doesn't zero the auxiliary pkthdr fields, only fw_flags, 6867 * so make sure pf.flags is clear. 6868 */ 6869 if (m->m_pkthdr.fw_flags & PF_MBUF_TAGGED) 6870 return (PF_PASS); 6871 m->m_pkthdr.pf.flags = 0; 6872 /* Re-Check when updating to > 4.4 */ 6873 m->m_pkthdr.pf.statekey = NULL; 6874 6875 /* We do IP header normalization and packet reassembly here */ 6876 if (pf_normalize_ip6(m0, dir, kif, &reason, &pd) != PF_PASS) { 6877 action = PF_DROP; 6878 goto done; 6879 } 6880 m = *m0; /* pf_normalize messes with m0 */ 6881 h = mtod(m, struct ip6_hdr *); 6882 6883 #if 1 6884 /* 6885 * We do not support jumbograms yet; if we kept going, a zero ip6_plen 6886 * would lead to a bogus pd.tot_len below, so drop the packet for now. 6887 */ 6888 if (htons(h->ip6_plen) == 0) { 6889 action = PF_DROP; 6890 REASON_SET(&reason, PFRES_NORM); /*XXX*/ 6891 goto done; 6892 } 6893 #endif 6894 6895 pd.src = (struct pf_addr *)&h->ip6_src; 6896 pd.dst = (struct pf_addr *)&h->ip6_dst; 6897 pd.sport = pd.dport = NULL; 6898 pd.ip_sum = NULL; 6899 pd.proto_sum = NULL; 6900 pd.dir = dir; 6901 pd.sidx = (dir == PF_IN) ? 0 : 1; 6902 pd.didx = (dir == PF_IN) ?
1 : 0; 6903 pd.af = AF_INET6; 6904 pd.tos = 0; 6905 pd.tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr); 6906 pd.eh = eh; 6907 6908 off = ((caddr_t)h - m->m_data) + sizeof(struct ip6_hdr); 6909 pd.proto = h->ip6_nxt; 6910 do { 6911 switch (pd.proto) { 6912 case IPPROTO_FRAGMENT: 6913 action = pf_test_fragment(&r, dir, kif, m, h, 6914 &pd, &a, &ruleset); 6915 if (action == PF_DROP) 6916 REASON_SET(&reason, PFRES_FRAG); 6917 goto done; 6918 case IPPROTO_ROUTING: { 6919 struct ip6_rthdr rthdr; 6920 6921 if (rh_cnt++) { 6922 DPFPRINTF(PF_DEBUG_MISC, 6923 ("pf: IPv6 more than one rthdr\n")); 6924 action = PF_DROP; 6925 REASON_SET(&reason, PFRES_IPOPTIONS); 6926 log = 1; 6927 goto done; 6928 } 6929 if (!pf_pull_hdr(m, off, &rthdr, sizeof(rthdr), NULL, 6930 &reason, pd.af)) { 6931 DPFPRINTF(PF_DEBUG_MISC, 6932 ("pf: IPv6 short rthdr\n")); 6933 action = PF_DROP; 6934 REASON_SET(&reason, PFRES_SHORT); 6935 log = 1; 6936 goto done; 6937 } 6938 if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) { 6939 DPFPRINTF(PF_DEBUG_MISC, 6940 ("pf: IPv6 rthdr0\n")); 6941 action = PF_DROP; 6942 REASON_SET(&reason, PFRES_IPOPTIONS); 6943 log = 1; 6944 goto done; 6945 } 6946 /* FALLTHROUGH */ 6947 } 6948 case IPPROTO_AH: 6949 case IPPROTO_HOPOPTS: 6950 case IPPROTO_DSTOPTS: { 6951 /* get next header and header length */ 6952 struct ip6_ext opt6; 6953 6954 if (!pf_pull_hdr(m, off, &opt6, sizeof(opt6), 6955 NULL, &reason, pd.af)) { 6956 DPFPRINTF(PF_DEBUG_MISC, 6957 ("pf: IPv6 short opt\n")); 6958 action = PF_DROP; 6959 log = 1; 6960 goto done; 6961 } 6962 if (pd.proto == IPPROTO_AH) 6963 off += (opt6.ip6e_len + 2) * 4; 6964 else 6965 off += (opt6.ip6e_len + 1) * 8; 6966 pd.proto = opt6.ip6e_nxt; 6967 /* go to the next header */ 6968 break; 6969 } 6970 default: 6971 terminal++; 6972 break; 6973 } 6974 } while (!terminal); 6975 6976 /* if there's no routing header, use unmodified mbuf for checksumming */ 6977 if (!n) 6978 n = m; 6979 6980 switch (pd.proto) { 6981 6982 case IPPROTO_TCP: { 6983 struct tcphdr th; 6984 6985 pd.hdr.tcp = &th; 6986 if (!pf_pull_hdr(m, off, &th, sizeof(th), 6987 &action, &reason, AF_INET6)) { 6988 log = action != PF_PASS; 6989 goto done; 6990 } 6991 pd.p_len = pd.tot_len - off - (th.th_off << 2); 6992 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd); 6993 if (action == PF_DROP) 6994 goto done; 6995 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd, 6996 &reason); 6997 if (action == PF_PASS) { 6998 r = s->rule.ptr; 6999 a = s->anchor.ptr; 7000 log = s->log; 7001 } else if (s == NULL) { 7002 action = pf_test_rule(&r, &s, dir, kif, 7003 m, off, h, &pd, &a, 7004 &ruleset, NULL, inp); 7005 } 7006 break; 7007 } 7008 7009 case IPPROTO_UDP: { 7010 struct udphdr uh; 7011 7012 pd.hdr.udp = &uh; 7013 if (!pf_pull_hdr(m, off, &uh, sizeof(uh), 7014 &action, &reason, AF_INET6)) { 7015 log = action != PF_PASS; 7016 goto done; 7017 } 7018 if (uh.uh_dport == 0 || 7019 ntohs(uh.uh_ulen) > m->m_pkthdr.len - off || 7020 ntohs(uh.uh_ulen) < sizeof(struct udphdr)) { 7021 action = PF_DROP; 7022 REASON_SET(&reason, PFRES_SHORT); 7023 goto done; 7024 } 7025 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd); 7026 if (action == PF_PASS) { 7027 r = s->rule.ptr; 7028 a = s->anchor.ptr; 7029 log = s->log; 7030 } else if (s == NULL) { 7031 action = pf_test_rule(&r, &s, dir, kif, 7032 m, off, h, &pd, &a, 7033 &ruleset, NULL, inp); 7034 } 7035 break; 7036 } 7037 7038 case IPPROTO_ICMPV6: { 7039 struct icmp6_hdr ih; 7040 7041 pd.hdr.icmp6 = &ih; 7042 if (!pf_pull_hdr(m, off, &ih, sizeof(ih), 7043 &action,
&reason, AF_INET6)) { 7044 log = action != PF_PASS; 7045 goto done; 7046 } 7047 action = pf_test_state_icmp(&s, dir, kif, 7048 m, off, h, &pd, &reason); 7049 if (action == PF_PASS) { 7050 r = s->rule.ptr; 7051 a = s->anchor.ptr; 7052 log = s->log; 7053 } else if (s == NULL) { 7054 action = pf_test_rule(&r, &s, dir, kif, 7055 m, off, h, &pd, &a, 7056 &ruleset, NULL, inp); 7057 } 7058 break; 7059 } 7060 7061 default: 7062 action = pf_test_state_other(&s, dir, kif, m, &pd); 7063 if (action == PF_PASS) { 7064 r = s->rule.ptr; 7065 a = s->anchor.ptr; 7066 log = s->log; 7067 } else if (s == NULL) { 7068 action = pf_test_rule(&r, &s, dir, kif, m, off, h, 7069 &pd, &a, &ruleset, NULL, inp); 7070 } 7071 break; 7072 } 7073 7074 done: 7075 if (n != m) { 7076 m_freem(n); 7077 n = NULL; 7078 } 7079 7080 /* handle dangerous IPv6 extension headers. */ 7081 if (action == PF_PASS && rh_cnt && 7082 !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) { 7083 action = PF_DROP; 7084 REASON_SET(&reason, PFRES_IPOPTIONS); 7085 log = 1; 7086 DPFPRINTF(PF_DEBUG_MISC, 7087 ("pf: dropping packet with dangerous v6 headers\n")); 7088 } 7089 7090 if ((s && s->tag) || r->rtableid) 7091 pf_tag_packet(m, s ? s->tag : 0, r->rtableid); 7092 7093 #if 0 7094 if (dir == PF_IN && s && s->key[PF_SK_STACK]) 7095 m->m_pkthdr.pf.statekey = s->key[PF_SK_STACK]; 7096 #endif 7097 7098 #ifdef ALTQ 7099 /* 7100 * Generate a hash code and qid request for ALTQ. A qid of 0 7101 * is allowed and will cause altq to select the default queue. 7102 */ 7103 if (action == PF_PASS) { 7104 m->m_pkthdr.fw_flags |= PF_MBUF_STRUCTURE; 7105 if (pd.tos & IPTOS_LOWDELAY) 7106 m->m_pkthdr.pf.qid = r->pqid; 7107 else 7108 m->m_pkthdr.pf.qid = r->qid; 7109 m->m_pkthdr.pf.ecn_af = AF_INET6; 7110 m->m_pkthdr.pf.hdr = h; 7111 if (s) { 7112 /* for fairq */ 7113 m->m_pkthdr.pf.state_hash = s->hash; 7114 m->m_pkthdr.pf.flags |= PF_TAG_STATE_HASHED; 7115 } 7116 } 7117 #endif /* ALTQ */ 7118 7119 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP || 7120 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL && 7121 (s->nat_rule.ptr->action == PF_RDR || 7122 s->nat_rule.ptr->action == PF_BINAT) && 7123 IN6_IS_ADDR_LOOPBACK(&pd.dst->v6)) 7124 m->m_pkthdr.pf.flags |= PF_TAG_TRANSLATE_LOCALHOST; 7125 7126 if (dir == PF_IN && action == PF_PASS && r->divert.port) { 7127 struct pf_divert *divert; 7128 7129 if ((divert = pf_get_divert(m))) { 7130 m->m_pkthdr.pf.flags |= PF_TAG_DIVERTED; 7131 divert->port = r->divert.port; 7132 divert->addr.ipv6 = r->divert.addr.v6; 7133 } 7134 } 7135 7136 if (log) { 7137 struct pf_rule *lr; 7138 7139 if (s != NULL && s->nat_rule.ptr != NULL && 7140 s->nat_rule.ptr->log & PF_LOG_ALL) 7141 lr = s->nat_rule.ptr; 7142 else 7143 lr = r; 7144 PFLOG_PACKET(kif, h, m, AF_INET6, dir, reason, lr, a, ruleset, 7145 &pd); 7146 } 7147 7148 kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS] += pd.tot_len; 7149 kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS]++; 7150 7151 if (action == PF_PASS || r->action == PF_DROP) { 7152 dirndx = (dir == PF_OUT); 7153 r->packets[dirndx]++; 7154 r->bytes[dirndx] += pd.tot_len; 7155 if (a != NULL) { 7156 a->packets[dirndx]++; 7157 a->bytes[dirndx] += pd.tot_len; 7158 } 7159 if (s != NULL) { 7160 if (s->nat_rule.ptr != NULL) { 7161 s->nat_rule.ptr->packets[dirndx]++; 7162 s->nat_rule.ptr->bytes[dirndx] += pd.tot_len; 7163 } 7164 if (s->src_node != NULL) { 7165 s->src_node->packets[dirndx]++; 7166 s->src_node->bytes[dirndx] += pd.tot_len; 7167 } 7168 if (s->nat_src_node != NULL) 
{ 7169 s->nat_src_node->packets[dirndx]++; 7170 s->nat_src_node->bytes[dirndx] += pd.tot_len; 7171 } 7172 dirndx = (dir == s->direction) ? 0 : 1; 7173 s->packets[dirndx]++; 7174 s->bytes[dirndx] += pd.tot_len; 7175 } 7176 tr = r; 7177 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule; 7178 if (nr != NULL && r == &pf_default_rule) 7179 tr = nr; 7180 if (tr->src.addr.type == PF_ADDR_TABLE) 7181 pfr_update_stats(tr->src.addr.p.tbl, 7182 (s == NULL) ? pd.src : 7183 &s->key[(s->direction == PF_IN)]->addr[0], 7184 pd.af, pd.tot_len, dir == PF_OUT, 7185 r->action == PF_PASS, tr->src.neg); 7186 if (tr->dst.addr.type == PF_ADDR_TABLE) 7187 pfr_update_stats(tr->dst.addr.p.tbl, 7188 (s == NULL) ? pd.dst : 7189 &s->key[(s->direction == PF_IN)]->addr[1], 7190 pd.af, pd.tot_len, dir == PF_OUT, 7191 r->action == PF_PASS, tr->dst.neg); 7192 } 7193 7194 7195 if (action == PF_SYNPROXY_DROP) { 7196 m_freem(*m0); 7197 *m0 = NULL; 7198 action = PF_PASS; 7199 } else if (r->rt) 7200 /* pf_route6 can free the mbuf causing *m0 to become NULL */ 7201 pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd); 7202 7203 return (action); 7204 } 7205 #endif /* INET6 */ 7206 7207 int 7208 pf_check_congestion(struct ifqueue *ifq) 7209 { 7210 return (0); 7211 } 7212
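/*
 * Illustrative sketch only: a minimal example of how a hypothetical
 * input hook might drive pf_test() for an inbound IPv4 packet.  The
 * function name and its return convention are inventions for this
 * sketch; the real entry points live in the pfil/protocol glue
 * elsewhere in the tree.  As the WARNING above pf_test() notes,
 * pf_token must be held shared across the call.
 */
#if 0
static int
pf_example_check_in(struct ifnet *ifp, struct mbuf **mp)
{
	int action;

	lwkt_gettoken_shared(&pf_token);
	action = pf_test(PF_IN, ifp, mp, NULL, NULL);
	lwkt_reltoken(&pf_token);

	if (action != PF_PASS) {
		/*
		 * On a plain drop the caller still owns the mbuf;
		 * pf_test() only clears *mp itself when the packet
		 * was consumed (e.g. by a route-to rule).
		 */
		if (*mp != NULL) {
			m_freem(*mp);
			*mp = NULL;
		}
		return (1);	/* nonzero: packet was blocked */
	}
	return (0);	/* accepted; *mp may have been replaced or set to NULL */
}
#endif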