1 2 /* 3 * Copyright (C) 2012 by Darren Reed. 4 * 5 * See the IPFILTER.LICENCE file for details on licencing. 6 */ 7 #if defined(KERNEL) || defined(_KERNEL) 8 # undef KERNEL 9 # undef _KERNEL 10 # define KERNEL 1 11 # define _KERNEL 1 12 #endif 13 #include <sys/errno.h> 14 #include <sys/types.h> 15 #include <sys/param.h> 16 #include <sys/time.h> 17 #include <sys/file.h> 18 #if !defined(_KERNEL) 19 # include <stdio.h> 20 # include <string.h> 21 # include <stdlib.h> 22 # define _KERNEL 23 # include <sys/uio.h> 24 # undef _KERNEL 25 #endif 26 #if defined(_KERNEL) && defined(__FreeBSD__) 27 # include <sys/filio.h> 28 # include <sys/fcntl.h> 29 #else 30 # include <sys/ioctl.h> 31 #endif 32 # include <sys/protosw.h> 33 #include <sys/socket.h> 34 #if defined(_KERNEL) 35 # include <sys/systm.h> 36 # if !defined(__SVR4) 37 # include <sys/mbuf.h> 38 # endif 39 #endif 40 #if !defined(__SVR4) 41 # if defined(_KERNEL) 42 # include <sys/kernel.h> 43 # endif 44 #else 45 # include <sys/byteorder.h> 46 # ifdef _KERNEL 47 # include <sys/dditypes.h> 48 # endif 49 # include <sys/stream.h> 50 # include <sys/kmem.h> 51 #endif 52 #include <net/if.h> 53 #ifdef sun 54 # include <net/af.h> 55 #endif 56 #include <netinet/in.h> 57 #include <netinet/in_systm.h> 58 #include <netinet/ip.h> 59 # include <netinet/ip_var.h> 60 #include <netinet/tcp.h> 61 #include <netinet/udp.h> 62 #include <netinet/ip_icmp.h> 63 #include "netinet/ip_compat.h" 64 #include <netinet/tcpip.h> 65 #include "netinet/ip_fil.h" 66 #include "netinet/ip_nat.h" 67 #include "netinet/ip_frag.h" 68 #include "netinet/ip_state.h" 69 #include "netinet/ip_auth.h" 70 #include "netinet/ip_lookup.h" 71 #include "netinet/ip_proxy.h" 72 #include "netinet/ip_sync.h" 73 /* END OF INCLUDES */ 74 75 #if !defined(lint) 76 static const char sccsid[] = "@(#)ip_frag.c 1.11 3/24/96 (C) 1993-2000 Darren Reed"; 77 static const char rcsid[] = "@(#)$FreeBSD$"; 78 /* static const char rcsid[] = "@(#)$Id: ip_frag.c,v 2.77.2.12 2007/09/20 12:51:51 
darrenr Exp $"; */
#endif


/*
 * Forward declarations for the file-local worker routines.  When the
 * platform build defines USE_MUTEXES, each worker takes an extra
 * ipfrwlock_t pointer so the caller can tell it which table lock to use.
 */
#ifdef USE_MUTEXES
static ipfr_t *ipfr_frag_new(ipf_main_softc_t *, ipf_frag_softc_t *,
			     fr_info_t *, u_32_t, ipfr_t **,
			     ipfrwlock_t *);
static ipfr_t *ipf_frag_lookup(ipf_main_softc_t *, ipf_frag_softc_t *,
			       fr_info_t *, ipfr_t **, ipfrwlock_t *);
static void ipf_frag_deref(void *, ipfr_t **, ipfrwlock_t *);
static int ipf_frag_next(ipf_main_softc_t *, ipftoken_t *, ipfgeniter_t *,
			 ipfr_t **, ipfrwlock_t *);
#else
static ipfr_t *ipfr_frag_new(ipf_main_softc_t *, ipf_frag_softc_t *,
			     fr_info_t *, u_32_t, ipfr_t **);
static ipfr_t *ipf_frag_lookup(ipf_main_softc_t *, ipf_frag_softc_t *,
			       fr_info_t *, ipfr_t **);
static void ipf_frag_deref(void *, ipfr_t **);
static int ipf_frag_next(ipf_main_softc_t *, ipftoken_t *, ipfgeniter_t *,
			 ipfr_t **);
#endif
static void ipf_frag_delete(ipf_main_softc_t *, ipfr_t *, ipfr_t ***);
static void ipf_frag_free(ipf_frag_softc_t *, ipfr_t *);

/*
 * Shared "block quick" rule handed back by ipf_frag_known() for packets
 * flagged FI_BAD.  Initialised once in ipf_frag_main_load().
 */
static frentry_t ipfr_block;

/*
 * User-visible tuneables for the fragment cache:
 *   frag_size - number of hash buckets / max entries (read-only while
 *               the module is running: IPFT_WRDISABLED)
 *   frag_ttl  - lifetime of a cache entry, in ticks
 * The all-NULL entry terminates the array.
 */
static ipftuneable_t ipf_frag_tuneables[] = {
	{ { (void *)offsetof(ipf_frag_softc_t, ipfr_size) },
		"frag_size",		1,	0x7fffffff,
		stsizeof(ipf_frag_softc_t, ipfr_size),
		IPFT_WRDISABLED,	NULL,	NULL },
	{ { (void *)offsetof(ipf_frag_softc_t, ipfr_ttl) },
		"frag_ttl",		1,	0x7fffffff,
		stsizeof(ipf_frag_softc_t, ipfr_ttl),
		0,			NULL,	NULL },
	{ { NULL },
		NULL,			0,	0,
		0,
		0,			NULL,	NULL }
};

/* Bump a fragment statistic; FBUMPD additionally fires a DTrace probe. */
#define FBUMP(x)	softf->ipfr_stats.x++
#define FBUMPD(x)	do { softf->ipfr_stats.x++; DT(x); } while (0)


/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_main_load                                          */
/* Returns:     int - 0 == success, -1 == error                             */
/* Parameters:  Nil                                                         */
/*                                                                          */
/* Initialise the filter rule associated with blocked packets - everyone    */
/* can use it. 
*/ 129 /* ------------------------------------------------------------------------ */ 130 int 131 ipf_frag_main_load(void) 132 { 133 bzero((char *)&ipfr_block, sizeof(ipfr_block)); 134 ipfr_block.fr_flags = FR_BLOCK|FR_QUICK; 135 ipfr_block.fr_ref = 1; 136 137 return (0); 138 } 139 140 141 /* ------------------------------------------------------------------------ */ 142 /* Function: ipf_frag_main_unload */ 143 /* Returns: int - 0 == success, -1 == error */ 144 /* Parameters: Nil */ 145 /* */ 146 /* A null-op function that exists as a placeholder so that the flow in */ 147 /* other functions is obvious. */ 148 /* ------------------------------------------------------------------------ */ 149 int 150 ipf_frag_main_unload(void) 151 { 152 return (0); 153 } 154 155 156 /* ------------------------------------------------------------------------ */ 157 /* Function: ipf_frag_soft_create */ 158 /* Returns: void * - NULL = failure, else pointer to local context */ 159 /* Parameters: softc(I) - pointer to soft context main structure */ 160 /* */ 161 /* Allocate a new soft context structure to track fragment related info. 
 */
/* ------------------------------------------------------------------------ */
/*ARGSUSED*/
void *
ipf_frag_soft_create(ipf_main_softc_t *softc)
{
	ipf_frag_softc_t *softf;

	KMALLOC(softf, ipf_frag_softc_t *);
	if (softf == NULL)
		return (NULL);

	bzero((char *)softf, sizeof(*softf));

	RWLOCK_INIT(&softf->ipfr_ipidfrag, "frag ipid lock");
	RWLOCK_INIT(&softf->ipfr_frag, "ipf fragment rwlock");
	RWLOCK_INIT(&softf->ipfr_natfrag, "ipf NAT fragment rwlock");

	/*
	 * Take a private copy of the tuneable template and register it with
	 * the main context.  On any failure, soft_destroy unwinds both the
	 * locks and any partially-built tuneable array.
	 */
	softf->ipf_frag_tune = ipf_tune_array_copy(softf,
						   sizeof(ipf_frag_tuneables),
						   ipf_frag_tuneables);
	if (softf->ipf_frag_tune == NULL) {
		ipf_frag_soft_destroy(softc, softf);
		return (NULL);
	}
	if (ipf_tune_array_link(softc, softf->ipf_frag_tune) == -1) {
		ipf_frag_soft_destroy(softc, softf);
		return (NULL);
	}

	/*
	 * Defaults: IPFT_SIZE hash buckets, 60 second TTL.  ipfr_lock
	 * stays non-zero (cache disabled) until ipf_frag_soft_init runs.
	 * The tail pointers start out addressing their list heads.
	 */
	softf->ipfr_size = IPFT_SIZE;
	softf->ipfr_ttl = IPF_TTLVAL(60);
	softf->ipfr_lock = 1;
	softf->ipfr_tail = &softf->ipfr_list;
	softf->ipfr_nattail = &softf->ipfr_natlist;
	softf->ipfr_ipidtail = &softf->ipfr_ipidlist;

	return (softf);
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_soft_destroy                                       */
/* Returns:     Nil                                                         */
/* Parameters:  softc(I) - pointer to soft context main structure           */
/*              arg(I)   - pointer to local context to use                  */
/*                                                                          */
/* Tear down the fragment cache soft context: locks, tuneables and the     */
/* context structure itself. 
 */
/* ------------------------------------------------------------------------ */
void
ipf_frag_soft_destroy(ipf_main_softc_t *softc, void *arg)
{
	ipf_frag_softc_t *softf = arg;

	RW_DESTROY(&softf->ipfr_ipidfrag);
	RW_DESTROY(&softf->ipfr_frag);
	RW_DESTROY(&softf->ipfr_natfrag);

	/* Unlink and release the tuneable copy made in soft_create. */
	if (softf->ipf_frag_tune != NULL) {
		ipf_tune_array_unlink(softc, softf->ipf_frag_tune);
		KFREES(softf->ipf_frag_tune, sizeof(ipf_frag_tuneables));
		softf->ipf_frag_tune = NULL;
	}

	KFREE(softf);
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_soft_init                                          */
/* Returns:     int - 0 == success, -1/-2/-3 == which table allocation      */
/*                    failed (soft_fini frees any that did succeed)         */
/* Parameters:  softc(I) - pointer to soft context main structure           */
/*              arg(I)   - pointer to local context to use                  */
/*                                                                          */
/* Initialise the hash tables for the fragment cache lookups.               */
/* ------------------------------------------------------------------------ */
/*ARGSUSED*/
int
ipf_frag_soft_init(ipf_main_softc_t *softc, void *arg)
{
	ipf_frag_softc_t *softf = arg;

	/* Filter-rule fragment table. */
	KMALLOCS(softf->ipfr_heads, ipfr_t **,
		 softf->ipfr_size * sizeof(ipfr_t *));
	if (softf->ipfr_heads == NULL)
		return (-1);

	bzero((char *)softf->ipfr_heads, softf->ipfr_size * sizeof(ipfr_t *));

	/* NAT fragment table. */
	KMALLOCS(softf->ipfr_nattab, ipfr_t **,
		 softf->ipfr_size * sizeof(ipfr_t *));
	if (softf->ipfr_nattab == NULL)
		return (-2);

	bzero((char *)softf->ipfr_nattab, softf->ipfr_size * sizeof(ipfr_t *));

	/* IP ID rewrite table. */
	KMALLOCS(softf->ipfr_ipidtab, ipfr_t **,
		 softf->ipfr_size * sizeof(ipfr_t *));
	if (softf->ipfr_ipidtab == NULL)
		return (-3);

	bzero((char *)softf->ipfr_ipidtab,
	      softf->ipfr_size * sizeof(ipfr_t *));

	/* Open for business: clear the lock-out flag set by soft_create. */
	softf->ipfr_lock = 0;
	softf->ipfr_inited = 1;

	return (0);
}


/* ------------------------------------------------------------------------ */
/* Function:    
ipf_frag_soft_fini                                          */
/* Returns:     int - 0 == success, -1 == error                             */
/* Parameters:  softc(I) - pointer to soft context main structure           */
/*              arg(I)   - pointer to local context to use                  */
/*                                                                          */
/* Free all memory allocated whilst running and from initialisation.        */
/* ------------------------------------------------------------------------ */
int
ipf_frag_soft_fini(ipf_main_softc_t *softc, void *arg)
{
	ipf_frag_softc_t *softf = arg;

	/* Lock out new cache activity before tearing anything down. */
	softf->ipfr_lock = 1;

	if (softf->ipfr_inited == 1) {
		ipf_frag_clear(softc);

		softf->ipfr_inited = 0;
	}

	/* Release whichever hash tables soft_init managed to allocate. */
	if (softf->ipfr_heads != NULL)
		KFREES(softf->ipfr_heads,
		       softf->ipfr_size * sizeof(ipfr_t *));
	softf->ipfr_heads = NULL;

	if (softf->ipfr_nattab != NULL)
		KFREES(softf->ipfr_nattab,
		       softf->ipfr_size * sizeof(ipfr_t *));
	softf->ipfr_nattab = NULL;

	if (softf->ipfr_ipidtab != NULL)
		KFREES(softf->ipfr_ipidtab,
		       softf->ipfr_size * sizeof(ipfr_t *));
	softf->ipfr_ipidtab = NULL;

	return (0);
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_setlock                                            */
/* Returns:     Nil                                                         */
/* Parameters:  arg(I) - pointer to local context to use                    */
/*              tmp(I) - new value for lock                                 */
/*                                                                          */
/* Stub function that allows for external manipulation of ipfr_lock         */
/* ------------------------------------------------------------------------ */
void
ipf_frag_setlock(void *arg, int tmp)
{
	ipf_frag_softc_t *softf = arg;

	softf->ipfr_lock = tmp;
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_stats                                              */
/* Returns:     ipfrstat_t* - pointer to struct with current frag stats     */
/* Parameters:  arg(I) - pointer to local context to use                    */
/*                                                                          */
/* Updates ipfr_stats with current information and returns a pointer to it  */
/* 
------------------------------------------------------------------------ */
ipfrstat_t *
ipf_frag_stats(void *arg)
{
	ipf_frag_softc_t *softf = arg;

	/* Refresh the table pointers before handing the stats back. */
	softf->ipfr_stats.ifs_table = softf->ipfr_heads;
	softf->ipfr_stats.ifs_nattab = softf->ipfr_nattab;
	return (&softf->ipfr_stats);
}


/* ------------------------------------------------------------------------ */
/* Function:    ipfr_frag_new                                               */
/* Returns:     ipfr_t * - pointer to fragment cache state info or NULL     */
/* Parameters:  fin(I)   - pointer to packet information                    */
/*              table(I) - pointer to frag table to add to                  */
/*              lock(I)  - pointer to lock to get a write hold of           */
/*                                                                          */
/* Add a new entry to the fragment cache, registering it as having come     */
/* through this box, with the result of the filter operation.               */
/*                                                                          */
/* If this function succeeds, it returns with a write lock held on "lock".  */
/* If it fails, no lock is held on return.                                  */
/* ------------------------------------------------------------------------ */
static ipfr_t *
ipfr_frag_new(ipf_main_softc_t *softc, ipf_frag_softc_t *softf,
	fr_info_t *fin, u_32_t pass, ipfr_t *table[]
#ifdef USE_MUTEXES
	, ipfrwlock_t *lock
#endif
	)
{
	ipfr_t *fra, frag, *fran;
	u_int idx, off;
	frentry_t *fr;

	/* Refuse to grow past the configured table size. */
	if (softf->ipfr_stats.ifs_inuse >= softf->ipfr_size) {
		FBUMPD(ifs_maximum);
		return (NULL);
	}

	/* Only genuine, non-bad fragments may seed a cache entry. */
	if ((fin->fin_flx & (FI_FRAG|FI_BAD)) != FI_FRAG) {
		FBUMPD(ifs_newbad);
		return (NULL);
	}

	/* Strict ordering requires the first fragment to arrive first. */
	if (pass & FR_FRSTRICT) {
		if (fin->fin_off != 0) {
			FBUMPD(ifs_newrestrictnot0);
			return (NULL);
		}
	}

	/*
	 * Build the comparison key on the stack and accumulate the hash
	 * index from the same fields (version, protocol, IP ID, addresses).
	 */
	memset(&frag, 0, sizeof(frag));
	frag.ipfr_v = fin->fin_v;
	idx = fin->fin_v;
	frag.ipfr_p = fin->fin_p;
	idx += fin->fin_p;
	frag.ipfr_id = fin->fin_id;
	idx += fin->fin_id;
	frag.ipfr_source = fin->fin_fi.fi_src;
	idx += frag.ipfr_src.s_addr;
	frag.ipfr_dest = fin->fin_fi.fi_dst;
	idx += frag.ipfr_dst.s_addr;
	frag.ipfr_ifp = fin->fin_ifp;
	idx *= 127;
	idx %= softf->ipfr_size;

	frag.ipfr_optmsk = fin->fin_fi.fi_optmsk & IPF_OPTCOPY;
	frag.ipfr_secmsk = fin->fin_fi.fi_secmsk;
	frag.ipfr_auth = fin->fin_fi.fi_auth;

	/*
	 * For the first fragment, remember where its payload ends (in
	 * 8-byte units) so later fragments can be checked for overlap.
	 */
	off = fin->fin_off >> 3;
	if (off == 0) {
		char *ptr;
		int end;

#ifdef USE_INET6
		if (fin->fin_v == 6) {

			ptr = (char *)fin->fin_fraghdr +
			      sizeof(struct ip6_frag);
		} else
#endif
		{
			ptr = fin->fin_dp;
		}
		end = fin->fin_plen - (ptr - (char *)fin->fin_ip);
		frag.ipfr_firstend = end >> 3;
	} else {
		frag.ipfr_firstend = 0;
	}

	/*
	 * allocate some memory, if possible, if not, just record that we
	 * failed to do so.  Allocate before taking the lock so the write
	 * hold is not kept across the allocator.
	 */
	KMALLOC(fran, ipfr_t *);
	if (fran == NULL) {
		FBUMPD(ifs_nomem);
		return (NULL);
	}
	memset(fran, 0, sizeof(*fran));

	WRITE_ENTER(lock);

	/*
	 * first, make sure it isn't already there...
	 */
	for (fra = table[idx]; (fra != NULL); fra = fra->ipfr_hnext)
		if (!bcmp((char *)&frag.ipfr_ifp, (char *)&fra->ipfr_ifp,
			  IPFR_CMPSZ)) {
			RWLOCK_EXIT(lock);
			FBUMPD(ifs_exists);
			KFREE(fran);
			return (NULL);
		}

	fra = fran;
	fran = NULL;
	fr = fin->fin_fr;
	fra->ipfr_rule = fr;
	/* The cache entry holds a reference on the rule that matched. */
	if (fr != NULL) {
		MUTEX_ENTER(&fr->fr_lock);
		fr->fr_ref++;
		MUTEX_EXIT(&fr->fr_lock);
	}

	/*
	 * Insert the fragment into the fragment table, copy the struct used
	 * in the search using bcopy rather than reassign each field.
	 * Set the ttl to the default.
	 */
	if ((fra->ipfr_hnext = table[idx]) != NULL)
		table[idx]->ipfr_hprev = &fra->ipfr_hnext;
	fra->ipfr_hprev = table + idx;
	fra->ipfr_data = NULL;
	table[idx] = fra;
	bcopy((char *)&frag.ipfr_ifp, (char *)&fra->ipfr_ifp, IPFR_CMPSZ);
	fra->ipfr_v = fin->fin_v;
	fra->ipfr_p = fin->fin_p;
	fra->ipfr_ttl = softc->ipf_ticks + softf->ipfr_ttl;
	fra->ipfr_firstend = frag.ipfr_firstend;

	/*
	 * Compute the offset of the expected start of the next packet.
	 */
	if (off == 0)
		fra->ipfr_seen0 = 1;
	fra->ipfr_off = off + (fin->fin_dlen >> 3);
	fra->ipfr_pass = pass;
	fra->ipfr_ref = 1;
	fra->ipfr_pkts = 1;
	fra->ipfr_bytes = fin->fin_plen;
	FBUMP(ifs_inuse);
	FBUMP(ifs_new);
	/* NOTE: returns with the write lock on "lock" still held. */
	return (fra);
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_new                                                */
/* Returns:     int - 0 == success, -1 == error                             */
/* Parameters:  fin(I) - pointer to packet information                      */
/*                                                                          */
/* Add a new entry to the fragment cache table based on the current packet  */
/* ------------------------------------------------------------------------ */
int
ipf_frag_new(ipf_main_softc_t *softc, fr_info_t *fin, u_32_t pass)
{
	ipf_frag_softc_t *softf = softc->ipf_frag_soft;
	ipfr_t *fra;

	if (softf->ipfr_lock != 0)
		return (-1);

#ifdef USE_MUTEXES
	fra = ipfr_frag_new(softc, softf, fin, pass, softf->ipfr_heads,
			    &softc->ipf_frag);
#else
	fra = ipfr_frag_new(softc, softf, fin, pass, softf->ipfr_heads);
#endif
	/*
	 * On success ipfr_frag_new returns holding the write lock, so the
	 * list append below is safe; drop the lock once linked in.
	 */
	if (fra != NULL) {
		*softf->ipfr_tail = fra;
		fra->ipfr_prev = softf->ipfr_tail;
		softf->ipfr_tail = &fra->ipfr_next;
		fra->ipfr_next = NULL;
		RWLOCK_EXIT(&softc->ipf_frag);
	}
	return (fra ? 0 : -1);
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_natnew                                             */
/* Returns:     int - 0 == success, -1 == error                             */
/* Parameters:  fin(I) - pointer to packet information                      */
/*              nat(I) - pointer to NAT structure                           */
/*                                                                          */
/* Create a new NAT fragment cache entry based on the current packet and    */
/* the NAT structure for this "session".                                    */
/* ------------------------------------------------------------------------ */
int
ipf_frag_natnew(ipf_main_softc_t *softc, fr_info_t *fin, u_32_t pass,
	nat_t *nat)
{
	ipf_frag_softc_t *softf = softc->ipf_frag_soft;
	ipfr_t *fra;

	if (softf->ipfr_lock != 0)
		return (0);

#ifdef USE_MUTEXES
	fra = ipfr_frag_new(softc, softf, fin, pass, softf->ipfr_nattab,
			    &softf->ipfr_natfrag);
#else
	fra = ipfr_frag_new(softc, softf, fin, pass, softf->ipfr_nattab);
#endif
	if (fra != NULL) {
		/* Cross-link the cache entry and the NAT session. */
		fra->ipfr_data = nat;
		nat->nat_data = fra;
		*softf->ipfr_nattail = fra;
		fra->ipfr_prev = softf->ipfr_nattail;
		softf->ipfr_nattail = &fra->ipfr_next;
		fra->ipfr_next = NULL;
		RWLOCK_EXIT(&softf->ipfr_natfrag);
		return (0);
	}
	return (-1);
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_ipidnew                                            */
/* Returns:     int - 0 == success, -1 == error                             */
/* Parameters:  fin(I)  - pointer to packet information                     */
/*              ipid(I) - new IP ID for this fragmented packet              */
/*                                                                          */
/* Create a new fragment cache entry for this packet and store, as a data   */
/* pointer, the new IP ID value. 
 */
/* ------------------------------------------------------------------------ */
int
ipf_frag_ipidnew(fr_info_t *fin, u_32_t ipid)
{
	ipf_main_softc_t *softc = fin->fin_main_soft;
	ipf_frag_softc_t *softf = softc->ipf_frag_soft;
	ipfr_t *fra;

	if (softf->ipfr_lock)
		return (0);

#ifdef USE_MUTEXES
	fra = ipfr_frag_new(softc, softf, fin, 0, softf->ipfr_ipidtab,
			    &softf->ipfr_ipidfrag);
#else
	fra = ipfr_frag_new(softc, softf, fin, 0, softf->ipfr_ipidtab);
#endif
	if (fra != NULL) {
		/* The IP ID is smuggled through the generic data pointer. */
		fra->ipfr_data = (void *)(intptr_t)ipid;
		*softf->ipfr_ipidtail = fra;
		fra->ipfr_prev = softf->ipfr_ipidtail;
		softf->ipfr_ipidtail = &fra->ipfr_next;
		fra->ipfr_next = NULL;
		RWLOCK_EXIT(&softf->ipfr_ipidfrag);
	}
	return (fra ? 0 : -1);
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_lookup                                             */
/* Returns:     ipfr_t * - pointer to ipfr_t structure if there's a         */
/*                         matching entry in the frag table, else NULL      */
/* Parameters:  fin(I)   - pointer to packet information                    */
/*              table(I) - pointer to fragment cache table to search        */
/*                                                                          */
/* Check the fragment cache to see if there is already a record of this     */
/* packet with its filter result known.                                     */
/*                                                                          */
/* If this function succeeds, it returns with a read lock held on "lock".   */
/* If it fails, no lock is held on return. 
 */
/* ------------------------------------------------------------------------ */
static ipfr_t *
ipf_frag_lookup(ipf_main_softc_t *softc, ipf_frag_softc_t *softf,
	fr_info_t *fin, ipfr_t *table[]
#ifdef USE_MUTEXES
	, ipfrwlock_t *lock
#endif
	)
{
	ipfr_t *f, frag;
	u_int idx;

	/*
	 * We don't want to let short packets match because they could be
	 * compromising the security of other rules that want to match on
	 * layer 4 fields (and can't because they have been fragmented off.)
	 * Why do this check here?  The counter acts as an indicator of this
	 * kind of attack, whereas if it was elsewhere, it wouldn't know if
	 * other matching packets had been seen.
	 */
	if (fin->fin_flx & FI_SHORT) {
		FBUMPD(ifs_short);
		return (NULL);
	}

	if ((fin->fin_flx & FI_BAD) != 0) {
		FBUMPD(ifs_bad);
		return (NULL);
	}

	/*
	 * For fragments, we record protocol, packet id, TOS and both IP#'s
	 * (these should all be the same for all fragments of a packet).
	 *
	 * build up a hash value to index the table with.  This must mirror
	 * the key/index computation in ipfr_frag_new exactly.
	 */
	memset(&frag, 0, sizeof(frag));
	frag.ipfr_v = fin->fin_v;
	idx = fin->fin_v;
	frag.ipfr_p = fin->fin_p;
	idx += fin->fin_p;
	frag.ipfr_id = fin->fin_id;
	idx += fin->fin_id;
	frag.ipfr_source = fin->fin_fi.fi_src;
	idx += frag.ipfr_src.s_addr;
	frag.ipfr_dest = fin->fin_fi.fi_dst;
	idx += frag.ipfr_dst.s_addr;
	frag.ipfr_ifp = fin->fin_ifp;
	idx *= 127;
	idx %= softf->ipfr_size;

	frag.ipfr_optmsk = fin->fin_fi.fi_optmsk & IPF_OPTCOPY;
	frag.ipfr_secmsk = fin->fin_fi.fi_secmsk;
	frag.ipfr_auth = fin->fin_fi.fi_auth;

	READ_ENTER(lock);

	/*
	 * check the table, careful to only compare the right amount of data
	 */
	for (f = table[idx]; f; f = f->ipfr_hnext) {
		if (!bcmp((char *)&frag.ipfr_ifp, (char *)&f->ipfr_ifp,
			  IPFR_CMPSZ)) {
			u_short off;

			/*
			 * XXX - We really need to be guarding against the
			 * retransmission of (src,dst,id,offset-range) here
			 * because a fragmented packet is never resent with
			 * the same IP ID# (or shouldn't).
			 */
			off = fin->fin_off >> 3;
			if (f->ipfr_seen0) {
				if (off == 0) {
					FBUMPD(ifs_retrans0);
					continue;
				}

				/*
				 * Case 3. See comment for frpr_fragment6.
				 * A later fragment overlapping data from
				 * the first one is marked bad and the
				 * search abandoned (falls through to the
				 * miss path below).
				 */
				if ((f->ipfr_firstend != 0) &&
				    (off < f->ipfr_firstend)) {
					FBUMP(ifs_overlap);
					DT2(ifs_overlap, u_short, off,
					    ipfr_t *, f);
					DT3(ipf_fi_bad_ifs_overlap,
					    fr_info_t *, fin, u_short, off,
					    ipfr_t *, f);
					fin->fin_flx |= FI_BAD;
					break;
				}
			} else if (off == 0)
				f->ipfr_seen0 = 1;

			/* Opportunistic move-to-front: only if the write
			 * upgrade can be had without blocking. */
			if (f != table[idx] && MUTEX_TRY_UPGRADE(lock)) {
				ipfr_t **fp;

				/*
				 * Move fragment info. to the top of the list
				 * to speed up searches.  First, delink...
				 */
				fp = f->ipfr_hprev;
				(*fp) = f->ipfr_hnext;
				if (f->ipfr_hnext != NULL)
					f->ipfr_hnext->ipfr_hprev = fp;
				/*
				 * Then put back at the top of the chain.
				 */
				f->ipfr_hnext = table[idx];
				table[idx]->ipfr_hprev = &f->ipfr_hnext;
				f->ipfr_hprev = table + idx;
				table[idx] = f;
				MUTEX_DOWNGRADE(lock);
			}

			/*
			 * If we've followed the fragments, and this is the
			 * last (in order), shrink expiration time.
			 */
			if (off == f->ipfr_off) {
				f->ipfr_off = (fin->fin_dlen >> 3) + off;

				/*
				 * Well, we could shrink the expiration time
				 * but only if every fragment has been seen
				 * in order up to this, the last. ipfr_badorder
				 * is used here to count those out of order
				 * and if it equals 0 when we get to the last
				 * fragment then we can assume all of the
				 * fragments have been seen and in order.
				 */
#if 0
				/*
				 * Doing this properly requires moving it to
				 * the head of the list which is infeasible.
				 */
				if ((more == 0) && (f->ipfr_badorder == 0))
					f->ipfr_ttl = softc->ipf_ticks + 1;
#endif
			} else {
				f->ipfr_badorder++;
				FBUMPD(ifs_unordered);
				if (f->ipfr_pass & FR_FRSTRICT) {
					FBUMPD(ifs_strict);
					continue;
				}
			}
			f->ipfr_pkts++;
			f->ipfr_bytes += fin->fin_plen;
			FBUMP(ifs_hits);
			/* NOTE: returns with the read lock still held. */
			return (f);
		}
	}

	RWLOCK_EXIT(lock);
	FBUMP(ifs_miss);
	return (NULL);
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_natknown                                           */
/* Returns:     nat_t* - pointer to 'parent' NAT structure if frag table    */
/*                       match found, else NULL                             */
/* Parameters:  fin(I)  - pointer to packet information                     */
/*                                                                          */
/* Functional interface for NAT lookups of the NAT fragment cache           */
/* ------------------------------------------------------------------------ */
nat_t *
ipf_frag_natknown(fr_info_t *fin)
{
	ipf_main_softc_t *softc = fin->fin_main_soft;
	ipf_frag_softc_t *softf = softc->ipf_frag_soft;
	nat_t *nat;
	ipfr_t *ipf;

	if ((softf->ipfr_lock) || !softf->ipfr_natlist)
		return (NULL);

#ifdef USE_MUTEXES
	ipf = ipf_frag_lookup(softc, softf, fin, softf->ipfr_nattab,
			      &softf->ipfr_natfrag);
#else
	ipf = ipf_frag_lookup(softc, softf, fin, softf->ipfr_nattab);
#endif
	if (ipf != NULL) {
		nat = ipf->ipfr_data;
		/*
		 * This is the last fragment for this packet.
		 */
		if ((ipf->ipfr_ttl == softc->ipf_ticks + 1) && (nat != NULL)) {
			nat->nat_data = NULL;
			ipf->ipfr_data = NULL;
		}
		/* Drop the lock ipf_frag_lookup returned holding. */
		RWLOCK_EXIT(&softf->ipfr_natfrag);
	} else
		nat = NULL;
	return (nat);
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_ipidknown                                          */
/* Returns:     u_32_t - IPv4 ID for this packet if match found, else       */
/*                       return 0xffffffff to indicate no match.            */
/* Parameters:  fin(I) - pointer to packet information                      */
/*                                                                          */
/* Functional interface for IP ID lookups of the IP ID fragment cache       */
/* ------------------------------------------------------------------------ */
u_32_t
ipf_frag_ipidknown(fr_info_t *fin)
{
	ipf_main_softc_t *softc = fin->fin_main_soft;
	ipf_frag_softc_t *softf = softc->ipf_frag_soft;
	ipfr_t *ipf;
	u_32_t id;

	if (softf->ipfr_lock || !softf->ipfr_ipidlist)
		return (0xffffffff);

#ifdef USE_MUTEXES
	ipf = ipf_frag_lookup(softc, softf, fin, softf->ipfr_ipidtab,
			      &softf->ipfr_ipidfrag);
#else
	ipf = ipf_frag_lookup(softc, softf, fin, softf->ipfr_ipidtab);
#endif
	if (ipf != NULL) {
		id = (u_32_t)(intptr_t)ipf->ipfr_data;
		/* Drop the lock ipf_frag_lookup returned holding. */
		RWLOCK_EXIT(&softf->ipfr_ipidfrag);
	} else
		id = 0xffffffff;
	return (id);
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_known                                              */
/* Returns:     frentry_t* - pointer to filter rule if a match is found in  */
/*                           the frag cache table, else NULL. 
 */
/* Parameters:  fin(I)   - pointer to packet information                    */
/*              passp(O) - pointer to where to store rule flags returned    */
/*                                                                          */
/* Functional interface for normal lookups of the fragment cache.  If a     */
/* match is found, return the rule pointer and flags from the rule, except  */
/* that if FR_LOGFIRST is set, reset FR_LOG.                                */
/* ------------------------------------------------------------------------ */
frentry_t *
ipf_frag_known(fr_info_t *fin, u_32_t *passp)
{
	ipf_main_softc_t *softc = fin->fin_main_soft;
	ipf_frag_softc_t *softf = softc->ipf_frag_soft;
	frentry_t *fr = NULL;
	ipfr_t *fra;
	u_32_t pass;

	if ((softf->ipfr_lock) || (softf->ipfr_list == NULL))
		return (NULL);

#ifdef USE_MUTEXES
	fra = ipf_frag_lookup(softc, softf, fin, softf->ipfr_heads,
			      &softc->ipf_frag);
#else
	fra = ipf_frag_lookup(softc, softf, fin, softf->ipfr_heads);
#endif
	if (fra != NULL) {
		/* Bad fragments get the shared block rule instead of the
		 * rule cached for the flow. */
		if (fin->fin_flx & FI_BAD) {
			fr = &ipfr_block;
			fin->fin_reason = FRB_BADFRAG;
			DT2(ipf_frb_badfrag, fr_info_t *, fin, uint, fra);
		} else {
			fr = fra->ipfr_rule;
		}
		fin->fin_fr = fr;
		if (fr != NULL) {
			pass = fr->fr_flags;
			if ((pass & FR_KEEPSTATE) != 0) {
				fin->fin_flx |= FI_STATE;
				/*
				 * Reset the keep state flag here so that we
				 * don't try and add a new state entry because
				 * of a match here.  That leads to blocking of
				 * the packet later because the add fails.
				 */
				pass &= ~FR_KEEPSTATE;
			}
			if ((pass & FR_LOGFIRST) != 0)
				pass &= ~(FR_LOGFIRST|FR_LOG);
			*passp = pass;
		}
		/* Drop the lock ipf_frag_lookup returned holding. */
		RWLOCK_EXIT(&softc->ipf_frag);
	}
	return (fr);
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_natforget                                          */
/* Returns:     Nil                                                         */
/* Parameters:  softc(I) - pointer to soft context main structure           */
/*              ptr(I)   - pointer to data structure                        */
/*                                                                          */
/* Search through all of the fragment cache entries for NAT and wherever a  */
/* pointer is found to match ptr, reset it to NULL.                         */
/* ------------------------------------------------------------------------ */
void
ipf_frag_natforget(ipf_main_softc_t *softc, void *ptr)
{
	ipf_frag_softc_t *softf = softc->ipf_frag_soft;
	ipfr_t *fr;

	WRITE_ENTER(&softf->ipfr_natfrag);
	for (fr = softf->ipfr_natlist; fr; fr = fr->ipfr_next)
		if (fr->ipfr_data == ptr)
			fr->ipfr_data = NULL;
	RWLOCK_EXIT(&softf->ipfr_natfrag);
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_delete                                             */
/* Returns:     Nil                                                         */
/* Parameters:  softc(I)  - pointer to soft context main structure          */
/*              fra(I)    - pointer to fragment structure to delete         */
/*              tail(IO)  - pointer to the pointer to the tail of the frag  */
/*                          list                                            */
/*                                                                          */
/* Remove a fragment cache table entry from the table & list.  Also free    */
/* the filter rule it is associated with it if it is no longer used as a    */
/* result of decreasing the reference count. 
 */
/* ------------------------------------------------------------------------ */
static void
ipf_frag_delete(ipf_main_softc_t *softc, ipfr_t *fra, ipfr_t ***tail)
{
	ipf_frag_softc_t *softf = softc->ipf_frag_soft;

	/* Unlink from the doubly-linked age list, fixing the tail if this
	 * entry was the last one. */
	if (fra->ipfr_next)
		fra->ipfr_next->ipfr_prev = fra->ipfr_prev;
	*fra->ipfr_prev = fra->ipfr_next;
	if (*tail == &fra->ipfr_next)
		*tail = fra->ipfr_prev;

	/* Unlink from the hash chain. */
	if (fra->ipfr_hnext)
		fra->ipfr_hnext->ipfr_hprev = fra->ipfr_hprev;
	*fra->ipfr_hprev = fra->ipfr_hnext;

	/* Release the rule reference taken in ipfr_frag_new. */
	if (fra->ipfr_rule != NULL) {
		(void) ipf_derefrule(softc, &fra->ipfr_rule);
	}

	/* Callers drop their reference before calling; only free once no
	 * references remain. */
	if (fra->ipfr_ref <= 0)
		ipf_frag_free(softf, fra);
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_free                                               */
/* Returns:     Nil                                                         */
/* Parameters:  softf(I) - pointer to fragment context information          */
/*              fra(I)   - pointer to fragment structure to free            */
/*                                                                          */
/* Free up a fragment cache entry and bump relevant statistics.             */
/* ------------------------------------------------------------------------ */
static void
ipf_frag_free(ipf_frag_softc_t *softf, ipfr_t *fra)
{
	KFREE(fra);
	FBUMP(ifs_expire);
	softf->ipfr_stats.ifs_inuse--;
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_clear                                              */
/* Returns:     Nil                                                         */
/* Parameters:  softc(I) - pointer to soft context main structure           */
/*                                                                          */
/* Free memory in use by fragment state information kept.  Do the normal    */
/* fragment state stuff first and then the NAT-fragment table. 
*/ 992 /* ------------------------------------------------------------------------ */ 993 void 994 ipf_frag_clear(ipf_main_softc_t *softc) 995 { 996 ipf_frag_softc_t *softf = softc->ipf_frag_soft; 997 ipfr_t *fra; 998 nat_t *nat; 999 1000 WRITE_ENTER(&softc->ipf_frag); 1001 while ((fra = softf->ipfr_list) != NULL) { 1002 fra->ipfr_ref--; 1003 ipf_frag_delete(softc, fra, &softf->ipfr_tail); 1004 } 1005 softf->ipfr_tail = &softf->ipfr_list; 1006 RWLOCK_EXIT(&softc->ipf_frag); 1007 1008 WRITE_ENTER(&softc->ipf_nat); 1009 WRITE_ENTER(&softf->ipfr_natfrag); 1010 while ((fra = softf->ipfr_natlist) != NULL) { 1011 nat = fra->ipfr_data; 1012 if (nat != NULL) { 1013 if (nat->nat_data == fra) 1014 nat->nat_data = NULL; 1015 } 1016 fra->ipfr_ref--; 1017 ipf_frag_delete(softc, fra, &softf->ipfr_nattail); 1018 } 1019 softf->ipfr_nattail = &softf->ipfr_natlist; 1020 RWLOCK_EXIT(&softf->ipfr_natfrag); 1021 RWLOCK_EXIT(&softc->ipf_nat); 1022 } 1023 1024 1025 /* ------------------------------------------------------------------------ */ 1026 /* Function: ipf_frag_expire */ 1027 /* Returns: Nil */ 1028 /* Parameters: softc(I) - pointer to soft context main structure */ 1029 /* */ 1030 /* Expire entries in the fragment cache table that have been there too long */ 1031 /* ------------------------------------------------------------------------ */ 1032 void 1033 ipf_frag_expire(ipf_main_softc_t *softc) 1034 { 1035 ipf_frag_softc_t *softf = softc->ipf_frag_soft; 1036 ipfr_t **fp, *fra; 1037 nat_t *nat; 1038 SPL_INT(s); 1039 1040 if (softf->ipfr_lock) 1041 return; 1042 1043 SPL_NET(s); 1044 WRITE_ENTER(&softc->ipf_frag); 1045 /* 1046 * Go through the entire table, looking for entries to expire, 1047 * which is indicated by the ttl being less than or equal to ipf_ticks. 
	 */
	for (fp = &softf->ipfr_list; ((fra = *fp) != NULL); ) {
		/*
		 * Stop at the first entry that has not yet expired; this
		 * assumes the list is kept ordered by ipfr_ttl.
		 */
		if (fra->ipfr_ttl > softc->ipf_ticks)
			break;
		fra->ipfr_ref--;
		ipf_frag_delete(softc, fra, &softf->ipfr_tail);
	}
	RWLOCK_EXIT(&softc->ipf_frag);

	/* Same sweep again for the IP-id tracking list. */
	WRITE_ENTER(&softf->ipfr_ipidfrag);
	for (fp = &softf->ipfr_ipidlist; ((fra = *fp) != NULL); ) {
		if (fra->ipfr_ttl > softc->ipf_ticks)
			break;
		fra->ipfr_ref--;
		ipf_frag_delete(softc, fra, &softf->ipfr_ipidtail);
	}
	RWLOCK_EXIT(&softf->ipfr_ipidfrag);

	/*
	 * Same again for the NAT table, except that if the structure also
	 * still points to a NAT structure, and the NAT structure points back
	 * at the one to be free'd, NULL the reference from the NAT struct.
	 * NOTE: We need to grab both mutex's early, and in this order so as
	 * to prevent a deadlock if both try to expire at the same time.
	 * The extra if() statement here is because it locks out all NAT
	 * operations - no need to do that if there are no entries in this
	 * list, right?
1075 */ 1076 if (softf->ipfr_natlist != NULL) { 1077 WRITE_ENTER(&softc->ipf_nat); 1078 WRITE_ENTER(&softf->ipfr_natfrag); 1079 for (fp = &softf->ipfr_natlist; ((fra = *fp) != NULL); ) { 1080 if (fra->ipfr_ttl > softc->ipf_ticks) 1081 break; 1082 nat = fra->ipfr_data; 1083 if (nat != NULL) { 1084 if (nat->nat_data == fra) 1085 nat->nat_data = NULL; 1086 } 1087 fra->ipfr_ref--; 1088 ipf_frag_delete(softc, fra, &softf->ipfr_nattail); 1089 } 1090 RWLOCK_EXIT(&softf->ipfr_natfrag); 1091 RWLOCK_EXIT(&softc->ipf_nat); 1092 } 1093 SPL_X(s); 1094 } 1095 1096 1097 /* ------------------------------------------------------------------------ */ 1098 /* Function: ipf_frag_pkt_next */ 1099 /* Returns: int - 0 == success, else error */ 1100 /* Parameters: softc(I) - pointer to soft context main structure */ 1101 /* token(I) - pointer to token information for this caller */ 1102 /* itp(I) - pointer to generic iterator from caller */ 1103 /* */ 1104 /* This function is used to step through the fragment cache list used for */ 1105 /* filter rules. The hard work is done by the more generic ipf_frag_next. 
*/ 1106 /* ------------------------------------------------------------------------ */ 1107 int 1108 ipf_frag_pkt_next(ipf_main_softc_t *softc, ipftoken_t *token, 1109 ipfgeniter_t *itp) 1110 { 1111 ipf_frag_softc_t *softf = softc->ipf_frag_soft; 1112 1113 #ifdef USE_MUTEXES 1114 return (ipf_frag_next(softc, token, itp, &softf->ipfr_list, 1115 &softf->ipfr_frag)); 1116 #else 1117 return (ipf_frag_next(softc, token, itp, &softf->ipfr_list)); 1118 #endif 1119 } 1120 1121 1122 /* ------------------------------------------------------------------------ */ 1123 /* Function: ipf_frag_nat_next */ 1124 /* Returns: int - 0 == success, else error */ 1125 /* Parameters: softc(I) - pointer to soft context main structure */ 1126 /* token(I) - pointer to token information for this caller */ 1127 /* itp(I) - pointer to generic iterator from caller */ 1128 /* */ 1129 /* This function is used to step through the fragment cache list used for */ 1130 /* NAT. The hard work is done by the more generic ipf_frag_next. 
*/ 1131 /* ------------------------------------------------------------------------ */ 1132 int 1133 ipf_frag_nat_next(ipf_main_softc_t *softc, ipftoken_t *token, 1134 ipfgeniter_t *itp) 1135 { 1136 ipf_frag_softc_t *softf = softc->ipf_frag_soft; 1137 1138 #ifdef USE_MUTEXES 1139 return (ipf_frag_next(softc, token, itp, &softf->ipfr_natlist, 1140 &softf->ipfr_natfrag)); 1141 #else 1142 return (ipf_frag_next(softc, token, itp, &softf->ipfr_natlist)); 1143 #endif 1144 } 1145 1146 /* ------------------------------------------------------------------------ */ 1147 /* Function: ipf_frag_next */ 1148 /* Returns: int - 0 == success, else error */ 1149 /* Parameters: softc(I) - pointer to soft context main structure */ 1150 /* token(I) - pointer to token information for this caller */ 1151 /* itp(I) - pointer to generic iterator from caller */ 1152 /* top(I) - top of the fragment list */ 1153 /* lock(I) - fragment cache lock */ 1154 /* */ 1155 /* This function is used to interate through the list of entries in the */ 1156 /* fragment cache. It increases the reference count on the one currently */ 1157 /* being returned so that the caller can come back and resume from it later.*/ 1158 /* */ 1159 /* This function is used for both the NAT fragment cache as well as the ipf */ 1160 /* fragment cache - hence the reason for passing in top and lock. 
*/ 1161 /* ------------------------------------------------------------------------ */ 1162 static int 1163 ipf_frag_next(ipf_main_softc_t *softc, ipftoken_t *token, ipfgeniter_t *itp, 1164 ipfr_t **top 1165 #ifdef USE_MUTEXES 1166 , ipfrwlock_t *lock 1167 #endif 1168 ) 1169 { 1170 ipfr_t *frag, *next, zero; 1171 int error = 0; 1172 1173 if (itp->igi_data == NULL) { 1174 IPFERROR(20001); 1175 return (EFAULT); 1176 } 1177 1178 if (itp->igi_nitems != 1) { 1179 IPFERROR(20003); 1180 return (EFAULT); 1181 } 1182 1183 frag = token->ipt_data; 1184 1185 READ_ENTER(lock); 1186 1187 if (frag == NULL) 1188 next = *top; 1189 else 1190 next = frag->ipfr_next; 1191 1192 if (next != NULL) { 1193 ATOMIC_INC(next->ipfr_ref); 1194 token->ipt_data = next; 1195 } else { 1196 bzero(&zero, sizeof(zero)); 1197 next = &zero; 1198 token->ipt_data = NULL; 1199 } 1200 if (next->ipfr_next == NULL) 1201 ipf_token_mark_complete(token); 1202 1203 RWLOCK_EXIT(lock); 1204 1205 error = COPYOUT(next, itp->igi_data, sizeof(*next)); 1206 if (error != 0) 1207 IPFERROR(20002); 1208 1209 if (frag != NULL) { 1210 #ifdef USE_MUTEXES 1211 ipf_frag_deref(softc, &frag, lock); 1212 #else 1213 ipf_frag_deref(softc, &frag); 1214 #endif 1215 } 1216 return (error); 1217 } 1218 1219 1220 /* ------------------------------------------------------------------------ */ 1221 /* Function: ipf_frag_pkt_deref */ 1222 /* Returns: Nil */ 1223 /* Parameters: softc(I) - pointer to soft context main structure */ 1224 /* data(I) - pointer to frag cache pointer */ 1225 /* */ 1226 /* This function is the external interface for dropping a reference to a */ 1227 /* fragment cache entry used by filter rules. 
*/ 1228 /* ------------------------------------------------------------------------ */ 1229 void 1230 ipf_frag_pkt_deref(ipf_main_softc_t *softc, void *data) 1231 { 1232 ipfr_t **frp = data; 1233 1234 #ifdef USE_MUTEXES 1235 ipf_frag_softc_t *softf = softc->ipf_frag_soft; 1236 1237 ipf_frag_deref(softc->ipf_frag_soft, frp, &softf->ipfr_frag); 1238 #else 1239 ipf_frag_deref(softc->ipf_frag_soft, frp); 1240 #endif 1241 } 1242 1243 1244 /* ------------------------------------------------------------------------ */ 1245 /* Function: ipf_frag_nat_deref */ 1246 /* Returns: Nil */ 1247 /* Parameters: softc(I) - pointer to soft context main structure */ 1248 /* data(I) - pointer to frag cache pointer */ 1249 /* */ 1250 /* This function is the external interface for dropping a reference to a */ 1251 /* fragment cache entry used by NAT table entries. */ 1252 /* ------------------------------------------------------------------------ */ 1253 void 1254 ipf_frag_nat_deref(ipf_main_softc_t *softc, void *data) 1255 { 1256 ipfr_t **frp = data; 1257 1258 #ifdef USE_MUTEXES 1259 ipf_frag_softc_t *softf = softc->ipf_frag_soft; 1260 1261 ipf_frag_deref(softc->ipf_frag_soft, frp, &softf->ipfr_natfrag); 1262 #else 1263 ipf_frag_deref(softc->ipf_frag_soft, frp); 1264 #endif 1265 } 1266 1267 1268 /* ------------------------------------------------------------------------ */ 1269 /* Function: ipf_frag_deref */ 1270 /* Returns: Nil */ 1271 /* Parameters: frp(IO) - pointer to fragment structure to deference */ 1272 /* lock(I) - lock associated with the fragment */ 1273 /* */ 1274 /* This function dereferences a fragment structure (ipfr_t). The pointer */ 1275 /* passed in will always be reset back to NULL, even if the structure is */ 1276 /* not freed, to enforce the notion that the caller is no longer entitled */ 1277 /* to use the pointer it is dropping the reference to. 
*/ 1278 /* ------------------------------------------------------------------------ */ 1279 static void 1280 ipf_frag_deref(void *arg, ipfr_t **frp 1281 #ifdef USE_MUTEXES 1282 , ipfrwlock_t *lock 1283 #endif 1284 ) 1285 { 1286 ipf_frag_softc_t *softf = arg; 1287 ipfr_t *fra; 1288 1289 fra = *frp; 1290 *frp = NULL; 1291 1292 WRITE_ENTER(lock); 1293 fra->ipfr_ref--; 1294 if (fra->ipfr_ref <= 0) 1295 ipf_frag_free(softf, fra); 1296 RWLOCK_EXIT(lock); 1297 } 1298