/*	$NetBSD: ip_reass.c,v 1.10 2016/04/26 08:44:44 ozaki-r Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ip_input.c	8.2 (Berkeley) 1/4/94
 */

/*
 * IP reassembly.
 *
 * Additive-Increase/Multiplicative-Decrease (AIMD) strategy for IP
 * reassembly queue buffer management.
 *
 * We keep a count of total IP fragments (NB: not fragmented packets)
 * awaiting reassembly (ip_nfrags) and a limit (ip_maxfrags) on fragments.
 * If ip_nfrags exceeds the ip_maxfrags limit, we drop half the total
 * fragments in the reassembly queues.  This AIMD policy avoids repeatedly
 * deleting single packets under heavy fragmentation load (e.g., from lossy
 * NFS peers).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ip_reass.c,v 1.10 2016/04/26 08:44:44 ozaki-r Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/if.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/in_proto.h>
#include <netinet/ip_private.h>
#include <netinet/in_var.h>
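
/*
 * Illustrative numbers for the AIMD policy above (an informal sketch,
 * not normative): with nmbclusters = 2048, ip_maxfrags is derived below
 * as 2048 / 4 = 512.  Should ip_nfrags climb to that limit, the
 * drop-half policy discards roughly 256 fragments -- the older half,
 * judged by TTL -- in a single sweep, instead of evicting packets one
 * at a time.
 */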

/*
 * IP reassembly queue structures.  Each fragment being reassembled is
 * attached to one of these structures.  They are timed out after the TTL
 * drops to 0, and may also be reclaimed if memory becomes tight.
 */

typedef struct ipfr_qent {
	TAILQ_ENTRY(ipfr_qent)	ipqe_q;
	struct ip *		ipqe_ip;
	struct mbuf *		ipqe_m;
	bool			ipqe_mff;
} ipfr_qent_t;

TAILQ_HEAD(ipfr_qent_head, ipfr_qent);

typedef struct ipfr_queue {
	LIST_ENTRY(ipfr_queue)	ipq_q;		/* to other reass headers */
	struct ipfr_qent_head	ipq_fragq;	/* queue of fragment entries */
	uint8_t			ipq_ttl;	/* time for reass q to live */
	uint8_t			ipq_p;		/* protocol of this fragment */
	uint16_t		ipq_id;		/* sequence id for reassembly */
	struct in_addr		ipq_src;
	struct in_addr		ipq_dst;
	uint16_t		ipq_nfrags;	/* frags in this queue entry */
	uint8_t			ipq_tos;	/* TOS of this fragment */
} ipfr_queue_t;

/*
 * Hash table of IP reassembly queues.
 */
#define	IPREASS_HASH_SHIFT	6
#define	IPREASS_HASH_SIZE	(1 << IPREASS_HASH_SHIFT)
#define	IPREASS_HASH_MASK	(IPREASS_HASH_SIZE - 1)
#define	IPREASS_HASH(x, y) \
    (((((x) & 0xf) | ((((x) >> 8) & 0xf) << 4)) ^ (y)) & IPREASS_HASH_MASK)

static LIST_HEAD(, ipfr_queue)	ip_frags[IPREASS_HASH_SIZE];
static pool_cache_t	ipfren_cache;
static kmutex_t		ipfr_lock;

/* Number of packets in reassembly queue and total number of fragments. */
static int	ip_nfragpackets;
static int	ip_nfrags;

/* Limits on packet and fragments. */
static int	ip_maxfragpackets;
static int	ip_maxfrags;

/*
 * Cached copy of nmbclusters.  If nmbclusters has changed, recalculate
 * the IP parameters derived from nmbclusters.
 */
static int	ip_nmbclusters;

/*
 * IP reassembly TTL machinery for multiplicative drop.
 */
static u_int	fragttl_histo[IPFRAGTTL + 1];

static struct sysctllog *ip_reass_sysctllog;

void		sysctl_ip_reass_setup(void);
static void	ip_nmbclusters_changed(void);

static struct mbuf *	ip_reass(ipfr_qent_t *, ipfr_queue_t *, u_int);
static u_int	ip_reass_ttl_decr(u_int ticks);
static void	ip_reass_drophalf(void);
static void	ip_freef(ipfr_queue_t *);
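
/*
 * A note on IPREASS_HASH (an explanatory sketch, not normative): it
 * folds eight bits of the source address with the IP ID, both taken
 * as they sit in memory (network byte order), and masks the result
 * down to one of the IPREASS_HASH_SIZE (64) buckets.  Which address
 * bits end up selected is host-endian dependent, which is harmless:
 * the hash only needs to be consistent within one running system,
 * and a collision merely lengthens a hash chain.
 */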

/*
 * ip_reass_init:
 *
 *	Initialization of the IP reassembly mechanism.
 */
void
ip_reass_init(void)
{
	int i;

	ipfren_cache = pool_cache_init(sizeof(ipfr_qent_t), coherency_unit,
	    0, 0, "ipfrenpl", NULL, IPL_NET, NULL, NULL, NULL);
	mutex_init(&ipfr_lock, MUTEX_DEFAULT, IPL_VM);

	for (i = 0; i < IPREASS_HASH_SIZE; i++) {
		LIST_INIT(&ip_frags[i]);
	}
	ip_maxfragpackets = 200;
	ip_maxfrags = 0;
	ip_nmbclusters_changed();

	sysctl_ip_reass_setup();
}

void
sysctl_ip_reass_setup(void)
{

	sysctl_createv(&ip_reass_sysctllog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "inet",
		SYSCTL_DESCR("PF_INET related settings"),
		NULL, 0, NULL, 0,
		CTL_NET, PF_INET, CTL_EOL);
	sysctl_createv(&ip_reass_sysctllog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "ip",
		SYSCTL_DESCR("IPv4 related settings"),
		NULL, 0, NULL, 0,
		CTL_NET, PF_INET, IPPROTO_IP, CTL_EOL);

	sysctl_createv(&ip_reass_sysctllog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		CTLTYPE_INT, "maxfragpackets",
		SYSCTL_DESCR("Maximum number of fragments to retain for "
			     "possible reassembly"),
		NULL, 0, &ip_maxfragpackets, 0,
		CTL_NET, PF_INET, IPPROTO_IP, IPCTL_MAXFRAGPACKETS, CTL_EOL);
}

#define	CHECK_NMBCLUSTER_PARAMS()				\
do {								\
	if (__predict_false(ip_nmbclusters != nmbclusters))	\
		ip_nmbclusters_changed();			\
} while (/*CONSTCOND*/0)

/*
 * Compute the IP limits derived from the value of nmbclusters.
 */
static void
ip_nmbclusters_changed(void)
{
	ip_maxfrags = nmbclusters / 4;
	ip_nmbclusters = nmbclusters;
}
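
/*
 * Usage sketch (illustrative): the packet limit registered above is
 * runtime-tunable, e.g. from a root shell:
 *
 *	sysctl -w net.inet.ip.maxfragpackets=400
 *
 * A value of 0 makes the code below reject all fragments, and a
 * negative value removes the bound entirely.
 */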

/*
 * ip_reass:
 *
 *	Take an incoming datagram fragment and try to reassemble it into a
 *	whole datagram.  If a chain for reassembly of this datagram already
 *	exists, then it is given as 'fp'; otherwise we have to make a chain.
 */
struct mbuf *
ip_reass(ipfr_qent_t *ipqe, ipfr_queue_t *fp, const u_int hash)
{
	struct ip *ip = ipqe->ipqe_ip, *qip;
	const int hlen = ip->ip_hl << 2;
	struct mbuf *m = ipqe->ipqe_m, *t;
	ipfr_qent_t *nq, *p, *q;
	int i, next;

	KASSERT(mutex_owned(&ipfr_lock));

	/*
	 * The presence of header sizes in mbufs would confuse the code below.
	 */
	m->m_data += hlen;
	m->m_len -= hlen;

#ifdef notyet
	/* Make sure the fragment limit is up-to-date. */
	CHECK_NMBCLUSTER_PARAMS();

	/* If we have too many fragments, drop the older half. */
	if (ip_nfrags >= ip_maxfrags) {
		ip_reass_drophalf();
	}
#endif

	/*
	 * We are about to add a fragment; increment the frag count.
	 */
	ip_nfrags++;

	/*
	 * If this is the first fragment to arrive, create a reassembly queue.
	 */
	if (fp == NULL) {
		/*
		 * Enforce an upper bound on the number of fragmented packets
		 * for which we attempt reassembly: a) if ip_maxfragpackets
		 * is 0, never accept fragments; b) if it is negative, accept
		 * all fragments without limitation.
		 */
		if (ip_maxfragpackets < 0)
			;
		else if (ip_nfragpackets >= ip_maxfragpackets) {
			goto dropfrag;
		}
		fp = malloc(sizeof(ipfr_queue_t), M_FTABLE, M_NOWAIT);
		if (fp == NULL) {
			goto dropfrag;
		}
		ip_nfragpackets++;
		TAILQ_INIT(&fp->ipq_fragq);
		fp->ipq_nfrags = 1;
		fp->ipq_ttl = IPFRAGTTL;
		fp->ipq_p = ip->ip_p;
		fp->ipq_id = ip->ip_id;
		fp->ipq_tos = ip->ip_tos;
		fp->ipq_src = ip->ip_src;
		fp->ipq_dst = ip->ip_dst;
		LIST_INSERT_HEAD(&ip_frags[hash], fp, ipq_q);
		p = NULL;
		goto insert;
	} else {
		fp->ipq_nfrags++;
	}

	/*
	 * Find a segment which begins after this one does.
	 */
	TAILQ_FOREACH(q, &fp->ipq_fragq, ipqe_q) {
		if (ntohs(q->ipqe_ip->ip_off) > ntohs(ip->ip_off))
			break;
	}
	if (q != NULL) {
		p = TAILQ_PREV(q, ipfr_qent_head, ipqe_q);
	} else {
		p = TAILQ_LAST(&fp->ipq_fragq, ipfr_qent_head);
	}

	/*
	 * If there is a preceding segment, it may provide some of our
	 * data already.  If so, drop the data from the incoming segment.
	 * If it provides all of our data, drop us.
	 */
	if (p != NULL) {
		i = ntohs(p->ipqe_ip->ip_off) + ntohs(p->ipqe_ip->ip_len) -
		    ntohs(ip->ip_off);
		if (i > 0) {
			if (i >= ntohs(ip->ip_len)) {
				goto dropfrag;
			}
			m_adj(ipqe->ipqe_m, i);
			ip->ip_off = htons(ntohs(ip->ip_off) + i);
			ip->ip_len = htons(ntohs(ip->ip_len) - i);
		}
	}

	/*
	 * While we overlap succeeding segments trim them or, if they are
	 * completely covered, dequeue them.
	 */
	while (q != NULL) {
		size_t end;

		qip = q->ipqe_ip;
		end = ntohs(ip->ip_off) + ntohs(ip->ip_len);
		if (end <= ntohs(qip->ip_off)) {
			break;
		}
		i = end - ntohs(qip->ip_off);
		if (i < ntohs(qip->ip_len)) {
			qip->ip_len = htons(ntohs(qip->ip_len) - i);
			qip->ip_off = htons(ntohs(qip->ip_off) + i);
			m_adj(q->ipqe_m, i);
			break;
		}
		nq = TAILQ_NEXT(q, ipqe_q);
		m_freem(q->ipqe_m);
		TAILQ_REMOVE(&fp->ipq_fragq, q, ipqe_q);
		pool_cache_put(ipfren_cache, q);
		fp->ipq_nfrags--;
		ip_nfrags--;
		q = nq;
	}
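
	/*
	 * Worked example for the trimming above (illustrative): suppose a
	 * queued fragment covers byte offsets [0, 24) and the new fragment
	 * claims [16, 40).  The preceding-segment check computes an overlap
	 * of i = 0 + 24 - 16 = 8 bytes, m_adj() strips those 8 bytes from
	 * the front of the new mbuf, and the new fragment's offset/length
	 * become 24 and 16.  The while loop just above is the mirror image,
	 * trimming or dequeuing queued fragments that the new one covers.
	 */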

insert:
	/*
	 * Stick the new segment in its place; check for complete reassembly.
	 */
	if (p == NULL) {
		TAILQ_INSERT_HEAD(&fp->ipq_fragq, ipqe, ipqe_q);
	} else {
		TAILQ_INSERT_AFTER(&fp->ipq_fragq, p, ipqe, ipqe_q);
	}
	next = 0;
	TAILQ_FOREACH(q, &fp->ipq_fragq, ipqe_q) {
		qip = q->ipqe_ip;
		if (ntohs(qip->ip_off) != next) {
			mutex_exit(&ipfr_lock);
			return NULL;
		}
		next += ntohs(qip->ip_len);
	}
	p = TAILQ_LAST(&fp->ipq_fragq, ipfr_qent_head);
	if (p->ipqe_mff) {
		mutex_exit(&ipfr_lock);
		return NULL;
	}

	/*
	 * Reassembly is complete.  Check for a bogus message size.
	 */
	q = TAILQ_FIRST(&fp->ipq_fragq);
	ip = q->ipqe_ip;
	if ((next + (ip->ip_hl << 2)) > IP_MAXPACKET) {
		IP_STATINC(IP_STAT_TOOLONG);
		ip_freef(fp);
		mutex_exit(&ipfr_lock);
		return NULL;
	}
	LIST_REMOVE(fp, ipq_q);
	ip_nfrags -= fp->ipq_nfrags;
	ip_nfragpackets--;
	mutex_exit(&ipfr_lock);

	/* Concatenate all fragments. */
	m = q->ipqe_m;
	t = m->m_next;
	m->m_next = NULL;
	m_cat(m, t);
	nq = TAILQ_NEXT(q, ipqe_q);
	pool_cache_put(ipfren_cache, q);

	for (q = nq; q != NULL; q = nq) {
		t = q->ipqe_m;
		nq = TAILQ_NEXT(q, ipqe_q);
		pool_cache_put(ipfren_cache, q);
		m_cat(m, t);
	}

	/*
	 * Create the header for the new packet by modifying the header of
	 * the first packet.  Dequeue and discard the fragment reassembly
	 * header.  Make the header visible.
	 */
	ip->ip_len = htons((ip->ip_hl << 2) + next);
	ip->ip_src = fp->ipq_src;
	ip->ip_dst = fp->ipq_dst;
	free(fp, M_FTABLE);

	m->m_len += (ip->ip_hl << 2);
	m->m_data -= (ip->ip_hl << 2);

	/* Fix up the mbuf.  XXX This should be done elsewhere. */
	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (t = m; t; t = t->m_next) {
			plen += t->m_len;
		}
		m->m_pkthdr.len = plen;
		m->m_pkthdr.csum_flags = 0;
	}
	return m;

dropfrag:
	if (fp != NULL) {
		fp->ipq_nfrags--;
	}
	ip_nfrags--;
	IP_STATINC(IP_STAT_FRAGDROPPED);
	mutex_exit(&ipfr_lock);

	pool_cache_put(ipfren_cache, ipqe);
	m_freem(m);
	return NULL;
}

/*
 * ip_freef:
 *
 *	Free a fragment reassembly header and all associated datagrams.
 */
static void
ip_freef(ipfr_queue_t *fp)
{
	ipfr_qent_t *q;

	KASSERT(mutex_owned(&ipfr_lock));

	LIST_REMOVE(fp, ipq_q);
	ip_nfrags -= fp->ipq_nfrags;
	ip_nfragpackets--;

	while ((q = TAILQ_FIRST(&fp->ipq_fragq)) != NULL) {
		TAILQ_REMOVE(&fp->ipq_fragq, q, ipqe_q);
		m_freem(q->ipqe_m);
		pool_cache_put(ipfren_cache, q);
	}
	free(fp, M_FTABLE);
}

/*
 * ip_reass_ttl_decr:
 *
 *	Decrement the TTL of all reassembly queue entries by `ticks'.
 *	Count the number of distinct fragments (as opposed to partial,
 *	fragmented datagrams) in the reassembly queue.  While we traverse
 *	the entire reassembly queue, compute and return the median TTL
 *	over all fragments.
 */
static u_int
ip_reass_ttl_decr(u_int ticks)
{
	u_int nfrags, median, dropfraction, keepfraction;
	ipfr_queue_t *fp, *nfp;
	int i;

	nfrags = 0;
	memset(fragttl_histo, 0, sizeof(fragttl_histo));

	for (i = 0; i < IPREASS_HASH_SIZE; i++) {
		for (fp = LIST_FIRST(&ip_frags[i]); fp != NULL; fp = nfp) {
			fp->ipq_ttl = ((fp->ipq_ttl <= ticks) ?
			    0 : fp->ipq_ttl - ticks);
			nfp = LIST_NEXT(fp, ipq_q);
			if (fp->ipq_ttl == 0) {
				IP_STATINC(IP_STAT_FRAGTIMEOUT);
				ip_freef(fp);
			} else {
				nfrags += fp->ipq_nfrags;
				fragttl_histo[fp->ipq_ttl] += fp->ipq_nfrags;
			}
		}
	}

	KASSERT(ip_nfrags == nfrags);

	/* Find the median (or other drop fraction) in the histogram. */
	dropfraction = (ip_nfrags / 2);
	keepfraction = ip_nfrags - dropfraction;
	for (i = IPFRAGTTL, median = 0; i >= 0; i--) {
		median += fragttl_histo[i];
		if (median >= keepfraction)
			break;
	}

	/* Return the TTL of the median (or other fraction). */
	return (u_int)i;
}

static void
ip_reass_drophalf(void)
{
	u_int median_ticks;

	KASSERT(mutex_owned(&ipfr_lock));

	/*
	 * Compute the median TTL of all fragments, and count the frags
	 * with that TTL or lower (roughly half of all fragments).
	 */
	median_ticks = ip_reass_ttl_decr(0);

	/* Drop half. */
	median_ticks = ip_reass_ttl_decr(median_ticks);
}
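
/*
 * Illustrative trace of the drop-half mechanism above: with four
 * fragments at TTLs {2, 5, 7, 9}, ip_reass_ttl_decr(0) walks the
 * histogram from IPFRAGTTL downward, accumulating counts until at
 * least half of the fragments are covered, and returns 7.  The second
 * call then ages every queue by 7 ticks: the entries at TTL 2, 5 and 7
 * hit zero and are freed, while the TTL 9 entry survives at TTL 2.
 * The drop is therefore "roughly" half: entries sitting exactly at the
 * median TTL are dropped as well.
 */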

/*
 * ip_reass_drain: drain off all datagram fragments.  Do not acquire
 * softnet_lock, as this can be called from hardware interrupt context.
 */
void
ip_reass_drain(void)
{

	/*
	 * We may be called from a device's interrupt context.  If
	 * the ipq is already busy, just bail out now.
	 */
	if (mutex_tryenter(&ipfr_lock)) {
		/*
		 * Drop half the total fragments now.  If more mbufs are
		 * needed, we will be called again soon.
		 */
		ip_reass_drophalf();
		mutex_exit(&ipfr_lock);
	}
}

/*
 * ip_reass_slowtimo:
 *
 *	If a timer expires on a reassembly queue, discard it.
 */
void
ip_reass_slowtimo(void)
{
	static u_int dropscanidx = 0;
	u_int i, median_ttl;

	mutex_enter(&ipfr_lock);

	/* Age the TTL of all fragments by 1 tick. */
	median_ttl = ip_reass_ttl_decr(1);

	/* Make sure the fragment limit is up-to-date. */
	CHECK_NMBCLUSTER_PARAMS();

	/* If we have too many fragments, drop the older half. */
	if (ip_nfrags > ip_maxfrags) {
		ip_reass_ttl_decr(median_ttl);
	}

	/*
	 * If we are over the maximum number of fragmented packets (due to
	 * the limit being lowered), drain off enough to get down to the
	 * new limit.  Start draining from the reassembly hashqueue most
	 * recently drained.
	 */
	if (ip_maxfragpackets < 0)
		;
	else {
		int wrapped = 0;

		i = dropscanidx;
		while (ip_nfragpackets > ip_maxfragpackets && wrapped == 0) {
			while (LIST_FIRST(&ip_frags[i]) != NULL) {
				ip_freef(LIST_FIRST(&ip_frags[i]));
			}
			if (++i >= IPREASS_HASH_SIZE) {
				i = 0;
			}
			/*
			 * Do not scan forever even if the fragment counters
			 * are wrong: stop after scanning the entire
			 * reassembly queue.
			 */
			if (i == dropscanidx) {
				wrapped = 1;
			}
		}
		dropscanidx = i;
	}
	mutex_exit(&ipfr_lock);
}

/*
 * ip_reass_packet: generic routine to perform IP reassembly.
 *
 * => The passed fragment should have the IP_MF flag and/or offset set.
 * => The fragment should not have flags other than IP_MF set.
 *
 * => Returns 0 on success or an error otherwise.
 * => On completion, m0 represents the constructed final packet.
 */
int
ip_reass_packet(struct mbuf **m0, struct ip *ip)
{
	const int hlen = ip->ip_hl << 2;
	const int len = ntohs(ip->ip_len);
	struct mbuf *m = *m0;
	ipfr_queue_t *fp;
	ipfr_qent_t *ipqe;
	u_int hash, off, flen;
	bool mff;

	/*
	 * Prevent TCP blind data attacks by not allowing non-initial
	 * fragments to start at less than 68 bytes (minimal fragment
	 * size) and making sure the first fragment is at least 68
	 * bytes.
	 */
	off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
	if ((off > 0 ? off + hlen : len) < IP_MINFRAGSIZE - 1) {
		IP_STATINC(IP_STAT_BADFRAGS);
		return EINVAL;
	}
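
	/*
	 * Example (illustrative): a non-initial fragment at byte offset 8
	 * with a 20-byte header yields off + hlen = 28, which falls below
	 * the 68-byte floor and is rejected, so a tiny second fragment
	 * cannot be slid over a TCP header.  An initial fragment
	 * (off == 0) is judged by its total length instead.
	 */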

	/*
	 * Fragment length and the MF flag.  Make sure that fragments have
	 * a data length which is non-zero and a multiple of 8 bytes.
	 */
	flen = ntohs(ip->ip_len) - hlen;
	mff = (ip->ip_off & htons(IP_MF)) != 0;
	if (mff && (flen == 0 || (flen & 0x7) != 0)) {
		IP_STATINC(IP_STAT_BADFRAGS);
		return EINVAL;
	}

	/*
	 * Adjust the total IP length to not reflect the header and convert
	 * the offset of this fragment to bytes.  XXX: clobbers struct ip.
	 */
	ip->ip_len = htons(flen);
	ip->ip_off = htons(off);

	/* Look for the queue of fragments of this datagram. */
	mutex_enter(&ipfr_lock);
	hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
	LIST_FOREACH(fp, &ip_frags[hash], ipq_q) {
		if (ip->ip_id != fp->ipq_id)
			continue;
		if (!in_hosteq(ip->ip_src, fp->ipq_src))
			continue;
		if (!in_hosteq(ip->ip_dst, fp->ipq_dst))
			continue;
		if (ip->ip_p != fp->ipq_p)
			continue;
		break;
	}

	/* Make sure that the TOS matches previous fragments. */
	if (fp && fp->ipq_tos != ip->ip_tos) {
		IP_STATINC(IP_STAT_BADFRAGS);
		mutex_exit(&ipfr_lock);
		return EINVAL;
	}

	/*
	 * Create a new entry and attempt reassembly.
	 */
	IP_STATINC(IP_STAT_FRAGMENTS);
	ipqe = pool_cache_get(ipfren_cache, PR_NOWAIT);
	if (ipqe == NULL) {
		IP_STATINC(IP_STAT_RCVMEMDROP);
		mutex_exit(&ipfr_lock);
		return ENOMEM;
	}
	ipqe->ipqe_mff = mff;
	ipqe->ipqe_m = m;
	ipqe->ipqe_ip = ip;

	*m0 = ip_reass(ipqe, fp, hash);
	if (*m0) {
		/* Note that it was finally reassembled. */
		IP_STATINC(IP_STAT_REASSEMBLED);
	}
	return 0;
}
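
#if 0
/*
 * Caller's-eye sketch (illustrative, not part of the build): roughly
 * how a hypothetical input path would hand a fragment over.  Per the
 * code above, an error return leaves the mbuf with the caller; on a
 * zero return, a NULL *m0 means the fragment was consumed (queued, or
 * dropped and counted), and a non-NULL *m0 is the reassembled datagram.
 */
	if (ip_reass_packet(&m, ip) != 0) {
		m_freem(m);		/* invalid fragment; discard it */
		return;
	}
	if (m == NULL) {
		return;			/* queued; wait for more fragments */
	}
	ip = mtod(m, struct ip *);	/* complete, reassembled datagram */
#endif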