1 /** 2 * @file 3 * This is the IPv4 packet segmentation and reassembly implementation. 4 * 5 */ 6 7 /* 8 * Copyright (c) 2001-2004 Swedish Institute of Computer Science. 9 * All rights reserved. 10 * 11 * Redistribution and use in source and binary forms, with or without modification, 12 * are permitted provided that the following conditions are met: 13 * 14 * 1. Redistributions of source code must retain the above copyright notice, 15 * this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright notice, 17 * this list of conditions and the following disclaimer in the documentation 18 * and/or other materials provided with the distribution. 19 * 3. The name of the author may not be used to endorse or promote products 20 * derived from this software without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 23 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT 25 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 26 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT 27 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 30 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY 31 * OF SUCH DAMAGE. 32 * 33 * This file is part of the lwIP TCP/IP stack. 
34 * 35 * Author: Jani Monoses <jani@iv.ro> 36 * Simon Goldschmidt 37 * original reassembly code by Adam Dunkels <adam@sics.se> 38 * 39 */ 40 41 #include "lwip/opt.h" 42 43 #if LWIP_IPV4 44 45 #include "lwip/ip4_frag.h" 46 #include "lwip/def.h" 47 #include "lwip/inet_chksum.h" 48 #include "lwip/netif.h" 49 #include "lwip/stats.h" 50 #include "lwip/icmp.h" 51 52 #include <string.h> 53 54 #if IP_REASSEMBLY 55 /** 56 * The IP reassembly code currently has the following limitations: 57 * - IP header options are not supported 58 * - fragments must not overlap (e.g. due to different routes), 59 * currently, overlapping or duplicate fragments are thrown away 60 * if IP_REASS_CHECK_OVERLAP=1 (the default)! 61 * 62 * @todo: work with IP header options 63 */ 64 65 /** Setting this to 0, you can turn off checking the fragments for overlapping 66 * regions. The code gets a little smaller. Only use this if you know that 67 * overlapping won't occur on your network! */ 68 #ifndef IP_REASS_CHECK_OVERLAP 69 #define IP_REASS_CHECK_OVERLAP 1 70 #endif /* IP_REASS_CHECK_OVERLAP */ 71 72 /** Set to 0 to prevent freeing the oldest datagram when the reassembly buffer is 73 * full (IP_REASS_MAX_PBUFS pbufs are enqueued). The code gets a little smaller. 74 * Datagrams will be freed by timeout only. Especially useful when MEMP_NUM_REASSDATA 75 * is set to 1, so one datagram can be reassembled at a time, only. */ 76 #ifndef IP_REASS_FREE_OLDEST 77 #define IP_REASS_FREE_OLDEST 1 78 #endif /* IP_REASS_FREE_OLDEST */ 79 80 #define IP_REASS_FLAG_LASTFRAG 0x01 81 82 /** This is a helper struct which holds the starting 83 * offset and the ending offset of this fragment to 84 * easily chain the fragments. 85 * It has the same packing requirements as the IP header, since it replaces 86 * the IP header in memory in incoming fragments (after copying it) to keep 87 * track of the various fragments. (-> If the IP header doesn't need packing, 88 * this struct doesn't need packing, too.) 
 */
#ifdef PACK_STRUCT_USE_INCLUDES
#  include "arch/bpstruct.h"
#endif
PACK_STRUCT_BEGIN
struct ip_reass_helper {
  PACK_STRUCT_FIELD(struct pbuf *next_pbuf); /* next fragment pbuf in the (sorted-by-offset) list */
  PACK_STRUCT_FIELD(u16_t start);            /* payload offset of this fragment within the datagram */
  PACK_STRUCT_FIELD(u16_t end);              /* start + payload length of this fragment */
} PACK_STRUCT_STRUCT;
PACK_STRUCT_END
#ifdef PACK_STRUCT_USE_INCLUDES
#  include "arch/epstruct.h"
#endif

/* Two fragments belong to the same datagram iff source, destination and
 * IP identification field all match. */
#define IP_ADDRESSES_AND_ID_MATCH(iphdrA, iphdrB)  \
  (ip4_addr_cmp(&(iphdrA)->src, &(iphdrB)->src) && \
   ip4_addr_cmp(&(iphdrA)->dest, &(iphdrB)->dest) && \
   IPH_ID(iphdrA) == IPH_ID(iphdrB)) ? 1 : 0

/* global variables */
static struct ip_reassdata *reassdatagrams; /* singly linked list of datagrams being reassembled */
static u16_t ip_reass_pbufcount;            /* total pbufs currently enqueued for reassembly */

/* function prototypes */
static void ip_reass_dequeue_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev);
static int ip_reass_free_complete_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev);

/**
 * Reassembly timer base function
 * for both NO_SYS == 0 and 1 (!).
 *
 * Walks the list of in-progress datagrams, decrementing each timer;
 * a datagram whose timer already reached 0 is freed (incomplete reassembly
 * timed out).
 *
 * Should be called every 1000 msec (defined by IP_TMR_INTERVAL).
 */
void
ip_reass_tmr(void)
{
  struct ip_reassdata *r, *prev = NULL;

  r = reassdatagrams;
  while (r != NULL) {
    /* Decrement the timer. Once it reaches 0,
     * clean up the incomplete fragment assembly */
    if (r->timer > 0) {
      r->timer--;
      LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass_tmr: timer dec %"U16_F"\n",(u16_t)r->timer));
      prev = r;
      r = r->next;
    } else {
      /* reassembly timed out */
      struct ip_reassdata *tmp;
      LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass_tmr: timer timed out\n"));
      tmp = r;
      /* get the next pointer before freeing; 'prev' stays valid because
       * the freed node is unlinked from the list */
      r = r->next;
      /* free the helper struct and all enqueued pbufs */
      ip_reass_free_complete_datagram(tmp, prev);
    }
  }
}

/**
 * Free a datagram (struct ip_reassdata) and all its pbufs.
 * Updates the total count of enqueued pbufs (ip_reass_pbufcount),
 * SNMP counters and sends an ICMP time exceeded packet.
 *
 * @param ipr datagram to free
 * @param prev the previous datagram in the linked list (NULL if 'ipr' is the head)
 * @return the number of pbufs freed
 */
static int
ip_reass_free_complete_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev)
{
  u16_t pbufs_freed = 0;
  u16_t clen;
  struct pbuf *p;
  struct ip_reass_helper *iprh;

  LWIP_ASSERT("prev != ipr", prev != ipr);
  if (prev != NULL) {
    LWIP_ASSERT("prev->next == ipr", prev->next == ipr);
  }

  MIB2_STATS_INC(mib2.ipreasmfails);
#if LWIP_ICMP
  /* NOTE(review): assumes ipr->p != NULL here (at least one fragment was
   * chained before the datagram is freed) -- confirm against callers */
  iprh = (struct ip_reass_helper *)ipr->p->payload;
  if (iprh->start == 0) {
    /* The first fragment was received, send ICMP time exceeded. */
    /* First, de-queue the first pbuf from r->p. */
    p = ipr->p;
    ipr->p = iprh->next_pbuf;
    /* Then, copy the original header into it (the helper struct had
     * overwritten the IP header in the pbuf). */
    SMEMCPY(p->payload, &ipr->iphdr, IP_HLEN);
    icmp_time_exceeded(p, ICMP_TE_FRAG);
    clen = pbuf_clen(p);
    LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff);
    pbufs_freed += clen;
    pbuf_free(p);
  }
#endif /* LWIP_ICMP */

  /* First, free all received pbufs. The individual pbufs need to be released
     separately as they have not yet been chained */
  p = ipr->p;
  while (p != NULL) {
    struct pbuf *pcur;
    iprh = (struct ip_reass_helper *)p->payload;
    pcur = p;
    /* get the next pointer before freeing */
    p = iprh->next_pbuf;
    clen = pbuf_clen(pcur);
    LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff);
    pbufs_freed += clen;
    pbuf_free(pcur);
  }
  /* Then, unchain the struct ip_reassdata from the list and free it. */
  ip_reass_dequeue_datagram(ipr, prev);
  /* NOTE(review): assert message mentions 'clen' but the condition checks
   * the accumulated 'pbufs_freed' */
  LWIP_ASSERT("ip_reass_pbufcount >= clen", ip_reass_pbufcount >= pbufs_freed);
  ip_reass_pbufcount -= pbufs_freed;

  return pbufs_freed;
}

#if IP_REASS_FREE_OLDEST
/**
 * Free the oldest datagram to make room for enqueueing new fragments.
 * The datagram 'fraghdr' belongs to is not freed!
 *
 * @param fraghdr IP header of the current fragment
 * @param pbufs_needed number of pbufs needed to enqueue
 *        (used for freeing other datagrams if not enough space)
 * @return the number of pbufs freed
 */
static int
ip_reass_remove_oldest_datagram(struct ip_hdr *fraghdr, int pbufs_needed)
{
  /* @todo Can't we simply remove the last datagram in the
   *       linked list behind reassdatagrams?
   */
  struct ip_reassdata *r, *oldest, *prev, *oldest_prev;
  int pbufs_freed = 0, pbufs_freed_current;
  int other_datagrams;

  /* Free datagrams until being allowed to enqueue 'pbufs_needed' pbufs,
   * but don't free the datagram that 'fraghdr' belongs to! */
  do {
    oldest = NULL;
    prev = NULL;
    oldest_prev = NULL;
    other_datagrams = 0;
    r = reassdatagrams;
    while (r != NULL) {
      if (!IP_ADDRESSES_AND_ID_MATCH(&r->iphdr, fraghdr)) {
        /* Not the same datagram as fraghdr */
        other_datagrams++;
        if (oldest == NULL) {
          oldest = r;
          oldest_prev = prev;
        } else if (r->timer <= oldest->timer) {
          /* older than the previous oldest */
          oldest = r;
          oldest_prev = prev;
        }
      }
      /* keep 'prev' pointing at the predecessor of the next node examined */
      if (r->next != NULL) {
        prev = r;
      }
      r = r->next;
    }
    if (oldest != NULL) {
      pbufs_freed_current = ip_reass_free_complete_datagram(oldest, oldest_prev);
      pbufs_freed += pbufs_freed_current;
    }
    /* repeat while not enough pbufs were freed and more than one other
     * datagram remains that could still be freed */
  } while ((pbufs_freed < pbufs_needed) && (other_datagrams > 1));
  return pbufs_freed;
}
#endif /* IP_REASS_FREE_OLDEST */

/**
 * Enqueues a new fragment into the fragment queue
 * @param fraghdr points to the new fragments IP hdr
 * @param clen number of pbufs needed to enqueue (used for freeing other datagrams if not enough space)
 * @return A pointer to the queue location into which the fragment was enqueued
 */
static struct ip_reassdata*
ip_reass_enqueue_new_datagram(struct ip_hdr *fraghdr, int clen)
{
  struct ip_reassdata* ipr;
#if !IP_REASS_FREE_OLDEST
  LWIP_UNUSED_ARG(clen);
#endif

  /* No matching previous fragment found, allocate a new reassdata struct */
  ipr = (struct ip_reassdata *)memp_malloc(MEMP_REASSDATA);
  if (ipr == NULL) {
#if IP_REASS_FREE_OLDEST
    /* pool exhausted: try to evict the oldest other datagram and retry once */
    if (ip_reass_remove_oldest_datagram(fraghdr, clen) >= clen) {
      ipr = (struct ip_reassdata *)memp_malloc(MEMP_REASSDATA);
    }
    if (ipr == NULL)
#endif /* IP_REASS_FREE_OLDEST */
    {
      IPFRAG_STATS_INC(ip_frag.memerr);
      LWIP_DEBUGF(IP_REASS_DEBUG,("Failed to alloc reassdata struct\n"));
      return NULL;
    }
  }
  memset(ipr, 0, sizeof(struct ip_reassdata));
  ipr->timer = IP_REASS_MAXAGE;

  /* enqueue the new structure to the front of the list */
  ipr->next = reassdatagrams;
  reassdatagrams = ipr;
  /* copy the ip header for later tests and input */
  /* @todo: no ip options supported? */
  SMEMCPY(&(ipr->iphdr), fraghdr, IP_HLEN);
  return ipr;
}

/**
 * Dequeues a datagram from the datagram queue. Doesn't deallocate the pbufs.
 * @param ipr points to the queue entry to dequeue
 * @param prev the predecessor of 'ipr' in the list (NULL if 'ipr' is the head)
 */
static void
ip_reass_dequeue_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev)
{
  /* dequeue the reass struct */
  if (reassdatagrams == ipr) {
    /* it was the first in the list */
    reassdatagrams = ipr->next;
  } else {
    /* it wasn't the first, so it must have a valid 'prev' */
    LWIP_ASSERT("sanity check linked list", prev != NULL);
    prev->next = ipr->next;
  }

  /* now we can free the ip_reassdata struct */
  memp_free(MEMP_REASSDATA, ipr);
}

/**
 * Chain a new pbuf into the pbuf list that composes the datagram.  The pbuf list
 * will grow over time as new pbufs are rx.
 * Also checks that the datagram passes basic continuity checks (if the last
 * fragment was received at least once).
 * @param ipr points to the reassembly state
 * @param new_p points to the pbuf for the current fragment
 * @return 0 if invalid, >0 otherwise
 */
static int
ip_reass_chain_frag_into_datagram_and_validate(struct ip_reassdata *ipr, struct pbuf *new_p)
{
  struct ip_reass_helper *iprh, *iprh_tmp, *iprh_prev=NULL;
  struct pbuf *q;
  u16_t offset, len;
  struct ip_hdr *fraghdr;
  int valid = 1;

  /* Extract length and fragment offset from current fragment */
  fraghdr = (struct ip_hdr*)new_p->payload;
  len = lwip_ntohs(IPH_LEN(fraghdr)) - IPH_HL(fraghdr) * 4;
  offset = (lwip_ntohs(IPH_OFFSET(fraghdr)) & IP_OFFMASK) * 8;

  /* overwrite the fragment's ip header from the pbuf with our helper struct,
   * and setup the embedded helper structure. */
  /* make sure the struct ip_reass_helper fits into the IP header */
  LWIP_ASSERT("sizeof(struct ip_reass_helper) <= IP_HLEN",
              sizeof(struct ip_reass_helper) <= IP_HLEN);
  iprh = (struct ip_reass_helper*)new_p->payload;
  iprh->next_pbuf = NULL;
  iprh->start = offset;
  iprh->end = offset + len;

  /* Iterate through until we either get to the end of the list (append),
   * or we find one with a larger offset (insert). */
  for (q = ipr->p; q != NULL;) {
    iprh_tmp = (struct ip_reass_helper*)q->payload;
    if (iprh->start < iprh_tmp->start) {
      /* the new pbuf should be inserted before this */
      iprh->next_pbuf = q;
      if (iprh_prev != NULL) {
        /* not the fragment with the lowest offset */
#if IP_REASS_CHECK_OVERLAP
        if ((iprh->start < iprh_prev->end) || (iprh->end > iprh_tmp->start)) {
          /* fragment overlaps with previous or following, throw away */
          goto freepbuf;
        }
#endif /* IP_REASS_CHECK_OVERLAP */
        iprh_prev->next_pbuf = new_p;
      } else {
        /* fragment with the lowest offset */
        ipr->p = new_p;
      }
      break;
    } else if (iprh->start == iprh_tmp->start) {
      /* received the same datagram twice: no need to keep the datagram */
      goto freepbuf;
#if IP_REASS_CHECK_OVERLAP
    } else if (iprh->start < iprh_tmp->end) {
      /* overlap: no need to keep the new datagram */
      goto freepbuf;
#endif /* IP_REASS_CHECK_OVERLAP */
    } else {
      /* Check if the fragments received so far have no holes. */
      if (iprh_prev != NULL) {
        if (iprh_prev->end != iprh_tmp->start) {
          /* There is a fragment missing between the current
           * and the previous fragment */
          valid = 0;
        }
      }
    }
    q = iprh_tmp->next_pbuf;
    iprh_prev = iprh_tmp;
  }

  /* If q is NULL, then we made it to the end of the list. Determine what to do now */
  if (q == NULL) {
    if (iprh_prev != NULL) {
      /* this is (for now), the fragment with the highest offset:
       * chain it to the last fragment */
#if IP_REASS_CHECK_OVERLAP
      LWIP_ASSERT("check fragments don't overlap", iprh_prev->end <= iprh->start);
#endif /* IP_REASS_CHECK_OVERLAP */
      iprh_prev->next_pbuf = new_p;
      if (iprh_prev->end != iprh->start) {
        /* hole before this (highest-offset) fragment */
        valid = 0;
      }
    } else {
#if IP_REASS_CHECK_OVERLAP
      LWIP_ASSERT("no previous fragment, this must be the first fragment!",
                  ipr->p == NULL);
#endif /* IP_REASS_CHECK_OVERLAP */
      /* this is the first fragment we ever received for this ip datagram */
      ipr->p = new_p;
    }
  }

  /* At this point, the validation part begins: */
  /* If we already received the last fragment */
  if ((ipr->flags & IP_REASS_FLAG_LASTFRAG) != 0) {
    /* and had no holes so far */
    if (valid) {
      /* then check if the rest of the fragments is here */
      /* Check if the queue starts with the first datagram */
      if ((ipr->p == NULL) || (((struct ip_reass_helper*)ipr->p->payload)->start != 0)) {
        valid = 0;
      } else {
        /* and check that there are no holes after this datagram */
        iprh_prev = iprh;
        q = iprh->next_pbuf;
        while (q != NULL) {
          iprh = (struct ip_reass_helper*)q->payload;
          if (iprh_prev->end != iprh->start) {
            valid = 0;
            break;
          }
          iprh_prev = iprh;
          q = iprh->next_pbuf;
        }
        /* if still valid, all fragments are received
         * (because the fragment with MF==0 has already arrived) */
        if (valid) {
          LWIP_ASSERT("sanity check", ipr->p != NULL);
          LWIP_ASSERT("sanity check",
                      ((struct ip_reass_helper*)ipr->p->payload) != iprh);
          LWIP_ASSERT("validate_datagram:next_pbuf!=NULL",
                      iprh->next_pbuf == NULL);
          LWIP_ASSERT("validate_datagram:datagram end!=datagram len",
                      iprh->end == ipr->datagram_len);
        }
      }
    }
    /* If valid is 0 here, there are some fragments missing in the middle
     * (since MF == 0 has already arrived). Such datagrams simply time out if
     * no more fragments are received... */
    return valid;
  }
  /* If we come here, not all fragments were received, yet! */
  return 0; /* not yet valid! */
#if IP_REASS_CHECK_OVERLAP
freepbuf:
  /* duplicate/overlapping fragment: drop it and undo the pbuf accounting */
  ip_reass_pbufcount -= pbuf_clen(new_p);
  pbuf_free(new_p);
  return 0;
#endif /* IP_REASS_CHECK_OVERLAP */
}

/**
 * Reassembles incoming IP fragments into an IP datagram.
 *
 * @param p points to a pbuf chain of the fragment
 * @return NULL if reassembly is incomplete, the reassembled pbuf chain otherwise
 */
struct pbuf *
ip4_reass(struct pbuf *p)
{
  struct pbuf *r;
  struct ip_hdr *fraghdr;
  struct ip_reassdata *ipr;
  struct ip_reass_helper *iprh;
  u16_t offset, len, clen;

  IPFRAG_STATS_INC(ip_frag.recv);
  MIB2_STATS_INC(mib2.ipreasmreqds);

  fraghdr = (struct ip_hdr*)p->payload;

  if ((IPH_HL(fraghdr) * 4) != IP_HLEN) {
    LWIP_DEBUGF(IP_REASS_DEBUG,("ip4_reass: IP options currently not supported!\n"));
    IPFRAG_STATS_INC(ip_frag.err);
    goto nullreturn;
  }

  /* fragment offset is stored in 8-byte units in the IP header */
  offset = (lwip_ntohs(IPH_OFFSET(fraghdr)) & IP_OFFMASK) * 8;
  len = lwip_ntohs(IPH_LEN(fraghdr)) - IPH_HL(fraghdr) * 4;

  /* Check if we are allowed to enqueue more datagrams. */
  clen = pbuf_clen(p);
  if ((ip_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS) {
#if IP_REASS_FREE_OLDEST
    if (!ip_reass_remove_oldest_datagram(fraghdr, clen) ||
        ((ip_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS))
#endif /* IP_REASS_FREE_OLDEST */
    {
      /* No datagram could be freed and still too many pbufs enqueued */
      LWIP_DEBUGF(IP_REASS_DEBUG,("ip4_reass: Overflow condition: pbufct=%d, clen=%d, MAX=%d\n",
                                  ip_reass_pbufcount, clen, IP_REASS_MAX_PBUFS));
      IPFRAG_STATS_INC(ip_frag.memerr);
      /* @todo: send ICMP time exceeded here? */
      /* drop this pbuf */
      goto nullreturn;
    }
  }

  /* Look for the datagram the fragment belongs to in the current datagram queue,
   * remembering the previous in the queue for later dequeueing. */
  for (ipr = reassdatagrams; ipr != NULL; ipr = ipr->next) {
    /* Check if the incoming fragment matches the one currently present
       in the reassembly buffer. If so, we proceed with copying the
       fragment into the buffer. */
    if (IP_ADDRESSES_AND_ID_MATCH(&ipr->iphdr, fraghdr)) {
      LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: matching previous fragment ID=%"X16_F"\n",
                                   lwip_ntohs(IPH_ID(fraghdr))));
      IPFRAG_STATS_INC(ip_frag.cachehit);
      break;
    }
  }

  if (ipr == NULL) {
    /* Enqueue a new datagram into the datagram queue */
    ipr = ip_reass_enqueue_new_datagram(fraghdr, clen);
    /* Bail if unable to enqueue */
    if (ipr == NULL) {
      goto nullreturn;
    }
  } else {
    if (((lwip_ntohs(IPH_OFFSET(fraghdr)) & IP_OFFMASK) == 0) &&
        ((lwip_ntohs(IPH_OFFSET(&ipr->iphdr)) & IP_OFFMASK) != 0)) {
      /* ipr->iphdr is not the header from the first fragment, but fraghdr is
       * -> copy fraghdr into ipr->iphdr since we want to have the header
       * of the first fragment (for ICMP time exceeded and later, for copying
       * all options, if supported) */
      SMEMCPY(&ipr->iphdr, fraghdr, IP_HLEN);
    }
  }
  /* Track the current number of pbufs current 'in-flight', in order to limit
     the number of fragments that may be enqueued at any one time */
  ip_reass_pbufcount += clen;

  /* At this point, we have either created a new entry or pointing
   * to an existing one */

  /* check for 'no more fragments', and update queue entry */
  if ((IPH_OFFSET(fraghdr) & PP_NTOHS(IP_MF)) == 0) {
    ipr->flags |= IP_REASS_FLAG_LASTFRAG;
    ipr->datagram_len = offset + len;
    LWIP_DEBUGF(IP_REASS_DEBUG,
                ("ip4_reass: last fragment seen, total len %"S16_F"\n",
                 ipr->datagram_len));
  }
  /* find the right place to insert this pbuf */
  /* @todo: trim pbufs if fragments are overlapping */
  if (ip_reass_chain_frag_into_datagram_and_validate(ipr, p)) {
    struct ip_reassdata *ipr_prev;
    /* the totally last fragment (flag more fragments = 0) was received at least
     * once AND all fragments are received */
    ipr->datagram_len += IP_HLEN;

    /* save the second pbuf before copying the header over the pointer */
    r = ((struct ip_reass_helper*)ipr->p->payload)->next_pbuf;

    /* copy the original ip header back to the first pbuf */
    fraghdr = (struct ip_hdr*)(ipr->p->payload);
    SMEMCPY(fraghdr, &ipr->iphdr, IP_HLEN);
    IPH_LEN_SET(fraghdr, lwip_htons(ipr->datagram_len));
    IPH_OFFSET_SET(fraghdr, 0);
    IPH_CHKSUM_SET(fraghdr, 0);
    /* @todo: do we need to set/calculate the correct checksum? */
#if CHECKSUM_GEN_IP
    IF__NETIF_CHECKSUM_ENABLED(ip_current_input_netif(), NETIF_CHECKSUM_GEN_IP) {
      IPH_CHKSUM_SET(fraghdr, inet_chksum(fraghdr, IP_HLEN));
    }
#endif /* CHECKSUM_GEN_IP */

    p = ipr->p;

    /* chain together the pbufs contained within the reass_data list. */
    while (r != NULL) {
      iprh = (struct ip_reass_helper*)r->payload;

      /* hide the ip header for every succeeding fragment */
      pbuf_header(r, -IP_HLEN);
      pbuf_cat(p, r);
      r = iprh->next_pbuf;
    }

    /* find the previous entry in the linked list */
    if (ipr == reassdatagrams) {
      ipr_prev = NULL;
    } else {
      for (ipr_prev = reassdatagrams; ipr_prev != NULL; ipr_prev = ipr_prev->next) {
        if (ipr_prev->next == ipr) {
          break;
        }
      }
    }

    /* release the resources allocated for the fragment queue entry */
    ip_reass_dequeue_datagram(ipr, ipr_prev);

    /* and adjust the number of pbufs currently queued for reassembly. */
    ip_reass_pbufcount -= pbuf_clen(p);

    MIB2_STATS_INC(mib2.ipreasmoks);

    /* Return the pbuf chain */
    return p;
  }
  /* the datagram is not (yet?)
     reassembled completely */
  LWIP_DEBUGF(IP_REASS_DEBUG,("ip_reass_pbufcount: %d out\n", ip_reass_pbufcount));
  return NULL;

nullreturn:
  /* common drop path: count the drop and free the incoming fragment */
  LWIP_DEBUGF(IP_REASS_DEBUG,("ip4_reass: nullreturn\n"));
  IPFRAG_STATS_INC(ip_frag.drop);
  pbuf_free(p);
  return NULL;
}
#endif /* IP_REASSEMBLY */

#if IP_FRAG
#if !LWIP_NETIF_TX_SINGLE_PBUF
/** Allocate a new struct pbuf_custom_ref from the MEMP_FRAG_PBUF pool */
static struct pbuf_custom_ref*
ip_frag_alloc_pbuf_custom_ref(void)
{
  return (struct pbuf_custom_ref*)memp_malloc(MEMP_FRAG_PBUF);
}

/** Free a struct pbuf_custom_ref back to the MEMP_FRAG_PBUF pool */
static void
ip_frag_free_pbuf_custom_ref(struct pbuf_custom_ref* p)
{
  LWIP_ASSERT("p != NULL", p != NULL);
  memp_free(MEMP_FRAG_PBUF, p);
}

/** Free-callback function to free a 'struct pbuf_custom_ref', called by
 * pbuf_free. Releases the reference held on the original pbuf, then the
 * wrapper itself. */
static void
ipfrag_free_pbuf_custom(struct pbuf *p)
{
  struct pbuf_custom_ref *pcr = (struct pbuf_custom_ref*)p;
  LWIP_ASSERT("pcr != NULL", pcr != NULL);
  LWIP_ASSERT("pcr == p", (void*)pcr == (void*)p);
  if (pcr->original != NULL) {
    pbuf_free(pcr->original);
  }
  ip_frag_free_pbuf_custom_ref(pcr);
}
#endif /* !LWIP_NETIF_TX_SINGLE_PBUF */

/**
 * Fragment an IP datagram if too large for the netif.
 *
 * Chop the datagram in MTU sized chunks and send them in order
 * by pointing PBUF_REFs into p.
 *
 * @param p ip packet to send
 * @param netif the netif on which to send
 * @param dest destination ip address to which to send
 *
 * @return ERR_OK if sent successfully, err_t otherwise
 */
err_t
ip4_frag(struct pbuf *p, struct netif *netif, const ip4_addr_t *dest)
{
  struct pbuf *rambuf;
#if !LWIP_NETIF_TX_SINGLE_PBUF
  struct pbuf *newpbuf;
  u16_t newpbuflen = 0;
  u16_t left_to_copy;
#endif
  struct ip_hdr *original_iphdr;
  struct ip_hdr *iphdr;
  /* nfb: number of 8-byte blocks of payload that fit in one fragment */
  const u16_t nfb = (netif->mtu - IP_HLEN) / 8;
  u16_t left, fragsize;
  u16_t ofo;
  int last;
  u16_t poff = IP_HLEN;
  u16_t tmp;

  original_iphdr = (struct ip_hdr *)p->payload;
  iphdr = original_iphdr;
  LWIP_ERROR("ip4_frag() does not support IP options", IPH_HL(iphdr) * 4 == IP_HLEN, return ERR_VAL);

  /* Save original offset */
  tmp = lwip_ntohs(IPH_OFFSET(iphdr));
  ofo = tmp & IP_OFFMASK;
  LWIP_ERROR("ip_frag(): MF already set", (tmp & IP_MF) == 0, return ERR_VAL);

  left = p->tot_len - IP_HLEN;

  while (left) {
    /* Fill this fragment */
    fragsize = LWIP_MIN(left, nfb * 8);

#if LWIP_NETIF_TX_SINGLE_PBUF
    rambuf = pbuf_alloc(PBUF_IP, fragsize, PBUF_RAM);
    if (rambuf == NULL) {
      goto memerr;
    }
    LWIP_ASSERT("this needs a pbuf in one piece!",
                (rambuf->len == rambuf->tot_len) && (rambuf->next == NULL));
    poff += pbuf_copy_partial(p, rambuf->payload, fragsize, poff);
    /* make room for the IP header */
    if (pbuf_header(rambuf, IP_HLEN)) {
      pbuf_free(rambuf);
      goto memerr;
    }
    /* fill in the IP header */
    SMEMCPY(rambuf->payload, original_iphdr, IP_HLEN);
    iphdr = (struct ip_hdr*)rambuf->payload;
#else /* LWIP_NETIF_TX_SINGLE_PBUF */
    /* When not using a static buffer, create a chain of pbufs.
     * The first will be a PBUF_RAM holding the link and IP header.
     * The rest will be PBUF_REFs mirroring the pbuf chain to be fragged,
     * but limited to the size of an mtu.
     */
    rambuf = pbuf_alloc(PBUF_LINK, IP_HLEN, PBUF_RAM);
    if (rambuf == NULL) {
      goto memerr;
    }
    LWIP_ASSERT("this needs a pbuf in one piece!",
                (p->len >= (IP_HLEN)));
    SMEMCPY(rambuf->payload, original_iphdr, IP_HLEN);
    iphdr = (struct ip_hdr *)rambuf->payload;

    left_to_copy = fragsize;
    while (left_to_copy) {
      struct pbuf_custom_ref *pcr;
      u16_t plen = p->len - poff;
      newpbuflen = LWIP_MIN(left_to_copy, plen);
      /* Is this pbuf already empty? */
      if (!newpbuflen) {
        poff = 0;
        p = p->next;
        continue;
      }
      pcr = ip_frag_alloc_pbuf_custom_ref();
      if (pcr == NULL) {
        pbuf_free(rambuf);
        goto memerr;
      }
      /* Mirror this pbuf, although we might not need all of it. */
      newpbuf = pbuf_alloced_custom(PBUF_RAW, newpbuflen, PBUF_REF, &pcr->pc,
                                    (u8_t*)p->payload + poff, newpbuflen);
      if (newpbuf == NULL) {
        ip_frag_free_pbuf_custom_ref(pcr);
        pbuf_free(rambuf);
        goto memerr;
      }
      /* hold a reference on the original pbuf; released by
       * ipfrag_free_pbuf_custom when the PBUF_REF wrapper is freed */
      pbuf_ref(p);
      pcr->original = p;
      pcr->pc.custom_free_function = ipfrag_free_pbuf_custom;

      /* Add it to end of rambuf's chain, but using pbuf_cat, not pbuf_chain
       * so that it is removed when pbuf_dechain is later called on rambuf.
       */
      pbuf_cat(rambuf, newpbuf);
      left_to_copy -= newpbuflen;
      if (left_to_copy) {
        poff = 0;
        p = p->next;
      }
    }
    /* advance past the bytes consumed from the current source pbuf */
    poff += newpbuflen;
#endif /* LWIP_NETIF_TX_SINGLE_PBUF */

    /* Correct header */
    last = (left <= netif->mtu - IP_HLEN);

    /* Set new offset and MF flag (offset field counts 8-byte blocks) */
    tmp = (IP_OFFMASK & (ofo));
    if (!last) {
      tmp = tmp | IP_MF;
    }
    IPH_OFFSET_SET(iphdr, lwip_htons(tmp));
    IPH_LEN_SET(iphdr, lwip_htons(fragsize + IP_HLEN));
    IPH_CHKSUM_SET(iphdr, 0);
#if CHECKSUM_GEN_IP
    IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_IP) {
      IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, IP_HLEN));
    }
#endif /* CHECKSUM_GEN_IP */

    /* No need for separate header pbuf - we allowed room for it in rambuf
     * when allocated.
     */
    /* NOTE(review): the return value of netif->output() is ignored here, so
     * per-fragment transmit errors are not propagated to the caller */
    netif->output(netif, rambuf, dest);
    IPFRAG_STATS_INC(ip_frag.xmit);

    /* Unfortunately we can't reuse rambuf - the hardware may still be
     * using the buffer. Instead we free it (and the ensuing chain) and
     * recreate it next time round the loop. If we're lucky the hardware
     * will have already sent the packet, the free will really free, and
     * there will be zero memory penalty.
     */

    pbuf_free(rambuf);
    left -= fragsize;
    ofo += nfb;
  }
  MIB2_STATS_INC(mib2.ipfragoks);
  return ERR_OK;
memerr:
  MIB2_STATS_INC(mib2.ipfragfails);
  return ERR_MEM;
}
#endif /* IP_FRAG */

#endif /* LWIP_IPV4 */