xref: /minix/minix/lib/liblwip/dist/src/core/pbuf.c (revision bb9622b5)
1 /**
2  * @file
3  * Packet buffer management
4  */
5 
6 /**
7  * @defgroup pbuf Packet buffers (PBUF)
8  * @ingroup infrastructure
9  *
10  * Packets are built from the pbuf data structure. It supports dynamic
11  * memory allocation for packet contents or can reference externally
12  * managed packet contents both in RAM and ROM. Quick allocation for
13  * incoming packets is provided through pools with fixed sized pbufs.
14  *
15  * A packet may span over multiple pbufs, chained as a singly linked
16  * list. This is called a "pbuf chain".
17  *
18  * Multiple packets may be queued, also using this singly linked list.
19  * This is called a "packet queue".
20  *
21  * So, a packet queue consists of one or more pbuf chains, each of
22  * which consists of one or more pbufs. CURRENTLY, PACKET QUEUES ARE
23  * NOT SUPPORTED!!! Use helper structs to queue multiple packets.
24  *
25  * The difference between a pbuf chain and a packet queue is subtle but
26  * precise.
27  *
28  * The last pbuf of a packet has a ->tot_len field that equals the
29  * ->len field. It can be found by traversing the list. If the last
30  * pbuf of a packet has a ->next field other than NULL, more packets
31  * are on the queue.
32  *
33  * Therefore, looping through the pbufs of a single packet has a loop
34  * end condition of (tot_len == p->len), NOT (next == NULL).
35  *
36  * Example of custom pbuf usage for zero-copy RX:
37   @code{.c}
38 typedef struct my_custom_pbuf
39 {
40    struct pbuf_custom p;
41    void* dma_descriptor;
42 } my_custom_pbuf_t;
43 
44 LWIP_MEMPOOL_DECLARE(RX_POOL, 10, sizeof(my_custom_pbuf_t), "Zero-copy RX PBUF pool");
45 
46 void my_pbuf_free_custom(void* p)
47 {
48   my_custom_pbuf_t* my_pbuf = (my_custom_pbuf_t*)p;
49 
50   LOCK_INTERRUPTS();
51   free_rx_dma_descriptor(my_pbuf->dma_descriptor);
52   LWIP_MEMPOOL_FREE(RX_POOL, my_pbuf);
53   UNLOCK_INTERRUPTS();
54 }
55 
56 void eth_rx_irq()
57 {
58   dma_descriptor*   dma_desc = get_RX_DMA_descriptor_from_ethernet();
59   my_custom_pbuf_t* my_pbuf  = (my_custom_pbuf_t*)LWIP_MEMPOOL_ALLOC(RX_POOL);
60 
61   my_pbuf->p.custom_free_function = my_pbuf_free_custom;
62   my_pbuf->dma_descriptor         = dma_desc;
63 
64   invalidate_cpu_cache(dma_desc->rx_data, dma_desc->rx_length);
65 
66   struct pbuf* p = pbuf_alloced_custom(PBUF_RAW,
67      dma_desc->rx_length,
68      PBUF_REF,
69      &my_pbuf->p,
70      dma_desc->rx_data,
71      dma_desc->max_buffer_size);
72 
73   if(netif->input(p, netif) != ERR_OK) {
74     pbuf_free(p);
75   }
76 }
77   @endcode
78  */
79 
80 /*
81  * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
82  * All rights reserved.
83  *
84  * Redistribution and use in source and binary forms, with or without modification,
85  * are permitted provided that the following conditions are met:
86  *
87  * 1. Redistributions of source code must retain the above copyright notice,
88  *    this list of conditions and the following disclaimer.
89  * 2. Redistributions in binary form must reproduce the above copyright notice,
90  *    this list of conditions and the following disclaimer in the documentation
91  *    and/or other materials provided with the distribution.
92  * 3. The name of the author may not be used to endorse or promote products
93  *    derived from this software without specific prior written permission.
94  *
95  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
96  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
97  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
98  * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
99  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
100  * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
101  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
102  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
103  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
104  * OF SUCH DAMAGE.
105  *
106  * This file is part of the lwIP TCP/IP stack.
107  *
108  * Author: Adam Dunkels <adam@sics.se>
109  *
110  */
111 
112 #include "lwip/opt.h"
113 
114 #include "lwip/stats.h"
115 #include "lwip/def.h"
116 #include "lwip/mem.h"
117 #include "lwip/memp.h"
118 #include "lwip/pbuf.h"
119 #include "lwip/sys.h"
120 #include "lwip/netif.h"
121 #if LWIP_TCP && TCP_QUEUE_OOSEQ
122 #include "lwip/priv/tcp_priv.h"
123 #endif
124 #if LWIP_CHECKSUM_ON_COPY
125 #include "lwip/inet_chksum.h"
126 #endif
127 
128 #include <string.h>
129 
130 #define SIZEOF_STRUCT_PBUF        LWIP_MEM_ALIGN_SIZE(sizeof(struct pbuf))
131 /* Since the pool is created in memp, PBUF_POOL_BUFSIZE will be automatically
132    aligned there. Therefore, PBUF_POOL_BUFSIZE_ALIGNED can be used here. */
133 #define PBUF_POOL_BUFSIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(PBUF_POOL_BUFSIZE)
134 
135 #if !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ
136 #define PBUF_POOL_IS_EMPTY()
137 #else /* !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ */
138 
139 #if !NO_SYS
140 #ifndef PBUF_POOL_FREE_OOSEQ_QUEUE_CALL
141 #include "lwip/tcpip.h"
142 #define PBUF_POOL_FREE_OOSEQ_QUEUE_CALL()  do { \
143   if (tcpip_callback_with_block(pbuf_free_ooseq_callback, NULL, 0) != ERR_OK) { \
144       SYS_ARCH_PROTECT(old_level); \
145       pbuf_free_ooseq_pending = 0; \
146       SYS_ARCH_UNPROTECT(old_level); \
147   } } while(0)
148 #endif /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */
149 #endif /* !NO_SYS */
150 
151 volatile u8_t pbuf_free_ooseq_pending;
152 #define PBUF_POOL_IS_EMPTY() pbuf_pool_is_empty()
153 
154 /**
155  * Attempt to reclaim some memory from queued out-of-sequence TCP segments
156  * if we run out of pool pbufs. It's better to give priority to new packets
157  * if we're running out.
158  *
159  * This must be done in the correct thread context; therefore, this function
160  * can only be used with NO_SYS=0 and through tcpip_callback.
161  */
162 #if !NO_SYS
163 static
164 #endif /* !NO_SYS */
165 void
166 pbuf_free_ooseq(void)
167 {
168   struct tcp_pcb* pcb;
169   SYS_ARCH_SET(pbuf_free_ooseq_pending, 0);
170 
171   for (pcb = tcp_active_pcbs; NULL != pcb; pcb = pcb->next) {
172     if (NULL != pcb->ooseq) {
173       /** Free the ooseq pbufs of one PCB only */
174       LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free_ooseq: freeing out-of-sequence pbufs\n"));
175       tcp_segs_free(pcb->ooseq);
176       pcb->ooseq = NULL;
177       return;
178     }
179   }
180 }
181 
182 #if !NO_SYS
183 /**
184  * Just a callback function for tcpip_callback() that calls pbuf_free_ooseq().
185  */
186 static void
187 pbuf_free_ooseq_callback(void *arg)
188 {
189   LWIP_UNUSED_ARG(arg);
190   pbuf_free_ooseq();
191 }
192 #endif /* !NO_SYS */
193 
194 /** Queue a call to pbuf_free_ooseq if not already queued. */
195 static void
196 pbuf_pool_is_empty(void)
197 {
198 #ifndef PBUF_POOL_FREE_OOSEQ_QUEUE_CALL
199   SYS_ARCH_SET(pbuf_free_ooseq_pending, 1);
200 #else /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */
201   u8_t queued;
202   SYS_ARCH_DECL_PROTECT(old_level);
203   SYS_ARCH_PROTECT(old_level);
204   queued = pbuf_free_ooseq_pending;
205   pbuf_free_ooseq_pending = 1;
206   SYS_ARCH_UNPROTECT(old_level);
207 
208   if (!queued) {
209     /* queue a call to pbuf_free_ooseq if not already queued */
210     PBUF_POOL_FREE_OOSEQ_QUEUE_CALL();
211   }
212 #endif /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */
213 }
214 #endif /* !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ */
215 
216 /**
217  * @ingroup pbuf
218  * Allocates a pbuf of the given type (possibly a chain for PBUF_POOL type).
219  *
220  * The actual memory allocated for the pbuf is determined by the
221  * layer at which the pbuf is allocated and the requested size
222  * (from the size parameter).
223  *
224  * @param layer flag to define header size
225  * @param length size of the pbuf's payload
226  * @param type this parameter decides how and where the pbuf
227  * should be allocated as follows:
228  *
229  * - PBUF_RAM: buffer memory for pbuf is allocated as one large
230  *             chunk. This includes protocol headers as well.
231  * - PBUF_ROM: no buffer memory is allocated for the pbuf, even for
232  *             protocol headers. Additional headers must be prepended
233  *             by allocating another pbuf and chaining it in front of
234  *             the ROM pbuf. It is assumed that the memory used is really
235  *             similar to ROM in that it is immutable and will not be
236  *             changed. Memory which is dynamic should generally not
237  *             be attached to PBUF_ROM pbufs. Use PBUF_REF instead.
238  * - PBUF_REF: no buffer memory is allocated for the pbuf, even for
239  *             protocol headers. It is assumed that the pbuf is only
240  *             being used in a single thread. If the pbuf gets queued,
241  *             then pbuf_take should be called to copy the buffer.
242  * - PBUF_POOL: the pbuf is allocated as a pbuf chain, with pbufs from
243  *              the pbuf pool that is allocated during pbuf_init().
244  *
245  * @return the allocated pbuf. If multiple pbufs were allocated, this
246  * is the first pbuf of a pbuf chain.
247  */
248 struct pbuf *
249 pbuf_alloc(pbuf_layer layer, u16_t length, pbuf_type type)
250 {
251   struct pbuf *p, *q, *r;
252   u16_t offset;
253   s32_t rem_len; /* remaining length */
254   LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F")\n", length));
255 
256   /* determine header offset */
257   switch (layer) {
258   case PBUF_TRANSPORT:
259     /* add room for transport (often TCP) layer header */
260     offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN;
261     break;
262   case PBUF_IP:
263     /* add room for IP layer header */
264     offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN;
265     break;
266   case PBUF_LINK:
267     /* add room for link layer header */
268     offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN;
269     break;
270   case PBUF_RAW_TX:
271     /* add room for encapsulating link layer headers (e.g. 802.11) */
272     offset = PBUF_LINK_ENCAPSULATION_HLEN;
273     break;
274   case PBUF_RAW:
275     /* no offset (e.g. RX buffers or chain successors) */
276     offset = 0;
277     break;
278   default:
279     LWIP_ASSERT("pbuf_alloc: bad pbuf layer", 0);
280     return NULL;
281   }
282 
283   switch (type) {
284   case PBUF_POOL:
285     /* allocate head of pbuf chain into p */
286     p = (struct pbuf *)memp_malloc(MEMP_PBUF_POOL);
287     LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc: allocated pbuf %p\n", (void *)p));
288     if (p == NULL) {
289       PBUF_POOL_IS_EMPTY();
290       return NULL;
291     }
292     p->type = type;
293     p->next = NULL;
294     p->if_idx = NETIF_NO_INDEX;
295 
296     /* make the payload pointer point 'offset' bytes into pbuf data memory */
297     p->payload = LWIP_MEM_ALIGN((void *)((u8_t *)p + (SIZEOF_STRUCT_PBUF + offset)));
298     LWIP_ASSERT("pbuf_alloc: pbuf p->payload properly aligned",
299             ((mem_ptr_t)p->payload % MEM_ALIGNMENT) == 0);
300     /* the total length of the pbuf chain is the requested size */
301     p->tot_len = length;
302     /* set the length of the first pbuf in the chain */
303     p->len = LWIP_MIN(length, PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset));
304     LWIP_ASSERT("check p->payload + p->len does not overflow pbuf",
305                 ((u8_t*)p->payload + p->len <=
306                  (u8_t*)p + SIZEOF_STRUCT_PBUF + PBUF_POOL_BUFSIZE_ALIGNED));
307     LWIP_ASSERT("PBUF_POOL_BUFSIZE must be bigger than MEM_ALIGNMENT",
308       (PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset)) > 0 );
309     /* set reference count (needed here in case we fail) */
310     p->ref = 1;
311 
312     /* now allocate the tail of the pbuf chain */
313 
314     /* remember first pbuf for linkage in next iteration */
315     r = p;
316     /* remaining length to be allocated */
317     rem_len = length - p->len;
318     /* any remaining pbufs to be allocated? */
319     while (rem_len > 0) {
320       q = (struct pbuf *)memp_malloc(MEMP_PBUF_POOL);
321       if (q == NULL) {
322         PBUF_POOL_IS_EMPTY();
323         /* free chain so far allocated */
324         pbuf_free(p);
325         /* bail out unsuccessfully */
326         return NULL;
327       }
328       q->type = type;
329       q->flags = 0;
330       q->next = NULL;
331       /* make previous pbuf point to this pbuf */
332       r->next = q;
333       /* set total length of this pbuf and next in chain */
334       LWIP_ASSERT("rem_len < max_u16_t", rem_len < 0xffff);
335       q->tot_len = (u16_t)rem_len;
336       /* this pbuf length is pool size, unless smaller sized tail */
337       q->len = LWIP_MIN((u16_t)rem_len, PBUF_POOL_BUFSIZE_ALIGNED);
338       q->payload = (void *)((u8_t *)q + SIZEOF_STRUCT_PBUF);
339       LWIP_ASSERT("pbuf_alloc: pbuf q->payload properly aligned",
340               ((mem_ptr_t)q->payload % MEM_ALIGNMENT) == 0);
341       LWIP_ASSERT("check p->payload + p->len does not overflow pbuf",
342                   ((u8_t*)p->payload + p->len <=
343                    (u8_t*)p + SIZEOF_STRUCT_PBUF + PBUF_POOL_BUFSIZE_ALIGNED));
344       q->ref = 1;
345       /* calculate remaining length to be allocated */
346       rem_len -= q->len;
347       /* remember this pbuf for linkage in next iteration */
348       r = q;
349     }
350     /* end of chain */
351     /*r->next = NULL;*/
352 
353     break;
354   case PBUF_RAM:
355     {
356       mem_size_t alloc_len = LWIP_MEM_ALIGN_SIZE(SIZEOF_STRUCT_PBUF + offset) + LWIP_MEM_ALIGN_SIZE(length);
357 
358       /* bug #50040: Check for integer overflow when calculating alloc_len */
359       if (alloc_len < LWIP_MEM_ALIGN_SIZE(length)) {
360         return NULL;
361       }
362 
363       /* If pbuf is to be allocated in RAM, allocate memory for it. */
364       p = (struct pbuf*)mem_malloc(alloc_len);
365     }
366 
367     if (p == NULL) {
368       return NULL;
369     }
370     /* Set up internal structure of the pbuf. */
371     p->payload = LWIP_MEM_ALIGN((void *)((u8_t *)p + SIZEOF_STRUCT_PBUF + offset));
372     p->len = p->tot_len = length;
373     p->next = NULL;
374     p->type = type;
375 
376     LWIP_ASSERT("pbuf_alloc: pbuf->payload properly aligned",
377            ((mem_ptr_t)p->payload % MEM_ALIGNMENT) == 0);
378     break;
379   /* pbuf references existing (non-volatile static constant) ROM payload? */
380   case PBUF_ROM:
381   /* pbuf references existing (externally allocated) RAM payload? */
382   case PBUF_REF:
383     /* only allocate memory for the pbuf structure */
384     p = (struct pbuf *)memp_malloc(MEMP_PBUF);
385     if (p == NULL) {
386       LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
387                   ("pbuf_alloc: Could not allocate MEMP_PBUF for PBUF_%s.\n",
388                   (type == PBUF_ROM) ? "ROM" : "REF"));
389       return NULL;
390     }
391     /* caller must set this field properly, afterwards */
392     p->payload = NULL;
393     p->len = p->tot_len = length;
394     p->next = NULL;
395     p->type = type;
396     break;
397   default:
398     LWIP_ASSERT("pbuf_alloc: erroneous type", 0);
399     return NULL;
400   }
401   /* set reference count */
402   p->ref = 1;
403   /* set flags */
404   p->flags = 0;
405   LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F") == %p\n", length, (void *)p));
406   return p;
407 }
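
/*
 * Usage sketch for pbuf_alloc() (illustrative only; "payload_data" and
 * "payload_len" are assumed to exist in the caller): allocate a PBUF_RAM
 * pbuf with room for transport/IP/link headers, fill it, and release it.
 *
 *   struct pbuf *p = pbuf_alloc(PBUF_TRANSPORT, payload_len, PBUF_RAM);
 *   if (p != NULL) {
 *     pbuf_take(p, payload_data, payload_len);
 *     // ... hand the pbuf to the stack (for example udp_send()) ...
 *     pbuf_free(p);   // drop our reference when done
 *   }
 */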
408 
409 #if LWIP_SUPPORT_CUSTOM_PBUF
410 /**
411  * @ingroup pbuf
412  * Initialize a custom pbuf (already allocated).
413  *
414  * @param l flag to define header size
415  * @param length size of the pbuf's payload
416  * @param type type of the pbuf (only used to treat the pbuf accordingly, as
417  *        this function allocates no memory)
418  * @param p pointer to the custom pbuf to initialize (already allocated)
419  * @param payload_mem pointer to the buffer that is used for payload and headers,
420  *        must be at least big enough to hold 'length' plus the header size,
421  *        may be NULL if set later.
422  *        ATTENTION: The caller is responsible for correct alignment of this buffer!!
423  * @param payload_mem_len the size of the 'payload_mem' buffer, must be at least
424  *        big enough to hold 'length' plus the header size
425  */
426 struct pbuf*
427 pbuf_alloced_custom(pbuf_layer l, u16_t length, pbuf_type type, struct pbuf_custom *p,
428                     void *payload_mem, u16_t payload_mem_len)
429 {
430   u16_t offset;
431   LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloced_custom(length=%"U16_F")\n", length));
432 
433   /* determine header offset */
434   switch (l) {
435   case PBUF_TRANSPORT:
436     /* add room for transport (often TCP) layer header */
437     offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN;
438     break;
439   case PBUF_IP:
440     /* add room for IP layer header */
441     offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN;
442     break;
443   case PBUF_LINK:
444     /* add room for link layer header */
445     offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN;
446     break;
447   case PBUF_RAW_TX:
448     /* add room for encapsulating link layer headers (e.g. 802.11) */
449     offset = PBUF_LINK_ENCAPSULATION_HLEN;
450     break;
451   case PBUF_RAW:
452     offset = 0;
453     break;
454   default:
455     LWIP_ASSERT("pbuf_alloced_custom: bad pbuf layer", 0);
456     return NULL;
457   }
458 
459   if (LWIP_MEM_ALIGN_SIZE(offset) + length > payload_mem_len) {
460     LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_WARNING, ("pbuf_alloced_custom(length=%"U16_F") buffer too short\n", length));
461     return NULL;
462   }
463 
464   p->pbuf.next = NULL;
465   if (payload_mem != NULL) {
466     p->pbuf.payload = (u8_t *)payload_mem + LWIP_MEM_ALIGN_SIZE(offset);
467   } else {
468     p->pbuf.payload = NULL;
469   }
470   p->pbuf.flags = PBUF_FLAG_IS_CUSTOM;
471   p->pbuf.len = p->pbuf.tot_len = length;
472   p->pbuf.type = type;
473   p->pbuf.ref = 1;
474   return &p->pbuf;
475 }
476 #endif /* LWIP_SUPPORT_CUSTOM_PBUF */
477 
478 /**
479  * @ingroup pbuf
480  * Shrink a pbuf chain to a desired length.
481  *
482  * @param p pbuf to shrink.
483  * @param new_len desired new length of pbuf chain
484  *
485  * Depending on the desired length, the first few pbufs in a chain might
486  * be skipped and left unchanged. The new last pbuf in the chain will be
487  * resized, and any remaining pbufs will be freed.
488  *
489  * @note If the pbuf is ROM/REF, only the ->tot_len and ->len fields are adjusted.
490  * @note May not be called on a packet queue.
491  *
492  * @note Despite its name, pbuf_realloc cannot grow the size of a pbuf (chain).
493  */
494 void
495 pbuf_realloc(struct pbuf *p, u16_t new_len)
496 {
497   struct pbuf *q;
498   u16_t rem_len; /* remaining length */
499   s32_t grow;
500 
501   LWIP_ASSERT("pbuf_realloc: p != NULL", p != NULL);
502   LWIP_ASSERT("pbuf_realloc: sane p->type", p->type == PBUF_POOL ||
503               p->type == PBUF_ROM ||
504               p->type == PBUF_RAM ||
505               p->type == PBUF_REF);
506 
507   /* desired length larger than current length? */
508   if (new_len >= p->tot_len) {
509     /* enlarging not yet supported */
510     return;
511   }
512 
513   /* the pbuf chain grows by (new_len - p->tot_len) bytes
514    * (which may be negative in case of shrinking) */
515   grow = new_len - p->tot_len;
516 
517   /* first, step over any pbufs that should remain in the chain */
518   rem_len = new_len;
519   q = p;
520   /* should this pbuf be kept? */
521   while (rem_len > q->len) {
522     /* decrease remaining length by pbuf length */
523     rem_len -= q->len;
524     /* decrease total length indicator */
525     LWIP_ASSERT("grow < max_u16_t", grow < 0xffff);
526     q->tot_len += (u16_t)grow;
527     /* proceed to next pbuf in chain */
528     q = q->next;
529     LWIP_ASSERT("pbuf_realloc: q != NULL", q != NULL);
530   }
531   /* we have now reached the new last pbuf (in q) */
532   /* rem_len == desired length for pbuf q */
533 
534   /* shrink allocated memory for PBUF_RAM */
535   /* (other types merely adjust their length fields) */
536   if ((q->type == PBUF_RAM) && (rem_len != q->len)
537 #if LWIP_SUPPORT_CUSTOM_PBUF
538       && ((q->flags & PBUF_FLAG_IS_CUSTOM) == 0)
539 #endif /* LWIP_SUPPORT_CUSTOM_PBUF */
540      ) {
541     /* reallocate and adjust the length of the pbuf that will be split */
542     q = (struct pbuf *)mem_trim(q, (u16_t)((u8_t *)q->payload - (u8_t *)q) + rem_len);
543     LWIP_ASSERT("mem_trim returned q == NULL", q != NULL);
544   }
545   /* adjust length fields for new last pbuf */
546   q->len = rem_len;
547   q->tot_len = q->len;
548 
549   /* any remaining pbufs in chain? */
550   if (q->next != NULL) {
551     /* free remaining pbufs in chain */
552     pbuf_free(q->next);
553   }
554   /* q is the last pbuf in the chain */
555   q->next = NULL;
556 
557 }
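
/*
 * Usage sketch for pbuf_realloc() (illustrative only; "announced_len" is a
 * hypothetical value taken from a protocol header): trim a received pbuf
 * chain down to the length the protocol actually announced.
 *
 *   if (announced_len < p->tot_len) {
 *     pbuf_realloc(p, announced_len);   // shrinks; pbuf_realloc never grows
 *   }
 */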
558 
559 /**
560  * Adjusts the payload pointer to hide or reveal headers in the payload.
561  * @see pbuf_header.
562  *
563  * @param p pbuf to change the header size.
564  * @param header_size_increment Number of bytes to increment header size.
565  * @param force Allow 'header_size_increment > 0' for PBUF_REF/PBUF_ROM types
566  *
567  * @return non-zero on failure, zero on success.
568  *
569  */
570 static u8_t
571 pbuf_header_impl(struct pbuf *p, s16_t header_size_increment, u8_t force)
572 {
573   u16_t type;
574   void *payload;
575   u16_t increment_magnitude;
576 
577   LWIP_ASSERT("p != NULL", p != NULL);
578   if ((header_size_increment == 0) || (p == NULL)) {
579     return 0;
580   }
581 
582   if (header_size_increment < 0) {
583     increment_magnitude = (u16_t)-header_size_increment;
584     /* Check that we aren't going to move off the end of the pbuf */
585     LWIP_ERROR("increment_magnitude <= p->len", (increment_magnitude <= p->len), return 1;);
586   } else {
587     increment_magnitude = (u16_t)header_size_increment;
588     /* Do not allow tot_len to wrap as a result. */
589     if ((u16_t)(increment_magnitude + p->tot_len) < increment_magnitude) {
590       return 1;
591     }
592 #if 0
593     /* Can't assert these as some callers speculatively call
594          pbuf_header() to see if it's OK.  Will return 1 below instead. */
595     /* Check that we've got the correct type of pbuf to work with */
596     LWIP_ASSERT("p->type == PBUF_RAM || p->type == PBUF_POOL",
597                 p->type == PBUF_RAM || p->type == PBUF_POOL);
598     /* Check that we aren't going to move off the beginning of the pbuf */
599     LWIP_ASSERT("p->payload - increment_magnitude >= p + SIZEOF_STRUCT_PBUF",
600                 (u8_t *)p->payload - increment_magnitude >= (u8_t *)p + SIZEOF_STRUCT_PBUF);
601 #endif
602   }
603 
604   type = p->type;
605   /* remember current payload pointer */
606   payload = p->payload;
607 
608   /* pbuf types containing payloads? */
609   if (type == PBUF_RAM || type == PBUF_POOL) {
610     /* set new payload pointer */
611     p->payload = (u8_t *)p->payload - header_size_increment;
612     /* boundary check fails? */
613     if ((u8_t *)p->payload < (u8_t *)p + SIZEOF_STRUCT_PBUF) {
614       LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE,
615         ("pbuf_header: failed as %p < %p (not enough space for new header size)\n",
616         (void *)p->payload, (void *)((u8_t *)p + SIZEOF_STRUCT_PBUF)));
617       /* restore old payload pointer */
618       p->payload = payload;
619       /* bail out unsuccessfully */
620       return 1;
621     }
622   /* pbuf types referring to external payloads? */
623   } else if (type == PBUF_REF || type == PBUF_ROM) {
624     /* hide a header in the payload? */
625     if ((header_size_increment < 0) && (increment_magnitude <= p->len)) {
626       /* increase payload pointer */
627       p->payload = (u8_t *)p->payload - header_size_increment;
628     } else if ((header_size_increment > 0) && force) {
629       p->payload = (u8_t *)p->payload - header_size_increment;
630     } else {
631       /* cannot expand payload to front (yet!)
632        * bail out unsuccessfully */
633       return 1;
634     }
635   } else {
636     /* Unknown type */
637     LWIP_ASSERT("bad pbuf type", 0);
638     return 1;
639   }
640   /* modify pbuf length fields */
641   p->len += header_size_increment;
642   p->tot_len += header_size_increment;
643 
644   LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_header: old %p new %p (%"S16_F")\n",
645     (void *)payload, (void *)p->payload, header_size_increment));
646 
647   return 0;
648 }
649 
650 /**
651  * Adjusts the payload pointer to hide or reveal headers in the payload.
652  *
653  * Adjusts the ->payload pointer so that space for a header
654  * (dis)appears in the pbuf payload.
655  *
656  * The ->payload, ->tot_len and ->len fields are adjusted.
657  *
658  * @param p pbuf to change the header size.
659  * @param header_size_increment Number of bytes to increment header size which
660  * increases the size of the pbuf. New space is on the front.
661  * (Using a negative value decreases the header size.)
662  * If hdr_size_inc is 0, this function does nothing and returns successful.
663  *
664  * PBUF_ROM and PBUF_REF type buffers cannot have their sizes increased, so
665  * the call will fail. A check is made that the increase in header size does
666  * not move the payload pointer in front of the start of the buffer.
667  * @return non-zero on failure, zero on success.
668  *
669  */
670 u8_t
671 pbuf_header(struct pbuf *p, s16_t header_size_increment)
672 {
673    return pbuf_header_impl(p, header_size_increment, 0);
674 }
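
/*
 * Usage sketch for pbuf_header() (illustrative only; MY_HDR_LEN and
 * write_my_header() are hypothetical): reveal space for a header in front of
 * the payload, fill it, and hide it again afterwards.
 *
 *   if (pbuf_header(p, MY_HDR_LEN) == 0) {   // payload now starts MY_HDR_LEN bytes earlier
 *     write_my_header(p->payload);
 *     // ... transmit ...
 *     pbuf_header(p, -MY_HDR_LEN);           // hide the header again
 *   }
 */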
675 
676 /**
677  * Same as pbuf_header but does not check if 'header_size > 0' is allowed.
678  * This is used internally only, to allow PBUF_REF for RX.
679  */
680 u8_t
681 pbuf_header_force(struct pbuf *p, s16_t header_size_increment)
682 {
683    return pbuf_header_impl(p, header_size_increment, 1);
684 }
685 
686 /** Similar to pbuf_header(-size) but de-refs header pbufs for (size >= p->len)
687  *
688  * @param q pbufs to operate on
689  * @param size The number of bytes to remove from the beginning of the pbuf list.
690  *             While size >= p->len, pbufs are freed.
691  *        ATTENTION: this is the opposite direction as @ref pbuf_header, but
692  *                   takes an u16_t not s16_t!
693  * @return the new head pbuf
694  */
695 struct pbuf*
696 pbuf_free_header(struct pbuf *q, u16_t size)
697 {
698   struct pbuf *p = q;
699   u16_t free_left = size;
700   while (free_left && p) {
701     s16_t free_len = (free_left > INT16_MAX ? INT16_MAX : (s16_t)free_left);
702     if (free_len >= p->len) {
703       struct pbuf *f = p;
704       free_left -= p->len;
705       p = p->next;
706       f->next = 0;
707       pbuf_free(f);
708     } else {
709       pbuf_header(p, -free_len);
710       free_left -= free_len;
711     }
712   }
713   return p;
714 }
715 
716 /**
717  * @ingroup pbuf
718  * Dereference a pbuf chain or queue and deallocate any no-longer-used
719  * pbufs at the head of this chain or queue.
720  *
721  * Decrements the pbuf reference count. If it reaches zero, the pbuf is
722  * deallocated.
723  *
724  * For a pbuf chain, this is repeated for each pbuf in the chain,
725  * up to the first pbuf which has a non-zero reference count after
726  * decrementing. So, when all reference counts are one, the whole
727  * chain is free'd.
728  *
729  * @param p The pbuf (chain) to be dereferenced.
730  *
731  * @return the number of pbufs that were de-allocated
732  * from the head of the chain.
733  *
734  * @note MUST NOT be called on a packet queue (Not verified to work yet).
735  * @note the reference counter of a pbuf equals the number of pointers
736  * that refer to the pbuf (or into the pbuf).
737  *
738  * @internal examples:
739  *
740  * Assuming existing chains a->b->c with the following reference
741  * counts, calling pbuf_free(a) results in:
742  *
743  * 1->2->3 becomes ...1->3
744  * 3->3->3 becomes 2->3->3
745  * 1->1->2 becomes ......1
746  * 2->1->1 becomes 1->1->1
747  * 1->1->1 becomes .......
748  *
749  */
750 u8_t
751 pbuf_free(struct pbuf *p)
752 {
753   u16_t type;
754   struct pbuf *q;
755   u8_t count;
756 
757   if (p == NULL) {
758     LWIP_ASSERT("p != NULL", p != NULL);
759     /* if assertions are disabled, proceed with debug output */
760     LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
761       ("pbuf_free(p == NULL) was called.\n"));
762     return 0;
763   }
764   LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free(%p)\n", (void *)p));
765 
766   PERF_START;
767 
768   LWIP_ASSERT("pbuf_free: sane type",
769     p->type == PBUF_RAM || p->type == PBUF_ROM ||
770     p->type == PBUF_REF || p->type == PBUF_POOL);
771 
772   count = 0;
773   /* de-allocate all consecutive pbufs from the head of the chain that
774    * obtain a zero reference count after decrementing*/
775   while (p != NULL) {
776     LWIP_PBUF_REF_T ref;
777     SYS_ARCH_DECL_PROTECT(old_level);
778     /* Since decrementing ref cannot be guaranteed to be a single machine operation
779      * we must protect it. We put the new ref into a local variable so that
780      * the check below does not need to be protected again. */
781     SYS_ARCH_PROTECT(old_level);
782     /* all pbufs in a chain are referenced at least once */
783     LWIP_ASSERT("pbuf_free: p->ref > 0", p->ref > 0);
784     /* decrease reference count (number of pointers to pbuf) */
785     ref = --(p->ref);
786     SYS_ARCH_UNPROTECT(old_level);
787     /* this pbuf is no longer referenced to? */
788     if (ref == 0) {
789       /* remember next pbuf in chain for next iteration */
790       q = p->next;
791       LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free: deallocating %p\n", (void *)p));
792       type = p->type;
793 #if LWIP_SUPPORT_CUSTOM_PBUF
794       /* is this a custom pbuf? */
795       if ((p->flags & PBUF_FLAG_IS_CUSTOM) != 0) {
796         struct pbuf_custom *pc = (struct pbuf_custom*)p;
797         LWIP_ASSERT("pc->custom_free_function != NULL", pc->custom_free_function != NULL);
798         pc->custom_free_function(p);
799       } else
800 #endif /* LWIP_SUPPORT_CUSTOM_PBUF */
801       {
802         /* is this a pbuf from the pool? */
803         if (type == PBUF_POOL) {
804           memp_free(MEMP_PBUF_POOL, p);
805         /* is this a ROM or RAM referencing pbuf? */
806         } else if (type == PBUF_ROM || type == PBUF_REF) {
807           memp_free(MEMP_PBUF, p);
808         /* type == PBUF_RAM */
809         } else {
810           mem_free(p);
811         }
812       }
813       count++;
814       /* proceed to next pbuf */
815       p = q;
816     /* p->ref > 0, this pbuf is still referenced to */
817     /* (and so the remaining pbufs in chain as well) */
818     } else {
819       LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free: %p has ref %"U16_F", ending here.\n", (void *)p, ref));
820       /* stop walking through the chain */
821       p = NULL;
822     }
823   }
824   PERF_STOP("pbuf_free");
825   /* return number of de-allocated pbufs */
826   return count;
827 }
828 
829 /**
830  * Count number of pbufs in a chain
831  *
832  * @param p first pbuf of chain
833  * @return the number of pbufs in a chain
834  */
835 u16_t
836 pbuf_clen(const struct pbuf *p)
837 {
838   u16_t len;
839 
840   len = 0;
841   while (p != NULL) {
842     ++len;
843     p = p->next;
844   }
845   return len;
846 }
847 
848 /**
849  * @ingroup pbuf
850  * Increment the reference count of the pbuf.
851  *
852  * @param p pbuf to increase reference counter of
853  *
854  */
855 void
856 pbuf_ref(struct pbuf *p)
857 {
858   /* pbuf given? */
859   if (p != NULL) {
860     SYS_ARCH_INC(p->ref, 1);
861     LWIP_ASSERT("pbuf ref overflow", p->ref > 0);
862   }
863 }
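
/*
 * Reference-counting sketch (illustrative only): a pbuf is only deallocated
 * once every pbuf_ref() has been matched by a pbuf_free().
 *
 *   struct pbuf *p = pbuf_alloc(PBUF_RAW, 64, PBUF_RAM);   // p->ref == 1
 *   pbuf_ref(p);                                           // p->ref == 2
 *   pbuf_free(p);     // returns 0: p->ref == 1, memory still valid
 *   pbuf_free(p);     // returns 1: pbuf deallocated
 */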
864 
865 /**
866  * @ingroup pbuf
867  * Concatenate two pbufs (each may be a pbuf chain) and take over
868  * the caller's reference of the tail pbuf.
869  *
870  * @note The caller MAY NOT reference the tail pbuf afterwards.
871  * Use pbuf_chain() for that purpose.
872  *
873  * @see pbuf_chain()
874  */
875 void
876 pbuf_cat(struct pbuf *h, struct pbuf *t)
877 {
878   struct pbuf *p;
879 
880   LWIP_ERROR("(h != NULL) && (t != NULL) (programmer violates API)",
881              ((h != NULL) && (t != NULL)), return;);
882 
883   /* proceed to last pbuf of chain */
884   for (p = h; p->next != NULL; p = p->next) {
885     /* add total length of second chain to all totals of first chain */
886     p->tot_len += t->tot_len;
887   }
888   /* { p is last pbuf of first h chain, p->next == NULL } */
889   LWIP_ASSERT("p->tot_len == p->len (of last pbuf in chain)", p->tot_len == p->len);
890   LWIP_ASSERT("p->next == NULL", p->next == NULL);
891   /* add total length of second chain to last pbuf total of first chain */
892   p->tot_len += t->tot_len;
893   /* chain last pbuf of head (p) with first of tail (t) */
894   p->next = t;
895   /* p->next now references t, but the caller will drop its reference to t,
896    * so on balance there is no change to the reference count of t.
897    */
898 }
899 
900 /**
901  * @ingroup pbuf
902  * Chain two pbufs (or pbuf chains) together.
903  *
904  * The caller MUST call pbuf_free(t) once it has stopped
905  * using it. Use pbuf_cat() instead if you no longer use t.
906  *
907  * @param h head pbuf (chain)
908  * @param t tail pbuf (chain)
909  * @note The pbufs MUST belong to the same packet.
910  * @note MAY NOT be called on a packet queue.
911  *
912  * The ->tot_len fields of all pbufs of the head chain are adjusted.
913  * The ->next field of the last pbuf of the head chain is adjusted.
914  * The ->ref field of the first pbuf of the tail chain is adjusted.
915  *
916  */
917 void
918 pbuf_chain(struct pbuf *h, struct pbuf *t)
919 {
920   pbuf_cat(h, t);
921   /* t is now referenced by h */
922   pbuf_ref(t);
923   LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_chain: %p references %p\n", (void *)h, (void *)t));
924 }
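
/*
 * Sketch of the pbuf_cat()/pbuf_chain() distinction (illustrative only):
 *
 *   pbuf_cat(h, t);      // h takes over the caller's reference to t;
 *                        // the caller must no longer use or free t
 *
 *   pbuf_chain(h, t);    // t->ref is incremented; the caller keeps its own
 *   // ...               // reference and must eventually call
 *   pbuf_free(t);        // pbuf_free(t) itself
 */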
925 
926 /**
927  * Dechains the first pbuf from its succeeding pbufs in the chain.
928  *
929  * Makes p->tot_len field equal to p->len.
930  * @param p pbuf to dechain
931  * @return remainder of the pbuf chain, or NULL if it was de-allocated.
932  * @note May not be called on a packet queue.
933  */
934 struct pbuf *
935 pbuf_dechain(struct pbuf *p)
936 {
937   struct pbuf *q;
938   u8_t tail_gone = 1;
939   /* tail */
940   q = p->next;
941   /* pbuf has successor in chain? */
942   if (q != NULL) {
943     /* assert tot_len invariant: (p->tot_len == p->len + (p->next? p->next->tot_len: 0)) */
944     LWIP_ASSERT("p->tot_len == p->len + q->tot_len", q->tot_len == p->tot_len - p->len);
945     /* enforce invariant if assertion is disabled */
946     q->tot_len = p->tot_len - p->len;
947     /* decouple pbuf from remainder */
948     p->next = NULL;
949     /* total length of pbuf p is its own length only */
950     p->tot_len = p->len;
951     /* q is no longer referenced by p, free it */
952     LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_dechain: unreferencing %p\n", (void *)q));
953     tail_gone = pbuf_free(q);
954     if (tail_gone > 0) {
955       LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE,
956                   ("pbuf_dechain: deallocated %p (as it is no longer referenced)\n", (void *)q));
957     }
958     /* return remaining tail or NULL if deallocated */
959   }
960   /* assert tot_len invariant: (p->tot_len == p->len + (p->next? p->next->tot_len: 0)) */
961   LWIP_ASSERT("p->tot_len == p->len", p->tot_len == p->len);
962   return ((tail_gone > 0) ? NULL : q);
963 }
964 
965 /**
966  * @ingroup pbuf
967  * Create PBUF_RAM copies of pbufs.
968  *
969  * Used to queue packets on behalf of the lwIP stack, such as
970  * ARP based queueing.
971  *
972  * @note You MUST explicitly use p = pbuf_take(p);
973  *
974  * @note Only one packet is copied, no packet queue!
975  *
976  * @param p_to pbuf destination of the copy
977  * @param p_from pbuf source of the copy
978  *
979  * @return ERR_OK if pbuf was copied
980  *         ERR_ARG if one of the pbufs is NULL or p_to is not big
981  *                 enough to hold p_from
982  */
983 err_t
984 pbuf_copy(struct pbuf *p_to, const struct pbuf *p_from)
985 {
986   u16_t offset_to=0, offset_from=0, len;
987 
988   LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_copy(%p, %p)\n",
989     (const void*)p_to, (const void*)p_from));
990 
991   /* is the target big enough to hold the source? */
992   LWIP_ERROR("pbuf_copy: target not big enough to hold source", ((p_to != NULL) &&
993              (p_from != NULL) && (p_to->tot_len >= p_from->tot_len)), return ERR_ARG;);
994 
995   /* iterate through pbuf chain */
996   do
997   {
998     /* copy one part of the original chain */
999     if ((p_to->len - offset_to) >= (p_from->len - offset_from)) {
1000       /* complete current p_from fits into current p_to */
1001       len = p_from->len - offset_from;
1002     } else {
1003       /* current p_from does not fit into current p_to */
1004       len = p_to->len - offset_to;
1005     }
1006     MEMCPY((u8_t*)p_to->payload + offset_to, (u8_t*)p_from->payload + offset_from, len);
1007     offset_to += len;
1008     offset_from += len;
1009     LWIP_ASSERT("offset_to <= p_to->len", offset_to <= p_to->len);
1010     LWIP_ASSERT("offset_from <= p_from->len", offset_from <= p_from->len);
1011     if (offset_from >= p_from->len) {
1012       /* on to next p_from (if any) */
1013       offset_from = 0;
1014       p_from = p_from->next;
1015     }
1016     if (offset_to == p_to->len) {
1017       /* on to next p_to (if any) */
1018       offset_to = 0;
1019       p_to = p_to->next;
1020       LWIP_ERROR("p_to != NULL", (p_to != NULL) || (p_from == NULL) , return ERR_ARG;);
1021     }
1022 
1023     if ((p_from != NULL) && (p_from->len == p_from->tot_len)) {
1024       /* don't copy more than one packet! */
1025       LWIP_ERROR("pbuf_copy() does not allow packet queues!",
1026                  (p_from->next == NULL), return ERR_VAL;);
1027     }
1028     if ((p_to != NULL) && (p_to->len == p_to->tot_len)) {
1029       /* don't copy more than one packet! */
1030       LWIP_ERROR("pbuf_copy() does not allow packet queues!",
1031                   (p_to->next == NULL), return ERR_VAL;);
1032     }
1033   } while (p_from);
1034   LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_copy: end of chain reached.\n"));
1035   return ERR_OK;
1036 }
1037 
1038 /**
1039  * @ingroup pbuf
1040  * Copy (part of) the contents of a packet buffer
1041  * to an application supplied buffer.
1042  *
1043  * @param buf the pbuf from which to copy data
1044  * @param dataptr the application supplied buffer
1045  * @param len length of data to copy (dataptr must be big enough). No more
1046  * than buf->tot_len will be copied, irrespective of len
1047  * @param offset offset into the packet buffer from where to begin copying len bytes
1048  * @return the number of bytes copied, or 0 on failure
1049  */
1050 u16_t
1051 pbuf_copy_partial(const struct pbuf *buf, void *dataptr, u16_t len, u16_t offset)
1052 {
1053   const struct pbuf *p;
1054   u16_t left;
1055   u16_t buf_copy_len;
1056   u16_t copied_total = 0;
1057 
1058   LWIP_ERROR("pbuf_copy_partial: invalid buf", (buf != NULL), return 0;);
1059   LWIP_ERROR("pbuf_copy_partial: invalid dataptr", (dataptr != NULL), return 0;);
1060 
1061   left = 0;
1062 
1063   if ((buf == NULL) || (dataptr == NULL)) {
1064     return 0;
1065   }
1066 
1067   /* Note some systems use byte copy if dataptr or one of the pbuf payload pointers are unaligned. */
1068   for (p = buf; len != 0 && p != NULL; p = p->next) {
1069     if ((offset != 0) && (offset >= p->len)) {
1070       /* don't copy from this buffer -> on to the next */
1071       offset -= p->len;
1072     } else {
1073       /* copy from this buffer. maybe only partially. */
1074       buf_copy_len = p->len - offset;
1075       if (buf_copy_len > len) {
1076         buf_copy_len = len;
1077       }
1078       /* copy the necessary parts of the buffer */
1079       MEMCPY(&((char*)dataptr)[left], &((char*)p->payload)[offset], buf_copy_len);
1080       copied_total += buf_copy_len;
1081       left += buf_copy_len;
1082       len -= buf_copy_len;
1083       offset = 0;
1084     }
1085   }
1086   return copied_total;
1087 }
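
/*
 * Usage sketch for pbuf_copy_partial() (illustrative only): extract a 20-byte
 * field starting at chain-wide offset 14 into a flat buffer on the stack,
 * regardless of how the chain is segmented.
 *
 *   u8_t hdr[20];
 *   if (pbuf_copy_partial(p, hdr, sizeof(hdr), 14) == sizeof(hdr)) {
 *     // hdr now holds a contiguous copy of bytes 14..33 of the packet
 *   }
 */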
1088 
1089 /**
1090  * @ingroup pbuf
1091  * Get part of a pbuf's payload as contiguous memory. The returned memory is
1092  * either a pointer into the pbuf's payload or, if split over multiple pbufs,
1093  * a copy into the user-supplied buffer.
1094  *
1095  * @param p the pbuf from which to copy data
1096  * @param buffer the application supplied buffer
1097  * @param bufsize size of the application supplied buffer
1098  * @param len length of data to copy (buffer must be big enough). No more
1099  * than p->tot_len will be copied, irrespective of len
1100  * @param offset offset into the packet buffer from where to begin copying len bytes
1101  * @return the number of bytes copied, or 0 on failure
1102  */
1103 void *
1104 pbuf_get_contiguous(const struct pbuf *p, void *buffer, size_t bufsize, u16_t len, u16_t offset)
1105 {
1106   const struct pbuf *q;
1107 
1108   LWIP_ERROR("pbuf_get_contiguous: invalid buf", (p != NULL), return NULL;);
1109   LWIP_ERROR("pbuf_get_contiguous: invalid dataptr", (buffer != NULL), return NULL;);
1110   LWIP_ERROR("pbuf_get_contiguous: invalid dataptr", (bufsize >= len), return NULL;);
1111 
1112   for (q = p; q != NULL; q = q->next) {
1113     if ((offset != 0) && (offset >= q->len)) {
1114       /* don't copy from this buffer -> on to the next */
1115      offset -= q->len;
1116     } else {
1117       if (q->len >= (offset + len)) {
1118         /* all data in this pbuf, return zero-copy */
1119         return (u8_t*)q->payload + offset;
1120       }
1121       /* need to copy */
1122       if (pbuf_copy_partial(q, buffer, len, offset) != len) {
1123         /* copying failed: pbuf is too short */
1124         return NULL;
1125       }
1126       return buffer;
1127     }
1128   }
1129   /* pbuf is too short (offset does not fit in) */
1130   return NULL;
1131 }
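
/*
 * Usage sketch for pbuf_get_contiguous() (illustrative only): obtain a
 * contiguous view of a 4-byte field at offset 8 that may or may not straddle
 * a pbuf boundary; a copy is only made when necessary.
 *
 *   u8_t storage[4];
 *   const u8_t *field = (const u8_t *)pbuf_get_contiguous(p, storage,
 *                                        sizeof(storage), 4, 8);
 *   if (field != NULL) {
 *     // field points either into the pbuf payload or into 'storage'
 *   }
 */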
1132 
1133 #if LWIP_TCP && TCP_QUEUE_OOSEQ && LWIP_WND_SCALE
1134 /**
1135  * This method modifies a 'pbuf chain', so that its total length is
1136  * smaller than 64K. The remainder of the original pbuf chain is stored
1137  * in *rest.
1138  * This function never creates new pbufs, but splits an existing chain
1139  * in two parts. The tot_len of the modified packet queue will likely be
1140  * smaller than 64K.
1141  * 'packet queues' are not supported by this function.
1142  *
1143  * @param p the pbuf queue to be split
1144  * @param rest pointer to store the remainder (after the first 64K)
1145  */
1146 void pbuf_split_64k(struct pbuf *p, struct pbuf **rest)
1147 {
1148   *rest = NULL;
1149   if ((p != NULL) && (p->next != NULL)) {
1150     u16_t tot_len_front = p->len;
1151     struct pbuf *i = p;
1152     struct pbuf *r = p->next;
1153 
1154     /* continue until the total length (summed up as u16_t) overflows */
1155     while ((r != NULL) && ((u16_t)(tot_len_front + r->len) > tot_len_front)) {
1156       tot_len_front += r->len;
1157       i = r;
1158       r = r->next;
1159     }
1160     /* i now points to last packet of the first segment. Set next
1161        pointer to NULL */
1162     i->next = NULL;
1163 
1164     if (r != NULL) {
1165       /* Update the tot_len field in the first part */
1166       for (i = p; i != NULL; i = i->next) {
1167         i->tot_len -= r->tot_len;
1168         LWIP_ASSERT("tot_len/len mismatch in last pbuf",
1169                     (i->next != NULL) || (i->tot_len == i->len));
1170       }
1171       if (p->flags & PBUF_FLAG_TCP_FIN) {
1172         r->flags |= PBUF_FLAG_TCP_FIN;
1173       }
1174 
1175       /* tot_len field in rest does not need modifications */
1176       /* reference counters do not need modifications */
1177       *rest = r;
1178     }
1179   }
1180 }
1181 #endif /* LWIP_TCP && TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */
1182 
1183 /* Actual implementation of pbuf_skip() but returning const pointer... */
1184 static const struct pbuf*
1185 pbuf_skip_const(const struct pbuf* in, u16_t in_offset, u16_t* out_offset)
1186 {
1187   u16_t offset_left = in_offset;
1188   const struct pbuf* q = in;
1189 
1190   /* get the correct pbuf */
1191   while ((q != NULL) && (q->len <= offset_left)) {
1192     offset_left -= q->len;
1193     q = q->next;
1194   }
1195   if (out_offset != NULL) {
1196     *out_offset = offset_left;
1197   }
1198   return q;
1199 }
1200 
1201 /**
1202  * @ingroup pbuf
1203  * Skip a number of bytes at the start of a pbuf
1204  *
1205  * @param in input pbuf
1206  * @param in_offset offset to skip
1207  * @param out_offset resulting offset in the returned pbuf
1208  * @return the pbuf in the queue where the offset is
1209  */
1210 struct pbuf*
1211 pbuf_skip(struct pbuf* in, u16_t in_offset, u16_t* out_offset)
1212 {
1213   const struct pbuf* out = pbuf_skip_const(in, in_offset, out_offset);
1214   return LWIP_CONST_CAST(struct pbuf*, out);
1215 }
1216 
1217 /**
1218  * @ingroup pbuf
1219  * Copy application supplied data into a pbuf.
1220  * This function can only be used to copy the equivalent of buf->tot_len data.
1221  *
1222  * @param buf pbuf to fill with data
1223  * @param dataptr application supplied data buffer
1224  * @param len length of the application supplied data buffer
1225  *
1226  * @return ERR_OK if successful, ERR_MEM if the pbuf is not big enough
1227  */
1228 err_t
1229 pbuf_take(struct pbuf *buf, const void *dataptr, u16_t len)
1230 {
1231   struct pbuf *p;
1232   u16_t buf_copy_len;
1233   u16_t total_copy_len = len;
1234   u16_t copied_total = 0;
1235 
1236   LWIP_ERROR("pbuf_take: invalid buf", (buf != NULL), return ERR_ARG;);
1237   LWIP_ERROR("pbuf_take: invalid dataptr", (dataptr != NULL), return ERR_ARG;);
1238   LWIP_ERROR("pbuf_take: buf not large enough", (buf->tot_len >= len), return ERR_MEM;);
1239 
1240   if ((buf == NULL) || (dataptr == NULL) || (buf->tot_len < len)) {
1241     return ERR_ARG;
1242   }
1243 
1244   /* Note some systems use byte copy if dataptr or one of the pbuf payload pointers are unaligned. */
1245   for (p = buf; total_copy_len != 0; p = p->next) {
1246     LWIP_ASSERT("pbuf_take: invalid pbuf", p != NULL);
1247     buf_copy_len = total_copy_len;
1248     if (buf_copy_len > p->len) {
1249       /* this pbuf cannot hold all remaining data */
1250       buf_copy_len = p->len;
1251     }
1252     /* copy the necessary parts of the buffer */
1253     MEMCPY(p->payload, &((const char*)dataptr)[copied_total], buf_copy_len);
1254     total_copy_len -= buf_copy_len;
1255     copied_total += buf_copy_len;
1256   }
1257   LWIP_ASSERT("did not copy all data", total_copy_len == 0 && copied_total == len);
1258   return ERR_OK;
1259 }
1260 
1261 /**
1262  * @ingroup pbuf
1263  * Same as pbuf_take() but puts data at an offset
1264  *
1265  * @param buf pbuf to fill with data
1266  * @param dataptr application supplied data buffer
1267  * @param len length of the application supplied data buffer
1268  * @param offset offset in pbuf where to copy dataptr to
1269  *
1270  * @return ERR_OK if successful, ERR_MEM if the pbuf is not big enough
1271  */
1272 err_t
1273 pbuf_take_at(struct pbuf *buf, const void *dataptr, u16_t len, u16_t offset)
1274 {
1275   u16_t target_offset;
1276   struct pbuf* q = pbuf_skip(buf, offset, &target_offset);
1277 
1278   /* return requested data if pbuf is OK */
1279   if ((q != NULL) && (q->tot_len >= target_offset + len)) {
1280     u16_t remaining_len = len;
1281     const u8_t* src_ptr = (const u8_t*)dataptr;
1282     /* copy the part that goes into the first pbuf */
1283     u16_t first_copy_len = LWIP_MIN(q->len - target_offset, len);
1284     MEMCPY(((u8_t*)q->payload) + target_offset, dataptr, first_copy_len);
1285     remaining_len -= first_copy_len;
1286     src_ptr += first_copy_len;
1287     if (remaining_len > 0) {
1288       return pbuf_take(q->next, src_ptr, remaining_len);
1289     }
1290     return ERR_OK;
1291   }
1292   return ERR_MEM;
1293 }
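
/*
 * Usage sketch for pbuf_take()/pbuf_take_at() (illustrative only; "hdr",
 * "body" and their lengths are assumed caller buffers): fill a pre-allocated
 * pbuf piece by piece.
 *
 *   struct pbuf *p = pbuf_alloc(PBUF_RAW, hdr_len + body_len, PBUF_RAM);
 *   if (p != NULL) {
 *     pbuf_take(p, hdr, hdr_len);                   // copy into the front
 *     pbuf_take_at(p, body, body_len, hdr_len);     // copy behind the header
 *   }
 */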
1294 
1295 /**
1296  * @ingroup pbuf
1297  * Creates a single pbuf out of a queue of pbufs.
1298  *
1299  * @remark: Either the source pbuf 'p' is freed by this function or the original
1300  *          pbuf 'p' is returned, therefore the caller has to check the result!
1301  *
1302  * @param p the source pbuf
1303  * @param layer pbuf_layer of the new pbuf
1304  *
1305  * @return a new, single pbuf (p->next is NULL)
1306  *         or the old pbuf if allocation fails
1307  */
1308 struct pbuf*
1309 pbuf_coalesce(struct pbuf *p, pbuf_layer layer)
1310 {
1311   struct pbuf *q;
1312   err_t err;
1313   if (p->next == NULL) {
1314     return p;
1315   }
1316   q = pbuf_alloc(layer, p->tot_len, PBUF_RAM);
1317   if (q == NULL) {
1318     /* @todo: what do we do now? */
1319     return p;
1320   }
1321   err = pbuf_copy(q, p);
1322   LWIP_UNUSED_ARG(err); /* in case of LWIP_NOASSERT */
1323   LWIP_ASSERT("pbuf_copy failed", err == ERR_OK);
1324   pbuf_free(p);
1325   return q;
1326 }
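
/*
 * Usage sketch for pbuf_coalesce() (illustrative only): flatten a chain into
 * a single PBUF_RAM pbuf. The result must be assigned back, since the
 * original pbuf is either freed (on success) or returned unchanged (on
 * allocation failure).
 *
 *   p = pbuf_coalesce(p, PBUF_RAW);
 *   if (p->next == NULL) {
 *     // p is now a single contiguous pbuf
 *   }
 */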
1327 
1328 #if LWIP_CHECKSUM_ON_COPY
1329 /**
1330  * Copies data into a single pbuf (*not* into a pbuf queue!) and updates
1331  * the checksum while copying
1332  *
1333  * @param p the pbuf to copy data into
1334  * @param start_offset offset of p->payload where to copy the data to
1335  * @param dataptr data to copy into the pbuf
1336  * @param len length of data to copy into the pbuf
1337  * @param chksum pointer to the checksum which is updated
1338  * @return ERR_OK if successful, another error if the data does not fit
1339  *         within the (first) pbuf (no pbuf queues!)
1340  */
1341 err_t
1342 pbuf_fill_chksum(struct pbuf *p, u16_t start_offset, const void *dataptr,
1343                  u16_t len, u16_t *chksum)
1344 {
1345   u32_t acc;
1346   u16_t copy_chksum;
1347   char *dst_ptr;
1348   LWIP_ASSERT("p != NULL", p != NULL);
1349   LWIP_ASSERT("dataptr != NULL", dataptr != NULL);
1350   LWIP_ASSERT("chksum != NULL", chksum != NULL);
1351   LWIP_ASSERT("len != 0", len != 0);
1352 
1353   if ((start_offset >= p->len) || (start_offset + len > p->len)) {
1354     return ERR_ARG;
1355   }
1356 
1357   dst_ptr = ((char*)p->payload) + start_offset;
1358   copy_chksum = LWIP_CHKSUM_COPY(dst_ptr, dataptr, len);
1359   if ((start_offset & 1) != 0) {
1360     copy_chksum = SWAP_BYTES_IN_WORD(copy_chksum);
1361   }
1362   acc = *chksum;
1363   acc += copy_chksum;
1364   *chksum = FOLD_U32T(acc);
1365   return ERR_OK;
1366 }
1367 #endif /* LWIP_CHECKSUM_ON_COPY */
1368 
1369 /**
1370  * @ingroup pbuf
1371  * Get one byte from the specified position in a pbuf
1372  * WARNING: returns zero for offset >= p->tot_len
1373  *
1374  * @param p pbuf to parse
1375  * @param offset offset into p of the byte to return
1376  * @return byte at an offset into p OR ZERO IF 'offset' >= p->tot_len
1377  */
1378 u8_t
1379 pbuf_get_at(const struct pbuf* p, u16_t offset)
1380 {
1381   int ret = pbuf_try_get_at(p, offset);
1382   if (ret >= 0) {
1383     return (u8_t)ret;
1384   }
1385   return 0;
1386 }
1387 
1388 /**
1389  * @ingroup pbuf
1390  * Get one byte from the specified position in a pbuf
1391  *
1392  * @param p pbuf to parse
1393  * @param offset offset into p of the byte to return
1394  * @return byte at an offset into p [0..0xFF] OR negative if 'offset' >= p->tot_len
1395  */
1396 int
1397 pbuf_try_get_at(const struct pbuf* p, u16_t offset)
1398 {
1399   u16_t q_idx;
1400   const struct pbuf* q = pbuf_skip_const(p, offset, &q_idx);
1401 
1402   /* return requested data if pbuf is OK */
1403   if ((q != NULL) && (q->len > q_idx)) {
1404     return ((u8_t*)q->payload)[q_idx];
1405   }
1406   return -1;
1407 }
1408 
1409 /**
1410  * @ingroup pbuf
1411  * Put one byte to the specified position in a pbuf
1412  * WARNING: silently ignores offset >= p->tot_len
1413  *
1414  * @param p pbuf to fill
1415  * @param offset offset into p of the byte to write
1416  * @param data byte to write at an offset into p
1417  */
1418 void
1419 pbuf_put_at(struct pbuf* p, u16_t offset, u8_t data)
1420 {
1421   u16_t q_idx;
1422   struct pbuf* q = pbuf_skip(p, offset, &q_idx);
1423 
1424   /* write requested data if pbuf is OK */
1425   if ((q != NULL) && (q->len > q_idx)) {
1426     ((u8_t*)q->payload)[q_idx] = data;
1427   }
1428 }
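
/*
 * Usage sketch for the byte accessors (illustrative only): read and patch a
 * single byte at a chain-wide offset without worrying about pbuf boundaries.
 *
 *   int b = pbuf_try_get_at(p, 6);     // negative if offset >= p->tot_len
 *   if (b >= 0) {
 *     pbuf_put_at(p, 6, (u8_t)(b | 0x80));
 *   }
 */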
1429 
1430 /**
1431  * @ingroup pbuf
1432  * Compare pbuf contents at specified offset with memory s2, both of length n
1433  *
1434  * @param p pbuf to compare
1435  * @param offset offset into p at which to start comparing
1436  * @param s2 buffer to compare
1437  * @param n length of buffer to compare
1438  * @return zero if equal, nonzero otherwise
1439  *         (0xffff if p is too short, diffoffset+1 otherwise)
1440  */
1441 u16_t
1442 pbuf_memcmp(const struct pbuf* p, u16_t offset, const void* s2, u16_t n)
1443 {
1444   u16_t start = offset;
1445   const struct pbuf* q = p;
1446   u16_t i;
1447 
1448   /* pbuf long enough to perform check? */
1449   if(p->tot_len < (offset + n)) {
1450     return 0xffff;
1451   }
1452 
1453   /* get the correct pbuf from chain. We know it succeeds because of p->tot_len check above. */
1454   while ((q != NULL) && (q->len <= start)) {
1455     start -= q->len;
1456     q = q->next;
1457   }
1458 
1459   /* return requested data if pbuf is OK */
1460   for (i = 0; i < n; i++) {
1461     /* We know pbuf_get_at() succeeds because of p->tot_len check above. */
1462     u8_t a = pbuf_get_at(q, start + i);
1463     u8_t b = ((const u8_t*)s2)[i];
1464     if (a != b) {
1465       return i+1;
1466     }
1467   }
1468   return 0;
1469 }
1470 
1471 /**
1472  * @ingroup pbuf
1473  * Find occurrence of mem (with length mem_len) in pbuf p, starting at offset
1474  * start_offset.
1475  *
1476  * @param p pbuf to search, maximum length is 0xFFFE since 0xFFFF is used as
1477  *        return value 'not found'
1478  * @param mem search for the contents of this buffer
1479  * @param mem_len length of 'mem'
1480  * @param start_offset offset into p at which to start searching
1481  * @return 0xFFFF if substr was not found in p or the index where it was found
1482  */
1483 u16_t
1484 pbuf_memfind(const struct pbuf* p, const void* mem, u16_t mem_len, u16_t start_offset)
1485 {
1486   u16_t i;
1487   u16_t max = p->tot_len - mem_len;
1488   if (p->tot_len >= mem_len + start_offset) {
1489     for (i = start_offset; i <= max; i++) {
1490       u16_t plus = pbuf_memcmp(p, i, mem, mem_len);
1491       if (plus == 0) {
1492         return i;
1493       }
1494     }
1495   }
1496   return 0xFFFF;
1497 }
1498 
1499 /**
1500  * Find occurrence of substr with length substr_len in pbuf p, start at offset
1501  * start_offset
1502  * WARNING: in contrast to strstr(), this one does not stop at the first \0 in
1503  * the pbuf/source string!
1504  *
1505  * @param p pbuf to search, maximum length is 0xFFFE since 0xFFFF is used as
1506  *        return value 'not found'
1507  * @param substr string to search for in p, maximum length is 0xFFFE
1508  * @return 0xFFFF if substr was not found in p or the index where it was found
1509  */
1510 u16_t
1511 pbuf_strstr(const struct pbuf* p, const char* substr)
1512 {
1513   size_t substr_len;
1514   if ((substr == NULL) || (substr[0] == 0) || (p->tot_len == 0xFFFF)) {
1515     return 0xFFFF;
1516   }
1517   substr_len = strlen(substr);
1518   if (substr_len >= 0xFFFF) {
1519     return 0xFFFF;
1520   }
1521   return pbuf_memfind(p, substr, (u16_t)substr_len, 0);
1522 }
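
/*
 * Usage sketch for pbuf_memfind()/pbuf_strstr() (illustrative only): locate a
 * token inside a received packet without first flattening the chain.
 *
 *   u16_t pos = pbuf_strstr(p, "\r\n\r\n");   // e.g. end of HTTP headers
 *   if (pos != 0xFFFF) {
 *     // token starts at chain-wide offset 'pos'
 *   }
 */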
1523