xref: /minix/minix/net/lwip/pchain.c (revision bb9622b5)
/* LWIP service - pchain.c - pbuf chain utility functions */

#include "lwip.h"
4 
/*
 * Allocate a chain of pbuf buffers as though it were a PBUF_POOL allocation,
 * except that each buffer is of type PBUF_RAM.  Return the pbuf chain on
 * success, or NULL on memory allocation failure.
 */
struct pbuf *
pchain_alloc(int layer, size_t size)
{
	struct pbuf *pbuf, *phead, **pnext;
	size_t chunk, left;
	int offset = 0;

	/*
	 * Check for length overflow.  Note that we do this before prepending
	 * the header, because otherwise we could never send a full-sized
	 * (65535-byte) IP packet.  This does mean that we are generating a
	 * pbuf chain that has over 64KB worth of allocated space, but our
	 * header hiding ensures that tot_len stays under 64KB.  A check in
	 * pbuf_header() prevents that later header adjustments end up lifting
	 * tot_len over this limit.
	 */
	if (size > UINT16_MAX)
		return NULL;

	/*
	 * Unfortunately, we have no choice but to replicate this block from
	 * lwIP's pbuf_alloc() code.  It is however unlikely that the offsets
	 * change for the currently supported layer types, and we do not need
	 * to support any layer types that we do not use ourselves.
	 */
	switch (layer) {
	case PBUF_TRANSPORT:
		offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN +
		    PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN;
		break;
	case PBUF_IP:
		offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN +
		    PBUF_IP_HLEN;
		break;
	case PBUF_LINK:
		offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN;
		break;
	case PBUF_RAW_TX:
		offset = PBUF_LINK_ENCAPSULATION_HLEN;
		break;
	case PBUF_RAW:
		offset = 0;
		break;
	default:
		panic("invalid pbuf layer: %d", layer);
	}

	/*
	 * The first buffer must hold the (to-be-hidden) header space in
	 * addition to its share of the payload, capped at one pool buffer.
	 */
	chunk = size + offset;
	if (chunk > MEMPOOL_BUFSIZE)
		chunk = MEMPOOL_BUFSIZE;

	if ((phead = pbuf_alloc(PBUF_RAW, chunk, PBUF_RAM)) == NULL)
		return NULL;

	/* Hide the header space, as pbuf_alloc() would do for 'layer'. */
	if (offset > 0)
		util_pbuf_header(phead, -offset);

	/* tot_len covers the payload of the entire chain being built. */
	phead->tot_len = size;

	pnext = &phead->next;

	/*
	 * Allocate and append the remaining buffers.  'left' is the payload
	 * still to be covered; the first buffer already covers (chunk -
	 * offset) bytes of it.
	 */
	for (left = size - (chunk - offset); left > 0; left -= chunk) {
		chunk = (left < MEMPOOL_BUFSIZE) ? left : MEMPOOL_BUFSIZE;

		if ((pbuf = pbuf_alloc(PBUF_RAW, chunk, PBUF_RAM)) == NULL) {
			/*
			 * Adjust tot_len to match the actual length of the
			 * chain so far, just in case pbuf_free() starts caring
			 * about this in the future.
			 */
			for (pbuf = phead; pbuf != NULL; pbuf = pbuf->next)
				pbuf->tot_len -= left;

			pbuf_free(phead);

			return NULL;
		}

		/* Each buffer's tot_len covers itself and everything after. */
		pbuf->tot_len = left;

		*pnext = pbuf;
		pnext = &pbuf->next;
	}

	return phead;
}
96 
97 /*
98  * Given the (non-empty) chain of buffers 'pbuf', return a pointer to the
99  * 'next' field of the last buffer in the chain.  This function is packet queue
100  * friendly.  A packet queue is a queue of packet chains, where each chain is
101  * delimited using the 'tot_len' field.  As a result, while the pointer
102  * returned is never NULL, the value pointed to by the returned pointer may or
103  * may not be NULL (and will point to the next chain if not NULL).  As notable
104  * exception, in cases where the buffer type is a single PBUF_REF, 'tot_len'
105  * may be zero and 'len' may be non-zero.  In such cases, the chain consists of
106  * that single buffer only.  This function must handle that case as well.
107  */
108 struct pbuf **
109 pchain_end(struct pbuf * pbuf)
110 {
111 
112 	assert(pbuf != NULL);
113 
114 	while (pbuf->tot_len > pbuf->len) {
115 		pbuf = pbuf->next;
116 
117 		assert(pbuf != NULL);
118 	}
119 
120 	return &pbuf->next;
121 }
122 
123 /*
124  * Given the (non-empty) chain of buffers 'pbuf', return a byte size estimation
125  * of the memory used by the chain, rounded up to pool buffer sizes.  This
126  * function is packet queue friendly.
127  */
128 size_t
129 pchain_size(struct pbuf * pbuf)
130 {
131 	size_t size;
132 
133 	assert(pbuf != NULL);
134 
135 	/*
136 	 * Count the first buffer separately, as its length may be seriously
137 	 * off due to header hiding.  While the caller should always provide
138 	 * exactly the same pbuf chain twice if it intends to get back the same
139 	 * size twice, this also protects against accidental size differences
140 	 * due to header hiding in that case.
141 	 */
142 	size = MEMPOOL_BUFSIZE;
143 
144 	/*
145 	 * Round up the size of the rest of the chain to whole buffers.
146 	 */
147 	if (pbuf->tot_len > pbuf->len) {
148 		size += pbuf->tot_len - pbuf->len + MEMPOOL_BUFSIZE - 1;
149 
150 		size -= size % MEMPOOL_BUFSIZE;
151 	}
152 
153 	return size;
154 }
155