1 #include <stdio.h>
2 #include <assert.h>
3 #include <stdio.h>
4 #include <stdlib.h>
5 #include <string.h>
6 #include <stddef.h>
7 #include <gtest/gtest.h>
8 #ifdef _WIN32
9 #include <windows.h>
10 #endif
#include "netbuf/netbuf.h"
12
13
14 #define BIG_BUF_SIZE 5000
15 #define SMALL_BUF_SIZE 50
16
// Test fixture for the netbuf (network buffer) manager tests. The tests
// share no state, so the fixture body is intentionally empty; it exists
// only to group the tests under a common name.
class NetbufTest : public ::testing::Test
{
};
20
// Verify the manager has no outstanding allocations or unflushed data
// (netbuf_is_clean returns non-zero), then release all of its resources.
// The manager must be re-initialized before any further use.
static void clean_check(nb_MGR *mgr)
{
    EXPECT_NE(0, netbuf_is_clean(mgr));
    netbuf_cleanup(mgr);
}
26
// Checks that netbuf_is_clean correctly tracks both kinds of "dirty" state:
// outstanding block reservations and enqueued-but-unflushed data.
TEST_F(NetbufTest, testCleanCheck)
{
    nb_MGR mgr;
    netbuf_init(&mgr, NULL);
    nb_SPAN span;
    span.size = 500;
    int rv = netbuf_mblock_reserve(&mgr, &span);
    ASSERT_EQ(0, rv);
    // An outstanding reservation makes the manager dirty...
    ASSERT_EQ(0, netbuf_is_clean(&mgr));
    netbuf_mblock_release(&mgr, &span);
    // ...and releasing it makes the manager clean again.
    ASSERT_NE(0, netbuf_is_clean(&mgr));

    nb_IOV iov;
    // Dummy non-NULL base pointer; the buffer contents are never touched by
    // the enqueue/flush bookkeeping exercised here.
    iov.iov_base = (void *)0x01;
    iov.iov_len = 500;
    netbuf_enqueue(&mgr, &iov, NULL);
    // Queued-but-unflushed data also counts as dirty.
    ASSERT_EQ(0, netbuf_is_clean(&mgr));

    unsigned toFlush = netbuf_start_flush(&mgr, &iov, 1, NULL);
    ASSERT_EQ(500, toFlush);
    netbuf_end_flush(&mgr, toFlush);
    // Fully flushed and acknowledged: clean again.
    ASSERT_NE(0, netbuf_is_clean(&mgr));

    clean_check(&mgr);
}
52
// Stress-tests block reservation: many large and many small spans are
// reserved and filled with distinct patterns, then each buffer is verified
// intact — proving reservations never overlap or clobber one another.
TEST_F(NetbufTest, testBasic)
{
    nb_MGR mgr;
    int rv;
    int ii;
    int n_bigspans = 20;
    int n_smallspans = 2000;

    nb_SPAN spans_big[20];
    nb_SPAN spans_small[2000];
    netbuf_init(&mgr, NULL);
    // A freshly initialized manager must already be clean. clean_check also
    // cleans the manager up, hence the re-init on the next line.
    clean_check(&mgr);
    netbuf_init(&mgr, NULL);


    // Reserve the large spans, filling each with a distinct letter.
    for (ii = 0; ii < n_bigspans; ii++) {
        int filler = 'a' + ii;
        nb_SPAN *span = spans_big + ii;
        span->size = BIG_BUF_SIZE;
        rv = netbuf_mblock_reserve(&mgr, span);
        ASSERT_EQ(0, rv);
        memset(SPAN_BUFFER(span), filler, span->size);
    }

    // Reserve the small spans, filling each with its index (memset
    // truncates the value to unsigned char).
    for (ii = 0; ii < n_smallspans; ii++) {
        nb_SPAN *span = spans_small + ii;
        int filler = ii;
        span->size = SMALL_BUF_SIZE;
        rv = netbuf_mblock_reserve(&mgr, span);
        ASSERT_EQ(0, rv);
        filler = ii;
        memset(SPAN_BUFFER(span), filler, span->size);
    }

    // Verify each big span still holds its own fill pattern, then release.
    for (ii = 0; ii < n_bigspans; ii++) {
        char expected[BIG_BUF_SIZE];
        char *curbuf = SPAN_BUFFER(spans_big + ii);

        memset(expected, 'a' + ii, BIG_BUF_SIZE);
        ASSERT_EQ(0, memcmp(curbuf, expected, BIG_BUF_SIZE));

        netbuf_mblock_release(&mgr, spans_big + ii);
    }

    // Same integrity check and release for the small spans.
    for (ii = 0; ii < n_smallspans; ii++) {
        char expected[SMALL_BUF_SIZE];
        char *curbuf = SPAN_BUFFER(spans_small + ii);
        memset(expected, ii, SMALL_BUF_SIZE);
        ASSERT_EQ(0, memcmp(curbuf, expected, SMALL_BUF_SIZE));
        netbuf_mblock_release(&mgr, spans_small + ii);
    }

    {
        // Nothing was ever enqueued, so this only exercises start_flush on
        // an empty send queue (result intentionally ignored).
        nb_IOV iov[20];
        netbuf_start_flush(&mgr, iov, 20, NULL);
    }
    clean_check(&mgr);
}
111
// Exercises the flush lifecycle: partial acknowledgements via
// netbuf_end_flush, the guarantee that overlapping start_flush calls never
// re-offer already-flushed data, and netbuf_reset_flush which invalidates
// the outstanding flush so unacknowledged data is offered again.
TEST_F(NetbufTest, testFlush)
{
    nb_MGR mgr;
    nb_SETTINGS settings;
    nb_SPAN span;
    nb_SPAN spans[3];

    int ii;
    int rv;
    nb_IOV iov[10];
    unsigned int sz;

    netbuf_default_settings(&settings);
    // Tiny base allocation forces the manager to grow its blocks.
    settings.data_basealloc = 8;
    netbuf_init(&mgr, &settings);

    span.size = 32;
    rv = netbuf_mblock_reserve(&mgr, &span);
    ASSERT_EQ(rv, 0);

    netbuf_enqueue_span(&mgr, &span, NULL);
    sz = netbuf_start_flush(&mgr, iov, 1, NULL);
    ASSERT_EQ(32, sz);
    ASSERT_EQ(32, iov[0].iov_len);
    // Acknowledge only part (20/32) of the flushed data...
    netbuf_end_flush(&mgr, 20);

    // ...a second start_flush must offer nothing: the remaining 12 bytes
    // were already handed out by the first flush.
    sz = netbuf_start_flush(&mgr, iov, 1, NULL);
    ASSERT_EQ(0, sz);
    // Acknowledge the remaining 12 bytes.
    netbuf_end_flush(&mgr, 12);
    netbuf_mblock_release(&mgr, &span);

    for (ii = 0; ii < 3; ii++) {
        spans[ii].size = 50;
        ASSERT_EQ(0, netbuf_mblock_reserve(&mgr, spans + ii));
    }

    for (ii = 0; ii < 3; ii++) {
        netbuf_enqueue_span(&mgr, spans + ii, NULL);
    }

    sz = netbuf_start_flush(&mgr, iov, 10, NULL);
    ASSERT_EQ(150, sz);
    netbuf_end_flush(&mgr, 75);
    // reset_flush invalidates the outstanding flush, so the next
    // start_flush re-offers the 75 unacknowledged bytes.
    netbuf_reset_flush(&mgr);
    sz = netbuf_start_flush(&mgr, iov, 10, NULL);
    ASSERT_EQ(75, sz);
    netbuf_end_flush(&mgr, 75);
    sz = netbuf_start_flush(&mgr, iov, 10, NULL);
    ASSERT_EQ(0, sz);
    netbuf_mblock_release(&mgr, &spans[0]);

    // Re-reserve into the slot just released, with a smaller size.
    spans[0].size = 20;
    rv = netbuf_mblock_reserve(&mgr, &spans[0]);
    ASSERT_EQ(0, rv);
    netbuf_mblock_release(&mgr, &spans[0]);

    for (ii = 1; ii < 3; ii++) {
        netbuf_mblock_release(&mgr, spans + ii);
    }

    netbuf_dump_status(&mgr, stdout);
    clean_check(&mgr);
}
175
176 TEST_F(NetbufTest, testWrappingBuffers)
177 {
178 nb_MGR mgr;
179 nb_SETTINGS settings;
180 int rv;
181 nb_SPAN span1, span2, span3;
182
183 #ifdef NETBUFS_LIBC_PROXY
184 return;
185 #endif
186
187 netbuf_default_settings(&settings);
188 settings.data_basealloc = 40;
189 netbuf_init(&mgr, &settings);
190
191 span1.size = 16;
192 span2.size = 16;
193
194 rv = netbuf_mblock_reserve(&mgr, &span1);
195 ASSERT_EQ(0, rv);
196 rv = netbuf_mblock_reserve(&mgr, &span2);
197 ASSERT_EQ(0, rv);
198
199 ASSERT_EQ(span1.parent, span2.parent);
200 ASSERT_EQ(0, span1.offset);
201 ASSERT_EQ(16, span2.offset);
202
203 /* Wewease Wodewick! */
204 netbuf_mblock_release(&mgr, &span1);
205 ASSERT_EQ(16, span2.parent->start);
206
207 /* So we have 8 bytes at the end.. */
208 ASSERT_EQ(32, span2.parent->wrap);
209 span3.size = 10;
210 rv = netbuf_mblock_reserve(&mgr, &span3);
211
212 ASSERT_EQ(0, rv);
213 ASSERT_EQ(10, span2.parent->cursor);
214 ASSERT_EQ(0, span3.offset);
215 ASSERT_EQ(10, span3.parent->cursor);
216 ASSERT_EQ(16, span3.parent->start);
217
218 netbuf_mblock_release(&mgr, &span2);
219 ASSERT_EQ(0, span3.parent->start);
220 netbuf_mblock_release(&mgr, &span3);
221
222 netbuf_dump_status(&mgr, stdout);
223
224 span1.size = 20;
225 rv = netbuf_mblock_reserve(&mgr, &span1);
226 ASSERT_EQ(0, span1.offset);
227 ASSERT_EQ(20, span1.parent->cursor);
228 ASSERT_EQ(0, span1.parent->start);
229 ASSERT_EQ(20, span1.parent->wrap);
230 netbuf_dump_status(&mgr, stdout);
231
232 netbuf_mblock_release(&mgr, &span1);
233
234 clean_check(&mgr);
235 }
236
237 static void assert_iov_eq(nb_IOV *iov, nb_SIZE offset, char expected)
238 {
239 char *buf = (char *)iov->iov_base;
240 ASSERT_EQ(expected, buf[offset]);
241 }
242
// Verifies that two overlapping flushes can be outstanding at once: the
// second start_flush picks up only data enqueued after the first, and each
// flush is acknowledged independently with netbuf_end_flush.
TEST_F(NetbufTest, testMultipleFlush)
{
    nb_SETTINGS settings;
    nb_MGR mgr;
    int rv;
    nb_SIZE sz;
    nb_SPAN span1, span2, span3;
    nb_IOV iov[10];

    netbuf_default_settings(&settings);
    netbuf_init(&mgr, &settings);

    span1.size = 50;
    span2.size = 50;
    span3.size = 50;

    rv = netbuf_mblock_reserve(&mgr, &span1);
    ASSERT_EQ(0, rv);
    rv = netbuf_mblock_reserve(&mgr, &span2);
    ASSERT_EQ(0, rv);
    rv = netbuf_mblock_reserve(&mgr, &span3);
    ASSERT_EQ(0, rv);

    netbuf_enqueue_span(&mgr, &span1, NULL);
    netbuf_enqueue_span(&mgr, &span2, NULL);

    sz = netbuf_start_flush(&mgr, iov, 10, NULL);
    ASSERT_EQ(100, sz);

    // Filling the spans after start_flush is fine: the IOVs point into the
    // span storage, so the flush output reflects these writes.
    memset(SPAN_BUFFER(&span1), 'A', span1.size);
    memset(SPAN_BUFFER(&span2), 'B', span2.size);
    memset(SPAN_BUFFER(&span3), 'C', span3.size);

#ifndef NETBUFS_LIBC_PROXY
    // The two adjacent spans are coalesced into a single 100-byte IOV.
    ASSERT_EQ(100, iov->iov_len);
    assert_iov_eq(iov, 0, 'A');
    assert_iov_eq(iov, 50, 'B');

    // Second flush while the first is outstanding: must offer only the
    // newly enqueued span3.
    // NOTE(review): the IOV count passed here is 0, yet 50 bytes and a
    // populated iov[1] are expected — confirm against netbuf_start_flush's
    // contract; an niov of 1 looks like what was intended.
    netbuf_enqueue_span(&mgr, &span3, NULL);
    sz = netbuf_start_flush(&mgr, &iov[1], 0, NULL);
    ASSERT_EQ(sz, 50);
    assert_iov_eq(&iov[1], 0, 'C');
    ASSERT_EQ(50, iov[1].iov_len);

    netbuf_dump_status(&mgr, stdout);

    // Acknowledge the first flush (100 bytes), then the second (50 bytes).
    netbuf_end_flush(&mgr, 100);
    netbuf_dump_status(&mgr, stdout);

    netbuf_end_flush(&mgr, 50);
    sz = netbuf_start_flush(&mgr, iov, 10, NULL);
    ASSERT_EQ(0, sz);
#endif

    netbuf_mblock_release(&mgr, &span1);
    netbuf_mblock_release(&mgr, &span2);
    netbuf_mblock_release(&mgr, &span3);
    clean_check(&mgr);
}
302
// Verifies flush isolation when the underlying block wraps around: repeated
// enqueue/flush cycles on a 50-byte block must never offer the same bytes
// twice across successive start_flush calls.
TEST_F(NetbufTest, testCyclicFlush)
{
    nb_SPAN spans[10];
    nb_IOV iov[4];
    nb_MGR mgr;
    nb_SETTINGS settings;
    int niov;
    unsigned nb;

    // Each call to netbuf_start_flush should be considered isolated; so that
    // the next call to start_flush _never_ overlaps any data from the previous
    // call to start_flush. Otherwise we might end up in a situation where
    // the same data ends up being sent out twice. netbuf_reset_flush() should
    // be called to invalidate any outstanding start_flush() calls, so that
    // the next call to start_flush() will begin from the beginning of the
    // send queue, rather than from the last call to start_flush().

    netbuf_default_settings(&settings);
    // A 50-byte block holds exactly five of the 10-byte spans below.
    settings.data_basealloc = 50;
    netbuf_init(&mgr, &settings);

    // Enqueue and flush five spans one at a time; each start_flush offers
    // only the single newly enqueued span.
    for (size_t ii = 0; ii < 5; ii++) {
        spans[ii].size = 10;
        netbuf_mblock_reserve(&mgr, &spans[ii]);
        memset(SPAN_BUFFER(&spans[ii]), ii, 10);
        netbuf_enqueue_span(&mgr, &spans[ii], NULL);
        nb = netbuf_start_flush(&mgr, iov, 1, &niov);

        ASSERT_EQ(10, nb);
        ASSERT_EQ(1, niov);
    }
    // So far have 50 inside the span

    // flush the first span (should have 40 bytes remaining)
    netbuf_end_flush(&mgr, 10);
    // Two more spans; with the first span's 10 bytes acknowledged, these
    // reservations wrap into the freed region of the block.
    for (size_t ii = 5; ii < 7; ii++) {
        spans[ii].size = 10;
        netbuf_mblock_reserve(&mgr, &spans[ii]);
        netbuf_enqueue_span(&mgr, &spans[ii], NULL);
        memset(SPAN_BUFFER(&spans[ii]), ii, 10);
    }

    // Only the two new spans are offered — never the 40 outstanding bytes.
    nb = netbuf_start_flush(&mgr, iov, 4, &niov);
    ASSERT_EQ(20, nb);
    // Acknowledge the earlier 40 bytes, then the 20 just flushed.
    netbuf_end_flush(&mgr, 40);
    netbuf_end_flush(&mgr, nb);
    nb = netbuf_start_flush(&mgr, iov, 4, &niov);
    ASSERT_EQ(0, nb);
    for (size_t ii = 0; ii < 7; ii++) {
        netbuf_mblock_release(&mgr, &spans[ii]);
    }
    clean_check(&mgr);
}
356
// Minimal PDU (protocol data unit) used to exercise the PDU-enqueue API.
typedef struct {
    sllist_node slnode;  // intrusive list linkage used by netbuf_pdu_enqueue
    nb_SIZE size;        // total payload size of this PDU, in bytes
    int is_flushed;      // set to 1 by pdu_callback once fully flushed
    nb_SPAN spans[3];    // backing spans that make up the PDU payload
    nb_SIZE nspans;      // span count; not used by these tests
} my_PDU;
364
365 static nb_SIZE pdu_callback(void *p, nb_SIZE hint, void *arg)
366 {
367 my_PDU *pdu = (my_PDU *)p;
368 (void)arg;
369 if (hint >= pdu->size) {
370 pdu->is_flushed = 1;
371 }
372 return pdu->size;
373 }
374
// Verifies PDU flush accounting: a 24-byte PDU split across three 8-byte
// spans is only reported as flushed (via pdu_callback) once every byte has
// been acknowledged through netbuf_end_flush2.
TEST_F(NetbufTest, testPduEnqueue)
{
    nb_SETTINGS settings;
    nb_MGR mgr;
    my_PDU pdu;
    nb_IOV iov[10];
    nb_SIZE toflush;
    int ii;

    netbuf_default_settings(&settings);
    // Minimal base allocation so the three spans land in separate blocks
    // and a 2-IOV flush can only cover two of them.
    settings.data_basealloc = 1;
    netbuf_init(&mgr, &settings);

    memset(&pdu, 0, sizeof pdu);
    pdu.size = 24;  // 3 spans x 8 bytes

    for (ii = 0; ii < 3; ii++) {
        pdu.spans[ii].size = 8;
        netbuf_mblock_reserve(&mgr, pdu.spans + ii);
    }

    for (ii = 0; ii < 3; ii++) {
        netbuf_enqueue_span(&mgr, pdu.spans + ii, NULL);
    }

    netbuf_pdu_enqueue(&mgr, &pdu, offsetof(my_PDU, slnode));

    /** Start the flush */
    toflush = netbuf_start_flush(&mgr, iov, 2, NULL);
    ASSERT_EQ(16, toflush);
    // Only 16 of 24 bytes flushed: the callback must not mark the PDU done.
    netbuf_end_flush2(&mgr, toflush, pdu_callback, 0, NULL);
    ASSERT_EQ(0, pdu.is_flushed);

    toflush = netbuf_start_flush(&mgr, iov, 10, NULL);
    ASSERT_EQ(8, toflush);

    // The remaining 8 bytes complete the PDU.
    netbuf_end_flush2(&mgr, toflush, pdu_callback, 0, NULL);
    ASSERT_EQ(1, pdu.is_flushed);

    for (ii = 0; ii < 3; ii++) {
        netbuf_mblock_release(&mgr, pdu.spans + ii);
    }

    clean_check(&mgr);
}
420
// Verifies out-of-order release behavior: freeing a span from the middle of
// a block does not make the interior hole reusable; a subsequent
// reservation is placed after the last allocated span instead.
TEST_F(NetbufTest, testOutOfOrder)
{
    nb_MGR mgr;
    nb_SPAN spans[3];
    int ii;

    netbuf_init(&mgr, NULL);

    // Three consecutive 10-byte spans at offsets 0, 10, 20.
    for (ii = 0; ii < 3; ii++) {
        spans[ii].size = 10;
        int rv = netbuf_mblock_reserve(&mgr, spans + ii);
        ASSERT_EQ(0, rv);
    }

    // Release the middle span, leaving a hole at offset 10.
    netbuf_mblock_release(&mgr, &spans[1]);
    spans[1].size = 5;

    // Even though the 5-byte request would fit in the hole, the allocator
    // appends after the last span: offset 30.
    netbuf_mblock_reserve(&mgr, &spans[1]);
    ASSERT_EQ(30, spans[1].offset);

    for (ii = 0; ii < 3; ii++) {
        netbuf_mblock_release(&mgr, spans + ii);
    }

    clean_check(&mgr);
}
447