/* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/*
 *     Copyright 2014 Couchbase, Inc.
 *
 *   Licensed under the Apache License, Version 2.0 (the "License");
 *   you may not use this file except in compliance with the License.
 *   You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 *   Unless required by applicable law or agreed to in writing, software
 *   distributed under the License is distributed on an "AS IS" BASIS,
 *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *   See the License for the specific language governing permissions and
 *   limitations under the License.
 */

#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
/* for ULONG */
#include <windows.h>
#endif

#include <stdio.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

#include "netbuf.h"
#include "sllist-inl.h"

#include <libcouchbase/assert.h>

/******************************************************************************
 ******************************************************************************
 ** Handy Macros                                                             **
 ******************************************************************************
 ******************************************************************************/
#define MINIMUM(a, b) ((a) < (b) ? (a) : (b))
#define MAXIMUM(a, b) ((a) > (b) ? (a) : (b))

#define BLOCK_IS_EMPTY(block) ((block)->start == (block)->cursor)

#define FIRST_BLOCK(pool) \
    (SLLIST_ITEM(SLLIST_FIRST(&(pool)->active), nb_MBLOCK, slnode))

#define LAST_BLOCK(mgr) \
    (SLLIST_ITEM((mgr)->active_blocks.last, nb_BLOCKHDR, slnode))

#define NEXT_BLOCK(block) \
    (SLLIST_ITEM((block)->slnode.next, nb_BLOCKHDR, slnode))

#define BLOCK_HAS_DEALLOCS(block) \
    ((block)->deallocs && !SLLIST_IS_EMPTY(&(block)->deallocs->pending))

/** Static forward decls */
static void mblock_release_data(nb_MBPOOL *, nb_MBLOCK *, nb_SIZE, nb_SIZE);
static void mblock_release_ptr(nb_MBPOOL *, char *, nb_SIZE);
static void mblock_init(nb_MBPOOL *);
static void mblock_cleanup(nb_MBPOOL *);
static void mblock_wipe_block(nb_MBLOCK *block);

/******************************************************************************
 ******************************************************************************
 ** Allocation/Reservation                                                   **
 ******************************************************************************
 ******************************************************************************/

/**
 * Determines whether the block is allocated as a standalone block, or if it's
 * part of a larger allocation
 */
static int
mblock_is_standalone(nb_MBLOCK *block)
{
    return block->parent == NULL;
}

/**
 * Allocates a new block with at least the given capacity and places it
 * inside the active list.
 */
static nb_MBLOCK*
alloc_new_block(nb_MBPOOL *pool, nb_SIZE capacity)
{
    unsigned int ii;
    nb_MBLOCK *ret = NULL;

    for (ii = 0; ii < pool->ncacheblocks; ii++) {
        if (!pool->cacheblocks[ii].nalloc) {
            ret = pool->cacheblocks + ii;
            break;
        }
    }

    if (!ret) {
        ret = calloc(1, sizeof(*ret));
    }

    if (!ret) {
        return NULL;
    }

    ret->nalloc = pool->basealloc;

    while (ret->nalloc < capacity) {
        ret->nalloc *= 2;
    }

    ret->wrap = 0;
    ret->cursor = 0;
    ret->root = malloc(ret->nalloc);

    if (!ret->root) {
        if (mblock_is_standalone(ret)) {
            free(ret);
        }
        return NULL;
    }

    return ret;
}
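
/**
 * Worked sizing example (illustrative values, not part of the pool logic):
 * with pool->basealloc == 32 and a requested capacity of 100, the doubling
 * loop above grows nalloc 32 -> 64 -> 128, i.e. the smallest power-of-two
 * multiple of basealloc that can hold the request.
 */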

/**
 * Finds an available block within the available list. The block will have
 * room for at least capacity bytes.
 */
static nb_MBLOCK*
find_free_block(nb_MBPOOL *pool, nb_SIZE capacity)
{
    sllist_iterator iter;
    SLLIST_ITERFOR(&pool->avail, &iter) {
        nb_MBLOCK *cur = SLLIST_ITEM(iter.cur, nb_MBLOCK, slnode);
        if (cur->nalloc >= capacity) {
            sllist_iter_remove(&pool->avail, &iter);
            pool->curblocks--;
            return cur;
        }
    }

    return NULL;
}

/**
 * Find a new block for the given span and initialize it for a reserved size
 * correlating to the span.
 * The block may either be popped from the available section or allocated
 * as a standalone depending on current constraints.
 */
static int
reserve_empty_block(nb_MBPOOL *pool, nb_SPAN *span)
{
    nb_MBLOCK *block;

    if ((block = find_free_block(pool, span->size)) == NULL) {
        block = alloc_new_block(pool, span->size);
    }

    if (!block) {
        return -1;
    }

    span->parent = block;
    span->offset = 0;
    block->start = 0;
    block->wrap = span->size;
    block->cursor = span->size;

    block->deallocs = NULL;

    sllist_append(&pool->active, &block->slnode);
    return 0;
}

/**
 * Attempt to reserve space from the currently active block for the given
 * span.
 * @return 0 if the active block had enough space and the span was
 * initialized, nonzero otherwise.
 */
static int
reserve_active_block(nb_MBLOCK *block, nb_SPAN *span)
{
    if (BLOCK_HAS_DEALLOCS(block)) {
        return -1;
    }

    if (block->cursor > block->start) {
        if (block->nalloc - block->cursor >= span->size) {
            span->offset = block->cursor;
            block->cursor += span->size;
            block->wrap = block->cursor;
            return 0;

        } else if (block->start >= span->size) {
            /** Wrap around to the beginning of the buffer */
            span->offset = 0;
            block->cursor = span->size;
            return 0;
        } else {
            return -1;
        }

    } else {
        /* Already wrapped; the cursor may only grow toward the start */
        if (block->start - block->cursor >= span->size) {
            span->offset = block->cursor;
            block->cursor += span->size;
            return 0;
        } else {
            return -1;
        }
    }
}
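
/**
 * Geometry sketch for the two cases above ('x' = reserved, 'o' = free;
 * offsets are illustrative):
 *
 *   Unwrapped (cursor > start):
 *   0        start           cursor==wrap     nalloc
 *   |oooooooo|xxxxxxxxxxxxxxx|oooooooooooooooo|
 *   A new span goes at the cursor if the tail fits, or at offset 0
 *   (wrapping) if the head region is large enough.
 *
 *   Wrapped (cursor <= start):
 *   0        cursor         start             wrap    nalloc
 *   |xxxxxxxx|oooooooooooooo|xxxxxxxxxxxxxxxxx|ooooooo|
 *   Here a new span may only extend the second region toward 'start'.
 */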

static int
mblock_reserve_data(nb_MBPOOL *pool, nb_SPAN *span)
{
    nb_MBLOCK *block;
    int rv;

#ifdef NETBUF_LIBC_PROXY
    block = malloc(sizeof(*block) + span->size);
    if (!block) {
        return -1;
    }
    block->root = ((char *)block) + sizeof(*block);
    span->parent = block;
    span->offset = 0;
    return 0;
#endif

    if (SLLIST_IS_EMPTY(&pool->active)) {
        return reserve_empty_block(pool, span);

    } else {
        block = SLLIST_ITEM(pool->active.last, nb_MBLOCK, slnode);
        rv = reserve_active_block(block, span);

        if (rv != 0) {
            return reserve_empty_block(pool, span);
        }

        span->parent = block;
        return rv;
    }
}

/******************************************************************************
 ******************************************************************************
 ** Out-Of-Order Deallocation Functions                                      **
 ******************************************************************************
 ******************************************************************************/
static void
ooo_queue_dealloc(nb_MGR *mgr, nb_MBLOCK *block, nb_SPAN *span)
{
    nb_QDEALLOC *qd;
    nb_DEALLOC_QUEUE *queue;
    nb_SPAN qespan;

    if (!block->deallocs) {
        queue = calloc(1, sizeof(*queue));
        queue->qpool.basealloc = sizeof(*qd) * mgr->settings.dea_basealloc;
        queue->qpool.ncacheblocks = mgr->settings.dea_cacheblocks;
        queue->qpool.mgr = mgr;
        mblock_init(&queue->qpool);
        block->deallocs = queue;
    }

    queue = block->deallocs;

    if (SLLIST_IS_EMPTY(&queue->pending)) {
        queue->min_offset = span->offset;
    }

    qespan.size = sizeof(*qd);
    mblock_reserve_data(&queue->qpool, &qespan);

    qd = (nb_QDEALLOC *)(void *)SPAN_MBUFFER_NC(&qespan);
    qd->offset = span->offset;
    qd->size = span->size;
    if (queue->min_offset > qd->offset) {
        queue->min_offset = qd->offset;
    }
    sllist_append(&queue->pending, &qd->slnode);
}

static INLINE void
maybe_unwrap_block(nb_MBLOCK *block)
{
    if (!BLOCK_IS_EMPTY(block) && block->start == block->wrap) {
        block->wrap = block->cursor;
        block->start = 0;
    }
}

static void
ooo_apply_dealloc(nb_MBLOCK *block)
{
    nb_SIZE min_next = (nb_SIZE)-1; /* sentinel: no next pending offset yet */
    sllist_iterator iter;
    nb_DEALLOC_QUEUE *queue = block->deallocs;

    SLLIST_ITERFOR(&queue->pending, &iter) {
        nb_QDEALLOC *cur = SLLIST_ITEM(iter.cur, nb_QDEALLOC, slnode);
        if (cur->offset == block->start) {
            block->start += cur->size;
            maybe_unwrap_block(block);

            sllist_iter_remove(&block->deallocs->pending, &iter);
            mblock_release_ptr(&queue->qpool, (char *)cur, sizeof(*cur));
        } else if (cur->offset < min_next) {
            min_next = cur->offset;
        }
    }
    queue->min_offset = min_next;
}

static INLINE void
mblock_release_data(nb_MBPOOL *pool,
                    nb_MBLOCK *block, nb_SIZE size, nb_SIZE offset)
{
    if (offset == block->start) {
        /** Removing from the beginning */
        block->start += size;

        if (block->deallocs && block->deallocs->min_offset == block->start) {
            ooo_apply_dealloc(block);
        }

        maybe_unwrap_block(block);

    } else if (offset + size == block->cursor) {
        /** Removing from the end */
        if (block->cursor == block->wrap) {
            /** Single region, no wrap */
            block->cursor -= size;
            block->wrap -= size;

        } else {
            block->cursor -= size;
            if (!block->cursor) {
                /** Second region is now empty; snap the cursor back to the wrap point */
                block->cursor = block->wrap;
            }
        }

    } else {
        nb_SPAN span;

        span.parent = block;
        span.offset = offset;
        span.size = size;
        ooo_queue_dealloc(pool->mgr, block, &span);
        return;
    }

    if (!BLOCK_IS_EMPTY(block)) {
        return;
    }

    {
        sllist_iterator iter;
        SLLIST_ITERFOR(&pool->active, &iter) {
            if (&block->slnode == iter.cur) {
                sllist_iter_remove(&pool->active, &iter);
                break;
            }
        }
    }

    if (pool->curblocks < pool->maxblocks) {
        sllist_append(&pool->avail, &block->slnode);
        pool->curblocks++;
    } else {
        mblock_wipe_block(block);
    }
}
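
/**
 * Release-ordering sketch (hypothetical offsets): suppose spans A=[0,10),
 * B=[10,20) and C=[20,30) were reserved in that order, and B is released
 * first. B touches neither 'start' (0) nor 'cursor' (30), so it is queued
 * out-of-order. Releasing A then advances 'start' to 10, and since the
 * queue's min_offset (10) now equals 'start', ooo_apply_dealloc() also
 * consumes B, leaving 'start' at 20 with only C still reserved.
 */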

static void
mblock_release_ptr(nb_MBPOOL *pool, char *ptr, nb_SIZE size)
{
    nb_MBLOCK *block;
    nb_SIZE offset;
    sllist_node *ll;

#ifdef NETBUF_LIBC_PROXY
    block = (nb_MBLOCK *)(ptr - sizeof(*block));
    free(block);
    return;
#endif

    SLLIST_FOREACH(&pool->active, ll) {
        block = SLLIST_ITEM(ll, nb_MBLOCK, slnode);
        if (block->root > ptr) {
            continue;
        }
        if (block->root + block->nalloc <= ptr) {
            continue;
        }
        offset = ptr - block->root;
        mblock_release_data(pool, block, size, offset);
        return;
    }

    fprintf(stderr, "NETBUF: Requested to release pointer %p which was not allocated\n", (void *)ptr);
    lcb_assert(0);
}

static nb_SIZE
mblock_get_next_size(const nb_MBPOOL *pool, int allow_wrap)
{
    nb_MBLOCK *block;
    if (SLLIST_IS_EMPTY(&pool->active)) {
        return 0;
    }

    block = FIRST_BLOCK(pool);

    if (BLOCK_HAS_DEALLOCS(block)) {
        return 0;
    }

    if (!block->start) {
        /** Plain old linear buffer */
        return block->nalloc - block->cursor;
    }

    if (block->cursor != block->wrap) {
        /** Already in second region */
        return block->start - block->cursor;
    }

    if (allow_wrap) {
        return MINIMUM(block->nalloc - block->wrap, block->start);
    }

    return block->nalloc - block->wrap;
}

static void
mblock_wipe_block(nb_MBLOCK *block)
{
    if (block->root) {
        free(block->root);
    }
    if (block->deallocs) {
        sllist_iterator dea_iter;
        nb_DEALLOC_QUEUE *queue = block->deallocs;

        SLLIST_ITERFOR(&queue->pending, &dea_iter) {
            nb_QDEALLOC *qd = SLLIST_ITEM(dea_iter.cur, nb_QDEALLOC, slnode);
            sllist_iter_remove(&queue->pending, &dea_iter);
            mblock_release_ptr(&queue->qpool, (char *)qd, sizeof(*qd));
        }

        mblock_cleanup(&queue->qpool);
        free(queue);
        block->deallocs = NULL;
    }

    if (mblock_is_standalone(block)) {
        free(block);
    }
}

static void
free_blocklist(nb_MBPOOL *pool, sllist_root *list)
{
    sllist_iterator iter;
    SLLIST_ITERFOR(list, &iter) {
        nb_MBLOCK *block = SLLIST_ITEM(iter.cur, nb_MBLOCK, slnode);
        sllist_iter_remove(list, &iter);
        mblock_wipe_block(block);
    }
    (void)pool;
}

static void
mblock_cleanup(nb_MBPOOL *pool)
{
    free_blocklist(pool, &pool->active);
    free_blocklist(pool, &pool->avail);
    free(pool->cacheblocks);
}

static void
mblock_init(nb_MBPOOL *pool)
{
    unsigned int ii;
    pool->cacheblocks = calloc(pool->ncacheblocks, sizeof(*pool->cacheblocks));
    for (ii = 0; ii < pool->ncacheblocks; ii++) {
        pool->cacheblocks[ii].parent = pool;
    }
    if (pool->ncacheblocks) {
        pool->maxblocks = pool->ncacheblocks * 2;
    }
}

int
netbuf_mblock_reserve(nb_MGR *mgr, nb_SPAN *span)
{
    return mblock_reserve_data(&mgr->datapool, span);
}
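
/**
 * Usage sketch (illustrative; assumes the SPAN_BUFFER accessor from
 * netbuf.h and default settings):
 *
 *   nb_MGR mgr;
 *   nb_SPAN span;
 *
 *   netbuf_init(&mgr, NULL);
 *   span.size = 64;
 *   if (netbuf_mblock_reserve(&mgr, &span) == 0) {
 *       memcpy(SPAN_BUFFER(&span), "data", 4);
 *       netbuf_enqueue_span(&mgr, &span, NULL);
 *   }
 */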

/******************************************************************************
 ******************************************************************************
 ** Informational Routines                                                   **
 ******************************************************************************
 ******************************************************************************/
nb_SIZE
netbuf_mblock_get_next_size(const nb_MGR *mgr, int allow_wrap)
{
    return mblock_get_next_size(&mgr->datapool, allow_wrap);
}

unsigned int
netbuf_get_niov(nb_MGR *mgr)
{
    sllist_node *ll;
    unsigned int ret = 0;
    SLLIST_FOREACH(&mgr->sendq.pending, ll) {
        ret++;
    }

    return ret;
}

/******************************************************************************
 ******************************************************************************
 ** Flush Routines                                                           **
 ******************************************************************************
 ******************************************************************************/
static nb_SNDQELEM *
get_sendqe(nb_SENDQ *sq, const nb_IOV *bufinfo)
{
    nb_SNDQELEM *sndqe;
    nb_SPAN span;
    span.size = sizeof(*sndqe);
    mblock_reserve_data(&sq->elempool, &span);
    sndqe = (nb_SNDQELEM *)(void *)SPAN_MBUFFER_NC(&span);

    sndqe->base = bufinfo->iov_base;
    sndqe->len = bufinfo->iov_len;
    return sndqe;
}

void
netbuf_enqueue(nb_MGR *mgr, const nb_IOV *bufinfo, const void *parent)
{
    nb_SENDQ *q = &mgr->sendq;
    nb_SNDQELEM *win;

    if (SLLIST_IS_EMPTY(&q->pending)) {
        win = get_sendqe(q, bufinfo);
        sllist_append(&q->pending, &win->slnode);

    } else {
        win = SLLIST_ITEM(q->pending.last, nb_SNDQELEM, slnode);
        if (win->base + win->len == bufinfo->iov_base) {
            win->len += bufinfo->iov_len;

        } else {
            win = get_sendqe(q, bufinfo);
            sllist_append(&q->pending, &win->slnode);
        }
    }
    win->parent = parent;
}
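
/**
 * Coalescing sketch: two enqueues whose buffers are contiguous in memory,
 * e.g. {buf, 8} followed by {buf + 8, 4}, extend the tail element to
 * {buf, 12} rather than appending a new one, so a later
 * netbuf_start_flush() emits a single IOV covering both.
 */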

void
netbuf_enqueue_span(nb_MGR *mgr, nb_SPAN *span, const void *parent)
{
    nb_IOV spinfo;
    spinfo.iov_base = SPAN_BUFFER(span);
    spinfo.iov_len = span->size;
    netbuf_enqueue(mgr, &spinfo, parent);
}

nb_SIZE
netbuf_start_flush(nb_MGR *mgr, nb_IOV *iovs, int niov, int *nused)
{
    nb_SIZE ret = 0;
    nb_IOV *iov_end = iovs + niov, *iov_start = iovs;
    nb_IOV *iov = iovs;
    sllist_node *ll;
    nb_SENDQ *sq = &mgr->sendq;
    nb_SNDQELEM *win = NULL;

    if (sq->last_requested) {
        if (sq->last_offset != sq->last_requested->len) {
            win = sq->last_requested;
            lcb_assert(win->len > sq->last_offset);

            iov->iov_len = win->len - sq->last_offset;
            iov->iov_base = win->base + sq->last_offset;
            ret += iov->iov_len;
            iov++;
        }

        ll = sq->last_requested->slnode.next;

    } else {
        ll = SLLIST_FIRST(&sq->pending);
    }

    while (ll && iov != iov_end) {
        win = SLLIST_ITEM(ll, nb_SNDQELEM, slnode);
        iov->iov_len = win->len;
        iov->iov_base = win->base;

        ret += iov->iov_len;
        iov++;
        ll = ll->next;
    }

    if (win) {
        sq->last_requested = win;
        sq->last_offset = win->len;
    }
    if (ret && nused) {
        *nused = iov - iov_start;
    }

    return ret;
}
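
/**
 * Flush-cycle sketch (illustrative; 'fd' and the writev() call are
 * assumptions here, any scatter-gather write would do):
 *
 *   nb_IOV iov[8];
 *   int niov = 0;
 *   nb_SIZE nb = netbuf_start_flush(&mgr, iov, 8, &niov);
 *   if (nb) {
 *       ssize_t nw = writev(fd, (struct iovec *)iov, niov);
 *       if (nw > 0) {
 *           netbuf_end_flush(&mgr, (unsigned int)nw);
 *       }
 *   }
 */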

void
netbuf_end_flush(nb_MGR *mgr, unsigned int nflushed)
{
    nb_SENDQ *q = &mgr->sendq;
    sllist_iterator iter;
    SLLIST_ITERFOR(&q->pending, &iter) {
        nb_SNDQELEM *win = SLLIST_ITEM(iter.cur, nb_SNDQELEM, slnode);
        nb_SIZE to_chop = MINIMUM(win->len, nflushed);

        win->len -= to_chop;
        nflushed -= to_chop;

        if (!win->len) {
            sllist_iter_remove(&q->pending, &iter);
            mblock_release_ptr(&mgr->sendq.elempool, (char *)win, sizeof(*win));
            if (win == q->last_requested) {
                q->last_requested = NULL;
                q->last_offset = 0;
            }
        } else {
            win->base += to_chop;
            if (win == q->last_requested) {
                q->last_offset -= to_chop;
            }
        }

        if (!nflushed) {
            break;
        }
    }
    lcb_assert(!nflushed);
}

void
netbuf_pdu_enqueue(nb_MGR *mgr, void *pdu, nb_SIZE lloff)
{
    nb_SENDQ *q = &mgr->sendq;
    sllist_append(&q->pdus, (sllist_node *)(void *)((char *)pdu + lloff));
}

void
netbuf_end_flush2(nb_MGR *mgr,
                  unsigned int nflushed,
                  nb_getsize_fn callback,
                  nb_SIZE lloff,
                  void *arg)
{
    sllist_iterator iter;
    nb_SENDQ *q = &mgr->sendq;
    netbuf_end_flush(mgr, nflushed);

    /** Add the overflow which remained from the previous call */
    nflushed += q->pdu_offset;
    SLLIST_ITERFOR(&q->pdus, &iter) {
        nb_SIZE cursize;
        char *ptmp = (char *)iter.cur;
        cursize = callback(ptmp - lloff, nflushed, arg);

        if (cursize > nflushed) {
            break;
        }

        nflushed -= cursize;
        sllist_iter_remove(&q->pdus, &iter);

        if (!nflushed) {
            break;
        }
    }

    /** Store the remainder that wasn't consumed, for the next call */
    q->pdu_offset = nflushed;
}
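
/**
 * PDU accounting sketch (hypothetical my_pdu type; the callback shape
 * follows the call above, receiving the PDU pointer, the remaining flushed
 * byte count, and the user argument):
 *
 *   typedef struct { sllist_node slnode; nb_SIZE size; } my_pdu;
 *
 *   static nb_SIZE pdu_size_cb(void *p, nb_SIZE remaining, void *arg) {
 *       return ((my_pdu *)p)->size;
 *   }
 *
 *   netbuf_pdu_enqueue(&mgr, pdu, offsetof(my_pdu, slnode));
 *   ... flush, then: ...
 *   netbuf_end_flush2(&mgr, nflushed, pdu_size_cb,
 *                     offsetof(my_pdu, slnode), NULL);
 */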

/******************************************************************************
 ******************************************************************************
 ** Release                                                                  **
 ******************************************************************************
 ******************************************************************************/
void
netbuf_mblock_release(nb_MGR *mgr, nb_SPAN *span)
{
#ifdef NETBUF_LIBC_PROXY
    free(span->parent);
    (void)mgr;
#else
    mblock_release_data(&mgr->datapool, span->parent, span->size, span->offset);
#endif
}

/******************************************************************************
 ******************************************************************************
 ** Init/Cleanup                                                             **
 ******************************************************************************
 ******************************************************************************/
void
netbuf_default_settings(nb_SETTINGS *settings)
{
    settings->data_basealloc = NB_DATA_BASEALLOC;
    settings->data_cacheblocks = NB_DATA_CACHEBLOCKS;
    settings->dea_basealloc = NB_MBDEALLOC_BASEALLOC;
    settings->dea_cacheblocks = NB_MBDEALLOC_CACHEBLOCKS;
    settings->sndq_basealloc = NB_SNDQ_BASEALLOC;
    settings->sndq_cacheblocks = NB_SNDQ_CACHEBLOCKS;
}

void
netbuf_init(nb_MGR *mgr, const nb_SETTINGS *user_settings)
{
    nb_MBPOOL *sqpool = &mgr->sendq.elempool;
    nb_MBPOOL *bufpool = &mgr->datapool;

    memset(mgr, 0, sizeof(*mgr));

    if (user_settings) {
        mgr->settings = *user_settings;
    } else {
        netbuf_default_settings(&mgr->settings);
    }

    /** Set our defaults */
    sqpool->basealloc = sizeof(nb_SNDQELEM) * mgr->settings.sndq_basealloc;
    sqpool->ncacheblocks = mgr->settings.sndq_cacheblocks;
    sqpool->mgr = mgr;
    mblock_init(sqpool);

    bufpool->basealloc = mgr->settings.data_basealloc;
    bufpool->ncacheblocks = mgr->settings.data_cacheblocks;
    bufpool->mgr = mgr;
    mblock_init(bufpool);
}

void
netbuf_cleanup(nb_MGR *mgr)
{
    sllist_iterator iter;

    SLLIST_ITERFOR(&mgr->sendq.pending, &iter) {
        nb_SNDQELEM *e = SLLIST_ITEM(iter.cur, nb_SNDQELEM, slnode);
        sllist_iter_remove(&mgr->sendq.pending, &iter);
        mblock_release_ptr(&mgr->sendq.elempool, (char *)e, sizeof(*e));
    }

    mblock_cleanup(&mgr->sendq.elempool);
    mblock_cleanup(&mgr->datapool);
}

/******************************************************************************
 ******************************************************************************
 ** Block Dumping                                                            **
 ******************************************************************************
 ******************************************************************************/

static void
dump_managed_block(nb_MBLOCK *block, FILE *fp)
{
    const char *indent = "  ";
    fprintf(fp, "%sBLOCK(MANAGED)=%p; BUF=%p, %uB\n", indent,
        (void *)block, (void *)block->root, block->nalloc);
    indent = "     ";

    fprintf(fp, "%sUSAGE:\n", indent);
    fprintf(fp, "%s", indent);
    if (BLOCK_IS_EMPTY(block)) {
        fprintf(fp, "EMPTY\n");
        return;
    }

    fprintf(fp, "[");

    if (block->cursor == block->wrap) {
        if (block->start) {
            fprintf(fp, "ooo{S:%u}xxx", block->start);
        } else {
            fprintf(fp, "{S:0}xxxxxx");
        }

        if (block->nalloc > block->cursor) {
            fprintf(fp, "{CW:%u}ooo{A:%u}", block->cursor, block->nalloc);
        } else {
            fprintf(fp, "xxx{CWA:%u}", block->cursor);
        }
    } else {
        fprintf(fp, "xxx{C:%u}ooo{S:%u}xxx", block->cursor, block->start);
        if (block->wrap != block->nalloc) {
            fprintf(fp, "{W:%u}ooo{A:%u}", block->wrap, block->nalloc);
        } else {
            fprintf(fp, "xxx{WA:%u}", block->wrap);
        }
    }
    fprintf(fp, "]\n");
}

static void
dump_sendq(nb_SENDQ *q, FILE *fp)
{
    const char *indent = "  ";
    sllist_node *ll;
    fprintf(fp, "Send Queue\n");
    SLLIST_FOREACH(&q->pending, ll) {
        nb_SNDQELEM *e = SLLIST_ITEM(ll, nb_SNDQELEM, slnode);
        fprintf(fp, "%s[Base=%p, Len=%u]\n", indent, (void *)e->base, e->len);
        if (q->last_requested == e) {
            fprintf(fp, "%s<Current Flush Limit @%u^^^>\n", indent, q->last_offset);
        }
    }
}

void
netbuf_dump_status(nb_MGR *mgr, FILE *fp)
{
    sllist_node *ll;
    fprintf(fp, "Status for MGR=%p\n", (void *)mgr);
    fprintf(fp, "ACTIVE:\n");

    SLLIST_FOREACH(&mgr->datapool.active, ll) {
        nb_MBLOCK *block = SLLIST_ITEM(ll, nb_MBLOCK, slnode);
        dump_managed_block(block, fp);
    }
    fprintf(fp, "AVAILABLE:\n");
    SLLIST_FOREACH(&mgr->datapool.avail, ll) {
        nb_MBLOCK *block = SLLIST_ITEM(ll, nb_MBLOCK, slnode);
        const char *indent = "    ";
        fprintf(fp, "%sBLOCK(AVAIL)=%p; BUF=%p, %uB\n", indent,
            (void *)block, (void *)block->root, block->nalloc);
    }
    dump_sendq(&mgr->sendq, fp);
}

static int
is_pool_clean(const nb_MBPOOL *pool, int is_dealloc)
{
    int ret = 1;
    sllist_node *ll;

    SLLIST_FOREACH(&pool->active, ll) {
        nb_MBLOCK *block = SLLIST_ITEM(ll, nb_MBLOCK, slnode);

        if (!BLOCK_IS_EMPTY(block)) {
            printf("MBLOCK %p: Cursor (%u) != Start (%u)\n",
                   (void *)block, block->cursor, block->start);
            ret = 0;
        }

        if (block->deallocs) {
            nb_DEALLOC_QUEUE *dq = block->deallocs;
            if (!SLLIST_IS_EMPTY(&dq->pending)) {
                printf("MBLOCK %p: Dealloc queue still has items\n", (void *)block);
                ret = 0;
            }

            if (!is_dealloc) {
                if (!is_pool_clean(&block->deallocs->qpool, 1)) {
                    ret = 0;
                }
            }
        }
    }
    return ret;
}

int
netbuf_is_clean(nb_MGR *mgr)
{
    int ret = 1;

    if (!is_pool_clean(&mgr->datapool, 0)) {
        ret = 0;
    }

    if (!SLLIST_IS_EMPTY(&mgr->sendq.pending)) {
        printf("SENDQ @%p: Still have pending flush items\n", (void *)mgr);
        ret = 0;
    }

    if (!SLLIST_IS_EMPTY(&mgr->sendq.pdus)) {
        printf("PDUQ @%p: Still have pending PDU items\n", (void *)mgr);
        ret = 0;
    }

    if (!is_pool_clean(&mgr->sendq.elempool, 0)) {
        printf("SENDQ/MBLOCK @%p: Still have unfreed members in send queue\n",
               (void *)mgr);
        ret = 0;
    }

    return ret;
}

int
netbuf_has_flushdata(nb_MGR *mgr)
{
    if (!SLLIST_IS_EMPTY(&mgr->sendq.pending)) {
        return 1;
    }
    if (!SLLIST_IS_EMPTY(&mgr->sendq.pdus)) {
        return 1;
    }
    return 0;
}