/*
 * include/common/buf.h
 * Simple buffer handling.
 *
 * Copyright (C) 2000-2018 Willy Tarreau - w@1wt.eu
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef _COMMON_BUF_H
#define _COMMON_BUF_H

#include <inttypes.h>
#include <string.h>
#include <unistd.h>

#include <common/debug.h>

/* Structure defining a buffer's head */
struct buffer {
	size_t size;                /* buffer size in bytes */
	char  *area;                /* points to <size> bytes */
	size_t data;                /* amount of data after head including wrapping */
	size_t head;                /* start offset of remaining data relative to area */
};

/* A buffer may be in 3 different states :
 *   - unallocated : size == 0, area == 0  (b_is_null() is true)
 *   - waiting     : size == 0, area != 0  (b_is_null() is true)
 *   - allocated   : size  > 0, area  > 0  (b_is_null() is false)
 */

/* initializers for certain buffer states. It is important that the NULL buffer
 * remains the one with all fields initialized to zero so that a calloc() or a
 * memset() on a struct automatically sets a NULL buffer.
 */
#define BUF_NULL   ((struct buffer){ })
#define BUF_WANTED ((struct buffer){ .area = (char *)1 })
#define BUF_RING   ((struct buffer){ .area = (char *)2 })


/***************************************************************************/
/* Functions used to compute offsets and pointers. Most of them exist in   */
/* both wrapping-safe and unchecked ("__" prefix) variants. Some returning */
/* a pointer are also provided with an "_ofs" suffix when they return an   */
/* offset relative to the storage area.                                    */
/***************************************************************************/

/* b_is_null() : returns true if (and only if) the buffer is not yet allocated
 * and thus has an empty size. Its pointer may then be anything, including NULL
 * (unallocated) or an invalid pointer such as (char*)1 (allocation pending).
 */
static inline int b_is_null(const struct buffer *buf)
{
	return buf->size == 0;
}

/* b_orig() : returns the pointer to the origin of the storage, which is the
 * location of byte at offset zero. This is mostly used by functions which
 * handle the wrapping by themselves.
 */
static inline char *b_orig(const struct buffer *b)
{
	return b->area;
}

/* b_size() : returns the size of the buffer. */
static inline size_t b_size(const struct buffer *b)
{
	return b->size;
}

/* b_wrap() : returns the pointer to the wrapping position of the buffer area,
 * which is by definition the first byte not part of the buffer.
 */
static inline char *b_wrap(const struct buffer *b)
{
	return b->area + b->size;
}

/* b_data() : returns the number of bytes present in the buffer. */
static inline size_t b_data(const struct buffer *b)
{
	return b->data;
}

/* b_room() : returns the amount of room left in the buffer */
static inline size_t b_room(const struct buffer *b)
{
	return b->size - b_data(b);
}

/* b_full() : returns true if the buffer is full. */
static inline size_t b_full(const struct buffer *b)
{
	return !b_room(b);
}


/* b_stop() : returns the pointer to the byte following the end of the buffer,
 * which may be out of the buffer if the buffer ends on the last byte of the
 * area.
 */
static inline size_t __b_stop_ofs(const struct buffer *b)
{
	return b->head + b->data;
}

static inline const char *__b_stop(const struct buffer *b)
{
	return b_orig(b) + __b_stop_ofs(b);
}

static inline size_t b_stop_ofs(const struct buffer *b)
{
	size_t stop = __b_stop_ofs(b);

	if (stop > b->size)
		stop -= b->size;
	return stop;
}

static inline const char *b_stop(const struct buffer *b)
{
	return b_orig(b) + b_stop_ofs(b);
}


/* b_peek() : returns a pointer to the data at position <ofs> relative to the
 * head of the buffer. Will typically point to input data if called with the
 * amount of output data. The wrapped versions will only support wrapping once
 * before the beginning or after the end.
 */
static inline size_t __b_peek_ofs(const struct buffer *b, size_t ofs)
{
	return b->head + ofs;
}

static inline char *__b_peek(const struct buffer *b, size_t ofs)
{
	return b_orig(b) + __b_peek_ofs(b, ofs);
}

static inline size_t b_peek_ofs(const struct buffer *b, size_t ofs)
{
	size_t ret = __b_peek_ofs(b, ofs);

	if (ret >= b->size)
		ret -= b->size;

	return ret;
}

static inline char *b_peek(const struct buffer *b, size_t ofs)
{
	return b_orig(b) + b_peek_ofs(b, ofs);
}
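
/* Illustrative sketch, not part of the original API: returns the byte located
 * <ofs> bytes after the buffer's head. The caller is assumed to have checked
 * that ofs < b_data(b), since b_peek() only handles wrapping, not bounds.
 */
static inline char b_example_byte_at(const struct buffer *b, size_t ofs)
{
	return *b_peek(b, ofs);
}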


/* b_head() : returns the pointer to the buffer's head, which is the location
 * of the next byte to be dequeued. Note that for buffers of size zero, the
 * returned pointer may be outside of the buffer or even invalid.
 */
static inline size_t __b_head_ofs(const struct buffer *b)
{
	return b->head;
}

static inline char *__b_head(const struct buffer *b)
{
	return b_orig(b) + __b_head_ofs(b);
}

static inline size_t b_head_ofs(const struct buffer *b)
{
	return __b_head_ofs(b);
}

static inline char *b_head(const struct buffer *b)
{
	return __b_head(b);
}


/* b_tail() : returns the pointer to the tail of the buffer, which is the
 * location of the first byte where it is possible to enqueue new data. Note
 * that for buffers of size zero, the returned pointer may be outside of the
 * buffer or even invalid.
 */
static inline size_t __b_tail_ofs(const struct buffer *b)
{
	return __b_peek_ofs(b, b_data(b));
}

static inline char *__b_tail(const struct buffer *b)
{
	return __b_peek(b, b_data(b));
}

static inline size_t b_tail_ofs(const struct buffer *b)
{
	return b_peek_ofs(b, b_data(b));
}

static inline char *b_tail(const struct buffer *b)
{
	return b_peek(b, b_data(b));
}


/* b_next() : for an absolute pointer <p> or a relative offset <o> pointing to
 * a valid location within buffer <b>, returns either the absolute pointer or
 * the relative offset pointing to the next byte, which usually is at (p + 1)
 * unless p reaches the wrapping point and wrapping is needed.
 */
static inline size_t b_next_ofs(const struct buffer *b, size_t o)
{
	o++;
	if (o == b->size)
		o = 0;
	return o;
}

static inline char *b_next(const struct buffer *b, const char *p)
{
	p++;
	if (p == b_wrap(b))
		p = b_orig(b);
	return (char *)p;
}

/* b_dist() : returns the distance between two pointers, taking into account
 * the ability to wrap around the buffer's end. The operation is not defined if
 * either of the pointers does not belong to the buffer or if their distance is
 * greater than the buffer's size.
 */
static inline size_t b_dist(const struct buffer *b, const char *from, const char *to)
{
	ssize_t dist = to - from;

	dist += dist < 0 ? b_size(b) : 0;
	return dist;
}

/* b_almost_full() : returns 1 if the buffer uses at least 3/4 of its capacity,
 * otherwise zero. Buffers of size zero are considered full.
 */
static inline int b_almost_full(const struct buffer *b)
{
	return b_data(b) >= b_size(b) * 3 / 4;
}

/* b_space_wraps() : returns non-zero only if the buffer's free space wraps :
 *  [     |xxxx|           ]    => yes
 *  [xxxx|                 ]    => no
 *  [                 |xxxx]    => no
 *  [xxxx|            |xxxx]    => no
 *  [xxxxxxxxxx|xxxxxxxxxxx]    => no
 *
 *  So the only case where the buffer does not wrap is when there's data either
 *  at the beginning or at the end of the buffer. Thus we have this :
 *  - if (head <= 0)    ==> doesn't wrap
 *  - if (tail >= size) ==> doesn't wrap
 *  - otherwise wraps
 */
static inline int b_space_wraps(const struct buffer *b)
{
	if ((ssize_t)__b_head_ofs(b) <= 0)
		return 0;
	if (__b_tail_ofs(b) >= b_size(b))
		return 0;
	return 1;
}

/* b_contig_data() : returns the amount of data that can contiguously be read
 * at once starting from a relative offset <start> (which makes it easy to
 * pre-compute blocks for memcpy). The start point will typically contain the
 * amount of past data already returned by a previous call to this function.
 */
static inline size_t b_contig_data(const struct buffer *b, size_t start)
{
	size_t data = b_wrap(b) - b_peek(b, start);
	size_t limit = b_data(b) - start;

	if (data > limit)
		data = limit;
	return data;
}
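
/* Illustrative sketch, not part of the original API: copies all readable bytes
 * from <b> into <out>, which is assumed to be at least b_data(b) bytes long.
 * Thanks to b_contig_data(), a wrapping buffer is drained in at most two
 * memcpy() calls.
 */
static inline size_t b_example_copy_out(const struct buffer *b, char *out)
{
	size_t ofs = 0;

	while (ofs < b_data(b)) {
		size_t block = b_contig_data(b, ofs);

		memcpy(out + ofs, b_peek(b, ofs), block);
		ofs += block;
	}
	return ofs;
}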

/* b_contig_space() : returns the amount of bytes that can be appended to the
 * buffer at once. We have 8 possible cases :
 *
 * [____________________]  return size
 * [______|_____________]  return size - tail_ofs
 * [XXXXXX|_____________]  return size - tail_ofs
 * [___|XXXXXX|_________]  return size - tail_ofs
 * [______________XXXXXX]  return head_ofs
 * [XXXX|___________|XXX]  return head_ofs - tail_ofs
 * [XXXXXXXXXX|XXXXXXXXX]  return 0
 * [XXXXXXXXXXXXXXXXXXXX]  return 0
 */
static inline size_t b_contig_space(const struct buffer *b)
{
	size_t left, right;

	right = b_head_ofs(b);
	left  = right + b_data(b);

	left = b_size(b) - left;
	if ((ssize_t)left <= 0)
		left += right;
	return left;
}
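
/* Illustrative sketch, not part of the original API: appends up to <len> bytes
 * from <src> at the tail, one contiguous area at a time. It mirrors what
 * __b_putblk() further below does and is only meant to make the meaning of
 * b_contig_space() concrete.
 */
static inline size_t b_example_append(struct buffer *b, const char *src, size_t len)
{
	size_t copied = 0;

	while (copied < len && !b_full(b)) {
		size_t block = b_contig_space(b);

		if (block > len - copied)
			block = len - copied;
		memcpy(b_tail(b), src + copied, block);
		b->data += block;
		copied  += block;
	}
	return copied;
}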

/* b_getblk() : gets one full block of data at once from a buffer, starting
 * from offset <offset> after the buffer's head, and limited to no more than
 * <len> bytes. The caller is responsible for ensuring that neither <offset>
 * nor <offset>+<len> exceed the total number of bytes available in the buffer.
 * Return values :
 *   >0 : number of bytes read, equal to requested size.
 *   =0 : not enough data available. <blk> is left undefined.
 * The buffer is left unaffected.
 */
static inline size_t b_getblk(const struct buffer *buf, char *blk, size_t len, size_t offset)
{
	size_t firstblock;

	if (len + offset > b_data(buf))
		return 0;

	firstblock = b_wrap(buf) - b_head(buf);
	if (firstblock > offset) {
		if (firstblock >= len + offset) {
			memcpy(blk, b_head(buf) + offset, len);
			return len;
		}

		memcpy(blk, b_head(buf) + offset, firstblock - offset);
		memcpy(blk + firstblock - offset, b_orig(buf), len - firstblock + offset);
		return len;
	}

	memcpy(blk, b_orig(buf) + offset - firstblock, len);
	return len;
}

/* b_getblk_nc() : gets one or two blocks of data at once from a buffer,
 * starting from offset <ofs> after the beginning of its output, and limited to
 * no more than <max> bytes. The caller is responsible for ensuring that
 * neither <ofs> nor <ofs>+<max> exceed the total number of bytes available in
 * the buffer. Return values :
 *   >0 : number of blocks filled (1 or 2). blk1 is always filled before blk2.
 *   =0 : not enough data available. <blk*> are left undefined.
 * The buffer is left unaffected. Unused buffers are left in an undefined state.
 */
static inline size_t b_getblk_nc(const struct buffer *buf, const char **blk1, size_t *len1, const char **blk2, size_t *len2, size_t ofs, size_t max)
{
	size_t l1;

	if (!max)
		return 0;

	*blk1 = b_peek(buf, ofs);
	l1 = b_wrap(buf) - *blk1;
	if (l1 < max) {
		*len1 = l1;
		*len2 = max - l1;
		*blk2 = b_orig(buf);
		return 2;
	}
	*len1 = max;
	return 1;
}
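
/* Illustrative sketch, not part of the original API: retrieves up to <max>
 * bytes starting at offset <ofs> using b_getblk_nc() and flattens them into
 * <dst>. The copy is only performed here to show how the one or two returned
 * blocks are consumed; the whole point of the _nc variant is that callers may
 * use the pointers directly without copying.
 */
static inline size_t b_example_flatten(const struct buffer *b, char *dst, size_t ofs, size_t max)
{
	const char *blk1, *blk2;
	size_t len1, len2;
	size_t blocks;

	blocks = b_getblk_nc(b, &blk1, &len1, &blk2, &len2, ofs, max);
	if (!blocks)
		return 0;

	memcpy(dst, blk1, len1);
	if (blocks == 2)
		memcpy(dst + len1, blk2, len2);
	return len1 + (blocks == 2 ? len2 : 0);
}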


/*********************************************/
/* Functions used to modify the buffer state */
/*********************************************/

/* b_reset() : resets a buffer. The size is not touched. */
static inline void b_reset(struct buffer *b)
{
	b->head = 0;
	b->data = 0;
}

/* b_make() : make a buffer from all parameters */
static inline struct buffer b_make(char *area, size_t size, size_t head, size_t data)
{
	struct buffer b;

	b.area = area;
	b.size = size;
	b.head = head;
	b.data = data;
	return b;
}
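
/* Illustrative sketch, not part of the original API: wraps a caller-provided
 * storage area into an empty allocated buffer, or yields BUF_NULL when no
 * storage is given. The storage must remain valid for the buffer's lifetime.
 */
static inline struct buffer b_example_from_area(char *area, size_t size)
{
	if (!area || !size)
		return BUF_NULL;              /* unallocated : b_is_null() is true */
	return b_make(area, size, 0, 0);      /* empty, aligned on the origin */
}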

/* b_sub() : decreases the buffer length by <count> */
static inline void b_sub(struct buffer *b, size_t count)
{
	b->data -= count;
}

/* b_add() : increases the buffer length by <count> */
static inline void b_add(struct buffer *b, size_t count)
{
	b->data += count;
}

/* b_set_data() : sets the buffer's length */
static inline void b_set_data(struct buffer *b, size_t len)
{
	b->data = len;
}

/* b_del() : skips <del> bytes in a buffer <b>. Covers both the output and the
 * input parts so it's up to the caller to know where it plays and that <del>
 * is always smaller than the amount of data in the buffer.
 */
static inline void b_del(struct buffer *b, size_t del)
{
	b->data -= del;
	b->head += del;
	if (b->head >= b->size)
		b->head -= b->size;
}

/* b_realign_if_empty() : realigns a buffer if it's empty */
static inline void b_realign_if_empty(struct buffer *b)
{
	if (!b_data(b))
		b->head = 0;
}

/* b_slow_realign() : this function realigns a possibly wrapping buffer so that
 * the part remaining to be parsed is contiguous and starts at the beginning of
 * the buffer and the already parsed output part ends at the end of the buffer.
 * This provides the best conditions since it allows the largest inputs to be
 * processed at once and ensures that once the output data leaves, the whole
 * buffer is available at once. The number of output bytes supposedly present
 * at the beginning of the buffer and which need to be moved to the end must be
 * passed in <output>. A temporary swap area at least as large as b->size must
 * be provided in <swap>. It's up to the caller to ensure <output> is no larger
 * than the difference between the whole buffer's length and its input.
 */
static inline void b_slow_realign(struct buffer *b, char *swap, size_t output)
{
	size_t block1 = output;
	size_t block2 = 0;

	/* process output data in two steps to cover wrapping */
	if (block1 > b_size(b) - b_head_ofs(b)) {
		block2 = b_size(b) - b_head_ofs(b);
		block1 -= block2;
	}
	memcpy(swap + b_size(b) - output, b_head(b), block1);
	memcpy(swap + b_size(b) - block2, b_orig(b), block2);

	/* process input data in two steps to cover wrapping */
	block1 = b_data(b) - output;
	block2 = 0;

	if (block1 > b_tail_ofs(b)) {
		block2 = b_tail_ofs(b);
		block1 = block1 - block2;
	}
	memcpy(swap, b_peek(b, output), block1);
	memcpy(swap + block1, b_orig(b), block2);

	/* reinject changes into the buffer */
	memcpy(b_orig(b), swap, b_data(b) - output);
	memcpy(b_wrap(b) - output, swap + b_size(b) - output, output);

	b->head = (output ? b_size(b) - output : 0);
}

/* b_putchr() : tries to append char <c> at the end of buffer <b>. Supports
 * wrapping. The character is dropped if the buffer is full.
 */
static inline void b_putchr(struct buffer *b, char c)
{
	if (b_full(b))
		return;
	*b_tail(b) = c;
	b->data++;
}

/* __b_putblk() : tries to append <len> bytes from block <blk> to the end of
 * buffer <b> without checking for free space (it's up to the caller to do it).
 * Supports wrapping. It must not be called with len == 0.
 */
static inline void __b_putblk(struct buffer *b, const char *blk, size_t len)
{
	size_t half = b_contig_space(b);

	if (half > len)
		half = len;

	memcpy(b_tail(b), blk, half);

	if (len > half)
		memcpy(b_peek(b, b_data(b) + half), blk + half, len - half);
	b->data += len;
}

/* b_putblk() : tries to append block <blk> at the end of buffer <b>. Supports
 * wrapping. Data are truncated if the buffer is too short. It returns the
 * number of bytes copied.
 */
static inline size_t b_putblk(struct buffer *b, const char *blk, size_t len)
{
	if (len > b_room(b))
		len = b_room(b);
	if (len)
		__b_putblk(b, blk, len);
	return len;
}
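
/* Illustrative sketch, not part of the original API: appends a NUL-terminated
 * string to the buffer, without the trailing NUL. The return value is the
 * number of bytes really copied, which may be less than strlen(str) if the
 * buffer lacked room, exactly as reported by b_putblk().
 */
static inline size_t b_example_putstr(struct buffer *b, const char *str)
{
	return b_putblk(b, str, strlen(str));
}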

/* b_xfer() : transfers at most <count> bytes from buffer <src> to buffer <dst>
 * and returns the number of bytes copied. The bytes are removed from <src> and
 * added to <dst>. The caller is responsible for ensuring that <count> is not
 * larger than b_room(dst). Whenever possible (if the destination is empty and
 * at least as much as the source was requested), the buffers are simply
 * swapped instead of copied.
 */
static inline size_t b_xfer(struct buffer *dst, struct buffer *src, size_t count)
{
	size_t ret, block1, block2;

	ret = 0;
	if (!count)
		goto leave;

	ret = b_data(src);
	if (!ret)
		goto leave;

	if (ret > count)
		ret = count;
	else if (!b_data(dst)) {
		/* zero copy is possible by just swapping buffers */
		struct buffer tmp = *dst;
		*dst = *src;
		*src = tmp;
		goto leave;
	}

	block1 = b_contig_data(src, 0);
	if (block1 > ret)
		block1 = ret;
	block2 = ret - block1;

	if (block1)
		__b_putblk(dst, b_head(src), block1);

	if (block2)
		__b_putblk(dst, b_peek(src, block1), block2);

	b_del(src, ret);
 leave:
	return ret;
}
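
/* Illustrative sketch, not part of the original API: moves as much data as
 * possible from <src> to <dst>, clamping the requested count to b_room(dst)
 * as callers of b_xfer() are expected to do, and possibly benefiting from the
 * zero-copy swap when <dst> is empty.
 */
static inline size_t b_example_drain_into(struct buffer *dst, struct buffer *src)
{
	size_t count = b_data(src);

	if (count > b_room(dst))
		count = b_room(dst);
	return b_xfer(dst, src, count);
}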

/* Moves <len> bytes from absolute position <src> of buffer <b> by <shift>
 * bytes, while supporting wrapping of both the source and the destination.
 * The position is relative to the buffer's origin and may overlap with the
 * target position. The <shift>'s absolute value must be strictly lower than
 * the buffer's size. The main purpose is to aggregate data blocks during
 * parsing while removing unused delimiters. The buffer's length is not
 * modified, and the caller must take care of size adjustments and holes by
 * itself.
 */
static inline void b_move(const struct buffer *b, size_t src, size_t len, ssize_t shift)
{
	char  *orig = b_orig(b);
	size_t size = b_size(b);
	size_t dst  = src + size + shift;
	size_t cnt;

	if (dst >= size)
		dst -= size;

	if (shift < 0) {
		/* copy from left to right */
		for (; (cnt = len); len -= cnt) {
			if (cnt > size - src)
				cnt = size - src;
			if (cnt > size - dst)
				cnt = size - dst;

			memmove(orig + dst, orig + src, cnt);
			dst += cnt;
			src += cnt;
			if (dst >= size)
				dst -= size;
			if (src >= size)
				src -= size;
		}
	}
	else if (shift > 0) {
		/* copy from right to left */
		for (; (cnt = len); len -= cnt) {
			size_t src_end = src + len;
			size_t dst_end = dst + len;

			if (dst_end > size)
				dst_end -= size;
			if (src_end > size)
				src_end -= size;

			if (cnt > dst_end)
				cnt = dst_end;
			if (cnt > src_end)
				cnt = src_end;

			memmove(orig + dst_end - cnt, orig + src_end - cnt, cnt);
		}
	}
}

/* b_rep_blk() : writes the block <blk> at position <pos> which must be in
 * buffer <b>, and moves the part between <end> and the buffer's tail just
 * after the end of the copy of <blk>. This effectively replaces the part
 * located between <pos> and <end> with a copy of <blk> of length <len>. The
 * buffer's length is automatically updated. This is used to replace a block
 * with another one inside a buffer. The shift value (positive or negative) is
 * returned. If there's no space left, the move is not done. If <len> is null,
 * the <blk> pointer is allowed to be null, in order to erase a block.
 */
static inline int b_rep_blk(struct buffer *b, char *pos, char *end, const char *blk, size_t len)
{
	int delta;

	delta = len - (end - pos);

	if (__b_tail(b) + delta > b_wrap(b))
		return 0;  /* no space left */

	if (b_data(b) &&
	    b_tail(b) + delta > b_head(b) &&
	    b_head(b) >= b_tail(b))
		return 0;  /* no space left before wrapping data */

	/* first, protect the end of the buffer */
	memmove(end + delta, end, b_tail(b) - end);

	/* now, copy blk over pos */
	if (len)
		memcpy(pos, blk, len);

	b_add(b, delta);
	b_realign_if_empty(b);

	return delta;
}
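
/* Illustrative sketch, not part of the original API: replaces the <old_len>
 * bytes starting at <pos>, which must lie within buffer <b>, with the
 * <new_len> bytes of <repl>. The returned shift comes straight from
 * b_rep_blk(), with 0 indicating that there was not enough room.
 */
static inline int b_example_replace(struct buffer *b, char *pos, size_t old_len,
				    const char *repl, size_t new_len)
{
	return b_rep_blk(b, pos, pos + old_len, repl, new_len);
}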


/* __b_put_varint(): encode 64-bit value <v> as a varint into buffer <b>. The
 * caller must have checked that the encoded value fits in the buffer so that
 * there are no length checks. Wrapping is supported. You don't want to use
 * this function but b_put_varint() instead.
 */
static inline void __b_put_varint(struct buffer *b, uint64_t v)
{
	size_t data = b->data;
	size_t size = b_size(b);
	char  *wrap = b_wrap(b);
	char  *tail = b_tail(b);

	if (v >= 0xF0) {
		/* more than one byte, first write the 4 least significant
		 * bits, then follow with 7 bits per byte.
		 */
		*tail = v | 0xF0;
		v = (v - 0xF0) >> 4;

		while (1) {
			if (tail++ == wrap)
				tail -= size;
			data++;
			if (v < 0x80)
				break;
			*tail = v | 0x80;
			v = (v - 0x80) >> 7;
		}
	}

	/* last byte */
	*tail = v;
	data++;
	b->data = data;
}

/* b_put_varint(): try to encode value <v> as a varint into buffer <b>. Returns
 * the number of bytes written in case of success, or 0 if there is not enough
 * room. Wrapping is supported. No partial writes will be performed.
 */
static inline int b_put_varint(struct buffer *b, uint64_t v)
{
	size_t data = b->data;
	size_t size = b_size(b);
	char  *wrap = b_wrap(b);
	char  *tail = b_tail(b);

	if (data != size && v >= 0xF0) {
		/* more than one byte, first write the 4 least significant
		 * bits, then follow with 7 bits per byte.
		 */
		*tail = v | 0xF0;
		v = (v - 0xF0) >> 4;

		while (1) {
			if (tail++ == wrap)
				tail -= size;
			data++;
			if (data == size || v < 0x80)
				break;
			*tail = v | 0x80;
			v = (v - 0x80) >> 7;
		}
	}

	/* last byte */
	if (data == size)
		return 0;

	*tail = v;
	data++;

	size = data - b->data;
	b->data = data;
	return size;
}

/* b_get_varint(): try to decode a varint from buffer <b> into value <vptr>.
 * Returns the number of bytes read in case of success, or 0 if there were not
 * enough bytes. Wrapping is supported. No partial reads will be performed.
 */
static inline int b_get_varint(struct buffer *b, uint64_t *vptr)
{
	const uint8_t *head = (const uint8_t *)b_head(b);
	const uint8_t *wrap = (const uint8_t *)b_wrap(b);
	size_t data = b->data;
	size_t size = b_size(b);
	uint64_t v = 0;
	int bits = 0;

	if (data != 0 && (*head >= 0xF0)) {
		v = *head;
		bits += 4;
		while (1) {
			if (head++ == wrap)
				head -= size;
			data--;
			if (!data || !(*head & 0x80))
				break;
			v += (uint64_t)*head << bits;
			bits += 7;
		}
	}

	/* last byte */
	if (!data)
		return 0;

	v += (uint64_t)*head << bits;
	*vptr = v;
	data--;
	size = b->data - data;
	b_del(b, size);
	return size;
}
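
/* Illustrative sketch, not part of the original API: encodes <v> as a varint
 * and immediately decodes it back, returning non-zero on a successful round
 * trip. It assumes <b> is empty on entry so that the head points to the bytes
 * just written; note that b_get_varint() consumes them.
 */
static inline int b_example_varint_roundtrip(struct buffer *b, uint64_t v)
{
	uint64_t decoded = 0;

	if (!b_put_varint(b, v))
		return 0;                 /* not enough room */
	return b_get_varint(b, &decoded) && decoded == v;
}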

/* b_peek_varint(): try to decode a varint from buffer <b> at offset <ofs>
 * relative to head, into value <vptr>. Returns the number of bytes parsed in
 * case of success, or 0 if there were not enough bytes, in which case the
 * contents of <vptr> are not updated. Wrapping is supported. The buffer's head
 * will NOT be updated. It is illegal to call this function with <ofs> greater
 * than b->data.
 */
static inline int b_peek_varint(struct buffer *b, size_t ofs, uint64_t *vptr)
{
	const uint8_t *head = (const uint8_t *)b_peek(b, ofs);
	const uint8_t *wrap = (const uint8_t *)b_wrap(b);
	size_t data = b_data(b) - ofs;
	size_t size = b_size(b);
	uint64_t v = 0;
	int bits = 0;

	if (data != 0 && (*head >= 0xF0)) {
		v = *head;
		bits += 4;
		while (1) {
			if (head++ == wrap)
				head -= size;
			data--;
			if (!data || !(*head & 0x80))
				break;
			v += (uint64_t)*head << bits;
			bits += 7;
		}
	}

	/* last byte */
	if (!data)
		return 0;

	v += (uint64_t)*head << bits;
	*vptr = v;
	data--;
	size = b->data - ofs - data;
	return size;
}


/*
 * Buffer ring management.
 *
 * A buffer ring is a circular list of buffers, with a head buffer (the oldest,
 * being read from) and a tail (the newest, being written to). Such a ring is
 * declared as an array of buffers. The first element in the array is the root
 * and is used differently. It stores the following elements :
 *  - size : number of allocated elements in the array, including the root
 *  - area : magic value BUF_RING (just to help debugging)
 *  - head : position of the head in the array (starts at one)
 *  - data : position of the tail in the array (starts at one).
 *
 * Note that contrary to a linear buffer, head and tail may be equal with room
 * available, since the producer is expected to fill the tail. Also, the tail
 * might pretty much be equal to BUF_WANTED if an allocation is pending, in
 * which case it's illegal to try to allocate past this point (only one entry
 * may be subscribed for allocation). It is illegal to allocate a buffer after
 * an empty one, so that BUF_NULL is always the last buffer. It is also illegal
 * to remove elements without freeing the buffers. Buffers between <tail> and
 * <head> are in an undefined state, but <tail> and <head> are always valid.
 * A ring may not contain less than 2 elements, since the root is mandatory,
 * and at least one entry is required to always present a valid buffer.
 *
 * Given that buffers are 16 or 32 bytes long, it's convenient to set the
 * size of the array to 2^N in order to keep (2^N)-1 elements, totaling
 * 2^N*16 (or 32) bytes. For example on a 64-bit system, a ring of 31 usable
 * buffers takes 1024 bytes.
 */

/* Initialization of a ring, the size argument contains the number of allocated
 * elements, including the root. There must always be at least 2 elements, one
 * for the root and one for storage.
 */
static inline void br_init(struct buffer *r, size_t size)
{
	BUG_ON(size < 2);

	r->size = size;
	r->area = BUF_RING.area;
	r->head = r->data = 1;
	r[1]    = BUF_NULL;
}
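
/* Illustrative sketch, not part of the original API: a ring is simply declared
 * as an array of buffers whose first element is the root. Using a power of two
 * for the array size follows the sizing hint above (here 8 elements, i.e. the
 * root plus 7 usable buffers).
 */
static inline void br_example_setup(struct buffer ring[8])
{
	br_init(ring, 8);
}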

/* Returns number of elements in the ring, root included */
static inline unsigned int br_size(const struct buffer *r)
{
	BUG_ON(r->area != BUF_RING.area);

	return r->size;
}

/* Returns true if no more buffers may be added */
static inline unsigned int br_full(const struct buffer *r)
{
	BUG_ON(r->area != BUF_RING.area);

	return r->data + 1 == r->head || r->data + 1 == r->head - 1 + r->size;
}

/* Returns the index of the ring's head buffer */
static inline unsigned int br_head_idx(const struct buffer *r)
{
	BUG_ON(r->area != BUF_RING.area);

	return r->head;
}

/* Returns the index of the ring's tail buffer */
static inline unsigned int br_tail_idx(const struct buffer *r)
{
	BUG_ON(r->area != BUF_RING.area);

	return r->data;
}

/* Returns a pointer to the ring's head buffer */
static inline struct buffer *br_head(struct buffer *r)
{
	BUG_ON(r->area != BUF_RING.area);

	return r + br_head_idx(r);
}

/* Returns a pointer to the ring's tail buffer */
static inline struct buffer *br_tail(struct buffer *r)
{
	BUG_ON(r->area != BUF_RING.area);

	return r + br_tail_idx(r);
}

/* Returns the amount of data of the ring's HEAD buffer */
static inline unsigned int br_data(const struct buffer *r)
{
	BUG_ON(r->area != BUF_RING.area);

	return b_data(r + br_head_idx(r));
}

/* Returns non-zero if the ring is non-full or its tail has some room */
static inline unsigned int br_has_room(const struct buffer *r)
{
	BUG_ON(r->area != BUF_RING.area);

	if (!br_full(r))
		return 1;
	return b_room(r + br_tail_idx(r));
}

/* Advances the ring's tail if it points to a non-empty buffer, and returns the
 * buffer, or NULL if the ring is full or the tail buffer is already empty. A
 * new buffer is initialized to BUF_NULL before being returned. This is to be
 * used after failing to append data, in order to decide to retry or not.
 */
static inline struct buffer *br_tail_add(struct buffer *r)
{
	struct buffer *b;

	BUG_ON(r->area != BUF_RING.area);

	b = br_tail(r);
	if (!b_size(b))
		return NULL;

	if (br_full(r))
		return NULL;

	r->data++;
	if (r->data >= r->size)
		r->data = 1;

	b = br_tail(r);
	*b = BUF_NULL;
	return b;
}
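
/* Illustrative sketch, not part of the original API: appends <len> bytes to
 * the ring, switching to a new tail entry whenever the current one is full.
 * <alloc> is a hypothetical caller-provided callback expected to turn a
 * BUF_NULL tail into an allocated buffer; nothing in this file provides it.
 */
static inline size_t br_example_append(struct buffer *r, const char *blk, size_t len,
				       void (*alloc)(struct buffer *))
{
	size_t copied = 0;

	while (copied < len) {
		struct buffer *b = br_tail(r);

		if (b_is_null(b))
			alloc(b);
		if (b_is_null(b))
			break;                        /* no storage available yet */

		copied += b_putblk(b, blk + copied, len - copied);

		if (copied < len && !br_tail_add(r))
			break;                        /* ring full */
	}
	return copied;
}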

/* Extracts the ring's head buffer and returns it. The last buffer (tail) is
 * never removed but it is returned. This guarantees that we stop on BUF_WANTED
 * or BUF_EMPTY and that at the end a valid buffer remains present. This is
 * used for pre-extraction during a free() loop for example. The caller is
 * expected to detect the end (e.g. using b_size() since b_free() voids the
 * buffer).
 */
static inline struct buffer *br_head_pick(struct buffer *r)
{
	struct buffer *b;

	BUG_ON(r->area != BUF_RING.area);

	b = br_head(r);
	if (r->head != r->data) {
		r->head++;
		if (r->head >= r->size)
			r->head = 1;
	}
	return b;
}

/* Advances the ring's head and returns the next buffer, unless it's already
 * the tail, in which case the tail itself is returned. This is used for post-
 * parsing deletion. The caller is expected to detect the end (e.g. a parser
 * will typically purge the head before proceeding).
 */
static inline struct buffer *br_del_head(struct buffer *r)
{
	BUG_ON(r->area != BUF_RING.area);

	if (r->head != r->data) {
		r->head++;
		if (r->head >= r->size)
			r->head = 1;
	}
	return br_head(r);
}

#endif /* _COMMON_BUF_H */

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */