/*
 * include/common/buffer.h
 * Buffer management definitions, macros and inline functions.
 *
 * Copyright (C) 2000-2012 Willy Tarreau - w@1wt.eu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation, version 2.1
 * exclusively.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
 */

#ifndef _COMMON_BUFFER_H
#define _COMMON_BUFFER_H

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <common/chunk.h>
#include <common/config.h>
#include <common/memory.h>


struct buffer {
	char *p;                        /* buffer's start pointer, separates in and out data */
	unsigned int size;              /* buffer size in bytes */
	unsigned int i;                 /* number of input bytes pending for analysis in the buffer */
	unsigned int o;                 /* number of out bytes the sender can consume from this buffer */
	char data[0];                   /* <size> bytes */
};
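
/* A rough illustration of the layout in the non-wrapping case, derived from
 * the field descriptions above: the <o> output bytes end exactly where the
 * <i> input bytes begin, at <p>. Either area may wrap past the end of <data>.
 *
 *       <----------------------- size ----------------------->
 *      +--------+============+============+------------------+
 *      |  free  |    out     |     in     |       free       |
 *      +--------+============+============+------------------+
 *      ^         <--- o ----> <--- i ---->
 *     data                   ^
 *                            p
 */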

/* an element of the <buffer_wq> list. It represents an object that needs to
 * acquire a buffer to continue its processing. */
struct buffer_wait {
	void *target;              /* The waiting object that should be woken up */
	int (*wakeup_cb)(void *);  /* The function used to wake up the <target>, passed as argument */
	struct list list;          /* Next element in the <buffer_wq> list */
};
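
/* A sketch of how a waiter might register itself, assuming the list helpers
 * from mini-clist.h; this is illustrative, not a copy of existing code:
 *
 *   struct buffer_wait wait = { .target = my_obj, .wakeup_cb = my_wakeup };
 *   LIST_ADDQ(&buffer_wq, &wait.list);
 *
 * __offer_buffer(), declared near the end of this file, is then expected to
 * walk <buffer_wq> and call each wakeup_cb once buffers become available.
 */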

extern struct pool_head *pool2_buffer;
extern struct buffer buf_empty;
extern struct buffer buf_wanted;
extern struct list buffer_wq;

int init_buffer();
int buffer_replace2(struct buffer *b, char *pos, char *end, const char *str, int len);
int buffer_insert_line2(struct buffer *b, char *pos, const char *str, int len);
void buffer_dump(FILE *o, struct buffer *b, int from, int to);
void buffer_slow_realign(struct buffer *buf);
void buffer_bounce_realign(struct buffer *buf);

/*****************************************************************/
/* These functions are used to compute various buffer area sizes */
/*****************************************************************/

/* Returns an absolute pointer for a position relative to the current buffer's
 * pointer. It is written so that it is optimal when <ofs> is a const. It is
 * written as a macro instead of an inline function so that the compiler knows
 * when it can optimize out the sign test on <ofs> when passed an unsigned int.
 * Note that callers MUST cast <ofs> to int if they expect negative values.
 */
#define b_ptr(b, ofs) \
	({            \
		char *__ret = (b)->p + (ofs);                   \
		if ((ofs) > 0 && __ret >= (b)->data + (b)->size)    \
			__ret -= (b)->size;                     \
		else if ((ofs) < 0 && __ret < (b)->data)        \
			__ret += (b)->size;                     \
		__ret;                                          \
	})
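
/* For illustration, the helpers below can be expressed with b_ptr():
 *   bi_end(b) behaves like b_ptr(b, b->i)          -- end of input data
 *   bo_ptr(b) behaves like b_ptr(b, (int)-b->o)    -- start of output data
 * Note the explicit cast to int when passing a negative offset.
 */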

/* Advances the buffer by <adv> bytes, which means that the buffer
 * pointer advances, and that as many bytes from in are transferred
 * to out. The caller is responsible for ensuring that adv is always
 * smaller than or equal to b->i.
 */
static inline void b_adv(struct buffer *b, unsigned int adv)
{
	b->i -= adv;
	b->o += adv;
	b->p = b_ptr(b, adv);
}

/* Rewinds the buffer by <adv> bytes, which means that the buffer pointer goes
 * backwards, and that as many bytes from out are moved to in. The caller is
 * responsible for ensuring that adv is always smaller than or equal to b->o.
 */
static inline void b_rew(struct buffer *b, unsigned int adv)
{
	b->i += adv;
	b->o -= adv;
	b->p = b_ptr(b, (int)-adv);
}
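
/* A small worked example: with i=10, o=0 and p pointing to the first input
 * byte, b_adv(b, 4) leaves i=6, o=4 and moves p forward by 4 bytes (wrapping
 * if needed), so the first 4 input bytes become output. b_rew(b, 4) undoes
 * exactly that.
 */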

/* Returns the start of the input data in a buffer */
static inline char *bi_ptr(const struct buffer *b)
{
	return b->p;
}

/* Returns the end of the input data in a buffer (pointer to next
 * insertion point).
 */
static inline char *bi_end(const struct buffer *b)
{
	char *ret = b->p + b->i;

	if (ret >= b->data + b->size)
		ret -= b->size;
	return ret;
}

/* Returns the amount of input data that can contiguously be read at once */
static inline int bi_contig_data(const struct buffer *b)
{
	int data = b->data + b->size - b->p;

	if (data > b->i)
		data = b->i;
	return data;
}

/* Returns the start of the output data in a buffer */
static inline char *bo_ptr(const struct buffer *b)
{
	char *ret = b->p - b->o;

	if (ret < b->data)
		ret += b->size;
	return ret;
}

/* Returns the end of the output data in a buffer */
static inline char *bo_end(const struct buffer *b)
{
	return b->p;
}

/* Returns the amount of output data that can contiguously be read at once */
static inline int bo_contig_data(const struct buffer *b)
{
	char *beg = b->p - b->o;

	if (beg < b->data)
		return b->data - beg;
	return b->o;
}

/* Returns the number of bytes that can be written into the input area at once,
 * including the reserved space which may be overwritten (it is the caller's
 * responsibility to know whether the reserved space is protected or not).
 */
static inline int bi_contig_space(const struct buffer *b)
{
	const char *left, *right;

	left  = b->p + b->i;
	right = b->p - b->o;
	if (left >= b->data + b->size)
		left -= b->size;
	else {
		if (right < b->data)
			right += b->size;
		else
			right = b->data + b->size;
	}
	return (right - left);
}

/* Returns the number of bytes that can be written into the output area at once,
 * including the reserved space which may be overwritten (it is the caller's
 * responsibility to know whether the reserved space is protected or not).
 * Input data are assumed not to exist.
 */
static inline int bo_contig_space(const struct buffer *b)
{
	const char *left, *right;

	left  = b->p;
	right = b->p - b->o;
	if (right < b->data)
		right += b->size;
	else
		right = b->data + b->size;

	return (right - left);
}

/* Return the buffer's length in bytes by summing the input and the output */
static inline int buffer_len(const struct buffer *buf)
{
	return buf->i + buf->o;
}

/* Return non-zero only if the buffer is not empty */
static inline int buffer_not_empty(const struct buffer *buf)
{
	return buf->i | buf->o;
}

/* Return non-zero only if the buffer is empty */
static inline int buffer_empty(const struct buffer *buf)
{
	return !buffer_not_empty(buf);
}

/* Returns non-zero if the buffer's INPUT is considered full, which means that
 * it holds at least as much INPUT data as (size - reserve). This also means
 * that data that are scheduled for output are considered as potential free
 * space, and that the reserved space is always considered as not usable. This
 * information alone cannot be used as a general purpose free space indicator.
 * However it accurately indicates that too much data was fed into the buffer
 * for an analyzer, for instance. See the channel_may_recv() function for a
 * more generic function taking everything into account.
 */
static inline int buffer_full(const struct buffer *b, unsigned int reserve)
{
	if (b == &buf_empty)
		return 0;

	return (b->i + reserve >= b->size);
}
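
/* For instance, with size=16384 and reserve=1024, buffer_full() reports a
 * full input side as soon as b->i reaches 15360; output data (<o>) never
 * counts against this limit, matching the note above about output being
 * treated as potential free space.
 */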

/* Normalizes a pointer after a subtract */
static inline char *buffer_wrap_sub(const struct buffer *buf, char *ptr)
{
	if (ptr < buf->data)
		ptr += buf->size;
	return ptr;
}

/* Normalizes a pointer after an addition */
static inline char *buffer_wrap_add(const struct buffer *buf, char *ptr)
{
	if (ptr - buf->size >= buf->data)
		ptr -= buf->size;
	return ptr;
}

/* Return the maximum amount of bytes that can be written into the buffer,
 * including reserved space which may be overwritten.
 */
static inline int buffer_total_space(const struct buffer *buf)
{
	return buf->size - buffer_len(buf);
}

/* Returns the number of contiguous bytes between <start> and <start>+<count>,
 * and enforces a limit on buf->data + buf->size. <start> must be within the
 * buffer.
 */
static inline int buffer_contig_area(const struct buffer *buf, const char *start, int count)
{
	if (count > buf->data - start + buf->size)
		count = buf->data - start + buf->size;
	return count;
}


/* Returns the number of bytes that can be written starting from <p> into the
 * input buffer at once, including reserved space which may be overwritten.
 * This is used by Lua to insert data in the input side just before the other
 * data using buffer_replace(). The goal is to transfer these new data in the
 * output buffer.
 */
static inline int bi_space_for_replace(const struct buffer *buf)
{
	const char *end;

	/* If the input side data overflows, we cannot insert data contiguously. */
	if (buf->p + buf->i >= buf->data + buf->size)
		return 0;

	/* Check the last byte used in the buffer, it may be a byte of the output
	 * side if the buffer wraps, or it is the end of the buffer.
	 */
	end = buffer_wrap_sub(buf, buf->p - buf->o);
	if (end <= buf->p)
		end = buf->data + buf->size;

	/* Compute the amount of bytes which can be written. */
	return end - (buf->p + buf->i);
}


/* Normalizes a pointer which is supposed to be relative to the beginning of a
 * buffer, so that wrapping is correctly handled. The intent is to use this
 * when increasing a pointer. Note that the wrapping test is only performed
 * once, so the original pointer must be between ->data-size and ->data+2*size-1,
 * otherwise an invalid pointer might be returned.
 */
static inline const char *buffer_pointer(const struct buffer *buf, const char *ptr)
{
	if (ptr < buf->data)
		ptr += buf->size;
	else if (ptr - buf->size >= buf->data)
		ptr -= buf->size;
	return ptr;
}

/* Returns the distance between two pointers, taking into account the ability
 * to wrap around the buffer's end.
 */
static inline int buffer_count(const struct buffer *buf, const char *from, const char *to)
{
	int count = to - from;

	count += count < 0 ? buf->size : 0;
	return count;
}

/* Returns the number of pending bytes in the buffer. It is the amount of bytes
 * that are not scheduled to be sent.
 */
static inline int buffer_pending(const struct buffer *buf)
{
	return buf->i;
}

/* Returns the size of the working area which the caller knows ends at <end>.
 * If <end> equals the end of the input data (buf->p + buf->i, modulo size),
 * then it means that the free area which follows is part of the working area.
 * Otherwise, the working area stops at <end>. It always starts at buf->p. The
 * work area includes the reserved area.
 */
static inline int buffer_work_area(const struct buffer *buf, const char *end)
{
	end = buffer_pointer(buf, end);
	if (end == buffer_wrap_add(buf, buf->p + buf->i))
		/* pointer exactly at end, let's push forward */
		end = buffer_wrap_sub(buf, buf->p - buf->o);
	return buffer_count(buf, buf->p, end);
}

/* Return 1 if the buffer has less than 1/4 of its capacity free, otherwise 0 */
static inline int buffer_almost_full(const struct buffer *buf)
{
	if (buf == &buf_empty)
		return 0;

	if (!buf->size || buffer_total_space(buf) < buf->size / 4)
		return 1;
	return 0;
}

/* Cut the first <n> pending bytes in a contiguous buffer. It is illegal to
 * call this function with remaining data waiting to be sent (o > 0). The
 * caller must ensure that <n> is smaller than the actual buffer's length.
 * This is mainly used to remove empty lines at the beginning of a request
 * or a response.
 */
static inline void bi_fast_delete(struct buffer *buf, int n)
{
	buf->i -= n;
	buf->p += n;
}

/* Tries to realign the given buffer. */
static inline void buffer_realign(struct buffer *buf)
{
	if (!(buf->i | buf->o)) {
		/* let's realign the buffer to optimize I/O */
		buf->p = buf->data;
	}
}

/* Schedule all remaining buffer data to be sent. ->o is not touched if it
 * already covers those data. That permits doing a flush even after a forward,
 * although not recommended.
 */
static inline void buffer_flush(struct buffer *buf)
{
	buf->p = buffer_wrap_add(buf, buf->p + buf->i);
	buf->o += buf->i;
	buf->i = 0;
}

/* This function writes the string <str> at position <pos> which must be in
 * buffer <b>, and moves <end> just after the end of <str>. <b>'s pointer and
 * length fields are updated to remain valid after the shift. The shift value
 * (positive or negative) is returned. If there's no space left, the move is
 * not done. The function does not adjust ->o because it does not make sense
 * to use it on data scheduled to be sent.
 */
static inline int buffer_replace(struct buffer *b, char *pos, char *end, const char *str)
{
	return buffer_replace2(b, pos, end, str, strlen(str));
}

/* Tries to write char <c> into output data at buffer <b>. Supports wrapping.
 * Data are truncated if buffer is full.
 */
static inline void bo_putchr(struct buffer *b, char c)
{
	if (buffer_len(b) == b->size)
		return;
	*b->p = c;
	b->p = b_ptr(b, 1);
	b->o++;
}

/* Tries to copy block <blk> into output data at buffer <b>. Supports wrapping.
 * Data are truncated if buffer is too short. It returns the number of bytes
 * copied.
 */
static inline int bo_putblk(struct buffer *b, const char *blk, int len)
{
	int cur_len = buffer_len(b);
	int half;

	if (len > b->size - cur_len)
		len = (b->size - cur_len);
	if (!len)
		return 0;

	half = bo_contig_space(b);
	if (half > len)
		half = len;

	memcpy(b->p, blk, half);
	b->p = b_ptr(b, half);
	if (len > half) {
		/* the block wraps: copy the remainder at the buffer's start and
		 * advance <p> by the remaining length, not by <half> again.
		 */
		memcpy(b->p, blk + half, len - half);
		b->p = b_ptr(b, len - half);
	}
	b->o += len;
	return len;
}
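
/* Worked example of the wrapping copy above: with size=8, p=data+6, i=0 and
 * o=0, bo_putblk(b, "abcd", 4) first copies "ab" into data[6..7], wraps, then
 * copies "cd" into data[0..1], leaving o=4 and p=data+2.
 */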

/* Tries to copy string <str> into output data at buffer <b>. Supports wrapping.
 * Data are truncated if buffer is too short. It returns the number of bytes
 * copied.
 */
static inline int bo_putstr(struct buffer *b, const char *str)
{
	return bo_putblk(b, str, strlen(str));
}

/* Tries to copy chunk <chk> into output data at buffer <b>. Supports wrapping.
 * Data are truncated if buffer is too short. It returns the number of bytes
 * copied.
 */
static inline int bo_putchk(struct buffer *b, const struct chunk *chk)
{
	return bo_putblk(b, chk->str, chk->len);
}

/* Resets a buffer. The size is not touched. */
static inline void b_reset(struct buffer *buf)
{
	buf->o = 0;
	buf->i = 0;
	buf->p = buf->data;
}

/* Allocates a buffer and replaces *buf with this buffer. If no memory is
 * available, &buf_wanted is used instead. No check is made on whether *buf
 * already pointed to another buffer. The allocated buffer is returned, or
 * NULL in case no memory is available.
 */
static inline struct buffer *b_alloc(struct buffer **buf)
{
	struct buffer *b;

	*buf = &buf_wanted;
	b = pool_alloc_dirty(pool2_buffer);
	if (likely(b)) {
		b->size = pool2_buffer->size - sizeof(struct buffer);
		b_reset(b);
		*buf = b;
	}
	return b;
}
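
/* A typical allocation pattern, shown here only as a sketch (the surrounding
 * error handling depends on the caller):
 *
 *   struct buffer *buf = &buf_empty;
 *
 *   if (!b_alloc(&buf)) {
 *           ... allocation failed: *buf now points to &buf_wanted ...
 *   }
 *   ...
 *   b_free(&buf);   -- releases the buffer and resets *buf to &buf_empty
 */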

/* Allocates a buffer and replaces *buf with this buffer. If no memory is
 * available, &buf_wanted is used instead. No check is made on whether *buf
 * already pointed to another buffer. The allocated buffer is returned, or
 * NULL in case no memory is available. The difference with b_alloc() is that
 * this function only picks from the pool and never calls malloc(), so it can
 * fail even if some memory is available.
 */
static inline struct buffer *b_alloc_fast(struct buffer **buf)
{
	struct buffer *b;

	*buf = &buf_wanted;
	b = pool_get_first(pool2_buffer);
	if (likely(b)) {
		b->size = pool2_buffer->size - sizeof(struct buffer);
		b_reset(b);
		*buf = b;
	}
	return b;
}

/* Releases buffer *buf (no check of emptiness) */
static inline void __b_drop(struct buffer **buf)
{
	pool_free2(pool2_buffer, *buf);
}

/* Releases buffer *buf if allocated. */
static inline void b_drop(struct buffer **buf)
{
	if (!(*buf)->size)
		return;
	__b_drop(buf);
}

/* Releases buffer *buf if allocated, and replaces it with &buf_empty. */
static inline void b_free(struct buffer **buf)
{
	b_drop(buf);
	*buf = &buf_empty;
}

/* Ensures that <buf> is allocated. If an allocation is needed, it ensures that
 * there are still at least <margin> buffers available in the pool after this
 * allocation so that we don't leave the pool in a condition where a session or
 * a response buffer could not be allocated anymore, resulting in a deadlock.
 * This means that we sometimes need to try to allocate extra entries even if
 * only one buffer is needed.
 */
static inline struct buffer *b_alloc_margin(struct buffer **buf, int margin)
{
	struct buffer *next;

	if ((*buf)->size)
		return *buf;

	/* fast path */
	if ((pool2_buffer->allocated - pool2_buffer->used) > margin)
		return b_alloc_fast(buf);

	next = pool_refill_alloc(pool2_buffer, margin);
	if (!next)
		return next;

	next->size = pool2_buffer->size - sizeof(struct buffer);
	b_reset(next);
	*buf = next;
	return next;
}
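
/* For example, b_alloc_margin(&buf, 2) only takes the fast path when the pool
 * still holds strictly more than 2 free entries (allocated - used > 2);
 * otherwise it calls pool_refill_alloc() so that, per the note above, at
 * least <margin> buffers remain available after the allocation.
 */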


void __offer_buffer(void *from, unsigned int threshold);

static inline void offer_buffers(void *from, unsigned int threshold)
{
	if (LIST_ISEMPTY(&buffer_wq))
		return;
	__offer_buffer(from, threshold);
}

#endif /* _COMMON_BUFFER_H */

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */