/*
 * include/proto/channel.h
 * Channel management definitions, macros and inline functions.
 *
 * Copyright (C) 2000-2014 Willy Tarreau - w@1wt.eu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation, version 2.1
 * exclusively.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
 */

#ifndef _PROTO_CHANNEL_H
#define _PROTO_CHANNEL_H

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <common/config.h>
#include <common/chunk.h>
#include <common/ticks.h>
#include <common/time.h>

#include <types/channel.h>
#include <types/global.h>
#include <types/stream.h>
#include <types/stream_interface.h>

#include <proto/applet.h>
#include <proto/task.h>

/* perform minimal initializations, report 0 in case of error, 1 if OK. */
int init_channel(void);

unsigned long long __channel_forward(struct channel *chn, unsigned long long bytes);

/* SI-to-channel functions working with buffers */
int bi_putblk(struct channel *chn, const char *str, int len);
struct buffer *bi_swpbuf(struct channel *chn, struct buffer *buf);
int bi_putchr(struct channel *chn, char c);
int bi_getline_nc(struct channel *chn, char **blk1, int *len1, char **blk2, int *len2);
int bi_getblk_nc(struct channel *chn, char **blk1, int *len1, char **blk2, int *len2);
int bo_inject(struct channel *chn, const char *msg, int len);
int bo_getline(struct channel *chn, char *str, int len);
int bo_getblk(struct channel *chn, char *blk, int len, int offset);
int bo_getline_nc(struct channel *chn, char **blk1, int *len1, char **blk2, int *len2);
int bo_getblk_nc(struct channel *chn, char **blk1, int *len1, char **blk2, int *len2);

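/* Usage sketch (illustrative, not part of the API): the bi_* functions feed
 * the input side of a channel (producer role) while the bo_* functions read
 * from the output side (consumer role). A hypothetical handler pushing a
 * request and draining a response line could look like this, with error
 * handling reduced to a bare minimum :
 *
 *	void example_io(struct channel *req, struct channel *res)
 *	{
 *		char line[256];
 *		int ret;
 *
 *		if (bi_putstr(req, "PING\r\n") < 0)
 *			return;                    // closed, too large or no room
 *
 *		ret = bo_getline(res, line, sizeof(line));
 *		if (ret > 0)
 *			bo_skip(res, ret);         // consume what was read
 *	}
 */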

/* returns a pointer to the stream the channel belongs to */
static inline struct stream *chn_strm(const struct channel *chn)
{
	if (chn->flags & CF_ISRESP)
		return LIST_ELEM(chn, struct stream *, res);
	else
		return LIST_ELEM(chn, struct stream *, req);
}

/* returns a pointer to the stream interface feeding the channel (producer) */
static inline struct stream_interface *chn_prod(const struct channel *chn)
{
	if (chn->flags & CF_ISRESP)
		return &LIST_ELEM(chn, struct stream *, res)->si[1];
	else
		return &LIST_ELEM(chn, struct stream *, req)->si[0];
}

/* returns a pointer to the stream interface consuming the channel (consumer) */
static inline struct stream_interface *chn_cons(const struct channel *chn)
{
	if (chn->flags & CF_ISRESP)
		return &LIST_ELEM(chn, struct stream *, res)->si[0];
	else
		return &LIST_ELEM(chn, struct stream *, req)->si[1];
}

/* Initialize all fields in the channel. */
static inline void channel_init(struct channel *chn)
{
	chn->buf = &buf_empty;
	chn->to_forward = 0;
	chn->last_read = now_ms;
	chn->xfer_small = chn->xfer_large = 0;
	chn->total = 0;
	chn->pipe = NULL;
	chn->analysers = 0;
	chn->flags = 0;
}

/* Schedule up to <bytes> more bytes to be forwarded via the channel without
 * notifying the owner task. Any data pending in the buffer are scheduled to be
 * sent as well, in the limit of the number of bytes to forward. This must be
 * the only method to use to schedule bytes to be forwarded. If the requested
 * number is too large, it is automatically adjusted. The number of bytes taken
 * into account is returned. Directly touching ->to_forward will cause lockups
 * when buf->o goes down to zero if nobody is ready to push the remaining data.
 */
static inline unsigned long long channel_forward(struct channel *chn, unsigned long long bytes)
{
	/* hint: avoid comparisons on long long for the fast case, since if the
	 * length does not fit in an unsigned int, it will never be forwarded at
	 * once anyway.
	 */
	if (bytes <= ~0U) {
		unsigned int bytes32 = bytes;

		if (bytes32 <= chn->buf->i) {
			/* OK this amount of bytes might be forwarded at once */
			b_adv(chn->buf, bytes32);
			return bytes;
		}
	}
	return __channel_forward(chn, bytes);
}
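
/* Example (hypothetical numbers): an analyser which has determined that a
 * 1000-byte payload must pass through while only part of it is present in
 * the buffer would call channel_forward(chn, 1000). The pending input bytes
 * are scheduled at once and ->to_forward retains the remainder so that
 * future reads flow through without waking the analysers again :
 *
 *	unsigned long long done = channel_forward(chn, 1000ULL);
 *	// done normally equals 1000 unless the count had to be adjusted
 */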

/* Forwards any input data and marks the channel for permanent forwarding */
static inline void channel_forward_forever(struct channel *chn)
{
	b_adv(chn->buf, chn->buf->i);
	chn->to_forward = CHN_INFINITE_FORWARD;
}

/*********************************************************************/
/* These functions are used to compute various channel content sizes */
/*********************************************************************/

/* Reports non-zero if the channel is empty, which means both its
 * buffer and pipe are empty. The construct looks strange but is
 * jump-less and much more efficient on both 32 and 64-bit than
 * the boolean test.
 */
static inline unsigned int channel_is_empty(struct channel *c)
{
	return !(c->buf->o | (long)c->pipe);
}
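
/* The bitwise OR above is strictly equivalent to the following more readable
 * form, which usually compiles to a branch :
 *
 *	return c->buf->o == 0 && c->pipe == NULL;
 */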

/* Returns non-zero if the channel is rewritable, which means that the buffer
 * it is attached to has at least <maxrewrite> bytes immediately available.
 * This is used to decide when a request or response may be parsed when some
 * data from a previous exchange might still be present.
 */
static inline int channel_is_rewritable(const struct channel *chn)
{
	int rem = chn->buf->size;

	rem -= chn->buf->o;
	rem -= chn->buf->i;
	rem -= global.tune.maxrewrite;
	return rem >= 0;
}

/* Returns non-zero if the channel is congested with data in transit waiting
 * to leave, indicating to the caller that it should wait for the reserve to
 * be released before starting to process new data in case it needs the ability
 * to append data. This is meant to be used while waiting for a clean response
 * buffer before processing a request.
 */
static inline int channel_congested(const struct channel *chn)
{
	if (!chn->buf->o)
		return 0;

	if (!channel_is_rewritable(chn))
		return 1;

	if (chn->buf->p + chn->buf->i >
	    chn->buf->data + chn->buf->size - global.tune.maxrewrite)
		return 1;

	return 0;
}

/* Tells whether data are likely to leave the buffer. This is used to know when
 * we can safely ignore the reserve since we know we cannot retry a connection.
 * It returns zero if data are blocked, non-zero otherwise.
 */
static inline int channel_may_send(const struct channel *chn)
{
	return chn_cons(chn)->state == SI_ST_EST;
}

/* Returns non-zero if the channel can still receive data. This is used to
 * decide when to stop reading into a buffer when we want to ensure that we
 * leave the reserve untouched after all pending outgoing data are forwarded.
 * The reserved space is taken into account if ->to_forward indicates that an
 * end of transfer is close to happen. Note that both ->buf->o and ->to_forward
 * are considered as available since they're supposed to leave the buffer. The
 * test is optimized to avoid as many operations as possible for the fast case
 * and to be used as an "if" condition. Just like channel_recv_limit(), we
 * never allow the reserve to be overwritten until the output stream interface
 * is connected, otherwise we could spin on a POST with http-send-name-header.
 */
static inline int channel_may_recv(const struct channel *chn)
{
	int rem = chn->buf->size;

	if (chn->buf == &buf_empty)
		return 1;

	rem -= chn->buf->o;
	rem -= chn->buf->i;
	if (!rem)
		return 0; /* buffer already full */

	if (rem > global.tune.maxrewrite)
		return 1; /* reserve not yet reached */

	if (!channel_may_send(chn))
		return 0; /* don't touch reserve until we can send */

	/* Now we know there's some room left in the reserve and we may
	 * forward. As long as i-to_fwd < size-maxrw, we may still
	 * receive. This is equivalent to i+maxrw-size < to_fwd,
	 * which is logical since i+maxrw-size is what overlaps with
	 * the reserve, and we want to ensure they're covered by scheduled
	 * forwards.
	 */
	rem = chn->buf->i + global.tune.maxrewrite - chn->buf->size;
	return rem < 0 || (unsigned int)rem < chn->to_forward;
}
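
/* Worked example (hypothetical values): with size=16384, maxrewrite=1024,
 * o=0, i=15500 and to_forward=2000, the first pass leaves rem=884 which is
 * within the reserve, then rem = 15500 + 1024 - 16384 = 140, and since
 * 140 < 2000 the bytes overlapping the reserve are all scheduled to leave,
 * so receiving may safely continue.
 */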

/* Returns true if the channel's input is already closed */
static inline int channel_input_closed(struct channel *chn)
{
	return ((chn->flags & CF_SHUTR) != 0);
}

/* Returns true if the channel's output is already closed */
static inline int channel_output_closed(struct channel *chn)
{
	return ((chn->flags & CF_SHUTW) != 0);
}

/* Check channel timeouts, and set the corresponding flags. The likely/unlikely
 * hints are optimized for the fastest normal path. The read/write timeouts are
 * not set if there was activity on the channel. That way, we don't have to
 * update the timeout on every I/O. Note that the analyser timeout is always
 * checked.
 */
static inline void channel_check_timeouts(struct channel *chn)
{
	if (likely(!(chn->flags & (CF_SHUTR|CF_READ_TIMEOUT|CF_READ_ACTIVITY|CF_READ_NOEXP))) &&
	    unlikely(tick_is_expired(chn->rex, now_ms)))
		chn->flags |= CF_READ_TIMEOUT;

	if (likely(!(chn->flags & (CF_SHUTW|CF_WRITE_TIMEOUT|CF_WRITE_ACTIVITY|CF_WRITE_EVENT))) &&
	    unlikely(tick_is_expired(chn->wex, now_ms)))
		chn->flags |= CF_WRITE_TIMEOUT;

	if (likely(!(chn->flags & CF_ANA_TIMEOUT)) &&
	    unlikely(tick_is_expired(chn->analyse_exp, now_ms)))
		chn->flags |= CF_ANA_TIMEOUT;
}

/* Erase any content from channel <chn> and adjust flags accordingly. Note
 * that any spliced data is not affected since we may not have any access to
 * it.
 */
static inline void channel_erase(struct channel *chn)
{
	chn->to_forward = 0;
	b_reset(chn->buf);
}

/* marks the channel as "shutdown" ASAP for reads */
static inline void channel_shutr_now(struct channel *chn)
{
	chn->flags |= CF_SHUTR_NOW;
}

/* marks the channel as "shutdown" ASAP for writes */
static inline void channel_shutw_now(struct channel *chn)
{
	chn->flags |= CF_SHUTW_NOW;
}

/* marks the channel as "shutdown" ASAP in both directions */
static inline void channel_abort(struct channel *chn)
{
	chn->flags |= CF_SHUTR_NOW | CF_SHUTW_NOW;
	chn->flags &= ~CF_AUTO_CONNECT;
}

/* allow the consumer to try to establish a new connection. */
static inline void channel_auto_connect(struct channel *chn)
{
	chn->flags |= CF_AUTO_CONNECT;
}

/* prevent the consumer from trying to establish a new connection, and also
 * disable auto shutdown forwarding.
 */
static inline void channel_dont_connect(struct channel *chn)
{
	chn->flags &= ~(CF_AUTO_CONNECT|CF_AUTO_CLOSE);
}

/* allow the producer to forward shutdown requests */
static inline void channel_auto_close(struct channel *chn)
{
	chn->flags |= CF_AUTO_CLOSE;
}

/* prevent the producer from forwarding shutdown requests */
static inline void channel_dont_close(struct channel *chn)
{
	chn->flags &= ~CF_AUTO_CLOSE;
}

/* allow the producer to read / poll the input */
static inline void channel_auto_read(struct channel *chn)
{
	chn->flags &= ~CF_DONT_READ;
}

/* prevent the producer from reading / polling the input */
static inline void channel_dont_read(struct channel *chn)
{
	chn->flags |= CF_DONT_READ;
}


/*************************************************/
/* Buffer operations in the context of a channel */
/*************************************************/


/* Return the max number of bytes the buffer can contain so that once all the
 * pending bytes are forwarded, the buffer still has global.tune.maxrewrite
 * bytes free. The result sits between chn->buf->size - maxrewrite and
 * chn->buf->size. It is important to mention that if buf->i is already larger
 * than size-maxrw the condition above cannot be satisfied and the lowest size
 * will be returned anyway. The principles are the following :
 *   0) the empty buffer has a limit of zero
 *   1) a non-connected buffer cannot touch the reserve
 *   2) infinite forward can always fill the buffer since all data will leave
 *   3) all output bytes are considered in transit since they're leaving
 *   4) all input bytes covered by to_forward are considered in transit since
 *      they'll be converted to output bytes.
 *   5) all input bytes not covered by to_forward are considered remaining
 *   6) all bytes scheduled to be forwarded minus what is already in the input
 *      buffer will be in transit during future rounds.
 *   7) 4+5+6 imply that the amount of input bytes (i) is irrelevant to the max
 *      usable length, only to_forward and output count. The difference is
 *      visible when to_forward > i.
 *   8) the reserve may be covered up to the amount of bytes in transit since
 *      these bytes will only take temporary space.
 *
 * A typical buffer looks like this :
 *
 *      <-------------- max_len ----------->
 *      <---- o ----><----- i ----->        <--- 0..maxrewrite --->
 *      +------------+--------------+-------+----------------------+
 *      |////////////|\\\\\\\\\\\\\\|xxxxxxx|        reserve       |
 *      +------------+--------+-----+-------+----------------------+
 *                   <- fwd ->      <-avail->
 *
 * Or when to_forward > i :
 *
 *      <-------------- max_len ----------->
 *      <---- o ----><----- i ----->        <--- 0..maxrewrite --->
 *      +------------+--------------+-------+----------------------+
 *      |////////////|\\\\\\\\\\\\\\|xxxxxxx|        reserve       |
 *      +------------+--------+-----+-------+----------------------+
 *                                  <-avail->
 *                   <------------------ fwd ---------------->
 *
 * - the amount of buffer bytes in transit is : min(i, fwd) + o
 * - some scheduled bytes may be in transit (up to fwd - i)
 * - the reserve is max(0, maxrewrite - transit)
 * - the maximum usable buffer length is size - reserve.
 * - the available space is max_len - i - o
 *
 * So the formula to compute the buffer's maximum length to protect the reserve
 * when reading new data is :
 *
 *    max = size - maxrewrite + min(maxrewrite, transit)
 *        = size - max(maxrewrite - transit, 0)
 *
 * But WARNING! The conditions might change during the transfer and it could
 * very well happen that a buffer would contain more bytes than max_len due to
 * i+o already walking over the reserve (eg: after a header rewrite), including
 * i or o alone hitting the limit. So it is critical to always consider that
 * bounds may have already been crossed and that available space may be negative
 * for example. Due to this it is perfectly possible for this function to return
 * a value that is lower than current i+o.
 */
static inline int channel_recv_limit(const struct channel *chn)
{
	unsigned int transit;
	int reserve;

	/* return zero if empty */
	reserve = chn->buf->size;
	if (chn->buf == &buf_empty)
		goto end;

	/* return size - maxrewrite if we can't send */
	reserve = global.tune.maxrewrite;
	if (unlikely(!channel_may_send(chn)))
		goto end;

	/* We need to check what remains of the reserve after o and to_forward
	 * have been transmitted, but they can overflow together and they can
	 * cause an integer underflow in the comparison since both are unsigned
	 * while maxrewrite is signed.
	 * The code below has been verified for being a valid check for this :
	 *   - if (o + to_forward) overflow => return size  [ large enough ]
	 *   - if o + to_forward >= maxrw   => return size  [ large enough ]
	 *   - otherwise return size - (maxrw - (o + to_forward))
	 */
	transit = chn->buf->o + chn->to_forward;
	reserve -= transit;
	if (transit < chn->to_forward ||                 // addition overflow
	    transit >= (unsigned)global.tune.maxrewrite) // enough transit data
		return chn->buf->size;
 end:
	return chn->buf->size - reserve;
}
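
/* Worked example (hypothetical values): with size=16384, maxrewrite=1024,
 * o=300 and to_forward=200, transit=500 does not overflow and stays below
 * maxrewrite, so the limit is 16384 - (1024 - 500) = 15860. With o=2000,
 * transit alone covers the reserve and the full 16384 bytes are usable.
 */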

/* Returns the amount of space available at the input of the buffer, taking the
 * reserved space into account if ->to_forward indicates that an end of transfer
 * is close to happen. The test is optimized to avoid as many operations as
 * possible for the fast case.
 */
static inline int channel_recv_max(const struct channel *chn)
{
	int ret;

	ret = channel_recv_limit(chn) - chn->buf->i - chn->buf->o;
	if (ret < 0)
		ret = 0;
	return ret;
}

/* Allocates a buffer for channel <chn>, but only if it's guaranteed that it's
 * not the last available buffer or it's the response buffer. Unless the buffer
 * is the response buffer, an extra control is made so that we always keep
 * <tune.buffers.reserved> buffers available after this allocation. Returns 0 in
 * case of failure, non-zero otherwise.
 *
 * If no buffer is available, the requester, represented by the <wait> pointer,
 * will be added to the list of objects waiting for an available buffer.
 */
static inline int channel_alloc_buffer(struct channel *chn, struct buffer_wait *wait)
{
	int margin = 0;

	if (!(chn->flags & CF_ISRESP))
		margin = global.tune.reserved_bufs;

	if (b_alloc_margin(&chn->buf, margin) != NULL)
		return 1;

	if (LIST_ISEMPTY(&wait->list))
		LIST_ADDQ(&buffer_wq, &wait->list);
	return 0;
}
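
/* Usage sketch (illustrative): a caller whose buffer was released under
 * memory pressure re-attempts the allocation before processing, and yields
 * when it gets queued on <buffer_wq>. Here <s> is assumed to be a stream
 * exposing a buffer_wait member :
 *
 *	if (!channel_alloc_buffer(chn, &s->buffer_wait))
 *		return 0;  // queued; we'll be woken when a buffer is freed
 */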

/* Releases a possibly allocated buffer for channel <chn>. If it was not
 * allocated, this function does nothing. Else the buffer is released and we try
 * to wake up as many streams/applets as possible. */
static inline void channel_release_buffer(struct channel *chn, struct buffer_wait *wait)
{
	if (chn->buf->size && buffer_empty(chn->buf)) {
		b_free(&chn->buf);
		offer_buffers(wait->target, tasks_run_queue + applets_active_queue);
	}
}

/* Truncate any unread data in the channel's buffer, and disable forwarding.
 * Outgoing data are left intact. This is mainly to be used to send error
 * messages after existing data.
 */
static inline void channel_truncate(struct channel *chn)
{
	if (!chn->buf->o)
		return channel_erase(chn);

	chn->to_forward = 0;
	if (!chn->buf->i)
		return;

	chn->buf->i = 0;
}

/*
 * Advance the channel buffer's read pointer by <len> bytes. This is useful
 * when data have been read directly from the buffer. It is illegal to call
 * this function with <len> causing a wrapping at the end of the buffer. It's
 * the caller's responsibility to ensure that <len> is never larger than
 * chn->buf->o. Channel flag WRITE_PARTIAL is set.
 */
static inline void bo_skip(struct channel *chn, int len)
{
	chn->buf->o -= len;

	if (buffer_empty(chn->buf))
		chn->buf->p = chn->buf->data;

	/* notify that some data was written to the SI from the buffer */
	chn->flags |= CF_WRITE_PARTIAL | CF_WRITE_EVENT;
}
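
/* Usage sketch (illustrative): bo_skip() pairs with the zero-copy readers
 * above, which expose output data without consuming it. process() is a
 * hypothetical consumer :
 *
 *	char *blk1, *blk2;
 *	int len1, len2, ret;
 *
 *	ret = bo_getblk_nc(chn, &blk1, &len1, &blk2, &len2);
 *	if (ret > 0) {
 *		process(blk1, len1);
 *		if (ret == 2)
 *			process(blk2, len2);
 *		bo_skip(chn, len1 + (ret == 2 ? len2 : 0));
 *	}
 */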

/* Tries to copy chunk <chunk> into the channel's buffer after length controls.
 * The chn->buf->o and to_forward counts are updated. If the channel's input is
 * closed, -2 is returned. If the block is too large for this buffer, -3 is
 * returned. If there is not enough room left in the buffer, -1 is returned.
 * Otherwise the number of bytes copied is returned (0 being a valid number).
 * Channel flag READ_PARTIAL is updated if some data can be transferred. The
 * chunk's length is updated with the number of bytes sent.
 */
static inline int bi_putchk(struct channel *chn, struct chunk *chunk)
{
	int ret;

	ret = bi_putblk(chn, chunk->str, chunk->len);
	if (ret > 0)
		chunk->len -= ret;
	return ret;
}
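
/* Usage sketch (illustrative): a typical applet response path builds its
 * message into a chunk (here the global <trash> chunk) and retries later if
 * the buffer is temporarily full. si_applet_cant_put() is assumed to be the
 * usual way for an applet to wait for room :
 *
 *	chunk_printf(&trash, "HTTP/1.0 200 OK\r\n\r\n");
 *	if (bi_putchk(chn, &trash) == -1)
 *		si_applet_cant_put(si);
 */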

/* Tries to copy string <str> at once into the channel's buffer after length
 * controls. The chn->buf->o and to_forward counts are updated. If the
 * channel's input is closed, -2 is returned. If the block is too large for
 * this buffer, -3 is returned. If there is not enough room left in the buffer,
 * -1 is returned. Otherwise the number of bytes copied is returned (0 being a
 * valid number). Channel flag READ_PARTIAL is updated if some data can be
 * transferred.
 */
static inline int bi_putstr(struct channel *chn, const char *str)
{
	return bi_putblk(chn, str, strlen(str));
}

/*
 * Return one char from the channel's buffer. If the buffer is empty and the
 * channel is closed, return -2. If the buffer is just empty, return -1. The
 * buffer's pointer is not advanced, it's up to the caller to call bo_skip(chn,
 * 1) when it has consumed the char. Also note that this function respects the
 * chn->buf->o limit.
 */
static inline int bo_getchr(struct channel *chn)
{
	/* closed or empty + imminent close = -2; empty = -1 */
	if (unlikely((chn->flags & CF_SHUTW) || channel_is_empty(chn))) {
		if (chn->flags & (CF_SHUTW|CF_SHUTW_NOW))
			return -2;
		return -1;
	}
	return *buffer_wrap_sub(chn->buf, chn->buf->p - chn->buf->o);
}
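
/* Usage sketch (illustrative): consuming output bytes one at a time, with
 * handle_char() being a hypothetical consumer :
 *
 *	int c;
 *
 *	while ((c = bo_getchr(chn)) >= 0) {
 *		handle_char(c);
 *		bo_skip(chn, 1);
 *	}
 *	// c == -1 : nothing left for now ; c == -2 : channel closed
 */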


#endif /* _PROTO_CHANNEL_H */

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */