/*
 * include/proto/channel.h
 * Channel management definitions, macros and inline functions.
 *
 * Copyright (C) 2000-2014 Willy Tarreau - w@1wt.eu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation, version 2.1
 * exclusively.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
 */

#ifndef _PROTO_CHANNEL_H
#define _PROTO_CHANNEL_H

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <common/config.h>
#include <common/chunk.h>
#include <common/ticks.h>
#include <common/time.h>

#include <types/channel.h>
#include <types/global.h>
#include <types/stream.h>
#include <types/stream_interface.h>

#include <proto/applet.h>
#include <proto/task.h>
/* perform minimal initializations, report 0 in case of error, 1 if OK. */
int init_channel();

unsigned long long __channel_forward(struct channel *chn, unsigned long long bytes);

/* SI-to-channel functions working with buffers */
int ci_putblk(struct channel *chn, const char *str, int len);
struct buffer *ci_swpbuf(struct channel *chn, struct buffer *buf);
int ci_putchr(struct channel *chn, char c);
int ci_getline_nc(const struct channel *chn, char **blk1, int *len1, char **blk2, int *len2);
int ci_getblk_nc(const struct channel *chn, char **blk1, int *len1, char **blk2, int *len2);
int co_inject(struct channel *chn, const char *msg, int len);
int co_getline(const struct channel *chn, char *str, int len);
int co_getblk(const struct channel *chn, char *blk, int len, int offset);
int co_getline_nc(const struct channel *chn, char **blk1, int *len1, char **blk2, int *len2);
int co_getblk_nc(const struct channel *chn, char **blk1, int *len1, char **blk2, int *len2);


/* returns a pointer to the stream the channel belongs to */
static inline struct stream *chn_strm(const struct channel *chn)
{
	if (chn->flags & CF_ISRESP)
		return LIST_ELEM(chn, struct stream *, res);
	else
		return LIST_ELEM(chn, struct stream *, req);
}

/* returns a pointer to the stream interface feeding the channel (producer) */
static inline struct stream_interface *chn_prod(const struct channel *chn)
{
	if (chn->flags & CF_ISRESP)
		return &LIST_ELEM(chn, struct stream *, res)->si[1];
	else
		return &LIST_ELEM(chn, struct stream *, req)->si[0];
}

/* returns a pointer to the stream interface consuming the channel (consumer) */
static inline struct stream_interface *chn_cons(const struct channel *chn)
{
	if (chn->flags & CF_ISRESP)
		return &LIST_ELEM(chn, struct stream *, res)->si[0];
	else
		return &LIST_ELEM(chn, struct stream *, req)->si[1];
}

/* Initialize all fields in the channel. */
static inline void channel_init(struct channel *chn)
{
	chn->buf = &buf_empty;
	chn->to_forward = 0;
	chn->last_read = now_ms;
	chn->xfer_small = chn->xfer_large = 0;
	chn->total = 0;
	chn->pipe = NULL;
	chn->analysers = 0;
	chn->flags = 0;
}

/* Schedule up to <bytes> more bytes to be forwarded via the channel without
 * notifying the owner task. Any data pending in the buffer are scheduled to be
 * sent as well, within the limit of the number of bytes to forward. This must
 * be the only method used to schedule bytes to be forwarded. If the requested
 * number is too large, it is automatically adjusted. The number of bytes taken
 * into account is returned. Directly touching ->to_forward will cause lockups
 * when buf->o goes down to zero if nobody is ready to push the remaining data.
 */
static inline unsigned long long channel_forward(struct channel *chn, unsigned long long bytes)
{
	/* hint: avoid comparisons on long long for the fast case, since if the
	 * length does not fit in an unsigned int, it will never be forwarded at
	 * once anyway.
	 */
	if (bytes <= ~0U) {
		unsigned int bytes32 = bytes;

		if (bytes32 <= chn->buf->i) {
			/* OK this amount of bytes might be forwarded at once */
			b_adv(chn->buf, bytes32);
			return bytes;
		}
	}
	return __channel_forward(chn, bytes);
}
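
/* Example (illustrative sketch, not part of the API) : an analyser which has
 * validated <len> more payload bytes and wants them streamed without being
 * woken up again could schedule them as below. <chn> and <len> are
 * hypothetical names :
 *
 *     unsigned long long fwd = channel_forward(chn, len);
 *     // <fwd> may be lower than <len> if the request had to be adjusted;
 *     // the remainder stays accounted in ->to_forward and will be pushed
 *     // as new input arrives.
 */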

/* Forwards any input data and marks the channel for permanent forwarding */
static inline void channel_forward_forever(struct channel *chn)
{
	b_adv(chn->buf, chn->buf->i);
	chn->to_forward = CHN_INFINITE_FORWARD;
}

/*********************************************************************/
/* These functions are used to compute various channel content sizes */
/*********************************************************************/

/* Reports non-zero if the channel is empty, which means both its
 * buffer and pipe are empty. The construct looks strange but is
 * jump-less and much more efficient on both 32 and 64-bit than
 * the boolean test.
 */
static inline unsigned int channel_is_empty(const struct channel *c)
{
	return !(c->buf->o | (long)c->pipe);
}
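
/* For reference, the expression above is equivalent to the more obvious but
 * branch-prone boolean form below; both report non-zero when neither output
 * data nor a pipe remain :
 *
 *     return c->buf->o == 0 && c->pipe == NULL;
 */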

/* Returns non-zero if the channel is rewritable, which means that the buffer
 * it is attached to has at least <maxrewrite> bytes immediately available.
 * This is used to decide when a request or response may be parsed when some
 * data from a previous exchange might still be present.
 */
static inline int channel_is_rewritable(const struct channel *chn)
{
	int rem = chn->buf->size;

	rem -= chn->buf->o;
	rem -= chn->buf->i;
	rem -= global.tune.maxrewrite;
	return rem >= 0;
}

/* Tells whether data are likely to leave the buffer. This is used to know when
 * we can safely ignore the reserve since we know we cannot retry a connection.
 * It returns zero if data are blocked, non-zero otherwise.
 */
static inline int channel_may_send(const struct channel *chn)
{
	return chn_cons(chn)->state == SI_ST_EST;
}

/* Returns non-zero if the channel can still receive data. This is used to
 * decide when to stop reading into a buffer when we want to ensure that we
 * leave the reserve untouched after all pending outgoing data are forwarded.
 * The reserved space is taken into account if ->to_forward indicates that an
 * end of transfer is close to happen. Note that both ->buf->o and ->to_forward
 * are considered as available since they're supposed to leave the buffer. The
 * test is optimized to avoid as many operations as possible for the fast case
 * and to be used as an "if" condition. Just like channel_recv_limit(), we
 * never allow the reserve to be overwritten until the output stream interface
 * is connected, otherwise we could spin on a POST with http-send-name-header.
 */
static inline int channel_may_recv(const struct channel *chn)
{
	int rem = chn->buf->size;

	if (chn->buf == &buf_empty)
		return 1;

	rem -= chn->buf->o;
	rem -= chn->buf->i;
	if (!rem)
		return 0; /* buffer already full */

	if (rem > global.tune.maxrewrite)
		return 1; /* reserve not yet reached */

	if (!channel_may_send(chn))
		return 0; /* don't touch reserve until we can send */

	/* Now we know there's some room left in the reserve and we may
	 * forward. As long as i-to_fwd < size-maxrw, we may still
	 * receive. This is equivalent to i+maxrw-size < to_fwd,
	 * which is logical since i+maxrw-size is what overlaps with
	 * the reserve, and we want to ensure they're covered by scheduled
	 * forwards.
	 */
	rem = chn->buf->i + global.tune.maxrewrite - chn->buf->size;
	return rem < 0 || (unsigned int)rem < chn->to_forward;
}
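
/* Worked example for the test above (made-up numbers) : with size=16384,
 * maxrewrite=1024, o=0, i=15744 and to_forward=1000, the free room is
 * 16384-15744 = 640 bytes, which is inside the reserve. Receiving remains
 * allowed (once sending is possible) because i+maxrewrite-size =
 * 15744+1024-16384 = 384 is lower than to_forward (1000), ie the part
 * overlapping the reserve is entirely covered by scheduled forwards.
 */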

/* Returns true if the channel's input is already closed */
static inline int channel_input_closed(struct channel *chn)
{
	return ((chn->flags & CF_SHUTR) != 0);
}

/* Returns true if the channel's output is already closed */
static inline int channel_output_closed(struct channel *chn)
{
	return ((chn->flags & CF_SHUTW) != 0);
}

/* Check channel timeouts, and set the corresponding flags. The likely/unlikely
 * hints have been arranged for the fastest normal path. The read/write
 * timeouts are not set if there was activity on the channel. That way, we
 * don't have to update the timeout on every I/O. Note that the analyser
 * timeout is always checked.
 */
static inline void channel_check_timeouts(struct channel *chn)
{
	if (likely(!(chn->flags & (CF_SHUTR|CF_READ_TIMEOUT|CF_READ_ACTIVITY|CF_READ_NOEXP))) &&
	    unlikely(tick_is_expired(chn->rex, now_ms)))
		chn->flags |= CF_READ_TIMEOUT;

	if (likely(!(chn->flags & (CF_SHUTW|CF_WRITE_TIMEOUT|CF_WRITE_ACTIVITY|CF_WRITE_EVENT))) &&
	    unlikely(tick_is_expired(chn->wex, now_ms)))
		chn->flags |= CF_WRITE_TIMEOUT;

	if (likely(!(chn->flags & CF_ANA_TIMEOUT)) &&
	    unlikely(tick_is_expired(chn->analyse_exp, now_ms)))
		chn->flags |= CF_ANA_TIMEOUT;
}

/* Erases any content from channel <chn> and adjusts flags accordingly. Note
 * that any spliced data is not affected since we may not have any access to
 * it.
 */
static inline void channel_erase(struct channel *chn)
{
	chn->to_forward = 0;
	b_reset(chn->buf);
}

/* marks the channel as "shutdown" ASAP for reads */
static inline void channel_shutr_now(struct channel *chn)
{
	chn->flags |= CF_SHUTR_NOW;
}

/* marks the channel as "shutdown" ASAP for writes */
static inline void channel_shutw_now(struct channel *chn)
{
	chn->flags |= CF_SHUTW_NOW;
}

/* marks the channel as "shutdown" ASAP in both directions */
static inline void channel_abort(struct channel *chn)
{
	chn->flags |= CF_SHUTR_NOW | CF_SHUTW_NOW;
	chn->flags &= ~CF_AUTO_CONNECT;
}

/* allow the consumer to try to establish a new connection. */
static inline void channel_auto_connect(struct channel *chn)
{
	chn->flags |= CF_AUTO_CONNECT;
}

/* prevent the consumer from trying to establish a new connection, and also
 * disable auto shutdown forwarding.
 */
static inline void channel_dont_connect(struct channel *chn)
{
	chn->flags &= ~(CF_AUTO_CONNECT|CF_AUTO_CLOSE);
}

/* allow the producer to forward shutdown requests */
static inline void channel_auto_close(struct channel *chn)
{
	chn->flags |= CF_AUTO_CLOSE;
}

/* prevent the producer from forwarding shutdown requests */
static inline void channel_dont_close(struct channel *chn)
{
	chn->flags &= ~CF_AUTO_CLOSE;
}

/* allow the producer to read / poll the input */
static inline void channel_auto_read(struct channel *chn)
{
	chn->flags &= ~CF_DONT_READ;
}

/* prevent the producer from reading / polling the input */
static inline void channel_dont_read(struct channel *chn)
{
	chn->flags |= CF_DONT_READ;
}


/*************************************************/
/* Buffer operations in the context of a channel */
/*************************************************/


/* Return the max number of bytes the buffer can contain so that once all the
 * pending bytes are forwarded, the buffer still has global.tune.maxrewrite
 * bytes free. The result sits between chn->buf->size - maxrewrite and
 * chn->buf->size. It is important to mention that if buf->i is already larger
 * than size-maxrw the condition above cannot be satisfied and the lowest size
 * will be returned anyway. The principles are the following :
 *   0) the empty buffer has a limit of zero
 *   1) a non-connected buffer cannot touch the reserve
 *   2) infinite forward can always fill the buffer since all data will leave
 *   3) all output bytes are considered in transit since they're leaving
 *   4) all input bytes covered by to_forward are considered in transit since
 *      they'll be converted to output bytes.
 *   5) all input bytes not covered by to_forward are considered remaining
 *   6) all bytes scheduled to be forwarded minus what is already in the input
 *      buffer will be in transit during future rounds.
 *   7) 4+5+6 imply that the amount of input bytes (i) is irrelevant to the max
 *      usable length, only to_forward and output count. The difference is
 *      visible when to_forward > i.
 *   8) the reserve may be covered up to the amount of bytes in transit since
 *      these bytes will only take temporary space.
 *
 * A typical buffer looks like this :
 *
 *      <-------------- max_len ----------->
 *      <---- o ----><----- i ----->        <--- 0..maxrewrite --->
 *      +------------+--------------+-------+----------------------+
 *      |////////////|\\\\\\\\\\\\\\|xxxxxxx|        reserve       |
 *      +------------+--------+-----+-------+----------------------+
 *                   <- fwd ->      <-avail->
 *
 * Or when to_forward > i :
 *
 *      <-------------- max_len ----------->
 *      <---- o ----><----- i ----->        <--- 0..maxrewrite --->
 *      +------------+--------------+-------+----------------------+
 *      |////////////|\\\\\\\\\\\\\\|xxxxxxx|        reserve       |
 *      +------------+--------+-----+-------+----------------------+
 *                                  <-avail->
 *                   <------------------ fwd ---------------->
 *
 * - the amount of buffer bytes in transit is : min(i, fwd) + o
 * - some scheduled bytes may be in transit (up to fwd - i)
 * - the reserve is max(0, maxrewrite - transit)
 * - the maximum usable buffer length is size - reserve.
 * - the available space is max_len - i - o
 *
 * So the formula to compute the buffer's maximum length to protect the reserve
 * when reading new data is :
 *
 *    max = size - maxrewrite + min(maxrewrite, transit)
 *        = size - max(maxrewrite - transit, 0)
 *
 * But WARNING! The conditions might change during the transfer and it could
 * very well happen that a buffer would contain more bytes than max_len due to
 * i+o already walking over the reserve (eg: after a header rewrite), including
 * i or o alone hitting the limit. So it is critical to always consider that
 * bounds may have already been crossed and that available space may be negative
 * for example. Due to this it is perfectly possible for this function to return
 * a value that is lower than current i+o.
 */
static inline int channel_recv_limit(const struct channel *chn)
{
	unsigned int transit;
	int reserve;

	/* return zero if empty */
	reserve = chn->buf->size;
	if (chn->buf == &buf_empty)
		goto end;

	/* return size - maxrewrite if we can't send */
	reserve = global.tune.maxrewrite;
	if (unlikely(!channel_may_send(chn)))
		goto end;

	/* We need to check what remains of the reserve after o and to_forward
	 * have been transmitted, but they can overflow together and they can
	 * cause an integer underflow in the comparison since both are unsigned
	 * while maxrewrite is signed.
	 * The code below has been verified for being a valid check for this :
	 *   - if (o + to_forward) overflow => return size  [ large enough ]
	 *   - if o + to_forward >= maxrw   => return size  [ large enough ]
	 *   - otherwise return size - (maxrw - (o + to_forward))
	 */
	transit = chn->buf->o + chn->to_forward;
	reserve -= transit;
	if (transit < chn->to_forward ||                 // addition overflow
	    transit >= (unsigned)global.tune.maxrewrite) // enough transit data
		return chn->buf->size;
 end:
	return chn->buf->size - reserve;
}
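
/* Worked example (made-up numbers) : with size=16384, maxrewrite=1024,
 * o=200 and to_forward=300, transit=500 and the limit is
 * 16384 - (1024-500) = 15860, ie the reserve is partially covered by the
 * bytes in transit. As soon as o+to_forward reaches 1024, the full 16384
 * bytes become usable.
 */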

/* Returns the amount of space available at the input of the buffer, taking the
 * reserved space into account if ->to_forward indicates that an end of transfer
 * is close to happen. The test is optimized to avoid as many operations as
 * possible for the fast case.
 */
static inline int channel_recv_max(const struct channel *chn)
{
	int ret;

	ret = channel_recv_limit(chn) - chn->buf->i - chn->buf->o;
	if (ret < 0)
		ret = 0;
	return ret;
}

/* Allocates a buffer for channel <chn>, but only if it's guaranteed that it's
 * not the last available buffer or it's the response buffer. Unless the buffer
 * is the response buffer, an extra check is made so that we always keep
 * <tune.buffers.reserved> buffers available after this allocation. Returns 0 in
 * case of failure, non-zero otherwise.
 *
 * If no buffer is available, the requester, represented by the <wait> pointer,
 * will be added to the list of objects waiting for an available buffer.
 */
static inline int channel_alloc_buffer(struct channel *chn, struct buffer_wait *wait)
{
	int margin = 0;

	if (!(chn->flags & CF_ISRESP))
		margin = global.tune.reserved_bufs;

	if (b_alloc_margin(&chn->buf, margin) != NULL)
		return 1;

	if (LIST_ISEMPTY(&wait->list)) {
		HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
		LIST_ADDQ(&buffer_wq, &wait->list);
		HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
	}

	return 0;
}
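
/* Example (hypothetical sketch) : a stream allocating its request buffer
 * would typically pass its own wait entry so that it gets woken up once
 * another buffer is released. Here <s> is assumed to embed a struct
 * buffer_wait initialized at stream creation :
 *
 *     if (!channel_alloc_buffer(&s->req, &s->buffer_wait))
 *             return 0; // queued, will be woken up by offer_buffers()
 */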

/* Releases a possibly allocated buffer for channel <chn>. If it was not
 * allocated, this function does nothing. Else the buffer is released and we try
 * to wake up as many streams/applets as possible. */
static inline void channel_release_buffer(struct channel *chn, struct buffer_wait *wait)
{
	if (chn->buf->size && buffer_empty(chn->buf)) {
		b_free(&chn->buf);
		offer_buffers(wait->target, tasks_run_queue + applets_active_queue);
	}
}

/* Truncate any unread data in the channel's buffer, and disable forwarding.
 * Outgoing data are left intact. This is mainly to be used to send error
 * messages after existing data.
 */
static inline void channel_truncate(struct channel *chn)
{
	if (!chn->buf->o)
		return channel_erase(chn);

	chn->to_forward = 0;
	if (!chn->buf->i)
		return;

	chn->buf->i = 0;
}

/*
 * Advance the channel buffer's read pointer by <len> bytes. This is useful
 * when data have been read directly from the buffer. It is illegal to call
 * this function with <len> causing a wrapping at the end of the buffer. It's
 * the caller's responsibility to ensure that <len> is never larger than
 * chn->buf->o. Channel flag WRITE_PARTIAL is set.
 */
static inline void co_skip(struct channel *chn, int len)
{
	chn->buf->o -= len;

	if (buffer_empty(chn->buf))
		chn->buf->p = chn->buf->data;

	/* notify that some data was written to the SI from the buffer */
	chn->flags |= CF_WRITE_PARTIAL | CF_WRITE_EVENT;
}

/* Tries to copy chunk <chunk> into the channel's buffer after length controls.
 * The chn->buf->o and to_forward counters are updated. If the channel's input
 * is closed, -2 is returned. If the block is too large for this buffer, -3 is
 * returned. If there is not enough room left in the buffer, -1 is returned.
 * Otherwise the number of bytes copied is returned (0 being a valid number).
 * Channel flag READ_PARTIAL is updated if some data can be transferred. The
 * chunk's length is reduced by the number of bytes sent.
 */
static inline int ci_putchk(struct channel *chn, struct chunk *chunk)
{
	int ret;

	ret = ci_putblk(chn, chunk->str, chunk->len);
	if (ret > 0)
		chunk->len -= ret;
	return ret;
}
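
/* Example (illustrative sketch) : a producer pushing the global <trash>
 * chunk would typically retry later on -1 (no room yet) and abort on -2/-3,
 * which cannot succeed later. Error handling is left out :
 *
 *     ret = ci_putchk(chn, &trash);
 *     if (ret == -1) {
 *             // buffer full : wait for room and try again later
 *     }
 *     else if (ret < 0) {
 *             // -2 : input closed, -3 : chunk larger than the buffer
 *     }
 */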

/* Tries to copy string <str> at once into the channel's buffer after length
 * controls. The chn->buf->o and to_forward counters are updated. If the
 * channel's input is closed, -2 is returned. If the block is too large for
 * this buffer, -3 is returned. If there is not enough room left in the buffer,
 * -1 is returned. Otherwise the number of bytes copied is returned (0 being a
 * valid number). Channel flag READ_PARTIAL is updated if some data can be
 * transferred.
 */
static inline int ci_putstr(struct channel *chn, const char *str)
{
	return ci_putblk(chn, str, strlen(str));
}

/*
 * Return one char from the channel's buffer. If the buffer is empty and the
 * channel is closed, return -2. If the buffer is just empty, return -1. The
 * buffer's pointer is not advanced, it's up to the caller to call
 * co_skip(chn, 1) when it has consumed the char. Also note that this function
 * respects the chn->buf->o limit.
 */
static inline int co_getchr(struct channel *chn)
{
	/* closed or empty + imminent close = -2; empty = -1 */
	if (unlikely((chn->flags & CF_SHUTW) || channel_is_empty(chn))) {
		if (chn->flags & (CF_SHUTW|CF_SHUTW_NOW))
			return -2;
		return -1;
	}
	return *buffer_wrap_sub(chn->buf, chn->buf->p - chn->buf->o);
}
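
/* Example (illustrative sketch) : draining output data one character at a
 * time, each consumed char being acknowledged with co_skip().
 * process_char() is a hypothetical consumer :
 *
 *     int c;
 *
 *     while ((c = co_getchr(chn)) >= 0) {
 *             process_char(c);
 *             co_skip(chn, 1);
 *     }
 *     // c == -1 : nothing left ; c == -2 : output closed
 */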


#endif /* _PROTO_CHANNEL_H */

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */