/*
 * include/proto/channel.h
 * Channel management definitions, macros and inline functions.
 *
 * Copyright (C) 2000-2014 Willy Tarreau - w@1wt.eu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation, version 2.1
 * exclusively.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
 */

#ifndef _PROTO_CHANNEL_H
#define _PROTO_CHANNEL_H

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <common/config.h>
#include <common/chunk.h>
#include <common/htx.h>
#include <common/ticks.h>
#include <common/time.h>

#include <types/channel.h>
#include <types/global.h>
#include <types/stream.h>
#include <types/stream_interface.h>

#include <proto/stream.h>
#include <proto/task.h>

/* perform minimal initializations, report 0 in case of error, 1 if OK. */
int init_channel();

unsigned long long __channel_forward(struct channel *chn, unsigned long long bytes);

/* SI-to-channel functions working with buffers */
int ci_putblk(struct channel *chn, const char *str, int len);
int ci_putchr(struct channel *chn, char c);
int ci_getline_nc(const struct channel *chn, char **blk1, size_t *len1, char **blk2, size_t *len2);
int ci_getblk_nc(const struct channel *chn, char **blk1, size_t *len1, char **blk2, size_t *len2);
int ci_insert_line2(struct channel *c, int pos, const char *str, int len);
int co_inject(struct channel *chn, const char *msg, int len);
int co_getline(const struct channel *chn, char *str, int len);
int co_getblk(const struct channel *chn, char *blk, int len, int offset);
int co_getline_nc(const struct channel *chn, const char **blk1, size_t *len1, const char **blk2, size_t *len2);
int co_getblk_nc(const struct channel *chn, const char **blk1, size_t *len1, const char **blk2, size_t *len2);


/* returns a pointer to the stream the channel belongs to */
static inline struct stream *chn_strm(const struct channel *chn)
{
	if (chn->flags & CF_ISRESP)
		return LIST_ELEM(chn, struct stream *, res);
	else
		return LIST_ELEM(chn, struct stream *, req);
}

/* returns a pointer to the stream interface feeding the channel (producer) */
static inline struct stream_interface *chn_prod(const struct channel *chn)
{
	if (chn->flags & CF_ISRESP)
		return &LIST_ELEM(chn, struct stream *, res)->si[1];
	else
		return &LIST_ELEM(chn, struct stream *, req)->si[0];
}

/* returns a pointer to the stream interface consuming the channel (consumer) */
static inline struct stream_interface *chn_cons(const struct channel *chn)
{
	if (chn->flags & CF_ISRESP)
		return &LIST_ELEM(chn, struct stream *, res)->si[0];
	else
		return &LIST_ELEM(chn, struct stream *, req)->si[1];
}
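
/* Illustrative note (not part of the original API docs): the three helpers
 * above rely on the channel being embedded in struct stream as <req> or
 * <res>, so LIST_ELEM() can recover the container without a back-pointer.
 * A minimal usage sketch:
 *
 *     struct stream *s           = chn_strm(chn);  // owning stream
 *     struct stream_interface *p = chn_prod(chn);  // side filling <chn>
 *     struct stream_interface *c = chn_cons(chn);  // side draining <chn>
 *
 * For a response channel the producer is si[1] (server side) and the
 * consumer si[0] (client side); it's the opposite for a request channel.
 */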

/* c_orig() : returns the pointer to the channel buffer's origin */
static inline char *c_orig(const struct channel *c)
{
	return b_orig(&c->buf);
}

/* c_size() : returns the size of the channel's buffer */
static inline size_t c_size(const struct channel *c)
{
	return b_size(&c->buf);
}

/* c_wrap() : returns the pointer to the channel buffer's wrapping point */
static inline char *c_wrap(const struct channel *c)
{
	return b_wrap(&c->buf);
}

/* c_data() : returns the amount of data in the channel's buffer */
static inline size_t c_data(const struct channel *c)
{
	return b_data(&c->buf);
}

/* c_room() : returns the room left in the channel's buffer */
static inline size_t c_room(const struct channel *c)
{
	return b_size(&c->buf) - b_data(&c->buf);
}

/* c_empty() : returns a boolean indicating if the channel's buffer is empty */
static inline size_t c_empty(const struct channel *c)
{
	return !c_data(c);
}

/* c_full() : returns a boolean indicating if the channel's buffer is full */
static inline size_t c_full(const struct channel *c)
{
	return !c_room(c);
}

/* co_data() : returns the amount of output data in the channel's buffer */
static inline size_t co_data(const struct channel *c)
{
	return c->output;
}

/* ci_data() : returns the amount of input data in the channel's buffer */
static inline size_t ci_data(const struct channel *c)
{
	return c_data(c) - co_data(c);
}

/* ci_next() : for an absolute pointer <p> or a relative offset <o> pointing to
 * a valid location within channel <c>'s buffer, returns either the absolute
 * pointer or the relative offset pointing to the next byte, which usually is
 * at (p + 1) unless p reaches the wrapping point and wrapping is needed.
 */
static inline size_t ci_next_ofs(const struct channel *c, size_t o)
{
	return b_next_ofs(&c->buf, o);
}
static inline char *ci_next(const struct channel *c, const char *p)
{
	return b_next(&c->buf, p);
}


/* c_ptr() : returns a pointer to an offset relative to the beginning of the
 * input data in the buffer. If instead the offset is negative, a pointer to
 * existing output data is returned. The function only takes care of wrapping,
 * it's up to the caller to ensure the offset is always within byte count
 * bounds.
 */
static inline char *c_ptr(const struct channel *c, ssize_t ofs)
{
	return b_peek(&c->buf, co_data(c) + ofs);
}
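
/* A minimal sketch (hypothetical byte counts): with 10 output bytes followed
 * by 5 input bytes in the buffer:
 *
 *     char *in0  = c_ptr(c, 0);    // first input byte, same as ci_head(c)
 *     char *out0 = c_ptr(c, -10);  // first output byte, same as co_head(c)
 *
 * b_peek() handles the wrapping; staying within byte count bounds is the
 * caller's job.
 */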

/* c_adv() : advances the channel's buffer by <adv> bytes, which means that the
 * buffer's pointer advances, and that as many bytes are transferred from in to
 * out. The caller is responsible for ensuring that adv is always smaller than
 * or equal to b->i.
 */
static inline void c_adv(struct channel *c, size_t adv)
{
	c->output += adv;
}

/* c_rew() : rewinds the channel's buffer by <adv> bytes, which means that the
 * buffer's pointer goes backwards, and that as many bytes from out are moved
 * to in. The caller is responsible for ensuring that adv is always smaller
 * than or equal to b->o.
 */
static inline void c_rew(struct channel *c, size_t adv)
{
	c->output -= adv;
}
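
/* Example sketch (hypothetical size): once an analyser has validated a
 * 120-byte block at the head of the input, scheduling it for emission, and
 * possibly undoing that before anything was sent, looks like:
 *
 *     c_adv(chn, 120);   // 120 input bytes become output bytes
 *     c_rew(chn, 120);   // move them back to input
 */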

/* c_realign_if_empty() : realign the channel's buffer if it's empty */
static inline void c_realign_if_empty(struct channel *chn)
{
	b_realign_if_empty(&chn->buf);
}

/* Sets the amount of output for the channel */
static inline void co_set_data(struct channel *c, size_t output)
{
	c->output = output;
}


/* co_head() : returns a pointer to the beginning of output data in the buffer.
 *             The "__" variants don't support wrapping, "ofs" are relative to
 *             the buffer's origin.
 */
static inline size_t __co_head_ofs(const struct channel *c)
{
	return __b_peek_ofs(&c->buf, 0);
}
static inline char *__co_head(const struct channel *c)
{
	return __b_peek(&c->buf, 0);
}
static inline size_t co_head_ofs(const struct channel *c)
{
	return b_peek_ofs(&c->buf, 0);
}
static inline char *co_head(const struct channel *c)
{
	return b_peek(&c->buf, 0);
}


/* co_tail() : returns a pointer to the end of output data in the buffer.
 *             The "__" variants don't support wrapping, "ofs" are relative to
 *             the buffer's origin.
 */
static inline size_t __co_tail_ofs(const struct channel *c)
{
	return __b_peek_ofs(&c->buf, co_data(c));
}
static inline char *__co_tail(const struct channel *c)
{
	return __b_peek(&c->buf, co_data(c));
}
static inline size_t co_tail_ofs(const struct channel *c)
{
	return b_peek_ofs(&c->buf, co_data(c));
}
static inline char *co_tail(const struct channel *c)
{
	return b_peek(&c->buf, co_data(c));
}


/* ci_head() : returns a pointer to the beginning of input data in the buffer.
 *             The "__" variants don't support wrapping, "ofs" are relative to
 *             the buffer's origin.
 */
static inline size_t __ci_head_ofs(const struct channel *c)
{
	return __b_peek_ofs(&c->buf, co_data(c));
}
static inline char *__ci_head(const struct channel *c)
{
	return __b_peek(&c->buf, co_data(c));
}
static inline size_t ci_head_ofs(const struct channel *c)
{
	return b_peek_ofs(&c->buf, co_data(c));
}
static inline char *ci_head(const struct channel *c)
{
	return b_peek(&c->buf, co_data(c));
}


/* ci_tail() : returns a pointer to the end of input data in the buffer.
 *             The "__" variants don't support wrapping, "ofs" are relative to
 *             the buffer's origin.
 */
static inline size_t __ci_tail_ofs(const struct channel *c)
{
	return __b_peek_ofs(&c->buf, c_data(c));
}
static inline char *__ci_tail(const struct channel *c)
{
	return __b_peek(&c->buf, c_data(c));
}
static inline size_t ci_tail_ofs(const struct channel *c)
{
	return b_peek_ofs(&c->buf, c_data(c));
}
static inline char *ci_tail(const struct channel *c)
{
	return b_peek(&c->buf, c_data(c));
}


/* ci_stop() : returns the pointer to the byte following the end of input data
 *             in the channel buffer. It may be out of the buffer. It's used to
 *             compute lengths or stop pointers.
 */
static inline size_t __ci_stop_ofs(const struct channel *c)
{
	return __b_stop_ofs(&c->buf);
}
static inline const char *__ci_stop(const struct channel *c)
{
	return __b_stop(&c->buf);
}
static inline size_t ci_stop_ofs(const struct channel *c)
{
	return b_stop_ofs(&c->buf);
}
static inline const char *ci_stop(const struct channel *c)
{
	return b_stop(&c->buf);
}


/* Returns the amount of input data that can contiguously be read at once */
static inline size_t ci_contig_data(const struct channel *c)
{
	return b_contig_data(&c->buf, co_data(c));
}

/* Initialize all fields in the channel. */
static inline void channel_init(struct channel *chn)
{
	chn->buf = BUF_NULL;
	chn->to_forward = 0;
	chn->last_read = now_ms;
	chn->xfer_small = chn->xfer_large = 0;
	chn->total = 0;
	chn->pipe = NULL;
	chn->analysers = 0;
	chn->flags = 0;
	chn->output = 0;
}

/* Schedule up to <bytes> more bytes to be forwarded via the channel without
 * notifying the owner task. Any data pending in the buffer are scheduled to be
 * sent as well, in the limit of the number of bytes to forward. This must be
 * the only method to use to schedule bytes to be forwarded. If the requested
 * number is too large, it is automatically adjusted. The number of bytes taken
 * into account is returned. Directly touching ->to_forward will cause lockups
 * when buf->o goes down to zero if nobody is ready to push the remaining data.
 */
static inline unsigned long long channel_forward(struct channel *chn, unsigned long long bytes)
{
	/* hint: avoid comparisons on long long for the fast case, since if the
	 * length does not fit in an unsigned int, it will never be forwarded at
	 * once anyway.
	 */
	if (bytes <= ~0U) {
		unsigned int bytes32 = bytes;

		if (bytes32 <= ci_data(chn)) {
			/* OK this amount of bytes might be forwarded at once */
			c_adv(chn, bytes32);
			return bytes;
		}
	}
	return __channel_forward(chn, bytes);
}
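
/* Usage sketch (assuming <msg_len> was computed by an analyser): schedule a
 * whole message for forwarding; whatever cannot be taken at once is remembered
 * in ->to_forward and flows automatically as new data arrive:
 *
 *     unsigned long long taken = channel_forward(chn, msg_len);
 */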

/* Forwards any input data and marks the channel for permanent forwarding */
static inline void channel_forward_forever(struct channel *chn)
{
	c_adv(chn, ci_data(chn));
	chn->to_forward = CHN_INFINITE_FORWARD;
}

/* <len> bytes of input data were added into the channel <chn>. This function
 * must be called to update the channel state. It also handles the fast
 * forwarding. */
static inline void channel_add_input(struct channel *chn, unsigned int len)
{
	if (chn->to_forward) {
		unsigned long fwd = len;
		if (chn->to_forward != CHN_INFINITE_FORWARD) {
			if (fwd > chn->to_forward)
				fwd = chn->to_forward;
			chn->to_forward -= fwd;
		}
		c_adv(chn, fwd);
	}
	/* notify that some data was read */
	chn->total += len;
	chn->flags |= CF_READ_PARTIAL;
}
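
/* Producer-side sketch (assumption: <ret> bytes were just appended at
 * ci_tail(chn) by some receive routine, and accounted for with b_add()):
 *
 *     b_add(&chn->buf, ret);        // account for the new bytes
 *     channel_add_input(chn, ret);  // update forwarding state and flags
 */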

static inline unsigned long long channel_htx_forward(struct channel *chn, struct htx *htx, unsigned long long bytes)
{
	unsigned long long ret = 0;

	if (htx->data) {
		b_set_data(&chn->buf, htx->data);
		ret = channel_forward(chn, bytes);
		b_set_data(&chn->buf, b_size(&chn->buf));
	}
	return ret;
}


static inline void channel_htx_forward_forever(struct channel *chn, struct htx *htx)
{
	c_adv(chn, htx->data - co_data(chn));
	chn->to_forward = CHN_INFINITE_FORWARD;
}

/*********************************************************************/
/* These functions are used to compute various channel content sizes */
/*********************************************************************/

/* Reports non-zero if the channel is empty, which means both its
 * buffer and pipe are empty. The construct looks strange but is
 * jump-less and much more efficient on both 32 and 64-bit than
 * the boolean test.
 */
static inline unsigned int channel_is_empty(const struct channel *c)
{
	return !(co_data(c) | (long)c->pipe);
}
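
/* Note (equivalent form): the OR above computes the same result as the more
 * readable but branchier:
 *
 *     return co_data(c) == 0 && c->pipe == NULL;
 */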

/* Returns non-zero if the channel is rewritable, which means that the buffer
 * it is attached to has at least <maxrewrite> bytes immediately available.
 * This is used to decide when a request or response may be parsed when some
 * data from a previous exchange might still be present.
 */
static inline int channel_is_rewritable(const struct channel *chn)
{
	int rem = chn->buf.size;

	rem -= b_data(&chn->buf);
	rem -= global.tune.maxrewrite;
	return rem >= 0;
}

/* Tells whether data are likely to leave the buffer. This is used to know when
 * we can safely ignore the reserve since we know we cannot retry a connection.
 * It returns zero if data are blocked, non-zero otherwise.
 */
static inline int channel_may_send(const struct channel *chn)
{
	return chn_cons(chn)->state == SI_ST_EST;
}

/* Returns non-zero if the channel can still receive data. This is used to
 * decide when to stop reading into a buffer when we want to ensure that we
 * leave the reserve untouched after all pending outgoing data are forwarded.
 * The reserved space is taken into account if ->to_forward indicates that an
 * end of transfer is close to happen. Note that both ->buf.o and ->to_forward
 * are considered as available since they're supposed to leave the buffer. The
 * test is optimized to avoid as many operations as possible for the fast case
 * and to be used as an "if" condition. Just like channel_recv_limit(), we
 * never allow the reserve to be overwritten until the output stream interface
 * is connected, otherwise we could spin on a POST with http-send-name-header.
 */
static inline int channel_may_recv(const struct channel *chn)
{
	int rem = chn->buf.size;

	if (b_is_null(&chn->buf))
		return 1;

	rem -= b_data(&chn->buf);
	if (!rem)
		return 0; /* buffer already full */

	if (rem > global.tune.maxrewrite)
		return 1; /* reserve not yet reached */

	if (!channel_may_send(chn))
		return 0; /* don't touch reserve until we can send */

	/* Now we know there's some room left in the reserve and we may
	 * forward. As long as i-to_fwd < size-maxrw, we may still
	 * receive. This is equivalent to i+maxrw-size < to_fwd,
	 * which is logical since i+maxrw-size is what overlaps with
	 * the reserve, and we want to ensure they're covered by scheduled
	 * forwards.
	 */
	rem = ci_data(chn) + global.tune.maxrewrite - chn->buf.size;
	return rem < 0 || (unsigned int)rem < chn->to_forward;
}

/* HTX version of channel_may_recv(). Returns non-zero if the channel can still
 * receive data. */
static inline int channel_htx_may_recv(const struct channel *chn, const struct htx *htx)
{
	uint32_t rem;

	if (!htx->size)
		return 1;

	if (!channel_may_send(chn))
		return 0; /* don't touch reserve until we can send */

	rem = htx_free_data_space(htx);
	if (!rem)
		return 0; /* htx already full */

	if (rem > global.tune.maxrewrite)
		return 1; /* reserve not yet reached */

	/* Now we know there's some room left in the reserve and we may
	 * forward. As long as i-to_fwd < size-maxrw, we may still
	 * receive. This is equivalent to i+maxrw-size < to_fwd,
	 * which is logical since i+maxrw-size is what overlaps with
	 * the reserve, and we want to ensure they're covered by scheduled
	 * forwards.
	 */
	rem += co_data(chn);
	if (rem > global.tune.maxrewrite)
		return 1;

	return (global.tune.maxrewrite - rem < chn->to_forward);
}

/* Returns true if the channel's input is already closed */
static inline int channel_input_closed(struct channel *chn)
{
	return ((chn->flags & CF_SHUTR) != 0);
}

/* Returns true if the channel's output is already closed */
static inline int channel_output_closed(struct channel *chn)
{
	return ((chn->flags & CF_SHUTW) != 0);
}

/* Check channel timeouts, and set the corresponding flags. The likely/unlikely
 * have been optimized for fastest normal path. The read/write timeouts are not
 * set if there was activity on the channel. That way, we don't have to update
 * the timeout on every I/O. Note that the analyser timeout is always checked.
 */
static inline void channel_check_timeouts(struct channel *chn)
{
	if (likely(!(chn->flags & (CF_SHUTR|CF_READ_TIMEOUT|CF_READ_ACTIVITY|CF_READ_NOEXP))) &&
	    unlikely(tick_is_expired(chn->rex, now_ms)))
		chn->flags |= CF_READ_TIMEOUT;

	if (likely(!(chn->flags & (CF_SHUTW|CF_WRITE_TIMEOUT|CF_WRITE_ACTIVITY))) &&
	    unlikely(tick_is_expired(chn->wex, now_ms)))
		chn->flags |= CF_WRITE_TIMEOUT;

	if (likely(!(chn->flags & CF_ANA_TIMEOUT)) &&
	    unlikely(tick_is_expired(chn->analyse_exp, now_ms)))
		chn->flags |= CF_ANA_TIMEOUT;
}

/* Erases any content from channel <chn> and adjusts flags accordingly. Note
 * that any spliced data is not affected since we may not have any access to
 * it.
 */
static inline void channel_erase(struct channel *chn)
{
	chn->to_forward = 0;
	chn->output = 0;
	b_reset(&chn->buf);
}

static inline void channel_htx_erase(struct channel *chn, struct htx *htx)
{
	htx_reset(htx);
	channel_erase(chn);
}

/* marks the channel as "shutdown" ASAP for reads */
static inline void channel_shutr_now(struct channel *chn)
{
	chn->flags |= CF_SHUTR_NOW;
}

/* marks the channel as "shutdown" ASAP for writes */
static inline void channel_shutw_now(struct channel *chn)
{
	chn->flags |= CF_SHUTW_NOW;
}

/* marks the channel as "shutdown" ASAP in both directions */
static inline void channel_abort(struct channel *chn)
{
	chn->flags |= CF_SHUTR_NOW | CF_SHUTW_NOW;
	chn->flags &= ~CF_AUTO_CONNECT;
}

/* allow the consumer to try to establish a new connection. */
static inline void channel_auto_connect(struct channel *chn)
{
	chn->flags |= CF_AUTO_CONNECT;
}

/* prevent the consumer from trying to establish a new connection, and also
 * disable auto shutdown forwarding.
 */
static inline void channel_dont_connect(struct channel *chn)
{
	chn->flags &= ~(CF_AUTO_CONNECT|CF_AUTO_CLOSE);
}

/* allow the producer to forward shutdown requests */
static inline void channel_auto_close(struct channel *chn)
{
	chn->flags |= CF_AUTO_CLOSE;
}

/* prevent the producer from forwarding shutdown requests */
static inline void channel_dont_close(struct channel *chn)
{
	chn->flags &= ~CF_AUTO_CLOSE;
}

/* allow the producer to read / poll the input */
static inline void channel_auto_read(struct channel *chn)
{
	chn->flags &= ~CF_DONT_READ;
}

/* prevent the producer from reading / polling the input */
static inline void channel_dont_read(struct channel *chn)
{
	chn->flags |= CF_DONT_READ;
}


/*************************************************/
/* Buffer operations in the context of a channel */
/*************************************************/


/* Return the max number of bytes the buffer can contain so that once all the
 * pending bytes are forwarded, the buffer still has global.tune.maxrewrite
 * bytes free. The result sits between chn->size - maxrewrite and chn->size.
 * It is important to mention that if buf->i is already larger than size-maxrw
 * the condition above cannot be satisfied and the lowest size will be returned
 * anyway. The principles are the following :
 *   0) the empty buffer has a limit of zero
 *   1) a non-connected buffer cannot touch the reserve
 *   2) infinite forward can always fill the buffer since all data will leave
 *   3) all output bytes are considered in transit since they're leaving
 *   4) all input bytes covered by to_forward are considered in transit since
 *      they'll be converted to output bytes.
 *   5) all input bytes not covered by to_forward are considered remaining
 *   6) all bytes scheduled to be forwarded minus what is already in the input
 *      buffer will be in transit during future rounds.
 *   7) 4+5+6 imply that the amount of input bytes (i) is irrelevant to the max
 *      usable length, only to_forward and output count. The difference is
 *      visible when to_forward > i.
 *   8) the reserve may be covered up to the amount of bytes in transit since
 *      these bytes will only take temporary space.
 *
 * A typical buffer looks like this :
 *
 *      <-------------- max_len ----------->
 *      <---- o ----><----- i ----->        <--- 0..maxrewrite --->
 *      +------------+--------------+-------+----------------------+
 *      |////////////|\\\\\\\\\\\\\\|xxxxxxx|        reserve       |
 *      +------------+--------+-----+-------+----------------------+
 *                   <- fwd ->      <-avail->
 *
 * Or when to_forward > i :
 *
 *      <-------------- max_len ----------->
 *      <---- o ----><----- i ----->        <--- 0..maxrewrite --->
 *      +------------+--------------+-------+----------------------+
 *      |////////////|\\\\\\\\\\\\\\|xxxxxxx|        reserve       |
 *      +------------+--------+-----+-------+----------------------+
 *                                  <-avail->
 *                   <------------------ fwd ---------------->
 *
 * - the amount of buffer bytes in transit is : min(i, fwd) + o
 * - some scheduled bytes may be in transit (up to fwd - i)
 * - the reserve is max(0, maxrewrite - transit)
 * - the maximum usable buffer length is size - reserve.
 * - the available space is max_len - i - o
 *
 * So the formula to compute the buffer's maximum length to protect the reserve
 * when reading new data is :
 *
 *    max = size - maxrewrite + min(maxrewrite, transit)
 *        = size - max(maxrewrite - transit, 0)
 *
 * But WARNING! The conditions might change during the transfer and it could
 * very well happen that a buffer would contain more bytes than max_len due to
 * i+o already walking over the reserve (eg: after a header rewrite), including
 * i or o alone hitting the limit. So it is critical to always consider that
 * bounds may have already been crossed and that available space may be negative
 * for example. Due to this it is perfectly possible for this function to return
 * a value that is lower than current i+o.
 */
static inline int channel_recv_limit(const struct channel *chn)
{
	unsigned int transit;
	int reserve;

	/* return zero if empty */
	reserve = chn->buf.size;
	if (b_is_null(&chn->buf))
		goto end;

	/* return size - maxrewrite if we can't send */
	reserve = global.tune.maxrewrite;
	if (unlikely(!channel_may_send(chn)))
		goto end;

	/* We need to check what remains of the reserve after o and to_forward
	 * have been transmitted, but they can overflow together and they can
	 * cause an integer underflow in the comparison since both are unsigned
	 * while maxrewrite is signed.
	 * The code below has been verified for being a valid check for this :
	 *   - if (o + to_forward) overflow => return size  [ large enough ]
	 *   - if o + to_forward >= maxrw   => return size  [ large enough ]
	 *   - otherwise return size - (maxrw - (o + to_forward))
	 */
	transit = co_data(chn) + chn->to_forward;
	reserve -= transit;
	if (transit < chn->to_forward ||                 // addition overflow
	    transit >= (unsigned)global.tune.maxrewrite) // enough transit data
		return chn->buf.size;
 end:
	return chn->buf.size - reserve;
}
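
/* Worked example (hypothetical numbers): with size=16384, maxrewrite=1024,
 * o=0 and to_forward=300 on a connected channel:
 *
 *     transit = 0 + 300     = 300
 *     reserve = 1024 - 300  = 724
 *     limit   = 16384 - 724 = 15660
 *
 * i.e. 300 bytes of the reserve may be used because that many bytes are
 * already known to leave the buffer.
 */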

/* HTX version of channel_recv_limit(). Return the max number of bytes the HTX
 * buffer can contain so that once all the pending bytes are forwarded, the
 * buffer still has global.tune.maxrewrite bytes free.
 */
static inline int channel_htx_recv_limit(const struct channel *chn, const struct htx *htx)
{
	unsigned int transit;
	int reserve;

	/* return zero if not allocated */
	if (!htx->size)
		return 0;

	/* return max_data_space - maxrewrite if we can't send */
	reserve = global.tune.maxrewrite;
	if (unlikely(!channel_may_send(chn)))
		goto end;

	/* We need to check what remains of the reserve after o and to_forward
	 * have been transmitted, but they can overflow together and they can
	 * cause an integer underflow in the comparison since both are unsigned
	 * while maxrewrite is signed.
	 * The code below has been verified for being a valid check for this :
	 *   - if (o + to_forward) overflow => return htx->size  [ large enough ]
	 *   - if o + to_forward >= maxrw   => return htx->size  [ large enough ]
	 *   - otherwise return htx->size - (maxrw - (o + to_forward))
	 */
	transit = co_data(chn) + chn->to_forward;
	reserve -= transit;
	if (transit < chn->to_forward ||                 // addition overflow
	    transit >= (unsigned)global.tune.maxrewrite) // enough transit data
		return htx->size;
 end:
	return (htx->size - reserve);
}

/* HTX version of channel_full(). Instead of checking if INPUT data exceeds
 * (size - reserve), this function checks if the free space for data in <htx>
 * and the data scheduled for output are lower than the reserve. In such a
 * case, the channel is considered as full.
 */
static inline int channel_htx_full(const struct channel *c, const struct htx *htx,
				   unsigned int reserve)
{
	if (!htx->size)
		return 0;
	return (htx_free_data_space(htx) + co_data(c) <= reserve);
}

/* Returns non-zero if the channel's INPUT buffer is considered full, which
 * means that it holds at least as much INPUT data as (size - reserve). This
 * also means that data that are scheduled for output are considered as potential
 * free space, and that the reserved space is always considered as not usable.
 * This information alone cannot be used as a general purpose free space indicator.
 * However it accurately indicates that too many data were fed in the buffer
 * for an analyzer for instance. See the channel_may_recv() function for a more
 * generic function taking everything into account.
 */
static inline int channel_full(const struct channel *c, unsigned int reserve)
{
	if (b_is_null(&c->buf))
		return 0;

	if (IS_HTX_STRM(chn_strm(c)))
		return channel_htx_full(c, htxbuf(&c->buf), reserve);

	return (ci_data(c) + reserve >= c_size(c));
}

/* HTX version of channel_recv_max(). */
static inline int channel_htx_recv_max(const struct channel *chn, const struct htx *htx)
{
	int ret;

	ret = channel_htx_recv_limit(chn, htx) - htx_used_space(htx);
	if (ret < 0)
		ret = 0;
	return ret;
}

/* Returns the amount of space available at the input of the buffer, taking the
 * reserved space into account if ->to_forward indicates that an end of transfer
 * is close to happen. The test is optimized to avoid as many operations as
 * possible for the fast case.
 */
static inline int channel_recv_max(const struct channel *chn)
{
	int ret;

	if (IS_HTX_STRM(chn_strm(chn)))
		return channel_htx_recv_max(chn, htxbuf(&chn->buf));

	ret = channel_recv_limit(chn) - b_data(&chn->buf);
	if (ret < 0)
		ret = 0;
	return ret;
}

/* Returns the amount of bytes that can be written over the input data at once,
 * including reserved space which may be overwritten. This is used by Lua to
 * insert data in the input side just before the other data using buffer_replace().
 * The goal is to transfer these new data in the output buffer.
 */
static inline int ci_space_for_replace(const struct channel *chn)
{
	const struct buffer *buf = &chn->buf;
	const char *end;

	/* If the input side data overflows, we cannot insert data contiguously. */
	if (b_head(buf) + b_data(buf) >= b_wrap(buf))
		return 0;

	/* Check the last byte used in the buffer, it may be a byte of the output
	 * side if the buffer wraps, or it's the end of the buffer.
	 */
	end = b_head(buf);
	if (end <= ci_head(chn))
		end = b_wrap(buf);

	/* Compute the amount of bytes which can be written. */
	return end - ci_tail(chn);
}

/* Allocates a buffer for channel <chn>, but only if it's guaranteed that it's
 * not the last available buffer or it's the response buffer. Unless the buffer
 * is the response buffer, an extra control is made so that we always keep
 * <tune.buffers.reserved> buffers available after this allocation. Returns 0 in
 * case of failure, non-zero otherwise.
 *
 * If no buffers are available, the requester, represented by the <wait> pointer,
 * will be added in the list of objects waiting for an available buffer.
 */
static inline int channel_alloc_buffer(struct channel *chn, struct buffer_wait *wait)
{
	int margin = 0;

	if (!(chn->flags & CF_ISRESP))
		margin = global.tune.reserved_bufs;

	if (b_alloc_margin(&chn->buf, margin) != NULL)
		return 1;

	if (LIST_ISEMPTY(&wait->list)) {
		HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
		LIST_ADDQ(&buffer_wq, &wait->list);
		HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
	}

	return 0;
}
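
/* Caller-side sketch (assumption: a struct buffer_wait <bw> whose ->target
 * and wakeup callback were set up beforehand):
 *
 *     if (!channel_alloc_buffer(chn, &bw))
 *         return 0;  // queued on buffer_wq, retried once a buffer is freed
 */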

/* Releases a possibly allocated buffer for channel <chn>. If it was not
 * allocated, this function does nothing. Else the buffer is released and we try
 * to wake up as many streams/applets as possible. */
static inline void channel_release_buffer(struct channel *chn, struct buffer_wait *wait)
{
	if (c_size(chn) && c_empty(chn)) {
		b_free(&chn->buf);
		offer_buffers(wait->target, tasks_run_queue);
	}
}

/* Truncate any unread data in the channel's buffer, and disable forwarding.
 * Outgoing data are left intact. This is mainly to be used to send error
 * messages after existing data.
 */
static inline void channel_truncate(struct channel *chn)
{
	if (!co_data(chn))
		return channel_erase(chn);

	chn->to_forward = 0;
	if (!ci_data(chn))
		return;

	chn->buf.data = co_data(chn);
}

static inline void channel_htx_truncate(struct channel *chn, struct htx *htx)
{
	if (!co_data(chn))
		return channel_htx_erase(chn, htx);

	chn->to_forward = 0;
	if (htx->data == co_data(chn))
		return;
	htx_truncate(htx, co_data(chn));
}

/* This function realigns a possibly wrapping channel buffer so that the input
 * part is contiguous and starts at the beginning of the buffer and the output
 * part ends at the end of the buffer. This provides the best conditions since
 * it allows the largest inputs to be processed at once and ensures that once
 * the output data leaves, the whole buffer is available at once.
 */
static inline void channel_slow_realign(struct channel *chn, char *swap)
{
	return b_slow_realign(&chn->buf, swap, co_data(chn));
}


/* Forward all headers of an HTX message, starting from the SL to the EOH. This
 * function returns the position of the block after the EOH, if
 * found. Otherwise, it returns -1.
 */
static inline int32_t channel_htx_fwd_headers(struct channel *chn, struct htx *htx)
{
	int32_t pos;
	size_t  data = 0;

	for (pos = htx_get_first(htx); pos != -1; pos = htx_get_next(htx, pos)) {
		struct htx_blk *blk = htx_get_blk(htx, pos);
		data += htx_get_blksz(blk);
		if (htx_get_blk_type(blk) == HTX_BLK_EOH) {
			pos = htx_get_next(htx, pos);
			break;
		}
	}
	c_adv(chn, data);
	return pos;
}

/* Copy an HTX message stored in the buffer <msg> to the channel's one. We
 * take care not to overwrite existing data in the channel. The message is
 * copied in full or not at all. It returns 1 on success and 0 on error.
 */
static inline int channel_htx_copy_msg(struct channel *chn, struct htx *htx, const struct buffer *msg)
{
	/* The channel buffer is empty, we can do a raw copy */
	if (c_empty(chn)) {
		chn->buf.data = msg->data;
		memcpy(chn->buf.area, msg->area, msg->data);
		return 1;
	}

	/* Otherwise, we need to append the HTX message */
	return htx_append_msg(htx, htxbuf(msg));
}

/*
 * Advance the channel buffer's read pointer by <len> bytes. This is useful
 * when data have been read directly from the buffer. It is illegal to call
 * this function with <len> causing a wrapping at the end of the buffer. It's
 * the caller's responsibility to ensure that <len> is never larger than
 * chn->o. Channel flags WRITE_PARTIAL and WROTE_DATA are set.
 */
static inline void co_skip(struct channel *chn, int len)
{
	b_del(&chn->buf, len);
	chn->output -= len;
	c_realign_if_empty(chn);

	/* notify that some data was written to the SI from the buffer */
	chn->flags |= CF_WRITE_PARTIAL | CF_WROTE_DATA;
	chn_prod(chn)->flags &= ~SI_FL_RXBLK_ROOM; // si_rx_room_rdy()
}
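
/* Consumer-side sketch: peek at pending output without copying it, process
 * the first contiguous block, then consume what was effectively used:
 *
 *     const char *blk1, *blk2;
 *     size_t len1, len2;
 *
 *     if (co_getblk_nc(chn, &blk1, &len1, &blk2, &len2) > 0) {
 *         // ... process blk1[0..len1-1] ...
 *         co_skip(chn, len1);
 *     }
 */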

/* HTX version of co_skip(). This function skips at most <len> bytes from the
 * output of the channel <chn>. Depending on how data are stored in <htx> less
 * than <len> bytes can be skipped. Channel flags WRITE_PARTIAL and WROTE_DATA
 * are set.
 */
static inline void co_htx_skip(struct channel *chn, struct htx *htx, int len)
{
	struct htx_ret htxret;

	htxret = htx_drain(htx, len);
	if (htxret.ret) {
		chn->output -= htxret.ret;

		/* notify that some data was written to the SI from the buffer */
		chn->flags |= CF_WRITE_PARTIAL | CF_WROTE_DATA;
		chn_prod(chn)->flags &= ~SI_FL_RXBLK_ROOM; // si_rx_room_rdy()
	}
}

/* Tries to copy chunk <chunk> into the channel's buffer after length controls.
 * The chn->o and to_forward pointers are updated. If the channel's input is
 * closed, -2 is returned. If the block is too large for this buffer, -3 is
 * returned. If there is not enough room left in the buffer, -1 is returned.
 * Otherwise the number of bytes copied is returned (0 being a valid number).
 * Channel flag READ_PARTIAL is updated if some data can be transferred. The
 * chunk's length is updated with the number of bytes sent.
 */
static inline int ci_putchk(struct channel *chn, struct buffer *chunk)
{
	int ret;

	ret = ci_putblk(chn, chunk->area, chunk->data);
	if (ret > 0)
		chunk->data -= ret;
	return ret;
}
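
/* Error-handling sketch for the ci_put*() family, e.g. from an applet's I/O
 * handler using the global <trash> chunk:
 *
 *     int ret = ci_putchk(chn, &trash);
 *
 *     if (ret == -1) {
 *         // no room for now: wait until the consumer makes some
 *     } else if (ret < 0) {
 *         // -2 (input closed) or -3 (block larger than the buffer): give up
 *     }
 */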

/* Tries to copy string <str> at once into the channel's buffer after length
 * controls.  The chn->o and to_forward pointers are updated. If the channel's
 * input is closed, -2 is returned. If the block is too large for this buffer,
 * -3 is returned. If there is not enough room left in the buffer, -1 is
 * returned.  Otherwise the number of bytes copied is returned (0 being a valid
 * number).  Channel flag READ_PARTIAL is updated if some data can be
 * transferred.
 */
static inline int ci_putstr(struct channel *chn, const char *str)
{
	return ci_putblk(chn, str, strlen(str));
}

/*
 * Return one char from the channel's buffer. If the buffer is empty and the
 * channel is closed, return -2. If the buffer is just empty, return -1. The
 * buffer's pointer is not advanced, it's up to the caller to call co_skip(buf,
 * 1) when it has consumed the char.  Also note that this function respects the
 * chn->o limit.
 */
static inline int co_getchr(struct channel *chn)
{
	/* closed or empty + imminent close = -2; empty = -1 */
	if (unlikely((chn->flags & CF_SHUTW) || channel_is_empty(chn))) {
		if (chn->flags & (CF_SHUTW|CF_SHUTW_NOW))
			return -2;
		return -1;
	}
	return *co_head(chn);
}
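
/* Usage sketch: consuming output one character at a time:
 *
 *     int c;
 *
 *     while ((c = co_getchr(chn)) >= 0) {
 *         // ... use <c> ...
 *         co_skip(chn, 1);
 *     }
 *     // c == -1 : no output data for now ; c == -2 : channel closed
 */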


#endif /* _PROTO_CHANNEL_H */

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */