1 /*
2  * Functions managing stream_interface structures
3  *
4  * Copyright 2000-2012 Willy Tarreau <w@1wt.eu>
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * as published by the Free Software Foundation; either version
9  * 2 of the License, or (at your option) any later version.
10  *
11  */
12 
13 #include <errno.h>
14 #include <fcntl.h>
15 #include <stdio.h>
16 #include <stdlib.h>
17 
18 #include <sys/socket.h>
19 #include <sys/stat.h>
20 #include <sys/types.h>
21 
22 #include <haproxy/api.h>
23 #include <haproxy/applet.h>
24 #include <haproxy/channel.h>
25 #include <haproxy/connection.h>
26 #include <haproxy/dynbuf.h>
27 #include <haproxy/http_htx.h>
28 #include <haproxy/pipe-t.h>
29 #include <haproxy/pipe.h>
30 #include <haproxy/proxy.h>
31 #include <haproxy/stream-t.h>
32 #include <haproxy/stream_interface.h>
33 #include <haproxy/task.h>
34 #include <haproxy/ticks.h>
35 #include <haproxy/time.h>
36 #include <haproxy/tools.h>
37 
38 
39 /* functions used by default on a detached stream-interface */
40 static void stream_int_shutr(struct stream_interface *si);
41 static void stream_int_shutw(struct stream_interface *si);
42 static void stream_int_chk_rcv(struct stream_interface *si);
43 static void stream_int_chk_snd(struct stream_interface *si);
44 
45 /* functions used on a conn_stream-based stream-interface */
46 static void stream_int_shutr_conn(struct stream_interface *si);
47 static void stream_int_shutw_conn(struct stream_interface *si);
48 static void stream_int_chk_rcv_conn(struct stream_interface *si);
49 static void stream_int_chk_snd_conn(struct stream_interface *si);
50 
51 /* functions used on an applet-based stream-interface */
52 static void stream_int_shutr_applet(struct stream_interface *si);
53 static void stream_int_shutw_applet(struct stream_interface *si);
54 static void stream_int_chk_rcv_applet(struct stream_interface *si);
55 static void stream_int_chk_snd_applet(struct stream_interface *si);
56 
57 /* last read notification */
58 static void stream_int_read0(struct stream_interface *si);
59 
60 /* post-IO notification callback */
61 static void stream_int_notify(struct stream_interface *si);
62 
63 /* stream-interface operations for embedded tasks */
64 struct si_ops si_embedded_ops = {
65 	.chk_rcv = stream_int_chk_rcv,
66 	.chk_snd = stream_int_chk_snd,
67 	.shutr   = stream_int_shutr,
68 	.shutw   = stream_int_shutw,
69 };
70 
71 /* stream-interface operations for connections */
72 struct si_ops si_conn_ops = {
73 	.chk_rcv = stream_int_chk_rcv_conn,
74 	.chk_snd = stream_int_chk_snd_conn,
75 	.shutr   = stream_int_shutr_conn,
76 	.shutw   = stream_int_shutw_conn,
77 };
78 
79 /* stream-interface operations for connections */
80 struct si_ops si_applet_ops = {
81 	.chk_rcv = stream_int_chk_rcv_applet,
82 	.chk_snd = stream_int_chk_snd_applet,
83 	.shutr   = stream_int_shutr_applet,
84 	.shutw   = stream_int_shutw_applet,
85 };
86 
87 
88 /* Functions used to communicate with a conn_stream. The first two may be used
89  * directly, the last one is mostly a wake callback.
90  */
91 int si_cs_recv(struct conn_stream *cs);
92 int si_cs_send(struct conn_stream *cs);
93 static int si_cs_process(struct conn_stream *cs);
94 
95 
96 struct data_cb si_conn_cb = {
97 	.wake    = si_cs_process,
98 	.name    = "STRM",
99 };
100 
101 /*
102  * This function only has to be called once after a wakeup event in case of
103  * suspected timeout. It controls the stream interface timeouts and sets
104  * si->flags accordingly. It does NOT close anything, as this timeout may
105  * be used for any purpose. It returns 1 if the timeout fired, otherwise
106  * zero.
107  */
si_check_timeouts(struct stream_interface * si)108 int si_check_timeouts(struct stream_interface *si)
109 {
110 	if (tick_is_expired(si->exp, now_ms)) {
111 		si->flags |= SI_FL_EXP;
112 		return 1;
113 	}
114 	return 0;
115 }
116 
117 /* to be called only when in SI_ST_DIS with SI_FL_ERR */
si_report_error(struct stream_interface * si)118 void si_report_error(struct stream_interface *si)
119 {
120 	if (!si->err_type)
121 		si->err_type = SI_ET_DATA_ERR;
122 
123 	si_oc(si)->flags |= CF_WRITE_ERROR;
124 	si_ic(si)->flags |= CF_READ_ERROR;
125 }
126 
127 /*
128  * Returns a message to the client ; the connection is shut down for read,
129  * and the request is cleared so that no server connection can be initiated.
130  * The buffer is marked for read shutdown on the other side to protect the
131  * message, and the buffer write is enabled. The message is contained in a
132  * "chunk". If it is null, then an empty message is used. The reply buffer does
133  * not need to be empty before this, and its contents will not be overwritten.
134  * The primary goal of this function is to return error messages to a client.
135  */
si_retnclose(struct stream_interface * si,const struct buffer * msg)136 void si_retnclose(struct stream_interface *si,
137 			  const struct buffer *msg)
138 {
139 	struct channel *ic = si_ic(si);
140 	struct channel *oc = si_oc(si);
141 
142 	channel_auto_read(ic);
143 	channel_abort(ic);
144 	channel_auto_close(ic);
145 	channel_erase(ic);
146 	channel_truncate(oc);
147 
148 	if (likely(msg && msg->data))
149 		co_inject(oc, msg->area, msg->data);
150 
151 	oc->wex = tick_add_ifset(now_ms, oc->wto);
152 	channel_auto_read(oc);
153 	channel_auto_close(oc);
154 	channel_shutr_now(oc);
155 }
156 
157 /*
158  * This function performs a shutdown-read on a detached stream interface in a
159  * connected or init state (it does nothing for other states). It either shuts
160  * the read side or marks itself as closed. The buffer flags are updated to
161  * reflect the new state. If the stream interface has SI_FL_NOHALF, we also
162  * forward the close to the write side. The owner task is woken up if it exists.
163  */
stream_int_shutr(struct stream_interface * si)164 static void stream_int_shutr(struct stream_interface *si)
165 {
166 	struct channel *ic = si_ic(si);
167 
168 	si_rx_shut_blk(si);
169 	if (ic->flags & CF_SHUTR)
170 		return;
171 	ic->flags |= CF_SHUTR;
172 	ic->rex = TICK_ETERNITY;
173 
174 	if (!si_state_in(si->state, SI_SB_CON|SI_SB_RDY|SI_SB_EST))
175 		return;
176 
177 	if (si_oc(si)->flags & CF_SHUTW) {
178 		si->state = SI_ST_DIS;
179 		si->exp = TICK_ETERNITY;
180 	}
181 	else if (si->flags & SI_FL_NOHALF) {
182 		/* we want to immediately forward this close to the write side */
183 		return stream_int_shutw(si);
184 	}
185 
186 	/* note that if the task exists, it must unregister itself once it runs */
187 	if (!(si->flags & SI_FL_DONT_WAKE))
188 		task_wakeup(si_task(si), TASK_WOKEN_IO);
189 }
190 
191 /*
192  * This function performs a shutdown-write on a detached stream interface in a
193  * connected or init state (it does nothing for other states). It either shuts
194  * the write side or marks itself as closed. The buffer flags are updated to
195  * reflect the new state. It does also close everything if the SI was marked as
196  * being in error state. The owner task is woken up if it exists.
197  */
stream_int_shutw(struct stream_interface * si)198 static void stream_int_shutw(struct stream_interface *si)
199 {
200 	struct channel *ic = si_ic(si);
201 	struct channel *oc = si_oc(si);
202 
203 	oc->flags &= ~CF_SHUTW_NOW;
204 	if (oc->flags & CF_SHUTW)
205 		return;
206 	oc->flags |= CF_SHUTW;
207 	oc->wex = TICK_ETERNITY;
208 	si_done_get(si);
209 
210 	if (tick_isset(si->hcto)) {
211 		ic->rto = si->hcto;
212 		ic->rex = tick_add(now_ms, ic->rto);
213 	}
214 
215 	switch (si->state) {
216 	case SI_ST_RDY:
217 	case SI_ST_EST:
218 		/* we have to shut before closing, otherwise some short messages
219 		 * may never leave the system, especially when there are remaining
220 		 * unread data in the socket input buffer, or when nolinger is set.
221 		 * However, if SI_FL_NOLINGER is explicitly set, we know there is
222 		 * no risk so we close both sides immediately.
223 		 */
224 		if (!(si->flags & (SI_FL_ERR | SI_FL_NOLINGER)) &&
225 		    !(ic->flags & (CF_SHUTR|CF_DONT_READ)))
226 			return;
227 
228 		/* fall through */
229 	case SI_ST_CON:
230 	case SI_ST_CER:
231 	case SI_ST_QUE:
232 	case SI_ST_TAR:
233 		/* Note that none of these states may happen with applets */
234 		si->state = SI_ST_DIS;
235 		/* fall through */
236 	default:
237 		si->flags &= ~SI_FL_NOLINGER;
238 		si_rx_shut_blk(si);
239 		ic->flags |= CF_SHUTR;
240 		ic->rex = TICK_ETERNITY;
241 		si->exp = TICK_ETERNITY;
242 	}
243 
244 	/* note that if the task exists, it must unregister itself once it runs */
245 	if (!(si->flags & SI_FL_DONT_WAKE))
246 		task_wakeup(si_task(si), TASK_WOKEN_IO);
247 }
248 
249 /* default chk_rcv function for scheduled tasks */
stream_int_chk_rcv(struct stream_interface * si)250 static void stream_int_chk_rcv(struct stream_interface *si)
251 {
252 	struct channel *ic = si_ic(si);
253 
254 	DPRINTF(stderr, "%s: si=%p, si->state=%d ic->flags=%08x oc->flags=%08x\n",
255 		__FUNCTION__,
256 		si, si->state, ic->flags, si_oc(si)->flags);
257 
258 	if (ic->pipe) {
259 		/* stop reading */
260 		si_rx_room_blk(si);
261 	}
262 	else {
263 		/* (re)start reading */
264 		tasklet_wakeup(si->wait_event.tasklet);
265 		if (!(si->flags & SI_FL_DONT_WAKE))
266 			task_wakeup(si_task(si), TASK_WOKEN_IO);
267 	}
268 }
269 
270 /* default chk_snd function for scheduled tasks */
stream_int_chk_snd(struct stream_interface * si)271 static void stream_int_chk_snd(struct stream_interface *si)
272 {
273 	struct channel *oc = si_oc(si);
274 
275 	DPRINTF(stderr, "%s: si=%p, si->state=%d ic->flags=%08x oc->flags=%08x\n",
276 		__FUNCTION__,
277 		si, si->state, si_ic(si)->flags, oc->flags);
278 
279 	if (unlikely(si->state != SI_ST_EST || (oc->flags & CF_SHUTW)))
280 		return;
281 
282 	if (!(si->flags & SI_FL_WAIT_DATA) ||        /* not waiting for data */
283 	    channel_is_empty(oc))           /* called with nothing to send ! */
284 		return;
285 
286 	/* Otherwise there are remaining data to be sent in the buffer,
287 	 * so we tell the handler.
288 	 */
289 	si->flags &= ~SI_FL_WAIT_DATA;
290 	if (!tick_isset(oc->wex))
291 		oc->wex = tick_add_ifset(now_ms, oc->wto);
292 
293 	if (!(si->flags & SI_FL_DONT_WAKE))
294 		task_wakeup(si_task(si), TASK_WOKEN_IO);
295 }
296 
297 /* Register an applet to handle a stream_interface as a new appctx. The SI will
298  * wake it up everytime it is solicited. The appctx must be deleted by the task
299  * handler using si_release_endpoint(), possibly from within the function itself.
300  * It also pre-initializes the applet's context and returns it (or NULL in case
301  * it could not be allocated).
302  */
si_register_handler(struct stream_interface * si,struct applet * app)303 struct appctx *si_register_handler(struct stream_interface *si, struct applet *app)
304 {
305 	struct appctx *appctx;
306 
307 	DPRINTF(stderr, "registering handler %p for si %p (was %p)\n", app, si, si_task(si));
308 
309 	appctx = si_alloc_appctx(si, app);
310 	if (!appctx)
311 		return NULL;
312 
313 	si_cant_get(si);
314 	appctx_wakeup(appctx);
315 	return si_appctx(si);
316 }
317 
318 /* This callback is used to send a valid PROXY protocol line to a socket being
319  * established. It returns 0 if it fails in a fatal way or needs to poll to go
320  * further, otherwise it returns non-zero and removes itself from the connection's
321  * flags (the bit is provided in <flag> by the caller). It is designed to be
322  * called by the connection handler and relies on it to commit polling changes.
323  * Note that it can emit a PROXY line by relying on the other end's address
324  * when the connection is attached to a stream interface, or by resolving the
325  * local address otherwise (also called a LOCAL line).
326  */
conn_si_send_proxy(struct connection * conn,unsigned int flag)327 int conn_si_send_proxy(struct connection *conn, unsigned int flag)
328 {
329 	if (!conn_ctrl_ready(conn))
330 		goto out_error;
331 
332 	/* If we have a PROXY line to send, we'll use this to validate the
333 	 * connection, in which case the connection is validated only once
334 	 * we've sent the whole proxy line. Otherwise we use connect().
335 	 */
336 	if (conn->send_proxy_ofs) {
337 		const struct conn_stream *cs;
338 		int ret;
339 
340 		/* If there is no mux attached to the connection, it means the
341 		 * connection context is a conn-stream.
342 		 */
343 		cs = (conn->mux ? cs_get_first(conn) : conn->ctx);
344 
345 		/* The target server expects a PROXY line to be sent first.
346 		 * If the send_proxy_ofs is negative, it corresponds to the
347 		 * offset to start sending from then end of the proxy string
348 		 * (which is recomputed every time since it's constant). If
349 		 * it is positive, it means we have to send from the start.
350 		 * We can only send a "normal" PROXY line when the connection
351 		 * is attached to a stream interface. Otherwise we can only
352 		 * send a LOCAL line (eg: for use with health checks).
353 		 */
354 
355 		if (cs && cs->data_cb == &si_conn_cb) {
356 			struct stream_interface *si = cs->data;
357 			struct conn_stream *remote_cs = objt_cs(si_opposite(si)->end);
358 			struct stream *strm = si_strm(si);
359 
360 			ret = make_proxy_line(trash.area, trash.size,
361 					      objt_server(conn->target),
362 					      remote_cs ? remote_cs->conn : NULL,
363 					      strm);
364 		}
365 		else {
366 			/* The target server expects a LOCAL line to be sent first. Retrieving
367 			 * local or remote addresses may fail until the connection is established.
368 			 */
369 			if (!conn_get_src(conn) || !conn_get_dst(conn))
370 				goto out_wait;
371 
372 			ret = make_proxy_line(trash.area, trash.size,
373 					      objt_server(conn->target), conn,
374 					      NULL);
375 		}
376 
377 		if (!ret)
378 			goto out_error;
379 
380 		if (conn->send_proxy_ofs > 0)
381 			conn->send_proxy_ofs = -ret; /* first call */
382 
383 		/* we have to send trash from (ret+sp for -sp bytes). If the
384 		 * data layer has a pending write, we'll also set MSG_MORE.
385 		 */
386 		ret = conn_sock_send(conn,
387 				     trash.area + ret + conn->send_proxy_ofs,
388 		                     -conn->send_proxy_ofs,
389 		                     (conn->subs && conn->subs->events & SUB_RETRY_SEND) ? MSG_MORE : 0);
390 
391 		if (ret < 0)
392 			goto out_error;
393 
394 		conn->send_proxy_ofs += ret; /* becomes zero once complete */
395 		if (conn->send_proxy_ofs != 0)
396 			goto out_wait;
397 
398 		/* OK we've sent the whole line, we're connected */
399 	}
400 
401 	/* The connection is ready now, simply return and let the connection
402 	 * handler notify upper layers if needed.
403 	 */
404 	conn->flags &= ~CO_FL_WAIT_L4_CONN;
405 	conn->flags &= ~flag;
406 	return 1;
407 
408  out_error:
409 	/* Write error on the file descriptor */
410 	conn->flags |= CO_FL_ERROR;
411 	return 0;
412 
413  out_wait:
414 	return 0;
415 }
416 
417 
418 /* This function is the equivalent to si_update() except that it's
419  * designed to be called from outside the stream handlers, typically the lower
420  * layers (applets, connections) after I/O completion. After updating the stream
421  * interface and timeouts, it will try to forward what can be forwarded, then to
422  * wake the associated task up if an important event requires special handling.
423  * It may update SI_FL_WAIT_DATA and/or SI_FL_RXBLK_ROOM, that the callers are
424  * encouraged to watch to take appropriate action.
425  * It should not be called from within the stream itself, si_update()
426  * is designed for this.
427  */
stream_int_notify(struct stream_interface * si)428 static void stream_int_notify(struct stream_interface *si)
429 {
430 	struct channel *ic = si_ic(si);
431 	struct channel *oc = si_oc(si);
432 	struct stream_interface *sio = si_opposite(si);
433 	struct task *task = si_task(si);
434 
435 	/* process consumer side */
436 	if (channel_is_empty(oc)) {
437 		struct connection *conn = objt_cs(si->end) ? objt_cs(si->end)->conn : NULL;
438 
439 		if (((oc->flags & (CF_SHUTW|CF_SHUTW_NOW)) == CF_SHUTW_NOW) &&
440 		    (si->state == SI_ST_EST) && (!conn || !(conn->flags & (CO_FL_WAIT_XPRT | CO_FL_EARLY_SSL_HS))))
441 			si_shutw(si);
442 		oc->wex = TICK_ETERNITY;
443 	}
444 
445 	/* indicate that we may be waiting for data from the output channel or
446 	 * we're about to close and can't expect more data if SHUTW_NOW is there.
447 	 */
448 	if (!(oc->flags & (CF_SHUTW|CF_SHUTW_NOW)))
449 		si->flags |= SI_FL_WAIT_DATA;
450 	else if ((oc->flags & (CF_SHUTW|CF_SHUTW_NOW)) == CF_SHUTW_NOW)
451 		si->flags &= ~SI_FL_WAIT_DATA;
452 
453 	/* update OC timeouts and wake the other side up if it's waiting for room */
454 	if (oc->flags & CF_WRITE_ACTIVITY) {
455 		if ((oc->flags & (CF_SHUTW|CF_WRITE_PARTIAL)) == CF_WRITE_PARTIAL &&
456 		    !channel_is_empty(oc))
457 			if (tick_isset(oc->wex))
458 				oc->wex = tick_add_ifset(now_ms, oc->wto);
459 
460 		if (!(si->flags & SI_FL_INDEP_STR))
461 			if (tick_isset(ic->rex))
462 				ic->rex = tick_add_ifset(now_ms, ic->rto);
463 	}
464 
465 	if (oc->flags & CF_DONT_READ)
466 		si_rx_chan_blk(sio);
467 	else
468 		si_rx_chan_rdy(sio);
469 
470 	/* Notify the other side when we've injected data into the IC that
471 	 * needs to be forwarded. We can do fast-forwarding as soon as there
472 	 * are output data, but we avoid doing this if some of the data are
473 	 * not yet scheduled for being forwarded, because it is very likely
474 	 * that it will be done again immediately afterwards once the following
475 	 * data are parsed (eg: HTTP chunking). We only SI_FL_RXBLK_ROOM once
476 	 * we've emptied *some* of the output buffer, and not just when there
477 	 * is available room, because applets are often forced to stop before
478 	 * the buffer is full. We must not stop based on input data alone because
479 	 * an HTTP parser might need more data to complete the parsing.
480 	 */
481 	if (!channel_is_empty(ic) &&
482 	    (sio->flags & SI_FL_WAIT_DATA) &&
483 	    (!(ic->flags & CF_EXPECT_MORE) || c_full(ic) || ci_data(ic) == 0 || ic->pipe)) {
484 		int new_len, last_len;
485 
486 		last_len = co_data(ic);
487 		if (ic->pipe)
488 			last_len += ic->pipe->data;
489 
490 		si_chk_snd(sio);
491 
492 		new_len = co_data(ic);
493 		if (ic->pipe)
494 			new_len += ic->pipe->data;
495 
496 		/* check if the consumer has freed some space either in the
497 		 * buffer or in the pipe.
498 		 */
499 		if (new_len < last_len)
500 			si_rx_room_rdy(si);
501 	}
502 
503 	if (!(ic->flags & CF_DONT_READ))
504 		si_rx_chan_rdy(si);
505 
506 	si_chk_rcv(si);
507 	si_chk_rcv(sio);
508 
509 	if (si_rx_blocked(si)) {
510 		ic->rex = TICK_ETERNITY;
511 	}
512 	else if ((ic->flags & (CF_SHUTR|CF_READ_PARTIAL)) == CF_READ_PARTIAL) {
513 		/* we must re-enable reading if si_chk_snd() has freed some space */
514 		if (!(ic->flags & CF_READ_NOEXP) && tick_isset(ic->rex))
515 			ic->rex = tick_add_ifset(now_ms, ic->rto);
516 	}
517 
518 	/* wake the task up only when needed */
519 	if (/* changes on the production side */
520 	    (ic->flags & (CF_READ_NULL|CF_READ_ERROR)) ||
521 	    !si_state_in(si->state, SI_SB_CON|SI_SB_RDY|SI_SB_EST) ||
522 	    (si->flags & SI_FL_ERR) ||
523 	    ((ic->flags & CF_READ_PARTIAL) &&
524 	     ((ic->flags & CF_EOI) || !ic->to_forward || sio->state != SI_ST_EST)) ||
525 
526 	    /* changes on the consumption side */
527 	    (oc->flags & (CF_WRITE_NULL|CF_WRITE_ERROR)) ||
528 	    ((oc->flags & CF_WRITE_ACTIVITY) &&
529 	     ((oc->flags & CF_SHUTW) ||
530 	      (((oc->flags & CF_WAKE_WRITE) ||
531 		!(oc->flags & (CF_AUTO_CLOSE|CF_SHUTW_NOW|CF_SHUTW))) &&
532 	       (sio->state != SI_ST_EST ||
533 	        (channel_is_empty(oc) && !oc->to_forward)))))) {
534 		task_wakeup(task, TASK_WOKEN_IO);
535 	}
536 	else {
537 		/* Update expiration date for the task and requeue it */
538 		task->expire = tick_first((tick_is_expired(task->expire, now_ms) ? 0 : task->expire),
539 					  tick_first(tick_first(ic->rex, ic->wex),
540 						     tick_first(oc->rex, oc->wex)));
541 
542 		task->expire = tick_first(task->expire, ic->analyse_exp);
543 		task->expire = tick_first(task->expire, oc->analyse_exp);
544 
545 		if (si->exp)
546 			task->expire = tick_first(task->expire, si->exp);
547 
548 		if (sio->exp)
549 			task->expire = tick_first(task->expire, sio->exp);
550 
551 		task_queue(task);
552 	}
553 	if (ic->flags & CF_READ_ACTIVITY)
554 		ic->flags &= ~CF_READ_DONTWAIT;
555 }
556 
557 
558 /* Called by I/O handlers after completion.. It propagates
559  * connection flags to the stream interface, updates the stream (which may or
560  * may not take this opportunity to try to forward data), then update the
561  * connection's polling based on the channels and stream interface's final
562  * states. The function always returns 0.
563  */
si_cs_process(struct conn_stream * cs)564 static int si_cs_process(struct conn_stream *cs)
565 {
566 	struct connection *conn = cs->conn;
567 	struct stream_interface *si = cs->data;
568 	struct channel *ic = si_ic(si);
569 	struct channel *oc = si_oc(si);
570 
571 	/* If we have data to send, try it now */
572 	if (!channel_is_empty(oc) && !(si->wait_event.events & SUB_RETRY_SEND))
573 		si_cs_send(cs);
574 
575 	/* First step, report to the stream-int what was detected at the
576 	 * connection layer : errors and connection establishment.
577 	 * Only add SI_FL_ERR if we're connected, or we're attempting to
578 	 * connect, we may get there because we got woken up, but only run
579 	 * after process_stream() noticed there were an error, and decided
580 	 * to retry to connect, the connection may still have CO_FL_ERROR,
581 	 * and we don't want to add SI_FL_ERR back
582 	 *
583 	 * Note: This test is only required because si_cs_process is also the SI
584 	 *       wake callback. Otherwise si_cs_recv()/si_cs_send() already take
585 	 *       care of it.
586 	 */
587 	if (si->state >= SI_ST_CON &&
588 	    (conn->flags & CO_FL_ERROR || cs->flags & CS_FL_ERROR))
589 		si->flags |= SI_FL_ERR;
590 
591 	/* If we had early data, and the handshake ended, then
592 	 * we can remove the flag, and attempt to wake the task up,
593 	 * in the event there's an analyser waiting for the end of
594 	 * the handshake.
595 	 */
596 	if (!(conn->flags & (CO_FL_WAIT_XPRT | CO_FL_EARLY_SSL_HS)) &&
597 	    (cs->flags & CS_FL_WAIT_FOR_HS)) {
598 		cs->flags &= ~CS_FL_WAIT_FOR_HS;
599 		task_wakeup(si_task(si), TASK_WOKEN_MSG);
600 	}
601 
602 	if (!si_state_in(si->state, SI_SB_EST|SI_SB_DIS|SI_SB_CLO) &&
603 	    (conn->flags & CO_FL_WAIT_XPRT) == 0) {
604 		si->exp = TICK_ETERNITY;
605 		oc->flags |= CF_WRITE_NULL;
606 		if (si->state == SI_ST_CON)
607 			si->state = SI_ST_RDY;
608 	}
609 
610 	/* Report EOI on the channel if it was reached from the mux point of
611 	 * view.
612 	 *
613 	 * Note: This test is only required because si_cs_process is also the SI
614 	 *       wake callback. Otherwise si_cs_recv()/si_cs_send() already take
615 	 *       care of it.
616 	 */
617 	if ((cs->flags & CS_FL_EOI) && !(ic->flags & CF_EOI))
618 		ic->flags |= (CF_EOI|CF_READ_PARTIAL);
619 
620 	/* Second step : update the stream-int and channels, try to forward any
621 	 * pending data, then possibly wake the stream up based on the new
622 	 * stream-int status.
623 	 */
624 	stream_int_notify(si);
625 	stream_release_buffers(si_strm(si));
626 	return 0;
627 }
628 
629 /*
630  * This function is called to send buffer data to a stream socket.
631  * It calls the mux layer's snd_buf function. It relies on the
632  * caller to commit polling changes. The caller should check conn->flags
633  * for errors.
634  */
si_cs_send(struct conn_stream * cs)635 int si_cs_send(struct conn_stream *cs)
636 {
637 	struct connection *conn = cs->conn;
638 	struct stream_interface *si = cs->data;
639 	struct channel *oc = si_oc(si);
640 	int ret;
641 	int did_send = 0;
642 
643 	if (conn->flags & CO_FL_ERROR || cs->flags & (CS_FL_ERROR|CS_FL_ERR_PENDING)) {
644 		/* We're probably there because the tasklet was woken up,
645 		 * but process_stream() ran before, detected there were an
646 		 * error and put the si back to SI_ST_TAR. There's still
647 		 * CO_FL_ERROR on the connection but we don't want to add
648 		 * SI_FL_ERR back, so give up
649 		 */
650 		if (si->state < SI_ST_CON)
651 			return 0;
652 		si->flags |= SI_FL_ERR;
653 		return 1;
654 	}
655 
656 	/* We're already waiting to be able to send, give up */
657 	if (si->wait_event.events & SUB_RETRY_SEND)
658 		return 0;
659 
660 	/* we might have been called just after an asynchronous shutw */
661 	if (oc->flags & CF_SHUTW)
662 		return 1;
663 
664 	/* we must wait because the mux is not installed yet */
665 	if (!conn->mux)
666 		return 0;
667 
668 	if (oc->pipe && conn->xprt->snd_pipe && conn->mux->snd_pipe) {
669 		ret = conn->mux->snd_pipe(cs, oc->pipe);
670 		if (ret > 0)
671 			did_send = 1;
672 
673 		if (!oc->pipe->data) {
674 			put_pipe(oc->pipe);
675 			oc->pipe = NULL;
676 		}
677 
678 		if (oc->pipe)
679 			goto end;
680 	}
681 
682 	/* At this point, the pipe is empty, but we may still have data pending
683 	 * in the normal buffer.
684 	 */
685 	if (co_data(oc)) {
686 		/* when we're here, we already know that there is no spliced
687 		 * data left, and that there are sendable buffered data.
688 		 */
689 
690 		/* check if we want to inform the kernel that we're interested in
691 		 * sending more data after this call. We want this if :
692 		 *  - we're about to close after this last send and want to merge
693 		 *    the ongoing FIN with the last segment.
694 		 *  - we know we can't send everything at once and must get back
695 		 *    here because of unaligned data
696 		 *  - there is still a finite amount of data to forward
697 		 * The test is arranged so that the most common case does only 2
698 		 * tests.
699 		 */
700 		unsigned int send_flag = 0;
701 
702 		if ((!(oc->flags & (CF_NEVER_WAIT|CF_SEND_DONTWAIT)) &&
703 		     ((oc->to_forward && oc->to_forward != CHN_INFINITE_FORWARD) ||
704 		      (oc->flags & CF_EXPECT_MORE) ||
705 		      (IS_HTX_STRM(si_strm(si)) &&
706 		       (!(oc->flags & (CF_EOI|CF_SHUTR)) && htx_expect_more(htxbuf(&oc->buf)))))) ||
707 		    ((oc->flags & CF_ISRESP) &&
708 		     ((oc->flags & (CF_AUTO_CLOSE|CF_SHUTW_NOW)) == (CF_AUTO_CLOSE|CF_SHUTW_NOW))))
709 			send_flag |= CO_SFL_MSG_MORE;
710 
711 		if (oc->flags & CF_STREAMER)
712 			send_flag |= CO_SFL_STREAMER;
713 
714 		if ((si->flags & SI_FL_L7_RETRY) && !b_data(&si->l7_buffer)) {
715 			struct stream *s = si_strm(si);
716 			/* If we want to be able to do L7 retries, copy
717 			 * the data we're about to send, so that we are able
718 			 * to resend them if needed
719 			 */
720 			/* Try to allocate a buffer if we had none.
721 			 * If it fails, the next test will just
722 			 * disable the l7 retries by setting
723 			 * l7_conn_retries to 0.
724 			 */
725 			if (!s->txn || (s->txn->req.msg_state != HTTP_MSG_DONE))
726 				si->flags &= ~SI_FL_L7_RETRY;
727 			else {
728 				if (b_is_null(&si->l7_buffer))
729 					b_alloc(&si->l7_buffer);
730 				if (b_is_null(&si->l7_buffer))
731 					si->flags &= ~SI_FL_L7_RETRY;
732 				else {
733 					memcpy(b_orig(&si->l7_buffer),
734 					       b_orig(&oc->buf),
735 					       b_size(&oc->buf));
736 					si->l7_buffer.head = co_data(oc);
737 					b_add(&si->l7_buffer, co_data(oc));
738 				}
739 
740 			}
741 		}
742 
743 		ret = cs->conn->mux->snd_buf(cs, &oc->buf, co_data(oc), send_flag);
744 		if (ret > 0) {
745 			did_send = 1;
746 			co_set_data(oc, co_data(oc) - ret);
747 			c_realign_if_empty(oc);
748 
749 			if (!co_data(oc)) {
750 				/* Always clear both flags once everything has been sent, they're one-shot */
751 				oc->flags &= ~(CF_EXPECT_MORE | CF_SEND_DONTWAIT);
752 			}
753 			/* if some data remain in the buffer, it's only because the
754 			 * system buffers are full, we will try next time.
755 			 */
756 		}
757 	}
758 
759  end:
760 	if (did_send) {
761 		oc->flags |= CF_WRITE_PARTIAL | CF_WROTE_DATA;
762 		if (si->state == SI_ST_CON)
763 			si->state = SI_ST_RDY;
764 
765 		si_rx_room_rdy(si_opposite(si));
766 	}
767 
768 	if (conn->flags & CO_FL_ERROR || cs->flags & (CS_FL_ERROR|CS_FL_ERR_PENDING)) {
769 		si->flags |= SI_FL_ERR;
770 		return 1;
771 	}
772 
773 	/* We couldn't send all of our data, let the mux know we'd like to send more */
774 	if (!channel_is_empty(oc))
775 		conn->mux->subscribe(cs, SUB_RETRY_SEND, &si->wait_event);
776 	return did_send;
777 }
778 
779 /* This is the ->process() function for any stream-interface's wait_event task.
780  * It's assigned during the stream-interface's initialization, for any type of
781  * stream interface. Thus it is always safe to perform a tasklet_wakeup() on a
782  * stream interface, as the presence of the CS is checked there.
783  */
si_cs_io_cb(struct task * t,void * ctx,unsigned short state)784 struct task *si_cs_io_cb(struct task *t, void *ctx, unsigned short state)
785 {
786 	struct stream_interface *si = ctx;
787 	struct conn_stream *cs = objt_cs(si->end);
788 	int ret = 0;
789 
790 	if (!cs)
791 		return NULL;
792 
793 	if (!(si->wait_event.events & SUB_RETRY_SEND) && !channel_is_empty(si_oc(si)))
794 		ret = si_cs_send(cs);
795 	if (!(si->wait_event.events & SUB_RETRY_RECV))
796 		ret |= si_cs_recv(cs);
797 	if (ret != 0)
798 		si_cs_process(cs);
799 
800 	stream_release_buffers(si_strm(si));
801 	return (NULL);
802 }
803 
804 /* This function is designed to be called from within the stream handler to
805  * update the input channel's expiration timer and the stream interface's
806  * Rx flags based on the channel's flags. It needs to be called only once
807  * after the channel's flags have settled down, and before they are cleared,
808  * though it doesn't harm to call it as often as desired (it just slightly
809  * hurts performance). It must not be called from outside of the stream
810  * handler, as what it does will be used to compute the stream task's
811  * expiration.
812  */
si_update_rx(struct stream_interface * si)813 void si_update_rx(struct stream_interface *si)
814 {
815 	struct channel *ic = si_ic(si);
816 
817 	if (ic->flags & CF_SHUTR) {
818 		si_rx_shut_blk(si);
819 		return;
820 	}
821 
822 	/* Read not closed, update FD status and timeout for reads */
823 	if (ic->flags & CF_DONT_READ)
824 		si_rx_chan_blk(si);
825 	else
826 		si_rx_chan_rdy(si);
827 
828 	if (!channel_is_empty(ic) || !channel_may_recv(ic)) {
829 		/* stop reading, imposed by channel's policy or contents */
830 		si_rx_room_blk(si);
831 	}
832 	else {
833 		/* (re)start reading and update timeout. Note: we don't recompute the timeout
834 		 * everytime we get here, otherwise it would risk never to expire. We only
835 		 * update it if is was not yet set. The stream socket handler will already
836 		 * have updated it if there has been a completed I/O.
837 		 */
838 		si_rx_room_rdy(si);
839 	}
840 	if (si->flags & SI_FL_RXBLK_ANY & ~SI_FL_RX_WAIT_EP)
841 		ic->rex = TICK_ETERNITY;
842 	else if (!(ic->flags & CF_READ_NOEXP) && !tick_isset(ic->rex))
843 		ic->rex = tick_add_ifset(now_ms, ic->rto);
844 
845 	si_chk_rcv(si);
846 }
847 
848 /* This function is designed to be called from within the stream handler to
849  * update the output channel's expiration timer and the stream interface's
850  * Tx flags based on the channel's flags. It needs to be called only once
851  * after the channel's flags have settled down, and before they are cleared,
852  * though it doesn't harm to call it as often as desired (it just slightly
853  * hurts performance). It must not be called from outside of the stream
854  * handler, as what it does will be used to compute the stream task's
855  * expiration.
856  */
si_update_tx(struct stream_interface * si)857 void si_update_tx(struct stream_interface *si)
858 {
859 	struct channel *oc = si_oc(si);
860 	struct channel *ic = si_ic(si);
861 
862 	if (oc->flags & CF_SHUTW)
863 		return;
864 
865 	/* Write not closed, update FD status and timeout for writes */
866 	if (channel_is_empty(oc)) {
867 		/* stop writing */
868 		if (!(si->flags & SI_FL_WAIT_DATA)) {
869 			if ((oc->flags & CF_SHUTW_NOW) == 0)
870 				si->flags |= SI_FL_WAIT_DATA;
871 			oc->wex = TICK_ETERNITY;
872 		}
873 		return;
874 	}
875 
876 	/* (re)start writing and update timeout. Note: we don't recompute the timeout
877 	 * everytime we get here, otherwise it would risk never to expire. We only
878 	 * update it if is was not yet set. The stream socket handler will already
879 	 * have updated it if there has been a completed I/O.
880 	 */
881 	si->flags &= ~SI_FL_WAIT_DATA;
882 	if (!tick_isset(oc->wex)) {
883 		oc->wex = tick_add_ifset(now_ms, oc->wto);
884 		if (tick_isset(ic->rex) && !(si->flags & SI_FL_INDEP_STR)) {
885 			/* Note: depending on the protocol, we don't know if we're waiting
886 			 * for incoming data or not. So in order to prevent the socket from
887 			 * expiring read timeouts during writes, we refresh the read timeout,
888 			 * except if it was already infinite or if we have explicitly setup
889 			 * independent streams.
890 			 */
891 			ic->rex = tick_add_ifset(now_ms, ic->rto);
892 		}
893 	}
894 }
895 
896 /* perform a synchronous send() for the stream interface. The CF_WRITE_NULL and
897  * CF_WRITE_PARTIAL flags are cleared prior to the attempt, and will possibly
898  * be updated in case of success.
899  */
si_sync_send(struct stream_interface * si)900 void si_sync_send(struct stream_interface *si)
901 {
902 	struct channel *oc = si_oc(si);
903 	struct conn_stream *cs;
904 
905 	oc->flags &= ~(CF_WRITE_NULL|CF_WRITE_PARTIAL);
906 
907 	if (oc->flags & CF_SHUTW)
908 		return;
909 
910 	if (channel_is_empty(oc))
911 		return;
912 
913 	if (!si_state_in(si->state, SI_SB_CON|SI_SB_RDY|SI_SB_EST))
914 		return;
915 
916 	cs = objt_cs(si->end);
917 	if (!cs || !cs->conn->mux)
918 		return;
919 
920 	si_cs_send(cs);
921 }
922 
923 /* Updates at once the channel flags, and timers of both stream interfaces of a
924  * same stream, to complete the work after the analysers, then updates the data
925  * layer below. This will ensure that any synchronous update performed at the
926  * data layer will be reflected in the channel flags and/or stream-interface.
927  * Note that this does not change the stream interface's current state, though
928  * it updates the previous state to the current one.
929  */
si_update_both(struct stream_interface * si_f,struct stream_interface * si_b)930 void si_update_both(struct stream_interface *si_f, struct stream_interface *si_b)
931 {
932 	struct channel *req = si_ic(si_f);
933 	struct channel *res = si_oc(si_f);
934 
935 	req->flags &= ~(CF_READ_NULL|CF_READ_PARTIAL|CF_READ_ATTACHED|CF_WRITE_NULL|CF_WRITE_PARTIAL);
936 	res->flags &= ~(CF_READ_NULL|CF_READ_PARTIAL|CF_READ_ATTACHED|CF_WRITE_NULL|CF_WRITE_PARTIAL);
937 
938 	si_f->prev_state = si_f->state;
939 	si_b->prev_state = si_b->state;
940 
941 	/* let's recompute both sides states */
942 	if (si_state_in(si_f->state, SI_SB_RDY|SI_SB_EST))
943 		si_update(si_f);
944 
945 	if (si_state_in(si_b->state, SI_SB_RDY|SI_SB_EST))
946 		si_update(si_b);
947 
948 	/* stream ints are processed outside of process_stream() and must be
949 	 * handled at the latest moment.
950 	 */
951 	if (obj_type(si_f->end) == OBJ_TYPE_APPCTX &&
952 	    ((si_rx_endp_ready(si_f) && !si_rx_blocked(si_f)) ||
953 	     (si_tx_endp_ready(si_f) && !si_tx_blocked(si_f))))
954 		appctx_wakeup(si_appctx(si_f));
955 
956 	if (obj_type(si_b->end) == OBJ_TYPE_APPCTX &&
957 	    ((si_rx_endp_ready(si_b) && !si_rx_blocked(si_b)) ||
958 	     (si_tx_endp_ready(si_b) && !si_tx_blocked(si_b))))
959 		appctx_wakeup(si_appctx(si_b));
960 }
961 
962 /*
963  * This function performs a shutdown-read on a stream interface attached to
964  * a connection in a connected or init state (it does nothing for other
965  * states). It either shuts the read side or marks itself as closed. The buffer
966  * flags are updated to reflect the new state. If the stream interface has
967  * SI_FL_NOHALF, we also forward the close to the write side. If a control
968  * layer is defined, then it is supposed to be a socket layer and file
969  * descriptors are then shutdown or closed accordingly. The function
970  * automatically disables polling if needed.
971  */
stream_int_shutr_conn(struct stream_interface * si)972 static void stream_int_shutr_conn(struct stream_interface *si)
973 {
974 	struct conn_stream *cs = __objt_cs(si->end);
975 	struct channel *ic = si_ic(si);
976 
977 	si_rx_shut_blk(si);
978 	if (ic->flags & CF_SHUTR)
979 		return;
980 	ic->flags |= CF_SHUTR;
981 	ic->rex = TICK_ETERNITY;
982 
983 	if (!si_state_in(si->state, SI_SB_CON|SI_SB_RDY|SI_SB_EST))
984 		return;
985 
986 	if (si->flags & SI_FL_KILL_CONN)
987 		cs->flags |= CS_FL_KILL_CONN;
988 
989 	if (si_oc(si)->flags & CF_SHUTW) {
990 		cs_close(cs);
991 		si->state = SI_ST_DIS;
992 		si->exp = TICK_ETERNITY;
993 	}
994 	else if (si->flags & SI_FL_NOHALF) {
995 		/* we want to immediately forward this close to the write side */
996 		return stream_int_shutw_conn(si);
997 	}
998 }
999 
1000 /*
1001  * This function performs a shutdown-write on a stream interface attached to
1002  * a connection in a connected or init state (it does nothing for other
1003  * states). It either shuts the write side or marks itself as closed. The
1004  * buffer flags are updated to reflect the new state.  It does also close
1005  * everything if the SI was marked as being in error state. If there is a
1006  * data-layer shutdown, it is called.
1007  */
stream_int_shutw_conn(struct stream_interface * si)1008 static void stream_int_shutw_conn(struct stream_interface *si)
1009 {
1010 	struct conn_stream *cs = __objt_cs(si->end);
1011 	struct connection *conn = cs->conn;
1012 	struct channel *ic = si_ic(si);
1013 	struct channel *oc = si_oc(si);
1014 
1015 	oc->flags &= ~CF_SHUTW_NOW;
1016 	if (oc->flags & CF_SHUTW)
1017 		return;
1018 	oc->flags |= CF_SHUTW;
1019 	oc->wex = TICK_ETERNITY;
1020 	si_done_get(si);
1021 
1022 	if (tick_isset(si->hcto)) {
1023 		ic->rto = si->hcto;
1024 		ic->rex = tick_add(now_ms, ic->rto);
1025 	}
1026 
1027 	switch (si->state) {
1028 	case SI_ST_RDY:
1029 	case SI_ST_EST:
1030 		/* we have to shut before closing, otherwise some short messages
1031 		 * may never leave the system, especially when there are remaining
1032 		 * unread data in the socket input buffer, or when nolinger is set.
1033 		 * However, if SI_FL_NOLINGER is explicitly set, we know there is
1034 		 * no risk so we close both sides immediately.
1035 		 */
1036 		if (si->flags & SI_FL_KILL_CONN)
1037 			cs->flags |= CS_FL_KILL_CONN;
1038 
1039 		if (si->flags & SI_FL_ERR) {
1040 			/* quick close, the socket is already shut anyway */
1041 		}
1042 		else if (si->flags & SI_FL_NOLINGER) {
1043 			/* unclean data-layer shutdown, typically an aborted request
1044 			 * or a forwarded shutdown from a client to a server due to
1045 			 * option abortonclose. No need for the TLS layer to try to
1046 			 * emit a shutdown message.
1047 			 */
1048 			cs_shutw(cs, CS_SHW_SILENT);
1049 		}
1050 		else {
1051 			/* clean data-layer shutdown. This only happens on the
1052 			 * frontend side, or on the backend side when forwarding
1053 			 * a client close in TCP mode or in HTTP TUNNEL mode
1054 			 * while option abortonclose is set. We want the TLS
1055 			 * layer to try to signal it to the peer before we close.
1056 			 */
1057 			cs_shutw(cs, CS_SHW_NORMAL);
1058 
1059 			if (!(ic->flags & (CF_SHUTR|CF_DONT_READ))) {
1060 				/* OK just a shutw, but we want the caller
1061 				 * to disable polling on this FD if exists.
1062 				 */
1063 				conn_cond_update_polling(conn);
1064 				return;
1065 			}
1066 		}
1067 
1068 		/* fall through */
1069 	case SI_ST_CON:
1070 		/* we may have to close a pending connection, and mark the
1071 		 * response buffer as shutr
1072 		 */
1073 		if (si->flags & SI_FL_KILL_CONN)
1074 			cs->flags |= CS_FL_KILL_CONN;
1075 		cs_close(cs);
1076 		/* fall through */
1077 	case SI_ST_CER:
1078 	case SI_ST_QUE:
1079 	case SI_ST_TAR:
1080 		si->state = SI_ST_DIS;
1081 		/* fall through */
1082 	default:
1083 		si->flags &= ~SI_FL_NOLINGER;
1084 		si_rx_shut_blk(si);
1085 		ic->flags |= CF_SHUTR;
1086 		ic->rex = TICK_ETERNITY;
1087 		si->exp = TICK_ETERNITY;
1088 	}
1089 }
1090 
1091 /* This function is used for inter-stream-interface calls. It is called by the
1092  * consumer to inform the producer side that it may be interested in checking
1093  * for free space in the buffer. Note that it intentionally does not update
1094  * timeouts, so that we can still check them later at wake-up. This function is
1095  * dedicated to connection-based stream interfaces.
1096  */
stream_int_chk_rcv_conn(struct stream_interface * si)1097 static void stream_int_chk_rcv_conn(struct stream_interface *si)
1098 {
1099 	/* (re)start reading */
1100 	if (si_state_in(si->state, SI_SB_CON|SI_SB_RDY|SI_SB_EST))
1101 		tasklet_wakeup(si->wait_event.tasklet);
1102 }
1103 
1104 
1105 /* This function is used for inter-stream-interface calls. It is called by the
1106  * producer to inform the consumer side that it may be interested in checking
1107  * for data in the buffer. Note that it intentionally does not update timeouts,
1108  * so that we can still check them later at wake-up.
1109  */
stream_int_chk_snd_conn(struct stream_interface * si)1110 static void stream_int_chk_snd_conn(struct stream_interface *si)
1111 {
1112 	struct channel *oc = si_oc(si);
1113 	struct conn_stream *cs = __objt_cs(si->end);
1114 
1115 	if (unlikely(!si_state_in(si->state, SI_SB_CON|SI_SB_RDY|SI_SB_EST) ||
1116 	    (oc->flags & CF_SHUTW)))
1117 		return;
1118 
1119 	if (unlikely(channel_is_empty(oc)))  /* called with nothing to send ! */
1120 		return;
1121 
1122 	if (!oc->pipe &&                          /* spliced data wants to be forwarded ASAP */
1123 	    !(si->flags & SI_FL_WAIT_DATA))       /* not waiting for data */
1124 		return;
1125 
1126 	if (!(si->wait_event.events & SUB_RETRY_SEND) && !channel_is_empty(si_oc(si)))
1127 		si_cs_send(cs);
1128 
1129 	if (cs->flags & (CS_FL_ERROR|CS_FL_ERR_PENDING) || cs->conn->flags & CO_FL_ERROR) {
1130 		/* Write error on the file descriptor */
1131 		if (si->state >= SI_ST_CON)
1132 			si->flags |= SI_FL_ERR;
1133 		goto out_wakeup;
1134 	}
1135 
1136 	/* OK, so now we know that some data might have been sent, and that we may
1137 	 * have to poll first. We have to do that too if the buffer is not empty.
1138 	 */
1139 	if (channel_is_empty(oc)) {
1140 		/* the connection is established but we can't write. Either the
1141 		 * buffer is empty, or we just refrain from sending because the
1142 		 * ->o limit was reached. Maybe we just wrote the last
1143 		 * chunk and need to close.
1144 		 */
1145 		if (((oc->flags & (CF_SHUTW|CF_AUTO_CLOSE|CF_SHUTW_NOW)) ==
1146 		     (CF_AUTO_CLOSE|CF_SHUTW_NOW)) &&
1147 		    si_state_in(si->state, SI_SB_RDY|SI_SB_EST)) {
1148 			si_shutw(si);
1149 			goto out_wakeup;
1150 		}
1151 
1152 		if ((oc->flags & (CF_SHUTW|CF_SHUTW_NOW)) == 0)
1153 			si->flags |= SI_FL_WAIT_DATA;
1154 		oc->wex = TICK_ETERNITY;
1155 	}
1156 	else {
1157 		/* Otherwise there are remaining data to be sent in the buffer,
1158 		 * which means we have to poll before doing so.
1159 		 */
1160 		si->flags &= ~SI_FL_WAIT_DATA;
1161 		if (!tick_isset(oc->wex))
1162 			oc->wex = tick_add_ifset(now_ms, oc->wto);
1163 	}
1164 
1165 	if (likely(oc->flags & CF_WRITE_ACTIVITY)) {
1166 		struct channel *ic = si_ic(si);
1167 
1168 		/* update timeout if we have written something */
1169 		if ((oc->flags & (CF_SHUTW|CF_WRITE_PARTIAL)) == CF_WRITE_PARTIAL &&
1170 		    !channel_is_empty(oc))
1171 			oc->wex = tick_add_ifset(now_ms, oc->wto);
1172 
1173 		if (tick_isset(ic->rex) && !(si->flags & SI_FL_INDEP_STR)) {
1174 			/* Note: to prevent the client from expiring read timeouts
1175 			 * during writes, we refresh it. We only do this if the
1176 			 * interface is not configured for "independent streams",
1177 			 * because for some applications it's better not to do this,
1178 			 * for instance when continuously exchanging small amounts
1179 			 * of data which can full the socket buffers long before a
1180 			 * write timeout is detected.
1181 			 */
1182 			ic->rex = tick_add_ifset(now_ms, ic->rto);
1183 		}
1184 	}
1185 
1186 	/* in case of special condition (error, shutdown, end of write...), we
1187 	 * have to notify the task.
1188 	 */
1189 	if (likely((oc->flags & (CF_WRITE_NULL|CF_WRITE_ERROR|CF_SHUTW)) ||
1190 	          ((oc->flags & CF_WAKE_WRITE) &&
1191 	           ((channel_is_empty(oc) && !oc->to_forward) ||
1192 	            !si_state_in(si->state, SI_SB_EST))))) {
1193 	out_wakeup:
1194 		if (!(si->flags & SI_FL_DONT_WAKE))
1195 			task_wakeup(si_task(si), TASK_WOKEN_IO);
1196 	}
1197 }
1198 
1199 /*
1200  * This is the callback which is called by the connection layer to receive data
1201  * into the buffer from the connection. It iterates over the mux layer's
1202  * rcv_buf function.
1203  */
si_cs_recv(struct conn_stream * cs)1204 int si_cs_recv(struct conn_stream *cs)
1205 {
1206 	struct connection *conn = cs->conn;
1207 	struct stream_interface *si = cs->data;
1208 	struct channel *ic = si_ic(si);
1209 	int ret, max, cur_read = 0;
1210 	int read_poll = MAX_READ_POLL_LOOPS;
1211 	int flags = 0;
1212 
1213 	/* If not established yet, do nothing. */
1214 	if (si->state != SI_ST_EST)
1215 		return 0;
1216 
1217 	/* If another call to si_cs_recv() failed, and we subscribed to
1218 	 * recv events already, give up now.
1219 	 */
1220 	if (si->wait_event.events & SUB_RETRY_RECV)
1221 		return 0;
1222 
1223 	/* maybe we were called immediately after an asynchronous shutr */
1224 	if (ic->flags & CF_SHUTR)
1225 		return 1;
1226 
1227 	/* we must wait because the mux is not installed yet */
1228 	if (!conn->mux)
1229 		return 0;
1230 
1231 	/* stop here if we reached the end of data */
1232 	if (cs->flags & CS_FL_EOS)
1233 		goto end_recv;
1234 
1235 	/* stop immediately on errors. Note that we DON'T want to stop on
1236 	 * POLL_ERR, as the poller might report a write error while there
1237 	 * are still data available in the recv buffer. This typically
1238 	 * happens when we send too large a request to a backend server
1239 	 * which rejects it before reading it all.
1240 	 */
1241 	if (!(cs->flags & CS_FL_RCV_MORE)) {
1242 		if (!conn_xprt_ready(conn))
1243 			return 0;
1244 		if (conn->flags & CO_FL_ERROR || cs->flags & CS_FL_ERROR)
1245 			goto end_recv;
1246 	}
1247 
1248 	/* prepare to detect if the mux needs more room */
1249 	cs->flags &= ~CS_FL_WANT_ROOM;
1250 
1251 	if ((ic->flags & (CF_STREAMER | CF_STREAMER_FAST)) && !co_data(ic) &&
1252 	    global.tune.idle_timer &&
1253 	    (unsigned short)(now_ms - ic->last_read) >= global.tune.idle_timer) {
1254 		/* The buffer was empty and nothing was transferred for more
1255 		 * than one second. This was caused by a pause and not by
1256 		 * congestion. Reset any streaming mode to reduce latency.
1257 		 */
1258 		ic->xfer_small = 0;
1259 		ic->xfer_large = 0;
1260 		ic->flags &= ~(CF_STREAMER | CF_STREAMER_FAST);
1261 	}
1262 
1263 	/* First, let's see if we may splice data across the channel without
1264 	 * using a buffer.
1265 	 */
1266 	if (cs->flags & CS_FL_MAY_SPLICE &&
1267 	    (ic->pipe || ic->to_forward >= MIN_SPLICE_FORWARD) &&
1268 	    ic->flags & CF_KERN_SPLICING) {
1269 		if (c_data(ic)) {
1270 			/* We're embarrassed, there are already data pending in
1271 			 * the buffer and we don't want to have them at two
1272 			 * locations at a time. Let's indicate we need some
1273 			 * place and ask the consumer to hurry.
1274 			 */
1275 			flags |= CO_RFL_BUF_FLUSH;
1276 			goto abort_splice;
1277 		}
1278 
1279 		if (unlikely(ic->pipe == NULL)) {
1280 			if (pipes_used >= global.maxpipes || !(ic->pipe = get_pipe())) {
1281 				ic->flags &= ~CF_KERN_SPLICING;
1282 				goto abort_splice;
1283 			}
1284 		}
1285 
1286 		ret = conn->mux->rcv_pipe(cs, ic->pipe, ic->to_forward);
1287 		if (ret < 0) {
1288 			/* splice not supported on this end, let's disable it */
1289 			ic->flags &= ~CF_KERN_SPLICING;
1290 			goto abort_splice;
1291 		}
1292 
1293 		if (ret > 0) {
1294 			if (ic->to_forward != CHN_INFINITE_FORWARD)
1295 				ic->to_forward -= ret;
1296 			ic->total += ret;
1297 			cur_read += ret;
1298 			ic->flags |= CF_READ_PARTIAL;
1299 		}
1300 
1301 		if (conn->flags & CO_FL_ERROR || cs->flags & (CS_FL_EOS|CS_FL_ERROR))
1302 			goto end_recv;
1303 
1304 		if (conn->flags & CO_FL_WAIT_ROOM) {
1305 			/* the pipe is full or we have read enough data that it
1306 			 * could soon be full. Let's stop before needing to poll.
1307 			 */
1308 			si_rx_room_blk(si);
1309 			goto done_recv;
1310 		}
1311 
1312 		/* splice not possible (anymore), let's go on on standard copy */
1313 	}
1314 
1315  abort_splice:
1316 	if (ic->pipe && unlikely(!ic->pipe->data)) {
1317 		put_pipe(ic->pipe);
1318 		ic->pipe = NULL;
1319 	}
1320 
1321 	if (ic->pipe && ic->to_forward && !(flags & CO_RFL_BUF_FLUSH) && cs->flags & CS_FL_MAY_SPLICE) {
1322 		/* don't break splicing by reading, but still call rcv_buf()
1323 		 * to pass the flag.
1324 		 */
1325 		goto done_recv;
1326 	}
1327 
1328 	/* now we'll need a input buffer for the stream */
1329 	if (!si_alloc_ibuf(si, &(si_strm(si)->buffer_wait)))
1330 		goto end_recv;
1331 
1332 	/* For an HTX stream, if the buffer is stuck (no output data with some
1333 	 * input data) and if the HTX message is fragmented or if its free space
1334 	 * wraps, we force an HTX deframentation. It is a way to have a
1335 	 * contiguous free space nad to let the mux to copy as much data as
1336 	 * possible.
1337 	 *
1338 	 * NOTE: A possible optim may be to let the mux decides if defrag is
1339 	 *       required or not, depending on amount of data to be xferred.
1340 	 */
1341 	if (IS_HTX_STRM(si_strm(si)) && !co_data(ic)) {
1342 		struct htx *htx = htxbuf(&ic->buf);
1343 
1344 		if (htx_is_not_empty(htx) && ((htx->flags & HTX_FL_FRAGMENTED) || htx_space_wraps(htx)))
1345 			htx_defrag(htxbuf(&ic->buf), NULL, 0);
1346 	}
1347 
1348 	/* Important note : if we're called with POLL_IN|POLL_HUP, it means the read polling
1349 	 * was enabled, which implies that the recv buffer was not full. So we have a guarantee
1350 	 * that if such an event is not handled above in splice, it will be handled here by
1351 	 * recv().
1352 	 */
1353 	while ((cs->flags & CS_FL_RCV_MORE) ||
1354 	    (!(conn->flags & (CO_FL_ERROR | CO_FL_HANDSHAKE)) &&
1355 	       (!(cs->flags & (CS_FL_ERROR|CS_FL_EOS))) && !(ic->flags & CF_SHUTR))) {
1356 		/* <max> may be null. This is the mux responsibility to set
1357 		 * CS_FL_RCV_MORE on the CS if more space is needed.
1358 		 */
1359 		max = channel_recv_max(ic);
1360 		flags |= ((!conn_is_back(conn) && (si_strm(si)->be->options & PR_O_ABRT_CLOSE)) ? CO_RFL_KEEP_RECV : 0);
1361 		ret = cs->conn->mux->rcv_buf(cs, &ic->buf, max, flags | (co_data(ic) ? CO_RFL_BUF_WET : 0));
1362 
1363 		if (cs->flags & CS_FL_WANT_ROOM) {
1364 			si_rx_room_blk(si);
1365 			/* Add READ_PARTIAL because some data are pending but
1366 			 * cannot be xferred to the channel
1367 			 */
1368 			ic->flags |= CF_READ_PARTIAL;
1369 		}
1370 
1371 		if (cs->flags & CS_FL_READ_PARTIAL) {
1372 			if (tick_isset(ic->rex))
1373 				ic->rex = tick_add_ifset(now_ms, ic->rto);
1374 			cs->flags &= ~CS_FL_READ_PARTIAL;
1375 		}
1376 
1377 		if (ret <= 0) {
1378 			/* if we refrained from reading because we asked for a
1379 			 * flush to satisfy rcv_pipe(), we must not subscribe
1380 			 * and instead report that there's not enough room
1381 			 * here to proceed.
1382 			 */
1383 			if (flags & CO_RFL_BUF_FLUSH)
1384 				si_rx_room_blk(si);
1385 			break;
1386 		}
1387 
1388 		/* L7 retries enabled and maximum connection retries not reached */
1389 		if ((si->flags & SI_FL_L7_RETRY) && si->conn_retries) {
1390 			struct htx *htx;
1391 			struct htx_sl *sl;
1392 
1393 			htx = htxbuf(&ic->buf);
1394 			if (htx) {
1395 				sl = http_get_stline(htx);
1396 				if (sl && l7_status_match(si_strm(si)->be,
1397 				    sl->info.res.status)) {
1398 					/* If we got a status for which we would
1399 					 * like to retry the request, empty
1400 					 * the buffer and pretend there's an
1401 					 * error on the channel.
1402 					 */
1403 					ic->flags |= CF_READ_ERROR;
1404 					htx_reset(htx);
1405 					return 1;
1406 				}
1407 			}
1408 			si->flags &= ~SI_FL_L7_RETRY;
1409 		}
1410 		cur_read += ret;
1411 
1412 		/* if we're allowed to directly forward data, we must update ->o */
1413 		if (ic->to_forward && !(ic->flags & (CF_SHUTW|CF_SHUTW_NOW))) {
1414 			unsigned long fwd = ret;
1415 			if (ic->to_forward != CHN_INFINITE_FORWARD) {
1416 				if (fwd > ic->to_forward)
1417 					fwd = ic->to_forward;
1418 				ic->to_forward -= fwd;
1419 			}
1420 			c_adv(ic, fwd);
1421 		}
1422 
1423 		ic->flags |= CF_READ_PARTIAL;
1424 		ic->total += ret;
1425 
1426 		/* End-of-input reached, we can leave. In this case, it is
1427 		 * important to break the loop to not block the SI because of
1428 		 * the channel's policies.This way, we are still able to receive
1429 		 * shutdowns.
1430 		 */
1431 		if (cs->flags & CS_FL_EOI)
1432 			break;
1433 
1434 		if ((ic->flags & CF_READ_DONTWAIT) || --read_poll <= 0) {
1435 			/* we're stopped by the channel's policy */
1436 			si_rx_chan_blk(si);
1437 			break;
1438 		}
1439 
1440 		/* if too many bytes were missing from last read, it means that
1441 		 * it's pointless trying to read again because the system does
1442 		 * not have them in buffers.
1443 		 */
1444 		if (ret < max) {
1445 			/* if a streamer has read few data, it may be because we
1446 			 * have exhausted system buffers. It's not worth trying
1447 			 * again.
1448 			 */
1449 			if (ic->flags & CF_STREAMER) {
1450 				/* we're stopped by the channel's policy */
1451 				si_rx_chan_blk(si);
1452 				break;
1453 			}
1454 
1455 			/* if we read a large block smaller than what we requested,
1456 			 * it's almost certain we'll never get anything more.
1457 			 */
1458 			if (ret >= global.tune.recv_enough) {
1459 				/* we're stopped by the channel's policy */
1460 				si_rx_chan_blk(si);
1461 				break;
1462 			}
1463 		}
1464 
1465 		/* if we are waiting for more space, don't try to read more data
1466 		 * right now.
1467 		 */
1468 		if (si_rx_blocked(si))
1469 			break;
1470 	} /* while !flags */
1471 
1472  done_recv:
1473 	if (cur_read) {
1474 		if ((ic->flags & (CF_STREAMER | CF_STREAMER_FAST)) &&
1475 		    (cur_read <= ic->buf.size / 2)) {
1476 			ic->xfer_large = 0;
1477 			ic->xfer_small++;
1478 			if (ic->xfer_small >= 3) {
1479 				/* we have read less than half of the buffer in
1480 				 * one pass, and this happened at least 3 times.
1481 				 * This is definitely not a streamer.
1482 				 */
1483 				ic->flags &= ~(CF_STREAMER | CF_STREAMER_FAST);
1484 			}
1485 			else if (ic->xfer_small >= 2) {
1486 				/* if the buffer has been at least half full twice,
1487 				 * we receive faster than we send, so at least it
1488 				 * is not a "fast streamer".
1489 				 */
1490 				ic->flags &= ~CF_STREAMER_FAST;
1491 			}
1492 		}
1493 		else if (!(ic->flags & CF_STREAMER_FAST) &&
1494 			 (cur_read >= ic->buf.size - global.tune.maxrewrite)) {
1495 			/* we read a full buffer at once */
1496 			ic->xfer_small = 0;
1497 			ic->xfer_large++;
1498 			if (ic->xfer_large >= 3) {
1499 				/* we call this buffer a fast streamer if it manages
1500 				 * to be filled in one call 3 consecutive times.
1501 				 */
1502 				ic->flags |= (CF_STREAMER | CF_STREAMER_FAST);
1503 			}
1504 		}
1505 		else {
1506 			ic->xfer_small = 0;
1507 			ic->xfer_large = 0;
1508 		}
1509 		ic->last_read = now_ms;
1510 	}
1511 
1512  end_recv:
1513 	ret = (cur_read != 0);
1514 
1515 	/* Report EOI on the channel if it was reached from the mux point of
1516 	 * view. */
1517 	if ((cs->flags & CS_FL_EOI) && !(ic->flags & CF_EOI)) {
1518 		ic->flags |= (CF_EOI|CF_READ_PARTIAL);
1519 		ret = 1;
1520 	}
1521 
	if (conn->flags & CO_FL_ERROR || cs->flags & CS_FL_ERROR) {
		cs->flags |= CS_FL_ERROR;
		si->flags |= SI_FL_ERR;
		ret = 1;
	}
	else if (cs->flags & CS_FL_EOS) {
		/* we received a shutdown */
		ic->flags |= CF_READ_NULL;
		if (ic->flags & CF_AUTO_CLOSE)
			channel_shutw_now(ic);
		stream_int_read0(si);
		ret = 1;
	}
	else if (!si_rx_blocked(si)) {
		/* Subscribe to receive events if we're blocking on I/O */
		conn->mux->subscribe(cs, SUB_RETRY_RECV, &si->wait_event);
		si_rx_endp_done(si);
	} else {
		si_rx_endp_more(si);
		ret = 1;
	}
	return ret;
}

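/* Illustrative sketch (not part of the build): si_cs_recv() above and its
 * si_cs_send() counterpart are typically driven from a conn_stream I/O
 * callback which sends first, then receives, and notifies the stream when
 * anything moved. The callback name and exact wiring below are hypothetical
 * and only show how the return values may be consumed.
 *
 *	static int example_io_cb(struct conn_stream *cs)
 *	{
 *		struct stream_interface *si = cs->data;
 *		int activity = 0;
 *
 *		activity |= si_cs_send(cs);	// flush pending output first
 *		activity |= si_cs_recv(cs);	// then pull whatever the mux has
 *
 *		if (activity)
 *			stream_int_notify(si);	// wake the stream if anything moved
 *		return activity;
 *	}
 */
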
/*
 * This function propagates a null read received on a socket-based connection.
 * It updates the stream interface. If the stream interface has SI_FL_NOHALF,
 * the close is also forwarded to the write side as an abort.
 */
static void stream_int_read0(struct stream_interface *si)
{
	struct conn_stream *cs = __objt_cs(si->end);
	struct channel *ic = si_ic(si);
	struct channel *oc = si_oc(si);

	si_rx_shut_blk(si);
	if (ic->flags & CF_SHUTR)
		return;
	ic->flags |= CF_SHUTR;
	ic->rex = TICK_ETERNITY;

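	/* the rest only makes sense while a connection attempt is pending or
	 * the connection is established.
	 */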
	if (!si_state_in(si->state, SI_SB_CON|SI_SB_RDY|SI_SB_EST))
		return;

	if (oc->flags & CF_SHUTW)
		goto do_close;

	if (si->flags & SI_FL_NOHALF) {
		/* we want to immediately forward this close to the write side */
		/* force flag on ssl to keep stream in cache */
		cs_shutw(cs, CS_SHW_SILENT);
		goto do_close;
	}

	/* otherwise that's just a normal read shutdown */
	return;

 do_close:
	/* OK we completely close the socket here just as if we went through si_shut[rw]() */
	cs_close(cs);

	oc->flags &= ~CF_SHUTW_NOW;
	oc->flags |= CF_SHUTW;
	oc->wex = TICK_ETERNITY;

	si_done_get(si);

	si->state = SI_ST_DIS;
	si->exp = TICK_ETERNITY;
	return;
}

/* Callback to be used by applet handlers upon completion. It updates the stream
 * (which may or may not take this opportunity to try to forward data), then
 * may re-enable the applet based on the channels' and stream interface's final
 * states.
 */
void si_applet_wake_cb(struct stream_interface *si)
{
	struct channel *ic = si_ic(si);

	/* If the applet wants to write and the channel is closed, it's a
	 * broken pipe and it must be reported.
	 */
	if (!(si->flags & SI_FL_RX_WAIT_EP) && (ic->flags & CF_SHUTR))
		si->flags |= SI_FL_ERR;

	/* automatically mark the applet as having data available if it
	 * reported being blocked by the channel.
	 */
	if (si_rx_blocked(si))
		si_rx_endp_more(si);

	/* update the stream-int, channels, and possibly wake the stream up */
	stream_int_notify(si);
	stream_release_buffers(si_strm(si));

	/* stream_int_notify may have passed through chk_snd and released some
	 * RXBLK flags. process_stream will consider those flags to wake up
	 * the appctx, but if the task is not in the run queue we may have to
	 * wake the appctx up immediately.
	 */
	if ((si_rx_endp_ready(si) && !si_rx_blocked(si)) ||
	    (si_tx_endp_ready(si) && !si_tx_blocked(si)))
		appctx_wakeup(si_appctx(si));
}

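/* Illustrative sketch (not part of the build): the code driving an applet is
 * expected to invoke the callback above once the applet's I/O handler
 * returns, so that the stream reacts to whatever progress was made. The
 * function name below is hypothetical.
 *
 *	static void run_applet_once(struct appctx *appctx)
 *	{
 *		struct stream_interface *si = appctx->owner;
 *
 *		appctx->applet->fct(appctx);	// run the applet's I/O handler
 *		si_applet_wake_cb(si);		// let the stream process the result
 *	}
 */
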
/*
 * This function performs a shutdown-read on a stream interface attached to an
 * applet in a connected or init state (it does nothing for other states). It
 * either shuts the read side or marks itself as closed. The buffer flags are
 * updated to reflect the new state. If the stream interface has SI_FL_NOHALF,
 * we also forward the close to the write side. The owner task is woken up if
 * it exists.
 */
static void stream_int_shutr_applet(struct stream_interface *si)
{
	struct channel *ic = si_ic(si);

	si_rx_shut_blk(si);
	if (ic->flags & CF_SHUTR)
		return;
	ic->flags |= CF_SHUTR;
	ic->rex = TICK_ETERNITY;

	/* Note: on shutr, we don't call the applet */

	if (!si_state_in(si->state, SI_SB_CON|SI_SB_RDY|SI_SB_EST))
		return;

	if (si_oc(si)->flags & CF_SHUTW) {
		si_applet_release(si);
		si->state = SI_ST_DIS;
		si->exp = TICK_ETERNITY;
	}
	else if (si->flags & SI_FL_NOHALF) {
		/* we want to immediately forward this close to the write side */
		return stream_int_shutw_applet(si);
	}
}

/*
 * This function performs a shutdown-write on a stream interface attached to an
 * applet in a connected or init state (it does nothing for other states). It
 * either shuts the write side or marks itself as closed. The buffer flags are
 * updated to reflect the new state. It also closes everything if the SI was
 * marked as being in an error state. The owner task is woken up if it exists.
 */
static void stream_int_shutw_applet(struct stream_interface *si)
{
	struct channel *ic = si_ic(si);
	struct channel *oc = si_oc(si);

	oc->flags &= ~CF_SHUTW_NOW;
	if (oc->flags & CF_SHUTW)
		return;
	oc->flags |= CF_SHUTW;
	oc->wex = TICK_ETERNITY;
	si_done_get(si);

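	/* if a half-closed timeout is configured on this stream interface,
	 * arm it on the read side now that the write side is being shut.
	 */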
	if (tick_isset(si->hcto)) {
		ic->rto = si->hcto;
		ic->rex = tick_add(now_ms, ic->rto);
	}

	/* on shutw we always wake the applet up */
	appctx_wakeup(si_appctx(si));

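	/* depending on the stream interface's current state, either keep the
	 * read side open so pending data may still be received, or release
	 * the applet and fully close.
	 */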
	switch (si->state) {
	case SI_ST_RDY:
	case SI_ST_EST:
		/* we have to shut before closing, otherwise some short messages
		 * may never leave the system, especially when there are remaining
		 * unread data in the socket input buffer, or when nolinger is set.
		 * However, if SI_FL_NOLINGER is explicitly set, we know there is
		 * no risk so we close both sides immediately.
		 */
		if (!(si->flags & (SI_FL_ERR | SI_FL_NOLINGER)) &&
		    !(ic->flags & (CF_SHUTR|CF_DONT_READ)))
			return;

		/* fall through */
	case SI_ST_CON:
	case SI_ST_CER:
	case SI_ST_QUE:
	case SI_ST_TAR:
		/* Note that none of these states may happen with applets */
		si_applet_release(si);
		si->state = SI_ST_DIS;
		/* fall through */
	default:
		si->flags &= ~SI_FL_NOLINGER;
		si_rx_shut_blk(si);
		ic->flags |= CF_SHUTR;
		ic->rex = TICK_ETERNITY;
		si->exp = TICK_ETERNITY;
	}
}

/* chk_rcv function for applets */
static void stream_int_chk_rcv_applet(struct stream_interface *si)
{
	struct channel *ic = si_ic(si);

	DPRINTF(stderr, "%s: si=%p, si->state=%d ic->flags=%08x oc->flags=%08x\n",
		__FUNCTION__,
		si, si->state, ic->flags, si_oc(si)->flags);

	if (!ic->pipe) {
		/* (re)start reading */
		appctx_wakeup(si_appctx(si));
	}
}

/* chk_snd function for applets */
static void stream_int_chk_snd_applet(struct stream_interface *si)
{
	struct channel *oc = si_oc(si);

	DPRINTF(stderr, "%s: si=%p, si->state=%d ic->flags=%08x oc->flags=%08x\n",
		__FUNCTION__,
		si, si->state, si_ic(si)->flags, oc->flags);

	if (unlikely(si->state != SI_ST_EST || (oc->flags & CF_SHUTW)))
		return;

	/* we only wake the applet up if it was waiting for some data */

	if (!(si->flags & SI_FL_WAIT_DATA))
		return;

	if (!tick_isset(oc->wex))
		oc->wex = tick_add_ifset(now_ms, oc->wto);

	if (!channel_is_empty(oc)) {
		/* (re)start sending */
		appctx_wakeup(si_appctx(si));
	}
}

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */