1 /*
2 * Stream management functions.
3 *
4 * Copyright 2000-2012 Willy Tarreau <w@1wt.eu>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
13 #include <stdlib.h>
14 #include <unistd.h>
15 #include <fcntl.h>
16
17 #include <common/cfgparse.h>
18 #include <common/config.h>
19 #include <common/buffer.h>
20 #include <common/debug.h>
21 #include <common/hathreads.h>
22 #include <common/htx.h>
23 #include <common/initcall.h>
24 #include <common/memory.h>
25
26 #include <types/applet.h>
27 #include <types/capture.h>
28 #include <types/cli.h>
29 #include <types/filters.h>
30 #include <types/global.h>
31 #include <types/stats.h>
32
33 #include <proto/acl.h>
34 #include <proto/action.h>
35 #include <proto/activity.h>
36 #include <proto/arg.h>
37 #include <proto/backend.h>
38 #include <proto/channel.h>
39 #include <proto/checks.h>
40 #include <proto/cli.h>
41 #include <proto/connection.h>
42 #include <proto/dict.h>
43 #include <proto/dns.h>
44 #include <proto/stats.h>
45 #include <proto/fd.h>
46 #include <proto/filters.h>
47 #include <proto/freq_ctr.h>
48 #include <proto/frontend.h>
49 #include <proto/hlua.h>
50 #include <proto/http_rules.h>
51 #include <proto/listener.h>
52 #include <proto/log.h>
53 #include <proto/raw_sock.h>
54 #include <proto/session.h>
55 #include <proto/stream.h>
56 #include <proto/pipe.h>
57 #include <proto/http_ana.h>
58 #include <proto/proxy.h>
59 #include <proto/queue.h>
60 #include <proto/server.h>
61 #include <proto/sample.h>
62 #include <proto/stick_table.h>
63 #include <proto/stream_interface.h>
64 #include <proto/task.h>
65 #include <proto/tcp_rules.h>
66 #include <proto/vars.h>
67
68 DECLARE_POOL(pool_head_stream, "stream", sizeof(struct stream));
69
70 struct list streams = LIST_HEAD_INIT(streams);
71 __decl_spinlock(streams_lock);
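/* streams_lock protects the global <streams> list above: stream insertion,
 * removal and the back-reference walks performed on release all take it.
 */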
72
73 /* List of all use-service keywords. */
74 static struct list service_keywords = LIST_HEAD_INIT(service_keywords);
75
76
77 /* trace source and events */
78 static void strm_trace(enum trace_level level, uint64_t mask,
79 const struct trace_source *src,
80 const struct ist where, const struct ist func,
81 const void *a1, const void *a2, const void *a3, const void *a4);
82
83 /* The event representation is split like this:
84 * strm - stream
85 * si - stream interface
86 * http - HTTP analysis
87 * tcp - TCP analysis
88 *
89 * STRM_EV_* macros are defined in <proto/stream.h>
90 */
91 static const struct trace_event strm_trace_events[] = {
92 { .mask = STRM_EV_STRM_NEW, .name = "strm_new", .desc = "new stream" },
93 { .mask = STRM_EV_STRM_FREE, .name = "strm_free", .desc = "release stream" },
94 { .mask = STRM_EV_STRM_ERR, .name = "strm_err", .desc = "error during stream processing" },
95 { .mask = STRM_EV_STRM_ANA, .name = "strm_ana", .desc = "stream analyzers" },
96 { .mask = STRM_EV_STRM_PROC, .name = "strm_proc", .desc = "stream processing" },
97
98 { .mask = STRM_EV_SI_ST, .name = "si_state", .desc = "processing stream-interface states" },
99
100 { .mask = STRM_EV_HTTP_ANA, .name = "http_ana", .desc = "HTTP analyzers" },
101 { .mask = STRM_EV_HTTP_ERR, .name = "http_err", .desc = "error during HTTP analysis" },
102
103 { .mask = STRM_EV_TCP_ANA, .name = "tcp_ana", .desc = "TCP analyzers" },
104 { .mask = STRM_EV_TCP_ERR, .name = "tcp_err", .desc = "error during TCP analysis" },
105 {}
106 };
107
108 static const struct name_desc strm_trace_lockon_args[4] = {
109 /* arg1 */ { /* already used by the stream */ },
110 /* arg2 */ { },
111 /* arg3 */ { },
112 /* arg4 */ { }
113 };
114
115 static const struct name_desc strm_trace_decoding[] = {
116 #define STRM_VERB_CLEAN 1
117 { .name="clean", .desc="only user-friendly stuff, generally suitable for level \"user\"" },
118 #define STRM_VERB_MINIMAL 2
119 { .name="minimal", .desc="report info on stream and stream-interfaces" },
120 #define STRM_VERB_SIMPLE 3
121 { .name="simple", .desc="add info on request and response channels" },
122 #define STRM_VERB_ADVANCED 4
123 { .name="advanced", .desc="add info on channel's buffer for data and developer levels only" },
124 #define STRM_VERB_COMPLETE 5
125 { .name="complete", .desc="add info on channel's buffer" },
126 { /* end */ }
127 };
128
129 struct trace_source trace_strm = {
130 .name = IST("stream"),
131 .desc = "Applicative stream",
132 .arg_def = TRC_ARG1_STRM, // TRACE()'s first argument is always a stream
133 .default_cb = strm_trace,
134 .known_events = strm_trace_events,
135 .lockon_args = strm_trace_lockon_args,
136 .decoding = strm_trace_decoding,
137 .report_events = ~0, // report everything by default
138 };
139
140 #define TRACE_SOURCE &trace_strm
141 INITCALL1(STG_REGISTER, trace_register_source, TRACE_SOURCE);
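/* Once registered, this trace source can be driven at run time from the CLI,
 * e.g. something like "trace stream level developer" followed by
 * "trace stream start now" (exact commands depend on the trace CLI being
 * compiled in and may differ between versions).
 */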
142
143 /* the stream traces always expect that arg1, if non-null, is a stream (from
144 * which we can derive everything), that arg2, if non-null, is an HTTP
145 * transaction, and that arg3, if non-null, is an HTTP message.
146 */
147 static void strm_trace(enum trace_level level, uint64_t mask, const struct trace_source *src,
148 const struct ist where, const struct ist func,
149 const void *a1, const void *a2, const void *a3, const void *a4)
150 {
151 const struct stream *s = a1;
152 const struct http_txn *txn = a2;
153 const struct http_msg *msg = a3;
154 struct task *task;
155 const struct stream_interface *si_f, *si_b;
156 const struct channel *req, *res;
157 struct htx *htx;
158
159 if (!s || src->verbosity < STRM_VERB_CLEAN)
160 return;
161
162 task = s->task;
163 si_f = &s->si[0];
164 si_b = &s->si[1];
165 req = &s->req;
166 res = &s->res;
167 htx = (msg ? htxbuf(&msg->chn->buf) : NULL);
168
169 /* General info about the stream (htx/tcp, id...) */
170 chunk_appendf(&trace_buf, " : [%u,%s]",
171 s->uniq_id, ((s->flags & SF_HTX) ? "HTX" : "TCP"));
172 if (s->unique_id)
173 chunk_appendf(&trace_buf, " id=%s", s->unique_id);
174
175 /* Front and back stream-int state */
176 chunk_appendf(&trace_buf, " SI=(%s,%s)",
177 si_state_str(si_f->state), si_state_str(si_b->state));
178
179 /* If txn is defined, HTTP req/rep states */
180 if (txn)
181 chunk_appendf(&trace_buf, " HTTP=(%s,%s)",
182 h1_msg_state_str(txn->req.msg_state), h1_msg_state_str(txn->rsp.msg_state));
183 if (msg)
184 chunk_appendf(&trace_buf, " %s", ((msg->chn->flags & CF_ISRESP) ? "RESPONSE" : "REQUEST"));
185
186 if (src->verbosity == STRM_VERB_CLEAN)
187 return;
188
189 /* If msg defined, display status-line if possible (verbosity > MINIMAL) */
190 if (src->verbosity > STRM_VERB_MINIMAL && htx && htx_nbblks(htx)) {
191 const struct htx_blk *blk = htx_get_head_blk(htx);
192 const struct htx_sl *sl = htx_get_blk_ptr(htx, blk);
193 enum htx_blk_type type = htx_get_blk_type(blk);
194
195 if (type == HTX_BLK_REQ_SL || type == HTX_BLK_RES_SL)
196 chunk_appendf(&trace_buf, " - \"%.*s %.*s %.*s\"",
197 HTX_SL_P1_LEN(sl), HTX_SL_P1_PTR(sl),
198 HTX_SL_P2_LEN(sl), HTX_SL_P2_PTR(sl),
199 HTX_SL_P3_LEN(sl), HTX_SL_P3_PTR(sl));
200 }
201
202
203 /* If txn is defined, report info about the HTTP messages, otherwise about the stream interfaces. */
204 if (txn) {
205 chunk_appendf(&trace_buf, " - t=%p s=(%p,0x%08x) txn.flags=0x%08x, http.flags=(0x%08x,0x%08x) status=%d",
206 task, s, s->flags, txn->flags, txn->req.flags, txn->rsp.flags, txn->status);
207 }
208 else {
209 chunk_appendf(&trace_buf, " - t=%p s=(%p,0x%08x) si_f=(%p,0x%08x,0x%x) si_b=(%p,0x%08x,0x%x) retries=%d",
210 task, s, s->flags, si_f, si_f->flags, si_f->err_type,
211 si_b, si_b->flags, si_b->err_type, si_b->conn_retries);
212 }
213
214 if (src->verbosity == STRM_VERB_MINIMAL)
215 return;
216
217
218 /* If txn defined, don't display all channel info */
219 if (src->verbosity == STRM_VERB_SIMPLE || txn) {
220 chunk_appendf(&trace_buf, " req=(%p .fl=0x%08x .exp(r,w,a)=(%u,%u,%u))",
221 req, req->flags, req->rex, req->wex, req->analyse_exp);
222 chunk_appendf(&trace_buf, " res=(%p .fl=0x%08x .exp(r,w,a)=(%u,%u,%u))",
223 res, res->flags, res->rex, res->wex, res->analyse_exp);
224 }
225 else {
226 chunk_appendf(&trace_buf, " req=(%p .fl=0x%08x .ana=0x%08x .exp(r,w,a)=(%u,%u,%u) .o=%lu .tot=%llu .to_fwd=%u)",
227 req, req->flags, req->analysers, req->rex, req->wex, req->analyse_exp,
228 (long)req->output, req->total, req->to_forward);
229 chunk_appendf(&trace_buf, " res=(%p .fl=0x%08x .ana=0x%08x .exp(r,w,a)=(%u,%u,%u) .o=%lu .tot=%llu .to_fwd=%u)",
230 res, res->flags, res->analysers, res->rex, res->wex, res->analyse_exp,
231 (long)res->output, res->total, res->to_forward);
232 }
233
234 if (src->verbosity == STRM_VERB_SIMPLE ||
235 (src->verbosity == STRM_VERB_ADVANCED && src->level < TRACE_LEVEL_DATA))
236 return;
237
238 /* channels' buffer info */
239 if (s->flags & SF_HTX) {
240 struct htx *rqhtx = htxbuf(&req->buf);
241 struct htx *rphtx = htxbuf(&res->buf);
242
243 chunk_appendf(&trace_buf, " htx=(%u/%u#%u, %u/%u#%u)",
244 rqhtx->data, rqhtx->size, htx_nbblks(rqhtx),
245 rphtx->data, rphtx->size, htx_nbblks(rphtx));
246 }
247 else {
248 chunk_appendf(&trace_buf, " buf=(%u@%p+%u/%u, %u@%p+%u/%u)",
249 (unsigned int)b_data(&req->buf), b_orig(&req->buf),
250 (unsigned int)b_head_ofs(&req->buf), (unsigned int)b_size(&req->buf),
251 (unsigned int)b_data(&res->buf), b_orig(&res->buf),
252 (unsigned int)b_head_ofs(&res->buf), (unsigned int)b_size(&res->buf));
253 }
254
255 /* If msg defined, display htx info if defined (level > USER) */
256 if (src->level > TRACE_LEVEL_USER && htx && htx_nbblks(htx)) {
257 int full = 0;
258
259 /* Full htx info (level > STATE && verbosity > SIMPLE) */
260 if (src->level > TRACE_LEVEL_STATE) {
261 if (src->verbosity == STRM_VERB_COMPLETE)
262 full = 1;
263 }
264
265 chunk_memcat(&trace_buf, "\n\t", 2);
266 htx_dump(&trace_buf, htx, full);
267 }
268 }
269
270 /* Create a new stream for conn-stream <cs>. Return < 0 on error. This is only
271 * valid right after the handshake, before the connection's data layer is
272 * initialized, because it relies on the session to be in cs->conn->owner.
273 */
274 int stream_create_from_cs(struct conn_stream *cs)
275 {
276 struct stream *strm;
277
278 strm = stream_new(cs->conn->owner, &cs->obj_type);
279 if (strm == NULL)
280 return -1;
281
282 task_wakeup(strm->task, TASK_WOKEN_INIT);
283 return 0;
284 }
285
286 /* Callback used to wake up a stream when an input buffer is available. The
287 * stream <s>'s stream interfaces are checked for a failed buffer allocation
288 * as indicated by the presence of the SI_FL_RXBLK_BUFF flag and the lack of a
289 * buffer, and an input buffer is assigned there (at most one). The function
290 * returns 1 and wakes the stream up if a buffer was taken, otherwise zero.
291 * It's designed to be called from __offer_buffer().
292 */
293 int stream_buf_available(void *arg)
294 {
295 struct stream *s = arg;
296
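/* Note the different margins below: a request buffer is only allocated if
 * the configured reserve of buffers is preserved, while a response buffer
 * may dig into that reserve so that a response (e.g. an error message) can
 * always be produced.
 */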
297 if (!s->req.buf.size && !s->req.pipe && (s->si[0].flags & SI_FL_RXBLK_BUFF) &&
298 b_alloc_margin(&s->req.buf, global.tune.reserved_bufs))
299 si_rx_buff_rdy(&s->si[0]);
300 else if (!s->res.buf.size && !s->res.pipe && (s->si[1].flags & SI_FL_RXBLK_BUFF) &&
301 b_alloc_margin(&s->res.buf, 0))
302 si_rx_buff_rdy(&s->si[1]);
303 else
304 return 0;
305
306 task_wakeup(s->task, TASK_WOKEN_RES);
307 return 1;
308
309 }
310
311 /* This function is called from the session handler which detects the end of
312 * handshake, in order to complete initialization of a valid stream. It must be
313 * called with a completely initialized session. It returns the pointer to
314 * the newly created stream, or NULL in case of fatal error. The client-facing
315 * end point is assigned to <origin>, which must be valid. The stream's task
316 * is configured with a nice value inherited from the listener's nice if any.
317 * The task's context is set to the new stream, and its function is set to
318 * process_stream(). Target and analysers are null.
319 */
320 struct stream *stream_new(struct session *sess, enum obj_type *origin)
321 {
322 struct stream *s;
323 struct task *t;
324 struct conn_stream *cs = objt_cs(origin);
325 struct appctx *appctx = objt_appctx(origin);
326 const struct cs_info *csinfo;
327
328 DBG_TRACE_ENTER(STRM_EV_STRM_NEW);
329 if (unlikely((s = pool_alloc(pool_head_stream)) == NULL))
330 goto out_fail_alloc;
331
332 /* minimum stream initialization required for an embryonic stream is
333 * fairly low. We need very little to execute L4 ACLs, then we need a
334 * task to make the client-side connection live on its own.
335 * - flags
336 * - stick-entry tracking
337 */
338 s->flags = 0;
339 s->logs.logwait = sess->fe->to_log;
340 s->logs.level = 0;
341 tv_zero(&s->logs.tv_request);
342 s->logs.t_queue = -1;
343 s->logs.t_connect = -1;
344 s->logs.t_data = -1;
345 s->logs.t_close = 0;
346 s->logs.bytes_in = s->logs.bytes_out = 0;
347 s->logs.prx_queue_pos = 0; /* we get the number of pending conns before us */
348 s->logs.srv_queue_pos = 0; /* we will get this number soon */
349 s->obj_type = OBJ_TYPE_STREAM;
350
351 csinfo = si_get_cs_info(cs);
352 if (csinfo) {
353 s->logs.accept_date = csinfo->create_date;
354 s->logs.tv_accept = csinfo->tv_create;
355 s->logs.t_handshake = csinfo->t_handshake;
356 s->logs.t_idle = csinfo->t_idle;
357 }
358 else {
359 s->logs.accept_date = sess->accept_date;
360 s->logs.tv_accept = sess->tv_accept;
361 s->logs.t_handshake = sess->t_handshake;
362 s->logs.t_idle = -1;
363 }
364
365 /* default logging function */
366 s->do_log = strm_log;
367
368 /* default error reporting function, may be changed by analysers */
369 s->srv_error = default_srv_error;
370
371 /* Initialise the current rule list pointer to NULL, so that it can
372 * never spuriously match an existing rule list.
373 */
374 s->current_rule_list = NULL;
375 s->current_rule = NULL;
376
377 /* Copy SC counters for the stream. We don't touch refcounts because
378 * any reference we have is inherited from the session. Since the stream
379 * doesn't exist without the session, the session's existence guarantees
380 * we don't lose the entry. During the store operation, the stream won't
381 * touch these ones.
382 */
383 memcpy(s->stkctr, sess->stkctr, sizeof(s->stkctr));
384
385 s->sess = sess;
386 s->si[0].flags = SI_FL_NONE;
387 s->si[1].flags = SI_FL_ISBACK;
388
389 s->uniq_id = _HA_ATOMIC_XADD(&global.req_count, 1);
390
391 /* OK, we're keeping the stream, so let's properly initialize the stream */
392 LIST_INIT(&s->back_refs);
393
394 LIST_INIT(&s->buffer_wait.list);
395 s->buffer_wait.target = s;
396 s->buffer_wait.wakeup_cb = stream_buf_available;
397
398 s->call_rate.curr_sec = s->call_rate.curr_ctr = s->call_rate.prev_ctr = 0;
399 s->pcli_next_pid = 0;
400 s->pcli_flags = 0;
401 s->unique_id = NULL;
402
403 if ((t = task_new(tid_bit)) == NULL)
404 goto out_fail_alloc;
405
406 s->task = t;
407 s->pending_events = 0;
408 t->process = process_stream;
409 t->context = s;
410 t->expire = TICK_ETERNITY;
411 if (sess->listener)
412 t->nice = sess->listener->nice;
413
414 /* Note: initially, the stream's backend points to the frontend.
415 * This changes later when switching rules are executed or
416 * when the default backend is assigned.
417 */
418 s->be = sess->fe;
419 s->req.buf = BUF_NULL;
420 s->res.buf = BUF_NULL;
421 s->req_cap = NULL;
422 s->res_cap = NULL;
423
424 /* Initialise all the variable contexts even if they are not used,
425 * so that they can later be pruned without errors.
426 */
427 vars_init(&s->vars_txn, SCOPE_TXN);
428 vars_init(&s->vars_reqres, SCOPE_REQ);
429
430 /* this part should be common with other protocols */
431 if (si_reset(&s->si[0]) < 0)
432 goto out_fail_alloc;
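/* the client-facing stream interface starts directly in the established
 * state since this function is only called once the handshake is over.
 */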
433 si_set_state(&s->si[0], SI_ST_EST);
434 s->si[0].hcto = sess->fe->timeout.clientfin;
435
436 if (cs && cs->conn->mux) {
437 if (cs->conn->mux->flags & MX_FL_CLEAN_ABRT)
438 s->si[0].flags |= SI_FL_CLEAN_ABRT;
439 if (cs->conn->mux->flags & MX_FL_HTX)
440 s->flags |= SF_HTX;
441 }
442 /* Set SF_HTX flag for HTTP frontends. */
443 if (sess->fe->mode == PR_MODE_HTTP)
444 s->flags |= SF_HTX;
445
446 /* attach the incoming connection to the stream interface now. */
447 if (cs)
448 si_attach_cs(&s->si[0], cs);
449 else if (appctx)
450 si_attach_appctx(&s->si[0], appctx);
451
452 if (likely(sess->fe->options2 & PR_O2_INDEPSTR))
453 s->si[0].flags |= SI_FL_INDEP_STR;
454
455 /* pre-initialize the other side's stream interface to an INIT state. The
456 * callbacks will be initialized before attempting to connect.
457 */
458 if (si_reset(&s->si[1]) < 0)
459 goto out_fail_alloc_si1;
460 s->si[1].hcto = TICK_ETERNITY;
461
462 if (likely(sess->fe->options2 & PR_O2_INDEPSTR))
463 s->si[1].flags |= SI_FL_INDEP_STR;
464
465 stream_init_srv_conn(s);
466 s->target = sess->listener ? sess->listener->default_target : NULL;
467 s->target_addr = NULL;
468
469 s->pend_pos = NULL;
470 s->priority_class = 0;
471 s->priority_offset = 0;
472
473 /* init store persistence */
474 s->store_count = 0;
475
476 channel_init(&s->req);
477 s->req.flags |= CF_READ_ATTACHED; /* the producer is already connected */
478 s->req.analysers = sess->listener ? sess->listener->analysers : 0;
479
480 if (!sess->fe->fe_req_ana) {
481 channel_auto_connect(&s->req); /* don't wait to establish connection */
482 channel_auto_close(&s->req); /* let the producer forward close requests */
483 }
484
485 s->req.rto = sess->fe->timeout.client;
486 s->req.wto = TICK_ETERNITY;
487 s->req.rex = TICK_ETERNITY;
488 s->req.wex = TICK_ETERNITY;
489 s->req.analyse_exp = TICK_ETERNITY;
490
491 channel_init(&s->res);
492 s->res.flags |= CF_ISRESP;
493 s->res.analysers = 0;
494
495 if (sess->fe->options2 & PR_O2_NODELAY) {
496 s->req.flags |= CF_NEVER_WAIT;
497 s->res.flags |= CF_NEVER_WAIT;
498 }
499
500 s->res.wto = sess->fe->timeout.client;
501 s->res.rto = TICK_ETERNITY;
502 s->res.rex = TICK_ETERNITY;
503 s->res.wex = TICK_ETERNITY;
504 s->res.analyse_exp = TICK_ETERNITY;
505
506 s->txn = NULL;
507 s->hlua = NULL;
508
509 s->dns_ctx.dns_requester = NULL;
510 s->dns_ctx.hostname_dn = NULL;
511 s->dns_ctx.hostname_dn_len = 0;
512 s->dns_ctx.parent = NULL;
513
514 HA_SPIN_LOCK(STRMS_LOCK, &streams_lock);
515 LIST_ADDQ(&streams, &s->list);
516 HA_SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
517
518 if (flt_stream_init(s) < 0 || flt_stream_start(s) < 0)
519 goto out_fail_accept;
520
521 s->si[1].l7_buffer = BUF_NULL;
522 /* finish initialization of the accepted file descriptor */
523 if (appctx)
524 si_want_get(&s->si[0]);
525
526 if (sess->fe->accept && sess->fe->accept(s) < 0)
527 goto out_fail_accept;
528
529 /* it is important not to call the wakeup function directly but to
530 * pass through task_wakeup(), because this one knows how to apply
531 * priorities to tasks. With multiple threads we must also be sure
532 * that the stream is fully initialized before task_wakeup() is
533 * called, which is why the wake-up is left to the caller.
534 */
535 DBG_TRACE_LEAVE(STRM_EV_STRM_NEW, s);
536 return s;
537
538 /* Error unrolling */
539 out_fail_accept:
540 flt_stream_release(s, 0);
541 task_destroy(t);
542 tasklet_free(s->si[1].wait_event.tasklet);
543 LIST_DEL(&s->list);
544 out_fail_alloc_si1:
545 tasklet_free(s->si[0].wait_event.tasklet);
546 out_fail_alloc:
547 pool_free(pool_head_stream, s);
548 DBG_TRACE_DEVEL("leaving on error", STRM_EV_STRM_NEW|STRM_EV_STRM_ERR);
549 return NULL;
550 }
551
552 /*
553 * frees the context associated with a stream. It must have been removed first.
554 */
555 static void stream_free(struct stream *s)
556 {
557 struct session *sess = strm_sess(s);
558 struct proxy *fe = sess->fe;
559 struct bref *bref, *back;
560 struct conn_stream *cli_cs = objt_cs(s->si[0].end);
561 int must_free_sess;
562 int i;
563
564 DBG_TRACE_POINT(STRM_EV_STRM_FREE, s);
565
566 /* detach the stream from its own task before even releasing it so
567 * that walking over a task list never exhibits a dying stream.
568 */
569 s->task->context = NULL;
570 __ha_barrier_store();
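/* the barrier above makes the cleared task context visible to other threads
 * before the stream is effectively torn down below.
 */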
571
572 pendconn_free(s);
573
574 if (objt_server(s->target)) { /* there may be requests left pending in queue */
575 if (s->flags & SF_CURR_SESS) {
576 s->flags &= ~SF_CURR_SESS;
577 _HA_ATOMIC_SUB(&__objt_server(s->target)->cur_sess, 1);
578 }
579 if (may_dequeue_tasks(objt_server(s->target), s->be))
580 process_srv_queue(objt_server(s->target));
581 }
582
583 if (unlikely(s->srv_conn)) {
584 /* the stream still has a reserved slot on a server, but
585 * it should normally only be the same one as above, so
586 * this should not happen in practice.
587 */
588 sess_change_server(s, NULL);
589 }
590
591 if (s->req.pipe)
592 put_pipe(s->req.pipe);
593
594 if (s->res.pipe)
595 put_pipe(s->res.pipe);
596
597 /* We may still be present in the buffer wait queue */
598 if (!LIST_ISEMPTY(&s->buffer_wait.list)) {
599 HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
600 LIST_DEL(&s->buffer_wait.list);
601 LIST_INIT(&s->buffer_wait.list);
602 HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
603 }
604 if (s->req.buf.size || s->res.buf.size) {
605 b_free(&s->req.buf);
606 b_free(&s->res.buf);
607 offer_buffers(NULL, tasks_run_queue);
608 }
609
610 pool_free(pool_head_uniqueid, s->unique_id);
611 s->unique_id = NULL;
612
613 hlua_ctx_destroy(s->hlua);
614 s->hlua = NULL;
615 if (s->txn)
616 http_end_txn(s);
617
618 /* ensure the client-side transport layer is destroyed */
619 if (cli_cs)
620 cs_close(cli_cs);
621
622 for (i = 0; i < s->store_count; i++) {
623 if (!s->store[i].ts)
624 continue;
625 stksess_free(s->store[i].table, s->store[i].ts);
626 s->store[i].ts = NULL;
627 }
628
629 if (s->txn) {
630 pool_free(pool_head_http_txn, s->txn);
631 s->txn = NULL;
632 }
633
634 if (s->dns_ctx.dns_requester) {
635 __decl_hathreads(struct dns_resolvers *resolvers = s->dns_ctx.parent->arg.dns.resolvers);
636
637 HA_SPIN_LOCK(DNS_LOCK, &resolvers->lock);
638 free(s->dns_ctx.hostname_dn); s->dns_ctx.hostname_dn = NULL;
639 s->dns_ctx.hostname_dn_len = 0;
640 dns_unlink_resolution(s->dns_ctx.dns_requester);
641 HA_SPIN_UNLOCK(DNS_LOCK, &resolvers->lock);
642
643 pool_free(dns_requester_pool, s->dns_ctx.dns_requester);
644 s->dns_ctx.dns_requester = NULL;
645 }
646
647 flt_stream_stop(s);
648 flt_stream_release(s, 0);
649
650 if (fe) {
651 if (s->req_cap) {
652 struct cap_hdr *h;
653 for (h = fe->req_cap; h; h = h->next)
654 pool_free(h->pool, s->req_cap[h->index]);
655 }
656
657 if (s->res_cap) {
658 struct cap_hdr *h;
659 for (h = fe->rsp_cap; h; h = h->next)
660 pool_free(h->pool, s->res_cap[h->index]);
661 }
662
663 pool_free(fe->rsp_cap_pool, s->res_cap);
664 pool_free(fe->req_cap_pool, s->req_cap);
665 }
666
667 /* Cleanup all variable contexts. */
668 if (!LIST_ISEMPTY(&s->vars_txn.head))
669 vars_prune(&s->vars_txn, s->sess, s);
670 if (!LIST_ISEMPTY(&s->vars_reqres.head))
671 vars_prune(&s->vars_reqres, s->sess, s);
672
673 stream_store_counters(s);
674
675 HA_SPIN_LOCK(STRMS_LOCK, &streams_lock);
676 list_for_each_entry_safe(bref, back, &s->back_refs, users) {
677 /* we have to unlink all watchers. We must not relink them if
678 * this stream was the last one in the list.
679 */
680 LIST_DEL(&bref->users);
681 LIST_INIT(&bref->users);
682 if (s->list.n != &streams)
683 LIST_ADDQ(&LIST_ELEM(s->list.n, struct stream *, list)->back_refs, &bref->users);
684 bref->ref = s->list.n;
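/* the watcher now points to the next stream in the list, or to the list
 * head itself if this stream was the last one.
 */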
685 }
686 LIST_DEL(&s->list);
687 HA_SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
688
689 /* applets do not release session yet */
690 must_free_sess = objt_appctx(sess->origin) && sess->origin == s->si[0].end;
691
692
693 si_release_endpoint(&s->si[1]);
694 si_release_endpoint(&s->si[0]);
695
696 tasklet_free(s->si[0].wait_event.tasklet);
697 tasklet_free(s->si[1].wait_event.tasklet);
698
699 b_free(&s->si[1].l7_buffer);
700 if (must_free_sess) {
701 sess->origin = NULL;
702 session_free(sess);
703 }
704
705 sockaddr_free(&s->target_addr);
706 pool_free(pool_head_stream, s);
707
708 /* We may want to free the maximum amount of pools if the proxy is stopping */
709 if (fe && unlikely(fe->state == PR_STSTOPPED)) {
710 pool_flush(pool_head_buffer);
711 pool_flush(pool_head_http_txn);
712 pool_flush(pool_head_requri);
713 pool_flush(pool_head_capture);
714 pool_flush(pool_head_stream);
715 pool_flush(pool_head_session);
716 pool_flush(pool_head_connection);
717 pool_flush(pool_head_pendconn);
718 pool_flush(fe->req_cap_pool);
719 pool_flush(fe->rsp_cap_pool);
720 }
721 }
722
723
724 /* Allocates a work buffer for stream <s>. It is meant to be called inside
725 * process_stream(). It only allocates the single buffer the function needs to
726 * work correctly, which is the response buffer, so that an error message may
727 * be built and returned. Response buffers may be allocated from the reserve;
728 * this is critical to ensure that a response can always flow and will never
729 * block a server from releasing a connection. Returns 0 in case of failure,
730 * non-zero otherwise.
731 */
732 static int stream_alloc_work_buffer(struct stream *s)
733 {
734 if (!LIST_ISEMPTY(&s->buffer_wait.list)) {
735 HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
736 LIST_DEL(&s->buffer_wait.list);
737 LIST_INIT(&s->buffer_wait.list);
738 HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
739 }
740
741 if (b_alloc_margin(&s->res.buf, 0))
742 return 1;
743
744 HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
745 LIST_ADDQ(&buffer_wq, &s->buffer_wait.list);
746 HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
747 return 0;
748 }
749
750 /* releases unused buffers after processing. Typically used at the end of the
751 * update() functions. It will try to wake up as many tasks/applets as the
752 * number of buffers that it releases. In practice, most often streams are
753 * blocked on a single buffer, so it makes sense to try to wake two up when two
754 * buffers are released at once.
755 */
756 void stream_release_buffers(struct stream *s)
757 {
758 int offer = 0;
759
760 if (c_size(&s->req) && c_empty(&s->req)) {
761 offer = 1;
762 b_free(&s->req.buf);
763 }
764 if (c_size(&s->res) && c_empty(&s->res)) {
765 offer = 1;
766 b_free(&s->res.buf);
767 }
768
769 /* if we're certain to have at least 1 buffer available, and there is
770 * someone waiting, we can wake up a waiter and offer them.
771 */
772 if (offer)
773 offer_buffers(s, tasks_run_queue);
774 }
775
776 void stream_process_counters(struct stream *s)
777 {
778 struct session *sess = s->sess;
779 unsigned long long bytes;
780 void *ptr1, *ptr2;
781 struct stksess *ts;
782 int i;
783
784 bytes = s->req.total - s->logs.bytes_in;
785 s->logs.bytes_in = s->req.total;
786 if (bytes) {
787 _HA_ATOMIC_ADD(&sess->fe->fe_counters.bytes_in, bytes);
788 _HA_ATOMIC_ADD(&s->be->be_counters.bytes_in, bytes);
789
790 if (objt_server(s->target))
791 _HA_ATOMIC_ADD(&objt_server(s->target)->counters.bytes_in, bytes);
792
793 if (sess->listener && sess->listener->counters)
794 _HA_ATOMIC_ADD(&sess->listener->counters->bytes_in, bytes);
795
796 for (i = 0; i < MAX_SESS_STKCTR; i++) {
797 struct stkctr *stkctr = &s->stkctr[i];
798
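/* use the counter tracked by the stream if any, otherwise fall back to
 * the one tracked by the session.
 */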
799 ts = stkctr_entry(stkctr);
800 if (!ts) {
801 stkctr = &sess->stkctr[i];
802 ts = stkctr_entry(stkctr);
803 if (!ts)
804 continue;
805 }
806
807 HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
808 ptr1 = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_BYTES_IN_CNT);
809 if (ptr1)
810 stktable_data_cast(ptr1, bytes_in_cnt) += bytes;
811
812 ptr2 = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_BYTES_IN_RATE);
813 if (ptr2)
814 update_freq_ctr_period(&stktable_data_cast(ptr2, bytes_in_rate),
815 stkctr->table->data_arg[STKTABLE_DT_BYTES_IN_RATE].u, bytes);
816 HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
817
818 /* If data was modified, touch the entry so that a sync is re-scheduled */
819 if (ptr1 || ptr2)
820 stktable_touch_local(stkctr->table, ts, 0);
821 }
822 }
823
824 bytes = s->res.total - s->logs.bytes_out;
825 s->logs.bytes_out = s->res.total;
826 if (bytes) {
827 _HA_ATOMIC_ADD(&sess->fe->fe_counters.bytes_out, bytes);
828 _HA_ATOMIC_ADD(&s->be->be_counters.bytes_out, bytes);
829
830 if (objt_server(s->target))
831 _HA_ATOMIC_ADD(&objt_server(s->target)->counters.bytes_out, bytes);
832
833 if (sess->listener && sess->listener->counters)
834 _HA_ATOMIC_ADD(&sess->listener->counters->bytes_out, bytes);
835
836 for (i = 0; i < MAX_SESS_STKCTR; i++) {
837 struct stkctr *stkctr = &s->stkctr[i];
838
839 ts = stkctr_entry(stkctr);
840 if (!ts) {
841 stkctr = &sess->stkctr[i];
842 ts = stkctr_entry(stkctr);
843 if (!ts)
844 continue;
845 }
846
847 HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
848 ptr1 = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_BYTES_OUT_CNT);
849 if (ptr1)
850 stktable_data_cast(ptr1, bytes_out_cnt) += bytes;
851
852 ptr2 = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_BYTES_OUT_RATE);
853 if (ptr2)
854 update_freq_ctr_period(&stktable_data_cast(ptr2, bytes_out_rate),
855 stkctr->table->data_arg[STKTABLE_DT_BYTES_OUT_RATE].u, bytes);
856 HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
857
858 /* If data was modified, touch the entry so that a sync is re-scheduled */
859 if (ptr1 || ptr2)
860 stktable_touch_local(stkctr->table, stkctr_entry(stkctr), 0);
861 }
862 }
863 }
864
865 /* This function is called with (si->state == SI_ST_CON) meaning that a
866 * connection was attempted and that the file descriptor is already allocated.
867 * We must check for timeout, error and abort. Possible output states are
868 * SI_ST_CER (error), SI_ST_DIS (abort), and SI_ST_CON (no change). This only
869 * works with connection-based streams. We know that there were no I/O events
870 * when reaching this function. Timeouts and errors are *not* cleared.
871 */
872 static void sess_update_st_con_tcp(struct stream *s)
873 {
874 struct stream_interface *si = &s->si[1];
875 struct channel *req = &s->req;
876 struct channel *rep = &s->res;
877 struct conn_stream *srv_cs = objt_cs(si->end);
878 struct connection *conn = srv_cs ? srv_cs->conn : objt_conn(si->end);
879
880 DBG_TRACE_ENTER(STRM_EV_STRM_PROC|STRM_EV_SI_ST, s);
881
882 /* the client might want to abort */
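/* i.e. the response channel is already shut for writes, or a pending request
 * shutdown with nothing left to send (or "option abortonclose" set).
 */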
883 if ((rep->flags & CF_SHUTW) ||
884 ((req->flags & CF_SHUTW_NOW) &&
885 (channel_is_empty(req) || (s->be->options & PR_O_ABRT_CLOSE)))) {
886 si->flags |= SI_FL_NOLINGER;
887 si_shutw(si);
888 si->err_type |= SI_ET_CONN_ABRT;
889 if (s->srv_error)
890 s->srv_error(s, si);
891 /* Note: state = SI_ST_DIS now */
892 DBG_TRACE_STATE("client abort during connection attempt", STRM_EV_STRM_PROC|STRM_EV_SI_ST|STRM_EV_STRM_ERR, s);
893 }
894
895 /* retryable error ? */
896 else if (si->flags & (SI_FL_EXP|SI_FL_ERR)) {
897 if (!(s->flags & SF_SRV_REUSED) && conn) {
898 conn_stop_tracking(conn);
899 conn_full_close(conn);
900 }
901
902 if (!si->err_type) {
903 if (si->flags & SI_FL_ERR)
904 si->err_type = SI_ET_CONN_ERR;
905 else
906 si->err_type = SI_ET_CONN_TO;
907 }
908
909 si->state = SI_ST_CER;
910 DBG_TRACE_STATE("connection failed, retry", STRM_EV_STRM_PROC|STRM_EV_SI_ST|STRM_EV_STRM_ERR, s);
911 }
912
913 DBG_TRACE_LEAVE(STRM_EV_STRM_PROC|STRM_EV_SI_ST, s);
914 }
915
916 /* This function is called with (si->state == SI_ST_CER) meaning that a
917 * previous connection attempt has failed and that the file descriptor
918 * has already been released. Possible causes include asynchronous error
919 * notification and time out. Possible output states are SI_ST_CLO when
920 * retries are exhausted, SI_ST_TAR when a delay is wanted before a new
921 * connection attempt, SI_ST_ASS when it's wise to retry on the same server,
922 * and SI_ST_REQ when an immediate redispatch is wanted. The buffers are
923 * marked as in error state. Timeouts and errors are cleared before retrying.
924 */
925 static void sess_update_st_cer(struct stream *s)
926 {
927 struct stream_interface *si = &s->si[1];
928 struct conn_stream *cs = objt_cs(si->end);
929 struct connection *conn = cs_conn(cs);
930
931 DBG_TRACE_ENTER(STRM_EV_STRM_PROC|STRM_EV_SI_ST, s);
932
933 si->exp = TICK_ETERNITY;
934 si->flags &= ~SI_FL_EXP;
935
936 /* we probably have to release the last stream from the server */
937 if (objt_server(s->target)) {
938 health_adjust(objt_server(s->target), HANA_STATUS_L4_ERR);
939
940 if (s->flags & SF_CURR_SESS) {
941 s->flags &= ~SF_CURR_SESS;
942 _HA_ATOMIC_SUB(&__objt_server(s->target)->cur_sess, 1);
943 }
944
945 if ((si->flags & SI_FL_ERR) &&
946 conn && conn->err_code == CO_ER_SSL_MISMATCH_SNI) {
947 /* We tried to connect to a server which is configured
948 * with "verify required" and which doesn't have the
949 * "verifyhost" directive. The server presented a wrong
950 * certificate (a certificate for an unexpected name),
951 * which implies that we have used SNI in the handshake,
952 * and that the server doesn't have the associated cert
953 * and presented a default one.
954 *
955 * This is a serious enough issue not to retry. It's
956 * especially important because this wrong name might
957 * either be the result of a configuration error, and
958 * retrying will only hammer the server, or is caused
959 * by the use of a wrong SNI value, most likely
960 * provided by the client and we don't want to let the
961 * client provoke retries.
962 */
963 si->conn_retries = 0;
964 DBG_TRACE_DEVEL("Bad SSL cert, disable connection retries", STRM_EV_STRM_PROC|STRM_EV_SI_ST|STRM_EV_STRM_ERR, s);
965 }
966 }
967
968 /* ensure that we have enough retries left */
969 si->conn_retries--;
970 if (si->conn_retries < 0 || !(s->be->retry_type & PR_RE_CONN_FAILED)) {
971 if (!si->err_type) {
972 si->err_type = SI_ET_CONN_ERR;
973 }
974
975 if (objt_server(s->target))
976 _HA_ATOMIC_ADD(&objt_server(s->target)->counters.failed_conns, 1);
977 _HA_ATOMIC_ADD(&s->be->be_counters.failed_conns, 1);
978 sess_change_server(s, NULL);
979 if (may_dequeue_tasks(objt_server(s->target), s->be))
980 process_srv_queue(objt_server(s->target));
981
982 /* shutw is enough so stop a connecting socket */
983 si_shutw(si);
984 s->req.flags |= CF_WRITE_ERROR;
985 s->res.flags |= CF_READ_ERROR;
986
987 si->state = SI_ST_CLO;
988 if (s->srv_error)
989 s->srv_error(s, si);
990
991 DBG_TRACE_STATE("connection failed", STRM_EV_STRM_PROC|STRM_EV_SI_ST|STRM_EV_STRM_ERR, s);
992 goto end;
993 }
994
995 stream_choose_redispatch(s);
996
997 if (si->flags & SI_FL_ERR) {
998 /* The error was an asynchronous connection error, and we will
999 * likely have to retry connecting to the same server, most
1000 * likely leading to the same result. To avoid this, we wait
1001 * MIN(one second, connect timeout) before retrying. We don't
1002 * do it when the failure happened on a reused connection
1003 * though.
1004 */
1005
1006 int delay = 1000;
1007
1008 if (s->be->timeout.connect && s->be->timeout.connect < delay)
1009 delay = s->be->timeout.connect;
1010
1011 if (!si->err_type)
1012 si->err_type = SI_ET_CONN_ERR;
1013
1014 /* only wait when we're retrying on the same server */
1015 if ((si->state == SI_ST_ASS ||
1016 (s->be->lbprm.algo & BE_LB_KIND) != BE_LB_KIND_RR ||
1017 (s->be->srv_act <= 1)) && !(s->flags & SF_SRV_REUSED)) {
1018 si->state = SI_ST_TAR;
1019 si->exp = tick_add(now_ms, MS_TO_TICKS(delay));
1020 }
1021 si->flags &= ~SI_FL_ERR;
1022 DBG_TRACE_STATE("retry a new connection", STRM_EV_STRM_PROC|STRM_EV_SI_ST, s);
1023 }
1024
1025 end:
1026 DBG_TRACE_LEAVE(STRM_EV_STRM_PROC|STRM_EV_SI_ST, s);
1027 }
1028
1029 /* This function is called with (si->state == SI_ST_RDY) meaning that a
1030 * connection was attempted, that the file descriptor is already allocated,
1031 * and that it has succeeded. We must still check for errors and aborts.
1032 * Possible output states are SI_ST_EST (established), SI_ST_CER (error),
1033 * and SI_ST_DIS (abort). This only works with connection-based streams.
1034 * Timeouts and errors are *not* cleared.
1035 */
1036 static void sess_update_st_rdy_tcp(struct stream *s)
1037 {
1038 struct stream_interface *si = &s->si[1];
1039 struct channel *req = &s->req;
1040 struct channel *rep = &s->res;
1041 struct conn_stream *srv_cs = objt_cs(si->end);
1042 struct connection *conn = srv_cs ? srv_cs->conn : objt_conn(si->end);
1043
1044 DBG_TRACE_ENTER(STRM_EV_STRM_PROC|STRM_EV_SI_ST, s);
1045 /* We know the connection at least succeeded, though it could have
1046 * since met an error for any other reason. At least it didn't time out
1047 * even though the timeout might have been reported right after success.
1048 * We need to take care of various situations here :
1049 * - everything might be OK. We have to switch to established.
1050 * - an I/O error might have been reported after a successful transfer,
1051 * which is not retryable and needs to be logged correctly, and still
1052 * needs to switch to the established state as well
1053 * - SI_ST_CON implies !CF_WROTE_DATA but not conversely as we could
1054 * have validated a connection with incoming data (e.g. TCP with a
1055 * banner protocol), or just a successful connect() probe.
1056 * - the client might have requested a connection abort, this needs to
1057 * be checked before we decide to retry anything.
1058 */
1059
1060 /* it's still possible to handle client aborts or connection retries
1061 * before any data were sent.
1062 */
1063 if (!(req->flags & CF_WROTE_DATA)) {
1064 /* client abort ? */
1065 if ((rep->flags & CF_SHUTW) ||
1066 ((req->flags & CF_SHUTW_NOW) &&
1067 (channel_is_empty(req) || (s->be->options & PR_O_ABRT_CLOSE)))) {
1068 /* give up */
1069 si->flags |= SI_FL_NOLINGER;
1070 si_shutw(si);
1071 si->err_type |= SI_ET_CONN_ABRT;
1072 if (s->srv_error)
1073 s->srv_error(s, si);
1074 DBG_TRACE_STATE("client abort during connection attempt", STRM_EV_STRM_PROC|STRM_EV_SI_ST|STRM_EV_STRM_ERR, s);
1075 goto end;
1076 }
1077
1078 /* retryable error ? */
1079 if (si->flags & SI_FL_ERR) {
1080 if (!(s->flags & SF_SRV_REUSED) && conn) {
1081 conn_stop_tracking(conn);
1082 conn_full_close(conn);
1083 }
1084
1085 if (!si->err_type)
1086 si->err_type = SI_ET_CONN_ERR;
1087 si->state = SI_ST_CER;
1088 DBG_TRACE_STATE("connection failed, retry", STRM_EV_STRM_PROC|STRM_EV_SI_ST|STRM_EV_STRM_ERR, s);
1089 goto end;
1090 }
1091 }
1092
1093 /* data were sent and/or we had no error, sess_establish() will
1094 * now take over.
1095 */
1096 DBG_TRACE_STATE("connection established", STRM_EV_STRM_PROC|STRM_EV_SI_ST, s);
1097 si->err_type = SI_ET_NONE;
1098 si->state = SI_ST_EST;
1099
1100 end:
1101 DBG_TRACE_LEAVE(STRM_EV_STRM_PROC|STRM_EV_SI_ST, s);
1102 }
1103
1104 /*
1105 * This function handles the transition between the SI_ST_CON state and the
1106 * SI_ST_EST state. It must only be called after switching from SI_ST_CON (or
1107 * SI_ST_INI or SI_ST_RDY) to SI_ST_EST, but only when a ->proto is defined.
1108 * Note that it will switch the interface to SI_ST_DIS if we already have
1109 * the CF_SHUTR flag, it means we were able to forward the request, and
1110 * receive the response, before process_stream() had the opportunity to
1111 * make the switch from SI_ST_CON to SI_ST_EST. When that happens, we want
1112 * to go through sess_establish() anyway, to make sure the analysers run.
1113 * Timeouts are cleared. Errors are reported on the channel so that analysers
1114 * can handle them.
1115 */
1116 static void sess_establish(struct stream *s)
1117 {
1118 struct stream_interface *si = &s->si[1];
1119 struct conn_stream *srv_cs = objt_cs(si->end);
1120 struct connection *conn = srv_cs ? srv_cs->conn : objt_conn(si->end);
1121 struct channel *req = &s->req;
1122 struct channel *rep = &s->res;
1123
1124 DBG_TRACE_ENTER(STRM_EV_STRM_PROC|STRM_EV_SI_ST, s);
1125 /* First, centralize the timers information, and clear any irrelevant
1126 * timeout.
1127 */
1128 s->logs.t_connect = tv_ms_elapsed(&s->logs.tv_accept, &now);
1129 si->exp = TICK_ETERNITY;
1130 si->flags &= ~SI_FL_EXP;
1131
1132 /* errors faced after sending data need to be reported */
1133 if (si->flags & SI_FL_ERR && req->flags & CF_WROTE_DATA) {
1134 /* Don't add CF_WRITE_ERROR if we're here because
1135 * early data were rejected by the server, or
1136 * http_wait_for_response() will never be called
1137 * to send a 425.
1138 */
1139 if (conn && conn->err_code != CO_ER_SSL_EARLY_FAILED)
1140 req->flags |= CF_WRITE_ERROR;
1141 rep->flags |= CF_READ_ERROR;
1142 si->err_type = SI_ET_DATA_ERR;
1143 DBG_TRACE_STATE("read/write error", STRM_EV_STRM_PROC|STRM_EV_SI_ST|STRM_EV_STRM_ERR, s);
1144 }
1145
1146 if (objt_server(s->target))
1147 health_adjust(objt_server(s->target), HANA_STATUS_L4_OK);
1148
1149 if (s->be->mode == PR_MODE_TCP) { /* let's allow immediate data connection in this case */
1150 /* if the user wants to log as soon as possible, without counting
1151 * bytes from the server, then this is the right moment. */
1152 if (!LIST_ISEMPTY(&strm_fe(s)->logformat) && !(s->logs.logwait & LW_BYTES)) {
1153 /* note: no pend_pos here, session is established */
1154 s->logs.t_close = s->logs.t_connect; /* to get a valid end date */
1155 s->do_log(s);
1156 }
1157 }
1158 else {
1159 rep->flags |= CF_READ_DONTWAIT; /* a single read is enough to get response headers */
1160 }
1161
1162 rep->analysers |= strm_fe(s)->fe_rsp_ana | s->be->be_rsp_ana;
1163
1164 /* Be sure to filter response headers if the backend is an HTTP proxy
1165 * and if there are filters attached to the stream. */
1166 if (s->be->mode == PR_MODE_HTTP && HAS_FILTERS(s))
1167 rep->analysers |= AN_RES_FLT_HTTP_HDRS;
1168
1169 si_rx_endp_more(si);
1170 rep->flags |= CF_READ_ATTACHED; /* producer is now attached */
1171 if (objt_cs(si->end)) {
1172 /* real connections have timeouts */
1173 req->wto = s->be->timeout.server;
1174 rep->rto = s->be->timeout.server;
1175 /* The connection is now established, try to read data from the
1176 * underlying layer, and subscribe to recv events. We use a
1177 * delayed recv here to give a chance to the data to flow back
1178 * by the time we process other tasks.
1179 */
1180 si_chk_rcv(si);
1181 }
1182 req->wex = TICK_ETERNITY;
1183 /* If we managed to get the whole response, and we don't have anything
1184 * left to send, or can't, switch to SI_ST_DIS now. */
1185 if (rep->flags & (CF_SHUTR | CF_SHUTW)) {
1186 si->state = SI_ST_DIS;
1187 DBG_TRACE_STATE("response channel shutdwn for read/write", STRM_EV_STRM_PROC|STRM_EV_SI_ST|STRM_EV_STRM_ERR, s);
1188 }
1189
1190 DBG_TRACE_LEAVE(STRM_EV_STRM_PROC|STRM_EV_SI_ST, s);
1191 }
1192
1193 /* Check if the connection request is in such a state that it can be aborted. */
1194 static int check_req_may_abort(struct channel *req, struct stream *s)
1195 {
1196 return ((req->flags & (CF_READ_ERROR)) ||
1197 ((req->flags & (CF_SHUTW_NOW|CF_SHUTW)) && /* empty and client aborted */
1198 (channel_is_empty(req) || (s->be->options & PR_O_ABRT_CLOSE))));
1199 }
1200
1201 /* Update back stream interface status for input states SI_ST_ASS, SI_ST_QUE,
1202 * SI_ST_TAR. Other input states are simply ignored.
1203 * Possible output states are SI_ST_CLO, SI_ST_TAR, SI_ST_ASS, SI_ST_REQ, SI_ST_CON
1204 * and SI_ST_EST. Flags must have previously been updated for timeouts and other
1205 * conditions.
1206 */
1207 static void sess_update_stream_int(struct stream *s)
1208 {
1209 struct server *srv = objt_server(s->target);
1210 struct stream_interface *si = &s->si[1];
1211 struct channel *req = &s->req;
1212
1213 DBG_TRACE_ENTER(STRM_EV_STRM_PROC|STRM_EV_SI_ST, s);
1214
1215 if (si->state == SI_ST_ASS) {
1216 /* Server assigned to connection request, we have to try to connect now */
1217 int conn_err;
1218
1219 /* Before we try to initiate the connection, see if the
1220 * request may be aborted instead.
1221 */
1222 if (check_req_may_abort(req, s)) {
1223 si->err_type |= SI_ET_CONN_ABRT;
1224 DBG_TRACE_STATE("connection aborted", STRM_EV_STRM_PROC|STRM_EV_SI_ST|STRM_EV_STRM_ERR, s);
1225 goto abort_connection;
1226 }
1227
1228 conn_err = connect_server(s);
1229 srv = objt_server(s->target);
1230
1231 if (conn_err == SF_ERR_NONE) {
1232 /* state = SI_ST_CON or SI_ST_EST now */
1233 if (srv)
1234 srv_inc_sess_ctr(srv);
1235 if (srv)
1236 srv_set_sess_last(srv);
1237 DBG_TRACE_STATE("connection attempt", STRM_EV_STRM_PROC|STRM_EV_SI_ST, s);
1238 goto end;
1239 }
1240
1241 /* We have received a synchronous error. We might have to
1242 * abort, retry immediately or redispatch.
1243 */
1244 if (conn_err == SF_ERR_INTERNAL) {
1245 if (!si->err_type) {
1246 si->err_type = SI_ET_CONN_OTHER;
1247 }
1248
1249 if (srv)
1250 srv_inc_sess_ctr(srv);
1251 if (srv)
1252 srv_set_sess_last(srv);
1253 if (srv)
1254 _HA_ATOMIC_ADD(&srv->counters.failed_conns, 1);
1255 _HA_ATOMIC_ADD(&s->be->be_counters.failed_conns, 1);
1256
1257 /* release other streams waiting for this server */
1258 sess_change_server(s, NULL);
1259 if (may_dequeue_tasks(srv, s->be))
1260 process_srv_queue(srv);
1261
1262 /* Failed and not retryable. */
1263 si_shutr(si);
1264 si_shutw(si);
1265 req->flags |= CF_WRITE_ERROR;
1266
1267 s->logs.t_queue = tv_ms_elapsed(&s->logs.tv_accept, &now);
1268
1269 /* we may need to know the position in the queue for logging */
1270 pendconn_cond_unlink(s->pend_pos);
1271
1272 /* no stream was ever accounted for this server */
1273 si->state = SI_ST_CLO;
1274 if (s->srv_error)
1275 s->srv_error(s, si);
1276 DBG_TRACE_STATE("internal error during connection", STRM_EV_STRM_PROC|STRM_EV_SI_ST|STRM_EV_STRM_ERR, s);
1277 goto end;
1278 }
1279
1280 /* We are facing a retryable error, but we don't want to run a
1281 * turn-around now, as the problem is likely a source port
1282 * allocation problem, so we want to retry now.
1283 */
1284 si->state = SI_ST_CER;
1285 si->flags &= ~SI_FL_ERR;
1286 sess_update_st_cer(s);
1287
1288 DBG_TRACE_STATE("connection error, retry", STRM_EV_STRM_PROC|STRM_EV_SI_ST|STRM_EV_STRM_ERR, s);
1289 /* now si->state is one of SI_ST_CLO, SI_ST_TAR, SI_ST_ASS, SI_ST_REQ */
1290 }
1291 else if (si->state == SI_ST_QUE) {
1292 /* connection request was queued, check for any update */
1293 if (!pendconn_dequeue(s)) {
1294 /* The connection is not in the queue anymore. Either
1295 * we have a server connection slot available and we
1296 * go directly to the assigned state, or we need to
1297 * load-balance first and go to the INI state.
1298 */
1299 si->exp = TICK_ETERNITY;
1300 if (unlikely(!(s->flags & SF_ASSIGNED)))
1301 si->state = SI_ST_REQ;
1302 else {
1303 s->logs.t_queue = tv_ms_elapsed(&s->logs.tv_accept, &now);
1304 si->state = SI_ST_ASS;
1305 }
1306 DBG_TRACE_STATE("dequeue connection request", STRM_EV_STRM_PROC|STRM_EV_SI_ST, s);
1307 goto end;
1308 }
1309
1310 /* Connection request still in queue... */
1311 if (si->flags & SI_FL_EXP) {
1312 /* ... and timeout expired */
1313 si->exp = TICK_ETERNITY;
1314 si->flags &= ~SI_FL_EXP;
1315 s->logs.t_queue = tv_ms_elapsed(&s->logs.tv_accept, &now);
1316
1317 /* we may need to know the position in the queue for logging */
1318 pendconn_cond_unlink(s->pend_pos);
1319
1320 if (srv)
1321 _HA_ATOMIC_ADD(&srv->counters.failed_conns, 1);
1322 _HA_ATOMIC_ADD(&s->be->be_counters.failed_conns, 1);
1323 si_shutr(si);
1324 si_shutw(si);
1325 req->flags |= CF_WRITE_TIMEOUT;
1326 if (!si->err_type)
1327 si->err_type = SI_ET_QUEUE_TO;
1328 si->state = SI_ST_CLO;
1329 if (s->srv_error)
1330 s->srv_error(s, si);
1331 DBG_TRACE_STATE("connection request still queued", STRM_EV_STRM_PROC|STRM_EV_SI_ST, s);
1332 goto end;
1333 }
1334
1335 /* Connection remains in queue, check if we have to abort it */
1336 if (check_req_may_abort(req, s)) {
1337 s->logs.t_queue = tv_ms_elapsed(&s->logs.tv_accept, &now);
1338
1339 /* we may need to know the position in the queue for logging */
1340 pendconn_cond_unlink(s->pend_pos);
1341
1342 si->err_type |= SI_ET_QUEUE_ABRT;
1343 DBG_TRACE_STATE("abort queued connection request", STRM_EV_STRM_PROC|STRM_EV_SI_ST|STRM_EV_STRM_ERR, s);
1344 goto abort_connection;
1345 }
1346
1347 /* Nothing changed */
1348 }
1349 else if (si->state == SI_ST_TAR) {
1350 /* Connection request might be aborted */
1351 if (check_req_may_abort(req, s)) {
1352 si->err_type |= SI_ET_CONN_ABRT;
1353 DBG_TRACE_STATE("connection aborted", STRM_EV_STRM_PROC|STRM_EV_SI_ST|STRM_EV_STRM_ERR, s);
1354 goto abort_connection;
1355 }
1356
1357 if (!(si->flags & SI_FL_EXP))
1358 return; /* still in turn-around */
1359
1360 si->flags &= ~SI_FL_EXP;
1361 si->exp = TICK_ETERNITY;
1362
1363 /* we keep trying on the same server as long as the stream is
1364 * marked "assigned".
1365 * FIXME: Should we force a redispatch attempt when the server is down ?
1366 */
1367 if (s->flags & SF_ASSIGNED)
1368 si->state = SI_ST_ASS;
1369 else
1370 si->state = SI_ST_REQ;
1371
1372 DBG_TRACE_STATE("retry connection now", STRM_EV_STRM_PROC|STRM_EV_SI_ST, s);
1373 }
1374
1375 end:
1376 DBG_TRACE_LEAVE(STRM_EV_STRM_PROC|STRM_EV_SI_ST, s);
1377 return;
1378
1379 abort_connection:
1380 /* give up */
1381 si->exp = TICK_ETERNITY;
1382 si->flags &= ~SI_FL_EXP;
1383 si_shutr(si);
1384 si_shutw(si);
1385 si->state = SI_ST_CLO;
1386 if (s->srv_error)
1387 s->srv_error(s, si);
1388 DBG_TRACE_DEVEL("leaving on error", STRM_EV_STRM_PROC|STRM_EV_SI_ST|STRM_EV_STRM_ERR, s);
1389 return;
1390 }
1391
1392 /* Set correct stream termination flags in case no analyser has done it. It
1393 * also counts a failed request if the server state has not reached the request
1394 * stage.
1395 */
1396 static void sess_set_term_flags(struct stream *s)
1397 {
1398 if (!(s->flags & SF_FINST_MASK)) {
1399 if (s->si[1].state == SI_ST_INI) {
1400 /* anything before REQ in fact */
1401 _HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.failed_req, 1);
1402 if (strm_li(s) && strm_li(s)->counters)
1403 _HA_ATOMIC_ADD(&strm_li(s)->counters->failed_req, 1);
1404
1405 s->flags |= SF_FINST_R;
1406 }
1407 else if (s->si[1].state == SI_ST_QUE)
1408 s->flags |= SF_FINST_Q;
1409 else if (si_state_in(s->si[1].state, SI_SB_REQ|SI_SB_TAR|SI_SB_ASS|SI_SB_CON|SI_SB_CER|SI_SB_RDY))
1410 s->flags |= SF_FINST_C;
1411 else if (s->si[1].state == SI_ST_EST || s->si[1].prev_state == SI_ST_EST)
1412 s->flags |= SF_FINST_D;
1413 else
1414 s->flags |= SF_FINST_L;
1415 }
1416 }
1417
1418 /* This function initiates a server connection request on a stream interface
1419 * already in SI_ST_REQ state. Upon success, the state goes to SI_ST_ASS for
1420 * a real connection to a server, indicating that a server has been assigned,
1421 * or SI_ST_EST for a successful connection to an applet. It may also return
1422 * SI_ST_QUE, or SI_ST_CLO upon error.
1423 */
1424 static void sess_prepare_conn_req(struct stream *s)
1425 {
1426 struct stream_interface *si = &s->si[1];
1427
1428 if (si->state != SI_ST_REQ)
1429 return;
1430
1431 DBG_TRACE_ENTER(STRM_EV_STRM_PROC|STRM_EV_SI_ST, s);
1432
1433 if (unlikely(obj_type(s->target) == OBJ_TYPE_APPLET)) {
1434 /* the applet directly goes to the EST state */
1435 struct appctx *appctx = objt_appctx(si->end);
1436
1437 if (!appctx || appctx->applet != __objt_applet(s->target))
1438 appctx = si_register_handler(si, objt_applet(s->target));
1439
1440 if (!appctx) {
1441 /* No more memory, let's immediately abort. Force the
1442 * error code to ignore the ERR_LOCAL which is not a
1443 * real error.
1444 */
1445 s->flags &= ~(SF_ERR_MASK | SF_FINST_MASK);
1446
1447 si_shutr(si);
1448 si_shutw(si);
1449 s->req.flags |= CF_WRITE_ERROR;
1450 si->err_type = SI_ET_CONN_RES;
1451 si->state = SI_ST_CLO;
1452 if (s->srv_error)
1453 s->srv_error(s, si);
1454 DBG_TRACE_STATE("failed to register applet", STRM_EV_STRM_PROC|STRM_EV_SI_ST|STRM_EV_STRM_ERR, s);
1455 goto end;
1456 }
1457
1458 if (tv_iszero(&s->logs.tv_request))
1459 s->logs.tv_request = now;
1460 s->logs.t_queue = tv_ms_elapsed(&s->logs.tv_accept, &now);
1461 si->state = SI_ST_EST;
1462 si->err_type = SI_ET_NONE;
1463 be_set_sess_last(s->be);
1464
1465 DBG_TRACE_STATE("applet registered", STRM_EV_STRM_PROC|STRM_EV_SI_ST, s);
1466 /* let sess_establish() finish the job */
1467 goto end;
1468 }
1469
1470 /* Try to assign a server */
1471 if (srv_redispatch_connect(s) != 0) {
1472 /* We did not get a server. Either we queued the
1473 * connection request, or we encountered an error.
1474 */
1475 if (si->state == SI_ST_QUE) {
1476 DBG_TRACE_STATE("connection request queued", STRM_EV_STRM_PROC|STRM_EV_SI_ST, s);
1477 goto end;
1478 }
1479
1480 /* we did not get any server, let's check the cause */
1481 si_shutr(si);
1482 si_shutw(si);
1483 s->req.flags |= CF_WRITE_ERROR;
1484 if (!si->err_type)
1485 si->err_type = SI_ET_CONN_OTHER;
1486 si->state = SI_ST_CLO;
1487 if (s->srv_error)
1488 s->srv_error(s, si);
1489 DBG_TRACE_STATE("connection request failed", STRM_EV_STRM_PROC|STRM_EV_SI_ST|STRM_EV_STRM_ERR, s);
1490 goto end;
1491 }
1492
1493 /* The server is assigned */
1494 s->logs.t_queue = tv_ms_elapsed(&s->logs.tv_accept, &now);
1495 si->state = SI_ST_ASS;
1496 be_set_sess_last(s->be);
1497 DBG_TRACE_STATE("connection request assigned to a server", STRM_EV_STRM_PROC|STRM_EV_SI_ST, s);
1498
1499 end:
1500 DBG_TRACE_LEAVE(STRM_EV_STRM_PROC|STRM_EV_SI_ST, s);
1501 }
1502
1503 /* This function handles the use-service action ruleset. It executes
1504 * the associated ACL and sets an applet as the stream or txn final node.
1505 * It returns ACT_RET_ERR if an error occurs, with the proxy left in a
1506 * consistent state. It returns ACT_RET_STOP on success because
1507 * use-service must be a terminal action. It returns ACT_RET_YIELD
1508 * if the initialisation function requires more data.
1509 */
1510 enum act_return process_use_service(struct act_rule *rule, struct proxy *px,
1511 struct session *sess, struct stream *s, int flags)
1512
1513 {
1514 struct appctx *appctx;
1515
1516 /* Initialises the applet if it is required. */
1517 if (flags & ACT_FLAG_FIRST) {
1518 /* Register the applet. This function schedules the applet. */
1519 s->target = &rule->applet.obj_type;
1520 if (unlikely(!si_register_handler(&s->si[1], objt_applet(s->target))))
1521 return ACT_RET_ERR;
1522
1523 /* Initialise the context. */
1524 appctx = si_appctx(&s->si[1]);
1525 memset(&appctx->ctx, 0, sizeof(appctx->ctx));
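/* keep a pointer to the rule so that the applet can later retrieve its own
 * configuration from it.
 */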
1526 appctx->rule = rule;
1527 }
1528 else
1529 appctx = si_appctx(&s->si[1]);
1530
1531 /* Stop the applet scheduling in case the init function is still
1532 * missing some data.
1533 */
1534 si_stop_get(&s->si[1]);
1535
1536 /* Call initialisation. */
1537 if (rule->applet.init)
1538 switch (rule->applet.init(appctx, px, s)) {
1539 case 0: return ACT_RET_ERR;
1540 case 1: break;
1541 default: return ACT_RET_YIELD;
1542 }
1543
1544 if (rule->from != ACT_F_HTTP_REQ) {
1545 if (sess->fe == s->be) /* report it if the request was intercepted by the frontend */
1546 _HA_ATOMIC_ADD(&sess->fe->fe_counters.intercepted_req, 1);
1547
1548 /* The SF_ASSIGNED flag prevents any server assignment. */
1549 s->flags |= SF_ASSIGNED;
1550 }
1551
1552 /* Now we can schedule the applet. */
1553 si_cant_get(&s->si[1]);
1554 appctx_wakeup(appctx);
1555 return ACT_RET_STOP;
1556 }
1557
1558 /* This stream analyser checks the switching rules and changes the backend
1559 * if appropriate. The default_backend rule is also considered, then the
1560 * target backend's forced persistence rules are also evaluated last if any.
1561 * It returns 1 if the processing can continue on next analysers, or zero if it
1562 * either needs more data or wants to immediately abort the request.
1563 */
1564 static int process_switching_rules(struct stream *s, struct channel *req, int an_bit)
1565 {
1566 struct persist_rule *prst_rule;
1567 struct session *sess = s->sess;
1568 struct proxy *fe = sess->fe;
1569
1570 req->analysers &= ~an_bit;
1571 req->analyse_exp = TICK_ETERNITY;
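/* this analyser runs only once per request, so clear its bit right away */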
1572
1573 DBG_TRACE_ENTER(STRM_EV_STRM_ANA, s);
1574
1575 /* now check whether we have some switching rules for this request */
1576 if (!(s->flags & SF_BE_ASSIGNED)) {
1577 struct switching_rule *rule;
1578
1579 list_for_each_entry(rule, &fe->switching_rules, list) {
1580 int ret = 1;
1581
1582 if (rule->cond) {
1583 ret = acl_exec_cond(rule->cond, fe, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL);
1584 ret = acl_pass(ret);
1585 if (rule->cond->pol == ACL_COND_UNLESS)
1586 ret = !ret;
1587 }
1588
1589 if (ret) {
1590 /* If the backend name is dynamic, try to resolve the name.
1591 * If we can't resolve the name, or if any error occurs, break
1592 * the loop and fall back to the default backend.
1593 */
1594 struct proxy *backend = NULL;
1595
1596 if (rule->dynamic) {
1597 struct buffer *tmp;
1598
1599 tmp = alloc_trash_chunk();
1600 if (!tmp)
1601 goto sw_failed;
1602
1603 if (build_logline(s, tmp->area, tmp->size, &rule->be.expr))
1604 backend = proxy_be_by_name(tmp->area);
1605
1606 free_trash_chunk(tmp);
1607 tmp = NULL;
1608
1609 if (!backend)
1610 break;
1611 }
1612 else
1613 backend = rule->be.backend;
1614
1615 if (!stream_set_backend(s, backend))
1616 goto sw_failed;
1617 break;
1618 }
1619 }
1620
1621 /* To ensure correct connection accounting on the backend, we
1622 * have to assign one if it was not set (eg: a listen). This
1623 * measure also takes care of correctly setting the default
1624 * backend if any.
1625 */
1626 if (!(s->flags & SF_BE_ASSIGNED))
1627 if (!stream_set_backend(s, fe->defbe.be ? fe->defbe.be : s->be))
1628 goto sw_failed;
1629 }
1630
1631 /* we don't want to run the TCP or HTTP filters again if the backend has not changed */
1632 if (fe == s->be) {
1633 s->req.analysers &= ~AN_REQ_INSPECT_BE;
1634 s->req.analysers &= ~AN_REQ_HTTP_PROCESS_BE;
1635 s->req.analysers &= ~AN_REQ_FLT_START_BE;
1636 }
1637
1638 /* as soon as we know the backend, we must check if we have a matching forced or ignored
1639 * persistence rule, and report that in the stream.
1640 */
1641 list_for_each_entry(prst_rule, &s->be->persist_rules, list) {
1642 int ret = 1;
1643
1644 if (prst_rule->cond) {
1645 ret = acl_exec_cond(prst_rule->cond, s->be, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL);
1646 ret = acl_pass(ret);
1647 if (prst_rule->cond->pol == ACL_COND_UNLESS)
1648 ret = !ret;
1649 }
1650
1651 if (ret) {
1652 /* no rule, or the rule matches */
1653 if (prst_rule->type == PERSIST_TYPE_FORCE) {
1654 s->flags |= SF_FORCE_PRST;
1655 } else {
1656 s->flags |= SF_IGNORE_PRST;
1657 }
1658 break;
1659 }
1660 }
1661
1662 DBG_TRACE_LEAVE(STRM_EV_STRM_ANA, s);
1663 return 1;
1664
1665 sw_failed:
1666 /* immediately abort this request in case of allocation failure */
1667 channel_abort(&s->req);
1668 channel_abort(&s->res);
1669
1670 if (!(s->flags & SF_ERR_MASK))
1671 s->flags |= SF_ERR_RESOURCE;
1672 if (!(s->flags & SF_FINST_MASK))
1673 s->flags |= SF_FINST_R;
1674
1675 if (s->txn)
1676 s->txn->status = 500;
1677 s->req.analysers &= AN_REQ_FLT_END;
1678 s->req.analyse_exp = TICK_ETERNITY;
1679 DBG_TRACE_DEVEL("leaving on error", STRM_EV_STRM_ANA|STRM_EV_STRM_ERR, s);
1680 return 0;
1681 }
1682
1683 /* This stream analyser works on a request. It applies all use-server rules
1684 * to it. The data must already be present in the buffer, otherwise
1685 * they won't match. It always returns 1.
1686 */
1687 static int process_server_rules(struct stream *s, struct channel *req, int an_bit)
1688 {
1689 struct proxy *px = s->be;
1690 struct session *sess = s->sess;
1691 struct server_rule *rule;
1692
1693 DBG_TRACE_ENTER(STRM_EV_STRM_ANA, s);
1694
1695 if (!(s->flags & SF_ASSIGNED)) {
1696 list_for_each_entry(rule, &px->server_rules, list) {
1697 int ret;
1698
1699 ret = acl_exec_cond(rule->cond, s->be, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL);
1700 ret = acl_pass(ret);
1701 if (rule->cond->pol == ACL_COND_UNLESS)
1702 ret = !ret;
1703
1704 if (ret) {
1705 struct server *srv = rule->srv.ptr;
1706
1707 if ((srv->cur_state != SRV_ST_STOPPED) ||
1708 (px->options & PR_O_PERSIST) ||
1709 (s->flags & SF_FORCE_PRST)) {
1710 s->flags |= SF_DIRECT | SF_ASSIGNED;
1711 s->target = &srv->obj_type;
1712 break;
1713 }
1714 /* if the server is not UP, let's go on with next rules
1715 * just in case another one is suited.
1716 */
1717 }
1718 }
1719 }
1720
1721 req->analysers &= ~an_bit;
1722 req->analyse_exp = TICK_ETERNITY;
1723 DBG_TRACE_LEAVE(STRM_EV_STRM_ANA, s);
1724 return 1;
1725 }
1726
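/* Looks up the server which was previously associated with stick-table entry
 * <ts> in table <t>, first by the stored server name, then by the stored
 * server ID. If a usable server is found (still running, or persistence is
 * forced or allowed by the proxy options), it becomes the stream's target and
 * the SF_DIRECT and SF_ASSIGNED flags are set.
 */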
1727 static inline void sticking_rule_find_target(struct stream *s,
1728 struct stktable *t, struct stksess *ts)
1729 {
1730 struct proxy *px = s->be;
1731 struct eb32_node *node;
1732 struct dict_entry *de;
1733 void *ptr;
1734 struct server *srv;
1735
1736 /* Look for the server name previously stored in <t> stick-table */
1737 HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &ts->lock);
1738 ptr = __stktable_data_ptr(t, ts, STKTABLE_DT_SERVER_NAME);
1739 de = stktable_data_cast(ptr, server_name);
1740 HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ts->lock);
1741
1742 if (de) {
1743 struct ebpt_node *name;
1744
1745 name = ebis_lookup(&px->conf.used_server_name, de->value.key);
1746 if (name) {
1747 srv = container_of(name, struct server, conf.name);
1748 goto found;
1749 }
1750 }
1751
1752 /* Look for the server ID */
1753 HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &ts->lock);
1754 ptr = __stktable_data_ptr(t, ts, STKTABLE_DT_SERVER_ID);
1755 node = eb32_lookup(&px->conf.used_server_id, stktable_data_cast(ptr, server_id));
1756 HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ts->lock);
1757
1758 if (!node)
1759 return;
1760
1761 srv = container_of(node, struct server, conf.id);
1762 found:
1763 if ((srv->cur_state != SRV_ST_STOPPED) ||
1764 (px->options & PR_O_PERSIST) || (s->flags & SF_FORCE_PRST)) {
1765 s->flags |= SF_DIRECT | SF_ASSIGNED;
1766 s->target = &srv->obj_type;
1767 }
1768 }
1769
1770 /* This stream analyser works on a request. It applies all sticking rules
1771 * to it. The data must already be present in the buffer, otherwise
1772 * they won't match. It always returns 1.
1773 */
1774 static int process_sticking_rules(struct stream *s, struct channel *req, int an_bit)
1775 {
1776 struct proxy *px = s->be;
1777 struct session *sess = s->sess;
1778 struct sticking_rule *rule;
1779
1780 DBG_TRACE_ENTER(STRM_EV_STRM_ANA, s);
1781
1782 list_for_each_entry(rule, &px->sticking_rules, list) {
1783 int ret = 1;
1784 int i;
1785
1786 /* Only the first stick store-request of each table is applied
1787 * and other ones are ignored. The purpose is to allow complex
1788 * configurations which look for multiple entries by decreasing
1789 * order of precision and to stop at the first which matches.
1790 * An example could be a store of the IP address from an HTTP
1791 * header first, then from the source if not found.
1792 */
1793 if (rule->flags & STK_IS_STORE) {
1794 for (i = 0; i < s->store_count; i++) {
1795 if (rule->table.t == s->store[i].table)
1796 break;
1797 }
1798
1799 if (i != s->store_count)
1800 continue;
1801 }
1802
1803 if (rule->cond) {
1804 ret = acl_exec_cond(rule->cond, px, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL);
1805 ret = acl_pass(ret);
1806 if (rule->cond->pol == ACL_COND_UNLESS)
1807 ret = !ret;
1808 }
1809
1810 if (ret) {
1811 struct stktable_key *key;
1812
1813 key = stktable_fetch_key(rule->table.t, px, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL, rule->expr, NULL);
1814 if (!key)
1815 continue;
1816
1817 if (rule->flags & STK_IS_MATCH) {
1818 struct stksess *ts;
1819
1820 if ((ts = stktable_lookup_key(rule->table.t, key)) != NULL) {
1821 if (!(s->flags & SF_ASSIGNED))
1822 sticking_rule_find_target(s, rule->table.t, ts);
1823 stktable_touch_local(rule->table.t, ts, 1);
1824 }
1825 }
1826 if (rule->flags & STK_IS_STORE) {
1827 if (s->store_count < (sizeof(s->store) / sizeof(s->store[0]))) {
1828 struct stksess *ts;
1829
1830 ts = stksess_new(rule->table.t, key);
1831 if (ts) {
1832 s->store[s->store_count].table = rule->table.t;
1833 s->store[s->store_count++].ts = ts;
1834 }
1835 }
1836 }
1837 }
1838 }
1839
1840 req->analysers &= ~an_bit;
1841 req->analyse_exp = TICK_ETERNITY;
1842 DBG_TRACE_LEAVE(STRM_EV_STRM_ANA, s);
1843 return 1;
1844 }
1845
1846 /* This stream analyser works on a response. It applies all store rules
1847 * to it. The data must already be present in the buffer, otherwise
1848 * they won't match. It always returns 1.
1849 */
1850 static int process_store_rules(struct stream *s, struct channel *rep, int an_bit)
1851 {
1852 struct proxy *px = s->be;
1853 struct session *sess = s->sess;
1854 struct sticking_rule *rule;
1855 int i;
1856 int nbreq = s->store_count;
1857
1858 DBG_TRACE_ENTER(STRM_EV_STRM_ANA, s);
1859
1860 list_for_each_entry(rule, &px->storersp_rules, list) {
1861 int ret = 1;
1862
1863 /* Only the first stick store-response of each table is applied
1864 * and other ones are ignored. The purpose is to allow complex
1865 * configurations which look for multiple entries by decreasing
1866 * order of precision and to stop at the first which matches.
1867 * An example could be a store of a set-cookie value, with a
1868 * fallback to a parameter found in a 302 redirect.
1869 *
1870 * The store-response rules are not allowed to override the
1871 * store-request rules for the same table, but they may coexist.
1872 * Thus we can have up to one store-request entry and one store-
1873 * response entry for the same table at any time.
1874 */
1875 for (i = nbreq; i < s->store_count; i++) {
1876 if (rule->table.t == s->store[i].table)
1877 break;
1878 }
1879
1880 /* skip existing entries for this table */
1881 if (i < s->store_count)
1882 continue;
1883
1884 if (rule->cond) {
1885 ret = acl_exec_cond(rule->cond, px, sess, s, SMP_OPT_DIR_RES|SMP_OPT_FINAL);
1886 ret = acl_pass(ret);
1887 if (rule->cond->pol == ACL_COND_UNLESS)
1888 ret = !ret;
1889 }
1890
1891 if (ret) {
1892 struct stktable_key *key;
1893
1894 key = stktable_fetch_key(rule->table.t, px, sess, s, SMP_OPT_DIR_RES|SMP_OPT_FINAL, rule->expr, NULL);
1895 if (!key)
1896 continue;
1897
1898 if (s->store_count < (sizeof(s->store) / sizeof(s->store[0]))) {
1899 struct stksess *ts;
1900
1901 ts = stksess_new(rule->table.t, key);
1902 if (ts) {
1903 s->store[s->store_count].table = rule->table.t;
1904 s->store[s->store_count++].ts = ts;
1905 }
1906 }
1907 }
1908 }
1909
1910 /* process store request and store response */
1911 for (i = 0; i < s->store_count; i++) {
1912 struct stksess *ts;
1913 void *ptr;
1914 struct dict_entry *de;
1915
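/* servers marked as non-stick (presumably via the "non-stick" server keyword,
 * which sets SRV_F_NON_STICK) must not create any entry; just release the
 * pending one.
 */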
1916 if (objt_server(s->target) && objt_server(s->target)->flags & SRV_F_NON_STICK) {
1917 stksess_free(s->store[i].table, s->store[i].ts);
1918 s->store[i].ts = NULL;
1919 continue;
1920 }
1921
1922 ts = stktable_set_entry(s->store[i].table, s->store[i].ts);
1923 if (ts != s->store[i].ts) {
1924 /* the entry already existed, we can free ours */
1925 stksess_free(s->store[i].table, s->store[i].ts);
1926 }
1927 s->store[i].ts = NULL;
1928
1929 HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
1930 ptr = __stktable_data_ptr(s->store[i].table, ts, STKTABLE_DT_SERVER_ID);
1931 stktable_data_cast(ptr, server_id) = __objt_server(s->target)->puid;
1932 HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
1933
1934 HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
1935 de = dict_insert(&server_name_dict, __objt_server(s->target)->id);
1936 if (de) {
1937 ptr = __stktable_data_ptr(s->store[i].table, ts, STKTABLE_DT_SERVER_NAME);
1938 stktable_data_cast(ptr, server_name) = de;
1939 }
1940 HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
1941
1942 stktable_touch_local(s->store[i].table, ts, 1);
1943 }
1944 s->store_count = 0; /* everything is stored */
1945
1946 rep->analysers &= ~an_bit;
1947 rep->analyse_exp = TICK_ETERNITY;
1948
1949 DBG_TRACE_LEAVE(STRM_EV_STRM_ANA, s);
1950 return 1;
1951 }
1952
1953 /* This macro is very specific to the function below. See the comments in
1954 * process_stream() below to understand the logic and the tests.
1955 */
1956 #define UPDATE_ANALYSERS(real, list, back, flag) { \
1957 list = (((list) & ~(flag)) | ~(back)) & (real); \
1958 back = real; \
1959 if (!(list)) \
1960 break; \
1961 if (((list) ^ ((list) & ((list) - 1))) < (flag)) \
1962 continue; \
1963 }
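/* Reading aid for the macro above: ((list) ^ ((list) & ((list) - 1))) isolates
 * the lowest bit set in <list> (e.g. for list = 0x05, list - 1 = 0x04,
 * list & (list - 1) = 0x04, and the XOR leaves 0x01). So when the lowest
 * analyser bit enabled during the call is lower than the current flag, an
 * earlier analyser was re-enabled and the loop restarts from it ("continue");
 * otherwise evaluation simply goes on with the next macro. This matches the
 * detailed explanation given in process_stream() below.
 */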
1964
1965 /* The two following macros call an analyzer for the specified channel if the
1966 * right flag is set. The first one is used for "filterable" analyzers. If a
1967 * stream has some registered filters, pre and post analyze callbacks are
1968 * called. The second one is used for the other analyzers (AN_REQ/RES_FLT_* and
1969 * AN_REQ/RES_HTTP_XFER_BODY). */
1970 #define FLT_ANALYZE(strm, chn, fun, list, back, flag, ...) \
1971 { \
1972 if ((list) & (flag)) { \
1973 if (HAS_FILTERS(strm)) { \
1974 if (!flt_pre_analyze((strm), (chn), (flag))) \
1975 break; \
1976 if (!fun((strm), (chn), (flag), ##__VA_ARGS__)) \
1977 break; \
1978 if (!flt_post_analyze((strm), (chn), (flag))) \
1979 break; \
1980 } \
1981 else { \
1982 if (!fun((strm), (chn), (flag), ##__VA_ARGS__)) \
1983 break; \
1984 } \
1985 UPDATE_ANALYSERS((chn)->analysers, (list), \
1986 (back), (flag)); \
1987 } \
1988 }
1989
1990 #define ANALYZE(strm, chn, fun, list, back, flag, ...) \
1991 { \
1992 if ((list) & (flag)) { \
1993 if (!fun((strm), (chn), (flag), ##__VA_ARGS__)) \
1994 break; \
1995 UPDATE_ANALYSERS((chn)->analysers, (list), \
1996 (back), (flag)); \
1997 } \
1998 }
1999
2000 /* Processes the client, server, request and response jobs of a stream task,
2001 * then puts it back to the wait queue in a clean state, or cleans up its
2002 * resources if it must be deleted. It returns the task itself with ->expire
2003 * set to the next wake-up date (or TICK_ETERNITY), or NULL if the stream was
2004 * released. In order not to call all functions needlessly, the request and
2005 * response buffer flags are monitored and each function is called only if at
2006 * least one other function has changed at least one flag it is interested in.
2007 */
2008 struct task *process_stream(struct task *t, void *context, unsigned short state)
2009 {
2010 struct server *srv;
2011 struct stream *s = context;
2012 struct session *sess = s->sess;
2013 unsigned int rqf_last, rpf_last;
2014 unsigned int rq_prod_last, rq_cons_last;
2015 unsigned int rp_cons_last, rp_prod_last;
2016 unsigned int req_ana_back;
2017 struct channel *req, *res;
2018 struct stream_interface *si_f, *si_b;
2019 unsigned int rate;
2020
2021 DBG_TRACE_ENTER(STRM_EV_STRM_PROC, s);
2022
2023 activity[tid].stream++;
2024
2025 req = &s->req;
2026 res = &s->res;
2027
2028 si_f = &s->si[0];
2029 si_b = &s->si[1];
2030
2031 /* First, attempt to receive pending data from I/O layers */
2032 si_sync_recv(si_f);
2033 si_sync_recv(si_b);
2034
2035 rate = update_freq_ctr(&s->call_rate, 1);
2036 if (rate >= 100000 && s->call_rate.prev_ctr) { // make sure to wait at least a full second
2037 stream_dump_and_crash(&s->obj_type, read_freq_ctr(&s->call_rate));
2038 }
2039
2040 /* this data may be no longer valid, clear it */
2041 if (s->txn)
2042 memset(&s->txn->auth, 0, sizeof(s->txn->auth));
2043
2044 /* These flags must explicitly be set again by their users on every pass, so clear them now */
2045 req->flags &= ~(CF_READ_NOEXP|CF_WAKE_WRITE);
2046 res->flags &= ~(CF_READ_NOEXP|CF_WAKE_WRITE);
2047
2048 /* Keep a copy of req/rep flags so that we can detect shutdowns */
2049 rqf_last = req->flags & ~CF_MASK_ANALYSER;
2050 rpf_last = res->flags & ~CF_MASK_ANALYSER;
2051
2052 /* we don't want the stream interface functions to recursively wake us up */
2053 si_f->flags |= SI_FL_DONT_WAKE;
2054 si_b->flags |= SI_FL_DONT_WAKE;
2055
2056 /* update pending events */
2057 s->pending_events |= (state & TASK_WOKEN_ANY);
2058
2059 /* 1a: Check for low level timeouts if needed. We just set a flag on
2060 * stream interfaces when their timeouts have expired.
2061 */
2062 if (unlikely(s->pending_events & TASK_WOKEN_TIMER)) {
2063 si_check_timeouts(si_f);
2064 si_check_timeouts(si_b);
2065
2066 /* check channel timeouts, and close the corresponding stream interfaces
2067 * for future reads or writes. Note: this will also concern upper layers
2068 * but we do not touch any other flag. We must be careful and correctly
2069 * detect state changes when calling them.
2070 */
2071
2072 channel_check_timeouts(req);
2073
2074 if (unlikely((req->flags & (CF_SHUTW|CF_WRITE_TIMEOUT)) == CF_WRITE_TIMEOUT)) {
2075 si_b->flags |= SI_FL_NOLINGER;
2076 si_shutw(si_b);
2077 }
2078
2079 if (unlikely((req->flags & (CF_SHUTR|CF_READ_TIMEOUT)) == CF_READ_TIMEOUT)) {
2080 if (si_f->flags & SI_FL_NOHALF)
2081 si_f->flags |= SI_FL_NOLINGER;
2082 si_shutr(si_f);
2083 }
2084
2085 channel_check_timeouts(res);
2086
2087 if (unlikely((res->flags & (CF_SHUTW|CF_WRITE_TIMEOUT)) == CF_WRITE_TIMEOUT)) {
2088 si_f->flags |= SI_FL_NOLINGER;
2089 si_shutw(si_f);
2090 }
2091
2092 if (unlikely((res->flags & (CF_SHUTR|CF_READ_TIMEOUT)) == CF_READ_TIMEOUT)) {
2093 if (si_b->flags & SI_FL_NOHALF)
2094 si_b->flags |= SI_FL_NOLINGER;
2095 si_shutr(si_b);
2096 }
2097
2098 if (HAS_FILTERS(s))
2099 flt_stream_check_timeouts(s);
2100
2101 /* Once in a while we're woken up because the task expires. But
2102 * this does not necessarily mean that a timeout has been reached.
2103 * So let's not run a whole stream processing if only an expiration
2104 * timeout needs to be refreshed.
2105 */
2106 if (!((req->flags | res->flags) &
2107 (CF_SHUTR|CF_READ_ACTIVITY|CF_READ_TIMEOUT|CF_SHUTW|
2108 CF_WRITE_ACTIVITY|CF_WRITE_TIMEOUT|CF_ANA_TIMEOUT)) &&
2109 !((si_f->flags | si_b->flags) & (SI_FL_EXP|SI_FL_ERR)) &&
2110 ((s->pending_events & TASK_WOKEN_ANY) == TASK_WOKEN_TIMER)) {
2111 si_f->flags &= ~SI_FL_DONT_WAKE;
2112 si_b->flags &= ~SI_FL_DONT_WAKE;
2113 goto update_exp_and_leave;
2114 }
2115 }
2116
2117 resync_stream_interface:
2118 /* below we may emit error messages so we have to ensure that we have
2119 * our buffers properly allocated.
2120 */
2121 if (!stream_alloc_work_buffer(s)) {
2122 /* No buffer available, we've been subscribed to the list of
2123 * buffer waiters, let's wait for our turn.
2124 */
2125 si_f->flags &= ~SI_FL_DONT_WAKE;
2126 si_b->flags &= ~SI_FL_DONT_WAKE;
2127 goto update_exp_and_leave;
2128 }
2129
2130 /* 1b: check for low-level errors reported at the stream interface.
2131 * First we check if it's a retryable error (in which case we don't
2132 * want to tell the buffer). Otherwise we report the error one level
2133 * upper by setting flags into the buffers. Note that the side towards
2134 * the client cannot have connect (hence retryable) errors. Also, the
2135 * connection setup code must be able to deal with any type of abort.
2136 */
2137 srv = objt_server(s->target);
2138 if (unlikely(si_f->flags & SI_FL_ERR)) {
2139 if (si_state_in(si_f->state, SI_SB_EST|SI_SB_DIS)) {
2140 si_shutr(si_f);
2141 si_shutw(si_f);
2142 si_report_error(si_f);
2143 if (!(req->analysers) && !(res->analysers)) {
2144 _HA_ATOMIC_ADD(&s->be->be_counters.cli_aborts, 1);
2145 _HA_ATOMIC_ADD(&sess->fe->fe_counters.cli_aborts, 1);
2146 if (srv)
2147 _HA_ATOMIC_ADD(&srv->counters.cli_aborts, 1);
2148 if (!(s->flags & SF_ERR_MASK))
2149 s->flags |= SF_ERR_CLICL;
2150 if (!(s->flags & SF_FINST_MASK))
2151 s->flags |= SF_FINST_D;
2152 }
2153 }
2154 }
2155
2156 if (unlikely(si_b->flags & SI_FL_ERR)) {
2157 if (si_state_in(si_b->state, SI_SB_EST|SI_SB_DIS)) {
2158 si_shutr(si_b);
2159 si_shutw(si_b);
2160 si_report_error(si_b);
2161 _HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
2162 if (srv)
2163 _HA_ATOMIC_ADD(&srv->counters.failed_resp, 1);
2164 if (!(req->analysers) && !(res->analysers)) {
2165 _HA_ATOMIC_ADD(&s->be->be_counters.srv_aborts, 1);
2166 _HA_ATOMIC_ADD(&sess->fe->fe_counters.srv_aborts, 1);
2167 if (srv)
2168 _HA_ATOMIC_ADD(&srv->counters.srv_aborts, 1);
2169 if (!(s->flags & SF_ERR_MASK))
2170 s->flags |= SF_ERR_SRVCL;
2171 if (!(s->flags & SF_FINST_MASK))
2172 s->flags |= SF_FINST_D;
2173 }
2174 }
2175 /* note: maybe we should process connection errors here ? */
2176 }
2177
2178 if (si_state_in(si_b->state, SI_SB_CON|SI_SB_RDY)) {
2179 /* we were trying to establish a connection on the server side,
2180 * maybe it succeeded, maybe it failed, maybe we timed out, ...
2181 */
2182 if (si_b->state == SI_ST_RDY)
2183 sess_update_st_rdy_tcp(s);
2184 else if (si_b->state == SI_ST_CON)
2185 sess_update_st_con_tcp(s);
2186
2187 if (si_b->state == SI_ST_CER)
2188 sess_update_st_cer(s);
2189 else if (si_b->state == SI_ST_EST)
2190 sess_establish(s);
2191
2192 /* state is now one of SI_ST_CON (still in progress), SI_ST_EST
2193 * (established), SI_ST_DIS (abort), SI_ST_CLO (last error),
2194 * SI_ST_ASS/SI_ST_TAR/SI_ST_REQ for retryable errors.
2195 */
2196 }
2197
2198 rq_prod_last = si_f->state;
2199 rq_cons_last = si_b->state;
2200 rp_cons_last = si_f->state;
2201 rp_prod_last = si_b->state;
2202
2203 /* Check for connection closure */
2204 DBG_TRACE_POINT(STRM_EV_STRM_PROC, s);
2205
2206 /* nothing special to be done on client side */
2207 if (unlikely(si_f->state == SI_ST_DIS))
2208 si_f->state = SI_ST_CLO;
2209
2210 /* When a server-side connection is released, we have to count it and
2211 * check for pending connections on this server.
2212 */
2213 if (unlikely(si_b->state == SI_ST_DIS)) {
2214 si_b->state = SI_ST_CLO;
2215 srv = objt_server(s->target);
2216 if (srv) {
2217 if (s->flags & SF_CURR_SESS) {
2218 s->flags &= ~SF_CURR_SESS;
2219 _HA_ATOMIC_SUB(&srv->cur_sess, 1);
2220 }
2221 sess_change_server(s, NULL);
2222 if (may_dequeue_tasks(srv, s->be))
2223 process_srv_queue(srv);
2224 }
2225 }
2226
2227 /*
2228 * Note: of the transient states (REQ, CER, DIS), only REQ may remain
2229 * at this point.
2230 */
2231
2232 resync_request:
2233 /* Analyse request */
2234 if (((req->flags & ~rqf_last) & CF_MASK_ANALYSER) ||
2235 ((req->flags ^ rqf_last) & CF_MASK_STATIC) ||
2236 (req->analysers && (req->flags & CF_SHUTW)) ||
2237 si_f->state != rq_prod_last ||
2238 si_b->state != rq_cons_last ||
2239 s->pending_events & TASK_WOKEN_MSG) {
2240 unsigned int flags = req->flags;
2241
2242 if (si_state_in(si_f->state, SI_SB_EST|SI_SB_DIS|SI_SB_CLO)) {
2243 int max_loops = global.tune.maxpollevents;
2244 unsigned int ana_list;
2245 unsigned int ana_back;
2246
2247 /* it's up to the analysers to stop new connections,
2248 * disable reading or closing. Note: if an analyser
2249 * disables any of these bits, it is responsible for
2250 * enabling them again when it disables itself, so
2251 * that other analysers are called in similar conditions.
2252 */
2253 channel_auto_read(req);
2254 channel_auto_connect(req);
2255 channel_auto_close(req);
2256
2257 /* We will call all analysers for which a bit is set in
2258 * req->analysers, following the bit order from LSB
2259 * to MSB. The analysers must remove themselves from
2260 * the list when not needed. Any analyser may return 0
2261 * to break out of the loop, either because of missing
2262 * data to take a decision, or because it decides to
2263 * kill the stream. We loop at least once through each
2264 * analyser, and we may loop again if other analysers
2265 * are added in the middle.
2266 *
2267 * We build a list of analysers to run. We evaluate all
2268 * of these analysers in the order of the lower bit to
2269 * the higher bit. This ordering is very important.
2270 * An analyser will often add/remove other analysers,
2271 * including itself. Any changes to itself have no effect
2272 * on the loop. If it removes any other analysers, we
2273 * want those analysers not to be called anymore during
2274 * this loop. If it adds an analyser that is located
2275 * after itself, we want it to be scheduled for being
2276 * processed during the loop. If it adds an analyser
2277 * which is located before it, we want it to switch to
2278 * it immediately, even if it has already been called
2279 * once but removed since.
2280 *
2281 * In order to achieve this, we compare the analyser
2282 * list after the call with a copy of it before the
2283 * call. The work list is fed with analyser bits that
2284 * appeared during the call. Then we compare previous
2285 * work list with the new one, and check the bits that
2286 * appeared. If the lowest of these bits is lower than
2287 * the current bit, it means we have enabled a previous
2288 * analyser and must immediately loop again.
2289 */
2290
2291 ana_list = ana_back = req->analysers;
2292 while (ana_list && max_loops--) {
2293 /* Warning! ensure that analysers are always placed in ascending order! */
2294 ANALYZE (s, req, flt_start_analyze, ana_list, ana_back, AN_REQ_FLT_START_FE);
2295 FLT_ANALYZE(s, req, tcp_inspect_request, ana_list, ana_back, AN_REQ_INSPECT_FE);
2296 FLT_ANALYZE(s, req, http_wait_for_request, ana_list, ana_back, AN_REQ_WAIT_HTTP);
2297 FLT_ANALYZE(s, req, http_wait_for_request_body, ana_list, ana_back, AN_REQ_HTTP_BODY);
2298 FLT_ANALYZE(s, req, http_process_req_common, ana_list, ana_back, AN_REQ_HTTP_PROCESS_FE, sess->fe);
2299 FLT_ANALYZE(s, req, process_switching_rules, ana_list, ana_back, AN_REQ_SWITCHING_RULES);
2300 ANALYZE (s, req, flt_start_analyze, ana_list, ana_back, AN_REQ_FLT_START_BE);
2301 FLT_ANALYZE(s, req, tcp_inspect_request, ana_list, ana_back, AN_REQ_INSPECT_BE);
2302 FLT_ANALYZE(s, req, http_process_req_common, ana_list, ana_back, AN_REQ_HTTP_PROCESS_BE, s->be);
2303 FLT_ANALYZE(s, req, http_process_tarpit, ana_list, ana_back, AN_REQ_HTTP_TARPIT);
2304 FLT_ANALYZE(s, req, process_server_rules, ana_list, ana_back, AN_REQ_SRV_RULES);
2305 FLT_ANALYZE(s, req, http_process_request, ana_list, ana_back, AN_REQ_HTTP_INNER);
2306 FLT_ANALYZE(s, req, tcp_persist_rdp_cookie, ana_list, ana_back, AN_REQ_PRST_RDP_COOKIE);
2307 FLT_ANALYZE(s, req, process_sticking_rules, ana_list, ana_back, AN_REQ_STICKING_RULES);
2308 ANALYZE (s, req, flt_analyze_http_headers, ana_list, ana_back, AN_REQ_FLT_HTTP_HDRS);
2309 ANALYZE (s, req, http_request_forward_body, ana_list, ana_back, AN_REQ_HTTP_XFER_BODY);
2310 ANALYZE (s, req, pcli_wait_for_request, ana_list, ana_back, AN_REQ_WAIT_CLI);
2311 ANALYZE (s, req, flt_xfer_data, ana_list, ana_back, AN_REQ_FLT_XFER_DATA);
2312 ANALYZE (s, req, flt_end_analyze, ana_list, ana_back, AN_REQ_FLT_END);
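/* reaching this point means the whole chain above was walked without an
 * analyser failing nor re-enabling an earlier one (the macros break or
 * continue in those cases), so the loop can be left.
 */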
2313 break;
2314 }
2315 }
2316
2317 rq_prod_last = si_f->state;
2318 rq_cons_last = si_b->state;
2319 req->flags &= ~CF_WAKE_ONCE;
2320 rqf_last = req->flags;
2321
2322 if ((req->flags ^ flags) & (CF_SHUTR|CF_SHUTW))
2323 goto resync_request;
2324 }
2325
2326 /* we'll monitor the request analysers while parsing the response,
2327 * because some response analysers may indirectly enable new request
2328 * analysers (eg: HTTP keep-alive).
2329 */
2330 req_ana_back = req->analysers;
2331
2332 resync_response:
2333 /* Analyse response */
2334
2335 if (((res->flags & ~rpf_last) & CF_MASK_ANALYSER) ||
2336 (res->flags ^ rpf_last) & CF_MASK_STATIC ||
2337 (res->analysers && (res->flags & CF_SHUTW)) ||
2338 si_f->state != rp_cons_last ||
2339 si_b->state != rp_prod_last ||
2340 s->pending_events & TASK_WOKEN_MSG) {
2341 unsigned int flags = res->flags;
2342
2343 if (si_state_in(si_b->state, SI_SB_EST|SI_SB_DIS|SI_SB_CLO)) {
2344 int max_loops = global.tune.maxpollevents;
2345 unsigned int ana_list;
2346 unsigned int ana_back;
2347
2348 /* it's up to the analysers to disable reading or
2349 * closing. Note: if an analyser disables any of these
2350 * bits, it is responsible for enabling them again when
2351 * it disables itself, so that other analysers are called
2352 * in similar conditions.
2353 */
2354 channel_auto_read(res);
2355 channel_auto_close(res);
2356
2357 /* We will call all analysers for which a bit is set in
2358 * res->analysers, following the bit order from LSB
2359 * to MSB. The analysers must remove themselves from
2360 * the list when not needed. Any analyser may return 0
2361 * to break out of the loop, either because of missing
2362 * data to take a decision, or because it decides to
2363 * kill the stream. We loop at least once through each
2364 * analyser, and we may loop again if other analysers
2365 * are added in the middle.
2366 */
2367
2368 ana_list = ana_back = res->analysers;
2369 while (ana_list && max_loops--) {
2370 /* Warning! ensure that analysers are always placed in ascending order! */
2371 ANALYZE (s, res, flt_start_analyze, ana_list, ana_back, AN_RES_FLT_START_FE);
2372 ANALYZE (s, res, flt_start_analyze, ana_list, ana_back, AN_RES_FLT_START_BE);
2373 FLT_ANALYZE(s, res, tcp_inspect_response, ana_list, ana_back, AN_RES_INSPECT);
2374 FLT_ANALYZE(s, res, http_wait_for_response, ana_list, ana_back, AN_RES_WAIT_HTTP);
2375 FLT_ANALYZE(s, res, process_store_rules, ana_list, ana_back, AN_RES_STORE_RULES);
2376 FLT_ANALYZE(s, res, http_process_res_common, ana_list, ana_back, AN_RES_HTTP_PROCESS_BE, s->be);
2377 ANALYZE (s, res, flt_analyze_http_headers, ana_list, ana_back, AN_RES_FLT_HTTP_HDRS);
2378 ANALYZE (s, res, http_response_forward_body, ana_list, ana_back, AN_RES_HTTP_XFER_BODY);
2379 ANALYZE (s, res, pcli_wait_for_response, ana_list, ana_back, AN_RES_WAIT_CLI);
2380 ANALYZE (s, res, flt_xfer_data, ana_list, ana_back, AN_RES_FLT_XFER_DATA);
2381 ANALYZE (s, res, flt_end_analyze, ana_list, ana_back, AN_RES_FLT_END);
2382 break;
2383 }
2384 }
2385
2386 rp_cons_last = si_f->state;
2387 rp_prod_last = si_b->state;
2388 res->flags &= ~CF_WAKE_ONCE;
2389 rpf_last = res->flags;
2390
2391 if ((res->flags ^ flags) & (CF_SHUTR|CF_SHUTW))
2392 goto resync_response;
2393 }
2394
2395 /* maybe someone has added some request analysers, so we must check and loop */
2396 if (req->analysers & ~req_ana_back)
2397 goto resync_request;
2398
2399 if ((req->flags & ~rqf_last) & CF_MASK_ANALYSER)
2400 goto resync_request;
2401
2402 /* FIXME: here we should call protocol handlers which rely on
2403 * both buffers.
2404 */
2405
2406
2407 /*
2408 * Now we propagate unhandled errors to the stream. Normally
2409 * we're just in a data phase here since it means we have not
2410 * seen any analyser who could set an error status.
2411 */
2412 srv = objt_server(s->target);
2413 if (unlikely(!(s->flags & SF_ERR_MASK))) {
2414 if (req->flags & (CF_READ_ERROR|CF_READ_TIMEOUT|CF_WRITE_ERROR|CF_WRITE_TIMEOUT)) {
2415 /* Report it if the client got an error or a read timeout expired */
2416 req->analysers = 0;
2417 if (req->flags & CF_READ_ERROR) {
2418 _HA_ATOMIC_ADD(&s->be->be_counters.cli_aborts, 1);
2419 _HA_ATOMIC_ADD(&sess->fe->fe_counters.cli_aborts, 1);
2420 if (srv)
2421 _HA_ATOMIC_ADD(&srv->counters.cli_aborts, 1);
2422 s->flags |= SF_ERR_CLICL;
2423 }
2424 else if (req->flags & CF_READ_TIMEOUT) {
2425 _HA_ATOMIC_ADD(&s->be->be_counters.cli_aborts, 1);
2426 _HA_ATOMIC_ADD(&sess->fe->fe_counters.cli_aborts, 1);
2427 if (srv)
2428 _HA_ATOMIC_ADD(&srv->counters.cli_aborts, 1);
2429 s->flags |= SF_ERR_CLITO;
2430 }
2431 else if (req->flags & CF_WRITE_ERROR) {
2432 _HA_ATOMIC_ADD(&s->be->be_counters.srv_aborts, 1);
2433 _HA_ATOMIC_ADD(&sess->fe->fe_counters.srv_aborts, 1);
2434 if (srv)
2435 _HA_ATOMIC_ADD(&srv->counters.srv_aborts, 1);
2436 s->flags |= SF_ERR_SRVCL;
2437 }
2438 else {
2439 _HA_ATOMIC_ADD(&s->be->be_counters.srv_aborts, 1);
2440 _HA_ATOMIC_ADD(&sess->fe->fe_counters.srv_aborts, 1);
2441 if (srv)
2442 _HA_ATOMIC_ADD(&srv->counters.srv_aborts, 1);
2443 s->flags |= SF_ERR_SRVTO;
2444 }
2445 sess_set_term_flags(s);
2446
2447 /* Abort the request if a client error occurred while
2448 * the backend stream-interface is in the SI_ST_INI
2449 * state. It is switched into the SI_ST_CLO state and
2450 * the request channel is erased. */
2451 if (si_b->state == SI_ST_INI) {
2452 si_b->state = SI_ST_CLO;
2453 channel_abort(req);
2454 if (IS_HTX_STRM(s))
2455 channel_htx_erase(req, htxbuf(&req->buf));
2456 else
2457 channel_erase(req);
2458 }
2459 }
2460 else if (res->flags & (CF_READ_ERROR|CF_READ_TIMEOUT|CF_WRITE_ERROR|CF_WRITE_TIMEOUT)) {
2461 /* Report it if the server got an error or a read timeout expired */
2462 res->analysers = 0;
2463 if (res->flags & CF_READ_ERROR) {
2464 _HA_ATOMIC_ADD(&s->be->be_counters.srv_aborts, 1);
2465 _HA_ATOMIC_ADD(&sess->fe->fe_counters.srv_aborts, 1);
2466 if (srv)
2467 _HA_ATOMIC_ADD(&srv->counters.srv_aborts, 1);
2468 s->flags |= SF_ERR_SRVCL;
2469 }
2470 else if (res->flags & CF_READ_TIMEOUT) {
2471 _HA_ATOMIC_ADD(&s->be->be_counters.srv_aborts, 1);
2472 _HA_ATOMIC_ADD(&sess->fe->fe_counters.srv_aborts, 1);
2473 if (srv)
2474 _HA_ATOMIC_ADD(&srv->counters.srv_aborts, 1);
2475 s->flags |= SF_ERR_SRVTO;
2476 }
2477 else if (res->flags & CF_WRITE_ERROR) {
2478 _HA_ATOMIC_ADD(&s->be->be_counters.cli_aborts, 1);
2479 _HA_ATOMIC_ADD(&sess->fe->fe_counters.cli_aborts, 1);
2480 if (srv)
2481 _HA_ATOMIC_ADD(&srv->counters.cli_aborts, 1);
2482 s->flags |= SF_ERR_CLICL;
2483 }
2484 else {
2485 _HA_ATOMIC_ADD(&s->be->be_counters.cli_aborts, 1);
2486 _HA_ATOMIC_ADD(&sess->fe->fe_counters.cli_aborts, 1);
2487 if (srv)
2488 _HA_ATOMIC_ADD(&srv->counters.cli_aborts, 1);
2489 s->flags |= SF_ERR_CLITO;
2490 }
2491 sess_set_term_flags(s);
2492 }
2493 }
2494
2495 /*
2496 * Here we take care of forwarding unhandled data. This also includes
2497 * connection establishments and shutdown requests.
2498 */
2499
2500
2501 /* If no one is interested in analysing data, it's time to forward
2502 * everything. We configure the buffer to forward indefinitely.
2503 * Note that we're checking CF_SHUTR_NOW as an indication of a possible
2504 * recent call to channel_abort().
2505 */
2506 if (unlikely((!req->analysers || (req->analysers == AN_REQ_FLT_END && !(req->flags & CF_FLT_ANALYZE))) &&
2507 !(req->flags & (CF_SHUTW|CF_SHUTR_NOW)) &&
2508 (si_state_in(si_f->state, SI_SB_EST|SI_SB_DIS|SI_SB_CLO)) &&
2509 (req->to_forward != CHN_INFINITE_FORWARD))) {
2510 /* This buffer is freewheeling, there's no analyser
2511 * attached to it. If any data are left in, we'll permit them to
2512 * move.
2513 */
2514 channel_auto_read(req);
2515 channel_auto_connect(req);
2516 channel_auto_close(req);
2517
2518 if (IS_HTX_STRM(s)) {
2519 struct htx *htx = htxbuf(&req->buf);
2520
2521 /* We'll let data flow from the producer (if still connected)
2522 * to the consumer.
2523 */
2524 co_set_data(req, htx->data);
2525 if (!(req->flags & (CF_SHUTR|CF_SHUTW_NOW)))
2526 channel_htx_forward_forever(req, htx);
2527 }
2528 else {
2529 /* We'll let data flow from the producer (if still connected)
2530 * to the consumer (which might possibly not be connected yet).
2531 */
2532 c_adv(req, ci_data(req));
2533 if (!(req->flags & (CF_SHUTR|CF_SHUTW_NOW)))
2534 channel_forward_forever(req);
2535 }
2536 }
2537
2538 /* check if it is wise to enable kernel splicing to forward request data */
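/* Roughly, all of the following must hold: some data remain to be forwarded,
 * both endpoints are real connections whose transport and mux support pipes
 * (rcv_pipe on the client side, snd_pipe on the server side), a pipe is still
 * available, and splicing is either explicitly enabled for requests or left in
 * automatic mode with a fast streamer detected.
 */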
2539 if (!(req->flags & (CF_KERN_SPLICING|CF_SHUTR)) &&
2540 req->to_forward &&
2541 (global.tune.options & GTUNE_USE_SPLICE) &&
2542 (objt_cs(si_f->end) && __objt_cs(si_f->end)->conn->xprt && __objt_cs(si_f->end)->conn->xprt->rcv_pipe &&
2543 __objt_cs(si_f->end)->conn->mux && __objt_cs(si_f->end)->conn->mux->rcv_pipe) &&
2544 (objt_cs(si_b->end) && __objt_cs(si_b->end)->conn->xprt && __objt_cs(si_b->end)->conn->xprt->snd_pipe &&
2545 __objt_cs(si_b->end)->conn->mux && __objt_cs(si_b->end)->conn->mux->snd_pipe) &&
2546 (pipes_used < global.maxpipes) &&
2547 (((sess->fe->options2|s->be->options2) & PR_O2_SPLIC_REQ) ||
2548 (((sess->fe->options2|s->be->options2) & PR_O2_SPLIC_AUT) &&
2549 (req->flags & CF_STREAMER_FAST)))) {
2550 req->flags |= CF_KERN_SPLICING;
2551 }
2552
2553 /* reflect what the L7 analysers have seen last */
2554 rqf_last = req->flags;
2555
2556 /* it's possible that an upper layer has requested a connection setup or abort.
2557 * There are 2 situations where we decide to establish a new connection :
2558 * - there are data scheduled for emission in the buffer
2559 * - the CF_AUTO_CONNECT flag is set (active connection)
2560 */
2561 if (si_b->state == SI_ST_INI) {
2562 if (!(req->flags & CF_SHUTW)) {
2563 if ((req->flags & CF_AUTO_CONNECT) || !channel_is_empty(req)) {
2564 /* If we have an appctx, there is no connect method, so we
2565 * immediately switch to the connected state, otherwise we
2566 * perform a connection request.
2567 */
2568 si_b->state = SI_ST_REQ; /* new connection requested */
2569 si_b->conn_retries = s->be->conn_retries;
2570 if ((s->be->retry_type &~ PR_RE_CONN_FAILED) &&
2571 (s->be->mode == PR_MODE_HTTP) &&
2572 !(si_b->flags & SI_FL_D_L7_RETRY))
2573 si_b->flags |= SI_FL_L7_RETRY;
2574 }
2575 }
2576 else {
2577 si_release_endpoint(si_b);
2578 si_b->state = SI_ST_CLO; /* shutw+ini = abort */
2579 channel_shutw_now(req); /* fix buffer flags upon abort */
2580 channel_shutr_now(res);
2581 }
2582 }
2583
2584
2585 /* we may have a pending connection request, or a connection waiting
2586 * for completion.
2587 */
2588 if (si_state_in(si_b->state, SI_SB_REQ|SI_SB_QUE|SI_SB_TAR|SI_SB_ASS)) {
2589 /* prune the request variables and swap to the response variables. */
2590 if (s->vars_reqres.scope != SCOPE_RES) {
2591 if (!LIST_ISEMPTY(&s->vars_reqres.head))
2592 vars_prune(&s->vars_reqres, s->sess, s);
2593 vars_init(&s->vars_reqres, SCOPE_RES);
2594 }
2595
2596 do {
2597 /* nb: step 1 might switch from QUE to ASS, but we first want
2598 * to give a chance to step 2 to perform a redirect if needed.
2599 */
2600 if (si_b->state != SI_ST_REQ)
2601 sess_update_stream_int(s);
2602 if (si_b->state == SI_ST_REQ)
2603 sess_prepare_conn_req(s);
2604
2605 /* applets directly go to the ESTABLISHED state. Similarly,
2606 * servers experience the same fate when their connection
2607 * is reused.
2608 */
2609 if (unlikely(si_b->state == SI_ST_EST))
2610 sess_establish(s);
2611
2612 srv = objt_server(s->target);
2613 if (si_b->state == SI_ST_ASS && srv && srv->rdr_len && (s->flags & SF_REDIRECTABLE))
2614 http_perform_server_redirect(s, si_b);
2615 } while (si_b->state == SI_ST_ASS);
2616 }
2617
2618 /* Let's see if we can send the pending request now */
2619 si_sync_send(si_b);
2620
2621 /*
2622 * Now forward all shutdown requests between both sides of the request buffer
2623 */
2624
2625 /* first, let's check if the request buffer needs to shutdown(write), which may
2626 * happen either because the input is closed or because we want to force a close
2627 * once the server has begun to respond. If a half-closed timeout is set, we adjust
2628 * the other side's timeout as well.
2629 */
2630 if (unlikely((req->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_AUTO_CLOSE|CF_SHUTR)) ==
2631 (CF_AUTO_CLOSE|CF_SHUTR))) {
2632 channel_shutw_now(req);
2633 }
2634
2635 /* shutdown(write) pending */
2636 if (unlikely((req->flags & (CF_SHUTW|CF_SHUTW_NOW)) == CF_SHUTW_NOW &&
2637 channel_is_empty(req))) {
2638 if (req->flags & CF_READ_ERROR)
2639 si_b->flags |= SI_FL_NOLINGER;
2640 si_shutw(si_b);
2641 }
2642
2643 /* shutdown(write) done on server side, we must stop the client too */
2644 if (unlikely((req->flags & (CF_SHUTW|CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTW &&
2645 !req->analysers))
2646 channel_shutr_now(req);
2647
2648 /* shutdown(read) pending */
2649 if (unlikely((req->flags & (CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTR_NOW)) {
2650 if (si_f->flags & SI_FL_NOHALF)
2651 si_f->flags |= SI_FL_NOLINGER;
2652 si_shutr(si_f);
2653 }
2654
2655 /* Benchmarks have shown that it's optimal to do a full resync now */
2656 if (si_f->state == SI_ST_DIS ||
2657 si_state_in(si_b->state, SI_SB_RDY|SI_SB_DIS) ||
2658 (si_f->flags & SI_FL_ERR && si_f->state != SI_ST_CLO) ||
2659 (si_b->flags & SI_FL_ERR && si_b->state != SI_ST_CLO))
2660 goto resync_stream_interface;
2661
2662 /* otherwise we want to check if we need to resync the req buffer or not */
2663 if ((req->flags ^ rqf_last) & (CF_SHUTR|CF_SHUTW))
2664 goto resync_request;
2665
2666 /* perform output updates to the response buffer */
2667
2668 /* If no one is interested in analysing data, it's time to forward
2669 * everything. We configure the buffer to forward indefinitely.
2670 * Note that we're checking CF_SHUTR_NOW as an indication of a possible
2671 * recent call to channel_abort().
2672 */
2673 if (unlikely((!res->analysers || (res->analysers == AN_RES_FLT_END && !(res->flags & CF_FLT_ANALYZE))) &&
2674 !(res->flags & (CF_SHUTW|CF_SHUTR_NOW)) &&
2675 si_state_in(si_b->state, SI_SB_EST|SI_SB_DIS|SI_SB_CLO) &&
2676 (res->to_forward != CHN_INFINITE_FORWARD))) {
2677 /* This buffer is freewheeling, there's no analyser
2678 * attached to it. If any data are left in, we'll permit them to
2679 * move.
2680 */
2681 channel_auto_read(res);
2682 channel_auto_close(res);
2683
2684 if (IS_HTX_STRM(s)) {
2685 struct htx *htx = htxbuf(&res->buf);
2686
2687 /* We'll let data flow from the producer (if still connected)
2688 * to the consumer.
2689 */
2690 co_set_data(res, htx->data);
2691 if (!(res->flags & (CF_SHUTR|CF_SHUTW_NOW)))
2692 channel_htx_forward_forever(res, htx);
2693 }
2694 else {
2695 /* We'll let data flow from the producer (if still connected)
2696 * to the consumer.
2697 */
2698 c_adv(res, ci_data(res));
2699 if (!(res->flags & (CF_SHUTR|CF_SHUTW_NOW)))
2700 channel_forward_forever(res);
2701 }
2702
2703 /* if we have no analyser anymore in any direction and have a
2704 * tunnel timeout set, use it now. Note that we must respect
2705 * the half-closed timeouts as well.
2706 */
2707 if (!req->analysers && s->be->timeout.tunnel) {
2708 req->rto = req->wto = res->rto = res->wto =
2709 s->be->timeout.tunnel;
2710
2711 if ((req->flags & CF_SHUTR) && tick_isset(sess->fe->timeout.clientfin))
2712 res->wto = sess->fe->timeout.clientfin;
2713 if ((req->flags & CF_SHUTW) && tick_isset(s->be->timeout.serverfin))
2714 res->rto = s->be->timeout.serverfin;
2715 if ((res->flags & CF_SHUTR) && tick_isset(s->be->timeout.serverfin))
2716 req->wto = s->be->timeout.serverfin;
2717 if ((res->flags & CF_SHUTW) && tick_isset(sess->fe->timeout.clientfin))
2718 req->rto = sess->fe->timeout.clientfin;
2719
2720 req->rex = tick_add(now_ms, req->rto);
2721 req->wex = tick_add(now_ms, req->wto);
2722 res->rex = tick_add(now_ms, res->rto);
2723 res->wex = tick_add(now_ms, res->wto);
2724 }
2725 }
2726
2727 /* check if it is wise to enable kernel splicing to forward response data */
2728 if (!(res->flags & (CF_KERN_SPLICING|CF_SHUTR)) &&
2729 res->to_forward &&
2730 (global.tune.options & GTUNE_USE_SPLICE) &&
2731 (objt_cs(si_f->end) && __objt_cs(si_f->end)->conn->xprt && __objt_cs(si_f->end)->conn->xprt->snd_pipe &&
2732 __objt_cs(si_f->end)->conn->mux && __objt_cs(si_f->end)->conn->mux->snd_pipe) &&
2733 (objt_cs(si_b->end) && __objt_cs(si_b->end)->conn->xprt && __objt_cs(si_b->end)->conn->xprt->rcv_pipe &&
2734 __objt_cs(si_b->end)->conn->mux && __objt_cs(si_b->end)->conn->mux->rcv_pipe) &&
2735 (pipes_used < global.maxpipes) &&
2736 (((sess->fe->options2|s->be->options2) & PR_O2_SPLIC_RTR) ||
2737 (((sess->fe->options2|s->be->options2) & PR_O2_SPLIC_AUT) &&
2738 (res->flags & CF_STREAMER_FAST)))) {
2739 res->flags |= CF_KERN_SPLICING;
2740 }
2741
2742 /* reflect what the L7 analysers have seen last */
2743 rpf_last = res->flags;
2744
2745 /* Let's see if we can send the pending response now */
2746 si_sync_send(si_f);
2747
2748 /*
2749 * Now forward all shutdown requests between both sides of the buffer
2750 */
2751
2752 /*
2753 * FIXME: this is probably where we should produce error responses.
2754 */
2755
2756 /* first, let's check if the response buffer needs to shutdown(write) */
2757 if (unlikely((res->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_AUTO_CLOSE|CF_SHUTR)) ==
2758 (CF_AUTO_CLOSE|CF_SHUTR))) {
2759 channel_shutw_now(res);
2760 }
2761
2762 /* shutdown(write) pending */
2763 if (unlikely((res->flags & (CF_SHUTW|CF_SHUTW_NOW)) == CF_SHUTW_NOW &&
2764 channel_is_empty(res))) {
2765 si_shutw(si_f);
2766 }
2767
2768 /* shutdown(write) done on the client side, we must stop the server too */
2769 if (unlikely((res->flags & (CF_SHUTW|CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTW) &&
2770 !res->analysers)
2771 channel_shutr_now(res);
2772
2773 /* shutdown(read) pending */
2774 if (unlikely((res->flags & (CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTR_NOW)) {
2775 if (si_b->flags & SI_FL_NOHALF)
2776 si_b->flags |= SI_FL_NOLINGER;
2777 si_shutr(si_b);
2778 }
2779
2780 if (si_f->state == SI_ST_DIS ||
2781 si_state_in(si_b->state, SI_SB_RDY|SI_SB_DIS) ||
2782 (si_f->flags & SI_FL_ERR && si_f->state != SI_ST_CLO) ||
2783 (si_b->flags & SI_FL_ERR && si_b->state != SI_ST_CLO))
2784 goto resync_stream_interface;
2785
2786 if ((req->flags & ~rqf_last) & CF_MASK_ANALYSER)
2787 goto resync_request;
2788
2789 if ((res->flags ^ rpf_last) & CF_MASK_STATIC)
2790 goto resync_response;
2791
2792 if (((req->flags ^ rqf_last) | (res->flags ^ rpf_last)) & CF_MASK_ANALYSER)
2793 goto resync_request;
2794
2795 /* we're interested in getting wakeups again */
2796 si_f->flags &= ~SI_FL_DONT_WAKE;
2797 si_b->flags &= ~SI_FL_DONT_WAKE;
2798
2799 /* This is needed only when debugging is enabled, to indicate
2800 * client-side or server-side close. Please note that in the unlikely
2801 * event where both sides would close at once, the sequence is reported
2802 * on the server side first.
2803 */
2804 if (unlikely((global.mode & MODE_DEBUG) &&
2805 (!(global.mode & MODE_QUIET) ||
2806 (global.mode & MODE_VERBOSE)))) {
2807 if (si_b->state == SI_ST_CLO &&
2808 si_b->prev_state == SI_ST_EST) {
2809 chunk_printf(&trash, "%08x:%s.srvcls[%04x:%04x]\n",
2810 s->uniq_id, s->be->id,
2811 objt_cs(si_f->end) ? (unsigned short)objt_cs(si_f->end)->conn->handle.fd : -1,
2812 objt_cs(si_b->end) ? (unsigned short)objt_cs(si_b->end)->conn->handle.fd : -1);
2813 shut_your_big_mouth_gcc(write(1, trash.area, trash.data));
2814 }
2815
2816 if (si_f->state == SI_ST_CLO &&
2817 si_f->prev_state == SI_ST_EST) {
2818 chunk_printf(&trash, "%08x:%s.clicls[%04x:%04x]\n",
2819 s->uniq_id, s->be->id,
2820 objt_cs(si_f->end) ? (unsigned short)objt_cs(si_f->end)->conn->handle.fd : -1,
2821 objt_cs(si_b->end) ? (unsigned short)objt_cs(si_b->end)->conn->handle.fd : -1);
2822 shut_your_big_mouth_gcc(write(1, trash.area, trash.data));
2823 }
2824 }
2825
2826 if (likely((si_f->state != SI_ST_CLO) || !si_state_in(si_b->state, SI_SB_INI|SI_SB_CLO))) {
2827 if ((sess->fe->options & PR_O_CONTSTATS) && (s->flags & SF_BE_ASSIGNED) && !(s->flags & SF_IGNORE))
2828 stream_process_counters(s);
2829
2830 si_update_both(si_f, si_b);
2831
2832 /* Trick: if a request is waiting for the server to respond,
2833 * and if we know the server can timeout, we don't want the timeout
2834 * to expire on the client side first, but we're still interested
2835 * in passing data from the client to the server (eg: POST). Thus,
2836 * we can cancel the client's request timeout if the server's
2837 * request timeout is set and the server has not yet sent a response.
2838 */
2839
2840 if ((res->flags & (CF_AUTO_CLOSE|CF_SHUTR)) == 0 &&
2841 (tick_isset(req->wex) || tick_isset(res->rex))) {
2842 req->flags |= CF_READ_NOEXP;
2843 req->rex = TICK_ETERNITY;
2844 }
2845
2846 /* Reset pending events now */
2847 s->pending_events = 0;
2848
2849 update_exp_and_leave:
2850 /* Note: please ensure that if you branch here you disable SI_FL_DONT_WAKE */
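/* The task's expiration date is first set to the earliest of the channels'
 * read/write timeouts, then refined below with the analyse expiration dates
 * and both stream interfaces' expiration dates.
 */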
2851 t->expire = tick_first((tick_is_expired(t->expire, now_ms) ? 0 : t->expire),
2852 tick_first(tick_first(req->rex, req->wex),
2853 tick_first(res->rex, res->wex)));
2854 if (!req->analysers)
2855 req->analyse_exp = TICK_ETERNITY;
2856
2857 if ((sess->fe->options & PR_O_CONTSTATS) && (s->flags & SF_BE_ASSIGNED) &&
2858 (!tick_isset(req->analyse_exp) || tick_is_expired(req->analyse_exp, now_ms)))
2859 req->analyse_exp = tick_add(now_ms, 5000);
2860
2861 t->expire = tick_first(t->expire, req->analyse_exp);
2862
2863 t->expire = tick_first(t->expire, res->analyse_exp);
2864
2865 if (si_f->exp)
2866 t->expire = tick_first(t->expire, si_f->exp);
2867
2868 if (si_b->exp)
2869 t->expire = tick_first(t->expire, si_b->exp);
2870
2871 s->pending_events &= ~(TASK_WOKEN_TIMER | TASK_WOKEN_RES);
2872 stream_release_buffers(s);
2873
2874 DBG_TRACE_DEVEL("queuing", STRM_EV_STRM_PROC, s);
2875 return t; /* nothing more to do */
2876 }
2877
2878 DBG_TRACE_DEVEL("releasing", STRM_EV_STRM_PROC, s);
2879
2880 if (s->flags & SF_BE_ASSIGNED)
2881 _HA_ATOMIC_SUB(&s->be->beconn, 1);
2882
2883 if (unlikely((global.mode & MODE_DEBUG) &&
2884 (!(global.mode & MODE_QUIET) || (global.mode & MODE_VERBOSE)))) {
2885 chunk_printf(&trash, "%08x:%s.closed[%04x:%04x]\n",
2886 s->uniq_id, s->be->id,
2887 objt_cs(si_f->end) ? (unsigned short)objt_cs(si_f->end)->conn->handle.fd : -1,
2888 objt_cs(si_b->end) ? (unsigned short)objt_cs(si_b->end)->conn->handle.fd : -1);
2889 shut_your_big_mouth_gcc(write(1, trash.area, trash.data));
2890 }
2891
2892 s->logs.t_close = tv_ms_elapsed(&s->logs.tv_accept, &now);
2893 if (!(s->flags & SF_IGNORE))
2894 stream_process_counters(s);
2895
2896 if (s->txn && s->txn->status) {
2897 int n;
2898
2899 n = s->txn->status / 100;
2900 if (n < 1 || n > 5)
2901 n = 0;
2902
2903 if (sess->fe->mode == PR_MODE_HTTP) {
2904 _HA_ATOMIC_ADD(&sess->fe->fe_counters.p.http.rsp[n], 1);
2905 }
2906 if ((s->flags & SF_BE_ASSIGNED) &&
2907 (s->be->mode == PR_MODE_HTTP)) {
2908 _HA_ATOMIC_ADD(&s->be->be_counters.p.http.rsp[n], 1);
2909 _HA_ATOMIC_ADD(&s->be->be_counters.p.http.cum_req, 1);
2910 }
2911 }
2912
2913 /* let's do a final log if we need it */
2914 if (!LIST_ISEMPTY(&sess->fe->logformat) && s->logs.logwait &&
2915 !(s->flags & SF_MONITOR) &&
2916 (!(sess->fe->options & PR_O_NULLNOLOG) || req->total)) {
2917 /* we may need to know the position in the queue */
2918 pendconn_free(s);
2919 s->do_log(s);
2920 }
2921
2922 /* update time stats for this stream */
2923 stream_update_time_stats(s);
2924
2925 /* the task MUST not be in the run queue anymore */
2926 stream_free(s);
2927 task_destroy(t);
2928 return NULL;
2929 }
2930
2931 /* Update the stream's backend and server time stats */
2932 void stream_update_time_stats(struct stream *s)
2933 {
2934 int t_request;
2935 int t_queue;
2936 int t_connect;
2937 int t_data;
2938 int t_close;
2939 struct server *srv;
2940
2941 t_request = 0;
2942 t_queue = s->logs.t_queue;
2943 t_connect = s->logs.t_connect;
2944 t_close = s->logs.t_close;
2945 t_data = s->logs.t_data;
2946
2947 if (s->be->mode != PR_MODE_HTTP)
2948 t_data = t_connect;
2949
2950 if (t_connect < 0 || t_data < 0)
2951 return;
2952
2953 if (tv_isge(&s->logs.tv_request, &s->logs.tv_accept))
2954 t_request = tv_ms_elapsed(&s->logs.tv_accept, &s->logs.tv_request);
2955
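/* The logs.t_* values are cumulative times measured from the accept date;
 * turn them into per-phase durations before feeding the sliding averages below.
 */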
2956 t_data -= t_connect;
2957 t_connect -= t_queue;
2958 t_queue -= t_request;
2959
2960 srv = objt_server(s->target);
2961 if (srv) {
2962 swrate_add(&srv->counters.q_time, TIME_STATS_SAMPLES, t_queue);
2963 swrate_add(&srv->counters.c_time, TIME_STATS_SAMPLES, t_connect);
2964 swrate_add(&srv->counters.d_time, TIME_STATS_SAMPLES, t_data);
2965 swrate_add(&srv->counters.t_time, TIME_STATS_SAMPLES, t_close);
2966 HA_ATOMIC_UPDATE_MAX(&srv->counters.qtime_max, t_queue);
2967 HA_ATOMIC_UPDATE_MAX(&srv->counters.ctime_max, t_connect);
2968 HA_ATOMIC_UPDATE_MAX(&srv->counters.dtime_max, t_data);
2969 HA_ATOMIC_UPDATE_MAX(&srv->counters.ttime_max, t_close);
2970 }
2971 swrate_add(&s->be->be_counters.q_time, TIME_STATS_SAMPLES, t_queue);
2972 swrate_add(&s->be->be_counters.c_time, TIME_STATS_SAMPLES, t_connect);
2973 swrate_add(&s->be->be_counters.d_time, TIME_STATS_SAMPLES, t_data);
2974 swrate_add(&s->be->be_counters.t_time, TIME_STATS_SAMPLES, t_close);
2975 HA_ATOMIC_UPDATE_MAX(&s->be->be_counters.qtime_max, t_queue);
2976 HA_ATOMIC_UPDATE_MAX(&s->be->be_counters.ctime_max, t_connect);
2977 HA_ATOMIC_UPDATE_MAX(&s->be->be_counters.dtime_max, t_data);
2978 HA_ATOMIC_UPDATE_MAX(&s->be->be_counters.ttime_max, t_close);
2979 }
2980
2981 /*
2982 * This function adjusts sess->srv_conn and maintains the previous and new
2983 * server's served stream counts. Setting newsrv to NULL is enough to release
2984 * the current connection slot. This function also notifies any LB algo which might
2985 * expect to be informed about any change in the number of active streams on a
2986 * server.
2987 */
2988 void sess_change_server(struct stream *sess, struct server *newsrv)
2989 {
2990 if (sess->srv_conn == newsrv)
2991 return;
2992
2993 if (sess->srv_conn) {
2994 _HA_ATOMIC_SUB(&sess->srv_conn->served, 1);
2995 _HA_ATOMIC_SUB(&sess->srv_conn->proxy->served, 1);
2996 __ha_barrier_atomic_store();
2997 if (sess->srv_conn->proxy->lbprm.server_drop_conn) {
2998 HA_SPIN_LOCK(SERVER_LOCK, &sess->srv_conn->lock);
2999 sess->srv_conn->proxy->lbprm.server_drop_conn(sess->srv_conn);
3000 HA_SPIN_UNLOCK(SERVER_LOCK, &sess->srv_conn->lock);
3001 }
3002 stream_del_srv_conn(sess);
3003 }
3004
3005 if (newsrv) {
3006 _HA_ATOMIC_ADD(&newsrv->served, 1);
3007 _HA_ATOMIC_ADD(&newsrv->proxy->served, 1);
3008 __ha_barrier_atomic_store();
3009 if (newsrv->proxy->lbprm.server_take_conn) {
3010 HA_SPIN_LOCK(SERVER_LOCK, &newsrv->lock);
3011 newsrv->proxy->lbprm.server_take_conn(newsrv);
3012 HA_SPIN_UNLOCK(SERVER_LOCK, &newsrv->lock);
3013 }
3014 stream_add_srv_conn(sess, newsrv);
3015 }
3016 }
3017
3018 /* Handle server-side errors for default protocols. It is called whenever a
3019 * connection setup is aborted or a request is aborted in queue. It sets the
3020 * stream termination flags so that the caller does not have to worry about
3021 * them. It's installed as ->srv_error for the server-side stream_interface.
3022 */
3023 void default_srv_error(struct stream *s, struct stream_interface *si)
3024 {
3025 int err_type = si->err_type;
3026 int err = 0, fin = 0;
3027
3028 if (err_type & SI_ET_QUEUE_ABRT) {
3029 err = SF_ERR_CLICL;
3030 fin = SF_FINST_Q;
3031 }
3032 else if (err_type & SI_ET_CONN_ABRT) {
3033 err = SF_ERR_CLICL;
3034 fin = SF_FINST_C;
3035 }
3036 else if (err_type & SI_ET_QUEUE_TO) {
3037 err = SF_ERR_SRVTO;
3038 fin = SF_FINST_Q;
3039 }
3040 else if (err_type & SI_ET_QUEUE_ERR) {
3041 err = SF_ERR_SRVCL;
3042 fin = SF_FINST_Q;
3043 }
3044 else if (err_type & SI_ET_CONN_TO) {
3045 err = SF_ERR_SRVTO;
3046 fin = SF_FINST_C;
3047 }
3048 else if (err_type & SI_ET_CONN_ERR) {
3049 err = SF_ERR_SRVCL;
3050 fin = SF_FINST_C;
3051 }
3052 else if (err_type & SI_ET_CONN_RES) {
3053 err = SF_ERR_RESOURCE;
3054 fin = SF_FINST_C;
3055 }
3056 else /* SI_ET_CONN_OTHER and others */ {
3057 err = SF_ERR_INTERNAL;
3058 fin = SF_FINST_C;
3059 }
3060
3061 if (!(s->flags & SF_ERR_MASK))
3062 s->flags |= err;
3063 if (!(s->flags & SF_FINST_MASK))
3064 s->flags |= fin;
3065 }
3066
3067 /* kill a stream and set the termination flags to <why> (one of SF_ERR_*) */
3068 void stream_shutdown(struct stream *stream, int why)
3069 {
3070 if (stream->req.flags & (CF_SHUTW|CF_SHUTW_NOW))
3071 return;
3072
3073 channel_shutw_now(&stream->req);
3074 channel_shutr_now(&stream->res);
3075 stream->task->nice = 1024;
3076 if (!(stream->flags & SF_ERR_MASK))
3077 stream->flags |= why;
3078 task_wakeup(stream->task, TASK_WOKEN_OTHER);
3079 }
3080
3081 /* Appends a dump of the state of stream <s> into buffer <buf>, which must
3082 * have been prepared beforehand by its caller, with each line prepended by
3083 * prefix <pfx> and terminated by character <eol>.
3084 */
3085 void stream_dump(struct buffer *buf, const struct stream *s, const char *pfx, char eol)
3086 {
3087 const struct conn_stream *csf, *csb;
3088 const struct connection *cof, *cob;
3089 const struct appctx *acf, *acb;
3090 const struct server *srv;
3091 const char *src = "unknown";
3092 const char *dst = "unknown";
3093 char pn[INET6_ADDRSTRLEN];
3094 const struct channel *req, *res;
3095 const struct stream_interface *si_f, *si_b;
3096
3097 if (!s) {
3098 chunk_appendf(buf, "%sstrm=%p%c", pfx, s, eol);
3099 return;
3100 }
3101
3102 if (s->obj_type != OBJ_TYPE_STREAM) {
3103 chunk_appendf(buf, "%sstrm=%p [invalid type=%d(%s)]%c",
3104 pfx, s, s->obj_type, obj_type_name(&s->obj_type), eol);
3105 return;
3106 }
3107
3108 si_f = &s->si[0];
3109 si_b = &s->si[1];
3110 req = &s->req;
3111 res = &s->res;
3112
3113 csf = objt_cs(si_f->end);
3114 cof = cs_conn(csf);
3115 acf = objt_appctx(si_f->end);
3116 if (cof && cof->src && addr_to_str(cof->src, pn, sizeof(pn)) >= 0)
3117 src = pn;
3118 else if (acf)
3119 src = acf->applet->name;
3120
3121 csb = objt_cs(si_b->end);
3122 cob = cs_conn(csb);
3123 acb = objt_appctx(si_b->end);
3124 srv = objt_server(s->target);
3125 if (srv)
3126 dst = srv->id;
3127 else if (acb)
3128 dst = acb->applet->name;
3129
3130 chunk_appendf(buf,
3131 "%sstrm=%p src=%s fe=%s be=%s dst=%s%c"
3132 "%srqf=%x rqa=%x rpf=%x rpa=%x sif=%s,%x sib=%s,%x%c"
3133 "%saf=%p,%u csf=%p,%x%c"
3134 "%sab=%p,%u csb=%p,%x%c"
3135 "%scof=%p,%x:%s(%p)/%s(%p)/%s(%d)%c"
3136 "%scob=%p,%x:%s(%p)/%s(%p)/%s(%d)%c"
3137 "",
3138 pfx, s, src, s->sess->fe->id, s->be->id, dst, eol,
3139 pfx, req->flags, req->analysers, res->flags, res->analysers,
3140 si_state_str(si_f->state), si_f->flags,
3141 si_state_str(si_b->state), si_b->flags, eol,
3142 pfx, acf, acf ? acf->st0 : 0, csf, csf ? csf->flags : 0, eol,
3143 pfx, acb, acb ? acb->st0 : 0, csb, csb ? csb->flags : 0, eol,
3144 pfx, cof, cof ? cof->flags : 0, conn_get_mux_name(cof), cof?cof->ctx:0, conn_get_xprt_name(cof),
3145 cof ? cof->xprt_ctx : 0, conn_get_ctrl_name(cof), cof ? cof->handle.fd : 0, eol,
3146 pfx, cob, cob ? cob->flags : 0, conn_get_mux_name(cob), cob?cob->ctx:0, conn_get_xprt_name(cob),
3147 cob ? cob->xprt_ctx : 0, conn_get_ctrl_name(cob), cob ? cob->handle.fd : 0, eol);
3148 }
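/* Usage sketch (illustrative, not part of the original source): dumping a
 * stream's state into the shared trash chunk, as stream_dump_and_crash()
 * does below:
 *
 *	chunk_reset(&trash);
 *	stream_dump(&trash, s, "", ' ');
 *	ha_alert("%s\n", trash.area);
 */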
3149
3150 /* Dumps an error message for object <obj> (a stream or an appctx) which has
3151 * reached loop rate <rate>, then aborts hoping to retrieve a core.
3152 */
3153 void stream_dump_and_crash(enum obj_type *obj, int rate)
3154 {
3155 const struct stream *s;
3156 char *msg = NULL;
3157 const void *ptr;
3158
3159 ptr = s = objt_stream(obj);
3160 if (!s) {
3161 const struct appctx *appctx = objt_appctx(obj);
3162 if (!appctx)
3163 return;
3164 ptr = appctx;
3165 s = si_strm(appctx->owner);
3166 if (!s)
3167 return;
3168 }
3169
3170 chunk_reset(&trash);
3171 stream_dump(&trash, s, "", ' ');
3172
3173 chunk_appendf(&trash, "filters={");
3174 if (HAS_FILTERS(s)) {
3175 struct filter *filter;
3176
3177 list_for_each_entry(filter, &s->strm_flt.filters, list) {
3178 if (filter->list.p != &s->strm_flt.filters)
3179 chunk_appendf(&trash, ", ");
3180 chunk_appendf(&trash, "%p=\"%s\"", filter, FLT_ID(filter));
3181 }
3182 }
3183 chunk_appendf(&trash, "}");
3184
3185 memprintf(&msg,
3186 "A bogus %s [%p] is spinning at %d calls per second and refuses to die, "
3187 "aborting now! Please report this error to developers "
3188 "[%s]\n",
3189 obj_type_name(obj), ptr, rate, trash.area);
3190
3191 ha_alert("%s", msg);
3192 send_log(NULL, LOG_EMERG, "%s", msg);
3193 abort();
3194 }
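/* Usage sketch (illustrative, not part of the original source): this function
 * is intended for the stream processing loop when a stream or applet looks
 * runaway; assuming <s> is the stream and <rate> its measured call rate:
 *
 *	stream_dump_and_crash(&s->obj_type, rate);
 */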
3195
3196 /************************************************************************/
3197 /* All supported ACL keywords must be declared here. */
3198 /************************************************************************/
3199
3200 /* Parses a "use-service" rule. Returns ACT_RET_PRS_OK on success, ACT_RET_PRS_ERR on error. */
3201 static enum act_parse_ret stream_parse_use_service(const char **args, int *cur_arg,
3202 struct proxy *px, struct act_rule *rule,
3203 char **err)
3204 {
3205 struct action_kw *kw;
3206
3207 /* Check if the service name exists. */
3208 if (*(args[*cur_arg]) == 0) {
3209 memprintf(err, "'%s' expects a service name.", args[0]);
3210 return ACT_RET_PRS_ERR;
3211 }
3212
3213 /* look up the keyword corresponding to the service. */
3214 kw = action_lookup(&service_keywords, args[*cur_arg]);
3215 if (!kw) {
3216 memprintf(err, "'%s' unknown service name.", args[1]);
3217 return ACT_RET_PRS_ERR;
3218 }
3219 (*cur_arg)++;
3220
3221 /* execute the service-specific rule parser. */
3222 rule->kw = kw;
3223 if (kw->parse((const char **)args, cur_arg, px, rule, err) == ACT_RET_PRS_ERR)
3224 return ACT_RET_PRS_ERR;
3225
3226 /* Register processing function. */
3227 rule->action_ptr = process_use_service;
3228 rule->action = ACT_CUSTOM;
3229
3230 return ACT_RET_PRS_OK;
3231 }
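/* Configuration sketch (illustrative, not part of the original source): the
 * parser above backs the "use-service" action of tcp-request content and
 * http-request rules, registered at the end of this file. A typical use with
 * the Prometheus exporter service would look like:
 *
 *	frontend metrics
 *		bind :8405
 *		http-request use-service prometheus-exporter if { path /metrics }
 */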
3232
3233 void service_keywords_register(struct action_kw_list *kw_list)
3234 {
3235 LIST_ADDQ(&service_keywords, &kw_list->list);
3236 }
3237
3238 struct action_kw *service_find(const char *kw)
3239 {
3240 return action_lookup(&service_keywords, kw);
3241 }
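/* Registration sketch (illustrative, not part of the original source): a module
 * providing a service declares an action keyword list and registers it at init
 * time; the parser <my_svc_parse> is a hypothetical name here:
 *
 *	static struct action_kw_list my_service_kws = { ILH, {
 *		{ "my-module", my_svc_parse },
 *		{ }
 *	}};
 *	INITCALL1(STG_REGISTER, service_keywords_register, &my_service_kws);
 */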
3242
3243 /* Lists the known services on <out> */
3244 void list_services(FILE *out)
3245 {
3246 struct action_kw_list *kw_list;
3247 int found = 0;
3248 int i;
3249
3250 fprintf(out, "Available services :");
3251 list_for_each_entry(kw_list, &service_keywords, list) {
3252 for (i = 0; kw_list->kw[i].kw != NULL; i++) {
3253 if (!found)
3254 fputc('\n', out);
3255 found = 1;
3256 fprintf(out, "\t%s\n", kw_list->kw[i].kw);
3257 }
3258 }
3259 if (!found)
3260 fprintf(out, " none\n");
3261 }
3262
3263 /* This function dumps a complete stream state onto the stream interface's
3264 * read buffer. The stream has to be set in strm. It returns 0 if the output
3265 * buffer is full and it needs to be called again, otherwise non-zero. It is
3266 * designed to be called from cli_io_handler_dump_sess() below.
3267 */
3268 static int stats_dump_full_strm_to_buffer(struct stream_interface *si, struct stream *strm)
3269 {
3270 struct appctx *appctx = __objt_appctx(si->end);
3271 struct tm tm;
3272 extern const char *monthname[12];
3273 char pn[INET6_ADDRSTRLEN];
3274 struct conn_stream *cs;
3275 struct connection *conn;
3276 struct appctx *tmpctx;
3277
3278 chunk_reset(&trash);
3279
3280 if (appctx->ctx.sess.section > 0 && appctx->ctx.sess.uid != strm->uniq_id) {
3281 /* stream changed, no need to go any further */
3282 chunk_appendf(&trash, " *** session terminated while we were watching it ***\n");
3283 if (ci_putchk(si_ic(si), &trash) == -1)
3284 goto full;
3285 goto done;
3286 }
3287
3288 switch (appctx->ctx.sess.section) {
3289 case 0: /* main status of the stream */
3290 appctx->ctx.sess.uid = strm->uniq_id;
3291 appctx->ctx.sess.section = 1;
3292 /* fall through */
3293
3294 case 1:
3295 get_localtime(strm->logs.accept_date.tv_sec, &tm);
3296 chunk_appendf(&trash,
3297 "%p: [%02d/%s/%04d:%02d:%02d:%02d.%06d] id=%u proto=%s",
3298 strm,
3299 tm.tm_mday, monthname[tm.tm_mon], tm.tm_year+1900,
3300 tm.tm_hour, tm.tm_min, tm.tm_sec, (int)(strm->logs.accept_date.tv_usec),
3301 strm->uniq_id,
3302 strm_li(strm) ? strm_li(strm)->proto->name : "?");
3303
3304 conn = objt_conn(strm_orig(strm));
3305 switch (conn && conn_get_src(conn) ? addr_to_str(conn->src, pn, sizeof(pn)) : AF_UNSPEC) {
3306 case AF_INET:
3307 case AF_INET6:
3308 chunk_appendf(&trash, " source=%s:%d\n",
3309 pn, get_host_port(conn->src));
3310 break;
3311 case AF_UNIX:
3312 chunk_appendf(&trash, " source=unix:%d\n", strm_li(strm)->luid);
3313 break;
3314 default:
3315 /* no more information to print right now */
3316 chunk_appendf(&trash, "\n");
3317 break;
3318 }
3319
3320 chunk_appendf(&trash,
3321 " flags=0x%x, conn_retries=%d, srv_conn=%p, pend_pos=%p waiting=%d\n",
3322 strm->flags, strm->si[1].conn_retries, strm->srv_conn, strm->pend_pos,
3323 !LIST_ISEMPTY(&strm->buffer_wait.list));
3324
3325 chunk_appendf(&trash,
3326 " frontend=%s (id=%u mode=%s), listener=%s (id=%u)",
3327 strm_fe(strm)->id, strm_fe(strm)->uuid, strm_fe(strm)->mode ? "http" : "tcp",
3328 strm_li(strm) ? strm_li(strm)->name ? strm_li(strm)->name : "?" : "?",
3329 strm_li(strm) ? strm_li(strm)->luid : 0);
3330
3331 switch (conn && conn_get_dst(conn) ? addr_to_str(conn->dst, pn, sizeof(pn)) : AF_UNSPEC) {
3332 case AF_INET:
3333 case AF_INET6:
3334 chunk_appendf(&trash, " addr=%s:%d\n",
3335 pn, get_host_port(conn->dst));
3336 break;
3337 case AF_UNIX:
3338 chunk_appendf(&trash, " addr=unix:%d\n", strm_li(strm)->luid);
3339 break;
3340 default:
3341 /* no more information to print right now */
3342 chunk_appendf(&trash, "\n");
3343 break;
3344 }
3345
3346 if (strm->be->cap & PR_CAP_BE)
3347 chunk_appendf(&trash,
3348 " backend=%s (id=%u mode=%s)",
3349 strm->be->id,
3350 strm->be->uuid, strm->be->mode ? "http" : "tcp");
3351 else
3352 chunk_appendf(&trash, " backend=<NONE> (id=-1 mode=-)");
3353
3354 cs = objt_cs(strm->si[1].end);
3355 conn = cs_conn(cs);
3356
3357 switch (conn && conn_get_src(conn) ? addr_to_str(conn->src, pn, sizeof(pn)) : AF_UNSPEC) {
3358 case AF_INET:
3359 case AF_INET6:
3360 chunk_appendf(&trash, " addr=%s:%d\n",
3361 pn, get_host_port(conn->src));
3362 break;
3363 case AF_UNIX:
3364 chunk_appendf(&trash, " addr=unix\n");
3365 break;
3366 default:
3367 /* no more information to print right now */
3368 chunk_appendf(&trash, "\n");
3369 break;
3370 }
3371
3372 if (strm->be->cap & PR_CAP_BE)
3373 chunk_appendf(&trash,
3374 " server=%s (id=%u)",
3375 objt_server(strm->target) ? objt_server(strm->target)->id : "<none>",
3376 objt_server(strm->target) ? objt_server(strm->target)->puid : 0);
3377 else
3378 chunk_appendf(&trash, " server=<NONE> (id=-1)");
3379
3380 switch (conn && conn_get_dst(conn) ? addr_to_str(conn->dst, pn, sizeof(pn)) : AF_UNSPEC) {
3381 case AF_INET:
3382 case AF_INET6:
3383 chunk_appendf(&trash, " addr=%s:%d\n",
3384 pn, get_host_port(conn->dst));
3385 break;
3386 case AF_UNIX:
3387 chunk_appendf(&trash, " addr=unix\n");
3388 break;
3389 default:
3390 /* no more information to print right now */
3391 chunk_appendf(&trash, "\n");
3392 break;
3393 }
3394
3395 chunk_appendf(&trash,
3396 " task=%p (state=0x%02x nice=%d calls=%u rate=%u exp=%s tmask=0x%lx%s",
3397 strm->task,
3398 strm->task->state,
3399 strm->task->nice, strm->task->calls, read_freq_ctr(&strm->call_rate),
3400 strm->task->expire ?
3401 tick_is_expired(strm->task->expire, now_ms) ? "<PAST>" :
3402 human_time(TICKS_TO_MS(strm->task->expire - now_ms),
3403 TICKS_TO_MS(1000)) : "<NEVER>",
3404 strm->task->thread_mask,
3405 task_in_rq(strm->task) ? ", running" : "");
3406
3407 chunk_appendf(&trash,
3408 " age=%s)\n",
3409 human_time(now.tv_sec - strm->logs.accept_date.tv_sec, 1));
3410
3411 if (strm->txn)
3412 chunk_appendf(&trash,
3413 " txn=%p flags=0x%x meth=%d status=%d req.st=%s rsp.st=%s req.f=0x%02x rsp.f=0x%02x\n",
3414 strm->txn, strm->txn->flags, strm->txn->meth, strm->txn->status,
3415 h1_msg_state_str(strm->txn->req.msg_state), h1_msg_state_str(strm->txn->rsp.msg_state),
3416 strm->txn->req.flags, strm->txn->rsp.flags);
3417
3418 chunk_appendf(&trash,
3419 " si[0]=%p (state=%s flags=0x%02x endp0=%s:%p exp=%s et=0x%03x sub=%d)\n",
3420 &strm->si[0],
3421 si_state_str(strm->si[0].state),
3422 strm->si[0].flags,
3423 obj_type_name(strm->si[0].end),
3424 obj_base_ptr(strm->si[0].end),
3425 strm->si[0].exp ?
3426 tick_is_expired(strm->si[0].exp, now_ms) ? "<PAST>" :
3427 human_time(TICKS_TO_MS(strm->si[0].exp - now_ms),
3428 TICKS_TO_MS(1000)) : "<NEVER>",
3429 strm->si[0].err_type, strm->si[0].wait_event.events);
3430
3431 chunk_appendf(&trash,
3432 " si[1]=%p (state=%s flags=0x%02x endp1=%s:%p exp=%s et=0x%03x sub=%d)\n",
3433 &strm->si[1],
3434 si_state_str(strm->si[1].state),
3435 strm->si[1].flags,
3436 obj_type_name(strm->si[1].end),
3437 obj_base_ptr(strm->si[1].end),
3438 strm->si[1].exp ?
3439 tick_is_expired(strm->si[1].exp, now_ms) ? "<PAST>" :
3440 human_time(TICKS_TO_MS(strm->si[1].exp - now_ms),
3441 TICKS_TO_MS(1000)) : "<NEVER>",
3442 strm->si[1].err_type, strm->si[1].wait_event.events);
3443
3444 if ((cs = objt_cs(strm->si[0].end)) != NULL) {
3445 conn = cs->conn;
3446
3447 chunk_appendf(&trash,
3448 " co0=%p ctrl=%s xprt=%s mux=%s data=%s target=%s:%p\n",
3449 conn,
3450 conn_get_ctrl_name(conn),
3451 conn_get_xprt_name(conn),
3452 conn_get_mux_name(conn),
3453 cs_get_data_name(cs),
3454 obj_type_name(conn->target),
3455 obj_base_ptr(conn->target));
3456
3457 chunk_appendf(&trash,
3458 " flags=0x%08x fd=%d fd.state=%02x updt=%d fd.tmask=0x%lx\n",
3459 conn->flags,
3460 conn->handle.fd,
3461 conn->handle.fd >= 0 ? fdtab[conn->handle.fd].state : 0,
3462 conn->handle.fd >= 0 ? !!(fdtab[conn->handle.fd].update_mask & tid_bit) : 0,
3463 conn->handle.fd >= 0 ? fdtab[conn->handle.fd].thread_mask: 0);
3464
3465 chunk_appendf(&trash, " cs=%p csf=0x%08x ctx=%p\n", cs, cs->flags, cs->ctx);
3466 }
3467 else if ((tmpctx = objt_appctx(strm->si[0].end)) != NULL) {
3468 chunk_appendf(&trash,
3469 " app0=%p st0=%d st1=%d st2=%d applet=%s tmask=0x%lx nice=%d calls=%u rate=%u cpu=%llu lat=%llu\n",
3470 tmpctx,
3471 tmpctx->st0,
3472 tmpctx->st1,
3473 tmpctx->st2,
3474 tmpctx->applet->name,
3475 tmpctx->thread_mask,
3476 tmpctx->t->nice, tmpctx->t->calls, read_freq_ctr(&tmpctx->call_rate),
3477 (unsigned long long)tmpctx->t->cpu_time, (unsigned long long)tmpctx->t->lat_time);
3478 }
3479
3480 if ((cs = objt_cs(strm->si[1].end)) != NULL) {
3481 conn = cs->conn;
3482
3483 chunk_appendf(&trash,
3484 " co1=%p ctrl=%s xprt=%s mux=%s data=%s target=%s:%p\n",
3485 conn,
3486 conn_get_ctrl_name(conn),
3487 conn_get_xprt_name(conn),
3488 conn_get_mux_name(conn),
3489 cs_get_data_name(cs),
3490 obj_type_name(conn->target),
3491 obj_base_ptr(conn->target));
3492
3493 chunk_appendf(&trash,
3494 " flags=0x%08x fd=%d fd.state=%02x updt=%d fd.tmask=0x%lx\n",
3495 conn->flags,
3496 conn->handle.fd,
3497 conn->handle.fd >= 0 ? fdtab[conn->handle.fd].state : 0,
3498 conn->handle.fd >= 0 ? !!(fdtab[conn->handle.fd].update_mask & tid_bit) : 0,
3499 conn->handle.fd >= 0 ? fdtab[conn->handle.fd].thread_mask: 0);
3500
3501 chunk_appendf(&trash, " cs=%p csf=0x%08x ctx=%p\n", cs, cs->flags, cs->ctx);
3502 }
3503 else if ((tmpctx = objt_appctx(strm->si[1].end)) != NULL) {
3504 chunk_appendf(&trash,
3505 " app1=%p st0=%d st1=%d st2=%d applet=%s tmask=0x%lx nice=%d calls=%u rate=%u cpu=%llu lat=%llu\n",
3506 tmpctx,
3507 tmpctx->st0,
3508 tmpctx->st1,
3509 tmpctx->st2,
3510 tmpctx->applet->name,
3511 tmpctx->thread_mask,
3512 tmpctx->t->nice, tmpctx->t->calls, read_freq_ctr(&tmpctx->call_rate),
3513 (unsigned long long)tmpctx->t->cpu_time, (unsigned long long)tmpctx->t->lat_time);
3514 }
3515
3516 chunk_appendf(&trash,
3517 " req=%p (f=0x%06x an=0x%x pipe=%d tofwd=%d total=%lld)\n"
3518 " an_exp=%s",
3519 &strm->req,
3520 strm->req.flags, strm->req.analysers,
3521 strm->req.pipe ? strm->req.pipe->data : 0,
3522 strm->req.to_forward, strm->req.total,
3523 strm->req.analyse_exp ?
3524 human_time(TICKS_TO_MS(strm->req.analyse_exp - now_ms),
3525 TICKS_TO_MS(1000)) : "<NEVER>");
3526
3527 chunk_appendf(&trash,
3528 " rex=%s",
3529 strm->req.rex ?
3530 human_time(TICKS_TO_MS(strm->req.rex - now_ms),
3531 TICKS_TO_MS(1000)) : "<NEVER>");
3532
3533 chunk_appendf(&trash,
3534 " wex=%s\n"
3535 " buf=%p data=%p o=%u p=%u i=%u size=%u\n",
3536 strm->req.wex ?
3537 human_time(TICKS_TO_MS(strm->req.wex - now_ms),
3538 TICKS_TO_MS(1000)) : "<NEVER>",
3539 &strm->req.buf,
3540 b_orig(&strm->req.buf), (unsigned int)co_data(&strm->req),
3541 (unsigned int)ci_head_ofs(&strm->req), (unsigned int)ci_data(&strm->req),
3542 (unsigned int)strm->req.buf.size);
3543
3544 if (IS_HTX_STRM(strm)) {
3545 struct htx *htx = htxbuf(&strm->req.buf);
3546
3547 chunk_appendf(&trash,
3548 " htx=%p flags=0x%x size=%u data=%u used=%u wrap=%s extra=%llu\n",
3549 htx, htx->flags, htx->size, htx->data, htx_nbblks(htx),
3550 (htx->tail >= htx->head) ? "NO" : "YES",
3551 (unsigned long long)htx->extra);
3552 }
3553
3554 chunk_appendf(&trash,
3555 " res=%p (f=0x%06x an=0x%x pipe=%d tofwd=%d total=%lld)\n"
3556 " an_exp=%s",
3557 &strm->res,
3558 strm->res.flags, strm->res.analysers,
3559 strm->res.pipe ? strm->res.pipe->data : 0,
3560 strm->res.to_forward, strm->res.total,
3561 strm->res.analyse_exp ?
3562 human_time(TICKS_TO_MS(strm->res.analyse_exp - now_ms),
3563 TICKS_TO_MS(1000)) : "<NEVER>");
3564
3565 chunk_appendf(&trash,
3566 " rex=%s",
3567 strm->res.rex ?
3568 human_time(TICKS_TO_MS(strm->res.rex - now_ms),
3569 TICKS_TO_MS(1000)) : "<NEVER>");
3570
3571 chunk_appendf(&trash,
3572 " wex=%s\n"
3573 " buf=%p data=%p o=%u p=%u i=%u size=%u\n",
3574 strm->res.wex ?
3575 human_time(TICKS_TO_MS(strm->res.wex - now_ms),
3576 TICKS_TO_MS(1000)) : "<NEVER>",
3577 &strm->res.buf,
3578 b_orig(&strm->res.buf), (unsigned int)co_data(&strm->res),
3579 (unsigned int)ci_head_ofs(&strm->res), (unsigned int)ci_data(&strm->res),
3580 (unsigned int)strm->res.buf.size);
3581
3582 if (IS_HTX_STRM(strm)) {
3583 struct htx *htx = htxbuf(&strm->res.buf);
3584
3585 chunk_appendf(&trash,
3586 " htx=%p flags=0x%x size=%u data=%u used=%u wrap=%s extra=%llu\n",
3587 htx, htx->flags, htx->size, htx->data, htx_nbblks(htx),
3588 (htx->tail >= htx->head) ? "NO" : "YES",
3589 (unsigned long long)htx->extra);
3590 }
3591
3592 if (ci_putchk(si_ic(si), &trash) == -1)
3593 goto full;
3594
3595 /* use other states to dump the contents */
3596 }
3597 /* end of dump */
3598 done:
3599 appctx->ctx.sess.uid = 0;
3600 appctx->ctx.sess.section = 0;
3601 return 1;
3602 full:
3603 return 0;
3604 }
3605
3606
3607 static int cli_parse_show_sess(char **args, char *payload, struct appctx *appctx, void *private)
3608 {
3609 if (!cli_has_level(appctx, ACCESS_LVL_OPER))
3610 return 1;
3611
3612 if (*args[2] && strcmp(args[2], "all") == 0)
3613 appctx->ctx.sess.target = (void *)-1;
3614 else if (*args[2])
3615 appctx->ctx.sess.target = (void *)strtoul(args[2], NULL, 0);
3616 else
3617 appctx->ctx.sess.target = NULL;
3618 appctx->ctx.sess.section = 0; /* start with stream status */
3619 appctx->ctx.sess.pos = 0;
3620
3621 /* we need to put an end marker into the streams list. We're just moving
3622 * ourselves there, so that once we find ourselves we know we've reached
3623 * the end. Without this we can run forever if new streams arrive faster
3624 * than we can dump them.
3625 */
3626 HA_SPIN_LOCK(STRMS_LOCK, &streams_lock);
3627 LIST_DEL(&si_strm(appctx->owner)->list);
3628 LIST_ADDQ(&streams, &si_strm(appctx->owner)->list);
3629 HA_SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
3630 return 0;
3631 }
3632
3633 /* This function dumps all streams' states onto the stream interface's
3634 * read buffer. It returns 0 if the output buffer is full and it needs
3635 * to be called again, otherwise non-zero. It runs under thread isolation,
3636 * so there is no thread-safety issue here.
3637 */
3638 static int cli_io_handler_dump_sess(struct appctx *appctx)
3639 {
3640 struct stream_interface *si = appctx->owner;
3641 struct connection *conn;
3642
3643 thread_isolate();
3644
3645 if (unlikely(si_ic(si)->flags & (CF_WRITE_ERROR|CF_SHUTW))) {
3646 /* If we're forced to shut down, we might have to remove our
3647 * reference to the last stream being dumped.
3648 */
3649 if (appctx->st2 == STAT_ST_LIST) {
3650 if (!LIST_ISEMPTY(&appctx->ctx.sess.bref.users)) {
3651 LIST_DEL(&appctx->ctx.sess.bref.users);
3652 LIST_INIT(&appctx->ctx.sess.bref.users);
3653 }
3654 }
3655 goto done;
3656 }
3657
3658 chunk_reset(&trash);
3659
3660 switch (appctx->st2) {
3661 case STAT_ST_INIT:
3662 /* the function had not been called yet, let's prepare the
3663 * buffer for a response. We initialize the current stream
3664 * pointer to the first in the global list. When a target
3665 * stream is being destroyed, it is responsible for updating
3666 * this pointer. We know we have reached the end when this
3667 * pointer points back to the head of the streams list.
3668 */
3669 LIST_INIT(&appctx->ctx.sess.bref.users);
3670 appctx->ctx.sess.bref.ref = streams.n;
3671 appctx->st2 = STAT_ST_LIST;
3672 /* fall through */
3673
3674 case STAT_ST_LIST:
3675 /* first, let's detach the back-ref from a possible previous stream */
3676 if (!LIST_ISEMPTY(&appctx->ctx.sess.bref.users)) {
3677 LIST_DEL(&appctx->ctx.sess.bref.users);
3678 LIST_INIT(&appctx->ctx.sess.bref.users);
3679 }
3680
3681 /* and start from where we stopped, never going further than ourselves */
3682 while (appctx->ctx.sess.bref.ref != si_strm(appctx->owner)->list.n) {
3683 char pn[INET6_ADDRSTRLEN];
3684 struct stream *curr_strm;
3685
3686 curr_strm = LIST_ELEM(appctx->ctx.sess.bref.ref, struct stream *, list);
3687
3688 if (appctx->ctx.sess.target) {
3689 if (appctx->ctx.sess.target != (void *)-1 && appctx->ctx.sess.target != curr_strm)
3690 goto next_sess;
3691
3692 LIST_ADDQ(&curr_strm->back_refs, &appctx->ctx.sess.bref.users);
3693 /* call the proper dump() function and return if we're missing space */
3694 if (!stats_dump_full_strm_to_buffer(si, curr_strm))
3695 goto full;
3696
3697 /* stream dump complete */
3698 LIST_DEL(&appctx->ctx.sess.bref.users);
3699 LIST_INIT(&appctx->ctx.sess.bref.users);
3700 if (appctx->ctx.sess.target != (void *)-1) {
3701 appctx->ctx.sess.target = NULL;
3702 break;
3703 }
3704 else
3705 goto next_sess;
3706 }
3707
3708 chunk_appendf(&trash,
3709 "%p: proto=%s",
3710 curr_strm,
3711 strm_li(curr_strm) ? strm_li(curr_strm)->proto->name : "?");
3712
3713 conn = objt_conn(strm_orig(curr_strm));
3714 switch (conn && conn_get_src(conn) ? addr_to_str(conn->src, pn, sizeof(pn)) : AF_UNSPEC) {
3715 case AF_INET:
3716 case AF_INET6:
3717 chunk_appendf(&trash,
3718 " src=%s:%d fe=%s be=%s srv=%s",
3719 pn,
3720 get_host_port(conn->src),
3721 strm_fe(curr_strm)->id,
3722 (curr_strm->be->cap & PR_CAP_BE) ? curr_strm->be->id : "<NONE>",
3723 objt_server(curr_strm->target) ? objt_server(curr_strm->target)->id : "<none>"
3724 );
3725 break;
3726 case AF_UNIX:
3727 chunk_appendf(&trash,
3728 " src=unix:%d fe=%s be=%s srv=%s",
3729 strm_li(curr_strm)->luid,
3730 strm_fe(curr_strm)->id,
3731 (curr_strm->be->cap & PR_CAP_BE) ? curr_strm->be->id : "<NONE>",
3732 objt_server(curr_strm->target) ? objt_server(curr_strm->target)->id : "<none>"
3733 );
3734 break;
3735 }
3736
3737 chunk_appendf(&trash,
3738 " ts=%02x age=%s calls=%u rate=%u cpu=%llu lat=%llu",
3739 curr_strm->task->state,
3740 human_time(now.tv_sec - curr_strm->logs.tv_accept.tv_sec, 1),
3741 curr_strm->task->calls, read_freq_ctr(&curr_strm->call_rate),
3742 (unsigned long long)curr_strm->task->cpu_time, (unsigned long long)curr_strm->task->lat_time);
3743
3744 chunk_appendf(&trash,
3745 " rq[f=%06xh,i=%u,an=%02xh,rx=%s",
3746 curr_strm->req.flags,
3747 (unsigned int)ci_data(&curr_strm->req),
3748 curr_strm->req.analysers,
3749 curr_strm->req.rex ?
3750 human_time(TICKS_TO_MS(curr_strm->req.rex - now_ms),
3751 TICKS_TO_MS(1000)) : "");
3752
3753 chunk_appendf(&trash,
3754 ",wx=%s",
3755 curr_strm->req.wex ?
3756 human_time(TICKS_TO_MS(curr_strm->req.wex - now_ms),
3757 TICKS_TO_MS(1000)) : "");
3758
3759 chunk_appendf(&trash,
3760 ",ax=%s]",
3761 curr_strm->req.analyse_exp ?
3762 human_time(TICKS_TO_MS(curr_strm->req.analyse_exp - now_ms),
3763 TICKS_TO_MS(1000)) : "");
3764
3765 chunk_appendf(&trash,
3766 " rp[f=%06xh,i=%u,an=%02xh,rx=%s",
3767 curr_strm->res.flags,
3768 (unsigned int)ci_data(&curr_strm->res),
3769 curr_strm->res.analysers,
3770 curr_strm->res.rex ?
3771 human_time(TICKS_TO_MS(curr_strm->res.rex - now_ms),
3772 TICKS_TO_MS(1000)) : "");
3773
3774 chunk_appendf(&trash,
3775 ",wx=%s",
3776 curr_strm->res.wex ?
3777 human_time(TICKS_TO_MS(curr_strm->res.wex - now_ms),
3778 TICKS_TO_MS(1000)) : "");
3779
3780 chunk_appendf(&trash,
3781 ",ax=%s]",
3782 curr_strm->res.analyse_exp ?
3783 human_time(TICKS_TO_MS(curr_strm->res.analyse_exp - now_ms),
3784 TICKS_TO_MS(1000)) : "");
3785
3786 conn = cs_conn(objt_cs(curr_strm->si[0].end));
3787 chunk_appendf(&trash,
3788 " s0=[%d,%1xh,fd=%d,ex=%s]",
3789 curr_strm->si[0].state,
3790 curr_strm->si[0].flags,
3791 conn ? conn->handle.fd : -1,
3792 curr_strm->si[0].exp ?
3793 human_time(TICKS_TO_MS(curr_strm->si[0].exp - now_ms),
3794 TICKS_TO_MS(1000)) : "");
3795
3796 conn = cs_conn(objt_cs(curr_strm->si[1].end));
3797 chunk_appendf(&trash,
3798 " s1=[%d,%1xh,fd=%d,ex=%s]",
3799 curr_strm->si[1].state,
3800 curr_strm->si[1].flags,
3801 conn ? conn->handle.fd : -1,
3802 curr_strm->si[1].exp ?
3803 human_time(TICKS_TO_MS(curr_strm->si[1].exp - now_ms),
3804 TICKS_TO_MS(1000)) : "");
3805
3806 chunk_appendf(&trash,
3807 " exp=%s",
3808 curr_strm->task->expire ?
3809 human_time(TICKS_TO_MS(curr_strm->task->expire - now_ms),
3810 TICKS_TO_MS(1000)) : "");
3811 if (task_in_rq(curr_strm->task))
3812 chunk_appendf(&trash, " run(nice=%d)", curr_strm->task->nice);
3813
3814 chunk_appendf(&trash, "\n");
3815
3816 if (ci_putchk(si_ic(si), &trash) == -1) {
3817 /* let's try again later from this stream. We add ourselves into
3818 * this stream's users so that it can remove us upon termination.
3819 */
3820 LIST_ADDQ(&curr_strm->back_refs, &appctx->ctx.sess.bref.users);
3821 goto full;
3822 }
3823
3824 next_sess:
3825 appctx->ctx.sess.bref.ref = curr_strm->list.n;
3826 }
3827
3828 if (appctx->ctx.sess.target && appctx->ctx.sess.target != (void *)-1) {
3829 /* specified stream not found */
3830 if (appctx->ctx.sess.section > 0)
3831 chunk_appendf(&trash, " *** session terminated while we were watching it ***\n");
3832 else
3833 chunk_appendf(&trash, "Session not found.\n");
3834
3835 if (ci_putchk(si_ic(si), &trash) == -1)
3836 goto full;
3837
3838 appctx->ctx.sess.target = NULL;
3839 appctx->ctx.sess.uid = 0;
3840 goto done;
3841 }
3842 /* fall through */
3843
3844 default:
3845 appctx->st2 = STAT_ST_FIN;
3846 goto done;
3847 }
3848 done:
3849 thread_release();
3850 return 1;
3851 full:
3852 thread_release();
3853 si_rx_room_blk(si);
3854 return 0;
3855 }
3856
3857 static void cli_release_show_sess(struct appctx *appctx)
3858 {
3859 if (appctx->st2 == STAT_ST_LIST) {
3860 HA_SPIN_LOCK(STRMS_LOCK, &streams_lock);
3861 if (!LIST_ISEMPTY(&appctx->ctx.sess.bref.users))
3862 LIST_DEL(&appctx->ctx.sess.bref.users);
3863 HA_SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
3864 }
3865 }
3866
3867 /* Parses the "shutdown session" directive; it always returns 1 */
3868 static int cli_parse_shutdown_session(char **args, char *payload, struct appctx *appctx, void *private)
3869 {
3870 struct stream *strm, *ptr;
3871
3872 if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
3873 return 1;
3874
3875 if (!*args[2])
3876 return cli_err(appctx, "Session pointer expected (use 'show sess').\n");
3877
3878 ptr = (void *)strtoul(args[2], NULL, 0);
3879
3880 thread_isolate();
3881
3882 /* first, look for the requested stream in the stream table */
3883 list_for_each_entry(strm, &streams, list) {
3884 if (strm == ptr) {
3885 stream_shutdown(strm, SF_ERR_KILLED);
3886 break;
3887 }
3888 }
3889
3890 thread_release();
3891
3892 /* did we find the requested stream? */
3893 if (strm != ptr)
3894 return cli_err(appctx, "No such session (use 'show sess').\n");
3895
3896 return 1;
3897 }
3898
3899 /* Parses the "shutdown sessions server" directive; it always returns 1 */
3900 static int cli_parse_shutdown_sessions_server(char **args, char *payload, struct appctx *appctx, void *private)
3901 {
3902 struct server *sv;
3903
3904 if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
3905 return 1;
3906
3907 sv = cli_find_server(appctx, args[3]);
3908 if (!sv)
3909 return 1;
3910
3911 /* kill all the streams that are on this server */
3912 HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
3913 srv_shutdown_streams(sv, SF_ERR_KILLED);
3914 HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
3915 return 1;
3916 }
3917
3918 /* register cli keywords */
3919 static struct cli_kw_list cli_kws = {{ },{
3920 { { "show", "sess", NULL }, "show sess [id] : report the list of current sessions or dump this session", cli_parse_show_sess, cli_io_handler_dump_sess, cli_release_show_sess },
3921 { { "shutdown", "session", NULL }, "shutdown session : kill a specific session", cli_parse_shutdown_session, NULL, NULL },
3922 { { "shutdown", "sessions", "server" }, "shutdown sessions server : kill sessions on a server", cli_parse_shutdown_sessions_server, NULL, NULL },
3923 {{},}
3924 }};
3925
3926 INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);
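/* CLI usage sketch (illustrative, not part of the original source): the
 * keywords registered above are issued on the stats socket, e.g. with socat
 * (addresses and socket path are examples only):
 *
 *	$ echo "show sess" | socat stdio /var/run/haproxy.sock
 *	$ echo "show sess 0x55e2c9aa1200" | socat stdio /var/run/haproxy.sock
 *	$ echo "shutdown session 0x55e2c9aa1200" | socat stdio /var/run/haproxy.sock
 *
 * The pointer argument is the stream address printed by "show sess".
 */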
3927
3928 /* main configuration keyword registration. */
3929 static struct action_kw_list stream_tcp_keywords = { ILH, {
3930 { "use-service", stream_parse_use_service },
3931 { /* END */ }
3932 }};
3933
3934 INITCALL1(STG_REGISTER, tcp_req_cont_keywords_register, &stream_tcp_keywords);
3935
3936 static struct action_kw_list stream_http_keywords = { ILH, {
3937 { "use-service", stream_parse_use_service },
3938 { /* END */ }
3939 }};
3940
3941 INITCALL1(STG_REGISTER, http_req_keywords_register, &stream_http_keywords);
3942
3943 /*
3944 * Local variables:
3945 * c-indent-level: 8
3946 * c-basic-offset: 8
3947 * End:
3948 */
3949