/*
 * Backend variables and functions.
 *
 * Copyright 2000-2013 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <syslog.h>
#include <string.h>
#include <ctype.h>
#include <sys/types.h>

#include <haproxy/acl.h>
#include <haproxy/api.h>
#include <haproxy/arg.h>
#include <haproxy/backend.h>
#include <haproxy/channel.h>
#include <haproxy/check.h>
#include <haproxy/frontend.h>
#include <haproxy/global.h>
#include <haproxy/hash.h>
#include <haproxy/http.h>
#include <haproxy/http_ana.h>
#include <haproxy/http_htx.h>
#include <haproxy/htx.h>
#include <haproxy/lb_chash.h>
#include <haproxy/lb_fas.h>
#include <haproxy/lb_fwlc.h>
#include <haproxy/lb_fwrr.h>
#include <haproxy/lb_map.h>
#include <haproxy/log.h>
#include <haproxy/namespace.h>
#include <haproxy/obj_type.h>
#include <haproxy/payload.h>
#include <haproxy/proto_tcp.h>
#include <haproxy/protocol.h>
#include <haproxy/proxy.h>
#include <haproxy/queue.h>
#include <haproxy/sample.h>
#include <haproxy/server.h>
#include <haproxy/session.h>
#include <haproxy/ssl_sock.h>
#include <haproxy/stream.h>
#include <haproxy/stream_interface.h>
#include <haproxy/task.h>
#include <haproxy/ticks.h>
#include <haproxy/time.h>
#include <haproxy/trace.h>

#define TRACE_SOURCE &trace_strm

/* Returns the number of seconds since the last session was seen on backend
 * <be>, or -1 if no session was ever seen on it.
 */
int be_lastsession(const struct proxy *be)
{
	if (be->be_counters.last_sess)
		return now.tv_sec - be->be_counters.last_sess;

	return -1;
}

/* helper function to invoke the correct hash method */
static unsigned int gen_hash(const struct proxy* px, const char* key, unsigned long len)
{
	unsigned int hash;

	switch (px->lbprm.algo & BE_LB_HASH_FUNC) {
	case BE_LB_HFCN_DJB2:
		hash = hash_djb2(key, len);
		break;
	case BE_LB_HFCN_WT6:
		hash = hash_wt6(key, len);
		break;
	case BE_LB_HFCN_CRC32:
		hash = hash_crc32(key, len);
		break;
	case BE_LB_HFCN_SDBM:
		/* this is the default hash function */
	default:
		hash = hash_sdbm(key, len);
		break;
	}

	return hash;
}
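
/* Illustrative note (not part of the original source): the hash function is
 * selected by the "hash-type" directive, e.g. "hash-type consistent djb2"
 * sets BE_LB_HFCN_DJB2 in px->lbprm.algo. A minimal usage sketch, assuming a
 * proxy configured that way:
 *
 *	unsigned int h = gen_hash(px, "/index.html", 11);
 *	// equivalent to hash_djb2("/index.html", 11) here
 */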

/*
 * This function recounts the number of usable active and backup servers for
 * proxy <p>. These numbers are stored in p->srv_act and p->srv_bck. It also
 * recomputes the total active and backup weights. However, it does not update
 * tot_weight nor tot_used. Use update_backend_weight() for this.
 * This function is designed to be called before servers' weights and states
 * are committed, so it uses the 'next' weight and state values.
 *
 * threads: it is the caller's responsibility to lock the data. For now, this
 * function is called from lb modules, so it should be ok. But if you need to
 * call it from another place, be careful (and update this comment).
 */
void recount_servers(struct proxy *px)
{
	struct server *srv;

	px->srv_act = px->srv_bck = 0;
	px->lbprm.tot_wact = px->lbprm.tot_wbck = 0;
	px->lbprm.fbck = NULL;
	for (srv = px->srv; srv != NULL; srv = srv->next) {
		if (!srv_willbe_usable(srv))
			continue;

		if (srv->flags & SRV_F_BACKUP) {
			if (!px->srv_bck &&
			    !(px->options & PR_O_USE_ALL_BK))
				px->lbprm.fbck = srv;
			px->srv_bck++;
			srv->cumulative_weight = px->lbprm.tot_wbck;
			px->lbprm.tot_wbck += srv->next_eweight;
		} else {
			px->srv_act++;
			srv->cumulative_weight = px->lbprm.tot_wact;
			px->lbprm.tot_wact += srv->next_eweight;
		}
	}
}
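
/* Worked example (illustrative, not part of the original source): with three
 * usable active servers of eweights 1, 2 and 3 visited in that order,
 * recount_servers() leaves srv_act = 3, tot_wact = 6, and the servers'
 * cumulative_weight fields at 0, 1 and 3: each server records the sum of the
 * weights seen before it. update_backend_weight() below then copies
 * tot_wact/srv_act into tot_weight/tot_used.
 */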

/* This function simply updates the backend's tot_weight and tot_used values
 * after server weights have been updated. It is designed to be used after
 * recount_servers() or equivalent.
 *
 * threads: it is the caller's responsibility to lock the data. For now, this
 * function is called from lb modules, so it should be ok. But if you need to
 * call it from another place, be careful (and update this comment).
 */
void update_backend_weight(struct proxy *px)
{
	if (px->srv_act) {
		px->lbprm.tot_weight = px->lbprm.tot_wact;
		px->lbprm.tot_used   = px->srv_act;
	}
	else if (px->lbprm.fbck) {
		/* use only the first backup server */
		px->lbprm.tot_weight = px->lbprm.fbck->next_eweight;
		px->lbprm.tot_used = 1;
	}
	else {
		px->lbprm.tot_weight = px->lbprm.tot_wbck;
		px->lbprm.tot_used   = px->srv_bck;
	}
}

/*
 * This function tries to find a running server for the proxy <px> following
 * the source hash method. Depending on the number of active/backup servers,
 * it will either look for active servers, or for backup servers.
 * If any server is found, it will be returned. If no valid server is found,
 * NULL is returned.
 */
static struct server *get_server_sh(struct proxy *px, const char *addr, int len, const struct server *avoid)
{
	unsigned int h, l;

	if (px->lbprm.tot_weight == 0)
		return NULL;

	l = h = 0;

	/* note: we won't hash if there's only one server left */
	if (px->lbprm.tot_used == 1)
		goto hash_done;

	while ((l + sizeof (int)) <= len) {
		h ^= ntohl(*(unsigned int *)(&addr[l]));
		l += sizeof (int);
	}
	if ((px->lbprm.algo & BE_LB_HASH_MOD) == BE_LB_HMOD_AVAL)
		h = full_hash(h);
 hash_done:
	if ((px->lbprm.algo & BE_LB_LKUP) == BE_LB_LKUP_CHTREE)
		return chash_get_server_hash(px, h, avoid);
	else
		return map_get_server_hash(px, h);
}
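
/* Illustrative example (not part of the original source): with
 * "balance source" the key is the raw client address. An IPv4 client
 * 10.1.2.3 contributes a single 4-byte word, giving h = 0x0a010203 before
 * the optional avalanche step; an IPv6 address contributes four such words
 * XORed together. With map-based hashing the server is then
 * map[h % tot_weight]; with "hash-type consistent" the hash is looked up
 * on the chash ring instead.
 */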

/*
 * This function tries to find a running server for the proxy <px> following
 * the URI hash method. In order to optimize cache hits, the hash computation
 * ends at the question mark. Depending on the number of active/backup servers,
 * it will either look for active servers, or for backup servers.
 * If any server is found, it will be returned. If no valid server is found,
 * NULL is returned. The lbprm.arg_opt{1,2,3} values correspond respectively to
 * the "whole" optional argument (boolean, bit0), the "len" argument (numeric)
 * and the "depth" argument (numeric).
 *
 * This code was contributed by Guillaume Dallaire, who also selected this hash
 * algorithm out of about ten candidates because it gave him the best results.
 *
 */
static struct server *get_server_uh(struct proxy *px, char *uri, int uri_len, const struct server *avoid)
{
	unsigned int hash = 0;
	int c;
	int slashes = 0;
	const char *start, *end;

	if (px->lbprm.tot_weight == 0)
		return NULL;

	/* note: we won't hash if there's only one server left */
	if (px->lbprm.tot_used == 1)
		goto hash_done;

	if (px->lbprm.arg_opt2) // "len"
		uri_len = MIN(uri_len, px->lbprm.arg_opt2);

	start = end = uri;
	while (uri_len--) {
		c = *end;
		if (c == '/') {
			slashes++;
			if (slashes == px->lbprm.arg_opt3) /* depth+1 */
				break;
		}
		else if (c == '?' && !(px->lbprm.arg_opt1 & 1)) // "whole"
			break;
		end++;
	}

	hash = gen_hash(px, start, (end - start));

	if ((px->lbprm.algo & BE_LB_HASH_MOD) == BE_LB_HMOD_AVAL)
		hash = full_hash(hash);
 hash_done:
	if ((px->lbprm.algo & BE_LB_LKUP) == BE_LB_LKUP_CHTREE)
		return chash_get_server_hash(px, hash, avoid);
	else
		return map_get_server_hash(px, hash);
}
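
/* Illustrative example (not part of the original source): with
 * "balance uri depth 2" (stored as arg_opt3 = depth+1 = 3), the URI
 * "/img/photos/2020/pic.jpg?w=64" is hashed only up to "/img/photos":
 * the third '/' ends the scan. Without options the scan stops at the '?'
 * and hashes "/img/photos/2020/pic.jpg"; "len" would cap the hashed prefix
 * length, and "whole" would include the query string as well.
 */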

/*
 * This function tries to find a running server for the proxy <px> following
 * the URL parameter hash method. It looks for a specific parameter in the
 * URL and hashes it to compute the server ID. This is useful to optimize
 * performance by avoiding bounces between servers in contexts where sessions
 * are shared but cookies are not usable. If the parameter is not found, NULL
 * is returned. If any server is found, it will be returned. If no valid server
 * is found, NULL is returned.
 */
static struct server *get_server_ph(struct proxy *px, const char *uri, int uri_len, const struct server *avoid)
{
	unsigned int hash = 0;
	const char *start, *end;
	const char *p;
	const char *params;
	int plen;

	/* when tot_weight is 0 then so is srv_count */
	if (px->lbprm.tot_weight == 0)
		return NULL;

	if ((p = memchr(uri, '?', uri_len)) == NULL)
		return NULL;

	p++;

	uri_len -= (p - uri);
	plen = px->lbprm.arg_len;
	params = p;

	while (uri_len > plen) {
		/* Look for the parameter name followed by an equal symbol */
		if (params[plen] == '=') {
			if (memcmp(params, px->lbprm.arg_str, plen) == 0) {
				/* OK, we have the parameter here at <params>, and
				 * the value after the equal sign, at <p>.
				 * Skip the equal symbol.
				 */
				p += plen + 1;
				start = end = p;
				uri_len -= plen + 1;

				while (uri_len && *end != '&') {
					uri_len--;
					end++;
				}
				hash = gen_hash(px, start, (end - start));

				if ((px->lbprm.algo & BE_LB_HASH_MOD) == BE_LB_HMOD_AVAL)
					hash = full_hash(hash);

				if ((px->lbprm.algo & BE_LB_LKUP) == BE_LB_LKUP_CHTREE)
					return chash_get_server_hash(px, hash, avoid);
				else
					return map_get_server_hash(px, hash);
			}
		}
		/* skip to next parameter */
		p = memchr(params, '&', uri_len);
		if (!p)
			return NULL;
		p++;
		uri_len -= (p - params);
		params = p;
	}
	return NULL;
}
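
/* Illustrative example (not part of the original source): with
 * "balance url_param user" (arg_str = "user", arg_len = 4), the URI
 * "/cart?lang=fr&user=alice" is scanned parameter by parameter:
 * "lang=fr" does not match, "user=alice" does, so only the value "alice"
 * is hashed and all requests carrying user=alice reach the same server
 * as long as the farm is unchanged.
 */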

/*
 * This does the same as get_server_ph() above, but checks the body contents
 * instead of the URL.
 */
static struct server *get_server_ph_post(struct stream *s, const struct server *avoid)
{
	unsigned int hash = 0;
	struct channel *req = &s->req;
	struct proxy *px = s->be;
	struct htx *htx = htxbuf(&req->buf);
	struct htx_blk *blk;
	unsigned int plen = px->lbprm.arg_len;
	unsigned long len;
	const char *params, *p, *start, *end;

	if (px->lbprm.tot_weight == 0)
		return NULL;

	p = params = NULL;
	len = 0;
	for (blk = htx_get_first_blk(htx); blk; blk = htx_get_next_blk(htx, blk)) {
		enum htx_blk_type type = htx_get_blk_type(blk);
		struct ist v;

		if (type != HTX_BLK_DATA)
			continue;
		v = htx_get_blk_value(htx, blk);
		p = params = v.ptr;
		len = v.len;
		break;
	}

	while (len > plen) {
		/* Look for the parameter name followed by an equal symbol */
		if (params[plen] == '=') {
			if (memcmp(params, px->lbprm.arg_str, plen) == 0) {
				/* OK, we have the parameter here at <params>, and
				 * the value after the equal sign, at <p>.
				 * Skip the equal symbol.
				 */
				p += plen + 1;
				start = end = p;
				len -= plen + 1;

				while (len && *end != '&') {
					if (unlikely(!HTTP_IS_TOKEN(*p))) {
						/* if in a POST, body must be URI encoded or it's not a URI.
						 * Do not interpret any possible binary data as a parameter.
						 */
						if (likely(HTTP_IS_LWS(*p))) /* eol, uncertain uri len */
							break;
						return NULL; /* oh, no; this is not uri-encoded.
							      * This body does not contain parameters.
							      */
					}
					len--;
					end++;
					/* should we break if vlen exceeds limit? */
				}
				hash = gen_hash(px, start, (end - start));

				if ((px->lbprm.algo & BE_LB_HASH_MOD) == BE_LB_HMOD_AVAL)
					hash = full_hash(hash);

				if ((px->lbprm.algo & BE_LB_LKUP) == BE_LB_LKUP_CHTREE)
					return chash_get_server_hash(px, hash, avoid);
				else
					return map_get_server_hash(px, hash);
			}
		}
		/* skip to next parameter */
		p = memchr(params, '&', len);
		if (!p)
			return NULL;
		p++;
		len -= (p - params);
		params = p;
	}
	return NULL;
}


/*
 * This function tries to find a running server for the proxy <px> following
 * the Header parameter hash method. It looks for a specific parameter in the
 * URL and hashes it to compute the server ID. This is useful to optimize
 * performance by avoiding bounces between servers in contexts where sessions
 * are shared but cookies are not usable. If the parameter is not found, NULL
 * is returned. If any server is found, it will be returned. If no valid server
 * is found, NULL is returned. When lbprm.arg_opt1 is set, the hash will only
 * apply to the middle part of a domain name ("use_domain_only" option).
 */
static struct server *get_server_hh(struct stream *s, const struct server *avoid)
{
	unsigned int hash = 0;
	struct proxy *px = s->be;
	unsigned int plen = px->lbprm.arg_len;
	unsigned long len;
	const char *p;
	const char *start, *end;
	struct htx *htx = htxbuf(&s->req.buf);
	struct http_hdr_ctx ctx = { .blk = NULL };

	/* tot_weight appears to mean srv_count */
	if (px->lbprm.tot_weight == 0)
		return NULL;

	/* note: we won't hash if there's only one server left */
	if (px->lbprm.tot_used == 1)
		goto hash_done;

	http_find_header(htx, ist2(px->lbprm.arg_str, plen), &ctx, 0);

	/* if the header is not found or empty, let's fall back to round robin */
	if (!ctx.blk || !ctx.value.len)
		return NULL;

	/* Found the param_name in the headers.
	 * We will compute the hash based on this value (ctx.value).
	 */
	len = ctx.value.len;
	p = ctx.value.ptr;

	if (!px->lbprm.arg_opt1) {
		hash = gen_hash(px, p, len);
	} else {
		int dohash = 0;
		p += len;
		/* special computation: use only the main domain name, not the
		 * TLD or the host part. Going backward from the end of the
		 * string, start hashing at the first dot and stop at the next
		 * one. This is designed to work with the 'Host' header, and
		 * requires a special option to activate this.
		 */
		end = p;
		while (len) {
			if (dohash) {
				/* Rewind the pointer until the previous char
				 * is a dot, which marks the start position of
				 * the domain. */
				if (*(p - 1) == '.')
					break;
			}
			else if (*p == '.') {
				/* The pointer is rewound to the dot before the
				 * tld, we memorize the end of the domain and
				 * can enter the domain processing. */
				end = p;
				dohash = 1;
			}
			p--;
			len--;
		}
		start = p;
		hash = gen_hash(px, start, (end - start));
	}
	if ((px->lbprm.algo & BE_LB_HASH_MOD) == BE_LB_HMOD_AVAL)
		hash = full_hash(hash);
 hash_done:
	if ((px->lbprm.algo & BE_LB_LKUP) == BE_LB_LKUP_CHTREE)
		return chash_get_server_hash(px, hash, avoid);
	else
		return map_get_server_hash(px, hash);
}
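
/* Illustrative example (not part of the original source): with
 * "balance hdr(Host) use_domain_only" and a Host value of
 * "www.example.com", the backward scan first stops at the dot before
 * "com" (recording the end), then at the dot after "www" (marking the
 * start), so only "example" is hashed; "shop.example.com" therefore
 * lands on the same server as "www.example.com".
 */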

/* RDP Cookie HASH. */
static struct server *get_server_rch(struct stream *s, const struct server *avoid)
{
	unsigned int hash = 0;
	struct proxy *px = s->be;
	unsigned long len;
	int ret;
	struct sample smp;
	int rewind;

	/* tot_weight appears to mean srv_count */
	if (px->lbprm.tot_weight == 0)
		return NULL;

	memset(&smp, 0, sizeof(smp));

	rewind = co_data(&s->req);
	c_rew(&s->req, rewind);

	ret = fetch_rdp_cookie_name(s, &smp, px->lbprm.arg_str, px->lbprm.arg_len);
	len = smp.data.u.str.data;

	c_adv(&s->req, rewind);

	if (ret == 0 || (smp.flags & SMP_F_MAY_CHANGE) || len == 0)
		return NULL;

	/* note: we won't hash if there's only one server left */
	if (px->lbprm.tot_used == 1)
		goto hash_done;

	/* Found the param_name in the request.
	 * We will compute the hash based on this value.
	 */
	hash = gen_hash(px, smp.data.u.str.area, len);

	if ((px->lbprm.algo & BE_LB_HASH_MOD) == BE_LB_HMOD_AVAL)
		hash = full_hash(hash);
 hash_done:
	if ((px->lbprm.algo & BE_LB_LKUP) == BE_LB_LKUP_CHTREE)
		return chash_get_server_hash(px, hash, avoid);
	else
		return map_get_server_hash(px, hash);
}

/* random value */
static struct server *get_server_rnd(struct stream *s, const struct server *avoid)
{
	unsigned int hash = 0;
	struct proxy *px = s->be;
	struct server *prev, *curr;
	int draws = px->lbprm.arg_opt1; // number of draws

	/* tot_weight appears to mean srv_count */
	if (px->lbprm.tot_weight == 0)
		return NULL;

	curr = NULL;
	do {
		prev = curr;
		hash = ha_random32();
		curr = chash_get_server_hash(px, hash, avoid);
		if (!curr)
			break;

		/* compare the new server to the previous best choice and pick
		 * the one with the least currently served requests.
		 */
		if (prev && prev != curr &&
		    curr->served * prev->cur_eweight > prev->served * curr->cur_eweight)
			curr = prev;
	} while (--draws > 0);

	/* if the selected server is full, pretend we have none so that we reach
	 * the backend's queue instead.
	 */
	if (curr &&
	    (curr->nbpend || (curr->maxconn && curr->served >= srv_dynamic_maxconn(curr))))
		curr = NULL;

	return curr;
}
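
/* Illustrative note (not part of the original source): with
 * "balance random(2)" (draws = 2) this is the classical "power of two
 * choices" strategy: two servers are drawn at random on the consistent
 * hash ring and the one with the lower served/eweight ratio wins, which
 * greatly reduces load imbalance compared to a single random draw.
 */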

/*
 * This function applies the load-balancing algorithm to the stream, as
 * defined by the backend it is assigned to. The stream is then marked as
 * 'assigned'.
 *
 * This function MAY NOT be called with SF_ASSIGNED already set. If the stream
 * had a server previously assigned, it is rebalanced, trying to avoid the same
 * server, which should still be present in target_srv(&s->target) before the call.
 * The function tries to keep the original connection slot if it reconnects to
 * the same server, otherwise it releases it and tries to offer it.
 *
 * It is illegal to call this function with a stream in a queue.
 *
 * It may return :
 *   SRV_STATUS_OK       if everything is OK. ->srv and ->target are assigned.
 *   SRV_STATUS_NOSRV    if no server is available. Stream is not ASSIGNED
 *   SRV_STATUS_FULL     if all servers are saturated. Stream is not ASSIGNED
 *   SRV_STATUS_INTERNAL for other unrecoverable errors.
 *
 * Upon successful return, the stream flag SF_ASSIGNED is set to indicate that
 * it does not need to be called anymore. This means that target_srv(&s->target)
 * can be trusted in balance and direct modes.
 *
 */

int assign_server(struct stream *s)
{
	struct connection *conn = NULL;
	struct server *conn_slot;
	struct server *srv = NULL, *prev_srv;
	int err;

	DPRINTF(stderr, "assign_server : s=%p\n", s);

	err = SRV_STATUS_INTERNAL;
	if (unlikely(s->pend_pos || s->flags & SF_ASSIGNED))
		goto out_err;

	prev_srv = objt_server(s->target);
	conn_slot = s->srv_conn;

	/* We have to release any connection slot before applying any LB algo,
	 * otherwise we may erroneously end up with no available slot.
	 */
	if (conn_slot)
		sess_change_server(s, NULL);

	/* We will now try to find the right server and store it into <objt_server(s->target)>.
	 * Note that <objt_server(s->target)> may be NULL in case of dispatch or proxy mode,
	 * as well as if no server is available (check error code).
	 */

	srv = NULL;
	s->target = NULL;

	if ((s->be->lbprm.algo & BE_LB_KIND) != BE_LB_KIND_HI &&
	    ((s->sess->flags & SESS_FL_PREFER_LAST) ||
	     (s->be->options & PR_O_PREF_LAST))) {
		struct sess_srv_list *srv_list;
		list_for_each_entry(srv_list, &s->sess->srv_list, srv_list) {
			struct server *tmpsrv = objt_server(srv_list->target);

			if (tmpsrv && tmpsrv->proxy == s->be &&
			    ((s->sess->flags & SESS_FL_PREFER_LAST) ||
			     (!s->be->max_ka_queue ||
			      server_has_room(tmpsrv) || (
			      tmpsrv->nbpend + 1 < s->be->max_ka_queue))) &&
			    srv_currently_usable(tmpsrv)) {
				list_for_each_entry(conn, &srv_list->conn_list, session_list) {
					if (!(conn->flags & CO_FL_WAIT_XPRT)) {
						srv = tmpsrv;
						s->target = &srv->obj_type;
						if (conn->flags & CO_FL_SESS_IDLE) {
							conn->flags &= ~CO_FL_SESS_IDLE;
							s->sess->idle_conns--;
						}
						goto out_ok;
					}
				}
			}
		}
	}

	if (s->be->lbprm.algo & BE_LB_KIND) {
		/* we must check if we have at least one server available */
		if (!s->be->lbprm.tot_weight) {
			err = SRV_STATUS_NOSRV;
			goto out;
		}

		/* if there's some queue on the backend, with certain algos we
		 * know it's because all servers are full.
		 */
		if (s->be->nbpend && s->be->nbpend != s->be->beconn &&
		    (((s->be->lbprm.algo & (BE_LB_KIND|BE_LB_NEED|BE_LB_PARM)) == BE_LB_ALGO_FAS)||   // first
		     ((s->be->lbprm.algo & (BE_LB_KIND|BE_LB_NEED|BE_LB_PARM)) == BE_LB_ALGO_RR) ||   // roundrobin
		     ((s->be->lbprm.algo & (BE_LB_KIND|BE_LB_NEED|BE_LB_PARM)) == BE_LB_ALGO_SRR))) { // static-rr
			err = SRV_STATUS_FULL;
			goto out;
		}

		/* First check whether we need to fetch some data or simply call
		 * the LB lookup function. Only the hashing functions will need
		 * some input data in fact, and will support multiple algorithms.
		 */
		switch (s->be->lbprm.algo & BE_LB_LKUP) {
		case BE_LB_LKUP_RRTREE:
			srv = fwrr_get_next_server(s->be, prev_srv);
			break;

		case BE_LB_LKUP_FSTREE:
			srv = fas_get_next_server(s->be, prev_srv);
			break;

		case BE_LB_LKUP_LCTREE:
			srv = fwlc_get_next_server(s->be, prev_srv);
			break;

		case BE_LB_LKUP_CHTREE:
		case BE_LB_LKUP_MAP:
			if ((s->be->lbprm.algo & BE_LB_KIND) == BE_LB_KIND_RR) {
				if ((s->be->lbprm.algo & BE_LB_PARM) == BE_LB_RR_RANDOM)
					srv = get_server_rnd(s, prev_srv);
				else if ((s->be->lbprm.algo & BE_LB_LKUP) == BE_LB_LKUP_CHTREE)
					srv = chash_get_next_server(s->be, prev_srv);
				else
					srv = map_get_server_rr(s->be, prev_srv);
				break;
			}
			else if ((s->be->lbprm.algo & BE_LB_KIND) != BE_LB_KIND_HI) {
				/* unknown balancing algorithm */
				err = SRV_STATUS_INTERNAL;
				goto out;
			}

			switch (s->be->lbprm.algo & BE_LB_PARM) {
			case BE_LB_HASH_SRC:
				conn = objt_conn(strm_orig(s));
				if (conn && conn_get_src(conn) && conn->src->ss_family == AF_INET) {
					srv = get_server_sh(s->be,
					                    (void *)&((struct sockaddr_in *)conn->src)->sin_addr,
					                    4, prev_srv);
				}
				else if (conn && conn_get_src(conn) && conn->src->ss_family == AF_INET6) {
					srv = get_server_sh(s->be,
					                    (void *)&((struct sockaddr_in6 *)conn->src)->sin6_addr,
					                    16, prev_srv);
				}
				else {
					/* unknown IP family */
					err = SRV_STATUS_INTERNAL;
					goto out;
				}
				break;

			case BE_LB_HASH_URI:
				/* URI hashing */
				if (IS_HTX_STRM(s) && s->txn->req.msg_state >= HTTP_MSG_BODY) {
					struct ist uri;

					uri = htx_sl_req_uri(http_get_stline(htxbuf(&s->req.buf)));
					if (s->be->lbprm.arg_opt1 & 2) {
						uri = http_get_path(uri);
						if (!uri.ptr)
							uri = ist("");
					}
					srv = get_server_uh(s->be, uri.ptr, uri.len, prev_srv);
				}
				break;

			case BE_LB_HASH_PRM:
				/* URL Parameter hashing */
				if (IS_HTX_STRM(s) && s->txn->req.msg_state >= HTTP_MSG_BODY) {
					struct ist uri;

					uri = htx_sl_req_uri(http_get_stline(htxbuf(&s->req.buf)));
					srv = get_server_ph(s->be, uri.ptr, uri.len, prev_srv);

					if (!srv && s->txn->meth == HTTP_METH_POST)
						srv = get_server_ph_post(s, prev_srv);
				}
				break;

			case BE_LB_HASH_HDR:
				/* Header Parameter hashing */
				if (IS_HTX_STRM(s) && s->txn->req.msg_state >= HTTP_MSG_BODY)
					srv = get_server_hh(s, prev_srv);
				break;

			case BE_LB_HASH_RDP:
				/* RDP Cookie hashing */
				srv = get_server_rch(s, prev_srv);
				break;

			default:
				/* unknown balancing algorithm */
				err = SRV_STATUS_INTERNAL;
				goto out;
			}

			/* If the hashing parameter was not found, let's fall
			 * back to round robin on the map.
			 */
			if (!srv) {
				if ((s->be->lbprm.algo & BE_LB_LKUP) == BE_LB_LKUP_CHTREE)
					srv = chash_get_next_server(s->be, prev_srv);
				else
					srv = map_get_server_rr(s->be, prev_srv);
			}

			/* end of map-based LB */
			break;

		default:
			/* unknown balancing algorithm */
			err = SRV_STATUS_INTERNAL;
			goto out;
		}

		if (!srv) {
			err = SRV_STATUS_FULL;
			goto out;
		}
		else if (srv != prev_srv) {
			_HA_ATOMIC_ADD(&s->be->be_counters.cum_lbconn, 1);
			_HA_ATOMIC_ADD(&srv->counters.cum_lbconn, 1);
		}
		s->target = &srv->obj_type;
	}
	else if (s->be->options & (PR_O_DISPATCH | PR_O_TRANSP)) {
		s->target = &s->be->obj_type;
	}
	else if ((s->be->options & PR_O_HTTP_PROXY)) {
		conn = cs_conn(objt_cs(s->si[1].end));

		if (conn && conn->dst && is_addr(conn->dst)) {
			/* in proxy mode, we need a valid destination address */
			s->target = &s->be->obj_type;
		} else {
			err = SRV_STATUS_NOSRV;
			goto out;
		}
	}
	else {
		err = SRV_STATUS_NOSRV;
		goto out;
	}

 out_ok:
	s->flags |= SF_ASSIGNED;
	err = SRV_STATUS_OK;
 out:

	/* Either we take back our connection slot, or we offer it to someone
	 * else if we don't need it anymore.
	 */
	if (conn_slot) {
		if (conn_slot == srv) {
			sess_change_server(s, srv);
		} else {
			if (may_dequeue_tasks(conn_slot, s->be))
				process_srv_queue(conn_slot, 0);
		}
	}

 out_err:
	return err;
}

/*
 * This function assigns a server address to a stream, and sets SF_ADDR_SET.
 * The address is taken from the currently assigned server, or from the
 * dispatch or transparent address.
 *
 * It may return :
 *   SRV_STATUS_OK       if everything is OK.
 *   SRV_STATUS_INTERNAL for other unrecoverable errors.
 *
 * Upon successful return, the stream flag SF_ADDR_SET is set. This flag is
 * not cleared, so it is up to the caller to clear it if required.
 */
int assign_server_address(struct stream *s)
{
	struct connection *cli_conn = objt_conn(strm_orig(s));

	DPRINTF(stderr, "assign_server_address : s=%p\n", s);

	if (!sockaddr_alloc(&s->target_addr, NULL, 0))
		return SRV_STATUS_INTERNAL;

	if ((s->flags & SF_DIRECT) || (s->be->lbprm.algo & BE_LB_KIND)) {
		/* A server is necessarily known for this stream */
		if (!(s->flags & SF_ASSIGNED))
			return SRV_STATUS_INTERNAL;

		*s->target_addr = __objt_server(s->target)->addr;
		set_host_port(s->target_addr, __objt_server(s->target)->svc_port);

		if (!is_addr(s->target_addr) && cli_conn) {
			/* if the server has no address, we use the same address
			 * the client asked, which is handy for remapping ports
			 * locally on multiple addresses at once. Nothing is done
			 * for AF_UNIX addresses.
			 */
			if (!conn_get_dst(cli_conn)) {
				/* do nothing if we can't retrieve the address */
			} else if (cli_conn->dst->ss_family == AF_INET) {
				((struct sockaddr_in *)s->target_addr)->sin_family = AF_INET;
				((struct sockaddr_in *)s->target_addr)->sin_addr = ((struct sockaddr_in *)cli_conn->dst)->sin_addr;
			} else if (cli_conn->dst->ss_family == AF_INET6) {
				((struct sockaddr_in6 *)s->target_addr)->sin6_family = AF_INET6;
				((struct sockaddr_in6 *)s->target_addr)->sin6_addr = ((struct sockaddr_in6 *)cli_conn->dst)->sin6_addr;
			}
		}

		/* if this server remaps proxied ports, we'll use
		 * the port the client connected to with an offset. */
		if ((__objt_server(s->target)->flags & SRV_F_MAPPORTS) && cli_conn) {
			int base_port;

			if (conn_get_dst(cli_conn)) {
				/* First, retrieve the port from the incoming connection */
				base_port = get_host_port(cli_conn->dst);

				/* Second, assign the outgoing connection's port */
				base_port += get_host_port(s->target_addr);
				set_host_port(s->target_addr, base_port);
			}
		}
	}
	else if (s->be->options & PR_O_DISPATCH) {
		/* connect to the defined dispatch addr */
		*s->target_addr = s->be->dispatch_addr;
	}
	else if ((s->be->options & PR_O_TRANSP) && cli_conn) {
		/* in transparent mode, use the original dest addr if no dispatch specified */
		if (conn_get_dst(cli_conn) &&
		    (cli_conn->dst->ss_family == AF_INET || cli_conn->dst->ss_family == AF_INET6))
			*s->target_addr = *cli_conn->dst;
	}
	else if (s->be->options & PR_O_HTTP_PROXY) {
		/* If HTTP PROXY option is set, then server is already assigned
		 * during incoming client request parsing. */
	}
	else {
		/* no server and no LB algorithm ! */
		return SRV_STATUS_INTERNAL;
	}

	s->flags |= SF_ADDR_SET;
	return SRV_STATUS_OK;
}
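
/* Illustrative example (not part of the original source): port mapping with
 * SRV_F_MAPPORTS. A server whose port is declared relative, e.g.
 * "server s1 192.168.0.1:+1000", gets the client's destination port plus the
 * offset: a client connecting to port 8080 is forwarded to 192.168.0.1:9080
 * by the computation above. If the server address is 0.0.0.0, the client's
 * destination IP is reused as well.
 */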

/* This function assigns a server to stream <s> if required, and can add the
 * connection to either the assigned server's queue or to the proxy's queue.
 * If ->srv_conn is set, the stream is first released from the server.
 * It may also be called with SF_DIRECT and/or SF_ASSIGNED though. It will
 * be called before any connection and after any retry or redispatch occurs.
 *
 * It is not allowed to call this function with a stream in a queue.
 *
 * Returns :
 *
 *   SRV_STATUS_OK       if everything is OK.
 *   SRV_STATUS_NOSRV    if no server is available. objt_server(s->target) = NULL.
 *   SRV_STATUS_QUEUED   if the connection has been queued.
 *   SRV_STATUS_FULL     if the server(s) is/are saturated and the
 *                       connection could not be queued at the server's,
 *                       which may be NULL if we queue on the backend.
 *   SRV_STATUS_INTERNAL for other unrecoverable errors.
 *
 */
int assign_server_and_queue(struct stream *s)
{
	struct pendconn *p;
	struct server *srv;
	int err;

	if (s->pend_pos)
		return SRV_STATUS_INTERNAL;

	err = SRV_STATUS_OK;
	if (!(s->flags & SF_ASSIGNED)) {
		struct server *prev_srv = objt_server(s->target);

		err = assign_server(s);
		if (prev_srv) {
			/* This stream was previously assigned to a server. We have to
			 * update the stream's and the server's stats :
			 *  - if the server changed :
			 *    - set TX_CK_DOWN if txn.flags was TX_CK_VALID
			 *    - set SF_REDISP if it was successfully redispatched
			 *    - increment srv->redispatches and be->redispatches
			 *  - if the server remained the same : update retries.
			 */

			if (prev_srv != objt_server(s->target)) {
				if (s->txn && (s->txn->flags & TX_CK_MASK) == TX_CK_VALID) {
					s->txn->flags &= ~TX_CK_MASK;
					s->txn->flags |= TX_CK_DOWN;
				}
				s->flags |= SF_REDISP;
				_HA_ATOMIC_ADD(&prev_srv->counters.redispatches, 1);
				_HA_ATOMIC_ADD(&s->be->be_counters.redispatches, 1);
			} else {
				_HA_ATOMIC_ADD(&prev_srv->counters.retries, 1);
				_HA_ATOMIC_ADD(&s->be->be_counters.retries, 1);
			}
		}
	}

	switch (err) {
	case SRV_STATUS_OK:
		/* we have SF_ASSIGNED set */
		srv = objt_server(s->target);
		if (!srv)
			return SRV_STATUS_OK; /* dispatch or proxy mode */

		/* If we already have a connection slot, no need to check any queue */
		if (s->srv_conn == srv)
			return SRV_STATUS_OK;

		/* OK, this stream already has an assigned server, but no
		 * connection slot yet. Either it is a redispatch, or it was
		 * assigned from persistence information (direct mode).
		 */
		if ((s->flags & SF_REDIRECTABLE) && srv->rdr_len) {
			/* server scheduled for redirection, and already assigned. We
			 * don't want to go further nor check the queue.
			 */
			sess_change_server(s, srv); /* not really needed in fact */
			return SRV_STATUS_OK;
		}

		/* We might have to queue this stream if the assigned server is full.
		 * We know we have to queue it into the server's queue, so if a maxqueue
		 * is set on the server, we must also check that the server's queue is
		 * not full, in which case we have to return FULL.
		 */
		if (srv->maxconn &&
		    (srv->nbpend || srv->served >= srv_dynamic_maxconn(srv))) {

			if (srv->maxqueue > 0 && srv->nbpend >= srv->maxqueue)
				return SRV_STATUS_FULL;

			p = pendconn_add(s);
			if (p)
				return SRV_STATUS_QUEUED;
			else
				return SRV_STATUS_INTERNAL;
		}

		/* OK, we can use this server. Let's reserve our place */
		sess_change_server(s, srv);
		return SRV_STATUS_OK;

	case SRV_STATUS_FULL:
		/* queue this stream into the proxy's queue */
		p = pendconn_add(s);
		if (p)
			return SRV_STATUS_QUEUED;
		else
			return SRV_STATUS_INTERNAL;

	case SRV_STATUS_NOSRV:
		return err;

	case SRV_STATUS_INTERNAL:
		return err;

	default:
		return SRV_STATUS_INTERNAL;
	}
}

/* If an explicit source binding is specified on the server and/or backend, and
 * this source makes use of the transparent proxy, then it is extracted now and
 * assigned to the stream's pending connection. This function assumes that an
 * outgoing connection has already been assigned to s->si[1].end.
 */
static void assign_tproxy_address(struct stream *s)
{
#if defined(CONFIG_HAP_TRANSPARENT)
	struct server *srv = objt_server(s->target);
	struct conn_src *src;
	struct connection *cli_conn;
	struct connection *srv_conn;

	if (objt_cs(s->si[1].end))
		srv_conn = cs_conn(__objt_cs(s->si[1].end));
	else
		srv_conn = objt_conn(s->si[1].end);

	if (srv && srv->conn_src.opts & CO_SRC_BIND)
		src = &srv->conn_src;
	else if (s->be->conn_src.opts & CO_SRC_BIND)
		src = &s->be->conn_src;
	else
		return;

	if (!sockaddr_alloc(&srv_conn->src, NULL, 0))
		return;

	switch (src->opts & CO_SRC_TPROXY_MASK) {
	case CO_SRC_TPROXY_ADDR:
		*srv_conn->src = src->tproxy_addr;
		break;
	case CO_SRC_TPROXY_CLI:
	case CO_SRC_TPROXY_CIP:
		/* FIXME: what can we do if the client connects in IPv6 or unix socket ? */
		cli_conn = objt_conn(strm_orig(s));
		if (cli_conn && conn_get_src(cli_conn))
			*srv_conn->src = *cli_conn->src;
		else {
			sockaddr_free(&srv_conn->src);
		}
		break;
	case CO_SRC_TPROXY_DYN:
		if (src->bind_hdr_occ && IS_HTX_STRM(s)) {
			char *vptr;
			size_t vlen;

			/* bind to the IP in a header */
			((struct sockaddr_in *)srv_conn->src)->sin_family = AF_INET;
			((struct sockaddr_in *)srv_conn->src)->sin_port = 0;
			((struct sockaddr_in *)srv_conn->src)->sin_addr.s_addr = 0;
			if (http_get_htx_hdr(htxbuf(&s->req.buf),
			                     ist2(src->bind_hdr_name, src->bind_hdr_len),
			                     src->bind_hdr_occ, NULL, &vptr, &vlen)) {
				((struct sockaddr_in *)srv_conn->src)->sin_addr.s_addr =
					htonl(inetaddr_host_lim(vptr, vptr + vlen));
			}
		}
		break;
	default:
		sockaddr_free(&srv_conn->src);
	}
#endif
}
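
/* Illustrative note (not part of the original source): the CO_SRC_TPROXY_*
 * cases above correspond to the "source" directive, e.g.:
 *
 *	source 0.0.0.0 usesrc clientip       -> CO_SRC_TPROXY_CIP
 *	source 0.0.0.0 usesrc client         -> CO_SRC_TPROXY_CLI
 *	source 0.0.0.0 usesrc 192.168.0.1    -> CO_SRC_TPROXY_ADDR
 *	source 0.0.0.0 usesrc hdr_ip(x-src)  -> CO_SRC_TPROXY_DYN
 *
 * In the last case the outgoing source IP is read from the designated
 * request header ("x-src" here is a hypothetical header name).
 */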

/* Attempt to get a backend connection from the specified mt_list array
 * (safe or idle connections). The <is_safe> argument means what type of
 * connection the caller wants.
 */
static struct connection *conn_backend_get(struct stream *s, struct server *srv, int is_safe)
{
	struct mt_list *mt_list = is_safe ? srv->safe_conns : srv->idle_conns;
	struct connection *conn;
	int i; // thread number
	int found = 0;
	int stop;

	/* We need to lock even if this is our own list, because another
	 * thread may be trying to migrate that connection, and we don't want
	 * to end up with two threads using the same connection.
	 */
	i = tid;
	HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
	conn = MT_LIST_POP(&mt_list[tid], struct connection *, list);

	/* If we failed to pick a connection from the idle list, let's try again with
	 * the safe list.
	 */
	if (!conn && !is_safe && srv->curr_safe_nb > 0) {
		conn = MT_LIST_POP(&srv->safe_conns[tid], struct connection *, list);
		if (conn) {
			is_safe = 1;
			mt_list = srv->safe_conns;
		}
	}
	HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);

	/* If we found a connection in our own list, and we don't have to
	 * steal one from another thread, then we're done.
	 */
	if (conn)
		goto done;

	/* pool sharing globally disabled ? */
	if (!(global.tune.options & GTUNE_IDLE_POOL_SHARED))
		goto done;

	/* Are we allowed to pick from another thread ? We'll still try
	 * it if we're running low on FDs as we don't want to create
	 * extra conns in this case, otherwise we can give up if we have
	 * too few idle conns.
	 */
	if (srv->curr_idle_conns < srv->low_idle_conns &&
	    ha_used_fds < global.tune.pool_low_count)
		goto done;

	/* Lookup all other threads for an idle connection, starting from last
	 * unvisited thread.
	 */
	stop = srv->next_takeover;
	if (stop >= global.nbthread)
		stop = 0;

	i = stop;
	do {
		struct mt_list *elt1, elt2;

		if (!srv->curr_idle_thr[i] || i == tid)
			continue;

		if (HA_SPIN_TRYLOCK(OTHER_LOCK, &idle_conns[i].takeover_lock) != 0)
			continue;
		mt_list_for_each_entry_safe(conn, &mt_list[i], list, elt1, elt2) {
			if (conn->mux->takeover && conn->mux->takeover(conn, i) == 0) {
				MT_LIST_DEL_SAFE(elt1);
				_HA_ATOMIC_ADD(&activity[tid].fd_takeover, 1);
				found = 1;

				break;
			}
		}

		if (!found && !is_safe && srv->curr_safe_nb > 0) {
			mt_list_for_each_entry_safe(conn, &srv->safe_conns[i], list, elt1, elt2) {
				if (conn->mux->takeover && conn->mux->takeover(conn, i) == 0) {
					MT_LIST_DEL_SAFE(elt1);
					_HA_ATOMIC_ADD(&activity[tid].fd_takeover, 1);
					found = 1;
					is_safe = 1;
					mt_list = srv->safe_conns;

					break;
				}
			}
		}
		HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[i].takeover_lock);
	} while (!found && (i = (i + 1 == global.nbthread) ? 0 : i + 1) != stop);

	if (!found)
		conn = NULL;
 done:
	if (conn) {
		_HA_ATOMIC_STORE(&srv->next_takeover, (i + 1 == global.nbthread) ? 0 : i + 1);

		srv_use_conn(srv, conn);

		_HA_ATOMIC_SUB(&srv->curr_idle_conns, 1);
		_HA_ATOMIC_SUB(conn->flags & CO_FL_SAFE_LIST ? &srv->curr_safe_nb : &srv->curr_idle_nb, 1);
		_HA_ATOMIC_SUB(&srv->curr_idle_thr[i], 1);
		conn->flags &= ~CO_FL_LIST_MASK;
		__ha_barrier_atomic_store();

		if ((s->be->options & PR_O_REUSE_MASK) == PR_O_REUSE_SAFE &&
		    conn->mux->flags & MX_FL_HOL_RISK) {
			/* attach the connection to the session private list */
			conn->owner = s->sess;
			session_add_conn(s->sess, conn, conn->target);
		}
		else {
			LIST_ADDQ(&srv->available_conns[tid], mt_list_to_list(&conn->list));
		}
	}
	return conn;
}
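
/* Illustrative note (not part of the original source): idle connections are
 * kept in per-thread lists, so reusing a connection owned by another thread
 * requires a takeover. The scan above starts at srv->next_takeover so that
 * successive steals rotate across threads instead of always draining the
 * first one, and conn->mux->takeover() lets the mux migrate its tasklets
 * and FD to the calling thread before the connection is used.
 */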

/*
 * This function initiates a connection to the server assigned to this stream
 * (s->target, s->si[1].addr.to). It will assign a server if none
 * is assigned yet.
 * It can return one of :
 *  - SF_ERR_NONE if everything's OK
 *  - SF_ERR_SRVTO if there are no more servers
 *  - SF_ERR_SRVCL if the connection was refused by the server
 *  - SF_ERR_PRXCOND if the connection has been limited by the proxy (maxconn)
 *  - SF_ERR_RESOURCE if a system resource is lacking (eg: fd limits, ports, ...)
 *  - SF_ERR_INTERNAL for any other purely internal errors
 * Additionally, in the case of SF_ERR_RESOURCE, an emergency log will be emitted.
 * The server-facing stream interface is expected to hold a pre-allocated connection
 * in s->si[1].conn.
 */
int connect_server(struct stream *s)
{
	struct connection *cli_conn = objt_conn(strm_orig(s));
	struct connection *srv_conn = NULL;
	struct conn_stream *srv_cs = NULL;
	struct server *srv;
	const int reuse_mode = s->be->options & PR_O_REUSE_MASK;
	int reuse = 0;
	int init_mux = 0;
	int err;


	/* This will catch some corner cases such as lying connections resulting from
	 * retries or connect timeouts but will rarely trigger.
	 */
	si_release_endpoint(&s->si[1]);

	srv = objt_server(s->target);

	/* do not reuse if mode is not http or if avail list is not allocated */
	if ((s->be->mode != PR_MODE_HTTP) || (srv && !srv->available_conns))
		goto skip_reuse;

	/* first, search for a matching connection in the session's idle conns */
	srv_conn = session_get_conn(s->sess, s->target);
	if (srv_conn)
		reuse = 1;

	if (srv && !reuse && reuse_mode != PR_O_REUSE_NEVR) {
		/* Below we pick connections from the safe, idle or
		 * available (which are safe too) lists based
		 * on the strategy, the fact that this is a first or second
		 * (retryable) request, with the indicated priority (1 or 2) :
		 *
		 *          SAFE                 AGGR                ALWS
		 *
		 *      +-----+-----+        +-----+-----+       +-----+-----+
		 *   req| 1st | 2nd |     req| 1st | 2nd |    req| 1st | 2nd |
		 *  ----+-----+-----+    ----+-----+-----+   ----+-----+-----+
		 *  safe|  -  |  2  |    safe|  1  |  2  |   safe|  1  |  2  |
		 *  ----+-----+-----+    ----+-----+-----+   ----+-----+-----+
		 *  idle|  -  |  1  |    idle|  -  |  1  |   idle|  2  |  1  |
		 *  ----+-----+-----+    ----+-----+-----+   ----+-----+-----+
		 *
		 * Idle conns are necessarily looked up on the same thread so
		 * that there is no concurrency issues.
		 */
		if (!LIST_ISEMPTY(&srv->available_conns[tid])) {
			srv_conn = LIST_ELEM(srv->available_conns[tid].n, struct connection *, list);
			reuse = 1;
		}
		/* if no available connections found, search for an idle/safe */
		else if (srv->max_idle_conns && srv->curr_idle_conns > 0) {
			const int not_first_req = s->txn && s->txn->flags & TX_NOT_FIRST;
			const int idle = srv->curr_idle_nb > 0;
			const int safe = srv->curr_safe_nb > 0;

			/* second column of the tables above,
			 * search for an idle then safe conn */
			if (not_first_req) {
				if (idle || safe)
					srv_conn = conn_backend_get(s, srv, 0);
			}
			/* first column of the tables above */
			else if (reuse_mode >= PR_O_REUSE_AGGR) {
				/* search for a safe conn */
				if (safe)
					srv_conn = conn_backend_get(s, srv, 1);
				else if (reuse_mode == PR_O_REUSE_ALWS && idle)
					srv_conn = conn_backend_get(s, srv, 0);
			}
			/* If we've picked a connection from the pool, we now have to
			 * detach it. We may have to get rid of the previous idle
			 * connection we had, so for this we try to swap it with the
			 * other owner's. That way it may remain alive for others to
			 * pick.
			 */
			if (srv_conn)
				reuse = 1;
		}
	}


	/* here reuse might have been set above, indicating srv_conn finally
	 * is OK.
	 */
	if (reuse) {
		/* Disable connection reuse if a dynamic source is used.
		 * As long as we don't share connections between servers,
		 * we don't need to disable connection reuse on non-idempotent
		 * requests nor when PROXY protocol is used.
		 */
		if (srv && srv->conn_src.opts & CO_SRC_BIND) {
			if ((srv->conn_src.opts & CO_SRC_TPROXY_MASK) == CO_SRC_TPROXY_DYN)
				reuse = 0;
		}
		else if (s->be->conn_src.opts & CO_SRC_BIND) {
			if ((s->be->conn_src.opts & CO_SRC_TPROXY_MASK) == CO_SRC_TPROXY_DYN)
				reuse = 0;
		}
	}

	if (ha_used_fds > global.tune.pool_high_count && srv && srv->idle_conns) {
		struct connection *tokill_conn;

		/* We can't reuse a connection, and we have more FDs than deemed
		 * acceptable, so attempt to kill an idle connection.
		 */
		/* First, try from our own idle list */
		tokill_conn = MT_LIST_POP(&srv->idle_conns[tid],
		                          struct connection *, list);
		if (tokill_conn)
			tokill_conn->mux->destroy(tokill_conn->ctx);
		/* If not, iterate over other thread's idling pool, and try to grab one */
		else {
			int i;

			for (i = tid; (i = ((i + 1 == global.nbthread) ? 0 : i + 1)) != tid;) {
				// just silence stupid gcc which reports an absurd
				// out-of-bounds warning for <i> which is always
				// exactly zero without threads, but it seems to
				// see it possibly larger.
				ALREADY_CHECKED(i);

				if (HA_SPIN_TRYLOCK(OTHER_LOCK, &idle_conns[i].takeover_lock) != 0)
					continue;

				tokill_conn = MT_LIST_POP(&srv->idle_conns[i],
				                          struct connection *, list);
				if (!tokill_conn)
					tokill_conn = MT_LIST_POP(&srv->safe_conns[i],
					                          struct connection *, list);

				if (tokill_conn) {
					/* We got one, put it into the concerned thread's
					 * to-kill list, and wake its cleanup task.
					 */
					MT_LIST_ADDQ(&idle_conns[i].toremove_conns,
					             (struct mt_list *)&tokill_conn->list);
					task_wakeup(idle_conns[i].cleanup_task, TASK_WOKEN_OTHER);
					HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[i].takeover_lock);
					break;
				}
				HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[i].takeover_lock);
			}
		}

	}

	if (reuse) {
		if (srv_conn->mux) {
			int avail = srv_conn->mux->avail_streams(srv_conn);

			if (avail <= 1) {
				/* No more streams available, remove it from the list */
				MT_LIST_DEL(&srv_conn->list);
			}

			if (avail >= 1) {
				srv_cs = srv_conn->mux->attach(srv_conn, s->sess);
				if (srv_cs)
					si_attach_cs(&s->si[1], srv_cs);
				else
					srv_conn = NULL;
			}
			else
				srv_conn = NULL;
		}
		/* otherwise srv_conn is left intact */
	}
	else
		srv_conn = NULL;

 skip_reuse:
	/* no reuse or failed to reuse the connection above, pick a new one */
	if (!srv_conn) {
		srv_conn = conn_new(s->target);
		srv_cs = NULL;

		if (srv_conn) {
			srv_conn->owner = s->sess;
			if (reuse_mode == PR_O_REUSE_NEVR)
				conn_set_private(srv_conn);
		}
	}

	if (!srv_conn || !sockaddr_alloc(&srv_conn->dst, 0, 0)) {
		if (srv_conn)
			conn_free(srv_conn);
		return SF_ERR_RESOURCE;
	}

	if (!(s->flags & SF_ADDR_SET)) {
		err = assign_server_address(s);
		if (err != SRV_STATUS_OK) {
			conn_free(srv_conn);
			return SF_ERR_INTERNAL;
		}
	}

	/* copy the target address into the connection */
	*srv_conn->dst = *s->target_addr;

	/* Copy network namespace from client connection */
	srv_conn->proxy_netns = cli_conn ? cli_conn->proxy_netns : NULL;

	if (!conn_xprt_ready(srv_conn) && !srv_conn->mux) {
		/* set the correct protocol on the output stream interface */
		if (srv)
			conn_prepare(srv_conn, protocol_by_family(srv_conn->dst->ss_family), srv->xprt);
		else if (obj_type(s->target) == OBJ_TYPE_PROXY) {
			/* proxies exclusively run on raw_sock right now */
			conn_prepare(srv_conn, protocol_by_family(srv_conn->dst->ss_family), xprt_get(XPRT_RAW));
			if (!(srv_conn->ctrl)) {
				conn_free(srv_conn);
				return SF_ERR_INTERNAL;
			}
		}
		else {
			conn_free(srv_conn);
			return SF_ERR_INTERNAL; /* how did we get there ? */
		}

		srv_cs = si_alloc_cs(&s->si[1], srv_conn);
		if (!srv_cs) {
			conn_free(srv_conn);
			return SF_ERR_RESOURCE;
		}
		srv_conn->ctx = srv_cs;
#if defined(USE_OPENSSL) && defined(TLSEXT_TYPE_application_layer_protocol_negotiation)
		if (!srv ||
		    (srv->use_ssl != 1 || (!(srv->ssl_ctx.alpn_str) && !(srv->ssl_ctx.npn_str)) ||
		     srv->mux_proto || s->be->mode != PR_MODE_HTTP))
#endif
			init_mux = 1;

		/* process the case where the server requires the PROXY protocol to be sent */
		srv_conn->send_proxy_ofs = 0;

		if (srv && srv->pp_opts) {
			conn_set_private(srv_conn);
			srv_conn->flags |= CO_FL_SEND_PROXY;
			srv_conn->send_proxy_ofs = 1; /* must compute size */
			if (cli_conn)
				conn_get_dst(cli_conn);
		}

		assign_tproxy_address(s);

		if (srv && (srv->flags & SRV_F_SOCKS4_PROXY)) {
			srv_conn->send_proxy_ofs = 1;
			srv_conn->flags |= CO_FL_SOCKS4;
		}
	}
	else if (!conn_xprt_ready(srv_conn)) {
		if (srv_conn->mux->reset)
			srv_conn->mux->reset(srv_conn);
	}
	else {
		/* Only consider we're doing reuse if the connection was
		 * ready.
		 */
		if (srv_conn->mux->ctl(srv_conn, MUX_STATUS, NULL) & MUX_STATUS_READY)
			s->flags |= SF_SRV_REUSED;
	}

	/* flag for logging source ip/port */
	if (strm_fe(s)->options2 & PR_O2_SRC_ADDR)
		s->si[1].flags |= SI_FL_SRC_ADDR;

	/* disable lingering */
	if (s->be->options & PR_O_TCP_NOLING)
		s->si[1].flags |= SI_FL_NOLINGER;

	if (s->flags & SF_SRV_REUSED) {
		_HA_ATOMIC_ADD(&s->be->be_counters.reuse, 1);
		if (srv)
			_HA_ATOMIC_ADD(&srv->counters.reuse, 1);
	} else {
		_HA_ATOMIC_ADD(&s->be->be_counters.connect, 1);
		if (srv)
			_HA_ATOMIC_ADD(&srv->counters.connect, 1);
	}

	err = si_connect(&s->si[1], srv_conn);
	if (err != SF_ERR_NONE)
		return err;

#ifdef USE_OPENSSL
	if (srv && srv->ssl_ctx.sni) {
		struct sample *smp;

		smp = sample_fetch_as_type(s->be, s->sess, s, SMP_OPT_DIR_REQ | SMP_OPT_FINAL,
		                           srv->ssl_ctx.sni, SMP_T_STR);
		if (smp_make_safe(smp)) {
			ssl_sock_set_servername(srv_conn, smp->data.u.str.area);
			if (!(srv->ssl_ctx.sni->fetch->use & SMP_USE_INTRN) ||
			    smp->flags & SMP_F_VOLATILE) {
				conn_set_private(srv_conn);
			}
		}
	}
#endif /* USE_OPENSSL */

	/* The CO_FL_SEND_PROXY flag may have been set by the connect method,
	 * if so, add our handshake pseudo-XPRT now.
	 */
	if ((srv_conn->flags & CO_FL_HANDSHAKE)) {
		if (xprt_add_hs(srv_conn) < 0) {
			conn_full_close(srv_conn);
			return SF_ERR_INTERNAL;
		}
	}

	/* We have to defer the mux initialization until after si_connect()
	 * has been called, as we need the xprt to have been properly
	 * initialized, or any attempt to recv during the mux init may
	 * fail, and flag the connection as CO_FL_ERROR.
	 */
	if (init_mux) {
		if (conn_install_mux_be(srv_conn, srv_cs, s->sess) < 0) {
			conn_full_close(srv_conn);
			return SF_ERR_INTERNAL;
		}
		if (s->be->mode == PR_MODE_HTTP) {
			/* If we're doing http-reuse always, and the connection
			 * is not private with available streams (an http2
			 * connection), add it to the available list, so that
			 * others can use it right away. If the connection is
			 * private or we're doing http-reuse safe and the mux
			 * protocol supports multiplexing, add it in the
			 * session server list.
			 */
			if (srv && reuse_mode == PR_O_REUSE_ALWS &&
			    !(srv_conn->flags & CO_FL_PRIVATE) &&
			    srv_conn->mux->avail_streams(srv_conn) > 0) {
				LIST_ADDQ(&srv->available_conns[tid], mt_list_to_list(&srv_conn->list));
			}
			else if (srv_conn->flags & CO_FL_PRIVATE ||
			         (reuse_mode == PR_O_REUSE_SAFE &&
			          srv_conn->mux->flags & MX_FL_HOL_RISK)) {
				/* If it fails now, the same will be done in the mux->detach() callback */
				session_add_conn(s->sess, srv_conn, srv_conn->target);
			}
		}
	}

#if defined(USE_OPENSSL) && (defined(OPENSSL_IS_BORINGSSL) || (HA_OPENSSL_VERSION_NUMBER >= 0x10101000L))

	if (!reuse && cli_conn && srv && srv_conn->mux &&
	    (srv->ssl_ctx.options & SRV_SSL_O_EARLY_DATA) &&
	    /* Only attempt to use early data if either the client sent
	     * early data, so that we know it can handle a 425, or if
	     * we are allowed to retry requests on early data failure, and
	     * it's our first try
	     */
	    ((cli_conn->flags & CO_FL_EARLY_DATA) ||
	     ((s->be->retry_type & PR_RE_EARLY_ERROR) &&
	      s->si[1].conn_retries == s->be->conn_retries)) &&
	    !channel_is_empty(si_oc(&s->si[1])) &&
	    srv_conn->flags & CO_FL_SSL_WAIT_HS)
		srv_conn->flags &= ~(CO_FL_SSL_WAIT_HS | CO_FL_WAIT_L6_CONN);
#endif

	/* set connect timeout */
	s->si[1].exp = tick_add_ifset(now_ms, s->be->timeout.connect);

	if (srv) {
		int count;

		s->flags |= SF_CURR_SESS;
		count = _HA_ATOMIC_ADD(&srv->cur_sess, 1);
		HA_ATOMIC_UPDATE_MAX(&srv->counters.cur_sess_max, count);
		if (s->be->lbprm.server_take_conn)
			s->be->lbprm.server_take_conn(srv, 0);
	}

	/* Now handle synchronously connected sockets. We know the stream-int
	 * is at least in state SI_ST_CON. These ones typically are UNIX
	 * sockets, socket pairs, and occasionally TCP connections on the
	 * loopback on a heavily loaded system.
	 */
	if ((srv_conn->flags & CO_FL_ERROR || srv_cs->flags & CS_FL_ERROR))
		s->si[1].flags |= SI_FL_ERR;

	/* If we had early data, and the handshake ended, then
	 * we can remove the flag, and attempt to wake the task up,
	 * in the event there's an analyser waiting for the end of
	 * the handshake.
	 */
	if (!(srv_conn->flags & (CO_FL_WAIT_XPRT | CO_FL_EARLY_SSL_HS)))
		srv_cs->flags &= ~CS_FL_WAIT_FOR_HS;

	if (!si_state_in(s->si[1].state, SI_SB_EST|SI_SB_DIS|SI_SB_CLO) &&
	    (srv_conn->flags & CO_FL_WAIT_XPRT) == 0) {
		s->si[1].exp = TICK_ETERNITY;
		si_oc(&s->si[1])->flags |= CF_WRITE_NULL;
		if (s->si[1].state == SI_ST_CON)
			s->si[1].state = SI_ST_RDY;
	}

	/* Report EOI on the channel if it was reached from the mux point of
	 * view.
	 *
	 * Note: This test is only required because si_cs_process is also the SI
	 *       wake callback. Otherwise si_cs_recv()/si_cs_send() already take
	 *       care of it.
	 */
	if ((srv_cs->flags & CS_FL_EOI) && !(si_ic(&s->si[1])->flags & CF_EOI))
		si_ic(&s->si[1])->flags |= (CF_EOI|CF_READ_PARTIAL);

	/* catch all sync connect while the mux is not already installed */
	if (!srv_conn->mux && !(srv_conn->flags & CO_FL_WAIT_XPRT)) {
		if (conn_create_mux(srv_conn) < 0) {
			conn_full_close(srv_conn);
			return SF_ERR_INTERNAL;
		}
	}

	return SF_ERR_NONE; /* connection is OK */
}
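
/* Illustrative note (not part of the original source): reuse_mode maps to
 * the "http-reuse" directive: "never" (PR_O_REUSE_NEVR) keeps every
 * connection private to its session, "safe" (PR_O_REUSE_SAFE) only shares
 * connections when doing so is known to be harmless for the request, while
 * "aggressive" (PR_O_REUSE_AGGR) and "always" (PR_O_REUSE_ALWS)
 * progressively relax that constraint, as summarized in the picking table
 * at the top of connect_server().
 */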
1657
1658
1659 /* This function performs the "redispatch" part of a connection attempt. It
1660 * will assign a server if required, queue the connection if required, and
1661 * handle errors that might arise at this level. It can change the server
1662 * state. It will return 1 if it encounters an error, switches the server
1663 * state, or has to queue a connection. Otherwise, it will return 0 indicating
1664 * that the connection is ready to use.
1665 */
1666
srv_redispatch_connect(struct stream * s)1667 int srv_redispatch_connect(struct stream *s)
1668 {
1669 struct server *srv;
1670 int conn_err;
1671
1672 /* We know that we don't have any connection pending, so we will
1673 * try to get a new one, and wait in this state if it's queued
1674 */
1675 redispatch:
1676 conn_err = assign_server_and_queue(s);
1677 srv = objt_server(s->target);
1678
1679 switch (conn_err) {
1680 case SRV_STATUS_OK:
1681 break;
1682
1683 case SRV_STATUS_FULL:
1684 /* The server has reached its maxqueue limit. Either PR_O_REDISP is set
1685 * and we can redispatch to another server, or it is not and we return
1686 * 503. This only makes sense in DIRECT mode however, because normal LB
1687 * algorithms would never select such a server, and hash algorithms
1688 * would bring us on the same server again. Note that s->target is set
1689 * in this case.
1690 */
1691 if (((s->flags & (SF_DIRECT|SF_FORCE_PRST)) == SF_DIRECT) &&
1692 (s->be->options & PR_O_REDISP)) {
1693 s->flags &= ~(SF_DIRECT | SF_ASSIGNED | SF_ADDR_SET);
1694 sockaddr_free(&s->target_addr);
1695 goto redispatch;
1696 }
1697
1698 if (!s->si[1].err_type) {
1699 s->si[1].err_type = SI_ET_QUEUE_ERR;
1700 }
1701
1702 _HA_ATOMIC_ADD(&srv->counters.failed_conns, 1);
1703 _HA_ATOMIC_ADD(&s->be->be_counters.failed_conns, 1);
1704 return 1;
1705
1706 case SRV_STATUS_NOSRV:
1707 /* note: it is guaranteed that srv == NULL here */
1708 if (!s->si[1].err_type) {
1709 s->si[1].err_type = SI_ET_CONN_ERR;
1710 }
1711
1712 _HA_ATOMIC_ADD(&s->be->be_counters.failed_conns, 1);
1713 return 1;
1714
1715 case SRV_STATUS_QUEUED:
1716 s->si[1].exp = tick_add_ifset(now_ms, s->be->timeout.queue);
1717 s->si[1].state = SI_ST_QUE;
1718 /* do nothing else and do not wake any other stream up */
1719 return 1;
1720
1721 case SRV_STATUS_INTERNAL:
1722 default:
1723 if (!s->si[1].err_type) {
1724 s->si[1].err_type = SI_ET_CONN_OTHER;
1725 }
1726
1727 if (srv)
1728 srv_inc_sess_ctr(srv);
1729 if (srv)
1730 srv_set_sess_last(srv);
1731 if (srv)
1732 _HA_ATOMIC_ADD(&srv->counters.failed_conns, 1);
1733 _HA_ATOMIC_ADD(&s->be->be_counters.failed_conns, 1);
1734
1735 /* release other streams waiting for this server */
1736 if (may_dequeue_tasks(srv, s->be))
1737 process_srv_queue(srv, 0);
1738 return 1;
1739 }
1740 /* if we get here, it's because we got SRV_STATUS_OK, which also
1741 * means that the connection has not been queued.
1742 */
1743 return 0;
1744 }
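
/* A usage sketch of the contract above, mirroring what back_handle_st_req()
 * does further below: a non-zero return means "stop processing here".
 *
 *	if (srv_redispatch_connect(s) != 0) {
 *		if (s->si[1].state == SI_ST_QUE)
 *			return;		// request was queued, wait for a slot
 *		// otherwise an error was reported and the stream was closed
 *	}
 *	// on zero, a server is assigned and the connection may proceed
 */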

/* Check if the connection request is in such a state that it can be aborted. */
static int back_may_abort_req(struct channel *req, struct stream *s)
{
	return ((req->flags & (CF_READ_ERROR)) ||
	        ((req->flags & (CF_SHUTW_NOW|CF_SHUTW)) &&  /* empty and client aborted */
	         (channel_is_empty(req) || (s->be->options & PR_O_ABRT_CLOSE))));
}

/* Update back stream interface status for input states SI_ST_ASS, SI_ST_QUE
 * and SI_ST_TAR. Other input states are simply ignored.
 * Possible output states are SI_ST_CLO, SI_ST_TAR, SI_ST_ASS, SI_ST_REQ, SI_ST_CON
 * and SI_ST_EST. Flags must have previously been updated for timeouts and other
 * conditions.
 */
void back_try_conn_req(struct stream *s)
{
	struct server *srv = objt_server(s->target);
	struct stream_interface *si = &s->si[1];
	struct channel *req = &s->req;

	DBG_TRACE_ENTER(STRM_EV_STRM_PROC|STRM_EV_SI_ST, s);

	if (si->state == SI_ST_ASS) {
		/* Server assigned to connection request, we have to try to connect now */
		int conn_err;

		/* Before we try to initiate the connection, see if the
		 * request may be aborted instead.
		 */
		if (back_may_abort_req(req, s)) {
			si->err_type |= SI_ET_CONN_ABRT;
			DBG_TRACE_STATE("connection aborted", STRM_EV_STRM_PROC|STRM_EV_SI_ST|STRM_EV_STRM_ERR, s);
			goto abort_connection;
		}

		conn_err = connect_server(s);
		srv = objt_server(s->target);

		if (conn_err == SF_ERR_NONE) {
			/* state = SI_ST_CON or SI_ST_EST now */
			if (srv) {
				srv_inc_sess_ctr(srv);
				srv_set_sess_last(srv);
			}
			DBG_TRACE_STATE("connection attempt", STRM_EV_STRM_PROC|STRM_EV_SI_ST, s);
			goto end;
		}

		/* We have received a synchronous error. We might have to
		 * abort, retry immediately or redispatch.
		 */
		if (conn_err == SF_ERR_INTERNAL) {
			if (!si->err_type)
				si->err_type = SI_ET_CONN_OTHER;

			if (srv) {
				srv_inc_sess_ctr(srv);
				srv_set_sess_last(srv);
				_HA_ATOMIC_ADD(&srv->counters.failed_conns, 1);
			}
			_HA_ATOMIC_ADD(&s->be->be_counters.failed_conns, 1);

			/* release other streams waiting for this server */
			sess_change_server(s, NULL);
			if (may_dequeue_tasks(srv, s->be))
				process_srv_queue(srv, 0);

			/* Failed and not retryable. */
			si_shutr(si);
			si_shutw(si);
			req->flags |= CF_WRITE_ERROR;

			s->logs.t_queue = tv_ms_elapsed(&s->logs.tv_accept, &now);

			/* we may need to know the position in the queue for logging */
			pendconn_cond_unlink(s->pend_pos);

			/* no stream was ever accounted for this server */
			si->state = SI_ST_CLO;
			if (s->srv_error)
				s->srv_error(s, si);
			DBG_TRACE_STATE("internal error during connection", STRM_EV_STRM_PROC|STRM_EV_SI_ST|STRM_EV_STRM_ERR, s);
			goto end;
		}

		/* We are facing a retryable error. We don't want to wait for
		 * a turn-around delay here, as the problem is likely a source
		 * port allocation issue, so we retry immediately.
		 */
		si->state = SI_ST_CER;
		si->flags &= ~SI_FL_ERR;
		back_handle_st_cer(s);

		DBG_TRACE_STATE("connection error, retry", STRM_EV_STRM_PROC|STRM_EV_SI_ST|STRM_EV_STRM_ERR, s);
		/* now si->state is one of SI_ST_CLO, SI_ST_TAR, SI_ST_ASS, SI_ST_REQ */
	}
	else if (si->state == SI_ST_QUE) {
		/* connection request was queued, check for any update */
		if (!pendconn_dequeue(s)) {
			/* The connection is not in the queue anymore. Either
			 * we have a server connection slot available and we
			 * go directly to the assigned state, or we need to
			 * load-balance first and go to the INI state.
			 */
			si->exp = TICK_ETERNITY;
			if (unlikely(!(s->flags & SF_ASSIGNED)))
				si->state = SI_ST_REQ;
			else {
				s->logs.t_queue = tv_ms_elapsed(&s->logs.tv_accept, &now);
				si->state = SI_ST_ASS;
			}
			DBG_TRACE_STATE("dequeue connection request", STRM_EV_STRM_PROC|STRM_EV_SI_ST, s);
			goto end;
		}

		/* Connection request is still in the queue... */
		if (si->flags & SI_FL_EXP) {
			/* ... and the timeout expired */
			si->exp = TICK_ETERNITY;
			si->flags &= ~SI_FL_EXP;
			s->logs.t_queue = tv_ms_elapsed(&s->logs.tv_accept, &now);

			/* we may need to know the position in the queue for logging */
			pendconn_cond_unlink(s->pend_pos);

			if (srv)
				_HA_ATOMIC_ADD(&srv->counters.failed_conns, 1);
			_HA_ATOMIC_ADD(&s->be->be_counters.failed_conns, 1);
			si_shutr(si);
			si_shutw(si);
			req->flags |= CF_WRITE_TIMEOUT;
			if (!si->err_type)
				si->err_type = SI_ET_QUEUE_TO;
			si->state = SI_ST_CLO;
			if (s->srv_error)
				s->srv_error(s, si);
			DBG_TRACE_STATE("connection request still queued", STRM_EV_STRM_PROC|STRM_EV_SI_ST, s);
			goto end;
		}

		/* Connection remains in queue, check if we have to abort it */
		if (back_may_abort_req(req, s)) {
			s->logs.t_queue = tv_ms_elapsed(&s->logs.tv_accept, &now);

			/* we may need to know the position in the queue for logging */
			pendconn_cond_unlink(s->pend_pos);

			si->err_type |= SI_ET_QUEUE_ABRT;
			DBG_TRACE_STATE("abort queued connection request", STRM_EV_STRM_PROC|STRM_EV_SI_ST|STRM_EV_STRM_ERR, s);
			goto abort_connection;
		}

		/* Nothing changed */
	}
	else if (si->state == SI_ST_TAR) {
		/* Connection request might be aborted */
		if (back_may_abort_req(req, s)) {
			si->err_type |= SI_ET_CONN_ABRT;
			DBG_TRACE_STATE("connection aborted", STRM_EV_STRM_PROC|STRM_EV_SI_ST|STRM_EV_STRM_ERR, s);
			goto abort_connection;
		}

		if (!(si->flags & SI_FL_EXP))
			return;  /* still in turn-around */

		si->flags &= ~SI_FL_EXP;
		si->exp = TICK_ETERNITY;

		/* we keep trying on the same server as long as the stream is
		 * marked "assigned".
		 * FIXME: Should we force a redispatch attempt when the server is down ?
		 */
		if (s->flags & SF_ASSIGNED)
			si->state = SI_ST_ASS;
		else
			si->state = SI_ST_REQ;

		DBG_TRACE_STATE("retry connection now", STRM_EV_STRM_PROC|STRM_EV_SI_ST, s);
	}

 end:
	DBG_TRACE_LEAVE(STRM_EV_STRM_PROC|STRM_EV_SI_ST, s);
	return;

 abort_connection:
	/* give up */
	si->exp = TICK_ETERNITY;
	si->flags &= ~SI_FL_EXP;
	si_shutr(si);
	si_shutw(si);
	si->state = SI_ST_CLO;
	if (s->srv_error)
		s->srv_error(s, si);
	DBG_TRACE_DEVEL("leaving on error", STRM_EV_STRM_PROC|STRM_EV_SI_ST|STRM_EV_STRM_ERR, s);
	return;
}

/* This function initiates a server connection request on a stream interface
 * already in SI_ST_REQ state. Upon success, the state goes to SI_ST_ASS for
 * a real connection to a server, indicating that a server has been assigned,
 * or SI_ST_EST for a successful connection to an applet. It may also return
 * SI_ST_QUE, or SI_ST_CLO upon error.
 */
void back_handle_st_req(struct stream *s)
{
	struct stream_interface *si = &s->si[1];

	if (si->state != SI_ST_REQ)
		return;

	DBG_TRACE_ENTER(STRM_EV_STRM_PROC|STRM_EV_SI_ST, s);

	if (unlikely(obj_type(s->target) == OBJ_TYPE_APPLET)) {
		/* the applet directly goes to the EST state */
		struct appctx *appctx = objt_appctx(si->end);

		if (!appctx || appctx->applet != __objt_applet(s->target))
			appctx = si_register_handler(si, objt_applet(s->target));

		if (!appctx) {
			/* No more memory, let's immediately abort. Force the
			 * error code to ignore the ERR_LOCAL which is not a
			 * real error.
			 */
			s->flags &= ~(SF_ERR_MASK | SF_FINST_MASK);

			si_shutr(si);
			si_shutw(si);
			s->req.flags |= CF_WRITE_ERROR;
			si->err_type = SI_ET_CONN_RES;
			si->state = SI_ST_CLO;
			if (s->srv_error)
				s->srv_error(s, si);
			DBG_TRACE_STATE("failed to register applet", STRM_EV_STRM_PROC|STRM_EV_SI_ST|STRM_EV_STRM_ERR, s);
			goto end;
		}

		if (tv_iszero(&s->logs.tv_request))
			s->logs.tv_request = now;
		s->logs.t_queue = tv_ms_elapsed(&s->logs.tv_accept, &now);
		si->state = SI_ST_EST;
		si->err_type = SI_ET_NONE;
		be_set_sess_last(s->be);

		DBG_TRACE_STATE("applet registered", STRM_EV_STRM_PROC|STRM_EV_SI_ST, s);
		/* let back_establish() finish the job */
		goto end;
	}

	/* Try to assign a server */
	if (srv_redispatch_connect(s) != 0) {
		/* We did not get a server. Either we queued the
		 * connection request, or we encountered an error.
		 */
		if (si->state == SI_ST_QUE) {
			DBG_TRACE_STATE("connection request queued", STRM_EV_STRM_PROC|STRM_EV_SI_ST, s);
			goto end;
		}

		/* we did not get any server, let's check the cause */
		si_shutr(si);
		si_shutw(si);
		s->req.flags |= CF_WRITE_ERROR;
		if (!si->err_type)
			si->err_type = SI_ET_CONN_OTHER;
		si->state = SI_ST_CLO;
		if (s->srv_error)
			s->srv_error(s, si);
		DBG_TRACE_STATE("connection request failed", STRM_EV_STRM_PROC|STRM_EV_SI_ST|STRM_EV_STRM_ERR, s);
		goto end;
	}

	/* The server is assigned */
	s->logs.t_queue = tv_ms_elapsed(&s->logs.tv_accept, &now);
	si->state = SI_ST_ASS;
	be_set_sess_last(s->be);
	DBG_TRACE_STATE("connection request assigned to a server", STRM_EV_STRM_PROC|STRM_EV_SI_ST, s);

 end:
	DBG_TRACE_LEAVE(STRM_EV_STRM_PROC|STRM_EV_SI_ST, s);
}

/* This function is called with (si->state == SI_ST_CON) meaning that a
 * connection was attempted and that the file descriptor is already allocated.
 * We must check for timeout, error and abort. Possible output states are
 * SI_ST_CER (error), SI_ST_DIS (abort), and SI_ST_CON (no change). This only
 * works with connection-based streams. We know that there was no I/O event
 * when reaching this function. Timeouts and errors are *not* cleared.
 */
void back_handle_st_con(struct stream *s)
{
	struct stream_interface *si = &s->si[1];
	struct channel *req = &s->req;
	struct channel *rep = &s->res;

	DBG_TRACE_ENTER(STRM_EV_STRM_PROC|STRM_EV_SI_ST, s);

	/* the client might want to abort */
	if ((rep->flags & CF_SHUTW) ||
	    ((req->flags & CF_SHUTW_NOW) &&
	     (channel_is_empty(req) || (s->be->options & PR_O_ABRT_CLOSE)))) {
		si->flags |= SI_FL_NOLINGER;
		si_shutw(si);
		si->err_type |= SI_ET_CONN_ABRT;
		if (s->srv_error)
			s->srv_error(s, si);
		/* Note: state = SI_ST_DIS now */
		DBG_TRACE_STATE("client abort during connection attempt", STRM_EV_STRM_PROC|STRM_EV_SI_ST|STRM_EV_STRM_ERR, s);
		goto end;
	}

	/* retryable error ? */
	if (si->flags & (SI_FL_EXP|SI_FL_ERR)) {
		if (!si->err_type) {
			if (si->flags & SI_FL_ERR)
				si->err_type = SI_ET_CONN_ERR;
			else
				si->err_type = SI_ET_CONN_TO;
		}

		si->state = SI_ST_CER;
		DBG_TRACE_STATE("connection failed, retry", STRM_EV_STRM_PROC|STRM_EV_SI_ST|STRM_EV_STRM_ERR, s);
	}

 end:
	DBG_TRACE_LEAVE(STRM_EV_STRM_PROC|STRM_EV_SI_ST, s);
}

/* This function is called with (si->state == SI_ST_CER) meaning that a
 * previous connection attempt has failed and that the file descriptor
 * has already been released. Possible causes include asynchronous error
 * notification and time out. Possible output states are SI_ST_CLO when
 * retries are exhausted, SI_ST_TAR when a delay is wanted before a new
 * connection attempt, SI_ST_ASS when it's wise to retry on the same server,
 * and SI_ST_REQ when an immediate redispatch is wanted. The buffers are
 * marked as in error state. Timeouts and errors are cleared before retrying.
 */
void back_handle_st_cer(struct stream *s)
{
	struct stream_interface *si = &s->si[1];
	struct conn_stream *cs = objt_cs(si->end);
	struct connection *conn = cs_conn(cs);

	DBG_TRACE_ENTER(STRM_EV_STRM_PROC|STRM_EV_SI_ST, s);

	si->exp = TICK_ETERNITY;
	si->flags &= ~SI_FL_EXP;

	/* we probably have to release the last stream from the server */
	if (objt_server(s->target)) {
		health_adjust(objt_server(s->target), HANA_STATUS_L4_ERR);

		if (s->flags & SF_CURR_SESS) {
			s->flags &= ~SF_CURR_SESS;
			_HA_ATOMIC_SUB(&__objt_server(s->target)->cur_sess, 1);
		}

		if ((si->flags & SI_FL_ERR) &&
		    conn && conn->err_code == CO_ER_SSL_MISMATCH_SNI) {
			/* We tried to connect to a server which is configured
			 * with "verify required" and which doesn't have the
			 * "verifyhost" directive. The server presented a wrong
			 * certificate (a certificate for an unexpected name),
			 * which implies that we have used SNI in the handshake,
			 * and that the server doesn't have the associated cert
			 * and presented a default one.
			 *
			 * This is a serious enough issue not to retry. It's
			 * especially important because this wrong name might
			 * either be the result of a configuration error, and
			 * retrying will only hammer the server, or is caused
			 * by the use of a wrong SNI value, most likely
			 * provided by the client, and we don't want to let the
			 * client provoke retries.
			 */
			si->conn_retries = 0;
			DBG_TRACE_DEVEL("Bad SSL cert, disable connection retries", STRM_EV_STRM_PROC|STRM_EV_SI_ST|STRM_EV_STRM_ERR, s);
		}
	}

	/* ensure that we have enough retries left */
	si->conn_retries--;
	if (si->conn_retries < 0 || !(s->be->retry_type & PR_RE_CONN_FAILED)) {
		if (!si->err_type)
			si->err_type = SI_ET_CONN_ERR;

		if (objt_server(s->target))
			_HA_ATOMIC_ADD(&objt_server(s->target)->counters.failed_conns, 1);
		_HA_ATOMIC_ADD(&s->be->be_counters.failed_conns, 1);
		sess_change_server(s, NULL);
		if (may_dequeue_tasks(objt_server(s->target), s->be))
			process_srv_queue(objt_server(s->target), 0);

		/* shutw is enough to stop a connecting socket */
		si_shutw(si);
		s->req.flags |= CF_WRITE_ERROR;
		s->res.flags |= CF_READ_ERROR;

		si->state = SI_ST_CLO;
		if (s->srv_error)
			s->srv_error(s, si);

		DBG_TRACE_STATE("connection failed", STRM_EV_STRM_PROC|STRM_EV_SI_ST|STRM_EV_STRM_ERR, s);
		goto end;
	}

	/* At this stage, we will trigger a connection retry (with or without
	 * redispatch). Thus we must release the SI endpoint on the server side
	 * and close the attached connection. It is especially important to do it
	 * now if the retry is not immediately performed, to be sure to release
	 * resources as soon as possible and to not catch errors from the lower
	 * layers in an unexpected state (i.e. < ST_CONN).
	 *
	 * Note: the stream-interface will be switched to ST_REQ, ST_ASS or
	 * ST_TAR, and the SI_FL_ERR and SI_FL_EXP flags will be unset.
	 */
	si_release_endpoint(&s->si[1]);

	stream_choose_redispatch(s);

	if (si->flags & SI_FL_ERR) {
		/* The error was an asynchronous connection error, and we will
		 * likely have to retry connecting to the same server, most
		 * likely leading to the same result. To avoid this, we wait
		 * MIN(one second, connect timeout) before retrying. We don't
		 * do it when the failure happened on a reused connection
		 * though.
		 */

		int delay = 1000;

		if (s->be->timeout.connect && s->be->timeout.connect < delay)
			delay = s->be->timeout.connect;

		if (!si->err_type)
			si->err_type = SI_ET_CONN_ERR;

		/* only wait when we're retrying on the same server */
		if ((si->state == SI_ST_ASS ||
		     (s->be->lbprm.algo & BE_LB_KIND) != BE_LB_KIND_RR ||
		     (s->be->srv_act <= 1)) && !(s->flags & SF_SRV_REUSED)) {
			si->state = SI_ST_TAR;
			si->exp = tick_add(now_ms, MS_TO_TICKS(delay));
		}
		si->flags &= ~SI_FL_ERR;
		DBG_TRACE_STATE("retry a new connection", STRM_EV_STRM_PROC|STRM_EV_SI_ST, s);
	}

 end:
	DBG_TRACE_LEAVE(STRM_EV_STRM_PROC|STRM_EV_SI_ST, s);
}
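
/* Worked example for the turn-around delay computed above: with
 * "timeout connect 500ms" the SI_ST_TAR state lasts 500ms before the next
 * attempt, while with "timeout connect 5s" the wait is capped at one second.
 */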

/* This function is called with (si->state == SI_ST_RDY) meaning that a
 * connection was attempted, that the file descriptor is already allocated,
 * and that it has succeeded. We must still check for errors and aborts.
 * Possible output states are SI_ST_EST (established), SI_ST_CER (error),
 * and SI_ST_DIS (abort). This only works with connection-based streams.
 * Timeouts and errors are *not* cleared.
 */
void back_handle_st_rdy(struct stream *s)
{
	struct stream_interface *si = &s->si[1];
	struct channel *req = &s->req;
	struct channel *rep = &s->res;

	DBG_TRACE_ENTER(STRM_EV_STRM_PROC|STRM_EV_SI_ST, s);
	/* We know the connection at least succeeded, though it could have
	 * since met an error for any other reason. At least it didn't time out
	 * even though the timeout might have been reported right after success.
	 * We need to take care of various situations here :
	 *   - everything might be OK. We have to switch to established.
	 *   - an I/O error might have been reported after a successful transfer,
	 *     which is not retryable and needs to be logged correctly, and needs
	 *     the established state as well
	 *   - SI_ST_CON implies !CF_WROTE_DATA but not conversely as we could
	 *     have validated a connection with incoming data (e.g. TCP with a
	 *     banner protocol), or just a successful connect() probe.
	 *   - the client might have requested a connection abort, this needs to
	 *     be checked before we decide to retry anything.
	 */

	/* it's still possible to handle client aborts or connection retries
	 * before any data were sent.
	 */
	if (!(req->flags & CF_WROTE_DATA)) {
		/* client abort ? */
		if ((rep->flags & CF_SHUTW) ||
		    ((req->flags & CF_SHUTW_NOW) &&
		     (channel_is_empty(req) || (s->be->options & PR_O_ABRT_CLOSE)))) {
			/* give up */
			si->flags |= SI_FL_NOLINGER;
			si_shutw(si);
			si->err_type |= SI_ET_CONN_ABRT;
			if (s->srv_error)
				s->srv_error(s, si);
			DBG_TRACE_STATE("client abort during connection attempt", STRM_EV_STRM_PROC|STRM_EV_SI_ST|STRM_EV_STRM_ERR, s);
			goto end;
		}

		/* retryable error ? */
		if (si->flags & SI_FL_ERR) {
			if (!si->err_type)
				si->err_type = SI_ET_CONN_ERR;
			si->state = SI_ST_CER;
			DBG_TRACE_STATE("connection failed, retry", STRM_EV_STRM_PROC|STRM_EV_SI_ST|STRM_EV_STRM_ERR, s);
			goto end;
		}
	}

	/* data were sent and/or we had no error, back_establish() will
	 * now take over.
	 */
	DBG_TRACE_STATE("connection established", STRM_EV_STRM_PROC|STRM_EV_SI_ST, s);
	si->err_type = SI_ET_NONE;
	si->state = SI_ST_EST;

 end:
	DBG_TRACE_LEAVE(STRM_EV_STRM_PROC|STRM_EV_SI_ST, s);
}

/* sends a log message when a backend goes down, and also sets the last
 * change date.
 */
void set_backend_down(struct proxy *be)
{
	be->last_change = now.tv_sec;
	_HA_ATOMIC_ADD(&be->down_trans, 1);

	if (!(global.mode & MODE_STARTING)) {
		ha_alert("%s '%s' has no server available!\n", proxy_type_str(be), be->id);
		send_log(be, LOG_EMERG, "%s %s has no server available!\n", proxy_type_str(be), be->id);
	}
}
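
/* For a backend named "app", the alert above reads:
 *     backend 'app' has no server available!
 */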

/* Apply RDP cookie persistence to the current stream. For this, the function
 * tries to extract an RDP cookie from the request buffer, and looks for the
 * matching server in the list. If the server is found, it is assigned to the
 * stream. This always returns 1, and the analyser removes itself from the
 * list. Nothing is performed if a server was already assigned.
 */
int tcp_persist_rdp_cookie(struct stream *s, struct channel *req, int an_bit)
{
	struct proxy *px = s->be;
	int ret;
	struct sample smp;
	struct server *srv = px->srv;
	uint16_t port;
	uint32_t addr;
	char *p;

	DBG_TRACE_ENTER(STRM_EV_STRM_ANA|STRM_EV_TCP_ANA, s);

	if (s->flags & SF_ASSIGNED)
		goto no_cookie;

	memset(&smp, 0, sizeof(smp));

	ret = fetch_rdp_cookie_name(s, &smp, s->be->rdp_cookie_name, s->be->rdp_cookie_len);
	if (ret == 0 || (smp.flags & SMP_F_MAY_CHANGE) || smp.data.u.str.data == 0)
		goto no_cookie;

	/* The RDP cookie was already validated by the ACL fetch above, so the
	 * string ends with <cr><lf>. The cookie format is <ip> "." <port>,
	 * where "ip" is the integer corresponding to the server's IP address
	 * in network order, and "port" is the integer corresponding to the
	 * server's port in network order.
	 */
	addr = strtoul(smp.data.u.str.area, &p, 10);
	if (*p != '.')
		goto no_cookie;
	p++;

	port = ntohs(strtoul(p, &p, 10));
	if (*p != '.')
		goto no_cookie;

	s->target = NULL;
	while (srv) {
		if (srv->addr.ss_family == AF_INET &&
		    port == srv->svc_port &&
		    addr == ((struct sockaddr_in *)&srv->addr)->sin_addr.s_addr) {
			if ((srv->cur_state != SRV_ST_STOPPED) || (px->options & PR_O_PERSIST)) {
				/* we found the server and it is usable */
				s->flags |= SF_DIRECT | SF_ASSIGNED;
				s->target = &srv->obj_type;
				break;
			}
		}
		srv = srv->next;
	}

 no_cookie:
	req->analysers &= ~an_bit;
	req->analyse_exp = TICK_ETERNITY;
	DBG_TRACE_LEAVE(STRM_EV_STRM_ANA|STRM_EV_TCP_ANA, s);
	return 1;
}
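
/* A configuration sketch that would exercise this analyser (directive names
 * taken from the standard documentation; adjust to your setup):
 *
 *     backend rdp_farm
 *         mode tcp
 *         balance rdp-cookie
 *         tcp-request inspect-delay 5s
 *         tcp-request content accept if RDP_COOKIE
 *         persist rdp-cookie
 *         server ts1 10.0.0.1:3389
 *         server ts2 10.0.0.2:3389
 */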

/* Returns the cumulated downtime of proxy <px>, including the current down
 * period if the proxy currently has no usable server.
 */
int be_downtime(struct proxy *px)
{
	if (px->lbprm.tot_weight && px->last_change < now.tv_sec)  // ignore negative time
		return px->down_time;

	return now.tv_sec - px->last_change + px->down_time;
}

/*
 * This function returns a string describing the balancing algorithm <algo>
 * in a format suitable for stats.
 */
const char *backend_lb_algo_str(int algo)
{
	if (algo == BE_LB_ALGO_RR)
		return "roundrobin";
	else if (algo == BE_LB_ALGO_SRR)
		return "static-rr";
	else if (algo == BE_LB_ALGO_FAS)
		return "first";
	else if (algo == BE_LB_ALGO_LC)
		return "leastconn";
	else if (algo == BE_LB_ALGO_SH)
		return "source";
	else if (algo == BE_LB_ALGO_UH)
		return "uri";
	else if (algo == BE_LB_ALGO_PH)
		return "url_param";
	else if (algo == BE_LB_ALGO_HH)
		return "hdr";
	else if (algo == BE_LB_ALGO_RCH)
		return "rdp-cookie";
	else if (algo == BE_LB_ALGO_NONE)
		return "none";
	else
		return "unknown";
}

/* This function parses a "balance" statement in a backend section describing
 * <curproxy>. It returns -1 if there is any error, otherwise zero. On error,
 * it writes an error message into <err>, which must initially point to NULL;
 * the buffer is allocated automatically and the trailing '\n' is not written.
 * The function must be called with <args> pointing to the first word after
 * "balance".
 */
int backend_parse_balance(const char **args, char **err, struct proxy *curproxy)
{
	if (!*(args[0])) {
		/* if no option is set, use round-robin by default */
		curproxy->lbprm.algo &= ~BE_LB_ALGO;
		curproxy->lbprm.algo |= BE_LB_ALGO_RR;
		return 0;
	}

	if (!strcmp(args[0], "roundrobin")) {
		curproxy->lbprm.algo &= ~BE_LB_ALGO;
		curproxy->lbprm.algo |= BE_LB_ALGO_RR;
	}
	else if (!strcmp(args[0], "static-rr")) {
		curproxy->lbprm.algo &= ~BE_LB_ALGO;
		curproxy->lbprm.algo |= BE_LB_ALGO_SRR;
	}
	else if (!strcmp(args[0], "first")) {
		curproxy->lbprm.algo &= ~BE_LB_ALGO;
		curproxy->lbprm.algo |= BE_LB_ALGO_FAS;
	}
	else if (!strcmp(args[0], "leastconn")) {
		curproxy->lbprm.algo &= ~BE_LB_ALGO;
		curproxy->lbprm.algo |= BE_LB_ALGO_LC;
	}
	else if (!strncmp(args[0], "random", 6)) {
		curproxy->lbprm.algo &= ~BE_LB_ALGO;
		curproxy->lbprm.algo |= BE_LB_ALGO_RND;
		curproxy->lbprm.arg_opt1 = 2;

		if (*(args[0] + 6) == '(' && *(args[0] + 7) != ')') { /* number of draws */
			const char *beg;
			char *end;

			beg = args[0] + 7;
			curproxy->lbprm.arg_opt1 = strtol(beg, &end, 0);

			if (*end != ')') {
				if (!*end)
					memprintf(err, "random : missing closing parenthesis.");
				else
					memprintf(err, "random : unexpected character '%c' after argument.", *end);
				return -1;
			}

			if (curproxy->lbprm.arg_opt1 < 1) {
				memprintf(err, "random : number of draws must be at least 1.");
				return -1;
			}
		}
	}
	else if (!strcmp(args[0], "source")) {
		curproxy->lbprm.algo &= ~BE_LB_ALGO;
		curproxy->lbprm.algo |= BE_LB_ALGO_SH;
	}
	else if (!strcmp(args[0], "uri")) {
		int arg = 1;

		curproxy->lbprm.algo &= ~BE_LB_ALGO;
		curproxy->lbprm.algo |= BE_LB_ALGO_UH;
		curproxy->lbprm.arg_opt1 = 0; // "whole", "path-only"
		curproxy->lbprm.arg_opt2 = 0; // "len"
		curproxy->lbprm.arg_opt3 = 0; // "depth"

		while (*args[arg]) {
			if (!strcmp(args[arg], "len")) {
				if (!*args[arg+1] || (atoi(args[arg+1]) <= 0)) {
					memprintf(err, "%s : '%s' expects a positive integer (got '%s').", args[0], args[arg], args[arg+1]);
					return -1;
				}
				curproxy->lbprm.arg_opt2 = atoi(args[arg+1]);
				arg += 2;
			}
			else if (!strcmp(args[arg], "depth")) {
				if (!*args[arg+1] || (atoi(args[arg+1]) <= 0)) {
					memprintf(err, "%s : '%s' expects a positive integer (got '%s').", args[0], args[arg], args[arg+1]);
					return -1;
				}
				/* hint: we store the position of the ending '/' (depth+1) so
				 * that we avoid a comparison while computing the hash.
				 */
				curproxy->lbprm.arg_opt3 = atoi(args[arg+1]) + 1;
				arg += 2;
			}
			else if (!strcmp(args[arg], "whole")) {
				curproxy->lbprm.arg_opt1 |= 1;
				arg += 1;
			}
			else if (!strcmp(args[arg], "path-only")) {
				curproxy->lbprm.arg_opt1 |= 2;
				arg += 1;
			}
			else {
				memprintf(err, "%s only accepts parameters 'len', 'depth', 'path-only', and 'whole' (got '%s').", args[0], args[arg]);
				return -1;
			}
		}
	}
	else if (!strcmp(args[0], "url_param")) {
		if (!*args[1]) {
			memprintf(err, "%s requires an URL parameter name.", args[0]);
			return -1;
		}
		curproxy->lbprm.algo &= ~BE_LB_ALGO;
		curproxy->lbprm.algo |= BE_LB_ALGO_PH;

		free(curproxy->lbprm.arg_str);
		curproxy->lbprm.arg_str = strdup(args[1]);
		curproxy->lbprm.arg_len = strlen(args[1]);
		if (*args[2]) {
			if (strcmp(args[2], "check_post")) {
				memprintf(err, "%s only accepts 'check_post' modifier (got '%s').", args[0], args[2]);
				return -1;
			}
		}
	}
	else if (!strncmp(args[0], "hdr(", 4)) {
		const char *beg, *end;

		beg = args[0] + 4;
		end = strchr(beg, ')');

		if (!end || end == beg) {
			memprintf(err, "hdr requires an http header field name.");
			return -1;
		}

		curproxy->lbprm.algo &= ~BE_LB_ALGO;
		curproxy->lbprm.algo |= BE_LB_ALGO_HH;

		free(curproxy->lbprm.arg_str);
		curproxy->lbprm.arg_len = end - beg;
		curproxy->lbprm.arg_str = my_strndup(beg, end - beg);
		curproxy->lbprm.arg_opt1 = 0;

		if (*args[1]) {
			if (strcmp(args[1], "use_domain_only")) {
				memprintf(err, "%s only accepts 'use_domain_only' modifier (got '%s').", args[0], args[1]);
				return -1;
			}
			curproxy->lbprm.arg_opt1 = 1;
		}
	}
	else if (!strncmp(args[0], "rdp-cookie", 10)) {
		curproxy->lbprm.algo &= ~BE_LB_ALGO;
		curproxy->lbprm.algo |= BE_LB_ALGO_RCH;

		if (*(args[0] + 10) == '(') { /* cookie name */
			const char *beg, *end;

			beg = args[0] + 11;
			end = strchr(beg, ')');

			if (!end || end == beg) {
				memprintf(err, "rdp-cookie : missing cookie name.");
				return -1;
			}

			free(curproxy->lbprm.arg_str);
			curproxy->lbprm.arg_str = my_strndup(beg, end - beg);
			curproxy->lbprm.arg_len = end - beg;
		}
		else if (*(args[0] + 10) == '\0') { /* default cookie name 'mstshash' */
			free(curproxy->lbprm.arg_str);
			curproxy->lbprm.arg_str = strdup("mstshash");
			curproxy->lbprm.arg_len = strlen(curproxy->lbprm.arg_str);
		}
		else { /* syntax */
			memprintf(err, "rdp-cookie : missing cookie name.");
			return -1;
		}
	}
	else {
		memprintf(err, "only supports 'roundrobin', 'static-rr', 'first', 'leastconn', 'random', 'source', 'uri', 'url_param', 'hdr(name)' and 'rdp-cookie(name)' options.");
		return -1;
	}
	return 0;
}
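
/* Examples of "balance" lines accepted by the parser above (sketch):
 *
 *     balance roundrobin
 *     balance random(3)             # three draws among servers
 *     balance uri len 12 depth 3    # hash at most 12 chars / 3 path levels
 *     balance hdr(Host) use_domain_only
 *     balance rdp-cookie            # defaults to the 'mstshash' cookie
 */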


/************************************************************************/
/*       All supported sample and ACL keywords must be declared here.   */
/************************************************************************/

/* set temp integer to the number of enabled servers on the proxy.
 * Accepts exactly 1 argument. Argument is a backend, other types will lead to
 * undefined behaviour.
 */
static int
smp_fetch_nbsrv(const struct arg *args, struct sample *smp, const char *kw, void *private)
{
	struct proxy *px;

	smp->flags = SMP_F_VOL_TEST;
	smp->data.type = SMP_T_SINT;
	px = args->data.prx;

	smp->data.u.sint = be_usable_srv(px);

	return 1;
}

/* report in smp->flags a success or failure depending on the designated
 * server's state. There is no match function involved since there's no pattern.
 * Accepts exactly 1 argument. Argument is a server, other types will lead to
 * undefined behaviour.
 */
static int
smp_fetch_srv_is_up(const struct arg *args, struct sample *smp, const char *kw, void *private)
{
	struct server *srv = args->data.srv;

	smp->flags = SMP_F_VOL_TEST;
	smp->data.type = SMP_T_BOOL;
	if (!(srv->cur_admin & SRV_ADMF_MAINT) &&
	    (!(srv->check.state & CHK_ST_CONFIGURED) || (srv->cur_state != SRV_ST_STOPPED)))
		smp->data.u.sint = 1;
	else
		smp->data.u.sint = 0;
	return 1;
}

/* set temp integer to the number of connection slots still available on the
 * backend's non-stopped servers, including the remaining queue slots.
 * Accepts exactly 1 argument. Argument is a backend, other types will lead to
 * undefined behaviour.
 */
static int
smp_fetch_connslots(const struct arg *args, struct sample *smp, const char *kw, void *private)
{
	struct server *iterator;

	smp->flags = SMP_F_VOL_TEST;
	smp->data.type = SMP_T_SINT;
	smp->data.u.sint = 0;

	for (iterator = args->data.prx->srv; iterator; iterator = iterator->next) {
		if (iterator->cur_state == SRV_ST_STOPPED)
			continue;

		if (iterator->maxconn == 0 || iterator->maxqueue == 0) {
			/* configuration is stupid */
			smp->data.u.sint = -1;  /* FIXME: stupid value! */
			return 1;
		}

		smp->data.u.sint += (iterator->maxconn - iterator->cur_sess)
		                  + (iterator->maxqueue - iterator->nbpend);
	}

	return 1;
}

/* set temp integer to the id of the backend */
static int
smp_fetch_be_id(const struct arg *args, struct sample *smp, const char *kw, void *private)
{
	struct proxy *px = NULL;

	if (smp->strm)
		px = smp->strm->be;
	else if (obj_type(smp->sess->origin) == OBJ_TYPE_CHECK)
		px = __objt_check(smp->sess->origin)->proxy;
	if (!px)
		return 0;

	smp->flags = SMP_F_VOL_TXN;
	smp->data.type = SMP_T_SINT;
	smp->data.u.sint = px->uuid;
	return 1;
}

/* set string to the name of the backend */
static int
smp_fetch_be_name(const struct arg *args, struct sample *smp, const char *kw, void *private)
{
	struct proxy *px = NULL;

	if (smp->strm)
		px = smp->strm->be;
	else if (obj_type(smp->sess->origin) == OBJ_TYPE_CHECK)
		px = __objt_check(smp->sess->origin)->proxy;
	if (!px)
		return 0;

	smp->data.u.str.area = (char *)px->id;
	if (!smp->data.u.str.area)
		return 0;

	smp->data.type = SMP_T_STR;
	smp->flags = SMP_F_CONST;
	smp->data.u.str.data = strlen(smp->data.u.str.area);

	return 1;
}

/* set temp integer to the id of the server */
static int
smp_fetch_srv_id(const struct arg *args, struct sample *smp, const char *kw, void *private)
{
	struct server *srv = NULL;

	if (smp->strm)
		srv = objt_server(smp->strm->target);
	else if (obj_type(smp->sess->origin) == OBJ_TYPE_CHECK)
		srv = __objt_check(smp->sess->origin)->server;
	if (!srv)
		return 0;

	smp->data.type = SMP_T_SINT;
	smp->data.u.sint = srv->puid;

	return 1;
}

/* set string to the name of the server */
static int
smp_fetch_srv_name(const struct arg *args, struct sample *smp, const char *kw, void *private)
{
	struct server *srv = NULL;

	if (smp->strm)
		srv = objt_server(smp->strm->target);
	else if (obj_type(smp->sess->origin) == OBJ_TYPE_CHECK)
		srv = __objt_check(smp->sess->origin)->server;
	if (!srv)
		return 0;

	smp->data.u.str.area = srv->id;
	if (!smp->data.u.str.area)
		return 0;

	smp->data.type = SMP_T_STR;
	smp->data.u.str.data = strlen(smp->data.u.str.area);

	return 1;
}

/* set temp integer to the number of connections per second reaching the backend.
 * Accepts exactly 1 argument. Argument is a backend, other types will lead to
 * undefined behaviour.
 */
static int
smp_fetch_be_sess_rate(const struct arg *args, struct sample *smp, const char *kw, void *private)
{
	smp->flags = SMP_F_VOL_TEST;
	smp->data.type = SMP_T_SINT;
	smp->data.u.sint = read_freq_ctr(&args->data.prx->be_sess_per_sec);
	return 1;
}

/* set temp integer to the number of concurrent connections on the backend.
 * Accepts exactly 1 argument. Argument is a backend, other types will lead to
 * undefined behaviour.
 */
static int
smp_fetch_be_conn(const struct arg *args, struct sample *smp, const char *kw, void *private)
{
	smp->flags = SMP_F_VOL_TEST;
	smp->data.type = SMP_T_SINT;
	smp->data.u.sint = args->data.prx->beconn;
	return 1;
}

/* set temp integer to the number of available connections across available
 * servers on the backend.
 * Accepts exactly 1 argument. Argument is a backend, other types will lead to
 * undefined behaviour.
 */
static int
smp_fetch_be_conn_free(const struct arg *args, struct sample *smp, const char *kw, void *private)
{
	struct server *iterator;
	struct proxy *px;
	unsigned int maxconn;

	smp->flags = SMP_F_VOL_TEST;
	smp->data.type = SMP_T_SINT;
	smp->data.u.sint = 0;

	for (iterator = args->data.prx->srv; iterator; iterator = iterator->next) {
		if (iterator->cur_state == SRV_ST_STOPPED)
			continue;

		px = iterator->proxy;
		if (!srv_currently_usable(iterator) ||
		    ((iterator->flags & SRV_F_BACKUP) &&
		     (px->srv_act || (iterator != px->lbprm.fbck && !(px->options & PR_O_USE_ALL_BK)))))
			continue;

		if (iterator->maxconn == 0) {
			/* one active server is unlimited, return -1 */
			smp->data.u.sint = -1;
			return 1;
		}

		maxconn = srv_dynamic_maxconn(iterator);
		if (maxconn > iterator->cur_sess)
			smp->data.u.sint += maxconn - iterator->cur_sess;
	}

	return 1;
}
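
/* Worked example for be_conn_free (assuming no "minconn" is set, so
 * srv_dynamic_maxconn() simply returns each server's maxconn): two usable
 * servers with maxconn 100 and 50 and cur_sess 10 and 5 yield 135; if any
 * usable server has maxconn 0, the result is -1 (unlimited).
 */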

/* set temp integer to the total number of queued connections on the backend.
 * Accepts exactly 1 argument. Argument is a backend, other types will lead to
 * undefined behaviour.
 */
static int
smp_fetch_queue_size(const struct arg *args, struct sample *smp, const char *kw, void *private)
{
	smp->flags = SMP_F_VOL_TEST;
	smp->data.type = SMP_T_SINT;
	smp->data.u.sint = args->data.prx->totpend;
	return 1;
}

/* set temp integer to the total number of queued connections on the backend divided
 * by the number of running servers and rounded up. If there is no running
 * server, we return twice the total, just as if we had half a running server.
 * This is more or less correct anyway, since we expect the last server to come
 * back soon.
 * Accepts exactly 1 argument. Argument is a backend, other types will lead to
 * undefined behaviour.
 */
static int
smp_fetch_avg_queue_size(const struct arg *args, struct sample *smp, const char *kw, void *private)
{
	int nbsrv;
	struct proxy *px;

	smp->flags = SMP_F_VOL_TEST;
	smp->data.type = SMP_T_SINT;
	px = args->data.prx;

	nbsrv = be_usable_srv(px);

	if (nbsrv > 0)
		smp->data.u.sint = (px->totpend + nbsrv - 1) / nbsrv;
	else
		smp->data.u.sint = px->totpend * 2;

	return 1;
}
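
/* e.g. with totpend = 7 and 3 usable servers, avg_queue returns
 * (7 + 3 - 1) / 3 = 3; with no usable server it returns 7 * 2 = 14.
 */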

/* set temp integer to the number of concurrent connections on the server in the backend.
 * Accepts exactly 1 argument. Argument is a server, other types will lead to
 * undefined behaviour.
 */
static int
smp_fetch_srv_conn(const struct arg *args, struct sample *smp, const char *kw, void *private)
{
	smp->flags = SMP_F_VOL_TEST;
	smp->data.type = SMP_T_SINT;
	smp->data.u.sint = args->data.srv->cur_sess;
	return 1;
}

/* set temp integer to the number of available connections on the server in the backend.
 * Accepts exactly 1 argument. Argument is a server, other types will lead to
 * undefined behaviour.
 */
static int
smp_fetch_srv_conn_free(const struct arg *args, struct sample *smp, const char *kw, void *private)
{
	unsigned int maxconn;

	smp->flags = SMP_F_VOL_TEST;
	smp->data.type = SMP_T_SINT;

	if (args->data.srv->maxconn == 0) {
		/* one active server is unlimited, return -1 */
		smp->data.u.sint = -1;
		return 1;
	}

	maxconn = srv_dynamic_maxconn(args->data.srv);
	if (maxconn > args->data.srv->cur_sess)
		smp->data.u.sint = maxconn - args->data.srv->cur_sess;
	else
		smp->data.u.sint = 0;

	return 1;
}

/* set temp integer to the number of connections pending in the server's queue.
 * Accepts exactly 1 argument. Argument is a server, other types will lead to
 * undefined behaviour.
 */
static int
smp_fetch_srv_queue(const struct arg *args, struct sample *smp, const char *kw, void *private)
{
	smp->flags = SMP_F_VOL_TEST;
	smp->data.type = SMP_T_SINT;
	smp->data.u.sint = args->data.srv->nbpend;
	return 1;
}

/* set temp integer to the number of sessions per second reaching the server.
 * Accepts exactly 1 argument. Argument is a server, other types will lead to
 * undefined behaviour.
 */
static int
smp_fetch_srv_sess_rate(const struct arg *args, struct sample *smp, const char *kw, void *private)
{
	smp->flags = SMP_F_VOL_TEST;
	smp->data.type = SMP_T_SINT;
	smp->data.u.sint = read_freq_ctr(&args->data.srv->sess_per_sec);
	return 1;
}

/* set temp integer to the server weight.
 * Accepts exactly 1 argument. Argument is a server, other types will lead to
 * undefined behaviour.
 */
static int
smp_fetch_srv_weight(const struct arg *args, struct sample *smp, const char *kw, void *private)
{
	struct server *srv = args->data.srv;
	struct proxy *px = srv->proxy;

	smp->flags = SMP_F_VOL_TEST;
	smp->data.type = SMP_T_SINT;
	smp->data.u.sint = (srv->cur_eweight * px->lbprm.wmult + px->lbprm.wdiv - 1) / px->lbprm.wdiv;
	return 1;
}

/* set temp integer to the server initial weight.
 * Accepts exactly 1 argument. Argument is a server, other types will lead to
 * undefined behaviour.
 */
static int
smp_fetch_srv_iweight(const struct arg *args, struct sample *smp, const char *kw, void *private)
{
	smp->flags = SMP_F_VOL_TEST;
	smp->data.type = SMP_T_SINT;
	smp->data.u.sint = args->data.srv->iweight;
	return 1;
}

/* set temp integer to the server user-specified weight.
 * Accepts exactly 1 argument. Argument is a server, other types will lead to
 * undefined behaviour.
 */
static int
smp_fetch_srv_uweight(const struct arg *args, struct sample *smp, const char *kw, void *private)
{
	smp->flags = SMP_F_VOL_TEST;
	smp->data.type = SMP_T_SINT;
	smp->data.u.sint = args->data.srv->uweight;
	return 1;
}

/* converter: takes a backend name in the sample string and returns the
 * number of usable servers in that backend.
 */
static int sample_conv_nbsrv(const struct arg *args, struct sample *smp, void *private)
{
	struct proxy *px;

	if (!smp_make_safe(smp))
		return 0;

	px = proxy_find_by_name(smp->data.u.str.area, PR_CAP_BE, 0);
	if (!px)
		return 0;

	smp->data.type = SMP_T_SINT;
	smp->data.u.sint = be_usable_srv(px);

	return 1;
}

/* converter: takes a "backend/server" (or plain "server" for the current
 * backend) name in the sample string and returns the number of connections
 * queued on that server.
 */
static int
sample_conv_srv_queue(const struct arg *args, struct sample *smp, void *private)
{
	struct proxy *px;
	struct server *srv;
	char *bksep;

	if (!smp_make_safe(smp))
		return 0;

	bksep = strchr(smp->data.u.str.area, '/');

	if (bksep) {
		*bksep = '\0';
		px = proxy_find_by_name(smp->data.u.str.area, PR_CAP_BE, 0);
		if (!px)
			return 0;
		smp->data.u.str.area = bksep + 1;
	} else {
		if (!(smp->px->cap & PR_CAP_BE))
			return 0;
		px = smp->px;
	}

	srv = server_find_by_name(px, smp->data.u.str.area);
	if (!srv)
		return 0;

	smp->data.type = SMP_T_SINT;
	smp->data.u.sint = srv->nbpend;
	return 1;
}

/* Note: must not be declared <const> as its list will be overwritten.
 * Please take care of keeping this list alphabetically sorted.
 */
static struct sample_fetch_kw_list smp_kws = {ILH, {
	{ "avg_queue",     smp_fetch_avg_queue_size, ARG1(1,BE),  NULL, SMP_T_SINT, SMP_USE_INTRN, },
	{ "be_conn",       smp_fetch_be_conn,        ARG1(1,BE),  NULL, SMP_T_SINT, SMP_USE_INTRN, },
	{ "be_conn_free",  smp_fetch_be_conn_free,   ARG1(1,BE),  NULL, SMP_T_SINT, SMP_USE_INTRN, },
	{ "be_id",         smp_fetch_be_id,          0,           NULL, SMP_T_SINT, SMP_USE_BKEND, },
	{ "be_name",       smp_fetch_be_name,        0,           NULL, SMP_T_STR,  SMP_USE_BKEND, },
	{ "be_sess_rate",  smp_fetch_be_sess_rate,   ARG1(1,BE),  NULL, SMP_T_SINT, SMP_USE_INTRN, },
	{ "connslots",     smp_fetch_connslots,      ARG1(1,BE),  NULL, SMP_T_SINT, SMP_USE_INTRN, },
	{ "nbsrv",         smp_fetch_nbsrv,          ARG1(1,BE),  NULL, SMP_T_SINT, SMP_USE_INTRN, },
	{ "queue",         smp_fetch_queue_size,     ARG1(1,BE),  NULL, SMP_T_SINT, SMP_USE_INTRN, },
	{ "srv_conn",      smp_fetch_srv_conn,       ARG1(1,SRV), NULL, SMP_T_SINT, SMP_USE_INTRN, },
	{ "srv_conn_free", smp_fetch_srv_conn_free,  ARG1(1,SRV), NULL, SMP_T_SINT, SMP_USE_INTRN, },
	{ "srv_id",        smp_fetch_srv_id,         0,           NULL, SMP_T_SINT, SMP_USE_SERVR, },
	{ "srv_is_up",     smp_fetch_srv_is_up,      ARG1(1,SRV), NULL, SMP_T_BOOL, SMP_USE_INTRN, },
	{ "srv_iweight",   smp_fetch_srv_iweight,    ARG1(1,SRV), NULL, SMP_T_SINT, SMP_USE_INTRN, },
	{ "srv_name",      smp_fetch_srv_name,       0,           NULL, SMP_T_STR,  SMP_USE_SERVR, },
	{ "srv_queue",     smp_fetch_srv_queue,      ARG1(1,SRV), NULL, SMP_T_SINT, SMP_USE_INTRN, },
	{ "srv_sess_rate", smp_fetch_srv_sess_rate,  ARG1(1,SRV), NULL, SMP_T_SINT, SMP_USE_INTRN, },
	{ "srv_uweight",   smp_fetch_srv_uweight,    ARG1(1,SRV), NULL, SMP_T_SINT, SMP_USE_INTRN, },
	{ "srv_weight",    smp_fetch_srv_weight,     ARG1(1,SRV), NULL, SMP_T_SINT, SMP_USE_INTRN, },
	{ /* END */ },
}};

INITCALL1(STG_REGISTER, sample_register_fetches, &smp_kws);
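
/* Example configuration uses of the fetches above (sketch; backend names are
 * placeholders):
 *
 *     acl app_full   avg_queue(app) gt 10
 *     acl app_alive  nbsrv(app) gt 0
 *     use_backend spillover  if app_full
 *     use_backend sorry_page if !app_alive
 */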

/* Note: must not be declared <const> as its list will be overwritten */
static struct sample_conv_kw_list sample_conv_kws = {ILH, {
	{ "nbsrv",     sample_conv_nbsrv,     0, NULL, SMP_T_STR, SMP_T_SINT },
	{ "srv_queue", sample_conv_srv_queue, 0, NULL, SMP_T_STR, SMP_T_SINT },
	{ /* END */ },
}};

INITCALL1(STG_REGISTER, sample_register_convs, &sample_conv_kws);
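
/* Unlike the fetch form, the converter form lets the backend name come from
 * a runtime string, e.g. a variable (sketch, assumed header name):
 *
 *     http-request set-var(txn.be) hdr(x-backend)
 *     http-request deny if { var(txn.be),nbsrv eq 0 }
 */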

/* Note: must not be declared <const> as its list will be overwritten.
 * Please take care of keeping this list alphabetically sorted.
 */
static struct acl_kw_list acl_kws = {ILH, {
	{ /* END */ },
}};

INITCALL1(STG_REGISTER, acl_register_keywords, &acl_kws);

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */