1 /*
2 * Backend variables and functions.
3 *
4 * Copyright 2000-2013 Willy Tarreau <w@1wt.eu>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
13 #include <errno.h>
14 #include <fcntl.h>
15 #include <stdio.h>
16 #include <stdlib.h>
17 #include <syslog.h>
18 #include <string.h>
19 #include <ctype.h>
20 #include <sys/types.h>
21
22 #include <common/buffer.h>
23 #include <common/compat.h>
24 #include <common/config.h>
25 #include <common/debug.h>
26 #include <common/hash.h>
27 #include <common/htx.h>
28 #include <common/initcall.h>
29 #include <common/ticks.h>
30 #include <common/time.h>
31 #include <common/namespace.h>
32
33 #include <types/global.h>
34
35 #include <proto/acl.h>
36 #include <proto/arg.h>
37 #include <proto/backend.h>
38 #include <proto/channel.h>
39 #include <proto/frontend.h>
40 #include <proto/http_htx.h>
41 #include <proto/lb_chash.h>
42 #include <proto/lb_fas.h>
43 #include <proto/lb_fwlc.h>
44 #include <proto/lb_fwrr.h>
45 #include <proto/lb_map.h>
46 #include <proto/log.h>
47 #include <proto/mux_pt.h>
48 #include <proto/obj_type.h>
49 #include <proto/payload.h>
50 #include <proto/protocol.h>
51 #include <proto/http_ana.h>
52 #include <proto/proto_tcp.h>
53 #include <proto/proxy.h>
54 #include <proto/queue.h>
55 #include <proto/sample.h>
56 #include <proto/server.h>
57 #include <proto/session.h>
58 #include <proto/stream.h>
59 #include <proto/stream_interface.h>
60 #include <proto/ssl_sock.h>
61 #include <proto/task.h>
62
63 #define TRACE_SOURCE &trace_strm
64
65 int be_lastsession(const struct proxy *be)
66 {
67 if (be->be_counters.last_sess)
68 return now.tv_sec - be->be_counters.last_sess;
69
70 return -1;
71 }
72
73 /* helper function to invoke the correct hash method */
74 static unsigned int gen_hash(const struct proxy* px, const char* key, unsigned long len)
75 {
76 unsigned int hash;
77
78 switch (px->lbprm.algo & BE_LB_HASH_FUNC) {
79 case BE_LB_HFCN_DJB2:
80 hash = hash_djb2(key, len);
81 break;
82 case BE_LB_HFCN_WT6:
83 hash = hash_wt6(key, len);
84 break;
85 case BE_LB_HFCN_CRC32:
86 hash = hash_crc32(key, len);
87 break;
88 case BE_LB_HFCN_SDBM:
89 /* this is the default hash function */
90 default:
91 hash = hash_sdbm(key, len);
92 break;
93 }
94
95 return hash;
96 }
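
/* Illustrative sketch (not HAProxy code): textbook versions of two of the string
 * hashes selected above. HAProxy's own hash_djb2()/hash_sdbm() live in src/hash.c,
 * take an explicit length and may differ in implementation details, but the core
 * mixing steps are the same idea.
 */
static unsigned int example_hash_djb2(const char *key, unsigned long len)
{
	unsigned int hash = 5381;

	while (len--)
		hash = ((hash << 5) + hash) + (unsigned char)*key++; /* hash * 33 + c */
	return hash;
}

static unsigned int example_hash_sdbm(const char *key, unsigned long len)
{
	unsigned int hash = 0;

	while (len--)
		hash = (unsigned char)*key++ + (hash << 6) + (hash << 16) - hash;
	return hash;
}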
97
98 /*
99 * This function recounts the number of usable active and backup servers for
100 * proxy <p>. These numbers are stored in p->srv_act and p->srv_bck.
101 * This function also recomputes the total active and backup weights. However,
102 * it does not update tot_weight nor tot_used. Use update_backend_weight() for
103 * this.
104 * This function is designed to be called before the servers' weights and states
105 * are committed, so it uses the 'next' weight and state values.
106 *
107 * threads: it is the caller's responsibility to lock the data. For now, this
108 * function is only called from LB modules, so it should be fine. But if you need
109 * to call it from another place, be careful (and update this comment).
110 */
111 void recount_servers(struct proxy *px)
112 {
113 struct server *srv;
114
115 px->srv_act = px->srv_bck = 0;
116 px->lbprm.tot_wact = px->lbprm.tot_wbck = 0;
117 px->lbprm.fbck = NULL;
118 for (srv = px->srv; srv != NULL; srv = srv->next) {
119 if (!srv_willbe_usable(srv))
120 continue;
121
122 if (srv->flags & SRV_F_BACKUP) {
123 if (!px->srv_bck &&
124 !(px->options & PR_O_USE_ALL_BK))
125 px->lbprm.fbck = srv;
126 px->srv_bck++;
127 srv->cumulative_weight = px->lbprm.tot_wbck;
128 px->lbprm.tot_wbck += srv->next_eweight;
129 } else {
130 px->srv_act++;
131 srv->cumulative_weight = px->lbprm.tot_wact;
132 px->lbprm.tot_wact += srv->next_eweight;
133 }
134 }
135 }
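
/* Illustrative sketch (not HAProxy code): the cumulative weights computed above
 * give each server a contiguous slice [cumulative_weight, cumulative_weight + eweight)
 * of the total weight, which weight-aware lookups can then index into. The struct
 * below is a toy, not HAProxy's struct server.
 */
struct toy_srv {
	const char *name;
	unsigned int eweight;           /* effective weight of this server */
	unsigned int cumulative_weight; /* sum of the weights of the previous servers */
};

static unsigned int toy_recount(struct toy_srv *srv, int nbsrv)
{
	unsigned int tot = 0;
	int i;

	for (i = 0; i < nbsrv; i++) {
		srv[i].cumulative_weight = tot;
		tot += srv[i].eweight;
	}
	return tot; /* corresponds to tot_wact or tot_wbck above */
}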
136
137 /* This function simply updates the backend's tot_weight and tot_used values
138 * after servers weights have been updated. It is designed to be used after
139 * recount_servers() or equivalent.
140 *
141 * threads: it is the caller's responsibility to lock the data. For now, this
142 * function is only called from LB modules, so it should be fine. But if you need
143 * to call it from another place, be careful (and update this comment).
144 */
145 void update_backend_weight(struct proxy *px)
146 {
147 if (px->srv_act) {
148 px->lbprm.tot_weight = px->lbprm.tot_wact;
149 px->lbprm.tot_used = px->srv_act;
150 }
151 else if (px->lbprm.fbck) {
152 /* use only the first backup server */
153 px->lbprm.tot_weight = px->lbprm.fbck->next_eweight;
154 px->lbprm.tot_used = 1;
155 }
156 else {
157 px->lbprm.tot_weight = px->lbprm.tot_wbck;
158 px->lbprm.tot_used = px->srv_bck;
159 }
160 }
161
162 /*
163 * This function tries to find a running server for the proxy <px> following
164 * the source hash method. Depending on the number of active/backup servers,
165 * it will either look for active servers, or for backup servers.
166 * If any server is found, it will be returned. If no valid server is found,
167 * NULL is returned.
168 */
169 static struct server *get_server_sh(struct proxy *px, const char *addr, int len, const struct server *avoid)
170 {
171 unsigned int h, l;
172
173 if (px->lbprm.tot_weight == 0)
174 return NULL;
175
176 l = h = 0;
177
178 /* note: we won't hash if there's only one server left */
179 if (px->lbprm.tot_used == 1)
180 goto hash_done;
181
182 while ((l + sizeof (int)) <= len) {
183 h ^= ntohl(*(unsigned int *)(&addr[l]));
184 l += sizeof (int);
185 }
186 if ((px->lbprm.algo & BE_LB_HASH_MOD) == BE_LB_HMOD_AVAL)
187 h = full_hash(h);
188 hash_done:
189 if ((px->lbprm.algo & BE_LB_LKUP) == BE_LB_LKUP_CHTREE)
190 return chash_get_server_hash(px, h, avoid);
191 else
192 return map_get_server_hash(px, h);
193 }
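
/* Illustrative sketch (not HAProxy code): the XOR folding used above, applied to a
 * raw address of <len> bytes (4 for IPv4, 16 for IPv6), followed by the simplest
 * possible mapping onto a weight range. map_get_server_hash() conceptually does
 * the same "hash % tot_weight" but indexes a precomputed map of tot_weight slots.
 */
static unsigned int example_addr_hash(const unsigned char *addr, int len)
{
	unsigned int h = 0;
	int i;

	/* fold the address four bytes at a time, like get_server_sh() */
	for (i = 0; i + 4 <= len; i += 4)
		h ^= ((unsigned int)addr[i] << 24) | (addr[i + 1] << 16) |
		     (addr[i + 2] << 8) | addr[i + 3];
	return h;
}

static unsigned int example_pick_slot(unsigned int hash, unsigned int tot_weight)
{
	return hash % tot_weight; /* the caller must ensure tot_weight != 0 */
}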
194
195 /*
196 * This function tries to find a running server for the proxy <px> following
197 * the URI hash method. In order to optimize cache hits, the hash computation
198 * ends at the question mark. Depending on the number of active/backup servers,
199 * it will either look for active servers, or for backup servers.
200 * If any server is found, it will be returned. If no valid server is found,
201 * NULL is returned. The lbprm.arg_opt{1,2,3} values correspond respectively to
202 * the "whole" optional argument (boolean, bit0), the "len" argument (numeric)
203 * and the "depth" argument (numeric).
204 *
205 * This code was contributed by Guillaume Dallaire, who also selected this hash
206 * algorithm among about ten candidates because it gave him the best results.
207 *
208 */
209 static struct server *get_server_uh(struct proxy *px, char *uri, int uri_len, const struct server *avoid)
210 {
211 unsigned int hash = 0;
212 int c;
213 int slashes = 0;
214 const char *start, *end;
215
216 if (px->lbprm.tot_weight == 0)
217 return NULL;
218
219 /* note: we won't hash if there's only one server left */
220 if (px->lbprm.tot_used == 1)
221 goto hash_done;
222
223 if (px->lbprm.arg_opt2) // "len"
224 uri_len = MIN(uri_len, px->lbprm.arg_opt2);
225
226 start = end = uri;
227 while (uri_len--) {
228 c = *end;
229 if (c == '/') {
230 slashes++;
231 if (slashes == px->lbprm.arg_opt3) /* depth+1 */
232 break;
233 }
234 else if (c == '?' && !(px->lbprm.arg_opt1 & 1)) // "whole"
235 break;
236 end++;
237 }
238
239 hash = gen_hash(px, start, (end - start));
240
241 if ((px->lbprm.algo & BE_LB_HASH_MOD) == BE_LB_HMOD_AVAL)
242 hash = full_hash(hash);
243 hash_done:
244 if ((px->lbprm.algo & BE_LB_LKUP) == BE_LB_LKUP_CHTREE)
245 return chash_get_server_hash(px, hash, avoid);
246 else
247 return map_get_server_hash(px, hash);
248 }
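
/* Illustrative sketch (not HAProxy code): the truncation rules applied above,
 * written as a standalone helper. <whole> keeps the query string ("whole" option),
 * <maxlen> caps the number of hashed bytes ("len" option, 0 meaning no cap) and
 * <depth> stops after that many directory levels ("depth" option, 0 meaning
 * unlimited). The returned value is the number of leading bytes to hash.
 */
static int example_uri_hash_len(const char *uri, int uri_len, int whole, int maxlen, int depth)
{
	int slashes = 0;
	int i;

	if (maxlen && uri_len > maxlen)
		uri_len = maxlen;

	for (i = 0; i < uri_len; i++) {
		if (uri[i] == '/') {
			slashes++;
			if (depth && slashes == depth + 1)
				break;
		}
		else if (uri[i] == '?' && !whole)
			break;
	}
	return i;
}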
249
250 /*
251 * This function tries to find a running server for the proxy <px> following
252 * the URL parameter hash method. It looks for a specific parameter in the
253 * URL and hashes it to compute the server ID. This is useful to optimize
254 * performance by avoiding bounces between servers in contexts where sessions
255 * are shared but cookies are not usable. If the parameter is not found, NULL
256 * is returned. If any server is found, it will be returned. If no valid server
257 * is found, NULL is returned.
258 */
259 static struct server *get_server_ph(struct proxy *px, const char *uri, int uri_len, const struct server *avoid)
260 {
261 unsigned int hash = 0;
262 const char *start, *end;
263 const char *p;
264 const char *params;
265 int plen;
266
267 /* when tot_weight is 0 then so is srv_count */
268 if (px->lbprm.tot_weight == 0)
269 return NULL;
270
271 if ((p = memchr(uri, '?', uri_len)) == NULL)
272 return NULL;
273
274 p++;
275
276 uri_len -= (p - uri);
277 plen = px->lbprm.arg_len;
278 params = p;
279
280 while (uri_len > plen) {
281 /* Look for the parameter name followed by an equal symbol */
282 if (params[plen] == '=') {
283 if (memcmp(params, px->lbprm.arg_str, plen) == 0) {
284 /* OK, we have the parameter here at <params>, and
285 * the value right after the equal sign, at <p>.
286 * Skip the equal symbol.
287 */
288 p += plen + 1;
289 start = end = p;
290 uri_len -= plen + 1;
291
292 while (uri_len && *end != '&') {
293 uri_len--;
294 end++;
295 }
296 hash = gen_hash(px, start, (end - start));
297
298 if ((px->lbprm.algo & BE_LB_HASH_MOD) == BE_LB_HMOD_AVAL)
299 hash = full_hash(hash);
300
301 if ((px->lbprm.algo & BE_LB_LKUP) == BE_LB_LKUP_CHTREE)
302 return chash_get_server_hash(px, hash, avoid);
303 else
304 return map_get_server_hash(px, hash);
305 }
306 }
307 /* skip to next parameter */
308 p = memchr(params, '&', uri_len);
309 if (!p)
310 return NULL;
311 p++;
312 uri_len -= (p - params);
313 params = p;
314 }
315 return NULL;
316 }
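
/* Illustrative sketch (not HAProxy code): locating "<name>=<value>" inside a query
 * string, as the loop above does. It returns a pointer to the value and stores its
 * length in <*vlen>, or returns NULL when the parameter is absent. Only libc is
 * used; the function name is hypothetical.
 */
static const char *example_find_param(const char *qs, int qs_len,
                                      const char *name, int name_len, int *vlen)
{
	const char *params = qs;
	const char *p;

	while (qs_len > name_len) {
		/* look for the parameter name followed by an equal symbol */
		if (params[name_len] == '=' && memcmp(params, name, name_len) == 0) {
			const char *v = params + name_len + 1; /* value starts after '=' */
			const char *e = v;
			int left = qs_len - name_len - 1;

			while (left && *e != '&') {
				left--;
				e++;
			}
			*vlen = e - v;
			return v;
		}
		/* skip to the next parameter */
		p = memchr(params, '&', qs_len);
		if (!p)
			return NULL;
		p++;
		qs_len -= (p - params);
		params = p;
	}
	return NULL;
}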
317
318 /*
319 * This does the same as get_server_ph() above, but checks the body contents.
320 */
321 static struct server *get_server_ph_post(struct stream *s, const struct server *avoid)
322 {
323 unsigned int hash = 0;
324 struct channel *req = &s->req;
325 struct proxy *px = s->be;
326 struct htx *htx = htxbuf(&req->buf);
327 struct htx_blk *blk;
328 unsigned int plen = px->lbprm.arg_len;
329 unsigned long len;
330 const char *params, *p, *start, *end;
331
332 if (px->lbprm.tot_weight == 0)
333 return NULL;
334
335 p = params = NULL;
336 len = 0;
337 for (blk = htx_get_first_blk(htx); blk; blk = htx_get_next_blk(htx, blk)) {
338 enum htx_blk_type type = htx_get_blk_type(blk);
339 struct ist v;
340
341 if (type != HTX_BLK_DATA)
342 continue;
343 v = htx_get_blk_value(htx, blk);
344 p = params = v.ptr;
345 len = v.len;
346 break;
347 }
348
349 while (len > plen) {
350 /* Look for the parameter name followed by an equal symbol */
351 if (params[plen] == '=') {
352 if (memcmp(params, px->lbprm.arg_str, plen) == 0) {
353 /* OK, we have the parameter here at <params>, and
354 * the value right after the equal sign, at <p>.
355 * Skip the equal symbol.
356 */
357 p += plen + 1;
358 start = end = p;
359 len -= plen + 1;
360
361 while (len && *end != '&') {
362 if (unlikely(!HTTP_IS_TOKEN(*p))) {
363 /* if in a POST, body must be URI encoded or it's not a URI.
364 * Do not interpret any possible binary data as a parameter.
365 */
366 if (likely(HTTP_IS_LWS(*p))) /* eol, uncertain uri len */
367 break;
368 return NULL; /* oh, no; this is not uri-encoded.
369 * This body does not contain parameters.
370 */
371 }
372 len--;
373 end++;
374 /* should we break if vlen exceeds limit? */
375 }
376 hash = gen_hash(px, start, (end - start));
377
378 if ((px->lbprm.algo & BE_LB_HASH_MOD) == BE_LB_HMOD_AVAL)
379 hash = full_hash(hash);
380
381 if ((px->lbprm.algo & BE_LB_LKUP) == BE_LB_LKUP_CHTREE)
382 return chash_get_server_hash(px, hash, avoid);
383 else
384 return map_get_server_hash(px, hash);
385 }
386 }
387 /* skip to next parameter */
388 p = memchr(params, '&', len);
389 if (!p)
390 return NULL;
391 p++;
392 len -= (p - params);
393 params = p;
394 }
395 return NULL;
396 }
397
398
399 /*
400 * This function tries to find a running server for the proxy <px> following
401 * the Header parameter hash method. It looks for a specific header in the
402 * request and hashes its value to compute the server ID. This is useful to optimize
403 * performance by avoiding bounces between servers in contexts where sessions
404 * are shared but cookies are not usable. If the parameter is not found, NULL
405 * is returned. If any server is found, it will be returned. If no valid server
406 * is found, NULL is returned. When lbprm.arg_opt1 is set, the hash will only
407 * apply to the middle part of a domain name ("use_domain_only" option).
408 */
409 static struct server *get_server_hh(struct stream *s, const struct server *avoid)
410 {
411 unsigned int hash = 0;
412 struct proxy *px = s->be;
413 unsigned int plen = px->lbprm.arg_len;
414 unsigned long len;
415 const char *p;
416 const char *start, *end;
417 struct htx *htx = htxbuf(&s->req.buf);
418 struct http_hdr_ctx ctx = { .blk = NULL };
419
420 /* tot_weight appears to mean srv_count */
421 if (px->lbprm.tot_weight == 0)
422 return NULL;
423
424 /* note: we won't hash if there's only one server left */
425 if (px->lbprm.tot_used == 1)
426 goto hash_done;
427
428 http_find_header(htx, ist2(px->lbprm.arg_str, plen), &ctx, 0);
429
430 /* if the header is not found or empty, let's fallback to round robin */
431 if (!ctx.blk || !ctx.value.len)
432 return NULL;
433
434 /* Found the param_name in the headers.
435 * We will compute the hash based on this value (ctx.value).
436 */
437 len = ctx.value.len;
438 p = ctx.value.ptr;
439
440 if (!px->lbprm.arg_opt1) {
441 hash = gen_hash(px, p, len);
442 } else {
443 int dohash = 0;
444 p += len;
445 /* special computation: use only the main domain name, not the tld/host.
446 * Going back from the end of the string, start hashing at the first
447 * dot and stop at the next one.
448 * This is designed to work with the 'Host' header, and requires
449 * a special option ("use_domain_only") to activate it.
450 */
451 end = p;
452 while (len) {
453 if (dohash) {
454 /* Rewind the pointer until the previous char
455 * is a dot; this gives us the start
456 * position of the domain. */
457 if (*(p - 1) == '.')
458 break;
459 }
460 else if (*p == '.') {
461 /* The pointer has been rewound to the dot before the
462 * tld; we memorize the end of the domain and
463 * can enter the domain processing. */
464 end = p;
465 dohash = 1;
466 }
467 p--;
468 len--;
469 }
470 start = p;
471 hash = gen_hash(px, start, (end - start));
472 }
473 if ((px->lbprm.algo & BE_LB_HASH_MOD) == BE_LB_HMOD_AVAL)
474 hash = full_hash(hash);
475 hash_done:
476 if ((px->lbprm.algo & BE_LB_LKUP) == BE_LB_LKUP_CHTREE)
477 return chash_get_server_hash(px, hash, avoid);
478 else
479 return map_get_server_hash(px, hash);
480 }
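
/* Illustrative sketch (not HAProxy code): the "use_domain_only" extraction performed
 * above, as a cleaned-up standalone helper rather than the exact code path. Given
 * "www.haproxy.org" it selects "haproxy", i.e. the label right before the TLD; with
 * fewer than two dots the whole remaining prefix is kept.
 */
static void example_main_domain(const char *host, int len,
                                const char **start, const char **end)
{
	const char *p = host + len; /* one past the last character */
	int dot_seen = 0;

	*start = host;
	*end = host + len;

	while (p > host) {
		p--;
		if (*p != '.')
			continue;
		if (!dot_seen) {
			/* dot before the TLD: the domain label ends here */
			*end = p;
			dot_seen = 1;
		}
		else {
			/* previous dot: the domain label starts right after it */
			*start = p + 1;
			break;
		}
	}
}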
481
482 /* RDP Cookie HASH. */
483 static struct server *get_server_rch(struct stream *s, const struct server *avoid)
484 {
485 unsigned int hash = 0;
486 struct proxy *px = s->be;
487 unsigned long len;
488 int ret;
489 struct sample smp;
490 int rewind;
491
492 /* tot_weight appears to mean srv_count */
493 if (px->lbprm.tot_weight == 0)
494 return NULL;
495
496 memset(&smp, 0, sizeof(smp));
497
498 rewind = co_data(&s->req);
499 c_rew(&s->req, rewind);
500
501 ret = fetch_rdp_cookie_name(s, &smp, px->lbprm.arg_str, px->lbprm.arg_len);
502 len = smp.data.u.str.data;
503
504 c_adv(&s->req, rewind);
505
506 if (ret == 0 || (smp.flags & SMP_F_MAY_CHANGE) || len == 0)
507 return NULL;
508
509 /* note: we won't hash if there's only one server left */
510 if (px->lbprm.tot_used == 1)
511 goto hash_done;
512
513 /* Found the param_name in the request.
514 * We will compute the hash based on the fetched value.
515 */
516 hash = gen_hash(px, smp.data.u.str.area, len);
517
518 if ((px->lbprm.algo & BE_LB_HASH_MOD) == BE_LB_HMOD_AVAL)
519 hash = full_hash(hash);
520 hash_done:
521 if ((px->lbprm.algo & BE_LB_LKUP) == BE_LB_LKUP_CHTREE)
522 return chash_get_server_hash(px, hash, avoid);
523 else
524 return map_get_server_hash(px, hash);
525 }
526
527 /* random value */
528 static struct server *get_server_rnd(struct stream *s, const struct server *avoid)
529 {
530 unsigned int hash = 0;
531 struct proxy *px = s->be;
532 struct server *prev, *curr;
533 int draws = px->lbprm.arg_opt1; // number of draws
534
535 /* tot_weight appears to mean srv_count */
536 if (px->lbprm.tot_weight == 0)
537 return NULL;
538
539 curr = NULL;
540 do {
541 prev = curr;
542 hash = ha_random32();
543 curr = chash_get_server_hash(px, hash, avoid);
544 if (!curr)
545 break;
546
547 /* compare the new server to the previous best choice and pick
548 * the one with the least currently served requests.
549 */
550 if (prev && prev != curr &&
551 curr->served * prev->cur_eweight > prev->served * curr->cur_eweight)
552 curr = prev;
553 } while (--draws > 0);
554
555 return curr;
556 }
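
/* Illustrative sketch (not HAProxy code): the "power of <draws> choices" comparison
 * used above, on a toy type. The load of a server is served/eweight; to avoid
 * divisions the two loads are compared by cross-multiplication:
 *   a.served/a.eweight > b.served/b.eweight  <=>  a.served*b.eweight > b.served*a.eweight
 * For instance 10 served at weight 2 (load 5) is preferred over 8 served at
 * weight 1 (load 8).
 */
struct toy_node {
	unsigned int served;  /* currently served connections */
	unsigned int eweight; /* effective weight, assumed > 0 */
};

/* returns the less loaded of the two candidates, <a> winning ties */
static const struct toy_node *toy_less_loaded(const struct toy_node *a,
                                              const struct toy_node *b)
{
	if (a->served * b->eweight > b->served * a->eweight)
		return b;
	return a;
}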
557
558 /*
559 * This function applies the load-balancing algorithm to the stream, as
560 * defined by the backend it is assigned to. The stream is then marked as
561 * 'assigned'.
562 *
563 * This function MAY NOT be called with SF_ASSIGNED already set. If the stream
564 * had a server previously assigned, it is rebalanced, trying to avoid the same
565 * server, which should still be present in target_srv(&s->target) before the call.
566 * The function tries to keep the original connection slot if it reconnects to
567 * the same server, otherwise it releases it and tries to offer it to another pending stream.
568 *
569 * It is illegal to call this function with a stream in a queue.
570 *
571 * It may return :
572 * SRV_STATUS_OK if everything is OK. ->srv and ->target are assigned.
573 * SRV_STATUS_NOSRV if no server is available. Stream is not ASSIGNED
574 * SRV_STATUS_FULL if all servers are saturated. Stream is not ASSIGNED
575 * SRV_STATUS_INTERNAL for other unrecoverable errors.
576 *
577 * Upon successful return, the stream flag SF_ASSIGNED is set to indicate that
578 * it does not need to be called anymore. This means that target_srv(&s->target)
579 * can be trusted in balance and direct modes.
580 *
581 */
582
583 int assign_server(struct stream *s)
584 {
585 struct connection *conn = NULL;
586 struct server *conn_slot;
587 struct server *srv = NULL, *prev_srv;
588 int err;
589
590 DPRINTF(stderr,"assign_server : s=%p\n",s);
591
592 err = SRV_STATUS_INTERNAL;
593 if (unlikely(s->pend_pos || s->flags & SF_ASSIGNED))
594 goto out_err;
595
596 prev_srv = objt_server(s->target);
597 conn_slot = s->srv_conn;
598
599 /* We have to release any connection slot before applying any LB algo,
600 * otherwise we may erroneously end up with no available slot.
601 */
602 if (conn_slot)
603 sess_change_server(s, NULL);
604
605 /* We will now try to find the good server and store it into <objt_server(s->target)>.
606 * Note that <objt_server(s->target)> may be NULL in case of dispatch or proxy mode,
607 * as well as if no server is available (check error code).
608 */
609
610 srv = NULL;
611 s->target = NULL;
612
613 if ((s->be->lbprm.algo & BE_LB_KIND) != BE_LB_KIND_HI &&
614 ((s->sess->flags & SESS_FL_PREFER_LAST) ||
615 (s->be->options & PR_O_PREF_LAST))) {
616 struct sess_srv_list *srv_list;
617 list_for_each_entry(srv_list, &s->sess->srv_list, srv_list) {
618 struct server *tmpsrv = objt_server(srv_list->target);
619
620 if (tmpsrv && tmpsrv->proxy == s->be &&
621 ((s->sess->flags & SESS_FL_PREFER_LAST) ||
622 (!s->be->max_ka_queue ||
623 server_has_room(tmpsrv) || (
624 tmpsrv->nbpend + 1 < s->be->max_ka_queue))) &&
625 srv_currently_usable(tmpsrv)) {
626 list_for_each_entry(conn, &srv_list->conn_list, session_list) {
627 if (conn->flags & CO_FL_CONNECTED) {
628
629 srv = tmpsrv;
630 s->target = &srv->obj_type;
631 if (conn->flags & CO_FL_SESS_IDLE) {
632 conn->flags &= ~CO_FL_SESS_IDLE;
633 s->sess->idle_conns--;
634 }
635 goto out_ok;
636 }
637 }
638 }
639 }
640 }
641 if (s->be->lbprm.algo & BE_LB_KIND) {
642
643 /* we must check if we have at least one server available */
644 if (!s->be->lbprm.tot_weight) {
645 err = SRV_STATUS_NOSRV;
646 goto out;
647 }
648
649 /* First check whether we need to fetch some data or simply call
650 * the LB lookup function. Only the hashing functions will need
651 * some input data in fact, and will support multiple algorithms.
652 */
653 switch (s->be->lbprm.algo & BE_LB_LKUP) {
654 case BE_LB_LKUP_RRTREE:
655 srv = fwrr_get_next_server(s->be, prev_srv);
656 break;
657
658 case BE_LB_LKUP_FSTREE:
659 srv = fas_get_next_server(s->be, prev_srv);
660 break;
661
662 case BE_LB_LKUP_LCTREE:
663 srv = fwlc_get_next_server(s->be, prev_srv);
664 break;
665
666 case BE_LB_LKUP_CHTREE:
667 case BE_LB_LKUP_MAP:
668 if ((s->be->lbprm.algo & BE_LB_KIND) == BE_LB_KIND_RR) {
669 if ((s->be->lbprm.algo & BE_LB_PARM) == BE_LB_RR_RANDOM)
670 srv = get_server_rnd(s, prev_srv);
671 else if ((s->be->lbprm.algo & BE_LB_LKUP) == BE_LB_LKUP_CHTREE)
672 srv = chash_get_next_server(s->be, prev_srv);
673 else
674 srv = map_get_server_rr(s->be, prev_srv);
675 break;
676 }
677 else if ((s->be->lbprm.algo & BE_LB_KIND) != BE_LB_KIND_HI) {
678 /* unknown balancing algorithm */
679 err = SRV_STATUS_INTERNAL;
680 goto out;
681 }
682
683 switch (s->be->lbprm.algo & BE_LB_PARM) {
684 case BE_LB_HASH_SRC:
685 conn = objt_conn(strm_orig(s));
686 if (conn && conn_get_src(conn) && conn->src->ss_family == AF_INET) {
687 srv = get_server_sh(s->be,
688 (void *)&((struct sockaddr_in *)conn->src)->sin_addr,
689 4, prev_srv);
690 }
691 else if (conn && conn_get_src(conn) && conn->src->ss_family == AF_INET6) {
692 srv = get_server_sh(s->be,
693 (void *)&((struct sockaddr_in6 *)conn->src)->sin6_addr,
694 16, prev_srv);
695 }
696 else {
697 /* unknown IP family */
698 err = SRV_STATUS_INTERNAL;
699 goto out;
700 }
701 break;
702
703 case BE_LB_HASH_URI:
704 /* URI hashing */
705 if (IS_HTX_STRM(s) && s->txn->req.msg_state >= HTTP_MSG_BODY) {
706 struct ist uri;
707
708 uri = htx_sl_req_uri(http_get_stline(htxbuf(&s->req.buf)));
709 if (s->be->lbprm.arg_opt1 & 2) {
710 uri = http_get_path(uri);
711 if (!uri.ptr)
712 uri = ist("");
713 }
714 srv = get_server_uh(s->be, uri.ptr, uri.len, prev_srv);
715 }
716 break;
717
718 case BE_LB_HASH_PRM:
719 /* URL Parameter hashing */
720 if (IS_HTX_STRM(s) && s->txn->req.msg_state >= HTTP_MSG_BODY) {
721 struct ist uri;
722
723 uri = htx_sl_req_uri(http_get_stline(htxbuf(&s->req.buf)));
724 srv = get_server_ph(s->be, uri.ptr, uri.len, prev_srv);
725
726 if (!srv && s->txn->meth == HTTP_METH_POST)
727 srv = get_server_ph_post(s, prev_srv);
728 }
729 break;
730
731 case BE_LB_HASH_HDR:
732 /* Header Parameter hashing */
733 if (IS_HTX_STRM(s) && s->txn->req.msg_state >= HTTP_MSG_BODY)
734 srv = get_server_hh(s, prev_srv);
735 break;
736
737 case BE_LB_HASH_RDP:
738 /* RDP Cookie hashing */
739 srv = get_server_rch(s, prev_srv);
740 break;
741
742 default:
743 /* unknown balancing algorithm */
744 err = SRV_STATUS_INTERNAL;
745 goto out;
746 }
747
748 /* If the hashing parameter was not found, let's fall
749 * back to round robin on the map.
750 */
751 if (!srv) {
752 if ((s->be->lbprm.algo & BE_LB_LKUP) == BE_LB_LKUP_CHTREE)
753 srv = chash_get_next_server(s->be, prev_srv);
754 else
755 srv = map_get_server_rr(s->be, prev_srv);
756 }
757
758 /* end of map-based LB */
759 break;
760
761 default:
762 /* unknown balancing algorithm */
763 err = SRV_STATUS_INTERNAL;
764 goto out;
765 }
766
767 if (!srv) {
768 err = SRV_STATUS_FULL;
769 goto out;
770 }
771 else if (srv != prev_srv) {
772 _HA_ATOMIC_ADD(&s->be->be_counters.cum_lbconn, 1);
773 _HA_ATOMIC_ADD(&srv->counters.cum_lbconn, 1);
774 }
775 s->target = &srv->obj_type;
776 }
777 else if (s->be->options & (PR_O_DISPATCH | PR_O_TRANSP)) {
778 s->target = &s->be->obj_type;
779 }
780 else if ((s->be->options & PR_O_HTTP_PROXY)) {
781 conn = cs_conn(objt_cs(s->si[1].end));
782
783 if (conn && conn->dst && is_addr(conn->dst)) {
784 /* in proxy mode, we need a valid destination address */
785 s->target = &s->be->obj_type;
786 } else {
787 err = SRV_STATUS_NOSRV;
788 goto out;
789 }
790 }
791 else {
792 err = SRV_STATUS_NOSRV;
793 goto out;
794 }
795
796 out_ok:
797 s->flags |= SF_ASSIGNED;
798 err = SRV_STATUS_OK;
799 out:
800
801 /* Either we take back our connection slot, or we offer it to someone
802 * else if we don't need it anymore.
803 */
804 if (conn_slot) {
805 if (conn_slot == srv) {
806 sess_change_server(s, srv);
807 } else {
808 if (may_dequeue_tasks(conn_slot, s->be))
809 process_srv_queue(conn_slot);
810 }
811 }
812
813 out_err:
814 return err;
815 }
816
817 /*
818 * This function assigns a server address to a stream, and sets SF_ADDR_SET.
819 * The address is taken from the currently assigned server, or from the
820 * dispatch or transparent address.
821 *
822 * It may return :
823 * SRV_STATUS_OK if everything is OK.
824 * SRV_STATUS_INTERNAL for other unrecoverable errors.
825 *
826 * Upon successful return, the stream flag SF_ADDR_SET is set. This flag is
827 * not cleared, so it's up to the caller to clear it if required.
828 */
829 int assign_server_address(struct stream *s)
830 {
831 struct connection *cli_conn = objt_conn(strm_orig(s));
832
833 DPRINTF(stderr,"assign_server_address : s=%p\n",s);
834
835 if (!sockaddr_alloc(&s->target_addr))
836 return SRV_STATUS_INTERNAL;
837
838 if ((s->flags & SF_DIRECT) || (s->be->lbprm.algo & BE_LB_KIND)) {
839 /* A server is necessarily known for this stream */
840 if (!(s->flags & SF_ASSIGNED))
841 return SRV_STATUS_INTERNAL;
842
843 *s->target_addr = __objt_server(s->target)->addr;
844 set_host_port(s->target_addr, __objt_server(s->target)->svc_port);
845
846 if (!is_addr(s->target_addr) && cli_conn) {
847 /* if the server has no address, we use the same address
848 * the client asked for, which is handy for remapping ports
849 * locally on multiple addresses at once. Nothing is done
850 * for AF_UNIX addresses.
851 */
852 if (!conn_get_dst(cli_conn)) {
853 /* do nothing if we can't retrieve the address */
854 } else if (cli_conn->dst->ss_family == AF_INET) {
855 ((struct sockaddr_in *)s->target_addr)->sin_family = AF_INET;
856 ((struct sockaddr_in *)s->target_addr)->sin_addr = ((struct sockaddr_in *)cli_conn->dst)->sin_addr;
857 } else if (cli_conn->dst->ss_family == AF_INET6) {
858 ((struct sockaddr_in6 *)s->target_addr)->sin6_family = AF_INET6;
859 ((struct sockaddr_in6 *)s->target_addr)->sin6_addr = ((struct sockaddr_in6 *)cli_conn->dst)->sin6_addr;
860 }
861 }
862
863 /* if this server remaps proxied ports, we'll use
864 * the port the client connected to with an offset. */
865 if ((__objt_server(s->target)->flags & SRV_F_MAPPORTS) && cli_conn) {
866 int base_port;
867
868 if (conn_get_dst(cli_conn)) {
869 /* First, retrieve the port from the incoming connection */
870 base_port = get_host_port(cli_conn->dst);
871
872 /* Second, assign the outgoing connection's port */
873 base_port += get_host_port(s->target_addr);
874 set_host_port(s->target_addr, base_port);
875 }
876 }
877 }
878 else if (s->be->options & PR_O_DISPATCH) {
879 /* connect to the defined dispatch addr */
880 *s->target_addr = s->be->dispatch_addr;
881 }
882 else if ((s->be->options & PR_O_TRANSP) && cli_conn) {
883 /* in transparent mode, use the original dest addr if no dispatch specified */
884 if (conn_get_dst(cli_conn) &&
885 (cli_conn->dst->ss_family == AF_INET || cli_conn->dst->ss_family == AF_INET6))
886 *s->target_addr = *cli_conn->dst;
887 }
888 else if (s->be->options & PR_O_HTTP_PROXY) {
889 /* If HTTP PROXY option is set, then server is already assigned
890 * during incoming client request parsing. */
891 }
892 else {
893 /* no server and no LB algorithm ! */
894 return SRV_STATUS_INTERNAL;
895 }
896
897 s->flags |= SF_ADDR_SET;
898 return SRV_STATUS_OK;
899 }
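
/* Illustrative sketch (not HAProxy code): the mapped-port arithmetic above. With
 * SRV_F_MAPPORTS (a server declared without a fixed port, or with a relative one),
 * the configured port acts as an offset added to the port the client connected to.
 */
static int example_mapped_port(int client_dst_port, int port_offset)
{
	/* e.g. a client hitting :8080 with an offset of +1000 goes to :9080 */
	return client_dst_port + port_offset;
}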
900
901 /* This function assigns a server to stream <s> if required, and can add the
902 * connection to either the assigned server's queue or to the proxy's queue.
903 * If ->srv_conn is set, the stream is first released from the server.
904 * It may also be called with SF_DIRECT and/or SF_ASSIGNED though. It will
905 * be called before any connection and after any retry or redispatch occurs.
906 *
907 * It is not allowed to call this function with a stream in a queue.
908 *
909 * Returns :
910 *
911 * SRV_STATUS_OK if everything is OK.
912 * SRV_STATUS_NOSRV if no server is available. objt_server(s->target) = NULL.
913 * SRV_STATUS_QUEUED if the connection has been queued.
914 * SRV_STATUS_FULL if the server(s) is/are saturated and the
915 * connection could not be queued at the server's,
916 * which may be NULL if we queue on the backend.
917 * SRV_STATUS_INTERNAL for other unrecoverable errors.
918 *
919 */
920 int assign_server_and_queue(struct stream *s)
921 {
922 struct pendconn *p;
923 struct server *srv;
924 int err;
925
926 if (s->pend_pos)
927 return SRV_STATUS_INTERNAL;
928
929 err = SRV_STATUS_OK;
930 if (!(s->flags & SF_ASSIGNED)) {
931 struct server *prev_srv = objt_server(s->target);
932
933 err = assign_server(s);
934 if (prev_srv) {
935 /* This stream was previously assigned to a server. We have to
936 * update the stream's and the server's stats :
937 * - if the server changed :
938 * - set TX_CK_DOWN if txn.flags was TX_CK_VALID
939 * - set SF_REDISP if it was successfully redispatched
940 * - increment srv->redispatches and be->redispatches
941 * - if the server remained the same : update retries.
942 */
943
944 if (prev_srv != objt_server(s->target)) {
945 if (s->txn && (s->txn->flags & TX_CK_MASK) == TX_CK_VALID) {
946 s->txn->flags &= ~TX_CK_MASK;
947 s->txn->flags |= TX_CK_DOWN;
948 }
949 s->flags |= SF_REDISP;
950 _HA_ATOMIC_ADD(&prev_srv->counters.redispatches, 1);
951 _HA_ATOMIC_ADD(&s->be->be_counters.redispatches, 1);
952 } else {
953 _HA_ATOMIC_ADD(&prev_srv->counters.retries, 1);
954 _HA_ATOMIC_ADD(&s->be->be_counters.retries, 1);
955 }
956 }
957 }
958
959 switch (err) {
960 case SRV_STATUS_OK:
961 /* we have SF_ASSIGNED set */
962 srv = objt_server(s->target);
963 if (!srv)
964 return SRV_STATUS_OK; /* dispatch or proxy mode */
965
966 /* If we already have a connection slot, no need to check any queue */
967 if (s->srv_conn == srv)
968 return SRV_STATUS_OK;
969
970 /* OK, this stream already has an assigned server, but no
971 * connection slot yet. Either it is a redispatch, or it was
972 * assigned from persistence information (direct mode).
973 */
974 if ((s->flags & SF_REDIRECTABLE) && srv->rdr_len) {
975 /* server scheduled for redirection, and already assigned. We
976 * don't want to go further nor check the queue.
977 */
978 sess_change_server(s, srv); /* not really needed in fact */
979 return SRV_STATUS_OK;
980 }
981
982 /* We might have to queue this stream if the assigned server is full.
983 * We know we have to queue it into the server's queue, so if a maxqueue
984 * is set on the server, we must also check that the server's queue is
985 * not full, in which case we have to return FULL.
986 */
987 if (srv->maxconn &&
988 (srv->nbpend || srv->served >= srv_dynamic_maxconn(srv))) {
989
990 if (srv->maxqueue > 0 && srv->nbpend >= srv->maxqueue)
991 return SRV_STATUS_FULL;
992
993 p = pendconn_add(s);
994 if (p)
995 return SRV_STATUS_QUEUED;
996 else
997 return SRV_STATUS_INTERNAL;
998 }
999
1000 /* OK, we can use this server. Let's reserve our place */
1001 sess_change_server(s, srv);
1002 return SRV_STATUS_OK;
1003
1004 case SRV_STATUS_FULL:
1005 /* queue this stream into the proxy's queue */
1006 p = pendconn_add(s);
1007 if (p)
1008 return SRV_STATUS_QUEUED;
1009 else
1010 return SRV_STATUS_INTERNAL;
1011
1012 case SRV_STATUS_NOSRV:
1013 return err;
1014
1015 case SRV_STATUS_INTERNAL:
1016 return err;
1017
1018 default:
1019 return SRV_STATUS_INTERNAL;
1020 }
1021 }
1022
1023 /* If an explicit source binding is specified on the server and/or backend, and
1024 * this source makes use of the transparent proxy, then it is extracted now and
1025 * assigned to the stream's pending connection. This function assumes that an
1026 * outgoing connection has already been assigned to s->si[1].end.
1027 */
1028 static void assign_tproxy_address(struct stream *s)
1029 {
1030 #if defined(CONFIG_HAP_TRANSPARENT)
1031 struct server *srv = objt_server(s->target);
1032 struct conn_src *src;
1033 struct connection *cli_conn;
1034 struct connection *srv_conn;
1035
1036 if (objt_cs(s->si[1].end))
1037 srv_conn = cs_conn(__objt_cs(s->si[1].end));
1038 else
1039 srv_conn = objt_conn(s->si[1].end);
1040
1041 if (srv && srv->conn_src.opts & CO_SRC_BIND)
1042 src = &srv->conn_src;
1043 else if (s->be->conn_src.opts & CO_SRC_BIND)
1044 src = &s->be->conn_src;
1045 else
1046 return;
1047
1048 if (!sockaddr_alloc(&srv_conn->src))
1049 return;
1050
1051 switch (src->opts & CO_SRC_TPROXY_MASK) {
1052 case CO_SRC_TPROXY_ADDR:
1053 *srv_conn->src = src->tproxy_addr;
1054 break;
1055 case CO_SRC_TPROXY_CLI:
1056 case CO_SRC_TPROXY_CIP:
1057 /* FIXME: what can we do if the client connects in IPv6 or unix socket ? */
1058 cli_conn = objt_conn(strm_orig(s));
1059 if (cli_conn && conn_get_src(cli_conn))
1060 *srv_conn->src = *cli_conn->src;
1061 else {
1062 sockaddr_free(&srv_conn->src);
1063 }
1064 break;
1065 case CO_SRC_TPROXY_DYN:
1066 if (src->bind_hdr_occ && IS_HTX_STRM(s)) {
1067 char *vptr;
1068 size_t vlen;
1069
1070 /* bind to the IP in a header */
1071 ((struct sockaddr_in *)srv_conn->src)->sin_family = AF_INET;
1072 ((struct sockaddr_in *)srv_conn->src)->sin_port = 0;
1073 ((struct sockaddr_in *)srv_conn->src)->sin_addr.s_addr = 0;
1074 if (http_get_htx_hdr(htxbuf(&s->req.buf),
1075 ist2(src->bind_hdr_name, src->bind_hdr_len),
1076 src->bind_hdr_occ, NULL, &vptr, &vlen)) {
1077 ((struct sockaddr_in *)srv_conn->src)->sin_addr.s_addr =
1078 htonl(inetaddr_host_lim(vptr, vptr + vlen));
1079 }
1080 }
1081 break;
1082 default:
1083 sockaddr_free(&srv_conn->src);
1084 }
1085 #endif
1086 }
1087
1088 #if defined(USE_OPENSSL) && defined(TLSEXT_TYPE_application_layer_protocol_negotiation)
1089 /*
1090 * Pick the right mux once the connection is established; we should now have
1091 * the ALPN if available, so we are able to choose. In this specific case
1092 * the connection's context is &si[i].end.
1093 */
1094 static int conn_complete_server(struct connection *conn)
1095 {
1096 struct conn_stream *cs = NULL;
1097 struct stream *s = container_of(conn->ctx, struct stream, si[1].end);
1098 struct server *srv;
1099
1100 task_wakeup(s->task, TASK_WOKEN_IO);
1101 conn_clear_xprt_done_cb(conn);
1102 /* Check whether the connection has just been established. */
1103 if (unlikely(!(conn->flags & (CO_FL_WAIT_L4_CONN | CO_FL_WAIT_L6_CONN | CO_FL_CONNECTED))))
1104 conn->flags |= CO_FL_CONNECTED;
1105
1106 if (conn->flags & CO_FL_ERROR)
1107 goto fail;
1108 si_detach_endpoint(&s->si[1]);
1109 cs = si_alloc_cs(&s->si[1], conn);
1110 if (!cs)
1111 goto fail;
1112 if (conn_install_mux_be(conn, cs, s->sess) < 0)
1113 goto fail;
1114 srv = objt_server(s->target);
1115 if (srv && ((s->be->options & PR_O_REUSE_MASK) != PR_O_REUSE_NEVR) &&
1116 conn->mux->avail_streams(conn) > 0)
1117 LIST_ADD(&srv->idle_conns[tid], &conn->list);
1118
1119 return 0;
1120
1121 fail:
1122 si_detach_endpoint(&s->si[1]);
1123
1124 if (cs)
1125 cs_free(cs);
1126 /* kill the connection now */
1127 conn_stop_tracking(conn);
1128 conn_full_close(conn);
1129 conn_free(conn);
1130 /* Let process_stream know it went wrong */
1131 s->si[1].flags |= SI_FL_ERR;
1132 return -1;
1133 }
1134 #endif
1135
1136
1137 /*
1138 * This function initiates a connection to the server assigned to this stream
1139 * (s->target, s->target_addr). It will assign a server if none
1140 * is assigned yet.
1141 * It can return one of :
1142 * - SF_ERR_NONE if everything's OK
1143 * - SF_ERR_SRVTO if there are no more servers
1144 * - SF_ERR_SRVCL if the connection was refused by the server
1145 * - SF_ERR_PRXCOND if the connection has been limited by the proxy (maxconn)
1146 * - SF_ERR_RESOURCE if a system resource is lacking (eg: fd limits, ports, ...)
1147 * - SF_ERR_INTERNAL for any other purely internal errors
1148 * Additionally, in the case of SF_ERR_RESOURCE, an emergency log will be emitted.
1149 * The server-facing stream interface is expected to hold a pre-allocated connection
1150 * in s->si[1].conn.
1151 */
1152 int connect_server(struct stream *s)
1153 {
1154 struct connection *cli_conn = objt_conn(strm_orig(s));
1155 struct connection *srv_conn = NULL;
1156 struct connection *old_conn = NULL;
1157 struct conn_stream *srv_cs = NULL;
1158 struct sess_srv_list *srv_list;
1159 struct server *srv;
1160 int reuse = 0;
1161 int reuse_orphan = 0;
1162 int init_mux = 0;
1163 int alloced_cs = 0;
1164 int err;
1165
1166
1167 /* This will catch some corner cases such as lying connections resulting from
1168 * retries or connect timeouts but will rarely trigger.
1169 */
1170 si_release_endpoint(&s->si[1]);
1171
1172 /* first, search for a matching connection in the session's idle conns */
1173 list_for_each_entry(srv_list, &s->sess->srv_list, srv_list) {
1174 if (srv_list->target == s->target) {
1175 list_for_each_entry(srv_conn, &srv_list->conn_list, session_list) {
1176 if (conn_xprt_ready(srv_conn) &&
1177 srv_conn->mux && (srv_conn->mux->avail_streams(srv_conn) > 0)) {
1178 reuse = 1;
1179 break;
1180 }
1181 }
1182 break;
1183 }
1184 }
1185
1186 if (!reuse) {
1187 /* no connection was found in our session's list. Pick any
1188 * random one that we could trade against another one.
1189 */
1190 srv_conn = NULL;
1191 if (!LIST_ISEMPTY(&s->sess->srv_list)) {
1192 srv_list = LIST_ELEM(s->sess->srv_list.n, struct sess_srv_list *, srv_list);
1193 if (!LIST_ISEMPTY(&srv_list->conn_list))
1194 srv_conn = LIST_ELEM(srv_list->conn_list.n, struct connection *, session_list);
1195 }
1196
1197 }
1198 /* OK at this point we have this :
1199 * - srv_conn points to an existing connection or NULL
1200 * - if reuse is set, srv_conn holds a valid connection, otherwise it
1201 * points to any of our old connections we may want to trade against
1202 * another one
1203 */
1204
1205 old_conn = srv_conn;
1206
1207 srv = objt_server(s->target);
1208
1209 if (srv && !reuse) {
1210 srv_conn = NULL;
1211
1212 /* Below we pick connections from the safe or idle lists based
1213 * on the strategy, the fact that this is a first or second
1214 * (retryable) request, with the indicated priority (1 or 2) :
1215 *
1216 * SAFE AGGR ALWS
1217 *
1218 * +-----+-----+ +-----+-----+ +-----+-----+
1219 * req| 1st | 2nd | req| 1st | 2nd | req| 1st | 2nd |
1220 * ----+-----+-----+ ----+-----+-----+ ----+-----+-----+
1221 * safe| - | 2 | safe| 1 | 2 | safe| 1 | 2 |
1222 * ----+-----+-----+ ----+-----+-----+ ----+-----+-----+
1223 * idle| - | 1 | idle| - | 1 | idle| 2 | 1 |
1224 * ----+-----+-----+ ----+-----+-----+ ----+-----+-----+
1225 *
1226 * Idle conns are necessarily looked up on the same thread so
1227 * that there are no concurrency issues.
1228 */
1229 if (srv->idle_conns && !LIST_ISEMPTY(&srv->idle_conns[tid]) &&
1230 ((s->be->options & PR_O_REUSE_MASK) != PR_O_REUSE_NEVR &&
1231 s->txn && (s->txn->flags & TX_NOT_FIRST))) {
1232 srv_conn = LIST_ELEM(srv->idle_conns[tid].n, struct connection *, list);
1233 }
1234 else if (srv->safe_conns && !LIST_ISEMPTY(&srv->safe_conns[tid]) &&
1235 ((s->txn && (s->txn->flags & TX_NOT_FIRST)) ||
1236 (s->be->options & PR_O_REUSE_MASK) >= PR_O_REUSE_AGGR)) {
1237 srv_conn = LIST_ELEM(srv->safe_conns[tid].n, struct connection *, list);
1238 }
1239 else if (srv->idle_conns && !LIST_ISEMPTY(&srv->idle_conns[tid]) &&
1240 (s->be->options & PR_O_REUSE_MASK) == PR_O_REUSE_ALWS) {
1241 srv_conn = LIST_ELEM(srv->idle_conns[tid].n, struct connection *, list);
1242 } else if (srv->idle_orphan_conns && !MT_LIST_ISEMPTY(&srv->idle_orphan_conns[tid]) &&
1243 (((s->be->options & PR_O_REUSE_MASK) == PR_O_REUSE_ALWS) ||
1244 (((s->be->options & PR_O_REUSE_MASK) != PR_O_REUSE_NEVR) &&
1245 s->txn && (s->txn->flags & TX_NOT_FIRST)))) {
1246 srv_conn = MT_LIST_POP(&srv->idle_orphan_conns[tid],
1247 struct connection *, list);
1248 if (srv_conn)
1249 reuse_orphan = 1;
1250 }
1251
1252 /* If we've picked a connection from the pool, we now have to
1253 * detach it. We may have to get rid of the previous idle
1254 * connection we had, so for this we try to swap it with the
1255 * other owner's. That way it may remain alive for others to
1256 * pick.
1257 */
1258 if (srv_conn)
1259 reuse = 1;
1260 }
1261
1262
1263 /* here reuse might have been set above, indicating srv_conn finally
1264 * is OK.
1265 */
1266 if (reuse) {
1267 /* Disable connection reuse if a dynamic source is used.
1268 * As long as we don't share connections between servers,
1269 * we don't need to disable connection reuse on non-idempotent
1270 * requests nor when the PROXY protocol is used.
1271 */
1272 if (srv && srv->conn_src.opts & CO_SRC_BIND) {
1273 if ((srv->conn_src.opts & CO_SRC_TPROXY_MASK) == CO_SRC_TPROXY_DYN)
1274 reuse = 0;
1275 }
1276 else if (s->be->conn_src.opts & CO_SRC_BIND) {
1277 if ((s->be->conn_src.opts & CO_SRC_TPROXY_MASK) == CO_SRC_TPROXY_DYN)
1278 reuse = 0;
1279 }
1280 }
1281
1282 if (((!reuse || (srv_conn && !(srv_conn->flags & CO_FL_CONNECTED)))
1283 && ha_used_fds > global.tune.pool_high_count) && srv && srv->idle_orphan_conns) {
1284 struct connection *tokill_conn;
1285
1286 /* We can't reuse a connection, and we have more FDs than deemed
1287 * acceptable; attempt to kill an idling connection.
1288 */
1289 /* First, try from our own idle list */
1290 tokill_conn = MT_LIST_POP(&srv->idle_orphan_conns[tid],
1291 struct connection *, list);
1292 if (tokill_conn)
1293 tokill_conn->mux->destroy(tokill_conn->ctx);
1294 /* If not, iterate over the other threads' idling pools, and try to grab one */
1295 else {
1296 int i;
1297
1298 for (i = 0; i < global.nbthread; i++) {
1299 if (i == tid)
1300 continue;
1301
1302 // just silence stupid gcc which reports an absurd
1303 // out-of-bounds warning for <i> which is always
1304 // exactly zero without threads, but it seems to
1305 // see it possibly larger.
1306 ALREADY_CHECKED(i);
1307
1308 HA_SPIN_LOCK(OTHER_LOCK, &toremove_lock[i]);
1309 tokill_conn = MT_LIST_POP(&srv->idle_orphan_conns[i],
1310 struct connection *, list);
1311 if (tokill_conn) {
1312 /* We got one; put it into the concerned thread's to-kill list, and wake its kill task */
1313
1314 MT_LIST_ADDQ(&toremove_connections[i],
1315 (struct mt_list *)&tokill_conn->list);
1316 task_wakeup(idle_conn_cleanup[i], TASK_WOKEN_OTHER);
1317 HA_SPIN_UNLOCK(OTHER_LOCK, &toremove_lock[i]);
1318 break;
1319 }
1320 HA_SPIN_UNLOCK(OTHER_LOCK, &toremove_lock[i]);
1321 }
1322 }
1323
1324 }
1325 /* If we're really reusing the connection, remove it from the orphan
1326 * list and add it back to the idle list.
1327 */
1328 if (reuse) {
1329 if (reuse_orphan) {
1330 srv_conn->idle_time = 0;
1331 _HA_ATOMIC_SUB(&srv->curr_idle_conns, 1);
1332 __ha_barrier_atomic_store();
1333 srv->curr_idle_thr[tid]--;
1334 LIST_ADDQ(&srv->idle_conns[tid], &srv_conn->list);
1335 }
1336 else {
1337 if (srv_conn->flags & CO_FL_SESS_IDLE) {
1338 struct session *sess = srv_conn->owner;
1339
1340 srv_conn->flags &= ~CO_FL_SESS_IDLE;
1341 sess->idle_conns--;
1342 }
1343 }
1344 }
1345
1346 /* We're about to use another connection, let the mux know we're
1347 * done with this one.
1348 */
1349 if (old_conn != srv_conn && old_conn && reuse && !reuse_orphan) {
1350 struct session *sess = srv_conn->owner;
1351
1352 if (sess) {
1353 if (old_conn && !(old_conn->flags & CO_FL_PRIVATE) &&
1354 old_conn->mux != NULL) {
1355 if (old_conn->flags & CO_FL_SESS_IDLE)
1356 s->sess->idle_conns--;
1357 session_unown_conn(s->sess, old_conn);
1358 old_conn->owner = sess;
1359 if (!session_add_conn(sess, old_conn, old_conn->target)) {
1360 old_conn->flags &= ~CO_FL_SESS_IDLE;
1361 old_conn->owner = NULL;
1362 old_conn->mux->destroy(old_conn->ctx);
1363 } else
1364 session_check_idle_conn(sess, old_conn);
1365 }
1366 }
1367 }
1368
1369 if (reuse) {
1370 if (srv_conn->mux) {
1371 int avail = srv_conn->mux->avail_streams(srv_conn);
1372
1373 if (avail <= 1) {
1374 /* No more streams available, remove it from the list */
1375 LIST_DEL(&srv_conn->list);
1376 LIST_INIT(&srv_conn->list);
1377 }
1378
1379 if (avail >= 1) {
1380 srv_cs = srv_conn->mux->attach(srv_conn, s->sess);
1381 if (srv_cs) {
1382 alloced_cs = 1;
1383 si_attach_cs(&s->si[1], srv_cs);
1384 } else
1385 srv_conn = NULL;
1386 }
1387 else
1388 srv_conn = NULL;
1389 }
1390 /* otherwise srv_conn is left intact */
1391 }
1392 else
1393 srv_conn = NULL;
1394
1395 /* no reuse or failed to reuse the connection above, pick a new one */
1396 if (!srv_conn) {
1397 srv_conn = conn_new();
1398 if (srv_conn)
1399 srv_conn->target = s->target;
1400 srv_cs = NULL;
1401 }
1402
1403 if (srv_conn && old_conn != srv_conn) {
1404 if (srv_conn->owner)
1405 session_unown_conn(srv_conn->owner, srv_conn);
1406 srv_conn->owner = s->sess;
1407 if (!session_add_conn(s->sess, srv_conn, srv_conn->target)) {
1408 /* If we failed to attach the connection, detach the
1409 * conn_stream, possibly destroying the connection */
1410 if (alloced_cs)
1411 si_release_endpoint(&s->si[1]);
1412 srv_conn->owner = NULL;
1413 if (srv_conn->mux && !srv_add_to_idle_list(objt_server(srv_conn->target), srv_conn))
1414 /* The server doesn't want it, let's kill the connection right away */
1415 srv_conn->mux->destroy(srv_conn->ctx);
1416 srv_conn = NULL;
1417
1418 }
1419 }
1420
1421 if (!srv_conn || !sockaddr_alloc(&srv_conn->dst)) {
1422 if (srv_conn)
1423 conn_free(srv_conn);
1424 return SF_ERR_RESOURCE;
1425 }
1426
1427 if (!(s->flags & SF_ADDR_SET)) {
1428 err = assign_server_address(s);
1429 if (err != SRV_STATUS_OK) {
1430 conn_free(srv_conn);
1431 return SF_ERR_INTERNAL;
1432 }
1433 }
1434
1435 /* copy the target address into the connection */
1436 *srv_conn->dst = *s->target_addr;
1437
1438 /* Copy network namespace from client connection */
1439 srv_conn->proxy_netns = cli_conn ? cli_conn->proxy_netns : NULL;
1440
1441 if (!conn_xprt_ready(srv_conn) && !srv_conn->mux) {
1442 /* set the correct protocol on the output stream interface */
1443 if (srv)
1444 conn_prepare(srv_conn, protocol_by_family(srv_conn->dst->ss_family), srv->xprt);
1445 else if (obj_type(s->target) == OBJ_TYPE_PROXY) {
1446 /* proxies exclusively run on raw_sock right now */
1447 conn_prepare(srv_conn, protocol_by_family(srv_conn->dst->ss_family), xprt_get(XPRT_RAW));
1448 if (!(srv_conn->ctrl)) {
1449 conn_free(srv_conn);
1450 return SF_ERR_INTERNAL;
1451 }
1452 }
1453 else {
1454 conn_free(srv_conn);
1455 return SF_ERR_INTERNAL; /* how did we get there ? */
1456 }
1457
1458 #if defined(USE_OPENSSL) && defined(TLSEXT_TYPE_application_layer_protocol_negotiation)
1459 if (!srv ||
1460 (srv->use_ssl != 1 || (!(srv->ssl_ctx.alpn_str) && !(srv->ssl_ctx.npn_str)) ||
1461 srv->mux_proto || s->be->mode != PR_MODE_HTTP))
1462 #endif
1463 {
1464 srv_cs = objt_cs(s->si[1].end);
1465 if (!srv_cs || srv_cs->conn != srv_conn)
1466 srv_cs = si_alloc_cs(&s->si[1], srv_conn);
1467 if (!srv_cs) {
1468 conn_free(srv_conn);
1469 return SF_ERR_RESOURCE;
1470 }
1471 init_mux = 1;
1472 }
1473 #if defined(USE_OPENSSL) && defined(TLSEXT_TYPE_application_layer_protocol_negotiation)
1474 else {
1475 srv_conn->ctx = &s->si[1].end;
1476 /* Store the connection into the stream interface,
1477 * while we still don't have a mux, so that if the
1478 * stream is destroyed before the connection is
1479 * established, we have a chance to destroy it even
1480 * if it is no longer referenced in the session.
1481 */
1482 s->si[1].end = &srv_conn->obj_type;
1483 conn_set_xprt_done_cb(srv_conn, conn_complete_server);
1484 }
1485
1486 #endif
1487 /* process the case where the server requires the PROXY protocol to be sent */
1488 srv_conn->send_proxy_ofs = 0;
1489
1490 if (srv && srv->pp_opts) {
1491 srv_conn->flags |= CO_FL_PRIVATE;
1492 srv_conn->flags |= CO_FL_SEND_PROXY;
1493 srv_conn->send_proxy_ofs = 1; /* must compute size */
1494 if (cli_conn)
1495 conn_get_dst(cli_conn);
1496 }
1497
1498 assign_tproxy_address(s);
1499
1500 if (srv && (srv->flags & SRV_F_SOCKS4_PROXY)) {
1501 srv_conn->send_proxy_ofs = 1;
1502 srv_conn->flags |= CO_FL_SOCKS4;
1503 }
1504 }
1505 else if (!conn_xprt_ready(srv_conn)) {
1506 if (srv_conn->mux->reset)
1507 srv_conn->mux->reset(srv_conn);
1508 }
1509 else {
1510 /* Only consider we're doing reuse if the connection was
1511 * ready.
1512 */
1513 if (srv_conn->mux->ctl(srv_conn, MUX_STATUS, NULL) & MUX_STATUS_READY)
1514 s->flags |= SF_SRV_REUSED;
1515 }
1516
1517 /* flag for logging source ip/port */
1518 if (strm_fe(s)->options2 & PR_O2_SRC_ADDR)
1519 s->si[1].flags |= SI_FL_SRC_ADDR;
1520
1521 /* disable lingering */
1522 if (s->be->options & PR_O_TCP_NOLING)
1523 s->si[1].flags |= SI_FL_NOLINGER;
1524
1525 if (s->flags & SF_SRV_REUSED) {
1526 _HA_ATOMIC_ADD(&s->be->be_counters.reuse, 1);
1527 if (srv)
1528 _HA_ATOMIC_ADD(&srv->counters.reuse, 1);
1529 } else {
1530 _HA_ATOMIC_ADD(&s->be->be_counters.connect, 1);
1531 if (srv)
1532 _HA_ATOMIC_ADD(&srv->counters.connect, 1);
1533 }
1534
1535 err = si_connect(&s->si[1], srv_conn);
1536 if (err != SF_ERR_NONE)
1537 return err;
1538
1539 /* The CO_FL_SEND_PROXY flag may have been set by the connect method,
1540 * if so, add our handshake pseudo-XPRT now.
1541 */
1542 if ((srv_conn->flags & CO_FL_HANDSHAKE_NOSSL)) {
1543 if (xprt_add_hs(srv_conn) < 0) {
1544 conn_full_close(srv_conn);
1545 return SF_ERR_INTERNAL;
1546 }
1547 }
1548
1549 /* We have to defer the mux initialization until after si_connect()
1550 * has been called, as we need the xprt to have been properly
1551 * initialized, or any attempt to recv during the mux init may
1552 * fail, and flag the connection as CO_FL_ERROR.
1553 */
1554 if (init_mux) {
1555 if (conn_install_mux_be(srv_conn, srv_cs, s->sess) < 0) {
1556 conn_full_close(srv_conn);
1557 return SF_ERR_INTERNAL;
1558 }
1559 /* If we're doing http-reuse always, and the connection
1560 * is an http2 connection, add it to the available list,
1561 * so that others can use it right away.
1562 */
1563 if (srv && ((s->be->options & PR_O_REUSE_MASK) == PR_O_REUSE_ALWS) &&
1564 srv_conn->mux->avail_streams(srv_conn) > 0)
1565 LIST_ADD(&srv->idle_conns[tid], &srv_conn->list);
1566 }
1567
1568 #if USE_OPENSSL && (defined(OPENSSL_IS_BORINGSSL) || (HA_OPENSSL_VERSION_NUMBER >= 0x10101000L))
1569
1570 if (!reuse && cli_conn && srv && srv_conn->mux &&
1571 (srv->ssl_ctx.options & SRV_SSL_O_EARLY_DATA) &&
1572 /* Only attempt to use early data if either the client sent
1573 * early data, so that we know it can handle a 425, or if
1574 * we are allowed to retry requests on early data failure, and
1575 * it's our first try
1576 */
1577 ((cli_conn->flags & CO_FL_EARLY_DATA) ||
1578 ((s->be->retry_type & PR_RE_EARLY_ERROR) &&
1579 s->si[1].conn_retries == s->be->conn_retries)) &&
1580 !channel_is_empty(si_oc(&s->si[1])) &&
1581 srv_conn->flags & CO_FL_SSL_WAIT_HS)
1582 srv_conn->flags &= ~(CO_FL_SSL_WAIT_HS | CO_FL_WAIT_L6_CONN);
1583 #endif
1584
1585 /* set connect timeout */
1586 s->si[1].exp = tick_add_ifset(now_ms, s->be->timeout.connect);
1587
1588 if (srv) {
1589 int count;
1590
1591 s->flags |= SF_CURR_SESS;
1592 count = _HA_ATOMIC_ADD(&srv->cur_sess, 1);
1593 HA_ATOMIC_UPDATE_MAX(&srv->counters.cur_sess_max, count);
1594 if (s->be->lbprm.server_take_conn) {
1595 HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
1596 s->be->lbprm.server_take_conn(srv);
1597 HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
1598 }
1599
1600 #ifdef USE_OPENSSL
1601 if (srv->ssl_ctx.sni) {
1602 struct sample *smp;
1603
1604 smp = sample_fetch_as_type(s->be, s->sess, s, SMP_OPT_DIR_REQ | SMP_OPT_FINAL,
1605 srv->ssl_ctx.sni, SMP_T_STR);
1606 if (smp_make_safe(smp)) {
1607 ssl_sock_set_servername(srv_conn,
1608 smp->data.u.str.area);
1609 srv_conn->flags |= CO_FL_PRIVATE;
1610 }
1611 }
1612 #endif /* USE_OPENSSL */
1613
1614 }
1615
1616 return SF_ERR_NONE; /* connection is OK */
1617 }
1618
1619
1620 /* This function performs the "redispatch" part of a connection attempt. It
1621 * will assign a server if required, queue the connection if required, and
1622 * handle errors that might arise at this level. It can change the server
1623 * state. It will return 1 if it encounters an error, switches the server
1624 * state, or has to queue a connection. Otherwise, it will return 0 indicating
1625 * that the connection is ready to use.
1626 */
1627
1628 int srv_redispatch_connect(struct stream *s)
1629 {
1630 struct server *srv;
1631 int conn_err;
1632
1633 /* We know that we don't have any connection pending, so we will
1634 * try to get a new one, and wait in this state if it's queued
1635 */
1636 redispatch:
1637 conn_err = assign_server_and_queue(s);
1638 srv = objt_server(s->target);
1639
1640 switch (conn_err) {
1641 case SRV_STATUS_OK:
1642 break;
1643
1644 case SRV_STATUS_FULL:
1645 /* The server has reached its maxqueue limit. Either PR_O_REDISP is set
1646 * and we can redispatch to another server, or it is not and we return
1647 * 503. This only makes sense in DIRECT mode however, because normal LB
1648 * algorithms would never select such a server, and hash algorithms
1649 * would bring us on the same server again. Note that s->target is set
1650 * in this case.
1651 */
1652 if (((s->flags & (SF_DIRECT|SF_FORCE_PRST)) == SF_DIRECT) &&
1653 (s->be->options & PR_O_REDISP)) {
1654 s->flags &= ~(SF_DIRECT | SF_ASSIGNED | SF_ADDR_SET);
1655 sockaddr_free(&s->target_addr);
1656 goto redispatch;
1657 }
1658
1659 if (!s->si[1].err_type) {
1660 s->si[1].err_type = SI_ET_QUEUE_ERR;
1661 }
1662
1663 _HA_ATOMIC_ADD(&srv->counters.failed_conns, 1);
1664 _HA_ATOMIC_ADD(&s->be->be_counters.failed_conns, 1);
1665 return 1;
1666
1667 case SRV_STATUS_NOSRV:
1668 /* note: it is guaranteed that srv == NULL here */
1669 if (!s->si[1].err_type) {
1670 s->si[1].err_type = SI_ET_CONN_ERR;
1671 }
1672
1673 _HA_ATOMIC_ADD(&s->be->be_counters.failed_conns, 1);
1674 return 1;
1675
1676 case SRV_STATUS_QUEUED:
1677 s->si[1].exp = tick_add_ifset(now_ms, s->be->timeout.queue);
1678 s->si[1].state = SI_ST_QUE;
1679 /* do nothing else and do not wake any other stream up */
1680 return 1;
1681
1682 case SRV_STATUS_INTERNAL:
1683 default:
1684 if (!s->si[1].err_type) {
1685 s->si[1].err_type = SI_ET_CONN_OTHER;
1686 }
1687
1688 if (srv)
1689 srv_inc_sess_ctr(srv);
1690 if (srv)
1691 srv_set_sess_last(srv);
1692 if (srv)
1693 _HA_ATOMIC_ADD(&srv->counters.failed_conns, 1);
1694 _HA_ATOMIC_ADD(&s->be->be_counters.failed_conns, 1);
1695
1696 /* release other streams waiting for this server */
1697 if (may_dequeue_tasks(srv, s->be))
1698 process_srv_queue(srv);
1699 return 1;
1700 }
1701 /* if we get here, it's because we got SRV_STATUS_OK, which also
1702 * means that the connection has not been queued.
1703 */
1704 return 0;
1705 }
1706
1707 /* sends a log message when a backend goes down, and also sets last
1708 * change date.
1709 */
1710 void set_backend_down(struct proxy *be)
1711 {
1712 be->last_change = now.tv_sec;
1713 _HA_ATOMIC_ADD(&be->down_trans, 1);
1714
1715 if (!(global.mode & MODE_STARTING)) {
1716 ha_alert("%s '%s' has no server available!\n", proxy_type_str(be), be->id);
1717 send_log(be, LOG_EMERG, "%s %s has no server available!\n", proxy_type_str(be), be->id);
1718 }
1719 }
1720
1721 /* Apply RDP cookie persistence to the current stream. For this, the function
1722 * tries to extract an RDP cookie from the request buffer, and look for the
1723 * matching server in the list. If the server is found, it is assigned to the
1724 * stream. This always returns 1, and the analyser removes itself from the
1725 * list. Nothing is performed if a server was already assigned.
1726 */
1727 int tcp_persist_rdp_cookie(struct stream *s, struct channel *req, int an_bit)
1728 {
1729 struct proxy *px = s->be;
1730 int ret;
1731 struct sample smp;
1732 struct server *srv = px->srv;
1733 uint16_t port;
1734 uint32_t addr;
1735 char *p;
1736
1737 DBG_TRACE_ENTER(STRM_EV_STRM_ANA|STRM_EV_TCP_ANA, s);
1738
1739 if (s->flags & SF_ASSIGNED)
1740 goto no_cookie;
1741
1742 memset(&smp, 0, sizeof(smp));
1743
1744 ret = fetch_rdp_cookie_name(s, &smp, s->be->rdp_cookie_name, s->be->rdp_cookie_len);
1745 if (ret == 0 || (smp.flags & SMP_F_MAY_CHANGE) || smp.data.u.str.data == 0)
1746 goto no_cookie;
1747
1748 	/* An RDP cookie was detected (e.g. by an ACL); the string ends with <cr><lf>.
1749 	 * The cookie format is <ip> "." <port>, where <ip> is the integer corresponding
1750 	 * to the server's IPv4 address in network byte order, and <port> is the integer
1751 	 * corresponding to the server's port in network byte order.
1752 	 */
1753 addr = strtoul(smp.data.u.str.area, &p, 10);
1754 if (*p != '.')
1755 goto no_cookie;
1756 p++;
1757
1758 port = ntohs(strtoul(p, &p, 10));
1759 if (*p != '.')
1760 goto no_cookie;
1761
1762 s->target = NULL;
1763 while (srv) {
1764 if (srv->addr.ss_family == AF_INET &&
1765 port == srv->svc_port &&
1766 addr == ((struct sockaddr_in *)&srv->addr)->sin_addr.s_addr) {
1767 if ((srv->cur_state != SRV_ST_STOPPED) || (px->options & PR_O_PERSIST)) {
1768 /* we found the server and it is usable */
1769 s->flags |= SF_DIRECT | SF_ASSIGNED;
1770 s->target = &srv->obj_type;
1771 break;
1772 }
1773 }
1774 srv = srv->next;
1775 }
1776
1777 no_cookie:
1778 req->analysers &= ~an_bit;
1779 req->analyse_exp = TICK_ETERNITY;
1780 DBG_TRACE_LEAVE(STRM_EV_STRM_ANA|STRM_EV_TCP_ANA, s);
1781 return 1;
1782 }
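
/* A minimal configuration sketch showing how this analyser is typically
 * reached (backend and server names and addresses are hypothetical); it
 * follows the documented RDP-cookie persistence setup and is shown only for
 * illustration:
 *
 *     backend rdp_farm
 *         mode tcp
 *         balance rdp-cookie
 *         persist rdp-cookie
 *         tcp-request inspect-delay 5s
 *         tcp-request content accept if RDP_COOKIE
 *         server ts1 192.168.0.10:3389
 *         server ts2 192.168.0.11:3389
 */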
1783
1784 int be_downtime(struct proxy *px) {
1785 if (px->lbprm.tot_weight && px->last_change < now.tv_sec) // ignore negative time
1786 return px->down_time;
1787
1788 return now.tv_sec - px->last_change + px->down_time;
1789 }
1790
1791 /*
1792  * This function returns a string naming the load balancing algorithm of the
1793  * proxy, in a format suitable for use on the stats page.
1794 */
1795
1796 const char *backend_lb_algo_str(int algo) {
1797
1798 if (algo == BE_LB_ALGO_RR)
1799 return "roundrobin";
1800 else if (algo == BE_LB_ALGO_SRR)
1801 return "static-rr";
1802 else if (algo == BE_LB_ALGO_FAS)
1803 return "first";
1804 else if (algo == BE_LB_ALGO_LC)
1805 return "leastconn";
1806 else if (algo == BE_LB_ALGO_SH)
1807 return "source";
1808 else if (algo == BE_LB_ALGO_UH)
1809 return "uri";
1810 else if (algo == BE_LB_ALGO_PH)
1811 return "url_param";
1812 else if (algo == BE_LB_ALGO_HH)
1813 return "hdr";
1814 	else if (algo == BE_LB_ALGO_RCH)
1815 		return "rdp-cookie";
	else if (algo == BE_LB_ALGO_RND)
		return "random";
1816 else if (algo == BE_LB_ALGO_NONE)
1817 return "none";
1818 else
1819 return "unknown";
1820 }
1821
1822 /* This function parses a "balance" statement in a backend section describing
1823  * <curproxy>. It returns -1 on error, otherwise zero. On error, it writes an
1824  * error message into the <err> buffer, which is automatically allocated and
1825  * must therefore be passed as a NULL pointer. The trailing '\n' is not
1826  * written. The function must be called with <args> pointing to the first
1827  * word after "balance".
1828  */
1829 int backend_parse_balance(const char **args, char **err, struct proxy *curproxy)
1830 {
1831 if (!*(args[0])) {
1832 /* if no option is set, use round-robin by default */
1833 curproxy->lbprm.algo &= ~BE_LB_ALGO;
1834 curproxy->lbprm.algo |= BE_LB_ALGO_RR;
1835 return 0;
1836 }
1837
1838 if (!strcmp(args[0], "roundrobin")) {
1839 curproxy->lbprm.algo &= ~BE_LB_ALGO;
1840 curproxy->lbprm.algo |= BE_LB_ALGO_RR;
1841 }
1842 else if (!strcmp(args[0], "static-rr")) {
1843 curproxy->lbprm.algo &= ~BE_LB_ALGO;
1844 curproxy->lbprm.algo |= BE_LB_ALGO_SRR;
1845 }
1846 else if (!strcmp(args[0], "first")) {
1847 curproxy->lbprm.algo &= ~BE_LB_ALGO;
1848 curproxy->lbprm.algo |= BE_LB_ALGO_FAS;
1849 }
1850 else if (!strcmp(args[0], "leastconn")) {
1851 curproxy->lbprm.algo &= ~BE_LB_ALGO;
1852 curproxy->lbprm.algo |= BE_LB_ALGO_LC;
1853 }
1854 else if (!strncmp(args[0], "random", 6)) {
1855 curproxy->lbprm.algo &= ~BE_LB_ALGO;
1856 curproxy->lbprm.algo |= BE_LB_ALGO_RND;
1857 curproxy->lbprm.arg_opt1 = 2;
1858
1859 if (*(args[0] + 6) == '(' && *(args[0] + 7) != ')') { /* number of draws */
1860 const char *beg;
1861 char *end;
1862
1863 beg = args[0] + 7;
1864 curproxy->lbprm.arg_opt1 = strtol(beg, &end, 0);
1865
1866 if (*end != ')') {
1867 if (!*end)
1868 memprintf(err, "random : missing closing parenthesis.");
1869 else
1870 memprintf(err, "random : unexpected character '%c' after argument.", *end);
1871 return -1;
1872 }
1873
1874 if (curproxy->lbprm.arg_opt1 < 1) {
1875 memprintf(err, "random : number of draws must be at least 1.");
1876 return -1;
1877 }
1878 }
1879 }
1880 else if (!strcmp(args[0], "source")) {
1881 curproxy->lbprm.algo &= ~BE_LB_ALGO;
1882 curproxy->lbprm.algo |= BE_LB_ALGO_SH;
1883 }
1884 else if (!strcmp(args[0], "uri")) {
1885 int arg = 1;
1886
1887 curproxy->lbprm.algo &= ~BE_LB_ALGO;
1888 curproxy->lbprm.algo |= BE_LB_ALGO_UH;
1889 curproxy->lbprm.arg_opt1 = 0; // "whole", "path-only"
1890 curproxy->lbprm.arg_opt2 = 0; // "len"
1891 curproxy->lbprm.arg_opt3 = 0; // "depth"
1892
1893 while (*args[arg]) {
1894 if (!strcmp(args[arg], "len")) {
1895 if (!*args[arg+1] || (atoi(args[arg+1]) <= 0)) {
1896 memprintf(err, "%s : '%s' expects a positive integer (got '%s').", args[0], args[arg], args[arg+1]);
1897 return -1;
1898 }
1899 curproxy->lbprm.arg_opt2 = atoi(args[arg+1]);
1900 arg += 2;
1901 }
1902 else if (!strcmp(args[arg], "depth")) {
1903 if (!*args[arg+1] || (atoi(args[arg+1]) <= 0)) {
1904 memprintf(err, "%s : '%s' expects a positive integer (got '%s').", args[0], args[arg], args[arg+1]);
1905 return -1;
1906 }
1907 /* hint: we store the position of the ending '/' (depth+1) so
1908 * that we avoid a comparison while computing the hash.
1909 */
1910 curproxy->lbprm.arg_opt3 = atoi(args[arg+1]) + 1;
1911 arg += 2;
1912 }
1913 else if (!strcmp(args[arg], "whole")) {
1914 curproxy->lbprm.arg_opt1 |= 1;
1915 arg += 1;
1916 }
1917 else if (!strcmp(args[arg], "path-only")) {
1918 curproxy->lbprm.arg_opt1 |= 2;
1919 arg += 1;
1920 }
1921 else {
1922 memprintf(err, "%s only accepts parameters 'len', 'depth', 'path-only', and 'whole' (got '%s').", args[0], args[arg]);
1923 return -1;
1924 }
1925 }
1926 }
1927 else if (!strcmp(args[0], "url_param")) {
1928 if (!*args[1]) {
1929 			memprintf(err, "%s requires a URL parameter name.", args[0]);
1930 return -1;
1931 }
1932 curproxy->lbprm.algo &= ~BE_LB_ALGO;
1933 curproxy->lbprm.algo |= BE_LB_ALGO_PH;
1934
1935 free(curproxy->lbprm.arg_str);
1936 curproxy->lbprm.arg_str = strdup(args[1]);
1937 curproxy->lbprm.arg_len = strlen(args[1]);
1938 if (*args[2]) {
1939 if (strcmp(args[2], "check_post")) {
1940 memprintf(err, "%s only accepts 'check_post' modifier (got '%s').", args[0], args[2]);
1941 return -1;
1942 }
1943 }
1944 }
1945 else if (!strncmp(args[0], "hdr(", 4)) {
1946 const char *beg, *end;
1947
1948 beg = args[0] + 4;
1949 end = strchr(beg, ')');
1950
1951 if (!end || end == beg) {
1952 			memprintf(err, "hdr requires an HTTP header field name.");
1953 return -1;
1954 }
1955
1956 curproxy->lbprm.algo &= ~BE_LB_ALGO;
1957 curproxy->lbprm.algo |= BE_LB_ALGO_HH;
1958
1959 free(curproxy->lbprm.arg_str);
1960 curproxy->lbprm.arg_len = end - beg;
1961 curproxy->lbprm.arg_str = my_strndup(beg, end - beg);
1962 curproxy->lbprm.arg_opt1 = 0;
1963
1964 if (*args[1]) {
1965 if (strcmp(args[1], "use_domain_only")) {
1966 memprintf(err, "%s only accepts 'use_domain_only' modifier (got '%s').", args[0], args[1]);
1967 return -1;
1968 }
1969 curproxy->lbprm.arg_opt1 = 1;
1970 }
1971 }
1972 else if (!strncmp(args[0], "rdp-cookie", 10)) {
1973 curproxy->lbprm.algo &= ~BE_LB_ALGO;
1974 curproxy->lbprm.algo |= BE_LB_ALGO_RCH;
1975
1976 if ( *(args[0] + 10 ) == '(' ) { /* cookie name */
1977 const char *beg, *end;
1978
1979 beg = args[0] + 11;
1980 end = strchr(beg, ')');
1981
1982 if (!end || end == beg) {
1983 memprintf(err, "rdp-cookie : missing cookie name.");
1984 return -1;
1985 }
1986
1987 free(curproxy->lbprm.arg_str);
1988 curproxy->lbprm.arg_str = my_strndup(beg, end - beg);
1989 curproxy->lbprm.arg_len = end - beg;
1990 }
1991 else if ( *(args[0] + 10 ) == '\0' ) { /* default cookie name 'mstshash' */
1992 free(curproxy->lbprm.arg_str);
1993 curproxy->lbprm.arg_str = strdup("mstshash");
1994 curproxy->lbprm.arg_len = strlen(curproxy->lbprm.arg_str);
1995 }
1996 else { /* syntax */
1997 			memprintf(err, "rdp-cookie : unexpected character after keyword, expecting '(' or end of word.");
1998 return -1;
1999 }
2000 }
2001 else {
2002 		memprintf(err, "only supports 'roundrobin', 'static-rr', 'leastconn', 'first', 'random', 'source', 'uri', 'url_param', 'hdr(name)' and 'rdp-cookie(name)' options.");
2003 return -1;
2004 }
2005 return 0;
2006 }
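
/* A few illustrative "balance" statements accepted by the parser above; the
 * parameter values are hypothetical and the list is a sketch, not exhaustive:
 *
 *     balance roundrobin
 *     balance uri len 12 depth 3
 *     balance url_param session_id check_post
 *     balance hdr(Host) use_domain_only
 *     balance random(3)
 *     balance rdp-cookie(mstshash)
 */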
2007
2008
2009 /************************************************************************/
2010 /* All supported sample and ACL keywords must be declared here. */
2011 /************************************************************************/
2012
2013 /* set temp integer to the number of enabled servers on the proxy.
2014 * Accepts exactly 1 argument. Argument is a backend, other types will lead to
2015 * undefined behaviour.
2016 */
2017 static int
2018 smp_fetch_nbsrv(const struct arg *args, struct sample *smp, const char *kw, void *private)
2019 {
2020 struct proxy *px;
2021
2022 smp->flags = SMP_F_VOL_TEST;
2023 smp->data.type = SMP_T_SINT;
2024 px = args->data.prx;
2025
2026 smp->data.u.sint = be_usable_srv(px);
2027
2028 return 1;
2029 }
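
/* Illustrative configuration use (hypothetical backend name), e.g. failing the
 * monitoring page when no server is usable:
 *
 *     acl app_down nbsrv(be_app) lt 1
 *     monitor fail if app_down
 */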
2030
2031 /* set temp boolean to 1 if the designated server is usable (not in maintenance
2032  * and either unchecked or not stopped), and to 0 otherwise.
2033 * Accepts exactly 1 argument. Argument is a server, other types will lead to
2034 * undefined behaviour.
2035 */
2036 static int
2037 smp_fetch_srv_is_up(const struct arg *args, struct sample *smp, const char *kw, void *private)
2038 {
2039 struct server *srv = args->data.srv;
2040
2041 smp->flags = SMP_F_VOL_TEST;
2042 smp->data.type = SMP_T_BOOL;
2043 if (!(srv->cur_admin & SRV_ADMF_MAINT) &&
2044 (!(srv->check.state & CHK_ST_CONFIGURED) || (srv->cur_state != SRV_ST_STOPPED)))
2045 smp->data.u.sint = 1;
2046 else
2047 smp->data.u.sint = 0;
2048 return 1;
2049 }
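
/* Illustrative configuration use (hypothetical backend and server names):
 *
 *     acl srv1_up srv_is_up(be_app/srv1)
 *     use_backend be_fallback unless srv1_up
 */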
2050
2051 /* set temp integer to the number of available connection slots on the proxy, i.e. free connections plus free queue slots across its non-stopped servers.
2052 * Accepts exactly 1 argument. Argument is a backend, other types will lead to
2053 * undefined behaviour.
2054 */
2055 static int
2056 smp_fetch_connslots(const struct arg *args, struct sample *smp, const char *kw, void *private)
2057 {
2058 struct server *iterator;
2059
2060 smp->flags = SMP_F_VOL_TEST;
2061 smp->data.type = SMP_T_SINT;
2062 smp->data.u.sint = 0;
2063
2064 for (iterator = args->data.prx->srv; iterator; iterator = iterator->next) {
2065 if (iterator->cur_state == SRV_ST_STOPPED)
2066 continue;
2067
2068 if (iterator->maxconn == 0 || iterator->maxqueue == 0) {
2069 			/* a server with unlimited connections or queue makes the total meaningless */
2070 			smp->data.u.sint = -1; /* reported as -1 to flag this case */
2071 return 1;
2072 }
2073
2074 smp->data.u.sint += (iterator->maxconn - iterator->cur_sess)
2075 + (iterator->maxqueue - iterator->nbpend);
2076 }
2077
2078 return 1;
2079 }
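
/* Illustrative configuration use (hypothetical backend names), overflowing to
 * a spare farm once no connection or queue slot is left:
 *
 *     use_backend be_spare if { connslots(be_main) eq 0 }
 */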
2080
2081 /* set temp integer to the id of the backend */
2082 static int
2083 smp_fetch_be_id(const struct arg *args, struct sample *smp, const char *kw, void *private)
2084 {
2085 if (!smp->strm)
2086 return 0;
2087
2088 smp->flags = SMP_F_VOL_TXN;
2089 smp->data.type = SMP_T_SINT;
2090 smp->data.u.sint = smp->strm->be->uuid;
2091 return 1;
2092 }
2093
2094 /* set string to the name of the backend */
2095 static int
2096 smp_fetch_be_name(const struct arg *args, struct sample *smp, const char *kw, void *private)
2097 {
2098 if (!smp->strm)
2099 return 0;
2100
2101 smp->data.u.str.area = (char *)smp->strm->be->id;
2102 if (!smp->data.u.str.area)
2103 return 0;
2104
2105 smp->data.type = SMP_T_STR;
2106 smp->flags = SMP_F_CONST;
2107 smp->data.u.str.data = strlen(smp->data.u.str.area);
2108
2109 return 1;
2110 }
2111
2112 /* set temp integer to the id of the server */
2113 static int
2114 smp_fetch_srv_id(const struct arg *args, struct sample *smp, const char *kw, void *private)
2115 {
2116 if (!smp->strm)
2117 return 0;
2118
2119 if (!objt_server(smp->strm->target))
2120 return 0;
2121
2122 smp->data.type = SMP_T_SINT;
2123 smp->data.u.sint = __objt_server(smp->strm->target)->puid;
2124
2125 return 1;
2126 }
2127
2128 /* set string to the name of the server */
2129 static int
2130 smp_fetch_srv_name(const struct arg *args, struct sample *smp, const char *kw, void *private)
2131 {
2132 if (!smp->strm)
2133 return 0;
2134
2135 if (!objt_server(smp->strm->target))
2136 return 0;
2137
2138 smp->data.u.str.area = (char *)__objt_server(smp->strm->target)->id;
2139 if (!smp->data.u.str.area)
2140 return 0;
2141
2142 smp->data.type = SMP_T_STR;
2143 smp->data.u.str.data = strlen(smp->data.u.str.area);
2144
2145 return 1;
2146 }
2147
2148 /* set temp integer to the number of sessions per second reaching the backend.
2149 * Accepts exactly 1 argument. Argument is a backend, other types will lead to
2150 * undefined behaviour.
2151 */
2152 static int
2153 smp_fetch_be_sess_rate(const struct arg *args, struct sample *smp, const char *kw, void *private)
2154 {
2155 smp->flags = SMP_F_VOL_TEST;
2156 smp->data.type = SMP_T_SINT;
2157 smp->data.u.sint = read_freq_ctr(&args->data.prx->be_sess_per_sec);
2158 return 1;
2159 }
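
/* Illustrative configuration use (hypothetical backend name and threshold):
 *
 *     http-request deny if { be_sess_rate(be_api) gt 1000 }
 */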
2160
2161 /* set temp integer to the number of concurrent connections on the backend.
2162 * Accepts exactly 1 argument. Argument is a backend, other types will lead to
2163 * undefined behaviour.
2164 */
2165 static int
2166 smp_fetch_be_conn(const struct arg *args, struct sample *smp, const char *kw, void *private)
2167 {
2168 smp->flags = SMP_F_VOL_TEST;
2169 smp->data.type = SMP_T_SINT;
2170 smp->data.u.sint = args->data.prx->beconn;
2171 return 1;
2172 }
2173
2174 /* set temp integer to the number of available connections across available
2175 * servers on the backend.
2176 * Accepts exactly 1 argument. Argument is a backend, other types will lead to
2177 * undefined behaviour.
2178 */
2179 static int
2180 smp_fetch_be_conn_free(const struct arg *args, struct sample *smp, const char *kw, void *private)
2181 {
2182 struct server *iterator;
2183 struct proxy *px;
2184 unsigned int maxconn;
2185
2186 smp->flags = SMP_F_VOL_TEST;
2187 smp->data.type = SMP_T_SINT;
2188 smp->data.u.sint = 0;
2189
2190 for (iterator = args->data.prx->srv; iterator; iterator = iterator->next) {
2191 if (iterator->cur_state == SRV_ST_STOPPED)
2192 continue;
2193
2194 px = iterator->proxy;
2195 if (!srv_currently_usable(iterator) ||
2196 ((iterator->flags & SRV_F_BACKUP) &&
2197 (px->srv_act || (iterator != px->lbprm.fbck && !(px->options & PR_O_USE_ALL_BK)))))
2198 continue;
2199
2200 if (iterator->maxconn == 0) {
2201 /* one active server is unlimited, return -1 */
2202 smp->data.u.sint = -1;
2203 return 1;
2204 }
2205
2206 maxconn = srv_dynamic_maxconn(iterator);
2207 if (maxconn > iterator->cur_sess)
2208 smp->data.u.sint += maxconn - iterator->cur_sess;
2209 }
2210
2211 return 1;
2212 }
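
/* Illustrative configuration use (hypothetical backend names and threshold):
 *
 *     use_backend be_overflow if { be_conn_free(be_main) lt 10 }
 */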
2213
2214 /* set temp integer to the total number of queued connections on the backend.
2215 * Accepts exactly 1 argument. Argument is a backend, other types will lead to
2216 * undefined behaviour.
2217 */
2218 static int
2219 smp_fetch_queue_size(const struct arg *args, struct sample *smp, const char *kw, void *private)
2220 {
2221 smp->flags = SMP_F_VOL_TEST;
2222 smp->data.type = SMP_T_SINT;
2223 smp->data.u.sint = args->data.prx->totpend;
2224 return 1;
2225 }
2226
2227 /* set temp integer to the total number of queued connections on the backend divided
2228 * by the number of running servers and rounded up. If there is no running
2229 * server, we return twice the total, just as if we had half a running server.
2230 * This is more or less correct anyway, since we expect the last server to come
2231 * back soon.
2232 * Accepts exactly 1 argument. Argument is a backend, other types will lead to
2233 * undefined behaviour.
2234 */
2235 static int
2236 smp_fetch_avg_queue_size(const struct arg *args, struct sample *smp, const char *kw, void *private)
2237 {
2238 int nbsrv;
2239 struct proxy *px;
2240
2241 smp->flags = SMP_F_VOL_TEST;
2242 smp->data.type = SMP_T_SINT;
2243 px = args->data.prx;
2244
2245 nbsrv = be_usable_srv(px);
2246
2247 if (nbsrv > 0)
2248 smp->data.u.sint = (px->totpend + nbsrv - 1) / nbsrv;
2249 else
2250 smp->data.u.sint = px->totpend * 2;
2251
2252 return 1;
2253 }
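
/* Illustrative configuration use (hypothetical backend names and threshold),
 * switching to a backup farm when the average per-server queue grows:
 *
 *     use_backend be_slow if { avg_queue(be_main) gt 30 }
 */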
2254
2255 /* set temp integer to the number of concurrent connections on the server in the backend.
2256 * Accepts exactly 1 argument. Argument is a server, other types will lead to
2257 * undefined behaviour.
2258 */
2259 static int
2260 smp_fetch_srv_conn(const struct arg *args, struct sample *smp, const char *kw, void *private)
2261 {
2262 smp->flags = SMP_F_VOL_TEST;
2263 smp->data.type = SMP_T_SINT;
2264 smp->data.u.sint = args->data.srv->cur_sess;
2265 return 1;
2266 }
2267
2268 /* set temp integer to the number of available connections on the server in the backend.
2269 * Accepts exactly 1 argument. Argument is a server, other types will lead to
2270 * undefined behaviour.
2271 */
2272 static int
2273 smp_fetch_srv_conn_free(const struct arg *args, struct sample *smp, const char *kw, void *private)
2274 {
2275 unsigned int maxconn;
2276
2277 smp->flags = SMP_F_VOL_TEST;
2278 smp->data.type = SMP_T_SINT;
2279
2280 if (args->data.srv->maxconn == 0) {
2281 /* one active server is unlimited, return -1 */
2282 smp->data.u.sint = -1;
2283 return 1;
2284 }
2285
2286 maxconn = srv_dynamic_maxconn(args->data.srv);
2287 if (maxconn > args->data.srv->cur_sess)
2288 smp->data.u.sint = maxconn - args->data.srv->cur_sess;
2289 else
2290 smp->data.u.sint = 0;
2291
2292 return 1;
2293 }
2294
2295 /* set temp integer to the number of connections pending in the server's queue.
2296 * Accepts exactly 1 argument. Argument is a server, other types will lead to
2297 * undefined behaviour.
2298 */
2299 static int
2300 smp_fetch_srv_queue(const struct arg *args, struct sample *smp, const char *kw, void *private)
2301 {
2302 smp->flags = SMP_F_VOL_TEST;
2303 smp->data.type = SMP_T_SINT;
2304 smp->data.u.sint = args->data.srv->nbpend;
2305 return 1;
2306 }
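
/* Illustrative configuration use (hypothetical backend/server name and
 * threshold):
 *
 *     use_backend be_spare if { srv_queue(be_main/srv1) gt 50 }
 */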
2307
2308 /* set temp integer to the rate of sessions per second on the server.
2309 * Accepts exactly 1 argument. Argument is a server, other types will lead to
2310 * undefined behaviour.
2311 */
2312 static int
2313 smp_fetch_srv_sess_rate(const struct arg *args, struct sample *smp, const char *kw, void *private)
2314 {
2315 smp->flags = SMP_F_VOL_TEST;
2316 smp->data.type = SMP_T_SINT;
2317 smp->data.u.sint = read_freq_ctr(&args->data.srv->sess_per_sec);
2318 return 1;
2319 }
2320
2321 static int sample_conv_nbsrv(const struct arg *args, struct sample *smp, void *private)
2322 {
2323
2324 struct proxy *px;
2325
2326 if (!smp_make_safe(smp))
2327 return 0;
2328
2329 px = proxy_find_by_name(smp->data.u.str.area, PR_CAP_BE, 0);
2330 if (!px)
2331 return 0;
2332
2333 smp->data.type = SMP_T_SINT;
2334 smp->data.u.sint = be_usable_srv(px);
2335
2336 return 1;
2337 }
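
/* Illustrative use of the converter form (the header name is hypothetical);
 * the input string is interpreted as a backend name:
 *
 *     http-request set-header X-Usable-Servers %[req.hdr(x-backend),nbsrv]
 */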
2338
2339 static int
2340 sample_conv_srv_queue(const struct arg *args, struct sample *smp, void *private)
2341 {
2342 struct proxy *px;
2343 struct server *srv;
2344 char *bksep;
2345
2346 if (!smp_make_safe(smp))
2347 return 0;
2348
2349 bksep = strchr(smp->data.u.str.area, '/');
2350
2351 if (bksep) {
2352 *bksep = '\0';
2353 px = proxy_find_by_name(smp->data.u.str.area, PR_CAP_BE, 0);
2354 if (!px)
2355 return 0;
2356 smp->data.u.str.area = bksep + 1;
2357 } else {
2358 if (!(smp->px->cap & PR_CAP_BE))
2359 return 0;
2360 px = smp->px;
2361 }
2362
2363 srv = server_find_by_name(px, smp->data.u.str.area);
2364 if (!srv)
2365 return 0;
2366
2367 smp->data.type = SMP_T_SINT;
2368 smp->data.u.sint = srv->nbpend;
2369 return 1;
2370 }
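
/* Illustrative use of the converter form (the header name is hypothetical);
 * the input string may be either "server" or "backend/server":
 *
 *     http-request set-header X-Srv-Queue %[req.hdr(x-server),srv_queue]
 */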
2371
2372 /* Note: must not be declared <const> as its list will be overwritten.
2373 * Please take care of keeping this list alphabetically sorted.
2374 */
2375 static struct sample_fetch_kw_list smp_kws = {ILH, {
2376 { "avg_queue", smp_fetch_avg_queue_size, ARG1(1,BE), NULL, SMP_T_SINT, SMP_USE_INTRN, },
2377 { "be_conn", smp_fetch_be_conn, ARG1(1,BE), NULL, SMP_T_SINT, SMP_USE_INTRN, },
2378 { "be_conn_free", smp_fetch_be_conn_free, ARG1(1,BE), NULL, SMP_T_SINT, SMP_USE_INTRN, },
2379 { "be_id", smp_fetch_be_id, 0, NULL, SMP_T_SINT, SMP_USE_BKEND, },
2380 { "be_name", smp_fetch_be_name, 0, NULL, SMP_T_STR, SMP_USE_BKEND, },
2381 { "be_sess_rate", smp_fetch_be_sess_rate, ARG1(1,BE), NULL, SMP_T_SINT, SMP_USE_INTRN, },
2382 { "connslots", smp_fetch_connslots, ARG1(1,BE), NULL, SMP_T_SINT, SMP_USE_INTRN, },
2383 { "nbsrv", smp_fetch_nbsrv, ARG1(1,BE), NULL, SMP_T_SINT, SMP_USE_INTRN, },
2384 { "queue", smp_fetch_queue_size, ARG1(1,BE), NULL, SMP_T_SINT, SMP_USE_INTRN, },
2385 { "srv_conn", smp_fetch_srv_conn, ARG1(1,SRV), NULL, SMP_T_SINT, SMP_USE_INTRN, },
2386 { "srv_conn_free", smp_fetch_srv_conn_free, ARG1(1,SRV), NULL, SMP_T_SINT, SMP_USE_INTRN, },
2387 { "srv_id", smp_fetch_srv_id, 0, NULL, SMP_T_SINT, SMP_USE_SERVR, },
2388 { "srv_is_up", smp_fetch_srv_is_up, ARG1(1,SRV), NULL, SMP_T_BOOL, SMP_USE_INTRN, },
2389 { "srv_name", smp_fetch_srv_name, 0, NULL, SMP_T_STR, SMP_USE_SERVR, },
2390 { "srv_queue", smp_fetch_srv_queue, ARG1(1,SRV), NULL, SMP_T_SINT, SMP_USE_INTRN, },
2391 { "srv_sess_rate", smp_fetch_srv_sess_rate, ARG1(1,SRV), NULL, SMP_T_SINT, SMP_USE_INTRN, },
2392 { /* END */ },
2393 }};
2394
2395 INITCALL1(STG_REGISTER, sample_register_fetches, &smp_kws);
2396
2397 /* Note: must not be declared <const> as its list will be overwritten */
2398 static struct sample_conv_kw_list sample_conv_kws = {ILH, {
2399 { "nbsrv", sample_conv_nbsrv, 0, NULL, SMP_T_STR, SMP_T_SINT },
2400 { "srv_queue", sample_conv_srv_queue, 0, NULL, SMP_T_STR, SMP_T_SINT },
2401 { /* END */ },
2402 }};
2403
2404 INITCALL1(STG_REGISTER, sample_register_convs, &sample_conv_kws);
2405
2406 /* Note: must not be declared <const> as its list will be overwritten.
2407 * Please take care of keeping this list alphabetically sorted.
2408 */
2409 static struct acl_kw_list acl_kws = {ILH, {
2410 { /* END */ },
2411 }};
2412
2413 INITCALL1(STG_REGISTER, acl_register_keywords, &acl_kws);
2414
2415 /*
2416 * Local variables:
2417 * c-indent-level: 8
2418 * c-basic-offset: 8
2419 * End:
2420 */
2421