1 /*-
2 * Copyright (c) 2015 Varnish Software AS
3 * All rights reserved.
4 *
5 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
6 *
7 * SPDX-License-Identifier: BSD-2-Clause
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 * (TCP|UDS) connection pools.
31 *
32 */
33
34 #include "config.h"
35
36 #include <stdlib.h>
37
38 #include "cache_varnishd.h"
39
40 #include "vend.h"
41 #include "vsa.h"
42 #include "vsha256.h"
43 #include "vtcp.h"
44 #include "vus.h"
45 #include "vtim.h"
46 #include "waiter/waiter.h"
47
48 #include "cache_conn_pool.h"
49 #include "cache_pool.h"
50
51 struct conn_pool;
52
53 /*--------------------------------------------------------------------
54 */
55
/*
 * A pooled file descriptor: one backend connection, tracked on either
 * the owning conn_pool's connlist (idle/stolen) or killlist (doomed).
 */
struct pfd {
	unsigned magic;
#define PFD_MAGIC 0x0c5e6593
	int fd;				/* the connected socket */
	VTAILQ_ENTRY(pfd) list;		/* connlist or killlist hook */
	VCL_IP addr;			/* address actually connected to */
	uint8_t state;			/* PFD_STATE_* (see header) */
	struct waited waited[1];	/* registration with the waiter */
	struct conn_pool *conn_pool;	/* owning pool, never NULL */

	/* Set by VCP_Get() to the stealing worker's condvar while the
	 * pfd is PFD_STATE_STOLEN; signalled by vcp_handle(). */
	pthread_cond_t *cond;
};
68
69 /*--------------------------------------------------------------------
70 */
71
/* Open a new connection for the pool; returns an fd or -1. */
typedef int cp_open_f(const struct conn_pool *, vtim_dur tmo, VCL_IP *ap);
/* Close a pfd's file descriptor. */
typedef void cp_close_f(struct pfd *);
/* Format address and port of one end of a connection into buffers. */
typedef void cp_name_f(const struct pfd *, char *, unsigned, char *, unsigned);

/* Per-transport (TCP vs UDS) operations of a connection pool. */
struct cp_methods {
	cp_open_f *open;
	cp_close_f *close;
	cp_name_f *local_name;
	cp_name_f *remote_name;
};
82
struct conn_pool {
	unsigned magic;
#define CONN_POOL_MAGIC 0x85099bc3

	const struct cp_methods *methods;	/* TCP or UDS transport ops */

	struct vrt_endpoint *endpoint;	/* cloned in VCP_Ref(), owned here */
	char ident[VSHA256_DIGEST_LENGTH];	/* identity hash of the pool */

	VTAILQ_ENTRY(conn_pool) list;	/* hook on global conn_pools list */
	int refcnt;			/* guarded by conn_pools_mtx */
	struct lock mtx;		/* guards the fields below */

	VTAILQ_HEAD(, pfd) connlist;	/* idle and stolen connections */
	int n_conn;

	VTAILQ_HEAD(, pfd) killlist;	/* shut down, awaiting the waiter */
	int n_kill;

	int n_used;			/* connections handed out to callers */

	vtim_mono holddown;		/* no new opens until this time */
	int holddown_errno;		/* errno that triggered the holddown */
};
107
/* Protects the global conn_pools list and every pool's refcnt. */
static struct lock conn_pools_mtx;

static VTAILQ_HEAD(, conn_pool) conn_pools =
    VTAILQ_HEAD_INITIALIZER(conn_pools);
112
113
114 /*--------------------------------------------------------------------
115 */
116
/* Return the current PFD_STATE_* of a pooled connection. */
unsigned
PFD_State(const struct pfd *p)
{
	CHECK_OBJ_NOTNULL(p, PFD_MAGIC);
	return (p->state);
}
123
/* Expose a pointer to the pfd's file descriptor (writable by callers). */
int *
PFD_Fd(struct pfd *p)
{
	CHECK_OBJ_NOTNULL(p, PFD_MAGIC);
	return (&(p->fd));
}
130
/* Format the local address/port of the connection via the pool's
 * transport-specific name method. */
void
PFD_LocalName(const struct pfd *p, char *abuf, unsigned alen, char *pbuf,
    unsigned plen)
{
	CHECK_OBJ_NOTNULL(p, PFD_MAGIC);
	CHECK_OBJ_NOTNULL(p->conn_pool, CONN_POOL_MAGIC);
	p->conn_pool->methods->local_name(p, abuf, alen, pbuf, plen);
}
139
/* Format the remote address/port of the connection via the pool's
 * transport-specific name method. */
void
PFD_RemoteName(const struct pfd *p, char *abuf, unsigned alen, char *pbuf,
    unsigned plen)
{
	CHECK_OBJ_NOTNULL(p, PFD_MAGIC);
	CHECK_OBJ_NOTNULL(p->conn_pool, CONN_POOL_MAGIC);
	p->conn_pool->methods->remote_name(p, abuf, alen, pbuf, plen);
}
148
149 /*--------------------------------------------------------------------
150 * Waiter-handler
151 */
152
/*
 * Waiter callback, fired when a parked fd sees an event (activity,
 * idle timeout or peer close).  The action depends on who owns it:
 *
 * - STOLEN: a thread has claimed the fd and sits in VCP_Wait();
 *   promote it to USED, take it off the connlist and wake the thread.
 * - AVAIL: the fd was idle in the pool; close and destroy it.
 * - CLEANUP: VCP_Close()/VCP_Rel() already shut it down; finish the
 *   teardown and drop it from the killlist.
 */
static void v_matchproto_(waiter_handle_f)
vcp_handle(struct waited *w, enum wait_event ev, vtim_real now)
{
	struct pfd *pfd;
	struct conn_pool *cp;

	CAST_OBJ_NOTNULL(pfd, w->priv1, PFD_MAGIC);
	(void)ev;
	(void)now;
	CHECK_OBJ_NOTNULL(pfd->conn_pool, CONN_POOL_MAGIC);
	cp = pfd->conn_pool;

	Lck_Lock(&cp->mtx);

	switch (pfd->state) {
	case PFD_STATE_STOLEN:
		pfd->state = PFD_STATE_USED;
		VTAILQ_REMOVE(&cp->connlist, pfd, list);
		AN(pfd->cond);
		AZ(pthread_cond_signal(pfd->cond));
		break;
	case PFD_STATE_AVAIL:
		cp->methods->close(pfd);
		VTAILQ_REMOVE(&cp->connlist, pfd, list);
		cp->n_conn--;
		FREE_OBJ(pfd);
		break;
	case PFD_STATE_CLEANUP:
		cp->methods->close(pfd);
		cp->n_kill--;
		VTAILQ_REMOVE(&cp->killlist, pfd, list);
		/* poison pattern to make use-after-free stand out */
		memset(pfd, 0x11, sizeof *pfd);
		free(pfd);
		break;
	default:
		WRONG("Wrong pfd state");
	}
	Lck_Unlock(&cp->mtx);
}
192
193
194 /*--------------------------------------------------------------------
195 */
196
/* Grab an additional reference on an already-referenced pool. */
void
VCP_AddRef(struct conn_pool *cp)
{
	CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);

	Lck_Lock(&conn_pools_mtx);
	/* caller must already hold a reference */
	assert(cp->refcnt > 0);
	cp->refcnt++;
	Lck_Unlock(&conn_pools_mtx);
}
207
208 /*--------------------------------------------------------------------
209 * Release Conn pool, destroy if last reference.
210 */
211
/*
 * Drop one reference on *cpp (which is NULLed).  On the last
 * reference the pool is unlinked, all idle connections are shut
 * down, and the pool is destroyed once the waiter has reaped them.
 */
void
VCP_Rel(struct conn_pool **cpp)
{
	struct conn_pool *cp;
	struct pfd *pfd, *pfd2;

	TAKE_OBJ_NOTNULL(cp, cpp, CONN_POOL_MAGIC);

	Lck_Lock(&conn_pools_mtx);
	assert(cp->refcnt > 0);
	if (--cp->refcnt > 0) {
		Lck_Unlock(&conn_pools_mtx);
		return;
	}
	/* last reference: no connection may still be in active use */
	AZ(cp->n_used);
	VTAILQ_REMOVE(&conn_pools, cp, list);
	Lck_Unlock(&conn_pools_mtx);

	Lck_Lock(&cp->mtx);
	/* Doom every idle connection; the waiter finishes closing
	 * them via vcp_handle()'s PFD_STATE_CLEANUP branch. */
	VTAILQ_FOREACH_SAFE(pfd, &cp->connlist, list, pfd2) {
		VTAILQ_REMOVE(&cp->connlist, pfd, list);
		cp->n_conn--;
		assert(pfd->state == PFD_STATE_AVAIL);
		pfd->state = PFD_STATE_CLEANUP;
		(void)shutdown(pfd->fd, SHUT_WR);
		VTAILQ_INSERT_TAIL(&cp->killlist, pfd, list);
		cp->n_kill++;
	}
	/* Poll until the waiter has reaped the whole killlist. */
	while (cp->n_kill) {
		Lck_Unlock(&cp->mtx);
		(void)usleep(20000);
		Lck_Lock(&cp->mtx);
	}
	Lck_Unlock(&cp->mtx);
	Lck_Delete(&cp->mtx);
	AZ(cp->n_conn);
	AZ(cp->n_kill);
	free(cp->endpoint);
	FREE_OBJ(cp);
}
252
253 /*--------------------------------------------------------------------
254 * Recycle a connection.
255 */
256
/*
 * Return a USED connection to the pool (as *pfdp, which is NULLed)
 * and park it in the waiter so idle timeouts/peer closes are seen.
 * If the waiter refuses it, the connection is closed and destroyed.
 */
void
VCP_Recycle(const struct worker *wrk, struct pfd **pfdp)
{
	struct pfd *pfd;
	struct conn_pool *cp;
	int i = 0;	/* set when the pfd was actually parked */

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	TAKE_OBJ_NOTNULL(pfd, pfdp, PFD_MAGIC);
	cp = pfd->conn_pool;
	CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);

	assert(pfd->state == PFD_STATE_USED);
	assert(pfd->fd > 0);

	Lck_Lock(&cp->mtx);
	cp->n_used--;

	/* Hand the fd to the waiter; vcp_handle() reaps it on idle
	 * timeout, and VCP_Get() may steal it back before that. */
	pfd->waited->priv1 = pfd;
	pfd->waited->fd = pfd->fd;
	pfd->waited->idle = VTIM_real();
	pfd->state = PFD_STATE_AVAIL;
	pfd->waited->func = vcp_handle;
	pfd->waited->tmo = cache_param->backend_idle_timeout;
	if (Wait_Enter(wrk->pool->waiter, pfd->waited)) {
		cp->methods->close(pfd);
		/* poison pattern to make use-after-free stand out */
		memset(pfd, 0x33, sizeof *pfd);
		free(pfd);
		// XXX: stats
		pfd = NULL;
	} else {
		VTAILQ_INSERT_HEAD(&cp->connlist, pfd, list);
		i++;
	}

	if (pfd != NULL)
		cp->n_conn++;
	Lck_Unlock(&cp->mtx);

	if (i && DO_DEBUG(DBG_VTC_MODE)) {
		/*
		 * In varnishtest we do not have the luxury of using
		 * multiple backend connections, so whenever we end up
		 * in the "pending" case, take a short nap to let the
		 * waiter catch up and put the pfd back into circulations.
		 *
		 * In particular ESI:include related tests suffer random
		 * failures without this.
		 *
		 * In normal operation, the only effect is that we will
		 * have N+1 backend connections rather than N, which is
		 * entirely harmless.
		 */
		(void)usleep(10000);
	}
}
313
314 /*--------------------------------------------------------------------
315 * Open a new connection from pool.
316 */
317
318 int
VCP_Open(struct conn_pool * cp,vtim_dur tmo,VCL_IP * ap,int * err)319 VCP_Open(struct conn_pool *cp, vtim_dur tmo, VCL_IP *ap, int *err)
320 {
321 int r;
322 vtim_mono h;
323
324 CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);
325 AN(err);
326
327 while (cp->holddown > 0) {
328 Lck_Lock(&cp->mtx);
329 if (cp->holddown == 0) {
330 Lck_Unlock(&cp->mtx);
331 break;
332 }
333
334 if (VTIM_mono() >= cp->holddown) {
335 cp->holddown = 0;
336 Lck_Unlock(&cp->mtx);
337 break;
338 }
339
340 *err = 0;
341 errno = cp->holddown_errno;
342 Lck_Unlock(&cp->mtx);
343 return (-1);
344 }
345
346 *err = errno = 0;
347 r = cp->methods->open(cp, tmo, ap);
348
349 if (r >= 0 && errno == 0 && cp->endpoint->preamble != NULL &&
350 cp->endpoint->preamble->len > 0) {
351 if (write(r, cp->endpoint->preamble->blob,
352 cp->endpoint->preamble->len) !=
353 cp->endpoint->preamble->len) {
354 *err = errno;
355 closefd(&r);
356 }
357 } else {
358 *err = errno;
359 }
360
361 if (r >= 0)
362 return (r);
363
364 h = 0;
365
366 switch (errno) {
367 case EACCES:
368 case EPERM:
369 h = cache_param->backend_local_error_holddown;
370 break;
371 case EADDRNOTAVAIL:
372 h = cache_param->backend_local_error_holddown;
373 break;
374 case ECONNREFUSED:
375 h = cache_param->backend_remote_error_holddown;
376 break;
377 case ENETUNREACH:
378 h = cache_param->backend_remote_error_holddown;
379 break;
380 default:
381 break;
382 }
383
384 if (h == 0)
385 return (r);
386
387 Lck_Lock(&cp->mtx);
388 h += VTIM_mono();
389 if (cp->holddown == 0 || h < cp->holddown) {
390 cp->holddown = h;
391 cp->holddown_errno = errno;
392 }
393
394 Lck_Unlock(&cp->mtx);
395
396 return (r);
397 }
398
399 /*--------------------------------------------------------------------
400 * Close a connection.
401 */
402
/*
 * Close an in-use connection (*pfdp is NULLed).  A USED pfd is
 * destroyed immediately; a STOLEN pfd is still registered with the
 * waiter, so it is only shut down and moved to the killlist for
 * vcp_handle() to finish off.
 */
void
VCP_Close(struct pfd **pfdp)
{
	struct pfd *pfd;
	struct conn_pool *cp;

	TAKE_OBJ_NOTNULL(pfd, pfdp, PFD_MAGIC);
	cp = pfd->conn_pool;
	CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);

	assert(pfd->fd > 0);

	Lck_Lock(&cp->mtx);
	assert(pfd->state == PFD_STATE_USED || pfd->state == PFD_STATE_STOLEN);
	cp->n_used--;
	if (pfd->state == PFD_STATE_STOLEN) {
		(void)shutdown(pfd->fd, SHUT_RDWR);
		VTAILQ_REMOVE(&cp->connlist, pfd, list);
		pfd->state = PFD_STATE_CLEANUP;
		VTAILQ_INSERT_HEAD(&cp->killlist, pfd, list);
		cp->n_kill++;
	} else {
		assert(pfd->state == PFD_STATE_USED);
		cp->methods->close(pfd);
		/* poison pattern to make use-after-free stand out */
		memset(pfd, 0x44, sizeof *pfd);
		free(pfd);
	}
	Lck_Unlock(&cp->mtx);
}
432
433 /*--------------------------------------------------------------------
434 * Get a connection, possibly recycled
435 */
436
/*
 * Get a connection from the pool, recycled if one is available and
 * force_fresh is not set.  Returns NULL with *err set when a fresh
 * open fails.  A recycled pfd comes back in STOLEN state: the fd is
 * still parked in the waiter, and the caller must use VCP_Wait()
 * before relying on it.
 */
struct pfd *
VCP_Get(struct conn_pool *cp, vtim_dur tmo, struct worker *wrk,
    unsigned force_fresh, int *err)
{
	struct pfd *pfd;

	CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);
	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	AN(err);

	*err = 0;
	Lck_Lock(&cp->mtx);
	pfd = VTAILQ_FIRST(&cp->connlist);
	CHECK_OBJ_ORNULL(pfd, PFD_MAGIC);
	/* Stolen pfds live at the tail, so a STOLEN head means no
	 * AVAIL connection is left. */
	if (force_fresh || pfd == NULL || pfd->state == PFD_STATE_STOLEN) {
		pfd = NULL;
	} else {
		assert(pfd->conn_pool == cp);
		assert(pfd->state == PFD_STATE_AVAIL);
		/* move to the tail so the next VCP_Get() does not
		 * trip over this now-stolen entry */
		VTAILQ_REMOVE(&cp->connlist, pfd, list);
		VTAILQ_INSERT_TAIL(&cp->connlist, pfd, list);
		cp->n_conn--;
		VSC_C_main->backend_reuse++;
		pfd->state = PFD_STATE_STOLEN;
		pfd->cond = &wrk->cond;
	}
	cp->n_used++;			// Opening mostly works
	Lck_Unlock(&cp->mtx);

	if (pfd != NULL)
		return (pfd);

	/* No recyclable connection: open a fresh one. */
	ALLOC_OBJ(pfd, PFD_MAGIC);
	AN(pfd);
	INIT_OBJ(pfd->waited, WAITED_MAGIC);
	pfd->state = PFD_STATE_USED;
	pfd->conn_pool = cp;
	pfd->fd = VCP_Open(cp, tmo, &pfd->addr, err);
	if (pfd->fd < 0) {
		FREE_OBJ(pfd);
		Lck_Lock(&cp->mtx);
		cp->n_used--;		// Nope, didn't work after all.
		Lck_Unlock(&cp->mtx);
	} else
		VSC_C_main->backend_conn++;

	return (pfd);
}
485
486 /*--------------------------------------------------------------------
487 */
488
/*
 * Wait (until tmo) for a STOLEN pfd to be handed over by the waiter
 * thread, i.e. for vcp_handle() to promote it to USED and signal us.
 * Returns 0 when the connection is usable, 1 on timeout.
 */
int
VCP_Wait(struct worker *wrk, struct pfd *pfd, vtim_real tmo)
{
	struct conn_pool *cp;
	int r;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(pfd, PFD_MAGIC);
	cp = pfd->conn_pool;
	CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);
	/* only the worker which stole the pfd may wait for it */
	assert(pfd->cond == &wrk->cond);
	Lck_Lock(&cp->mtx);
	while (pfd->state == PFD_STATE_STOLEN) {
		r = Lck_CondWait(&wrk->cond, &cp->mtx, tmo);
		if (r != 0) {
			if (r == EINTR)
				continue;
			assert(r == ETIMEDOUT);
			Lck_Unlock(&cp->mtx);
			return (1);
		}
	}
	assert(pfd->state == PFD_STATE_USED);
	pfd->cond = NULL;
	Lck_Unlock(&cp->mtx);

	return (0);
}
517
518 /*--------------------------------------------------------------------
519 */
520
/* Return the address the pfd was connected to (bogo_ip for UDS). */
VCL_IP
VCP_GetIp(struct pfd *pfd)
{

	CHECK_OBJ_NOTNULL(pfd, PFD_MAGIC);
	return (pfd->addr);
}
528
529 /*--------------------------------------------------------------------*/
530
531 static void
vcp_panic_endpoint(struct vsb * vsb,const struct vrt_endpoint * vep)532 vcp_panic_endpoint(struct vsb *vsb, const struct vrt_endpoint *vep)
533 {
534 char h[VTCP_ADDRBUFSIZE];
535 char p[VTCP_PORTBUFSIZE];
536
537 if (PAN_dump_struct(vsb, vep, VRT_ENDPOINT_MAGIC, "vrt_endpoint"))
538 return;
539 if (vep->uds_path)
540 VSB_printf(vsb, "uds_path = %s,\n", vep->uds_path);
541 if (vep->ipv4 && VSA_Sane(vep->ipv4)) {
542 VTCP_name(vep->ipv4, h, sizeof h, p, sizeof p);
543 VSB_printf(vsb, "ipv4 = %s, ", h);
544 VSB_printf(vsb, "port = %s,\n", p);
545 }
546 if (vep->ipv6 && VSA_Sane(vep->ipv6)) {
547 VTCP_name(vep->ipv6, h, sizeof h, p, sizeof p);
548 VSB_printf(vsb, "ipv6 = %s, ", h);
549 VSB_printf(vsb, "port = %s,\n", p);
550 }
551 VSB_indent(vsb, -2);
552 VSB_cat(vsb, "},\n");
553 }
554
/* Dump a conn_pool (ident hash and endpoint) into the panic buffer. */
void
VCP_Panic(struct vsb *vsb, struct conn_pool *cp)
{

	if (PAN_dump_struct(vsb, cp, CONN_POOL_MAGIC, "conn_pool"))
		return;
	VSB_cat(vsb, "ident = ");
	VSB_quote(vsb, cp->ident, VSHA256_DIGEST_LENGTH, VSB_QUOTE_HEX);
	VSB_cat(vsb, ",\n");
	vcp_panic_endpoint(vsb, cp->endpoint);
	VSB_indent(vsb, -2);
	VSB_cat(vsb, "},\n");
}
568
569 /*--------------------------------------------------------------------*/
570
/* One-time initialization of the global pool-list lock. */
void
VCP_Init(void)
{
	Lck_New(&conn_pools_mtx, lck_conn_pool);
}
576
577 /**********************************************************************/
578
579 static inline int
tmo2msec(vtim_dur tmo)580 tmo2msec(vtim_dur tmo)
581 {
582 return ((int)floor(tmo * 1000.0));
583 }
584
v_matchproto_(cp_open_f)585 static int v_matchproto_(cp_open_f)
586 vtp_open(const struct conn_pool *cp, vtim_dur tmo, VCL_IP *ap)
587 {
588 int s;
589 int msec;
590
591 CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);
592
593 msec = tmo2msec(tmo);
594 if (cache_param->prefer_ipv6) {
595 *ap = cp->endpoint->ipv6;
596 s = VTCP_connect(*ap, msec);
597 if (s >= 0)
598 return (s);
599 }
600 *ap = cp->endpoint->ipv4;
601 s = VTCP_connect(*ap, msec);
602 if (s >= 0)
603 return (s);
604 if (!cache_param->prefer_ipv6) {
605 *ap = cp->endpoint->ipv6;
606 s = VTCP_connect(*ap, msec);
607 }
608 return (s);
609 }
610
611
612 /*--------------------------------------------------------------------*/
613
/* Close method shared by the TCP and UDS transports. */
static void v_matchproto_(cp_close_f)
vtp_close(struct pfd *pfd)
{

	CHECK_OBJ_NOTNULL(pfd, PFD_MAGIC);
	VTCP_close(&pfd->fd);
}
621
/* Format the local (our) end of a TCP connection. */
static void v_matchproto_(cp_name_f)
vtp_local_name(const struct pfd *pfd, char *addr, unsigned alen, char *pbuf,
    unsigned plen)
{
	CHECK_OBJ_NOTNULL(pfd, PFD_MAGIC);
	VTCP_myname(pfd->fd, addr, alen, pbuf, plen);
}
629
/* Format the remote (backend) end of a TCP connection. */
static void v_matchproto_(cp_name_f)
vtp_remote_name(const struct pfd *pfd, char *addr, unsigned alen, char *pbuf,
    unsigned plen)
{
	CHECK_OBJ_NOTNULL(pfd, PFD_MAGIC);
	VTCP_hisname(pfd->fd, addr, alen, pbuf, plen);
}
637
/* Transport methods for TCP pools. */
static const struct cp_methods vtp_methods = {
	.open = vtp_open,
	.close = vtp_close,
	.local_name = vtp_local_name,
	.remote_name = vtp_remote_name,
};
644
645 /*--------------------------------------------------------------------
646 */
647
v_matchproto_(cp_open_f)648 static int v_matchproto_(cp_open_f)
649 vus_open(const struct conn_pool *cp, vtim_dur tmo, VCL_IP *ap)
650 {
651 int s;
652 int msec;
653
654 CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);
655 AN(cp->endpoint->uds_path);
656
657 msec = tmo2msec(tmo);
658 *ap = bogo_ip;
659 s = VUS_connect(cp->endpoint->uds_path, msec);
660 return (s);
661 }
662
/* Local and remote name method for UDS: there is no IP endpoint, so
 * report fixed placeholder values. */
static void v_matchproto_(cp_name_f)
vus_name(const struct pfd *pfd, char *addr, unsigned alen, char *pbuf,
    unsigned plen)
{
	(void) pfd;
	assert(alen > strlen("0.0.0.0"));
	assert(plen > 1);
	strcpy(addr, "0.0.0.0");
	strcpy(pbuf, "0");
}
673
/* Transport methods for UDS pools (close is shared with TCP). */
static const struct cp_methods vus_methods = {
	.open = vus_open,
	.close = vtp_close,
	.local_name = vus_name,
	.remote_name = vus_name,
};
680
681 /*--------------------------------------------------------------------
682 * Reference a TCP pool given by {ip4, ip6} pair or a UDS. Create if
683 * it doesn't exist already.
684 */
685
/*
 * Return a referenced pool for (vep, ident), creating it if none
 * exists.  Identity is the SHA256 over ident plus every
 * distinguishing endpoint property; the NUL-terminated tags act as
 * unambiguous field separators.
 */
struct conn_pool *
VCP_Ref(const struct vrt_endpoint *vep, const char *ident)
{
	struct conn_pool *cp, *cp2;
	struct VSHA256Context cx[1];
	unsigned char digest[VSHA256_DIGEST_LENGTH];

	CHECK_OBJ_NOTNULL(vep, VRT_ENDPOINT_MAGIC);
	AN(ident);
	VSHA256_Init(cx);
	VSHA256_Update(cx, ident, strlen(ident) + 1); // include \0
	if (vep->uds_path != NULL) {
		/* a UDS endpoint carries no IP addresses */
		AZ(vep->ipv4);
		AZ(vep->ipv6);
		VSHA256_Update(cx, "UDS", 4); // include \0
		VSHA256_Update(cx, vep->uds_path, strlen(vep->uds_path));
	} else {
		assert(vep->ipv4 != NULL || vep->ipv6 != NULL);
		if (vep->ipv4 != NULL) {
			assert(VSA_Sane(vep->ipv4));
			VSHA256_Update(cx, "IP4", 4); // include \0
			VSHA256_Update(cx, vep->ipv4, vsa_suckaddr_len);
		}
		if (vep->ipv6 != NULL) {
			assert(VSA_Sane(vep->ipv6));
			VSHA256_Update(cx, "IP6", 4); // include \0
			VSHA256_Update(cx, vep->ipv6, vsa_suckaddr_len);
		}
	}
	if (vep->preamble != NULL && vep->preamble->len > 0) {
		VSHA256_Update(cx, "PRE", 4); // include \0
		VSHA256_Update(cx, vep->preamble->blob, vep->preamble->len);
	}
	VSHA256_Final(digest, cx);

	/*
	 * In heavy use of dynamic backends, traversing this list
	 * can become expensive. In order to not do so twice we
	 * pessimistically create the necessary pool, and discard
	 * it on a hit. (XXX: Consider hash or tree ?)
	 */

	ALLOC_OBJ(cp, CONN_POOL_MAGIC);
	AN(cp);
	cp->refcnt = 1;
	cp->holddown = 0;
	cp->endpoint = VRT_Endpoint_Clone(vep);
	memcpy(cp->ident, digest, sizeof cp->ident);
	if (vep->uds_path != NULL)
		cp->methods = &vus_methods;
	else
		cp->methods = &vtp_methods;
	Lck_New(&cp->mtx, lck_conn_pool);
	VTAILQ_INIT(&cp->connlist);
	VTAILQ_INIT(&cp->killlist);

	CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);
	Lck_Lock(&conn_pools_mtx);
	VTAILQ_FOREACH(cp2, &conn_pools, list) {
		CHECK_OBJ_NOTNULL(cp2, CONN_POOL_MAGIC);
		assert(cp2->refcnt > 0);
		if (!memcmp(digest, cp2->ident, sizeof cp2->ident))
			break;
	}
	if (cp2 == NULL)
		VTAILQ_INSERT_HEAD(&conn_pools, cp, list);
	else
		cp2->refcnt++;
	Lck_Unlock(&conn_pools_mtx);

	if (cp2 == NULL) {
		CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);
		return (cp);
	}

	/* hit: discard the pessimistically created pool */
	Lck_Delete(&cp->mtx);
	AZ(cp->n_conn);
	AZ(cp->n_kill);
	FREE_OBJ(cp->endpoint);
	FREE_OBJ(cp);
	CHECK_OBJ_NOTNULL(cp2, CONN_POOL_MAGIC);
	return (cp2);
}
769