1 /*-
2 * Copyright (c) 2009-2018 UPLEX - Nils Goroll Systemoptimierung
3 * All rights reserved.
4 *
5 * Authors: Julian Wiesener <jw@uplex.de>
6 * Nils Goroll <slink@uplex.de>
7 *
8 * SPDX-License-Identifier: BSD-2-Clause
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32 #include "config.h"
33
34 #include <stdlib.h>
35 #include <string.h>
36
37 #include "cache/cache.h"
38 #include "vcl.h"
39
40 #include "vend.h"
41
42 #include "vcc_directors_if.h"
43 #include "vmod_directors_shard_dir.h"
44 #include "vmod_directors_shard_cfg.h"
45 #include "vsb.h"
46
47 /* -------------------------------------------------------------------------
48 * shard director: LAZY mode (vdi resolve function), parameter objects
49 *
50 * By associating a parameter object with a shard director, we enable LAZY
51 * lookups as with the other directors. Parameter objects are defined with VCL
52 * scope (normal vmod objects), but can be overridden per backend request using
53 * a task priv.
54 *
55 * We use the same concept to carry shard.backend() parameters to vdi resolve
56 * for LAZY mode: They get saved in a per-director task scope parameter object.
57 *
58 * Each object points to another object providing defaults for values which are
59 * not defined.
60 *
61 * Actual resolution of the various parameter objects does not happen before
62 * they are used, which enables changing them independently (ie, shard
63 * .backend() parameters have precedence over an associated parameter object,
64 * which by itself can be overridden).
65 *
66 * Overview of parameter objects (pointers are alternatives)
67 *
 * shard() director              shard_param() object    default param
69 *
70 * ---------------------------------> vmod static
71 * VCL obj / ->
72 * .param -+---------> VCL obj / _
73 * .default -------- /|
74 * /
75 * ^ /
76 * | /
77 * /
78 * .default /
79 * -------------> TASK priv /
80 * / /
81 * .default -----------------------------
82 * TASK priv
83 */
84
85 /* -------------------------------------------------------------------------
86 * method arguments and set parameters bitmask in vmod_directors_shard_param
87 */
88
89 #define arg_by ((uint32_t)1)
90 #define arg_key ((uint32_t)1 << 1)
91 #define arg_key_blob ((uint32_t)1 << 2)
92 #define arg_alt ((uint32_t)1 << 3)
93 #define arg_warmup ((uint32_t)1 << 4)
94 #define arg_rampup ((uint32_t)1 << 5)
95 #define arg_healthy ((uint32_t)1 << 6)
96 #define arg_param ((uint32_t)1 << 7)
97 #define arg_resolve ((uint32_t)1 << 8)
98 #define arg_mask_ ((arg_resolve << 1) - 1)
99 /* allowed in shard_param.set */
100 #define arg_mask_set_ (arg_param - 1)
101 /* allowed in shard_param */
102 #define arg_mask_param_ ( arg_mask_set_ \
103 & ~arg_key \
104 & ~arg_key_blob )
105
106 /* -------------------------------------------------------------------------
107 * shard parameters - declaration & defaults
108 */
/*
 * Owner/lifetime of a parameter struct: the static builtin default
 * (SCOPE_VMOD), a VCL-level shard_param object (SCOPE_VCL), a per-task
 * priv_task copy (SCOPE_TASK) or a transient merge target on the stack
 * (SCOPE_STACK).  Code using a param asserts scope > _SCOPE_INVALID.
 */
enum vmod_directors_shard_param_scope {
	_SCOPE_INVALID = 0,
	SCOPE_VMOD,
	SCOPE_VCL,
	SCOPE_TASK,
	SCOPE_STACK
};
116
struct vmod_directors_shard_param;

/* type tag for BLOBs created by shard_param.use(), checked by
 * shard_param_blob() */
#define VMOD_SHARD_SHARD_PARAM_BLOB 0xdf5ca116

struct vmod_directors_shard_param {
	unsigned magic;
#define VMOD_SHARD_SHARD_PARAM_MAGIC 0xdf5ca117

	/* internals */
	uint32_t key;		/* only meaningful for by=KEY / by=BLOB */
	const char *vcl_name;
	const struct vmod_directors_shard_param *defaults; /* fallback chain */
	enum vmod_directors_shard_param_scope scope;

	/* parameters */
	VCL_ENUM by;		/* NULL means HASH, see default_by() */
	VCL_ENUM healthy;	/* NULL means CHOSEN, see default_healthy() */
	uint32_t mask;		/* arg_* bits of the fields explicitly set */
	VCL_BOOL rampup;
	VCL_INT alt;
	VCL_REAL warmup;	/* valid values: [0,1] or -1 (default) */
};
139
/*
 * Builtin defaults - the end of every ->defaults chain.  by and healthy
 * are left NULL and resolved through the default_* macros below (HASH
 * resp. CHOSEN); rampup is on, alt 0, warmup disabled (-1).
 */
static const struct vmod_directors_shard_param shard_param_default = {
	.magic = VMOD_SHARD_SHARD_PARAM_MAGIC,

	.key = 0,
	.vcl_name = "builtin defaults",
	.defaults = NULL,
	.scope = SCOPE_VMOD,

	.mask = arg_mask_param_,
	.rampup = 1,
	.alt = 0,
	.warmup = -1,
};
153
154 #define default_by(ptr) (ptr == NULL ? VENUM(HASH) : ptr)
155 #define default_healthy(ptr) (ptr == NULL ? VENUM(CHOSEN) : ptr)
156
157 static struct vmod_directors_shard_param *
158 shard_param_stack(struct vmod_directors_shard_param *p,
159 const struct vmod_directors_shard_param *pa, const char *who);
160
161 static const struct vmod_directors_shard_param *
162 shard_param_task_r(VRT_CTX, const void *id, const char *who,
163 const struct vmod_directors_shard_param *pa);
164
165 static struct vmod_directors_shard_param *
166 shard_param_task_l(VRT_CTX, const void *id, const char *who,
167 const struct vmod_directors_shard_param *pa);
168
169 static const struct vmod_directors_shard_param *
170 shard_param_blob(VCL_BLOB blob);
171
172 static const struct vmod_directors_shard_param *
173 vmod_shard_param_read(VRT_CTX, const void *id, const char *who,
174 const struct vmod_directors_shard_param *p,
175 struct vmod_directors_shard_param *pstk);
176
177 // XXX #3329 #3330 revisit - for now, treat pipe like backend
178 #define SHARD_VCL_TASK_REQ (VCL_MET_TASK_C & ~VCL_MET_PIPE)
179 #define SHARD_VCL_TASK_BEREQ (VCL_MET_TASK_B | VCL_MET_PIPE)
180 /* -------------------------------------------------------------------------
181 * shard vmod interface
182 */
183 static vdi_healthy_f vmod_shard_healthy;
184 static vdi_resolve_f vmod_shard_resolve;
185 static vdi_list_f vmod_shard_list;
186
/* VCL object instance: owns the sharddir state and the registered
 * director handle returned by VRT_AddDirector() */
struct vmod_directors_shard {
	unsigned magic;
#define VMOD_SHARD_SHARD_MAGIC 0x6e63e1bf
	struct sharddir *shardd;
	VCL_BACKEND dir;
};
193
194 static void
shard__assert(void)195 shard__assert(void)
196 {
197 VCL_INT t1;
198 uint32_t t2a, t2b;
199
200 /* we put our uint32 key in a VCL_INT container */
201 assert(sizeof(VCL_INT) >= sizeof(uint32_t));
202 t2a = UINT32_MAX;
203 t1 = (VCL_INT)t2a;
204 t2b = (uint32_t)t1;
205 assert(t2a == t2b);
206 }
207
/* vdi destroy callback: tear down the sharddir stored in dir->priv */
static void v_matchproto_(vdi_destroy_f)
vmod_shard_destroy(VCL_BACKEND dir)
{
	struct sharddir *shardd;

	CAST_OBJ_NOTNULL(shardd, dir->priv, SHARDDIR_MAGIC);
	sharddir_delete(&shardd);
}
216
/* vdi callbacks for directors created in vmod_shard__init() */
static const struct vdi_methods vmod_shard_methods[1] = {{
	.magic = VDI_METHODS_MAGIC,
	.type = "shard",
	.resolve = vmod_shard_resolve,
	.healthy = vmod_shard_healthy,
	.destroy = vmod_shard_destroy,
	.list = vmod_shard_list
}};
225
226
/*
 * VCL object constructor: allocate the instance, create the sharddir
 * state (associated with the builtin default parameters) and register
 * the director under the object's vcl_name.
 */
VCL_VOID v_matchproto_(td_directors_shard__init)
vmod_shard__init(VRT_CTX, struct vmod_directors_shard **vshardp,
    const char *vcl_name)
{
	struct vmod_directors_shard *vshard;

	shard__assert();

	AN(vshardp);
	AZ(*vshardp);
	ALLOC_OBJ(vshard, VMOD_SHARD_SHARD_MAGIC);
	AN(vshard);

	*vshardp = vshard;
	sharddir_new(&vshard->shardd, vcl_name, &shard_param_default);

	vshard->dir = VRT_AddDirector(ctx, vmod_shard_methods, vshard->shardd,
	    "%s", vcl_name);
}
246
/* VCL object destructor: deregister the director (the sharddir is
 * released via the .destroy callback, vmod_shard_destroy) */
VCL_VOID v_matchproto_(td_directors_shard__fini)
vmod_shard__fini(struct vmod_directors_shard **vshardp)
{
	struct vmod_directors_shard *vshard;

	TAKE_OBJ_NOTNULL(vshard, vshardp, VMOD_SHARD_SHARD_MAGIC);
	VRT_DelDirector(&vshard->dir);
	FREE_OBJ(vshard);
}
256
v_matchproto_(td_directors_shard_key)257 VCL_INT v_matchproto_(td_directors_shard_key)
258 vmod_shard_key(VRT_CTX, struct vmod_directors_shard *vshard, VCL_STRANDS s)
259 {
260
261 (void)ctx;
262 (void)vshard;
263
264 return ((VCL_INT)VRT_HashStrands32(s));
265 }
266
/*
 * shard.set_warmup(): set the default warmup probability.
 * Values outside [0,1) are ignored with a notice in the log.
 */
VCL_VOID v_matchproto_(td_directors_set_warmup)
vmod_shard_set_warmup(VRT_CTX, struct vmod_directors_shard *vshard,
    VCL_REAL probability)
{
	CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);
	if (probability < 0 || probability >= 1) {
		shard_notice(ctx->vsl, vshard->shardd->name,
		    ".set_warmup(%f) ignored", probability);
		return;
	}
	shardcfg_set_warmup(vshard->shardd, probability);
}
279
/* shard.set_rampup(): set the default rampup duration */
VCL_VOID v_matchproto_(td_directors_set_rampup)
vmod_shard_set_rampup(VRT_CTX, struct vmod_directors_shard *vshard,
    VCL_DURATION duration)
{
	(void)ctx;
	CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);
	shardcfg_set_rampup(vshard->shardd, duration);
}
288
/*
 * shard.associate(): make a shard_param object (wrapped in a BLOB by
 * shard_param.use()) the director's default parameter set.  A NULL BLOB
 * restores the builtin defaults; any other BLOB fails the VCL.
 */
VCL_VOID v_matchproto_(td_directors_shard_associate)
vmod_shard_associate(VRT_CTX,
    struct vmod_directors_shard *vshard, VCL_BLOB b)
{
	const struct vmod_directors_shard_param *ppt;
	CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);

	if (b == NULL) {
		sharddir_set_param(vshard->shardd, &shard_param_default);
		return;
	}

	ppt = shard_param_blob(b);

	if (ppt == NULL) {
		shard_fail(ctx, vshard->shardd->name, "%s",
		    "shard .associate param invalid");
		return;
	}

	sharddir_set_param(vshard->shardd, ppt);
}
311
/*
 * shard.add_backend(): add a backend, optionally with an ident string,
 * a per-backend rampup and a weight.  Weights below 1 are ignored with
 * a notice; an absent rampup is passed to shardcfg as NAN.
 */
VCL_BOOL v_matchproto_(td_directors_shard_add_backend)
vmod_shard_add_backend(VRT_CTX, struct vmod_directors_shard *vshard,
    struct VARGS(shard_add_backend) *args)
{
	VCL_REAL weight = 1;

	CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);

	if (args->backend == NULL) {
		shard_fail(ctx, vshard->shardd->name, "%s",
		    "None backend cannot be added");
		return (0);
	}

	if (args->valid_weight) {
		if (args->weight >= 1)
			weight = args->weight;
		else
			shard_notice(ctx->vsl, vshard->shardd->name,
			    ".add_backend(weight=%f) ignored", args->weight);
	}

	return (shardcfg_add_backend(ctx, vshard->shardd, args->backend,
	    args->valid_ident ? args->ident : NULL,
	    args->valid_rampup ? args->rampup : nan(""),
	    weight));
}
339
/*
 * shard.remove_backend(): remove by backend, by ident, or both.
 * At least one of the two selectors must be given.
 */
VCL_BOOL v_matchproto_(td_directors_shard_remove_backend)
vmod_shard_remove_backend(VRT_CTX, struct vmod_directors_shard *vshard,
    struct VARGS(shard_remove_backend) *args)
{
	VCL_BACKEND be = args->valid_backend ? args->backend : NULL;
	VCL_STRING ident = args->valid_ident ? args->ident : NULL;

	CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);

	if (be == NULL && ident == NULL) {
		shard_fail(ctx, vshard->shardd->name, "%s",
		    ".remove_backend(): either backend or ident are required");
		return (0);
	}

	return (shardcfg_remove_backend(ctx, vshard->shardd, be, ident));
}
357
/* shard.clear(): remove the director's backends (via shardcfg_clear) */
VCL_BOOL v_matchproto_(td_directors_shard_clear)
vmod_shard_clear(VRT_CTX, struct vmod_directors_shard *vshard)
{
	CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);
	return (shardcfg_clear(ctx, vshard->shardd));
}
364
v_matchproto_(td_directors_shard_reconfigure)365 VCL_BOOL v_matchproto_(td_directors_shard_reconfigure)
366 vmod_shard_reconfigure(VRT_CTX, struct vmod_directors_shard *vshard,
367 VCL_INT replicas)
368 {
369 return (shardcfg_reconfigure(ctx, vshard->shardd, replicas));
370 }
371
/*
 * Compute the 32 bit shard key selected by p->by:
 * - KEY / BLOB: the key already stored in the params
 * - HASH with a busyobj present: the object digest
 * - HASH (client side) / URL: hash of the (be)req URL
 * Note the order: the busyobj digest takes precedence for by=HASH.
 */
static inline uint32_t
shard_get_key(VRT_CTX, const struct vmod_directors_shard_param *p)
{
	struct http *http;
	struct strands s[1];
	const char *sp[1];
	VCL_ENUM by = default_by(p->by);

	if (by == VENUM(KEY) || by == VENUM(BLOB))
		return (p->key);
	if (by == VENUM(HASH) && ctx->bo != NULL) {
		CHECK_OBJ(ctx->bo, BUSYOBJ_MAGIC);
		return (vbe32dec(ctx->bo->digest));
	}
	if (by == VENUM(HASH) || by == VENUM(URL)) {
		if (ctx->http_req) {
			AN(http = ctx->http_req);
		} else {
			AN(ctx->http_bereq);
			AN(http = ctx->http_bereq);
		}
		/* hash the URL as a single strand */
		sp[0] = http->hd[HTTP_HDR_URL].b;
		s->n = 1;
		s->p = sp;
		return (VRT_HashStrands32(s));
	}
	WRONG("by enum");
}
400
401 /*
402 * merge parameters to resolve all undef values
403 * key is to be calculated after merging
404 */
static void
shard_param_merge(struct vmod_directors_shard_param *to,
    const struct vmod_directors_shard_param *from)
{
	CHECK_OBJ_NOTNULL(to, VMOD_SHARD_SHARD_PARAM_MAGIC);
	assert((to->mask & ~arg_mask_param_) == 0);

	/* all fields already resolved? */
	if (to->mask == arg_mask_param_)
		return;

	CHECK_OBJ_NOTNULL(from, VMOD_SHARD_SHARD_PARAM_MAGIC);
	assert((from->mask & ~arg_mask_param_) == 0);

	/* "by" carries its key along for the explicit-key variants */
	if ((to->mask & arg_by) == 0 && (from->mask & arg_by) != 0) {
		to->by = from->by;
		if (from->by == VENUM(KEY) || from->by == VENUM(BLOB))
			to->key = from->key;
	}

/* copy field from "from" unless already set in "to" */
#define mrg(to, from, field) do {					\
		if (((to)->mask & arg_ ## field) == 0 &&		\
		    ((from)->mask & arg_ ## field) != 0)		\
			(to)->field = (from)->field;			\
	} while(0)

	mrg(to, from, healthy);
	mrg(to, from, rampup);
	mrg(to, from, alt);
	mrg(to, from, warmup);
#undef mrg

	to->mask |= from->mask;

	if (to->mask == arg_mask_param_)
		return;

	/* recurse down the defaults chain until fully resolved */
	AN(from->defaults);
	shard_param_merge(to, from->defaults);
}
444
445 static uint32_t
shard_blob_key(VCL_BLOB key_blob)446 shard_blob_key(VCL_BLOB key_blob)
447 {
448 uint8_t k[4] = { 0 };
449 const uint8_t *b;
450 size_t i, ki;
451
452 AN(key_blob);
453 AN(key_blob->blob);
454 assert(key_blob->len > 0);
455
456 if (key_blob->len >= 4)
457 ki = 0;
458 else
459 ki = 4 - key_blob->len;
460
461 b = key_blob->blob;
462 for (i = 0; ki < 4; i++, ki++)
463 k[ki] = b[i];
464 assert(i <= key_blob->len);
465
466 return (vbe32dec(k));
467 }
468
469 /*
470 * convert vmod interface valid_* to our bitmask
471 */
472
473 #define tobit(args, name) ((args)->valid_##name ? arg_##name : 0)
474
/* collect the valid_* flags of shard.backend() arguments into arg_* bits */
static uint32_t
shard_backendarg_mask_(const struct VARGS(shard_backend) * const a)
{
	return (tobit(a, by)		|
		tobit(a, key)		|
		tobit(a, key_blob)	|
		tobit(a, alt)		|
		tobit(a, warmup)	|
		tobit(a, rampup)	|
		tobit(a, healthy)	|
		tobit(a, param)		|
		tobit(a, resolve));
}
/* collect the valid_* flags of shard_param.set() arguments into arg_* bits */
static uint32_t
shard_param_set_mask(const struct VARGS(shard_param_set) * const a)
{
	return (tobit(a, by)		|
		tobit(a, key)		|
		tobit(a, key_blob)	|
		tobit(a, alt)		|
		tobit(a, warmup)	|
		tobit(a, rampup)	|
		tobit(a, healthy));
}
499 #undef tobit
500
501 /*
502 * check arguments and return in a struct param
503 */
/*
 * Validate the given arguments (args is the bitmask of those actually
 * present, restricted to arg_mask_set_) and store them in *p.
 *
 * func names the calling VCL method for error messages.  Returns p on
 * success, NULL after signaling a VCL failure.  Note the by=BLOB case
 * with an empty key_blob only logs an error and falls back to key 0.
 */
static struct vmod_directors_shard_param *
shard_param_args(VRT_CTX,
    struct vmod_directors_shard_param *p, const char *func,
    uint32_t args, VCL_ENUM by_s, VCL_INT key_int, VCL_BLOB key_blob,
    VCL_INT alt, VCL_REAL warmup, VCL_BOOL rampup, VCL_ENUM healthy_s)
{

	CHECK_OBJ_NOTNULL(p, VMOD_SHARD_SHARD_PARAM_MAGIC);
	AN(p->vcl_name);

	assert((args & ~arg_mask_set_) == 0);

	if (!(args & arg_by))
		by_s = NULL;
	by_s = default_by(by_s);

	/* by_s / key_int / key_blob */
	if (by_s == VENUM(KEY)) {
		/* explicit key: required and must fit in a uint32 */
		if ((args & arg_key) == 0) {
			shard_fail(ctx, p->vcl_name,
			    "%s missing key argument with by=%s",
			    func, by_s);
			return (NULL);
		}
		if (key_int < 0 || key_int > UINT32_MAX) {
			shard_fail(ctx, p->vcl_name,
			    "%s invalid key argument %jd with by=%s",
			    func, (intmax_t)key_int, by_s);
			return (NULL);
		}
		assert(key_int >= 0);
		assert(key_int <= UINT32_MAX);
		p->key = (uint32_t)key_int;
	} else if (by_s == VENUM(BLOB)) {
		if ((args & arg_key_blob) == 0) {
			shard_fail(ctx, p->vcl_name,
			    "%s missing key_blob argument with by=%s",
			    func, by_s);
			return (NULL);
		}
		if (key_blob == NULL || key_blob->len == 0 ||
		    key_blob->blob == NULL) {
			/* non-fatal: log and use key 0 */
			shard_err(ctx->vsl, p->vcl_name,
			    "%s by=BLOB but no or empty key_blob - using key 0",
			    func);
			p->key = 0;
		} else
			p->key = shard_blob_key(key_blob);
	} else if (by_s == VENUM(HASH) || by_s == VENUM(URL)) {
		/* computed keys: explicit key arguments make no sense */
		if (args & (arg_key|arg_key_blob)) {
			shard_fail(ctx, p->vcl_name,
			    "%s key and key_blob arguments are "
			    "invalid with by=%s", func, by_s);
			return (NULL);
		}
	} else {
		WRONG("by enum");
	}
	p->by = by_s;

	if (args & arg_alt) {
		if (alt < 0) {
			shard_fail(ctx, p->vcl_name,
			    "%s invalid alt argument %jd",
			    func, (intmax_t)alt);
			return (NULL);
		}
		p->alt = alt;
	}

	if (args & arg_warmup) {
		/* valid warmup: [0,1] or the "disabled" marker -1 */
		if ((warmup < 0 && warmup != -1) || warmup > 1) {
			shard_fail(ctx, p->vcl_name,
			    "%s invalid warmup argument %f",
			    func, warmup);
			return (NULL);
		}
		p->warmup = warmup;
	}

	if (args & arg_rampup)
		p->rampup = !!rampup;

	if (args & arg_healthy)
		p->healthy = healthy_s;

	p->mask = args & arg_mask_param_;
	return (p);
}
593
/*
 * shard.backend([args])
 *
 * resolve=NOW: pick and return a concrete backend immediately.
 * resolve=LAZY: return the director itself; any additional arguments
 * are stored in a task-scoped parameter object to be applied later in
 * vmod_shard_resolve().  The default is LAZY in vcl_init/vcl_fini and
 * NOW everywhere else.
 */
VCL_BACKEND v_matchproto_(td_directors_shard_backend)
vmod_shard_backend(VRT_CTX, struct vmod_directors_shard *vshard,
    struct VARGS(shard_backend) *a)
{
	struct sharddir *shardd;
	struct vmod_directors_shard_param pstk;
	struct vmod_directors_shard_param *pp = NULL;
	const struct vmod_directors_shard_param *ppt;
	VCL_ENUM resolve;
	uint32_t args = shard_backendarg_mask_(a);

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);
	shardd = vshard->shardd;
	CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
	assert((args & ~arg_mask_) == 0);

	if (args & arg_resolve)
		resolve = a->resolve;
	else if (ctx->method & VCL_MET_TASK_H)
		resolve = VENUM(LAZY);
	else
		resolve = VENUM(NOW);

	if (resolve == VENUM(LAZY)) {
		/* plain LAZY without parameters: nothing to save */
		if ((args & ~arg_resolve) == 0) {
			AN(vshard->dir);
			return (vshard->dir);
		}

		/* parameters require a priv_task, only available in
		 * backend/pipe context */
		if ((ctx->method & SHARD_VCL_TASK_BEREQ) == 0) {
			shard_fail(ctx, shardd->name, "%s",
			    ".backend(resolve=LAZY) with other "
			    "parameters can only be used in backend/pipe "
			    "context");
			return (NULL);
		}

		pp = shard_param_task_l(ctx, shardd, shardd->name,
		    shardd->param);
		if (pp == NULL)
			return (NULL);
	} else if (resolve == VENUM(NOW)) {
		if (ctx->method & VCL_MET_TASK_H) {
			shard_fail(ctx, shardd->name, "%s",
			    ".backend(resolve=NOW) can not be "
			    "used in vcl_init{}/vcl_fini{}");
			return (NULL);
		}
		/* NOW: work on a stack copy defaulting to the current
		 * task/director parameters */
		ppt = shard_param_task_r(ctx, shardd, shardd->name,
		    shardd->param);
		AN(ppt);
		pp = shard_param_stack(&pstk, ppt, shardd->name);
	} else {
		WRONG("resolve enum");
	}

	AN(pp);

	if (args & arg_param) {
		ppt = shard_param_blob(a->param);
		if (ppt == NULL) {
			shard_fail(ctx, shardd->name, "%s",
			    ".backend(key_blob) param invalid");
			return (NULL);
		}
		/* explicit param object becomes the fallback */
		pp->defaults = ppt;
	}

	pp = shard_param_args(ctx, pp, "shard.backend()",
	    args & arg_mask_set_,
	    a->by, a->key, a->key_blob, a->alt, a->warmup,
	    a->rampup, a->healthy);
	if (pp == NULL)
		return (NULL);

	if (resolve == VENUM(LAZY))
		return (vshard->dir);

	assert(resolve == VENUM(NOW));
	shard_param_merge(pp, pp->defaults);
	return (sharddir_pick_be(ctx, shardd, shard_get_key(ctx, pp),
	    pp->alt, pp->warmup, pp->rampup, pp->healthy));
}
678
/* vdi healthy callback: delegates to sharddir_any_healthy() */
static VCL_BOOL v_matchproto_(vdi_healthy)
vmod_shard_healthy(VRT_CTX, VCL_BACKEND dir, VCL_TIME *changed)
{
	struct sharddir *shardd;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(dir, DIRECTOR_MAGIC);
	CAST_OBJ_NOTNULL(shardd, dir->priv, SHARDDIR_MAGIC);
	return (sharddir_any_healthy(ctx, shardd, changed));
}
689
/*
 * vdi resolve callback (LAZY mode): resolve the effective parameters
 * (task overrides merged over the director's defaults) at the time the
 * director is actually used, then pick a backend.
 */
static VCL_BACKEND v_matchproto_(vdi_resolve_f)
vmod_shard_resolve(VRT_CTX, VCL_BACKEND dir)
{
	struct sharddir *shardd;
	struct vmod_directors_shard_param pstk[1];
	const struct vmod_directors_shard_param *pp;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(dir, DIRECTOR_MAGIC);
	CAST_OBJ_NOTNULL(shardd, dir->priv, SHARDDIR_MAGIC);

	pp = vmod_shard_param_read(ctx, shardd, shardd->name,
	    shardd->param, pstk);
	CHECK_OBJ_NOTNULL(pp, VMOD_SHARD_SHARD_PARAM_MAGIC);

	return (sharddir_pick_be(ctx, shardd,
	    shard_get_key(ctx, pp), pp->alt, pp->warmup,
	    pp->rampup, pp->healthy));
}
709
/*
 * vdi list callback: emit the director's backends and health state.
 *
 * pflag selects the detailed per-backend listing, jflag JSON output;
 * without pflag only the summary (healthy count / total) is written.
 */
static void v_matchproto_(vdi_list_f)
vmod_shard_list(VRT_CTX, VCL_BACKEND dir, struct vsb *vsb, int pflag, int jflag)
{
	struct sharddir *shardd;
	struct shard_backend *sbe;
	VCL_TIME c, changed = 0;
	VCL_DURATION rampup_d, d;
	VCL_BACKEND be;
	VCL_BOOL h;
	unsigned i, nh = 0;
	double rampup_p;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(dir, DIRECTOR_MAGIC);
	CAST_OBJ_NOTNULL(shardd, dir->priv, SHARDDIR_MAGIC);

	if (pflag) {
		if (jflag) {
			VSB_cat(vsb, "{\n");
			VSB_indent(vsb, 2);
			VSB_printf(vsb, "\"warmup\": %f,\n", shardd->warmup);
			VSB_printf(vsb, "\"rampup_duration\": %f,\n",
			    shardd->rampup_duration);
			VSB_cat(vsb, "\"backends\": {\n");
			VSB_indent(vsb, 2);
		} else {
			VSB_cat(vsb, "\n\n\tBackend\tIdent\tHealth\t"
			    "Rampup Remaining\n");
		}
	}

	sharddir_rdlock(shardd);
	for (i = 0; i < shardd->n_backend; i++) {
		sbe = &shardd->backend[i];
		AN(sbe);
		be = sbe->backend;
		CHECK_OBJ_NOTNULL(be, DIRECTOR_MAGIC);

		c = 0;
		h = VRT_Healthy(ctx, be, &c);
		if (h)
			nh++;
		if (c > changed)
			changed = c;
		if ((pflag) == 0)
			continue;

		/* rampup progress: time since last health change vs.
		 * the backend's rampup duration */
		d = ctx->now - c;
		rampup_d = shardcfg_get_rampup(shardd, i);
		if (! h) {
			rampup_p = 0.0;
			rampup_d = 0.0;
		} else if (d < rampup_d) {
			rampup_p = d / rampup_d;
			rampup_d -= d;
		} else {
			rampup_p = 1.0;
			rampup_d = 0.0;
		}

		if (jflag) {
			if (i)
				VSB_cat(vsb, ",\n");
			VSB_printf(vsb, "\"%s\": {\n",
			    be->vcl_name);
			VSB_indent(vsb, 2);
			VSB_printf(vsb, "\"ident\": \"%s\",\n",
			    sbe->ident ? sbe->ident : be->vcl_name);
			VSB_printf(vsb, "\"health\": \"%s\",\n",
			    h ? "healthy" : "sick");
			VSB_printf(vsb, "\"rampup\": %f,\n", rampup_p);
			VSB_printf(vsb, "\"rampup_remaining\": %.3f\n",
			    rampup_d);
			VSB_indent(vsb, -2);
			VSB_cat(vsb, "}");
		} else {
			VSB_printf(vsb, "\t%s\t%s\t%s\t%6.2f%% %8.3fs\n",
			    be->vcl_name,
			    sbe->ident ? sbe->ident : be->vcl_name,
			    h ? "healthy" : "sick",
			    rampup_p * 100, rampup_d);
		}
	}
	sharddir_unlock(shardd);

	if (jflag && (pflag)) {
		VSB_cat(vsb, "\n");
		VSB_indent(vsb, -2);
		VSB_cat(vsb, "}\n");
		VSB_indent(vsb, -2);
		VSB_cat(vsb, "},\n");
	}

	if (pflag)
		return;

	if (jflag)
		VSB_printf(vsb, "[%u, %u, \"%s\"]", nh, i,
		    nh ? "healthy" : "sick");
	else
		VSB_printf(vsb, "%u/%u\t%s", nh, i, nh ? "healthy" : "sick");
}
812
/*
 * shard.debug(): pass a debug flag value (truncated to 32 bits) to the
 * sharddir.
 * NOTE(review): the matchproto names td_directors_shard_backend - looks
 * like a copy/paste; confirm against the vcc-generated prototype.
 */
VCL_VOID v_matchproto_(td_directors_shard_backend)
vmod_shard_debug(VRT_CTX, struct vmod_directors_shard *vshard,
    VCL_INT i)
{
	CHECK_OBJ_NOTNULL(vshard, VMOD_SHARD_SHARD_MAGIC);

	(void)ctx;
	sharddir_debug(vshard->shardd, i & UINT32_MAX);
}
822
823 /* =============================================================
824 * shard_param
825 */
826
/*
 * shard_param object constructor: VCL scope, all fields unset, falling
 * back to the builtin defaults.
 */
VCL_VOID v_matchproto_(td_directors_shard_param__init)
vmod_shard_param__init(VRT_CTX,
    struct vmod_directors_shard_param **pp, const char *vcl_name)
{
	struct vmod_directors_shard_param *p;

	(void) ctx;
	AN(pp);
	AZ(*pp);
	ALLOC_OBJ(p, VMOD_SHARD_SHARD_PARAM_MAGIC);
	AN(p);
	p->vcl_name = vcl_name;
	p->scope = SCOPE_VCL;
	p->defaults = &shard_param_default;

	*pp = p;
}
844
/* shard_param object destructor */
VCL_VOID v_matchproto_(td_directors_shard_param__fini)
vmod_shard_param__fini(struct vmod_directors_shard_param **pp)
{
	struct vmod_directors_shard_param *p;

	TAKE_OBJ_NOTNULL(p, pp, VMOD_SHARD_SHARD_PARAM_MAGIC);
	FREE_OBJ(p);
}
853
854 /*
855 * init a stack param struct defaulting to pa with the given name
856 */
static struct vmod_directors_shard_param *
shard_param_stack(struct vmod_directors_shard_param *p,
    const struct vmod_directors_shard_param *pa, const char *who)
{
	CHECK_OBJ_NOTNULL(pa, VMOD_SHARD_SHARD_PARAM_MAGIC);
	assert(pa->scope > _SCOPE_INVALID);

	/* caller provides the storage; we only initialize it as an
	 * empty SCOPE_STACK param whose fallback is pa */
	AN(p);
	INIT_OBJ(p, VMOD_SHARD_SHARD_PARAM_MAGIC);
	p->vcl_name = who;
	p->scope = SCOPE_STACK;
	p->defaults = pa;

	return (p);
}
872
/*
 * read-only lookup of the task-scoped param override for id, falling
 * back to pa.  If pa is a VCL object (and not id itself), it may have a
 * task override of its own, so recurse once more with pa as the id.
 */
static const struct vmod_directors_shard_param *
shard_param_task_r(VRT_CTX, const void *id, const char *who,
    const struct vmod_directors_shard_param *pa)
{
	const struct vmod_directors_shard_param *p;
	const struct vmod_priv *task;
	const void *task_id;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(pa, VMOD_SHARD_SHARD_PARAM_MAGIC);
	assert(pa->scope > _SCOPE_INVALID);

	/* task_off_param distinguishes this use of id from others */
	task_id = (const char *)id + task_off_param;
	task = VRT_priv_task_get(ctx, task_id);

	if (task) {
		CAST_OBJ_NOTNULL(p, task->priv, VMOD_SHARD_SHARD_PARAM_MAGIC);
		assert(p->scope == SCOPE_TASK);
		assert(who == p->vcl_name);
		return (p);
	}

	if (id == pa || pa->scope != SCOPE_VCL)
		return (pa);

	return (shard_param_task_r(ctx, pa, pa->vcl_name, pa));
}
900
901 /*
902 * get a task scoped param struct for id defaulting to pa
903 * if id != pa and pa has VCL scope, also get a task scoped param struct for pa
904 */
static struct vmod_directors_shard_param *
shard_param_task_l(VRT_CTX, const void *id, const char *who,
    const struct vmod_directors_shard_param *pa)
{
	struct vmod_directors_shard_param *p;
	struct vmod_priv *task;
	const void *task_id;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(pa, VMOD_SHARD_SHARD_PARAM_MAGIC);
	assert(pa->scope > _SCOPE_INVALID);

	/* task_off_param distinguishes this use of id from others */
	task_id = (const char *)id + task_off_param;
	task = VRT_priv_task(ctx, task_id);

	if (task == NULL) {
		shard_fail(ctx, who, "%s", "no priv_task");
		return (NULL);
	}

	/* already created earlier in this task? */
	if (task->priv) {
		CAST_OBJ_NOTNULL(p, task->priv, VMOD_SHARD_SHARD_PARAM_MAGIC);
		assert(p->scope == SCOPE_TASK);
		assert(who == p->vcl_name);
		return (p);
	}

	/* workspace allocation: lives for the task, no free needed */
	p = WS_Alloc(ctx->ws, sizeof *p);
	if (p == NULL) {
		shard_fail(ctx, who, "%s", "WS_Alloc failed");
		return (NULL);
	}
	task->priv = p;
	INIT_OBJ(p, VMOD_SHARD_SHARD_PARAM_MAGIC);
	p->vcl_name = who;
	p->scope = SCOPE_TASK;

	/* a VCL-scope fallback gets its own task copy (see function
	 * comment above) */
	if (id == pa || pa->scope != SCOPE_VCL)
		p->defaults = pa;
	else
		p->defaults = shard_param_task_l(ctx, pa, pa->vcl_name, pa);

	if (p->defaults == NULL)
		return (NULL);

	return (p);
}
952
/*
 * Prepare p for modification in the current context: fail on the client
 * side (except pipe), redirect to a task-scoped copy in backend/pipe
 * context, and use p directly in vcl_init/vcl_fini.
 */
static struct vmod_directors_shard_param *
shard_param_prep(VRT_CTX, struct vmod_directors_shard_param *p,
    const char *who)
{
	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(p, VMOD_SHARD_SHARD_PARAM_MAGIC);

	if (ctx->method & SHARD_VCL_TASK_REQ) {
		shard_fail(ctx, p->vcl_name, "%s may only be used "
		    "in vcl_init and in backend/pipe context", who);
		return (NULL);
	} else if (ctx->method & SHARD_VCL_TASK_BEREQ)
		p = shard_param_task_l(ctx, p, p->vcl_name, p);
	else
		assert(ctx->method & VCL_MET_TASK_H);

	return (p);
}
971
/* shard_param.set(): validate and store the given arguments, possibly
 * into a task-scoped copy (see shard_param_prep()) */
VCL_VOID v_matchproto_(td_directors_shard_param_set)
vmod_shard_param_set(VRT_CTX, struct vmod_directors_shard_param *p,
    struct VARGS(shard_param_set) *a)
{
	uint32_t args = shard_param_set_mask(a);

	assert((args & ~arg_mask_set_) == 0);

	p = shard_param_prep(ctx, p, "shard_param.set()");
	if (p == NULL)
		return;
	/* errors are signaled via shard_fail inside */
	(void) shard_param_args(ctx, p, "shard_param.set()", args,
	    a->by, a->key, a->key_blob, a->alt, a->warmup,
	    a->rampup, a->healthy);
}
987
/* shard_param.clear(): mark all fields unset again (mask = 0) */
VCL_VOID v_matchproto_(td_directors_shard_param_clear)
vmod_shard_param_clear(VRT_CTX,
    struct vmod_directors_shard_param *p)
{
	p = shard_param_prep(ctx, p, "shard_param.clear()");
	if (p == NULL)
		return;
	p->mask = 0;
}
997
/*
 * Resolve the effective parameter set for id: consult the task-scoped
 * override (in backend/pipe context or when no method is set) and merge
 * everything down the defaults chain into the caller-provided pstk.
 */
static const struct vmod_directors_shard_param *
vmod_shard_param_read(VRT_CTX, const void *id, const char *who,
    const struct vmod_directors_shard_param *p,
    struct vmod_directors_shard_param *pstk)
{
	struct vmod_directors_shard_param *pp;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(p, VMOD_SHARD_SHARD_PARAM_MAGIC);

	if (ctx->method == 0 || (ctx->method & SHARD_VCL_TASK_BEREQ))
		p = shard_param_task_r(ctx, id, who, p);

	CHECK_OBJ_NOTNULL(p, VMOD_SHARD_SHARD_PARAM_MAGIC);
	pp = shard_param_stack(pstk, p, p->vcl_name);
	shard_param_merge(pp, p);
	return (pp);
}
1016
v_matchproto_(td_directors_shard_param_get_by)1017 VCL_STRING v_matchproto_(td_directors_shard_param_get_by)
1018 vmod_shard_param_get_by(VRT_CTX,
1019 struct vmod_directors_shard_param *p)
1020 {
1021 struct vmod_directors_shard_param pstk;
1022 const struct vmod_directors_shard_param *pp;
1023
1024 pp = vmod_shard_param_read(ctx, p, p->vcl_name, p, &pstk);
1025 CHECK_OBJ_NOTNULL(pp, VMOD_SHARD_SHARD_PARAM_MAGIC);
1026 return (default_by(pp->by));
1027 }
1028
v_matchproto_(td_directors_shard_param_get_key)1029 VCL_INT v_matchproto_(td_directors_shard_param_get_key)
1030 vmod_shard_param_get_key(VRT_CTX,
1031 struct vmod_directors_shard_param *p)
1032 {
1033 struct vmod_directors_shard_param pstk;
1034 const struct vmod_directors_shard_param *pp;
1035
1036 pp = vmod_shard_param_read(ctx, p, p->vcl_name, p, &pstk);
1037 CHECK_OBJ_NOTNULL(pp, VMOD_SHARD_SHARD_PARAM_MAGIC);
1038 return ((VCL_INT)shard_get_key(ctx, pp));
1039 }
v_matchproto_(td_directors_shard_param_get_alt)1040 VCL_INT v_matchproto_(td_directors_shard_param_get_alt)
1041 vmod_shard_param_get_alt(VRT_CTX,
1042 struct vmod_directors_shard_param *p)
1043 {
1044 struct vmod_directors_shard_param pstk;
1045 const struct vmod_directors_shard_param *pp;
1046
1047 pp = vmod_shard_param_read(ctx, p, p->vcl_name, p, &pstk);
1048 CHECK_OBJ_NOTNULL(pp, VMOD_SHARD_SHARD_PARAM_MAGIC);
1049 return (pp->alt);
1050 }
1051
v_matchproto_(td_directors_shard_param_get_warmup)1052 VCL_REAL v_matchproto_(td_directors_shard_param_get_warmup)
1053 vmod_shard_param_get_warmup(VRT_CTX,
1054 struct vmod_directors_shard_param *p)
1055 {
1056 struct vmod_directors_shard_param pstk;
1057 const struct vmod_directors_shard_param *pp;
1058
1059 pp = vmod_shard_param_read(ctx, p, p->vcl_name, p, &pstk);
1060 CHECK_OBJ_NOTNULL(pp, VMOD_SHARD_SHARD_PARAM_MAGIC);
1061 return (pp->warmup);
1062 }
1063
v_matchproto_(td_directors_shard_param_get_rampup)1064 VCL_BOOL v_matchproto_(td_directors_shard_param_get_rampup)
1065 vmod_shard_param_get_rampup(VRT_CTX,
1066 struct vmod_directors_shard_param *p)
1067 {
1068 struct vmod_directors_shard_param pstk;
1069 const struct vmod_directors_shard_param *pp;
1070
1071 pp = vmod_shard_param_read(ctx, p, p->vcl_name, p, &pstk);
1072 CHECK_OBJ_NOTNULL(pp, VMOD_SHARD_SHARD_PARAM_MAGIC);
1073 return (pp->rampup);
1074 }
1075
v_matchproto_(td_directors_shard_param_get_healthy)1076 VCL_STRING v_matchproto_(td_directors_shard_param_get_healthy)
1077 vmod_shard_param_get_healthy(VRT_CTX,
1078 struct vmod_directors_shard_param *p)
1079 {
1080 struct vmod_directors_shard_param pstk;
1081 const struct vmod_directors_shard_param *pp;
1082
1083 pp = vmod_shard_param_read(ctx, p, p->vcl_name, p, &pstk);
1084 CHECK_OBJ_NOTNULL(pp, VMOD_SHARD_SHARD_PARAM_MAGIC);
1085 return (default_healthy(pp->healthy));
1086 }
1087
1088 static const struct vmod_directors_shard_param *
shard_param_blob(VCL_BLOB blob)1089 shard_param_blob(VCL_BLOB blob)
1090 {
1091 const struct vmod_directors_shard_param *p;
1092
1093 if (blob && blob->type == VMOD_SHARD_SHARD_PARAM_BLOB &&
1094 blob->blob != NULL &&
1095 blob->len == sizeof(struct vmod_directors_shard_param)) {
1096 CAST_OBJ_NOTNULL(p, blob->blob, VMOD_SHARD_SHARD_PARAM_MAGIC);
1097 return (p);
1098 }
1099
1100 return (NULL);
1101 }
1102
/*
 * shard_param.use(): wrap the parameter object in a typed BLOB for
 * shard.associate() or the param argument of shard.backend().
 */
VCL_BLOB v_matchproto_(td_directors_shard_param_use)
vmod_shard_param_use(VRT_CTX,
    struct vmod_directors_shard_param *p)
{
	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(p, VMOD_SHARD_SHARD_PARAM_MAGIC);

	return (VRT_blob(ctx, "xshard_param.use()", p, sizeof *p,
	    VMOD_SHARD_SHARD_PARAM_BLOB));
}
1113