1 /*
2 * gw_backend - gateway backend code shared by dynamic socket backends
3 *
4 * Copyright(c) 2017 Glenn Strauss gstrauss()gluelogic.com All rights reserved
5 * License: BSD 3-clause (same as lighttpd)
6 */
7 #include "first.h"
8
9 #include "gw_backend.h"
10
11 #include <sys/types.h>
12 #include <sys/stat.h>
13 #include "sys-socket.h"
14 #ifdef HAVE_SYS_UIO_H
15 #include <sys/uio.h>
16 #endif
17 #ifdef HAVE_SYS_WAIT_H
18 #include <sys/wait.h>
19 #endif
20
21 #include <errno.h>
22 #include <fcntl.h>
23 #include <limits.h>
24 #include <stdint.h>
25 #include <stdlib.h>
26 #include <signal.h>
27 #include <string.h>
28 #include <unistd.h>
29
30 #include "base.h"
31 #include "algo_md.h"
32 #include "array.h"
33 #include "buffer.h"
34 #include "chunk.h"
35 #include "fdevent.h"
36 #include "http_header.h"
37 #include "log.h"
38 #include "sock_addr.h"
39
40
41
42
43 #include "status_counter.h"
44
__attribute_noinline__
static int * gw_status_get_counter(gw_host *host, gw_proc *proc, const char *tag, size_t tlen) {
    /* Build the status-counter key "gw.backend.<host-id>[.<proc-id>]<tag>"
     * in a fixed-size stack buffer and return pointer to the counter slot.
     * (At the cost of some memory, could prepare strings for host and for proc
     * so that here we would copy ready made string for proc (or if NULL,
     * for host), and then append tag to produce key)*/
    char label[288];
    size_t llen = sizeof("gw.backend.")-1, len;
    memcpy(label, "gw.backend.", llen);

    /* append host id (may be empty) */
    len = buffer_clen(host->id);
    if (len) {
        force_assert(len < sizeof(label) - llen);
        memcpy(label+llen, host->id->ptr, len);
        llen += len;
    }

    /* append ".<proc-id>" when a specific proc is addressed */
    if (proc) {
        force_assert(llen < sizeof(label) - (LI_ITOSTRING_LENGTH + 1));
        label[llen++] = '.';
        len = li_utostrn(label+llen, LI_ITOSTRING_LENGTH, proc->id);
        llen += len;
    }

    /* append tag (e.g. ".load", ".died") and NUL-terminate */
    force_assert(tlen < sizeof(label) - llen);
    memcpy(label+llen, tag, tlen);
    llen += tlen;
    label[llen] = '\0';

    return status_counter_get_counter(label, llen);
}
75
/* increment status counter "gw.backend.<host-id>[.<proc-id>]<tag>" by one */
static void gw_proc_tag_inc(gw_host *host, gw_proc *proc, const char *tag, size_t len) {
    int * const counter = gw_status_get_counter(host, proc, tag, len);
    ++*counter;
}
79
static void gw_proc_connected_inc(gw_host *host, gw_proc *proc) {
    /* bump cached "gw.backend...connected" counter for this proc */
    UNUSED(host);
    (*proc->stats_connected)++;
}
84
static void gw_proc_load_inc(gw_host *host, gw_proc *proc) {
    /* one more request on this proc; mirror into status counters */
    ++proc->load;
    *proc->stats_load = proc->load;       /* "gw.backend...load" */
    ++(*host->stats_global_active);       /* "gw.active-requests" */
}
89
static void gw_proc_load_dec(gw_host *host, gw_proc *proc) {
    /* one less request on this proc; mirror into status counters */
    --proc->load;
    *proc->stats_load = proc->load;       /* "gw.backend...load" */
    --(*host->stats_global_active);       /* "gw.active-requests" */
}
94
static void gw_host_assign(gw_host *host) {
    /* account one more request assigned to host ("gw.backend...load") */
    ++host->load;
    *host->stats_load = host->load;
}
98
static void gw_host_reset(gw_host *host) {
    /* account one request released from host ("gw.backend...load") */
    --host->load;
    *host->stats_load = host->load;
}
102
static void gw_status_init_proc(gw_host *host, gw_proc *proc) {
    /* reset per-proc status counters and cache pointers to the hot ones
     * (".connected", ".load") so later updates avoid key lookups */
    int *c;
    c = gw_status_get_counter(host, proc, CONST_STR_LEN(".disabled"));
    *c = 0;
    c = gw_status_get_counter(host, proc, CONST_STR_LEN(".died"));
    *c = 0;
    c = gw_status_get_counter(host, proc, CONST_STR_LEN(".overloaded"));
    *c = 0;
    c = gw_status_get_counter(host, proc, CONST_STR_LEN(".connected"));
    *c = 0;
    proc->stats_connected = c;
    c = gw_status_get_counter(host, proc, CONST_STR_LEN(".load"));
    *c = 0;
    proc->stats_load = c;
}
114
static void gw_status_init_host(gw_host *host) {
    /* cache per-host ".load" counter (reset to 0) and the shared
     * "gw.active-requests" global counter */
    int * const load =
      gw_status_get_counter(host, NULL, CONST_STR_LEN(".load"));
    *load = 0;
    host->stats_load = load;
    host->stats_global_active =
      status_counter_get_counter(CONST_STR_LEN("gw.active-requests"));
}
122
123
124
125
126 __attribute_cold__
gw_proc_set_state(gw_host * host,gw_proc * proc,int state)127 static void gw_proc_set_state(gw_host *host, gw_proc *proc, int state) {
128 if ((int)proc->state == state) return;
129 if (proc->state == PROC_STATE_RUNNING) {
130 --host->active_procs;
131 } else if (state == PROC_STATE_RUNNING) {
132 ++host->active_procs;
133 }
134 proc->state = state;
135 }
136
137
138 __attribute_cold__
139 __attribute_noinline__
gw_proc_init_portpath(gw_host * host,gw_proc * proc)140 static void gw_proc_init_portpath(gw_host *host, gw_proc *proc) {
141 if (!host->unixsocket) {
142 proc->port = host->port + proc->id;
143 return;
144 }
145
146 if (!proc->unixsocket)
147 proc->unixsocket = buffer_init();
148
149 if (!host->bin_path)
150 buffer_copy_buffer(proc->unixsocket, host->unixsocket);
151 else {
152 buffer_clear(proc->unixsocket);
153 buffer_append_str2(proc->unixsocket, BUF_PTR_LEN(host->unixsocket),
154 CONST_STR_LEN("-"));
155 buffer_append_int(proc->unixsocket, proc->id);
156 }
157 }
158
159 __attribute_cold__
160 __attribute_noinline__
161 __attribute_returns_nonnull__
gw_proc_init(gw_host * host)162 static gw_proc *gw_proc_init(gw_host *host) {
163 gw_proc *proc = calloc(1, sizeof(*proc));
164 force_assert(proc);
165
166 /*proc->unixsocket = buffer_init();*//*(init on demand)*/
167 proc->connection_name = buffer_init();
168
169 proc->prev = NULL;
170 proc->next = NULL;
171 proc->state = PROC_STATE_DIED;
172
173 proc->id = host->max_id++;
174 gw_status_init_proc(host, proc); /*(proc->id must be set)*/
175 gw_proc_init_portpath(host, proc);
176
177 return proc;
178 }
179
static void gw_proc_free(gw_proc *proc) {
    /* Free proc and every proc linked after it via ->next.
     * (iterative rather than recursive: the original recursed once per
     *  list element, risking deep stacks on long proc lists; behavior is
     *  otherwise identical)
     * NULL argument is a no-op. */
    while (proc) {
        gw_proc * const next = proc->next;

        buffer_free(proc->unixsocket);    /*(buffer_free(NULL) is a no-op)*/
        buffer_free(proc->connection_name);
        free(proc->saddr);
        free(proc);

        proc = next;
    }
}
191
192 __attribute_malloc__
193 __attribute_returns_nonnull__
gw_host_init(void)194 static gw_host *gw_host_init(void) {
195 gw_host *f = calloc(1, sizeof(*f));
196 force_assert(f);
197 return f;
198 }
199
static void gw_host_free(gw_host *h) {
    /* drop one reference; free host, its proc lists and its argv copy
     * only when the last reference is released */
    if (NULL == h) return;
    if (h->refcount) {
        --h->refcount;
        return;
    }

    gw_proc_free(h->first);
    gw_proc_free(h->unused_procs);

    for (uint32_t i = 0; i < h->args.used; ++i)
        free(h->args.ptr[i]);
    free(h->args.ptr);
    free(h);
}
214
215 __attribute_malloc__
216 __attribute_returns_nonnull__
gw_extensions_init(void)217 static gw_exts *gw_extensions_init(void) {
218 gw_exts *f = calloc(1, sizeof(*f));
219 force_assert(f);
220 return f;
221 }
222
static void gw_extensions_free(gw_exts *f) {
    /* free all extensions; each attached host has a reference dropped
     * (and is freed when its refcount reaches zero) */
    if (NULL == f) return;
    for (uint32_t i = 0; i < f->used; ++i) {
        gw_extension * const fe = &f->exts[i];
        for (uint32_t j = 0; j < fe->used; ++j)
            gw_host_free(fe->hosts[j]);
        free(fe->hosts);
    }
    free(f->exts);
    free(f);
}
235
static int gw_extension_insert(gw_exts *ext, const buffer *key, gw_host *fh) {
    /* Attach host fh to the extension matching key, creating the extension
     * entry if it does not exist yet.  Always returns 0.
     * (fh ownership: caller manages host refcounts) */
    gw_extension *fe = NULL;
    for (uint32_t i = 0; i < ext->used; ++i) {
        if (buffer_is_equal(key, &ext->exts[i].key)) {
            fe = ext->exts+i;
            break;
        }
    }

    if (NULL == fe) {
        /* grow extension array in chunks of 8; new slots zeroed */
        if (ext->used == ext->size) {
            ext->size += 8;
            ext->exts = realloc(ext->exts, ext->size * sizeof(gw_extension));
            force_assert(ext->exts);
            memset(ext->exts + ext->used, 0, 8 * sizeof(gw_extension));
        }
        fe = ext->exts + ext->used++;
        fe->last_used_ndx = -1;
        /* shallow-copy key buffer into the const member via cast;
         * the copied contents are intentionally not free'd later */
        buffer *b;
        *(const buffer **)&b = &fe->key;
        memcpy(b, key, sizeof(buffer)); /*(copy; not later free'd)*/
    }

    /* grow host list for this extension in chunks of 4 */
    if (fe->size == fe->used) {
        fe->size += 4;
        fe->hosts = realloc(fe->hosts, fe->size * sizeof(*(fe->hosts)));
        force_assert(fe->hosts);
    }

    fe->hosts[fe->used++] = fh;
    return 0;
}
268
static void gw_proc_connect_success(gw_host *host, gw_proc *proc, int debug, request_st * const r) {
    /* account a successful connect (".connected") and refresh last_used */
    gw_proc_connected_inc(host, proc);
    proc->last_used = log_monotonic_secs;

    if (debug)
        log_error(r->conf.errh, __FILE__, __LINE__,
          "got proc: pid: %d socket: %s load: %d",
          proc->pid, proc->connection_name->ptr, proc->load);
}
279
280 __attribute_cold__
gw_proc_connect_error(request_st * const r,gw_host * host,gw_proc * proc,pid_t pid,int errnum,int debug)281 static void gw_proc_connect_error(request_st * const r, gw_host *host, gw_proc *proc, pid_t pid, int errnum, int debug) {
282 const unix_time64_t cur_ts = log_monotonic_secs;
283 log_error_st * const errh = r->conf.errh;
284 errno = errnum; /*(for log_perror())*/
285 log_perror(errh, __FILE__, __LINE__,
286 "establishing connection failed: socket: %s", proc->connection_name->ptr);
287
288 if (!proc->is_local) {
289 proc->disabled_until = cur_ts + host->disable_time;
290 gw_proc_set_state(host, proc, PROC_STATE_OVERLOADED);
291 }
292 else if (proc->pid == pid && proc->state == PROC_STATE_RUNNING) {
293 /* several requests from lighttpd might reference the same proc
294 *
295 * Only one of them should mark the proc
296 * and all other ones should just take a new one.
297 *
298 * If a new proc was started with the old struct, this might
299 * otherwise lead to marking a perfectly good proc as dead
300 */
301 log_error(errh, __FILE__, __LINE__,
302 "backend error; we'll disable for %d"
303 "secs and send the request to another backend instead:"
304 "load: %d", host->disable_time, host->load);
305 if (EAGAIN == errnum) {
306 /* - EAGAIN: cool down the backend; it is overloaded */
307 #ifdef __linux__
308 log_error(errh, __FILE__, __LINE__,
309 "If this happened on Linux: You have run out of local ports. "
310 "Check the manual, section Performance how to handle this.");
311 #endif
312 if (debug) {
313 log_error(errh, __FILE__, __LINE__,
314 "This means that you have more incoming requests than your "
315 "FastCGI backend can handle in parallel. It might help to "
316 "spawn more FastCGI backends or PHP children; if not, "
317 "decrease server.max-connections. The load for this FastCGI "
318 "backend %s is %d", proc->connection_name->ptr, proc->load);
319 }
320 proc->disabled_until = cur_ts + host->disable_time;
321 gw_proc_set_state(host, proc, PROC_STATE_OVERLOADED);
322 }
323 else {
324 /* we got a hard error from the backend like
325 * - ECONNREFUSED for tcp-ip sockets
326 * - ENOENT for unix-domain-sockets
327 */
328 #if 0
329 gw_proc_set_state(host, proc, PROC_STATE_DIED_WAIT_FOR_PID);
330 #else /* treat as overloaded (future: unless we send kill() signal)*/
331 proc->disabled_until = cur_ts + host->disable_time;
332 gw_proc_set_state(host, proc, PROC_STATE_OVERLOADED);
333 #endif
334 }
335 }
336
337 if (EAGAIN == errnum) {
338 gw_proc_tag_inc(host, proc, CONST_STR_LEN(".overloaded"));
339 }
340 else {
341 gw_proc_tag_inc(host, proc, CONST_STR_LEN(".died"));
342 }
343 }
344
static void gw_proc_release(gw_host *host, gw_proc *proc, int debug, log_error_st *errh) {
    /* account the request leaving this proc */
    gw_proc_load_dec(host, proc);

    if (debug)
        log_error(errh, __FILE__, __LINE__,
          "released proc: pid: %d socket: %s load: %u",
          proc->pid, proc->connection_name->ptr, proc->load);
}
354
355 __attribute_cold__
gw_proc_check_enable(gw_host * const host,gw_proc * const proc,log_error_st * const errh)356 static void gw_proc_check_enable(gw_host * const host, gw_proc * const proc, log_error_st * const errh) {
357 if (log_monotonic_secs <= proc->disabled_until) return;
358 if (proc->state != PROC_STATE_OVERLOADED) return;
359
360 gw_proc_set_state(host, proc, PROC_STATE_RUNNING);
361
362 log_error(errh, __FILE__, __LINE__,
363 "gw-server re-enabled: %s %s %hu %s",
364 proc->connection_name->ptr,
365 host->host ? host->host->ptr : "", host->port,
366 host->unixsocket ? host->unixsocket->ptr : "");
367 }
368
369 __attribute_cold__
gw_proc_waitpid_log(const gw_host * const host,const gw_proc * const proc,log_error_st * const errh,const int status)370 static void gw_proc_waitpid_log(const gw_host * const host, const gw_proc * const proc, log_error_st * const errh, const int status) {
371 if (WIFEXITED(status)) {
372 if (proc->state != PROC_STATE_KILLED) {
373 log_error(errh, __FILE__, __LINE__,
374 "child exited: %d %s",
375 WEXITSTATUS(status), proc->connection_name->ptr);
376 }
377 } else if (WIFSIGNALED(status)) {
378 if (WTERMSIG(status) != SIGTERM && WTERMSIG(status) != SIGINT
379 && WTERMSIG(status) != host->kill_signal) {
380 log_error(errh, __FILE__, __LINE__,
381 "child signalled: %d", WTERMSIG(status));
382 }
383 } else {
384 log_error(errh, __FILE__, __LINE__,
385 "child died somehow: %d", status);
386 }
387 }
388
static int gw_proc_waitpid(gw_host *host, gw_proc *proc, log_error_st *errh) {
    /* Reap a locally-spawned child if it has terminated.
     * Returns 1 if the child was reaped (proc transitioned to
     * PROC_STATE_DIED); 0 if proc is not local, has no pid, or the
     * child is still running. */
    int rc, status;

    if (!proc->is_local) return 0;
    if (proc->pid <= 0) return 0;

    /*(third arg 1: non-blocking poll — presumably WNOHANG; confirm in
     * fdevent_waitpid())*/
    rc = fdevent_waitpid(proc->pid, &status, 1);
    if (0 == rc) return 0; /* child still running */

    /* child terminated */
    if (-1 == rc) {
        /* EINVAL or ECHILD no child processes */
        /* should not happen; someone else has cleaned up for us */
        log_perror(errh, __FILE__, __LINE__,
          "pid %d %d not found", proc->pid, proc->state);
    }
    else {
        gw_proc_waitpid_log(host, proc, errh, status);
    }

    proc->pid = 0;
    /* unless deliberately killed, start a short cool-down before respawn */
    if (proc->state != PROC_STATE_KILLED)
        proc->disabled_until = log_monotonic_secs;
    gw_proc_set_state(host, proc, PROC_STATE_DIED);
    return 1;
}
415
__attribute_cold__
static int gw_proc_sockaddr_init(gw_host * const host, gw_proc * const proc, log_error_st * const errh) {
    /* Resolve and cache the proc's socket address (proc->saddr/saddrlen)
     * and set a human-readable proc->connection_name ("unix:..." or
     * "tcp:host:port").  Returns 0 on success, -1 (errno=EINVAL) on
     * address parse/resolution failure. */
    sock_addr addr;
    socklen_t addrlen;

    if (proc->unixsocket) {
        if (1 != sock_addr_from_str_hints(&addr,&addrlen,proc->unixsocket->ptr,
                                          AF_UNIX, 0, errh)) {
            errno = EINVAL;
            return -1;
        }
        buffer_clear(proc->connection_name);
        buffer_append_str2(proc->connection_name,
                           CONST_STR_LEN("unix:"),
                           BUF_PTR_LEN(proc->unixsocket));
    }
    else {
      #ifdef __COVERITY__
        force_assert(host->host); /*(not NULL if !host->unixsocket)*/
      #endif
        /*(note: name resolution here is *blocking* if IP string not supplied)*/
        if (1 != sock_addr_from_str_hints(&addr, &addrlen, host->host->ptr,
                                          0, proc->port, errh)) {
            errno = EINVAL;
            return -1;
        }
        else if (host->host->size) {
            /*(skip if constant string set in gw_set_defaults_backend())*/
            /* overwrite host->host buffer with IP addr string so that
             * any further use of gw_host does not block on DNS lookup */
            buffer *h;
            *(const buffer **)&h = host->host;  /*(cast away const)*/
            sock_addr_inet_ntop_copy_buffer(h, &addr);
            host->family = sock_addr_get_family(&addr);
        }
        buffer_clear(proc->connection_name);
        buffer_append_str3(proc->connection_name,
                           CONST_STR_LEN("tcp:"),
                           BUF_PTR_LEN(host->host),
                           CONST_STR_LEN(":"));
        buffer_append_int(proc->connection_name, proc->port);
    }

    /* (re)allocate saddr only when the cached allocation is too small */
    if (NULL != proc->saddr && proc->saddrlen < addrlen) {
        free(proc->saddr);
        proc->saddr = NULL;
    }
    if (NULL == proc->saddr) {
        proc->saddr = (struct sockaddr *)malloc(addrlen);
        force_assert(proc->saddr);
    }
    proc->saddrlen = addrlen;
    memcpy(proc->saddr, &addr, addrlen);
    return 0;
}
471
static int env_add(char_array *env, const char *key, size_t key_len, const char *val, size_t val_len) {
    /* Add "key=val" to env, replacing an existing entry with the same key.
     * Returns 0 on success, -1 if key or val is NULL.
     * (entries are malloc'd "key=val" strings; ownership passes to env) */
    char *dst;

    if (!key || !val) return -1;

    /* +3: '=' separator, '\0' terminator, one byte slack */
    dst = malloc(key_len + val_len + 3);
    force_assert(dst);
    memcpy(dst, key, key_len);
    dst[key_len] = '=';
    memcpy(dst + key_len + 1, val, val_len + 1); /* add the \0 from the value */

    /* replace existing entry with matching "key=" prefix, if present */
    for (uint32_t i = 0; i < env->used; ++i) {
      #ifdef __COVERITY__
        force_assert(env->ptr); /*(non-NULL if env->used != 0)*/
      #endif
        if (0 == strncmp(dst, env->ptr[i], key_len + 1)) {
            free(env->ptr[i]);
            env->ptr[i] = dst;
            return 0;
        }
    }

    /* grow in chunks of 16; keep one free slot for a terminating NULL */
    if (env->size <= env->used + 1) {
        env->size += 16;
        env->ptr = realloc(env->ptr, env->size * sizeof(*env->ptr));
        force_assert(env->ptr);
    }

  #ifdef __COVERITY__
    force_assert(env->ptr); /*(non-NULL if env->used != 0; guaranteed above)*/
  #endif
    env->ptr[env->used++] = dst;

    return 0;
}
507
__attribute_cold__
static int gw_spawn_connection(gw_host * const host, gw_proc * const proc, log_error_st * const errh, int debug) {
    /* Ensure a backend is listening on proc's address, spawning one if
     * nothing answers: probe with connect(); if that fails, bind+listen
     * the socket ourselves and fork/exec host->args with the listening
     * fd on FCGI_LISTENSOCK_FILENO (STDIN_FILENO).
     * Returns 0 on success (proc set PROC_STATE_RUNNING), -1 on failure. */
    int gw_fd;
    int status;

    if (debug) {
        log_error(errh, __FILE__, __LINE__,
          "new proc, socket: %hu %s",
          proc->port, proc->unixsocket ? proc->unixsocket->ptr : "");
    }

    gw_fd = fdevent_socket_cloexec(proc->saddr->sa_family, SOCK_STREAM, 0);
    if (-1 == gw_fd) {
        log_perror(errh, __FILE__, __LINE__, "socket()");
        return -1;
    }

    /* probe: is something already listening there? */
    do {
        status = connect(gw_fd, proc->saddr, proc->saddrlen);
    } while (-1 == status && errno == EINTR);

    /* stale unix socket path from a dead backend: remove it so bind()
     * below can succeed (ENOENT means the path simply does not exist) */
    if (-1 == status && errno != ENOENT && proc->unixsocket) {
        log_perror(errh, __FILE__, __LINE__,
          "connect %s", proc->unixsocket->ptr);
        unlink(proc->unixsocket->ptr);
    }

    close(gw_fd);

    if (-1 == status) {
        /* server is not up, spawn it */
        char_array env;
        uint32_t i;

        /* reopen socket */
        gw_fd = fdevent_socket_cloexec(proc->saddr->sa_family, SOCK_STREAM, 0);
        if (-1 == gw_fd) {
            log_perror(errh, __FILE__, __LINE__, "socket()");
            return -1;
        }

        if (fdevent_set_so_reuseaddr(gw_fd, 1) < 0) {
            log_perror(errh, __FILE__, __LINE__, "socketsockopt()");
            close(gw_fd);
            return -1;
        }

        /* create socket */
        if (-1 == bind(gw_fd, proc->saddr, proc->saddrlen)) {
            log_perror(errh, __FILE__, __LINE__,
              "bind failed for: %s", proc->connection_name->ptr);
            close(gw_fd);
            return -1;
        }

        if (-1 == listen(gw_fd, host->listen_backlog)) {
            log_perror(errh, __FILE__, __LINE__, "listen()");
            close(gw_fd);
            return -1;
        }

        {
            /* create environment */
            env.ptr = NULL;
            env.size = 0;
            env.used = 0;

            /* build clean environment: either whitelist-copy only the
             * vars named in bin-copy-environment, or inherit everything */
            if (host->bin_env_copy && host->bin_env_copy->used) {
                for (i = 0; i < host->bin_env_copy->used; ++i) {
                    data_string *ds=(data_string *)host->bin_env_copy->data[i];
                    char *ge;

                    if (NULL != (ge = getenv(ds->value.ptr))) {
                        env_add(&env, BUF_PTR_LEN(&ds->value), ge, strlen(ge));
                    }
                }
            } else {
                char ** const e = fdevent_environ();
                for (i = 0; e[i]; ++i) {
                    char *eq;

                    if (NULL != (eq = strchr(e[i], '='))) {
                        env_add(&env, e[i], eq - e[i], eq+1, strlen(eq+1));
                    }
                }
            }

            /* create environment: explicit bin-environment overrides */
            if (host->bin_env) {
                for (i = 0; i < host->bin_env->used; ++i) {
                    data_string *ds = (data_string *)host->bin_env->data[i];
                    env_add(&env, BUF_PTR_LEN(&ds->key),
                                  BUF_PTR_LEN(&ds->value));
                }
            }

            for (i = 0; i < env.used; ++i) {
                /* search for PHP_FCGI_CHILDREN */
                if (0 == strncmp(env.ptr[i], "PHP_FCGI_CHILDREN=",
                                 sizeof("PHP_FCGI_CHILDREN=")-1)) {
                    break;
                }
            }

            /* not found, add a default */
            if (i == env.used) {
                env_add(&env, CONST_STR_LEN("PHP_FCGI_CHILDREN"),
                              CONST_STR_LEN("1"));
            }

            /*(env_add() reserves a slot for the terminating NULL)*/
            env.ptr[env.used] = NULL;
        }

        int dfd = fdevent_open_dirname(host->args.ptr[0], 1);/*permit symlinks*/
        if (-1 == dfd) {
            log_perror(errh, __FILE__, __LINE__,
              "open dirname failed: %s", host->args.ptr[0]);
        }

        /*(FCGI_LISTENSOCK_FILENO == STDIN_FILENO == 0)*/
        proc->pid = (dfd >= 0)
          ? fdevent_fork_execve(host->args.ptr[0], host->args.ptr,
                                env.ptr, gw_fd, -1, -1, dfd)
          : -1;

        for (i = 0; i < env.used; ++i) free(env.ptr[i]);
        free(env.ptr);
        if (-1 != dfd) close(dfd);
        close(gw_fd);

        if (-1 == proc->pid) {
            log_error(errh, __FILE__, __LINE__,
              "gw-backend failed to start: %s", host->bin_path->ptr);
            proc->pid = 0;
            proc->disabled_until = log_monotonic_secs;
            return -1;
        }

        /* register process */
        proc->last_used = log_monotonic_secs;
        proc->is_local = 1;

        /* wait 1ms, then check whether the child already exited
         * (catches immediate startup failures, e.g. bad binary) */
        struct timeval tv = { 0, 1000 };
        select(0, NULL, NULL, NULL, &tv);

        if (0 != gw_proc_waitpid(host, proc, errh)) {
            log_error(errh, __FILE__, __LINE__,
              "gw-backend failed to start: %s", host->bin_path->ptr);
            log_error(errh, __FILE__, __LINE__,
              "If you're trying to run your app as a FastCGI backend, make "
              "sure you're using the FastCGI-enabled version. If this is PHP "
              "on Gentoo, add 'fastcgi' to the USE flags. If this is PHP, try "
              "removing the bytecode caches for now and try again.");
            return -1;
        }
    } else {
        /* connect() succeeded: an externally-managed backend answers */
        proc->is_local = 0;
        proc->pid = 0;

        if (debug) {
            log_error(errh, __FILE__, __LINE__,
              "(debug) socket is already used; won't spawn: %s",
              proc->connection_name->ptr);
        }
    }

    gw_proc_set_state(host, proc, PROC_STATE_RUNNING);
    return 0;
}
679
__attribute_cold__
static void gw_proc_spawn(gw_host * const host, log_error_st * const errh, const int debug) {
    /* Spawn one additional backend proc for host: reuse an exited proc
     * struct from host->unused_procs when available, otherwise allocate a
     * new one; on success the proc is moved onto host->first. */
    gw_proc *proc;
    for (proc = host->unused_procs; proc; proc = proc->next) {
        /* (proc->pid <= 0 indicates PROC_STATE_DIED, not PROC_STATE_KILLED) */
        if (proc->pid > 0) continue;
        /* (do not attempt to spawn another proc if a proc just exited) */
        if (proc->disabled_until >= log_monotonic_secs) return;
        break;
    }
    if (proc) {
        /* unlink proc from host->unused_procs */
        if (proc == host->unused_procs)
            host->unused_procs = proc->next;
        else
            proc->prev->next = proc->next;

        if (proc->next) {
            proc->next->prev = proc->prev;
            proc->next = NULL;
        }

        proc->prev = NULL;
        gw_proc_init_portpath(host, proc);
    } else {
        proc = gw_proc_init(host);
    }

    if (0 != gw_proc_sockaddr_init(host, proc, errh)) {
        /*(should not happen if host->host validated at startup,
         * and translated from name to IP address at startup)*/
        log_error(errh, __FILE__, __LINE__,
          "ERROR: spawning backend failed.");
        /* reclaim the just-assigned id when this was the newest proc */
        if (proc->id == host->max_id-1) --host->max_id;
        gw_proc_free(proc);
    } else if (gw_spawn_connection(host, proc, errh, debug)) {
        log_error(errh, __FILE__, __LINE__,
          "ERROR: spawning backend failed.");
        /* return proc to the unused list for a later retry */
        proc->next = host->unused_procs;
        if (host->unused_procs)
            host->unused_procs->prev = proc;
        host->unused_procs = proc;
    } else {
        /* success: push proc onto the active list */
        proc->next = host->first;
        if (host->first)
            host->first->prev = proc;
        host->first = proc;
        ++host->num_procs;
    }
}
729
730 __attribute_cold__
gw_proc_kill(gw_host * host,gw_proc * proc)731 static void gw_proc_kill(gw_host *host, gw_proc *proc) {
732 if (proc->next) proc->next->prev = proc->prev;
733 if (proc->prev) proc->prev->next = proc->next;
734 else host->first = proc->next;
735 --host->num_procs;
736
737 proc->prev = NULL;
738 proc->next = host->unused_procs;
739 proc->disabled_until = 0;
740
741 if (host->unused_procs)
742 host->unused_procs->prev = proc;
743 host->unused_procs = proc;
744
745 kill(proc->pid, host->kill_signal);
746
747 gw_proc_set_state(host, proc, PROC_STATE_KILLED);
748 }
749
__attribute_pure__
static gw_host * unixsocket_is_dup(gw_plugin_data *p, const buffer *unixsocket) {
    /* Scan all configured hosts across all config contexts for one that
     * spawns a backend (bin_path set) on the same unix socket path.
     * Returns the matching host, or NULL if none. */
    if (NULL == p->cvlist) return NULL;
    /* (init i to 0 if global context; to 1 to skip empty global context) */
    for (int i = !p->cvlist[0].v.u2[1], used = p->nconfig; i < used; ++i) {
        config_plugin_value_t *cpv = p->cvlist + p->cvlist[i].v.u2[0];
        gw_plugin_config *conf = NULL;
        for (; -1 != cpv->k_id; ++cpv) {
            switch (cpv->k_id) {
              case 0: /* xxxxx.server */
                if (cpv->vtype == T_CONFIG_LOCAL) conf = cpv->v.v;
                break;
              default:
                break;
            }
        }

        if (NULL == conf || NULL == conf->exts) continue;

        gw_exts *exts = conf->exts;
        for (uint32_t j = 0; j < exts->used; ++j) {
            gw_extension *ex = exts->exts+j;
            for (uint32_t n = 0; n < ex->used; ++n) {
                gw_host *host = ex->hosts[n];
                if (host->unixsocket
                    && buffer_is_equal(host->unixsocket, unixsocket)
                    && host->bin_path)
                    return host;
            }
        }
    }

    return NULL;
}
784
static int parse_binpath(char_array *env, const buffer *b) {
    /* Split the "bin-path" command line b on spaces/tabs into strdup'd
     * argv entries in env->ptr[], terminated by a NULL pointer.
     * Always returns 0.  (no quoting/escaping support)
     * (fix: force_assert() added after the two previously-unchecked
     *  realloc() calls, matching the error policy used elsewhere in
     *  this file and avoiding a NULL dereference on OOM) */
    char *start = b->ptr;
    char c;
    /* search for spaces */
    for (size_t i = 0, used = buffer_clen(b); i < used; ++i) {
        switch(b->ptr[i]) {
        case ' ':
        case '\t':
            /* a WS, stop here and copy the argument */

            if (env->size == env->used) {
                env->size += 16;
                env->ptr = realloc(env->ptr, env->size * sizeof(*env->ptr));
                force_assert(env->ptr);
            }

            /* temporarily NUL-terminate the token for strdup() */
            c = b->ptr[i];
            b->ptr[i] = '\0';
            env->ptr[env->used++] = strdup(start);
            b->ptr[i] = c;

            start = b->ptr + i + 1;
            break;
        default:
            break;
        }
    }

    if (env->size == env->used) { /*need one extra for terminating NULL*/
        env->size += 16;
        env->ptr = realloc(env->ptr, env->size * sizeof(*env->ptr));
        force_assert(env->ptr);
    }

    /* the rest */
    env->ptr[env->used++] = strdup(start);

    if (env->size == env->used) { /*need one extra for terminating NULL*/
        env->size += 16;
        env->ptr = realloc(env->ptr, env->size * sizeof(*env->ptr));
        force_assert(env->ptr);
    }

    /* terminate */
    env->ptr[env->used++] = NULL;

    return 0;
}
831
/* load-balancing algorithms for selecting a backend host in gw_host_get() */
enum {
    GW_BALANCE_LEAST_CONNECTION, /* host with fewest active requests */
    GW_BALANCE_RR,               /* round-robin */
    GW_BALANCE_HASH,             /* hash of request URI (authority + path) */
    GW_BALANCE_STICKY            /* hash of client address (source sticky) */
};
838
__attribute_noinline__
__attribute_pure__
static uint32_t
gw_hash(const char *str, const uint32_t len, uint32_t hash)
{
    /* thin wrapper over djbhash so the balancing hash function can be
     * swapped in a single place */
    return djbhash(str, len, hash);
}
846
static gw_host * gw_host_get(request_st * const r, gw_extension *extension, int balance, int debug) {
    /* Select a host with active procs from extension using the configured
     * balancing algorithm.  On success returns the chosen host.  If no
     * host is available, may adaptively spawn one (single-worker mode with
     * min_procs == 0); otherwise sets 503 Service Unavailable on r and
     * returns NULL. */
    int ndx = -1;
    const int ext_used = (int)extension->used;

    if (ext_used <= 1) {
        /* trivial case: zero or one host configured */
        if (1 == ext_used && extension->hosts[0]->active_procs > 0)
            ndx = 0;
    }
    else {
        /*const char *balancing = "";*/
        switch(balance) {
        case GW_BALANCE_HASH:
          { /* hash balancing: pick host maximizing uri-hash ^ host-hash
             * (rendezvous/highest-random-weight style selection) */
            const uint32_t base_hash =
              gw_hash(BUF_PTR_LEN(&r->uri.authority),
                      gw_hash(BUF_PTR_LEN(&r->uri.path), DJBHASH_INIT));
            uint32_t last_max = UINT32_MAX;
            for (int k = 0; k < ext_used; ++k) {
                const gw_host * const host = extension->hosts[k];
                if (0 == host->active_procs) continue;
                const uint32_t cur_max = base_hash ^ host->gw_hash;
              #if 0
                if (debug) {
                    log_error(r->conf.errh, __FILE__, __LINE__,
                      "proxy - election: %s %s %s %u", r->uri.path.ptr,
                      host->host ? host->host->ptr : "",
                      r->uri.authority.ptr, cur_max);
                }
              #endif
                /*(UINT32_MAX sentinel: first eligible host always taken)*/
                if (last_max < cur_max || last_max == UINT32_MAX) {
                    last_max = cur_max;
                    ndx = k;
                }
            }
            /*balancing = "hash";*/
            break;
          }
        case GW_BALANCE_LEAST_CONNECTION:
          { /* fair balancing: host with the lowest current load */
            for (int k = 0, max_usage = INT_MAX; k < ext_used; ++k) {
                const gw_host * const host = extension->hosts[k];
                if (0 == host->active_procs) continue;
                if (host->load < max_usage) {
                    max_usage = host->load;
                    ndx = k;
                }
            }
            /*balancing = "least connection";*/
            break;
          }
        case GW_BALANCE_RR:
          { /* round robin */
            const gw_host *host = extension->hosts[0];

            /* Use last_used_ndx from first host in list */
            int k = extension->last_used_ndx;
            ndx = k + 1; /* use next host after the last one */
            if (ndx < 0) ndx = 0;

            /* Search first active host after last_used_ndx */
            while (ndx < ext_used
                   && 0 == (host = extension->hosts[ndx])->active_procs) ++ndx;

            if (ndx >= ext_used) {
                /* didn't find a higher id, wrap to the start */
                for (ndx = 0; ndx <= (int) k; ++ndx) {
                    host = extension->hosts[ndx];
                    if (0 != host->active_procs) break;
                }

                /* No active host found */
                if (0 == host->active_procs) ndx = -1;
            }

            /* Save new index for next round */
            extension->last_used_ndx = ndx;

            /*balancing = "round-robin";*/
            break;
          }
        case GW_BALANCE_STICKY:
          { /* source sticky balancing: hash client address so the same
             * client keeps hitting the same host (port mixed in to spread
             * hosts sharing an IP) */
            const buffer * const dst_addr_buf = &r->con->dst_addr_buf;
            const uint32_t base_hash =
              gw_hash(BUF_PTR_LEN(dst_addr_buf), DJBHASH_INIT);
            uint32_t last_max = UINT32_MAX;
            for (int k = 0; k < ext_used; ++k) {
                const gw_host * const host = extension->hosts[k];
                if (0 == host->active_procs) continue;
                const uint32_t cur_max = base_hash ^ host->gw_hash ^ host->port;
              #if 0
                if (debug) {
                    log_error(r->conf.errh, __FILE__, __LINE__,
                      "proxy - election: %s %s %hu %u", dst_addr_buf->ptr,
                      host->host ? host->host->ptr : "",
                      host->port, cur_max);
                }
              #endif
                if (last_max < cur_max || last_max == UINT32_MAX) {
                    last_max = cur_max;
                    ndx = k;
                }
            }
            /*balancing = "sticky";*/
            break;
          }
        default:
            break;
        }
      #if 0
        if (debug) {
            log_error(r->conf.errh, __FILE__, __LINE__,
              "gw - balancing: %s, hosts: %d", balancing, ext_used);
        }
      #endif
    }

    if (__builtin_expect( (-1 != ndx), 1)) {
        /* found a server */

        if (debug) {
            gw_host * const host = extension->hosts[ndx];
            log_error(r->conf.errh, __FILE__, __LINE__,
              "gw - found a host %s %hu",
              host->host ? host->host->ptr : "", host->port);
            return host;
        }

        return extension->hosts[ndx];
    } else if (0 == r->con->srv->srvconf.max_worker) {
        /* special-case adaptive spawning and 0 == host->min_procs */
        for (int k = 0; k < ext_used; ++k) {
            gw_host * const host = extension->hosts[k];
            if (0 == host->min_procs && 0 == host->num_procs && host->bin_path){
                gw_proc_spawn(host, r->con->srv->errh, debug);
                if (host->num_procs) return host;
            }
        }
    }

    /* all hosts are down */
    /* sorry, we don't have a server alive for this ext */
    r->http_status = 503; /* Service Unavailable */
    r->handler_module = NULL;

    /* only send the 'no handler' once */
    if (!extension->note_is_sent) {
        extension->note_is_sent = 1;
        log_error(r->conf.errh, __FILE__, __LINE__,
          "all handlers for %s?%.*s on %s are down.",
          r->uri.path.ptr, BUFFER_INTLEN_PTR(&r->uri.query),
          extension->key.ptr);
    }

    return NULL;
}
1003
static int gw_establish_connection(request_st * const r, gw_host *host, gw_proc *proc, pid_t pid, int gw_fd, int debug) {
    /* Begin a connect() to proc on gw_fd (expected non-blocking).
     * Returns 0 on immediate success, 1 if the connect is in progress
     * (caller retries later), -1 on hard error (proc marked via
     * gw_proc_connect_error()). */
    if (-1 != connect(gw_fd, proc->saddr, proc->saddrlen)) {
        if (debug > 1)
            log_error(r->conf.errh, __FILE__, __LINE__,
              "connect succeeded: %d", gw_fd);
        return 0;
    }

    const int errnum = errno;
    if (errnum == EINPROGRESS || errnum == EALREADY || errnum == EINTR
        || (errnum == EAGAIN && host->unixsocket)) {
        /* connection attempt is pending; finish asynchronously */
        if (debug > 2)
            log_error(r->conf.errh, __FILE__, __LINE__,
              "connect delayed; will continue later: %s",
              proc->connection_name->ptr);
        return 1;
    }

    gw_proc_connect_error(r, host, proc, pid, errnum, debug);
    return -1;
}
1029
/* Attempt to recover a single backend proc whose state is not RUNNING.
 * trigger is non-zero when invoked from the periodic (one-second)
 * maintenance tick; used to escalate kill signals for stuck procs. */
__attribute_cold__
__attribute_noinline__
static void gw_restart_dead_proc(gw_host * const host, log_error_st * const errh, const int debug, const int trigger, gw_proc * const proc) {
    switch (proc->state) {
    case PROC_STATE_RUNNING:
        break;
    case PROC_STATE_OVERLOADED:
        /* proc may have caught up; re-enable if eligible */
        gw_proc_check_enable(host, proc, errh);
        break;
    case PROC_STATE_KILLED:
        /* (proc->disabled_until doubles as a tick counter while KILLED)
         * escalate if proc has not exited: host->kill_signal for ticks 5-8,
         * SIGTERM for ticks 9-16, SIGKILL thereafter */
        if (trigger && ++proc->disabled_until > 4) {
            int sig = (proc->disabled_until <= 8)
              ? host->kill_signal
              : proc->disabled_until <= 16 ? SIGTERM : SIGKILL;
            kill(proc->pid, sig);
        }
        break;
    case PROC_STATE_DIED_WAIT_FOR_PID:
        /*(state should not happen in workers if server.max-worker > 0)*/
        /*(if PROC_STATE_DIED_WAIT_FOR_PID is used in future, might want
         * to save proc->disabled_until before gw_proc_waitpid() since
         * gw_proc_waitpid will set proc->disabled_until=log_monotonic_secs,
         * and so process will not be restarted below until one sec later)*/
        if (0 == gw_proc_waitpid(host, proc, errh)) {
            gw_proc_check_enable(host, proc, errh);
        }

        if (proc->state != PROC_STATE_DIED) break;
        __attribute_fallthrough__/*(we have a dead proc now)*/

    case PROC_STATE_DIED:
        /* local procs get restarted by us,
         * remote ones hopefully by the admin */

        if (host->bin_path) {
            /* we still have connections bound to this proc,
             * let them terminate first */
            if (proc->load != 0) break;

            /* avoid spinning if child exits too quickly */
            if (proc->disabled_until >= log_monotonic_secs) break;

            /* restart the child */

            if (debug) {
                log_error(errh, __FILE__, __LINE__,
                  "--- gw spawning"
                  "\n\tsocket %s"
                  "\n\tcurrent: 1 / %u",
                  proc->connection_name->ptr, host->max_procs);
            }

            if (gw_spawn_connection(host, proc, errh, debug)) {
                log_error(errh, __FILE__, __LINE__,
                  "ERROR: spawning gw failed.");
            }
        } else {
            /* remote backend: probe whether it can be re-enabled */
            gw_proc_check_enable(host, proc, errh);
        }
        break;
    }
}
1092
/* Walk all procs of host and attempt to revive any that are not RUNNING. */
static void gw_restart_dead_procs(gw_host * const host, log_error_st * const errh, const int debug, const int trigger) {
    gw_proc *proc = host->first;
    while (proc) {
        if (debug > 2)
            log_error(errh, __FILE__, __LINE__,
              "proc: %s %d %d %d %d", proc->connection_name->ptr,
              proc->state, proc->is_local, proc->load, proc->pid);
        if (PROC_STATE_RUNNING != proc->state)
            gw_restart_dead_proc(host, errh, debug, trigger, proc);
        proc = proc->next;
    }
}
1104
1105
1106
1107
1108 #include "base.h"
1109 #include "response.h"
1110
1111
1112 /* ok, we need a prototype */
1113 static handler_t gw_handle_fdevent(void *ctx, int revents);
1114 static handler_t gw_process_fdevent(gw_handler_ctx *hctx, request_st *r, int revents);
1115
1116
1117 __attribute_returns_nonnull__
handler_ctx_init(size_t sz)1118 static gw_handler_ctx * handler_ctx_init(size_t sz) {
1119 gw_handler_ctx *hctx = calloc(1, 0 == sz ? sizeof(*hctx) : sz);
1120 force_assert(hctx);
1121
1122 /*hctx->response = chunk_buffer_acquire();*//*(allocated when needed)*/
1123
1124 hctx->request_id = 0;
1125 hctx->gw_mode = GW_RESPONDER;
1126 hctx->state = GW_STATE_INIT;
1127 hctx->proc = NULL;
1128
1129 hctx->fd = -1;
1130
1131 hctx->reconnects = 0;
1132 hctx->send_content_body = 1;
1133
1134 /*hctx->rb = chunkqueue_init();*//*(allocated when needed)*/
1135 chunkqueue_init(&hctx->wb);
1136 hctx->wb_reqlen = 0;
1137
1138 return hctx;
1139 }
1140
/* Release all resources owned by hctx, then hctx itself. */
static void handler_ctx_free(gw_handler_ctx *hctx) {
    /* caller MUST have called gw_backend_close(hctx, r) if necessary */
    if (hctx->handler_ctx_free)
        hctx->handler_ctx_free(hctx);   /* module-specific teardown hook */
    chunk_buffer_release(hctx->response);
    if (NULL != hctx->rb)
        chunkqueue_free(hctx->rb);
    chunkqueue_reset(&hctx->wb);
    free(hctx);
}
1151
/* Reset hctx for reuse within the same request (keeps allocations). */
static void handler_ctx_clear(gw_handler_ctx *hctx) {
    /* caller MUST have called gw_backend_close(hctx, r) if necessary */

    /* drop backend association */
    hctx->proc = NULL;
    hctx->host = NULL;
    hctx->ext  = NULL;
    /*hctx->ext_auth is intentionally preserved to flag prior authorizer*/

    /* rewind state machine */
    hctx->gw_mode = GW_RESPONDER;
    hctx->state = GW_STATE_INIT;
    /*hctx->state_timestamp = 0;*//*(unused; left as-is)*/

    /* empty queued data but keep buffers allocated for reuse */
    if (hctx->rb) chunkqueue_reset(hctx->rb);
    chunkqueue_reset(&hctx->wb);
    hctx->wb_reqlen = 0;
    if (hctx->response) buffer_clear(hctx->response);

    /* reset connection bookkeeping */
    hctx->fd = -1;
    hctx->reconnects = 0;
    hctx->request_id = 0;
    hctx->send_content_body = 1;

    /*plugin_config conf;*//*(no need to reset for same request)*/
    /*hctx->r = NULL;*//*(no need to reset for same request)*/
    /*hctx->plugin_data = NULL;*//*(no need to reset for same request)*/
}
1180
1181
gw_init(void)1182 void * gw_init(void) {
1183 return calloc(1, sizeof(gw_plugin_data));
1184 }
1185
1186
/* Tear down a gw_plugin_config: signal all local backend processes to
 * exit, remove unix sockets created for locally-spawned procs, and free
 * the extension lists and the config struct itself. */
void gw_plugin_config_free(gw_plugin_config *s) {
    gw_exts * const exts = s->exts;
    if (exts) {
        for (uint32_t j = 0; j < exts->used; ++j) {
            gw_extension * const ex = exts->exts+j;
            for (uint32_t n = 0; n < ex->used; ++n) {
                gw_host * const host = ex->hosts[n];
                /* active procs */
                for (gw_proc *proc = host->first; proc; proc = proc->next) {
                    if (proc->pid > 0)
                        kill(proc->pid, host->kill_signal);
                    if (proc->is_local && proc->unixsocket)
                        unlink(proc->unixsocket->ptr);
                }
                /* procs already retired from rotation */
                for (gw_proc *proc = host->unused_procs; proc; proc = proc->next) {
                    if (proc->pid > 0)
                        kill(proc->pid, host->kill_signal);
                    if (proc->is_local && proc->unixsocket)
                        unlink(proc->unixsocket->ptr);
                }
            }
        }

        gw_extensions_free(s->exts);
        gw_extensions_free(s->exts_auth);
        gw_extensions_free(s->exts_resp);
    }
    free(s);
}
1223
gw_free(void * p_d)1224 void gw_free(void *p_d) {
1225 gw_plugin_data * const p = p_d;
1226 if (NULL == p->cvlist) return;
1227 /* (init i to 0 if global context; to 1 to skip empty global context) */
1228 for (int i = !p->cvlist[0].v.u2[1], used = p->nconfig; i < used; ++i) {
1229 config_plugin_value_t *cpv = p->cvlist + p->cvlist[i].v.u2[0];
1230 for (; -1 != cpv->k_id; ++cpv) {
1231 switch (cpv->k_id) {
1232 case 0: /* xxxxx.server */
1233 if (cpv->vtype == T_CONFIG_LOCAL)
1234 gw_plugin_config_free(cpv->v.v);
1235 break;
1236 default:
1237 break;
1238 }
1239 }
1240 }
1241 }
1242
/* Clear the check_local flag on every host of every extension. */
void gw_exts_clear_check_local(gw_exts *exts) {
    for (uint32_t j = 0; j < exts->used; ++j) {
        gw_extension * const ex = exts->exts+j;
        for (uint32_t n = 0; n < ex->used; ++n)
            ex->hosts[n]->check_local = 0;
    }
}
1251
/* Parse the <module>.server config array `a` ( "<ext>" => ( "<host>" =>
 * ( options ) ) ) into gw_plugin_config `s`: build gw_host entries,
 * optionally pre-spawn local backend processes ("bin-path"), and register
 * each host with the combined, authorizer, and responder extension lists.
 * sh_exec: non-zero runs "bin-path" via /bin/sh -c (historical SCGI mode).
 * cpkkey:  directive name used in error messages (e.g. "fastcgi.server").
 * Returns 1 on success; 0 on config error (partially-built host freed). */
int gw_set_defaults_backend(server *srv, gw_plugin_data *p, const array *a, gw_plugin_config *s, int sh_exec, const char *cpkkey) {
    /* per-module plugin_config MUST have common "base class" gw_plugin_config*/
    /* per-module plugin_data MUST have pointer-compatible common "base class"
     * with gw_plugin_data (stemming from gw_plugin_config compatibility) */

    /* directives recognized within each backend-host option block;
     * the switch over cpv->k_id below indexes into this table */
    static const config_plugin_keys_t cpk[] = {
      { CONST_STR_LEN("host"),
        T_CONFIG_STRING,
        T_CONFIG_SCOPE_CONNECTION }
     ,{ CONST_STR_LEN("port"),
        T_CONFIG_SHORT,
        T_CONFIG_SCOPE_CONNECTION }
     ,{ CONST_STR_LEN("socket"),
        T_CONFIG_STRING,
        T_CONFIG_SCOPE_CONNECTION }
     ,{ CONST_STR_LEN("listen-backlog"),
        T_CONFIG_INT,
        T_CONFIG_SCOPE_CONNECTION }
     ,{ CONST_STR_LEN("bin-path"),
        T_CONFIG_STRING,
        T_CONFIG_SCOPE_CONNECTION }
     ,{ CONST_STR_LEN("kill-signal"),
        T_CONFIG_SHORT,
        T_CONFIG_SCOPE_CONNECTION }
     ,{ CONST_STR_LEN("check-local"),
        T_CONFIG_BOOL,
        T_CONFIG_SCOPE_CONNECTION }
     ,{ CONST_STR_LEN("mode"),
        T_CONFIG_STRING,
        T_CONFIG_SCOPE_CONNECTION }
     ,{ CONST_STR_LEN("docroot"),
        T_CONFIG_STRING,
        T_CONFIG_SCOPE_CONNECTION }
     ,{ CONST_STR_LEN("min-procs"),
        T_CONFIG_SHORT,
        T_CONFIG_SCOPE_CONNECTION }
     ,{ CONST_STR_LEN("max-procs"),
        T_CONFIG_SHORT,
        T_CONFIG_SCOPE_CONNECTION }
     ,{ CONST_STR_LEN("max-load-per-proc"),
        T_CONFIG_SHORT,
        T_CONFIG_SCOPE_CONNECTION }
     ,{ CONST_STR_LEN("idle-timeout"),
        T_CONFIG_SHORT,
        T_CONFIG_SCOPE_CONNECTION }
     ,{ CONST_STR_LEN("disable-time"),
        T_CONFIG_SHORT,
        T_CONFIG_SCOPE_CONNECTION }
     ,{ CONST_STR_LEN("bin-environment"),
        T_CONFIG_ARRAY_KVSTRING,
        T_CONFIG_SCOPE_CONNECTION }
     ,{ CONST_STR_LEN("bin-copy-environment"),
        T_CONFIG_ARRAY_VLIST,
        T_CONFIG_SCOPE_CONNECTION }
     ,{ CONST_STR_LEN("broken-scriptfilename"),
        T_CONFIG_BOOL,
        T_CONFIG_SCOPE_CONNECTION }
     ,{ CONST_STR_LEN("strip-request-uri"),
        T_CONFIG_STRING,
        T_CONFIG_SCOPE_CONNECTION }
     ,{ CONST_STR_LEN("fix-root-scriptname"),
        T_CONFIG_BOOL,
        T_CONFIG_SCOPE_CONNECTION }
     ,{ CONST_STR_LEN("allow-x-send-file"),
        T_CONFIG_BOOL,
        T_CONFIG_SCOPE_CONNECTION }
     ,{ CONST_STR_LEN("x-sendfile"),
        T_CONFIG_BOOL,
        T_CONFIG_SCOPE_CONNECTION }
     ,{ CONST_STR_LEN("x-sendfile-docroot"),
        T_CONFIG_ARRAY_VLIST,
        T_CONFIG_SCOPE_CONNECTION }
     ,{ CONST_STR_LEN("tcp-fin-propagate"),
        T_CONFIG_BOOL,
        T_CONFIG_SCOPE_CONNECTION }
     ,{ CONST_STR_LEN("connect-timeout"),
        T_CONFIG_INT,
        T_CONFIG_SCOPE_CONNECTION }
     ,{ CONST_STR_LEN("write-timeout"),
        T_CONFIG_INT,
        T_CONFIG_SCOPE_CONNECTION }
     ,{ CONST_STR_LEN("read-timeout"),
        T_CONFIG_INT,
        T_CONFIG_SCOPE_CONNECTION }
     ,{ NULL, 0,
        T_CONFIG_UNSET,
        T_CONFIG_SCOPE_UNSET }
    };

    gw_host *host = NULL;

    int graceful_restart_bg =
      config_feature_bool(srv, "server.graceful-restart-bg", 0);

    p->srv_pid = srv->pid;

    s->exts = gw_extensions_init();
    s->exts_auth = gw_extensions_init();
    s->exts_resp = gw_extensions_init();
    /*s->balance = GW_BALANCE_LEAST_CONNECTION;*//*(default)*/

    /*
     * gw.server = ( "<ext>" => ( ... ),
     *               "<ext>" => ( ... ) )
     */

    for (uint32_t j = 0; j < a->used; ++j) {
        data_array *da_ext = (data_array *)a->data[j];

        /*
         * da_ext->key == name of the extension
         */

        /*
         * gw.server = ( "<ext>" =>
         *                 ( "<host>" => ( ... ),
         *                   "<host>" => ( ... )
         *                 ),
         *               "<ext>" => ... )
         */

        for (uint32_t n = 0; n < da_ext->value.used; ++n) {
            data_array * const da_host = (data_array *)da_ext->value.data[n];

            if (da_host->type != TYPE_ARRAY
                || !array_is_kvany(&da_host->value)){
                log_error(srv->errh, __FILE__, __LINE__,
                  "unexpected value for gw.server near [%s](string); "
                  "expected ( \"ext\" => "
                  "( \"backend-label\" => ( \"key\" => \"value\" )))",
                  da_host->key.ptr ? da_host->key.ptr : "");
                goto error;
            }

            config_plugin_value_t cvlist[sizeof(cpk)/sizeof(cpk[0])+1];
            memset(cvlist, 0, sizeof(cvlist));

            array *ca = &da_host->value;
            if (!config_plugin_values_init_block(srv, ca, cpk, cpkkey, cvlist))
                goto error;

            unsigned short host_mode = GW_RESPONDER;

            /* host defaults; each may be overridden by directives below */
            host = gw_host_init();
            host->id = &da_host->key;
            host->check_local = 1;
            host->min_procs = 4;
            host->max_procs = 4;
            host->max_load_per_proc = 1;
            host->idle_timeout = 60;
            host->connect_timeout = 8;
            host->disable_time = 1;
            host->break_scriptfilename_for_php = 0;
            host->kill_signal = SIGTERM;
            host->fix_root_path_name = 0;
            host->listen_backlog = 1024;
            host->xsendfile_allow = 0;
            host->refcount = 0;

            /* apply directives parsed from this host's option block
             * (k_id values correspond to cpk[] entry order above) */
            config_plugin_value_t *cpv = cvlist;
            for (; -1 != cpv->k_id; ++cpv) {
                switch (cpv->k_id) {
                  case 0: /* host */
                    if (!buffer_is_blank(cpv->v.b))
                        host->host = cpv->v.b;
                    break;
                  case 1: /* port */
                    host->port = cpv->v.shrt;
                    break;
                  case 2: /* socket */
                    if (!buffer_is_blank(cpv->v.b))
                        host->unixsocket = cpv->v.b;
                    break;
                  case 3: /* listen-backlog */
                    host->listen_backlog = cpv->v.u;
                    break;
                  case 4: /* bin-path */
                    if (!buffer_is_blank(cpv->v.b))
                        host->bin_path = cpv->v.b;
                    break;
                  case 5: /* kill-signal */
                    host->kill_signal = cpv->v.shrt;
                    break;
                  case 6: /* check-local */
                    host->check_local = (0 != cpv->v.u);
                    break;
                  case 7: /* mode */
                    if (!buffer_is_blank(cpv->v.b)) {
                        const buffer *b = cpv->v.b;
                        if (buffer_eq_slen(b, CONST_STR_LEN("responder")))
                            host_mode = GW_RESPONDER;
                        else if (buffer_eq_slen(b, CONST_STR_LEN("authorizer")))
                            host_mode = GW_AUTHORIZER;
                        else
                            log_error(srv->errh, __FILE__, __LINE__,
                              "WARNING: unknown gw mode: %s "
                              "(ignored, mode set to responder)", b->ptr);
                    }
                    break;
                  case 8: /* docroot */
                    if (!buffer_is_blank(cpv->v.b))
                        host->docroot = cpv->v.b;
                    break;
                  case 9: /* min-procs */
                    host->min_procs = cpv->v.shrt;
                    break;
                  case 10:/* max-procs */
                    host->max_procs = cpv->v.shrt;
                    break;
                  case 11:/* max-load-per-proc */
                    host->max_load_per_proc = cpv->v.shrt;
                    break;
                  case 12:/* idle-timeout */
                    host->idle_timeout = cpv->v.shrt;
                    break;
                  case 13:/* disable-time */
                    host->disable_time = cpv->v.shrt;
                    break;
                  case 14:/* bin-environment */
                    host->bin_env = cpv->v.a;
                    break;
                  case 15:/* bin-copy-environment */
                    host->bin_env_copy = cpv->v.a;
                    break;
                  case 16:/* broken-scriptfilename */
                    host->break_scriptfilename_for_php = (0 != cpv->v.u);
                    break;
                  case 17:/* strip-request-uri */
                    host->strip_request_uri = cpv->v.b;
                    if (buffer_has_slash_suffix(host->strip_request_uri)) {
                        buffer *b; /*(remove trailing slash; see http_cgi.c)*/
                        *(const buffer **)&b = host->strip_request_uri;
                        buffer_truncate(b, buffer_clen(b)-1);
                    }
                    break;
                  case 18:/* fix-root-scriptname */
                    host->fix_root_path_name = (0 != cpv->v.u);
                    break;
                  case 19:/* allow-x-send-file */
                    host->xsendfile_allow = (0 != cpv->v.u);
                    break;
                  case 20:/* x-sendfile */
                    host->xsendfile_allow = (0 != cpv->v.u);
                    break;
                  case 21:/* x-sendfile-docroot */
                    host->xsendfile_docroot = cpv->v.a;
                    if (cpv->v.a->used) {
                        /* validate each path: must be a string beginning
                         * with '/'; normalize and ensure trailing slash */
                        for (uint32_t k = 0; k < cpv->v.a->used; ++k) {
                            data_string *ds = (data_string *)cpv->v.a->data[k];
                            if (ds->type != TYPE_STRING) {
                                log_error(srv->errh, __FILE__, __LINE__,
                                  "unexpected type for x-sendfile-docroot; "
                                  "expected: \"x-sendfile-docroot\" => "
                                  "( \"/allowed/path\", ... )");
                                goto error;
                            }
                            if (ds->value.ptr[0] != '/') {
                                log_error(srv->errh, __FILE__, __LINE__,
                                  "x-sendfile-docroot paths must begin with "
                                  "'/'; invalid: \"%s\"", ds->value.ptr);
                                goto error;
                            }
                            buffer_path_simplify(&ds->value);
                            buffer_append_slash(&ds->value);
                        }
                    }
                    break;
                  case 22:/* tcp-fin-propagate */
                    host->tcp_fin_propagate = (0 != cpv->v.u);
                    break;
                  case 23:/* connect-timeout */
                    host->connect_timeout = cpv->v.u;
                    break;
                  case 24:/* write-timeout */
                    host->write_timeout = cpv->v.u;
                    break;
                  case 25:/* read-timeout */
                    host->read_timeout = cpv->v.u;
                    break;
                  default:
                    break;
                }
            }

            /* warn about common typo: '_' instead of '-' in directive names */
            for (uint32_t m = 0; m < da_host->value.used; ++m) {
                if (NULL != strchr(da_host->value.data[m]->key.ptr, '_')) {
                    log_error(srv->errh, __FILE__, __LINE__,
                      "incorrect directive contains underscore ('_') instead of dash ('-'): %s",
                      da_host->value.data[m]->key.ptr);
                }
            }

            /* reject specifying both tcp host/port and unix socket path */
            if ((host->host || host->port) && host->unixsocket) {
                log_error(srv->errh, __FILE__, __LINE__,
                  "either host/port or socket have to be set in: "
                  "%s = (%s => (%s ( ...", cpkkey, da_ext->key.ptr,
                  da_host->key.ptr);

                goto error;
            }

            /* treat "host" beginning with '/' as a unix socket path */
            if (host->host && *host->host->ptr == '/' && !host->unixsocket) {
                host->unixsocket = host->host;
            }

            if (host->unixsocket) {
                /* unix domain socket */
                struct sockaddr_un un;

                /* (path plus '\0' must fit within sun_path, with slack) */
                if (buffer_clen(host->unixsocket) + 1 > sizeof(un.sun_path) - 2) {
                    log_error(srv->errh, __FILE__, __LINE__,
                      "unixsocket is too long in: %s = (%s => (%s ( ...",
                      cpkkey, da_ext->key.ptr, da_host->key.ptr);

                    goto error;
                }

                if (host->bin_path) {
                    /* share an existing host if another backend already
                     * spawns the same bin-path on the same unix socket;
                     * same socket with a different bin-path is an error */
                    gw_host *duplicate = unixsocket_is_dup(p, host->unixsocket);
                    if (NULL != duplicate) {
                        if (!buffer_is_equal(host->bin_path, duplicate->bin_path)) {
                            log_error(srv->errh, __FILE__, __LINE__,
                              "duplicate unixsocket path: %s",
                              host->unixsocket->ptr);
                            goto error;
                        }
                        gw_host_free(host);
                        host = duplicate;
                        ++host->refcount;
                    }
                }

                host->family = AF_UNIX;
            } else {
                /* tcp/ip */

                if (!host->host && !host->bin_path) {
                    log_error(srv->errh, __FILE__, __LINE__,
                      "host or bin-path have to be set in: "
                      "%s = (%s => (%s ( ...", cpkkey, da_ext->key.ptr,
                      da_host->key.ptr);

                    goto error;
                } else if (0 == host->port) {
                    host->port = 80; /* default port */
                }

                if (!host->host) {
                    /* (default to localhost when only bin-path was given) */
                    static const buffer lhost ={CONST_STR_LEN("127.0.0.1")+1,0};
                    host->host = &lhost;
                }

                /* (presence of ':' in address implies IPv6 literal) */
                host->family = (NULL != strchr(host->host->ptr, ':'))
                  ? AF_INET6
                  : AF_INET;
            }
            if (!host->refcount)
                gw_status_init_host(host);

            if (host->refcount) {
                /* already init'd; skip spawning */
            } else if (host->bin_path) {
                /* a local socket + self spawning */
                struct stat st;
                parse_binpath(&host->args, host->bin_path);
                /* (logged as error, but not fatal at this point) */
                if (0 != stat(host->args.ptr[0], &st) || !S_ISREG(st.st_mode)
                    || !(st.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) {
                    log_error(srv->errh, __FILE__, __LINE__,
                      "invalid \"bin-path\" => \"%s\" "
                      "(check that file exists, is regular file, "
                      "and is executable by lighttpd)", host->bin_path->ptr);
                }

                if (sh_exec) {
                    /*(preserve prior behavior for SCGI exec of command)*/
                    /*(admin should really prefer to put
                     * any complex command into a script)*/
                    /* replace parsed args with: /bin/sh -c "exec <bin-path>" */
                    for (uint32_t m = 0; m < host->args.used; ++m)
                        free(host->args.ptr[m]);
                    free(host->args.ptr);

                    host->args.ptr = calloc(4, sizeof(char *));
                    force_assert(host->args.ptr);
                    host->args.used = 3;
                    host->args.size = 4;
                    host->args.ptr[0] = malloc(sizeof("/bin/sh"));
                    force_assert(host->args.ptr[0]);
                    memcpy(host->args.ptr[0], "/bin/sh", sizeof("/bin/sh"));
                    host->args.ptr[1] = malloc(sizeof("-c"));
                    force_assert(host->args.ptr[1]);
                    memcpy(host->args.ptr[1], "-c", sizeof("-c"));
                    host->args.ptr[2] =
                      malloc(sizeof("exec ")-1+buffer_clen(host->bin_path)+1);
                    force_assert(host->args.ptr[2]);
                    memcpy(host->args.ptr[2], "exec ", sizeof("exec ")-1);
                    memcpy(host->args.ptr[2]+sizeof("exec ")-1,
                           host->bin_path->ptr, buffer_clen(host->bin_path)+1);
                    host->args.ptr[3] = NULL;
                }

                if (host->min_procs > host->max_procs)
                    host->min_procs = host->max_procs;
                /* adaptive spawning requires a single worker process */
                if (host->min_procs!= host->max_procs
                    && 0 != srv->srvconf.max_worker) {
                    host->min_procs = host->max_procs;
                    log_error(srv->errh, __FILE__, __LINE__,
                      "adaptive backend spawning disabled "
                      "(server.max_worker is non-zero)");
                }
                if (host->max_load_per_proc < 1)
                    host->max_load_per_proc = 0;

                if (s->debug) {
                    log_error(srv->errh, __FILE__, __LINE__,
                      "--- gw spawning local"
                      "\n\tproc: %s"
                      "\n\tport: %hu"
                      "\n\tsocket %s"
                      "\n\tmin-procs: %d"
                      "\n\tmax-procs: %d",
                      host->bin_path->ptr,
                      host->port,
                      host->unixsocket ? host->unixsocket->ptr : "",
                      host->min_procs,
                      host->max_procs);
                }

                /* pre-spawn min-procs children (spawn skipped in
                 * preflight-check mode; sockaddr still initialized) */
                for (uint32_t pno = 0; pno < host->min_procs; ++pno) {
                    gw_proc * const proc = gw_proc_init(host);

                    if (s->debug) {
                        log_error(srv->errh, __FILE__, __LINE__,
                          "--- gw spawning"
                          "\n\tport: %hu"
                          "\n\tsocket %s"
                          "\n\tcurrent: %u / %u",
                          host->port,
                          host->unixsocket ? host->unixsocket->ptr : "",
                          pno, host->max_procs);
                    }

                    if (0 != gw_proc_sockaddr_init(host, proc, srv->errh)) {
                        gw_proc_free(proc);
                        goto error;
                    }

                    if (!srv->srvconf.preflight_check
                        && gw_spawn_connection(host, proc, srv->errh, s->debug)) {
                        log_error(srv->errh, __FILE__, __LINE__,
                          "[ERROR]: spawning gw failed.");
                        gw_proc_free(proc);
                        goto error;
                    }

                    /* prepend proc to host's doubly-linked proc list */
                    proc->next = host->first;
                    if (host->first) host->first->prev = proc;
                    host->first = proc;
                    ++host->num_procs;
                }

                if (graceful_restart_bg) {
                    /*(set flag to false to avoid repeating)*/
                    graceful_restart_bg = 0;
                    log_error(srv->errh, __FILE__, __LINE__,
                      "server.graceful-restart-bg disabled "
                      "(incompatible with %s.server \"bin-path\")",
                      p->self->name);
                    data_unset * const du =
                      array_get_data_unset(srv->srvconf.feature_flags,
                                           CONST_STR_LEN("server.graceful-restart-bg"));
                    if (du->type == TYPE_STRING)
                        buffer_copy_string_len(&((data_string *)du)->value,
                                               CONST_STR_LEN("false"));
                    else /* (du->type == TYPE_INTEGER) */
                        ((data_integer *)du)->value = 0;
                }
            } else {
                /* remote backend: one logical proc, marked RUNNING */
                gw_proc * const proc = gw_proc_init(host);
                host->first = proc;
                ++host->num_procs;
                host->min_procs = 1;
                host->max_procs = 1;
                if (0 != gw_proc_sockaddr_init(host, proc, srv->errh)) goto error;
                gw_proc_set_state(host, proc, PROC_STATE_RUNNING);
            }

            /* precompute host hash (used by hash/sticky load balancing) */
            const buffer * const h = host->host ? host->host : host->unixsocket;
            host->gw_hash = gw_hash(BUF_PTR_LEN(h), DJBHASH_INIT);

            /* s->exts is list of exts -> hosts
             * s->exts now used as combined list
             *   of authorizer and responder hosts (for backend maintenance)
             * s->exts_auth is list of exts -> authorizer hosts
             * s->exts_resp is list of exts -> responder hosts
             * For each path/extension:
             *   there may be an independent GW_AUTHORIZER and GW_RESPONDER
             *   (The GW_AUTHORIZER and GW_RESPONDER could be handled by the
             *    same host, and an admin might want to do that for large
             *    uploads, since GW_AUTHORIZER runs prior to receiving
             *    (potentially large) request body from client and can
             *    authorize or deny request prior to receiving the full upload)
             */
            gw_extension_insert(s->exts, &da_ext->key, host);

            if (host_mode == GW_AUTHORIZER) {
                ++host->refcount;
                gw_extension_insert(s->exts_auth, &da_ext->key, host);
            } else if (host_mode == GW_RESPONDER) {
                ++host->refcount;
                gw_extension_insert(s->exts_resp, &da_ext->key, host);
            } /*(else should have been rejected above)*/

            host = NULL;
        }
    }

    return 1;

error:
    if (NULL != host) gw_host_free(host);
    return 0;
}
1774
/* Map a configured balance-algorithm name to a GW_BALANCE_* constant.
 * Unset/blank defaults to least-connection; an unrecognized name is
 * logged and also falls back to least-connection. */
int gw_get_defaults_balance(server *srv, const buffer *b) {
    if (b && !buffer_is_blank(b)) {
        if (buffer_eq_slen(b, CONST_STR_LEN("least-connection"))
            || buffer_eq_slen(b, CONST_STR_LEN("fair"))) /* legacy alias */
            return GW_BALANCE_LEAST_CONNECTION;
        if (buffer_eq_slen(b, CONST_STR_LEN("round-robin")))
            return GW_BALANCE_RR;
        if (buffer_eq_slen(b, CONST_STR_LEN("hash")))
            return GW_BALANCE_HASH;
        if (buffer_eq_slen(b, CONST_STR_LEN("sticky")))
            return GW_BALANCE_STICKY;

        log_error(srv->errh, __FILE__, __LINE__,
          "xxxxx.balance has to be one of: "
          "least-connection, round-robin, hash, sticky, but not: %s", b->ptr);
    }
    return GW_BALANCE_LEAST_CONNECTION;
}
1794
1795
/* Advance the gw connection state machine to `state`.
 * (state timestamp tracking is currently disabled; see commented line) */
static void gw_set_state(gw_handler_ctx *hctx, gw_connection_state_t state) {
    hctx->state = state;
    /*hctx->state_timestamp = log_monotonic_secs;*/
}
1800
1801
/* Switch hctx into transparent write mode: request length treated as
 * unbounded (wb_reqlen = -1) and state advanced to GW_STATE_WRITE.
 * Disables Nagle on TCP backend sockets. */
void gw_set_transparent(gw_handler_ctx *hctx) {
    if (hctx->host->family != AF_UNIX
        && -1 == fdevent_set_tcp_nodelay(hctx->fd, 1)) {
        /*(error, but not critical)*/
    }
    hctx->wb_reqlen = -1; /* (request length not known up front) */
    gw_set_state(hctx, GW_STATE_WRITE);
}
1811
1812
/* Push hctx onto the head of its host's doubly-linked active-ctx list. */
static void gw_host_hctx_enq(gw_handler_ctx * const hctx) {
    gw_host * const host = hctx->host;
    /*if (__builtin_expect( (host == NULL), 0)) return;*/

    gw_handler_ctx * const head = host->hctxs;
    hctx->prev = NULL;
    hctx->next = head;
    if (head)
        head->prev = hctx;
    host->hctxs = hctx;
}
1823
1824
/* Unlink hctx from its host's doubly-linked active-ctx list. */
static void gw_host_hctx_deq(gw_handler_ctx * const hctx) {
    /*if (__builtin_expect( (hctx->host == NULL), 0)) return;*/

    gw_handler_ctx * const prev = hctx->prev;
    gw_handler_ctx * const next = hctx->next;

    if (prev)
        prev->next = next;
    else
        hctx->host->hctxs = next;   /* hctx was the list head */

    if (next)
        next->prev = prev;

    hctx->next = NULL;
    hctx->prev = NULL;
}
1839
1840
/* Release the backend connection held by hctx: deregister fdevent
 * interest and schedule close of the socket fd, return the proc slot
 * (load count) via gw_proc_release(), and drop the host reference.
 * Safe to call when already closed (both branches are guarded). */
static void gw_backend_close(gw_handler_ctx * const hctx, request_st * const r) {
    if (hctx->fd >= 0) {
        fdevent_fdnode_event_del(hctx->ev, hctx->fdn);
        /*fdevent_unregister(ev, hctx->fd);*//*(handled below)*/
        fdevent_sched_close(hctx->ev, hctx->fd, 1);
        hctx->fdn = NULL;
        hctx->fd = -1;
        gw_host_hctx_deq(hctx);
    }

    if (hctx->host) {
        if (hctx->proc) {
            gw_proc_release(hctx->host, hctx->proc, hctx->conf.debug,
                            r->conf.errh);
            hctx->proc = NULL;
        }

        gw_host_reset(hctx->host);
        hctx->host = NULL;
    }
}
1862
/* Fully close the backend connection and destroy the handler context,
 * detaching it from the request and finishing the response if this
 * module is still the request handler. */
static void gw_connection_close(gw_handler_ctx * const hctx, request_st * const r) {
    gw_plugin_data * const p = hctx->plugin_data;

    gw_backend_close(hctx, r);
    handler_ctx_free(hctx);
    r->plugin_ctx[p->id] = NULL;

    if (r->handler_module == p->self)
        http_response_backend_done(r);
}
1874
gw_reconnect(gw_handler_ctx * const hctx,request_st * const r)1875 static handler_t gw_reconnect(gw_handler_ctx * const hctx, request_st * const r) {
1876 gw_backend_close(hctx, r);
1877
1878 hctx->host = gw_host_get(r,hctx->ext,hctx->conf.balance,hctx->conf.debug);
1879 if (NULL == hctx->host) return HANDLER_FINISHED;
1880
1881 gw_host_assign(hctx->host);
1882 hctx->request_id = 0;
1883 hctx->opts.xsendfile_allow = hctx->host->xsendfile_allow;
1884 hctx->opts.xsendfile_docroot = hctx->host->xsendfile_docroot;
1885 gw_set_state(hctx, GW_STATE_INIT);
1886 return HANDLER_COMEBACK;
1887 }
1888
1889
gw_handle_request_reset(request_st * const r,void * p_d)1890 handler_t gw_handle_request_reset(request_st * const r, void *p_d) {
1891 gw_plugin_data *p = p_d;
1892 gw_handler_ctx *hctx = r->plugin_ctx[p->id];
1893 if (hctx) gw_connection_close(hctx, r);
1894
1895 return HANDLER_GO_ON;
1896 }
1897
1898
/* Propagate a client TCP half-close (FIN) to the backend via
 * shutdown(SHUT_WR), but only once the write queue to the backend is
 * drained, propagation is enabled for the host, and the mode is not
 * authorizer.  Performed at most once per connection (flag-guarded). */
__attribute_cold__
static void gw_conditional_tcp_fin(gw_handler_ctx * const hctx, request_st * const r) {
    /*assert(r->conf.stream_request_body & FDEVENT_STREAM_REQUEST_TCP_FIN);*/
    if (!chunkqueue_is_empty(&hctx->wb))return; /* data still queued */
    if (!hctx->host->tcp_fin_propagate) return;
    if (hctx->gw_mode == GW_AUTHORIZER) return;
    if (r->conf.stream_request_body & FDEVENT_STREAM_REQUEST_BACKEND_SHUT_WR)
        return; /* already propagated */

    /* propagate shutdown SHUT_WR to backend if TCP half-close on con->fd */
    r->conf.stream_request_body |= FDEVENT_STREAM_REQUEST_BACKEND_SHUT_WR;
    r->conf.stream_request_body &= ~FDEVENT_STREAM_REQUEST_POLLIN;
    r->con->is_readable = 0;
    shutdown(hctx->fd, SHUT_WR);
    fdevent_fdnode_event_clr(hctx->ev, hctx->fdn, FDEVENT_OUT);
}
1915
gw_write_request(gw_handler_ctx * const hctx,request_st * const r)1916 static handler_t gw_write_request(gw_handler_ctx * const hctx, request_st * const r) {
1917 switch(hctx->state) {
1918 case GW_STATE_INIT:
1919 /* do we have a running process for this host (max-procs) ? */
1920 hctx->proc = NULL;
1921
1922 for (gw_proc *proc = hctx->host->first; proc; proc = proc->next) {
1923 if (proc->state == PROC_STATE_RUNNING) {
1924 hctx->proc = proc;
1925 break;
1926 }
1927 }
1928
1929 /* all children are dead */
1930 if (hctx->proc == NULL) {
1931 return HANDLER_ERROR;
1932 }
1933
1934 /* check the other procs if they have a lower load */
1935 for (gw_proc *proc = hctx->proc->next; proc; proc = proc->next) {
1936 if (proc->state != PROC_STATE_RUNNING) continue;
1937 if (proc->load < hctx->proc->load) hctx->proc = proc;
1938 }
1939
1940 gw_proc_load_inc(hctx->host, hctx->proc);
1941
1942 hctx->fd = fdevent_socket_nb_cloexec(hctx->host->family,SOCK_STREAM,0);
1943 if (-1 == hctx->fd) {
1944 log_perror(r->conf.errh, __FILE__, __LINE__,
1945 "socket() failed (cur_fds:%d) (max_fds:%d)",
1946 r->con->srv->cur_fds, r->con->srv->max_fds);
1947 return HANDLER_ERROR;
1948 }
1949
1950 ++r->con->srv->cur_fds;
1951
1952 hctx->fdn = fdevent_register(hctx->ev,hctx->fd,gw_handle_fdevent,hctx);
1953
1954 if (hctx->proc->is_local) {
1955 hctx->pid = hctx->proc->pid;
1956 }
1957
1958 hctx->write_ts = log_monotonic_secs;
1959 gw_host_hctx_enq(hctx);
1960 switch (gw_establish_connection(r, hctx->host, hctx->proc, hctx->pid,
1961 hctx->fd, hctx->conf.debug)) {
1962 case 1: /* connection is in progress */
1963 fdevent_fdnode_event_set(hctx->ev, hctx->fdn, FDEVENT_OUT);
1964 gw_set_state(hctx, GW_STATE_CONNECT_DELAYED);
1965 return HANDLER_WAIT_FOR_EVENT;
1966 case -1:/* connection error */
1967 return HANDLER_ERROR;
1968 case 0: /* everything is ok, go on */
1969 hctx->reconnects = 0;
1970 break;
1971 }
1972 __attribute_fallthrough__
1973 case GW_STATE_CONNECT_DELAYED:
1974 if (hctx->state == GW_STATE_CONNECT_DELAYED) { /*(not GW_STATE_INIT)*/
1975 int socket_error = fdevent_connect_status(hctx->fd);
1976 if (socket_error != 0) {
1977 gw_proc_connect_error(r, hctx->host, hctx->proc, hctx->pid,
1978 socket_error, hctx->conf.debug);
1979 return HANDLER_ERROR;
1980 }
1981 /* go on with preparing the request */
1982 hctx->write_ts = log_monotonic_secs;
1983 }
1984
1985 gw_proc_connect_success(hctx->host, hctx->proc, hctx->conf.debug, r);
1986
1987 gw_set_state(hctx, GW_STATE_PREPARE_WRITE);
1988 __attribute_fallthrough__
1989 case GW_STATE_PREPARE_WRITE:
1990 /* ok, we have the connection */
1991
1992 {
1993 handler_t rc = hctx->create_env(hctx);
1994 if (HANDLER_GO_ON != rc) {
1995 if (HANDLER_FINISHED != rc && HANDLER_ERROR != rc)
1996 fdevent_fdnode_event_clr(hctx->ev, hctx->fdn, FDEVENT_OUT);
1997 return rc;
1998 }
1999 }
2000
2001 /*(disable Nagle algorithm if streaming and content-length unknown)*/
2002 if (AF_UNIX != hctx->host->family) {
2003 if (r->reqbody_length < 0) {
2004 if (-1 == fdevent_set_tcp_nodelay(hctx->fd, 1)) {
2005 /*(error, but not critical)*/
2006 }
2007 }
2008 }
2009
2010 hctx->read_ts = log_monotonic_secs;
2011 fdevent_fdnode_event_add(hctx->ev, hctx->fdn, FDEVENT_IN|FDEVENT_RDHUP);
2012 gw_set_state(hctx, GW_STATE_WRITE);
2013 __attribute_fallthrough__
2014 case GW_STATE_WRITE:
2015 if (!chunkqueue_is_empty(&hctx->wb)) {
2016 log_error_st * const errh = r->conf.errh;
2017 #if 0
2018 if (hctx->conf.debug > 1) {
2019 log_error(errh, __FILE__, __LINE__, "sdsx",
2020 "send data to backend (fd=%d), size=%zu",
2021 hctx->fd, chunkqueue_length(&hctx->wb));
2022 }
2023 #endif
2024 off_t bytes_out = hctx->wb.bytes_out;
2025 if (r->con->srv->network_backend_write(hctx->fd, &hctx->wb,
2026 MAX_WRITE_LIMIT, errh) < 0) {
2027 switch(errno) {
2028 case EPIPE:
2029 case ENOTCONN:
2030 case ECONNRESET:
2031 /* the connection got dropped after accept()
2032 * we don't care about that --
2033 * if you accept() it, you have to handle it.
2034 */
2035 log_error(errh, __FILE__, __LINE__,
2036 "connection was dropped after accept() "
2037 "(perhaps the gw process died), "
2038 "write-offset: %lld socket: %s",
2039 (long long)hctx->wb.bytes_out,
2040 hctx->proc->connection_name->ptr);
2041 return HANDLER_ERROR;
2042 default:
2043 log_perror(errh, __FILE__, __LINE__, "write failed");
2044 return HANDLER_ERROR;
2045 }
2046 }
2047 else if (hctx->wb.bytes_out > bytes_out) {
2048 hctx->write_ts = hctx->proc->last_used = log_monotonic_secs;
2049 if (hctx->stdin_append
2050 && chunkqueue_length(&hctx->wb) < 65536 - 16384
2051 && !chunkqueue_is_empty(&r->reqbody_queue)) {
2052 handler_t rc = hctx->stdin_append(hctx);
2053 if (HANDLER_GO_ON != rc) return rc;
2054 }
2055 }
2056 }
2057
2058 if (hctx->wb.bytes_out == hctx->wb_reqlen) {
2059 fdevent_fdnode_event_clr(hctx->ev, hctx->fdn, FDEVENT_OUT);
2060 gw_set_state(hctx, GW_STATE_READ);
2061 } else {
2062 off_t wblen = chunkqueue_length(&hctx->wb);
2063 if ((hctx->wb.bytes_in < hctx->wb_reqlen || hctx->wb_reqlen < 0)
2064 && wblen < 65536 - 16384) {
2065 /*(r->conf.stream_request_body & FDEVENT_STREAM_REQUEST)*/
2066 if (!(r->conf.stream_request_body
2067 & FDEVENT_STREAM_REQUEST_POLLIN)) {
2068 r->conf.stream_request_body |=
2069 FDEVENT_STREAM_REQUEST_POLLIN;
2070 r->con->is_readable = 1; /* trigger optimistic client read */
2071 }
2072 }
2073 if (0 == wblen) {
2074 fdevent_fdnode_event_clr(hctx->ev, hctx->fdn, FDEVENT_OUT);
2075 }
2076 else if (!(fdevent_fdnode_interest(hctx->fdn) & FDEVENT_OUT)) {
2077 hctx->write_ts = log_monotonic_secs;
2078 fdevent_fdnode_event_add(hctx->ev, hctx->fdn, FDEVENT_OUT);
2079 }
2080 }
2081
2082 if (r->conf.stream_request_body
2083 & FDEVENT_STREAM_REQUEST_TCP_FIN)
2084 gw_conditional_tcp_fin(hctx, r);
2085
2086 return HANDLER_WAIT_FOR_EVENT;
2087 case GW_STATE_READ:
2088 /* waiting for a response */
2089 return HANDLER_WAIT_FOR_EVENT;
2090 default:
2091 log_error(r->conf.errh, __FILE__, __LINE__,
2092 "(debug) unknown state");
2093 return HANDLER_ERROR;
2094 }
2095 }
2096
2097
2098 __attribute_cold__
2099 __attribute_noinline__
gw_backend_error(gw_handler_ctx * const hctx,request_st * const r)2100 static handler_t gw_backend_error(gw_handler_ctx * const hctx, request_st * const r)
2101 {
2102 if (hctx->backend_error) hctx->backend_error(hctx);
2103 http_response_backend_error(r);
2104 gw_connection_close(hctx, r);
2105 return HANDLER_FINISHED;
2106 }
2107
2108
2109 static handler_t gw_recv_response(gw_handler_ctx *hctx, request_st *r);
2110
2111
2112 __attribute_cold__
gw_write_error(gw_handler_ctx * const hctx,request_st * const r)2113 static handler_t gw_write_error(gw_handler_ctx * const hctx, request_st * const r) {
2114
2115 if (hctx->state == GW_STATE_INIT ||
2116 hctx->state == GW_STATE_CONNECT_DELAYED) {
2117
2118 /* (optimization to detect backend process exit while processing a
2119 * large number of ready events; (this block could be removed)) */
2120 if (hctx->proc && hctx->proc->is_local) {
2121 server * const srv = r->con->srv;
2122 if (0 == srv->srvconf.max_worker)
2123 gw_restart_dead_procs(hctx->host,srv->errh,hctx->conf.debug,0);
2124 }
2125
2126 /* cleanup this request and let request handler start request again */
2127 if (hctx->reconnects++ < 5) return gw_reconnect(hctx, r);
2128 }
2129 else {
2130 /* backend might not read request body (even though backend should)
2131 * before sending response, so it is possible to get EPIPE trying to
2132 * write request body to the backend when backend has already sent a
2133 * response. If called from gw_handle_fdevent(), response should have
2134 * been read prior to getting here. However, if reqbody arrived on
2135 * client side, and called gw_handle_subrequest() and we tried to write
2136 * in gw_send_request() in state GW_STATE_WRITE, then it is possible to
2137 * get EPIPE and error out here when response is waiting to be read from
2138 * kernel socket buffers. Since we did not actually receive FDEVENT_HUP
2139 * or FDEVENT_RDHUP, calling gw_handle_fdevent() and fabricating
2140 * FDEVENT_RDHUP would cause an infinite loop trying to read().
2141 * Instead, try once to read (small) response in this theoretical race*/
2142 handler_t rc = gw_recv_response(hctx, r); /*(might invalidate hctx)*/
2143 if (rc != HANDLER_GO_ON) return rc; /*(unless HANDLER_GO_ON)*/
2144 }
2145
2146 /*(r->status == 400 if hctx->create_env() failed)*/
2147 if (!r->resp_body_started && r->http_status < 500 && r->http_status != 400)
2148 r->http_status = 503; /* Service Unavailable */
2149
2150 return gw_backend_error(hctx, r); /* HANDLER_FINISHED */
2151 }
2152
gw_send_request(gw_handler_ctx * const hctx,request_st * const r)2153 static handler_t gw_send_request(gw_handler_ctx * const hctx, request_st * const r) {
2154 handler_t rc = gw_write_request(hctx, r);
2155 return (HANDLER_ERROR != rc) ? rc : gw_write_error(hctx, r);
2156 }
2157
2158
/* plugin subrequest hook: drive request/response exchange with backend.
 * Processes poll events deferred by gw_handle_fdevent(), applies flow
 * control for streaming the response to the client and the request body
 * to the backend, and (re)sends the request when data is pending.
 * Returns HANDLER_GO_ON if this module is not active for the request. */
handler_t gw_handle_subrequest(request_st * const r, void *p_d) {
    gw_plugin_data *p = p_d;
    gw_handler_ctx *hctx = r->plugin_ctx[p->id];
    if (NULL == hctx) return HANDLER_GO_ON;

    /* process poll events accumulated by gw_handle_fdevent() */
    const int revents = hctx->revents;
    if (revents) {
        hctx->revents = 0;
        handler_t rc = gw_process_fdevent(hctx, r, revents);
        if (rc != HANDLER_GO_ON && rc != HANDLER_WAIT_FOR_EVENT)
            return rc; /*(might invalidate hctx)*/
    }

    /* response flow control (streaming with buffer minimization):
     * stop reading from backend while client write queue is large;
     * otherwise attempt an optimistic read and re-enable read interest */
    if ((r->conf.stream_response_body & FDEVENT_STREAM_RESPONSE_BUFMIN)
        && r->resp_body_started) {
        if (chunkqueue_length(&r->write_queue) > 65536 - 4096) {
            /* Note: if apps inheriting gw_handle use hctx->rb, then those apps
             * are responsible for limiting amount of data buffered in memory
             * in hctx->rb.  Currently, mod_fastcgi is the only core app doing
             * so, and the maximum FCGI_Record size is 8 + 65535 + 255 = 65798
             * (FCGI_HEADER_LEN(8)+contentLength(65535)+paddingLength(255)) */
            fdevent_fdnode_event_clr(hctx->ev, hctx->fdn, FDEVENT_IN);
        }
        else if (!(fdevent_fdnode_interest(hctx->fdn) & FDEVENT_IN)) {
            /* optimistic read from backend */
            handler_t rc;
            rc = gw_recv_response(hctx, r); /*(might invalidate hctx)*/
            if (rc != HANDLER_GO_ON) return rc; /*(unless HANDLER_GO_ON)*/
            hctx->read_ts = log_monotonic_secs;
            fdevent_fdnode_event_add(hctx->ev, hctx->fdn, FDEVENT_IN);
        }
    }

    /* (do not receive request body before GW_AUTHORIZER has run or else
     * the request body is discarded with handler_ctx_clear() after running
     * the FastCGI Authorizer) */

    if (hctx->gw_mode != GW_AUTHORIZER
        && (0 == hctx->wb.bytes_in
            ? (r->state == CON_STATE_READ_POST || -1 == hctx->wb_reqlen)
            : (hctx->wb.bytes_in < hctx->wb_reqlen || hctx->wb_reqlen < 0))) {
        /* leave excess data in r->reqbody_queue, which is
         * buffered to disk if too large and backend can not keep up */
        /*(64k - 4k to attempt to avoid temporary files
         * in conjunction with FDEVENT_STREAM_REQUEST_BUFMIN)*/
        if (chunkqueue_length(&hctx->wb) > 65536 - 4096) {
            /* backend is behind; pause polling client for more body */
            if (r->conf.stream_request_body & FDEVENT_STREAM_REQUEST_BUFMIN) {
                r->conf.stream_request_body &= ~FDEVENT_STREAM_REQUEST_POLLIN;
            }
            if (0 != hctx->wb.bytes_in) return HANDLER_WAIT_FOR_EVENT;
        }
        else {
            handler_t rc = r->con->reqbody_read(r);

            /* XXX: create configurable flag */
            /* CGI environment requires that Content-Length be set.
             * Send 411 Length Required if Content-Length missing.
             * (occurs here if client sends Transfer-Encoding: chunked
             *  and module is flagged to stream request body to backend) */
            if (-1 == r->reqbody_length && hctx->opts.backend != BACKEND_PROXY){
                return (r->conf.stream_request_body & FDEVENT_STREAM_REQUEST)
                  ? http_response_reqbody_read_error(r, 411)
                  : HANDLER_WAIT_FOR_EVENT;
            }

            if (hctx->wb_reqlen < -1 && r->reqbody_length >= 0) {
                /* (completed receiving Transfer-Encoding: chunked) */
                hctx->wb_reqlen = -hctx->wb_reqlen;
                if (hctx->stdin_append) {
                    handler_t rca = hctx->stdin_append(hctx);
                    if (HANDLER_GO_ON != rca) return rca;
                }
            }

            /* move newly-read request body into the write backlog
             * (via protocol-specific framing in stdin_append, if set) */
            if ((0 != hctx->wb.bytes_in || -1 == hctx->wb_reqlen)
                && !chunkqueue_is_empty(&r->reqbody_queue)) {
                if (hctx->stdin_append) {
                    if (chunkqueue_length(&hctx->wb) < 65536 - 16384) {
                        handler_t rca = hctx->stdin_append(hctx);
                        if (HANDLER_GO_ON != rca) return rca;
                    }
                }
                else
                    chunkqueue_append_chunkqueue(&hctx->wb, &r->reqbody_queue);
                /* if already waiting for writability, defer send to event */
                if (fdevent_fdnode_interest(hctx->fdn) & FDEVENT_OUT) {
                    return (rc == HANDLER_GO_ON) ? HANDLER_WAIT_FOR_EVENT : rc;
                }
            }
            if (rc != HANDLER_GO_ON) return rc;
        }
    }

    {
        /* send request (or queued request body) to backend unless connect()
         * is still pending or there is nothing (new) to send */
        handler_t rc =((0==hctx->wb.bytes_in || !chunkqueue_is_empty(&hctx->wb))
                       && hctx->state != GW_STATE_CONNECT_DELAYED)
          ? gw_send_request(hctx, r)
          : HANDLER_WAIT_FOR_EVENT;
        if (HANDLER_WAIT_FOR_EVENT != rc) return rc;
    }

    if (r->conf.stream_request_body & FDEVENT_STREAM_REQUEST_TCP_FIN)
        gw_conditional_tcp_fin(hctx, r);

    return HANDLER_WAIT_FOR_EVENT;
}
2264
2265
/* restart request processing after authorizer backend approved the request.
 * Reached from gw_recv_response() when gw_mode == GW_AUTHORIZER and the
 * backend returned status 200 (or no status).  Applies host docroot (if
 * configured) to the physical path, closes the authorizer backend
 * connection, and returns HANDLER_COMEBACK so other handlers can process
 * the (now authorized) request. */
static handler_t gw_authorizer_ok(gw_handler_ctx * const hctx, request_st * const r) {
    /*
     * If we are here in AUTHORIZER mode then a request for authorizer
     * was processed already, and status 200 has been returned. We need
     * now to handle authorized request.
     */
    char *physpath = NULL;

    gw_host * const host = hctx->host;
    if (host->docroot) {
        /* physical path = host docroot + uri.path */
        buffer_copy_buffer(&r->physical.doc_root, host->docroot);
        buffer_copy_buffer(&r->physical.basedir, host->docroot);
        buffer_copy_path_len2(&r->physical.path,
                              BUF_PTR_LEN(host->docroot),
                              BUF_PTR_LEN(&r->uri.path));
        physpath = r->physical.path.ptr;
    }

    /*(restore streaming flags removed during authorizer processing)*/
    r->conf.stream_response_body |= (hctx->opts.authorizer >> 1);

    gw_backend_close(hctx, r);
    handler_ctx_clear(hctx);

    /* don't do more than 6 loops here; normally shouldn't happen */
    if (++r->loops_per_request > 5) {
        log_error(r->conf.errh, __FILE__, __LINE__,
                  "too many loops while processing request: %s",
                  r->target_orig.ptr);
        r->http_status = 500; /* Internal Server Error */
        r->handler_module = NULL;
        return HANDLER_FINISHED;
    }

    /* restart the request so other handlers can process it */

    /* detach path ptr so it survives http_response_reset(); restored below */
    if (physpath) r->physical.path.ptr = NULL;
    http_response_reset(r); /*(includes r->http_status=0)*/
    /* preserve r->physical.path.ptr with modified docroot */
    if (physpath) r->physical.path.ptr = physpath;

    /*(FYI: if multiple FastCGI authorizers were to be supported,
     * next one could be started here instead of restarting request)*/

    r->handler_module = NULL;
    return HANDLER_COMEBACK;
}
2313
2314
2315 __attribute_cold__
2316 static handler_t gw_recv_response_error(gw_handler_ctx * const hctx, request_st * const r, gw_proc * const proc);
2317
2318
gw_recv_response(gw_handler_ctx * const hctx,request_st * const r)2319 static handler_t gw_recv_response(gw_handler_ctx * const hctx, request_st * const r) {
2320 /*(XXX: make this a configurable flag for other protocols)*/
2321 buffer *b = (hctx->opts.backend == BACKEND_FASTCGI
2322 || hctx->opts.backend == BACKEND_AJP13)
2323 ? chunk_buffer_acquire()
2324 : hctx->response;
2325 const off_t bytes_in = r->write_queue.bytes_in;
2326
2327 handler_t rc = http_response_read(r, &hctx->opts, b, hctx->fdn);
2328
2329 if (b != hctx->response) chunk_buffer_release(b);
2330
2331 gw_proc * const proc = hctx->proc;
2332
2333 switch (rc) {
2334 default:
2335 /* change in r->write_queue.bytes_in used to approximate backend read,
2336 * since bytes read from backend, if any, might be consumed from b by
2337 * hctx->opts->parse callback, hampering detection here. However, this
2338 * may not be triggered for partial collection of HTTP response headers
2339 * or partial packets for backend protocol (e.g. FastCGI) */
2340 if (r->write_queue.bytes_in > bytes_in)
2341 hctx->read_ts = proc->last_used = log_monotonic_secs;
2342 return HANDLER_GO_ON;
2343 case HANDLER_FINISHED:
2344 /*hctx->read_ts =*/ proc->last_used = log_monotonic_secs;
2345
2346 if (hctx->gw_mode == GW_AUTHORIZER
2347 && (200 == r->http_status || 0 == r->http_status))
2348 return gw_authorizer_ok(hctx, r);
2349
2350 gw_connection_close(hctx, r);
2351 return HANDLER_FINISHED;
2352 case HANDLER_COMEBACK: /*(not expected; treat as error)*/
2353 case HANDLER_ERROR:
2354 return gw_recv_response_error(hctx, r, proc);
2355 }
2356 }
2357
2358
__attribute_cold__
/* handle error while reading response from backend: possibly respawn a
 * locally-managed backend proc that exited, retry the request on another
 * connection when nothing was sent yet, else log and fail the request. */
static handler_t gw_recv_response_error(gw_handler_ctx * const hctx, request_st * const r, gw_proc * const proc)
{
    /* (optimization to detect backend process exit while processing a
     * large number of ready events; (this block could be removed)) */
    if (proc->is_local && 1 == proc->load && proc->pid == hctx->pid
        && proc->state != PROC_STATE_DIED
        && 0 == r->con->srv->srvconf.max_worker) {
        /* intentionally check proc->disabled_until before gw_proc_waitpid */
        gw_host * const host = hctx->host;
        log_error_st * const errh = r->con->srv->errh;
        if (proc->disabled_until < log_monotonic_secs
            && 0 != gw_proc_waitpid(host, proc, errh)) {
            /* proc has exited; attempt immediate respawn */
            if (hctx->conf.debug) {
                log_error(errh, __FILE__, __LINE__,
                          "--- gw spawning\n\tsocket %s\n\tcurrent: 1/%d",
                          proc->connection_name->ptr, host->num_procs);
            }

            if (gw_spawn_connection(host, proc, errh, hctx->conf.debug)) {
                log_error(errh, __FILE__, __LINE__,
                          "respawning failed, will retry later");
            }
        }
    }

    if (r->resp_body_started == 0) {
        /* nothing has been sent out yet, try to use another child */

        if (hctx->wb.bytes_out == 0 && hctx->reconnects++ < 5) {
            /* request was never (partially) written; safe to retry */
            log_error(r->conf.errh, __FILE__, __LINE__,
              "response not received, request not sent on "
              "socket: %s for %s?%.*s, reconnecting",
              proc->connection_name->ptr,
              r->uri.path.ptr, BUFFER_INTLEN_PTR(&r->uri.query));

            return gw_reconnect(hctx, r);
        }

        log_error(r->conf.errh, __FILE__, __LINE__,
          "response not received, request sent: %lld on "
          "socket: %s for %s?%.*s, closing connection",
          (long long)hctx->wb.bytes_out, proc->connection_name->ptr,
          r->uri.path.ptr, BUFFER_INTLEN_PTR(&r->uri.query));
    } else if (!light_btst(r->resp_htags, HTTP_HEADER_UPGRADE)) {
        /* (no log for Upgrade'd connections; abrupt close is common there) */
        log_error(r->conf.errh, __FILE__, __LINE__,
          "response already sent out, but backend returned error on "
          "socket: %s for %s?%.*s, terminating connection",
          proc->connection_name->ptr,
          r->uri.path.ptr, BUFFER_INTLEN_PTR(&r->uri.query));
    }

    return gw_backend_error(hctx, r); /* HANDLER_FINISHED */
}
2413
2414
gw_handle_fdevent(void * ctx,int revents)2415 static handler_t gw_handle_fdevent(void *ctx, int revents) {
2416 gw_handler_ctx *hctx = ctx;
2417 hctx->revents |= revents;
2418 joblist_append(hctx->con);
2419 return HANDLER_FINISHED;
2420 }
2421
/* dispatch accumulated poll events for the backend connection.
 * Checked in order: readable (response), writable (request),
 * hangup/peer-close, then error.  Each helper called here might
 * invalidate hctx, so return immediately after non-GO_ON results. */
static handler_t gw_process_fdevent(gw_handler_ctx * const hctx, request_st * const r, int revents) {
    if (revents & FDEVENT_IN) {
        handler_t rc = gw_recv_response(hctx, r); /*(might invalidate hctx)*/
        if (rc != HANDLER_GO_ON) return rc; /*(unless HANDLER_GO_ON)*/
    }

    if (revents & FDEVENT_OUT) {
        return gw_send_request(hctx, r); /*(might invalidate hctx)*/
    }

    /* perhaps this issue is already handled */
    if (revents & (FDEVENT_HUP|FDEVENT_RDHUP)) {
        if (hctx->state == GW_STATE_CONNECT_DELAYED) {
            /* HUP while connect() pending: let write path detect/report it */
            return gw_send_request(hctx, r); /*(might invalidate hctx)*/
        } else if (r->resp_body_started) {
            /* drain any remaining data from kernel pipe buffers
             * even if (r->conf.stream_response_body
             *          & FDEVENT_STREAM_RESPONSE_BUFMIN)
             * since event loop will spin on fd FDEVENT_HUP event
             * until unregistered. */
            handler_t rc;
            const unsigned short flags = r->conf.stream_response_body;
            r->conf.stream_response_body &= ~FDEVENT_STREAM_RESPONSE_BUFMIN;
            r->conf.stream_response_body |= FDEVENT_STREAM_RESPONSE_POLLRDHUP;
            do {
                rc = gw_recv_response(hctx, r); /*(might invalidate hctx)*/
            } while (rc == HANDLER_GO_ON); /*(unless HANDLER_GO_ON)*/
            r->conf.stream_response_body = flags; /*(restore flags)*/
            return rc; /* HANDLER_FINISHED or HANDLER_ERROR */
        } else {
            /* unexpected close before any response was produced */
            gw_proc *proc = hctx->proc;
            log_error(r->conf.errh, __FILE__, __LINE__,
              "error: unexpected close of gw connection for %s?%.*s "
              "(no gw process on socket: %s ?) %d",
              r->uri.path.ptr, BUFFER_INTLEN_PTR(&r->uri.query),
              proc->connection_name->ptr, hctx->state);

            gw_connection_close(hctx, r);
            return HANDLER_FINISHED;
        }
    } else if (revents & FDEVENT_ERR) {
        log_error(r->conf.errh, __FILE__, __LINE__,
          "gw: got a FDEVENT_ERR. Don't know why.");
        return gw_backend_error(hctx, r); /* HANDLER_FINISHED */
    }

    return HANDLER_GO_ON;
}
2470
/* check whether this gateway module handles the request.
 * Matches request URL/path against configured extensions (authorizer list
 * first, then responder list), selects a backend host, optionally splits
 * SCRIPT_NAME/PATH_INFO for "/prefix" matches, and initializes the
 * handler context (hctx_sz allows derived modules to embed gw_handler_ctx
 * in a larger struct).  Returns HANDLER_GO_ON when not handled here. */
handler_t gw_check_extension(request_st * const r, gw_plugin_data * const p, int uri_path_handler, size_t hctx_sz) {
  #if 0 /*(caller must handle)*/
    if (NULL != r->handler_module) return HANDLER_GO_ON;
    gw_patch_connection(r, p);
    if (NULL == p->conf.exts) return HANDLER_GO_ON;
  #endif

    /* match against uri.path before path resolution, physical.path after */
    buffer *fn = uri_path_handler ? &r->uri.path : &r->physical.path;
    const size_t s_len = buffer_clen(fn);
    gw_extension *extension = NULL;
    gw_host *host = NULL;
    gw_handler_ctx *hctx;
    unsigned short gw_mode;

    if (0 == s_len) return HANDLER_GO_ON; /*(not expected)*/

    /* check p->conf.exts_auth list and then p->conf.ext_resp list
     * (skip p->conf.exts_auth if array is empty
     *  or if GW_AUTHORIZER already ran in this request) */
    hctx = r->plugin_ctx[p->id];
    /*(hctx not NULL if GW_AUTHORIZER ran; hctx->ext_auth check is redundant)*/
    gw_mode = (NULL == hctx || NULL == hctx->ext_auth)
      ? 0 /*GW_AUTHORIZER p->conf.exts_auth will be searched next*/
      : GW_AUTHORIZER; /*GW_RESPONDER p->conf.exts_resp will be searched next*/

    do {

        gw_exts *exts;
        if (0 == gw_mode) {
            gw_mode = GW_AUTHORIZER;
            exts = p->conf.exts_auth;
        } else {
            gw_mode = GW_RESPONDER;
            exts = p->conf.exts_resp;
        }

        if (0 == exts->used) continue;

        /* gw.map-extensions maps extensions to existing gw.server entries
         *
         * gw.map-extensions = ( ".php3" => ".php" )
         *
         * gw.server = ( ".php" => ... )
         *
         * */

        /* check if extension-mapping matches */
        if (p->conf.ext_mapping) {
            data_string *ds =
              (data_string *)array_match_key_suffix(p->conf.ext_mapping, fn);
            if (NULL != ds) {
                /* found a mapping */
                /* check if we know the extension */
                uint32_t k;
                for (k = 0; k < exts->used; ++k) {
                    extension = exts->exts+k;

                    if (buffer_is_equal(&ds->value, &extension->key)) {
                        break;
                    }
                }

                if (k == exts->used) {
                    /* found nothing */
                    extension = NULL;
                }
            }
        }

        if (extension == NULL) {
            size_t uri_path_len = buffer_clen(&r->uri.path);

            /* check if extension matches */
            for (uint32_t k = 0; k < exts->used; ++k) {
                gw_extension *ext = exts->exts+k;
              #ifdef __clang_analyzer__
                force_assert(ext); /*(unnecessary; quiet clang analyzer)*/
              #endif
                size_t ct_len = buffer_clen(&ext->key);

                /* check _url_ in the form "/gw_pattern" (prefix match) */
                if (ext->key.ptr[0] == '/') {
                    if (ct_len <= uri_path_len
                        && 0 == memcmp(r->uri.path.ptr, ext->key.ptr, ct_len)){
                        extension = ext;
                        break;
                    }
                } else if (ct_len <= s_len
                           && 0 == memcmp(fn->ptr + s_len - ct_len,
                                          ext->key.ptr, ct_len)) {
                    /* check extension in the form ".fcg" (suffix match) */
                    extension = ext;
                    break;
                }
            }
        }

    } while (NULL == extension && gw_mode != GW_RESPONDER);

    /* extension doesn't match */
    if (NULL == extension) {
        return HANDLER_GO_ON;
    }

    /* check if we have at least one server for this extension up and running */
    host = gw_host_get(r, extension, p->conf.balance, p->conf.debug);
    if (NULL == host) {
        /* (gw_host_get() sets the error response) */
        return HANDLER_FINISHED;
    }

    /* a note about no handler is not sent yet */
    extension->note_is_sent = 0;

    /*
     * if check-local is disabled, use the uri.path handler
     *
     */

    /* init handler-context */
    if (uri_path_handler) {
        if (host->check_local)
            return HANDLER_GO_ON; /* defer to physical-path pass */

        /* path info rewrite is done only for /prefix/? matches */
        /* do not split path info for authorizer */
        if (extension->key.ptr[0] == '/' && gw_mode != GW_AUTHORIZER) {
            /* the prefix is the SCRIPT_NAME,
             * everything from start to the next slash
             * this is important for check-local = "disable"
             *
             * if prefix = /admin.gw
             *
             * /admin.gw/foo/bar
             *
             * SCRIPT_NAME = /admin.gw
             * PATH_INFO   = /foo/bar
             *
             * if prefix = /cgi-bin/
             *
             * /cgi-bin/foo/bar
             *
             * SCRIPT_NAME = /cgi-bin/foo
             * PATH_INFO   = /bar
             *
             * if prefix = /, and fix-root-path-name is enable
             *
             * /cgi-bin/foo/bar
             *
             * SCRIPT_NAME = /cgi-bin/foo
             * PATH_INFO   = /bar
             *
             */
            /* (s_len is buffer_clen(&r->uri.path) if (uri_path_handler) */
            uint32_t elen = buffer_clen(&extension->key);
            const char *pathinfo;
            if (1 == elen && host->fix_root_path_name) {
                /* prefix "/": entire path becomes PATH_INFO */
                buffer_copy_buffer(&r->pathinfo, &r->uri.path);
                buffer_truncate(&r->uri.path, 0);
            }
            else if (s_len > elen
                     && (pathinfo = strchr(r->uri.path.ptr+elen, '/'))) {
                /* rewrite uri.path and pathinfo */
                const uint32_t plen = r->uri.path.ptr + s_len - pathinfo;
                buffer_copy_string_len(&r->pathinfo, pathinfo, plen);
                buffer_truncate(&r->uri.path, s_len - plen);
            }
        }
    }

    if (!hctx) hctx = handler_ctx_init(hctx_sz);

    hctx->ev           = r->con->srv->ev;
    hctx->r            = r;
    hctx->con          = r->con;
    hctx->plugin_data  = p;
    hctx->host         = host;
    hctx->proc         = NULL;
    hctx->ext          = extension;
    gw_host_assign(host); /* account for request load on selected host */

    hctx->gw_mode = gw_mode;
    if (gw_mode == GW_AUTHORIZER) {
        hctx->ext_auth = hctx->ext;
    }

    /*hctx->conf.exts        = p->conf.exts;*/
    /*hctx->conf.exts_auth   = p->conf.exts_auth;*/
    /*hctx->conf.exts_resp   = p->conf.exts_resp;*/
    /*hctx->conf.ext_mapping = p->conf.ext_mapping;*/
    hctx->conf.balance     = p->conf.balance;
    hctx->conf.proto       = p->conf.proto;
    hctx->conf.debug       = p->conf.debug;

    /* per-read limit depends on response streaming mode */
    hctx->opts.max_per_read =
      !(r->conf.stream_response_body /*(if not streaming response body)*/
        & (FDEVENT_STREAM_RESPONSE|FDEVENT_STREAM_RESPONSE_BUFMIN))
        ? 262144
        : (r->conf.stream_response_body & FDEVENT_STREAM_RESPONSE_BUFMIN)
          ? 16384  /* FDEVENT_STREAM_RESPONSE_BUFMIN */
          : 65536; /* FDEVENT_STREAM_RESPONSE */
    hctx->opts.fdfmt = S_IFSOCK;
    hctx->opts.authorizer = (gw_mode == GW_AUTHORIZER);
    hctx->opts.local_redir = 0;
    hctx->opts.xsendfile_allow = host->xsendfile_allow;
    hctx->opts.xsendfile_docroot = host->xsendfile_docroot;

    r->plugin_ctx[p->id] = hctx;

    r->handler_module = p->self;

    if (r->conf.log_request_handling) {
        log_error(r->conf.errh, __FILE__, __LINE__,
          "handling the request using %s", p->self->name);
    }

    return HANDLER_GO_ON;
}
2688
__attribute_cold__
__attribute_noinline__
/* handle a connect/read/write timeout on one in-flight backend request.
 * msg is "connect", "read", or "write"; dispatch keys off its first char.
 * (may invalidate hctx via gw_reconnect()/gw_backend_error()) */
static void gw_handle_trigger_hctx_timeout(gw_handler_ctx * const hctx, const char * const msg) {

    request_st * const r = hctx->r;
    joblist_append(r->con); /* (schedule request for further processing) */

    if (*msg == 'c') { /* "connect" */
        /* temporarily disable backend proc */
        gw_proc_connect_error(r, hctx->host, hctx->proc, hctx->pid,
                              ETIMEDOUT, hctx->conf.debug);
        /* cleanup this request and let request handler start request again */
        /* retry only once since request already waited write_timeout secs */
        if (hctx->reconnects++ < 1) {
            gw_reconnect(hctx, r);
            return;
        }
        r->http_status = 503; /* Service Unavailable */
    }
    else { /* "read" or "write" */
        /* blocked waiting to send (more) data to or to receive response
         * (neither are a definite indication that the proc is no longer
         *  responsive on other socket connections; not marking proc overloaded)
         * (If connect() to backend succeeded, then we began sending
         *  request and filled kernel socket buffers, so request is
         *  in progress and it is not safe or possible to retry) */
        /*if (hctx->conf.debug)*/
            log_error(r->conf.errh, __FILE__, __LINE__,
              "%s timeout on socket: %s (fd: %d)",
              msg, hctx->proc->connection_name->ptr, hctx->fd);

        if (*msg == 'w') { /* "write" */
            gw_write_error(hctx, r); /*(calls gw_backend_error())*/
            /* upgrade generic 503 from error path to 504 for timeout */
            if (r->http_status == 503) r->http_status = 504; /*Gateway Timeout*/
            return;
        } /* else "read" */
    }
    gw_backend_error(hctx, r);
    if (r->http_status == 500 && !r->resp_body_started && !r->handler_module)
        r->http_status = 504; /*Gateway Timeout*/
}
2730
2731 __attribute_noinline__
gw_handle_trigger_host_timeouts(gw_host * const host)2732 static void gw_handle_trigger_host_timeouts(gw_host * const host) {
2733
2734 if (NULL == host->hctxs) return;
2735 const unix_time64_t rsecs = (unix_time64_t)host->read_timeout;
2736 const unix_time64_t wsecs = (unix_time64_t)host->write_timeout;
2737 const unix_time64_t csecs = (unix_time64_t)host->connect_timeout;
2738 if (!rsecs && !wsecs && !csecs)
2739 return; /*(no timeout policy (default))*/
2740
2741 const unix_time64_t mono = log_monotonic_secs; /*(could have callers pass)*/
2742 for (gw_handler_ctx *hctx = host->hctxs, *next; hctx; hctx = next) {
2743 /* if timeout occurs, hctx might be invalidated and removed from list,
2744 * so next element must be store before checking for timeout */
2745 next = hctx->next;
2746
2747 if (hctx->state == GW_STATE_CONNECT_DELAYED) {
2748 if (mono - hctx->write_ts > csecs && csecs) /*(waiting for write)*/
2749 gw_handle_trigger_hctx_timeout(hctx, "connect");
2750 continue; /*(do not apply wsecs below to GW_STATE_CONNECT_DELAYED)*/
2751 }
2752
2753 const int events = fdevent_fdnode_interest(hctx->fdn);
2754 if ((events & FDEVENT_IN) && mono - hctx->read_ts > rsecs && rsecs) {
2755 gw_handle_trigger_hctx_timeout(hctx, "read");
2756 continue;
2757 }
2758 if ((events & FDEVENT_OUT) && mono - hctx->write_ts > wsecs && wsecs) {
2759 gw_handle_trigger_hctx_timeout(hctx, "write");
2760 continue;
2761 }
2762 }
2763 }
2764
/* periodic maintenance for one backend host: fire request timeouts,
 * restart dead local procs, and (when adaptive spawning is configured,
 * i.e. min_procs != max_procs with a bin-path) spawn an extra proc under
 * overload or terminate one long-idle proc. */
static void gw_handle_trigger_host(gw_host * const host, log_error_st * const errh, const int debug) {

    /* check for socket timeouts on active requests to backend host */
    gw_handle_trigger_host_timeouts(host);

    /* check each child proc to detect if proc exited */

    gw_proc *proc;
    unix_time64_t idle_timestamp;
    int overload = 1;

  #if 0 /* redundant w/ handle_waitpid hook since lighttpd 1.4.46 */
    for (proc = host->first; proc; proc = proc->next) {
        gw_proc_waitpid(host, proc, errh);
    }
  #endif

    gw_restart_dead_procs(host, errh, debug, 1);

    /* check if adaptive spawning enabled */
    if (host->min_procs == host->max_procs) return;
    if (!host->bin_path) return;

    /* overloaded iff every proc exceeds max_load_per_proc */
    for (proc = host->first; proc; proc = proc->next) {
        if (proc->load <= host->max_load_per_proc) {
            overload = 0;
            break;
        }
    }

    if (overload && host->num_procs && host->num_procs < host->max_procs) {
        /* overload, spawn new child */
        if (debug) {
            log_error(errh, __FILE__, __LINE__,
              "overload detected, spawning a new child");
        }

        gw_proc_spawn(host, errh, debug);
    }

    idle_timestamp = log_monotonic_secs - host->idle_timeout;
    for (proc = host->first; proc; proc = proc->next) {
        if (host->num_procs <= host->min_procs) break; /* keep minimum */
        if (0 != proc->load) continue;      /* skip procs serving requests */
        if (proc->pid <= 0) continue;       /* skip procs not running */
        if (proc->last_used >= idle_timestamp) continue; /* not idle enough */

        /* terminate proc that has been idling for a long time */
        if (debug) {
            log_error(errh, __FILE__, __LINE__,
              "idle-timeout reached, terminating child: socket: %s pid %d",
              proc->unixsocket ? proc->unixsocket->ptr : "", proc->pid);
        }

        gw_proc_kill(host, proc);

        /* proc is now in unused, let next second handle next process */
        break;
    }

  #if 0 /* redundant w/ handle_waitpid hook since lighttpd 1.4.46 */
    for (proc = host->unused_procs; proc; proc = proc->next) {
        gw_proc_waitpid(host, proc, errh);
    }
  #endif
}
2831
static void gw_handle_trigger_exts(gw_exts * const exts, log_error_st * const errh, const int debug) {
    /* run periodic host maintenance for every host of every extension */
    for (uint32_t i = 0; i < exts->used; ++i) {
        gw_extension * const ext = exts->exts+i;
        for (uint32_t h = 0; h < ext->used; ++h)
            gw_handle_trigger_host(ext->hosts[h], errh, debug);
    }
}
2840
static void gw_handle_trigger_exts_wkr(gw_exts *exts, log_error_st *errh) {
    /* reduced periodic maintenance for worker processes: check request
     * timeouts and re-enable procs previously marked overloaded
     * (proc lifecycle management is not performed here) */
    for (uint32_t i = 0; i < exts->used; ++i) {
        gw_extension * const ext = exts->exts+i;
        for (uint32_t h = 0; h < ext->used; ++h) {
            gw_host * const host = ext->hosts[h];
            gw_handle_trigger_host_timeouts(host);
            gw_proc *proc = host->first;
            for (; proc; proc = proc->next) {
                if (PROC_STATE_OVERLOADED == proc->state)
                    gw_proc_check_enable(host, proc, errh);
            }
        }
    }
}
2854
/* plugin periodic trigger hook: walk the config contexts and run
 * maintenance on every configured backend host.  Workers in multi-worker
 * setups (srv->pid differs from spawning pid) run a reduced check set
 * (gw_handle_trigger_exts_wkr). */
handler_t gw_handle_trigger(server *srv, void *p_d) {
    gw_plugin_data * const p = p_d;
    int wkr = (0 != srv->srvconf.max_worker && p->srv_pid != srv->pid);
    log_error_st * const errh = srv->errh;
    int global_debug = 0;

    if (NULL == p->cvlist) return HANDLER_GO_ON;
    /* (init i to 0 if global context; to 1 to skip empty global context) */
    for (int i = !p->cvlist[0].v.u2[1], used = p->nconfig; i < used; ++i) {
        config_plugin_value_t *cpv = p->cvlist + p->cvlist[i].v.u2[0];
        gw_plugin_config *conf = NULL;
        int debug = global_debug;
        for (; -1 != cpv->k_id; ++cpv) {
            switch (cpv->k_id) {
              case 0: /* xxxxx.server */
                if (cpv->vtype == T_CONFIG_LOCAL) conf = cpv->v.v;
                break;
              case 2: /* xxxxx.debug */
                debug = (int)cpv->v.u;
                if (0 == i) global_debug = (int)cpv->v.u;
                /* fall through */
              default:
                break;
            }
        }

        if (NULL == conf || NULL == conf->exts) continue;

        /* (debug flag is only active if set in same scope as xxxxx.server
         *  or global scope (for convenience))
         * (unable to use p->defaults.debug since gw_plugin_config
         *  might be part of a larger plugin_config) */
        wkr
          ? gw_handle_trigger_exts_wkr(conf->exts, errh)
          : gw_handle_trigger_exts(conf->exts, errh, debug);
    }

    return HANDLER_GO_ON;
}
2893
/* Reap notification for a child process: locate the gw_proc matching pid
 * among all configured hosts, log its exit status, mark it DIED, and
 * (for procs on the active list) restart it unless it is being throttled.
 * Returns HANDLER_FINISHED if pid belonged to this plugin (pid consumed),
 * else HANDLER_GO_ON so other plugins get a chance to claim the pid. */
handler_t gw_handle_waitpid_cb(server *srv, void *p_d, pid_t pid, int status) {
    gw_plugin_data * const p = p_d;
    /* only the process that spawned backends (the parent in a multi-worker
     * setup) owns and reaps them; workers pass on the notification */
    if (0 != srv->srvconf.max_worker && p->srv_pid != srv->pid)
        return HANDLER_GO_ON;
    log_error_st * const errh = srv->errh;
    int global_debug = 0;

    if (NULL == p->cvlist) return HANDLER_GO_ON;
    /* (init i to 0 if global context; to 1 to skip empty global context) */
    for (int i = !p->cvlist[0].v.u2[1], used = p->nconfig; i < used; ++i) {
        config_plugin_value_t *cpv = p->cvlist + p->cvlist[i].v.u2[0];
        gw_plugin_config *conf = NULL;
        int debug = global_debug;
        /* scan this scope's directives for the extension set and debug flag */
        for (; -1 != cpv->k_id; ++cpv) {
            switch (cpv->k_id) {
              case 0: /* xxxxx.server */
                if (cpv->vtype == T_CONFIG_LOCAL) conf = cpv->v.v;
                break;
              case 2: /* xxxxx.debug */
                debug = (int)cpv->v.u;
                if (0 == i) global_debug = (int)cpv->v.u;
              default:
                break;
            }
        }

        if (NULL == conf || NULL == conf->exts) continue;

        /* (debug flag is only active if set in same scope as xxxxx.server
         * or global scope (for convenience))
         * (unable to use p->defaults.debug since gw_plugin_config
         * might be part of a larger plugin_config) */
        const unix_time64_t cur_ts = log_monotonic_secs;
        gw_exts *exts = conf->exts;
        for (uint32_t j = 0; j < exts->used; ++j) {
            gw_extension *ex = exts->exts+j;
            for (uint32_t n = 0; n < ex->used; ++n) {
                gw_host *host = ex->hosts[n];
                gw_proc *proc;
                /* first check the active proc list: these are restarted */
                for (proc = host->first; proc; proc = proc->next) {
                    /* only locally-spawned procs with matching pid qualify */
                    if (!proc->is_local || proc->pid != pid) continue;

                    gw_proc_waitpid_log(host, proc, errh, status);
                    gw_proc_set_state(host, proc, PROC_STATE_DIED);
                    proc->pid = 0;

                    /* restart, but avoid spinning if child exits too quickly */
                    if (proc->disabled_until < cur_ts) {
                        /*(set state PROC_STATE_DIED above, so != KILLED here)*/
                        /*(PROC_STATE_KILLED belongs in unused_procs, anyway)*/
                        if (proc->state != PROC_STATE_KILLED)
                            proc->disabled_until = cur_ts;
                        if (gw_spawn_connection(host, proc, errh, debug)) {
                            log_error(errh, __FILE__, __LINE__,
                              "ERROR: spawning gw failed.");
                        }
                    }

                    /* pid claimed; stop searching */
                    return HANDLER_FINISHED;
                }
                /* then the unused proc list: record death, do not respawn */
                for (proc = host->unused_procs; proc; proc = proc->next) {
                    if (!proc->is_local || proc->pid != pid) continue;

                    gw_proc_waitpid_log(host, proc, errh, status);
                    if (proc->state != PROC_STATE_KILLED)
                        proc->disabled_until = cur_ts;
                    gw_proc_set_state(host, proc, PROC_STATE_DIED);
                    proc->pid = 0;
                    return HANDLER_FINISHED;
                }
            }
        }
    }

    /* pid not one of ours */
    return HANDLER_GO_ON;
}
2970