1 #include "uwsgi.h"
2
3 extern struct uwsgi_server uwsgi;
4
uwsgi_update_load_counters()5 void uwsgi_update_load_counters() {
6
7 int i;
8 uint64_t busy_workers = 0;
9 uint64_t idle_workers = 0;
10 static time_t last_sos = 0;
11
12 for (i = 1; i <= uwsgi.numproc; i++) {
13 if (uwsgi.workers[i].cheaped == 0 && uwsgi.workers[i].pid > 0) {
14 if (uwsgi_worker_is_busy(i) == 0) {
15 idle_workers++;
16 }
17 else {
18 busy_workers++;
19 }
20 }
21 }
22
23 if (busy_workers >= (uint64_t) uwsgi.numproc) {
24 ushared->overloaded++;
25
26 if (uwsgi.vassal_sos) {
27 if (uwsgi.current_time - last_sos > uwsgi.vassal_sos) {
28 uwsgi_log_verbose("asking Emperor for reinforcements (overload: %llu)...\n", (unsigned long long) ushared->overloaded);
29 vassal_sos();
30 last_sos = uwsgi.current_time;
31 }
32 }
33
34 }
35
36 ushared->busy_workers = busy_workers;
37 ushared->idle_workers = idle_workers;
38
39 }
40
uwsgi_block_signal(int signum)41 void uwsgi_block_signal(int signum) {
42 sigset_t smask;
43 sigemptyset(&smask);
44 sigaddset(&smask, signum);
45 if (sigprocmask(SIG_BLOCK, &smask, NULL)) {
46 uwsgi_error("sigprocmask()");
47 }
48 }
49
uwsgi_unblock_signal(int signum)50 void uwsgi_unblock_signal(int signum) {
51 sigset_t smask;
52 sigemptyset(&smask);
53 sigaddset(&smask, signum);
54 if (sigprocmask(SIG_UNBLOCK, &smask, NULL)) {
55 uwsgi_error("sigprocmask()");
56 }
57 }
58
// Handle one datagram received on the master UDP socket (udp_fd).
// Dispatch order: multicast announces are ignored, SNMP requests go to
// manage_snmp(), then each plugin's manage_udp() hook gets a chance;
// anything unclaimed is logged as a generic "[udp:...]" line.
void uwsgi_master_manage_udp(int udp_fd) {
	char buf[4096];
	struct sockaddr_in udp_client;
	char udp_client_addr[16];
	int i;

	socklen_t udp_len = sizeof(udp_client);
	ssize_t rlen = recvfrom(udp_fd, buf, 4096, 0, (struct sockaddr *) &udp_client, &udp_len);

	if (rlen < 0) {
		uwsgi_error("uwsgi_master_manage_udp()/recvfrom()");
	}
	else if (rlen > 0) {

		// 16 bytes is enough for a dotted-quad IPv4 address + NUL
		memset(udp_client_addr, 0, 16);
		if (inet_ntop(AF_INET, &udp_client.sin_addr.s_addr, udp_client_addr, 16)) {
			if (buf[0] == UWSGI_MODIFIER_MULTICAST_ANNOUNCE) {
				// multicast announce: intentionally ignored here
			}
			// 0x30 is the ASN.1/BER SEQUENCE tag that opens an SNMP message
			else if (buf[0] == 0x30 && uwsgi.snmp) {
				manage_snmp(udp_fd, (uint8_t *) buf, rlen, &udp_client);
			}
			else {

				// loop the various udp manager until one returns true
				int udp_managed = 0;
				for (i = 0; i < 256; i++) {
					if (uwsgi.p[i]->manage_udp) {
						if (uwsgi.p[i]->manage_udp(udp_client_addr, udp_client.sin_port, buf, rlen)) {
							udp_managed = 1;
							break;
						}
					}
				}

				// else a simple udp logger
				if (!udp_managed) {
					uwsgi_log("[udp:%s:%d] %.*s", udp_client_addr, ntohs(udp_client.sin_port), (int) rlen, buf);
				}
			}
		}
		else {
			uwsgi_error("uwsgi_master_manage_udp()/inet_ntop()");
		}

	}
}
105
suspend_resume_them_all(int signum)106 void suspend_resume_them_all(int signum) {
107
108 int i;
109 int suspend = 0;
110
111 if (uwsgi.workers[0].suspended == 1) {
112 uwsgi_log_verbose("*** (SIGTSTP received) resuming workers ***\n");
113 uwsgi.workers[0].suspended = 0;
114 }
115 else {
116 uwsgi_log_verbose("*** PAUSE (press start to resume, if you do not have a joypad send SIGTSTP) ***\n");
117 suspend = 1;
118 uwsgi.workers[0].suspended = 1;
119 }
120
121 // subscribe/unsubscribe if needed
122 uwsgi_subscribe_all(suspend, 1);
123
124 for (i = 1; i <= uwsgi.numproc; i++) {
125 uwsgi.workers[i].suspended = suspend;
126 if (uwsgi.workers[i].pid > 0) {
127 if (kill(uwsgi.workers[i].pid, SIGTSTP)) {
128 uwsgi_error("kill()");
129 }
130 }
131 }
132 }
133
134
uwsgi_master_check_mercy()135 void uwsgi_master_check_mercy() {
136
137 int i;
138
139 for (i = 1; i <= uwsgi.numproc; i++) {
140 if (uwsgi.workers[i].pid > 0 && uwsgi.workers[i].cursed_at) {
141 if (uwsgi_now() > uwsgi.workers[i].no_mercy_at) {
142 uwsgi_log_verbose("worker %d (pid: %d) is taking too much time to die...NO MERCY !!!\n", i, uwsgi.workers[i].pid);
143 // yes that look strangem but we avoid callign it again if we skip waitpid() call below
144 uwsgi_curse(i, SIGKILL);
145 }
146 }
147 }
148
149 for (i = 0; i < uwsgi.mules_cnt; i++) {
150 if (uwsgi.mules[i].pid > 0 && uwsgi.mules[i].cursed_at) {
151 if (uwsgi_now() > uwsgi.mules[i].no_mercy_at) {
152 uwsgi_log_verbose("mule %d (pid: %d) is taking too much time to die...NO MERCY !!!\n", i + 1, uwsgi.mules[i].pid);
153 uwsgi_curse_mule(i, SIGKILL);
154 }
155 }
156 }
157
158
159 struct uwsgi_spooler *us;
160 for (us = uwsgi.spoolers; us; us = us->next) {
161 if (us->pid > 0 && us->cursed_at && uwsgi_now() > us->no_mercy_at) {
162 uwsgi_log_verbose("spooler %d (pid: %d) is taking too much time to die...NO MERCY !!!\n", i + 1, us->pid);
163 kill(us->pid, SIGKILL);
164 }
165 }
166 }
167
168
expire_rb_timeouts(struct uwsgi_rbtree * tree)169 void expire_rb_timeouts(struct uwsgi_rbtree *tree) {
170
171 uint64_t current = (uint64_t) uwsgi_now();
172 struct uwsgi_rb_timer *urbt;
173 struct uwsgi_signal_rb_timer *usrbt;
174
175 for (;;) {
176
177 urbt = uwsgi_min_rb_timer(tree, NULL);
178
179 if (urbt == NULL)
180 return;
181
182 if (urbt->value <= current) {
183 // remove the timeout and add another
184 usrbt = (struct uwsgi_signal_rb_timer *) urbt->data;
185 uwsgi_del_rb_timer(tree, urbt);
186 free(urbt);
187 usrbt->iterations_done++;
188 uwsgi_route_signal(usrbt->sig);
189 if (!usrbt->iterations || usrbt->iterations_done < usrbt->iterations) {
190 usrbt->uwsgi_rb_timer = uwsgi_add_rb_timer(tree, uwsgi_now() + usrbt->value, usrbt);
191 }
192 continue;
193 }
194
195 break;
196 }
197 }
198
// Read the kernel's listen-queue state for a TCP socket via TCP_INFO and
// store it in uwsgi_sock->queue / ->max_queue. Only implemented on Linux
// and FreeBSD; a no-op elsewhere.
static void get_tcp_info(struct uwsgi_socket *uwsgi_sock) {

#if defined(__linux__) || defined(__FreeBSD__)
	int fd = uwsgi_sock->fd;
	struct tcp_info ti;
	socklen_t tis = sizeof(struct tcp_info);

	if (!getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &tis)) {

		// checks for older kernels
		// (a zero tcpi_sacked means the kernel does not report
		// listen-queue data on LISTEN sockets, so bail out)
#if defined(__linux__)
		if (!ti.tcpi_sacked) {
#elif defined(__FreeBSD__)
		if (!ti.__tcpi_sacked) {
#endif
			return;
		}

		// on LISTEN sockets tcpi_unacked is the current backlog length
		// and tcpi_sacked the configured maximum
#if defined(__linux__)
		uwsgi_sock->queue = (uint64_t) ti.tcpi_unacked;
		uwsgi_sock->max_queue = (uint64_t) ti.tcpi_sacked;
#elif defined(__FreeBSD__)
		uwsgi_sock->queue = (uint64_t) ti.__tcpi_unacked;
		uwsgi_sock->max_queue = (uint64_t) ti.__tcpi_sacked;
#endif
	}

#endif
}
228
229
230 #ifdef __linux__
231 #include <linux/sockios.h>
232
233 #ifdef UNBIT
234 #define SIOBKLGQ 0x8908
235 #endif
236
237 #ifdef SIOBKLGQ
238
// Unbit-kernel-specific variant: read the listen-queue length of an
// AF_UNIX socket via the custom SIOBKLGQ ioctl (only available on
// patched Unbit kernels; compiled in only when SIOBKLGQ is defined).
static void get_linux_unbit_SIOBKLGQ(struct uwsgi_socket *uwsgi_sock) {

	int fd = uwsgi_sock->fd;
	int queue = 0;
	if (ioctl(fd, SIOBKLGQ, &queue) >= 0) {
		uwsgi_sock->queue = (uint64_t) queue;
		// the kernel does not report the max for unix sockets: use the
		// configured listen backlog
		uwsgi_sock->max_queue = (uint64_t) uwsgi.listen_queue;
	}
}
248 #endif
249 #endif
250
// Periodic master task: sample the listen-queue length of every bound
// socket, raise configured alarms when a queue is full, publish the
// worst backlog as the instance load, and optionally send a vassal-sos
// to the Emperor when the backlog crosses vassal_sos_backlog.
static void master_check_listen_queue() {

	uint64_t backlog = 0;
	struct uwsgi_socket *uwsgi_sock = uwsgi.sockets;
	while(uwsgi_sock) {
		if (uwsgi_sock->family == AF_INET) {
			get_tcp_info(uwsgi_sock);
		}
#ifdef __linux__
#ifdef SIOBKLGQ
		else if (uwsgi_sock->family == AF_UNIX) {
			get_linux_unbit_SIOBKLGQ(uwsgi_sock);
		}
#endif
#endif

		// track the worst backlog across all sockets
		if (uwsgi_sock->queue > backlog) {
			backlog = uwsgi_sock->queue;
		}

		if (uwsgi_sock->queue > 0 && uwsgi_sock->queue >= uwsgi_sock->max_queue) {
			uwsgi_log_verbose("*** uWSGI listen queue of socket \"%s\" (fd: %d) full !!! (%llu/%llu) ***\n", uwsgi_sock->name, uwsgi_sock->fd, (unsigned long long) uwsgi_sock->queue, (unsigned long long) uwsgi_sock->max_queue);

			if (uwsgi.alarm_backlog) {
				char buf[1024];
				int ret = snprintf(buf, 1024, "listen queue of socket \"%s\" (fd: %d) full !!! (%llu/%llu)", uwsgi_sock->name, uwsgi_sock->fd, (unsigned long long) uwsgi_sock->queue, (unsigned long long) uwsgi_sock->max_queue);
				// only trigger the alarms when the message was not truncated
				if (ret > 0 && ret < 1024) {
					struct uwsgi_string_list *usl = NULL;
					uwsgi_foreach(usl, uwsgi.alarm_backlog) {
						uwsgi_alarm_trigger(usl->value, buf, ret);
					}
				}
			}
		}
		uwsgi_sock = uwsgi_sock->next;
	}

	// TODO load should be something more advanced based on different values
	uwsgi.shared->load = backlog;

	uwsgi.shared->backlog = backlog;

	if (uwsgi.vassal_sos_backlog > 0 && uwsgi.has_emperor) {
		if (uwsgi.shared->backlog >= (uint64_t) uwsgi.vassal_sos_backlog) {
			// ask emperor for help
			uwsgi_log_verbose("asking Emperor for reinforcements (backlog: %llu)...\n", (unsigned long long) uwsgi.shared->backlog);
			vassal_sos();
		}
	}
}
301
302 void vassal_sos() {
303 if (!uwsgi.has_emperor) {
304 uwsgi_log("[broodlord] instance not governed by an Emperor !!!\n");
305 return;
306 }
307 char byte = 30;
308 if (write(uwsgi.emperor_fd, &byte, 1) != 1) {
309 uwsgi_error("vassal_sos()/write()");
310 }
311 }
312
// The master process main loop.
//
// Setup phase: process name, unix signal handlers, the master event
// queue (signal pipes, master fifo, notify/stats/zerg/emperor/setns/udp
// sockets, loggers), then spawns mules, gateways and daemons, performs
// the first subscriptions and primes the various touch-* timestamps.
//
// Loop phase: runs plugin master_cycle hooks, checks death/reload/chain
// state, enforces mercy deadlines, runs the cheaper engine, reaps dead
// children with waitpid(WNOHANG) and either performs the per-cycle
// housekeeping (diedpid == 0) or classifies and handles the dead child.
//
// Returns -1 when the caller must perform a full reload, 0 when the
// master has to exit (worker respawn signalled shutdown).
int master_loop(char **argv, char **environ) {

	struct timeval last_respawn;
	int last_respawn_rate = 0;

	pid_t diedpid;
	int waitpid_status;

	time_t now = 0;

	int i = 0;
	int rlen;

	// seconds between two housekeeping cycles (may be shortened by timers)
	int check_interval = 1;

	struct uwsgi_rb_timer *min_timeout;
	struct uwsgi_rbtree *rb_timers = uwsgi_init_rb_timer();

	if (uwsgi.procname_master) {
		uwsgi_set_processname(uwsgi.procname_master);
	}
	else if (uwsgi.procname) {
		uwsgi_set_processname(uwsgi.procname);
	}
	else if (uwsgi.auto_procname) {
		uwsgi_set_processname("uWSGI master");
	}


	uwsgi.current_time = uwsgi_now();

	// die-on-term swaps the meaning of SIGTERM (brutal) and SIGQUIT (graceful)
	uwsgi_unix_signal(SIGTSTP, suspend_resume_them_all);
	uwsgi_unix_signal(SIGHUP, grace_them_all);
	if (uwsgi.die_on_term) {
		uwsgi_unix_signal(SIGTERM, kill_them_all);
		uwsgi_unix_signal(SIGQUIT, reap_them_all);
	}
	else {
		uwsgi_unix_signal(SIGTERM, reap_them_all);
		uwsgi_unix_signal(SIGQUIT, kill_them_all);
	}
	uwsgi_unix_signal(SIGINT, kill_them_all);
	uwsgi_unix_signal(SIGUSR1, stats);

	atexit(uwsgi_master_cleanup_hooks);

	uwsgi.master_queue = event_queue_init();

	/* route signals to workers... */
#ifdef UWSGI_DEBUG
	uwsgi_log("adding %d to signal poll\n", uwsgi.shared->worker_signal_pipe[0]);
#endif
	event_queue_add_fd_read(uwsgi.master_queue, uwsgi.shared->worker_signal_pipe[0]);

	if (uwsgi.master_fifo) {
		uwsgi.master_fifo_fd = uwsgi_master_fifo();
		event_queue_add_fd_read(uwsgi.master_queue, uwsgi.master_fifo_fd);
	}

	if (uwsgi.notify_socket) {
		uwsgi.notify_socket_fd = bind_to_unix_dgram(uwsgi.notify_socket);
		uwsgi_log("notification socket enabled on %s (fd: %d)\n", uwsgi.notify_socket, uwsgi.notify_socket_fd);
		event_queue_add_fd_read(uwsgi.master_queue, uwsgi.notify_socket_fd);
	}

	if (uwsgi.spoolers) {
		event_queue_add_fd_read(uwsgi.master_queue, uwsgi.shared->spooler_signal_pipe[0]);
	}

	if (uwsgi.mules_cnt > 0) {
		event_queue_add_fd_read(uwsgi.master_queue, uwsgi.shared->mule_signal_pipe[0]);
	}

	if (uwsgi.log_master) {
		uwsgi.log_master_buf = uwsgi_malloc(uwsgi.log_master_bufsize);
		if (!uwsgi.threaded_logger) {
#ifdef UWSGI_DEBUG
			uwsgi_log("adding %d to master logging\n", uwsgi.shared->worker_log_pipe[0]);
#endif
			event_queue_add_fd_read(uwsgi.master_queue, uwsgi.shared->worker_log_pipe[0]);
			if (uwsgi.req_log_master) {
				event_queue_add_fd_read(uwsgi.master_queue, uwsgi.shared->worker_req_log_pipe[0]);
			}
		}
		else {
			uwsgi_threaded_logger_spawn();
		}

	}

#ifdef UWSGI_SSL
	uwsgi_start_legions();
#endif
	uwsgi_metrics_start_collector();

	uwsgi_add_reload_fds();

	uwsgi_cache_start_sweepers();
	uwsgi_cache_start_sync_servers();

	uwsgi.wsgi_req->buffer = uwsgi.workers[0].cores[0].buffer;

	if (uwsgi.has_emperor) {
		if (uwsgi.emperor_proxy) {
			uwsgi.emperor_fd_proxy = bind_to_unix(uwsgi.emperor_proxy, uwsgi.listen_queue, 0, 0);
			if (uwsgi.emperor_fd_proxy < 0) exit(1);
			// restrict the proxy socket to the instance owner
			if (chmod(uwsgi.emperor_proxy, S_IRUSR|S_IWUSR)) {
				uwsgi_error("[emperor-proxy] chmod()");
				exit(1);
			}
			event_queue_add_fd_read(uwsgi.master_queue, uwsgi.emperor_fd_proxy);
		}
		else {
			event_queue_add_fd_read(uwsgi.master_queue, uwsgi.emperor_fd);
		}
	}

#ifdef __linux__
	if (uwsgi.setns_socket) {
		uwsgi.setns_socket_fd = bind_to_unix(uwsgi.setns_socket, uwsgi.listen_queue, 0, 0);
		if (uwsgi.setns_socket_fd < 0) exit(1);
		if (chmod(uwsgi.setns_socket, S_IRUSR|S_IWUSR)) {
			uwsgi_error("[setns-socket] chmod()");
			exit(1);
		}
		event_queue_add_fd_read(uwsgi.master_queue, uwsgi.setns_socket_fd);
	}
#endif

	if (uwsgi.zerg_server) {
		uwsgi.zerg_server_fd = bind_to_unix(uwsgi.zerg_server, uwsgi.listen_queue, 0, 0);
		event_queue_add_fd_read(uwsgi.master_queue, uwsgi.zerg_server_fd);
		uwsgi_log("*** Zerg server enabled on %s ***\n", uwsgi.zerg_server);
	}

	if (uwsgi.stats) {
		char *tcp_port = strrchr(uwsgi.stats, ':');
		if (tcp_port) {
			// disable deferred accept for this socket
			int current_defer_accept = uwsgi.no_defer_accept;
			uwsgi.no_defer_accept = 1;
			uwsgi.stats_fd = bind_to_tcp(uwsgi.stats, uwsgi.listen_queue, tcp_port);
			uwsgi.no_defer_accept = current_defer_accept;
		}
		else {
			uwsgi.stats_fd = bind_to_unix(uwsgi.stats, uwsgi.listen_queue, uwsgi.chmod_socket, uwsgi.abstract_socket);
		}

		event_queue_add_fd_read(uwsgi.master_queue, uwsgi.stats_fd);
		uwsgi_log("*** Stats server enabled on %s fd: %d ***\n", uwsgi.stats, uwsgi.stats_fd);
	}


	if (uwsgi.stats_pusher_instances) {
		if (!uwsgi_thread_new(uwsgi_stats_pusher_loop)) {
			uwsgi_log("!!! unable to spawn stats pusher thread !!!\n");
			exit(1);
		}
	}

	if (uwsgi.udp_socket) {
		uwsgi.udp_fd = bind_to_udp(uwsgi.udp_socket, 0, 0);
		if (uwsgi.udp_fd < 0) {
			uwsgi_log("unable to bind to udp socket. SNMP services will be disabled.\n");
		}
		else {
			uwsgi_log("UDP server enabled.\n");
			event_queue_add_fd_read(uwsgi.master_queue, uwsgi.udp_fd);
		}
	}

	uwsgi.snmp_fd = uwsgi_setup_snmp();

	// cheap mode: all workers start cheaped, the first socket event
	// will trigger the spawn
	if (uwsgi.status.is_cheap) {
		uwsgi_add_sockets_to_queue(uwsgi.master_queue, -1);
		for (i = 1; i <= uwsgi.numproc; i++) {
			uwsgi.workers[i].cheaped = 1;
		}
		uwsgi_log("cheap mode enabled: waiting for socket connection...\n");
	}


	// spawn mules
	for (i = 0; i < uwsgi.mules_cnt; i++) {
		size_t mule_patch_size = 0;
		uwsgi.mules[i].patch = uwsgi_string_get_list(&uwsgi.mules_patches, i, &mule_patch_size);
		uwsgi_mule(i + 1);
	}

	// spawn gateways
	for (i = 0; i < ushared->gateways_cnt; i++) {
		if (ushared->gateways[i].pid == 0) {
			gateway_respawn(i);
		}
	}

	// spawn daemons
	uwsgi_daemons_spawn_all();

	// first subscription
	uwsgi_subscribe_all(0, 1);

	// sync the cache store if needed
	uwsgi_cache_sync_all();

	if (uwsgi.queue_store && uwsgi.queue_filesize) {
		if (msync(uwsgi.queue_header, uwsgi.queue_filesize, MS_ASYNC)) {
			uwsgi_error("msync()");
		}
	}

	// update touches timestamps
	uwsgi_check_touches(uwsgi.touch_reload);
	uwsgi_check_touches(uwsgi.touch_logrotate);
	uwsgi_check_touches(uwsgi.touch_logreopen);
	uwsgi_check_touches(uwsgi.touch_chain_reload);
	uwsgi_check_touches(uwsgi.touch_workers_reload);
	uwsgi_check_touches(uwsgi.touch_gracefully_stop);
	// update exec touches
	// ("<file> <command>" entries: split on the first space, the command
	// to run lives in custom_ptr)
	struct uwsgi_string_list *usl = uwsgi.touch_exec;
	while (usl) {
		char *space = strchr(usl->value, ' ');
		if (space) {
			*space = 0;
			usl->len = strlen(usl->value);
			usl->custom_ptr = space + 1;
		}
		usl = usl->next;
	}
	uwsgi_check_touches(uwsgi.touch_exec);
	// update signal touches ("<file> <signal>" entries, same split)
	usl = uwsgi.touch_signal;
	while (usl) {
		char *space = strchr(usl->value, ' ');
		if (space) {
			*space = 0;
			usl->len = strlen(usl->value);
			usl->custom_ptr = space + 1;
		}
		usl = usl->next;
	}
	uwsgi_check_touches(uwsgi.touch_signal);
	// daemon touches
	struct uwsgi_daemon *ud = uwsgi.daemons;
	while (ud) {
		if (ud->touch) {
			uwsgi_check_touches(ud->touch);
		}
		ud = ud->next;
	}
	// hook touches ("<file> <hook>" entries)
	uwsgi_foreach(usl, uwsgi.hook_touch) {
		char *space = strchr(usl->value, ' ');
		if (space) {
			*space = 0;
			usl->len = strlen(usl->value);
			uwsgi_string_new_list((struct uwsgi_string_list **)&usl->custom_ptr, space+1);
		}
	}
	uwsgi_check_touches(uwsgi.hook_touch);

	// fsmon
	uwsgi_fsmon_setup();

	uwsgi_foreach(usl, uwsgi.signal_timers) {
		char *space = strchr(usl->value, ' ');
		if (!space) {
			uwsgi_log("invalid signal timer syntax, must be: <signal> <seconds>\n");
			exit(1);
		}
		*space = 0;
		uwsgi_add_timer(atoi(usl->value), atoi(space+1));
		*space = ' ';
	}

	uwsgi_foreach(usl, uwsgi.rb_signal_timers) {
		char *space = strchr(usl->value, ' ');
		if (!space) {
			uwsgi_log("invalid redblack signal timer syntax, must be: <signal> <seconds>\n");
			exit(1);
		}
		*space = 0;
		uwsgi_signal_add_rb_timer(atoi(usl->value), atoi(space+1), 0);
		*space = ' ';
	}

	// setup cheaper algos (can be stacked)
	uwsgi.cheaper_algo = uwsgi_cheaper_algo_spare;
	if (uwsgi.requested_cheaper_algo) {
		uwsgi.cheaper_algo = NULL;
		struct uwsgi_cheaper_algo *uca = uwsgi.cheaper_algos;
		while (uca) {
			if (!strcmp(uca->name, uwsgi.requested_cheaper_algo)) {
				uwsgi.cheaper_algo = uca->func;
				break;
			}
			uca = uca->next;
		}

		if (!uwsgi.cheaper_algo) {
			uwsgi_log("unable to find requested cheaper algorithm, falling back to spare\n");
			uwsgi.cheaper_algo = uwsgi_cheaper_algo_spare;
		}

	}

	// here really starts the master loop
	uwsgi_hooks_run(uwsgi.hook_master_start, "master-start", 1);

	for (;;) {
		//uwsgi_log("uwsgi.ready_to_reload %d %d\n", uwsgi.ready_to_reload, uwsgi.numproc);

		// run master_cycle hook for every plugin
		for (i = 0; i < uwsgi.gp_cnt; i++) {
			if (uwsgi.gp[i]->master_cycle) {
				uwsgi.gp[i]->master_cycle();
			}
		}
		for (i = 0; i < 256; i++) {
			if (uwsgi.p[i]->master_cycle) {
				uwsgi.p[i]->master_cycle();
			}
		}

		// check for death (before reload !!!)
		uwsgi_master_check_death();
		// check for reload
		if (uwsgi_master_check_reload(argv)) {
			return -1;
		}

		// check chain reload
		uwsgi_master_check_chain();

		// check if some worker is taking too much to die...
		uwsgi_master_check_mercy();

		// check for daemons (smart and dumb)
		uwsgi_daemons_smart_check();

		// cheaper management
		if (uwsgi.cheaper && !uwsgi.status.is_cheap && !uwsgi_instance_is_reloading && !uwsgi_instance_is_dying && !uwsgi.workers[0].suspended) {
			if (!uwsgi_calc_cheaper())
				return 0;
		}


		// check if someone is dead
		diedpid = waitpid(WAIT_ANY, &waitpid_status, WNOHANG);
		if (diedpid == -1) {
			if (errno == ECHILD) {
				// something did not work as expected, just assume all has been cleared
				uwsgi_master_commit_status();
				diedpid = 0;
			}
			else {
				uwsgi_error("waitpid()");
				/* here is better to reload all the uWSGI stack */
				uwsgi_log("something horrible happened...\n");
				reap_them_all(0);
				exit(1);
			}
		}

		// no one died just run all of the standard master tasks
		if (diedpid == 0) {

			/* all processes ok, doing status scan after N seconds */
			check_interval = uwsgi.master_interval;
			if (!check_interval) {
				check_interval = 1;
				uwsgi.master_interval = 1;
			}


			// add unregistered file monitors
			// locking is not needed as monitors can only increase
			for (i = 0; i < ushared->files_monitored_cnt; i++) {
				if (!ushared->files_monitored[i].registered) {
					ushared->files_monitored[i].fd = event_queue_add_file_monitor(uwsgi.master_queue, ushared->files_monitored[i].filename, &ushared->files_monitored[i].id);
					ushared->files_monitored[i].registered = 1;
				}
			}


			// add unregistered timers
			// locking is not needed as timers can only increase
			for (i = 0; i < ushared->timers_cnt; i++) {
				if (!ushared->timers[i].registered) {
					ushared->timers[i].fd = event_queue_add_timer(uwsgi.master_queue, &ushared->timers[i].id, ushared->timers[i].value);
					ushared->timers[i].registered = 1;
				}
			}

			// add unregistered rb_timers
			// locking is not needed as rb_timers can only increase
			for (i = 0; i < ushared->rb_timers_cnt; i++) {
				if (!ushared->rb_timers[i].registered) {
					ushared->rb_timers[i].uwsgi_rb_timer = uwsgi_add_rb_timer(rb_timers, uwsgi_now() + ushared->rb_timers[i].value, &ushared->rb_timers[i]);
					ushared->rb_timers[i].registered = 1;
				}
			}

			int interesting_fd = -1;

			if (ushared->rb_timers_cnt > 0) {
				min_timeout = uwsgi_min_rb_timer(rb_timers, NULL);
				if (min_timeout) {
					int delta = min_timeout->value - uwsgi_now();
					if (delta <= 0) {
						expire_rb_timeouts(rb_timers);
					}
					// if the timer expires before the check_interval, override it
					else if (delta < check_interval) {
						check_interval = delta;
					}
				}
			}

			// wait for event
			rlen = event_queue_wait(uwsgi.master_queue, check_interval, &interesting_fd);

			// timed out: the earliest rb timer (if any) is due
			if (rlen == 0) {
				if (ushared->rb_timers_cnt > 0) {
					expire_rb_timeouts(rb_timers);
				}
			}

			// update load counter
			uwsgi_update_load_counters();


			// check uwsgi-cron table
			if (ushared->cron_cnt) {
				uwsgi_manage_signal_cron(uwsgi_now());
			}

			if (uwsgi.crons) {
				uwsgi_manage_command_cron(uwsgi_now());
			}

			// some event returned
			if (rlen > 0) {
				// if the following function returns -1, a new worker has just spawned
				if (uwsgi_master_manage_events(interesting_fd)) {
					return 0;
				}
			}

			// the remaining housekeeping runs at most once per second
			now = uwsgi_now();
			if (now - uwsgi.current_time < 1) {
				continue;
			}
			uwsgi.current_time = now;

			// checking logsize
			if (uwsgi.logfile) {
				uwsgi_check_logrotate();
			}

			// this will be incremented at (more or less) regular intervals
			uwsgi.master_cycles++;

			// recalculate requests counter on race conditions risky configurations
			// a bit of inaccuracy is better than locking;)
			uwsgi_master_fix_request_counters();

			// check for idle
			uwsgi_master_check_idle();

			check_interval = uwsgi.master_interval;
			if (!check_interval) {
				check_interval = 1;
				uwsgi.master_interval = 1;
			}


			// check listen_queue status
			master_check_listen_queue();

			int someone_killed = 0;
			// check if some worker has to die (harakiri, evil checks...)
			if (uwsgi_master_check_workers_deadline()) someone_killed++;
			if (uwsgi_master_check_gateways_deadline()) someone_killed++;
			if (uwsgi_master_check_mules_deadline()) someone_killed++;
			if (uwsgi_master_check_spoolers_deadline()) someone_killed++;
			if (uwsgi_master_check_crons_deadline()) someone_killed++;

			// this could trigger a complete exit...
			uwsgi_master_check_mountpoints();

#ifdef __linux__
#ifdef MADV_MERGEABLE
			if (uwsgi.linux_ksm > 0 && (uwsgi.master_cycles % uwsgi.linux_ksm) == 0) {
				uwsgi_linux_ksm_map();
			}
#endif
#endif

			// resubscribe every 10 cycles by default
			if (((uwsgi.subscriptions || uwsgi.subscriptions2) && ((uwsgi.master_cycles % uwsgi.subscribe_freq) == 0 || uwsgi.master_cycles == 1)) && !uwsgi_instance_is_reloading && !uwsgi_instance_is_dying && !uwsgi.workers[0].suspended) {
				uwsgi_subscribe_all(0, 0);
			}

			uwsgi_cache_sync_all();

			if (uwsgi.queue_store && uwsgi.queue_filesize && uwsgi.queue_store_sync && ((uwsgi.master_cycles % uwsgi.queue_store_sync) == 0)) {
				if (msync(uwsgi.queue_header, uwsgi.queue_filesize, MS_ASYNC)) {
					uwsgi_error("msync()");
				}
			}

			// check touch_reload
			if (!uwsgi_instance_is_reloading && !uwsgi_instance_is_dying) {
				char *touched = uwsgi_check_touches(uwsgi.touch_reload);
				if (touched) {
					uwsgi_log_verbose("*** %s has been touched... grace them all !!! ***\n", touched);
					// block SIGHUP so the master handler cannot run while
					// we grace the workers manually
					uwsgi_block_signal(SIGHUP);
					grace_them_all(0);
					uwsgi_unblock_signal(SIGHUP);
					continue;
				}
				touched = uwsgi_check_touches(uwsgi.touch_workers_reload);
				if (touched) {
					uwsgi_log_verbose("*** %s has been touched... workers reload !!! ***\n", touched);
					uwsgi_reload_workers();
					continue;
				}
				touched = uwsgi_check_touches(uwsgi.touch_mules_reload);
				if (touched) {
					uwsgi_log_verbose("*** %s has been touched... mules reload !!! ***\n", touched);
					uwsgi_reload_mules();
					continue;
				}
				touched = uwsgi_check_touches(uwsgi.touch_spoolers_reload);
				if (touched) {
					uwsgi_log_verbose("*** %s has been touched... spoolers reload !!! ***\n", touched);
					uwsgi_reload_spoolers();
					continue;
				}
				touched = uwsgi_check_touches(uwsgi.touch_chain_reload);
				if (touched) {
					if (uwsgi.status.chain_reloading == 0) {
						uwsgi_log_verbose("*** %s has been touched... chain reload !!! ***\n", touched);
						uwsgi.status.chain_reloading = 1;
					}
					else {
						uwsgi_log_verbose("*** %s has been touched... but chain reload is already running ***\n", touched);
					}
				}

				// be sure to run it as the last touch check
				touched = uwsgi_check_touches(uwsgi.touch_exec);
				if (touched) {
					if (uwsgi_run_command(touched, NULL, -1) >= 0) {
						uwsgi_log_verbose("[uwsgi-touch-exec] running %s\n", touched);
					}
				}
				touched = uwsgi_check_touches(uwsgi.touch_signal);
				if (touched) {
					uint8_t signum = atoi(touched);
					uwsgi_route_signal(signum);
					uwsgi_log_verbose("[uwsgi-touch-signal] raising %u\n", signum);
				}

				// daemon touches
				struct uwsgi_daemon *ud = uwsgi.daemons;
				while (ud) {
					if (ud->pid > 0 && ud->touch) {
						touched = uwsgi_check_touches(ud->touch);
						if (touched) {
							uwsgi_log_verbose("*** %s has been touched... reloading daemon \"%s\" (pid: %d) !!! ***\n", touched, ud->command, (int) ud->pid);
							if (kill(-ud->pid, ud->stop_signal)) {
								// killing process group failed, try to kill by process id
								if (kill(ud->pid, ud->stop_signal)) {
									uwsgi_error("[uwsgi-daemon/touch] kill()");
								}
							}
						}
					}
					ud = ud->next;
				}

				// hook touches
				touched = uwsgi_check_touches(uwsgi.hook_touch);
				if (touched) {
					uwsgi_hooks_run((struct uwsgi_string_list *) touched, "touch", 0);
				}

			}

			// allows the KILL signal to be delivered;
			if (someone_killed > 0) sleep(1);
			continue;

		}

		// no one died
		if (diedpid <= 0)
			continue;

		// check for deadlocks first
		uwsgi_deadlock_check(diedpid);

		// reload gateways and daemons only on normal workflow (+outworld status)
		if (!uwsgi_instance_is_reloading && !uwsgi_instance_is_dying) {

			if (uwsgi_master_check_emperor_death(diedpid))
				continue;
			if (uwsgi_master_check_spoolers_death(diedpid))
				continue;
			if (uwsgi_master_check_mules_death(diedpid))
				continue;
			if (uwsgi_master_check_gateways_death(diedpid))
				continue;
			if (uwsgi_master_check_daemons_death(diedpid))
				continue;
			if (uwsgi_master_check_cron_death(diedpid))
				continue;
		}


		/* What happens here ?

		   case 1) the diedpid is not a worker, report it and continue
		   case 2) the diedpid is a worker and we are not in a reload procedure -> reload it
		   case 3) the diedpid is a worker and we are in graceful reload -> uwsgi.ready_to_reload++ and continue
		   case 4) the diedpid is a worker and we are in brutal reload -> uwsgi.ready_to_die++ and continue


		 */

		int thewid = find_worker_id(diedpid);
		if (thewid <= 0) {
			// check spooler, mules, gateways and daemons
			struct uwsgi_spooler *uspool = uwsgi.spoolers;
			while (uspool) {
				if (uspool->pid > 0 && diedpid == uspool->pid) {
					uwsgi_log("spooler (pid: %d) annihilated\n", (int) diedpid);
					goto next;
				}
				uspool = uspool->next;
			}

			for (i = 0; i < uwsgi.mules_cnt; i++) {
				if (uwsgi.mules[i].pid == diedpid) {
					uwsgi_log("mule %d (pid: %d) annihilated\n", i + 1, (int) diedpid);
					uwsgi.mules[i].pid = 0;
					goto next;
				}
			}

			for (i = 0; i < ushared->gateways_cnt; i++) {
				if (ushared->gateways[i].pid == diedpid) {
					uwsgi_log("gateway %d (%s, pid: %d) annihilated\n", i + 1, ushared->gateways[i].fullname, (int) diedpid);
					goto next;
				}
			}

			if (uwsgi_daemon_check_pid_death(diedpid))
				goto next;

			if (WIFEXITED(waitpid_status)) {
				uwsgi_log("subprocess %d exited with code %d\n", (int) diedpid, WEXITSTATUS(waitpid_status));
			}
			else if (WIFSIGNALED(waitpid_status)) {
				uwsgi_log("subprocess %d exited by signal %d\n", (int) diedpid, WTERMSIG(waitpid_status));
			}
			else if (WIFSTOPPED(waitpid_status)) {
				uwsgi_log("subprocess %d stopped\n", (int) diedpid);
			}
next:
			continue;
		}


		// ok a worker died...
		uwsgi.workers[thewid].pid = 0;
		// only to be safe :P
		uwsgi.workers[thewid].harakiri = 0;


		// first check failed app loading in need-app mode
		if (WIFEXITED(waitpid_status) && WEXITSTATUS(waitpid_status) == UWSGI_FAILED_APP_CODE) {
			if (uwsgi.lazy_apps && uwsgi.need_app) {
				uwsgi_log("OOPS ! failed loading app in worker %d (pid %d)\n", thewid, (int) diedpid);
				uwsgi_log_verbose("need-app requested, destroying the instance...\n");
				uwsgi.status.dying_for_need_app = 1;
				kill_them_all(0);
				continue;
			}
			else {
				uwsgi_log("OOPS ! failed loading app in worker %d (pid %d) :( trying again...\n", thewid, (int) diedpid);
			}
		}

		// ok, if we are reloading or dying, just continue the master loop
		// as soon as all of the workers have pid == 0, the action (exit, or reload) is triggered
		if (uwsgi_instance_is_reloading || uwsgi_instance_is_dying) {
			if (!uwsgi.workers[thewid].cursed_at)
				uwsgi.workers[thewid].cursed_at = uwsgi_now();
			uwsgi_log("worker %d buried after %d seconds\n", thewid, (int) (uwsgi_now() - uwsgi.workers[thewid].cursed_at));
			uwsgi.workers[thewid].cursed_at = 0;
			// if we are stopping workers, just end here
			continue;
		}

		// classify the worker exit code
		if (WIFEXITED(waitpid_status) && WEXITSTATUS(waitpid_status) == UWSGI_DE_HIJACKED_CODE) {
			uwsgi_log("...restoring worker %d (pid: %d)...\n", thewid, (int) diedpid);
		}
		else if (WIFEXITED(waitpid_status) && WEXITSTATUS(waitpid_status) == UWSGI_EXCEPTION_CODE) {
			uwsgi_log("... monitored exception detected, respawning worker %d (pid: %d)...\n", thewid, (int) diedpid);
		}
		else if (WIFEXITED(waitpid_status) && WEXITSTATUS(waitpid_status) == UWSGI_QUIET_CODE) {
			// noop
		}
		else if (WIFEXITED(waitpid_status) && WEXITSTATUS(waitpid_status) == UWSGI_BRUTAL_RELOAD_CODE) {
			uwsgi_log("!!! inconsistent state reported by worker %d (pid: %d) !!!\n", thewid, (int) diedpid);
			reap_them_all(0);
			continue;
		}
		else if (WIFEXITED(waitpid_status) && WEXITSTATUS(waitpid_status) == UWSGI_GO_CHEAP_CODE) {
			uwsgi_log("worker %d asked for cheap mode (pid: %d)...\n", thewid, (int) diedpid);
			uwsgi.workers[thewid].cheaped = 1;
		}
		else if (uwsgi.workers[thewid].manage_next_request) {
			if (WIFSIGNALED(waitpid_status)) {
				uwsgi_log("DAMN ! worker %d (pid: %d) died, killed by signal %d :( trying respawn ...\n", thewid, (int) diedpid, (int) WTERMSIG(waitpid_status));
			}
			else {
				uwsgi_log("DAMN ! worker %d (pid: %d) died :( trying respawn ...\n", thewid, (int) diedpid);
			}
		}
		else if (uwsgi.workers[thewid].cursed_at > 0) {
			uwsgi_log("worker %d killed successfully (pid: %d)\n", thewid, (int) diedpid);
		}
		// manage_next_request is zero, but killed by signal...
		else if (WIFSIGNALED(waitpid_status)) {
			uwsgi_log("DAMN ! worker %d (pid: %d) MISTERIOUSLY killed by signal %d :( trying respawn ...\n", thewid, (int) diedpid, (int) WTERMSIG(waitpid_status));
		}

		if (uwsgi.workers[thewid].cheaped == 1) {
			uwsgi_log("uWSGI worker %d cheaped.\n", thewid);
			continue;
		}

		// avoid fork bombing
		gettimeofday(&last_respawn, NULL);
		if (last_respawn.tv_sec <= uwsgi.respawn_delta + check_interval) {
			last_respawn_rate++;
			if (last_respawn_rate > uwsgi.numproc) {
				if (uwsgi.forkbomb_delay > 0) {
					uwsgi_log("worker respawning too fast !!! i have to sleep a bit (%d seconds)...\n", uwsgi.forkbomb_delay);
					/* use --forkbomb-delay 0 to disable sleeping */
					sleep(uwsgi.forkbomb_delay);
				}
				last_respawn_rate = 0;
			}
		}
		else {
			last_respawn_rate = 0;
		}
		gettimeofday(&last_respawn, NULL);
		uwsgi.respawn_delta = last_respawn.tv_sec;

		// are we chain reloading it ?
		if (uwsgi.status.chain_reloading == thewid) {
			uwsgi.status.chain_reloading++;
		}

		// respawn the worker (if needed)
		if (uwsgi_respawn_worker(thewid))
			return 0;

		// end of the loop
	}

	// never here
}
1092
1093 void uwsgi_reload_workers() {
1094 int i;
1095 uwsgi_block_signal(SIGHUP);
1096 for (i = 1; i <= uwsgi.numproc; i++) {
1097 if (uwsgi.workers[i].pid > 0) {
1098 uwsgi_curse(i, SIGHUP);
1099 }
1100 }
1101 uwsgi_unblock_signal(SIGHUP);
1102 }
1103
1104 void uwsgi_reload_mules() {
1105 int i;
1106
1107 uwsgi_block_signal(SIGHUP);
1108 for (i = 0; i <= uwsgi.mules_cnt; i++) {
1109 if (uwsgi.mules[i].pid > 0) {
1110 uwsgi_curse_mule(i, SIGHUP);
1111 }
1112 }
1113 uwsgi_unblock_signal(SIGHUP);
1114 }
1115
1116 void uwsgi_reload_spoolers() {
1117 struct uwsgi_spooler *us;
1118
1119 uwsgi_block_signal(SIGHUP);
1120 for (us = uwsgi.spoolers; us; us = us->next) {
1121 if (us->pid > 0) {
1122 kill(us->pid, SIGHUP);
1123 us->cursed_at = uwsgi_now();
1124 us->no_mercy_at = us->cursed_at + uwsgi.spooler_reload_mercy;
1125 }
1126 }
1127 uwsgi_unblock_signal(SIGHUP);
1128 }
1129
1130 void uwsgi_chain_reload() {
1131 if (!uwsgi.status.chain_reloading) {
1132 uwsgi_log_verbose("chain reload starting...\n");
1133 uwsgi.status.chain_reloading = 1;
1134 }
1135 else {
1136 uwsgi_log_verbose("chain reload already running...\n");
1137 }
1138 }
1139
1140 void uwsgi_brutally_reload_workers() {
1141 int i;
1142 for (i = 1; i <= uwsgi.numproc; i++) {
1143 if (uwsgi.workers[i].pid > 0) {
1144 uwsgi_log_verbose("killing worker %d (pid: %d)\n", i, (int) uwsgi.workers[i].pid);
1145 uwsgi_curse(i, SIGINT);
1146 }
1147 }
1148 }
1149