#include "uwsgi.h"

extern struct uwsgi_server uwsgi;

5 // check if all of the workers are dead and exit uWSGI
uwsgi_master_check_death()6 void uwsgi_master_check_death() {
7 if (uwsgi_instance_is_dying) {
8 int i;
9 for(i=1;i<=uwsgi.numproc;i++) {
10 if (uwsgi.workers[i].pid > 0) {
11 return;
12 }
13 }
14 for(i=0;i<uwsgi.mules_cnt;i++) {
15 if (uwsgi.mules[i].pid > 0) {
16 return;
17 }
18 }
19 uwsgi_log("goodbye to uWSGI.\n");
20 exit(uwsgi.status.dying_for_need_app ? UWSGI_FAILED_APP_CODE : 0);
21 }
22 }
24 // check if all of the workers are dead, and trigger a reload
uwsgi_master_check_reload(char ** argv)25 int uwsgi_master_check_reload(char **argv) {
26 if (uwsgi_instance_is_reloading) {
27 int i;
28 for(i=1;i<=uwsgi.numproc;i++) {
29 if (uwsgi.workers[i].pid > 0) {
30 return 0;
31 }
32 }
33 for(i=0;i<uwsgi.mules_cnt;i++) {
34 if (uwsgi.mules[i].pid > 0) {
35 return 0;
36 }
37 }
38 uwsgi_reload(argv);
39 // never here (unless in shared library mode)
40 return -1;
41 }
42 return 0;
43 }
// check for chain reload
//
// uwsgi.status.chain_reloading acts as a cursor:
//   0 -> no chain reload in progress
//   N -> worker N is the next candidate to be recycled
void uwsgi_master_check_chain() {
	// used to rate-limit the "still waiting" log message to once per second
	static time_t last_check = 0;

	if (!uwsgi.status.chain_reloading) return;

	// we need to ensure the previous worker (if alive) is accepting new requests
	// before going on
	if (uwsgi.status.chain_reloading > 1) {
		struct uwsgi_worker *previous_worker = &uwsgi.workers[uwsgi.status.chain_reloading-1];
		// is the previous worker alive ?
		if (previous_worker->pid > 0 && !previous_worker->cheaped) {
			// the worker has been respawned but it is still not ready
			if (previous_worker->accepting == 0) {
				time_t now = uwsgi_now();
				// log at most once per second while waiting
				if (now != last_check) {
					uwsgi_log_verbose("chain is still waiting for worker %d...\n", uwsgi.status.chain_reloading-1);
					last_check = now;
				}
				return;
			}
		}
	}

	// if all the processes are recycled, the chain is over
	if (uwsgi.status.chain_reloading > uwsgi.numproc) {
		uwsgi.status.chain_reloading = 0;
		uwsgi_log_verbose("chain reloading complete\n");
		return;
	}

	// NOTE(review): SIGHUP is blocked while cursing the next victim,
	// presumably so the master's own SIGHUP handling cannot interleave
	// with the curse — confirm against the signal setup in the master loop
	uwsgi_block_signal(SIGHUP);
	int i;
	for(i=uwsgi.status.chain_reloading;i<=uwsgi.numproc;i++) {
		struct uwsgi_worker *uw = &uwsgi.workers[i];
		if (uw->pid > 0 && !uw->cheaped && uw->accepting) {
			// the worker could have been already cursed
			if (uw->cursed_at == 0) {
				uwsgi_log_verbose("chain next victim is worker %d\n", i);
				uwsgi_curse(i, SIGHUP);
			}
			break;
		}
		else {
			// dead/cheaped/not-yet-accepting workers are skipped:
			// advance the cursor past them
			uwsgi.status.chain_reloading++;
		}
	}
	uwsgi_unblock_signal(SIGHUP);
}


96 // special function for assuming all of the workers are dead
uwsgi_master_commit_status()97 void uwsgi_master_commit_status() {
98 int i;
99 for(i=1;i<=uwsgi.numproc;i++) {
100 uwsgi.workers[i].pid = 0;
101 }
102 }
// check if the instance has been idle for more than uwsgi.idle seconds and,
// if so, enter cheap mode (or die, when --die-on-idle is set): every worker
// is stopped (SIGINT first, SIGKILL after a 3 second grace) and the sockets
// are moved back to the master queue waiting for a new connection.
void uwsgi_master_check_idle() {

	// timestamp of the last observed request activity
	static time_t last_request_timecheck = 0;
	// total request counter observed at the last check
	static uint64_t last_request_count = 0;
	int i;
	int waitpid_status;

	// nothing to do if idle mode is disabled or we are already cheap
	if (!uwsgi.idle || uwsgi.status.is_cheap)
		return;

	uwsgi.current_time = uwsgi_now();
	if (!last_request_timecheck)
		last_request_timecheck = uwsgi.current_time;

	// security check, stop the check if there are busy workers
	for (i = 1; i <= uwsgi.numproc; i++) {
		if (uwsgi.workers[i].cheaped == 0 && uwsgi.workers[i].pid > 0) {
			if (uwsgi_worker_is_busy(i)) {
				return;
			}
		}
	}

	// workers[0] aggregates the request counters of all workers
	if (last_request_count != uwsgi.workers[0].requests) {
		last_request_timecheck = uwsgi.current_time;
		last_request_count = uwsgi.workers[0].requests;
	}
	// a bit of over-engeneering to avoid clock skews
	else if (last_request_timecheck < uwsgi.current_time && (uwsgi.current_time - last_request_timecheck > uwsgi.idle)) {
		uwsgi_log("workers have been inactive for more than %d seconds (%llu-%llu)\n", uwsgi.idle, (unsigned long long) uwsgi.current_time, (unsigned long long) last_request_timecheck);
		uwsgi.status.is_cheap = 1;
		if (uwsgi.die_on_idle) {
			if (uwsgi.has_emperor) {
				// byte 22 tells the Emperor we want to shut down
				char byte = 22;
				if (write(uwsgi.emperor_fd, &byte, 1) != 1) {
					uwsgi_error("write()");
					kill_them_all(0);
				}
			}
			else {
				kill_them_all(0);
			}
			return;
		}
		for (i = 1; i <= uwsgi.numproc; i++) {
			uwsgi.workers[i].cheaped = 1;
			if (uwsgi.workers[i].pid == 0)
				continue;
			// first send SIGINT
			kill(uwsgi.workers[i].pid, SIGINT);
			// and start waiting upto 3 seconds
			int j;
			for(j=0;j<3;j++) {
				sleep(1);
				int ret = waitpid(uwsgi.workers[i].pid, &waitpid_status, WNOHANG);
				if (ret == 0) continue;
				// worker reaped: jump straight to the slot cleanup below
				if (ret == (int) uwsgi.workers[i].pid) goto done;
				// on error, directly send SIGKILL
				break;
			}
			kill(uwsgi.workers[i].pid, SIGKILL);
			if (waitpid(uwsgi.workers[i].pid, &waitpid_status, 0) < 0) {
				// ECHILD means someone else already reaped it: not an error worth logging
				if (errno != ECHILD)
					uwsgi_error("uwsgi_master_check_idle()/waitpid()");
			}
			else {
				// NOTE: the label lives inside the else branch on purpose —
				// "goto done" from the WNOHANG loop skips the SIGKILL path
				// and lands directly on the slot cleanup
				done:
				uwsgi.workers[i].pid = 0;
				uwsgi.workers[i].rss_size = 0;
				uwsgi.workers[i].vsz_size = 0;
			}
		}
		// put the sockets back under the master's control
		uwsgi_add_sockets_to_queue(uwsgi.master_queue, -1);
		uwsgi_log("cheap mode enabled: waiting for socket connection...\n");
		last_request_timecheck = 0;
	}

}
uwsgi_master_check_workers_deadline()183 int uwsgi_master_check_workers_deadline() {
184 int i;
185 int ret = 0;
186 for (i = 1; i <= uwsgi.numproc; i++) {
187 /* first check for harakiri */
188 if (uwsgi.workers[i].harakiri > 0) {
189 if (uwsgi.workers[i].harakiri < (time_t) uwsgi.current_time) {
190 trigger_harakiri(i);
191 ret = 1;
192 }
193 }
194 /* then user-defined harakiri */
195 if (uwsgi.workers[i].user_harakiri > 0) {
196 if (uwsgi.workers[i].user_harakiri < (time_t) uwsgi.current_time) {
197 trigger_harakiri(i);
198 ret = 1;
199 }
200 }
201 // then for evil memory checkers
202 if (uwsgi.evil_reload_on_as) {
203 if ((rlim_t) uwsgi.workers[i].vsz_size >= uwsgi.evil_reload_on_as) {
204 uwsgi_log("*** EVIL RELOAD ON WORKER %d ADDRESS SPACE: %lld (pid: %d) ***\n", i, (long long) uwsgi.workers[i].vsz_size, uwsgi.workers[i].pid);
205 kill(uwsgi.workers[i].pid, SIGKILL);
206 uwsgi.workers[i].vsz_size = 0;
207 ret = 1;
208 }
209 }
210 if (uwsgi.evil_reload_on_rss) {
211 if ((rlim_t) uwsgi.workers[i].rss_size >= uwsgi.evil_reload_on_rss) {
212 uwsgi_log("*** EVIL RELOAD ON WORKER %d RSS: %lld (pid: %d) ***\n", i, (long long) uwsgi.workers[i].rss_size, uwsgi.workers[i].pid);
213 kill(uwsgi.workers[i].pid, SIGKILL);
214 uwsgi.workers[i].rss_size = 0;
215 ret = 1;
216 }
217 }
218 // check if worker was running longer than allowed lifetime
219 if (uwsgi.workers[i].pid > 0 && uwsgi.workers[i].cheaped == 0 && uwsgi.max_worker_lifetime > 0) {
220 uint64_t lifetime = uwsgi_now() - uwsgi.workers[i].last_spawn;
221 if (lifetime > (uwsgi.max_worker_lifetime + (i-1) * uwsgi.max_worker_lifetime_delta) && uwsgi.workers[i].manage_next_request == 1) {
222 uwsgi_log("worker %d lifetime reached, it was running for %llu second(s)\n", i, (unsigned long long) lifetime);
223 uwsgi.workers[i].manage_next_request = 0;
224 kill(uwsgi.workers[i].pid, SIGWINCH);
225 ret = 1;
226 }
227 }
228
229 // need to find a better way
230 //uwsgi.workers[i].last_running_time = uwsgi.workers[i].running_time;
231 }
232
233
234 return ret;
235 }


uwsgi_master_check_gateways_deadline()238 int uwsgi_master_check_gateways_deadline() {
239
240 int i;
241 int ret = 0;
242 for (i = 0; i < ushared->gateways_cnt; i++) {
243 if (ushared->gateways_harakiri[i] > 0) {
244 if (ushared->gateways_harakiri[i] < (time_t) uwsgi.current_time) {
245 if (ushared->gateways[i].pid > 0) {
246 uwsgi_log("*** HARAKIRI ON GATEWAY %s %d (pid: %d) ***\n", ushared->gateways[i].name, ushared->gateways[i].num, ushared->gateways[i].pid);
247 kill(ushared->gateways[i].pid, SIGKILL);
248 ret = 1;
249 }
250 ushared->gateways_harakiri[i] = 0;
251 }
252 }
253 }
254 return ret;
255 }
uwsgi_master_check_mules_deadline()257 int uwsgi_master_check_mules_deadline() {
258 int i;
259 int ret = 0;
260
261 for (i = 0; i < uwsgi.mules_cnt; i++) {
262 if (uwsgi.mules[i].harakiri > 0) {
263 if (uwsgi.mules[i].harakiri < (time_t) uwsgi.current_time) {
264 uwsgi_log("*** HARAKIRI ON MULE %d HANDLING SIGNAL %d (pid: %d) ***\n", i + 1, uwsgi.mules[i].signum, uwsgi.mules[i].pid);
265 kill(uwsgi.mules[i].pid, SIGKILL);
266 uwsgi.mules[i].harakiri = 0;
267 ret = 1;
268 }
269 }
270 // user harakiri
271 if (uwsgi.mules[i].user_harakiri > 0) {
272 if (uwsgi.mules[i].user_harakiri < (time_t) uwsgi.current_time) {
273 uwsgi_log("*** HARAKIRI ON MULE %d (pid: %d) ***\n", i + 1, uwsgi.mules[i].pid);
274 kill(uwsgi.mules[i].pid, SIGKILL);
275 uwsgi.mules[i].user_harakiri = 0;
276 ret = 1;
277 }
278 }
279 }
280 return ret;
281 }
uwsgi_master_check_spoolers_deadline()283 int uwsgi_master_check_spoolers_deadline() {
284 int ret = 0;
285 struct uwsgi_spooler *uspool = uwsgi.spoolers;
286 while (uspool) {
287 if (uspool->harakiri > 0 && uspool->harakiri < (time_t) uwsgi.current_time) {
288 uwsgi_log("*** HARAKIRI ON THE SPOOLER (pid: %d) ***\n", uspool->pid);
289 kill(uspool->pid, SIGKILL);
290 uspool->harakiri = 0;
291 ret = 1;
292 }
293 if (uspool->user_harakiri > 0 && uspool->user_harakiri < (time_t) uwsgi.current_time) {
294 uwsgi_log("*** HARAKIRI ON THE SPOOLER (pid: %d) ***\n", uspool->pid);
295 kill(uspool->pid, SIGKILL);
296 uspool->user_harakiri = 0;
297 ret = 1;
298 }
299 uspool = uspool->next;
300 }
301 return ret;
302 }


uwsgi_master_check_spoolers_death(int diedpid)305 int uwsgi_master_check_spoolers_death(int diedpid) {
306
307 struct uwsgi_spooler *uspool = uwsgi.spoolers;
308
309 while (uspool) {
310 if (uspool->pid > 0 && diedpid == uspool->pid) {
311 if (uspool->cursed_at) {
312 uspool->pid = 0;
313 uspool->cursed_at = 0;
314 uspool->no_mercy_at = 0;
315 }
316 uwsgi_log("OOOPS the spooler is no more...trying respawn...\n");
317 uspool->respawned++;
318 uspool->pid = spooler_start(uspool);
319 return -1;
320 }
321 uspool = uspool->next;
322 }
323 return 0;
324 }
uwsgi_master_check_emperor_death(int diedpid)326 int uwsgi_master_check_emperor_death(int diedpid) {
327 if (uwsgi.emperor_pid >= 0 && diedpid == uwsgi.emperor_pid) {
328 uwsgi_log_verbose("!!! Emperor died !!!\n");
329 uwsgi_emperor_start();
330 return -1;
331 }
332 return 0;
333 }
uwsgi_master_check_mules_death(int diedpid)335 int uwsgi_master_check_mules_death(int diedpid) {
336 int i;
337 for (i = 0; i < uwsgi.mules_cnt; i++) {
338 if (!(uwsgi.mules[i].pid == diedpid)) continue;
339 if (!uwsgi.mules[i].cursed_at) {
340 uwsgi_log("OOOPS mule %d (pid: %d) crippled...trying respawn...\n", i + 1, uwsgi.mules[i].pid);
341 }
342 uwsgi.mules[i].no_mercy_at = 0;
343 uwsgi.mules[i].cursed_at = 0;
344 uwsgi_mule(i + 1);
345 return -1;
346 }
347 return 0;
348 }
uwsgi_master_check_gateways_death(int diedpid)350 int uwsgi_master_check_gateways_death(int diedpid) {
351 int i;
352 for (i = 0; i < ushared->gateways_cnt; i++) {
353 if (ushared->gateways[i].pid == diedpid) {
354 gateway_respawn(i);
355 return -1;
356 }
357 }
358 return 0;
359 }
// check if a terminated process was an attached daemon.
// returns -1 when the daemons subsystem handled (reloaded) it, 0 otherwise.
int uwsgi_master_check_daemons_death(int diedpid) {
	/* reload the daemons */
	return uwsgi_daemon_check_pid_reload(diedpid) ? -1 : 0;
}
uwsgi_worker_is_busy(int wid)369 int uwsgi_worker_is_busy(int wid) {
370 int i;
371 if (uwsgi.workers[wid].sig) return 1;
372 for(i=0;i<uwsgi.cores;i++) {
373 if (uwsgi.workers[wid].cores[i].in_request) {
374 return 1;
375 }
376 }
377 return 0;
378 }
uwsgi_master_check_cron_death(int diedpid)380 int uwsgi_master_check_cron_death(int diedpid) {
381 struct uwsgi_cron *uc = uwsgi.crons;
382 while (uc) {
383 if (uc->pid == (pid_t) diedpid) {
384 uwsgi_log("[uwsgi-cron] command \"%s\" running with pid %d exited after %d second(s)\n", uc->command, uc->pid, uwsgi_now() - uc->started_at);
385 uc->pid = -1;
386 uc->started_at = 0;
387 return -1;
388 }
389 uc = uc->next;
390 }
391 return 0;
392 }
uwsgi_master_check_crons_deadline()394 int uwsgi_master_check_crons_deadline() {
395 int ret = 0;
396 struct uwsgi_cron *uc = uwsgi.crons;
397 while (uc) {
398 if (uc->pid >= 0 && uc->harakiri > 0 && uc->harakiri < (time_t) uwsgi.current_time) {
399 uwsgi_log("*** HARAKIRI ON CRON \"%s\" (pid: %d) ***\n", uc->command, uc->pid);
400 kill(-uc->pid, SIGKILL);
401 ret = 1;
402 }
403 uc = uc->next;
404 }
405 return ret;
406 }
uwsgi_master_check_mountpoints()408 void uwsgi_master_check_mountpoints() {
409 struct uwsgi_string_list *usl;
410 uwsgi_foreach(usl, uwsgi.mountpoints_check) {
411 if (uwsgi_check_mountpoint(usl->value)) {
412 uwsgi_log_verbose("mountpoint %s failed, triggering detonation...\n", usl->value);
413 uwsgi_nuclear_blast();
414 //never here
415 exit(1);
416 }
417 }
418 }