#include "uwsgi.h"

extern struct uwsgi_server uwsgi;

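// Register a named lock in the shared list of locks. The uwsgi_lock_item
// itself and the underlying lock object are allocated with
// uwsgi_malloc_shared(), so the same memory is visible to the master and to
// every worker after fork().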
static struct uwsgi_lock_item *uwsgi_register_lock(char *id, int rw) {

    struct uwsgi_lock_item *uli = uwsgi.registered_locks;
    if (!uli) {
        uwsgi.registered_locks = uwsgi_malloc_shared(sizeof(struct uwsgi_lock_item));
        uwsgi.registered_locks->id = id;
        uwsgi.registered_locks->pid = 0;
        if (rw) {
            uwsgi.registered_locks->lock_ptr = uwsgi_malloc_shared(uwsgi.rwlock_size);
        }
        else {
            uwsgi.registered_locks->lock_ptr = uwsgi_malloc_shared(uwsgi.lock_size);
        }
        uwsgi.registered_locks->rw = rw;
        uwsgi.registered_locks->next = NULL;
        return uwsgi.registered_locks;
    }

    while (uli) {
        if (!uli->next) {
            uli->next = uwsgi_malloc_shared(sizeof(struct uwsgi_lock_item));
            if (rw) {
                uli->next->lock_ptr = uwsgi_malloc_shared(uwsgi.rwlock_size);
            }
            else {
                uli->next->lock_ptr = uwsgi_malloc_shared(uwsgi.lock_size);
            }
            uli->next->id = id;
            uli->next->pid = 0;
            uli->next->rw = rw;
            uli->next->next = NULL;
            return uli->next;
        }
        uli = uli->next;
    }

    uwsgi_log("*** DANGER: unable to allocate lock %s ***\n", id);
    exit(1);

}

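/*
    Compile-time lock engine selection:

    - UWSGI_LOCK_USE_MUTEX         -> process-shared pthread mutexes/rwlocks
                                      ("robust" where EOWNERDEAD is available)
    - UWSGI_LOCK_USE_UMTX          -> FreeBSD umtx
    - UWSGI_LOCK_USE_POSIX_SEM     -> POSIX semaphores
    - UWSGI_LOCK_USE_OSX_SPINLOCK  -> OSX spinlocks
    - UWSGI_LOCK_USE_WINDOWS_MUTEX -> windows mutexes
    - none of the above            -> SysV IPC semaphores (ipcsem)

    Every engine exposes the same set of operations, wired into
    uwsgi.lock_ops by uwsgi_setup_locking() at the bottom of this file.
*/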
#ifdef UWSGI_LOCK_USE_MUTEX

#ifdef OBSOLETE_LINUX_KERNEL
#undef EOWNERDEAD
#endif

#ifdef EOWNERDEAD
#define UWSGI_LOCK_ENGINE_NAME "pthread robust mutexes"
int uwsgi_pthread_robust_mutexes_enabled = 1;
#else
#define UWSGI_LOCK_ENGINE_NAME "pthread mutexes"
#endif

#define UWSGI_LOCK_SIZE sizeof(pthread_mutex_t)

#ifdef OBSOLETE_LINUX_KERNEL
#define UWSGI_RWLOCK_SIZE sizeof(pthread_mutex_t)
#else
#define UWSGI_RWLOCK_SIZE sizeof(pthread_rwlock_t)
#endif

#ifndef PTHREAD_PRIO_INHERIT
int pthread_mutexattr_setprotocol(pthread_mutexattr_t *__attr, int __protocol);
#define PTHREAD_PRIO_INHERIT 1
#endif

// REMEMBER: the lock must contain space for both pthread_mutex_t and pthread_mutexattr_t !!!
struct uwsgi_lock_item *uwsgi_lock_fast_init(char *id) {

    pthread_mutexattr_t attr;

    struct uwsgi_lock_item *uli = uwsgi_register_lock(id, 0);

#ifdef EOWNERDEAD
retry:
#endif
    if (pthread_mutexattr_init(&attr)) {
        uwsgi_log("unable to allocate mutexattr structure\n");
        exit(1);
    }

    if (pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED)) {
        uwsgi_log("unable to share mutex\n");
        exit(1);
    }

#ifdef EOWNERDEAD
#ifndef PTHREAD_MUTEX_ROBUST
#define PTHREAD_MUTEX_ROBUST PTHREAD_MUTEX_ROBUST_NP
#define pthread_mutexattr_setrobust pthread_mutexattr_setrobust_np
#define pthread_mutex_consistent pthread_mutex_consistent_np
#endif
    if (uwsgi_pthread_robust_mutexes_enabled) {
        int ret;
        if ((ret = pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT)) != 0) {
            switch (ret) {
            case ENOTSUP:
                // PTHREAD_PRIO_INHERIT will only prevent
                // priority inversion when SCHED_FIFO or
                // SCHED_RR is used, so this is non-fatal and
                // also currently unsupported on musl.
                break;
            default:
                uwsgi_log("unable to set PTHREAD_PRIO_INHERIT\n");
                exit(1);
            }
        }
        if (pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST)) {
            uwsgi_log("unable to make the mutex 'robust'\n");
            exit(1);
        }
    }
#endif

    if (pthread_mutex_init((pthread_mutex_t *) uli->lock_ptr, &attr)) {
#ifdef EOWNERDEAD
        if (uwsgi_pthread_robust_mutexes_enabled) {
            uwsgi_log("!!! it looks like your kernel does not support pthread robust mutexes !!!\n");
            uwsgi_log("!!! falling back to standard pthread mutexes !!!\n");
            uwsgi_pthread_robust_mutexes_enabled = 0;
            pthread_mutexattr_destroy(&attr);
            goto retry;
        }
#endif
        uwsgi_log("unable to initialize mutex\n");
        exit(1);
    }

    pthread_mutexattr_destroy(&attr);

#ifdef EOWNERDEAD
    if (!uwsgi_pthread_robust_mutexes_enabled) {
        uli->can_deadlock = 1;
    }
#else
    uli->can_deadlock = 1;
#endif

    return uli;
}

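// Probe a lock without blocking: return 0 if it is free, otherwise the pid
// recorded by the current holder. The probe is inherently racy (the holder
// can change between the trylock and the pid read), which is acceptable for
// the deadlock-detector heuristic implemented later in this file.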
pid_t uwsgi_lock_fast_check(struct uwsgi_lock_item *uli) {

    if (pthread_mutex_trylock((pthread_mutex_t *) uli->lock_ptr) == 0) {
        pthread_mutex_unlock((pthread_mutex_t *) uli->lock_ptr);
        return 0;
    }
    return uli->pid;
}

pid_t uwsgi_rwlock_fast_check(struct uwsgi_lock_item *uli) {
#ifdef OBSOLETE_LINUX_KERNEL
    return uwsgi_lock_fast_check(uli);
#else

    if (pthread_rwlock_trywrlock((pthread_rwlock_t *) uli->lock_ptr) == 0) {
        pthread_rwlock_unlock((pthread_rwlock_t *) uli->lock_ptr);
        return 0;
    }
    return uli->pid;
#endif
}

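// Acquire a mutex. With robust mutexes, EOWNERDEAD means the previous holder
// died while holding the lock: pthread_mutex_lock() has still granted us
// ownership, so mark the mutex consistent and carry on.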
void uwsgi_lock_fast(struct uwsgi_lock_item *uli) {

#ifdef EOWNERDEAD
    if (pthread_mutex_lock((pthread_mutex_t *) uli->lock_ptr) == EOWNERDEAD) {
        uwsgi_log("[deadlock-detector] a process holding a robust mutex died. recovering...\n");
        pthread_mutex_consistent((pthread_mutex_t *) uli->lock_ptr);
    }
#else
    pthread_mutex_lock((pthread_mutex_t *) uli->lock_ptr);
#endif
    uli->pid = uwsgi.mypid;
}

void uwsgi_unlock_fast(struct uwsgi_lock_item *uli) {

    pthread_mutex_unlock((pthread_mutex_t *) uli->lock_ptr);
    uli->pid = 0;

}

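// rwlock operations: on kernels without process-shared rwlock support
// (OBSOLETE_LINUX_KERNEL) both readers and writers degrade to the plain
// mutex above.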
void uwsgi_rlock_fast(struct uwsgi_lock_item *uli) {
#ifdef OBSOLETE_LINUX_KERNEL
    uwsgi_lock_fast(uli);
#else
    pthread_rwlock_rdlock((pthread_rwlock_t *) uli->lock_ptr);
    uli->pid = uwsgi.mypid;
#endif
}

void uwsgi_wlock_fast(struct uwsgi_lock_item *uli) {
#ifdef OBSOLETE_LINUX_KERNEL
    uwsgi_lock_fast(uli);
#else
    pthread_rwlock_wrlock((pthread_rwlock_t *) uli->lock_ptr);
    uli->pid = uwsgi.mypid;
#endif
}

void uwsgi_rwunlock_fast(struct uwsgi_lock_item *uli) {
#ifdef OBSOLETE_LINUX_KERNEL
    uwsgi_unlock_fast(uli);
#else
    pthread_rwlock_unlock((pthread_rwlock_t *) uli->lock_ptr);
    uli->pid = 0;
#endif
}

struct uwsgi_lock_item *uwsgi_rwlock_fast_init(char *id) {

#ifdef OBSOLETE_LINUX_KERNEL
    return uwsgi_lock_fast_init(id);
#else

    pthread_rwlockattr_t attr;

    struct uwsgi_lock_item *uli = uwsgi_register_lock(id, 1);

    if (pthread_rwlockattr_init(&attr)) {
        uwsgi_log("unable to allocate rwlock structure\n");
        exit(1);
    }

    if (pthread_rwlockattr_setpshared(&attr, PTHREAD_PROCESS_SHARED)) {
        uwsgi_log("unable to share rwlock\n");
        exit(1);
    }

    if (pthread_rwlock_init((pthread_rwlock_t *) uli->lock_ptr, &attr)) {
        uwsgi_log("unable to initialize rwlock\n");
        exit(1);
    }

    pthread_rwlockattr_destroy(&attr);

    uli->can_deadlock = 1;

    return uli;
#endif
}

#elif defined(UWSGI_LOCK_USE_UMTX)

/* Warning: FreeBSD is still not ready for process-shared UMTX */

#include <machine/atomic.h>
#include <sys/umtx.h>

#define UWSGI_LOCK_SIZE sizeof(struct umtx)
#define UWSGI_RWLOCK_SIZE sizeof(struct umtx)
#define UWSGI_LOCK_ENGINE_NAME "FreeBSD umtx"

struct uwsgi_lock_item *uwsgi_rwlock_fast_init(char *id) {
    return uwsgi_lock_fast_init(id);
}
void uwsgi_rlock_fast(struct uwsgi_lock_item *uli) {
    uwsgi_lock_fast(uli);
}
void uwsgi_wlock_fast(struct uwsgi_lock_item *uli) {
    uwsgi_lock_fast(uli);
}
void uwsgi_rwunlock_fast(struct uwsgi_lock_item *uli) {
    uwsgi_unlock_fast(uli);
}

struct uwsgi_lock_item *uwsgi_lock_fast_init(char *id) {
    struct uwsgi_lock_item *uli = uwsgi_register_lock(id, 0);
    umtx_init((struct umtx *) uli->lock_ptr);
    return uli;
}

void uwsgi_lock_fast(struct uwsgi_lock_item *uli) {
    umtx_lock((struct umtx *) uli->lock_ptr, (u_long) getpid());
    uli->pid = uwsgi.mypid;
}

void uwsgi_unlock_fast(struct uwsgi_lock_item *uli) {
    umtx_unlock((struct umtx *) uli->lock_ptr, (u_long) getpid());
    uli->pid = 0;
}

pid_t uwsgi_lock_fast_check(struct uwsgi_lock_item *uli) {
    if (umtx_trylock((struct umtx *) uli->lock_ptr, (u_long) getpid())) {
        umtx_unlock((struct umtx *) uli->lock_ptr, (u_long) getpid());
        return 0;
    }
    return uli->pid;
}

pid_t uwsgi_rwlock_fast_check(struct uwsgi_lock_item *uli) {
    return uwsgi_lock_fast_check(uli);
}

#elif defined(UWSGI_LOCK_USE_POSIX_SEM)
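// POSIX semaphore engine: anonymous semaphores initialized with pshared=1 so
// they live in the shared lock_ptr memory. The kernel never releases a
// semaphore held by a dead process, hence can_deadlock is set.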

#define UWSGI_LOCK_SIZE sizeof(sem_t)
#define UWSGI_RWLOCK_SIZE sizeof(sem_t)
#define UWSGI_LOCK_ENGINE_NAME "POSIX semaphores"

#include <semaphore.h>

struct uwsgi_lock_item *uwsgi_lock_fast_init(char *id) {
    struct uwsgi_lock_item *uli = uwsgi_register_lock(id, 0);
    sem_init((sem_t *) uli->lock_ptr, 1, 1);
    uli->can_deadlock = 1;
    return uli;
}

struct uwsgi_lock_item *uwsgi_rwlock_fast_init(char *id) {
    return uwsgi_lock_fast_init(id);
}

void uwsgi_lock_fast(struct uwsgi_lock_item *uli) {
    sem_wait((sem_t *) uli->lock_ptr);
    uli->pid = uwsgi.mypid;
}

void uwsgi_unlock_fast(struct uwsgi_lock_item *uli) {
    sem_post((sem_t *) uli->lock_ptr);
    uli->pid = 0;
}

pid_t uwsgi_lock_fast_check(struct uwsgi_lock_item *uli) {
    if (sem_trywait((sem_t *) uli->lock_ptr) == 0) {
        sem_post((sem_t *) uli->lock_ptr);
        return 0;
    }
    return uli->pid;
}

pid_t uwsgi_rwlock_fast_check(struct uwsgi_lock_item *uli) {
    return uwsgi_lock_fast_check(uli);
}
void uwsgi_rlock_fast(struct uwsgi_lock_item *uli) {
    uwsgi_lock_fast(uli);
}
void uwsgi_wlock_fast(struct uwsgi_lock_item *uli) {
    uwsgi_lock_fast(uli);
}
void uwsgi_rwunlock_fast(struct uwsgi_lock_item *uli) {
    uwsgi_unlock_fast(uli);
}

#elif defined(UWSGI_LOCK_USE_OSX_SPINLOCK)
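// OSX spinlock engine: OS_SPINLOCK_INIT is 0, so zeroing the shared memory is
// a valid initialization. A spinlock cannot detect a dead holder, hence
// can_deadlock is set.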

#define UWSGI_LOCK_ENGINE_NAME "OSX spinlocks"
#define UWSGI_LOCK_SIZE sizeof(OSSpinLock)
#define UWSGI_RWLOCK_SIZE sizeof(OSSpinLock)

struct uwsgi_lock_item *uwsgi_lock_fast_init(char *id) {

    struct uwsgi_lock_item *uli = uwsgi_register_lock(id, 0);
    memset(uli->lock_ptr, 0, UWSGI_LOCK_SIZE);
    uli->can_deadlock = 1;
    return uli;
}

void uwsgi_lock_fast(struct uwsgi_lock_item *uli) {

    OSSpinLockLock((OSSpinLock *) uli->lock_ptr);
    uli->pid = uwsgi.mypid;
}

void uwsgi_unlock_fast(struct uwsgi_lock_item *uli) {

    OSSpinLockUnlock((OSSpinLock *) uli->lock_ptr);
    uli->pid = 0;
}

pid_t uwsgi_lock_fast_check(struct uwsgi_lock_item *uli) {
    if (OSSpinLockTry((OSSpinLock *) uli->lock_ptr)) {
        OSSpinLockUnlock((OSSpinLock *) uli->lock_ptr);
        return 0;
    }
    return uli->pid;
}

struct uwsgi_lock_item *uwsgi_rwlock_fast_init(char *id) {
    struct uwsgi_lock_item *uli = uwsgi_register_lock(id, 1);
    memset(uli->lock_ptr, 0, UWSGI_LOCK_SIZE);
    uli->can_deadlock = 1;
    return uli;
}

void uwsgi_rlock_fast(struct uwsgi_lock_item *uli) {
    uwsgi_lock_fast(uli);
}
void uwsgi_wlock_fast(struct uwsgi_lock_item *uli) {
    uwsgi_lock_fast(uli);
}

pid_t uwsgi_rwlock_fast_check(struct uwsgi_lock_item *uli) {
    return uwsgi_lock_fast_check(uli);
}

void uwsgi_rwunlock_fast(struct uwsgi_lock_item *uli) {
    uwsgi_unlock_fast(uli);
}

#elif defined(UWSGI_LOCK_USE_WINDOWS_MUTEX)
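// Windows mutex engine: a kernel mutex created with an inheritable handle
// (sa.bInheritHandle), presumably so that child processes can inherit it. The
// handle returned by CreateMutex() replaces the shared-memory pointer in
// lock_ptr.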

#define UWSGI_LOCK_ENGINE_NAME "windows mutexes"
#define UWSGI_LOCK_SIZE sizeof(HANDLE)
#define UWSGI_RWLOCK_SIZE sizeof(HANDLE)

struct uwsgi_lock_item *uwsgi_lock_fast_init(char *id) {

    struct uwsgi_lock_item *uli = uwsgi_register_lock(id, 0);
    struct _SECURITY_ATTRIBUTES sa;
    memset(&sa, 0, sizeof(struct _SECURITY_ATTRIBUTES));
    sa.bInheritHandle = 1;
    uli->lock_ptr = CreateMutex(&sa, FALSE, NULL);
    return uli;
}

void uwsgi_lock_fast(struct uwsgi_lock_item *uli) {
    WaitForSingleObject(uli->lock_ptr, INFINITE);
    uli->pid = uwsgi.mypid;
}

void uwsgi_unlock_fast(struct uwsgi_lock_item *uli) {
    ReleaseMutex(uli->lock_ptr);
    uli->pid = 0;
}

pid_t uwsgi_lock_fast_check(struct uwsgi_lock_item *uli) {
    if (WaitForSingleObject(uli->lock_ptr, 0) == WAIT_TIMEOUT) {
        return 0;
    }
    return uli->pid;
}

struct uwsgi_lock_item *uwsgi_rwlock_fast_init(char *id) {
    return uwsgi_lock_fast_init(id);
}

void uwsgi_rlock_fast(struct uwsgi_lock_item *uli) {
    uwsgi_lock_fast(uli);
}
void uwsgi_wlock_fast(struct uwsgi_lock_item *uli) {
    uwsgi_lock_fast(uli);
}

pid_t uwsgi_rwlock_fast_check(struct uwsgi_lock_item *uli) {
    return uwsgi_lock_fast_check(uli);
}

void uwsgi_rwunlock_fast(struct uwsgi_lock_item *uli) {
    uwsgi_unlock_fast(uli);
}

#else
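// No fast primitive available on this platform: alias the "fast" entry points
// to the SysV IPC semaphore implementation below.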

#define uwsgi_lock_fast_init uwsgi_lock_ipcsem_init
#define uwsgi_lock_fast_check uwsgi_lock_ipcsem_check
#define uwsgi_lock_fast uwsgi_lock_ipcsem
#define uwsgi_unlock_fast uwsgi_unlock_ipcsem

#define uwsgi_rwlock_fast_init uwsgi_rwlock_ipcsem_init
#define uwsgi_rwlock_fast_check uwsgi_rwlock_ipcsem_check

#define uwsgi_rlock_fast uwsgi_rlock_ipcsem
#define uwsgi_wlock_fast uwsgi_wlock_ipcsem
#define uwsgi_rwunlock_fast uwsgi_rwunlock_ipcsem

#define UWSGI_LOCK_SIZE sizeof(int)
#define UWSGI_RWLOCK_SIZE sizeof(int)

#define UWSGI_LOCK_ENGINE_NAME "ipcsem"

#endif

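// SysV IPC semaphore engine: always compiled in, both as the fallback above
// and as the explicit "ipcsem" lock engine handled in uwsgi_setup_locking().
// Only the semaphore id (an int) is stored in the shared lock_ptr memory.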
struct uwsgi_lock_item *uwsgi_lock_ipcsem_init(char *id) {

    // used by ftok
    static int counter = 1;
    union semun {
        int val;
        struct semid_ds *buf;
        ushort *array;
    } semu;
    int semid;
    key_t myKey;

    struct uwsgi_lock_item *uli = uwsgi_register_lock(id, 0);

    if (uwsgi.ftok) {
        myKey = ftok(uwsgi.ftok, counter);
        if (myKey < 0) {
            uwsgi_error("uwsgi_lock_ipcsem_init()/ftok()");
            exit(1);
        }
        counter++;
        semid = semget(myKey, 1, IPC_CREAT | 0666);
    }
    else {
        semid = semget(IPC_PRIVATE, 1, IPC_CREAT | IPC_EXCL | 0666);
    }

    if (semid < 0) {
        uwsgi_error("uwsgi_lock_ipcsem_init()/semget()");
        exit(1);
    }
    // store the semaphore id now, to allow the atexit hook to clean up in case of problems
    memcpy(uli->lock_ptr, &semid, sizeof(int));

    semu.val = 1;
    if (semctl(semid, 0, SETVAL, semu)) {
        uwsgi_error("uwsgi_lock_ipcsem_init()/semctl()");
        exit(1);
    }

    return uli;
}

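// Acquire/release the semaphore with SEM_UNDO, so the kernel automatically
// reverses the operation if the holding process dies; this is why ipcsem
// locks never set can_deadlock. semop() is retried on EINTR.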
void uwsgi_lock_ipcsem(struct uwsgi_lock_item *uli) {

    int semid;
    struct sembuf sb;
    sb.sem_num = 0;
    sb.sem_op = -1;
    sb.sem_flg = SEM_UNDO;

    memcpy(&semid, uli->lock_ptr, sizeof(int));

retry:
    if (semop(semid, &sb, 1)) {
        if (errno == EINTR) goto retry;
        uwsgi_error("uwsgi_lock_ipcsem()/semop()");
#ifdef EIDRM
        if (errno == EIDRM) {
            exit(UWSGI_BRUTAL_RELOAD_CODE);
        }
#endif
        exit(1);
    }
}

void uwsgi_unlock_ipcsem(struct uwsgi_lock_item *uli) {

    int semid;
    struct sembuf sb;
    sb.sem_num = 0;
    sb.sem_op = 1;
    sb.sem_flg = SEM_UNDO;

    memcpy(&semid, uli->lock_ptr, sizeof(int));

retry:
    if (semop(semid, &sb, 1)) {
        if (errno == EINTR) goto retry;
        uwsgi_error("uwsgi_unlock_ipcsem()/semop()");
#ifdef EIDRM
        if (errno == EIDRM) {
            exit(UWSGI_BRUTAL_RELOAD_CODE);
        }
#endif
        exit(1);
    }

}

struct uwsgi_lock_item *uwsgi_rwlock_ipcsem_init(char *id) {
    return uwsgi_lock_ipcsem_init(id);
}
void uwsgi_rlock_ipcsem(struct uwsgi_lock_item *uli) {
    uwsgi_lock_ipcsem(uli);
}
void uwsgi_wlock_ipcsem(struct uwsgi_lock_item *uli) {
    uwsgi_lock_ipcsem(uli);
}
void uwsgi_rwunlock_ipcsem(struct uwsgi_lock_item *uli) {
    uwsgi_unlock_ipcsem(uli);
}

// ipcsem locks cannot deadlock (SEM_UNDO releases them on process death)
pid_t uwsgi_lock_ipcsem_check(struct uwsgi_lock_item *uli) {
    return 0;
}

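// atexit() hook: remove the SysV semaphores on shutdown. Roughly: only the
// master (or the first worker when no master is running) performs the
// removal, and only if no other live process is still mapped to one of the
// semaphores.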
void uwsgi_ipcsem_clear(void) {

    if (uwsgi.persistent_ipcsem) return;

    struct uwsgi_lock_item *uli = uwsgi.registered_locks;

    if (!uwsgi.workers)
        goto clear;

    if (uwsgi.mywid == 0)
        goto clear;

    if (uwsgi.master_process && getpid() == uwsgi.workers[0].pid)
        goto clear;

    if (!uwsgi.master_process && uwsgi.mywid == 1)
        goto clear;

    return;

clear:

#ifdef UWSGI_DEBUG
    uwsgi_log("removing sysvipc semaphores...\n");
#endif
#ifdef GETPID
    while (uli) {
        int semid = 0;
        memcpy(&semid, uli->lock_ptr, sizeof(int));
        int ret = semctl(semid, 0, GETPID);
        if (ret > 0) {
            if (ret != (int) getpid() && !kill((pid_t) ret, 0)) {
                uwsgi_log("found ipcsem mapped to alive pid %d. skipping ipcsem removal.\n", ret);
                return;
            }
        }
        uli = uli->next;
    }
    uli = uwsgi.registered_locks;
#endif
    while (uli) {
        int semid = 0;
        memcpy(&semid, uli->lock_ptr, sizeof(int));
        if (semctl(semid, 0, IPC_RMID)) {
            uwsgi_error("uwsgi_ipcsem_clear()/semctl()");
        }
        uli = uli->next;
    }
}

pid_t uwsgi_rwlock_ipcsem_check(struct uwsgi_lock_item *uli) {
    return uwsgi_lock_ipcsem_check(uli);
}

#ifdef UNBIT
/*
    Unbit-specific workaround for robust mutexes
*/
void *uwsgi_robust_mutexes_watchdog_loop(void *arg) {
    for (;;) {
        uwsgi_lock(uwsgi.the_thunder_lock);
        uwsgi_unlock(uwsgi.the_thunder_lock);
        sleep(1);
    }
    return NULL;
}
void uwsgi_robust_mutexes_watchdog() {
    pthread_t tid;
    pthread_attr_t attr;
    pthread_attr_init(&attr);
    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
    // 32K should be more than enough...
    pthread_attr_setstacksize(&attr, 32 * 1024);

    if (pthread_create(&tid, &attr, uwsgi_robust_mutexes_watchdog_loop, NULL)) {
        uwsgi_error("uwsgi_robust_mutexes_watchdog()/pthread_create()");
        exit(1);
    }
}

#endif

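/*
    uwsgi_setup_locking() wires the selected engine into uwsgi.lock_ops and
    creates the built-in locks. The rest of the codebase goes through the
    generic wrappers; a minimal sketch (the lock name "mylock" is
    hypothetical):

        struct uwsgi_lock_item *l = uwsgi_lock_init("mylock");
        uwsgi_lock(l);
        // ... critical section shared by master and workers ...
        uwsgi_unlock(l);
*/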
void uwsgi_setup_locking() {

    int i;

    if (uwsgi.locking_setup) return;

    // use the fastest available locking
    if (uwsgi.lock_engine) {
        if (!strcmp(uwsgi.lock_engine, "ipcsem")) {
            uwsgi_log_initial("lock engine: ipcsem\n");
            atexit(uwsgi_ipcsem_clear);
            uwsgi.lock_ops.lock_init = uwsgi_lock_ipcsem_init;
            uwsgi.lock_ops.lock_check = uwsgi_lock_ipcsem_check;
            uwsgi.lock_ops.lock = uwsgi_lock_ipcsem;
            uwsgi.lock_ops.unlock = uwsgi_unlock_ipcsem;
            uwsgi.lock_ops.rwlock_init = uwsgi_rwlock_ipcsem_init;
            uwsgi.lock_ops.rwlock_check = uwsgi_rwlock_ipcsem_check;
            uwsgi.lock_ops.rlock = uwsgi_rlock_ipcsem;
            uwsgi.lock_ops.wlock = uwsgi_wlock_ipcsem;
            uwsgi.lock_ops.rwunlock = uwsgi_rwunlock_ipcsem;
            uwsgi.lock_size = 8;
            uwsgi.rwlock_size = 8;
            goto ready;
        }
        uwsgi_log("unable to find lock engine \"%s\"\n", uwsgi.lock_engine);
        exit(1);
    }

    uwsgi_log_initial("lock engine: %s\n", UWSGI_LOCK_ENGINE_NAME);
#ifdef UWSGI_IPCSEM_ATEXIT
    atexit(uwsgi_ipcsem_clear);
#endif
    uwsgi.lock_ops.lock_init = uwsgi_lock_fast_init;
    uwsgi.lock_ops.lock_check = uwsgi_lock_fast_check;
    uwsgi.lock_ops.lock = uwsgi_lock_fast;
    uwsgi.lock_ops.unlock = uwsgi_unlock_fast;
    uwsgi.lock_ops.rwlock_init = uwsgi_rwlock_fast_init;
    uwsgi.lock_ops.rwlock_check = uwsgi_rwlock_fast_check;
    uwsgi.lock_ops.rlock = uwsgi_rlock_fast;
    uwsgi.lock_ops.wlock = uwsgi_wlock_fast;
    uwsgi.lock_ops.rwunlock = uwsgi_rwunlock_fast;
    uwsgi.lock_size = UWSGI_LOCK_SIZE;
    uwsgi.rwlock_size = UWSGI_RWLOCK_SIZE;

ready:
    // generic application locks
    uwsgi.user_lock = uwsgi_malloc(sizeof(void *) * (uwsgi.locks + 1));
    for (i = 0; i < uwsgi.locks + 1; i++) {
        char *num = uwsgi_num2str(i);
        uwsgi.user_lock[i] = uwsgi_lock_init(uwsgi_concat2("user ", num));
        free(num);
    }

    // event queue lock (mitigates the same event being delivered to multiple queues)
    if (uwsgi.threads > 1) {
        pthread_mutex_init(&uwsgi.thunder_mutex, NULL);
    }

    if (uwsgi.master_process) {
        // signal table lock
        uwsgi.signal_table_lock = uwsgi_lock_init("signal");

        // fmon table lock
        uwsgi.fmon_table_lock = uwsgi_lock_init("filemon");

        // timer table lock
        uwsgi.timer_table_lock = uwsgi_lock_init("timer");

        // rb_timer table lock
        uwsgi.rb_timer_table_lock = uwsgi_lock_init("rbtimer");

        // cron table lock
        uwsgi.cron_table_lock = uwsgi_lock_init("cron");
    }

    if (uwsgi.use_thunder_lock) {
        // process-shared thunder lock
        uwsgi.the_thunder_lock = uwsgi_lock_init("thunder");
#ifdef UNBIT
        // we have a serious bug on Unbit (and very probably on older libc):
        // when all of the workers die at the same moment, the pthread robust mutex
        // is left in an inconsistent state and we have no way to recover.
        // we spawn a thread in the master constantly ensuring the lock is ok.
        // for now we apply it only to Unbit (where thunder-lock is automatically enabled)
        uwsgi_robust_mutexes_watchdog();
#endif
    }

    uwsgi.rpc_table_lock = uwsgi_lock_init("rpc");

#ifdef UWSGI_SSL
    // register locking for legions
    struct uwsgi_legion *ul = uwsgi.legions;
    while (ul) {
        ul->lock = uwsgi_lock_init(uwsgi_concat2("legion_", ul->legion));
        ul = ul->next;
    }
#endif
    uwsgi.locking_setup = 1;
}

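// Take a blocking, whole-file write lock via fcntl(F_SETLKW); returns the
// fcntl() return value (0 on success, -1 on error).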
int uwsgi_fcntl_lock(int fd) {
    struct flock fl;
    fl.l_type = F_WRLCK;
    fl.l_whence = SEEK_SET;
    fl.l_start = 0;
    fl.l_len = 0;
    fl.l_pid = 0;

    int ret = fcntl(fd, F_SETLKW, &fl);
    if (ret < 0)
        uwsgi_error("fcntl()");

    return ret;
}

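// Non-blocking probe: returns 1 if another process holds the lock. Note that
// on success the F_SETLK call actually acquires the lock as a side effect.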
int uwsgi_fcntl_is_locked(int fd) {

    struct flock fl;
    fl.l_type = F_WRLCK;
    fl.l_whence = SEEK_SET;
    fl.l_start = 0;
    fl.l_len = 0;
    fl.l_pid = 0;

    if (fcntl(fd, F_SETLK, &fl)) {
        return 1;
    }

    return 0;

}

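// Invoked with the pid of a dead process: scan the registered locks and
// forcibly release any lock still attributed to that pid, so the other
// processes are not stuck forever on an engine that cannot recover by itself
// (only locks flagged can_deadlock are checked).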
void uwsgi_deadlock_check(pid_t diedpid) {
    struct uwsgi_lock_item *uli = uwsgi.registered_locks;
    while (uli) {
        if (!uli->can_deadlock)
            goto nextlock;
        pid_t locked_pid = 0;
        if (uli->rw) {
            locked_pid = uwsgi_rwlock_check(uli);
        }
        else {
            locked_pid = uwsgi_lock_check(uli);
        }
        if (locked_pid == diedpid) {
            uwsgi_log("[deadlock-detector] pid %d was holding lock %s (%p)\n", (int) diedpid, uli->id, uli->lock_ptr);
            if (uli->rw) {
                uwsgi_rwunlock(uli);
            }
            else {
                uwsgi_unlock(uli);
            }
        }
nextlock:
        uli = uli->next;
    }

}

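// Bounds-checked access to the generic application locks created in
// uwsgi_setup_locking() (lock 0 plus uwsgi.locks extra locks); both return
// -1 for an out-of-range lock number.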
int uwsgi_user_lock(int lock_num) {
    if (lock_num < 0 || lock_num > uwsgi.locks) {
        return -1;
    }
    uwsgi_lock(uwsgi.user_lock[lock_num]);
    return 0;
}

int uwsgi_user_unlock(int lock_num) {
    if (lock_num < 0 || lock_num > uwsgi.locks) {
        return -1;
    }
    uwsgi_unlock(uwsgi.user_lock[lock_num]);
    return 0;
}