/*
 * include/common/hathreads.h
 * definitions, macros and inline functions about threads.
 *
 * Copyright (C) 2017 Christopher Faulet - cfaulet@haproxy.com
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation, version 2.1
 * exclusively.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef _COMMON_HATHREADS_H
#define _COMMON_HATHREADS_H

#include <common/config.h>
#include <common/initcall.h>

/* Note about all_threads_mask :
 * - with threads support disabled, this symbol is defined as a constant
 *   equal to 1UL (see the enum below), so single-thread code paths still
 *   see one enabled thread.
 * - with threads enabled, this variable is never zero, it contains the mask
 *   of enabled threads. Thus if only one thread is enabled, it equals 1.
 */
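
/* Illustrative sketch (not part of this API): code which needs to visit every
 * enabled thread can walk all_threads_mask bit by bit. do_per_thread_work()
 * below is a hypothetical helper used only for the example.
 *
 *	unsigned long m = all_threads_mask;
 *	while (m) {
 *		int t = __builtin_ctzl(m); // id of the lowest enabled thread
 *		m &= m - 1;                // clear that thread's bit
 *		do_per_thread_work(t);
 *	}
 */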

#ifndef USE_THREAD

#define MAX_THREADS 1
#define MAX_THREADS_MASK 1

/* Only way found to replace variables with constants that are optimized away
 * at build time.
 */
enum { all_threads_mask = 1UL };
enum { tid_bit = 1UL };
enum { tid = 0 };

#define __decl_hathreads(decl)
#define __decl_spinlock(lock)
#define __decl_aligned_spinlock(lock)
#define __decl_rwlock(lock)
#define __decl_aligned_rwlock(lock)

#define HA_ATOMIC_CAS(val, old, new)                                    \
	({                                                              \
		typeof(val) _v = (val);                                 \
		typeof(old) _o = (old);                                 \
		(*_v == *_o) ? ((*_v = (new)), 1) : ((*_o = *_v), 0);   \
	})

/* warning, n is a pointer to the double value for dwcas */
#define HA_ATOMIC_DWCAS(val, o, n)                                      \
	({                                                              \
		long *_v = (long*)(val);                                \
		long *_o = (long*)(o);                                  \
		long *_n = (long*)(n);                                  \
		long _v0 = _v[0], _v1 = _v[1];                          \
		(_v0 == _o[0] && _v1 == _o[1]) ?                        \
			(_v[0] = _n[0], _v[1] = _n[1], 1) :             \
			(_o[0] = _v0, _o[1] = _v1, 0);                  \
	})
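
/* Illustrative sketch of the CAS/DWCAS calling convention above: <old> (or
 * <o>) is a pointer which is refreshed with the current value when the
 * compare fails, and for the DWCAS <n> points to the two-word replacement
 * value.
 *
 *	unsigned long v = 0, o = 0;
 *	HA_ATOMIC_CAS(&v, &o, 1UL);      // returns 1, v is now 1
 *	HA_ATOMIC_CAS(&v, &o, 2UL);      // returns 0, o is refreshed to 1
 *
 *	long cur[2] = { 1, 2 }, exp[2] = { 1, 2 }, rep[2] = { 3, 4 };
 *	HA_ATOMIC_DWCAS(cur, exp, rep);  // returns 1, cur is now { 3, 4 }
 */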

#define HA_ATOMIC_ADD(val, i)        ({*(val) += (i);})
#define HA_ATOMIC_SUB(val, i)        ({*(val) -= (i);})
#define HA_ATOMIC_XADD(val, i)						\
	({								\
		typeof((val)) __p_xadd = (val);				\
		typeof(*(val)) __old_xadd = *__p_xadd;			\
		*__p_xadd += i;						\
		__old_xadd;						\
	})
#define HA_ATOMIC_AND(val, flags)    ({*(val) &= (flags);})
#define HA_ATOMIC_OR(val, flags)     ({*(val) |= (flags);})
#define HA_ATOMIC_XCHG(val, new)					\
	({								\
		typeof(*(val)) __old_xchg = *(val);			\
		*(val) = new;						\
		__old_xchg;						\
	})
#define HA_ATOMIC_BTS(val, bit)						\
	({								\
		typeof((val)) __p_bts = (val);				\
		typeof(*__p_bts)  __b_bts = (1UL << (bit));		\
		typeof(*__p_bts)  __t_bts = *__p_bts & __b_bts;		\
		if (!__t_bts)						\
			*__p_bts |= __b_bts;				\
		__t_bts;						\
	})
#define HA_ATOMIC_BTR(val, bit)						\
	({								\
		typeof((val)) __p_btr = (val);				\
		typeof(*__p_btr)  __b_btr = (1UL << (bit));		\
		typeof(*__p_btr)  __t_btr = *__p_btr & __b_btr;		\
		if (__t_btr)						\
			*__p_btr &= ~__b_btr;				\
		__t_btr;						\
	})
#define HA_ATOMIC_LOAD(val)          *(val)
#define HA_ATOMIC_STORE(val, new)    ({*(val) = new;})
#define HA_ATOMIC_UPDATE_MAX(val, new)					\
	({								\
		typeof(val) __val = (val);				\
		typeof(*(val)) __new_max = (new);			\
									\
		if (*__val < __new_max)					\
			*__val = __new_max;				\
		*__val;							\
	})

#define HA_ATOMIC_UPDATE_MIN(val, new)					\
	({								\
		typeof(val) __val = (val);				\
		typeof(*(val)) __new_min = (new);			\
									\
		if (*__val > __new_min)					\
			*__val = __new_min;				\
		*__val;							\
	})
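
/* Illustrative sketch: UPDATE_MAX/UPDATE_MIN keep a running extreme and
 * return the resulting value, e.g. to track a peak counter. <peak> and <cur>
 * are hypothetical variables used only for the example.
 *
 *	unsigned int peak = 0;
 *	HA_ATOMIC_UPDATE_MAX(&peak, cur); // peak = MAX(peak, cur)
 */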

#define HA_BARRIER() do { } while (0)

#define HA_SPIN_INIT(l)         do { /* do nothing */ } while(0)
#define HA_SPIN_DESTROY(l)      do { /* do nothing */ } while(0)
#define HA_SPIN_LOCK(lbl, l)    do { /* do nothing */ } while(0)
#define HA_SPIN_TRYLOCK(lbl, l) ({ 0; })
#define HA_SPIN_UNLOCK(lbl, l)  do { /* do nothing */ } while(0)

#define HA_RWLOCK_INIT(l)           do { /* do nothing */ } while(0)
#define HA_RWLOCK_DESTROY(l)        do { /* do nothing */ } while(0)
#define HA_RWLOCK_WRLOCK(lbl, l)    do { /* do nothing */ } while(0)
#define HA_RWLOCK_TRYWRLOCK(lbl, l) ({ 0; })
#define HA_RWLOCK_WRUNLOCK(lbl, l)  do { /* do nothing */ } while(0)
#define HA_RWLOCK_RDLOCK(lbl, l)    do { /* do nothing */ } while(0)
#define HA_RWLOCK_TRYRDLOCK(lbl, l) ({ 0; })
#define HA_RWLOCK_RDUNLOCK(lbl, l)  do { /* do nothing */ } while(0)

#define ha_sigmask(how, set, oldset) sigprocmask(how, set, oldset)

static inline void ha_set_tid(unsigned int tid)
{
}

static inline void __ha_barrier_load(void)
{
}

static inline void __ha_barrier_store(void)
{
}

static inline void __ha_barrier_full(void)
{
}

static inline void thread_harmless_now()
{
}

static inline void thread_harmless_end()
{
}

static inline void thread_isolate()
{
}

static inline void thread_release()
{
}

static inline unsigned long thread_isolated()
{
	return 1;
}

#else /* USE_THREAD */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include <import/plock.h>

#define MAX_THREADS LONGBITS
#define MAX_THREADS_MASK ((unsigned long)-1)

#define __decl_hathreads(decl) decl

/* declare a self-initializing spinlock */
#define __decl_spinlock(lock)                                \
	HA_SPINLOCK_T (lock);                                \
	INITCALL1(STG_LOCK, ha_spin_init, &(lock))

/* declare a self-initializing spinlock, aligned on a cache line */
#define __decl_aligned_spinlock(lock)                        \
	HA_SPINLOCK_T (lock) __attribute__((aligned(64)));   \
	INITCALL1(STG_LOCK, ha_spin_init, &(lock))

/* declare a self-initializing rwlock */
#define __decl_rwlock(lock)                                  \
	HA_RWLOCK_T   (lock);                                \
	INITCALL1(STG_LOCK, ha_rwlock_init, &(lock))

/* declare a self-initializing rwlock, aligned on a cache line */
#define __decl_aligned_rwlock(lock)                          \
	HA_RWLOCK_T   (lock) __attribute__((aligned(64)));   \
	INITCALL1(STG_LOCK, ha_rwlock_init, &(lock))
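
/* Illustrative sketch: a file-scope lock declared this way is registered for
 * initialization at the STG_LOCK stage, then used with one of the lock labels
 * (OTHER_LOCK is picked arbitrarily for the example).
 *
 *	__decl_spinlock(my_lock);
 *
 *	void f(void)
 *	{
 *		HA_SPIN_LOCK(OTHER_LOCK, &my_lock);
 *		// ... touch the shared resource ...
 *		HA_SPIN_UNLOCK(OTHER_LOCK, &my_lock);
 *	}
 */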

/* TODO: thread: For now, we rely on GCC builtins but it could be a good idea to
 * have a header file regrouping all functions dealing with threads. */

#if (defined(__GNUC__) && (__GNUC__ < 4 || __GNUC__ == 4 && __GNUC_MINOR__ < 7) && !defined(__clang__)) || (defined(__clang__) && defined(__i386__))
/* gcc < 4.7 */

#define HA_ATOMIC_ADD(val, i)        __sync_add_and_fetch(val, i)
#define HA_ATOMIC_SUB(val, i)        __sync_sub_and_fetch(val, i)
#define HA_ATOMIC_XADD(val, i)       __sync_fetch_and_add(val, i)
#define HA_ATOMIC_AND(val, flags)    __sync_and_and_fetch(val, flags)
#define HA_ATOMIC_OR(val, flags)     __sync_or_and_fetch(val, flags)

/* the CAS is a bit complicated. The older API doesn't support returning the
 * value and the swap's result at the same time. So here we take what looks
 * like the safest route: use the boolean version, which guarantees whether
 * or not the operation was performed, and snoop a previous value. If the
 * compare succeeds, we return. If it fails, we return the previous value,
 * but only if it differs from the expected one. If it's the same, it was a
 * race, thus we try again to avoid confusing a possibly sensitive caller.
 */
#define HA_ATOMIC_CAS(val, old, new)					\
	({								\
		typeof((val)) __val_cas = (val);			\
		typeof((old)) __oldp_cas = (old);			\
		typeof(*(old)) __oldv_cas;				\
		typeof((new)) __new_cas = (new);			\
		int __ret_cas;						\
		do {							\
			__oldv_cas = *__val_cas;			\
			__ret_cas = __sync_bool_compare_and_swap(__val_cas, *__oldp_cas, __new_cas); \
		} while (!__ret_cas && *__oldp_cas == __oldv_cas);	\
		if (!__ret_cas)						\
			*__oldp_cas = __oldv_cas;			\
		__ret_cas;						\
	})

/* warning, n is a pointer to the double value for dwcas */
#define HA_ATOMIC_DWCAS(val, o, n) __ha_cas_dw(val, o, n)

#define HA_ATOMIC_XCHG(val, new)					\
	({								\
		typeof((val)) __val_xchg = (val);			\
		typeof(*(val)) __old_xchg;				\
		typeof((new)) __new_xchg = (new);			\
		do { __old_xchg = *__val_xchg;				\
		} while (!__sync_bool_compare_and_swap(__val_xchg, __old_xchg, __new_xchg)); \
		__old_xchg;						\
	})

#define HA_ATOMIC_BTS(val, bit)						\
	({								\
		typeof(*(val)) __b_bts = (1UL << (bit));		\
		__sync_fetch_and_or((val), __b_bts) & __b_bts;		\
	})

#define HA_ATOMIC_BTR(val, bit)						\
	({								\
		typeof(*(val)) __b_btr = (1UL << (bit));		\
		__sync_fetch_and_and((val), ~__b_btr) & __b_btr;	\
	})

#define HA_ATOMIC_LOAD(val)						\
	({								\
		typeof(*(val)) ret;					\
		__sync_synchronize();					\
		ret = *(volatile typeof(val))val;			\
		__sync_synchronize();					\
		ret;							\
	})

#define HA_ATOMIC_STORE(val, new)					\
	({								\
		typeof((val)) __val_store = (val);			\
		typeof(*(val)) __old_store;				\
		typeof((new)) __new_store = (new);			\
		do { __old_store = *__val_store;			\
		} while (!__sync_bool_compare_and_swap(__val_store, __old_store, __new_store)); \
	})
#else
/* gcc >= 4.7 */
#define HA_ATOMIC_CAS(val, old, new) __atomic_compare_exchange_n(val, old, new, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
/* warning, n is a pointer to the double value for dwcas */
#define HA_ATOMIC_DWCAS(val, o, n)   __ha_cas_dw(val, o, n)
#define HA_ATOMIC_ADD(val, i)        __atomic_add_fetch(val, i, __ATOMIC_SEQ_CST)
#define HA_ATOMIC_XADD(val, i)       __atomic_fetch_add(val, i, __ATOMIC_SEQ_CST)
#define HA_ATOMIC_SUB(val, i)        __atomic_sub_fetch(val, i, __ATOMIC_SEQ_CST)
#define HA_ATOMIC_AND(val, flags)    __atomic_and_fetch(val, flags, __ATOMIC_SEQ_CST)
#define HA_ATOMIC_OR(val, flags)     __atomic_or_fetch(val, flags, __ATOMIC_SEQ_CST)
#define HA_ATOMIC_BTS(val, bit)						\
	({								\
		typeof(*(val)) __b_bts = (1UL << (bit));		\
		__sync_fetch_and_or((val), __b_bts) & __b_bts;		\
	})

#define HA_ATOMIC_BTR(val, bit)						\
	({								\
		typeof(*(val)) __b_btr = (1UL << (bit));		\
		__sync_fetch_and_and((val), ~__b_btr) & __b_btr;	\
	})

#define HA_ATOMIC_XCHG(val, new)     __atomic_exchange_n(val, new, __ATOMIC_SEQ_CST)
#define HA_ATOMIC_STORE(val, new)    __atomic_store_n(val, new, __ATOMIC_SEQ_CST)
#define HA_ATOMIC_LOAD(val)          __atomic_load_n(val, __ATOMIC_SEQ_CST)

#endif

#define HA_ATOMIC_UPDATE_MAX(val, new)					\
	({								\
		typeof(val) __val = (val);				\
		typeof(*(val)) __old_max = *__val;			\
		typeof(*(val)) __new_max = (new);			\
									\
		while (__old_max < __new_max &&				\
		       !HA_ATOMIC_CAS(__val, &__old_max, __new_max));	\
		*__val;							\
	})
#define HA_ATOMIC_UPDATE_MIN(val, new)					\
	({								\
		typeof(val) __val = (val);				\
		typeof(*(val)) __old_min = *__val;			\
		typeof(*(val)) __new_min = (new);			\
									\
		while (__old_min > __new_min &&				\
		       !HA_ATOMIC_CAS(__val, &__old_min, __new_min));	\
		*__val;							\
	})

#define HA_BARRIER() pl_barrier()

void thread_harmless_till_end();
void thread_isolate();
void thread_release();

extern THREAD_LOCAL unsigned int  tid;     /* The thread id */
extern THREAD_LOCAL unsigned long tid_bit; /* The bit corresponding to the thread id */
extern volatile unsigned long all_threads_mask;
extern volatile unsigned long threads_want_rdv_mask;
extern volatile unsigned long threads_harmless_mask;

/* explanation for threads_want_rdv_mask and threads_harmless_mask :
 * - threads_want_rdv_mask is a bit field indicating all threads that have
 *   requested a rendez-vous of other threads using thread_isolate().
 * - threads_harmless_mask is a bit field indicating all threads that are
 *   currently harmless in that they promise not to access a shared resource.
 *
 * For a given thread, its bits in want_rdv and harmless can be translated like
 * this :
 *
 *  ----------+----------+----------------------------------------------------
 *   want_rdv | harmless | description
 *  ----------+----------+----------------------------------------------------
 *       0    |     0    | thread not interested in RDV, possibly harmful
 *       0    |     1    | thread not interested in RDV but harmless
 *       1    |     1    | thread interested in RDV and waiting for its turn
 *       1    |     0    | thread currently working isolated from others
 *  ----------+----------+----------------------------------------------------
 */
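
/* Illustrative sketch of the rendez-vous point: a thread which needs all
 * other ones to stop touching shared resources does something like this,
 * while the other threads regularly cross harmless periods (e.g. in poll()).
 *
 *	thread_isolate();  // returns once all other threads are harmless
 *	// ... work alone on shared data ...
 *	thread_release();  // lets the other threads resume
 */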

#define ha_sigmask(how, set, oldset)  pthread_sigmask(how, set, oldset)

/* sets the thread ID and the TID bit for the current thread */
static inline void ha_set_tid(unsigned int data)
{
	tid     = data;
	tid_bit = (1UL << tid);
}

/* Marks the thread as harmless. Note: this must be true, i.e. the thread must
 * not be touching any unprotected shared resource during this period. Usually
 * this is called before poll(), but it may also be placed around very slow
 * calls (eg: some crypto operations). Needs to be terminated using
 * thread_harmless_end().
 */
static inline void thread_harmless_now()
{
	HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
}

/* Ends the harmless period started by thread_harmless_now(). Usually this is
 * placed after the poll() call. If it is discovered that a job was running and
 * is relying on the thread still being harmless, the thread waits for the
 * other one to finish.
 */
static inline void thread_harmless_end()
{
	while (1) {
		HA_ATOMIC_AND(&threads_harmless_mask, ~tid_bit);
		if (likely((threads_want_rdv_mask & all_threads_mask) == 0))
			break;
		thread_harmless_till_end();
	}
}
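
/* Illustrative sketch: the typical harmless window placed around a blocking
 * call such as poll(). <fds>, <nbfd> and <wait_time> are hypothetical.
 *
 *	thread_harmless_now();               // promise not to touch shared data
 *	status = poll(fds, nbfd, wait_time); // blocking call
 *	thread_harmless_end();               // may wait if an isolation is pending
 */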

/* an isolated thread has harmless cleared and want_rdv set */
static inline unsigned long thread_isolated()
{
	return threads_want_rdv_mask & ~threads_harmless_mask & tid_bit;
}


#if defined(DEBUG_THREAD) || defined(DEBUG_FULL)

/* WARNING!!! if you update this enum, please also keep lock_label() up to date below */
enum lock_label {
	FD_LOCK,
	TASK_RQ_LOCK,
	TASK_WQ_LOCK,
	POOL_LOCK,
	LISTENER_LOCK,
	PROXY_LOCK,
	SERVER_LOCK,
	LBPRM_LOCK,
	SIGNALS_LOCK,
	STK_TABLE_LOCK,
	STK_SESS_LOCK,
	APPLETS_LOCK,
	PEER_LOCK,
	BUF_WQ_LOCK,
	STRMS_LOCK,
	SSL_LOCK,
	SSL_GEN_CERTS_LOCK,
	PATREF_LOCK,
	PATEXP_LOCK,
	VARS_LOCK,
	COMP_POOL_LOCK,
	LUA_LOCK,
	NOTIF_LOCK,
	SPOE_APPLET_LOCK,
	DNS_LOCK,
	PID_LIST_LOCK,
	EMAIL_ALERTS_LOCK,
	PIPES_LOCK,
	START_LOCK,
	TLSKEYS_REF_LOCK,
	AUTH_LOCK,
	LOGSRV_LOCK,
	PROTO_LOCK,
	OTHER_LOCK,
	LOCK_LABELS
};
struct lock_stat {
	uint64_t nsec_wait_for_write;
	uint64_t nsec_wait_for_read;
	uint64_t num_write_locked;
	uint64_t num_write_unlocked;
	uint64_t num_read_locked;
	uint64_t num_read_unlocked;
};

extern struct lock_stat lock_stats[LOCK_LABELS];

#define __HA_SPINLOCK_T     unsigned long

#define __SPIN_INIT(l)         ({ (*l) = 0; })
#define __SPIN_DESTROY(l)      ({ (*l) = 0; })
#define __SPIN_LOCK(l)         pl_take_s(l)
#define __SPIN_TRYLOCK(l)      !pl_try_s(l)
#define __SPIN_UNLOCK(l)       pl_drop_s(l)

#define __HA_RWLOCK_T       unsigned long

#define __RWLOCK_INIT(l)       ({ (*l) = 0; })
#define __RWLOCK_DESTROY(l)    ({ (*l) = 0; })
#define __RWLOCK_WRLOCK(l)     pl_take_w(l)
#define __RWLOCK_TRYWRLOCK(l)  !pl_try_w(l)
#define __RWLOCK_WRUNLOCK(l)   pl_drop_w(l)
#define __RWLOCK_RDLOCK(l)     pl_take_r(l)
#define __RWLOCK_TRYRDLOCK(l)  !pl_try_r(l)
#define __RWLOCK_RDUNLOCK(l)   pl_drop_r(l)

#define HA_SPINLOCK_T       struct ha_spinlock

#define HA_SPIN_INIT(l)        __spin_init(l)
#define HA_SPIN_DESTROY(l)     __spin_destroy(l)

#define HA_SPIN_LOCK(lbl, l)    __spin_lock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_SPIN_TRYLOCK(lbl, l) __spin_trylock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_SPIN_UNLOCK(lbl, l)  __spin_unlock(lbl, l, __func__, __FILE__, __LINE__)

#define HA_RWLOCK_T         struct ha_rwlock

#define HA_RWLOCK_INIT(l)          __ha_rwlock_init((l))
#define HA_RWLOCK_DESTROY(l)       __ha_rwlock_destroy((l))
#define HA_RWLOCK_WRLOCK(lbl,l)    __ha_rwlock_wrlock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_TRYWRLOCK(lbl,l) __ha_rwlock_trywrlock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_WRUNLOCK(lbl,l)  __ha_rwlock_wrunlock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_RDLOCK(lbl,l)    __ha_rwlock_rdlock(lbl, l)
#define HA_RWLOCK_TRYRDLOCK(lbl,l) __ha_rwlock_tryrdlock(lbl, l)
#define HA_RWLOCK_RDUNLOCK(lbl,l)  __ha_rwlock_rdunlock(lbl, l)

struct ha_spinlock {
	__HA_SPINLOCK_T lock;
	struct {
		unsigned long owner;   /* a bit is set to 1 << tid for the lock owner */
		unsigned long waiters; /* a bit is set to 1 << tid for waiting threads */
		struct {
			const char *function;
			const char *file;
			int line;
		} last_location; /* location of the last owner */
	} info;
};

struct ha_rwlock {
	__HA_RWLOCK_T lock;
	struct {
		unsigned long cur_writer;   /* a bit is set to 1 << tid for the lock owner */
		unsigned long wait_writers; /* a bit is set to 1 << tid for waiting writers */
		unsigned long cur_readers;  /* a bit is set to 1 << tid for current readers */
		unsigned long wait_readers; /* a bit is set to 1 << tid for waiting readers */
		struct {
			const char *function;
			const char *file;
			int line;
		} last_location; /* location of the last write owner */
	} info;
};

static inline const char *lock_label(enum lock_label label)
{
	switch (label) {
	case FD_LOCK:              return "FD";
	case TASK_RQ_LOCK:         return "TASK_RQ";
	case TASK_WQ_LOCK:         return "TASK_WQ";
	case POOL_LOCK:            return "POOL";
	case LISTENER_LOCK:        return "LISTENER";
	case PROXY_LOCK:           return "PROXY";
	case SERVER_LOCK:          return "SERVER";
	case LBPRM_LOCK:           return "LBPRM";
	case SIGNALS_LOCK:         return "SIGNALS";
	case STK_TABLE_LOCK:       return "STK_TABLE";
	case STK_SESS_LOCK:        return "STK_SESS";
	case APPLETS_LOCK:         return "APPLETS";
	case PEER_LOCK:            return "PEER";
	case BUF_WQ_LOCK:          return "BUF_WQ";
	case STRMS_LOCK:           return "STRMS";
	case SSL_LOCK:             return "SSL";
	case SSL_GEN_CERTS_LOCK:   return "SSL_GEN_CERTS";
	case PATREF_LOCK:          return "PATREF";
	case PATEXP_LOCK:          return "PATEXP";
	case VARS_LOCK:            return "VARS";
	case COMP_POOL_LOCK:       return "COMP_POOL";
	case LUA_LOCK:             return "LUA";
	case NOTIF_LOCK:           return "NOTIF";
	case SPOE_APPLET_LOCK:     return "SPOE_APPLET";
	case DNS_LOCK:             return "DNS";
	case PID_LIST_LOCK:        return "PID_LIST";
	case EMAIL_ALERTS_LOCK:    return "EMAIL_ALERTS";
	case PIPES_LOCK:           return "PIPES";
	case START_LOCK:           return "START";
	case TLSKEYS_REF_LOCK:     return "TLSKEYS_REF";
	case AUTH_LOCK:            return "AUTH";
	case LOGSRV_LOCK:          return "LOGSRV";
	case PROTO_LOCK:           return "PROTO";
	case OTHER_LOCK:           return "OTHER";
	case LOCK_LABELS:          break; /* keep compiler happy */
	}
	/* only way to come here is consecutive to an internal bug */
	abort();
}

static inline void show_lock_stats()
{
	int lbl;

	for (lbl = 0; lbl < LOCK_LABELS; lbl++) {
		fprintf(stderr,
			"Stats about Lock %s: \n"
			"\t # write lock  : %lu\n"
			"\t # write unlock: %lu (%ld)\n"
			"\t # wait time for write     : %.3f msec\n"
			"\t # wait time for write/lock: %.3f nsec\n"
			"\t # read lock   : %lu\n"
			"\t # read unlock : %lu (%ld)\n"
			"\t # wait time for read      : %.3f msec\n"
			"\t # wait time for read/lock : %.3f nsec\n",
			lock_label(lbl),
			lock_stats[lbl].num_write_locked,
			lock_stats[lbl].num_write_unlocked,
			lock_stats[lbl].num_write_unlocked - lock_stats[lbl].num_write_locked,
			(double)lock_stats[lbl].nsec_wait_for_write / 1000000.0,
			lock_stats[lbl].num_write_locked ? ((double)lock_stats[lbl].nsec_wait_for_write / (double)lock_stats[lbl].num_write_locked) : 0,
			lock_stats[lbl].num_read_locked,
			lock_stats[lbl].num_read_unlocked,
			lock_stats[lbl].num_read_unlocked - lock_stats[lbl].num_read_locked,
			(double)lock_stats[lbl].nsec_wait_for_read / 1000000.0,
			lock_stats[lbl].num_read_locked ? ((double)lock_stats[lbl].nsec_wait_for_read / (double)lock_stats[lbl].num_read_locked) : 0);
	}
}

/* The following functions are used to collect some stats about locks. We wrap
 * the locking functions to know how much time we wait in a lock. */

static uint64_t nsec_now(void) {
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ((uint64_t) ts.tv_sec * 1000000000ULL +
		(uint64_t) ts.tv_nsec);
}

static inline void __ha_rwlock_init(struct ha_rwlock *l)
{
	memset(l, 0, sizeof(struct ha_rwlock));
	__RWLOCK_INIT(&l->lock);
}

static inline void __ha_rwlock_destroy(struct ha_rwlock *l)
{
	__RWLOCK_DESTROY(&l->lock);
	memset(l, 0, sizeof(struct ha_rwlock));
}


static inline void __ha_rwlock_wrlock(enum lock_label lbl, struct ha_rwlock *l,
				      const char *func, const char *file, int line)
{
	uint64_t start_time;

	if (unlikely(l->info.cur_writer & tid_bit)) {
		/* the thread is already owning the lock for write */
		abort();
	}

	if (unlikely(l->info.cur_readers & tid_bit)) {
		/* the thread is already owning the lock for read */
		abort();
	}

	HA_ATOMIC_OR(&l->info.wait_writers, tid_bit);

	start_time = nsec_now();
	__RWLOCK_WRLOCK(&l->lock);
	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (nsec_now() - start_time));

	HA_ATOMIC_ADD(&lock_stats[lbl].num_write_locked, 1);

	l->info.cur_writer             = tid_bit;
	l->info.last_location.function = func;
	l->info.last_location.file     = file;
	l->info.last_location.line     = line;

	HA_ATOMIC_AND(&l->info.wait_writers, ~tid_bit);
}

static inline int __ha_rwlock_trywrlock(enum lock_label lbl, struct ha_rwlock *l,
					const char *func, const char *file, int line)
{
	uint64_t start_time;
	int r;

	if (unlikely(l->info.cur_writer & tid_bit)) {
		/* the thread is already owning the lock for write */
		abort();
	}

	if (unlikely(l->info.cur_readers & tid_bit)) {
		/* the thread is already owning the lock for read */
		abort();
	}

	/* We set waiting writer because trywrlock could wait for readers to quit */
	HA_ATOMIC_OR(&l->info.wait_writers, tid_bit);

	start_time = nsec_now();
	r = __RWLOCK_TRYWRLOCK(&l->lock);
	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (nsec_now() - start_time));
	if (unlikely(r)) {
		HA_ATOMIC_AND(&l->info.wait_writers, ~tid_bit);
		return r;
	}
	HA_ATOMIC_ADD(&lock_stats[lbl].num_write_locked, 1);

	l->info.cur_writer             = tid_bit;
	l->info.last_location.function = func;
	l->info.last_location.file     = file;
	l->info.last_location.line     = line;

	HA_ATOMIC_AND(&l->info.wait_writers, ~tid_bit);

	return 0;
}

static inline void __ha_rwlock_wrunlock(enum lock_label lbl, struct ha_rwlock *l,
					const char *func, const char *file, int line)
{
	if (unlikely(!(l->info.cur_writer & tid_bit))) {
		/* the thread is not owning the lock for write */
		abort();
	}

	l->info.cur_writer             = 0;
	l->info.last_location.function = func;
	l->info.last_location.file     = file;
	l->info.last_location.line     = line;

	__RWLOCK_WRUNLOCK(&l->lock);

	HA_ATOMIC_ADD(&lock_stats[lbl].num_write_unlocked, 1);
}

static inline void __ha_rwlock_rdlock(enum lock_label lbl, struct ha_rwlock *l)
{
	uint64_t start_time;

	if (unlikely(l->info.cur_writer & tid_bit)) {
		/* the thread is already owning the lock for write */
		abort();
	}

	if (unlikely(l->info.cur_readers & tid_bit)) {
		/* the thread is already owning the lock for read */
		abort();
	}

	HA_ATOMIC_OR(&l->info.wait_readers, tid_bit);

	start_time = nsec_now();
	__RWLOCK_RDLOCK(&l->lock);
	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, (nsec_now() - start_time));
	HA_ATOMIC_ADD(&lock_stats[lbl].num_read_locked, 1);

	HA_ATOMIC_OR(&l->info.cur_readers, tid_bit);

	HA_ATOMIC_AND(&l->info.wait_readers, ~tid_bit);
}

static inline int __ha_rwlock_tryrdlock(enum lock_label lbl, struct ha_rwlock *l)
{
	int r;

	if (unlikely(l->info.cur_writer & tid_bit)) {
		/* the thread is already owning the lock for write */
		abort();
	}

	if (unlikely(l->info.cur_readers & tid_bit)) {
		/* the thread is already owning the lock for read */
		abort();
	}

	/* try read should never wait */
	r = __RWLOCK_TRYRDLOCK(&l->lock);
	if (unlikely(r))
		return r;
	HA_ATOMIC_ADD(&lock_stats[lbl].num_read_locked, 1);

	HA_ATOMIC_OR(&l->info.cur_readers, tid_bit);

	return 0;
}

static inline void __ha_rwlock_rdunlock(enum lock_label lbl, struct ha_rwlock *l)
{
	if (unlikely(!(l->info.cur_readers & tid_bit))) {
		/* the thread is not owning the lock for read */
		abort();
	}

	HA_ATOMIC_AND(&l->info.cur_readers, ~tid_bit);

	__RWLOCK_RDUNLOCK(&l->lock);

	HA_ATOMIC_ADD(&lock_stats[lbl].num_read_unlocked, 1);
}

static inline void __spin_init(struct ha_spinlock *l)
{
	memset(l, 0, sizeof(struct ha_spinlock));
	__SPIN_INIT(&l->lock);
}

static inline void __spin_destroy(struct ha_spinlock *l)
{
	__SPIN_DESTROY(&l->lock);
	memset(l, 0, sizeof(struct ha_spinlock));
}

static inline void __spin_lock(enum lock_label lbl, struct ha_spinlock *l,
			       const char *func, const char *file, int line)
{
	uint64_t start_time;

	if (unlikely(l->info.owner & tid_bit)) {
		/* the thread is already owning the lock */
		abort();
	}

	HA_ATOMIC_OR(&l->info.waiters, tid_bit);

	start_time = nsec_now();
	__SPIN_LOCK(&l->lock);
	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (nsec_now() - start_time));

	HA_ATOMIC_ADD(&lock_stats[lbl].num_write_locked, 1);

	l->info.owner                  = tid_bit;
	l->info.last_location.function = func;
	l->info.last_location.file     = file;
	l->info.last_location.line     = line;

	HA_ATOMIC_AND(&l->info.waiters, ~tid_bit);
}

static inline int __spin_trylock(enum lock_label lbl, struct ha_spinlock *l,
				 const char *func, const char *file, int line)
{
	int r;

	if (unlikely(l->info.owner & tid_bit)) {
		/* the thread is already owning the lock */
		abort();
	}

	/* the try lock should never wait */
	r = __SPIN_TRYLOCK(&l->lock);
	if (unlikely(r))
		return r;
	HA_ATOMIC_ADD(&lock_stats[lbl].num_write_locked, 1);

	l->info.owner                  = tid_bit;
	l->info.last_location.function = func;
	l->info.last_location.file     = file;
	l->info.last_location.line     = line;

	return 0;
}

static inline void __spin_unlock(enum lock_label lbl, struct ha_spinlock *l,
				 const char *func, const char *file, int line)
{
	if (unlikely(!(l->info.owner & tid_bit))) {
		/* the thread is not owning the lock */
		abort();
	}

	l->info.owner                  = 0;
	l->info.last_location.function = func;
	l->info.last_location.file     = file;
	l->info.last_location.line     = line;

	__SPIN_UNLOCK(&l->lock);
	HA_ATOMIC_ADD(&lock_stats[lbl].num_write_unlocked, 1);
}

#else /* DEBUG_THREAD */

#define HA_SPINLOCK_T        unsigned long

#define HA_SPIN_INIT(l)         ({ (*l) = 0; })
#define HA_SPIN_DESTROY(l)      ({ (*l) = 0; })
#define HA_SPIN_LOCK(lbl, l)    pl_take_s(l)
#define HA_SPIN_TRYLOCK(lbl, l) !pl_try_s(l)
#define HA_SPIN_UNLOCK(lbl, l)  pl_drop_s(l)

#define HA_RWLOCK_T          unsigned long

#define HA_RWLOCK_INIT(l)          ({ (*l) = 0; })
#define HA_RWLOCK_DESTROY(l)       ({ (*l) = 0; })
#define HA_RWLOCK_WRLOCK(lbl,l)    pl_take_w(l)
#define HA_RWLOCK_TRYWRLOCK(lbl,l) !pl_try_w(l)
#define HA_RWLOCK_WRUNLOCK(lbl,l)  pl_drop_w(l)
#define HA_RWLOCK_RDLOCK(lbl,l)    pl_take_r(l)
#define HA_RWLOCK_TRYRDLOCK(lbl,l) !pl_try_r(l)
#define HA_RWLOCK_RDUNLOCK(lbl,l)  pl_drop_r(l)

#endif  /* DEBUG_THREAD */

#ifdef __x86_64__

static __inline int
__ha_cas_dw(void *target, void *compare, const void *set)
{
	char ret;

	__asm __volatile("lock cmpxchg16b %0; setz %3"
			 : "+m" (*(void **)target),
			   "=a" (((void **)compare)[0]),
			   "=d" (((void **)compare)[1]),
			   "=q" (ret)
			 : "a" (((void **)compare)[0]),
			   "d" (((void **)compare)[1]),
			   "b" (((const void **)set)[0]),
			   "c" (((const void **)set)[1])
			 : "memory", "cc");
	return (ret);
}
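
/* Illustrative sketch: __ha_cas_dw() backs HA_ATOMIC_DWCAS() and atomically
 * compares the 16 bytes at <target> against <compare>, storing <set> on
 * success. Like cmpxchg16b itself, it refreshes <compare> with the memory
 * contents on failure, allowing a classical retry loop; <target> must be
 * properly aligned (16 bytes on x86_64). <shared> below is hypothetical.
 *
 *	void *cur[2], *next[2];
 *	// cur loaded from the shared location, next computed from it
 *	while (!__ha_cas_dw(&shared, cur, next)) {
 *		// cur was refreshed; recompute next and retry
 *	}
 */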

static __inline void
__ha_barrier_load(void)
{
	__asm __volatile("lfence" ::: "memory");
}

static __inline void
__ha_barrier_store(void)
{
	__asm __volatile("sfence" ::: "memory");
}

static __inline void
__ha_barrier_full(void)
{
	__asm __volatile("mfence" ::: "memory");
}

#elif defined(__arm__) && (defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__))

static __inline void
__ha_barrier_load(void)
{
	__asm __volatile("dmb" ::: "memory");
}

static __inline void
__ha_barrier_store(void)
{
	__asm __volatile("dsb" ::: "memory");
}

static __inline void
__ha_barrier_full(void)
{
	__asm __volatile("dmb" ::: "memory");
}

static __inline int __ha_cas_dw(void *target, void *compare, const void *set)
{
	uint64_t previous;
	int tmp;

	__asm __volatile("1:"
			 "ldrexd %0, [%4];"
			 "cmp %Q0, %Q2;"
			 "ittt eq;"
			 "cmpeq %R0, %R2;"
			 "strexdeq %1, %3, [%4];"
			 "cmpeq %1, #1;"
			 "beq 1b;"
			 : "=&r" (previous), "=&r" (tmp)
			 : "r" (*(uint64_t *)compare), "r" (*(uint64_t *)set), "r" (target)
			 : "memory", "cc");
	tmp = (previous == *(uint64_t *)compare);
	*(uint64_t *)compare = previous;
	return (tmp);
}

#elif defined (__aarch64__)

static __inline void
__ha_barrier_load(void)
{
	__asm __volatile("dmb ishld" ::: "memory");
}

static __inline void
__ha_barrier_store(void)
{
	__asm __volatile("dmb ishst" ::: "memory");
}

static __inline void
__ha_barrier_full(void)
{
	__asm __volatile("dmb ish" ::: "memory");
}

static __inline int __ha_cas_dw(void *target, void *compare, void *set)
{
	void *value[2];
	uint64_t tmp1, tmp2;

	__asm__ __volatile__("1:"
			     "ldxp %0, %1, [%4];"
			     "mov %2, %0;"
			     "mov %3, %1;"
			     "eor %0, %0, %5;"
			     "eor %1, %1, %6;"
			     "orr %1, %0, %1;"
			     "mov %w0, #0;"
			     "cbnz %1, 2f;"
			     "stxp %w0, %7, %8, [%4];"
			     "cbnz %w0, 1b;"
			     "mov %w0, #1;"
			     "2:"
			     : "=&r" (tmp1), "=&r" (tmp2), "=&r" (value[0]), "=&r" (value[1])
			     : "r" (target), "r" (((void **)(compare))[0]), "r" (((void **)(compare))[1]), "r" (((void **)(set))[0]), "r" (((void **)(set))[1])
			     : "cc", "memory");

	memcpy(compare, &value, sizeof(value));
	return (tmp1);
}

#else
#define __ha_barrier_load __sync_synchronize
#define __ha_barrier_store __sync_synchronize
#define __ha_barrier_full __sync_synchronize
#endif

void ha_spin_init(HA_SPINLOCK_T *l);
void ha_rwlock_init(HA_RWLOCK_T *l);

#endif /* USE_THREAD */

static inline void __ha_compiler_barrier(void)
{
	__asm __volatile("" ::: "memory");
}

int parse_nbthread(const char *arg, char **err);

#endif /* _COMMON_HATHREADS_H */