1 /*
2  * include/common/hathreads.h
3  * definitions, macros and inline functions about threads.
4  *
5  * Copyright (C) 2017 Christopher Faulet - cfaulet@haproxy.com
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation, version 2.1
10  * exclusively.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
20  */
21 
22 #ifndef _COMMON_HATHREADS_H
23 #define _COMMON_HATHREADS_H
24 
25 #include <signal.h>
26 #include <unistd.h>
27 #ifdef _POSIX_PRIORITY_SCHEDULING
28 #include <sched.h>
29 #endif
30 
31 #include <common/config.h>
32 #include <common/initcall.h>
33 
34 /* Note about all_threads_mask :
35  *    - the number of bits set in this variable is between 1 and LONGBITS.
36  *    - with threads support disabled, this symbol is defined as constant 1UL.
37  *    - with threads enabled, it contains the mask of enabled threads. Thus if
38  *      only one thread is enabled, it equals 1.
39  */
40 
41 /* thread info flags, for thread_info[].flags */
42 #define TI_FL_STUCK             0x00000001
43 
44 
45 #ifndef USE_THREAD
46 
47 #define MAX_THREADS 1
48 #define MAX_THREADS_MASK 1
49 
50 /* This is the only way we found to replace variables with constants that are
51  * optimized away at build time.
52  */
53 enum { all_threads_mask = 1UL };
54 enum { threads_harmless_mask = 0 };
55 enum { threads_want_rdv_mask = 0 };
56 enum { threads_sync_mask = 0 };
57 enum { tid_bit = 1UL };
58 enum { tid = 0 };
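/* With the constants above, a test such as the one below is expected to be
 * constant-folded by the compiler in single-threaded builds, so the dead
 * branch vanishes entirely (illustration only):
 *
 *     if (all_threads_mask & ~tid_bit) {
 *         // never reached here: all_threads_mask == tid_bit == 1UL
 *     }
 */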
59 
60 extern struct thread_info {
61 	clockid_t clock_id;
62 	timer_t wd_timer;          /* valid timer or TIMER_INVALID if not set */
63 	uint64_t prev_cpu_time;    /* previous per thread CPU time */
64 	uint64_t prev_mono_time;   /* previous system wide monotonic time  */
65 	unsigned int idle_pct;     /* idle to total ratio over last sample (percent) */
66 	unsigned int flags;        /* thread info flags, TI_FL_* */
67 	/* pad to cache line (64B) */
68 	char __pad[0];            /* unused except to check remaining room */
69 	char __end[0] __attribute__((aligned(64)));
70 } thread_info[MAX_THREADS];
71 
72 extern THREAD_LOCAL struct thread_info *ti; /* thread_info for the current thread */
73 
74 #define __decl_hathreads(decl)
75 #define __decl_spinlock(lock)
76 #define __decl_aligned_spinlock(lock)
77 #define __decl_rwlock(lock)
78 #define __decl_aligned_rwlock(lock)
79 
80 #define HA_ATOMIC_CAS(val, old, new)                                    \
81 	({                                                              \
82 		typeof(val) _v = (val);                                 \
83 		typeof(old) _o = (old);                                 \
84 		(*_v == *_o) ? ((*_v = (new)), 1) : ((*_o = *_v), 0);   \
85 	})
86 
87 /* warning, n is a pointer to the double value for dwcas */
88 #define HA_ATOMIC_DWCAS(val, o, n)				       \
89 	({                                                             \
90 		long *_v = (long*)(val);                               \
91 		long *_o = (long*)(o);				       \
92 		long *_n = (long*)(n);				       \
93 		long _v0 = _v[0], _v1 = _v[1];			       \
94 		(_v0 == _o[0] && _v1 == _o[1]) ?                       \
95 			(_v[0] = _n[0], _v[1] = _n[1], 1) :	       \
96 			(_o[0] = _v0,   _o[1] = _v1,   0);	       \
97 	})
98 
99 #define HA_ATOMIC_ADD(val, i)        ({*(val) += (i);})
100 #define HA_ATOMIC_SUB(val, i)        ({*(val) -= (i);})
101 #define HA_ATOMIC_XADD(val, i)						\
102 	({								\
103 		typeof((val)) __p_xadd = (val);				\
104 		typeof(*(val)) __old_xadd = *__p_xadd;			\
105 		*__p_xadd += i;						\
106 		__old_xadd;						\
107 	})
108 #define HA_ATOMIC_AND(val, flags)    ({*(val) &= (flags);})
109 #define HA_ATOMIC_OR(val, flags)     ({*(val) |= (flags);})
110 #define HA_ATOMIC_XCHG(val, new)					\
111 	({								\
112 		typeof(*(val)) __old_xchg = *(val);			\
113 		*(val) = new;						\
114 		__old_xchg;						\
115 	})
116 #define HA_ATOMIC_BTS(val, bit)						\
117 	({								\
118 		typeof((val)) __p_bts = (val);				\
119 		typeof(*__p_bts)  __b_bts = (1UL << (bit));		\
120 		typeof(*__p_bts)  __t_bts = *__p_bts & __b_bts;		\
121 		if (!__t_bts)						\
122 			*__p_bts |= __b_bts;				\
123 		__t_bts;						\
124 	})
125 #define HA_ATOMIC_BTR(val, bit)						\
126 	({								\
127 		typeof((val)) __p_btr = (val);				\
128 		typeof(*__p_btr)  __b_btr = (1UL << (bit));		\
129 		typeof(*__p_btr)  __t_btr = *__p_btr & __b_btr;		\
130 		if (__t_btr)						\
131 			*__p_btr &= ~__b_btr;				\
132 		__t_btr;						\
133 	})
134 #define HA_ATOMIC_LOAD(val)          *(val)
135 #define HA_ATOMIC_STORE(val, new)    ({*(val) = new;})
136 #define HA_ATOMIC_UPDATE_MAX(val, new)					\
137 	({								\
138 		typeof(val) __val = (val);                              \
139 		typeof(*(val)) __new_max = (new);			\
140 									\
141 		if (*__val < __new_max)					\
142 			*__val = __new_max;				\
143 		*__val;							\
144 	})
145 
146 #define HA_ATOMIC_UPDATE_MIN(val, new)					\
147 	({								\
148 		typeof(val) __val = (val);                              \
149 		typeof(*(val)) __new_min = (new);			\
150 									\
151 		if (*__val > __new_min)					\
152 			*__val = __new_min;				\
153 		*__val;							\
154 	})
155 
156 #define HA_BARRIER() do { } while (0)
157 
158 #define HA_SPIN_INIT(l)         do { /* do nothing */ } while(0)
159 #define HA_SPIN_DESTROY(l)      do { /* do nothing */ } while(0)
160 #define HA_SPIN_LOCK(lbl, l)    do { /* do nothing */ } while(0)
161 #define HA_SPIN_TRYLOCK(lbl, l) ({ 0; })
162 #define HA_SPIN_UNLOCK(lbl, l)  do { /* do nothing */ } while(0)
163 
164 #define HA_RWLOCK_INIT(l)          do { /* do nothing */ } while(0)
165 #define HA_RWLOCK_DESTROY(l)       do { /* do nothing */ } while(0)
166 #define HA_RWLOCK_WRLOCK(lbl, l)   do { /* do nothing */ } while(0)
167 #define HA_RWLOCK_TRYWRLOCK(lbl, l)   ({ 0; })
168 #define HA_RWLOCK_WRUNLOCK(lbl, l) do { /* do nothing */ } while(0)
169 #define HA_RWLOCK_RDLOCK(lbl, l)   do { /* do nothing */ } while(0)
170 #define HA_RWLOCK_TRYRDLOCK(lbl, l)   ({ 0; })
171 #define HA_RWLOCK_RDUNLOCK(lbl, l) do { /* do nothing */ } while(0)
172 
173 #define ha_sigmask(how, set, oldset)  sigprocmask(how, set, oldset)
174 
175 static inline void ha_set_tid(unsigned int tid)
176 {
177 	ti = &thread_info[tid];
178 }
179 
180 static inline unsigned long long ha_get_pthread_id(unsigned int thr)
181 {
182 	return 0;
183 }
184 
185 static inline void ha_thread_relax(void)
186 {
187 #if _POSIX_PRIORITY_SCHEDULING
188 	sched_yield();
189 #endif
190 }
191 
192 /* send signal <sig> to thread <thr> */
193 static inline void ha_tkill(unsigned int thr, int sig)
194 {
195 	raise(sig);
196 }
197 
198 /* send signal <sig> to all threads */
199 static inline void ha_tkillall(int sig)
200 {
201 	raise(sig);
202 }
203 
204 static inline void __ha_barrier_atomic_load(void)
205 {
206 }
207 
208 static inline void __ha_barrier_atomic_store(void)
209 {
210 }
211 
212 static inline void __ha_barrier_atomic_full(void)
213 {
214 }
215 
216 static inline void __ha_barrier_load(void)
217 {
218 }
219 
220 static inline void __ha_barrier_store(void)
221 {
222 }
223 
224 static inline void __ha_barrier_full(void)
225 {
226 }
227 
228 static inline void thread_harmless_now()
229 {
230 }
231 
232 static inline void thread_harmless_end()
233 {
234 }
235 
236 static inline void thread_isolate()
237 {
238 }
239 
240 static inline void thread_release()
241 {
242 }
243 
244 static inline void thread_sync_release()
245 {
246 }
247 
248 static inline unsigned long thread_isolated()
249 {
250 	return 1;
251 }
252 
253 #else /* USE_THREAD */
254 
255 #include <stdio.h>
256 #include <stdlib.h>
257 #include <string.h>
258 #include <pthread.h>
259 #include <import/plock.h>
260 
261 #ifndef MAX_THREADS
262 #define MAX_THREADS LONGBITS
263 #endif
264 
265 #define MAX_THREADS_MASK (~0UL >> (LONGBITS - MAX_THREADS))
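/* For instance, with LONGBITS == 64: MAX_THREADS == 64 yields ~0UL (all bits
 * set), while a hypothetical build with MAX_THREADS == 4 would yield
 * (~0UL >> 60) == 0xf, i.e. one bit per possible thread.
 */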
266 
267 #define __decl_hathreads(decl) decl
268 
269 /* declare a self-initializing spinlock */
270 #define __decl_spinlock(lock)                               \
271 	HA_SPINLOCK_T (lock);                               \
272 	INITCALL1(STG_LOCK, ha_spin_init, &(lock))
273 
274 /* declare a self-initializing spinlock, aligned on a cache line */
275 #define __decl_aligned_spinlock(lock)                       \
276 	HA_SPINLOCK_T (lock) __attribute__((aligned(64)));  \
277 	INITCALL1(STG_LOCK, ha_spin_init, &(lock))
278 
279 /* declare a self-initializing rwlock */
280 #define __decl_rwlock(lock)                                 \
281 	HA_RWLOCK_T   (lock);                               \
282 	INITCALL1(STG_LOCK, ha_rwlock_init, &(lock))
283 
284 /* declare a self-initializing rwlock, aligned on a cache line */
285 #define __decl_aligned_rwlock(lock)                         \
286 	HA_RWLOCK_T   (lock) __attribute__((aligned(64)));  \
287 	INITCALL1(STG_LOCK, ha_rwlock_init, &(lock))
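/* Illustrative use of the self-initializing declarations above (sketch only,
 * <my_lock> and <my_counter> are hypothetical): the INITCALL performs the
 * initialization, so the lock is usable once the init calls have run.
 *
 *     __decl_spinlock(my_lock);
 *
 *     static void update_my_counter(void)
 *     {
 *         HA_SPIN_LOCK(OTHER_LOCK, &my_lock);
 *         my_counter++;
 *         HA_SPIN_UNLOCK(OTHER_LOCK, &my_lock);
 *     }
 */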
288 
289 /* TODO: thread: For now, we rely on GCC builtins but it could be a good idea to
290  * have a header file grouping all the functions dealing with threads. */
291 
292 #if (defined(__GNUC__) && (__GNUC__ < 4 || __GNUC__ == 4 && __GNUC_MINOR__ < 7) && !defined(__clang__)) || (defined(__clang__) && defined(__i386__))
293 /* gcc < 4.7 */
294 
295 #define HA_ATOMIC_ADD(val, i)        __sync_add_and_fetch(val, i)
296 #define HA_ATOMIC_SUB(val, i)        __sync_sub_and_fetch(val, i)
297 #define HA_ATOMIC_XADD(val, i)       __sync_fetch_and_add(val, i)
298 #define HA_ATOMIC_AND(val, flags)    __sync_and_and_fetch(val, flags)
299 #define HA_ATOMIC_OR(val, flags)     __sync_or_and_fetch(val,  flags)
300 
301 /* the CAS is a bit complicated. The older API doesn't support returning the
302  * value and the swap's result at the same time. So here we take what looks
303  * like the safest route, consisting in using the boolean version guaranteeing
304  * that the operation was performed or not, and we snoop a previous value. If
305  * the compare succeeds, we return. If it fails, we return the previous value,
306  * but only if it differs from the expected one. If it's the same, it's a race,
307  * so we try again to avoid confusing a possibly sensitive caller.
308  */
309 #define HA_ATOMIC_CAS(val, old, new)					\
310 	({								\
311 		typeof((val)) __val_cas = (val);			\
312 		typeof((old)) __oldp_cas = (old);			\
313 		typeof(*(old)) __oldv_cas;				\
314 		typeof((new)) __new_cas = (new);			\
315 		int __ret_cas;						\
316 		do {							\
317 			__oldv_cas = *__val_cas;			\
318 			__ret_cas = __sync_bool_compare_and_swap(__val_cas, *__oldp_cas, __new_cas); \
319 		} while (!__ret_cas && *__oldp_cas == __oldv_cas);	\
320 		if (!__ret_cas)						\
321 			*__oldp_cas = __oldv_cas;			\
322 		__ret_cas;						\
323 	})
324 
325 /* warning, n is a pointer to the double value for dwcas */
326 #define HA_ATOMIC_DWCAS(val, o, n) __ha_cas_dw(val, o, n)
327 
328 #define HA_ATOMIC_XCHG(val, new)					\
329 	({								\
330 		typeof((val)) __val_xchg = (val);			\
331 		typeof(*(val)) __old_xchg;				\
332 		typeof((new)) __new_xchg = (new);			\
333 		do { __old_xchg = *__val_xchg;				\
334 		} while (!__sync_bool_compare_and_swap(__val_xchg, __old_xchg, __new_xchg)); \
335 		__old_xchg;						\
336 	})
337 
338 #define HA_ATOMIC_BTS(val, bit)						\
339 	({								\
340 		typeof(*(val)) __b_bts = (1UL << (bit));		\
341 		__sync_fetch_and_or((val), __b_bts) & __b_bts;		\
342 	})
343 
344 #define HA_ATOMIC_BTR(val, bit)						\
345 	({								\
346 		typeof(*(val)) __b_btr = (1UL << (bit));		\
347 		__sync_fetch_and_and((val), ~__b_btr) & __b_btr;	\
348 	})
349 
350 #define HA_ATOMIC_LOAD(val)                                             \
351         ({                                                              \
352 	        typeof(*(val)) ret;                                     \
353 		__sync_synchronize();                                   \
354 		ret = *(volatile typeof(val))val;                       \
355 		__sync_synchronize();                                   \
356 		ret;                                                    \
357 	})
358 
359 #define HA_ATOMIC_STORE(val, new)					\
360 	({								\
361 		typeof((val)) __val_store = (val);			\
362 		typeof(*(val)) __old_store;				\
363 		typeof((new)) __new_store = (new);			\
364 		do { __old_store = *__val_store;			\
365 		} while (!__sync_bool_compare_and_swap(__val_store, __old_store, __new_store));	\
366 	})
367 #else
368 /* gcc >= 4.7 */
369 #define HA_ATOMIC_CAS(val, old, new) __atomic_compare_exchange_n(val, old, new, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
370 /* warning, n is a pointer to the double value for dwcas */
371 #define HA_ATOMIC_DWCAS(val, o, n)   __ha_cas_dw(val, o, n)
372 #define HA_ATOMIC_ADD(val, i)        __atomic_add_fetch(val, i, __ATOMIC_SEQ_CST)
373 #define HA_ATOMIC_XADD(val, i)       __atomic_fetch_add(val, i, __ATOMIC_SEQ_CST)
374 #define HA_ATOMIC_SUB(val, i)        __atomic_sub_fetch(val, i, __ATOMIC_SEQ_CST)
375 #define HA_ATOMIC_AND(val, flags)    __atomic_and_fetch(val, flags, __ATOMIC_SEQ_CST)
376 #define HA_ATOMIC_OR(val, flags)     __atomic_or_fetch(val,  flags, __ATOMIC_SEQ_CST)
377 #define HA_ATOMIC_BTS(val, bit)						\
378 	({								\
379 		typeof(*(val)) __b_bts = (1UL << (bit));		\
380 		__sync_fetch_and_or((val), __b_bts) & __b_bts;		\
381 	})
382 
383 #define HA_ATOMIC_BTR(val, bit)						\
384 	({								\
385 		typeof(*(val)) __b_btr = (1UL << (bit));		\
386 		__sync_fetch_and_and((val), ~__b_btr) & __b_btr;	\
387 	})
388 
389 #define HA_ATOMIC_XCHG(val, new)     __atomic_exchange_n(val, new, __ATOMIC_SEQ_CST)
390 #define HA_ATOMIC_STORE(val, new)    __atomic_store_n(val, new, __ATOMIC_SEQ_CST)
391 #define HA_ATOMIC_LOAD(val)          __atomic_load_n(val, __ATOMIC_SEQ_CST)
392 
393 /* Variants that don't generate any memory barrier.
394  * If you're unsure how to deal with barriers, just use the HA_ATOMIC_* versions,
395  * which will always generate correct code.
396  * Usually it's fine to use these when updating data that have no dependency,
397  * e.g. updating a counter. Otherwise a barrier is required.
398  */
399 #define _HA_ATOMIC_CAS(val, old, new) __atomic_compare_exchange_n(val, old, new, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED)
400 /* warning, n is a pointer to the double value for dwcas */
401 #define _HA_ATOMIC_DWCAS(val, o, n)   __ha_cas_dw(val, o, n)
402 #define _HA_ATOMIC_ADD(val, i)        __atomic_add_fetch(val, i, __ATOMIC_RELAXED)
403 #define _HA_ATOMIC_XADD(val, i)       __atomic_fetch_add(val, i, __ATOMIC_RELAXED)
404 #define _HA_ATOMIC_SUB(val, i)        __atomic_sub_fetch(val, i, __ATOMIC_RELAXED)
405 #define _HA_ATOMIC_AND(val, flags)    __atomic_and_fetch(val, flags, __ATOMIC_RELAXED)
406 #define _HA_ATOMIC_OR(val, flags)     __atomic_or_fetch(val,  flags, __ATOMIC_RELAXED)
407 #define _HA_ATOMIC_XCHG(val, new)     __atomic_exchange_n(val, new, __ATOMIC_RELAXED)
408 #define _HA_ATOMIC_STORE(val, new)    __atomic_store_n(val, new, __ATOMIC_RELAXED)
409 #define _HA_ATOMIC_LOAD(val)          __atomic_load_n(val, __ATOMIC_RELAXED)
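/* Example of the intended split described above (sketch only, the variables
 * are hypothetical): a free-standing counter may use the relaxed form, while
 * a store publishing data to other threads keeps the ordered form.
 *
 *     _HA_ATOMIC_ADD(&activity_counter, 1);   // no ordering needed
 *     HA_ATOMIC_STORE(&cur_job, new_job);     // other threads depend on it
 */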
410 
411 #endif /* gcc >= 4.7 */
412 
413 #define HA_ATOMIC_UPDATE_MAX(val, new)					\
414 	({								\
415 		typeof(val) __val = (val);                              \
416 		typeof(*(val)) __old_max = *__val;			\
417 		typeof(*(val)) __new_max = (new);			\
418 									\
419 		while (__old_max < __new_max &&				\
420 		       !HA_ATOMIC_CAS(__val, &__old_max, __new_max));	\
421 		*__val;							\
422 	})
423 #define HA_ATOMIC_UPDATE_MIN(val, new)					\
424 	({								\
425 		typeof(val) __val = (val);                              \
426 		typeof(*(val)) __old_min = *__val;			\
427 		typeof(*(val)) __new_min = (new);			\
428 									\
429 		while (__old_min > __new_min &&				\
430 		       !HA_ATOMIC_CAS(__val, &__old_min, __new_min));	\
431 		*__val;							\
432 	})
433 
434 #define HA_BARRIER() pl_barrier()
435 
436 void thread_harmless_till_end();
437 void thread_isolate();
438 void thread_release();
439 void thread_sync_release();
440 void ha_tkill(unsigned int thr, int sig);
441 void ha_tkillall(int sig);
442 
443 extern struct thread_info {
444 	pthread_t pthread;
445 	clockid_t clock_id;
446 	timer_t wd_timer;          /* valid timer or TIMER_INVALID if not set */
447 	uint64_t prev_cpu_time;    /* previous per thread CPU time */
448 	uint64_t prev_mono_time;   /* previous system wide monotonic time  */
449 	unsigned int idle_pct;     /* idle to total ratio over last sample (percent) */
450 	unsigned int flags;        /* thread info flags, TI_FL_* */
451 	/* pad to cache line (64B) */
452 	char __pad[0];            /* unused except to check remaining room */
453 	char __end[0] __attribute__((aligned(64)));
454 } thread_info[MAX_THREADS];
455 
456 extern THREAD_LOCAL unsigned int tid;     /* The thread id */
457 extern THREAD_LOCAL unsigned long tid_bit; /* The bit corresponding to the thread id */
458 extern THREAD_LOCAL struct thread_info *ti; /* thread_info for the current thread */
459 extern volatile unsigned long all_threads_mask;
460 extern volatile unsigned long threads_want_rdv_mask;
461 extern volatile unsigned long threads_harmless_mask;
462 extern volatile unsigned long threads_sync_mask;
463 
464 /* explanation for threads_want_rdv_mask, threads_harmless_mask, and
465  * threads_sync_mask :
466  * - threads_want_rdv_mask is a bit field indicating all threads that have
467  *   requested a rendez-vous of other threads using thread_isolate().
468  * - threads_harmless_mask is a bit field indicating all threads that are
469  *   currently harmless in that they promise not to access a shared resource.
470  * - threads_sync_mask is a bit field indicating that a thread waiting for
471  *   others to finish wants to leave synchronized with others and as such
472  *   promises to do so as well using thread_sync_release().
473  *
474  * For a given thread, its bits in want_rdv and harmless can be translated like
475  * this :
476  *
477  *  ----------+----------+----------------------------------------------------
478  *   want_rdv | harmless | description
479  *  ----------+----------+----------------------------------------------------
480  *       0    |     0    | thread not interested in RDV, possibly harmful
481  *       0    |     1    | thread not interested in RDV but harmless
482  *       1    |     1    | thread interested in RDV and waiting for its turn
483  *       1    |     0    | thread currently working isolated from others
484  *  ----------+----------+----------------------------------------------------
485  *
486  * thread_sync_mask only delays the leaving of thread_sync_release() to make
487  * sure that each thread's harmless bit is cleared before leaving the function.
488  */
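/* Typical isolation sequence (illustration only): a thread which needs to
 * modify a shared structure alone requests the rendez-vous, performs its
 * work, then lets the other threads restart.
 *
 *     thread_isolate();     // waits for all other threads to become harmless
 *     // ... exclusive work on shared data ...
 *     thread_release();     // other threads may leave their harmless state
 */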
489 
490 #define ha_sigmask(how, set, oldset)  pthread_sigmask(how, set, oldset)
491 
492 /* sets the thread ID and the TID bit for the current thread */
493 static inline void ha_set_tid(unsigned int data)
494 {
495 	tid     = data;
496 	tid_bit = (1UL << tid);
497 	ti      = &thread_info[tid];
498 }
499 
500 /* Retrieves the opaque pthread_t of thread <thr> cast to an unsigned long long
501  * since POSIX took great care of not specifying its representation, making it
502  * hard to export for post-mortem analysis. For this reason we copy it into a
503  * union and will use the smallest scalar type at least as large as its size,
504  * which will keep endianness and alignment for all regular sizes. As a last
505  * resort we end up with a long long aligned to the first bytes in memory, which
506  * will be endian-dependent if pthread_t is larger than a long long (not seen
507  * yet).
508  */
509 static inline unsigned long long ha_get_pthread_id(unsigned int thr)
510 {
511 	union {
512 		pthread_t t;
513 		unsigned long long ll;
514 		unsigned int i;
515 		unsigned short s;
516 		unsigned char c;
517 	} u;
518 
519 	memset(&u, 0, sizeof(u));
520 	u.t = thread_info[thr].pthread;
521 
522 	if (sizeof(u.t) <= sizeof(u.c))
523 		return u.c;
524 	else if (sizeof(u.t) <= sizeof(u.s))
525 		return u.s;
526 	else if (sizeof(u.t) <= sizeof(u.i))
527 		return u.i;
528 	return u.ll;
529 }
530 
531 static inline void ha_thread_relax(void)
532 {
533 #if _POSIX_PRIORITY_SCHEDULING
534 	sched_yield();
535 #else
536 	pl_cpu_relax();
537 #endif
538 }
539 
540 /* Marks the thread as harmless. Note: this must be true, i.e. the thread must
541  * not be touching any unprotected shared resource during this period. Usually
542  * this is called before poll(), but it may also be placed around very slow
543  * calls (eg: some crypto operations). Needs to be terminated using
544  * thread_harmless_end().
545  */
546 static inline void thread_harmless_now()
547 {
548 	HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
549 }
550 
551 /* Ends the harmless period started by thread_harmless_now(). Usually this is
552  * placed after the poll() call. If it is discovered that a job was running and
553  * is relying on the thread still being harmless, the thread waits for the
554  * other one to finish.
555  */
556 static inline void thread_harmless_end()
557 {
558 	while (1) {
559 		HA_ATOMIC_AND(&threads_harmless_mask, ~tid_bit);
560 		if (likely((threads_want_rdv_mask & ~tid_bit) == 0))
561 			break;
562 		thread_harmless_till_end();
563 	}
564 }
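/* The usual pattern around the poller, as described above (sketch only,
 * <fds>, <nbfd> and <wait_time> are hypothetical):
 *
 *     thread_harmless_now();             // promise not to touch shared data
 *     poll(fds, nbfd, wait_time);        // may sleep while others isolate
 *     thread_harmless_end();             // waits if an isolation is pending
 */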
565 
566 /* an isolated thread has harmless cleared and want_rdv set */
567 static inline unsigned long thread_isolated()
568 {
569 	return threads_want_rdv_mask & ~threads_harmless_mask & tid_bit;
570 }
571 
572 
573 #if defined(DEBUG_THREAD) || defined(DEBUG_FULL)
574 
575 /* WARNING!!! if you update this enum, please also keep lock_label() up to date below */
576 enum lock_label {
577 	FD_LOCK,
578 	TASK_RQ_LOCK,
579 	TASK_WQ_LOCK,
580 	POOL_LOCK,
581 	LISTENER_LOCK,
582 	PROXY_LOCK,
583 	SERVER_LOCK,
584 	LBPRM_LOCK,
585 	SIGNALS_LOCK,
586 	STK_TABLE_LOCK,
587 	STK_SESS_LOCK,
588 	APPLETS_LOCK,
589 	PEER_LOCK,
590 	BUF_WQ_LOCK,
591 	STRMS_LOCK,
592 	SSL_LOCK,
593 	SSL_GEN_CERTS_LOCK,
594 	PATREF_LOCK,
595 	PATEXP_LOCK,
596 	VARS_LOCK,
597 	COMP_POOL_LOCK,
598 	LUA_LOCK,
599 	NOTIF_LOCK,
600 	SPOE_APPLET_LOCK,
601 	DNS_LOCK,
602 	PID_LIST_LOCK,
603 	EMAIL_ALERTS_LOCK,
604 	PIPES_LOCK,
605 	TLSKEYS_REF_LOCK,
606 	AUTH_LOCK,
607 	LOGSRV_LOCK,
608 	DICT_LOCK,
609 	PROTO_LOCK,
610 	OTHER_LOCK,
611 	LOCK_LABELS
612 };
613 struct lock_stat {
614 	uint64_t nsec_wait_for_write;
615 	uint64_t nsec_wait_for_read;
616 	uint64_t num_write_locked;
617 	uint64_t num_write_unlocked;
618 	uint64_t num_read_locked;
619 	uint64_t num_read_unlocked;
620 };
621 
622 extern struct lock_stat lock_stats[LOCK_LABELS];
623 
624 #define __HA_SPINLOCK_T      unsigned long
625 
626 #define __SPIN_INIT(l)         ({ (*l) = 0; })
627 #define __SPIN_DESTROY(l)      ({ (*l) = 0; })
628 #define __SPIN_LOCK(l)         pl_take_s(l)
629 #define __SPIN_TRYLOCK(l)      (!pl_try_s(l))
630 #define __SPIN_UNLOCK(l)       pl_drop_s(l)
631 
632 #define __HA_RWLOCK_T		unsigned long
633 
634 #define __RWLOCK_INIT(l)          ({ (*l) = 0; })
635 #define __RWLOCK_DESTROY(l)       ({ (*l) = 0; })
636 #define __RWLOCK_WRLOCK(l)        pl_take_w(l)
637 #define __RWLOCK_TRYWRLOCK(l)     (!pl_try_w(l))
638 #define __RWLOCK_WRUNLOCK(l)      pl_drop_w(l)
639 #define __RWLOCK_RDLOCK(l)        pl_take_r(l)
640 #define __RWLOCK_TRYRDLOCK(l)     (!pl_try_r(l))
641 #define __RWLOCK_RDUNLOCK(l)      pl_drop_r(l)
642 
643 #define HA_SPINLOCK_T       struct ha_spinlock
644 
645 #define HA_SPIN_INIT(l)        __spin_init(l)
646 #define HA_SPIN_DESTROY(l)      __spin_destroy(l)
647 
648 #define HA_SPIN_LOCK(lbl, l)    __spin_lock(lbl, l, __func__, __FILE__, __LINE__)
649 #define HA_SPIN_TRYLOCK(lbl, l) __spin_trylock(lbl, l, __func__, __FILE__, __LINE__)
650 #define HA_SPIN_UNLOCK(lbl, l)  __spin_unlock(lbl, l, __func__, __FILE__, __LINE__)
651 
652 #define HA_RWLOCK_T         struct ha_rwlock
653 
654 #define HA_RWLOCK_INIT(l)          __ha_rwlock_init((l))
655 #define HA_RWLOCK_DESTROY(l)       __ha_rwlock_destroy((l))
656 #define HA_RWLOCK_WRLOCK(lbl,l)    __ha_rwlock_wrlock(lbl, l, __func__, __FILE__, __LINE__)
657 #define HA_RWLOCK_TRYWRLOCK(lbl,l) __ha_rwlock_trywrlock(lbl, l, __func__, __FILE__, __LINE__)
658 #define HA_RWLOCK_WRUNLOCK(lbl,l)  __ha_rwlock_wrunlock(lbl, l, __func__, __FILE__, __LINE__)
659 #define HA_RWLOCK_RDLOCK(lbl,l)    __ha_rwlock_rdlock(lbl, l)
660 #define HA_RWLOCK_TRYRDLOCK(lbl,l) __ha_rwlock_tryrdlock(lbl, l)
661 #define HA_RWLOCK_RDUNLOCK(lbl,l)  __ha_rwlock_rdunlock(lbl, l)
662 
663 struct ha_spinlock {
664 	__HA_SPINLOCK_T lock;
665 	struct {
666 		unsigned long owner; /* a bit is set to 1 << tid for the lock owner */
667 		unsigned long waiters; /* a bit is set to 1 << tid for waiting threads  */
668 		struct {
669 			const char *function;
670 			const char *file;
671 			int line;
672 		} last_location; /* location of the last owner */
673 	} info;
674 };
675 
676 struct ha_rwlock {
677 	__HA_RWLOCK_T lock;
678 	struct {
679 		unsigned long cur_writer; /* a bit is set to 1 << tid for the lock owner */
680 		unsigned long wait_writers; /* a bit is set to 1 << tid for waiting writers */
681 		unsigned long cur_readers; /* a bit is set to 1 << tid for current readers */
682 		unsigned long wait_readers; /* a bit is set to 1 << tid for waiting readers */
683 		struct {
684 			const char *function;
685 			const char *file;
686 			int line;
687 		} last_location; /* location of the last write owner */
688 	} info;
689 };
690 
691 static inline const char *lock_label(enum lock_label label)
692 {
693 	switch (label) {
694 	case FD_LOCK:              return "FD";
695 	case TASK_RQ_LOCK:         return "TASK_RQ";
696 	case TASK_WQ_LOCK:         return "TASK_WQ";
697 	case POOL_LOCK:            return "POOL";
698 	case LISTENER_LOCK:        return "LISTENER";
699 	case PROXY_LOCK:           return "PROXY";
700 	case SERVER_LOCK:          return "SERVER";
701 	case LBPRM_LOCK:           return "LBPRM";
702 	case SIGNALS_LOCK:         return "SIGNALS";
703 	case STK_TABLE_LOCK:       return "STK_TABLE";
704 	case STK_SESS_LOCK:        return "STK_SESS";
705 	case APPLETS_LOCK:         return "APPLETS";
706 	case PEER_LOCK:            return "PEER";
707 	case BUF_WQ_LOCK:          return "BUF_WQ";
708 	case STRMS_LOCK:           return "STRMS";
709 	case SSL_LOCK:             return "SSL";
710 	case SSL_GEN_CERTS_LOCK:   return "SSL_GEN_CERTS";
711 	case PATREF_LOCK:          return "PATREF";
712 	case PATEXP_LOCK:          return "PATEXP";
713 	case VARS_LOCK:            return "VARS";
714 	case COMP_POOL_LOCK:       return "COMP_POOL";
715 	case LUA_LOCK:             return "LUA";
716 	case NOTIF_LOCK:           return "NOTIF";
717 	case SPOE_APPLET_LOCK:     return "SPOE_APPLET";
718 	case DNS_LOCK:             return "DNS";
719 	case PID_LIST_LOCK:        return "PID_LIST";
720 	case EMAIL_ALERTS_LOCK:    return "EMAIL_ALERTS";
721 	case PIPES_LOCK:           return "PIPES";
722 	case TLSKEYS_REF_LOCK:     return "TLSKEYS_REF";
723 	case AUTH_LOCK:            return "AUTH";
724 	case LOGSRV_LOCK:          return "LOGSRV";
725 	case DICT_LOCK:            return "DICT";
726 	case PROTO_LOCK:           return "PROTO";
727 	case OTHER_LOCK:           return "OTHER";
728 	case LOCK_LABELS:          break; /* keep compiler happy */
729 	};
730 	/* only way to come here is consecutive to an internal bug */
731 	abort();
732 }
733 
734 static inline void show_lock_stats()
735 {
736 	int lbl;
737 
738 	for (lbl = 0; lbl < LOCK_LABELS; lbl++) {
739 		fprintf(stderr,
740 			"Stats about Lock %s: \n"
741 			"\t # write lock  : %lu\n"
742 			"\t # write unlock: %lu (%ld)\n"
743 			"\t # wait time for write     : %.3f msec\n"
744 			"\t # wait time for write/lock: %.3f nsec\n"
745 			"\t # read lock   : %lu\n"
746 			"\t # read unlock : %lu (%ld)\n"
747 			"\t # wait time for read      : %.3f msec\n"
748 			"\t # wait time for read/lock : %.3f nsec\n",
749 			lock_label(lbl),
750 			lock_stats[lbl].num_write_locked,
751 			lock_stats[lbl].num_write_unlocked,
752 			lock_stats[lbl].num_write_unlocked - lock_stats[lbl].num_write_locked,
753 			(double)lock_stats[lbl].nsec_wait_for_write / 1000000.0,
754 			lock_stats[lbl].num_write_locked ? ((double)lock_stats[lbl].nsec_wait_for_write / (double)lock_stats[lbl].num_write_locked) : 0,
755 			lock_stats[lbl].num_read_locked,
756 			lock_stats[lbl].num_read_unlocked,
757 			lock_stats[lbl].num_read_unlocked - lock_stats[lbl].num_read_locked,
758 			(double)lock_stats[lbl].nsec_wait_for_read / 1000000.0,
759 			lock_stats[lbl].num_read_locked ? ((double)lock_stats[lbl].nsec_wait_for_read / (double)lock_stats[lbl].num_read_locked) : 0);
760 	}
761 }
762 
763 /* The following functions are used to collect some stats about locks. We wrap
764  * the locking functions to know how much time we wait on a lock. */
765 
766 static uint64_t nsec_now(void) {
767         struct timespec ts;
768 
769         clock_gettime(CLOCK_MONOTONIC, &ts);
770         return ((uint64_t) ts.tv_sec * 1000000000ULL +
771                 (uint64_t) ts.tv_nsec);
772 }
773 
774 static inline void __ha_rwlock_init(struct ha_rwlock *l)
775 {
776 	memset(l, 0, sizeof(struct ha_rwlock));
777 	__RWLOCK_INIT(&l->lock);
778 }
779 
780 static inline void __ha_rwlock_destroy(struct ha_rwlock *l)
781 {
782 	__RWLOCK_DESTROY(&l->lock);
783 	memset(l, 0, sizeof(struct ha_rwlock));
784 }
785 
786 
787 static inline void __ha_rwlock_wrlock(enum lock_label lbl, struct ha_rwlock *l,
788 				      const char *func, const char *file, int line)
789 {
790 	uint64_t start_time;
791 
792 	if (unlikely(l->info.cur_writer & tid_bit)) {
793 		/* the thread is already owning the lock for write */
794 		abort();
795 	}
796 
797 	if (unlikely(l->info.cur_readers & tid_bit)) {
798 		/* the thread is already owning the lock for read */
799 		abort();
800 	}
801 
802 	HA_ATOMIC_OR(&l->info.wait_writers, tid_bit);
803 
804 	start_time = nsec_now();
805 	__RWLOCK_WRLOCK(&l->lock);
806 	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (nsec_now() - start_time));
807 
808 	HA_ATOMIC_ADD(&lock_stats[lbl].num_write_locked, 1);
809 
810 	l->info.cur_writer             = tid_bit;
811 	l->info.last_location.function = func;
812 	l->info.last_location.file     = file;
813 	l->info.last_location.line     = line;
814 
815 	HA_ATOMIC_AND(&l->info.wait_writers, ~tid_bit);
816 }
817 
818 static inline int __ha_rwlock_trywrlock(enum lock_label lbl, struct ha_rwlock *l,
819 				        const char *func, const char *file, int line)
820 {
821 	uint64_t start_time;
822 	int r;
823 
824 	if (unlikely(l->info.cur_writer & tid_bit)) {
825 		/* the thread is already owning the lock for write */
826 		abort();
827 	}
828 
829 	if (unlikely(l->info.cur_readers & tid_bit)) {
830 		/* the thread is already owning the lock for read */
831 		abort();
832 	}
833 
834 	/* We set waiting writer because trywrlock could wait for readers to quit */
835 	HA_ATOMIC_OR(&l->info.wait_writers, tid_bit);
836 
837 	start_time = nsec_now();
838 	r = __RWLOCK_TRYWRLOCK(&l->lock);
839 	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (nsec_now() - start_time));
840 	if (unlikely(r)) {
841 		HA_ATOMIC_AND(&l->info.wait_writers, ~tid_bit);
842 		return r;
843 	}
844 	HA_ATOMIC_ADD(&lock_stats[lbl].num_write_locked, 1);
845 
846 	l->info.cur_writer             = tid_bit;
847 	l->info.last_location.function = func;
848 	l->info.last_location.file     = file;
849 	l->info.last_location.line     = line;
850 
851 	HA_ATOMIC_AND(&l->info.wait_writers, ~tid_bit);
852 
853 	return 0;
854 }
855 
856 static inline void __ha_rwlock_wrunlock(enum lock_label lbl,struct ha_rwlock *l,
857 				        const char *func, const char *file, int line)
858 {
859 	if (unlikely(!(l->info.cur_writer & tid_bit))) {
860 		/* the thread is not owning the lock for write */
861 		abort();
862 	}
863 
864 	l->info.cur_writer             = 0;
865 	l->info.last_location.function = func;
866 	l->info.last_location.file     = file;
867 	l->info.last_location.line     = line;
868 
869 	__RWLOCK_WRUNLOCK(&l->lock);
870 
871 	HA_ATOMIC_ADD(&lock_stats[lbl].num_write_unlocked, 1);
872 }
873 
874 static inline void __ha_rwlock_rdlock(enum lock_label lbl,struct ha_rwlock *l)
875 {
876 	uint64_t start_time;
877 
878 	if (unlikely(l->info.cur_writer & tid_bit)) {
879 		/* the thread is already owning the lock for write */
880 		abort();
881 	}
882 
883 	if (unlikely(l->info.cur_readers & tid_bit)) {
884 		/* the thread is already owning the lock for read */
885 		abort();
886 	}
887 
888 	HA_ATOMIC_OR(&l->info.wait_readers, tid_bit);
889 
890 	start_time = nsec_now();
891 	__RWLOCK_RDLOCK(&l->lock);
892 	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, (nsec_now() - start_time));
893 	HA_ATOMIC_ADD(&lock_stats[lbl].num_read_locked, 1);
894 
895 	HA_ATOMIC_OR(&l->info.cur_readers, tid_bit);
896 
897 	HA_ATOMIC_AND(&l->info.wait_readers, ~tid_bit);
898 }
899 
900 static inline int __ha_rwlock_tryrdlock(enum lock_label lbl,struct ha_rwlock *l)
901 {
902 	int r;
903 
904 	if (unlikely(l->info.cur_writer & tid_bit)) {
905 		/* the thread is already owning the lock for write */
906 		abort();
907 	}
908 
909 	if (unlikely(l->info.cur_readers & tid_bit)) {
910 		/* the thread is already owning the lock for read */
911 		abort();
912 	}
913 
914 	/* try read should never wait */
915 	r = __RWLOCK_TRYRDLOCK(&l->lock);
916 	if (unlikely(r))
917 		return r;
918 	HA_ATOMIC_ADD(&lock_stats[lbl].num_read_locked, 1);
919 
920 	HA_ATOMIC_OR(&l->info.cur_readers, tid_bit);
921 
922 	return 0;
923 }
924 
925 static inline void __ha_rwlock_rdunlock(enum lock_label lbl,struct ha_rwlock *l)
926 {
927 	if (unlikely(!(l->info.cur_readers & tid_bit))) {
928 		/* the thread is not owning the lock for read */
929 		abort();
930 	}
931 
932 	HA_ATOMIC_AND(&l->info.cur_readers, ~tid_bit);
933 
934 	__RWLOCK_RDUNLOCK(&l->lock);
935 
936 	HA_ATOMIC_ADD(&lock_stats[lbl].num_read_unlocked, 1);
937 }
938 
939 static inline void __spin_init(struct ha_spinlock *l)
940 {
941 	memset(l, 0, sizeof(struct ha_spinlock));
942 	__SPIN_INIT(&l->lock);
943 }
944 
945 static inline void __spin_destroy(struct ha_spinlock *l)
946 {
947 	__SPIN_DESTROY(&l->lock);
948 	memset(l, 0, sizeof(struct ha_spinlock));
949 }
950 
951 static inline void __spin_lock(enum lock_label lbl, struct ha_spinlock *l,
952 			      const char *func, const char *file, int line)
953 {
954 	uint64_t start_time;
955 
956 	if (unlikely(l->info.owner & tid_bit)) {
957 		/* the thread is already owning the lock */
958 		abort();
959 	}
960 
961 	HA_ATOMIC_OR(&l->info.waiters, tid_bit);
962 
963 	start_time = nsec_now();
964 	__SPIN_LOCK(&l->lock);
965 	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (nsec_now() - start_time));
966 
967 	HA_ATOMIC_ADD(&lock_stats[lbl].num_write_locked, 1);
968 
969 
970 	l->info.owner                  = tid_bit;
971 	l->info.last_location.function = func;
972 	l->info.last_location.file     = file;
973 	l->info.last_location.line     = line;
974 
975 	HA_ATOMIC_AND(&l->info.waiters, ~tid_bit);
976 }
977 
978 static inline int __spin_trylock(enum lock_label lbl, struct ha_spinlock *l,
979 				 const char *func, const char *file, int line)
980 {
981 	int r;
982 
983 	if (unlikely(l->info.owner & tid_bit)) {
984 		/* the thread is already owning the lock */
985 		abort();
986 	}
987 
988 	/* the trylock should never wait */
989 	r = __SPIN_TRYLOCK(&l->lock);
990 	if (unlikely(r))
991 		return r;
992 	HA_ATOMIC_ADD(&lock_stats[lbl].num_write_locked, 1);
993 
994 	l->info.owner                  = tid_bit;
995 	l->info.last_location.function = func;
996 	l->info.last_location.file     = file;
997 	l->info.last_location.line     = line;
998 
999 	return 0;
1000 }
1001 
1002 static inline void __spin_unlock(enum lock_label lbl, struct ha_spinlock *l,
1003 				 const char *func, const char *file, int line)
1004 {
1005 	if (unlikely(!(l->info.owner & tid_bit))) {
1006 		/* the thread is not owning the lock */
1007 		abort();
1008 	}
1009 
1010 	l->info.owner                  = 0;
1011 	l->info.last_location.function = func;
1012 	l->info.last_location.file     = file;
1013 	l->info.last_location.line     = line;
1014 
1015 	__SPIN_UNLOCK(&l->lock);
1016 	HA_ATOMIC_ADD(&lock_stats[lbl].num_write_unlocked, 1);
1017 }
1018 
1019 #else /* DEBUG_THREAD */
1020 
1021 #define HA_SPINLOCK_T        unsigned long
1022 
1023 #define HA_SPIN_INIT(l)         ({ (*l) = 0; })
1024 #define HA_SPIN_DESTROY(l)      ({ (*l) = 0; })
1025 #define HA_SPIN_LOCK(lbl, l)    pl_take_s(l)
1026 #define HA_SPIN_TRYLOCK(lbl, l) (!pl_try_s(l))
1027 #define HA_SPIN_UNLOCK(lbl, l)  pl_drop_s(l)
1028 
1029 #define HA_RWLOCK_T		unsigned long
1030 
1031 #define HA_RWLOCK_INIT(l)          ({ (*l) = 0; })
1032 #define HA_RWLOCK_DESTROY(l)       ({ (*l) = 0; })
1033 #define HA_RWLOCK_WRLOCK(lbl,l)    pl_take_w(l)
1034 #define HA_RWLOCK_TRYWRLOCK(lbl,l) (!pl_try_w(l))
1035 #define HA_RWLOCK_WRUNLOCK(lbl,l)  pl_drop_w(l)
1036 #define HA_RWLOCK_RDLOCK(lbl,l)    pl_take_r(l)
1037 #define HA_RWLOCK_TRYRDLOCK(lbl,l) (!pl_try_r(l))
1038 #define HA_RWLOCK_RDUNLOCK(lbl,l)  pl_drop_r(l)
1039 
1040 #endif  /* DEBUG_THREAD */
1041 
1042 #ifdef __x86_64__
1043 
1044 static __inline int
1045 __ha_cas_dw(void *target, void *compare, const void *set)
1046 {
1047         char ret;
1048 
1049         __asm __volatile("lock cmpxchg16b %0; setz %3"
1050                           : "+m" (*(void **)target),
1051                             "=a" (((void **)compare)[0]),
1052                             "=d" (((void **)compare)[1]),
1053                             "=q" (ret)
1054                           : "a" (((void **)compare)[0]),
1055                             "d" (((void **)compare)[1]),
1056                             "b" (((const void **)set)[0]),
1057                             "c" (((const void **)set)[1])
1058                           : "memory", "cc");
1059         return (ret);
1060 }
1061 
1062 /* Use __ha_barrier_atomic* when you're trying to protect data that are
1063  * modified using HA_ATOMIC* (except HA_ATOMIC_STORE)
1064  */
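/* For instance (illustration only, <shared_flags>, <FLAG_READY>, <shared_ptr>
 * and <p> are hypothetical): data updated with a relaxed atomic RMW only
 * needs the lighter "atomic" barrier, while a value written with
 * HA_ATOMIC_STORE/_HA_ATOMIC_STORE is paired with the regular store barrier.
 *
 *     _HA_ATOMIC_OR(&shared_flags, FLAG_READY);
 *     __ha_barrier_atomic_store();
 *
 *     _HA_ATOMIC_STORE(&shared_ptr, p);
 *     __ha_barrier_store();
 */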
1065 static __inline void
1066 __ha_barrier_atomic_load(void)
1067 {
1068 	__asm __volatile("" ::: "memory");
1069 }
1070 
1071 static __inline void
1072 __ha_barrier_atomic_store(void)
1073 {
1074 	__asm __volatile("" ::: "memory");
1075 }
1076 
1077 static __inline void
1078 __ha_barrier_atomic_full(void)
1079 {
1080 	__asm __volatile("" ::: "memory");
1081 }
1082 
1083 static __inline void
1084 __ha_barrier_load(void)
1085 {
1086 	__asm __volatile("lfence" ::: "memory");
1087 }
1088 
1089 static __inline void
1090 __ha_barrier_store(void)
1091 {
1092 	__asm __volatile("sfence" ::: "memory");
1093 }
1094 
1095 static __inline void
1096 __ha_barrier_full(void)
1097 {
1098 	__asm __volatile("mfence" ::: "memory");
1099 }
1100 
1101 #elif defined(__arm__) && (defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__))
1102 
1103 /* Use __ha_barrier_atomic* when you're trying to protect data that are
1104  * modified using HA_ATOMIC* (except HA_ATOMIC_STORE)
1105  */
1106 static __inline void
1107 __ha_barrier_atomic_load(void)
1108 {
1109 	__asm __volatile("dmb" ::: "memory");
1110 }
1111 
1112 static __inline void
1113 __ha_barrier_atomic_store(void)
1114 {
1115 	__asm __volatile("dsb" ::: "memory");
1116 }
1117 
1118 static __inline void
1119 __ha_barrier_atomic_full(void)
1120 {
1121 	__asm __volatile("dmb" ::: "memory");
1122 }
1123 
1124 static __inline void
1125 __ha_barrier_load(void)
1126 {
1127 	__asm __volatile("dmb" ::: "memory");
1128 }
1129 
1130 static __inline void
1131 __ha_barrier_store(void)
1132 {
1133 	__asm __volatile("dsb" ::: "memory");
1134 }
1135 
1136 static __inline void
1137 __ha_barrier_full(void)
1138 {
1139 	__asm __volatile("dmb" ::: "memory");
1140 }
1141 
1142 static __inline int __ha_cas_dw(void *target, void *compare, const void *set)
1143 {
1144 	uint64_t previous;
1145 	int tmp;
1146 
1147 	__asm __volatile("1:"
1148 	                 "ldrexd %0, [%4];"
1149 			 "cmp %Q0, %Q2;"
1150 			 "ittt eq;"
1151 			 "cmpeq %R0, %R2;"
1152 			 "strexdeq %1, %3, [%4];"
1153 			 "cmpeq %1, #1;"
1154 			 "beq 1b;"
1155 			 : "=&r" (previous), "=&r" (tmp)
1156 			 : "r" (*(uint64_t *)compare), "r" (*(uint64_t *)set), "r" (target)
1157 			 : "memory", "cc");
1158 	tmp = (previous == *(uint64_t *)compare);
1159 	*(uint64_t *)compare = previous;
1160 	return (tmp);
1161 }
1162 
1163 #elif defined (__aarch64__)
1164 
1165 /* Use __ha_barrier_atomic* when you're trying to protect data that are
1166  * modified using HA_ATOMIC* (except HA_ATOMIC_STORE)
1167  */
1168 static __inline void
1169 __ha_barrier_atomic_load(void)
1170 {
1171 	__asm __volatile("dmb ishld" ::: "memory");
1172 }
1173 
1174 static __inline void
1175 __ha_barrier_atomic_store(void)
1176 {
1177 	__asm __volatile("dmb ishst" ::: "memory");
1178 }
1179 
1180 static __inline void
1181 __ha_barrier_atomic_full(void)
1182 {
1183 	__asm __volatile("dmb ish" ::: "memory");
1184 }
1185 
1186 static __inline void
1187 __ha_barrier_load(void)
1188 {
1189 	__asm __volatile("dmb ishld" ::: "memory");
1190 }
1191 
1192 static __inline void
1193 __ha_barrier_store(void)
1194 {
1195 	__asm __volatile("dmb ishst" ::: "memory");
1196 }
1197 
1198 static __inline void
1199 __ha_barrier_full(void)
1200 {
1201 	__asm __volatile("dmb ish" ::: "memory");
1202 }
1203 
1204 static __inline int __ha_cas_dw(void *target, void *compare, void *set)
1205 {
1206 	void *value[2];
1207 	uint64_t tmp1, tmp2;
1208 
1209 	__asm__ __volatile__("1:"
1210                              "ldxp %0, %1, [%4]\n"
1211                              "mov %2, %0\n"
1212                              "mov %3, %1\n"
1213                              "eor %0, %0, %5\n"
1214                              "eor %1, %1, %6\n"
1215                              "orr %1, %0, %1\n"
1216                              "mov %w0, #0\n"
1217                              "cbnz %1, 2f\n"
1218                              "stxp %w0, %7, %8, [%4]\n"
1219                              "cbnz %w0, 1b\n"
1220                              "mov %w0, #1\n"
1221                              "2:"
1222                              : "=&r" (tmp1), "=&r" (tmp2), "=&r" (value[0]), "=&r" (value[1])
1223                              : "r" (target), "r" (((void **)(compare))[0]), "r" (((void **)(compare))[1]), "r" (((void **)(set))[0]), "r" (((void **)(set))[1])
1224                              : "cc", "memory");
1225 
1226 	memcpy(compare, &value, sizeof(value));
1227         return (tmp1);
1228 }
1229 
1230 #else
1231 #define __ha_barrier_atomic_load __sync_synchronize
1232 #define __ha_barrier_atomic_store __sync_synchronize
1233 #define __ha_barrier_atomic_full __sync_synchronize
1234 #define __ha_barrier_load __sync_synchronize
1235 #define __ha_barrier_store __sync_synchronize
1236 #define __ha_barrier_full __sync_synchronize
1237 #endif
1238 
1239 void ha_spin_init(HA_SPINLOCK_T *l);
1240 void ha_rwlock_init(HA_RWLOCK_T *l);
1241 
1242 #endif /* USE_THREAD */
1243 
1244 extern int thread_cpus_enabled_at_boot;
1245 
1246 static inline void __ha_compiler_barrier(void)
1247 {
1248 	__asm __volatile("" ::: "memory");
1249 }
1250 
1251 int parse_nbthread(const char *arg, char **err);
1252 int thread_get_default_count();
1253 
1254 #ifndef _HA_ATOMIC_CAS
1255 #define _HA_ATOMIC_CAS HA_ATOMIC_CAS
1256 #endif /* !_HA_ATOMIC_CAS */
1257 
1258 #ifndef _HA_ATOMIC_DWCAS
1259 #define _HA_ATOMIC_DWCAS HA_ATOMIC_DWCAS
1260 #endif /* !_HA_ATOMIC_DWCAS */
1261 
1262 #ifndef _HA_ATOMIC_ADD
1263 #define _HA_ATOMIC_ADD HA_ATOMIC_ADD
1264 #endif /* !_HA_ATOMIC_ADD */
1265 
1266 #ifndef _HA_ATOMIC_XADD
1267 #define _HA_ATOMIC_XADD HA_ATOMIC_XADD
1268 #endif /* !_HA_ATOMIC_XADD */
1269 
1270 #ifndef _HA_ATOMIC_SUB
1271 #define _HA_ATOMIC_SUB HA_ATOMIC_SUB
1272 #endif /* !_HA_ATOMIC_SUB */
1273 
1274 #ifndef _HA_ATOMIC_AND
1275 #define _HA_ATOMIC_AND HA_ATOMIC_AND
1276 #endif /* !_HA_ATOMIC_AND */
1277 
1278 #ifndef _HA_ATOMIC_OR
1279 #define _HA_ATOMIC_OR HA_ATOMIC_OR
1280 #endif /* !_HA_ATOMIC_OR */
1281 
1282 #ifndef _HA_ATOMIC_XCHG
1283 #define _HA_ATOMIC_XCHG HA_ATOMIC_XCHG
1284 #endif /* !_HA_ATOMIC_XCHG */
1285 
1286 #ifndef _HA_ATOMIC_STORE
1287 #define _HA_ATOMIC_STORE HA_ATOMIC_STORE
1288 #endif /* !_HA_ATOMIC_STORE */
1289 
1290 #ifndef _HA_ATOMIC_LOAD
1291 #define _HA_ATOMIC_LOAD HA_ATOMIC_LOAD
1292 #endif /* !_HA_ATOMIC_LOAD */
1293 #endif /* _COMMON_HATHREADS_H */
1294