1 /*
2 * %CopyrightBegin%
3 *
4 * Copyright Ericsson AB 2010-2016. All Rights Reserved.
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * %CopyrightEnd%
19 */
20
21 /*
22 * Description: Mutex, rwmutex and condition variable implementation
23 * Author: Rickard Green
24 */
25
26 /*
27 * IMPORTANT note about ethr_cond_signal() and ethr_cond_broadcast()
28 *
29 * POSIX allow a call to `pthread_cond_signal' or `pthread_cond_broadcast'
30 * even though the associated mutex/mutexes isn't/aren't locked by the
31 * caller. We do not allow that by default in order to avoid a performance
32 * penalty on some platforms.
33 *
34 * Mutexes and condition variables can, however, be initialized as POSIX
35 * compliant. When initialized as such ethr_cond_signal(), and
36 * ethr_cond_broadcast() are allowed to be called even though the associated
37 * mutexes aren't locked. This will, however, incur a performance penalty on
38 * some platforms.
39 *
40 * POSIX compliant mutexes and condition variables *need* to be used together.
41 */
42
43 #ifndef ETHR_MUTEX_H__
44 #define ETHR_MUTEX_H__
45
/* Magic tag values; presumably stored in the `initialized' field of each
 * structure when extra checking (ETHR_XCHK) is enabled -- confirm in impl. */
#define ETHR_RWMUTEX_INITIALIZED 0x99999999
#define ETHR_MUTEX_INITIALIZED 0x77777777
#define ETHR_COND_INITIALIZED 0x55555555
49
50 #if 0
51 # define ETHR_MTX_HARD_DEBUG
52 #endif
53
54 #if 0
55 # define ETHR_MTX_CHK_EXCL
56 #if 1
57 # define ETHR_MTX_CHK_NON_EXCL
58 #endif
59 #endif
60
61 /* #define ETHR_DBG_WIN_MTX_WITH_PTHREADS */
62 #ifdef ETHR_DBG_WIN_MTX_WITH_PTHREADS
63 typedef pthread_mutex_t CRITICAL_SECTION;
64 int TryEnterCriticalSection(CRITICAL_SECTION *);
65 void EnterCriticalSection(CRITICAL_SECTION *);
66 void LeaveCriticalSection(CRITICAL_SECTION *);
67 #endif
68
69 #ifdef ETHR_MTX_HARD_DEBUG
70 # ifdef __GNUC__
71 # warning ETHR_MTX_HARD_DEBUG
72 # endif
73 /*# define ETHR_MTX_HARD_DEBUG_LFS*/
74 /*# define ETHR_MTX_HARD_DEBUG_FENCE*/
75 /*# define ETHR_MTX_HARD_DEBUG_Q*/
76 # define ETHR_MTX_HARD_DEBUG_WSQ
77
78 # if !defined(ETHR_MTX_HARD_DEBUG_WSQ) && defined(ETHR_MTX_HARD_DEBUG_Q)
79 # define ETHR_MTX_HARD_DEBUG_WSQ
80 # endif
81 #endif
82
/* Name mapping for the inline lock functions below; defaults to identity.
 * Presumably overridden by the implementation file to emit real symbols. */
#ifndef ETHR_INLINE_MTX_FUNC_NAME_
#  define ETHR_INLINE_MTX_FUNC_NAME_(X) X
#endif
86
87 #if defined(ETHR_USE_OWN_RWMTX_IMPL__) || defined(ETHR_USE_OWN_MTX_IMPL__)
88
89 #ifdef ETHR_DEBUG
90 # ifndef ETHR_MTX_CHK_EXCL
91 # define ETHR_MTX_CHK_EXCL
92 # endif
93 # ifndef ETHR_MTX_CHK_NON_EXCL
94 # define ETHR_MTX_CHK_NON_EXCL
95 # endif
96 #endif
97
/* Select the low-level lock type used for the `qlck' queue-lock fields. */
#if 0
#  define ETHR_MTX_Q_LOCK_SPINLOCK__
#  define ETHR_MTX_QLOCK_TYPE__ ethr_spinlock_t
#elif defined(ETHR_PTHREADS)
#  define ETHR_MTX_Q_LOCK_PTHREAD_MUTEX__
#  define ETHR_MTX_QLOCK_TYPE__ pthread_mutex_t
#elif defined(ETHR_WIN32_THREADS)
#  define ETHR_MTX_Q_LOCK_CRITICAL_SECTION__
#  define ETHR_MTX_QLOCK_TYPE__ CRITICAL_SECTION
#else
#  error Need a qlock implementation
#endif
110
/*
 * Bits of the 32-bit atomic `flgs' word in struct ethr_mutex_base_ (below).
 */
#define ETHR_RWMTX_W_FLG__ ((ethr_sint32_t) (1U << 31))		/* write locked */
#define ETHR_RWMTX_W_WAIT_FLG__ ((ethr_sint32_t) (1U << 30))	/* writer(s) queued */
#define ETHR_RWMTX_R_WAIT_FLG__ ((ethr_sint32_t) (1U << 29))	/* reader(s) queued */

/* frequent read kind */
#define ETHR_RWMTX_R_FLG__ ((ethr_sint32_t) (1U << 28))
#define ETHR_RWMTX_R_ABRT_UNLCK_FLG__ ((ethr_sint32_t) (1U << 27))
#define ETHR_RWMTX_R_PEND_UNLCK_MASK__ (ETHR_RWMTX_R_ABRT_UNLCK_FLG__ - 1)

/* normal kind */
#define ETHR_RWMTX_RS_MASK__ (ETHR_RWMTX_R_WAIT_FLG__ - 1)

#define ETHR_RWMTX_WAIT_FLGS__ \
  (ETHR_RWMTX_W_WAIT_FLG__|ETHR_RWMTX_R_WAIT_FLG__)

/* Condition variables reuse the reader-wait bit for queued waiters. */
#define ETHR_CND_WAIT_FLG__ ETHR_RWMTX_R_WAIT_FLG__

#ifdef ETHR_DEBUG
/* Assert that no bits outside the defined flag/mask set are lit in V. */
#define ETHR_DBG_CHK_UNUSED_FLG_BITS(V) \
  ETHR_ASSERT(!((V) & ~(ETHR_RWMTX_W_FLG__ \
			| ETHR_RWMTX_W_WAIT_FLG__ \
			| ETHR_RWMTX_R_WAIT_FLG__ \
			| ETHR_RWMTX_RS_MASK__)))
#else
#define ETHR_DBG_CHK_UNUSED_FLG_BITS(V)
#endif

#define ETHR_MTX_DBG_CHK_UNUSED_FLG_BITS(MTX) \
  ETHR_DBG_CHK_UNUSED_FLG_BITS(ethr_atomic32_read(&(MTX)->mtxb.flgs))
140
/*
 * Common base embedded first in the home-grown ethr_mutex and
 * ethr_rwmutex so the ETHR_MTX_* debug macros can reach either
 * through ->mtxb.
 */
struct ethr_mutex_base_ {
#ifdef ETHR_MTX_HARD_DEBUG_FENCE
    long pre_fence;		/* guard word: ETHR_MTX_HARD_DEBUG_PRE_FENCE */
#endif
    ethr_atomic32_t flgs;	/* lock state: ETHR_RWMTX_*_FLG__ bits */
    short aux_scnt;		/* spin count for aux threads (see *_opt) */
    short main_scnt;		/* spin count for main threads (see *_opt) */
    ETHR_MTX_QLOCK_TYPE__ qlck;	/* lock for the waiter queue below */
    ethr_ts_event *q;		/* queue of waiting threads' ts-events */
#ifdef ETHR_MTX_HARD_DEBUG_WSQ
    int ws;			/* hard-debug wait/state counter -- see impl */
#endif
#ifdef ETHR_MTX_CHK_EXCL
    ethr_atomic32_t exclusive;	/* debug: non-zero while write-owned */
#endif
#ifdef ETHR_MTX_CHK_NON_EXCL
    ethr_atomic32_t non_exclusive; /* debug: count of read owners */
#endif
#ifdef ETHR_MTX_HARD_DEBUG_LFS
    ethr_atomic32_t hdbg_lfs;	/* debug lock-state counter (see LFS macros) */
#endif
};
163
164 #endif
165
/* Mutex creation options; -1 spin counts mean "use default" (see
 * ETHR_MUTEX_OPT_DEFAULT_INITER). */
typedef struct {
    int main_spincount;		/* spin count for main threads; -1 == default */
    int aux_spincount;		/* spin count for aux threads; -1 == default */
    int posix_compliant;	/* non-zero: signal/broadcast allowed without
				 * holding the mutex (see note at top of file) */
} ethr_mutex_opt;

#define ETHR_MUTEX_OPT_DEFAULT_INITER {-1, -1, 0}
173
/* Condition variable creation options; same meaning as ethr_mutex_opt.
 * POSIX compliant conds must be paired with POSIX compliant mutexes. */
typedef struct {
    int main_spincount;		/* spin count for main threads; -1 == default */
    int aux_spincount;		/* spin count for aux threads; -1 == default */
    int posix_compliant;	/* non-zero: POSIX compliant signal/broadcast */
} ethr_cond_opt;

#define ETHR_COND_OPT_DEFAULT_INITER {-1, -1, 0}
181
182 #ifdef ETHR_USE_OWN_MTX_IMPL__
183
/* Mutex, home-grown implementation. */
typedef struct ethr_mutex_ ethr_mutex;
struct ethr_mutex_ {
    struct ethr_mutex_base_ mtxb;	/* shared lock machinery */
#ifdef ETHR_MTX_HARD_DEBUG_FENCE
    long post_fence;			/* trailing guard word */
#endif
#if ETHR_XCHK
    int initialized;			/* presumably ETHR_MUTEX_INITIALIZED
					 * when valid -- confirm in impl */
#endif
};
194
/* Condition variable, home-grown implementation. */
typedef struct ethr_cond_ ethr_cond;
struct ethr_cond_ {
#ifdef ETHR_MTX_HARD_DEBUG_FENCE
    struct {
	long pre_fence;
    } mtxb; /* mtxb allows us to use same macro as for mutex and rwmutex... */
#endif
    ETHR_MTX_QLOCK_TYPE__ qlck;		/* lock for the waiter queue below */
    ethr_ts_event *q;			/* queue of waiting threads' ts-events */
    short aux_scnt;			/* spin count for aux threads */
    short main_scnt;			/* spin count for main threads */
#ifdef ETHR_MTX_HARD_DEBUG_FENCE
    long post_fence;			/* trailing guard word */
#endif
#if ETHR_XCHK
    int initialized;			/* presumably ETHR_COND_INITIALIZED
					 * when valid -- confirm in impl */
#endif
};
213
214 #elif defined(ETHR_PTHREADS) && !defined(ETHR_DBG_WIN_MTX_WITH_PTHREADS)
215
/* Mutex and cond as thin wrappers around the pthread primitives. */
typedef struct ethr_mutex_ ethr_mutex;
struct ethr_mutex_ {
    pthread_mutex_t pt_mtx;	/* the underlying pthread mutex */
#if ETHR_XCHK
    int initialized;		/* extra-check tag (ETHR_XCHK builds only) */
#endif
};

typedef struct ethr_cond_ ethr_cond;
struct ethr_cond_ {
    pthread_cond_t pt_cnd;	/* the underlying pthread condition variable */
#if ETHR_XCHK
    int initialized;		/* extra-check tag (ETHR_XCHK builds only) */
#endif
};
231
232 #elif defined(ETHR_WIN32_THREADS) || defined(ETHR_DBG_WIN_MTX_WITH_PTHREADS)
233 # define ETHR_WIN_MUTEX__
234
/* Windows implementation on top of critical sections. The extra atomic
 * state exists only for POSIX compliant mutexes, which must support
 * signal/broadcast without the mutex held (see note at top of file). */
typedef struct ethr_mutex_ ethr_mutex;
struct ethr_mutex_ {
    int posix_compliant;		/* fixed at init; selects unlock path */
    CRITICAL_SECTION cs;		/* the actual lock */
    ethr_ts_event *wakeups;		/* threads to wake on unlock */
    ethr_atomic32_t have_wakeups;	/* only when posix compliant */
    ethr_atomic32_t locked;		/* only when posix compliant */
    ethr_spinlock_t lock;		/* only when posix compliant */
#if ETHR_XCHK
    int initialized;			/* extra-check tag (ETHR_XCHK only) */
#endif
};

typedef struct ethr_cond_ ethr_cond;
struct ethr_cond_ {
    int posix_compliant;		/* fixed at init */
    CRITICAL_SECTION cs;		/* protects the waiter list */
    ethr_ts_event *waiters;		/* queued waiting threads */
    int spincount;			/* spin before blocking */
#if ETHR_XCHK
    int initialized;			/* extra-check tag (ETHR_XCHK only) */
#endif
};
258
259 #else
260 # error "no mutex implementation"
261 #endif
262
263 int ethr_mutex_init_opt(ethr_mutex *, ethr_mutex_opt *);
264 int ethr_mutex_init(ethr_mutex *);
265 int ethr_mutex_destroy(ethr_mutex *);
266 #if !defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_MUTEX_IMPL__)
267 int ethr_mutex_trylock(ethr_mutex *);
268 void ethr_mutex_lock(ethr_mutex *);
269 void ethr_mutex_unlock(ethr_mutex *);
270 #endif
271 int ethr_cond_init_opt(ethr_cond *, ethr_cond_opt *);
272 int ethr_cond_init(ethr_cond *);
273 int ethr_cond_destroy(ethr_cond *);
274 void ethr_cond_signal(ethr_cond *);
275 void ethr_cond_broadcast(ethr_cond *);
276 int ethr_cond_wait(ethr_cond *, ethr_mutex *);
277
/* Rwmutex kind: NORMAL favors writers/compactness; the FREQUENT_READ
 * kinds use the per-reader-group array (ethr_rwmtx_readers_array__). */
typedef enum {
    ETHR_RWMUTEX_TYPE_NORMAL,
    ETHR_RWMUTEX_TYPE_FREQUENT_READ,
    ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ
} ethr_rwmutex_type;

/* Expected lifetime of the rwmutex; a tuning hint passed at init. */
typedef enum {
    ETHR_RWMUTEX_LONG_LIVED,
    ETHR_RWMUTEX_SHORT_LIVED,
    ETHR_RWMUTEX_UNKNOWN_LIVED
} ethr_rwmutex_lived;
289
/* Rwmutex creation options; -1 spin counts mean "use default". */
typedef struct {
    ethr_rwmutex_type type;	/* NORMAL / FREQUENT_READ / EXTREMELY_... */
    ethr_rwmutex_lived lived;	/* lifetime hint */
    int main_spincount;		/* -1 == default */
    int aux_spincount;		/* -1 == default */
} ethr_rwmutex_opt;

#define ETHR_RWMUTEX_OPT_DEFAULT_INITER \
  {ETHR_RWMUTEX_TYPE_NORMAL, ETHR_RWMUTEX_UNKNOWN_LIVED, -1, -1}
299
300 #ifdef ETHR_USE_OWN_RWMTX_IMPL__
301
/* Per-group reader slot for the frequent-read rwmutex kinds. Padded to
 * a full cache line (align__), presumably to avoid false sharing
 * between reader counters of different groups. */
typedef union {
    struct {
	ethr_atomic32_t readers;	/* readers counted in this slot */
	int waiting_readers;
	int byte_offset;
	ethr_rwmutex_lived lived;
    } data;
    char align__[ETHR_CACHE_LINE_SIZE];
} ethr_rwmtx_readers_array__;
311
/* Rwmutex, home-grown implementation. */
typedef struct ethr_rwmutex_ ethr_rwmutex;
struct ethr_rwmutex_ {
    struct ethr_mutex_base_ mtxb;	/* shared lock machinery */
    ethr_rwmutex_type type;		/* kind chosen at init */
    ethr_ts_event *rq_end;		/* end of reader queue -- see impl */
    union {
	ethr_rwmtx_readers_array__ *ra;	/* frequent-read kinds: reader slots */
	int rs;				/* normal kind: reader state */
    } tdata;
#ifdef ETHR_MTX_HARD_DEBUG_FENCE
    long post_fence;			/* trailing guard word */
#endif
#if ETHR_XCHK
    int initialized;			/* presumably ETHR_RWMUTEX_INITIALIZED
					 * when valid -- confirm in impl */
#endif
};
328
329 #else /* pthread_rwlock */
330
/* Rwmutex as a thin wrapper around pthread_rwlock. */
typedef struct ethr_rwmutex_ ethr_rwmutex;
struct ethr_rwmutex_ {
    pthread_rwlock_t pt_rwlock;	/* the underlying pthread rwlock */
#if ETHR_XCHK
    int initialized;		/* extra-check tag (ETHR_XCHK builds only) */
#endif
};
338
339 #endif /* pthread_rwlock */
340
341 int ethr_rwmutex_set_reader_group(int);
342 int ethr_rwmutex_init_opt(ethr_rwmutex *, ethr_rwmutex_opt *);
343 int ethr_rwmutex_init(ethr_rwmutex *);
344 int ethr_rwmutex_destroy(ethr_rwmutex *);
345 #if defined(ETHR_USE_OWN_RWMTX_IMPL__) \
346 || !defined(ETHR_TRY_INLINE_FUNCS) \
347 || defined(ETHR_MUTEX_IMPL__)
348 int ethr_rwmutex_tryrlock(ethr_rwmutex *);
349 void ethr_rwmutex_rlock(ethr_rwmutex *);
350 void ethr_rwmutex_runlock(ethr_rwmutex *);
351 int ethr_rwmutex_tryrwlock(ethr_rwmutex *);
352 void ethr_rwmutex_rwlock(ethr_rwmutex *);
353 void ethr_rwmutex_rwunlock(ethr_rwmutex *);
354 #endif
355
#ifdef ETHR_MTX_HARD_DEBUG
/* Hard-debug assertion: void expression; reports through
 * ethr_assert_failed() when A is false. No-op in normal builds. */
#define ETHR_MTX_HARD_ASSERT(A) \
  ((void) ((A) ? 1 : ethr_assert_failed(__FILE__, __LINE__, __func__, #A)))
#else
#define ETHR_MTX_HARD_ASSERT(A) ((void) 1)
#endif
362
#ifdef ETHR_MTX_HARD_DEBUG_LFS
/*
 * Lock-flag sanity counters: (MTXB)->hdbg_lfs is the number of current
 * read lockers (> 0), -1 while write locked, and 0 when unlocked; the
 * asserts below enforce those invariants on every transition.
 */
#  define ETHR_MTX_HARD_DEBUG_LFS_INIT(MTXB) \
do { \
    ethr_atomic32_init(&(MTXB)->hdbg_lfs, 0); \
} while (0)
#  define ETHR_MTX_HARD_DEBUG_LFS_RLOCK(MTXB) \
do { \
    ethr_sint32_t val__; \
    ETHR_COMPILER_BARRIER; \
    val__ = ethr_atomic32_inc_read(&(MTXB)->hdbg_lfs); \
    ETHR_MTX_HARD_ASSERT(val__ > 0); \
} while (0)
#  define ETHR_MTX_HARD_DEBUG_LFS_TRYRLOCK(MTXB, RES) \
do { \
    ETHR_COMPILER_BARRIER; \
    if ((RES) == 0) \
	ETHR_MTX_HARD_DEBUG_LFS_RLOCK((MTXB)); \
    else \
	ETHR_MTX_HARD_ASSERT((RES) == EBUSY); \
} while (0)
#  define ETHR_MTX_HARD_DEBUG_LFS_RUNLOCK(MTXB) \
do { \
    ethr_sint32_t val__ = ethr_atomic32_dec_read(&(MTXB)->hdbg_lfs); \
    ETHR_MTX_HARD_ASSERT(val__ >= 0); \
    ETHR_COMPILER_BARRIER; \
} while (0)
#  define ETHR_MTX_HARD_DEBUG_LFS_RWLOCK(MTXB) \
do { \
    ethr_sint32_t val__; \
    ETHR_COMPILER_BARRIER; \
    val__ = ethr_atomic32_dec_read(&(MTXB)->hdbg_lfs); \
    ETHR_MTX_HARD_ASSERT(val__ == -1); \
} while (0)
#  define ETHR_MTX_HARD_DEBUG_LFS_TRYRWLOCK(MTXB, RES) \
do { \
    ETHR_COMPILER_BARRIER; \
    if ((RES) == 0) \
	ETHR_MTX_HARD_DEBUG_LFS_RWLOCK((MTXB)); \
    else \
	ETHR_MTX_HARD_ASSERT((RES) == EBUSY); \
} while (0)
/* NOTE(review): `ethr_atomic32_inctest' differs from the inc_read /
 * dec_read names used above -- looks like an older API name; confirm
 * it still exists in the atomics layer. */
#  define ETHR_MTX_HARD_DEBUG_LFS_RWUNLOCK(MTXB) \
do { \
    ethr_sint32_t val__ = ethr_atomic32_inctest(&(MTXB)->hdbg_lfs); \
    ETHR_MTX_HARD_ASSERT(val__ == 0); \
    ETHR_COMPILER_BARRIER; \
} while (0)
#else
#  define ETHR_MTX_HARD_DEBUG_LFS_INIT(MTXB)
#  define ETHR_MTX_HARD_DEBUG_LFS_RLOCK(MTXB)
#  define ETHR_MTX_HARD_DEBUG_LFS_TRYRLOCK(MTXB, RES)
#  define ETHR_MTX_HARD_DEBUG_LFS_RUNLOCK(MTXB)
#  define ETHR_MTX_HARD_DEBUG_LFS_RWLOCK(MTXB)
#  define ETHR_MTX_HARD_DEBUG_LFS_TRYRWLOCK(MTXB, RES)
#  define ETHR_MTX_HARD_DEBUG_LFS_RWUNLOCK(MTXB)
#endif
419
#ifdef ETHR_MTX_HARD_DEBUG_FENCE

/* Guard patterns written before/after each lock structure; the CHK
 * macro asserts neither fence word has been overwritten. */
#if ETHR_SIZEOF_PTR == 8
#  define ETHR_MTX_HARD_DEBUG_PRE_FENCE 0xdeadbeefdeadbeefL
#  define ETHR_MTX_HARD_DEBUG_POST_FENCE 0xdeaddeaddeaddeadL
#else
#  define ETHR_MTX_HARD_DEBUG_PRE_FENCE 0xdeaddeadL
#  define ETHR_MTX_HARD_DEBUG_POST_FENCE 0xdeaddeadL
#endif

#define ETHR_MTX_HARD_DEBUG_FENCE_CHK(X) \
do { \
    ETHR_COMPILER_BARRIER; \
    ETHR_MTX_HARD_ASSERT((X)->mtxb.pre_fence == ETHR_MTX_HARD_DEBUG_PRE_FENCE);\
    ETHR_MTX_HARD_ASSERT((X)->post_fence == ETHR_MTX_HARD_DEBUG_POST_FENCE); \
    ETHR_COMPILER_BARRIER; \
} while (0)
#define ETHR_MTX_HARD_DEBUG_FENCE_INIT(X) \
do { \
    (X)->mtxb.pre_fence = ETHR_MTX_HARD_DEBUG_PRE_FENCE; \
    (X)->post_fence = ETHR_MTX_HARD_DEBUG_POST_FENCE; \
} while (0)
#else
#define ETHR_MTX_HARD_DEBUG_FENCE_CHK(X)
#define ETHR_MTX_HARD_DEBUG_FENCE_INIT(X)
#endif
446
#ifdef ETHR_MTX_CHK_EXCL

#if !defined(ETHR_DEBUG) && defined(__GNUC__)
#warning "check exclusive is enabled"
#endif

/* Ownership-tracking debug checks: `exclusive' flags write ownership;
 * invalid transitions are reported via ethr_assert_failed(). */

#  define ETHR_MTX_CHK_EXCL_INIT__(MTXB) \
    ethr_atomic32_init(&(MTXB)->exclusive, 0)

#  define ETHR_MTX_CHK_EXCL_IS_EXCL(MTXB) \
do { \
    ETHR_COMPILER_BARRIER; \
    if (!ethr_atomic32_read(&(MTXB)->exclusive)) \
	ethr_assert_failed(__FILE__, __LINE__, __func__,\
			   "is exclusive"); \
    ETHR_COMPILER_BARRIER; \
} while (0)
#  define ETHR_MTX_CHK_EXCL_IS_NOT_EXCL(MTXB) \
do { \
    ETHR_COMPILER_BARRIER; \
    if (ethr_atomic32_read(&(MTXB)->exclusive)) \
	ethr_assert_failed(__FILE__, __LINE__, __func__,\
			   "is not exclusive"); \
    ETHR_COMPILER_BARRIER; \
} while (0)
#  define ETHR_MTX_CHK_EXCL_SET_EXCL(MTXB) \
do { \
    ETHR_MTX_CHK_EXCL_IS_NOT_EXCL((MTXB)); \
    ethr_atomic32_set(&(MTXB)->exclusive, 1); \
    ETHR_COMPILER_BARRIER; \
} while (0)
#  define ETHR_MTX_CHK_EXCL_UNSET_EXCL(MTXB) \
do { \
    ETHR_MTX_CHK_EXCL_IS_EXCL((MTXB)); \
    ethr_atomic32_set(&(MTXB)->exclusive, 0); \
    ETHR_COMPILER_BARRIER; \
} while (0)

#ifdef ETHR_MTX_CHK_NON_EXCL

#if !defined(ETHR_DEBUG) && defined(__GNUC__)
#warning "check non-exclusive is enabled"
#endif

/* Non-exclusive (read) ownership is a counter: SET/UNSET inc/dec it. */

#  define ETHR_MTX_CHK_NON_EXCL_INIT__(MTXB) \
    ethr_atomic32_init(&(MTXB)->non_exclusive, 0)
#  define ETHR_MTX_CHK_EXCL_IS_NON_EXCL(MTXB) \
do { \
    ETHR_COMPILER_BARRIER; \
    if (!ethr_atomic32_read(&(MTXB)->non_exclusive)) \
	ethr_assert_failed(__FILE__, __LINE__, __func__,\
			   "is non-exclusive"); \
    ETHR_COMPILER_BARRIER; \
} while (0)
#  define ETHR_MTX_CHK_EXCL_IS_NOT_NON_EXCL(MTXB) \
do { \
    ETHR_COMPILER_BARRIER; \
    if (ethr_atomic32_read(&(MTXB)->non_exclusive)) \
	ethr_assert_failed(__FILE__, __LINE__, __func__,\
			   "is not non-exclusive"); \
    ETHR_COMPILER_BARRIER; \
} while (0)
#  define ETHR_MTX_CHK_EXCL_SET_NON_EXCL(MTXB) \
do { \
    ETHR_COMPILER_BARRIER; \
    ethr_atomic32_inc(&(MTXB)->non_exclusive); \
    ETHR_COMPILER_BARRIER; \
} while (0)
#  define ETHR_MTX_CHK_EXCL_SET_NON_EXCL_NO(MTXB, NO) \
do { \
    ETHR_COMPILER_BARRIER; \
    ethr_atomic32_add(&(MTXB)->non_exclusive, (NO)); \
    ETHR_COMPILER_BARRIER; \
} while (0)
#  define ETHR_MTX_CHK_EXCL_UNSET_NON_EXCL(MTXB) \
do { \
    ETHR_COMPILER_BARRIER; \
    ethr_atomic32_dec(&(MTXB)->non_exclusive); \
    ETHR_COMPILER_BARRIER; \
} while (0)
#else
#  define ETHR_MTX_CHK_NON_EXCL_INIT__(MTXB)
#  define ETHR_MTX_CHK_EXCL_IS_NON_EXCL(MTXB)
#  define ETHR_MTX_CHK_EXCL_IS_NOT_NON_EXCL(MTXB)
#  define ETHR_MTX_CHK_EXCL_SET_NON_EXCL_NO(MTXB, NO)
#  define ETHR_MTX_CHK_EXCL_SET_NON_EXCL(MTXB)
#  define ETHR_MTX_CHK_EXCL_UNSET_NON_EXCL(MTXB)
#endif

#else
#  define ETHR_MTX_CHK_EXCL_INIT__(MTXB)
#  define ETHR_MTX_CHK_EXCL_IS_EXCL(MTXB)
#  define ETHR_MTX_CHK_EXCL_IS_NOT_EXCL(MTXB)
#  define ETHR_MTX_CHK_EXCL_SET_EXCL(MTXB)
#  define ETHR_MTX_CHK_EXCL_UNSET_EXCL(MTXB)
#  define ETHR_MTX_CHK_NON_EXCL_INIT__(MTXB)
#  define ETHR_MTX_CHK_EXCL_IS_NON_EXCL(MTXB)
#  define ETHR_MTX_CHK_EXCL_IS_NOT_NON_EXCL(MTXB)
#  define ETHR_MTX_CHK_EXCL_SET_NON_EXCL_NO(MTXB, NO)
#  define ETHR_MTX_CHK_EXCL_SET_NON_EXCL(MTXB)
#  define ETHR_MTX_CHK_EXCL_UNSET_NON_EXCL(MTXB)
#endif

/* Initialize both ownership trackers (no-ops unless checking enabled). */
#  define ETHR_MTX_CHK_EXCL_INIT(MTXB) \
do { \
    ETHR_MTX_CHK_EXCL_INIT__((MTXB)); \
    ETHR_MTX_CHK_NON_EXCL_INIT__((MTXB)); \
} while (0)
555
556
557 #ifdef ETHR_USE_OWN_MTX_IMPL__
558
/* Default spin-count tuning knobs for the home-grown mutex/condvar;
 * exact MAX/BASE/INC interpretation lives in the implementation file. */
#define ETHR_MTX_DEFAULT_MAIN_SPINCOUNT_MAX 2000
#define ETHR_MTX_DEFAULT_MAIN_SPINCOUNT_BASE 800
#define ETHR_MTX_DEFAULT_MAIN_SPINCOUNT_INC 50
#define ETHR_MTX_DEFAULT_AUX_SPINCOUNT 50

/* Condition variables do not spin by default. */
#define ETHR_CND_DEFAULT_MAIN_SPINCOUNT 0
#define ETHR_CND_DEFAULT_AUX_SPINCOUNT 0
566
567 #if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_MUTEX_IMPL__)
568
569 void ethr_mutex_lock_wait__(ethr_mutex *, ethr_sint32_t);
570 void ethr_mutex_unlock_wake__(ethr_mutex *, ethr_sint32_t);
571
/* Try to take mtx without blocking. Returns 0 on success, EBUSY when
 * already locked. */
static ETHR_INLINE int
ETHR_INLINE_MTX_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
{
    ethr_sint32_t act;
    int res;
    ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
    ETHR_MTX_DBG_CHK_UNUSED_FLG_BITS(mtx);

    /* Single attempt to move flgs from 0 (unlocked, no waiters) to
     * W_FLG (write locked); acquire barrier on success. */
    act = ethr_atomic32_cmpxchg_acqb(&mtx->mtxb.flgs, ETHR_RWMTX_W_FLG__, 0);
    res = (act == 0) ? 0 : EBUSY;

#ifdef ETHR_MTX_CHK_EXCL
    if (res == 0)
	ETHR_MTX_CHK_EXCL_SET_EXCL(&mtx->mtxb);
#endif

    ETHR_MTX_HARD_DEBUG_LFS_TRYRWLOCK(&mtx->mtxb, res);
    ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
    ETHR_MTX_DBG_CHK_UNUSED_FLG_BITS(mtx);

    ETHR_COMPILER_BARRIER;
    return res;
}
595
/* Lock mtx, blocking if necessary. Fast path is a single cmpxchg;
 * contention falls back to ethr_mutex_lock_wait__(). */
static ETHR_INLINE void
ETHR_INLINE_MTX_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
{
    ethr_sint32_t act;
    ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
    ETHR_MTX_DBG_CHK_UNUSED_FLG_BITS(mtx);

    /* Uncontended: flgs goes 0 -> W_FLG with an acquire barrier. Any
     * other observed value means contention; wait (passing the observed
     * flags so the slow path knows the state it raced against). */
    act = ethr_atomic32_cmpxchg_acqb(&mtx->mtxb.flgs, ETHR_RWMTX_W_FLG__, 0);
    if (act != 0)
	ethr_mutex_lock_wait__(mtx, act);

    ETHR_MTX_CHK_EXCL_SET_EXCL(&mtx->mtxb);

    ETHR_MTX_HARD_DEBUG_LFS_RWLOCK(&mtx->mtxb);
    ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
    ETHR_MTX_DBG_CHK_UNUSED_FLG_BITS(mtx);

    ETHR_COMPILER_BARRIER;
}
615
/* Unlock mtx. Fast path is a single cmpxchg with release barrier;
 * if wait flags were set, wake a waiter via the slow path. */
static ETHR_INLINE void
ETHR_INLINE_MTX_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
{
    ethr_sint32_t act;
    ETHR_COMPILER_BARRIER;
    ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
    ETHR_MTX_HARD_DEBUG_LFS_RWUNLOCK(&mtx->mtxb);
    ETHR_MTX_DBG_CHK_UNUSED_FLG_BITS(mtx);

    ETHR_MTX_CHK_EXCL_UNSET_EXCL(&mtx->mtxb);

    /* Uncontended: flgs goes W_FLG -> 0 with a release barrier. Seeing
     * anything else means waiters arrived; hand off to the wake path. */
    act = ethr_atomic32_cmpxchg_relb(&mtx->mtxb.flgs, 0, ETHR_RWMTX_W_FLG__);
    if (act != ETHR_RWMTX_W_FLG__)
	ethr_mutex_unlock_wake__(mtx, act);

    ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
    ETHR_MTX_DBG_CHK_UNUSED_FLG_BITS(mtx);
}
634
635 #endif /* ETHR_TRY_INLINE_FUNCS */
636
637 #elif defined(ETHR_PTHREADS) && !defined(ETHR_DBG_WIN_MTX_WITH_PTHREADS)
638
639 #if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_MUTEX_IMPL__)
640
641 static ETHR_INLINE int
ETHR_INLINE_MTX_FUNC_NAME_(ethr_mutex_trylock)642 ETHR_INLINE_MTX_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
643 {
644 int res;
645 res = pthread_mutex_trylock(&mtx->pt_mtx);
646 if (res != 0 && res != EBUSY)
647 ETHR_FATAL_ERROR__(res);
648 return res;
649 }
650
651 static ETHR_INLINE void
ETHR_INLINE_MTX_FUNC_NAME_(ethr_mutex_lock)652 ETHR_INLINE_MTX_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
653 {
654 int res = pthread_mutex_lock(&mtx->pt_mtx);
655 if (res != 0)
656 ETHR_FATAL_ERROR__(res);
657 }
658
659 static ETHR_INLINE void
ETHR_INLINE_MTX_FUNC_NAME_(ethr_mutex_unlock)660 ETHR_INLINE_MTX_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
661 {
662 int res = pthread_mutex_unlock(&mtx->pt_mtx);
663 if (res != 0)
664 ETHR_FATAL_ERROR__(res);
665 }
666
667 #endif /* ETHR_TRY_INLINE_FUNCS */
668
669 #elif defined(ETHR_WIN32_THREADS) || defined(ETHR_DBG_WIN_MTX_WITH_PTHREADS)
670
671 #if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_MUTEX_IMPL__)
672
673 static ETHR_INLINE int
ETHR_INLINE_MTX_FUNC_NAME_(ethr_mutex_trylock)674 ETHR_INLINE_MTX_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
675 {
676 if (!TryEnterCriticalSection(&mtx->cs))
677 return EBUSY;
678 if (mtx->posix_compliant)
679 ethr_atomic32_set(&mtx->locked, 1);
680 return 0;
681 }
682
683 static ETHR_INLINE void
ETHR_INLINE_MTX_FUNC_NAME_(ethr_mutex_lock)684 ETHR_INLINE_MTX_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
685 {
686 EnterCriticalSection(&mtx->cs);
687 if (mtx->posix_compliant)
688 ethr_atomic32_set(&mtx->locked, 1);
689 }
690
691 void ethr_mutex_cond_wakeup__(ethr_mutex *mtx);
692
/* Release the mutex. Note: the gotos below jump INTO the bodies of the
 * following if/else (valid C); the posix compliant path decides the
 * outcome from the atomics instead of reading mtx->wakeups directly. */
static ETHR_INLINE void
ETHR_INLINE_MTX_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
{
    if (mtx->posix_compliant) {
	/* Publish unlocked (full barrier), then check for queued
	 * wakeups with an acquire read. */
	ethr_atomic32_set_mb(&mtx->locked, 0);
	if (ethr_atomic32_read_acqb(&mtx->have_wakeups))
	    goto cond_wakeup;
	else
	    goto leave_cs;
    }

    if (mtx->wakeups) {
    cond_wakeup:
	ethr_mutex_cond_wakeup__(mtx);
    }
    else {
    leave_cs:
	LeaveCriticalSection(&mtx->cs);
    }
}
713
714 #endif /* ETHR_TRY_INLINE_FUNCS */
715
716 #endif
717
718 #ifdef ETHR_USE_OWN_RWMTX_IMPL__
719
/* Default spin-count tuning knobs for the home-grown rwmutex;
 * interpreted by the implementation file. */
#define ETHR_RWMTX_DEFAULT_MAIN_SPINCOUNT_MAX 2000
#define ETHR_RWMTX_DEFAULT_MAIN_SPINCOUNT_BASE 800
#define ETHR_RWMTX_DEFAULT_MAIN_SPINCOUNT_INC 50
#define ETHR_RWMTX_DEFAULT_AUX_SPINCOUNT 50
724
725 #else /* pthread_rwlock */
726
727 #if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_MUTEX_IMPL__)
728
729 static ETHR_INLINE int
ETHR_INLINE_MTX_FUNC_NAME_(ethr_rwmutex_tryrlock)730 ETHR_INLINE_MTX_FUNC_NAME_(ethr_rwmutex_tryrlock)(ethr_rwmutex *rwmtx)
731 {
732 int res = pthread_rwlock_tryrdlock(&rwmtx->pt_rwlock);
733 if (res != 0 && res != EBUSY)
734 ETHR_FATAL_ERROR__(res);
735 return res;
736 }
737
738 static ETHR_INLINE void
ETHR_INLINE_MTX_FUNC_NAME_(ethr_rwmutex_rlock)739 ETHR_INLINE_MTX_FUNC_NAME_(ethr_rwmutex_rlock)(ethr_rwmutex *rwmtx)
740 {
741 int res = pthread_rwlock_rdlock(&rwmtx->pt_rwlock);
742 if (res != 0)
743 ETHR_FATAL_ERROR__(res);
744 }
745
746 static ETHR_INLINE void
ETHR_INLINE_MTX_FUNC_NAME_(ethr_rwmutex_runlock)747 ETHR_INLINE_MTX_FUNC_NAME_(ethr_rwmutex_runlock)(ethr_rwmutex *rwmtx)
748 {
749 int res = pthread_rwlock_unlock(&rwmtx->pt_rwlock);
750 if (res != 0)
751 ETHR_FATAL_ERROR__(res);
752 }
753
754 static ETHR_INLINE int
ETHR_INLINE_MTX_FUNC_NAME_(ethr_rwmutex_tryrwlock)755 ETHR_INLINE_MTX_FUNC_NAME_(ethr_rwmutex_tryrwlock)(ethr_rwmutex *rwmtx)
756 {
757 int res = pthread_rwlock_trywrlock(&rwmtx->pt_rwlock);
758 if (res != 0 && res != EBUSY)
759 ETHR_FATAL_ERROR__(res);
760 return res;
761 }
762
763 static ETHR_INLINE void
ETHR_INLINE_MTX_FUNC_NAME_(ethr_rwmutex_rwlock)764 ETHR_INLINE_MTX_FUNC_NAME_(ethr_rwmutex_rwlock)(ethr_rwmutex *rwmtx)
765 {
766 int res = pthread_rwlock_wrlock(&rwmtx->pt_rwlock);
767 if (res != 0)
768 ETHR_FATAL_ERROR__(res);
769 }
770
771 static ETHR_INLINE void
ETHR_INLINE_MTX_FUNC_NAME_(ethr_rwmutex_rwunlock)772 ETHR_INLINE_MTX_FUNC_NAME_(ethr_rwmutex_rwunlock)(ethr_rwmutex *rwmtx)
773 {
774 int res = pthread_rwlock_unlock(&rwmtx->pt_rwlock);
775 if (res != 0)
776 ETHR_FATAL_ERROR__(res);
777 }
778
779 #endif /* ETHR_TRY_INLINE_FUNCS */
780
781 #endif /* pthread_rwlock */
782
783 int ethr_mutex_lib_init(int);
784 int ethr_mutex_lib_late_init(int, int);
785
786 #endif /* #ifndef ETHR_MUTEX_H__ */
787