1 /*------------------------------------------------------------------------------
2 *
3 * Copyright (c) 2011-2021, EURid vzw. All rights reserved.
4 * The YADIFA TM software product is provided under the BSD 3-clause license:
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * * Neither the name of EURid nor the names of its contributors may be
16 * used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 *
31 *------------------------------------------------------------------------------
32 *
33 */
34
35 /** @defgroup
36 * @ingroup dnscore
37 * @brief
38 *
39 *
40 *
41 * @{
42 *
43 *----------------------------------------------------------------------------*/
44 #ifndef _MUTEX_H
45 #define _MUTEX_H
46
47 /**
48 * This helper header allows to chose the kind of mutex used.
49 * This is part of the sendto queue experiment.
50 */
51
52 #include <unistd.h>
53 #include <dnscore/thread.h>
54 #include <time.h>
55
56 #define SEMAPHORE_SUPPORT 0
57
58 #if SEMAPHORE_SUPPORT
59 #include <semaphore.h>
60 #endif
61
62 #include <dnscore/sys_types.h>
63 #include <dnscore/timems.h>
64
65 #if defined(__MACH__)
66 #include <dnscore/osx_clock_gettime.h>
67 #endif
68
69 #ifdef __cplusplus
70 extern "C"
71 {
72 #endif
73
74 #ifndef DNSCORE_HAS_MUTEX_DEBUG_SUPPORT
75 #error "DNSCORE_HAS_MUTEX_DEBUG_SUPPORT must be set to either 0 or 1"
76 #endif
77
78 #ifndef MUTEX_USE_SPINLOCK
79 #define MUTEX_USE_SPINLOCK 0 // keep it that way
80 #endif
81
82 #if MUTEX_USE_SPINLOCK && DNSCORE_HAS_MUTEX_DEBUG_SUPPORT
83 #error "Cannot mix spinlock and mutex debug support"
84 #endif
85
86 // these two are for error reporting in debug builds
87 #define MUTEX_LOCKED_TOO_MUCH_TIME_US 5000000
88 #define MUTEX_WAITED_TOO_MUCH_TIME_US 2000000
89
90 typedef pthread_cond_t cond_t;
91
92 #define COND_INITIALIZER PTHREAD_COND_INITIALIZER
93
94 // DNSCORE_HAS_MUTEX_DEBUG_SUPPORT
95
96 #if !MUTEX_USE_SPINLOCK // do not use SPINLOCK
97
98 #if !DNSCORE_HAS_MUTEX_DEBUG_SUPPORT
99
100 typedef pthread_mutex_t mutex_t;
101
102 #define MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
103
/// Locks the mutex and returns the raw pthread error code (no abort on failure).
static inline int mutex_lock_unchecked(mutex_t* mtx)
{
    return pthread_mutex_lock(mtx);
}
109
/// Unlocks the mutex and returns the raw pthread error code (no abort on failure).
static inline int mutex_unlock_unchecked(mutex_t* mtx)
{
    return pthread_mutex_unlock(mtx);
}
115
116 #if !DEBUG
117 #define mutex_lock(mtx__) pthread_mutex_lock(mtx__)
118 #define mutex_trylock(mtx__) (pthread_mutex_trylock(mtx__)==0)
119 #define mutex_unlock(mtx__) pthread_mutex_unlock(mtx__)
120 #else
121
/// Debug build: locks the mutex; any failure is treated as fatal.
static inline void mutex_lock(mutex_t* mtx)
{
    if(pthread_mutex_lock(mtx) != 0)
    {
        abort();
    }
}
131
mutex_trylock(mutex_t * mtx)132 static inline bool mutex_trylock(mutex_t* mtx)
133 {
134 int ret = pthread_mutex_trylock(mtx);
135 if((ret != 0) && (ret != EBUSY))
136 {
137 abort();
138 }
139 return ret == 0;
140 }
141
/// Debug build: unlocks the mutex; unlocking a mutex we do not own is fatal.
static inline void mutex_unlock(mutex_t* mtx)
{
    const int err = pthread_mutex_unlock(mtx);
    if(err != 0)
    {
        abort();
    }
}
150
151 #if SEMAPHORE_SUPPORT
152
153 typedef sem_t semaphore_t;
154
/// Initialises a thread-shared semaphore with an initial value of 0.
/// Returns the sem_init result (0 on success, -1 with errno set on error).
/// Note: the historical spelling "semaphone" is kept for API compatibility.
static inline int semaphone_init(semaphore_t *sem)
{
    return sem_init(sem, 0, 0);
}
160
// Initialises a process-shared semaphore (sem must live in shared memory).
// NOTE(review): initial value is 1 here but 0 in semaphone_init — presumably
// "starts unlocked" is intended; confirm against the callers.
static inline int semaphone_init_process_shared(semaphore_t *sem)
{
    int ret = sem_init(sem, 1, 1);
    return ret;
}
166
// Destroys the semaphore; the sem_destroy result is deliberately ignored.
static inline void semaphore_finalize(semaphore_t *sem)
{
    sem_destroy(sem);
}
171
/// Decrements (locks) the semaphore, retrying when interrupted by a signal.
/// Any failure other than EINTR is a programming error and aborts.
static inline void semaphore_lock(semaphore_t *sem)
{
    while(sem_wait(sem) != 0)
    {
        if(errno != EINTR)
        {
            abort();
        }
    }
}
188
semaphore_trylock(semaphore_t * sem)189 static inline bool semaphore_trylock(semaphore_t *sem)
190 {
191 int ret = sem_trywait(sem); // fails if
192 return ret == 0;
193 }
194
// Increments (unlocks) the semaphore; the sem_post result is ignored.
static inline void semaphore_unlock(semaphore_t *sem)
{
    sem_post(sem);
}
199
200 #endif
201
202 #endif
203
204 #else // #if !DNSCORE_HAS_MUTEX_DEBUG_SUPPORT
205
206 #define MUTEX_CONTENTION_MONITOR 0
207
208 #if MUTEX_CONTENTION_MONITOR
209 #pragma message("***********************************************************")
210 #pragma message("***********************************************************")
211 #pragma message("MUTEX_CONTENTION_MONITOR 1")
212 #pragma message("***********************************************************")
213 #pragma message("***********************************************************")
214 #endif
215
216 #if MUTEX_CONTENTION_MONITOR
217 struct mutex_contention_monitor_s;
218 void mutex_contention_object_create(void *mutex_ptr, bool recursive);
219 void mutex_contention_object_destroy(void *mutex_ptr);
220 struct mutex_contention_monitor_s * mutex_contention_lock_begin(thread_t thread, void *mutex_ptr, stacktrace st, const char *type_name);
221 void mutex_contention_lock_wait(struct mutex_contention_monitor_s *mcm);
222 void mutex_contention_lock_wait_with_mutex(thread_t thread, void *mutex_ptr);
223 void mutex_contention_lock_resume(struct mutex_contention_monitor_s *mcm);
224 void mutex_contention_lock_resume_with_mutex(thread_t thread, void *mutex_ptr);
225 void mutex_contention_lock_end(struct mutex_contention_monitor_s *mcm);
226 void mutex_contention_lock_fail(struct mutex_contention_monitor_s *mcm);
227 void mutex_contention_unlock(thread_t thread, void *mutex_ptr);
228 void mutex_contention_unlock_with_monitor(struct mutex_contention_monitor_s *mcm);
229
230 void mutex_contention_monitor_start();
231 void mutex_contention_monitor_stop();
232 #endif
233
234 typedef pthread_mutex_t mutex_t;
235
236 #define MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
237
238 void mutex_lock(mutex_t *mtx);
239 bool mutex_trylock(mutex_t *mtx);
240 void mutex_unlock(mutex_t *mtx);
241
242 int mutex_lock_unchecked(mutex_t* mtx);
243 int mutex_unlock_unchecked(mutex_t* mtx);
244
245 #ifdef UNDEF_MSG_ERR
246 #undef MSG_ERR
247 #undef UNDEF_MSG_ERR
248 #endif
249
250 #endif // DNSCORE_HAS_MUTEX_DEBUG_SUPPORT
251
252 void mutex_init_recursive(mutex_t *mtx);
253 int mutex_init_process_shared(mutex_t *mtx);
254 void mutex_init(mutex_t *mtx);
255 void mutex_destroy(mutex_t *mtx);
256
257 /**
258 * Wrapper for mutex_destroy, with a more appropriate name.
259 */
260
static inline void mutex_finalize(mutex_t *mtx)
{
    mutex_destroy(mtx); // thin alias over mutex_destroy
}
265
266 #if __APPLE__
267 typedef mutex_t spinlock_t;
268
// macOS has no pthread_spinlock_t: spinlocks fall back to plain mutexes.
static inline void spinlock_init(spinlock_t *spin)
{
    mutex_init(spin);
}
273
// macOS fallback: destroy the underlying mutex.
static inline void spinlock_destroy(spinlock_t *spin)
{
    mutex_destroy(spin);
}
278
// macOS fallback: lock the underlying mutex.
static inline void spinlock_lock(spinlock_t *spin)
{
    mutex_lock(spin);
}
283
// macOS fallback: unlock the underlying mutex.
static inline void spinlock_unlock(spinlock_t *spin)
{
    mutex_unlock(spin);
}
288 #else
289
290 typedef pthread_spinlock_t spinlock_t;
291
// Initialises a process-private spinlock; the result is ignored.
static inline void spinlock_init(spinlock_t *spin)
{
    pthread_spin_init(spin, 0);
}
296
// Destroys the spinlock; the result is ignored.
static inline void spinlock_destroy(spinlock_t *spin)
{
    pthread_spin_destroy(spin);
}
301
// Busy-waits until the spinlock is acquired; the result is ignored.
static inline void spinlock_lock(spinlock_t *spin)
{
    pthread_spin_lock(spin);
}
306
// Releases the spinlock; the result is ignored.
static inline void spinlock_unlock(spinlock_t *spin)
{
    pthread_spin_unlock(spin);
}
311
312 #endif
313
// Waits on the condition; the caller must hold mtx.
// The contention monitor hooks only exist in mutex-debug builds.
static inline void cond_wait(cond_t *cond, mutex_t *mtx)
{
#if DNSCORE_HAS_MUTEX_DEBUG_SUPPORT
#if MUTEX_CONTENTION_MONITOR
    mutex_contention_lock_wait_with_mutex(thread_self(), mtx);
#endif
#endif
    int ret = pthread_cond_wait(cond, mtx);

#if DNSCORE_HAS_MUTEX_DEBUG_SUPPORT
#if MUTEX_CONTENTION_MONITOR
    mutex_contention_lock_resume_with_mutex(thread_self(), mtx);
#endif
#endif

    if(ret != 0)
    {
        // NOTE(review): pthread functions return the error code and do not set
        // errno, so perror() here prints an unrelated message — consider
        // fprintf(stderr, "cond_wait: %s\n", strerror(ret)) instead.
        perror("cond_wait");
        fflush(stderr);
    }
}
335
336 extern struct timespec __alarm__approximate_time_10s;
337
// Waits on the condition with an absolute deadline taken from the shared
// __alarm__approximate_time_10s timespec (roughly "now + 10 seconds").
// On timeout or error the shared deadline is pushed 10 seconds forward.
// NOTE(review): the shared timespec is read and updated without
// synchronisation; concurrent callers race on it — acceptable only because
// the deadline is approximate by design. Confirm this is intentional.
static inline void cond_wait_auto_time_out(cond_t *cond, mutex_t *mtx)
{
#if DNSCORE_HAS_MUTEX_DEBUG_SUPPORT
#if MUTEX_CONTENTION_MONITOR
    mutex_contention_lock_wait_with_mutex(thread_self(), mtx);
#endif
#endif

    int ret = pthread_cond_timedwait(cond, mtx, &__alarm__approximate_time_10s);

#if DNSCORE_HAS_MUTEX_DEBUG_SUPPORT
#if MUTEX_CONTENTION_MONITOR
    mutex_contention_lock_resume_with_mutex(thread_self(), mtx);
#endif
#endif
#ifndef WIN32
    if(ret != 0)
    {
#if DEBUG
        fprintf(stderr, "cond_wait_auto_time_out: %s\n", strerror(ret));
        fflush(stderr);
#endif
        // refresh the shared deadline to about 10 seconds from now
        time_t now = time(NULL);
        __alarm__approximate_time_10s.tv_sec = now + 10;
    }
#endif
}
365
366 #else
367
368 typedef pthread_spinlock_t mutex_t;
369
370 #define MUTEX_INITIALIZER 0
371
372 #define mutex_init(mtx) pthread_spin_init((mtx), 0)
373 #define mutex_destroy(mtx) pthread_spin_destroy(mtx)
374 #define mutex_lock(mtx) pthread_spin_lock(mtx)
375 #define mutex_trylock(mtx) (pthread_spin_trylock(mtx)==0)
376 #define mutex_unlock(mtx) pthread_spin_unlock(mtx)
377
// Spinlock-backed build: waits on the condition; the caller must hold mtx.
// NOTE(review): pthread_cond_wait expects a pthread_mutex_t — with mutex_t
// being a spinlock here, this variant looks unusable; the spinlock path is
// explicitly discouraged ("keep it that way") above.
static inline void cond_wait(cond_t *cond, mutex_t *mtx)
{
    pthread_cond_wait(cond, mtx);
}
382
383 #endif
384
385 int cond_init_process_shared(cond_t *cond);
386
// Initialises the condition with default attributes.
// NOTE(review): the pthread_cond_init result is ignored.
static inline void cond_init(cond_t *cond)
{
    pthread_cond_init(cond, NULL);
}
391
392 #if !_POSIX_TIMERS
393 #ifndef _TIMEMS_H
394 u64 timeus();
395 #endif
396 #endif
397
/**
 * Waits on the condition for at most usec microseconds (relative timeout).
 * The caller must hold mtx.
 *
 * @param cond the condition to wait on
 * @param mtx  the (locked) mutex paired with the condition
 * @param usec relative timeout in microseconds
 * @return the pthread_cond_timedwait result (0, ETIMEDOUT, ...)
 */
static inline int cond_timedwait(cond_t *cond, mutex_t *mtx, u64 usec)
{
    struct timespec ts;
#if (defined(_POSIX_TIMERS) && (_POSIX_TIMERS > 0)) || defined(__MACH__)
    clock_gettime(CLOCK_REALTIME, &ts);

    // Add the relative delay, keeping tv_nsec in [0, 10^9).
    // The previous code added the whole delay (in ns) to tv_nsec, which could
    // overflow a 32-bit long, and normalised with '>' instead of '>=', leaving
    // tv_nsec == 10^9 (invalid: pthread_cond_timedwait then returns EINVAL).
    u64 nsec = (u64)ts.tv_nsec + (usec % 1000000ULL) * 1000ULL;
    ts.tv_sec += (time_t)(usec / 1000000ULL + nsec / 1000000000ULL);
    ts.tv_nsec = (long)(nsec % 1000000000ULL);
#else
    // no realtime clock available: derive the absolute deadline from timeus()
    usec += timeus();
    usec *= 1000ULL;
    ts.tv_nsec = usec % 1000000000LL;
    ts.tv_sec = usec / 1000000000LL;
#endif


#if DNSCORE_HAS_MUTEX_DEBUG_SUPPORT
#if MUTEX_CONTENTION_MONITOR
    mutex_contention_lock_wait_with_mutex(thread_self(), mtx);
#endif
#endif

    int ret = pthread_cond_timedwait(cond, mtx, &ts);

#if DNSCORE_HAS_MUTEX_DEBUG_SUPPORT
#if MUTEX_CONTENTION_MONITOR
    mutex_contention_lock_resume_with_mutex(thread_self(), mtx);
#endif
#endif

    return ret;
}
437
// Waits on the condition until an absolute deadline expressed in microseconds
// since the epoch (CLOCK_REALTIME based). The caller must hold mtx.
// Returns the pthread_cond_timedwait result (0, ETIMEDOUT, ...).
static inline int cond_timedwait_absolute(cond_t *cond, mutex_t *mtx, u64 usec_epoch)
{
    struct timespec ts;

    // split microseconds into seconds + nanoseconds
    ts.tv_sec = usec_epoch / ONE_SECOND_US;
    ts.tv_nsec = (usec_epoch % ONE_SECOND_US) * 1000LL;

#if DNSCORE_HAS_MUTEX_DEBUG_SUPPORT
#if MUTEX_CONTENTION_MONITOR
    mutex_contention_lock_wait_with_mutex(thread_self(), mtx);
#endif
#endif

    int ret = pthread_cond_timedwait(cond, mtx, &ts);

#if DNSCORE_HAS_MUTEX_DEBUG_SUPPORT
#if MUTEX_CONTENTION_MONITOR
    mutex_contention_lock_resume_with_mutex(thread_self(), mtx);
#endif
#endif

    return ret;
}
461
// Waits on the condition until the given absolute timespec deadline.
// The caller must hold mtx. Returns the pthread_cond_timedwait result.
static inline int cond_timedwait_absolute_ts(cond_t *cond, mutex_t *mtx, struct timespec *ts)
{
#if DNSCORE_HAS_MUTEX_DEBUG_SUPPORT
#if MUTEX_CONTENTION_MONITOR
    mutex_contention_lock_wait_with_mutex(thread_self(), mtx);
#endif
#endif

    int ret = pthread_cond_timedwait(cond, mtx, ts);

#if DNSCORE_HAS_MUTEX_DEBUG_SUPPORT
#if MUTEX_CONTENTION_MONITOR
    mutex_contention_lock_resume_with_mutex(thread_self(), mtx);
#endif
#endif

    return ret;
}
480
481 // Only use this if there is only one possible thread waiting on
482 // the condition.
483
// Wakes (at least) one waiter; only safe when a single thread can be waiting.
static inline void cond_notify_one(cond_t *cond)
{
    pthread_cond_signal(cond);
}
488
// Wakes every thread currently waiting on the condition.
static inline void cond_notify(cond_t *cond)
{
    pthread_cond_broadcast(cond);
}
493
/// Destroys the condition, retrying while waiters are still using it.
/// pthread_cond_destroy returns EBUSY while threads are blocked on the
/// condition: back off for 5 ms and try again. Success or any other
/// error ends the loop (the error is deliberately ignored).
static inline void cond_finalize(cond_t *cond)
{
    while(pthread_cond_destroy(cond) == EBUSY)
    {
        usleep(5000);
    }
}
515
// A mutex-protected integer: a portable substitute for atomic int operations.
struct smp_int
{
    pthread_mutex_t mutex;  // guards value
    volatile int value;
};
521
522 #define SMP_INT_INITIALIZER {PTHREAD_MUTEX_INITIALIZER,0}
523 #define SMP_INT_INITIALIZER_AT(value_) {PTHREAD_MUTEX_INITIALIZER, (value_)}
524
525 typedef struct smp_int smp_int;
526
// Initialises the protected integer to 0.
static inline void smp_int_init(smp_int *v)
{
    pthread_mutex_init(&v->mutex, NULL);
    v->value = 0;
}
532
// Initialises the protected integer to the given value.
static inline void smp_int_init_set(smp_int *v, int value)
{
    pthread_mutex_init(&v->mutex, NULL);
    v->value = value;
}
538
// Atomically sets the value.
static inline void smp_int_set(smp_int *v, int i)
{
    pthread_mutex_lock(&v->mutex);
    v->value = i;
    pthread_mutex_unlock(&v->mutex);
}
545
546
// Atomically increments the value.
static inline void smp_int_inc(smp_int *v)
{
    pthread_mutex_lock(&v->mutex);
    v->value++;
    pthread_mutex_unlock(&v->mutex);
}
553
// Atomically adds 'value' to the integer.
static inline void smp_int_add(smp_int *v, int value)
{
    pthread_mutex_lock(&v->mutex);
    v->value += value;
    pthread_mutex_unlock(&v->mutex);
}
560
// Atomically ORs 'value' into the integer.
static inline void smp_int_or(smp_int *v, int value)
{
    pthread_mutex_lock(&v->mutex);
    v->value |= value;
    pthread_mutex_unlock(&v->mutex);
}
567
// Atomically ANDs 'value' into the integer.
static inline void smp_int_and(smp_int *v, int value)
{
    pthread_mutex_lock(&v->mutex);
    v->value &= value;
    pthread_mutex_unlock(&v->mutex);
}
574
// Atomically increments the value and returns the result.
static inline int smp_int_inc_get(smp_int *v)
{
    // was 'u32': the stored value and the return type are signed int, so use
    // int and avoid an implicit signed/unsigned round-trip (consistent with
    // smp_int_dec_get below)
    int ret;
    pthread_mutex_lock(&v->mutex);
    ret = ++v->value;
    pthread_mutex_unlock(&v->mutex);
    return ret;
}
583
// Atomically decrements the value.
static inline void smp_int_dec(smp_int *v)
{
    pthread_mutex_lock(&v->mutex);
    v->value--;
    pthread_mutex_unlock(&v->mutex);
}
590
// Atomically subtracts 'value' from the integer.
static inline void smp_int_sub(smp_int *v, int value)
{
    pthread_mutex_lock(&v->mutex);
    v->value -= value;
    pthread_mutex_unlock(&v->mutex);
}
597
// Atomically decrements the value and returns the result.
static inline int smp_int_dec_get(smp_int *v)
{
    int ret;
    pthread_mutex_lock(&v->mutex);
    ret = --v->value;
    pthread_mutex_unlock(&v->mutex);
    return ret;
}
606
// Atomically decrements the value and returns the value it had BEFORE.
static inline int smp_int_get_dec(smp_int *v)
{
    int ret;
    pthread_mutex_lock(&v->mutex);
    ret = v->value--;
    pthread_mutex_unlock(&v->mutex);
    return ret;
}
615
/// Atomic compare-and-set: stores 'to' only when the current value equals
/// 'from'. Returns whether the store happened.
static inline bool smp_int_setifequal(smp_int *v, int from, int to)
{
    pthread_mutex_lock(&v->mutex);

    const bool swapped = (v->value == from);
    if(swapped)
    {
        v->value = to;
    }

    pthread_mutex_unlock(&v->mutex);

    return swapped;
}
632
// Atomically reads the current value.
static inline int smp_int_get(smp_int *v)
{
    int ret;
    pthread_mutex_lock(&v->mutex);
    ret = v->value;
    pthread_mutex_unlock(&v->mutex);
    return ret;
}
641
// Atomically swaps in 'newvalue' and returns the previous value.
static inline int smp_int_get_set(smp_int *v, int newvalue)
{
    int ret;
    pthread_mutex_lock(&v->mutex);
    ret = v->value;
    v->value = newvalue;
    pthread_mutex_unlock(&v->mutex);
    return ret;
}
651
// Releases the mutex backing the protected integer.
static inline void smp_int_destroy(smp_int *v)
{
    pthread_mutex_destroy(&v->mutex);
}
656
657 /**
658 * A group mutex is a mutex that can be used by a group with or without exclusive access.
659 * A mutex is private if the msb is set, it means only one of that group can own it
660 * A mutex is shared if the msb is not set, it means many owner of the same type can own it
661 */
662
663 #define GROUP_MUTEX_NOBODY 0x00
664 #define GROUP_MUTEX_READ 0x01 // default
665 #define GROUP_MUTEX_WRITE 0x82 // default
666 #define GROUP_MUTEX_PRIVATE 0x80 // THIS IS A MASK, ADD IT TO THE OWNER ID
667 #define GROUP_MUTEX_DESTROY 0xfe
668
669 #define GROUP_MUTEX_LOCKMASK_FLAG 0x7f
670 #define GROUP_MUTEX_EXCLUSIVE_FLAG 0x80
671
672 typedef struct group_mutex_t group_mutex_t;
673
struct group_mutex_t
{
    cond_t cond;                // waiters block here (see group_mutex.c)
    mutex_t mutex;              // protects the fields below
    volatile s32 count;         // number of simultaneous owners
    volatile u8 owner;          // current owner id (GROUP_MUTEX_NOBODY when free)
    volatile u8 reserved_owner; // NOTE(review): semantics defined in the .c — presumably the owner waiting for exclusivity; confirm
};
682
683 #define GROUP_MUTEX_INITIALIZER {COND_INITIALIZER, MUTEX_INITIALIZER, 0, 0, 0}
684
685 void group_mutex_init(group_mutex_t* mtx);
686 void group_mutex_lock(group_mutex_t *mtx, u8 owner);
687 bool group_mutex_trylock(group_mutex_t *mtx, u8 owner);
688 void group_mutex_unlock(group_mutex_t *mtx, u8 owner);
689 bool group_mutex_transferlock(group_mutex_t *mtx, u8 owner, u8 newowner);
690 void group_mutex_destroy(group_mutex_t* mtx);
691 bool group_mutex_islocked(group_mutex_t* mtx);
692
693 void group_mutex_double_lock(group_mutex_t *mtx, u8 owner, u8 secondary_owner);
694 void group_mutex_double_unlock(group_mutex_t *mtx, u8 owner, u8 secondary_owner);
695 void group_mutex_exchange_locks(group_mutex_t *mtx, u8 owner, u8 secondary_owner);
696
// Takes the group mutex in shared (read) mode.
static inline void group_mutex_read_lock(group_mutex_t *mtx)
{
    group_mutex_lock(mtx, GROUP_MUTEX_READ);
}
701
// Releases a shared (read) hold on the group mutex.
static inline void group_mutex_read_unlock(group_mutex_t *mtx)
{
    group_mutex_unlock(mtx, GROUP_MUTEX_READ);
}
706
// Takes the group mutex in exclusive (write) mode.
static inline void group_mutex_write_lock(group_mutex_t *mtx)
{
    group_mutex_lock(mtx, GROUP_MUTEX_WRITE);
}
711
// Releases an exclusive (write) hold on the group mutex.
static inline void group_mutex_write_unlock(group_mutex_t *mtx)
{
    group_mutex_unlock(mtx, GROUP_MUTEX_WRITE);
}
716
717 /**
718 * The shared group mutex is a group mutex that only uses N mutex(es) and N condition(s).
719 * This is especially useful when millions of instances are required.
720 * The mutex is used commonly by each structure as its own.
721 * The downside is that every waiting task on the same mutex will be woken up each time one of them broadcasts the condition.
722 *
723 * The current implementation uses N=1
724 */
725
struct shared_group_shared_mutex_t
{
    mutex_t mutex;   // shared by every attached shared_group_mutex_t
    cond_t cond;     // shared condition: every waiter wakes on each broadcast
    volatile s32 rc; // reference count — presumably attached instances; confirm in the .c
};
732
733 typedef struct shared_group_shared_mutex_t shared_group_shared_mutex_t;
734
735 #define SHARED_GROUP_SHARED_MUTEX_INTIALIZER {MUTEX_INITIALIZER, COND_INITIALIZER, 0}
736
// A lightweight per-object lock that borrows its mutex/cond from a shared
// shared_group_shared_mutex_t (see the comment block above).
struct shared_group_mutex_t
{
    shared_group_shared_mutex_t *shared_mutex; // backing mutex/cond (not owned)
#if DNSCORE_HAS_MUTEX_DEBUG_SUPPORT
#if MUTEX_CONTENTION_MONITOR
    struct mutex_contention_monitor_s *mcm; // contention tracking handle
#else
    stacktrace trace;       // where the lock was last taken (debug builds)
    volatile thread_t id;   // thread holding the lock
    volatile u64 timestamp; // when it was taken
#endif
#endif

    volatile s32 count; // number of simultaneous owners
    volatile u8 owner;  // current owner id (GROUP_MUTEX_NOBODY when free)
};
753
754 #define SHARED_GROUP_MUTEX_INTIALIZER THIS_CANNOT_WORK
755
756 typedef struct shared_group_mutex_t shared_group_mutex_t;
757
758 void shared_group_shared_mutex_init(shared_group_shared_mutex_t* smtx);
759 void shared_group_shared_mutex_init_recursive(shared_group_shared_mutex_t* smtx);
760 void shared_group_shared_mutex_destroy(shared_group_shared_mutex_t* smtx);
761
762 void shared_group_mutex_init(shared_group_mutex_t* mtx, shared_group_shared_mutex_t* smtx, const char *name);
763 void shared_group_mutex_lock(shared_group_mutex_t *mtx, u8 owner);
764 bool shared_group_mutex_trylock(shared_group_mutex_t *mtx, u8 owner);
765 void shared_group_mutex_unlock(shared_group_mutex_t *mtx, u8 owner);
766 bool shared_group_mutex_transferlock(shared_group_mutex_t *mtx, u8 owner, u8 newowner);
767 void shared_group_mutex_destroy(shared_group_mutex_t* mtx);
768 bool shared_group_mutex_islocked(shared_group_mutex_t* mtx);
769 bool shared_group_mutex_islocked_by(shared_group_mutex_t *mtx, u8 owner);
770
771 #ifdef __cplusplus
772 }
773 #endif
774
775 #endif /* _MUTEX_H */
776 /** @} */
777