1 /*
2 * kmp_lock.h -- lock header file
3 */
4
5 //===----------------------------------------------------------------------===//
6 //
7 // The LLVM Compiler Infrastructure
8 //
9 // This file is dual licensed under the MIT and the University of Illinois Open
10 // Source Licenses. See LICENSE.txt for details.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #ifndef KMP_LOCK_H
15 #define KMP_LOCK_H
16
17 #include <limits.h> // CHAR_BIT
18 #include <stddef.h> // offsetof
19
20 #include "kmp_debug.h"
21 #include "kmp_os.h"
22
23 #ifdef __cplusplus
24 #include <atomic>
25
26 extern "C" {
27 #endif // __cplusplus
28
29 // ----------------------------------------------------------------------------
30 // Have to copy these definitions from kmp.h because kmp.h cannot be included
31 // due to circular dependencies. Will undef these at end of file.
32
33 #define KMP_PAD(type, sz) \
34 (sizeof(type) + (sz - ((sizeof(type) - 1) % (sz)) - 1))
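// KMP_PAD rounds sizeof(type) up to the next multiple of sz; e.g. for
// sizeof(type) == 20 and sz == 64 it yields 20 + (64 - (19 % 64) - 1) == 64.
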
35 #define KMP_GTID_DNE (-2)
36
37 // Forward declaration of ident and ident_t
38
39 struct ident;
40 typedef struct ident ident_t;
41
42 // End of copied code.
43 // ----------------------------------------------------------------------------
44
45 // We need to know the size of the area we can assume that the compiler(s)
46 // allocated for objects of type omp_lock_t and omp_nest_lock_t. The Intel
47 // compiler always allocates a pointer-sized area, as does visual studio.
48 //
49 // gcc however, only allocates 4 bytes for regular locks, even on 64-bit
50 // intel archs. It allocates at least 8 bytes for nested lock (more on
51 // recent versions), but we are bounded by the pointer-sized chunks that
52 // the Intel compiler allocates.
53
54 #if KMP_OS_LINUX && defined(KMP_GOMP_COMPAT)
55 #define OMP_LOCK_T_SIZE sizeof(int)
56 #define OMP_NEST_LOCK_T_SIZE sizeof(void *)
57 #else
58 #define OMP_LOCK_T_SIZE sizeof(void *)
59 #define OMP_NEST_LOCK_T_SIZE sizeof(void *)
60 #endif
61
62 // The Intel compiler allocates a 32-byte chunk for a critical section.
63 // Both gcc and visual studio only allocate enough space for a pointer.
64 // Sometimes we know that the space was allocated by the Intel compiler.
65 #define OMP_CRITICAL_SIZE sizeof(void *)
66 #define INTEL_CRITICAL_SIZE 32
67
68 // lock flags
69 typedef kmp_uint32 kmp_lock_flags_t;
70
71 #define kmp_lf_critical_section 1
72
73 // When a lock table is used, the indices are of kmp_lock_index_t
74 typedef kmp_uint32 kmp_lock_index_t;
75
76 // When memory allocated for locks is on the lock pool (free list),
77 // it is treated as structs of this type.
78 struct kmp_lock_pool {
79 union kmp_user_lock *next;
80 kmp_lock_index_t index;
81 };
82
83 typedef struct kmp_lock_pool kmp_lock_pool_t;
84
85 extern void __kmp_validate_locks(void);
86
87 // ----------------------------------------------------------------------------
88 // There are 5 lock implementations:
89 // 1. Test and set locks.
90 // 2. futex locks (Linux* OS on x86 and
91 // Intel(R) Many Integrated Core Architecture)
92 // 3. Ticket (Lamport bakery) locks.
93 // 4. Queuing locks (with separate spin fields).
94 // 5. DRPA (Dynamically Reconfigurable Distributed Polling Area) locks
95 //
96 // and 3 lock purposes:
97 // 1. Bootstrap locks -- Used for a few locks available at library
98 // startup-shutdown time.
99 // These do not require non-negative global thread ID's.
100 // 2. Internal RTL locks -- Used everywhere else in the RTL
101 // 3. User locks (includes critical sections)
102 // ----------------------------------------------------------------------------
103
104 // ============================================================================
105 // Lock implementations.
106 //
107 // Test and set locks.
108 //
109 // Non-nested test and set locks differ from the other lock kinds (except
110 // futex) in that we use the memory allocated by the compiler for the lock,
111 // rather than a pointer to it.
112 //
113 // On lin32, lin_32e, and win_32, the space allocated may be as small as 4
114 // bytes, so we have to use a lock table for nested locks, and avoid accessing
115 // the depth_locked field for non-nested locks.
116 //
117 // Information normally available to the tools, such as lock location, lock
118 // usage (normal lock vs. critical section), etc. is not available with test and
119 // set locks.
120 // ----------------------------------------------------------------------------
121
122 struct kmp_base_tas_lock {
123 // KMP_LOCK_FREE(tas) => unlocked; locked: (gtid+1) of owning thread
124 std::atomic<kmp_int32> poll;
125 kmp_int32 depth_locked; // depth locked, for nested locks only
126 };
127
128 typedef struct kmp_base_tas_lock kmp_base_tas_lock_t;
129
130 union kmp_tas_lock {
131 kmp_base_tas_lock_t lk;
132 kmp_lock_pool_t pool; // make certain struct is large enough
133 double lk_align; // use worst case alignment; no cache line padding
134 };
135
136 typedef union kmp_tas_lock kmp_tas_lock_t;
137
138 // Static initializer for test and set lock variables. Usage:
139 // kmp_tas_lock_t xlock = KMP_TAS_LOCK_INITIALIZER( xlock );
140 #define KMP_TAS_LOCK_INITIALIZER(lock) \
141 { \
142 { ATOMIC_VAR_INIT(KMP_LOCK_FREE(tas)), 0 } \
143 }
144
145 extern int __kmp_acquire_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
146 extern int __kmp_test_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
147 extern int __kmp_release_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
148 extern void __kmp_init_tas_lock(kmp_tas_lock_t *lck);
149 extern void __kmp_destroy_tas_lock(kmp_tas_lock_t *lck);
150
151 extern int __kmp_acquire_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
152 extern int __kmp_test_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
153 extern int __kmp_release_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
154 extern void __kmp_init_nested_tas_lock(kmp_tas_lock_t *lck);
155 extern void __kmp_destroy_nested_tas_lock(kmp_tas_lock_t *lck);
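
#if 0
// Usage sketch (illustrative only): a statically initialized, non-nested
// test and set lock guarding a critical region via the declarations above.
// The gtid parameter is a placeholder here; real callers obtain it from the
// runtime (e.g. __kmp_entry_gtid()).
static kmp_tas_lock_t example_tas_lock =
    KMP_TAS_LOCK_INITIALIZER(example_tas_lock);
static inline void example_tas_usage(kmp_int32 gtid) {
  __kmp_acquire_tas_lock(&example_tas_lock, gtid);
  /* ... critical region ... */
  __kmp_release_tas_lock(&example_tas_lock, gtid);
}
#endif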
156
157 #define KMP_LOCK_RELEASED 1
158 #define KMP_LOCK_STILL_HELD 0
159 #define KMP_LOCK_ACQUIRED_FIRST 1
160 #define KMP_LOCK_ACQUIRED_NEXT 0
161 #ifndef KMP_USE_FUTEX
162 #define KMP_USE_FUTEX \
163 (KMP_OS_LINUX && !KMP_OS_CNK && \
164 (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64))
165 #endif
166 #if KMP_USE_FUTEX
167
168 // ----------------------------------------------------------------------------
169 // futex locks. futex locks are only available on Linux* OS.
170 //
171 // Like non-nested test and set lock, non-nested futex locks use the memory
172 // allocated by the compiler for the lock, rather than a pointer to it.
173 //
174 // Information normally available to the tools, such as lock location, lock
175 // usage (normal lock vs. critical section), etc. is not available with futex
176 // locks. With non-nested futex locks, the lock owner is not even available.
177 // ----------------------------------------------------------------------------
178
179 struct kmp_base_futex_lock {
180 volatile kmp_int32 poll; // KMP_LOCK_FREE(futex) => unlocked
181 // locked: 2*(gtid+1) of owning thread
183 kmp_int32 depth_locked; // depth locked, for nested locks only
184 };
185
186 typedef struct kmp_base_futex_lock kmp_base_futex_lock_t;
187
188 union kmp_futex_lock {
189 kmp_base_futex_lock_t lk;
190 kmp_lock_pool_t pool; // make certain struct is large enough
191 double lk_align; // use worst case alignment
192 // no cache line padding
193 };
194
195 typedef union kmp_futex_lock kmp_futex_lock_t;
196
197 // Static initializer for futex lock variables. Usage:
198 // kmp_futex_lock_t xlock = KMP_FUTEX_LOCK_INITIALIZER( xlock );
199 #define KMP_FUTEX_LOCK_INITIALIZER(lock) \
200 { \
201 { KMP_LOCK_FREE(futex), 0 } \
202 }
203
204 extern int __kmp_acquire_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid);
205 extern int __kmp_test_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid);
206 extern int __kmp_release_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid);
207 extern void __kmp_init_futex_lock(kmp_futex_lock_t *lck);
208 extern void __kmp_destroy_futex_lock(kmp_futex_lock_t *lck);
209
210 extern int __kmp_acquire_nested_futex_lock(kmp_futex_lock_t *lck,
211 kmp_int32 gtid);
212 extern int __kmp_test_nested_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid);
213 extern int __kmp_release_nested_futex_lock(kmp_futex_lock_t *lck,
214 kmp_int32 gtid);
215 extern void __kmp_init_nested_futex_lock(kmp_futex_lock_t *lck);
216 extern void __kmp_destroy_nested_futex_lock(kmp_futex_lock_t *lck);
217
218 #endif // KMP_USE_FUTEX
219
220 // ----------------------------------------------------------------------------
221 // Ticket locks.
222
223 #ifdef __cplusplus
224
225 #ifdef _MSC_VER
226 // MSVC won't allow use of std::atomic<> in a union since it has a non-trivial
227 // copy constructor.
228
229 struct kmp_base_ticket_lock {
230 // `initialized' must be the first entry in the lock data structure!
231 std::atomic_bool initialized;
232 volatile union kmp_ticket_lock *self; // points to the lock union
233 ident_t const *location; // Source code location of omp_init_lock().
234 std::atomic_uint
235 next_ticket; // ticket number to give to next thread which acquires
236 std::atomic_uint now_serving; // ticket number for thread which holds the lock
237 std::atomic_int owner_id; // (gtid+1) of owning thread, 0 if unlocked
238 std::atomic_int depth_locked; // depth locked, for nested locks only
239 kmp_lock_flags_t flags; // lock specifics, e.g. critical section lock
240 };
241 #else
242 struct kmp_base_ticket_lock {
243 // `initialized' must be the first entry in the lock data structure!
244 std::atomic<bool> initialized;
245 volatile union kmp_ticket_lock *self; // points to the lock union
246 ident_t const *location; // Source code location of omp_init_lock().
247 std::atomic<unsigned>
248 next_ticket; // ticket number to give to next thread which acquires
249 std::atomic<unsigned>
250 now_serving; // ticket number for thread which holds the lock
251 std::atomic<int> owner_id; // (gtid+1) of owning thread, 0 if unlocked
252 std::atomic<int> depth_locked; // depth locked, for nested locks only
253 kmp_lock_flags_t flags; // lock specifics, e.g. critical section lock
254 };
255 #endif
256
257 #else // __cplusplus
258
259 struct kmp_base_ticket_lock;
260
261 #endif // !__cplusplus
262
263 typedef struct kmp_base_ticket_lock kmp_base_ticket_lock_t;
264
265 union KMP_ALIGN_CACHE kmp_ticket_lock {
266 kmp_base_ticket_lock_t
267 lk; // This field must be first to allow static initializing.
268 kmp_lock_pool_t pool;
269 double lk_align; // use worst case alignment
270 char lk_pad[KMP_PAD(kmp_base_ticket_lock_t, CACHE_LINE)];
271 };
272
273 typedef union kmp_ticket_lock kmp_ticket_lock_t;
274
275 // Static initializer for simple ticket lock variables. Usage:
276 // kmp_ticket_lock_t xlock = KMP_TICKET_LOCK_INITIALIZER( xlock );
277 // Note the macro argument. It is important to make var properly initialized.
278 #define KMP_TICKET_LOCK_INITIALIZER(lock) \
279 { \
280 { \
281 ATOMIC_VAR_INIT(true) \
282 , &(lock), NULL, ATOMIC_VAR_INIT(0U), ATOMIC_VAR_INIT(0U), \
283 ATOMIC_VAR_INIT(0), ATOMIC_VAR_INIT(-1) \
284 } \
285 }
286
287 extern int __kmp_acquire_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid);
288 extern int __kmp_test_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid);
289 extern int __kmp_test_ticket_lock_with_cheks(kmp_ticket_lock_t *lck,
290 kmp_int32 gtid);
291 extern int __kmp_release_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid);
292 extern void __kmp_init_ticket_lock(kmp_ticket_lock_t *lck);
293 extern void __kmp_destroy_ticket_lock(kmp_ticket_lock_t *lck);
294
295 extern int __kmp_acquire_nested_ticket_lock(kmp_ticket_lock_t *lck,
296 kmp_int32 gtid);
297 extern int __kmp_test_nested_ticket_lock(kmp_ticket_lock_t *lck,
298 kmp_int32 gtid);
299 extern int __kmp_release_nested_ticket_lock(kmp_ticket_lock_t *lck,
300 kmp_int32 gtid);
301 extern void __kmp_init_nested_ticket_lock(kmp_ticket_lock_t *lck);
302 extern void __kmp_destroy_nested_ticket_lock(kmp_ticket_lock_t *lck);
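
#if 0
// Sketch of the classic ticket (bakery) protocol behind the declarations
// above -- illustrative only; the real routines live in kmp_lock.cpp and
// additionally handle yielding, owner bookkeeping, and consistency checks.
static inline void example_ticket_acquire(kmp_base_ticket_lock_t *lk) {
  unsigned my_ticket = lk->next_ticket.fetch_add(1); // take the next ticket
  while (lk->now_serving.load() != my_ticket) {
    // spin until the releasing thread advances now_serving to our ticket
  }
}
#endif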
303
304 // ----------------------------------------------------------------------------
305 // Queuing locks.
306
307 #if KMP_USE_ADAPTIVE_LOCKS
308
309 struct kmp_adaptive_lock_info;
310
311 typedef struct kmp_adaptive_lock_info kmp_adaptive_lock_info_t;
312
313 #if KMP_DEBUG_ADAPTIVE_LOCKS
314
315 struct kmp_adaptive_lock_statistics {
316 /* So we can get stats from locks that haven't been destroyed. */
317 kmp_adaptive_lock_info_t *next;
318 kmp_adaptive_lock_info_t *prev;
319
320 /* Other statistics */
321 kmp_uint32 successfulSpeculations;
322 kmp_uint32 hardFailedSpeculations;
323 kmp_uint32 softFailedSpeculations;
324 kmp_uint32 nonSpeculativeAcquires;
325 kmp_uint32 nonSpeculativeAcquireAttempts;
326 kmp_uint32 lemmingYields;
327 };
328
329 typedef struct kmp_adaptive_lock_statistics kmp_adaptive_lock_statistics_t;
330
331 extern void __kmp_print_speculative_stats();
332 extern void __kmp_init_speculative_stats();
333
334 #endif // KMP_DEBUG_ADAPTIVE_LOCKS
335
336 struct kmp_adaptive_lock_info {
337 /* Values used for adaptivity.
338 Although these are accessed from multiple threads we don't access them
339 atomically, because if we miss updates it probably doesn't matter much. (It
340 just affects our decision about whether to try speculation on the lock). */
341 kmp_uint32 volatile badness;
342 kmp_uint32 volatile acquire_attempts;
343 /* Parameters of the lock. */
344 kmp_uint32 max_badness;
345 kmp_uint32 max_soft_retries;
346
347 #if KMP_DEBUG_ADAPTIVE_LOCKS
348 kmp_adaptive_lock_statistics_t volatile stats;
349 #endif
350 };
351
352 #endif // KMP_USE_ADAPTIVE_LOCKS
353
354 struct kmp_base_queuing_lock {
355
356 // `initialized' must be the first entry in the lock data structure!
357 volatile union kmp_queuing_lock
358 *initialized; // Points to the lock union if in initialized state.
359
360 ident_t const *location; // Source code location of omp_init_lock().
361
362 KMP_ALIGN(8) // tail_id must be 8-byte aligned!
363
364 volatile kmp_int32
365 tail_id; // (gtid+1) of thread at tail of wait queue, 0 if empty
366 // Must be no padding here since head/tail used in 8-byte CAS
367 volatile kmp_int32
368 head_id; // (gtid+1) of thread at head of wait queue, 0 if empty
369 // Decl order assumes little endian
370 // bakery-style lock
371 volatile kmp_uint32
372 next_ticket; // ticket number to give to next thread which acquires
373 volatile kmp_uint32
374 now_serving; // ticket number for thread which holds the lock
375 volatile kmp_int32 owner_id; // (gtid+1) of owning thread, 0 if unlocked
376 kmp_int32 depth_locked; // depth locked, for nested locks only
377
378 kmp_lock_flags_t flags; // lock specifics, e.g. critical section lock
379 };
380
381 typedef struct kmp_base_queuing_lock kmp_base_queuing_lock_t;
382
383 KMP_BUILD_ASSERT(offsetof(kmp_base_queuing_lock_t, tail_id) % 8 == 0);
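
// Note: the 8-byte alignment and "no padding" requirements above exist so the
// implementation can operate on the (tail_id, head_id) pair as a single
// 64-bit quantity in a compare-and-swap; the KMP_BUILD_ASSERT verifies the
// alignment part of that assumption.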
384
385 union KMP_ALIGN_CACHE kmp_queuing_lock {
386 kmp_base_queuing_lock_t
387 lk; // This field must be first to allow static initializing.
388 kmp_lock_pool_t pool;
389 double lk_align; // use worst case alignment
390 char lk_pad[KMP_PAD(kmp_base_queuing_lock_t, CACHE_LINE)];
391 };
392
393 typedef union kmp_queuing_lock kmp_queuing_lock_t;
394
395 extern int __kmp_acquire_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid);
396 extern int __kmp_test_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid);
397 extern int __kmp_release_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid);
398 extern void __kmp_init_queuing_lock(kmp_queuing_lock_t *lck);
399 extern void __kmp_destroy_queuing_lock(kmp_queuing_lock_t *lck);
400
401 extern int __kmp_acquire_nested_queuing_lock(kmp_queuing_lock_t *lck,
402 kmp_int32 gtid);
403 extern int __kmp_test_nested_queuing_lock(kmp_queuing_lock_t *lck,
404 kmp_int32 gtid);
405 extern int __kmp_release_nested_queuing_lock(kmp_queuing_lock_t *lck,
406 kmp_int32 gtid);
407 extern void __kmp_init_nested_queuing_lock(kmp_queuing_lock_t *lck);
408 extern void __kmp_destroy_nested_queuing_lock(kmp_queuing_lock_t *lck);
409
410 #if KMP_USE_ADAPTIVE_LOCKS
411
412 // ----------------------------------------------------------------------------
413 // Adaptive locks.
414 struct kmp_base_adaptive_lock {
415 kmp_base_queuing_lock qlk;
416 KMP_ALIGN(CACHE_LINE)
417 kmp_adaptive_lock_info_t
418 adaptive; // Information for the speculative adaptive lock
419 };
420
421 typedef struct kmp_base_adaptive_lock kmp_base_adaptive_lock_t;
422
423 union KMP_ALIGN_CACHE kmp_adaptive_lock {
424 kmp_base_adaptive_lock_t lk;
425 kmp_lock_pool_t pool;
426 double lk_align;
427 char lk_pad[KMP_PAD(kmp_base_adaptive_lock_t, CACHE_LINE)];
428 };
429 typedef union kmp_adaptive_lock kmp_adaptive_lock_t;
430
431 #define GET_QLK_PTR(l) ((kmp_queuing_lock_t *)&(l)->lk.qlk)
432
433 #endif // KMP_USE_ADAPTIVE_LOCKS
434
435 // ----------------------------------------------------------------------------
436 // DRDPA ticket locks.
437 struct kmp_base_drdpa_lock {
438 // All of the fields on the first cache line are only written when
439 // initializing or reconfiguring the lock. These are relatively rare
440 // operations, so data from the first cache line will usually stay resident in
441 // the cache of each thread trying to acquire the lock.
442 //
443 // initialized must be the first entry in the lock data structure!
444 KMP_ALIGN_CACHE
445
446 volatile union kmp_drdpa_lock
447 *initialized; // points to the lock union if in initialized state
448 ident_t const *location; // Source code location of omp_init_lock().
449 std::atomic<std::atomic<kmp_uint64> *> polls;
450 std::atomic<kmp_uint64> mask; // is 2**num_polls-1 for mod op
451 kmp_uint64 cleanup_ticket; // thread with cleanup ticket
452 std::atomic<kmp_uint64> *old_polls; // will deallocate old_polls
453 kmp_uint32 num_polls; // must be power of 2
454
455 // next_ticket needs to exist in a separate cache line, as it is
456 // invalidated every time a thread takes a new ticket.
457 KMP_ALIGN_CACHE
458
459 std::atomic<kmp_uint64> next_ticket;
460
461 // now_serving is used to store our ticket value while we hold the lock. It
462 // has a slightly different meaning in the DRDPA ticket locks (where it is
463 // written by the acquiring thread) than it does in the simple ticket locks
464 // (where it is written by the releasing thread).
465 //
466 // Since now_serving is only read and written in the critical section,
467 // it is non-volatile, but it needs to exist on a separate cache line,
468 // as it is invalidated at every lock acquire.
469 //
470 // Likewise, the vars used for nested locks (owner_id and depth_locked) are
471 // only written by the thread owning the lock, so they are put in this cache
472 // line. owner_id is read by other threads, so it must be declared volatile.
473 KMP_ALIGN_CACHE
474 kmp_uint64 now_serving; // doesn't have to be volatile
475 volatile kmp_uint32 owner_id; // (gtid+1) of owning thread, 0 if unlocked
476 kmp_int32 depth_locked; // depth locked
477 kmp_lock_flags_t flags; // lock specifics, e.g. critical section lock
478 };
479
480 typedef struct kmp_base_drdpa_lock kmp_base_drdpa_lock_t;
481
482 union KMP_ALIGN_CACHE kmp_drdpa_lock {
483 kmp_base_drdpa_lock_t
484 lk; // This field must be first to allow static initializing.
485 kmp_lock_pool_t pool;
486 double lk_align; // use worst case alignment
487 char lk_pad[KMP_PAD(kmp_base_drdpa_lock_t, CACHE_LINE)];
488 };
489
490 typedef union kmp_drdpa_lock kmp_drdpa_lock_t;
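
#if 0
// Sketch of the distributed-polling idea behind the fields above --
// illustrative only; the real __kmp_acquire_drdpa_lock in kmp_lock.cpp also
// handles lock reconfiguration, yielding, and cleanup of old_polls.
static inline void example_drdpa_acquire(kmp_base_drdpa_lock_t *lk) {
  kmp_uint64 my_ticket = lk->next_ticket.fetch_add(1); // take a ticket
  kmp_uint64 mask = lk->mask.load();                   // 2**num_polls - 1
  std::atomic<kmp_uint64> *polls = lk->polls.load();
  // Each thread spins on its own polling slot, so a release invalidates only
  // one location instead of a single shared flag.
  while (polls[my_ticket & mask].load() < my_ticket) {
    // spin (the real code may re-read mask/polls if the lock is resized)
  }
  lk->now_serving = my_ticket; // remember our ticket for the release
}
#endif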
491
492 extern int __kmp_acquire_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid);
493 extern int __kmp_test_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid);
494 extern int __kmp_release_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid);
495 extern void __kmp_init_drdpa_lock(kmp_drdpa_lock_t *lck);
496 extern void __kmp_destroy_drdpa_lock(kmp_drdpa_lock_t *lck);
497
498 extern int __kmp_acquire_nested_drdpa_lock(kmp_drdpa_lock_t *lck,
499 kmp_int32 gtid);
500 extern int __kmp_test_nested_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid);
501 extern int __kmp_release_nested_drdpa_lock(kmp_drdpa_lock_t *lck,
502 kmp_int32 gtid);
503 extern void __kmp_init_nested_drdpa_lock(kmp_drdpa_lock_t *lck);
504 extern void __kmp_destroy_nested_drdpa_lock(kmp_drdpa_lock_t *lck);
505
506 // ============================================================================
507 // Lock purposes.
508 // ============================================================================
509
510 // Bootstrap locks.
511 //
512 // Bootstrap locks -- very few locks used at library initialization time.
513 // Bootstrap locks are currently implemented as ticket locks.
514 // They could also be implemented as test and set locks, but cannot be
515 // implemented with other lock kinds as they require gtids which are not
516 // available at initialization time.
517
518 typedef kmp_ticket_lock_t kmp_bootstrap_lock_t;
519
520 #define KMP_BOOTSTRAP_LOCK_INITIALIZER(lock) KMP_TICKET_LOCK_INITIALIZER((lock))
521 #define KMP_BOOTSTRAP_LOCK_INIT(lock) \
522 kmp_bootstrap_lock_t lock = KMP_TICKET_LOCK_INITIALIZER(lock)
523
524 static inline int __kmp_acquire_bootstrap_lock(kmp_bootstrap_lock_t *lck) {
525 return __kmp_acquire_ticket_lock(lck, KMP_GTID_DNE);
526 }
527
528 static inline int __kmp_test_bootstrap_lock(kmp_bootstrap_lock_t *lck) {
529 return __kmp_test_ticket_lock(lck, KMP_GTID_DNE);
530 }
531
532 static inline void __kmp_release_bootstrap_lock(kmp_bootstrap_lock_t *lck) {
533 __kmp_release_ticket_lock(lck, KMP_GTID_DNE);
534 }
535
536 static inline void __kmp_init_bootstrap_lock(kmp_bootstrap_lock_t *lck) {
537 __kmp_init_ticket_lock(lck);
538 }
539
540 static inline void __kmp_destroy_bootstrap_lock(kmp_bootstrap_lock_t *lck) {
541 __kmp_destroy_ticket_lock(lck);
542 }
543
544 // Internal RTL locks.
545 //
546 // Internal RTL locks are also implemented as ticket locks, for now.
547 //
548 // FIXME - We should go through and figure out which lock kind works best for
549 // each internal lock, and use the type declaration and function calls for
550 // that explicit lock kind (and get rid of this section).
551
552 typedef kmp_ticket_lock_t kmp_lock_t;
553
554 #define KMP_LOCK_INIT(lock) kmp_lock_t lock = KMP_TICKET_LOCK_INITIALIZER(lock)
555
556 static inline int __kmp_acquire_lock(kmp_lock_t *lck, kmp_int32 gtid) {
557 return __kmp_acquire_ticket_lock(lck, gtid);
558 }
559
560 static inline int __kmp_test_lock(kmp_lock_t *lck, kmp_int32 gtid) {
561 return __kmp_test_ticket_lock(lck, gtid);
562 }
563
564 static inline void __kmp_release_lock(kmp_lock_t *lck, kmp_int32 gtid) {
565 __kmp_release_ticket_lock(lck, gtid);
566 }
567
568 static inline void __kmp_init_lock(kmp_lock_t *lck) {
569 __kmp_init_ticket_lock(lck);
570 }
571
572 static inline void __kmp_destroy_lock(kmp_lock_t *lck) {
573 __kmp_destroy_ticket_lock(lck);
574 }
575
576 // User locks.
577 //
578 // Do not allocate objects of type union kmp_user_lock!!! This will waste space
579 // unless __kmp_user_lock_kind == lk_drdpa. Instead, check the value of
580 // __kmp_user_lock_kind and allocate objects of the type of the appropriate
581 // union member, and cast their addresses to kmp_user_lock_p.
582
583 enum kmp_lock_kind {
584 lk_default = 0,
585 lk_tas,
586 #if KMP_USE_FUTEX
587 lk_futex,
588 #endif
589 #if KMP_USE_DYNAMIC_LOCK && KMP_USE_TSX
590 lk_hle,
591 lk_rtm,
592 #endif
593 lk_ticket,
594 lk_queuing,
595 lk_drdpa,
596 #if KMP_USE_ADAPTIVE_LOCKS
597 lk_adaptive
598 #endif // KMP_USE_ADAPTIVE_LOCKS
599 };
600
601 typedef enum kmp_lock_kind kmp_lock_kind_t;
602
603 extern kmp_lock_kind_t __kmp_user_lock_kind;
604
605 union kmp_user_lock {
606 kmp_tas_lock_t tas;
607 #if KMP_USE_FUTEX
608 kmp_futex_lock_t futex;
609 #endif
610 kmp_ticket_lock_t ticket;
611 kmp_queuing_lock_t queuing;
612 kmp_drdpa_lock_t drdpa;
613 #if KMP_USE_ADAPTIVE_LOCKS
614 kmp_adaptive_lock_t adaptive;
615 #endif // KMP_USE_ADAPTIVE_LOCKS
616 kmp_lock_pool_t pool;
617 };
618
619 typedef union kmp_user_lock *kmp_user_lock_p;
620
621 #if !KMP_USE_DYNAMIC_LOCK
622
623 extern size_t __kmp_base_user_lock_size;
624 extern size_t __kmp_user_lock_size;
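
#if 0
// Illustrative only: the allocation pattern described in the "User locks"
// comment above. __kmp_user_lock_size is sized for the lock kind selected in
// __kmp_user_lock_kind; malloc here is just a stand-in, not the allocator the
// runtime actually uses.
kmp_user_lock_p lck = (kmp_user_lock_p)malloc(__kmp_user_lock_size);
#endif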
625
626 extern kmp_int32 (*__kmp_get_user_lock_owner_)(kmp_user_lock_p lck);
627
628 static inline kmp_int32 __kmp_get_user_lock_owner(kmp_user_lock_p lck) {
629 KMP_DEBUG_ASSERT(__kmp_get_user_lock_owner_ != NULL);
630 return (*__kmp_get_user_lock_owner_)(lck);
631 }
632
633 extern int (*__kmp_acquire_user_lock_with_checks_)(kmp_user_lock_p lck,
634 kmp_int32 gtid);
635
636 #if KMP_OS_LINUX && \
637 (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
638
639 #define __kmp_acquire_user_lock_with_checks(lck, gtid) \
640 if (__kmp_user_lock_kind == lk_tas) { \
641 if (__kmp_env_consistency_check) { \
642 char const *const func = "omp_set_lock"; \
643 if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) && \
644 lck->tas.lk.depth_locked != -1) { \
645 KMP_FATAL(LockNestableUsedAsSimple, func); \
646 } \
647 if ((gtid >= 0) && (lck->tas.lk.poll - 1 == gtid)) { \
648 KMP_FATAL(LockIsAlreadyOwned, func); \
649 } \
650 } \
651 if (lck->tas.lk.poll != 0 || \
652 !__kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1)) { \
653 kmp_uint32 spins; \
654 KMP_FSYNC_PREPARE(lck); \
655 KMP_INIT_YIELD(spins); \
656 if (TCR_4(__kmp_nth) > \
657 (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) { \
658 KMP_YIELD(TRUE); \
659 } else { \
660 KMP_YIELD_SPIN(spins); \
661 } \
662 while (lck->tas.lk.poll != 0 || !__kmp_atomic_compare_store_acq( \
663 &lck->tas.lk.poll, 0, gtid + 1)) { \
664 if (TCR_4(__kmp_nth) > \
665 (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) { \
666 KMP_YIELD(TRUE); \
667 } else { \
668 KMP_YIELD_SPIN(spins); \
669 } \
670 } \
671 } \
672 KMP_FSYNC_ACQUIRED(lck); \
673 } else { \
674 KMP_DEBUG_ASSERT(__kmp_acquire_user_lock_with_checks_ != NULL); \
675 (*__kmp_acquire_user_lock_with_checks_)(lck, gtid); \
676 }
677
678 #else
679 static inline int __kmp_acquire_user_lock_with_checks(kmp_user_lock_p lck,
680 kmp_int32 gtid) {
681 KMP_DEBUG_ASSERT(__kmp_acquire_user_lock_with_checks_ != NULL);
682 return (*__kmp_acquire_user_lock_with_checks_)(lck, gtid);
683 }
684 #endif
685
686 extern int (*__kmp_test_user_lock_with_checks_)(kmp_user_lock_p lck,
687 kmp_int32 gtid);
688
689 #if KMP_OS_LINUX && \
690 (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
691
692 #include "kmp_i18n.h" /* AC: KMP_FATAL definition */
693 extern int __kmp_env_consistency_check; /* AC: copy from kmp.h here */
694 static inline int __kmp_test_user_lock_with_checks(kmp_user_lock_p lck,
695 kmp_int32 gtid) {
696 if (__kmp_user_lock_kind == lk_tas) {
697 if (__kmp_env_consistency_check) {
698 char const *const func = "omp_test_lock";
699 if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&
700 lck->tas.lk.depth_locked != -1) {
701 KMP_FATAL(LockNestableUsedAsSimple, func);
702 }
703 }
704 return ((lck->tas.lk.poll == 0) &&
705 __kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1));
706 } else {
707 KMP_DEBUG_ASSERT(__kmp_test_user_lock_with_checks_ != NULL);
708 return (*__kmp_test_user_lock_with_checks_)(lck, gtid);
709 }
710 }
711 #else
712 static inline int __kmp_test_user_lock_with_checks(kmp_user_lock_p lck,
713 kmp_int32 gtid) {
714 KMP_DEBUG_ASSERT(__kmp_test_user_lock_with_checks_ != NULL);
715 return (*__kmp_test_user_lock_with_checks_)(lck, gtid);
716 }
717 #endif
718
719 extern int (*__kmp_release_user_lock_with_checks_)(kmp_user_lock_p lck,
720 kmp_int32 gtid);
721
722 static inline void __kmp_release_user_lock_with_checks(kmp_user_lock_p lck,
723 kmp_int32 gtid) {
724 KMP_DEBUG_ASSERT(__kmp_release_user_lock_with_checks_ != NULL);
725 (*__kmp_release_user_lock_with_checks_)(lck, gtid);
726 }
727
728 extern void (*__kmp_init_user_lock_with_checks_)(kmp_user_lock_p lck);
729
730 static inline void __kmp_init_user_lock_with_checks(kmp_user_lock_p lck) {
731 KMP_DEBUG_ASSERT(__kmp_init_user_lock_with_checks_ != NULL);
732 (*__kmp_init_user_lock_with_checks_)(lck);
733 }
734
735 // We need a non-checking version of destroy lock for when the RTL is
736 // doing the cleanup as it can't always tell if the lock is nested or not.
737 extern void (*__kmp_destroy_user_lock_)(kmp_user_lock_p lck);
738
739 static inline void __kmp_destroy_user_lock(kmp_user_lock_p lck) {
740 KMP_DEBUG_ASSERT(__kmp_destroy_user_lock_ != NULL);
741 (*__kmp_destroy_user_lock_)(lck);
742 }
743
744 extern void (*__kmp_destroy_user_lock_with_checks_)(kmp_user_lock_p lck);
745
746 static inline void __kmp_destroy_user_lock_with_checks(kmp_user_lock_p lck) {
747 KMP_DEBUG_ASSERT(__kmp_destroy_user_lock_with_checks_ != NULL);
748 (*__kmp_destroy_user_lock_with_checks_)(lck);
749 }
750
751 extern int (*__kmp_acquire_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
752 kmp_int32 gtid);
753
754 #if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)
755
756 #define __kmp_acquire_nested_user_lock_with_checks(lck, gtid, depth) \
757 if (__kmp_user_lock_kind == lk_tas) { \
758 if (__kmp_env_consistency_check) { \
759 char const *const func = "omp_set_nest_lock"; \
760 if ((sizeof(kmp_tas_lock_t) <= OMP_NEST_LOCK_T_SIZE) && \
761 lck->tas.lk.depth_locked == -1) { \
762 KMP_FATAL(LockSimpleUsedAsNestable, func); \
763 } \
764 } \
765 if (lck->tas.lk.poll - 1 == gtid) { \
766 lck->tas.lk.depth_locked += 1; \
767 *depth = KMP_LOCK_ACQUIRED_NEXT; \
768 } else { \
769 if ((lck->tas.lk.poll != 0) || \
770 !__kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1)) { \
771 kmp_uint32 spins; \
772 KMP_FSYNC_PREPARE(lck); \
773 KMP_INIT_YIELD(spins); \
774 if (TCR_4(__kmp_nth) > \
775 (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) { \
776 KMP_YIELD(TRUE); \
777 } else { \
778 KMP_YIELD_SPIN(spins); \
779 } \
780 while ( \
781 (lck->tas.lk.poll != 0) || \
782 !__kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1)) { \
783 if (TCR_4(__kmp_nth) > \
784 (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) { \
785 KMP_YIELD(TRUE); \
786 } else { \
787 KMP_YIELD_SPIN(spins); \
788 } \
789 } \
790 } \
791 lck->tas.lk.depth_locked = 1; \
792 *depth = KMP_LOCK_ACQUIRED_FIRST; \
793 } \
794 KMP_FSYNC_ACQUIRED(lck); \
795 } else { \
796 KMP_DEBUG_ASSERT(__kmp_acquire_nested_user_lock_with_checks_ != NULL); \
797 *depth = (*__kmp_acquire_nested_user_lock_with_checks_)(lck, gtid); \
798 }
799
800 #else
801 static inline void
802 __kmp_acquire_nested_user_lock_with_checks(kmp_user_lock_p lck, kmp_int32 gtid,
803 int *depth) {
804 KMP_DEBUG_ASSERT(__kmp_acquire_nested_user_lock_with_checks_ != NULL);
805 *depth = (*__kmp_acquire_nested_user_lock_with_checks_)(lck, gtid);
806 }
807 #endif
808
809 extern int (*__kmp_test_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
810 kmp_int32 gtid);
811
812 #if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)
813 static inline int __kmp_test_nested_user_lock_with_checks(kmp_user_lock_p lck,
814 kmp_int32 gtid) {
815 if (__kmp_user_lock_kind == lk_tas) {
816 int retval;
817 if (__kmp_env_consistency_check) {
818 char const *const func = "omp_test_nest_lock";
819 if ((sizeof(kmp_tas_lock_t) <= OMP_NEST_LOCK_T_SIZE) &&
820 lck->tas.lk.depth_locked == -1) {
821 KMP_FATAL(LockSimpleUsedAsNestable, func);
822 }
823 }
824 KMP_DEBUG_ASSERT(gtid >= 0);
825 if (lck->tas.lk.poll - 1 ==
826 gtid) { /* __kmp_get_tas_lock_owner( lck ) == gtid */
827 return ++lck->tas.lk.depth_locked; /* same owner, depth increased */
828 }
829 retval = ((lck->tas.lk.poll == 0) &&
830 __kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1));
831 if (retval) {
832 KMP_MB();
833 lck->tas.lk.depth_locked = 1;
834 }
835 return retval;
836 } else {
837 KMP_DEBUG_ASSERT(__kmp_test_nested_user_lock_with_checks_ != NULL);
838 return (*__kmp_test_nested_user_lock_with_checks_)(lck, gtid);
839 }
840 }
841 #else
842 static inline int __kmp_test_nested_user_lock_with_checks(kmp_user_lock_p lck,
843 kmp_int32 gtid) {
844 KMP_DEBUG_ASSERT(__kmp_test_nested_user_lock_with_checks_ != NULL);
845 return (*__kmp_test_nested_user_lock_with_checks_)(lck, gtid);
846 }
847 #endif
848
849 extern int (*__kmp_release_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
850 kmp_int32 gtid);
851
852 static inline int
853 __kmp_release_nested_user_lock_with_checks(kmp_user_lock_p lck,
854 kmp_int32 gtid) {
855 KMP_DEBUG_ASSERT(__kmp_release_nested_user_lock_with_checks_ != NULL);
856 return (*__kmp_release_nested_user_lock_with_checks_)(lck, gtid);
857 }
858
859 extern void (*__kmp_init_nested_user_lock_with_checks_)(kmp_user_lock_p lck);
860
861 static inline void
862 __kmp_init_nested_user_lock_with_checks(kmp_user_lock_p lck) {
863 KMP_DEBUG_ASSERT(__kmp_init_nested_user_lock_with_checks_ != NULL);
864 (*__kmp_init_nested_user_lock_with_checks_)(lck);
865 }
866
867 extern void (*__kmp_destroy_nested_user_lock_with_checks_)(kmp_user_lock_p lck);
868
869 static inline void
870 __kmp_destroy_nested_user_lock_with_checks(kmp_user_lock_p lck) {
871 KMP_DEBUG_ASSERT(__kmp_destroy_nested_user_lock_with_checks_ != NULL);
872 (*__kmp_destroy_nested_user_lock_with_checks_)(lck);
873 }
874
875 // user lock functions which do not necessarily exist for all lock kinds.
876 //
877 // The "set" functions usually have wrapper routines that check for a NULL set
878 // function pointer and call it if non-NULL.
879 //
880 // In some cases, it makes sense to have a "get" wrapper function check for a
881 // NULL get function pointer and return NULL / invalid value / error code if
882 // the function pointer is NULL.
883 //
884 // In other cases, the calling code really should differentiate between an
885 // unimplemented function and one that is implemented but returning NULL /
886 // invalid value. If this is the case, no get function wrapper exists.
887
888 extern int (*__kmp_is_user_lock_initialized_)(kmp_user_lock_p lck);
889
890 // no set function; fields set during lock allocation
891
892 extern const ident_t *(*__kmp_get_user_lock_location_)(kmp_user_lock_p lck);
893
894 static inline const ident_t *__kmp_get_user_lock_location(kmp_user_lock_p lck) {
895 if (__kmp_get_user_lock_location_ != NULL) {
896 return (*__kmp_get_user_lock_location_)(lck);
897 } else {
898 return NULL;
899 }
900 }
901
902 extern void (*__kmp_set_user_lock_location_)(kmp_user_lock_p lck,
903 const ident_t *loc);
904
905 static inline void __kmp_set_user_lock_location(kmp_user_lock_p lck,
906 const ident_t *loc) {
907 if (__kmp_set_user_lock_location_ != NULL) {
908 (*__kmp_set_user_lock_location_)(lck, loc);
909 }
910 }
911
912 extern kmp_lock_flags_t (*__kmp_get_user_lock_flags_)(kmp_user_lock_p lck);
913
914 extern void (*__kmp_set_user_lock_flags_)(kmp_user_lock_p lck,
915 kmp_lock_flags_t flags);
916
917 static inline void __kmp_set_user_lock_flags(kmp_user_lock_p lck,
918 kmp_lock_flags_t flags) {
919 if (__kmp_set_user_lock_flags_ != NULL) {
920 (*__kmp_set_user_lock_flags_)(lck, flags);
921 }
922 }
923
924 // The function that sets up all of the vtbl pointers for kmp_user_lock_t.
925 extern void __kmp_set_user_lock_vptrs(kmp_lock_kind_t user_lock_kind);
926
927 // Macros for binding user lock functions.
928 #define KMP_BIND_USER_LOCK_TEMPLATE(nest, kind, suffix) \
929 { \
930 __kmp_acquire##nest##user_lock_with_checks_ = (int (*)( \
931 kmp_user_lock_p, kmp_int32))__kmp_acquire##nest##kind##_##suffix; \
932 __kmp_release##nest##user_lock_with_checks_ = (int (*)( \
933 kmp_user_lock_p, kmp_int32))__kmp_release##nest##kind##_##suffix; \
934 __kmp_test##nest##user_lock_with_checks_ = (int (*)( \
935 kmp_user_lock_p, kmp_int32))__kmp_test##nest##kind##_##suffix; \
936 __kmp_init##nest##user_lock_with_checks_ = \
937 (void (*)(kmp_user_lock_p))__kmp_init##nest##kind##_##suffix; \
938 __kmp_destroy##nest##user_lock_with_checks_ = \
939 (void (*)(kmp_user_lock_p))__kmp_destroy##nest##kind##_##suffix; \
940 }
941
942 #define KMP_BIND_USER_LOCK(kind) KMP_BIND_USER_LOCK_TEMPLATE(_, kind, lock)
943 #define KMP_BIND_USER_LOCK_WITH_CHECKS(kind) \
944 KMP_BIND_USER_LOCK_TEMPLATE(_, kind, lock_with_checks)
945 #define KMP_BIND_NESTED_USER_LOCK(kind) \
946 KMP_BIND_USER_LOCK_TEMPLATE(_nested_, kind, lock)
947 #define KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(kind) \
948 KMP_BIND_USER_LOCK_TEMPLATE(_nested_, kind, lock_with_checks)
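
// For example, KMP_BIND_USER_LOCK(ticket) expands (roughly) to assignments of
// the form
//   __kmp_acquire_user_lock_with_checks_ =
//       (int (*)(kmp_user_lock_p, kmp_int32))__kmp_acquire_ticket_lock;
// binding the acquire/release/test/init/destroy entries of the "with checks"
// vtable to the plain ticket lock routines.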
949
950 // User lock table & lock allocation
951 /* On 64-bit Linux* OS (and OS X*) the GNU compiler allocates only 4 bytes of
952 memory for a lock variable, which is not enough to store a pointer, so we have
953 to use lock indexes instead of pointers and maintain a lock table to map
954 indexes to pointers.
955
956
957 Note: The first element of the table is not a pointer to lock! It is a
958 pointer to previously allocated table (or NULL if it is the first table).
959
960 Usage:
961
962 if ( OMP_LOCK_T_SIZE < sizeof( <lock> ) ) { // or OMP_NEST_LOCK_T_SIZE
963 Lock table is fully utilized. User locks are indexes, so table is used on
964 user lock operation.
965 Note: it may be the case (lin_32) that we don't need to use a lock
966 table for regular locks, but do need the table for nested locks.
967 }
968 else {
969 Lock table initialized but not actually used.
970 }
971 */
972
973 struct kmp_lock_table {
974 kmp_lock_index_t used; // Number of used elements
975 kmp_lock_index_t allocated; // Number of allocated elements
976 kmp_user_lock_p *table; // Lock table.
977 };
978
979 typedef struct kmp_lock_table kmp_lock_table_t;
980
981 extern kmp_lock_table_t __kmp_user_lock_table;
982 extern kmp_user_lock_p __kmp_lock_pool;
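
// Illustrative only: when indexes are in use (see the comment above), a user
// lock variable holds a kmp_lock_index_t rather than a pointer, and the
// lookup is conceptually
//   kmp_user_lock_p lck = __kmp_user_lock_table.table[index];
// with table[0] reserved for the pointer to the previously allocated table.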
983
984 struct kmp_block_of_locks {
985 struct kmp_block_of_locks *next_block;
986 void *locks;
987 };
988
989 typedef struct kmp_block_of_locks kmp_block_of_locks_t;
990
991 extern kmp_block_of_locks_t *__kmp_lock_blocks;
992 extern int __kmp_num_locks_in_block;
993
994 extern kmp_user_lock_p __kmp_user_lock_allocate(void **user_lock,
995 kmp_int32 gtid,
996 kmp_lock_flags_t flags);
997 extern void __kmp_user_lock_free(void **user_lock, kmp_int32 gtid,
998 kmp_user_lock_p lck);
999 extern kmp_user_lock_p __kmp_lookup_user_lock(void **user_lock,
1000 char const *func);
1001 extern void __kmp_cleanup_user_locks();
1002
1003 #define KMP_CHECK_USER_LOCK_INIT() \
1004 { \
1005 if (!TCR_4(__kmp_init_user_locks)) { \
1006 __kmp_acquire_bootstrap_lock(&__kmp_initz_lock); \
1007 if (!TCR_4(__kmp_init_user_locks)) { \
1008 TCW_4(__kmp_init_user_locks, TRUE); \
1009 } \
1010 __kmp_release_bootstrap_lock(&__kmp_initz_lock); \
1011 } \
1012 }
1013
1014 #endif // KMP_USE_DYNAMIC_LOCK
1015
1016 #undef KMP_PAD
1017 #undef KMP_GTID_DNE
1018
1019 #if KMP_USE_DYNAMIC_LOCK
1020 // KMP_USE_DYNAMIC_LOCK enables dynamic dispatch of lock functions without
1021 // breaking the current compatibility. Essential functionality of this new code
1022 // is dynamic dispatch, but it also implements (or enables implementation of)
1023 // hinted user lock and critical section which will be part of OMP 4.5 soon.
1024 //
1025 // Lock type can be decided at creation time (i.e., lock initialization), and
1026 // subsequent lock function call on the created lock object requires type
1027 // extraction and call through jump table using the extracted type. This type
1028 // information is stored in two different ways depending on the size of the lock
1029 // object, and we differentiate lock types by this size requirement - direct and
1030 // indirect locks.
1031 //
1032 // Direct locks:
1033 // A direct lock object fits into the space created by the compiler for an
1034 // omp_lock_t object, and TAS/Futex locks fall into this category. We use the
1035 // low byte of the lock object as the storage for the lock type, and appropriate
1036 // bit operations are required to access the data meaningful to the lock
1037 // algorithms. Also, to differentiate direct lock from indirect lock, 1 is
1038 // written to LSB of the lock object. The newly introduced "hle" lock is also a
1039 // direct lock.
1040 //
1041 // Indirect locks:
1042 // An indirect lock object requires more space than the compiler-generated
1043 // space, and it should be allocated from heap. Depending on the size of the
1044 // compiler-generated space for the lock (i.e., size of omp_lock_t), this
1045 // omp_lock_t object stores either the address of the heap-allocated indirect
1046 // lock (void * fits in the object) or an index to the indirect lock table entry
1047 // that holds the address. Ticket/Queuing/DRDPA/Adaptive lock falls into this
1048 // category, and the newly introduced "rtm" lock is also an indirect lock which
1049 // was implemented on top of the Queuing lock. When the omp_lock_t object holds
1050 // an index (not lock address), 0 is written to LSB to differentiate the lock
1051 // from a direct lock, and the remaining part is the actual index to the
1052 // indirect lock table.
1053
1054 #include <stdint.h> // for uintptr_t
1055
1056 // Shortcuts
1057 #define KMP_USE_INLINED_TAS \
1058 (KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM)) && 1
1059 #define KMP_USE_INLINED_FUTEX KMP_USE_FUTEX && 0
1060
1061 // List of lock definitions; all nested locks are indirect locks.
1062 // hle lock is xchg lock prefixed with XACQUIRE/XRELEASE.
1063 // All nested locks are indirect lock types.
1064 #if KMP_USE_TSX
1065 #if KMP_USE_FUTEX
1066 #define KMP_FOREACH_D_LOCK(m, a) m(tas, a) m(futex, a) m(hle, a)
1067 #define KMP_FOREACH_I_LOCK(m, a) \
1068 m(ticket, a) m(queuing, a) m(adaptive, a) m(drdpa, a) m(rtm, a) \
1069 m(nested_tas, a) m(nested_futex, a) m(nested_ticket, a) \
1070 m(nested_queuing, a) m(nested_drdpa, a)
1071 #else
1072 #define KMP_FOREACH_D_LOCK(m, a) m(tas, a) m(hle, a)
1073 #define KMP_FOREACH_I_LOCK(m, a) \
1074 m(ticket, a) m(queuing, a) m(adaptive, a) m(drdpa, a) m(rtm, a) \
1075 m(nested_tas, a) m(nested_ticket, a) m(nested_queuing, a) \
1076 m(nested_drdpa, a)
1077 #endif // KMP_USE_FUTEX
1078 #define KMP_LAST_D_LOCK lockseq_hle
1079 #else
1080 #if KMP_USE_FUTEX
1081 #define KMP_FOREACH_D_LOCK(m, a) m(tas, a) m(futex, a)
1082 #define KMP_FOREACH_I_LOCK(m, a) \
1083 m(ticket, a) m(queuing, a) m(drdpa, a) m(nested_tas, a) m(nested_futex, a) \
1084 m(nested_ticket, a) m(nested_queuing, a) m(nested_drdpa, a)
1085 #define KMP_LAST_D_LOCK lockseq_futex
1086 #else
1087 #define KMP_FOREACH_D_LOCK(m, a) m(tas, a)
1088 #define KMP_FOREACH_I_LOCK(m, a) \
1089 m(ticket, a) m(queuing, a) m(drdpa, a) m(nested_tas, a) m(nested_ticket, a) \
1090 m(nested_queuing, a) m(nested_drdpa, a)
1091 #define KMP_LAST_D_LOCK lockseq_tas
1092 #endif // KMP_USE_FUTEX
1093 #endif // KMP_USE_TSX
1094
1095 // Information used in dynamic dispatch
1096 #define KMP_LOCK_SHIFT \
1097 8 // number of low bits to be used as tag for direct locks
1098 #define KMP_FIRST_D_LOCK lockseq_tas
1099 #define KMP_FIRST_I_LOCK lockseq_ticket
1100 #define KMP_LAST_I_LOCK lockseq_nested_drdpa
1101 #define KMP_NUM_I_LOCKS \
1102 (locktag_nested_drdpa + 1) // number of indirect lock types
1103
1104 // Base type for dynamic locks.
1105 typedef kmp_uint32 kmp_dyna_lock_t;
1106
1107 // Lock sequence that enumerates all lock kinds. Always make this enumeration
1108 // consistent with kmp_lockseq_t in the include directory.
1109 typedef enum {
1110 lockseq_indirect = 0,
1111 #define expand_seq(l, a) lockseq_##l,
1112 KMP_FOREACH_D_LOCK(expand_seq, 0) KMP_FOREACH_I_LOCK(expand_seq, 0)
1113 #undef expand_seq
1114 } kmp_dyna_lockseq_t;
1115
1116 // Enumerates indirect lock tags.
1117 typedef enum {
1118 #define expand_tag(l, a) locktag_##l,
1119 KMP_FOREACH_I_LOCK(expand_tag, 0)
1120 #undef expand_tag
1121 } kmp_indirect_locktag_t;
1122
1123 // Utility macros that extract information from lock sequences.
1124 #define KMP_IS_D_LOCK(seq) \
1125 ((seq) >= KMP_FIRST_D_LOCK && (seq) <= KMP_LAST_D_LOCK)
1126 #define KMP_IS_I_LOCK(seq) \
1127 ((seq) >= KMP_FIRST_I_LOCK && (seq) <= KMP_LAST_I_LOCK)
1128 #define KMP_GET_I_TAG(seq) (kmp_indirect_locktag_t)((seq)-KMP_FIRST_I_LOCK)
1129 #define KMP_GET_D_TAG(seq) ((seq) << 1 | 1)
1130
1131 // Enumerates direct lock tags starting from indirect tag.
1132 typedef enum {
1133 #define expand_tag(l, a) locktag_##l = KMP_GET_D_TAG(lockseq_##l),
1134 KMP_FOREACH_D_LOCK(expand_tag, 0)
1135 #undef expand_tag
1136 } kmp_direct_locktag_t;
1137
1138 // Indirect lock type
1139 typedef struct {
1140 kmp_user_lock_p lock;
1141 kmp_indirect_locktag_t type;
1142 } kmp_indirect_lock_t;
1143
1144 // Function tables for direct locks. Set/unset/test differentiate functions
1145 // with/without consistency checking.
1146 extern void (*__kmp_direct_init[])(kmp_dyna_lock_t *, kmp_dyna_lockseq_t);
1147 extern void (*(*__kmp_direct_destroy))(kmp_dyna_lock_t *);
1148 extern int (*(*__kmp_direct_set))(kmp_dyna_lock_t *, kmp_int32);
1149 extern int (*(*__kmp_direct_unset))(kmp_dyna_lock_t *, kmp_int32);
1150 extern int (*(*__kmp_direct_test))(kmp_dyna_lock_t *, kmp_int32);
1151
1152 // Function tables for indirect locks. Set/unset/test differentiate functions
1153 // with/without consistency checking.
1154 extern void (*__kmp_indirect_init[])(kmp_user_lock_p);
1155 extern void (*(*__kmp_indirect_destroy))(kmp_user_lock_p);
1156 extern int (*(*__kmp_indirect_set))(kmp_user_lock_p, kmp_int32);
1157 extern int (*(*__kmp_indirect_unset))(kmp_user_lock_p, kmp_int32);
1158 extern int (*(*__kmp_indirect_test))(kmp_user_lock_p, kmp_int32);
1159
1160 // Extracts direct lock tag from a user lock pointer
1161 #define KMP_EXTRACT_D_TAG(l) \
1162 (*((kmp_dyna_lock_t *)(l)) & ((1 << KMP_LOCK_SHIFT) - 1) & \
1163 -(*((kmp_dyna_lock_t *)(l)) & 1))
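// If the low bit of the lock word is 1 (a direct lock), -(word & 1) is all
// ones and the macro yields the low KMP_LOCK_SHIFT bits, i.e. the direct lock
// tag; if the low bit is 0 (an indirect lock), the result is 0.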
1164
1165 // Extracts indirect lock index from a user lock pointer
1166 #define KMP_EXTRACT_I_INDEX(l) (*(kmp_lock_index_t *)(l) >> 1)
1167
1168 // Returns function pointer to the direct lock function with l (kmp_dyna_lock_t
1169 // *) and op (operation type).
1170 #define KMP_D_LOCK_FUNC(l, op) __kmp_direct_##op[KMP_EXTRACT_D_TAG(l)]
1171
1172 // Returns function pointer to the indirect lock function with l
1173 // (kmp_indirect_lock_t *) and op (operation type).
1174 #define KMP_I_LOCK_FUNC(l, op) \
1175 __kmp_indirect_##op[((kmp_indirect_lock_t *)(l))->type]
1176
1177 // Initializes a direct lock with the given lock pointer and lock sequence.
1178 #define KMP_INIT_D_LOCK(l, seq) \
1179 __kmp_direct_init[KMP_GET_D_TAG(seq)]((kmp_dyna_lock_t *)l, seq)
1180
1181 // Initializes an indirect lock with the given lock pointer and lock sequence.
1182 #define KMP_INIT_I_LOCK(l, seq) \
1183 __kmp_direct_init[0]((kmp_dyna_lock_t *)(l), seq)
1184
1185 // Returns "free" lock value for the given lock type.
1186 #define KMP_LOCK_FREE(type) (locktag_##type)
1187
1188 // Returns "busy" lock value for the given lock teyp.
1189 #define KMP_LOCK_BUSY(v, type) ((v) << KMP_LOCK_SHIFT | locktag_##type)
1190
1191 // Returns lock value after removing (shifting) lock tag.
1192 #define KMP_LOCK_STRIP(v) ((v) >> KMP_LOCK_SHIFT)
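// Worked example: for a direct tas lock, KMP_LOCK_BUSY(gtid + 1, tas) yields
// ((gtid + 1) << KMP_LOCK_SHIFT) | locktag_tas, and applying KMP_LOCK_STRIP
// to that value recovers gtid + 1.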
1193
1194 // Initializes global states and data structures for managing dynamic user
1195 // locks.
1196 extern void __kmp_init_dynamic_user_locks();
1197
1198 // Allocates and returns an indirect lock with the given indirect lock tag.
1199 extern kmp_indirect_lock_t *
1200 __kmp_allocate_indirect_lock(void **, kmp_int32, kmp_indirect_locktag_t);
1201
1202 // Cleans up global states and data structures for managing dynamic user locks.
1203 extern void __kmp_cleanup_indirect_user_locks();
1204
1205 // Default user lock sequence when not using hinted locks.
1206 extern kmp_dyna_lockseq_t __kmp_user_lock_seq;
1207
1208 // Jump table for "set lock location", available only for indirect locks.
1209 extern void (*__kmp_indirect_set_location[KMP_NUM_I_LOCKS])(kmp_user_lock_p,
1210 const ident_t *);
1211 #define KMP_SET_I_LOCK_LOCATION(lck, loc) \
1212 { \
1213 if (__kmp_indirect_set_location[(lck)->type] != NULL) \
1214 __kmp_indirect_set_location[(lck)->type]((lck)->lock, loc); \
1215 }
1216
1217 // Jump table for "set lock flags", available only for indirect locks.
1218 extern void (*__kmp_indirect_set_flags[KMP_NUM_I_LOCKS])(kmp_user_lock_p,
1219 kmp_lock_flags_t);
1220 #define KMP_SET_I_LOCK_FLAGS(lck, flag) \
1221 { \
1222 if (__kmp_indirect_set_flags[(lck)->type] != NULL) \
1223 __kmp_indirect_set_flags[(lck)->type]((lck)->lock, flag); \
1224 }
1225
1226 // Jump table for "get lock location", available only for indirect locks.
1227 extern const ident_t *(*__kmp_indirect_get_location[KMP_NUM_I_LOCKS])(
1228 kmp_user_lock_p);
1229 #define KMP_GET_I_LOCK_LOCATION(lck) \
1230 (__kmp_indirect_get_location[(lck)->type] != NULL \
1231 ? __kmp_indirect_get_location[(lck)->type]((lck)->lock) \
1232 : NULL)
1233
1234 // Jump table for "get lock flags", available only for indirect locks.
1235 extern kmp_lock_flags_t (*__kmp_indirect_get_flags[KMP_NUM_I_LOCKS])(
1236 kmp_user_lock_p);
1237 #define KMP_GET_I_LOCK_FLAGS(lck) \
1238 (__kmp_indirect_get_flags[(lck)->type] != NULL \
1239 ? __kmp_indirect_get_flags[(lck)->type]((lck)->lock) \
1240 : NULL)
1241
1242 #define KMP_I_LOCK_CHUNK \
1243 1024 // number of kmp_indirect_lock_t objects to be allocated together
1244
1245 // Lock table for indirect locks.
1246 typedef struct kmp_indirect_lock_table {
1247 kmp_indirect_lock_t **table; // blocks of indirect locks allocated
1248 kmp_lock_index_t size; // size of the indirect lock table
1249 kmp_lock_index_t next; // index to the next lock to be allocated
1250 } kmp_indirect_lock_table_t;
1251
1252 extern kmp_indirect_lock_table_t __kmp_i_lock_table;
1253
1254 // Returns the indirect lock associated with the given index.
1255 #define KMP_GET_I_LOCK(index) \
1256 (*(__kmp_i_lock_table.table + (index) / KMP_I_LOCK_CHUNK) + \
1257 (index) % KMP_I_LOCK_CHUNK)
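// For example, with KMP_I_LOCK_CHUNK == 1024, index 1500 resolves to entry
// 1500 % 1024 == 476 of the block at __kmp_i_lock_table.table[1500 / 1024].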
1258
1259 // Number of locks in a lock block, which is fixed to "1" now.
1260 // TODO: No lock block implementation now. If we do support, we need to manage
1261 // lock block data structure for each indirect lock type.
1262 extern int __kmp_num_locks_in_block;
1263
1264 // Fast lock table lookup without consistency checking
1265 #define KMP_LOOKUP_I_LOCK(l) \
1266 ((OMP_LOCK_T_SIZE < sizeof(void *)) ? KMP_GET_I_LOCK(KMP_EXTRACT_I_INDEX(l)) \
1267 : *((kmp_indirect_lock_t **)(l)))
1268
1269 // Used once in kmp_error.cpp
1270 extern kmp_int32 __kmp_get_user_lock_owner(kmp_user_lock_p, kmp_uint32);
1271
1272 #else // KMP_USE_DYNAMIC_LOCK
1273
1274 #define KMP_LOCK_BUSY(v, type) (v)
1275 #define KMP_LOCK_FREE(type) 0
1276 #define KMP_LOCK_STRIP(v) (v)
1277
1278 #endif // KMP_USE_DYNAMIC_LOCK
1279
1280 // data structure for using backoff within spin locks.
1281 typedef struct {
1282 kmp_uint32 step; // current step
1283 kmp_uint32 max_backoff; // upper bound of outer delay loop
1284 kmp_uint32 min_tick; // size of inner delay loop in ticks (machine-dependent)
1285 } kmp_backoff_t;
1286
1287 // Runtime's default backoff parameters
1288 extern kmp_backoff_t __kmp_spin_backoff_params;
1289
1290 // Backoff function
1291 extern void __kmp_spin_backoff(kmp_backoff_t *);
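
// Illustrative only (the real __kmp_spin_backoff lives in kmp_lock.cpp): the
// intent of the fields above is a truncated exponential backoff in which each
// call busy-waits on the order of step * min_tick ticks and step grows toward
// max_backoff, so contended spin locks back off progressively.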
1292
1293 #ifdef __cplusplus
1294 } // extern "C"
1295 #endif // __cplusplus
1296
1297 #endif /* KMP_LOCK_H */
1298