1 /*
2  * kmp_global.cpp -- KPTS global variables for runtime support library
3  */
4 
5 //===----------------------------------------------------------------------===//
6 //
7 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
8 // See https://llvm.org/LICENSE.txt for license information.
9 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "kmp.h"
14 #include "kmp_affinity.h"
15 #if KMP_USE_HIER_SCHED
16 #include "kmp_dispatch_hier.h"
17 #endif
18 
19 kmp_key_t __kmp_gtid_threadprivate_key;
20 
21 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
kmp_cpuinfo_t __kmp_cpuinfo = {0}; // Zero-initialized; CPU info not yet queried
23 #endif
24 
25 #if KMP_STATS_ENABLED
26 #include "kmp_stats.h"
27 // lock for modifying the global __kmp_stats_list
28 kmp_tas_lock_t __kmp_stats_lock;
29 
// global list of per-thread stats; the head is a sentinel node that
// accumulates all stats produced before __kmp_create_worker is called.
32 kmp_stats_list *__kmp_stats_list;
33 
34 // thread local pointer to stats node within list
35 KMP_THREAD_LOCAL kmp_stats_list *__kmp_stats_thread_ptr = NULL;
36 
37 // gives reference tick for all events (considered the 0 tick)
38 tsc_tick_count __kmp_stats_start_time;
39 #endif
40 
41 /* ----------------------------------------------------- */
42 /* INITIALIZATION VARIABLES */
/* writes are synchronized during init, but they may be read at any time */
44 volatile int __kmp_init_serial = FALSE;
45 volatile int __kmp_init_gtid = FALSE;
46 volatile int __kmp_init_common = FALSE;
47 volatile int __kmp_need_register_serial = TRUE;
48 volatile int __kmp_init_middle = FALSE;
49 volatile int __kmp_init_parallel = FALSE;
50 volatile int __kmp_init_hidden_helper = FALSE;
51 volatile int __kmp_init_hidden_helper_threads = FALSE;
52 volatile int __kmp_hidden_helper_team_done = FALSE;
53 #if KMP_USE_MONITOR
54 volatile int __kmp_init_monitor =
55     0; /* 1 - launched, 2 - actually started (Windows* OS only) */
56 #endif
57 volatile int __kmp_init_user_locks = FALSE;
58 
/* list of addresses of allocated caches for commons */
60 kmp_cached_addr_t *__kmp_threadpriv_cache_list = NULL;
61 
62 int __kmp_init_counter = 0;
63 int __kmp_root_counter = 0;
64 int __kmp_version = 0;
65 
std::atomic<kmp_int32> __kmp_team_counter = 0;
std::atomic<kmp_int32> __kmp_task_counter = 0;
68 
69 size_t __kmp_stksize = KMP_DEFAULT_STKSIZE;
70 #if KMP_USE_MONITOR
71 size_t __kmp_monitor_stksize = 0; // auto adjust
72 #endif
73 size_t __kmp_stkoffset = KMP_DEFAULT_STKOFFSET;
74 int __kmp_stkpadding = KMP_MIN_STKPADDING;
75 
76 size_t __kmp_malloc_pool_incr = KMP_DEFAULT_MALLOC_POOL_INCR;
77 
78 // Barrier method defaults, settings, and strings.
79 // branch factor = 2^branch_bits (only relevant for tree & hyper barrier types)
// branch_factor = 4 (hyper2: C78980)
kmp_uint32 __kmp_barrier_gather_bb_dflt = 2;
// branch_factor = 4 (hyper2: C78980)
kmp_uint32 __kmp_barrier_release_bb_dflt = 2;
84 
kmp_bar_pat_e __kmp_barrier_gather_pat_dflt = bp_hyper_bar; // hyper2: C78980
kmp_bar_pat_e __kmp_barrier_release_pat_dflt = bp_hyper_bar; // hyper2: C78980
89 
90 kmp_uint32 __kmp_barrier_gather_branch_bits[bs_last_barrier] = {0};
91 kmp_uint32 __kmp_barrier_release_branch_bits[bs_last_barrier] = {0};
92 kmp_bar_pat_e __kmp_barrier_gather_pattern[bs_last_barrier] = {bp_linear_bar};
93 kmp_bar_pat_e __kmp_barrier_release_pattern[bs_last_barrier] = {bp_linear_bar};
94 char const *__kmp_barrier_branch_bit_env_name[bs_last_barrier] = {
95     "KMP_PLAIN_BARRIER", "KMP_FORKJOIN_BARRIER"
96 #if KMP_FAST_REDUCTION_BARRIER
97     ,
98     "KMP_REDUCTION_BARRIER"
99 #endif // KMP_FAST_REDUCTION_BARRIER
100 };
101 char const *__kmp_barrier_pattern_env_name[bs_last_barrier] = {
102     "KMP_PLAIN_BARRIER_PATTERN", "KMP_FORKJOIN_BARRIER_PATTERN"
103 #if KMP_FAST_REDUCTION_BARRIER
104     ,
105     "KMP_REDUCTION_BARRIER_PATTERN"
106 #endif // KMP_FAST_REDUCTION_BARRIER
107 };
108 char const *__kmp_barrier_type_name[bs_last_barrier] = {"plain", "forkjoin"
109 #if KMP_FAST_REDUCTION_BARRIER
110                                                         ,
111                                                         "reduction"
112 #endif // KMP_FAST_REDUCTION_BARRIER
113 };
114 char const *__kmp_barrier_pattern_name[bp_last_bar] = {
115     "linear", "tree", "hyper", "hierarchical", "dist"};
116 
117 int __kmp_allThreadsSpecified = 0;
118 size_t __kmp_align_alloc = CACHE_LINE;
119 
120 int __kmp_generate_warnings = kmp_warnings_low;
121 int __kmp_reserve_warn = 0;
122 int __kmp_xproc = 0;
123 int __kmp_avail_proc = 0;
124 size_t __kmp_sys_min_stksize = KMP_MIN_STKSIZE;
125 int __kmp_sys_max_nth = KMP_MAX_NTH;
126 int __kmp_max_nth = 0;
127 int __kmp_cg_max_nth = 0;
128 int __kmp_teams_max_nth = 0;
129 int __kmp_threads_capacity = 0;
130 int __kmp_dflt_team_nth = 0;
131 int __kmp_dflt_team_nth_ub = 0;
132 int __kmp_tp_capacity = 0;
133 int __kmp_tp_cached = 0;
134 int __kmp_dispatch_num_buffers = KMP_DFLT_DISP_NUM_BUFF;
135 int __kmp_dflt_max_active_levels = 1; // Nesting off by default
136 bool __kmp_dflt_max_active_levels_set = false; // Don't override set value
137 #if KMP_NESTED_HOT_TEAMS
int __kmp_hot_teams_mode = 0; /* 0 - free extra threads when reduced,
                                 1 - keep extra threads when reduced */
int __kmp_hot_teams_max_level = 1; /* nesting level of hot teams */
141 #endif
142 enum library_type __kmp_library = library_none;
143 enum sched_type __kmp_sched =
144     kmp_sch_default; /* scheduling method for runtime scheduling */
145 enum sched_type __kmp_static =
146     kmp_sch_static_greedy; /* default static scheduling method */
147 enum sched_type __kmp_guided =
148     kmp_sch_guided_iterative_chunked; /* default guided scheduling method */
149 enum sched_type __kmp_auto =
150     kmp_sch_guided_analytical_chunked; /* default auto scheduling method */
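
// Descriptive summary of the defaults above (behavior summarized, not
// verified here): schedule(runtime) consults __kmp_sched; a plain
// schedule(static) with no chunk size is mapped onto __kmp_static,
// schedule(guided) onto __kmp_guided, and schedule(auto) onto __kmp_auto.
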
151 #if KMP_USE_HIER_SCHED
152 int __kmp_dispatch_hand_threading = 0;
153 int __kmp_hier_max_units[kmp_hier_layer_e::LAYER_LAST + 1];
154 int __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_LAST + 1];
155 kmp_hier_sched_env_t __kmp_hier_scheds = {0, 0, NULL, NULL, NULL};
156 #endif
157 int __kmp_dflt_blocktime = KMP_DEFAULT_BLOCKTIME;
158 bool __kmp_wpolicy_passive = false;
159 #if KMP_USE_MONITOR
160 int __kmp_monitor_wakeups = KMP_MIN_MONITOR_WAKEUPS;
161 int __kmp_bt_intervals = KMP_INTERVALS_FROM_BLOCKTIME(KMP_DEFAULT_BLOCKTIME,
162                                                       KMP_MIN_MONITOR_WAKEUPS);
163 #endif
164 #ifdef KMP_ADJUST_BLOCKTIME
165 int __kmp_zero_bt = FALSE;
166 #endif /* KMP_ADJUST_BLOCKTIME */
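
// Illustrative environment usage (semantics assumed from the KMP_BLOCKTIME
// documentation): the time workers spin-wait after parallel work before
// sleeping is controlled by, e.g.,
//   KMP_BLOCKTIME=0         # sleep immediately
//   KMP_BLOCKTIME=infinite  # never sleep, spin forever
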
167 #ifdef KMP_DFLT_NTH_CORES
168 int __kmp_ncores = 0;
169 #endif
170 int __kmp_chunk = 0;
171 int __kmp_force_monotonic = 0;
172 int __kmp_abort_delay = 0;
173 #if KMP_OS_LINUX && defined(KMP_TDATA_GTID)
int __kmp_gtid_mode = 3; /* use compiler TLS (__thread) to store gtid */
175 int __kmp_adjust_gtid_mode = FALSE;
176 #elif KMP_OS_WINDOWS
177 int __kmp_gtid_mode = 2; /* use TLS functions to store gtid */
178 int __kmp_adjust_gtid_mode = FALSE;
179 #else
180 int __kmp_gtid_mode = 0; /* select method to get gtid based on #threads */
181 int __kmp_adjust_gtid_mode = TRUE;
182 #endif /* KMP_OS_LINUX && defined(KMP_TDATA_GTID) */
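
// Sketch of the gtid (global thread id) storage modes selected above
// (descriptive, not normative; mode 1 is chosen dynamically when mode 0 is in
// effect):
//   0 - decide at runtime based on the number of threads
//   1 - find the gtid by searching stack addresses
//   2 - keyed TLS (e.g. pthread_getspecific / TlsGetValue)
//   3 - compiler thread-local storage (__thread / __declspec(thread))
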
183 #ifdef KMP_TDATA_GTID
184 KMP_THREAD_LOCAL int __kmp_gtid = KMP_GTID_DNE;
185 #endif /* KMP_TDATA_GTID */
186 int __kmp_tls_gtid_min = INT_MAX;
187 int __kmp_foreign_tp = TRUE;
188 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
189 int __kmp_inherit_fp_control = TRUE;
190 kmp_int16 __kmp_init_x87_fpu_control_word = 0;
191 kmp_uint32 __kmp_init_mxcsr = 0;
192 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
193 
194 #ifdef USE_LOAD_BALANCE
195 double __kmp_load_balance_interval = 1.0;
196 #endif /* USE_LOAD_BALANCE */
197 
198 kmp_nested_nthreads_t __kmp_nested_nth = {NULL, 0, 0};
199 
200 #if KMP_USE_ADAPTIVE_LOCKS
201 
202 kmp_adaptive_backoff_params_t __kmp_adaptive_backoff_params = {
203     1, 1024}; // TODO: tune it!
204 
205 #if KMP_DEBUG_ADAPTIVE_LOCKS
206 const char *__kmp_speculative_statsfile = "-";
207 #endif
208 
209 #endif // KMP_USE_ADAPTIVE_LOCKS
210 
211 int __kmp_display_env = FALSE;
212 int __kmp_display_env_verbose = FALSE;
213 int __kmp_omp_cancellation = FALSE;
214 int __kmp_nteams = 0;
215 int __kmp_teams_thread_limit = 0;
216 
217 #if KMP_HAVE_MWAIT || KMP_HAVE_UMWAIT
218 int __kmp_user_level_mwait = FALSE;
219 int __kmp_umwait_enabled = FALSE;
220 int __kmp_mwait_enabled = FALSE;
221 int __kmp_mwait_hints = 0;
222 #endif
223 
224 #if KMP_HAVE_UMWAIT
225 int __kmp_waitpkg_enabled = 0;
226 int __kmp_tpause_state = 0;
227 int __kmp_tpause_hint = 1;
228 int __kmp_tpause_enabled = 0;
229 #endif
230 
/* map OMP 3.0 schedule types to our internal schedule types */
232 enum sched_type __kmp_sch_map[kmp_sched_upper - kmp_sched_lower_ext +
233                               kmp_sched_upper_std - kmp_sched_lower - 2] = {
234     kmp_sch_static_chunked, // ==> kmp_sched_static            = 1
235     kmp_sch_dynamic_chunked, // ==> kmp_sched_dynamic           = 2
236     kmp_sch_guided_chunked, // ==> kmp_sched_guided            = 3
237     kmp_sch_auto, // ==> kmp_sched_auto              = 4
    kmp_sch_trapezoidal // ==> kmp_sched_trapezoidal       = 101
    // likely unused; introduced here just to debug the code of the public
    // Intel extension schedules
241 };
242 
243 #if KMP_OS_LINUX
244 enum clock_function_type __kmp_clock_function;
245 int __kmp_clock_function_param;
246 #endif /* KMP_OS_LINUX */
247 
248 #if KMP_MIC_SUPPORTED
249 enum mic_type __kmp_mic_type = non_mic;
250 #endif
251 
252 #if KMP_AFFINITY_SUPPORTED
253 
254 KMPAffinity *__kmp_affinity_dispatch = NULL;
255 
256 #if KMP_USE_HWLOC
257 int __kmp_hwloc_error = FALSE;
258 hwloc_topology_t __kmp_hwloc_topology = NULL;
259 #endif
260 
261 #if KMP_OS_WINDOWS
262 #if KMP_GROUP_AFFINITY
263 int __kmp_num_proc_groups = 1;
264 #endif /* KMP_GROUP_AFFINITY */
265 kmp_GetActiveProcessorCount_t __kmp_GetActiveProcessorCount = NULL;
266 kmp_GetActiveProcessorGroupCount_t __kmp_GetActiveProcessorGroupCount = NULL;
267 kmp_GetThreadGroupAffinity_t __kmp_GetThreadGroupAffinity = NULL;
268 kmp_SetThreadGroupAffinity_t __kmp_SetThreadGroupAffinity = NULL;
269 #endif /* KMP_OS_WINDOWS */
270 
271 size_t __kmp_affin_mask_size = 0;
272 enum affinity_type __kmp_affinity_type = affinity_default;
273 kmp_hw_t __kmp_affinity_gran = KMP_HW_UNKNOWN;
274 int __kmp_affinity_gran_levels = -1;
275 int __kmp_affinity_dups = TRUE;
276 enum affinity_top_method __kmp_affinity_top_method =
277     affinity_top_method_default;
278 int __kmp_affinity_compact = 0;
279 int __kmp_affinity_offset = 0;
280 int __kmp_affinity_verbose = FALSE;
281 int __kmp_affinity_warnings = TRUE;
282 int __kmp_affinity_respect_mask = affinity_respect_mask_default;
283 char *__kmp_affinity_proclist = NULL;
284 kmp_affin_mask_t *__kmp_affinity_masks = NULL;
285 unsigned __kmp_affinity_num_masks = 0;
286 
287 char *__kmp_cpuinfo_file = NULL;
bool __kmp_affin_reset = false;
289 
290 #endif /* KMP_AFFINITY_SUPPORTED */
291 
292 kmp_nested_proc_bind_t __kmp_nested_proc_bind = {NULL, 0, 0};
293 kmp_proc_bind_t __kmp_teams_proc_bind = proc_bind_spread;
294 int __kmp_affinity_num_places = 0;
295 int __kmp_display_affinity = FALSE;
296 char *__kmp_affinity_format = NULL;
297 
298 kmp_int32 __kmp_default_device = 0;
299 
300 kmp_tasking_mode_t __kmp_tasking_mode = tskm_task_teams;
301 kmp_int32 __kmp_max_task_priority = 0;
302 kmp_uint64 __kmp_taskloop_min_tasks = 0;
303 
304 int __kmp_memkind_available = 0;
305 omp_allocator_handle_t const omp_null_allocator = NULL;
306 omp_allocator_handle_t const omp_default_mem_alloc =
307     (omp_allocator_handle_t const)1;
308 omp_allocator_handle_t const omp_large_cap_mem_alloc =
309     (omp_allocator_handle_t const)2;
310 omp_allocator_handle_t const omp_const_mem_alloc =
311     (omp_allocator_handle_t const)3;
312 omp_allocator_handle_t const omp_high_bw_mem_alloc =
313     (omp_allocator_handle_t const)4;
314 omp_allocator_handle_t const omp_low_lat_mem_alloc =
315     (omp_allocator_handle_t const)5;
316 omp_allocator_handle_t const omp_cgroup_mem_alloc =
317     (omp_allocator_handle_t const)6;
318 omp_allocator_handle_t const omp_pteam_mem_alloc =
319     (omp_allocator_handle_t const)7;
320 omp_allocator_handle_t const omp_thread_mem_alloc =
321     (omp_allocator_handle_t const)8;
322 omp_allocator_handle_t const llvm_omp_target_host_mem_alloc =
323     (omp_allocator_handle_t const)100;
324 omp_allocator_handle_t const llvm_omp_target_shared_mem_alloc =
325     (omp_allocator_handle_t const)101;
326 omp_allocator_handle_t const llvm_omp_target_device_mem_alloc =
327     (omp_allocator_handle_t const)102;
328 omp_allocator_handle_t const kmp_max_mem_alloc =
329     (omp_allocator_handle_t const)1024;
330 omp_allocator_handle_t __kmp_def_allocator = omp_default_mem_alloc;
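
// Note (an assumption about usage, see kmp_alloc.cpp): the predefined
// allocator handles above are small integer cookies rather than pointers to
// allocator objects; handles up to kmp_max_mem_alloc are assumed to be
// decoded by value in the allocation entry points, while user-created
// allocators are real heap objects.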
331 
332 omp_memspace_handle_t const omp_default_mem_space =
333     (omp_memspace_handle_t const)0;
334 omp_memspace_handle_t const omp_large_cap_mem_space =
335     (omp_memspace_handle_t const)1;
336 omp_memspace_handle_t const omp_const_mem_space =
337     (omp_memspace_handle_t const)2;
338 omp_memspace_handle_t const omp_high_bw_mem_space =
339     (omp_memspace_handle_t const)3;
340 omp_memspace_handle_t const omp_low_lat_mem_space =
341     (omp_memspace_handle_t const)4;
342 omp_memspace_handle_t const llvm_omp_target_host_mem_space =
343     (omp_memspace_handle_t const)100;
344 omp_memspace_handle_t const llvm_omp_target_shared_mem_space =
345     (omp_memspace_handle_t const)101;
346 omp_memspace_handle_t const llvm_omp_target_device_mem_space =
347     (omp_memspace_handle_t const)102;
348 
/* This check ensures that the compiler is passing the correct data type for
   the flags formal parameter of the function kmpc_omp_task_alloc(). If the
   type is not a 4-byte type, the compiler emits an error message about a
   non-positive length array pointing here. If that happens, the
   kmp_tasking_flags_t structure must be redefined to have exactly 32 bits. */
354 KMP_BUILD_ASSERT(sizeof(kmp_tasking_flags_t) == 4);
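
// For illustration only (the real KMP_BUILD_ASSERT lives in kmp.h and may
// simply wrap static_assert): the classic pattern the comment above alludes
// to declares an array type whose length becomes negative, and therefore
// ill-formed, when the condition is false:
//   #define KMP_BUILD_ASSERT(expr) typedef char kmp_assert_[(expr) ? 1 : -1]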
355 
356 int __kmp_task_stealing_constraint = 1; /* Constrain task stealing by default */
357 int __kmp_enable_task_throttling = 1;
358 
359 #ifdef DEBUG_SUSPEND
360 int __kmp_suspend_count = 0;
361 #endif
362 
363 int __kmp_settings = FALSE;
364 int __kmp_duplicate_library_ok = 0;
365 #if USE_ITT_BUILD
366 int __kmp_forkjoin_frames = 1;
367 int __kmp_forkjoin_frames_mode = 3;
368 #endif
369 PACKED_REDUCTION_METHOD_T __kmp_force_reduction_method =
370     reduction_method_not_defined;
371 int __kmp_determ_red = FALSE;
372 
373 #ifdef KMP_DEBUG
374 int kmp_a_debug = 0;
375 int kmp_b_debug = 0;
376 int kmp_c_debug = 0;
377 int kmp_d_debug = 0;
378 int kmp_e_debug = 0;
379 int kmp_f_debug = 0;
380 int kmp_diag = 0;
381 #endif
382 
383 /* For debug information logging using rotating buffer */
384 int __kmp_debug_buf =
385     FALSE; /* TRUE means use buffer, FALSE means print to stderr */
386 int __kmp_debug_buf_lines =
387     KMP_DEBUG_BUF_LINES_INIT; /* Lines of debug stored in buffer */
388 int __kmp_debug_buf_chars =
389     KMP_DEBUG_BUF_CHARS_INIT; /* Characters allowed per line in buffer */
390 int __kmp_debug_buf_atomic =
391     FALSE; /* TRUE means use atomic update of buffer entry pointer */
392 
393 char *__kmp_debug_buffer = NULL; /* Debug buffer itself */
std::atomic<int> __kmp_debug_count = 0; /* lines printed in buffer so far */
396 int __kmp_debug_buf_warn_chars =
397     0; /* Keep track of char increase recommended in warnings */
398 /* end rotating debug buffer */
399 
400 #ifdef KMP_DEBUG
int __kmp_par_range; /* +1 => only go par for constructs in range,
                        -1 => only go par for constructs outside range */
403 char __kmp_par_range_routine[KMP_PAR_RANGE_ROUTINE_LEN] = {'\0'};
404 char __kmp_par_range_filename[KMP_PAR_RANGE_FILENAME_LEN] = {'\0'};
405 int __kmp_par_range_lb = 0;
406 int __kmp_par_range_ub = INT_MAX;
407 #endif /* KMP_DEBUG */
408 
409 /* For printing out dynamic storage map for threads and teams */
410 int __kmp_storage_map =
411     FALSE; /* True means print storage map for threads and teams */
412 int __kmp_storage_map_verbose =
413     FALSE; /* True means storage map includes placement info */
414 int __kmp_storage_map_verbose_specified = FALSE;
/* Reinitialize the library data structures when we fork a child process;
   defaults to TRUE */
417 int __kmp_need_register_atfork =
418     TRUE; /* At initialization, call pthread_atfork to install fork handler */
419 int __kmp_need_register_atfork_specified = TRUE;
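
// A minimal sketch of the registration these flags guard (handler names are
// assumptions modeled on z_Linux_util.cpp):
//   pthread_atfork(__kmp_atfork_prepare, __kmp_atfork_parent,
//                  __kmp_atfork_child);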
420 
421 int __kmp_env_stksize = FALSE; /* KMP_STACKSIZE specified? */
422 int __kmp_env_blocktime = FALSE; /* KMP_BLOCKTIME specified? */
423 int __kmp_env_checks = FALSE; /* KMP_CHECKS specified?    */
424 int __kmp_env_consistency_check = FALSE; /* KMP_CONSISTENCY_CHECK specified? */
425 
// Values of the KMP_USE_YIELD environment variable:
// 0 = never yield;
// 1 = always yield (default);
// 2 = yield only if oversubscribed
430 #if KMP_OS_DARWIN && KMP_ARCH_AARCH64
431 // Set to 0 for environments where yield is slower
432 kmp_int32 __kmp_use_yield = 0;
433 #else
434 kmp_int32 __kmp_use_yield = 1;
435 #endif
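
// Illustrative usage (values per the list above):
//   KMP_USE_YIELD=2 ./app   # yield in wait loops only when there are more
//                           # runnable threads than processors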
436 
437 // This will be 1 if KMP_USE_YIELD environment variable was set explicitly
438 kmp_int32 __kmp_use_yield_exp_set = 0;
439 
440 kmp_uint32 __kmp_yield_init = KMP_INIT_WAIT;
441 kmp_uint32 __kmp_yield_next = KMP_NEXT_WAIT;
442 kmp_uint64 __kmp_pause_init = 1; // for tpause
443 
444 /* ------------------------------------------------------ */
/* STATE mostly synchronized with global lock */
446 /* data written to rarely by primary threads, read often by workers */
447 /* TODO: None of this global padding stuff works consistently because the order
448    of declaration is not necessarily correlated to storage order. To fix this,
449    all the important globals must be put in a big structure instead. */
450 KMP_ALIGN_CACHE
451 kmp_info_t **__kmp_threads = NULL;
452 kmp_root_t **__kmp_root = NULL;
453 kmp_old_threads_list_t *__kmp_old_threads_list = NULL;
454 
455 /* data read/written to often by primary threads */
456 KMP_ALIGN_CACHE
457 volatile int __kmp_nth = 0;
458 volatile int __kmp_all_nth = 0;
459 volatile kmp_info_t *__kmp_thread_pool = NULL;
460 volatile kmp_team_t *__kmp_team_pool = NULL;
461 
462 KMP_ALIGN_CACHE
std::atomic<int> __kmp_thread_pool_active_nth = 0;
464 
465 /* -------------------------------------------------
466  * GLOBAL/ROOT STATE */
467 KMP_ALIGN_CACHE
468 kmp_global_t __kmp_global;
469 
470 /* ----------------------------------------------- */
471 /* GLOBAL SYNCHRONIZATION LOCKS */
472 /* TODO verify the need for these locks and if they need to be global */
473 
474 #if KMP_USE_INTERNODE_ALIGNMENT
475 /* Multinode systems have larger cache line granularity which can cause
476  * false sharing if the alignment is not large enough for these locks */
477 KMP_ALIGN_CACHE_INTERNODE
478 
479 KMP_BOOTSTRAP_LOCK_INIT(__kmp_initz_lock); /* Control initializations */
480 KMP_ALIGN_CACHE_INTERNODE
481 KMP_BOOTSTRAP_LOCK_INIT(__kmp_forkjoin_lock); /* control fork/join access */
482 KMP_ALIGN_CACHE_INTERNODE
483 KMP_BOOTSTRAP_LOCK_INIT(__kmp_exit_lock); /* exit() is not always thread-safe */
484 #if KMP_USE_MONITOR
485 /* control monitor thread creation */
486 KMP_ALIGN_CACHE_INTERNODE
487 KMP_BOOTSTRAP_LOCK_INIT(__kmp_monitor_lock);
488 #endif
489 /* used for the hack to allow threadprivate cache and __kmp_threads expansion
490    to co-exist */
491 KMP_ALIGN_CACHE_INTERNODE
492 KMP_BOOTSTRAP_LOCK_INIT(__kmp_tp_cached_lock);
493 
494 KMP_ALIGN_CACHE_INTERNODE
495 KMP_LOCK_INIT(__kmp_global_lock); /* Control OS/global access */
496 KMP_ALIGN_CACHE_INTERNODE
497 kmp_queuing_lock_t __kmp_dispatch_lock; /* Control dispatch access  */
498 KMP_ALIGN_CACHE_INTERNODE
499 KMP_LOCK_INIT(__kmp_debug_lock); /* Control I/O access for KMP_DEBUG */
500 #else
501 KMP_ALIGN_CACHE
502 
503 KMP_BOOTSTRAP_LOCK_INIT(__kmp_initz_lock); /* Control initializations */
504 KMP_BOOTSTRAP_LOCK_INIT(__kmp_forkjoin_lock); /* control fork/join access */
505 KMP_BOOTSTRAP_LOCK_INIT(__kmp_exit_lock); /* exit() is not always thread-safe */
506 #if KMP_USE_MONITOR
507 /* control monitor thread creation */
508 KMP_BOOTSTRAP_LOCK_INIT(__kmp_monitor_lock);
509 #endif
510 /* used for the hack to allow threadprivate cache and __kmp_threads expansion
511    to co-exist */
512 KMP_BOOTSTRAP_LOCK_INIT(__kmp_tp_cached_lock);
513 
514 KMP_ALIGN(128)
515 KMP_LOCK_INIT(__kmp_global_lock); /* Control OS/global access */
516 KMP_ALIGN(128)
517 kmp_queuing_lock_t __kmp_dispatch_lock; /* Control dispatch access  */
518 KMP_ALIGN(128)
519 KMP_LOCK_INIT(__kmp_debug_lock); /* Control I/O access for KMP_DEBUG */
520 #endif
521 
522 /* ----------------------------------------------- */
523 
524 #if KMP_HANDLE_SIGNALS
/* Signal handling is disabled by default, because it confuses users: in case
   of sigsegv (or other trouble) in user code, the signal handler catches the
   signal, which then "appears" in the monitor thread (when the monitor
   executes the raise() function). Users see the signal in the monitor thread
   and blame the OpenMP RTL.

   Grant said signal handling was required on some older OSes (Irix?)
   supported by KAI, because bad applications hung but did not abort.
   Currently it is not a problem for Linux* OS, OS X* and Windows* OS.

   Grant: Found new hangs for EL4, EL5, and a Fedora Core machine. So I'm
   putting the default back for now to see if that fixes hangs on those
   machines.

   2010-04-13 Lev: It was a bug in the Fortran RTL. The Fortran RTL prints a
   kind of stack backtrace when a program is aborting, but the code is not
   signal-safe. When multiple signals are raised at the same time (which
   occurs in dynamic negative tests because all the worker threads detect the
   same error), the Fortran RTL may hang. The bug was finally fixed in the
   Fortran RTL library provided by Steve R., and will be available soon. */
544 int __kmp_handle_signals = FALSE;
545 #endif
546 
547 #ifdef DEBUG_SUSPEND
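// The trailing-underscore names below follow the common Fortran external
// naming convention (an assumption based on the naming); they let Fortran
// test code read and reset the suspend counter directly.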
548 int get_suspend_count_(void) {
549   int count = __kmp_suspend_count;
550   __kmp_suspend_count = 0;
551   return count;
552 }
553 void set_suspend_count_(int *value) { __kmp_suspend_count = *value; }
554 #endif
555 
// Symbols for mutual link-time detection of mixed OpenMP libraries (MS).
557 int _You_must_link_with_exactly_one_OpenMP_library = 1;
558 int _You_must_link_with_Intel_OpenMP_library = 1;
559 #if KMP_OS_WINDOWS && (KMP_VERSION_MAJOR > 4)
560 int _You_must_link_with_Microsoft_OpenMP_library = 1;
561 #endif
562 
563 kmp_target_offload_kind_t __kmp_target_offload = tgt_default;
564 
565 // OMP Pause Resources
566 kmp_pause_status_t __kmp_pause_status = kmp_not_paused;
567 
568 // Nesting mode
569 int __kmp_nesting_mode = 0;
570 int __kmp_nesting_mode_nlevels = 1;
571 int *__kmp_nesting_nth_level;
572 
573 // end of file //
574