1 /*
2  * z_Linux_util.cpp -- platform specific routines.
3  */
4 
5 //===----------------------------------------------------------------------===//
6 //
7 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
8 // See https://llvm.org/LICENSE.txt for license information.
9 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "kmp.h"
14 #include "kmp_affinity.h"
15 #include "kmp_i18n.h"
16 #include "kmp_io.h"
17 #include "kmp_itt.h"
18 #include "kmp_lock.h"
19 #include "kmp_stats.h"
20 #include "kmp_str.h"
21 #include "kmp_wait_release.h"
22 #include "kmp_wrapper_getpid.h"
23 
24 #if !KMP_OS_DRAGONFLY && !KMP_OS_FREEBSD && !KMP_OS_NETBSD && !KMP_OS_OPENBSD
25 #include <alloca.h>
26 #endif
27 #include <math.h> // HUGE_VAL.
28 #if KMP_OS_LINUX
29 #include <semaphore.h>
30 #endif // KMP_OS_LINUX
31 #include <sys/resource.h>
32 #include <sys/syscall.h>
33 #include <sys/time.h>
34 #include <sys/times.h>
35 #include <unistd.h>
36 
37 #if KMP_OS_LINUX
38 #include <sys/sysinfo.h>
39 #if KMP_USE_FUTEX
40 // We should really include <futex.h>, but that causes compatibility problems on
41 // different Linux* OS distributions that either require that you include (or
42 // break when you try to include) <pci/types.h>. Since all we need is the two
43 // macros below (which are part of the kernel ABI, so can't change) we just
44 // define the constants here and don't include <futex.h>
45 #ifndef FUTEX_WAIT
46 #define FUTEX_WAIT 0
47 #endif
48 #ifndef FUTEX_WAKE
49 #define FUTEX_WAKE 1
50 #endif
51 #endif
52 #elif KMP_OS_DARWIN
53 #include <mach/mach.h>
54 #include <sys/sysctl.h>
55 #elif KMP_OS_DRAGONFLY || KMP_OS_FREEBSD
56 #include <sys/types.h>
57 #include <sys/sysctl.h>
58 #include <sys/user.h>
59 #include <pthread_np.h>
60 #elif KMP_OS_NETBSD || KMP_OS_OPENBSD
61 #include <sys/types.h>
62 #include <sys/sysctl.h>
63 #elif KMP_OS_SOLARIS
64 #include <sys/loadavg.h>
65 #endif
66 
67 #include <ctype.h>
68 #include <dirent.h>
69 #include <fcntl.h>
70 
71 struct kmp_sys_timer {
72   struct timespec start;
73 };
74 
75 #ifndef TIMEVAL_TO_TIMESPEC
76 // Convert timeval to timespec.
77 #define TIMEVAL_TO_TIMESPEC(tv, ts)                                            \
78   do {                                                                         \
79     (ts)->tv_sec = (tv)->tv_sec;                                               \
80     (ts)->tv_nsec = (tv)->tv_usec * 1000;                                      \
81   } while (0)
82 #endif
83 
84 // Convert timespec to nanoseconds.
85 #define TS2NS(timespec)                                                        \
86   (((timespec).tv_sec * (long int)1e9) + (timespec).tv_nsec)
87 
88 static struct kmp_sys_timer __kmp_sys_timer_data;
89 
90 #if KMP_HANDLE_SIGNALS
91 typedef void (*sig_func_t)(int);
92 STATIC_EFI2_WORKAROUND struct sigaction __kmp_sighldrs[NSIG];
93 static sigset_t __kmp_sigset;
94 #endif
95 
96 static int __kmp_init_runtime = FALSE;
97 
98 static int __kmp_fork_count = 0;
99 
100 static pthread_condattr_t __kmp_suspend_cond_attr;
101 static pthread_mutexattr_t __kmp_suspend_mutex_attr;
102 
103 static kmp_cond_align_t __kmp_wait_cv;
104 static kmp_mutex_align_t __kmp_wait_mx;
105 
106 kmp_uint64 __kmp_ticks_per_msec = 1000000;
107 kmp_uint64 __kmp_ticks_per_usec = 1000;
108 
109 #ifdef DEBUG_SUSPEND
110 static void __kmp_print_cond(char *buffer, kmp_cond_align_t *cond) {
111   KMP_SNPRINTF(buffer, 128, "(cond (lock (%ld, %d)), (descr (%p)))",
112                cond->c_cond.__c_lock.__status, cond->c_cond.__c_lock.__spinlock,
113                cond->c_cond.__c_waiting);
114 }
115 #endif
116 
117 #if ((KMP_OS_LINUX || KMP_OS_FREEBSD) && KMP_AFFINITY_SUPPORTED)
118 
119 /* Affinity support */
120 
121 void __kmp_affinity_bind_thread(int which) {
122   KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
123               "Illegal set affinity operation when not capable");
124 
125   kmp_affin_mask_t *mask;
126   KMP_CPU_ALLOC_ON_STACK(mask);
127   KMP_CPU_ZERO(mask);
128   KMP_CPU_SET(which, mask);
129   __kmp_set_system_affinity(mask, TRUE);
130   KMP_CPU_FREE_FROM_STACK(mask);
131 }
132 
133 /* Determine if we can access affinity functionality on this version of
134  * Linux* OS by checking __NR_sched_{get,set}affinity system calls, and set
135  * __kmp_affin_mask_size to the appropriate value (0 means not capable). */
136 void __kmp_affinity_determine_capable(const char *env_var) {
137   // Check and see if the OS supports thread affinity.
138 
139 #if KMP_OS_LINUX
140 #define KMP_CPU_SET_SIZE_LIMIT (1024 * 1024)
141 #define KMP_CPU_SET_TRY_SIZE CACHE_LINE
142 #elif KMP_OS_FREEBSD
143 #define KMP_CPU_SET_SIZE_LIMIT (sizeof(cpuset_t))
144 #endif
145 
146   int verbose = __kmp_affinity.flags.verbose;
147   int warnings = __kmp_affinity.flags.warnings;
148   enum affinity_type type = __kmp_affinity.type;
149 
150 #if KMP_OS_LINUX
151   long gCode;
152   unsigned char *buf;
153   buf = (unsigned char *)KMP_INTERNAL_MALLOC(KMP_CPU_SET_SIZE_LIMIT);
154 
155   // If the syscall returns a suggestion for the size,
156   // then we don't have to search for an appropriate size.
157   gCode = syscall(__NR_sched_getaffinity, 0, KMP_CPU_SET_TRY_SIZE, buf);
158   KA_TRACE(30, ("__kmp_affinity_determine_capable: "
159                 "initial getaffinity call returned %ld errno = %d\n",
160                 gCode, errno));
161 
162   if (gCode < 0 && errno != EINVAL) {
163     // System call not supported
164     if (verbose ||
165         (warnings && (type != affinity_none) && (type != affinity_default) &&
166          (type != affinity_disabled))) {
167       int error = errno;
168       kmp_msg_t err_code = KMP_ERR(error);
169       __kmp_msg(kmp_ms_warning, KMP_MSG(GetAffSysCallNotSupported, env_var),
170                 err_code, __kmp_msg_null);
171       if (__kmp_generate_warnings == kmp_warnings_off) {
172         __kmp_str_free(&err_code.str);
173       }
174     }
175     KMP_AFFINITY_DISABLE();
176     KMP_INTERNAL_FREE(buf);
177     return;
178   } else if (gCode > 0) {
179     // The optimal situation: the OS returns the size of the buffer it expects.
180     KMP_AFFINITY_ENABLE(gCode);
181     KA_TRACE(10, ("__kmp_affinity_determine_capable: "
182                   "affinity supported (mask size %d)\n",
183                   (int)__kmp_affin_mask_size));
184     KMP_INTERNAL_FREE(buf);
185     return;
186   }
187 
188   // Call the getaffinity system call repeatedly with increasing set sizes
189   // until we succeed, or reach an upper bound on the search.
190   KA_TRACE(30, ("__kmp_affinity_determine_capable: "
191                 "searching for proper set size\n"));
192   int size;
193   for (size = 1; size <= KMP_CPU_SET_SIZE_LIMIT; size *= 2) {
194     gCode = syscall(__NR_sched_getaffinity, 0, size, buf);
195     KA_TRACE(30, ("__kmp_affinity_determine_capable: "
196                   "getaffinity for mask size %ld returned %ld errno = %d\n",
197                   size, gCode, errno));
198 
199     if (gCode < 0) {
200       if (errno == ENOSYS) {
201         // We shouldn't get here
202         KA_TRACE(30, ("__kmp_affinity_determine_capable: "
203                       "inconsistent OS call behavior: errno == ENOSYS for mask "
204                       "size %d\n",
205                       size));
206         if (verbose ||
207             (warnings && (type != affinity_none) &&
208              (type != affinity_default) && (type != affinity_disabled))) {
209           int error = errno;
210           kmp_msg_t err_code = KMP_ERR(error);
211           __kmp_msg(kmp_ms_warning, KMP_MSG(GetAffSysCallNotSupported, env_var),
212                     err_code, __kmp_msg_null);
213           if (__kmp_generate_warnings == kmp_warnings_off) {
214             __kmp_str_free(&err_code.str);
215           }
216         }
217         KMP_AFFINITY_DISABLE();
218         KMP_INTERNAL_FREE(buf);
219         return;
220       }
221       continue;
222     }
223 
224     KMP_AFFINITY_ENABLE(gCode);
225     KA_TRACE(10, ("__kmp_affinity_determine_capable: "
226                   "affinity supported (mask size %d)\n",
227                   (int)__kmp_affin_mask_size));
228     KMP_INTERNAL_FREE(buf);
229     return;
230   }
231 #elif KMP_OS_FREEBSD
232   long gCode;
233   unsigned char *buf;
234   buf = (unsigned char *)KMP_INTERNAL_MALLOC(KMP_CPU_SET_SIZE_LIMIT);
235   gCode = pthread_getaffinity_np(pthread_self(), KMP_CPU_SET_SIZE_LIMIT,
236                                  reinterpret_cast<cpuset_t *>(buf));
237   KA_TRACE(30, ("__kmp_affinity_determine_capable: "
238                 "initial getaffinity call returned %d errno = %d\n",
239                 gCode, errno));
240   if (gCode == 0) {
241     KMP_AFFINITY_ENABLE(KMP_CPU_SET_SIZE_LIMIT);
242     KA_TRACE(10, ("__kmp_affinity_determine_capable: "
243                   "affinity supported (mask size %d)\n",
244                   (int)__kmp_affin_mask_size));
245     KMP_INTERNAL_FREE(buf);
246     return;
247   }
248 #endif
249   KMP_INTERNAL_FREE(buf);
250 
251   // Affinity is not supported
252   KMP_AFFINITY_DISABLE();
253   KA_TRACE(10, ("__kmp_affinity_determine_capable: "
254                 "cannot determine mask size - affinity not supported\n"));
255   if (verbose || (warnings && (type != affinity_none) &&
256                   (type != affinity_default) && (type != affinity_disabled))) {
257     KMP_WARNING(AffCantGetMaskSize, env_var);
258   }
259 }
260 
261 #endif // KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED
262 
263 #if KMP_USE_FUTEX
264 
265 int __kmp_futex_determine_capable() {
266   int loc = 0;
267   long rc = syscall(__NR_futex, &loc, FUTEX_WAKE, 1, NULL, NULL, 0);
268   int retval = (rc == 0) || (errno != ENOSYS);
269 
270   KA_TRACE(10,
271            ("__kmp_futex_determine_capable: rc = %d errno = %d\n", rc, errno));
272   KA_TRACE(10, ("__kmp_futex_determine_capable: futex syscall%s supported\n",
273                 retval ? "" : " not"));
274 
275   return retval;
276 }
277 
278 #endif // KMP_USE_FUTEX
279 
280 #if (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_WASM) && (!KMP_ASM_INTRINS)
281 /* Only 32-bit "add-exchange" instruction on IA-32 architecture causes us to
282    use compare_and_store for these routines */
283 
284 kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 d) {
285   kmp_int8 old_value, new_value;
286 
287   old_value = TCR_1(*p);
288   new_value = old_value | d;
289 
290   while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
291     KMP_CPU_PAUSE();
292     old_value = TCR_1(*p);
293     new_value = old_value | d;
294   }
295   return old_value;
296 }
297 
298 kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 d) {
299   kmp_int8 old_value, new_value;
300 
301   old_value = TCR_1(*p);
302   new_value = old_value & d;
303 
304   while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
305     KMP_CPU_PAUSE();
306     old_value = TCR_1(*p);
307     new_value = old_value & d;
308   }
309   return old_value;
310 }
311 
312 kmp_uint32 __kmp_test_then_or32(volatile kmp_uint32 *p, kmp_uint32 d) {
313   kmp_uint32 old_value, new_value;
314 
315   old_value = TCR_4(*p);
316   new_value = old_value | d;
317 
318   while (!KMP_COMPARE_AND_STORE_REL32(p, old_value, new_value)) {
319     KMP_CPU_PAUSE();
320     old_value = TCR_4(*p);
321     new_value = old_value | d;
322   }
323   return old_value;
324 }
325 
326 kmp_uint32 __kmp_test_then_and32(volatile kmp_uint32 *p, kmp_uint32 d) {
327   kmp_uint32 old_value, new_value;
328 
329   old_value = TCR_4(*p);
330   new_value = old_value & d;
331 
332   while (!KMP_COMPARE_AND_STORE_REL32(p, old_value, new_value)) {
333     KMP_CPU_PAUSE();
334     old_value = TCR_4(*p);
335     new_value = old_value & d;
336   }
337   return old_value;
338 }
339 
340 #if KMP_ARCH_X86 || KMP_ARCH_WASM
341 kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 d) {
342   kmp_int8 old_value, new_value;
343 
344   old_value = TCR_1(*p);
345   new_value = old_value + d;
346 
347   while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
348     KMP_CPU_PAUSE();
349     old_value = TCR_1(*p);
350     new_value = old_value + d;
351   }
352   return old_value;
353 }
354 
355 kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 d) {
356   kmp_int64 old_value, new_value;
357 
358   old_value = TCR_8(*p);
359   new_value = old_value + d;
360 
361   while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
362     KMP_CPU_PAUSE();
363     old_value = TCR_8(*p);
364     new_value = old_value + d;
365   }
366   return old_value;
367 }
368 #endif /* KMP_ARCH_X86 */
369 
370 kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 d) {
371   kmp_uint64 old_value, new_value;
372 
373   old_value = TCR_8(*p);
374   new_value = old_value | d;
375   while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
376     KMP_CPU_PAUSE();
377     old_value = TCR_8(*p);
378     new_value = old_value | d;
379   }
380   return old_value;
381 }
382 
383 kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 d) {
384   kmp_uint64 old_value, new_value;
385 
386   old_value = TCR_8(*p);
387   new_value = old_value & d;
388   while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
389     KMP_CPU_PAUSE();
390     old_value = TCR_8(*p);
391     new_value = old_value & d;
392   }
393   return old_value;
394 }
395 
396 #endif /* (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (! KMP_ASM_INTRINS) */
397 
398 void __kmp_terminate_thread(int gtid) {
399   int status;
400   kmp_info_t *th = __kmp_threads[gtid];
401 
402   if (!th)
403     return;
404 
405 #ifdef KMP_CANCEL_THREADS
406   KA_TRACE(10, ("__kmp_terminate_thread: kill (%d)\n", gtid));
407   status = pthread_cancel(th->th.th_info.ds.ds_thread);
408   if (status != 0 && status != ESRCH) {
409     __kmp_fatal(KMP_MSG(CantTerminateWorkerThread), KMP_ERR(status),
410                 __kmp_msg_null);
411   }
412 #endif
413   KMP_YIELD(TRUE);
414 } //
415 
416 /* Set thread stack info according to values returned by pthread_getattr_np().
417    If values are unreasonable, assume call failed and use incremental stack
418    refinement method instead. Returns TRUE if the stack parameters could be
419    determined exactly, FALSE if incremental refinement is necessary. */
420 static kmp_int32 __kmp_set_stack_info(int gtid, kmp_info_t *th) {
421   int stack_data;
422 #if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||     \
423     KMP_OS_HURD || KMP_OS_SOLARIS
424   pthread_attr_t attr;
425   int status;
426   size_t size = 0;
427   void *addr = 0;
428 
429   /* Always do incremental stack refinement for ubermaster threads since the
430      initial thread stack range can be reduced by sibling thread creation so
431      pthread_attr_getstack may cause thread gtid aliasing */
432   if (!KMP_UBER_GTID(gtid)) {
433 
434     /* Fetch the real thread attributes */
435     status = pthread_attr_init(&attr);
436     KMP_CHECK_SYSFAIL("pthread_attr_init", status);
437 #if KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD
438     status = pthread_attr_get_np(pthread_self(), &attr);
439     KMP_CHECK_SYSFAIL("pthread_attr_get_np", status);
440 #else
441     status = pthread_getattr_np(pthread_self(), &attr);
442     KMP_CHECK_SYSFAIL("pthread_getattr_np", status);
443 #endif
444     status = pthread_attr_getstack(&attr, &addr, &size);
445     KMP_CHECK_SYSFAIL("pthread_attr_getstack", status);
446     KA_TRACE(60,
447              ("__kmp_set_stack_info: T#%d pthread_attr_getstack returned size:"
448               " %lu, low addr: %p\n",
449               gtid, size, addr));
450     status = pthread_attr_destroy(&attr);
451     KMP_CHECK_SYSFAIL("pthread_attr_destroy", status);
452   }
453 
454   if (size != 0 && addr != 0) { // was stack parameter determination successful?
455     /* Store the correct base and size */
456     TCW_PTR(th->th.th_info.ds.ds_stackbase, (((char *)addr) + size));
457     TCW_PTR(th->th.th_info.ds.ds_stacksize, size);
458     TCW_4(th->th.th_info.ds.ds_stackgrow, FALSE);
459     return TRUE;
460   }
461 #endif /* KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD  \
462           || KMP_OS_HURD || KMP_OS_SOLARIS */
463   /* Use incremental refinement starting from initial conservative estimate */
464   TCW_PTR(th->th.th_info.ds.ds_stacksize, 0);
465   TCW_PTR(th->th.th_info.ds.ds_stackbase, &stack_data);
466   TCW_4(th->th.th_info.ds.ds_stackgrow, TRUE);
467   return FALSE;
468 }
469 
470 static void *__kmp_launch_worker(void *thr) {
471   int status, old_type, old_state;
472 #ifdef KMP_BLOCK_SIGNALS
473   sigset_t new_set, old_set;
474 #endif /* KMP_BLOCK_SIGNALS */
475   void *exit_val;
476 #if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||     \
477     KMP_OS_OPENBSD || KMP_OS_HURD || KMP_OS_SOLARIS
478   void *volatile padding = 0;
479 #endif
480   int gtid;
481 
482   gtid = ((kmp_info_t *)thr)->th.th_info.ds.ds_gtid;
483   __kmp_gtid_set_specific(gtid);
484 #ifdef KMP_TDATA_GTID
485   __kmp_gtid = gtid;
486 #endif
487 #if KMP_STATS_ENABLED
488   // set thread local index to point to thread-specific stats
489   __kmp_stats_thread_ptr = ((kmp_info_t *)thr)->th.th_stats;
490   __kmp_stats_thread_ptr->startLife();
491   KMP_SET_THREAD_STATE(IDLE);
492   KMP_INIT_PARTITIONED_TIMERS(OMP_idle);
493 #endif
494 
495 #if USE_ITT_BUILD
496   __kmp_itt_thread_name(gtid);
497 #endif /* USE_ITT_BUILD */
498 
499 #if KMP_AFFINITY_SUPPORTED
500   __kmp_affinity_bind_init_mask(gtid);
501 #endif
502 
503 #ifdef KMP_CANCEL_THREADS
504   status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
505   KMP_CHECK_SYSFAIL("pthread_setcanceltype", status);
506   // josh todo: isn't PTHREAD_CANCEL_ENABLE default for newly-created threads?
507   status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
508   KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
509 #endif
510 
511 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
512   // Set FP control regs to be a copy of the parallel initialization thread's.
513   __kmp_clear_x87_fpu_status_word();
514   __kmp_load_x87_fpu_control_word(&__kmp_init_x87_fpu_control_word);
515   __kmp_load_mxcsr(&__kmp_init_mxcsr);
516 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
517 
518 #ifdef KMP_BLOCK_SIGNALS
519   status = sigfillset(&new_set);
520   KMP_CHECK_SYSFAIL_ERRNO("sigfillset", status);
521   status = pthread_sigmask(SIG_BLOCK, &new_set, &old_set);
522   KMP_CHECK_SYSFAIL("pthread_sigmask", status);
523 #endif /* KMP_BLOCK_SIGNALS */
524 
525 #if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||     \
526     KMP_OS_OPENBSD || KMP_OS_HURD || KMP_OS_SOLARIS
527   if (__kmp_stkoffset > 0 && gtid > 0) {
528     padding = KMP_ALLOCA(gtid * __kmp_stkoffset);
529     (void)padding;
530   }
531 #endif
532 
533   KMP_MB();
534   __kmp_set_stack_info(gtid, (kmp_info_t *)thr);
535 
536   __kmp_check_stack_overlap((kmp_info_t *)thr);
537 
538   exit_val = __kmp_launch_thread((kmp_info_t *)thr);
539 
540 #ifdef KMP_BLOCK_SIGNALS
541   status = pthread_sigmask(SIG_SETMASK, &old_set, NULL);
542   KMP_CHECK_SYSFAIL("pthread_sigmask", status);
543 #endif /* KMP_BLOCK_SIGNALS */
544 
545   return exit_val;
546 }
547 
548 #if KMP_USE_MONITOR
549 /* The monitor thread controls all of the threads in the complex */
550 
551 static void *__kmp_launch_monitor(void *thr) {
552   int status, old_type, old_state;
553 #ifdef KMP_BLOCK_SIGNALS
554   sigset_t new_set;
555 #endif /* KMP_BLOCK_SIGNALS */
556   struct timespec interval;
557 
558   KMP_MB(); /* Flush all pending memory write invalidates.  */
559 
560   KA_TRACE(10, ("__kmp_launch_monitor: #1 launched\n"));
561 
562   /* register us as the monitor thread */
563   __kmp_gtid_set_specific(KMP_GTID_MONITOR);
564 #ifdef KMP_TDATA_GTID
565   __kmp_gtid = KMP_GTID_MONITOR;
566 #endif
567 
568   KMP_MB();
569 
570 #if USE_ITT_BUILD
571   // Instruct Intel(R) Threading Tools to ignore monitor thread.
572   __kmp_itt_thread_ignore();
573 #endif /* USE_ITT_BUILD */
574 
575   __kmp_set_stack_info(((kmp_info_t *)thr)->th.th_info.ds.ds_gtid,
576                        (kmp_info_t *)thr);
577 
578   __kmp_check_stack_overlap((kmp_info_t *)thr);
579 
580 #ifdef KMP_CANCEL_THREADS
581   status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
582   KMP_CHECK_SYSFAIL("pthread_setcanceltype", status);
583   // josh todo: isn't PTHREAD_CANCEL_ENABLE default for newly-created threads?
584   status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
585   KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
586 #endif
587 
588 #if KMP_REAL_TIME_FIX
589   // This is a potential fix which allows application with real-time scheduling
590   // policy work. However, decision about the fix is not made yet, so it is
591   // disabled by default.
592   { // Are program started with real-time scheduling policy?
593     int sched = sched_getscheduler(0);
594     if (sched == SCHED_FIFO || sched == SCHED_RR) {
595       // Yes, we are a part of real-time application. Try to increase the
596       // priority of the monitor.
597       struct sched_param param;
598       int max_priority = sched_get_priority_max(sched);
599       int rc;
600       KMP_WARNING(RealTimeSchedNotSupported);
601       sched_getparam(0, &param);
602       if (param.sched_priority < max_priority) {
603         param.sched_priority += 1;
604         rc = sched_setscheduler(0, sched, &param);
605         if (rc != 0) {
606           int error = errno;
607           kmp_msg_t err_code = KMP_ERR(error);
608           __kmp_msg(kmp_ms_warning, KMP_MSG(CantChangeMonitorPriority),
609                     err_code, KMP_MSG(MonitorWillStarve), __kmp_msg_null);
610           if (__kmp_generate_warnings == kmp_warnings_off) {
611             __kmp_str_free(&err_code.str);
612           }
613         }
614       } else {
615         // We cannot abort here, because number of CPUs may be enough for all
616         // the threads, including the monitor thread, so application could
617         // potentially work...
618         __kmp_msg(kmp_ms_warning, KMP_MSG(RunningAtMaxPriority),
619                   KMP_MSG(MonitorWillStarve), KMP_HNT(RunningAtMaxPriority),
620                   __kmp_msg_null);
621       }
622     }
623     // AC: free thread that waits for monitor started
624     TCW_4(__kmp_global.g.g_time.dt.t_value, 0);
625   }
626 #endif // KMP_REAL_TIME_FIX
627 
628   KMP_MB(); /* Flush all pending memory write invalidates.  */
629 
630   if (__kmp_monitor_wakeups == 1) {
631     interval.tv_sec = 1;
632     interval.tv_nsec = 0;
633   } else {
634     interval.tv_sec = 0;
635     interval.tv_nsec = (KMP_NSEC_PER_SEC / __kmp_monitor_wakeups);
636   }
637 
638   KA_TRACE(10, ("__kmp_launch_monitor: #2 monitor\n"));
639 
640   while (!TCR_4(__kmp_global.g.g_done)) {
641     struct timespec now;
642     struct timeval tval;
643 
644     /*  This thread monitors the state of the system */
645 
646     KA_TRACE(15, ("__kmp_launch_monitor: update\n"));
647 
648     status = gettimeofday(&tval, NULL);
649     KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
650     TIMEVAL_TO_TIMESPEC(&tval, &now);
651 
652     now.tv_sec += interval.tv_sec;
653     now.tv_nsec += interval.tv_nsec;
654 
655     if (now.tv_nsec >= KMP_NSEC_PER_SEC) {
656       now.tv_sec += 1;
657       now.tv_nsec -= KMP_NSEC_PER_SEC;
658     }
659 
660     status = pthread_mutex_lock(&__kmp_wait_mx.m_mutex);
661     KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
662     // AC: the monitor should not fall asleep if g_done has been set
663     if (!TCR_4(__kmp_global.g.g_done)) { // check once more under mutex
664       status = pthread_cond_timedwait(&__kmp_wait_cv.c_cond,
665                                       &__kmp_wait_mx.m_mutex, &now);
666       if (status != 0) {
667         if (status != ETIMEDOUT && status != EINTR) {
668           KMP_SYSFAIL("pthread_cond_timedwait", status);
669         }
670       }
671     }
672     status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
673     KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
674 
675     TCW_4(__kmp_global.g.g_time.dt.t_value,
676           TCR_4(__kmp_global.g.g_time.dt.t_value) + 1);
677 
678     KMP_MB(); /* Flush all pending memory write invalidates.  */
679   }
680 
681   KA_TRACE(10, ("__kmp_launch_monitor: #3 cleanup\n"));
682 
683 #ifdef KMP_BLOCK_SIGNALS
684   status = sigfillset(&new_set);
685   KMP_CHECK_SYSFAIL_ERRNO("sigfillset", status);
686   status = pthread_sigmask(SIG_UNBLOCK, &new_set, NULL);
687   KMP_CHECK_SYSFAIL("pthread_sigmask", status);
688 #endif /* KMP_BLOCK_SIGNALS */
689 
690   KA_TRACE(10, ("__kmp_launch_monitor: #4 finished\n"));
691 
692   if (__kmp_global.g.g_abort != 0) {
693     /* now we need to terminate the worker threads  */
694     /* the value of t_abort is the signal we caught */
695 
696     int gtid;
697 
698     KA_TRACE(10, ("__kmp_launch_monitor: #5 terminate sig=%d\n",
699                   __kmp_global.g.g_abort));
700 
701     /* terminate the OpenMP worker threads */
702     /* TODO this is not valid for sibling threads!!
703      * the uber master might not be 0 anymore.. */
704     for (gtid = 1; gtid < __kmp_threads_capacity; ++gtid)
705       __kmp_terminate_thread(gtid);
706 
707     __kmp_cleanup();
708 
709     KA_TRACE(10, ("__kmp_launch_monitor: #6 raise sig=%d\n",
710                   __kmp_global.g.g_abort));
711 
712     if (__kmp_global.g.g_abort > 0)
713       raise(__kmp_global.g.g_abort);
714   }
715 
716   KA_TRACE(10, ("__kmp_launch_monitor: #7 exit\n"));
717 
718   return thr;
719 }
720 #endif // KMP_USE_MONITOR
721 
722 void __kmp_create_worker(int gtid, kmp_info_t *th, size_t stack_size) {
723   pthread_t handle;
724   pthread_attr_t thread_attr;
725   int status;
726 
727   th->th.th_info.ds.ds_gtid = gtid;
728 
729 #if KMP_STATS_ENABLED
730   // sets up worker thread stats
731   __kmp_acquire_tas_lock(&__kmp_stats_lock, gtid);
732 
733   // th->th.th_stats is used to transfer thread-specific stats-pointer to
734   // __kmp_launch_worker. So when thread is created (goes into
735   // __kmp_launch_worker) it will set its thread local pointer to
736   // th->th.th_stats
737   if (!KMP_UBER_GTID(gtid)) {
738     th->th.th_stats = __kmp_stats_list->push_back(gtid);
739   } else {
740     // For root threads, __kmp_stats_thread_ptr is set in __kmp_register_root(),
741     // so set the th->th.th_stats field to it.
742     th->th.th_stats = __kmp_stats_thread_ptr;
743   }
744   __kmp_release_tas_lock(&__kmp_stats_lock, gtid);
745 
746 #endif // KMP_STATS_ENABLED
747 
748   if (KMP_UBER_GTID(gtid)) {
749     KA_TRACE(10, ("__kmp_create_worker: uber thread (%d)\n", gtid));
750     th->th.th_info.ds.ds_thread = pthread_self();
751     __kmp_set_stack_info(gtid, th);
752     __kmp_check_stack_overlap(th);
753     return;
754   }
755 
756   KA_TRACE(10, ("__kmp_create_worker: try to create thread (%d)\n", gtid));
757 
758   KMP_MB(); /* Flush all pending memory write invalidates.  */
759 
760 #ifdef KMP_THREAD_ATTR
761   status = pthread_attr_init(&thread_attr);
762   if (status != 0) {
763     __kmp_fatal(KMP_MSG(CantInitThreadAttrs), KMP_ERR(status), __kmp_msg_null);
764   }
765   status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
766   if (status != 0) {
767     __kmp_fatal(KMP_MSG(CantSetWorkerState), KMP_ERR(status), __kmp_msg_null);
768   }
769 
770   /* Set stack size for this thread now.
771      The multiple of 2 is there because on some machines, requesting an unusual
772      stacksize causes the thread to have an offset before the dummy alloca()
773      takes place to create the offset.  Since we want the user to have a
774      sufficient stacksize AND support a stack offset, we alloca() twice the
775      offset so that the upcoming alloca() does not eliminate any premade offset,
776      and also gives the user the stack space they requested for all threads */
777   stack_size += gtid * __kmp_stkoffset * 2;
778 
779   KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
780                 "__kmp_stksize = %lu bytes, final stacksize = %lu bytes\n",
781                 gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));
782 
783 #ifdef _POSIX_THREAD_ATTR_STACKSIZE
784   status = pthread_attr_setstacksize(&thread_attr, stack_size);
785 #ifdef KMP_BACKUP_STKSIZE
786   if (status != 0) {
787     if (!__kmp_env_stksize) {
788       stack_size = KMP_BACKUP_STKSIZE + gtid * __kmp_stkoffset;
789       __kmp_stksize = KMP_BACKUP_STKSIZE;
790       KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
791                     "__kmp_stksize = %lu bytes, (backup) final stacksize = %lu "
792                     "bytes\n",
793                     gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));
794       status = pthread_attr_setstacksize(&thread_attr, stack_size);
795     }
796   }
797 #endif /* KMP_BACKUP_STKSIZE */
798   if (status != 0) {
799     __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
800                 KMP_HNT(ChangeWorkerStackSize), __kmp_msg_null);
801   }
802 #endif /* _POSIX_THREAD_ATTR_STACKSIZE */
803 
804 #endif /* KMP_THREAD_ATTR */
805 
806   status =
807       pthread_create(&handle, &thread_attr, __kmp_launch_worker, (void *)th);
808   if (status != 0 || !handle) { // ??? Why do we check handle??
809 #ifdef _POSIX_THREAD_ATTR_STACKSIZE
810     if (status == EINVAL) {
811       __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
812                   KMP_HNT(IncreaseWorkerStackSize), __kmp_msg_null);
813     }
814     if (status == ENOMEM) {
815       __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
816                   KMP_HNT(DecreaseWorkerStackSize), __kmp_msg_null);
817     }
818 #endif /* _POSIX_THREAD_ATTR_STACKSIZE */
819     if (status == EAGAIN) {
820       __kmp_fatal(KMP_MSG(NoResourcesForWorkerThread), KMP_ERR(status),
821                   KMP_HNT(Decrease_NUM_THREADS), __kmp_msg_null);
822     }
823     KMP_SYSFAIL("pthread_create", status);
824   }
825 
826   th->th.th_info.ds.ds_thread = handle;
827 
828 #ifdef KMP_THREAD_ATTR
829   status = pthread_attr_destroy(&thread_attr);
830   if (status) {
831     kmp_msg_t err_code = KMP_ERR(status);
832     __kmp_msg(kmp_ms_warning, KMP_MSG(CantDestroyThreadAttrs), err_code,
833               __kmp_msg_null);
834     if (__kmp_generate_warnings == kmp_warnings_off) {
835       __kmp_str_free(&err_code.str);
836     }
837   }
838 #endif /* KMP_THREAD_ATTR */
839 
840   KMP_MB(); /* Flush all pending memory write invalidates.  */
841 
842   KA_TRACE(10, ("__kmp_create_worker: done creating thread (%d)\n", gtid));
843 
844 } // __kmp_create_worker
845 
846 #if KMP_USE_MONITOR
847 void __kmp_create_monitor(kmp_info_t *th) {
848   pthread_t handle;
849   pthread_attr_t thread_attr;
850   size_t size;
851   int status;
852   int auto_adj_size = FALSE;
853 
854   if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME) {
855     // We don't need monitor thread in case of MAX_BLOCKTIME
856     KA_TRACE(10, ("__kmp_create_monitor: skipping monitor thread because of "
857                   "MAX blocktime\n"));
858     th->th.th_info.ds.ds_tid = 0; // this makes reap_monitor no-op
859     th->th.th_info.ds.ds_gtid = 0;
860     return;
861   }
862   KA_TRACE(10, ("__kmp_create_monitor: try to create monitor\n"));
863 
864   KMP_MB(); /* Flush all pending memory write invalidates.  */
865 
866   th->th.th_info.ds.ds_tid = KMP_GTID_MONITOR;
867   th->th.th_info.ds.ds_gtid = KMP_GTID_MONITOR;
868 #if KMP_REAL_TIME_FIX
869   TCW_4(__kmp_global.g.g_time.dt.t_value,
870         -1); // Will use it for synchronization a bit later.
871 #else
872   TCW_4(__kmp_global.g.g_time.dt.t_value, 0);
873 #endif // KMP_REAL_TIME_FIX
874 
875 #ifdef KMP_THREAD_ATTR
876   if (__kmp_monitor_stksize == 0) {
877     __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
878     auto_adj_size = TRUE;
879   }
880   status = pthread_attr_init(&thread_attr);
881   if (status != 0) {
882     __kmp_fatal(KMP_MSG(CantInitThreadAttrs), KMP_ERR(status), __kmp_msg_null);
883   }
884   status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
885   if (status != 0) {
886     __kmp_fatal(KMP_MSG(CantSetMonitorState), KMP_ERR(status), __kmp_msg_null);
887   }
888 
889 #ifdef _POSIX_THREAD_ATTR_STACKSIZE
890   status = pthread_attr_getstacksize(&thread_attr, &size);
891   KMP_CHECK_SYSFAIL("pthread_attr_getstacksize", status);
892 #else
893   size = __kmp_sys_min_stksize;
894 #endif /* _POSIX_THREAD_ATTR_STACKSIZE */
895 #endif /* KMP_THREAD_ATTR */
896 
897   if (__kmp_monitor_stksize == 0) {
898     __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
899   }
900   if (__kmp_monitor_stksize < __kmp_sys_min_stksize) {
901     __kmp_monitor_stksize = __kmp_sys_min_stksize;
902   }
903 
904   KA_TRACE(10, ("__kmp_create_monitor: default stacksize = %lu bytes,"
905                 "requested stacksize = %lu bytes\n",
906                 size, __kmp_monitor_stksize));
907 
908 retry:
909 
910 /* Set stack size for this thread now. */
911 #ifdef _POSIX_THREAD_ATTR_STACKSIZE
912   KA_TRACE(10, ("__kmp_create_monitor: setting stacksize = %lu bytes,",
913                 __kmp_monitor_stksize));
914   status = pthread_attr_setstacksize(&thread_attr, __kmp_monitor_stksize);
915   if (status != 0) {
916     if (auto_adj_size) {
917       __kmp_monitor_stksize *= 2;
918       goto retry;
919     }
920     kmp_msg_t err_code = KMP_ERR(status);
921     __kmp_msg(kmp_ms_warning, // should this be fatal?  BB
922               KMP_MSG(CantSetMonitorStackSize, (long int)__kmp_monitor_stksize),
923               err_code, KMP_HNT(ChangeMonitorStackSize), __kmp_msg_null);
924     if (__kmp_generate_warnings == kmp_warnings_off) {
925       __kmp_str_free(&err_code.str);
926     }
927   }
928 #endif /* _POSIX_THREAD_ATTR_STACKSIZE */
929 
930   status =
931       pthread_create(&handle, &thread_attr, __kmp_launch_monitor, (void *)th);
932 
933   if (status != 0) {
934 #ifdef _POSIX_THREAD_ATTR_STACKSIZE
935     if (status == EINVAL) {
936       if (auto_adj_size && (__kmp_monitor_stksize < (size_t)0x40000000)) {
937         __kmp_monitor_stksize *= 2;
938         goto retry;
939       }
940       __kmp_fatal(KMP_MSG(CantSetMonitorStackSize, __kmp_monitor_stksize),
941                   KMP_ERR(status), KMP_HNT(IncreaseMonitorStackSize),
942                   __kmp_msg_null);
943     }
944     if (status == ENOMEM) {
945       __kmp_fatal(KMP_MSG(CantSetMonitorStackSize, __kmp_monitor_stksize),
946                   KMP_ERR(status), KMP_HNT(DecreaseMonitorStackSize),
947                   __kmp_msg_null);
948     }
949 #endif /* _POSIX_THREAD_ATTR_STACKSIZE */
950     if (status == EAGAIN) {
951       __kmp_fatal(KMP_MSG(NoResourcesForMonitorThread), KMP_ERR(status),
952                   KMP_HNT(DecreaseNumberOfThreadsInUse), __kmp_msg_null);
953     }
954     KMP_SYSFAIL("pthread_create", status);
955   }
956 
957   th->th.th_info.ds.ds_thread = handle;
958 
959 #if KMP_REAL_TIME_FIX
960   // Wait for the monitor thread is really started and set its *priority*.
961   KMP_DEBUG_ASSERT(sizeof(kmp_uint32) ==
962                    sizeof(__kmp_global.g.g_time.dt.t_value));
963   __kmp_wait_4((kmp_uint32 volatile *)&__kmp_global.g.g_time.dt.t_value, -1,
964                &__kmp_neq_4, NULL);
965 #endif // KMP_REAL_TIME_FIX
966 
967 #ifdef KMP_THREAD_ATTR
968   status = pthread_attr_destroy(&thread_attr);
969   if (status != 0) {
970     kmp_msg_t err_code = KMP_ERR(status);
971     __kmp_msg(kmp_ms_warning, KMP_MSG(CantDestroyThreadAttrs), err_code,
972               __kmp_msg_null);
973     if (__kmp_generate_warnings == kmp_warnings_off) {
974       __kmp_str_free(&err_code.str);
975     }
976   }
977 #endif
978 
979   KMP_MB(); /* Flush all pending memory write invalidates.  */
980 
981   KA_TRACE(10, ("__kmp_create_monitor: monitor created %#.8lx\n",
982                 th->th.th_info.ds.ds_thread));
983 
984 } // __kmp_create_monitor
985 #endif // KMP_USE_MONITOR
986 
987 void __kmp_exit_thread(int exit_status) {
988 #if KMP_OS_WASI
989 // TODO: the wasm32-wasi-threads target does not yet support pthread_exit.
990 #else
991   pthread_exit((void *)(intptr_t)exit_status);
992 #endif
993 } // __kmp_exit_thread
994 
995 #if KMP_USE_MONITOR
996 void __kmp_resume_monitor();
997 
998 extern "C" void __kmp_reap_monitor(kmp_info_t *th) {
999   int status;
1000   void *exit_val;
1001 
1002   KA_TRACE(10, ("__kmp_reap_monitor: try to reap monitor thread with handle"
1003                 " %#.8lx\n",
1004                 th->th.th_info.ds.ds_thread));
1005 
1006   // If monitor has been created, its tid and gtid should be KMP_GTID_MONITOR.
1007   // If both tid and gtid are 0, it means the monitor did not ever start.
1008   // If both tid and gtid are KMP_GTID_DNE, the monitor has been shut down.
1009   KMP_DEBUG_ASSERT(th->th.th_info.ds.ds_tid == th->th.th_info.ds.ds_gtid);
1010   if (th->th.th_info.ds.ds_gtid != KMP_GTID_MONITOR) {
1011     KA_TRACE(10, ("__kmp_reap_monitor: monitor did not start, returning\n"));
1012     return;
1013   }
1014 
1015   KMP_MB(); /* Flush all pending memory write invalidates.  */
1016 
1017   /* First, check to see whether the monitor thread exists to wake it up. This
1018      is to avoid performance problem when the monitor sleeps during
1019      blocktime-size interval */
1020 
1021   status = pthread_kill(th->th.th_info.ds.ds_thread, 0);
1022   if (status != ESRCH) {
1023     __kmp_resume_monitor(); // Wake up the monitor thread
1024   }
1025   KA_TRACE(10, ("__kmp_reap_monitor: try to join with monitor\n"));
1026   status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
1027   if (exit_val != th) {
1028     __kmp_fatal(KMP_MSG(ReapMonitorError), KMP_ERR(status), __kmp_msg_null);
1029   }
1030 
1031   th->th.th_info.ds.ds_tid = KMP_GTID_DNE;
1032   th->th.th_info.ds.ds_gtid = KMP_GTID_DNE;
1033 
1034   KA_TRACE(10, ("__kmp_reap_monitor: done reaping monitor thread with handle"
1035                 " %#.8lx\n",
1036                 th->th.th_info.ds.ds_thread));
1037 
1038   KMP_MB(); /* Flush all pending memory write invalidates.  */
1039 }
1040 #else
1041 // Empty symbol to export (see exports_so.txt) when
1042 // monitor thread feature is disabled
1043 extern "C" void __kmp_reap_monitor(kmp_info_t *th) {
1044   (void)th;
1045 }
1046 #endif // KMP_USE_MONITOR
1047 
1048 void __kmp_reap_worker(kmp_info_t *th) {
1049   int status;
1050   void *exit_val;
1051 
1052   KMP_MB(); /* Flush all pending memory write invalidates.  */
1053 
1054   KA_TRACE(
1055       10, ("__kmp_reap_worker: try to reap T#%d\n", th->th.th_info.ds.ds_gtid));
1056 
1057   status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
1058 #ifdef KMP_DEBUG
1059   /* Don't expose these to the user until we understand when they trigger */
1060   if (status != 0) {
1061     __kmp_fatal(KMP_MSG(ReapWorkerError), KMP_ERR(status), __kmp_msg_null);
1062   }
1063   if (exit_val != th) {
1064     KA_TRACE(10, ("__kmp_reap_worker: worker T#%d did not reap properly, "
1065                   "exit_val = %p\n",
1066                   th->th.th_info.ds.ds_gtid, exit_val));
1067   }
1068 #else
1069   (void)status; // unused variable
1070 #endif /* KMP_DEBUG */
1071 
1072   KA_TRACE(10, ("__kmp_reap_worker: done reaping T#%d\n",
1073                 th->th.th_info.ds.ds_gtid));
1074 
1075   KMP_MB(); /* Flush all pending memory write invalidates.  */
1076 }
1077 
1078 #if KMP_HANDLE_SIGNALS
1079 
1080 static void __kmp_null_handler(int signo) {
1081   //  Do nothing, for doing SIG_IGN-type actions.
1082 } // __kmp_null_handler
1083 
1084 static void __kmp_team_handler(int signo) {
1085   if (__kmp_global.g.g_abort == 0) {
1086 /* Stage 1 signal handler, let's shut down all of the threads */
1087 #ifdef KMP_DEBUG
1088     __kmp_debug_printf("__kmp_team_handler: caught signal = %d\n", signo);
1089 #endif
1090     switch (signo) {
1091     case SIGHUP:
1092     case SIGINT:
1093     case SIGQUIT:
1094     case SIGILL:
1095     case SIGABRT:
1096     case SIGFPE:
1097     case SIGBUS:
1098     case SIGSEGV:
1099 #ifdef SIGSYS
1100     case SIGSYS:
1101 #endif
1102     case SIGTERM:
1103       if (__kmp_debug_buf) {
1104         __kmp_dump_debug_buffer();
1105       }
1106       __kmp_unregister_library(); // cleanup shared memory
1107       KMP_MB(); // Flush all pending memory write invalidates.
1108       TCW_4(__kmp_global.g.g_abort, signo);
1109       KMP_MB(); // Flush all pending memory write invalidates.
1110       TCW_4(__kmp_global.g.g_done, TRUE);
1111       KMP_MB(); // Flush all pending memory write invalidates.
1112       break;
1113     default:
1114 #ifdef KMP_DEBUG
1115       __kmp_debug_printf("__kmp_team_handler: unknown signal type");
1116 #endif
1117       break;
1118     }
1119   }
1120 } // __kmp_team_handler
1121 
1122 static void __kmp_sigaction(int signum, const struct sigaction *act,
1123                             struct sigaction *oldact) {
1124   int rc = sigaction(signum, act, oldact);
1125   KMP_CHECK_SYSFAIL_ERRNO("sigaction", rc);
1126 }
1127 
1128 static void __kmp_install_one_handler(int sig, sig_func_t handler_func,
1129                                       int parallel_init) {
1130   KMP_MB(); // Flush all pending memory write invalidates.
1131   KB_TRACE(60,
1132            ("__kmp_install_one_handler( %d, ..., %d )\n", sig, parallel_init));
1133   if (parallel_init) {
1134     struct sigaction new_action;
1135     struct sigaction old_action;
1136     new_action.sa_handler = handler_func;
1137     new_action.sa_flags = 0;
1138     sigfillset(&new_action.sa_mask);
1139     __kmp_sigaction(sig, &new_action, &old_action);
1140     if (old_action.sa_handler == __kmp_sighldrs[sig].sa_handler) {
1141       sigaddset(&__kmp_sigset, sig);
1142     } else {
1143       // Restore/keep user's handler if one previously installed.
1144       __kmp_sigaction(sig, &old_action, NULL);
1145     }
1146   } else {
1147     // Save initial/system signal handlers to see if user handlers installed.
1148     __kmp_sigaction(sig, NULL, &__kmp_sighldrs[sig]);
1149   }
1150   KMP_MB(); // Flush all pending memory write invalidates.
1151 } // __kmp_install_one_handler
1152 
1153 static void __kmp_remove_one_handler(int sig) {
1154   KB_TRACE(60, ("__kmp_remove_one_handler( %d )\n", sig));
1155   if (sigismember(&__kmp_sigset, sig)) {
1156     struct sigaction old;
1157     KMP_MB(); // Flush all pending memory write invalidates.
1158     __kmp_sigaction(sig, &__kmp_sighldrs[sig], &old);
1159     if ((old.sa_handler != __kmp_team_handler) &&
1160         (old.sa_handler != __kmp_null_handler)) {
1161       // Restore the users signal handler.
1162       KB_TRACE(10, ("__kmp_remove_one_handler: oops, not our handler, "
1163                     "restoring: sig=%d\n",
1164                     sig));
1165       __kmp_sigaction(sig, &old, NULL);
1166     }
1167     sigdelset(&__kmp_sigset, sig);
1168     KMP_MB(); // Flush all pending memory write invalidates.
1169   }
1170 } // __kmp_remove_one_handler
1171 
1172 void __kmp_install_signals(int parallel_init) {
1173   KB_TRACE(10, ("__kmp_install_signals( %d )\n", parallel_init));
1174   if (__kmp_handle_signals || !parallel_init) {
1175     // If ! parallel_init, we do not install handlers, just save original
1176     // handlers. Let us do it even __handle_signals is 0.
1177     sigemptyset(&__kmp_sigset);
1178     __kmp_install_one_handler(SIGHUP, __kmp_team_handler, parallel_init);
1179     __kmp_install_one_handler(SIGINT, __kmp_team_handler, parallel_init);
1180     __kmp_install_one_handler(SIGQUIT, __kmp_team_handler, parallel_init);
1181     __kmp_install_one_handler(SIGILL, __kmp_team_handler, parallel_init);
1182     __kmp_install_one_handler(SIGABRT, __kmp_team_handler, parallel_init);
1183     __kmp_install_one_handler(SIGFPE, __kmp_team_handler, parallel_init);
1184     __kmp_install_one_handler(SIGBUS, __kmp_team_handler, parallel_init);
1185     __kmp_install_one_handler(SIGSEGV, __kmp_team_handler, parallel_init);
1186 #ifdef SIGSYS
1187     __kmp_install_one_handler(SIGSYS, __kmp_team_handler, parallel_init);
1188 #endif // SIGSYS
1189     __kmp_install_one_handler(SIGTERM, __kmp_team_handler, parallel_init);
1190 #ifdef SIGPIPE
1191     __kmp_install_one_handler(SIGPIPE, __kmp_team_handler, parallel_init);
1192 #endif // SIGPIPE
1193   }
1194 } // __kmp_install_signals
1195 
1196 void __kmp_remove_signals(void) {
1197   int sig;
1198   KB_TRACE(10, ("__kmp_remove_signals()\n"));
1199   for (sig = 1; sig < NSIG; ++sig) {
1200     __kmp_remove_one_handler(sig);
1201   }
1202 } // __kmp_remove_signals
1203 
1204 #endif // KMP_HANDLE_SIGNALS
1205 
1206 void __kmp_enable(int new_state) {
1207 #ifdef KMP_CANCEL_THREADS
1208   int status, old_state;
1209   status = pthread_setcancelstate(new_state, &old_state);
1210   KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
1211   KMP_DEBUG_ASSERT(old_state == PTHREAD_CANCEL_DISABLE);
1212 #endif
1213 }
1214 
1215 void __kmp_disable(int *old_state) {
1216 #ifdef KMP_CANCEL_THREADS
1217   int status;
1218   status = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, old_state);
1219   KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
1220 #endif
1221 }
1222 
1223 static void __kmp_atfork_prepare(void) {
1224   __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
1225   __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
1226 }
1227 
1228 static void __kmp_atfork_parent(void) {
1229   __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
1230   __kmp_release_bootstrap_lock(&__kmp_initz_lock);
1231 }
1232 
1233 /* Reset the library so execution in the child starts "all over again" with
1234    clean data structures in initial states.  Don't worry about freeing memory
1235    allocated by parent, just abandon it to be safe. */
1236 static void __kmp_atfork_child(void) {
1237   __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
1238   __kmp_release_bootstrap_lock(&__kmp_initz_lock);
1239   /* TODO make sure this is done right for nested/sibling */
1240   // ATT:  Memory leaks are here? TODO: Check it and fix.
1241   /* KMP_ASSERT( 0 ); */
1242 
1243   ++__kmp_fork_count;
1244 
1245 #if KMP_AFFINITY_SUPPORTED
1246 #if KMP_OS_LINUX || KMP_OS_FREEBSD
1247   // reset the affinity in the child to the initial thread
1248   // affinity in the parent
1249   kmp_set_thread_affinity_mask_initial();
1250 #endif
1251   // Set default not to bind threads tightly in the child (we're expecting
1252   // over-subscription after the fork and this can improve things for
1253   // scripting languages that use OpenMP inside process-parallel code).
1254   if (__kmp_nested_proc_bind.bind_types != NULL) {
1255     __kmp_nested_proc_bind.bind_types[0] = proc_bind_false;
1256   }
1257   for (kmp_affinity_t *affinity : __kmp_affinities)
1258     *affinity = KMP_AFFINITY_INIT(affinity->env_var);
1259   __kmp_affin_fullMask = nullptr;
1260   __kmp_affin_origMask = nullptr;
1261   __kmp_topology = nullptr;
1262 #endif // KMP_AFFINITY_SUPPORTED
1263 
1264 #if KMP_USE_MONITOR
1265   __kmp_init_monitor = 0;
1266 #endif
1267   __kmp_init_parallel = FALSE;
1268   __kmp_init_middle = FALSE;
1269   __kmp_init_serial = FALSE;
1270   TCW_4(__kmp_init_gtid, FALSE);
1271   __kmp_init_common = FALSE;
1272 
1273   TCW_4(__kmp_init_user_locks, FALSE);
1274 #if !KMP_USE_DYNAMIC_LOCK
1275   __kmp_user_lock_table.used = 1;
1276   __kmp_user_lock_table.allocated = 0;
1277   __kmp_user_lock_table.table = NULL;
1278   __kmp_lock_blocks = NULL;
1279 #endif
1280 
1281   __kmp_all_nth = 0;
1282   TCW_4(__kmp_nth, 0);
1283 
1284   __kmp_thread_pool = NULL;
1285   __kmp_thread_pool_insert_pt = NULL;
1286   __kmp_team_pool = NULL;
1287 
1288   /* Must actually zero all the *cache arguments passed to __kmpc_threadprivate
1289      here so threadprivate doesn't use stale data */
1290   KA_TRACE(10, ("__kmp_atfork_child: checking cache address list %p\n",
1291                 __kmp_threadpriv_cache_list));
1292 
1293   while (__kmp_threadpriv_cache_list != NULL) {
1294 
1295     if (*__kmp_threadpriv_cache_list->addr != NULL) {
1296       KC_TRACE(50, ("__kmp_atfork_child: zeroing cache at address %p\n",
1297                     &(*__kmp_threadpriv_cache_list->addr)));
1298 
1299       *__kmp_threadpriv_cache_list->addr = NULL;
1300     }
1301     __kmp_threadpriv_cache_list = __kmp_threadpriv_cache_list->next;
1302   }
1303 
1304   __kmp_init_runtime = FALSE;
1305 
1306   /* reset statically initialized locks */
1307   __kmp_init_bootstrap_lock(&__kmp_initz_lock);
1308   __kmp_init_bootstrap_lock(&__kmp_stdio_lock);
1309   __kmp_init_bootstrap_lock(&__kmp_console_lock);
1310   __kmp_init_bootstrap_lock(&__kmp_task_team_lock);
1311 
1312 #if USE_ITT_BUILD
1313   __kmp_itt_reset(); // reset ITT's global state
1314 #endif /* USE_ITT_BUILD */
1315 
1316   {
1317     // Child process often get terminated without any use of OpenMP. That might
1318     // cause mapped shared memory file to be left unattended. Thus we postpone
1319     // library registration till middle initialization in the child process.
1320     __kmp_need_register_serial = FALSE;
1321     __kmp_serial_initialize();
1322   }
1323 
1324   /* This is necessary to make sure no stale data is left around */
1325   /* AC: customers complain that we use unsafe routines in the atfork
1326      handler. Mathworks: dlsym() is unsafe. We call dlsym and dlopen
1327      in dynamic_link when check the presence of shared tbbmalloc library.
1328      Suggestion is to make the library initialization lazier, similar
1329      to what done for __kmpc_begin(). */
1330   // TODO: synchronize all static initializations with regular library
1331   //       startup; look at kmp_global.cpp and etc.
1332   //__kmp_internal_begin ();
1333 }
1334 
1335 void __kmp_register_atfork(void) {
1336   if (__kmp_need_register_atfork) {
1337 #if !KMP_OS_WASI
1338     int status = pthread_atfork(__kmp_atfork_prepare, __kmp_atfork_parent,
1339                                 __kmp_atfork_child);
1340     KMP_CHECK_SYSFAIL("pthread_atfork", status);
1341 #endif
1342     __kmp_need_register_atfork = FALSE;
1343   }
1344 }
1345 
1346 void __kmp_suspend_initialize(void) {
1347   int status;
1348   status = pthread_mutexattr_init(&__kmp_suspend_mutex_attr);
1349   KMP_CHECK_SYSFAIL("pthread_mutexattr_init", status);
1350   status = pthread_condattr_init(&__kmp_suspend_cond_attr);
1351   KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
1352 }
1353 
1354 void __kmp_suspend_initialize_thread(kmp_info_t *th) {
1355   int old_value = KMP_ATOMIC_LD_RLX(&th->th.th_suspend_init_count);
1356   int new_value = __kmp_fork_count + 1;
1357   // Return if already initialized
1358   if (old_value == new_value)
1359     return;
1360   // Wait, then return if being initialized
1361   if (old_value == -1 || !__kmp_atomic_compare_store(
1362                              &th->th.th_suspend_init_count, old_value, -1)) {
1363     while (KMP_ATOMIC_LD_ACQ(&th->th.th_suspend_init_count) != new_value) {
1364       KMP_CPU_PAUSE();
1365     }
1366   } else {
1367     // Claim to be the initializer and do initializations
1368     int status;
1369     status = pthread_cond_init(&th->th.th_suspend_cv.c_cond,
1370                                &__kmp_suspend_cond_attr);
1371     KMP_CHECK_SYSFAIL("pthread_cond_init", status);
1372     status = pthread_mutex_init(&th->th.th_suspend_mx.m_mutex,
1373                                 &__kmp_suspend_mutex_attr);
1374     KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
1375     KMP_ATOMIC_ST_REL(&th->th.th_suspend_init_count, new_value);
1376   }
1377 }
1378 
1379 void __kmp_suspend_uninitialize_thread(kmp_info_t *th) {
1380   if (KMP_ATOMIC_LD_ACQ(&th->th.th_suspend_init_count) > __kmp_fork_count) {
1381     /* this means we have initialize the suspension pthread objects for this
1382        thread in this instance of the process */
1383     int status;
1384 
1385     status = pthread_cond_destroy(&th->th.th_suspend_cv.c_cond);
1386     if (status != 0 && status != EBUSY) {
1387       KMP_SYSFAIL("pthread_cond_destroy", status);
1388     }
1389     status = pthread_mutex_destroy(&th->th.th_suspend_mx.m_mutex);
1390     if (status != 0 && status != EBUSY) {
1391       KMP_SYSFAIL("pthread_mutex_destroy", status);
1392     }
1393     --th->th.th_suspend_init_count;
1394     KMP_DEBUG_ASSERT(KMP_ATOMIC_LD_RLX(&th->th.th_suspend_init_count) ==
1395                      __kmp_fork_count);
1396   }
1397 }
1398 
1399 // return true if lock obtained, false otherwise
1400 int __kmp_try_suspend_mx(kmp_info_t *th) {
1401   return (pthread_mutex_trylock(&th->th.th_suspend_mx.m_mutex) == 0);
1402 }
1403 
1404 void __kmp_lock_suspend_mx(kmp_info_t *th) {
1405   int status = pthread_mutex_lock(&th->th.th_suspend_mx.m_mutex);
1406   KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
1407 }
1408 
1409 void __kmp_unlock_suspend_mx(kmp_info_t *th) {
1410   int status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
1411   KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
1412 }
1413 
1414 /* This routine puts the calling thread to sleep after setting the
1415    sleep bit for the indicated flag variable to true. */
1416 template <class C>
1417 static inline void __kmp_suspend_template(int th_gtid, C *flag) {
1418   KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_suspend);
1419   kmp_info_t *th = __kmp_threads[th_gtid];
1420   int status;
1421   typename C::flag_t old_spin;
1422 
1423   KF_TRACE(30, ("__kmp_suspend_template: T#%d enter for flag = %p\n", th_gtid,
1424                 flag->get()));
1425 
1426   __kmp_suspend_initialize_thread(th);
1427 
1428   __kmp_lock_suspend_mx(th);
1429 
1430   KF_TRACE(10, ("__kmp_suspend_template: T#%d setting sleep bit for spin(%p)\n",
1431                 th_gtid, flag->get()));
1432 
1433   /* TODO: shouldn't this use release semantics to ensure that
1434      __kmp_suspend_initialize_thread gets called first? */
1435   old_spin = flag->set_sleeping();
1436   TCW_PTR(th->th.th_sleep_loc, (void *)flag);
1437   th->th.th_sleep_loc_type = flag->get_type();
1438   if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME &&
1439       __kmp_pause_status != kmp_soft_paused) {
1440     flag->unset_sleeping();
1441     TCW_PTR(th->th.th_sleep_loc, NULL);
1442     th->th.th_sleep_loc_type = flag_unset;
1443     __kmp_unlock_suspend_mx(th);
1444     return;
1445   }
1446   KF_TRACE(5, ("__kmp_suspend_template: T#%d set sleep bit for spin(%p)==%x,"
1447                " was %x\n",
1448                th_gtid, flag->get(), flag->load(), old_spin));
1449 
1450   if (flag->done_check_val(old_spin) || flag->done_check()) {
1451     flag->unset_sleeping();
1452     TCW_PTR(th->th.th_sleep_loc, NULL);
1453     th->th.th_sleep_loc_type = flag_unset;
1454     KF_TRACE(5, ("__kmp_suspend_template: T#%d false alarm, reset sleep bit "
1455                  "for spin(%p)\n",
1456                  th_gtid, flag->get()));
1457   } else {
1458     /* Encapsulate in a loop as the documentation states that this may
1459        "with low probability" return when the condition variable has
1460        not been signaled or broadcast */
1461     int deactivated = FALSE;
1462 
1463     while (flag->is_sleeping()) {
1464 #ifdef DEBUG_SUSPEND
1465       char buffer[128];
1466       __kmp_suspend_count++;
1467       __kmp_print_cond(buffer, &th->th.th_suspend_cv);
1468       __kmp_printf("__kmp_suspend_template: suspending T#%d: %s\n", th_gtid,
1469                    buffer);
1470 #endif
1471       // Mark the thread as no longer active (only in the first iteration of the
1472       // loop).
1473       if (!deactivated) {
1474         th->th.th_active = FALSE;
1475         if (th->th.th_active_in_pool) {
1476           th->th.th_active_in_pool = FALSE;
1477           KMP_ATOMIC_DEC(&__kmp_thread_pool_active_nth);
1478           KMP_DEBUG_ASSERT(TCR_4(__kmp_thread_pool_active_nth) >= 0);
1479         }
1480         deactivated = TRUE;
1481       }
1482 
1483       KMP_DEBUG_ASSERT(th->th.th_sleep_loc);
1484       KMP_DEBUG_ASSERT(flag->get_type() == th->th.th_sleep_loc_type);
1485 
1486 #if USE_SUSPEND_TIMEOUT
1487       struct timespec now;
1488       struct timeval tval;
1489       int msecs;
1490 
1491       status = gettimeofday(&tval, NULL);
1492       KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
1493       TIMEVAL_TO_TIMESPEC(&tval, &now);
1494 
1495       msecs = (4 * __kmp_dflt_blocktime) + 200;
1496       now.tv_sec += msecs / 1000;
1497       now.tv_nsec += (msecs % 1000) * 1000;
1498 
1499       KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform "
1500                     "pthread_cond_timedwait\n",
1501                     th_gtid));
1502       status = pthread_cond_timedwait(&th->th.th_suspend_cv.c_cond,
1503                                       &th->th.th_suspend_mx.m_mutex, &now);
1504 #else
1505       KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform"
1506                     " pthread_cond_wait\n",
1507                     th_gtid));
1508       status = pthread_cond_wait(&th->th.th_suspend_cv.c_cond,
1509                                  &th->th.th_suspend_mx.m_mutex);
1510 #endif // USE_SUSPEND_TIMEOUT
1511 
1512       if ((status != 0) && (status != EINTR) && (status != ETIMEDOUT)) {
1513         KMP_SYSFAIL("pthread_cond_wait", status);
1514       }
1515 
1516       KMP_DEBUG_ASSERT(flag->get_type() == flag->get_ptr_type());
1517 
1518       if (!flag->is_sleeping() &&
1519           ((status == EINTR) || (status == ETIMEDOUT))) {
1520         // if interrupt or timeout, and thread is no longer sleeping, we need to
1521         // make sure sleep_loc gets reset; however, this shouldn't be needed if
1522         // we woke up with resume
1523         flag->unset_sleeping();
1524         TCW_PTR(th->th.th_sleep_loc, NULL);
1525         th->th.th_sleep_loc_type = flag_unset;
1526       }
1527 #ifdef KMP_DEBUG
1528       if (status == ETIMEDOUT) {
1529         if (flag->is_sleeping()) {
1530           KF_TRACE(100,
1531                    ("__kmp_suspend_template: T#%d timeout wakeup\n", th_gtid));
1532         } else {
1533           KF_TRACE(2, ("__kmp_suspend_template: T#%d timeout wakeup, sleep bit "
1534                        "not set!\n",
1535                        th_gtid));
1536           TCW_PTR(th->th.th_sleep_loc, NULL);
1537           th->th.th_sleep_loc_type = flag_unset;
1538         }
1539       } else if (flag->is_sleeping()) {
1540         KF_TRACE(100,
1541                  ("__kmp_suspend_template: T#%d spurious wakeup\n", th_gtid));
1542       }
1543 #endif
1544     } // while
1545 
1546     // Mark the thread as active again (if it was previous marked as inactive)
1547     if (deactivated) {
1548       th->th.th_active = TRUE;
1549       if (TCR_4(th->th.th_in_pool)) {
1550         KMP_ATOMIC_INC(&__kmp_thread_pool_active_nth);
1551         th->th.th_active_in_pool = TRUE;
1552       }
1553     }
1554   }
1555   // We may have had the loop variable set before entering the loop body;
1556   // so we need to reset sleep_loc.
1557   TCW_PTR(th->th.th_sleep_loc, NULL);
1558   th->th.th_sleep_loc_type = flag_unset;
1559 
1560   KMP_DEBUG_ASSERT(!flag->is_sleeping());
1561   KMP_DEBUG_ASSERT(!th->th.th_sleep_loc);
1562 #ifdef DEBUG_SUSPEND
1563   {
1564     char buffer[128];
1565     __kmp_print_cond(buffer, &th->th.th_suspend_cv);
1566     __kmp_printf("__kmp_suspend_template: T#%d has awakened: %s\n", th_gtid,
1567                  buffer);
1568   }
1569 #endif
1570 
1571   __kmp_unlock_suspend_mx(th);
1572   KF_TRACE(30, ("__kmp_suspend_template: T#%d exit\n", th_gtid));
1573 }
1574 
1575 template <bool C, bool S>
1576 void __kmp_suspend_32(int th_gtid, kmp_flag_32<C, S> *flag) {
1577   __kmp_suspend_template(th_gtid, flag);
1578 }
1579 template <bool C, bool S>
1580 void __kmp_suspend_64(int th_gtid, kmp_flag_64<C, S> *flag) {
1581   __kmp_suspend_template(th_gtid, flag);
1582 }
1583 template <bool C, bool S>
1584 void __kmp_atomic_suspend_64(int th_gtid, kmp_atomic_flag_64<C, S> *flag) {
1585   __kmp_suspend_template(th_gtid, flag);
1586 }
1587 void __kmp_suspend_oncore(int th_gtid, kmp_flag_oncore *flag) {
1588   __kmp_suspend_template(th_gtid, flag);
1589 }
1590 
1591 template void __kmp_suspend_32<false, false>(int, kmp_flag_32<false, false> *);
1592 template void __kmp_suspend_64<false, true>(int, kmp_flag_64<false, true> *);
1593 template void __kmp_suspend_64<true, false>(int, kmp_flag_64<true, false> *);
1594 template void
1595 __kmp_atomic_suspend_64<false, true>(int, kmp_atomic_flag_64<false, true> *);
1596 template void
1597 __kmp_atomic_suspend_64<true, false>(int, kmp_atomic_flag_64<true, false> *);
1598 
1599 /* This routine signals the thread specified by target_gtid to wake up
1600    after setting the sleep bit indicated by the flag argument to FALSE.
1601    The target thread must already have called __kmp_suspend_template() */
1602 template <class C>
1603 static inline void __kmp_resume_template(int target_gtid, C *flag) {
1604   KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
1605   kmp_info_t *th = __kmp_threads[target_gtid];
1606   int status;
1607 
1608 #ifdef KMP_DEBUG
1609   int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
1610 #endif
1611 
1612   KF_TRACE(30, ("__kmp_resume_template: T#%d wants to wakeup T#%d enter\n",
1613                 gtid, target_gtid));
1614   KMP_DEBUG_ASSERT(gtid != target_gtid);
1615 
1616   __kmp_suspend_initialize_thread(th);
1617 
1618   __kmp_lock_suspend_mx(th);
1619 
1620   if (!flag || flag != th->th.th_sleep_loc) {
1621     // coming from __kmp_null_resume_wrapper, or thread is now sleeping on a
1622     // different location; wake up at new location
1623     flag = (C *)CCAST(void *, th->th.th_sleep_loc);
1624   }
1625 
1626   // First, check if the flag is null or its type has changed. If so, someone
1627   // else woke it up.
1628   if (!flag) { // Thread doesn't appear to be sleeping on anything
1629     KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
1630                  "awake: flag(%p)\n",
1631                  gtid, target_gtid, (void *)NULL));
1632     __kmp_unlock_suspend_mx(th);
1633     return;
1634   } else if (flag->get_type() != th->th.th_sleep_loc_type) {
1635     // Flag type does not appear to match this function template; possibly the
1636     // thread is sleeping on something else. Try null resume again.
1637     KF_TRACE(
1638         5,
1639         ("__kmp_resume_template: T#%d retrying, thread T#%d Mismatch flag(%p), "
1640          "spin(%p) type=%d ptr_type=%d\n",
1641          gtid, target_gtid, flag, flag->get(), flag->get_type(),
1642          th->th.th_sleep_loc_type));
1643     __kmp_unlock_suspend_mx(th);
1644     __kmp_null_resume_wrapper(th);
1645     return;
1646   } else { // if multiple threads are sleeping, flag should be internally
1647     // referring to a specific thread here
1648     if (!flag->is_sleeping()) {
1649       KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
1650                    "awake: flag(%p): %u\n",
1651                    gtid, target_gtid, flag->get(), (unsigned int)flag->load()));
1652       __kmp_unlock_suspend_mx(th);
1653       return;
1654     }
1655   }
1656   KMP_DEBUG_ASSERT(flag);
1657   flag->unset_sleeping();
1658   TCW_PTR(th->th.th_sleep_loc, NULL);
1659   th->th.th_sleep_loc_type = flag_unset;
1660 
1661   KF_TRACE(5, ("__kmp_resume_template: T#%d about to wakeup T#%d, reset "
1662                "sleep bit for flag's loc(%p): %u\n",
1663                gtid, target_gtid, flag->get(), (unsigned int)flag->load()));
1664 
1665 #ifdef DEBUG_SUSPEND
1666   {
1667     char buffer[128];
1668     __kmp_print_cond(buffer, &th->th.th_suspend_cv);
1669     __kmp_printf("__kmp_resume_template: T#%d resuming T#%d: %s\n", gtid,
1670                  target_gtid, buffer);
1671   }
1672 #endif
1673   status = pthread_cond_signal(&th->th.th_suspend_cv.c_cond);
1674   KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
1675   __kmp_unlock_suspend_mx(th);
1676   KF_TRACE(30, ("__kmp_resume_template: T#%d exiting after signaling wake up"
1677                 " for T#%d\n",
1678                 gtid, target_gtid));
1679 }
1680 
1681 template <bool C, bool S>
1682 void __kmp_resume_32(int target_gtid, kmp_flag_32<C, S> *flag) {
1683   __kmp_resume_template(target_gtid, flag);
1684 }
1685 template <bool C, bool S>
1686 void __kmp_resume_64(int target_gtid, kmp_flag_64<C, S> *flag) {
1687   __kmp_resume_template(target_gtid, flag);
1688 }
1689 template <bool C, bool S>
1690 void __kmp_atomic_resume_64(int target_gtid, kmp_atomic_flag_64<C, S> *flag) {
1691   __kmp_resume_template(target_gtid, flag);
1692 }
1693 void __kmp_resume_oncore(int target_gtid, kmp_flag_oncore *flag) {
1694   __kmp_resume_template(target_gtid, flag);
1695 }
1696 
1697 template void __kmp_resume_32<false, true>(int, kmp_flag_32<false, true> *);
1698 template void __kmp_resume_32<false, false>(int, kmp_flag_32<false, false> *);
1699 template void __kmp_resume_64<false, true>(int, kmp_flag_64<false, true> *);
1700 template void
1701 __kmp_atomic_resume_64<false, true>(int, kmp_atomic_flag_64<false, true> *);
1702 
1703 #if KMP_USE_MONITOR
1704 void __kmp_resume_monitor() {
1705   KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
1706   int status;
1707 #ifdef KMP_DEBUG
1708   int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
1709   KF_TRACE(30, ("__kmp_resume_monitor: T#%d wants to wakeup T#%d enter\n", gtid,
1710                 KMP_GTID_MONITOR));
1711   KMP_DEBUG_ASSERT(gtid != KMP_GTID_MONITOR);
1712 #endif
1713   status = pthread_mutex_lock(&__kmp_wait_mx.m_mutex);
1714   KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
1715 #ifdef DEBUG_SUSPEND
1716   {
1717     char buffer[128];
1718     __kmp_print_cond(buffer, &__kmp_wait_cv.c_cond);
1719     __kmp_printf("__kmp_resume_monitor: T#%d resuming T#%d: %s\n", gtid,
1720                  KMP_GTID_MONITOR, buffer);
1721   }
1722 #endif
1723   status = pthread_cond_signal(&__kmp_wait_cv.c_cond);
1724   KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
1725   status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
1726   KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
1727   KF_TRACE(30, ("__kmp_resume_monitor: T#%d exiting after signaling wake up"
1728                 " for T#%d\n",
1729                 gtid, KMP_GTID_MONITOR));
1730 }
1731 #endif // KMP_USE_MONITOR
1732 
1733 void __kmp_yield() { sched_yield(); }
1734 
1735 void __kmp_gtid_set_specific(int gtid) {
1736   if (__kmp_init_gtid) {
1737     int status;
1738     status = pthread_setspecific(__kmp_gtid_threadprivate_key,
1739                                  (void *)(intptr_t)(gtid + 1));
1740     KMP_CHECK_SYSFAIL("pthread_setspecific", status);
1741   } else {
1742     KA_TRACE(50, ("__kmp_gtid_set_specific: runtime shutdown, returning\n"));
1743   }
1744 }
1745 
1746 int __kmp_gtid_get_specific() {
1747   int gtid;
1748   if (!__kmp_init_gtid) {
1749     KA_TRACE(50, ("__kmp_gtid_get_specific: runtime shutdown, returning "
1750                   "KMP_GTID_SHUTDOWN\n"));
1751     return KMP_GTID_SHUTDOWN;
1752   }
1753   gtid = (int)(size_t)pthread_getspecific(__kmp_gtid_threadprivate_key);
1754   if (gtid == 0) {
1755     gtid = KMP_GTID_DNE;
1756   } else {
1757     gtid--;
1758   }
1759   KA_TRACE(50, ("__kmp_gtid_get_specific: key:%d gtid:%d\n",
1760                 __kmp_gtid_threadprivate_key, gtid));
1761   return gtid;
1762 }
1763 
1764 double __kmp_read_cpu_time(void) {
1765   /*clock_t   t;*/
1766   struct tms buffer;
1767 
1768   /*t =*/times(&buffer);
1769 
1770   return (double)(buffer.tms_utime + buffer.tms_cutime) /
1771          (double)CLOCKS_PER_SEC;
1772 }
1773 
1774 int __kmp_read_system_info(struct kmp_sys_info *info) {
1775   int status;
1776   struct rusage r_usage;
1777 
1778   memset(info, 0, sizeof(*info));
1779 
1780   status = getrusage(RUSAGE_SELF, &r_usage);
1781   KMP_CHECK_SYSFAIL_ERRNO("getrusage", status);
1782 
1783 #if !KMP_OS_WASI
1784   // The maximum resident set size utilized (in kilobytes)
1785   info->maxrss = r_usage.ru_maxrss;
1786   // The number of page faults serviced without any I/O
1787   info->minflt = r_usage.ru_minflt;
1788   // The number of page faults serviced that required I/O
1789   info->majflt = r_usage.ru_majflt;
1790   // The number of times a process was "swapped" out of memory
1791   info->nswap = r_usage.ru_nswap;
1792   // The number of times the file system had to perform input
1793   info->inblock = r_usage.ru_inblock;
1794   // The number of times the file system had to perform output
1795   info->oublock = r_usage.ru_oublock;
1796   // The number of times a context switch was voluntarily
1797   info->nvcsw = r_usage.ru_nvcsw;
1798   // The number of times a context switch was forced
1799   info->nivcsw = r_usage.ru_nivcsw;
1800 #endif
1801 
1802   return (status != 0);
1803 }
1804 
1805 void __kmp_read_system_time(double *delta) {
1806   double t_ns;
1807   struct timeval tval;
1808   struct timespec stop;
1809   int status;
1810 
1811   status = gettimeofday(&tval, NULL);
1812   KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
1813   TIMEVAL_TO_TIMESPEC(&tval, &stop);
1814   t_ns = (double)(TS2NS(stop) - TS2NS(__kmp_sys_timer_data.start));
1815   *delta = (t_ns * 1e-9);
1816 }
1817 
1818 void __kmp_clear_system_time(void) {
1819   struct timeval tval;
1820   int status;
1821   status = gettimeofday(&tval, NULL);
1822   KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
1823   TIMEVAL_TO_TIMESPEC(&tval, &__kmp_sys_timer_data.start);
1824 }
1825 
1826 static int __kmp_get_xproc(void) {
1827 
1828   int r = 0;
1829 
1830 #if KMP_OS_LINUX
1831 
1832   __kmp_type_convert(sysconf(_SC_NPROCESSORS_CONF), &(r));
1833 
1834 #elif KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_OPENBSD || \
1835     KMP_OS_HURD || KMP_OS_SOLARIS || KMP_OS_WASI
1836 
1837   __kmp_type_convert(sysconf(_SC_NPROCESSORS_ONLN), &(r));
1838 
1839 #elif KMP_OS_DARWIN
1840 
1841   // Bug C77011 High "OpenMP Threads and number of active cores".
1842 
1843   // Find the number of available CPUs.
1844   kern_return_t rc;
1845   host_basic_info_data_t info;
1846   mach_msg_type_number_t num = HOST_BASIC_INFO_COUNT;
1847   rc = host_info(mach_host_self(), HOST_BASIC_INFO, (host_info_t)&info, &num);
1848   if (rc == 0 && num == HOST_BASIC_INFO_COUNT) {
1849     // Cannot use KA_TRACE() here because this code works before trace support
1850     // is initialized.
1851     r = info.avail_cpus;
1852   } else {
1853     KMP_WARNING(CantGetNumAvailCPU);
1854     KMP_INFORM(AssumedNumCPU);
1855   }
1856 
1857 #else
1858 
1859 #error "Unknown or unsupported OS."
1860 
1861 #endif
1862 
1863   return r > 0 ? r : 2; /* guess value of 2 if OS told us 0 */
1864 
1865 } // __kmp_get_xproc
1866 
1867 int __kmp_read_from_file(char const *path, char const *format, ...) {
1868   int result;
1869   va_list args;
1870 
1871   va_start(args, format);
1872   FILE *f = fopen(path, "rb");
1873   if (f == NULL) {
1874     va_end(args);
1875     return 0;
1876   }
1877   result = vfscanf(f, format, args);
1878   fclose(f);
1879   va_end(args);
1880 
1881   return result;
1882 }
1883 
1884 void __kmp_runtime_initialize(void) {
1885   int status;
1886   pthread_mutexattr_t mutex_attr;
1887   pthread_condattr_t cond_attr;
1888 
1889   if (__kmp_init_runtime) {
1890     return;
1891   }
1892 
1893 #if (KMP_ARCH_X86 || KMP_ARCH_X86_64)
1894   if (!__kmp_cpuinfo.initialized) {
1895     __kmp_query_cpuid(&__kmp_cpuinfo);
1896   }
1897 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
1898 
1899   __kmp_xproc = __kmp_get_xproc();
1900 
1901 #if !KMP_32_BIT_ARCH
1902   struct rlimit rlim;
1903   // read stack size of calling thread, save it as default for worker threads;
1904   // this should be done before reading environment variables
1905   status = getrlimit(RLIMIT_STACK, &rlim);
1906   if (status == 0) { // success?
1907     __kmp_stksize = rlim.rlim_cur;
1908     __kmp_check_stksize(&__kmp_stksize); // check value and adjust if needed
1909   }
1910 #endif /* KMP_32_BIT_ARCH */
1911 
1912   if (sysconf(_SC_THREADS)) {
1913 
1914     /* Query the maximum number of threads */
1915     __kmp_type_convert(sysconf(_SC_THREAD_THREADS_MAX), &(__kmp_sys_max_nth));
1916 #ifdef __ve__
1917     if (__kmp_sys_max_nth == -1) {
1918       // VE's pthread supports only up to 64 threads per a VE process.
1919       // So we use that KMP_MAX_NTH (predefined as 64) here.
1920       __kmp_sys_max_nth = KMP_MAX_NTH;
1921     }
1922 #else
1923     if (__kmp_sys_max_nth == -1) {
1924       /* Unlimited threads for NPTL */
1925       __kmp_sys_max_nth = INT_MAX;
1926     } else if (__kmp_sys_max_nth <= 1) {
1927       /* Can't tell, just use PTHREAD_THREADS_MAX */
1928       __kmp_sys_max_nth = KMP_MAX_NTH;
1929     }
1930 #endif
1931 
1932     /* Query the minimum stack size */
1933     __kmp_sys_min_stksize = sysconf(_SC_THREAD_STACK_MIN);
1934     if (__kmp_sys_min_stksize <= 1) {
1935       __kmp_sys_min_stksize = KMP_MIN_STKSIZE;
1936     }
1937   }
1938 
1939   /* Set up minimum number of threads to switch to TLS gtid */
1940   __kmp_tls_gtid_min = KMP_TLS_GTID_MIN;
1941 
1942   status = pthread_key_create(&__kmp_gtid_threadprivate_key,
1943                               __kmp_internal_end_dest);
1944   KMP_CHECK_SYSFAIL("pthread_key_create", status);
1945   status = pthread_mutexattr_init(&mutex_attr);
1946   KMP_CHECK_SYSFAIL("pthread_mutexattr_init", status);
1947   status = pthread_mutex_init(&__kmp_wait_mx.m_mutex, &mutex_attr);
1948   KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
1949   status = pthread_mutexattr_destroy(&mutex_attr);
1950   KMP_CHECK_SYSFAIL("pthread_mutexattr_destroy", status);
1951   status = pthread_condattr_init(&cond_attr);
1952   KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
1953   status = pthread_cond_init(&__kmp_wait_cv.c_cond, &cond_attr);
1954   KMP_CHECK_SYSFAIL("pthread_cond_init", status);
1955   status = pthread_condattr_destroy(&cond_attr);
1956   KMP_CHECK_SYSFAIL("pthread_condattr_destroy", status);
1957 #if USE_ITT_BUILD
1958   __kmp_itt_initialize();
1959 #endif /* USE_ITT_BUILD */
1960 
1961   __kmp_init_runtime = TRUE;
1962 }
1963 
1964 void __kmp_runtime_destroy(void) {
1965   int status;
1966 
1967   if (!__kmp_init_runtime) {
1968     return; // Nothing to do.
1969   }
1970 
1971 #if USE_ITT_BUILD
1972   __kmp_itt_destroy();
1973 #endif /* USE_ITT_BUILD */
1974 
1975   status = pthread_key_delete(__kmp_gtid_threadprivate_key);
1976   KMP_CHECK_SYSFAIL("pthread_key_delete", status);
1977 
1978   status = pthread_mutex_destroy(&__kmp_wait_mx.m_mutex);
1979   if (status != 0 && status != EBUSY) {
1980     KMP_SYSFAIL("pthread_mutex_destroy", status);
1981   }
1982   status = pthread_cond_destroy(&__kmp_wait_cv.c_cond);
1983   if (status != 0 && status != EBUSY) {
1984     KMP_SYSFAIL("pthread_cond_destroy", status);
1985   }
1986 #if KMP_AFFINITY_SUPPORTED
1987   __kmp_affinity_uninitialize();
1988 #endif
1989 
1990   __kmp_init_runtime = FALSE;
1991 }
1992 
1993 /* Put the thread to sleep for a time period */
1994 /* NOTE: not currently used anywhere */
1995 void __kmp_thread_sleep(int millis) { sleep((millis + 500) / 1000); }
1996 
1997 /* Calculate the elapsed wall clock time for the user */
1998 void __kmp_elapsed(double *t) {
1999   int status;
2000 #ifdef FIX_SGI_CLOCK
2001   struct timespec ts;
2002 
2003   status = clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
2004   KMP_CHECK_SYSFAIL_ERRNO("clock_gettime", status);
2005   *t =
2006       (double)ts.tv_nsec * (1.0 / (double)KMP_NSEC_PER_SEC) + (double)ts.tv_sec;
2007 #else
2008   struct timeval tv;
2009 
2010   status = gettimeofday(&tv, NULL);
2011   KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
2012   *t =
2013       (double)tv.tv_usec * (1.0 / (double)KMP_USEC_PER_SEC) + (double)tv.tv_sec;
2014 #endif
2015 }
2016 
2017 /* Calculate the elapsed wall clock tick for the user */
2018 void __kmp_elapsed_tick(double *t) { *t = 1 / (double)CLOCKS_PER_SEC; }
2019 
2020 /* Return the current time stamp in nsec */
2021 kmp_uint64 __kmp_now_nsec() {
2022   struct timeval t;
2023   gettimeofday(&t, NULL);
2024   kmp_uint64 nsec = (kmp_uint64)KMP_NSEC_PER_SEC * (kmp_uint64)t.tv_sec +
2025                     (kmp_uint64)1000 * (kmp_uint64)t.tv_usec;
2026   return nsec;
2027 }
2028 
2029 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
2030 /* Measure clock ticks per millisecond */
2031 void __kmp_initialize_system_tick() {
2032   kmp_uint64 now, nsec2, diff;
2033   kmp_uint64 delay = 1000000; // ~450 usec on most machines.
2034   kmp_uint64 nsec = __kmp_now_nsec();
2035   kmp_uint64 goal = __kmp_hardware_timestamp() + delay;
2036   while ((now = __kmp_hardware_timestamp()) < goal)
2037     ;
2038   nsec2 = __kmp_now_nsec();
2039   diff = nsec2 - nsec;
2040   if (diff > 0) {
2041     double tpus = 1000.0 * (double)(delay + (now - goal)) / (double)diff;
2042     if (tpus > 0.0) {
2043       __kmp_ticks_per_msec = (kmp_uint64)(tpus * 1000.0);
2044       __kmp_ticks_per_usec = (kmp_uint64)tpus;
2045     }
2046   }
2047 }
2048 #endif
2049 
2050 /* Determine whether the given address is mapped into the current address
2051    space. */
2052 
2053 int __kmp_is_address_mapped(void *addr) {
2054 
2055   int found = 0;
2056   int rc;
2057 
2058 #if KMP_OS_LINUX || KMP_OS_HURD
2059 
2060   /* On GNUish OSes, read the /proc/<pid>/maps pseudo-file to get all the
2061      address ranges mapped into the address space. */
2062 
2063   char *name = __kmp_str_format("/proc/%d/maps", getpid());
2064   FILE *file = NULL;
2065 
2066   file = fopen(name, "r");
2067   KMP_ASSERT(file != NULL);
2068 
2069   for (;;) {
2070 
2071     void *beginning = NULL;
2072     void *ending = NULL;
2073     char perms[5];
2074 
2075     rc = fscanf(file, "%p-%p %4s %*[^\n]\n", &beginning, &ending, perms);
2076     if (rc == EOF) {
2077       break;
2078     }
2079     KMP_ASSERT(rc == 3 &&
2080                KMP_STRLEN(perms) == 4); // Make sure all fields are read.
2081 
2082     // Ending address is not included in the region, but beginning is.
2083     if ((addr >= beginning) && (addr < ending)) {
2084       perms[2] = 0; // 3th and 4th character does not matter.
2085       if (strcmp(perms, "rw") == 0) {
2086         // Memory we are looking for should be readable and writable.
2087         found = 1;
2088       }
2089       break;
2090     }
2091   }
2092 
2093   // Free resources.
2094   fclose(file);
2095   KMP_INTERNAL_FREE(name);
2096 #elif KMP_OS_FREEBSD
2097   char *buf;
2098   size_t lstsz;
2099   int mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid()};
2100   rc = sysctl(mib, 4, NULL, &lstsz, NULL, 0);
2101   if (rc < 0)
2102     return 0;
2103   // We pass from number of vm entry's semantic
2104   // to size of whole entry map list.
2105   lstsz = lstsz * 4 / 3;
2106   buf = reinterpret_cast<char *>(kmpc_malloc(lstsz));
2107   rc = sysctl(mib, 4, buf, &lstsz, NULL, 0);
2108   if (rc < 0) {
2109     kmpc_free(buf);
2110     return 0;
2111   }
2112 
2113   char *lw = buf;
2114   char *up = buf + lstsz;
2115 
2116   while (lw < up) {
2117     struct kinfo_vmentry *cur = reinterpret_cast<struct kinfo_vmentry *>(lw);
2118     size_t cursz = cur->kve_structsize;
2119     if (cursz == 0)
2120       break;
2121     void *start = reinterpret_cast<void *>(cur->kve_start);
2122     void *end = reinterpret_cast<void *>(cur->kve_end);
2123     // Readable/Writable addresses within current map entry
2124     if ((addr >= start) && (addr < end)) {
2125       if ((cur->kve_protection & KVME_PROT_READ) != 0 &&
2126           (cur->kve_protection & KVME_PROT_WRITE) != 0) {
2127         found = 1;
2128         break;
2129       }
2130     }
2131     lw += cursz;
2132   }
2133   kmpc_free(buf);
2134 
2135 #elif KMP_OS_DARWIN
2136 
2137   /* On OS X*, /proc pseudo filesystem is not available. Try to read memory
2138      using vm interface. */
2139 
2140   int buffer;
2141   vm_size_t count;
2142   rc = vm_read_overwrite(
2143       mach_task_self(), // Task to read memory of.
2144       (vm_address_t)(addr), // Address to read from.
2145       1, // Number of bytes to be read.
2146       (vm_address_t)(&buffer), // Address of buffer to save read bytes in.
2147       &count // Address of var to save number of read bytes in.
2148   );
2149   if (rc == 0) {
2150     // Memory successfully read.
2151     found = 1;
2152   }
2153 
2154 #elif KMP_OS_NETBSD
2155 
2156   int mib[5];
2157   mib[0] = CTL_VM;
2158   mib[1] = VM_PROC;
2159   mib[2] = VM_PROC_MAP;
2160   mib[3] = getpid();
2161   mib[4] = sizeof(struct kinfo_vmentry);
2162 
2163   size_t size;
2164   rc = sysctl(mib, __arraycount(mib), NULL, &size, NULL, 0);
2165   KMP_ASSERT(!rc);
2166   KMP_ASSERT(size);
2167 
2168   size = size * 4 / 3;
2169   struct kinfo_vmentry *kiv = (struct kinfo_vmentry *)KMP_INTERNAL_MALLOC(size);
2170   KMP_ASSERT(kiv);
2171 
2172   rc = sysctl(mib, __arraycount(mib), kiv, &size, NULL, 0);
2173   KMP_ASSERT(!rc);
2174   KMP_ASSERT(size);
2175 
2176   for (size_t i = 0; i < size; i++) {
2177     if (kiv[i].kve_start >= (uint64_t)addr &&
2178         kiv[i].kve_end <= (uint64_t)addr) {
2179       found = 1;
2180       break;
2181     }
2182   }
2183   KMP_INTERNAL_FREE(kiv);
2184 #elif KMP_OS_OPENBSD
2185 
2186   int mib[3];
2187   mib[0] = CTL_KERN;
2188   mib[1] = KERN_PROC_VMMAP;
2189   mib[2] = getpid();
2190 
2191   size_t size;
2192   uint64_t end;
2193   rc = sysctl(mib, 3, NULL, &size, NULL, 0);
2194   KMP_ASSERT(!rc);
2195   KMP_ASSERT(size);
2196   end = size;
2197 
2198   struct kinfo_vmentry kiv = {.kve_start = 0};
2199 
2200   while ((rc = sysctl(mib, 3, &kiv, &size, NULL, 0)) == 0) {
2201     KMP_ASSERT(size);
2202     if (kiv.kve_end == end)
2203       break;
2204 
2205     if (kiv.kve_start >= (uint64_t)addr && kiv.kve_end <= (uint64_t)addr) {
2206       found = 1;
2207       break;
2208     }
2209     kiv.kve_start += 1;
2210   }
2211 #elif KMP_OS_WASI
2212   found = (int)addr < (__builtin_wasm_memory_size(0) * PAGESIZE);
2213 #elif KMP_OS_DRAGONFLY || KMP_OS_SOLARIS
2214 
2215   // FIXME(DragonFly, Solaris): Implement this
2216   found = 1;
2217 
2218 #else
2219 
2220 #error "Unknown or unsupported OS"
2221 
2222 #endif
2223 
2224   return found;
2225 
2226 } // __kmp_is_address_mapped
2227 
2228 #ifdef USE_LOAD_BALANCE
2229 
2230 #if KMP_OS_DARWIN || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
2231     KMP_OS_OPENBSD || KMP_OS_SOLARIS
2232 
2233 // The function returns the rounded value of the system load average
2234 // during given time interval which depends on the value of
2235 // __kmp_load_balance_interval variable (default is 60 sec, other values
2236 // may be 300 sec or 900 sec).
2237 // It returns -1 in case of error.
2238 int __kmp_get_load_balance(int max) {
2239   double averages[3];
2240   int ret_avg = 0;
2241 
2242   int res = getloadavg(averages, 3);
2243 
2244   // Check __kmp_load_balance_interval to determine which of averages to use.
2245   // getloadavg() may return the number of samples less than requested that is
2246   // less than 3.
2247   if (__kmp_load_balance_interval < 180 && (res >= 1)) {
2248     ret_avg = (int)averages[0]; // 1 min
2249   } else if ((__kmp_load_balance_interval >= 180 &&
2250               __kmp_load_balance_interval < 600) &&
2251              (res >= 2)) {
2252     ret_avg = (int)averages[1]; // 5 min
2253   } else if ((__kmp_load_balance_interval >= 600) && (res == 3)) {
2254     ret_avg = (int)averages[2]; // 15 min
2255   } else { // Error occurred
2256     return -1;
2257   }
2258 
2259   return ret_avg;
2260 }
2261 
2262 #else // Linux* OS
2263 
2264 // The function returns number of running (not sleeping) threads, or -1 in case
2265 // of error. Error could be reported if Linux* OS kernel too old (without
2266 // "/proc" support). Counting running threads stops if max running threads
2267 // encountered.
2268 int __kmp_get_load_balance(int max) {
2269   static int permanent_error = 0;
2270   static int glb_running_threads = 0; // Saved count of the running threads for
2271   // the thread balance algorithm
2272   static double glb_call_time = 0; /* Thread balance algorithm call time */
2273 
2274   int running_threads = 0; // Number of running threads in the system.
2275 
2276   DIR *proc_dir = NULL; // Handle of "/proc/" directory.
2277   struct dirent *proc_entry = NULL;
2278 
2279   kmp_str_buf_t task_path; // "/proc/<pid>/task/<tid>/" path.
2280   DIR *task_dir = NULL; // Handle of "/proc/<pid>/task/<tid>/" directory.
2281   struct dirent *task_entry = NULL;
2282   int task_path_fixed_len;
2283 
2284   kmp_str_buf_t stat_path; // "/proc/<pid>/task/<tid>/stat" path.
2285   int stat_file = -1;
2286   int stat_path_fixed_len;
2287 
2288 #ifdef KMP_DEBUG
2289   int total_processes = 0; // Total number of processes in system.
2290 #endif
2291 
2292   double call_time = 0.0;
2293 
2294   __kmp_str_buf_init(&task_path);
2295   __kmp_str_buf_init(&stat_path);
2296 
2297   __kmp_elapsed(&call_time);
2298 
2299   if (glb_call_time &&
2300       (call_time - glb_call_time < __kmp_load_balance_interval)) {
2301     running_threads = glb_running_threads;
2302     goto finish;
2303   }
2304 
2305   glb_call_time = call_time;
2306 
2307   // Do not spend time on scanning "/proc/" if we have a permanent error.
2308   if (permanent_error) {
2309     running_threads = -1;
2310     goto finish;
2311   }
2312 
2313   if (max <= 0) {
2314     max = INT_MAX;
2315   }
2316 
2317   // Open "/proc/" directory.
2318   proc_dir = opendir("/proc");
2319   if (proc_dir == NULL) {
2320     // Cannot open "/prroc/". Probably the kernel does not support it. Return an
2321     // error now and in subsequent calls.
2322     running_threads = -1;
2323     permanent_error = 1;
2324     goto finish;
2325   }
2326 
2327   // Initialize fixed part of task_path. This part will not change.
2328   __kmp_str_buf_cat(&task_path, "/proc/", 6);
2329   task_path_fixed_len = task_path.used; // Remember number of used characters.
2330 
2331   proc_entry = readdir(proc_dir);
2332   while (proc_entry != NULL) {
2333     // Proc entry is a directory and name starts with a digit. Assume it is a
2334     // process' directory.
2335     if (proc_entry->d_type == DT_DIR && isdigit(proc_entry->d_name[0])) {
2336 
2337 #ifdef KMP_DEBUG
2338       ++total_processes;
2339 #endif
2340       // Make sure init process is the very first in "/proc", so we can replace
2341       // strcmp( proc_entry->d_name, "1" ) == 0 with simpler total_processes ==
2342       // 1. We are going to check that total_processes == 1 => d_name == "1" is
2343       // true (where "=>" is implication). Since C++ does not have => operator,
2344       // let us replace it with its equivalent: a => b == ! a || b.
2345       KMP_DEBUG_ASSERT(total_processes != 1 ||
2346                        strcmp(proc_entry->d_name, "1") == 0);
2347 
2348       // Construct task_path.
2349       task_path.used = task_path_fixed_len; // Reset task_path to "/proc/".
2350       __kmp_str_buf_cat(&task_path, proc_entry->d_name,
2351                         KMP_STRLEN(proc_entry->d_name));
2352       __kmp_str_buf_cat(&task_path, "/task", 5);
2353 
2354       task_dir = opendir(task_path.str);
2355       if (task_dir == NULL) {
2356         // Process can finish between reading "/proc/" directory entry and
2357         // opening process' "task/" directory. So, in general case we should not
2358         // complain, but have to skip this process and read the next one. But on
2359         // systems with no "task/" support we will spend lot of time to scan
2360         // "/proc/" tree again and again without any benefit. "init" process
2361         // (its pid is 1) should exist always, so, if we cannot open
2362         // "/proc/1/task/" directory, it means "task/" is not supported by
2363         // kernel. Report an error now and in the future.
2364         if (strcmp(proc_entry->d_name, "1") == 0) {
2365           running_threads = -1;
2366           permanent_error = 1;
2367           goto finish;
2368         }
2369       } else {
2370         // Construct fixed part of stat file path.
2371         __kmp_str_buf_clear(&stat_path);
2372         __kmp_str_buf_cat(&stat_path, task_path.str, task_path.used);
2373         __kmp_str_buf_cat(&stat_path, "/", 1);
2374         stat_path_fixed_len = stat_path.used;
2375 
2376         task_entry = readdir(task_dir);
2377         while (task_entry != NULL) {
2378           // It is a directory and name starts with a digit.
2379           if (proc_entry->d_type == DT_DIR && isdigit(task_entry->d_name[0])) {
2380 
2381             // Construct complete stat file path. Easiest way would be:
2382             //  __kmp_str_buf_print( & stat_path, "%s/%s/stat", task_path.str,
2383             //  task_entry->d_name );
2384             // but seriae of __kmp_str_buf_cat works a bit faster.
2385             stat_path.used =
2386                 stat_path_fixed_len; // Reset stat path to its fixed part.
2387             __kmp_str_buf_cat(&stat_path, task_entry->d_name,
2388                               KMP_STRLEN(task_entry->d_name));
2389             __kmp_str_buf_cat(&stat_path, "/stat", 5);
2390 
2391             // Note: Low-level API (open/read/close) is used. High-level API
2392             // (fopen/fclose)  works ~ 30 % slower.
2393             stat_file = open(stat_path.str, O_RDONLY);
2394             if (stat_file == -1) {
2395               // We cannot report an error because task (thread) can terminate
2396               // just before reading this file.
2397             } else {
2398               /* Content of "stat" file looks like:
2399                  24285 (program) S ...
2400 
2401                  It is a single line (if program name does not include funny
2402                  symbols). First number is a thread id, then name of executable
2403                  file name in paretheses, then state of the thread. We need just
2404                  thread state.
2405 
2406                  Good news: Length of program name is 15 characters max. Longer
2407                  names are truncated.
2408 
2409                  Thus, we need rather short buffer: 15 chars for program name +
2410                  2 parenthesis, + 3 spaces + ~7 digits of pid = 37.
2411 
2412                  Bad news: Program name may contain special symbols like space,
2413                  closing parenthesis, or even new line. This makes parsing
2414                  "stat" file not 100 % reliable. In case of fanny program names
2415                  parsing may fail (report incorrect thread state).
2416 
2417                  Parsing "status" file looks more promissing (due to different
2418                  file structure and escaping special symbols) but reading and
2419                  parsing of "status" file works slower.
2420                   -- ln
2421               */
2422               char buffer[65];
2423               ssize_t len;
2424               len = read(stat_file, buffer, sizeof(buffer) - 1);
2425               if (len >= 0) {
2426                 buffer[len] = 0;
2427                 // Using scanf:
2428                 //     sscanf( buffer, "%*d (%*s) %c ", & state );
2429                 // looks very nice, but searching for a closing parenthesis
2430                 // works a bit faster.
2431                 char *close_parent = strstr(buffer, ") ");
2432                 if (close_parent != NULL) {
2433                   char state = *(close_parent + 2);
2434                   if (state == 'R') {
2435                     ++running_threads;
2436                     if (running_threads >= max) {
2437                       goto finish;
2438                     }
2439                   }
2440                 }
2441               }
2442               close(stat_file);
2443               stat_file = -1;
2444             }
2445           }
2446           task_entry = readdir(task_dir);
2447         }
2448         closedir(task_dir);
2449         task_dir = NULL;
2450       }
2451     }
2452     proc_entry = readdir(proc_dir);
2453   }
2454 
2455   // There _might_ be a timing hole where the thread executing this
2456   // code get skipped in the load balance, and running_threads is 0.
2457   // Assert in the debug builds only!!!
2458   KMP_DEBUG_ASSERT(running_threads > 0);
2459   if (running_threads <= 0) {
2460     running_threads = 1;
2461   }
2462 
2463 finish: // Clean up and exit.
2464   if (proc_dir != NULL) {
2465     closedir(proc_dir);
2466   }
2467   __kmp_str_buf_free(&task_path);
2468   if (task_dir != NULL) {
2469     closedir(task_dir);
2470   }
2471   __kmp_str_buf_free(&stat_path);
2472   if (stat_file != -1) {
2473     close(stat_file);
2474   }
2475 
2476   glb_running_threads = running_threads;
2477 
2478   return running_threads;
2479 
2480 } // __kmp_get_load_balance
2481 
2482 #endif // KMP_OS_DARWIN
2483 
2484 #endif // USE_LOAD_BALANCE
2485 
2486 #if !(KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_MIC ||                            \
2487       ((KMP_OS_LINUX || KMP_OS_DARWIN) && KMP_ARCH_AARCH64) ||                 \
2488       KMP_ARCH_PPC64 || KMP_ARCH_RISCV64 || KMP_ARCH_LOONGARCH64 ||            \
2489       KMP_ARCH_ARM || KMP_ARCH_VE || KMP_ARCH_S390X)
2490 
2491 // we really only need the case with 1 argument, because CLANG always build
2492 // a struct of pointers to shared variables referenced in the outlined function
2493 int __kmp_invoke_microtask(microtask_t pkfn, int gtid, int tid, int argc,
2494                            void *p_argv[]
2495 #if OMPT_SUPPORT
2496                            ,
2497                            void **exit_frame_ptr
2498 #endif
2499 ) {
2500 #if OMPT_SUPPORT
2501   *exit_frame_ptr = OMPT_GET_FRAME_ADDRESS(0);
2502 #endif
2503 
2504   switch (argc) {
2505   default:
2506     fprintf(stderr, "Too many args to microtask: %d!\n", argc);
2507     fflush(stderr);
2508     exit(-1);
2509   case 0:
2510     (*pkfn)(&gtid, &tid);
2511     break;
2512   case 1:
2513     (*pkfn)(&gtid, &tid, p_argv[0]);
2514     break;
2515   case 2:
2516     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1]);
2517     break;
2518   case 3:
2519     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2]);
2520     break;
2521   case 4:
2522     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3]);
2523     break;
2524   case 5:
2525     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4]);
2526     break;
2527   case 6:
2528     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2529             p_argv[5]);
2530     break;
2531   case 7:
2532     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2533             p_argv[5], p_argv[6]);
2534     break;
2535   case 8:
2536     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2537             p_argv[5], p_argv[6], p_argv[7]);
2538     break;
2539   case 9:
2540     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2541             p_argv[5], p_argv[6], p_argv[7], p_argv[8]);
2542     break;
2543   case 10:
2544     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2545             p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9]);
2546     break;
2547   case 11:
2548     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2549             p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10]);
2550     break;
2551   case 12:
2552     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2553             p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
2554             p_argv[11]);
2555     break;
2556   case 13:
2557     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2558             p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
2559             p_argv[11], p_argv[12]);
2560     break;
2561   case 14:
2562     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2563             p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
2564             p_argv[11], p_argv[12], p_argv[13]);
2565     break;
2566   case 15:
2567     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2568             p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
2569             p_argv[11], p_argv[12], p_argv[13], p_argv[14]);
2570     break;
2571   }
2572 
2573   return 1;
2574 }
2575 
2576 #endif
2577 
2578 #if KMP_OS_LINUX
2579 // Functions for hidden helper task
2580 namespace {
2581 // Condition variable for initializing hidden helper team
2582 pthread_cond_t hidden_helper_threads_initz_cond_var;
2583 pthread_mutex_t hidden_helper_threads_initz_lock;
2584 volatile int hidden_helper_initz_signaled = FALSE;
2585 
2586 // Condition variable for deinitializing hidden helper team
2587 pthread_cond_t hidden_helper_threads_deinitz_cond_var;
2588 pthread_mutex_t hidden_helper_threads_deinitz_lock;
2589 volatile int hidden_helper_deinitz_signaled = FALSE;
2590 
2591 // Condition variable for the wrapper function of main thread
2592 pthread_cond_t hidden_helper_main_thread_cond_var;
2593 pthread_mutex_t hidden_helper_main_thread_lock;
2594 volatile int hidden_helper_main_thread_signaled = FALSE;
2595 
2596 // Semaphore for worker threads. We don't use condition variable here in case
2597 // that when multiple signals are sent at the same time, only one thread might
2598 // be waken.
2599 sem_t hidden_helper_task_sem;
2600 } // namespace
2601 
2602 void __kmp_hidden_helper_worker_thread_wait() {
2603   int status = sem_wait(&hidden_helper_task_sem);
2604   KMP_CHECK_SYSFAIL("sem_wait", status);
2605 }
2606 
2607 void __kmp_do_initialize_hidden_helper_threads() {
2608   // Initialize condition variable
2609   int status =
2610       pthread_cond_init(&hidden_helper_threads_initz_cond_var, nullptr);
2611   KMP_CHECK_SYSFAIL("pthread_cond_init", status);
2612 
2613   status = pthread_cond_init(&hidden_helper_threads_deinitz_cond_var, nullptr);
2614   KMP_CHECK_SYSFAIL("pthread_cond_init", status);
2615 
2616   status = pthread_cond_init(&hidden_helper_main_thread_cond_var, nullptr);
2617   KMP_CHECK_SYSFAIL("pthread_cond_init", status);
2618 
2619   status = pthread_mutex_init(&hidden_helper_threads_initz_lock, nullptr);
2620   KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
2621 
2622   status = pthread_mutex_init(&hidden_helper_threads_deinitz_lock, nullptr);
2623   KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
2624 
2625   status = pthread_mutex_init(&hidden_helper_main_thread_lock, nullptr);
2626   KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
2627 
2628   // Initialize the semaphore
2629   status = sem_init(&hidden_helper_task_sem, 0, 0);
2630   KMP_CHECK_SYSFAIL("sem_init", status);
2631 
2632   // Create a new thread to finish initialization
2633   pthread_t handle;
2634   status = pthread_create(
2635       &handle, nullptr,
2636       [](void *) -> void * {
2637         __kmp_hidden_helper_threads_initz_routine();
2638         return nullptr;
2639       },
2640       nullptr);
2641   KMP_CHECK_SYSFAIL("pthread_create", status);
2642 }
2643 
2644 void __kmp_hidden_helper_threads_initz_wait() {
2645   // Initial thread waits here for the completion of the initialization. The
2646   // condition variable will be notified by main thread of hidden helper teams.
2647   int status = pthread_mutex_lock(&hidden_helper_threads_initz_lock);
2648   KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2649 
2650   if (!TCR_4(hidden_helper_initz_signaled)) {
2651     status = pthread_cond_wait(&hidden_helper_threads_initz_cond_var,
2652                                &hidden_helper_threads_initz_lock);
2653     KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
2654   }
2655 
2656   status = pthread_mutex_unlock(&hidden_helper_threads_initz_lock);
2657   KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2658 }
2659 
2660 void __kmp_hidden_helper_initz_release() {
2661   // After all initialization, reset __kmp_init_hidden_helper_threads to false.
2662   int status = pthread_mutex_lock(&hidden_helper_threads_initz_lock);
2663   KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2664 
2665   status = pthread_cond_signal(&hidden_helper_threads_initz_cond_var);
2666   KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
2667 
2668   TCW_SYNC_4(hidden_helper_initz_signaled, TRUE);
2669 
2670   status = pthread_mutex_unlock(&hidden_helper_threads_initz_lock);
2671   KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2672 }
2673 
2674 void __kmp_hidden_helper_main_thread_wait() {
2675   // The main thread of hidden helper team will be blocked here. The
2676   // condition variable can only be signal in the destructor of RTL.
2677   int status = pthread_mutex_lock(&hidden_helper_main_thread_lock);
2678   KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2679 
2680   if (!TCR_4(hidden_helper_main_thread_signaled)) {
2681     status = pthread_cond_wait(&hidden_helper_main_thread_cond_var,
2682                                &hidden_helper_main_thread_lock);
2683     KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
2684   }
2685 
2686   status = pthread_mutex_unlock(&hidden_helper_main_thread_lock);
2687   KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2688 }
2689 
2690 void __kmp_hidden_helper_main_thread_release() {
2691   // The initial thread of OpenMP RTL should call this function to wake up the
2692   // main thread of hidden helper team.
2693   int status = pthread_mutex_lock(&hidden_helper_main_thread_lock);
2694   KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2695 
2696   status = pthread_cond_signal(&hidden_helper_main_thread_cond_var);
2697   KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
2698 
2699   // The hidden helper team is done here
2700   TCW_SYNC_4(hidden_helper_main_thread_signaled, TRUE);
2701 
2702   status = pthread_mutex_unlock(&hidden_helper_main_thread_lock);
2703   KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2704 }
2705 
2706 void __kmp_hidden_helper_worker_thread_signal() {
2707   int status = sem_post(&hidden_helper_task_sem);
2708   KMP_CHECK_SYSFAIL("sem_post", status);
2709 }
2710 
2711 void __kmp_hidden_helper_threads_deinitz_wait() {
2712   // Initial thread waits here for the completion of the deinitialization. The
2713   // condition variable will be notified by main thread of hidden helper teams.
2714   int status = pthread_mutex_lock(&hidden_helper_threads_deinitz_lock);
2715   KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2716 
2717   if (!TCR_4(hidden_helper_deinitz_signaled)) {
2718     status = pthread_cond_wait(&hidden_helper_threads_deinitz_cond_var,
2719                                &hidden_helper_threads_deinitz_lock);
2720     KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
2721   }
2722 
2723   status = pthread_mutex_unlock(&hidden_helper_threads_deinitz_lock);
2724   KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2725 }
2726 
2727 void __kmp_hidden_helper_threads_deinitz_release() {
2728   int status = pthread_mutex_lock(&hidden_helper_threads_deinitz_lock);
2729   KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2730 
2731   status = pthread_cond_signal(&hidden_helper_threads_deinitz_cond_var);
2732   KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
2733 
2734   TCW_SYNC_4(hidden_helper_deinitz_signaled, TRUE);
2735 
2736   status = pthread_mutex_unlock(&hidden_helper_threads_deinitz_lock);
2737   KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2738 }
2739 #else // KMP_OS_LINUX
2740 void __kmp_hidden_helper_worker_thread_wait() {
2741   KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2742 }
2743 
2744 void __kmp_do_initialize_hidden_helper_threads() {
2745   KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2746 }
2747 
2748 void __kmp_hidden_helper_threads_initz_wait() {
2749   KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2750 }
2751 
2752 void __kmp_hidden_helper_initz_release() {
2753   KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2754 }
2755 
2756 void __kmp_hidden_helper_main_thread_wait() {
2757   KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2758 }
2759 
2760 void __kmp_hidden_helper_main_thread_release() {
2761   KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2762 }
2763 
2764 void __kmp_hidden_helper_worker_thread_signal() {
2765   KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2766 }
2767 
2768 void __kmp_hidden_helper_threads_deinitz_wait() {
2769   KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2770 }
2771 
2772 void __kmp_hidden_helper_threads_deinitz_release() {
2773   KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2774 }
2775 #endif // KMP_OS_LINUX
2776 
2777 bool __kmp_detect_shm() {
2778   DIR *dir = opendir("/dev/shm");
2779   if (dir) { // /dev/shm exists
2780     closedir(dir);
2781     return true;
2782   } else if (ENOENT == errno) { // /dev/shm does not exist
2783     return false;
2784   } else { // opendir() failed
2785     return false;
2786   }
2787 }
2788 
2789 bool __kmp_detect_tmp() {
2790   DIR *dir = opendir("/tmp");
2791   if (dir) { // /tmp exists
2792     closedir(dir);
2793     return true;
2794   } else if (ENOENT == errno) { // /tmp does not exist
2795     return false;
2796   } else { // opendir() failed
2797     return false;
2798   }
2799 }
2800 
2801 // end of file //
2802