1 /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2  *
3  * Permission is hereby granted, free of charge, to any person obtaining a copy
4  * of this software and associated documentation files (the "Software"), to
5  * deal in the Software without restriction, including without limitation the
6  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7  * sell copies of the Software, and to permit persons to whom the Software is
8  * furnished to do so, subject to the following conditions:
9  *
10  * The above copyright notice and this permission notice shall be included in
11  * all copies or substantial portions of the Software.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
19  * IN THE SOFTWARE.
20  */
21 
22 #include "uv.h"
23 #include "internal.h"
24 
25 #include <pthread.h>
26 #include <assert.h>
27 #include <errno.h>
28 
29 #include <sys/time.h>
30 #include <sys/resource.h>  /* getrlimit() */
31 #include <unistd.h>  /* getpagesize() */
32 
33 #include <limits.h>
34 
35 #ifdef __MVS__
36 #include <sys/ipc.h>
37 #include <sys/sem.h>
38 #endif
39 
40 #if defined(__GLIBC__) && !defined(__UCLIBC__)
41 #include <gnu/libc-version.h>  /* gnu_get_libc_version() */
42 #endif
43 
44 #undef NANOSEC
45 #define NANOSEC ((uint64_t) 1e9)
46 
47 #if defined(PTHREAD_BARRIER_SERIAL_THREAD)
48 STATIC_ASSERT(sizeof(uv_barrier_t) == sizeof(pthread_barrier_t));
49 #endif
50 
51 /* Note: guard clauses should match uv_barrier_t's in include/uv/unix.h. */
52 #if defined(_AIX) || \
53     defined(__OpenBSD__) || \
54     !defined(PTHREAD_BARRIER_SERIAL_THREAD)
/* Allocate and initialize the custom barrier used on platforms without a
 * native pthread_barrier_t.  Returns 0, UV_EINVAL on bad arguments or
 * UV_ENOMEM / an error from the mutex/cond initializers on failure. */
int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
  struct _uv_barrier* b;
  int rc;

  if (barrier == NULL || count == 0)
    return UV_EINVAL;

  b = uv__malloc(sizeof(*b));
  if (b == NULL)
    return UV_ENOMEM;

  rc = uv_mutex_init(&b->mutex);
  if (rc != 0) {
    uv__free(b);
    return rc;
  }

  rc = uv_cond_init(&b->cond);
  if (rc != 0) {
    uv_mutex_destroy(&b->mutex);
    uv__free(b);
    return rc;
  }

  b->in = 0;
  b->out = 0;
  b->threshold = count;
  barrier->b = b;

  return 0;
}
87 
88 
/* Block until `threshold` threads have reached the barrier.  Returns 1 in
 * exactly one thread (the last one to leave), 0 in all others, and
 * UV_EINVAL for a NULL or uninitialized barrier. */
int uv_barrier_wait(uv_barrier_t* barrier) {
  struct _uv_barrier* b;
  int last;

  if (barrier == NULL || barrier->b == NULL)
    return UV_EINVAL;

  b = barrier->b;
  uv_mutex_lock(&b->mutex);

  if (++b->in == b->threshold) {
    /* Last arriver: reset the entry counter (so the barrier is immediately
     * reusable for the next round) and open the exit gate. */
    b->in = 0;
    b->out = b->threshold;
    uv_cond_signal(&b->cond);
  } else {
    /* Wait for the round to complete; b->in == 0 marks a full round.
     * The loop also guards against spurious wakeups. */
    do
      uv_cond_wait(&b->cond, &b->mutex);
    while (b->in != 0);
  }

  /* Daisy-chain the wakeups: each departing thread wakes the next waiter,
   * avoiding a thundering-herd broadcast. */
  last = (--b->out == 0);
  if (!last)
    uv_cond_signal(&b->cond);  /* Not needed for last thread. */

  uv_mutex_unlock(&b->mutex);
  return last;
}
116 
117 
/* Free the barrier's resources.  It is a usage error to destroy a barrier
 * while any thread is still inside uv_barrier_wait(); that is trapped
 * below rather than left as silent use-after-free. */
void uv_barrier_destroy(uv_barrier_t* barrier) {
  struct _uv_barrier* b;

  b = barrier->b;
  uv_mutex_lock(&b->mutex);

  /* Debug builds trap here with a readable message... */
  assert(b->in == 0);
  assert(b->out == 0);

  /* ...and release (NDEBUG) builds still abort instead of freeing a
   * barrier that threads are arriving at (in != 0) or leaving (out != 0). */
  if (b->in != 0 || b->out != 0)
    abort();

  uv_mutex_unlock(&b->mutex);
  uv_mutex_destroy(&b->mutex);
  uv_cond_destroy(&b->cond);

  uv__free(barrier->b);
  barrier->b = NULL;
}
137 
138 #else
139 
/* Native pthread barrier: default attributes, `count` participants. */
int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
  int rc;

  rc = pthread_barrier_init(barrier, NULL, count);
  return UV__ERR(rc);
}
143 
144 
/* Returns 1 in the single "serial" thread, 0 in all others; any real
 * error from pthread_barrier_wait() is fatal. */
int uv_barrier_wait(uv_barrier_t* barrier) {
  int rc;

  rc = pthread_barrier_wait(barrier);
  if (rc != 0 && rc != PTHREAD_BARRIER_SERIAL_THREAD)
    abort();

  return rc == PTHREAD_BARRIER_SERIAL_THREAD;
}
155 
156 
/* Destroying a barrier with waiters (or an uninitialized one) is a usage
 * error; treat it as fatal. */
void uv_barrier_destroy(uv_barrier_t* barrier) {
  int rc;

  rc = pthread_barrier_destroy(barrier);
  if (rc != 0)
    abort();
}
161 
162 #endif
163 
164 
165 /* On MacOS, threads other than the main thread are created with a reduced
166  * stack size by default.  Adjust to RLIMIT_STACK aligned to the page size.
167  *
168  * On Linux, threads created by musl have a much smaller stack than threads
169  * created by glibc (80 vs. 2048 or 4096 kB.)  Follow glibc for consistency.
170  */
thread_stack_size(void)171 static size_t thread_stack_size(void) {
172 #if defined(__APPLE__) || defined(__linux__)
173   struct rlimit lim;
174 
175   /* getrlimit() can fail on some aarch64 systems due to a glibc bug where
176    * the system call wrapper invokes the wrong system call. Don't treat
177    * that as fatal, just use the default stack size instead.
178    */
179   if (0 == getrlimit(RLIMIT_STACK, &lim) && lim.rlim_cur != RLIM_INFINITY) {
180     /* pthread_attr_setstacksize() expects page-aligned values. */
181     lim.rlim_cur -= lim.rlim_cur % (rlim_t) getpagesize();
182 
183     /* Musl's PTHREAD_STACK_MIN is 2 KB on all architectures, which is
184      * too small to safely receive signals on.
185      *
186      * Musl's PTHREAD_STACK_MIN + MINSIGSTKSZ == 8192 on arm64 (which has
187      * the largest MINSIGSTKSZ of the architectures that musl supports) so
188      * let's use that as a lower bound.
189      *
190      * We use a hardcoded value because PTHREAD_STACK_MIN + MINSIGSTKSZ
191      * is between 28 and 133 KB when compiling against glibc, depending
192      * on the architecture.
193      */
194     if (lim.rlim_cur >= 8192)
195       if (lim.rlim_cur >= PTHREAD_STACK_MIN)
196         return lim.rlim_cur;
197   }
198 #endif
199 
200 #if !defined(__linux__)
201   return 0;
202 #elif defined(__PPC__) || defined(__ppc__) || defined(__powerpc__)
203   return 4 << 20;  /* glibc default. */
204 #else
205   return 2 << 20;  /* glibc default. */
206 #endif
207 }
208 
209 
/* Convenience wrapper: create a thread with default options. */
int uv_thread_create(uv_thread_t *tid, void (*entry)(void *arg), void *arg) {
  uv_thread_options_t options;

  options.flags = UV_THREAD_NO_FLAGS;
  return uv_thread_create_ex(tid, &options, entry, arg);
}
215 
/* Create a thread with the given options.  Currently the only option is an
 * explicit stack size (UV_THREAD_HAS_STACK_SIZE); without it the platform
 * default from thread_stack_size() is used.  Returns 0 or a negated errno. */
int uv_thread_create_ex(uv_thread_t* tid,
                        const uv_thread_options_t* params,
                        void (*entry)(void *arg),
                        void *arg) {
  int err;
  pthread_attr_t* attr;
  pthread_attr_t attr_storage;
  size_t pagesize;
  size_t stack_size;

  /* Used to squelch a -Wcast-function-type warning. */
  union {
    void (*in)(void*);
    void* (*out)(void*);
  } f;

  /* A caller-supplied size wins over the platform default. */
  stack_size =
      params->flags & UV_THREAD_HAS_STACK_SIZE ? params->stack_size : 0;

  attr = NULL;
  if (stack_size == 0) {
    stack_size = thread_stack_size();
  } else {
    pagesize = (size_t)getpagesize();
    /* Round up to the nearest page boundary. */
    stack_size = (stack_size + pagesize - 1) &~ (pagesize - 1);
#ifdef PTHREAD_STACK_MIN
    if (stack_size < PTHREAD_STACK_MIN)
      stack_size = PTHREAD_STACK_MIN;
#endif
  }

  /* stack_size == 0 means "no preference": pass NULL attributes and let
   * the pthread implementation choose its own default. */
  if (stack_size > 0) {
    attr = &attr_storage;

    if (pthread_attr_init(attr))
      abort();

    if (pthread_attr_setstacksize(attr, stack_size))
      abort();
  }

  /* Launder the void-returning entry point into pthread's void*-returning
   * shape via the union above instead of a function-pointer cast. */
  f.in = entry;
  err = pthread_create(tid, attr, f.out, arg);

  if (attr != NULL)
    pthread_attr_destroy(attr);

  return UV__ERR(err);
}
266 
267 
uv_thread_self(void)268 uv_thread_t uv_thread_self(void) {
269   return pthread_self();
270 }
271 
/* Wait for *tid to terminate; the thread's return value is discarded. */
int uv_thread_join(uv_thread_t *tid) {
  int err;

  err = pthread_join(*tid, NULL);
  return UV__ERR(err);
}
275 
276 
/* Nonzero iff both handles refer to the same thread. */
int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2) {
  int same;

  same = pthread_equal(*t1, *t2);
  return same;
}
280 
281 
int uv_mutex_init(uv_mutex_t* mutex) {
#if defined(NDEBUG) || !defined(PTHREAD_MUTEX_ERRORCHECK)
  /* Release build (or no error-check support): plain default mutex. */
  return UV__ERR(pthread_mutex_init(mutex, NULL));
#else
  /* Debug build: use an error-checking mutex so misuse (relocking,
   * unlocking from the wrong thread) fails loudly instead of deadlocking. */
  pthread_mutexattr_t attr;
  int err;

  if (pthread_mutexattr_init(&attr) != 0)
    abort();

  if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK) != 0)
    abort();

  err = pthread_mutex_init(mutex, &attr);

  if (pthread_mutexattr_destroy(&attr) != 0)
    abort();

  return UV__ERR(err);
#endif
}
303 
304 
/* Like uv_mutex_init() but the resulting mutex may be re-locked by the
 * thread that already holds it. */
int uv_mutex_init_recursive(uv_mutex_t* mutex) {
  pthread_mutexattr_t attr;
  int err;

  if (pthread_mutexattr_init(&attr) != 0)
    abort();

  if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE) != 0)
    abort();

  err = pthread_mutex_init(mutex, &attr);

  if (pthread_mutexattr_destroy(&attr) != 0)
    abort();

  return UV__ERR(err);
}
322 
323 
/* Destroying a locked or uninitialized mutex is a usage error: fatal. */
void uv_mutex_destroy(uv_mutex_t* mutex) {
  int rc;

  rc = pthread_mutex_destroy(mutex);
  if (rc != 0)
    abort();
}
328 
329 
/* pthread_mutex_lock() fails only on usage errors; treat those as fatal. */
void uv_mutex_lock(uv_mutex_t* mutex) {
  int rc;

  rc = pthread_mutex_lock(mutex);
  if (rc != 0)
    abort();
}
334 
335 
/* Returns 0 on success, UV_EBUSY when the mutex is already held.
 * EAGAIN (recursion limit) is folded into UV_EBUSY; anything else is a
 * usage error and fatal. */
int uv_mutex_trylock(uv_mutex_t* mutex) {
  switch (pthread_mutex_trylock(mutex)) {
  case 0:
    return 0;
  case EBUSY:
  case EAGAIN:
    return UV_EBUSY;
  default:
    abort();
  }
}
348 
349 
/* Unlocking a mutex the caller does not hold is a usage error: fatal. */
void uv_mutex_unlock(uv_mutex_t* mutex) {
  int rc;

  rc = pthread_mutex_unlock(mutex);
  if (rc != 0)
    abort();
}
354 
355 
/* Initialize a reader-writer lock with default attributes. */
int uv_rwlock_init(uv_rwlock_t* rwlock) {
  int err;

  err = pthread_rwlock_init(rwlock, NULL);
  return UV__ERR(err);
}
359 
360 
/* Destroying a held or uninitialized rwlock is a usage error: fatal. */
void uv_rwlock_destroy(uv_rwlock_t* rwlock) {
  int rc;

  rc = pthread_rwlock_destroy(rwlock);
  if (rc != 0)
    abort();
}
365 
366 
/* Acquire the lock for reading; any failure is a usage error and fatal. */
void uv_rwlock_rdlock(uv_rwlock_t* rwlock) {
  int rc;

  rc = pthread_rwlock_rdlock(rwlock);
  if (rc != 0)
    abort();
}
371 
372 
/* Returns 0 on success, UV_EBUSY when a writer holds the lock (EAGAIN,
 * the reader-count limit, is folded in too); other errors are fatal. */
int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock) {
  switch (pthread_rwlock_tryrdlock(rwlock)) {
  case 0:
    return 0;
  case EBUSY:
  case EAGAIN:
    return UV_EBUSY;
  default:
    abort();
  }
}
385 
386 
/* Release a read hold; pthreads uses one unlock call for both modes. */
void uv_rwlock_rdunlock(uv_rwlock_t* rwlock) {
  int rc;

  rc = pthread_rwlock_unlock(rwlock);
  if (rc != 0)
    abort();
}
391 
392 
/* Acquire the lock for writing; any failure is a usage error and fatal. */
void uv_rwlock_wrlock(uv_rwlock_t* rwlock) {
  int rc;

  rc = pthread_rwlock_wrlock(rwlock);
  if (rc != 0)
    abort();
}
397 
398 
/* Returns 0 on success, UV_EBUSY when any reader or writer holds the
 * lock; other errors are fatal. */
int uv_rwlock_trywrlock(uv_rwlock_t* rwlock) {
  switch (pthread_rwlock_trywrlock(rwlock)) {
  case 0:
    return 0;
  case EBUSY:
  case EAGAIN:
    return UV_EBUSY;
  default:
    abort();
  }
}
411 
412 
/* Release a write hold; pthreads uses one unlock call for both modes. */
void uv_rwlock_wrunlock(uv_rwlock_t* rwlock) {
  int rc;

  rc = pthread_rwlock_unlock(rwlock);
  if (rc != 0)
    abort();
}
417 
418 
/* Run `callback` exactly once per `guard`, across all threads. */
void uv_once(uv_once_t* guard, void (*callback)(void)) {
  int rc;

  rc = pthread_once(guard, callback);
  if (rc != 0)
    abort();
}
423 
424 #if defined(__APPLE__) && defined(__MACH__)
425 
/* macOS: back uv_sem_t with a Mach semaphore (POSIX sem_init is
 * unimplemented there). */
int uv_sem_init(uv_sem_t* sem, unsigned int value) {
  kern_return_t err;

  err = semaphore_create(mach_task_self(), sem, SYNC_POLICY_FIFO, value);
  switch (err) {
  case KERN_SUCCESS:
    return 0;
  case KERN_INVALID_ARGUMENT:
    return UV_EINVAL;
  case KERN_RESOURCE_SHORTAGE:
    return UV_ENOMEM;
  default:
    abort();
    return UV_EINVAL;  /* Satisfy the compiler. */
  }
}
440 
441 
/* Tear down a Mach semaphore; failure means misuse, so abort. */
void uv_sem_destroy(uv_sem_t* sem) {
  int rc;

  rc = semaphore_destroy(mach_task_self(), *sem);
  if (rc != 0)
    abort();
}
446 
447 
/* Increment the semaphore, waking one waiter if any. */
void uv_sem_post(uv_sem_t* sem) {
  int rc;

  rc = semaphore_signal(*sem);
  if (rc != 0)
    abort();
}
452 
453 
/* Decrement the semaphore, blocking until it is positive.  KERN_ABORTED
 * (interrupted wait) is retried transparently. */
void uv_sem_wait(uv_sem_t* sem) {
  int r;

  for (;;) {
    r = semaphore_wait(*sem);
    if (r != KERN_ABORTED)
      break;
  }

  if (r != KERN_SUCCESS)
    abort();
}
464 
465 
/* Non-blocking decrement, emulated with a zero-length timed wait since
 * Mach has no native trywait.  Returns 0 or UV_EAGAIN. */
int uv_sem_trywait(uv_sem_t* sem) {
  mach_timespec_t zero;
  kern_return_t err;

  zero.tv_sec = 0;
  zero.tv_nsec = 0;

  err = semaphore_timedwait(*sem, zero);
  switch (err) {
  case KERN_SUCCESS:
    return 0;
  case KERN_OPERATION_TIMED_OUT:
    return UV_EAGAIN;
  default:
    abort();
    return UV_EINVAL;  /* Satisfy the compiler. */
  }
}
482 
483 #else /* !(defined(__APPLE__) && defined(__MACH__)) */
484 
485 #if defined(__GLIBC__) && !defined(__UCLIBC__)
486 
487 /* Hack around https://sourceware.org/bugzilla/show_bug.cgi?id=12674
488  * by providing a custom implementation for glibc < 2.21 in terms of other
489  * concurrency primitives.
490  * Refs: https://github.com/nodejs/node/issues/19903 */
491 
492 /* To preserve ABI compatibility, we treat the uv_sem_t as storage for
493  * a pointer to the actual struct we're using underneath. */
494 
/* Guards the one-time glibc version probe in glibc_version_check(). */
static uv_once_t glibc_version_check_once = UV_ONCE_INIT;
/* Set by glibc_version_check(); nonzero selects the uv__custom_sem_*
 * implementation instead of the native sem_* one. */
static int platform_needs_custom_semaphore = 0;
497 
glibc_version_check(void)498 static void glibc_version_check(void) {
499   const char* version = gnu_get_libc_version();
500   platform_needs_custom_semaphore =
501       version[0] == '2' && version[1] == '.' &&
502       atoi(version + 2) < 21;
503 }
504 
505 #elif defined(__MVS__)
506 
507 #define platform_needs_custom_semaphore 1
508 
509 #else /* !defined(__GLIBC__) && !defined(__MVS__) */
510 
511 #define platform_needs_custom_semaphore 0
512 
513 #endif
514 
/* Mutex + condvar implementation of a counting semaphore, used where the
 * native sem_* API is broken (old glibc) or absent (z/OS). */
typedef struct uv_semaphore_s {
  uv_mutex_t mutex;   /* Protects `value`. */
  uv_cond_t cond;     /* Signalled on the 0 -> 1 transition of `value`. */
  unsigned int value; /* Current semaphore count. */
} uv_semaphore_t;
520 
521 #if (defined(__GLIBC__) && !defined(__UCLIBC__)) || \
522     platform_needs_custom_semaphore
523 STATIC_ASSERT(sizeof(uv_sem_t) >= sizeof(uv_semaphore_t*));
524 #endif
525 
/* Allocate a uv_semaphore_t and stash its pointer inside the opaque
 * uv_sem_t storage (ABI-compatible with the native representation). */
static int uv__custom_sem_init(uv_sem_t* sem_, unsigned int value) {
  uv_semaphore_t* sem;
  int err;

  sem = uv__malloc(sizeof(*sem));
  if (sem == NULL)
    return UV_ENOMEM;

  err = uv_mutex_init(&sem->mutex);
  if (err != 0)
    goto fail_free;

  err = uv_cond_init(&sem->cond);
  if (err != 0)
    goto fail_mutex;

  sem->value = value;
  *(uv_semaphore_t**)sem_ = sem;
  return 0;

fail_mutex:
  uv_mutex_destroy(&sem->mutex);
fail_free:
  uv__free(sem);
  return err;
}
549 
550 
/* Free the heap-allocated semaphore behind the uv_sem_t handle. */
static void uv__custom_sem_destroy(uv_sem_t* sem_) {
  uv_semaphore_t* s;

  s = *(uv_semaphore_t**)sem_;
  uv_cond_destroy(&s->cond);
  uv_mutex_destroy(&s->mutex);
  uv__free(s);
}
559 
560 
/* Increment the count; on the 0 -> 1 transition a waiter may be blocked,
 * so wake one (waiters re-check and re-signal as needed). */
static void uv__custom_sem_post(uv_sem_t* sem_) {
  uv_semaphore_t* s;

  s = *(uv_semaphore_t**)sem_;
  uv_mutex_lock(&s->mutex);
  s->value++;
  if (s->value == 1)
    uv_cond_signal(&s->cond);
  uv_mutex_unlock(&s->mutex);
}
571 
572 
/* Block until the count is positive, then take one unit. */
static void uv__custom_sem_wait(uv_sem_t* sem_) {
  uv_semaphore_t* s;

  s = *(uv_semaphore_t**)sem_;
  uv_mutex_lock(&s->mutex);
  while (s->value == 0)
    uv_cond_wait(&s->cond, &s->mutex);
  s->value--;
  uv_mutex_unlock(&s->mutex);
}
583 
584 
/* Non-blocking take.  Contention on the internal mutex also reports
 * UV_EAGAIN, mirroring sem_trywait() semantics. */
static int uv__custom_sem_trywait(uv_sem_t* sem_) {
  uv_semaphore_t* s;
  int rc;

  s = *(uv_semaphore_t**)sem_;
  if (uv_mutex_trylock(&s->mutex) != 0)
    return UV_EAGAIN;

  rc = UV_EAGAIN;
  if (s->value != 0) {
    s->value--;
    rc = 0;
  }

  uv_mutex_unlock(&s->mutex);
  return rc;
}
602 
/* Native POSIX semaphore, process-private (pshared == 0). */
static int uv__sem_init(uv_sem_t* sem, unsigned int value) {
  if (sem_init(sem, 0, value) != 0)
    return UV__ERR(errno);

  return 0;
}
608 
609 
/* Destroying a semaphore with waiters is a usage error: fatal. */
static void uv__sem_destroy(uv_sem_t* sem) {
  int rc;

  rc = sem_destroy(sem);
  if (rc != 0)
    abort();
}
614 
615 
/* Increment the semaphore; overflow or invalid handle is fatal. */
static void uv__sem_post(uv_sem_t* sem) {
  int rc;

  rc = sem_post(sem);
  if (rc != 0)
    abort();
}
620 
621 
/* Block until a unit is available; restart if interrupted by a signal. */
static void uv__sem_wait(uv_sem_t* sem) {
  for (;;) {
    if (sem_wait(sem) == 0)
      return;
    if (errno != EINTR)
      abort();
  }
}
632 
633 
/* Non-blocking take: 0 on success, UV_EAGAIN when the count is zero.
 * Signal interruptions are retried; other failures are fatal. */
static int uv__sem_trywait(uv_sem_t* sem) {
  for (;;) {
    if (sem_trywait(sem) == 0)
      return 0;
    if (errno == EINTR)
      continue;
    if (errno == EAGAIN)
      return UV_EAGAIN;
    abort();
  }
}
649 
/* Dispatch to the native or custom semaphore, probing the glibc version
 * once on first use. */
int uv_sem_init(uv_sem_t* sem, unsigned int value) {
#if defined(__GLIBC__) && !defined(__UCLIBC__)
  uv_once(&glibc_version_check_once, glibc_version_check);
#endif

  if (!platform_needs_custom_semaphore)
    return uv__sem_init(sem, value);

  return uv__custom_sem_init(sem, value);
}
660 
661 
/* Dispatch destroy to whichever implementation uv_sem_init() selected. */
void uv_sem_destroy(uv_sem_t* sem) {
  if (!platform_needs_custom_semaphore)
    uv__sem_destroy(sem);
  else
    uv__custom_sem_destroy(sem);
}
668 
669 
/* Dispatch post to whichever implementation uv_sem_init() selected. */
void uv_sem_post(uv_sem_t* sem) {
  if (!platform_needs_custom_semaphore)
    uv__sem_post(sem);
  else
    uv__custom_sem_post(sem);
}
676 
677 
/* Dispatch wait to whichever implementation uv_sem_init() selected. */
void uv_sem_wait(uv_sem_t* sem) {
  if (!platform_needs_custom_semaphore)
    uv__sem_wait(sem);
  else
    uv__custom_sem_wait(sem);
}
684 
685 
/* Dispatch trywait to whichever implementation uv_sem_init() selected. */
int uv_sem_trywait(uv_sem_t* sem) {
  if (!platform_needs_custom_semaphore)
    return uv__sem_trywait(sem);

  return uv__custom_sem_trywait(sem);
}
692 
693 #endif /* defined(__APPLE__) && defined(__MACH__) */
694 
695 
696 #if defined(__APPLE__) && defined(__MACH__) || defined(__MVS__)
697 
/* macOS / z/OS: default attributes (uv_cond_timedwait() there does not
 * rely on a condattr clock setting). */
int uv_cond_init(uv_cond_t* cond) {
  int err;

  err = pthread_cond_init(cond, NULL);
  return UV__ERR(err);
}
701 
702 #else /* !(defined(__APPLE__) && defined(__MACH__)) */
703 
/* Initialize a condition variable bound to CLOCK_MONOTONIC (where
 * supported) so uv_cond_timedwait() deadlines ignore wall-clock jumps. */
int uv_cond_init(uv_cond_t* cond) {
  pthread_condattr_t attr;
  int err;

  err = pthread_condattr_init(&attr);
  if (err)
    return UV__ERR(err);

#if !defined(__hpux)
  err = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
  if (err) {
    pthread_condattr_destroy(&attr);
    return UV__ERR(err);
  }
#endif

  err = pthread_cond_init(cond, &attr);
  if (err) {
    pthread_condattr_destroy(&attr);
    return UV__ERR(err);
  }

  err = pthread_condattr_destroy(&attr);
  if (err) {
    /* Preserve the historical cleanup sequence: destroy the condvar,
     * then retry the attr destroy, then report the original error. */
    pthread_cond_destroy(cond);
    pthread_condattr_destroy(&attr);
    return UV__ERR(err);
  }

  return 0;
}
734 
735 #endif /* defined(__APPLE__) && defined(__MACH__) */
736 
/* Destroy a condition variable.  On macOS, first drain any pending signal
 * with a 1 ns timed wait; see the comment below. */
void uv_cond_destroy(uv_cond_t* cond) {
#if defined(__APPLE__) && defined(__MACH__)
  /* It has been reported that destroying condition variables that have been
   * signalled but not waited on can sometimes result in application crashes.
   * See https://codereview.chromium.org/1323293005.
   */
  pthread_mutex_t mutex;
  struct timespec ts;
  int err;

  /* Throwaway mutex: pthread_cond_timedwait_relative_np() requires one. */
  if (pthread_mutex_init(&mutex, NULL))
    abort();

  if (pthread_mutex_lock(&mutex))
    abort();

  /* Shortest possible relative timeout: we only want to consume a stale
   * signal, not actually wait. */
  ts.tv_sec = 0;
  ts.tv_nsec = 1;

  /* ETIMEDOUT is the expected outcome when no signal was pending. */
  err = pthread_cond_timedwait_relative_np(cond, &mutex, &ts);
  if (err != 0 && err != ETIMEDOUT)
    abort();

  if (pthread_mutex_unlock(&mutex))
    abort();

  if (pthread_mutex_destroy(&mutex))
    abort();
#endif /* defined(__APPLE__) && defined(__MACH__) */

  if (pthread_cond_destroy(cond))
    abort();
}
770 
/* Wake one waiter; failure means an invalid condvar, so abort. */
void uv_cond_signal(uv_cond_t* cond) {
  int rc;

  rc = pthread_cond_signal(cond);
  if (rc != 0)
    abort();
}
775 
/* Wake all waiters; failure means an invalid condvar, so abort. */
void uv_cond_broadcast(uv_cond_t* cond) {
  int rc;

  rc = pthread_cond_broadcast(cond);
  if (rc != 0)
    abort();
}
780 
/* Wait on `cond` with `mutex` held; usage errors are fatal.  The caller
 * must handle spurious wakeups by re-checking its predicate. */
void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex) {
  int rc;

  rc = pthread_cond_wait(cond, mutex);
  if (rc != 0)
    abort();
}
785 
786 
/* Wait on `cond` for at most `timeout` nanoseconds (relative).  Returns 0
 * when signalled, UV_ETIMEDOUT on expiry; any other failure is fatal. */
int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) {
  int r;
  struct timespec ts;
#if defined(__MVS__)
  struct timeval tv;
#endif

#if defined(__APPLE__) && defined(__MACH__)
  /* macOS takes the timeout as-is: the _relative_np variant needs no
   * absolute deadline and so no clock read. */
  ts.tv_sec = timeout / NANOSEC;
  ts.tv_nsec = timeout % NANOSEC;
  r = pthread_cond_timedwait_relative_np(cond, mutex, &ts);
#else
#if defined(__MVS__)
  if (gettimeofday(&tv, NULL))
    abort();
  /* Convert to an absolute deadline with pure integer arithmetic.  The
   * previous `tv.tv_usec * 1e3` promoted the whole sum to double, which
   * cannot represent current nanosecond-epoch timestamps (> 2^53) exactly
   * and silently rounded the deadline. */
  timeout += (uint64_t) tv.tv_sec * NANOSEC + (uint64_t) tv.tv_usec * 1000;
#else
  /* Absolute deadline on the monotonic clock configured in uv_cond_init(). */
  timeout += uv__hrtime(UV_CLOCK_PRECISE);
#endif
  ts.tv_sec = timeout / NANOSEC;
  ts.tv_nsec = timeout % NANOSEC;
  r = pthread_cond_timedwait(cond, mutex, &ts);
#endif


  if (r == 0)
    return 0;

  if (r == ETIMEDOUT)
    return UV_ETIMEDOUT;

  abort();
#ifndef __SUNPRO_C
  return UV_EINVAL;  /* Satisfy the compiler. */
#endif
}
823 
824 
/* Create a thread-local storage key; no destructor is registered. */
int uv_key_create(uv_key_t* key) {
  int err;

  err = pthread_key_create(key, NULL);
  return UV__ERR(err);
}
828 
829 
/* Delete a TLS key; deleting an invalid key is a usage error and fatal. */
void uv_key_delete(uv_key_t* key) {
  int rc;

  rc = pthread_key_delete(*key);
  if (rc != 0)
    abort();
}
834 
835 
/* Calling thread's value for `key`; NULL if never set in this thread. */
void* uv_key_get(uv_key_t* key) {
  void* value;

  value = pthread_getspecific(*key);
  return value;
}
839 
840 
/* Set the calling thread's value for `key`; failure is fatal. */
void uv_key_set(uv_key_t* key, void* value) {
  int rc;

  rc = pthread_setspecific(*key, value);
  if (rc != 0)
    abort();
}
845