1 /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2  *
3  * Permission is hereby granted, free of charge, to any person obtaining a copy
4  * of this software and associated documentation files (the "Software"), to
5  * deal in the Software without restriction, including without limitation the
6  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7  * sell copies of the Software, and to permit persons to whom the Software is
8  * furnished to do so, subject to the following conditions:
9  *
10  * The above copyright notice and this permission notice shall be included in
11  * all copies or substantial portions of the Software.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
19  * IN THE SOFTWARE.
20  */
21 
22 #include "uv.h"
23 #include "internal.h"
24 
25 #include <pthread.h>
26 #include <assert.h>
27 #include <errno.h>
28 
29 #include <sys/time.h>
30 #include <sys/resource.h>  /* getrlimit() */
31 #include <unistd.h>  /* getpagesize() */
32 
33 #include <limits.h>
34 
35 #ifdef __MVS__
36 #include <sys/ipc.h>
37 #include <sys/sem.h>
38 #endif
39 
40 #if defined(__GLIBC__) && !defined(__UCLIBC__)
41 #include <gnu/libc-version.h>  /* gnu_get_libc_version() */
42 #endif
43 
44 #undef NANOSEC
45 #define NANOSEC ((uint64_t) 1e9)
46 
47 #if defined(PTHREAD_BARRIER_SERIAL_THREAD)
48 STATIC_ASSERT(sizeof(uv_barrier_t) == sizeof(pthread_barrier_t));
49 #endif
50 
51 /* Note: guard clauses should match uv_barrier_t's in include/uv/unix.h. */
52 #if defined(_AIX) || \
53     defined(__OpenBSD__) || \
54     !defined(PTHREAD_BARRIER_SERIAL_THREAD)
/* Initialize a software barrier for `count` participants.  Allocates the
 * real state on the heap; uv_barrier_t only stores a pointer to it.
 * Returns 0, UV_EINVAL on bad arguments, or UV_ENOMEM / a mutex/cond
 * initialization error. */
int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
  struct _uv_barrier* b;
  int rc;

  if (barrier == NULL || count == 0)
    return UV_EINVAL;

  b = uv__malloc(sizeof(*b));
  if (b == NULL)
    return UV_ENOMEM;

  b->in = 0;
  b->out = 0;
  b->threshold = count;

  rc = uv_mutex_init(&b->mutex);
  if (rc != 0) {
    uv__free(b);
    return rc;
  }

  rc = uv_cond_init(&b->cond);
  if (rc != 0) {
    uv_mutex_destroy(&b->mutex);
    uv__free(b);
    return rc;
  }

  barrier->b = b;
  return 0;
}
87 
88 
/* Wait at the barrier.  Returns 1 in exactly one of the released threads
 * (mirroring PTHREAD_BARRIER_SERIAL_THREAD) and 0 in all others. */
int uv_barrier_wait(uv_barrier_t* barrier) {
  struct _uv_barrier* b;
  int last;

  if (barrier == NULL || barrier->b == NULL)
    return UV_EINVAL;

  b = barrier->b;
  uv_mutex_lock(&b->mutex);

  if (++b->in == b->threshold) {
    /* Last thread to arrive: reset `in` for the next round and arm `out`
     * so departing threads are counted; wake the first waiter. */
    b->in = 0;
    b->out = b->threshold;
    uv_cond_signal(&b->cond);
  } else {
    /* Not full yet.  The loop guards against spurious wakeups: release
     * happens only once `in` has been reset to 0 by the last arrival. */
    do
      uv_cond_wait(&b->cond, &b->mutex);
    while (b->in != 0);
  }

  /* Each departing thread wakes the next one in a chain; the thread that
   * drives `out` to zero is the designated "serial" thread. */
  last = (--b->out == 0);
  if (!last)
    uv_cond_signal(&b->cond);  /* Not needed for last thread. */

  uv_mutex_unlock(&b->mutex);
  return last;
}
116 
117 
/* Destroy a barrier.  Destroying a barrier with threads still entering or
 * leaving is a caller bug and aborts. */
void uv_barrier_destroy(uv_barrier_t* barrier) {
  struct _uv_barrier* b = barrier->b;

  /* Take the lock so we observe a quiescent barrier. */
  uv_mutex_lock(&b->mutex);

  assert(b->in == 0);
  assert(b->out == 0);

  /* Hard stop in release builds too, where assert() compiles away. */
  if (b->in != 0 || b->out != 0)
    abort();

  uv_mutex_unlock(&b->mutex);
  uv_mutex_destroy(&b->mutex);
  uv_cond_destroy(&b->cond);

  uv__free(b);
  barrier->b = NULL;
}
137 
138 #else
139 
/* Native pthread barrier: delegate directly. */
int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
  int rc;

  rc = pthread_barrier_init(barrier, NULL, count);
  return UV__ERR(rc);
}
143 
144 
/* Wait at a native pthread barrier; returns 1 in the "serial" thread. */
int uv_barrier_wait(uv_barrier_t* barrier) {
  int rc;

  rc = pthread_barrier_wait(barrier);
  /* PTHREAD_BARRIER_SERIAL_THREAD is a success value for one thread;
   * any other non-zero return is a real error. */
  if (rc != 0 && rc != PTHREAD_BARRIER_SERIAL_THREAD)
    abort();

  return rc == PTHREAD_BARRIER_SERIAL_THREAD;
}
155 
156 
/* Destroy a native pthread barrier; failure is a caller bug. */
void uv_barrier_destroy(uv_barrier_t* barrier) {
  int rc;

  rc = pthread_barrier_destroy(barrier);
  if (rc != 0)
    abort();
}
161 
162 #endif
163 
164 
165 /* On MacOS, threads other than the main thread are created with a reduced
166  * stack size by default.  Adjust to RLIMIT_STACK aligned to the page size.
167  *
168  * On Linux, threads created by musl have a much smaller stack than threads
169  * created by glibc (80 vs. 2048 or 4096 kB.)  Follow glibc for consistency.
170  */
/* Pick the default stack size for new threads.
 *
 * On MacOS, threads other than the main thread are created with a reduced
 * stack size by default.  Adjust to RLIMIT_STACK aligned to the page size.
 *
 * On Linux, threads created by musl have a much smaller stack than threads
 * created by glibc (80 vs. 2048 or 4096 kB.)  Follow glibc for consistency.
 */
static size_t thread_stack_size(void) {
#if defined(__APPLE__) || defined(__linux__)
  struct rlimit lim;

  if (getrlimit(RLIMIT_STACK, &lim))
    abort();

  if (lim.rlim_cur != RLIM_INFINITY) {
    /* pthread_attr_setstacksize() expects page-aligned values. */
    lim.rlim_cur -= lim.rlim_cur % (rlim_t) getpagesize();

    /* Musl's PTHREAD_STACK_MIN is 2 KB on all architectures, which is
     * too small to safely receive signals on.
     *
     * Musl's PTHREAD_STACK_MIN + MINSIGSTKSZ == 8192 on arm64 (which has
     * the largest MINSIGSTKSZ of the architectures that musl supports) so
     * let's use that as a lower bound.
     *
     * We use a hardcoded value because PTHREAD_STACK_MIN + MINSIGSTKSZ
     * is between 28 and 133 KB when compiling against glibc, depending
     * on the architecture.
     */
    if (lim.rlim_cur >= 8192)
      if (lim.rlim_cur >= PTHREAD_STACK_MIN)
        return lim.rlim_cur;
  }
#endif

  /* Returning 0 tells uv_thread_create_ex() to keep the libc default. */
#if !defined(__linux__)
  return 0;
#elif defined(__PPC__) || defined(__ppc__) || defined(__powerpc__)
  return 4 << 20;  /* glibc default. */
#else
  return 2 << 20;  /* glibc default. */
#endif
}
207 
208 
/* Create a thread with default options (platform-chosen stack size). */
int uv_thread_create(uv_thread_t *tid, void (*entry)(void *arg), void *arg) {
  uv_thread_options_t options;

  options.flags = UV_THREAD_NO_FLAGS;
  return uv_thread_create_ex(tid, &options, entry, arg);
}
214 
/* Create a thread running entry(arg).  If params requests an explicit
 * stack size (UV_THREAD_HAS_STACK_SIZE) it is page-rounded and clamped to
 * PTHREAD_STACK_MIN; otherwise thread_stack_size() supplies a default.
 * Returns 0 or a UV_* error from pthread_create(). */
int uv_thread_create_ex(uv_thread_t* tid,
                        const uv_thread_options_t* params,
                        void (*entry)(void *arg),
                        void *arg) {
  int err;
  pthread_attr_t* attr;
  pthread_attr_t attr_storage;
  size_t pagesize;
  size_t stack_size;

  /* Used to squelch a -Wcast-function-type warning. */
  union {
    void (*in)(void*);
    void* (*out)(void*);
  } f;

  stack_size =
      params->flags & UV_THREAD_HAS_STACK_SIZE ? params->stack_size : 0;

  attr = NULL;
  if (stack_size == 0) {
    stack_size = thread_stack_size();
  } else {
    pagesize = (size_t)getpagesize();
    /* Round up to the nearest page boundary. */
    stack_size = (stack_size + pagesize - 1) &~ (pagesize - 1);
#ifdef PTHREAD_STACK_MIN
    if (stack_size < PTHREAD_STACK_MIN)
      stack_size = PTHREAD_STACK_MIN;
#endif
  }

  /* stack_size == 0 means "keep the libc default", in which case no
   * attribute object is needed at all. */
  if (stack_size > 0) {
    attr = &attr_storage;

    if (pthread_attr_init(attr))
      abort();

    if (pthread_attr_setstacksize(attr, stack_size))
      abort();
  }

  /* entry returns void but pthreads expects void* — launder the function
   * pointer through the union rather than cast it directly. */
  f.in = entry;
  err = pthread_create(tid, attr, f.out, arg);

  if (attr != NULL)
    pthread_attr_destroy(attr);

  return UV__ERR(err);
}
265 
266 
uv_thread_self(void)267 uv_thread_t uv_thread_self(void) {
268   return pthread_self();
269 }
270 
/* Block until *tid exits; the thread's return value is discarded. */
int uv_thread_join(uv_thread_t *tid) {
  int rc;

  rc = pthread_join(*tid, NULL);
  return UV__ERR(rc);
}
274 
275 
/* Non-zero when both handles name the same thread. */
int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2) {
  int same;

  same = pthread_equal(*t1, *t2);
  return same;
}
279 
280 
/* Initialize a mutex.  Debug builds use PTHREAD_MUTEX_ERRORCHECK (where
 * available) so misuse — relocking, or unlocking an unowned mutex — fails
 * loudly instead of deadlocking silently. */
int uv_mutex_init(uv_mutex_t* mutex) {
#if defined(NDEBUG) || !defined(PTHREAD_MUTEX_ERRORCHECK)
  return UV__ERR(pthread_mutex_init(mutex, NULL));
#else
  pthread_mutexattr_t attr;
  int err;

  if (pthread_mutexattr_init(&attr))
    abort();

  if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK))
    abort();

  err = pthread_mutex_init(mutex, &attr);

  /* The attribute object is only needed during init; free it either way. */
  if (pthread_mutexattr_destroy(&attr))
    abort();

  return UV__ERR(err);
#endif
}
302 
303 
/* Initialize a recursive mutex (same thread may relock it). */
int uv_mutex_init_recursive(uv_mutex_t* mutex) {
  pthread_mutexattr_t attr;
  int rc;

  /* Attribute-handling failures indicate corruption or API misuse;
   * treat them as fatal instead of propagating an error code. */
  if (pthread_mutexattr_init(&attr))
    abort();
  if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE))
    abort();

  rc = pthread_mutex_init(mutex, &attr);

  if (pthread_mutexattr_destroy(&attr))
    abort();

  return UV__ERR(rc);
}
321 
322 
/* Destroy a mutex; failure (e.g. still locked) is a caller bug. */
void uv_mutex_destroy(uv_mutex_t* mutex) {
  int rc;

  rc = pthread_mutex_destroy(mutex);
  if (rc != 0)
    abort();
}
327 
328 
/* Lock a mutex; any pthread error is fatal. */
void uv_mutex_lock(uv_mutex_t* mutex) {
  int rc;

  rc = pthread_mutex_lock(mutex);
  if (rc != 0)
    abort();
}
333 
334 
/* Try to lock without blocking.  Returns 0 on success, UV_EBUSY when the
 * mutex is already held; any other pthread error aborts. */
int uv_mutex_trylock(uv_mutex_t* mutex) {
  int rc;

  rc = pthread_mutex_trylock(mutex);
  if (rc == 0)
    return 0;

  if (rc != EBUSY && rc != EAGAIN)
    abort();

  return UV_EBUSY;
}
347 
348 
/* Unlock a mutex; failure (not owned) is a caller bug. */
void uv_mutex_unlock(uv_mutex_t* mutex) {
  int rc;

  rc = pthread_mutex_unlock(mutex);
  if (rc != 0)
    abort();
}
353 
354 
/* Initialize a read-write lock with default attributes. */
int uv_rwlock_init(uv_rwlock_t* rwlock) {
  int rc;

  rc = pthread_rwlock_init(rwlock, NULL);
  return UV__ERR(rc);
}
358 
359 
/* Destroy a read-write lock; failure is a caller bug. */
void uv_rwlock_destroy(uv_rwlock_t* rwlock) {
  int rc;

  rc = pthread_rwlock_destroy(rwlock);
  if (rc != 0)
    abort();
}
364 
365 
/* Acquire the lock for reading; any pthread error is fatal. */
void uv_rwlock_rdlock(uv_rwlock_t* rwlock) {
  int rc;

  rc = pthread_rwlock_rdlock(rwlock);
  if (rc != 0)
    abort();
}
370 
371 
/* Try to acquire a read lock without blocking.  EBUSY (writer holds it)
 * and EAGAIN (reader limit) both map to UV_EBUSY; other errors abort. */
int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock) {
  int rc;

  rc = pthread_rwlock_tryrdlock(rwlock);
  if (rc == 0)
    return 0;

  if (rc != EBUSY && rc != EAGAIN)
    abort();

  return UV_EBUSY;
}
384 
385 
/* Release a read lock (pthreads uses one unlock call for both modes). */
void uv_rwlock_rdunlock(uv_rwlock_t* rwlock) {
  int rc;

  rc = pthread_rwlock_unlock(rwlock);
  if (rc != 0)
    abort();
}
390 
391 
/* Acquire the lock for writing; any pthread error is fatal. */
void uv_rwlock_wrlock(uv_rwlock_t* rwlock) {
  int rc;

  rc = pthread_rwlock_wrlock(rwlock);
  if (rc != 0)
    abort();
}
396 
397 
/* Try to acquire a write lock without blocking.  Contention reports
 * UV_EBUSY; any other pthread error aborts. */
int uv_rwlock_trywrlock(uv_rwlock_t* rwlock) {
  int rc;

  rc = pthread_rwlock_trywrlock(rwlock);
  if (rc == 0)
    return 0;

  if (rc != EBUSY && rc != EAGAIN)
    abort();

  return UV_EBUSY;
}
410 
411 
/* Release a write lock (same underlying call as the read unlock). */
void uv_rwlock_wrunlock(uv_rwlock_t* rwlock) {
  int rc;

  rc = pthread_rwlock_unlock(rwlock);
  if (rc != 0)
    abort();
}
416 
417 
/* Run callback exactly once across all threads sharing `guard`. */
void uv_once(uv_once_t* guard, void (*callback)(void)) {
  int rc;

  rc = pthread_once(guard, callback);
  if (rc != 0)
    abort();
}
422 
423 #if defined(__APPLE__) && defined(__MACH__)
424 
/* Create a Mach semaphore with the given initial count. */
int uv_sem_init(uv_sem_t* sem, unsigned int value) {
  kern_return_t kr;

  kr = semaphore_create(mach_task_self(), sem, SYNC_POLICY_FIFO, value);

  switch (kr) {
  case KERN_SUCCESS:
    return 0;
  case KERN_INVALID_ARGUMENT:
    return UV_EINVAL;
  case KERN_RESOURCE_SHORTAGE:
    return UV_ENOMEM;
  default:
    abort();
    return UV_EINVAL;  /* Unreachable; satisfies the compiler. */
  }
}
439 
440 
/* Destroy a Mach semaphore; failure is a caller bug. */
void uv_sem_destroy(uv_sem_t* sem) {
  kern_return_t kr;

  kr = semaphore_destroy(mach_task_self(), *sem);
  if (kr != 0)
    abort();
}
445 
446 
/* Increment the semaphore, waking one waiter if any. */
void uv_sem_post(uv_sem_t* sem) {
  kern_return_t kr;

  kr = semaphore_signal(*sem);
  if (kr != 0)
    abort();
}
451 
452 
/* Decrement the semaphore, blocking until it is positive.  Retries when
 * the wait is interrupted (KERN_ABORTED). */
void uv_sem_wait(uv_sem_t* sem) {
  int rc;

  for (;;) {
    rc = semaphore_wait(*sem);
    if (rc != KERN_ABORTED)
      break;
  }

  if (rc != KERN_SUCCESS)
    abort();
}
463 
464 
/* Non-blocking decrement: a zero timeout turns the timed wait into a
 * poll.  Returns 0 on success or UV_EAGAIN when the count is zero. */
int uv_sem_trywait(uv_sem_t* sem) {
  mach_timespec_t zero;
  kern_return_t kr;

  zero.tv_sec = 0;
  zero.tv_nsec = 0;

  kr = semaphore_timedwait(*sem, zero);
  switch (kr) {
  case KERN_SUCCESS:
    return 0;
  case KERN_OPERATION_TIMED_OUT:
    return UV_EAGAIN;
  default:
    abort();
    return UV_EINVAL;  /* Unreachable; satisfies the compiler. */
  }
}
481 
482 #else /* !(defined(__APPLE__) && defined(__MACH__)) */
483 
484 #if defined(__GLIBC__) && !defined(__UCLIBC__)
485 
486 /* Hack around https://sourceware.org/bugzilla/show_bug.cgi?id=12674
487  * by providing a custom implementation for glibc < 2.21 in terms of other
488  * concurrency primitives.
489  * Refs: https://github.com/nodejs/node/issues/19903 */
490 
491 /* To preserve ABI compatibility, we treat the uv_sem_t as storage for
492  * a pointer to the actual struct we're using underneath. */
493 
/* Guards the one-time glibc version probe below. */
static uv_once_t glibc_version_check_once = UV_ONCE_INIT;
/* Set once by glibc_version_check(); read by the uv_sem_* dispatchers. */
static int platform_needs_custom_semaphore = 0;
496 
/* Runs once via uv_once(): detect glibc 2.x with x < 21, whose sem_post
 * is affected by the bug referenced above and needs the custom semaphore.
 * NOTE(review): relies on atoi() — presumably declared through uv.h or
 * internal.h since <stdlib.h> is not included here directly; verify. */
static void glibc_version_check(void) {
  const char* version = gnu_get_libc_version();
  platform_needs_custom_semaphore =
      version[0] == '2' && version[1] == '.' &&
      atoi(version + 2) < 21;
}
503 
504 #elif defined(__MVS__)
505 
506 #define platform_needs_custom_semaphore 1
507 
508 #else /* !defined(__GLIBC__) && !defined(__MVS__) */
509 
510 #define platform_needs_custom_semaphore 0
511 
512 #endif
513 
/* Portable counting semaphore built from a mutex + condvar, used when the
 * platform semaphore is missing or broken. */
typedef struct uv_semaphore_s {
  uv_mutex_t mutex;
  uv_cond_t cond;
  unsigned int value;  /* Current count; waiters block while it is 0. */
} uv_semaphore_t;
519 
520 #if (defined(__GLIBC__) && !defined(__UCLIBC__)) || \
521     platform_needs_custom_semaphore
522 STATIC_ASSERT(sizeof(uv_sem_t) >= sizeof(uv_semaphore_t*));
523 #endif
524 
/* Allocate and initialize the mutex+condvar semaphore.  The uv_sem_t is
 * ABI-fixed storage; we stash a pointer to the real struct inside it.
 * Returns 0, UV_ENOMEM, or a mutex/cond init error. */
static int uv__custom_sem_init(uv_sem_t* sem_, unsigned int value) {
  uv_semaphore_t* sem;
  int rc;

  sem = uv__malloc(sizeof(*sem));
  if (sem == NULL)
    return UV_ENOMEM;

  rc = uv_mutex_init(&sem->mutex);
  if (rc != 0)
    goto fail_free;

  rc = uv_cond_init(&sem->cond);
  if (rc != 0)
    goto fail_mutex;

  sem->value = value;
  *(uv_semaphore_t**)sem_ = sem;
  return 0;

fail_mutex:
  uv_mutex_destroy(&sem->mutex);
fail_free:
  uv__free(sem);
  return rc;
}
548 
549 
/* Tear down the custom semaphore in reverse order of creation. */
static void uv__custom_sem_destroy(uv_sem_t* sem_) {
  uv_semaphore_t* sem = *(uv_semaphore_t**)sem_;

  uv_cond_destroy(&sem->cond);
  uv_mutex_destroy(&sem->mutex);
  uv__free(sem);
}
558 
559 
/* Increment the semaphore and wake a waiter. */
static void uv__custom_sem_post(uv_sem_t* sem_) {
  uv_semaphore_t* sem;

  sem = *(uv_semaphore_t**)sem_;
  uv_mutex_lock(&sem->mutex);
  sem->value++;
  /* Signal unconditionally.  The previous `if (sem->value == 1)` gate
   * could strand a waiter: with two threads blocked, two back-to-back
   * posts produced only one signal (0->1 signalled, 1->2 did not), so the
   * second waiter slept on even though value > 0.  Signalling with no
   * waiters is a harmless no-op, and uv__custom_sem_wait() re-checks the
   * count in a loop, so extra wakeups are safe. */
  uv_cond_signal(&sem->cond);
  uv_mutex_unlock(&sem->mutex);
}
570 
571 
/* Block until the count is positive, then take one unit. */
static void uv__custom_sem_wait(uv_sem_t* sem_) {
  uv_semaphore_t* sem = *(uv_semaphore_t**)sem_;

  uv_mutex_lock(&sem->mutex);
  /* The loop handles spurious condvar wakeups. */
  while (sem->value == 0)
    uv_cond_wait(&sem->cond, &sem->mutex);
  sem->value--;
  uv_mutex_unlock(&sem->mutex);
}
582 
583 
/* Non-blocking decrement.  Returns 0 on success, UV_EAGAIN when the count
 * is zero — or when the internal mutex itself is contended, which also
 * reads as "couldn't take it right now". */
static int uv__custom_sem_trywait(uv_sem_t* sem_) {
  uv_semaphore_t* sem = *(uv_semaphore_t**)sem_;
  int rc;

  if (uv_mutex_trylock(&sem->mutex) != 0)
    return UV_EAGAIN;

  rc = UV_EAGAIN;
  if (sem->value != 0) {
    sem->value--;
    rc = 0;
  }

  uv_mutex_unlock(&sem->mutex);
  return rc;
}
601 
/* Initialize a POSIX semaphore (process-private). */
static int uv__sem_init(uv_sem_t* sem, unsigned int value) {
  int rc;

  rc = sem_init(sem, 0 /* pshared */, value);
  if (rc != 0)
    return UV__ERR(errno);

  return 0;
}
607 
608 
/* Destroy a POSIX semaphore; failure is a caller bug. */
static void uv__sem_destroy(uv_sem_t* sem) {
  int rc;

  rc = sem_destroy(sem);
  if (rc != 0)
    abort();
}
613 
614 
/* Increment a POSIX semaphore; failure is fatal. */
static void uv__sem_post(uv_sem_t* sem) {
  int rc;

  rc = sem_post(sem);
  if (rc != 0)
    abort();
}
619 
620 
/* Decrement a POSIX semaphore, retrying across signal interruptions. */
static void uv__sem_wait(uv_sem_t* sem) {
  int rc;

  for (;;) {
    rc = sem_wait(sem);
    if (rc != -1 || errno != EINTR)
      break;
  }

  if (rc != 0)
    abort();
}
631 
632 
/* Non-blocking decrement; retries across EINTR.  Returns 0 or UV_EAGAIN
 * when the count is zero; other errors are fatal. */
static int uv__sem_trywait(uv_sem_t* sem) {
  int rc;

  for (;;) {
    rc = sem_trywait(sem);
    if (rc != -1 || errno != EINTR)
      break;
  }

  if (rc == 0)
    return 0;

  if (errno != EAGAIN)
    abort();

  return UV_EAGAIN;
}
648 
/* Public entry: dispatch to the custom or native semaphore, probing the
 * glibc version once on first use. */
int uv_sem_init(uv_sem_t* sem, unsigned int value) {
#if defined(__GLIBC__) && !defined(__UCLIBC__)
  uv_once(&glibc_version_check_once, glibc_version_check);
#endif

  if (platform_needs_custom_semaphore)
    return uv__custom_sem_init(sem, value);

  return uv__sem_init(sem, value);
}
659 
660 
/* Dispatch destroy to whichever implementation init selected. */
void uv_sem_destroy(uv_sem_t* sem) {
  if (platform_needs_custom_semaphore) {
    uv__custom_sem_destroy(sem);
    return;
  }

  uv__sem_destroy(sem);
}
667 
668 
/* Dispatch post to whichever implementation init selected. */
void uv_sem_post(uv_sem_t* sem) {
  if (platform_needs_custom_semaphore) {
    uv__custom_sem_post(sem);
    return;
  }

  uv__sem_post(sem);
}
675 
676 
/* Dispatch wait to whichever implementation init selected. */
void uv_sem_wait(uv_sem_t* sem) {
  if (platform_needs_custom_semaphore) {
    uv__custom_sem_wait(sem);
    return;
  }

  uv__sem_wait(sem);
}
683 
684 
/* Dispatch trywait to whichever implementation init selected. */
int uv_sem_trywait(uv_sem_t* sem) {
  if (platform_needs_custom_semaphore)
    return uv__custom_sem_trywait(sem);

  return uv__sem_trywait(sem);
}
691 
692 #endif /* defined(__APPLE__) && defined(__MACH__) */
693 
694 
695 #if defined(__APPLE__) && defined(__MACH__) || defined(__MVS__)
696 
/* Default-attribute condvar; these platforms have no (or no working)
 * pthread_condattr_setclock(). */
int uv_cond_init(uv_cond_t* cond) {
  int rc;

  rc = pthread_cond_init(cond, NULL);
  return UV__ERR(rc);
}
700 
701 #else /* !(defined(__APPLE__) && defined(__MACH__)) */
702 
/* Initialize a condition variable, selecting CLOCK_MONOTONIC where
 * supported so timed waits are immune to wall-clock adjustments.  Old
 * Android bionic lacks pthread_condattr_setclock(); uv_cond_timedwait()
 * compensates there with pthread_cond_timedwait_monotonic_np(). */
int uv_cond_init(uv_cond_t* cond) {
  pthread_condattr_t attr;
  int err;

  err = pthread_condattr_init(&attr);
  if (err)
    return UV__ERR(err);

#if !(defined(__ANDROID_API__) && __ANDROID_API__ < 21)
  err = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
  if (err)
    goto error2;
#endif

  err = pthread_cond_init(cond, &attr);
  if (err)
    goto error2;

  err = pthread_condattr_destroy(&attr);
  if (err)
    goto error;

  return 0;

error:
  /* The condvar was created; undo it before reporting the attr failure. */
  pthread_cond_destroy(cond);
error2:
  pthread_condattr_destroy(&attr);
  return UV__ERR(err);
}
733 
734 #endif /* defined(__APPLE__) && defined(__MACH__) */
735 
/* Destroy a condition variable.  On macOS, first perform a short timed
 * wait to flush any pending signal (see comment below). */
void uv_cond_destroy(uv_cond_t* cond) {
#if defined(__APPLE__) && defined(__MACH__)
  /* It has been reported that destroying condition variables that have been
   * signalled but not waited on can sometimes result in application crashes.
   * See https://codereview.chromium.org/1323293005.
   */
  pthread_mutex_t mutex;
  struct timespec ts;
  int err;

  if (pthread_mutex_init(&mutex, NULL))
    abort();

  if (pthread_mutex_lock(&mutex))
    abort();

  /* 1 ns relative timeout: effectively a poll that consumes a pending
   * signal without blocking. */
  ts.tv_sec = 0;
  ts.tv_nsec = 1;

  err = pthread_cond_timedwait_relative_np(cond, &mutex, &ts);
  if (err != 0 && err != ETIMEDOUT)
    abort();

  if (pthread_mutex_unlock(&mutex))
    abort();

  if (pthread_mutex_destroy(&mutex))
    abort();
#endif /* defined(__APPLE__) && defined(__MACH__) */

  if (pthread_cond_destroy(cond))
    abort();
}
769 
/* Wake one thread waiting on the condvar; failure is fatal. */
void uv_cond_signal(uv_cond_t* cond) {
  int rc;

  rc = pthread_cond_signal(cond);
  if (rc != 0)
    abort();
}
774 
/* Wake every thread waiting on the condvar; failure is fatal. */
void uv_cond_broadcast(uv_cond_t* cond) {
  int rc;

  rc = pthread_cond_broadcast(cond);
  if (rc != 0)
    abort();
}
779 
/* Wait on the condvar with `mutex` held; failure is fatal. */
void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex) {
  int rc;

  rc = pthread_cond_wait(cond, mutex);
  if (rc != 0)
    abort();
}
784 
785 
/* Wait on the condvar with a relative timeout in nanoseconds.
 * Returns 0 when signalled, UV_ETIMEDOUT on expiry; other errors abort. */
int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) {
  int r;
  struct timespec ts;
#if defined(__MVS__)
  struct timeval tv;
#endif

#if defined(__APPLE__) && defined(__MACH__)
  /* Darwin has a native relative-timeout wait; no absolute deadline math. */
  ts.tv_sec = timeout / NANOSEC;
  ts.tv_nsec = timeout % NANOSEC;
  r = pthread_cond_timedwait_relative_np(cond, mutex, &ts);
#else
#if defined(__MVS__)
  if (gettimeofday(&tv, NULL))
    abort();
  /* Pure integer arithmetic.  The previous `tv.tv_usec * 1e3` promoted
   * the whole sum to double, which cannot represent epoch-scale
   * nanosecond counts exactly (> 2^53); also widen tv_sec before the
   * multiply so a 32-bit time_t cannot overflow. */
  timeout += (uint64_t) tv.tv_sec * NANOSEC + (uint64_t) tv.tv_usec * 1000;
#else
  timeout += uv__hrtime(UV_CLOCK_PRECISE);
#endif
  ts.tv_sec = timeout / NANOSEC;
  ts.tv_nsec = timeout % NANOSEC;
#if defined(__ANDROID_API__) && __ANDROID_API__ < 21

  /*
   * The bionic pthread implementation doesn't support CLOCK_MONOTONIC,
   * but has this alternative function instead.
   */
  r = pthread_cond_timedwait_monotonic_np(cond, mutex, &ts);
#else
  r = pthread_cond_timedwait(cond, mutex, &ts);
#endif /* __ANDROID_API__ */
#endif


  if (r == 0)
    return 0;

  if (r == ETIMEDOUT)
    return UV_ETIMEDOUT;

  abort();
#ifndef __SUNPRO_C
  return UV_EINVAL;  /* Satisfy the compiler. */
#endif
}
831 
832 
/* Create a thread-local storage key (no destructor callback). */
int uv_key_create(uv_key_t* key) {
  int rc;

  rc = pthread_key_create(key, NULL);
  return UV__ERR(rc);
}
836 
837 
/* Delete a thread-local storage key; failure is a caller bug. */
void uv_key_delete(uv_key_t* key) {
  int rc;

  rc = pthread_key_delete(*key);
  if (rc != 0)
    abort();
}
842 
843 
/* Read the calling thread's value for `key` (NULL when never set). */
void* uv_key_get(uv_key_t* key) {
  void* value;

  value = pthread_getspecific(*key);
  return value;
}
847 
848 
/* Store the calling thread's value for `key`; failure is fatal. */
void uv_key_set(uv_key_t* key, void* value) {
  int rc;

  rc = pthread_setspecific(*key, value);
  if (rc != 0)
    abort();
}
853