1 /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a copy
4 * of this software and associated documentation files (the "Software"), to
5 * deal in the Software without restriction, including without limitation the
6 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7 * sell copies of the Software, and to permit persons to whom the Software is
8 * furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
19 * IN THE SOFTWARE.
20 */
21
22 #include <assert.h>
23 #include <limits.h>
24 #include <stdlib.h>
25
26 #include "uv.h"
27 #include "internal.h"
28
29
/* True when the native Vista+ condition-variable API was resolved at runtime
 * (pInitializeConditionVariable is loaded lazily elsewhere in the project). */
#define HAVE_CONDVAR_API() (pInitializeConditionVariable != NULL)

/* Event-based fallback condition variable, used on pre-Vista Windows where
 * the native condition-variable API is unavailable. */
static int uv_cond_fallback_init(uv_cond_t* cond);
static void uv_cond_fallback_destroy(uv_cond_t* cond);
static void uv_cond_fallback_signal(uv_cond_t* cond);
static void uv_cond_fallback_broadcast(uv_cond_t* cond);
static void uv_cond_fallback_wait(uv_cond_t* cond, uv_mutex_t* mutex);
static int uv_cond_fallback_timedwait(uv_cond_t* cond,
    uv_mutex_t* mutex, uint64_t timeout);

/* Thin wrappers over the native CONDITION_VARIABLE API. */
static int uv_cond_condvar_init(uv_cond_t* cond);
static void uv_cond_condvar_destroy(uv_cond_t* cond);
static void uv_cond_condvar_signal(uv_cond_t* cond);
static void uv_cond_condvar_broadcast(uv_cond_t* cond);
static void uv_cond_condvar_wait(uv_cond_t* cond, uv_mutex_t* mutex);
static int uv_cond_condvar_timedwait(uv_cond_t* cond,
    uv_mutex_t* mutex, uint64_t timeout);
47
48
/* Slow path of uv_once(): arbitrate between racing threads with an event.
 * Each caller creates an event and tries to publish it into guard->event
 * with an interlocked compare-exchange.  Exactly one thread wins, runs the
 * callback and signals its event; losers discard their event and wait on
 * the winner's event instead. */
static void uv__once_inner(uv_once_t* guard, void (*callback)(void)) {
  DWORD result;
  HANDLE existing_event, created_event;

  /* Manual-reset (bManualReset=1), initially non-signaled event. */
  created_event = CreateEvent(NULL, 1, 0, NULL);
  if (created_event == 0) {
    /* Could fail in a low-memory situation? */
    uv_fatal_error(GetLastError(), "CreateEvent");
  }

  existing_event = InterlockedCompareExchangePointer(&guard->event,
                                                     created_event,
                                                     NULL);

  if (existing_event == NULL) {
    /* We won the race */
    callback();

    /* Signal first, then set `ran` so the fast path only short-circuits
     * after waiters can complete. */
    result = SetEvent(created_event);
    assert(result);
    guard->ran = 1;

  } else {
    /* We lost the race. Destroy the event we created and wait for the */
    /* existing one to become signaled. */
    CloseHandle(created_event);
    result = WaitForSingleObject(existing_event, INFINITE);
    assert(result == WAIT_OBJECT_0);
  }
}
79
80
/* Run `callback` exactly once per `guard`, even when called concurrently. */
void uv_once(uv_once_t* guard, void (*callback)(void)) {
  /* Fast case - avoid WaitForSingleObject. */
  /* NOTE(review): `ran` is read without an explicit barrier; presumably this
   * relies on the interlocked operation in the slow path plus Windows/x86
   * ordering guarantees — confirm before porting. */
  if (guard->ran) {
    return;
  }

  uv__once_inner(guard, callback);
}
89
90
/* Verify that uv_thread_t can be stored in a TLS slot. */
STATIC_ASSERT(sizeof(uv_thread_t) <= sizeof(void*));

/* TLS slot holding the calling thread's own uv_thread_t handle; allocated
 * lazily on first use via the once-guard below. */
static uv_key_t uv__current_thread_key;
static uv_once_t uv__current_thread_init_guard = UV_ONCE_INIT;
96
97
uv__init_current_thread_key(void)98 static void uv__init_current_thread_key(void) {
99 if (uv_key_create(&uv__current_thread_key))
100 abort();
101 }
102
103
/* Heap-allocated bootstrap data handed from uv_thread_create() to the new
 * thread; the trampoline copies it to its own stack and frees it. */
struct thread_ctx {
  void (*entry)(void* arg);  /* user-supplied thread entry point */
  void* arg;                 /* user-supplied argument for `entry` */
  uv_thread_t self;          /* the new thread's own creation handle */
};
109
110
uv__thread_start(void * arg)111 static UINT __stdcall uv__thread_start(void* arg) {
112 struct thread_ctx *ctx_p;
113 struct thread_ctx ctx;
114
115 ctx_p = arg;
116 ctx = *ctx_p;
117 uv__free(ctx_p);
118
119 uv_once(&uv__current_thread_init_guard, uv__init_current_thread_key);
120 uv_key_set(&uv__current_thread_key, (void*) ctx.self);
121
122 ctx.entry(ctx.arg);
123
124 return 0;
125 }
126
127
/* Start a new thread running entry(arg).  Returns 0 on success or a libuv
 * error code (UV_ENOMEM, UV_EACCES, UV_EAGAIN, UV_EINVAL, UV_EIO). */
int uv_thread_create(uv_thread_t *tid, void (*entry)(void *arg), void *arg) {
  HANDLE handle;
  struct thread_ctx* ctx;
  int err;

  ctx = uv__malloc(sizeof(*ctx));
  if (ctx == NULL)
    return UV_ENOMEM;

  ctx->entry = entry;
  ctx->arg = arg;

  /* Create the thread in suspended state so we have a chance to pass
   * its own creation handle to it */
  handle = (HANDLE) _beginthreadex(NULL,
                                   0,
                                   uv__thread_start,
                                   ctx,
                                   CREATE_SUSPENDED,
                                   NULL);
  if (handle == NULL) {
    /* _beginthreadex() reports failures through errno. */
    err = errno;
    uv__free(ctx);
  } else {
    err = 0;
    *tid = handle;
    ctx->self = handle;
    ResumeThread(handle);
  }

  /* Translate the CRT error into a libuv error code. */
  if (err == 0)
    return 0;
  if (err == EACCES)
    return UV_EACCES;
  if (err == EAGAIN)
    return UV_EAGAIN;
  if (err == EINVAL)
    return UV_EINVAL;
  return UV_EIO;
}
171
172
uv_thread_self(void)173 uv_thread_t uv_thread_self(void) {
174 uv_once(&uv__current_thread_init_guard, uv__init_current_thread_key);
175 return (uv_thread_t) uv_key_get(&uv__current_thread_key);
176 }
177
178
/* Wait for *tid to finish, then close and clear the handle.
 * Returns 0 on success or a translated system error. */
int uv_thread_join(uv_thread_t *tid) {
  if (WaitForSingleObject(*tid, INFINITE) != WAIT_OBJECT_0)
    return uv_translate_sys_error(GetLastError());

  CloseHandle(*tid);
  *tid = 0;
  MemoryBarrier(); /* For feature parity with pthread_join(). */
  return 0;
}
189
190
/* Two thread identifiers are equal iff their handles compare equal. */
int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2) {
  return (*t1 == *t2) ? 1 : 0;
}
194
195
/* Initialize a mutex backed by a CRITICAL_SECTION.  Always succeeds, but
 * keeps the int return for API symmetry with the POSIX implementation. */
int uv_mutex_init(uv_mutex_t* mutex) {
  InitializeCriticalSection(mutex);
  return 0;
}
200
201
/* CRITICAL_SECTIONs are recursive by nature on Windows, so the plain
 * initializer already provides recursive-mutex semantics. */
int uv_mutex_init_recursive(uv_mutex_t* mutex) {
  return uv_mutex_init(mutex);
}
205
206
/* Release the resources held by the critical section. */
void uv_mutex_destroy(uv_mutex_t* mutex) {
  DeleteCriticalSection(mutex);
}
210
211
/* Block until the mutex is acquired. */
void uv_mutex_lock(uv_mutex_t* mutex) {
  EnterCriticalSection(mutex);
}
215
216
/* Attempt to take the mutex without blocking.
 * Returns 0 on success, UV_EBUSY when another thread holds it. */
int uv_mutex_trylock(uv_mutex_t* mutex) {
  return TryEnterCriticalSection(mutex) ? 0 : UV_EBUSY;
}
223
224
/* Release the mutex; the caller must currently hold it. */
void uv_mutex_unlock(uv_mutex_t* mutex) {
  LeaveCriticalSection(mutex);
}
228
229
/* Initialize a read-write lock built from a binary semaphore (the write
 * lock, count 1 == unlocked) plus a critical section guarding a reader
 * count.  Returns 0 or a translated system error. */
int uv_rwlock_init(uv_rwlock_t* rwlock) {
  HANDLE sem;

  /* The semaphore acts as the write lock. */
  sem = CreateSemaphoreW(NULL, 1, 1, NULL);
  if (sem == NULL)
    return uv_translate_sys_error(GetLastError());

  rwlock->state_.write_semaphore_ = sem;

  /* Protects the reader count below. */
  InitializeCriticalSection(&rwlock->state_.num_readers_lock_);
  rwlock->state_.num_readers_ = 0;

  return 0;
}
245
246
/* Tear down the read-write lock's OS objects. */
void uv_rwlock_destroy(uv_rwlock_t* rwlock) {
  DeleteCriticalSection(&rwlock->state_.num_readers_lock_);
  CloseHandle(rwlock->state_.write_semaphore_);
}
251
252
/* Acquire <rwlock> for reading.  The first reader also takes the write
 * semaphore, so writers stay blocked while any reader is active. */
void uv_rwlock_rdlock(uv_rwlock_t* rwlock) {
  /* Acquire the lock that protects the reader count. */
  EnterCriticalSection(&rwlock->state_.num_readers_lock_);

  /* Increase the reader count, and lock for write if this is the first
   * reader.
   */
  if (++rwlock->state_.num_readers_ == 1) {
    DWORD r = WaitForSingleObject(rwlock->state_.write_semaphore_, INFINITE);
    if (r != WAIT_OBJECT_0)
      uv_fatal_error(GetLastError(), "WaitForSingleObject");
  }

  /* Release the lock that protects the reader count. */
  LeaveCriticalSection(&rwlock->state_.num_readers_lock_);
}
269
270
/* Try to acquire <rwlock> for reading without blocking.
 * Returns 0 on success, UV_EBUSY when the reader-count lock or the write
 * lock is contended.  Unexpected wait failures are fatal. */
int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock) {
  int err;

  if (!TryEnterCriticalSection(&rwlock->state_.num_readers_lock_))
    return UV_EBUSY;

  err = 0;

  if (rwlock->state_.num_readers_ == 0) {
    /* Currently there are no other readers, which means that the write lock
     * needs to be acquired.
     */
    DWORD r = WaitForSingleObject(rwlock->state_.write_semaphore_, 0);
    if (r == WAIT_OBJECT_0)
      rwlock->state_.num_readers_++;
    else if (r == WAIT_TIMEOUT)
      err = UV_EBUSY;  /* a writer holds the lock */
    else if (r == WAIT_FAILED)
      uv_fatal_error(GetLastError(), "WaitForSingleObject");

  } else {
    /* The write lock has already been acquired because there are other
     * active readers.
     */
    rwlock->state_.num_readers_++;
  }

  LeaveCriticalSection(&rwlock->state_.num_readers_lock_);
  return err;
}
301
302
/* Drop a read lock; the last reader releases the write semaphore so
 * writers may proceed. */
void uv_rwlock_rdunlock(uv_rwlock_t* rwlock) {
  EnterCriticalSection(&rwlock->state_.num_readers_lock_);

  rwlock->state_.num_readers_--;
  if (rwlock->state_.num_readers_ == 0 &&
      !ReleaseSemaphore(rwlock->state_.write_semaphore_, 1, NULL))
    uv_fatal_error(GetLastError(), "ReleaseSemaphore");

  LeaveCriticalSection(&rwlock->state_.num_readers_lock_);
}
313
314
/* Acquire <rwlock> for writing by taking the write semaphore. */
void uv_rwlock_wrlock(uv_rwlock_t* rwlock) {
  DWORD status;

  status = WaitForSingleObject(rwlock->state_.write_semaphore_, INFINITE);
  if (status != WAIT_OBJECT_0)
    uv_fatal_error(GetLastError(), "WaitForSingleObject");
}
320
321
/* Try to acquire <rwlock> for writing without blocking.
 * Returns 0 on success, UV_EBUSY when a reader or writer holds the lock.
 * Any other wait status is fatal. */
int uv_rwlock_trywrlock(uv_rwlock_t* rwlock) {
  DWORD r = WaitForSingleObject(rwlock->state_.write_semaphore_, 0);
  if (r == WAIT_OBJECT_0)
    return 0;
  else if (r == WAIT_TIMEOUT)
    return UV_EBUSY;
  else
    uv_fatal_error(GetLastError(), "WaitForSingleObject");
  /* uv_fatal_error() does not return, but the compiler cannot prove it;
   * without this the function falls off the end of a non-void function
   * (UB if ever reached).  Matches uv_sem_trywait() below. */
  return -1; /* Satisfy the compiler. */
}
331
332
/* Release the write lock by posting the write semaphore back to 1. */
void uv_rwlock_wrunlock(uv_rwlock_t* rwlock) {
  BOOL ok = ReleaseSemaphore(rwlock->state_.write_semaphore_, 1, NULL);
  if (!ok)
    uv_fatal_error(GetLastError(), "ReleaseSemaphore");
}
337
338
/* Create a counting semaphore with the given initial value.
 * Returns 0 or a translated system error. */
int uv_sem_init(uv_sem_t* sem, unsigned int value) {
  *sem = CreateSemaphore(NULL, value, INT_MAX, NULL);
  if (*sem != NULL)
    return 0;
  return uv_translate_sys_error(GetLastError());
}
346
347
/* Destroy the semaphore; a failing CloseHandle() is fatal. */
void uv_sem_destroy(uv_sem_t* sem) {
  if (CloseHandle(*sem) == 0)
    abort();
}
352
353
/* Increment the semaphore by one; failure is fatal. */
void uv_sem_post(uv_sem_t* sem) {
  if (ReleaseSemaphore(*sem, 1, NULL) == 0)
    abort();
}
358
359
/* Block until the semaphore can be decremented; failure is fatal. */
void uv_sem_wait(uv_sem_t* sem) {
  DWORD status = WaitForSingleObject(*sem, INFINITE);
  if (status != WAIT_OBJECT_0)
    abort();
}
364
365
/* Try to decrement the semaphore without blocking.
 * Returns 0 on success, UV_EAGAIN when the count is zero. */
int uv_sem_trywait(uv_sem_t* sem) {
  switch (WaitForSingleObject(*sem, 0)) {
    case WAIT_OBJECT_0:
      return 0;
    case WAIT_TIMEOUT:
      return UV_EAGAIN;
    default:
      abort();
  }
  return -1; /* Satisfy the compiler. */
}
378
379
/* This condition variable implementation is based on the SetEvent solution
 * (section 3.2) at http://www.cs.wustl.edu/~schmidt/win32-cv-1.html
 * We could not use the SignalObjectAndWait solution (section 3.4) because
 * it wants the 2nd argument (type uv_mutex_t) of uv_cond_wait() and
 * uv_cond_timedwait() to be HANDLEs, but we use CRITICAL_SECTIONs.
 */
386
/* Initialize the pre-Vista fallback condition variable: a waiter count
 * protected by a critical section, an auto-reset event used by signal()
 * (wakes one waiter) and a manual-reset event used by broadcast() (wakes
 * all waiters).  Returns 0 or a translated system error. */
static int uv_cond_fallback_init(uv_cond_t* cond) {
  int err;

  /* Initialize the count to 0. */
  cond->fallback.waiters_count = 0;

  InitializeCriticalSection(&cond->fallback.waiters_count_lock);

  /* Create an auto-reset event. */
  cond->fallback.signal_event = CreateEvent(NULL,  /* no security */
                                            FALSE, /* auto-reset event */
                                            FALSE, /* non-signaled initially */
                                            NULL); /* unnamed */
  if (!cond->fallback.signal_event) {
    err = GetLastError();
    goto error2;
  }

  /* Create a manual-reset event. */
  cond->fallback.broadcast_event = CreateEvent(NULL,  /* no security */
                                               TRUE,  /* manual-reset */
                                               FALSE, /* non-signaled */
                                               NULL); /* unnamed */
  if (!cond->fallback.broadcast_event) {
    err = GetLastError();
    goto error;
  }

  return 0;

  /* Unwind in reverse order of creation. */
error:
  CloseHandle(cond->fallback.signal_event);
error2:
  DeleteCriticalSection(&cond->fallback.waiters_count_lock);
  return uv_translate_sys_error(err);
}
423
424
/* Initialize a native Vista+ condition variable; this cannot fail. */
static int uv_cond_condvar_init(uv_cond_t* cond) {
  pInitializeConditionVariable(&cond->cond_var);
  return 0;
}
429
430
/* Initialize a condition variable, using the native implementation when
 * available and the event-based fallback otherwise. */
int uv_cond_init(uv_cond_t* cond) {
  uv__once_init();

  return HAVE_CONDVAR_API() ? uv_cond_condvar_init(cond)
                            : uv_cond_fallback_init(cond);
}
439
440
/* Tear down the fallback condition variable; a leaked handle is fatal. */
static void uv_cond_fallback_destroy(uv_cond_t* cond) {
  if (CloseHandle(cond->fallback.broadcast_event) == 0)
    abort();
  if (CloseHandle(cond->fallback.signal_event) == 0)
    abort();
  DeleteCriticalSection(&cond->fallback.waiters_count_lock);
}
448
449
/* Native condition variables need no explicit destruction. */
static void uv_cond_condvar_destroy(uv_cond_t* cond) {
  (void) cond;
}
453
454
/* Destroy a condition variable created with uv_cond_init(). */
void uv_cond_destroy(uv_cond_t* cond) {
  if (!HAVE_CONDVAR_API())
    uv_cond_fallback_destroy(cond);
  else
    uv_cond_condvar_destroy(cond);
}
461
462
/* Wake a single waiter on the fallback condition variable, if any. */
static void uv_cond_fallback_signal(uv_cond_t* cond) {
  int waiters_present;

  /* Sample the waiter count under its lock to avoid races. */
  EnterCriticalSection(&cond->fallback.waiters_count_lock);
  waiters_present = (cond->fallback.waiters_count > 0);
  LeaveCriticalSection(&cond->fallback.waiters_count_lock);

  /* The auto-reset signal event releases exactly one waiting thread. */
  if (waiters_present)
    SetEvent(cond->fallback.signal_event);
}
474
475
/* Wake one waiter via the native condition-variable API. */
static void uv_cond_condvar_signal(uv_cond_t* cond) {
  pWakeConditionVariable(&cond->cond_var);
}
479
480
/* Wake one thread waiting on `cond`, if any. */
void uv_cond_signal(uv_cond_t* cond) {
  if (!HAVE_CONDVAR_API())
    uv_cond_fallback_signal(cond);
  else
    uv_cond_condvar_signal(cond);
}
487
488
/* Wake every waiter on the fallback condition variable, if any. */
static void uv_cond_fallback_broadcast(uv_cond_t* cond) {
  int waiters_present;

  /* Sample the waiter count under its lock to avoid races. */
  EnterCriticalSection(&cond->fallback.waiters_count_lock);
  waiters_present = (cond->fallback.waiters_count > 0);
  LeaveCriticalSection(&cond->fallback.waiters_count_lock);

  /* The manual-reset broadcast event stays signaled until the last waiter
   * resets it (see uv_cond_wait_helper()). */
  if (waiters_present)
    SetEvent(cond->fallback.broadcast_event);
}
500
501
/* Wake all waiters via the native condition-variable API. */
static void uv_cond_condvar_broadcast(uv_cond_t* cond) {
  pWakeAllConditionVariable(&cond->cond_var);
}
505
506
/* Wake all threads waiting on `cond`. */
void uv_cond_broadcast(uv_cond_t* cond) {
  if (!HAVE_CONDVAR_API())
    uv_cond_fallback_broadcast(cond);
  else
    uv_cond_condvar_broadcast(cond);
}
513
514
/* Common wait path of the fallback condition variable.  Releases <mutex>,
 * waits up to <dwMilliseconds> for either the signal event (auto-reset:
 * wakes one waiter) or the broadcast event (manual-reset: wakes all), then
 * reacquires <mutex>.  Returns 0 when woken, UV_ETIMEDOUT on timeout;
 * any other wait status aborts. */
static int uv_cond_wait_helper(uv_cond_t* cond, uv_mutex_t* mutex,
    DWORD dwMilliseconds) {
  DWORD result;
  int last_waiter;
  HANDLE handles[2] = {
    cond->fallback.signal_event,
    cond->fallback.broadcast_event
  };

  /* Avoid race conditions. */
  EnterCriticalSection(&cond->fallback.waiters_count_lock);
  cond->fallback.waiters_count++;
  LeaveCriticalSection(&cond->fallback.waiters_count_lock);

  /* It's ok to release the <mutex> here since Win32 manual-reset events */
  /* maintain state when used with <SetEvent>. This avoids the "lost wakeup" */
  /* bug. */
  uv_mutex_unlock(mutex);

  /* Wait for either event to become signaled due to <uv_cond_signal> being */
  /* called or <uv_cond_broadcast> being called. */
  result = WaitForMultipleObjects(2, handles, FALSE, dwMilliseconds);

  EnterCriticalSection(&cond->fallback.waiters_count_lock);
  cond->fallback.waiters_count--;
  /* WAIT_OBJECT_0 + 1 means handles[1] — the broadcast event — woke us. */
  last_waiter = result == WAIT_OBJECT_0 + 1
      && cond->fallback.waiters_count == 0;
  LeaveCriticalSection(&cond->fallback.waiters_count_lock);

  /* Some thread called <pthread_cond_broadcast>. */
  if (last_waiter) {
    /* We're the last waiter to be notified or to stop waiting, so reset the */
    /* the manual-reset event. */
    ResetEvent(cond->fallback.broadcast_event);
  }

  /* Reacquire the <mutex>. */
  uv_mutex_lock(mutex);

  if (result == WAIT_OBJECT_0 || result == WAIT_OBJECT_0 + 1)
    return 0;

  if (result == WAIT_TIMEOUT)
    return UV_ETIMEDOUT;

  abort();
  return -1; /* Satisfy the compiler. */
}
563
564
/* Block indefinitely on the fallback condition variable; failure is fatal. */
static void uv_cond_fallback_wait(uv_cond_t* cond, uv_mutex_t* mutex) {
  int err = uv_cond_wait_helper(cond, mutex, INFINITE);
  if (err != 0)
    abort();
}
569
570
/* Block indefinitely on a native condition variable; failure is fatal. */
static void uv_cond_condvar_wait(uv_cond_t* cond, uv_mutex_t* mutex) {
  BOOL ok = pSleepConditionVariableCS(&cond->cond_var, mutex, INFINITE);
  if (ok == 0)
    abort();
}
575
576
/* Wait on `cond`; `mutex` must be held and is atomically released while
 * waiting and reacquired before returning. */
void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex) {
  if (!HAVE_CONDVAR_API())
    uv_cond_fallback_wait(cond, mutex);
  else
    uv_cond_condvar_wait(cond, mutex);
}
583
584
/* Timed wait on the fallback implementation.  `timeout` is in nanoseconds
 * and is converted to milliseconds for the Win32 wait. */
static int uv_cond_fallback_timedwait(uv_cond_t* cond,
    uv_mutex_t* mutex, uint64_t timeout) {
  DWORD ms = (DWORD)(timeout / 1e6);

  return uv_cond_wait_helper(cond, mutex, ms);
}
589
590
/* Timed wait on a native condition variable.  `timeout` is in nanoseconds.
 * Returns 0 when signaled, UV_ETIMEDOUT on timeout; other failures abort. */
static int uv_cond_condvar_timedwait(uv_cond_t* cond,
    uv_mutex_t* mutex, uint64_t timeout) {
  DWORD ms = (DWORD)(timeout / 1e6);  /* nanoseconds -> milliseconds */

  if (pSleepConditionVariableCS(&cond->cond_var, mutex, ms))
    return 0;
  if (GetLastError() == ERROR_TIMEOUT)
    return UV_ETIMEDOUT;
  abort();
  return -1; /* Satisfy the compiler. */
}
599
600
/* Wait on `cond` for at most `timeout` nanoseconds.
 * Returns 0 when signaled or UV_ETIMEDOUT. */
int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex,
    uint64_t timeout) {
  return HAVE_CONDVAR_API()
      ? uv_cond_condvar_timedwait(cond, mutex, timeout)
      : uv_cond_fallback_timedwait(cond, mutex, timeout);
}
608
609
/* Initialize a reusable barrier for `count` threads, built from a mutex and
 * two turnstile semaphores.  Returns 0 or a libuv error code. */
int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
  int err;

  barrier->n = count;
  barrier->count = 0;

  err = uv_mutex_init(&barrier->mutex);
  if (err)
    return err;

  /* The first turnstile starts closed (0), the second open (1). */
  err = uv_sem_init(&barrier->turnstile1, 0);
  if (err) {
    uv_mutex_destroy(&barrier->mutex);
    return err;
  }

  err = uv_sem_init(&barrier->turnstile2, 1);
  if (err) {
    uv_sem_destroy(&barrier->turnstile1);
    uv_mutex_destroy(&barrier->mutex);
    return err;
  }

  return 0;
}
637
638
/* Destroy the barrier's primitives in reverse order of initialization. */
void uv_barrier_destroy(uv_barrier_t* barrier) {
  uv_sem_destroy(&barrier->turnstile2);
  uv_sem_destroy(&barrier->turnstile1);
  uv_mutex_destroy(&barrier->mutex);
}
644
645
/* Block until barrier->n threads have arrived.  Classic two-turnstile
 * barrier: turnstile1 releases threads out of the arrival phase, turnstile2
 * closes the barrier again so it is safe to reuse for the next cycle.
 * Returns nonzero in exactly one ("serial") thread per cycle, 0 in all
 * others — mirroring PTHREAD_BARRIER_SERIAL_THREAD semantics. */
int uv_barrier_wait(uv_barrier_t* barrier) {
  int serial_thread;

  uv_mutex_lock(&barrier->mutex);
  if (++barrier->count == barrier->n) {
    /* Last arrival: lock turnstile2 and open turnstile1. */
    uv_sem_wait(&barrier->turnstile2);
    uv_sem_post(&barrier->turnstile1);
  }
  uv_mutex_unlock(&barrier->mutex);

  /* Pass through turnstile1, re-posting so the next waiter gets through. */
  uv_sem_wait(&barrier->turnstile1);
  uv_sem_post(&barrier->turnstile1);

  uv_mutex_lock(&barrier->mutex);
  serial_thread = (--barrier->count == 0);
  if (serial_thread) {
    /* Last departure: close turnstile1 again and open turnstile2. */
    uv_sem_wait(&barrier->turnstile1);
    uv_sem_post(&barrier->turnstile2);
  }
  uv_mutex_unlock(&barrier->mutex);

  /* Pass through turnstile2, re-posting for the next thread. */
  uv_sem_wait(&barrier->turnstile2);
  uv_sem_post(&barrier->turnstile2);
  return serial_thread;
}
671
672
/* Allocate a TLS slot.  Returns UV_ENOMEM when no slot is available. */
int uv_key_create(uv_key_t* key) {
  key->tls_index = TlsAlloc();
  if (key->tls_index != TLS_OUT_OF_INDEXES)
    return 0;
  return UV_ENOMEM;
}
679
680
/* Free the TLS slot and poison the stored index against accidental reuse. */
void uv_key_delete(uv_key_t* key) {
  if (!TlsFree(key->tls_index))
    abort();
  key->tls_index = TLS_OUT_OF_INDEXES;
}
686
687
/* Read the calling thread's value for this key.  NULL is a legal stored
 * value, so GetLastError() distinguishes "stored NULL" from failure. */
void* uv_key_get(uv_key_t* key) {
  void* value = TlsGetValue(key->tls_index);

  if (value == NULL && GetLastError() != ERROR_SUCCESS)
    abort();

  return value;
}
698
699
/* Store `value` in the calling thread's slot for this key; failure aborts. */
void uv_key_set(uv_key_t* key, void* value) {
  if (!TlsSetValue(key->tls_index, value))
    abort();
}
704