1 /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2  *
3  * Permission is hereby granted, free of charge, to any person obtaining a copy
4  * of this software and associated documentation files (the "Software"), to
5  * deal in the Software without restriction, including without limitation the
6  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7  * sell copies of the Software, and to permit persons to whom the Software is
8  * furnished to do so, subject to the following conditions:
9  *
10  * The above copyright notice and this permission notice shall be included in
11  * all copies or substantial portions of the Software.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
19  * IN THE SOFTWARE.
20  */
21 
22 #include <assert.h>
23 #include <limits.h>
24 #include <stdlib.h>
25 
26 #if defined(__MINGW64_VERSION_MAJOR)
27 /* MemoryBarrier expands to __mm_mfence in some cases (x86+sse2), which may
28  * require this header in some versions of mingw64. */
29 #include <intrin.h>
30 #endif
31 
32 #include "uv.h"
33 #include "internal.h"
34 
/* Slow path of uv_once(): arbitrate between racing threads so that
 * `callback` runs exactly once. The winner installs a manual-reset event
 * in the guard and signals it when done; losers block on that event. */
static void uv__once_inner(uv_once_t* guard, void (*callback)(void)) {
  DWORD result;
  HANDLE existing_event, created_event;

  /* Manual-reset (bManualReset=1), initially non-signaled (bInitialState=0):
   * every losing thread must observe the signal, so auto-reset won't do. */
  created_event = CreateEvent(NULL, 1, 0, NULL);
  if (created_event == 0) {
    /* Could fail in a low-memory situation? */
    uv_fatal_error(GetLastError(), "CreateEvent");
  }

  /* Atomically publish our event; a NULL previous value means this thread
   * won the race and is responsible for running the callback. */
  existing_event = InterlockedCompareExchangePointer(&guard->event,
                                                     created_event,
                                                     NULL);

  if (existing_event == NULL) {
    /* We won the race */
    callback();

    /* Signal waiters first, then set `ran` so later callers can take the
     * fast path in uv_once() without touching the event. */
    result = SetEvent(created_event);
    assert(result);
    guard->ran = 1;

  } else {
    /* We lost the race. Destroy the event we created and wait for the existing
     * one to become signaled. */
    CloseHandle(created_event);
    result = WaitForSingleObject(existing_event, INFINITE);
    assert(result == WAIT_OBJECT_0);
  }
}
65 
66 
/* Run `callback` exactly once per `guard`, even under concurrent callers.
 * Checks the cheap `ran` flag first and only falls through to the
 * event-based arbitration while initialization is still pending. */
void uv_once(uv_once_t* guard, void (*callback)(void)) {
  if (!guard->ran)
    uv__once_inner(guard, callback);
}
75 
76 
/* Verify that uv_thread_t can be stored in a TLS slot. */
STATIC_ASSERT(sizeof(uv_thread_t) <= sizeof(void*));

/* TLS key holding each thread's own uv_thread_t handle; created lazily by
 * uv__init_current_thread_key() through the once guard below. */
static uv_key_t uv__current_thread_key;
static uv_once_t uv__current_thread_init_guard = UV_ONCE_INIT;
82 
83 
uv__init_current_thread_key(void)84 static void uv__init_current_thread_key(void) {
85   if (uv_key_create(&uv__current_thread_key))
86     abort();
87 }
88 
89 
/* Heap-allocated bundle passed from uv_thread_create_ex() to the new
 * thread's trampoline; copied and freed by uv__thread_start(). */
struct thread_ctx {
  void (*entry)(void* arg);  /* User-supplied thread entry point. */
  void* arg;                 /* Opaque argument forwarded to `entry`. */
  uv_thread_t self;          /* The new thread's own handle. */
};
95 
96 
uv__thread_start(void * arg)97 static UINT __stdcall uv__thread_start(void* arg) {
98   struct thread_ctx *ctx_p;
99   struct thread_ctx ctx;
100 
101   ctx_p = arg;
102   ctx = *ctx_p;
103   uv__free(ctx_p);
104 
105   uv_once(&uv__current_thread_init_guard, uv__init_current_thread_key);
106   uv_key_set(&uv__current_thread_key, (void*) ctx.self);
107 
108   ctx.entry(ctx.arg);
109 
110   return 0;
111 }
112 
113 
/* Create a thread with default options; see uv_thread_create_ex(). */
int uv_thread_create(uv_thread_t *tid, void (*entry)(void *arg), void *arg) {
  uv_thread_options_t options;

  options.flags = UV_THREAD_NO_FLAGS;
  return uv_thread_create_ex(tid, &options, entry, arg);
}
119 
/* Create a thread running `entry(arg)`, honoring an optional caller-supplied
 * stack size (rounded up to a page multiple). Returns 0 on success or a
 * UV_* error code. On success *tid receives the new thread's handle; the
 * caller must eventually uv_thread_join() it, which closes the handle. */
int uv_thread_create_ex(uv_thread_t* tid,
                        const uv_thread_options_t* params,
                        void (*entry)(void *arg),
                        void *arg) {
  struct thread_ctx* ctx;
  int err;
  HANDLE thread;
  SYSTEM_INFO sysinfo;
  size_t stack_size;
  size_t pagesize;

  stack_size =
      params->flags & UV_THREAD_HAS_STACK_SIZE ? params->stack_size : 0;

  if (stack_size != 0) {
    GetNativeSystemInfo(&sysinfo);
    pagesize = (size_t)sysinfo.dwPageSize;
    /* Round up to the nearest page boundary. */
    stack_size = (stack_size + pagesize - 1) &~ (pagesize - 1);

    /* _beginthreadex() takes the stack size as `unsigned`; reject values
     * that would be silently truncated on 64-bit builds. */
    if ((unsigned)stack_size != stack_size)
      return UV_EINVAL;
  }

  ctx = uv__malloc(sizeof(*ctx));
  if (ctx == NULL)
    return UV_ENOMEM;

  ctx->entry = entry;
  ctx->arg = arg;

  /* Create the thread in suspended state so we have a chance to pass
   * its own creation handle to it */
  thread = (HANDLE) _beginthreadex(NULL,
                                   (unsigned)stack_size,
                                   uv__thread_start,
                                   ctx,
                                   CREATE_SUSPENDED,
                                   NULL);
  if (thread == NULL) {
    /* _beginthreadex() reports failures through errno; ctx ownership did
     * not transfer to the thread, so free it here. */
    err = errno;
    uv__free(ctx);
  } else {
    err = 0;
    *tid = thread;
    /* Safe to write after creation: the thread is still suspended. */
    ctx->self = thread;
    ResumeThread(thread);
  }

  /* Map the CRT errno values _beginthreadex() can set onto UV_* codes. */
  switch (err) {
    case 0:
      return 0;
    case EACCES:
      return UV_EACCES;
    case EAGAIN:
      return UV_EAGAIN;
    case EINVAL:
      return UV_EINVAL;
  }

  return UV_EIO;
}
182 
183 
uv_thread_self(void)184 uv_thread_t uv_thread_self(void) {
185   uv_once(&uv__current_thread_init_guard, uv__init_current_thread_key);
186   return (uv_thread_t) uv_key_get(&uv__current_thread_key);
187 }
188 
189 
/* Block until the thread identified by *tid exits, then close its handle
 * and clear *tid. Returns 0 on success or a translated system error. */
int uv_thread_join(uv_thread_t *tid) {
  DWORD r;

  r = WaitForSingleObject(*tid, INFINITE);
  if (r != WAIT_OBJECT_0)
    return uv_translate_sys_error(GetLastError());

  CloseHandle(*tid);
  *tid = 0;
  MemoryBarrier();  /* For feature parity with pthread_join(). */
  return 0;
}
200 
201 
/* Compare two thread identifiers; nonzero when both hold the same handle
 * value. */
int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2) {
  return (*t1 == *t2) ? 1 : 0;
}
205 
206 
/* Initialize a mutex, backed by a Win32 critical section. Always returns
 * 0: InitializeCriticalSection() provides no error return. */
int uv_mutex_init(uv_mutex_t* mutex) {
  InitializeCriticalSection(mutex);
  return 0;
}
211 
212 
/* Win32 critical sections are recursive by nature, so the plain
 * initializer already yields recursive-mutex semantics. */
int uv_mutex_init_recursive(uv_mutex_t* mutex) {
  return uv_mutex_init(mutex);
}
216 
217 
/* Release the resources of the critical section backing the mutex. The
 * mutex must be unlocked and no longer in use. */
void uv_mutex_destroy(uv_mutex_t* mutex) {
  DeleteCriticalSection(mutex);
}
221 
222 
/* Acquire the mutex, blocking until it becomes available. */
void uv_mutex_lock(uv_mutex_t* mutex) {
  EnterCriticalSection(mutex);
}
226 
227 
/* Attempt to acquire the mutex without blocking. Returns 0 on success or
 * UV_EBUSY when the lock could not be taken right away. */
int uv_mutex_trylock(uv_mutex_t* mutex) {
  return TryEnterCriticalSection(mutex) ? 0 : UV_EBUSY;
}
234 
235 
/* Release the mutex; must be called by the thread that holds it. */
void uv_mutex_unlock(uv_mutex_t* mutex) {
  LeaveCriticalSection(mutex);
}
239 
240 
/* Initialize a read-write lock built from a binary semaphore (acting as
 * the write lock) plus a critical section guarding a reader count.
 * Returns 0 on success or a translated system error. */
int uv_rwlock_init(uv_rwlock_t* rwlock) {
  HANDLE sem;

  /* Binary semaphore: initial count 1, maximum count 1. */
  sem = CreateSemaphoreW(NULL, 1, 1, NULL);
  if (sem == NULL)
    return uv_translate_sys_error(GetLastError());

  rwlock->state_.write_semaphore_ = sem;
  InitializeCriticalSection(&rwlock->state_.num_readers_lock_);
  rwlock->state_.num_readers_ = 0;

  return 0;
}
256 
257 
/* Tear down a read-write lock created by uv_rwlock_init(). The lock must
 * not be held by any thread when this is called. */
void uv_rwlock_destroy(uv_rwlock_t* rwlock) {
  DeleteCriticalSection(&rwlock->state_.num_readers_lock_);
  CloseHandle(rwlock->state_.write_semaphore_);
}
262 
263 
/* Acquire the lock for reading. The first reader in also takes the write
 * semaphore on behalf of all readers, excluding writers while any reader
 * remains active. */
void uv_rwlock_rdlock(uv_rwlock_t* rwlock) {
  DWORD wait_result;

  EnterCriticalSection(&rwlock->state_.num_readers_lock_);

  rwlock->state_.num_readers_++;
  if (rwlock->state_.num_readers_ == 1) {
    /* First reader: block writers by grabbing the write lock. */
    wait_result =
        WaitForSingleObject(rwlock->state_.write_semaphore_, INFINITE);
    if (wait_result != WAIT_OBJECT_0)
      uv_fatal_error(GetLastError(), "WaitForSingleObject");
  }

  LeaveCriticalSection(&rwlock->state_.num_readers_lock_);
}
280 
281 
/* Try to acquire a read lock without blocking. Returns 0 on success or
 * UV_EBUSY when either the reader-count lock or the write lock is
 * currently contended; unexpected wait failures are fatal. */
int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock) {
  int err;

  /* A contended reader-count lock means another reader or writer is mid
   * transition; report busy rather than block. */
  if (!TryEnterCriticalSection(&rwlock->state_.num_readers_lock_))
    return UV_EBUSY;

  err = 0;

  if (rwlock->state_.num_readers_ == 0) {
    /* Currently there are no other readers, which means that the write lock
     * needs to be acquired.
     */
    DWORD r = WaitForSingleObject(rwlock->state_.write_semaphore_, 0);
    if (r == WAIT_OBJECT_0)
      rwlock->state_.num_readers_++;
    else if (r == WAIT_TIMEOUT)
      err = UV_EBUSY;
    else if (r == WAIT_FAILED)
      uv_fatal_error(GetLastError(), "WaitForSingleObject");

  } else {
    /* The write lock has already been acquired because there are other
     * active readers.
     */
    rwlock->state_.num_readers_++;
  }

  LeaveCriticalSection(&rwlock->state_.num_readers_lock_);
  return err;
}
312 
313 
/* Release a read lock. The last reader out returns the write semaphore,
 * letting a pending writer (or a future first reader) proceed. */
void uv_rwlock_rdunlock(uv_rwlock_t* rwlock) {
  EnterCriticalSection(&rwlock->state_.num_readers_lock_);

  rwlock->state_.num_readers_--;
  if (rwlock->state_.num_readers_ == 0 &&
      !ReleaseSemaphore(rwlock->state_.write_semaphore_, 1, NULL))
    uv_fatal_error(GetLastError(), "ReleaseSemaphore");

  LeaveCriticalSection(&rwlock->state_.num_readers_lock_);
}
324 
325 
/* Acquire the lock for writing by taking the binary write semaphore;
 * blocks while any reader or another writer holds the lock. */
void uv_rwlock_wrlock(uv_rwlock_t* rwlock) {
  DWORD wait_result;

  wait_result =
      WaitForSingleObject(rwlock->state_.write_semaphore_, INFINITE);
  if (wait_result != WAIT_OBJECT_0)
    uv_fatal_error(GetLastError(), "WaitForSingleObject");
}
331 
332 
/* Try to acquire the write lock without blocking. Returns 0 on success,
 * UV_EBUSY when the lock is held; any other wait outcome is fatal. */
int uv_rwlock_trywrlock(uv_rwlock_t* rwlock) {
  switch (WaitForSingleObject(rwlock->state_.write_semaphore_, 0)) {
    case WAIT_OBJECT_0:
      return 0;
    case WAIT_TIMEOUT:
      return UV_EBUSY;
    default:
      uv_fatal_error(GetLastError(), "WaitForSingleObject");
  }
}
342 
343 
/* Release the write lock by returning the binary write semaphore. */
void uv_rwlock_wrunlock(uv_rwlock_t* rwlock) {
  if (!ReleaseSemaphore(rwlock->state_.write_semaphore_, 1, NULL))
    uv_fatal_error(GetLastError(), "ReleaseSemaphore");
}
348 
349 
/* Create a counting semaphore with the given initial value. Returns 0 on
 * success or a translated system error. */
int uv_sem_init(uv_sem_t* sem, unsigned int value) {
  *sem = CreateSemaphore(NULL, value, INT_MAX, NULL);
  if (*sem == NULL)
    return uv_translate_sys_error(GetLastError());
  return 0;
}
357 
358 
/* Destroy the semaphore. Aborts if the handle cannot be closed (a
 * programming error), since there is no way to report failure. */
void uv_sem_destroy(uv_sem_t* sem) {
  if (!CloseHandle(*sem))
    abort();
}
363 
364 
/* Increment the semaphore count by one; aborts on failure. */
void uv_sem_post(uv_sem_t* sem) {
  if (!ReleaseSemaphore(*sem, 1, NULL))
    abort();
}
369 
370 
/* Block until the semaphore can be decremented; aborts on wait failure. */
void uv_sem_wait(uv_sem_t* sem) {
  if (WaitForSingleObject(*sem, INFINITE) != WAIT_OBJECT_0)
    abort();
}
375 
376 
/* Decrement the semaphore if possible without blocking. Returns 0 when
 * acquired, UV_EAGAIN when the count is zero; aborts on wait failure. */
int uv_sem_trywait(uv_sem_t* sem) {
  switch (WaitForSingleObject(*sem, 0)) {
    case WAIT_OBJECT_0:
      return 0;
    case WAIT_TIMEOUT:
      return UV_EAGAIN;
    default:
      abort();
      return -1; /* Satisfy the compiler. */
  }
}
389 
390 
/* Initialize a condition variable. Always returns 0:
 * InitializeConditionVariable() provides no error return. */
int uv_cond_init(uv_cond_t* cond) {
  InitializeConditionVariable(&cond->cond_var);
  return 0;
}
395 
396 
/* Win32 condition variables need no teardown; this exists only for API
 * symmetry with the POSIX implementation. */
void uv_cond_destroy(uv_cond_t* cond) {
  (void) cond;  /* Unused. */
}
401 
402 
/* Wake one thread waiting on the condition variable, if any. */
void uv_cond_signal(uv_cond_t* cond) {
  WakeConditionVariable(&cond->cond_var);
}
406 
407 
/* Wake all threads currently waiting on the condition variable. */
void uv_cond_broadcast(uv_cond_t* cond) {
  WakeAllConditionVariable(&cond->cond_var);
}
411 
412 
/* Atomically release `mutex` and wait on the condition variable; the
 * mutex is re-acquired before returning. An INFINITE wait cannot
 * legitimately time out, so any failure aborts. */
void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex) {
  if (!SleepConditionVariableCS(&cond->cond_var, mutex, INFINITE))
    abort();
}
417 
/* Wait on the condition variable with a relative timeout given in
 * nanoseconds. Returns 0 when signaled or UV_ETIMEDOUT on timeout; any
 * other failure aborts. The mutex must be held on entry; it is released
 * during the wait and re-acquired before returning. */
int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) {
  /* Convert nanoseconds to milliseconds with integer arithmetic. The old
   * `timeout / 1e6` routed the uint64_t through a double, losing precision
   * for values above 2^53 nanoseconds. */
  DWORD ms = (DWORD)(timeout / 1000000);

  if (SleepConditionVariableCS(&cond->cond_var, mutex, ms))
    return 0;
  if (GetLastError() != ERROR_TIMEOUT)
    abort();
  return UV_ETIMEDOUT;
}
425 
426 
/* Initialize a reusable two-turnstile barrier for `count` threads:
 * turnstile1 starts closed (0) and gates arrival, turnstile2 starts open
 * (1) and gates departure. Returns 0 or a UV_* error, releasing any
 * partially-created resources on failure. */
int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
  int err;

  barrier->n = count;
  barrier->count = 0;

  err = uv_mutex_init(&barrier->mutex);
  if (err)
    return err;

  err = uv_sem_init(&barrier->turnstile1, 0);
  if (err)
    goto fail_mutex;

  err = uv_sem_init(&barrier->turnstile2, 1);
  if (err)
    goto fail_turnstile1;

  return 0;

fail_turnstile1:
  uv_sem_destroy(&barrier->turnstile1);
fail_mutex:
  uv_mutex_destroy(&barrier->mutex);
  return err;
}
454 
455 
/* Destroy a barrier; resources are released in reverse order of their
 * creation in uv_barrier_init(). No thread may be waiting in it. */
void uv_barrier_destroy(uv_barrier_t* barrier) {
  uv_sem_destroy(&barrier->turnstile2);
  uv_sem_destroy(&barrier->turnstile1);
  uv_mutex_destroy(&barrier->mutex);
}
461 
462 
/* Wait until `n` threads have reached the barrier. Returns nonzero in
 * exactly one (arbitrary) thread per cycle and zero in the others. This
 * is the classic reusable two-turnstile construction: turnstile1 releases
 * threads once all have arrived; turnstile2 keeps any thread from
 * re-entering the barrier before everyone has left the current cycle. */
int uv_barrier_wait(uv_barrier_t* barrier) {
  int serial_thread;

  uv_mutex_lock(&barrier->mutex);
  if (++barrier->count == barrier->n) {
    /* Last to arrive: close the exit turnstile, open the arrival one. */
    uv_sem_wait(&barrier->turnstile2);
    uv_sem_post(&barrier->turnstile1);
  }
  uv_mutex_unlock(&barrier->mutex);

  /* Pass through turnstile1; each thread re-posts so the next one can
   * follow (a rolling release of all n threads). */
  uv_sem_wait(&barrier->turnstile1);
  uv_sem_post(&barrier->turnstile1);

  uv_mutex_lock(&barrier->mutex);
  serial_thread = (--barrier->count == 0);
  if (serial_thread) {
    /* Last to leave: close turnstile1 and reopen turnstile2, resetting
     * the barrier for the next cycle. */
    uv_sem_wait(&barrier->turnstile1);
    uv_sem_post(&barrier->turnstile2);
  }
  uv_mutex_unlock(&barrier->mutex);

  /* Pass through turnstile2 in the same rolling fashion. */
  uv_sem_wait(&barrier->turnstile2);
  uv_sem_post(&barrier->turnstile2);
  return serial_thread;
}
488 
489 
/* Allocate a TLS slot. Returns 0 on success, or UV_ENOMEM when the
 * process has run out of TLS indexes. */
int uv_key_create(uv_key_t* key) {
  DWORD index;

  index = TlsAlloc();
  key->tls_index = index;
  if (index == TLS_OUT_OF_INDEXES)
    return UV_ENOMEM;
  return 0;
}
496 
497 
/* Free a TLS slot created by uv_key_create(); aborts on an invalid index.
 * The index is poisoned afterwards to catch use-after-delete. */
void uv_key_delete(uv_key_t* key) {
  if (TlsFree(key->tls_index) == FALSE)
    abort();
  key->tls_index = TLS_OUT_OF_INDEXES;
}
503 
504 
/* Read the calling thread's value for this key. TlsGetValue() returns
 * NULL both for "the stored value is NULL" and for errors, so
 * GetLastError() is consulted to tell them apart; real failures abort. */
void* uv_key_get(uv_key_t* key) {
  void* value;

  value = TlsGetValue(key->tls_index);
  if (value == NULL)
    if (GetLastError() != ERROR_SUCCESS)
      abort();

  return value;
}
515 
516 
/* Store `value` in the calling thread's slot for this key; aborts on
 * failure (e.g. an invalid index). */
void uv_key_set(uv_key_t* key, void* value) {
  if (TlsSetValue(key->tls_index, value) == FALSE)
    abort();
}
521