xref: /netbsd/external/mit/libuv/dist/src/uv-common.c (revision b29f2fbf)
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv.h"
#include "uv-common.h"

#include <assert.h>
#include <errno.h>
#include <stdarg.h>
#include <stddef.h> /* NULL */
#include <stdio.h>
#include <stdlib.h> /* malloc */
#include <string.h> /* memset */

#if defined(_WIN32)
# include <malloc.h> /* malloc */
#else
# include <net/if.h> /* if_nametoindex */
# include <sys/un.h> /* AF_UNIX, sockaddr_un */
#endif


typedef struct {
  uv_malloc_func local_malloc;
  uv_realloc_func local_realloc;
  uv_calloc_func local_calloc;
  uv_free_func local_free;
} uv__allocator_t;

static uv__allocator_t uv__allocator = {
  malloc,
  realloc,
  calloc,
  free,
};

char* uv__strdup(const char* s) {
  size_t len = strlen(s) + 1;
  char* m = uv__malloc(len);
  if (m == NULL)
    return NULL;
  return memcpy(m, s, len);
}

char* uv__strndup(const char* s, size_t n) {
  char* m;
  size_t len = strlen(s);
  if (n < len)
    len = n;
  m = uv__malloc(len + 1);
  if (m == NULL)
    return NULL;
  m[len] = '\0';
  return memcpy(m, s, len);
}

void* uv__malloc(size_t size) {
  if (size > 0)
    return uv__allocator.local_malloc(size);
  return NULL;
}

void uv__free(void* ptr) {
  int saved_errno;

  /* Libuv expects that free() does not clobber errno.  The system allocator
   * honors that assumption but custom allocators may not be so careful.
   */
  saved_errno = errno;
  uv__allocator.local_free(ptr);
  errno = saved_errno;
}

void* uv__calloc(size_t count, size_t size) {
  return uv__allocator.local_calloc(count, size);
}

void* uv__realloc(void* ptr, size_t size) {
  if (size > 0)
    return uv__allocator.local_realloc(ptr, size);
  uv__free(ptr);
  return NULL;
}

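/* uv__reallocf() behaves like BSD's reallocf(3): when the resize fails and
 * the requested size is non-zero, the original block is freed so the caller
 * cannot leak it by overwriting its only pointer with NULL.  For size == 0,
 * uv__realloc() above has already released the block.
 */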
void* uv__reallocf(void* ptr, size_t size) {
  void* newptr;

  newptr = uv__realloc(ptr, size);
  if (newptr == NULL)
    if (size > 0)
      uv__free(ptr);

  return newptr;
}

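/* Installs user-supplied allocation functions for all subsequent libuv
 * allocations; all four callbacks must be non-NULL or UV_EINVAL is returned.
 * Illustrative sketch (my_malloc, my_realloc, my_calloc and my_free are
 * hypothetical functions with the matching uv_*_func signatures):
 *
 *   if (uv_replace_allocator(my_malloc, my_realloc, my_calloc, my_free) != 0)
 *     abort();
 *
 * The swap is not synchronized with allocations already in flight, so it is
 * normally done once, before any other libuv call.
 */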
int uv_replace_allocator(uv_malloc_func malloc_func,
                         uv_realloc_func realloc_func,
                         uv_calloc_func calloc_func,
                         uv_free_func free_func) {
  if (malloc_func == NULL || realloc_func == NULL ||
      calloc_func == NULL || free_func == NULL) {
    return UV_EINVAL;
  }

  uv__allocator.local_malloc = malloc_func;
  uv__allocator.local_realloc = realloc_func;
  uv__allocator.local_calloc = calloc_func;
  uv__allocator.local_free = free_func;

  return 0;
}

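/* The UV_HANDLE_TYPE_MAP and UV_REQ_TYPE_MAP X-macros expand XX once per
 * handle/request type, so each switch below compiles into one
 * "case UV_TCP: return sizeof(uv_tcp_t);" style arm per type.  Unknown types
 * fall through to the default arm, which yields (size_t) -1.
 */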
#define XX(uc, lc) case UV_##uc: return sizeof(uv_##lc##_t);

size_t uv_handle_size(uv_handle_type type) {
  switch (type) {
    UV_HANDLE_TYPE_MAP(XX)
    default:
      return -1;
  }
}

size_t uv_req_size(uv_req_type type) {
  switch(type) {
    UV_REQ_TYPE_MAP(XX)
    default:
      return -1;
  }
}

#undef XX


size_t uv_loop_size(void) {
  return sizeof(uv_loop_t);
}


uv_buf_t uv_buf_init(char* base, unsigned int len) {
  uv_buf_t buf;
  buf.base = base;
  buf.len = len;
  return buf;
}


static const char* uv__unknown_err_code(int err) {
  char buf[32];
  char* copy;

  snprintf(buf, sizeof(buf), "Unknown system error %d", err);
  copy = uv__strdup(buf);

  return copy != NULL ? copy : "Unknown system error";
}

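/* uv_err_name_r() and uv_strerror_r() write into a caller-supplied buffer;
 * the non-_r variants return static strings for known UV_E* codes (generated
 * below from the UV_ERRNO_MAP X-macro) and fall back to uv__unknown_err_code()
 * for anything else.  Note that the fallback string is heap-allocated and
 * never freed, so the _r variants are preferable when the code may be unknown.
 */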
#define UV_ERR_NAME_GEN_R(name, _) \
case UV_## name: \
  uv__strscpy(buf, #name, buflen); break;
char* uv_err_name_r(int err, char* buf, size_t buflen) {
  switch (err) {
    UV_ERRNO_MAP(UV_ERR_NAME_GEN_R)
    default: snprintf(buf, buflen, "Unknown system error %d", err);
  }
  return buf;
}
#undef UV_ERR_NAME_GEN_R


#define UV_ERR_NAME_GEN(name, _) case UV_ ## name: return #name;
const char* uv_err_name(int err) {
  switch (err) {
    UV_ERRNO_MAP(UV_ERR_NAME_GEN)
  }
  return uv__unknown_err_code(err);
}
#undef UV_ERR_NAME_GEN


#define UV_STRERROR_GEN_R(name, msg) \
case UV_ ## name: \
  snprintf(buf, buflen, "%s", msg); break;
char* uv_strerror_r(int err, char* buf, size_t buflen) {
  switch (err) {
    UV_ERRNO_MAP(UV_STRERROR_GEN_R)
    default: snprintf(buf, buflen, "Unknown system error %d", err);
  }
  return buf;
}
#undef UV_STRERROR_GEN_R


#define UV_STRERROR_GEN(name, msg) case UV_ ## name: return msg;
const char* uv_strerror(int err) {
  switch (err) {
    UV_ERRNO_MAP(UV_STRERROR_GEN)
  }
  return uv__unknown_err_code(err);
}
#undef UV_STRERROR_GEN


int uv_ip4_addr(const char* ip, int port, struct sockaddr_in* addr) {
  memset(addr, 0, sizeof(*addr));
  addr->sin_family = AF_INET;
  addr->sin_port = htons(port);
#ifdef SIN6_LEN
  addr->sin_len = sizeof(*addr);
#endif
  return uv_inet_pton(AF_INET, ip, &(addr->sin_addr.s_addr));
}

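/* uv_ip6_addr() accepts an optional "%zone" suffix (e.g. "fe80::1%eth0" as a
 * purely illustrative address/interface pair).  The address part is copied
 * into a local buffer and the zone is translated to a scope id with
 * if_nametoindex() on Unix or atoi() on Windows; an unknown interface yields
 * scope id 0 and is silently ignored, as noted below.
 */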
int uv_ip6_addr(const char* ip, int port, struct sockaddr_in6* addr) {
  char address_part[40];
  size_t address_part_size;
  const char* zone_index;

  memset(addr, 0, sizeof(*addr));
  addr->sin6_family = AF_INET6;
  addr->sin6_port = htons(port);
#ifdef SIN6_LEN
  addr->sin6_len = sizeof(*addr);
#endif

  zone_index = strchr(ip, '%');
  if (zone_index != NULL) {
    address_part_size = zone_index - ip;
    if (address_part_size >= sizeof(address_part))
      address_part_size = sizeof(address_part) - 1;

    memcpy(address_part, ip, address_part_size);
    address_part[address_part_size] = '\0';
    ip = address_part;

    zone_index++; /* skip '%' */
    /* NOTE: unknown interface (id=0) is silently ignored */
#ifdef _WIN32
    addr->sin6_scope_id = atoi(zone_index);
#else
    addr->sin6_scope_id = if_nametoindex(zone_index);
#endif
  }

  return uv_inet_pton(AF_INET6, ip, &addr->sin6_addr);
}


int uv_ip4_name(const struct sockaddr_in* src, char* dst, size_t size) {
  return uv_inet_ntop(AF_INET, &src->sin_addr, dst, size);
}


int uv_ip6_name(const struct sockaddr_in6* src, char* dst, size_t size) {
  return uv_inet_ntop(AF_INET6, &src->sin6_addr, dst, size);
}


int uv_ip_name(const struct sockaddr *src, char *dst, size_t size) {
  switch (src->sa_family) {
  case AF_INET:
    return uv_inet_ntop(AF_INET, &((struct sockaddr_in *)src)->sin_addr,
                        dst, size);
  case AF_INET6:
    return uv_inet_ntop(AF_INET6, &((struct sockaddr_in6 *)src)->sin6_addr,
                        dst, size);
  default:
    return UV_EAFNOSUPPORT;
  }
}


int uv_tcp_bind(uv_tcp_t* handle,
                const struct sockaddr* addr,
                unsigned int flags) {
  unsigned int addrlen;

  if (handle->type != UV_TCP)
    return UV_EINVAL;
  if (uv__is_closing(handle)) {
    return UV_EINVAL;
  }
  if (addr->sa_family == AF_INET)
    addrlen = sizeof(struct sockaddr_in);
  else if (addr->sa_family == AF_INET6)
    addrlen = sizeof(struct sockaddr_in6);
  else
    return UV_EINVAL;

  return uv__tcp_bind(handle, addr, addrlen, flags);
}

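/* For uv_udp_init_ex() the flags word is split: the low 8 bits carry the
 * address family (AF_INET, AF_INET6 or AF_UNSPEC) and the remaining bits
 * carry extra options, currently only UV_UDP_RECVMMSG.  Illustrative call
 * (loop and handle are assumed to exist):
 *
 *   uv_udp_init_ex(loop, &handle, AF_INET | UV_UDP_RECVMMSG);
 *
 * Any other extra bit is rejected with UV_EINVAL.
 */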
int uv_udp_init_ex(uv_loop_t* loop, uv_udp_t* handle, unsigned flags) {
  unsigned extra_flags;
  int domain;
  int rc;

  /* Use the lower 8 bits for the domain. */
  domain = flags & 0xFF;
  if (domain != AF_INET && domain != AF_INET6 && domain != AF_UNSPEC)
    return UV_EINVAL;

  /* Use the higher bits for extra flags. */
  extra_flags = flags & ~0xFF;
  if (extra_flags & ~UV_UDP_RECVMMSG)
    return UV_EINVAL;

  rc = uv__udp_init_ex(loop, handle, flags, domain);

  if (rc == 0)
    if (extra_flags & UV_UDP_RECVMMSG)
      handle->flags |= UV_HANDLE_UDP_RECVMMSG;

  return rc;
}


int uv_udp_init(uv_loop_t* loop, uv_udp_t* handle) {
  return uv_udp_init_ex(loop, handle, AF_UNSPEC);
}


int uv_udp_bind(uv_udp_t* handle,
                const struct sockaddr* addr,
                unsigned int flags) {
  unsigned int addrlen;

  if (handle->type != UV_UDP)
    return UV_EINVAL;

  if (addr->sa_family == AF_INET)
    addrlen = sizeof(struct sockaddr_in);
  else if (addr->sa_family == AF_INET6)
    addrlen = sizeof(struct sockaddr_in6);
  else
    return UV_EINVAL;

  return uv__udp_bind(handle, addr, addrlen, flags);
}


int uv_tcp_connect(uv_connect_t* req,
                   uv_tcp_t* handle,
                   const struct sockaddr* addr,
                   uv_connect_cb cb) {
  unsigned int addrlen;

  if (handle->type != UV_TCP)
    return UV_EINVAL;

  if (addr->sa_family == AF_INET)
    addrlen = sizeof(struct sockaddr_in);
  else if (addr->sa_family == AF_INET6)
    addrlen = sizeof(struct sockaddr_in6);
  else
    return UV_EINVAL;

  return uv__tcp_connect(req, handle, addr, addrlen, cb);
}


int uv_udp_connect(uv_udp_t* handle, const struct sockaddr* addr) {
  unsigned int addrlen;

  if (handle->type != UV_UDP)
    return UV_EINVAL;

  /* Disconnect the handle */
  if (addr == NULL) {
    if (!(handle->flags & UV_HANDLE_UDP_CONNECTED))
      return UV_ENOTCONN;

    return uv__udp_disconnect(handle);
  }

  if (addr->sa_family == AF_INET)
    addrlen = sizeof(struct sockaddr_in);
  else if (addr->sa_family == AF_INET6)
    addrlen = sizeof(struct sockaddr_in6);
  else
    return UV_EINVAL;

  if (handle->flags & UV_HANDLE_UDP_CONNECTED)
    return UV_EISCONN;

  return uv__udp_connect(handle, addr, addrlen);
}


int uv__udp_is_connected(uv_udp_t* handle) {
  struct sockaddr_storage addr;
  int addrlen;
  if (handle->type != UV_UDP)
    return 0;

  addrlen = sizeof(addr);
  if (uv_udp_getpeername(handle, (struct sockaddr*) &addr, &addrlen) != 0)
    return 0;

  return addrlen > 0;
}

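/* Shared validation for uv_udp_send() and uv_udp_try_send(): returns the
 * sockaddr length matching addr's family (0 when addr is NULL and the handle
 * is already connected), or a negative UV_E* error such as UV_EISCONN when a
 * destination is given for a connected handle and UV_EDESTADDRREQ when one is
 * missing for an unconnected handle.
 */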
int uv__udp_check_before_send(uv_udp_t* handle, const struct sockaddr* addr) {
  unsigned int addrlen;

  if (handle->type != UV_UDP)
    return UV_EINVAL;

  if (addr != NULL && (handle->flags & UV_HANDLE_UDP_CONNECTED))
    return UV_EISCONN;

  if (addr == NULL && !(handle->flags & UV_HANDLE_UDP_CONNECTED))
    return UV_EDESTADDRREQ;

  if (addr != NULL) {
    if (addr->sa_family == AF_INET)
      addrlen = sizeof(struct sockaddr_in);
    else if (addr->sa_family == AF_INET6)
      addrlen = sizeof(struct sockaddr_in6);
#if defined(AF_UNIX) && !defined(_WIN32)
    else if (addr->sa_family == AF_UNIX)
      addrlen = sizeof(struct sockaddr_un);
#endif
    else
      return UV_EINVAL;
  } else {
    addrlen = 0;
  }

  return addrlen;
}


int uv_udp_send(uv_udp_send_t* req,
                uv_udp_t* handle,
                const uv_buf_t bufs[],
                unsigned int nbufs,
                const struct sockaddr* addr,
                uv_udp_send_cb send_cb) {
  int addrlen;

  addrlen = uv__udp_check_before_send(handle, addr);
  if (addrlen < 0)
    return addrlen;

  return uv__udp_send(req, handle, bufs, nbufs, addr, addrlen, send_cb);
}


int uv_udp_try_send(uv_udp_t* handle,
                    const uv_buf_t bufs[],
                    unsigned int nbufs,
                    const struct sockaddr* addr) {
  int addrlen;

  addrlen = uv__udp_check_before_send(handle, addr);
  if (addrlen < 0)
    return addrlen;

  return uv__udp_try_send(handle, bufs, nbufs, addr, addrlen);
}


int uv_udp_recv_start(uv_udp_t* handle,
                      uv_alloc_cb alloc_cb,
                      uv_udp_recv_cb recv_cb) {
  if (handle->type != UV_UDP || alloc_cb == NULL || recv_cb == NULL)
    return UV_EINVAL;
  else
    return uv__udp_recv_start(handle, alloc_cb, recv_cb);
}


int uv_udp_recv_stop(uv_udp_t* handle) {
  if (handle->type != UV_UDP)
    return UV_EINVAL;
  else
    return uv__udp_recv_stop(handle);
}

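/* uv_walk() first moves the entire handle queue onto a local head, then pops
 * one handle at a time, re-inserts it at the tail of loop->handle_queue and
 * only then invokes walk_cb.  This keeps the traversal well defined when the
 * callback mutates the handle list (e.g. closes the handle it was given), and
 * handles created during the walk are not visited.  Internal handles are
 * skipped.
 */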
void uv_walk(uv_loop_t* loop, uv_walk_cb walk_cb, void* arg) {
  QUEUE queue;
  QUEUE* q;
  uv_handle_t* h;

  QUEUE_MOVE(&loop->handle_queue, &queue);
  while (!QUEUE_EMPTY(&queue)) {
    q = QUEUE_HEAD(&queue);
    h = QUEUE_DATA(q, uv_handle_t, handle_queue);

    QUEUE_REMOVE(q);
    QUEUE_INSERT_TAIL(&loop->handle_queue, q);

    if (h->flags & UV_HANDLE_INTERNAL) continue;
    walk_cb(h, arg);
  }
}

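/* Debug helper behind uv_print_all_handles()/uv_print_active_handles().  Each
 * line starts with a three-letter flag column where R, A and I stand for
 * referenced, active and internal respectively (a dash means the flag is
 * clear); the "R-"[cond] expressions below index into a two-character string
 * literal to pick the letter or the dash.
 */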
static void uv__print_handles(uv_loop_t* loop, int only_active, FILE* stream) {
  const char* type;
  QUEUE* q;
  uv_handle_t* h;

  if (loop == NULL)
    loop = uv_default_loop();

  QUEUE_FOREACH(q, &loop->handle_queue) {
    h = QUEUE_DATA(q, uv_handle_t, handle_queue);

    if (only_active && !uv__is_active(h))
      continue;

    switch (h->type) {
#define X(uc, lc) case UV_##uc: type = #lc; break;
      UV_HANDLE_TYPE_MAP(X)
#undef X
      default: type = "<unknown>";
    }

    fprintf(stream,
            "[%c%c%c] %-8s %p\n",
            "R-"[!(h->flags & UV_HANDLE_REF)],
            "A-"[!(h->flags & UV_HANDLE_ACTIVE)],
            "I-"[!(h->flags & UV_HANDLE_INTERNAL)],
            type,
            (void*)h);
  }
}


void uv_print_all_handles(uv_loop_t* loop, FILE* stream) {
  uv__print_handles(loop, 0, stream);
}


void uv_print_active_handles(uv_loop_t* loop, FILE* stream) {
  uv__print_handles(loop, 1, stream);
}


void uv_ref(uv_handle_t* handle) {
  uv__handle_ref(handle);
}


void uv_unref(uv_handle_t* handle) {
  uv__handle_unref(handle);
}


int uv_has_ref(const uv_handle_t* handle) {
  return uv__has_ref(handle);
}


void uv_stop(uv_loop_t* loop) {
  loop->stop_flag = 1;
}


uint64_t uv_now(const uv_loop_t* loop) {
  return loop->time;
}



size_t uv__count_bufs(const uv_buf_t bufs[], unsigned int nbufs) {
  unsigned int i;
  size_t bytes;

  bytes = 0;
  for (i = 0; i < nbufs; i++)
    bytes += (size_t) bufs[i].len;

  return bytes;
}

int uv_recv_buffer_size(uv_handle_t* handle, int* value) {
  return uv__socket_sockopt(handle, SO_RCVBUF, value);
}

int uv_send_buffer_size(uv_handle_t* handle, int *value) {
  return uv__socket_sockopt(handle, SO_SNDBUF, value);
}

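/* uv_fs_event_getpath() copies the watched path into the caller's buffer.
 * On entry *size is the buffer capacity; if it is too small the call fails
 * with UV_ENOBUFS and *size is set to the required capacity (including the
 * terminating NUL), so a caller can retry with a larger buffer.
 */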
int uv_fs_event_getpath(uv_fs_event_t* handle, char* buffer, size_t* size) {
  size_t required_len;

  if (!uv__is_active(handle)) {
    *size = 0;
    return UV_EINVAL;
  }

  required_len = strlen(handle->path);
  if (required_len >= *size) {
    *size = required_len + 1;
    return UV_ENOBUFS;
  }

  memcpy(buffer, handle->path, required_len);
  *size = required_len;
  buffer[required_len] = '\0';

  return 0;
}

/* The windows implementation does not have the same structure layout as
 * the unix implementation (nbufs is not directly inside req but is
 * contained in a nested union/struct) so this function locates it.
*/
static unsigned int* uv__get_nbufs(uv_fs_t* req) {
#ifdef _WIN32
  return &req->fs.info.nbufs;
#else
  return &req->nbufs;
#endif
}

/* uv_fs_scandir() uses the system allocator to allocate memory on non-Windows
 * systems. So, the memory should be released using free(). On Windows,
 * uv__malloc() is used, so use uv__free() to free memory.
*/
#ifdef _WIN32
# define uv__fs_scandir_free uv__free
#else
# define uv__fs_scandir_free free
#endif

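/* uv__fs_scandir_cleanup() and uv_fs_scandir_next() share the request's nbufs
 * field as a cursor into the uv__dirent_t array stored in req->ptr: next()
 * frees the entry returned by the previous call before handing out the next
 * one, and cleanup() releases the last handed-out entry, every entry that was
 * never consumed, and the array itself.
 */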
void uv__fs_scandir_cleanup(uv_fs_t* req) {
  uv__dirent_t** dents;

  unsigned int* nbufs = uv__get_nbufs(req);

  dents = req->ptr;
  if (*nbufs > 0 && *nbufs != (unsigned int) req->result)
    (*nbufs)--;
  for (; *nbufs < (unsigned int) req->result; (*nbufs)++)
    uv__fs_scandir_free(dents[*nbufs]);

  uv__fs_scandir_free(req->ptr);
  req->ptr = NULL;
}


int uv_fs_scandir_next(uv_fs_t* req, uv_dirent_t* ent) {
  uv__dirent_t** dents;
  uv__dirent_t* dent;
  unsigned int* nbufs;

  /* Check to see if req passed */
  if (req->result < 0)
    return req->result;

  /* Ptr will be null if req was canceled or no files found */
  if (!req->ptr)
    return UV_EOF;

  nbufs = uv__get_nbufs(req);
  assert(nbufs);

  dents = req->ptr;

  /* Free previous entity */
  if (*nbufs > 0)
    uv__fs_scandir_free(dents[*nbufs - 1]);

  /* End was already reached */
  if (*nbufs == (unsigned int) req->result) {
    uv__fs_scandir_free(dents);
    req->ptr = NULL;
    return UV_EOF;
  }

  dent = dents[(*nbufs)++];

  ent->name = dent->d_name;
  ent->type = uv__fs_get_dirent_type(dent);

  return 0;
}

uv_dirent_type_t uv__fs_get_dirent_type(uv__dirent_t* dent) {
  uv_dirent_type_t type;

#ifdef HAVE_DIRENT_TYPES
  switch (dent->d_type) {
    case UV__DT_DIR:
      type = UV_DIRENT_DIR;
      break;
    case UV__DT_FILE:
      type = UV_DIRENT_FILE;
      break;
    case UV__DT_LINK:
      type = UV_DIRENT_LINK;
      break;
    case UV__DT_FIFO:
      type = UV_DIRENT_FIFO;
      break;
    case UV__DT_SOCKET:
      type = UV_DIRENT_SOCKET;
      break;
    case UV__DT_CHAR:
      type = UV_DIRENT_CHAR;
      break;
    case UV__DT_BLOCK:
      type = UV_DIRENT_BLOCK;
      break;
    default:
      type = UV_DIRENT_UNKNOWN;
  }
#else
  type = UV_DIRENT_UNKNOWN;
#endif

  return type;
}

void uv__fs_readdir_cleanup(uv_fs_t* req) {
  uv_dir_t* dir;
  uv_dirent_t* dirents;
  int i;

  if (req->ptr == NULL)
    return;

  dir = req->ptr;
  dirents = dir->dirents;
  req->ptr = NULL;

  if (dirents == NULL)
    return;

  for (i = 0; i < req->result; ++i) {
    uv__free((char*) dirents[i].name);
    dirents[i].name = NULL;
  }
}


int uv_loop_configure(uv_loop_t* loop, uv_loop_option option, ...) {
  va_list ap;
  int err;

  va_start(ap, option);
  /* Any platform-agnostic options should be handled here. */
  err = uv__loop_configure(loop, option, ap);
  va_end(ap);

  return err;
}


static uv_loop_t default_loop_struct;
static uv_loop_t* default_loop_ptr;


uv_loop_t* uv_default_loop(void) {
  if (default_loop_ptr != NULL)
    return default_loop_ptr;

  if (uv_loop_init(&default_loop_struct))
    return NULL;

  default_loop_ptr = &default_loop_struct;
  return default_loop_ptr;
}


uv_loop_t* uv_loop_new(void) {
  uv_loop_t* loop;

  loop = uv__malloc(sizeof(*loop));
  if (loop == NULL)
    return NULL;

  if (uv_loop_init(loop)) {
    uv__free(loop);
    return NULL;
  }

  return loop;
}


int uv_loop_close(uv_loop_t* loop) {
  QUEUE* q;
  uv_handle_t* h;
#ifndef NDEBUG
  void* saved_data;
#endif

  if (uv__has_active_reqs(loop))
    return UV_EBUSY;

  QUEUE_FOREACH(q, &loop->handle_queue) {
    h = QUEUE_DATA(q, uv_handle_t, handle_queue);
    if (!(h->flags & UV_HANDLE_INTERNAL))
      return UV_EBUSY;
  }

  uv__loop_close(loop);

#ifndef NDEBUG
  saved_data = loop->data;
  memset(loop, -1, sizeof(*loop));
  loop->data = saved_data;
#endif
  if (loop == default_loop_ptr)
    default_loop_ptr = NULL;

  return 0;
}


void uv_loop_delete(uv_loop_t* loop) {
  uv_loop_t* default_loop;
  int err;

  default_loop = default_loop_ptr;

  err = uv_loop_close(loop);
  (void) err;    /* Squelch compiler warnings. */
  assert(err == 0);
  if (loop != default_loop)
    uv__free(loop);
}


int uv_read_start(uv_stream_t* stream,
                  uv_alloc_cb alloc_cb,
                  uv_read_cb read_cb) {
  if (stream == NULL || alloc_cb == NULL || read_cb == NULL)
    return UV_EINVAL;

  if (stream->flags & UV_HANDLE_CLOSING)
    return UV_EINVAL;

  if (stream->flags & UV_HANDLE_READING)
    return UV_EALREADY;

  if (!(stream->flags & UV_HANDLE_READABLE))
    return UV_ENOTCONN;

  return uv__read_start(stream, alloc_cb, read_cb);
}


void uv_os_free_environ(uv_env_item_t* envitems, int count) {
  int i;

  for (i = 0; i < count; i++) {
    uv__free(envitems[i].name);
  }

  uv__free(envitems);
}


void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
  int i;

  for (i = 0; i < count; i++)
    uv__free(cpu_infos[i].model);

  uv__free(cpu_infos);
}


/* Also covers __clang__ and __INTEL_COMPILER. Disabled on Windows because
 * threads have already been forcibly terminated by the operating system
 * by the time destructors run, ergo, it's not safe to try to clean them up.
 */
#if defined(__GNUC__) && !defined(_WIN32)
__attribute__((destructor))
#endif
void uv_library_shutdown(void) {
  static int was_shutdown;

  if (uv__load_relaxed(&was_shutdown))
    return;

  uv__process_title_cleanup();
  uv__signal_cleanup();
#ifdef __MVS__
  /* TODO(itodorov) - zos: revisit when Woz compiler is available. */
  uv__os390_cleanup();
#else
  uv__threadpool_cleanup();
#endif
  uv__store_relaxed(&was_shutdown, 1);
}

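/* Idle-time bookkeeping, enabled via uv_loop_configure(loop,
 * UV_METRICS_IDLE_TIME).  uv__metrics_set_provider_entry_time() stamps the
 * moment the loop blocks in its I/O provider (e.g. epoll/kqueue/IOCP) and
 * uv__metrics_update_idle_time() adds the elapsed time to provider_idle_time
 * when it wakes up; uv_metrics_idle_time() reports the accumulated total,
 * extended up to "now" if the loop is currently blocked.
 */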
void uv__metrics_update_idle_time(uv_loop_t* loop) {
  uv__loop_metrics_t* loop_metrics;
  uint64_t entry_time;
  uint64_t exit_time;

  if (!(uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME))
    return;

  loop_metrics = uv__get_loop_metrics(loop);

  /* The thread running uv__metrics_update_idle_time() is always the same
   * thread that sets provider_entry_time. So it's unnecessary to lock before
   * retrieving this value.
   */
  if (loop_metrics->provider_entry_time == 0)
    return;

  exit_time = uv_hrtime();

  uv_mutex_lock(&loop_metrics->lock);
  entry_time = loop_metrics->provider_entry_time;
  loop_metrics->provider_entry_time = 0;
  loop_metrics->provider_idle_time += exit_time - entry_time;
  uv_mutex_unlock(&loop_metrics->lock);
}


void uv__metrics_set_provider_entry_time(uv_loop_t* loop) {
  uv__loop_metrics_t* loop_metrics;
  uint64_t now;

  if (!(uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME))
    return;

  now = uv_hrtime();
  loop_metrics = uv__get_loop_metrics(loop);
  uv_mutex_lock(&loop_metrics->lock);
  loop_metrics->provider_entry_time = now;
  uv_mutex_unlock(&loop_metrics->lock);
}


uint64_t uv_metrics_idle_time(uv_loop_t* loop) {
  uv__loop_metrics_t* loop_metrics;
  uint64_t entry_time;
  uint64_t idle_time;

  loop_metrics = uv__get_loop_metrics(loop);
  uv_mutex_lock(&loop_metrics->lock);
  idle_time = loop_metrics->provider_idle_time;
  entry_time = loop_metrics->provider_entry_time;
  uv_mutex_unlock(&loop_metrics->lock);

  if (entry_time > 0)
    idle_time += uv_hrtime() - entry_time;
  return idle_time;
}
973