1 /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2 * Permission is hereby granted, free of charge, to any person obtaining a copy
3 * of this software and associated documentation files (the "Software"), to
4 * deal in the Software without restriction, including without limitation the
5 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
6 * sell copies of the Software, and to permit persons to whom the Software is
7 * furnished to do so, subject to the following conditions:
8 *
9 * The above copyright notice and this permission notice shall be included in
10 * all copies or substantial portions of the Software.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
15 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
16 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
17 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
18 * IN THE SOFTWARE.
19 */
20
21 #include "uv.h"
22 #include "internal.h"
23
24 #include <stddef.h> /* NULL */
25 #include <stdio.h> /* printf */
26 #include <stdlib.h>
27 #include <string.h> /* strerror */
28 #include <errno.h>
29 #include <assert.h>
30 #include <unistd.h>
31 #include <sys/types.h>
32 #include <sys/stat.h>
33 #include <fcntl.h>
34 #include <sys/ioctl.h>
35 #include <sys/socket.h>
36 #include <sys/un.h>
37 #include <netinet/in.h>
38 #include <arpa/inet.h>
39 #include <limits.h> /* INT_MAX, PATH_MAX, IOV_MAX */
40 #include <sys/uio.h> /* writev */
41 #include <sys/resource.h> /* getrusage */
42 #include <pwd.h>
43
44 #ifdef __sun
45 # include <netdb.h> /* MAXHOSTNAMELEN on Solaris */
46 # include <sys/filio.h>
47 # include <sys/types.h>
48 # include <sys/wait.h>
49 #endif
50
51 #ifdef __APPLE__
52 # include <mach-o/dyld.h> /* _NSGetExecutablePath */
53 # include <sys/filio.h>
54 # if defined(O_CLOEXEC)
55 # define UV__O_CLOEXEC O_CLOEXEC
56 # endif
57 #endif
58
59 #if defined(__DragonFly__) || \
60 defined(__FreeBSD__) || \
61 defined(__FreeBSD_kernel__) || \
62 defined(__NetBSD__)
63 # include <sys/sysctl.h>
64 # include <sys/filio.h>
65 # include <sys/wait.h>
66 # define UV__O_CLOEXEC O_CLOEXEC
67 # if defined(__FreeBSD__) && __FreeBSD__ >= 10
68 # define uv__accept4 accept4
69 # endif
70 # if defined(__NetBSD__)
71 # define uv__accept4(a, b, c, d) paccept((a), (b), (c), NULL, (d))
72 # endif
73 # if (defined(__FreeBSD__) && __FreeBSD__ >= 10) || defined(__NetBSD__)
74 # define UV__SOCK_NONBLOCK SOCK_NONBLOCK
75 # define UV__SOCK_CLOEXEC SOCK_CLOEXEC
76 # endif
77 # if !defined(F_DUP2FD_CLOEXEC) && defined(_F_DUP2FD_CLOEXEC)
78 # define F_DUP2FD_CLOEXEC _F_DUP2FD_CLOEXEC
79 # endif
80 #endif
81
82 #if defined(__ANDROID_API__) && __ANDROID_API__ < 21
83 # include <dlfcn.h> /* for dlsym */
84 #endif
85
86 #if defined(__MVS__)
87 #include <sys/ioctl.h>
88 #endif
89
90 #if !defined(__MVS__)
91 #include <sys/param.h> /* MAXHOSTNAMELEN on Linux and the BSDs */
92 #endif
93
94 /* Fallback for the maximum hostname length */
95 #ifndef MAXHOSTNAMELEN
96 # define MAXHOSTNAMELEN 256
97 #endif
98
99 static int uv__run_pending(uv_loop_t* loop);
100
101 /* Verify that uv_buf_t is ABI-compatible with struct iovec. */
102 STATIC_ASSERT(sizeof(uv_buf_t) == sizeof(struct iovec));
103 STATIC_ASSERT(sizeof(&((uv_buf_t*) 0)->base) ==
104 sizeof(((struct iovec*) 0)->iov_base));
105 STATIC_ASSERT(sizeof(&((uv_buf_t*) 0)->len) ==
106 sizeof(((struct iovec*) 0)->iov_len));
107 STATIC_ASSERT(offsetof(uv_buf_t, base) == offsetof(struct iovec, iov_base));
108 STATIC_ASSERT(offsetof(uv_buf_t, len) == offsetof(struct iovec, iov_len));
109
110
uv_hrtime(void)111 uint64_t uv_hrtime(void) {
112 return uv__hrtime(UV_CLOCK_PRECISE);
113 }
114
115
/* Begin closing `handle`.  The handle is flagged UV_CLOSING immediately and
 * `close_cb` is invoked later, once the loop has run the closing handles.
 * Calling this twice on the same handle is a usage error.
 */
void uv_close(uv_handle_t* handle, uv_close_cb close_cb) {
  assert(!uv__is_closing(handle));

  handle->flags |= UV_CLOSING;
  handle->close_cb = close_cb;

  switch (handle->type) {
  case UV_NAMED_PIPE:
    uv__pipe_close((uv_pipe_t*) handle);
    break;

  case UV_TTY:
    uv__stream_close((uv_stream_t*) handle);
    break;

  case UV_TCP:
    uv__tcp_close((uv_tcp_t*) handle);
    break;

  case UV_UDP:
    uv__udp_close((uv_udp_t*) handle);
    break;

  case UV_PREPARE:
    uv__prepare_close((uv_prepare_t*) handle);
    break;

  case UV_CHECK:
    uv__check_close((uv_check_t*) handle);
    break;

  case UV_IDLE:
    uv__idle_close((uv_idle_t*) handle);
    break;

  case UV_ASYNC:
    uv__async_close((uv_async_t*) handle);
    break;

  case UV_TIMER:
    uv__timer_close((uv_timer_t*) handle);
    break;

  case UV_PROCESS:
    uv__process_close((uv_process_t*) handle);
    break;

  case UV_FS_EVENT:
    uv__fs_event_close((uv_fs_event_t*) handle);
    break;

  case UV_POLL:
    uv__poll_close((uv_poll_t*) handle);
    break;

  case UV_FS_POLL:
    uv__fs_poll_close((uv_fs_poll_t*) handle);
    break;

  case UV_SIGNAL:
    /* Signal handles may not close immediately; the signal code calls
     * uv__make_close_pending() itself whenever appropriate, so return
     * without queueing the close here.
     */
    uv__signal_close((uv_signal_t*) handle);
    return;

  default:
    assert(0);
  }

  uv__make_close_pending(handle);
}
187
/* Get or set a SOL_SOCKET option on the fd behind a TCP, pipe or UDP handle.
 * A `*value` of zero means "query": the current option value is written back
 * through `value`.  Any other `*value` sets the option.
 */
int uv__socket_sockopt(uv_handle_t* handle, int optname, int* value) {
  socklen_t optlen;
  int fd;
  int rc;

  if (handle == NULL || value == NULL)
    return UV_EINVAL;

  if (handle->type == UV_TCP || handle->type == UV_NAMED_PIPE)
    fd = uv__stream_fd((uv_stream_t*) handle);
  else if (handle->type == UV_UDP)
    fd = ((uv_udp_t*) handle)->io_watcher.fd;
  else
    return UV_ENOTSUP;

  optlen = sizeof(*value);

  if (*value == 0)
    rc = getsockopt(fd, SOL_SOCKET, optname, value, &optlen);
  else
    rc = setsockopt(fd, SOL_SOCKET, optname, (const void*) value, optlen);

  if (rc < 0)
    return UV__ERR(errno);

  return 0;
}
215
/* Push a closing handle onto the loop's singly-linked closing list so its
 * close callback runs on the next uv__run_closing_handles() pass.
 */
void uv__make_close_pending(uv_handle_t* handle) {
  assert(handle->flags & UV_CLOSING);
  assert(!(handle->flags & UV_CLOSED));
  handle->next_closing = handle->loop->closing_handles;
  handle->loop->closing_handles = handle;
}
222
/* Return the maximum iovec count accepted by writev()/readv().
 * Prefers the compile-time IOV_MAX, then a cached sysconf() lookup,
 * and finally a conservative default.
 */
int uv__getiovmax(void) {
#if defined(IOV_MAX)
  return IOV_MAX;
#elif defined(_SC_IOV_MAX)
  static int cached = -1;

  if (cached == -1) {
    cached = sysconf(_SC_IOV_MAX);
    /* Some embedded libcs (e.g. arm-linux-uclibc) fail this sysconf()
     * with -1/EINPROGRESS; degrade to a single iovec in that case.
     */
    if (cached == -1)
      cached = 1;
  }

  return cached;
#else
  return 1024;
#endif
}
241
242
/* Finish tearing down a handle queued by uv__make_close_pending(): mark it
 * UV_CLOSED, run type-specific destruction, unref it, remove it from the
 * loop's handle queue and finally invoke the user's close callback.
 *
 * Note: although the handle is in the UV_CLOSING state here, uv__is_active()
 * may still report it active - e.g. uv_shutdown() immediately followed by
 * uv_close() leaves a pending shutdown req that keeps the handle active.
 */
static void uv__finish_close(uv_handle_t* handle) {
  assert(handle->flags & UV_CLOSING);
  assert(!(handle->flags & UV_CLOSED));
  handle->flags |= UV_CLOSED;

  switch (handle->type) {
  case UV_PREPARE:
  case UV_CHECK:
  case UV_IDLE:
  case UV_ASYNC:
  case UV_TIMER:
  case UV_PROCESS:
  case UV_FS_EVENT:
  case UV_FS_POLL:
  case UV_POLL:
  case UV_SIGNAL:
    break;  /* Nothing extra to destroy. */

  case UV_NAMED_PIPE:
  case UV_TCP:
  case UV_TTY:
    uv__stream_destroy((uv_stream_t*) handle);
    break;

  case UV_UDP:
    uv__udp_finish_close((uv_udp_t*) handle);
    break;

  default:
    assert(0);
    break;
  }

  uv__handle_unref(handle);
  QUEUE_REMOVE(&handle->handle_queue);

  if (handle->close_cb)
    handle->close_cb(handle);
}
289
290
/* Drain the loop's closing-handles list.  The list is detached first so a
 * close callback that queues further closes re-populates loop state for the
 * next pass instead of being walked here.
 */
static void uv__run_closing_handles(uv_loop_t* loop) {
  uv_handle_t* handle;
  uv_handle_t* next;

  handle = loop->closing_handles;
  loop->closing_handles = NULL;

  while (handle != NULL) {
    next = handle->next_closing;
    uv__finish_close(handle);
    handle = next;
  }
}
304
305
/* Public wrapper: nonzero if the handle is closing or already closed. */
int uv_is_closing(const uv_handle_t* handle) {
  return uv__is_closing(handle);
}
309
310
uv_backend_fd(const uv_loop_t * loop)311 int uv_backend_fd(const uv_loop_t* loop) {
312 return loop->backend_fd;
313 }
314
315
uv_backend_timeout(const uv_loop_t * loop)316 int uv_backend_timeout(const uv_loop_t* loop) {
317 if (loop->stop_flag != 0)
318 return 0;
319
320 if (!uv__has_active_handles(loop) && !uv__has_active_reqs(loop))
321 return 0;
322
323 if (!QUEUE_EMPTY(&loop->idle_handles))
324 return 0;
325
326 if (!QUEUE_EMPTY(&loop->pending_queue))
327 return 0;
328
329 if (loop->closing_handles)
330 return 0;
331
332 return uv__next_timeout(loop);
333 }
334
335
uv__loop_alive(const uv_loop_t * loop)336 static int uv__loop_alive(const uv_loop_t* loop) {
337 return uv__has_active_handles(loop) ||
338 uv__has_active_reqs(loop) ||
339 loop->closing_handles != NULL;
340 }
341
342
/* Public wrapper around the internal liveness check. */
int uv_loop_alive(const uv_loop_t* loop) {
  return uv__loop_alive(loop);
}
346
347
/* Drive the event loop.  Each tick runs timers, pending callbacks, idle and
 * prepare handles, polls for I/O, then runs check handles and close
 * callbacks.  UV_RUN_DEFAULT loops until the loop dies or is stopped;
 * UV_RUN_ONCE and UV_RUN_NOWAIT perform a single tick.  Returns nonzero if
 * the loop is still alive on exit.
 */
int uv_run(uv_loop_t* loop, uv_run_mode mode) {
  int alive;
  int did_pending;
  int timeout;

  alive = uv__loop_alive(loop);
  if (!alive)
    uv__update_time(loop);

  while (alive != 0 && loop->stop_flag == 0) {
    uv__update_time(loop);
    uv__run_timers(loop);
    did_pending = uv__run_pending(loop);
    uv__run_idle(loop);
    uv__run_prepare(loop);

    /* UV_RUN_NOWAIT never blocks in the poller; UV_RUN_ONCE only blocks
     * when no pending callbacks ran this tick.
     */
    timeout = 0;
    if (mode == UV_RUN_DEFAULT || (mode == UV_RUN_ONCE && !did_pending))
      timeout = uv_backend_timeout(loop);

    uv__io_poll(loop, timeout);
    uv__run_check(loop);
    uv__run_closing_handles(loop);

    if (mode == UV_RUN_ONCE) {
      /* UV_RUN_ONCE implies forward progress: at least one callback must
       * have fired by the time it returns.  uv__io_poll() can come back
       * without doing I/O (i.e. no callbacks) when its timeout expires,
       * which means there are due timers that satisfy the constraint.
       *
       * UV_RUN_NOWAIT makes no progress guarantee, so it is exempt.
       */
      uv__update_time(loop);
      uv__run_timers(loop);
    }

    alive = uv__loop_alive(loop);
    if (mode == UV_RUN_ONCE || mode == UV_RUN_NOWAIT)
      break;
  }

  /* The conditional lets gcc emit a conditional store and avoids dirtying
   * a cache line when the flag is already clear.
   */
  if (loop->stop_flag != 0)
    loop->stop_flag = 0;

  return alive;
}
398
399
/* Refresh the loop's cached timestamp. */
void uv_update_time(uv_loop_t* loop) {
  uv__update_time(loop);
}
403
404
/* Public wrapper: nonzero while the handle is active. */
int uv_is_active(const uv_handle_t* handle) {
  return uv__is_active(handle);
}
408
409
/* Open a socket that is non-blocking and close-on-exec, atomically when the
 * platform supports SOCK_NONBLOCK|SOCK_CLOEXEC, otherwise via fcntl()/ioctl()
 * after the fact.  Returns the fd or a negative libuv error code.
 */
int uv__socket(int domain, int type, int protocol) {
  int fd;
  int rc;

#if defined(SOCK_NONBLOCK) && defined(SOCK_CLOEXEC)
  fd = socket(domain, type | SOCK_NONBLOCK | SOCK_CLOEXEC, protocol);
  if (fd != -1)
    return fd;

  /* EINVAL means the flags are unsupported; retry the portable way. */
  if (errno != EINVAL)
    return UV__ERR(errno);
#endif

  fd = socket(domain, type, protocol);
  if (fd == -1)
    return UV__ERR(errno);

  rc = uv__nonblock(fd, 1);
  if (rc == 0)
    rc = uv__cloexec(fd, 1);

  if (rc != 0) {
    uv__close(fd);
    return rc;
  }

#if defined(SO_NOSIGPIPE)
  {
    /* Suppress SIGPIPE on writes to a closed peer (BSD/macOS). */
    int on = 1;
    setsockopt(fd, SOL_SOCKET, SO_NOSIGPIPE, &on, sizeof(on));
  }
#endif

  return fd;
}
446
/* Open `path` read-only with close-on-exec and wrap it in a FILE*.
 * Returns NULL on failure; the underlying fd never leaks.
 */
FILE* uv__open_file(const char* path) {
  FILE* fp;
  int fd;

  fd = uv__open_cloexec(path, O_RDONLY);
  if (fd < 0)
    return NULL;

  fp = fdopen(fd, "r");
  if (fp == NULL)
    uv__close(fd);  /* fdopen() failed; don't leak the descriptor. */

  return fp;
}
462
463
uv__accept(int sockfd)464 int uv__accept(int sockfd) {
465 int peerfd;
466 int err;
467
468 assert(sockfd >= 0);
469
470 while (1) {
471 #if defined(__linux__) || \
472 (defined(__FreeBSD__) && __FreeBSD__ >= 10) || \
473 defined(__NetBSD__)
474 static int no_accept4;
475
476 if (no_accept4)
477 goto skip;
478
479 peerfd = uv__accept4(sockfd,
480 NULL,
481 NULL,
482 UV__SOCK_NONBLOCK|UV__SOCK_CLOEXEC);
483 if (peerfd != -1)
484 return peerfd;
485
486 if (errno == EINTR)
487 continue;
488
489 if (errno != ENOSYS)
490 return UV__ERR(errno);
491
492 no_accept4 = 1;
493 skip:
494 #endif
495
496 peerfd = accept(sockfd, NULL, NULL);
497 if (peerfd == -1) {
498 if (errno == EINTR)
499 continue;
500 return UV__ERR(errno);
501 }
502
503 err = uv__cloexec(peerfd, 1);
504 if (err == 0)
505 err = uv__nonblock(peerfd, 1);
506
507 if (err) {
508 uv__close(peerfd);
509 return err;
510 }
511
512 return peerfd;
513 }
514 }
515
516
uv__close_nocheckstdio(int fd)517 int uv__close_nocheckstdio(int fd) {
518 int saved_errno;
519 int rc;
520
521 assert(fd > -1); /* Catch uninitialized io_watcher.fd bugs. */
522
523 saved_errno = errno;
524 rc = close(fd);
525 if (rc == -1) {
526 rc = UV__ERR(errno);
527 if (rc == UV_EINTR || rc == UV__ERR(EINPROGRESS))
528 rc = 0; /* The close is in progress, not an error. */
529 errno = saved_errno;
530 }
531
532 return rc;
533 }
534
535
/* Close an fd, asserting it is not one of the stdio descriptors (closing
 * those is almost always a bug in libuv code).
 */
int uv__close(int fd) {
  assert(fd > STDERR_FILENO);  /* Catch stdio close bugs. */
#if defined(__MVS__)
  epoll_file_close(fd);  /* z/OS: drop the fd from the epoll emulation. */
#endif
  return uv__close_nocheckstdio(fd);
}
543
544
/* Toggle O_NONBLOCK on `fd` using FIONBIO, retrying on EINTR. */
int uv__nonblock_ioctl(int fd, int set) {
  int rc;

  do
    rc = ioctl(fd, FIONBIO, &set);
  while (rc == -1 && errno == EINTR);

  if (rc != 0)
    return UV__ERR(errno);

  return 0;
}
557
558
#if !defined(__CYGWIN__) && !defined(__MSYS__)
/* Toggle FD_CLOEXEC on `fd` via FIOCLEX/FIONCLEX, retrying on EINTR. */
int uv__cloexec_ioctl(int fd, int set) {
  int rc;

  do
    rc = ioctl(fd, set ? FIOCLEX : FIONCLEX);
  while (rc == -1 && errno == EINTR);

  if (rc != 0)
    return UV__ERR(errno);

  return 0;
}
#endif
573
574
/* Set or clear O_NONBLOCK on `fd` with F_GETFL/F_SETFL, retrying each fcntl
 * on EINTR and skipping the F_SETFL when the flag already matches.
 */
int uv__nonblock_fcntl(int fd, int set) {
  int flags;
  int rc;

  do
    rc = fcntl(fd, F_GETFL);
  while (rc == -1 && errno == EINTR);

  if (rc == -1)
    return UV__ERR(errno);

  /* Nothing to do if the flag is already in the requested state. */
  if (!!(rc & O_NONBLOCK) == !!set)
    return 0;

  flags = set ? (rc | O_NONBLOCK) : (rc & ~O_NONBLOCK);

  do
    rc = fcntl(fd, F_SETFL, flags);
  while (rc == -1 && errno == EINTR);

  if (rc != 0)
    return UV__ERR(errno);

  return 0;
}
604
605
/* Set or clear FD_CLOEXEC on `fd` with F_GETFD/F_SETFD, retrying each fcntl
 * on EINTR and skipping the F_SETFD when the flag already matches.
 */
int uv__cloexec_fcntl(int fd, int set) {
  int flags;
  int rc;

  do
    rc = fcntl(fd, F_GETFD);
  while (rc == -1 && errno == EINTR);

  if (rc == -1)
    return UV__ERR(errno);

  /* Nothing to do if the flag is already in the requested state. */
  if (!!(rc & FD_CLOEXEC) == !!set)
    return 0;

  flags = set ? (rc | FD_CLOEXEC) : (rc & ~FD_CLOEXEC);

  do
    rc = fcntl(fd, F_SETFD, flags);
  while (rc == -1 && errno == EINTR);

  if (rc != 0)
    return UV__ERR(errno);

  return 0;
}
635
636
/* Duplicate `fd` and mark the copy close-on-exec.  Not execve-safe: there is
 * a race window between dup() and the fcntl(FD_CLOEXEC) call.
 */
int uv__dup(int fd) {
  int rc;

  fd = dup(fd);
  if (fd == -1)
    return UV__ERR(errno);

  rc = uv__cloexec(fd, 1);
  if (rc != 0) {
    uv__close(fd);
    return rc;
  }

  return fd;
}
656
657
/* recvmsg() wrapper that marks any fds received via SCM_RIGHTS as
 * close-on-exec.  On Linux it first tries MSG_CMSG_CLOEXEC so the kernel
 * does this atomically, remembering an EINVAL so later calls skip the probe.
 */
ssize_t uv__recvmsg(int fd, struct msghdr* msg, int flags) {
  struct cmsghdr* cmsg;
  ssize_t nread;
  int* fd_ptr;
  int* fd_end;

#if defined(__linux__)
  static int no_msg_cmsg_cloexec;

  if (no_msg_cmsg_cloexec == 0) {
    nread = recvmsg(fd, msg, flags | 0x40000000);  /* MSG_CMSG_CLOEXEC */
    if (nread != -1)
      return nread;
    if (errno != EINVAL)
      return UV__ERR(errno);
    nread = recvmsg(fd, msg, flags);
    if (nread == -1)
      return UV__ERR(errno);
    no_msg_cmsg_cloexec = 1;  /* Flag unsupported; don't probe again. */
  } else {
    nread = recvmsg(fd, msg, flags);
  }
#else
  nread = recvmsg(fd, msg, flags);
#endif

  if (nread == -1)
    return UV__ERR(errno);

  if (msg->msg_controllen == 0)
    return nread;

  /* Walk every control message and set FD_CLOEXEC on passed descriptors. */
  for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) {
    if (cmsg->cmsg_type != SCM_RIGHTS)
      continue;
    fd_ptr = (int*) CMSG_DATA(cmsg);
    fd_end = (int*) ((char*) cmsg + cmsg->cmsg_len);
    for (; fd_ptr < fd_end; fd_ptr++)
      uv__cloexec(*fd_ptr, 1);
  }

  return nread;
}
694
695
uv_cwd(char * buffer,size_t * size)696 int uv_cwd(char* buffer, size_t* size) {
697 if (buffer == NULL || size == NULL)
698 return UV_EINVAL;
699
700 if (getcwd(buffer, *size) == NULL)
701 return UV__ERR(errno);
702
703 *size = strlen(buffer);
704 if (*size > 1 && buffer[*size - 1] == '/') {
705 buffer[*size-1] = '\0';
706 (*size)--;
707 }
708
709 return 0;
710 }
711
712
/* Change the process working directory; returns 0 or a libuv error code. */
int uv_chdir(const char* dir) {
  if (chdir(dir) != 0)
    return UV__ERR(errno);

  return 0;
}
719
720
/* Mark every open file descriptor close-on-exec.  Descriptors 0-15 are
 * tried unconditionally; beyond that the scan stops at the first failure.
 */
void uv_disable_stdio_inheritance(void) {
  int fd;

  fd = 0;
  for (;;) {
    if (uv__cloexec(fd, 1) != 0 && fd > 15)
      break;
    fd++;
  }
}
731
732
/* Retrieve the OS file descriptor backing a stream, UDP or poll handle.
 * Returns UV_EINVAL for handle types without an fd and UV_EBADF when the
 * handle is closing or has no descriptor attached.
 */
int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd) {
  int raw_fd;

  switch (handle->type) {
  case UV_TCP:
  case UV_NAMED_PIPE:
  case UV_TTY:
    raw_fd = uv__stream_fd((uv_stream_t*) handle);
    break;

  case UV_UDP:
    raw_fd = ((uv_udp_t*) handle)->io_watcher.fd;
    break;

  case UV_POLL:
    raw_fd = ((uv_poll_t*) handle)->io_watcher.fd;
    break;

  default:
    return UV_EINVAL;
  }

  if (uv__is_closing(handle) || raw_fd == -1)
    return UV_EBADF;

  *fd = raw_fd;
  return 0;
}
761
762
/* Run every callback in the loop's pending queue.  The queue is detached
 * first so callbacks that queue new pending work are deferred to the next
 * tick.  Returns 1 if any callback ran, 0 if the queue was empty.
 */
static int uv__run_pending(uv_loop_t* loop) {
  QUEUE drained;
  QUEUE* item;
  uv__io_t* watcher;

  if (QUEUE_EMPTY(&loop->pending_queue))
    return 0;

  QUEUE_MOVE(&loop->pending_queue, &drained);

  while (!QUEUE_EMPTY(&drained)) {
    item = QUEUE_HEAD(&drained);
    QUEUE_REMOVE(item);
    QUEUE_INIT(item);
    watcher = QUEUE_DATA(item, uv__io_t, pending_queue);
    watcher->cb(loop, watcher, POLLOUT);
  }

  return 1;
}
783
784
/* Round `val` up to the nearest power of two using the classic bit-smearing
 * trick: propagate the highest set bit into every lower position, then add
 * one.  A power of two maps to itself.
 */
static unsigned int next_power_of_two(unsigned int val) {
  unsigned int v;
  unsigned int shift;

  v = val - 1;
  for (shift = 1; shift <= 16; shift <<= 1)
    v |= v >> shift;

  return v + 1;
}
795
/* Grow loop->watchers so it can index at least `len` fds.  The array keeps
 * two extra slots past nwatchers holding the poller's fake watcher list and
 * count; those must survive the realloc.  Aborts on OOM (loop state would
 * otherwise be unrecoverable).
 */
static void maybe_resize(uv_loop_t* loop, unsigned int len) {
  uv__io_t** grown;
  void* fake_watcher_list;
  void* fake_watcher_count;
  unsigned int new_len;
  unsigned int i;

  if (len <= loop->nwatchers)
    return;

  /* Save the two sentinel slots stored past the end of the array. */
  if (loop->watchers != NULL) {
    fake_watcher_list = loop->watchers[loop->nwatchers];
    fake_watcher_count = loop->watchers[loop->nwatchers + 1];
  } else {
    fake_watcher_list = NULL;
    fake_watcher_count = NULL;
  }

  new_len = next_power_of_two(len + 2) - 2;
  grown = uv__realloc(loop->watchers,
                      (new_len + 2) * sizeof(loop->watchers[0]));
  if (grown == NULL)
    abort();

  for (i = loop->nwatchers; i < new_len; i++)
    grown[i] = NULL;
  grown[new_len] = fake_watcher_list;
  grown[new_len + 1] = fake_watcher_count;

  loop->watchers = grown;
  loop->nwatchers = new_len;
}
829
830
/* Initialize an I/O watcher with its callback and fd (fd may be -1 for
 * watchers that attach a descriptor later).
 */
void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd) {
  assert(cb != NULL);
  assert(fd >= -1);

  QUEUE_INIT(&w->pending_queue);
  QUEUE_INIT(&w->watcher_queue);
  w->cb = cb;
  w->fd = fd;
  w->events = 0;
  w->pevents = 0;

#if defined(UV_HAVE_KQUEUE)
  /* kqueue tracks read/write interest counts separately. */
  w->rcount = 0;
  w->wcount = 0;
#endif /* defined(UV_HAVE_KQUEUE) */
}
846
847
/* Register interest in `events` for watcher `w` on `loop`, growing the
 * watchers array as needed and queueing the watcher for the poller.
 */
void uv__io_start(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
  assert(0 != events);
  assert(w->fd >= 0);
  assert(w->fd < INT_MAX);

  w->pevents |= events;
  maybe_resize(loop, w->fd + 1);

#if !defined(__sun)
  /* The event ports backend must rearm every fd each loop tick; the other
   * backends can short-circuit when the event mask has not changed.
   */
  if (w->events == w->pevents)
    return;
#endif

  if (QUEUE_EMPTY(&w->watcher_queue))
    QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);

  if (loop->watchers[w->fd] == NULL) {
    loop->watchers[w->fd] = w;
    loop->nfds++;
  }
}
874
875
/* Drop interest in `events` for watcher `w`.  When no events remain the
 * watcher is detached from the loop entirely; otherwise it is (re)queued so
 * the poller picks up the reduced mask.
 */
void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
  assert(0 != events);

  if (w->fd == -1)
    return;

  assert(w->fd >= 0);

  /* Happens when uv__io_stop() is called on a handle that was never
   * started.
   */
  if ((unsigned) w->fd >= loop->nwatchers)
    return;

  w->pevents &= ~events;

  if (w->pevents != 0) {
    if (QUEUE_EMPTY(&w->watcher_queue))
      QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
    return;
  }

  QUEUE_REMOVE(&w->watcher_queue);
  QUEUE_INIT(&w->watcher_queue);

  if (loop->watchers[w->fd] != NULL) {
    assert(loop->watchers[w->fd] == w);
    assert(loop->nfds > 0);
    loop->watchers[w->fd] = NULL;
    loop->nfds--;
    w->events = 0;
  }
}
906
907
/* Fully detach a watcher: stop all events, drop it from the pending queue
 * and purge any stale poller events for its fd.
 */
void uv__io_close(uv_loop_t* loop, uv__io_t* w) {
  uv__io_stop(loop, w, POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);
  QUEUE_REMOVE(&w->pending_queue);

  /* Remove stale events for this file descriptor. */
  uv__platform_invalidate_fd(loop, w->fd);
}
915
916
/* Queue a watcher's callback to run on the next pending pass, unless it is
 * already queued.
 */
void uv__io_feed(uv_loop_t* loop, uv__io_t* w) {
  if (QUEUE_EMPTY(&w->pending_queue))
    QUEUE_INSERT_TAIL(&loop->pending_queue, &w->pending_queue);
}
921
922
uv__io_active(const uv__io_t * w,unsigned int events)923 int uv__io_active(const uv__io_t* w, unsigned int events) {
924 assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
925 assert(0 != events);
926 return 0 != (w->pevents & events);
927 }
928
929
/* Fill `rusage` with resource usage for the current process via
 * getrusage(RUSAGE_SELF).  z/OS only provides the two time fields.
 */
int uv_getrusage(uv_rusage_t* rusage) {
  struct rusage usage;

  if (getrusage(RUSAGE_SELF, &usage))
    return UV__ERR(errno);

  rusage->ru_utime.tv_sec = usage.ru_utime.tv_sec;
  rusage->ru_utime.tv_usec = usage.ru_utime.tv_usec;

  rusage->ru_stime.tv_sec = usage.ru_stime.tv_sec;
  rusage->ru_stime.tv_usec = usage.ru_stime.tv_usec;

#if !defined(__MVS__)
  rusage->ru_maxrss = usage.ru_maxrss;
  rusage->ru_ixrss = usage.ru_ixrss;
  rusage->ru_idrss = usage.ru_idrss;
  rusage->ru_isrss = usage.ru_isrss;
  rusage->ru_minflt = usage.ru_minflt;
  rusage->ru_majflt = usage.ru_majflt;
  rusage->ru_nswap = usage.ru_nswap;
  rusage->ru_inblock = usage.ru_inblock;
  rusage->ru_oublock = usage.ru_oublock;
  rusage->ru_msgsnd = usage.ru_msgsnd;
  rusage->ru_msgrcv = usage.ru_msgrcv;
  rusage->ru_nsignals = usage.ru_nsignals;
  rusage->ru_nvcsw = usage.ru_nvcsw;
  rusage->ru_nivcsw = usage.ru_nivcsw;
#endif

  return 0;
}
961
962
/* open() with close-on-exec, atomically via O_CLOEXEC when available.  An
 * EINVAL from the O_CLOEXEC attempt is remembered so later calls go straight
 * to the open() + fcntl() fallback.  Returns the fd or a libuv error code.
 */
int uv__open_cloexec(const char* path, int flags) {
  int fd;
  int rc;

#if defined(UV__O_CLOEXEC)
  static int no_cloexec;

  if (!no_cloexec) {
    fd = open(path, flags | UV__O_CLOEXEC);
    if (fd != -1)
      return fd;

    if (errno != EINVAL)
      return UV__ERR(errno);

    /* O_CLOEXEC not supported. */
    no_cloexec = 1;
  }
#endif

  fd = open(path, flags);
  if (fd == -1)
    return UV__ERR(errno);

  rc = uv__cloexec(fd, 1);
  if (rc != 0) {
    uv__close(fd);
    return rc;
  }

  return fd;
}
995
996
uv__dup2_cloexec(int oldfd,int newfd)997 int uv__dup2_cloexec(int oldfd, int newfd) {
998 int r;
999 #if (defined(__FreeBSD__) && __FreeBSD__ >= 10) || defined(__NetBSD__)
1000 r = dup3(oldfd, newfd, O_CLOEXEC);
1001 if (r == -1)
1002 return UV__ERR(errno);
1003 return r;
1004 #elif defined(__FreeBSD__) && defined(F_DUP2FD_CLOEXEC)
1005 r = fcntl(oldfd, F_DUP2FD_CLOEXEC, newfd);
1006 if (r != -1)
1007 return r;
1008 if (errno != EINVAL)
1009 return UV__ERR(errno);
1010 /* Fall through. */
1011 #elif defined(__linux__)
1012 static int no_dup3;
1013 if (!no_dup3) {
1014 do
1015 r = uv__dup3(oldfd, newfd, UV__O_CLOEXEC);
1016 while (r == -1 && errno == EBUSY);
1017 if (r != -1)
1018 return r;
1019 if (errno != ENOSYS)
1020 return UV__ERR(errno);
1021 /* Fall through. */
1022 no_dup3 = 1;
1023 }
1024 #endif
1025 {
1026 int err;
1027 do
1028 r = dup2(oldfd, newfd);
1029 #if defined(__linux__)
1030 while (r == -1 && errno == EBUSY);
1031 #else
1032 while (0); /* Never retry. */
1033 #endif
1034
1035 if (r == -1)
1036 return UV__ERR(errno);
1037
1038 err = uv__cloexec(newfd, 1);
1039 if (err) {
1040 uv__close(newfd);
1041 return err;
1042 }
1043
1044 return r;
1045 }
1046 }
1047
1048
uv_os_homedir(char * buffer,size_t * size)1049 int uv_os_homedir(char* buffer, size_t* size) {
1050 uv_passwd_t pwd;
1051 char* buf;
1052 size_t len;
1053 int r;
1054
1055 if (buffer == NULL || size == NULL || *size == 0)
1056 return UV_EINVAL;
1057
1058 /* Check if the HOME environment variable is set first */
1059 buf = getenv("HOME");
1060
1061 if (buf != NULL) {
1062 len = strlen(buf);
1063
1064 if (len >= *size) {
1065 *size = len + 1;
1066 return UV_ENOBUFS;
1067 }
1068
1069 memcpy(buffer, buf, len + 1);
1070 *size = len;
1071
1072 return 0;
1073 }
1074
1075 /* HOME is not set, so call uv__getpwuid_r() */
1076 r = uv__getpwuid_r(&pwd);
1077
1078 if (r != 0) {
1079 return r;
1080 }
1081
1082 len = strlen(pwd.homedir);
1083
1084 if (len >= *size) {
1085 *size = len + 1;
1086 uv_os_free_passwd(&pwd);
1087 return UV_ENOBUFS;
1088 }
1089
1090 memcpy(buffer, pwd.homedir, len + 1);
1091 *size = len;
1092 uv_os_free_passwd(&pwd);
1093
1094 return 0;
1095 }
1096
1097
uv_os_tmpdir(char * buffer,size_t * size)1098 int uv_os_tmpdir(char* buffer, size_t* size) {
1099 const char* buf;
1100 size_t len;
1101
1102 if (buffer == NULL || size == NULL || *size == 0)
1103 return UV_EINVAL;
1104
1105 #define CHECK_ENV_VAR(name) \
1106 do { \
1107 buf = getenv(name); \
1108 if (buf != NULL) \
1109 goto return_buffer; \
1110 } \
1111 while (0)
1112
1113 /* Check the TMPDIR, TMP, TEMP, and TEMPDIR environment variables in order */
1114 CHECK_ENV_VAR("TMPDIR");
1115 CHECK_ENV_VAR("TMP");
1116 CHECK_ENV_VAR("TEMP");
1117 CHECK_ENV_VAR("TEMPDIR");
1118
1119 #undef CHECK_ENV_VAR
1120
1121 /* No temp environment variables defined */
1122 #if defined(__ANDROID__)
1123 buf = "/data/local/tmp";
1124 #else
1125 buf = "/tmp";
1126 #endif
1127
1128 return_buffer:
1129 len = strlen(buf);
1130
1131 if (len >= *size) {
1132 *size = len + 1;
1133 return UV_ENOBUFS;
1134 }
1135
1136 /* The returned directory should not have a trailing slash. */
1137 if (len > 1 && buf[len - 1] == '/') {
1138 len--;
1139 }
1140
1141 memcpy(buffer, buf, len + 1);
1142 buffer[len] = '\0';
1143 *size = len;
1144
1145 return 0;
1146 }
1147
1148
/* Fill `pwd` with the passwd entry for the effective uid.  The scratch
 * buffer for getpwuid_r() starts at _SC_GETPW_R_SIZE_MAX (or 4096) and
 * doubles on ERANGE.  username/homedir/shell share one allocation whose
 * base pointer is pwd->username; release it with uv_os_free_passwd().
 */
int uv__getpwuid_r(uv_passwd_t* pwd) {
  struct passwd entry;
  struct passwd* found;
  char* scratch;
  size_t scratch_size;
  size_t name_size;
  size_t homedir_size;
  size_t shell_size;
  long hinted_size;
  uid_t uid;
  int rc;
#if defined(__ANDROID_API__) && __ANDROID_API__ < 21
  /* Bionic gained getpwuid_r() in API 21; resolve it dynamically. */
  int (*getpwuid_r)(uid_t, struct passwd*, char*, size_t, struct passwd**);

  getpwuid_r = dlsym(RTLD_DEFAULT, "getpwuid_r");
  if (getpwuid_r == NULL)
    return UV_ENOSYS;
#endif

  if (pwd == NULL)
    return UV_EINVAL;

  hinted_size = sysconf(_SC_GETPW_R_SIZE_MAX);
  if (hinted_size <= 0)
    scratch_size = 4096;
  else
    scratch_size = (size_t) hinted_size;

  uid = geteuid();
  scratch = NULL;

  for (;;) {
    uv__free(scratch);
    scratch = uv__malloc(scratch_size);
    if (scratch == NULL)
      return UV_ENOMEM;

    rc = getpwuid_r(uid, &entry, scratch, scratch_size, &found);

    if (rc != ERANGE)
      break;

    scratch_size *= 2;  /* Buffer too small; retry with double the space. */
  }

  if (rc != 0) {
    uv__free(scratch);
    return -rc;
  }

  if (found == NULL) {
    uv__free(scratch);
    return UV_ENOENT;
  }

  /* One allocation holds username + homedir + shell, in that order. */
  name_size = strlen(entry.pw_name) + 1;
  homedir_size = strlen(entry.pw_dir) + 1;
  shell_size = strlen(entry.pw_shell) + 1;
  pwd->username = uv__malloc(name_size + homedir_size + shell_size);

  if (pwd->username == NULL) {
    uv__free(scratch);
    return UV_ENOMEM;
  }

  memcpy(pwd->username, entry.pw_name, name_size);

  pwd->homedir = pwd->username + name_size;
  memcpy(pwd->homedir, entry.pw_dir, homedir_size);

  pwd->shell = pwd->homedir + homedir_size;
  memcpy(pwd->shell, entry.pw_shell, shell_size);

  pwd->uid = entry.pw_uid;
  pwd->gid = entry.pw_gid;

  uv__free(scratch);

  return 0;
}
1236
1237
/* Release a uv_passwd_t filled by uv__getpwuid_r().  The name, shell and
 * homedir strings live in a single uv__malloc() allocation whose base is
 * pwd->username, so only that pointer is freed.
 */
void uv_os_free_passwd(uv_passwd_t* pwd) {
  if (pwd == NULL)
    return;

  uv__free(pwd->username);
  pwd->username = NULL;
  pwd->shell = NULL;
  pwd->homedir = NULL;
}
1252
1253
/* Public wrapper around the internal passwd lookup. */
int uv_os_get_passwd(uv_passwd_t* pwd) {
  return uv__getpwuid_r(pwd);
}
1257
1258
/* Map a positive system errno to a negative libuv error code.  Values that
 * are already <= 0 are assumed to be libuv errors and pass through.
 */
int uv_translate_sys_error(int sys_errno) {
  if (sys_errno <= 0)
    return sys_errno;
  return -sys_errno;
}
1263
1264
uv_os_getenv(const char * name,char * buffer,size_t * size)1265 int uv_os_getenv(const char* name, char* buffer, size_t* size) {
1266 char* var;
1267 size_t len;
1268
1269 if (name == NULL || buffer == NULL || size == NULL || *size == 0)
1270 return UV_EINVAL;
1271
1272 var = getenv(name);
1273
1274 if (var == NULL)
1275 return UV_ENOENT;
1276
1277 len = strlen(var);
1278
1279 if (len >= *size) {
1280 *size = len + 1;
1281 return UV_ENOBUFS;
1282 }
1283
1284 memcpy(buffer, var, len + 1);
1285 *size = len;
1286
1287 return 0;
1288 }
1289
1290
uv_os_setenv(const char * name,const char * value)1291 int uv_os_setenv(const char* name, const char* value) {
1292 if (name == NULL || value == NULL)
1293 return UV_EINVAL;
1294
1295 if (setenv(name, value, 1) != 0)
1296 return UV__ERR(errno);
1297
1298 return 0;
1299 }
1300
1301
uv_os_unsetenv(const char * name)1302 int uv_os_unsetenv(const char* name) {
1303 if (name == NULL)
1304 return UV_EINVAL;
1305
1306 if (unsetenv(name) != 0)
1307 return UV__ERR(errno);
1308
1309 return 0;
1310 }
1311
1312
uv_os_gethostname(char * buffer,size_t * size)1313 int uv_os_gethostname(char* buffer, size_t* size) {
1314 /*
1315 On some platforms, if the input buffer is not large enough, gethostname()
1316 succeeds, but truncates the result. libuv can detect this and return ENOBUFS
1317 instead by creating a large enough buffer and comparing the hostname length
1318 to the size input.
1319 */
1320 char buf[MAXHOSTNAMELEN + 1];
1321 size_t len;
1322
1323 if (buffer == NULL || size == NULL || *size == 0)
1324 return UV_EINVAL;
1325
1326 if (gethostname(buf, sizeof(buf)) != 0)
1327 return UV__ERR(errno);
1328
1329 buf[sizeof(buf) - 1] = '\0'; /* Null terminate, just to be safe. */
1330 len = strlen(buf);
1331
1332 if (len >= *size) {
1333 *size = len + 1;
1334 return UV_ENOBUFS;
1335 }
1336
1337 memcpy(buffer, buf, len + 1);
1338 *size = len;
1339 return 0;
1340 }
1341
1342
uv_get_osfhandle(int fd)1343 uv_os_fd_t uv_get_osfhandle(int fd) {
1344 return fd;
1345 }
1346
1347
uv_os_getpid(void)1348 uv_pid_t uv_os_getpid(void) {
1349 return getpid();
1350 }
1351
1352
uv_os_getppid(void)1353 uv_pid_t uv_os_getppid(void) {
1354 return getppid();
1355 }
1356