1 /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a copy
4 * of this software and associated documentation files (the "Software"), to
5 * deal in the Software without restriction, including without limitation the
6 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7 * sell copies of the Software, and to permit persons to whom the Software is
8 * furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
19 * IN THE SOFTWARE.
20 */
21
22 /* Caveat emptor: this file deviates from the libuv convention of returning
23 * negated errno codes. Most uv_fs_*() functions map directly to the system
24 * call of the same name. For more complex wrappers, it's easier to just
25 * return -1 with errno set. The dispatcher in uv__fs_work() takes care of
26 * getting the errno to the right place (req->result or as the return value.)
27 */
28
29 #include "uv.h"
30 #include "internal.h"
31
32 #include <errno.h>
33 #include <dlfcn.h>
34 #include <stdio.h>
35 #include <stdlib.h>
36 #include <string.h>
37 #include <limits.h> /* PATH_MAX */
38
39 #include <sys/types.h>
40 #include <sys/socket.h>
41 #include <sys/stat.h>
42 #include <sys/time.h>
43 #include <sys/uio.h>
44 #include <pthread.h>
45 #include <unistd.h>
46 #include <fcntl.h>
47 #include <poll.h>
48
49 #if defined(__DragonFly__) || \
50 defined(__FreeBSD__) || \
51 defined(__FreeBSD_kernel__) || \
52 defined(__OpenBSD__) || \
53 defined(__NetBSD__)
54 # define HAVE_PREADV 1
55 #else
56 # define HAVE_PREADV 0
57 #endif
58
59 #if defined(__linux__) || defined(__sun)
60 # include <sys/sendfile.h>
61 #endif
62
63 #if defined(__APPLE__)
64 # include <sys/sysctl.h>
65 #elif defined(__linux__) && !defined(FICLONE)
66 # include <sys/ioctl.h>
67 # define FICLONE _IOW(0x94, 9, int)
68 #endif
69
70 #if defined(_AIX) && !defined(_AIX71)
71 # include <utime.h>
72 #endif
73
74 #if defined(__APPLE__) || \
75 defined(__DragonFly__) || \
76 defined(__FreeBSD__) || \
77 defined(__FreeBSD_kernel__) || \
78 defined(__OpenBSD__) || \
79 defined(__NetBSD__)
80 # include <sys/param.h>
81 # include <sys/mount.h>
82 #elif defined(__sun) || defined(__MVS__) || defined(__NetBSD__) || defined(__HAIKU__)
83 # include <sys/statvfs.h>
84 #else
85 # include <sys/statfs.h>
86 #endif
87
88 #if defined(_AIX) && _XOPEN_SOURCE <= 600
89 extern char *mkdtemp(char *template); /* See issue #740 on AIX < 7 */
90 #endif
91
/* Common prologue for every uv_fs_*() entry point: validate the request
 * pointer and reset all request fields before dispatch. Expands inside a
 * function with `req`, `loop` and `cb` in scope; returns UV_EINVAL from the
 * enclosing function when req is NULL.
 */
#define INIT(subtype)                                                         \
  do {                                                                        \
    if (req == NULL)                                                          \
      return UV_EINVAL;                                                       \
    UV_REQ_INIT(req, UV_FS);                                                  \
    req->fs_type = UV_FS_ ## subtype;                                         \
    req->result = 0;                                                          \
    req->ptr = NULL;                                                          \
    req->loop = loop;                                                         \
    req->path = NULL;                                                         \
    req->new_path = NULL;                                                     \
    req->bufs = NULL;                                                         \
    req->cb = cb;                                                             \
  }                                                                           \
  while (0)
107
/* Record the request's path. Synchronous calls (cb == NULL) borrow the
 * caller's string; asynchronous calls copy it because the caller's buffer
 * may not outlive the threadpool work item. Returns UV_ENOMEM from the
 * enclosing function on allocation failure.
 */
#define PATH                                                                  \
  do {                                                                        \
    assert(path != NULL);                                                     \
    if (cb == NULL) {                                                         \
      req->path = path;                                                       \
    } else {                                                                  \
      req->path = uv__strdup(path);                                           \
      if (req->path == NULL)                                                  \
        return UV_ENOMEM;                                                     \
    }                                                                         \
  }                                                                           \
  while (0)
120
/* Record both `path` and `new_path` (rename, link, copyfile, ...). For the
 * asynchronous case both strings are packed into one allocation: req->path
 * points at the start and req->new_path at the byte after path's NUL, so a
 * single uv__free(req->path) releases both. Returns UV_ENOMEM from the
 * enclosing function on allocation failure.
 */
#define PATH2                                                                 \
  do {                                                                        \
    if (cb == NULL) {                                                         \
      req->path = path;                                                       \
      req->new_path = new_path;                                               \
    } else {                                                                  \
      size_t path_len;                                                        \
      size_t new_path_len;                                                    \
      path_len = strlen(path) + 1;                                            \
      new_path_len = strlen(new_path) + 1;                                    \
      req->path = uv__malloc(path_len + new_path_len);                        \
      if (req->path == NULL)                                                  \
        return UV_ENOMEM;                                                     \
      req->new_path = req->path + path_len;                                   \
      memcpy((void*) req->path, path, path_len);                              \
      memcpy((void*) req->new_path, new_path, new_path_len);                  \
    }                                                                         \
  }                                                                           \
  while (0)
140
/* Dispatch the prepared request: asynchronous calls (cb != NULL) are
 * registered with the loop and submitted to the threadpool, returning 0
 * immediately; synchronous calls run uv__fs_work() inline and return the
 * operation's result directly.
 */
#define POST                                                                  \
  do {                                                                        \
    if (cb != NULL) {                                                         \
      uv__req_register(loop, req);                                            \
      uv__work_submit(loop,                                                   \
                      &req->work_req,                                         \
                      UV__WORK_FAST_IO,                                       \
                      uv__fs_work,                                            \
                      uv__fs_done);                                           \
      return 0;                                                               \
    }                                                                         \
    else {                                                                    \
      uv__fs_work(&req->work_req);                                            \
      return req->result;                                                     \
    }                                                                         \
  }                                                                           \
  while (0)
158
159
/* Close a file descriptor without acting as a thread-cancellation point.
 * EINTR and EINPROGRESS are reported as success: the kernel completes the
 * close regardless, and retrying could close an unrelated, reused fd.
 */
static int uv__fs_close(int fd) {
  int result;

  result = uv__close_nocancel(fd);
  if (result == -1 && (errno == EINTR || errno == EINPROGRESS))
    result = 0;  /* The close is in progress, not an error. */

  return result;
}
170
171
/* Flush req->file to stable storage. On Apple platforms, fall back through
 * progressively weaker durability guarantees; elsewhere this is fsync(2).
 * Returns 0 on success, -1 with errno set.
 */
static ssize_t uv__fs_fsync(uv_fs_t* req) {
#if defined(__APPLE__)
  /* Apple's fdatasync and fsync explicitly do NOT flush the drive write cache
   * to the drive platters. This is in contrast to Linux's fdatasync and fsync
   * which do, according to recent man pages. F_FULLFSYNC is Apple's equivalent
   * for flushing buffered data to permanent storage. If F_FULLFSYNC is not
   * supported by the file system we fall back to F_BARRIERFSYNC or fsync().
   * This is the same approach taken by sqlite, except sqlite does not issue
   * an F_BARRIERFSYNC call.
   */
  int r;

  r = fcntl(req->file, F_FULLFSYNC);
  if (r != 0)
    /* 85 is F_BARRIERFSYNC; the numeric constant is used because older SDK
     * headers do not define it. */
    r = fcntl(req->file, 85 /* F_BARRIERFSYNC */); /* fsync + barrier */
  if (r != 0)
    r = fsync(req->file);
  return r;
#else
  return fsync(req->file);
#endif
}
194
195
/* Flush req->file's data (metadata only when required for data integrity)
 * using fdatasync(2) where available; otherwise the closest equivalent.
 * Returns 0 on success, -1 with errno set.
 */
static ssize_t uv__fs_fdatasync(uv_fs_t* req) {
#if defined(__linux__) || defined(__sun) || defined(__NetBSD__)
  return fdatasync(req->file);
#elif defined(__APPLE__)
  /* See the comment in uv__fs_fsync. */
  return uv__fs_fsync(req);
#else
  return fsync(req->file);
#endif
}
206
207
/* Set access/modification times on the open descriptor req->file from the
 * fractional-second doubles req->atime and req->mtime. Returns 0 on success,
 * -1 with errno set; errno is ENOSYS on platforms with no suitable API.
 */
static ssize_t uv__fs_futime(uv_fs_t* req) {
#if defined(__linux__)                                                        \
    || defined(_AIX71)                                                        \
    || defined(__HAIKU__)
  /* utimesat() has nanosecond resolution but we stick to microseconds
   * for the sake of consistency with other platforms.
   */
  struct timespec ts[2];
  /* tv_sec truncates the double to whole seconds; tv_nsec recovers the
   * microsecond remainder and scales it to nanoseconds. */
  ts[0].tv_sec  = req->atime;
  ts[0].tv_nsec = (uint64_t)(req->atime * 1000000) % 1000000 * 1000;
  ts[1].tv_sec  = req->mtime;
  ts[1].tv_nsec = (uint64_t)(req->mtime * 1000000) % 1000000 * 1000;
#if defined(__ANDROID_API__) && __ANDROID_API__ < 21
  /* Old Android lacks futimens(); utimensat() with a NULL path operates on
   * the descriptor itself there. */
  return utimensat(req->file, NULL, ts, 0);
#else
  return futimens(req->file, ts);
#endif
#elif defined(__APPLE__)                                                      \
    || defined(__DragonFly__)                                                 \
    || defined(__FreeBSD__)                                                   \
    || defined(__FreeBSD_kernel__)                                            \
    || defined(__NetBSD__)                                                    \
    || defined(__OpenBSD__)                                                   \
    || defined(__sun)
  struct timeval tv[2];
  tv[0].tv_sec  = req->atime;
  tv[0].tv_usec = (uint64_t)(req->atime * 1000000) % 1000000;
  tv[1].tv_sec  = req->mtime;
  tv[1].tv_usec = (uint64_t)(req->mtime * 1000000) % 1000000;
# if defined(__sun)
  return futimesat(req->file, NULL, tv);
# else
  return futimes(req->file, tv);
# endif
#elif defined(__MVS__)
  /* z/OS: set times through the attribute-change service. Sub-second
   * precision is not preserved here. */
  attrib_t atr;
  memset(&atr, 0, sizeof(atr));
  atr.att_mtimechg = 1;
  atr.att_atimechg = 1;
  atr.att_mtime = req->mtime;
  atr.att_atime = req->atime;
  return __fchattr(req->file, &atr, sizeof(atr));
#else
  errno = ENOSYS;
  return -1;
#endif
}
255
256
uv__fs_mkdtemp(uv_fs_t * req)257 static ssize_t uv__fs_mkdtemp(uv_fs_t* req) {
258 return mkdtemp((char*) req->path) ? 0 : -1;
259 }
260
261
262 static int (*uv__mkostemp)(char*, int);
263
264
/* One-time initializer: resolve mkostemp() at runtime so uv__fs_mkstemp()
 * can use it when the libc provides it, without a hard link-time dependency.
 * Leaves the file-scope uv__mkostemp pointer NULL when unavailable.
 */
static void uv__mkostemp_initonce(void) {
  /* z/os doesn't have RTLD_DEFAULT but that's okay
   * because it doesn't have mkostemp(O_CLOEXEC) either.
   */
#ifdef RTLD_DEFAULT
  uv__mkostemp = (int (*)(char*, int)) dlsym(RTLD_DEFAULT, "mkostemp");

  /* We don't care about errors, but we do want to clean them up.
   * If there has been no error, then dlerror() will just return
   * NULL.
   */
  dlerror();
#endif  /* RTLD_DEFAULT */
}
279
280
/* Create and open a unique temporary file from the XXXXXX template in
 * req->path, preferring mkostemp(O_CLOEXEC) when the libc and kernel
 * support it. Returns the open fd on success, -1 with errno set on error.
 */
static int uv__fs_mkstemp(uv_fs_t* req) {
  static uv_once_t once = UV_ONCE_INIT;
  int r;
#ifdef O_CLOEXEC
  /* Sticky flag: once mkostemp() reports EINVAL (no O_CLOEXEC support),
   * all subsequent calls skip straight to the mkstemp() fallback. */
  static int no_cloexec_support;
#endif
  static const char pattern[] = "XXXXXX";
  static const size_t pattern_size = sizeof(pattern) - 1;
  char* path;
  size_t path_length;

  path = (char*) req->path;
  path_length = strlen(path);

  /* EINVAL can be returned for 2 reasons:
      1. The template's last 6 characters were not XXXXXX
      2. open() didn't support O_CLOEXEC
     We want to avoid going to the fallback path in case
     of 1, so it's manually checked before. */
  if (path_length < pattern_size ||
      strcmp(path + path_length - pattern_size, pattern)) {
    errno = EINVAL;
    return -1;
  }

  uv_once(&once, uv__mkostemp_initonce);

#ifdef O_CLOEXEC
  if (no_cloexec_support == 0 && uv__mkostemp != NULL) {
    r = uv__mkostemp(path, O_CLOEXEC);

    if (r >= 0)
      return r;

    /* If mkostemp() returns EINVAL, it means the kernel doesn't
       support O_CLOEXEC, so we just fallback to mkstemp() below. */
    if (errno != EINVAL)
      return r;

    /* We set the static variable so that next calls don't even
       try to use mkostemp. */
    no_cloexec_support = 1;
  }
#endif  /* O_CLOEXEC */

  /* Fallback: mkstemp() + a separate FD_CLOEXEC fcntl. The loop's cloexec
   * lock keeps a concurrent fork() from inheriting the fd in the window
   * between the two calls; only async requests take it (sync callers may
   * not be on a loop thread). */
  if (req->cb != NULL)
    uv_rwlock_rdlock(&req->loop->cloexec_lock);

  r = mkstemp(path);

  /* In case of failure `uv__cloexec` will leave error in `errno`,
   * so it is enough to just set `r` to `-1`.
   */
  if (r >= 0 && uv__cloexec(r, 1) != 0) {
    r = uv__close(r);
    if (r != 0)
      abort();
    r = -1;
  }

  if (req->cb != NULL)
    uv_rwlock_rdunlock(&req->loop->cloexec_lock);

  return r;
}
346
347
/* Open req->path with req->flags and req->mode, always marking the fd
 * close-on-exec. Returns the fd on success, -1 with errno set on error.
 */
static ssize_t uv__fs_open(uv_fs_t* req) {
#ifdef O_CLOEXEC
  /* Atomic: the kernel sets the flag at open time, no fork() race. */
  return open(req->path, req->flags | O_CLOEXEC, req->mode);
#else  /* O_CLOEXEC */
  int r;

  /* No O_CLOEXEC: open then fcntl under the loop's cloexec lock so a
   * concurrent fork() can't inherit the fd in between. Only async requests
   * take the lock (sync callers may not be on a loop thread). */
  if (req->cb != NULL)
    uv_rwlock_rdlock(&req->loop->cloexec_lock);

  r = open(req->path, req->flags, req->mode);

  /* In case of failure `uv__cloexec` will leave error in `errno`,
   * so it is enough to just set `r` to `-1`.
   */
  if (r >= 0 && uv__cloexec(r, 1) != 0) {
    r = uv__close(r);
    if (r != 0)
      abort();
    r = -1;
  }

  if (req->cb != NULL)
    uv_rwlock_rdunlock(&req->loop->cloexec_lock);

  return r;
#endif  /* O_CLOEXEC */
}
375
376
377 #if !HAVE_PREADV
uv__fs_preadv(uv_file fd,uv_buf_t * bufs,unsigned int nbufs,off_t off)378 static ssize_t uv__fs_preadv(uv_file fd,
379 uv_buf_t* bufs,
380 unsigned int nbufs,
381 off_t off) {
382 uv_buf_t* buf;
383 uv_buf_t* end;
384 ssize_t result;
385 ssize_t rc;
386 size_t pos;
387
388 assert(nbufs > 0);
389
390 result = 0;
391 pos = 0;
392 buf = bufs + 0;
393 end = bufs + nbufs;
394
395 for (;;) {
396 do
397 rc = pread(fd, buf->base + pos, buf->len - pos, off + result);
398 while (rc == -1 && errno == EINTR);
399
400 if (rc == 0)
401 break;
402
403 if (rc == -1 && result == 0)
404 return UV__ERR(errno);
405
406 if (rc == -1)
407 break; /* We read some data so return that, ignore the error. */
408
409 pos += rc;
410 result += rc;
411
412 if (pos < buf->len)
413 continue;
414
415 pos = 0;
416 buf += 1;
417
418 if (buf == end)
419 break;
420 }
421
422 return result;
423 }
424 #endif
425
426
/* Perform the read described by req: positional (pread/preadv) when
 * req->off >= 0, otherwise from the current file offset (read/readv).
 * Frees req->bufs when it was heap-allocated. Returns bytes read, or -1
 * with errno set.
 */
static ssize_t uv__fs_read(uv_fs_t* req) {
#if defined(__linux__)
  /* Sticky flag: set once the preadv syscall reports ENOSYS so later calls
   * go straight to the emulation. */
  static int no_preadv;
#endif
  unsigned int iovmax;
  ssize_t result;

  /* Clamp the vector length to the platform's IOV_MAX. */
  iovmax = uv__getiovmax();
  if (req->nbufs > iovmax)
    req->nbufs = iovmax;

  if (req->off < 0) {
    if (req->nbufs == 1)
      result = read(req->file, req->bufs[0].base, req->bufs[0].len);
    else
      result = readv(req->file, (struct iovec*) req->bufs, req->nbufs);
  } else {
    if (req->nbufs == 1) {
      result = pread(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
      goto done;
    }

#if HAVE_PREADV
    result = preadv(req->file, (struct iovec*) req->bufs, req->nbufs, req->off);
#else
# if defined(__linux__)
    /* The `retry` label lives on the fallback branch so the ENOSYS path
     * below can jump into the emulation. */
    if (no_preadv) retry:
# endif
    {
      result = uv__fs_preadv(req->file, req->bufs, req->nbufs, req->off);
    }
# if defined(__linux__)
    else {
      result = uv__preadv(req->file,
                          (struct iovec*)req->bufs,
                          req->nbufs,
                          req->off);
      if (result == -1 && errno == ENOSYS) {
        no_preadv = 1;
        goto retry;
      }
    }
# endif
#endif
  }

done:
  /* Early cleanup of bufs allocation, since we're done with it. */
  if (req->bufs != req->bufsml)
    uv__free(req->bufs);

  req->bufs = NULL;
  req->nbufs = 0;

#ifdef __PASE__
  /* PASE returns EOPNOTSUPP when reading a directory, convert to EISDIR */
  if (result == -1 && errno == EOPNOTSUPP) {
    struct stat buf;
    ssize_t rc;
    rc = fstat(req->file, &buf);
    if (rc == 0 && S_ISDIR(buf.st_mode)) {
      errno = EISDIR;
    }
  }
#endif

  return result;
}
495
496
/* scandir() on OS X before 10.8 declares its filter/compar callbacks with
 * non-const dirent pointers; use a const-less alias there so the callback
 * signatures below match either prototype. */
#if defined(__APPLE__) && !defined(MAC_OS_X_VERSION_10_8)
#define UV_CONST_DIRENT uv__dirent_t
#else
#define UV_CONST_DIRENT const uv__dirent_t
#endif
502
503
/* scandir() filter callback: exclude the "." and ".." pseudo-entries,
 * keep everything else. */
static int uv__fs_scandir_filter(UV_CONST_DIRENT* dent) {
  const char* name;

  name = dent->d_name;
  if (name[0] == '.')
    if (name[1] == '\0' || (name[1] == '.' && name[2] == '\0'))
      return 0;

  return 1;
}
507
508
/* scandir() comparison callback: lexicographic order by entry name. */
static int uv__fs_scandir_sort(UV_CONST_DIRENT** a, UV_CONST_DIRENT** b) {
  const char* lhs;
  const char* rhs;

  lhs = (*a)->d_name;
  rhs = (*b)->d_name;
  return strcmp(lhs, rhs);
}
512
513
/* Read the whole directory req->path into a sorted, filtered entry array
 * stored in req->ptr. Returns the entry count, or -1 with errno set.
 */
static ssize_t uv__fs_scandir(uv_fs_t* req) {
  uv__dirent_t** dents;
  int n;

  dents = NULL;
  n = scandir(req->path, &dents, uv__fs_scandir_filter, uv__fs_scandir_sort);

  /* NOTE: We will use nbufs as an index field */
  req->nbufs = 0;

  if (n == 0) {
    /* OS X still needs to deallocate some memory.
     * Memory was allocated using the system allocator, so use free() here.
     */
    free(dents);
    dents = NULL;
  } else if (n == -1) {
    return n;
  }

  /* Ownership of the scandir() allocation transfers to the request; it is
   * released later by uv_fs_req_cleanup(). */
  req->ptr = dents;

  return n;
}
538
/* Allocate a uv_dir_t and open the directory stream for req->path, storing
 * the handle in req->ptr. Returns 0 on success; -1 on allocation or
 * opendir() failure (errno left as set by the failing call). */
static int uv__fs_opendir(uv_fs_t* req) {
  uv_dir_t* dir;

  dir = uv__malloc(sizeof(*dir));
  if (dir != NULL) {
    dir->dir = opendir(req->path);
    if (dir->dir != NULL) {
      req->ptr = dir;
      return 0;
    }
  }

  /* uv__free(NULL) is a no-op, so both failure paths converge here. */
  uv__free(dir);
  req->ptr = NULL;
  return -1;
}
558
/* Read up to dir->nentries directory entries into dir->dirents, duplicating
 * each name. Skips "." and "..". Returns the number of entries stored, or
 * -1 on error (all names copied so far are freed first).
 */
static int uv__fs_readdir(uv_fs_t* req) {
  uv_dir_t* dir;
  uv_dirent_t* dirent;
  struct dirent* res;
  unsigned int dirent_idx;
  unsigned int i;

  dir = req->ptr;
  dirent_idx = 0;

  while (dirent_idx < dir->nentries) {
    /* readdir() returns NULL on end of directory, as well as on error. errno
       is used to differentiate between the two conditions. */
    errno = 0;
    res = readdir(dir->dir);

    if (res == NULL) {
      if (errno != 0)
        goto error;
      break;  /* End of directory. */
    }

    if (strcmp(res->d_name, ".") == 0 || strcmp(res->d_name, "..") == 0)
      continue;

    dirent = &dir->dirents[dirent_idx];
    dirent->name = uv__strdup(res->d_name);

    if (dirent->name == NULL)
      goto error;

    dirent->type = uv__fs_get_dirent_type(res);
    ++dirent_idx;
  }

  return dirent_idx;

error:
  /* Undo the partial batch so the caller does not see half-filled state. */
  for (i = 0; i < dirent_idx; ++i) {
    uv__free((char*) dir->dirents[i].name);
    dir->dirents[i].name = NULL;
  }

  return -1;
}
604
/* Close the directory stream opened by uv__fs_opendir() and release the
 * uv_dir_t wrapper stored in req->ptr. Always succeeds (returns 0). */
static int uv__fs_closedir(uv_fs_t* req) {
  uv_dir_t* dir = req->ptr;

  if (dir->dir != NULL) {
    closedir(dir->dir);
    dir->dir = NULL;
  }

  uv__free(dir);
  req->ptr = NULL;
  return 0;
}
619
/* Query filesystem statistics for req->path via statfs(2) or statvfs(2)
 * (platform-dependent) and store a heap-allocated uv_statfs_t in req->ptr.
 * Returns 0 on success, -1 with errno set.
 */
static int uv__fs_statfs(uv_fs_t* req) {
  uv_statfs_t* stat_fs;
#if defined(__sun) || defined(__MVS__) || defined(__NetBSD__) || defined(__HAIKU__)
  struct statvfs buf;

  if (0 != statvfs(req->path, &buf))
#else
  struct statfs buf;

  if (0 != statfs(req->path, &buf))
#endif /* defined(__sun) || defined(__MVS__) || defined(__NetBSD__) || defined(__HAIKU__) */
    return -1;

  stat_fs = uv__malloc(sizeof(*stat_fs));
  if (stat_fs == NULL) {
    errno = ENOMEM;
    return -1;
  }

#if defined(__sun) || defined(__MVS__) || defined(__OpenBSD__) || defined(__NetBSD__) || defined(__HAIKU__)
  stat_fs->f_type = 0;  /* f_type is not supported. */
#else
  stat_fs->f_type = buf.f_type;
#endif
  /* The remaining fields have the same names in statfs and statvfs. */
  stat_fs->f_bsize = buf.f_bsize;
  stat_fs->f_blocks = buf.f_blocks;
  stat_fs->f_bfree = buf.f_bfree;
  stat_fs->f_bavail = buf.f_bavail;
  stat_fs->f_files = buf.f_files;
  stat_fs->f_ffree = buf.f_ffree;
  req->ptr = stat_fs;
  return 0;
}
653
uv__fs_pathmax_size(const char * path)654 static ssize_t uv__fs_pathmax_size(const char* path) {
655 ssize_t pathmax;
656
657 pathmax = pathconf(path, _PC_PATH_MAX);
658
659 if (pathmax == -1)
660 pathmax = UV__PATH_MAX;
661
662 return pathmax;
663 }
664
/* Read the target of the symlink req->path into a freshly allocated,
 * NUL-terminated string stored in req->ptr. Returns 0 on success, -1 with
 * errno set.
 */
static ssize_t uv__fs_readlink(uv_fs_t* req) {
  ssize_t maxlen;
  ssize_t len;
  char* buf;
  char* newbuf;

#if defined(_POSIX_PATH_MAX) || defined(PATH_MAX)
  maxlen = uv__fs_pathmax_size(req->path);
#else
  /* We may not have a real PATH_MAX.  Read size of link.  */
  struct stat st;
  int ret;
  ret = lstat(req->path, &st);
  if (ret != 0)
    return -1;
  if (!S_ISLNK(st.st_mode)) {
    errno = EINVAL;
    return -1;
  }

  maxlen = st.st_size;

  /* According to readlink(2) lstat can report st_size == 0
     for some symlinks, such as those in /proc or /sys.  */
  if (maxlen == 0)
    maxlen = uv__fs_pathmax_size(req->path);
#endif

  buf = uv__malloc(maxlen);

  if (buf == NULL) {
    errno = ENOMEM;
    return -1;
  }

#if defined(__MVS__)
  len = os390_readlink(req->path, buf, maxlen);
#else
  len = readlink(req->path, buf, maxlen);
#endif

  if (len == -1) {
    uv__free(buf);
    return -1;
  }

  /* Uncommon case: resize to make room for the trailing nul byte. */
  if (len == maxlen) {
    newbuf = uv__realloc(buf, len + 1);

    if (newbuf == NULL) {
      uv__free(buf);
      return -1;
    }

    buf = newbuf;
  }

  /* readlink() does not NUL-terminate; do it ourselves. */
  buf[len] = '\0';
  req->ptr = buf;

  return 0;
}
728
/* Resolve req->path to a canonical absolute path, storing a heap-allocated
 * string in req->ptr. Returns 0 on success, -1 with errno set.
 */
static ssize_t uv__fs_realpath(uv_fs_t* req) {
  char* buf;

#if defined(_POSIX_VERSION) && _POSIX_VERSION >= 200809L
  /* POSIX.1-2008 realpath() allocates the result itself when passed NULL. */
  buf = realpath(req->path, NULL);
  if (buf == NULL)
    return -1;
#else
  ssize_t len;

  /* Pre-2008 realpath() needs a caller-supplied buffer of at least
   * PATH_MAX bytes. */
  len = uv__fs_pathmax_size(req->path);
  buf = uv__malloc(len + 1);

  if (buf == NULL) {
    errno = ENOMEM;
    return -1;
  }

  if (realpath(req->path, buf) == NULL) {
    uv__free(buf);
    return -1;
  }
#endif

  req->ptr = buf;

  return 0;
}
757
/* Userspace sendfile() emulation: copy up to req->bufsml[0].len bytes from
 * the source fd (req->flags) to the target fd (req->file), starting at
 * req->off, through an 8 KiB stack buffer. On success updates req->off and
 * returns the number of bytes sent; on error returns -1 with errno set.
 */
static ssize_t uv__fs_sendfile_emul(uv_fs_t* req) {
  struct pollfd pfd;
  int use_pread;
  off_t offset;
  ssize_t nsent;
  ssize_t nread;
  ssize_t nwritten;
  size_t buflen;
  size_t len;
  ssize_t n;
  int in_fd;
  int out_fd;
  char buf[8192];

  /* NOTE: for sendfile requests, req->flags carries the *source* fd and
   * req->file the *target* fd (see uv_fs_sendfile()). */
  len = req->bufsml[0].len;
  in_fd = req->flags;
  out_fd = req->file;
  offset = req->off;
  use_pread = 1;

  /* Here are the rules regarding errors:
   *
   * 1. Read errors are reported only if nsent==0, otherwise we return nsent.
   *    The user needs to know that some data has already been sent, to stop
   *    them from sending it twice.
   *
   * 2. Write errors are always reported. Write errors are bad because they
   *    mean data loss: we've read data but now we can't write it out.
   *
   * We try to use pread() and fall back to regular read() if the source fd
   * doesn't support positional reads, for example when it's a pipe fd.
   *
   * If we get EAGAIN when writing to the target fd, we poll() on it until
   * it becomes writable again.
   *
   * FIXME: If we get a write error when use_pread==1, it should be safe to
   *        return the number of sent bytes instead of an error because pread()
   *        is, in theory, idempotent. However, special files in /dev or /proc
   *        may support pread() but not necessarily return the same data on
   *        successive reads.
   *
   * FIXME: There is no way now to signal that we managed to send *some* data
   *        before a write error.
   */
  for (nsent = 0; (size_t) nsent < len; ) {
    buflen = len - nsent;

    if (buflen > sizeof(buf))
      buflen = sizeof(buf);

    do
      if (use_pread)
        nread = pread(in_fd, buf, buflen, offset);
      else
        nread = read(in_fd, buf, buflen);
    while (nread == -1 && errno == EINTR);

    if (nread == 0)
      goto out;  /* EOF before the requested length; return what was sent. */

    if (nread == -1) {
      /* pread() not supported by the source fd; retry once with plain
       * read() (only safe before any data has been sent). */
      if (use_pread && nsent == 0 && (errno == EIO || errno == ESPIPE)) {
        use_pread = 0;
        continue;
      }

      if (nsent == 0)
        nsent = -1;

      goto out;
    }

    /* Write out everything that was just read; short writes loop. */
    for (nwritten = 0; nwritten < nread; ) {
      do
        n = write(out_fd, buf + nwritten, nread - nwritten);
      while (n == -1 && errno == EINTR);

      if (n != -1) {
        nwritten += n;
        continue;
      }

      if (errno != EAGAIN && errno != EWOULDBLOCK) {
        nsent = -1;
        goto out;
      }

      /* Target fd is non-blocking and full; wait until writable. */
      pfd.fd = out_fd;
      pfd.events = POLLOUT;
      pfd.revents = 0;

      do
        n = poll(&pfd, 1, -1);
      while (n == -1 && errno == EINTR);

      if (n == -1 || (pfd.revents & ~POLLOUT) != 0) {
        errno = EIO;
        nsent = -1;
        goto out;
      }
    }

    offset += nread;
    nsent += nread;
  }

out:
  if (nsent != -1)
    req->off = offset;

  return nsent;
}
870
871
/* Copy req->bufsml[0].len bytes from the source fd (req->flags) to the
 * target fd (req->file) starting at req->off, using the platform's
 * sendfile(2) where possible and falling back to the userspace emulation
 * for fd types the syscall rejects. On success updates req->off and returns
 * the number of bytes sent; on error returns -1 with errno set.
 */
static ssize_t uv__fs_sendfile(uv_fs_t* req) {
  int in_fd;
  int out_fd;

  in_fd = req->flags;
  out_fd = req->file;

#if defined(__linux__) || defined(__sun)
  {
    off_t off;
    ssize_t r;

    off = req->off;
    r = sendfile(out_fd, in_fd, &off, req->bufsml[0].len);

    /* sendfile() on SunOS returns EINVAL if the target fd is not a socket but
     * it still writes out data. Fortunately, we can detect it by checking if
     * the offset has been updated.
     */
    if (r != -1 || off > req->off) {
      r = off - req->off;
      req->off = off;
      return r;
    }

    /* These errnos mean the fd pair is unsupported by sendfile(2); retry
     * with the read/write emulation. */
    if (errno == EINVAL ||
        errno == EIO ||
        errno == ENOTSOCK ||
        errno == EXDEV) {
      errno = 0;
      return uv__fs_sendfile_emul(req);
    }

    return -1;
  }
#elif defined(__APPLE__)           || \
      defined(__DragonFly__)       || \
      defined(__FreeBSD__)         || \
      defined(__FreeBSD_kernel__)
  {
    off_t len;
    ssize_t r;

    /* sendfile() on FreeBSD and Darwin returns EAGAIN if the target fd is in
     * non-blocking mode and not all data could be written. If a non-zero
     * number of bytes have been sent, we don't consider it an error.
     */

#if defined(__FreeBSD__) || defined(__DragonFly__)
    len = 0;
    r = sendfile(in_fd, out_fd, req->off, req->bufsml[0].len, NULL, &len, 0);
#elif defined(__FreeBSD_kernel__)
    len = 0;
    r = bsd_sendfile(in_fd,
                     out_fd,
                     req->off,
                     req->bufsml[0].len,
                     NULL,
                     &len,
                     0);
#else
    /* The darwin sendfile takes len as an input for the length to send,
     * so make sure to initialize it with the caller's value. */
    len = req->bufsml[0].len;
    r = sendfile(in_fd, out_fd, req->off, &len, NULL, 0);
#endif

     /*
     * The man page for sendfile(2) on DragonFly states that `len` contains
     * a meaningful value ONLY in case of EAGAIN and EINTR.
     * Nothing is said about it's value in case of other errors, so better
     * not depend on the potential wrong assumption that is was not modified
     * by the syscall.
     */
    if (r == 0 || ((errno == EAGAIN || errno == EINTR) && len != 0)) {
      req->off += len;
      return (ssize_t) len;
    }

    /* Unsupported fd pair; retry with the read/write emulation. */
    if (errno == EINVAL ||
        errno == EIO ||
        errno == ENOTSOCK ||
        errno == EXDEV) {
      errno = 0;
      return uv__fs_sendfile_emul(req);
    }

    return -1;
  }
#else
  /* Squelch compiler warnings. */
  (void) &in_fd;
  (void) &out_fd;

  return uv__fs_sendfile_emul(req);
#endif
}
969
970
/* Set access/modification times on the path req->path from the
 * fractional-second doubles req->atime and req->mtime. Path-based
 * counterpart of uv__fs_futime(). Returns 0 on success, -1 with errno set
 * (ENOSYS where unsupported).
 */
static ssize_t uv__fs_utime(uv_fs_t* req) {
#if defined(__linux__)                                                         \
    || defined(_AIX71)                                                         \
    || defined(__sun)                                                          \
    || defined(__HAIKU__)
  /* utimesat() has nanosecond resolution but we stick to microseconds
   * for the sake of consistency with other platforms.
   */
  struct timespec ts[2];
  /* tv_sec truncates the double to whole seconds; tv_nsec recovers the
   * microsecond remainder and scales it to nanoseconds. */
  ts[0].tv_sec  = req->atime;
  ts[0].tv_nsec = (uint64_t)(req->atime * 1000000) % 1000000 * 1000;
  ts[1].tv_sec  = req->mtime;
  ts[1].tv_nsec = (uint64_t)(req->mtime * 1000000) % 1000000 * 1000;
  return utimensat(AT_FDCWD, req->path, ts, 0);
#elif defined(__APPLE__)                                                      \
    || defined(__DragonFly__)                                                 \
    || defined(__FreeBSD__)                                                   \
    || defined(__FreeBSD_kernel__)                                            \
    || defined(__NetBSD__)                                                    \
    || defined(__OpenBSD__)
  struct timeval tv[2];
  tv[0].tv_sec  = req->atime;
  tv[0].tv_usec = (uint64_t)(req->atime * 1000000) % 1000000;
  tv[1].tv_sec  = req->mtime;
  tv[1].tv_usec = (uint64_t)(req->mtime * 1000000) % 1000000;
  return utimes(req->path, tv);
#elif defined(_AIX)                                                           \
    && !defined(_AIX71)
  /* Pre-AIX 7.1: utime() only, so times are whole seconds. */
  struct utimbuf buf;
  buf.actime = req->atime;
  buf.modtime = req->mtime;
  return utime(req->path, &buf);
#elif defined(__MVS__)
  attrib_t atr;
  memset(&atr, 0, sizeof(atr));
  atr.att_mtimechg = 1;
  atr.att_atimechg = 1;
  atr.att_mtime = req->mtime;
  atr.att_atime = req->atime;
  /* __lchattr() does not follow symlinks (lutimes-like semantics). */
  return __lchattr((char*) req->path, &atr, sizeof(atr));
#else
  errno = ENOSYS;
  return -1;
#endif
}
1016
1017
/* Perform the write described by req: positional (pwrite/pwritev) when
 * req->off >= 0, otherwise at the current file offset (write/writev).
 * Returns bytes written, or -1 with errno set.
 */
static ssize_t uv__fs_write(uv_fs_t* req) {
#if defined(__linux__)
  /* Sticky flag: set once the pwritev syscall reports ENOSYS so later calls
   * go straight to the single-buffer pwrite fallback. */
  static int no_pwritev;
#endif
  ssize_t r;

  /* Serialize writes on OS X, concurrent write() and pwrite() calls result in
   * data loss. We can't use a per-file descriptor lock, the descriptor may be
   * a dup().
   */
#if defined(__APPLE__)
  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

  if (pthread_mutex_lock(&lock))
    abort();
#endif

  if (req->off < 0) {
    if (req->nbufs == 1)
      r = write(req->file, req->bufs[0].base, req->bufs[0].len);
    else
      r = writev(req->file, (struct iovec*) req->bufs, req->nbufs);
  } else {
    if (req->nbufs == 1) {
      r = pwrite(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
      goto done;
    }
#if HAVE_PREADV
    r = pwritev(req->file, (struct iovec*) req->bufs, req->nbufs, req->off);
#else
# if defined(__linux__)
    if (no_pwritev) retry:
# endif
    {
      /* NOTE: the fallback only writes the first buffer; callers get a
       * short-write result and must resubmit the rest. */
      r = pwrite(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
    }
# if defined(__linux__)
    else {
      r = uv__pwritev(req->file,
                      (struct iovec*) req->bufs,
                      req->nbufs,
                      req->off);
      if (r == -1 && errno == ENOSYS) {
        no_pwritev = 1;
        goto retry;
      }
    }
# endif
#endif
  }

done:
#if defined(__APPLE__)
  if (pthread_mutex_unlock(&lock))
    abort();
#endif

  return r;
}
1077
/* Copy req->path to req->new_path, honoring the UV_FS_COPYFILE_* flags
 * (EXCL, FICLONE, FICLONE_FORCE). Tries an FICLONE reflink on Linux first,
 * then falls back to a sendfile loop. The destination's mode is copied from
 * the source. On any failure the partially written destination is unlinked.
 * Returns 0 on success, -1 with errno set.
 */
static ssize_t uv__fs_copyfile(uv_fs_t* req) {
  uv_fs_t fs_req;
  uv_file srcfd;
  uv_file dstfd;
  struct stat src_statsbuf;
  struct stat dst_statsbuf;
  int dst_flags;
  int result;
  int err;
  size_t bytes_to_send;
  int64_t in_offset;
  ssize_t bytes_written;

  dstfd = -1;
  err = 0;

  /* Open the source file. */
  srcfd = uv_fs_open(NULL, &fs_req, req->path, O_RDONLY, 0, NULL);
  uv_fs_req_cleanup(&fs_req);

  if (srcfd < 0)
    return srcfd;  /* Already a negated errno from the sync uv_fs_open(). */

  /* Get the source file's mode. */
  if (fstat(srcfd, &src_statsbuf)) {
    err = UV__ERR(errno);
    goto out;
  }

  dst_flags = O_WRONLY | O_CREAT | O_TRUNC;

  if (req->flags & UV_FS_COPYFILE_EXCL)
    dst_flags |= O_EXCL;

  /* Open the destination file. */
  dstfd = uv_fs_open(NULL,
                     &fs_req,
                     req->new_path,
                     dst_flags,
                     src_statsbuf.st_mode,
                     NULL);
  uv_fs_req_cleanup(&fs_req);

  if (dstfd < 0) {
    err = dstfd;
    goto out;
  }

  /* Get the destination file's mode. */
  if (fstat(dstfd, &dst_statsbuf)) {
    err = UV__ERR(errno);
    goto out;
  }

  /* Check if srcfd and dstfd refer to the same file */
  if (src_statsbuf.st_dev == dst_statsbuf.st_dev &&
      src_statsbuf.st_ino == dst_statsbuf.st_ino) {
    goto out;  /* Nothing to copy; treated as success. */
  }

  if (fchmod(dstfd, src_statsbuf.st_mode) == -1) {
    err = UV__ERR(errno);
#ifdef __linux__
    if (err != UV_EPERM)
      goto out;

    {
      struct statfs s;

      /* fchmod() on CIFS shares always fails with EPERM unless the share is
       * mounted with "noperm". As fchmod() is a meaningless operation on such
       * shares anyway, detect that condition and squelch the error.
       */
      if (fstatfs(dstfd, &s) == -1)
        goto out;

      if (s.f_type != /* CIFS */ 0xFF534D42u)
        goto out;
    }

    err = 0;
#else  /* !__linux__ */
    goto out;
#endif  /* !__linux__ */
  }

#ifdef FICLONE
  if (req->flags & UV_FS_COPYFILE_FICLONE ||
      req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
    if (ioctl(dstfd, FICLONE, srcfd) == 0) {
      /* ioctl() with FICLONE succeeded. */
      goto out;
    }
    /* If an error occurred and force was set, return the error to the caller;
     * fall back to sendfile() when force was not set. */
    if (req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
      err = UV__ERR(errno);
      goto out;
    }
  }
#else
  if (req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
    err = UV_ENOSYS;
    goto out;
  }
#endif

  /* Fallback: pump the file contents through sendfile until the full source
   * size (as observed at open time) has been sent. */
  bytes_to_send = src_statsbuf.st_size;
  in_offset = 0;
  while (bytes_to_send != 0) {
    uv_fs_sendfile(NULL, &fs_req, dstfd, srcfd, in_offset, bytes_to_send, NULL);
    bytes_written = fs_req.result;
    uv_fs_req_cleanup(&fs_req);

    if (bytes_written < 0) {
      err = bytes_written;
      break;
    }

    bytes_to_send -= bytes_written;
    in_offset += bytes_written;
  }

out:
  if (err < 0)
    result = err;
  else
    result = 0;

  /* Close the source file. */
  err = uv__close_nocheckstdio(srcfd);

  /* Don't overwrite any existing errors. */
  if (err != 0 && result == 0)
    result = err;

  /* Close the destination file if it is open. */
  if (dstfd >= 0) {
    err = uv__close_nocheckstdio(dstfd);

    /* Don't overwrite any existing errors. */
    if (err != 0 && result == 0)
      result = err;

    /* Remove the destination file if something went wrong. */
    if (result != 0) {
      uv_fs_unlink(NULL, &fs_req, req->new_path, NULL);
      /* Ignore the unlink return value, as an error already happened. */
      uv_fs_req_cleanup(&fs_req);
    }
  }

  if (result == 0)
    return 0;

  /* Map the internal negated-errno code back to this file's -1/errno
   * convention for the dispatcher. */
  errno = UV__ERR(result);
  return -1;
}
1236
/* Translate a platform `struct stat` into libuv's portable uv_stat_t.
 * The basic fields map one-to-one; the timestamp/flags fields differ per
 * platform, hence the #ifdef ladder below.  Platforms without nanosecond
 * timestamps get tv_nsec = 0, and platforms without a birth (creation)
 * time fall back to the change time (st_ctime).
 */
static void uv__to_stat(struct stat* src, uv_stat_t* dst) {
  dst->st_dev = src->st_dev;
  dst->st_mode = src->st_mode;
  dst->st_nlink = src->st_nlink;
  dst->st_uid = src->st_uid;
  dst->st_gid = src->st_gid;
  dst->st_rdev = src->st_rdev;
  dst->st_ino = src->st_ino;
  dst->st_size = src->st_size;
  dst->st_blksize = src->st_blksize;
  dst->st_blocks = src->st_blocks;

#if defined(__APPLE__)
  /* macOS spells the timespec members st_*timespec and has a real
   * birth time plus BSD file flags and a generation number. */
  dst->st_atim.tv_sec = src->st_atimespec.tv_sec;
  dst->st_atim.tv_nsec = src->st_atimespec.tv_nsec;
  dst->st_mtim.tv_sec = src->st_mtimespec.tv_sec;
  dst->st_mtim.tv_nsec = src->st_mtimespec.tv_nsec;
  dst->st_ctim.tv_sec = src->st_ctimespec.tv_sec;
  dst->st_ctim.tv_nsec = src->st_ctimespec.tv_nsec;
  dst->st_birthtim.tv_sec = src->st_birthtimespec.tv_sec;
  dst->st_birthtim.tv_nsec = src->st_birthtimespec.tv_nsec;
  dst->st_flags = src->st_flags;
  dst->st_gen = src->st_gen;
#elif defined(__ANDROID__)
  /* Bionic exposes separate st_*time / st_*timensec members.  No birth
   * time; use ctime as an approximation. */
  dst->st_atim.tv_sec = src->st_atime;
  dst->st_atim.tv_nsec = src->st_atimensec;
  dst->st_mtim.tv_sec = src->st_mtime;
  dst->st_mtim.tv_nsec = src->st_mtimensec;
  dst->st_ctim.tv_sec = src->st_ctime;
  dst->st_ctim.tv_nsec = src->st_ctimensec;
  dst->st_birthtim.tv_sec = src->st_ctime;
  dst->st_birthtim.tv_nsec = src->st_ctimensec;
  dst->st_flags = 0;
  dst->st_gen = 0;
#elif !defined(_AIX) && ( \
    defined(__DragonFly__)  || \
    defined(__FreeBSD__)    || \
    defined(__OpenBSD__)    || \
    defined(__NetBSD__)     || \
    defined(_GNU_SOURCE)    || \
    defined(_BSD_SOURCE)    || \
    defined(_SVID_SOURCE)   || \
    defined(_XOPEN_SOURCE)  || \
    defined(_DEFAULT_SOURCE))
  /* POSIX.1-2008 style: struct stat has st_atim/st_mtim/st_ctim timespecs. */
  dst->st_atim.tv_sec = src->st_atim.tv_sec;
  dst->st_atim.tv_nsec = src->st_atim.tv_nsec;
  dst->st_mtim.tv_sec = src->st_mtim.tv_sec;
  dst->st_mtim.tv_nsec = src->st_mtim.tv_nsec;
  dst->st_ctim.tv_sec = src->st_ctim.tv_sec;
  dst->st_ctim.tv_nsec = src->st_ctim.tv_nsec;
# if defined(__FreeBSD__)    || \
     defined(__NetBSD__)
  /* These BSDs additionally provide birth time, flags and generation. */
  dst->st_birthtim.tv_sec = src->st_birthtim.tv_sec;
  dst->st_birthtim.tv_nsec = src->st_birthtim.tv_nsec;
  dst->st_flags = src->st_flags;
  dst->st_gen = src->st_gen;
# else
  dst->st_birthtim.tv_sec = src->st_ctim.tv_sec;
  dst->st_birthtim.tv_nsec = src->st_ctim.tv_nsec;
  dst->st_flags = 0;
  dst->st_gen = 0;
# endif
#else
  /* Legacy fallback: second-granularity timestamps only. */
  dst->st_atim.tv_sec = src->st_atime;
  dst->st_atim.tv_nsec = 0;
  dst->st_mtim.tv_sec = src->st_mtime;
  dst->st_mtim.tv_nsec = 0;
  dst->st_ctim.tv_sec = src->st_ctime;
  dst->st_ctim.tv_nsec = 0;
  dst->st_birthtim.tv_sec = src->st_ctime;
  dst->st_birthtim.tv_nsec = 0;
  dst->st_flags = 0;
  dst->st_gen = 0;
#endif
}
1312
1313
/* stat()/lstat()/fstat() via the Linux statx(2) system call.
 *
 * fd        - dirfd used when is_fstat is set (with AT_EMPTY_PATH).
 * path      - target path; "" for fstat-style calls.
 * is_fstat  - nonzero: stat the open descriptor `fd` itself.
 * is_lstat  - nonzero: do not follow a trailing symlink.
 * buf       - out parameter, filled on success.
 *
 * Returns 0 on success, -1 with errno set on a real error, or UV_ENOSYS
 * when statx() is unavailable (the caller then falls back to plain stat).
 * Once UV_ENOSYS is seen, `no_statx` latches so later calls short-circuit.
 */
static int uv__fs_statx(int fd,
                        const char* path,
                        int is_fstat,
                        int is_lstat,
                        uv_stat_t* buf) {
  STATIC_ASSERT(UV_ENOSYS != -1);
#ifdef __linux__
  static int no_statx;
  struct uv__statx statxbuf;
  int dirfd;
  int flags;
  int mode;
  int rc;

  if (no_statx)
    return UV_ENOSYS;

  dirfd = AT_FDCWD;
  flags = 0; /* AT_STATX_SYNC_AS_STAT */
  mode = 0xFFF; /* STATX_BASIC_STATS + STATX_BTIME */

  if (is_fstat) {
    dirfd = fd;
    flags |= 0x1000; /* AT_EMPTY_PATH */
  }

  if (is_lstat)
    flags |= AT_SYMLINK_NOFOLLOW;

  rc = uv__statx(dirfd, path, flags, mode, &statxbuf);

  switch (rc) {
  case 0:
    break;
  case -1:
    /* EPERM happens when a seccomp filter rejects the system call.
     * Has been observed with libseccomp < 2.3.3 and docker < 18.04.
     */
    if (errno != EINVAL && errno != EPERM && errno != ENOSYS)
      return -1;
    /* Fall through. */
  default:
    /* Normally on success, zero is returned and On error, -1 is returned.
     * Observed on S390 RHEL running in a docker container with statx not
     * implemented, rc might return 1 with 0 set as the error code in which
     * case we return ENOSYS.
     */
    no_statx = 1;
    return UV_ENOSYS;
  }

  /* statx() splits device numbers into major/minor pairs; recombine them.
   * NOTE: the 256*major+minor encoding matches the historical dev_t layout
   * but differs from glibc makedev() for majors/minors >= 256. */
  buf->st_dev = 256 * statxbuf.stx_dev_major + statxbuf.stx_dev_minor;
  buf->st_mode = statxbuf.stx_mode;
  buf->st_nlink = statxbuf.stx_nlink;
  buf->st_uid = statxbuf.stx_uid;
  buf->st_gid = statxbuf.stx_gid;
  /* Combine major and minor: copying only stx_rdev_major would silently
   * drop the minor device number of character/block special files. */
  buf->st_rdev = 256 * statxbuf.stx_rdev_major + statxbuf.stx_rdev_minor;
  buf->st_ino = statxbuf.stx_ino;
  buf->st_size = statxbuf.stx_size;
  buf->st_blksize = statxbuf.stx_blksize;
  buf->st_blocks = statxbuf.stx_blocks;
  buf->st_atim.tv_sec = statxbuf.stx_atime.tv_sec;
  buf->st_atim.tv_nsec = statxbuf.stx_atime.tv_nsec;
  buf->st_mtim.tv_sec = statxbuf.stx_mtime.tv_sec;
  buf->st_mtim.tv_nsec = statxbuf.stx_mtime.tv_nsec;
  buf->st_ctim.tv_sec = statxbuf.stx_ctime.tv_sec;
  buf->st_ctim.tv_nsec = statxbuf.stx_ctime.tv_nsec;
  buf->st_birthtim.tv_sec = statxbuf.stx_btime.tv_sec;
  buf->st_birthtim.tv_nsec = statxbuf.stx_btime.tv_nsec;
  buf->st_flags = 0;
  buf->st_gen = 0;

  return 0;
#else
  return UV_ENOSYS;
#endif /* __linux__ */
}
1391
1392
/* stat() `path` into `buf`, preferring the statx() fast path.  Falls back
 * to plain stat() + uv__to_stat() conversion when statx() is unavailable. */
static int uv__fs_stat(const char *path, uv_stat_t *buf) {
  struct stat st;
  int rc;

  rc = uv__fs_statx(-1, path, /* is_fstat */ 0, /* is_lstat */ 0, buf);
  if (rc == UV_ENOSYS) {
    rc = stat(path, &st);
    if (rc == 0)
      uv__to_stat(&st, buf);
  }

  return rc;
}
1407
1408
/* lstat() `path` into `buf` (does not follow a trailing symlink).  Prefers
 * statx(); falls back to lstat() + uv__to_stat() when statx() is missing. */
static int uv__fs_lstat(const char *path, uv_stat_t *buf) {
  struct stat st;
  int rc;

  rc = uv__fs_statx(-1, path, /* is_fstat */ 0, /* is_lstat */ 1, buf);
  if (rc == UV_ENOSYS) {
    rc = lstat(path, &st);
    if (rc == 0)
      uv__to_stat(&st, buf);
  }

  return rc;
}
1423
1424
/* fstat() the open descriptor `fd` into `buf`.  Prefers statx() with an
 * empty path; falls back to fstat() + uv__to_stat() when unavailable. */
static int uv__fs_fstat(int fd, uv_stat_t *buf) {
  struct stat st;
  int rc;

  rc = uv__fs_statx(fd, "", /* is_fstat */ 1, /* is_lstat */ 0, buf);
  if (rc == UV_ENOSYS) {
    rc = fstat(fd, &st);
    if (rc == 0)
      uv__to_stat(&st, buf);
  }

  return rc;
}
1439
/* After a partial read/write of `size` bytes across `bufs`, return how many
 * buffers were fully consumed and shrink the (at most one) partially
 * consumed buffer in place so the caller can retry from the right spot. */
static size_t uv__fs_buf_offset(uv_buf_t* bufs, size_t size) {
  size_t idx = 0;

  /* Walk past every buffer that was consumed in full. */
  while (size > 0 && bufs[idx].len <= size) {
    size -= bufs[idx].len;
    idx++;
  }

  /* Trim the partially consumed buffer, if any. */
  if (size > 0) {
    bufs[idx].base += size;
    bufs[idx].len -= size;
  }

  return idx;
}
1453
/* Write all of req->bufs, looping until everything is flushed or an error
 * occurs.  Each uv__fs_write() call is limited to uv__getiovmax() buffers;
 * partial writes are fixed up via uv__fs_buf_offset() between iterations.
 * Returns the total byte count written, or a negative result from the first
 * failing write when nothing was written at all.  Frees a heap-allocated
 * bufs array and resets req->bufs/req->nbufs before returning. */
static ssize_t uv__fs_write_all(uv_fs_t* req) {
  unsigned int iovmax;
  unsigned int nbufs;
  uv_buf_t* bufs;
  ssize_t total;
  ssize_t result;

  iovmax = uv__getiovmax();
  nbufs = req->nbufs;
  bufs = req->bufs;  /* Remember the original array so we can free it. */
  total = 0;

  while (nbufs > 0) {
    /* Clamp this batch to the iovec limit. */
    req->nbufs = nbufs;
    if (req->nbufs > iovmax)
      req->nbufs = iovmax;

    /* Retry the write when interrupted by a signal. */
    do
      result = uv__fs_write(req);
    while (result < 0 && errno == EINTR);

    if (result <= 0) {
      /* Surface the error only if no earlier batch succeeded; otherwise
       * report the bytes already written. */
      if (total == 0)
        total = result;
      break;
    }

    /* Positional writes advance the offset; off < 0 means "current". */
    if (req->off >= 0)
      req->off += result;

    /* Skip fully written buffers and trim a partially written one. */
    req->nbufs = uv__fs_buf_offset(req->bufs, result);
    req->bufs += req->nbufs;
    nbufs -= req->nbufs;
    total += result;
  }

  if (bufs != req->bufsml)
    uv__free(bufs);

  req->bufs = NULL;
  req->nbufs = 0;

  return total;
}
1498
1499
/* Thread-pool work callback: dispatch the fs request to the matching
 * system call or uv__fs_*() helper, retrying on EINTR where safe, and
 * store the outcome in req->result (negated errno on failure). */
static void uv__fs_work(struct uv__work* w) {
  int retry_on_eintr;
  uv_fs_t* req;
  ssize_t r;

  req = container_of(w, uv_fs_t, work_req);
  /* close() must not be retried (the fd state after EINTR is unspecified)
   * and read() handles partial results itself. */
  retry_on_eintr = !(req->fs_type == UV_FS_CLOSE ||
                     req->fs_type == UV_FS_READ);

  do {
    errno = 0;

/* One case label per request type: run `action` and capture its result. */
#define X(type, action)                                                       \
  case UV_FS_ ## type:                                                        \
    r = action;                                                               \
    break;

    switch (req->fs_type) {
    X(ACCESS, access(req->path, req->flags));
    X(CHMOD, chmod(req->path, req->mode));
    X(CHOWN, chown(req->path, req->uid, req->gid));
    X(CLOSE, uv__fs_close(req->file));
    X(COPYFILE, uv__fs_copyfile(req));
    X(FCHMOD, fchmod(req->file, req->mode));
    X(FCHOWN, fchown(req->file, req->uid, req->gid));
    X(LCHOWN, lchown(req->path, req->uid, req->gid));
    X(FDATASYNC, uv__fs_fdatasync(req));
    X(FSTAT, uv__fs_fstat(req->file, &req->statbuf));
    X(FSYNC, uv__fs_fsync(req));
    X(FTRUNCATE, ftruncate(req->file, req->off));
    X(FUTIME, uv__fs_futime(req));
    X(LSTAT, uv__fs_lstat(req->path, &req->statbuf));
    X(LINK, link(req->path, req->new_path));
    X(MKDIR, mkdir(req->path, req->mode));
    X(MKDTEMP, uv__fs_mkdtemp(req));
    X(MKSTEMP, uv__fs_mkstemp(req));
    X(OPEN, uv__fs_open(req));
    X(READ, uv__fs_read(req));
    X(SCANDIR, uv__fs_scandir(req));
    X(OPENDIR, uv__fs_opendir(req));
    X(READDIR, uv__fs_readdir(req));
    X(CLOSEDIR, uv__fs_closedir(req));
    X(READLINK, uv__fs_readlink(req));
    X(REALPATH, uv__fs_realpath(req));
    X(RENAME, rename(req->path, req->new_path));
    X(RMDIR, rmdir(req->path));
    X(SENDFILE, uv__fs_sendfile(req));
    X(STAT, uv__fs_stat(req->path, &req->statbuf));
    X(STATFS, uv__fs_statfs(req));
    X(SYMLINK, symlink(req->path, req->new_path));
    X(UNLINK, unlink(req->path));
    X(UTIME, uv__fs_utime(req));
    X(WRITE, uv__fs_write_all(req));
    default: abort();
    }
#undef X
  } while (r == -1 && errno == EINTR && retry_on_eintr);

  /* Map -1/errno to libuv's negated-errno convention. */
  if (r == -1)
    req->result = UV__ERR(errno);
  else
    req->result = r;

  /* stat-family requests expose the stat buffer through req->ptr. */
  if (r == 0 && (req->fs_type == UV_FS_STAT ||
                 req->fs_type == UV_FS_FSTAT ||
                 req->fs_type == UV_FS_LSTAT)) {
    req->ptr = &req->statbuf;
  }
}
1569
1570
/* Loop-thread completion callback: unregister the request, translate a
 * cancellation into UV_ECANCELED, then invoke the user callback. */
static void uv__fs_done(struct uv__work* w, int status) {
  uv_fs_t* req;

  req = container_of(w, uv_fs_t, work_req);
  uv__req_unregister(req->loop, req);

  if (status == UV_ECANCELED) {
    /* A cancelled request never ran, so its result must still be 0. */
    assert(req->result == 0);
    req->result = UV_ECANCELED;
  }

  req->cb(req);
}
1584
1585
/* access(2): check accessibility of `path`; `flags` is the F_OK/R_OK/W_OK/
 * X_OK mask.  INIT/PATH/POST set up, copy the path, and dispatch the req. */
int uv_fs_access(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 int flags,
                 uv_fs_cb cb) {
  INIT(ACCESS);
  PATH;
  req->flags = flags;
  POST;
}
1596
1597
/* chmod(2): change the mode of the file at `path`. */
int uv_fs_chmod(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                int mode,
                uv_fs_cb cb) {
  INIT(CHMOD);
  PATH;
  req->mode = mode;
  POST;
}
1608
1609
/* chown(2): change owner/group of the file at `path`. */
int uv_fs_chown(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                uv_uid_t uid,
                uv_gid_t gid,
                uv_fs_cb cb) {
  INIT(CHOWN);
  PATH;
  req->uid = uid;
  req->gid = gid;
  POST;
}
1622
1623
/* close(2): close the descriptor `file` (not retried on EINTR; see
 * uv__fs_work()). */
int uv_fs_close(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
  INIT(CLOSE);
  req->file = file;
  POST;
}
1629
1630
/* fchmod(2): change the mode of the open descriptor `file`. */
int uv_fs_fchmod(uv_loop_t* loop,
                 uv_fs_t* req,
                 uv_file file,
                 int mode,
                 uv_fs_cb cb) {
  INIT(FCHMOD);
  req->file = file;
  req->mode = mode;
  POST;
}
1641
1642
/* fchown(2): change owner/group of the open descriptor `file`. */
int uv_fs_fchown(uv_loop_t* loop,
                 uv_fs_t* req,
                 uv_file file,
                 uv_uid_t uid,
                 uv_gid_t gid,
                 uv_fs_cb cb) {
  INIT(FCHOWN);
  req->file = file;
  req->uid = uid;
  req->gid = gid;
  POST;
}
1655
1656
/* lchown(2): change owner/group of `path` without following a symlink. */
int uv_fs_lchown(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 uv_uid_t uid,
                 uv_gid_t gid,
                 uv_fs_cb cb) {
  INIT(LCHOWN);
  PATH;
  req->uid = uid;
  req->gid = gid;
  POST;
}
1669
1670
/* fdatasync(2): flush the data (not necessarily metadata) of `file`. */
int uv_fs_fdatasync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
  INIT(FDATASYNC);
  req->file = file;
  POST;
}
1676
1677
/* fstat(2): stat the open descriptor `file` into req->statbuf. */
int uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
  INIT(FSTAT);
  req->file = file;
  POST;
}
1683
1684
/* fsync(2): flush data and metadata of `file` to stable storage. */
int uv_fs_fsync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
  INIT(FSYNC);
  req->file = file;
  POST;
}
1690
1691
/* ftruncate(2): truncate/extend `file` to `off` bytes. */
int uv_fs_ftruncate(uv_loop_t* loop,
                    uv_fs_t* req,
                    uv_file file,
                    int64_t off,
                    uv_fs_cb cb) {
  INIT(FTRUNCATE);
  req->file = file;
  req->off = off;
  POST;
}
1702
1703
/* Set access/modification times of `file`; atime/mtime are seconds with a
 * fractional part (converted in uv__fs_futime()). */
int uv_fs_futime(uv_loop_t* loop,
                 uv_fs_t* req,
                 uv_file file,
                 double atime,
                 double mtime,
                 uv_fs_cb cb) {
  INIT(FUTIME);
  req->file = file;
  req->atime = atime;
  req->mtime = mtime;
  POST;
}
1716
1717
/* lstat(2): stat `path` without following a trailing symlink. */
int uv_fs_lstat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  INIT(LSTAT);
  PATH;
  POST;
}
1723
1724
/* link(2): create a hard link `new_path` pointing at `path`. */
int uv_fs_link(uv_loop_t* loop,
               uv_fs_t* req,
               const char* path,
               const char* new_path,
               uv_fs_cb cb) {
  INIT(LINK);
  PATH2;
  POST;
}
1734
1735
/* mkdir(2): create a directory at `path` with the given mode. */
int uv_fs_mkdir(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                int mode,
                uv_fs_cb cb) {
  INIT(MKDIR);
  PATH;
  req->mode = mode;
  POST;
}
1746
1747
/* mkdtemp(3): create a unique temporary directory from template `tpl`.
 * The template is always copied (even for sync calls) because mkdtemp()
 * mutates it in place; uv_fs_req_cleanup() frees the copy. */
int uv_fs_mkdtemp(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* tpl,
                  uv_fs_cb cb) {
  INIT(MKDTEMP);
  req->path = uv__strdup(tpl);
  if (req->path == NULL)
    return UV_ENOMEM;
  POST;
}
1758
1759
/* mkstemp(3): create and open a unique temporary file from template `tpl`.
 * The template is always copied because mkstemp() mutates it in place;
 * uv_fs_req_cleanup() frees the copy. */
int uv_fs_mkstemp(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* tpl,
                  uv_fs_cb cb) {
  INIT(MKSTEMP);
  req->path = uv__strdup(tpl);
  if (req->path == NULL)
    return UV_ENOMEM;
  POST;
}
1770
1771
/* open(2): open `path` with `flags` (O_*) and `mode` (creation mode). */
int uv_fs_open(uv_loop_t* loop,
               uv_fs_t* req,
               const char* path,
               int flags,
               int mode,
               uv_fs_cb cb) {
  INIT(OPEN);
  PATH;
  req->flags = flags;
  req->mode = mode;
  POST;
}
1784
1785
/* readv/preadv-style read from `file` into `bufs`.  `off` < 0 means "read
 * at the current file position".  The bufs array is copied so the caller's
 * array may go out of scope; small counts use the inline req->bufsml. */
int uv_fs_read(uv_loop_t* loop, uv_fs_t* req,
               uv_file file,
               const uv_buf_t bufs[],
               unsigned int nbufs,
               int64_t off,
               uv_fs_cb cb) {
  INIT(READ);

  if (bufs == NULL || nbufs == 0)
    return UV_EINVAL;

  req->file = file;

  req->nbufs = nbufs;
  req->bufs = req->bufsml;
  /* Spill to the heap when the inline array is too small. */
  if (nbufs > ARRAY_SIZE(req->bufsml))
    req->bufs = uv__malloc(nbufs * sizeof(*bufs));

  if (req->bufs == NULL)
    return UV_ENOMEM;

  memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));

  req->off = off;
  POST;
}
1812
1813
/* scandir(3): read the entries of directory `path` into req->ptr. */
int uv_fs_scandir(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* path,
                  int flags,
                  uv_fs_cb cb) {
  INIT(SCANDIR);
  PATH;
  req->flags = flags;
  POST;
}
1824
/* opendir(3): open directory `path` for iteration with uv_fs_readdir(). */
int uv_fs_opendir(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* path,
                  uv_fs_cb cb) {
  INIT(OPENDIR);
  PATH;
  POST;
}
1833
/* Read the next batch of entries from a uv_dir_t previously opened with
 * uv_fs_opendir(); the caller must have set dir->dirents. */
int uv_fs_readdir(uv_loop_t* loop,
                  uv_fs_t* req,
                  uv_dir_t* dir,
                  uv_fs_cb cb) {
  INIT(READDIR);

  if (dir == NULL || dir->dir == NULL || dir->dirents == NULL)
    return UV_EINVAL;

  req->ptr = dir;
  POST;
}
1846
/* closedir(3): close a uv_dir_t opened with uv_fs_opendir(). */
int uv_fs_closedir(uv_loop_t* loop,
                   uv_fs_t* req,
                   uv_dir_t* dir,
                   uv_fs_cb cb) {
  INIT(CLOSEDIR);

  if (dir == NULL)
    return UV_EINVAL;

  req->ptr = dir;
  POST;
}
1859
/* readlink(2): read the target of the symlink at `path` into req->ptr. */
int uv_fs_readlink(uv_loop_t* loop,
                   uv_fs_t* req,
                   const char* path,
                   uv_fs_cb cb) {
  INIT(READLINK);
  PATH;
  POST;
}
1868
1869
/* realpath(3): canonicalize `path` (resolve symlinks, ., ..) into req->ptr. */
int uv_fs_realpath(uv_loop_t* loop,
                   uv_fs_t* req,
                   const char * path,
                   uv_fs_cb cb) {
  INIT(REALPATH);
  PATH;
  POST;
}
1878
1879
/* rename(2): atomically rename `path` to `new_path`. */
int uv_fs_rename(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 const char* new_path,
                 uv_fs_cb cb) {
  INIT(RENAME);
  PATH2;
  POST;
}
1889
1890
/* rmdir(2): remove the (empty) directory at `path`. */
int uv_fs_rmdir(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  INIT(RMDIR);
  PATH;
  POST;
}
1896
1897
/* sendfile(2)-style copy of `len` bytes from `in_fd` (at `off`) to `out_fd`.
 * Request fields are repurposed to carry the arguments: in_fd rides in
 * req->flags, out_fd in req->file and len in req->bufsml[0].len. */
int uv_fs_sendfile(uv_loop_t* loop,
                   uv_fs_t* req,
                   uv_file out_fd,
                   uv_file in_fd,
                   int64_t off,
                   size_t len,
                   uv_fs_cb cb) {
  INIT(SENDFILE);
  req->flags = in_fd; /* hack */
  req->file = out_fd;
  req->off = off;
  req->bufsml[0].len = len;
  POST;
}
1912
1913
/* stat(2): stat `path` into req->statbuf (follows symlinks). */
int uv_fs_stat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  INIT(STAT);
  PATH;
  POST;
}
1919
1920
/* symlink(2): create symlink `new_path` pointing at `path`.  `flags`
 * carries the UV_FS_SYMLINK_* options (used on platforms that need them). */
int uv_fs_symlink(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* path,
                  const char* new_path,
                  int flags,
                  uv_fs_cb cb) {
  INIT(SYMLINK);
  PATH2;
  req->flags = flags;
  POST;
}
1932
1933
/* unlink(2): remove the file at `path`. */
int uv_fs_unlink(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  INIT(UNLINK);
  PATH;
  POST;
}
1939
1940
/* Set access/modification times of `path`; atime/mtime are seconds with a
 * fractional part (converted in uv__fs_utime()). */
int uv_fs_utime(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                double atime,
                double mtime,
                uv_fs_cb cb) {
  INIT(UTIME);
  PATH;
  req->atime = atime;
  req->mtime = mtime;
  POST;
}
1953
1954
/* writev/pwritev-style write of `bufs` to `file`.  `off` < 0 means "write
 * at the current file position".  The bufs array is copied so the caller's
 * array may go out of scope; small counts use the inline req->bufsml. */
int uv_fs_write(uv_loop_t* loop,
                uv_fs_t* req,
                uv_file file,
                const uv_buf_t bufs[],
                unsigned int nbufs,
                int64_t off,
                uv_fs_cb cb) {
  INIT(WRITE);

  if (bufs == NULL || nbufs == 0)
    return UV_EINVAL;

  req->file = file;

  req->nbufs = nbufs;
  req->bufs = req->bufsml;
  /* Spill to the heap when the inline array is too small. */
  if (nbufs > ARRAY_SIZE(req->bufsml))
    req->bufs = uv__malloc(nbufs * sizeof(*bufs));

  if (req->bufs == NULL)
    return UV_ENOMEM;

  memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));

  req->off = off;
  POST;
}
1982
1983
/* Release all memory owned by a completed (or failed-to-start) fs request.
 * Safe to call with NULL and safe to call more than once: every freed
 * pointer is reset afterwards. */
void uv_fs_req_cleanup(uv_fs_t* req) {
  if (req == NULL)
    return;

  /* Only necessary for asychronous requests, i.e., requests with a callback.
   * Synchronous ones don't copy their arguments and have req->path and
   * req->new_path pointing to user-owned memory. UV_FS_MKDTEMP and
   * UV_FS_MKSTEMP are the exception to the rule, they always allocate memory.
   */
  if (req->path != NULL &&
      (req->cb != NULL ||
        req->fs_type == UV_FS_MKDTEMP || req->fs_type == UV_FS_MKSTEMP))
    uv__free((void*) req->path);  /* Memory is shared with req->new_path. */

  req->path = NULL;
  req->new_path = NULL;

  /* readdir/scandir keep per-type state behind req->ptr. */
  if (req->fs_type == UV_FS_READDIR && req->ptr != NULL)
    uv__fs_readdir_cleanup(req);

  if (req->fs_type == UV_FS_SCANDIR && req->ptr != NULL)
    uv__fs_scandir_cleanup(req);

  /* Free a heap-spilled bufs array (inline bufsml needs no free). */
  if (req->bufs != req->bufsml)
    uv__free(req->bufs);
  req->bufs = NULL;

  /* Don't free req->ptr when it aliases the embedded statbuf, and never
   * free an OPENDIR's uv_dir_t here (the user closes it via closedir). */
  if (req->fs_type != UV_FS_OPENDIR && req->ptr != &req->statbuf)
    uv__free(req->ptr);
  req->ptr = NULL;
}
2015
2016
/* Copy `path` to `new_path`.  `flags` may combine UV_FS_COPYFILE_EXCL
 * (fail if destination exists) and the FICLONE/FICLONE_FORCE clone hints;
 * any other bit is rejected with UV_EINVAL. */
int uv_fs_copyfile(uv_loop_t* loop,
                   uv_fs_t* req,
                   const char* path,
                   const char* new_path,
                   int flags,
                   uv_fs_cb cb) {
  INIT(COPYFILE);

  if (flags & ~(UV_FS_COPYFILE_EXCL |
                UV_FS_COPYFILE_FICLONE |
                UV_FS_COPYFILE_FICLONE_FORCE)) {
    return UV_EINVAL;
  }

  PATH2;
  req->flags = flags;
  POST;
}
2035
2036
/* statfs(2)/statvfs(3): query filesystem statistics for `path`. */
int uv_fs_statfs(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 uv_fs_cb cb) {
  INIT(STATFS);
  PATH;
  POST;
}
2045