1 /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a copy
4 * of this software and associated documentation files (the "Software"), to
5 * deal in the Software without restriction, including without limitation the
6 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7 * sell copies of the Software, and to permit persons to whom the Software is
8 * furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
19 * IN THE SOFTWARE.
20 */
21
22 /* Caveat emptor: this file deviates from the libuv convention of returning
23 * negated errno codes. Most uv_fs_*() functions map directly to the system
24 * call of the same name. For more complex wrappers, it's easier to just
25 * return -1 with errno set. The dispatcher in uv__fs_work() takes care of
26 * getting the errno to the right place (req->result or as the return value.)
27 */
28
29 #include "uv.h"
30 #include "internal.h"
31
32 #include <errno.h>
33 #include <dlfcn.h>
34 #include <stdio.h>
35 #include <stdlib.h>
36 #include <string.h>
37 #include <limits.h> /* PATH_MAX */
38
39 #include <sys/types.h>
40 #include <sys/socket.h>
41 #include <sys/stat.h>
42 #include <sys/time.h>
43 #include <sys/uio.h>
44 #include <pthread.h>
45 #include <unistd.h>
46 #include <fcntl.h>
47 #include <poll.h>
48
49 #if defined(__DragonFly__) || \
50 defined(__FreeBSD__) || \
51 defined(__FreeBSD_kernel__) || \
52 defined(__OpenBSD__) || \
53 defined(__NetBSD__)
54 # define HAVE_PREADV 1
55 #else
56 # define HAVE_PREADV 0
57 #endif
58
59 #if defined(__linux__)
60 # include "sys/utsname.h"
61 #endif
62
63 #if defined(__linux__) || defined(__sun)
64 # include <sys/sendfile.h>
65 # include <sys/sysmacros.h>
66 #endif
67
68 #if defined(__APPLE__)
69 # include <sys/sysctl.h>
70 #elif defined(__linux__) && !defined(FICLONE)
71 # include <sys/ioctl.h>
72 # define FICLONE _IOW(0x94, 9, int)
73 #endif
74
75 #if defined(_AIX) && !defined(_AIX71)
76 # include <utime.h>
77 #endif
78
79 #if defined(__APPLE__) || \
80 defined(__DragonFly__) || \
81 defined(__FreeBSD__) || \
82 defined(__FreeBSD_kernel__) || \
83 defined(__OpenBSD__) || \
84 defined(__NetBSD__)
85 # include <sys/param.h>
86 # include <sys/mount.h>
87 #elif defined(__sun) || \
88 defined(__MVS__) || \
89 defined(__NetBSD__) || \
90 defined(__HAIKU__) || \
91 defined(__QNX__)
92 # include <sys/statvfs.h>
93 #else
94 # include <sys/statfs.h>
95 #endif
96
97 #if defined(_AIX) && _XOPEN_SOURCE <= 600
98 extern char *mkdtemp(char *template); /* See issue #740 on AIX < 7 */
99 #endif
100
/* Common setup for every uv_fs_*() entry point: validate the request,
 * initialize the base request, record the operation subtype, and reset all
 * result/pointer fields. `loop`, `req` and `cb` come from the caller's scope.
 */
#define INIT(subtype)                                                         \
  do {                                                                        \
    if (req == NULL)                                                          \
      return UV_EINVAL;                                                       \
    UV_REQ_INIT(req, UV_FS);                                                  \
    req->fs_type = UV_FS_ ## subtype;                                         \
    req->result = 0;                                                          \
    req->ptr = NULL;                                                          \
    req->loop = loop;                                                         \
    req->path = NULL;                                                         \
    req->new_path = NULL;                                                     \
    req->bufs = NULL;                                                         \
    req->cb = cb;                                                             \
  }                                                                           \
  while (0)

/* Store the caller's path on the request. Synchronous calls (cb == NULL)
 * borrow the caller's pointer directly; asynchronous calls copy it because
 * the caller's buffer need not outlive this function.
 */
#define PATH                                                                  \
  do {                                                                        \
    assert(path != NULL);                                                     \
    if (cb == NULL) {                                                         \
      req->path = path;                                                       \
    } else {                                                                  \
      req->path = uv__strdup(path);                                           \
      if (req->path == NULL)                                                  \
        return UV_ENOMEM;                                                     \
    }                                                                         \
  }                                                                           \
  while (0)

/* Like PATH but for two-path operations (rename, link, ...). Asynchronous
 * calls pack both strings into a single allocation; uv_fs_req_cleanup()
 * frees only req->path, so new_path must live in the same block.
 */
#define PATH2                                                                 \
  do {                                                                        \
    if (cb == NULL) {                                                         \
      req->path = path;                                                       \
      req->new_path = new_path;                                               \
    } else {                                                                  \
      size_t path_len;                                                        \
      size_t new_path_len;                                                    \
      path_len = strlen(path) + 1;                                            \
      new_path_len = strlen(new_path) + 1;                                    \
      req->path = uv__malloc(path_len + new_path_len);                        \
      if (req->path == NULL)                                                  \
        return UV_ENOMEM;                                                     \
      req->new_path = req->path + path_len;                                   \
      memcpy((void*) req->path, path, path_len);                              \
      memcpy((void*) req->new_path, new_path, new_path_len);                  \
    }                                                                         \
  }                                                                           \
  while (0)

/* Dispatch the request: asynchronous calls are registered and submitted to
 * the thread pool's fast-I/O queue and return 0 immediately; synchronous
 * calls run the work function inline and return its result.
 */
#define POST                                                                  \
  do {                                                                        \
    if (cb != NULL) {                                                         \
      uv__req_register(loop, req);                                            \
      uv__work_submit(loop,                                                   \
                      &req->work_req,                                         \
                      UV__WORK_FAST_IO,                                       \
                      uv__fs_work,                                            \
                      uv__fs_done);                                           \
      return 0;                                                               \
    }                                                                         \
    else {                                                                    \
      uv__fs_work(&req->work_req);                                            \
      return req->result;                                                     \
    }                                                                         \
  }                                                                           \
  while (0)
167
168
/* Close `fd`, treating EINTR/EINPROGRESS as success: the close has been
 * initiated and the descriptor must not be closed a second time.
 */
static int uv__fs_close(int fd) {
  int status;

  status = uv__close_nocancel(fd);
  if (status != 0 && (errno == EINTR || errno == EINPROGRESS))
    status = 0;  /* The close is in progress, not an error. */

  return status;
}
179
180
/* Flush req->file to stable storage. On Apple, tries increasingly weaker
 * fcntl()-based flushes before falling back to fsync(); elsewhere fsync()
 * alone suffices.
 */
static ssize_t uv__fs_fsync(uv_fs_t* req) {
#if defined(__APPLE__)
  /* Apple's fdatasync and fsync explicitly do NOT flush the drive write cache
   * to the drive platters. This is in contrast to Linux's fdatasync and fsync
   * which do, according to recent man pages. F_FULLFSYNC is Apple's equivalent
   * for flushing buffered data to permanent storage. If F_FULLFSYNC is not
   * supported by the file system we fall back to F_BARRIERFSYNC or fsync().
   * This is the same approach taken by sqlite, except sqlite does not issue
   * an F_BARRIERFSYNC call.
   */
  int r;

  r = fcntl(req->file, F_FULLFSYNC);
  if (r != 0)
    r = fcntl(req->file, 85 /* F_BARRIERFSYNC */);  /* fsync + barrier */
  if (r != 0)
    r = fsync(req->file);
  return r;
#else
  return fsync(req->file);
#endif
}
203
204
/* fdatasync() req->file where the platform provides it; otherwise fall back
 * to the fsync() logic (which on Apple performs the full-flush dance).
 */
static ssize_t uv__fs_fdatasync(uv_fs_t* req) {
#if defined(__linux__) || defined(__sun) || defined(__NetBSD__)
  return fdatasync(req->file);
#elif defined(__APPLE__)
  /* See the comment in uv__fs_fsync. */
  return uv__fs_fsync(req);
#else
  return fsync(req->file);
#endif
}
215
216
/* Convert a timestamp expressed as a double of fractional seconds into a
 * struct timespec. Negative inputs are renormalized so tv_nsec ends up in
 * [0, 1e9).
 */
UV_UNUSED(static struct timespec uv__fs_to_timespec(double time)) {
  struct timespec ts;
  ts.tv_sec = time;
  ts.tv_nsec = (time - ts.tv_sec) * 1e9;

  /* TODO(bnoordhuis) Remove this. utimesat() has nanosecond resolution but we
   * stick to microsecond resolution for the sake of consistency with other
   * platforms. I'm the original author of this compatibility hack but I'm
   * less convinced it's useful nowadays.
   */
  ts.tv_nsec -= ts.tv_nsec % 1000;

  /* For negative times the truncation above leaves tv_nsec negative;
   * borrow one second to bring it back into range.
   */
  if (ts.tv_nsec < 0) {
    ts.tv_nsec += 1e9;
    ts.tv_sec -= 1;
  }
  return ts;
}
235
/* Convert a timestamp expressed as a double of fractional seconds into a
 * struct timeval, renormalizing negative inputs so tv_usec is in [0, 1e6).
 */
UV_UNUSED(static struct timeval uv__fs_to_timeval(double time)) {
  struct timeval tv;
  tv.tv_sec = time;
  tv.tv_usec = (time - tv.tv_sec) * 1e6;
  if (tv.tv_usec < 0) {
    tv.tv_usec += 1e6;
    tv.tv_sec -= 1;
  }
  return tv;
}
246
/* Set atime/mtime on the open descriptor req->file using whichever facility
 * the platform provides; returns -1 with errno = ENOSYS where none exists.
 */
static ssize_t uv__fs_futime(uv_fs_t* req) {
#if defined(__linux__)                                                        \
    || defined(_AIX71)                                                        \
    || defined(__HAIKU__)
  /* Nanosecond-capable interface. */
  struct timespec ts[2];
  ts[0] = uv__fs_to_timespec(req->atime);
  ts[1] = uv__fs_to_timespec(req->mtime);
  return futimens(req->file, ts);
#elif defined(__APPLE__)                                                      \
    || defined(__DragonFly__)                                                 \
    || defined(__FreeBSD__)                                                   \
    || defined(__FreeBSD_kernel__)                                            \
    || defined(__NetBSD__)                                                    \
    || defined(__OpenBSD__)                                                   \
    || defined(__sun)
  /* Microsecond-resolution BSD-style interface. */
  struct timeval tv[2];
  tv[0] = uv__fs_to_timeval(req->atime);
  tv[1] = uv__fs_to_timeval(req->mtime);
# if defined(__sun)
  /* NULL path makes futimesat() operate on the fd itself. */
  return futimesat(req->file, NULL, tv);
# else
  return futimes(req->file, tv);
# endif
#elif defined(__MVS__)
  /* z/OS: change both times through the file-attribute syscall. */
  attrib_t atr;
  memset(&atr, 0, sizeof(atr));
  atr.att_mtimechg = 1;
  atr.att_atimechg = 1;
  atr.att_mtime = req->mtime;
  atr.att_atime = req->atime;
  return __fchattr(req->file, &atr, sizeof(atr));
#else
  errno = ENOSYS;
  return -1;
#endif
}
283
284
uv__fs_mkdtemp(uv_fs_t * req)285 static ssize_t uv__fs_mkdtemp(uv_fs_t* req) {
286 return mkdtemp((char*) req->path) ? 0 : -1;
287 }
288
289
/* Lazily-resolved pointer to the C library's mkostemp(); stays NULL when the
 * symbol is unavailable at run time.
 */
static int (*uv__mkostemp)(char*, int);


/* One-time initializer: look up mkostemp() via dlsym() so the code also
 * works against C libraries that lack it.
 */
static void uv__mkostemp_initonce(void) {
  /* z/os doesn't have RTLD_DEFAULT but that's okay
   * because it doesn't have mkostemp(O_CLOEXEC) either.
   */
#ifdef RTLD_DEFAULT
  uv__mkostemp = (int (*)(char*, int)) dlsym(RTLD_DEFAULT, "mkostemp");

  /* We don't care about errors, but we do want to clean them up.
   * If there has been no error, then dlerror() will just return
   * NULL.
   */
  dlerror();
#endif  /* RTLD_DEFAULT */
}
307
308
/* Create and open a unique file from the "XXXXXX" template in req->path.
 * Prefers mkostemp(O_CLOEXEC) when available; falls back to mkstemp() plus
 * an explicit uv__cloexec() under the loop's cloexec lock. Returns the open
 * fd, or -1 with errno set; on failure the template is clobbered to "".
 */
static int uv__fs_mkstemp(uv_fs_t* req) {
  static uv_once_t once = UV_ONCE_INIT;
  int r;
#ifdef O_CLOEXEC
  /* Set once mkostemp(O_CLOEXEC) has failed with EINVAL; skip it thereafter. */
  static int no_cloexec_support;
#endif
  static const char pattern[] = "XXXXXX";
  static const size_t pattern_size = sizeof(pattern) - 1;
  char* path;
  size_t path_length;

  path = (char*) req->path;
  path_length = strlen(path);

  /* EINVAL can be returned for 2 reasons:
      1. The template's last 6 characters were not XXXXXX
      2. open() didn't support O_CLOEXEC
      We want to avoid going to the fallback path in case
      of 1, so it's manually checked before. */
  if (path_length < pattern_size ||
      strcmp(path + path_length - pattern_size, pattern)) {
    errno = EINVAL;
    r = -1;
    goto clobber;
  }

  uv_once(&once, uv__mkostemp_initonce);

#ifdef O_CLOEXEC
  if (uv__load_relaxed(&no_cloexec_support) == 0 && uv__mkostemp != NULL) {
    r = uv__mkostemp(path, O_CLOEXEC);

    if (r >= 0)
      return r;

    /* If mkostemp() returns EINVAL, it means the kernel doesn't
       support O_CLOEXEC, so we just fallback to mkstemp() below. */
    if (errno != EINVAL)
      goto clobber;

    /* We set the static variable so that next calls don't even
       try to use mkostemp. */
    uv__store_relaxed(&no_cloexec_support, 1);
  }
#endif  /* O_CLOEXEC */

  /* Hold the read lock so a concurrent fork/exec can't leak the fd between
   * mkstemp() and uv__cloexec().
   */
  if (req->cb != NULL)
    uv_rwlock_rdlock(&req->loop->cloexec_lock);

  r = mkstemp(path);

  /* In case of failure `uv__cloexec` will leave error in `errno`,
   * so it is enough to just set `r` to `-1`.
   */
  if (r >= 0 && uv__cloexec(r, 1) != 0) {
    r = uv__close(r);
    if (r != 0)
      abort();
    r = -1;
  }

  if (req->cb != NULL)
    uv_rwlock_rdunlock(&req->loop->cloexec_lock);

clobber:
  /* On any failure, blank the template so callers don't see a half-built
   * name.
   */
  if (r < 0)
    path[0] = '\0';
  return r;
}
378
379
/* open() req->path with the request's flags and mode, always with
 * close-on-exec set: atomically via O_CLOEXEC where available, otherwise via
 * uv__cloexec() under the loop's cloexec lock.
 */
static ssize_t uv__fs_open(uv_fs_t* req) {
#ifdef O_CLOEXEC
  return open(req->path, req->flags | O_CLOEXEC, req->mode);
#else  /* O_CLOEXEC */
  int r;

  /* Hold the read lock so a concurrent fork/exec can't leak the fd between
   * open() and uv__cloexec().
   */
  if (req->cb != NULL)
    uv_rwlock_rdlock(&req->loop->cloexec_lock);

  r = open(req->path, req->flags, req->mode);

  /* In case of failure `uv__cloexec` will leave error in `errno`,
   * so it is enough to just set `r` to `-1`.
   */
  if (r >= 0 && uv__cloexec(r, 1) != 0) {
    r = uv__close(r);
    if (r != 0)
      abort();
    r = -1;
  }

  if (req->cb != NULL)
    uv_rwlock_rdunlock(&req->loop->cloexec_lock);

  return r;
#endif  /* O_CLOEXEC */
}
407
408
#if !HAVE_PREADV
/* Emulate preadv() with a pread() loop for platforms without the syscall.
 * Fills each buffer in turn, resuming after short reads; stops at EOF or
 * when all buffers are full. Returns the total byte count, or a negated
 * errno only when nothing was read before the error.
 */
static ssize_t uv__fs_preadv(uv_file fd,
                             uv_buf_t* bufs,
                             unsigned int nbufs,
                             off_t off) {
  uv_buf_t* buf;
  uv_buf_t* end;
  ssize_t result;
  ssize_t rc;
  size_t pos;    /* Fill offset within the current buffer. */

  assert(nbufs > 0);

  result = 0;
  pos = 0;
  buf = bufs + 0;
  end = bufs + nbufs;

  for (;;) {
    do
      rc = pread(fd, buf->base + pos, buf->len - pos, off + result);
    while (rc == -1 && errno == EINTR);

    if (rc == 0)
      break;  /* EOF. */

    if (rc == -1 && result == 0)
      return UV__ERR(errno);

    if (rc == -1)
      break;  /* We read some data so return that, ignore the error. */

    pos += rc;
    result += rc;

    /* Short read: keep filling the same buffer. */
    if (pos < buf->len)
      continue;

    pos = 0;
    buf += 1;

    if (buf == end)
      break;  /* All buffers full. */
  }

  return result;
}
#endif
457
458
/* Worker for uv_fs_read(): plain read()/readv() when req->off < 0,
 * positional pread()/preadv() (or an emulation) otherwise. Caps nbufs at
 * the platform iovec limit and releases any heap-allocated buffer list
 * before returning.
 */
static ssize_t uv__fs_read(uv_fs_t* req) {
#if defined(__linux__)
  /* Set once uv__preadv() has returned ENOSYS; later calls go straight to
   * the emulation.
   */
  static int no_preadv;
#endif
  unsigned int iovmax;
  ssize_t result;

  iovmax = uv__getiovmax();
  if (req->nbufs > iovmax)
    req->nbufs = iovmax;

  if (req->off < 0) {
    if (req->nbufs == 1)
      result = read(req->file, req->bufs[0].base, req->bufs[0].len);
    else
      result = readv(req->file, (struct iovec*) req->bufs, req->nbufs);
  } else {
    if (req->nbufs == 1) {
      result = pread(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
      goto done;
    }

#if HAVE_PREADV
    result = preadv(req->file, (struct iovec*) req->bufs, req->nbufs, req->off);
#else
    /* NOTE: the `if (...) retry:` construct below attaches both the label
     * and the emulation block to the same braces; `goto retry` re-enters the
     * emulation after uv__preadv() reports ENOSYS.
     */
# if defined(__linux__)
    if (uv__load_relaxed(&no_preadv)) retry:
# endif
    {
      result = uv__fs_preadv(req->file, req->bufs, req->nbufs, req->off);
    }
# if defined(__linux__)
    else {
      result = uv__preadv(req->file,
                          (struct iovec*)req->bufs,
                          req->nbufs,
                          req->off);
      if (result == -1 && errno == ENOSYS) {
        uv__store_relaxed(&no_preadv, 1);
        goto retry;
      }
    }
# endif
#endif
  }

done:
  /* Early cleanup of bufs allocation, since we're done with it. */
  if (req->bufs != req->bufsml)
    uv__free(req->bufs);

  req->bufs = NULL;
  req->nbufs = 0;

#ifdef __PASE__
  /* PASE returns EOPNOTSUPP when reading a directory, convert to EISDIR */
  if (result == -1 && errno == EOPNOTSUPP) {
    struct stat buf;
    ssize_t rc;
    rc = fstat(req->file, &buf);
    if (rc == 0 && S_ISDIR(buf.st_mode)) {
      errno = EISDIR;
    }
  }
#endif

  return result;
}
527
528
529 #if defined(__APPLE__) && !defined(MAC_OS_X_VERSION_10_8)
530 #define UV_CONST_DIRENT uv__dirent_t
531 #else
532 #define UV_CONST_DIRENT const uv__dirent_t
533 #endif
534
535
/* scandir() filter: keep every entry except "." and "..". */
static int uv__fs_scandir_filter(UV_CONST_DIRENT* dent) {
  const char* name = dent->d_name;

  if (strcmp(name, ".") == 0)
    return 0;
  if (strcmp(name, "..") == 0)
    return 0;

  return 1;
}
539
540
/* scandir() comparator: lexicographic ordering by entry name. */
static int uv__fs_scandir_sort(UV_CONST_DIRENT** a, UV_CONST_DIRENT** b) {
  const char* left = (*a)->d_name;
  const char* right = (*b)->d_name;

  return strcmp(left, right);
}
544
545
/* Worker for uv_fs_scandir(): list req->path sorted and filtered, stashing
 * the entry array in req->ptr. Returns the entry count or -1 (errno set).
 */
static ssize_t uv__fs_scandir(uv_fs_t* req) {
  uv__dirent_t** dents;
  int n;

  dents = NULL;
  n = scandir(req->path, &dents, uv__fs_scandir_filter, uv__fs_scandir_sort);

  /* NOTE: We will use nbufs as an index field */
  req->nbufs = 0;

  if (n == 0) {
    /* OS X still needs to deallocate some memory.
     * Memory was allocated using the system allocator, so use free() here.
     */
    free(dents);
    dents = NULL;
  } else if (n == -1) {
    return n;
  }

  req->ptr = dents;

  return n;
}
570
/* Worker for uv_fs_opendir(): allocate a uv_dir_t wrapping an open DIR*
 * for req->path and store it in req->ptr. Returns 0 or -1 (errno set).
 */
static int uv__fs_opendir(uv_fs_t* req) {
  uv_dir_t* dir = uv__malloc(sizeof(*dir));

  if (dir != NULL) {
    dir->dir = opendir(req->path);
    if (dir->dir != NULL) {
      req->ptr = dir;
      return 0;
    }
  }

  uv__free(dir);  /* Freeing NULL is a no-op. */
  req->ptr = NULL;
  return -1;
}
590
/* Worker for uv_fs_readdir(): fill dir->dirents (up to dir->nentries) with
 * copies of the next directory entries, skipping "." and "..". Returns the
 * number of entries read, or -1 on error after freeing any names already
 * duplicated in this call.
 */
static int uv__fs_readdir(uv_fs_t* req) {
  uv_dir_t* dir;
  uv_dirent_t* dirent;
  struct dirent* res;
  unsigned int dirent_idx;
  unsigned int i;

  dir = req->ptr;
  dirent_idx = 0;

  while (dirent_idx < dir->nentries) {
    /* readdir() returns NULL on end of directory, as well as on error. errno
       is used to differentiate between the two conditions. */
    errno = 0;
    res = readdir(dir->dir);

    if (res == NULL) {
      if (errno != 0)
        goto error;
      break;  /* End of directory. */
    }

    if (strcmp(res->d_name, ".") == 0 || strcmp(res->d_name, "..") == 0)
      continue;

    dirent = &dir->dirents[dirent_idx];
    dirent->name = uv__strdup(res->d_name);

    if (dirent->name == NULL)
      goto error;

    dirent->type = uv__fs_get_dirent_type(res);
    ++dirent_idx;
  }

  return dirent_idx;

error:
  /* Unwind: release every name duplicated so far in this call. */
  for (i = 0; i < dirent_idx; ++i) {
    uv__free((char*) dir->dirents[i].name);
    dir->dirents[i].name = NULL;
  }

  return -1;
}
636
/* Worker for uv_fs_closedir(): close the wrapped DIR* (if still open) and
 * release the uv_dir_t stored in req->ptr. Always succeeds.
 */
static int uv__fs_closedir(uv_fs_t* req) {
  uv_dir_t* dir = req->ptr;

  if (dir->dir != NULL) {
    closedir(dir->dir);
    dir->dir = NULL;
  }

  uv__free(dir);
  req->ptr = NULL;
  return 0;
}
651
/* Worker for uv_fs_statfs(): query filesystem statistics for req->path via
 * statvfs() or statfs() (platform-dependent), copy them into a freshly
 * allocated uv_statfs_t and store it in req->ptr. Returns 0 or -1.
 */
static int uv__fs_statfs(uv_fs_t* req) {
  uv_statfs_t* stat_fs;
#if defined(__sun)      || \
    defined(__MVS__)    || \
    defined(__NetBSD__) || \
    defined(__HAIKU__)  || \
    defined(__QNX__)
  struct statvfs buf;

  if (0 != statvfs(req->path, &buf))
#else
  struct statfs buf;

  if (0 != statfs(req->path, &buf))
#endif /* defined(__sun) */
    return -1;

  stat_fs = uv__malloc(sizeof(*stat_fs));
  if (stat_fs == NULL) {
    errno = ENOMEM;
    return -1;
  }

#if defined(__sun)        || \
    defined(__MVS__)      || \
    defined(__OpenBSD__)  || \
    defined(__NetBSD__)   || \
    defined(__HAIKU__)    || \
    defined(__QNX__)
  stat_fs->f_type = 0;  /* f_type is not supported. */
#else
  stat_fs->f_type = buf.f_type;
#endif
  stat_fs->f_bsize = buf.f_bsize;
  stat_fs->f_blocks = buf.f_blocks;
  stat_fs->f_bfree = buf.f_bfree;
  stat_fs->f_bavail = buf.f_bavail;
  stat_fs->f_files = buf.f_files;
  stat_fs->f_ffree = buf.f_ffree;
  req->ptr = stat_fs;
  return 0;
}
694
uv__fs_pathmax_size(const char * path)695 static ssize_t uv__fs_pathmax_size(const char* path) {
696 ssize_t pathmax;
697
698 pathmax = pathconf(path, _PC_PATH_MAX);
699
700 if (pathmax == -1)
701 pathmax = UV__PATH_MAX;
702
703 return pathmax;
704 }
705
/* Worker for uv_fs_readlink(): read the target of the symlink at req->path
 * into a NUL-terminated heap buffer stored in req->ptr. Returns 0 or -1
 * (errno set).
 */
static ssize_t uv__fs_readlink(uv_fs_t* req) {
  ssize_t maxlen;
  ssize_t len;
  char* buf;

#if defined(_POSIX_PATH_MAX) || defined(PATH_MAX)
  maxlen = uv__fs_pathmax_size(req->path);
#else
  /* We may not have a real PATH_MAX.  Read size of link.  */
  struct stat st;
  int ret;
  ret = lstat(req->path, &st);
  if (ret != 0)
    return -1;
  if (!S_ISLNK(st.st_mode)) {
    errno = EINVAL;
    return -1;
  }

  maxlen = st.st_size;

  /* According to readlink(2) lstat can report st_size == 0
     for some symlinks, such as those in /proc or /sys. */
  if (maxlen == 0)
    maxlen = uv__fs_pathmax_size(req->path);
#endif

  buf = uv__malloc(maxlen);

  if (buf == NULL) {
    errno = ENOMEM;
    return -1;
  }

#if defined(__MVS__)
  len = os390_readlink(req->path, buf, maxlen);
#else
  len = readlink(req->path, buf, maxlen);
#endif

  if (len == -1) {
    uv__free(buf);
    return -1;
  }

  /* Uncommon case: resize to make room for the trailing nul byte. */
  if (len == maxlen) {
    buf = uv__reallocf(buf, len + 1);  /* Frees the old buffer on failure. */

    if (buf == NULL)
      return -1;
  }

  buf[len] = '\0';
  req->ptr = buf;

  return 0;
}
764
/* Worker for uv_fs_realpath(): canonicalize req->path into a heap buffer
 * stored in req->ptr. POSIX.1-2008 realpath() allocates for us; older
 * systems need a pre-sized buffer. Returns 0 or -1 (errno set).
 */
static ssize_t uv__fs_realpath(uv_fs_t* req) {
  char* buf;

#if defined(_POSIX_VERSION) && _POSIX_VERSION >= 200809L
  /* realpath(path, NULL) malloc()s the result itself. */
  buf = realpath(req->path, NULL);
  if (buf == NULL)
    return -1;
#else
  ssize_t len;

  len = uv__fs_pathmax_size(req->path);
  buf = uv__malloc(len + 1);

  if (buf == NULL) {
    errno = ENOMEM;
    return -1;
  }

  if (realpath(req->path, buf) == NULL) {
    uv__free(buf);
    return -1;
  }
#endif

  req->ptr = buf;

  return 0;
}
793
/* Userspace sendfile() emulation: copy up to req->bufsml[0].len bytes from
 * the source fd (req->flags) to the target fd (req->file) through an 8 KB
 * stack buffer, starting at req->off. On success req->off is advanced by
 * the number of bytes sent; returns that count, or -1 with errno set.
 */
static ssize_t uv__fs_sendfile_emul(uv_fs_t* req) {
  struct pollfd pfd;
  int use_pread;
  off_t offset;
  ssize_t nsent;
  ssize_t nread;
  ssize_t nwritten;
  size_t buflen;
  size_t len;
  ssize_t n;
  int in_fd;
  int out_fd;
  char buf[8192];

  len = req->bufsml[0].len;
  in_fd = req->flags;
  out_fd = req->file;
  offset = req->off;
  use_pread = 1;

  /* Here are the rules regarding errors:
   *
   * 1. Read errors are reported only if nsent==0, otherwise we return nsent.
   *    The user needs to know that some data has already been sent, to stop
   *    them from sending it twice.
   *
   * 2. Write errors are always reported. Write errors are bad because they
   *    mean data loss: we've read data but now we can't write it out.
   *
   * We try to use pread() and fall back to regular read() if the source fd
   * doesn't support positional reads, for example when it's a pipe fd.
   *
   * If we get EAGAIN when writing to the target fd, we poll() on it until
   * it becomes writable again.
   *
   * FIXME: If we get a write error when use_pread==1, it should be safe to
   *        return the number of sent bytes instead of an error because pread()
   *        is, in theory, idempotent. However, special files in /dev or /proc
   *        may support pread() but not necessarily return the same data on
   *        successive reads.
   *
   * FIXME: There is no way now to signal that we managed to send *some* data
   *        before a write error.
   */
  for (nsent = 0; (size_t) nsent < len; ) {
    buflen = len - nsent;

    if (buflen > sizeof(buf))
      buflen = sizeof(buf);

    do
      if (use_pread)
        nread = pread(in_fd, buf, buflen, offset);
      else
        nread = read(in_fd, buf, buflen);
    while (nread == -1 && errno == EINTR);

    if (nread == 0)
      goto out;  /* EOF on the source. */

    if (nread == -1) {
      /* Source doesn't support positional reads; retry with plain read(). */
      if (use_pread && nsent == 0 && (errno == EIO || errno == ESPIPE)) {
        use_pread = 0;
        continue;
      }

      if (nsent == 0)
        nsent = -1;

      goto out;
    }

    /* Drain the chunk to the target, waiting out EAGAIN with poll(). */
    for (nwritten = 0; nwritten < nread; ) {
      do
        n = write(out_fd, buf + nwritten, nread - nwritten);
      while (n == -1 && errno == EINTR);

      if (n != -1) {
        nwritten += n;
        continue;
      }

      if (errno != EAGAIN && errno != EWOULDBLOCK) {
        nsent = -1;
        goto out;
      }

      pfd.fd = out_fd;
      pfd.events = POLLOUT;
      pfd.revents = 0;

      do
        n = poll(&pfd, 1, -1);
      while (n == -1 && errno == EINTR);

      /* Poll failure or error/hangup on the fd: report as I/O error. */
      if (n == -1 || (pfd.revents & ~POLLOUT) != 0) {
        errno = EIO;
        nsent = -1;
        goto out;
      }
    }

    offset += nread;
    nsent += nread;
  }

out:
  if (nsent != -1)
    req->off = offset;

  return nsent;
}
906
907
908 #ifdef __linux__
/* Return the running Linux kernel version packed as major*65536 +
 * minor*256 + patch, or 0 when it can't be determined. The result is
 * cached after the first successful lookup.
 */
static unsigned uv__kernel_version(void) {
  static unsigned cached_version;
  struct utsname name;
  unsigned version;
  unsigned major;
  unsigned minor;
  unsigned patch;

  version = uv__load_relaxed(&cached_version);
  if (version != 0)
    return version;

  if (uname(&name) == -1)
    return 0;

  if (sscanf(name.release, "%u.%u.%u", &major, &minor, &patch) != 3)
    return 0;

  version = major * 65536 + minor * 256 + patch;
  uv__store_relaxed(&cached_version, version);

  return version;
}
932
933
934 /* Pre-4.20 kernels have a bug where CephFS uses the RADOS copy-from command
935 * in copy_file_range() when it shouldn't. There is no workaround except to
936 * fall back to a regular copy.
937 */
/* True when `fd` lives on a CephFS mount and the kernel predates 4.20,
 * i.e. when copy_file_range() must be avoided (see comment above).
 */
static int uv__is_buggy_cephfs(int fd) {
  struct statfs s;

  if (fstatfs(fd, &s) != 0)
    return 0;

  /* Short-circuit keeps uv__kernel_version() off the non-CephFS path. */
  return s.f_type == /* CephFS */ 0xC36400 &&
         uv__kernel_version() < /* 4.20.0 */ 0x041400;
}
949 #endif /* __linux__ */
950
951
/* Worker for uv_fs_sendfile(): copy req->bufsml[0].len bytes from the source
 * fd (req->flags) to the target fd (req->file) starting at req->off, using
 * copy_file_range()/sendfile() where available and falling back to the
 * userspace emulation. Advances req->off and returns the byte count or -1.
 */
static ssize_t uv__fs_sendfile(uv_fs_t* req) {
  int in_fd;
  int out_fd;

  in_fd = req->flags;
  out_fd = req->file;

#if defined(__linux__) || defined(__sun)
  {
    off_t off;
    ssize_t r;

    off = req->off;

#ifdef __linux__
    {
      /* Set once copy_file_range() has been ruled out for good (ENOSYS, or
       * the buggy-CephFS case below).
       */
      static int no_copy_file_range_support;

      if (uv__load_relaxed(&no_copy_file_range_support) == 0) {
        r = uv__fs_copy_file_range(in_fd, &off, out_fd, NULL, req->bufsml[0].len, 0);

        if (r == -1 && errno == ENOSYS) {
          /* ENOSYS - it will never work */
          errno = 0;
          uv__store_relaxed(&no_copy_file_range_support, 1);
        } else if (r == -1 && errno == EACCES && uv__is_buggy_cephfs(in_fd)) {
          /* EACCES - pre-4.20 kernels have a bug where CephFS uses the RADOS
                      copy-from command when it shouldn't */
          errno = 0;
          uv__store_relaxed(&no_copy_file_range_support, 1);
        } else if (r == -1 && (errno == ENOTSUP || errno == EXDEV)) {
          /* ENOTSUP - it could work on another file system type */
          /* EXDEV - it will not work when in_fd and out_fd are not on the same
                     mounted filesystem (pre Linux 5.3) */
          errno = 0;
        } else {
          goto ok;
        }
      }
    }
#endif

    r = sendfile(out_fd, in_fd, &off, req->bufsml[0].len);

ok:
    /* sendfile() on SunOS returns EINVAL if the target fd is not a socket but
     * it still writes out data. Fortunately, we can detect it by checking if
     * the offset has been updated.
     */
    if (r != -1 || off > req->off) {
      r = off - req->off;
      req->off = off;
      return r;
    }

    if (errno == EINVAL ||
        errno == EIO ||
        errno == ENOTSOCK ||
        errno == EXDEV) {
      errno = 0;
      return uv__fs_sendfile_emul(req);
    }

    return -1;
  }
#elif defined(__APPLE__)           || \
      defined(__DragonFly__)       || \
      defined(__FreeBSD__)         || \
      defined(__FreeBSD_kernel__)
  {
    off_t len;
    ssize_t r;

    /* sendfile() on FreeBSD and Darwin returns EAGAIN if the target fd is in
     * non-blocking mode and not all data could be written. If a non-zero
     * number of bytes have been sent, we don't consider it an error.
     */

#if defined(__FreeBSD__) || defined(__DragonFly__)
    len = 0;
    r = sendfile(in_fd, out_fd, req->off, req->bufsml[0].len, NULL, &len, 0);
#elif defined(__FreeBSD_kernel__)
    len = 0;
    r = bsd_sendfile(in_fd,
                     out_fd,
                     req->off,
                     req->bufsml[0].len,
                     NULL,
                     &len,
                     0);
#else
    /* The darwin sendfile takes len as an input for the length to send,
     * so make sure to initialize it with the caller's value. */
    len = req->bufsml[0].len;
    r = sendfile(in_fd, out_fd, req->off, &len, NULL, 0);
#endif

     /*
     * The man page for sendfile(2) on DragonFly states that `len` contains
     * a meaningful value ONLY in case of EAGAIN and EINTR.
     * Nothing is said about it's value in case of other errors, so better
     * not depend on the potential wrong assumption that is was not modified
     * by the syscall.
     */
    if (r == 0 || ((errno == EAGAIN || errno == EINTR) && len != 0)) {
      req->off += len;
      return (ssize_t) len;
    }

    if (errno == EINVAL ||
        errno == EIO ||
        errno == ENOTSOCK ||
        errno == EXDEV) {
      errno = 0;
      return uv__fs_sendfile_emul(req);
    }

    return -1;
  }
#else
  /* Squelch compiler warnings. */
  (void) &in_fd;
  (void) &out_fd;

  return uv__fs_sendfile_emul(req);
#endif
}
1079
1080
/* Set atime/mtime on req->path, following symlinks, using the best facility
 * the platform offers; returns -1 with errno = ENOSYS where none exists.
 */
static ssize_t uv__fs_utime(uv_fs_t* req) {
#if defined(__linux__)                                                         \
    || defined(_AIX71)                                                         \
    || defined(__sun)                                                          \
    || defined(__HAIKU__)
  /* Nanosecond-capable interface. */
  struct timespec ts[2];
  ts[0] = uv__fs_to_timespec(req->atime);
  ts[1] = uv__fs_to_timespec(req->mtime);
  return utimensat(AT_FDCWD, req->path, ts, 0);
#elif defined(__APPLE__)                                                       \
    || defined(__DragonFly__)                                                  \
    || defined(__FreeBSD__)                                                    \
    || defined(__FreeBSD_kernel__)                                             \
    || defined(__NetBSD__)                                                     \
    || defined(__OpenBSD__)
  /* Microsecond-resolution BSD-style interface. */
  struct timeval tv[2];
  tv[0] = uv__fs_to_timeval(req->atime);
  tv[1] = uv__fs_to_timeval(req->mtime);
  return utimes(req->path, tv);
#elif defined(_AIX)                                                            \
    && !defined(_AIX71)
  /* Second-resolution fallback for old AIX. */
  struct utimbuf buf;
  buf.actime = req->atime;
  buf.modtime = req->mtime;
  return utime(req->path, &buf);
#elif defined(__MVS__)
  /* z/OS: change both times through the file-attribute syscall. */
  attrib_t atr;
  memset(&atr, 0, sizeof(atr));
  atr.att_mtimechg = 1;
  atr.att_atimechg = 1;
  atr.att_mtime = req->mtime;
  atr.att_atime = req->atime;
  return __lchattr((char*) req->path, &atr, sizeof(atr));
#else
  errno = ENOSYS;
  return -1;
#endif
}
1119
1120
/* Like uv__fs_utime() but operates on the symlink itself rather than its
 * target; ENOSYS where the platform has no no-follow variant.
 */
static ssize_t uv__fs_lutime(uv_fs_t* req) {
#if defined(__linux__)            ||                                           \
    defined(_AIX71)               ||                                           \
    defined(__sun)                ||                                           \
    defined(__HAIKU__)
  struct timespec ts[2];
  ts[0] = uv__fs_to_timespec(req->atime);
  ts[1] = uv__fs_to_timespec(req->mtime);
  /* AT_SYMLINK_NOFOLLOW makes utimensat() touch the link itself. */
  return utimensat(AT_FDCWD, req->path, ts, AT_SYMLINK_NOFOLLOW);
#elif defined(__APPLE__)          ||                                           \
      defined(__DragonFly__)      ||                                           \
      defined(__FreeBSD__)        ||                                           \
      defined(__FreeBSD_kernel__) ||                                           \
      defined(__NetBSD__)
  struct timeval tv[2];
  tv[0] = uv__fs_to_timeval(req->atime);
  tv[1] = uv__fs_to_timeval(req->mtime);
  return lutimes(req->path, tv);
#else
  errno = ENOSYS;
  return -1;
#endif
}
1144
1145
/* Worker for uv_fs_write(): plain write()/writev() when req->off < 0,
 * positional pwrite()/pwritev() (or a single-buffer pwrite() fallback)
 * otherwise. Returns the byte count or -1 with errno set.
 */
static ssize_t uv__fs_write(uv_fs_t* req) {
#if defined(__linux__)
  /* Set once uv__pwritev() has returned ENOSYS; later calls go straight to
   * the pwrite() fallback. Accessed with relaxed atomics because multiple
   * threadpool workers can run this function concurrently — matches the
   * handling of `no_preadv` in uv__fs_read().
   */
  static int no_pwritev;
#endif
  ssize_t r;

  /* Serialize writes on OS X, concurrent write() and pwrite() calls result in
   * data loss. We can't use a per-file descriptor lock, the descriptor may be
   * a dup().
   */
#if defined(__APPLE__)
  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

  if (pthread_mutex_lock(&lock))
    abort();
#endif

  if (req->off < 0) {
    if (req->nbufs == 1)
      r = write(req->file, req->bufs[0].base, req->bufs[0].len);
    else
      r = writev(req->file, (struct iovec*) req->bufs, req->nbufs);
  } else {
    if (req->nbufs == 1) {
      r = pwrite(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
      goto done;
    }
#if HAVE_PREADV
    r = pwritev(req->file, (struct iovec*) req->bufs, req->nbufs, req->off);
#else
    /* NOTE: the `if (...) retry:` construct attaches both the label and the
     * fallback block to the same braces; `goto retry` re-enters the fallback
     * after uv__pwritev() reports ENOSYS.
     */
# if defined(__linux__)
    if (uv__load_relaxed(&no_pwritev)) retry:
# endif
    {
      r = pwrite(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
    }
# if defined(__linux__)
    else {
      r = uv__pwritev(req->file,
                      (struct iovec*) req->bufs,
                      req->nbufs,
                      req->off);
      if (r == -1 && errno == ENOSYS) {
        uv__store_relaxed(&no_pwritev, 1);
        goto retry;
      }
    }
# endif
#endif
  }

done:
#if defined(__APPLE__)
  if (pthread_mutex_unlock(&lock))
    abort();
#endif

  return r;
}
1205
/* Worker-thread implementation of uv_fs_copyfile(): copies req->path to
 * req->new_path, honoring the UV_FS_COPYFILE_* flags in req->flags.
 * Tries an FICLONE (reflink) ioctl when requested, otherwise copies the
 * contents with a sendfile() loop. Returns 0 on success; on failure
 * returns -1 with errno set (see the caveat at the top of this file —
 * uv__fs_work() converts errno back into the libuv error code).
 */
static ssize_t uv__fs_copyfile(uv_fs_t* req) {
  uv_fs_t fs_req;
  uv_file srcfd;
  uv_file dstfd;
  struct stat src_statsbuf;
  struct stat dst_statsbuf;
  int dst_flags;
  int result;
  int err;
  off_t bytes_to_send;
  off_t in_offset;
  off_t bytes_written;
  size_t bytes_chunk;

  dstfd = -1;
  err = 0;

  /* Open the source file. */
  srcfd = uv_fs_open(NULL, &fs_req, req->path, O_RDONLY, 0, NULL);
  uv_fs_req_cleanup(&fs_req);

  if (srcfd < 0)
    return srcfd;

  /* Get the source file's mode. */
  if (fstat(srcfd, &src_statsbuf)) {
    err = UV__ERR(errno);
    goto out;
  }

  dst_flags = O_WRONLY | O_CREAT;

  if (req->flags & UV_FS_COPYFILE_EXCL)
    dst_flags |= O_EXCL;

  /* Open the destination file; it is created with the source's mode. */
  dstfd = uv_fs_open(NULL,
                     &fs_req,
                     req->new_path,
                     dst_flags,
                     src_statsbuf.st_mode,
                     NULL);
  uv_fs_req_cleanup(&fs_req);

  if (dstfd < 0) {
    err = dstfd;
    goto out;
  }

  /* If the file is not being opened exclusively, verify that the source and
     destination are not the same file. If they are the same, bail out early
     (with err == 0, i.e. success — nothing needs copying). */
  if ((req->flags & UV_FS_COPYFILE_EXCL) == 0) {
    /* Get the destination file's mode. */
    if (fstat(dstfd, &dst_statsbuf)) {
      err = UV__ERR(errno);
      goto out;
    }

    /* Check if srcfd and dstfd refer to the same file */
    if (src_statsbuf.st_dev == dst_statsbuf.st_dev &&
        src_statsbuf.st_ino == dst_statsbuf.st_ino) {
      goto out;
    }

    /* Truncate the file in case the destination already existed. */
    if (ftruncate(dstfd, 0) != 0) {
      err = UV__ERR(errno);
      goto out;
    }
  }

  /* Re-apply the mode: O_CREAT only applies it when the file is created. */
  if (fchmod(dstfd, src_statsbuf.st_mode) == -1) {
    err = UV__ERR(errno);
#ifdef __linux__
    if (err != UV_EPERM)
      goto out;

    {
      struct statfs s;

      /* fchmod() on CIFS shares always fails with EPERM unless the share is
       * mounted with "noperm". As fchmod() is a meaningless operation on such
       * shares anyway, detect that condition and squelch the error.
       */
      if (fstatfs(dstfd, &s) == -1)
        goto out;

      if ((unsigned) s.f_type != /* CIFS */ 0xFF534D42u)
        goto out;
    }

    err = 0;
#else  /* !__linux__ */
    goto out;
#endif  /* !__linux__ */
  }

#ifdef FICLONE
  if (req->flags & UV_FS_COPYFILE_FICLONE ||
      req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
    if (ioctl(dstfd, FICLONE, srcfd) == 0) {
      /* ioctl() with FICLONE succeeded. */
      goto out;
    }
    /* If an error occurred and force was set, return the error to the caller;
     * fall back to sendfile() when force was not set. */
    if (req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
      err = UV__ERR(errno);
      goto out;
    }
  }
#else
  if (req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
    err = UV_ENOSYS;
    goto out;
  }
#endif

  /* Copy the contents in chunks of at most SSIZE_MAX bytes via sendfile(). */
  bytes_to_send = src_statsbuf.st_size;
  in_offset = 0;
  while (bytes_to_send != 0) {
    bytes_chunk = SSIZE_MAX;
    if (bytes_to_send < (off_t) bytes_chunk)
      bytes_chunk = bytes_to_send;
    uv_fs_sendfile(NULL, &fs_req, dstfd, srcfd, in_offset, bytes_chunk, NULL);
    bytes_written = fs_req.result;
    uv_fs_req_cleanup(&fs_req);

    if (bytes_written < 0) {
      err = bytes_written;
      break;
    }

    bytes_to_send -= bytes_written;
    in_offset += bytes_written;
  }

out:
  if (err < 0)
    result = err;
  else
    result = 0;

  /* Close the source file. */
  err = uv__close_nocheckstdio(srcfd);

  /* Don't overwrite any existing errors. */
  if (err != 0 && result == 0)
    result = err;

  /* Close the destination file if it is open. */
  if (dstfd >= 0) {
    err = uv__close_nocheckstdio(dstfd);

    /* Don't overwrite any existing errors. */
    if (err != 0 && result == 0)
      result = err;

    /* Remove the destination file if something went wrong. */
    if (result != 0) {
      uv_fs_unlink(NULL, &fs_req, req->new_path, NULL);
      /* Ignore the unlink return value, as an error already happened. */
      uv_fs_req_cleanup(&fs_req);
    }
  }

  if (result == 0)
    return 0;

  /* Report failure through errno; the dispatcher negates it again. */
  errno = UV__ERR(result);
  return -1;
}
1378
/* Translate a platform struct stat into libuv's portable uv_stat_t.
 * The #ifdef ladder selects whichever timestamp representation the
 * platform's struct stat provides (st_*timespec, st_*timensec, st_*tim,
 * or plain time_t seconds). Platforms without a birth time reuse ctime
 * and report st_flags/st_gen as 0.
 */
static void uv__to_stat(struct stat* src, uv_stat_t* dst) {
  dst->st_dev = src->st_dev;
  dst->st_mode = src->st_mode;
  dst->st_nlink = src->st_nlink;
  dst->st_uid = src->st_uid;
  dst->st_gid = src->st_gid;
  dst->st_rdev = src->st_rdev;
  dst->st_ino = src->st_ino;
  dst->st_size = src->st_size;
  dst->st_blksize = src->st_blksize;
  dst->st_blocks = src->st_blocks;

#if defined(__APPLE__)
  /* Darwin: nanosecond timestamps live in st_*timespec; birth time exists. */
  dst->st_atim.tv_sec = src->st_atimespec.tv_sec;
  dst->st_atim.tv_nsec = src->st_atimespec.tv_nsec;
  dst->st_mtim.tv_sec = src->st_mtimespec.tv_sec;
  dst->st_mtim.tv_nsec = src->st_mtimespec.tv_nsec;
  dst->st_ctim.tv_sec = src->st_ctimespec.tv_sec;
  dst->st_ctim.tv_nsec = src->st_ctimespec.tv_nsec;
  dst->st_birthtim.tv_sec = src->st_birthtimespec.tv_sec;
  dst->st_birthtim.tv_nsec = src->st_birthtimespec.tv_nsec;
  dst->st_flags = src->st_flags;
  dst->st_gen = src->st_gen;
#elif defined(__ANDROID__)
  /* Bionic: seconds and nanoseconds are separate fields. */
  dst->st_atim.tv_sec = src->st_atime;
  dst->st_atim.tv_nsec = src->st_atimensec;
  dst->st_mtim.tv_sec = src->st_mtime;
  dst->st_mtim.tv_nsec = src->st_mtimensec;
  dst->st_ctim.tv_sec = src->st_ctime;
  dst->st_ctim.tv_nsec = src->st_ctimensec;
  dst->st_birthtim.tv_sec = src->st_ctime;
  dst->st_birthtim.tv_nsec = src->st_ctimensec;
  dst->st_flags = 0;
  dst->st_gen = 0;
#elif !defined(_AIX) && \
    !defined(__MVS__) && ( \
    defined(__DragonFly__)  || \
    defined(__FreeBSD__)    || \
    defined(__OpenBSD__)    || \
    defined(__NetBSD__)     || \
    defined(_GNU_SOURCE)    || \
    defined(_BSD_SOURCE)    || \
    defined(_SVID_SOURCE)   || \
    defined(_XOPEN_SOURCE)  || \
    defined(_DEFAULT_SOURCE))
  /* Platforms exposing struct timespec members (st_atim et al.). */
  dst->st_atim.tv_sec = src->st_atim.tv_sec;
  dst->st_atim.tv_nsec = src->st_atim.tv_nsec;
  dst->st_mtim.tv_sec = src->st_mtim.tv_sec;
  dst->st_mtim.tv_nsec = src->st_mtim.tv_nsec;
  dst->st_ctim.tv_sec = src->st_ctim.tv_sec;
  dst->st_ctim.tv_nsec = src->st_ctim.tv_nsec;
# if defined(__FreeBSD__)    || \
     defined(__NetBSD__)
  dst->st_birthtim.tv_sec = src->st_birthtim.tv_sec;
  dst->st_birthtim.tv_nsec = src->st_birthtim.tv_nsec;
  dst->st_flags = src->st_flags;
  dst->st_gen = src->st_gen;
# else
  dst->st_birthtim.tv_sec = src->st_ctim.tv_sec;
  dst->st_birthtim.tv_nsec = src->st_ctim.tv_nsec;
  dst->st_flags = 0;
  dst->st_gen = 0;
# endif
#else
  /* Fallback: only whole-second timestamps are available. */
  dst->st_atim.tv_sec = src->st_atime;
  dst->st_atim.tv_nsec = 0;
  dst->st_mtim.tv_sec = src->st_mtime;
  dst->st_mtim.tv_nsec = 0;
  dst->st_ctim.tv_sec = src->st_ctime;
  dst->st_ctim.tv_nsec = 0;
  dst->st_birthtim.tv_sec = src->st_ctime;
  dst->st_birthtim.tv_nsec = 0;
  dst->st_flags = 0;
  dst->st_gen = 0;
#endif
}
1455
1456
/* stat()/lstat()/fstat() via the Linux statx(2) system call. Fills `buf`
 * (including the birth time, which plain stat lacks) and returns 0, or
 * UV_ENOSYS when statx is unavailable/blocked so the caller can fall back
 * to the classic syscall. Once unavailability is detected it is cached in
 * `no_statx` and all later calls short-circuit. On non-Linux platforms
 * this always returns UV_ENOSYS.
 */
static int uv__fs_statx(int fd,
                        const char* path,
                        int is_fstat,
                        int is_lstat,
                        uv_stat_t* buf) {
  STATIC_ASSERT(UV_ENOSYS != -1);
#ifdef __linux__
  static int no_statx;
  struct uv__statx statxbuf;
  int dirfd;
  int flags;
  int mode;
  int rc;

  if (uv__load_relaxed(&no_statx))
    return UV_ENOSYS;

  dirfd = AT_FDCWD;
  flags = 0; /* AT_STATX_SYNC_AS_STAT */
  mode = 0xFFF; /* STATX_BASIC_STATS + STATX_BTIME */

  if (is_fstat) {
    /* Query the fd itself: pass it as dirfd with an empty path. */
    dirfd = fd;
    flags |= 0x1000; /* AT_EMPTY_PATH */
  }

  if (is_lstat)
    flags |= AT_SYMLINK_NOFOLLOW;

  rc = uv__statx(dirfd, path, flags, mode, &statxbuf);

  switch (rc) {
  case 0:
    break;
  case -1:
    /* EPERM happens when a seccomp filter rejects the system call.
     * Has been observed with libseccomp < 2.3.3 and docker < 18.04.
     * EOPNOTSUPP is used on DVS exported filesystems
     */
    if (errno != EINVAL && errno != EPERM && errno != ENOSYS && errno != EOPNOTSUPP)
      return -1;
    /* Fall through. */
  default:
    /* Normally on success, zero is returned and On error, -1 is returned.
     * Observed on S390 RHEL running in a docker container with statx not
     * implemented, rc might return 1 with 0 set as the error code in which
     * case we return ENOSYS.
     */
    uv__store_relaxed(&no_statx, 1);
    return UV_ENOSYS;
  }

  /* Translate struct statx into the portable uv_stat_t. */
  buf->st_dev = makedev(statxbuf.stx_dev_major, statxbuf.stx_dev_minor);
  buf->st_mode = statxbuf.stx_mode;
  buf->st_nlink = statxbuf.stx_nlink;
  buf->st_uid = statxbuf.stx_uid;
  buf->st_gid = statxbuf.stx_gid;
  buf->st_rdev = makedev(statxbuf.stx_rdev_major, statxbuf.stx_rdev_minor);
  buf->st_ino = statxbuf.stx_ino;
  buf->st_size = statxbuf.stx_size;
  buf->st_blksize = statxbuf.stx_blksize;
  buf->st_blocks = statxbuf.stx_blocks;
  buf->st_atim.tv_sec = statxbuf.stx_atime.tv_sec;
  buf->st_atim.tv_nsec = statxbuf.stx_atime.tv_nsec;
  buf->st_mtim.tv_sec = statxbuf.stx_mtime.tv_sec;
  buf->st_mtim.tv_nsec = statxbuf.stx_mtime.tv_nsec;
  buf->st_ctim.tv_sec = statxbuf.stx_ctime.tv_sec;
  buf->st_ctim.tv_nsec = statxbuf.stx_ctime.tv_nsec;
  buf->st_birthtim.tv_sec = statxbuf.stx_btime.tv_sec;
  buf->st_birthtim.tv_nsec = statxbuf.stx_btime.tv_nsec;
  buf->st_flags = 0;
  buf->st_gen = 0;

  return 0;
#else
  return UV_ENOSYS;
#endif /* __linux__ */
}
1535
1536
/* stat() the file at `path` into `buf`. Prefers the statx()-based fast
 * path and falls back to plain stat() when statx is unavailable.
 */
static int uv__fs_stat(const char *path, uv_stat_t *buf) {
  struct stat st;
  int r;

  r = uv__fs_statx(-1, path, /* is_fstat */ 0, /* is_lstat */ 0, buf);
  if (r == UV_ENOSYS) {
    /* statx not usable on this system; use the classic syscall. */
    r = stat(path, &st);
    if (r == 0)
      uv__to_stat(&st, buf);
  }

  return r;
}
1551
1552
/* lstat() the file at `path` (no symlink following) into `buf`. Prefers
 * the statx()-based fast path, falling back to plain lstat().
 */
static int uv__fs_lstat(const char *path, uv_stat_t *buf) {
  struct stat st;
  int r;

  r = uv__fs_statx(-1, path, /* is_fstat */ 0, /* is_lstat */ 1, buf);
  if (r == UV_ENOSYS) {
    /* statx not usable on this system; use the classic syscall. */
    r = lstat(path, &st);
    if (r == 0)
      uv__to_stat(&st, buf);
  }

  return r;
}
1567
1568
/* fstat() the open descriptor `fd` into `buf`. Prefers the statx()-based
 * fast path, falling back to plain fstat().
 */
static int uv__fs_fstat(int fd, uv_stat_t *buf) {
  struct stat st;
  int r;

  r = uv__fs_statx(fd, "", /* is_fstat */ 1, /* is_lstat */ 0, buf);
  if (r == UV_ENOSYS) {
    /* statx not usable on this system; use the classic syscall. */
    r = fstat(fd, &st);
    if (r == 0)
      uv__to_stat(&st, buf);
  }

  return r;
}
1583
/* After a short read/write of `size` bytes, advance past the buffers that
 * were fully consumed and shrink the first partially consumed one in
 * place. Returns the number of fully consumed buffers.
 */
static size_t uv__fs_buf_offset(uv_buf_t* bufs, size_t size) {
  size_t idx;
  size_t remaining;

  /* Skip buffers that were consumed entirely. */
  idx = 0;
  remaining = size;
  while (remaining > 0 && bufs[idx].len <= remaining) {
    remaining -= bufs[idx].len;
    idx++;
  }

  /* Trim the buffer that was only partially consumed. */
  if (remaining > 0) {
    bufs[idx].base += remaining;
    bufs[idx].len -= remaining;
  }

  return idx;
}
1597
/* Write all of req->bufs, looping as needed: the iovec count per call is
 * capped at the system IOV_MAX, EINTR is retried, and partial writes
 * advance req->bufs/req->off before the next attempt. Returns the total
 * number of bytes written, or the first error if nothing was written.
 * Frees any heap-allocated buffer list and clears req->bufs/req->nbufs.
 */
static ssize_t uv__fs_write_all(uv_fs_t* req) {
  unsigned int iovmax;
  unsigned int nbufs;
  uv_buf_t* bufs;
  ssize_t total;
  ssize_t result;

  iovmax = uv__getiovmax();
  nbufs = req->nbufs;
  bufs = req->bufs;  /* Remember the original list so it can be freed. */
  total = 0;

  while (nbufs > 0) {
    /* Write at most iovmax buffers per uv__fs_write() call. */
    req->nbufs = nbufs;
    if (req->nbufs > iovmax)
      req->nbufs = iovmax;

    do
      result = uv__fs_write(req);
    while (result < 0 && errno == EINTR);

    if (result <= 0) {
      /* Report the error only if no data was written at all. */
      if (total == 0)
        total = result;
      break;
    }

    /* A negative offset means "use the current file position" — only
     * advance the explicit offset. */
    if (req->off >= 0)
      req->off += result;

    /* Skip past the buffers consumed by this write. */
    req->nbufs = uv__fs_buf_offset(req->bufs, result);
    req->bufs += req->nbufs;
    nbufs -= req->nbufs;
    total += result;
  }

  if (bufs != req->bufsml)
    uv__free(bufs);

  req->bufs = NULL;
  req->nbufs = 0;

  return total;
}
1642
1643
/* Thread-pool entry point: dispatch the request to the matching syscall
 * or uv__fs_* helper, retrying on EINTR (except for CLOSE and READ, where
 * a retry could act on a reused descriptor or double-count data), then
 * store the result — a negated errno on failure — in req->result.
 */
static void uv__fs_work(struct uv__work* w) {
  int retry_on_eintr;
  uv_fs_t* req;
  ssize_t r;

  req = container_of(w, uv_fs_t, work_req);
  retry_on_eintr = !(req->fs_type == UV_FS_CLOSE ||
                     req->fs_type == UV_FS_READ);

  do {
    errno = 0;

/* Map one UV_FS_* opcode to the expression that performs it. */
#define X(type, action)                                                       \
  case UV_FS_ ## type:                                                        \
    r = action;                                                               \
    break;

    switch (req->fs_type) {
    X(ACCESS, access(req->path, req->flags));
    X(CHMOD, chmod(req->path, req->mode));
    X(CHOWN, chown(req->path, req->uid, req->gid));
    X(CLOSE, uv__fs_close(req->file));
    X(COPYFILE, uv__fs_copyfile(req));
    X(FCHMOD, fchmod(req->file, req->mode));
    X(FCHOWN, fchown(req->file, req->uid, req->gid));
    X(LCHOWN, lchown(req->path, req->uid, req->gid));
    X(FDATASYNC, uv__fs_fdatasync(req));
    X(FSTAT, uv__fs_fstat(req->file, &req->statbuf));
    X(FSYNC, uv__fs_fsync(req));
    X(FTRUNCATE, ftruncate(req->file, req->off));
    X(FUTIME, uv__fs_futime(req));
    X(LUTIME, uv__fs_lutime(req));
    X(LSTAT, uv__fs_lstat(req->path, &req->statbuf));
    X(LINK, link(req->path, req->new_path));
    X(MKDIR, mkdir(req->path, req->mode));
    X(MKDTEMP, uv__fs_mkdtemp(req));
    X(MKSTEMP, uv__fs_mkstemp(req));
    X(OPEN, uv__fs_open(req));
    X(READ, uv__fs_read(req));
    X(SCANDIR, uv__fs_scandir(req));
    X(OPENDIR, uv__fs_opendir(req));
    X(READDIR, uv__fs_readdir(req));
    X(CLOSEDIR, uv__fs_closedir(req));
    X(READLINK, uv__fs_readlink(req));
    X(REALPATH, uv__fs_realpath(req));
    X(RENAME, rename(req->path, req->new_path));
    X(RMDIR, rmdir(req->path));
    X(SENDFILE, uv__fs_sendfile(req));
    X(STAT, uv__fs_stat(req->path, &req->statbuf));
    X(STATFS, uv__fs_statfs(req));
    X(SYMLINK, symlink(req->path, req->new_path));
    X(UNLINK, unlink(req->path));
    X(UTIME, uv__fs_utime(req));
    X(WRITE, uv__fs_write_all(req));
    default: abort();
    }
#undef X
  } while (r == -1 && errno == EINTR && retry_on_eintr);

  /* Translate the -1/errno convention into a libuv error code. */
  if (r == -1)
    req->result = UV__ERR(errno);
  else
    req->result = r;

  /* For the stat family, expose the stat buffer through req->ptr too. */
  if (r == 0 && (req->fs_type == UV_FS_STAT ||
                 req->fs_type == UV_FS_FSTAT ||
                 req->fs_type == UV_FS_LSTAT)) {
    req->ptr = &req->statbuf;
  }
}
1714
1715
uv__fs_done(struct uv__work * w,int status)1716 static void uv__fs_done(struct uv__work* w, int status) {
1717 uv_fs_t* req;
1718
1719 req = container_of(w, uv_fs_t, work_req);
1720 uv__req_unregister(req->loop, req);
1721
1722 if (status == UV_ECANCELED) {
1723 assert(req->result == 0);
1724 req->result = UV_ECANCELED;
1725 }
1726
1727 req->cb(req);
1728 }
1729
1730
/* Check accessibility of `path` with access(2) semantics; `flags` is the
 * access mode (F_OK/R_OK/W_OK/X_OK). Async when `cb` is non-NULL. */
int uv_fs_access(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 int flags,
                 uv_fs_cb cb) {
  INIT(ACCESS);
  PATH;
  req->flags = flags;
  POST;
}
1741
1742
/* Change the mode of `path`; maps to chmod(2) in uv__fs_work(). */
int uv_fs_chmod(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                int mode,
                uv_fs_cb cb) {
  INIT(CHMOD);
  PATH;
  req->mode = mode;
  POST;
}
1753
1754
/* Change ownership of `path`; maps to chown(2) in uv__fs_work(). */
int uv_fs_chown(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                uv_uid_t uid,
                uv_gid_t gid,
                uv_fs_cb cb) {
  INIT(CHOWN);
  PATH;
  req->uid = uid;
  req->gid = gid;
  POST;
}
1767
1768
/* Close the descriptor `file`; handled by uv__fs_close() in uv__fs_work(). */
int uv_fs_close(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
  INIT(CLOSE);
  req->file = file;
  POST;
}
1774
1775
/* Change the mode of an open descriptor; maps to fchmod(2). */
int uv_fs_fchmod(uv_loop_t* loop,
                 uv_fs_t* req,
                 uv_file file,
                 int mode,
                 uv_fs_cb cb) {
  INIT(FCHMOD);
  req->file = file;
  req->mode = mode;
  POST;
}
1786
1787
/* Change ownership via an open descriptor; maps to fchown(2). */
int uv_fs_fchown(uv_loop_t* loop,
                 uv_fs_t* req,
                 uv_file file,
                 uv_uid_t uid,
                 uv_gid_t gid,
                 uv_fs_cb cb) {
  INIT(FCHOWN);
  req->file = file;
  req->uid = uid;
  req->gid = gid;
  POST;
}
1800
1801
/* Change ownership of `path` without following symlinks; maps to lchown(2). */
int uv_fs_lchown(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 uv_uid_t uid,
                 uv_gid_t gid,
                 uv_fs_cb cb) {
  INIT(LCHOWN);
  PATH;
  req->uid = uid;
  req->gid = gid;
  POST;
}
1814
1815
/* Flush file data (not necessarily metadata); handled by uv__fs_fdatasync(). */
int uv_fs_fdatasync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
  INIT(FDATASYNC);
  req->file = file;
  POST;
}
1821
1822
/* Stat an open descriptor; handled by uv__fs_fstat() into req->statbuf. */
int uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
  INIT(FSTAT);
  req->file = file;
  POST;
}
1828
1829
/* Flush file data and metadata; handled by uv__fs_fsync(). */
int uv_fs_fsync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
  INIT(FSYNC);
  req->file = file;
  POST;
}
1835
1836
/* Truncate/extend an open file to `off` bytes; maps to ftruncate(2). */
int uv_fs_ftruncate(uv_loop_t* loop,
                    uv_fs_t* req,
                    uv_file file,
                    int64_t off,
                    uv_fs_cb cb) {
  INIT(FTRUNCATE);
  req->file = file;
  req->off = off;
  POST;
}
1847
1848
/* Update an open file's access/modification times (seconds, as doubles);
 * handled by uv__fs_futime(). */
int uv_fs_futime(uv_loop_t* loop,
                 uv_fs_t* req,
                 uv_file file,
                 double atime,
                 double mtime,
                 uv_fs_cb cb) {
  INIT(FUTIME);
  req->file = file;
  req->atime = atime;
  req->mtime = mtime;
  POST;
}
1861
/* Update `path`'s access/modification times; handled by uv__fs_lutime()
 * (the "l" variant — presumably does not follow symlinks). */
int uv_fs_lutime(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 double atime,
                 double mtime,
                 uv_fs_cb cb) {
  INIT(LUTIME);
  PATH;
  req->atime = atime;
  req->mtime = mtime;
  POST;
}
1874
1875
/* Stat `path` without following a trailing symlink; see uv__fs_lstat(). */
int uv_fs_lstat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  INIT(LSTAT);
  PATH;
  POST;
}
1881
1882
/* Create a hard link `new_path` -> `path`; maps to link(2). */
int uv_fs_link(uv_loop_t* loop,
               uv_fs_t* req,
               const char* path,
               const char* new_path,
               uv_fs_cb cb) {
  INIT(LINK);
  PATH2;
  POST;
}
1892
1893
/* Create a directory at `path` with the given mode; maps to mkdir(2). */
int uv_fs_mkdir(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                int mode,
                uv_fs_cb cb) {
  INIT(MKDIR);
  PATH;
  req->mode = mode;
  POST;
}
1904
1905
/* Create a unique temporary directory from template `tpl`. The template is
 * always copied (even for sync calls) because the worker mutates it in
 * place; uv_fs_req_cleanup() frees it. */
int uv_fs_mkdtemp(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* tpl,
                  uv_fs_cb cb) {
  INIT(MKDTEMP);
  req->path = uv__strdup(tpl);
  if (req->path == NULL)
    return UV_ENOMEM;
  POST;
}
1916
1917
/* Create and open a unique temporary file from template `tpl`. Like
 * uv_fs_mkdtemp(), the template is always copied because the worker
 * mutates it; uv_fs_req_cleanup() frees the copy. */
int uv_fs_mkstemp(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* tpl,
                  uv_fs_cb cb) {
  INIT(MKSTEMP);
  req->path = uv__strdup(tpl);
  if (req->path == NULL)
    return UV_ENOMEM;
  POST;
}
1928
1929
/* Open `path` with open(2)-style flags and creation mode; handled by
 * uv__fs_open(). On success the result is the file descriptor. */
int uv_fs_open(uv_loop_t* loop,
               uv_fs_t* req,
               const char* path,
               int flags,
               int mode,
               uv_fs_cb cb) {
  INIT(OPEN);
  PATH;
  req->flags = flags;
  req->mode = mode;
  POST;
}
1942
1943
/* Read into the caller's buffers at offset `off` (or the current position
 * when off < 0). The uv_buf_t array is copied — into the small inline
 * req->bufsml when it fits, otherwise into a heap allocation — so the
 * caller's array need not outlive the call. */
int uv_fs_read(uv_loop_t* loop, uv_fs_t* req,
               uv_file file,
               const uv_buf_t bufs[],
               unsigned int nbufs,
               int64_t off,
               uv_fs_cb cb) {
  INIT(READ);

  if (bufs == NULL || nbufs == 0)
    return UV_EINVAL;

  req->file = file;

  req->nbufs = nbufs;
  req->bufs = req->bufsml;
  if (nbufs > ARRAY_SIZE(req->bufsml))
    req->bufs = uv__malloc(nbufs * sizeof(*bufs));

  if (req->bufs == NULL)
    return UV_ENOMEM;

  memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));

  req->off = off;
  POST;
}
1970
1971
/* List directory entries of `path`; handled by uv__fs_scandir(). */
int uv_fs_scandir(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* path,
                  int flags,
                  uv_fs_cb cb) {
  INIT(SCANDIR);
  PATH;
  req->flags = flags;
  POST;
}
1982
/* Open directory `path` for iteration; handled by uv__fs_opendir(). */
int uv_fs_opendir(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* path,
                  uv_fs_cb cb) {
  INIT(OPENDIR);
  PATH;
  POST;
}
1991
/* Read the next batch of entries from an opened `dir` into dir->dirents;
 * handled by uv__fs_readdir(). The dir is passed via req->ptr. */
int uv_fs_readdir(uv_loop_t* loop,
                  uv_fs_t* req,
                  uv_dir_t* dir,
                  uv_fs_cb cb) {
  INIT(READDIR);

  if (dir == NULL || dir->dir == NULL || dir->dirents == NULL)
    return UV_EINVAL;

  req->ptr = dir;
  POST;
}
2004
/* Close a directory opened with uv_fs_opendir(); handled by
 * uv__fs_closedir(). The dir is passed via req->ptr. */
int uv_fs_closedir(uv_loop_t* loop,
                   uv_fs_t* req,
                   uv_dir_t* dir,
                   uv_fs_cb cb) {
  INIT(CLOSEDIR);

  if (dir == NULL)
    return UV_EINVAL;

  req->ptr = dir;
  POST;
}
2017
/* Read the target of symlink `path`; handled by uv__fs_readlink(). */
int uv_fs_readlink(uv_loop_t* loop,
                   uv_fs_t* req,
                   const char* path,
                   uv_fs_cb cb) {
  INIT(READLINK);
  PATH;
  POST;
}
2026
2027
/* Resolve `path` to a canonical absolute path; handled by uv__fs_realpath(). */
int uv_fs_realpath(uv_loop_t* loop,
                   uv_fs_t* req,
                   const char * path,
                   uv_fs_cb cb) {
  INIT(REALPATH);
  PATH;
  POST;
}
2036
2037
/* Rename `path` to `new_path`; maps to rename(2). */
int uv_fs_rename(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 const char* new_path,
                 uv_fs_cb cb) {
  INIT(RENAME);
  PATH2;
  POST;
}
2047
2048
/* Remove the directory at `path`; maps to rmdir(2). */
int uv_fs_rmdir(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  INIT(RMDIR);
  PATH;
  POST;
}
2054
2055
/* Transfer `len` bytes from in_fd (at `off`) to out_fd; handled by
 * uv__fs_sendfile(). The request struct has no dedicated fields for a
 * second fd or a length, so in_fd rides in req->flags and len in
 * req->bufsml[0].len. */
int uv_fs_sendfile(uv_loop_t* loop,
                   uv_fs_t* req,
                   uv_file out_fd,
                   uv_file in_fd,
                   int64_t off,
                   size_t len,
                   uv_fs_cb cb) {
  INIT(SENDFILE);
  req->flags = in_fd; /* hack */
  req->file = out_fd;
  req->off = off;
  req->bufsml[0].len = len;
  POST;
}
2070
2071
/* Stat `path` (following symlinks); see uv__fs_stat(). */
int uv_fs_stat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  INIT(STAT);
  PATH;
  POST;
}
2077
2078
/* Create symlink `new_path` pointing at `path`; maps to symlink(2).
 * `flags` is stored for the caller's UV_FS_SYMLINK_* options (the
 * worker's symlink(2) call itself takes no flags on Unix). */
int uv_fs_symlink(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* path,
                  const char* new_path,
                  int flags,
                  uv_fs_cb cb) {
  INIT(SYMLINK);
  PATH2;
  req->flags = flags;
  POST;
}
2090
2091
/* Remove the file at `path`; maps to unlink(2). */
int uv_fs_unlink(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  INIT(UNLINK);
  PATH;
  POST;
}
2097
2098
/* Update `path`'s access/modification times (seconds, as doubles);
 * handled by uv__fs_utime(). */
int uv_fs_utime(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                double atime,
                double mtime,
                uv_fs_cb cb) {
  INIT(UTIME);
  PATH;
  req->atime = atime;
  req->mtime = mtime;
  POST;
}
2111
2112
/* Write the caller's buffers at offset `off` (or the current position
 * when off < 0); handled by uv__fs_write_all(). The uv_buf_t array is
 * copied — inline into req->bufsml when it fits, otherwise to the heap —
 * so the caller's array need not outlive the call. */
int uv_fs_write(uv_loop_t* loop,
                uv_fs_t* req,
                uv_file file,
                const uv_buf_t bufs[],
                unsigned int nbufs,
                int64_t off,
                uv_fs_cb cb) {
  INIT(WRITE);

  if (bufs == NULL || nbufs == 0)
    return UV_EINVAL;

  req->file = file;

  req->nbufs = nbufs;
  req->bufs = req->bufsml;
  if (nbufs > ARRAY_SIZE(req->bufsml))
    req->bufs = uv__malloc(nbufs * sizeof(*bufs));

  if (req->bufs == NULL)
    return UV_ENOMEM;

  memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));

  req->off = off;
  POST;
}
2140
2141
/* Release memory held by a completed request. Safe to call with NULL and
 * idempotent (freed pointers are set to NULL). */
void uv_fs_req_cleanup(uv_fs_t* req) {
  if (req == NULL)
    return;

  /* Only necessary for asynchronous requests, i.e., requests with a callback.
   * Synchronous ones don't copy their arguments and have req->path and
   * req->new_path pointing to user-owned memory. UV_FS_MKDTEMP and
   * UV_FS_MKSTEMP are the exception to the rule, they always allocate memory.
   */
  if (req->path != NULL &&
      (req->cb != NULL ||
       req->fs_type == UV_FS_MKDTEMP || req->fs_type == UV_FS_MKSTEMP))
    uv__free((void*) req->path); /* Memory is shared with req->new_path. */

  req->path = NULL;
  req->new_path = NULL;

  if (req->fs_type == UV_FS_READDIR && req->ptr != NULL)
    uv__fs_readdir_cleanup(req);

  if (req->fs_type == UV_FS_SCANDIR && req->ptr != NULL)
    uv__fs_scandir_cleanup(req);

  /* Free the buffer list only if it spilled to the heap. */
  if (req->bufs != req->bufsml)
    uv__free(req->bufs);
  req->bufs = NULL;

  /* Skip req->ptr when it points at the embedded statbuf, and for OPENDIR
   * (the uv_dir_t is handed to the caller, who passes it to readdir/closedir
   * later — NOTE(review): ownership transfer; confirm against uv_fs_opendir
   * docs). */
  if (req->fs_type != UV_FS_OPENDIR && req->ptr != &req->statbuf)
    uv__free(req->ptr);
  req->ptr = NULL;
}
2173
2174
/* Copy `path` to `new_path`. Validates that only the supported
 * UV_FS_COPYFILE_* flags are set before queuing; the actual copy happens
 * in uv__fs_copyfile(). */
int uv_fs_copyfile(uv_loop_t* loop,
                   uv_fs_t* req,
                   const char* path,
                   const char* new_path,
                   int flags,
                   uv_fs_cb cb) {
  INIT(COPYFILE);

  if (flags & ~(UV_FS_COPYFILE_EXCL |
                UV_FS_COPYFILE_FICLONE |
                UV_FS_COPYFILE_FICLONE_FORCE)) {
    return UV_EINVAL;
  }

  PATH2;
  req->flags = flags;
  POST;
}
2193
2194
/* Query filesystem statistics for `path`; handled by uv__fs_statfs(). */
int uv_fs_statfs(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 uv_fs_cb cb) {
  INIT(STATFS);
  PATH;
  POST;
}
2203
uv_fs_get_system_error(const uv_fs_t * req)2204 int uv_fs_get_system_error(const uv_fs_t* req) {
2205 return -req->result;
2206 }
2207