xref: /freebsd/tests/sys/fs/fusefs/write.cc (revision 206b73d0)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2019 The FreeBSD Foundation
5  *
6  * This software was developed by BFF Storage Systems, LLC under sponsorship
7  * from the FreeBSD Foundation.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  * $FreeBSD$
31  */
32 
33 extern "C" {
34 #include <sys/param.h>
35 #include <sys/mman.h>
36 #include <sys/resource.h>
37 #include <sys/stat.h>
38 #include <sys/time.h>
39 #include <sys/uio.h>
40 
41 #include <aio.h>
42 #include <fcntl.h>
43 #include <signal.h>
44 #include <unistd.h>
45 }
46 
47 #include "mockfs.hh"
48 #include "utils.hh"
49 
50 using namespace testing;
51 
class Write: public FuseTest {

public:
/*
 * Set to 1 by sigxfsz_handler when SIGXFSZ is delivered; cleared in
 * SetUp so each test starts with a clean slate.
 */
static sig_atomic_t s_sigxfsz;

void SetUp() {
	s_sigxfsz = 0;
	FuseTest::SetUp();
}

void TearDown() {
	struct sigaction sa;

	/* Restore the default SIGXFSZ disposition in case a test changed it */
	bzero(&sa, sizeof(sa));
	sa.sa_handler = SIG_DFL;
	sigaction(SIGXFSZ, &sa, NULL);

	FuseTest::TearDown();
}

/* Expect a FUSE_LOOKUP of a regular 0644 file with the given size */
void expect_lookup(const char *relpath, uint64_t ino, uint64_t size)
{
	FuseTest::expect_lookup(relpath, ino, S_IFREG | 0644, size, 1);
}

/* Expect any number of FUSE_RELEASE operations on ino, each handled by r */
void expect_release(uint64_t ino, ProcessMockerT r)
{
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_RELEASE &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(r));
}

/*
 * Expect one FUSE_WRITE of isize bytes at offset; the mock daemon will
 * report osize bytes written (osize < isize simulates a short write).
 */
void expect_write(uint64_t ino, uint64_t offset, uint64_t isize,
	uint64_t osize, const void *contents)
{
	FuseTest::expect_write(ino, offset, isize, osize, 0, 0, contents);
}

/* Expect a write that may or may not come, depending on the cache mode */
void maybe_expect_write(uint64_t ino, uint64_t offset, uint64_t size,
	const void *contents)
{
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			/* The payload begins right after the fuse_write_in header */
			const char *buf = (const char*)in.body.bytes +
				sizeof(struct fuse_write_in);

			return (in.header.opcode == FUSE_WRITE &&
				in.header.nodeid == ino &&
				in.body.write.offset == offset  &&
				in.body.write.size == size &&
				0 == bcmp(buf, contents, size));
		}, Eq(true)),
		_)
	).Times(AtMost(1))
	.WillRepeatedly(Invoke(
		ReturnImmediate([=](auto in __unused, auto& out) {
			SET_OUT_HEADER_LEN(out, write);
			out.body.write.size = size;
		})
	));
}

};
120 
121 sig_atomic_t Write::s_sigxfsz = 0;
122 
/* Write tests run against FUSE kernel protocol minor version 8 */
class Write_7_8: public FuseTest {

public:
virtual void SetUp() {
	m_kernel_minor_version = 8;
	FuseTest::SetUp();
}

/* Use the protocol-7.8 variant of the lookup expectation */
void expect_lookup(const char *relpath, uint64_t ino, uint64_t size)
{
	FuseTest::expect_lookup_7_8(relpath, ino, S_IFREG | 0644, size, 1);
}

};
137 
/* Write tests that use POSIX AIO; skipped unless unsafe AIO is enabled */
class AioWrite: public Write {
virtual void SetUp() {
	if (!is_unsafe_aio_enabled())
		GTEST_SKIP() <<
			"vfs.aio.enable_unsafe must be set for this test";
	FuseTest::SetUp();
}
};
146 
147 /* Tests for the writeback cache mode */
148 class WriteBack: public Write {
149 public:
150 virtual void SetUp() {
151 	m_init_flags |= FUSE_WRITEBACK_CACHE;
152 	FuseTest::SetUp();
153 	if (IsSkipped())
154 		return;
155 }
156 
157 void expect_write(uint64_t ino, uint64_t offset, uint64_t isize,
158 	uint64_t osize, const void *contents)
159 {
160 	FuseTest::expect_write(ino, offset, isize, osize, FUSE_WRITE_CACHE, 0,
161 		contents);
162 }
163 };
164 
/* Writeback cacheing with the file system mounted -o async */
class WriteBackAsync: public WriteBack {
public:
virtual void SetUp() {
	m_async = true;
	WriteBack::SetUp();
}
};
172 
/* Parameterized on the log2 of the time granularity (m_time_gran) */
class TimeGran: public WriteBackAsync, public WithParamInterface<unsigned> {
public:
virtual void SetUp() {
	m_time_gran = 1 << GetParam();
	WriteBackAsync::SetUp();
}
};
180 
181 /* Tests for clustered writes with WriteBack cacheing */
/* Tests for clustered writes with WriteBack cacheing */
class WriteCluster: public WriteBack {
public:
virtual void SetUp() {
	m_async = true;
	/* Allow single FUSE_WRITEs as large as the kernel will cluster */
	m_maxwrite = m_maxphys;
	WriteBack::SetUp();
	/*
	 * NOTE(review): these skip checks run after WriteBack::SetUp, so the
	 * file system is already mounted by the time we decide to skip —
	 * confirm that's intentional.
	 */
	if (m_maxphys < 2 * DFLTPHYS)
		GTEST_SKIP() << "MAXPHYS must be at least twice DFLTPHYS"
			<< " for this test";
	if (m_maxphys < 2 * m_maxbcachebuf)
		GTEST_SKIP() << "MAXPHYS must be at least twice maxbcachebuf"
			<< " for this test";
}
};
196 
/* Record receipt of SIGXFSZ (file size limit exceeded) for later checks */
void sigxfsz_handler(int __unused sig) {
	Write::s_sigxfsz = 1;
}
200 
201 /* AIO writes need to set the header's pid field correctly */
202 /* https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=236379 */
203 TEST_F(AioWrite, DISABLED_aio_write)
204 {
205 	const char FULLPATH[] = "mountpoint/some_file.txt";
206 	const char RELPATH[] = "some_file.txt";
207 	const char *CONTENTS = "abcdefgh";
208 	uint64_t ino = 42;
209 	uint64_t offset = 4096;
210 	int fd;
211 	ssize_t bufsize = strlen(CONTENTS);
212 	struct aiocb iocb, *piocb;
213 
214 	expect_lookup(RELPATH, ino, 0);
215 	expect_open(ino, 0, 1);
216 	expect_write(ino, offset, bufsize, bufsize, CONTENTS);
217 
218 	fd = open(FULLPATH, O_WRONLY);
219 	EXPECT_LE(0, fd) << strerror(errno);
220 
221 	iocb.aio_nbytes = bufsize;
222 	iocb.aio_fildes = fd;
223 	iocb.aio_buf = __DECONST(void *, CONTENTS);
224 	iocb.aio_offset = offset;
225 	iocb.aio_sigevent.sigev_notify = SIGEV_NONE;
226 	ASSERT_EQ(0, aio_write(&iocb)) << strerror(errno);
227 	ASSERT_EQ(bufsize, aio_waitcomplete(&piocb, NULL)) << strerror(errno);
228 	leak(fd);
229 }
230 
231 /*
232  * When a file is opened with O_APPEND, we should forward that flag to
233  * FUSE_OPEN (tested by Open.o_append) but still attempt to calculate the
234  * offset internally.  That way we'll work both with filesystems that
235  * understand O_APPEND (and ignore the offset) and filesystems that don't (and
236  * simply use the offset).
237  *
238  * Note that verifying the O_APPEND flag in FUSE_OPEN is done in the
239  * Open.o_append test.
240  */
TEST_F(Write, append)
{
	const ssize_t BUFSIZE = 9;
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char CONTENTS[BUFSIZE] = "abcdefgh";
	uint64_t ino = 42;
	/*
	 * Set offset to a maxbcachebuf boundary so we don't need to RMW when
	 * using writeback caching
	 */
	uint64_t initial_offset = m_maxbcachebuf;
	int fd;

	/* The file's initial size equals the offset where the append lands */
	expect_lookup(RELPATH, ino, initial_offset);
	expect_open(ino, 0, 1);
	expect_write(ino, initial_offset, BUFSIZE, BUFSIZE, CONTENTS);

	/* Must open O_RDWR or fuse(4) implicitly sets direct_io */
	fd = open(FULLPATH, O_RDWR | O_APPEND);
	EXPECT_LE(0, fd) << strerror(errno);

	/* No seek: O_APPEND should place the write at EOF */
	ASSERT_EQ(BUFSIZE, write(fd, CONTENTS, BUFSIZE)) << strerror(errno);
	leak(fd);
}
266 
267 /* If a file is cached, then appending to the end should not cause a read */
268 TEST_F(Write, append_to_cached)
269 {
270 	const ssize_t BUFSIZE = 9;
271 	const char FULLPATH[] = "mountpoint/some_file.txt";
272 	const char RELPATH[] = "some_file.txt";
273 	char *oldcontents, *oldbuf;
274 	const char CONTENTS[BUFSIZE] = "abcdefgh";
275 	uint64_t ino = 42;
276 	/*
277 	 * Set offset in between maxbcachebuf boundary to test buffer handling
278 	 */
279 	uint64_t oldsize = m_maxbcachebuf / 2;
280 	int fd;
281 
282 	oldcontents = (char*)calloc(1, oldsize);
283 	ASSERT_NE(nullptr, oldcontents) << strerror(errno);
284 	oldbuf = (char*)malloc(oldsize);
285 	ASSERT_NE(nullptr, oldbuf) << strerror(errno);
286 
287 	expect_lookup(RELPATH, ino, oldsize);
288 	expect_open(ino, 0, 1);
289 	expect_read(ino, 0, oldsize, oldsize, oldcontents);
290 	maybe_expect_write(ino, oldsize, BUFSIZE, CONTENTS);
291 
292 	/* Must open O_RDWR or fuse(4) implicitly sets direct_io */
293 	fd = open(FULLPATH, O_RDWR | O_APPEND);
294 	EXPECT_LE(0, fd) << strerror(errno);
295 
296 	/* Read the old data into the cache */
297 	ASSERT_EQ((ssize_t)oldsize, read(fd, oldbuf, oldsize))
298 		<< strerror(errno);
299 
300 	/* Write the new data.  There should be no more read operations */
301 	ASSERT_EQ(BUFSIZE, write(fd, CONTENTS, BUFSIZE)) << strerror(errno);
302 	leak(fd);
303 }
304 
TEST_F(Write, append_direct_io)
{
	const ssize_t BUFSIZE = 9;
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char CONTENTS[BUFSIZE] = "abcdefgh";
	uint64_t ino = 42;
	uint64_t initial_offset = 4096;
	int fd;

	expect_lookup(RELPATH, ino, initial_offset);
	/* The daemon requests direct_io for this file handle */
	expect_open(ino, FOPEN_DIRECT_IO, 1);
	expect_write(ino, initial_offset, BUFSIZE, BUFSIZE, CONTENTS);

	fd = open(FULLPATH, O_WRONLY | O_APPEND);
	EXPECT_LE(0, fd) << strerror(errno);

	/* Even with direct_io the append must land at the old EOF */
	ASSERT_EQ(BUFSIZE, write(fd, CONTENTS, BUFSIZE)) << strerror(errno);
	leak(fd);
}
325 
326 /* A direct write should evict any overlapping cached data */
TEST_F(Write, direct_io_evicts_cache)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char CONTENTS0[] = "abcdefgh";
	const char CONTENTS1[] = "ijklmnop";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS0) + 1;
	char readbuf[bufsize];

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS0);
	expect_write(ino, 0, bufsize, bufsize, CONTENTS1);

	fd = open(FULLPATH, O_RDWR);
	EXPECT_LE(0, fd) << strerror(errno);

	// Prime cache
	ASSERT_EQ(bufsize, read(fd, readbuf, bufsize)) << strerror(errno);

	// Write directly, evicting cache
	ASSERT_EQ(0, fcntl(fd, F_SETFL, O_DIRECT)) << strerror(errno);
	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
	ASSERT_EQ(bufsize, write(fd, CONTENTS1, bufsize)) << strerror(errno);

	// Read again.  Cache should be bypassed
	expect_read(ino, 0, bufsize, bufsize, CONTENTS1);
	// Clear O_DIRECT so a cache hit would satisfy the read silently
	ASSERT_EQ(0, fcntl(fd, F_SETFL, 0)) << strerror(errno);
	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
	ASSERT_EQ(bufsize, read(fd, readbuf, bufsize)) << strerror(errno);
	// The new contents prove the stale cached data was evicted
	ASSERT_STREQ(readbuf, CONTENTS1);

	leak(fd);
}
363 
364 /*
365  * If the server doesn't return FOPEN_DIRECT_IO during FUSE_OPEN, then it's not
366  * allowed to return a short write for that file handle.  However, if it does
367  * then we should still do our darndest to handle it by resending the unwritten
368  * portion.
369  */
TEST_F(Write, indirect_io_short_write)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefghijklmnop";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	/* The daemon will claim it wrote only the first 11 bytes ... */
	ssize_t bufsize0 = 11;
	/* ... so the kernel should resend the remaining tail */
	ssize_t bufsize1 = strlen(CONTENTS) - bufsize0;
	const char *contents1 = CONTENTS + bufsize0;

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	expect_write(ino, 0, bufsize, bufsize0, CONTENTS);
	expect_write(ino, bufsize0, bufsize1, bufsize1, contents1);

	fd = open(FULLPATH, O_WRONLY);
	EXPECT_LE(0, fd) << strerror(errno);

	/* Userland still sees one complete write */
	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	leak(fd);
}
393 
394 /*
395  * When the direct_io option is used, filesystems are allowed to write less
396  * data than requested.  We should return the short write to userland.
397  */
TEST_F(Write, direct_io_short_write)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefghijklmnop";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	ssize_t halfbufsize = bufsize / 2;

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, FOPEN_DIRECT_IO, 1);
	/* The daemon reports only half of the bytes as written */
	expect_write(ino, 0, bufsize, halfbufsize, CONTENTS);

	fd = open(FULLPATH, O_WRONLY);
	EXPECT_LE(0, fd) << strerror(errno);

	/* With direct_io the short count propagates to userland */
	ASSERT_EQ(halfbufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	leak(fd);
}
418 
419 /*
420  * An insidious edge case: the filesystem returns a short write, and the
421  * difference between what we requested and what it actually wrote crosses an
422  * iov element boundary
423  */
TEST_F(Write, direct_io_short_write_iov)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS0 = "abcdefgh";
	const char *CONTENTS1 = "ijklmnop";
	const char *EXPECTED0 = "abcdefghijklmnop";
	uint64_t ino = 42;
	int fd;
	/* size0 stops one byte short of the iov[0]/iov[1] boundary */
	ssize_t size0 = strlen(CONTENTS0) - 1;
	ssize_t size1 = strlen(CONTENTS1) + 1;
	ssize_t totalsize = size0 + size1;
	struct iovec iov[2];

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, FOPEN_DIRECT_IO, 1);
	/* The daemon writes only size0 bytes of the combined buffer */
	expect_write(ino, 0, totalsize, size0, EXPECTED0);

	fd = open(FULLPATH, O_WRONLY);
	EXPECT_LE(0, fd) << strerror(errno);

	iov[0].iov_base = __DECONST(void*, CONTENTS0);
	iov[0].iov_len = strlen(CONTENTS0);
	iov[1].iov_base = __DECONST(void*, CONTENTS1);
	iov[1].iov_len = strlen(CONTENTS1);
	/* The short count crossing the iov boundary reaches userland */
	ASSERT_EQ(size0, writev(fd, iov, 2)) << strerror(errno);
	leak(fd);
}
452 
453 /* fusefs should respect RLIMIT_FSIZE */
TEST_F(Write, rlimit_fsize)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	struct rlimit rl;
	ssize_t bufsize = strlen(CONTENTS);
	off_t offset = 1'000'000'000;
	uint64_t ino = 42;
	int fd;

	/* Note: no expect_write; the write must fail before reaching fusefs */
	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);

	/*
	 * NOTE(review): the original limits are not saved/restored, so the
	 * lowered RLIMIT_FSIZE persists for later tests in this process —
	 * confirm no sibling test writes past 1'000'000'000.
	 */
	rl.rlim_cur = offset;
	rl.rlim_max = 10 * offset;
	ASSERT_EQ(0, setrlimit(RLIMIT_FSIZE, &rl)) << strerror(errno);
	ASSERT_NE(SIG_ERR, signal(SIGXFSZ, sigxfsz_handler)) << strerror(errno);

	fd = open(FULLPATH, O_WRONLY);

	EXPECT_LE(0, fd) << strerror(errno);

	/* Writing past the limit must fail with EFBIG and raise SIGXFSZ */
	ASSERT_EQ(-1, pwrite(fd, CONTENTS, bufsize, offset));
	EXPECT_EQ(EFBIG, errno);
	EXPECT_EQ(1, s_sigxfsz);
	leak(fd);
}
482 
483 /*
484  * A short read indicates EOF.  Test that nothing bad happens if we get EOF
485  * during the R of a RMW operation.
486  */
TEST_F(Write, eof_during_rmw)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	const char *INITIAL   = "XXXXXXXXXX";
	uint64_t ino = 42;
	uint64_t offset = 1;
	ssize_t bufsize = strlen(CONTENTS);
	off_t orig_fsize = 10;
	off_t truncated_fsize = 5;
	off_t final_fsize = bufsize;
	int fd;

	FuseTest::expect_lookup(RELPATH, ino, S_IFREG | 0644, orig_fsize, 1);
	expect_open(ino, 0, 1);
	/* The read for the RMW comes up short: only 5 of 10 bytes (EOF) */
	expect_read(ino, 0, orig_fsize, truncated_fsize, INITIAL, O_RDWR);
	/* Expect a FUSE_GETATTR reporting the truncated size */
	expect_getattr(ino, truncated_fsize);
	expect_read(ino, 0, final_fsize, final_fsize, INITIAL, O_RDWR);
	maybe_expect_write(ino, offset, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDWR);
	EXPECT_LE(0, fd) << strerror(errno);

	/* The write must still succeed despite the mid-RMW EOF */
	ASSERT_EQ(bufsize, pwrite(fd, CONTENTS, bufsize, offset))
		<< strerror(errno);
	leak(fd);
}
515 
516 /*
517  * If the kernel cannot be sure which uid, gid, or pid was responsible for a
518  * write, then it must set the FUSE_WRITE_CACHE bit
519  */
520 /* https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=236378 */
TEST_F(Write, mmap)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	void *p;
	uint64_t offset = 10;
	size_t len;
	void *zeros, *expected;

	/* Map exactly one page */
	len = getpagesize();

	zeros = calloc(1, len);
	ASSERT_NE(nullptr, zeros);
	/* The flushed page should be all zeros except for CONTENTS at offset */
	expected = calloc(1, len);
	ASSERT_NE(nullptr, expected);
	memmove((uint8_t*)expected + offset, CONTENTS, bufsize);

	expect_lookup(RELPATH, ino, len);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, len, len, zeros);
	/*
	 * Writes from the pager may or may not be associated with the correct
	 * pid, so they must set FUSE_WRITE_CACHE.
	 */
	FuseTest::expect_write(ino, 0, len, len, FUSE_WRITE_CACHE, 0, expected);
	expect_flush(ino, 1, ReturnErrno(0));
	expect_release(ino, ReturnErrno(0));

	fd = open(FULLPATH, O_RDWR);
	EXPECT_LE(0, fd) << strerror(errno);

	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	ASSERT_NE(MAP_FAILED, p) << strerror(errno);

	/* Dirty the mapping without ever calling write(2) */
	memmove((uint8_t*)p + offset, CONTENTS, bufsize);

	ASSERT_EQ(0, munmap(p, len)) << strerror(errno);
	close(fd);	// Write mmap'd data on close

	free(expected);
	free(zeros);
}
567 
TEST_F(Write, pwrite)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	/* Write at a cache-block boundary, past the (empty) file's EOF */
	uint64_t offset = m_maxbcachebuf;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	expect_write(ino, offset, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_WRONLY);
	EXPECT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, pwrite(fd, CONTENTS, bufsize, offset))
		<< strerror(errno);
	leak(fd);
}
589 
590 /* Writing a file should update its cached mtime and ctime */
591 TEST_F(Write, timestamps)
592 {
593 	const char FULLPATH[] = "mountpoint/some_file.txt";
594 	const char RELPATH[] = "some_file.txt";
595 	const char *CONTENTS = "abcdefgh";
596 	ssize_t bufsize = strlen(CONTENTS);
597 	uint64_t ino = 42;
598 	struct stat sb0, sb1;
599 	int fd;
600 
601 	expect_lookup(RELPATH, ino, 0);
602 	expect_open(ino, 0, 1);
603 	maybe_expect_write(ino, 0, bufsize, CONTENTS);
604 
605 	fd = open(FULLPATH, O_RDWR);
606 	EXPECT_LE(0, fd) << strerror(errno);
607 	ASSERT_EQ(0, fstat(fd, &sb0)) << strerror(errno);
608 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
609 
610 	nap();
611 
612 	ASSERT_EQ(0, fstat(fd, &sb1)) << strerror(errno);
613 
614 	EXPECT_EQ(sb0.st_atime, sb1.st_atime);
615 	EXPECT_NE(sb0.st_mtime, sb1.st_mtime);
616 	EXPECT_NE(sb0.st_ctime, sb1.st_ctime);
617 }
618 
/* The simplest case: a single complete write */
TEST_F(Write, write)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	expect_write(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_WRONLY);
	EXPECT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	leak(fd);
}
638 
639 /* fuse(4) should not issue writes of greater size than the daemon requests */
TEST_F(Write, write_large)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	int *contents;
	uint64_t ino = 42;
	int fd;
	ssize_t halfbufsize, bufsize;

	/* Write twice the daemon's advertised maximum write size */
	halfbufsize = m_mock->m_maxwrite;
	bufsize = halfbufsize * 2;
	contents = (int*)malloc(bufsize);
	ASSERT_NE(nullptr, contents);
	/* Fill with a non-repeating pattern so reordered data wouldn't match */
	for (int i = 0; i < (int)bufsize / (int)sizeof(i); i++) {
		contents[i] = i;
	}

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	/* Expect the single write(2) to be split into two FUSE_WRITEs */
	maybe_expect_write(ino, 0, halfbufsize, contents);
	maybe_expect_write(ino, halfbufsize, halfbufsize,
		&contents[halfbufsize / sizeof(int)]);

	fd = open(FULLPATH, O_WRONLY);
	EXPECT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, contents, bufsize)) << strerror(errno);
	leak(fd);

	free(contents);
}
671 
/* A zero-length write succeeds; note that no FUSE_WRITE is expected */
TEST_F(Write, write_nothing)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = 0;

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);

	fd = open(FULLPATH, O_WRONLY);
	EXPECT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	leak(fd);
}
690 
/* Like Write.write, but with the protocol-7.8 write expectation */
TEST_F(Write_7_8, write)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	expect_write_7_8(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_WRONLY);
	EXPECT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	leak(fd);
}
710 
711 /* In writeback mode, dirty data should be written on close */
TEST_F(WriteBackAsync, close)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	/* The dirty data must be flushed before the file is released */
	expect_write(ino, 0, bufsize, bufsize, CONTENTS);
	/* Allow any FUSE_SETATTRs the close-time flush may generate */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_SETATTR);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnImmediate([=](auto i __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.ino = ino;	// Must match nodeid
	})));
	expect_flush(ino, 1, ReturnErrno(0));
	expect_release(ino, ReturnErrno(0));

	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	/* close, not leak: closing is the operation under test */
	close(fd);
}
742 
743 /* In writeback mode, adjacent writes will be clustered together */
744 TEST_F(WriteCluster, clustering)
745 {
746 	const char FULLPATH[] = "mountpoint/some_file.txt";
747 	const char RELPATH[] = "some_file.txt";
748 	uint64_t ino = 42;
749 	int i, fd;
750 	void *wbuf, *wbuf2x;
751 	ssize_t bufsize = m_maxbcachebuf;
752 	off_t filesize = 5 * bufsize;
753 
754 	wbuf = malloc(bufsize);
755 	ASSERT_NE(nullptr, wbuf) << strerror(errno);
756 	memset(wbuf, 'X', bufsize);
757 	wbuf2x = malloc(2 * bufsize);
758 	ASSERT_NE(nullptr, wbuf2x) << strerror(errno);
759 	memset(wbuf2x, 'X', 2 * bufsize);
760 
761 	expect_lookup(RELPATH, ino, filesize);
762 	expect_open(ino, 0, 1);
763 	/*
764 	 * Writes of bufsize-bytes each should be clustered into greater sizes.
765 	 * The amount of clustering is adaptive, so the first write actually
766 	 * issued will be 2x bufsize and subsequent writes may be larger
767 	 */
768 	expect_write(ino, 0, 2 * bufsize, 2 * bufsize, wbuf2x);
769 	expect_write(ino, 2 * bufsize, 2 * bufsize, 2 * bufsize, wbuf2x);
770 	expect_flush(ino, 1, ReturnErrno(0));
771 	expect_release(ino, ReturnErrno(0));
772 
773 	fd = open(FULLPATH, O_RDWR);
774 	ASSERT_LE(0, fd) << strerror(errno);
775 
776 	for (i = 0; i < 4; i++) {
777 		ASSERT_EQ(bufsize, write(fd, wbuf, bufsize))
778 			<< strerror(errno);
779 	}
780 	close(fd);
781 }
782 
783 /*
784  * When clustering writes, an I/O error to any of the cluster's children should
785  * not panic the system on unmount
786  */
787 /*
788  * Disabled because it panics.
789  * https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=238565
790  */
791 TEST_F(WriteCluster, DISABLED_cluster_write_err)
792 {
793 	const char FULLPATH[] = "mountpoint/some_file.txt";
794 	const char RELPATH[] = "some_file.txt";
795 	uint64_t ino = 42;
796 	int i, fd;
797 	void *wbuf;
798 	ssize_t bufsize = m_maxbcachebuf;
799 	off_t filesize = 4 * bufsize;
800 
801 	wbuf = malloc(bufsize);
802 	ASSERT_NE(nullptr, wbuf) << strerror(errno);
803 	memset(wbuf, 'X', bufsize);
804 
805 	expect_lookup(RELPATH, ino, filesize);
806 	expect_open(ino, 0, 1);
807 	EXPECT_CALL(*m_mock, process(
808 		ResultOf([=](auto in) {
809 			return (in.header.opcode == FUSE_WRITE);
810 		}, Eq(true)),
811 		_)
812 	).WillRepeatedly(Invoke(ReturnErrno(EIO)));
813 	expect_flush(ino, 1, ReturnErrno(0));
814 	expect_release(ino, ReturnErrno(0));
815 
816 	fd = open(FULLPATH, O_RDWR);
817 	ASSERT_LE(0, fd) << strerror(errno);
818 
819 	for (i = 0; i < 3; i++) {
820 		ASSERT_EQ(bufsize, write(fd, wbuf, bufsize))
821 			<< strerror(errno);
822 	}
823 	close(fd);
824 }
825 
826 /*
827  * In writeback mode, writes to an O_WRONLY file could trigger reads from the
828  * server.  The FUSE protocol explicitly allows that.
829  */
TEST_F(WriteBack, rmw)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	const char *INITIAL   = "XXXXXXXXXX";
	uint64_t ino = 42;
	/* Offset 1 is unaligned, forcing a read-modify-write */
	uint64_t offset = 1;
	off_t fsize = 10;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);

	FuseTest::expect_lookup(RELPATH, ino, S_IFREG | 0644, fsize, 1);
	expect_open(ino, 0, 1);
	/* Expect the RMW's read even though the file was opened O_WRONLY */
	expect_read(ino, 0, fsize, fsize, INITIAL, O_WRONLY);
	maybe_expect_write(ino, offset, bufsize, CONTENTS);

	fd = open(FULLPATH, O_WRONLY);
	EXPECT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, pwrite(fd, CONTENTS, bufsize, offset))
		<< strerror(errno);
	leak(fd);
}
854 
855 /*
856  * Without direct_io, writes should be committed to cache
857  */
TEST_F(WriteBack, cache)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	char readbuf[bufsize];

	/* Note: no expect_read; a FUSE_READ would fail the test */
	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	expect_write(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDWR);
	EXPECT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	/*
	 * A subsequent read should be serviced by cache, without querying the
	 * filesystem daemon
	 */
	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
	ASSERT_EQ(bufsize, read(fd, readbuf, bufsize)) << strerror(errno);
	leak(fd);
}
884 
885 /*
886  * With O_DIRECT, writes should be not committed to cache.  Admittedly this is
887  * an odd test, because it would be unusual to use O_DIRECT for writes but not
888  * reads.
889  */
TEST_F(WriteBack, o_direct)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	char readbuf[bufsize];

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	/*
	 * Use the base-class expect_write directly: a direct write must NOT
	 * carry FUSE_WRITE_CACHE (presumably the 6th argument lists flags
	 * that must be clear — confirm against FuseTest::expect_write).
	 */
	FuseTest::expect_write(ino, 0, bufsize, bufsize, 0, FUSE_WRITE_CACHE,
		CONTENTS);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDWR | O_DIRECT);
	EXPECT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	/* A subsequent read must query the daemon because cache is empty */
	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
	ASSERT_EQ(0, fcntl(fd, F_SETFL, 0)) << strerror(errno);
	ASSERT_EQ(bufsize, read(fd, readbuf, bufsize)) << strerror(errno);
	leak(fd);
}
916 
917 /*
918  * When mounted with -o async, the writeback cache mode should delay writes
919  */
TEST_F(WriteBackAsync, delay)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	/* Write should be cached, but FUSE_WRITE shouldn't be sent */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_WRITE);
		}, Eq(true)),
		_)
	).Times(0);

	fd = open(FULLPATH, O_RDWR);
	EXPECT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);

	/* Don't close the file because that would flush the cache */
}
946 
947 /*
948  * A direct write should not evict dirty cached data from outside of its own
949  * byte range.
950  */
TEST_F(WriteBackAsync, direct_io_ignores_unrelated_cached)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char CONTENTS0[] = "abcdefgh";
	const char CONTENTS1[] = "ijklmnop";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS0) + 1;
	/* Two cache blocks: dirty the first, write directly to the second */
	ssize_t fsize = 2 * m_maxbcachebuf;
	char readbuf[bufsize];
	void *zeros;

	zeros = calloc(1, m_maxbcachebuf);
	ASSERT_NE(nullptr, zeros);

	expect_lookup(RELPATH, ino, fsize);
	expect_open(ino, 0, 1);
	/* The RMW of the first block reads its existing (zero) contents */
	expect_read(ino, 0, m_maxbcachebuf, m_maxbcachebuf, zeros);
	/* Only the direct write to the second block reaches the daemon */
	FuseTest::expect_write(ino, m_maxbcachebuf, bufsize, bufsize, 0, 0,
		CONTENTS1);

	fd = open(FULLPATH, O_RDWR);
	EXPECT_LE(0, fd) << strerror(errno);

	// Cache first block with dirty data.  This will entail first reading
	// the existing data.
	ASSERT_EQ(bufsize, pwrite(fd, CONTENTS0, bufsize, 0))
		<< strerror(errno);

	// Write directly to second block
	ASSERT_EQ(0, fcntl(fd, F_SETFL, O_DIRECT)) << strerror(errno);
	ASSERT_EQ(bufsize, pwrite(fd, CONTENTS1, bufsize, m_maxbcachebuf))
		<< strerror(errno);

	// Read from the first block again.  Should be serviced by cache.
	ASSERT_EQ(0, fcntl(fd, F_SETFL, 0)) << strerror(errno);
	ASSERT_EQ(bufsize, pread(fd, readbuf, bufsize, 0)) << strerror(errno);
	// The dirty data must have survived the unrelated direct write
	ASSERT_STREQ(readbuf, CONTENTS0);

	leak(fd);
	free(zeros);
}
994 
995 /*
996  * If a direct io write partially overlaps one or two blocks of dirty cached
997  * data, No dirty data should be lost.  Admittedly this is a weird test,
998  * because it would be unusual to use O_DIRECT and the writeback cache.
999  */
TEST_F(WriteBackAsync, direct_io_partially_overlaps_cached_block)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	uint64_t ino = 42;
	int fd;
	off_t bs = m_maxbcachebuf;	/* one buffer-cache block */
	ssize_t fsize = 3 * bs;
	void *readbuf, *zeros, *ones, *zeroones, *onezeros;

	/*
	 * Patterns used below:
	 * zeros:    what the buffered write stores in all three blocks
	 * ones:     what the direct write stores in [bs/2, 5*bs/2)
	 * zeroones: expected first block after the direct write (0s then 1s)
	 * onezeros: expected third block after the direct write (1s then 0s)
	 */
	readbuf = malloc(bs);
	ASSERT_NE(nullptr, readbuf) << strerror(errno);
	zeros = calloc(1, 3 * bs);
	ASSERT_NE(nullptr, zeros);
	ones = calloc(1, 2 * bs);
	ASSERT_NE(nullptr, ones);
	memset(ones, 1, 2 * bs);
	zeroones = calloc(1, bs);
	ASSERT_NE(nullptr, zeroones);
	memset((uint8_t*)zeroones + bs / 2, 1, bs / 2);
	onezeros = calloc(1, bs);
	ASSERT_NE(nullptr, onezeros);
	memset(onezeros, 1, bs / 2);

	expect_lookup(RELPATH, ino, fsize);
	expect_open(ino, 0, 1);

	fd = open(FULLPATH, O_RDWR);
	EXPECT_LE(0, fd) << strerror(errno);

	/* Cache first and third blocks with dirty data.  */
	ASSERT_EQ(3 * bs, pwrite(fd, zeros, 3 * bs, 0)) << strerror(errno);

	/*
	 * Write directly to all three blocks.  The partially written blocks
	 * will be flushed because they're dirty.
	 */
	FuseTest::expect_write(ino, 0, bs, bs, 0, 0, zeros);
	FuseTest::expect_write(ino, 2 * bs, bs, bs, 0, 0, zeros);
	/* The direct write is split in two because of the m_maxwrite value */
	FuseTest::expect_write(ino,     bs / 2, bs, bs, 0, 0, ones);
	FuseTest::expect_write(ino, 3 * bs / 2, bs, bs, 0, 0, ones);
	ASSERT_EQ(0, fcntl(fd, F_SETFL, O_DIRECT)) << strerror(errno);
	ASSERT_EQ(2 * bs, pwrite(fd, ones, 2 * bs, bs / 2)) << strerror(errno);

	/*
	 * Read from both the valid and invalid portions of the first and third
	 * blocks again.  This will entail FUSE_READ operations because these
	 * blocks were invalidated by the direct write.
	 */
	expect_read(ino, 0, bs, bs, zeroones);
	expect_read(ino, 2 * bs, bs, bs, onezeros);
	ASSERT_EQ(0, fcntl(fd, F_SETFL, 0)) << strerror(errno);
	/* First half of block 0: untouched by the direct write, still zeros */
	ASSERT_EQ(bs / 2, pread(fd, readbuf, bs / 2, 0)) << strerror(errno);
	EXPECT_EQ(0, memcmp(zeros, readbuf, bs / 2));
	/* Second half of block 2: untouched by the direct write, still zeros */
	ASSERT_EQ(bs / 2, pread(fd, readbuf, bs / 2, 5 * bs / 2))
		<< strerror(errno);
	EXPECT_EQ(0, memcmp(zeros, readbuf, bs / 2));
	/* Second half of block 0: overwritten with ones by the direct write */
	ASSERT_EQ(bs / 2, pread(fd, readbuf, bs / 2, bs / 2))
		<< strerror(errno);
	EXPECT_EQ(0, memcmp(ones, readbuf, bs / 2));
	/* First half of block 2: overwritten with ones by the direct write */
	ASSERT_EQ(bs / 2, pread(fd, readbuf, bs / 2, 2 * bs))
		<< strerror(errno);
	EXPECT_EQ(0, memcmp(ones, readbuf, bs / 2));

	leak(fd);
	free(zeroones);
	free(onezeros);
	free(ones);
	free(zeros);
	free(readbuf);
}
1072 
1073 /*
1074  * In WriteBack mode, writes may be cached beyond what the server thinks is the
1075  * EOF.  In this case, a short read at EOF should _not_ cause fusefs to update
1076  * the file's size.
1077  */
TEST_F(WriteBackAsync, eof)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS0 = "abcdefgh";
	const char *CONTENTS1 = "ijklmnop";
	uint64_t ino = 42;
	int fd;
	off_t offset = m_maxbcachebuf;	/* write destination, beyond EOF */
	ssize_t wbufsize = strlen(CONTENTS1);
	off_t old_filesize = (off_t)strlen(CONTENTS0);
	ssize_t rbufsize = 2 * old_filesize;
	char readbuf[rbufsize];
	size_t holesize = rbufsize - old_filesize;
	char hole[holesize];
	struct stat sb;
	ssize_t r;

	/*
	 * The server reports a zero-length file; the short FUSE_READ reply
	 * below is what must not shrink the kernel's notion of the size.
	 */
	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, m_maxbcachebuf, old_filesize, CONTENTS0);

	fd = open(FULLPATH, O_RDWR);
	EXPECT_LE(0, fd) << strerror(errno);

	/* Write and cache data beyond EOF */
	ASSERT_EQ(wbufsize, pwrite(fd, CONTENTS1, wbufsize, offset))
		<< strerror(errno);

	/* Read from the old EOF */
	r = pread(fd, readbuf, rbufsize, 0);
	ASSERT_LE(0, r) << strerror(errno);
	EXPECT_EQ(rbufsize, r) << "read should've synthesized a hole";
	EXPECT_EQ(0, memcmp(CONTENTS0, readbuf, old_filesize));
	/* Bytes past the server's EOF must read back as zeros */
	bzero(hole, holesize);
	EXPECT_EQ(0, memcmp(hole, readbuf + old_filesize, holesize));

	/* The file's size should still be what was established by pwrite */
	ASSERT_EQ(0, fstat(fd, &sb)) << strerror(errno);
	EXPECT_EQ(offset + wbufsize, sb.st_size);
	leak(fd);
}
1120 
1121 /*
1122  * When a file has dirty writes that haven't been flushed, the server's notion
1123  * of its mtime and ctime will be wrong.  The kernel should ignore those if it
1124  * gets them from a FUSE_GETATTR before flushing.
1125  */
1126 TEST_F(WriteBackAsync, timestamps)
1127 {
1128 	const char FULLPATH[] = "mountpoint/some_file.txt";
1129 	const char RELPATH[] = "some_file.txt";
1130 	const char *CONTENTS = "abcdefgh";
1131 	ssize_t bufsize = strlen(CONTENTS);
1132 	uint64_t ino = 42;
1133 	uint64_t attr_valid = 0;
1134 	uint64_t attr_valid_nsec = 0;
1135 	uint64_t server_time = 12345;
1136 	mode_t mode = S_IFREG | 0644;
1137 	int fd;
1138 
1139 	struct stat sb;
1140 
1141 	EXPECT_LOOKUP(FUSE_ROOT_ID, RELPATH)
1142 	.WillRepeatedly(Invoke(
1143 		ReturnImmediate([=](auto in __unused, auto& out) {
1144 		SET_OUT_HEADER_LEN(out, entry);
1145 		out.body.entry.attr.mode = mode;
1146 		out.body.entry.nodeid = ino;
1147 		out.body.entry.attr.nlink = 1;
1148 		out.body.entry.attr_valid = attr_valid;
1149 		out.body.entry.attr_valid_nsec = attr_valid_nsec;
1150 	})));
1151 	expect_open(ino, 0, 1);
1152 	EXPECT_CALL(*m_mock, process(
1153 		ResultOf([=](auto in) {
1154 			return (in.header.opcode == FUSE_GETATTR &&
1155 				in.header.nodeid == ino);
1156 		}, Eq(true)),
1157 		_)
1158 	).WillRepeatedly(Invoke(
1159 	ReturnImmediate([=](auto i __unused, auto& out) {
1160 		SET_OUT_HEADER_LEN(out, attr);
1161 		out.body.attr.attr.ino = ino;
1162 		out.body.attr.attr.mode = mode;
1163 		out.body.attr.attr_valid = attr_valid;
1164 		out.body.attr.attr_valid_nsec = attr_valid_nsec;
1165 		out.body.attr.attr.atime = server_time;
1166 		out.body.attr.attr.mtime = server_time;
1167 		out.body.attr.attr.ctime = server_time;
1168 	})));
1169 
1170 	fd = open(FULLPATH, O_RDWR);
1171 	EXPECT_LE(0, fd) << strerror(errno);
1172 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
1173 
1174 	ASSERT_EQ(0, fstat(fd, &sb)) << strerror(errno);
1175 	EXPECT_EQ((time_t)server_time, sb.st_atime);
1176 	EXPECT_NE((time_t)server_time, sb.st_mtime);
1177 	EXPECT_NE((time_t)server_time, sb.st_ctime);
1178 }
1179 
1180 /* Any dirty timestamp fields should be flushed during a SETATTR */
1181 TEST_F(WriteBackAsync, timestamps_during_setattr)
1182 {
1183 	const char FULLPATH[] = "mountpoint/some_file.txt";
1184 	const char RELPATH[] = "some_file.txt";
1185 	const char *CONTENTS = "abcdefgh";
1186 	ssize_t bufsize = strlen(CONTENTS);
1187 	uint64_t ino = 42;
1188 	const mode_t newmode = 0755;
1189 	int fd;
1190 
1191 	expect_lookup(RELPATH, ino, 0);
1192 	expect_open(ino, 0, 1);
1193 	EXPECT_CALL(*m_mock, process(
1194 		ResultOf([=](auto in) {
1195 			uint32_t valid = FATTR_MODE | FATTR_MTIME | FATTR_CTIME;
1196 			return (in.header.opcode == FUSE_SETATTR &&
1197 				in.header.nodeid == ino &&
1198 				in.body.setattr.valid == valid);
1199 		}, Eq(true)),
1200 		_)
1201 	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
1202 		SET_OUT_HEADER_LEN(out, attr);
1203 		out.body.attr.attr.ino = ino;
1204 		out.body.attr.attr.mode = S_IFREG | newmode;
1205 	})));
1206 
1207 	fd = open(FULLPATH, O_RDWR);
1208 	EXPECT_LE(0, fd) << strerror(errno);
1209 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
1210 	ASSERT_EQ(0, fchmod(fd, newmode)) << strerror(errno);
1211 }
1212 
1213 /* fuse_init_out.time_gran controls the granularity of timestamps */
1214 TEST_P(TimeGran, timestamps_during_setattr)
1215 {
1216 	const char FULLPATH[] = "mountpoint/some_file.txt";
1217 	const char RELPATH[] = "some_file.txt";
1218 	const char *CONTENTS = "abcdefgh";
1219 	ssize_t bufsize = strlen(CONTENTS);
1220 	uint64_t ino = 42;
1221 	const mode_t newmode = 0755;
1222 	int fd;
1223 
1224 	expect_lookup(RELPATH, ino, 0);
1225 	expect_open(ino, 0, 1);
1226 	EXPECT_CALL(*m_mock, process(
1227 		ResultOf([=](auto in) {
1228 			uint32_t valid = FATTR_MODE | FATTR_MTIME | FATTR_CTIME;
1229 			return (in.header.opcode == FUSE_SETATTR &&
1230 				in.header.nodeid == ino &&
1231 				in.body.setattr.valid == valid &&
1232 				in.body.setattr.mtimensec % m_time_gran == 0 &&
1233 				in.body.setattr.ctimensec % m_time_gran == 0);
1234 		}, Eq(true)),
1235 		_)
1236 	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
1237 		SET_OUT_HEADER_LEN(out, attr);
1238 		out.body.attr.attr.ino = ino;
1239 		out.body.attr.attr.mode = S_IFREG | newmode;
1240 	})));
1241 
1242 	fd = open(FULLPATH, O_RDWR);
1243 	EXPECT_LE(0, fd) << strerror(errno);
1244 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
1245 	ASSERT_EQ(0, fchmod(fd, newmode)) << strerror(errno);
1246 }
1247 
/*
 * Instantiate TimeGran for parameter values 0 through 9; presumably these
 * select the time_gran granularity (likely as powers of ten of nanoseconds,
 * given the mtimensec/ctimensec modulus checks above) — confirm against the
 * TimeGran fixture's use of GetParam().
 */
INSTANTIATE_TEST_CASE_P(RA, TimeGran, Range(0u, 10u));
1249 
1250 /*
1251  * Without direct_io, writes should be committed to cache
1252  */
1253 TEST_F(Write, writethrough)
1254 {
1255 	const char FULLPATH[] = "mountpoint/some_file.txt";
1256 	const char RELPATH[] = "some_file.txt";
1257 	const char *CONTENTS = "abcdefgh";
1258 	uint64_t ino = 42;
1259 	int fd;
1260 	ssize_t bufsize = strlen(CONTENTS);
1261 	char readbuf[bufsize];
1262 
1263 	expect_lookup(RELPATH, ino, 0);
1264 	expect_open(ino, 0, 1);
1265 	expect_write(ino, 0, bufsize, bufsize, CONTENTS);
1266 
1267 	fd = open(FULLPATH, O_RDWR);
1268 	EXPECT_LE(0, fd) << strerror(errno);
1269 
1270 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
1271 	/*
1272 	 * A subsequent read should be serviced by cache, without querying the
1273 	 * filesystem daemon
1274 	 */
1275 	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
1276 	ASSERT_EQ(bufsize, read(fd, readbuf, bufsize)) << strerror(errno);
1277 	leak(fd);
1278 }
1279 
1280 /* Writes that extend a file should update the cached file size */
1281 TEST_F(Write, update_file_size)
1282 {
1283 	const char FULLPATH[] = "mountpoint/some_file.txt";
1284 	const char RELPATH[] = "some_file.txt";
1285 	const char *CONTENTS = "abcdefgh";
1286 	struct stat sb;
1287 	uint64_t ino = 42;
1288 	int fd;
1289 	ssize_t bufsize = strlen(CONTENTS);
1290 
1291 	expect_lookup(RELPATH, ino, 0);
1292 	expect_open(ino, 0, 1);
1293 	expect_write(ino, 0, bufsize, bufsize, CONTENTS);
1294 
1295 	fd = open(FULLPATH, O_RDWR);
1296 	EXPECT_LE(0, fd) << strerror(errno);
1297 
1298 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
1299 	/* Get cached attributes */
1300 	ASSERT_EQ(0, fstat(fd, &sb)) << strerror(errno);
1301 	ASSERT_EQ(bufsize, sb.st_size);
1302 	leak(fd);
1303 }
1304