1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2019 The FreeBSD Foundation
5  *
6  * This software was developed by BFF Storage Systems, LLC under sponsorship
7  * from the FreeBSD Foundation.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  * $FreeBSD$
31  */
32 
33 extern "C" {
34 #include <sys/param.h>
35 #include <sys/mman.h>
36 #include <sys/resource.h>
37 #include <sys/stat.h>
38 #include <sys/time.h>
39 #include <sys/uio.h>
40 
41 #include <aio.h>
42 #include <fcntl.h>
43 #include <signal.h>
44 #include <unistd.h>
45 }
46 
47 #include "mockfs.hh"
48 #include "utils.hh"
49 
50 using namespace testing;
51 
52 class Write: public FuseTest {
53 
54 public:
55 static sig_atomic_t s_sigxfsz;
56 
57 void SetUp() {
58 	s_sigxfsz = 0;
59 	FuseTest::SetUp();
60 }
61 
62 void TearDown() {
63 	struct sigaction sa;
64 
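	/* Restore the default SIGXFSZ handler; rlimit_fsize installs its own */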
65 	bzero(&sa, sizeof(sa));
66 	sa.sa_handler = SIG_DFL;
67 	sigaction(SIGXFSZ, &sa, NULL);
68 
69 	FuseTest::TearDown();
70 }
71 
72 void expect_lookup(const char *relpath, uint64_t ino, uint64_t size)
73 {
74 	FuseTest::expect_lookup(relpath, ino, S_IFREG | 0644, size, 1);
75 }
76 
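/* Expect a FUSE_RELEASE for the given inode, to be handled by r */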
77 void expect_release(uint64_t ino, ProcessMockerT r)
78 {
79 	EXPECT_CALL(*m_mock, process(
80 		ResultOf([=](auto in) {
81 			return (in.header.opcode == FUSE_RELEASE &&
82 				in.header.nodeid == ino);
83 		}, Eq(true)),
84 		_)
85 	).WillRepeatedly(Invoke(r));
86 }
87 
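/* Expect a FUSE_WRITE of isize bytes; the server will claim to write osize */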
88 void expect_write(uint64_t ino, uint64_t offset, uint64_t isize,
89 	uint64_t osize, const void *contents)
90 {
91 	FuseTest::expect_write(ino, offset, isize, osize, 0, 0, contents);
92 }
93 
94 /* Expect a write that may or may not come, depending on the cache mode */
95 void maybe_expect_write(uint64_t ino, uint64_t offset, uint64_t size,
96 	const void *contents)
97 {
98 	EXPECT_CALL(*m_mock, process(
99 		ResultOf([=](auto in) {
100 			const char *buf = (const char*)in.body.bytes +
101 				sizeof(struct fuse_write_in);
102 
103 			return (in.header.opcode == FUSE_WRITE &&
104 				in.header.nodeid == ino &&
105 				in.body.write.offset == offset  &&
106 				in.body.write.size == size &&
107 				0 == bcmp(buf, contents, size));
108 		}, Eq(true)),
109 		_)
110 	).Times(AtMost(1))
111 	.WillRepeatedly(Invoke(
112 		ReturnImmediate([=](auto in __unused, auto& out) {
113 			SET_OUT_HEADER_LEN(out, write);
114 			out.body.write.size = size;
115 		})
116 	));
117 }
118 
119 };
120 
121 sig_atomic_t Write::s_sigxfsz = 0;
122 
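/* Tests for servers that use FUSE protocol version 7.8 */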
123 class Write_7_8: public FuseTest {
124 
125 public:
126 virtual void SetUp() {
127 	m_kernel_minor_version = 8;
128 	FuseTest::SetUp();
129 }
130 
131 void expect_lookup(const char *relpath, uint64_t ino, uint64_t size)
132 {
133 	FuseTest::expect_lookup_7_8(relpath, ino, S_IFREG | 0644, size, 1);
134 }
135 
136 };
137 
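/* Tests that submit writes using POSIX AIO */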
138 class AioWrite: public Write {
139 virtual void SetUp() {
140 	if (!is_unsafe_aio_enabled())
141 		GTEST_SKIP() <<
142 			"vfs.aio.enable_unsafe must be set for this test";
143 	FuseTest::SetUp();
144 }
145 };
146 
147 /* Tests for the writeback cache mode */
148 class WriteBack: public Write {
149 public:
150 virtual void SetUp() {
151 	m_init_flags |= FUSE_WRITEBACK_CACHE;
152 	FuseTest::SetUp();
153 	if (IsSkipped())
154 		return;
155 }
156 
157 void expect_write(uint64_t ino, uint64_t offset, uint64_t isize,
158 	uint64_t osize, const void *contents)
159 {
160 	FuseTest::expect_write(ino, offset, isize, osize, FUSE_WRITE_CACHE, 0,
161 		contents);
162 }
163 };
164 
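/* Writeback cache tests for mounts with the -o async option */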
165 class WriteBackAsync: public WriteBack {
166 public:
167 virtual void SetUp() {
168 	m_async = true;
169 	m_maxwrite = 65536;
170 	WriteBack::SetUp();
171 }
172 };
173 
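/*
 * Tests for fuse_init_out.time_gran.  The test parameter is the base-2 log of
 * the timestamp granularity.
 */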
174 class TimeGran: public WriteBackAsync, public WithParamInterface<unsigned> {
175 public:
176 virtual void SetUp() {
177 	m_time_gran = 1 << GetParam();
178 	WriteBackAsync::SetUp();
179 }
180 };
181 
182 /* Tests for clustered writes with WriteBack caching */
183 class WriteCluster: public WriteBack {
184 public:
185 virtual void SetUp() {
186 	m_async = true;
187 	m_maxwrite = 1 << 25;	// Anything larger than MAXPHYS will suffice
188 	WriteBack::SetUp();
189 	if (m_maxphys < 2 * DFLTPHYS)
190 		GTEST_SKIP() << "MAXPHYS must be at least twice DFLTPHYS"
191 			<< " for this test";
192 	if (m_maxphys < 2 * m_maxbcachebuf)
193 		GTEST_SKIP() << "MAXPHYS must be at least twice maxbcachebuf"
194 			<< " for this test";
195 }
196 };
197 
198 /* Tests relating to the server's max_write property */
199 class WriteMaxWrite: public Write {
200 public:
201 virtual void SetUp() {
202 	/*
203 	 * For this test, m_maxwrite must be less than both m_maxbcachebuf and
204 	 * m_maxphys.
205 	 */
206 	m_maxwrite = 32768;
207 	Write::SetUp();
208 }
209 };
210 
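/* Parameterized tests for EOF handling during VOP_STRATEGY */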
211 class WriteEofDuringVnopStrategy: public Write, public WithParamInterface<int>
212 {};
213 
214 void sigxfsz_handler(int __unused sig) {
215 	Write::s_sigxfsz = 1;
216 }
217 
218 /* AIO writes need to set the header's pid field correctly */
219 /* https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=236379 */
220 TEST_F(AioWrite, DISABLED_aio_write)
221 {
222 	const char FULLPATH[] = "mountpoint/some_file.txt";
223 	const char RELPATH[] = "some_file.txt";
224 	const char *CONTENTS = "abcdefgh";
225 	uint64_t ino = 42;
226 	uint64_t offset = 4096;
227 	int fd;
228 	ssize_t bufsize = strlen(CONTENTS);
229 	struct aiocb iocb, *piocb;
230 
231 	expect_lookup(RELPATH, ino, 0);
232 	expect_open(ino, 0, 1);
233 	expect_write(ino, offset, bufsize, bufsize, CONTENTS);
234 
235 	fd = open(FULLPATH, O_WRONLY);
236 	ASSERT_LE(0, fd) << strerror(errno);
237 
238 	iocb.aio_nbytes = bufsize;
239 	iocb.aio_fildes = fd;
240 	iocb.aio_buf = __DECONST(void *, CONTENTS);
241 	iocb.aio_offset = offset;
242 	iocb.aio_sigevent.sigev_notify = SIGEV_NONE;
243 	ASSERT_EQ(0, aio_write(&iocb)) << strerror(errno);
244 	ASSERT_EQ(bufsize, aio_waitcomplete(&piocb, NULL)) << strerror(errno);
245 	leak(fd);
246 }
247 
248 /*
249  * When a file is opened with O_APPEND, we should forward that flag to
250  * FUSE_OPEN (tested by Open.o_append) but still attempt to calculate the
251  * offset internally.  That way we'll work both with filesystems that
252  * understand O_APPEND (and ignore the offset) and filesystems that don't (and
253  * simply use the offset).
254  *
255  * Note that verifying the O_APPEND flag in FUSE_OPEN is done in the
256  * Open.o_append test.
257  */
258 TEST_F(Write, append)
259 {
260 	const ssize_t BUFSIZE = 9;
261 	const char FULLPATH[] = "mountpoint/some_file.txt";
262 	const char RELPATH[] = "some_file.txt";
263 	const char CONTENTS[BUFSIZE] = "abcdefgh";
264 	uint64_t ino = 42;
265 	/*
266 	 * Set offset to a maxbcachebuf boundary so we don't need to RMW when
267 	 * using writeback caching
268 	 */
269 	uint64_t initial_offset = m_maxbcachebuf;
270 	int fd;
271 
272 	expect_lookup(RELPATH, ino, initial_offset);
273 	expect_open(ino, 0, 1);
274 	expect_write(ino, initial_offset, BUFSIZE, BUFSIZE, CONTENTS);
275 
276 	/* Must open O_RDWR or fuse(4) implicitly sets direct_io */
277 	fd = open(FULLPATH, O_RDWR | O_APPEND);
278 	ASSERT_LE(0, fd) << strerror(errno);
279 
280 	ASSERT_EQ(BUFSIZE, write(fd, CONTENTS, BUFSIZE)) << strerror(errno);
281 	leak(fd);
282 }
283 
284 /* If a file is cached, then appending to the end should not cause a read */
285 TEST_F(Write, append_to_cached)
286 {
287 	const ssize_t BUFSIZE = 9;
288 	const char FULLPATH[] = "mountpoint/some_file.txt";
289 	const char RELPATH[] = "some_file.txt";
290 	char *oldcontents, *oldbuf;
291 	const char CONTENTS[BUFSIZE] = "abcdefgh";
292 	uint64_t ino = 42;
293 	/*
294 	 * Set the old size between maxbcachebuf boundaries to test buffer handling
295 	 */
296 	uint64_t oldsize = m_maxbcachebuf / 2;
297 	int fd;
298 
299 	oldcontents = (char*)calloc(1, oldsize);
300 	ASSERT_NE(nullptr, oldcontents) << strerror(errno);
301 	oldbuf = (char*)malloc(oldsize);
302 	ASSERT_NE(nullptr, oldbuf) << strerror(errno);
303 
304 	expect_lookup(RELPATH, ino, oldsize);
305 	expect_open(ino, 0, 1);
306 	expect_read(ino, 0, oldsize, oldsize, oldcontents);
307 	maybe_expect_write(ino, oldsize, BUFSIZE, CONTENTS);
308 
309 	/* Must open O_RDWR or fuse(4) implicitly sets direct_io */
310 	fd = open(FULLPATH, O_RDWR | O_APPEND);
311 	ASSERT_LE(0, fd) << strerror(errno);
312 
313 	/* Read the old data into the cache */
314 	ASSERT_EQ((ssize_t)oldsize, read(fd, oldbuf, oldsize))
315 		<< strerror(errno);
316 
317 	/* Write the new data.  There should be no more read operations */
318 	ASSERT_EQ(BUFSIZE, write(fd, CONTENTS, BUFSIZE)) << strerror(errno);
319 	leak(fd);
320 	free(oldbuf);
321 	free(oldcontents);
322 }
323 
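/* With direct_io, an O_APPEND write should still go to the file's EOF */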
324 TEST_F(Write, append_direct_io)
325 {
326 	const ssize_t BUFSIZE = 9;
327 	const char FULLPATH[] = "mountpoint/some_file.txt";
328 	const char RELPATH[] = "some_file.txt";
329 	const char CONTENTS[BUFSIZE] = "abcdefgh";
330 	uint64_t ino = 42;
331 	uint64_t initial_offset = 4096;
332 	int fd;
333 
334 	expect_lookup(RELPATH, ino, initial_offset);
335 	expect_open(ino, FOPEN_DIRECT_IO, 1);
336 	expect_write(ino, initial_offset, BUFSIZE, BUFSIZE, CONTENTS);
337 
338 	fd = open(FULLPATH, O_WRONLY | O_APPEND);
339 	ASSERT_LE(0, fd) << strerror(errno);
340 
341 	ASSERT_EQ(BUFSIZE, write(fd, CONTENTS, BUFSIZE)) << strerror(errno);
342 	leak(fd);
343 }
344 
345 /* A direct write should evict any overlapping cached data */
346 TEST_F(Write, direct_io_evicts_cache)
347 {
348 	const char FULLPATH[] = "mountpoint/some_file.txt";
349 	const char RELPATH[] = "some_file.txt";
350 	const char CONTENTS0[] = "abcdefgh";
351 	const char CONTENTS1[] = "ijklmnop";
352 	uint64_t ino = 42;
353 	int fd;
354 	ssize_t bufsize = strlen(CONTENTS0) + 1;
355 	char readbuf[bufsize];
356 
357 	expect_lookup(RELPATH, ino, bufsize);
358 	expect_open(ino, 0, 1);
359 	expect_read(ino, 0, bufsize, bufsize, CONTENTS0);
360 	expect_write(ino, 0, bufsize, bufsize, CONTENTS1);
361 
362 	fd = open(FULLPATH, O_RDWR);
363 	ASSERT_LE(0, fd) << strerror(errno);
364 
365 	// Prime cache
366 	ASSERT_EQ(bufsize, read(fd, readbuf, bufsize)) << strerror(errno);
367 
368 	// Write directly, evicting cache
369 	ASSERT_EQ(0, fcntl(fd, F_SETFL, O_DIRECT)) << strerror(errno);
370 	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
371 	ASSERT_EQ(bufsize, write(fd, CONTENTS1, bufsize)) << strerror(errno);
372 
373 	// Read again.  Cache should be bypassed
374 	expect_read(ino, 0, bufsize, bufsize, CONTENTS1);
375 	ASSERT_EQ(0, fcntl(fd, F_SETFL, 0)) << strerror(errno);
376 	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
377 	ASSERT_EQ(bufsize, read(fd, readbuf, bufsize)) << strerror(errno);
378 	ASSERT_STREQ(readbuf, CONTENTS1);
379 
380 	leak(fd);
381 }
382 
383 /*
384  * If the server doesn't return FOPEN_DIRECT_IO during FUSE_OPEN, then it's not
385  * allowed to return a short write for that file handle.  However, if it does
386  * then we should still do our darndest to handle it by resending the unwritten
387  * portion.
388  */
389 TEST_F(Write, indirect_io_short_write)
390 {
391 	const char FULLPATH[] = "mountpoint/some_file.txt";
392 	const char RELPATH[] = "some_file.txt";
393 	const char *CONTENTS = "abcdefghijklmnop";
394 	uint64_t ino = 42;
395 	int fd;
396 	ssize_t bufsize = strlen(CONTENTS);
397 	ssize_t bufsize0 = 11;
398 	ssize_t bufsize1 = strlen(CONTENTS) - bufsize0;
399 	const char *contents1 = CONTENTS + bufsize0;
400 
401 	expect_lookup(RELPATH, ino, 0);
402 	expect_open(ino, 0, 1);
403 	expect_write(ino, 0, bufsize, bufsize0, CONTENTS);
404 	expect_write(ino, bufsize0, bufsize1, bufsize1, contents1);
405 
406 	fd = open(FULLPATH, O_WRONLY);
407 	ASSERT_LE(0, fd) << strerror(errno);
408 
409 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
410 	leak(fd);
411 }
412 
413 /*
414  * When the direct_io option is used, filesystems are allowed to write less
415  * data than requested.  We should return the short write to userland.
416  */
417 TEST_F(Write, direct_io_short_write)
418 {
419 	const char FULLPATH[] = "mountpoint/some_file.txt";
420 	const char RELPATH[] = "some_file.txt";
421 	const char *CONTENTS = "abcdefghijklmnop";
422 	uint64_t ino = 42;
423 	int fd;
424 	ssize_t bufsize = strlen(CONTENTS);
425 	ssize_t halfbufsize = bufsize / 2;
426 
427 	expect_lookup(RELPATH, ino, 0);
428 	expect_open(ino, FOPEN_DIRECT_IO, 1);
429 	expect_write(ino, 0, bufsize, halfbufsize, CONTENTS);
430 
431 	fd = open(FULLPATH, O_WRONLY);
432 	ASSERT_LE(0, fd) << strerror(errno);
433 
434 	ASSERT_EQ(halfbufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
435 	leak(fd);
436 }
437 
438 /*
439  * An insidious edge case: the filesystem returns a short write, and the
440  * difference between what we requested and what it actually wrote crosses an
441  * iov element boundary
442  */
443 TEST_F(Write, direct_io_short_write_iov)
444 {
445 	const char FULLPATH[] = "mountpoint/some_file.txt";
446 	const char RELPATH[] = "some_file.txt";
447 	const char *CONTENTS0 = "abcdefgh";
448 	const char *CONTENTS1 = "ijklmnop";
449 	const char *EXPECTED0 = "abcdefghijklmnop";
450 	uint64_t ino = 42;
451 	int fd;
452 	ssize_t size0 = strlen(CONTENTS0) - 1;
453 	ssize_t size1 = strlen(CONTENTS1) + 1;
454 	ssize_t totalsize = size0 + size1;
455 	struct iovec iov[2];
456 
457 	expect_lookup(RELPATH, ino, 0);
458 	expect_open(ino, FOPEN_DIRECT_IO, 1);
459 	expect_write(ino, 0, totalsize, size0, EXPECTED0);
460 
461 	fd = open(FULLPATH, O_WRONLY);
462 	ASSERT_LE(0, fd) << strerror(errno);
463 
464 	iov[0].iov_base = __DECONST(void*, CONTENTS0);
465 	iov[0].iov_len = strlen(CONTENTS0);
466 	iov[1].iov_base = __DECONST(void*, CONTENTS1);
467 	iov[1].iov_len = strlen(CONTENTS1);
468 	ASSERT_EQ(size0, writev(fd, iov, 2)) << strerror(errno);
469 	leak(fd);
470 }
471 
472 /* fusefs should respect RLIMIT_FSIZE */
473 TEST_F(Write, rlimit_fsize)
474 {
475 	const char FULLPATH[] = "mountpoint/some_file.txt";
476 	const char RELPATH[] = "some_file.txt";
477 	const char *CONTENTS = "abcdefgh";
478 	struct rlimit rl;
479 	ssize_t bufsize = strlen(CONTENTS);
480 	off_t offset = 1'000'000'000;
481 	uint64_t ino = 42;
482 	int fd;
483 
484 	expect_lookup(RELPATH, ino, 0);
485 	expect_open(ino, 0, 1);
486 
487 	rl.rlim_cur = offset;
488 	rl.rlim_max = 10 * offset;
489 	ASSERT_EQ(0, setrlimit(RLIMIT_FSIZE, &rl)) << strerror(errno);
490 	ASSERT_NE(SIG_ERR, signal(SIGXFSZ, sigxfsz_handler)) << strerror(errno);
491 
492 	fd = open(FULLPATH, O_WRONLY);
493 
494 	ASSERT_LE(0, fd) << strerror(errno);
495 
496 	ASSERT_EQ(-1, pwrite(fd, CONTENTS, bufsize, offset));
497 	EXPECT_EQ(EFBIG, errno);
498 	EXPECT_EQ(1, s_sigxfsz);
499 	leak(fd);
500 }
501 
502 /*
503  * A short read indicates EOF.  Test that nothing bad happens if we get EOF
504  * during the R of a RMW operation.
505  */
506 TEST_F(Write, eof_during_rmw)
507 {
508 	const char FULLPATH[] = "mountpoint/some_file.txt";
509 	const char RELPATH[] = "some_file.txt";
510 	const char *CONTENTS = "abcdefgh";
511 	const char *INITIAL   = "XXXXXXXXXX";
512 	uint64_t ino = 42;
513 	uint64_t offset = 1;
514 	ssize_t bufsize = strlen(CONTENTS) + 1;
515 	off_t orig_fsize = 10;
516 	off_t truncated_fsize = 5;
517 	int fd;
518 
519 	FuseTest::expect_lookup(RELPATH, ino, S_IFREG | 0644, orig_fsize, 1);
520 	expect_open(ino, 0, 1);
521 	expect_read(ino, 0, orig_fsize, truncated_fsize, INITIAL, O_RDWR);
522 	maybe_expect_write(ino, offset, bufsize, CONTENTS);
523 
524 	fd = open(FULLPATH, O_RDWR);
525 	ASSERT_LE(0, fd) << strerror(errno);
526 
527 	ASSERT_EQ(bufsize, pwrite(fd, CONTENTS, bufsize, offset))
528 		<< strerror(errno);
529 	leak(fd);
530 }
531 
532 /*
533  * VOP_STRATEGY should not query the server for the file's size, even if its
534  * cached attributes have expired.
535  * Regression test for https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=256937
536  */
537 TEST_P(WriteEofDuringVnopStrategy, eof_during_vop_strategy)
538 {
539 	const char FULLPATH[] = "mountpoint/some_file.txt";
540 	const char RELPATH[] = "some_file.txt";
541 	Sequence seq;
542 	const off_t filesize = 2 * m_maxbcachebuf;
543 	void *contents;
544 	uint64_t ino = 42;
545 	uint64_t attr_valid = 0;
546 	uint64_t attr_valid_nsec = 0;
547 	mode_t mode = S_IFREG | 0644;
548 	int fd;
549 	int ngetattrs;
550 
551 	ngetattrs = GetParam();
552 	contents = calloc(1, filesize);
553 
554 	EXPECT_LOOKUP(FUSE_ROOT_ID, RELPATH)
555 	.WillRepeatedly(Invoke(
556 		ReturnImmediate([=](auto in __unused, auto& out) {
557 		SET_OUT_HEADER_LEN(out, entry);
558 		out.body.entry.attr.mode = mode;
559 		out.body.entry.nodeid = ino;
560 		out.body.entry.attr.nlink = 1;
561 		out.body.entry.attr.size = filesize;
562 		out.body.entry.attr_valid = attr_valid;
563 		out.body.entry.attr_valid_nsec = attr_valid_nsec;
564 	})));
565 	expect_open(ino, 0, 1);
566 	EXPECT_CALL(*m_mock, process(
567 		ResultOf([=](auto in) {
568 			return (in.header.opcode == FUSE_GETATTR &&
569 				in.header.nodeid == ino);
570 		}, Eq(true)),
571 		_)
572 	).Times(Between(ngetattrs - 1, ngetattrs))
573 	.InSequence(seq)
574 	.WillRepeatedly(Invoke(ReturnImmediate([=](auto i __unused, auto& out) {
575 		SET_OUT_HEADER_LEN(out, attr);
576 		out.body.attr.attr.ino = ino;
577 		out.body.attr.attr.mode = mode;
578 		out.body.attr.attr_valid = attr_valid;
579 		out.body.attr.attr_valid_nsec = attr_valid_nsec;
580 		out.body.attr.attr.size = filesize;
581 	})));
582 	EXPECT_CALL(*m_mock, process(
583 		ResultOf([=](auto in) {
584 			return (in.header.opcode == FUSE_GETATTR &&
585 				in.header.nodeid == ino);
586 		}, Eq(true)),
587 		_)
588 	).InSequence(seq)
589 	.WillRepeatedly(Invoke(ReturnImmediate([=](auto i __unused, auto& out) {
590 		SET_OUT_HEADER_LEN(out, attr);
591 		out.body.attr.attr.ino = ino;
592 		out.body.attr.attr.mode = mode;
593 		out.body.attr.attr_valid = attr_valid;
594 		out.body.attr.attr_valid_nsec = attr_valid_nsec;
595 		out.body.attr.attr.size = filesize / 2;
596 	})));
597 	expect_write(ino, 0, filesize / 2, filesize / 2, contents);
598 
599 	fd = open(FULLPATH, O_RDWR);
600 	ASSERT_LE(0, fd) << strerror(errno);
601 	ASSERT_EQ(filesize / 2, write(fd, contents, filesize / 2))
602 		<< strerror(errno);
603 
604 }
605 
606 INSTANTIATE_TEST_CASE_P(W, WriteEofDuringVnopStrategy,
607 	Values(1, 2, 3)
608 );
609 
610 /*
611  * If the kernel cannot be sure which uid, gid, or pid was responsible for a
612  * write, then it must set the FUSE_WRITE_CACHE bit
613  */
614 /* https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=236378 */
615 TEST_F(Write, mmap)
616 {
617 	const char FULLPATH[] = "mountpoint/some_file.txt";
618 	const char RELPATH[] = "some_file.txt";
619 	const char *CONTENTS = "abcdefgh";
620 	uint64_t ino = 42;
621 	int fd;
622 	ssize_t bufsize = strlen(CONTENTS);
623 	void *p;
624 	uint64_t offset = 10;
625 	size_t len;
626 	void *zeros, *expected;
627 
628 	len = getpagesize();
629 
630 	zeros = calloc(1, len);
631 	ASSERT_NE(nullptr, zeros);
632 	expected = calloc(1, len);
633 	ASSERT_NE(nullptr, expected);
634 	memmove((uint8_t*)expected + offset, CONTENTS, bufsize);
635 
636 	expect_lookup(RELPATH, ino, len);
637 	expect_open(ino, 0, 1);
638 	expect_read(ino, 0, len, len, zeros);
639 	/*
640 	 * Writes from the pager may or may not be associated with the correct
641 	 * pid, so they must set FUSE_WRITE_CACHE.
642 	 */
643 	FuseTest::expect_write(ino, 0, len, len, FUSE_WRITE_CACHE, 0, expected);
644 	expect_flush(ino, 1, ReturnErrno(0));
645 	expect_release(ino, ReturnErrno(0));
646 
647 	fd = open(FULLPATH, O_RDWR);
648 	ASSERT_LE(0, fd) << strerror(errno);
649 
650 	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
651 	ASSERT_NE(MAP_FAILED, p) << strerror(errno);
652 
653 	memmove((uint8_t*)p + offset, CONTENTS, bufsize);
654 
655 	ASSERT_EQ(0, munmap(p, len)) << strerror(errno);
656 	close(fd);	// Write mmap'd data on close
657 
658 	free(expected);
659 	free(zeros);
660 
661 	leak(fd);
662 }
663 
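/* pwrite should issue a FUSE_WRITE at the requested offset */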
664 TEST_F(Write, pwrite)
665 {
666 	const char FULLPATH[] = "mountpoint/some_file.txt";
667 	const char RELPATH[] = "some_file.txt";
668 	const char *CONTENTS = "abcdefgh";
669 	uint64_t ino = 42;
670 	uint64_t offset = m_maxbcachebuf;
671 	int fd;
672 	ssize_t bufsize = strlen(CONTENTS);
673 
674 	expect_lookup(RELPATH, ino, 0);
675 	expect_open(ino, 0, 1);
676 	expect_write(ino, offset, bufsize, bufsize, CONTENTS);
677 
678 	fd = open(FULLPATH, O_WRONLY);
679 	ASSERT_LE(0, fd) << strerror(errno);
680 
681 	ASSERT_EQ(bufsize, pwrite(fd, CONTENTS, bufsize, offset))
682 		<< strerror(errno);
683 	leak(fd);
684 }
685 
686 /* Writing a file should update its cached mtime and ctime */
687 TEST_F(Write, timestamps)
688 {
689 	const char FULLPATH[] = "mountpoint/some_file.txt";
690 	const char RELPATH[] = "some_file.txt";
691 	const char *CONTENTS = "abcdefgh";
692 	ssize_t bufsize = strlen(CONTENTS);
693 	uint64_t ino = 42;
694 	struct stat sb0, sb1;
695 	int fd;
696 
697 	expect_lookup(RELPATH, ino, 0);
698 	expect_open(ino, 0, 1);
699 	maybe_expect_write(ino, 0, bufsize, CONTENTS);
700 
701 	fd = open(FULLPATH, O_RDWR);
702 	ASSERT_LE(0, fd) << strerror(errno);
703 	ASSERT_EQ(0, fstat(fd, &sb0)) << strerror(errno);
704 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
705 
706 	nap();
707 
708 	ASSERT_EQ(0, fstat(fd, &sb1)) << strerror(errno);
709 
710 	EXPECT_EQ(sb0.st_atime, sb1.st_atime);
711 	EXPECT_NE(sb0.st_mtime, sb1.st_mtime);
712 	EXPECT_NE(sb0.st_ctime, sb1.st_ctime);
713 
714 	leak(fd);
715 }
716 
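/* In the simplest case, a small write becomes a single FUSE_WRITE */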
717 TEST_F(Write, write)
718 {
719 	const char FULLPATH[] = "mountpoint/some_file.txt";
720 	const char RELPATH[] = "some_file.txt";
721 	const char *CONTENTS = "abcdefgh";
722 	uint64_t ino = 42;
723 	int fd;
724 	ssize_t bufsize = strlen(CONTENTS);
725 
726 	expect_lookup(RELPATH, ino, 0);
727 	expect_open(ino, 0, 1);
728 	expect_write(ino, 0, bufsize, bufsize, CONTENTS);
729 
730 	fd = open(FULLPATH, O_WRONLY);
731 	ASSERT_LE(0, fd) << strerror(errno);
732 
733 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
734 	leak(fd);
735 }
736 
737 /* fuse(4) should not issue writes of greater size than the daemon requests */
738 TEST_F(WriteMaxWrite, write)
739 {
740 	const char FULLPATH[] = "mountpoint/some_file.txt";
741 	const char RELPATH[] = "some_file.txt";
742 	int *contents;
743 	uint64_t ino = 42;
744 	int fd;
745 	ssize_t halfbufsize, bufsize;
746 
747 	halfbufsize = m_mock->m_maxwrite;
748 	if (halfbufsize >= m_maxbcachebuf || halfbufsize >= m_maxphys)
749 		GTEST_SKIP() << "Must lower m_maxwrite for this test";
750 	bufsize = halfbufsize * 2;
751 	contents = (int*)malloc(bufsize);
752 	ASSERT_NE(nullptr, contents);
753 	for (int i = 0; i < (int)bufsize / (int)sizeof(i); i++) {
754 		contents[i] = i;
755 	}
756 
757 	expect_lookup(RELPATH, ino, 0);
758 	expect_open(ino, 0, 1);
759 	maybe_expect_write(ino, 0, halfbufsize, contents);
760 	maybe_expect_write(ino, halfbufsize, halfbufsize,
761 		&contents[halfbufsize / sizeof(int)]);
762 
763 	fd = open(FULLPATH, O_WRONLY);
764 	ASSERT_LE(0, fd) << strerror(errno);
765 
766 	ASSERT_EQ(bufsize, write(fd, contents, bufsize)) << strerror(errno);
767 	leak(fd);
768 
769 	free(contents);
770 }
771 
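/* A zero-length write should succeed without needing a FUSE_WRITE */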
772 TEST_F(Write, write_nothing)
773 {
774 	const char FULLPATH[] = "mountpoint/some_file.txt";
775 	const char RELPATH[] = "some_file.txt";
776 	const char *CONTENTS = "";
777 	uint64_t ino = 42;
778 	int fd;
779 	ssize_t bufsize = 0;
780 
781 	expect_lookup(RELPATH, ino, 0);
782 	expect_open(ino, 0, 1);
783 
784 	fd = open(FULLPATH, O_WRONLY);
785 	ASSERT_LE(0, fd) << strerror(errno);
786 
787 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
788 	leak(fd);
789 }
790 
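/* Writes should work against a server that speaks protocol 7.8 */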
791 TEST_F(Write_7_8, write)
792 {
793 	const char FULLPATH[] = "mountpoint/some_file.txt";
794 	const char RELPATH[] = "some_file.txt";
795 	const char *CONTENTS = "abcdefgh";
796 	uint64_t ino = 42;
797 	int fd;
798 	ssize_t bufsize = strlen(CONTENTS);
799 
800 	expect_lookup(RELPATH, ino, 0);
801 	expect_open(ino, 0, 1);
802 	expect_write_7_8(ino, 0, bufsize, bufsize, CONTENTS);
803 
804 	fd = open(FULLPATH, O_WRONLY);
805 	ASSERT_LE(0, fd) << strerror(errno);
806 
807 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
808 	leak(fd);
809 }
810 
811 /* In writeback mode, dirty data should be written on close */
812 TEST_F(WriteBackAsync, close)
813 {
814 	const char FULLPATH[] = "mountpoint/some_file.txt";
815 	const char RELPATH[] = "some_file.txt";
816 	const char *CONTENTS = "abcdefgh";
817 	uint64_t ino = 42;
818 	int fd;
819 	ssize_t bufsize = strlen(CONTENTS);
820 
821 	expect_lookup(RELPATH, ino, 0);
822 	expect_open(ino, 0, 1);
823 	expect_write(ino, 0, bufsize, bufsize, CONTENTS);
824 	EXPECT_CALL(*m_mock, process(
825 		ResultOf([=](auto in) {
826 			return (in.header.opcode == FUSE_SETATTR);
827 		}, Eq(true)),
828 		_)
829 	).WillRepeatedly(Invoke(ReturnImmediate([=](auto i __unused, auto& out) {
830 		SET_OUT_HEADER_LEN(out, attr);
831 		out.body.attr.attr.ino = ino;	// Must match nodeid
832 	})));
833 	expect_flush(ino, 1, ReturnErrno(0));
834 	expect_release(ino, ReturnErrno(0));
835 
836 	fd = open(FULLPATH, O_RDWR);
837 	ASSERT_LE(0, fd) << strerror(errno);
838 
839 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
840 	close(fd);
841 }
842 
843 /* In writeback mode, adjacent writes will be clustered together */
844 TEST_F(WriteCluster, clustering)
845 {
846 	const char FULLPATH[] = "mountpoint/some_file.txt";
847 	const char RELPATH[] = "some_file.txt";
848 	uint64_t ino = 42;
849 	int i, fd;
850 	void *wbuf, *wbuf2x;
851 	ssize_t bufsize = m_maxbcachebuf;
852 	off_t filesize = 5 * bufsize;
853 
854 	wbuf = malloc(bufsize);
855 	ASSERT_NE(nullptr, wbuf) << strerror(errno);
856 	memset(wbuf, 'X', bufsize);
857 	wbuf2x = malloc(2 * bufsize);
858 	ASSERT_NE(nullptr, wbuf2x) << strerror(errno);
859 	memset(wbuf2x, 'X', 2 * bufsize);
860 
861 	expect_lookup(RELPATH, ino, filesize);
862 	expect_open(ino, 0, 1);
863 	/*
864 	 * Writes of bufsize-bytes each should be clustered into greater sizes.
865 	 * The amount of clustering is adaptive, so the first write actually
866 	 * issued will be 2x bufsize and subsequent writes may be larger
867 	 */
868 	expect_write(ino, 0, 2 * bufsize, 2 * bufsize, wbuf2x);
869 	expect_write(ino, 2 * bufsize, 2 * bufsize, 2 * bufsize, wbuf2x);
870 	expect_flush(ino, 1, ReturnErrno(0));
871 	expect_release(ino, ReturnErrno(0));
872 
873 	fd = open(FULLPATH, O_RDWR);
874 	ASSERT_LE(0, fd) << strerror(errno);
875 
876 	for (i = 0; i < 4; i++) {
877 		ASSERT_EQ(bufsize, write(fd, wbuf, bufsize))
878 			<< strerror(errno);
879 	}
880 	close(fd);
881 	free(wbuf2x);
882 	free(wbuf);
883 }
884 
885 /*
886  * When clustering writes, an I/O error to any of the cluster's children should
887  * not panic the system on unmount
888  */
889 /*
890  * Regression test for bug 238565
891  * https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=238565
892  */
893 TEST_F(WriteCluster, cluster_write_err)
894 {
895 	const char FULLPATH[] = "mountpoint/some_file.txt";
896 	const char RELPATH[] = "some_file.txt";
897 	uint64_t ino = 42;
898 	int i, fd;
899 	void *wbuf;
900 	ssize_t bufsize = m_maxbcachebuf;
901 	off_t filesize = 4 * bufsize;
902 
903 	wbuf = malloc(bufsize);
904 	ASSERT_NE(nullptr, wbuf) << strerror(errno);
905 	memset(wbuf, 'X', bufsize);
906 
907 	expect_lookup(RELPATH, ino, filesize);
908 	expect_open(ino, 0, 1);
909 	EXPECT_CALL(*m_mock, process(
910 		ResultOf([=](auto in) {
911 			return (in.header.opcode == FUSE_WRITE);
912 		}, Eq(true)),
913 		_)
914 	).WillRepeatedly(Invoke(ReturnErrno(EIO)));
915 	expect_flush(ino, 1, ReturnErrno(0));
916 	expect_release(ino, ReturnErrno(0));
917 
918 	fd = open(FULLPATH, O_RDWR);
919 	ASSERT_LE(0, fd) << strerror(errno);
920 
921 	for (i = 0; i < 3; i++) {
922 		ASSERT_EQ(bufsize, write(fd, wbuf, bufsize))
923 			<< strerror(errno);
924 	}
925 	close(fd);
926 	free(wbuf);
927 }
928 
929 /*
930  * In writeback mode, writes to an O_WRONLY file could trigger reads from the
931  * server.  The FUSE protocol explicitly allows that.
932  */
933 TEST_F(WriteBack, rmw)
934 {
935 	const char FULLPATH[] = "mountpoint/some_file.txt";
936 	const char RELPATH[] = "some_file.txt";
937 	const char *CONTENTS = "abcdefgh";
938 	const char *INITIAL   = "XXXXXXXXXX";
939 	uint64_t ino = 42;
940 	uint64_t offset = 1;
941 	off_t fsize = 10;
942 	int fd;
943 	ssize_t bufsize = strlen(CONTENTS);
944 
945 	FuseTest::expect_lookup(RELPATH, ino, S_IFREG | 0644, fsize, 1);
946 	expect_open(ino, 0, 1);
947 	expect_read(ino, 0, fsize, fsize, INITIAL, O_WRONLY);
948 	maybe_expect_write(ino, offset, bufsize, CONTENTS);
949 
950 	fd = open(FULLPATH, O_WRONLY);
951 	ASSERT_LE(0, fd) << strerror(errno);
952 
953 	ASSERT_EQ(bufsize, pwrite(fd, CONTENTS, bufsize, offset))
954 		<< strerror(errno);
955 	leak(fd);
956 }
957 
958 /*
959  * Without direct_io, writes should be committed to cache
960  */
961 TEST_F(WriteBack, cache)
962 {
963 	const char FULLPATH[] = "mountpoint/some_file.txt";
964 	const char RELPATH[] = "some_file.txt";
965 	const char *CONTENTS = "abcdefgh";
966 	uint64_t ino = 42;
967 	int fd;
968 	ssize_t bufsize = strlen(CONTENTS);
969 	uint8_t readbuf[bufsize];
970 
971 	expect_lookup(RELPATH, ino, 0);
972 	expect_open(ino, 0, 1);
973 	expect_write(ino, 0, bufsize, bufsize, CONTENTS);
974 
975 	fd = open(FULLPATH, O_RDWR);
976 	ASSERT_LE(0, fd) << strerror(errno);
977 
978 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
979 	/*
980 	 * A subsequent read should be serviced by cache, without querying the
981 	 * filesystem daemon
982 	 */
983 	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
984 	ASSERT_EQ(bufsize, read(fd, readbuf, bufsize)) << strerror(errno);
985 	leak(fd);
986 }
987 
988 /*
989  * With O_DIRECT, writes should not be committed to cache.  Admittedly this is
990  * an odd test, because it would be unusual to use O_DIRECT for writes but not
991  * reads.
992  */
993 TEST_F(WriteBack, o_direct)
994 {
995 	const char FULLPATH[] = "mountpoint/some_file.txt";
996 	const char RELPATH[] = "some_file.txt";
997 	const char *CONTENTS = "abcdefgh";
998 	uint64_t ino = 42;
999 	int fd;
1000 	ssize_t bufsize = strlen(CONTENTS);
1001 	uint8_t readbuf[bufsize];
1002 
1003 	expect_lookup(RELPATH, ino, 0);
1004 	expect_open(ino, 0, 1);
1005 	FuseTest::expect_write(ino, 0, bufsize, bufsize, 0, FUSE_WRITE_CACHE,
1006 		CONTENTS);
1007 	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
1008 
1009 	fd = open(FULLPATH, O_RDWR | O_DIRECT);
1010 	ASSERT_LE(0, fd) << strerror(errno);
1011 
1012 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
1013 	/* A subsequent read must query the daemon because cache is empty */
1014 	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
1015 	ASSERT_EQ(0, fcntl(fd, F_SETFL, 0)) << strerror(errno);
1016 	ASSERT_EQ(bufsize, read(fd, readbuf, bufsize)) << strerror(errno);
1017 	leak(fd);
1018 }
1019 
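/* With FOPEN_DIRECT_IO, writes bypass the cache even in writeback mode */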
1020 TEST_F(WriteBack, direct_io)
1021 {
1022 	const char FULLPATH[] = "mountpoint/some_file.txt";
1023 	const char RELPATH[] = "some_file.txt";
1024 	const char *CONTENTS = "abcdefgh";
1025 	uint64_t ino = 42;
1026 	int fd;
1027 	ssize_t bufsize = strlen(CONTENTS);
1028 	uint8_t readbuf[bufsize];
1029 
1030 	expect_lookup(RELPATH, ino, 0);
1031 	expect_open(ino, FOPEN_DIRECT_IO, 1);
1032 	FuseTest::expect_write(ino, 0, bufsize, bufsize, 0, FUSE_WRITE_CACHE,
1033 		CONTENTS);
1034 	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
1035 
1036 	fd = open(FULLPATH, O_RDWR);
1037 	ASSERT_LE(0, fd) << strerror(errno);
1038 
1039 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
1040 	/* A subsequent read must query the daemon because cache is empty */
1041 	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
1042 	ASSERT_EQ(0, fcntl(fd, F_SETFL, 0)) << strerror(errno);
1043 	ASSERT_EQ(bufsize, read(fd, readbuf, bufsize)) << strerror(errno);
1044 	leak(fd);
1045 }
1046 
1047 /*
1048  * mmap should still be possible even if the server used direct_io.  Mmap will
1049  * still use the cache, though.
1050  *
1051  * Regression test for bug 247276
1052  * https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=247276
1053  */
1054 TEST_F(WriteBack, mmap_direct_io)
1055 {
1056 	const char FULLPATH[] = "mountpoint/some_file.txt";
1057 	const char RELPATH[] = "some_file.txt";
1058 	const char *CONTENTS = "abcdefgh";
1059 	uint64_t ino = 42;
1060 	int fd;
1061 	size_t len;
1062 	ssize_t bufsize = strlen(CONTENTS);
1063 	void *p, *zeros;
1064 
1065 	len = getpagesize();
1066 	zeros = calloc(1, len);
1067 	ASSERT_NE(nullptr, zeros);
1068 
1069 	expect_lookup(RELPATH, ino, len);
1070 	expect_open(ino, FOPEN_DIRECT_IO, 1);
1071 	expect_read(ino, 0, len, len, zeros);
1072 	expect_flush(ino, 1, ReturnErrno(0));
1073 	FuseTest::expect_write(ino, 0, len, len, FUSE_WRITE_CACHE, 0, zeros);
1074 	expect_release(ino, ReturnErrno(0));
1075 
1076 	fd = open(FULLPATH, O_RDWR);
1077 	ASSERT_LE(0, fd) << strerror(errno);
1078 
1079 	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
1080 	ASSERT_NE(MAP_FAILED, p) << strerror(errno);
1081 
1082 	memmove((uint8_t*)p, CONTENTS, bufsize);
1083 
1084 	ASSERT_EQ(0, munmap(p, len)) << strerror(errno);
1085 	close(fd);	// Write mmap'd data on close
1086 
1087 	free(zeros);
1088 }
1089 
1090 /*
1091  * When mounted with -o async, the writeback cache mode should delay writes
1092  */
1093 TEST_F(WriteBackAsync, delay)
1094 {
1095 	const char FULLPATH[] = "mountpoint/some_file.txt";
1096 	const char RELPATH[] = "some_file.txt";
1097 	const char *CONTENTS = "abcdefgh";
1098 	uint64_t ino = 42;
1099 	int fd;
1100 	ssize_t bufsize = strlen(CONTENTS);
1101 
1102 	expect_lookup(RELPATH, ino, 0);
1103 	expect_open(ino, 0, 1);
1104 	/* Write should be cached, but FUSE_WRITE shouldn't be sent */
1105 	EXPECT_CALL(*m_mock, process(
1106 		ResultOf([=](auto in) {
1107 			return (in.header.opcode == FUSE_WRITE);
1108 		}, Eq(true)),
1109 		_)
1110 	).Times(0);
1111 
1112 	fd = open(FULLPATH, O_RDWR);
1113 	ASSERT_LE(0, fd) << strerror(errno);
1114 
1115 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
1116 
1117 	/* Don't close the file because that would flush the cache */
1118 	leak(fd);
1119 }
1120 
1121 /*
1122  * A direct write should not evict dirty cached data from outside of its own
1123  * byte range.
1124  */
1125 TEST_F(WriteBackAsync, direct_io_ignores_unrelated_cached)
1126 {
1127 	const char FULLPATH[] = "mountpoint/some_file.txt";
1128 	const char RELPATH[] = "some_file.txt";
1129 	const char CONTENTS0[] = "abcdefgh";
1130 	const char CONTENTS1[] = "ijklmnop";
1131 	uint64_t ino = 42;
1132 	int fd;
1133 	ssize_t bufsize = strlen(CONTENTS0) + 1;
1134 	ssize_t fsize = 2 * m_maxbcachebuf;
1135 	char readbuf[bufsize];
1136 	void *zeros;
1137 
1138 	zeros = calloc(1, m_maxbcachebuf);
1139 	ASSERT_NE(nullptr, zeros);
1140 
1141 	expect_lookup(RELPATH, ino, fsize);
1142 	expect_open(ino, 0, 1);
1143 	expect_read(ino, 0, m_maxbcachebuf, m_maxbcachebuf, zeros);
1144 	FuseTest::expect_write(ino, m_maxbcachebuf, bufsize, bufsize, 0, 0,
1145 		CONTENTS1);
1146 
1147 	fd = open(FULLPATH, O_RDWR);
1148 	ASSERT_LE(0, fd) << strerror(errno);
1149 
1150 	// Cache first block with dirty data.  This will entail first reading
1151 	// the existing data.
1152 	ASSERT_EQ(bufsize, pwrite(fd, CONTENTS0, bufsize, 0))
1153 		<< strerror(errno);
1154 
1155 	// Write directly to second block
1156 	ASSERT_EQ(0, fcntl(fd, F_SETFL, O_DIRECT)) << strerror(errno);
1157 	ASSERT_EQ(bufsize, pwrite(fd, CONTENTS1, bufsize, m_maxbcachebuf))
1158 		<< strerror(errno);
1159 
1160 	// Read from the first block again.  Should be serviced by cache.
1161 	ASSERT_EQ(0, fcntl(fd, F_SETFL, 0)) << strerror(errno);
1162 	ASSERT_EQ(bufsize, pread(fd, readbuf, bufsize, 0)) << strerror(errno);
1163 	ASSERT_STREQ(readbuf, CONTENTS0);
1164 
1165 	leak(fd);
1166 	free(zeros);
1167 }
1168 
1169 /*
1170  * If a direct I/O write partially overlaps one or two blocks of dirty cached
1171  * data, no dirty data should be lost.  Admittedly this is a weird test,
1172  * because it would be unusual to use O_DIRECT and the writeback cache.
1173  */
1174 TEST_F(WriteBackAsync, direct_io_partially_overlaps_cached_block)
1175 {
1176 	const char FULLPATH[] = "mountpoint/some_file.txt";
1177 	const char RELPATH[] = "some_file.txt";
1178 	uint64_t ino = 42;
1179 	int fd;
1180 	off_t bs = m_maxbcachebuf;
1181 	ssize_t fsize = 3 * bs;
1182 	void *readbuf, *zeros, *ones, *zeroones, *onezeros;
1183 
1184 	readbuf = malloc(bs);
1185 	ASSERT_NE(nullptr, readbuf) << strerror(errno);
1186 	zeros = calloc(1, 3 * bs);
1187 	ASSERT_NE(nullptr, zeros);
1188 	ones = calloc(1, 2 * bs);
1189 	ASSERT_NE(nullptr, ones);
1190 	memset(ones, 1, 2 * bs);
1191 	zeroones = calloc(1, bs);
1192 	ASSERT_NE(nullptr, zeroones);
1193 	memset((uint8_t*)zeroones + bs / 2, 1, bs / 2);
1194 	onezeros = calloc(1, bs);
1195 	ASSERT_NE(nullptr, onezeros);
1196 	memset(onezeros, 1, bs / 2);
1197 
1198 	expect_lookup(RELPATH, ino, fsize);
1199 	expect_open(ino, 0, 1);
1200 
1201 	fd = open(FULLPATH, O_RDWR);
1202 	ASSERT_LE(0, fd) << strerror(errno);
1203 
1204 	/* Cache first and third blocks with dirty data.  */
1205 	ASSERT_EQ(3 * bs, pwrite(fd, zeros, 3 * bs, 0)) << strerror(errno);
1206 
1207 	/*
1208 	 * Write directly to all three blocks.  The partially written blocks
1209 	 * will be flushed because they're dirty.
1210 	 */
1211 	FuseTest::expect_write(ino, 0, bs, bs, 0, 0, zeros);
1212 	FuseTest::expect_write(ino, 2 * bs, bs, bs, 0, 0, zeros);
1213 	/* The direct write is split in two because of the m_maxwrite value */
1214 	FuseTest::expect_write(ino,     bs / 2, bs, bs, 0, 0, ones);
1215 	FuseTest::expect_write(ino, 3 * bs / 2, bs, bs, 0, 0, ones);
1216 	ASSERT_EQ(0, fcntl(fd, F_SETFL, O_DIRECT)) << strerror(errno);
1217 	ASSERT_EQ(2 * bs, pwrite(fd, ones, 2 * bs, bs / 2)) << strerror(errno);
1218 
1219 	/*
1220 	 * Read from both the valid and invalid portions of the first and third
1221 	 * blocks again.  This will entail FUSE_READ operations because these
1222 	 * blocks were invalidated by the direct write.
1223 	 */
1224 	expect_read(ino, 0, bs, bs, zeroones);
1225 	expect_read(ino, 2 * bs, bs, bs, onezeros);
1226 	ASSERT_EQ(0, fcntl(fd, F_SETFL, 0)) << strerror(errno);
1227 	ASSERT_EQ(bs / 2, pread(fd, readbuf, bs / 2, 0)) << strerror(errno);
1228 	EXPECT_EQ(0, memcmp(zeros, readbuf, bs / 2));
1229 	ASSERT_EQ(bs / 2, pread(fd, readbuf, bs / 2, 5 * bs / 2))
1230 		<< strerror(errno);
1231 	EXPECT_EQ(0, memcmp(zeros, readbuf, bs / 2));
1232 	ASSERT_EQ(bs / 2, pread(fd, readbuf, bs / 2, bs / 2))
1233 		<< strerror(errno);
1234 	EXPECT_EQ(0, memcmp(ones, readbuf, bs / 2));
1235 	ASSERT_EQ(bs / 2, pread(fd, readbuf, bs / 2, 2 * bs))
1236 		<< strerror(errno);
1237 	EXPECT_EQ(0, memcmp(ones, readbuf, bs / 2));
1238 
1239 	leak(fd);
1240 	free(zeroones);
1241 	free(onezeros);
1242 	free(ones);
1243 	free(zeros);
1244 	free(readbuf);
1245 }
1246 
1247 /*
1248  * In WriteBack mode, writes may be cached beyond what the server thinks is the
1249  * EOF.  In this case, a short read at EOF should _not_ cause fusefs to update
1250  * the file's size.
1251  */
1252 TEST_F(WriteBackAsync, eof)
1253 {
1254 	const char FULLPATH[] = "mountpoint/some_file.txt";
1255 	const char RELPATH[] = "some_file.txt";
1256 	const char *CONTENTS0 = "abcdefgh";
1257 	const char *CONTENTS1 = "ijklmnop";
1258 	uint64_t ino = 42;
1259 	int fd;
1260 	off_t offset = m_maxbcachebuf;
1261 	ssize_t wbufsize = strlen(CONTENTS1);
1262 	off_t old_filesize = (off_t)strlen(CONTENTS0);
1263 	ssize_t rbufsize = 2 * old_filesize;
1264 	char readbuf[rbufsize];
1265 	size_t holesize = rbufsize - old_filesize;
1266 	char hole[holesize];
1267 	struct stat sb;
1268 	ssize_t r;
1269 
1270 	expect_lookup(RELPATH, ino, 0);
1271 	expect_open(ino, 0, 1);
1272 	expect_read(ino, 0, m_maxbcachebuf, old_filesize, CONTENTS0);
1273 
1274 	fd = open(FULLPATH, O_RDWR);
1275 	ASSERT_LE(0, fd) << strerror(errno);
1276 
1277 	/* Write and cache data beyond EOF */
1278 	ASSERT_EQ(wbufsize, pwrite(fd, CONTENTS1, wbufsize, offset))
1279 		<< strerror(errno);
1280 
1281 	/* Read from the old EOF */
1282 	r = pread(fd, readbuf, rbufsize, 0);
1283 	ASSERT_LE(0, r) << strerror(errno);
1284 	EXPECT_EQ(rbufsize, r) << "read should've synthesized a hole";
1285 	EXPECT_EQ(0, memcmp(CONTENTS0, readbuf, old_filesize));
1286 	bzero(hole, holesize);
1287 	EXPECT_EQ(0, memcmp(hole, readbuf + old_filesize, holesize));
1288 
1289 	/* The file's size should still be what was established by pwrite */
1290 	ASSERT_EQ(0, fstat(fd, &sb)) << strerror(errno);
1291 	EXPECT_EQ(offset + wbufsize, sb.st_size);
1292 	leak(fd);
1293 }
1294 
1295 /*
1296  * When a file has dirty writes that haven't been flushed, the server's notion
1297  * of its mtime and ctime will be wrong.  The kernel should ignore those if it
1298  * gets them from a FUSE_GETATTR before flushing.
1299  */
1300 TEST_F(WriteBackAsync, timestamps)
1301 {
1302 	const char FULLPATH[] = "mountpoint/some_file.txt";
1303 	const char RELPATH[] = "some_file.txt";
1304 	const char *CONTENTS = "abcdefgh";
1305 	ssize_t bufsize = strlen(CONTENTS);
1306 	uint64_t ino = 42;
1307 	uint64_t attr_valid = 0;
1308 	uint64_t attr_valid_nsec = 0;
1309 	uint64_t server_time = 12345;
1310 	mode_t mode = S_IFREG | 0644;
1311 	int fd;
1312 
1313 	struct stat sb;
1314 
1315 	EXPECT_LOOKUP(FUSE_ROOT_ID, RELPATH)
1316 	.WillRepeatedly(Invoke(
1317 		ReturnImmediate([=](auto in __unused, auto& out) {
1318 		SET_OUT_HEADER_LEN(out, entry);
1319 		out.body.entry.attr.mode = mode;
1320 		out.body.entry.nodeid = ino;
1321 		out.body.entry.attr.nlink = 1;
1322 		out.body.entry.attr_valid = attr_valid;
1323 		out.body.entry.attr_valid_nsec = attr_valid_nsec;
1324 	})));
1325 	expect_open(ino, 0, 1);
1326 	EXPECT_CALL(*m_mock, process(
1327 		ResultOf([=](auto in) {
1328 			return (in.header.opcode == FUSE_GETATTR &&
1329 				in.header.nodeid == ino);
1330 		}, Eq(true)),
1331 		_)
1332 	).WillRepeatedly(Invoke(
1333 	ReturnImmediate([=](auto i __unused, auto& out) {
1334 		SET_OUT_HEADER_LEN(out, attr);
1335 		out.body.attr.attr.ino = ino;
1336 		out.body.attr.attr.mode = mode;
1337 		out.body.attr.attr_valid = attr_valid;
1338 		out.body.attr.attr_valid_nsec = attr_valid_nsec;
1339 		out.body.attr.attr.atime = server_time;
1340 		out.body.attr.attr.mtime = server_time;
1341 		out.body.attr.attr.ctime = server_time;
1342 	})));
1343 
1344 	fd = open(FULLPATH, O_RDWR);
1345 	ASSERT_LE(0, fd) << strerror(errno);
1346 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
1347 
1348 	ASSERT_EQ(0, fstat(fd, &sb)) << strerror(errno);
1349 	EXPECT_EQ((time_t)server_time, sb.st_atime);
1350 	EXPECT_NE((time_t)server_time, sb.st_mtime);
1351 	EXPECT_NE((time_t)server_time, sb.st_ctime);
1352 
1353 	leak(fd);
1354 }
1355 
1356 /* Any dirty timestamp fields should be flushed during a SETATTR */
1357 TEST_F(WriteBackAsync, timestamps_during_setattr)
1358 {
1359 	const char FULLPATH[] = "mountpoint/some_file.txt";
1360 	const char RELPATH[] = "some_file.txt";
1361 	const char *CONTENTS = "abcdefgh";
1362 	ssize_t bufsize = strlen(CONTENTS);
1363 	uint64_t ino = 42;
1364 	const mode_t newmode = 0755;
1365 	int fd;
1366 
1367 	expect_lookup(RELPATH, ino, 0);
1368 	expect_open(ino, 0, 1);
1369 	EXPECT_CALL(*m_mock, process(
1370 		ResultOf([=](auto in) {
1371 			uint32_t valid = FATTR_MODE | FATTR_MTIME | FATTR_CTIME;
1372 			return (in.header.opcode == FUSE_SETATTR &&
1373 				in.header.nodeid == ino &&
1374 				in.body.setattr.valid == valid);
1375 		}, Eq(true)),
1376 		_)
1377 	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
1378 		SET_OUT_HEADER_LEN(out, attr);
1379 		out.body.attr.attr.ino = ino;
1380 		out.body.attr.attr.mode = S_IFREG | newmode;
1381 	})));
1382 
1383 	fd = open(FULLPATH, O_RDWR);
1384 	ASSERT_LE(0, fd) << strerror(errno);
1385 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
1386 	ASSERT_EQ(0, fchmod(fd, newmode)) << strerror(errno);
1387 
1388 	leak(fd);
1389 }
1390 
1391 /* fuse_init_out.time_gran controls the granularity of timestamps */
1392 TEST_P(TimeGran, timestamps_during_setattr)
1393 {
1394 	const char FULLPATH[] = "mountpoint/some_file.txt";
1395 	const char RELPATH[] = "some_file.txt";
1396 	const char *CONTENTS = "abcdefgh";
1397 	ssize_t bufsize = strlen(CONTENTS);
1398 	uint64_t ino = 42;
1399 	const mode_t newmode = 0755;
1400 	int fd;
1401 
1402 	expect_lookup(RELPATH, ino, 0);
1403 	expect_open(ino, 0, 1);
1404 	EXPECT_CALL(*m_mock, process(
1405 		ResultOf([=](auto in) {
1406 			uint32_t valid = FATTR_MODE | FATTR_MTIME | FATTR_CTIME;
1407 			return (in.header.opcode == FUSE_SETATTR &&
1408 				in.header.nodeid == ino &&
1409 				in.body.setattr.valid == valid &&
1410 				in.body.setattr.mtimensec % m_time_gran == 0 &&
1411 				in.body.setattr.ctimensec % m_time_gran == 0);
1412 		}, Eq(true)),
1413 		_)
1414 	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
1415 		SET_OUT_HEADER_LEN(out, attr);
1416 		out.body.attr.attr.ino = ino;
1417 		out.body.attr.attr.mode = S_IFREG | newmode;
1418 	})));
1419 
1420 	fd = open(FULLPATH, O_RDWR);
1421 	ASSERT_LE(0, fd) << strerror(errno);
1422 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
1423 	ASSERT_EQ(0, fchmod(fd, newmode)) << strerror(errno);
1424 
1425 	leak(fd);
1426 }
1427 
1428 INSTANTIATE_TEST_CASE_P(RA, TimeGran, Range(0u, 10u));
1429 
1430 /*
1431  * Without direct_io, writes should be committed to cache
1432  */
1433 TEST_F(Write, writethrough)
1434 {
1435 	const char FULLPATH[] = "mountpoint/some_file.txt";
1436 	const char RELPATH[] = "some_file.txt";
1437 	const char *CONTENTS = "abcdefgh";
1438 	uint64_t ino = 42;
1439 	int fd;
1440 	ssize_t bufsize = strlen(CONTENTS);
1441 	uint8_t readbuf[bufsize];
1442 
1443 	expect_lookup(RELPATH, ino, 0);
1444 	expect_open(ino, 0, 1);
1445 	expect_write(ino, 0, bufsize, bufsize, CONTENTS);
1446 
1447 	fd = open(FULLPATH, O_RDWR);
1448 	ASSERT_LE(0, fd) << strerror(errno);
1449 
1450 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
1451 	/*
1452 	 * A subsequent read should be serviced by cache, without querying the
1453 	 * filesystem daemon
1454 	 */
1455 	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
1456 	ASSERT_EQ(bufsize, read(fd, readbuf, bufsize)) << strerror(errno);
1457 	leak(fd);
1458 }
1459 
1460 /* Writes that extend a file should update the cached file size */
1461 TEST_F(Write, update_file_size)
1462 {
1463 	const char FULLPATH[] = "mountpoint/some_file.txt";
1464 	const char RELPATH[] = "some_file.txt";
1465 	const char *CONTENTS = "abcdefgh";
1466 	struct stat sb;
1467 	uint64_t ino = 42;
1468 	int fd;
1469 	ssize_t bufsize = strlen(CONTENTS);
1470 
1471 	expect_lookup(RELPATH, ino, 0);
1472 	expect_open(ino, 0, 1);
1473 	expect_write(ino, 0, bufsize, bufsize, CONTENTS);
1474 
1475 	fd = open(FULLPATH, O_RDWR);
1476 	ASSERT_LE(0, fd) << strerror(errno);
1477 
1478 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
1479 	/* Get cached attributes */
1480 	ASSERT_EQ(0, fstat(fd, &sb)) << strerror(errno);
1481 	ASSERT_EQ(bufsize, sb.st_size);
1482 	leak(fd);
1483 }
1484