1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2019 The FreeBSD Foundation
5  *
6  * This software was developed by BFF Storage Systems, LLC under sponsorship
7  * from the FreeBSD Foundation.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  * $FreeBSD$
31  */
32 
33 extern "C" {
34 #include <sys/param.h>
35 #include <sys/mman.h>
36 #include <sys/resource.h>
37 #include <sys/stat.h>
38 #include <sys/time.h>
39 #include <sys/uio.h>
40 
41 #include <aio.h>
42 #include <fcntl.h>
43 #include <signal.h>
44 #include <unistd.h>
45 }
46 
47 #include "mockfs.hh"
48 #include "utils.hh"
49 
50 using namespace testing;
51 
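/* Base fixture for write tests; runs in the default (writethrough) cache mode */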
52 class Write: public FuseTest {
53 
54 public:
55 void SetUp() {
56 	FuseTest::SetUp();
57 }
58 
59 void TearDown() {
60 	struct sigaction sa;
61 
62 	bzero(&sa, sizeof(sa));
63 	sa.sa_handler = SIG_DFL;
64 	sigaction(SIGXFSZ, &sa, NULL);
65 
66 	FuseTest::TearDown();
67 }
68 
69 void expect_lookup(const char *relpath, uint64_t ino, uint64_t size)
70 {
71 	FuseTest::expect_lookup(relpath, ino, S_IFREG | 0644, size, 1);
72 }
73 
74 void expect_release(uint64_t ino, ProcessMockerT r)
75 {
76 	EXPECT_CALL(*m_mock, process(
77 		ResultOf([=](auto in) {
78 			return (in.header.opcode == FUSE_RELEASE &&
79 				in.header.nodeid == ino);
80 		}, Eq(true)),
81 		_)
82 	).WillRepeatedly(Invoke(r));
83 }
84 
85 void expect_write(uint64_t ino, uint64_t offset, uint64_t isize,
86 	uint64_t osize, const void *contents)
87 {
88 	FuseTest::expect_write(ino, offset, isize, osize, 0, 0, contents);
89 }
90 
91 /* Expect a write that may or may not come, depending on the cache mode */
92 void maybe_expect_write(uint64_t ino, uint64_t offset, uint64_t size,
93 	const void *contents)
94 {
95 	EXPECT_CALL(*m_mock, process(
96 		ResultOf([=](auto in) {
97 			const char *buf = (const char*)in.body.bytes +
98 				sizeof(struct fuse_write_in);
99 
100 			return (in.header.opcode == FUSE_WRITE &&
101 				in.header.nodeid == ino &&
102 				in.body.write.offset == offset  &&
103 				in.body.write.size == size &&
104 				0 == bcmp(buf, contents, size));
105 		}, Eq(true)),
106 		_)
107 	).Times(AtMost(1))
108 	.WillRepeatedly(Invoke(
109 		ReturnImmediate([=](auto in __unused, auto& out) {
110 			SET_OUT_HEADER_LEN(out, write);
111 			out.body.write.size = size;
112 		})
113 	));
114 }
115 
116 };
117 
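/* Tests that negotiate FUSE protocol version 7.8 */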
118 class Write_7_8: public FuseTest {
119 
120 public:
121 virtual void SetUp() {
122 	m_kernel_minor_version = 8;
123 	FuseTest::SetUp();
124 }
125 
126 void expect_lookup(const char *relpath, uint64_t ino, uint64_t size)
127 {
128 	FuseTest::expect_lookup_7_8(relpath, ino, S_IFREG | 0644, size, 1);
129 }
130 
131 };
132 
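/* Tests for aio_write(2); they require vfs.aio.enable_unsafe */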
133 class AioWrite: public Write {
134 virtual void SetUp() {
135 	if (!is_unsafe_aio_enabled())
136 		GTEST_SKIP() <<
137 			"vfs.aio.enable_unsafe must be set for this test";
138 	FuseTest::SetUp();
139 }
140 };
141 
142 /* Tests for the writeback cache mode */
143 class WriteBack: public Write {
144 public:
145 virtual void SetUp() {
146 	m_init_flags |= FUSE_WRITEBACK_CACHE;
147 	FuseTest::SetUp();
148 	if (IsSkipped())
149 		return;
150 }
151 
152 void expect_write(uint64_t ino, uint64_t offset, uint64_t isize,
153 	uint64_t osize, const void *contents)
154 {
155 	FuseTest::expect_write(ino, offset, isize, osize, FUSE_WRITE_CACHE, 0,
156 		contents);
157 }
158 };
159 
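/* Tests for the writeback cache mode with the -o async mount option */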
160 class WriteBackAsync: public WriteBack {
161 public:
162 virtual void SetUp() {
163 	m_async = true;
164 	m_maxwrite = 65536;
165 	WriteBack::SetUp();
166 }
167 };
168 
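/* Tests for fuse_init_out.time_gran; parameterized by the log2 of the granularity */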
169 class TimeGran: public WriteBackAsync, public WithParamInterface<unsigned> {
170 public:
171 virtual void SetUp() {
172 	m_time_gran = 1 << GetParam();
173 	WriteBackAsync::SetUp();
174 }
175 };
176 
177 /* Tests for clustered writes with WriteBack caching */
178 class WriteCluster: public WriteBack {
179 public:
180 virtual void SetUp() {
181 	m_async = true;
182 	m_maxwrite = 1 << 25;	// Anything larger than MAXPHYS will suffice
183 	WriteBack::SetUp();
184 	if (m_maxphys < 2 * DFLTPHYS)
185 		GTEST_SKIP() << "MAXPHYS must be at least twice DFLTPHYS"
186 			<< " for this test";
187 	if (m_maxphys < 2 * m_maxbcachebuf)
188 		GTEST_SKIP() << "MAXPHYS must be at least twice maxbcachebuf"
189 			<< " for this test";
190 }
191 };
192 
193 /* Tests relating to the server's max_write property */
194 class WriteMaxWrite: public Write {
195 public:
196 virtual void SetUp() {
197 	/*
198 	 * For this test, m_maxwrite must be less than both m_maxbcachebuf and
199 	 * m_maxphys.
200 	 */
201 	m_maxwrite = 32768;
202 	Write::SetUp();
203 }
204 };
205 
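/* Tests for EOF handling in VOP_STRATEGY; parameterized by the expected number of FUSE_GETATTRs */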
206 class WriteEofDuringVnopStrategy: public Write, public WithParamInterface<int>
207 {};
208 
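/* Tests for RLIMIT_FSIZE; parameterized by the extra open(2) flags (0 or O_DIRECT) */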
209 class WriteRlimitFsize: public Write, public WithParamInterface<int> {
210 public:
211 static sig_atomic_t s_sigxfsz;
212 struct rlimit	m_initial_limit;
213 
214 void SetUp() {
215 	s_sigxfsz = 0;
216 	getrlimit(RLIMIT_FSIZE, &m_initial_limit);
217 	FuseTest::SetUp();
218 }
219 
220 void TearDown() {
221 	setrlimit(RLIMIT_FSIZE, &m_initial_limit);
222 
223 	FuseTest::TearDown();
224 }
225 };
226 
227 sig_atomic_t WriteRlimitFsize::s_sigxfsz = 0;
228 
229 void sigxfsz_handler(int __unused sig) {
230 	WriteRlimitFsize::s_sigxfsz = 1;
231 }
232 
233 /* AIO writes need to set the header's pid field correctly */
234 /* https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=236379 */
235 TEST_F(AioWrite, DISABLED_aio_write)
236 {
237 	const char FULLPATH[] = "mountpoint/some_file.txt";
238 	const char RELPATH[] = "some_file.txt";
239 	const char *CONTENTS = "abcdefgh";
240 	uint64_t ino = 42;
241 	uint64_t offset = 4096;
242 	int fd;
243 	ssize_t bufsize = strlen(CONTENTS);
244 	struct aiocb iocb, *piocb;
245 
246 	expect_lookup(RELPATH, ino, 0);
247 	expect_open(ino, 0, 1);
248 	expect_write(ino, offset, bufsize, bufsize, CONTENTS);
249 
250 	fd = open(FULLPATH, O_WRONLY);
251 	ASSERT_LE(0, fd) << strerror(errno);
252 
253 	iocb.aio_nbytes = bufsize;
254 	iocb.aio_fildes = fd;
255 	iocb.aio_buf = __DECONST(void *, CONTENTS);
256 	iocb.aio_offset = offset;
257 	iocb.aio_sigevent.sigev_notify = SIGEV_NONE;
258 	ASSERT_EQ(0, aio_write(&iocb)) << strerror(errno);
259 	ASSERT_EQ(bufsize, aio_waitcomplete(&piocb, NULL)) << strerror(errno);
260 	leak(fd);
261 }
262 
263 /*
264  * When a file is opened with O_APPEND, we should forward that flag to
265  * FUSE_OPEN but still attempt to calculate the write
266  * offset internally.  That way we'll work both with filesystems that
267  * understand O_APPEND (and ignore the offset) and filesystems that don't (and
268  * simply use the offset).
269  *
270  * Note that verifying the O_APPEND flag in FUSE_OPEN is done in the
271  * Open.o_append test.
272  */
273 TEST_F(Write, append)
274 {
275 	const ssize_t BUFSIZE = 9;
276 	const char FULLPATH[] = "mountpoint/some_file.txt";
277 	const char RELPATH[] = "some_file.txt";
278 	const char CONTENTS[BUFSIZE] = "abcdefgh";
279 	uint64_t ino = 42;
280 	/*
281 	 * Set offset to a maxbcachebuf boundary so we don't need to RMW when
282 	 * using writeback caching
283 	 */
284 	uint64_t initial_offset = m_maxbcachebuf;
285 	int fd;
286 
287 	expect_lookup(RELPATH, ino, initial_offset);
288 	expect_open(ino, 0, 1);
289 	expect_write(ino, initial_offset, BUFSIZE, BUFSIZE, CONTENTS);
290 
291 	/* Must open O_RDWR or fuse(4) implicitly sets direct_io */
292 	fd = open(FULLPATH, O_RDWR | O_APPEND);
293 	ASSERT_LE(0, fd) << strerror(errno);
294 
295 	ASSERT_EQ(BUFSIZE, write(fd, CONTENTS, BUFSIZE)) << strerror(errno);
296 	leak(fd);
297 }
298 
299 /* If a file is cached, then appending to the end should not cause a read */
300 TEST_F(Write, append_to_cached)
301 {
302 	const ssize_t BUFSIZE = 9;
303 	const char FULLPATH[] = "mountpoint/some_file.txt";
304 	const char RELPATH[] = "some_file.txt";
305 	char *oldcontents, *oldbuf;
306 	const char CONTENTS[BUFSIZE] = "abcdefgh";
307 	uint64_t ino = 42;
308 	/*
309 	 * Use an old size in the middle of a maxbcachebuf block to test partial-buffer handling
310 	 */
311 	uint64_t oldsize = m_maxbcachebuf / 2;
312 	int fd;
313 
314 	oldcontents = (char*)calloc(1, oldsize);
315 	ASSERT_NE(nullptr, oldcontents) << strerror(errno);
316 	oldbuf = (char*)malloc(oldsize);
317 	ASSERT_NE(nullptr, oldbuf) << strerror(errno);
318 
319 	expect_lookup(RELPATH, ino, oldsize);
320 	expect_open(ino, 0, 1);
321 	expect_read(ino, 0, oldsize, oldsize, oldcontents);
322 	maybe_expect_write(ino, oldsize, BUFSIZE, CONTENTS);
323 
324 	/* Must open O_RDWR or fuse(4) implicitly sets direct_io */
325 	fd = open(FULLPATH, O_RDWR | O_APPEND);
326 	ASSERT_LE(0, fd) << strerror(errno);
327 
328 	/* Read the old data into the cache */
329 	ASSERT_EQ((ssize_t)oldsize, read(fd, oldbuf, oldsize))
330 		<< strerror(errno);
331 
332 	/* Write the new data.  There should be no more read operations */
333 	ASSERT_EQ(BUFSIZE, write(fd, CONTENTS, BUFSIZE)) << strerror(errno);
334 	leak(fd);
335 	free(oldbuf);
336 	free(oldcontents);
337 }
338 
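/* Like Write.append, but the server sets FOPEN_DIRECT_IO */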
339 TEST_F(Write, append_direct_io)
340 {
341 	const ssize_t BUFSIZE = 9;
342 	const char FULLPATH[] = "mountpoint/some_file.txt";
343 	const char RELPATH[] = "some_file.txt";
344 	const char CONTENTS[BUFSIZE] = "abcdefgh";
345 	uint64_t ino = 42;
346 	uint64_t initial_offset = 4096;
347 	int fd;
348 
349 	expect_lookup(RELPATH, ino, initial_offset);
350 	expect_open(ino, FOPEN_DIRECT_IO, 1);
351 	expect_write(ino, initial_offset, BUFSIZE, BUFSIZE, CONTENTS);
352 
353 	fd = open(FULLPATH, O_WRONLY | O_APPEND);
354 	ASSERT_LE(0, fd) << strerror(errno);
355 
356 	ASSERT_EQ(BUFSIZE, write(fd, CONTENTS, BUFSIZE)) << strerror(errno);
357 	leak(fd);
358 }
359 
360 /* A direct write should evict any overlapping cached data */
361 TEST_F(Write, direct_io_evicts_cache)
362 {
363 	const char FULLPATH[] = "mountpoint/some_file.txt";
364 	const char RELPATH[] = "some_file.txt";
365 	const char CONTENTS0[] = "abcdefgh";
366 	const char CONTENTS1[] = "ijklmnop";
367 	uint64_t ino = 42;
368 	int fd;
369 	ssize_t bufsize = strlen(CONTENTS0) + 1;
370 	char readbuf[bufsize];
371 
372 	expect_lookup(RELPATH, ino, bufsize);
373 	expect_open(ino, 0, 1);
374 	expect_read(ino, 0, bufsize, bufsize, CONTENTS0);
375 	expect_write(ino, 0, bufsize, bufsize, CONTENTS1);
376 
377 	fd = open(FULLPATH, O_RDWR);
378 	ASSERT_LE(0, fd) << strerror(errno);
379 
380 	// Prime cache
381 	ASSERT_EQ(bufsize, read(fd, readbuf, bufsize)) << strerror(errno);
382 
383 	// Write directly, evicting cache
384 	ASSERT_EQ(0, fcntl(fd, F_SETFL, O_DIRECT)) << strerror(errno);
385 	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
386 	ASSERT_EQ(bufsize, write(fd, CONTENTS1, bufsize)) << strerror(errno);
387 
388 	// Read again.  Cache should be bypassed
389 	expect_read(ino, 0, bufsize, bufsize, CONTENTS1);
390 	ASSERT_EQ(0, fcntl(fd, F_SETFL, 0)) << strerror(errno);
391 	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
392 	ASSERT_EQ(bufsize, read(fd, readbuf, bufsize)) << strerror(errno);
393 	ASSERT_STREQ(readbuf, CONTENTS1);
394 
395 	leak(fd);
396 }
397 
398 /*
399  * If the server doesn't return FOPEN_DIRECT_IO during FUSE_OPEN, then it's not
400  * allowed to return a short write for that file handle.  However, if it does
401  * then we should still do our darndest to handle it by resending the unwritten
402  * portion.
403  */
404 TEST_F(Write, indirect_io_short_write)
405 {
406 	const char FULLPATH[] = "mountpoint/some_file.txt";
407 	const char RELPATH[] = "some_file.txt";
408 	const char *CONTENTS = "abcdefghijklmnop";
409 	uint64_t ino = 42;
410 	int fd;
411 	ssize_t bufsize = strlen(CONTENTS);
412 	ssize_t bufsize0 = 11;
413 	ssize_t bufsize1 = strlen(CONTENTS) - bufsize0;
414 	const char *contents1 = CONTENTS + bufsize0;
415 
416 	expect_lookup(RELPATH, ino, 0);
417 	expect_open(ino, 0, 1);
418 	expect_write(ino, 0, bufsize, bufsize0, CONTENTS);
419 	expect_write(ino, bufsize0, bufsize1, bufsize1, contents1);
420 
421 	fd = open(FULLPATH, O_WRONLY);
422 	ASSERT_LE(0, fd) << strerror(errno);
423 
424 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
425 	leak(fd);
426 }
427 
428 /* It is an error if the daemon claims to have written more data than we sent */
429 TEST_F(Write, indirect_io_long_write)
430 {
431 	const char FULLPATH[] = "mountpoint/some_file.txt";
432 	const char RELPATH[] = "some_file.txt";
433 	const char *CONTENTS = "abcdefghijklmnop";
434 	uint64_t ino = 42;
435 	int fd;
436 	ssize_t bufsize = strlen(CONTENTS);
437 	ssize_t bufsize_out = 100;
438 	off_t some_other_size = 25;
439 	struct stat sb;
440 
441 	expect_lookup(RELPATH, ino, 0);
442 	expect_open(ino, 0, 1);
443 	expect_write(ino, 0, bufsize, bufsize_out, CONTENTS);
444 	expect_getattr(ino, some_other_size);
445 
446 	fd = open(FULLPATH, O_WRONLY);
447 	ASSERT_LE(0, fd) << strerror(errno);
448 
449 	ASSERT_EQ(-1, write(fd, CONTENTS, bufsize)) << strerror(errno);
450 	ASSERT_EQ(EINVAL, errno);
451 
452 	/*
453 	 * Following such an error, we should requery the server for the file's
454 	 * size.
455 	 */
456 	fstat(fd, &sb);
457 	ASSERT_EQ(sb.st_size, some_other_size);
458 
459 	leak(fd);
460 }
461 
462 /*
463  * Don't crash if the server returns a write that can't be represented as a
464  * signed 32 bit number.  Regression test for
465  * https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=263263
466  */
467 TEST_F(Write, indirect_io_very_long_write)
468 {
469 	const char FULLPATH[] = "mountpoint/some_file.txt";
470 	const char RELPATH[] = "some_file.txt";
471 	const char *CONTENTS = "abcdefghijklmnop";
472 	uint64_t ino = 42;
473 	int fd;
474 	ssize_t bufsize = strlen(CONTENTS);
475 	ssize_t bufsize_out = 3 << 30;
476 
477 	expect_lookup(RELPATH, ino, 0);
478 	expect_open(ino, 0, 1);
479 	expect_write(ino, 0, bufsize, bufsize_out, CONTENTS);
480 
481 	fd = open(FULLPATH, O_WRONLY);
482 	ASSERT_LE(0, fd) << strerror(errno);
483 
484 	ASSERT_EQ(-1, write(fd, CONTENTS, bufsize)) << strerror(errno);
485 	ASSERT_EQ(EINVAL, errno);
486 	leak(fd);
487 }
488 
489 /*
490  * When the direct_io option is used, filesystems are allowed to write less
491  * data than requested.  We should return the short write to userland.
492  */
493 TEST_F(Write, direct_io_short_write)
494 {
495 	const char FULLPATH[] = "mountpoint/some_file.txt";
496 	const char RELPATH[] = "some_file.txt";
497 	const char *CONTENTS = "abcdefghijklmnop";
498 	uint64_t ino = 42;
499 	int fd;
500 	ssize_t bufsize = strlen(CONTENTS);
501 	ssize_t halfbufsize = bufsize / 2;
502 
503 	expect_lookup(RELPATH, ino, 0);
504 	expect_open(ino, FOPEN_DIRECT_IO, 1);
505 	expect_write(ino, 0, bufsize, halfbufsize, CONTENTS);
506 
507 	fd = open(FULLPATH, O_WRONLY);
508 	ASSERT_LE(0, fd) << strerror(errno);
509 
510 	ASSERT_EQ(halfbufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
511 	leak(fd);
512 }
513 
514 /*
515  * An insidious edge case: the filesystem returns a short write, and the
516  * difference between what we requested and what it actually wrote crosses an
517  * iov element boundary
518  */
519 TEST_F(Write, direct_io_short_write_iov)
520 {
521 	const char FULLPATH[] = "mountpoint/some_file.txt";
522 	const char RELPATH[] = "some_file.txt";
523 	const char *CONTENTS0 = "abcdefgh";
524 	const char *CONTENTS1 = "ijklmnop";
525 	const char *EXPECTED0 = "abcdefghijklmnop";
526 	uint64_t ino = 42;
527 	int fd;
528 	ssize_t size0 = strlen(CONTENTS0) - 1;
529 	ssize_t size1 = strlen(CONTENTS1) + 1;
530 	ssize_t totalsize = size0 + size1;
531 	struct iovec iov[2];
532 
533 	expect_lookup(RELPATH, ino, 0);
534 	expect_open(ino, FOPEN_DIRECT_IO, 1);
535 	expect_write(ino, 0, totalsize, size0, EXPECTED0);
536 
537 	fd = open(FULLPATH, O_WRONLY);
538 	ASSERT_LE(0, fd) << strerror(errno);
539 
540 	iov[0].iov_base = __DECONST(void*, CONTENTS0);
541 	iov[0].iov_len = strlen(CONTENTS0);
542 	iov[1].iov_base = __DECONST(void*, CONTENTS1);
543 	iov[1].iov_len = strlen(CONTENTS1);
544 	ASSERT_EQ(size0, writev(fd, iov, 2)) << strerror(errno);
545 	leak(fd);
546 }
547 
548 /* fusefs should respect RLIMIT_FSIZE */
549 TEST_P(WriteRlimitFsize, rlimit_fsize)
550 {
551 	const char FULLPATH[] = "mountpoint/some_file.txt";
552 	const char RELPATH[] = "some_file.txt";
553 	const char *CONTENTS = "abcdefgh";
554 	struct rlimit rl;
555 	ssize_t bufsize = strlen(CONTENTS);
556 	off_t offset = 1'000'000'000;
557 	uint64_t ino = 42;
558 	int fd, oflag;
559 
560 	oflag = GetParam();
561 
562 	expect_lookup(RELPATH, ino, 0);
563 	expect_open(ino, 0, 1);
564 
565 	rl.rlim_cur = offset;
566 	rl.rlim_max = m_initial_limit.rlim_max;
567 	ASSERT_EQ(0, setrlimit(RLIMIT_FSIZE, &rl)) << strerror(errno);
568 	ASSERT_NE(SIG_ERR, signal(SIGXFSZ, sigxfsz_handler)) << strerror(errno);
569 
570 	fd = open(FULLPATH, O_WRONLY | oflag);
571 
572 	ASSERT_LE(0, fd) << strerror(errno);
573 
574 	ASSERT_EQ(-1, pwrite(fd, CONTENTS, bufsize, offset));
575 	EXPECT_EQ(EFBIG, errno);
576 	EXPECT_EQ(1, s_sigxfsz);
577 	leak(fd);
578 }
579 
580 /*
581  * When crossing the RLIMIT_FSIZE boundary, writes should be truncated, not
582  * aborted.
583  * https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=164793
584  */
585 TEST_P(WriteRlimitFsize, rlimit_fsize_truncate)
586 {
587 	const char FULLPATH[] = "mountpoint/some_file.txt";
588 	const char RELPATH[] = "some_file.txt";
589 	const char *CONTENTS = "abcdefghijklmnopqrstuvwxyz";
590 	struct rlimit rl;
591 	ssize_t bufsize = strlen(CONTENTS);
592 	uint64_t ino = 42;
593 	off_t offset = 1 << 30;
594 	off_t limit = offset + strlen(CONTENTS) / 2;
595 	int fd, oflag;
596 
597 	oflag = GetParam();
598 
599 	expect_lookup(RELPATH, ino, 0);
600 	expect_open(ino, 0, 1);
601 	expect_write(ino, offset, bufsize / 2, bufsize / 2, CONTENTS);
602 
603 	rl.rlim_cur = limit;
604 	rl.rlim_max = m_initial_limit.rlim_max;
605 	ASSERT_EQ(0, setrlimit(RLIMIT_FSIZE, &rl)) << strerror(errno);
606 	ASSERT_NE(SIG_ERR, signal(SIGXFSZ, sigxfsz_handler)) << strerror(errno);
607 
608 	fd = open(FULLPATH, O_WRONLY | oflag);
609 
610 	ASSERT_LE(0, fd) << strerror(errno);
611 
612 	ASSERT_EQ(bufsize / 2, pwrite(fd, CONTENTS, bufsize, offset))
613 		<< strerror(errno);
614 	leak(fd);
615 }
616 
617 INSTANTIATE_TEST_CASE_P(W, WriteRlimitFsize,
618 	Values(0, O_DIRECT)
619 );
620 
621 /*
622  * A short read indicates EOF.  Test that nothing bad happens if we get EOF
623  * during the R of a RMW operation.
624  */
625 TEST_F(Write, eof_during_rmw)
626 {
627 	const char FULLPATH[] = "mountpoint/some_file.txt";
628 	const char RELPATH[] = "some_file.txt";
629 	const char *CONTENTS = "abcdefgh";
630 	const char *INITIAL   = "XXXXXXXXXX";
631 	uint64_t ino = 42;
632 	uint64_t offset = 1;
633 	ssize_t bufsize = strlen(CONTENTS) + 1;
634 	off_t orig_fsize = 10;
635 	off_t truncated_fsize = 5;
636 	int fd;
637 
638 	FuseTest::expect_lookup(RELPATH, ino, S_IFREG | 0644, orig_fsize, 1);
639 	expect_open(ino, 0, 1);
640 	expect_read(ino, 0, orig_fsize, truncated_fsize, INITIAL, O_RDWR);
641 	maybe_expect_write(ino, offset, bufsize, CONTENTS);
642 
643 	fd = open(FULLPATH, O_RDWR);
644 	ASSERT_LE(0, fd) << strerror(errno);
645 
646 	ASSERT_EQ(bufsize, pwrite(fd, CONTENTS, bufsize, offset))
647 		<< strerror(errno);
648 	leak(fd);
649 }
650 
651 /*
652  * VOP_STRATEGY should not query the server for the file's size, even if its
653  * cached attributes have expired.
654  * Regression test for https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=256937
655  */
656 TEST_P(WriteEofDuringVnopStrategy, eof_during_vop_strategy)
657 {
658 	const char FULLPATH[] = "mountpoint/some_file.txt";
659 	const char RELPATH[] = "some_file.txt";
660 	Sequence seq;
661 	const off_t filesize = 2 * m_maxbcachebuf;
662 	void *contents;
663 	uint64_t ino = 42;
664 	uint64_t attr_valid = 0;
665 	uint64_t attr_valid_nsec = 0;
666 	mode_t mode = S_IFREG | 0644;
667 	int fd;
668 	int ngetattrs;
669 
670 	ngetattrs = GetParam();
671 	contents = calloc(1, filesize);
	ASSERT_NE(nullptr, contents);
672 
673 	EXPECT_LOOKUP(FUSE_ROOT_ID, RELPATH)
674 	.WillRepeatedly(Invoke(
675 		ReturnImmediate([=](auto in __unused, auto& out) {
676 		SET_OUT_HEADER_LEN(out, entry);
677 		out.body.entry.attr.mode = mode;
678 		out.body.entry.nodeid = ino;
679 		out.body.entry.attr.nlink = 1;
680 		out.body.entry.attr.size = filesize;
681 		out.body.entry.attr_valid = attr_valid;
682 		out.body.entry.attr_valid_nsec = attr_valid_nsec;
683 	})));
684 	expect_open(ino, 0, 1);
685 	EXPECT_CALL(*m_mock, process(
686 		ResultOf([=](auto in) {
687 			return (in.header.opcode == FUSE_GETATTR &&
688 				in.header.nodeid == ino);
689 		}, Eq(true)),
690 		_)
691 	).Times(Between(ngetattrs - 1, ngetattrs))
692 	.InSequence(seq)
693 	.WillRepeatedly(Invoke(ReturnImmediate([=](auto i __unused, auto& out) {
694 		SET_OUT_HEADER_LEN(out, attr);
695 		out.body.attr.attr.ino = ino;
696 		out.body.attr.attr.mode = mode;
697 		out.body.attr.attr_valid = attr_valid;
698 		out.body.attr.attr_valid_nsec = attr_valid_nsec;
699 		out.body.attr.attr.size = filesize;
700 	})));
701 	EXPECT_CALL(*m_mock, process(
702 		ResultOf([=](auto in) {
703 			return (in.header.opcode == FUSE_GETATTR &&
704 				in.header.nodeid == ino);
705 		}, Eq(true)),
706 		_)
707 	).InSequence(seq)
708 	.WillRepeatedly(Invoke(ReturnImmediate([=](auto i __unused, auto& out) {
709 		SET_OUT_HEADER_LEN(out, attr);
710 		out.body.attr.attr.ino = ino;
711 		out.body.attr.attr.mode = mode;
712 		out.body.attr.attr_valid = attr_valid;
713 		out.body.attr.attr_valid_nsec = attr_valid_nsec;
714 		out.body.attr.attr.size = filesize / 2;
715 	})));
716 	expect_write(ino, 0, filesize / 2, filesize / 2, contents);
717 
718 	fd = open(FULLPATH, O_RDWR);
719 	ASSERT_LE(0, fd) << strerror(errno);
720 	ASSERT_EQ(filesize / 2, write(fd, contents, filesize / 2))
721 		<< strerror(errno);
	leak(fd);
	free(contents);
723 }
724 
725 INSTANTIATE_TEST_CASE_P(W, WriteEofDuringVnopStrategy,
726 	Values(1, 2, 3)
727 );
728 
729 /*
730  * If the kernel cannot be sure which uid, gid, or pid was responsible for a
731  * write, then it must set the FUSE_WRITE_CACHE bit
732  */
733 /* https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=236378 */
734 TEST_F(Write, mmap)
735 {
736 	const char FULLPATH[] = "mountpoint/some_file.txt";
737 	const char RELPATH[] = "some_file.txt";
738 	const char *CONTENTS = "abcdefgh";
739 	uint64_t ino = 42;
740 	int fd;
741 	ssize_t bufsize = strlen(CONTENTS);
742 	void *p;
743 	uint64_t offset = 10;
744 	size_t len;
745 	void *zeros, *expected;
746 
747 	len = getpagesize();
748 
749 	zeros = calloc(1, len);
750 	ASSERT_NE(nullptr, zeros);
751 	expected = calloc(1, len);
752 	ASSERT_NE(nullptr, expected);
753 	memmove((uint8_t*)expected + offset, CONTENTS, bufsize);
754 
755 	expect_lookup(RELPATH, ino, len);
756 	expect_open(ino, 0, 1);
757 	expect_read(ino, 0, len, len, zeros);
758 	/*
759 	 * Writes from the pager may or may not be associated with the correct
760 	 * pid, so they must set FUSE_WRITE_CACHE.
761 	 */
762 	FuseTest::expect_write(ino, 0, len, len, FUSE_WRITE_CACHE, 0, expected);
763 	expect_flush(ino, 1, ReturnErrno(0));
764 	expect_release(ino, ReturnErrno(0));
765 
766 	fd = open(FULLPATH, O_RDWR);
767 	ASSERT_LE(0, fd) << strerror(errno);
768 
769 	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
770 	ASSERT_NE(MAP_FAILED, p) << strerror(errno);
771 
772 	memmove((uint8_t*)p + offset, CONTENTS, bufsize);
773 
774 	ASSERT_EQ(0, munmap(p, len)) << strerror(errno);
775 	close(fd);	// Write mmap'd data on close
776 
777 	free(expected);
778 	free(zeros);
779 
780 	leak(fd);
781 }
782 
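/* A pwrite to an uncached region should be sent as a FUSE_WRITE at that offset */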
783 TEST_F(Write, pwrite)
784 {
785 	const char FULLPATH[] = "mountpoint/some_file.txt";
786 	const char RELPATH[] = "some_file.txt";
787 	const char *CONTENTS = "abcdefgh";
788 	uint64_t ino = 42;
789 	uint64_t offset = m_maxbcachebuf;
790 	int fd;
791 	ssize_t bufsize = strlen(CONTENTS);
792 
793 	expect_lookup(RELPATH, ino, 0);
794 	expect_open(ino, 0, 1);
795 	expect_write(ino, offset, bufsize, bufsize, CONTENTS);
796 
797 	fd = open(FULLPATH, O_WRONLY);
798 	ASSERT_LE(0, fd) << strerror(errno);
799 
800 	ASSERT_EQ(bufsize, pwrite(fd, CONTENTS, bufsize, offset))
801 		<< strerror(errno);
802 	leak(fd);
803 }
804 
805 /* Writing a file should update its cached mtime and ctime */
806 TEST_F(Write, timestamps)
807 {
808 	const char FULLPATH[] = "mountpoint/some_file.txt";
809 	const char RELPATH[] = "some_file.txt";
810 	const char *CONTENTS = "abcdefgh";
811 	ssize_t bufsize = strlen(CONTENTS);
812 	uint64_t ino = 42;
813 	struct stat sb0, sb1;
814 	int fd;
815 
816 	expect_lookup(RELPATH, ino, 0);
817 	expect_open(ino, 0, 1);
818 	maybe_expect_write(ino, 0, bufsize, CONTENTS);
819 
820 	fd = open(FULLPATH, O_RDWR);
821 	ASSERT_LE(0, fd) << strerror(errno);
822 	ASSERT_EQ(0, fstat(fd, &sb0)) << strerror(errno);
823 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
824 
825 	nap();
826 
827 	ASSERT_EQ(0, fstat(fd, &sb1)) << strerror(errno);
828 
829 	EXPECT_EQ(sb0.st_atime, sb1.st_atime);
830 	EXPECT_NE(sb0.st_mtime, sb1.st_mtime);
831 	EXPECT_NE(sb0.st_ctime, sb1.st_ctime);
832 
833 	leak(fd);
834 }
835 
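/* A plain write(2) should be forwarded to the server as a single FUSE_WRITE */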
836 TEST_F(Write, write)
837 {
838 	const char FULLPATH[] = "mountpoint/some_file.txt";
839 	const char RELPATH[] = "some_file.txt";
840 	const char *CONTENTS = "abcdefgh";
841 	uint64_t ino = 42;
842 	int fd;
843 	ssize_t bufsize = strlen(CONTENTS);
844 
845 	expect_lookup(RELPATH, ino, 0);
846 	expect_open(ino, 0, 1);
847 	expect_write(ino, 0, bufsize, bufsize, CONTENTS);
848 
849 	fd = open(FULLPATH, O_WRONLY);
850 	ASSERT_LE(0, fd) << strerror(errno);
851 
852 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
853 	leak(fd);
854 }
855 
856 /* fuse(4) should not issue writes of greater size than the daemon requests */
857 TEST_F(WriteMaxWrite, write)
858 {
859 	const char FULLPATH[] = "mountpoint/some_file.txt";
860 	const char RELPATH[] = "some_file.txt";
861 	int *contents;
862 	uint64_t ino = 42;
863 	int fd;
864 	ssize_t halfbufsize, bufsize;
865 
866 	halfbufsize = m_mock->m_maxwrite;
867 	if (halfbufsize >= m_maxbcachebuf || halfbufsize >= m_maxphys)
868 		GTEST_SKIP() << "Must lower m_maxwrite for this test";
869 	bufsize = halfbufsize * 2;
870 	contents = (int*)malloc(bufsize);
871 	ASSERT_NE(nullptr, contents);
872 	for (int i = 0; i < (int)bufsize / (int)sizeof(i); i++) {
873 		contents[i] = i;
874 	}
875 
876 	expect_lookup(RELPATH, ino, 0);
877 	expect_open(ino, 0, 1);
878 	maybe_expect_write(ino, 0, halfbufsize, contents);
879 	maybe_expect_write(ino, halfbufsize, halfbufsize,
880 		&contents[halfbufsize / sizeof(int)]);
881 
882 	fd = open(FULLPATH, O_WRONLY);
883 	ASSERT_LE(0, fd) << strerror(errno);
884 
885 	ASSERT_EQ(bufsize, write(fd, contents, bufsize)) << strerror(errno);
886 	leak(fd);
887 
888 	free(contents);
889 }
890 
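/* A zero-length write should succeed without sending any FUSE_WRITE */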
891 TEST_F(Write, write_nothing)
892 {
893 	const char FULLPATH[] = "mountpoint/some_file.txt";
894 	const char RELPATH[] = "some_file.txt";
895 	const char *CONTENTS = "";
896 	uint64_t ino = 42;
897 	int fd;
898 	ssize_t bufsize = 0;
899 
900 	expect_lookup(RELPATH, ino, 0);
901 	expect_open(ino, 0, 1);
902 
903 	fd = open(FULLPATH, O_WRONLY);
904 	ASSERT_LE(0, fd) << strerror(errno);
905 
906 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
907 	leak(fd);
908 }
909 
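/* Writes should use the protocol 7.8 version of fuse_write_in */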
910 TEST_F(Write_7_8, write)
911 {
912 	const char FULLPATH[] = "mountpoint/some_file.txt";
913 	const char RELPATH[] = "some_file.txt";
914 	const char *CONTENTS = "abcdefgh";
915 	uint64_t ino = 42;
916 	int fd;
917 	ssize_t bufsize = strlen(CONTENTS);
918 
919 	expect_lookup(RELPATH, ino, 0);
920 	expect_open(ino, 0, 1);
921 	expect_write_7_8(ino, 0, bufsize, bufsize, CONTENTS);
922 
923 	fd = open(FULLPATH, O_WRONLY);
924 	ASSERT_LE(0, fd) << strerror(errno);
925 
926 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
927 	leak(fd);
928 }
929 
930 /* In writeback mode, dirty data should be written on close */
931 TEST_F(WriteBackAsync, close)
932 {
933 	const char FULLPATH[] = "mountpoint/some_file.txt";
934 	const char RELPATH[] = "some_file.txt";
935 	const char *CONTENTS = "abcdefgh";
936 	uint64_t ino = 42;
937 	int fd;
938 	ssize_t bufsize = strlen(CONTENTS);
939 
940 	expect_lookup(RELPATH, ino, 0);
941 	expect_open(ino, 0, 1);
942 	expect_write(ino, 0, bufsize, bufsize, CONTENTS);
943 	EXPECT_CALL(*m_mock, process(
944 		ResultOf([=](auto in) {
945 			return (in.header.opcode == FUSE_SETATTR);
946 		}, Eq(true)),
947 		_)
948 	).WillRepeatedly(Invoke(ReturnImmediate([=](auto i __unused, auto& out) {
949 		SET_OUT_HEADER_LEN(out, attr);
950 		out.body.attr.attr.ino = ino;	// Must match nodeid
951 	})));
952 	expect_flush(ino, 1, ReturnErrno(0));
953 	expect_release(ino, ReturnErrno(0));
954 
955 	fd = open(FULLPATH, O_RDWR);
956 	ASSERT_LE(0, fd) << strerror(errno);
957 
958 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
959 	close(fd);
960 }
961 
962 /* In writeback mode, adjacent writes will be clustered together */
963 TEST_F(WriteCluster, clustering)
964 {
965 	const char FULLPATH[] = "mountpoint/some_file.txt";
966 	const char RELPATH[] = "some_file.txt";
967 	uint64_t ino = 42;
968 	int i, fd;
969 	void *wbuf, *wbuf2x;
970 	ssize_t bufsize = m_maxbcachebuf;
971 	off_t filesize = 5 * bufsize;
972 
973 	wbuf = malloc(bufsize);
974 	ASSERT_NE(nullptr, wbuf) << strerror(errno);
975 	memset(wbuf, 'X', bufsize);
976 	wbuf2x = malloc(2 * bufsize);
977 	ASSERT_NE(nullptr, wbuf2x) << strerror(errno);
978 	memset(wbuf2x, 'X', 2 * bufsize);
979 
980 	expect_lookup(RELPATH, ino, filesize);
981 	expect_open(ino, 0, 1);
982 	/*
983 	 * Writes of bufsize bytes each should be clustered into greater sizes.
984 	 * The amount of clustering is adaptive, so the first write actually
985 	 * issued will be 2x bufsize and subsequent writes may be larger.
986 	 */
987 	expect_write(ino, 0, 2 * bufsize, 2 * bufsize, wbuf2x);
988 	expect_write(ino, 2 * bufsize, 2 * bufsize, 2 * bufsize, wbuf2x);
989 	expect_flush(ino, 1, ReturnErrno(0));
990 	expect_release(ino, ReturnErrno(0));
991 
992 	fd = open(FULLPATH, O_RDWR);
993 	ASSERT_LE(0, fd) << strerror(errno);
994 
995 	for (i = 0; i < 4; i++) {
996 		ASSERT_EQ(bufsize, write(fd, wbuf, bufsize))
997 			<< strerror(errno);
998 	}
999 	close(fd);
1000 	free(wbuf2x);
1001 	free(wbuf);
1002 }
1003 
1004 /*
1005  * When clustering writes, an I/O error to any of the cluster's children should
1006  * not panic the system on unmount
1007  */
1008 /*
1009  * Regression test for bug 238565
1010  * https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=238565
1011  */
1012 TEST_F(WriteCluster, cluster_write_err)
1013 {
1014 	const char FULLPATH[] = "mountpoint/some_file.txt";
1015 	const char RELPATH[] = "some_file.txt";
1016 	uint64_t ino = 42;
1017 	int i, fd;
1018 	void *wbuf;
1019 	ssize_t bufsize = m_maxbcachebuf;
1020 	off_t filesize = 4 * bufsize;
1021 
1022 	wbuf = malloc(bufsize);
1023 	ASSERT_NE(nullptr, wbuf) << strerror(errno);
1024 	memset(wbuf, 'X', bufsize);
1025 
1026 	expect_lookup(RELPATH, ino, filesize);
1027 	expect_open(ino, 0, 1);
1028 	EXPECT_CALL(*m_mock, process(
1029 		ResultOf([=](auto in) {
1030 			return (in.header.opcode == FUSE_WRITE);
1031 		}, Eq(true)),
1032 		_)
1033 	).WillRepeatedly(Invoke(ReturnErrno(EIO)));
1034 	expect_flush(ino, 1, ReturnErrno(0));
1035 	expect_release(ino, ReturnErrno(0));
1036 
1037 	fd = open(FULLPATH, O_RDWR);
1038 	ASSERT_LE(0, fd) << strerror(errno);
1039 
1040 	for (i = 0; i < 3; i++) {
1041 		ASSERT_EQ(bufsize, write(fd, wbuf, bufsize))
1042 			<< strerror(errno);
1043 	}
1044 	close(fd);
1045 	free(wbuf);
1046 }
1047 
1048 /*
1049  * In writeback mode, writes to an O_WRONLY file could trigger reads from the
1050  * server.  The FUSE protocol explicitly allows that.
1051  */
1052 TEST_F(WriteBack, rmw)
1053 {
1054 	const char FULLPATH[] = "mountpoint/some_file.txt";
1055 	const char RELPATH[] = "some_file.txt";
1056 	const char *CONTENTS = "abcdefgh";
1057 	const char *INITIAL   = "XXXXXXXXXX";
1058 	uint64_t ino = 42;
1059 	uint64_t offset = 1;
1060 	off_t fsize = 10;
1061 	int fd;
1062 	ssize_t bufsize = strlen(CONTENTS);
1063 
1064 	FuseTest::expect_lookup(RELPATH, ino, S_IFREG | 0644, fsize, 1);
1065 	expect_open(ino, 0, 1);
1066 	expect_read(ino, 0, fsize, fsize, INITIAL, O_WRONLY);
1067 	maybe_expect_write(ino, offset, bufsize, CONTENTS);
1068 
1069 	fd = open(FULLPATH, O_WRONLY);
1070 	ASSERT_LE(0, fd) << strerror(errno);
1071 
1072 	ASSERT_EQ(bufsize, pwrite(fd, CONTENTS, bufsize, offset))
1073 		<< strerror(errno);
1074 	leak(fd);
1075 }
1076 
1077 /*
1078  * Without direct_io, writes should be committed to cache
1079  */
1080 TEST_F(WriteBack, cache)
1081 {
1082 	const char FULLPATH[] = "mountpoint/some_file.txt";
1083 	const char RELPATH[] = "some_file.txt";
1084 	const char *CONTENTS = "abcdefgh";
1085 	uint64_t ino = 42;
1086 	int fd;
1087 	ssize_t bufsize = strlen(CONTENTS);
1088 	uint8_t readbuf[bufsize];
1089 
1090 	expect_lookup(RELPATH, ino, 0);
1091 	expect_open(ino, 0, 1);
1092 	expect_write(ino, 0, bufsize, bufsize, CONTENTS);
1093 
1094 	fd = open(FULLPATH, O_RDWR);
1095 	ASSERT_LE(0, fd) << strerror(errno);
1096 
1097 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
1098 	/*
1099 	 * A subsequent read should be serviced by cache, without querying the
1100 	 * filesystem daemon
1101 	 */
1102 	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
1103 	ASSERT_EQ(bufsize, read(fd, readbuf, bufsize)) << strerror(errno);
1104 	leak(fd);
1105 }
1106 
1107 /*
1108  * With O_DIRECT, writes should not be committed to cache.  Admittedly this is
1109  * an odd test, because it would be unusual to use O_DIRECT for writes but not
1110  * reads.
1111  */
1112 TEST_F(WriteBack, o_direct)
1113 {
1114 	const char FULLPATH[] = "mountpoint/some_file.txt";
1115 	const char RELPATH[] = "some_file.txt";
1116 	const char *CONTENTS = "abcdefgh";
1117 	uint64_t ino = 42;
1118 	int fd;
1119 	ssize_t bufsize = strlen(CONTENTS);
1120 	uint8_t readbuf[bufsize];
1121 
1122 	expect_lookup(RELPATH, ino, 0);
1123 	expect_open(ino, 0, 1);
1124 	FuseTest::expect_write(ino, 0, bufsize, bufsize, 0, FUSE_WRITE_CACHE,
1125 		CONTENTS);
1126 	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
1127 
1128 	fd = open(FULLPATH, O_RDWR | O_DIRECT);
1129 	ASSERT_LE(0, fd) << strerror(errno);
1130 
1131 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
1132 	/* A subsequent read must query the daemon because cache is empty */
1133 	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
1134 	ASSERT_EQ(0, fcntl(fd, F_SETFL, 0)) << strerror(errno);
1135 	ASSERT_EQ(bufsize, read(fd, readbuf, bufsize)) << strerror(errno);
1136 	leak(fd);
1137 }
1138 
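/*
 * Like WriteBack.o_direct, but the server sets FOPEN_DIRECT_IO rather than the
 * client setting O_DIRECT.
 */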
1139 TEST_F(WriteBack, direct_io)
1140 {
1141 	const char FULLPATH[] = "mountpoint/some_file.txt";
1142 	const char RELPATH[] = "some_file.txt";
1143 	const char *CONTENTS = "abcdefgh";
1144 	uint64_t ino = 42;
1145 	int fd;
1146 	ssize_t bufsize = strlen(CONTENTS);
1147 	uint8_t readbuf[bufsize];
1148 
1149 	expect_lookup(RELPATH, ino, 0);
1150 	expect_open(ino, FOPEN_DIRECT_IO, 1);
1151 	FuseTest::expect_write(ino, 0, bufsize, bufsize, 0, FUSE_WRITE_CACHE,
1152 		CONTENTS);
1153 	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
1154 
1155 	fd = open(FULLPATH, O_RDWR);
1156 	ASSERT_LE(0, fd) << strerror(errno);
1157 
1158 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
1159 	/* A subsequent read must query the daemon because cache is empty */
1160 	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
1161 	ASSERT_EQ(0, fcntl(fd, F_SETFL, 0)) << strerror(errno);
1162 	ASSERT_EQ(bufsize, read(fd, readbuf, bufsize)) << strerror(errno);
1163 	leak(fd);
1164 }
1165 
1166 /*
1167  * mmap should still be possible even if the server used direct_io.  Mmap will
1168  * still use the cache, though.
1169  *
1170  * Regression test for bug 247276
1171  * https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=247276
1172  */
1173 TEST_F(WriteBack, mmap_direct_io)
1174 {
1175 	const char FULLPATH[] = "mountpoint/some_file.txt";
1176 	const char RELPATH[] = "some_file.txt";
1177 	const char *CONTENTS = "abcdefgh";
1178 	uint64_t ino = 42;
1179 	int fd;
1180 	size_t len;
1181 	ssize_t bufsize = strlen(CONTENTS);
1182 	void *p, *zeros;
1183 
1184 	len = getpagesize();
1185 	zeros = calloc(1, len);
1186 	ASSERT_NE(nullptr, zeros);
1187 
1188 	expect_lookup(RELPATH, ino, len);
1189 	expect_open(ino, FOPEN_DIRECT_IO, 1);
1190 	expect_read(ino, 0, len, len, zeros);
1191 	expect_flush(ino, 1, ReturnErrno(0));
1192 	FuseTest::expect_write(ino, 0, len, len, FUSE_WRITE_CACHE, 0, zeros);
1193 	expect_release(ino, ReturnErrno(0));
1194 
1195 	fd = open(FULLPATH, O_RDWR);
1196 	ASSERT_LE(0, fd) << strerror(errno);
1197 
1198 	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
1199 	ASSERT_NE(MAP_FAILED, p) << strerror(errno);
1200 
1201 	memmove((uint8_t*)p, CONTENTS, bufsize);
1202 
1203 	ASSERT_EQ(0, munmap(p, len)) << strerror(errno);
1204 	close(fd);	// Write mmap'd data on close
1205 
1206 	free(zeros);
1207 }
1208 
1209 /*
1210  * When mounted with -o async, the writeback cache mode should delay writes
1211  */
1212 TEST_F(WriteBackAsync, delay)
1213 {
1214 	const char FULLPATH[] = "mountpoint/some_file.txt";
1215 	const char RELPATH[] = "some_file.txt";
1216 	const char *CONTENTS = "abcdefgh";
1217 	uint64_t ino = 42;
1218 	int fd;
1219 	ssize_t bufsize = strlen(CONTENTS);
1220 
1221 	expect_lookup(RELPATH, ino, 0);
1222 	expect_open(ino, 0, 1);
1223 	/* Write should be cached, but FUSE_WRITE shouldn't be sent */
1224 	EXPECT_CALL(*m_mock, process(
1225 		ResultOf([=](auto in) {
1226 			return (in.header.opcode == FUSE_WRITE);
1227 		}, Eq(true)),
1228 		_)
1229 	).Times(0);
1230 
1231 	fd = open(FULLPATH, O_RDWR);
1232 	ASSERT_LE(0, fd) << strerror(errno);
1233 
1234 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
1235 
1236 	/* Don't close the file because that would flush the cache */
1237 	leak(fd);
1238 }
1239 
1240 /*
1241  * A direct write should not evict dirty cached data from outside of its own
1242  * byte range.
1243  */
1244 TEST_F(WriteBackAsync, direct_io_ignores_unrelated_cached)
1245 {
1246 	const char FULLPATH[] = "mountpoint/some_file.txt";
1247 	const char RELPATH[] = "some_file.txt";
1248 	const char CONTENTS0[] = "abcdefgh";
1249 	const char CONTENTS1[] = "ijklmnop";
1250 	uint64_t ino = 42;
1251 	int fd;
1252 	ssize_t bufsize = strlen(CONTENTS0) + 1;
1253 	ssize_t fsize = 2 * m_maxbcachebuf;
1254 	char readbuf[bufsize];
1255 	void *zeros;
1256 
1257 	zeros = calloc(1, m_maxbcachebuf);
1258 	ASSERT_NE(nullptr, zeros);
1259 
1260 	expect_lookup(RELPATH, ino, fsize);
1261 	expect_open(ino, 0, 1);
1262 	expect_read(ino, 0, m_maxbcachebuf, m_maxbcachebuf, zeros);
1263 	FuseTest::expect_write(ino, m_maxbcachebuf, bufsize, bufsize, 0, 0,
1264 		CONTENTS1);
1265 
1266 	fd = open(FULLPATH, O_RDWR);
1267 	ASSERT_LE(0, fd) << strerror(errno);
1268 
1269 	// Cache first block with dirty data.  This will entail first reading
1270 	// the existing data.
1271 	ASSERT_EQ(bufsize, pwrite(fd, CONTENTS0, bufsize, 0))
1272 		<< strerror(errno);
1273 
1274 	// Write directly to second block
1275 	ASSERT_EQ(0, fcntl(fd, F_SETFL, O_DIRECT)) << strerror(errno);
1276 	ASSERT_EQ(bufsize, pwrite(fd, CONTENTS1, bufsize, m_maxbcachebuf))
1277 		<< strerror(errno);
1278 
1279 	// Read from the first block again.  Should be serviced by cache.
1280 	ASSERT_EQ(0, fcntl(fd, F_SETFL, 0)) << strerror(errno);
1281 	ASSERT_EQ(bufsize, pread(fd, readbuf, bufsize, 0)) << strerror(errno);
1282 	ASSERT_STREQ(readbuf, CONTENTS0);
1283 
1284 	leak(fd);
1285 	free(zeros);
1286 }
1287 
1288 /*
1289  * If a direct I/O write partially overlaps one or two blocks of dirty cached
1290  * data, no dirty data should be lost.  Admittedly this is a weird test,
1291  * because it would be unusual to use O_DIRECT and the writeback cache.
1292  */
1293 TEST_F(WriteBackAsync, direct_io_partially_overlaps_cached_block)
1294 {
1295 	const char FULLPATH[] = "mountpoint/some_file.txt";
1296 	const char RELPATH[] = "some_file.txt";
1297 	uint64_t ino = 42;
1298 	int fd;
1299 	off_t bs = m_maxbcachebuf;
1300 	ssize_t fsize = 3 * bs;
1301 	void *readbuf, *zeros, *ones, *zeroones, *onezeros;
1302 
1303 	readbuf = malloc(bs);
1304 	ASSERT_NE(nullptr, readbuf) << strerror(errno);
1305 	zeros = calloc(1, 3 * bs);
1306 	ASSERT_NE(nullptr, zeros);
1307 	ones = calloc(1, 2 * bs);
1308 	ASSERT_NE(nullptr, ones);
1309 	memset(ones, 1, 2 * bs);
1310 	zeroones = calloc(1, bs);
1311 	ASSERT_NE(nullptr, zeroones);
1312 	memset((uint8_t*)zeroones + bs / 2, 1, bs / 2);
1313 	onezeros = calloc(1, bs);
1314 	ASSERT_NE(nullptr, onezeros);
1315 	memset(onezeros, 1, bs / 2);
1316 
1317 	expect_lookup(RELPATH, ino, fsize);
1318 	expect_open(ino, 0, 1);
1319 
1320 	fd = open(FULLPATH, O_RDWR);
1321 	ASSERT_LE(0, fd) << strerror(errno);
1322 
1323 	/* Cache first and third blocks with dirty data.  */
1324 	ASSERT_EQ(3 * bs, pwrite(fd, zeros, 3 * bs, 0)) << strerror(errno);
1325 
1326 	/*
1327 	 * Write directly to all three blocks.  The partially written blocks
1328 	 * will be flushed because they're dirty.
1329 	 */
1330 	FuseTest::expect_write(ino, 0, bs, bs, 0, 0, zeros);
1331 	FuseTest::expect_write(ino, 2 * bs, bs, bs, 0, 0, zeros);
1332 	/* The direct write is split in two because of the m_maxwrite value */
1333 	FuseTest::expect_write(ino,     bs / 2, bs, bs, 0, 0, ones);
1334 	FuseTest::expect_write(ino, 3 * bs / 2, bs, bs, 0, 0, ones);
1335 	ASSERT_EQ(0, fcntl(fd, F_SETFL, O_DIRECT)) << strerror(errno);
1336 	ASSERT_EQ(2 * bs, pwrite(fd, ones, 2 * bs, bs / 2)) << strerror(errno);
1337 
1338 	/*
1339 	 * Read from both the valid and invalid portions of the first and third
1340 	 * blocks again.  This will entail FUSE_READ operations because these
1341 	 * blocks were invalidated by the direct write.
1342 	 */
1343 	expect_read(ino, 0, bs, bs, zeroones);
1344 	expect_read(ino, 2 * bs, bs, bs, onezeros);
1345 	ASSERT_EQ(0, fcntl(fd, F_SETFL, 0)) << strerror(errno);
1346 	ASSERT_EQ(bs / 2, pread(fd, readbuf, bs / 2, 0)) << strerror(errno);
1347 	EXPECT_EQ(0, memcmp(zeros, readbuf, bs / 2));
1348 	ASSERT_EQ(bs / 2, pread(fd, readbuf, bs / 2, 5 * bs / 2))
1349 		<< strerror(errno);
1350 	EXPECT_EQ(0, memcmp(zeros, readbuf, bs / 2));
1351 	ASSERT_EQ(bs / 2, pread(fd, readbuf, bs / 2, bs / 2))
1352 		<< strerror(errno);
1353 	EXPECT_EQ(0, memcmp(ones, readbuf, bs / 2));
1354 	ASSERT_EQ(bs / 2, pread(fd, readbuf, bs / 2, 2 * bs))
1355 		<< strerror(errno);
1356 	EXPECT_EQ(0, memcmp(ones, readbuf, bs / 2));
1357 
1358 	leak(fd);
1359 	free(zeroones);
1360 	free(onezeros);
1361 	free(ones);
1362 	free(zeros);
1363 	free(readbuf);
1364 }
1365 
1366 /*
1367  * In WriteBack mode, writes may be cached beyond what the server thinks is the
1368  * EOF.  In this case, a short read at EOF should _not_ cause fusefs to update
1369  * the file's size.
1370  */
1371 TEST_F(WriteBackAsync, eof)
1372 {
1373 	const char FULLPATH[] = "mountpoint/some_file.txt";
1374 	const char RELPATH[] = "some_file.txt";
1375 	const char *CONTENTS0 = "abcdefgh";
1376 	const char *CONTENTS1 = "ijklmnop";
1377 	uint64_t ino = 42;
1378 	int fd;
1379 	off_t offset = m_maxbcachebuf;
1380 	ssize_t wbufsize = strlen(CONTENTS1);
1381 	off_t old_filesize = (off_t)strlen(CONTENTS0);
1382 	ssize_t rbufsize = 2 * old_filesize;
1383 	char readbuf[rbufsize];
1384 	size_t holesize = rbufsize - old_filesize;
1385 	char hole[holesize];
1386 	struct stat sb;
1387 	ssize_t r;
1388 
1389 	expect_lookup(RELPATH, ino, 0);
1390 	expect_open(ino, 0, 1);
1391 	expect_read(ino, 0, m_maxbcachebuf, old_filesize, CONTENTS0);
1392 
1393 	fd = open(FULLPATH, O_RDWR);
1394 	ASSERT_LE(0, fd) << strerror(errno);
1395 
1396 	/* Write and cache data beyond EOF */
1397 	ASSERT_EQ(wbufsize, pwrite(fd, CONTENTS1, wbufsize, offset))
1398 		<< strerror(errno);
1399 
1400 	/* Read from the old EOF */
1401 	r = pread(fd, readbuf, rbufsize, 0);
1402 	ASSERT_LE(0, r) << strerror(errno);
1403 	EXPECT_EQ(rbufsize, r) << "read should've synthesized a hole";
1404 	EXPECT_EQ(0, memcmp(CONTENTS0, readbuf, old_filesize));
1405 	bzero(hole, holesize);
1406 	EXPECT_EQ(0, memcmp(hole, readbuf + old_filesize, holesize));
1407 
1408 	/* The file's size should still be what was established by pwrite */
1409 	ASSERT_EQ(0, fstat(fd, &sb)) << strerror(errno);
1410 	EXPECT_EQ(offset + wbufsize, sb.st_size);
1411 	leak(fd);
1412 }
1413 
1414 /*
1415  * When a file has dirty writes that haven't been flushed, the server's notion
1416  * of its mtime and ctime will be wrong.  The kernel should ignore those if it
1417  * gets them from a FUSE_GETATTR before flushing.
1418  */
1419 TEST_F(WriteBackAsync, timestamps)
1420 {
1421 	const char FULLPATH[] = "mountpoint/some_file.txt";
1422 	const char RELPATH[] = "some_file.txt";
1423 	const char *CONTENTS = "abcdefgh";
1424 	ssize_t bufsize = strlen(CONTENTS);
1425 	uint64_t ino = 42;
1426 	uint64_t attr_valid = 0;
1427 	uint64_t attr_valid_nsec = 0;
1428 	uint64_t server_time = 12345;
1429 	mode_t mode = S_IFREG | 0644;
1430 	int fd;
1431 
1432 	struct stat sb;
1433 
1434 	EXPECT_LOOKUP(FUSE_ROOT_ID, RELPATH)
1435 	.WillRepeatedly(Invoke(
1436 		ReturnImmediate([=](auto in __unused, auto& out) {
1437 		SET_OUT_HEADER_LEN(out, entry);
1438 		out.body.entry.attr.mode = mode;
1439 		out.body.entry.nodeid = ino;
1440 		out.body.entry.attr.nlink = 1;
1441 		out.body.entry.attr_valid = attr_valid;
1442 		out.body.entry.attr_valid_nsec = attr_valid_nsec;
1443 	})));
1444 	expect_open(ino, 0, 1);
1445 	EXPECT_CALL(*m_mock, process(
1446 		ResultOf([=](auto in) {
1447 			return (in.header.opcode == FUSE_GETATTR &&
1448 				in.header.nodeid == ino);
1449 		}, Eq(true)),
1450 		_)
1451 	).WillRepeatedly(Invoke(
1452 	ReturnImmediate([=](auto i __unused, auto& out) {
1453 		SET_OUT_HEADER_LEN(out, attr);
1454 		out.body.attr.attr.ino = ino;
1455 		out.body.attr.attr.mode = mode;
1456 		out.body.attr.attr_valid = attr_valid;
1457 		out.body.attr.attr_valid_nsec = attr_valid_nsec;
1458 		out.body.attr.attr.atime = server_time;
1459 		out.body.attr.attr.mtime = server_time;
1460 		out.body.attr.attr.ctime = server_time;
1461 	})));
1462 
1463 	fd = open(FULLPATH, O_RDWR);
1464 	ASSERT_LE(0, fd) << strerror(errno);
1465 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
1466 
1467 	ASSERT_EQ(0, fstat(fd, &sb)) << strerror(errno);
1468 	EXPECT_EQ((time_t)server_time, sb.st_atime);
1469 	EXPECT_NE((time_t)server_time, sb.st_mtime);
1470 	EXPECT_NE((time_t)server_time, sb.st_ctime);
1471 
1472 	leak(fd);
1473 }
1474 
1475 /* Any dirty timestamp fields should be flushed during a SETATTR */
1476 TEST_F(WriteBackAsync, timestamps_during_setattr)
1477 {
1478 	const char FULLPATH[] = "mountpoint/some_file.txt";
1479 	const char RELPATH[] = "some_file.txt";
1480 	const char *CONTENTS = "abcdefgh";
1481 	ssize_t bufsize = strlen(CONTENTS);
1482 	uint64_t ino = 42;
1483 	const mode_t newmode = 0755;
1484 	int fd;
1485 
1486 	expect_lookup(RELPATH, ino, 0);
1487 	expect_open(ino, 0, 1);
1488 	EXPECT_CALL(*m_mock, process(
1489 		ResultOf([=](auto in) {
1490 			uint32_t valid = FATTR_MODE | FATTR_MTIME | FATTR_CTIME;
1491 			return (in.header.opcode == FUSE_SETATTR &&
1492 				in.header.nodeid == ino &&
1493 				in.body.setattr.valid == valid);
1494 		}, Eq(true)),
1495 		_)
1496 	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
1497 		SET_OUT_HEADER_LEN(out, attr);
1498 		out.body.attr.attr.ino = ino;
1499 		out.body.attr.attr.mode = S_IFREG | newmode;
1500 	})));
1501 
1502 	fd = open(FULLPATH, O_RDWR);
1503 	ASSERT_LE(0, fd) << strerror(errno);
1504 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
1505 	ASSERT_EQ(0, fchmod(fd, newmode)) << strerror(errno);
1506 
1507 	leak(fd);
1508 }
1509 
1510 /* fuse_init_out.time_gran controls the granularity of timestamps */
1511 TEST_P(TimeGran, timestamps_during_setattr)
1512 {
1513 	const char FULLPATH[] = "mountpoint/some_file.txt";
1514 	const char RELPATH[] = "some_file.txt";
1515 	const char *CONTENTS = "abcdefgh";
1516 	ssize_t bufsize = strlen(CONTENTS);
1517 	uint64_t ino = 42;
1518 	const mode_t newmode = 0755;
1519 	int fd;
1520 
1521 	expect_lookup(RELPATH, ino, 0);
1522 	expect_open(ino, 0, 1);
1523 	EXPECT_CALL(*m_mock, process(
1524 		ResultOf([=](auto in) {
1525 			uint32_t valid = FATTR_MODE | FATTR_MTIME | FATTR_CTIME;
1526 			return (in.header.opcode == FUSE_SETATTR &&
1527 				in.header.nodeid == ino &&
1528 				in.body.setattr.valid == valid &&
1529 				in.body.setattr.mtimensec % m_time_gran == 0 &&
1530 				in.body.setattr.ctimensec % m_time_gran == 0);
1531 		}, Eq(true)),
1532 		_)
1533 	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
1534 		SET_OUT_HEADER_LEN(out, attr);
1535 		out.body.attr.attr.ino = ino;
1536 		out.body.attr.attr.mode = S_IFREG | newmode;
1537 	})));
1538 
1539 	fd = open(FULLPATH, O_RDWR);
1540 	ASSERT_LE(0, fd) << strerror(errno);
1541 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
1542 	ASSERT_EQ(0, fchmod(fd, newmode)) << strerror(errno);
1543 
1544 	leak(fd);
1545 }
1546 
1547 INSTANTIATE_TEST_CASE_P(RA, TimeGran, Range(0u, 10u));
1548 
1549 /*
1550  * Without direct_io, writes should be committed to cache
1551  */
1552 TEST_F(Write, writethrough)
1553 {
1554 	const char FULLPATH[] = "mountpoint/some_file.txt";
1555 	const char RELPATH[] = "some_file.txt";
1556 	const char *CONTENTS = "abcdefgh";
1557 	uint64_t ino = 42;
1558 	int fd;
1559 	ssize_t bufsize = strlen(CONTENTS);
1560 	uint8_t readbuf[bufsize];
1561 
1562 	expect_lookup(RELPATH, ino, 0);
1563 	expect_open(ino, 0, 1);
1564 	expect_write(ino, 0, bufsize, bufsize, CONTENTS);
1565 
1566 	fd = open(FULLPATH, O_RDWR);
1567 	ASSERT_LE(0, fd) << strerror(errno);
1568 
1569 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
1570 	/*
1571 	 * A subsequent read should be serviced by cache, without querying the
1572 	 * filesystem daemon
1573 	 */
1574 	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
1575 	ASSERT_EQ(bufsize, read(fd, readbuf, bufsize)) << strerror(errno);
1576 	leak(fd);
1577 }
1578 
1579 /* Writes that extend a file should update the cached file size */
1580 TEST_F(Write, update_file_size)
1581 {
1582 	const char FULLPATH[] = "mountpoint/some_file.txt";
1583 	const char RELPATH[] = "some_file.txt";
1584 	const char *CONTENTS = "abcdefgh";
1585 	struct stat sb;
1586 	uint64_t ino = 42;
1587 	int fd;
1588 	ssize_t bufsize = strlen(CONTENTS);
1589 
1590 	expect_lookup(RELPATH, ino, 0);
1591 	expect_open(ino, 0, 1);
1592 	expect_write(ino, 0, bufsize, bufsize, CONTENTS);
1593 
1594 	fd = open(FULLPATH, O_RDWR);
1595 	ASSERT_LE(0, fd) << strerror(errno);
1596 
1597 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
1598 	/* Get cached attributes */
1599 	ASSERT_EQ(0, fstat(fd, &sb)) << strerror(errno);
1600 	ASSERT_EQ(bufsize, sb.st_size);
1601 	leak(fd);
1602 }
1603