xref: /freebsd/tests/sys/fs/fusefs/io.cc (revision 78ae60b4)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2019 The FreeBSD Foundation
5  *
6  * This software was developed by BFF Storage Systems, LLC under sponsorship
7  * from the FreeBSD Foundation.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 extern "C" {
32 #include <sys/types.h>
33 #include <sys/mman.h>
34 #include <sys/sysctl.h>
35 
36 #include <fcntl.h>
37 #include <stdlib.h>
38 #include <unistd.h>
39 }
40 
41 #include "mockfs.hh"
42 #include "utils.hh"
43 
44 /*
45  * For testing I/O like fsx does, but deterministically and without a real
46  * underlying file system
47  */
48 
49 using namespace testing;
50 
/* Path of the file under test, as seen through the FUSE mount */
const char FULLPATH[] = "mountpoint/some_file.txt";
/* The same file's name relative to the mountpoint, used for FUSE_LOOKUP */
const char RELPATH[] = "some_file.txt";
/* Arbitrary inode number used for the file in every mocked FUSE reply */
const uint64_t ino = 42;
54 
55 static void compare(const void *tbuf, const void *controlbuf, off_t baseofs,
56 	ssize_t size)
57 {
58 	int i;
59 
60 	for (i = 0; i < size; i++) {
61 		if (((const char*)tbuf)[i] != ((const char*)controlbuf)[i]) {
62 			off_t ofs = baseofs + i;
63 			FAIL() << "miscompare at offset "
64 			       << std::hex
65 			       << std::showbase
66 			       << ofs
67 			       << ".  expected = "
68 			       << std::setw(2)
69 			       << (unsigned)((const uint8_t*)controlbuf)[i]
70 			       << " got = "
71 			       << (unsigned)((const uint8_t*)tbuf)[i];
72 		}
73 	}
74 }
75 
76 typedef tuple<bool, uint32_t, cache_mode, uint32_t> IoParam;
77 
/*
 * Fixture for fsx-style pseudorandom I/O tests.
 *
 * Every do_* helper performs the same operation twice: once on the file in
 * the mock FUSE file system (m_test_fd) and once on an ordinary local file
 * (m_control_fd).  The mock server itself persists the FUSE file's data in
 * a third local file (m_backing_fd), so reads through the mount can be
 * compared against the control copy.
 */
class Io: public FuseTest, public WithParamInterface<IoParam> {
public:
/* backing store for the mock server, the control copy, and the fd under test */
int m_backing_fd, m_control_fd, m_test_fd;
/* Current size of the test file, maintained by the do_* helpers */
off_t m_filesize;
/* If true, the file is opened with FOPEN_DIRECT_IO (Uncached cache mode) */
bool m_direct_io;

Io(): m_backing_fd(-1), m_control_fd(-1), m_test_fd(-1), m_filesize(0),
	m_direct_io(false) {};

/*
 * Create the backing and control files, translate the test parameters into
 * FuseTest knobs, install mock handlers for every FUSE opcode the tests can
 * generate, and finally open the test file through the mount.
 */
void SetUp()
{
	m_backing_fd = open("backing_file", O_RDWR | O_CREAT | O_TRUNC, 0644);
	if (m_backing_fd < 0)
		FAIL() << strerror(errno);
	m_control_fd = open("control", O_RDWR | O_CREAT | O_TRUNC, 0644);
	if (m_control_fd < 0)
		FAIL() << strerror(errno);
	srandom(22'9'1982);	// Seed with my birthday

	if (get<0>(GetParam()))
		m_init_flags |= FUSE_ASYNC_READ;
	m_maxwrite = get<1>(GetParam());
	/* Map the cache_mode parameter onto mount/INIT settings */
	switch (get<2>(GetParam())) {
		case Uncached:
			m_direct_io = true;
			break;
		case WritebackAsync:
			m_async = true;
			/* FALLTHROUGH */
		case Writeback:
			m_init_flags |= FUSE_WRITEBACK_CACHE;
			/* FALLTHROUGH */
		case Writethrough:
			break;
		default:
			FAIL() << "Unknown cache mode";
	}
	m_kernel_minor_version = get<3>(GetParam());
	m_noatime = true;	// To prevent SETATTR for atime on close

	FuseTest::SetUp();
	if (IsSkipped())
		return;

	if (verbosity > 0) {
		printf("Test Parameters: init_flags=%#x maxwrite=%#x "
		    "%sasync cache=%s kernel_minor_version=%d\n",
		    m_init_flags, m_maxwrite, m_async? "" : "no",
		    cache_mode_to_s(get<2>(GetParam())),
		    m_kernel_minor_version);
	}

	expect_lookup(RELPATH, ino, S_IFREG | 0644, 0, 1);
	expect_open(ino, m_direct_io ? FOPEN_DIRECT_IO : 0, 1);
	/* FUSE_WRITE: store the payload in the backing file */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_WRITE &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnImmediate([=](auto in, auto& out) {
		const char *buf = (const char*)in.body.bytes +
			sizeof(struct fuse_write_in);
		ssize_t isize = in.body.write.size;
		off_t iofs = in.body.write.offset;

		assert((size_t)isize <= sizeof(in.body.bytes) -
			sizeof(struct fuse_write_in));
		ASSERT_EQ(isize, pwrite(m_backing_fd, buf, isize, iofs))
			<< strerror(errno);
		SET_OUT_HEADER_LEN(out, write);
		out.body.write.size = isize;
	})));
	/*
	 * FUSE_READ: satisfy from the backing file.  NOTE(review): size and
	 * offset are accessed via in.body.write; this presumably relies on
	 * fuse_read_in and fuse_write_in sharing that field layout -- confirm
	 * against fuse_kernel.h.
	 */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnImmediate([=](auto in, auto& out) {
		ssize_t isize = in.body.write.size;
		off_t iofs = in.body.write.offset;
		void *buf = out.body.bytes;
		ssize_t osize;

		assert((size_t)isize <= sizeof(out.body.bytes));
		osize = pread(m_backing_fd, buf, isize, iofs);
		ASSERT_LE(0, osize) << strerror(errno);
		/* A short pread (e.g. at EOF) yields a short FUSE reply */
		out.header.len = sizeof(struct fuse_out_header) + osize;
	})));
	/* FUSE_SETATTR with FATTR_SIZE: truncate the backing file too */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_SETATTR &&
				in.header.nodeid == ino &&
				(in.body.setattr.valid & FATTR_SIZE));

		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnImmediate([=](auto in, auto& out) {
		ASSERT_EQ(0, ftruncate(m_backing_fd, in.body.setattr.size))
			<< strerror(errno);
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.ino = ino;
		out.body.attr.attr.mode = S_IFREG | 0755;
		out.body.attr.attr.size = in.body.setattr.size;
		out.body.attr.attr_valid = UINT64_MAX;
	})));
	/* Any test that close()s will send FUSE_FLUSH and FUSE_RELEASE */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_FLUSH &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnErrno(0)));
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_RELEASE &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnErrno(0)));
	/* FUSE_COPY_FILE_RANGE: delegate to copy_file_range(2) on the backing file */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_COPY_FILE_RANGE &&
				in.header.nodeid == ino &&
				in.body.copy_file_range.nodeid_out == ino &&
				in.body.copy_file_range.flags == 0);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnImmediate([=](auto in, auto& out) {
		off_t off_in = in.body.copy_file_range.off_in;
		off_t off_out = in.body.copy_file_range.off_out;
		ASSERT_EQ((ssize_t)in.body.copy_file_range.len,
		    copy_file_range(m_backing_fd, &off_in, m_backing_fd,
			    &off_out, in.body.copy_file_range.len, 0));
		SET_OUT_HEADER_LEN(out, write);
		out.body.write.size = in.body.copy_file_range.len;
	})));
	/* Claim that we don't support FUSE_LSEEK */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_LSEEK);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnErrno(ENOSYS)));

	m_test_fd = open(FULLPATH, O_RDWR );
	EXPECT_LE(0, m_test_fd) << strerror(errno);
}

/* Close all three descriptors, then let FuseTest tear down the mount */
void TearDown()
{
	if (m_test_fd >= 0)
		close(m_test_fd);
	if (m_backing_fd >= 0)
		close(m_backing_fd);
	if (m_control_fd >= 0)
		close(m_control_fd);
	FuseTest::TearDown();
	leak(m_test_fd);
}

/*
 * Close and reopen both the test and control file descriptors.
 *
 * NOTE(review): m_test_fd is reopened from "backing_file", not FULLPATH, so
 * any I/O after this call bypasses the FUSE mount.  No test in this file
 * currently calls it; confirm whether that is intentional before use.
 */
void do_closeopen()
{
	ASSERT_EQ(0, close(m_test_fd)) << strerror(errno);
	m_test_fd = open("backing_file", O_RDWR);
	ASSERT_LE(0, m_test_fd) << strerror(errno);

	ASSERT_EQ(0, close(m_control_fd)) << strerror(errno);
	m_control_fd = open("control", O_RDWR);
	ASSERT_LE(0, m_control_fd) << strerror(errno);
}

/*
 * Copy [off_in, off_in + size) to [off_out, off_out + size) within each of
 * the test and control files, looping over short copies.
 */
void do_copy_file_range(off_t off_in, off_t off_out, size_t size)
{
	ssize_t r;
	off_t test_off_in = off_in;
	off_t test_off_out = off_out;
	off_t test_size = size;
	off_t control_off_in = off_in;
	off_t control_off_out = off_out;
	off_t control_size = size;

	while (test_size > 0) {
		r = copy_file_range(m_test_fd, &test_off_in, m_test_fd,
				&test_off_out, test_size, 0);
		ASSERT_GT(r, 0) << strerror(errno);
		test_size -= r;
	}
	while (control_size > 0) {
		r = copy_file_range(m_control_fd, &control_off_in, m_control_fd,
				&control_off_out, control_size, 0);
		ASSERT_GT(r, 0) << strerror(errno);
		control_size -= r;
	}
	m_filesize = std::max(m_filesize, off_out + (off_t)size);
}

/* Truncate both files to offs and record the new size */
void do_ftruncate(off_t offs)
{
	ASSERT_EQ(0, ftruncate(m_test_fd, offs)) << strerror(errno);
	ASSERT_EQ(0, ftruncate(m_control_fd, offs)) << strerror(errno);
	m_filesize = offs;
}

/*
 * Read [offs, offs + size) from the test file via mmap and compare it with
 * the same range pread from the control file.
 */
void do_mapread(off_t offs, ssize_t size)
{
	void *control_buf, *p;
	off_t pg_offset, page_mask;
	size_t map_size;

	/* mmap requires a page-aligned offset; map from the page boundary */
	page_mask = getpagesize() - 1;
	pg_offset = offs & page_mask;
	map_size = pg_offset + size;

	p = mmap(NULL, map_size, PROT_READ, MAP_FILE | MAP_SHARED, m_test_fd,
	    offs - pg_offset);
	ASSERT_NE(p, MAP_FAILED) << strerror(errno);

	control_buf = malloc(size);
	ASSERT_NE(nullptr, control_buf) << strerror(errno);

	ASSERT_EQ(size, pread(m_control_fd, control_buf, size, offs))
		<< strerror(errno);

	compare((void*)((char*)p + pg_offset), control_buf, offs, size);

	ASSERT_EQ(0, munmap(p, map_size)) << strerror(errno);
	free(control_buf);
}

/* pread the same range from both files and compare the results */
void do_read(off_t offs, ssize_t size)
{
	void *test_buf, *control_buf;
	ssize_t r;

	test_buf = malloc(size);
	ASSERT_NE(nullptr, test_buf) << strerror(errno);
	control_buf = malloc(size);
	ASSERT_NE(nullptr, control_buf) << strerror(errno);

	errno = 0;
	r = pread(m_test_fd, test_buf, size, offs);
	ASSERT_NE(-1, r) << strerror(errno);
	ASSERT_EQ(size, r) << "unexpected short read";
	r = pread(m_control_fd, control_buf, size, offs);
	ASSERT_NE(-1, r) << strerror(errno);
	ASSERT_EQ(size, r) << "unexpected short read";

	compare(test_buf, control_buf, offs, size);

	free(control_buf);
	free(test_buf);
}

/*
 * Write pseudorandom data to [offs, offs + size): via mmap into the test
 * file, and via pwrite into the control file.
 */
void do_mapwrite(off_t offs, ssize_t size)
{
	char *buf;
	void *p;
	off_t pg_offset, page_mask;
	size_t map_size;
	long i;

	/* mmap requires a page-aligned offset; map from the page boundary */
	page_mask = getpagesize() - 1;
	pg_offset = offs & page_mask;
	map_size = pg_offset + size;

	buf = (char*)malloc(size);
	ASSERT_NE(nullptr, buf) << strerror(errno);
	for (i=0; i < size; i++)
		buf[i] = random();

	if (offs + size > m_filesize) {
		/*
		 * Must manually extend.  vm_mmap_vnode will not implicitly
		 * extend a vnode
		 */
		do_ftruncate(offs + size);
	}

	p = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
	    MAP_FILE | MAP_SHARED, m_test_fd, offs - pg_offset);
	ASSERT_NE(p, MAP_FAILED) << strerror(errno);

	bcopy(buf, (char*)p + pg_offset, size);
	ASSERT_EQ(size, pwrite(m_control_fd, buf, size, offs))
		<< strerror(errno);

	free(buf);
	ASSERT_EQ(0, munmap(p, map_size)) << strerror(errno);
}

/* pwrite the same pseudorandom data to both files and track the file size */
void do_write(off_t offs, ssize_t size)
{
	char *buf;
	long i;

	buf = (char*)malloc(size);
	ASSERT_NE(nullptr, buf) << strerror(errno);
	for (i=0; i < size; i++)
		buf[i] = random();

	ASSERT_EQ(size, pwrite(m_test_fd, buf, size, offs ))
		<< strerror(errno);
	ASSERT_EQ(size, pwrite(m_control_fd, buf, size, offs))
		<< strerror(errno);
	m_filesize = std::max(m_filesize, offs + size);

	free(buf);
}

};
391 
392 class IoCacheable: public Io {
393 public:
394 virtual void SetUp() {
395 	Io::SetUp();
396 }
397 };
398 
399 class IoCopyFileRange: public Io {
400 public:
401 virtual void SetUp() {
402 	Io::SetUp();
403 }
404 };
405 
406 /*
407  * Extend a file with dirty data in the last page of the last block.
408  *
409  * fsx -WR -P /tmp -S8 -N3 fsx.bin
410  */
411 TEST_P(Io, extend_from_dirty_page)
412 {
413 	off_t wofs = 0x21a0;
414 	ssize_t wsize = 0xf0a8;
415 	off_t rofs = 0xb284;
416 	ssize_t rsize = 0x9b22;
417 	off_t truncsize = 0x28702;
418 
419 	do_write(wofs, wsize);
420 	do_ftruncate(truncsize);
421 	do_read(rofs, rsize);
422 }
423 
424 /*
425  * mapwrite into a newly extended part of a file.
426  *
427  * fsx -c 100 -i 100 -l 524288 -o 131072 -N5 -P /tmp -S19 fsx.bin
428  */
429 TEST_P(IoCacheable, extend_by_mapwrite)
430 {
431 	do_mapwrite(0x29a3a, 0x849e);	/* [0x29a3a, 0x31ed7] */
432 	do_mapwrite(0x3c7d8, 0x3994);	/* [0x3c7d8, 0x4016b] */
433 	do_read(0x30c16, 0xf556);	/* [0x30c16, 0x4016b] */
434 }
435 
436 /*
437  * When writing the last page of a file, it must be written synchronously.
438  * Otherwise the cached page can become invalid by a subsequent extend
439  * operation.
440  *
441  * fsx -WR -P /tmp -S642 -N3 fsx.bin
442  */
443 TEST_P(Io, last_page)
444 {
445 	do_write(0x1134f, 0xcc77);	/* [0x1134f, 0x1dfc5] */
446 	do_write(0x2096a, 0xdfa7);	/* [0x2096a, 0x2e910] */
447 	do_read(0x1a3aa, 0xb5b7);	/* [0x1a3aa, 0x25960] */
448 }
449 
450 /*
451  * Read a hole using mmap
452  *
453  * fsx -c 100 -i 100 -l 524288 -o 131072 -N11 -P /tmp  -S14 fsx.bin
454  */
455 TEST_P(IoCacheable, mapread_hole)
456 {
457 	do_write(0xf205, 0x123b7);	/* [0xf205, 0x215bb] */
458 	do_mapread(0x2f4c, 0xeeea);	/* [0x2f4c, 0x11e35] */
459 }
460 
461 /*
462  * Read a hole from a block that contains some cached data.
463  *
464  * fsx -WR -P /tmp -S55  fsx.bin
465  */
466 TEST_P(Io, read_hole_from_cached_block)
467 {
468 	off_t wofs = 0x160c5;
469 	ssize_t wsize = 0xa996;
470 	off_t rofs = 0x472e;
471 	ssize_t rsize = 0xd8d5;
472 
473 	do_write(wofs, wsize);
474 	do_read(rofs, rsize);
475 }
476 
477 /*
478  * Truncating a file into a dirty buffer should not causing anything untoward
479  * to happen when that buffer is eventually flushed.
480  *
481  * fsx -WR -P /tmp -S839 -d -N6 fsx.bin
482  */
483 TEST_P(Io, truncate_into_dirty_buffer)
484 {
485 	off_t wofs0 = 0x3bad7;
486 	ssize_t wsize0 = 0x4529;
487 	off_t wofs1 = 0xc30d;
488 	ssize_t wsize1 = 0x5f77;
489 	off_t truncsize0 = 0x10916;
490 	off_t rofs = 0xdf17;
491 	ssize_t rsize = 0x29ff;
492 	off_t truncsize1 = 0x152b4;
493 
494 	do_write(wofs0, wsize0);
495 	do_write(wofs1, wsize1);
496 	do_ftruncate(truncsize0);
497 	do_read(rofs, rsize);
498 	do_ftruncate(truncsize1);
499 	close(m_test_fd);
500 }
501 
502 /*
503  * Truncating a file into a dirty buffer should not causing anything untoward
504  * to happen when that buffer is eventually flushed, even when the buffer's
505  * dirty_off is > 0.
506  *
507  * Based on this command with a few steps removed:
508  * fsx -WR -P /tmp -S677 -d -N8 fsx.bin
509  */
510 TEST_P(Io, truncate_into_dirty_buffer2)
511 {
512 	off_t truncsize0 = 0x344f3;
513 	off_t wofs = 0x2790c;
514 	ssize_t wsize = 0xd86a;
515 	off_t truncsize1 = 0x2de38;
516 	off_t rofs2 = 0x1fd7a;
517 	ssize_t rsize2 = 0xc594;
518 	off_t truncsize2 = 0x31e71;
519 
520 	/* Sets the file size to something larger than the next write */
521 	do_ftruncate(truncsize0);
522 	/*
523 	 * Creates a dirty buffer.  The part in lbn 2 doesn't flush
524 	 * synchronously.
525 	 */
526 	do_write(wofs, wsize);
527 	/* Truncates part of the dirty buffer created in step 2 */
528 	do_ftruncate(truncsize1);
529 	/* XXX ?I don't know why this is necessary? */
530 	do_read(rofs2, rsize2);
531 	/* Truncates the dirty buffer */
532 	do_ftruncate(truncsize2);
533 	close(m_test_fd);
534 }
535 
536 /*
537  * Regression test for a bug introduced in r348931
538  *
539  * Sequence of operations:
540  * 1) The first write reads lbn so it can modify it
541  * 2) The first write flushes lbn 3 immediately because it's the end of file
542  * 3) The first write then flushes lbn 4 because it's the end of the file
543  * 4) The second write modifies the cached versions of lbn 3 and 4
544  * 5) The third write's getblkx invalidates lbn 4's B_CACHE because it's
545  *    extending the buffer.  Then it flushes lbn 4 because B_DELWRI was set but
546  *    B_CACHE was clear.
547  * 6) fuse_write_biobackend erroneously called vfs_bio_clrbuf, putting the
548  *    buffer into a weird write-only state.  All read operations would return
549  *    0.  Writes were apparently still processed, because the buffer's contents
550  *    were correct when examined in a core dump.
551  * 7) The third write reads lbn 4 because cache is clear
552  * 9) uiomove dutifully copies new data into the buffer
553  * 10) The buffer's dirty is flushed to lbn 4
554  * 11) The read returns all zeros because of step 6.
555  *
556  * Based on:
557  * fsx -WR -l 524388 -o 131072 -P /tmp -S6456 -q  fsx.bin
558  */
559 TEST_P(Io, resize_a_valid_buffer_while_extending)
560 {
561 	do_write(0x36ee6, 0x14530);	/* [0x36ee6, 0x4b415] */
562 	do_write(0x33256, 0x1507c);	/* [0x33256, 0x482d1] */
563 	do_write(0x4c03d, 0x175c);	/* [0x4c03d, 0x4d798] */
564 	do_read(0x3599c, 0xe277);	/* [0x3599c, 0x43c12] */
565 	close(m_test_fd);
566 }
567 
568 /*
569  * mmap of a suitable region could trigger a panic.  I'm not sure what
570  * combination of size and offset counts as "suitable".  Regression test for
571  * https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=276191
572  */
573 TEST_P(IoCacheable, vnode_pager_generic_putpage_clean_block_at_eof)
574 {
575 	do_mapwrite(0x3b4e0, 0x1bbc3);
576 }
577 
578 /*
579  * A copy_file_range that follows an mmap write to the input area needs to
580  * flush the mmap buffer first.
581  */
582 TEST_P(IoCopyFileRange, copy_file_range_from_mapped_write)
583 {
584 	do_mapwrite(0, 0x1000);
585 	do_copy_file_range(0, 0x1000, 0x1000);
586 	do_read(0x1000, 0x1000);
587 }
588 
589 
/* Full matrix for the generic Io tests, including the Uncached mode */
INSTANTIATE_TEST_SUITE_P(Io, Io,
	Combine(Bool(),					/* async read */
		Values(0x1000, 0x10000, 0x20000),	/* m_maxwrite */
		Values(Uncached, Writethrough, Writeback, WritebackAsync),
		Values(28)				/* kernel_minor_vers */
	)
);

/* Cache-dependent tests: Uncached (direct I/O) mode is omitted */
INSTANTIATE_TEST_SUITE_P(Io, IoCacheable,
	Combine(Bool(),					/* async read */
		Values(0x1000, 0x10000, 0x20000),	/* m_maxwrite */
		Values(Writethrough, Writeback, WritebackAsync),
		Values(28)				/* kernel_minor_vers */
	)
);

/*
 * copy_file_range tests run with minor versions 27 and 28; presumably 27
 * exercises the path for kernels predating FUSE_COPY_FILE_RANGE -- confirm
 * against the fuse protocol history.
 */
INSTANTIATE_TEST_SUITE_P(Io, IoCopyFileRange,
	Combine(Values(true),				/* async read */
		Values(0x10000),			/* m_maxwrite */
		Values(Writethrough, Writeback, WritebackAsync),
		Values(27, 28)				/* kernel_minor_vers */
	)
);
613