#-
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2022 The FreeBSD Foundation
#
# This software was developed by Mark Johnston under sponsorship from
# the FreeBSD Foundation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in
#    the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#

MAKEFS="makefs -t zfs -o nowarn=true"
ZFS_POOL_NAME="makefstest$$"
TEST_ZFS_POOL_NAME="$TMPDIR/poolname"

. "$(dirname "$0")/makefs_tests_common.sh"

common_cleanup()
{
	local pool md

	# Try to force a TXG; this can help catch bugs by triggering a panic.
	sync

	pool=$(cat $TEST_ZFS_POOL_NAME)
	if zpool list "$pool" >/dev/null; then
		zpool destroy "$pool"
	fi

	md=$(cat $TEST_MD_DEVICE_FILE)
	if [ -c /dev/"$md" ]; then
		mdconfig -d -u "$md"
	fi
}

import_image()
{
	atf_check -e empty -o save:$TEST_MD_DEVICE_FILE -s exit:0 \
	    mdconfig -a -f $TEST_IMAGE
	atf_check zpool import -R $TEST_MOUNT_DIR $ZFS_POOL_NAME
	echo "$ZFS_POOL_NAME" > $TEST_ZFS_POOL_NAME
}

#
# Test autoexpansion of the vdev.
#
# The pool is initially 10GB, so we get 10GB minus one metaslab's worth of
# usable space for data.  Then the pool is expanded to 50GB, and the amount of
# usable space is 50GB minus one metaslab.
#
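# Spelled out with the sizes used below (a 128MB metaslab), the pool sizes
# reported by "zpool list -Hp -o size" and asserted by this test work out to:
#   10GB image: 10737418240 - 134217728 = 10603200512 bytes
#   50GB image: 53687091200 - 134217728 = 53552873472 bytes
#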
atf_test_case autoexpand cleanup
autoexpand_body()
{
	local mssize poolsize poolsize1 newpoolsize

	create_test_inputs

	mssize=$((128 * 1024 * 1024))
	poolsize=$((10 * 1024 * 1024 * 1024))
	atf_check $MAKEFS -s $poolsize -o mssize=$mssize -o rootpath=/ \
	    -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	newpoolsize=$((50 * 1024 * 1024 * 1024))
	truncate -s $newpoolsize $TEST_IMAGE

	import_image

	check_image_contents

	poolsize1=$(zpool list -Hp -o size $ZFS_POOL_NAME)
	atf_check [ $((poolsize1 + $mssize)) -eq $poolsize ]

	atf_check zpool online -e $ZFS_POOL_NAME /dev/$(cat $TEST_MD_DEVICE_FILE)

	check_image_contents

	poolsize1=$(zpool list -Hp -o size $ZFS_POOL_NAME)
	atf_check [ $((poolsize1 + $mssize)) -eq $newpoolsize ]
}
autoexpand_cleanup()
{
	common_cleanup
}

#
# Test with some default layout defined by the common code.
#
atf_test_case basic cleanup
basic_body()
{
	create_test_inputs

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents
}
basic_cleanup()
{
	common_cleanup
}

atf_test_case dataset_removal cleanup
dataset_removal_body()
{
	create_test_dirs

	cd $TEST_INPUTS_DIR
	mkdir dir
	cd -

	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    -o fs=${ZFS_POOL_NAME}/dir \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents

	atf_check zfs destroy ${ZFS_POOL_NAME}/dir
}
dataset_removal_cleanup()
{
	common_cleanup
}

#
# Make sure that we can create and remove an empty directory.
#
atf_test_case empty_dir cleanup
empty_dir_body()
{
	create_test_dirs

	cd $TEST_INPUTS_DIR
	mkdir dir
	cd -

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents

	atf_check rmdir ${TEST_MOUNT_DIR}/dir
}
empty_dir_cleanup()
{
	common_cleanup
}

atf_test_case empty_fs cleanup
empty_fs_body()
{
	create_test_dirs

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents
}
empty_fs_cleanup()
{
	common_cleanup
}

atf_test_case file_sizes cleanup
file_sizes_body()
{
	local i

	create_test_dirs
	cd $TEST_INPUTS_DIR

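	# Create three files per power of two: one of exactly 2**k bytes and
	# one each of 2**k - 1 and 2**k + 1 bytes, for k = 0..19, bracketing
	# likely block size boundaries.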
	i=1
	while [ $i -lt $((1 << 20)) ]; do
		truncate -s $i ${i}.1
		truncate -s $(($i - 1)) ${i}.2
		truncate -s $(($i + 1)) ${i}.3
		i=$(($i << 1))
	done

	cd -

	# XXXMJ this creates sparse files, make sure makefs doesn't
	#       preserve the sparseness.
	# XXXMJ need to test with larger files (at least 128MB for L2 indirs)
	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents
}
file_sizes_cleanup()
{
	common_cleanup
}

atf_test_case hard_links cleanup
hard_links_body()
{
	local f

	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir
	echo "hello" > 1
	ln 1 2
	ln 1 dir/1

	echo "goodbye" > dir/a
	ln dir/a dir/b
	ln dir/a a

	cd -

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents

	stat -f '%i' ${TEST_MOUNT_DIR}/1 > ./ino
	stat -f '%l' ${TEST_MOUNT_DIR}/1 > ./nlink
	for f in 1 2 dir/1; do
		atf_check -o file:./nlink -e empty -s exit:0 \
		    stat -f '%l' ${TEST_MOUNT_DIR}/${f}
		atf_check -o file:./ino -e empty -s exit:0 \
		    stat -f '%i' ${TEST_MOUNT_DIR}/${f}
		atf_check cmp -s ${TEST_INPUTS_DIR}/1 ${TEST_MOUNT_DIR}/${f}
	done

	stat -f '%i' ${TEST_MOUNT_DIR}/dir/a > ./ino
	stat -f '%l' ${TEST_MOUNT_DIR}/dir/a > ./nlink
	for f in dir/a dir/b a; do
		atf_check -o file:./nlink -e empty -s exit:0 \
		    stat -f '%l' ${TEST_MOUNT_DIR}/${f}
		atf_check -o file:./ino -e empty -s exit:0 \
		    stat -f '%i' ${TEST_MOUNT_DIR}/${f}
		atf_check cmp -s ${TEST_INPUTS_DIR}/dir/a ${TEST_MOUNT_DIR}/${f}
	done
}
hard_links_cleanup()
{
	common_cleanup
}

# Allocate enough dnodes from an object set that the meta dnode needs to use
# indirect blocks.
atf_test_case indirect_dnode_array cleanup
indirect_dnode_array_body()
{
	local count i

	# How many dnodes do we need to allocate?  Well, the data block size
	# for meta dnodes is always 16KB, so with a dnode size of 512B we get
	# 32 dnodes per direct block.  The maximum indirect block size is 128KB
	# and, since each block pointer is 128 bytes, it can fit 1024 block
	# pointers, so we need at least 32 * 1024 files to force the use of two
	# levels of indirection.
	#
	# Unfortunately that number of files makes the test run quite slowly,
	# so we settle for a single indirect block for now...
	count=$(jot -r 1 32 1024)

	create_test_dirs
	cd $TEST_INPUTS_DIR
	for i in $(seq 1 $count); do
		touch $i
	done
	cd -

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents
}
indirect_dnode_array_cleanup()
{
	common_cleanup
}

#
# Create some files with long names, so as to test fat ZAP handling.
#
atf_test_case long_file_name cleanup
long_file_name_body()
{
	local dir i

	create_test_dirs
	cd $TEST_INPUTS_DIR

	# Micro ZAP keys can be at most 50 bytes, so longer names force makefs
	# to use a fat ZAP.  "jot -s '' $i 1 1" prints $i copies of "1", i.e.,
	# a file name of length $i.
	for i in $(seq 1 60); do
		touch $(jot -s '' $i 1 1)
	done
	dir=$(jot -s '' 61 1 1)
	mkdir $dir
	for i in $(seq 1 60); do
		touch ${dir}/$(jot -s '' $i 1 1)
	done

	cd -

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents

	# Add a directory entry in the hope that OpenZFS might catch a bug
	# in makefs' fat ZAP encoding.
	touch ${TEST_MOUNT_DIR}/foo
}
long_file_name_cleanup()
{
	common_cleanup
}

#
# Exercise handling of multiple datasets.
#
atf_test_case multi_dataset_1 cleanup
multi_dataset_1_body()
{
	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir1
	echo a > dir1/a
	mkdir dir2
	echo b > dir2/b

	cd -

	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    -o fs=${ZFS_POOL_NAME}/dir1 -o fs=${ZFS_POOL_NAME}/dir2 \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents

	# Make sure that we have three datasets with the expected mount points.
	atf_check -o inline:${ZFS_POOL_NAME}\\n -e empty -s exit:0 \
	    zfs list -H -o name ${ZFS_POOL_NAME}
	atf_check -o inline:${TEST_MOUNT_DIR}\\n -e empty -s exit:0 \
	    zfs list -H -o mountpoint ${ZFS_POOL_NAME}

	atf_check -o inline:${ZFS_POOL_NAME}/dir1\\n -e empty -s exit:0 \
	    zfs list -H -o name ${ZFS_POOL_NAME}/dir1
	atf_check -o inline:${TEST_MOUNT_DIR}/dir1\\n -e empty -s exit:0 \
	    zfs list -H -o mountpoint ${ZFS_POOL_NAME}/dir1

	atf_check -o inline:${ZFS_POOL_NAME}/dir2\\n -e empty -s exit:0 \
	    zfs list -H -o name ${ZFS_POOL_NAME}/dir2
	atf_check -o inline:${TEST_MOUNT_DIR}/dir2\\n -e empty -s exit:0 \
	    zfs list -H -o mountpoint ${ZFS_POOL_NAME}/dir2
}
multi_dataset_1_cleanup()
{
	common_cleanup
}

#
# Create a pool with two datasets, where the root dataset is mounted below
# the child dataset.
#
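# Concretely, ${ZFS_POOL_NAME}/dir1 is assigned mountpoint=/ and the root
# dataset is assigned mountpoint=/dir1; check_image_contents below verifies
# that the mounted hierarchy still matches the staged input tree.
#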
atf_test_case multi_dataset_2 cleanup
multi_dataset_2_body()
{
	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir1
	echo a > dir1/a
	mkdir dir2
	echo b > dir2/b

	cd -

	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    -o fs=${ZFS_POOL_NAME}/dir1\;mountpoint=/ \
	    -o fs=${ZFS_POOL_NAME}\;mountpoint=/dir1 \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents
}
multi_dataset_2_cleanup()
{
	common_cleanup
}

#
# Create a dataset with a non-existent mount point.
#
atf_test_case multi_dataset_3 cleanup
multi_dataset_3_body()
{
	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir1
	echo a > dir1/a

	cd -

	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    -o fs=${ZFS_POOL_NAME}/dir1 \
	    -o fs=${ZFS_POOL_NAME}/dir2 \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	atf_check -o inline:${TEST_MOUNT_DIR}/dir2\\n -e empty -s exit:0 \
	    zfs list -H -o mountpoint ${ZFS_POOL_NAME}/dir2

	# Mounting dir2 should have created a directory called dir2.  Go
	# back and create it in the staging tree before comparing.
	atf_check mkdir ${TEST_INPUTS_DIR}/dir2

	check_image_contents
}
multi_dataset_3_cleanup()
{
	common_cleanup
}

#
# Create an unmounted dataset.
#
atf_test_case multi_dataset_4 cleanup
multi_dataset_4_body()
{
	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir1
	echo a > dir1/a

	cd -

	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    -o fs=${ZFS_POOL_NAME}/dir1\;canmount=noauto\;mountpoint=none \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	atf_check -o inline:none\\n -e empty -s exit:0 \
	    zfs list -H -o mountpoint ${ZFS_POOL_NAME}/dir1

	check_image_contents

	atf_check zfs set mountpoint=/dir1 ${ZFS_POOL_NAME}/dir1
	atf_check zfs mount ${ZFS_POOL_NAME}/dir1
	atf_check -o inline:${TEST_MOUNT_DIR}/dir1\\n -e empty -s exit:0 \
	    zfs list -H -o mountpoint ${ZFS_POOL_NAME}/dir1

	# dir1/a should be part of the root dataset, not dir1.
	atf_check -s not-exit:0 -e not-empty stat ${TEST_MOUNT_DIR}/dir1/a
}
multi_dataset_4_cleanup()
{
	common_cleanup
}

#
# Validate handling of multiple staging directories.
#
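# Both staging directories are passed to makefs, so their contents should all
# end up in the image; the second directory is likewise passed to
# check_image_contents (via -d) so that the comparison covers it.
#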
atf_test_case multi_staging_1 cleanup
multi_staging_1_body()
{
	local tmpdir

	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir1
	echo a > a
	echo a > dir1/a
	echo z > z

	cd -

	tmpdir=$(mktemp -d)
	cd $tmpdir

	mkdir dir2 dir2/dir3
	echo b > dir2/b
	echo c > dir2/dir3/c
	ln -s dir2/dir3/c s

	cd -

	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE ${TEST_INPUTS_DIR} $tmpdir

	import_image

	check_image_contents -d $tmpdir
}
multi_staging_1_cleanup()
{
	common_cleanup
}

atf_test_case multi_staging_2 cleanup
multi_staging_2_body()
{
	local tmpdir

	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir
	echo a > dir/foo
	echo b > dir/bar

	cd -

	tmpdir=$(mktemp -d)
	cd $tmpdir

	mkdir dir
	echo c > dir/baz

	cd -

	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE ${TEST_INPUTS_DIR} $tmpdir

	import_image

	# check_image_contents can't easily handle merged directories, so
	# just check that the merged directory contains the files we expect.
	atf_check -o not-empty stat ${TEST_MOUNT_DIR}/dir/foo
	atf_check -o not-empty stat ${TEST_MOUNT_DIR}/dir/bar
	atf_check -o not-empty stat ${TEST_MOUNT_DIR}/dir/baz

	if [ "$(ls ${TEST_MOUNT_DIR}/dir | wc -l)" -ne 3 ]; then
		atf_fail "Expected 3 files in ${TEST_MOUNT_DIR}/dir"
	fi
}
multi_staging_2_cleanup()
{
	common_cleanup
}

#
# Rudimentary test to verify that two ZFS images created using the same
# parameters and input hierarchy are byte-identical.  This relies in
# particular on makefs(1) not preserving file access times, which would
# otherwise differ between the two runs.
#
atf_test_case reproducible cleanup
reproducible_body()
{
	create_test_inputs

	atf_check $MAKEFS -s 512m -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    ${TEST_IMAGE}.1 $TEST_INPUTS_DIR

	atf_check $MAKEFS -s 512m -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    ${TEST_IMAGE}.2 $TEST_INPUTS_DIR

	# XXX-MJ cmp(1) is really slow
	atf_check cmp ${TEST_IMAGE}.1 ${TEST_IMAGE}.2
}
reproducible_cleanup()
{
}

#
# Verify that we can take a snapshot of a generated dataset.
#
atf_test_case snapshot cleanup
snapshot_body()
{
	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir
	echo "hello" > dir/hello
	echo "goodbye" > goodbye

	cd -

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	atf_check zfs snapshot ${ZFS_POOL_NAME}@1
}
snapshot_cleanup()
{
	common_cleanup
}

#
# Check handling of symbolic links.
#
atf_test_case soft_links cleanup
soft_links_body()
{
	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir
	# Create a mix of self-referential, dangling and valid symlinks.
	ln -s a a
	ln -s dir/../a dir/a
	ln -s dir/b b
	echo 'c' > dir/c
	ln -s dir/c c
	# XXX-MJ overflows bonus buffer ln -s $(jot -s '' 320 1 1) 1

	cd -

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents
}
soft_links_cleanup()
{
	common_cleanup
}

#
# Verify that we can set properties on the root dataset.
#
atf_test_case root_props cleanup
root_props_body()
{
	create_test_inputs

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    -o fs=${ZFS_POOL_NAME}\;atime=off\;setuid=off \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents

	atf_check -o inline:off\\n -e empty -s exit:0 \
	    zfs get -H -o value atime $ZFS_POOL_NAME
	atf_check -o inline:local\\n -e empty -s exit:0 \
	    zfs get -H -o source atime $ZFS_POOL_NAME
	atf_check -o inline:off\\n -e empty -s exit:0 \
	    zfs get -H -o value setuid $ZFS_POOL_NAME
	atf_check -o inline:local\\n -e empty -s exit:0 \
	    zfs get -H -o source setuid $ZFS_POOL_NAME
}
root_props_cleanup()
{
	common_cleanup
}

atf_init_test_cases()
{
	atf_add_test_case autoexpand
	atf_add_test_case basic
	atf_add_test_case dataset_removal
	atf_add_test_case empty_dir
	atf_add_test_case empty_fs
	atf_add_test_case file_sizes
	atf_add_test_case hard_links
	atf_add_test_case indirect_dnode_array
	atf_add_test_case long_file_name
	atf_add_test_case multi_dataset_1
	atf_add_test_case multi_dataset_2
	atf_add_test_case multi_dataset_3
	atf_add_test_case multi_dataset_4
	atf_add_test_case multi_staging_1
	atf_add_test_case multi_staging_2
	atf_add_test_case reproducible
	atf_add_test_case snapshot
	atf_add_test_case soft_links
	atf_add_test_case root_props

	# XXXMJ tests:
	# - test with different ashifts (at least, 9 and 12), different image sizes
	# - create datasets in imported pool
}