#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#

#
# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
# Use is subject to license terms.
#

#
# Copyright (c) 2013, 2018 by Delphix. All rights reserved.
# Copyright (c) 2020 by Datto Inc. All rights reserved.
#

. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/include/math.shlib
. $STF_SUITE/tests/functional/cli_root/zfs_set/zfs_set_common.kshlib
. $STF_SUITE/tests/functional/rsend/rsend.cfg

#
# Set up the test model, which includes various datasets
#
#               @final
#               @snapB
#               @init
#               |
#   ______ pclone
#  |      /
#  |@psnap
#  ||                         @final
#  ||@final       @final      @snapC
#  ||@snapC       @snapC      @snapB
#  ||@snapA       @snapB      @snapA
#  ||@init        @init       @init
#  |||            |           |
# $pool -------- $FS ------- fs1 ------- fs2
#    \             \\_____     \          |
#     vol           vol   \____ \         @fsnap
#      |              |        \ \              \
#      @init          @vsnap   |  ------------ fclone
#      @snapA         @init \  |                    |
#      @final         @snapB \ |                    @init
#                     @snapC  vclone                @snapA
#                     @final       |                @final
#                                 @init
#                                 @snapC
#                                 @final
#
# $1 pool name
#
function setup_test_model
{
	typeset pool=$1

	log_must zfs create -p $pool/$FS/fs1/fs2

	log_must zfs snapshot $pool@psnap
	log_must zfs clone $pool@psnap $pool/pclone

	if is_global_zone ; then
		log_must zfs create -V 16M $pool/vol
		log_must zfs create -V 16M $pool/$FS/vol
		block_device_wait

		log_must zfs snapshot $pool/$FS/vol@vsnap
		log_must zfs clone $pool/$FS/vol@vsnap $pool/$FS/vclone
		block_device_wait
	fi

	log_must snapshot_tree $pool/$FS/fs1/fs2@fsnap
	log_must zfs clone $pool/$FS/fs1/fs2@fsnap $pool/$FS/fs1/fclone
	log_must zfs snapshot -r $pool@init

	log_must snapshot_tree $pool@snapA
	log_must snapshot_tree $pool@snapC
	log_must snapshot_tree $pool/pclone@snapB
	log_must snapshot_tree $pool/$FS@snapB
	log_must snapshot_tree $pool/$FS@snapC
	log_must snapshot_tree $pool/$FS/fs1@snapA
	log_must snapshot_tree $pool/$FS/fs1@snapB
	log_must snapshot_tree $pool/$FS/fs1@snapC
	log_must snapshot_tree $pool/$FS/fs1/fclone@snapA

	if is_global_zone ; then
		log_must zfs snapshot $pool/vol@snapA
		log_must zfs snapshot $pool/$FS/vol@snapB
		log_must zfs snapshot $pool/$FS/vol@snapC
		log_must zfs snapshot $pool/$FS/vclone@snapC
	fi

	log_must zfs snapshot -r $pool@final

	return 0
}
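
#
# Illustrative usage (not invoked by this library): a test case would
# typically build the model during setup and tear it down during cleanup,
# e.g. with the pools defined in rsend.cfg:
#
#	setup_test_model $POOL
#	... exercise zfs send/receive between $POOL and $POOL2 ...
#	cleanup_pool $POOL
#	cleanup_pool $POOL2
#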

#
# Clean up $BACKDIR and the given pool's contents, including all sub-datasets
#
# $1 pool name
#
function cleanup_pool
{
	typeset pool=$1
	log_must rm -rf $BACKDIR/*

	if is_global_zone ; then
		#
		# Linux: Issuing a `df` seems to properly force any negative
		# dcache entries to be invalidated preventing failures when
		# accessing the mount point. Additional investigation required.
		#
		# https://github.com/openzfs/zfs/issues/6143
		#
		log_must eval "df >/dev/null"
		log_must_busy zfs destroy -Rf $pool
	else
		typeset list=$(zfs list -H -r -t all -o name $pool)
		for ds in $list ; do
			if [[ $ds != $pool ]] ; then
				if datasetexists $ds ; then
					log_must_busy zfs destroy -Rf $ds
				fi
			fi
		done
	fi

	typeset mntpnt=$(get_prop mountpoint $pool)
	if ! ismounted $pool ; then
		# Make sure mountpoint directory is empty
		if [[ -d $mntpnt ]]; then
			log_must rm -rf $mntpnt/*
		fi

		log_must zfs mount $pool
	fi
	if [[ -d $mntpnt ]]; then
		rm -rf $mntpnt/*
	fi
}

function cmp_md5s {
	typeset file1=$1
	typeset file2=$2

	[ "$(md5digest $file1)" = "$(md5digest $file2)" ]
}

#
# Check whether the two given filesystems have the same sub-datasets
#
# $1 source filesystem
# $2 destination filesystem
#
function cmp_ds_subs
{
	typeset src_fs=$1
	typeset dst_fs=$2

	diff \
		<(zfs list -rHt all -o name $src_fs | sed "s:^$src_fs:PREFIX:g") \
		<(zfs list -rHt all -o name $dst_fs | sed "s:^$dst_fs:PREFIX:g")
}
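
#
# Illustrative usage (assuming $POOL and $POOL2 from rsend.cfg): after a
# recursive send/receive, the replicated hierarchy should mirror the source:
#
#	log_must eval "zfs send -R $POOL@final | zfs receive -d -F $POOL2"
#	log_must cmp_ds_subs $POOL $POOL2
#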

#
# Compare all the directories and files in two filesystems
#
# $1 source filesystem
# $2 destination filesystem
#
function cmp_ds_cont
{
	typeset src_fs=$1
	typeset dst_fs=$2

	typeset srcdir dstdir
	srcdir=$(get_prop mountpoint $src_fs)
	dstdir=$(get_prop mountpoint $dst_fs)

	replay_directory_diff $srcdir $dstdir
}

#
# Compare the properties of the two given datasets
#
# $1 dataset 1
# $2 dataset 2
# $3 -n == don't track property source
# $4 -n == don't track the origin property
#
function cmp_ds_prop
{
	typeset dtst1=$1
	typeset dtst2=$2
	typeset nosource=$3
	typeset noorigin=$4
	typeset source=",source"; [ -n "$nosource" ] && source=
	typeset origin=",origin"; [ -n "$noorigin" ] && origin=
	typeset props="type$origin,volblocksize,acltype,dnodesize"
	props+=",atime,canmount,checksum,compression,copies,devices"
	props+=",exec,quota,readonly,recordsize,reservation,setuid"
	props+=",snapdir,version,volsize,xattr,mountpoint"
	if is_freebsd; then
		props+=",jailed"
	else
		props+=",zoned"
	fi

	diff \
		<(zfs get -Ho property,value$source $props $dtst1 | sed -e "s:$dtst1:PREFIX:g" -e 's/^origin	[^@]*/origin	POOL/' -e 's/	inherited from [^/]*/	inherited from POOL/') \
		<(zfs get -Ho property,value$source $props $dtst2 | sed -e "s:$dtst2:PREFIX:g" -e 's/^origin	[^@]*/origin	POOL/' -e 's/	inherited from [^/]*/	inherited from POOL/')
}
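
#
# Illustrative usage: after receiving $POOL/$FS into $POOL2, the properties
# of source and destination should agree; non-empty $3/$4 arguments skip the
# property-source and origin checks respectively, e.g.
#
#	log_must cmp_ds_prop $POOL/$FS $POOL2/$FS
#	log_must cmp_ds_prop $POOL/$FS $POOL2/$FS -n -n
#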

#
# Randomly create directories and files
#
# $1 directory
#
function random_tree
{
	typeset dir=$1

	if [[ -d $dir ]]; then
		rm -rf $dir
	fi
	mkdir -p $dir
	typeset -i ret=$?

	typeset -i nl nd nf
	((nl = RANDOM % 6 + 1))
	((nd = RANDOM % 3 ))
	((nf = RANDOM % 5 ))
	mktree -b $dir -l $nl -d $nd -f $nf
	((ret |= $?))

	return $ret
}

#
# Put data in the filesystem and take a snapshot
#
# $1 snapshot name
#
function snapshot_tree
{
	typeset snap=$1
	typeset ds=${snap%%@*}
	typeset type=$(get_prop "type" $ds)

	typeset -i ret=0
	if [[ $type == "filesystem" ]]; then
		typeset mntpnt=$(get_prop mountpoint $ds)

		if ((ret == 0)) ; then
			eval random_tree $mntpnt/${snap##$ds}
			((ret |= $?))
		fi
	fi

	if ((ret == 0)) ; then
		zfs snapshot $snap
		((ret |= $?))
	fi

	return $ret
}

#
# Destroy the given snapshots and remove the directory trees created for them
#
# $@ snapshots
#
function destroy_tree
{
	typeset -i ret=0
	typeset snap
	for snap in "$@" ; do
		log_must_busy zfs destroy $snap

		typeset ds=${snap%%@*}
		typeset type=$(get_prop "type" $ds)
		if [[ $type == "filesystem" ]]; then
			typeset mntpnt=$(get_prop mountpoint $ds)
			if [[ -n $mntpnt ]]; then
				rm -rf $mntpnt/${snap##$ds}
			fi
		fi
	done

	return 0
}

#
# Get all the sub-datasets of the given dataset with a specific suffix
#
# $1 Given dataset
# $2 Suffix
#
function getds_with_suffix
{
	typeset ds=$1
	typeset suffix=$2

	zfs list -rHt all -o name $ds | grep "$suffix$"
}

#
# Output the inheritable properties for a file system, filtered for the
# current environment
#
function fs_inherit_prop
{
	typeset fs_prop
	if is_global_zone ; then
		fs_prop=$(zfs inherit 2>&1 | \
		    awk '$2=="YES" && $3=="YES" {print $1}')
		if ! is_te_enabled ; then
			fs_prop=$(echo $fs_prop | grep -v "mlslabel")
		fi
	else
		fs_prop=$(zfs inherit 2>&1 | \
		    awk '$2=="YES" && $3=="YES" && !/devices|mlslabel|sharenfs|sharesmb|zoned/ {print $1}')
	fi

	echo $fs_prop
}

#
# Output the inheritable properties for a volume
#
function vol_inherit_prop
{
	echo checksum readonly
}

#
# Get the destination dataset to compare
#
function get_dst_ds
{
	typeset srcfs=$1
	typeset dstfs=$2

	#
	# If srcfs is not a pool itself, append its sub-path to dstfs
	#
	if ! zpool list $srcfs > /dev/null 2>&1 ; then
		eval dstfs="$dstfs/${srcfs#*/}"
	fi

	echo $dstfs
}
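
#
# Worked example (illustrative):
#	get_dst_ds $POOL/$FS/fs1 $POOL2    -->  $POOL2/$FS/fs1
#	get_dst_ds $POOL $POOL2            -->  $POOL2
#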

#
# Make test files
#
# $1 Number of files to create
# $2 Maximum file size
# $3 File ID offset
# $4 File system to create the files on
#
function mk_files
{
	nfiles=$1
	maxsize=$2
	file_id_offset=$3
	fs=$4
	bs=512

	for ((i=0; i<$nfiles; i=i+1)); do
		file_name="/$fs/file-$maxsize-$((i+$file_id_offset))"
		file_size=$((($RANDOM * $RANDOM % ($maxsize - 1)) + 1))

		#
		# Create an interesting mix of files which contain both
		# data blocks and holes for more realistic test coverage.
		# Half the files are created as sparse then partially filled,
		# the other half is dense then a hole is punched in the file.
		#
		if [ $((RANDOM % 2)) -eq 0 ]; then
			truncate -s $file_size $file_name || \
			    log_fail "Failed to create $file_name"
			dd if=/dev/urandom of=$file_name \
			    bs=$bs count=$(($file_size / 2 / $bs)) \
			    seek=$(($RANDOM % (($file_size / 2 / $bs) + 1))) \
			    conv=notrunc >/dev/null 2>&1 || \
			    log_fail "Failed to create $file_name"
		else
			dd if=/dev/urandom of=$file_name \
			    bs=$file_size count=1 >/dev/null 2>&1 || \
			    log_fail "Failed to create $file_name"
			dd if=/dev/zero of=$file_name \
			    bs=$bs count=$(($file_size / 2 / $bs)) \
			    seek=$(($RANDOM % (($file_size / 2 / $bs) + 1))) \
			    conv=notrunc >/dev/null 2>&1 || \
			    log_fail "Failed to create $file_name"
		fi
	done
	echo Created $nfiles files of random sizes up to $maxsize bytes
}
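
#
# Illustrative usage: mk_files and rm_files share the file-<maxsize>-<id>
# naming convention, so matching size/offset arguments address the same
# files (assuming $POOL/$FS is mounted at /$POOL/$FS):
#
#	mk_files 100 16384 0 $POOL/$FS	# creates file-16384-0 .. file-16384-99
#	rm_files 50 16384 0 $POOL/$FS	# removes file-16384-0 .. file-16384-49
#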

#
# Remove test files
#
# $1 Number of files to remove
# $2 Maximum file size
# $3 File ID offset
# $4 File system to remove the files from
#
function rm_files
{
	nfiles=$1
	maxsize=$2
	file_id_offset=$3
	fs=$4

	for ((i=0; i<$nfiles; i=i+1)); do
		rm -f /$fs/file-$maxsize-$((i+$file_id_offset))
	done
	echo Removed $nfiles files of random sizes up to $maxsize bytes
}

#
# Simulate a random set of operations which could be reasonably expected
# to occur on an average filesystem.
#
# $1 Number of files to modify
# $2 Maximum file size
# $3 File system to modify the files on
# $4 Enable xattrs (optional; default is 1, i.e. enabled)
#
function churn_files
{
	nfiles=$1
	maxsize=$2
	fs=$3
	xattrs=${4:-1}

	#
	# Remove roughly half of the files in order to make it more
	# likely that a dnode will be reallocated.
	#
	for ((i=0; i<$nfiles; i=i+1)); do
		file_name="/$fs/file-$i"

		if [[ -e $file_name ]]; then
			if [ $((RANDOM % 2)) -eq 0 ]; then
				rm $file_name || \
				    log_fail "Failed to remove $file_name"
			fi
		fi
	done

	#
	# Remount the filesystem to simulate normal usage.  This resets
	# the last allocated object id allowing for new objects to be
	# reallocated in the locations of previously freed objects.
	#
	log_must zfs unmount $fs
	log_must zfs mount $fs

	for i in {0..$nfiles}; do
		file_name="/$fs/file-$i"
		file_size=$((($RANDOM * $RANDOM % ($maxsize - 1)) + 1))

		#
		# When the file exists modify it in one of five ways to
		# simulate normal usage:
		# - (20%) Remove and set an extended attribute on the file
		# - (20%) Overwrite the existing file
		# - (20%) Truncate the existing file to a random length
		# - (20%) Truncate the existing file to zero length
		# - (20%) Remove the file
		#
		# Otherwise create the missing file.  20% of the created
		# files will be small and use embedded block pointers, the
		# remainder will have random sizes up to the maximum size.
		# Three extended attributes are attached to all of the files.
		#
		if [[ -e $file_name ]]; then
			value=$((RANDOM % 5))
			if [ $value -eq 0 -a $xattrs -ne 0 ]; then
				attrname="testattr$((RANDOM % 3))"
				attrlen="$(((RANDOM % 1000) + 1))"
				attrvalue="$(random_string VALID_NAME_CHAR \
				    $attrlen)"
				rm_xattr $attrname $file_name || \
				    log_fail "Failed to remove $attrname"
				set_xattr $attrname "$attrvalue" $file_name || \
				    log_fail "Failed to set $attrname"
			elif [ $value -eq 1 ]; then
				dd if=/dev/urandom of=$file_name \
				    bs=$file_size count=1 >/dev/null 2>&1 || \
				    log_fail "Failed to overwrite $file_name"
			elif [ $value -eq 2 ]; then
				truncate -s $file_size $file_name || \
				    log_fail "Failed to truncate $file_name"
			elif [ $value -eq 3 ]; then
				truncate -s 0 $file_name || \
				    log_fail "Failed to truncate $file_name"
			else
				rm $file_name || \
				    log_fail "Failed to remove $file_name"
			fi
		else
			if [ $((RANDOM % 5)) -eq 0 ]; then
				file_size=$((($RANDOM % 64) + 1))
			fi

			dd if=/dev/urandom of=$file_name \
			    bs=$file_size count=1 >/dev/null 2>&1 || \
			    log_fail "Failed to create $file_name"

			if [ $xattrs -ne 0 ]; then
				for j in {0..2}; do
					attrname="testattr$j"
					attrlen="$(((RANDOM % 1000) + 1))"
					attrvalue="$(random_string \
					    VALID_NAME_CHAR $attrlen)"
					set_xattr $attrname \
					    "$attrvalue" $file_name || \
					    log_fail "Failed to set $attrname"
				done
			fi
		fi
	done

	return 0
}
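
#
# Illustrative usage: an initial population of file-0 .. file-N can be
# created by churn_files itself (missing files are created), then churned
# again so an incremental stream contains freed and reallocated dnodes:
#
#	log_must churn_files 1000 524288 $POOL/$FS
#	log_must zfs snapshot $POOL/$FS@a
#	log_must churn_files 1000 524288 $POOL/$FS
#	log_must zfs snapshot $POOL/$FS@b
#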

#
# Mess up a send file's contents
#
# $1 The send file path
#
function mess_send_file
{
	typeset -i minsize=2072
	file=$1

	filesize=$(stat_size $file)
	if [ $filesize -lt $minsize ]; then
		log_fail "Send file too small: $filesize < $minsize"
	fi

	# Truncate the send stream at a random offset after the DRR_BEGIN
	# record (beyond 2072 bytes), any smaller than this and the receiving
	# system won't have enough info to create the partial dataset at all.
	# We use zstream dump to verify there is an intact DRR_BEGIN record.
	offset=$(((($RANDOM * $RANDOM) % ($filesize - $minsize)) + $minsize))
	nr_begins=$(head -c $offset $file | zstream dump | \
	    awk '/DRR_BEGIN/ { print $5 }')
	log_must [ "$nr_begins" -eq 1 ]

	if (($RANDOM % 7 <= 1)); then
		#
		# We corrupt 2 bytes to minimize the chance that we
		# write the same value that's already there.
		#
		log_must eval "dd if=/dev/urandom of=$file conv=notrunc " \
		    "bs=1 count=2 seek=$offset >/dev/null 2>&1"
	else
		log_must truncate -s $offset $file
	fi
}

#
# Diff the send/receive filesystems
#
# $1 The sent filesystem
# $2 The received filesystem
#
function file_check
{
	sendfs=$1
	recvfs=$2

	if [[ -d /$recvfs/.zfs/snapshot/a && -d \
	    /$sendfs/.zfs/snapshot/a ]]; then
		log_must directory_diff /$recvfs/.zfs/snapshot/a /$sendfs/.zfs/snapshot/a
	fi
	if [[ -d /$recvfs/.zfs/snapshot/b && -d \
	    /$sendfs/.zfs/snapshot/b ]]; then
		log_must directory_diff /$recvfs/.zfs/snapshot/b /$sendfs/.zfs/snapshot/b
	fi
}

#
# Resume test helper
#
# $1 The ZFS send command
# $2 The filesystem where the streams are sent
# $3 The receive filesystem
# $4 Test dry-run (optional)
#
function resume_test
{
	typeset sendcmd=$1
	typeset streamfs=$2
	typeset recvfs=$3
	typeset dryrun=${4:-1}

	stream_num=1
	log_must eval "$sendcmd >/$streamfs/$stream_num"

	for ((i=0; i<2; i=i+1)); do
		mess_send_file /$streamfs/$stream_num
		log_mustnot eval "zfs recv -suv $recvfs </$streamfs/$stream_num"
		stream_num=$((stream_num+1))

		token=$(get_prop receive_resume_token $recvfs)

		# Do a dry-run
		[ $dryrun -ne 0 ] && \
			log_must eval "zfs send -nvt $token > /dev/null"

		log_must eval "zfs send -t $token >/$streamfs/$stream_num"
	done
	log_must eval "zfs recv -suv $recvfs </$streamfs/$stream_num"
}
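
#
# Illustrative usage (assuming $sendfs@a exists and $streamfs is a mounted
# filesystem used to hold the partial stream files):
#
#	resume_test "zfs send -v $sendfs@a" $streamfs $recvfs
#	resume_test "zfs send -v -i @a $sendfs@b" $streamfs $recvfs 0
#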

function get_resume_token
{
	sendcmd=$1
	streamfs=$2
	recvfs=$3

	log_must eval "$sendcmd > /$streamfs/1"
	mess_send_file /$streamfs/1
	log_mustnot eval "zfs recv -suv $recvfs < /$streamfs/1 2>&1"
	get_prop receive_resume_token $recvfs > /$streamfs/resume_token
}

#
# Set up filesystems for the resumable send/receive tests
#
# $1 The "send" filesystem
# $2 The "recv" filesystem
# $3 The filesystem for the stream files
#
function test_fs_setup
{
	typeset sendfs=$1
	typeset recvfs=$2
	typeset streamfs=$3
	typeset sendpool=${sendfs%%/*}
	typeset recvpool=${recvfs%%/*}

	datasetexists $sendfs && log_must_busy zfs destroy -r $sendpool
	datasetexists $recvfs && log_must_busy zfs destroy -r $recvpool
	datasetexists $streamfs && log_must_busy zfs destroy -r $streamfs

	if datasetexists $sendfs || zfs create -o compress=lz4 $sendfs; then
		mk_files 1000 256 0 $sendfs &
		mk_files 1000 131072 0 $sendfs &
		mk_files 100 1048576 0 $sendfs &
		mk_files 10 10485760 0 $sendfs &
		mk_files 1 104857600 0 $sendfs &
		log_must wait
		log_must zfs snapshot $sendfs@a

		rm_files 200 256 0 $sendfs &
		rm_files 200 131072 0 $sendfs &
		rm_files 20 1048576 0 $sendfs &
		rm_files 2 10485760 0 $sendfs &
		log_must wait

		mk_files 400 256 0 $sendfs &
		mk_files 400 131072 0 $sendfs &
		mk_files 40 1048576 0 $sendfs &
		mk_files 4 10485760 0 $sendfs &
		log_must wait

		log_must zfs snapshot $sendfs@b
		log_must eval "zfs send -v $sendfs@a >/$sendpool/initial.zsend"
		log_must eval "zfs send -v -i @a $sendfs@b " \
		    ">/$sendpool/incremental.zsend"
	fi

	log_must zfs create -o compress=lz4 $streamfs
}

#
# Check to see if the specified features are set in a send stream.
# The values for these features are found in include/sys/zfs_ioctl.h
#
# $1 The stream file
# $2-$n The flags expected in the stream
#
function stream_has_features
{
	typeset file=$1
	shift

	[[ -f $file ]] || log_fail "Couldn't find file: $file"
	typeset flags=$(zstream dump $file | \
	    awk '/features =/ {features = $3} END {print features}')
	typeset -A feature
	feature[dedup]="1"
	feature[dedupprops]="2"
	feature[sa_spill]="4"
	feature[embed_data]="10000"
	feature[lz4]="20000"
	feature[mooch_byteswap]="40000"
	feature[large_blocks]="80000"
	feature[resuming]="100000"
	feature[redacted]="200000"
	feature[compressed]="400000"

	typeset flag known derived=0
	for flag in "$@"; do
		known=${feature[$flag]}
		[[ -z $known ]] && log_fail "Unknown feature: $flag"

		derived=$(printf "%x" $((0x${flags} & 0x${feature[$flag]})))
		[[ $derived = $known ]] || return 1
	done

	return 0
}
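
#
# Illustrative usage: a compressed send stream written to $BACKDIR should
# carry the "compressed" feature flag:
#
#	log_must eval "zfs send -c $POOL/$FS@final > $BACKDIR/c.zsend"
#	log_must stream_has_features $BACKDIR/c.zsend compressed
#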

#
# Given a send stream, verify that the size of the stream matches what's
# expected based on the source or target dataset. If the stream is an
# incremental stream, subtract the size of the source snapshot before
# comparing. This function does not currently handle incremental streams
# that remove data.
#
# $1 The send stream file
# $2 The dataset to compare against
#    This can be the source of a send or the recv target (a fs, not a snapshot)
# $3 The percentage below which verification is deemed a failure
# $4 The source snapshot of an incremental send
#

function verify_stream_size
{
	typeset stream=$1
	typeset ds=$2
	typeset percent=${3:-90}
	typeset inc_src=$4

	[[ -f $stream ]] || log_fail "No such file: $stream"
	datasetexists $ds || log_fail "No such dataset: $ds"

	typeset stream_size=$(zstream dump $stream | sed -n \
	    's/	Total payload size = \(.*\) (0x.*)/\1/p')

	typeset inc_size=0
	if [[ -n $inc_src ]]; then
		inc_size=$(get_prop lrefer $inc_src)
		if stream_has_features $stream compressed; then
			inc_size=$(get_prop refer $inc_src)
		fi
	fi

	if stream_has_features $stream compressed; then
		ds_size=$(get_prop refer $ds)
	else
		ds_size=$(get_prop lrefer $ds)
	fi
	ds_size=$((ds_size - inc_size))

	log_must within_percent $stream_size $ds_size $percent
}
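
#
# Illustrative usage: a full stream should be within $percent (90 by default)
# of the dataset's referenced (or logically referenced) size, and an
# incremental stream is checked against the difference from the source
# snapshot:
#
#	log_must eval "zfs send $POOL/$FS@final > $BACKDIR/full.zsend"
#	verify_stream_size $BACKDIR/full.zsend $POOL/$FS
#
#	log_must eval "zfs send -i @init $POOL/$FS@final > $BACKDIR/inc.zsend"
#	verify_stream_size $BACKDIR/inc.zsend $POOL/$FS 90 $POOL/$FS@init
#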

# Cleanup function for tests involving resumable send
function resume_cleanup
{
	typeset sendfs=$1
	typeset streamfs=$2
	typeset sendpool=${sendfs%%/*}

	datasetexists $sendfs && log_must_busy zfs destroy -r $sendfs
	datasetexists $streamfs && log_must_busy zfs destroy -r $streamfs
	cleanup_pool $POOL2
	rm -f /$sendpool/initial.zsend /$sendpool/incremental.zsend
}

# Randomly set the property to one of the enumerated values.
function rand_set_prop
{
	typeset dtst=$1
	typeset prop=$2
	shift 2
	typeset value=$(random_get $@)

	log_must eval "zfs set $prop='$value' $dtst"
}
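
#
# Illustrative usage: the candidate values follow the dataset and property
# name, and one of them is chosen at random:
#
#	rand_set_prop $POOL/$FS compression "on" "off" "lz4"
#	rand_set_prop $POOL/$FS recordsize "512" "4K" "128K"
#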

# Generate a recursive checksum of a filesystem which includes the file
# contents and any associated extended attributes.
function recursive_cksum
{
	case "$(uname)" in
	FreeBSD)
		find $1 -type f -exec sh -c 'sha256 -q {}; lsextattr -q \
		    system {} | sha256 -q; lsextattr -q user {} | sha256 -q' \
		    \; | sort | sha256 -q
		;;
	*)
		find $1 -type f -exec sh -c 'sha256sum {}; getfattr \
		    --absolute-names --only-values -d {} | sha256sum' \; | \
		    sort -k 2 | awk '{ print $1 }' | sha256sum | \
		    awk '{ print $1 }'
		;;
	esac
}

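#
# Illustrative usage: compare the contents (including xattrs) of a sent and
# a received filesystem, for hypothetical $sendfs/$recvfs mountpoints:
#
#	cksum_orig=$(recursive_cksum /$sendfs)
#	... zfs send | zfs receive ...
#	cksum_recv=$(recursive_cksum /$recvfs)
#	[[ $cksum_orig == $cksum_recv ]] || log_fail "checksum mismatch"
#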