1#
2# CDDL HEADER START
3#
4# The contents of this file are subject to the terms of the
5# Common Development and Distribution License (the "License").
6# You may not use this file except in compliance with the License.
7#
8# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9# or http://www.opensolaris.org/os/licensing.
10# See the License for the specific language governing permissions
11# and limitations under the License.
12#
13# When distributing Covered Code, include this CDDL HEADER in each
14# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15# If applicable, add the following below this CDDL HEADER, with the
16# fields enclosed by brackets "[]" replaced with your own identifying
17# information: Portions Copyright [yyyy] [name of copyright owner]
18#
19# CDDL HEADER END
20#
21
22#
23# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24# Use is subject to license terms.
25# Copyright (c) 2012, 2016 by Delphix. All rights reserved.
26# Copyright 2016 Nexenta Systems, Inc.
27#
28
29. ${STF_TOOLS}/contrib/include/logapi.shlib
30
31ZFS=${ZFS:-/usr/sbin/zfs}
32ZPOOL=${ZPOOL:-/usr/sbin/zpool}
33
34# Determine whether a dataset is mounted
35#
36# $1 dataset name
37# $2 filesystem type; optional - defaulted to zfs
38#
39# Return 0 if dataset is mounted; 1 if unmounted; 2 on error
40
function ismounted
{
	typeset fstype=$2
	[[ -z $fstype ]] && fstype=zfs
	typeset out dir name ret

	case $fstype in
		zfs)
			# 'zfs mount' prints "<dataset> <mountpoint>" pairs:
			# compare against field 2 when $1 looks like a path,
			# field 1 when it looks like a dataset name.
			if [[ "$1" == "/"* ]] ; then
				for out in $(zfs mount | awk '{print $2}'); do
					[[ $1 == $out ]] && return 0
				done
			else
				for out in $(zfs mount | awk '{print $1}'); do
					[[ $1 == $out ]] && return 0
				done
			fi
		;;
		ufs|nfs)
			out=$(df -F $fstype $1 2>/dev/null)
			ret=$?
			(($ret != 0)) && return $ret

			# df output is "<dir> (<special>): ..." — carve out
			# the mount directory and the special/device name
			# and match $1 against either.
			dir=${out%%\(*}
			dir=${dir%% *}
			name=${out##*\(}
			name=${name%%\)*}
			name=${name%% *}

			[[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
		;;
	esac

	return 1
}
76
77# Return 0 if a dataset is mounted; 1 otherwise
78#
79# $1 dataset name
80# $2 filesystem type; optional - defaulted to zfs
81
# Thin predicate wrapper: succeed only when ismounted reports mounted.
function mounted
{
	if ismounted $1 $2; then
		return 0
	fi
	return 1
}
88
89# Return 0 if a dataset is unmounted; 1 otherwise
90#
91# $1 dataset name
92# $2 filesystem type; optional - defaulted to zfs
93
# Succeed only when ismounted reports "unmounted" (status 1); any other
# status, including errors (2), is treated as failure.
function unmounted
{
	ismounted $1 $2
	typeset rv=$?
	(($rv == 1)) && return 0
	return 1
}
100
101# split line on ","
102#
103# $1 - line to split
104
# Emit $1 with every "," translated to a space.
function splitline
{
	echo $1 | tr ',' ' '
}
109
# Run the standard setup, then terminate the test with a PASS result.
function default_setup
{
	default_setup_noexit "$@"
	log_pass
}
116
117#
118# Given a list of disks, setup storage pools and datasets.
119#
# Create $TESTPOOL on the given disks plus a $TESTPOOL/$TESTFS filesystem
# mounted at $TESTDIR.  Optionally ($2 non-empty) add a container
# $TESTPOOL/$TESTCTR holding $TESTFS1 at $TESTDIR1, and ($3 non-empty)
# a volume $TESTPOOL/$TESTVOL.
function default_setup_noexit
{
	typeset disklist=$1
	typeset container=$2
	typeset volume=$3

	if is_global_zone; then
		# Start from a clean slate: remove any stale pool and dir.
		if poolexists $TESTPOOL ; then
			destroy_pool $TESTPOOL
		fi
		[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
		log_must zpool create -f $TESTPOOL $disklist
	else
		# Local zones cannot create pools; remount delegated ones.
		reexport_pool
	fi

	rm -rf $TESTDIR  || log_unresolved Could not remove $TESTDIR
	mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR

	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	if [[ -n $container ]]; then
		rm -rf $TESTDIR1  || \
			log_unresolved Could not remove $TESTDIR1
		mkdir -p $TESTDIR1 || \
			log_unresolved Could not create $TESTDIR1

		# canmount=off: the container acts as a grouping node only.
		log_must zfs create $TESTPOOL/$TESTCTR
		log_must zfs set canmount=off $TESTPOOL/$TESTCTR
		log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
		log_must zfs set mountpoint=$TESTDIR1 \
		    $TESTPOOL/$TESTCTR/$TESTFS1
	fi

	if [[ -n $volume ]]; then
		if is_global_zone ; then
			log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
		else
			# 'zfs create -V' is not usable from a local zone.
			log_must zfs create $TESTPOOL/$TESTVOL
		fi
	fi
}
163
164#
165# Given a list of disks, setup a storage pool, file system and
166# a container.
167#
# Build the default pool/filesystem plus a container ($2 = "true").
function default_container_setup
{
	typeset disks=$1

	default_setup "$disks" "true"
}
174
175#
176# Given a list of disks, setup a storage pool,file system
177# and a volume.
178#
# Build the default pool/filesystem plus a volume ($3 = "true").
function default_volume_setup
{
	typeset disks=$1

	default_setup "$disks" "" "true"
}
185
186#
187# Given a list of disks, setup a storage pool,file system,
188# a container and a volume.
189#
# Build the default pool/filesystem plus both a container and a volume.
function default_container_volume_setup
{
	typeset disks=$1

	default_setup "$disks" "true" "true"
}
196
197#
# Create a snapshot on a filesystem or volume. By default the snapshot is
# created on a filesystem
200#
201# $1 Existing filesystem or volume name. Default, $TESTFS
202# $2 snapshot name. Default, $TESTSNAP
203#
function create_snapshot
{
	typeset fs_vol=${1:-$TESTFS}
	typeset snap=${2:-$TESTSNAP}

	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."

	# Creating over an existing snapshot is treated as a test error.
	if snapexists $fs_vol@$snap; then
		log_fail "$fs_vol@$snap already exists."
	fi
	datasetexists $fs_vol || \
		log_fail "$fs_vol must exist."

	log_must zfs snapshot $fs_vol@$snap
}
220
221#
222# Create a clone from a snapshot, default clone name is $TESTCLONE.
223#
224# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
225# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
226#
# Clone $1 (default $TESTPOOL/$TESTFS@$TESTSNAP) into $2 (default
# $TESTPOOL/$TESTCLONE), failing the test on any error.
function create_clone   # snapshot clone
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
	typeset clone=${2:-$TESTPOOL/$TESTCLONE}

	if [[ -z $snap ]]; then
		log_fail "Snapshot name is undefined."
	fi
	if [[ -z $clone ]]; then
		log_fail "Clone name is undefined."
	fi

	log_must zfs clone $snap $clone
}
239
240#
# Create a bookmark of the given snapshot. By default the bookmark is
# created for a filesystem snapshot.
243#
244# $1 Existing filesystem or volume name. Default, $TESTFS
245# $2 Existing snapshot name. Default, $TESTSNAP
246# $3 bookmark name. Default, $TESTBKMARK
247#
# Bookmark snapshot $2 of dataset $1 as $1#$3, failing the test if the
# bookmark already exists or the dataset/snapshot is missing.
function create_bookmark
{
	typeset fs_vol=${1:-$TESTFS}
	typeset snap=${2:-$TESTSNAP}
	typeset bkmark=${3:-$TESTBKMARK}

	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."
	[[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."

	if bkmarkexists $fs_vol#$bkmark; then
		log_fail "$fs_vol#$bkmark already exists."
	fi
	datasetexists $fs_vol || \
		log_fail "$fs_vol must exist."
	snapexists $fs_vol@$snap || \
		log_fail "$fs_vol@$snap must exist."

	# Use plain 'zfs' for consistency with the other create_* helpers
	# in this library (create_snapshot, create_clone).
	log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
}
268
# Set up the standard mirrored pool, then finish the test with PASS.
function default_mirror_setup
{
	default_mirror_setup_noexit $1 $2 $3
	log_pass
}
275
276#
277# Given a pair of disks, set up a storage pool and dataset for the mirror
278# @parameters: $1 the primary side of the mirror
279#   $2 the secondary side of the mirror
280# @uses: ZPOOL ZFS TESTPOOL TESTFS
281function default_mirror_setup_noexit
282{
283	readonly func="default_mirror_setup_noexit"
284	typeset primary=$1
285	typeset secondary=$2
286
287	[[ -z $primary ]] && \
288		log_fail "$func: No parameters passed"
289	[[ -z $secondary ]] && \
290		log_fail "$func: No secondary partition passed"
291	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
292	log_must zpool create -f $TESTPOOL mirror $@
293	log_must zfs create $TESTPOOL/$TESTFS
294	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
295}
296
297#
298# create a number of mirrors.
299# We create a number($1) of 2 way mirrors using the pairs of disks named
300# on the command line. These mirrors are *not* mounted
301# @parameters: $1 the number of mirrors to create
302#  $... the devices to use to create the mirrors on
303# @uses: ZPOOL ZFS TESTPOOL
function setup_mirrors
{
	typeset -i nmirrors=$1

	shift
	while ((nmirrors > 0)); do
		# Each mirror consumes the next two devices from the list;
		# fail loudly if the caller supplied too few.
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$nmirrors ]] && rm -rf /$TESTPOOL$nmirrors
		log_must zpool create -f $TESTPOOL$nmirrors mirror $1 $2
		shift 2
		((nmirrors = nmirrors - 1))
	done
}
317
318#
319# create a number of raidz pools.
320# We create a number($1) of 2 raidz pools  using the pairs of disks named
321# on the command line. These pools are *not* mounted
322# @parameters: $1 the number of pools to create
323#  $... the devices to use to create the pools on
324# @uses: ZPOOL ZFS TESTPOOL
function setup_raidzs
{
	typeset -i nraidzs=$1

	shift
	while ((nraidzs > 0)); do
		# Each raidz pool consumes the next two devices from the
		# list; fail loudly if the caller supplied too few.
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$nraidzs ]] && rm -rf /$TESTPOOL$nraidzs
		log_must zpool create -f $TESTPOOL$nraidzs raidz $1 $2
		shift 2
		((nraidzs = nraidzs - 1))
	done
}
338
339#
340# Destroy the configured testpool mirrors.
341# the mirrors are of the form ${TESTPOOL}{number}
342# @uses: ZPOOL ZFS TESTPOOL
# Destroy the ${TESTPOOL}{number} mirror pools via the common cleanup
# path, then record a PASS.
function destroy_mirrors
{
	default_cleanup_noexit
	log_pass
}
349
350#
351# Given a minimum of two disks, set up a storage pool and dataset for the raid-z
352# $1 the list of disks
353#
# Given a minimum of two disks, build a raidz $TESTPOOL with a
# $TESTFS filesystem mounted at $TESTDIR, then record a PASS.
function default_raidz_setup
{
	typeset disklist="$*"
	disks=(${disklist[*]})

	if [[ ${#disks[*]} -lt 2 ]]; then
		log_fail "A raid-z requires a minimum of two disks."
	fi

	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
	# Pass the full disk list rather than only the first three
	# arguments so raidz configurations wider than three disks
	# are honored.
	log_must zpool create -f $TESTPOOL raidz $disklist
	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	log_pass
}
370
371#
372# Common function used to cleanup storage pools and datasets.
373#
374# Invoked at the start of the test suite to ensure the system
375# is in a known state, and also at the end of each set of
376# sub-tests to ensure errors from one set of tests doesn't
377# impact the execution of the next set.
378
# Run the common cleanup, then terminate the test with a PASS result.
function default_cleanup
{
	default_cleanup_noexit
	log_pass
}
385
function default_cleanup_noexit
{
	typeset exclude=""
	typeset pool=""
	#
	# Destroying the pool will also destroy any
	# filesystems it contains.
	#
	if is_global_zone; then
		zfs unmount -a > /dev/null 2>&1
		# Build an egrep pattern of pools that must survive cleanup.
		exclude=`eval echo \"'(${KEEP})'\"`
		ALL_POOLS=$(zpool list -H -o name \
		    | grep -v "$NO_POOLS" | egrep -v "$exclude")
		# Here, we loop through the pools we're allowed to
		# destroy, only destroying them if it's safe to do
		# so.
		# The list must be quoted: with more than one pool an
		# unquoted expansion passes multiple words to '[' and
		# the test errors out instead of evaluating.
		while [ -n "$ALL_POOLS" ]
		do
			for pool in ${ALL_POOLS}
			do
				if safe_to_destroy_pool $pool ;
				then
					destroy_pool $pool
				fi
				# Refresh the list: destroying one pool may
				# make it safe to destroy another.
				ALL_POOLS=$(zpool list -H -o name \
				    | grep -v "$NO_POOLS" \
				    | egrep -v "$exclude")
			done
		done

		zfs mount -a
	else
		typeset fs=""
		# Recursively destroy everything delegated beneath the
		# zone containers.
		for fs in $(zfs list -H -o name \
		    | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
			datasetexists $fs && \
				log_must zfs destroy -Rf $fs
		done

		# Need cleanup here to avoid garbage dir left.
		for fs in $(zfs list -H -o name); do
			[[ $fs == /$ZONE_POOL ]] && continue
			[[ -d $fs ]] && log_must rm -rf $fs/*
		done

		#
		# Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
		# the default value
		#
		for fs in $(zfs list -H -o name); do
			if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
				log_must zfs set reservation=none $fs
				log_must zfs set recordsize=128K $fs
				log_must zfs set mountpoint=/$fs $fs
				typeset enc=""
				enc=$(get_prop encryption $fs)
				# Only reset checksum when encryption is
				# unavailable or off; encrypted datasets
				# manage their own checksum setting.
				if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
					[[ "$enc" == "off" ]]; then
					log_must zfs set checksum=on $fs
				fi
				log_must zfs set compression=off $fs
				log_must zfs set atime=on $fs
				log_must zfs set devices=off $fs
				log_must zfs set exec=on $fs
				log_must zfs set setuid=on $fs
				log_must zfs set readonly=off $fs
				log_must zfs set snapdir=hidden $fs
				log_must zfs set aclmode=groupmask $fs
				log_must zfs set aclinherit=secure $fs
			fi
		done
	fi

	[[ -d $TESTDIR ]] && \
		log_must rm -rf $TESTDIR
}
462
463
464#
465# Common function used to cleanup storage pools, file systems
466# and containers.
467#
function default_container_cleanup
{
	if ! is_global_zone; then
		reexport_pool
	fi

	# Unmount, then recursively destroy the contained filesystem and
	# the container itself; absent datasets are simply skipped.
	ismounted $TESTPOOL/$TESTCTR/$TESTFS1
	[[ $? -eq 0 ]] && \
	    log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1

	datasetexists $TESTPOOL/$TESTCTR/$TESTFS1 && \
	    log_must zfs destroy -R $TESTPOOL/$TESTCTR/$TESTFS1

	datasetexists $TESTPOOL/$TESTCTR && \
	    log_must zfs destroy -Rf $TESTPOOL/$TESTCTR

	[[ -e $TESTDIR1 ]] && \
	    log_must rm -rf $TESTDIR1 > /dev/null 2>&1

	# Finish with the common pool/dataset cleanup (exits with PASS).
	default_cleanup
}
489
490#
491# Common function used to cleanup snapshot of file system or volume. Default to
492# delete the file system's snapshot
493#
494# $1 snapshot name
495#
function destroy_snapshot
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	if ! snapexists $snap; then
		log_fail "'$snap' does not exist."
	fi

	#
	# The mountpoint reported by 'get_prop' may not match the real
	# mountpoint when the snapshot is unmounted, so only record a
	# directory to remove when the snapshot is actually mounted.
	#
	typeset mtpt=""
	if ismounted $snap; then
		mtpt=$(get_prop mountpoint $snap)
		(($? != 0)) && \
			log_fail "get_prop mountpoint $snap failed."
	fi

	log_must zfs destroy $snap
	# Remove the now-stale mountpoint directory, if any.
	[[ $mtpt != "" && -d $mtpt ]] && \
		log_must rm -rf $mtpt
}
520
521#
522# Common function used to cleanup clone.
523#
524# $1 clone name
525#
function destroy_clone
{
	typeset clone=${1:-$TESTPOOL/$TESTCLONE}

	if ! datasetexists $clone; then
		log_fail "'$clone' does not exist."
	fi

	# Same reasoning as in destroy_snapshot: only record the
	# mountpoint for removal when the clone is actually mounted.
	typeset mtpt=""
	if ismounted $clone; then
		mtpt=$(get_prop mountpoint $clone)
		(($? != 0)) && \
			log_fail "get_prop mountpoint $clone failed."
	fi

	log_must zfs destroy $clone
	# Remove the now-stale mountpoint directory, if any.
	[[ $mtpt != "" && -d $mtpt ]] && \
		log_must rm -rf $mtpt
}
546
547#
548# Common function used to cleanup bookmark of file system or volume.  Default
549# to delete the file system's bookmark.
550#
551# $1 bookmark name
552#
function destroy_bookmark
{
	typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}

	if ! bkmarkexists $bkmark; then
		# Fixed: the message previously referenced the undefined
		# variable $bkmarkp, so it printed an empty name.
		log_fail "'$bkmark' does not exist."
	fi

	# Plain 'zfs' for consistency with the other destroy_* helpers.
	log_must zfs destroy $bkmark
}
563
564# Return 0 if a snapshot exists; $? otherwise
565#
566# $1 - snapshot name
567
# Predicate: does snapshot $1 exist?  Status of 'zfs list' is returned
# directly; all output is suppressed.
function snapexists
{
	zfs list -H -t snapshot "$1" > /dev/null 2>&1
}
573
574#
575# Return 0 if a bookmark exists; $? otherwise
576#
577# $1 - bookmark name
578#
function bkmarkexists
{
	# Plain 'zfs' to match snapexists and the rest of this library.
	zfs list -H -t bookmark "$1" > /dev/null 2>&1
	return $?
}
584
585#
586# Set a property to a certain value on a dataset.
587# Sets a property of the dataset to the value as passed in.
588# @param:
589#	$1 dataset who's property is being set
590#	$2 property to set
591#	$3 value to set property to
592# @return:
593#	0 if the property could be set.
594#	non-zero otherwise.
595# @use: ZFS
596#
function dataset_setprop
{
	typeset fn=dataset_setprop

	if (($# < 3)); then
		log_note "$fn: Insufficient parameters (need 3, had $#)"
		return 1
	fi
	typeset output=
	# Capture stderr too so a failure can be reported verbatim below.
	output=$(zfs set $2=$3 $1 2>&1)
	typeset rv=$?
	if ((rv != 0)); then
		log_note "Setting property on $1 failed."
		log_note "property $2=$3"
		log_note "Return Code: $rv"
		log_note "Output: $output"
		return $rv
	fi
	return 0
}
617
618#
619# Assign suite defined dataset properties.
620# This function is used to apply the suite's defined default set of
621# properties to a dataset.
622# @parameters: $1 dataset to use
623# @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
624# @returns:
625#   0 if the dataset has been altered.
626#   1 if no pool name was passed in.
627#   2 if the dataset could not be found.
628#   3 if the dataset could not have it's properties set.
629#
function dataset_set_defaultproperties
{
	typeset dataset="$1"

	[[ -z $dataset ]] && return 1

	typeset confset=
	typeset -i found=0
	# NOTE(review): plain 'zfs list' output includes a header line and
	# property columns; this scan compares $dataset against every
	# whitespace-separated token and relies on an exact match —
	# confirm this is the intended existence check.
	for confset in $(zfs list); do
		if [[ $dataset = $confset ]]; then
			found=1
			break
		fi
	done
	[[ $found -eq 0 ]] && return 2
	# Apply the suite-wide defaults only when they are configured.
	if [[ -n $COMPRESSION_PROP ]]; then
		dataset_setprop $dataset compression $COMPRESSION_PROP || \
			return 3
		log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
	fi
	if [[ -n $CHECKSUM_PROP ]]; then
		dataset_setprop $dataset checksum $CHECKSUM_PROP || \
			return 3
		log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
	fi
	return 0
}
657
658#
659# Check a numeric assertion
660# @parameter: $@ the assertion to check
661# @output: big loud notice if assertion failed
662# @use: log_fail
663#
# Evaluate the arguments as an arithmetic expression; fail the test
# (printing the expression) when it is false.
function assert
{
	if ! (($@)); then
		log_fail "$@"
	fi
}
668
669#
670# Function to format partition size of a disk
671# Given a disk cxtxdx reduces all partitions
672# to 0 size
673#
# Shrink every data slice of the given cxtxdx disk to zero size.
# Slice 2 is deliberately not touched.
function zero_partitions
{
	typeset disk=$1
	typeset slice

	for slice in 0 1 3 4 5 6 7; do
		set_partition $slice "" 0mb $disk
	done
}
684
685#
686# Given a slice, size and disk, this function
687# formats the slice to the specified size.
688# Size should be specified with units as per
689# the `format` command requirements eg. 100mb 3gb
690#
691function set_partition #<slice_num> <slice_start> <size_plus_units>  <whole_disk_name>
692{
693	typeset -i slicenum=$1
694	typeset start=$2
695	typeset size=$3
696	typeset disk=$4
697	[[ -z $slicenum || -z $size || -z $disk ]] && \
698	    log_fail "The slice, size or disk name is unspecified."
699	typeset format_file=/var/tmp/format_in.$$
700
701	echo "partition" >$format_file
702	echo "$slicenum" >> $format_file
703	echo "" >> $format_file
704	echo "" >> $format_file
705	echo "$start" >> $format_file
706	echo "$size" >> $format_file
707	echo "label" >> $format_file
708	echo "" >> $format_file
709	echo "q" >> $format_file
710	echo "q" >> $format_file
711
712	format -e -s -d $disk -f $format_file
713	typeset ret_val=$?
714	rm -f $format_file
715	[[ $ret_val -ne 0 ]] && \
716	    log_fail "Unable to format $disk slice $slicenum to $size"
717	return 0
718}
719
720#
721# Get the end cyl of the given slice
722#
function get_endslice #<disk> <slice>
{
	typeset disk=$1
	typeset slice=$2
	if [[ -z $disk || -z $slice ]] ; then
		log_fail "The disk name or slice number is unspecified."
	fi

	# Normalize /dev/dsk, /dev/rdsk and cxtxdxsx forms down to the
	# bare cxtxdx device name.
	disk=${disk#/dev/dsk/}
	disk=${disk#/dev/rdsk/}
	disk=${disk%s*}

	# Sectors per cylinder, read from the backup slice (s2) label.
	typeset -i ratio=0
	ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
		grep "sectors\/cylinder" | \
		awk '{print $2}')

	if ((ratio == 0)); then
		return
	fi

	# Field 6 of 'prtvtoc -h' is the slice's last sector — TODO
	# confirm against prtvtoc(1M) output on the target platform.
	typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
		nawk -v token="$slice" '{if ($1==token) print $6}')

	# Convert the final sector to a whole cylinder count.
	((endcyl = (endcyl + 1) / ratio))
	echo $endcyl
}
750
751
752#
753# Given a size,disk and total slice number,  this function formats the
754# disk slices from 0 to the total slice number with the same specified
755# size.
756#
function partition_disk	#<slice_size> <whole_disk_name>	<total_slices>
{
	typeset -i i=0
	typeset slice_size=$1
	typeset disk_name=$2
	typeset total_slices=$3
	typeset cyl

	zero_partitions $disk_name
	while ((i < $total_slices)); do
		# Slice 2 is skipped — conventionally the whole-disk
		# (backup) slice.
		if ((i == 2)); then
			((i = i + 1))
			continue
		fi
		# Start each slice where the previous one ended ($cyl is
		# empty for the first slice, so it starts at the default).
		set_partition $i "$cyl" $slice_size $disk_name
		cyl=$(get_endslice $disk_name $i)
		((i = i+1))
	done
}
776
777#
778# This function continues to write to a filenum number of files into dirnum
779# number of directories until either file_write returns an error or the
780# maximum number of files per directory have been written.
781#
782# Usage:
783# fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
784#
785# Return value: 0 on success
786#		non 0 on error
787#
788# Where :
789#	destdir:    is the directory where everything is to be created under
790#	dirnum:	    the maximum number of subdirectories to use, -1 no limit
791#	filenum:    the maximum number of files per subdirectory
792#	bytes:	    number of bytes to write
793#	num_writes: numer of types to write out bytes
794#	data:	    the data that will be writen
795#
796#	E.g.
797#	file_fs /testdir 20 25 1024 256 0
798#
799# Note: bytes * num_writes equals the size of the testfile
800#
function fill_fs # destdir dirnum filenum bytes num_writes data
{
	typeset destdir=${1:-$TESTDIR}
	typeset -i dirnum=${2:-50}
	typeset -i filenum=${3:-50}
	typeset -i bytes=${4:-8192}
	typeset -i num_writes=${5:-10240}
	typeset -i data=${6:-0}

	typeset -i odirnum=1	# outer-loop flag; cleared to stop filling
	typeset -i idirnum=0	# index of the subdirectory being filled
	typeset -i fn=0		# file index within the current subdirectory
	typeset -i retval=0

	log_must mkdir -p $destdir/$idirnum
	while (($odirnum > 0)); do
		# dirnum < 0 means no limit on the number of directories.
		if ((dirnum >= 0 && idirnum >= dirnum)); then
			odirnum=0
			break
		fi
		# $TESTFILE is expected to be provided by the environment.
		file_write -o create -f $destdir/$idirnum/$TESTFILE.$fn \
		    -b $bytes -c $num_writes -d $data
		retval=$?
		if (($retval != 0)); then
			# Stop on the first write error (e.g. out of
			# space) and report it to the caller.
			odirnum=0
			break
		fi
		if (($fn >= $filenum)); then
			# Current directory is full; start the next one.
			fn=0
			((idirnum = idirnum + 1))
			log_must mkdir -p $destdir/$idirnum
		else
			((fn = fn + 1))
		fi
	done
	return $retval
}
838
839#
840# Simple function to get the specified property. If unable to
841# get the property then exits.
842#
843# Note property is in 'parsable' format (-p)
844#
function get_prop # property dataset
{
	typeset prop_val
	typeset prop=$1
	typeset dataset=$2

	# -p: parsable (exact) values; -H: no header, value column only.
	prop_val=$(zfs get -pH -o value $prop $dataset 2>/dev/null)
	if [[ $? -ne 0 ]]; then
		log_note "Unable to get $prop property for dataset " \
		"$dataset"
		return 1
	fi

	echo $prop_val
	return 0
}
861
862#
863# Simple function to get the specified property of pool. If unable to
864# get the property then exits.
865#
function get_pool_prop # property pool
{
	typeset prop_val
	typeset prop=$1
	typeset pool=$2

	if poolexists $pool ; then
		# Parse the value column from the last line of output.
		# NOTE(review): $? below reflects the awk stage of the
		# pipeline, not zpool; pool existence is already guarded
		# by poolexists above.
		prop_val=$(zpool get $prop $pool 2>/dev/null | tail -1 | \
			awk '{print $3}')
		if [[ $? -ne 0 ]]; then
			log_note "Unable to get $prop property for pool " \
			"$pool"
			return 1
		fi
	else
		log_note "Pool $pool does not exist."
		return 1
	fi

	echo $prop_val
	return 0
}
888
889# Return 0 if a pool exists; $? otherwise
890#
891# $1 - pool name
892
# Predicate: does pool $1 exist?  An empty name is logged and treated
# as "does not exist".
function poolexists
{
	typeset pool=$1

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	zpool get name "$pool" > /dev/null 2>&1
}
905
906# Return 0 if all the specified datasets exist; $? otherwise
907#
908# $1-n  dataset name
# Succeed only if every named dataset exists; the failing 'zfs get'
# status is propagated for the first missing one.
function datasetexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	typeset ds
	for ds in "$@"; do
		zfs get name $ds > /dev/null 2>&1 || \
			return $?
	done

	return 0
}
924
925# return 0 if none of the specified datasets exists, otherwise return 1.
926#
927# $1-n  dataset name
# Succeed only if none of the named datasets exists.
function datasetnonexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	typeset ds
	for ds in "$@"; do
		zfs list -H -t filesystem,snapshot,volume $ds \
		    > /dev/null 2>&1 && return 1
	done

	return 0
}
943
944#
945# Given a mountpoint, or a dataset name, determine if it is shared.
946#
947# Returns 0 if shared, 1 otherwise.
948#
function is_shared
{
	typeset fs=$1
	typeset mtpt

	# A dataset name (no leading '/') is first resolved to its
	# mountpoint; datasets without a usable mountpoint cannot be
	# shared.
	if [[ $fs != "/"* ]] ; then
		if datasetnonexists "$fs" ; then
			return 1
		else
			mtpt=$(get_prop mountpoint "$fs")
			case $mtpt in
				none|legacy|-) return 1
					;;
				*)	fs=$mtpt
					;;
			esac
		fi
	fi

	# Field 2 of share(1M) output is the shared pathname.
	for mtpt in `share | awk '{print $2}'` ; do
		if [[ $mtpt == $fs ]] ; then
			return 0
		fi
	done

	# Not shared — report the NFS server state to aid debugging.
	typeset stat=$(svcs -H -o STA nfs/server:default)
	if [[ $stat != "ON" ]]; then
		log_note "Current nfs/server status: $stat"
	fi

	return 1
}
981
982#
983# Given a mountpoint, determine if it is not shared.
984#
985# Returns 0 if not shared, 1 otherwise.
986#
# Inverse of is_shared: succeed only when the mountpoint is NOT shared.
function not_shared
{
	typeset fs=$1

	if is_shared $fs; then
		return 1
	fi

	return 0
}
998
999#
1000# Helper function to unshare a mountpoint.
1001#
# Unshare $1 if (and only if) it is currently shared; always succeeds
# unless the unshare itself fails.
function unshare_fs #fs
{
	typeset fs=$1

	if is_shared $fs; then
		log_must zfs unshare $fs
	fi

	return 0
}
1013
1014#
1015# Check NFS server status and trigger it online.
1016#
function setup_nfs_server
{
	# Cannot share directory in non-global zone.
	#
	if ! is_global_zone; then
		log_note "Cannot trigger NFS server by sharing in LZ."
		return
	fi

	typeset nfs_fmri="svc:/network/nfs/server:default"
	if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
		#
		# Only really sharing operation can enable NFS server
		# to online permanently.
		#
		typeset dummy=/tmp/dummy

		if [[ -d $dummy ]]; then
			log_must rm -rf $dummy
		fi

		log_must mkdir $dummy
		log_must share $dummy

		#
		# Waiting for fmri's status to be the final status.
		# Otherwise, in transition, an asterisk (*) is appended for
		# instances, unshare will reverse status to 'DIS' again.
		#
		# Waiting for 1's at least.
		#
		log_must sleep 1
		timeout=10
		# NOTE(review): '[[ timeout -ne 0 ]]' relies on ksh
		# evaluating the bare word arithmetically as a variable —
		# confirm if this library is ever run under another shell.
		while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
		do
			log_must sleep 1

			((timeout -= 1))
		done

		# The server stays online even after the dummy share is
		# removed.
		log_must unshare $dummy
		log_must rm -rf $dummy
	fi

	log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
}
1063
1064#
1065# To verify whether calling process is in global zone
1066#
1067# Return 0 if in global zone, 1 in non-global zone
1068#
# Succeed when running in the global zone; zonename errors (suppressed)
# yield an empty name and therefore a non-global result.
function is_global_zone
{
	typeset zone=$(zonename 2>/dev/null)
	[[ $zone == "global" ]]
}
1077
1078#
1079# Verify whether test is permitted to run from
1080# global zone, local zone, or both
1081#
1082# $1 zone limit, could be "global", "local", or "both"(no limit)
1083#
1084# Return 0 if permitted, otherwise exit with log_unsupported
1085#
function verify_runnable # zone limit
{
	typeset limit=$1

	# No limit given: the test may run anywhere.
	[[ -z $limit ]] && return 0

	if is_global_zone ; then
		case $limit in
			global|both)
				;;
			local)	log_unsupported "Test is unable to run from "\
					"global zone."
				;;
			*)	log_note "Warning: unknown limit $limit - " \
					"use both."
				;;
		esac
	else
		case $limit in
			local|both)
				;;
			global)	log_unsupported "Test is unable to run from "\
					"local zone."
				;;
			*)	log_note "Warning: unknown limit $limit - " \
					"use both."
				;;
		esac

		# In a local zone, also make sure the delegated test pools
		# are mounted before the test starts.
		reexport_pool
	fi

	return 0
}
1120
# Return 0 if the pool is created successfully; $? otherwise
# Note: In local zones, this function should return 0 silently.
1123#
1124# $1 - pool name
1125# $2-n - [keyword] devs_list
1126
function create_pool #pool devs_list
{
	# Strip any dataset component: only the pool name is wanted.
	typeset pool=${1%%/*}

	shift

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 1
	fi

	# Recreate from scratch if a pool of this name already exists.
	if poolexists $pool ; then
		destroy_pool $pool
	fi

	if is_global_zone ; then
		[[ -d /$pool ]] && rm -rf /$pool
		log_must zpool create -f $pool $@
	fi

	return 0
}
1149
# Return 0 if the pool is destroyed successfully; 1 if no pool name was
# given or the pool does not exist.
1152#
1153# $1 - pool name
1154# Destroy pool with the given parameters.
1155
# Destroy pool $1, retrying on transient failures; a silent no-op
# success when invoked from a local zone.
function destroy_pool #pool
{
	typeset pool=${1%%/*}
	typeset mtpt

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	if is_global_zone ; then
		if poolexists "$pool" ; then
			# Record the mountpoint before the pool goes away.
			mtpt=$(get_prop mountpoint "$pool")

			# At times, syseventd activity can cause attempts to
			# destroy a pool to fail with EBUSY. We retry a few
			# times allowing failures before requiring the destroy
			# to succeed.
			typeset -i wait_time=10 ret=1 count=0
			must=""
			while [[ $ret -ne 0 ]]; do
				$must zpool destroy -f $pool
				ret=$?
				[[ $ret -eq 0 ]] && break
				log_note "zpool destroy failed with $ret"
				# After 7 tolerated failures, wrap the next
				# attempt in log_must so it becomes fatal.
				[[ count++ -ge 7 ]] && must=log_must
				sleep $wait_time
			done

			# Remove the leftover mountpoint directory.
			[[ -d $mtpt ]] && \
				log_must rm -rf $mtpt
		else
			log_note "Pool does not exist. ($pool)"
			return 1
		fi
	fi

	return 0
}
1195
1196#
1197# Firstly, create a pool with 5 datasets. Then, create a single zone and
1198# export the 5 datasets to it. In addition, we also add a ZFS filesystem
1199# and a zvol device to the zone.
1200#
1201# $1 zone name
1202# $2 zone root directory prefix
1203# $3 zone ip
1204#
function zfs_zones_setup #zone_name zone_root zone_ip
{
	typeset zone_name=${1:-$(hostname)-z}
	typeset zone_root=${2:-"/zone_root"}
	# NOTE(review): zone_ip is accepted but never used below.
	typeset zone_ip=${3:-"10.1.1.10"}
	typeset prefix_ctr=$ZONE_CTR
	typeset pool_name=$ZONE_POOL
	typeset -i cntctr=5
	typeset -i i=0

	# Create pool and 5 container within it
	#
	[[ -d /$pool_name ]] && rm -rf /$pool_name
	log_must zpool create -f $pool_name $DISKS
	while ((i < cntctr)); do
		log_must zfs create $pool_name/$prefix_ctr$i
		((i += 1))
	done

	# create a zvol
	log_must zfs create -V 1g $pool_name/zone_zvol

	#
	# If current system support slog, add slog device for pool
	#
	if verify_slog_support ; then
		typeset sdevs="/var/tmp/sdev1 /var/tmp/sdev2"
		log_must mkfile $MINVDEVSIZE $sdevs
		log_must zpool add $pool_name log mirror $sdevs
	fi

	# this isn't supported just yet.
	# Create a filesystem. In order to add this to
	# the zone, it must have it's mountpoint set to 'legacy'
	# log_must zfs create $pool_name/zfs_filesystem
	# log_must zfs set mountpoint=legacy $pool_name/zfs_filesystem

	[[ -d $zone_root ]] && \
		log_must rm -rf $zone_root/$zone_name
	[[ ! -d $zone_root ]] && \
		log_must mkdir -p -m 0700 $zone_root/$zone_name

	# Create zone configure file and configure the zone
	#
	typeset zone_conf=/tmp/zone_conf.$$
	echo "create" > $zone_conf
	echo "set zonepath=$zone_root/$zone_name" >> $zone_conf
	echo "set autoboot=true" >> $zone_conf
	i=0
	# Delegate each of the containers as a dataset of the zone.
	while ((i < cntctr)); do
		echo "add dataset" >> $zone_conf
		echo "set name=$pool_name/$prefix_ctr$i" >> \
			$zone_conf
		echo "end" >> $zone_conf
		((i += 1))
	done

	# add our zvol to the zone
	echo "add device" >> $zone_conf
	echo "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf
	echo "end" >> $zone_conf

	# add a corresponding zvol rdsk to the zone
	echo "add device" >> $zone_conf
	echo "set match=/dev/zvol/rdsk/$pool_name/zone_zvol" >> $zone_conf
	echo "end" >> $zone_conf

	# once it's supported, we'll add our filesystem to the zone
	# echo "add fs" >> $zone_conf
	# echo "set type=zfs" >> $zone_conf
	# echo "set special=$pool_name/zfs_filesystem" >> $zone_conf
	# echo "set dir=/export/zfs_filesystem" >> $zone_conf
	# echo "end" >> $zone_conf

	echo "verify" >> $zone_conf
	echo "commit" >> $zone_conf
	log_must zonecfg -z $zone_name -f $zone_conf
	log_must rm -f $zone_conf

	# Install the zone
	zoneadm -z $zone_name install
	if (($? == 0)); then
		log_note "SUCCESS: zoneadm -z $zone_name install"
	else
		log_fail "FAIL: zoneadm -z $zone_name install"
	fi

	# Install sysidcfg file
	#
	typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
	echo "system_locale=C" > $sysidcfg
	echo  "terminal=dtterm" >> $sysidcfg
	echo  "network_interface=primary {" >> $sysidcfg
	echo  "hostname=$zone_name" >> $sysidcfg
	echo  "}" >> $sysidcfg
	echo  "name_service=NONE" >> $sysidcfg
	echo  "root_password=mo791xfZ/SFiw" >> $sysidcfg
	echo  "security_policy=NONE" >> $sysidcfg
	echo  "timezone=US/Eastern" >> $sysidcfg

	# Boot this zone
	log_must zoneadm -z $zone_name boot
}
1308
1309#
1310# Reexport TESTPOOL & TESTPOOL(1-4)
1311#
function reexport_pool
{
	typeset -i cntctr=5
	typeset -i i=0

	# Walk the five zone containers: container 0 is exported via the
	# plain TESTPOOL variable, containers 1-4 via TESTPOOL1..TESTPOOL4.
	while ((i < cntctr)); do
		if ((i == 0)); then
			TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
			if ! ismounted $TESTPOOL; then
				log_must zfs mount $TESTPOOL
			fi
		else
			# eval is required because the variable name itself
			# (TESTPOOL1, TESTPOOL2, ...) is computed from $i.
			eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
			if eval ! ismounted \$TESTPOOL$i; then
				log_must eval zfs mount \$TESTPOOL$i
			fi
		fi
		((i += 1))
	done
}
1332
1333#
1334# Verify a given disk is online or offline
1335#
# Return 0 if pool/disk matches expected state, 1 otherwise
1337#
function check_state # pool disk state{online,offline}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	# Succeed only when the device's line in the status output also
	# mentions the requested state (case-insensitive).
	if zpool status -v $pool | grep "$disk" | \
	    grep -i "$state" > /dev/null 2>&1; then
		return 0
	fi
	return 1
}
1349
1350#
1351# Get the mountpoint of snapshot
1352# For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
1353# as its mountpoint
1354#
function snapshot_mountpoint
{
	typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	# A snapshot name must contain '@' separating filesystem and snap.
	case $dataset in
	*@*)
		;;
	*)
		log_fail "Error name of snapshot '$dataset'."
		;;
	esac

	typeset fs=${dataset%@*}
	typeset snap=${dataset#*@}

	# Both halves must be non-empty.
	[[ -n $fs && -n $snap ]] || \
		log_fail "Error name of snapshot '$dataset'."

	echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
}
1372
1373#
1374# Given a pool and file system, this function will verify the file system
1375# using the zdb internal tool. Note that the pool is exported and imported
1376# to ensure it has consistent state.
1377#
function verify_filesys # pool filesystem dir
{
	typeset pool="$1"
	typeset filesys="$2"
	typeset zdbout="/tmp/zdbout.$$"

	shift
	shift
	typeset dirs=$@
	typeset search_path=""

	log_note "Calling zdb to verify filesystem '$filesys'"
	# Unmount and export so the pool is quiesced; the re-import below
	# gives zdb a consistent on-disk state to examine.
	zfs unmount -a > /dev/null 2>&1
	log_must zpool export $pool

	# Optional extra directories become '-d <dir>' device search paths
	# for 'zpool import'.
	if [[ -n $dirs ]] ; then
		for dir in $dirs ; do
			search_path="$search_path -d $dir"
		done
	fi

	log_must zpool import $search_path $pool

	# zdb -cudi dumps/verifies the dataset; see zdb(1M) for the exact
	# meaning of each flag.  Output is kept for the failure report.
	zdb -cudi $filesys > $zdbout 2>&1
	if [[ $? != 0 ]]; then
		log_note "Output: zdb -cudi $filesys"
		cat $zdbout
		log_fail "zdb detected errors with: '$filesys'"
	fi

	log_must zfs mount -a
	log_must rm -rf $zdbout
}
1411
1412#
# Given a pool, this function lists all disks in the pool
1414#
function get_disklist # pool
{
	typeset dev
	typeset devs=""

	# Skip the 4-line iostat header, the "-----" separator rows and
	# the vdev grouping keywords; what remains are leaf device names.
	for dev in $(zpool iostat -v $1 | nawk '(NR >4) {print $1}' | \
	    grep -v "\-\-\-\-\-" | \
	    egrep -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$"); do
		devs="$devs $dev"
	done

	echo $devs
}
1425
1426# /**
1427#  This function kills a given list of processes after a time period. We use
1428#  this in the stress tests instead of STF_TIMEOUT so that we can have processes
1429#  run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
1430#  would be listed as FAIL, which we don't want : we're happy with stress tests
1431#  running for a certain amount of time, then finishing.
1432#
1433# @param $1 the time in seconds after which we should terminate these processes
1434# @param $2..$n the processes we wish to terminate.
1435# */
function stress_timeout
{
	typeset -i timeout=$1
	shift
	typeset pids="$@"
	typeset target

	log_note "Waiting for child processes($pids). " \
		"It could last dozens of minutes, please be patient ..."
	log_must sleep $timeout

	log_note "Killing child processes after ${timeout} stress timeout."
	for target in $pids; do
		# Only signal processes that are still alive.
		if ps -p $target > /dev/null 2>&1; then
			log_must kill -USR1 $target
		fi
	done
}
1455
1456#
1457# Verify a given hotspare disk is inuse or avail
1458#
# Return 0 if pool/disk matches expected state, 1 otherwise
1460#
function check_hotspare_state # pool disk state{inuse,avail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3
	# Fix: declare cur_state local instead of leaking a global.
	typeset cur_state

	cur_state=$(get_device_state $pool $disk "spares")

	if [[ $state != ${cur_state} ]]; then
		return 1
	fi
	return 0
}
1474
1475#
1476# Verify a given slog disk is inuse or avail
1477#
# Return 0 if pool/disk matches expected state, 1 otherwise
1479#
function check_slog_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3
	# Fix: declare cur_state local instead of leaking a global.
	typeset cur_state

	cur_state=$(get_device_state $pool $disk "logs")

	if [[ $state != ${cur_state} ]]; then
		return 1
	fi
	return 0
}
1493
1494#
1495# Verify a given vdev disk is inuse or avail
1496#
# Return 0 if pool/disk matches expected state, 1 otherwise
1498#
function check_vdev_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3
	# Fix: declare cur_state local instead of leaking a global.
	typeset cur_state

	cur_state=$(get_device_state $pool $disk)

	if [[ $state != ${cur_state} ]]; then
		return 1
	fi
	return 0
}
1512
1513#
1514# Check the output of 'zpool status -v <pool>',
1515# and to see if the content of <token> contain the <keyword> specified.
1516#
# Return 0 if it contains the keyword, 1 otherwise
1518#
function check_pool_status # pool token keyword
{
	typeset pool=$1
	typeset token=$2
	typeset keyword=$3

	# Pull the "<token>: ..." line out of the status output, then test
	# whether it mentions the keyword (case-insensitive).
	if zpool status -v "$pool" 2>/dev/null | \
	    nawk -v token="$token:" '($1==token) {print $0}' | \
	    grep -i "$keyword" > /dev/null 2>&1; then
		return 0
	fi
	return 1
}
1531
1532#
1533# These 5 following functions are instance of check_pool_status()
1534#	is_pool_resilvering - to check if the pool is resilver in progress
1535#	is_pool_resilvered - to check if the pool is resilver completed
1536#	is_pool_scrubbing - to check if the pool is scrub in progress
1537#	is_pool_scrubbed - to check if the pool is scrub completed
1538#	is_pool_scrub_stopped - to check if the pool is scrub stopped
1539#
function is_pool_resilvering #pool
{
	# The function's status is check_pool_status's status.
	check_pool_status "$1" "scan" "resilver in progress since "
}
1545
function is_pool_resilvered #pool
{
	# The function's status is check_pool_status's status.
	check_pool_status "$1" "scan" "resilvered "
}
1551
function is_pool_scrubbing #pool
{
	# The function's status is check_pool_status's status.
	check_pool_status "$1" "scan" "scrub in progress since "
}
1557
function is_pool_scrubbed #pool
{
	# The function's status is check_pool_status's status.
	check_pool_status "$1" "scan" "scrub repaired"
}
1563
function is_pool_scrub_stopped #pool
{
	# The function's status is check_pool_status's status.
	check_pool_status "$1" "scan" "scrub canceled"
}
1569
1570#
# Use create_pool()/destroy_pool() to clean up the information
# on the given disk to avoid slice overlapping.
1573#
function cleanup_devices #vdevs
{
	typeset pool="foopool$$"

	# Remove any leftover scratch pool with the same name first.
	poolexists $pool && destroy_pool $pool

	# Creating and destroying a throwaway pool rewrites the labels on
	# the given devices.
	create_pool $pool $@
	destroy_pool $pool

	return 0
}
1587
1588#
1589# Verify the rsh connectivity to each remote host in RHOSTS.
1590#
1591# Return 0 if remote host is accessible; otherwise 1.
1592# $1 remote host name
1593# $2 username
1594#
function verify_rsh_connect #rhost, username
{
	typeset rhost=$1
	typeset username=$2
	typeset rsh_cmd="rsh -n"
	typeset cur_user=

	# The host must be resolvable via the hosts database.
	if ! getent hosts $rhost >/dev/null 2>&1; then
		log_note "$rhost cannot be found from" \
			"administrative database."
		return 1
	fi

	# It must answer a ping (trailing arg is the Solaris ping timeout).
	if ! ping $rhost 3 >/dev/null 2>&1; then
		log_note "$rhost is not reachable."
		return 1
	fi

	if ((${#username} != 0)); then
		rsh_cmd="$rsh_cmd -l $username"
		cur_user="given user \"$username\""
	else
		cur_user="current user \"`logname`\""
	fi

	# Finally, a trivial remote command must succeed.
	if ! $rsh_cmd $rhost true; then
		log_note "rsh to $rhost is not accessible" \
			"with $cur_user."
		return 1
	fi

	return 0
}
1630
1631#
1632# Verify the remote host connection via rsh after rebooting
1633# $1 remote host
1634#
function verify_remote
{
	# Fix: rhost was assigned without typeset and leaked as a global.
	typeset rhost=$1
	typeset -i count=0

	#
	# Wait for the remote system to finish rebooting.  Each failed
	# probe sleeps 150 seconds and at most 5 extra iterations are
	# allowed, so the overall timeout is roughly 12.5 minutes.
	# This is an approximate number.
	#
	while ! verify_rsh_connect $rhost; do
		sleep 150
		((count = count + 1))
		if ((count > 5)); then
			return 1
		fi
	done
	return 0
}
1656
1657#
1658# Replacement function for /usr/bin/rsh. This function will include
1659# the /usr/bin/rsh and meanwhile return the execution status of the
1660# last command.
1661#
1662# $1 usrname passing down to -l option of /usr/bin/rsh
1663# $2 remote machine hostname
1664# $3... command string
1665#
1666
function rsh_status
{
	typeset ruser=$1
	typeset rhost=$2
	typeset -i ret=0
	typeset cmd_str=""
	typeset rsh_str=""
	typeset err_file
	typeset out

	shift; shift
	cmd_str="$@"

	err_file=/tmp/${rhost}.$$.err
	if ((${#ruser} == 0)); then
		rsh_str="rsh -n"
	else
		rsh_str="rsh -n -l $ruser"
	fi

	# The remote ksh prints the wrapped command's exit status to
	# stderr ("status=N") so it can be recovered below.
	$rsh_str $rhost /usr/bin/ksh -c "'$cmd_str; \
		print -u 2 \"status=\$?\"'" \
		>/dev/null 2>$err_file
	ret=$?
	if (($ret != 0)); then
		cat $err_file
		# Fix: dropped the undefined $std_file from the cleanup.
		rm -f $err_file
		log_fail  "rsh itself failed with exit code $ret..."
	fi

	# Recover the remote command's status; fall back to failure if the
	# marker line is missing so the arithmetic below never sees an
	# empty string.
	out=$(grep -v 'print -u 2' $err_file | grep 'status=' | \
		cut -d= -f2)
	ret=${out:-1}
	(($ret != 0)) && cat $err_file >&2

	rm -f $err_file >/dev/null 2>&1
	return $ret
}
1702
1703#
1704# Get the SUNWstc-fs-zfs package installation path in a remote host
1705# $1 remote host name
1706#
function get_remote_pkgpath
{
	typeset rhost=$1
	typeset pkgpath

	# The BASEDIR line of pkginfo holds the installation path.
	pkgpath=$(rsh -n $rhost "pkginfo -l SUNWstc-fs-zfs | grep BASEDIR: |\
			cut -d: -f2")

	# Unquoted expansion trims the surrounding whitespace.
	echo $pkgpath
}
1717
1718#/**
1719# A function to find and locate free disks on a system or from given
1720# disks as the parameter. It works by locating disks that are in use
1721# as swap devices and dump devices, and also disks listed in /etc/vfstab
1722#
1723# $@ given disks to find which are free, default is all disks in
1724# the test system
1725#
1726# @return a string containing the list of available disks
1727#*/
function find_disks
{
	sfi=/tmp/swaplist.$$
	dmpi=/tmp/dumpdev.$$
	max_finddisksnum=${MAX_FINDDISKSNUM:-6}

	swap -l > $sfi
	dumpadm > $dmpi 2>/dev/null

# write an awk script that can process the output of format
# to produce a list of disks we know about. Note that we have
# to escape "$2" so that the shell doesn't interpret it while
# we're creating the awk script.
# -------------------
	cat > /tmp/find_disks.awk <<EOF
#!/bin/nawk -f
	BEGIN { FS="."; }

	/^Specify disk/{
		searchdisks=0;
	}

	{
		if (searchdisks && \$2 !~ "^$"){
			split(\$2,arr," ");
			print arr[1];
		}
	}

	/^AVAILABLE DISK SELECTIONS:/{
		searchdisks=1;
	}
EOF
#---------------------

	chmod 755 /tmp/find_disks.awk
	disks=${@:-$(echo "" | format -e 2>/dev/null | /tmp/find_disks.awk)}
	rm /tmp/find_disks.awk

	unused=""
	# Fix: initialize the accumulator; it previously inherited any
	# value left over from the environment or a prior call, so stale
	# disks could leak into the result.
	unused_candidates=""
	for disk in $disks; do
	# Check for mounted
		grep "${disk}[sp]" /etc/mnttab >/dev/null
		(($? == 0)) && continue
	# Check for swap
		grep "${disk}[sp]" $sfi >/dev/null
		(($? == 0)) && continue
	# check for dump device
		grep "${disk}[sp]" $dmpi >/dev/null
		(($? == 0)) && continue
	# check to see if this disk hasn't been explicitly excluded
	# by a user-set environment variable
		echo "${ZFS_HOST_DEVICES_IGNORE}" | grep "${disk}" > /dev/null
		(($? == 0)) && continue
		unused_candidates="$unused_candidates $disk"
	done
	rm $sfi
	rm $dmpi

# now just check to see if those disks do actually exist
# by looking for a device pointing to the first slice in
# each case. limit the number to max_finddisksnum
	count=0
	for disk in $unused_candidates; do
		if [ -b /dev/dsk/${disk}s0 ]; then
		if [ $count -lt $max_finddisksnum ]; then
			unused="$unused $disk"
			# do not impose limit if $@ is provided
			[[ -z $@ ]] && ((count = count + 1))
		fi
		fi
	done

# finally, return our disk list
	echo $unused
}
1804
1805#
1806# Add specified user to specified group
1807#
1808# $1 group name
1809# $2 user name
1810# $3 base of the homedir (optional)
1811#
function add_user #<group_name> <user_name> <basedir>
{
	typeset gname=$1
	typeset uname=$2
	typeset basedir=${3:-"/var/tmp"}

	# Both the group and the user name are mandatory.
	if [[ -z $gname || -z $uname ]]; then
		log_fail "group name or user name are not defined."
	fi

	log_must useradd -g $gname -d $basedir/$uname -m $uname

	return 0
}
1826
1827#
1828# Delete the specified user.
1829#
1830# $1 login name
1831# $2 base of the homedir (optional)
1832#
function del_user #<logname> <basedir>
{
	typeset user=$1
	typeset basedir=${2:-"/var/tmp"}

	[[ -z $user ]] && log_fail "login name is necessary."

	# Only remove the account when it actually exists.
	if id $user > /dev/null 2>&1; then
		log_must userdel $user
	fi

	# Clean up the home directory as well.
	[[ -d $basedir/$user ]] && rm -fr $basedir/$user

	return 0
}
1850
1851#
1852# Select valid gid and create specified group.
1853#
1854# $1 group name
1855#
function add_group #<group_name>
{
	typeset group=$1

	[[ -z $group ]] && log_fail "group name is necessary."

	# Probe gids starting at 100 until groupadd accepts one.
	typeset -i gid=100
	typeset -i rc
	while :; do
		groupadd -g $gid $group > /dev/null 2>&1
		rc=$?
		case $rc in
			0) return 0 ;;
			# exit 4: the gid is already taken - try the next.
			4) ((gid += 1)) ;;
			*) return 1 ;;
		esac
	done
}
1877
1878#
1879# Delete the specified group.
1880#
1881# $1 group name
1882#
function del_group #<group_name>
{
	typeset grp=$1

	[[ -z $grp ]] && log_fail "group name is necessary."

	# Renaming the group to its own name is a cheap existence probe.
	groupmod -n $grp $grp > /dev/null 2>&1
	case $? in
		# exit 6: the group does not exist - nothing to do.
		6) return 0 ;;
		# exit 9: the name is in use, i.e. it exists - delete it.
		9) log_must groupdel $grp ;;
		*) return 1 ;;
	esac

	return 0
}
1902
1903#
1904# This function will return true if it's safe to destroy the pool passed
1905# as argument 1. It checks for pools based on zvols and files, and also
1906# files contained in a pool that may have a different mountpoint.
1907#
1908function safe_to_destroy_pool { # $1 the pool name
1909
1910	typeset pool=""
1911	typeset DONT_DESTROY=""
1912
1913	# We check that by deleting the $1 pool, we're not
1914	# going to pull the rug out from other pools. Do this
1915	# by looking at all other pools, ensuring that they
1916	# aren't built from files or zvols contained in this pool.
1917
1918	for pool in $(zpool list -H -o name)
1919	do
1920		ALTMOUNTPOOL=""
1921
1922		# this is a list of the top-level directories in each of the
1923		# files that make up the path to the files the pool is based on
1924		FILEPOOL=$(zpool status -v $pool | grep /$1/ | \
1925			awk '{print $1}')
1926
1927		# this is a list of the zvols that make up the pool
1928		ZVOLPOOL=$(zpool status -v $pool | grep "/dev/zvol/dsk/$1$" \
1929		    | awk '{print $1}')
1930
1931		# also want to determine if it's a file-based pool using an
1932		# alternate mountpoint...
1933		POOL_FILE_DIRS=$(zpool status -v $pool | \
1934					grep / | awk '{print $1}' | \
1935					awk -F/ '{print $2}' | grep -v "dev")
1936
1937		for pooldir in $POOL_FILE_DIRS
1938		do
1939			OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
1940					grep "${pooldir}$" | awk '{print $1}')
1941
1942			ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
1943		done
1944
1945
1946		if [ ! -z "$ZVOLPOOL" ]
1947		then
1948			DONT_DESTROY="true"
1949			log_note "Pool $pool is built from $ZVOLPOOL on $1"
1950		fi
1951
1952		if [ ! -z "$FILEPOOL" ]
1953		then
1954			DONT_DESTROY="true"
1955			log_note "Pool $pool is built from $FILEPOOL on $1"
1956		fi
1957
1958		if [ ! -z "$ALTMOUNTPOOL" ]
1959		then
1960			DONT_DESTROY="true"
1961			log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
1962		fi
1963	done
1964
1965	if [ -z "${DONT_DESTROY}" ]
1966	then
1967		return 0
1968	else
1969		log_note "Warning: it is not safe to destroy $1!"
1970		return 1
1971	fi
1972}
1973
1974#
1975# Get the available ZFS compression options
1976# $1 option type zfs_set|zfs_compress
1977#
function get_compress_opts
{
	typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
			gzip-6 gzip-7 gzip-8 gzip-9"
	typeset valid_opts=""

	case $1 in
	zfs_compress)
		valid_opts="on lzjb"
		;;
	zfs_set)
		valid_opts="on off lzjb"
		;;
	esac

	# Append the gzip variants when the zfs utility knows about them.
	if zfs get 2>&1 | grep gzip >/dev/null 2>&1; then
		valid_opts="$valid_opts $GZIP_OPTS"
	fi
	echo "$valid_opts"
}
1996
1997#
1998# Verify zfs operation with -p option work as expected
1999# $1 operation, value could be create, clone or rename
2000# $2 dataset type, value could be fs or vol
2001# $3 dataset name
2002# $4 new dataset name
2003#
function verify_opt_p_ops
{
	typeset ops=$1
	typeset datatype=$2
	typeset dataset=$3
	typeset newdataset=$4

	if [[ $datatype != "fs" && $datatype != "vol" ]]; then
		log_fail "$datatype is not supported."
	fi

	# check parameters accordingly
	case $ops in
		create)
			# 'create' takes no source dataset; the name being
			# created is the (former) third argument.
			newdataset=$dataset
			dataset=""
			if [[ $datatype == "vol" ]]; then
				ops="create -V $VOLSIZE"
			fi
			;;
		clone)
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
					"when ops is $ops."
			fi
			# clone requires an existing snapshot as its source.
			log_must datasetexists $dataset
			log_must snapexists $dataset
			;;
		rename)
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
					"when ops is $ops."
			fi
			# rename source must exist and must not be a snapshot.
			log_must datasetexists $dataset
			log_mustnot snapexists $dataset
			;;
		*)
			log_fail "$ops is not supported."
			;;
	esac

	# make sure the upper level filesystem does not exist
	if datasetexists ${newdataset%/*} ; then
		log_must zfs destroy -rRf ${newdataset%/*}
	fi

	# without -p option, operation will fail
	log_mustnot zfs $ops $dataset $newdataset
	log_mustnot datasetexists $newdataset ${newdataset%/*}

	# with -p option, operation should succeed
	log_must zfs $ops -p $dataset $newdataset
	if ! datasetexists $newdataset ; then
		log_fail "-p option does not work for $ops"
	fi

	# when $ops is create or clone, redo the operation still return zero
	if [[ $ops != "rename" ]]; then
		log_must zfs $ops -p $dataset $newdataset
	fi

	return 0
}
2067
2068#
2069# Get configuration of pool
2070# $1 pool name
2071# $2 config name
2072#
function get_config
{
	typeset pool=$1
	typeset config=$2
	typeset alt_root
	# Fix: keep value local instead of leaking a global variable.
	typeset value

	if ! poolexists "$pool" ; then
		return 1
	fi
	# The last column of 'zpool list -H' is the altroot; '-' means the
	# pool is imported normally and plain 'zdb -C' can read its config.
	alt_root=$(zpool list -H $pool | awk '{print $NF}')
	if [[ $alt_root == "-" ]]; then
		value=$(zdb -C $pool | grep "$config:" | awk -F: \
		    '{print $2}')
	else
		value=$(zdb -e $pool | grep "$config:" | awk -F: \
		    '{print $2}')
	fi
	if [[ -n $value ]] ; then
		# Strip the single quotes zdb prints around string values.
		# Fix: the quote is escaped; the previous bare quote in
		# ${value#'} relied on ksh-specific parsing and is not
		# portable POSIX pattern syntax.
		value=${value#\'}
		value=${value%\'}
	fi
	echo $value

	return 0
}
2098
2099#
# Private function. Randomly select one of the items from the arguments.
2101#
2102# $1 count
2103# $2-n string
2104#
function _random_get
{
	typeset cnt=$1
	shift

	typeset items="$@"
	typeset -i idx
	# Pick a 1-based field index in [1, cnt].
	((idx = RANDOM % cnt + 1))

	typeset pick=$(echo "$items" | cut -f $idx -d ' ')
	echo $pick
}
2117
2118#
# Randomly select one item from the arguments, including the NONE string
2120#
#
# Randomly select one item from the arguments; one extra slot beyond the
# argument count stands for NONE (an empty selection).
#
function random_get_with_non
{
	typeset -i cnt=$#
	# Fix: '((cnt =+ 1))' assigned the literal value +1 to cnt, so
	# the selection range was always a single slot and only the first
	# argument could ever be returned.  '+= 1' extends the range by
	# the intended extra NONE slot.
	((cnt += 1))

	_random_get "$cnt" "$@"
}
2128
2129#
# Randomly select one item from the arguments, not including the NONE string
2131#
function random_get
{
	# Delegate to the helper with exactly as many slots as arguments.
	_random_get "$#" "$@"
}
2136
2137#
2138# Detect if the current system support slog
2139#
function verify_slog_support
{
	typeset dir=/tmp/disk.$$
	typeset pool=foo.$$
	typeset vdev=$dir/a
	typeset sdev=$dir/b
	typeset -i ret=0

	mkdir -p $dir
	mkfile $MINVDEVSIZE $vdev $sdev

	# A dry-run (-n) pool creation with a log vdev reveals whether
	# slog is supported without actually creating anything.
	zpool create -n $pool $vdev log $sdev > /dev/null 2>&1 || ret=1
	rm -r $dir

	return $ret
}
2158
2159#
2160# The function will generate a dataset name with specific length
2161# $1, the length of the name
2162# $2, the base string to construct the name
2163#
function gen_dataset_name
{
	typeset -i len=$1
	typeset basestr="$2"
	typeset -i baselen=${#basestr}
	typeset -i reps
	typeset l_name=""

	# Number of repetitions needed to reach (or just exceed) len.
	((reps = len / baselen))
	((len % baselen != 0)) && ((reps += 1))

	while ((reps > 0)); do
		l_name="${l_name}$basestr"
		((reps -= 1))
	done

	echo $l_name
}
2185
2186#
2187# Get cksum tuple of dataset
2188# $1 dataset name
2189#
2190# sample zdb output:
2191# Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
2192# DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
2193# lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
2194# fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
2195function datasetcksum
2196{
2197	typeset cksum
2198	sync
2199	cksum=$(zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" \
2200		| awk -F= '{print $7}')
2201	echo $cksum
2202}
2203
2204#
2205# Get cksum of file
# $1 file path
2207#
function checksum
{
	# The first field of cksum(1) output is the CRC value.
	cksum $1 | awk '{print $1}'
}
2214
2215#
2216# Get the given disk/slice state from the specific field of the pool
2217#
2218function get_device_state #pool disk field("", "spares","logs")
2219{
2220	typeset pool=$1
2221	typeset disk=${2#/dev/dsk/}
2222	typeset field=${3:-$pool}
2223
2224	state=$(zpool status -v "$pool" 2>/dev/null | \
2225		nawk -v device=$disk -v pool=$pool -v field=$field \
2226		'BEGIN {startconfig=0; startfield=0; }
2227		/config:/ {startconfig=1}
2228		(startconfig==1) && ($1==field) {startfield=1; next;}
2229		(startfield==1) && ($1==device) {print $2; exit;}
2230		(startfield==1) &&
2231		($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
2232	echo $state
2233}
2234
2235
2236#
2237# print the given directory filesystem type
2238#
2239# $1 directory name
2240#
function get_fstype
{
	typeset dir=$1

	[[ -n $dir ]] || log_fail "Usage: get_fstype <directory>"

	#
	# 'df -n' prints "<mountpoint> : <fstype>"; the third field is the
	# filesystem type.
	#
	df -n $dir | awk '{print $3}'
}
2255
2256#
2257# Given a disk, label it to VTOC regardless what label was on the disk
2258# $1 disk
2259#
function labelvtoc
{
	typeset disk=$1
	if [[ -z $disk ]]; then
		log_fail "The disk name is unspecified."
	fi
	typeset label_file=/var/tmp/labelvtoc.$$
	typeset arch=$(uname -p)

	# Build the command script fed to format(1M); the answers differ
	# per architecture.  NOTE(review): "0" presumably selects the
	# VTOC/SMI label type at format's prompt - confirm with format(1M).
	if [[ $arch == "i386" ]]; then
		echo "label" > $label_file
		echo "0" >> $label_file
		echo "" >> $label_file
		echo "q" >> $label_file
		echo "q" >> $label_file

		# x86 disks need an fdisk partition before labeling.
		fdisk -B $disk >/dev/null 2>&1
		# wait a while for fdisk finishes
		sleep 60
	elif [[ $arch == "sparc" ]]; then
		echo "label" > $label_file
		echo "0" >> $label_file
		echo "" >> $label_file
		echo "" >> $label_file
		echo "" >> $label_file
		echo "q" >> $label_file
	else
		log_fail "unknown arch type"
	fi

	format -e -s -d $disk -f $label_file
	typeset -i ret_val=$?
	rm -f $label_file
	#
	# wait the format to finish
	#
	sleep 60
	if ((ret_val != 0)); then
		log_fail "unable to label $disk as VTOC."
	fi

	return 0
}
2303
2304#
2305# check if the system was installed as zfsroot or not
# return: 0 if true, otherwise false
2307#
function is_zfsroot
{
	# True iff 'df -n /' reports a zfs filesystem type for root.
	df -n / | grep zfs > /dev/null 2>&1
}
2313
2314#
2315# get the root filesystem name if it's zfsroot system.
2316#
2317# return: root filesystem name
function get_rootfs
{
	typeset rootfs=""

	# Find the dataset mounted at / with type zfs in mnttab.
	rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
		/etc/mnttab)
	[[ -n "$rootfs" ]] || log_fail "Can not get rootfs"

	# Sanity-check that the dataset is actually visible to zfs.
	if zfs list $rootfs > /dev/null 2>&1; then
		echo $rootfs
	else
		log_fail "This is not a zfsroot system."
	fi
}
2333
2334#
2335# get the rootfs's pool name
2336# return:
2337#       rootpool name
2338#
function get_rootpool
{
	typeset rootfs=""
	typeset rootpool=""

	# Find the dataset mounted at / with type zfs in mnttab.
	rootfs=$(awk '{if ($2 == "/" && $3 =="zfs") print $1}' \
		 /etc/mnttab)
	[[ -n "$rootfs" ]] || log_fail "Can not get rootpool"

	if zfs list $rootfs > /dev/null 2>&1; then
		# The pool name is everything before the first '/'.
		rootpool=${rootfs%%/*}
		echo $rootpool
	else
		log_fail "This is not a zfsroot system."
	fi
}
2356
2357#
2358# Check if the given device is physical device
2359#
function is_physical_device #device
{
	typeset device=${1#/dev/dsk/}
	device=${device#/dev/rdsk/}

	# Solaris ctd-style names: c<n>t<n>d<n> or c<n>d<n>.
	echo $device | egrep "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1
}
2368
2369#
2370# Get the directory path of given device
2371#
function get_device_dir #device
{
	typeset device=$1

	if is_physical_device $device ; then
		echo "/dev/dsk"
	else
		# For file-based vdevs return the parent directory.
		if [[ $device != "/" ]]; then
			device=${device%/*}
		fi
		echo $device
	fi
}
2385
2386#
2387# Get the package name
2388#
2389function get_package_name
2390{
2391	typeset dirpath=${1:-$STC_NAME}
2392
2393	echo "SUNWstc-${dirpath}" | /usr/bin/sed -e "s/\//-/g"
2394}
2395
2396#
2397# Get the word numbers from a string separated by white space
2398#
function get_word_count
{
	typeset text="$1"

	# Let the shell split on whitespace, then count the words.
	echo $text | wc -w
}
2403
2404#
# To verify that the required number of disks is given
2406#
function verify_disk_count
{
	typeset -i needed=${2:-1}
	typeset -i have
	have=$(get_word_count "$1")

	# Enough disks: nothing to report.
	((have >= needed)) && return

	log_untested "A minimum of $needed disks is required to run." \
		" You specified $have disk(s)"
}
2418
function ds_is_volume
{
	# Status of the test doubles as the function's return value.
	[[ $(get_prop type $1) == "volume" ]]
}
2425
function ds_is_filesystem
{
	# Status of the test doubles as the function's return value.
	[[ $(get_prop type $1) == "filesystem" ]]
}
2432
function ds_is_snapshot
{
	# Status of the test doubles as the function's return value.
	[[ $(get_prop type $1) == "snapshot" ]]
}
2439
2440#
2441# Check if Trusted Extensions are installed and enabled
2442#
function is_te_enabled
{
	# grep's exit status is the result; note its stdout (the matched
	# state line) is not suppressed, matching historic behavior.
	svcs -H -o state labeld 2>/dev/null | grep "enabled"
}
2452
2453# Utility function to determine if a system has multiple cpus.
function is_mp
{
	typeset -i ncpu=$(psrinfo | wc -l)

	# One line per processor; more than one means multiprocessor.
	((ncpu > 1))
}
2458
2459function get_cpu_freq
2460{
2461	psrinfo -v 0 | awk '/processor operates at/ {print $6}'
2462}
2463
2464# Run the given command as the user provided.
2465function user_run
2466{
2467	typeset user=$1
2468	shift
2469
2470	eval su \$user -c \"$@\" > /tmp/out 2>/tmp/err
2471	return $?
2472}
2473
2474#
2475# Check if the pool contains the specified vdevs
2476#
2477# $1 pool
2478# $2..n <vdev> ...
2479#
2480# Return 0 if the vdevs are contained in the pool, 1 if any of the specified
2481# vdevs is not in the pool, and 2 if pool name is missing.
2482#
function vdevs_in_pool
{
	typeset pool=$1
	typeset vdev
	# Fix: declare tmpfile local instead of leaking a global.
	typeset tmpfile

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 2
	fi

	shift

	tmpfile=$(mktemp)
	zpool list -Hv "$pool" >$tmpfile
	for vdev in $@; do
		grep -w ${vdev##*/} $tmpfile >/dev/null 2>&1
		if (($? != 0)); then
			# Fix: remove the temp file on this path too; it
			# used to be leaked whenever a vdev was missing.
			rm -f $tmpfile
			return 1
		fi
	done

	rm -f $tmpfile

	return 0
}
2506
function get_max
{
	# Fix: 'typeset -i' (integer) replaces the original 'typeset -l',
	# which declares *lowercase* variables in ksh - a typo, harmless
	# for numeric input but wrong in intent.  Also dropped the useless
	# $(echo ...) subshell around the arithmetic.
	typeset -i i max=$1
	shift

	for i in "$@"; do
		((max = max > i ? max : i))
	done

	echo $max
}
2518
function get_min
{
	# Fix: 'typeset -i' (integer) replaces the original 'typeset -l',
	# which declares *lowercase* variables in ksh - a typo, harmless
	# for numeric input but wrong in intent.  Also dropped the useless
	# $(echo ...) subshell around the arithmetic.
	typeset -i i min=$1
	shift

	for i in "$@"; do
		((min = min < i ? min : i))
	done

	echo $min
}
2530
2531#
2532# Generate a random number between 1 and the argument.
2533#
function random
{
	typeset -i ceil=$1

	# RANDOM % ceil yields [0, ceil); shift up into [1, ceil].
	echo $((RANDOM % ceil + 1))
}
2539
2540# Write data that can be compressed into a directory
2541function write_compressible
2542{
2543	typeset dir=$1
2544	typeset megs=$2
2545	typeset nfiles=${3:-1}
2546	typeset bs=${4:-1024k}
2547	typeset fname=${5:-file}
2548
2549	[[ -d $dir ]] || log_fail "No directory: $dir"
2550
2551	log_must eval "fio \
2552	    --name=job \
2553	    --fallocate=0 \
2554	    --minimal \
2555	    --randrepeat=0 \
2556	    --buffer_compress_percentage=66 \
2557	    --buffer_compress_chunk=4096 \
2558	    --directory=$dir \
2559	    --numjobs=$nfiles \
2560	    --rw=write \
2561	    --bs=$bs \
2562	    --filesize=$megs \
2563	    --filename_format='$fname.\$jobnum' >/dev/null"
2564}
2565
function get_objnum
{
	typeset pathname=$1

	[[ -e $pathname ]] || log_fail "No such file or directory: $pathname"

	# stat's inode number (%i) is reported as the object number.
	stat -c %i $pathname
}
2575