1#
2# CDDL HEADER START
3#
4# The contents of this file are subject to the terms of the
5# Common Development and Distribution License (the "License").
6# You may not use this file except in compliance with the License.
7#
8# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9# or http://www.opensolaris.org/os/licensing.
10# See the License for the specific language governing permissions
11# and limitations under the License.
12#
13# When distributing Covered Code, include this CDDL HEADER in each
14# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15# If applicable, add the following below this CDDL HEADER, with the
16# fields enclosed by brackets "[]" replaced with your own identifying
17# information: Portions Copyright [yyyy] [name of copyright owner]
18#
19# CDDL HEADER END
20#
21
22#
23# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24# Use is subject to license terms.
25# Copyright (c) 2012, 2016 by Delphix. All rights reserved.
26# Copyright 2016 Nexenta Systems, Inc.
27# Copyright (c) 2017 Datto Inc.
28#
29
30. ${STF_TOOLS}/contrib/include/logapi.shlib
31
32# Determine whether a dataset is mounted
33#
34# $1 dataset name
35# $2 filesystem type; optional - defaulted to zfs
36#
37# Return 0 if dataset is mounted; 1 if unmounted; 2 on error
38
function ismounted
{
	typeset fstype=${2:-zfs}
	typeset entry mntdir mntname rc

	case $fstype in
		zfs)
			# A leading "/" means we were given a mountpoint
			# (column 2 of 'zfs mount'); otherwise match the
			# dataset name (column 1).
			typeset -i field=1
			[[ "$1" == "/"* ]] && field=2
			for entry in $(zfs mount | awk -v f=$field '{print $f}'); do
				[[ $1 == $entry ]] && return 0
			done
		;;
		ufs|nfs)
			entry=$(df -F $fstype $1 2>/dev/null)
			rc=$?
			((rc != 0)) && return $rc

			# df output looks like "<dir> (<name>): ...";
			# extract both the directory and the device name.
			mntdir=${entry%%\(*}
			mntdir=${mntdir%% *}
			mntname=${entry##*\(}
			mntname=${mntname%%\)*}
			mntname=${mntname%% *}

			[[ "$1" == "$mntdir" || "$1" == "$mntname" ]] && return 0
		;;
	esac

	return 1
}
74
75# Return 0 if a dataset is mounted; 1 otherwise
76#
77# $1 dataset name
78# $2 filesystem type; optional - defaulted to zfs
79
function mounted
{
	# Collapse ismounted's tri-state result to mounted/not-mounted.
	if ismounted $1 $2; then
		return 0
	fi
	return 1
}
86
87# Return 0 if a dataset is unmounted; 1 otherwise
88#
89# $1 dataset name
90# $2 filesystem type; optional - defaulted to zfs
91
function unmounted
{
	typeset -i rc

	ismounted $1 $2
	rc=$?
	# Only an explicit "unmounted" (1) answer counts; other
	# statuses from ismounted are treated as failure.
	((rc == 1)) && return 0
	return 1
}
98
99# split line on ","
100#
101# $1 - line to split
102
function splitline
{
	# Single-character substitution, so tr does the job sed did.
	echo $1 | tr ',' ' '
}
107
108function default_setup
109{
110	default_setup_noexit "$@"
111
112	log_pass
113}
114
115#
116# Given a list of disks, setup storage pools and datasets.
117#
function default_setup_noexit
{
	typeset disklist=$1
	typeset container=$2	# non-empty => also create $TESTCTR/$TESTFS1
	typeset volume=$3	# non-empty => also create $TESTVOL

	if is_global_zone; then
		if poolexists $TESTPOOL ; then
			destroy_pool $TESTPOOL
		fi
		[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
		log_must zpool create -f $TESTPOOL $disklist
	else
		# Pools cannot be created in a local zone; remount the
		# datasets exported to the zone instead.
		reexport_pool
	fi

	rm -rf $TESTDIR  || log_unresolved Could not remove $TESTDIR
	mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR

	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	if [[ -n $container ]]; then
		rm -rf $TESTDIR1  || \
			log_unresolved Could not remove $TESTDIR1
		mkdir -p $TESTDIR1 || \
			log_unresolved Could not create $TESTDIR1

		# canmount=off: the container itself is never mounted.
		log_must zfs create $TESTPOOL/$TESTCTR
		log_must zfs set canmount=off $TESTPOOL/$TESTCTR
		log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
		log_must zfs set mountpoint=$TESTDIR1 \
		    $TESTPOOL/$TESTCTR/$TESTFS1
	fi

	if [[ -n $volume ]]; then
		if is_global_zone ; then
			log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
		else
			# zvols cannot be created from within a local
			# zone; create a plain filesystem instead.
			log_must zfs create $TESTPOOL/$TESTVOL
		fi
	fi
}
161
162#
163# Given a list of disks, setup a storage pool, file system and
164# a container.
165#
function default_container_setup
{
	# Second argument flags container creation in default_setup.
	default_setup "$1" "true"
}
172
173#
174# Given a list of disks, setup a storage pool,file system
175# and a volume.
176#
function default_volume_setup
{
	# Third argument flags volume creation in default_setup.
	default_setup "$1" "" "true"
}
183
184#
185# Given a list of disks, setup a storage pool,file system,
186# a container and a volume.
187#
function default_container_volume_setup
{
	# Request both the container and the volume from default_setup.
	default_setup "$1" "true" "true"
}
194
195#
# Create a snapshot on a filesystem or volume. By default the snapshot
# is created on a filesystem.
198#
199# $1 Existing filesystem or volume name. Default, $TESTPOOL/$TESTFS
200# $2 snapshot name. Default, $TESTSNAP
201#
function create_snapshot
{
	typeset fs_vol=${1:-$TESTPOOL/$TESTFS}
	typeset snap=${2:-$TESTSNAP}

	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."

	# Refuse to clobber an existing snapshot, and require the
	# source dataset to be present.
	if snapexists $fs_vol@$snap; then
		log_fail "$fs_vol@$snap already exists."
	fi
	datasetexists $fs_vol || \
		log_fail "$fs_vol must exist."

	log_must zfs snapshot $fs_vol@$snap
}
218
219#
220# Create a clone from a snapshot, default clone name is $TESTCLONE.
221#
222# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
223# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
224#
function create_clone   # snapshot clone
{
	typeset snapshot=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
	typeset target=${2:-$TESTPOOL/$TESTCLONE}

	# Both names must be non-empty after defaulting.
	[[ -n $snapshot ]] || log_fail "Snapshot name is undefined."
	[[ -n $target ]] || log_fail "Clone name is undefined."

	log_must zfs clone $snapshot $target
}
237
238#
# Create a bookmark of the given snapshot.  By default the bookmark is
# created on a filesystem.
241#
242# $1 Existing filesystem or volume name. Default, $TESTFS
243# $2 Existing snapshot name. Default, $TESTSNAP
244# $3 bookmark name. Default, $TESTBKMARK
245#
function create_bookmark
{
	typeset fs_vol=${1:-$TESTFS}
	typeset snap=${2:-$TESTSNAP}
	typeset bkmark=${3:-$TESTBKMARK}

	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."
	[[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."

	# The bookmark must not pre-exist, while both the dataset and
	# the snapshot it is created from must.
	if bkmarkexists $fs_vol#$bkmark; then
		log_fail "$fs_vol#$bkmark already exists."
	fi
	datasetexists $fs_vol || \
		log_fail "$fs_vol must exist."
	snapexists $fs_vol@$snap || \
		log_fail "$fs_vol@$snap must exist."

	log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
}
266
267function default_mirror_setup
268{
269	default_mirror_setup_noexit $1 $2 $3
270
271	log_pass
272}
273
274#
275# Given a pair of disks, set up a storage pool and dataset for the mirror
276# @parameters: $1 the primary side of the mirror
277#   $2 the secondary side of the mirror
278# @uses: ZPOOL ZFS TESTPOOL TESTFS
279function default_mirror_setup_noexit
280{
281	readonly func="default_mirror_setup_noexit"
282	typeset primary=$1
283	typeset secondary=$2
284
285	[[ -z $primary ]] && \
286		log_fail "$func: No parameters passed"
287	[[ -z $secondary ]] && \
288		log_fail "$func: No secondary partition passed"
289	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
290	log_must zpool create -f $TESTPOOL mirror $@
291	log_must zfs create $TESTPOOL/$TESTFS
292	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
293}
294
295#
296# create a number of mirrors.
297# We create a number($1) of 2 way mirrors using the pairs of disks named
298# on the command line. These mirrors are *not* mounted
299# @parameters: $1 the number of mirrors to create
300#  $... the devices to use to create the mirrors on
301# @uses: ZPOOL ZFS TESTPOOL
function setup_mirrors
{
	typeset -i nmirrors=$1

	shift
	# Consume the disk arguments two at a time; pools are named
	# ${TESTPOOL}<nmirrors> counting down to ${TESTPOOL}1.
	while ((nmirrors > 0)); do
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$nmirrors ]] && rm -rf /$TESTPOOL$nmirrors
		log_must zpool create -f $TESTPOOL$nmirrors mirror $1 $2
		shift 2
		((nmirrors = nmirrors - 1))
	done
}
315
316#
317# create a number of raidz pools.
318# We create a number($1) of 2 raidz pools  using the pairs of disks named
319# on the command line. These pools are *not* mounted
320# @parameters: $1 the number of pools to create
321#  $... the devices to use to create the pools on
322# @uses: ZPOOL ZFS TESTPOOL
function setup_raidzs
{
	typeset -i nraidzs=$1

	shift
	# Consume the disk arguments two at a time; pools are named
	# ${TESTPOOL}<nraidzs> counting down to ${TESTPOOL}1.
	while ((nraidzs > 0)); do
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$nraidzs ]] && rm -rf /$TESTPOOL$nraidzs
		log_must zpool create -f $TESTPOOL$nraidzs raidz $1 $2
		shift 2
		((nraidzs = nraidzs - 1))
	done
}
336
337#
338# Destroy the configured testpool mirrors.
339# the mirrors are of the form ${TESTPOOL}{number}
340# @uses: ZPOOL ZFS TESTPOOL
341function destroy_mirrors
342{
343	default_cleanup_noexit
344
345	log_pass
346}
347
348#
349# Given a minimum of two disks, set up a storage pool and dataset for the raid-z
350# $1 the list of disks
351#
function default_raidz_setup
{
	typeset disklist="$*"
	disks=(${disklist[*]})

	if [[ ${#disks[*]} -lt 2 ]]; then
		log_fail "A raid-z requires a minimum of two disks."
	fi

	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
	# NOTE(review): only the first three positional disks are used
	# here, even if more were passed in -- confirm whether
	# $disklist was intended instead of "$1 $2 $3".
	log_must zpool create -f $TESTPOOL raidz $1 $2 $3
	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	log_pass
}
368
369#
370# Common function used to cleanup storage pools and datasets.
371#
372# Invoked at the start of the test suite to ensure the system
373# is in a known state, and also at the end of each set of
374# sub-tests to ensure errors from one set of tests doesn't
375# impact the execution of the next set.
376
377function default_cleanup
378{
379	default_cleanup_noexit
380
381	log_pass
382}
383
#
# Destroy every destroyable pool (global zone) or reset the zone's
# $ZONE_POOL datasets to defaults (local zone), then remove $TESTDIR.
# Pools matched by $NO_POOLS or the $KEEP list are preserved.
#
function default_cleanup_noexit
{
	typeset exclude=""
	typeset pool=""
	#
	# Destroying the pool will also destroy any
	# filesystems it contains.
	#
	if is_global_zone; then
		zfs unmount -a > /dev/null 2>&1
		exclude=`eval echo \"'(${KEEP})'\"`
		ALL_POOLS=$(zpool list -H -o name \
		    | grep -v "$NO_POOLS" | egrep -v "$exclude")
		# Here, we loop through the pools we're allowed to
		# destroy, only destroying them if it's safe to do
		# so.
		#
		# Use [[ -n ... ]] rather than an unquoted [ ! -z ... ]:
		# with more than one pool name, the old unquoted test
		# expanded to multiple words and failed with "too many
		# arguments", ending the loop prematurely.
		while [[ -n $ALL_POOLS ]]
		do
			for pool in ${ALL_POOLS}
			do
				if safe_to_destroy_pool $pool ;
				then
					destroy_pool $pool
				fi
				ALL_POOLS=$(zpool list -H -o name \
				    | grep -v "$NO_POOLS" \
				    | egrep -v "$exclude")
			done
		done

		zfs mount -a
	else
		typeset fs=""
		for fs in $(zfs list -H -o name \
		    | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
			datasetexists $fs && \
				log_must zfs destroy -Rf $fs
		done

		# Need cleanup here to avoid garbage dir left.
		for fs in $(zfs list -H -o name); do
			[[ $fs == /$ZONE_POOL ]] && continue
			[[ -d $fs ]] && log_must rm -rf $fs/*
		done

		#
		# Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
		# the default value
		#
		for fs in $(zfs list -H -o name); do
			if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
				log_must zfs set reservation=none $fs
				log_must zfs set recordsize=128K $fs
				log_must zfs set mountpoint=/$fs $fs
				typeset enc=""
				enc=$(get_prop encryption $fs)
				# checksum cannot be reset on encrypted
				# datasets, so only touch it when
				# encryption is off or undetermined.
				if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
					[[ "$enc" == "off" ]]; then
					log_must zfs set checksum=on $fs
				fi
				log_must zfs set compression=off $fs
				log_must zfs set atime=on $fs
				log_must zfs set devices=off $fs
				log_must zfs set exec=on $fs
				log_must zfs set setuid=on $fs
				log_must zfs set readonly=off $fs
				log_must zfs set snapdir=hidden $fs
				log_must zfs set aclmode=groupmask $fs
				log_must zfs set aclinherit=secure $fs
			fi
		done
	fi

	[[ -d $TESTDIR ]] && \
		log_must rm -rf $TESTDIR
}
460
461
462#
463# Common function used to cleanup storage pools, file systems
464# and containers.
465#
function default_container_cleanup
{
	if ! is_global_zone; then
		reexport_pool
	fi

	# Unmount the contained filesystem first so the destroys below
	# do not fail on a busy mountpoint.
	ismounted $TESTPOOL/$TESTCTR/$TESTFS1
	[[ $? -eq 0 ]] && \
	    log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1

	datasetexists $TESTPOOL/$TESTCTR/$TESTFS1 && \
	    log_must zfs destroy -R $TESTPOOL/$TESTCTR/$TESTFS1

	datasetexists $TESTPOOL/$TESTCTR && \
	    log_must zfs destroy -Rf $TESTPOOL/$TESTCTR

	[[ -e $TESTDIR1 ]] && \
	    log_must rm -rf $TESTDIR1 > /dev/null 2>&1

	# Finish with the generic pool/dataset cleanup (calls log_pass).
	default_cleanup
}
487
488#
489# Common function used to cleanup snapshot of file system or volume. Default to
490# delete the file system's snapshot
491#
492# $1 snapshot name
493#
function destroy_snapshot
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	if ! snapexists $snap; then
		# Fixed message grammar ("does not existed").
		log_fail "'$snap' does not exist."
	fi

	#
	# The mountpoint reported by get_prop is only meaningful while
	# the snapshot is actually mounted, so record it before the
	# destroy and remove any leftover directory afterwards.
	#
	typeset mtpt=""
	if ismounted $snap; then
		mtpt=$(get_prop mountpoint $snap)
		(($? != 0)) && \
			log_fail "get_prop mountpoint $snap failed."
	fi

	log_must zfs destroy $snap
	[[ $mtpt != "" && -d $mtpt ]] && \
		log_must rm -rf $mtpt
}
518
519#
520# Common function used to cleanup clone.
521#
522# $1 clone name
523#
function destroy_clone
{
	typeset clone=${1:-$TESTPOOL/$TESTCLONE}

	if ! datasetexists $clone; then
		# Fixed message grammar ("does not existed").
		log_fail "'$clone' does not exist."
	fi

	# Same rationale as destroy_snapshot: capture the mountpoint
	# while the clone is still mounted.
	typeset mtpt=""
	if ismounted $clone; then
		mtpt=$(get_prop mountpoint $clone)
		(($? != 0)) && \
			log_fail "get_prop mountpoint $clone failed."
	fi

	log_must zfs destroy $clone
	[[ $mtpt != "" && -d $mtpt ]] && \
		log_must rm -rf $mtpt
}
544
545#
546# Common function used to cleanup bookmark of file system or volume.  Default
547# to delete the file system's bookmark.
548#
549# $1 bookmark name
550#
function destroy_bookmark
{
	typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}

	if ! bkmarkexists $bkmark; then
		# Fixed: message previously expanded the undefined
		# variable $bkmarkp, printing an empty name.
		log_fail "'$bkmark' does not exist."
	fi

	log_must zfs destroy $bkmark
}
561
562# Return 0 if a snapshot exists; $? otherwise
563#
564# $1 - snapshot name
565
function snapexists
{
	# The listing's exit status doubles as the existence answer.
	zfs list -H -t snapshot "$1" > /dev/null 2>&1
}
571
572#
573# Return 0 if a bookmark exists; $? otherwise
574#
575# $1 - bookmark name
576#
function bkmarkexists
{
	# The listing's exit status doubles as the existence answer.
	zfs list -H -t bookmark "$1" > /dev/null 2>&1
}
582
583#
584# Set a property to a certain value on a dataset.
585# Sets a property of the dataset to the value as passed in.
586# @param:
587#	$1 dataset who's property is being set
588#	$2 property to set
589#	$3 value to set property to
590# @return:
591#	0 if the property could be set.
592#	non-zero otherwise.
593# @use: ZFS
594#
function dataset_setprop
{
	typeset fn=dataset_setprop

	# Guard clause: dataset, property and value are all required.
	(($# >= 3)) || {
		log_note "$fn: Insufficient parameters (need 3, had $#)"
		return 1
	}

	typeset cmdout
	cmdout=$(zfs set $2=$3 $1 2>&1)
	typeset -i status=$?
	if ((status != 0)); then
		log_note "Setting property on $1 failed."
		log_note "property $2=$3"
		log_note "Return Code: $status"
		log_note "Output: $cmdout"
		return $status
	fi
	return 0
}
615
616#
617# Assign suite defined dataset properties.
618# This function is used to apply the suite's defined default set of
619# properties to a dataset.
620# @parameters: $1 dataset to use
621# @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
622# @returns:
623#   0 if the dataset has been altered.
624#   1 if no pool name was passed in.
625#   2 if the dataset could not be found.
626#   3 if the dataset could not have it's properties set.
627#
function dataset_set_defaultproperties
{
	typeset dataset="$1"

	[[ -z $dataset ]] && return 1

	typeset confset=
	typeset -i found=0
	# Scan 'zfs list' output for the dataset name.  Note the
	# unquoted expansion iterates over every whitespace-separated
	# word of the listing, not only the name column.
	for confset in $(zfs list); do
		if [[ $dataset = $confset ]]; then
			found=1
			break
		fi
	done
	[[ $found -eq 0 ]] && return 2
	# Apply the suite-wide defaults only when the corresponding
	# environment variable is set.
	if [[ -n $COMPRESSION_PROP ]]; then
		dataset_setprop $dataset compression $COMPRESSION_PROP || \
			return 3
		log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
	fi
	if [[ -n $CHECKSUM_PROP ]]; then
		dataset_setprop $dataset checksum $CHECKSUM_PROP || \
			return 3
		log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
	fi
	return 0
}
655
656#
657# Check a numeric assertion
658# @parameter: $@ the assertion to check
659# @output: big loud notice if assertion failed
660# @use: log_fail
661#
function assert
{
	# Evaluate the arguments arithmetically; abort on falsehood.
	if ! (($@)); then
		log_fail "$@"
	fi
}
666
667#
668# Function to format partition size of a disk
669# Given a disk cxtxdx reduces all partitions
670# to 0 size
671#
function zero_partitions #<whole_disk_name>
{
	typeset disk=$1
	typeset slice

	# Slice 2 (the backup/whole-disk slice) is deliberately skipped.
	for slice in 0 1 3 4 5 6 7; do
		set_partition $slice "" 0mb $disk
	done
}
682
683#
684# Given a slice, size and disk, this function
685# formats the slice to the specified size.
686# Size should be specified with units as per
687# the `format` command requirements eg. 100mb 3gb
688#
function set_partition #<slice_num> <slice_start> <size_plus_units>  <whole_disk_name>
{
	typeset -i slicenum=$1
	typeset start=$2
	typeset size=$3
	typeset disk=$4
	[[ -z $slicenum || -z $size || -z $disk ]] && \
	    log_fail "The slice, size or disk name is unspecified."
	typeset cmdfile=/var/tmp/format_in.$$

	# Build the interactive script for format(1M): enter the
	# partition menu, pick the slice, accept the default id/flag,
	# set start and size, write the label and quit.
	printf '%s\n' "partition" "$slicenum" "" "" "$start" "$size" \
	    "label" "" "q" "q" > $cmdfile

	format -e -s -d $disk -f $cmdfile
	typeset -i status=$?
	rm -f $cmdfile
	((status != 0)) && \
	    log_fail "Unable to format $disk slice $slicenum to $size"
	return 0
}
717
718#
719# Get the end cyl of the given slice
720#
function get_endslice #<disk> <slice>
{
	typeset disk=$1
	typeset slice=$2
	if [[ -z $disk || -z $slice ]] ; then
		log_fail "The disk name or slice number is unspecified."
	fi

	# Normalize to the bare cXtXdX name.
	disk=${disk#/dev/dsk/}
	disk=${disk#/dev/rdsk/}
	disk=${disk%s*}

	# Sectors-per-cylinder ratio from the backup slice's VTOC.
	typeset -i ratio=0
	ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
		grep "sectors\/cylinder" | \
		awk '{print $2}')

	if ((ratio == 0)); then
		return
	fi

	# Field 6 of the slice's prtvtoc line is its last sector;
	# convert that to a cylinder number.
	typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
		nawk -v token="$slice" '{if ($1==token) print $6}')

	((endcyl = (endcyl + 1) / ratio))
	echo $endcyl
}
748
749
750#
751# Given a size,disk and total slice number,  this function formats the
752# disk slices from 0 to the total slice number with the same specified
753# size.
754#
function partition_disk	#<slice_size> <whole_disk_name>	<total_slices>
{
	typeset -i i=0
	typeset slice_size=$1
	typeset disk_name=$2
	typeset total_slices=$3
	typeset cyl

	zero_partitions $disk_name
	while ((i < $total_slices)); do
		# Slice 2 is the traditional whole-disk (backup) slice;
		# never repartition it.
		if ((i == 2)); then
			((i = i + 1))
			continue
		fi
		# Each slice begins where the previous one ended.
		set_partition $i "$cyl" $slice_size $disk_name
		cyl=$(get_endslice $disk_name $i)
		((i = i+1))
	done
}
774
775#
776# This function continues to write to a filenum number of files into dirnum
777# number of directories until either file_write returns an error or the
778# maximum number of files per directory have been written.
779#
780# Usage:
781# fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
782#
783# Return value: 0 on success
784#		non 0 on error
785#
786# Where :
787#	destdir:    is the directory where everything is to be created under
788#	dirnum:	    the maximum number of subdirectories to use, -1 no limit
789#	filenum:    the maximum number of files per subdirectory
790#	bytes:	    number of bytes to write
#	num_writes: number of times to write out bytes
#	data:	    the data that will be written
#
#	E.g.
#	fill_fs /testdir 20 25 1024 256 0
796#
797# Note: bytes * num_writes equals the size of the testfile
798#
function fill_fs # destdir dirnum filenum bytes num_writes data
{
	typeset destdir=${1:-$TESTDIR}
	typeset -i dirnum=${2:-50}
	typeset -i filenum=${3:-50}
	typeset -i bytes=${4:-8192}
	typeset -i num_writes=${5:-10240}
	typeset -i data=${6:-0}

	typeset -i odirnum=1	# outer-loop continue flag
	typeset -i idirnum=0	# index of the current subdirectory
	typeset -i fn=0		# index of the current file
	typeset -i retval=0

	log_must mkdir -p $destdir/$idirnum
	while (($odirnum > 0)); do
		# Stop at the directory limit; dirnum == -1 means
		# "no limit" so this test never fires.
		if ((dirnum >= 0 && idirnum >= dirnum)); then
			odirnum=0
			break
		fi
		file_write -o create -f $destdir/$idirnum/$TESTFILE.$fn \
		    -b $bytes -c $num_writes -d $data
		retval=$?
		# A write failure (e.g. filesystem full) ends the fill;
		# the failing status is returned to the caller.
		if (($retval != 0)); then
			odirnum=0
			break
		fi
		# Roll over to a fresh subdirectory every $filenum files.
		if (($fn >= $filenum)); then
			fn=0
			((idirnum = idirnum + 1))
			log_must mkdir -p $destdir/$idirnum
		else
			((fn = fn + 1))
		fi
	done
	return $retval
}
836
837#
838# Simple function to get the specified property. If unable to
839# get the property then exits.
840#
841# Note property is in 'parsable' format (-p)
842#
function get_prop # property dataset
{
	typeset prop=$1
	typeset dataset=$2
	typeset value

	# The assignment's status is the status of the zfs command.
	if ! value=$(zfs get -pH -o value $prop $dataset 2>/dev/null); then
		log_note "Unable to get $prop property for dataset " \
		"$dataset"
		return 1
	fi

	echo "$value"
	return 0
}
859
860#
861# Simple function to get the specified property of pool. If unable to
862# get the property then exits.
863#
function get_pool_prop # property pool
{
	typeset prop_val
	typeset prop=$1
	typeset pool=$2

	if poolexists $pool ; then
		prop_val=$(zpool get $prop $pool 2>/dev/null | tail -1 | \
			awk '{print $3}')
		# NOTE(review): $? reflects the awk stage of the
		# pipeline, so this branch is unlikely to catch a
		# zpool failure -- confirm intent.
		if [[ $? -ne 0 ]]; then
			log_note "Unable to get $prop property for pool " \
			"$pool"
			return 1
		fi
	else
		log_note "Pool $pool not exists."
		return 1
	fi

	echo $prop_val
	return 0
}
886
887# Return 0 if a pool exists; $? otherwise
888#
889# $1 - pool name
890
function poolexists
{
	typeset pool=$1

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	# The query's exit status is the answer.
	zpool get name "$pool" > /dev/null 2>&1
}
903
904# Return 0 if all the specified datasets exist; $? otherwise
905#
906# $1-n  dataset name
function datasetexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	# Fail fast with the zfs status on the first missing dataset.
	typeset ds
	for ds in "$@"; do
		zfs get name $ds > /dev/null 2>&1 || return $?
	done

	return 0
}
922
923# return 0 if none of the specified datasets exists, otherwise return 1.
924#
925# $1-n  dataset name
function datasetnonexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	# Any dataset that does exist fails the whole check.
	typeset ds
	for ds in "$@"; do
		zfs list -H -t filesystem,snapshot,volume $ds > /dev/null 2>&1 \
		    && return 1
	done

	return 0
}
941
942#
943# Given a mountpoint, or a dataset name, determine if it is shared.
944#
945# Returns 0 if shared, 1 otherwise.
946#
function is_shared
{
	typeset fs=$1
	typeset mtpt

	# A name without a leading "/" is a dataset; translate it to
	# its mountpoint.  Unmountable datasets cannot be shared.
	if [[ $fs != "/"* ]] ; then
		if datasetnonexists "$fs" ; then
			return 1
		else
			mtpt=$(get_prop mountpoint "$fs")
			case $mtpt in
				none|legacy|-) return 1
					;;
				*)	fs=$mtpt
					;;
			esac
		fi
	fi

	# Column 2 of share(1M) output is the shared pathname.
	for mtpt in `share | awk '{print $2}'` ; do
		if [[ $mtpt == $fs ]] ; then
			return 0
		fi
	done

	# Not shared; log the NFS server state to aid debugging.
	typeset stat=$(svcs -H -o STA nfs/server:default)
	if [[ $stat != "ON" ]]; then
		log_note "Current nfs/server status: $stat"
	fi

	return 1
}
979
980#
981# Given a mountpoint, determine if it is not shared.
982#
983# Returns 0 if not shared, 1 otherwise.
984#
function not_shared
{
	typeset fs=$1

	# Simply invert is_shared's answer.
	if is_shared $fs; then
		return 1
	fi

	return 0
}
996
997#
998# Helper function to unshare a mountpoint.
999#
function unshare_fs #fs
{
	typeset fs=$1

	# Only unshare when it is actually shared; succeed either way.
	if is_shared $fs; then
		log_must zfs unshare $fs
	fi

	return 0
}
1011
1012#
1013# Check NFS server status and trigger it online.
1014#
function setup_nfs_server
{
	# Cannot share directory in non-global zone.
	#
	if ! is_global_zone; then
		log_note "Cannot trigger NFS server by sharing in LZ."
		return
	fi

	typeset nfs_fmri="svc:/network/nfs/server:default"
	if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
		#
		# Only really sharing operation can enable NFS server
		# to online permanently.
		#
		typeset dummy=/tmp/dummy

		if [[ -d $dummy ]]; then
			log_must rm -rf $dummy
		fi

		log_must mkdir $dummy
		log_must share $dummy

		#
		# Waiting for fmri's status to be the final status.
		# Otherwise, in transition, an asterisk (*) is appended for
		# instances, unshare will reverse status to 'DIS' again.
		#
		# Waiting for 1's at least.
		#
		log_must sleep 1
		# Poll for up to 10 more seconds while the instance is
		# still in transition (state suffixed with '*').
		timeout=10
		while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
		do
			log_must sleep 1

			((timeout -= 1))
		done

		# The dummy share has served its purpose; remove it.
		log_must unshare $dummy
		log_must rm -rf $dummy
	fi

	log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
}
1061
1062#
1063# To verify whether calling process is in global zone
1064#
1065# Return 0 if in global zone, 1 in non-global zone
1066#
function is_global_zone
{
	# The test's status is the function's status: 0 only when the
	# reported zone name is exactly "global".
	[[ $(zonename 2>/dev/null) == "global" ]]
}
1075
1076#
1077# Verify whether test is permitted to run from
1078# global zone, local zone, or both
1079#
1080# $1 zone limit, could be "global", "local", or "both"(no limit)
1081#
1082# Return 0 if permitted, otherwise exit with log_unsupported
1083#
function verify_runnable # zone limit
{
	typeset limit=$1

	# No limit given means runnable everywhere.
	[[ -z $limit ]] && return 0

	if is_global_zone ; then
		case $limit in
			global|both)
				;;
			local)	log_unsupported "Test is unable to run from "\
					"global zone."
				;;
			*)	log_note "Warning: unknown limit $limit - " \
					"use both."
				;;
		esac
	else
		case $limit in
			local|both)
				;;
			global)	log_unsupported "Test is unable to run from "\
					"local zone."
				;;
			*)	log_note "Warning: unknown limit $limit - " \
					"use both."
				;;
		esac

		# Local zone: make sure the exported datasets are mounted.
		reexport_pool
	fi

	return 0
}
1118
# Return 0 if the pool is created successfully or already handled; 1 if no pool name was given.
1120# Note: In local zones, this function should return 0 silently.
1121#
1122# $1 - pool name
1123# $2-n - [keyword] devs_list
1124
function create_pool #pool devs_list
{
	# Strip any dataset component; only the pool name is wanted.
	typeset pool=${1%%/*}

	shift

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 1
	fi

	# Start from a clean slate if the pool already exists.
	if poolexists $pool ; then
		destroy_pool $pool
	fi

	if is_global_zone ; then
		[[ -d /$pool ]] && rm -rf /$pool
		log_must zpool create -f $pool $@
	fi

	return 0
}
1147
# Return 0 if the pool is destroyed successfully; 1 otherwise.
1149# Note: In local zones, this function should return 0 silently.
1150#
1151# $1 - pool name
1152# Destroy pool with the given parameters.
1153
function destroy_pool #pool
{
	# Strip any dataset component; only the pool name is wanted.
	typeset pool=${1%%/*}
	typeset mtpt

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	if is_global_zone ; then
		if poolexists "$pool" ; then
			# Record the mountpoint first; the directory may
			# need removing after the pool is gone.
			mtpt=$(get_prop mountpoint "$pool")

			# At times, syseventd activity can cause attempts to
			# destroy a pool to fail with EBUSY. We retry a few
			# times allowing failures before requiring the destroy
			# to succeed.
			typeset -i wait_time=10 ret=1 count=0
			must=""
			while [[ $ret -ne 0 ]]; do
				$must zpool destroy -f $pool
				ret=$?
				[[ $ret -eq 0 ]] && break
				log_note "zpool destroy failed with $ret"
				# After several failures, make the next
				# attempt fatal via log_must.
				[[ count++ -ge 7 ]] && must=log_must
				sleep $wait_time
			done

			[[ -d $mtpt ]] && \
				log_must rm -rf $mtpt
		else
			log_note "Pool does not exist. ($pool)"
			return 1
		fi
	fi

	return 0
}
1193
1194#
1195# Firstly, create a pool with 5 datasets. Then, create a single zone and
1196# export the 5 datasets to it. In addition, we also add a ZFS filesystem
1197# and a zvol device to the zone.
1198#
1199# $1 zone name
1200# $2 zone root directory prefix
1201# $3 zone ip
1202#
function zfs_zones_setup #zone_name zone_root zone_ip
{
	typeset zone_name=${1:-$(hostname)-z}
	typeset zone_root=${2:-"/zone_root"}
	# NOTE(review): $zone_ip is accepted but never referenced in
	# the body below -- confirm whether it is still needed.
	typeset zone_ip=${3:-"10.1.1.10"}
	typeset prefix_ctr=$ZONE_CTR
	typeset pool_name=$ZONE_POOL
	typeset -i cntctr=5
	typeset -i i=0

	# Create pool and 5 container within it
	#
	[[ -d /$pool_name ]] && rm -rf /$pool_name
	log_must zpool create -f $pool_name $DISKS
	while ((i < cntctr)); do
		log_must zfs create $pool_name/$prefix_ctr$i
		((i += 1))
	done

	# create a zvol
	log_must zfs create -V 1g $pool_name/zone_zvol

	#
	# If current system support slog, add slog device for pool
	#
	if verify_slog_support ; then
		typeset sdevs="/var/tmp/sdev1 /var/tmp/sdev2"
		log_must mkfile $MINVDEVSIZE $sdevs
		log_must zpool add $pool_name log mirror $sdevs
	fi

	# this isn't supported just yet.
	# Create a filesystem. In order to add this to
	# the zone, it must have it's mountpoint set to 'legacy'
	# log_must zfs create $pool_name/zfs_filesystem
	# log_must zfs set mountpoint=legacy $pool_name/zfs_filesystem

	[[ -d $zone_root ]] && \
		log_must rm -rf $zone_root/$zone_name
	[[ ! -d $zone_root ]] && \
		log_must mkdir -p -m 0700 $zone_root/$zone_name

	# Create zone configure file and configure the zone
	#
	typeset zone_conf=/tmp/zone_conf.$$
	echo "create" > $zone_conf
	echo "set zonepath=$zone_root/$zone_name" >> $zone_conf
	echo "set autoboot=true" >> $zone_conf
	i=0
	# Delegate each of the 5 containers to the zone.
	while ((i < cntctr)); do
		echo "add dataset" >> $zone_conf
		echo "set name=$pool_name/$prefix_ctr$i" >> \
			$zone_conf
		echo "end" >> $zone_conf
		((i += 1))
	done

	# add our zvol to the zone
	echo "add device" >> $zone_conf
	echo "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf
	echo "end" >> $zone_conf

	# add a corresponding zvol rdsk to the zone
	echo "add device" >> $zone_conf
	echo "set match=/dev/zvol/rdsk/$pool_name/zone_zvol" >> $zone_conf
	echo "end" >> $zone_conf

	# once it's supported, we'll add our filesystem to the zone
	# echo "add fs" >> $zone_conf
	# echo "set type=zfs" >> $zone_conf
	# echo "set special=$pool_name/zfs_filesystem" >> $zone_conf
	# echo "set dir=/export/zfs_filesystem" >> $zone_conf
	# echo "end" >> $zone_conf

	echo "verify" >> $zone_conf
	echo "commit" >> $zone_conf
	log_must zonecfg -z $zone_name -f $zone_conf
	log_must rm -f $zone_conf

	# Install the zone
	zoneadm -z $zone_name install
	if (($? == 0)); then
		log_note "SUCCESS: zoneadm -z $zone_name install"
	else
		log_fail "FAIL: zoneadm -z $zone_name install"
	fi

	# Install sysidcfg file
	#
	typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
	echo "system_locale=C" > $sysidcfg
	echo  "terminal=dtterm" >> $sysidcfg
	echo  "network_interface=primary {" >> $sysidcfg
	echo  "hostname=$zone_name" >> $sysidcfg
	echo  "}" >> $sysidcfg
	echo  "name_service=NONE" >> $sysidcfg
	echo  "root_password=mo791xfZ/SFiw" >> $sysidcfg
	echo  "security_policy=NONE" >> $sysidcfg
	echo  "timezone=US/Eastern" >> $sysidcfg

	# Boot this zone
	log_must zoneadm -z $zone_name boot
}
1306
1307#
1308# Reexport TESTPOOL & TESTPOOL(1-4)
1309#
function reexport_pool
{
	typeset -i cntctr=5
	typeset -i i=0

	# Rebuild TESTPOOL and TESTPOOL1..TESTPOOL4 from the zone pool and
	# mount any of those datasets that are not currently mounted.
	while ((i < cntctr)); do
		if ((i == 0)); then
			# The first container is exposed as plain TESTPOOL.
			TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
			if ! ismounted $TESTPOOL; then
				log_must zfs mount $TESTPOOL
			fi
		else
			# The rest are dynamically named TESTPOOL<i>; eval is
			# required to assign and dereference those names.
			eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
			if eval ! ismounted \$TESTPOOL$i; then
				log_must eval zfs mount \$TESTPOOL$i
			fi
		fi
		((i += 1))
	done
}
1330
1331#
1332# Verify a given disk is online or offline
1333#
# Return 0 if pool/disk matches the expected state, 1 otherwise
1335#
function check_state # pool disk state{online,offline}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	# The disk's line in 'zpool status -v' must mention the expected
	# state (matched case-insensitively).
	if zpool status -v $pool | grep "$disk" \
	    | grep -i "$state" > /dev/null 2>&1; then
		return 0
	fi
	return 1
}
1347
1348#
1349# Get the mountpoint of snapshot
1350# For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
1351# as its mountpoint
1352#
function snapshot_mountpoint
{
	typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	# A snapshot name must contain '@'.
	case $dataset in
	*@*)	;;
	*)	log_fail "Error name of snapshot '$dataset'." ;;
	esac

	typeset fs=${dataset%@*}
	typeset snap=${dataset#*@}

	# Both the filesystem and snapshot portions must be non-empty.
	[[ -n $fs && -n $snap ]] || \
		log_fail "Error name of snapshot '$dataset'."

	# Snapshots are visible under <fs mountpoint>/.zfs/snapshot/<snap>.
	echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
}
1370
1371#
1372# Given a pool and file system, this function will verify the file system
1373# using the zdb internal tool. Note that the pool is exported and imported
1374# to ensure it has consistent state.
1375#
function verify_filesys # pool filesystem dir
{
	typeset pool="$1"
	typeset filesys="$2"
	typeset zdbout="/tmp/zdbout.$$"

	# Remaining arguments (if any) are extra directories for
	# 'zpool import' to search for vdev files.
	shift
	shift
	typeset dirs=$@
	typeset search_path=""

	log_note "Calling zdb to verify filesystem '$filesys'"
	# Unmount everything and export/import the pool so zdb examines a
	# consistent on-disk state.
	zfs unmount -a > /dev/null 2>&1
	log_must zpool export $pool

	if [[ -n $dirs ]] ; then
		for dir in $dirs ; do
			search_path="$search_path -d $dir"
		done
	fi

	log_must zpool import $search_path $pool

	# Run zdb's verification pass; on failure dump its output so the
	# log shows what the tool complained about.
	zdb -cudi $filesys > $zdbout 2>&1
	if [[ $? != 0 ]]; then
		log_note "Output: zdb -cudi $filesys"
		cat $zdbout
		log_fail "zdb detected errors with: '$filesys'"
	fi

	log_must zfs mount -a
	log_must rm -rf $zdbout
}
1409
1410#
1411# Given a pool, and this function list all disks in the pool
1412#
1413function get_disklist # pool
1414{
1415	typeset disklist=""
1416
1417	disklist=$(zpool iostat -v $1 | nawk '(NR >4) {print $1}' | \
1418	    grep -v "\-\-\-\-\-" | \
1419	    egrep -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$")
1420
1421	echo $disklist
1422}
1423
1424# /**
1425#  This function kills a given list of processes after a time period. We use
1426#  this in the stress tests instead of STF_TIMEOUT so that we can have processes
1427#  run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
1428#  would be listed as FAIL, which we don't want : we're happy with stress tests
1429#  running for a certain amount of time, then finishing.
1430#
1431# @param $1 the time in seconds after which we should terminate these processes
1432# @param $2..$n the processes we wish to terminate.
1433# */
function stress_timeout
{
	typeset -i timeout=$1
	shift
	typeset pids="$@"
	typeset pid

	# Let the workers run for the requested period...
	log_note "Waiting for child processes($pids). " \
		"It could last dozens of minutes, please be patient ..."
	log_must sleep $timeout

	# ...then signal each one that is still alive.
	log_note "Killing child processes after ${timeout} stress timeout."
	for pid in $pids; do
		if ps -p $pid > /dev/null 2>&1; then
			log_must kill -USR1 $pid
		fi
	done
}
1453
1454#
1455# Verify a given hotspare disk is inuse or avail
1456#
# Return 0 if pool/disk matches the expected state, 1 otherwise
1458#
#
# Check whether a hotspare disk in the pool is reported in the given
# state ("inuse" or "avail") by 'zpool status'.
#
function check_hotspare_state # pool disk state{inuse,avail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3
	# Declare locally; previously cur_state leaked into the global scope.
	typeset cur_state

	cur_state=$(get_device_state $pool $disk "spares")

	if [[ $state != ${cur_state} ]]; then
		return 1
	fi
	return 0
}
1472
1473#
1474# Verify a given slog disk is inuse or avail
1475#
# Return 0 if pool/disk matches the expected state, 1 otherwise
1477#
#
# Check whether a slog (log vdev) disk in the pool is reported in the
# given state by 'zpool status'.
#
function check_slog_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3
	# Declare locally; previously cur_state leaked into the global scope.
	typeset cur_state

	cur_state=$(get_device_state $pool $disk "logs")

	if [[ $state != ${cur_state} ]]; then
		return 1
	fi
	return 0
}
1491
1492#
1493# Verify a given vdev disk is inuse or avail
1494#
# Return 0 if pool/disk matches the expected state, 1 otherwise
1496#
#
# Check whether a regular vdev disk in the pool is reported in the
# given state by 'zpool status'.
#
function check_vdev_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3
	# Declare locally; previously cur_state leaked into the global scope.
	typeset cur_state

	cur_state=$(get_device_state $pool $disk)

	if [[ $state != ${cur_state} ]]; then
		return 1
	fi
	return 0
}
1510
1511#
1512# Check the output of 'zpool status -v <pool>',
1513# and to see if the content of <token> contain the <keyword> specified.
1514#
# Return 0 if the token's line contains the keyword, 1 otherwise
1516#
function check_pool_status # pool token keyword <verbose>
{
	typeset pool=$1
	typeset token=$2
	typeset keyword=$3
	typeset verbose=${4:-false}
	# Declare locally; previously 'scan' leaked into the global scope.
	typeset scan

	# Extract the "<token>:" line (e.g. "scan:") from the status output.
	scan=$(zpool status -v "$pool" 2>/dev/null | nawk -v token="$token:" '
		($1==token) {print $0}')
	if [[ $verbose == true ]]; then
		log_note $scan
	fi
	# Case-insensitive keyword match decides the return status.
	echo $scan | grep -i "$keyword" > /dev/null 2>&1

	return $?
}
1533
1534#
# The following functions are instances of check_pool_status()
1536#	is_pool_resilvering - to check if the pool is resilver in progress
1537#	is_pool_resilvered - to check if the pool is resilver completed
1538#	is_pool_scrubbing - to check if the pool is scrub in progress
1539#	is_pool_scrubbed - to check if the pool is scrub completed
1540#	is_pool_scrub_stopped - to check if the pool is scrub stopped
1541#	is_pool_scrub_paused - to check if the pool has scrub paused
1542#	is_pool_removing - to check if the pool is removing a vdev
1543#	is_pool_removed - to check if the pool is remove completed
1544#
function is_pool_resilvering #pool <verbose>
{
	# Succeeds while the scan line reports an active resilver.
	check_pool_status "$1" "scan" "resilver in progress since " $2
}
1550
function is_pool_resilvered #pool <verbose>
{
	# Succeeds once the scan line reports a completed resilver.
	check_pool_status "$1" "scan" "resilvered " $2
}
1556
function is_pool_scrubbing #pool <verbose>
{
	# Succeeds while the scan line reports an active scrub.
	check_pool_status "$1" "scan" "scrub in progress since " $2
}
1562
function is_pool_scrubbed #pool <verbose>
{
	# Succeeds once the scan line reports a completed scrub.
	check_pool_status "$1" "scan" "scrub repaired" $2
}
1568
function is_pool_scrub_stopped #pool <verbose>
{
	# Succeeds once the scan line reports a canceled scrub.
	check_pool_status "$1" "scan" "scrub canceled" $2
}
1574
function is_pool_scrub_paused #pool <verbose>
{
	# Succeeds while the scan line reports a paused scrub.
	check_pool_status "$1" "scan" "scrub paused since " $2
}
1580
function is_pool_removing #pool
{
	# Succeeds while the remove line reports an active device removal.
	check_pool_status "$1" "remove" "in progress since "
}
1586
function is_pool_removed #pool
{
	# Succeeds once the remove line reports a completed device removal.
	check_pool_status "$1" "remove" "completed on"
}
1592
1593#
# Use create_pool()/destroy_pool() to clean up the information
# in the given disk to avoid slice overlapping.
1596#
function cleanup_devices #vdevs
{
	typeset pool="foopool$$"

	# Creating and immediately destroying a scratch pool scrubs any
	# stale label information from the given devices.
	poolexists $pool && destroy_pool $pool

	create_pool $pool $@
	destroy_pool $pool

	return 0
}
1610
1611#/**
1612# A function to find and locate free disks on a system or from given
1613# disks as the parameter. It works by locating disks that are in use
1614# as swap devices and dump devices, and also disks listed in /etc/vfstab
1615#
1616# $@ given disks to find which are free, default is all disks in
1617# the test system
1618#
1619# @return a string containing the list of available disks
1620#*/
1621function find_disks
1622{
1623	sfi=/tmp/swaplist.$$
1624	dmpi=/tmp/dumpdev.$$
1625	max_finddisksnum=${MAX_FINDDISKSNUM:-6}
1626
1627	swap -l > $sfi
1628	dumpadm > $dmpi 2>/dev/null
1629
1630# write an awk script that can process the output of format
1631# to produce a list of disks we know about. Note that we have
1632# to escape "$2" so that the shell doesn't interpret it while
1633# we're creating the awk script.
1634# -------------------
1635	cat > /tmp/find_disks.awk <<EOF
1636#!/bin/nawk -f
1637	BEGIN { FS="."; }
1638
1639	/^Specify disk/{
1640		searchdisks=0;
1641	}
1642
1643	{
1644		if (searchdisks && \$2 !~ "^$"){
1645			split(\$2,arr," ");
1646			print arr[1];
1647		}
1648	}
1649
1650	/^AVAILABLE DISK SELECTIONS:/{
1651		searchdisks=1;
1652	}
1653EOF
1654#---------------------
1655
1656	chmod 755 /tmp/find_disks.awk
1657	disks=${@:-$(echo "" | format -e 2>/dev/null | /tmp/find_disks.awk)}
1658	rm /tmp/find_disks.awk
1659
1660	unused=""
1661	for disk in $disks; do
1662	# Check for mounted
1663		grep "${disk}[sp]" /etc/mnttab >/dev/null
1664		(($? == 0)) && continue
1665	# Check for swap
1666		grep "${disk}[sp]" $sfi >/dev/null
1667		(($? == 0)) && continue
1668	# check for dump device
1669		grep "${disk}[sp]" $dmpi >/dev/null
1670		(($? == 0)) && continue
1671	# check to see if this disk hasn't been explicitly excluded
1672	# by a user-set environment variable
1673		echo "${ZFS_HOST_DEVICES_IGNORE}" | grep "${disk}" > /dev/null
1674		(($? == 0)) && continue
1675		unused_candidates="$unused_candidates $disk"
1676	done
1677	rm $sfi
1678	rm $dmpi
1679
1680# now just check to see if those disks do actually exist
1681# by looking for a device pointing to the first slice in
1682# each case. limit the number to max_finddisksnum
1683	count=0
1684	for disk in $unused_candidates; do
1685		if [ -b /dev/dsk/${disk}s0 ]; then
1686		if [ $count -lt $max_finddisksnum ]; then
1687			unused="$unused $disk"
1688			# do not impose limit if $@ is provided
1689			[[ -z $@ ]] && ((count = count + 1))
1690		fi
1691		fi
1692	done
1693
1694# finally, return our disk list
1695	echo $unused
1696}
1697
1698#
1699# Add specified user to specified group
1700#
1701# $1 group name
1702# $2 user name
1703# $3 base of the homedir (optional)
1704#
function add_user #<group_name> <user_name> <basedir>
{
	typeset gname=$1
	typeset uname=$2
	typeset basedir=${3:-"/var/tmp"}

	# Both a group and a user name are mandatory.
	if [[ -z $gname || -z $uname ]]; then
		log_fail "group name or user name are not defined."
	fi

	# Create the account with a home directory under basedir.
	log_must useradd -g $gname -d $basedir/$uname -m $uname

	return 0
}
1719
1720#
1721# Delete the specified user.
1722#
1723# $1 login name
1724# $2 base of the homedir (optional)
1725#
function del_user #<logname> <basedir>
{
	typeset user=$1
	typeset basedir=${2:-"/var/tmp"}

	[[ -z $user ]] && log_fail "login name is necessary."

	# Only remove accounts that actually exist.
	if id $user > /dev/null 2>&1; then
		log_must userdel $user
	fi

	# Clean up the home directory, if one was left behind.
	[[ -d $basedir/$user ]] && rm -fr $basedir/$user

	return 0
}
1743
1744#
1745# Select valid gid and create specified group.
1746#
1747# $1 group name
1748#
function add_group #<group_name>
{
	typeset group=$1

	[[ -z $group ]] && log_fail "group name is necessary."

	# Walk upwards from gid 100 until groupadd accepts one.
	typeset -i gid=100
	typeset -i ret
	while true; do
		groupadd -g $gid $group > /dev/null 2>&1
		ret=$?
		case $ret in
			0) return 0 ;;
			# Exit code 4 means the gid is already in use.
			4) ((gid += 1)) ;;
			*) return 1 ;;
		esac
	done
}
1770
1771#
1772# Delete the specified group.
1773#
1774# $1 group name
1775#
function del_group #<group_name>
{
	typeset grp=$1
	if ((${#grp} == 0)); then
		log_fail "group name is necessary."
	fi

	# Probe for the group by "renaming" it to its own name; groupmod's
	# exit code then tells us whether it exists without parsing output.
	groupmod -n $grp $grp > /dev/null 2>&1
	typeset -i ret=$?
	case $ret in
		# Group does not exist.
		6) return 0 ;;
		# Name already exists as a group name
		9) log_must groupdel $grp ;;
		*) return 1 ;;
	esac

	return 0
}
1795
1796#
1797# This function will return true if it's safe to destroy the pool passed
1798# as argument 1. It checks for pools based on zvols and files, and also
1799# files contained in a pool that may have a different mountpoint.
1800#
1801function safe_to_destroy_pool { # $1 the pool name
1802
1803	typeset pool=""
1804	typeset DONT_DESTROY=""
1805
1806	# We check that by deleting the $1 pool, we're not
1807	# going to pull the rug out from other pools. Do this
1808	# by looking at all other pools, ensuring that they
1809	# aren't built from files or zvols contained in this pool.
1810
1811	for pool in $(zpool list -H -o name)
1812	do
1813		ALTMOUNTPOOL=""
1814
1815		# this is a list of the top-level directories in each of the
1816		# files that make up the path to the files the pool is based on
1817		FILEPOOL=$(zpool status -v $pool | grep /$1/ | \
1818			awk '{print $1}')
1819
1820		# this is a list of the zvols that make up the pool
1821		ZVOLPOOL=$(zpool status -v $pool | grep "/dev/zvol/dsk/$1$" \
1822		    | awk '{print $1}')
1823
1824		# also want to determine if it's a file-based pool using an
1825		# alternate mountpoint...
1826		POOL_FILE_DIRS=$(zpool status -v $pool | \
1827					grep / | awk '{print $1}' | \
1828					awk -F/ '{print $2}' | grep -v "dev")
1829
1830		for pooldir in $POOL_FILE_DIRS
1831		do
1832			OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
1833					grep "${pooldir}$" | awk '{print $1}')
1834
1835			ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
1836		done
1837
1838
1839		if [ ! -z "$ZVOLPOOL" ]
1840		then
1841			DONT_DESTROY="true"
1842			log_note "Pool $pool is built from $ZVOLPOOL on $1"
1843		fi
1844
1845		if [ ! -z "$FILEPOOL" ]
1846		then
1847			DONT_DESTROY="true"
1848			log_note "Pool $pool is built from $FILEPOOL on $1"
1849		fi
1850
1851		if [ ! -z "$ALTMOUNTPOOL" ]
1852		then
1853			DONT_DESTROY="true"
1854			log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
1855		fi
1856	done
1857
1858	if [ -z "${DONT_DESTROY}" ]
1859	then
1860		return 0
1861	else
1862		log_note "Warning: it is not safe to destroy $1!"
1863		return 1
1864	fi
1865}
1866
1867#
1868# Get the available ZFS compression options
1869# $1 option type zfs_set|zfs_compress
1870#
function get_compress_opts
{
	typeset gzip_opts="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
			gzip-6 gzip-7 gzip-8 gzip-9"
	typeset valid_opts

	# Base set depends on whether the caller wants values accepted by
	# 'zfs set' or values meaningful for compression testing.
	case $1 in
		zfs_compress)	valid_opts="on lzjb" ;;
		zfs_set)	valid_opts="on off lzjb" ;;
	esac

	# Append the gzip variants when this zfs build knows about gzip.
	if zfs get 2>&1 | grep gzip >/dev/null 2>&1; then
		valid_opts="$valid_opts $gzip_opts"
	fi
	echo "$valid_opts"
}
1889
1890#
1891# Verify zfs operation with -p option work as expected
1892# $1 operation, value could be create, clone or rename
1893# $2 dataset type, value could be fs or vol
1894# $3 dataset name
1895# $4 new dataset name
1896#
function verify_opt_p_ops
{
	typeset ops=$1
	typeset datatype=$2
	typeset dataset=$3
	typeset newdataset=$4

	if [[ $datatype != "fs" && $datatype != "vol" ]]; then
		log_fail "$datatype is not supported."
	fi

	# check parameters accordingly
	case $ops in
		create)
			# 'create' takes only a target; volumes also need -V.
			newdataset=$dataset
			dataset=""
			if [[ $datatype == "vol" ]]; then
				ops="create -V $VOLSIZE"
			fi
			;;
		clone)
			# clone source must be an existing snapshot.
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
					"when ops is $ops."
			fi
			log_must datasetexists $dataset
			log_must snapexists $dataset
			;;
		rename)
			# rename source must exist and must not be a snapshot.
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
					"when ops is $ops."
			fi
			log_must datasetexists $dataset
			log_mustnot snapexists $dataset
			;;
		*)
			log_fail "$ops is not supported."
			;;
	esac

	# make sure the upper level filesystem does not exist
	if datasetexists ${newdataset%/*} ; then
		log_must zfs destroy -rRf ${newdataset%/*}
	fi

	# without -p option, operation will fail
	log_mustnot zfs $ops $dataset $newdataset
	log_mustnot datasetexists $newdataset ${newdataset%/*}

	# with -p option, operation should succeed
	log_must zfs $ops -p $dataset $newdataset
	if ! datasetexists $newdataset ; then
		log_fail "-p option does not work for $ops"
	fi

	# when $ops is create or clone, redo the operation still return zero
	if [[ $ops != "rename" ]]; then
		log_must zfs $ops -p $dataset $newdataset
	fi

	return 0
}
1960
1961#
1962# Get configuration of pool
1963# $1 pool name
1964# $2 config name
1965#
function get_config
{
	typeset pool=$1
	typeset config=$2
	typeset alt_root
	typeset value

	if ! poolexists "$pool" ; then
		return 1
	fi
	# An imported pool with no altroot shows "-" in the last column.
	alt_root=$(zpool list -H $pool | awk '{print $NF}')
	if [[ $alt_root == "-" ]]; then
		value=$(zdb -C $pool | grep "$config:" | awk -F: \
		    '{print $2}')
	else
		value=$(zdb -e $pool | grep "$config:" | awk -F: \
		    '{print $2}')
	fi
	if [[ -n $value ]] ; then
		# zdb wraps string values in single quotes; strip them.
		# The previous "${value#'}" spelling relied on fragile
		# unmatched-quote parsing and is not portable.
		value=${value#\'}
		value=${value%\'}
	fi
	echo $value

	return 0
}
1991
1992#
# Private function. Randomly select one of the items from the arguments.
1994#
1995# $1 count
1996# $2-n string
1997#
function _random_get
{
	typeset cnt=$1
	shift
	typeset str="$@"

	# Pick a random field index in [1, cnt] and emit that
	# space-separated field.
	typeset -i ind=$((RANDOM % cnt + 1))

	echo $(echo "$str" | cut -f $ind -d ' ')
}
2010
2011#
2012# Random select one of item from arguments which include NONE string
2013#
#
# Randomly select one item from the arguments, where the selection may
# also land past the last argument and yield an empty (NONE) result.
#
function random_get_with_non
{
	typeset -i cnt=$#

	# Add one extra slot so the pick can fall beyond the argument
	# list, producing the NONE/empty case.  The original
	# '((cnt =+ 1))' assigned +1 to cnt instead of incrementing it,
	# so NONE could never be selected.
	((cnt += 1))

	_random_get "$cnt" "$@"
}
2021
2022#
2023# Random select one of item from arguments which doesn't include NONE string
2024#
function random_get
{
	# Forward to the helper with the exact argument count, so every
	# result is one of the given items.
	typeset -i cnt=$#
	_random_get "$cnt" "$@"
}
2029
2030#
2031# Detect if the current system support slog
2032#
function verify_slog_support
{
	typeset dir=/tmp/disk.$$
	typeset pool=foo.$$
	typeset vdev=$dir/a
	typeset sdev=$dir/b
	typeset -i ret=0

	mkdir -p $dir
	mkfile $MINVDEVSIZE $vdev $sdev

	# A dry-run pool creation with a log vdev succeeds only when the
	# running bits support slog devices.
	zpool create -n $pool $vdev log $sdev > /dev/null 2>&1 || ret=1

	rm -r $dir
	return $ret
}
2051
2052#
2053# The function will generate a dataset name with specific length
2054# $1, the length of the name
2055# $2, the base string to construct the name
2056#
function gen_dataset_name
{
	typeset -i len=$1
	typeset basestr="$2"
	typeset -i baselen=${#basestr}
	typeset -i reps
	typeset name=""

	# Number of copies of basestr needed to reach at least len
	# characters (ceiling division).
	((reps = (len + baselen - 1) / baselen))

	while ((reps > 0)); do
		name="${name}$basestr"
		((reps -= 1))
	done

	echo $name
}
2078
2079#
2080# Get cksum tuple of dataset
2081# $1 dataset name
2082#
2083# sample zdb output:
2084# Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
2085# DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
2086# lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
2087# fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
2088function datasetcksum
2089{
2090	typeset cksum
2091	sync
2092	cksum=$(zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" \
2093		| awk -F= '{print $7}')
2094	echo $cksum
2095}
2096
2097#
2098# Get cksum of file
2099# #1 file path
2100#
function checksum
{
	# The first field of cksum(1) output is the CRC of the file data.
	cksum $1 | awk '{print $1}'
}
2107
2108#
2109# Get the given disk/slice state from the specific field of the pool
2110#
2111function get_device_state #pool disk field("", "spares","logs")
2112{
2113	typeset pool=$1
2114	typeset disk=${2#/dev/dsk/}
2115	typeset field=${3:-$pool}
2116
2117	state=$(zpool status -v "$pool" 2>/dev/null | \
2118		nawk -v device=$disk -v pool=$pool -v field=$field \
2119		'BEGIN {startconfig=0; startfield=0; }
2120		/config:/ {startconfig=1}
2121		(startconfig==1) && ($1==field) {startfield=1; next;}
2122		(startfield==1) && ($1==device) {print $2; exit;}
2123		(startfield==1) &&
2124		($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
2125	echo $state
2126}
2127
2128
2129#
2130# print the given directory filesystem type
2131#
2132# $1 directory name
2133#
function get_fstype
{
	typeset dir=$1

	if [[ -z $dir ]]; then
		log_fail "Usage: get_fstype <directory>"
	fi

	#
	#  $ df -n /
	#  /		  : ufs
	#
	# 'df -n' prints "mountpoint : fstype"; the third whitespace
	# field is the filesystem type.
	df -n $dir | awk '{print $3}'
}
2148
2149#
2150# Given a disk, label it to VTOC regardless what label was on the disk
2151# $1 disk
2152#
function labelvtoc
{
	typeset disk=$1
	if [[ -z $disk ]]; then
		log_fail "The disk name is unspecified."
	fi
	typeset label_file=/var/tmp/labelvtoc.$$
	typeset arch=$(uname -p)

	# Build a format(1M) command script appropriate for the platform:
	# x86 needs an fdisk partition first; SPARC labels directly.
	if [[ $arch == "i386" ]]; then
		echo "label" > $label_file
		echo "0" >> $label_file
		echo "" >> $label_file
		echo "q" >> $label_file
		echo "q" >> $label_file

		fdisk -B $disk >/dev/null 2>&1
		# wait a while for fdisk finishes
		sleep 60
	elif [[ $arch == "sparc" ]]; then
		echo "label" > $label_file
		echo "0" >> $label_file
		echo "" >> $label_file
		echo "" >> $label_file
		echo "" >> $label_file
		echo "q" >> $label_file
	else
		log_fail "unknown arch type"
	fi

	# Feed the script to format(1M) non-interactively.
	format -e -s -d $disk -f $label_file
	typeset -i ret_val=$?
	rm -f $label_file
	#
	# wait the format to finish
	#
	sleep 60
	if ((ret_val != 0)); then
		log_fail "unable to label $disk as VTOC."
	fi

	return 0
}
2196
2197#
2198# check if the system was installed as zfsroot or not
# return: 0 if true, otherwise false
2200#
function is_zfsroot
{
	# The root filesystem type reported by df must be zfs.
	df -n / | grep zfs > /dev/null 2>&1
}
2206
2207#
2208# get the root filesystem name if it's zfsroot system.
2209#
2210# return: root filesystem name
function get_rootfs
{
	typeset rootfs

	# Find the dataset mounted at '/' with type zfs in mnttab.
	rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
		/etc/mnttab)
	[[ -n "$rootfs" ]] || log_fail "Can not get rootfs"

	# Sanity-check that the dataset really exists before reporting it.
	if zfs list $rootfs > /dev/null 2>&1; then
		echo $rootfs
	else
		log_fail "This is not a zfsroot system."
	fi
}
2226
2227#
2228# get the rootfs's pool name
2229# return:
2230#       rootpool name
2231#
function get_rootpool
{
	typeset rootfs=""
	typeset rootpool=""

	# Find the dataset mounted at '/' with type zfs in mnttab.
	rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
		 /etc/mnttab)
	if [[ -z "$rootfs" ]]; then
		log_fail "Can not get rootpool"
	fi
	zfs list $rootfs > /dev/null 2>&1
	if (($? == 0)); then
		# The pool name is everything before the first '/'; this
		# replaces the old backtick 'echo | awk' round trip.
		rootpool=${rootfs%%/*}
		echo $rootpool
	else
		log_fail "This is not a zfsroot system."
	fi
}
2249
2250#
2251# Check if the given device is physical device
2252#
function is_physical_device #device
{
	# Strip any /dev/dsk or /dev/rdsk prefix before matching.
	typeset dev=${1#/dev/dsk/}
	dev=${dev#/dev/rdsk/}

	# Physical devices look like c#t#d# / c#d# names.
	echo $dev | egrep "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1
}
2261
2262#
2263# Get the directory path of given device
2264#
#
# Get the directory path of given device
#
function get_device_dir #device
{
	typeset device=$1

	# Call the predicate directly; the old '! $(...)' form ran it in
	# a command substitution and depended on the empty expansion
	# carrying the subshell's exit status.
	if ! is_physical_device $device ; then
		if [[ $device != "/" ]]; then
			# For file-backed vdevs, return the parent directory.
			device=${device%/*}
		fi
		echo $device
	else
		echo "/dev/dsk"
	fi
}
2278
2279#
2280# Get the package name
2281#
function get_package_name
{
	typeset dirpath=${1:-$STC_NAME}

	# Turn path separators into dashes: a/b/c -> SUNWstc-a-b-c.
	echo "SUNWstc-${dirpath//\//-}"
}
2288
2289#
2290# Get the word numbers from a string separated by white space
2291#
function get_word_count
{
	# Count whitespace-separated words in the (unquoted) input.
	typeset text=$1
	echo $text | wc -w
}
2296
2297#
2298# To verify if the require numbers of disks is given
2299#
function verify_disk_count
{
	typeset -i min=${2:-1}
	typeset -i count
	count=$(get_word_count "$1")

	# Abort the test as "untested" when too few disks were supplied.
	if ((count < min)); then
		log_untested "A minimum of $min disks is required to run." \
			" You specified $count disk(s)"
	fi
}
2311
function ds_is_volume
{
	# True when the dataset's 'type' property reports a volume.
	[[ $(get_prop type $1) == "volume" ]]
}
2318
function ds_is_filesystem
{
	# True when the dataset's 'type' property reports a filesystem.
	[[ $(get_prop type $1) == "filesystem" ]]
}
2325
function ds_is_snapshot
{
	# True when the dataset's 'type' property reports a snapshot.
	[[ $(get_prop type $1) == "snapshot" ]]
}
2332
2333#
2334# Check if Trusted Extensions are installed and enabled
2335#
function is_te_enabled
{
	# labeld is the Trusted Extensions service; TE is active only
	# when that service reports "enabled".
	if svcs -H -o state labeld 2>/dev/null | grep "enabled"; then
		return 0
	fi
	return 1
}
2345
2346# Utility function to determine if a system has multiple cpus.
function is_mp
{
	# More than one line from psrinfo means more than one CPU.
	typeset -i ncpu
	ncpu=$(psrinfo | wc -l)
	((ncpu > 1))
}
2351
2352function get_cpu_freq
2353{
2354	psrinfo -v 0 | awk '/processor operates at/ {print $6}'
2355}
2356
2357# Run the given command as the user provided.
2358function user_run
2359{
2360	typeset user=$1
2361	shift
2362
2363	eval su \$user -c \"$@\" > /tmp/out 2>/tmp/err
2364	return $?
2365}
2366
2367#
2368# Check if the pool contains the specified vdevs
2369#
2370# $1 pool
2371# $2..n <vdev> ...
2372#
2373# Return 0 if the vdevs are contained in the pool, 1 if any of the specified
2374# vdevs is not in the pool, and 2 if pool name is missing.
2375#
function vdevs_in_pool
{
	typeset pool=$1
	typeset vdev
	typeset tmpfile

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 2
	fi

	shift

	tmpfile=$(mktemp)
	zpool list -Hv "$pool" >$tmpfile
	for vdev in $@; do
		# Match on the leaf device name only; callers may pass paths.
		if ! grep -w ${vdev##*/} $tmpfile >/dev/null 2>&1; then
			# Remove the temp file on this path too; it used to
			# be leaked whenever a vdev was missing.
			rm -f $tmpfile
			return 1
		fi
	done

	rm -f $tmpfile

	return 0
}
2399
#
# Print the largest of the given integer arguments.
#
function get_max
{
	# '-i' (integer) replaces the original '-l', which is the
	# lowercase-conversion flag and was almost certainly a typo.
	typeset -i i max=$1
	shift

	for i in "$@"; do
		# Pure arithmetic; the old $(echo $((...))) form forked a
		# subshell per element for no benefit.
		((i > max)) && max=$i
	done

	echo $max
}
2411
#
# Print the smallest of the given integer arguments.
#
function get_min
{
	# '-i' (integer) replaces the original '-l', which is the
	# lowercase-conversion flag and was almost certainly a typo.
	typeset -i i min=$1
	shift

	for i in "$@"; do
		# Pure arithmetic; the old $(echo $((...))) form forked a
		# subshell per element for no benefit.
		((i < min)) && min=$i
	done

	echo $min
}
2423
2424#
2425# Generate a random number between 1 and the argument.
2426#
function random
{
	# Uniformly pick an integer in [1, $1].
	typeset -i ceil=$1
	echo $((RANDOM % ceil + 1))
}
2432
2433# Write data that can be compressed into a directory
2434function write_compressible
2435{
2436	typeset dir=$1
2437	typeset megs=$2
2438	typeset nfiles=${3:-1}
2439	typeset bs=${4:-1024k}
2440	typeset fname=${5:-file}
2441
2442	[[ -d $dir ]] || log_fail "No directory: $dir"
2443
2444	log_must eval "fio \
2445	    --name=job \
2446	    --fallocate=0 \
2447	    --minimal \
2448	    --randrepeat=0 \
2449	    --buffer_compress_percentage=66 \
2450	    --buffer_compress_chunk=4096 \
2451	    --directory=$dir \
2452	    --numjobs=$nfiles \
2453	    --rw=write \
2454	    --bs=$bs \
2455	    --filesize=$megs \
2456	    --filename_format='$fname.\$jobnum' >/dev/null"
2457}
2458
function get_objnum
{
	typeset pathname=$1

	[[ -e $pathname ]] || log_fail "No such file or directory: $pathname"
	# 'stat -c %i' prints the inode (object) number of the path.
	stat -c %i $pathname
}
2468
2469#
2470# Prints the current time in seconds since UNIX Epoch.
2471#
2472function current_epoch
2473{
2474	printf '%(%s)T'
2475}
2476
2477#
2478# Get decimal value of global uint32_t variable using mdb.
2479#
function mdb_get_uint32
{
	typeset variable=$1
	typeset value

	# '/X' reads the 32-bit value in hex; '::eval .=U' re-prints it
	# as unsigned decimal.
	value=$(mdb -k -e "$variable/X | ::eval .=U")
	if (($? != 0)); then
		log_fail "Failed to get value of '$variable' from mdb."
		return 1
	fi

	echo $value
	return 0
}
2494
2495#
2496# Set global uint32_t variable to a decimal value using mdb.
2497#
function mdb_set_uint32
{
	typeset variable=$1
	typeset value=$2

	# The '0t' prefix forces mdb to interpret the value as decimal.
	if ! mdb -kw -e "$variable/W 0t$value" > /dev/null; then
		echo "Failed to set '$variable' to '$value' in mdb."
		return 1
	fi

	return 0
}
2511