#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#

#
# Copyright (c) 2021 by vStack. All rights reserved.
#

. $STF_SUITE/include/libtest.shlib

#
# DESCRIPTION:
#	Check device replacement during raidz expansion using expansion pausing.
#
# STRATEGY:
#	1. Create block device files for the test raidz pool
#	2. For a randomly chosen parity value [1..3]
#	    - create a raidz pool with the minimum number of block device files
#	    - create a couple of datasets with different recordsizes and fill them
#	    - set the raidz expand maximum reflow bytes
#	    - attach a new device to the pool
#	    - wait for the reflow bytes to reach the maximum
#	    - offline and zero as many vdevs as the parity allows
#	    - wait some time and start replacing the offlined vdevs
#	    - wait for the replacement to complete and verify the pool status
#	    - loop through the vdevs, replacing them while increasing the
#	      max reflow bytes
#	    - verify the pool
#	    - set the reflow bytes to the maximum value to complete the expansion
#

typeset -r devs=10
typeset -r dev_size_mb=128

typeset -a disks

embedded_slog_min_ms=$(get_tunable EMBEDDED_SLOG_MIN_MS)
original_scrub_after_expand=$(get_tunable SCRUB_AFTER_EXPAND)

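#
# Destroy the test pool if it still exists, remove the backing disk files,
# and restore the tunables modified by this test.
#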
function cleanup
{
	poolexists "$TESTPOOL" && zpool status -v "$TESTPOOL"
	poolexists "$TESTPOOL" && log_must_busy zpool destroy "$TESTPOOL"

	for i in {0..$devs}; do
		log_must rm -f "$TEST_BASE_DIR/dev-$i"
	done

	log_must set_tunable32 EMBEDDED_SLOG_MIN_MS $embedded_slog_min_ms
	log_must set_tunable64 RAIDZ_EXPAND_MAX_REFLOW_BYTES 0
	log_must set_tunable32 SCRUB_AFTER_EXPAND $original_scrub_after_expand
}

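#
# Poll 'zpool status' until the "copied out of" progress counter stops
# changing between one-second samples, which indicates that the reflow has
# stalled at the RAIDZ_EXPAND_MAX_REFLOW_BYTES limit.
#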
function wait_expand_paused
{
	oldcopied='0'
	newcopied='1'
	while [[ $oldcopied != $newcopied ]]; do
		oldcopied=$newcopied
		sleep 1
		newcopied=$(zpool status $TESTPOOL | \
		    grep 'copied out of' | \
		    awk '{print $1}')
	done
}

log_onexit cleanup

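#
# Pick a random starting index, offline <parity> consecutive vdevs and
# overwrite them with zeros, replace them, wait for the replacement to
# finish, then clear errors, scrub, and verify the pool status.
#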
function test_replace # <pool> <devices> <parity>
{
	pool=${1}
	devices=${2}
	nparity=${3}
	device_count=0

	log_must echo "devices=$devices"

	for dev in ${devices}; do
		device_count=$((device_count+1))
	done

	index=$((RANDOM%(device_count-nparity)))
	for (( j=1; j<=$nparity; j=j+1 )); do
		log_must zpool offline $pool ${disks[$((index+j))]}
		log_must dd if=/dev/zero of=${disks[$((index+j))]} \
		    bs=1024k count=$dev_size_mb conv=notrunc
	done

	for (( j=1; j<=$nparity; j=j+1 )); do
		log_must zpool replace $pool ${disks[$((index+j))]}
	done

	log_must zpool wait -t replace $pool
	log_must check_pool_status $pool "scan" "with 0 errors"

	log_must zpool clear $pool
	log_must zpool scrub -w $pool

	log_must zpool status -v
	log_must check_pool_status $pool "scan" "with 0 errors"
}

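# Use a very large EMBEDDED_SLOG_MIN_MS so the tiny test vdevs do not
# dedicate a metaslab to the embedded slog class; the original value is
# restored by the cleanup function.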
log_must set_tunable32 EMBEDDED_SLOG_MIN_MS 99999

# Disk files which will be used by the pool
for i in {0..$(($devs))}; do
	device=$TEST_BASE_DIR/dev-$i
	log_must truncate -s ${dev_size_mb}M $device
	disks[${#disks[*]}+1]=$device
done

nparity=$((RANDOM%(3) + 1))
raid=raidz$nparity
pool=$TESTPOOL
opts="-o cachefile=none"
devices=""

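# Disable the automatic scrub that would otherwise start once the expansion
# completes, so the test controls when scrubs run; the original value is
# restored by the cleanup function.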
log_must set_tunable32 SCRUB_AFTER_EXPAND 0

log_must zpool create -f $opts $pool $raid ${disks[1..$(($nparity+1))]}
devices="${disks[1..$(($nparity+1))]}"

log_must zfs create -o recordsize=8k $pool/fs
log_must fill_fs /$pool/fs 1 128 100 1024 R

log_must zfs create -o recordsize=128k $pool/fs2
log_must fill_fs /$pool/fs2 1 128 100 1024 R

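# Attach the remaining disks one at a time.  For each new disk, pause the
# reflow partway through, exercise device replacement twice while advancing
# the pause point, and then let the expansion run to completion before
# moving on to the next disk.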
for disk in ${disks[$(($nparity+2))..$devs]}; do
	# Pause the reflow at a random offset within the first half of the
	# currently allocated space (two RANDOM values are combined because
	# ksh RANDOM is only 15 bits wide)
	reflow_size=$(get_pool_prop allocated $pool)
	pause=$((((RANDOM << 15) + RANDOM) % reflow_size / 2))
	log_must set_tunable64 RAIDZ_EXPAND_MAX_REFLOW_BYTES $pause

	log_must zpool attach $pool ${raid}-0 $disk
	devices="$devices $disk"

	wait_expand_paused

	for (( i=0; i<2; i++ )); do
		test_replace $pool "$devices" $nparity

		# Advance the pause point by a random amount of up to 25%
		# of the allocated size
		pause=$((pause + (((RANDOM << 15) + RANDOM) % \
		    reflow_size) / 4))
		log_must set_tunable64 RAIDZ_EXPAND_MAX_REFLOW_BYTES $pause

		wait_expand_paused
	done

	# Set pause past largest possible value for this pool
	pause=$((devs*dev_size_mb*1024*1024))
	log_must set_tunable64 RAIDZ_EXPAND_MAX_REFLOW_BYTES $pause

	log_must zpool wait -t raidz_expand $pool
done

log_must zpool destroy "$pool"

log_pass "raidz expansion test succeeded."