#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
# Copyright (c) 2022 by Lawrence Livermore National Security, LLC.

. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/reservation/reservation.shlib
. $STF_SUITE/tests/functional/zvol/zvol_common.shlib

#
# DESCRIPTION:
# Stress test multithreaded transfers to multiple zvols. Also verify
# zvol errors show up in zpool status.
#
# STRATEGY:
#
# For both the normal submit_bio() codepath and the blk-mq codepath, do
# the following:
#
# 1. Create one zvol per CPU
# 2. In parallel, spawn an fio "write and verify" for each zvol
# 3. Inject write errors
# 4. Write to one of the zvols with dd and verify the errors
#

verify_runnable "global"

num_zvols=$(get_num_cpus)

# If we were making one big zvol from all the pool space, it would
# be this big:
biggest_zvol_size_possible=$(largest_volsize_from_pool $TESTPOOL)

# Crude calculation: take the biggest zvol size we could possibly
# create, knock 10% off it (for overhead) and divide by the number
# of ZVOLs we want to make.
#
# Round the value using a printf
#
# NOTE(review): 'typeset -f' declares a *function* name in ksh93 (a
# float would be -E/-F, an integer -i).  floor() already yields an
# integral value here, so this works in practice — confirm intent.
typeset -f each_zvol_size=$(( floor($biggest_zvol_size_possible * 0.9 / \
    $num_zvols )))

typeset tmpdir="$(mktemp -d zvol_stress_fio_state.XXXXXX)"

# Create one zvol per CPU ($num_zvols total), each sized to an equal
# share of the pool, and wait for each block device node to appear.
function create_zvols
{
	log_note "Creating $num_zvols zvols that are ${each_zvol_size}B each"
	for i in $(seq $num_zvols) ; do
		log_must zfs create -V $each_zvol_size $TESTPOOL/testvol$i
		block_device_wait "$ZVOL_DEVDIR/$TESTPOOL/testvol$i"
	done
}

# Destroy every zvol made by create_zvols.
function destroy_zvols
{
	for i in $(seq $num_zvols) ; do
		log_must_busy zfs destroy $TESTPOOL/testvol$i
	done
}

# Spawn one write-and-verify fio per zvol, all in parallel, then wait
# for them and fail the test if any fio exits non-zero.
function do_zvol_stress
{
	# Write 10% of each zvol, or 50MB, whichever is less
	zvol_write_size=$((each_zvol_size / 10))
	if [ $zvol_write_size -gt $((50 * 1048576)) ] ; then
		zvol_write_size=$((50 * 1048576))
	fi
	zvol_write_size_mb=$(($zvol_write_size / 1048576))

	if is_linux ; then
		engine=libaio
	else
		engine=psync
	fi

	# Spawn off one fio per zvol in parallel
	pids=""
	for i in $(seq $num_zvols) ; do
		# Spawn one fio per zvol as its own process
		fio --ioengine=$engine --name=zvol_stress$i --direct=0 \
		    --filename="$ZVOL_DEVDIR/$TESTPOOL/testvol$i" --bs=1048576 \
		    --iodepth=10 --readwrite=randwrite --size=${zvol_write_size} \
		    --verify_async=2 --numjobs=1 --verify=sha1 \
		    --verify_fatal=1 \
		    --continue_on_error=none \
		    --error_dump=1 \
		    --exitall_on_error \
		    --aux-path="$tmpdir" --do_verify=1 &
		pids="$pids $!"
	done

	# Wait for all the spawned fios to finish and look for errors.
	# $pids is in spawn order, so the i'th pid belongs to testvol$i;
	# the zvols are numbered from 1, so start counting at 1, not 0.
	i=1
	for pid in $pids ; do
		log_note "waiting on $pid"
		if ! wait $pid ; then
			log_fail "fio error on $TESTPOOL/testvol$i"
		fi
		i=$(($i + 1))
	done
}

# Undo everything the test did: clear injected errors, destroy the
# zvols, restore the blk-mq setting, and remove fio's state files.
function cleanup
{
	log_must zinject -c all
	log_must zpool clear $TESTPOOL
	destroy_zvols
	set_blk_mq 0

	# Remove all fio's leftover state files
	if [ -n "$tmpdir" ] ; then
		log_must rm -fd "$tmpdir"/*.state "$tmpdir"
	fi
}

log_onexit cleanup

log_assert "Stress test zvols"

set_blk_mq 0
create_zvols
# Do some fio write/verifies in parallel
do_zvol_stress
destroy_zvols

# Enable blk-mq (block multi-queue), and re-run the same test
set_blk_mq 1
create_zvols
do_zvol_stress

# Inject some errors, and verify we see some IO errors in zpool status
for DISK in $DISKS ; do
	log_must zinject -d $DISK -f 10 -e io -T write $TESTPOOL
done
log_must dd if=/dev/zero of=$ZVOL_DEVDIR/$TESTPOOL/testvol1 bs=512 count=50
sync_pool $TESTPOOL
log_must zinject -c all

# We should see write errors.  Sum the WRITE error column ($4) for every
# vdev line in 'zpool status -p' output (vdev lines follow the header
# line ending in CKSUM; a blank line ends the vdev listing).
typeset -i write_errors=$(zpool status -p | awk '
	!NF { isvdev = 0 }
	isvdev { errors += $4 }
	/CKSUM$/ { isvdev = 1 }
	END { print errors }
')

if [ $write_errors -eq 0 ] ; then
	log_fail "Expected to see some write errors"
else
	log_note "Correctly saw $write_errors write errors"
fi
log_pass "Done with zvol_stress"