#! /bin/ksh -p
#
# CDDL HEADER START
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
# CDDL HEADER END
#

#
# Copyright (c) 2015, 2016 by Delphix. All rights reserved.
#

. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/removal/removal.kshlib

#
# Restore the condense tunables to their default values and tear down
# the pool/filesystem state created by default_setup_noexit.
# Registered with log_onexit so it runs on every exit path.
#
function reset
{
	log_must set_tunable64 CONDENSE_INDIRECT_COMMIT_ENTRY_DELAY_MS 0
	log_must set_tunable64 CONDENSE_INDIRECT_OBSOLETE_PCT 25
	log_must set_tunable64 CONDENSE_MIN_MAPPING_BYTES 131072
	default_cleanup_noexit
}

default_setup_noexit "$DISKS" "true"
log_onexit reset

#
# Slow down each condense commit (5000 ms per entry) and make condensing
# trigger as eagerly as possible (5% obsolete, 1-byte minimum mapping),
# so that the export below is very likely to land mid-condense.
#
log_must set_tunable64 CONDENSE_INDIRECT_COMMIT_ENTRY_DELAY_MS 5000
log_must set_tunable64 CONDENSE_INDIRECT_OBSOLETE_PCT 5
log_must set_tunable64 CONDENSE_MIN_MAPPING_BYTES 1

log_must zfs set recordsize=512 "$TESTPOOL/$TESTFS"

#
# Create a large file so that we know some of the blocks will be on the
# removed device, and hence eligible for remapping.
#
log_must dd if=/dev/urandom of="$TESTDIR/file" bs=1024k count=10

#
# Create a file in the other filesystem, which will not be remapped.
#
log_must dd if=/dev/urandom of="$TESTDIR1/file" bs=1024k count=10

#
# Randomly rewrite some of blocks in the file so that there will be holes and
# we will not be able to remap the entire file in a few huge chunks.
#
for i in {1..4096}; do
	#
	# We have to sync periodically so that all the writes don't end up in
	# the same txg. If they were all in the same txg, only the last write
	# would go through and we would not have as many allocations to
	# fragment the file.
	#
	((i % 100 > 0)) || sync_pool "$TESTPOOL" || log_fail "Could not sync."
	random_write "$TESTDIR/file" 512 || \
	    log_fail "Could not random write."
done

# zdb -e needs the directory holding the device nodes; derive it from
# $REMOVEDISK when that is an absolute path, otherwise assume /dev.
REMOVEDISKPATH=/dev
case $REMOVEDISK in
	/*)
		REMOVEDISKPATH=$(dirname "$REMOVEDISK")
		;;
esac

log_must zpool remove "$TESTPOOL" "$REMOVEDISK"
log_must wait_for_removal "$TESTPOOL"
log_mustnot vdevs_in_pool "$TESTPOOL" "$REMOVEDISK"

#
# Touch one block under each L1 indirect block, so that the other data blocks
# will be remapped to their concrete locations. These parameters assume
# recordsize=512, indirect block size of 128K (1024 block pointers per
# indirect block), and file size of less than 20*1024 blocks (10MB).
#
log_must stride_dd -i /dev/urandom -o "$TESTDIR/file" -b 512 -c 20 -s 1024

# Sync twice (with a pause for the delayed condense to start) so the
# remaps above are on disk and a condense is in flight at export time.
sync_pool "$TESTPOOL"
sleep 4
sync_pool "$TESTPOOL"
log_must zpool export "$TESTPOOL"

# Verify (on the exported pool, via zdb -e) that the export really did
# interrupt a condense, then check pool consistency and re-import.
zdb -e -p "$REMOVEDISKPATH" "$TESTPOOL" | grep 'Condensing indirect vdev' || \
    log_fail "Did not export during a condense."
log_must zdb -e -p "$REMOVEDISKPATH" -cudi "$TESTPOOL"
log_must zpool import "$TESTPOOL"

log_pass "Pool can be exported in the middle of a condense."