#!/usr/local/bin/ksh93 -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#

#
# Copyright (c) 2012,2013 Spectra Logic Corporation.  All rights reserved.
# Use is subject to license terms.
#

. $STF_SUITE/include/libtest.kshlib
. $STF_SUITE/include/libgnop.kshlib

################################################################################
#
# __stc_assertion_start
#
# ID: zfsd_fault_001_pos
#
# DESCRIPTION:
#   If a vdev experiences I/O errors, it will become faulted.
#
# STRATEGY:
#   1. Create a storage pool.  Use gnop vdevs so we can inject I/O errors.
#   2. Inject I/O errors while doing I/O to the pool.
#   3. Verify that the vdev becomes FAULTED.
#   4. ONLINE it and verify that it resilvers and joins the pool.
#
# TESTABILITY: explicit
#
# TEST_AUTOMATION_LEVEL: automated
#
# CODING STATUS: COMPLETED (2012-08-09)
#
# __stc_assertion_end
#
################################################################################

verify_runnable "global"

log_assert "ZFS will fault a vdev that produces I/O errors"

ensure_zfsd_running

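# gnop(8) layers a transparent ".nop" provider on top of each disk, which
# lets us inject write errors without touching the underlying device.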
DISK0_NOP=${DISK0}.nop
DISK1_NOP=${DISK1}.nop

log_must create_gnops $DISK0 $DISK1

for type in "raidz" "mirror"; do
	log_note "Testing raid type $type"

	# Create a pool on the supplied disks
	create_pool $TESTPOOL $type "$DISK0_NOP" "$DISK1_NOP"
	log_must $ZFS create $TESTPOOL/$TESTFS

	# Cause some I/O errors writing to the pool
	while true; do
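		# Fail every write (-w 100) with errno 5 (EIO) on the nop
		# provider that backs DISK1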
		log_must gnop configure -e 5 -w 100 "$DISK1_NOP"
		$DD if=/dev/zero bs=128k count=1 >> \
			/$TESTPOOL/$TESTFS/$TESTFILE 2> /dev/null
		$FSYNC /$TESTPOOL/$TESTFS/$TESTFILE
		# Check to see if the pool is faulted yet
		$ZPOOL status $TESTPOOL | grep -q 'state: DEGRADED'
		if [ $? -eq 0 ]
		then
			log_note "$TESTPOOL got degraded"
			break
		fi
	done

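	# The vdev that received the injected errors should now be FAULTED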
	log_must check_state $TESTPOOL $DISK1_NOP "FAULTED"

	# Heal and reattach the failed disk
	log_must gnop configure -w 0 "$DISK1_NOP"
	log_must $ZPOOL online $TESTPOOL "$DISK1_NOP"

	# Verify that the pool resilvers and goes to the ONLINE state
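	# Poll every 2 seconds, for up to ~2 minutes, before giving up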
	for (( retries=60; retries > 0; retries=retries-1 ))
	do
		$ZPOOL status $TESTPOOL | egrep -q "scan:.*resilvered"
		RESILVERED=$?
		$ZPOOL status $TESTPOOL | egrep -q "state:.*ONLINE"
		ONLINE=$?
		if [ $RESILVERED -eq 0 -a $ONLINE -eq 0 ]
		then
			break
		fi
		$SLEEP 2
	done

	if [ $retries -eq 0 ]
	then
		log_fail "$TESTPOOL never resilvered in the allowed time"
	fi

	destroy_pool $TESTPOOL
	log_must $RM -rf /$TESTPOOL
done

log_pass