#!/usr/local/bin/ksh93 -p
#
# Copyright 2015 Spectra Logic Corporation.
#

. $STF_SUITE/include/libtest.kshlib

################################################################################
#
# __stc_assertion_start
#
# ID: zpool_destroy_004_pos
#
# DESCRIPTION:
#	'zpool destroy -f <pool>' can forcibly destroy the specified pool,
#	even if that pool has zfs send or receive activity in progress.
#
# STRATEGY:
#	1. Create a storage pool.
#	2. For each sleep time in a set:
#	   2a. For each destroy type (same pool, sender only, receiver only):
#	       - Create a dataset with some amount of data.
#	       - Run zfs send | zfs receive in the background.
#	       - Sleep for the amount of time specified for this run.
#	       - 'zpool destroy -f' the pool.
#	       - Wait for the send|receive to exit.  The test must not kill
#	         it; the destroy itself is responsible for terminating it.
#	       - Verify that the pool was destroyed successfully.
#
# __stc_assertion_end
#
################################################################################

verify_runnable "global"

function cleanup
{
	poolexists $TESTPOOL && destroy_pool $TESTPOOL
	poolexists $TESTPOOL1 && destroy_pool $TESTPOOL1
}

function create_sender
{
	cleanup
	create_pool "$TESTPOOL" "$DISK0"
	log_must $ZFS create $TESTPOOL/$TESTFS
	log_must $MKDIR -p $TESTDIR
	log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
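	# datasz is in MiB: dd writes $datasz blocks of 1024k.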
	log_must dd if=/dev/zero of=$TESTDIR/f0 bs=1024k count=$datasz
	log_must $ZFS snapshot $TESTPOOL/$TESTFS@snap1
}

function create_sender_and_receiver
{
	create_sender
	create_pool "$TESTPOOL1" "$DISK1"
}

function send_recv_destroy
{
	sleeptime=$1
	recv=$2
	to_destroy=$3
	who_to_destroy="$4"

	# The pid of this pipeline is that of zfs receive.
	#
	( $ZFS send -RP $TESTPOOL/$TESTFS@snap1 | $ZFS receive -Fu $recv/d1 ) &
	sndrcv_start=$(date '+%s')
	rcvpid=$!
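	# The zfs send half of the pipeline runs as a child of the
	# backgrounded job, so pgrep -P on that pid locates it.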
	sndpid=$(pgrep -P $rcvpid)

	log_must sleep $sleeptime
	log_note "post sleep: $(ps -p ${sndpid},${rcvpid} -o command)"

	destroy_start=$(date '+%s')
	log_must $ZPOOL destroy -f $to_destroy
	destroy_end=$(date '+%s')
	dtime=$((destroy_end - destroy_start))
	log_note "Destroy of $who_to_destroy took ${dtime} seconds."

	log_note "post destroy: $(ps -p ${sndpid},${rcvpid} -o command)"

	# Wait for send and recv to exit.
	#
	wait $sndpid
	snderr=$?
	wait $rcvpid
	rcverr=$?
	wait_end=$(date '+%s')
	wtime=$((wait_end - sndrcv_start))
	log_note "send|receive took ${wtime} seconds to finish."

	# KSH: a "wait pid" exit status of 127 means that the process never
	# existed or had already completed; ksh's wait only returns the status
	# of a child process if the child was still running when the wait was
	# issued.  Therefore, we cannot infer much about how zpool destroy -f
	# interrupted the send | recv.
	#
	# The real test of success is simply that the pool was destroyed.
	#
	log_note \
	"Destruction of ${who_to_destroy}: send ${snderr}, recv ${rcverr}"

	log_mustnot $ZPOOL list $to_destroy
}

function run_tests
{
	log_note "TEST: send|receive to the same pool"
	create_sender
	send_recv_destroy $sleeptime $TESTPOOL $TESTPOOL SAME_POOL

	log_note "TEST: send|receive to different pools, destroy sender"
	create_sender_and_receiver
	send_recv_destroy $sleeptime $TESTPOOL1 $TESTPOOL SENDER

	log_note "TEST: send|receive to different pools, destroy receiver"
	create_sender_and_receiver
	send_recv_destroy $sleeptime $TESTPOOL1 $TESTPOOL1 RECEIVER
}

log_assert "'zpool destroy -f <pool>' can forcibly destroy an active pool"
log_onexit cleanup
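# set_disks is assumed to be a suite helper that populates $DISK0 and $DISK1
# for the pools created by create_sender and create_sender_and_receiver.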
set_disks

# Faster tests using 1GB data size
datasz=1000
log_note "Running fast tests with 1000MB of data"
for sleeptime in 0.1 0.3 0.5 0.75 1 2 3; do
	run_tests
done

# A longer test that simulates a more realistic send|receive, one that exceeds
# the size of ARC memory by 1/3 and is interrupted well after the start of
# the run.
arcmem=$(sysctl -n vfs.zfs.arc_max)
# The ARC will use 2x datasz of memory since it caches both the src and dst
# copies.
datasz=$((arcmem / 1048576 * 2 / 3))
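# For example, if arc_max were 4 GiB, datasz = 4096 * 2 / 3 = 2730MB, so the
# send|receive touches about 5460MB in total, roughly 4/3 of the ARC.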
log_note "Running longer test with ${datasz}MB of data"
sleeptime=15
run_tests

log_pass "'zpool destroy -f <pool>' successful with active pools."
