/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hdfs.server.datanode;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.junit.Test;

/**
 * Tests deleteBlockPool functionality.
 */
public class TestDeleteBlockPool {

  @Test
  public void testDeleteBlockPool() throws Exception {
    // Start a cluster with 2 NNs and 2 DNs
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = null;
    try {
      conf.set(DFSConfigKeys.DFS_NAMESERVICES,
          "namesServerId1,namesServerId2");
      cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(
            conf.get(DFSConfigKeys.DFS_NAMESERVICES)))
        .numDataNodes(2).build();

      cluster.waitActive();

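      // getFileSystem(0)/(1) return clients for the two federated nameservices;
      // each nameservice is backed by its own block pool on the datanodes.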
      FileSystem fs1 = cluster.getFileSystem(0);
      FileSystem fs2 = cluster.getFileSystem(1);

      DFSTestUtil.createFile(fs1, new Path("/alpha"), 1024, (short) 2, 54);
      DFSTestUtil.createFile(fs2, new Path("/beta"), 1024, (short) 2, 54);

      DataNode dn1 = cluster.getDataNodes().get(0);
      DataNode dn2 = cluster.getDataNodes().get(1);

      String bpid1 = cluster.getNamesystem(0).getBlockPoolId();
      String bpid2 = cluster.getNamesystem(1).getBlockPoolId();

      File dn1StorageDir1 = cluster.getInstanceStorageDir(0, 0);
      File dn1StorageDir2 = cluster.getInstanceStorageDir(0, 1);
      File dn2StorageDir1 = cluster.getInstanceStorageDir(1, 0);
      File dn2StorageDir2 = cluster.getInstanceStorageDir(1, 1);

      // The BPOfferService for bpid1 is still running on dn1, so the delete must fail
      try {
        dn1.deleteBlockPool(bpid1, true);
        fail("Must not delete a running block pool");
      } catch (IOException expected) {
      }

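      // Refresh dn1 with a configuration that only lists namesServerId2 so that
      // the BPOfferService for bpid1 is stopped on dn1.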
      Configuration nn1Conf = cluster.getConfiguration(1);
      nn1Conf.set(DFSConfigKeys.DFS_NAMESERVICES, "namesServerId2");
      dn1.refreshNamenodes(nn1Conf);
      assertEquals(1, dn1.getAllBpOs().length);

      try {
        dn1.deleteBlockPool(bpid1, false);
        fail("Must not delete if any block files exist unless "
            + "force is true");
      } catch (IOException expected) {
      }

      verifyBlockPoolDirectories(true, dn1StorageDir1, bpid1);
      verifyBlockPoolDirectories(true, dn1StorageDir2, bpid1);

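      // With force set to true, the block pool is removed even though block
      // files for bpid1 still exist on dn1.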
      dn1.deleteBlockPool(bpid1, true);

      verifyBlockPoolDirectories(false, dn1StorageDir1, bpid1);
      verifyBlockPoolDirectories(false, dn1StorageDir2, bpid1);

      fs1.delete(new Path("/alpha"), true);

      // Wait till all bpid1 blocks are deleted from both storage dirs of dn2.
      File finalDir1 = MiniDFSCluster.getFinalizedDir(dn2StorageDir1, bpid1);
      File finalDir2 = MiniDFSCluster.getFinalizedDir(dn2StorageDir2, bpid1);
      while ((!DatanodeUtil.dirNoFilesRecursive(finalDir1)) ||
          (!DatanodeUtil.dirNoFilesRecursive(finalDir2))) {
        try {
          Thread.sleep(3000);
        } catch (Exception ignored) {
        }
      }
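      // Shut down the namenode that owns bpid1 before cleaning up the pool on dn2.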
      cluster.shutdownNameNode(0);

      // Although the namenode is shut down, the BPOfferService for bpid1 is
      // still running on dn2, so the delete must fail.
      try {
        dn2.deleteBlockPool(bpid1, true);
        fail("Must not delete a running block pool");
      } catch (IOException expected) {
      }

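      // Refresh dn2 with the namesServerId2-only configuration so its
      // BPOfferService for bpid1 is stopped as well.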
      dn2.refreshNamenodes(nn1Conf);
      assertEquals(1, dn2.getAllBpOs().length);

      verifyBlockPoolDirectories(true, dn2StorageDir1, bpid1);
      verifyBlockPoolDirectories(true, dn2StorageDir2, bpid1);

      // Now deleteBlockPool must succeed with force as false, because no
      // blocks exist for bpid1 and bpOfferService is also stopped for bpid1.
      dn2.deleteBlockPool(bpid1, false);

      verifyBlockPoolDirectories(false, dn2StorageDir1, bpid1);
      verifyBlockPoolDirectories(false, dn2StorageDir2, bpid1);

      // bpid2 must not be impacted
      verifyBlockPoolDirectories(true, dn1StorageDir1, bpid2);
      verifyBlockPoolDirectories(true, dn1StorageDir2, bpid2);
      verifyBlockPoolDirectories(true, dn2StorageDir1, bpid2);
      verifyBlockPoolDirectories(true, dn2StorageDir2, bpid2);
      // Make sure the second block pool is still working fine
      Path gammaFile = new Path("/gamma");
      DFSTestUtil.createFile(fs2, gammaFile, 1024, (short) 1, 55);
      fs2.setReplication(gammaFile, (short) 2);
      DFSTestUtil.waitReplication(fs2, gammaFile, (short) 2);

    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }

  @Test
  public void testDfsAdminDeleteBlockPool() throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = null;
    try {
      conf.set(DFSConfigKeys.DFS_NAMESERVICES,
          "namesServerId1,namesServerId2");
      cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(
            conf.get(DFSConfigKeys.DFS_NAMESERVICES)))
        .numDataNodes(1).build();

      cluster.waitActive();

      FileSystem fs1 = cluster.getFileSystem(0);
      FileSystem fs2 = cluster.getFileSystem(1);

      DFSTestUtil.createFile(fs1, new Path("/alpha"), 1024, (short) 1, 54);
      DFSTestUtil.createFile(fs2, new Path("/beta"), 1024, (short) 1, 54);

      DataNode dn1 = cluster.getDataNodes().get(0);

      String bpid1 = cluster.getNamesystem(0).getBlockPoolId();
      String bpid2 = cluster.getNamesystem(1).getBlockPoolId();

      File dn1StorageDir1 = cluster.getInstanceStorageDir(0, 0);
      File dn1StorageDir2 = cluster.getInstanceStorageDir(0, 1);

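      // Refresh dn1 so it only tracks namesServerId1; the BPOfferService for
      // bpid2 stops but its directories remain on disk.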
      Configuration nn1Conf = cluster.getConfiguration(0);
      nn1Conf.set(DFSConfigKeys.DFS_NAMESERVICES, "namesServerId1");
      dn1.refreshNamenodes(nn1Conf);
      assertEquals(1, dn1.getAllBpOs().length);

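      // Run "dfsadmin -deleteBlockPool <datanode-ipc-address> <blockpool-id>"
      // against dn1; without the "force" argument it must fail because block
      // files for bpid2 still exist.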
      DFSAdmin admin = new DFSAdmin(nn1Conf);
      String dn1Address = dn1.getDatanodeId().getIpAddr() + ":" + dn1.getIpcPort();
      String[] args = { "-deleteBlockPool", dn1Address, bpid2 };

      int ret = admin.run(args);
      assertFalse(0 == ret);

      verifyBlockPoolDirectories(true, dn1StorageDir1, bpid2);
      verifyBlockPoolDirectories(true, dn1StorageDir2, bpid2);

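      // With the "force" argument the command succeeds and the bpid2 block
      // pool directories are removed from dn1.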
      String[] forceArgs = { "-deleteBlockPool", dn1Address, bpid2, "force" };
      ret = admin.run(forceArgs);
      assertEquals(0, ret);

      verifyBlockPoolDirectories(false, dn1StorageDir1, bpid2);
      verifyBlockPoolDirectories(false, dn1StorageDir2, bpid2);

      // bpid1 remains good
      verifyBlockPoolDirectories(true, dn1StorageDir1, bpid1);
      verifyBlockPoolDirectories(true, dn1StorageDir2, bpid1);

    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }

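  /**
   * Verifies the block pool directory layout under the given storage
   * directory: when shouldExist is false the block pool directory must be
   * gone; otherwise the finalized and rbw directories and the VERSION file
   * must be present.
   */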
  private void verifyBlockPoolDirectories(boolean shouldExist,
      File storageDir, String bpid) throws IOException {
    File bpDir = new File(storageDir, DataStorage.STORAGE_DIR_CURRENT + "/"
        + bpid);

    if (!shouldExist) {
      assertFalse(bpDir.exists());
    } else {
      File bpCurrentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
      File finalizedDir = new File(bpCurrentDir,
          DataStorage.STORAGE_DIR_FINALIZED);
      File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW);
      File versionFile = new File(bpCurrentDir, "VERSION");

      assertTrue(finalizedDir.isDirectory());
      assertTrue(rbwDir.isDirectory());
      assertTrue(versionFile.exists());
    }
  }
}