/*
 * UpgradeUtilities.java
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hdfs;

import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.net.URI;
import java.util.Arrays;
import java.util.Collections;
import java.util.zip.CRC32;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceStorage;
import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;
import org.apache.hadoop.hdfs.server.datanode.DataStorage;
import org.apache.hadoop.hdfs.server.namenode.NNStorage;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;

import com.google.common.base.Preconditions;
import com.google.common.io.Files;
import com.google.common.primitives.Bytes;

/**
 * This class defines a number of static helper methods used by the
 * DFS Upgrade unit tests.  By default, a singleton populated master storage
 * directory is created for a Namenode (contains edits, fsimage,
 * version, and time files) and a Datanode (contains version and
 * block files).  The master directories are created lazily.  They are then
 * copied by the createNameNodeStorageDirs()/createDataNodeStorageDirs()
 * methods to create new storage directories of the appropriate type
 * (Namenode or Datanode).
 */
public class UpgradeUtilities {

  // Root scratch directory on local filesystem
  private static final File TEST_ROOT_DIR =
                      new File(MiniDFSCluster.getBaseDirectory());
  // The singleton master storage directory for Namenode
  private static final File namenodeStorage = new File(TEST_ROOT_DIR, "namenodeMaster");
  // A checksum of the contents in namenodeStorage directory
  private static long namenodeStorageChecksum;
  // The namespaceId of the namenodeStorage directory
  private static int namenodeStorageNamespaceID;
  // The clusterId of the namenodeStorage directory
  private static String namenodeStorageClusterID;
  // The blockpoolId of the namenodeStorage directory
  private static String namenodeStorageBlockPoolID;
  // The fsscTime of the namenodeStorage directory
  private static long namenodeStorageFsscTime;
  // The singleton master storage directory for Datanode
  private static final File datanodeStorage = new File(TEST_ROOT_DIR, "datanodeMaster");
  // A checksum of the contents in datanodeStorage directory
  private static long datanodeStorageChecksum;
  // A checksum of the contents in blockpool storage directory
  private static long blockPoolStorageChecksum;
  // A checksum of the contents in blockpool finalized storage directory
  private static long blockPoolFinalizedStorageChecksum;
  // A checksum of the contents in blockpool rbw storage directory
  private static long blockPoolRbwStorageChecksum;

  /**
   * Initialize the data structures used by this class.
   * IMPORTANT NOTE: This method must be called once before calling
   *                 any other public method on this class.
   * <p>
   * Creates a singleton master populated storage
   * directory for a Namenode (contains edits, fsimage,
   * version, and time files) and a Datanode (contains version and
   * block files).  This can be a lengthy operation.
   */
  public static void initialize() throws Exception {
    createEmptyDirs(new String[] {TEST_ROOT_DIR.toString()});
    Configuration config = new HdfsConfiguration();
    config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, namenodeStorage.toString());
    config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, namenodeStorage.toString());
    config.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, datanodeStorage.toString());
    MiniDFSCluster cluster = null;
    String bpid = null;
    try {
      // format data-node
      createEmptyDirs(new String[] {datanodeStorage.toString()});

      // format and start NameNode and start DataNode
      DFSTestUtil.formatNameNode(config);
      cluster = new MiniDFSCluster.Builder(config)
                                  .numDataNodes(1)
                                  .startupOption(StartupOption.REGULAR)
                                  .format(false)
                                  .manageDataDfsDirs(false)
                                  .manageNameDfsDirs(false)
                                  .build();

      NamenodeProtocols namenode = cluster.getNameNodeRpc();
      namenodeStorageNamespaceID = namenode.versionRequest().getNamespaceID();
      namenodeStorageFsscTime = namenode.versionRequest().getCTime();
      namenodeStorageClusterID = namenode.versionRequest().getClusterID();
      namenodeStorageBlockPoolID = namenode.versionRequest().getBlockPoolID();

      FileSystem fs = FileSystem.get(config);
      Path baseDir = new Path("/TestUpgrade");
      fs.mkdirs(baseDir);

      // write some files
      int bufferSize = 4096;
      byte[] buffer = new byte[bufferSize];
      for (int i = 0; i < bufferSize; i++) {
        buffer[i] = (byte)('0' + i % 50);
      }
      writeFile(fs, new Path(baseDir, "file1"), buffer, bufferSize);
      writeFile(fs, new Path(baseDir, "file2"), buffer, bufferSize);

      // save image
      namenode.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
      namenode.saveNamespace();
      namenode.setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);

      // write more files
      writeFile(fs, new Path(baseDir, "file3"), buffer, bufferSize);
      writeFile(fs, new Path(baseDir, "file4"), buffer, bufferSize);
      bpid = cluster.getNamesystem(0).getBlockPoolId();
    } finally {
      // shutdown
      if (cluster != null) {
        cluster.shutdown();
      }
      FileUtil.fullyDelete(new File(namenodeStorage, "in_use.lock"));
      FileUtil.fullyDelete(new File(datanodeStorage, "in_use.lock"));
    }
    namenodeStorageChecksum = checksumContents(NAME_NODE,
        new File(namenodeStorage, "current"), false);
    File dnCurDir = new File(datanodeStorage, "current");
    datanodeStorageChecksum = checksumContents(DATA_NODE, dnCurDir, false);

    File bpCurDir = new File(BlockPoolSliceStorage.getBpRoot(bpid, dnCurDir),
        "current");
    blockPoolStorageChecksum = checksumContents(DATA_NODE, bpCurDir, false);

    File bpCurFinalizeDir = new File(BlockPoolSliceStorage.getBpRoot(bpid, dnCurDir),
        "current/" + DataStorage.STORAGE_DIR_FINALIZED);
    blockPoolFinalizedStorageChecksum = checksumContents(DATA_NODE,
        bpCurFinalizeDir, true);

    File bpCurRbwDir = new File(BlockPoolSliceStorage.getBpRoot(bpid, dnCurDir),
        "current/" + DataStorage.STORAGE_DIR_RBW);
    blockPoolRbwStorageChecksum = checksumContents(DATA_NODE, bpCurRbwDir,
        false);
  }

  // Private helper method that writes a file to the given file system.
  private static void writeFile(FileSystem fs, Path path, byte[] buffer,
                                int bufferSize) throws IOException {
    OutputStream out = fs.create(path, true, bufferSize, (short) 1, 1024);
    try {
      out.write(buffer, 0, bufferSize);
    } finally {
      out.close();
    }
  }

  /**
   * Initialize {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} and
   * {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} with the specified
   * number of directory entries. Also initialize dfs.blockreport.intervalMsec.
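   * <p>
   * For example, {@code initializeStorageStateConf(2, null)} returns a
   * configuration whose name dirs are
   * {@code <TEST_ROOT_DIR>/name1,<TEST_ROOT_DIR>/name2} and whose data dirs
   * are {@code <TEST_ROOT_DIR>/data1,<TEST_ROOT_DIR>/data2}.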
   */
  public static Configuration initializeStorageStateConf(int numDirs,
                                                         Configuration conf) {
    StringBuilder nameNodeDirs =
        new StringBuilder(new File(TEST_ROOT_DIR, "name1").toString());
    StringBuilder dataNodeDirs =
        new StringBuilder(new File(TEST_ROOT_DIR, "data1").toString());
    for (int i = 2; i <= numDirs; i++) {
      nameNodeDirs.append(",").append(new File(TEST_ROOT_DIR, "name" + i));
      dataNodeDirs.append(",").append(new File(TEST_ROOT_DIR, "data" + i));
    }
    if (conf == null) {
      conf = new HdfsConfiguration();
    }
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameNodeDirs.toString());
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameNodeDirs.toString());
    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataNodeDirs.toString());
    conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000);
    return conf;
  }

  /**
   * Create empty directories.  If a specified directory already exists,
   * it is first removed.
   */
  public static void createEmptyDirs(String[] dirs) throws IOException {
    for (String d : dirs) {
      File dir = new File(d);
      if (dir.exists()) {
        FileUtil.fullyDelete(dir);
      }
      if (!dir.mkdirs()) {
        throw new IOException("Could not create directory " + dir);
      }
    }
  }

  /**
   * Return the checksum for the singleton master storage directory
   * for the namenode.
   */
  public static long checksumMasterNameNodeContents() {
    return namenodeStorageChecksum;
  }

  /**
   * Return the checksum for the singleton master storage directory
   * for the datanode.
   */
  public static long checksumMasterDataNodeContents() {
    return datanodeStorageChecksum;
  }

  /**
   * Return the checksum for the singleton master storage directory
   * for the block pool.
   */
  public static long checksumMasterBlockPoolContents() {
    return blockPoolStorageChecksum;
  }

  /**
   * Return the checksum for the singleton master storage directory
   * for the finalized dir under the block pool.
   */
  public static long checksumMasterBlockPoolFinalizedContents() {
    return blockPoolFinalizedStorageChecksum;
  }

  /**
   * Return the checksum for the singleton master storage directory
   * for the rbw dir under the block pool.
   */
  public static long checksumMasterBlockPoolRbwContents() {
    return blockPoolRbwStorageChecksum;
  }

  /**
   * Compute the checksum of all the files in the specified directory.
   * This method provides an easy way to ensure equality between the contents
   * of two directories.
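   * <p>
   * A minimal sketch of the intended use ({@code cur} and the upgrade step
   * are hypothetical):
   * <pre>{@code
   * long before = UpgradeUtilities.checksumContents(NAME_NODE, cur, false);
   * // ... perform an upgrade or rollback on the storage directory ...
   * long after = UpgradeUtilities.checksumContents(NAME_NODE, cur, false);
   * assertEquals(before, after);
   * }</pre>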
   *
   * @param nodeType if DATA_NODE then any file named "VERSION" or "dfsUsed"
   *    is ignored.  This is because these files are changed every time
   *    the Datanode is started.
   * @param dir must be a directory
   * @param recursive whether or not to consider subdirectories
   *
   * @throws IllegalArgumentException if specified directory is not a directory
   * @throws IOException if an IOException occurs while reading the files
   * @return the computed checksum value
   */
  public static long checksumContents(NodeType nodeType, File dir,
      boolean recursive) throws IOException {
    CRC32 checksum = new CRC32();
    checksumContentsHelper(nodeType, dir, checksum, recursive);
    return checksum.getValue();
  }

  public static void checksumContentsHelper(NodeType nodeType, File dir,
      CRC32 checksum, boolean recursive) throws IOException {
    if (!dir.isDirectory()) {
      throw new IllegalArgumentException(
          "Given argument is not a directory:" + dir);
    }
    File[] list = dir.listFiles();
    if (list == null) {
      throw new IOException("Could not list contents of " + dir);
    }
    Arrays.sort(list);
    for (int i = 0; i < list.length; i++) {
      if (!list[i].isFile()) {
        if (recursive) {
          checksumContentsHelper(nodeType, list[i], checksum, recursive);
        }
        continue;
      }

      // skip VERSION and dfsUsed file for DataNodes
      if (nodeType == DATA_NODE &&
          (list[i].getName().equals("VERSION") ||
              list[i].getName().equals("dfsUsed"))) {
        continue;
      }

      FileInputStream fis = null;
      try {
        fis = new FileInputStream(list[i]);
        byte[] buffer = new byte[1024];
        int bytesRead;
        while ((bytesRead = fis.read(buffer)) != -1) {
          checksum.update(buffer, 0, bytesRead);
        }
      } finally {
        if (fis != null) {
          fis.close();
        }
      }
    }
  }

  /**
   * Simulate the {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} of a populated
   * DFS filesystem.
   * For each parent directory, this method populates <code>parent/dirName</code>
   * with the contents of the namenode storage directory that comes from the
   * singleton namenode master (which contains edits, fsimage, version, and
   * time files).  If the destination directory does not exist, it will be
   * created.  If the directory already exists, it will first be deleted.
   *
   * @param parents parent directories where {@code dirName} is created
   * @param dirName directory under which the storage directory is created
   * @return the array of created directories
   */
  public static File[] createNameNodeStorageDirs(String[] parents,
      String dirName) throws Exception {
    File[] retVal = new File[parents.length];
    for (int i = 0; i < parents.length; i++) {
      File newDir = new File(parents[i], dirName);
      createEmptyDirs(new String[] {newDir.toString()});
      LocalFileSystem localFS = FileSystem.getLocal(new HdfsConfiguration());
      localFS.copyToLocalFile(new Path(namenodeStorage.toString(), "current"),
                              new Path(newDir.toString()),
                              false);
      retVal[i] = newDir;
    }
    return retVal;
  }

  /**
   * Simulate the {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} of a
   * populated DFS filesystem.
   * For each parent directory, this method populates <code>parent/dirName</code>
   * with the contents of the datanode storage directory that comes from the
   * singleton datanode master (which contains version and block files).  If
   * the destination directory does not exist, it will be created.  If the
   * directory already exists, it will first be deleted.
   *
   * @param parents parent directories where {@code dirName} is created
   * @param dirName directory under which the storage directory is created
   * @return the array of created directories
   */
  public static File[] createDataNodeStorageDirs(String[] parents,
      String dirName) throws Exception {
    File[] retVal = new File[parents.length];
    for (int i = 0; i < parents.length; i++) {
      File newDir = new File(parents[i], dirName);
      createEmptyDirs(new String[] {newDir.toString()});
      LocalFileSystem localFS = FileSystem.getLocal(new HdfsConfiguration());
      localFS.copyToLocalFile(new Path(datanodeStorage.toString(), "current"),
                              new Path(newDir.toString()),
                              false);
      retVal[i] = newDir;
    }
    return retVal;
  }

  /**
   * Simulate the {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} of a
   * populated DFS filesystem.
   * For each parent directory, this method populates
   * <code>parent/current/bpid/dirName</code> with the contents of the block
   * pool storage directory that comes from the singleton datanode master
   * (which contains version and block files).  If the destination directory
   * does not exist, it will be created.  If the directory already exists,
   * it will first be deleted.
   *
   * @param parents parent directories where {@code dirName} is created
   * @param dirName directory under which the storage directory is created
   * @param bpid block pool id for which the storage directory is created
   * @return the array of created directories
   */
  public static File[] createBlockPoolStorageDirs(String[] parents,
      String dirName, String bpid) throws Exception {
    File[] retVal = new File[parents.length];
    Path bpCurDir = new Path(MiniDFSCluster.getBPDir(datanodeStorage,
        bpid, Storage.STORAGE_DIR_CURRENT));
    for (int i = 0; i < parents.length; i++) {
      File newDir = new File(parents[i] + "/current/" + bpid, dirName);
      createEmptyDirs(new String[] {newDir.toString()});
      LocalFileSystem localFS = FileSystem.getLocal(new HdfsConfiguration());
      localFS.copyToLocalFile(bpCurDir,
                              new Path(newDir.toString()),
                              false);
      retVal[i] = newDir;
    }
    return retVal;
  }

  /**
   * Create a <code>VERSION</code> file for the namenode inside each of the
   * specified parent directories.  If such a file already exists, it is
   * overwritten.  The fields of the given {@code version} are written to
   * the file.  None of the parameters may be null.
   *
   * @param conf configuration used to construct the {@link NNStorage}
   * @param parent directories where the namenode VERSION file is stored
   * @param version StorageInfo to create the VERSION file from
   * @param bpid Block pool Id
   *
   * @return the created version files
   */
  public static File[] createNameNodeVersionFile(Configuration conf,
      File[] parent, StorageInfo version, String bpid) throws IOException {
    Storage storage = new NNStorage(conf,
                              Collections.<URI>emptyList(),
                              Collections.<URI>emptyList());
    storage.setStorageInfo(version);
    File[] versionFiles = new File[parent.length];
    for (int i = 0; i < parent.length; i++) {
      versionFiles[i] = new File(parent[i], "VERSION");
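      // Each entry in "parent" is expected to be a storage "current"
      // directory, so the StorageDirectory root is one level up.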
      StorageDirectory sd = new StorageDirectory(parent[i].getParentFile());
      storage.writeProperties(versionFiles[i], sd);
    }
    return versionFiles;
  }

  /**
   * Create a <code>VERSION</code> file for the datanode inside each of the
   * specified parent directories.  If such a file already exists, it is
   * overwritten.  The fields of the given {@code version} are written to
   * the file.  None of the parameters may be null.
   *
   * @param parent directories where the datanode VERSION file is stored
   * @param version StorageInfo to create the VERSION file from
   * @param bpid Block pool Id
   */
  public static void createDataNodeVersionFile(File[] parent,
      StorageInfo version, String bpid) throws IOException {
    createDataNodeVersionFile(parent, version, bpid, bpid);
  }

  /**
   * Create a <code>VERSION</code> file for the datanode inside each of the
   * specified parent directories.  If such a file already exists, it is
   * overwritten.  The fields of the given {@code version} are written to
   * the file.  None of the parameters may be null.
   *
   * @param parent directories where the datanode VERSION file is stored
   * @param version StorageInfo to create the VERSION file from
   * @param bpid Block pool Id
   * @param bpidToWrite Block pool Id to write into the VERSION file
   */
  public static void createDataNodeVersionFile(File[] parent,
      StorageInfo version, String bpid, String bpidToWrite) throws IOException {
    DataStorage storage = new DataStorage(version);
    storage.setDatanodeUuid("FixedDatanodeUuid");

    File[] versionFiles = new File[parent.length];
    for (int i = 0; i < parent.length; i++) {
      File versionFile = new File(parent[i], "VERSION");
      StorageDirectory sd = new StorageDirectory(parent[i].getParentFile());
      storage.createStorageID(sd, false);
      storage.writeProperties(versionFile, sd);
      versionFiles[i] = versionFile;
      File bpDir = BlockPoolSliceStorage.getBpRoot(bpid, parent[i]);
      createBlockPoolVersionFile(bpDir, version, bpidToWrite);
    }
  }

  public static void createBlockPoolVersionFile(File bpDir,
      StorageInfo version, String bpid) throws IOException {
    // Create block pool version files
    if (DataNodeLayoutVersion.supports(
        LayoutVersion.Feature.FEDERATION, version.layoutVersion)) {
      File bpCurDir = new File(bpDir, Storage.STORAGE_DIR_CURRENT);
      BlockPoolSliceStorage bpStorage = new BlockPoolSliceStorage(version,
          bpid);
      File versionFile = new File(bpCurDir, "VERSION");
      StorageDirectory sd = new StorageDirectory(bpDir);
      bpStorage.writeProperties(versionFile, sd);
    }
  }

  /**
   * Corrupt the specified file by replacing the first occurrence of
   * <code>stringToCorrupt</code> in its contents with <code>replacement</code>.
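   * <p>
   * For example, a test can corrupt the layout version property of a VERSION
   * file (a sketch; {@code nameNodeDirs} is hypothetical, and both byte
   * arrays must have the same length):
   * <pre>{@code
   * UpgradeUtilities.corruptFile(
   *     new File(nameNodeDirs[0], "current/VERSION"),
   *     "layoutVersion".getBytes(StandardCharsets.UTF_8),
   *     "xxxxxxxxxxxxx".getBytes(StandardCharsets.UTF_8));
   * }</pre>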
   *
   * @throws IllegalArgumentException if the given file is not a file
   * @throws IOException if an IOException occurs while reading or writing
   *    the file, or if the file does not contain {@code stringToCorrupt}
   */
  public static void corruptFile(File file,
      byte[] stringToCorrupt,
      byte[] replacement) throws IOException {
    Preconditions.checkArgument(replacement.length == stringToCorrupt.length);
    if (!file.isFile()) {
      throw new IllegalArgumentException(
          "Given argument is not a file:" + file);
    }
    byte[] data = Files.toByteArray(file);
    int index = Bytes.indexOf(data, stringToCorrupt);
    if (index == -1) {
      throw new IOException(
          "File " + file + " does not contain string " +
          new String(stringToCorrupt));
    }

    for (int i = 0; i < stringToCorrupt.length; i++) {
      data[index + i] = replacement[i];
    }
    Files.write(data, file);
  }

  /**
   * Return the layout version inherent in the current version
   * of the Namenode, whether it is running or not.
   */
  public static int getCurrentNameNodeLayoutVersion() {
    return HdfsConstants.NAMENODE_LAYOUT_VERSION;
  }

  /**
   * Return the namespace ID inherent in the currently running
   * Namenode.  If no Namenode is running, return the namespace ID of
   * the master Namenode storage directory.
   *
   * The UpgradeUtilities.initialize() method must be called once before
   * calling this method.
   */
  public static int getCurrentNamespaceID(MiniDFSCluster cluster) throws IOException {
    if (cluster != null) {
      return cluster.getNameNodeRpc().versionRequest().getNamespaceID();
    }
    return namenodeStorageNamespaceID;
  }

  /**
   * Return the cluster ID inherent in the currently running
   * Namenode.  If no Namenode is running, return the cluster ID of
   * the master Namenode storage directory.
   */
  public static String getCurrentClusterID(MiniDFSCluster cluster) throws IOException {
    if (cluster != null) {
      return cluster.getNameNodeRpc().versionRequest().getClusterID();
    }
    return namenodeStorageClusterID;
  }

  /**
   * Return the blockpool ID inherent in the currently running
   * Namenode.  If no Namenode is running, return the blockpool ID of
   * the master Namenode storage directory.
   */
  public static String getCurrentBlockPoolID(MiniDFSCluster cluster) throws IOException {
    if (cluster != null) {
      return cluster.getNameNodeRpc().versionRequest().getBlockPoolID();
    }
    return namenodeStorageBlockPoolID;
  }

  /**
   * Return the File System State Creation Timestamp (FSSCTime) inherent
   * in the currently running Namenode.  If no Namenode is running,
   * return the FSSCTime of the master Namenode storage directory.
   *
   * The UpgradeUtilities.initialize() method must be called once before
   * calling this method.
   */
  public static long getCurrentFsscTime(MiniDFSCluster cluster) throws IOException {
    if (cluster != null) {
      return cluster.getNameNodeRpc().versionRequest().getCTime();
    }
    return namenodeStorageFsscTime;
  }

  /**
   * Create empty block pool directories.
   * @return array of block pool directories
   */
  public static String[] createEmptyBPDirs(String[] baseDirs, String bpid)
      throws IOException {
    String[] bpDirs = new String[baseDirs.length];
    for (int i = 0; i < baseDirs.length; i++) {
      bpDirs[i] = MiniDFSCluster.getBPDir(new File(baseDirs[i]), bpid);
    }
    createEmptyDirs(bpDirs);
    return bpDirs;
  }
}