/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode.snapshot;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
import org.apache.hadoop.hdfs.util.Diff.ListType;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;

public class TestSetQuotaWithSnapshot {
  protected static final long seed = 0;
  protected static final short REPLICATION = 3;
  protected static final long BLOCKSIZE = 1024;

  protected Configuration conf;
  protected MiniDFSCluster cluster;
  protected FSNamesystem fsn;
  protected FSDirectory fsdir;
  protected DistributedFileSystem hdfs;

  @Rule
  public ExpectedException exception = ExpectedException.none();

  @Before
  public void setUp() throws Exception {
    conf = new Configuration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
        .format(true).build();
    cluster.waitActive();

    fsn = cluster.getNamesystem();
    fsdir = fsn.getFSDirectory();
    hdfs = cluster.getFileSystem();
  }

  @After
  public void tearDown() throws Exception {
    if (cluster != null) {
      cluster.shutdown();
    }
  }

  @Test (timeout=60000)
  public void testSetQuota() throws Exception {
    final Path dir = new Path("/TestSnapshot");
    hdfs.mkdirs(dir);
    // allow snapshot on dir and create snapshot s1
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");

    Path sub = new Path(dir, "sub");
    hdfs.mkdirs(sub);
    Path fileInSub = new Path(sub, "file");
    DFSTestUtil.createFile(hdfs, fileInSub, BLOCKSIZE, REPLICATION, seed);
    INodeDirectory subNode = INodeDirectory.valueOf(
        fsdir.getINode(sub.toString()), sub);
    // subNode should be an INodeDirectory, but should not have the snapshot
    // feature yet
    assertFalse(subNode.isWithSnapshot());

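    // Setting a quota marks the quota as set on sub; it should not, by
    // itself, add the snapshot feature (verified by the assertions below).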
    hdfs.setQuota(sub, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
    subNode = INodeDirectory.valueOf(fsdir.getINode(sub.toString()), sub);
    assertTrue(subNode.isQuotaSet());
    assertFalse(subNode.isWithSnapshot());
  }

  /**
   * Test clearing the quota of a snapshottable dir or a dir with snapshots.
   */
  @Test
  public void testClearQuota() throws Exception {
    final Path dir = new Path("/TestSnapshot");
    hdfs.mkdirs(dir);

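    // setQuota with QUOTA_DONT_SET should keep dir snapshottable and must
    // not record any snapshot diff (see the assertions below).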
    hdfs.allowSnapshot(dir);
    hdfs.setQuota(dir, HdfsConstants.QUOTA_DONT_SET,
        HdfsConstants.QUOTA_DONT_SET);
    INodeDirectory dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
    assertTrue(dirNode.isSnapshottable());
    assertEquals(0, dirNode.getDiffs().asList().size());

    hdfs.setQuota(dir, HdfsConstants.QUOTA_DONT_SET - 1,
        HdfsConstants.QUOTA_DONT_SET - 1);
    dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
    assertTrue(dirNode.isSnapshottable());
    assertEquals(0, dirNode.getDiffs().asList().size());

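    // QUOTA_RESET clears the quota; dir must stay snapshottable and still
    // have no snapshot diffs.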
    hdfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
    dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
    assertTrue(dirNode.isSnapshottable());
    assertEquals(0, dirNode.getDiffs().asList().size());

    // allow snapshot on dir and create snapshot s1
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");

    // clear quota of dir
    hdfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
    // dir should still be a snapshottable directory
    dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
    assertTrue(dirNode.isSnapshottable());
    assertEquals(1, dirNode.getDiffs().asList().size());
    SnapshottableDirectoryStatus[] status = hdfs.getSnapshottableDirListing();
    assertEquals(1, status.length);
    assertEquals(dir, status[0].getFullPath());

    final Path subDir = new Path(dir, "sub");
    hdfs.mkdirs(subDir);
    hdfs.createSnapshot(dir, "s2");
    final Path file = new Path(subDir, "file");
    DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
    hdfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
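    // file was created under subDir after snapshot s2 was taken, so subDir
    // should now have the snapshot feature with a single diff at s2 whose
    // created list contains file; clearing dir's quota above should leave
    // that diff intact.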
    INode subNode = fsdir.getINode4Write(subDir.toString());
    assertTrue(subNode.asDirectory().isWithSnapshot());
    List<DirectoryDiff> diffList = subNode.asDirectory().getDiffs().asList();
    assertEquals(1, diffList.size());
    Snapshot s2 = dirNode.getSnapshot(DFSUtil.string2Bytes("s2"));
    assertEquals(s2.getId(), diffList.get(0).getSnapshotId());
    List<INode> createdList = diffList.get(0).getChildrenDiff()
        .getList(ListType.CREATED);
    assertEquals(1, createdList.size());
    assertSame(fsdir.getINode4Write(file.toString()), createdList.get(0));
  }
}