/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.web;

import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.doReturn;

import java.io.IOException;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URI;
import java.net.URL;
import java.security.PrivilegedExceptionAction;

import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FSMainOperationsBaseTest;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.AppendTestUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.web.resources.ExceptionHandler;
import org.apache.hadoop.hdfs.web.resources.GetOpParam;
import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.log4j.Level;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;

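/**
 * Runs the generic {@link FSMainOperationsBaseTest} contract tests against
 * WebHDFS, backed by a {@link MiniDFSCluster}, and adds WebHDFS-specific
 * checks for concat, truncate, and jsonParse connection handling.
 */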
public class TestFSMainOperationsWebHdfs extends FSMainOperationsBaseTest {
  {
    ((Log4JLogger)ExceptionHandler.LOG).getLogger().setLevel(Level.ALL);
  }

  private static MiniDFSCluster cluster = null;
  private static Path defaultWorkingDirectory;
  private static FileSystem fileSystem;

  public TestFSMainOperationsWebHdfs() {
    super("/tmp/TestFSMainOperationsWebHdfs");
  }

  @Override
  protected FileSystem createFileSystem() throws Exception {
    return fileSystem;
  }

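  /**
   * Starts a two-datanode MiniDFSCluster with WebHDFS enabled and obtains a
   * FileSystem handle over webhdfs:// as a non-superuser test account.
   */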
  @BeforeClass
  public static void setupCluster() {
    final Configuration conf = new Configuration();
    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
      cluster.waitActive();

      //change root permission to 777
      cluster.getFileSystem().setPermission(
          new Path("/"), new FsPermission((short)0777));

      final String uri = WebHdfsFileSystem.SCHEME + "://"
          + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);

      //get file system as a non-superuser
      final UserGroupInformation current = UserGroupInformation.getCurrentUser();
      final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
          current.getShortUserName() + "x", new String[]{"user"});
      fileSystem = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
        @Override
        public FileSystem run() throws Exception {
          return FileSystem.get(new URI(uri), conf);
        }
      });

      defaultWorkingDirectory = fileSystem.getWorkingDirectory();
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
  }

  @AfterClass
  public static void shutdownCluster() {
    if (cluster != null) {
      cluster.shutdown();
      cluster = null;
    }
  }

  @Override
  protected Path getDefaultWorkingDirectory() {
    return defaultWorkingDirectory;
  }

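  /**
   * Creates four 1k files, concatenates three of them onto the fourth, and
   * verifies the sources are removed and the target grows to 4k.
   */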
  @Test
  public void testConcat() throws Exception {
    Path[] paths = {new Path("/test/hadoop/file1"),
                    new Path("/test/hadoop/file2"),
                    new Path("/test/hadoop/file3")};

    DFSTestUtil.createFile(fSys, paths[0], 1024, (short) 3, 0);
    DFSTestUtil.createFile(fSys, paths[1], 1024, (short) 3, 0);
    DFSTestUtil.createFile(fSys, paths[2], 1024, (short) 3, 0);

    Path catPath = new Path("/test/hadoop/catFile");
    DFSTestUtil.createFile(fSys, catPath, 1024, (short) 3, 0);
    Assert.assertTrue(exists(fSys, catPath));

    fSys.concat(catPath, paths);

    Assert.assertFalse(exists(fSys, paths[0]));
    Assert.assertFalse(exists(fSys, paths[1]));
    Assert.assertFalse(exists(fSys, paths[2]));

    FileStatus fileStatus = fSys.getFileStatus(catPath);
    Assert.assertEquals(1024*4, fileStatus.getLen());
  }

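  /**
   * Truncates a two-block file down to a single block and verifies the new
   * length, the remaining contents, and the space consumed.
   */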
  @Test
  public void testTruncate() throws Exception {
    final short repl = 3;
    final int blockSize = 1024;
    final int numOfBlocks = 2;
    Path dir = getTestRootPath(fSys, "test/hadoop");
    Path file = getTestRootPath(fSys, "test/hadoop/file");

    final byte[] data = getFileData(numOfBlocks, blockSize);
    createFile(fSys, file, data, blockSize, repl);

    final int newLength = blockSize;

    boolean isReady = fSys.truncate(file, newLength);

    Assert.assertTrue("Recovery is not expected.", isReady);

    FileStatus fileStatus = fSys.getFileStatus(file);
    Assert.assertEquals(newLength, fileStatus.getLen());
    AppendTestUtil.checkFullFile(fSys, file, newLength, data, file.toString());

    ContentSummary cs = fSys.getContentSummary(dir);
    Assert.assertEquals("Bad disk space usage", newLength * repl,
        cs.getSpaceConsumed());
    Assert.assertTrue("Deleted", fSys.delete(dir, true));
  }

  // Test that WebHdfsFileSystem.jsonParse() closes the connection's input
  // stream.
  // Closing the input stream in jsonParse will allow WebHDFS to reuse
  // connections to the namenode rather than needing to always open new ones.
  boolean closedInputStream = false;
  @Test
  public void testJsonParseClosesInputStream() throws Exception {
    final WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)fileSystem;
    Path file = getTestRootPath(fSys, "test/hadoop/file");
    createFile(file);
    final HttpOpParam.Op op = GetOpParam.Op.GETHOMEDIRECTORY;
    final URL url = webhdfs.toUrl(op, file);
    final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod(op.getType().toString());
    conn.connect();

    InputStream myIn = new InputStream(){
      private HttpURLConnection localConn = conn;
      @Override
      public void close() throws IOException {
        closedInputStream = true;
        localConn.getInputStream().close();
      }
      @Override
      public int read() throws IOException {
        return localConn.getInputStream().read();
      }
    };
    final HttpURLConnection spyConn = spy(conn);
    doReturn(myIn).when(spyConn).getInputStream();

    try {
      Assert.assertFalse(closedInputStream);
      WebHdfsFileSystem.jsonParse(spyConn, false);
      Assert.assertTrue(closedInputStream);
    } catch (IOException ioe) {
      Assert.fail();
    }
    conn.disconnect();
  }

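  /**
   * Overridden from the base test: probing a path beneath an existing file may
   * surface an AccessControlException through WebHDFS over HDFS, which this
   * version also accepts.
   */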
  @Override
  @Test
  public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception {
    Path testDir = getTestRootPath(fSys, "test/hadoop");
    Assert.assertFalse(exists(fSys, testDir));
    fSys.mkdirs(testDir);
    Assert.assertTrue(exists(fSys, testDir));

    createFile(getTestRootPath(fSys, "test/hadoop/file"));

    Path testSubDir = getTestRootPath(fSys, "test/hadoop/file/subdir");
    try {
      fSys.mkdirs(testSubDir);
      Assert.fail("Should throw IOException.");
    } catch (IOException e) {
      // expected
    }
    try {
      Assert.assertFalse(exists(fSys, testSubDir));
    } catch (AccessControlException e) {
      // also okay for HDFS.
    }

    Path testDeepSubDir = getTestRootPath(fSys, "test/hadoop/file/deep/sub/dir");
    try {
      fSys.mkdirs(testDeepSubDir);
      Assert.fail("Should throw IOException.");
    } catch (IOException e) {
      // expected
    }
    try {
      Assert.assertFalse(exists(fSys, testDeepSubDir));
    } catch (AccessControlException e) {
      // also okay for HDFS.
    }
  }
}