/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.blockmanagement;

import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;

import com.google.common.annotations.VisibleForTesting;

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;

/**
 * A Datanode has one or more storages. A storage in the Datanode is represented
 * by this class.
35 */ 36 public class DatanodeStorageInfo { 37 public static final DatanodeStorageInfo[] EMPTY_ARRAY = {}; 38 toDatanodeInfos(DatanodeStorageInfo[] storages)39 public static DatanodeInfo[] toDatanodeInfos(DatanodeStorageInfo[] storages) { 40 return toDatanodeInfos(Arrays.asList(storages)); 41 } toDatanodeInfos(List<DatanodeStorageInfo> storages)42 static DatanodeInfo[] toDatanodeInfos(List<DatanodeStorageInfo> storages) { 43 final DatanodeInfo[] datanodes = new DatanodeInfo[storages.size()]; 44 for(int i = 0; i < storages.size(); i++) { 45 datanodes[i] = storages.get(i).getDatanodeDescriptor(); 46 } 47 return datanodes; 48 } 49 toDatanodeDescriptors( DatanodeStorageInfo[] storages)50 static DatanodeDescriptor[] toDatanodeDescriptors( 51 DatanodeStorageInfo[] storages) { 52 DatanodeDescriptor[] datanodes = new DatanodeDescriptor[storages.length]; 53 for (int i = 0; i < storages.length; ++i) { 54 datanodes[i] = storages[i].getDatanodeDescriptor(); 55 } 56 return datanodes; 57 } 58 toStorageIDs(DatanodeStorageInfo[] storages)59 public static String[] toStorageIDs(DatanodeStorageInfo[] storages) { 60 String[] storageIDs = new String[storages.length]; 61 for(int i = 0; i < storageIDs.length; i++) { 62 storageIDs[i] = storages[i].getStorageID(); 63 } 64 return storageIDs; 65 } 66 toStorageTypes(DatanodeStorageInfo[] storages)67 public static StorageType[] toStorageTypes(DatanodeStorageInfo[] storages) { 68 StorageType[] storageTypes = new StorageType[storages.length]; 69 for(int i = 0; i < storageTypes.length; i++) { 70 storageTypes[i] = storages[i].getStorageType(); 71 } 72 return storageTypes; 73 } 74 updateFromStorage(DatanodeStorage storage)75 public void updateFromStorage(DatanodeStorage storage) { 76 state = storage.getState(); 77 storageType = storage.getStorageType(); 78 } 79 80 /** 81 * Iterates over the list of blocks belonging to the data-node. 
82 */ 83 class BlockIterator implements Iterator<BlockInfoContiguous> { 84 private BlockInfoContiguous current; 85 BlockIterator(BlockInfoContiguous head)86 BlockIterator(BlockInfoContiguous head) { 87 this.current = head; 88 } 89 hasNext()90 public boolean hasNext() { 91 return current != null; 92 } 93 next()94 public BlockInfoContiguous next() { 95 BlockInfoContiguous res = current; 96 current = current.getNext(current.findStorageInfo(DatanodeStorageInfo.this)); 97 return res; 98 } 99 remove()100 public void remove() { 101 throw new UnsupportedOperationException("Sorry. can't remove."); 102 } 103 } 104 105 private final DatanodeDescriptor dn; 106 private final String storageID; 107 private StorageType storageType; 108 private State state; 109 110 private long capacity; 111 private long dfsUsed; 112 private volatile long remaining; 113 private long blockPoolUsed; 114 115 private volatile BlockInfoContiguous blockList = null; 116 private int numBlocks = 0; 117 118 // The ID of the last full block report which updated this storage. 119 private long lastBlockReportId = 0; 120 121 /** The number of block reports received */ 122 private int blockReportCount = 0; 123 124 /** 125 * Set to false on any NN failover, and reset to true 126 * whenever a block report is received. 127 */ 128 private boolean heartbeatedSinceFailover = false; 129 130 /** 131 * At startup or at failover, the storages in the cluster may have pending 132 * block deletions from a previous incarnation of the NameNode. The block 133 * contents are considered as stale until a block report is received. When a 134 * storage is considered as stale, the replicas on it are also considered as 135 * stale. If any block has at least one stale replica, then no invalidations 136 * will be processed for this block. See HDFS-1972. 
137 */ 138 private boolean blockContentsStale = true; 139 DatanodeStorageInfo(DatanodeDescriptor dn, DatanodeStorage s)140 DatanodeStorageInfo(DatanodeDescriptor dn, DatanodeStorage s) { 141 this.dn = dn; 142 this.storageID = s.getStorageID(); 143 this.storageType = s.getStorageType(); 144 this.state = s.getState(); 145 } 146 getBlockReportCount()147 int getBlockReportCount() { 148 return blockReportCount; 149 } 150 setBlockReportCount(int blockReportCount)151 void setBlockReportCount(int blockReportCount) { 152 this.blockReportCount = blockReportCount; 153 } 154 areBlockContentsStale()155 boolean areBlockContentsStale() { 156 return blockContentsStale; 157 } 158 markStaleAfterFailover()159 void markStaleAfterFailover() { 160 heartbeatedSinceFailover = false; 161 blockContentsStale = true; 162 } 163 receivedHeartbeat(StorageReport report)164 void receivedHeartbeat(StorageReport report) { 165 updateState(report); 166 heartbeatedSinceFailover = true; 167 } 168 receivedBlockReport()169 void receivedBlockReport() { 170 if (heartbeatedSinceFailover) { 171 blockContentsStale = false; 172 } 173 blockReportCount++; 174 } 175 176 @VisibleForTesting setUtilizationForTesting(long capacity, long dfsUsed, long remaining, long blockPoolUsed)177 public void setUtilizationForTesting(long capacity, long dfsUsed, 178 long remaining, long blockPoolUsed) { 179 this.capacity = capacity; 180 this.dfsUsed = dfsUsed; 181 this.remaining = remaining; 182 this.blockPoolUsed = blockPoolUsed; 183 } 184 getLastBlockReportId()185 long getLastBlockReportId() { 186 return lastBlockReportId; 187 } 188 setLastBlockReportId(long lastBlockReportId)189 void setLastBlockReportId(long lastBlockReportId) { 190 this.lastBlockReportId = lastBlockReportId; 191 } 192 getState()193 State getState() { 194 return this.state; 195 } 196 setState(State state)197 void setState(State state) { 198 this.state = state; 199 } 200 areBlocksOnFailedStorage()201 boolean areBlocksOnFailedStorage() { 202 return getState() == 
State.FAILED && numBlocks != 0; 203 } 204 getStorageID()205 String getStorageID() { 206 return storageID; 207 } 208 getStorageType()209 public StorageType getStorageType() { 210 return storageType; 211 } 212 getCapacity()213 long getCapacity() { 214 return capacity; 215 } 216 getDfsUsed()217 long getDfsUsed() { 218 return dfsUsed; 219 } 220 getRemaining()221 long getRemaining() { 222 return remaining; 223 } 224 getBlockPoolUsed()225 long getBlockPoolUsed() { 226 return blockPoolUsed; 227 } 228 addBlock(BlockInfoContiguous b)229 public AddBlockResult addBlock(BlockInfoContiguous b) { 230 // First check whether the block belongs to a different storage 231 // on the same DN. 232 AddBlockResult result = AddBlockResult.ADDED; 233 DatanodeStorageInfo otherStorage = 234 b.findStorageInfo(getDatanodeDescriptor()); 235 236 if (otherStorage != null) { 237 if (otherStorage != this) { 238 // The block belongs to a different storage. Remove it first. 239 otherStorage.removeBlock(b); 240 result = AddBlockResult.REPLACED; 241 } else { 242 // The block is already associated with this storage. 243 return AddBlockResult.ALREADY_EXIST; 244 } 245 } 246 247 // add to the head of the data-node list 248 b.addStorage(this); 249 blockList = b.listInsert(blockList, this); 250 numBlocks++; 251 return result; 252 } 253 removeBlock(BlockInfoContiguous b)254 public boolean removeBlock(BlockInfoContiguous b) { 255 blockList = b.listRemove(blockList, this); 256 if (b.removeStorage(this)) { 257 numBlocks--; 258 return true; 259 } else { 260 return false; 261 } 262 } 263 numBlocks()264 int numBlocks() { 265 return numBlocks; 266 } 267 getBlockIterator()268 Iterator<BlockInfoContiguous> getBlockIterator() { 269 return new BlockIterator(blockList); 270 271 } 272 273 /** 274 * Move block to the head of the list of blocks belonging to the data-node. 
275 * @return the index of the head of the blockList 276 */ moveBlockToHead(BlockInfoContiguous b, int curIndex, int headIndex)277 int moveBlockToHead(BlockInfoContiguous b, int curIndex, int headIndex) { 278 blockList = b.moveBlockToHead(blockList, this, curIndex, headIndex); 279 return curIndex; 280 } 281 282 /** 283 * Used for testing only 284 * @return the head of the blockList 285 */ 286 @VisibleForTesting getBlockListHeadForTesting()287 BlockInfoContiguous getBlockListHeadForTesting(){ 288 return blockList; 289 } 290 updateState(StorageReport r)291 void updateState(StorageReport r) { 292 capacity = r.getCapacity(); 293 dfsUsed = r.getDfsUsed(); 294 remaining = r.getRemaining(); 295 blockPoolUsed = r.getBlockPoolUsed(); 296 } 297 getDatanodeDescriptor()298 public DatanodeDescriptor getDatanodeDescriptor() { 299 return dn; 300 } 301 302 /** Increment the number of blocks scheduled for each given storage */ incrementBlocksScheduled(DatanodeStorageInfo... storages)303 public static void incrementBlocksScheduled(DatanodeStorageInfo... 
storages) { 304 for (DatanodeStorageInfo s : storages) { 305 s.getDatanodeDescriptor().incrementBlocksScheduled(s.getStorageType()); 306 } 307 } 308 309 @Override equals(Object obj)310 public boolean equals(Object obj) { 311 if (this == obj) { 312 return true; 313 } else if (obj == null || !(obj instanceof DatanodeStorageInfo)) { 314 return false; 315 } 316 final DatanodeStorageInfo that = (DatanodeStorageInfo)obj; 317 return this.storageID.equals(that.storageID); 318 } 319 320 @Override hashCode()321 public int hashCode() { 322 return storageID.hashCode(); 323 } 324 325 @Override toString()326 public String toString() { 327 return "[" + storageType + "]" + storageID + ":" + state + ":" + dn; 328 } 329 toStorageReport()330 StorageReport toStorageReport() { 331 return new StorageReport( 332 new DatanodeStorage(storageID, state, storageType), 333 false, capacity, dfsUsed, remaining, blockPoolUsed); 334 } 335 toStorageTypes( final Iterable<DatanodeStorageInfo> infos)336 static Iterable<StorageType> toStorageTypes( 337 final Iterable<DatanodeStorageInfo> infos) { 338 return new Iterable<StorageType>() { 339 @Override 340 public Iterator<StorageType> iterator() { 341 return new Iterator<StorageType>() { 342 final Iterator<DatanodeStorageInfo> i = infos.iterator(); 343 @Override 344 public boolean hasNext() {return i.hasNext();} 345 @Override 346 public StorageType next() {return i.next().getStorageType();} 347 @Override 348 public void remove() { 349 throw new UnsupportedOperationException(); 350 } 351 }; 352 } 353 }; 354 } 355 356 /** @return the first {@link DatanodeStorageInfo} corresponding to 357 * the given datanode 358 */ 359 static DatanodeStorageInfo getDatanodeStorageInfo( 360 final Iterable<DatanodeStorageInfo> infos, 361 final DatanodeDescriptor datanode) { 362 if (datanode == null) { 363 return null; 364 } 365 for(DatanodeStorageInfo storage : infos) { 366 if (storage.getDatanodeDescriptor() == datanode) { 367 return storage; 368 } 369 } 370 return null; 
371 } 372 373 static enum AddBlockResult { 374 ADDED, REPLACED, ALREADY_EXIST; 375 } 376 } 377