| Package | Description |
|---|---|
| org.apache.hadoop.hdfs.server.blockmanagement | |
| org.apache.hadoop.hdfs.server.namenode | |
| org.apache.hadoop.hdfs.server.protocol | |
| Modifier and Type | Field | Description |
|---|---|---|
static DatanodeStorageInfo[] |
DatanodeStorageInfo.EMPTY_ARRAY |
|
DatanodeStorageInfo[] |
DatanodeDescriptor.BlockTargetPair.targets |
| Modifier and Type | Field | Description |
|---|---|---|
protected java.util.Map<java.lang.String,DatanodeStorageInfo> |
DatanodeDescriptor.storageMap |
| Modifier and Type | Method | Description |
|---|---|---|
protected DatanodeStorageInfo |
BlockPlacementPolicyDefault.chooseLocalOrFavoredStorage(org.apache.hadoop.net.Node localOrFavoredNode,
boolean isFavoredNode,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
long blocksize,
int maxNodesPerRack,
java.util.List<DatanodeStorageInfo> results,
boolean avoidStaleNodes,
java.util.EnumMap<org.apache.hadoop.fs.StorageType,java.lang.Integer> storageTypes) |
Choose storage of local or favored node.
|
protected DatanodeStorageInfo |
BlockPlacementPolicyDefault.chooseLocalRack(org.apache.hadoop.net.Node localMachine,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
long blocksize,
int maxNodesPerRack,
java.util.List<DatanodeStorageInfo> results,
boolean avoidStaleNodes,
java.util.EnumMap<org.apache.hadoop.fs.StorageType,java.lang.Integer> storageTypes) |
Choose one node from the rack that localMachine is on.
|
protected DatanodeStorageInfo |
BlockPlacementPolicyWithNodeGroup.chooseLocalRack(org.apache.hadoop.net.Node localMachine,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
long blocksize,
int maxNodesPerRack,
java.util.List<DatanodeStorageInfo> results,
boolean avoidStaleNodes,
java.util.EnumMap<org.apache.hadoop.fs.StorageType,java.lang.Integer> storageTypes) |
|
protected DatanodeStorageInfo |
AvailableSpaceBlockPlacementPolicy.chooseLocalStorage(org.apache.hadoop.net.Node localMachine,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
long blocksize,
int maxNodesPerRack,
java.util.List<DatanodeStorageInfo> results,
boolean avoidStaleNodes,
java.util.EnumMap<org.apache.hadoop.fs.StorageType,java.lang.Integer> storageTypes,
boolean fallbackToLocalRack) |
|
protected DatanodeStorageInfo |
BlockPlacementPolicyDefault.chooseLocalStorage(org.apache.hadoop.net.Node localMachine,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
long blocksize,
int maxNodesPerRack,
java.util.List<DatanodeStorageInfo> results,
boolean avoidStaleNodes,
java.util.EnumMap<org.apache.hadoop.fs.StorageType,java.lang.Integer> storageTypes) |
|
protected DatanodeStorageInfo |
BlockPlacementPolicyDefault.chooseLocalStorage(org.apache.hadoop.net.Node localMachine,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
long blocksize,
int maxNodesPerRack,
java.util.List<DatanodeStorageInfo> results,
boolean avoidStaleNodes,
java.util.EnumMap<org.apache.hadoop.fs.StorageType,java.lang.Integer> storageTypes,
boolean fallbackToLocalRack) |
Choose localMachine as the target.
|
protected DatanodeStorageInfo |
BlockPlacementPolicyWithNodeGroup.chooseLocalStorage(org.apache.hadoop.net.Node localMachine,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
long blocksize,
int maxNodesPerRack,
java.util.List<DatanodeStorageInfo> results,
boolean avoidStaleNodes,
java.util.EnumMap<org.apache.hadoop.fs.StorageType,java.lang.Integer> storageTypes,
boolean fallbackToNodeGroupAndLocalRack) |
choose local node of localMachine as the target.
|
protected DatanodeStorageInfo |
BlockPlacementPolicyDefault.chooseRandom(int numOfReplicas,
java.lang.String scope,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
long blocksize,
int maxNodesPerRack,
java.util.List<DatanodeStorageInfo> results,
boolean avoidStaleNodes,
java.util.EnumMap<org.apache.hadoop.fs.StorageType,java.lang.Integer> storageTypes) |
Randomly choose numOfReplicas targets from the given scope.
|
protected DatanodeStorageInfo |
BlockPlacementPolicyDefault.chooseRandom(java.lang.String scope,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
long blocksize,
int maxNodesPerRack,
java.util.List<DatanodeStorageInfo> results,
boolean avoidStaleNodes,
java.util.EnumMap<org.apache.hadoop.fs.StorageType,java.lang.Integer> storageTypes) |
Randomly choose one target from the given scope.
|
DatanodeStorageInfo |
BlockPlacementPolicyDefault.chooseReplicaToDelete(java.util.Collection<DatanodeStorageInfo> moreThanOne,
java.util.Collection<DatanodeStorageInfo> exactlyOne,
java.util.List<org.apache.hadoop.fs.StorageType> excessTypes,
java.util.Map<java.lang.String,java.util.List<DatanodeStorageInfo>> rackMap) |
Decide whether deleting the specified replica of the block still makes
the block conform to the configured block placement policy.
|
DatanodeStorageInfo |
DatanodeDescriptor.chooseStorage4Block(org.apache.hadoop.fs.StorageType t,
long blockSize,
int minBlocksForWrite) |
Find whether the datanode contains good storage of given type to
place block of size
blockSize. |
abstract DatanodeStorageInfo[] |
BlockPlacementPolicy.chooseTarget(java.lang.String srcPath,
int numOfReplicas,
org.apache.hadoop.net.Node writer,
java.util.List<DatanodeStorageInfo> chosen,
boolean returnChosenNodes,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
long blocksize,
org.apache.hadoop.hdfs.protocol.BlockStoragePolicy storagePolicy,
java.util.EnumSet<org.apache.hadoop.hdfs.AddBlockFlag> flags) |
choose numOfReplicas data nodes for writer
to re-replicate a block with size blocksize.
If not enough nodes are available, return as many as we can.
|
DatanodeStorageInfo[] |
BlockPlacementPolicy.chooseTarget(java.lang.String srcPath,
int numOfReplicas,
org.apache.hadoop.net.Node writer,
java.util.List<DatanodeStorageInfo> chosen,
boolean returnChosenNodes,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
long blocksize,
org.apache.hadoop.hdfs.protocol.BlockStoragePolicy storagePolicy,
java.util.EnumSet<org.apache.hadoop.hdfs.AddBlockFlag> flags,
java.util.EnumMap<org.apache.hadoop.fs.StorageType,java.lang.Integer> storageTypes) |
|
DatanodeStorageInfo[] |
BlockPlacementPolicyDefault.chooseTarget(java.lang.String srcPath,
int numOfReplicas,
org.apache.hadoop.net.Node writer,
java.util.List<DatanodeStorageInfo> chosenNodes,
boolean returnChosenNodes,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
long blocksize,
org.apache.hadoop.hdfs.protocol.BlockStoragePolicy storagePolicy,
java.util.EnumSet<org.apache.hadoop.hdfs.AddBlockFlag> flags) |
|
DatanodeStorageInfo[] |
BlockPlacementPolicyDefault.chooseTarget(java.lang.String srcPath,
int numOfReplicas,
org.apache.hadoop.net.Node writer,
java.util.List<DatanodeStorageInfo> chosen,
boolean returnChosenNodes,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
long blocksize,
org.apache.hadoop.hdfs.protocol.BlockStoragePolicy storagePolicy,
java.util.EnumSet<org.apache.hadoop.hdfs.AddBlockFlag> flags,
java.util.EnumMap<org.apache.hadoop.fs.StorageType,java.lang.Integer> storageTypes) |
|
DatanodeStorageInfo[] |
BlockManager.chooseTarget4AdditionalDatanode(java.lang.String src,
int numAdditionalNodes,
org.apache.hadoop.net.Node clientnode,
java.util.List<DatanodeStorageInfo> chosen,
java.util.Set<org.apache.hadoop.net.Node> excludes,
long blocksize,
byte storagePolicyID,
org.apache.hadoop.hdfs.protocol.BlockType blockType) |
Choose target for getting additional datanodes for an existing pipeline.
|
DatanodeStorageInfo[] |
BlockManager.chooseTarget4NewBlock(java.lang.String src,
int numOfReplicas,
org.apache.hadoop.net.Node client,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
long blocksize,
java.util.List<java.lang.String> favoredNodes,
byte storagePolicyID,
org.apache.hadoop.hdfs.protocol.BlockType blockType,
org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy ecPolicy,
java.util.EnumSet<org.apache.hadoop.hdfs.AddBlockFlag> flags) |
Choose target datanodes for creating a new block.
|
DatanodeStorageInfo[] |
BlockManager.chooseTarget4WebHDFS(java.lang.String src,
DatanodeDescriptor clientnode,
java.util.Set<org.apache.hadoop.net.Node> excludes,
long blocksize) |
Choose target for WebHDFS redirection.
|
DatanodeStorageInfo[] |
DatanodeManager.getDatanodeStorageInfos(org.apache.hadoop.hdfs.protocol.DatanodeID[] datanodeID,
java.lang.String[] storageIDs,
java.lang.String format,
java.lang.Object... args) |
|
DatanodeStorageInfo[] |
BlockUnderConstructionFeature.getExpectedStorageLocations() |
Create array of expected replica locations
(as has been assigned by chooseTargets()).
|
DatanodeStorageInfo |
ProvidedStorageMap.getProvidedStorageInfo() |
|
DatanodeStorageInfo |
BlockInfoStriped.StorageAndBlockIndex.getStorage() |
|
DatanodeStorageInfo |
DatanodeDescriptor.getStorageInfo(java.lang.String storageID) |
|
DatanodeStorageInfo[] |
DatanodeDescriptor.getStorageInfos() |
|
DatanodeStorageInfo[] |
BlockManager.getStorages(BlockInfo block) |
| Modifier and Type | Method | Description |
|---|---|---|
abstract java.util.List<DatanodeStorageInfo> |
BlockPlacementPolicy.chooseReplicasToDelete(java.util.Collection<DatanodeStorageInfo> availableReplicas,
java.util.Collection<DatanodeStorageInfo> delCandidates,
int expectedNumOfReplicas,
java.util.List<org.apache.hadoop.fs.StorageType> excessTypes,
DatanodeDescriptor addedNode,
DatanodeDescriptor delNodeHint) |
Select the excess replica storages for deletion based on either
delNodeHint or excess storage types.
|
java.util.List<DatanodeStorageInfo> |
BlockPlacementPolicyDefault.chooseReplicasToDelete(java.util.Collection<DatanodeStorageInfo> availableReplicas,
java.util.Collection<DatanodeStorageInfo> delCandidates,
int expectedNumOfReplicas,
java.util.List<org.apache.hadoop.fs.StorageType> excessTypes,
DatanodeDescriptor addedNode,
DatanodeDescriptor delNodeHint) |
|
java.util.Iterator<DatanodeStorageInfo> |
BlockUnderConstructionFeature.getExpectedStorageLocationsIterator() |
Note that this iterator does not guarantee thread safety.
|
java.util.Iterator<DatanodeStorageInfo> |
BlockInfo.getStorageInfos() |
|
java.lang.Iterable<DatanodeStorageInfo> |
BlockManager.getStorages(org.apache.hadoop.hdfs.protocol.Block block) |
|
protected java.util.Collection<DatanodeStorageInfo> |
BlockPlacementPolicyDefault.pickupReplicaSet(java.util.Collection<DatanodeStorageInfo> moreThanOne,
java.util.Collection<DatanodeStorageInfo> exactlyOne,
java.util.Map<java.lang.String,java.util.List<DatanodeStorageInfo>> rackMap) |
Pick up replica node set for deleting replica as over-replicated.
|
protected java.util.Collection<DatanodeStorageInfo> |
BlockPlacementPolicyRackFaultTolerant.pickupReplicaSet(java.util.Collection<DatanodeStorageInfo> moreThanOne,
java.util.Collection<DatanodeStorageInfo> exactlyOne,
java.util.Map<java.lang.String,java.util.List<DatanodeStorageInfo>> rackMap) |
|
java.util.Collection<DatanodeStorageInfo> |
BlockPlacementPolicyWithNodeGroup.pickupReplicaSet(java.util.Collection<DatanodeStorageInfo> first,
java.util.Collection<DatanodeStorageInfo> second,
java.util.Map<java.lang.String,java.util.List<DatanodeStorageInfo>> rackMap) |
Pick up replica node set for deleting replica as over-replicated.
|
protected java.util.Collection<DatanodeStorageInfo> |
BlockPlacementPolicyWithUpgradeDomain.pickupReplicaSet(java.util.Collection<DatanodeStorageInfo> moreThanOne,
java.util.Collection<DatanodeStorageInfo> exactlyOne,
java.util.Map<java.lang.String,java.util.List<DatanodeStorageInfo>> rackMap) |
| Modifier and Type | Method | Description |
|---|---|---|
void |
BlockManager.addBlock(DatanodeStorageInfo storageInfo,
org.apache.hadoop.hdfs.protocol.Block block,
java.lang.String delHint) |
The given node is reporting that it received a certain block.
|
void |
DatanodeDescriptor.addBlockToBeReplicated(org.apache.hadoop.hdfs.protocol.Block block,
DatanodeStorageInfo[] targets) |
Store block replication work.
|
void |
ProvidedStorageMap.ProvidedDescriptor.addBlockToBeReplicated(org.apache.hadoop.hdfs.protocol.Block block,
DatanodeStorageInfo[] targets) |
|
void |
DatanodeDescriptor.addECBlockToBeReplicated(org.apache.hadoop.hdfs.protocol.Block block,
DatanodeStorageInfo[] targets) |
Store ec block to be replicated work.
|
void |
BlockPlacementPolicy.adjustSetsWithChosenReplica(java.util.Map<java.lang.String,java.util.List<DatanodeStorageInfo>> rackMap,
java.util.List<DatanodeStorageInfo> moreThanOne,
java.util.List<DatanodeStorageInfo> exactlyOne,
DatanodeStorageInfo cur) |
Adjust rackmap, moreThanOne, and exactlyOne after removing replica on cur.
|
void |
BlockCollection.convertLastBlockToUC(BlockInfo lastBlock,
DatanodeStorageInfo[] targets) |
Convert the last block of the collection to an under-construction block
and set the locations.
|
void |
BlockInfo.convertToBlockUnderConstruction(HdfsServerConstants.BlockUCState s,
DatanodeStorageInfo[] targets) |
Add/Update the under construction feature.
|
static void |
DatanodeStorageInfo.decrementBlocksScheduled(DatanodeStorageInfo... storages) |
Decrement the number of blocks scheduled for each given storage.
|
byte |
BlockInfoStriped.getStorageBlockIndex(DatanodeStorageInfo storage) |
|
static void |
DatanodeStorageInfo.incrementBlocksScheduled(DatanodeStorageInfo... storages) |
Increment the number of blocks scheduled for each given storage.
|
void |
BlockManager.markBlockReplicasAsCorrupt(org.apache.hadoop.hdfs.protocol.Block oldBlock,
BlockInfo block,
long oldGenerationStamp,
long oldNumBytes,
DatanodeStorageInfo[] newStorages) |
Mark block replicas as corrupt except those on the storages in
newStorages list.
|
BlockInfo |
BlockInfo.moveBlockToHead(BlockInfo head,
DatanodeStorageInfo storage,
int curIndex,
int headIndex) |
Remove this block from the list of blocks related to the specified
DatanodeDescriptor.
|
static org.apache.hadoop.hdfs.protocol.LocatedBlock |
BlockManager.newLocatedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock eb,
BlockInfo info,
DatanodeStorageInfo[] locs,
long offset) |
|
static org.apache.hadoop.hdfs.protocol.LocatedBlock |
BlockManager.newLocatedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock b,
DatanodeStorageInfo[] storages,
long startOffset,
boolean corrupt) |
|
static org.apache.hadoop.hdfs.protocol.LocatedStripedBlock |
BlockManager.newLocatedStripedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock b,
DatanodeStorageInfo[] storages,
byte[] indices,
long startOffset,
boolean corrupt) |
|
void |
BlockUnderConstructionFeature.setExpectedLocations(org.apache.hadoop.hdfs.protocol.Block block,
DatanodeStorageInfo[] targets,
org.apache.hadoop.hdfs.protocol.BlockType blockType) |
Set expected locations
|
static org.apache.hadoop.hdfs.protocol.DatanodeInfo[] |
DatanodeStorageInfo.toDatanodeInfos(DatanodeStorageInfo[] storages) |
|
static java.lang.String[] |
DatanodeStorageInfo.toStorageIDs(DatanodeStorageInfo[] storages) |
|
static org.apache.hadoop.fs.StorageType[] |
DatanodeStorageInfo.toStorageTypes(DatanodeStorageInfo[] storages) |
| Modifier and Type | Method | Description |
|---|---|---|
void |
BlockPlacementPolicy.adjustSetsWithChosenReplica(java.util.Map<java.lang.String,java.util.List<DatanodeStorageInfo>> rackMap,
java.util.List<DatanodeStorageInfo> moreThanOne,
java.util.List<DatanodeStorageInfo> exactlyOne,
DatanodeStorageInfo cur) |
Adjust rackmap, moreThanOne, and exactlyOne after removing replica on cur.
|
void |
BlockPlacementPolicy.adjustSetsWithChosenReplica(java.util.Map<java.lang.String,java.util.List<DatanodeStorageInfo>> rackMap,
java.util.List<DatanodeStorageInfo> moreThanOne,
java.util.List<DatanodeStorageInfo> exactlyOne,
DatanodeStorageInfo cur) |
Adjust rackmap, moreThanOne, and exactlyOne after removing replica on cur.
|
protected DatanodeStorageInfo |
BlockPlacementPolicyDefault.chooseLocalOrFavoredStorage(org.apache.hadoop.net.Node localOrFavoredNode,
boolean isFavoredNode,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
long blocksize,
int maxNodesPerRack,
java.util.List<DatanodeStorageInfo> results,
boolean avoidStaleNodes,
java.util.EnumMap<org.apache.hadoop.fs.StorageType,java.lang.Integer> storageTypes) |
Choose storage of local or favored node.
|
protected DatanodeStorageInfo |
BlockPlacementPolicyDefault.chooseLocalRack(org.apache.hadoop.net.Node localMachine,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
long blocksize,
int maxNodesPerRack,
java.util.List<DatanodeStorageInfo> results,
boolean avoidStaleNodes,
java.util.EnumMap<org.apache.hadoop.fs.StorageType,java.lang.Integer> storageTypes) |
Choose one node from the rack that localMachine is on.
|
protected DatanodeStorageInfo |
BlockPlacementPolicyWithNodeGroup.chooseLocalRack(org.apache.hadoop.net.Node localMachine,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
long blocksize,
int maxNodesPerRack,
java.util.List<DatanodeStorageInfo> results,
boolean avoidStaleNodes,
java.util.EnumMap<org.apache.hadoop.fs.StorageType,java.lang.Integer> storageTypes) |
|
protected DatanodeStorageInfo |
AvailableSpaceBlockPlacementPolicy.chooseLocalStorage(org.apache.hadoop.net.Node localMachine,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
long blocksize,
int maxNodesPerRack,
java.util.List<DatanodeStorageInfo> results,
boolean avoidStaleNodes,
java.util.EnumMap<org.apache.hadoop.fs.StorageType,java.lang.Integer> storageTypes,
boolean fallbackToLocalRack) |
|
protected DatanodeStorageInfo |
BlockPlacementPolicyDefault.chooseLocalStorage(org.apache.hadoop.net.Node localMachine,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
long blocksize,
int maxNodesPerRack,
java.util.List<DatanodeStorageInfo> results,
boolean avoidStaleNodes,
java.util.EnumMap<org.apache.hadoop.fs.StorageType,java.lang.Integer> storageTypes) |
|
protected DatanodeStorageInfo |
BlockPlacementPolicyDefault.chooseLocalStorage(org.apache.hadoop.net.Node localMachine,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
long blocksize,
int maxNodesPerRack,
java.util.List<DatanodeStorageInfo> results,
boolean avoidStaleNodes,
java.util.EnumMap<org.apache.hadoop.fs.StorageType,java.lang.Integer> storageTypes,
boolean fallbackToLocalRack) |
Choose localMachine as the target.
|
protected DatanodeStorageInfo |
BlockPlacementPolicyWithNodeGroup.chooseLocalStorage(org.apache.hadoop.net.Node localMachine,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
long blocksize,
int maxNodesPerRack,
java.util.List<DatanodeStorageInfo> results,
boolean avoidStaleNodes,
java.util.EnumMap<org.apache.hadoop.fs.StorageType,java.lang.Integer> storageTypes,
boolean fallbackToNodeGroupAndLocalRack) |
choose local node of localMachine as the target.
|
protected DatanodeStorageInfo |
BlockPlacementPolicyDefault.chooseRandom(int numOfReplicas,
java.lang.String scope,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
long blocksize,
int maxNodesPerRack,
java.util.List<DatanodeStorageInfo> results,
boolean avoidStaleNodes,
java.util.EnumMap<org.apache.hadoop.fs.StorageType,java.lang.Integer> storageTypes) |
Randomly choose numOfReplicas targets from the given scope.
|
protected DatanodeStorageInfo |
BlockPlacementPolicyDefault.chooseRandom(java.lang.String scope,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
long blocksize,
int maxNodesPerRack,
java.util.List<DatanodeStorageInfo> results,
boolean avoidStaleNodes,
java.util.EnumMap<org.apache.hadoop.fs.StorageType,java.lang.Integer> storageTypes) |
Randomly choose one target from the given scope.
|
protected void |
BlockPlacementPolicyDefault.chooseRemoteRack(int numOfReplicas,
DatanodeDescriptor localMachine,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
long blocksize,
int maxReplicasPerRack,
java.util.List<DatanodeStorageInfo> results,
boolean avoidStaleNodes,
java.util.EnumMap<org.apache.hadoop.fs.StorageType,java.lang.Integer> storageTypes) |
Choose numOfReplicas nodes from the racks
that localMachine is NOT on.
|
protected void |
BlockPlacementPolicyWithNodeGroup.chooseRemoteRack(int numOfReplicas,
DatanodeDescriptor localMachine,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
long blocksize,
int maxReplicasPerRack,
java.util.List<DatanodeStorageInfo> results,
boolean avoidStaleNodes,
java.util.EnumMap<org.apache.hadoop.fs.StorageType,java.lang.Integer> storageTypes) |
|
abstract java.util.List<DatanodeStorageInfo> |
BlockPlacementPolicy.chooseReplicasToDelete(java.util.Collection<DatanodeStorageInfo> availableReplicas,
java.util.Collection<DatanodeStorageInfo> delCandidates,
int expectedNumOfReplicas,
java.util.List<org.apache.hadoop.fs.StorageType> excessTypes,
DatanodeDescriptor addedNode,
DatanodeDescriptor delNodeHint) |
Select the excess replica storages for deletion based on either
delNodeHint or excess storage types.
|
java.util.List<DatanodeStorageInfo> |
BlockPlacementPolicyDefault.chooseReplicasToDelete(java.util.Collection<DatanodeStorageInfo> availableReplicas,
java.util.Collection<DatanodeStorageInfo> delCandidates,
int expectedNumOfReplicas,
java.util.List<org.apache.hadoop.fs.StorageType> excessTypes,
DatanodeDescriptor addedNode,
DatanodeDescriptor delNodeHint) |
|
DatanodeStorageInfo |
BlockPlacementPolicyDefault.chooseReplicaToDelete(java.util.Collection<DatanodeStorageInfo> moreThanOne,
java.util.Collection<DatanodeStorageInfo> exactlyOne,
java.util.List<org.apache.hadoop.fs.StorageType> excessTypes,
java.util.Map<java.lang.String,java.util.List<DatanodeStorageInfo>> rackMap) |
Decide whether deleting the specified replica of the block still makes
the block conform to the configured block placement policy.
|
DatanodeStorageInfo |
BlockPlacementPolicyDefault.chooseReplicaToDelete(java.util.Collection<DatanodeStorageInfo> moreThanOne,
java.util.Collection<DatanodeStorageInfo> exactlyOne,
java.util.List<org.apache.hadoop.fs.StorageType> excessTypes,
java.util.Map<java.lang.String,java.util.List<DatanodeStorageInfo>> rackMap) |
Decide whether deleting the specified replica of the block still makes
the block conform to the configured block placement policy.
|
abstract DatanodeStorageInfo[] |
BlockPlacementPolicy.chooseTarget(java.lang.String srcPath,
int numOfReplicas,
org.apache.hadoop.net.Node writer,
java.util.List<DatanodeStorageInfo> chosen,
boolean returnChosenNodes,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
long blocksize,
org.apache.hadoop.hdfs.protocol.BlockStoragePolicy storagePolicy,
java.util.EnumSet<org.apache.hadoop.hdfs.AddBlockFlag> flags) |
choose numOfReplicas data nodes for writer
to re-replicate a block with size blocksize.
If not enough nodes are available, return as many as we can.
|
DatanodeStorageInfo[] |
BlockPlacementPolicy.chooseTarget(java.lang.String srcPath,
int numOfReplicas,
org.apache.hadoop.net.Node writer,
java.util.List<DatanodeStorageInfo> chosen,
boolean returnChosenNodes,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
long blocksize,
org.apache.hadoop.hdfs.protocol.BlockStoragePolicy storagePolicy,
java.util.EnumSet<org.apache.hadoop.hdfs.AddBlockFlag> flags,
java.util.EnumMap<org.apache.hadoop.fs.StorageType,java.lang.Integer> storageTypes) |
|
DatanodeStorageInfo[] |
BlockPlacementPolicyDefault.chooseTarget(java.lang.String srcPath,
int numOfReplicas,
org.apache.hadoop.net.Node writer,
java.util.List<DatanodeStorageInfo> chosenNodes,
boolean returnChosenNodes,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
long blocksize,
org.apache.hadoop.hdfs.protocol.BlockStoragePolicy storagePolicy,
java.util.EnumSet<org.apache.hadoop.hdfs.AddBlockFlag> flags) |
|
DatanodeStorageInfo[] |
BlockPlacementPolicyDefault.chooseTarget(java.lang.String srcPath,
int numOfReplicas,
org.apache.hadoop.net.Node writer,
java.util.List<DatanodeStorageInfo> chosen,
boolean returnChosenNodes,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
long blocksize,
org.apache.hadoop.hdfs.protocol.BlockStoragePolicy storagePolicy,
java.util.EnumSet<org.apache.hadoop.hdfs.AddBlockFlag> flags,
java.util.EnumMap<org.apache.hadoop.fs.StorageType,java.lang.Integer> storageTypes) |
|
DatanodeStorageInfo[] |
BlockManager.chooseTarget4AdditionalDatanode(java.lang.String src,
int numAdditionalNodes,
org.apache.hadoop.net.Node clientnode,
java.util.List<DatanodeStorageInfo> chosen,
java.util.Set<org.apache.hadoop.net.Node> excludes,
long blocksize,
byte storagePolicyID,
org.apache.hadoop.hdfs.protocol.BlockType blockType) |
Choose target for getting additional datanodes for an existing pipeline.
|
protected org.apache.hadoop.net.Node |
BlockPlacementPolicyDefault.chooseTargetInOrder(int numOfReplicas,
org.apache.hadoop.net.Node writer,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
long blocksize,
int maxNodesPerRack,
java.util.List<DatanodeStorageInfo> results,
boolean avoidStaleNodes,
boolean newBlock,
java.util.EnumMap<org.apache.hadoop.fs.StorageType,java.lang.Integer> storageTypes) |
|
protected org.apache.hadoop.net.Node |
BlockPlacementPolicyRackFaultTolerant.chooseTargetInOrder(int numOfReplicas,
org.apache.hadoop.net.Node writer,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
long blocksize,
int maxNodesPerRack,
java.util.List<DatanodeStorageInfo> results,
boolean avoidStaleNodes,
boolean newBlock,
java.util.EnumMap<org.apache.hadoop.fs.StorageType,java.lang.Integer> storageTypes) |
Choose numOfReplicas in order:
1.
|
protected boolean |
BlockPlacementPolicyWithUpgradeDomain.isGoodDatanode(DatanodeDescriptor node,
int maxTargetPerRack,
boolean considerLoad,
java.util.List<DatanodeStorageInfo> results,
boolean avoidStaleNodes) |
|
protected void |
DatanodeAdminManager.logBlockReplicationInfo(BlockInfo block,
BlockCollection bc,
DatanodeDescriptor srcNode,
NumberReplicas num,
java.lang.Iterable<DatanodeStorageInfo> storages) |
|
protected java.util.Collection<DatanodeStorageInfo> |
BlockPlacementPolicyDefault.pickupReplicaSet(java.util.Collection<DatanodeStorageInfo> moreThanOne,
java.util.Collection<DatanodeStorageInfo> exactlyOne,
java.util.Map<java.lang.String,java.util.List<DatanodeStorageInfo>> rackMap) |
Pick up replica node set for deleting replica as over-replicated.
|
protected java.util.Collection<DatanodeStorageInfo> |
BlockPlacementPolicyDefault.pickupReplicaSet(java.util.Collection<DatanodeStorageInfo> moreThanOne,
java.util.Collection<DatanodeStorageInfo> exactlyOne,
java.util.Map<java.lang.String,java.util.List<DatanodeStorageInfo>> rackMap) |
Pick up replica node set for deleting replica as over-replicated.
|
protected java.util.Collection<DatanodeStorageInfo> |
BlockPlacementPolicyRackFaultTolerant.pickupReplicaSet(java.util.Collection<DatanodeStorageInfo> moreThanOne,
java.util.Collection<DatanodeStorageInfo> exactlyOne,
java.util.Map<java.lang.String,java.util.List<DatanodeStorageInfo>> rackMap) |
|
protected java.util.Collection<DatanodeStorageInfo> |
BlockPlacementPolicyRackFaultTolerant.pickupReplicaSet(java.util.Collection<DatanodeStorageInfo> moreThanOne,
java.util.Collection<DatanodeStorageInfo> exactlyOne,
java.util.Map<java.lang.String,java.util.List<DatanodeStorageInfo>> rackMap) |
|
java.util.Collection<DatanodeStorageInfo> |
BlockPlacementPolicyWithNodeGroup.pickupReplicaSet(java.util.Collection<DatanodeStorageInfo> first,
java.util.Collection<DatanodeStorageInfo> second,
java.util.Map<java.lang.String,java.util.List<DatanodeStorageInfo>> rackMap) |
Pick up replica node set for deleting replica as over-replicated.
|
java.util.Collection<DatanodeStorageInfo> |
BlockPlacementPolicyWithNodeGroup.pickupReplicaSet(java.util.Collection<DatanodeStorageInfo> first,
java.util.Collection<DatanodeStorageInfo> second,
java.util.Map<java.lang.String,java.util.List<DatanodeStorageInfo>> rackMap) |
Pick up replica node set for deleting replica as over-replicated.
|
protected java.util.Collection<DatanodeStorageInfo> |
BlockPlacementPolicyWithUpgradeDomain.pickupReplicaSet(java.util.Collection<DatanodeStorageInfo> moreThanOne,
java.util.Collection<DatanodeStorageInfo> exactlyOne,
java.util.Map<java.lang.String,java.util.List<DatanodeStorageInfo>> rackMap) |
|
protected java.util.Collection<DatanodeStorageInfo> |
BlockPlacementPolicyWithUpgradeDomain.pickupReplicaSet(java.util.Collection<DatanodeStorageInfo> moreThanOne,
java.util.Collection<DatanodeStorageInfo> exactlyOne,
java.util.Map<java.lang.String,java.util.List<DatanodeStorageInfo>> rackMap) |
| Constructor | Description |
|---|---|
BlockUnderConstructionFeature(org.apache.hadoop.hdfs.protocol.Block blk,
HdfsServerConstants.BlockUCState state,
DatanodeStorageInfo[] targets,
org.apache.hadoop.hdfs.protocol.BlockType blockType) |
| Modifier and Type | Method | Description |
|---|---|---|
void |
INodeFile.convertLastBlockToUC(BlockInfo lastBlock,
DatanodeStorageInfo[] locations) |
| Constructor | Description |
|---|---|
BlockECReconstructionInfo(org.apache.hadoop.hdfs.protocol.ExtendedBlock block,
org.apache.hadoop.hdfs.protocol.DatanodeInfo[] sources,
DatanodeStorageInfo[] targetDnStorageInfo,
byte[] liveBlockIndices,
byte[] excludeReconstructedIndices,
org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy ecPolicy) |
Copyright © 2008–2025 Apache Software Foundation. All rights reserved.