public class BlockPlacementPolicyWithNodeGroup extends BlockPlacementPolicyDefault
Nested classes/interfaces inherited from class BlockPlacementPolicy:
BlockPlacementPolicy.NotEnoughReplicasException

Fields inherited from class BlockPlacementPolicyDefault:
clusterMap, considerLoad, considerLoadFactor, heartbeatInterval, host2datanodeMap, tolerateHeartbeatMultiplier

Fields inherited from class BlockPlacementPolicy:
LOG

| Modifier | Constructor | Description |
|---|---|---|
protected |
BlockPlacementPolicyWithNodeGroup() |
| Modifier and Type | Method | Description |
|---|---|---|
protected int |
addToExcludedNodes(DatanodeDescriptor chosenNode,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes) |
Find other nodes in the same nodegroup of localMachine and add them
into excludeNodes as replica should not be duplicated for nodes
within the same nodegroup
|
protected void |
chooseFavouredNodes(java.lang.String src,
int numOfReplicas,
java.util.List<DatanodeDescriptor> favoredNodes,
java.util.Set<org.apache.hadoop.net.Node> favoriteAndExcludedNodes,
long blocksize,
int maxNodesPerRack,
java.util.List<DatanodeStorageInfo> results,
boolean avoidStaleNodes,
java.util.EnumMap<org.apache.hadoop.fs.StorageType,java.lang.Integer> storageTypes) |
Choose all good favored nodes as target.
|
protected DatanodeStorageInfo |
chooseLocalRack(org.apache.hadoop.net.Node localMachine,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
long blocksize,
int maxNodesPerRack,
java.util.List<DatanodeStorageInfo> results,
boolean avoidStaleNodes,
java.util.EnumMap<org.apache.hadoop.fs.StorageType,java.lang.Integer> storageTypes) |
Choose one node from the rack that localMachine is on.
|
protected DatanodeStorageInfo |
chooseLocalStorage(org.apache.hadoop.net.Node localMachine,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
long blocksize,
int maxNodesPerRack,
java.util.List<DatanodeStorageInfo> results,
boolean avoidStaleNodes,
java.util.EnumMap<org.apache.hadoop.fs.StorageType,java.lang.Integer> storageTypes,
boolean fallbackToNodeGroupAndLocalRack) |
Choose the local node of localMachine as the target.
|
protected void |
chooseRemoteRack(int numOfReplicas,
DatanodeDescriptor localMachine,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
long blocksize,
int maxReplicasPerRack,
java.util.List<DatanodeStorageInfo> results,
boolean avoidStaleNodes,
java.util.EnumMap<org.apache.hadoop.fs.StorageType,java.lang.Integer> storageTypes) |
Choose numOfReplicas nodes from the racks
that localMachine is NOT on.
|
protected java.lang.String |
getRack(org.apache.hadoop.hdfs.protocol.DatanodeInfo cur) |
Get rack string from a data node
|
void |
initialize(org.apache.hadoop.conf.Configuration conf,
FSClusterStats stats,
org.apache.hadoop.net.NetworkTopology clusterMap,
org.apache.hadoop.hdfs.server.blockmanagement.Host2NodesMap host2datanodeMap) |
Used to setup a BlockPlacementPolicy object.
|
boolean |
isMovable(java.util.Collection<org.apache.hadoop.hdfs.protocol.DatanodeInfo> locs,
org.apache.hadoop.hdfs.protocol.DatanodeInfo source,
org.apache.hadoop.hdfs.protocol.DatanodeInfo target) |
Check if there are any replica (other than source) on the same node group
with target.
|
java.util.Collection<DatanodeStorageInfo> |
pickupReplicaSet(java.util.Collection<DatanodeStorageInfo> first,
java.util.Collection<DatanodeStorageInfo> second,
java.util.Map<java.lang.String,java.util.List<DatanodeStorageInfo>> rackMap) |
Pick up replica node set for deleting replica as over-replicated.
|
BlockPlacementStatus |
verifyBlockPlacement(org.apache.hadoop.hdfs.protocol.DatanodeInfo[] locs,
int numberOfReplicas) |
Verify if the block's placement meets requirement of placement policy,
i.e.
|
Methods inherited from class BlockPlacementPolicyDefault:
chooseDataNode, chooseDataNode, chooseLocalOrFavoredStorage, chooseLocalStorage, chooseRandom, chooseRandom, chooseReplicasToDelete, chooseReplicaToDelete, chooseTarget, chooseTarget, chooseTargetInOrder, getExcludeSlowNodesEnabled, getMaxNodesPerRack, getMinBlocksForWrite, setExcludeSlowNodesEnabled, setMinBlocksForWrite

Methods inherited from class BlockPlacementPolicy:
adjustSetsWithChosenReplica, getDatanodeInfo, splitNodesWithRack

public void initialize(org.apache.hadoop.conf.Configuration conf,
FSClusterStats stats,
org.apache.hadoop.net.NetworkTopology clusterMap,
org.apache.hadoop.hdfs.server.blockmanagement.Host2NodesMap host2datanodeMap)
Description copied from class: BlockPlacementPolicy
Overrides:
initialize in class BlockPlacementPolicyDefault
Parameters:
conf - the configuration object
stats - retrieve cluster status from here
clusterMap - cluster topology

protected void chooseFavouredNodes(java.lang.String src,
int numOfReplicas,
java.util.List<DatanodeDescriptor> favoredNodes,
java.util.Set<org.apache.hadoop.net.Node> favoriteAndExcludedNodes,
long blocksize,
int maxNodesPerRack,
java.util.List<DatanodeStorageInfo> results,
boolean avoidStaleNodes,
java.util.EnumMap<org.apache.hadoop.fs.StorageType,java.lang.Integer> storageTypes)
throws BlockPlacementPolicy.NotEnoughReplicasException
Overrides:
chooseFavouredNodes in class BlockPlacementPolicyDefault
Throws:
BlockPlacementPolicy.NotEnoughReplicasException

protected DatanodeStorageInfo chooseLocalStorage(org.apache.hadoop.net.Node localMachine,
                                                 java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
                                                 long blocksize,
                                                 int maxNodesPerRack,
                                                 java.util.List<DatanodeStorageInfo> results,
                                                 boolean avoidStaleNodes,
                                                 java.util.EnumMap<org.apache.hadoop.fs.StorageType,java.lang.Integer> storageTypes,
                                                 boolean fallbackToNodeGroupAndLocalRack)
                                          throws BlockPlacementPolicy.NotEnoughReplicasException

Overrides:
chooseLocalStorage in class BlockPlacementPolicyDefault
Throws:
BlockPlacementPolicy.NotEnoughReplicasException

protected DatanodeStorageInfo chooseLocalRack(org.apache.hadoop.net.Node localMachine,
                                              java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
                                              long blocksize,
                                              int maxNodesPerRack,
                                              java.util.List<DatanodeStorageInfo> results,
                                              boolean avoidStaleNodes,
                                              java.util.EnumMap<org.apache.hadoop.fs.StorageType,java.lang.Integer> storageTypes)
                                       throws BlockPlacementPolicy.NotEnoughReplicasException

Description copied from class: BlockPlacementPolicyDefault
Overrides:
chooseLocalRack in class BlockPlacementPolicyDefault
Throws:
BlockPlacementPolicy.NotEnoughReplicasException

protected void chooseRemoteRack(int numOfReplicas,
DatanodeDescriptor localMachine,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
long blocksize,
int maxReplicasPerRack,
java.util.List<DatanodeStorageInfo> results,
boolean avoidStaleNodes,
java.util.EnumMap<org.apache.hadoop.fs.StorageType,java.lang.Integer> storageTypes)
throws BlockPlacementPolicy.NotEnoughReplicasException
Description copied from class: BlockPlacementPolicyDefault
Overrides:
chooseRemoteRack in class BlockPlacementPolicyDefault
Throws:
BlockPlacementPolicy.NotEnoughReplicasException

protected java.lang.String getRack(org.apache.hadoop.hdfs.protocol.DatanodeInfo cur)

Description copied from class: BlockPlacementPolicy
Overrides:
getRack in class BlockPlacementPolicy

protected int addToExcludedNodes(DatanodeDescriptor chosenNode,
                                 java.util.Set<org.apache.hadoop.net.Node> excludedNodes)

Overrides:
addToExcludedNodes in class BlockPlacementPolicyDefault

public java.util.Collection<DatanodeStorageInfo> pickupReplicaSet(java.util.Collection<DatanodeStorageInfo> first,
                                                                  java.util.Collection<DatanodeStorageInfo> second,
                                                                  java.util.Map<java.lang.String,java.util.List<DatanodeStorageInfo>> rackMap)
Overrides:
pickupReplicaSet in class BlockPlacementPolicyDefault

public boolean isMovable(java.util.Collection<org.apache.hadoop.hdfs.protocol.DatanodeInfo> locs,
org.apache.hadoop.hdfs.protocol.DatanodeInfo source,
org.apache.hadoop.hdfs.protocol.DatanodeInfo target)
Overrides:
isMovable in class BlockPlacementPolicyDefault
Parameters:
locs - all replicas including source and target
source - source replica of the move
target - target replica of the move

public BlockPlacementStatus verifyBlockPlacement(org.apache.hadoop.hdfs.protocol.DatanodeInfo[] locs,
                                                 int numberOfReplicas)

Description copied from class: BlockPlacementPolicy
Overrides:
verifyBlockPlacement in class BlockPlacementPolicyDefault
Parameters:
locs - block with locations
numberOfReplicas - replica number of file to be verified

Copyright © 2008–2025 Apache Software Foundation. All rights reserved.