Direct Known Subclasses: BlockPlacementPolicyDefault
@InterfaceAudience.Private
public abstract class BlockPlacementPolicy
extends java.lang.Object
| Modifier and Type | Class | Description |
|---|---|---|
static class |
BlockPlacementPolicy.NotEnoughReplicasException |
| Modifier and Type | Field | Description |
|---|---|---|
static org.slf4j.Logger |
LOG |
| Constructor | Description |
|---|---|
BlockPlacementPolicy() |
| Modifier and Type | Method | Description |
|---|---|---|
void |
adjustSetsWithChosenReplica(java.util.Map<java.lang.String,java.util.List<DatanodeStorageInfo>> rackMap,
java.util.List<DatanodeStorageInfo> moreThanOne,
java.util.List<DatanodeStorageInfo> exactlyOne,
DatanodeStorageInfo cur) |
Adjust rackmap, moreThanOne, and exactlyOne after removing replica on cur.
|
abstract java.util.List<DatanodeStorageInfo> |
chooseReplicasToDelete(java.util.Collection<DatanodeStorageInfo> availableReplicas,
java.util.Collection<DatanodeStorageInfo> delCandidates,
int expectedNumOfReplicas,
java.util.List<org.apache.hadoop.fs.StorageType> excessTypes,
DatanodeDescriptor addedNode,
DatanodeDescriptor delNodeHint) |
Select the excess replica storages for deletion, based on either
delNodeHint or the excess storage types.
|
abstract DatanodeStorageInfo[] |
chooseTarget(java.lang.String srcPath,
int numOfReplicas,
org.apache.hadoop.net.Node writer,
java.util.List<DatanodeStorageInfo> chosen,
boolean returnChosenNodes,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
long blocksize,
org.apache.hadoop.hdfs.protocol.BlockStoragePolicy storagePolicy,
java.util.EnumSet<org.apache.hadoop.hdfs.AddBlockFlag> flags) |
Choose numOfReplicas data nodes for writer
to re-replicate a block of size blocksize.
If that many nodes are not available, return as many as possible.
|
DatanodeStorageInfo[] |
chooseTarget(java.lang.String srcPath,
int numOfReplicas,
org.apache.hadoop.net.Node writer,
java.util.List<DatanodeStorageInfo> chosen,
boolean returnChosenNodes,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
long blocksize,
org.apache.hadoop.hdfs.protocol.BlockStoragePolicy storagePolicy,
java.util.EnumSet<org.apache.hadoop.hdfs.AddBlockFlag> flags,
java.util.EnumMap<org.apache.hadoop.fs.StorageType,java.lang.Integer> storageTypes) |
|
protected <T> org.apache.hadoop.hdfs.protocol.DatanodeInfo |
getDatanodeInfo(T datanode) |
|
abstract boolean |
getExcludeSlowNodesEnabled() |
|
abstract int |
getMinBlocksForWrite() |
|
protected java.lang.String |
getRack(org.apache.hadoop.hdfs.protocol.DatanodeInfo datanode) |
Get the rack string for a data node.
|
protected abstract void |
initialize(org.apache.hadoop.conf.Configuration conf,
FSClusterStats stats,
org.apache.hadoop.net.NetworkTopology clusterMap,
org.apache.hadoop.hdfs.server.blockmanagement.Host2NodesMap host2datanodeMap) |
Used to setup a BlockPlacementPolicy object.
|
abstract boolean |
isMovable(java.util.Collection<org.apache.hadoop.hdfs.protocol.DatanodeInfo> candidates,
org.apache.hadoop.hdfs.protocol.DatanodeInfo source,
org.apache.hadoop.hdfs.protocol.DatanodeInfo target) |
Check if the move is allowed.
|
abstract void |
setExcludeSlowNodesEnabled(boolean enable) |
Updates the value used for excludeSlowNodesEnabled, which is set by
DFSConfigKeys.DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_KEY
initially. |
abstract void |
setMinBlocksForWrite(int minBlocksForWrite) |
Updates the value used for minBlocksForWrite, which is set by
DFSConfigKeys.DFS_NAMENODE_BLOCKPLACEMENTPOLICY_MIN_BLOCKS_FOR_WRITE_KEY. |
<T> void |
splitNodesWithRack(java.lang.Iterable<T> availableSet,
java.util.Collection<T> candidates,
java.util.Map<java.lang.String,java.util.List<T>> rackMap,
java.util.List<T> moreThanOne,
java.util.List<T> exactlyOne) |
Split data nodes into two sets, one set includes nodes on rack with
more than one replica, the other set contains the remaining nodes.
|
abstract BlockPlacementStatus |
verifyBlockPlacement(org.apache.hadoop.hdfs.protocol.DatanodeInfo[] locs,
int numOfReplicas) |
Verify if the block's placement meets requirement of placement policy,
i.e.
|
public abstract DatanodeStorageInfo[] chooseTarget(java.lang.String srcPath, int numOfReplicas, org.apache.hadoop.net.Node writer, java.util.List<DatanodeStorageInfo> chosen, boolean returnChosenNodes, java.util.Set<org.apache.hadoop.net.Node> excludedNodes, long blocksize, org.apache.hadoop.hdfs.protocol.BlockStoragePolicy storagePolicy, java.util.EnumSet<org.apache.hadoop.hdfs.AddBlockFlag> flags)
srcPath - the file to which this chooseTargets is being invoked.
numOfReplicas - additional number of replicas wanted.
writer - the writer's machine, null if not in the cluster.
chosen - datanodes that have been chosen as targets.
returnChosenNodes - decide if the chosenNodes are returned.
excludedNodes - datanodes that should not be considered as targets.
blocksize - size of the data to be written.
flags - Block placement flags.
public DatanodeStorageInfo[] chooseTarget(java.lang.String srcPath, int numOfReplicas, org.apache.hadoop.net.Node writer, java.util.List<DatanodeStorageInfo> chosen, boolean returnChosenNodes, java.util.Set<org.apache.hadoop.net.Node> excludedNodes, long blocksize, org.apache.hadoop.hdfs.protocol.BlockStoragePolicy storagePolicy, java.util.EnumSet<org.apache.hadoop.hdfs.AddBlockFlag> flags, java.util.EnumMap<org.apache.hadoop.fs.StorageType,java.lang.Integer> storageTypes)
storageTypes - storage types that should be used as targets.
public abstract BlockPlacementStatus verifyBlockPlacement(org.apache.hadoop.hdfs.protocol.DatanodeInfo[] locs, int numOfReplicas)
locs - block with locations.
numOfReplicas - replica number of the file to be verified.
public abstract java.util.List<DatanodeStorageInfo> chooseReplicasToDelete(java.util.Collection<DatanodeStorageInfo> availableReplicas, java.util.Collection<DatanodeStorageInfo> delCandidates, int expectedNumOfReplicas, java.util.List<org.apache.hadoop.fs.StorageType> excessTypes, DatanodeDescriptor addedNode, DatanodeDescriptor delNodeHint)
availableReplicas - available replicas.
delCandidates - Candidates for deletion. For normal replication, this set is the
same as availableReplicas. For striped blocks, this set is a
subset of availableReplicas.
expectedNumOfReplicas - The expected number of replicas remaining in the delCandidates.
excessTypes - type of the storage policy.
addedNode - New replica reported.
delNodeHint - Hint for excess storage selection.
protected abstract void initialize(org.apache.hadoop.conf.Configuration conf,
FSClusterStats stats,
org.apache.hadoop.net.NetworkTopology clusterMap,
org.apache.hadoop.hdfs.server.blockmanagement.Host2NodesMap host2datanodeMap)
conf - the configuration object.
stats - retrieve cluster status from here.
clusterMap - cluster topology.
public abstract boolean isMovable(java.util.Collection<org.apache.hadoop.hdfs.protocol.DatanodeInfo> candidates,
org.apache.hadoop.hdfs.protocol.DatanodeInfo source,
org.apache.hadoop.hdfs.protocol.DatanodeInfo target)
candidates - all replicas including source and target.
source - source replica of the move.
target - target replica of the move.
public void adjustSetsWithChosenReplica(java.util.Map<java.lang.String,java.util.List<DatanodeStorageInfo>> rackMap, java.util.List<DatanodeStorageInfo> moreThanOne, java.util.List<DatanodeStorageInfo> exactlyOne, DatanodeStorageInfo cur)
rackMap - a map from rack to replica.
moreThanOne - The List of replica nodes on a rack which has more than
one replica.
exactlyOne - The List of replica nodes on a rack with only one replica.
cur - current replica to remove.
protected <T> org.apache.hadoop.hdfs.protocol.DatanodeInfo getDatanodeInfo(T datanode)
protected java.lang.String getRack(org.apache.hadoop.hdfs.protocol.DatanodeInfo datanode)
public <T> void splitNodesWithRack(java.lang.Iterable<T> availableSet,
java.util.Collection<T> candidates,
java.util.Map<java.lang.String,java.util.List<T>> rackMap,
java.util.List<T> moreThanOne,
java.util.List<T> exactlyOne)
availableSet - all the available DataNodes/storages of the block.
candidates - DatanodeStorageInfo/DatanodeInfo to be split
into two sets.
rackMap - a map from rack to datanodes.
moreThanOne - contains nodes on a rack with more than one replica.
exactlyOne - contains the remaining nodes.
public abstract void setExcludeSlowNodesEnabled(boolean enable)
DFSConfigKeys.DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_KEY
initially.
enable - if true, slow nodes are filtered out when choosing targets
for blocks; if false, no filtering is applied.
public abstract boolean getExcludeSlowNodesEnabled()
public abstract void setMinBlocksForWrite(int minBlocksForWrite)
DFSConfigKeys.DFS_NAMENODE_BLOCKPLACEMENTPOLICY_MIN_BLOCKS_FOR_WRITE_KEY.
minBlocksForWrite - the minimum number of blocks required for write operations.
public abstract int getMinBlocksForWrite()
Copyright © 2008–2025 Apache Software Foundation. All rights reserved.