@Private
@Evolving
public class DatanodeManager
extends java.lang.Object
| Modifier and Type | Method | Description |
|---|---|---|
void |
addSlowPeers(java.lang.String dnUuid) |
|
void |
clearPendingCachingCommands() |
|
void |
clearPendingQueues() |
Clear any actions that are queued up to be sent to the DNs
on their next heartbeats.
|
void |
fetchDatanodes(java.util.List<DatanodeDescriptor> live,
java.util.List<DatanodeDescriptor> dead,
boolean removeDecommissionNode) |
Fetch live and dead datanodes.
|
java.util.List<DatanodeDescriptor> |
getAllSlowDataNodes() |
|
int |
getBlockInvalidateLimit() |
|
long |
getBlocksPerPostponedMisreplicatedBlocksRescan() |
|
DatanodeDescriptor |
getDatanode(java.lang.String datanodeUuid) |
Get a datanode descriptor given the corresponding datanode UUID.
|
DatanodeDescriptor |
getDatanode(org.apache.hadoop.hdfs.protocol.DatanodeID nodeID) |
Get data node by datanode ID.
|
DatanodeAdminManager |
getDatanodeAdminManager() |
|
DatanodeDescriptor |
getDatanodeByHost(java.lang.String host) |
|
DatanodeDescriptor |
getDatanodeByXferAddr(java.lang.String host,
int xferPort) |
|
java.util.List<DatanodeDescriptor> |
getDatanodeListForReport(org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType type) |
For generating datanode reports
|
java.util.Map<java.lang.String,DatanodeDescriptor> |
getDatanodeMap() |
|
java.util.Set<DatanodeDescriptor> |
getDatanodes() |
|
java.util.HashMap<java.lang.String,java.lang.Integer> |
getDatanodesSoftwareVersions() |
|
DatanodeStatistics |
getDatanodeStatistics() |
|
DatanodeStorageInfo[] |
getDatanodeStorageInfos(org.apache.hadoop.hdfs.protocol.DatanodeID[] datanodeID,
java.lang.String[] storageIDs,
java.lang.String format,
java.lang.Object... args) |
|
org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport[] |
getDatanodeStorageReport(org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType type) |
Generates datanode reports for the given report type.
|
java.util.List<DatanodeDescriptor> |
getDecommissioningNodes() |
|
boolean |
getEnableAvoidSlowDataNodesForRead() |
|
java.util.List<DatanodeDescriptor> |
getEnteringMaintenanceNodes() |
|
FSClusterStats |
getFSClusterStats() |
|
long |
getHeartbeatInterval() |
|
long |
getHeartbeatRecheckInterval() |
|
org.apache.hadoop.hdfs.server.blockmanagement.Host2NodesMap |
getHost2DatanodeMap() |
|
HostConfigManager |
getHostConfigManager() |
|
int |
getMaxSlowpeerCollectNodes() |
|
org.apache.hadoop.net.NetworkTopology |
getNetworkTopology() |
|
int |
getNumDeadDataNodes() |
|
int |
getNumLiveDataNodes() |
|
int |
getNumOfDataNodes() |
|
int |
getNumStaleNodes() |
|
int |
getNumStaleStorages() |
Get the number of content stale storages.
|
java.lang.String |
getSlowDisksReport() |
Retrieve information about slow disks as a JSON.
|
SlowDiskTracker |
getSlowDiskTracker() |
Use only for testing.
|
static java.util.Set<java.lang.String> |
getSlowNodesUuidSet() |
Returns the UUIDs of all currently tracked slow datanodes.
|
long |
getSlowPeerCollectionInterval() |
|
java.lang.String |
getSlowPeersReport() |
Retrieve information about slow peers as a JSON.
|
java.util.Set<java.lang.String> |
getSlowPeersUuidSet() |
Returns the UUIDs of all currently tracked slow peers.
|
SlowPeerTracker |
getSlowPeerTracker() |
Use only for testing.
|
DatanodeCommand[] |
handleHeartbeat(DatanodeRegistration nodeReg,
org.apache.hadoop.hdfs.server.protocol.StorageReport[] reports,
java.lang.String blockPoolId,
long cacheCapacity,
long cacheUsed,
int xceiverCount,
int xmitsInProgress,
int failedVolumes,
VolumeFailureSummary volumeFailureSummary,
org.apache.hadoop.hdfs.server.protocol.SlowPeerReports slowPeers,
org.apache.hadoop.hdfs.server.protocol.SlowDiskReports slowDisks) |
Handle heartbeat from datanodes.
|
void |
handleLifeline(DatanodeRegistration nodeReg,
org.apache.hadoop.hdfs.server.protocol.StorageReport[] reports,
long cacheCapacity,
long cacheUsed,
int xceiverCount,
int failedVolumes,
VolumeFailureSummary volumeFailureSummary) |
Handles a lifeline message sent by a DataNode.
|
void |
initSlowPeerTracker(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.util.Timer timer,
boolean dataNodePeerStatsEnabled) |
Determines whether slow peer tracker should be enabled.
|
boolean |
isSlowPeerCollectorInitialized() |
|
void |
markAllDatanodesStale() |
|
void |
refreshNodes(org.apache.hadoop.conf.Configuration conf) |
Rereads conf to get hosts and exclude list file names.
|
void |
registerDatanode(DatanodeRegistration nodeReg) |
Register the given datanode with the namenode.
|
void |
removeDatanode(org.apache.hadoop.hdfs.protocol.DatanodeID node) |
Remove a datanode.
|
void |
resetLastCachingDirectiveSentTime() |
Reset the lastCachingDirectiveSentTimeMs field of all the DataNodes we
know about.
|
java.util.List<java.lang.String> |
resolveNetworkLocation(java.util.List<java.lang.String> names) |
Resolve network locations for the specified hosts.
|
void |
restartSlowPeerCollector(long interval) |
|
void |
setAvoidSlowDataNodesForReadEnabled(boolean enable) |
|
void |
setBalancerBandwidth(long bandwidth) |
Tell all datanodes to use a new, non-persistent bandwidth value for
dfs.datanode.balance.bandwidthPerSec.
|
void |
setBlockInvalidateLimit(int configuredBlockInvalidateLimit) |
|
void |
setHeartbeatExpireInterval(long expiryMs) |
|
void |
setHeartbeatInterval(long intervalSeconds) |
|
void |
setHeartbeatRecheckInterval(int recheckInterval) |
|
void |
setMaxSlowpeerCollectNodes(int maxNodes) |
|
void |
setMaxSlowPeersToReport(int maxSlowPeersToReport) |
|
void |
setShouldSendCachingCommands(boolean shouldSendCachingCommands) |
|
boolean |
shouldAvoidStaleDataNodesForWrite() |
Whether stale datanodes should be avoided as targets on the write path.
|
void |
sortLocatedBlocks(java.lang.String targetHost,
java.util.List<org.apache.hadoop.hdfs.protocol.LocatedBlock> locatedBlocks) |
Sort the non-striped located blocks by the distance to the target host.
|
java.lang.String |
toString() |
public void initSlowPeerTracker(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.util.Timer timer,
boolean dataNodePeerStatsEnabled)
conf - The configuration to use while initializing slowPeerTracker.
timer - Timer object for slowPeerTracker.
dataNodePeerStatsEnabled - To determine whether slow peer tracking should be enabled.
public void restartSlowPeerCollector(long interval)
public org.apache.hadoop.net.NetworkTopology getNetworkTopology()
@VisibleForTesting public DatanodeAdminManager getDatanodeAdminManager()
public HostConfigManager getHostConfigManager()
@VisibleForTesting public void setHeartbeatExpireInterval(long expiryMs)
@VisibleForTesting public FSClusterStats getFSClusterStats()
@VisibleForTesting public int getBlockInvalidateLimit()
public DatanodeStatistics getDatanodeStatistics()
public void setAvoidSlowDataNodesForReadEnabled(boolean enable)
@VisibleForTesting public boolean getEnableAvoidSlowDataNodesForRead()
public void setMaxSlowpeerCollectNodes(int maxNodes)
@VisibleForTesting public int getMaxSlowpeerCollectNodes()
public void sortLocatedBlocks(java.lang.String targetHost,
java.util.List<org.apache.hadoop.hdfs.protocol.LocatedBlock> locatedBlocks)
public DatanodeDescriptor getDatanodeByHost(java.lang.String host)
public DatanodeDescriptor getDatanodeByXferAddr(java.lang.String host, int xferPort)
public java.util.Set<DatanodeDescriptor> getDatanodes()
public org.apache.hadoop.hdfs.server.blockmanagement.Host2NodesMap getHost2DatanodeMap()
public DatanodeDescriptor getDatanode(java.lang.String datanodeUuid)
public DatanodeDescriptor getDatanode(org.apache.hadoop.hdfs.protocol.DatanodeID nodeID) throws UnregisteredNodeException
nodeID - datanode ID
Throws: UnregisteredNodeException
public DatanodeStorageInfo[] getDatanodeStorageInfos(org.apache.hadoop.hdfs.protocol.DatanodeID[] datanodeID, java.lang.String[] storageIDs, java.lang.String format, java.lang.Object... args) throws UnregisteredNodeException
Throws: UnregisteredNodeException
public void removeDatanode(org.apache.hadoop.hdfs.protocol.DatanodeID node)
throws UnregisteredNodeException
Throws: UnregisteredNodeException
public java.util.HashMap<java.lang.String,java.lang.Integer> getDatanodesSoftwareVersions()
public java.util.List<java.lang.String> resolveNetworkLocation(java.util.List<java.lang.String> names)
public void registerDatanode(DatanodeRegistration nodeReg) throws DisallowedDatanodeException, UnresolvedTopologyException
nodeReg - the datanode registration
DisallowedDatanodeException - if the registration request is denied because the datanode does not match includes/excludes
UnresolvedTopologyException - if the registration request is denied because resolving datanode network location fails.
public void refreshNodes(org.apache.hadoop.conf.Configuration conf)
throws java.io.IOException
Throws: java.io.IOException
public int getNumLiveDataNodes()
public int getNumDeadDataNodes()
public int getNumOfDataNodes()
public java.util.List<DatanodeDescriptor> getDecommissioningNodes()
public java.util.List<DatanodeDescriptor> getEnteringMaintenanceNodes()
public boolean shouldAvoidStaleDataNodesForWrite()
public long getBlocksPerPostponedMisreplicatedBlocksRescan()
public long getHeartbeatInterval()
public long getHeartbeatRecheckInterval()
public int getNumStaleNodes()
public int getNumStaleStorages()
public void fetchDatanodes(java.util.List<DatanodeDescriptor> live, java.util.List<DatanodeDescriptor> dead, boolean removeDecommissionNode)
public java.util.List<DatanodeDescriptor> getDatanodeListForReport(org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType type)
public java.util.List<DatanodeDescriptor> getAllSlowDataNodes()
public DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg, org.apache.hadoop.hdfs.server.protocol.StorageReport[] reports, java.lang.String blockPoolId, long cacheCapacity, long cacheUsed, int xceiverCount, int xmitsInProgress, int failedVolumes, VolumeFailureSummary volumeFailureSummary, @Nonnull org.apache.hadoop.hdfs.server.protocol.SlowPeerReports slowPeers, @Nonnull org.apache.hadoop.hdfs.server.protocol.SlowDiskReports slowDisks) throws java.io.IOException
Throws: java.io.IOException
public void handleLifeline(DatanodeRegistration nodeReg, org.apache.hadoop.hdfs.server.protocol.StorageReport[] reports, long cacheCapacity, long cacheUsed, int xceiverCount, int failedVolumes, VolumeFailureSummary volumeFailureSummary) throws java.io.IOException
nodeReg - registration info for DataNode sending the lifeline
reports - storage reports from DataNode
cacheCapacity - cache capacity at DataNode
cacheUsed - cache used at DataNode
xceiverCount - estimated count of transfer threads running at DataNode
failedVolumes - count of failed volumes at DataNode
volumeFailureSummary - info on failed volumes at DataNode
java.io.IOException - if there is an error
public void setBalancerBandwidth(long bandwidth)
throws java.io.IOException
bandwidth - Balancer bandwidth in bytes per second for all datanodes.
Throws: java.io.IOException
public void markAllDatanodesStale()
public void clearPendingQueues()
public void resetLastCachingDirectiveSentTime()
public java.lang.String toString()
Overrides: toString in class java.lang.Object
public void clearPendingCachingCommands()
public void setShouldSendCachingCommands(boolean shouldSendCachingCommands)
public void setHeartbeatInterval(long intervalSeconds)
public void setHeartbeatRecheckInterval(int recheckInterval)
public void setBlockInvalidateLimit(int configuredBlockInvalidateLimit)
public java.lang.String getSlowPeersReport()
public java.util.Set<java.lang.String> getSlowPeersUuidSet()
public static java.util.Set<java.lang.String> getSlowNodesUuidSet()
@VisibleForTesting public SlowPeerTracker getSlowPeerTracker()
@VisibleForTesting public SlowDiskTracker getSlowDiskTracker()
@VisibleForTesting public void addSlowPeers(java.lang.String dnUuid)
public java.lang.String getSlowDisksReport()
public org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport[] getDatanodeStorageReport(org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType type)
type - type of the datanode report
@VisibleForTesting public java.util.Map<java.lang.String,DatanodeDescriptor> getDatanodeMap()
public void setMaxSlowPeersToReport(int maxSlowPeersToReport)
@VisibleForTesting public boolean isSlowPeerCollectorInitialized()
@VisibleForTesting public long getSlowPeerCollectionInterval()
Copyright © 2008–2025 Apache Software Foundation. All rights reserved.