java.lang.Comparable<org.apache.hadoop.hdfs.protocol.DatanodeID>, org.apache.hadoop.net.Node

ProvidedStorageMap.ProvidedDescriptor

@Private
@Evolving
public class DatanodeDescriptor
extends org.apache.hadoop.hdfs.protocol.DatanodeInfo
| Modifier and Type | Class | Description |
|---|---|---|
static class |
DatanodeDescriptor.BlockTargetPair |
Block and targets pair
|
static class |
DatanodeDescriptor.CachedBlocksList |
A list of CachedBlock objects on this datanode.
|
class |
DatanodeDescriptor.LeavingServiceStatus |
Leaving service status.
|
| Modifier and Type | Field | Description |
|---|---|---|
static DatanodeDescriptor[] |
EMPTY_ARRAY |
|
static org.slf4j.Logger |
LOG |
|
protected java.util.Map<java.lang.String,DatanodeStorageInfo> |
storageMap |
| Constructor | Description |
|---|---|
DatanodeDescriptor(org.apache.hadoop.hdfs.protocol.DatanodeID nodeID) |
DatanodeDescriptor constructor
|
DatanodeDescriptor(org.apache.hadoop.hdfs.protocol.DatanodeID nodeID,
java.lang.String networkLocation) |
DatanodeDescriptor constructor
|
| Modifier and Type | Method | Description |
|---|---|---|
void |
addBlockToBeReplicated(org.apache.hadoop.hdfs.protocol.Block block,
DatanodeStorageInfo[] targets) |
Store block replication work.
|
void |
addECBlockToBeReplicated(org.apache.hadoop.hdfs.protocol.Block block,
DatanodeStorageInfo[] targets) |
Store ec block to be replicated work.
|
boolean |
checkBlockReportReceived() |
|
DatanodeStorageInfo |
chooseStorage4Block(org.apache.hadoop.fs.StorageType t,
long blockSize,
int minBlocksForWrite) |
Find whether the datanode contains good storage of given type to
place block of size
blockSize. |
void |
clearBlockQueues() |
|
boolean |
containsInvalidateBlock(org.apache.hadoop.hdfs.protocol.Block block) |
|
void |
decrementPendingReplicationWithoutTargets() |
|
java.lang.String |
dumpDatanode() |
|
boolean |
equals(java.lang.Object obj) |
|
long |
getBalancerBandwidth() |
|
int |
getBlocksScheduled() |
|
int |
getBlocksScheduled(org.apache.hadoop.fs.StorageType t) |
|
DatanodeDescriptor.CachedBlocksList |
getCached() |
|
java.util.List<BlockECReconstructionCommand.BlockECReconstructionInfo> |
getErasureCodeCommand(int maxTransfers) |
|
org.apache.hadoop.hdfs.protocol.Block[] |
getInvalidateBlocks(int maxblocks) |
Remove the specified number of blocks to be invalidated
|
long |
getLastCachingDirectiveSentTimeMs() |
|
BlockInfo[] |
getLeaseRecoveryCommand(int maxTransfers) |
|
DatanodeDescriptor.LeavingServiceStatus |
getLeavingServiceStatus() |
|
int |
getNumberOfBlocksToBeErasureCoded() |
The number of work items that are pending to be reconstructed.
|
int |
getNumberOfECBlocksToBeReplicated() |
The number of ec work items that are pending to be replicated.
|
int |
getNumberOfReplicateBlocks() |
|
int |
getNumVolumesAvailable() |
Return the number of volumes that can be written.
|
DatanodeDescriptor.CachedBlocksList |
getPendingCached() |
|
DatanodeDescriptor.CachedBlocksList |
getPendingUncached() |
|
DatanodeStorageInfo |
getStorageInfo(java.lang.String storageID) |
|
DatanodeStorageInfo[] |
getStorageInfos() |
|
org.apache.hadoop.hdfs.server.protocol.StorageReport[] |
getStorageReports() |
|
java.util.EnumSet<org.apache.hadoop.fs.StorageType> |
getStorageTypes() |
|
int |
getVolumeFailures() |
|
VolumeFailureSummary |
getVolumeFailureSummary() |
Returns info about volume failures.
|
int |
hashCode() |
|
boolean |
hasStorageType(org.apache.hadoop.fs.StorageType type) |
|
void |
incrementPendingReplicationWithoutTargets() |
|
boolean |
isAlive() |
|
boolean |
isDisallowed() |
Is the datanode disallowed from communicating with the namenode?
|
boolean |
isHeartbeatedSinceRegistration() |
|
boolean |
isRegistered() |
|
boolean |
needKeyUpdate() |
|
int |
numBlocks() |
|
void |
resetBlocks() |
|
void |
setAlive(boolean isAlive) |
|
void |
setBalancerBandwidth(long bandwidth) |
|
void |
setDisallowed(boolean flag) |
Set the flag to indicate if this datanode is disallowed from communicating
with the namenode.
|
void |
setForceRegistration(boolean force) |
|
void |
setLastCachingDirectiveSentTimeMs(long time) |
|
void |
setNeedKeyUpdate(boolean needKeyUpdate) |
|
void |
updateRegInfo(org.apache.hadoop.hdfs.protocol.DatanodeID nodeReg) |
addDependentHostName, getAdminState, getBlockPoolUsed, getBlockPoolUsedPercent, getCacheCapacity, getCacheRemaining, getCacheRemainingPercent, getCacheUsed, getCacheUsedPercent, getCapacity, getDatanodeReport, getDependentHostNames, getDfsUsed, getDfsUsedPercent, getLastBlockReportMonotonic, getLastBlockReportTime, getLastUpdate, getLastUpdateMonotonic, getLevel, getMaintenanceExpireTimeInMS, getName, getNetworkLocation, getNonDfsUsed, getNumBlocks, getParent, getRemaining, getRemainingPercent, getSoftwareVersion, getUpgradeDomain, getXceiverCount, isDecommissioned, isDecommissionInProgress, isEnteringMaintenance, isInMaintenance, isInService, isMaintenance, isStale, maintenanceExpired, maintenanceNotExpired, setAdminState, setBlockPoolUsed, setCacheCapacity, setCacheUsed, setCapacity, setDecommissioned, setDependentHostNames, setDfsUsed, setInMaintenance, setLastBlockReportMonotonic, setLastBlockReportTime, setLastUpdate, setLastUpdateMonotonic, setLevel, setMaintenanceExpireTimeInMS, setNetworkLocation, setNonDfsUsed, setNumBlocks, setParent, setRemaining, setSoftwareVersion, setUpgradeDomain, setXceiverCount, startDecommission, startMaintenance, stopDecommission, stopMaintenance

compareTo, getDatanodeUuid, getDatanodeUuidBytes, getHostName, getHostNameBytes, getInfoAddr, getInfoPort, getInfoSecureAddr, getInfoSecurePort, getIpAddr, getIpAddrBytes, getIpcAddr, getIpcPort, getPeerHostName, getResolvedAddress, getXferAddr, getXferAddr, getXferAddrWithHostname, getXferPort, setIpAddr, setPeerHostName, toString

public static final org.slf4j.Logger LOG
public static final DatanodeDescriptor[] EMPTY_ARRAY
protected final java.util.Map<java.lang.String,DatanodeStorageInfo> storageMap
public DatanodeDescriptor(org.apache.hadoop.hdfs.protocol.DatanodeID nodeID)
nodeID - id of the data node

public DatanodeDescriptor(org.apache.hadoop.hdfs.protocol.DatanodeID nodeID,
java.lang.String networkLocation)
nodeID - id of the data node
networkLocation - location of the data node in network

public DatanodeDescriptor.CachedBlocksList getPendingCached()
public DatanodeDescriptor.CachedBlocksList getCached()
public DatanodeDescriptor.CachedBlocksList getPendingUncached()
public boolean isAlive()
public void setAlive(boolean isAlive)
public boolean needKeyUpdate()
public void setNeedKeyUpdate(boolean needKeyUpdate)
public DatanodeDescriptor.LeavingServiceStatus getLeavingServiceStatus()
@VisibleForTesting public boolean isHeartbeatedSinceRegistration()
@VisibleForTesting public DatanodeStorageInfo getStorageInfo(java.lang.String storageID)
@VisibleForTesting public DatanodeStorageInfo[] getStorageInfos()
public java.util.EnumSet<org.apache.hadoop.fs.StorageType> getStorageTypes()
public org.apache.hadoop.hdfs.server.protocol.StorageReport[] getStorageReports()
public void resetBlocks()
public void clearBlockQueues()
public int numBlocks()
@VisibleForTesting public void incrementPendingReplicationWithoutTargets()
@VisibleForTesting public void decrementPendingReplicationWithoutTargets()
@VisibleForTesting
public void addBlockToBeReplicated(org.apache.hadoop.hdfs.protocol.Block block,
DatanodeStorageInfo[] targets)
@VisibleForTesting
public void addECBlockToBeReplicated(org.apache.hadoop.hdfs.protocol.Block block,
DatanodeStorageInfo[] targets)
@VisibleForTesting public int getNumberOfBlocksToBeErasureCoded()
@VisibleForTesting public int getNumberOfECBlocksToBeReplicated()
@VisibleForTesting public int getNumberOfReplicateBlocks()
public java.util.List<BlockECReconstructionCommand.BlockECReconstructionInfo> getErasureCodeCommand(int maxTransfers)
public BlockInfo[] getLeaseRecoveryCommand(int maxTransfers)
public org.apache.hadoop.hdfs.protocol.Block[] getInvalidateBlocks(int maxblocks)
@VisibleForTesting public boolean containsInvalidateBlock(org.apache.hadoop.hdfs.protocol.Block block)
public DatanodeStorageInfo chooseStorage4Block(org.apache.hadoop.fs.StorageType t, long blockSize, int minBlocksForWrite)
blockSize.
Currently datanode only cares about the storage type, in this method, the first storage of given type we see is returned.
t - requested storage type
blockSize - requested block size
minBlocksForWrite - requested the minimum number of blocks

public int getBlocksScheduled(org.apache.hadoop.fs.StorageType t)
public int getBlocksScheduled()
public int hashCode()
hashCode in class org.apache.hadoop.hdfs.protocol.DatanodeInfo

public boolean equals(java.lang.Object obj)
equals in class org.apache.hadoop.hdfs.protocol.DatanodeInfo

public void setDisallowed(boolean flag)
public boolean isDisallowed()
public int getVolumeFailures()
public VolumeFailureSummary getVolumeFailureSummary()
public int getNumVolumesAvailable()
public void updateRegInfo(org.apache.hadoop.hdfs.protocol.DatanodeID nodeReg)
updateRegInfo in class org.apache.hadoop.hdfs.protocol.DatanodeID
nodeReg - DatanodeID to update registration for.

public long getBalancerBandwidth()
public void setBalancerBandwidth(long bandwidth)
bandwidth - balancer bandwidth in bytes per second for this datanode

public java.lang.String dumpDatanode()
dumpDatanode in class org.apache.hadoop.hdfs.protocol.DatanodeInfo

public long getLastCachingDirectiveSentTimeMs()
public void setLastCachingDirectiveSentTimeMs(long time)
time - The time at which we last sent caching directives to this
DataNode, in monotonic milliseconds.

public boolean checkBlockReportReceived()
public void setForceRegistration(boolean force)
public boolean isRegistered()
public boolean hasStorageType(org.apache.hadoop.fs.StorageType type)
Copyright © 2008–2025 Apache Software Foundation. All rights reserved.