org.apache.hadoop.conf.Configurable, org.apache.hadoop.conf.Reconfigurable, org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol, org.apache.hadoop.hdfs.protocol.ReconfigurationProtocol, DataNodeMXBean, InterDatanodeProtocol@Private public class DataNode extends org.apache.hadoop.conf.ReconfigurableBase implements InterDatanodeProtocol, org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol, DataNodeMXBean, org.apache.hadoop.hdfs.protocol.ReconfigurationProtocol
| Modifier and Type | Class | Description |
|---|---|---|
static class |
DataNode.ShortCircuitFdsUnsupportedException |
|
static class |
DataNode.ShortCircuitFdsVersionException |
| Modifier and Type | Field | Description |
|---|---|---|
static java.lang.String |
DN_CLIENTTRACE_FORMAT |
|
org.apache.hadoop.ipc.RPC.Server |
ipcServer |
|
static org.slf4j.Logger |
LOG |
|
static int |
MAX_VOLUME_FAILURE_TOLERATED_LIMIT |
|
static java.lang.String |
MAX_VOLUME_FAILURES_TOLERATED_MSG |
|
static java.lang.String |
METRICS_LOG_NAME |
versionID| Modifier and Type | Method | Description |
|---|---|---|
void |
cancelDiskBalancePlan(java.lang.String planID) |
Cancels a running plan.
|
void |
checkDiskError() |
Check the disk error synchronously.
|
void |
checkDiskErrorAsync(FsVolumeSpi volume) |
Check if there is a disk failure asynchronously
and if so, handle the error.
|
void |
clearAllBlockSecretKeys() |
|
static DataNode |
createDataNode(java.lang.String[] args,
org.apache.hadoop.conf.Configuration conf) |
Instantiate & Start a single datanode daemon and wait for it to
finish.
|
static DataNode |
createDataNode(java.lang.String[] args,
org.apache.hadoop.conf.Configuration conf,
SecureDataNodeStarter.SecureResources resources) |
Instantiate & Start a single datanode daemon and wait for it to
finish.
|
static InterDatanodeProtocol |
createInterDataNodeProtocolProxy(org.apache.hadoop.hdfs.protocol.DatanodeID datanodeid,
org.apache.hadoop.conf.Configuration conf,
int socketTimeout,
boolean connectToDnViaHostname) |
|
static java.net.InetSocketAddress |
createSocketAddr(java.lang.String target) |
Deprecated.
|
void |
decrementXmitsInProgress() |
Decrements the xmitsInProgress count
|
void |
decrementXmitsInProgress(int delta) |
Decrements the xmitsInProgress count by given value.
|
void |
deleteBlockPool(java.lang.String blockPoolId,
boolean force) |
|
void |
evictWriters() |
|
static java.lang.String |
generateUuid() |
|
int |
getActiveTransferThreadCount() |
Returns the number of Datanode threads actively transferring blocks.
|
long |
getBalancerBandwidth() |
|
org.apache.hadoop.security.token.Token<org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier> |
getBlockAccessToken(org.apache.hadoop.hdfs.protocol.ExtendedBlock b,
java.util.EnumSet<org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier.AccessMode> mode,
org.apache.hadoop.fs.StorageType[] storageTypes,
java.lang.String[] storageIds) |
Use BlockTokenSecretManager to generate block token for current user.
|
org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo |
getBlockLocalPathInfo(org.apache.hadoop.hdfs.protocol.ExtendedBlock block,
org.apache.hadoop.security.token.Token<org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier> token) |
|
org.apache.hadoop.hdfs.server.datanode.BlockPoolManager |
getBlockPoolManager() |
|
BlockPoolTokenSecretManager |
getBlockPoolTokenSecretManager() |
|
BlockRecoveryWorker |
getBlockRecoveryWorker() |
|
BlockScanner |
getBlockScanner() |
|
int |
getBpOsCount() |
|
java.lang.String |
getBPServiceActorInfo() |
Returned information is a JSON representation of an array,
each element of the array is a map contains the information
about a block pool service actor.
|
java.util.List<java.util.Map<java.lang.String,java.lang.String>> |
getBPServiceActorInfoMap() |
|
java.lang.String |
getClusterId() |
Gets the cluster id.
|
org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory |
getDataEncryptionKeyFactoryForBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock block) |
Returns a new DataEncryptionKeyFactory that generates a key from the
BlockPoolTokenSecretManager, using the block pool ID of the given block.
|
java.lang.String |
getDatanodeHostname() |
Return hostname of the datanode.
|
org.apache.hadoop.hdfs.protocol.DatanodeID |
getDatanodeId() |
|
org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo |
getDatanodeInfo() |
|
java.util.Map<java.lang.String,java.util.Map<java.lang.String,java.lang.Long>> |
getDatanodeNetworkCounts() |
Gets the network error counts on a per-Datanode basis.
|
java.lang.String |
getDatanodeUuid() |
|
java.lang.String |
getDataPort() |
Gets the data port.
|
DataSetLockManager |
getDataSetLockManager() |
|
DiskBalancer |
getDiskBalancer() |
|
java.lang.String |
getDiskBalancerSetting(java.lang.String key) |
Gets a runtime configuration value from diskbalancer instance.
|
java.lang.String |
getDiskBalancerStatus() |
Gets the diskBalancer Status.
|
DataNodeDiskMetrics |
getDiskMetrics() |
|
java.lang.String |
getDisplayName() |
|
DNConf |
getDnConf() |
|
DatanodeRegistration |
getDNRegistrationForBP(java.lang.String bpid) |
get BP registration by blockPool id
|
long |
getDNStartedTimeInMillis() |
Get the start time of the DataNode.
|
org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.ECN |
getECN() |
The ECN bit for the DataNode.
|
DataTransferThrottler |
getEcReconstuctReadThrottler() |
|
DataTransferThrottler |
getEcReconstuctWriteThrottler() |
|
ErasureCodingWorker |
getErasureCodingWorker() |
|
FileIoProvider |
getFileIoProvider() |
|
FsDatasetSpi<?> |
getFSDataset() |
Examples are adding and deleting blocks directly.
|
java.lang.String |
getHttpPort() |
Gets the http port.
|
static java.net.InetSocketAddress |
getInfoAddr(org.apache.hadoop.conf.Configuration conf) |
Determine the HTTP server's effective address.
|
int |
getInfoPort() |
|
int |
getInfoSecurePort() |
|
int |
getIpcPort() |
|
long |
getLastDiskErrorCheck() |
|
long |
getMaxNumberOfBlocksToLog() |
|
DataNodeMetrics |
getMetrics() |
|
java.lang.String |
getNamenodeAddresses() |
Returned information is a JSON representation of a map with
name node host name as the key and block pool Id as the value.
|
protected org.apache.hadoop.conf.Configuration |
getNewConf() |
|
long |
getOOBTimeout(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status status) |
Get the timeout to be used for transmitting the OOB type
|
DataNodePeerMetrics |
getPeerMetrics() |
|
java.util.Collection<java.lang.String> |
getReconfigurableProperties() |
Get a list of the keys of the re-configurable properties in configuration.
|
org.apache.hadoop.conf.ReconfigurationTaskStatus |
getReconfigurationStatus() |
|
long |
getReplicaVisibleLength(org.apache.hadoop.hdfs.protocol.ExtendedBlock block) |
|
java.lang.String |
getRevision() |
|
java.lang.String |
getRpcPort() |
Gets the rpc port.
|
org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient |
getSaslClient() |
|
SaslDataTransferServer |
getSaslServer() |
|
java.lang.String |
getSendPacketDownstreamAvgInfo() |
Gets the average info (e.g.
|
ShortCircuitRegistry |
getShortCircuitRegistry() |
|
org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.SLOW |
getSLOWByBlockPoolId(java.lang.String bpId) |
The SLOW bit for the DataNode of the specific BlockPool.
|
java.lang.String |
getSlowDisks() |
Gets the slow disks in the Datanode.
|
java.lang.String |
getSoftwareVersion() |
Get the version of software running on the DataNode
|
static java.util.List<StorageLocation> |
getStorageLocations(org.apache.hadoop.conf.Configuration conf) |
|
org.apache.hadoop.tracing.Tracer |
getTracer() |
|
java.lang.String |
getVersion() |
Gets the version of Hadoop.
|
java.lang.String |
getVolumeInfo() |
Returned information is a JSON representation of a map with
volume name as the key and value is a map of volume attribute
keys to its values
|
java.util.List<org.apache.hadoop.hdfs.protocol.DatanodeVolumeInfo> |
getVolumeReport() |
|
int |
getXceiverCount() |
Number of concurrent xceivers per node.
|
java.net.InetSocketAddress |
getXferAddress() |
NB: The datanode can perform data transfer on the streaming
address however clients are given the IPC IP address for data
transfer, and that may be a different address.
|
int |
getXferPort() |
|
org.apache.hadoop.hdfs.server.datanode.DataXceiverServer |
getXferServer() |
|
int |
getXmitsInProgress() |
Returns an estimate of the number of data replication/reconstruction tasks
running currently.
|
void |
incrementXmitsInProcess(int delta) |
Increments the xmitsInProgress count by the given value.
|
void |
incrementXmitsInProgress() |
Increments the xmitsInProgress count.
|
ReplicaRecoveryInfo |
initReplicaRecovery(BlockRecoveryCommand.RecoveringBlock rBlock) |
Initialize a replica recovery.
|
static DataNode |
instantiateDataNode(java.lang.String[] args,
org.apache.hadoop.conf.Configuration conf) |
Instantiate a single datanode object.
|
static DataNode |
instantiateDataNode(java.lang.String[] args,
org.apache.hadoop.conf.Configuration conf,
SecureDataNodeStarter.SecureResources resources) |
Instantiate a single datanode object, along with its secure resources.
|
boolean |
isBPServiceAlive(java.lang.String bpid) |
|
boolean |
isConnectedToNN(java.net.InetSocketAddress addr) |
|
boolean |
isDatanodeFullyStarted() |
A datanode is considered to be fully started if all the BP threads are
alive and all the block pools are initialized.
|
boolean |
isDatanodeFullyStarted(boolean checkConnectionToActiveNamenode) |
A datanode is considered to be fully started if all the BP threads are
alive and all the block pools are initialized.
|
boolean |
isDatanodeUp() |
A data node is considered to be up if one of the bp services is up.
|
boolean |
isSecurityEnabled() |
Gets if security is enabled.
|
java.util.List<java.lang.String> |
listReconfigurableProperties() |
|
static void |
main(java.lang.String[] args) |
|
java.net.Socket |
newSocket() |
Creates either NIO or regular depending on socketWriteTimeout.
|
void |
notifyNamenodeDeletedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock block,
java.lang.String storageUuid) |
Notify the corresponding namenode to delete the block.
|
void |
notifyNamenodeReceivedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock block,
java.lang.String delHint,
java.lang.String storageUuid,
boolean isOnTransientStorage) |
|
protected void |
notifyNamenodeReceivingBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock block,
java.lang.String storageUuid) |
|
org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus |
queryDiskBalancerPlan() |
Returns the status of current or last executed work plan.
|
java.lang.String |
reconfigurePropertyImpl(java.lang.String property,
java.lang.String newVal) |
.
|
void |
refreshNamenodes() |
|
void |
refreshNamenodes(org.apache.hadoop.conf.Configuration conf) |
|
void |
reportBadBlocks(org.apache.hadoop.hdfs.protocol.ExtendedBlock block) |
Report a bad block which is hosted on the local DN.
|
void |
reportBadBlocks(org.apache.hadoop.hdfs.protocol.ExtendedBlock block,
FsVolumeSpi volume) |
Report a bad block which is hosted on the local DN.
|
void |
reportCorruptedBlocks(org.apache.hadoop.hdfs.DFSUtilClient.CorruptedBlocks corruptedBlocks) |
|
void |
reportRemoteBadBlock(org.apache.hadoop.hdfs.protocol.DatanodeInfo srcDataNode,
org.apache.hadoop.hdfs.protocol.ExtendedBlock block) |
Report a bad block on another DN (eg if we received a corrupt replica
from a remote host).
|
void |
runDatanodeDaemon() |
Start a single datanode daemon and wait for it to finish.
|
void |
scheduleAllBlockReport(long delay) |
This method arranges for the data node to send
the block report at the next heartbeat.
|
static void |
secureMain(java.lang.String[] args,
SecureDataNodeStarter.SecureResources resources) |
|
void |
setHeartbeatsDisabledForTests(boolean heartbeatsDisabledForTests) |
|
void |
shutdown() |
Shut down this instance of the datanode.
|
void |
shutdownDatanode(boolean forUpgrade) |
|
protected void |
startMetricsLogger() |
Start a timer to periodically write DataNode metrics to the log file.
|
void |
startReconfiguration() |
|
protected void |
stopMetricsLogger() |
|
void |
submitDiskBalancerPlan(java.lang.String planID,
long planVersion,
java.lang.String planFile,
java.lang.String planData,
boolean skipDateCheck) |
Allows submission of a disk balancer Job.
|
java.lang.String |
toString() |
|
void |
triggerBlockReport(org.apache.hadoop.hdfs.client.BlockReportOptions options) |
|
java.lang.String |
updateReplicaUnderRecovery(org.apache.hadoop.hdfs.protocol.ExtendedBlock oldBlock,
long recoveryId,
long newBlockId,
long newLength) |
Update replica with the new generation stamp and length.
|
getChangedProperties, getReconfigurationTaskStatus, isPropertyReconfigurable, reconfigureProperty, setReconfigurationUtil, shutdownReconfigurationTask, startReconfigurationTaskpublic static final org.slf4j.Logger LOG
public static final java.lang.String DN_CLIENTTRACE_FORMAT
public static final int MAX_VOLUME_FAILURE_TOLERATED_LIMIT
public static final java.lang.String MAX_VOLUME_FAILURES_TOLERATED_MSG
public static final java.lang.String METRICS_LOG_NAME
public org.apache.hadoop.ipc.RPC.Server ipcServer
@Deprecated public static java.net.InetSocketAddress createSocketAddr(java.lang.String target)
NetUtils.createSocketAddr(String) instead.protected org.apache.hadoop.conf.Configuration getNewConf()
getNewConf in class org.apache.hadoop.conf.ReconfigurableBasepublic java.lang.String reconfigurePropertyImpl(java.lang.String property,
java.lang.String newVal)
throws org.apache.hadoop.conf.ReconfigurationException
reconfigurePropertyImpl in class org.apache.hadoop.conf.ReconfigurableBaseorg.apache.hadoop.conf.ReconfigurationExceptionpublic java.util.Collection<java.lang.String> getReconfigurableProperties()
getReconfigurableProperties in interface org.apache.hadoop.conf.ReconfigurablegetReconfigurableProperties in class org.apache.hadoop.conf.ReconfigurableBasepublic org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.ECN getECN()
public org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.SLOW getSLOWByBlockPoolId(java.lang.String bpId)
public FileIoProvider getFileIoProvider()
public void notifyNamenodeReceivedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock block,
java.lang.String delHint,
java.lang.String storageUuid,
boolean isOnTransientStorage)
protected void notifyNamenodeReceivingBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock block,
java.lang.String storageUuid)
public void notifyNamenodeDeletedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock block,
java.lang.String storageUuid)
public void reportBadBlocks(org.apache.hadoop.hdfs.protocol.ExtendedBlock block)
throws java.io.IOException
java.io.IOExceptionpublic void reportBadBlocks(org.apache.hadoop.hdfs.protocol.ExtendedBlock block,
FsVolumeSpi volume)
throws java.io.IOException
block - the bad block which is hosted on the local DN; volume - the volume that block is stored in and the volume
must not be null. java.io.IOException public void reportRemoteBadBlock(org.apache.hadoop.hdfs.protocol.DatanodeInfo srcDataNode,
org.apache.hadoop.hdfs.protocol.ExtendedBlock block)
throws java.io.IOException
srcDataNode - the DN hosting the bad block; block - the block itself. java.io.IOException public void reportCorruptedBlocks(org.apache.hadoop.hdfs.DFSUtilClient.CorruptedBlocks corruptedBlocks)
throws java.io.IOException
java.io.IOException@VisibleForTesting public void setHeartbeatsDisabledForTests(boolean heartbeatsDisabledForTests)
public static java.lang.String generateUuid()
public org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient getSaslClient()
public int getBpOsCount()
public static java.net.InetSocketAddress getInfoAddr(org.apache.hadoop.conf.Configuration conf)
@VisibleForTesting public org.apache.hadoop.hdfs.server.datanode.DataXceiverServer getXferServer()
@VisibleForTesting public int getXferPort()
@VisibleForTesting public SaslDataTransferServer getSaslServer()
public java.lang.String getDisplayName()
public java.net.InetSocketAddress getXferAddress()
public int getIpcPort()
@VisibleForTesting public DatanodeRegistration getDNRegistrationForBP(java.lang.String bpid) throws java.io.IOException
java.io.IOException - on errorpublic java.net.Socket newSocket()
throws java.io.IOException
java.io.IOExceptionpublic static InterDatanodeProtocol createInterDataNodeProtocolProxy(org.apache.hadoop.hdfs.protocol.DatanodeID datanodeid, org.apache.hadoop.conf.Configuration conf, int socketTimeout, boolean connectToDnViaHostname) throws java.io.IOException
java.io.IOExceptionpublic DataNodeMetrics getMetrics()
public DataNodeDiskMetrics getDiskMetrics()
public DataNodePeerMetrics getPeerMetrics()
public long getMaxNumberOfBlocksToLog()
public org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo getBlockLocalPathInfo(org.apache.hadoop.hdfs.protocol.ExtendedBlock block,
org.apache.hadoop.security.token.Token<org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier> token)
throws java.io.IOException
getBlockLocalPathInfo in interface org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocoljava.io.IOExceptionpublic void shutdown()
public void checkDiskErrorAsync(FsVolumeSpi volume)
public int getXceiverCount()
getXceiverCount in interface DataNodeMXBeanpublic int getActiveTransferThreadCount()
DataNodeMXBeangetActiveTransferThreadCount in interface DataNodeMXBeanpublic java.util.Map<java.lang.String,java.util.Map<java.lang.String,java.lang.Long>> getDatanodeNetworkCounts()
DataNodeMXBeangetDatanodeNetworkCounts in interface DataNodeMXBeanpublic int getXmitsInProgress()
DataNodeMXBeangetXmitsInProgress in interface DataNodeMXBeanpublic void incrementXmitsInProgress()
public void incrementXmitsInProcess(int delta)
delta - the amount of xmitsInProgress to increase.incrementXmitsInProgress()public void decrementXmitsInProgress()
public void decrementXmitsInProgress(int delta)
decrementXmitsInProgress()public org.apache.hadoop.security.token.Token<org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier> getBlockAccessToken(org.apache.hadoop.hdfs.protocol.ExtendedBlock b,
java.util.EnumSet<org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier.AccessMode> mode,
org.apache.hadoop.fs.StorageType[] storageTypes,
java.lang.String[] storageIds)
throws java.io.IOException
java.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory getDataEncryptionKeyFactoryForBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock block)
block - for which the factory needs to create a keypublic void runDatanodeDaemon()
throws java.io.IOException
java.io.IOExceptionpublic boolean isDatanodeUp()
public static DataNode instantiateDataNode(java.lang.String[] args, org.apache.hadoop.conf.Configuration conf) throws java.io.IOException
runDatanodeDaemon() subsequently.java.io.IOExceptionpublic static DataNode instantiateDataNode(java.lang.String[] args, org.apache.hadoop.conf.Configuration conf, SecureDataNodeStarter.SecureResources resources) throws java.io.IOException
runDatanodeDaemon()
subsequently.java.io.IOExceptionpublic static java.util.List<StorageLocation> getStorageLocations(org.apache.hadoop.conf.Configuration conf)
@VisibleForTesting public static DataNode createDataNode(java.lang.String[] args, org.apache.hadoop.conf.Configuration conf) throws java.io.IOException
java.io.IOException@VisibleForTesting @Private public static DataNode createDataNode(java.lang.String[] args, org.apache.hadoop.conf.Configuration conf, SecureDataNodeStarter.SecureResources resources) throws java.io.IOException
java.io.IOExceptionpublic java.lang.String toString()
toString in class java.lang.Objectpublic void scheduleAllBlockReport(long delay)
@VisibleForTesting public FsDatasetSpi<?> getFSDataset()
@VisibleForTesting public BlockScanner getBlockScanner()
@VisibleForTesting public BlockPoolTokenSecretManager getBlockPoolTokenSecretManager()
public static void secureMain(java.lang.String[] args,
SecureDataNodeStarter.SecureResources resources)
public static void main(java.lang.String[] args)
public ReplicaRecoveryInfo initReplicaRecovery(BlockRecoveryCommand.RecoveringBlock rBlock) throws java.io.IOException
InterDatanodeProtocolinitReplicaRecovery in interface InterDatanodeProtocoljava.io.IOExceptionpublic java.lang.String updateReplicaUnderRecovery(org.apache.hadoop.hdfs.protocol.ExtendedBlock oldBlock,
long recoveryId,
long newBlockId,
long newLength)
throws java.io.IOException
updateReplicaUnderRecovery in interface InterDatanodeProtocoljava.io.IOExceptionpublic long getReplicaVisibleLength(org.apache.hadoop.hdfs.protocol.ExtendedBlock block)
throws java.io.IOException
getReplicaVisibleLength in interface org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocoljava.io.IOExceptionpublic java.lang.String getSoftwareVersion()
DataNodeMXBeangetSoftwareVersion in interface DataNodeMXBeanpublic java.lang.String getVersion()
DataNodeMXBeangetVersion in interface DataNodeMXBeanpublic java.lang.String getRpcPort()
DataNodeMXBeangetRpcPort in interface DataNodeMXBeanpublic java.lang.String getDataPort()
DataNodeMXBeangetDataPort in interface DataNodeMXBeanpublic java.lang.String getHttpPort()
DataNodeMXBeangetHttpPort in interface DataNodeMXBeanpublic long getDNStartedTimeInMillis()
DataNodeMXBeangetDNStartedTimeInMillis in interface DataNodeMXBeanpublic java.lang.String getRevision()
public int getInfoPort()
public int getInfoSecurePort()
public java.lang.String getNamenodeAddresses()
getNamenodeAddresses in interface DataNodeMXBeanpublic java.lang.String getDatanodeHostname()
getDatanodeHostname in interface DataNodeMXBeanpublic java.lang.String getBPServiceActorInfo()
getBPServiceActorInfo in interface DataNodeMXBean@VisibleForTesting public java.util.List<java.util.Map<java.lang.String,java.lang.String>> getBPServiceActorInfoMap()
public java.lang.String getVolumeInfo()
getVolumeInfo in interface DataNodeMXBeanpublic java.lang.String getClusterId()
DataNodeMXBeangetClusterId in interface DataNodeMXBeanpublic java.lang.String getDiskBalancerStatus()
DataNodeMXBeangetDiskBalancerStatus in interface DataNodeMXBeanpublic boolean isSecurityEnabled()
DataNodeMXBeanisSecurityEnabled in interface DataNodeMXBeanpublic void refreshNamenodes(org.apache.hadoop.conf.Configuration conf)
throws java.io.IOException
java.io.IOExceptionpublic void refreshNamenodes()
throws java.io.IOException
refreshNamenodes in interface org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocoljava.io.IOExceptionpublic void deleteBlockPool(java.lang.String blockPoolId,
boolean force)
throws java.io.IOException
deleteBlockPool in interface org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocoljava.io.IOExceptionpublic void shutdownDatanode(boolean forUpgrade)
throws java.io.IOException
shutdownDatanode in interface org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocoljava.io.IOExceptionpublic void evictWriters()
throws java.io.IOException
evictWriters in interface org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo getDatanodeInfo()
getDatanodeInfo in interface org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocolpublic void startReconfiguration()
throws java.io.IOException
startReconfiguration in interface org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocolstartReconfiguration in interface org.apache.hadoop.hdfs.protocol.ReconfigurationProtocoljava.io.IOExceptionpublic org.apache.hadoop.conf.ReconfigurationTaskStatus getReconfigurationStatus()
throws java.io.IOException
getReconfigurationStatus in interface org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocolgetReconfigurationStatus in interface org.apache.hadoop.hdfs.protocol.ReconfigurationProtocoljava.io.IOExceptionpublic java.util.List<java.lang.String> listReconfigurableProperties()
throws java.io.IOException
listReconfigurableProperties in interface org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocollistReconfigurableProperties in interface org.apache.hadoop.hdfs.protocol.ReconfigurationProtocoljava.io.IOExceptionpublic void triggerBlockReport(org.apache.hadoop.hdfs.client.BlockReportOptions options)
throws java.io.IOException
triggerBlockReport in interface org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocoljava.io.IOExceptionpublic boolean isConnectedToNN(java.net.InetSocketAddress addr)
addr - rpc address of the namenodepublic boolean isBPServiceAlive(java.lang.String bpid)
bpid - block pool Idpublic boolean isDatanodeFullyStarted()
public boolean isDatanodeFullyStarted(boolean checkConnectionToActiveNamenode)
checkConnectionToActiveNamenode - if true, performs additional check of whether datanode
is heartbeating to active namenode.@VisibleForTesting public org.apache.hadoop.hdfs.protocol.DatanodeID getDatanodeId()
@VisibleForTesting public void clearAllBlockSecretKeys()
public long getBalancerBandwidth()
getBalancerBandwidth in interface org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocolpublic DNConf getDnConf()
public java.lang.String getDatanodeUuid()
public ShortCircuitRegistry getShortCircuitRegistry()
public DataTransferThrottler getEcReconstuctReadThrottler()
public DataTransferThrottler getEcReconstuctWriteThrottler()
@VisibleForTesting
public void checkDiskError()
throws java.io.IOException
java.io.IOException@VisibleForTesting public long getLastDiskErrorCheck()
public BlockRecoveryWorker getBlockRecoveryWorker()
public ErasureCodingWorker getErasureCodingWorker()
public long getOOBTimeout(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status status)
throws java.io.IOException
java.io.IOExceptionprotected void startMetricsLogger()
protected void stopMetricsLogger()
public org.apache.hadoop.tracing.Tracer getTracer()
public void submitDiskBalancerPlan(java.lang.String planID,
long planVersion,
java.lang.String planFile,
java.lang.String planData,
boolean skipDateCheck)
throws java.io.IOException
submitDiskBalancerPlan in interface org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol. planID - Hash value of the plan. planVersion - Plan version, reserved for future use. We have only
version 1 now. planFile - Plan file name. planData - Actual plan data in json format. java.io.IOException public void cancelDiskBalancePlan(java.lang.String planID)
throws java.io.IOException
cancelDiskBalancePlan in interface org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocolplanID - - Hash string that identifies a planjava.io.IOExceptionpublic org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus queryDiskBalancerPlan()
throws java.io.IOException
queryDiskBalancerPlan in interface org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocoljava.io.IOExceptionpublic java.lang.String getDiskBalancerSetting(java.lang.String key)
throws java.io.IOException
getDiskBalancerSetting in interface org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocolkey - - String that represents the run time key value.java.io.IOException - - Throws if there is no such keypublic java.lang.String getSendPacketDownstreamAvgInfo()
DataNodeMXBeanExample Json: {"[185.164.159.81:9801]RollingAvgTime":504.867, "[49.236.149.246:9801]RollingAvgTime":504.463, "[84.125.113.65:9801]RollingAvgTime":497.954}
getSendPacketDownstreamAvgInfo in interface DataNodeMXBeanpublic java.lang.String getSlowDisks()
DataNodeMXBeangetSlowDisks in interface DataNodeMXBeanpublic java.util.List<org.apache.hadoop.hdfs.protocol.DatanodeVolumeInfo> getVolumeReport()
throws java.io.IOException
getVolumeReport in interface org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocoljava.io.IOException@VisibleForTesting public DiskBalancer getDiskBalancer() throws java.io.IOException
java.io.IOExceptionpublic DataSetLockManager getDataSetLockManager()
@VisibleForTesting public org.apache.hadoop.hdfs.server.datanode.BlockPoolManager getBlockPoolManager()
Copyright © 2008–2025 Apache Software Foundation. All rights reserved.