ECBlockGroupsMBean, FSNamesystemMBean, ReplicatedBlocksMBean, NameNodeMXBean, Namesystem, SafeMode, RwLock@Private @Metrics(context="dfs") public class FSNamesystem extends java.lang.Object implements Namesystem, FSNamesystemMBean, NameNodeMXBean, ReplicatedBlocksMBean, ECBlockGroupsMBean
| Modifier and Type | Field | Description |
|---|---|---|
static org.slf4j.Logger |
AUDIT_LOG |
Logger for audit events, noting successful FSNamesystem operations.
|
static java.lang.String |
DFS_NAMENODE_SNAPSHOT_TRASHROOT_ENABLED |
|
static boolean |
DFS_NAMENODE_SNAPSHOT_TRASHROOT_ENABLED_DEFAULT |
|
static org.slf4j.Logger |
LOG |
| Modifier and Type | Method | Description |
|---|---|---|
void |
checkAndProvisionSnapshotTrashRoots() |
Check if snapshot roots are created for all existing snapshottable
directories.
|
void |
checkErasureCodingSupported(java.lang.String operationName) |
Check whether operation is supported.
|
void |
checkOperation(NameNode.OperationCategory op) |
|
void |
cpLock() |
Lock the checkpoint lock
|
void |
cpLockInterruptibly() |
Lock the checkpoint lock interruptibly
|
void |
cpUnlock() |
Unlock the checkpoint lock
|
void |
gcDeletedSnapshot(java.lang.String snapshotRoot,
java.lang.String snapshotName) |
|
java.util.List<AuditLogger> |
getAuditLoggers() |
|
int |
getBlockCapacity() |
|
INodeFile |
getBlockCollection(long id) |
|
long |
getBlockDeletionStartTime() |
Time when block deletions will begin
|
BlockManager |
getBlockManager() |
|
java.lang.String |
getBlockPoolId() |
Gets the block pool id.
|
long |
getBlockPoolUsedSpace() |
Get the total space used by the block pools of this namenode.
|
BlocksWithLocations |
getBlocks(org.apache.hadoop.hdfs.protocol.DatanodeID datanode,
long size,
long minimumBlockSize,
long timeInterval,
org.apache.hadoop.fs.StorageType storageType) |
Return a list of blocks and their locations on the given
datanode whose
total size is size |
long |
getBlocksTotal() |
Get the total number of blocks in the system.
|
long |
getBytesInFuture() |
Gets number of bytes in the blocks in future generation stamps.
|
long |
getBytesInFutureECBlockGroups() |
Return total bytes of erasure coded future block groups.
|
long |
getBytesInFutureReplicatedBlocks() |
Return total bytes of future blocks.
|
long |
getCacheCapacity() |
Gets the total cache capacity of the datanode (in bytes).
|
CacheManager |
getCacheManager() |
|
long |
getCacheUsed() |
Gets the amount of cache used by the datanode (in bytes).
|
long |
getCapacityRemaining() |
Free (unused) storage capacity
|
float |
getCapacityRemainingGB() |
|
long |
getCapacityTotal() |
Total storage capacity
|
float |
getCapacityTotalGB() |
|
long |
getCapacityUsed() |
Used storage capacity
|
float |
getCapacityUsedGB() |
|
long |
getCapacityUsedNonDFS() |
|
java.lang.String |
getClusterId() |
Gets the cluster id.
|
java.lang.String |
getCompileInfo() |
Get the compilation information which contains date, user and branch.
|
long |
getCompleteBlocksTotal() |
Get the total number of COMPLETE blocks in the system.
|
long |
getCorruptECBlockGroups() |
Return count of erasure coded block groups that are corrupt.
|
java.lang.String |
getCorruptFiles() |
Get the list of corrupt files.
|
int |
getCorruptFilesCount() |
Get the length of the list of corrupt files.
|
long |
getCorruptReplicaBlocks() |
Returns number of blocks with corrupt replicas
|
long |
getCorruptReplicatedBlocks() |
Return corrupt blocks count.
|
java.util.concurrent.locks.ReentrantLock |
getCpLockForTests() |
|
long |
getCurrentTokensCount() |
Get the current number of delegation tokens in memory.
|
java.lang.String |
getDeadNodes() |
Returned information is a JSON representation of map with host name as the
key and value is a map of dead node attribute keys to its values.
|
java.lang.String |
getDecomNodes() |
Returned information is a JSON representation of map with host name as the
key and value is a map of decommissioning node attribute keys to its
values.
|
int |
getDistinctVersionCount() |
Get the number of distinct versions of live datanodes.
|
java.util.Map<java.lang.String,java.lang.Integer> |
getDistinctVersions() |
Get the number of live datanodes for each distinct versions.
|
org.apache.hadoop.hdfs.protocol.ECTopologyVerifierResult |
getECTopologyResultForPolicies(java.lang.String[] policyNames) |
Verifies if the given policies are supported in the given cluster setup.
|
FSEditLog |
getEditLog() |
|
EditLogTailer |
getEditLogTailer() |
|
int |
getEffectiveLayoutVersion() |
Returns the layout version in effect.
|
java.lang.String |
getEnabledEcPolicies() |
Get the enabled erasure coding policies separated with comma.
|
java.lang.String |
getEnteringMaintenanceNodes() |
Returned information is a JSON representation of map with host name of
nodes entering maintenance as the key and value as a map of various node
attributes to its values.
|
ErasureCodingPolicyManager |
getErasureCodingPolicyManager() |
|
long |
getEstimatedCapacityLostTotal() |
Returns an estimate of total capacity lost due to volume failures in bytes
across all live data nodes.
|
long |
getExcessBlocks() |
|
int |
getExpiredHeartbeats() |
|
org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries<org.apache.hadoop.hdfs.protocol.OpenFileEntry> |
getFilesBlockingDecom(long prevId,
java.lang.String path) |
|
long |
getFilesTotal() |
Total number of files and directories
|
long |
getFree() |
Gets total non-used raw bytes.
|
FSDirectory |
getFSDirectory() |
|
FSImage |
getFSImage() |
|
java.util.concurrent.locks.ReentrantReadWriteLock |
getFsLockForTests() |
|
int |
getFsLockQueueLength() |
Returns the length of the wait Queue for the FSNameSystemLock.
|
java.lang.String |
getFSState() |
The state of the file system: Safemode or Operational
|
HAContext |
getHAContext() |
|
java.lang.String |
getHAState() |
|
long |
getHighestPriorityLowRedundancyECBlocks() |
Gets the total number of erasure coded low redundancy blocks on the cluster
with the highest risk of loss.
|
long |
getHighestPriorityLowRedundancyReplicatedBlocks() |
Gets the total number of replicated low redundancy blocks on the cluster
with the highest risk of loss.
|
java.lang.String |
getJournalTransactionInfo() |
Get information about the transaction ID, including the last applied
transaction ID and the most recent checkpoint's transaction ID.
|
long |
getLastCheckpointTime() |
|
long |
getLastWrittenTransactionId() |
|
long |
getLazyPersistFileScrubberTS() |
Used ad hoc to check the timestamp of the last full cycle of the
lazyPersistFileScrubber daemon. |
long |
getLeaseRecheckIntervalMs() |
|
java.lang.String |
getLiveNodes() |
Returned information is a JSON representation of map with host name as the
key and value is a map of live node attribute keys to its values.
|
long |
getLowRedundancyBlocks() |
Get aggregated count of all blocks with low redundancy.
|
long |
getLowRedundancyECBlockGroups() |
Return count of erasure coded block groups with low redundancy.
|
long |
getLowRedundancyReplicatedBlocks() |
Return low redundancy blocks count.
|
int |
getMaxListOpenFilesResponses() |
|
long |
getMaxLockHoldToReleaseLeaseMs() |
|
long |
getMaxObjects() |
Return the maximum number of inodes in the file system
|
long |
getMillisSinceLastLoadedEdits() |
|
long |
getMissingBlocksCount() |
|
long |
getMissingECBlockGroups() |
Return count of erasure coded block groups that are missing.
|
long |
getMissingReplicatedBlocks() |
Return missing blocks count.
|
long |
getMissingReplicationOneBlocks() |
Return count of missing blocks with replication factor one.
|
long |
getMissingReplOneBlocksCount() |
|
java.lang.String |
getNameDirSize() |
Get namenode directory size.
|
java.lang.String |
getNameDirStatuses() |
Get status information about the directories storing image and edits logs
of the NN.
|
java.lang.String |
getNameJournalStatus() |
Get status information about the journals of the NN.
|
static java.util.Collection<java.net.URI> |
getNamespaceDirs(org.apache.hadoop.conf.Configuration conf) |
|
static java.util.List<java.net.URI> |
getNamespaceEditsDirs(org.apache.hadoop.conf.Configuration conf) |
Return an ordered list of edits directories to write to.
|
static java.util.List<java.net.URI> |
getNamespaceEditsDirs(org.apache.hadoop.conf.Configuration conf,
boolean includeShared) |
|
long |
getNNStartedTimeInMillis() |
Gets the NN start time in milliseconds.
|
java.lang.String |
getNodeUsage() |
Get Max, Median, Min and Standard Deviation of DataNodes usage.
|
long |
getNonDfsUsedSpace() |
Gets total used space by data nodes for non DFS purposes such as storing
temporary files on the local file system.
|
long |
getNumActiveClients() |
Get the total number of active clients holding lease in the system.
|
long |
getNumberOfMissingBlocks() |
Gets the total number of missing blocks on the cluster.
|
long |
getNumberOfMissingBlocksWithReplicationFactorOne() |
Gets the total number of missing blocks on the cluster with
replication factor 1.
|
long |
getNumberOfSnapshottableDirs() |
Gets the total number of snapshottable dirs in the system.
|
int |
getNumDeadDataNodes() |
Number of dead data nodes
|
int |
getNumDecomDeadDataNodes() |
Number of decommissioned dead data nodes
|
int |
getNumDecomLiveDataNodes() |
Number of decommissioned Live data nodes
|
int |
getNumDecommissioningDataNodes() |
Number of data nodes that are in the decommissioning state
|
int |
getNumEncryptionZones() |
Return the number of encryption zones in the system.
|
int |
getNumEnteringMaintenanceDataNodes() |
|
long |
getNumFilesUnderConstruction() |
Get the number of files under construction in the system.
|
int |
getNumInMaintenanceDeadDataNodes() |
|
int |
getNumInMaintenanceLiveDataNodes() |
|
int |
getNumInServiceLiveDataNodes() |
|
int |
getNumLiveDataNodes() |
Number of Live data nodes
|
long |
getNumOfReadLockLongHold() |
|
long |
getNumOfWriteLockLongHold() |
|
int |
getNumSnapshots() |
|
int |
getNumSnapshottableDirs() |
|
int |
getNumStaleDataNodes() |
Number of stale data nodes
|
int |
getNumStaleStorages() |
Storages are marked as "content stale" after the NN restarts or fails over,
until NN receives the first Heartbeat followed by the first Blockreport.
|
long |
getNumTimedOutPendingReconstructions() |
|
int |
getPendingDataNodeMessageCount() |
|
long |
getPendingDeletionBlocks() |
Number of blocks pending deletion
|
long |
getPendingDeletionECBlocks() |
Return count of erasure coded blocks that are pending deletion.
|
long |
getPendingDeletionReplicatedBlocks() |
Return count of blocks that are pending deletion.
|
long |
getPendingReconstructionBlocks() |
Get aggregated count of all blocks pending to be reconstructed.
|
long |
getPendingReplicationBlocks() |
Deprecated.
|
int |
getPendingSPSPaths() |
Returns the number of paths to be processed by storage policy satisfier.
|
float |
getPercentBlockPoolUsed() |
Get the total space used by the block pool as percentage of total capacity.
|
float |
getPercentRemaining() |
Gets the total remaining space by data nodes as percentage of total
capacity.
|
float |
getPercentUsed() |
Gets the total used space by data nodes as percentage of total capacity.
|
long |
getPostponedMisreplicatedBlocks() |
|
long |
getProvidedCapacity() |
Gets capacity of the provided storage mounted, in bytes.
|
long |
getProvidedCapacityTotal() |
Total PROVIDED storage capacity.
|
org.apache.hadoop.crypto.key.KeyProviderCryptoExtension |
getProvider() |
|
int |
getReadHoldCount() |
|
long |
getReadLockReportingThresholdMs() |
|
float |
getReconstructionQueuesInitProgress() |
Get the progress of the reconstruction queues initialisation.
|
static java.util.Collection<java.net.URI> |
getRequiredNamespaceEditsDirs(org.apache.hadoop.conf.Configuration conf) |
Get all edits dirs which are required.
|
org.apache.hadoop.ipc.RetryCache |
getRetryCache() |
|
org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo |
getRollingUpgradeInfo() |
|
org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo.Bean |
getRollingUpgradeStatus() |
Gets the RollingUpgrade information.
|
java.lang.String |
getSafemode() |
Gets the safemode status.
|
long |
getScheduledReplicationBlocks() |
Blocks scheduled for replication
|
org.apache.hadoop.fs.FsServerDefaults |
getServerDefaults() |
|
static java.util.List<java.net.URI> |
getSharedEditsDirs(org.apache.hadoop.conf.Configuration conf) |
Returns edit directories that are shared between primary and secondary.
|
org.apache.hadoop.hdfs.protocol.SnapshotStatus[] |
getSnapshotListing(java.lang.String snapshotRoot) |
Get the list of snapshots for a given snapshottable directory.
|
SnapshotManager |
getSnapshotManager() |
|
java.lang.String |
getSnapshotStats() |
The statistics of snapshots
|
org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus[] |
getSnapshottableDirListing() |
Get the list of snapshottable directories that are owned
by the current user.
|
java.lang.String |
getSoftwareVersion() |
Get the version of software running on the Namenode.
|
byte[] |
getSrcPathsHash(java.lang.String[] srcs) |
|
org.apache.hadoop.ha.HAServiceProtocol.HAServiceState |
getState() |
|
int |
getThreads() |
Gets the number of threads.
|
java.lang.String |
getTopUserOpCounts() |
Returns a nested JSON object listing the top users for different RPC
operations over tracked time windows.
|
long |
getTotal() |
Gets total raw bytes including non-dfs used space.
|
long |
getTotalBlocks() |
Gets the total numbers of blocks on the cluster.
|
long |
getTotalECBlockGroups() |
Return total number of erasure coded block groups.
|
int |
getTotalLoad() |
Total number of connections.
|
long |
getTotalReplicatedBlocks() |
Return total number of replicated blocks.
|
long |
getTotalSyncCount() |
Return total number of Sync Operations on FSEditLog.
|
java.lang.String |
getTotalSyncTimes() |
Return total time spent doing sync operations on FSEditLog.
|
long |
getTransactionsSinceLastCheckpoint() |
|
long |
getTransactionsSinceLastLogRoll() |
|
long |
getUnderReplicatedBlocks() |
Deprecated.
- Use
getLowRedundancyBlocks() instead. |
long |
getUsed() |
Gets the used space by data nodes.
|
java.lang.String |
getVerifyECWithTopologyResult() |
Verifies whether the cluster setup can support all enabled EC policies.
|
java.lang.String |
getVersion() |
Class representing Namenode information for JMX interfaces.
|
int |
getVolumeFailuresTotal() |
Number of failed data volumes across all live data nodes.
|
int |
getWriteHoldCount() |
|
long |
getWriteLockReportingThresholdMs() |
|
boolean |
hasReadLock() |
Check if the current thread holds read lock.
|
boolean |
hasWriteLock() |
Check if the current thread holds write lock.
|
boolean |
inTransitionToActive() |
|
boolean |
isHaEnabled() |
|
boolean |
isInSafeMode() |
Is the system in safe mode?
|
boolean |
isInSnapshot(long blockCollectionID) |
|
boolean |
isInStandbyState() |
|
boolean |
isInStartupSafeMode() |
Is the system in startup safe mode, i.e.
|
boolean |
isMetricsEnabled() |
|
boolean |
isNeedRollbackFsImage() |
|
boolean |
isRollingUpgrade() |
Is rolling upgrade in progress?
|
boolean |
isRunning() |
Is this name system running?
|
boolean |
isUpgradeFinalized() |
Checks if upgrade is finalized.
|
void |
logExpireDelegationToken(org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier id) |
Log the cancellation of expired tokens to edit logs.
|
void |
logUpdateMasterKey(org.apache.hadoop.security.token.delegation.DelegationKey key) |
Log the updateMasterKey operation to edit logs.
|
void |
processIncrementalBlockReport(org.apache.hadoop.hdfs.protocol.DatanodeID nodeID,
StorageReceivedDeletedBlocks srdb) |
|
void |
readLock() |
Acquire read lock.
|
void |
readLockInterruptibly() |
Acquire read lock, unless interrupted while waiting
|
void |
readUnlock() |
Release read lock.
|
void |
readUnlock(java.lang.String opName) |
Release read lock with operation name.
|
void |
readUnlock(java.lang.String opName,
java.util.function.Supplier<java.lang.String> lockReportInfoSupplier) |
|
void |
removeXattr(long id,
java.lang.String xattrName) |
Remove xAttr from the inode.
|
void |
setBlockManagerForTesting(BlockManager bm) |
|
void |
setCreatedRollbackImages(boolean created) |
|
void |
setEditLogTailerForTests(EditLogTailer tailer) |
|
void |
setFSDirectory(FSDirectory dir) |
Set the FSDirectory.
|
protected void |
setImageLoaded(boolean flag) |
|
void |
setMetricsEnabled(boolean metricsEnabled) |
|
void |
setNeedRollbackFsImage(boolean needRollbackFsImage) |
|
void |
setNNResourceChecker(NameNodeResourceChecker nnResourceChecker) |
|
void |
setReadLockReportingThresholdMs(long readLockReportingThresholdMs) |
|
void |
setWriteLockReportingThresholdMs(long writeLockReportingThresholdMs) |
|
void |
startSecretManagerIfNecessary() |
|
void |
verifyToken(org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier identifier,
byte[] password) |
Verifies that the given identifier and password are valid and match.
|
void |
writeLock() |
Acquire write lock.
|
void |
writeLockInterruptibly() |
Acquire write lock, unless interrupted while waiting
|
void |
writeUnlock() |
Release write lock.
|
void |
writeUnlock(java.lang.String opName) |
Release write lock with operation name.
|
void |
writeUnlock(java.lang.String opName,
boolean suppressWriteLockReport) |
|
void |
writeUnlock(java.lang.String opName,
java.util.function.Supplier<java.lang.String> lockReportInfoSupplier) |
public static final org.slf4j.Logger LOG
public static final java.lang.String DFS_NAMENODE_SNAPSHOT_TRASHROOT_ENABLED
public static final boolean DFS_NAMENODE_SNAPSHOT_TRASHROOT_ENABLED_DEFAULT
public static final org.slf4j.Logger AUDIT_LOG
key=value pairs to be written for the following properties:
ugi=<ugi in RPC>
ip=<remote IP>
cmd=<command>
src=<src path>
dst=<dst path (optional)>
perm=<permissions (optional)>
protected void setImageLoaded(boolean flag)
@VisibleForTesting public long getLazyPersistFileScrubberTS()
lazyPersistFileScrubber daemon. This is used by the Junit tests to block
until lazyPersistFileScrubberTS is updated. Returns lazyPersistFileScrubberTS if lazyPersistFileScrubber is not null.
public boolean isHaEnabled()
@VisibleForTesting public java.util.List<AuditLogger> getAuditLoggers()
@VisibleForTesting public org.apache.hadoop.ipc.RetryCache getRetryCache()
@VisibleForTesting public long getLeaseRecheckIntervalMs()
@VisibleForTesting public long getMaxLockHoldToReleaseLeaseMs()
public int getMaxListOpenFilesResponses()
@VisibleForTesting public org.apache.hadoop.crypto.key.KeyProviderCryptoExtension getProvider()
public void startSecretManagerIfNecessary()
startSecretManagerIfNecessary in interface Namesystempublic boolean inTransitionToActive()
inTransitionToActive in interface Namesystempublic void checkOperation(NameNode.OperationCategory op) throws org.apache.hadoop.ipc.StandbyException
org.apache.hadoop.ipc.StandbyExceptionpublic static java.util.Collection<java.net.URI> getNamespaceDirs(org.apache.hadoop.conf.Configuration conf)
public static java.util.Collection<java.net.URI> getRequiredNamespaceEditsDirs(org.apache.hadoop.conf.Configuration conf)
conf - the HDFS configuration.public static java.util.List<java.net.URI> getNamespaceEditsDirs(org.apache.hadoop.conf.Configuration conf)
throws java.io.IOException
java.io.IOException - if multiple shared edits directories are configuredpublic static java.util.List<java.net.URI> getNamespaceEditsDirs(org.apache.hadoop.conf.Configuration conf,
boolean includeShared)
throws java.io.IOException
java.io.IOExceptionpublic static java.util.List<java.net.URI> getSharedEditsDirs(org.apache.hadoop.conf.Configuration conf)
conf - configurationconfpublic void readLock()
RwLockpublic void readLockInterruptibly()
throws java.lang.InterruptedException
RwLockreadLockInterruptibly in interface RwLockjava.lang.InterruptedExceptionpublic void readUnlock()
RwLockreadUnlock in interface RwLockpublic void readUnlock(java.lang.String opName)
RwLockreadUnlock in interface RwLockopName - Option name.public void readUnlock(java.lang.String opName,
java.util.function.Supplier<java.lang.String> lockReportInfoSupplier)
public void writeLock()
RwLockpublic void writeLockInterruptibly()
throws java.lang.InterruptedException
RwLockwriteLockInterruptibly in interface RwLockjava.lang.InterruptedExceptionpublic void writeUnlock()
RwLockwriteUnlock in interface RwLockpublic void writeUnlock(java.lang.String opName)
RwLockwriteUnlock in interface RwLockopName - Option name.public void writeUnlock(java.lang.String opName,
boolean suppressWriteLockReport)
public void writeUnlock(java.lang.String opName,
java.util.function.Supplier<java.lang.String> lockReportInfoSupplier)
public boolean hasWriteLock()
RwLockhasWriteLock in interface RwLockpublic boolean hasReadLock()
RwLockhasReadLock in interface RwLockpublic int getReadHoldCount()
public int getWriteHoldCount()
public void cpLock()
public void cpLockInterruptibly()
throws java.lang.InterruptedException
java.lang.InterruptedExceptionpublic void cpUnlock()
public boolean isRunning()
NamesystemisRunning in interface Namesystempublic boolean isInStandbyState()
public BlocksWithLocations getBlocks(org.apache.hadoop.hdfs.protocol.DatanodeID datanode, long size, long minimumBlockSize, long timeInterval, org.apache.hadoop.fs.StorageType storageType) throws java.io.IOException
datanode whose
total size is size.
datanode - on which blocks are located
size - total size of blocks
minimumBlockSize - each block should be of this minimum block size
timeInterval - prefer to get blocks which belong to
the cold files accessed before the time interval
storageType - the given storage type StorageType
java.io.IOException
public org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries<org.apache.hadoop.hdfs.protocol.OpenFileEntry> getFilesBlockingDecom(long prevId,
java.lang.String path)
@VisibleForTesting
public org.apache.hadoop.fs.FsServerDefaults getServerDefaults()
throws org.apache.hadoop.ipc.StandbyException
org.apache.hadoop.ipc.StandbyExceptionpublic boolean isInSnapshot(long blockCollectionID)
isInSnapshot in interface Namesystempublic INodeFile getBlockCollection(long id)
getBlockCollection in interface Namesystempublic byte[] getSrcPathsHash(java.lang.String[] srcs)
public FSImage getFSImage()
public FSEditLog getEditLog()
@Metric({"MissingBlocks","Number of missing blocks"})
public long getMissingBlocksCount()
@Metric({"MissingReplOneBlocks","Number of missing blocks with replication factor 1"})
public long getMissingReplOneBlocksCount()
@Metric(value={"ExpiredHeartbeats","Number of expired heartbeats"},
type=COUNTER)
public int getExpiredHeartbeats()
@Metric({"TransactionsSinceLastCheckpoint","Number of transactions since last checkpoint"})
public long getTransactionsSinceLastCheckpoint()
@Metric({"TransactionsSinceLastLogRoll","Number of transactions since last edit log roll"})
public long getTransactionsSinceLastLogRoll()
@Metric({"LastWrittenTransactionId","Transaction ID written to the edit log"})
public long getLastWrittenTransactionId()
@Metric({"LastCheckpointTime","Time in milliseconds since the epoch of the last checkpoint"})
public long getLastCheckpointTime()
@Metric({"CapacityTotal","Total raw capacity of data nodes in bytes"})
public long getCapacityTotal()
FSNamesystemMBeangetCapacityTotal in interface FSNamesystemMBean@Metric({"CapacityTotalGB","Total raw capacity of data nodes in GB"})
public float getCapacityTotalGB()
@Metric({"CapacityUsed","Total used capacity across all data nodes in bytes"})
public long getCapacityUsed()
FSNamesystemMBeangetCapacityUsed in interface FSNamesystemMBean@Metric({"CapacityUsedGB","Total used capacity across all data nodes in GB"})
public float getCapacityUsedGB()
@Metric({"CapacityRemaining","Remaining capacity in bytes"})
public long getCapacityRemaining()
FSNamesystemMBeangetCapacityRemaining in interface FSNamesystemMBean@Metric({"ProvidedCapacityTotal","Total space used in PROVIDED storage in bytes"})
public long getProvidedCapacityTotal()
FSNamesystemMBeangetProvidedCapacityTotal in interface FSNamesystemMBean@Metric({"CapacityRemainingGB","Remaining capacity in GB"})
public float getCapacityRemainingGB()
@Metric({"CapacityUsedNonDFS","Total space used by data nodes for non DFS purposes in bytes"})
public long getCapacityUsedNonDFS()
@Metric public int getTotalLoad()
getTotalLoad in interface FSNamesystemMBean@Metric({"SnapshottableDirectories","Number of snapshottable directories"})
public int getNumSnapshottableDirs()
@Metric({"Snapshots","The number of snapshots"})
public int getNumSnapshots()
public java.lang.String getSnapshotStats()
FSNamesystemMBeangetSnapshotStats in interface FSNamesystemMBean@Metric({"NumEncryptionZones","The number of encryption zones"})
public int getNumEncryptionZones()
FSNamesystemMBeangetNumEncryptionZones in interface FSNamesystemMBean@Metric({"CurrentTokensCount","The number of delegation tokens"})
public long getCurrentTokensCount()
FSNamesystemMBeangetCurrentTokensCount in interface FSNamesystemMBean@Metric({"PendingSPSPaths","The number of paths to be processed by storage policy satisfier"})
public int getPendingSPSPaths()
FSNamesystemMBeangetPendingSPSPaths in interface FSNamesystemMBean@Metric public float getReconstructionQueuesInitProgress()
getReconstructionQueuesInitProgress in interface FSNamesystemMBean@Metric({"LockQueueLength","Number of threads waiting to acquire FSNameSystemLock"})
public int getFsLockQueueLength()
getFsLockQueueLength in interface FSNamesystemMBean@Metric(value={"ReadLockLongHoldCount","The number of time the read lock has been held for longer than the threshold"},
type=COUNTER)
public long getNumOfReadLockLongHold()
@Metric(value={"WriteLockLongHoldCount","The number of time the write lock has been held for longer than the threshold"},
type=COUNTER)
public long getNumOfWriteLockLongHold()
@Metric public long getBlocksTotal()
getBlocksTotal in interface FSNamesystemMBean@Metric({"NumFilesUnderConstruction","Number of files under construction"})
public long getNumFilesUnderConstruction()
@Metric({"NumActiveClients","Number of active clients holding lease"})
public long getNumActiveClients()
public long getCompleteBlocksTotal()
public boolean isInSafeMode()
SafeModeisInSafeMode in interface SafeModepublic boolean isInStartupSafeMode()
SafeModeisInStartupSafeMode in interface SafeModepublic void processIncrementalBlockReport(org.apache.hadoop.hdfs.protocol.DatanodeID nodeID,
StorageReceivedDeletedBlocks srdb)
throws java.io.IOException
java.io.IOExceptionpublic long getMaxObjects()
FSNamesystemMBeangetMaxObjects in interface FSNamesystemMBean@Metric public long getFilesTotal()
FSNamesystemMBeangetFilesTotal in interface FSNamesystemMBean@Metric @Deprecated public long getPendingReplicationBlocks()
getPendingReplicationBlocks in interface FSNamesystemMBean@Metric public long getPendingReconstructionBlocks()
getPendingReconstructionBlocks in interface FSNamesystemMBean@Metric @Deprecated public long getUnderReplicatedBlocks()
getLowRedundancyBlocks() instead.getUnderReplicatedBlocks in interface FSNamesystemMBean@Metric public long getLowRedundancyBlocks()
getLowRedundancyBlocks in interface FSNamesystemMBean@Metric({"CorruptBlocks","Number of blocks with corrupt replicas"})
public long getCorruptReplicaBlocks()
@Metric public long getScheduledReplicationBlocks()
FSNamesystemMBeangetScheduledReplicationBlocks in interface FSNamesystemMBean@Metric public long getPendingDeletionBlocks()
FSNamesystemMBeangetPendingDeletionBlocks in interface FSNamesystemMBean@Metric({"LowRedundancyReplicatedBlocks","Number of low redundancy replicated blocks"})
public long getLowRedundancyReplicatedBlocks()
ReplicatedBlocksMBeangetLowRedundancyReplicatedBlocks in interface ReplicatedBlocksMBean@Metric({"CorruptReplicatedBlocks","Number of corrupted replicated blocks"})
public long getCorruptReplicatedBlocks()
ReplicatedBlocksMBeangetCorruptReplicatedBlocks in interface ReplicatedBlocksMBean@Metric({"MissingReplicatedBlocks","Number of missing replicated blocks"})
public long getMissingReplicatedBlocks()
ReplicatedBlocksMBeangetMissingReplicatedBlocks in interface ReplicatedBlocksMBean@Metric({"MissingReplicationOneBlocks","Number of missing replicated blocks with replication factor 1"})
public long getMissingReplicationOneBlocks()
ReplicatedBlocksMBeangetMissingReplicationOneBlocks in interface ReplicatedBlocksMBean@Metric({"HighestPriorityLowRedundancyReplicatedBlocks","Number of replicated blocks which have the highest risk of loss."})
public long getHighestPriorityLowRedundancyReplicatedBlocks()
NameNodeMXBeangetHighestPriorityLowRedundancyReplicatedBlocks in interface NameNodeMXBean@Metric({"HighestPriorityLowRedundancyECBlocks","Number of erasure coded blocks which have the highest risk of loss."})
public long getHighestPriorityLowRedundancyECBlocks()
NameNodeMXBeangetHighestPriorityLowRedundancyECBlocks in interface NameNodeMXBean@Metric({"BytesInFutureReplicatedBlocks","Total bytes in replicated blocks with future generation stamp"})
public long getBytesInFutureReplicatedBlocks()
ReplicatedBlocksMBeangetBytesInFutureReplicatedBlocks in interface ReplicatedBlocksMBean@Metric({"PendingDeletionReplicatedBlocks","Number of replicated blocks that are pending deletion"})
public long getPendingDeletionReplicatedBlocks()
ReplicatedBlocksMBeangetPendingDeletionReplicatedBlocks in interface ReplicatedBlocksMBean@Metric({"TotalReplicatedBlocks","Total number of replicated blocks"})
public long getTotalReplicatedBlocks()
ReplicatedBlocksMBeangetTotalReplicatedBlocks in interface ReplicatedBlocksMBean@Metric({"LowRedundancyECBlockGroups","Number of erasure coded block groups with low redundancy"})
public long getLowRedundancyECBlockGroups()
ECBlockGroupsMBeangetLowRedundancyECBlockGroups in interface ECBlockGroupsMBean@Metric({"CorruptECBlockGroups","Number of erasure coded block groups that are corrupt"})
public long getCorruptECBlockGroups()
ECBlockGroupsMBeangetCorruptECBlockGroups in interface ECBlockGroupsMBean@Metric({"MissingECBlockGroups","Number of erasure coded block groups that are missing"})
public long getMissingECBlockGroups()
ECBlockGroupsMBeangetMissingECBlockGroups in interface ECBlockGroupsMBean@Metric({"BytesInFutureECBlockGroups","Total bytes in erasure coded block groups with future generation stamp"})
public long getBytesInFutureECBlockGroups()
ECBlockGroupsMBeangetBytesInFutureECBlockGroups in interface ECBlockGroupsMBean@Metric({"PendingDeletionECBlocks","Number of erasure coded blocks that are pending deletion"})
public long getPendingDeletionECBlocks()
ECBlockGroupsMBeangetPendingDeletionECBlocks in interface ECBlockGroupsMBean@Metric({"TotalECBlockGroups","Total number of erasure coded block groups"})
public long getTotalECBlockGroups()
ECBlockGroupsMBeangetTotalECBlockGroups in interface ECBlockGroupsMBean@Metric({"EnabledEcPolicies","Enabled erasure coding policies"})
public java.lang.String getEnabledEcPolicies()
getEnabledEcPolicies in interface ECBlockGroupsMBeanpublic long getBlockDeletionStartTime()
Specified by: getBlockDeletionStartTime in interface FSNamesystemMBean.
@Metric public long getExcessBlocks()
@Metric public long getNumTimedOutPendingReconstructions()
@Metric public long getPostponedMisreplicatedBlocks()
@Metric public int getPendingDataNodeMessageCount()
@Metric public java.lang.String getHAState()
@Metric public long getMillisSinceLastLoadedEdits()
@Metric public int getBlockCapacity()
public org.apache.hadoop.ha.HAServiceProtocol.HAServiceState getState()
public java.lang.String getFSState()
Specified by: getFSState in interface FSNamesystemMBean.
@Metric({"NumLiveDataNodes","Number of datanodes which are currently live"})
public int getNumLiveDataNodes()
Specified by: getNumLiveDataNodes in interface FSNamesystemMBean.
@Metric({"NumDeadDataNodes","Number of datanodes which are currently dead"})
public int getNumDeadDataNodes()
Specified by: getNumDeadDataNodes in interface FSNamesystemMBean.
@Metric({"NumDecomLiveDataNodes","Number of datanodes which have been decommissioned and are now live"})
public int getNumDecomLiveDataNodes()
Specified by: getNumDecomLiveDataNodes in interface FSNamesystemMBean.
@Metric({"NumDecomDeadDataNodes","Number of datanodes which have been decommissioned and are now dead"})
public int getNumDecomDeadDataNodes()
Specified by: getNumDecomDeadDataNodes in interface FSNamesystemMBean.
@Metric({"NumInServiceLiveDataNodes","Number of live datanodes which are currently in service"})
public int getNumInServiceLiveDataNodes()
Specified by: getNumInServiceLiveDataNodes in interface FSNamesystemMBean.
@Metric({"VolumeFailuresTotal","Total number of volume failures across all Datanodes"})
public int getVolumeFailuresTotal()
Specified by: getVolumeFailuresTotal in interface FSNamesystemMBean.
@Metric({"EstimatedCapacityLostTotal","An estimate of the total capacity lost due to volume failures"})
public long getEstimatedCapacityLostTotal()
Specified by: getEstimatedCapacityLostTotal in interface FSNamesystemMBean.
@Metric({"NumDecommissioningDataNodes","Number of datanodes in decommissioning state"})
public int getNumDecommissioningDataNodes()
Specified by: getNumDecommissioningDataNodes in interface FSNamesystemMBean.
@Metric({"StaleDataNodes","Number of datanodes marked stale due to delayed heartbeat"})
public int getNumStaleDataNodes()
Specified by: getNumStaleDataNodes in interface FSNamesystemMBean.
@Metric({"NumStaleStorages","Number of storages marked as content stale"})
public int getNumStaleStorages()
Specified by: getNumStaleStorages in interface FSNamesystemMBean.
public java.lang.String getTopUserOpCounts()
Specified by: getTopUserOpCounts in interface FSNamesystemMBean.
public void logUpdateMasterKey(org.apache.hadoop.security.token.delegation.DelegationKey key)
Parameters: key - new delegation key.
public void logExpireDelegationToken(org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier id)
Parameters: id - token identifier to cancel.
public java.lang.String getVersion()
Specified by: getVersion in interface NameNodeMXBean.
public long getUsed()
Specified by: getUsed in interface NameNodeMXBean.
public long getFree()
Specified by: getFree in interface NameNodeMXBean.
public long getTotal()
Specified by: getTotal in interface NameNodeMXBean.
public long getProvidedCapacity()
Specified by: getProvidedCapacity in interface NameNodeMXBean.
public java.lang.String getSafemode()
Specified by: getSafemode in interface NameNodeMXBean.
public boolean isUpgradeFinalized()
Specified by: isUpgradeFinalized in interface NameNodeMXBean.
public long getNonDfsUsedSpace()
Specified by: getNonDfsUsedSpace in interface NameNodeMXBean.
public float getPercentUsed()
Specified by: getPercentUsed in interface NameNodeMXBean.
public long getBlockPoolUsedSpace()
Specified by: getBlockPoolUsedSpace in interface NameNodeMXBean.
public float getPercentBlockPoolUsed()
Specified by: getPercentBlockPoolUsed in interface NameNodeMXBean.
public float getPercentRemaining()
Specified by: getPercentRemaining in interface NameNodeMXBean.
public long getCacheCapacity()
Specified by: getCacheCapacity in interface NameNodeMXBean.
public long getCacheUsed()
Specified by: getCacheUsed in interface NameNodeMXBean.
public long getTotalBlocks()
Specified by: getTotalBlocks in interface NameNodeMXBean.
public long getNumberOfMissingBlocks()
Specified by: getNumberOfMissingBlocks in interface NameNodeMXBean.
public long getNumberOfMissingBlocksWithReplicationFactorOne()
Specified by: getNumberOfMissingBlocksWithReplicationFactorOne in interface NameNodeMXBean.
public int getThreads()
Specified by: getThreads in interface NameNodeMXBean.
public java.lang.String getLiveNodes()
Specified by: getLiveNodes in interface NameNodeMXBean.
public java.lang.String getDeadNodes()
Specified by: getDeadNodes in interface NameNodeMXBean.
public java.lang.String getDecomNodes()
Specified by: getDecomNodes in interface NameNodeMXBean.
public java.lang.String getEnteringMaintenanceNodes()
Specified by: getEnteringMaintenanceNodes in interface NameNodeMXBean.
public java.lang.String getClusterId()
Specified by: getClusterId in interface NameNodeMXBean.
public java.lang.String getBlockPoolId()
Specified by: getBlockPoolId in interface NameNodeMXBean.
public java.lang.String getNameDirStatuses()
Specified by: getNameDirStatuses in interface NameNodeMXBean.
public java.lang.String getNodeUsage()
Specified by: getNodeUsage in interface NameNodeMXBean.
public java.lang.String getNameJournalStatus()
Specified by: getNameJournalStatus in interface NameNodeMXBean.
public java.lang.String getJournalTransactionInfo()
Specified by: getJournalTransactionInfo in interface NameNodeMXBean.
public long getNNStartedTimeInMillis()
Specified by: getNNStartedTimeInMillis in interface NameNodeMXBean.
public java.lang.String getCompileInfo()
Specified by: getCompileInfo in interface NameNodeMXBean.
public BlockManager getBlockManager()
@VisibleForTesting public void setBlockManagerForTesting(BlockManager bm)
public FSDirectory getFSDirectory()
Specified by: getFSDirectory in interface Namesystem.
@VisibleForTesting public void setFSDirectory(FSDirectory dir)
public CacheManager getCacheManager()
Specified by: getCacheManager in interface Namesystem.
public ErasureCodingPolicyManager getErasureCodingPolicyManager()
public HAContext getHAContext()
Specified by: getHAContext in interface Namesystem.
public java.lang.String getCorruptFiles()
Specified by: getCorruptFiles in interface NameNodeMXBean.
public int getCorruptFilesCount()
Specified by: getCorruptFilesCount in interface NameNodeMXBean.
public long getNumberOfSnapshottableDirs()
Specified by: getNumberOfSnapshottableDirs in interface NameNodeMXBean.
public int getDistinctVersionCount()
Specified by: getDistinctVersionCount in interface NameNodeMXBean.
public java.util.Map<java.lang.String,java.lang.Integer> getDistinctVersions()
Specified by: getDistinctVersions in interface NameNodeMXBean.
public java.lang.String getSoftwareVersion()
Specified by: getSoftwareVersion in interface NameNodeMXBean.
public java.lang.String getNameDirSize()
Specified by: getNameDirSize in interface NameNodeMXBean.
public void verifyToken(org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier identifier,
                        byte[] password)
                 throws org.apache.hadoop.security.token.SecretManager.InvalidToken,
                        org.apache.hadoop.ipc.RetriableException
Parameters: identifier - Token identifier. password - Password in the token.
Throws: org.apache.hadoop.security.token.SecretManager.InvalidToken, org.apache.hadoop.ipc.RetriableException
@VisibleForTesting public EditLogTailer getEditLogTailer()
@VisibleForTesting public void setEditLogTailerForTests(EditLogTailer tailer)
@VisibleForTesting public java.util.concurrent.locks.ReentrantReadWriteLock getFsLockForTests()
@VisibleForTesting public java.util.concurrent.locks.ReentrantLock getCpLockForTests()
@VisibleForTesting public void setNNResourceChecker(NameNodeResourceChecker nnResourceChecker)
public SnapshotManager getSnapshotManager()
public org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus[] getSnapshottableDirListing()
throws java.io.IOException
Throws: java.io.IOException - If an I/O error occurred.
public org.apache.hadoop.hdfs.protocol.SnapshotStatus[] getSnapshotListing(java.lang.String snapshotRoot)
                                                                    throws java.io.IOException
Throws: java.io.IOException
public void gcDeletedSnapshot(java.lang.String snapshotRoot,
                              java.lang.String snapshotName)
                       throws java.io.IOException
Throws: java.io.IOException
public void setCreatedRollbackImages(boolean created)
public org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo getRollingUpgradeInfo()
public boolean isNeedRollbackFsImage()
public void setNeedRollbackFsImage(boolean needRollbackFsImage)
public org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo.Bean getRollingUpgradeStatus()
Specified by: getRollingUpgradeStatus in interface NameNodeMXBean.
public boolean isRollingUpgrade()
public int getEffectiveLayoutVersion()
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION. During a rolling
upgrade, this can retain the layout version that was persisted to metadata
prior to starting the rolling upgrade, back to a lower bound defined in
NameNodeLayoutVersion.MINIMUM_COMPATIBLE_LAYOUT_VERSION. New
fsimage files and edit log segments will continue to be written with this
older layout version, so that the files are still readable by the old
software version if the admin chooses to downgrade.public org.apache.hadoop.hdfs.protocol.ECTopologyVerifierResult getECTopologyResultForPolicies(java.lang.String[] policyNames)
throws java.io.IOException
policyNames - name of policies.java.io.IOExceptionpublic void removeXattr(long id,
java.lang.String xattrName)
throws java.io.IOException
NamesystemremoveXattr in interface Namesystemjava.io.IOExceptionpublic void checkAndProvisionSnapshotTrashRoots()
Specified by: checkAndProvisionSnapshotTrashRoots in interface Namesystem.
@Metric({"TotalSyncCount","Total number of sync operations performed on edit logs"})
public long getTotalSyncCount()
Specified by: getTotalSyncCount in interface FSNamesystemMBean.
@Metric({"TotalSyncTimes","Total time spend in sync operation on various edit logs"})
public java.lang.String getTotalSyncTimes()
Specified by: getTotalSyncTimes in interface FSNamesystemMBean.
public long getBytesInFuture()
@Metric({"NumInMaintenanceLiveDataNodes","Number of live Datanodes which are in maintenance state"})
public int getNumInMaintenanceLiveDataNodes()
Specified by: getNumInMaintenanceLiveDataNodes in interface FSNamesystemMBean.
@Metric({"NumInMaintenanceDeadDataNodes","Number of dead Datanodes which are in maintenance state"})
public int getNumInMaintenanceDeadDataNodes()
Specified by: getNumInMaintenanceDeadDataNodes in interface FSNamesystemMBean.
@Metric({"NumEnteringMaintenanceDataNodes","Number of Datanodes that are entering the maintenance state"})
public int getNumEnteringMaintenanceDataNodes()
Specified by: getNumEnteringMaintenanceDataNodes in interface FSNamesystemMBean.
public java.lang.String getVerifyECWithTopologyResult()
Specified by: getVerifyECWithTopologyResult in interface NameNodeMXBean.
public void checkErasureCodingSupported(java.lang.String operationName)
                                 throws UnsupportedActionException
Parameters: operationName - the name of operation.
Throws: UnsupportedActionException - throws UAE if not supported.
public void setMetricsEnabled(boolean metricsEnabled)
@VisibleForTesting public boolean isMetricsEnabled()
public void setReadLockReportingThresholdMs(long readLockReportingThresholdMs)
@VisibleForTesting public long getReadLockReportingThresholdMs()
public void setWriteLockReportingThresholdMs(long writeLockReportingThresholdMs)
@VisibleForTesting public long getWriteLockReportingThresholdMs()
Copyright © 2008–2025 Apache Software Foundation. All rights reserved.