BlockStatsMXBean@Private public class BlockManager extends java.lang.Object implements BlockStatsMXBean
DFSConfigKeys.DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_KEY. This number has to be <=
DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY.
For erasure encoding, # of min live replicas for maintenance is
BlockInfoStriped.getRealDataBlockNum().
Another safety property is to satisfy the block placement policy. While the
policy is configurable, the replicas the policy is applied to are the live
replicas + maintenance replicas.

| Modifier and Type | Field | Description |
|---|---|---|
static org.slf4j.Logger |
blockLog |
|
int |
defaultReplication |
Default number of replicas
|
static org.slf4j.Logger |
LOG |
|
short |
maxReplication |
The maximum number of replicas allowed for a block
|
short |
minReplication |
Minimum copies needed or else write is disallowed
|
org.apache.hadoop.hdfs.server.blockmanagement.LowRedundancyBlocks |
neededReconstruction |
Store set of Blocks that need to be replicated 1 or more times.
|
| Constructor | Description |
|---|---|
BlockManager(Namesystem namesystem,
boolean haEnabled,
org.apache.hadoop.conf.Configuration conf) |
| Modifier and Type | Method | Description |
|---|---|---|
void |
activate(org.apache.hadoop.conf.Configuration conf,
long blockTotal) |
|
void |
addBlock(DatanodeStorageInfo storageInfo,
org.apache.hadoop.hdfs.protocol.Block block,
java.lang.String delHint) |
The given node is reporting that it received a certain block.
|
BlockInfo |
addBlockCollection(BlockInfo block,
BlockCollection bc) |
|
BlockInfo |
addBlockCollectionWithCheck(BlockInfo block,
BlockCollection bc) |
Do some check when adding a block to blocksmap.
|
boolean |
addBlockRecoveryAttempt(BlockInfo b) |
Checks whether a recovery attempt has been made for the given block.
|
void |
addBLocksToMarkedDeleteQueue(java.util.List<BlockInfo> blockInfos) |
|
void |
addExpectedReplicasToPending(BlockInfo blk) |
If IBR is not sent from expected locations yet, add the datanodes to
pendingReconstruction in order to keep RedundancyMonitor from scheduling
the block.
|
short |
adjustReplication(short replication) |
Clamp the specified replication between the minimum and the maximum
replication levels.
|
boolean |
checkBlockReportLease(BlockReportContext context,
org.apache.hadoop.hdfs.protocol.DatanodeID nodeID) |
Check block report lease.
|
void |
checkRedundancy(BlockCollection bc) |
Check sufficient redundancy of the blocks in the collection.
|
void |
checkSafeMode() |
|
DatanodeStorageInfo[] |
chooseTarget4AdditionalDatanode(java.lang.String src,
int numAdditionalNodes,
org.apache.hadoop.net.Node clientnode,
java.util.List<DatanodeStorageInfo> chosen,
java.util.Set<org.apache.hadoop.net.Node> excludes,
long blocksize,
byte storagePolicyID,
org.apache.hadoop.hdfs.protocol.BlockType blockType) |
Choose target for getting additional datanodes for an existing pipeline.
|
DatanodeStorageInfo[] |
chooseTarget4NewBlock(java.lang.String src,
int numOfReplicas,
org.apache.hadoop.net.Node client,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
long blocksize,
java.util.List<java.lang.String> favoredNodes,
byte storagePolicyID,
org.apache.hadoop.hdfs.protocol.BlockType blockType,
org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy ecPolicy,
java.util.EnumSet<org.apache.hadoop.hdfs.AddBlockFlag> flags) |
Choose target datanodes for creating a new block.
|
DatanodeStorageInfo[] |
chooseTarget4WebHDFS(java.lang.String src,
DatanodeDescriptor clientnode,
java.util.Set<org.apache.hadoop.net.Node> excludes,
long blocksize) |
Choose target for WebHDFS redirection.
|
void |
clear() |
|
void |
clearQueues() |
Clear all queues that hold decisions previously made by
this NameNode.
|
void |
close() |
|
boolean |
commitOrCompleteLastBlock(BlockCollection bc,
org.apache.hadoop.hdfs.protocol.Block commitBlock,
INodesInPath iip) |
Commit the last block of the file and mark it as complete if it
meets the minimum redundancy requirement
|
boolean |
containsInvalidateBlock(org.apache.hadoop.hdfs.protocol.DatanodeInfo dn,
org.apache.hadoop.hdfs.protocol.Block block) |
|
org.apache.hadoop.hdfs.protocol.LocatedBlock |
convertLastBlockToUnderConstruction(BlockCollection bc,
long bytesToRemove) |
Convert the last block of the file to an under construction block.
|
NumberReplicas |
countNodes(BlockInfo b) |
Return the number of nodes hosting a given block, grouped
by the state of those replicas.
|
org.apache.hadoop.hdfs.protocol.LocatedBlocks |
createLocatedBlocks(BlockInfo[] blocks,
long fileSizeExcludeBlocksUnderConstruction,
boolean isFileUnderConstruction,
long offset,
long length,
boolean needBlockToken,
boolean inSnapshot,
org.apache.hadoop.fs.FileEncryptionInfo feInfo,
org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy ecPolicy) |
Create a LocatedBlocks.
|
boolean |
createSPSManager(org.apache.hadoop.conf.Configuration conf,
java.lang.String spsMode) |
Create SPS manager instance.
|
void |
disableSPS() |
Nullify SPS manager as this feature is disabled fully.
|
void |
enqueueBlockOp(java.lang.Runnable action) |
|
void |
findAndMarkBlockAsCorrupt(org.apache.hadoop.hdfs.protocol.ExtendedBlock blk,
org.apache.hadoop.hdfs.protocol.DatanodeInfo dn,
java.lang.String storageID,
java.lang.String reason) |
Mark the block belonging to datanode as corrupt
|
void |
flushBlockOps() |
|
void |
forceCompleteBlock(BlockInfo block) |
Force the given block in the given file to be marked as complete,
regardless of whether enough replicas are present.
|
org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey |
generateDataEncryptionKey() |
|
int |
getActiveBlockCount() |
|
BlockIdManager |
getBlockIdManager() |
|
ExportedBlockKeys |
getBlockKeys() |
|
int |
getBlockOpQueueLength() |
|
BlockPlacementPolicy |
getBlockPlacementPolicy() |
|
java.lang.String |
getBlockPoolId() |
|
org.apache.hadoop.hdfs.server.blockmanagement.BlockReportLeaseManager |
getBlockReportLeaseManager() |
|
int |
getBlocksReplWorkMultiplier() |
Returns the current setting for blocksReplWorkMultiplier, set by
DFSConfigKeys.
DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION. |
BlocksWithLocations |
getBlocksWithLocations(org.apache.hadoop.hdfs.protocol.DatanodeID datanode,
long size,
long minBlockSize,
long timeInterval,
org.apache.hadoop.fs.StorageType storageType) |
Get all blocks with location information from a datanode.
|
BlockTokenSecretManager |
getBlockTokenSecretManager() |
get the BlockTokenSecretManager
|
long |
getBytesInFuture() |
|
long |
getBytesInFutureECBlockGroups() |
|
long |
getBytesInFutureReplicatedBlocks() |
|
int |
getCapacity() |
|
long |
getCorruptBlocks() |
Used by metrics.
|
long |
getCorruptECBlockGroups() |
Used by metrics.
|
java.lang.String |
getCorruptReason(org.apache.hadoop.hdfs.protocol.Block block,
DatanodeDescriptor node) |
Get reason for certain corrupted replicas for a given block and a given dn.
|
java.util.Iterator<BlockInfo> |
getCorruptReplicaBlockIterator() |
Return an iterator over the set of blocks for which there are no replicas.
|
long |
getCorruptReplicaBlocksCount() |
Used by metrics
|
java.util.Collection<DatanodeDescriptor> |
getCorruptReplicas(org.apache.hadoop.hdfs.protocol.Block block) |
Get the replicas which are corrupt for a given block.
|
DatanodeManager |
getDatanodeManager() |
|
int |
getDefaultStorageNum(BlockInfo block) |
|
long |
getExcessBlocksCount() |
Used by metrics
|
boolean |
getExcludeSlowNodesEnabled(org.apache.hadoop.hdfs.protocol.BlockType blockType) |
|
short |
getExpectedLiveRedundancyNum(BlockInfo block,
NumberReplicas numberReplicas) |
|
short |
getExpectedRedundancyNum(BlockInfo block) |
|
long |
getHighestPriorityECBlockCount() |
|
long |
getHighestPriorityReplicatedBlockCount() |
|
long |
getLastRedundancyMonitorTS() |
Used as an ad hoc check of the time stamp of the last full cycle of the
redundancyThread. |
long |
getLowRedundancyBlocks() |
Used by metrics.
|
long |
getLowRedundancyBlocksCount() |
Used by metrics
|
long |
getLowRedundancyECBlockGroups() |
Used by metrics.
|
java.util.concurrent.ConcurrentLinkedQueue<java.util.List<BlockInfo>> |
getMarkedDeleteQueue() |
|
int |
getMaxReplicationStreams() |
Returns the current setting for maxReplicationStreams, which is set by
DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY. |
int |
getMinBlocksForWrite(org.apache.hadoop.hdfs.protocol.BlockType blockType) |
|
short |
getMinReplication() |
|
short |
getMinReplicationToBeInMaintenance() |
|
short |
getMinStorageNum(BlockInfo block) |
|
long |
getMissingBlocks() |
Used by metrics.
|
long |
getMissingBlocksCount() |
|
long |
getMissingECBlockGroups() |
Used by metrics.
|
long |
getMissingReplicationOneBlocks() |
Used by metrics.
|
long |
getMissingReplOneBlocksCount() |
|
long |
getNumTimedOutPendingReconstructions() |
Used by metrics.
|
int |
getPendingDataNodeMessageCount() |
Used by metrics
|
long |
getPendingDeletionBlocksCount() |
Used by metrics
|
long |
getPendingDeletionECBlocks() |
Used by metrics.
|
long |
getPendingDeletionReplicatedBlocks() |
Used by metrics.
|
long |
getPendingReconstructionBlocksCount() |
Used by metrics
|
int |
getPendingSPSPaths() |
Used by metrics.
|
long |
getPostponedMisreplicatedBlocksCount() |
Used by metrics
|
long |
getProvidedCapacity() |
|
ProvidedStorageMap |
getProvidedStorageMap() |
|
int |
getReconstructionPendingTimeout() |
Returns the current setting for pendingReconstruction timeout, set by
DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY. |
float |
getReconstructionQueuesInitProgress() |
Get the progress of the reconstruction queues initialisation
|
int |
getReplicationStreamsHardLimit() |
Returns the current setting for maxReplicationStreamsHardLimit, set by
DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY. |
java.lang.String |
getSafeModeTip() |
|
long |
getScheduledReplicationBlocksCount() |
Used by metrics
|
StoragePolicySatisfyManager |
getSPSManager() |
|
long |
getStartupDelayBlockDeletionInMs() |
Used by metrics
|
org.apache.hadoop.hdfs.protocol.BlockStoragePolicy[] |
getStoragePolicies() |
|
org.apache.hadoop.hdfs.protocol.BlockStoragePolicy |
getStoragePolicy(byte policyId) |
|
org.apache.hadoop.hdfs.protocol.BlockStoragePolicy |
getStoragePolicy(java.lang.String policyName) |
|
BlockStoragePolicySuite |
getStoragePolicySuite() |
|
java.lang.Iterable<DatanodeStorageInfo> |
getStorages(org.apache.hadoop.hdfs.protocol.Block block) |
|
DatanodeStorageInfo[] |
getStorages(BlockInfo block) |
|
java.util.Map<org.apache.hadoop.fs.StorageType,StorageTypeStats> |
getStorageTypeStats() |
The statistics of storage types.
|
BlockInfo |
getStoredBlock(org.apache.hadoop.hdfs.protocol.Block block) |
|
BlockPlacementPolicy |
getStriptedBlockPlacementPolicy() |
|
int |
getTotalBlocks() |
|
long |
getTotalECBlockGroups() |
Used by metrics.
|
long |
getTotalReplicatedBlocks() |
Used by metrics.
|
int |
getUnderReplicatedNotMissingBlocks() |
Return number of low redundancy blocks but not missing blocks.
|
boolean |
hasMinStorage(BlockInfo block) |
|
boolean |
hasMinStorage(BlockInfo block,
int liveNum) |
|
boolean |
hasNonEcBlockUsingStripedID() |
Get the value of whether there are any non-EC blocks using StripedID.
|
void |
initializeReplQueues() |
Initialize replication queues.
|
boolean |
isExcess(DatanodeDescriptor dn,
BlockInfo blk) |
|
boolean |
isInSafeMode() |
|
boolean |
isLegacyBlock(org.apache.hadoop.hdfs.protocol.Block block) |
|
boolean |
isPopulatingReplQueues() |
Check if replication queues are to be populated
|
boolean |
isSufficientlyReplicated(BlockInfo b) |
Check if a block is replicated to at least the minimum replication.
|
boolean |
leaveSafeMode(boolean force) |
|
void |
markBlockReplicasAsCorrupt(org.apache.hadoop.hdfs.protocol.Block oldBlock,
BlockInfo block,
long oldGenerationStamp,
long oldNumBytes,
DatanodeStorageInfo[] newStorages) |
Mark block replicas as corrupt except those on the storages in
newStorages list.
|
void |
metaSave(java.io.PrintWriter out) |
Dump meta data to out.
|
static org.apache.hadoop.hdfs.protocol.LocatedBlock |
newLocatedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock eb,
BlockInfo info,
DatanodeStorageInfo[] locs,
long offset) |
|
static org.apache.hadoop.hdfs.protocol.LocatedBlock |
newLocatedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock b,
DatanodeStorageInfo[] storages,
long startOffset,
boolean corrupt) |
|
static org.apache.hadoop.hdfs.protocol.LocatedStripedBlock |
newLocatedStripedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock b,
DatanodeStorageInfo[] storages,
byte[] indices,
long startOffset,
boolean corrupt) |
|
long |
nextBlockId(org.apache.hadoop.hdfs.protocol.BlockType blockType) |
|
long |
nextGenerationStamp(boolean legacyBlock) |
|
int |
numCorruptReplicas(org.apache.hadoop.hdfs.protocol.Block block) |
|
int |
numOfUnderReplicatedBlocks() |
|
void |
processAllPendingDNMessages() |
Process any remaining queued datanode messages after entering
active state.
|
void |
processIncrementalBlockReport(org.apache.hadoop.hdfs.protocol.DatanodeID nodeID,
StorageReceivedDeletedBlocks srdb) |
The given node is reporting incremental information about some blocks.
|
void |
processMisReplicatedBlocks() |
For each block in the name-node verify whether it belongs to any file,
extra or low redundancy.
|
int |
processMisReplicatedBlocks(java.util.List<BlockInfo> blocks) |
Schedule replication work for a specified list of mis-replicated
blocks and return total number of blocks scheduled for replication.
|
void |
processQueuedMessagesForBlock(org.apache.hadoop.hdfs.protocol.Block b) |
Try to process any messages that were previously queued for the given
block.
|
boolean |
processReport(org.apache.hadoop.hdfs.protocol.DatanodeID nodeID,
org.apache.hadoop.hdfs.server.protocol.DatanodeStorage storage,
BlockListAsLongs newReport,
BlockReportContext context) |
The given storage is reporting all its blocks.
|
void |
refreshBlockPlacementPolicy(org.apache.hadoop.conf.Configuration conf) |
|
void |
registerDatanode(DatanodeRegistration nodeReg) |
|
void |
removeBlock(BlockInfo block) |
|
void |
removeBlockFromMap(BlockInfo block) |
|
void |
removeBlocksAndUpdateSafemodeTotal(INode.BlocksMapUpdateInfo blocks) |
Removes the blocks from blocksmap and updates the safemode blocks total.
|
void |
removeBRLeaseIfNeeded(org.apache.hadoop.hdfs.protocol.DatanodeID nodeID,
BlockReportContext context) |
|
void |
removeStoredBlock(BlockInfo storedBlock,
DatanodeDescriptor node) |
Modify (block-->datanode) map.
|
long |
requestBlockReportLeaseId(DatanodeRegistration nodeReg) |
|
<T> T |
runBlockOp(java.util.concurrent.Callable<T> action) |
|
void |
setBlockPoolId(java.lang.String blockPoolId) |
|
void |
setBlockRecoveryTimeout(long blockRecoveryTimeout) |
|
void |
setBlocksReplWorkMultiplier(int newVal) |
Updates the value used for blocksReplWorkMultiplier, set by
DFSConfigKeys.
DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION initially. |
void |
setBlockToken(org.apache.hadoop.hdfs.protocol.LocatedBlock b,
org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier.AccessMode mode) |
Generate a block token for the located block.
|
void |
setBlockTotal(long total) |
Set the total number of blocks in the system.
|
void |
setExcessRedundancyTimeout(long timeout) |
Sets the timeout (in seconds) for excess redundancy blocks, if the provided timeout is
less than or equal to 0, the default value is used (converted to milliseconds).
|
void |
setExcessRedundancyTimeoutCheckLimit(long limit) |
Sets the limit number of blocks for checking excess redundancy timeout.
|
void |
setExcludeSlowNodesEnabled(boolean enable) |
|
void |
setInitializedReplQueues(boolean v) |
|
void |
setMaxReplicationStreams(int newVal) |
|
void |
setMaxReplicationStreams(int newVal,
boolean ensurePositiveInt) |
Updates the value used for maxReplicationStreams, which is set by
DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY initially. |
void |
setMinBlocksForWrite(int minBlocksForWrite) |
|
void |
setPostponeBlocksFromFuture(boolean postpone) |
|
void |
setReconstructionPendingTimeout(int newVal) |
Updates the value used for pendingReconstruction timeout, which is set by
DFSConfigKeys.
DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY initially. |
void |
setReplication(short oldRepl,
short newRepl,
BlockInfo b) |
Set replication for the blocks.
|
void |
setReplicationStreamsHardLimit(int newVal) |
Updates the value used for replicationStreamsHardLimit, which is set by
DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY
initially. |
boolean |
shouldPopulateReplQueues() |
|
void |
shutdown() |
|
void |
stopReconstructionInitializer() |
|
void |
successfulBlockRecovery(BlockInfo block) |
Notification of a successful block recovery.
|
void |
updateLastBlock(BlockInfo lastBlock,
org.apache.hadoop.hdfs.protocol.ExtendedBlock newBlock) |
|
void |
verifyReplication(java.lang.String src,
short replication,
java.lang.String clientName) |
Check whether the replication parameter is within the range
determined by system configuration and throw an exception if it's not.
|
public static final org.slf4j.Logger LOG
public static final org.slf4j.Logger blockLog
public final org.apache.hadoop.hdfs.server.blockmanagement.LowRedundancyBlocks neededReconstruction
public final short maxReplication
public final short minReplication
public final int defaultReplication
public BlockManager(Namesystem namesystem, boolean haEnabled, org.apache.hadoop.conf.Configuration conf) throws java.io.IOException
java.io.IOExceptionpublic long getPendingReconstructionBlocksCount()
public long getLowRedundancyBlocksCount()
public long getCorruptReplicaBlocksCount()
public long getScheduledReplicationBlocksCount()
public long getPendingDeletionBlocksCount()
public long getStartupDelayBlockDeletionInMs()
public long getExcessBlocksCount()
public long getPostponedMisreplicatedBlocksCount()
public int getPendingDataNodeMessageCount()
public long getNumTimedOutPendingReconstructions()
public long getLowRedundancyBlocks()
public long getCorruptBlocks()
public long getMissingBlocks()
public long getMissingReplicationOneBlocks()
public long getPendingDeletionReplicatedBlocks()
public long getTotalReplicatedBlocks()
public long getLowRedundancyECBlockGroups()
public long getCorruptECBlockGroups()
public long getMissingECBlockGroups()
public long getPendingDeletionECBlocks()
public long getTotalECBlockGroups()
public int getPendingSPSPaths()
public org.apache.hadoop.hdfs.protocol.BlockStoragePolicy getStoragePolicy(java.lang.String policyName)
public org.apache.hadoop.hdfs.protocol.BlockStoragePolicy getStoragePolicy(byte policyId)
public org.apache.hadoop.hdfs.protocol.BlockStoragePolicy[] getStoragePolicies()
public void setBlockPoolId(java.lang.String blockPoolId)
public java.lang.String getBlockPoolId()
public BlockStoragePolicySuite getStoragePolicySuite()
@VisibleForTesting public BlockTokenSecretManager getBlockTokenSecretManager()
public void activate(org.apache.hadoop.conf.Configuration conf,
long blockTotal)
public void close()
public DatanodeManager getDatanodeManager()
@VisibleForTesting public BlockPlacementPolicy getBlockPlacementPolicy()
@VisibleForTesting public BlockPlacementPolicy getStriptedBlockPlacementPolicy()
public void refreshBlockPlacementPolicy(org.apache.hadoop.conf.Configuration conf)
public void metaSave(java.io.PrintWriter out)
public int getMaxReplicationStreams()
DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY.@VisibleForTesting
public void setMaxReplicationStreams(int newVal,
boolean ensurePositiveInt)
DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY initially.newVal - - Must be a positive non-zero integer.public void setMaxReplicationStreams(int newVal)
public int getReplicationStreamsHardLimit()
DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY.public void setReplicationStreamsHardLimit(int newVal)
DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY
initially.newVal - - Must be a positive non-zero integer.public int getBlocksReplWorkMultiplier()
DFSConfigKeys.
DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION.public void setBlocksReplWorkMultiplier(int newVal)
DFSConfigKeys.
DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION initially.newVal - - Must be a positive non-zero integer.public void setReconstructionPendingTimeout(int newVal)
DFSConfigKeys.
DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY initially.newVal - - Must be a positive non-zero integer.public int getReconstructionPendingTimeout()
DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY.public int getDefaultStorageNum(BlockInfo block)
public short getMinReplication()
public short getMinStorageNum(BlockInfo block)
public short getMinReplicationToBeInMaintenance()
public boolean hasMinStorage(BlockInfo block)
public boolean hasMinStorage(BlockInfo block, int liveNum)
public boolean commitOrCompleteLastBlock(BlockCollection bc, org.apache.hadoop.hdfs.protocol.Block commitBlock, INodesInPath iip) throws java.io.IOException
bc - block collectioncommitBlock - - contains client reported block length and generationiip - - INodes in path to bcjava.io.IOException - if the block does not have at least a minimal number
of replicas reported from data-nodes.public void addExpectedReplicasToPending(BlockInfo blk)
public void forceCompleteBlock(BlockInfo block) throws java.io.IOException
java.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.LocatedBlock convertLastBlockToUnderConstruction(BlockCollection bc, long bytesToRemove) throws java.io.IOException
The block is converted only if the file has blocks and the last one
is a partial block (its size is less than the preferred block size).
The converted block is returned to the client.
The client uses the returned block locations to form the data pipeline
for this block.
The methods returns null if there is no partial block at the end.
The client is supposed to allocate a new block with the next call.
bc - filebytesToRemove - num of bytes to remove from blockjava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.LocatedBlocks createLocatedBlocks(BlockInfo[] blocks, long fileSizeExcludeBlocksUnderConstruction, boolean isFileUnderConstruction, long offset, long length, boolean needBlockToken, boolean inSnapshot, org.apache.hadoop.fs.FileEncryptionInfo feInfo, org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy ecPolicy) throws java.io.IOException
java.io.IOExceptionpublic ExportedBlockKeys getBlockKeys()
public void setBlockToken(org.apache.hadoop.hdfs.protocol.LocatedBlock b,
org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier.AccessMode mode)
throws java.io.IOException
java.io.IOExceptionpublic org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey generateDataEncryptionKey()
public short adjustReplication(short replication)
public void verifyReplication(java.lang.String src,
short replication,
java.lang.String clientName)
throws java.io.IOException
src - the path to the target filereplication - the requested replication factorclientName - the name of the client node making the requestjava.io.IOException - thrown if the requested replication factor
is out of boundspublic boolean isSufficientlyReplicated(BlockInfo b)
public BlocksWithLocations getBlocksWithLocations(org.apache.hadoop.hdfs.protocol.DatanodeID datanode, long size, long minBlockSize, long timeInterval, org.apache.hadoop.fs.StorageType storageType) throws UnregisteredNodeException
UnregisteredNodeExceptionpublic void findAndMarkBlockAsCorrupt(org.apache.hadoop.hdfs.protocol.ExtendedBlock blk,
org.apache.hadoop.hdfs.protocol.DatanodeInfo dn,
java.lang.String storageID,
java.lang.String reason)
throws java.io.IOException
blk - Block to be marked as corruptdn - Datanode which holds the corrupt replicastorageID - if known, null otherwise.reason - a textual reason why the block should be marked corrupt,
for logging purposesjava.io.IOExceptionpublic void setPostponeBlocksFromFuture(boolean postpone)
public int getUnderReplicatedNotMissingBlocks()
public DatanodeStorageInfo[] chooseTarget4WebHDFS(java.lang.String src, DatanodeDescriptor clientnode, java.util.Set<org.apache.hadoop.net.Node> excludes, long blocksize)
public DatanodeStorageInfo[] chooseTarget4AdditionalDatanode(java.lang.String src, int numAdditionalNodes, org.apache.hadoop.net.Node clientnode, java.util.List<DatanodeStorageInfo> chosen, java.util.Set<org.apache.hadoop.net.Node> excludes, long blocksize, byte storagePolicyID, org.apache.hadoop.hdfs.protocol.BlockType blockType)
public DatanodeStorageInfo[] chooseTarget4NewBlock(java.lang.String src, int numOfReplicas, org.apache.hadoop.net.Node client, java.util.Set<org.apache.hadoop.net.Node> excludedNodes, long blocksize, java.util.List<java.lang.String> favoredNodes, byte storagePolicyID, org.apache.hadoop.hdfs.protocol.BlockType blockType, org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy ecPolicy, java.util.EnumSet<org.apache.hadoop.hdfs.AddBlockFlag> flags) throws java.io.IOException
java.io.IOException - if the number of targets < minimum replication.BlockPlacementPolicy.chooseTarget(String, int, Node,
Set, long, List, BlockStoragePolicy, EnumSet)public long requestBlockReportLeaseId(DatanodeRegistration nodeReg)
public void registerDatanode(DatanodeRegistration nodeReg) throws java.io.IOException
java.io.IOExceptionpublic void setBlockTotal(long total)
public boolean isInSafeMode()
public java.lang.String getSafeModeTip()
public boolean leaveSafeMode(boolean force)
public void checkSafeMode()
public long getBytesInFuture()
public long getBytesInFutureReplicatedBlocks()
public long getBytesInFutureECBlockGroups()
public void removeBlocksAndUpdateSafemodeTotal(INode.BlocksMapUpdateInfo blocks)
blocks - An instance of INode.BlocksMapUpdateInfo which contains a
list of blocks that need to be removed from blocksMappublic long getProvidedCapacity()
public boolean checkBlockReportLease(BlockReportContext context, org.apache.hadoop.hdfs.protocol.DatanodeID nodeID) throws UnregisteredNodeException
UnregisteredNodeExceptionpublic boolean processReport(org.apache.hadoop.hdfs.protocol.DatanodeID nodeID,
org.apache.hadoop.hdfs.server.protocol.DatanodeStorage storage,
BlockListAsLongs newReport,
BlockReportContext context)
throws java.io.IOException
java.io.IOExceptionpublic void removeBRLeaseIfNeeded(org.apache.hadoop.hdfs.protocol.DatanodeID nodeID,
BlockReportContext context)
throws java.io.IOException
java.io.IOExceptionpublic void setExcessRedundancyTimeout(long timeout)
timeout - The time (in seconds) to set as the excess redundancy block timeout.public void setExcessRedundancyTimeoutCheckLimit(long limit)
limit - The limit number of blocks used to check for excess redundancy timeout.public void markBlockReplicasAsCorrupt(org.apache.hadoop.hdfs.protocol.Block oldBlock,
BlockInfo block,
long oldGenerationStamp,
long oldNumBytes,
DatanodeStorageInfo[] newStorages)
throws java.io.IOException
java.io.IOExceptionpublic void processQueuedMessagesForBlock(org.apache.hadoop.hdfs.protocol.Block b)
throws java.io.IOException
java.io.IOExceptionpublic void processAllPendingDNMessages()
throws java.io.IOException
java.io.IOExceptionpublic void processMisReplicatedBlocks()
public void stopReconstructionInitializer()
public float getReconstructionQueuesInitProgress()
public boolean hasNonEcBlockUsingStripedID()
public int processMisReplicatedBlocks(java.util.List<BlockInfo> blocks)
blocks - A list of blocks for which replication work needs to
be scheduled.public void setReplication(short oldRepl,
short newRepl,
BlockInfo b)
public void removeStoredBlock(BlockInfo storedBlock, DatanodeDescriptor node)
@VisibleForTesting public void addBlock(DatanodeStorageInfo storageInfo, org.apache.hadoop.hdfs.protocol.Block block, java.lang.String delHint) throws java.io.IOException
java.io.IOExceptionpublic void processIncrementalBlockReport(org.apache.hadoop.hdfs.protocol.DatanodeID nodeID,
StorageReceivedDeletedBlocks srdb)
throws java.io.IOException
java.io.IOExceptionpublic NumberReplicas countNodes(BlockInfo b)
NumberReplicas.liveReplicas(). If the replica on a
decommissioning node is the same as the replica on a live node, the
internal block for this replica is live, not decommissioning.public boolean isExcess(DatanodeDescriptor dn, BlockInfo blk)
public int getActiveBlockCount()
public DatanodeStorageInfo[] getStorages(BlockInfo block)
public java.lang.Iterable<DatanodeStorageInfo> getStorages(org.apache.hadoop.hdfs.protocol.Block block)
public int getTotalBlocks()
public void removeBlock(BlockInfo block)
public BlockInfo getStoredBlock(org.apache.hadoop.hdfs.protocol.Block block)
public void updateLastBlock(BlockInfo lastBlock, org.apache.hadoop.hdfs.protocol.ExtendedBlock newBlock)
public void checkRedundancy(BlockCollection bc)
@VisibleForTesting
public boolean containsInvalidateBlock(org.apache.hadoop.hdfs.protocol.DatanodeInfo dn,
org.apache.hadoop.hdfs.protocol.Block block)
public short getExpectedLiveRedundancyNum(BlockInfo block, NumberReplicas numberReplicas)
public short getExpectedRedundancyNum(BlockInfo block)
public long getMissingBlocksCount()
public long getMissingReplOneBlocksCount()
public long getHighestPriorityReplicatedBlockCount()
public long getHighestPriorityECBlockCount()
public BlockInfo addBlockCollection(BlockInfo block, BlockCollection bc)
public BlockInfo addBlockCollectionWithCheck(BlockInfo block, BlockCollection bc)
public int numCorruptReplicas(org.apache.hadoop.hdfs.protocol.Block block)
public void removeBlockFromMap(BlockInfo block)
public int getCapacity()
public java.util.Iterator<BlockInfo> getCorruptReplicaBlockIterator()
public java.util.Collection<DatanodeDescriptor> getCorruptReplicas(org.apache.hadoop.hdfs.protocol.Block block)
public java.lang.String getCorruptReason(org.apache.hadoop.hdfs.protocol.Block block,
DatanodeDescriptor node)
public int numOfUnderReplicatedBlocks()
@VisibleForTesting public long getLastRedundancyMonitorTS()
redundancyThread. This is used by the JUnit tests to block until
lastRedundancyCycleTS is updated.lastRedundancyCycleTS.public void clearQueues()
public static org.apache.hadoop.hdfs.protocol.LocatedBlock newLocatedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock b,
DatanodeStorageInfo[] storages,
long startOffset,
boolean corrupt)
public static org.apache.hadoop.hdfs.protocol.LocatedStripedBlock newLocatedStripedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock b,
DatanodeStorageInfo[] storages,
byte[] indices,
long startOffset,
boolean corrupt)
public static org.apache.hadoop.hdfs.protocol.LocatedBlock newLocatedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock eb,
BlockInfo info,
DatanodeStorageInfo[] locs,
long offset)
throws java.io.IOException
java.io.IOExceptionpublic void shutdown()
public void clear()
public org.apache.hadoop.hdfs.server.blockmanagement.BlockReportLeaseManager getBlockReportLeaseManager()
public java.util.Map<org.apache.hadoop.fs.StorageType,StorageTypeStats> getStorageTypeStats()
BlockStatsMXBeangetStorageTypeStats in interface BlockStatsMXBeanpublic void initializeReplQueues()
public boolean isPopulatingReplQueues()
public void setInitializedReplQueues(boolean v)
public boolean shouldPopulateReplQueues()
public void enqueueBlockOp(java.lang.Runnable action)
throws java.io.IOException
java.io.IOExceptionpublic <T> T runBlockOp(java.util.concurrent.Callable<T> action)
throws java.io.IOException
java.io.IOExceptionpublic void successfulBlockRecovery(BlockInfo block)
block - for which the recovery succeededpublic boolean addBlockRecoveryAttempt(BlockInfo b)
b - block for which recovery is being attempted@VisibleForTesting
public void flushBlockOps()
throws java.io.IOException
java.io.IOExceptionpublic int getBlockOpQueueLength()
public BlockIdManager getBlockIdManager()
@VisibleForTesting public java.util.concurrent.ConcurrentLinkedQueue<java.util.List<BlockInfo>> getMarkedDeleteQueue()
public void addBLocksToMarkedDeleteQueue(java.util.List<BlockInfo> blockInfos)
public long nextGenerationStamp(boolean legacyBlock)
throws java.io.IOException
java.io.IOExceptionpublic boolean isLegacyBlock(org.apache.hadoop.hdfs.protocol.Block block)
public long nextBlockId(org.apache.hadoop.hdfs.protocol.BlockType blockType)
@VisibleForTesting public void setBlockRecoveryTimeout(long blockRecoveryTimeout)
@VisibleForTesting public ProvidedStorageMap getProvidedStorageMap()
public boolean createSPSManager(org.apache.hadoop.conf.Configuration conf,
java.lang.String spsMode)
conf - configurationspsMode - satisfier modepublic void disableSPS()
public StoragePolicySatisfyManager getSPSManager()
public void setExcludeSlowNodesEnabled(boolean enable)
@VisibleForTesting public boolean getExcludeSlowNodesEnabled(org.apache.hadoop.hdfs.protocol.BlockType blockType)
public void setMinBlocksForWrite(int minBlocksForWrite)
@VisibleForTesting public int getMinBlocksForWrite(org.apache.hadoop.hdfs.protocol.BlockType blockType)
Copyright © 2008–2025 Apache Software Foundation. All rights reserved.