FSDatasetMBean, org.apache.hadoop.metrics2.MetricsSource@Private public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean
| Modifier and Type | Interface | Description |
|---|---|---|
static class |
FsDatasetSpi.Factory<D extends FsDatasetSpi<?>> |
A factory for creating
FsDatasetSpi objects. |
static class |
FsDatasetSpi.FsVolumeReferences |
It behaves as an unmodifiable list of FsVolume.
|
| Modifier and Type | Method | Description |
|---|---|---|
DataNodeLockManager<? extends AutoCloseDataSetLock> |
acquireDatasetLockManager() |
Acquire lock Manager for the data set.
|
void |
addBlockPool(java.lang.String bpid,
org.apache.hadoop.conf.Configuration conf) |
add new block pool ID
|
void |
addVolume(StorageLocation location,
java.util.List<NamespaceInfo> nsInfos) |
Add a new volume to the FsDataset.
|
void |
adjustCrcChannelPosition(org.apache.hadoop.hdfs.protocol.ExtendedBlock b,
ReplicaOutputStreams outs,
int checksumSize) |
Sets the file pointer of the checksum stream so that the last checksum
will be overwritten
|
ReplicaHandler |
append(org.apache.hadoop.hdfs.protocol.ExtendedBlock b,
long newGS,
long expectedBlockLen) |
Append to a finalized replica and returns the meta info of the replica.
|
void |
cache(java.lang.String bpid,
long[] blockIds) |
Caches the specified blocks
|
void |
checkAndUpdate(java.lang.String bpid,
FsVolumeSpi.ScanInfo info) |
Check whether the in-memory block record matches the block on the disk,
and, in case that they are not matched, update the record or mark it
as corrupted.
|
void |
checkBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock b,
long minLength,
HdfsServerConstants.ReplicaState state) |
Check if a block is valid.
|
void |
clearRollingUpgradeMarker(java.lang.String bpid) |
Delete the rolling upgrade marker file if it exists.
|
void |
clearTrash(java.lang.String bpid) |
Clear trash
|
boolean |
contains(org.apache.hadoop.hdfs.protocol.ExtendedBlock block) |
Does the dataset contain the block?
|
ReplicaInPipeline |
convertTemporaryToRbw(org.apache.hadoop.hdfs.protocol.ExtendedBlock temporary) |
Convert a temporary replica to an RBW.
|
ReplicaHandler |
createRbw(org.apache.hadoop.fs.StorageType storageType,
java.lang.String storageId,
org.apache.hadoop.hdfs.protocol.ExtendedBlock b,
boolean allowLazyPersist) |
Creates a RBW replica and returns the meta info of the replica
|
ReplicaHandler |
createRbw(org.apache.hadoop.fs.StorageType storageType,
java.lang.String storageId,
org.apache.hadoop.hdfs.protocol.ExtendedBlock b,
boolean allowLazyPersist,
long newGS) |
Creates a RBW replica and returns the meta info of the replica
|
ReplicaHandler |
createTemporary(org.apache.hadoop.fs.StorageType storageType,
java.lang.String storageId,
org.apache.hadoop.hdfs.protocol.ExtendedBlock b,
boolean isTransfer) |
Creates a temporary replica and returns the meta information of the replica.
|
java.util.Set<? extends Replica> |
deepCopyReplica(java.lang.String bpid) |
Deep copy the replica info belonging to given block pool.
|
void |
deleteBlockPool(java.lang.String bpid,
boolean force) |
Deletes the block pool directories.
|
void |
enableTrash(java.lang.String bpid) |
Enable 'trash' for the given dataset.
|
void |
finalizeBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock b,
boolean fsyncDir) |
Finalizes the block previously opened for writing using writeToBlock.
|
java.io.InputStream |
getBlockInputStream(org.apache.hadoop.hdfs.protocol.ExtendedBlock b,
long seekOffset) |
Returns an input stream at specified offset of the specified block.
|
org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo |
getBlockLocalPathInfo(org.apache.hadoop.hdfs.protocol.ExtendedBlock b) |
Get
BlockLocalPathInfo for the given block. |
java.util.Map<org.apache.hadoop.hdfs.server.protocol.DatanodeStorage,BlockListAsLongs> |
getBlockReports(java.lang.String bpid) |
Returns one block report per volume.
|
java.util.List<java.lang.Long> |
getCacheReport(java.lang.String bpid) |
Returns the cache report - the full list of cached block IDs of a
block pool.
|
java.util.List<ReplicaInfo> |
getFinalizedBlocks(java.lang.String bpid) |
Gets a list of references to the finalized blocks for the given block pool.
|
FsDatasetSpi.FsVolumeReferences |
getFsVolumeReferences() |
Returns a list of FsVolumes that hold reference counts.
|
long |
getLength(org.apache.hadoop.hdfs.protocol.ExtendedBlock b) |
Returns the specified block's on-disk length (excluding metadata).
|
LengthInputStream |
getMetaDataInputStream(org.apache.hadoop.hdfs.protocol.ExtendedBlock b) |
|
MountVolumeMap |
getMountVolumeMap() |
Get relationship between disk mount and FsVolume.
|
boolean |
getPinning(org.apache.hadoop.hdfs.protocol.ExtendedBlock block) |
Check whether the block was pinned
|
Replica |
getReplica(java.lang.String bpid,
long blockId) |
Deprecated.
|
java.lang.String |
getReplicaString(java.lang.String bpid,
long blockId) |
|
long |
getReplicaVisibleLength(org.apache.hadoop.hdfs.protocol.ExtendedBlock block) |
Get visible length of the specified replica.
|
org.apache.hadoop.hdfs.server.protocol.DatanodeStorage |
getStorage(java.lang.String storageUuid) |
|
org.apache.hadoop.hdfs.server.protocol.StorageReport[] |
getStorageReports(java.lang.String bpid) |
|
org.apache.hadoop.hdfs.protocol.Block |
getStoredBlock(java.lang.String bpid,
long blkid) |
|
ReplicaInputStreams |
getTmpInputStreams(org.apache.hadoop.hdfs.protocol.ExtendedBlock b,
long blkoff,
long ckoff) |
Returns an input stream at specified offset of the specified block.
|
V |
getVolume(org.apache.hadoop.hdfs.protocol.ExtendedBlock b) |
|
VolumeFailureSummary |
getVolumeFailureSummary() |
Returns info about volume failures.
|
java.util.Map<java.lang.String,java.lang.Object> |
getVolumeInfoMap() |
|
java.util.List<FsVolumeImpl> |
getVolumeList() |
Get the volume list.
|
void |
handleVolumeFailures(java.util.Set<FsVolumeSpi> failedVolumes) |
Check if all the data directories are healthy
|
boolean |
hasEnoughResource() |
Checks how many valid storage volumes there are in the DataNode.
|
ReplicaRecoveryInfo |
initReplicaRecovery(BlockRecoveryCommand.RecoveringBlock rBlock) |
Initialize a replica recovery.
|
void |
invalidate(java.lang.String bpid,
org.apache.hadoop.hdfs.protocol.Block[] invalidBlks) |
Invalidates the specified blocks.
|
void |
invalidateMissingBlock(java.lang.String bpid,
org.apache.hadoop.hdfs.protocol.Block block) |
Invalidate a block which is not found on disk.
|
boolean |
isCached(java.lang.String bpid,
long blockId) |
Determine if the specified block is cached.
|
boolean |
isDeletingBlock(java.lang.String bpid,
long blockId) |
Confirm whether the block is being deleted
|
boolean |
isValidBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock b) |
Is the block valid?
|
boolean |
isValidRbw(org.apache.hadoop.hdfs.protocol.ExtendedBlock b) |
Is the block a valid RBW?
|
ReplicaInfo |
moveBlockAcrossStorage(org.apache.hadoop.hdfs.protocol.ExtendedBlock block,
org.apache.hadoop.fs.StorageType targetStorageType,
java.lang.String storageId) |
Move block from one storage to another storage
|
ReplicaInfo |
moveBlockAcrossVolumes(org.apache.hadoop.hdfs.protocol.ExtendedBlock block,
FsVolumeSpi destination) |
Moves a given block from one volume to another volume.
|
void |
onCompleteLazyPersist(java.lang.String bpId,
long blockId,
long creationTime,
java.io.File[] savedFiles,
V targetVolume) |
Callback from RamDiskAsyncLazyPersistService upon async lazy persist task end
|
void |
onFailLazyPersist(java.lang.String bpId,
long blockId) |
Callback from RamDiskAsyncLazyPersistService upon async lazy persist task fail
|
ReplicaHandler |
recoverAppend(org.apache.hadoop.hdfs.protocol.ExtendedBlock b,
long newGS,
long expectedBlockLen) |
Recover a failed append to a finalized replica and returns the meta
info of the replica.
|
Replica |
recoverClose(org.apache.hadoop.hdfs.protocol.ExtendedBlock b,
long newGS,
long expectedBlockLen) |
Recover a failed pipeline close.
|
ReplicaHandler |
recoverRbw(org.apache.hadoop.hdfs.protocol.ExtendedBlock b,
long newGS,
long minBytesRcvd,
long maxBytesRcvd) |
Recovers a RBW replica and returns the meta info of the replica.
|
void |
removeVolumes(java.util.Collection<StorageLocation> volumes,
boolean clearFailure) |
Removes a collection of volumes from FsDataset.
|
default void |
setLastDirScannerFinishTime(long time) |
Set the last time in milliseconds when the directory scanner successfully ran.
|
void |
setPinning(org.apache.hadoop.hdfs.protocol.ExtendedBlock block) |
Set a block to be pinned on this datanode so that it cannot be moved
by Balancer/Mover.
|
void |
setRollingUpgradeMarker(java.lang.String bpid) |
Create a marker file indicating that a rolling upgrade is in progress.
|
void |
shutdown() |
Shutdown the FSDataset
|
void |
shutdownBlockPool(java.lang.String bpid) |
Shutdown and remove the block pool from underlying storage.
|
void |
submitBackgroundSyncFileRangeRequest(org.apache.hadoop.hdfs.protocol.ExtendedBlock block,
ReplicaOutputStreams outs,
long offset,
long nbytes,
int flags) |
submit a sync_file_range request to AsyncDiskService.
|
boolean |
trashEnabled(java.lang.String bpid) |
|
void |
uncache(java.lang.String bpid,
long[] blockIds) |
Uncaches the specified blocks
|
void |
unfinalizeBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock b) |
Unfinalizes the block previously opened for writing using writeToBlock.
|
Replica |
updateReplicaUnderRecovery(org.apache.hadoop.hdfs.protocol.ExtendedBlock oldBlock,
long recoveryId,
long newBlockId,
long newLength) |
Update replica's generation stamp and length and finalize it.
|
getBlockPoolUsed, getCacheCapacity, getCacheUsed, getCapacity, getDfsUsed, getEstimatedCapacityLostTotal, getFailedStorageLocations, getLastDirScannerFinishTime, getLastVolumeFailureDate, getNumBlocksCached, getNumBlocksFailedToCache, getNumBlocksFailedToUncache, getNumFailedVolumes, getPendingAsyncDeletions, getRemaining, getStorageInfoFsDatasetSpi.FsVolumeReferences getFsVolumeReferences()
FsDatasetSpi.FsVolumeReferences.close().void addVolume(StorageLocation location, java.util.List<NamespaceInfo> nsInfos) throws java.io.IOException
location - The storage location for the new volume.nsInfos - Namespace information for the new volume.java.io.IOExceptionvoid removeVolumes(java.util.Collection<StorageLocation> volumes, boolean clearFailure)
volumes - The paths of the volumes to be removed.clearFailure - set true to clear the failure information about the
volumes.org.apache.hadoop.hdfs.server.protocol.DatanodeStorage getStorage(java.lang.String storageUuid)
org.apache.hadoop.hdfs.server.protocol.StorageReport[] getStorageReports(java.lang.String bpid)
throws java.io.IOException
java.io.IOExceptionV getVolume(org.apache.hadoop.hdfs.protocol.ExtendedBlock b)
java.util.Map<java.lang.String,java.lang.Object> getVolumeInfoMap()
VolumeFailureSummary getVolumeFailureSummary()
java.util.List<ReplicaInfo> getFinalizedBlocks(java.lang.String bpid)
Callers of this function should call
acquireDatasetLockManager() to avoid blocks' status being
changed during list iteration.
void checkAndUpdate(java.lang.String bpid,
FsVolumeSpi.ScanInfo info)
throws java.io.IOException
java.io.IOExceptionLengthInputStream getMetaDataInputStream(org.apache.hadoop.hdfs.protocol.ExtendedBlock b) throws java.io.IOException
b - - the blockjava.io.IOExceptionlong getLength(org.apache.hadoop.hdfs.protocol.ExtendedBlock b)
throws java.io.IOException
java.io.IOException - on error@Deprecated Replica getReplica(java.lang.String bpid, long blockId)
FsDatasetSpijava.lang.String getReplicaString(java.lang.String bpid,
long blockId)
org.apache.hadoop.hdfs.protocol.Block getStoredBlock(java.lang.String bpid,
long blkid)
throws java.io.IOException
java.io.IOExceptionjava.io.InputStream getBlockInputStream(org.apache.hadoop.hdfs.protocol.ExtendedBlock b,
long seekOffset)
throws java.io.IOException
b - blockseekOffset - offset with in the block to seek tojava.io.IOExceptionReplicaInputStreams getTmpInputStreams(org.apache.hadoop.hdfs.protocol.ExtendedBlock b, long blkoff, long ckoff) throws java.io.IOException
java.io.IOExceptionReplicaHandler createTemporary(org.apache.hadoop.fs.StorageType storageType, java.lang.String storageId, org.apache.hadoop.hdfs.protocol.ExtendedBlock b, boolean isTransfer) throws java.io.IOException
b - blockjava.io.IOException - if an error occursReplicaHandler createRbw(org.apache.hadoop.fs.StorageType storageType, java.lang.String storageId, org.apache.hadoop.hdfs.protocol.ExtendedBlock b, boolean allowLazyPersist) throws java.io.IOException
b - blockjava.io.IOException - if an error occursReplicaHandler createRbw(org.apache.hadoop.fs.StorageType storageType, java.lang.String storageId, org.apache.hadoop.hdfs.protocol.ExtendedBlock b, boolean allowLazyPersist, long newGS) throws java.io.IOException
b - blockjava.io.IOException - if an error occursReplicaHandler recoverRbw(org.apache.hadoop.hdfs.protocol.ExtendedBlock b, long newGS, long minBytesRcvd, long maxBytesRcvd) throws java.io.IOException
b - blocknewGS - the new generation stamp for the replicaminBytesRcvd - the minimum number of bytes that the replica could havemaxBytesRcvd - the maximum number of bytes that the replica could havejava.io.IOException - if an error occursReplicaInPipeline convertTemporaryToRbw(org.apache.hadoop.hdfs.protocol.ExtendedBlock temporary) throws java.io.IOException
temporary - the temporary replica being convertedjava.io.IOExceptionReplicaHandler append(org.apache.hadoop.hdfs.protocol.ExtendedBlock b, long newGS, long expectedBlockLen) throws java.io.IOException
b - blocknewGS - the new generation stamp for the replicaexpectedBlockLen - the number of bytes the replica is expected to havejava.io.IOExceptionReplicaHandler recoverAppend(org.apache.hadoop.hdfs.protocol.ExtendedBlock b, long newGS, long expectedBlockLen) throws java.io.IOException
b - blocknewGS - the new generation stamp for the replicaexpectedBlockLen - the number of bytes the replica is expected to havejava.io.IOExceptionReplica recoverClose(org.apache.hadoop.hdfs.protocol.ExtendedBlock b, long newGS, long expectedBlockLen) throws java.io.IOException
b - blocknewGS - the new generation stamp for the replicaexpectedBlockLen - the number of bytes the replica is expected to havejava.io.IOExceptionvoid finalizeBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock b,
boolean fsyncDir)
throws java.io.IOException
b - Block to be finalizedfsyncDir - whether to sync the directory changes to durable device.java.io.IOExceptionorg.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException - if the replica can not be found when the
block is been finalized. For instance, the block resides on an HDFS volume
that has been removed.void unfinalizeBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock b)
throws java.io.IOException
java.io.IOExceptionjava.util.Map<org.apache.hadoop.hdfs.server.protocol.DatanodeStorage,BlockListAsLongs> getBlockReports(java.lang.String bpid)
bpid - Block Pool Idjava.util.List<java.lang.Long> getCacheReport(java.lang.String bpid)
bpid - Block Pool Idboolean contains(org.apache.hadoop.hdfs.protocol.ExtendedBlock block)
void checkBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock b,
long minLength,
HdfsServerConstants.ReplicaState state)
throws org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException,
UnexpectedReplicaStateException,
java.io.FileNotFoundException,
java.io.EOFException,
java.io.IOException
b - The block to check.minLength - The minimum length that the block must have. May be 0.state - If this is null, it is ignored. If it is non-null, we
will check that the replica has this state.org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException - If the replica is not foundUnexpectedReplicaStateException - If the replica is not in the
expected state.java.io.FileNotFoundException - If the block file is not found or there
was an error locating it.java.io.EOFException - If the replica length is too short.java.io.IOException - May be thrown from the methods called.boolean isValidBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock b)
boolean isValidRbw(org.apache.hadoop.hdfs.protocol.ExtendedBlock b)
void invalidate(java.lang.String bpid,
org.apache.hadoop.hdfs.protocol.Block[] invalidBlks)
throws java.io.IOException
bpid - Block pool IdinvalidBlks - - the blocks to be invalidatedjava.io.IOExceptionvoid invalidateMissingBlock(java.lang.String bpid,
org.apache.hadoop.hdfs.protocol.Block block)
throws java.io.IOException
bpid - the block pool ID.block - The block to be invalidated.java.io.IOExceptionvoid cache(java.lang.String bpid,
long[] blockIds)
bpid - Block pool idblockIds - - block ids to cachevoid uncache(java.lang.String bpid,
long[] blockIds)
bpid - Block pool idblockIds - - blocks ids to uncacheboolean isCached(java.lang.String bpid,
long blockId)
bpid - Block pool idblockId - - block idvoid handleVolumeFailures(java.util.Set<FsVolumeSpi> failedVolumes)
failedVolumes - void shutdown()
void adjustCrcChannelPosition(org.apache.hadoop.hdfs.protocol.ExtendedBlock b,
ReplicaOutputStreams outs,
int checksumSize)
throws java.io.IOException
b - blockouts - The streams for the data file and checksum filechecksumSize - number of bytes each checksum hasjava.io.IOExceptionboolean hasEnoughResource()
long getReplicaVisibleLength(org.apache.hadoop.hdfs.protocol.ExtendedBlock block)
throws java.io.IOException
java.io.IOExceptionReplicaRecoveryInfo initReplicaRecovery(BlockRecoveryCommand.RecoveringBlock rBlock) throws java.io.IOException
java.io.IOExceptionReplica updateReplicaUnderRecovery(org.apache.hadoop.hdfs.protocol.ExtendedBlock oldBlock, long recoveryId, long newBlockId, long newLength) throws java.io.IOException
java.io.IOExceptionvoid addBlockPool(java.lang.String bpid,
org.apache.hadoop.conf.Configuration conf)
throws java.io.IOException
bpid - Block pool Idconf - Configurationjava.io.IOExceptionvoid shutdownBlockPool(java.lang.String bpid)
bpid - Block pool Id to be removedvoid deleteBlockPool(java.lang.String bpid,
boolean force)
throws java.io.IOException
bpid - BlockPool Id to be deleted.force - If force is false, directories are deleted only if no
block files exist for the block pool, otherwise entire
directory for the blockpool is deleted along with its contents.java.io.IOExceptionorg.apache.hadoop.hdfs.protocol.BlockLocalPathInfo getBlockLocalPathInfo(org.apache.hadoop.hdfs.protocol.ExtendedBlock b)
throws java.io.IOException
BlockLocalPathInfo for the given block.java.io.IOExceptionvoid enableTrash(java.lang.String bpid)
void clearTrash(java.lang.String bpid)
boolean trashEnabled(java.lang.String bpid)
void setRollingUpgradeMarker(java.lang.String bpid)
throws java.io.IOException
java.io.IOExceptionvoid clearRollingUpgradeMarker(java.lang.String bpid)
throws java.io.IOException
bpid - java.io.IOExceptionvoid submitBackgroundSyncFileRangeRequest(org.apache.hadoop.hdfs.protocol.ExtendedBlock block,
ReplicaOutputStreams outs,
long offset,
long nbytes,
int flags)
void onCompleteLazyPersist(java.lang.String bpId,
long blockId,
long creationTime,
java.io.File[] savedFiles,
V targetVolume)
void onFailLazyPersist(java.lang.String bpId,
long blockId)
ReplicaInfo moveBlockAcrossStorage(org.apache.hadoop.hdfs.protocol.ExtendedBlock block, org.apache.hadoop.fs.StorageType targetStorageType, java.lang.String storageId) throws java.io.IOException
java.io.IOExceptionvoid setPinning(org.apache.hadoop.hdfs.protocol.ExtendedBlock block)
throws java.io.IOException
java.io.IOExceptionboolean getPinning(org.apache.hadoop.hdfs.protocol.ExtendedBlock block)
throws java.io.IOException
java.io.IOExceptionboolean isDeletingBlock(java.lang.String bpid,
long blockId)
ReplicaInfo moveBlockAcrossVolumes(org.apache.hadoop.hdfs.protocol.ExtendedBlock block, FsVolumeSpi destination) throws java.io.IOException
block - - ExtendedBlockdestination - - Destination volumejava.io.IOExceptionDataNodeLockManager<? extends AutoCloseDataSetLock> acquireDatasetLockManager()
java.util.Set<? extends Replica> deepCopyReplica(java.lang.String bpid) throws java.io.IOException
bpid - Specified block pool id.java.io.IOExceptionMountVolumeMap getMountVolumeMap() throws java.io.IOException
java.io.IOExceptionjava.util.List<FsVolumeImpl> getVolumeList()
default void setLastDirScannerFinishTime(long time)
time - the last time in milliseconds when the directory scanner successfully ran.Copyright © 2008–2025 Apache Software Foundation. All rights reserved.