All Implemented Interfaces: Checkable<FsVolumeSpi.VolumeCheckContext,VolumeCheckResult>, FsVolumeSpi

@Private @VisibleForTesting
public class FsVolumeImpl extends java.lang.Object implements FsVolumeSpi
FsDatasetImpl object for synchronization.

| Modifier and Type | Class | Description |
|---|---|---|
| static class | FsVolumeImpl.BlockDirFilter | Filter for block file names stored on the file system volumes. |
FsVolumeSpi.BlockIterator, FsVolumeSpi.ScanInfo, FsVolumeSpi.VolumeCheckContext

| Modifier and Type | Field | Description |
|---|---|---|
| protected java.util.concurrent.ThreadPoolExecutor | cacheExecutor | Per-volume worker pool that processes new blocks to cache. |
| protected long | configuredCapacity | |
| static org.slf4j.Logger | LOG | |

| Modifier and Type | Method | Description |
|---|---|---|
ReplicaInfo |
activateSavedReplica(java.lang.String bpid,
ReplicaInfo replicaInfo,
org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaTracker.RamDiskReplica replicaState) |
|
ReplicaInPipeline |
append(java.lang.String bpid,
ReplicaInfo replicaInfo,
long newGS,
long estimateBlockLen) |
|
VolumeCheckResult |
check(FsVolumeSpi.VolumeCheckContext ignored) |
Query the health of this object.
|
void |
compileReport(java.lang.String bpid,
java.util.Collection<FsVolumeSpi.ScanInfo> report,
DirectoryScanner.ReportCompiler reportCompiler) |
Compile a list of
FsVolumeSpi.ScanInfo for the blocks in
the block pool with id bpid. |
ReplicaInPipeline |
convertTemporaryToRbw(org.apache.hadoop.hdfs.protocol.ExtendedBlock b,
ReplicaInfo temp) |
|
java.io.File[] |
copyBlockToLazyPersistLocation(java.lang.String bpId,
long blockId,
long genStamp,
ReplicaInfo replicaInfo,
int smallBufferSize,
org.apache.hadoop.conf.Configuration conf) |
|
ReplicaInPipeline |
createRbw(org.apache.hadoop.hdfs.protocol.ExtendedBlock b) |
|
ReplicaInPipeline |
createTemporary(org.apache.hadoop.hdfs.protocol.ExtendedBlock b) |
|
long |
getAvailable() |
Calculate the available space of the filesystem, excluding space reserved
for non-HDFS and space reserved for RBW.
|
java.net.URI |
getBaseURI() |
|
java.lang.String[] |
getBlockPoolList() |
Make a deep copy of the list of currently active BPIDs.
|
java.util.Map<java.lang.String,BlockPoolSlice> |
getBlockPoolSlices() |
|
long |
getCapacity() |
Return either the configured capacity of the file system if configured; or
the capacity of the file system excluding space reserved for non-HDFS.
|
java.io.File |
getCurrentDir() |
|
FsDatasetSpi<? extends FsVolumeSpi> |
getDataset() |
Get the FSDatasetSpi which this volume is a part of.
|
long |
getDfsUsed() |
|
long |
getDfUsed() |
This function is only used for Mock.
|
FileIoProvider |
getFileIoProvider() |
|
java.io.File |
getFinalizedDir(java.lang.String bpid) |
|
protected java.io.File |
getLazyPersistDir(java.lang.String bpid) |
|
DataNodeVolumeMetrics |
getMetrics() |
|
long |
getNonDfsUsed() |
Unplanned Non-DFS usage, i.e.
|
protected java.io.File |
getRbwDir(java.lang.String bpid) |
|
int |
getReferenceCount() |
|
long |
getReservedForReplicas() |
|
java.lang.String |
getStorageID() |
|
StorageLocation |
getStorageLocation() |
|
org.apache.hadoop.fs.StorageType |
getStorageType() |
|
protected java.io.File |
getTmpDir(java.lang.String bpid) |
|
org.apache.hadoop.fs.DF |
getUsageStats(org.apache.hadoop.conf.Configuration conf) |
|
ReplicaInfo |
hardLinkBlockToTmpLocation(org.apache.hadoop.hdfs.protocol.ExtendedBlock block,
ReplicaInfo replicaInfo) |
|
void |
incrNumBlocks(java.lang.String bpid) |
|
protected java.util.concurrent.ThreadPoolExecutor |
initializeCacheExecutor(java.io.File parent) |
|
boolean |
isRAMStorage() |
Returns true if the volume is backed by RAM storage.
|
boolean |
isTransientStorage() |
Returns true if the volume is NOT backed by persistent storage.
|
FsVolumeSpi.BlockIterator |
loadBlockIterator(java.lang.String bpid,
java.lang.String name) |
Load a saved block iterator.
|
byte[] |
loadLastPartialChunkChecksum(java.io.File blockFile,
java.io.File metaFile) |
Load last partial chunk checksum from checksum file.
|
ReplicaInfo |
moveBlockToTmpLocation(org.apache.hadoop.hdfs.protocol.ExtendedBlock block,
ReplicaInfo replicaInfo,
int smallBufferSize,
org.apache.hadoop.conf.Configuration conf) |
|
FsVolumeSpi.BlockIterator |
newBlockIterator(java.lang.String bpid,
java.lang.String name) |
Create a new block iterator.
|
static java.lang.String |
nextSorted(java.util.List<java.lang.String> arr,
java.lang.String prev) |
|
FsVolumeReference |
obtainReference() |
Obtain a reference object that had increased 1 reference count of the
volume.
|
void |
releaseLockedMemory(long bytesToRelease) |
Release reserved memory for an RBW block written to transient storage
i.e.
|
void |
releaseReservedSpace(long bytesToRelease) |
Release disk space previously reserved for block opened for write.
|
void |
reserveSpaceForReplica(long bytesToReserve) |
Reserve disk space for a block (RBW or Re-replicating)
so a writer does not run out of space before the block is full.
|
void |
resolveDuplicateReplicas(java.lang.String bpid,
ReplicaInfo memBlockInfo,
ReplicaInfo diskBlockInfo,
org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReplicaMap volumeMap) |
|
void |
setCapacityForTesting(long capacity) |
This function MUST NOT be used outside of tests.
|
java.lang.String |
toString() |
|
ReplicaInPipeline |
updateRURCopyOnTruncate(ReplicaInfo rur,
java.lang.String bpid,
long newBlockId,
long recoveryId,
long newlength) |
public static final org.slf4j.Logger LOG
protected volatile long configuredCapacity
protected java.util.concurrent.ThreadPoolExecutor cacheExecutor
protected java.util.concurrent.ThreadPoolExecutor initializeCacheExecutor(java.io.File parent)
public FsVolumeReference obtainReference() throws java.nio.channels.ClosedChannelException
Description copied from interface: FsVolumeSpi
Release the returned FsVolumeReference to decrease
the reference count on the volume.
Specified by: obtainReference in interface FsVolumeSpi
Throws: java.nio.channels.ClosedChannelException

@VisibleForTesting public int getReferenceCount()
@VisibleForTesting public java.io.File getCurrentDir()
protected java.io.File getRbwDir(java.lang.String bpid)
throws java.io.IOException
Throws: java.io.IOException

protected java.io.File getLazyPersistDir(java.lang.String bpid)
throws java.io.IOException
Throws: java.io.IOException

protected java.io.File getTmpDir(java.lang.String bpid)
throws java.io.IOException
Throws: java.io.IOException

@VisibleForTesting
public long getDfsUsed()
throws java.io.IOException
Throws: java.io.IOException

@VisibleForTesting public long getCapacity()
@VisibleForTesting public void setCapacityForTesting(long capacity)
Parameters: capacity -

public long getAvailable()
throws java.io.IOException
Specified by: getAvailable in interface FsVolumeSpi
Throws: java.io.IOException

@VisibleForTesting public long getDfUsed()

public long getNonDfsUsed()
throws java.io.IOException
Throws: java.io.IOException

@VisibleForTesting public long getReservedForReplicas()
public java.util.Map<java.lang.String,BlockPoolSlice> getBlockPoolSlices()
public java.net.URI getBaseURI()
Specified by: getBaseURI in interface FsVolumeSpi

public org.apache.hadoop.fs.DF getUsageStats(org.apache.hadoop.conf.Configuration conf)
Specified by: getUsageStats in interface FsVolumeSpi

public StorageLocation getStorageLocation()
Specified by: getStorageLocation in interface FsVolumeSpi
Returns: the StorageLocation to the volume

public boolean isTransientStorage()
Description copied from interface: FsVolumeSpi
Specified by: isTransientStorage in interface FsVolumeSpi

public boolean isRAMStorage()
Description copied from interface: FsVolumeSpi
Specified by: isRAMStorage in interface FsVolumeSpi

@VisibleForTesting
public java.io.File getFinalizedDir(java.lang.String bpid)
throws java.io.IOException
Throws: java.io.IOException

public java.lang.String[] getBlockPoolList()
Specified by: getBlockPoolList in interface FsVolumeSpi

public void reserveSpaceForReplica(long bytesToReserve)
Description copied from interface: FsVolumeSpi
Specified by: reserveSpaceForReplica in interface FsVolumeSpi

public void releaseReservedSpace(long bytesToRelease)
Description copied from interface: FsVolumeSpi
Specified by: releaseReservedSpace in interface FsVolumeSpi

public void releaseLockedMemory(long bytesToRelease)
Description copied from interface: FsVolumeSpi
Specified by: releaseLockedMemory in interface FsVolumeSpi

@VisibleForTesting
public static java.lang.String nextSorted(java.util.List<java.lang.String> arr,
java.lang.String prev)
public FsVolumeSpi.BlockIterator newBlockIterator(java.lang.String bpid, java.lang.String name)
Description copied from interface: FsVolumeSpi
Specified by: newBlockIterator in interface FsVolumeSpi
Parameters:
bpid - The block pool id to iterate over.
name - The name of the block iterator to create.

public FsVolumeSpi.BlockIterator loadBlockIterator(java.lang.String bpid, java.lang.String name) throws java.io.IOException
Description copied from interface: FsVolumeSpi
Specified by: loadBlockIterator in interface FsVolumeSpi
Parameters:
bpid - The block pool id to iterate over.
name - The name of the block iterator to load.
Throws:
java.io.IOException - If there was an IO error loading the saved
block iterator.

public FsDatasetSpi<? extends FsVolumeSpi> getDataset()
Description copied from interface: FsVolumeSpi
Specified by: getDataset in interface FsVolumeSpi

public VolumeCheckResult check(FsVolumeSpi.VolumeCheckContext ignored) throws org.apache.hadoop.util.DiskChecker.DiskErrorException
Description copied from interface: Checkable
Specified by: check in interface Checkable<FsVolumeSpi.VolumeCheckContext,VolumeCheckResult>
Parameters:
ignored - for the probe operation. May be null depending
on the implementation.
Throws: org.apache.hadoop.util.DiskChecker.DiskErrorException

public java.lang.String toString()
Overrides: toString in class java.lang.Object

public java.lang.String getStorageID()
Specified by: getStorageID in interface FsVolumeSpi

public org.apache.hadoop.fs.StorageType getStorageType()
Specified by: getStorageType in interface FsVolumeSpi
Returns: the StorageType of the volume

public byte[] loadLastPartialChunkChecksum(java.io.File blockFile,
java.io.File metaFile)
throws java.io.IOException
Description copied from interface: FsVolumeSpi
Specified by: loadLastPartialChunkChecksum in interface FsVolumeSpi
Throws: java.io.IOException

public ReplicaInPipeline append(java.lang.String bpid, ReplicaInfo replicaInfo, long newGS, long estimateBlockLen) throws java.io.IOException
Throws: java.io.IOException

public ReplicaInPipeline createRbw(org.apache.hadoop.hdfs.protocol.ExtendedBlock b) throws java.io.IOException
Throws: java.io.IOException

public ReplicaInPipeline convertTemporaryToRbw(org.apache.hadoop.hdfs.protocol.ExtendedBlock b, ReplicaInfo temp) throws java.io.IOException
Throws: java.io.IOException

public ReplicaInPipeline createTemporary(org.apache.hadoop.hdfs.protocol.ExtendedBlock b) throws java.io.IOException
Throws: java.io.IOException

public ReplicaInPipeline updateRURCopyOnTruncate(ReplicaInfo rur, java.lang.String bpid, long newBlockId, long recoveryId, long newlength) throws java.io.IOException
Throws: java.io.IOException

public void compileReport(java.lang.String bpid,
java.util.Collection<FsVolumeSpi.ScanInfo> report,
DirectoryScanner.ReportCompiler reportCompiler)
throws java.lang.InterruptedException,
java.io.IOException
Description copied from interface: FsVolumeSpi
Compile a list of FsVolumeSpi.ScanInfo for the blocks in
the block pool with id bpid.
Specified by: compileReport in interface FsVolumeSpi
Parameters:
bpid - block pool id to scan
report - the list onto which blocks reports are placed
Throws:
java.lang.InterruptedException
java.io.IOException

public FileIoProvider getFileIoProvider()
Specified by: getFileIoProvider in interface FsVolumeSpi

public DataNodeVolumeMetrics getMetrics()
Specified by: getMetrics in interface FsVolumeSpi

public ReplicaInfo moveBlockToTmpLocation(org.apache.hadoop.hdfs.protocol.ExtendedBlock block, ReplicaInfo replicaInfo, int smallBufferSize, org.apache.hadoop.conf.Configuration conf) throws java.io.IOException
Throws: java.io.IOException

public ReplicaInfo hardLinkBlockToTmpLocation(org.apache.hadoop.hdfs.protocol.ExtendedBlock block, ReplicaInfo replicaInfo) throws java.io.IOException
Throws: java.io.IOException

public java.io.File[] copyBlockToLazyPersistLocation(java.lang.String bpId,
long blockId,
long genStamp,
ReplicaInfo replicaInfo,
int smallBufferSize,
org.apache.hadoop.conf.Configuration conf)
throws java.io.IOException
Throws: java.io.IOException

public void incrNumBlocks(java.lang.String bpid)
throws java.io.IOException
Throws: java.io.IOException

public void resolveDuplicateReplicas(java.lang.String bpid,
ReplicaInfo memBlockInfo,
ReplicaInfo diskBlockInfo,
org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReplicaMap volumeMap)
throws java.io.IOException
Throws: java.io.IOException

public ReplicaInfo activateSavedReplica(java.lang.String bpid, ReplicaInfo replicaInfo, org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaTracker.RamDiskReplica replicaState) throws java.io.IOException
Throws: java.io.IOException

Copyright © 2008–2025 Apache Software Foundation. All rights reserved.