java.io.Closeable, java.lang.AutoCloseable, org.apache.hadoop.conf.Configurable, org.apache.hadoop.crypto.key.KeyProviderTokenIssuer, org.apache.hadoop.fs.BatchListingOperations, org.apache.hadoop.fs.BulkDeleteSource, org.apache.hadoop.fs.LeaseRecoverable, org.apache.hadoop.fs.PathCapabilities, org.apache.hadoop.fs.SafeMode, org.apache.hadoop.security.token.DelegationTokenIssuer
DistributedFileSystem
@LimitedPrivate({"MapReduce","HBase"})
@Unstable
public class DistributedFileSystem
extends org.apache.hadoop.fs.FileSystem
implements org.apache.hadoop.crypto.key.KeyProviderTokenIssuer, org.apache.hadoop.fs.BatchListingOperations, org.apache.hadoop.fs.LeaseRecoverable, org.apache.hadoop.fs.SafeMode
| Modifier and Type | Class | Description |
|---|---|---|
static class |
DistributedFileSystem.HdfsDataOutputStreamBuilder |
HdfsDataOutputStreamBuilder provides the HDFS-specific capabilities to
write file on HDFS.
|
| Constructor | Description |
|---|---|
DistributedFileSystem() |
| Modifier and Type | Method | Description |
|---|---|---|
void |
access(org.apache.hadoop.fs.Path path,
org.apache.hadoop.fs.permission.FsAction mode) |
|
long |
addCacheDirective(CacheDirectiveInfo info) |
|
long |
addCacheDirective(CacheDirectiveInfo info,
java.util.EnumSet<CacheFlag> flags) |
Add a new CacheDirective.
|
void |
addCachePool(CachePoolInfo info) |
Add a cache pool.
|
AddErasureCodingPolicyResponse[] |
addErasureCodingPolicies(ErasureCodingPolicy[] policies) |
Add Erasure coding policies to HDFS.
|
void |
allowSnapshot(org.apache.hadoop.fs.Path path) |
|
org.apache.hadoop.fs.FSDataOutputStream |
append(org.apache.hadoop.fs.Path f,
int bufferSize,
org.apache.hadoop.util.Progressable progress) |
|
org.apache.hadoop.fs.FSDataOutputStream |
append(org.apache.hadoop.fs.Path f,
int bufferSize,
org.apache.hadoop.util.Progressable progress,
boolean appendToNewBlock) |
|
org.apache.hadoop.fs.FSDataOutputStream |
append(org.apache.hadoop.fs.Path f,
java.util.EnumSet<org.apache.hadoop.fs.CreateFlag> flag,
int bufferSize,
org.apache.hadoop.util.Progressable progress) |
Append to an existing file (optional operation).
|
org.apache.hadoop.fs.FSDataOutputStream |
append(org.apache.hadoop.fs.Path f,
java.util.EnumSet<org.apache.hadoop.fs.CreateFlag> flag,
int bufferSize,
org.apache.hadoop.util.Progressable progress,
java.net.InetSocketAddress[] favoredNodes) |
Append to an existing file (optional operation).
|
DistributedFileSystem.HdfsDataOutputStreamBuilder |
appendFile(org.apache.hadoop.fs.Path path) |
Create a
DistributedFileSystem.HdfsDataOutputStreamBuilder to append a file on DFS. |
org.apache.hadoop.fs.RemoteIterator<org.apache.hadoop.fs.PartialListing<org.apache.hadoop.fs.LocatedFileStatus>> |
batchedListLocatedStatusIterator(java.util.List<org.apache.hadoop.fs.Path> paths) |
|
org.apache.hadoop.fs.RemoteIterator<org.apache.hadoop.fs.PartialListing<org.apache.hadoop.fs.FileStatus>> |
batchedListStatusIterator(java.util.List<org.apache.hadoop.fs.Path> paths) |
|
protected java.net.URI |
canonicalizeUri(java.net.URI uri) |
|
void |
close() |
|
void |
concat(org.apache.hadoop.fs.Path trg,
org.apache.hadoop.fs.Path[] psrcs) |
Move blocks from srcs to trg and delete srcs afterwards.
|
org.apache.hadoop.fs.FSDataOutputStream |
create(org.apache.hadoop.fs.Path f,
org.apache.hadoop.fs.permission.FsPermission permission,
boolean overwrite,
int bufferSize,
short replication,
long blockSize,
org.apache.hadoop.util.Progressable progress) |
|
HdfsDataOutputStream |
create(org.apache.hadoop.fs.Path f,
org.apache.hadoop.fs.permission.FsPermission permission,
boolean overwrite,
int bufferSize,
short replication,
long blockSize,
org.apache.hadoop.util.Progressable progress,
java.net.InetSocketAddress[] favoredNodes) |
Same as
create(Path, FsPermission, boolean, int, short, long,
Progressable) with the addition of favoredNodes that is a hint to
where the namenode should place the file blocks. |
org.apache.hadoop.fs.FSDataOutputStream |
create(org.apache.hadoop.fs.Path f,
org.apache.hadoop.fs.permission.FsPermission permission,
java.util.EnumSet<org.apache.hadoop.fs.CreateFlag> cflags,
int bufferSize,
short replication,
long blockSize,
org.apache.hadoop.util.Progressable progress,
org.apache.hadoop.fs.Options.ChecksumOpt checksumOpt) |
|
void |
createEncryptionZone(org.apache.hadoop.fs.Path path,
java.lang.String keyName) |
|
DistributedFileSystem.HdfsDataOutputStreamBuilder |
createFile(org.apache.hadoop.fs.Path path) |
Create a HdfsDataOutputStreamBuilder to create a file on DFS.
|
org.apache.hadoop.fs.MultipartUploaderBuilder |
createMultipartUploader(org.apache.hadoop.fs.Path basePath) |
|
org.apache.hadoop.fs.FSDataOutputStream |
createNonRecursive(org.apache.hadoop.fs.Path f,
org.apache.hadoop.fs.permission.FsPermission permission,
java.util.EnumSet<org.apache.hadoop.fs.CreateFlag> flag,
int bufferSize,
short replication,
long blockSize,
org.apache.hadoop.util.Progressable progress) |
Same as create(), except fails if parent directory doesn't already exist.
|
protected HdfsPathHandle |
createPathHandle(org.apache.hadoop.fs.FileStatus st,
org.apache.hadoop.fs.Options.HandleOpt... opts) |
Create a handle to an HDFS file.
|
org.apache.hadoop.fs.Path |
createSnapshot(org.apache.hadoop.fs.Path path,
java.lang.String snapshotName) |
|
void |
createSymlink(org.apache.hadoop.fs.Path target,
org.apache.hadoop.fs.Path link,
boolean createParent) |
|
boolean |
delete(org.apache.hadoop.fs.Path f,
boolean recursive) |
|
void |
deleteSnapshot(org.apache.hadoop.fs.Path snapshotDir,
java.lang.String snapshotName) |
|
void |
disableErasureCodingPolicy(java.lang.String ecPolicyName) |
Disable erasure coding policy.
|
void |
disallowSnapshot(org.apache.hadoop.fs.Path path) |
|
void |
enableErasureCodingPolicy(java.lang.String ecPolicyName) |
Enable erasure coding policy.
|
void |
finalizeUpgrade() |
Finalize previously upgraded files system state.
|
protected org.apache.hadoop.fs.Path |
fixRelativePart(org.apache.hadoop.fs.Path p) |
|
org.apache.hadoop.fs.permission.AclStatus |
getAclStatus(org.apache.hadoop.fs.Path path) |
|
org.apache.hadoop.security.token.DelegationTokenIssuer[] |
getAdditionalTokenIssuers() |
|
java.util.Map<java.lang.String,java.lang.String> |
getAllErasureCodingCodecs() |
Retrieve all the erasure coding codecs and coders supported by this file
system.
|
java.util.Collection<ErasureCodingPolicyInfo> |
getAllErasureCodingPolicies() |
Retrieve all the erasure coding policies supported by this file system,
including enabled, disabled and removed policies, but excluding
REPLICATION policy.
|
java.util.Collection<BlockStoragePolicy> |
getAllStoragePolicies() |
|
long |
getBytesWithFutureGenerationStamps() |
Returns number of bytes within blocks with future generation stamp.
|
java.lang.String |
getCanonicalServiceName() |
Get a canonical service name for this file system.
|
DFSClient |
getClient() |
|
org.apache.hadoop.fs.ContentSummary |
getContentSummary(org.apache.hadoop.fs.Path f) |
|
long |
getCorruptBlocksCount() |
Returns count of blocks with at least one replica marked corrupt.
|
DatanodeInfo[] |
getDataNodeStats() |
|
DatanodeInfo[] |
getDataNodeStats(HdfsConstants.DatanodeReportType type) |
|
long |
getDefaultBlockSize() |
|
protected int |
getDefaultPort() |
|
short |
getDefaultReplication() |
|
org.apache.hadoop.security.token.Token<DelegationTokenIdentifier> |
getDelegationToken(java.lang.String renewer) |
|
ECTopologyVerifierResult |
getECTopologyResultForPolicies(java.lang.String... policyNames) |
Verifies if the given policies are supported in the given cluster setup.
|
org.apache.hadoop.fs.Path |
getEnclosingRoot(org.apache.hadoop.fs.Path path) |
Return path of the enclosing root for a given path
The enclosing root path is a common ancestor that should be used for temp and staging dirs
as well as within encryption zones and other restricted directories.
|
ErasureCodingPolicy |
getErasureCodingPolicy(org.apache.hadoop.fs.Path path) |
Get erasure coding policy information for the specified path.
|
EncryptionZone |
getEZForPath(org.apache.hadoop.fs.Path path) |
|
org.apache.hadoop.fs.BlockLocation[] |
getFileBlockLocations(org.apache.hadoop.fs.FileStatus file,
long start,
long len) |
|
org.apache.hadoop.fs.BlockLocation[] |
getFileBlockLocations(org.apache.hadoop.fs.Path p,
long start,
long len) |
The returned BlockLocation will have different formats for replicated
and erasure coded file.
|
org.apache.hadoop.fs.FileChecksum |
getFileChecksum(org.apache.hadoop.fs.Path f) |
|
org.apache.hadoop.fs.FileChecksum |
getFileChecksum(org.apache.hadoop.fs.Path f,
long length) |
|
org.apache.hadoop.fs.FileEncryptionInfo |
getFileEncryptionInfo(org.apache.hadoop.fs.Path path) |
|
org.apache.hadoop.fs.FileStatus |
getFileLinkStatus(org.apache.hadoop.fs.Path f) |
|
org.apache.hadoop.fs.FileStatus |
getFileStatus(org.apache.hadoop.fs.Path f) |
Returns the stat information about the file.
|
DFSHedgedReadMetrics |
getHedgedReadMetrics() |
Returns the hedged read metrics object for this client.
|
org.apache.hadoop.fs.Path |
getHomeDirectory() |
|
DFSInotifyEventInputStream |
getInotifyEventStream() |
|
DFSInotifyEventInputStream |
getInotifyEventStream(long lastReadTxid) |
|
org.apache.hadoop.crypto.key.KeyProvider |
getKeyProvider() |
|
java.net.URI |
getKeyProviderUri() |
|
org.apache.hadoop.fs.Path |
getLinkTarget(org.apache.hadoop.fs.Path f) |
|
LocatedBlocks |
getLocatedBlocks(org.apache.hadoop.fs.Path p,
long start,
long len) |
Returns LocatedBlocks of the corresponding HDFS file p from offset start
for length len.
|
long |
getLowRedundancyBlocksCount() |
Returns aggregated count of blocks with less redundancy.
|
long |
getMissingBlocksCount() |
Returns count of blocks with no good replicas left.
|
long |
getMissingReplOneBlocksCount() |
Returns count of blocks with replication factor 1 that have
lost their only replica.
|
long |
getPendingDeletionBlocksCount() |
Returns count of blocks pending on deletion.
|
org.apache.hadoop.fs.QuotaUsage |
getQuotaUsage(org.apache.hadoop.fs.Path f) |
|
java.lang.String |
getScheme() |
Return the protocol scheme for the FileSystem.
|
org.apache.hadoop.fs.FsServerDefaults |
getServerDefaults() |
|
DatanodeInfo[] |
getSlowDatanodeStats() |
Retrieve stats for slow running datanodes.
|
SnapshotDiffReport |
getSnapshotDiffReport(org.apache.hadoop.fs.Path snapshotDir,
java.lang.String fromSnapshot,
java.lang.String toSnapshot) |
Get the difference between two snapshots, or between a snapshot and the
current tree of a directory.
|
SnapshotDiffReportListing |
getSnapshotDiffReportListing(org.apache.hadoop.fs.Path snapshotDir,
java.lang.String fromSnapshotName,
java.lang.String toSnapshotName,
java.lang.String snapshotDiffStartPath,
int snapshotDiffIndex) |
Get the difference between two snapshots of a directory iteratively.
|
SnapshotStatus[] |
getSnapshotListing(org.apache.hadoop.fs.Path snapshotRoot) |
|
SnapshottableDirectoryStatus[] |
getSnapshottableDirListing() |
Get the list of snapshottable directories that are owned
by the current user.
|
org.apache.hadoop.fs.FsStatus |
getStatus(org.apache.hadoop.fs.Path p) |
|
BlockStoragePolicy[] |
getStoragePolicies() |
Deprecated.
|
org.apache.hadoop.fs.BlockStoragePolicySpi |
getStoragePolicy(org.apache.hadoop.fs.Path path) |
|
org.apache.hadoop.fs.Path |
getTrashRoot(org.apache.hadoop.fs.Path path) |
Get the root directory of Trash for a path in HDFS.
|
java.util.Collection<org.apache.hadoop.fs.FileStatus> |
getTrashRoots(boolean allUsers) |
Get all the trash roots of HDFS for current user or for all the users.
|
java.net.URI |
getUri() |
|
org.apache.hadoop.fs.Path |
getWorkingDirectory() |
|
byte[] |
getXAttr(org.apache.hadoop.fs.Path path,
java.lang.String name) |
|
java.util.Map<java.lang.String,byte[]> |
getXAttrs(org.apache.hadoop.fs.Path path) |
|
java.util.Map<java.lang.String,byte[]> |
getXAttrs(org.apache.hadoop.fs.Path path,
java.util.List<java.lang.String> names) |
|
boolean |
hasPathCapability(org.apache.hadoop.fs.Path path,
java.lang.String capability) |
HDFS client capabilities.
|
void |
initialize(java.net.URI uri,
org.apache.hadoop.conf.Configuration conf) |
|
boolean |
isFileClosed(org.apache.hadoop.fs.Path src) |
Get the close status of a file.
|
boolean |
isInSafeMode() |
Utility function that returns if the NameNode is in safemode or not.
|
boolean |
isSnapshotTrashRootEnabled() |
HDFS only.
|
org.apache.hadoop.fs.RemoteIterator<CacheDirectiveEntry> |
listCacheDirectives(CacheDirectiveInfo filter) |
List cache directives.
|
org.apache.hadoop.fs.RemoteIterator<CachePoolEntry> |
listCachePools() |
List all cache pools.
|
org.apache.hadoop.fs.RemoteIterator<org.apache.hadoop.fs.Path> |
listCorruptFileBlocks(org.apache.hadoop.fs.Path path) |
|
org.apache.hadoop.fs.RemoteIterator<EncryptionZone> |
listEncryptionZones() |
|
protected org.apache.hadoop.fs.RemoteIterator<org.apache.hadoop.fs.LocatedFileStatus> |
listLocatedStatus(org.apache.hadoop.fs.Path p,
org.apache.hadoop.fs.PathFilter filter) |
The BlockLocation of returned LocatedFileStatus will have different
formats for replicated and erasure coded file.
|
org.apache.hadoop.fs.RemoteIterator<OpenFileEntry> |
listOpenFiles() |
Deprecated.
|
org.apache.hadoop.fs.RemoteIterator<OpenFileEntry> |
listOpenFiles(java.util.EnumSet<OpenFilesIterator.OpenFilesType> openFilesTypes) |
Deprecated.
|
org.apache.hadoop.fs.RemoteIterator<OpenFileEntry> |
listOpenFiles(java.util.EnumSet<OpenFilesIterator.OpenFilesType> openFilesTypes,
java.lang.String path) |
|
org.apache.hadoop.fs.RemoteIterator<ZoneReencryptionStatus> |
listReencryptionStatus() |
|
org.apache.hadoop.fs.FileStatus[] |
listStatus(org.apache.hadoop.fs.Path p) |
List all the entries of a directory
Note that this operation is not atomic for a large directory.
|
org.apache.hadoop.fs.RemoteIterator<org.apache.hadoop.fs.FileStatus> |
listStatusIterator(org.apache.hadoop.fs.Path p) |
Returns a remote iterator so that followup calls are made on demand
while consuming the entries.
|
java.util.List<java.lang.String> |
listXAttrs(org.apache.hadoop.fs.Path path) |
|
void |
metaSave(java.lang.String pathname) |
|
boolean |
mkdir(org.apache.hadoop.fs.Path f,
org.apache.hadoop.fs.permission.FsPermission permission) |
Create a directory, only when the parent directories exist.
|
boolean |
mkdirs(org.apache.hadoop.fs.Path f,
org.apache.hadoop.fs.permission.FsPermission permission) |
Create a directory and its parent directories.
|
void |
modifyAclEntries(org.apache.hadoop.fs.Path path,
java.util.List<org.apache.hadoop.fs.permission.AclEntry> aclSpec) |
|
void |
modifyCacheDirective(CacheDirectiveInfo info) |
|
void |
modifyCacheDirective(CacheDirectiveInfo info,
java.util.EnumSet<CacheFlag> flags) |
Modify a CacheDirective.
|
void |
modifyCachePool(CachePoolInfo info) |
Modify an existing cache pool.
|
void |
msync() |
Synchronize client metadata state with Active NameNode.
|
org.apache.hadoop.fs.FSDataInputStream |
open(org.apache.hadoop.fs.PathHandle fd,
int bufferSize) |
Opens an FSDataInputStream with the indicated file ID extracted from
the
PathHandle. |
org.apache.hadoop.fs.FSDataInputStream |
open(org.apache.hadoop.fs.Path f,
int bufferSize) |
|
protected HdfsDataOutputStream |
primitiveCreate(org.apache.hadoop.fs.Path f,
org.apache.hadoop.fs.permission.FsPermission absolutePermission,
java.util.EnumSet<org.apache.hadoop.fs.CreateFlag> flag,
int bufferSize,
short replication,
long blockSize,
org.apache.hadoop.util.Progressable progress,
org.apache.hadoop.fs.Options.ChecksumOpt checksumOpt) |
|
protected boolean |
primitiveMkdir(org.apache.hadoop.fs.Path f,
org.apache.hadoop.fs.permission.FsPermission absolutePermission) |
|
void |
provisionEZTrash(org.apache.hadoop.fs.Path path,
org.apache.hadoop.fs.permission.FsPermission trashPermission) |
|
org.apache.hadoop.fs.Path |
provisionSnapshotTrash(org.apache.hadoop.fs.Path path,
org.apache.hadoop.fs.permission.FsPermission trashPermission) |
HDFS only.
|
boolean |
recoverLease(org.apache.hadoop.fs.Path f) |
Start the lease recovery of a file.
|
void |
reencryptEncryptionZone(org.apache.hadoop.fs.Path zone,
HdfsConstants.ReencryptAction action) |
|
void |
refreshNodes() |
Refreshes the list of hosts and excluded hosts from the configured
files.
|
void |
removeAcl(org.apache.hadoop.fs.Path path) |
|
void |
removeAclEntries(org.apache.hadoop.fs.Path path,
java.util.List<org.apache.hadoop.fs.permission.AclEntry> aclSpec) |
|
void |
removeCacheDirective(long id) |
Remove a CacheDirectiveInfo.
|
void |
removeCachePool(java.lang.String poolName) |
Remove a cache pool.
|
void |
removeDefaultAcl(org.apache.hadoop.fs.Path path) |
|
void |
removeErasureCodingPolicy(java.lang.String ecPolicyName) |
Remove erasure coding policy.
|
void |
removeXAttr(org.apache.hadoop.fs.Path path,
java.lang.String name) |
|
boolean |
rename(org.apache.hadoop.fs.Path src,
org.apache.hadoop.fs.Path dst) |
|
void |
rename(org.apache.hadoop.fs.Path src,
org.apache.hadoop.fs.Path dst,
org.apache.hadoop.fs.Options.Rename... options) |
This rename operation is guaranteed to be atomic.
|
void |
renameSnapshot(org.apache.hadoop.fs.Path path,
java.lang.String snapshotOldName,
java.lang.String snapshotNewName) |
|
protected org.apache.hadoop.fs.Path |
resolveLink(org.apache.hadoop.fs.Path f) |
|
boolean |
restoreFailedStorage(java.lang.String arg) |
Enable/disable/check restoreFailedStorage.
|
long |
rollEdits() |
Rolls the edit log on the active NameNode.
|
RollingUpgradeInfo |
rollingUpgrade(HdfsConstants.RollingUpgradeAction action) |
Rolling upgrade: prepare/finalize/query.
|
void |
satisfyStoragePolicy(org.apache.hadoop.fs.Path path) |
Set the source path to satisfy storage policy.
|
void |
saveNamespace() |
Save namespace image.
|
boolean |
saveNamespace(long timeWindow,
long txGap) |
Save namespace image.
|
void |
setAcl(org.apache.hadoop.fs.Path path,
java.util.List<org.apache.hadoop.fs.permission.AclEntry> aclSpec) |
|
void |
setBalancerBandwidth(long bandwidth) |
Requests the namenode to tell all datanodes to use a new, non-persistent
bandwidth value for dfs.datanode.balance.bandwidthPerSec.
|
void |
setErasureCodingPolicy(org.apache.hadoop.fs.Path path,
java.lang.String ecPolicyName) |
Set the source path to the specified erasure coding policy.
|
void |
setOwner(org.apache.hadoop.fs.Path p,
java.lang.String username,
java.lang.String groupname) |
|
void |
setPermission(org.apache.hadoop.fs.Path p,
org.apache.hadoop.fs.permission.FsPermission permission) |
|
void |
setQuota(org.apache.hadoop.fs.Path src,
long namespaceQuota,
long storagespaceQuota) |
Set a directory's quotas.
|
void |
setQuotaByStorageType(org.apache.hadoop.fs.Path src,
org.apache.hadoop.fs.StorageType type,
long quota) |
Set the per type storage quota of a directory.
|
boolean |
setReplication(org.apache.hadoop.fs.Path src,
short replication) |
|
boolean |
setSafeMode(org.apache.hadoop.fs.SafeModeAction action) |
Enter, leave or get safe mode.
|
boolean |
setSafeMode(org.apache.hadoop.fs.SafeModeAction action,
boolean isChecked) |
Enter, leave or get safe mode.
|
boolean |
setSafeMode(HdfsConstants.SafeModeAction action) |
Deprecated.
please instead use
setSafeMode(SafeModeAction). |
boolean |
setSafeMode(HdfsConstants.SafeModeAction action,
boolean isChecked) |
Deprecated.
please instead use
setSafeMode(SafeModeAction, boolean). |
void |
setStoragePolicy(org.apache.hadoop.fs.Path src,
java.lang.String policyName) |
Set the source path to the specified storage policy.
|
void |
setTimes(org.apache.hadoop.fs.Path p,
long mtime,
long atime) |
|
void |
setVerifyChecksum(boolean verifyChecksum) |
|
void |
setWorkingDirectory(org.apache.hadoop.fs.Path dir) |
|
void |
setXAttr(org.apache.hadoop.fs.Path path,
java.lang.String name,
byte[] value,
java.util.EnumSet<org.apache.hadoop.fs.XAttrSetFlag> flag) |
|
org.apache.hadoop.fs.RemoteIterator<SnapshotDiffReportListing> |
snapshotDiffReportListingRemoteIterator(org.apache.hadoop.fs.Path snapshotDir,
java.lang.String fromSnapshot,
java.lang.String toSnapshot) |
Returns a remote iterator so that followup calls are made on demand
while consuming the SnapshotDiffReportListing entries.
|
boolean |
supportsSymlinks() |
|
java.lang.String |
toString() |
|
boolean |
truncate(org.apache.hadoop.fs.Path f,
long newLength) |
|
void |
unsetErasureCodingPolicy(org.apache.hadoop.fs.Path path) |
Unset the erasure coding policy from the source path.
|
void |
unsetStoragePolicy(org.apache.hadoop.fs.Path src) |
|
boolean |
upgradeStatus() |
Get status of upgrade - finalized or not.
|
append, append, append, areSymlinksEnabled, cancelDeleteOnExit, checkPath, clearStatistics, closeAll, closeAllForUGI, completeLocalOutput, copyFromLocalFile, copyFromLocalFile, copyFromLocalFile, copyFromLocalFile, copyToLocalFile, copyToLocalFile, copyToLocalFile, create, create, create, create, create, create, create, create, create, create, create, createBulkDelete, createDataInputStreamBuilder, createDataInputStreamBuilder, createDataOutputStreamBuilder, createFid, createNewFile, createNonRecursive, createNonRecursive, createPathId, createSnapshot, delete, deleteFid, deleteOnExit, enableSymlinks, exists, get, get, get, getAllStatistics, getBlockSize, getCanonicalUri, getChildFileSystems, getDefaultBlockSize, getDefaultReplication, getDefaultUri, getFileSystemClass, getFSofPath, getGlobalStorageStatistics, getInitialWorkingDirectory, getJobTrackerAddrs, getLength, getLocal, getName, getNamed, getPathHandle, getReplication, getServerDefaults, getStatistics, getStatistics, getStatus, getStorageStatistics, getUsed, getUsed, getZkConnectString, globStatus, globStatus, isDirectory, isFile, listFiles, listLocatedStatus, listStatus, listStatus, listStatus, listStatusBatch, makeQualified, mkdirs, mkdirs, mkdirsFid, mkdirsFid, moveFromLocalFile, moveFromLocalFile, moveToLocalFile, newInstance, newInstance, newInstance, newInstanceLocal, open, open, openFid, openFid, openFid2, openFile, openFile, openFileWithOptions, openFileWithOptions, primitiveMkdir, printStatistics, processDeleteOnExit, resolvePath, setDefaultUri, setDefaultUri, setOwnerFid, setWriteChecksum, setXAttr, startLocalOutputpublic java.lang.String getScheme()
getScheme in class org.apache.hadoop.fs.FileSystemhdfspublic java.net.URI getUri()
getUri in class org.apache.hadoop.fs.FileSystempublic void initialize(java.net.URI uri,
org.apache.hadoop.conf.Configuration conf)
throws java.io.IOException
initialize in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic org.apache.hadoop.fs.Path getWorkingDirectory()
getWorkingDirectory in class org.apache.hadoop.fs.FileSystempublic long getDefaultBlockSize()
getDefaultBlockSize in class org.apache.hadoop.fs.FileSystempublic short getDefaultReplication()
getDefaultReplication in class org.apache.hadoop.fs.FileSystempublic void setWorkingDirectory(org.apache.hadoop.fs.Path dir)
setWorkingDirectory in class org.apache.hadoop.fs.FileSystempublic org.apache.hadoop.fs.Path getHomeDirectory()
getHomeDirectory in class org.apache.hadoop.fs.FileSystempublic DFSHedgedReadMetrics getHedgedReadMetrics()
public org.apache.hadoop.fs.BlockLocation[] getFileBlockLocations(org.apache.hadoop.fs.FileStatus file,
long start,
long len)
throws java.io.IOException
getFileBlockLocations in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic org.apache.hadoop.fs.BlockLocation[] getFileBlockLocations(org.apache.hadoop.fs.Path p,
long start,
long len)
throws java.io.IOException
FileSystem.getFileBlockLocations(FileStatus, long, long)
for more details.getFileBlockLocations in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic void setVerifyChecksum(boolean verifyChecksum)
setVerifyChecksum in class org.apache.hadoop.fs.FileSystempublic boolean recoverLease(org.apache.hadoop.fs.Path f)
throws java.io.IOException
recoverLease in interface org.apache.hadoop.fs.LeaseRecoverablef - a filejava.io.IOException - if an error occurspublic org.apache.hadoop.fs.FSDataInputStream open(org.apache.hadoop.fs.Path f,
int bufferSize)
throws java.io.IOException
open in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic org.apache.hadoop.fs.FSDataInputStream open(org.apache.hadoop.fs.PathHandle fd,
int bufferSize)
throws java.io.IOException
PathHandle.open in class org.apache.hadoop.fs.FileSystemfd - Reference to entity in this FileSystem.bufferSize - the size of the buffer to be used.org.apache.hadoop.fs.InvalidPathHandleException - If PathHandle constraints do not holdjava.io.IOException - On I/O errorsprotected HdfsPathHandle createPathHandle(org.apache.hadoop.fs.FileStatus st, org.apache.hadoop.fs.Options.HandleOpt... opts)
createPathHandle in class org.apache.hadoop.fs.FileSystemst - HdfsFileStatus instance from NameNodeopts - Standard handle argumentsjava.lang.IllegalArgumentException - If the FileStatus instance refers to a
directory, symlink, or another namesystem.java.lang.UnsupportedOperationException - If opts are not specified or both
data and location are not allowed to change.public org.apache.hadoop.fs.FSDataOutputStream append(org.apache.hadoop.fs.Path f,
int bufferSize,
org.apache.hadoop.util.Progressable progress)
throws java.io.IOException
append in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic org.apache.hadoop.fs.FSDataOutputStream append(org.apache.hadoop.fs.Path f,
int bufferSize,
org.apache.hadoop.util.Progressable progress,
boolean appendToNewBlock)
throws java.io.IOException
append in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic org.apache.hadoop.fs.FSDataOutputStream append(org.apache.hadoop.fs.Path f,
java.util.EnumSet<org.apache.hadoop.fs.CreateFlag> flag,
int bufferSize,
org.apache.hadoop.util.Progressable progress)
throws java.io.IOException
f - the existing file to be appended.flag - Flags for the Append operation. CreateFlag.APPEND is mandatory
to be present.bufferSize - the size of the buffer to be used.progress - for reporting progress if it is not null.FSDataOutputStreamjava.io.IOExceptionpublic org.apache.hadoop.fs.FSDataOutputStream append(org.apache.hadoop.fs.Path f,
java.util.EnumSet<org.apache.hadoop.fs.CreateFlag> flag,
int bufferSize,
org.apache.hadoop.util.Progressable progress,
java.net.InetSocketAddress[] favoredNodes)
throws java.io.IOException
f - the existing file to be appended.flag - Flags for the Append operation. CreateFlag.APPEND is mandatory
to be present.bufferSize - the size of the buffer to be used.progress - for reporting progress if it is not null.favoredNodes - Favored nodes for new blocksFSDataOutputStreamjava.io.IOExceptionpublic org.apache.hadoop.fs.FSDataOutputStream create(org.apache.hadoop.fs.Path f,
org.apache.hadoop.fs.permission.FsPermission permission,
boolean overwrite,
int bufferSize,
short replication,
long blockSize,
org.apache.hadoop.util.Progressable progress)
throws java.io.IOException
create in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic HdfsDataOutputStream create(org.apache.hadoop.fs.Path f, org.apache.hadoop.fs.permission.FsPermission permission, boolean overwrite, int bufferSize, short replication, long blockSize, org.apache.hadoop.util.Progressable progress, java.net.InetSocketAddress[] favoredNodes) throws java.io.IOException
create(Path, FsPermission, boolean, int, short, long,
Progressable) with the addition of favoredNodes that is a hint to
where the namenode should place the file blocks.
The favored nodes hint is not persisted in HDFS. Hence it may be honored
at the creation time only. And with favored nodes, blocks will be pinned
on the datanodes to prevent balancing move the block. HDFS could move the
blocks during replication, to move the blocks from favored nodes. A value
of null means no favored nodes for this createjava.io.IOExceptionpublic org.apache.hadoop.fs.FSDataOutputStream create(org.apache.hadoop.fs.Path f,
org.apache.hadoop.fs.permission.FsPermission permission,
java.util.EnumSet<org.apache.hadoop.fs.CreateFlag> cflags,
int bufferSize,
short replication,
long blockSize,
org.apache.hadoop.util.Progressable progress,
org.apache.hadoop.fs.Options.ChecksumOpt checksumOpt)
throws java.io.IOException
create in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionprotected HdfsDataOutputStream primitiveCreate(org.apache.hadoop.fs.Path f, org.apache.hadoop.fs.permission.FsPermission absolutePermission, java.util.EnumSet<org.apache.hadoop.fs.CreateFlag> flag, int bufferSize, short replication, long blockSize, org.apache.hadoop.util.Progressable progress, org.apache.hadoop.fs.Options.ChecksumOpt checksumOpt) throws java.io.IOException
primitiveCreate in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic org.apache.hadoop.fs.FSDataOutputStream createNonRecursive(org.apache.hadoop.fs.Path f,
org.apache.hadoop.fs.permission.FsPermission permission,
java.util.EnumSet<org.apache.hadoop.fs.CreateFlag> flag,
int bufferSize,
short replication,
long blockSize,
org.apache.hadoop.util.Progressable progress)
throws java.io.IOException
createNonRecursive in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic boolean setReplication(org.apache.hadoop.fs.Path src,
short replication)
throws java.io.IOException
setReplication in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic void setStoragePolicy(org.apache.hadoop.fs.Path src,
java.lang.String policyName)
throws java.io.IOException
setStoragePolicy in class org.apache.hadoop.fs.FileSystemsrc - The source path referring to either a directory or a file.policyName - The name of the storage policy.java.io.IOExceptionpublic void unsetStoragePolicy(org.apache.hadoop.fs.Path src)
throws java.io.IOException
unsetStoragePolicy in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic org.apache.hadoop.fs.BlockStoragePolicySpi getStoragePolicy(org.apache.hadoop.fs.Path path)
throws java.io.IOException
getStoragePolicy in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic java.util.Collection<BlockStoragePolicy> getAllStoragePolicies() throws java.io.IOException
getAllStoragePolicies in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic long getBytesWithFutureGenerationStamps()
throws java.io.IOException
java.io.IOException@Deprecated public BlockStoragePolicy[] getStoragePolicies() throws java.io.IOException
FileSystem.getAllStoragePolicies()java.io.IOExceptionpublic void concat(org.apache.hadoop.fs.Path trg,
org.apache.hadoop.fs.Path[] psrcs)
throws java.io.IOException
concat in class org.apache.hadoop.fs.FileSystemtrg - existing file to append topsrcs - list of files (same block size, same replication)java.io.IOExceptionpublic boolean rename(org.apache.hadoop.fs.Path src,
org.apache.hadoop.fs.Path dst)
throws java.io.IOException
rename in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic void rename(org.apache.hadoop.fs.Path src,
org.apache.hadoop.fs.Path dst,
org.apache.hadoop.fs.Options.Rename... options)
throws java.io.IOException
rename in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic boolean truncate(org.apache.hadoop.fs.Path f,
long newLength)
throws java.io.IOException
truncate in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic boolean delete(org.apache.hadoop.fs.Path f,
boolean recursive)
throws java.io.IOException
delete in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic org.apache.hadoop.fs.ContentSummary getContentSummary(org.apache.hadoop.fs.Path f)
throws java.io.IOException
getContentSummary in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic org.apache.hadoop.fs.QuotaUsage getQuotaUsage(org.apache.hadoop.fs.Path f)
throws java.io.IOException
getQuotaUsage in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic void setQuota(org.apache.hadoop.fs.Path src,
long namespaceQuota,
long storagespaceQuota)
throws java.io.IOException
setQuota in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionClientProtocol.setQuota(String,
long, long, StorageType)public void setQuotaByStorageType(org.apache.hadoop.fs.Path src,
org.apache.hadoop.fs.StorageType type,
long quota)
throws java.io.IOException
setQuotaByStorageType in class org.apache.hadoop.fs.FileSystemsrc - target directory whose quota is to be modified.type - storage type of the specific storage type quota to be modified.quota - value of the specific storage type quota to be modified.
Maybe HdfsConstants.QUOTA_RESET to clear quota by storage type.java.io.IOExceptionpublic org.apache.hadoop.fs.FileStatus[] listStatus(org.apache.hadoop.fs.Path p)
throws java.io.IOException
getFileStatus(Path f)listStatus in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionprotected org.apache.hadoop.fs.RemoteIterator<org.apache.hadoop.fs.LocatedFileStatus> listLocatedStatus(org.apache.hadoop.fs.Path p,
org.apache.hadoop.fs.PathFilter filter)
throws java.io.IOException
FileSystem.getFileBlockLocations(FileStatus, long, long) for
more details.listLocatedStatus in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic org.apache.hadoop.fs.RemoteIterator<org.apache.hadoop.fs.FileStatus> listStatusIterator(org.apache.hadoop.fs.Path p)
throws java.io.IOException
listStatusIterator in class org.apache.hadoop.fs.FileSystemp - target pathjava.io.IOExceptionpublic org.apache.hadoop.fs.RemoteIterator<org.apache.hadoop.fs.PartialListing<org.apache.hadoop.fs.FileStatus>> batchedListStatusIterator(java.util.List<org.apache.hadoop.fs.Path> paths)
throws java.io.IOException
batchedListStatusIterator in interface org.apache.hadoop.fs.BatchListingOperationsjava.io.IOExceptionpublic org.apache.hadoop.fs.RemoteIterator<org.apache.hadoop.fs.PartialListing<org.apache.hadoop.fs.LocatedFileStatus>> batchedListLocatedStatusIterator(java.util.List<org.apache.hadoop.fs.Path> paths)
throws java.io.IOException
batchedListLocatedStatusIterator in interface org.apache.hadoop.fs.BatchListingOperationsjava.io.IOExceptionpublic boolean mkdir(org.apache.hadoop.fs.Path f,
org.apache.hadoop.fs.permission.FsPermission permission)
throws java.io.IOException
FsPermission.applyUMask(FsPermission) for details of how
the permission is applied.f - The path to createpermission - The permission. See FsPermission#applyUMask for
details about how this is used to calculate the
effective permission.java.io.IOExceptionpublic boolean mkdirs(org.apache.hadoop.fs.Path f,
org.apache.hadoop.fs.permission.FsPermission permission)
throws java.io.IOException
FsPermission.applyUMask(FsPermission) for details of how
the permission is applied.mkdirs in class org.apache.hadoop.fs.FileSystemf - The path to createpermission - The permission. See FsPermission#applyUMask for
details about how this is used to calculate the
effective permission.java.io.IOExceptionprotected boolean primitiveMkdir(org.apache.hadoop.fs.Path f,
org.apache.hadoop.fs.permission.FsPermission absolutePermission)
throws java.io.IOException
primitiveMkdir in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic void close()
throws java.io.IOException
close in interface java.lang.AutoCloseableclose in interface java.io.Closeableclose in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic java.lang.String toString()
toString in class java.lang.Object@Private @VisibleForTesting public DFSClient getClient()
public org.apache.hadoop.fs.FsStatus getStatus(org.apache.hadoop.fs.Path p)
throws java.io.IOException
getStatus in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic long getMissingBlocksCount()
throws java.io.IOException
java.io.IOExceptionpublic long getPendingDeletionBlocksCount()
throws java.io.IOException
java.io.IOExceptionpublic long getMissingReplOneBlocksCount()
throws java.io.IOException
java.io.IOExceptionpublic long getLowRedundancyBlocksCount()
throws java.io.IOException
java.io.IOExceptionpublic long getCorruptBlocksCount()
throws java.io.IOException
java.io.IOExceptionpublic org.apache.hadoop.fs.RemoteIterator<org.apache.hadoop.fs.Path> listCorruptFileBlocks(org.apache.hadoop.fs.Path path)
throws java.io.IOException
listCorruptFileBlocks in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic DatanodeInfo[] getDataNodeStats() throws java.io.IOException
java.io.IOExceptionpublic DatanodeInfo[] getDataNodeStats(HdfsConstants.DatanodeReportType type) throws java.io.IOException
java.io.IOExceptionpublic boolean setSafeMode(org.apache.hadoop.fs.SafeModeAction action)
throws java.io.IOException
setSafeMode in interface org.apache.hadoop.fs.SafeModejava.io.IOExceptionClientProtocol.setSafeMode(
HdfsConstants.SafeModeAction,boolean)public boolean setSafeMode(org.apache.hadoop.fs.SafeModeAction action,
boolean isChecked)
throws java.io.IOException
setSafeMode in interface org.apache.hadoop.fs.SafeModeaction - One of SafeModeAction.ENTER, SafeModeAction.LEAVE and
SafeModeAction.GET.isChecked - If true check only for Active NNs status, else check first NN's
status.java.io.IOException@Deprecated public boolean setSafeMode(HdfsConstants.SafeModeAction action) throws java.io.IOException
setSafeMode(SafeModeAction).java.io.IOExceptionClientProtocol.setSafeMode(HdfsConstants.SafeModeAction,
boolean)@Deprecated public boolean setSafeMode(HdfsConstants.SafeModeAction action, boolean isChecked) throws java.io.IOException
setSafeMode(SafeModeAction, boolean).action - One of SafeModeAction.ENTER, SafeModeAction.LEAVE and
SafeModeAction.GET.isChecked - If true check only for Active NNs status, else check first NN's
status.java.io.IOExceptionClientProtocol.setSafeMode(HdfsConstants.SafeModeAction,
boolean)public boolean saveNamespace(long timeWindow,
long txGap)
throws java.io.IOException
timeWindow - NameNode can ignore this command if the latest
checkpoint was done within the given time period (in
seconds).java.io.IOExceptionClientProtocol.saveNamespace(long, long)public void saveNamespace()
throws java.io.IOException
java.io.IOExceptionpublic long rollEdits()
throws java.io.IOException
java.io.IOExceptionClientProtocol.rollEdits()public boolean restoreFailedStorage(java.lang.String arg)
throws java.io.IOException
java.io.IOExceptionClientProtocol.restoreFailedStorage(String arg)public void refreshNodes()
throws java.io.IOException
java.io.IOExceptionpublic void finalizeUpgrade()
throws java.io.IOException
java.io.IOExceptionpublic boolean upgradeStatus()
throws java.io.IOException
java.io.IOExceptionpublic RollingUpgradeInfo rollingUpgrade(HdfsConstants.RollingUpgradeAction action) throws java.io.IOException
java.io.IOExceptionpublic void metaSave(java.lang.String pathname)
throws java.io.IOException
java.io.IOExceptionpublic org.apache.hadoop.fs.FsServerDefaults getServerDefaults()
throws java.io.IOException
getServerDefaults in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic org.apache.hadoop.fs.FileStatus getFileStatus(org.apache.hadoop.fs.Path f)
throws java.io.IOException
getFileStatus in class org.apache.hadoop.fs.FileSystemjava.io.FileNotFoundException - if the file does not exist.java.io.IOExceptionpublic void msync()
throws java.io.IOException
In HA the client synchronizes its state with the Active NameNode in order to guarantee subsequent read consistency from Observer Nodes.
msync in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic void createSymlink(org.apache.hadoop.fs.Path target,
org.apache.hadoop.fs.Path link,
boolean createParent)
throws java.io.IOException
createSymlink in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic boolean supportsSymlinks()
supportsSymlinks in class org.apache.hadoop.fs.FileSystempublic org.apache.hadoop.fs.FileStatus getFileLinkStatus(org.apache.hadoop.fs.Path f)
throws java.io.IOException
getFileLinkStatus in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic org.apache.hadoop.fs.Path getLinkTarget(org.apache.hadoop.fs.Path f)
throws java.io.IOException
getLinkTarget in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionprotected org.apache.hadoop.fs.Path resolveLink(org.apache.hadoop.fs.Path f)
throws java.io.IOException
resolveLink in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic org.apache.hadoop.fs.FileChecksum getFileChecksum(org.apache.hadoop.fs.Path f)
throws java.io.IOException
getFileChecksum in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic org.apache.hadoop.fs.FileChecksum getFileChecksum(org.apache.hadoop.fs.Path f,
long length)
throws java.io.IOException
getFileChecksum in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic void setPermission(org.apache.hadoop.fs.Path p,
org.apache.hadoop.fs.permission.FsPermission permission)
throws java.io.IOException
setPermission in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic void setOwner(org.apache.hadoop.fs.Path p,
java.lang.String username,
java.lang.String groupname)
throws java.io.IOException
setOwner in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic void setTimes(org.apache.hadoop.fs.Path p,
long mtime,
long atime)
throws java.io.IOException
setTimes in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionprotected int getDefaultPort()
getDefaultPort in class org.apache.hadoop.fs.FileSystempublic org.apache.hadoop.security.token.Token<DelegationTokenIdentifier> getDelegationToken(java.lang.String renewer) throws java.io.IOException
getDelegationToken in interface org.apache.hadoop.security.token.DelegationTokenIssuergetDelegationToken in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic void setBalancerBandwidth(long bandwidth)
throws java.io.IOException
bandwidth - Balancer bandwidth in bytes per second for all datanodes.java.io.IOExceptionpublic java.lang.String getCanonicalServiceName()
getCanonicalServiceName in interface org.apache.hadoop.security.token.DelegationTokenIssuergetCanonicalServiceName in class org.apache.hadoop.fs.FileSystemprotected java.net.URI canonicalizeUri(java.net.URI uri)
canonicalizeUri in class org.apache.hadoop.fs.FileSystempublic boolean isInSafeMode()
throws java.io.IOException
java.io.IOException - when there is an issue communicating with the NameNodepublic boolean isSnapshotTrashRootEnabled()
throws java.io.IOException
java.io.IOException - when there is an issue communicating with the NameNodepublic void allowSnapshot(org.apache.hadoop.fs.Path path)
throws java.io.IOException
java.io.IOExceptionHdfsAdmin.allowSnapshot(Path)public void disallowSnapshot(org.apache.hadoop.fs.Path path)
throws java.io.IOException
java.io.IOExceptionHdfsAdmin.disallowSnapshot(Path)public org.apache.hadoop.fs.Path createSnapshot(org.apache.hadoop.fs.Path path,
java.lang.String snapshotName)
throws java.io.IOException
createSnapshot in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic void renameSnapshot(org.apache.hadoop.fs.Path path,
java.lang.String snapshotOldName,
java.lang.String snapshotNewName)
throws java.io.IOException
renameSnapshot in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic SnapshottableDirectoryStatus[] getSnapshottableDirListing() throws java.io.IOException
java.io.IOException - If an I/O error occurred.public SnapshotStatus[] getSnapshotListing(org.apache.hadoop.fs.Path snapshotRoot) throws java.io.IOException
java.io.IOExceptionpublic void deleteSnapshot(org.apache.hadoop.fs.Path snapshotDir,
java.lang.String snapshotName)
throws java.io.IOException
deleteSnapshot in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic org.apache.hadoop.fs.RemoteIterator<SnapshotDiffReportListing> snapshotDiffReportListingRemoteIterator(org.apache.hadoop.fs.Path snapshotDir, java.lang.String fromSnapshot, java.lang.String toSnapshot) throws java.io.IOException
snapshotDir - full path of the directory where snapshots are takenfromSnapshot - snapshot name of the from point. Null indicates the current
treetoSnapshot - snapshot name of the to point. Null indicates the current
tree.java.io.IOExceptionpublic SnapshotDiffReport getSnapshotDiffReport(org.apache.hadoop.fs.Path snapshotDir, java.lang.String fromSnapshot, java.lang.String toSnapshot) throws java.io.IOException
java.io.IOExceptionDFSClient.getSnapshotDiffReportListing(java.lang.String, java.lang.String, java.lang.String, byte[], int)public SnapshotDiffReportListing getSnapshotDiffReportListing(org.apache.hadoop.fs.Path snapshotDir, java.lang.String fromSnapshotName, java.lang.String toSnapshotName, java.lang.String snapshotDiffStartPath, int snapshotDiffIndex) throws java.io.IOException
snapshotDir - full path of the directory where snapshots are taken.fromSnapshotName - snapshot name of the from point. Null indicates the current tree.toSnapshotName - snapshot name of the to point. Null indicates the current tree.snapshotDiffStartPath - path relative to the snapshottable root directory from where
the snapshotdiff computation needs to start.snapshotDiffIndex - index in the created or deleted list of the directory at which the
snapshotdiff computation stopped during the last rpc call. -1 indicates the diff
computation needs to start right from the start path.SnapshotDiffReportListing.java.io.IOException - if an I/O error occurred.public boolean isFileClosed(org.apache.hadoop.fs.Path src)
throws java.io.IOException
isFileClosed in interface org.apache.hadoop.fs.LeaseRecoverablesrc - The path to the filejava.io.FileNotFoundException - if the file does not exist.java.io.IOException - If an I/O error occurredpublic long addCacheDirective(CacheDirectiveInfo info) throws java.io.IOException
java.io.IOExceptionaddCacheDirective(CacheDirectiveInfo, EnumSet)public long addCacheDirective(CacheDirectiveInfo info, java.util.EnumSet<CacheFlag> flags) throws java.io.IOException
info - Information about a directive to add.flags - CacheFlags to use for this operation.java.io.IOException - if the directive could not be addedpublic void modifyCacheDirective(CacheDirectiveInfo info) throws java.io.IOException
java.io.IOExceptionmodifyCacheDirective(CacheDirectiveInfo, EnumSet)public void modifyCacheDirective(CacheDirectiveInfo info, java.util.EnumSet<CacheFlag> flags) throws java.io.IOException
info - Information about the directive to modify. You must set the ID
to indicate which CacheDirective you want to modify.flags - CacheFlags to use for this operation.java.io.IOException - if the directive could not be modifiedpublic void removeCacheDirective(long id)
throws java.io.IOException
id - identifier of the CacheDirectiveInfo to removejava.io.IOException - if the directive could not be removedpublic org.apache.hadoop.fs.RemoteIterator<CacheDirectiveEntry> listCacheDirectives(CacheDirectiveInfo filter) throws java.io.IOException
filter - Filter parameters to use when listing the directives, null to
list all directives visible to us.java.io.IOExceptionpublic void addCachePool(CachePoolInfo info) throws java.io.IOException
info - The request to add a cache pool.java.io.IOException - If the request could not be completed.public void modifyCachePool(CachePoolInfo info) throws java.io.IOException
info - The request to modify a cache pool.java.io.IOException - If the request could not be completed.public void removeCachePool(java.lang.String poolName)
throws java.io.IOException
poolName - Name of the cache pool to remove.java.io.IOException - if the cache pool did not exist, or could not be removed.public org.apache.hadoop.fs.RemoteIterator<CachePoolEntry> listCachePools() throws java.io.IOException
java.io.IOException - If there was an error listing cache pools.public void modifyAclEntries(org.apache.hadoop.fs.Path path,
java.util.List<org.apache.hadoop.fs.permission.AclEntry> aclSpec)
throws java.io.IOException
modifyAclEntries in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic void removeAclEntries(org.apache.hadoop.fs.Path path,
java.util.List<org.apache.hadoop.fs.permission.AclEntry> aclSpec)
throws java.io.IOException
removeAclEntries in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic void removeDefaultAcl(org.apache.hadoop.fs.Path path)
throws java.io.IOException
removeDefaultAcl in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic void removeAcl(org.apache.hadoop.fs.Path path)
throws java.io.IOException
removeAcl in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic void setAcl(org.apache.hadoop.fs.Path path,
java.util.List<org.apache.hadoop.fs.permission.AclEntry> aclSpec)
throws java.io.IOException
setAcl in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic org.apache.hadoop.fs.permission.AclStatus getAclStatus(org.apache.hadoop.fs.Path path)
throws java.io.IOException
getAclStatus in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic void createEncryptionZone(org.apache.hadoop.fs.Path path,
java.lang.String keyName)
throws java.io.IOException
java.io.IOExceptionpublic EncryptionZone getEZForPath(org.apache.hadoop.fs.Path path) throws java.io.IOException
java.io.IOExceptionpublic org.apache.hadoop.fs.RemoteIterator<EncryptionZone> listEncryptionZones() throws java.io.IOException
java.io.IOExceptionpublic void reencryptEncryptionZone(org.apache.hadoop.fs.Path zone,
HdfsConstants.ReencryptAction action)
throws java.io.IOException
java.io.IOExceptionpublic org.apache.hadoop.fs.RemoteIterator<ZoneReencryptionStatus> listReencryptionStatus() throws java.io.IOException
java.io.IOExceptionpublic org.apache.hadoop.fs.FileEncryptionInfo getFileEncryptionInfo(org.apache.hadoop.fs.Path path)
throws java.io.IOException
java.io.IOExceptionpublic void provisionEZTrash(org.apache.hadoop.fs.Path path,
org.apache.hadoop.fs.permission.FsPermission trashPermission)
throws java.io.IOException
java.io.IOExceptionpublic org.apache.hadoop.fs.Path provisionSnapshotTrash(org.apache.hadoop.fs.Path path,
org.apache.hadoop.fs.permission.FsPermission trashPermission)
throws java.io.IOException
path - Path to a snapshottable directory.trashPermission - Expected FsPermission of the trash root.java.io.IOExceptionpublic void setXAttr(org.apache.hadoop.fs.Path path,
java.lang.String name,
byte[] value,
java.util.EnumSet<org.apache.hadoop.fs.XAttrSetFlag> flag)
throws java.io.IOException
setXAttr in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic byte[] getXAttr(org.apache.hadoop.fs.Path path,
java.lang.String name)
throws java.io.IOException
getXAttr in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic java.util.Map<java.lang.String,byte[]> getXAttrs(org.apache.hadoop.fs.Path path)
throws java.io.IOException
getXAttrs in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic java.util.Map<java.lang.String,byte[]> getXAttrs(org.apache.hadoop.fs.Path path,
java.util.List<java.lang.String> names)
throws java.io.IOException
getXAttrs in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic java.util.List<java.lang.String> listXAttrs(org.apache.hadoop.fs.Path path)
throws java.io.IOException
listXAttrs in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic void removeXAttr(org.apache.hadoop.fs.Path path,
java.lang.String name)
throws java.io.IOException
removeXAttr in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic void access(org.apache.hadoop.fs.Path path,
org.apache.hadoop.fs.permission.FsAction mode)
throws java.io.IOException
access in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic java.net.URI getKeyProviderUri()
throws java.io.IOException
getKeyProviderUri in interface org.apache.hadoop.crypto.key.KeyProviderTokenIssuerjava.io.IOExceptionpublic org.apache.hadoop.crypto.key.KeyProvider getKeyProvider()
throws java.io.IOException
getKeyProvider in interface org.apache.hadoop.crypto.key.KeyProviderTokenIssuerjava.io.IOExceptionpublic org.apache.hadoop.security.token.DelegationTokenIssuer[] getAdditionalTokenIssuers()
throws java.io.IOException
getAdditionalTokenIssuers in interface org.apache.hadoop.security.token.DelegationTokenIssuergetAdditionalTokenIssuers in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic DFSInotifyEventInputStream getInotifyEventStream() throws java.io.IOException
java.io.IOExceptionpublic DFSInotifyEventInputStream getInotifyEventStream(long lastReadTxid) throws java.io.IOException
java.io.IOExceptionpublic void setErasureCodingPolicy(org.apache.hadoop.fs.Path path,
java.lang.String ecPolicyName)
throws java.io.IOException
path - The directory to set the policyecPolicyName - The erasure coding policy name.java.io.IOExceptionpublic void satisfyStoragePolicy(org.apache.hadoop.fs.Path path)
throws java.io.IOException
satisfyStoragePolicy in class org.apache.hadoop.fs.FileSystempath - The source path referring to either a directory or a file.java.io.IOExceptionpublic ErasureCodingPolicy getErasureCodingPolicy(org.apache.hadoop.fs.Path path) throws java.io.IOException
path - The path of the file or directoryjava.io.IOExceptionpublic java.util.Collection<ErasureCodingPolicyInfo> getAllErasureCodingPolicies() throws java.io.IOException
java.io.IOExceptionpublic java.util.Map<java.lang.String,java.lang.String> getAllErasureCodingCodecs()
throws java.io.IOException
java.io.IOExceptionpublic AddErasureCodingPolicyResponse[] addErasureCodingPolicies(ErasureCodingPolicy[] policies) throws java.io.IOException
policies - The user defined ec policy list to add.java.io.IOExceptionpublic void removeErasureCodingPolicy(java.lang.String ecPolicyName)
throws java.io.IOException
ecPolicyName - The name of the policy to be removed.java.io.IOExceptionpublic void enableErasureCodingPolicy(java.lang.String ecPolicyName)
throws java.io.IOException
ecPolicyName - The name of the policy to be enabled.java.io.IOExceptionpublic void disableErasureCodingPolicy(java.lang.String ecPolicyName)
throws java.io.IOException
ecPolicyName - The name of the policy to be disabled.java.io.IOExceptionpublic void unsetErasureCodingPolicy(org.apache.hadoop.fs.Path path)
throws java.io.IOException
path - The directory to unset the policyjava.io.IOExceptionpublic ECTopologyVerifierResult getECTopologyResultForPolicies(java.lang.String... policyNames) throws java.io.IOException
policyNames - name of policies.java.io.IOExceptionpublic org.apache.hadoop.fs.Path getTrashRoot(org.apache.hadoop.fs.Path path)
getTrashRoot in class org.apache.hadoop.fs.FileSystempath - the trash root of the path to be determined.public java.util.Collection<org.apache.hadoop.fs.FileStatus> getTrashRoots(boolean allUsers)
getTrashRoots in class org.apache.hadoop.fs.FileSystemallUsers - return trashRoots of all users if true, used by emptierprotected org.apache.hadoop.fs.Path fixRelativePart(org.apache.hadoop.fs.Path p)
fixRelativePart in class org.apache.hadoop.fs.FileSystempublic DistributedFileSystem.HdfsDataOutputStreamBuilder createFile(org.apache.hadoop.fs.Path path)
FileSystem.create(Path), file is overwritten by default.createFile in class org.apache.hadoop.fs.FileSystempath - the path of the file to create.@Deprecated public org.apache.hadoop.fs.RemoteIterator<OpenFileEntry> listOpenFiles() throws java.io.IOException
Since the list is fetched in batches, it does not represent a consistent snapshot of the all open files.
This method can only be called by HDFS superusers.
java.io.IOException@Deprecated public org.apache.hadoop.fs.RemoteIterator<OpenFileEntry> listOpenFiles(java.util.EnumSet<OpenFilesIterator.OpenFilesType> openFilesTypes) throws java.io.IOException
java.io.IOExceptionpublic org.apache.hadoop.fs.RemoteIterator<OpenFileEntry> listOpenFiles(java.util.EnumSet<OpenFilesIterator.OpenFilesType> openFilesTypes, java.lang.String path) throws java.io.IOException
java.io.IOExceptionpublic DistributedFileSystem.HdfsDataOutputStreamBuilder appendFile(org.apache.hadoop.fs.Path path)
DistributedFileSystem.HdfsDataOutputStreamBuilder to append a file on DFS.appendFile in class org.apache.hadoop.fs.FileSystempath - file path.DistributedFileSystem.HdfsDataOutputStreamBuilder for appending a file.public boolean hasPathCapability(org.apache.hadoop.fs.Path path,
java.lang.String capability)
throws java.io.IOException
DfsPathCapabilities to keep WebHdfsFileSystem in sync.
hasPathCapability in interface org.apache.hadoop.fs.PathCapabilitieshasPathCapability in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic org.apache.hadoop.fs.MultipartUploaderBuilder createMultipartUploader(org.apache.hadoop.fs.Path basePath)
throws java.io.IOException
createMultipartUploader in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic DatanodeInfo[] getSlowDatanodeStats() throws java.io.IOException
java.io.IOException - If an I/O error occurs.public LocatedBlocks getLocatedBlocks(org.apache.hadoop.fs.Path p, long start, long len) throws java.io.IOException
getFileBlockLocations(Path, long, long) except
that it returns LocatedBlocks rather than BlockLocation array.p - path representing the file of interest.start - offsetlen - lengthjava.io.IOExceptionpublic org.apache.hadoop.fs.Path getEnclosingRoot(org.apache.hadoop.fs.Path path)
throws java.io.IOException
getEnclosingRoot in class org.apache.hadoop.fs.FileSystempath - file path to find the enclosing root path forjava.io.IOException - early checks like failure to resolve path cause IO failuresCopyright © 2008–2025 Apache Software Foundation. All rights reserved.