All implemented interfaces: java.io.Closeable, java.lang.AutoCloseable, org.apache.hadoop.conf.Configurable, org.apache.hadoop.crypto.key.KeyProviderTokenIssuer, org.apache.hadoop.fs.BatchListingOperations, org.apache.hadoop.fs.BulkDeleteSource, org.apache.hadoop.fs.LeaseRecoverable, org.apache.hadoop.fs.PathCapabilities, org.apache.hadoop.fs.SafeMode, org.apache.hadoop.security.token.DelegationTokenIssuer

@Private @Unstable
public class ViewDistributedFileSystem extends DistributedFileSystem
ViewFileSystemOverloadScheme is a new
filesystem with inherited mounting functionality from ViewFileSystem.
For the user who is using ViewFileSystemOverloadScheme by setting
fs.hdfs.impl=org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme, now
they can set fs.hdfs.impl=org.apache.hadoop.hdfs.ViewDistributedFileSystem.
So, that the hdfs users will get closely compatible API with mount
functionality. For the rest of all other schemes can continue to use
ViewFileSystemOverloadScheme class directly for mount functionality. Please
note that ViewFileSystemOverloadScheme provides only
ViewFileSystem APIs.
If a user configures this class but no mount points are configured, then it
will simply work as the existing DistributedFileSystem class. If a user
configures both fs.hdfs.impl to this class and mount configurations, then
users will be able to call the APIs available in this class; they are nothing
but DFS APIs, but they will be delegated to viewfs functionality. Please note,
APIs without any path in their arguments (e.g. isInSafeMode) will be delegated to
default filesystem only, that is the configured fallback link. If you want to
make these API calls on specific child filesystem, you may want to initialize
them separately and call. In ViewDistributedFileSystem, we strongly recommend
configuring linkFallback when you add mount links, and it is recommended to
point it to your base cluster, usually your current fs.defaultFS if that is
pointing to hdfs.

Nested class: DistributedFileSystem.HdfsDataOutputStreamBuilder

| Constructor | Description |
|---|---|
| ViewDistributedFileSystem() | |
| Modifier and Type | Method | Description |
|---|---|---|
void |
access(org.apache.hadoop.fs.Path path,
org.apache.hadoop.fs.permission.FsAction mode) |
|
long |
addCacheDirective(CacheDirectiveInfo info) |
|
long |
addCacheDirective(CacheDirectiveInfo info,
java.util.EnumSet<CacheFlag> flags) |
Add a new CacheDirective.
|
void |
addCachePool(CachePoolInfo info) |
Add a cache pool.
|
AddErasureCodingPolicyResponse[] |
addErasureCodingPolicies(ErasureCodingPolicy[] policies) |
Add Erasure coding policies to HDFS.
|
void |
allowSnapshot(org.apache.hadoop.fs.Path path) |
|
org.apache.hadoop.fs.FSDataOutputStream |
append(org.apache.hadoop.fs.Path f,
int bufferSize,
org.apache.hadoop.util.Progressable progress) |
|
org.apache.hadoop.fs.FSDataOutputStream |
append(org.apache.hadoop.fs.Path f,
java.util.EnumSet<org.apache.hadoop.fs.CreateFlag> flag,
int bufferSize,
org.apache.hadoop.util.Progressable progress) |
Append to an existing file (optional operation).
|
org.apache.hadoop.fs.FSDataOutputStream |
append(org.apache.hadoop.fs.Path f,
java.util.EnumSet<org.apache.hadoop.fs.CreateFlag> flag,
int bufferSize,
org.apache.hadoop.util.Progressable progress,
java.net.InetSocketAddress[] favoredNodes) |
Append to an existing file (optional operation).
|
DistributedFileSystem.HdfsDataOutputStreamBuilder |
appendFile(org.apache.hadoop.fs.Path path) |
Create a
DistributedFileSystem.HdfsDataOutputStreamBuilder to append a file on DFS. |
org.apache.hadoop.fs.RemoteIterator<org.apache.hadoop.fs.PartialListing<org.apache.hadoop.fs.LocatedFileStatus>> |
batchedListLocatedStatusIterator(java.util.List<org.apache.hadoop.fs.Path> paths) |
|
org.apache.hadoop.fs.RemoteIterator<org.apache.hadoop.fs.PartialListing<org.apache.hadoop.fs.FileStatus>> |
batchedListStatusIterator(java.util.List<org.apache.hadoop.fs.Path> paths) |
|
protected java.net.URI |
canonicalizeUri(java.net.URI uri) |
|
void |
close() |
|
void |
concat(org.apache.hadoop.fs.Path trg,
org.apache.hadoop.fs.Path[] psrcs) |
Move blocks from srcs to trg and delete srcs afterwards.
|
org.apache.hadoop.fs.FSDataOutputStream |
create(org.apache.hadoop.fs.Path f,
org.apache.hadoop.fs.permission.FsPermission permission,
boolean overwrite,
int bufferSize,
short replication,
long blockSize,
org.apache.hadoop.util.Progressable progress) |
|
HdfsDataOutputStream |
create(org.apache.hadoop.fs.Path f,
org.apache.hadoop.fs.permission.FsPermission permission,
boolean overwrite,
int bufferSize,
short replication,
long blockSize,
org.apache.hadoop.util.Progressable progress,
java.net.InetSocketAddress[] favoredNodes) |
Same as
DistributedFileSystem.create(Path, FsPermission, boolean, int, short, long,
Progressable) with the addition of favoredNodes that is a hint to
where the namenode should place the file blocks. |
org.apache.hadoop.fs.FSDataOutputStream |
create(org.apache.hadoop.fs.Path f,
org.apache.hadoop.fs.permission.FsPermission permission,
java.util.EnumSet<org.apache.hadoop.fs.CreateFlag> cflags,
int bufferSize,
short replication,
long blockSize,
org.apache.hadoop.util.Progressable progress,
org.apache.hadoop.fs.Options.ChecksumOpt checksumOpt) |
|
void |
createEncryptionZone(org.apache.hadoop.fs.Path path,
java.lang.String keyName) |
|
DistributedFileSystem.HdfsDataOutputStreamBuilder |
createFile(org.apache.hadoop.fs.Path path) |
Create a HdfsDataOutputStreamBuilder to create a file on DFS.
|
org.apache.hadoop.fs.FSDataOutputStream |
createNonRecursive(org.apache.hadoop.fs.Path f,
org.apache.hadoop.fs.permission.FsPermission permission,
java.util.EnumSet<org.apache.hadoop.fs.CreateFlag> flags,
int bufferSize,
short replication,
long blockSize,
org.apache.hadoop.util.Progressable progress) |
Same as create(), except fails if parent directory doesn't already exist.
|
protected HdfsPathHandle |
createPathHandle(org.apache.hadoop.fs.FileStatus st,
org.apache.hadoop.fs.Options.HandleOpt... opts) |
Create a handle to an HDFS file.
|
org.apache.hadoop.fs.Path |
createSnapshot(org.apache.hadoop.fs.Path path,
java.lang.String snapshotName) |
|
void |
createSymlink(org.apache.hadoop.fs.Path target,
org.apache.hadoop.fs.Path link,
boolean createParent) |
|
boolean |
delete(org.apache.hadoop.fs.Path f) |
|
boolean |
delete(org.apache.hadoop.fs.Path f,
boolean recursive) |
|
void |
deleteSnapshot(org.apache.hadoop.fs.Path path,
java.lang.String snapshotName) |
|
void |
disableErasureCodingPolicy(java.lang.String ecPolicyName) |
Disable erasure coding policy.
|
void |
disallowSnapshot(org.apache.hadoop.fs.Path path) |
|
void |
enableErasureCodingPolicy(java.lang.String ecPolicyName) |
Enable erasure coding policy.
|
void |
finalizeUpgrade() |
Finalize previously upgraded files system state.
|
protected org.apache.hadoop.fs.Path |
fixRelativePart(org.apache.hadoop.fs.Path p) |
|
org.apache.hadoop.fs.permission.AclStatus |
getAclStatus(org.apache.hadoop.fs.Path path) |
|
org.apache.hadoop.security.token.DelegationTokenIssuer[] |
getAdditionalTokenIssuers() |
|
java.util.Map<java.lang.String,java.lang.String> |
getAllErasureCodingCodecs() |
Retrieve all the erasure coding codecs and coders supported by this file
system.
|
java.util.Collection<ErasureCodingPolicyInfo> |
getAllErasureCodingPolicies() |
Gets all erasure coding policies from all available child file systems.
|
java.util.Collection<BlockStoragePolicy> |
getAllStoragePolicies() |
|
long |
getBytesWithFutureGenerationStamps() |
Returns number of bytes within blocks with future generation stamp.
|
java.lang.String |
getCanonicalServiceName() |
Get a canonical service name for this file system.
|
org.apache.hadoop.fs.FileSystem[] |
getChildFileSystems() |
|
DFSClient |
getClient() |
|
org.apache.hadoop.fs.ContentSummary |
getContentSummary(org.apache.hadoop.fs.Path f) |
|
long |
getCorruptBlocksCount() |
Returns count of blocks with at least one replica marked corrupt.
|
DatanodeInfo[] |
getDataNodeStats() |
|
DatanodeInfo[] |
getDataNodeStats(HdfsConstants.DatanodeReportType type) |
|
long |
getDefaultBlockSize(org.apache.hadoop.fs.Path f) |
|
protected int |
getDefaultPort() |
|
short |
getDefaultReplication(org.apache.hadoop.fs.Path f) |
|
org.apache.hadoop.security.token.Token<DelegationTokenIdentifier> |
getDelegationToken(java.lang.String renewer) |
If no mount points configured, it works same as
DistributedFileSystem.getDelegationToken(String). |
ECTopologyVerifierResult |
getECTopologyResultForPolicies(java.lang.String... policyNames) |
Verifies if the given policies are supported in the given cluster setup.
|
ErasureCodingPolicy |
getErasureCodingPolicy(org.apache.hadoop.fs.Path path) |
Get erasure coding policy information for the specified path.
|
EncryptionZone |
getEZForPath(org.apache.hadoop.fs.Path path) |
|
org.apache.hadoop.fs.BlockLocation[] |
getFileBlockLocations(org.apache.hadoop.fs.FileStatus fs,
long start,
long len) |
|
org.apache.hadoop.fs.BlockLocation[] |
getFileBlockLocations(org.apache.hadoop.fs.Path p,
long start,
long len) |
The returned BlockLocation will have different formats for replicated
and erasure coded file.
|
org.apache.hadoop.fs.FileChecksum |
getFileChecksum(org.apache.hadoop.fs.Path f) |
|
org.apache.hadoop.fs.FileChecksum |
getFileChecksum(org.apache.hadoop.fs.Path f,
long length) |
|
org.apache.hadoop.fs.FileEncryptionInfo |
getFileEncryptionInfo(org.apache.hadoop.fs.Path path) |
|
org.apache.hadoop.fs.FileStatus |
getFileLinkStatus(org.apache.hadoop.fs.Path f) |
|
org.apache.hadoop.fs.FileStatus |
getFileStatus(org.apache.hadoop.fs.Path f) |
Returns the stat information about the file.
|
DFSHedgedReadMetrics |
getHedgedReadMetrics() |
Returns only default cluster getHedgedReadMetrics.
|
org.apache.hadoop.fs.Path |
getHomeDirectory() |
|
DFSInotifyEventInputStream |
getInotifyEventStream() |
|
DFSInotifyEventInputStream |
getInotifyEventStream(long lastReadTxid) |
|
org.apache.hadoop.crypto.key.KeyProvider |
getKeyProvider() |
|
java.net.URI |
getKeyProviderUri() |
|
org.apache.hadoop.fs.Path |
getLinkTarget(org.apache.hadoop.fs.Path path) |
|
long |
getLowRedundancyBlocksCount() |
Returns aggregated count of blocks with less redundancy.
|
long |
getMissingBlocksCount() |
Returns count of blocks with no good replicas left.
|
long |
getMissingReplOneBlocksCount() |
Returns count of blocks with replication factor 1 and have
lost the only replica.
|
org.apache.hadoop.fs.viewfs.ViewFileSystem.MountPoint[] |
getMountPoints() |
|
long |
getPendingDeletionBlocksCount() |
Returns count of blocks pending on deletion.
|
org.apache.hadoop.fs.QuotaUsage |
getQuotaUsage(org.apache.hadoop.fs.Path f) |
|
java.lang.String |
getScheme() |
Return the protocol scheme for the FileSystem.
|
org.apache.hadoop.fs.FsServerDefaults |
getServerDefaults() |
|
org.apache.hadoop.fs.FsServerDefaults |
getServerDefaults(org.apache.hadoop.fs.Path f) |
|
DatanodeInfo[] |
getSlowDatanodeStats() |
Retrieve stats for slow running datanodes.
|
SnapshotDiffReport |
getSnapshotDiffReport(org.apache.hadoop.fs.Path snapshotDir,
java.lang.String fromSnapshot,
java.lang.String toSnapshot) |
Get the difference between two snapshots, or between a snapshot and the
current tree of a directory.
|
SnapshottableDirectoryStatus[] |
getSnapshottableDirListing() |
Get the list of snapshottable directories that are owned
by the current user.
|
org.apache.hadoop.fs.FsStatus |
getStatus() |
|
org.apache.hadoop.fs.FsStatus |
getStatus(org.apache.hadoop.fs.Path p) |
|
BlockStoragePolicy[] |
getStoragePolicies() |
Deprecated.
|
org.apache.hadoop.fs.BlockStoragePolicySpi |
getStoragePolicy(org.apache.hadoop.fs.Path src) |
|
org.apache.hadoop.fs.Path |
getTrashRoot(org.apache.hadoop.fs.Path path) |
Get the root directory of Trash for a path in HDFS.
|
java.util.Collection<org.apache.hadoop.fs.FileStatus> |
getTrashRoots(boolean allUsers) |
Get all the trash roots of HDFS for current user or for all the users.
|
java.net.URI |
getUri() |
|
long |
getUsed() |
|
org.apache.hadoop.fs.Path |
getWorkingDirectory() |
|
byte[] |
getXAttr(org.apache.hadoop.fs.Path path,
java.lang.String name) |
|
java.util.Map<java.lang.String,byte[]> |
getXAttrs(org.apache.hadoop.fs.Path path) |
|
java.util.Map<java.lang.String,byte[]> |
getXAttrs(org.apache.hadoop.fs.Path path,
java.util.List<java.lang.String> names) |
|
boolean |
hasPathCapability(org.apache.hadoop.fs.Path path,
java.lang.String capability) |
HDFS client capabilities.
|
void |
initialize(java.net.URI uri,
org.apache.hadoop.conf.Configuration conf) |
|
boolean |
isFileClosed(org.apache.hadoop.fs.Path src) |
Get the close status of a file
|
boolean |
isInSafeMode() |
Utility function that returns if the NameNode is in safemode or not.
|
org.apache.hadoop.fs.RemoteIterator<CacheDirectiveEntry> |
listCacheDirectives(CacheDirectiveInfo filter) |
List cache directives.
|
org.apache.hadoop.fs.RemoteIterator<CachePoolEntry> |
listCachePools() |
List all cache pools.
|
org.apache.hadoop.fs.RemoteIterator<org.apache.hadoop.fs.Path> |
listCorruptFileBlocks(org.apache.hadoop.fs.Path path) |
|
org.apache.hadoop.fs.RemoteIterator<EncryptionZone> |
listEncryptionZones() |
Returns the results from default DFS (fallback).
|
org.apache.hadoop.fs.RemoteIterator<org.apache.hadoop.fs.LocatedFileStatus> |
listLocatedStatus(org.apache.hadoop.fs.Path f,
org.apache.hadoop.fs.PathFilter filter) |
The BlockLocation of returned LocatedFileStatus will have different
formats for replicated and erasure coded file.
|
org.apache.hadoop.fs.RemoteIterator<OpenFileEntry> |
listOpenFiles() |
Deprecated.
|
org.apache.hadoop.fs.RemoteIterator<OpenFileEntry> |
listOpenFiles(java.util.EnumSet<OpenFilesIterator.OpenFilesType> openFilesTypes) |
Deprecated.
|
org.apache.hadoop.fs.RemoteIterator<OpenFileEntry> |
listOpenFiles(java.util.EnumSet<OpenFilesIterator.OpenFilesType> openFilesTypes,
java.lang.String path) |
|
org.apache.hadoop.fs.RemoteIterator<ZoneReencryptionStatus> |
listReencryptionStatus() |
Returns the results from default DFS (fallback).
|
org.apache.hadoop.fs.FileStatus[] |
listStatus(org.apache.hadoop.fs.Path p) |
List all the entries of a directory
Note that this operation is not atomic for a large directory.
|
org.apache.hadoop.fs.RemoteIterator<org.apache.hadoop.fs.FileStatus> |
listStatusIterator(org.apache.hadoop.fs.Path p) |
Returns a remote iterator so that followup calls are made on demand
while consuming the entries.
|
java.util.List<java.lang.String> |
listXAttrs(org.apache.hadoop.fs.Path path) |
|
void |
metaSave(java.lang.String pathname) |
|
boolean |
mkdir(org.apache.hadoop.fs.Path f,
org.apache.hadoop.fs.permission.FsPermission permission) |
Create a directory, only when the parent directories exist.
|
boolean |
mkdirs(org.apache.hadoop.fs.Path dir) |
|
boolean |
mkdirs(org.apache.hadoop.fs.Path f,
org.apache.hadoop.fs.permission.FsPermission permission) |
Create a directory and its parent directories.
|
void |
modifyAclEntries(org.apache.hadoop.fs.Path path,
java.util.List<org.apache.hadoop.fs.permission.AclEntry> aclSpec) |
|
void |
modifyCacheDirective(CacheDirectiveInfo info) |
|
void |
modifyCacheDirective(CacheDirectiveInfo info,
java.util.EnumSet<CacheFlag> flags) |
Modify a CacheDirective.
|
void |
modifyCachePool(CachePoolInfo info) |
Modify an existing cache pool.
|
org.apache.hadoop.fs.FSDataInputStream |
open(org.apache.hadoop.fs.PathHandle fd,
int bufferSize) |
Opens an FSDataInputStream with the indicated file ID extracted from
the
PathHandle. |
org.apache.hadoop.fs.FSDataInputStream |
open(org.apache.hadoop.fs.Path f,
int bufferSize) |
|
protected HdfsDataOutputStream |
primitiveCreate(org.apache.hadoop.fs.Path f,
org.apache.hadoop.fs.permission.FsPermission absolutePermission,
java.util.EnumSet<org.apache.hadoop.fs.CreateFlag> flag,
int bufferSize,
short replication,
long blockSize,
org.apache.hadoop.util.Progressable progress,
org.apache.hadoop.fs.Options.ChecksumOpt checksumOpt) |
|
protected boolean |
primitiveMkdir(org.apache.hadoop.fs.Path f,
org.apache.hadoop.fs.permission.FsPermission absolutePermission) |
|
void |
provisionEZTrash(org.apache.hadoop.fs.Path path,
org.apache.hadoop.fs.permission.FsPermission trashPermission) |
|
org.apache.hadoop.fs.Path |
provisionSnapshotTrash(org.apache.hadoop.fs.Path path,
org.apache.hadoop.fs.permission.FsPermission trashPermission) |
HDFS only.
|
boolean |
recoverLease(org.apache.hadoop.fs.Path f) |
Start the lease recovery of a file
|
void |
reencryptEncryptionZone(org.apache.hadoop.fs.Path zone,
HdfsConstants.ReencryptAction action) |
|
void |
refreshNodes() |
Refreshes the list of hosts and excluded hosts from the configured
files.
|
void |
removeAcl(org.apache.hadoop.fs.Path path) |
|
void |
removeAclEntries(org.apache.hadoop.fs.Path path,
java.util.List<org.apache.hadoop.fs.permission.AclEntry> aclSpec) |
|
void |
removeCacheDirective(long id) |
Remove a CacheDirectiveInfo.
|
void |
removeCachePool(java.lang.String poolName) |
Remove a cache pool.
|
void |
removeDefaultAcl(org.apache.hadoop.fs.Path path) |
|
void |
removeErasureCodingPolicy(java.lang.String ecPolicyName) |
Remove erasure coding policy.
|
void |
removeXAttr(org.apache.hadoop.fs.Path path,
java.lang.String name) |
|
boolean |
rename(org.apache.hadoop.fs.Path src,
org.apache.hadoop.fs.Path dst) |
|
void |
rename(org.apache.hadoop.fs.Path src,
org.apache.hadoop.fs.Path dst,
org.apache.hadoop.fs.Options.Rename... options) |
This rename operation is guaranteed to be atomic.
|
void |
renameSnapshot(org.apache.hadoop.fs.Path path,
java.lang.String snapshotOldName,
java.lang.String snapshotNewName) |
|
protected org.apache.hadoop.fs.Path |
resolveLink(org.apache.hadoop.fs.Path f) |
|
org.apache.hadoop.fs.Path |
resolvePath(org.apache.hadoop.fs.Path f) |
|
boolean |
restoreFailedStorage(java.lang.String arg) |
Enable/disable/check restoreFailedStorage.
|
long |
rollEdits() |
Rolls the edit log on the active NameNode.
|
RollingUpgradeInfo |
rollingUpgrade(HdfsConstants.RollingUpgradeAction action) |
Rolling upgrade: prepare/finalize/query.
|
void |
satisfyStoragePolicy(org.apache.hadoop.fs.Path src) |
Set the source path to satisfy storage policy.
|
void |
saveNamespace() |
Save namespace image.
|
boolean |
saveNamespace(long timeWindow,
long txGap) |
Save namespace image.
|
void |
setAcl(org.apache.hadoop.fs.Path path,
java.util.List<org.apache.hadoop.fs.permission.AclEntry> aclSpec) |
|
void |
setBalancerBandwidth(long bandwidth) |
Requests the namenode to tell all datanodes to use a new, non-persistent
bandwidth value for dfs.datanode.balance.bandwidthPerSec.
|
void |
setErasureCodingPolicy(org.apache.hadoop.fs.Path path,
java.lang.String ecPolicyName) |
Set the source path to the specified erasure coding policy.
|
void |
setOwner(org.apache.hadoop.fs.Path f,
java.lang.String username,
java.lang.String groupname) |
|
void |
setPermission(org.apache.hadoop.fs.Path f,
org.apache.hadoop.fs.permission.FsPermission permission) |
|
void |
setQuota(org.apache.hadoop.fs.Path src,
long namespaceQuota,
long storagespaceQuota) |
Set a directory's quotas
|
void |
setQuotaByStorageType(org.apache.hadoop.fs.Path src,
org.apache.hadoop.fs.StorageType type,
long quota) |
Set the per type storage quota of a directory.
|
boolean |
setReplication(org.apache.hadoop.fs.Path f,
short replication) |
|
boolean |
setSafeMode(HdfsConstants.SafeModeAction action) |
Enter, leave or get safe mode.
|
boolean |
setSafeMode(HdfsConstants.SafeModeAction action,
boolean isChecked) |
Enter, leave or get safe mode.
|
void |
setStoragePolicy(org.apache.hadoop.fs.Path src,
java.lang.String policyName) |
Set the source path to the specified storage policy.
|
void |
setTimes(org.apache.hadoop.fs.Path f,
long mtime,
long atime) |
|
void |
setVerifyChecksum(boolean verifyChecksum) |
|
void |
setWorkingDirectory(org.apache.hadoop.fs.Path dir) |
|
void |
setWriteChecksum(boolean writeChecksum) |
|
void |
setXAttr(org.apache.hadoop.fs.Path path,
java.lang.String name,
byte[] value,
java.util.EnumSet<org.apache.hadoop.fs.XAttrSetFlag> flag) |
|
org.apache.hadoop.fs.RemoteIterator<SnapshotDiffReportListing> |
snapshotDiffReportListingRemoteIterator(org.apache.hadoop.fs.Path snapshotDir,
java.lang.String fromSnapshot,
java.lang.String toSnapshot) |
Returns a remote iterator so that followup calls are made on demand
while consuming the SnapshotDiffReportListing entries.
|
boolean |
supportsSymlinks() |
|
boolean |
truncate(org.apache.hadoop.fs.Path f,
long newLength) |
|
void |
unsetErasureCodingPolicy(org.apache.hadoop.fs.Path path) |
Unset the erasure coding policy from the source path.
|
void |
unsetStoragePolicy(org.apache.hadoop.fs.Path src) |
|
boolean |
upgradeStatus() |
Get status of upgrade - finalized or not.
|
Methods inherited from class org.apache.hadoop.hdfs.DistributedFileSystem:
append, createMultipartUploader, getDefaultBlockSize, getDefaultReplication, getEnclosingRoot, getLocatedBlocks, getSnapshotDiffReportListing, getSnapshotListing, isSnapshotTrashRootEnabled, msync, setSafeMode, setSafeMode, toString

Methods inherited from class org.apache.hadoop.fs.FileSystem:
append, append, append, areSymlinksEnabled, cancelDeleteOnExit, checkPath, clearStatistics, closeAll, closeAllForUGI, completeLocalOutput, copyFromLocalFile, copyFromLocalFile, copyFromLocalFile, copyFromLocalFile, copyToLocalFile, copyToLocalFile, copyToLocalFile, create, create, create, create, create, create, create, create, create, create, create, createBulkDelete, createDataInputStreamBuilder, createDataInputStreamBuilder, createDataOutputStreamBuilder, createFid, createNewFile, createNonRecursive, createNonRecursive, createPathId, createSnapshot, deleteFid, deleteOnExit, enableSymlinks, exists, get, get, get, getAllStatistics, getBlockSize, getCanonicalUri, getDefaultUri, getFileSystemClass, getFSofPath, getGlobalStorageStatistics, getInitialWorkingDirectory, getJobTrackerAddrs, getLength, getLocal, getName, getNamed, getPathHandle, getReplication, getStatistics, getStatistics, getStorageStatistics, getUsed, getZkConnectString, globStatus, globStatus, isDirectory, isFile, listFiles, listLocatedStatus, listStatus, listStatus, listStatus, listStatusBatch, makeQualified, mkdirs, mkdirsFid, mkdirsFid, moveFromLocalFile, moveFromLocalFile, moveToLocalFile, newInstance, newInstance, newInstance, newInstanceLocal, open, open, openFid, openFid, openFid2, openFile, openFile, openFileWithOptions, openFileWithOptions, primitiveMkdir, printStatistics, processDeleteOnExit, setDefaultUri, setDefaultUri, setOwnerFid, setXAttr, startLocalOutput

public void initialize(java.net.URI uri,
org.apache.hadoop.conf.Configuration conf)
throws java.io.IOException
initialize in class DistributedFileSystemjava.io.IOExceptionpublic java.net.URI getUri()
getUri in class DistributedFileSystempublic java.lang.String getScheme()
DistributedFileSystemgetScheme in class DistributedFileSystemhdfspublic org.apache.hadoop.fs.Path getWorkingDirectory()
getWorkingDirectory in class DistributedFileSystempublic void setWorkingDirectory(org.apache.hadoop.fs.Path dir)
setWorkingDirectory in class DistributedFileSystempublic org.apache.hadoop.fs.Path getHomeDirectory()
getHomeDirectory in class DistributedFileSystempublic DFSHedgedReadMetrics getHedgedReadMetrics()
getHedgedReadMetrics in class DistributedFileSystempublic org.apache.hadoop.fs.BlockLocation[] getFileBlockLocations(org.apache.hadoop.fs.FileStatus fs,
long start,
long len)
throws java.io.IOException
getFileBlockLocations in class DistributedFileSystemjava.io.IOExceptionpublic org.apache.hadoop.fs.BlockLocation[] getFileBlockLocations(org.apache.hadoop.fs.Path p,
long start,
long len)
throws java.io.IOException
DistributedFileSystemFileSystem.getFileBlockLocations(FileStatus, long, long)
for more details.getFileBlockLocations in class DistributedFileSystemjava.io.IOExceptionpublic void setVerifyChecksum(boolean verifyChecksum)
setVerifyChecksum in class DistributedFileSystempublic boolean recoverLease(org.apache.hadoop.fs.Path f)
throws java.io.IOException
DistributedFileSystemrecoverLease in interface org.apache.hadoop.fs.LeaseRecoverablerecoverLease in class DistributedFileSystemf - a filejava.io.IOException - if an error occurspublic org.apache.hadoop.fs.FSDataInputStream open(org.apache.hadoop.fs.Path f,
int bufferSize)
throws org.apache.hadoop.security.AccessControlException,
java.io.FileNotFoundException,
java.io.IOException
open in class DistributedFileSystemorg.apache.hadoop.security.AccessControlExceptionjava.io.FileNotFoundExceptionjava.io.IOExceptionpublic org.apache.hadoop.fs.FSDataInputStream open(org.apache.hadoop.fs.PathHandle fd,
int bufferSize)
throws java.io.IOException
DistributedFileSystemPathHandle.open in class DistributedFileSystemfd - Reference to entity in this FileSystem.bufferSize - the size of the buffer to be used.org.apache.hadoop.fs.InvalidPathHandleException - If PathHandle constraints do not holdjava.io.IOException - On I/O errorsprotected HdfsPathHandle createPathHandle(org.apache.hadoop.fs.FileStatus st, org.apache.hadoop.fs.Options.HandleOpt... opts)
DistributedFileSystemcreatePathHandle in class DistributedFileSystemst - HdfsFileStatus instance from NameNodeopts - Standard handle argumentspublic org.apache.hadoop.fs.FSDataOutputStream append(org.apache.hadoop.fs.Path f,
int bufferSize,
org.apache.hadoop.util.Progressable progress)
throws java.io.IOException
append in class DistributedFileSystemjava.io.IOExceptionpublic org.apache.hadoop.fs.FSDataOutputStream append(org.apache.hadoop.fs.Path f,
java.util.EnumSet<org.apache.hadoop.fs.CreateFlag> flag,
int bufferSize,
org.apache.hadoop.util.Progressable progress)
throws java.io.IOException
DistributedFileSystemappend in class DistributedFileSystemf - the existing file to be appended.flag - Flags for the Append operation. CreateFlag.APPEND is mandatory
to be present.bufferSize - the size of the buffer to be used.progress - for reporting progress if it is not null.FSDataOutputStreamjava.io.IOExceptionpublic org.apache.hadoop.fs.FSDataOutputStream append(org.apache.hadoop.fs.Path f,
java.util.EnumSet<org.apache.hadoop.fs.CreateFlag> flag,
int bufferSize,
org.apache.hadoop.util.Progressable progress,
java.net.InetSocketAddress[] favoredNodes)
throws java.io.IOException
DistributedFileSystemappend in class DistributedFileSystemf - the existing file to be appended.flag - Flags for the Append operation. CreateFlag.APPEND is mandatory
to be present.bufferSize - the size of the buffer to be used.progress - for reporting progress if it is not null.favoredNodes - Favored nodes for new blocksFSDataOutputStreamjava.io.IOExceptionpublic org.apache.hadoop.fs.FSDataOutputStream create(org.apache.hadoop.fs.Path f,
org.apache.hadoop.fs.permission.FsPermission permission,
boolean overwrite,
int bufferSize,
short replication,
long blockSize,
org.apache.hadoop.util.Progressable progress)
throws java.io.IOException
create in class DistributedFileSystemjava.io.IOExceptionpublic HdfsDataOutputStream create(org.apache.hadoop.fs.Path f, org.apache.hadoop.fs.permission.FsPermission permission, boolean overwrite, int bufferSize, short replication, long blockSize, org.apache.hadoop.util.Progressable progress, java.net.InetSocketAddress[] favoredNodes) throws java.io.IOException
DistributedFileSystemDistributedFileSystem.create(Path, FsPermission, boolean, int, short, long,
Progressable) with the addition of favoredNodes that is a hint to
where the namenode should place the file blocks.
The favored nodes hint is not persisted in HDFS. Hence it may be honored
at the creation time only. And with favored nodes, blocks will be pinned
on the datanodes to prevent balancing move the block. HDFS could move the
blocks during replication, to move the blocks from favored nodes. A value
of null means no favored nodes for this createcreate in class DistributedFileSystemjava.io.IOExceptionpublic org.apache.hadoop.fs.FSDataOutputStream create(org.apache.hadoop.fs.Path f,
org.apache.hadoop.fs.permission.FsPermission permission,
java.util.EnumSet<org.apache.hadoop.fs.CreateFlag> cflags,
int bufferSize,
short replication,
long blockSize,
org.apache.hadoop.util.Progressable progress,
org.apache.hadoop.fs.Options.ChecksumOpt checksumOpt)
throws java.io.IOException
create in class DistributedFileSystemjava.io.IOExceptionprotected HdfsDataOutputStream primitiveCreate(org.apache.hadoop.fs.Path f, org.apache.hadoop.fs.permission.FsPermission absolutePermission, java.util.EnumSet<org.apache.hadoop.fs.CreateFlag> flag, int bufferSize, short replication, long blockSize, org.apache.hadoop.util.Progressable progress, org.apache.hadoop.fs.Options.ChecksumOpt checksumOpt) throws java.io.IOException
primitiveCreate in class DistributedFileSystemjava.io.IOExceptionpublic org.apache.hadoop.fs.FSDataOutputStream createNonRecursive(org.apache.hadoop.fs.Path f,
org.apache.hadoop.fs.permission.FsPermission permission,
java.util.EnumSet<org.apache.hadoop.fs.CreateFlag> flags,
int bufferSize,
short replication,
long blockSize,
org.apache.hadoop.util.Progressable progress)
throws java.io.IOException
DistributedFileSystemcreateNonRecursive in class DistributedFileSystemjava.io.IOExceptionpublic boolean setReplication(org.apache.hadoop.fs.Path f,
short replication)
throws org.apache.hadoop.security.AccessControlException,
java.io.FileNotFoundException,
java.io.IOException
setReplication in class DistributedFileSystemorg.apache.hadoop.security.AccessControlExceptionjava.io.FileNotFoundExceptionjava.io.IOExceptionpublic void setStoragePolicy(org.apache.hadoop.fs.Path src,
java.lang.String policyName)
throws java.io.IOException
DistributedFileSystemsetStoragePolicy in class DistributedFileSystemsrc - The source path referring to either a directory or a file.policyName - The name of the storage policy.java.io.IOExceptionpublic void unsetStoragePolicy(org.apache.hadoop.fs.Path src)
throws java.io.IOException
unsetStoragePolicy in class DistributedFileSystemjava.io.IOExceptionpublic org.apache.hadoop.fs.BlockStoragePolicySpi getStoragePolicy(org.apache.hadoop.fs.Path src)
throws java.io.IOException
getStoragePolicy in class DistributedFileSystemjava.io.IOExceptionpublic java.util.Collection<BlockStoragePolicy> getAllStoragePolicies() throws java.io.IOException
getAllStoragePolicies in class DistributedFileSystemjava.io.IOExceptionpublic long getBytesWithFutureGenerationStamps()
throws java.io.IOException
DistributedFileSystemgetBytesWithFutureGenerationStamps in class DistributedFileSystemjava.io.IOException@Deprecated public BlockStoragePolicy[] getStoragePolicies() throws java.io.IOException
DistributedFileSystemFileSystem.getAllStoragePolicies()getStoragePolicies in class DistributedFileSystemjava.io.IOExceptionpublic void concat(org.apache.hadoop.fs.Path trg,
org.apache.hadoop.fs.Path[] psrcs)
throws java.io.IOException
DistributedFileSystemconcat in class DistributedFileSystemtrg - existing file to append topsrcs - list of files (same block size, same replication)java.io.IOExceptionpublic boolean rename(org.apache.hadoop.fs.Path src,
org.apache.hadoop.fs.Path dst)
throws java.io.IOException
rename in class DistributedFileSystemjava.io.IOExceptionpublic void rename(org.apache.hadoop.fs.Path src,
org.apache.hadoop.fs.Path dst,
org.apache.hadoop.fs.Options.Rename... options)
throws java.io.IOException
DistributedFileSystemrename in class DistributedFileSystemjava.io.IOExceptionpublic boolean truncate(org.apache.hadoop.fs.Path f,
long newLength)
throws java.io.IOException
truncate in class DistributedFileSystemjava.io.IOExceptionpublic boolean delete(org.apache.hadoop.fs.Path f,
boolean recursive)
throws org.apache.hadoop.security.AccessControlException,
java.io.FileNotFoundException,
java.io.IOException
delete in class DistributedFileSystemorg.apache.hadoop.security.AccessControlExceptionjava.io.FileNotFoundExceptionjava.io.IOExceptionpublic org.apache.hadoop.fs.ContentSummary getContentSummary(org.apache.hadoop.fs.Path f)
throws java.io.IOException
getContentSummary in class DistributedFileSystemjava.io.IOExceptionpublic org.apache.hadoop.fs.QuotaUsage getQuotaUsage(org.apache.hadoop.fs.Path f)
throws java.io.IOException
getQuotaUsage in class DistributedFileSystemjava.io.IOExceptionpublic void setQuota(org.apache.hadoop.fs.Path src,
long namespaceQuota,
long storagespaceQuota)
throws java.io.IOException
DistributedFileSystemsetQuota in class DistributedFileSystemjava.io.IOExceptionClientProtocol.setQuota(String,
long, long, StorageType)public void setQuotaByStorageType(org.apache.hadoop.fs.Path src,
org.apache.hadoop.fs.StorageType type,
long quota)
throws java.io.IOException
DistributedFileSystemsetQuotaByStorageType in class DistributedFileSystemsrc - target directory whose quota is to be modified.type - storage type of the specific storage type quota to be modified.quota - value of the specific storage type quota to be modified.
May be HdfsConstants.QUOTA_RESET to clear the quota by storage type.java.io.IOExceptionpublic org.apache.hadoop.fs.FileStatus[] listStatus(org.apache.hadoop.fs.Path p)
throws java.io.IOException
DistributedFileSystemDistributedFileSystem.getFileStatus(Path f)listStatus in class DistributedFileSystemjava.io.IOExceptionpublic org.apache.hadoop.fs.RemoteIterator<org.apache.hadoop.fs.LocatedFileStatus> listLocatedStatus(org.apache.hadoop.fs.Path f,
org.apache.hadoop.fs.PathFilter filter)
throws java.io.FileNotFoundException,
java.io.IOException
DistributedFileSystemFileSystem.getFileBlockLocations(FileStatus, long, long) for
more details.listLocatedStatus in class DistributedFileSystemjava.io.FileNotFoundExceptionjava.io.IOExceptionpublic org.apache.hadoop.fs.RemoteIterator<org.apache.hadoop.fs.FileStatus> listStatusIterator(org.apache.hadoop.fs.Path p)
throws java.io.IOException
DistributedFileSystemlistStatusIterator in class DistributedFileSystemp - target pathjava.io.IOExceptionpublic org.apache.hadoop.fs.RemoteIterator<org.apache.hadoop.fs.PartialListing<org.apache.hadoop.fs.FileStatus>> batchedListStatusIterator(java.util.List<org.apache.hadoop.fs.Path> paths)
throws java.io.IOException
batchedListStatusIterator in interface org.apache.hadoop.fs.BatchListingOperationsbatchedListStatusIterator in class DistributedFileSystemjava.io.IOExceptionpublic org.apache.hadoop.fs.RemoteIterator<org.apache.hadoop.fs.PartialListing<org.apache.hadoop.fs.LocatedFileStatus>> batchedListLocatedStatusIterator(java.util.List<org.apache.hadoop.fs.Path> paths)
throws java.io.IOException
batchedListLocatedStatusIterator in interface org.apache.hadoop.fs.BatchListingOperationsbatchedListLocatedStatusIterator in class DistributedFileSystemjava.io.IOExceptionpublic boolean mkdir(org.apache.hadoop.fs.Path f,
org.apache.hadoop.fs.permission.FsPermission permission)
throws java.io.IOException
DistributedFileSystemFsPermission.applyUMask(FsPermission) for details of how
the permission is applied.mkdir in class DistributedFileSystemf - The path to createpermission - The permission. See FsPermission#applyUMask for
details about how this is used to calculate the
effective permission.java.io.IOExceptionpublic boolean mkdirs(org.apache.hadoop.fs.Path f,
org.apache.hadoop.fs.permission.FsPermission permission)
throws java.io.IOException
DistributedFileSystemFsPermission.applyUMask(FsPermission) for details of how
the permission is applied.mkdirs in class DistributedFileSystemf - The path to createpermission - The permission. See FsPermission#applyUMask for
details about how this is used to calculate the
effective permission.java.io.IOExceptionprotected boolean primitiveMkdir(org.apache.hadoop.fs.Path f,
org.apache.hadoop.fs.permission.FsPermission absolutePermission)
throws java.io.IOException
primitiveMkdir in class DistributedFileSystemjava.io.IOExceptionpublic void close()
throws java.io.IOException
close in interface java.lang.AutoCloseableclose in interface java.io.Closeableclose in class DistributedFileSystemjava.io.IOException@Private public DFSClient getClient()
getClient in class DistributedFileSystempublic org.apache.hadoop.fs.FsStatus getStatus(org.apache.hadoop.fs.Path p)
throws java.io.IOException
getStatus in class DistributedFileSystemjava.io.IOExceptionpublic long getMissingBlocksCount()
throws java.io.IOException
DistributedFileSystemgetMissingBlocksCount in class DistributedFileSystemjava.io.IOExceptionpublic long getPendingDeletionBlocksCount()
throws java.io.IOException
DistributedFileSystemgetPendingDeletionBlocksCount in class DistributedFileSystemjava.io.IOExceptionpublic long getMissingReplOneBlocksCount()
throws java.io.IOException
DistributedFileSystemgetMissingReplOneBlocksCount in class DistributedFileSystemjava.io.IOExceptionpublic long getLowRedundancyBlocksCount()
throws java.io.IOException
DistributedFileSystemgetLowRedundancyBlocksCount in class DistributedFileSystemjava.io.IOExceptionpublic long getCorruptBlocksCount()
throws java.io.IOException
DistributedFileSystemgetCorruptBlocksCount in class DistributedFileSystemjava.io.IOExceptionpublic org.apache.hadoop.fs.RemoteIterator<org.apache.hadoop.fs.Path> listCorruptFileBlocks(org.apache.hadoop.fs.Path path)
throws java.io.IOException
listCorruptFileBlocks in class DistributedFileSystemjava.io.IOExceptionpublic DatanodeInfo[] getDataNodeStats() throws java.io.IOException
getDataNodeStats in class DistributedFileSystemjava.io.IOExceptionpublic DatanodeInfo[] getDataNodeStats(HdfsConstants.DatanodeReportType type) throws java.io.IOException
getDataNodeStats in class DistributedFileSystemjava.io.IOExceptionpublic boolean setSafeMode(HdfsConstants.SafeModeAction action) throws java.io.IOException
DistributedFileSystemsetSafeMode in class DistributedFileSystemjava.io.IOExceptionClientProtocol.setSafeMode(HdfsConstants.SafeModeAction,
boolean)public boolean setSafeMode(HdfsConstants.SafeModeAction action, boolean isChecked) throws java.io.IOException
DistributedFileSystemsetSafeMode in class DistributedFileSystemaction - One of SafeModeAction.ENTER, SafeModeAction.LEAVE and
SafeModeAction.GET.isChecked - If true, check only the Active NN's status; else check the first NN's
status.java.io.IOExceptionClientProtocol.setSafeMode(HdfsConstants.SafeModeAction,
boolean)public boolean saveNamespace(long timeWindow,
long txGap)
throws java.io.IOException
DistributedFileSystemsaveNamespace in class DistributedFileSystemtimeWindow - NameNode can ignore this command if the latest
checkpoint was done within the given time period (in
seconds).java.io.IOExceptionClientProtocol.saveNamespace(long, long)public void saveNamespace()
throws java.io.IOException
DistributedFileSystemsaveNamespace in class DistributedFileSystemjava.io.IOExceptionpublic long rollEdits()
throws java.io.IOException
DistributedFileSystemrollEdits in class DistributedFileSystemjava.io.IOExceptionClientProtocol.rollEdits()public boolean restoreFailedStorage(java.lang.String arg)
throws java.io.IOException
DistributedFileSystemrestoreFailedStorage in class DistributedFileSystemjava.io.IOExceptionClientProtocol.restoreFailedStorage(String arg)public void refreshNodes()
throws java.io.IOException
DistributedFileSystemrefreshNodes in class DistributedFileSystemjava.io.IOExceptionpublic void finalizeUpgrade()
throws java.io.IOException
DistributedFileSystemfinalizeUpgrade in class DistributedFileSystemjava.io.IOExceptionpublic boolean upgradeStatus()
throws java.io.IOException
DistributedFileSystemupgradeStatus in class DistributedFileSystemjava.io.IOExceptionpublic RollingUpgradeInfo rollingUpgrade(HdfsConstants.RollingUpgradeAction action) throws java.io.IOException
DistributedFileSystemrollingUpgrade in class DistributedFileSystemjava.io.IOExceptionpublic void metaSave(java.lang.String pathname)
throws java.io.IOException
metaSave in class DistributedFileSystemjava.io.IOExceptionpublic org.apache.hadoop.fs.FsServerDefaults getServerDefaults()
throws java.io.IOException
getServerDefaults in class DistributedFileSystemjava.io.IOExceptionpublic org.apache.hadoop.fs.FileStatus getFileStatus(org.apache.hadoop.fs.Path f)
throws org.apache.hadoop.security.AccessControlException,
java.io.FileNotFoundException,
java.io.IOException
DistributedFileSystemgetFileStatus in class DistributedFileSystemjava.io.FileNotFoundException - if the file does not exist.org.apache.hadoop.security.AccessControlExceptionjava.io.IOExceptionpublic void createSymlink(org.apache.hadoop.fs.Path target,
org.apache.hadoop.fs.Path link,
boolean createParent)
throws java.io.IOException
createSymlink in class DistributedFileSystemjava.io.IOExceptionpublic boolean supportsSymlinks()
supportsSymlinks in class DistributedFileSystempublic org.apache.hadoop.fs.FileStatus getFileLinkStatus(org.apache.hadoop.fs.Path f)
throws java.io.IOException
getFileLinkStatus in class DistributedFileSystemjava.io.IOExceptionpublic org.apache.hadoop.fs.Path getLinkTarget(org.apache.hadoop.fs.Path path)
throws java.io.IOException
getLinkTarget in class DistributedFileSystemjava.io.IOExceptionprotected org.apache.hadoop.fs.Path resolveLink(org.apache.hadoop.fs.Path f)
throws java.io.IOException
resolveLink in class DistributedFileSystemjava.io.IOExceptionpublic org.apache.hadoop.fs.FileChecksum getFileChecksum(org.apache.hadoop.fs.Path f)
throws org.apache.hadoop.security.AccessControlException,
java.io.FileNotFoundException,
java.io.IOException
getFileChecksum in class DistributedFileSystemorg.apache.hadoop.security.AccessControlExceptionjava.io.FileNotFoundExceptionjava.io.IOExceptionpublic void setPermission(org.apache.hadoop.fs.Path f,
org.apache.hadoop.fs.permission.FsPermission permission)
throws org.apache.hadoop.security.AccessControlException,
java.io.FileNotFoundException,
java.io.IOException
setPermission in class DistributedFileSystemorg.apache.hadoop.security.AccessControlExceptionjava.io.FileNotFoundExceptionjava.io.IOExceptionpublic void setOwner(org.apache.hadoop.fs.Path f,
java.lang.String username,
java.lang.String groupname)
throws org.apache.hadoop.security.AccessControlException,
java.io.FileNotFoundException,
java.io.IOException
setOwner in class DistributedFileSystemorg.apache.hadoop.security.AccessControlExceptionjava.io.FileNotFoundExceptionjava.io.IOExceptionpublic void setTimes(org.apache.hadoop.fs.Path f,
long mtime,
long atime)
throws org.apache.hadoop.security.AccessControlException,
java.io.FileNotFoundException,
java.io.IOException
setTimes in class DistributedFileSystemorg.apache.hadoop.security.AccessControlExceptionjava.io.FileNotFoundExceptionjava.io.IOExceptionprotected int getDefaultPort()
getDefaultPort in class DistributedFileSystempublic org.apache.hadoop.security.token.Token<DelegationTokenIdentifier> getDelegationToken(java.lang.String renewer) throws java.io.IOException
DistributedFileSystem.getDelegationToken(String). If
there are mount points configured and a default fs (linkFallback) is
configured, then it will return the default fs delegation token. Otherwise
it will return null.getDelegationToken in interface org.apache.hadoop.security.token.DelegationTokenIssuergetDelegationToken in class DistributedFileSystemjava.io.IOExceptionpublic void setBalancerBandwidth(long bandwidth)
throws java.io.IOException
DistributedFileSystemsetBalancerBandwidth in class DistributedFileSystembandwidth - Balancer bandwidth in bytes per second for all datanodes.java.io.IOExceptionpublic java.lang.String getCanonicalServiceName()
DistributedFileSystemgetCanonicalServiceName in interface org.apache.hadoop.security.token.DelegationTokenIssuergetCanonicalServiceName in class DistributedFileSystemprotected java.net.URI canonicalizeUri(java.net.URI uri)
canonicalizeUri in class DistributedFileSystempublic boolean isInSafeMode()
throws java.io.IOException
DistributedFileSystemisInSafeMode in class DistributedFileSystemjava.io.IOException - when there is an issue communicating with the NameNodepublic void allowSnapshot(org.apache.hadoop.fs.Path path)
throws java.io.IOException
allowSnapshot in class DistributedFileSystemjava.io.IOExceptionHdfsAdmin.allowSnapshot(Path)public void disallowSnapshot(org.apache.hadoop.fs.Path path)
throws java.io.IOException
disallowSnapshot in class DistributedFileSystemjava.io.IOExceptionHdfsAdmin.disallowSnapshot(Path)public org.apache.hadoop.fs.Path createSnapshot(org.apache.hadoop.fs.Path path,
java.lang.String snapshotName)
throws java.io.IOException
createSnapshot in class DistributedFileSystemjava.io.IOExceptionpublic void renameSnapshot(org.apache.hadoop.fs.Path path,
java.lang.String snapshotOldName,
java.lang.String snapshotNewName)
throws java.io.IOException
renameSnapshot in class DistributedFileSystemjava.io.IOExceptionpublic SnapshottableDirectoryStatus[] getSnapshottableDirListing() throws java.io.IOException
DistributedFileSystemgetSnapshottableDirListing in class DistributedFileSystemjava.io.IOException - If an I/O error occurred.public void deleteSnapshot(org.apache.hadoop.fs.Path path,
java.lang.String snapshotName)
throws java.io.IOException
deleteSnapshot in class DistributedFileSystemjava.io.IOExceptionpublic org.apache.hadoop.fs.RemoteIterator<SnapshotDiffReportListing> snapshotDiffReportListingRemoteIterator(org.apache.hadoop.fs.Path snapshotDir, java.lang.String fromSnapshot, java.lang.String toSnapshot) throws java.io.IOException
DistributedFileSystemsnapshotDiffReportListingRemoteIterator in class DistributedFileSystemsnapshotDir - full path of the directory where snapshots are takenfromSnapshot - snapshot name of the from point. Null indicates the current
tree.toSnapshot - snapshot name of the to point. Null indicates the current
tree.java.io.IOExceptionpublic SnapshotDiffReport getSnapshotDiffReport(org.apache.hadoop.fs.Path snapshotDir, java.lang.String fromSnapshot, java.lang.String toSnapshot) throws java.io.IOException
DistributedFileSystemgetSnapshotDiffReport in class DistributedFileSystemjava.io.IOExceptionDFSClient.getSnapshotDiffReportListing(java.lang.String, java.lang.String, java.lang.String, byte[], int)public boolean isFileClosed(org.apache.hadoop.fs.Path src)
throws java.io.IOException
DistributedFileSystemisFileClosed in interface org.apache.hadoop.fs.LeaseRecoverableisFileClosed in class DistributedFileSystemsrc - The path to the filejava.io.FileNotFoundException - if the file does not exist.java.io.IOException - If an I/O error occurredpublic long addCacheDirective(CacheDirectiveInfo info) throws java.io.IOException
addCacheDirective in class DistributedFileSystemjava.io.IOExceptionDistributedFileSystem.addCacheDirective(CacheDirectiveInfo, EnumSet)public long addCacheDirective(CacheDirectiveInfo info, java.util.EnumSet<CacheFlag> flags) throws java.io.IOException
DistributedFileSystemaddCacheDirective in class DistributedFileSysteminfo - Information about a directive to add.flags - CacheFlags to use for this operation.java.io.IOException - if the directive could not be addedpublic void modifyCacheDirective(CacheDirectiveInfo info) throws java.io.IOException
modifyCacheDirective in class DistributedFileSystemjava.io.IOExceptionDistributedFileSystem.modifyCacheDirective(CacheDirectiveInfo, EnumSet)public void modifyCacheDirective(CacheDirectiveInfo info, java.util.EnumSet<CacheFlag> flags) throws java.io.IOException
DistributedFileSystemmodifyCacheDirective in class DistributedFileSysteminfo - Information about the directive to modify. You must set the ID
to indicate which CacheDirective you want to modify.flags - CacheFlags to use for this operation.java.io.IOException - if the directive could not be modifiedpublic void removeCacheDirective(long id)
throws java.io.IOException
DistributedFileSystemremoveCacheDirective in class DistributedFileSystemid - identifier of the CacheDirectiveInfo to removejava.io.IOException - if the directive could not be removedpublic org.apache.hadoop.fs.RemoteIterator<CacheDirectiveEntry> listCacheDirectives(CacheDirectiveInfo filter) throws java.io.IOException
DistributedFileSystemlistCacheDirectives in class DistributedFileSystemfilter - Filter parameters to use when listing the directives, null to
list all directives visible to us.java.io.IOExceptionpublic void addCachePool(CachePoolInfo info) throws java.io.IOException
DistributedFileSystemaddCachePool in class DistributedFileSysteminfo - The request to add a cache pool.java.io.IOException - If the request could not be completed.public void modifyCachePool(CachePoolInfo info) throws java.io.IOException
DistributedFileSystemmodifyCachePool in class DistributedFileSysteminfo - The request to modify a cache pool.java.io.IOException - If the request could not be completed.public void removeCachePool(java.lang.String poolName)
throws java.io.IOException
DistributedFileSystemremoveCachePool in class DistributedFileSystempoolName - Name of the cache pool to remove.java.io.IOException - if the cache pool did not exist, or could not be removed.public org.apache.hadoop.fs.RemoteIterator<CachePoolEntry> listCachePools() throws java.io.IOException
DistributedFileSystemlistCachePools in class DistributedFileSystemjava.io.IOException - If there was an error listing cache pools.public void modifyAclEntries(org.apache.hadoop.fs.Path path,
java.util.List<org.apache.hadoop.fs.permission.AclEntry> aclSpec)
throws java.io.IOException
DistributedFileSystemmodifyAclEntries in class DistributedFileSystemjava.io.IOExceptionpublic void removeAclEntries(org.apache.hadoop.fs.Path path,
java.util.List<org.apache.hadoop.fs.permission.AclEntry> aclSpec)
throws java.io.IOException
DistributedFileSystemremoveAclEntries in class DistributedFileSystemjava.io.IOExceptionpublic void removeDefaultAcl(org.apache.hadoop.fs.Path path)
throws java.io.IOException
DistributedFileSystemremoveDefaultAcl in class DistributedFileSystemjava.io.IOExceptionpublic void removeAcl(org.apache.hadoop.fs.Path path)
throws java.io.IOException
DistributedFileSystemremoveAcl in class DistributedFileSystemjava.io.IOExceptionpublic void setAcl(org.apache.hadoop.fs.Path path,
java.util.List<org.apache.hadoop.fs.permission.AclEntry> aclSpec)
throws java.io.IOException
DistributedFileSystemsetAcl in class DistributedFileSystemjava.io.IOExceptionpublic org.apache.hadoop.fs.permission.AclStatus getAclStatus(org.apache.hadoop.fs.Path path)
throws java.io.IOException
DistributedFileSystemgetAclStatus in class DistributedFileSystemjava.io.IOExceptionpublic void createEncryptionZone(org.apache.hadoop.fs.Path path,
java.lang.String keyName)
throws java.io.IOException
createEncryptionZone in class DistributedFileSystemjava.io.IOExceptionpublic EncryptionZone getEZForPath(org.apache.hadoop.fs.Path path) throws java.io.IOException
getEZForPath in class DistributedFileSystemjava.io.IOExceptionpublic org.apache.hadoop.fs.RemoteIterator<EncryptionZone> listEncryptionZones() throws java.io.IOException
listEncryptionZones in class DistributedFileSystemjava.io.IOExceptionpublic void reencryptEncryptionZone(org.apache.hadoop.fs.Path zone,
HdfsConstants.ReencryptAction action)
throws java.io.IOException
reencryptEncryptionZone in class DistributedFileSystemjava.io.IOExceptionpublic org.apache.hadoop.fs.RemoteIterator<ZoneReencryptionStatus> listReencryptionStatus() throws java.io.IOException
listReencryptionStatus in class DistributedFileSystemjava.io.IOExceptionpublic org.apache.hadoop.fs.FileEncryptionInfo getFileEncryptionInfo(org.apache.hadoop.fs.Path path)
throws java.io.IOException
getFileEncryptionInfo in class DistributedFileSystemjava.io.IOExceptionpublic void provisionEZTrash(org.apache.hadoop.fs.Path path,
org.apache.hadoop.fs.permission.FsPermission trashPermission)
throws java.io.IOException
provisionEZTrash in class DistributedFileSystemjava.io.IOExceptionpublic org.apache.hadoop.fs.Path provisionSnapshotTrash(org.apache.hadoop.fs.Path path,
org.apache.hadoop.fs.permission.FsPermission trashPermission)
throws java.io.IOException
DistributedFileSystemprovisionSnapshotTrash in class DistributedFileSystempath - Path to a snapshottable directory.trashPermission - Expected FsPermission of the trash root.java.io.IOExceptionpublic void setXAttr(org.apache.hadoop.fs.Path path,
java.lang.String name,
byte[] value,
java.util.EnumSet<org.apache.hadoop.fs.XAttrSetFlag> flag)
throws java.io.IOException
setXAttr in class DistributedFileSystemjava.io.IOExceptionpublic byte[] getXAttr(org.apache.hadoop.fs.Path path,
java.lang.String name)
throws java.io.IOException
getXAttr in class DistributedFileSystemjava.io.IOExceptionpublic java.util.Map<java.lang.String,byte[]> getXAttrs(org.apache.hadoop.fs.Path path)
throws java.io.IOException
getXAttrs in class DistributedFileSystemjava.io.IOExceptionpublic java.util.Map<java.lang.String,byte[]> getXAttrs(org.apache.hadoop.fs.Path path,
java.util.List<java.lang.String> names)
throws java.io.IOException
getXAttrs in class DistributedFileSystemjava.io.IOExceptionpublic java.util.List<java.lang.String> listXAttrs(org.apache.hadoop.fs.Path path)
throws java.io.IOException
listXAttrs in class DistributedFileSystemjava.io.IOExceptionpublic void removeXAttr(org.apache.hadoop.fs.Path path,
java.lang.String name)
throws java.io.IOException
removeXAttr in class DistributedFileSystemjava.io.IOExceptionpublic void access(org.apache.hadoop.fs.Path path,
org.apache.hadoop.fs.permission.FsAction mode)
throws org.apache.hadoop.security.AccessControlException,
java.io.FileNotFoundException,
java.io.IOException
access in class DistributedFileSystemorg.apache.hadoop.security.AccessControlExceptionjava.io.FileNotFoundExceptionjava.io.IOExceptionpublic java.net.URI getKeyProviderUri()
throws java.io.IOException
getKeyProviderUri in interface org.apache.hadoop.crypto.key.KeyProviderTokenIssuergetKeyProviderUri in class DistributedFileSystemjava.io.IOExceptionpublic org.apache.hadoop.crypto.key.KeyProvider getKeyProvider()
throws java.io.IOException
getKeyProvider in interface org.apache.hadoop.crypto.key.KeyProviderTokenIssuergetKeyProvider in class DistributedFileSystemjava.io.IOExceptionpublic org.apache.hadoop.security.token.DelegationTokenIssuer[] getAdditionalTokenIssuers()
throws java.io.IOException
getAdditionalTokenIssuers in interface org.apache.hadoop.security.token.DelegationTokenIssuergetAdditionalTokenIssuers in class DistributedFileSystemjava.io.IOExceptionpublic DFSInotifyEventInputStream getInotifyEventStream() throws java.io.IOException
getInotifyEventStream in class DistributedFileSystemjava.io.IOExceptionpublic DFSInotifyEventInputStream getInotifyEventStream(long lastReadTxid) throws java.io.IOException
getInotifyEventStream in class DistributedFileSystemjava.io.IOExceptionpublic void setErasureCodingPolicy(org.apache.hadoop.fs.Path path,
java.lang.String ecPolicyName)
throws java.io.IOException
DistributedFileSystemsetErasureCodingPolicy in class DistributedFileSystempath - The directory to set the policyecPolicyName - The erasure coding policy name.java.io.IOExceptionpublic void satisfyStoragePolicy(org.apache.hadoop.fs.Path src)
throws java.io.IOException
DistributedFileSystemsatisfyStoragePolicy in class DistributedFileSystemsrc - The source path referring to either a directory or a file.java.io.IOExceptionpublic ErasureCodingPolicy getErasureCodingPolicy(org.apache.hadoop.fs.Path path) throws java.io.IOException
DistributedFileSystemgetErasureCodingPolicy in class DistributedFileSystempath - The path of the file or directoryjava.io.IOExceptionpublic java.util.Collection<ErasureCodingPolicyInfo> getAllErasureCodingPolicies() throws java.io.IOException
getAllErasureCodingPolicies in class DistributedFileSystemjava.io.IOExceptionpublic java.util.Map<java.lang.String,java.lang.String> getAllErasureCodingCodecs()
throws java.io.IOException
DistributedFileSystemgetAllErasureCodingCodecs in class DistributedFileSystemjava.io.IOExceptionpublic AddErasureCodingPolicyResponse[] addErasureCodingPolicies(ErasureCodingPolicy[] policies) throws java.io.IOException
DistributedFileSystemaddErasureCodingPolicies in class DistributedFileSystempolicies - The user defined ec policy list to add.java.io.IOExceptionpublic void removeErasureCodingPolicy(java.lang.String ecPolicyName)
throws java.io.IOException
DistributedFileSystemremoveErasureCodingPolicy in class DistributedFileSystemecPolicyName - The name of the policy to be removed.java.io.IOExceptionpublic void enableErasureCodingPolicy(java.lang.String ecPolicyName)
throws java.io.IOException
DistributedFileSystemenableErasureCodingPolicy in class DistributedFileSystemecPolicyName - The name of the policy to be enabled.java.io.IOExceptionpublic void disableErasureCodingPolicy(java.lang.String ecPolicyName)
throws java.io.IOException
DistributedFileSystemdisableErasureCodingPolicy in class DistributedFileSystemecPolicyName - The name of the policy to be disabled.java.io.IOExceptionpublic void unsetErasureCodingPolicy(org.apache.hadoop.fs.Path path)
throws java.io.IOException
DistributedFileSystemunsetErasureCodingPolicy in class DistributedFileSystempath - The directory to unset the policyjava.io.IOExceptionpublic ECTopologyVerifierResult getECTopologyResultForPolicies(java.lang.String... policyNames) throws java.io.IOException
DistributedFileSystemgetECTopologyResultForPolicies in class DistributedFileSystempolicyNames - name of policies.java.io.IOExceptionpublic org.apache.hadoop.fs.Path getTrashRoot(org.apache.hadoop.fs.Path path)
DistributedFileSystemgetTrashRoot in class DistributedFileSystempath - the trash root of the path to be determined.public java.util.Collection<org.apache.hadoop.fs.FileStatus> getTrashRoots(boolean allUsers)
DistributedFileSystemgetTrashRoots in class DistributedFileSystemallUsers - return trashRoots of all users if true, used by emptierprotected org.apache.hadoop.fs.Path fixRelativePart(org.apache.hadoop.fs.Path p)
fixRelativePart in class DistributedFileSystempublic DistributedFileSystem.HdfsDataOutputStreamBuilder createFile(org.apache.hadoop.fs.Path path)
DistributedFileSystemFileSystem.create(Path), file is overwritten by default.createFile in class DistributedFileSystempath - the path of the file to create.@Deprecated public org.apache.hadoop.fs.RemoteIterator<OpenFileEntry> listOpenFiles() throws java.io.IOException
DistributedFileSystemSince the list is fetched in batches, it does not represent a consistent snapshot of all the open files.
This method can only be called by HDFS superusers.
listOpenFiles in class DistributedFileSystemjava.io.IOException@Deprecated public org.apache.hadoop.fs.RemoteIterator<OpenFileEntry> listOpenFiles(java.util.EnumSet<OpenFilesIterator.OpenFilesType> openFilesTypes) throws java.io.IOException
listOpenFiles in class DistributedFileSystemjava.io.IOExceptionpublic org.apache.hadoop.fs.RemoteIterator<OpenFileEntry> listOpenFiles(java.util.EnumSet<OpenFilesIterator.OpenFilesType> openFilesTypes, java.lang.String path) throws java.io.IOException
listOpenFiles in class DistributedFileSystemjava.io.IOExceptionpublic DistributedFileSystem.HdfsDataOutputStreamBuilder appendFile(org.apache.hadoop.fs.Path path)
DistributedFileSystemDistributedFileSystem.HdfsDataOutputStreamBuilder to append a file on DFS.appendFile in class DistributedFileSystempath - file path.DistributedFileSystem.HdfsDataOutputStreamBuilder for appending a file.public boolean hasPathCapability(org.apache.hadoop.fs.Path path,
java.lang.String capability)
throws java.io.IOException
DistributedFileSystemDfsPathCapabilities to keep WebHdfsFileSystem in sync.
hasPathCapability in interface org.apache.hadoop.fs.PathCapabilitieshasPathCapability in class DistributedFileSystemjava.io.IOExceptionpublic org.apache.hadoop.fs.Path resolvePath(org.apache.hadoop.fs.Path f)
throws java.io.IOException
resolvePath in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic boolean delete(org.apache.hadoop.fs.Path f)
throws org.apache.hadoop.security.AccessControlException,
java.io.FileNotFoundException,
java.io.IOException
delete in class org.apache.hadoop.fs.FileSystemorg.apache.hadoop.security.AccessControlExceptionjava.io.FileNotFoundExceptionjava.io.IOExceptionpublic org.apache.hadoop.fs.FileChecksum getFileChecksum(org.apache.hadoop.fs.Path f,
long length)
throws org.apache.hadoop.security.AccessControlException,
java.io.FileNotFoundException,
java.io.IOException
getFileChecksum in class DistributedFileSystemorg.apache.hadoop.security.AccessControlExceptionjava.io.FileNotFoundExceptionjava.io.IOExceptionpublic boolean mkdirs(org.apache.hadoop.fs.Path dir)
throws java.io.IOException
mkdirs in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic long getDefaultBlockSize(org.apache.hadoop.fs.Path f)
getDefaultBlockSize in class org.apache.hadoop.fs.FileSystempublic short getDefaultReplication(org.apache.hadoop.fs.Path f)
getDefaultReplication in class org.apache.hadoop.fs.FileSystempublic org.apache.hadoop.fs.FsServerDefaults getServerDefaults(org.apache.hadoop.fs.Path f)
throws java.io.IOException
getServerDefaults in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic void setWriteChecksum(boolean writeChecksum)
setWriteChecksum in class org.apache.hadoop.fs.FileSystempublic org.apache.hadoop.fs.FileSystem[] getChildFileSystems()
getChildFileSystems in class org.apache.hadoop.fs.FileSystempublic org.apache.hadoop.fs.viewfs.ViewFileSystem.MountPoint[] getMountPoints()
public org.apache.hadoop.fs.FsStatus getStatus()
throws java.io.IOException
getStatus in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic long getUsed()
throws java.io.IOException
getUsed in class org.apache.hadoop.fs.FileSystemjava.io.IOExceptionpublic DatanodeInfo[] getSlowDatanodeStats() throws java.io.IOException
DistributedFileSystemgetSlowDatanodeStats in class DistributedFileSystemjava.io.IOException - If an I/O error occurs.Copyright © 2008–2025 Apache Software Foundation. All rights reserved.