java.io.Closeable, java.lang.AutoCloseable, org.apache.hadoop.crypto.key.KeyProviderTokenIssuer, DataEncryptionKeyFactory, RemotePeerFactory, org.apache.hadoop.security.token.DelegationTokenIssuer@Private public class DFSClient extends java.lang.Object implements java.io.Closeable, RemotePeerFactory, DataEncryptionKeyFactory, org.apache.hadoop.crypto.key.KeyProviderTokenIssuer
| Modifier and Type | Class | Description |
|---|---|---|
static class |
DFSClient.DFSDataInputStream |
Deprecated.
use
HdfsDataInputStream instead. |
static class |
DFSClient.Renewer |
| Modifier and Type | Field | Description |
|---|---|---|
static org.slf4j.Logger |
LOG |
| Constructor | Description |
|---|---|
DFSClient(java.net.InetSocketAddress address,
org.apache.hadoop.conf.Configuration conf) |
|
DFSClient(java.net.URI nameNodeUri,
org.apache.hadoop.conf.Configuration conf) |
Same as this(nameNodeUri, conf, null);
|
DFSClient(java.net.URI nameNodeUri,
org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.fs.FileSystem.Statistics stats) |
Same as this(nameNodeUri, null, conf, stats);
|
DFSClient(java.net.URI nameNodeUri,
ClientProtocol rpcNamenode,
org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.fs.FileSystem.Statistics stats) |
Create a new DFSClient connected to the given nameNodeUri or rpcNamenode.
|
DFSClient(org.apache.hadoop.conf.Configuration conf) |
Deprecated.
Deprecated at 0.21
|
| Modifier and Type | Method | Description |
|---|---|---|
long |
addCacheDirective(CacheDirectiveInfo info,
java.util.EnumSet<CacheFlag> flags) |
|
void |
addCachePool(CachePoolInfo info) |
|
AddErasureCodingPolicyResponse[] |
addErasureCodingPolicies(ErasureCodingPolicy[] policies) |
|
void |
addLocatedBlocksRefresh(DFSInputStream dfsInputStream) |
Adds the
DFSInputStream to the LocatedBlocksRefresher, so that
the underlying LocatedBlocks is periodically refreshed. |
void |
addNodeToDeadNodeDetector(DFSInputStream dfsInputStream,
DatanodeInfo datanodeInfo) |
Add given datanode in DeadNodeDetector.
|
void |
allowSnapshot(java.lang.String snapshotRoot) |
Allow snapshot on a directory.
|
HdfsDataOutputStream |
append(java.lang.String src,
int buffersize,
java.util.EnumSet<org.apache.hadoop.fs.CreateFlag> flag,
org.apache.hadoop.util.Progressable progress,
org.apache.hadoop.fs.FileSystem.Statistics statistics) |
Append to an existing HDFS file.
|
HdfsDataOutputStream |
append(java.lang.String src,
int buffersize,
java.util.EnumSet<org.apache.hadoop.fs.CreateFlag> flag,
org.apache.hadoop.util.Progressable progress,
org.apache.hadoop.fs.FileSystem.Statistics statistics,
java.net.InetSocketAddress[] favoredNodes) |
Append to an existing HDFS file.
|
BatchedDirectoryListing |
batchedListPaths(java.lang.String[] srcs,
byte[] startAfter,
boolean needLocation) |
Get a batched listing for the indicated directories.
|
void |
cancelDelegationToken(org.apache.hadoop.security.token.Token<DelegationTokenIdentifier> token) |
Deprecated.
Use Token.cancel instead.
|
void |
checkAccess(java.lang.String src,
org.apache.hadoop.fs.permission.FsAction mode) |
|
void |
clearDataEncryptionKey() |
|
void |
close() |
Close the file system, abandoning all of the leases and files being
created and close connections to the namenode.
|
void |
closeAllFilesBeingWritten(boolean abort) |
Close/abort all files being written.
|
void |
closeOutputStreams(boolean abort) |
Close all open streams, abandoning all of the leases and files being
created.
|
void |
concat(java.lang.String trg,
java.lang.String[] srcs) |
Move blocks from src to trg and delete src.
See
ClientProtocol.concat(java.lang.String, java.lang.String[]). |
protected IOStreamPair |
connectToDN(DatanodeInfo dn,
int timeout,
org.apache.hadoop.security.token.Token<BlockTokenIdentifier> blockToken) |
|
java.io.OutputStream |
create(java.lang.String src,
boolean overwrite) |
Call
create(String, boolean, short, long, Progressable) with
default replication and blockSize and null
progress. |
java.io.OutputStream |
create(java.lang.String src,
boolean overwrite,
short replication,
long blockSize) |
Call
create(String, boolean, short, long, Progressable) with
null progress. |
java.io.OutputStream |
create(java.lang.String src,
boolean overwrite,
short replication,
long blockSize,
org.apache.hadoop.util.Progressable progress) |
Call
create(String, boolean, short, long, Progressable, int)
with default bufferSize. |
java.io.OutputStream |
create(java.lang.String src,
boolean overwrite,
short replication,
long blockSize,
org.apache.hadoop.util.Progressable progress,
int buffersize) |
Call
create(String, FsPermission, EnumSet, short, long,
Progressable, int, ChecksumOpt) with default permission
FsPermission.getFileDefault(). |
java.io.OutputStream |
create(java.lang.String src,
boolean overwrite,
org.apache.hadoop.util.Progressable progress) |
|
DFSOutputStream |
create(java.lang.String src,
org.apache.hadoop.fs.permission.FsPermission permission,
java.util.EnumSet<org.apache.hadoop.fs.CreateFlag> flag,
boolean createParent,
short replication,
long blockSize,
org.apache.hadoop.util.Progressable progress,
int buffersize,
org.apache.hadoop.fs.Options.ChecksumOpt checksumOpt) |
Create a new dfs file with the specified block replication
with write-progress reporting and return an output stream for writing
into the file.
|
DFSOutputStream |
create(java.lang.String src,
org.apache.hadoop.fs.permission.FsPermission permission,
java.util.EnumSet<org.apache.hadoop.fs.CreateFlag> flag,
boolean createParent,
short replication,
long blockSize,
org.apache.hadoop.util.Progressable progress,
int buffersize,
org.apache.hadoop.fs.Options.ChecksumOpt checksumOpt,
java.net.InetSocketAddress[] favoredNodes) |
Same as
create(String, FsPermission, EnumSet, boolean, short, long,
Progressable, int, ChecksumOpt) with the addition of favoredNodes that is
a hint to where the namenode should place the file blocks. |
DFSOutputStream |
create(java.lang.String src,
org.apache.hadoop.fs.permission.FsPermission permission,
java.util.EnumSet<org.apache.hadoop.fs.CreateFlag> flag,
boolean createParent,
short replication,
long blockSize,
org.apache.hadoop.util.Progressable progress,
int buffersize,
org.apache.hadoop.fs.Options.ChecksumOpt checksumOpt,
java.net.InetSocketAddress[] favoredNodes,
java.lang.String ecPolicyName) |
Same as
create(String, FsPermission, EnumSet, boolean, short, long,
Progressable, int, ChecksumOpt, InetSocketAddress[]) with the addition of
ecPolicyName that is used to specify a specific erasure coding policy
instead of inheriting any policy from this new file's parent directory. |
DFSOutputStream |
create(java.lang.String src,
org.apache.hadoop.fs.permission.FsPermission permission,
java.util.EnumSet<org.apache.hadoop.fs.CreateFlag> flag,
boolean createParent,
short replication,
long blockSize,
org.apache.hadoop.util.Progressable progress,
int buffersize,
org.apache.hadoop.fs.Options.ChecksumOpt checksumOpt,
java.net.InetSocketAddress[] favoredNodes,
java.lang.String ecPolicyName,
java.lang.String storagePolicy) |
Same as
create(String, FsPermission, EnumSet, boolean, short, long,
Progressable, int, ChecksumOpt, InetSocketAddress[], String)
with the storagePolicy that is used to specify a specific storage policy
instead of inheriting any policy from this new file's parent directory. |
DFSOutputStream |
create(java.lang.String src,
org.apache.hadoop.fs.permission.FsPermission permission,
java.util.EnumSet<org.apache.hadoop.fs.CreateFlag> flag,
short replication,
long blockSize,
org.apache.hadoop.util.Progressable progress,
int buffersize,
org.apache.hadoop.fs.Options.ChecksumOpt checksumOpt) |
Call
create(String, FsPermission, EnumSet, boolean, short,
long, Progressable, int, ChecksumOpt) with createParent
set to true. |
void |
createEncryptionZone(java.lang.String src,
java.lang.String keyName) |
|
java.lang.String |
createSnapshot(java.lang.String snapshotRoot,
java.lang.String snapshotName) |
Create one snapshot.
|
void |
createSymlink(java.lang.String target,
java.lang.String link,
boolean createParent) |
Creates a symbolic link.
|
HdfsDataInputStream |
createWrappedInputStream(DFSInputStream dfsis) |
Wraps the stream in a CryptoInputStream if the underlying file is
encrypted.
|
HdfsDataOutputStream |
createWrappedOutputStream(DFSOutputStream dfsos,
org.apache.hadoop.fs.FileSystem.Statistics statistics) |
Wraps the stream in a CryptoOutputStream if the underlying file is
encrypted.
|
HdfsDataOutputStream |
createWrappedOutputStream(DFSOutputStream dfsos,
org.apache.hadoop.fs.FileSystem.Statistics statistics,
long startPos) |
Wraps the stream in a CryptoOutputStream if the underlying file is
encrypted.
|
DatanodeInfo[] |
datanodeReport(HdfsConstants.DatanodeReportType type) |
|
boolean |
delete(java.lang.String src) |
Deprecated.
|
boolean |
delete(java.lang.String src,
boolean recursive) |
delete file or directory.
|
void |
deleteSnapshot(java.lang.String snapshotRoot,
java.lang.String snapshotName) |
Delete a snapshot of a snapshottable directory.
|
void |
disableErasureCodingPolicy(java.lang.String ecPolicyName) |
|
void |
disallowSnapshot(java.lang.String snapshotRoot) |
Disallow snapshot on a directory.
|
void |
enableErasureCodingPolicy(java.lang.String ecPolicyName) |
|
boolean |
exists(java.lang.String src) |
Implemented using getFileInfo(src).
|
void |
finalizeUpgrade() |
|
org.apache.hadoop.fs.permission.AclStatus |
getAclStatus(java.lang.String src) |
|
protected LocatedBlocks |
getBlockLocations(java.lang.String src,
long length) |
|
org.apache.hadoop.fs.BlockLocation[] |
getBlockLocations(java.lang.String src,
long start,
long length) |
Get block location info about a file.
getBlockLocations() returns a list of hostnames that store
data for a specific file region.
|
long |
getBlockSize(java.lang.String f) |
|
long |
getBytesInFutureBlocks() |
Returns number of bytes that reside in Blocks with future generation
stamps.
|
java.lang.String |
getCanonicalServiceName() |
Get a canonical token service name for this client's tokens.
|
ClientContext |
getClientContext() |
|
java.lang.String |
getClientName() |
|
DfsClientConf |
getConf() |
|
long |
getCorruptBlocksCount() |
Returns count of blocks with at least one replica marked corrupt.
|
DatanodeStorageReport[] |
getDatanodeStorageReport(HdfsConstants.DatanodeReportType type) |
|
DeadNodeDetector |
getDeadNodeDetector() |
Obtain DeadNodeDetector of the current client.
|
java.util.concurrent.ConcurrentHashMap<DatanodeInfo,DatanodeInfo> |
getDeadNodes(DFSInputStream dfsInputStream) |
If deadNodeDetectionEnabled is true, return the dead nodes detected by
all the DFSInputStreams in the same client.
|
CachingStrategy |
getDefaultReadCachingStrategy() |
|
CachingStrategy |
getDefaultWriteCachingStrategy() |
|
org.apache.hadoop.security.token.Token<?> |
getDelegationToken(java.lang.String renewer) |
|
org.apache.hadoop.security.token.Token<DelegationTokenIdentifier> |
getDelegationToken(org.apache.hadoop.io.Text renewer) |
|
org.apache.hadoop.fs.FsStatus |
getDiskStatus() |
|
ECTopologyVerifierResult |
getECTopologyResultForPolicies(java.lang.String... policyNames) |
|
org.apache.hadoop.fs.Path |
getEnclosingRoot(java.lang.String src) |
|
DataEncryptionKey |
getEncryptionKey() |
|
java.util.Map<java.lang.String,java.lang.String> |
getErasureCodingCodecs() |
|
ErasureCodingPolicyInfo[] |
getErasureCodingPolicies() |
|
ErasureCodingPolicy |
getErasureCodingPolicy(java.lang.String src) |
Get the erasure coding policy information for the specified path
|
EncryptionZone |
getEZForPath(java.lang.String src) |
|
org.apache.hadoop.fs.MD5MD5CRC32FileChecksum |
getFileChecksum(java.lang.String src,
long length) |
Get the checksum of the whole file or a range of the file.
|
org.apache.hadoop.fs.FileChecksum |
getFileChecksumWithCombineMode(java.lang.String src,
long length) |
Get the checksum of the whole file or a range of the file.
|
HdfsFileStatus |
getFileInfo(java.lang.String src) |
Get the file info for a specific file or directory.
|
HdfsFileStatus |
getFileLinkInfo(java.lang.String src) |
Get the file info for a specific file or directory.
|
org.apache.hadoop.ha.HAServiceProtocol.HAServiceState |
getHAServiceState() |
An unblocking call to get the HA service state of NameNode.
|
DFSInotifyEventInputStream |
getInotifyEventStream() |
|
DFSInotifyEventInputStream |
getInotifyEventStream(long lastReadTxid) |
|
org.apache.hadoop.crypto.key.KeyProvider |
getKeyProvider() |
|
java.net.URI |
getKeyProviderUri() |
|
LeaseRenewer |
getLeaseRenewer() |
Return the lease renewer instance.
|
java.lang.String |
getLinkTarget(java.lang.String path) |
Resolve the *first* symlink, if any, in the path.
|
LocatedBlocksRefresher |
getLocatedBlockRefresher() |
Obtain LocatedBlocksRefresher of the current client.
|
LocatedBlocks |
getLocatedBlocks(java.lang.String src,
long start) |
Get locations of the blocks of the specified file `src` from offset
`start` within the prefetch size which is related to parameter
`dfs.client.read.prefetch.size`.
|
LocatedBlocks |
getLocatedBlocks(java.lang.String src,
long start,
long length) |
This is just a wrapper around callGetBlockLocations, but non-static so that
we can stub it out for tests.
|
HdfsLocatedFileStatus |
getLocatedFileInfo(java.lang.String src,
boolean needBlockToken) |
Get the file info for a specific file or directory.
|
long |
getLowRedundancyBlocksCount() |
Returns aggregated count of blocks with less redundancy.
|
long |
getMissingBlocksCount() |
Returns count of blocks with no good replicas left.
|
long |
getMissingReplOneBlocksCount() |
Returns count of blocks with replication factor 1 and have
lost the only replica.
|
ClientProtocol |
getNamenode() |
Get the namenode associated with this DFSClient object
|
int |
getNumOfFilesBeingWritten() |
|
long |
getPendingDeletionBlocksCount() |
Returns count of blocks pending on deletion.
|
long |
getRefreshReadBlkLocationsInterval() |
|
SaslDataTransferClient |
getSaslDataTransferClient() |
Returns the SaslDataTransferClient configured for this DFSClient.
|
org.apache.hadoop.fs.FsServerDefaults |
getServerDefaults() |
Get server default values for a number of configuration params.
|
SnapshotDiffReport |
getSnapshotDiffReport(java.lang.String snapshotDir,
java.lang.String fromSnapshot,
java.lang.String toSnapshot) |
Get the difference between two snapshots, or between a snapshot and the
current tree of a directory.
|
SnapshotDiffReportListing |
getSnapshotDiffReportListing(java.lang.String snapshotDir,
java.lang.String fromSnapshot,
java.lang.String toSnapshot,
byte[] startPath,
int index) |
Get the difference between two snapshots of a directory iteratively.
|
SnapshotStatus[] |
getSnapshotListing(java.lang.String snapshotRoot) |
Get listing of all the snapshots for a snapshottable directory.
|
SnapshottableDirectoryStatus[] |
getSnapshottableDirListing() |
Get all the current snapshottable directories.
|
static long |
getStateAtIndex(long[] states,
int index) |
|
BlockStoragePolicy[] |
getStoragePolicies() |
|
BlockStoragePolicy |
getStoragePolicy(java.lang.String path) |
|
byte[] |
getXAttr(java.lang.String src,
java.lang.String name) |
|
java.util.Map<java.lang.String,byte[]> |
getXAttrs(java.lang.String src) |
|
java.util.Map<java.lang.String,byte[]> |
getXAttrs(java.lang.String src,
java.util.List<java.lang.String> names) |
|
protected org.apache.hadoop.util.DataChecksum.Type |
inferChecksumTypeByReading(LocatedBlock lb,
DatanodeInfo dn) |
Infer the checksum type for a replica by sending an OP_READ_BLOCK
for the first byte of that replica.
|
boolean |
isClientRunning() |
|
boolean |
isDeadNode(DFSInputStream dfsInputStream,
DatanodeInfo datanodeInfo) |
If deadNodeDetectionEnabled is true, the judgement is based on whether
this datanode is included in DeadNodeDetector.
|
boolean |
isFileClosed(java.lang.String src) |
Get the close status of a file.
|
boolean |
isFilesBeingWrittenEmpty() |
Is file-being-written map empty?
|
org.apache.hadoop.fs.RemoteIterator<CacheDirectiveEntry> |
listCacheDirectives(CacheDirectiveInfo filter) |
|
org.apache.hadoop.fs.RemoteIterator<CachePoolEntry> |
listCachePools() |
|
CorruptFileBlocks |
listCorruptFileBlocks(java.lang.String path,
java.lang.String cookie) |
|
org.apache.hadoop.fs.RemoteIterator<EncryptionZone> |
listEncryptionZones() |
|
org.apache.hadoop.fs.RemoteIterator<OpenFileEntry> |
listOpenFiles() |
Deprecated.
|
org.apache.hadoop.fs.RemoteIterator<OpenFileEntry> |
listOpenFiles(java.lang.String path) |
Get a remote iterator to the open files list by path,
managed by NameNode.
|
org.apache.hadoop.fs.RemoteIterator<OpenFileEntry> |
listOpenFiles(java.util.EnumSet<OpenFilesIterator.OpenFilesType> openFilesTypes) |
Get a remote iterator to the open files list by type,
managed by NameNode.
|
org.apache.hadoop.fs.RemoteIterator<OpenFileEntry> |
listOpenFiles(java.util.EnumSet<OpenFilesIterator.OpenFilesType> openFilesTypes,
java.lang.String path) |
Get a remote iterator to the open files list by type and path,
managed by NameNode.
|
DirectoryListing |
listPaths(java.lang.String src,
byte[] startAfter) |
Get a partial listing of the indicated directory
No block locations need to be fetched
|
DirectoryListing |
listPaths(java.lang.String src,
byte[] startAfter,
boolean needLocation) |
Get a partial listing of the indicated directory
Recommend to use HdfsFileStatus.EMPTY_NAME as startAfter
if the application wants to fetch a listing starting from
the first entry in the directory
|
org.apache.hadoop.fs.RemoteIterator<ZoneReencryptionStatus> |
listReencryptionStatus() |
|
java.util.List<java.lang.String> |
listXAttrs(java.lang.String src) |
|
void |
metaSave(java.lang.String pathname) |
Dumps DFS data structures into specified file.
|
boolean |
mkdirs(java.lang.String src) |
Deprecated.
|
boolean |
mkdirs(java.lang.String src,
org.apache.hadoop.fs.permission.FsPermission permission,
boolean createParent) |
Create a directory (or hierarchy of directories) with the given
name and permission.
|
void |
modifyAclEntries(java.lang.String src,
java.util.List<org.apache.hadoop.fs.permission.AclEntry> aclSpec) |
|
void |
modifyCacheDirective(CacheDirectiveInfo info,
java.util.EnumSet<CacheFlag> flags) |
|
void |
modifyCachePool(CachePoolInfo info) |
|
void |
msync() |
A blocking call to wait for Observer NameNode state ID to reach to the
current client state ID.
|
Peer |
newConnectedPeer(java.net.InetSocketAddress addr,
org.apache.hadoop.security.token.Token<BlockTokenIdentifier> blockToken,
DatanodeID datanodeId) |
|
DataEncryptionKey |
newDataEncryptionKey() |
Creates a new DataEncryptionKey.
|
DFSInputStream |
open(java.lang.String src) |
|
DFSInputStream |
open(java.lang.String src,
int buffersize,
boolean verifyChecksum) |
Create an input stream that obtains a nodelist from the
namenode, and then reads from all the right places.
|
DFSInputStream |
open(java.lang.String src,
int buffersize,
boolean verifyChecksum,
org.apache.hadoop.fs.FileSystem.Statistics stats) |
Deprecated.
Use
open(String, int, boolean) instead. |
DFSInputStream |
open(HdfsPathHandle fd,
int buffersize,
boolean verifyChecksum) |
Create an input stream from the
HdfsPathHandle if the
constraints encoded from DistributedFileSystem.createPathHandle(FileStatus, Options.HandleOpt...)
are satisfied. |
DFSOutputStream |
primitiveCreate(java.lang.String src,
org.apache.hadoop.fs.permission.FsPermission absPermission,
java.util.EnumSet<org.apache.hadoop.fs.CreateFlag> flag,
boolean createParent,
short replication,
long blockSize,
org.apache.hadoop.util.Progressable progress,
int buffersize,
org.apache.hadoop.fs.Options.ChecksumOpt checksumOpt) |
Same as
create(String, FsPermission, EnumSet, short, long,
Progressable, int, ChecksumOpt) except that the permission
is absolute (i.e. has already been masked with umask). |
boolean |
primitiveMkdir(java.lang.String src,
org.apache.hadoop.fs.permission.FsPermission absPermission) |
Same as
mkdirs(String, FsPermission, boolean) except
that the permissions have already been masked against umask. |
boolean |
primitiveMkdir(java.lang.String src,
org.apache.hadoop.fs.permission.FsPermission absPermission,
boolean createParent) |
Same as
mkdirs(String, FsPermission, boolean) except
that the permissions have already been masked against umask. |
void |
putFileBeingWritten(java.lang.String key,
DFSOutputStream out) |
Put a file.
|
void |
reencryptEncryptionZone(java.lang.String zone,
HdfsConstants.ReencryptAction action) |
|
void |
refreshNodes() |
Refresh the hosts and exclude files.
|
void |
removeAcl(java.lang.String src) |
|
void |
removeAclEntries(java.lang.String src,
java.util.List<org.apache.hadoop.fs.permission.AclEntry> aclSpec) |
|
void |
removeCacheDirective(long id) |
|
void |
removeCachePool(java.lang.String poolName) |
|
void |
removeDefaultAcl(java.lang.String src) |
|
void |
removeErasureCodingPolicy(java.lang.String ecPolicyName) |
|
void |
removeFileBeingWritten(java.lang.String key) |
Remove a file.
|
void |
removeLocatedBlocksRefresh(DFSInputStream dfsInputStream) |
Removes the
DFSInputStream from the LocatedBlocksRefresher, so that
the underlying LocatedBlocks is no longer periodically refreshed. |
void |
removeNodeFromDeadNodeDetector(DFSInputStream dfsInputStream,
DatanodeInfo datanodeInfo) |
Remove given datanode from DeadNodeDetector.
|
void |
removeNodeFromDeadNodeDetector(DFSInputStream dfsInputStream,
LocatedBlocks locatedBlocks) |
Remove datanodes that given block placed on from DeadNodeDetector.
|
void |
removeXAttr(java.lang.String src,
java.lang.String name) |
|
boolean |
rename(java.lang.String src,
java.lang.String dst) |
Deprecated.
Use
rename(String, String, Options.Rename...) instead. |
void |
rename(java.lang.String src,
java.lang.String dst,
org.apache.hadoop.fs.Options.Rename... options) |
Rename file or directory.
|
void |
renameSnapshot(java.lang.String snapshotDir,
java.lang.String snapshotOldName,
java.lang.String snapshotNewName) |
Rename a snapshot.
|
long |
renewDelegationToken(org.apache.hadoop.security.token.Token<DelegationTokenIdentifier> token) |
Deprecated.
Use Token.renew instead.
|
boolean |
renewLease() |
Renew leases.
|
void |
reportBadBlocks(LocatedBlock[] blocks) |
Report corrupt blocks that were discovered by the client.
|
void |
satisfyStoragePolicy(java.lang.String src) |
Satisfy storage policy for an existing file/directory.
|
void |
setAcl(java.lang.String src,
java.util.List<org.apache.hadoop.fs.permission.AclEntry> aclSpec) |
|
void |
setBalancerBandwidth(long bandwidth) |
Requests the namenode to tell all datanodes to use a new, non-persistent
bandwidth value for dfs.datanode.balance.bandwidthPerSec.
|
static void |
setDisabledStopDeadNodeDetectorThreadForTest(boolean disabledStopDeadNodeDetectorThreadForTest) |
|
void |
setErasureCodingPolicy(java.lang.String src,
java.lang.String ecPolicyName) |
|
void |
setKeyProvider(org.apache.hadoop.crypto.key.KeyProvider provider) |
|
void |
setOwner(java.lang.String src,
java.lang.String username,
java.lang.String groupname) |
Set file or directory owner.
|
void |
setPermission(java.lang.String src,
org.apache.hadoop.fs.permission.FsPermission permission) |
Set permissions to a file or directory.
|
boolean |
setReplication(java.lang.String src,
short replication) |
Set replication for an existing file.
|
boolean |
setSafeMode(HdfsConstants.SafeModeAction action) |
Enter, leave or get safe mode.
|
boolean |
setSafeMode(HdfsConstants.SafeModeAction action,
boolean isChecked) |
Enter, leave or get safe mode.
|
void |
setStoragePolicy(java.lang.String src,
java.lang.String policyName) |
Set storage policy for an existing file/directory.
|
void |
setTimes(java.lang.String src,
long mtime,
long atime) |
set the modification and access time of a file.
|
void |
setXAttr(java.lang.String src,
java.lang.String name,
byte[] value,
java.util.EnumSet<org.apache.hadoop.fs.XAttrSetFlag> flag) |
|
DatanodeInfo[] |
slowDatanodeReport() |
|
java.lang.String |
toString() |
|
boolean |
truncate(java.lang.String src,
long newLength) |
Truncate a file to an indicated size.
See
ClientProtocol.truncate(java.lang.String, long, java.lang.String). |
void |
unsetErasureCodingPolicy(java.lang.String src) |
|
void |
unsetStoragePolicy(java.lang.String src) |
Unset storage policy set for a given file/directory.
|
boolean |
upgradeStatus() |
@Deprecated
public DFSClient(org.apache.hadoop.conf.Configuration conf)
throws java.io.IOException
java.io.IOExceptionDFSClient(InetSocketAddress, Configuration)public DFSClient(java.net.InetSocketAddress address,
org.apache.hadoop.conf.Configuration conf)
throws java.io.IOException
java.io.IOExceptionpublic DFSClient(java.net.URI nameNodeUri,
org.apache.hadoop.conf.Configuration conf)
throws java.io.IOException
java.io.IOExceptionDFSClient(URI, Configuration, FileSystem.Statistics)public DFSClient(java.net.URI nameNodeUri,
org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.fs.FileSystem.Statistics stats)
throws java.io.IOException
java.io.IOExceptionDFSClient(URI, ClientProtocol, Configuration, FileSystem.Statistics)@VisibleForTesting
public DFSClient(java.net.URI nameNodeUri,
ClientProtocol rpcNamenode,
org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.fs.FileSystem.Statistics stats)
throws java.io.IOException
HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY
in the configuration, the DFSClient will use
LossyRetryInvocationHandler as its RetryInvocationHandler.
Otherwise one of nameNodeUri or rpcNamenode must be null.java.io.IOException@VisibleForTesting public static void setDisabledStopDeadNodeDetectorThreadForTest(boolean disabledStopDeadNodeDetectorThreadForTest)
public DfsClientConf getConf()
@VisibleForTesting public java.lang.String getClientName()
public LeaseRenewer getLeaseRenewer()
public void putFileBeingWritten(java.lang.String key,
DFSOutputStream out)
public void removeFileBeingWritten(java.lang.String key)
public boolean isFilesBeingWrittenEmpty()
public boolean isClientRunning()
@VisibleForTesting public int getNumOfFilesBeingWritten()
public boolean renewLease()
throws java.io.IOException
java.io.IOExceptionpublic void closeAllFilesBeingWritten(boolean abort)
public void close()
throws java.io.IOException
close in interface java.lang.AutoCloseableclose in interface java.io.Closeablejava.io.IOExceptionpublic void closeOutputStreams(boolean abort)
abort - whether streams should be gracefully closedpublic long getBlockSize(java.lang.String f)
throws java.io.IOException
java.io.IOExceptionClientProtocol.getPreferredBlockSize(String)public org.apache.hadoop.fs.FsServerDefaults getServerDefaults()
throws java.io.IOException
java.io.IOExceptionClientProtocol.getServerDefaults()@LimitedPrivate("HDFS")
public java.lang.String getCanonicalServiceName()
getCanonicalServiceName in interface org.apache.hadoop.security.token.DelegationTokenIssuerpublic org.apache.hadoop.security.token.Token<?> getDelegationToken(java.lang.String renewer)
throws java.io.IOException
getDelegationToken in interface org.apache.hadoop.security.token.DelegationTokenIssuerjava.io.IOExceptionpublic org.apache.hadoop.security.token.Token<DelegationTokenIdentifier> getDelegationToken(org.apache.hadoop.io.Text renewer) throws java.io.IOException
java.io.IOExceptionClientProtocol.getDelegationToken(Text)@Deprecated public long renewDelegationToken(org.apache.hadoop.security.token.Token<DelegationTokenIdentifier> token) throws java.io.IOException
token - the token to renewjava.io.IOException@Deprecated public void cancelDelegationToken(org.apache.hadoop.security.token.Token<DelegationTokenIdentifier> token) throws java.io.IOException
token - the token to canceljava.io.IOExceptionpublic void reportBadBlocks(LocatedBlock[] blocks) throws java.io.IOException
java.io.IOExceptionClientProtocol.reportBadBlocks(LocatedBlock[])public long getRefreshReadBlkLocationsInterval()
public LocatedBlocks getLocatedBlocks(java.lang.String src, long start) throws java.io.IOException
src - the file path.start - starting offset.java.io.IOException@VisibleForTesting public LocatedBlocks getLocatedBlocks(java.lang.String src, long start, long length) throws java.io.IOException
java.io.IOExceptionpublic org.apache.hadoop.fs.BlockLocation[] getBlockLocations(java.lang.String src,
long start,
long length)
throws java.io.IOException
FileSystem.getFileBlockLocations(FileStatus, long, long)
for more details.java.io.IOExceptionpublic HdfsDataInputStream createWrappedInputStream(DFSInputStream dfsis) throws java.io.IOException
java.io.IOExceptionpublic HdfsDataOutputStream createWrappedOutputStream(DFSOutputStream dfsos, org.apache.hadoop.fs.FileSystem.Statistics statistics) throws java.io.IOException
java.io.IOExceptionpublic HdfsDataOutputStream createWrappedOutputStream(DFSOutputStream dfsos, org.apache.hadoop.fs.FileSystem.Statistics statistics, long startPos) throws java.io.IOException
java.io.IOExceptionpublic DFSInputStream open(java.lang.String src) throws java.io.IOException
java.io.IOException@Deprecated public DFSInputStream open(java.lang.String src, int buffersize, boolean verifyChecksum, org.apache.hadoop.fs.FileSystem.Statistics stats) throws java.io.IOException
open(String, int, boolean) instead.java.io.IOExceptionpublic DFSInputStream open(java.lang.String src, int buffersize, boolean verifyChecksum) throws java.io.IOException
java.io.IOExceptionpublic DFSInputStream open(HdfsPathHandle fd, int buffersize, boolean verifyChecksum) throws java.io.IOException
HdfsPathHandle if the
constraints encoded from DistributedFileSystem.createPathHandle(FileStatus, Options.HandleOpt...)
are satisfied. Note that HDFS does not ensure that these constraints
remain invariant for the life of the stream. It only checks that they
still held when the stream was opened.fd - Handle to an entity in HDFS, with constraintsbuffersize - ignoredverifyChecksum - Verify checksums before returning data to clientHdfsPathHandle.java.io.IOException - On I/O errorpublic ClientProtocol getNamenode()
public java.io.OutputStream create(java.lang.String src,
boolean overwrite)
throws java.io.IOException
create(String, boolean, short, long, Progressable) with
default replication and blockSize and null
progress.java.io.IOExceptionpublic java.io.OutputStream create(java.lang.String src,
boolean overwrite,
org.apache.hadoop.util.Progressable progress)
throws java.io.IOException
java.io.IOExceptionpublic java.io.OutputStream create(java.lang.String src,
boolean overwrite,
short replication,
long blockSize)
throws java.io.IOException
create(String, boolean, short, long, Progressable) with
null progress.java.io.IOExceptionpublic java.io.OutputStream create(java.lang.String src,
boolean overwrite,
short replication,
long blockSize,
org.apache.hadoop.util.Progressable progress)
throws java.io.IOException
create(String, boolean, short, long, Progressable, int)
with default bufferSize.java.io.IOExceptionpublic java.io.OutputStream create(java.lang.String src,
boolean overwrite,
short replication,
long blockSize,
org.apache.hadoop.util.Progressable progress,
int buffersize)
throws java.io.IOException
create(String, FsPermission, EnumSet, short, long,
Progressable, int, ChecksumOpt) with default permission
FsPermission.getFileDefault().src - File nameoverwrite - overwrite an existing file if truereplication - replication factor for the fileblockSize - maximum block sizeprogress - interface for reporting client progressbuffersize - underlying buffersizejava.io.IOExceptionpublic DFSOutputStream create(java.lang.String src, org.apache.hadoop.fs.permission.FsPermission permission, java.util.EnumSet<org.apache.hadoop.fs.CreateFlag> flag, short replication, long blockSize, org.apache.hadoop.util.Progressable progress, int buffersize, org.apache.hadoop.fs.Options.ChecksumOpt checksumOpt) throws java.io.IOException
create(String, FsPermission, EnumSet, boolean, short,
long, Progressable, int, ChecksumOpt) with createParent
set to true.java.io.IOExceptionpublic DFSOutputStream create(java.lang.String src, org.apache.hadoop.fs.permission.FsPermission permission, java.util.EnumSet<org.apache.hadoop.fs.CreateFlag> flag, boolean createParent, short replication, long blockSize, org.apache.hadoop.util.Progressable progress, int buffersize, org.apache.hadoop.fs.Options.ChecksumOpt checksumOpt) throws java.io.IOException
src - File namepermission - The permission of the directory being created.
If null, use default permission
FsPermission.getFileDefault()flag - indicates create a new file or create/overwrite an
existing file or append to an existing filecreateParent - create missing parent directory if truereplication - block replicationblockSize - maximum block sizeprogress - interface for reporting client progressbuffersize - underlying buffer sizechecksumOpt - checksum optionsjava.io.IOExceptionfor detailed description of exceptions thrownpublic DFSOutputStream create(java.lang.String src, org.apache.hadoop.fs.permission.FsPermission permission, java.util.EnumSet<org.apache.hadoop.fs.CreateFlag> flag, boolean createParent, short replication, long blockSize, org.apache.hadoop.util.Progressable progress, int buffersize, org.apache.hadoop.fs.Options.ChecksumOpt checksumOpt, java.net.InetSocketAddress[] favoredNodes) throws java.io.IOException
create(String, FsPermission, EnumSet, boolean, short, long,
Progressable, int, ChecksumOpt) with the addition of favoredNodes that is
a hint to where the namenode should place the file blocks.
The favored nodes hint is not persisted in HDFS. Hence it may be honored
at the creation time only. HDFS could move the blocks during balancing or
replication, to move the blocks from favored nodes. A value of null means
no favored nodes for this createjava.io.IOExceptionpublic DFSOutputStream create(java.lang.String src, org.apache.hadoop.fs.permission.FsPermission permission, java.util.EnumSet<org.apache.hadoop.fs.CreateFlag> flag, boolean createParent, short replication, long blockSize, org.apache.hadoop.util.Progressable progress, int buffersize, org.apache.hadoop.fs.Options.ChecksumOpt checksumOpt, java.net.InetSocketAddress[] favoredNodes, java.lang.String ecPolicyName) throws java.io.IOException
create(String, FsPermission, EnumSet, boolean, short, long,
Progressable, int, ChecksumOpt, InetSocketAddress[]) with the addition of
ecPolicyName that is used to specify a specific erasure coding policy
instead of inheriting any policy from this new file's parent directory.
This policy will be persisted in HDFS. A value of null means inheriting
the parent directory's policy.java.io.IOExceptionpublic DFSOutputStream create(java.lang.String src, org.apache.hadoop.fs.permission.FsPermission permission, java.util.EnumSet<org.apache.hadoop.fs.CreateFlag> flag, boolean createParent, short replication, long blockSize, org.apache.hadoop.util.Progressable progress, int buffersize, org.apache.hadoop.fs.Options.ChecksumOpt checksumOpt, java.net.InetSocketAddress[] favoredNodes, java.lang.String ecPolicyName, java.lang.String storagePolicy) throws java.io.IOException
create(String, FsPermission, EnumSet, boolean, short, long,
Progressable, int, ChecksumOpt, InetSocketAddress[], String)
with the storagePolicy that is used to specify a specific storage policy
instead of inheriting any policy from this new file's parent directory.
This policy will be persisted in HDFS. A value of null means inheriting
the parent directory's policy.java.io.IOExceptionpublic DFSOutputStream primitiveCreate(java.lang.String src, org.apache.hadoop.fs.permission.FsPermission absPermission, java.util.EnumSet<org.apache.hadoop.fs.CreateFlag> flag, boolean createParent, short replication, long blockSize, org.apache.hadoop.util.Progressable progress, int buffersize, org.apache.hadoop.fs.Options.ChecksumOpt checksumOpt) throws java.io.IOException
create(String, FsPermission, EnumSet, short, long,
Progressable, int, ChecksumOpt) except that the permission
is absolute (i.e., has already been masked with umask).java.io.IOExceptionpublic void createSymlink(java.lang.String target,
java.lang.String link,
boolean createParent)
throws java.io.IOException
java.io.IOExceptionClientProtocol.createSymlink(String, String,FsPermission, boolean)public java.lang.String getLinkTarget(java.lang.String path)
throws java.io.IOException
java.io.IOExceptionClientProtocol.getLinkTarget(String)public HdfsDataOutputStream append(java.lang.String src, int buffersize, java.util.EnumSet<org.apache.hadoop.fs.CreateFlag> flag, org.apache.hadoop.util.Progressable progress, org.apache.hadoop.fs.FileSystem.Statistics statistics) throws java.io.IOException
src - file namebuffersize - buffer sizeflag - indicates whether to append data to a new block instead of
the last blockprogress - for reporting write-progress; null is acceptable.statistics - file system statistics; null is acceptable.java.io.IOExceptionClientProtocol.append(String, String, EnumSetWritable)public HdfsDataOutputStream append(java.lang.String src, int buffersize, java.util.EnumSet<org.apache.hadoop.fs.CreateFlag> flag, org.apache.hadoop.util.Progressable progress, org.apache.hadoop.fs.FileSystem.Statistics statistics, java.net.InetSocketAddress[] favoredNodes) throws java.io.IOException
src - file namebuffersize - buffer sizeflag - indicates whether to append data to a new block instead of the
last blockprogress - for reporting write-progress; null is acceptable.statistics - file system statistics; null is acceptable.favoredNodes - FavoredNodes for new blocksjava.io.IOExceptionClientProtocol.append(String, String, EnumSetWritable)public boolean setReplication(java.lang.String src,
short replication)
throws java.io.IOException
src - file namereplication - replication to set the file tojava.io.IOExceptionClientProtocol.setReplication(String, short)public void setStoragePolicy(java.lang.String src,
java.lang.String policyName)
throws java.io.IOException
src - file/directory namepolicyName - name of the storage policyjava.io.IOExceptionpublic void unsetStoragePolicy(java.lang.String src)
throws java.io.IOException
src - file/directory namejava.io.IOExceptionpublic BlockStoragePolicy getStoragePolicy(java.lang.String path) throws java.io.IOException
path - file/directory namejava.io.IOExceptionpublic BlockStoragePolicy[] getStoragePolicies() throws java.io.IOException
java.io.IOException@Deprecated
public boolean rename(java.lang.String src,
java.lang.String dst)
throws java.io.IOException
rename(String, String, Options.Rename...) instead.java.io.IOExceptionClientProtocol.rename(String, String)public void concat(java.lang.String trg,
java.lang.String[] srcs)
throws java.io.IOException
ClientProtocol.concat(java.lang.String, java.lang.String[]).java.io.IOExceptionpublic void rename(java.lang.String src,
java.lang.String dst,
org.apache.hadoop.fs.Options.Rename... options)
throws java.io.IOException
java.io.IOExceptionClientProtocol.rename2(String, String, Options.Rename...)public boolean truncate(java.lang.String src,
long newLength)
throws java.io.IOException
ClientProtocol.truncate(java.lang.String, long, java.lang.String).java.io.IOException@Deprecated
public boolean delete(java.lang.String src)
throws java.io.IOException
ClientProtocol.delete(String, boolean).java.io.IOExceptionpublic boolean delete(java.lang.String src,
boolean recursive)
throws java.io.IOException
java.io.IOExceptionClientProtocol.delete(String, boolean)public boolean exists(java.lang.String src)
throws java.io.IOException
java.io.IOExceptionpublic DirectoryListing listPaths(java.lang.String src, byte[] startAfter) throws java.io.IOException
java.io.IOExceptionpublic DirectoryListing listPaths(java.lang.String src, byte[] startAfter, boolean needLocation) throws java.io.IOException
java.io.IOExceptionClientProtocol.getListing(String, byte[], boolean)public BatchedDirectoryListing batchedListPaths(java.lang.String[] srcs, byte[] startAfter, boolean needLocation) throws java.io.IOException
java.io.IOExceptionClientProtocol.getBatchedListing(String[], byte[], boolean)public HdfsFileStatus getFileInfo(java.lang.String src) throws java.io.IOException
src - The string representation of the path to the filejava.io.IOExceptionfor description of exceptionspublic HdfsLocatedFileStatus getLocatedFileInfo(java.lang.String src, boolean needBlockToken) throws java.io.IOException
src - The string representation of the path to the fileneedBlockToken - Include block tokens in LocatedBlocks.
When block tokens are included, this call is a superset of
getBlockLocations(String, long).java.io.IOExceptionopen(HdfsPathHandle, int, boolean),
for description of
exceptionspublic boolean isFileClosed(java.lang.String src)
throws java.io.IOException
java.io.IOExceptionpublic HdfsFileStatus getFileLinkInfo(java.lang.String src) throws java.io.IOException
src - path to a file or directory.
For description of exceptions thrownjava.io.IOExceptionClientProtocol.getFileLinkInfo(String)@Private public void clearDataEncryptionKey()
public DataEncryptionKey newDataEncryptionKey() throws java.io.IOException
DataEncryptionKeyFactorynewDataEncryptionKey in interface DataEncryptionKeyFactoryjava.io.IOException - for any error@VisibleForTesting public DataEncryptionKey getEncryptionKey()
public org.apache.hadoop.fs.FileChecksum getFileChecksumWithCombineMode(java.lang.String src,
long length)
throws java.io.IOException
src - The file pathlength - the length of the range, i.e., the range is [0, length]java.io.IOExceptionDistributedFileSystem.getFileChecksum(Path)public org.apache.hadoop.fs.MD5MD5CRC32FileChecksum getFileChecksum(java.lang.String src,
long length)
throws java.io.IOException
src - The file pathlength - the length of the range, i.e., the range is [0, length]java.io.IOExceptionDistributedFileSystem.getFileChecksum(Path)protected LocatedBlocks getBlockLocations(java.lang.String src, long length) throws java.io.IOException
java.io.IOExceptionprotected IOStreamPair connectToDN(DatanodeInfo dn, int timeout, org.apache.hadoop.security.token.Token<BlockTokenIdentifier> blockToken) throws java.io.IOException
java.io.IOExceptionprotected org.apache.hadoop.util.DataChecksum.Type inferChecksumTypeByReading(LocatedBlock lb, DatanodeInfo dn) throws java.io.IOException
lb - the located blockdn - the connected datanodejava.io.IOException - if an error occurspublic void setPermission(java.lang.String src,
org.apache.hadoop.fs.permission.FsPermission permission)
throws java.io.IOException
src - path name.permission - permission to set tojava.io.IOExceptionClientProtocol.setPermission(String, FsPermission)public void setOwner(java.lang.String src,
java.lang.String username,
java.lang.String groupname)
throws java.io.IOException
src - path name.username - user id.groupname - user group.java.io.IOExceptionClientProtocol.setOwner(String, String, String)public org.apache.hadoop.fs.FsStatus getDiskStatus()
throws java.io.IOException
java.io.IOExceptionClientProtocol.getStats()public static long getStateAtIndex(long[] states,
int index)
public long getMissingBlocksCount()
throws java.io.IOException
java.io.IOExceptionpublic long getMissingReplOneBlocksCount()
throws java.io.IOException
java.io.IOExceptionpublic long getPendingDeletionBlocksCount()
throws java.io.IOException
java.io.IOExceptionpublic long getLowRedundancyBlocksCount()
throws java.io.IOException
java.io.IOExceptionpublic long getCorruptBlocksCount()
throws java.io.IOException
java.io.IOExceptionpublic long getBytesInFutureBlocks()
throws java.io.IOException
java.io.IOExceptionpublic CorruptFileBlocks listCorruptFileBlocks(java.lang.String path, java.lang.String cookie) throws java.io.IOException
java.io.IOExceptionpublic DatanodeInfo[] datanodeReport(HdfsConstants.DatanodeReportType type) throws java.io.IOException
java.io.IOExceptionpublic DatanodeStorageReport[] getDatanodeStorageReport(HdfsConstants.DatanodeReportType type) throws java.io.IOException
java.io.IOExceptionpublic boolean setSafeMode(HdfsConstants.SafeModeAction action) throws java.io.IOException
java.io.IOExceptionClientProtocol.setSafeMode(HdfsConstants.SafeModeAction,boolean)public boolean setSafeMode(HdfsConstants.SafeModeAction action, boolean isChecked) throws java.io.IOException
action - One of SafeModeAction.GET, SafeModeAction.ENTER and
SafeModeAction.LEAVEisChecked - If true, then check only active namenode's safemode status, else
check first namenode's status.java.io.IOExceptionClientProtocol.setSafeMode(HdfsConstants.SafeModeAction, boolean)public java.lang.String createSnapshot(java.lang.String snapshotRoot,
java.lang.String snapshotName)
throws java.io.IOException
snapshotRoot - The directory where the snapshot is to be takensnapshotName - Name of the snapshotjava.io.IOExceptionClientProtocol.createSnapshot(String, String)public void deleteSnapshot(java.lang.String snapshotRoot,
java.lang.String snapshotName)
throws java.io.IOException
snapshotRoot - The snapshottable directory that the
to-be-deleted snapshot belongs tosnapshotName - The name of the to-be-deleted snapshotjava.io.IOExceptionClientProtocol.deleteSnapshot(String, String)public void renameSnapshot(java.lang.String snapshotDir,
java.lang.String snapshotOldName,
java.lang.String snapshotNewName)
throws java.io.IOException
snapshotDir - The directory path where the snapshot was takensnapshotOldName - Old name of the snapshotsnapshotNewName - New name of the snapshotjava.io.IOExceptionClientProtocol.renameSnapshot(String, String, String)public SnapshottableDirectoryStatus[] getSnapshottableDirListing() throws java.io.IOException
java.io.IOExceptionClientProtocol.getSnapshottableDirListing()public SnapshotStatus[] getSnapshotListing(java.lang.String snapshotRoot) throws java.io.IOException
java.io.IOException - If an I/O error occurredClientProtocol.getSnapshotListing(String)public void allowSnapshot(java.lang.String snapshotRoot)
throws java.io.IOException
java.io.IOExceptionClientProtocol.allowSnapshot(String snapshotRoot)public void disallowSnapshot(java.lang.String snapshotRoot)
throws java.io.IOException
java.io.IOExceptionClientProtocol.disallowSnapshot(String snapshotRoot)public SnapshotDiffReport getSnapshotDiffReport(java.lang.String snapshotDir, java.lang.String fromSnapshot, java.lang.String toSnapshot) throws java.io.IOException
java.io.IOExceptionClientProtocol.getSnapshotDiffReport(java.lang.String, java.lang.String, java.lang.String)public SnapshotDiffReportListing getSnapshotDiffReportListing(java.lang.String snapshotDir, java.lang.String fromSnapshot, java.lang.String toSnapshot, byte[] startPath, int index) throws java.io.IOException
java.io.IOExceptionClientProtocol.getSnapshotDiffReportListing(java.lang.String, java.lang.String, java.lang.String, byte[], int)public long addCacheDirective(CacheDirectiveInfo info, java.util.EnumSet<CacheFlag> flags) throws java.io.IOException
java.io.IOExceptionpublic void modifyCacheDirective(CacheDirectiveInfo info, java.util.EnumSet<CacheFlag> flags) throws java.io.IOException
java.io.IOExceptionpublic void removeCacheDirective(long id)
throws java.io.IOException
java.io.IOExceptionpublic org.apache.hadoop.fs.RemoteIterator<CacheDirectiveEntry> listCacheDirectives(CacheDirectiveInfo filter) throws java.io.IOException
java.io.IOExceptionpublic void addCachePool(CachePoolInfo info) throws java.io.IOException
java.io.IOExceptionpublic void modifyCachePool(CachePoolInfo info) throws java.io.IOException
java.io.IOExceptionpublic void removeCachePool(java.lang.String poolName)
throws java.io.IOException
java.io.IOExceptionpublic org.apache.hadoop.fs.RemoteIterator<CachePoolEntry> listCachePools() throws java.io.IOException
java.io.IOExceptionpublic void refreshNodes()
throws java.io.IOException
ClientProtocol.refreshNodes()
for more details.java.io.IOExceptionClientProtocol.refreshNodes()public void metaSave(java.lang.String pathname)
throws java.io.IOException
java.io.IOExceptionClientProtocol.metaSave(String)public void setBalancerBandwidth(long bandwidth)
throws java.io.IOException
ClientProtocol.setBalancerBandwidth(long)
for more details.java.io.IOExceptionClientProtocol.setBalancerBandwidth(long)public void finalizeUpgrade()
throws java.io.IOException
java.io.IOExceptionClientProtocol.finalizeUpgrade()public boolean upgradeStatus()
throws java.io.IOException
java.io.IOExceptionClientProtocol.upgradeStatus()@Deprecated
public boolean mkdirs(java.lang.String src)
throws java.io.IOException
java.io.IOExceptionpublic boolean mkdirs(java.lang.String src,
org.apache.hadoop.fs.permission.FsPermission permission,
boolean createParent)
throws java.io.IOException
src - The path of the directory being createdpermission - The permission of the directory being created.
If permission == null, use FsPermission.getDirDefault().createParent - create missing parent directory if truejava.io.IOExceptionClientProtocol.mkdirs(String, FsPermission, boolean)public boolean primitiveMkdir(java.lang.String src,
org.apache.hadoop.fs.permission.FsPermission absPermission)
throws java.io.IOException
mkdirs(String, FsPermission, boolean) except
that the permission has already been masked against umask.java.io.IOExceptionpublic boolean primitiveMkdir(java.lang.String src,
org.apache.hadoop.fs.permission.FsPermission absPermission,
boolean createParent)
throws java.io.IOException
mkdirs(String, FsPermission, boolean) except
that the permission has already been masked against umask.java.io.IOExceptionpublic void setTimes(java.lang.String src,
long mtime,
long atime)
throws java.io.IOException
java.io.IOExceptionClientProtocol.setTimes(String, long, long)public java.lang.String toString()
toString in class java.lang.Objectpublic CachingStrategy getDefaultReadCachingStrategy()
public CachingStrategy getDefaultWriteCachingStrategy()
public ClientContext getClientContext()
public void modifyAclEntries(java.lang.String src,
java.util.List<org.apache.hadoop.fs.permission.AclEntry> aclSpec)
throws java.io.IOException
java.io.IOExceptionpublic void removeAclEntries(java.lang.String src,
java.util.List<org.apache.hadoop.fs.permission.AclEntry> aclSpec)
throws java.io.IOException
java.io.IOExceptionpublic void removeDefaultAcl(java.lang.String src)
throws java.io.IOException
java.io.IOExceptionpublic void removeAcl(java.lang.String src)
throws java.io.IOException
java.io.IOExceptionpublic void setAcl(java.lang.String src,
java.util.List<org.apache.hadoop.fs.permission.AclEntry> aclSpec)
throws java.io.IOException
java.io.IOExceptionpublic org.apache.hadoop.fs.permission.AclStatus getAclStatus(java.lang.String src)
throws java.io.IOException
java.io.IOExceptionpublic void createEncryptionZone(java.lang.String src,
java.lang.String keyName)
throws java.io.IOException
java.io.IOExceptionpublic EncryptionZone getEZForPath(java.lang.String src) throws java.io.IOException
java.io.IOExceptionpublic org.apache.hadoop.fs.RemoteIterator<EncryptionZone> listEncryptionZones() throws java.io.IOException
java.io.IOExceptionpublic void reencryptEncryptionZone(java.lang.String zone,
HdfsConstants.ReencryptAction action)
throws java.io.IOException
java.io.IOExceptionpublic org.apache.hadoop.fs.RemoteIterator<ZoneReencryptionStatus> listReencryptionStatus() throws java.io.IOException
java.io.IOExceptionpublic void setErasureCodingPolicy(java.lang.String src,
java.lang.String ecPolicyName)
throws java.io.IOException
java.io.IOExceptionpublic void unsetErasureCodingPolicy(java.lang.String src)
throws java.io.IOException
java.io.IOExceptionpublic ECTopologyVerifierResult getECTopologyResultForPolicies(java.lang.String... policyNames) throws java.io.IOException
java.io.IOExceptionpublic void setXAttr(java.lang.String src,
java.lang.String name,
byte[] value,
java.util.EnumSet<org.apache.hadoop.fs.XAttrSetFlag> flag)
throws java.io.IOException
java.io.IOExceptionpublic byte[] getXAttr(java.lang.String src,
java.lang.String name)
throws java.io.IOException
java.io.IOExceptionpublic java.util.Map<java.lang.String,byte[]> getXAttrs(java.lang.String src)
throws java.io.IOException
java.io.IOExceptionpublic java.util.Map<java.lang.String,byte[]> getXAttrs(java.lang.String src,
java.util.List<java.lang.String> names)
throws java.io.IOException
java.io.IOExceptionpublic java.util.List<java.lang.String> listXAttrs(java.lang.String src)
throws java.io.IOException
java.io.IOExceptionpublic void removeXAttr(java.lang.String src,
java.lang.String name)
throws java.io.IOException
java.io.IOExceptionpublic void checkAccess(java.lang.String src,
org.apache.hadoop.fs.permission.FsAction mode)
throws java.io.IOException
java.io.IOExceptionpublic ErasureCodingPolicyInfo[] getErasureCodingPolicies() throws java.io.IOException
java.io.IOExceptionpublic java.util.Map<java.lang.String,java.lang.String> getErasureCodingCodecs()
throws java.io.IOException
java.io.IOExceptionpublic AddErasureCodingPolicyResponse[] addErasureCodingPolicies(ErasureCodingPolicy[] policies) throws java.io.IOException
java.io.IOExceptionpublic void removeErasureCodingPolicy(java.lang.String ecPolicyName)
throws java.io.IOException
java.io.IOExceptionpublic void enableErasureCodingPolicy(java.lang.String ecPolicyName)
throws java.io.IOException
java.io.IOExceptionpublic void disableErasureCodingPolicy(java.lang.String ecPolicyName)
throws java.io.IOException
java.io.IOExceptionpublic DFSInotifyEventInputStream getInotifyEventStream() throws java.io.IOException
java.io.IOExceptionpublic DFSInotifyEventInputStream getInotifyEventStream(long lastReadTxid) throws java.io.IOException
java.io.IOExceptionpublic Peer newConnectedPeer(java.net.InetSocketAddress addr, org.apache.hadoop.security.token.Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId) throws java.io.IOException
newConnectedPeer in interface RemotePeerFactoryaddr - The address to connect to.blockToken - Token used during optional SASL negotiationdatanodeId - ID of destination DataNodejava.io.IOException - If there was an error connecting or creating
the remote socket, encrypted stream, etc.public java.net.URI getKeyProviderUri()
throws java.io.IOException
getKeyProviderUri in interface org.apache.hadoop.crypto.key.KeyProviderTokenIssuerjava.io.IOExceptionpublic org.apache.hadoop.crypto.key.KeyProvider getKeyProvider()
throws java.io.IOException
getKeyProvider in interface org.apache.hadoop.crypto.key.KeyProviderTokenIssuerjava.io.IOException@VisibleForTesting public void setKeyProvider(org.apache.hadoop.crypto.key.KeyProvider provider)
public SaslDataTransferClient getSaslDataTransferClient()
public ErasureCodingPolicy getErasureCodingPolicy(java.lang.String src) throws java.io.IOException
src - path to get the information forjava.io.IOExceptionpublic void satisfyStoragePolicy(java.lang.String src)
throws java.io.IOException
src - file/directory namejava.io.IOException@Deprecated public org.apache.hadoop.fs.RemoteIterator<OpenFileEntry> listOpenFiles() throws java.io.IOException
java.io.IOExceptionpublic org.apache.hadoop.fs.RemoteIterator<OpenFileEntry> listOpenFiles(java.lang.String path) throws java.io.IOException
path - java.io.IOExceptionpublic org.apache.hadoop.fs.RemoteIterator<OpenFileEntry> listOpenFiles(java.util.EnumSet<OpenFilesIterator.OpenFilesType> openFilesTypes) throws java.io.IOException
openFilesTypes - java.io.IOExceptionpublic org.apache.hadoop.fs.RemoteIterator<OpenFileEntry> listOpenFiles(java.util.EnumSet<OpenFilesIterator.OpenFilesType> openFilesTypes, java.lang.String path) throws java.io.IOException
openFilesTypes - path - java.io.IOExceptionpublic void msync()
throws java.io.IOException
java.io.IOException@VisibleForTesting
public org.apache.hadoop.ha.HAServiceProtocol.HAServiceState getHAServiceState()
throws java.io.IOException
java.io.IOExceptionpublic java.util.concurrent.ConcurrentHashMap<DatanodeInfo,DatanodeInfo> getDeadNodes(DFSInputStream dfsInputStream)
public boolean isDeadNode(DFSInputStream dfsInputStream, DatanodeInfo datanodeInfo)
public void addNodeToDeadNodeDetector(DFSInputStream dfsInputStream, DatanodeInfo datanodeInfo)
public void removeNodeFromDeadNodeDetector(DFSInputStream dfsInputStream, DatanodeInfo datanodeInfo)
public void removeNodeFromDeadNodeDetector(DFSInputStream dfsInputStream, LocatedBlocks locatedBlocks)
public DeadNodeDetector getDeadNodeDetector()
public LocatedBlocksRefresher getLocatedBlockRefresher()
public void addLocatedBlocksRefresh(DFSInputStream dfsInputStream)
DFSInputStream to the LocatedBlocksRefresher, so that
the underlying LocatedBlocks is periodically refreshed.public void removeLocatedBlocksRefresh(DFSInputStream dfsInputStream)
DFSInputStream from the LocatedBlocksRefresher, so that
the underlying LocatedBlocks is no longer periodically refreshed.dfsInputStream - public DatanodeInfo[] slowDatanodeReport() throws java.io.IOException
java.io.IOExceptionpublic org.apache.hadoop.fs.Path getEnclosingRoot(java.lang.String src)
throws java.io.IOException
java.io.IOExceptionCopyright © 2008–2025 Apache Software Foundation. All rights reserved.