java.io.Closeable, java.lang.AutoCloseable, ClientProtocol, org.apache.hadoop.ipc.ProtocolMetaInterface, org.apache.hadoop.ipc.ProtocolTranslator@Private @Stable public class ClientNamenodeProtocolTranslatorPB extends java.lang.Object implements org.apache.hadoop.ipc.ProtocolMetaInterface, ClientProtocol, java.io.Closeable, org.apache.hadoop.ipc.ProtocolTranslator
GET_STATS_BYTES_IN_FUTURE_BLOCKS_IDX, GET_STATS_CAPACITY_IDX, GET_STATS_CORRUPT_BLOCKS_IDX, GET_STATS_LOW_REDUNDANCY_IDX, GET_STATS_MISSING_BLOCKS_IDX, GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX, GET_STATS_PENDING_DELETION_BLOCKS_IDX, GET_STATS_REMAINING_IDX, GET_STATS_UNDER_REPLICATED_IDX, GET_STATS_USED_IDX, STATS_ARRAY_LENGTH, versionID| Constructor | Description |
|---|---|
ClientNamenodeProtocolTranslatorPB(ClientNamenodeProtocolPB proxy) |
| Modifier and Type | Method | Description |
|---|---|---|
void |
abandonBlock(ExtendedBlock b,
long fileId,
java.lang.String src,
java.lang.String holder) |
The client can give up on a block by calling abandonBlock().
|
LocatedBlock |
addBlock(java.lang.String src,
java.lang.String clientName,
ExtendedBlock previous,
DatanodeInfo[] excludeNodes,
long fileId,
java.lang.String[] favoredNodes,
java.util.EnumSet<AddBlockFlag> addBlockFlags) |
A client that wants to write an additional block to the
indicated filename (which must currently be open for writing)
should call addBlock().
|
long |
addCacheDirective(CacheDirectiveInfo directive,
java.util.EnumSet<CacheFlag> flags) |
Add a CacheDirective to the CacheManager.
|
void |
addCachePool(CachePoolInfo info) |
Add a new cache pool.
|
AddErasureCodingPolicyResponse[] |
addErasureCodingPolicies(ErasureCodingPolicy[] policies) |
Add Erasure coding policies to HDFS.
|
void |
allowSnapshot(java.lang.String snapshotRoot) |
Allow snapshot on a directory.
|
LastBlockWithStatus |
append(java.lang.String src,
java.lang.String clientName,
org.apache.hadoop.io.EnumSetWritable<org.apache.hadoop.fs.CreateFlag> flag) |
Append to the end of the file.
|
void |
cancelDelegationToken(org.apache.hadoop.security.token.Token<DelegationTokenIdentifier> token) |
Cancel an existing delegation token.
|
void |
checkAccess(java.lang.String path,
org.apache.hadoop.fs.permission.FsAction mode) |
Checks if the user can access a path.
|
void |
close() |
|
boolean |
complete(java.lang.String src,
java.lang.String clientName,
ExtendedBlock last,
long fileId) |
The client is done writing data to the given filename, and would
like to complete it.
|
void |
concat(java.lang.String trg,
java.lang.String[] srcs) |
Moves blocks from srcs to trg and delete srcs.
|
HdfsFileStatus |
create(java.lang.String src,
org.apache.hadoop.fs.permission.FsPermission masked,
java.lang.String clientName,
org.apache.hadoop.io.EnumSetWritable<org.apache.hadoop.fs.CreateFlag> flag,
boolean createParent,
short replication,
long blockSize,
org.apache.hadoop.crypto.CryptoProtocolVersion[] supportedVersions,
java.lang.String ecPolicyName,
java.lang.String storagePolicy) |
Create a new file entry in the namespace.
|
void |
createEncryptionZone(java.lang.String src,
java.lang.String keyName) |
Create an encryption zone.
|
java.lang.String |
createSnapshot(java.lang.String snapshotRoot,
java.lang.String snapshotName) |
Create a snapshot.
|
void |
createSymlink(java.lang.String target,
java.lang.String link,
org.apache.hadoop.fs.permission.FsPermission dirPerm,
boolean createParent) |
Create symlink to a file or directory.
|
boolean |
delete(java.lang.String src,
boolean recursive) |
Delete the given file or directory from the file system.
|
void |
deleteSnapshot(java.lang.String snapshotRoot,
java.lang.String snapshotName) |
Delete a specific snapshot of a snapshottable directory.
|
void |
disableErasureCodingPolicy(java.lang.String ecPolicyName) |
Disable erasure coding policy.
|
void |
disallowSnapshot(java.lang.String snapshotRoot) |
Disallow snapshot on a directory.
|
void |
enableErasureCodingPolicy(java.lang.String ecPolicyName) |
Enable erasure coding policy.
|
void |
finalizeUpgrade() |
Finalize previous upgrade.
|
void |
fsync(java.lang.String src,
long fileId,
java.lang.String client,
long lastBlockLength) |
Write all metadata for this file into persistent storage.
|
org.apache.hadoop.fs.permission.AclStatus |
getAclStatus(java.lang.String src) |
Gets the ACLs of files and directories.
|
LocatedBlock |
getAdditionalDatanode(java.lang.String src,
long fileId,
ExtendedBlock blk,
DatanodeInfo[] existings,
java.lang.String[] existingStorageIDs,
DatanodeInfo[] excludes,
int numAdditionalNodes,
java.lang.String clientName) |
Get a datanode for an existing pipeline.
|
BatchedDirectoryListing |
getBatchedListing(java.lang.String[] srcs,
byte[] startAfter,
boolean needLocation) |
Get a partial listing of the input directories.
|
LocatedBlocks |
getBlockLocations(java.lang.String src,
long offset,
long length) |
Get locations of the blocks of the specified file
within the specified range.
|
org.apache.hadoop.fs.ContentSummary |
getContentSummary(java.lang.String path) |
Get
ContentSummary rooted at the specified directory. |
long |
getCurrentEditLogTxid() |
Get the highest txid the NameNode knows has been written to the edit
log, or -1 if the NameNode's edit log is not yet open for write.
|
DataEncryptionKey |
getDataEncryptionKey() |
|
DatanodeInfo[] |
getDatanodeReport(HdfsConstants.DatanodeReportType type) |
Get a report on the system's current datanodes.
|
DatanodeStorageReport[] |
getDatanodeStorageReport(HdfsConstants.DatanodeReportType type) |
Get a report on the current datanode storages.
|
org.apache.hadoop.security.token.Token<DelegationTokenIdentifier> |
getDelegationToken(org.apache.hadoop.io.Text renewer) |
Get a valid Delegation Token.
|
ECBlockGroupStats |
getECBlockGroupStats() |
Get statistics pertaining to blocks of type
BlockType.STRIPED
in the filesystem. |
ECTopologyVerifierResult |
getECTopologyResultForPolicies(java.lang.String... policyNames) |
Verifies if the given policies are supported in the given cluster setup.
|
EventBatchList |
getEditsFromTxid(long txid) |
Get an ordered list of batches of events corresponding to the edit log
transactions for txids equal to or greater than txid.
|
org.apache.hadoop.fs.Path |
getEnclosingRoot(java.lang.String filename) |
Get the enclosing root for a path.
|
java.util.Map<java.lang.String,java.lang.String> |
getErasureCodingCodecs() |
Get the erasure coding codecs loaded in Namenode.
|
ErasureCodingPolicyInfo[] |
getErasureCodingPolicies() |
Get the erasure coding policies loaded in Namenode, excluding REPLICATION
policy.
|
ErasureCodingPolicy |
getErasureCodingPolicy(java.lang.String src) |
Get the information about the EC policy for the path.
|
EncryptionZone |
getEZForPath(java.lang.String src) |
Get the encryption zone for a path.
|
HdfsFileStatus |
getFileInfo(java.lang.String src) |
Get the file info for a specific file or directory.
|
HdfsFileStatus |
getFileLinkInfo(java.lang.String src) |
Get the file info for a specific file or directory.
|
org.apache.hadoop.ha.HAServiceProtocol.HAServiceState |
getHAServiceState() |
Get HA service state of the server.
|
java.lang.String |
getLinkTarget(java.lang.String path) |
Return the target of the given symlink.
|
DirectoryListing |
getListing(java.lang.String src,
byte[] startAfter,
boolean needLocation) |
Get a partial listing of the indicated directory.
|
HdfsLocatedFileStatus |
getLocatedFileInfo(java.lang.String src,
boolean needBlockToken) |
Get the file info for a specific file or directory with
LocatedBlocks. |
long |
getPreferredBlockSize(java.lang.String filename) |
Get the block size for the given file.
|
org.apache.hadoop.fs.QuotaUsage |
getQuotaUsage(java.lang.String path) |
Get
QuotaUsage rooted at the specified directory. |
ReplicatedBlockStats |
getReplicatedBlockStats() |
Get statistics pertaining to blocks of type
BlockType.CONTIGUOUS
in the filesystem. |
org.apache.hadoop.fs.FsServerDefaults |
getServerDefaults() |
Get server default values for a number of configuration params.
|
DatanodeInfo[] |
getSlowDatanodeReport() |
Get report on all of the slow Datanodes.
|
SnapshotDiffReport |
getSnapshotDiffReport(java.lang.String snapshotRoot,
java.lang.String fromSnapshot,
java.lang.String toSnapshot) |
Get the difference between two snapshots, or between a snapshot and the
current tree of a directory.
|
SnapshotDiffReportListing |
getSnapshotDiffReportListing(java.lang.String snapshotRoot,
java.lang.String fromSnapshot,
java.lang.String toSnapshot,
byte[] startPath,
int index) |
Get the difference between two snapshots of a directory iteratively.
|
SnapshotStatus[] |
getSnapshotListing(java.lang.String path) |
Get listing of all the snapshots for a snapshottable directory.
|
SnapshottableDirectoryStatus[] |
getSnapshottableDirListing() |
Get the list of snapshottable directories that are owned
by the current user.
|
long[] |
getStats() |
Get an array of aggregated statistics combining blocks of both type
BlockType.CONTIGUOUS and BlockType.STRIPED in the
filesystem. |
BlockStoragePolicy[] |
getStoragePolicies() |
Get all the available block storage policies.
|
BlockStoragePolicy |
getStoragePolicy(java.lang.String path) |
Get the storage policy for a file/directory.
|
java.lang.Object |
getUnderlyingProxyObject() |
|
java.util.List<XAttr> |
getXAttrs(java.lang.String src,
java.util.List<XAttr> xAttrs) |
Get xattrs of a file or directory.
|
boolean |
isFileClosed(java.lang.String src) |
Get the close status of a file.
|
boolean |
isMethodSupported(java.lang.String methodName) |
|
org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries<CacheDirectiveEntry> |
listCacheDirectives(long prevId,
CacheDirectiveInfo filter) |
List the set of cached paths of a cache pool.
|
org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries<CachePoolEntry> |
listCachePools(java.lang.String prevKey) |
List the set of cache pools.
|
CorruptFileBlocks |
listCorruptFileBlocks(java.lang.String path,
java.lang.String cookie) |
|
org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries<EncryptionZone> |
listEncryptionZones(long id) |
Used to implement cursor-based batched listing of
EncryptionZones. |
org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries<OpenFileEntry> |
listOpenFiles(long prevId) |
Deprecated.
|
org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries<OpenFileEntry> |
listOpenFiles(long prevId,
java.util.EnumSet<OpenFilesIterator.OpenFilesType> openFilesTypes,
java.lang.String path) |
List open files in the system in batches.
|
org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries<ZoneReencryptionStatus> |
listReencryptionStatus(long id) |
Used to implement cursor-based batched listing of
ZoneReencryptionStatus entries. |
java.util.List<XAttr> |
listXAttrs(java.lang.String src) |
List the xattrs names for a file or directory.
|
void |
metaSave(java.lang.String filename) |
Dumps namenode data structures into specified file.
|
boolean |
mkdirs(java.lang.String src,
org.apache.hadoop.fs.permission.FsPermission masked,
boolean createParent) |
Create a directory (or hierarchy of directories) with the given
name and permission.
|
void |
modifyAclEntries(java.lang.String src,
java.util.List<org.apache.hadoop.fs.permission.AclEntry> aclSpec) |
Modifies ACL entries of files and directories.
|
void |
modifyCacheDirective(CacheDirectiveInfo directive,
java.util.EnumSet<CacheFlag> flags) |
Modify a CacheDirective in the CacheManager.
|
void |
modifyCachePool(CachePoolInfo req) |
Modify an existing cache pool.
|
void |
msync() |
Called by client to wait until the server has reached the state id of the
client.
|
boolean |
recoverLease(java.lang.String src,
java.lang.String clientName) |
Start lease recovery.
|
void |
reencryptEncryptionZone(java.lang.String zone,
HdfsConstants.ReencryptAction action) |
Used to implement re-encryption of encryption zones.
|
void |
refreshNodes() |
Tells the namenode to reread the hosts and exclude files.
|
void |
removeAcl(java.lang.String src) |
Removes all but the base ACL entries of files and directories.
|
void |
removeAclEntries(java.lang.String src,
java.util.List<org.apache.hadoop.fs.permission.AclEntry> aclSpec) |
Removes ACL entries from files and directories.
|
void |
removeCacheDirective(long id) |
Remove a CacheDirectiveInfo from the CacheManager.
|
void |
removeCachePool(java.lang.String cachePoolName) |
Remove a cache pool.
|
void |
removeDefaultAcl(java.lang.String src) |
Removes all default ACL entries from files and directories.
|
void |
removeErasureCodingPolicy(java.lang.String ecPolicyName) |
Remove erasure coding policy.
|
void |
removeXAttr(java.lang.String src,
XAttr xAttr) |
Remove xattr of a file or directory. Value in xAttr parameter is ignored.
|
boolean |
rename(java.lang.String src,
java.lang.String dst) |
Rename an item in the file system namespace.
|
void |
rename2(java.lang.String src,
java.lang.String dst,
org.apache.hadoop.fs.Options.Rename... options) |
Rename src to dst.
|
void |
renameSnapshot(java.lang.String snapshotRoot,
java.lang.String snapshotOldName,
java.lang.String snapshotNewName) |
Rename a snapshot.
|
long |
renewDelegationToken(org.apache.hadoop.security.token.Token<DelegationTokenIdentifier> token) |
Renew an existing delegation token.
|
void |
renewLease(java.lang.String clientName,
java.util.List<java.lang.String> namespaces) |
Client programs can cause stateful changes in the NameNode
that affect other clients.
|
void |
reportBadBlocks(LocatedBlock[] blocks) |
The client wants to report corrupted blocks (blocks with specified
locations on datanodes).
|
boolean |
restoreFailedStorage(java.lang.String arg) |
Enable/Disable restore failed storage.
|
long |
rollEdits() |
Roll the edit log.
|
RollingUpgradeInfo |
rollingUpgrade(HdfsConstants.RollingUpgradeAction action) |
Rolling upgrade operations.
|
void |
satisfyStoragePolicy(java.lang.String src) |
Satisfy the storage policy for a file/directory.
|
boolean |
saveNamespace(long timeWindow,
long txGap) |
Save namespace image.
|
void |
setAcl(java.lang.String src,
java.util.List<org.apache.hadoop.fs.permission.AclEntry> aclSpec) |
Fully replaces ACL of files and directories, discarding all existing
entries.
|
void |
setBalancerBandwidth(long bandwidth) |
Tell all datanodes to use a new, non-persistent bandwidth value for
dfs.datanode.balance.bandwidthPerSec.
|
void |
setErasureCodingPolicy(java.lang.String src,
java.lang.String ecPolicyName) |
Set an erasure coding policy on a specified path.
|
void |
setOwner(java.lang.String src,
java.lang.String username,
java.lang.String groupname) |
Set Owner of a path (i.e.
|
void |
setPermission(java.lang.String src,
org.apache.hadoop.fs.permission.FsPermission permission) |
Set permissions for an existing file/directory.
|
void |
setQuota(java.lang.String path,
long namespaceQuota,
long storagespaceQuota,
org.apache.hadoop.fs.StorageType type) |
Set the quota for a directory.
|
boolean |
setReplication(java.lang.String src,
short replication) |
Set replication for an existing file.
|
boolean |
setSafeMode(HdfsConstants.SafeModeAction action,
boolean isChecked) |
Enter, leave or get safe mode.
|
void |
setStoragePolicy(java.lang.String src,
java.lang.String policyName) |
Set the storage policy for a file/directory.
|
void |
setTimes(java.lang.String src,
long mtime,
long atime) |
Sets the modification and access time of the file to the specified time.
|
void |
setXAttr(java.lang.String src,
XAttr xAttr,
java.util.EnumSet<org.apache.hadoop.fs.XAttrSetFlag> flag) |
Set xattr of a file or directory.
|
boolean |
truncate(java.lang.String src,
long newLength,
java.lang.String clientName) |
Truncate file src to new size.
|
void |
unsetErasureCodingPolicy(java.lang.String src) |
Unset erasure coding policy from a specified path.
|
void |
unsetStoragePolicy(java.lang.String src) |
Unset the storage policy set for a given file or directory.
|
LocatedBlock |
updateBlockForPipeline(ExtendedBlock block,
java.lang.String clientName) |
Get a new generation stamp together with an access token for
a block under construction.
This method is called only when a client needs to recover a failed
pipeline or set up a pipeline for appending to a block.
|
void |
updatePipeline(java.lang.String clientName,
ExtendedBlock oldBlock,
ExtendedBlock newBlock,
DatanodeID[] newNodes,
java.lang.String[] storageIDs) |
Update a pipeline for a block under construction.
|
boolean |
upgradeStatus() |
Get status of upgrade - finalized or not.
|
public ClientNamenodeProtocolTranslatorPB(ClientNamenodeProtocolPB proxy)
public void close()
close in interface java.lang.AutoCloseableclose in interface java.io.Closeablepublic LocatedBlocks getBlockLocations(java.lang.String src, long offset, long length) throws java.io.IOException
ClientProtocol
Return LocatedBlocks which contains
file length, blocks and their locations.
DataNode locations for each block are sorted by
the distance to the client's address.
The client will then have to contact one of the indicated DataNodes to obtain the actual data.
getBlockLocations in interface ClientProtocolsrc - file nameoffset - range start offsetlength - range lengthorg.apache.hadoop.security.AccessControlException - If access is
deniedjava.io.FileNotFoundException - If file src does not
existorg.apache.hadoop.fs.UnresolvedLinkException - If src
contains a symlinkjava.io.IOException - If an I/O error occurredpublic org.apache.hadoop.fs.FsServerDefaults getServerDefaults()
throws java.io.IOException
ClientProtocolgetServerDefaults in interface ClientProtocoljava.io.IOExceptionpublic HdfsFileStatus create(java.lang.String src, org.apache.hadoop.fs.permission.FsPermission masked, java.lang.String clientName, org.apache.hadoop.io.EnumSetWritable<org.apache.hadoop.fs.CreateFlag> flag, boolean createParent, short replication, long blockSize, org.apache.hadoop.crypto.CryptoProtocolVersion[] supportedVersions, java.lang.String ecPolicyName, java.lang.String storagePolicy) throws java.io.IOException
ClientProtocolThis will create an empty file specified by the source path. The path should reflect a full path originated at the root. The name-node does not have a notion of "current" directory for a client.
Once created, the file is visible and available for read to other clients.
Although, other clients cannot ClientProtocol.delete(String, boolean), re-create
or ClientProtocol.rename(String, String) it until the file is completed
or explicitly as a result of lease expiration.
Blocks have a maximum size. Clients that intend to create
multi-block files must also use
ClientProtocol.addBlock(java.lang.String, java.lang.String, org.apache.hadoop.hdfs.protocol.ExtendedBlock, org.apache.hadoop.hdfs.protocol.DatanodeInfo[], long, java.lang.String[], java.util.EnumSet<org.apache.hadoop.hdfs.AddBlockFlag>)
create in interface ClientProtocolsrc - path of the file being created.masked - masked permission.clientName - name of the current client.flag - indicates whether the file should be overwritten if it already
exists or create if it does not exist or append, or whether the
file should be a replicate file, no matter what its ancestor's
replication or erasure coding policy is.createParent - create missing parent directory if truereplication - block replication factor.blockSize - maximum block size.supportedVersions - CryptoProtocolVersions supported by the clientecPolicyName - the name of erasure coding policy. A null value means
this file will inherit its parent directory's policy,
either traditional replication or erasure coding
policy. ecPolicyName and SHOULD_REPLICATE CreateFlag
are mutually exclusive. It's invalid to set both
SHOULD_REPLICATE flag and a non-null ecPolicyName.storagePolicy - the name of the storage policy.org.apache.hadoop.security.AccessControlException - If access is
deniedAlreadyBeingCreatedException - if the path does not exist.DSQuotaExceededException - If file creation violates disk space
quota restrictionorg.apache.hadoop.fs.FileAlreadyExistsException - If file
src already existsjava.io.FileNotFoundException - If parent of src does
not exist and createParent is falseorg.apache.hadoop.fs.ParentNotDirectoryException - If parent of
src is not a directory.NSQuotaExceededException - If file creation violates name space
quota restrictionSafeModeException - create not
allowed in safemodeorg.apache.hadoop.fs.UnresolvedLinkException - If src
contains a symlinkSnapshotAccessControlException - if path is in RO snapshotjava.io.IOException - If an I/O error occurred
RuntimeExceptions:public boolean truncate(java.lang.String src,
long newLength,
java.lang.String clientName)
throws java.io.IOException
ClientProtocolThis implementation of truncate is purely a namespace operation if truncate occurs at a block boundary. Requires DataNode block recovery otherwise.
truncate in interface ClientProtocolsrc - existing filenewLength - the target sizeorg.apache.hadoop.security.AccessControlException - If access is
deniedjava.io.FileNotFoundException - If file src is not foundSafeModeException - truncate
not allowed in safemodeorg.apache.hadoop.fs.UnresolvedLinkException - If src
contains a symlinkSnapshotAccessControlException - if path is in RO snapshotjava.io.IOException - If an I/O error occurredpublic LastBlockWithStatus append(java.lang.String src, java.lang.String clientName, org.apache.hadoop.io.EnumSetWritable<org.apache.hadoop.fs.CreateFlag> flag) throws java.io.IOException
ClientProtocolappend in interface ClientProtocolsrc - path of the file being created.clientName - name of the current client.flag - indicates whether the data is appended to a new block.org.apache.hadoop.security.AccessControlException - if permission to
append file is denied by the system. As usually on the client side the
exception will be wrapped into
RemoteException.
Allows appending to an existing file if the server is
configured with the parameter dfs.support.append set to true, otherwise
throws an IOException.java.io.FileNotFoundException - If file src is not foundDSQuotaExceededException - If append violates disk space quota
restrictionSafeModeException - append not
allowed in safemodeorg.apache.hadoop.fs.UnresolvedLinkException - If src
contains a symlinkSnapshotAccessControlException - if path is in RO snapshotjava.io.IOException - If an I/O error occurred.
RuntimeExceptions:public boolean setReplication(java.lang.String src,
short replication)
throws java.io.IOException
ClientProtocolThe NameNode sets replication to the new value and returns. The actual block replication is not expected to be performed during this method call. The blocks will be populated or removed in the background as the result of the routine block maintenance procedures.
setReplication in interface ClientProtocolsrc - file namereplication - new replicationorg.apache.hadoop.security.AccessControlException - If access is
deniedDSQuotaExceededException - If replication violates disk space
quota restrictionjava.io.FileNotFoundException - If file src is not foundSafeModeException - not
allowed in safemodeorg.apache.hadoop.fs.UnresolvedLinkException - if src
contains a symlinkSnapshotAccessControlException - if path is in RO snapshotjava.io.IOException - If an I/O error occurredpublic void setPermission(java.lang.String src,
org.apache.hadoop.fs.permission.FsPermission permission)
throws java.io.IOException
ClientProtocolsetPermission in interface ClientProtocolorg.apache.hadoop.security.AccessControlException - If access is
deniedjava.io.FileNotFoundException - If file src is not foundSafeModeException - not
allowed in safemodeorg.apache.hadoop.fs.UnresolvedLinkException - If src
contains a symlinkSnapshotAccessControlException - if path is in RO snapshotjava.io.IOException - If an I/O error occurredpublic void setOwner(java.lang.String src,
java.lang.String username,
java.lang.String groupname)
throws java.io.IOException
ClientProtocolsetOwner in interface ClientProtocolsrc - file pathusername - If it is null, the original username remains unchanged.groupname - If it is null, the original groupname remains unchanged.org.apache.hadoop.security.AccessControlException - If access is
deniedjava.io.FileNotFoundException - If file src is not foundSafeModeException - not
allowed in safemodeorg.apache.hadoop.fs.UnresolvedLinkException - If src
contains a symlinkSnapshotAccessControlException - if path is in RO snapshotjava.io.IOException - If an I/O error occurredpublic void abandonBlock(ExtendedBlock b, long fileId, java.lang.String src, java.lang.String holder) throws java.io.IOException
ClientProtocolabandonBlock in interface ClientProtocolb - Block to abandonfileId - The id of the file where the block resides. Older clients
will pass GRANDFATHER_INODE_ID here.src - The path of the file where the block resides.holder - Lease holder.org.apache.hadoop.security.AccessControlException - If access is
deniedjava.io.FileNotFoundException - file src is not foundorg.apache.hadoop.fs.UnresolvedLinkException - If src
contains a symlinkjava.io.IOException - If an I/O error occurredpublic LocatedBlock addBlock(java.lang.String src, java.lang.String clientName, ExtendedBlock previous, DatanodeInfo[] excludeNodes, long fileId, java.lang.String[] favoredNodes, java.util.EnumSet<AddBlockFlag> addBlockFlags) throws java.io.IOException
ClientProtocoladdBlock in interface ClientProtocolsrc - the file being createdclientName - the name of the client that adds the blockprevious - previous blockexcludeNodes - a list of nodes that should not be
allocated for the current blockfileId - the id uniquely identifying a filefavoredNodes - the list of nodes where the client wants the blocks.
Nodes are identified by either host name or address.addBlockFlags - flags to advise the behavior of allocating and placing
a new block.org.apache.hadoop.security.AccessControlException - If access is
deniedjava.io.FileNotFoundException - If file src is not foundNotReplicatedYetException - previous blocks of the file are not replicated yet.
Blocks cannot be added until replication completes.SafeModeException - create not
allowed in safemodeorg.apache.hadoop.fs.UnresolvedLinkException - If src
contains a symlinkjava.io.IOException - If an I/O error occurredpublic LocatedBlock getAdditionalDatanode(java.lang.String src, long fileId, ExtendedBlock blk, DatanodeInfo[] existings, java.lang.String[] existingStorageIDs, DatanodeInfo[] excludes, int numAdditionalNodes, java.lang.String clientName) throws java.io.IOException
ClientProtocolgetAdditionalDatanode in interface ClientProtocolsrc - the file being writtenfileId - the ID of the file being writtenblk - the block being writtenexistings - the existing nodes in the pipelineexcludes - the excluded nodesnumAdditionalNodes - number of additional datanodesclientName - the name of the clientorg.apache.hadoop.security.AccessControlException - If access is
deniedjava.io.FileNotFoundException - If file src is not foundSafeModeException - create not
allowed in safemodeorg.apache.hadoop.fs.UnresolvedLinkException - If src
contains a symlinkjava.io.IOException - If an I/O error occurredpublic boolean complete(java.lang.String src,
java.lang.String clientName,
ExtendedBlock last,
long fileId)
throws java.io.IOException
ClientProtocolcomplete in interface ClientProtocolsrc - the file being createdclientName - the name of the client that adds the blocklast - the last block infofileId - the id uniquely identifying a fileorg.apache.hadoop.security.AccessControlException - If access is
deniedjava.io.FileNotFoundException - If file src is not foundSafeModeException - create not
allowed in safemodeorg.apache.hadoop.fs.UnresolvedLinkException - If src
contains a symlinkjava.io.IOException - If an I/O error occurredpublic void reportBadBlocks(LocatedBlock[] blocks) throws java.io.IOException
ClientProtocolreportBadBlocks in interface ClientProtocolblocks - Array of located blocks to reportjava.io.IOExceptionpublic boolean rename(java.lang.String src,
java.lang.String dst)
throws java.io.IOException
ClientProtocolrename in interface ClientProtocolsrc - existing file or directory name.dst - new name.SnapshotAccessControlException - if path is in RO snapshotjava.io.IOException - an I/O error occurredpublic void rename2(java.lang.String src,
java.lang.String dst,
org.apache.hadoop.fs.Options.Rename... options)
throws java.io.IOException
ClientProtocolWithout OVERWRITE option, rename fails if the dst already exists. With OVERWRITE option, rename overwrites the dst, if it is a file or an empty directory. Rename fails if dst is a non-empty directory.
This implementation of rename is atomic.
rename2 in interface ClientProtocolsrc - existing file or directory name.dst - new name.options - Rename optionsorg.apache.hadoop.security.AccessControlException - If access is
deniedDSQuotaExceededException - If rename violates disk space
quota restrictionorg.apache.hadoop.fs.FileAlreadyExistsException - If dst
already exists and options has
Options.Rename.OVERWRITE option
false.java.io.FileNotFoundException - If src does not existNSQuotaExceededException - If rename violates namespace
quota restrictionorg.apache.hadoop.fs.ParentNotDirectoryException - If parent of
dst is not a directorySafeModeException - rename not
allowed in safemodeorg.apache.hadoop.fs.UnresolvedLinkException - If src or
dst contains a symlinkSnapshotAccessControlException - if path is in RO snapshotjava.io.IOException - If an I/O error occurredpublic void concat(java.lang.String trg,
java.lang.String[] srcs)
throws java.io.IOException
ClientProtocolconcat in interface ClientProtocoltrg - existing filesrcs - - list of existing files (same block size, same replication)java.io.IOException - if some arguments are invalidorg.apache.hadoop.fs.UnresolvedLinkException - if trg or
srcs contains a symlinkSnapshotAccessControlException - if path is in RO snapshotpublic boolean delete(java.lang.String src,
boolean recursive)
throws java.io.IOException
ClientProtocolsame as delete but provides a way to avoid accidentally deleting non-empty directories programmatically.
delete in interface ClientProtocolsrc - existing namerecursive - if true deletes a non-empty directory recursively,
else throws an exception.org.apache.hadoop.security.AccessControlException - If access is
deniedjava.io.FileNotFoundException - If file src is not foundSafeModeException - create not
allowed in safemodeorg.apache.hadoop.fs.UnresolvedLinkException - If src
contains a symlinkSnapshotAccessControlException - if path is in RO snapshotorg.apache.hadoop.fs.PathIsNotEmptyDirectoryException - if path is a non-empty directory
and recursive is set to falsejava.io.IOException - If an I/O error occurredpublic boolean mkdirs(java.lang.String src,
org.apache.hadoop.fs.permission.FsPermission masked,
boolean createParent)
throws java.io.IOException
ClientProtocolmkdirs in interface ClientProtocolsrc - The path of the directory being createdmasked - The masked permission of the directory being createdcreateParent - create missing parent directory if trueorg.apache.hadoop.security.AccessControlException - If access is
deniedorg.apache.hadoop.fs.FileAlreadyExistsException - If src
already existsjava.io.FileNotFoundException - If parent of src does
not exist and createParent is falseNSQuotaExceededException - If file creation violates quota
restrictionorg.apache.hadoop.fs.ParentNotDirectoryException - If parent of
src is not a directorySafeModeException - create not
allowed in safemodeorg.apache.hadoop.fs.UnresolvedLinkException - If src
contains a symlinkSnapshotAccessControlException - if path is in RO snapshotjava.io.IOException - If an I/O error occurred.
RuntimeExceptions:public DirectoryListing getListing(java.lang.String src, byte[] startAfter, boolean needLocation) throws java.io.IOException
ClientProtocolgetListing in interface ClientProtocolsrc - the directory namestartAfter - the name to start listing after encoded in java UTF8needLocation - if the FileStatus should contain block locationsorg.apache.hadoop.security.AccessControlException - permission deniedjava.io.FileNotFoundException - file src is not foundorg.apache.hadoop.fs.UnresolvedLinkException - If src
contains a symlinkjava.io.IOException - If an I/O error occurredpublic BatchedDirectoryListing getBatchedListing(java.lang.String[] srcs, byte[] startAfter, boolean needLocation) throws java.io.IOException
ClientProtocolgetBatchedListing in interface ClientProtocolsrcs - the input directoriesstartAfter - the name to start listing after encoded in Java UTF8needLocation - if the FileStatus should contain block locationsjava.io.IOException - if an I/O error occurredpublic void renewLease(java.lang.String clientName,
java.util.List<java.lang.String> namespaces)
throws java.io.IOException
ClientProtocolSo, the NameNode will revoke the locks and live file-creates for clients that it thinks have died. A client tells the NameNode that it is still alive by periodically calling renewLease(). If a certain amount of time passes since the last call to renewLease(), the NameNode assumes the client has died.
renewLease in interface ClientProtocolnamespaces - The full Namespace list that the renewLease rpc
should be forwarded by RBF.
Tips: NN side, this value should be null.
RBF side, if this value is null, this rpc will
be forwarded to all available namespaces,
else this rpc will be forwarded to
the special namespaces.org.apache.hadoop.security.AccessControlException - permission deniedjava.io.IOException - If an I/O error occurredpublic boolean recoverLease(java.lang.String src,
java.lang.String clientName)
throws java.io.IOException
ClientProtocolrecoverLease in interface ClientProtocolsrc - path of the file to start lease recoveryclientName - name of the current clientjava.io.IOExceptionpublic long[] getStats()
throws java.io.IOException
ClientProtocolBlockType.CONTIGUOUS and BlockType.STRIPED in the
filesystem. Use public constants like ClientProtocol.GET_STATS_CAPACITY_IDX in
place of actual numbers to index into the array.
getStats in interface ClientProtocoljava.io.IOExceptionpublic ReplicatedBlockStats getReplicatedBlockStats() throws java.io.IOException
ClientProtocolBlockType.CONTIGUOUS
in the filesystem.getReplicatedBlockStats in interface ClientProtocoljava.io.IOExceptionpublic ECBlockGroupStats getECBlockGroupStats() throws java.io.IOException
ClientProtocolBlockType.STRIPED
in the filesystem.getECBlockGroupStats in interface ClientProtocoljava.io.IOExceptionpublic DatanodeInfo[] getDatanodeReport(HdfsConstants.DatanodeReportType type) throws java.io.IOException
ClientProtocolgetDatanodeReport in interface ClientProtocoljava.io.IOExceptionpublic DatanodeStorageReport[] getDatanodeStorageReport(HdfsConstants.DatanodeReportType type) throws java.io.IOException
ClientProtocolgetDatanodeStorageReport in interface ClientProtocoljava.io.IOExceptionpublic long getPreferredBlockSize(java.lang.String filename)
throws java.io.IOException
ClientProtocolgetPreferredBlockSize in interface ClientProtocolfilename - The name of the filejava.io.IOExceptionorg.apache.hadoop.fs.UnresolvedLinkException - if the path contains
a symlink.public boolean setSafeMode(HdfsConstants.SafeModeAction action, boolean isChecked) throws java.io.IOException
ClientProtocolSafe mode is a name node state when it (1) does not accept changes to the name space (read-only), and (2) does not replicate or delete blocks.
Safe mode is entered automatically at name node startup.
Safe mode can also be entered manually using
setSafeMode(SafeModeAction.SAFEMODE_ENTER,false).
At startup the name node accepts data node reports collecting
information about block locations.
In order to leave safe mode it needs to collect a configurable
percentage called threshold of blocks, which satisfy the minimal
replication condition.
The minimal replication condition is that each block must have at least
dfs.namenode.replication.min replicas.
When the threshold is reached the name node extends safe mode
for a configurable amount of time
to let the remaining data nodes check in before it
will start replicating missing blocks.
Then the name node leaves safe mode.
If safe mode is turned on manually using
setSafeMode(SafeModeAction.SAFEMODE_ENTER,false)
then the name node stays in safe mode until it is manually turned off
using setSafeMode(SafeModeAction.SAFEMODE_LEAVE,false).
Current state of the name node can be verified using
setSafeMode(SafeModeAction.SAFEMODE_GET,false)
Configuration parameters:
dfs.safemode.threshold.pct is the threshold parameter.dfs.safemode.extension is the safe mode extension parameter.dfs.namenode.replication.min is the minimal replication parameter.
Special cases:
The name node does not enter safe mode at startup if the threshold is set to 0 or if the name space is empty.setSafeMode in interface ClientProtocolaction - isChecked - If true then action will be done only in ActiveNN.java.io.IOExceptionpublic boolean saveNamespace(long timeWindow,
long txGap)
throws java.io.IOException
ClientProtocolSaves current namespace into storage directories and reset edits log. Requires superuser privilege and safe mode.
saveNamespace in interface ClientProtocoltimeWindow - NameNode does a checkpoint if the latest checkpoint was
done beyond the given time period (in seconds).txGap - NameNode does a checkpoint if the gap between the latest
checkpoint and the latest transaction id is greater than this gap.java.io.IOException - if image creation failed.public long rollEdits()
throws java.io.IOException
ClientProtocolrollEdits in interface ClientProtocolorg.apache.hadoop.security.AccessControlException - if the superuser
privilege is violatedjava.io.IOException - if log roll failspublic boolean restoreFailedStorage(java.lang.String arg)
throws java.io.IOException
ClientProtocolsets flag to enable restore of failed storage replicas
restoreFailedStorage in interface ClientProtocolorg.apache.hadoop.security.AccessControlException - if the superuser
privilege is violated.java.io.IOExceptionpublic void refreshNodes()
throws java.io.IOException
ClientProtocolrefreshNodes in interface ClientProtocoljava.io.IOExceptionpublic void finalizeUpgrade()
throws java.io.IOException
ClientProtocolfinalizeUpgrade in interface ClientProtocoljava.io.IOExceptionpublic boolean upgradeStatus()
throws java.io.IOException
ClientProtocolupgradeStatus in interface ClientProtocoljava.io.IOExceptionpublic RollingUpgradeInfo rollingUpgrade(HdfsConstants.RollingUpgradeAction action) throws java.io.IOException
ClientProtocolrollingUpgrade in interface ClientProtocolaction - either query, prepare or finalize.java.io.IOExceptionpublic CorruptFileBlocks listCorruptFileBlocks(java.lang.String path, java.lang.String cookie) throws java.io.IOException
listCorruptFileBlocks in interface ClientProtocoljava.io.IOException - Each call returns a subset of the corrupt files in the system. To obtain
all corrupt files, call this method repeatedly and each time pass in the
cookie returned from the previous call.public void metaSave(java.lang.String filename)
throws java.io.IOException
ClientProtocolmetaSave in interface ClientProtocoljava.io.IOExceptionpublic HdfsFileStatus getFileInfo(java.lang.String src) throws java.io.IOException
ClientProtocolgetFileInfo in interface ClientProtocolsrc - The string representation of the path to the fileorg.apache.hadoop.security.AccessControlException - permission deniedjava.io.FileNotFoundException - file src is not foundorg.apache.hadoop.fs.UnresolvedLinkException - if the path contains
a symlink.java.io.IOException - If an I/O error occurredpublic HdfsLocatedFileStatus getLocatedFileInfo(java.lang.String src, boolean needBlockToken) throws java.io.IOException
ClientProtocolLocatedBlocks.getLocatedFileInfo in interface ClientProtocolsrc - The string representation of the path to the fileneedBlockToken - Generate block tokens for LocatedBlocksorg.apache.hadoop.security.AccessControlException - permission deniedjava.io.FileNotFoundException - file src is not foundjava.io.IOException - If an I/O error occurredpublic HdfsFileStatus getFileLinkInfo(java.lang.String src) throws java.io.IOException
ClientProtocolgetFileLinkInfo in interface ClientProtocolsrc - The string representation of the path to the fileorg.apache.hadoop.security.AccessControlException - permission deniedorg.apache.hadoop.fs.UnresolvedLinkException - if src
contains a symlinkjava.io.IOException - If an I/O error occurredpublic org.apache.hadoop.fs.ContentSummary getContentSummary(java.lang.String path)
throws java.io.IOException
ClientProtocolContentSummary rooted at the specified directory.getContentSummary in interface ClientProtocolpath - The string representation of the pathorg.apache.hadoop.security.AccessControlException - permission deniedjava.io.FileNotFoundException - file path is not foundorg.apache.hadoop.fs.UnresolvedLinkException - if path
contains a symlink.java.io.IOException - If an I/O error occurredpublic void setQuota(java.lang.String path,
long namespaceQuota,
long storagespaceQuota,
org.apache.hadoop.fs.StorageType type)
throws java.io.IOException
ClientProtocolsetQuota in interface ClientProtocolpath - The string representation of the path to the directorynamespaceQuota - Limit on the number of names in the tree rooted
at the directorystoragespaceQuota - Limit on storage space occupied all the files
under this directory.type - StorageType that the space quota is intended to be set on.
It may be null when called by traditional space/namespace
quota. When type is not null, the storagespaceQuota
parameter is for the type specified and namespaceQuota must be
HdfsConstants.QUOTA_DONT_SET.
The quota can have three types of values: (1) 0 or more will set the quota to that value, (2) HdfsConstants.QUOTA_DONT_SET implies
the quota will not be changed, and (3) HdfsConstants.QUOTA_RESET
implies the quota will be reset. Any other value is a runtime error.org.apache.hadoop.security.AccessControlException - permission deniedjava.io.FileNotFoundException - file path is not foundQuotaExceededException - if the directory size
is greater than the given quotaorg.apache.hadoop.fs.UnresolvedLinkException - if the
path contains a symlink.SnapshotAccessControlException - if path is in RO snapshotjava.io.IOException - If an I/O error occurredpublic void fsync(java.lang.String src,
long fileId,
java.lang.String client,
long lastBlockLength)
throws java.io.IOException
ClientProtocolfsync in interface ClientProtocolsrc - The string representation of the pathfileId - The inode ID, or GRANDFATHER_INODE_ID if the client is
too old to support fsync with inode IDs.client - The string representation of the clientlastBlockLength - The length of the last block (under construction)
to be reported to NameNodeorg.apache.hadoop.security.AccessControlException - permission deniedjava.io.FileNotFoundException - file src is not foundorg.apache.hadoop.fs.UnresolvedLinkException - if src
contains a symlink.java.io.IOException - If an I/O error occurredpublic void setTimes(java.lang.String src,
long mtime,
long atime)
throws java.io.IOException
ClientProtocolsetTimes in interface ClientProtocolsrc - The string representation of the pathmtime - The number of milliseconds since Jan 1, 1970.
Setting negative mtime means that modification time should not
be set by this call.atime - The number of milliseconds since Jan 1, 1970.
Setting negative atime means that access time should not be
set by this call.org.apache.hadoop.security.AccessControlException - permission deniedjava.io.FileNotFoundException - file src is not foundorg.apache.hadoop.fs.UnresolvedLinkException - if src
contains a symlink.SnapshotAccessControlException - if path is in RO snapshotjava.io.IOException - If an I/O error occurredpublic void createSymlink(java.lang.String target,
java.lang.String link,
org.apache.hadoop.fs.permission.FsPermission dirPerm,
boolean createParent)
throws java.io.IOException
ClientProtocolcreateSymlink in interface ClientProtocoltarget - The path of the destination that the
link points to.link - The path of the link being created.dirPerm - permissions to use when creating parent directoriescreateParent - - if true then missing parent dirs are created
if false then parent must existorg.apache.hadoop.security.AccessControlException - permission deniedorg.apache.hadoop.fs.FileAlreadyExistsException - If file
link already existsjava.io.FileNotFoundException - If parent of link does
not exist and createParent is falseorg.apache.hadoop.fs.ParentNotDirectoryException - If parent of
link is not a directory.org.apache.hadoop.fs.UnresolvedLinkException - if link
contains a symlink.SnapshotAccessControlException - if path is in RO snapshotjava.io.IOException - If an I/O error occurredpublic java.lang.String getLinkTarget(java.lang.String path)
throws java.io.IOException
ClientProtocolgetLinkTarget in interface ClientProtocolpath - The path with a link that needs resolution.org.apache.hadoop.security.AccessControlException - permission deniedjava.io.FileNotFoundException - If path does not existjava.io.IOException - If the given path does not refer to a symlink
or an I/O error occurredpublic LocatedBlock updateBlockForPipeline(ExtendedBlock block, java.lang.String clientName) throws java.io.IOException
ClientProtocolupdateBlockForPipeline in interface ClientProtocolblock - a blockclientName - the name of the clientjava.io.IOException - if any error occurspublic void updatePipeline(java.lang.String clientName,
ExtendedBlock oldBlock,
ExtendedBlock newBlock,
DatanodeID[] newNodes,
java.lang.String[] storageIDs)
throws java.io.IOException
ClientProtocolupdatePipeline in interface ClientProtocolclientName - the name of the clientoldBlock - the old blocknewBlock - the new block containing new generation stamp and lengthnewNodes - datanodes in the pipelinejava.io.IOException - if any error occurspublic org.apache.hadoop.security.token.Token<DelegationTokenIdentifier> getDelegationToken(org.apache.hadoop.io.Text renewer) throws java.io.IOException
ClientProtocolgetDelegationToken in interface ClientProtocolrenewer - the designated renewer for the tokenjava.io.IOExceptionpublic long renewDelegationToken(org.apache.hadoop.security.token.Token<DelegationTokenIdentifier> token) throws java.io.IOException
ClientProtocolrenewDelegationToken in interface ClientProtocoltoken - delegation token obtained earlierjava.io.IOExceptionpublic void cancelDelegationToken(org.apache.hadoop.security.token.Token<DelegationTokenIdentifier> token) throws java.io.IOException
ClientProtocolcancelDelegationToken in interface ClientProtocoltoken - delegation tokenjava.io.IOExceptionpublic void setBalancerBandwidth(long bandwidth)
throws java.io.IOException
ClientProtocolsetBalancerBandwidth in interface ClientProtocolbandwidth - Balancer bandwidth in bytes per second for this datanode.java.io.IOExceptionpublic boolean isMethodSupported(java.lang.String methodName)
throws java.io.IOException
isMethodSupported in interface org.apache.hadoop.ipc.ProtocolMetaInterfacejava.io.IOExceptionpublic DataEncryptionKey getDataEncryptionKey() throws java.io.IOException
getDataEncryptionKey in interface ClientProtocoljava.io.IOExceptionpublic boolean isFileClosed(java.lang.String src)
throws java.io.IOException
ClientProtocolisFileClosed in interface ClientProtocolsrc - The string representation of the path to the fileorg.apache.hadoop.security.AccessControlException - permission deniedjava.io.FileNotFoundException - file src is not foundorg.apache.hadoop.fs.UnresolvedLinkException - if the path contains
a symlink.java.io.IOException - If an I/O error occurredpublic java.lang.Object getUnderlyingProxyObject()
getUnderlyingProxyObject in interface org.apache.hadoop.ipc.ProtocolTranslatorpublic java.lang.String createSnapshot(java.lang.String snapshotRoot,
java.lang.String snapshotName)
throws java.io.IOException
ClientProtocolcreateSnapshot in interface ClientProtocolsnapshotRoot - the path that is being snapshottedsnapshotName - name of the snapshot createdjava.io.IOExceptionpublic void deleteSnapshot(java.lang.String snapshotRoot,
java.lang.String snapshotName)
throws java.io.IOException
ClientProtocoldeleteSnapshot in interface ClientProtocolsnapshotRoot - The snapshottable directorysnapshotName - Name of the snapshot for the snapshottable directoryjava.io.IOExceptionpublic void allowSnapshot(java.lang.String snapshotRoot)
throws java.io.IOException
ClientProtocolallowSnapshot in interface ClientProtocolsnapshotRoot - the directory to be snappedjava.io.IOException - on errorpublic void disallowSnapshot(java.lang.String snapshotRoot)
throws java.io.IOException
ClientProtocoldisallowSnapshot in interface ClientProtocolsnapshotRoot - the directory to disallow snapshotjava.io.IOException - on errorpublic void renameSnapshot(java.lang.String snapshotRoot,
java.lang.String snapshotOldName,
java.lang.String snapshotNewName)
throws java.io.IOException
ClientProtocolrenameSnapshot in interface ClientProtocolsnapshotRoot - the directory path where the snapshot was takensnapshotOldName - old name of the snapshotsnapshotNewName - new name of the snapshotjava.io.IOExceptionpublic SnapshottableDirectoryStatus[] getSnapshottableDirListing() throws java.io.IOException
ClientProtocolgetSnapshottableDirListing in interface ClientProtocoljava.io.IOException - If an I/O error occurred.public SnapshotStatus[] getSnapshotListing(java.lang.String path) throws java.io.IOException
ClientProtocolgetSnapshotListing in interface ClientProtocoljava.io.IOException - If an I/O error occurredpublic SnapshotDiffReport getSnapshotDiffReport(java.lang.String snapshotRoot, java.lang.String fromSnapshot, java.lang.String toSnapshot) throws java.io.IOException
ClientProtocolgetSnapshotDiffReport in interface ClientProtocolsnapshotRoot - full path of the directory where snapshots are takenfromSnapshot - snapshot name of the from point. Null indicates the current
treetoSnapshot - snapshot name of the to point. Null indicates the current
tree.SnapshotDiffReport.java.io.IOException - on errorpublic SnapshotDiffReportListing getSnapshotDiffReportListing(java.lang.String snapshotRoot, java.lang.String fromSnapshot, java.lang.String toSnapshot, byte[] startPath, int index) throws java.io.IOException
ClientProtocolgetSnapshotDiffReportListing in interface ClientProtocolsnapshotRoot - full path of the directory where snapshots are takenfromSnapshot - snapshot name of the from point. Null indicates the current
treetoSnapshot - snapshot name of the to point. Null indicates the current
tree.startPath - path relative to the snapshottable root directory from where the
snapshotdiff computation needs to start across multiple rpc callsindex - index in the created or deleted list of the directory at which
the snapshotdiff computation stopped during the last rpc call
as the number of entries exceeded the snapshotdiffentry limit. -1
indicates the snapshotdiff computation needs to start right
from the startPath provided.SnapshotDiffReport.java.io.IOException - on errorpublic long addCacheDirective(CacheDirectiveInfo directive, java.util.EnumSet<CacheFlag> flags) throws java.io.IOException
ClientProtocoladdCacheDirective in interface ClientProtocoldirective - A CacheDirectiveInfo to be addedflags - CacheFlags to use for this operation.java.io.IOException - if the directive could not be addedpublic void modifyCacheDirective(CacheDirectiveInfo directive, java.util.EnumSet<CacheFlag> flags) throws java.io.IOException
ClientProtocolmodifyCacheDirective in interface ClientProtocolflags - CacheFlags to use for this operation.java.io.IOException - if the directive could not be modifiedpublic void removeCacheDirective(long id)
throws java.io.IOException
ClientProtocolremoveCacheDirective in interface ClientProtocolid - of a CacheDirectiveInfojava.io.IOException - if the cache directive could not be removedpublic org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries<CacheDirectiveEntry> listCacheDirectives(long prevId, CacheDirectiveInfo filter) throws java.io.IOException
ClientProtocollistCacheDirectives in interface ClientProtocolprevId - The last listed entry ID, or -1 if this is the first call to
listCacheDirectives.filter - Parameters to use to filter the list results,
or null to display all directives visible to us.java.io.IOExceptionpublic void addCachePool(CachePoolInfo info) throws java.io.IOException
ClientProtocoladdCachePool in interface ClientProtocolinfo - Description of the new cache pooljava.io.IOException - If the request could not be completed.public void modifyCachePool(CachePoolInfo req) throws java.io.IOException
ClientProtocolmodifyCachePool in interface ClientProtocolreq - The request to modify a cache pool.java.io.IOException - If the request could not be completed.public void removeCachePool(java.lang.String cachePoolName)
throws java.io.IOException
ClientProtocolremoveCachePool in interface ClientProtocolcachePoolName - name of the cache pool to remove.java.io.IOException - if the cache pool did not exist, or could not be
removed.public org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries<CachePoolEntry> listCachePools(java.lang.String prevKey) throws java.io.IOException
ClientProtocollistCachePools in interface ClientProtocolprevKey - name of the last pool listed, or the empty string if this
is the first invocation of listCachePoolsjava.io.IOExceptionpublic void modifyAclEntries(java.lang.String src,
java.util.List<org.apache.hadoop.fs.permission.AclEntry> aclSpec)
throws java.io.IOException
ClientProtocolmodifyAclEntries in interface ClientProtocoljava.io.IOExceptionpublic void removeAclEntries(java.lang.String src,
java.util.List<org.apache.hadoop.fs.permission.AclEntry> aclSpec)
throws java.io.IOException
ClientProtocolremoveAclEntries in interface ClientProtocoljava.io.IOExceptionpublic void removeDefaultAcl(java.lang.String src)
throws java.io.IOException
ClientProtocolremoveDefaultAcl in interface ClientProtocoljava.io.IOExceptionpublic void removeAcl(java.lang.String src)
throws java.io.IOException
ClientProtocolremoveAcl in interface ClientProtocoljava.io.IOExceptionpublic void setAcl(java.lang.String src,
java.util.List<org.apache.hadoop.fs.permission.AclEntry> aclSpec)
throws java.io.IOException
ClientProtocolsetAcl in interface ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.fs.permission.AclStatus getAclStatus(java.lang.String src)
throws java.io.IOException
ClientProtocolgetAclStatus in interface ClientProtocoljava.io.IOExceptionpublic void createEncryptionZone(java.lang.String src,
java.lang.String keyName)
throws java.io.IOException
ClientProtocolcreateEncryptionZone in interface ClientProtocoljava.io.IOExceptionpublic EncryptionZone getEZForPath(java.lang.String src) throws java.io.IOException
ClientProtocolgetEZForPath in interface ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries<EncryptionZone> listEncryptionZones(long id) throws java.io.IOException
ClientProtocolEncryptionZones.listEncryptionZones in interface ClientProtocolid - ID of the last item in the previous batch. If there is no
previous batch, a negative value can be used.java.io.IOExceptionpublic void setErasureCodingPolicy(java.lang.String src,
java.lang.String ecPolicyName)
throws java.io.IOException
ClientProtocolsetErasureCodingPolicy in interface ClientProtocolsrc - The path to set policy on.ecPolicyName - The erasure coding policy name.java.io.IOExceptionpublic void unsetErasureCodingPolicy(java.lang.String src)
throws java.io.IOException
ClientProtocolunsetErasureCodingPolicy in interface ClientProtocolsrc - The path to unset policy.java.io.IOExceptionpublic ECTopologyVerifierResult getECTopologyResultForPolicies(java.lang.String... policyNames) throws java.io.IOException
ClientProtocolgetECTopologyResultForPolicies in interface ClientProtocolpolicyNames - name of policies.java.io.IOExceptionpublic void reencryptEncryptionZone(java.lang.String zone,
HdfsConstants.ReencryptAction action)
throws java.io.IOException
ClientProtocolreencryptEncryptionZone in interface ClientProtocolzone - the encryption zone to re-encrypt.action - the action for the re-encryption.java.io.IOExceptionpublic org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries<ZoneReencryptionStatus> listReencryptionStatus(long id) throws java.io.IOException
ClientProtocolZoneReencryptionStatuss.listReencryptionStatus in interface ClientProtocolid - ID of the last item in the previous batch. If there is no
previous batch, a negative value can be used.java.io.IOExceptionpublic void setXAttr(java.lang.String src,
XAttr xAttr,
java.util.EnumSet<org.apache.hadoop.fs.XAttrSetFlag> flag)
throws java.io.IOException
ClientProtocolRefer to the HDFS extended attributes user documentation for details.
setXAttr in interface ClientProtocolsrc - file or directoryxAttr - XAttr to setflag - set flagjava.io.IOExceptionpublic java.util.List<XAttr> getXAttrs(java.lang.String src, java.util.List<XAttr> xAttrs) throws java.io.IOException
ClientProtocolRefer to the HDFS extended attributes user documentation for details.
getXAttrs in interface ClientProtocolsrc - file or directoryxAttrs - xAttrs to getXAttr listjava.io.IOExceptionpublic java.util.List<XAttr> listXAttrs(java.lang.String src) throws java.io.IOException
ClientProtocolRefer to the HDFS extended attributes user documentation for details.
listXAttrs in interface ClientProtocolsrc - file or directoryXAttr listjava.io.IOExceptionpublic void removeXAttr(java.lang.String src,
XAttr xAttr)
throws java.io.IOException
ClientProtocolRefer to the HDFS extended attributes user documentation for details.
removeXAttr in interface ClientProtocolsrc - file or directoryxAttr - XAttr to removejava.io.IOExceptionpublic void checkAccess(java.lang.String path,
org.apache.hadoop.fs.permission.FsAction mode)
throws java.io.IOException
ClientProtocolAccessControlException.
In general, applications should avoid using this method, due to the risk of
time-of-check/time-of-use race conditions. The permissions on a file may
change immediately after the access call returns.checkAccess in interface ClientProtocolpath - Path to checkmode - type of access to checkorg.apache.hadoop.security.AccessControlException - if access is
deniedjava.io.FileNotFoundException - if the path does not existjava.io.IOException - see specific implementationpublic void setStoragePolicy(java.lang.String src,
java.lang.String policyName)
throws java.io.IOException
ClientProtocolsetStoragePolicy in interface ClientProtocolsrc - Path of an existing file/directory.policyName - The name of the storage policySnapshotAccessControlException - If access is deniedorg.apache.hadoop.fs.UnresolvedLinkException - if src
contains a symlinkjava.io.FileNotFoundException - If file/dir src is not
foundQuotaExceededException - If changes violate the quota restrictionjava.io.IOExceptionpublic void unsetStoragePolicy(java.lang.String src)
throws java.io.IOException
ClientProtocolunsetStoragePolicy in interface ClientProtocolsrc - Path of an existing file/directory.SnapshotAccessControlException - If access is deniedorg.apache.hadoop.fs.UnresolvedLinkException - if src
contains a symlinkjava.io.FileNotFoundException - If file/dir src is not
foundQuotaExceededException - If changes violate the quota restrictionjava.io.IOExceptionpublic BlockStoragePolicy getStoragePolicy(java.lang.String path) throws java.io.IOException
ClientProtocolgetStoragePolicy in interface ClientProtocolpath - Path of an existing file/directory.org.apache.hadoop.security.AccessControlException - If access is deniedorg.apache.hadoop.fs.UnresolvedLinkException - if src contains a symlinkjava.io.FileNotFoundException - If file/dir src is not foundjava.io.IOExceptionpublic BlockStoragePolicy[] getStoragePolicies() throws java.io.IOException
ClientProtocolgetStoragePolicies in interface ClientProtocoljava.io.IOExceptionpublic long getCurrentEditLogTxid()
throws java.io.IOException
ClientProtocolgetCurrentEditLogTxid in interface ClientProtocoljava.io.IOExceptionpublic EventBatchList getEditsFromTxid(long txid) throws java.io.IOException
ClientProtocolgetEditsFromTxid in interface ClientProtocoljava.io.IOExceptionpublic AddErasureCodingPolicyResponse[] addErasureCodingPolicies(ErasureCodingPolicy[] policies) throws java.io.IOException
ClientProtocoladdErasureCodingPolicies in interface ClientProtocolpolicies - The user defined ec policy list to add.java.io.IOExceptionpublic void removeErasureCodingPolicy(java.lang.String ecPolicyName)
throws java.io.IOException
ClientProtocolremoveErasureCodingPolicy in interface ClientProtocolecPolicyName - The name of the policy to be removed.java.io.IOExceptionpublic void enableErasureCodingPolicy(java.lang.String ecPolicyName)
throws java.io.IOException
ClientProtocolenableErasureCodingPolicy in interface ClientProtocolecPolicyName - The name of the policy to be enabled.java.io.IOExceptionpublic void disableErasureCodingPolicy(java.lang.String ecPolicyName)
throws java.io.IOException
ClientProtocoldisableErasureCodingPolicy in interface ClientProtocolecPolicyName - The name of the policy to be disabled.java.io.IOExceptionpublic ErasureCodingPolicyInfo[] getErasureCodingPolicies() throws java.io.IOException
ClientProtocolgetErasureCodingPolicies in interface ClientProtocoljava.io.IOExceptionpublic java.util.Map<java.lang.String,java.lang.String> getErasureCodingCodecs()
throws java.io.IOException
ClientProtocolgetErasureCodingCodecs in interface ClientProtocoljava.io.IOExceptionpublic ErasureCodingPolicy getErasureCodingPolicy(java.lang.String src) throws java.io.IOException
ClientProtocolgetErasureCodingPolicy in interface ClientProtocolsrc - path to get the info forjava.io.IOExceptionpublic org.apache.hadoop.fs.QuotaUsage getQuotaUsage(java.lang.String path)
throws java.io.IOException
ClientProtocolQuotaUsage rooted at the specified directory.
Note: due to HDFS-6763, standby/observer doesn't keep up-to-date info
about quota usage, and thus even though this is ReadOnly, it can only be
directed to the active namenode.getQuotaUsage in interface ClientProtocolpath - The string representation of the pathorg.apache.hadoop.security.AccessControlException - permission deniedjava.io.FileNotFoundException - file path is not foundorg.apache.hadoop.fs.UnresolvedLinkException - if path
contains a symlink.java.io.IOException - If an I/O error occurred@Deprecated public org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries<OpenFileEntry> listOpenFiles(long prevId) throws java.io.IOException
ClientProtocollistOpenFiles in interface ClientProtocolprevId - the cursor INode id.java.io.IOExceptionpublic org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries<OpenFileEntry> listOpenFiles(long prevId, java.util.EnumSet<OpenFilesIterator.OpenFilesType> openFilesTypes, java.lang.String path) throws java.io.IOException
ClientProtocollistOpenFiles in interface ClientProtocolprevId - the cursor INode id.openFilesTypes - types to filter the open files.path - path to filter the open files.java.io.IOExceptionpublic void msync()
throws java.io.IOException
ClientProtocolmsync in interface ClientProtocoljava.io.IOExceptionpublic void satisfyStoragePolicy(java.lang.String src)
throws java.io.IOException
ClientProtocolsatisfyStoragePolicy in interface ClientProtocolsrc - Path of an existing file/directory.org.apache.hadoop.security.AccessControlException - If access is denied.org.apache.hadoop.fs.UnresolvedLinkException - if src
contains a symlink.java.io.FileNotFoundException - If file/dir src is not
found.SafeModeException - append not
allowed in safemode.java.io.IOExceptionpublic DatanodeInfo[] getSlowDatanodeReport() throws java.io.IOException
ClientProtocolgetSlowDatanodeReport in interface ClientProtocoljava.io.IOException - If an I/O error occurs.public org.apache.hadoop.ha.HAServiceProtocol.HAServiceState getHAServiceState()
throws java.io.IOException
ClientProtocolgetHAServiceState in interface ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.fs.Path getEnclosingRoot(java.lang.String filename)
throws java.io.IOException
ClientProtocolgetEnclosingRoot in interface ClientProtocoljava.io.IOExceptionCopyright © 2008–2025 Apache Software Foundation. All rights reserved.