org.apache.hadoop.hdfs.protocol.ClientProtocol
public class RouterClientProtocol
extends java.lang.Object
implements org.apache.hadoop.hdfs.protocol.ClientProtocol
Module that implements all the RPC calls in ClientProtocol in the RouterRpcServer.
RouterRpcServer.GET_STATS_BYTES_IN_FUTURE_BLOCKS_IDX, GET_STATS_CAPACITY_IDX, GET_STATS_CORRUPT_BLOCKS_IDX, GET_STATS_LOW_REDUNDANCY_IDX, GET_STATS_MISSING_BLOCKS_IDX, GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX, GET_STATS_PENDING_DELETION_BLOCKS_IDX, GET_STATS_REMAINING_IDX, GET_STATS_UNDER_REPLICATED_IDX, GET_STATS_USED_IDX, STATS_ARRAY_LENGTH, versionID

| Modifier and Type | Method | Description |
|---|---|---|
void |
abandonBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock b,
long fileId,
java.lang.String src,
java.lang.String holder) |
|
org.apache.hadoop.hdfs.protocol.LocatedBlock |
addBlock(java.lang.String src,
java.lang.String clientName,
org.apache.hadoop.hdfs.protocol.ExtendedBlock previous,
org.apache.hadoop.hdfs.protocol.DatanodeInfo[] excludedNodes,
long fileId,
java.lang.String[] favoredNodes,
java.util.EnumSet<org.apache.hadoop.hdfs.AddBlockFlag> addBlockFlags) |
Excluded and favored nodes are not verified and will be ignored by
placement policy if they are not in the same nameservice as the file.
|
long |
addCacheDirective(org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo path,
java.util.EnumSet<org.apache.hadoop.fs.CacheFlag> flags) |
|
void |
addCachePool(org.apache.hadoop.hdfs.protocol.CachePoolInfo info) |
|
org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse[] |
addErasureCodingPolicies(org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy[] policies) |
|
void |
allowSnapshot(java.lang.String snapshotRoot) |
|
org.apache.hadoop.hdfs.protocol.LastBlockWithStatus |
append(java.lang.String src,
java.lang.String clientName,
org.apache.hadoop.io.EnumSetWritable<org.apache.hadoop.fs.CreateFlag> flag) |
|
void |
cancelDelegationToken(org.apache.hadoop.security.token.Token<org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier> token) |
|
void |
checkAccess(java.lang.String path,
org.apache.hadoop.fs.permission.FsAction mode) |
|
boolean |
complete(java.lang.String src,
java.lang.String clientName,
org.apache.hadoop.hdfs.protocol.ExtendedBlock last,
long fileId) |
|
void |
concat(java.lang.String trg,
java.lang.String[] src) |
|
org.apache.hadoop.hdfs.protocol.HdfsFileStatus |
create(java.lang.String src,
org.apache.hadoop.fs.permission.FsPermission masked,
java.lang.String clientName,
org.apache.hadoop.io.EnumSetWritable<org.apache.hadoop.fs.CreateFlag> flag,
boolean createParent,
short replication,
long blockSize,
org.apache.hadoop.crypto.CryptoProtocolVersion[] supportedVersions,
java.lang.String ecPolicyName,
java.lang.String storagePolicy) |
|
void |
createEncryptionZone(java.lang.String src,
java.lang.String keyName) |
|
java.lang.String |
createSnapshot(java.lang.String snapshotRoot,
java.lang.String snapshotName) |
|
void |
createSymlink(java.lang.String target,
java.lang.String link,
org.apache.hadoop.fs.permission.FsPermission dirPerms,
boolean createParent) |
|
boolean |
delete(java.lang.String src,
boolean recursive) |
|
void |
deleteSnapshot(java.lang.String snapshotRoot,
java.lang.String snapshotName) |
|
void |
disableErasureCodingPolicy(java.lang.String ecPolicyName) |
|
void |
disallowSnapshot(java.lang.String snapshot) |
|
void |
enableErasureCodingPolicy(java.lang.String ecPolicyName) |
|
void |
finalizeUpgrade() |
|
void |
fsync(java.lang.String src,
long fileId,
java.lang.String clientName,
long lastBlockLength) |
|
org.apache.hadoop.fs.permission.AclStatus |
getAclStatus(java.lang.String src) |
|
org.apache.hadoop.hdfs.protocol.LocatedBlock |
getAdditionalDatanode(java.lang.String src,
long fileId,
org.apache.hadoop.hdfs.protocol.ExtendedBlock blk,
org.apache.hadoop.hdfs.protocol.DatanodeInfo[] existings,
java.lang.String[] existingStorageIDs,
org.apache.hadoop.hdfs.protocol.DatanodeInfo[] excludes,
int numAdditionalNodes,
java.lang.String clientName) |
Excluded nodes are not verified and will be ignored by placement if they
are not in the same nameservice as the file.
|
org.apache.hadoop.hdfs.protocol.BatchedDirectoryListing |
getBatchedListing(java.lang.String[] srcs,
byte[] startAfter,
boolean needLocation) |
|
org.apache.hadoop.hdfs.protocol.LocatedBlocks |
getBlockLocations(java.lang.String src,
long offset,
long length) |
|
org.apache.hadoop.fs.ContentSummary |
getContentSummary(java.lang.String path) |
|
long |
getCurrentEditLogTxid() |
|
org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey |
getDataEncryptionKey() |
|
org.apache.hadoop.hdfs.protocol.DatanodeInfo[] |
getDatanodeReport(org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType type) |
|
org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport[] |
getDatanodeStorageReport(org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType type) |
|
org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport[] |
getDatanodeStorageReport(org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType type,
boolean requireResponse,
long timeOutMs) |
|
org.apache.hadoop.security.token.Token<org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier> |
getDelegationToken(org.apache.hadoop.io.Text renewer) |
|
java.util.Map<FederationNamespaceInfo,org.apache.hadoop.security.token.Token<org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier>> |
getDelegationTokens(org.apache.hadoop.io.Text renewer) |
Get the delegation token from each name service.
|
org.apache.hadoop.hdfs.protocol.ECBlockGroupStats |
getECBlockGroupStats() |
|
org.apache.hadoop.hdfs.protocol.ECTopologyVerifierResult |
getECTopologyResultForPolicies(java.lang.String... policyNames) |
|
org.apache.hadoop.hdfs.inotify.EventBatchList |
getEditsFromTxid(long txid) |
|
org.apache.hadoop.fs.Path |
getEnclosingRoot(java.lang.String src) |
|
java.util.Map<java.lang.String,java.lang.String> |
getErasureCodingCodecs() |
|
org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo[] |
getErasureCodingPolicies() |
|
org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy |
getErasureCodingPolicy(java.lang.String src) |
|
org.apache.hadoop.hdfs.protocol.EncryptionZone |
getEZForPath(java.lang.String src) |
|
org.apache.hadoop.hdfs.protocol.HdfsFileStatus |
getFileInfo(java.lang.String src) |
|
org.apache.hadoop.hdfs.protocol.HdfsFileStatus |
getFileLinkInfo(java.lang.String src) |
|
org.apache.hadoop.ha.HAServiceProtocol.HAServiceState |
getHAServiceState() |
|
java.lang.String |
getLinkTarget(java.lang.String path) |
|
org.apache.hadoop.hdfs.protocol.DirectoryListing |
getListing(java.lang.String src,
byte[] startAfter,
boolean needLocation) |
|
org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus |
getLocatedFileInfo(java.lang.String src,
boolean needBlockToken) |
|
long |
getPreferredBlockSize(java.lang.String src) |
|
org.apache.hadoop.fs.QuotaUsage |
getQuotaUsage(java.lang.String path) |
|
org.apache.hadoop.hdfs.protocol.ReplicatedBlockStats |
getReplicatedBlockStats() |
|
int |
getRouterFederationRenameCount() |
|
org.apache.hadoop.fs.FsServerDefaults |
getServerDefaults() |
|
org.apache.hadoop.hdfs.protocol.DatanodeInfo[] |
getSlowDatanodeReport() |
|
org.apache.hadoop.hdfs.protocol.SnapshotDiffReport |
getSnapshotDiffReport(java.lang.String snapshotRoot,
java.lang.String earlierSnapshotName,
java.lang.String laterSnapshotName) |
|
org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing |
getSnapshotDiffReportListing(java.lang.String snapshotRoot,
java.lang.String earlierSnapshotName,
java.lang.String laterSnapshotName,
byte[] startPath,
int index) |
|
org.apache.hadoop.hdfs.protocol.SnapshotStatus[] |
getSnapshotListing(java.lang.String snapshotRoot) |
|
org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus[] |
getSnapshottableDirListing() |
|
long[] |
getStats() |
|
org.apache.hadoop.hdfs.protocol.BlockStoragePolicy[] |
getStoragePolicies() |
|
org.apache.hadoop.hdfs.protocol.BlockStoragePolicy |
getStoragePolicy(java.lang.String path) |
|
java.util.List<org.apache.hadoop.fs.XAttr> |
getXAttrs(java.lang.String src,
java.util.List<org.apache.hadoop.fs.XAttr> xAttrs) |
|
boolean |
isFileClosed(java.lang.String src) |
|
protected static boolean |
isUnavailableSubclusterException(java.io.IOException ioe) |
Check if an exception is caused by an unavailable subcluster or not.
|
org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries<org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry> |
listCacheDirectives(long prevId,
org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo filter) |
|
org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries<org.apache.hadoop.hdfs.protocol.CachePoolEntry> |
listCachePools(java.lang.String prevKey) |
|
org.apache.hadoop.hdfs.protocol.CorruptFileBlocks |
listCorruptFileBlocks(java.lang.String path,
java.lang.String cookie) |
|
org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries<org.apache.hadoop.hdfs.protocol.EncryptionZone> |
listEncryptionZones(long prevId) |
|
org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries<org.apache.hadoop.hdfs.protocol.OpenFileEntry> |
listOpenFiles(long prevId) |
Deprecated.
|
org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries<org.apache.hadoop.hdfs.protocol.OpenFileEntry> |
listOpenFiles(long prevId,
java.util.EnumSet<org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType> openFilesTypes,
java.lang.String path) |
|
org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries<org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus> |
listReencryptionStatus(long prevId) |
|
java.util.List<org.apache.hadoop.fs.XAttr> |
listXAttrs(java.lang.String src) |
|
void |
metaSave(java.lang.String filename) |
|
boolean |
mkdirs(java.lang.String src,
org.apache.hadoop.fs.permission.FsPermission masked,
boolean createParent) |
|
void |
modifyAclEntries(java.lang.String src,
java.util.List<org.apache.hadoop.fs.permission.AclEntry> aclSpec) |
|
void |
modifyCacheDirective(org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo directive,
java.util.EnumSet<org.apache.hadoop.fs.CacheFlag> flags) |
|
void |
modifyCachePool(org.apache.hadoop.hdfs.protocol.CachePoolInfo info) |
|
void |
msync() |
|
boolean |
recoverLease(java.lang.String src,
java.lang.String clientName) |
|
void |
reencryptEncryptionZone(java.lang.String zone,
org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction action) |
|
void |
refreshNodes() |
|
void |
removeAcl(java.lang.String src) |
|
void |
removeAclEntries(java.lang.String src,
java.util.List<org.apache.hadoop.fs.permission.AclEntry> aclSpec) |
|
void |
removeCacheDirective(long id) |
|
void |
removeCachePool(java.lang.String cachePoolName) |
|
void |
removeDefaultAcl(java.lang.String src) |
|
void |
removeErasureCodingPolicy(java.lang.String ecPolicyName) |
|
void |
removeXAttr(java.lang.String src,
org.apache.hadoop.fs.XAttr xAttr) |
|
boolean |
rename(java.lang.String src,
java.lang.String dst) |
Deprecated.
|
void |
rename2(java.lang.String src,
java.lang.String dst,
org.apache.hadoop.fs.Options.Rename... options) |
|
void |
renameSnapshot(java.lang.String snapshotRoot,
java.lang.String snapshotOldName,
java.lang.String snapshotNewName) |
|
long |
renewDelegationToken(org.apache.hadoop.security.token.Token<org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier> token) |
|
void |
renewLease(java.lang.String clientName,
java.util.List<java.lang.String> namespaces) |
|
void |
reportBadBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlock[] blocks) |
|
boolean |
restoreFailedStorage(java.lang.String arg) |
|
long |
rollEdits() |
|
org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo |
rollingUpgrade(org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction action) |
|
void |
satisfyStoragePolicy(java.lang.String path) |
|
boolean |
saveNamespace(long timeWindow,
long txGap) |
|
void |
setAcl(java.lang.String src,
java.util.List<org.apache.hadoop.fs.permission.AclEntry> aclSpec) |
|
void |
setBalancerBandwidth(long bandwidth) |
|
void |
setErasureCodingPolicy(java.lang.String src,
java.lang.String ecPolicyName) |
|
void |
setOwner(java.lang.String src,
java.lang.String username,
java.lang.String groupname) |
|
void |
setPermission(java.lang.String src,
org.apache.hadoop.fs.permission.FsPermission permissions) |
|
void |
setQuota(java.lang.String path,
long namespaceQuota,
long storagespaceQuota,
org.apache.hadoop.fs.StorageType type) |
|
boolean |
setReplication(java.lang.String src,
short replication) |
|
boolean |
setSafeMode(org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction action,
boolean isChecked) |
|
void |
setStoragePolicy(java.lang.String src,
java.lang.String policyName) |
|
void |
setTimes(java.lang.String src,
long mtime,
long atime) |
|
void |
setXAttr(java.lang.String src,
org.apache.hadoop.fs.XAttr xAttr,
java.util.EnumSet<org.apache.hadoop.fs.XAttrSetFlag> flag) |
|
boolean |
truncate(java.lang.String src,
long newLength,
java.lang.String clientName) |
|
void |
unsetErasureCodingPolicy(java.lang.String src) |
|
void |
unsetStoragePolicy(java.lang.String src) |
|
org.apache.hadoop.hdfs.protocol.LocatedBlock |
updateBlockForPipeline(org.apache.hadoop.hdfs.protocol.ExtendedBlock block,
java.lang.String clientName) |
|
void |
updatePipeline(java.lang.String clientName,
org.apache.hadoop.hdfs.protocol.ExtendedBlock oldBlock,
org.apache.hadoop.hdfs.protocol.ExtendedBlock newBlock,
org.apache.hadoop.hdfs.protocol.DatanodeID[] newNodes,
java.lang.String[] newStorageIDs) |
Datanodes are not verified to be in the same nameservice as the old block.
|
boolean |
upgradeStatus() |
public org.apache.hadoop.security.token.Token<org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier> getDelegationToken(org.apache.hadoop.io.Text renewer)
throws java.io.IOException
getDelegationToken in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic java.util.Map<FederationNamespaceInfo,org.apache.hadoop.security.token.Token<org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier>> getDelegationTokens(org.apache.hadoop.io.Text renewer) throws java.io.IOException
renewer - The token renewer.
java.io.IOException - If it cannot get the delegation token.
public long renewDelegationToken(org.apache.hadoop.security.token.Token<org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier> token)
throws java.io.IOException
renewDelegationToken in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void cancelDelegationToken(org.apache.hadoop.security.token.Token<org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier> token)
throws java.io.IOException
cancelDelegationToken in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.LocatedBlocks getBlockLocations(java.lang.String src,
long offset,
long length)
throws java.io.IOException
getBlockLocations in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.fs.FsServerDefaults getServerDefaults()
throws java.io.IOException
getServerDefaults in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.HdfsFileStatus create(java.lang.String src,
org.apache.hadoop.fs.permission.FsPermission masked,
java.lang.String clientName,
org.apache.hadoop.io.EnumSetWritable<org.apache.hadoop.fs.CreateFlag> flag,
boolean createParent,
short replication,
long blockSize,
org.apache.hadoop.crypto.CryptoProtocolVersion[] supportedVersions,
java.lang.String ecPolicyName,
java.lang.String storagePolicy)
throws java.io.IOException
create in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionprotected static boolean isUnavailableSubclusterException(java.io.IOException ioe)
ioe - IOException to check.
public org.apache.hadoop.hdfs.protocol.LastBlockWithStatus append(java.lang.String src,
java.lang.String clientName,
org.apache.hadoop.io.EnumSetWritable<org.apache.hadoop.fs.CreateFlag> flag)
throws java.io.IOException
append in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic boolean recoverLease(java.lang.String src,
java.lang.String clientName)
throws java.io.IOException
recoverLease in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic boolean setReplication(java.lang.String src,
short replication)
throws java.io.IOException
setReplication in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void setStoragePolicy(java.lang.String src,
java.lang.String policyName)
throws java.io.IOException
setStoragePolicy in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.BlockStoragePolicy[] getStoragePolicies()
throws java.io.IOException
getStoragePolicies in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void setPermission(java.lang.String src,
org.apache.hadoop.fs.permission.FsPermission permissions)
throws java.io.IOException
setPermission in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void setOwner(java.lang.String src,
java.lang.String username,
java.lang.String groupname)
throws java.io.IOException
setOwner in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.LocatedBlock addBlock(java.lang.String src,
java.lang.String clientName,
org.apache.hadoop.hdfs.protocol.ExtendedBlock previous,
org.apache.hadoop.hdfs.protocol.DatanodeInfo[] excludedNodes,
long fileId,
java.lang.String[] favoredNodes,
java.util.EnumSet<org.apache.hadoop.hdfs.AddBlockFlag> addBlockFlags)
throws java.io.IOException
addBlock in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.LocatedBlock getAdditionalDatanode(java.lang.String src,
long fileId,
org.apache.hadoop.hdfs.protocol.ExtendedBlock blk,
org.apache.hadoop.hdfs.protocol.DatanodeInfo[] existings,
java.lang.String[] existingStorageIDs,
org.apache.hadoop.hdfs.protocol.DatanodeInfo[] excludes,
int numAdditionalNodes,
java.lang.String clientName)
throws java.io.IOException
getAdditionalDatanode in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void abandonBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock b,
long fileId,
java.lang.String src,
java.lang.String holder)
throws java.io.IOException
abandonBlock in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic boolean complete(java.lang.String src,
java.lang.String clientName,
org.apache.hadoop.hdfs.protocol.ExtendedBlock last,
long fileId)
throws java.io.IOException
complete in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.LocatedBlock updateBlockForPipeline(org.apache.hadoop.hdfs.protocol.ExtendedBlock block,
java.lang.String clientName)
throws java.io.IOException
updateBlockForPipeline in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void updatePipeline(java.lang.String clientName,
org.apache.hadoop.hdfs.protocol.ExtendedBlock oldBlock,
org.apache.hadoop.hdfs.protocol.ExtendedBlock newBlock,
org.apache.hadoop.hdfs.protocol.DatanodeID[] newNodes,
java.lang.String[] newStorageIDs)
throws java.io.IOException
updatePipeline in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic long getPreferredBlockSize(java.lang.String src)
throws java.io.IOException
getPreferredBlockSize in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOException@Deprecated
public boolean rename(java.lang.String src,
java.lang.String dst)
throws java.io.IOException
rename in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void rename2(java.lang.String src,
java.lang.String dst,
org.apache.hadoop.fs.Options.Rename... options)
throws java.io.IOException
rename2 in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void concat(java.lang.String trg,
java.lang.String[] src)
throws java.io.IOException
concat in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic boolean truncate(java.lang.String src,
long newLength,
java.lang.String clientName)
throws java.io.IOException
truncate in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic boolean delete(java.lang.String src,
boolean recursive)
throws java.io.IOException
delete in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic boolean mkdirs(java.lang.String src,
org.apache.hadoop.fs.permission.FsPermission masked,
boolean createParent)
throws java.io.IOException
mkdirs in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void renewLease(java.lang.String clientName,
java.util.List<java.lang.String> namespaces)
throws java.io.IOException
renewLease in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.DirectoryListing getListing(java.lang.String src,
byte[] startAfter,
boolean needLocation)
throws java.io.IOException
getListing in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.BatchedDirectoryListing getBatchedListing(java.lang.String[] srcs,
byte[] startAfter,
boolean needLocation)
throws java.io.IOException
getBatchedListing in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.HdfsFileStatus getFileInfo(java.lang.String src)
throws java.io.IOException
getFileInfo in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic boolean isFileClosed(java.lang.String src)
throws java.io.IOException
isFileClosed in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.HdfsFileStatus getFileLinkInfo(java.lang.String src)
throws java.io.IOException
getFileLinkInfo in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus getLocatedFileInfo(java.lang.String src,
boolean needBlockToken)
throws java.io.IOException
getLocatedFileInfo in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic long[] getStats()
throws java.io.IOException
getStats in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.DatanodeInfo[] getDatanodeReport(org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType type)
throws java.io.IOException
getDatanodeReport in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport[] getDatanodeStorageReport(org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType type)
throws java.io.IOException
getDatanodeStorageReport in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport[] getDatanodeStorageReport(org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType type,
boolean requireResponse,
long timeOutMs)
throws java.io.IOException
java.io.IOExceptionpublic boolean setSafeMode(org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction action,
boolean isChecked)
throws java.io.IOException
setSafeMode in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic boolean restoreFailedStorage(java.lang.String arg)
throws java.io.IOException
restoreFailedStorage in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic boolean saveNamespace(long timeWindow,
long txGap)
throws java.io.IOException
saveNamespace in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic long rollEdits()
throws java.io.IOException
rollEdits in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void refreshNodes()
throws java.io.IOException
refreshNodes in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void finalizeUpgrade()
throws java.io.IOException
finalizeUpgrade in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic boolean upgradeStatus()
throws java.io.IOException
upgradeStatus in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo rollingUpgrade(org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction action)
throws java.io.IOException
rollingUpgrade in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void metaSave(java.lang.String filename)
throws java.io.IOException
metaSave in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.CorruptFileBlocks listCorruptFileBlocks(java.lang.String path,
java.lang.String cookie)
throws java.io.IOException
listCorruptFileBlocks in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void setBalancerBandwidth(long bandwidth)
throws java.io.IOException
setBalancerBandwidth in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.fs.ContentSummary getContentSummary(java.lang.String path)
throws java.io.IOException
getContentSummary in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void fsync(java.lang.String src,
long fileId,
java.lang.String clientName,
long lastBlockLength)
throws java.io.IOException
fsync in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void setTimes(java.lang.String src,
long mtime,
long atime)
throws java.io.IOException
setTimes in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void createSymlink(java.lang.String target,
java.lang.String link,
org.apache.hadoop.fs.permission.FsPermission dirPerms,
boolean createParent)
throws java.io.IOException
createSymlink in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic java.lang.String getLinkTarget(java.lang.String path)
throws java.io.IOException
getLinkTarget in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void allowSnapshot(java.lang.String snapshotRoot)
throws java.io.IOException
allowSnapshot in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void disallowSnapshot(java.lang.String snapshot)
throws java.io.IOException
disallowSnapshot in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void renameSnapshot(java.lang.String snapshotRoot,
java.lang.String snapshotOldName,
java.lang.String snapshotNewName)
throws java.io.IOException
renameSnapshot in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus[] getSnapshottableDirListing()
throws java.io.IOException
getSnapshottableDirListing in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.SnapshotStatus[] getSnapshotListing(java.lang.String snapshotRoot)
throws java.io.IOException
getSnapshotListing in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.SnapshotDiffReport getSnapshotDiffReport(java.lang.String snapshotRoot,
java.lang.String earlierSnapshotName,
java.lang.String laterSnapshotName)
throws java.io.IOException
getSnapshotDiffReport in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing getSnapshotDiffReportListing(java.lang.String snapshotRoot,
java.lang.String earlierSnapshotName,
java.lang.String laterSnapshotName,
byte[] startPath,
int index)
throws java.io.IOException
getSnapshotDiffReportListing in interface org.apache.hadoop.hdfs.protocol.ClientProtocol
Throws: java.io.IOException

public long addCacheDirective(org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo path,
java.util.EnumSet<org.apache.hadoop.fs.CacheFlag> flags)
throws java.io.IOException
addCacheDirective in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void modifyCacheDirective(org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo directive,
java.util.EnumSet<org.apache.hadoop.fs.CacheFlag> flags)
throws java.io.IOException
modifyCacheDirective in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void removeCacheDirective(long id)
throws java.io.IOException
removeCacheDirective in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries<org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry> listCacheDirectives(long prevId,
org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo filter)
throws java.io.IOException
listCacheDirectives in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void addCachePool(org.apache.hadoop.hdfs.protocol.CachePoolInfo info)
throws java.io.IOException
addCachePool in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void modifyCachePool(org.apache.hadoop.hdfs.protocol.CachePoolInfo info)
throws java.io.IOException
modifyCachePool in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void removeCachePool(java.lang.String cachePoolName)
throws java.io.IOException
removeCachePool in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries<org.apache.hadoop.hdfs.protocol.CachePoolEntry> listCachePools(java.lang.String prevKey)
throws java.io.IOException
listCachePools in interface org.apache.hadoop.hdfs.protocol.ClientProtocol
Throws: java.io.IOException

public void modifyAclEntries(java.lang.String src,
java.util.List<org.apache.hadoop.fs.permission.AclEntry> aclSpec)
throws java.io.IOException
modifyAclEntries in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void removeAclEntries(java.lang.String src,
java.util.List<org.apache.hadoop.fs.permission.AclEntry> aclSpec)
throws java.io.IOException
removeAclEntries in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void removeDefaultAcl(java.lang.String src)
throws java.io.IOException
removeDefaultAcl in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void removeAcl(java.lang.String src)
throws java.io.IOException
removeAcl in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void setAcl(java.lang.String src,
java.util.List<org.apache.hadoop.fs.permission.AclEntry> aclSpec)
throws java.io.IOException
setAcl in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.fs.permission.AclStatus getAclStatus(java.lang.String src)
throws java.io.IOException
getAclStatus in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void createEncryptionZone(java.lang.String src,
java.lang.String keyName)
throws java.io.IOException
createEncryptionZone in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.EncryptionZone getEZForPath(java.lang.String src)
throws java.io.IOException
getEZForPath in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries<org.apache.hadoop.hdfs.protocol.EncryptionZone> listEncryptionZones(long prevId)
throws java.io.IOException
listEncryptionZones in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void reencryptEncryptionZone(java.lang.String zone,
org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction action)
throws java.io.IOException
reencryptEncryptionZone in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries<org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus> listReencryptionStatus(long prevId)
throws java.io.IOException
listReencryptionStatus in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void setXAttr(java.lang.String src,
org.apache.hadoop.fs.XAttr xAttr,
java.util.EnumSet<org.apache.hadoop.fs.XAttrSetFlag> flag)
throws java.io.IOException
setXAttr in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic java.util.List<org.apache.hadoop.fs.XAttr> getXAttrs(java.lang.String src,
java.util.List<org.apache.hadoop.fs.XAttr> xAttrs)
throws java.io.IOException
getXAttrs in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic java.util.List<org.apache.hadoop.fs.XAttr> listXAttrs(java.lang.String src)
throws java.io.IOException
listXAttrs in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void removeXAttr(java.lang.String src,
org.apache.hadoop.fs.XAttr xAttr)
throws java.io.IOException
removeXAttr in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void checkAccess(java.lang.String path,
org.apache.hadoop.fs.permission.FsAction mode)
throws java.io.IOException
checkAccess in interface org.apache.hadoop.hdfs.protocol.ClientProtocol
Throws: java.io.IOException

public long getCurrentEditLogTxid()
throws java.io.IOException
getCurrentEditLogTxid in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.inotify.EventBatchList getEditsFromTxid(long txid)
throws java.io.IOException
getEditsFromTxid in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey getDataEncryptionKey()
throws java.io.IOException
getDataEncryptionKey in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic java.lang.String createSnapshot(java.lang.String snapshotRoot,
java.lang.String snapshotName)
throws java.io.IOException
createSnapshot in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void deleteSnapshot(java.lang.String snapshotRoot,
java.lang.String snapshotName)
throws java.io.IOException
deleteSnapshot in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void setQuota(java.lang.String path,
long namespaceQuota,
long storagespaceQuota,
org.apache.hadoop.fs.StorageType type)
throws java.io.IOException
setQuota in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.fs.QuotaUsage getQuotaUsage(java.lang.String path)
throws java.io.IOException
getQuotaUsage in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void reportBadBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlock[] blocks)
throws java.io.IOException
reportBadBlocks in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void unsetStoragePolicy(java.lang.String src)
throws java.io.IOException
unsetStoragePolicy in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.BlockStoragePolicy getStoragePolicy(java.lang.String path)
throws java.io.IOException
getStoragePolicy in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo[] getErasureCodingPolicies()
throws java.io.IOException
getErasureCodingPolicies in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic java.util.Map<java.lang.String,java.lang.String> getErasureCodingCodecs()
throws java.io.IOException
getErasureCodingCodecs in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse[] addErasureCodingPolicies(org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy[] policies)
throws java.io.IOException
addErasureCodingPolicies in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void removeErasureCodingPolicy(java.lang.String ecPolicyName)
throws java.io.IOException
removeErasureCodingPolicy in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void disableErasureCodingPolicy(java.lang.String ecPolicyName)
throws java.io.IOException
disableErasureCodingPolicy in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void enableErasureCodingPolicy(java.lang.String ecPolicyName)
throws java.io.IOException
enableErasureCodingPolicy in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy getErasureCodingPolicy(java.lang.String src)
throws java.io.IOException
getErasureCodingPolicy in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void setErasureCodingPolicy(java.lang.String src,
java.lang.String ecPolicyName)
throws java.io.IOException
setErasureCodingPolicy in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void unsetErasureCodingPolicy(java.lang.String src)
throws java.io.IOException
unsetErasureCodingPolicy in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.ECTopologyVerifierResult getECTopologyResultForPolicies(java.lang.String... policyNames)
throws java.io.IOException
getECTopologyResultForPolicies in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.ECBlockGroupStats getECBlockGroupStats()
throws java.io.IOException
getECBlockGroupStats in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.ReplicatedBlockStats getReplicatedBlockStats()
throws java.io.IOException
getReplicatedBlockStats in interface org.apache.hadoop.hdfs.protocol.ClientProtocol
Throws: java.io.IOException

@Deprecated
public org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries<org.apache.hadoop.hdfs.protocol.OpenFileEntry> listOpenFiles(long prevId)
throws java.io.IOException
listOpenFiles in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries<org.apache.hadoop.hdfs.protocol.OpenFileEntry> listOpenFiles(long prevId,
java.util.EnumSet<org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType> openFilesTypes,
java.lang.String path)
throws java.io.IOException
listOpenFiles in interface org.apache.hadoop.hdfs.protocol.ClientProtocol
Throws: java.io.IOException

public void msync()
throws java.io.IOException
msync in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void satisfyStoragePolicy(java.lang.String path)
throws java.io.IOException
satisfyStoragePolicy in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.DatanodeInfo[] getSlowDatanodeReport()
throws java.io.IOException
getSlowDatanodeReport in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.fs.Path getEnclosingRoot(java.lang.String src)
throws java.io.IOException
getEnclosingRoot in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.ha.HAServiceProtocol.HAServiceState getHAServiceState()
getHAServiceState in interface org.apache.hadoop.hdfs.protocol.ClientProtocol

public int getRouterFederationRenameCount()
Copyright © 2008–2025 Apache Software Foundation. All rights reserved.