All Implemented Interfaces: org.apache.hadoop.ha.HAServiceProtocol, org.apache.hadoop.hdfs.protocol.ClientProtocol, org.apache.hadoop.hdfs.protocol.ReconfigurationProtocol, DatanodeLifelineProtocol, DatanodeProtocol, NamenodeProtocol, NamenodeProtocols, org.apache.hadoop.ipc.GenericRefreshProtocol, org.apache.hadoop.ipc.RefreshCallQueueProtocol, org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol, org.apache.hadoop.security.RefreshUserMappingsProtocol, org.apache.hadoop.tools.GetUserMappingsProtocol

@Private
@VisibleForTesting
public class NameNodeRpcServer extends java.lang.Object implements NamenodeProtocols
NameNode.

| Modifier and Type | Field | Description |
|---|---|---|
protected java.net.InetSocketAddress |
clientRpcAddress |
|
protected org.apache.hadoop.ipc.RPC.Server |
clientRpcServer |
The RPC server that listens to requests from clients
|
protected FSNamesystem |
namesystem |
|
protected NameNode |
nn |
Fields inherited from interface org.apache.hadoop.hdfs.protocol.ClientProtocol:
GET_STATS_BYTES_IN_FUTURE_BLOCKS_IDX, GET_STATS_CAPACITY_IDX, GET_STATS_CORRUPT_BLOCKS_IDX, GET_STATS_LOW_REDUNDANCY_IDX, GET_STATS_MISSING_BLOCKS_IDX, GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX, GET_STATS_PENDING_DELETION_BLOCKS_IDX, GET_STATS_REMAINING_IDX, GET_STATS_UNDER_REPLICATED_IDX, GET_STATS_USED_IDX, STATS_ARRAY_LENGTH, versionID

Fields inherited from interface DatanodeProtocol:
DISK_ERROR, DNA_ACCESSKEYUPDATE, DNA_BALANCERBANDWIDTHUPDATE, DNA_BLOCK_STORAGE_MOVEMENT, DNA_CACHE, DNA_DROP_SPS_WORK_COMMAND, DNA_ERASURE_CODING_RECONSTRUCTION, DNA_FINALIZE, DNA_INVALIDATE, DNA_RECOVERBLOCK, DNA_REGISTER, DNA_SHUTDOWN, DNA_TRANSFER, DNA_UNCACHE, DNA_UNKNOWN, FATAL_DISK_ERROR, INVALID_BLOCK, NOTIFY, versionID

Fields inherited from interface NamenodeProtocol:
ACT_CHECKPOINT, ACT_SHUTDOWN, ACT_UNKNOWN, FATAL, NOTIFY, versionID

| Constructor | Description |
|---|---|
NameNodeRpcServer(org.apache.hadoop.conf.Configuration conf,
NameNode nn) |
| Modifier and Type | Method | Description |
|---|---|---|
void |
abandonBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock b,
long fileId,
java.lang.String src,
java.lang.String holder) |
The client needs to give up on the block.
|
org.apache.hadoop.hdfs.protocol.LocatedBlock |
addBlock(java.lang.String src,
java.lang.String clientName,
org.apache.hadoop.hdfs.protocol.ExtendedBlock previous,
org.apache.hadoop.hdfs.protocol.DatanodeInfo[] excludedNodes,
long fileId,
java.lang.String[] favoredNodes,
java.util.EnumSet<org.apache.hadoop.hdfs.AddBlockFlag> addBlockFlags) |
|
long |
addCacheDirective(org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo path,
java.util.EnumSet<org.apache.hadoop.fs.CacheFlag> flags) |
|
void |
addCachePool(org.apache.hadoop.hdfs.protocol.CachePoolInfo info) |
|
org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse[] |
addErasureCodingPolicies(org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy[] policies) |
|
void |
allowSnapshot(java.lang.String snapshotRoot) |
|
org.apache.hadoop.hdfs.protocol.LastBlockWithStatus |
append(java.lang.String src,
java.lang.String clientName,
org.apache.hadoop.io.EnumSetWritable<org.apache.hadoop.fs.CreateFlag> flag) |
|
void |
blockReceivedAndDeleted(DatanodeRegistration nodeReg,
java.lang.String poolId,
StorageReceivedDeletedBlocks[] receivedAndDeletedBlocks) |
blockReceivedAndDeleted() allows the DataNode to tell the NameNode about
recently-received and -deleted block data.
|
DatanodeCommand |
blockReport(DatanodeRegistration nodeReg,
java.lang.String poolId,
StorageBlockReport[] reports,
BlockReportContext context) |
blockReport() tells the NameNode about all the locally-stored blocks.
|
DatanodeCommand |
cacheReport(DatanodeRegistration nodeReg,
java.lang.String poolId,
java.util.List<java.lang.Long> blockIds) |
Communicates the complete list of locally cached blocks to the NameNode.
|
void |
cancelDelegationToken(org.apache.hadoop.security.token.Token<org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier> token) |
|
void |
checkAccess(java.lang.String path,
org.apache.hadoop.fs.permission.FsAction mode) |
|
void |
commitBlockSynchronization(org.apache.hadoop.hdfs.protocol.ExtendedBlock block,
long newgenerationstamp,
long newlength,
boolean closeFile,
boolean deleteblock,
org.apache.hadoop.hdfs.protocol.DatanodeID[] newtargets,
java.lang.String[] newtargetstorages) |
Commit block synchronization in lease recovery
|
boolean |
complete(java.lang.String src,
java.lang.String clientName,
org.apache.hadoop.hdfs.protocol.ExtendedBlock last,
long fileId) |
|
void |
concat(java.lang.String trg,
java.lang.String[] src) |
|
org.apache.hadoop.hdfs.protocol.HdfsFileStatus |
create(java.lang.String src,
org.apache.hadoop.fs.permission.FsPermission masked,
java.lang.String clientName,
org.apache.hadoop.io.EnumSetWritable<org.apache.hadoop.fs.CreateFlag> flag,
boolean createParent,
short replication,
long blockSize,
org.apache.hadoop.crypto.CryptoProtocolVersion[] supportedVersions,
java.lang.String ecPolicyName,
java.lang.String storagePolicy) |
|
void |
createEncryptionZone(java.lang.String src,
java.lang.String keyName) |
|
java.lang.String |
createSnapshot(java.lang.String snapshotRoot,
java.lang.String snapshotName) |
|
void |
createSymlink(java.lang.String target,
java.lang.String link,
org.apache.hadoop.fs.permission.FsPermission dirPerms,
boolean createParent) |
|
boolean |
delete(java.lang.String src,
boolean recursive) |
|
void |
deleteSnapshot(java.lang.String snapshotRoot,
java.lang.String snapshotName) |
|
void |
disableErasureCodingPolicy(java.lang.String ecPolicyName) |
|
void |
disallowSnapshot(java.lang.String snapshot) |
|
void |
enableErasureCodingPolicy(java.lang.String ecPolicyName) |
|
void |
endCheckpoint(NamenodeRegistration registration,
CheckpointSignature sig) |
A request to the active name-node to finalize
previously started checkpoint.
|
void |
errorReport(DatanodeRegistration nodeReg,
int errorCode,
java.lang.String msg) |
errorReport() tells the NameNode about something that has gone
awry.
|
void |
errorReport(NamenodeRegistration registration,
int errorCode,
java.lang.String msg) |
Report to the active name-node an error occurred on a subordinate node.
|
void |
finalizeUpgrade() |
|
void |
fsync(java.lang.String src,
long fileId,
java.lang.String clientName,
long lastBlockLength) |
|
org.apache.hadoop.fs.permission.AclStatus |
getAclStatus(java.lang.String src) |
|
org.apache.hadoop.hdfs.protocol.LocatedBlock |
getAdditionalDatanode(java.lang.String src,
long fileId,
org.apache.hadoop.hdfs.protocol.ExtendedBlock blk,
org.apache.hadoop.hdfs.protocol.DatanodeInfo[] existings,
java.lang.String[] existingStorageIDs,
org.apache.hadoop.hdfs.protocol.DatanodeInfo[] excludes,
int numAdditionalNodes,
java.lang.String clientName) |
|
java.util.Set<java.net.InetSocketAddress> |
getAuxiliaryRpcAddresses() |
|
org.apache.hadoop.hdfs.protocol.BatchedDirectoryListing |
getBatchedListing(java.lang.String[] srcs,
byte[] startAfter,
boolean needLocation) |
|
ExportedBlockKeys |
getBlockKeys() |
Get the current block keys
|
org.apache.hadoop.hdfs.protocol.LocatedBlocks |
getBlockLocations(java.lang.String src,
long offset,
long length) |
|
BlocksWithLocations |
getBlocks(org.apache.hadoop.hdfs.protocol.DatanodeInfo datanode,
long size,
long minBlockSize,
long timeInterval,
org.apache.hadoop.fs.StorageType storageType) |
Get a list of blocks belonging to
datanode
whose total size equals size. |
org.apache.hadoop.ipc.RPC.Server |
getClientRpcServer() |
Allow access to the client RPC server for testing
|
org.apache.hadoop.fs.ContentSummary |
getContentSummary(java.lang.String path) |
|
long |
getCurrentEditLogTxid() |
|
org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey |
getDataEncryptionKey() |
|
org.apache.hadoop.hdfs.protocol.DatanodeInfo[] |
getDatanodeReport(org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType type) |
|
org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport[] |
getDatanodeStorageReport(org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType type) |
|
org.apache.hadoop.security.token.Token<org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier> |
getDelegationToken(org.apache.hadoop.io.Text renewer) |
|
org.apache.hadoop.hdfs.protocol.ECBlockGroupStats |
getECBlockGroupStats() |
|
org.apache.hadoop.hdfs.protocol.ECTopologyVerifierResult |
getECTopologyResultForPolicies(java.lang.String... policyNames) |
|
RemoteEditLogManifest |
getEditLogManifest(long sinceTxId) |
Return a structure containing details about all edit logs
available to be fetched from the NameNode.
|
org.apache.hadoop.hdfs.inotify.EventBatchList |
getEditsFromTxid(long txid) |
|
org.apache.hadoop.fs.Path |
getEnclosingRoot(java.lang.String src) |
|
java.util.Map<java.lang.String,java.lang.String> |
getErasureCodingCodecs() |
|
org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo[] |
getErasureCodingPolicies() |
|
org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy |
getErasureCodingPolicy(java.lang.String src) |
|
org.apache.hadoop.hdfs.protocol.EncryptionZone |
getEZForPath(java.lang.String src) |
|
org.apache.hadoop.hdfs.protocol.HdfsFileStatus |
getFileInfo(java.lang.String src) |
|
org.apache.hadoop.hdfs.protocol.HdfsFileStatus |
getFileLinkInfo(java.lang.String src) |
|
java.lang.String[] |
getGroupsForUser(java.lang.String user) |
|
org.apache.hadoop.ha.HAServiceProtocol.HAServiceState |
getHAServiceState() |
|
java.lang.String |
getLinkTarget(java.lang.String path) |
|
org.apache.hadoop.hdfs.protocol.DirectoryListing |
getListing(java.lang.String src,
byte[] startAfter,
boolean needLocation) |
|
org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus |
getLocatedFileInfo(java.lang.String src,
boolean needBlockToken) |
|
long |
getMostRecentCheckpointTxId() |
Get the transaction ID of the most recent checkpoint.
|
java.lang.Long |
getNextSPSPath() |
|
long |
getPreferredBlockSize(java.lang.String filename) |
|
org.apache.hadoop.fs.QuotaUsage |
getQuotaUsage(java.lang.String path) |
|
org.apache.hadoop.conf.ReconfigurationTaskStatus |
getReconfigurationStatus() |
|
org.apache.hadoop.hdfs.protocol.ReplicatedBlockStats |
getReplicatedBlockStats() |
|
java.net.InetSocketAddress |
getRpcAddress() |
|
org.apache.hadoop.fs.FsServerDefaults |
getServerDefaults() |
|
org.apache.hadoop.ha.HAServiceStatus |
getServiceStatus() |
|
org.apache.hadoop.hdfs.protocol.DatanodeInfo[] |
getSlowDatanodeReport() |
|
org.apache.hadoop.hdfs.protocol.SnapshotDiffReport |
getSnapshotDiffReport(java.lang.String snapshotRoot,
java.lang.String earlierSnapshotName,
java.lang.String laterSnapshotName) |
|
org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing |
getSnapshotDiffReportListing(java.lang.String snapshotRoot,
java.lang.String earlierSnapshotName,
java.lang.String laterSnapshotName,
byte[] startPath,
int index) |
|
org.apache.hadoop.hdfs.protocol.SnapshotStatus[] |
getSnapshotListing(java.lang.String snapshotRoot) |
|
org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus[] |
getSnapshottableDirListing() |
|
long[] |
getStats() |
|
org.apache.hadoop.hdfs.protocol.BlockStoragePolicy[] |
getStoragePolicies() |
|
org.apache.hadoop.hdfs.protocol.BlockStoragePolicy |
getStoragePolicy(java.lang.String path) |
|
long |
getTransactionID() |
|
java.util.List<org.apache.hadoop.fs.XAttr> |
getXAttrs(java.lang.String src,
java.util.List<org.apache.hadoop.fs.XAttr> xAttrs) |
|
boolean |
isFileClosed(java.lang.String src) |
|
boolean |
isRollingUpgrade() |
Return whether the NameNode has a rolling upgrade in progress (true) or
not (false).
|
boolean |
isUpgradeFinalized() |
|
org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries<org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry> |
listCacheDirectives(long prevId,
org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo filter) |
|
org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries<org.apache.hadoop.hdfs.protocol.CachePoolEntry> |
listCachePools(java.lang.String prevKey) |
|
org.apache.hadoop.hdfs.protocol.CorruptFileBlocks |
listCorruptFileBlocks(java.lang.String path,
java.lang.String cookie) |
|
org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries<org.apache.hadoop.hdfs.protocol.EncryptionZone> |
listEncryptionZones(long prevId) |
|
org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries<org.apache.hadoop.hdfs.protocol.OpenFileEntry> |
listOpenFiles(long prevId) |
Deprecated.
|
org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries<org.apache.hadoop.hdfs.protocol.OpenFileEntry> |
listOpenFiles(long prevId,
java.util.EnumSet<org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType> openFilesTypes,
java.lang.String path) |
|
java.util.List<java.lang.String> |
listReconfigurableProperties() |
|
org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries<org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus> |
listReencryptionStatus(long prevId) |
|
java.util.List<org.apache.hadoop.fs.XAttr> |
listXAttrs(java.lang.String src) |
|
void |
metaSave(java.lang.String filename) |
|
boolean |
mkdirs(java.lang.String src,
org.apache.hadoop.fs.permission.FsPermission masked,
boolean createParent) |
|
void |
modifyAclEntries(java.lang.String src,
java.util.List<org.apache.hadoop.fs.permission.AclEntry> aclSpec) |
|
void |
modifyCacheDirective(org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo directive,
java.util.EnumSet<org.apache.hadoop.fs.CacheFlag> flags) |
|
void |
modifyCachePool(org.apache.hadoop.hdfs.protocol.CachePoolInfo info) |
|
void |
monitorHealth() |
|
void |
msync() |
|
boolean |
recoverLease(java.lang.String src,
java.lang.String clientName) |
|
void |
reencryptEncryptionZone(java.lang.String zone,
org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction action) |
|
java.util.Collection<org.apache.hadoop.ipc.RefreshResponse> |
refresh(java.lang.String identifier,
java.lang.String[] args) |
|
void |
refreshCallQueue() |
|
void |
refreshNodes() |
|
void |
refreshServiceAcl() |
|
void |
refreshSuperUserGroupsConfiguration() |
|
void |
refreshUserToGroupsMappings() |
|
DatanodeRegistration |
registerDatanode(DatanodeRegistration nodeReg) |
Register Datanode.
|
NamenodeRegistration |
registerSubordinateNamenode(NamenodeRegistration registration) |
Register a subordinate name-node like backup node.
|
void |
removeAcl(java.lang.String src) |
|
void |
removeAclEntries(java.lang.String src,
java.util.List<org.apache.hadoop.fs.permission.AclEntry> aclSpec) |
|
void |
removeCacheDirective(long id) |
|
void |
removeCachePool(java.lang.String cachePoolName) |
|
void |
removeDefaultAcl(java.lang.String src) |
|
void |
removeErasureCodingPolicy(java.lang.String ecPolicyName) |
|
void |
removeXAttr(java.lang.String src,
org.apache.hadoop.fs.XAttr xAttr) |
|
boolean |
rename(java.lang.String src,
java.lang.String dst) |
Deprecated.
|
void |
rename2(java.lang.String src,
java.lang.String dst,
org.apache.hadoop.fs.Options.Rename... options) |
|
void |
renameSnapshot(java.lang.String snapshotRoot,
java.lang.String snapshotOldName,
java.lang.String snapshotNewName) |
|
long |
renewDelegationToken(org.apache.hadoop.security.token.Token<org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier> token) |
|
void |
renewLease(java.lang.String clientName,
java.util.List<java.lang.String> namespaces) |
|
void |
reportBadBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlock[] blocks) |
The client has detected an error on the specified located blocks
and is reporting them to the server.
|
boolean |
restoreFailedStorage(java.lang.String arg) |
|
CheckpointSignature |
rollEditLog() |
Closes the current edit log and opens a new one.
|
long |
rollEdits() |
|
org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo |
rollingUpgrade(org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction action) |
|
void |
satisfyStoragePolicy(java.lang.String src) |
|
boolean |
saveNamespace(long timeWindow,
long txGap) |
|
HeartbeatResponse |
sendHeartbeat(DatanodeRegistration nodeReg,
org.apache.hadoop.hdfs.server.protocol.StorageReport[] report,
long dnCacheCapacity,
long dnCacheUsed,
int xmitsInProgress,
int xceiverCount,
int failedVolumes,
VolumeFailureSummary volumeFailureSummary,
boolean requestFullBlockReportLease,
org.apache.hadoop.hdfs.server.protocol.SlowPeerReports slowPeers,
org.apache.hadoop.hdfs.server.protocol.SlowDiskReports slowDisks) |
sendHeartbeat() tells the NameNode that the DataNode is still
alive and well.
|
void |
sendLifeline(DatanodeRegistration nodeReg,
org.apache.hadoop.hdfs.server.protocol.StorageReport[] report,
long dnCacheCapacity,
long dnCacheUsed,
int xmitsInProgress,
int xceiverCount,
int failedVolumes,
VolumeFailureSummary volumeFailureSummary) |
|
void |
setAcl(java.lang.String src,
java.util.List<org.apache.hadoop.fs.permission.AclEntry> aclSpec) |
|
void |
setBalancerBandwidth(long bandwidth) |
Tell all datanodes to use a new, non-persistent bandwidth value for
dfs.datanode.balance.bandwidthPerSec.
|
void |
setErasureCodingPolicy(java.lang.String src,
java.lang.String ecPolicyName) |
|
void |
setOwner(java.lang.String src,
java.lang.String username,
java.lang.String groupname) |
|
void |
setPermission(java.lang.String src,
org.apache.hadoop.fs.permission.FsPermission permissions) |
|
void |
setQuota(java.lang.String path,
long namespaceQuota,
long storagespaceQuota,
org.apache.hadoop.fs.StorageType type) |
|
boolean |
setReplication(java.lang.String src,
short replication) |
|
boolean |
setSafeMode(org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction action,
boolean isChecked) |
|
void |
setStoragePolicy(java.lang.String src,
java.lang.String policyName) |
|
void |
setTimes(java.lang.String src,
long mtime,
long atime) |
|
void |
setXAttr(java.lang.String src,
org.apache.hadoop.fs.XAttr xAttr,
java.util.EnumSet<org.apache.hadoop.fs.XAttrSetFlag> flag) |
|
NamenodeCommand |
startCheckpoint(NamenodeRegistration registration) |
A request to the active name-node to start a checkpoint.
|
void |
startReconfiguration() |
|
void |
transitionToActive(org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo req) |
|
void |
transitionToObserver(org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo req) |
|
void |
transitionToStandby(org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo req) |
|
boolean |
truncate(java.lang.String src,
long newLength,
java.lang.String clientName) |
|
void |
unsetErasureCodingPolicy(java.lang.String src) |
|
void |
unsetStoragePolicy(java.lang.String src) |
|
org.apache.hadoop.hdfs.protocol.LocatedBlock |
updateBlockForPipeline(org.apache.hadoop.hdfs.protocol.ExtendedBlock block,
java.lang.String clientName) |
|
void |
updatePipeline(java.lang.String clientName,
org.apache.hadoop.hdfs.protocol.ExtendedBlock oldBlock,
org.apache.hadoop.hdfs.protocol.ExtendedBlock newBlock,
org.apache.hadoop.hdfs.protocol.DatanodeID[] newNodes,
java.lang.String[] newStorageIDs) |
|
boolean |
upgradeStatus() |
|
NamespaceInfo |
versionRequest() |
Request name-node version and storage information.
|
protected final FSNamesystem namesystem
protected final NameNode nn
protected final org.apache.hadoop.ipc.RPC.Server clientRpcServer
protected final java.net.InetSocketAddress clientRpcAddress
public NameNodeRpcServer(org.apache.hadoop.conf.Configuration conf,
NameNode nn)
throws java.io.IOException
Throws: java.io.IOException

@VisibleForTesting
public org.apache.hadoop.ipc.RPC.Server getClientRpcServer()
@VisibleForTesting public java.net.InetSocketAddress getRpcAddress()
@VisibleForTesting public java.util.Set<java.net.InetSocketAddress> getAuxiliaryRpcAddresses()
public BlocksWithLocations getBlocks(org.apache.hadoop.hdfs.protocol.DatanodeInfo datanode, long size, long minBlockSize, long timeInterval, org.apache.hadoop.fs.StorageType storageType) throws java.io.IOException
Description copied from interface: NamenodeProtocol
Get a list of blocks belonging to datanode whose total size equals size.
Specified by: getBlocks in interface NamenodeProtocol
Parameters:
datanode - a data node
size - requested size
minBlockSize - each block should be of this minimum Block Size
timeInterval - prefer to get blocks which belong to the cold files accessed before the time interval
storageType - the given storage type (StorageType)
Throws: java.io.IOException - if size is less than or equal to 0 or datanode does not exist
See Also: Balancer

public ExportedBlockKeys getBlockKeys() throws java.io.IOException
NamenodeProtocolgetBlockKeys in interface NamenodeProtocoljava.io.IOExceptionpublic void errorReport(NamenodeRegistration registration, int errorCode, java.lang.String msg) throws java.io.IOException
NamenodeProtocolerrorReport in interface NamenodeProtocolregistration - requesting node.errorCode - indicates the errormsg - free text description of the errorjava.io.IOExceptionpublic NamenodeRegistration registerSubordinateNamenode(NamenodeRegistration registration) throws java.io.IOException
NamenodeProtocolregisterSubordinateNamenode in interface NamenodeProtocolNamenodeRegistration of the node,
which this node has just registered with.java.io.IOExceptionpublic NamenodeCommand startCheckpoint(NamenodeRegistration registration) throws java.io.IOException
NamenodeProtocolstartCheckpoint in interface NamenodeProtocolregistration - the requesting nodeCheckpointCommand if checkpoint is allowed.java.io.IOExceptionCheckpointCommand,
NamenodeCommand,
NamenodeProtocol.ACT_SHUTDOWNpublic void endCheckpoint(NamenodeRegistration registration, CheckpointSignature sig) throws java.io.IOException
NamenodeProtocolendCheckpoint in interface NamenodeProtocolregistration - the requesting nodesig - CheckpointSignature which identifies the checkpoint.java.io.IOExceptionpublic org.apache.hadoop.security.token.Token<org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier> getDelegationToken(org.apache.hadoop.io.Text renewer)
throws java.io.IOException
getDelegationToken in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic long renewDelegationToken(org.apache.hadoop.security.token.Token<org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier> token)
throws org.apache.hadoop.security.token.SecretManager.InvalidToken,
java.io.IOException
renewDelegationToken in interface org.apache.hadoop.hdfs.protocol.ClientProtocolorg.apache.hadoop.security.token.SecretManager.InvalidTokenjava.io.IOExceptionpublic void cancelDelegationToken(org.apache.hadoop.security.token.Token<org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier> token)
throws java.io.IOException
cancelDelegationToken in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.LocatedBlocks getBlockLocations(java.lang.String src,
long offset,
long length)
throws java.io.IOException
getBlockLocations in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.fs.FsServerDefaults getServerDefaults()
throws java.io.IOException
getServerDefaults in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.HdfsFileStatus create(java.lang.String src,
org.apache.hadoop.fs.permission.FsPermission masked,
java.lang.String clientName,
org.apache.hadoop.io.EnumSetWritable<org.apache.hadoop.fs.CreateFlag> flag,
boolean createParent,
short replication,
long blockSize,
org.apache.hadoop.crypto.CryptoProtocolVersion[] supportedVersions,
java.lang.String ecPolicyName,
java.lang.String storagePolicy)
throws java.io.IOException
create in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.LastBlockWithStatus append(java.lang.String src,
java.lang.String clientName,
org.apache.hadoop.io.EnumSetWritable<org.apache.hadoop.fs.CreateFlag> flag)
throws java.io.IOException
append in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic boolean recoverLease(java.lang.String src,
java.lang.String clientName)
throws java.io.IOException
recoverLease in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic boolean setReplication(java.lang.String src,
short replication)
throws java.io.IOException
setReplication in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void unsetStoragePolicy(java.lang.String src)
throws java.io.IOException
unsetStoragePolicy in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void setStoragePolicy(java.lang.String src,
java.lang.String policyName)
throws java.io.IOException
setStoragePolicy in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.BlockStoragePolicy getStoragePolicy(java.lang.String path)
throws java.io.IOException
getStoragePolicy in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.BlockStoragePolicy[] getStoragePolicies()
throws java.io.IOException
getStoragePolicies in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void setPermission(java.lang.String src,
org.apache.hadoop.fs.permission.FsPermission permissions)
throws java.io.IOException
setPermission in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void setOwner(java.lang.String src,
java.lang.String username,
java.lang.String groupname)
throws java.io.IOException
setOwner in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.LocatedBlock addBlock(java.lang.String src,
java.lang.String clientName,
org.apache.hadoop.hdfs.protocol.ExtendedBlock previous,
org.apache.hadoop.hdfs.protocol.DatanodeInfo[] excludedNodes,
long fileId,
java.lang.String[] favoredNodes,
java.util.EnumSet<org.apache.hadoop.hdfs.AddBlockFlag> addBlockFlags)
throws java.io.IOException
addBlock in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.LocatedBlock getAdditionalDatanode(java.lang.String src,
long fileId,
org.apache.hadoop.hdfs.protocol.ExtendedBlock blk,
org.apache.hadoop.hdfs.protocol.DatanodeInfo[] existings,
java.lang.String[] existingStorageIDs,
org.apache.hadoop.hdfs.protocol.DatanodeInfo[] excludes,
int numAdditionalNodes,
java.lang.String clientName)
throws java.io.IOException
getAdditionalDatanode in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void abandonBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock b,
long fileId,
java.lang.String src,
java.lang.String holder)
throws java.io.IOException
abandonBlock in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic boolean complete(java.lang.String src,
java.lang.String clientName,
org.apache.hadoop.hdfs.protocol.ExtendedBlock last,
long fileId)
throws java.io.IOException
complete in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void reportBadBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlock[] blocks)
throws java.io.IOException
reportBadBlocks in interface org.apache.hadoop.hdfs.protocol.ClientProtocolreportBadBlocks in interface DatanodeProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.LocatedBlock updateBlockForPipeline(org.apache.hadoop.hdfs.protocol.ExtendedBlock block,
java.lang.String clientName)
throws java.io.IOException
updateBlockForPipeline in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void updatePipeline(java.lang.String clientName,
org.apache.hadoop.hdfs.protocol.ExtendedBlock oldBlock,
org.apache.hadoop.hdfs.protocol.ExtendedBlock newBlock,
org.apache.hadoop.hdfs.protocol.DatanodeID[] newNodes,
java.lang.String[] newStorageIDs)
throws java.io.IOException
updatePipeline in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void commitBlockSynchronization(org.apache.hadoop.hdfs.protocol.ExtendedBlock block,
long newgenerationstamp,
long newlength,
boolean closeFile,
boolean deleteblock,
org.apache.hadoop.hdfs.protocol.DatanodeID[] newtargets,
java.lang.String[] newtargetstorages)
throws java.io.IOException
DatanodeProtocolcommitBlockSynchronization in interface DatanodeProtocoljava.io.IOExceptionpublic long getPreferredBlockSize(java.lang.String filename)
throws java.io.IOException
Specified by: getPreferredBlockSize in interface org.apache.hadoop.hdfs.protocol.ClientProtocol
Throws: java.io.IOException

@Deprecated
public boolean rename(java.lang.String src,
java.lang.String dst)
throws java.io.IOException
rename in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void concat(java.lang.String trg,
java.lang.String[] src)
throws java.io.IOException
concat in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void rename2(java.lang.String src,
java.lang.String dst,
org.apache.hadoop.fs.Options.Rename... options)
throws java.io.IOException
rename2 in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic boolean truncate(java.lang.String src,
long newLength,
java.lang.String clientName)
throws java.io.IOException
truncate in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic boolean delete(java.lang.String src,
boolean recursive)
throws java.io.IOException
delete in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic boolean mkdirs(java.lang.String src,
org.apache.hadoop.fs.permission.FsPermission masked,
boolean createParent)
throws java.io.IOException
mkdirs in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void renewLease(java.lang.String clientName,
java.util.List<java.lang.String> namespaces)
throws java.io.IOException
renewLease in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.DirectoryListing getListing(java.lang.String src,
byte[] startAfter,
boolean needLocation)
throws java.io.IOException
getListing in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.BatchedDirectoryListing getBatchedListing(java.lang.String[] srcs,
byte[] startAfter,
boolean needLocation)
throws java.io.IOException
getBatchedListing in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.HdfsFileStatus getFileInfo(java.lang.String src)
throws java.io.IOException
getFileInfo in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus getLocatedFileInfo(java.lang.String src,
boolean needBlockToken)
throws java.io.IOException
getLocatedFileInfo in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic boolean isFileClosed(java.lang.String src)
throws java.io.IOException
isFileClosed in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.HdfsFileStatus getFileLinkInfo(java.lang.String src)
throws java.io.IOException
getFileLinkInfo in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic long[] getStats()
throws java.io.IOException
getStats in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.ReplicatedBlockStats getReplicatedBlockStats()
throws java.io.IOException
getReplicatedBlockStats in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.ECBlockGroupStats getECBlockGroupStats()
throws java.io.IOException
getECBlockGroupStats in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.DatanodeInfo[] getDatanodeReport(org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType type)
throws java.io.IOException
getDatanodeReport in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport[] getDatanodeStorageReport(org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType type)
throws java.io.IOException
getDatanodeStorageReport in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic boolean setSafeMode(org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction action,
boolean isChecked)
throws java.io.IOException
setSafeMode in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic boolean restoreFailedStorage(java.lang.String arg)
throws java.io.IOException
restoreFailedStorage in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic boolean saveNamespace(long timeWindow,
long txGap)
throws java.io.IOException
saveNamespace in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic long rollEdits()
throws org.apache.hadoop.security.AccessControlException,
java.io.IOException
rollEdits in interface org.apache.hadoop.hdfs.protocol.ClientProtocolorg.apache.hadoop.security.AccessControlExceptionjava.io.IOExceptionpublic void refreshNodes()
throws java.io.IOException
refreshNodes in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic long getTransactionID()
throws java.io.IOException
getTransactionID in interface NamenodeProtocoljava.io.IOExceptionpublic long getMostRecentCheckpointTxId()
throws java.io.IOException
NamenodeProtocolgetMostRecentCheckpointTxId in interface NamenodeProtocoljava.io.IOExceptionpublic CheckpointSignature rollEditLog() throws java.io.IOException
NamenodeProtocolrollEditLog in interface NamenodeProtocoljava.io.IOExceptionpublic RemoteEditLogManifest getEditLogManifest(long sinceTxId) throws java.io.IOException
NamenodeProtocolgetEditLogManifest in interface NamenodeProtocolsinceTxId - return only logs that contain transactions >=
sinceTxId
Throws: java.io.IOException

public boolean isUpgradeFinalized()
throws java.io.IOException
isUpgradeFinalized in interface NamenodeProtocoljava.io.IOExceptionpublic boolean isRollingUpgrade()
throws java.io.IOException
NamenodeProtocolisRollingUpgrade in interface NamenodeProtocoljava.io.IOExceptionpublic void finalizeUpgrade()
throws java.io.IOException
finalizeUpgrade in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic boolean upgradeStatus()
throws java.io.IOException
upgradeStatus in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo rollingUpgrade(org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction action)
throws java.io.IOException
rollingUpgrade in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void metaSave(java.lang.String filename)
throws java.io.IOException
Specified by: metaSave in interface org.apache.hadoop.hdfs.protocol.ClientProtocol
Throws: java.io.IOException

@Deprecated
public org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries<org.apache.hadoop.hdfs.protocol.OpenFileEntry> listOpenFiles(long prevId)
throws java.io.IOException
listOpenFiles in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries<org.apache.hadoop.hdfs.protocol.OpenFileEntry> listOpenFiles(long prevId,
java.util.EnumSet<org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType> openFilesTypes,
java.lang.String path)
throws java.io.IOException
listOpenFiles in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void msync()
throws java.io.IOException
msync in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.ha.HAServiceProtocol.HAServiceState getHAServiceState()
throws java.io.IOException
getHAServiceState in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.CorruptFileBlocks listCorruptFileBlocks(java.lang.String path,
java.lang.String cookie)
throws java.io.IOException
listCorruptFileBlocks in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void setBalancerBandwidth(long bandwidth)
throws java.io.IOException
setBalancerBandwidth in interface org.apache.hadoop.hdfs.protocol.ClientProtocolbandwidth - Balancer bandwidth in bytes per second for all datanodes.java.io.IOExceptionpublic org.apache.hadoop.fs.ContentSummary getContentSummary(java.lang.String path)
throws java.io.IOException
getContentSummary in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.fs.QuotaUsage getQuotaUsage(java.lang.String path)
throws java.io.IOException
getQuotaUsage in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void satisfyStoragePolicy(java.lang.String src)
throws java.io.IOException
satisfyStoragePolicy in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.DatanodeInfo[] getSlowDatanodeReport()
throws java.io.IOException
getSlowDatanodeReport in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void setQuota(java.lang.String path,
long namespaceQuota,
long storagespaceQuota,
org.apache.hadoop.fs.StorageType type)
throws java.io.IOException
setQuota in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void fsync(java.lang.String src,
long fileId,
java.lang.String clientName,
long lastBlockLength)
throws java.io.IOException
fsync in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void setTimes(java.lang.String src,
long mtime,
long atime)
throws java.io.IOException
setTimes in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void createSymlink(java.lang.String target,
java.lang.String link,
org.apache.hadoop.fs.permission.FsPermission dirPerms,
boolean createParent)
throws java.io.IOException
createSymlink in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic java.lang.String getLinkTarget(java.lang.String path)
throws java.io.IOException
getLinkTarget in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic DatanodeRegistration registerDatanode(DatanodeRegistration nodeReg) throws java.io.IOException
DatanodeProtocolregisterDatanode in interface DatanodeProtocolnodeReg - datanode registration informationDatanodeRegistration with
updated registration informationjava.io.IOExceptionFSNamesystem.registerDatanode(DatanodeRegistration)public HeartbeatResponse sendHeartbeat(DatanodeRegistration nodeReg, org.apache.hadoop.hdfs.server.protocol.StorageReport[] report, long dnCacheCapacity, long dnCacheUsed, int xmitsInProgress, int xceiverCount, int failedVolumes, VolumeFailureSummary volumeFailureSummary, boolean requestFullBlockReportLease, @Nonnull org.apache.hadoop.hdfs.server.protocol.SlowPeerReports slowPeers, @Nonnull org.apache.hadoop.hdfs.server.protocol.SlowDiskReports slowDisks) throws java.io.IOException
DatanodeProtocolsendHeartbeat in interface DatanodeProtocolnodeReg - datanode registration information.report - utilization report per storage.dnCacheCapacity - the total cache capacity of the datanode (in bytes).dnCacheUsed - the amount of cache used by the datanode (in bytes).xmitsInProgress - number of transfers from this datanode to others.xceiverCount - number of active transceiver threads.failedVolumes - number of failed volumes.volumeFailureSummary - info about volume failures.requestFullBlockReportLease - whether to request a full block
report lease.slowPeers - Details of peer DataNodes that were detected as being
slow to respond to packet writes. Empty report if no
slow peers were detected by the DataNode.slowDisks - Details of disks on DataNodes that were detected as
being slow. Empty report if no slow disks were detected.java.io.IOException - on error.public DatanodeCommand blockReport(DatanodeRegistration nodeReg, java.lang.String poolId, StorageBlockReport[] reports, BlockReportContext context) throws java.io.IOException
DatanodeProtocolblockReport in interface DatanodeProtocolnodeReg - datanode registrationpoolId - the block pool ID for the blocksreports - report of blocks per storage
Each finalized block is represented as 3 longs. Each under-
construction replica is represented as 4 longs.
This is done instead of Block[] to reduce memory used by block reports.context - Context information for this block report.java.io.IOExceptionpublic DatanodeCommand cacheReport(DatanodeRegistration nodeReg, java.lang.String poolId, java.util.List<java.lang.Long> blockIds) throws java.io.IOException
DatanodeProtocolDatanodeProtocol.blockReport(DatanodeRegistration, String, StorageBlockReport[], BlockReportContext),
which is used to communicate blocks stored on disk.cacheReport in interface DatanodeProtocolnodeReg - The datanode registration.poolId - The block pool ID for the blocks.blockIds - A list of block IDs.java.io.IOExceptionpublic void blockReceivedAndDeleted(DatanodeRegistration nodeReg, java.lang.String poolId, StorageReceivedDeletedBlocks[] receivedAndDeletedBlocks) throws java.io.IOException
DatanodeProtocolblockReceivedAndDeleted in interface DatanodeProtocoljava.io.IOExceptionpublic void errorReport(DatanodeRegistration nodeReg, int errorCode, java.lang.String msg) throws java.io.IOException
DatanodeProtocolerrorReport in interface DatanodeProtocoljava.io.IOExceptionpublic NamespaceInfo versionRequest() throws java.io.IOException
NamenodeProtocolversionRequest in interface DatanodeProtocolversionRequest in interface NamenodeProtocolNamespaceInfo identifying versions and storage information
of the name-nodejava.io.IOExceptionpublic void sendLifeline(DatanodeRegistration nodeReg, org.apache.hadoop.hdfs.server.protocol.StorageReport[] report, long dnCacheCapacity, long dnCacheUsed, int xmitsInProgress, int xceiverCount, int failedVolumes, VolumeFailureSummary volumeFailureSummary) throws java.io.IOException
sendLifeline in interface DatanodeLifelineProtocoljava.io.IOExceptionpublic void refreshServiceAcl()
throws java.io.IOException
refreshServiceAcl in interface org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocoljava.io.IOExceptionpublic void refreshUserToGroupsMappings()
throws java.io.IOException
refreshUserToGroupsMappings in interface org.apache.hadoop.security.RefreshUserMappingsProtocoljava.io.IOExceptionpublic void refreshSuperUserGroupsConfiguration()
throws java.io.IOException
refreshSuperUserGroupsConfiguration in interface org.apache.hadoop.security.RefreshUserMappingsProtocoljava.io.IOExceptionpublic void refreshCallQueue()
throws java.io.IOException
refreshCallQueue in interface org.apache.hadoop.ipc.RefreshCallQueueProtocoljava.io.IOExceptionpublic java.util.Collection<org.apache.hadoop.ipc.RefreshResponse> refresh(java.lang.String identifier,
java.lang.String[] args)
refresh in interface org.apache.hadoop.ipc.GenericRefreshProtocolpublic java.lang.String[] getGroupsForUser(java.lang.String user)
throws java.io.IOException
getGroupsForUser in interface org.apache.hadoop.tools.GetUserMappingsProtocoljava.io.IOExceptionpublic void monitorHealth()
throws org.apache.hadoop.ha.HealthCheckFailedException,
org.apache.hadoop.security.AccessControlException,
java.io.IOException
monitorHealth in interface org.apache.hadoop.ha.HAServiceProtocolorg.apache.hadoop.ha.HealthCheckFailedExceptionorg.apache.hadoop.security.AccessControlExceptionjava.io.IOExceptionpublic void transitionToActive(org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo req)
throws org.apache.hadoop.ha.ServiceFailedException,
org.apache.hadoop.security.AccessControlException,
java.io.IOException
transitionToActive in interface org.apache.hadoop.ha.HAServiceProtocolorg.apache.hadoop.ha.ServiceFailedExceptionorg.apache.hadoop.security.AccessControlExceptionjava.io.IOExceptionpublic void transitionToStandby(org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo req)
throws org.apache.hadoop.ha.ServiceFailedException,
org.apache.hadoop.security.AccessControlException,
java.io.IOException
transitionToStandby in interface org.apache.hadoop.ha.HAServiceProtocolorg.apache.hadoop.ha.ServiceFailedExceptionorg.apache.hadoop.security.AccessControlExceptionjava.io.IOExceptionpublic void transitionToObserver(org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo req)
throws org.apache.hadoop.ha.ServiceFailedException,
org.apache.hadoop.security.AccessControlException,
java.io.IOException
transitionToObserver in interface org.apache.hadoop.ha.HAServiceProtocolorg.apache.hadoop.ha.ServiceFailedExceptionorg.apache.hadoop.security.AccessControlExceptionjava.io.IOExceptionpublic org.apache.hadoop.ha.HAServiceStatus getServiceStatus()
throws org.apache.hadoop.security.AccessControlException,
org.apache.hadoop.ha.ServiceFailedException,
java.io.IOException
getServiceStatus in interface org.apache.hadoop.ha.HAServiceProtocolorg.apache.hadoop.security.AccessControlExceptionorg.apache.hadoop.ha.ServiceFailedExceptionjava.io.IOExceptionpublic org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey getDataEncryptionKey()
throws java.io.IOException
getDataEncryptionKey in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic java.lang.String createSnapshot(java.lang.String snapshotRoot,
java.lang.String snapshotName)
throws java.io.IOException
createSnapshot in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void deleteSnapshot(java.lang.String snapshotRoot,
java.lang.String snapshotName)
throws java.io.IOException
deleteSnapshot in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void allowSnapshot(java.lang.String snapshotRoot)
throws java.io.IOException
allowSnapshot in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void disallowSnapshot(java.lang.String snapshot)
throws java.io.IOException
disallowSnapshot in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void renameSnapshot(java.lang.String snapshotRoot,
java.lang.String snapshotOldName,
java.lang.String snapshotNewName)
throws java.io.IOException
renameSnapshot in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus[] getSnapshottableDirListing()
throws java.io.IOException
getSnapshottableDirListing in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.SnapshotStatus[] getSnapshotListing(java.lang.String snapshotRoot)
throws java.io.IOException
getSnapshotListing in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.SnapshotDiffReport getSnapshotDiffReport(java.lang.String snapshotRoot,
java.lang.String earlierSnapshotName,
java.lang.String laterSnapshotName)
throws java.io.IOException
getSnapshotDiffReport in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing getSnapshotDiffReportListing(java.lang.String snapshotRoot,
java.lang.String earlierSnapshotName,
java.lang.String laterSnapshotName,
byte[] startPath,
int index)
throws java.io.IOException
getSnapshotDiffReportListing in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic long addCacheDirective(org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo path,
java.util.EnumSet<org.apache.hadoop.fs.CacheFlag> flags)
throws java.io.IOException
addCacheDirective in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void modifyCacheDirective(org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo directive,
java.util.EnumSet<org.apache.hadoop.fs.CacheFlag> flags)
throws java.io.IOException
modifyCacheDirective in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void removeCacheDirective(long id)
throws java.io.IOException
removeCacheDirective in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries<org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry> listCacheDirectives(long prevId,
org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo filter)
throws java.io.IOException
listCacheDirectives in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void addCachePool(org.apache.hadoop.hdfs.protocol.CachePoolInfo info)
throws java.io.IOException
addCachePool in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void modifyCachePool(org.apache.hadoop.hdfs.protocol.CachePoolInfo info)
throws java.io.IOException
modifyCachePool in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void removeCachePool(java.lang.String cachePoolName)
throws java.io.IOException
removeCachePool in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries<org.apache.hadoop.hdfs.protocol.CachePoolEntry> listCachePools(java.lang.String prevKey)
throws java.io.IOException
listCachePools in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void modifyAclEntries(java.lang.String src,
java.util.List<org.apache.hadoop.fs.permission.AclEntry> aclSpec)
throws java.io.IOException
modifyAclEntries in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void removeAclEntries(java.lang.String src,
java.util.List<org.apache.hadoop.fs.permission.AclEntry> aclSpec)
throws java.io.IOException
removeAclEntries in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void removeDefaultAcl(java.lang.String src)
throws java.io.IOException
removeDefaultAcl in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void removeAcl(java.lang.String src)
throws java.io.IOException
removeAcl in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void setAcl(java.lang.String src,
java.util.List<org.apache.hadoop.fs.permission.AclEntry> aclSpec)
throws java.io.IOException
setAcl in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.fs.permission.AclStatus getAclStatus(java.lang.String src)
throws java.io.IOException
getAclStatus in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void createEncryptionZone(java.lang.String src,
java.lang.String keyName)
throws java.io.IOException
createEncryptionZone in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.EncryptionZone getEZForPath(java.lang.String src)
throws java.io.IOException
getEZForPath in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries<org.apache.hadoop.hdfs.protocol.EncryptionZone> listEncryptionZones(long prevId)
throws java.io.IOException
listEncryptionZones in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void reencryptEncryptionZone(java.lang.String zone,
org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction action)
throws java.io.IOException
reencryptEncryptionZone in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries<org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus> listReencryptionStatus(long prevId)
throws java.io.IOException
listReencryptionStatus in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void setErasureCodingPolicy(java.lang.String src,
java.lang.String ecPolicyName)
throws java.io.IOException
setErasureCodingPolicy in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void setXAttr(java.lang.String src,
org.apache.hadoop.fs.XAttr xAttr,
java.util.EnumSet<org.apache.hadoop.fs.XAttrSetFlag> flag)
throws java.io.IOException
setXAttr in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic java.util.List<org.apache.hadoop.fs.XAttr> getXAttrs(java.lang.String src,
java.util.List<org.apache.hadoop.fs.XAttr> xAttrs)
throws java.io.IOException
getXAttrs in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic java.util.List<org.apache.hadoop.fs.XAttr> listXAttrs(java.lang.String src)
throws java.io.IOException
listXAttrs in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void removeXAttr(java.lang.String src,
org.apache.hadoop.fs.XAttr xAttr)
throws java.io.IOException
removeXAttr in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void checkAccess(java.lang.String path,
org.apache.hadoop.fs.permission.FsAction mode)
throws java.io.IOException
checkAccess in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic long getCurrentEditLogTxid()
throws java.io.IOException
getCurrentEditLogTxid in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.inotify.EventBatchList getEditsFromTxid(long txid)
throws java.io.IOException
getEditsFromTxid in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo[] getErasureCodingPolicies()
throws java.io.IOException
getErasureCodingPolicies in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic java.util.Map<java.lang.String,java.lang.String> getErasureCodingCodecs()
throws java.io.IOException
getErasureCodingCodecs in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy getErasureCodingPolicy(java.lang.String src)
throws java.io.IOException
getErasureCodingPolicy in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void unsetErasureCodingPolicy(java.lang.String src)
throws java.io.IOException
unsetErasureCodingPolicy in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.ECTopologyVerifierResult getECTopologyResultForPolicies(java.lang.String... policyNames)
throws java.io.IOException
getECTopologyResultForPolicies in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse[] addErasureCodingPolicies(org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy[] policies)
throws java.io.IOException
addErasureCodingPolicies in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void removeErasureCodingPolicy(java.lang.String ecPolicyName)
throws java.io.IOException
removeErasureCodingPolicy in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void enableErasureCodingPolicy(java.lang.String ecPolicyName)
throws java.io.IOException
enableErasureCodingPolicy in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void disableErasureCodingPolicy(java.lang.String ecPolicyName)
throws java.io.IOException
disableErasureCodingPolicy in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionpublic void startReconfiguration()
throws java.io.IOException
startReconfiguration in interface org.apache.hadoop.hdfs.protocol.ReconfigurationProtocoljava.io.IOExceptionpublic org.apache.hadoop.conf.ReconfigurationTaskStatus getReconfigurationStatus()
throws java.io.IOException
getReconfigurationStatus in interface org.apache.hadoop.hdfs.protocol.ReconfigurationProtocoljava.io.IOExceptionpublic java.util.List<java.lang.String> listReconfigurableProperties()
throws java.io.IOException
listReconfigurableProperties in interface org.apache.hadoop.hdfs.protocol.ReconfigurationProtocoljava.io.IOExceptionpublic java.lang.Long getNextSPSPath()
throws java.io.IOException
getNextSPSPath in interface NamenodeProtocoljava.io.IOExceptionpublic org.apache.hadoop.fs.Path getEnclosingRoot(java.lang.String src)
throws java.io.IOException
getEnclosingRoot in interface org.apache.hadoop.hdfs.protocol.ClientProtocoljava.io.IOExceptionCopyright © 2008–2025 Apache Software Foundation. All rights reserved.