| Package | Description |
|---|---|
| org.apache.hadoop.hdfs.server.blockmanagement | |
| org.apache.hadoop.hdfs.server.namenode | |
| Modifier and Type | Class | Description |
|---|---|---|
static class |
ProvidedStorageMap.ProvidedDescriptor |
An abstract DatanodeDescriptor to track datanodes with provided storages.
|
| Modifier and Type | Field | Description |
|---|---|---|
static DatanodeDescriptor[] |
DatanodeDescriptor.EMPTY_ARRAY |
| Modifier and Type | Method | Description |
|---|---|---|
protected DatanodeDescriptor |
AvailableSpaceBlockPlacementPolicy.chooseDataNode(java.lang.String scope,
java.util.Collection<org.apache.hadoop.net.Node> excludedNode) |
|
protected DatanodeDescriptor |
AvailableSpaceBlockPlacementPolicy.chooseDataNode(java.lang.String scope,
java.util.Collection<org.apache.hadoop.net.Node> excludedNode,
org.apache.hadoop.fs.StorageType type) |
|
protected DatanodeDescriptor |
AvailableSpaceRackFaultTolerantBlockPlacementPolicy.chooseDataNode(java.lang.String scope,
java.util.Collection<org.apache.hadoop.net.Node> excludedNode) |
|
protected DatanodeDescriptor |
AvailableSpaceRackFaultTolerantBlockPlacementPolicy.chooseDataNode(java.lang.String scope,
java.util.Collection<org.apache.hadoop.net.Node> excludedNode,
org.apache.hadoop.fs.StorageType type) |
|
protected DatanodeDescriptor |
BlockPlacementPolicyDefault.chooseDataNode(java.lang.String scope,
java.util.Collection<org.apache.hadoop.net.Node> excludedNodes) |
Choose a datanode from the given scope.
|
protected DatanodeDescriptor |
BlockPlacementPolicyDefault.chooseDataNode(java.lang.String scope,
java.util.Collection<org.apache.hadoop.net.Node> excludedNodes,
org.apache.hadoop.fs.StorageType type) |
Choose a datanode from the given scope with specified
storage type.
|
DatanodeDescriptor |
ProvidedStorageMap.chooseProvidedDatanode() |
Choose a datanode that reported a volume of
StorageType PROVIDED. |
DatanodeDescriptor |
BlockInfo.getDatanode(int index) |
|
DatanodeDescriptor |
DatanodeDescriptor.CachedBlocksList.getDatanode() |
|
DatanodeDescriptor |
DatanodeManager.getDatanode(java.lang.String datanodeUuid) |
Get a datanode descriptor given the corresponding datanode UUID.
|
DatanodeDescriptor |
DatanodeManager.getDatanode(org.apache.hadoop.hdfs.protocol.DatanodeID nodeID) |
Get data node by datanode ID.
|
DatanodeDescriptor |
DatanodeManager.getDatanodeByHost(java.lang.String host) |
|
DatanodeDescriptor |
DatanodeManager.getDatanodeByXferAddr(java.lang.String host,
int xferPort) |
|
DatanodeDescriptor |
DatanodeStorageInfo.getDatanodeDescriptor() |
| Modifier and Type | Method | Description |
|---|---|---|
java.util.List<DatanodeDescriptor> |
DatanodeManager.getAllSlowDataNodes() |
|
java.util.Queue<DatanodeDescriptor> |
DatanodeAdminMonitorBase.getCancelledNodes() |
|
java.util.Queue<DatanodeDescriptor> |
DatanodeAdminMonitorInterface.getCancelledNodes() |
|
java.util.Collection<DatanodeDescriptor> |
BlockManager.getCorruptReplicas(org.apache.hadoop.hdfs.protocol.Block block) |
Get the replicas which are corrupt for a given block.
|
java.util.List<DatanodeDescriptor> |
DatanodeManager.getDatanodeListForReport(org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType type) |
For generating datanode reports
|
java.util.Map<java.lang.String,DatanodeDescriptor> |
DatanodeManager.getDatanodeMap() |
|
java.util.Set<DatanodeDescriptor> |
DatanodeManager.getDatanodes() |
|
java.util.List<DatanodeDescriptor> |
DatanodeManager.getDecommissioningNodes() |
|
java.util.List<DatanodeDescriptor> |
DatanodeManager.getEnteringMaintenanceNodes() |
|
java.util.Queue<DatanodeDescriptor> |
DatanodeAdminManager.getPendingNodes() |
|
java.util.Queue<DatanodeDescriptor> |
DatanodeAdminMonitorBase.getPendingNodes() |
|
java.util.Queue<DatanodeDescriptor> |
DatanodeAdminMonitorInterface.getPendingNodes() |
| Modifier and Type | Method | Description |
|---|---|---|
protected int |
BlockPlacementPolicyDefault.addToExcludedNodes(DatanodeDescriptor localMachine,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes) |
Add localMachine and related nodes to excludedNodes
for next replica choosing.
|
protected int |
BlockPlacementPolicyWithNodeGroup.addToExcludedNodes(DatanodeDescriptor chosenNode,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes) |
Find other nodes in the same node group as localMachine and add them
to excludedNodes, since replicas should not be duplicated across nodes
within the same node group.
|
protected void |
BlockPlacementPolicyDefault.chooseRemoteRack(int numOfReplicas,
DatanodeDescriptor localMachine,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
long blocksize,
int maxReplicasPerRack,
java.util.List<DatanodeStorageInfo> results,
boolean avoidStaleNodes,
java.util.EnumMap<org.apache.hadoop.fs.StorageType,java.lang.Integer> storageTypes) |
Choose numOfReplicas nodes from the racks
that localMachine is NOT on.
|
protected void |
BlockPlacementPolicyWithNodeGroup.chooseRemoteRack(int numOfReplicas,
DatanodeDescriptor localMachine,
java.util.Set<org.apache.hadoop.net.Node> excludedNodes,
long blocksize,
int maxReplicasPerRack,
java.util.List<DatanodeStorageInfo> results,
boolean avoidStaleNodes,
java.util.EnumMap<org.apache.hadoop.fs.StorageType,java.lang.Integer> storageTypes) |
|
abstract java.util.List<DatanodeStorageInfo> |
BlockPlacementPolicy.chooseReplicasToDelete(java.util.Collection<DatanodeStorageInfo> availableReplicas,
java.util.Collection<DatanodeStorageInfo> delCandidates,
int expectedNumOfReplicas,
java.util.List<org.apache.hadoop.fs.StorageType> excessTypes,
DatanodeDescriptor addedNode,
DatanodeDescriptor delNodeHint) |
Select the excess replica storages for deletion based on either
delNodeHint or the excess storage types.
|
java.util.List<DatanodeStorageInfo> |
BlockPlacementPolicyDefault.chooseReplicasToDelete(java.util.Collection<DatanodeStorageInfo> availableReplicas,
java.util.Collection<DatanodeStorageInfo> delCandidates,
int expectedNumOfReplicas,
java.util.List<org.apache.hadoop.fs.StorageType> excessTypes,
DatanodeDescriptor addedNode,
DatanodeDescriptor delNodeHint) |
|
DatanodeStorageInfo[] |
BlockManager.chooseTarget4WebHDFS(java.lang.String src,
DatanodeDescriptor clientnode,
java.util.Set<org.apache.hadoop.net.Node> excludes,
long blocksize) |
Choose target for WebHDFS redirection.
|
protected int |
AvailableSpaceBlockPlacementPolicy.compareDataNode(DatanodeDescriptor a,
DatanodeDescriptor b,
boolean isBalanceLocal) |
Compare the two data nodes.
|
protected int |
AvailableSpaceRackFaultTolerantBlockPlacementPolicy.compareDataNode(DatanodeDescriptor a,
DatanodeDescriptor b) |
Compare the two data nodes.
|
java.lang.String |
BlockManager.getCorruptReason(org.apache.hadoop.hdfs.protocol.Block block,
DatanodeDescriptor node) |
Get reason for certain corrupted replicas for a given block and a given dn.
|
boolean |
BlockManager.isExcess(DatanodeDescriptor dn,
BlockInfo blk) |
|
protected boolean |
BlockPlacementPolicyWithUpgradeDomain.isGoodDatanode(DatanodeDescriptor node,
int maxTargetPerRack,
boolean considerLoad,
java.util.List<DatanodeStorageInfo> results,
boolean avoidStaleNodes) |
|
protected void |
DatanodeAdminManager.logBlockReplicationInfo(BlockInfo block,
BlockCollection bc,
DatanodeDescriptor srcNode,
NumberReplicas num,
java.lang.Iterable<DatanodeStorageInfo> storages) |
|
void |
BlockManagerFaultInjector.removeBlockReportLease(DatanodeDescriptor node,
long leaseId) |
|
void |
ProvidedStorageMap.removeDatanode(DatanodeDescriptor dnToRemove) |
|
void |
BlockManager.removeStoredBlock(BlockInfo storedBlock,
DatanodeDescriptor node) |
Modify (block-->datanode) map.
|
void |
BlockManagerFaultInjector.requestBlockReportLease(DatanodeDescriptor node,
long leaseId) |
|
protected void |
DatanodeAdminManager.setDecommissioned(DatanodeDescriptor dn) |
|
protected void |
DatanodeAdminManager.setInMaintenance(DatanodeDescriptor dn) |
|
void |
DatanodeAdminManager.startDecommission(DatanodeDescriptor node) |
Start decommissioning the specified datanode.
|
void |
DatanodeAdminManager.startMaintenance(DatanodeDescriptor node,
long maintenanceExpireTimeInMS) |
Start maintenance of the specified datanode.
|
void |
DatanodeAdminMonitorBase.startTrackingNode(DatanodeDescriptor dn) |
Start tracking a node for decommission or maintenance.
|
void |
DatanodeAdminMonitorInterface.startTrackingNode(DatanodeDescriptor dn) |
|
void |
DatanodeAdminManager.stopDecommission(DatanodeDescriptor node) |
Stop decommissioning the specified datanode.
|
void |
DatanodeAdminManager.stopMaintenance(DatanodeDescriptor node) |
Stop maintenance of the specified datanode.
|
void |
DatanodeAdminBackoffMonitor.stopTrackingNode(DatanodeDescriptor dn) |
Queue a node to be removed from tracking.
|
void |
DatanodeAdminDefaultMonitor.stopTrackingNode(DatanodeDescriptor dn) |
|
void |
DatanodeAdminMonitorInterface.stopTrackingNode(DatanodeDescriptor dn) |
|
void |
ProvidedStorageMap.updateStorage(DatanodeDescriptor node,
org.apache.hadoop.hdfs.server.protocol.DatanodeStorage storage) |
| Modifier and Type | Method | Description |
|---|---|---|
protected void |
BlockPlacementPolicyDefault.chooseFavouredNodes(java.lang.String src,
int numOfReplicas,
java.util.List<DatanodeDescriptor> favoredNodes,
java.util.Set<org.apache.hadoop.net.Node> favoriteAndExcludedNodes,
long blocksize,
int maxNodesPerRack,
java.util.List<DatanodeStorageInfo> results,
boolean avoidStaleNodes,
java.util.EnumMap<org.apache.hadoop.fs.StorageType,java.lang.Integer> storageTypes) |
|
protected void |
BlockPlacementPolicyWithNodeGroup.chooseFavouredNodes(java.lang.String src,
int numOfReplicas,
java.util.List<DatanodeDescriptor> favoredNodes,
java.util.Set<org.apache.hadoop.net.Node> favoriteAndExcludedNodes,
long blocksize,
int maxNodesPerRack,
java.util.List<DatanodeStorageInfo> results,
boolean avoidStaleNodes,
java.util.EnumMap<org.apache.hadoop.fs.StorageType,java.lang.Integer> storageTypes) |
Choose all good favored nodes as targets.
|
void |
DatanodeManager.fetchDatanodes(java.util.List<DatanodeDescriptor> live,
java.util.List<DatanodeDescriptor> dead,
boolean removeDecommissionNode) |
Fetch live and dead datanodes.
|
| Modifier and Type | Method | Description |
|---|---|---|
java.util.List<DatanodeDescriptor> |
CachedBlock.getDatanodes(DatanodeDescriptor.CachedBlocksList.Type type) |
Get a list of the datanodes on which this block is cached,
planned to be cached, or planned to be uncached.
|
Copyright © 2008–2025 Apache Software Foundation. All rights reserved.