All Implemented Interfaces: java.io.Closeable, java.lang.AutoCloseable, org.apache.hadoop.conf.Configurable
Direct Known Subclasses: CancelCommand, ExecuteCommand, HelpCommand, PlanCommand, QueryCommand, ReportCommand
public abstract class Command
extends org.apache.hadoop.conf.Configured
implements java.io.Closeable
| Constructor | Description |
|---|---|
Command(org.apache.hadoop.conf.Configuration conf) |
Constructs a command.
|
Command(org.apache.hadoop.conf.Configuration conf,
java.io.PrintStream ps) |
Constructs a command.
|
| Modifier and Type | Method | Description |
|---|---|---|
protected void |
addValidCommandParameters(java.lang.String key,
java.lang.String desc) |
Adds valid params to the valid args table.
|
void |
close() |
Cleans any resources held by this command.
|
protected org.apache.hadoop.fs.FSDataOutputStream |
create(java.lang.String fileName) |
Returns a file created in the cluster.
|
abstract void |
execute(org.apache.commons.cli.CommandLine cmd) |
Executes the Client Calls.
|
java.net.URI |
getClusterURI() |
Gets cluster URL.
|
org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol |
getDataNodeProxy(java.lang.String datanode) |
Copied from DFSAdmin.java.
|
protected int |
getDefaultTop() |
Returns the default top number of nodes.
|
protected java.util.Set<java.lang.String> |
getNodeList(java.lang.String listArg) |
Gets the node set from a file or a string.
|
protected java.util.List<DiskBalancerDataNode> |
getNodes(java.lang.String listArg) |
Returns a DiskBalancer Node list from the Cluster or null if not found.
|
protected org.apache.hadoop.fs.Path |
getOutputPath() |
Returns the output path where the plan and snapshot gets written.
|
int |
getTopNodes() |
Get top number of nodes to be processed.
|
protected org.apache.hadoop.fs.FSDataInputStream |
open(java.lang.String fileName) |
Returns an InputStream to read data.
|
protected int |
parseTopNodes(org.apache.commons.cli.CommandLine cmd,
org.apache.commons.text.TextStringBuilder result) |
Parse top number of nodes to be processed.
|
protected void |
populatePathNames(DiskBalancerDataNode node) |
Reads the Physical path of the disks we are balancing.
|
abstract void |
printHelp() |
Gets extended help for this command.
|
protected DiskBalancerCluster |
readClusterInfo(org.apache.commons.cli.CommandLine cmd) |
Process the URI and return the cluster with nodes setup.
|
protected void |
recordOutput(org.apache.commons.text.TextStringBuilder result,
java.lang.String outputLine) |
Put output line to log and string buffer.
|
void |
setCluster(DiskBalancerCluster newCluster) |
Set DiskBalancer cluster.
|
void |
setClusterURI(java.net.URI clusterURI) |
Set cluster URL.
|
protected void |
setNodesToProcess(java.util.List<DiskBalancerDataNode> nodes) |
Sets the list of Nodes to process.
|
protected void |
setNodesToProcess(DiskBalancerDataNode node) |
Sets the nodes to process.
|
protected void |
setOutputPath(java.lang.String path) |
Set up the output path.
|
void |
setTopNodes(int topNodes) |
Set top number of nodes to be processed.
|
protected void |
verifyCommandOptions(java.lang.String commandName,
org.apache.commons.cli.CommandLine cmd) |
Verifies if the command line options are sane.
|
public Command(org.apache.hadoop.conf.Configuration conf)
public Command(org.apache.hadoop.conf.Configuration conf,
java.io.PrintStream ps)
public void close()
throws java.io.IOException
The main goal is to delete the id file created in
NameNodeConnector#checkAndMarkRunning;
otherwise, it's not allowed to run multiple commands in a row.
Specified by: close in interface java.lang.AutoCloseable
Specified by: close in interface java.io.Closeable
Throws: java.io.IOException

public abstract void execute(org.apache.commons.cli.CommandLine cmd)
throws java.lang.Exception
Parameters: cmd - CommandLine
Throws: java.lang.Exception

public abstract void printHelp()
protected DiskBalancerCluster readClusterInfo(org.apache.commons.cli.CommandLine cmd) throws java.lang.Exception
Parameters: cmd - CommandLine
Throws: java.lang.Exception

protected void setOutputPath(java.lang.String path)
throws java.io.IOException
Parameters: path - Path or null to use default path.
Throws: java.io.IOException

protected void setNodesToProcess(DiskBalancerDataNode node)
Parameters: node - Node

protected void setNodesToProcess(java.util.List<DiskBalancerDataNode> nodes)
Parameters: nodes - Nodes.

protected java.util.Set<java.lang.String> getNodeList(java.lang.String listArg)
throws java.io.IOException
Parameters: listArg - String File URL or a comma separated list of node names.
Throws: java.io.IOException

protected java.util.List<DiskBalancerDataNode> getNodes(java.lang.String listArg) throws java.io.IOException
Parameters: listArg - String File URL or a comma separated list of node names.
Throws: java.io.IOException

protected void verifyCommandOptions(java.lang.String commandName,
org.apache.commons.cli.CommandLine cmd)
Parameters: commandName - Name of the command; cmd - Parsed Command Line

public java.net.URI getClusterURI()
public void setClusterURI(java.net.URI clusterURI)
Parameters: clusterURI - URL

public org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol getDataNodeProxy(java.lang.String datanode)
throws java.io.IOException
Parameters: datanode - dataNode.
Throws: java.io.IOException

protected org.apache.hadoop.fs.FSDataOutputStream create(java.lang.String fileName)
throws java.io.IOException
Parameters: fileName - fileName to open.
Throws: java.io.IOException

protected org.apache.hadoop.fs.FSDataInputStream open(java.lang.String fileName)
throws java.io.IOException
Throws: java.io.IOException

protected org.apache.hadoop.fs.Path getOutputPath()
protected void addValidCommandParameters(java.lang.String key,
java.lang.String desc)
Parameters: key - ; desc -

protected int getDefaultTop()
protected void recordOutput(org.apache.commons.text.TextStringBuilder result,
java.lang.String outputLine)
protected int parseTopNodes(org.apache.commons.cli.CommandLine cmd,
org.apache.commons.text.TextStringBuilder result)
throws java.lang.IllegalArgumentException
Throws: java.lang.IllegalArgumentException

protected void populatePathNames(DiskBalancerDataNode node) throws java.io.IOException
Parameters: node - Disk Balancer Node.
Throws: java.io.IOException

public void setTopNodes(int topNodes)
public int getTopNodes()
@VisibleForTesting public void setCluster(DiskBalancerCluster newCluster)
Copyright © 2008–2025 Apache Software Foundation. All rights reserved.