All Implemented Interfaces:
TaskAttempt, org.apache.hadoop.yarn.event.EventHandler<TaskAttemptEvent>

Direct Known Subclasses:
MapTaskAttemptImpl, ReduceTaskAttemptImpl

public abstract class TaskAttemptImpl
extends java.lang.Object
implements TaskAttempt, org.apache.hadoop.yarn.event.EventHandler<TaskAttemptEvent>
| Modifier and Type | Field | Description |
|---|---|---|
protected org.apache.hadoop.mapred.JobConf |
conf |
|
org.apache.hadoop.yarn.api.records.Container |
container |
|
protected java.util.Set<java.lang.String> |
dataLocalHosts |
|
protected java.util.Set<java.lang.String> |
dataLocalRacks |
|
protected org.apache.hadoop.yarn.event.EventHandler |
eventHandler |
|
protected org.apache.hadoop.fs.Path |
jobFile |
|
protected int |
partition |
|
protected static java.util.Map<org.apache.hadoop.mapreduce.v2.api.records.TaskType,org.apache.hadoop.yarn.api.records.Resource> |
RESOURCE_REQUEST_CACHE |
| Constructor | Description |
|---|---|
TaskAttemptImpl(org.apache.hadoop.mapreduce.v2.api.records.TaskId taskId,
int i,
org.apache.hadoop.yarn.event.EventHandler eventHandler,
TaskAttemptListener taskAttemptListener,
org.apache.hadoop.fs.Path jobFile,
int partition,
org.apache.hadoop.mapred.JobConf conf,
java.lang.String[] dataLocalHosts,
org.apache.hadoop.security.token.Token<org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier> jobToken,
org.apache.hadoop.security.Credentials credentials,
org.apache.hadoop.yarn.util.Clock clock,
AppContext appContext) |
| Modifier and Type | Method | Description |
|---|---|---|
protected abstract org.apache.hadoop.mapred.Task |
createRemoteTask() |
|
org.apache.hadoop.yarn.api.records.ContainerId |
getAssignedContainerID() |
|
java.lang.String |
getAssignedContainerMgrAddress() |
|
org.apache.hadoop.mapreduce.v2.api.records.Avataar |
getAvataar() |
|
org.apache.hadoop.mapreduce.Counters |
getCounters() |
|
java.util.List<java.lang.String> |
getDiagnostics() |
|
protected static org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState |
getExternalState(TaskAttemptStateInternal smState) |
|
long |
getFinishTime() |
|
org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId |
getID() |
|
TaskAttemptStateInternal |
getInternalState() |
|
long |
getLaunchTime() |
|
org.apache.hadoop.mapreduce.v2.api.records.Locality |
getLocality() |
|
java.lang.String |
getNodeHttpAddress() |
If a container is assigned, returns the node's address; otherwise null.
|
org.apache.hadoop.yarn.api.records.NodeId |
getNodeId() |
|
java.lang.String |
getNodeRackName() |
If a container is assigned, returns the node's rack name; otherwise null.
|
org.apache.hadoop.mapreduce.v2.api.records.Phase |
getPhase() |
|
float |
getProgress() |
|
org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport |
getReport() |
|
java.util.Map<java.lang.String,java.nio.ByteBuffer> |
getServicesMetaData() |
Get ServiceMetadata — not sure if we need to duplicate it here. The only concern
is whether the data was already partially read, but so far we read the data only
while converting to proto and back, or while doing read/write for
TaskCompletionEvent.
|
long |
getShuffleFinishTime() |
|
int |
getShufflePort() |
|
long |
getSortFinishTime() |
|
org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState |
getState() |
|
void |
handle(TaskAttemptEvent event) |
|
boolean |
isFinished() |
Whether the attempt has reached its final state.
|
protected boolean |
isIP(java.lang.String src) |
|
boolean |
isTaskFailFast() |
|
TaskAttemptStateInternal |
recover(org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo taInfo,
org.apache.hadoop.mapreduce.OutputCommitter committer,
boolean recoverOutput) |
|
protected java.lang.String |
resolveHost(java.lang.String src) |
|
protected java.util.Set<java.lang.String> |
resolveHosts(java.lang.String[] src) |
|
void |
setAvataar(org.apache.hadoop.mapreduce.v2.api.records.Avataar avataar) |
|
void |
setLocality(org.apache.hadoop.mapreduce.v2.api.records.Locality locality) |
|
void |
setTaskFailFast(boolean failFast) |
@VisibleForTesting protected static final java.util.Map<org.apache.hadoop.mapreduce.v2.api.records.TaskType,org.apache.hadoop.yarn.api.records.Resource> RESOURCE_REQUEST_CACHE
protected final org.apache.hadoop.mapred.JobConf conf
protected final org.apache.hadoop.fs.Path jobFile
protected final int partition
protected org.apache.hadoop.yarn.event.EventHandler eventHandler
protected java.util.Set<java.lang.String> dataLocalHosts
protected java.util.Set<java.lang.String> dataLocalRacks
@VisibleForTesting public org.apache.hadoop.yarn.api.records.Container container
public TaskAttemptImpl(org.apache.hadoop.mapreduce.v2.api.records.TaskId taskId,
int i,
org.apache.hadoop.yarn.event.EventHandler eventHandler,
TaskAttemptListener taskAttemptListener,
org.apache.hadoop.fs.Path jobFile,
int partition,
org.apache.hadoop.mapred.JobConf conf,
java.lang.String[] dataLocalHosts,
org.apache.hadoop.security.token.Token<org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier> jobToken,
org.apache.hadoop.security.Credentials credentials,
org.apache.hadoop.yarn.util.Clock clock,
AppContext appContext)
public org.apache.hadoop.yarn.api.records.ContainerId getAssignedContainerID()
Specified by: getAssignedContainerID in interface TaskAttempt

public java.lang.String getAssignedContainerMgrAddress()
Specified by: getAssignedContainerMgrAddress in interface TaskAttempt

public long getLaunchTime()
Specified by: getLaunchTime in interface TaskAttempt

public long getFinishTime()
Specified by: getFinishTime in interface TaskAttempt

public long getShuffleFinishTime()
Specified by: getShuffleFinishTime in interface TaskAttempt

public long getSortFinishTime()
Specified by: getSortFinishTime in interface TaskAttempt

public int getShufflePort()
Specified by: getShufflePort in interface TaskAttempt

public org.apache.hadoop.yarn.api.records.NodeId getNodeId()
Specified by: getNodeId in interface TaskAttempt

public java.lang.String getNodeHttpAddress()
Specified by: getNodeHttpAddress in interface TaskAttempt

public java.lang.String getNodeRackName()
Specified by: getNodeRackName in interface TaskAttempt

protected abstract org.apache.hadoop.mapred.Task createRemoteTask()

public org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId getID()
Specified by: getID in interface TaskAttempt

public boolean isFinished()
Description copied from interface: TaskAttempt
Specified by: isFinished in interface TaskAttempt

public org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport getReport()
Specified by: getReport in interface TaskAttempt

public java.util.List<java.lang.String> getDiagnostics()
Specified by: getDiagnostics in interface TaskAttempt

public org.apache.hadoop.mapreduce.Counters getCounters()
Specified by: getCounters in interface TaskAttempt

public float getProgress()
Specified by: getProgress in interface TaskAttempt

public org.apache.hadoop.mapreduce.v2.api.records.Phase getPhase()
Specified by: getPhase in interface TaskAttempt

public org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState getState()
Specified by: getState in interface TaskAttempt

public void handle(TaskAttemptEvent event)
Specified by: handle in interface org.apache.hadoop.yarn.event.EventHandler<TaskAttemptEvent>

@VisibleForTesting public TaskAttemptStateInternal getInternalState()
public org.apache.hadoop.mapreduce.v2.api.records.Locality getLocality()
public void setLocality(org.apache.hadoop.mapreduce.v2.api.records.Locality locality)
public org.apache.hadoop.mapreduce.v2.api.records.Avataar getAvataar()
public void setAvataar(org.apache.hadoop.mapreduce.v2.api.records.Avataar avataar)
public void setTaskFailFast(boolean failFast)
public boolean isTaskFailFast()
public TaskAttemptStateInternal recover(org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo taInfo, org.apache.hadoop.mapreduce.OutputCommitter committer, boolean recoverOutput)
protected static org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState getExternalState(TaskAttemptStateInternal smState)
protected java.util.Set<java.lang.String> resolveHosts(java.lang.String[] src)
protected java.lang.String resolveHost(java.lang.String src)
protected boolean isIP(java.lang.String src)
public java.util.Map<java.lang.String,java.nio.ByteBuffer> getServicesMetaData()
Copyright © 2008–2025 Apache Software Foundation. All rights reserved.