Packages that use Counters.Counter

| Package |
|---|
| org.apache.hadoop.mapred |
| org.apache.hadoop.mapreduce.task.reduce |
Uses of Counters.Counter in org.apache.hadoop.mapred

Fields in org.apache.hadoop.mapred declared as Counters.Counter

| Modifier and Type | Field and Description |
|---|---|
| protected Counters.Counter | Task.failedShuffleCounter |
| protected Counters.Counter | Task.CombinerRunner.inputCounter |
| protected Counters.Counter | Task.mergedMapOutputsCounter |
| protected Counters.Counter | Task.spilledRecordsCounter |
Methods in org.apache.hadoop.mapred that return Counters.Counter

| Modifier and Type | Method and Description |
|---|---|
| Counters.Counter | Counters.Group.addCounter(String name, String displayName, long value) |
| Counters.Counter | Counters.Group.findCounter(String counterName) |
| Counters.Counter | Counters.Group.findCounter(String counterName, boolean create) |
| Counters.Counter | Counters.findCounter(String group, int id, String name). Deprecated: use Counters.findCounter(String, String) instead. |
| Counters.Counter | Counters.findCounter(String group, String name) |
| Counters.Counter | Counters.Group.findCounter(String counterName, String displayName) |
| Counters.Counter | Reporter.getCounter(Enum<?> name). Get the Counters.Counter of the given group with the given name. |
| Counters.Counter | Task.TaskReporter.getCounter(Enum<?> name) |
| Counters.Counter | Counters.Group.getCounter(int id, String name). Deprecated: use Counters.Group.findCounter(String) instead. |
| Counters.Counter | Reporter.getCounter(String group, String name). Get the Counters.Counter of the given group with the given name. |
| Counters.Counter | Task.TaskReporter.getCounter(String group, String name) |
| Counters.Counter | Counters.Group.getCounterForName(String name). Get the counter for the given name and create it if it doesn't exist. |
| Counters.Counter | ShuffleConsumerPlugin.Context.getFailedShuffleCounter() |
| Counters.Counter | ShuffleConsumerPlugin.Context.getMergedMapOutputsCounter() |
| Counters.Counter | ShuffleConsumerPlugin.Context.getReduceCombineInputCounter() |
| Counters.Counter | ShuffleConsumerPlugin.Context.getReduceShuffleBytes() |
| Counters.Counter | ShuffleConsumerPlugin.Context.getShuffledMapsCounter() |
| Counters.Counter | ShuffleConsumerPlugin.Context.getSpilledRecordsCounter() |
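In user code, these counters are most often reached through the Reporter passed to a running task. Below is a minimal sketch using the classic org.apache.hadoop.mapred API; the mapper class, group name, and counter name are invented for illustration.

```java
import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

// Hypothetical mapper: the class, group name, and counter name are made up.
public class LineCountingMapper extends MapReduceBase
    implements Mapper<LongWritable, Text, Text, LongWritable> {

  public void map(LongWritable key, Text value,
                  OutputCollector<Text, LongWritable> output,
                  Reporter reporter) throws IOException {
    // Reporter.getCounter(String, String) returns the Counters.Counter for the
    // named group and counter, creating it on first use.
    Counters.Counter lines = reporter.getCounter("MyApp", "LINES_SEEN");
    lines.increment(1);

    output.collect(value, new LongWritable(1));
  }
}
```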
Methods in org.apache.hadoop.mapred that return types with arguments of type Counters.Counter

| Modifier and Type | Method and Description |
|---|---|
| CounterGroupBase<Counters.Counter> | Counters.Group.getUnderlyingGroup() |
| Iterator<Counters.Counter> | Counters.Group.iterator() |
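Because Counters.Group.iterator() yields Counters.Counter instances, a group can be walked with an enhanced for loop. A small sketch, assuming a finished old-API job is available as a RunningJob; the output format is arbitrary.

```java
import java.io.IOException;

import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.RunningJob;

// Hypothetical helper: dump every group and counter of a finished job.
public class CounterDump {
  public static void dump(RunningJob job) throws IOException {
    Counters counters = job.getCounters();
    for (String groupName : counters.getGroupNames()) {
      Counters.Group group = counters.getGroup(groupName);
      // Counters.Group.iterator() yields each Counters.Counter in the group,
      // so the group can be used directly in a for-each loop.
      for (Counters.Counter counter : group) {
        System.out.printf("%s\t%s\t%d%n",
            group.getDisplayName(), counter.getDisplayName(), counter.getValue());
      }
    }
  }
}
```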
Methods in org.apache.hadoop.mapred with parameters of type Counters.Counter

| Modifier and Type | Method and Description |
|---|---|
| void | Counters.Group.addCounter(Counters.Counter counter) |
| boolean | Counters.Counter.contentEquals(Counters.Counter counter). Deprecated. |
| static | Task.CombinerRunner.create(JobConf job, TaskAttemptID taskId, Counters.Counter inputCounter, Task.TaskReporter reporter, OutputCommitter committer) |
| static | Merger.merge(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.fs.FileSystem fs, Class<K> keyClass, Class<V> valueClass, org.apache.hadoop.io.compress.CompressionCodec codec, List<Merger.Segment<K,V>> segments, int mergeFactor, int inMemSegments, org.apache.hadoop.fs.Path tmpDir, org.apache.hadoop.io.RawComparator<K> comparator, org.apache.hadoop.util.Progressable reporter, boolean sortSegments, Counters.Counter readsCounter, Counters.Counter writesCounter, org.apache.hadoop.util.Progress mergePhase) |
| static | Merger.merge(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.fs.FileSystem fs, Class<K> keyClass, Class<V> valueClass, org.apache.hadoop.io.compress.CompressionCodec codec, List<Merger.Segment<K,V>> segments, int mergeFactor, org.apache.hadoop.fs.Path tmpDir, org.apache.hadoop.io.RawComparator<K> comparator, org.apache.hadoop.util.Progressable reporter, boolean sortSegments, Counters.Counter readsCounter, Counters.Counter writesCounter, org.apache.hadoop.util.Progress mergePhase) |
| static | Merger.merge(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.fs.FileSystem fs, Class<K> keyClass, Class<V> valueClass, org.apache.hadoop.io.compress.CompressionCodec codec, org.apache.hadoop.fs.Path[] inputs, boolean deleteInputs, int mergeFactor, org.apache.hadoop.fs.Path tmpDir, org.apache.hadoop.io.RawComparator<K> comparator, org.apache.hadoop.util.Progressable reporter, Counters.Counter readsCounter, Counters.Counter writesCounter, Counters.Counter mergedMapOutputsCounter, org.apache.hadoop.util.Progress mergePhase) |
| static | Merger.merge(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.fs.FileSystem fs, Class<K> keyClass, Class<V> valueClass, org.apache.hadoop.io.compress.CompressionCodec codec, org.apache.hadoop.fs.Path[] inputs, boolean deleteInputs, int mergeFactor, org.apache.hadoop.fs.Path tmpDir, org.apache.hadoop.io.RawComparator<K> comparator, org.apache.hadoop.util.Progressable reporter, Counters.Counter readsCounter, Counters.Counter writesCounter, org.apache.hadoop.util.Progress mergePhase) |
| static | Merger.merge(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.fs.FileSystem fs, Class<K> keyClass, Class<V> valueClass, List<Merger.Segment<K,V>> segments, int mergeFactor, int inMemSegments, org.apache.hadoop.fs.Path tmpDir, org.apache.hadoop.io.RawComparator<K> comparator, org.apache.hadoop.util.Progressable reporter, boolean sortSegments, Counters.Counter readsCounter, Counters.Counter writesCounter, org.apache.hadoop.util.Progress mergePhase) |
| static | Merger.merge(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.fs.FileSystem fs, Class<K> keyClass, Class<V> valueClass, List<Merger.Segment<K,V>> segments, int mergeFactor, org.apache.hadoop.fs.Path tmpDir, org.apache.hadoop.io.RawComparator<K> comparator, org.apache.hadoop.util.Progressable reporter, boolean sortSegments, Counters.Counter readsCounter, Counters.Counter writesCounter, org.apache.hadoop.util.Progress mergePhase) |
| static | Merger.merge(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.fs.FileSystem fs, Class<K> keyClass, Class<V> valueClass, List<Merger.Segment<K,V>> segments, int mergeFactor, org.apache.hadoop.fs.Path tmpDir, org.apache.hadoop.io.RawComparator<K> comparator, org.apache.hadoop.util.Progressable reporter, Counters.Counter readsCounter, Counters.Counter writesCounter, org.apache.hadoop.util.Progress mergePhase) |
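Most of the methods above are framework plumbing, but the Path[]-based Merger.merge overload illustrates how the reads, writes, and merged-map-outputs counters are threaded through a merge. A hedged sketch follows: it assumes the merge returns a RawKeyValueIterator, that the input paths already hold IFile-format data, and that the group and counter names, merge factor, and comparator choice are placeholders.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.Merger;
import org.apache.hadoop.mapred.RawKeyValueIterator;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.util.Progress;

// Hedged sketch: not the framework's own usage, just one plausible invocation.
public class MergeSketch {
  public static RawKeyValueIterator merge(Configuration conf, Path[] inputs,
                                          Path tmpDir) throws Exception {
    FileSystem fs = FileSystem.getLocal(conf);

    // Counters the merge will increment as it reads and writes records.
    Counters counters = new Counters();
    Counters.Counter reads = counters.getGroup("sketch").getCounterForName("READS");
    Counters.Counter writes = counters.getGroup("sketch").getCounterForName("WRITES");
    Counters.Counter merged = counters.getGroup("sketch").getCounterForName("MERGED");

    // Parameter order follows the Path[] overload listed above; the return
    // type RawKeyValueIterator is assumed.
    return Merger.merge(conf, fs,
        Text.class, Text.class,
        null,                               // no compression codec
        inputs, false,                      // keep the input files after merging
        10,                                 // merge factor (streams merged at once)
        tmpDir,
        WritableComparator.get(Text.class), // raw comparator for the keys
        Reporter.NULL,                      // Progressable for progress reporting
        reads, writes, merged,
        new Progress());                    // merge-phase progress object
  }
}
```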
Method parameters in org.apache.hadoop.mapred with type arguments of type Counters.Counter

| Modifier and Type | Method and Description |
|---|---|
| void | Counters.Group.incrAllCounters(CounterGroupBase<Counters.Counter> rightGroup) |
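Counters.Group.incrAllCounters, combined with getUnderlyingGroup() from the earlier table, can fold one group's counters into another, for example when aggregating counters from several sources. A minimal sketch; the group name is invented.

```java
import org.apache.hadoop.mapred.Counters;

// Minimal sketch: fold the counters of one group into another.
public class GroupMergeSketch {
  public static void addInto(Counters target, Counters source) {
    Counters.Group targetGroup = target.getGroup("MyApp");
    Counters.Group sourceGroup = source.getGroup("MyApp");

    // incrAllCounters adds every counter of the given group into this one;
    // getUnderlyingGroup() exposes the source group as a
    // CounterGroupBase<Counters.Counter>, the type incrAllCounters expects.
    targetGroup.incrAllCounters(sourceGroup.getUnderlyingGroup());
  }
}
```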
Constructors in org.apache.hadoop.mapred with parameters of type Counters.Counter

| Constructor and Description |
|---|
| IFile.Reader(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path file, org.apache.hadoop.io.compress.CompressionCodec codec, Counters.Counter readsCounter). Construct an IFile Reader. |
| IFile.Reader(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.fs.FSDataInputStream in, long length, org.apache.hadoop.io.compress.CompressionCodec codec, Counters.Counter readsCounter). Construct an IFile Reader. |
| IFile.Writer(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path file, Class<K> keyClass, Class<V> valueClass, org.apache.hadoop.io.compress.CompressionCodec codec, Counters.Counter writesCounter) |
| IFile.Writer(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.fs.FSDataOutputStream out, Class<K> keyClass, Class<V> valueClass, org.apache.hadoop.io.compress.CompressionCodec codec, Counters.Counter writesCounter) |
| IFile.Writer(Counters.Counter writesCounter) |
| Merger.Segment(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path file, org.apache.hadoop.io.compress.CompressionCodec codec, boolean preserve, Counters.Counter mergedMapOutputsCounter) |
| Merger.Segment(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path file, org.apache.hadoop.io.compress.CompressionCodec codec, boolean preserve, Counters.Counter mergedMapOutputsCounter, long rawDataLength) |
| Merger.Segment(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path file, long segmentOffset, long segmentLength, org.apache.hadoop.io.compress.CompressionCodec codec, boolean preserve, Counters.Counter mergedMapOutputsCounter) |
| Merger.Segment(IFile.Reader<K,V> reader, boolean preserve, Counters.Counter mapOutputsCounter) |
| ShuffleConsumerPlugin.Context(TaskAttemptID reduceId, JobConf jobConf, org.apache.hadoop.fs.FileSystem localFS, TaskUmbilicalProtocol umbilical, org.apache.hadoop.fs.LocalDirAllocator localDirAllocator, Reporter reporter, org.apache.hadoop.io.compress.CompressionCodec codec, Class<? extends Reducer> combinerClass, Task.CombineOutputCollector<K,V> combineCollector, Counters.Counter spilledRecordsCounter, Counters.Counter reduceCombineInputCounter, Counters.Counter shuffledMapsCounter, Counters.Counter reduceShuffleBytes, Counters.Counter failedShuffleCounter, Counters.Counter mergedMapOutputsCounter, TaskStatus status, org.apache.hadoop.util.Progress copyPhase, org.apache.hadoop.util.Progress mergePhase, Task reduceTask, MapOutputFile mapOutputFile) |
| Task.CombineOutputCollector(Counters.Counter outCounter, org.apache.hadoop.util.Progressable progressable, org.apache.hadoop.conf.Configuration conf) |
| Task.CombineValuesIterator(RawKeyValueIterator in, org.apache.hadoop.io.RawComparator<KEY> comparator, Class<KEY> keyClass, Class<VALUE> valClass, org.apache.hadoop.conf.Configuration conf, Reporter reporter, Counters.Counter combineInputCounter) |
| Task.OldCombinerRunner(Class<? extends Reducer<K,V,K,V>> cls, JobConf conf, Counters.Counter inputCounter, Task.TaskReporter reporter) |
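The IFile.Reader and IFile.Writer constructors take a Counters.Counter so the framework can account for records read or written. A hedged sketch of the Writer constructor that takes a FileSystem and Path, assuming a local file system, no compression codec, and that Writer.append and close behave as in the framework sources; the names are invented and IFile is an internal format.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.IFile;

// Hedged sketch: count records written to an IFile through a Counters.Counter.
public class IFileWriteSketch {
  public static void write(Configuration conf, Path file) throws Exception {
    FileSystem fs = FileSystem.getLocal(conf);

    Counters counters = new Counters();
    Counters.Counter writes =
        counters.getGroup("sketch").getCounterForName("IFILE_WRITES");

    // Constructor from the table above: (conf, fs, file, keyClass, valueClass,
    // codec, writesCounter); a null codec means no compression.
    IFile.Writer<Text, Text> writer =
        new IFile.Writer<Text, Text>(conf, fs, file, Text.class, Text.class,
                                     null, writes);
    try {
      writer.append(new Text("key"), new Text("value"));
    } finally {
      writer.close();
    }
  }
}
```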
Uses of Counters.Counter in org.apache.hadoop.mapreduce.task.reduce

Constructors in org.apache.hadoop.mapreduce.task.reduce with parameters of type Counters.Counter

| Constructor and Description |
|---|
| MergeManagerImpl(TaskAttemptID reduceId, JobConf jobConf, org.apache.hadoop.fs.FileSystem localFS, org.apache.hadoop.fs.LocalDirAllocator localDirAllocator, Reporter reporter, org.apache.hadoop.io.compress.CompressionCodec codec, Class<? extends Reducer> combinerClass, Task.CombineOutputCollector<K,V> combineCollector, Counters.Counter spilledRecordsCounter, Counters.Counter reduceCombineInputCounter, Counters.Counter mergedMapOutputsCounter, ExceptionReporter exceptionReporter, org.apache.hadoop.util.Progress mergePhase, MapOutputFile mapOutputFile) |
| ShuffleSchedulerImpl(JobConf job, TaskStatus status, TaskAttemptID reduceId, ExceptionReporter reporter, org.apache.hadoop.util.Progress progress, Counters.Counter shuffledMapsCounter, Counters.Counter reduceShuffleBytes, Counters.Counter failedShuffleCounter) |