public class SleepJob
extends org.apache.hadoop.conf.Configured
implements org.apache.hadoop.util.Tool, org.apache.hadoop.mapred.Mapper<org.apache.hadoop.io.IntWritable,org.apache.hadoop.io.IntWritable,org.apache.hadoop.io.IntWritable,org.apache.hadoop.io.NullWritable>, org.apache.hadoop.mapred.Reducer<org.apache.hadoop.io.IntWritable,org.apache.hadoop.io.NullWritable,org.apache.hadoop.io.NullWritable,org.apache.hadoop.io.NullWritable>, org.apache.hadoop.mapred.Partitioner<org.apache.hadoop.io.IntWritable,org.apache.hadoop.io.NullWritable>
…numMappers * mapSleepTime / 100, so the job uses some disk space.

| Modifier and Type | Class and Description |
|---|---|
| static class | SleepJob.EmptySplit |
| static class | SleepJob.SleepInputFormat |
| Constructor and Description |
|---|
| SleepJob() |
| Modifier and Type | Method and Description |
|---|---|
| void | close() |
| void | configure(org.apache.hadoop.mapred.JobConf job) |
| int | getPartition(org.apache.hadoop.io.IntWritable k, org.apache.hadoop.io.NullWritable v, int numPartitions) |
| static void | main(String[] args) |
| void | map(org.apache.hadoop.io.IntWritable key, org.apache.hadoop.io.IntWritable value, org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.IntWritable,org.apache.hadoop.io.NullWritable> output, org.apache.hadoop.mapred.Reporter reporter) |
| void | reduce(org.apache.hadoop.io.IntWritable key, Iterator<org.apache.hadoop.io.NullWritable> values, org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.NullWritable,org.apache.hadoop.io.NullWritable> output, org.apache.hadoop.mapred.Reporter reporter) |
| int | run(int numMapper, int numReducer, long mapSleepTime, int mapSleepCount, long reduceSleepTime, int reduceSleepCount) |
| int | run(String[] args) |
| org.apache.hadoop.mapred.JobConf | setupJobConf(int numMapper, int numReducer, long mapSleepTime, int mapSleepCount, long reduceSleepTime, int reduceSleepCount) |
public int getPartition(org.apache.hadoop.io.IntWritable k,
                        org.apache.hadoop.io.NullWritable v,
                        int numPartitions)

Specified by: getPartition in interface org.apache.hadoop.mapred.Partitioner<org.apache.hadoop.io.IntWritable,org.apache.hadoop.io.NullWritable>

public void map(org.apache.hadoop.io.IntWritable key,
org.apache.hadoop.io.IntWritable value,
org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.IntWritable,org.apache.hadoop.io.NullWritable> output,
org.apache.hadoop.mapred.Reporter reporter)
throws IOException
Specified by: map in interface org.apache.hadoop.mapred.Mapper<org.apache.hadoop.io.IntWritable,org.apache.hadoop.io.IntWritable,org.apache.hadoop.io.IntWritable,org.apache.hadoop.io.NullWritable>
Throws: IOException

public void reduce(org.apache.hadoop.io.IntWritable key,
Iterator<org.apache.hadoop.io.NullWritable> values,
org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.NullWritable,org.apache.hadoop.io.NullWritable> output,
org.apache.hadoop.mapred.Reporter reporter)
throws IOException
Specified by: reduce in interface org.apache.hadoop.mapred.Reducer<org.apache.hadoop.io.IntWritable,org.apache.hadoop.io.NullWritable,org.apache.hadoop.io.NullWritable,org.apache.hadoop.io.NullWritable>
Throws: IOException

public void configure(org.apache.hadoop.mapred.JobConf job)
Specified by: configure in interface org.apache.hadoop.mapred.JobConfigurable

public void close()
throws IOException
Specified by: close in interface Closeable, close in interface AutoCloseable
Throws: IOException

public int run(int numMapper,
int numReducer,
long mapSleepTime,
int mapSleepCount,
long reduceSleepTime,
int reduceSleepCount)
throws IOException
Throws: IOException

public org.apache.hadoop.mapred.JobConf setupJobConf(int numMapper,
int numReducer,
long mapSleepTime,
int mapSleepCount,
long reduceSleepTime,
int reduceSleepCount)
Copyright © 2016 Apache Software Foundation. All Rights Reserved.