import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class WordCount {

    public static class TokenizerMapper extends
            Mapper<Object, Text, Text, IntWritable> {

        private static final IntWritable one = new IntWritable(1);
        private Text word = new Text();

        @Override
        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            // Split the input line on whitespace and emit <word, 1> per token.
            StringTokenizer itr = new StringTokenizer(value.toString());
            while (itr.hasMoreTokens()) {
                word.set(itr.nextToken());
                context.write(word, one);
            }
        }
    }

    public static class IntSumReducer extends
            Reducer<Text, IntWritable, Text, IntWritable> {

        private IntWritable result = new IntWritable();

        @Override
        public void reduce(Text key, Iterable<IntWritable> values,
                Context context) throws IOException, InterruptedException {
            // Sum all counts for this word and emit <word, total>.
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            result.set(sum);
            context.write(key, result);
        }
    }

    public static void main(String[] args) throws IOException,
            ClassNotFoundException, InterruptedException {
        Configuration conf = new Configuration();
        // Submit to the remote JobTracker instead of running locally, and
        // point at the job jar so it can be shipped to the cluster
        // (Hadoop 1.x properties).
        conf.set("mapred.job.tracker", "master:9001");
        conf.set("mapred.jar", "hadoop-test.jar");
        // Input and output paths are hard-coded here rather than taken
        // from the command line.
        String[] ars = new String[] { "hdfs://master:9000/usr/hadoop/input",
                "hdfs://master:9000/usr/hadoop/newout1" };
        String[] otherArgs = new GenericOptionsParser(conf, ars)
                .getRemainingArgs();
        if (otherArgs.length != 2) {
            System.err.println("Usage: wordcount <in> <out>");
            System.exit(2);
        }
        Job job = new Job(conf, "wordcount");
        job.setJarByClass(WordCount.class);
        job.setMapperClass(TokenizerMapper.class);
        // The reducer doubles as a combiner: it pre-aggregates counts on
        // the map side to cut shuffle traffic.
        job.setCombinerClass(IntSumReducer.class);
        job.setReducerClass(IntSumReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
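To check the result without the web UI, the output can be read back through the HDFS Java API. Below is a minimal sketch (not part of the original job), assuming the single reduce task wrote part-r-00000 under the hdfs://master:9000/usr/hadoop/newout1 directory used above:

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReadWordCountOutput {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Connect to the same NameNode the job used (assumption: master:9000).
        FileSystem fs = FileSystem.get(URI.create("hdfs://master:9000"), conf);
        // With one reduce task, the new-API reducer writes part-r-00000.
        Path result = new Path("/usr/hadoop/newout1/part-r-00000");
        try (BufferedReader reader =
                new BufferedReader(new InputStreamReader(fs.open(result)))) {
            String line;
            while ((line = reader.readLine()) != null) {
                System.out.println(line); // each line: word \t count
            }
        }
    }
}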
The job finally ran successfully; the console output was:
14/10/18 10:12:27 INFO input.FileInputFormat: Total input paths to process : 2
14/10/18 10:12:27 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
14/10/18 10:12:27 WARN snappy.LoadSnappy: Snappy native library not loaded
14/10/18 10:12:27 INFO mapred.JobClient: Running job: job_201410181754_0004
14/10/18 10:12:28 INFO mapred.JobClient: map 0% reduce 0%
14/10/18 10:12:32 INFO mapred.JobClient: map 100% reduce 0%
14/10/18 10:12:39 INFO mapred.JobClient: map 100% reduce 33%
14/10/18 10:12:40 INFO mapred.JobClient: map 100% reduce 100%
14/10/18 10:12:40 INFO mapred.JobClient: Job complete: job_201410181754_0004
14/10/18 10:12:40 INFO mapred.JobClient: Counters: 29
14/10/18 10:12:40 INFO mapred.JobClient: Job Counters
14/10/18 10:12:40 INFO mapred.JobClient: Launched reduce tasks=1
14/10/18 10:12:40 INFO mapred.JobClient: SLOTS_MILLIS_MAPS=4614
14/10/18 10:12:40 INFO mapred.JobClient: Total time spent by all reduces waiting after reserving slots (ms)=0
14/10/18 10:12:40 INFO mapred.JobClient: Total time spent by all maps waiting after reserving slots (ms)=0
14/10/18 10:12:40 INFO mapred.JobClient: Launched map tasks=2
14/10/18 10:12:40 INFO mapred.JobClient: Data-local map tasks=2
14/10/18 10:12:40 INFO mapred.JobClient: SLOTS_MILLIS_REDUCES=8329
14/10/18 10:12:40 INFO mapred.JobClient: File Output Format Counters
14/10/18 10:12:40 INFO mapred.JobClient: Bytes Written=31
14/10/18 10:12:40 INFO mapred.JobClient: FileSystemCounters
14/10/18 10:12:40 INFO mapred.JobClient: FILE_BYTES_READ=75
14/10/18 10:12:40 INFO mapred.JobClient: HDFS_BYTES_READ=264
14/10/18 10:12:40 INFO mapred.JobClient: FILE_BYTES_WRITTEN=154204
14/10/18 10:12:40 INFO mapred.JobClient: HDFS_BYTES_WRITTEN=31
14/10/18 10:12:40 INFO mapred.JobClient: File Input Format Counters
14/10/18 10:12:40 INFO mapred.JobClient: Bytes Read=44
14/10/18 10:12:40 INFO mapred.JobClient: Map-Reduce Framework
14/10/18 10:12:40 INFO mapred.JobClient: Map output materialized bytes=81
14/10/18 10:12:40 INFO mapred.JobClient: Map input records=2
14/10/18 10:12:40 INFO mapred.JobClient: Reduce shuffle bytes=81
14/10/18 10:12:40 INFO mapred.JobClient: Spilled Records=12
14/10/18 10:12:40 INFO mapred.JobClient: Map output bytes=78
14/10/18 10:12:40 INFO mapred.JobClient: CPU time spent (ms)=1090
14/10/18 10:12:40 INFO mapred.JobClient: Total committed heap usage (bytes)=241246208
14/10/18 10:12:40 INFO mapred.JobClient: Combine input records=8
14/10/18 10:12:40 INFO mapred.JobClient: SPLIT_RAW_BYTES=220
14/10/18 10:12:40 INFO mapred.JobClient: Reduce input records=6
14/10/18 10:12:40 INFO mapred.JobClient: Reduce input groups=4
14/10/18 10:12:40 INFO mapred.JobClient: Combine output records=6
14/10/18 10:12:40 INFO mapred.JobClient: Physical memory (bytes) snapshot=311574528
14/10/18 10:12:40 INFO mapred.JobClient: Reduce output records=4
14/10/18 10:12:40 INFO mapred.JobClient: Virtual memory (bytes) snapshot=1034760192
14/10/18 10:12:40 INFO mapred.JobClient: Map output records=8
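The counters also confirm that the combiner did its job: the two map tasks emitted 8 records in total (Map output records=8), the combiner collapsed them to 6 before the shuffle (Combine input records=8, Combine output records=6), and the reducer merged those 6 records into 4 distinct words (Reduce input groups=4, Reduce output records=4), which matches the 4 lines written to the output file.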