Hadoop: Implementing Word Count with MapReduce

import java.io.IOException;
import java.util.*;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.*;

public class WordCount {
    /*
     * Counts the occurrences of each word in the input:
     * 1. The map method turns the input into <key1, value1> pairs.
     * 2. The reduce method receives the mapper output and adds up the
     *    values that share the same key1, yielding each word's count.
     * 3. The result is written out to HDFS.
     */

    // Driver: configures the job and submits it
    public static void main(String[] args) throws Exception {
        JobConf conf = new JobConf(WordCount.class);
        conf.setJobName("WordCount");

        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(IntWritable.class);

        conf.setMapperClass(Map.class);
        conf.setReducerClass(Reduce.class);

        conf.setInputFormat(TextInputFormat.class);
        conf.setOutputFormat(TextOutputFormat.class);

        FileInputFormat.setInputPaths(conf, new Path(args[0]));
        FileOutputFormat.setOutputPath(conf, new Path(args[1]));

        JobClient.runJob(conf);
    }

    // Mapper: emits <word, 1> for every token in the input line
    public static class Map extends MapReduceBase
            implements Mapper<LongWritable, Text, Text, IntWritable> {
        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        public void map(LongWritable key, Text value,
                OutputCollector<Text, IntWritable> output, Reporter reporter)
                throws IOException {
            String line = value.toString();
            StringTokenizer tokenizer = new StringTokenizer(line);
            while (tokenizer.hasMoreTokens()) {
                word.set(tokenizer.nextToken());
                output.collect(word, one);
            }
        }
    }

    // Reducer: sums the counts collected for each word
    public static class Reduce extends MapReduceBase
            implements Reducer<Text, IntWritable, Text, IntWritable> {
        public void reduce(Text key, Iterator<IntWritable> values,
                OutputCollector<Text, IntWritable> output, Reporter reporter)
                throws IOException {
            int sum = 0;
            while (values.hasNext()) {
                sum += values.next().get();
            }
            output.collect(key, new IntWritable(sum));
        }
    }
}
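To run the job, compile the class against the Hadoop classpath, package it into a jar, and submit it with the hadoop command. The jar name and HDFS paths below are placeholders; adjust them to your environment:

    hadoop jar wordcount.jar WordCount /user/hadoop/input /user/hadoop/output

Here args[0] and args[1] become the input and output paths handed to FileInputFormat and FileOutputFormat. Note that the output directory must not already exist, or Hadoop will refuse to start the job.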
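To make the three steps in the header comment concrete, here is a minimal stand-alone sketch that simulates the same map, shuffle, and reduce phases in memory. It is plain Java with no Hadoop dependency; the class name and sample input are made up for illustration:

import java.util.*;

// Illustration only: simulates map -> shuffle -> reduce for word count in memory.
public class WordCountLocalSim {
    public static void main(String[] args) {
        String[] lines = { "hello hadoop", "hello world" }; // sample input lines

        // Map phase: emit <word, 1> for every token, as the Mapper above does
        List<AbstractMap.SimpleEntry<String, Integer>> mapped = new ArrayList<>();
        for (String line : lines) {
            StringTokenizer tokenizer = new StringTokenizer(line);
            while (tokenizer.hasMoreTokens()) {
                mapped.add(new AbstractMap.SimpleEntry<>(tokenizer.nextToken(), 1));
            }
        }

        // Shuffle phase: group the values by key (the framework does this
        // automatically between map and reduce)
        Map<String, List<Integer>> grouped = new TreeMap<>();
        for (AbstractMap.SimpleEntry<String, Integer> e : mapped) {
            grouped.computeIfAbsent(e.getKey(), k -> new ArrayList<>()).add(e.getValue());
        }

        // Reduce phase: sum the grouped values, as the Reducer above does
        for (Map.Entry<String, List<Integer>> e : grouped.entrySet()) {
            int sum = 0;
            for (int v : e.getValue()) {
                sum += v;
            }
            System.out.println(e.getKey() + "\t" + sum);
        }
    }
}

Running it prints hadoop 1, hello 2, world 1 as tab-separated lines, which matches the format TextOutputFormat writes to the output directory.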