1. Create a file of student scores
vi data
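The Mapper in step 3 splits each line on tabs and expects a student name followed by four numeric fields. Hypothetical contents for data (names and scores are made up for illustration; the interpretation as three subject scores plus a total is an assumption, see the ScoreSortEntity sketch below):
zhangsan	80	92	76	248
lisi	95	83	88	266
wangwu	60	74	90	224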
2. Upload the data file to HDFS
hdfs dfs -mkdir -p /hadoop
hdfs dfs -put data /hadoop/data
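To confirm the upload succeeded, the file can be listed and printed back with the standard HDFS shell commands:
hdfs dfs -ls /hadoop
hdfs dfs -cat /hadoop/data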
3. Implement the score-sorting code in Eclipse
(1)ScoreSortEntity.class
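The listing for ScoreSortEntity is not included above. A minimal sketch consistent with how the Mapper constructs it (a name plus four int fields) might look like the following; the field names and the descending-by-total ordering are assumptions, so adapt them to your own data:
package demo2;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.WritableComparable;

public class ScoreSortEntity implements WritableComparable<ScoreSortEntity> {

    private String name;
    private int chinese;
    private int math;
    private int english;
    private int total;

    // A no-argument constructor is required so Hadoop can deserialize the object.
    public ScoreSortEntity() {
    }

    public ScoreSortEntity(String name, int chinese, int math, int english, int total) {
        this.name = name;
        this.chinese = chinese;
        this.math = math;
        this.english = english;
        this.total = total;
    }

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeUTF(name);
        out.writeInt(chinese);
        out.writeInt(math);
        out.writeInt(english);
        out.writeInt(total);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        name = in.readUTF();
        chinese = in.readInt();
        math = in.readInt();
        english = in.readInt();
        total = in.readInt();
    }

    // Sort in descending order of total score; this ordering drives the shuffle sort.
    @Override
    public int compareTo(ScoreSortEntity other) {
        return other.total - this.total;
    }

    // toString() controls how the entity appears in the final text output.
    @Override
    public String toString() {
        return chinese + "\t" + math + "\t" + english + "\t" + total;
    }
}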
(2)ScoreSortReduce.class
package demo2;

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.Reducer;

public class ScoreSortReduce extends Reducer<ScoreSortEntity, Text, Text, ScoreSortEntity> {

    @Override
    protected void reduce(ScoreSortEntity scoreSortEntity, Iterable<Text> iterable, Context context)
            throws IOException, InterruptedException {
        try {
            // Emit one line per student grouped under this key, swapping key and value
            // so the name comes first; iterating avoids losing students with equal totals.
            for (Text name : iterable) {
                context.write(name, scoreSortEntity);
            }
        } catch (Exception e) {
            // Record unexpected failures in a counter instead of failing the job.
            Counter countPrint = context.getCounter("Reduce-OutValue", e.getMessage());
            countPrint.increment(1L);
        }
    }
}
(3)ScoreSortMapper.class
package demo2;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.Mapper;

public class ScoreSortMapper extends Mapper<LongWritable, Text, ScoreSortEntity, Text> {

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // Each input line is tab-separated: a name followed by four numeric score fields.
        String[] fields = value.toString().split("\t");
        try {
            ScoreSortEntity entity = new ScoreSortEntity(
                    fields[0],
                    Integer.parseInt(fields[1]),
                    Integer.parseInt(fields[2]),
                    Integer.parseInt(fields[3]),
                    Integer.parseInt(fields[4]));
            // Use the entity as the key so the shuffle sorts records by score.
            context.write(entity, new Text(fields[0]));
        } catch (Exception e) {
            // Count malformed lines in a counter instead of failing the job.
            Counter countPrint = context.getCounter("Map-Exception", e.toString());
            countPrint.increment(1L);
        }
    }
}
(4)ScoreSortDemo.class
package demo2;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class ScoreSortDemo {

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);

        // Set the jar containing this driver class
        job.setJarByClass(ScoreSortDemo.class);

        // Set the Mapper and Reducer classes
        job.setMapperClass(ScoreSortMapper.class);
        job.setReducerClass(ScoreSortReduce.class);

        // Set the Map output key/value types
        job.setMapOutputKeyClass(ScoreSortEntity.class);
        job.setMapOutputValueClass(Text.class);

        // Set the Reduce output key/value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(ScoreSortEntity.class);

        Path inputPath = new Path("/hadoop/data");
        Path outputPath = new Path("/hadoop/dataout");
        // Delete the output path if it already exists
        outputPath.getFileSystem(conf).delete(outputPath, true);

        // Set the input and output paths
        FileInputFormat.setInputPaths(job, inputPath);
        FileOutputFormat.setOutputPath(job, outputPath);

        boolean waitForCompletion = job.waitForCompletion(true);
        System.exit(waitForCompletion ? 0 : 1);
    }
}
Package the completed code into a JAR file.
For the packaging and deployment steps, see the CSDN post "在eclipse上实现一个 WordCount 程序,并将 WordCount 程序打包发布到 Hadoop 分布式中运行" by 柿子镭.
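Alternatively, if the compiled .class files are already in a local directory (here assumed to be bin), the archive can be built from the command line with the standard jar tool:
jar -cvf jar_004.jar -C bin .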
4. Run the job on Hadoop
hadoop jar jar_004.jar demo2.ScoreSortDemo
View the results:
hadoop fs -cat /hadoop/dataout/part-r-00000