1. Running the WordCount Example That Ships with Hadoop
Prepare the data (wc.txt):
hadoop mapreduce yarn
bigdata
hive sql
hello flink
spark flink streaming
Upload it to HDFS:
hadoop fs -put wc.txt /test/wc
The WordCount class lives in hadoop-mapreduce-examples-2.9.2.jar under ${HADOOP_HOME}/share/hadoop/mapreduce.
Launch the MR job:
hadoop jar /root/bigdata/hadoop-2.9.2/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.9.2.jar wordcount /test/wc/wc.txt /test/wc/wc_output
[root@master hadoop-2.9.2]# hadoop fs -text /test/wc/wc_output/part-r-00000
bigdata 1
flink 2
hadoop 1
hello 1
hive 1
mapreduce 1
spark 1
sql 1
streaming 1
yarn 1
[root@master hadoop-2.9.2]#
2. Implementing WordCount Yourself
Mapper
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/**
 * The map-stage logic is implemented by extending Mapper and overriding map().
 * The first two type parameters are the types of the map input key/value pair;
 * the last two are the types of the map output key/value pair.
 */
public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Split the line on runs of whitespace and emit <word, 1> for each token
        String line = value.toString();
        String[] words = line.trim().split("\\s+");
        for (String word : words) {
            context.write(new Text(word), new IntWritable(1));
        }
    }
}
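An optional refinement, shown here only as a sketch of an alternative Mapper (it is not part of the code above and uses the same imports): context.write() serializes the key and value immediately, so a single Text and IntWritable can be reused across calls instead of allocating new objects for every word.
public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    // Reused output objects: context.write() copies their serialized bytes,
    // so mutating them on the next call is safe.
    private final Text outKey = new Text();
    private final IntWritable one = new IntWritable(1);

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        for (String word : value.toString().trim().split("\\s+")) {
            if (!word.isEmpty()) {
                outKey.set(word);
                context.write(outKey, one);
            }
        }
    }
}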
Reducer
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;
import java.util.Iterator;

/**
 * The reduce-stage logic is implemented by extending Reducer and overriding reduce().
 * The first two type parameters are the types of the reduce input key/value pair and
 * must match the map output types; the last two are the types of the reduce output pair.
 */
public class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        // Sum all the 1s emitted for this word
        int sum = 0;
        Iterator<IntWritable> iterator = values.iterator();
        while (iterator.hasNext()) {
            IntWritable next = iterator.next();
            sum += next.get();
        }
        context.write(key, new IntWritable(sum));
    }
}
Driver
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WordCountDriver {
    public static void main(String[] args) throws IOException,
            ClassNotFoundException, InterruptedException {
        // When running locally on Windows, point these at a local Hadoop install:
        //System.setProperty("hadoop.home.dir", "D:\\dev_soft\\hadoop-2.9.2");
        //System.load("D:\\dev_soft\\hadoop-2.9.2\\bin\\hadoop.dll");

        // 1. Get the configuration and create the job
        Configuration configuration = new Configuration();
        Job job = Job.getInstance(configuration);
        // 2. Set the jar to load classes from
        job.setJarByClass(WordCountDriver.class);
        // 3. Set the mapper and reducer classes
        job.setMapperClass(WordCountMapper.class);
        job.setReducerClass(WordCountReducer.class);
        // 4. Set the map output key/value types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        // 5. Set the final output key/value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // 6. Set the input and output paths
        //    (in local mode, pass them as program arguments)
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        // 7. Submit the job and wait for it to finish
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
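Two optional additions to the driver, sketched here rather than part of the original code: FileOutputFormat fails if the output directory already exists, so it can be deleted before submission, and because word counting is associative and commutative the reducer can also be registered as a combiner. These lines would go before step 7, with FileSystem referenced by its fully qualified name (or an extra import):
        // Delete the output path if it already exists so reruns don't fail
        Path outputPath = new Path(args[1]);
        org.apache.hadoop.fs.FileSystem fs = org.apache.hadoop.fs.FileSystem.get(configuration);
        if (fs.exists(outputPath)) {
            fs.delete(outputPath, true); // true = delete recursively
        }
        // Word counting is associative and commutative, so the reducer can double as a combiner
        job.setCombinerClass(WordCountReducer.class);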
The Maven dependencies are listed in [this earlier post](https://editor.csdn.net/md/?articleId=120251279).
Package the project, pick the jar built without bundled dependencies, and upload it.
Run it, specifying the main class, input, and output:
hadoop jar wc-1.0-SNAPSHOT.jar mr.WordCountDriver /test/wc/wc.txt /test/wc/wc_output2
[root@master test_data]# hadoop fs -text /test/wc/wc_output2/part-r-00000
bigdata 1
flink 2
hadoop 1
hello 1
hive 1
mapreduce 1
spark 1
sql 1
streaming 1
yarn 1
[root@master test_data]# hadoop fs -text /test/wc/wc_output/part-r-00000
bigdata 1
flink 2
hadoop 1
hello 1
hive 1
mapreduce 1
spark 1
sql 1
streaming 1
yarn 1
[root@master test_data]#