Learning MapReduce single-table self-join~

The problem breaks down into three steps: first the self-join of the table, then setting up the columns (marking which side of the join each record belongs to), and finally assembling the results.
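To make the self-join concrete, here is how two records flow through the job (the names Tom, Lucy and Mary are illustrative, not from the original post):

    Input records:   Tom Lucy      (Tom's parent is Lucy)
                     Lucy Mary     (Lucy's parent is Mary)

    Map output (key → value):
        Lucy → 1+Tom+Lucy      keyed by the parent, flag 1 ("right table")
        Tom  → 2+Tom+Lucy      keyed by the child,  flag 2 ("left table")
        Mary → 1+Lucy+Mary
        Lucy → 2+Lucy+Mary

    The reduce call for key Lucy sees a flag-1 value (grandchild Tom) and a
    flag-2 value (grandparent Mary), so it emits the pair: Tom  Mary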

Input file contents:

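The original file isn't reproduced here, so the lines below are only an illustrative child-parent list in the format the mapper expects: two space-separated columns, plus a "child parent" header row that the compareTo("child") check skips.

    child parent
    Tom Lucy
    Tom Jack
    Lucy Mary
    Lucy Ben
    Jack Alice
    Jack Jesse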

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import java.io.IOException;
import java.util.Iterator;

public class STjoin extends Configured implements Tool {

    // Used to emit the "grandchild grandparent" header exactly once
    public static int time = 0;

    // The mapper splits each line into child and parent, then emits the record twice:
    // once keyed by the parent (the "right table") and once keyed by the child (the
    // "left table"). The flag at the start of the value tells the two copies apart.
    public static class Map extends Mapper<Object, Text, Text, Text> {
        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            String childname;
            String parentname;
            String relationtype;
            String line = value.toString();
            int i = 0;
            // The two fields are separated by a single space
            while (line.charAt(i) != ' ') {
                i++;
            }
            // Split into child and parent
            String[] values = {line.substring(0, i), line.substring(i + 1)};
            // Skip the "child parent" header line
            if (values[0].compareTo("child") != 0) {
                childname = values[0];
                parentname = values[1];
                // Flag "1": keyed by the parent, so the child is a grandchild candidate
                relationtype = "1";
                context.write(new Text(values[1]),
                        new Text(relationtype + "+" + childname + "+" + parentname));
                // Flag "2": keyed by the child, so the parent is a grandparent candidate
                relationtype = "2";
                context.write(new Text(values[0]),
                        new Text(relationtype + "+" + childname + "+" + parentname));
            }
        }
    }

    public static class Reduce extends Reducer<Text, Text, Text, Text> {
        public void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            // Emit the header row on the first reduce call only
            if (time == 0) {
                context.write(new Text("grandchild"), new Text("grandparent"));
                time++;
            }
            int grandchildnum = 0;
            String[] grandchild = new String[10];
            int grandparentnum = 0;
            String[] grandparent = new String[10];
            Iterator<Text> ite = values.iterator();
            while (ite.hasNext()) {
                String record = ite.next().toString();
                int len = record.length();
                int i = 2;
                if (len == 0) {
                    continue;
                }
                char relationtype = record.charAt(0);
                String childname = "";
                String parentname = "";
                // Characters up to the '+' form the child name
                while (record.charAt(i) != '+') {
                    childname = childname + record.charAt(i);
                    i++;
                }
                i = i + 1;
                // The rest of the record is the parent name
                while (i < len) {
                    parentname = parentname + record.charAt(i);
                    i++;
                }
                if (relationtype == '1') {
                    // Right-table record: its child is a grandchild of this key's parents
                    grandchild[grandchildnum] = childname;
                    grandchildnum++;
                } else {
                    // Left-table record: its parent is a grandparent of this key's children
                    grandparent[grandparentnum] = parentname;
                    grandparentnum++;
                }
            }
            // Join: pair every grandchild with every grandparent collected for this key
            if (grandparentnum != 0 && grandchildnum != 0) {
                for (int m = 0; m < grandchildnum; m++) {
                    for (int n = 0; n < grandparentnum; n++) {
                        System.out.println(grandchild[m] + " " + grandparent[n]);
                        context.write(new Text(grandchild[m]), new Text(grandparent[n]));
                    }
                }
            }
        }
    }

    public int run(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        // Paths are hard-coded for this experiment; command-line arguments are ignored
        String inputPath = "/usr/local/idea-IC-139.1117.1/Hadoop/out/datainput/child-parent.txt";
        String outputPath = "/usr/local/idea-IC-139.1117.1/Hadoop/out/dataout/";
        job.setJarByClass(STjoin.class);
        job.setJobName("STjoin");
        job.setMapperClass(Map.class);
        job.setReducerClass(Reduce.class);
        FileInputFormat.setInputPaths(job, new Path(inputPath));
        FileOutputFormat.setOutputPath(job, new Path(outputPath));
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);
        boolean success = job.waitForCompletion(true);
        return success ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        int ret = ToolRunner.run(new STjoin(), args);
        System.exit(ret);
    }
}
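Because the input and output paths are hard-coded in run(), the job needs no command-line arguments: it can be started straight from the IDE's main(), or, assuming the class has been packaged into a jar (the name stjoin.jar below is only an example), with:

    hadoop jar stjoin.jar STjoin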

Output:

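With the illustrative input above (not the original post's data), the reducer output would look roughly like this; columns are tab-separated by TextOutputFormat's default, and the exact row order depends on how the keys sort:

    grandchild  grandparent
    Tom         Alice
    Tom         Jesse
    Tom         Mary
    Tom         Ben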

Reference: 《Hadoop实战》 (Hadoop in Action)
