Using iterators in Spark (finding the maximum or minimum per key)

groupByKey
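The example below groups the scores by name with groupByKey, converts each value Iterable into a List, and loops over it to find the highest score for every key; flipping the comparison yields the minimum instead.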

import java.util.Arrays;
import java.util.List;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;

import com.google.common.collect.Lists; // standard Guava, instead of the Jersey-repackaged copy
import scala.Tuple2;

public class GroupByKey {

	public static void main(String[] args) {
		SparkConf conf = new SparkConf().setAppName("groupByKey").setMaster("local");
		JavaSparkContext sc=new JavaSparkContext(conf);
		List<Tuple2<String,Integer>> scores=Arrays.asList(
				new Tuple2<String,Integer>("jac",80),
				new Tuple2<String,Integer>("jac",100),
				new Tuple2<String,Integer>("jac",70),
				new Tuple2<String,Integer>("gs",80),
				new Tuple2<String,Integer>("gs",90)
				);
		JavaPairRDD<String,Integer> pardd=sc.parallelizePairs(scores);
		JavaPairRDD<String,Iterable<Integer>> pardd1=pardd.groupByKey();
		JavaPairRDD<String,Integer> pardd2=
				pardd1.mapValues(f -> {
			// materialize the Iterable<Integer> into a List
			List<Integer> li = Lists.newArrayList(f);
			// start from Integer.MIN_VALUE so the result is also correct for negative values
			Integer max = Integer.MIN_VALUE;
			for (int i = 0; i < li.size(); i++) {
				if (li.get(i) > max) {
					max = li.get(i);
				}
			}
			return max;
		});
		// print each (name, highest score) pair
		pardd2.foreach(f -> System.out.println(f));
		sc.close();
	}

}
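When only a maximum or minimum per key is needed, the grouping step is not strictly necessary. Below is a minimal sketch of the same computation using reduceByKey, which combines values pairwise instead of materializing the whole Iterable; the class name ReduceByKeyMax and the app name are made up for illustration.

import java.util.Arrays;
import java.util.List;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;

import scala.Tuple2;

public class ReduceByKeyMax {

	public static void main(String[] args) {
		SparkConf conf = new SparkConf().setAppName("reduceByKeyMax").setMaster("local");
		JavaSparkContext sc = new JavaSparkContext(conf);
		List<Tuple2<String, Integer>> scores = Arrays.asList(
				new Tuple2<String, Integer>("jac", 80),
				new Tuple2<String, Integer>("jac", 100),
				new Tuple2<String, Integer>("jac", 70),
				new Tuple2<String, Integer>("gs", 80),
				new Tuple2<String, Integer>("gs", 90)
				);
		JavaPairRDD<String, Integer> pairs = sc.parallelizePairs(scores);
		// keep the larger of the two values each time; use Math.min for the minimum instead
		JavaPairRDD<String, Integer> maxPerKey = pairs.reduceByKey((a, b) -> Math.max(a, b));
		maxPerKey.foreach(f -> System.out.println(f));
		sc.close();
	}
}

Because reduceByKey combines values on the map side before the shuffle, it moves less data than groupByKey when a key has many values.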
