The IDE used is Eclipse.
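For dependency management, one option is sbt (Eclipse can import such projects). The build definition below is a minimal sketch; the Spark and Scala versions in it are assumptions, not details from the original setup:

// build.sbt -- minimal sketch; the versions below are assumptions
name := "spark-wordcount"

version := "0.1"

scalaVersion := "2.10.4"

// "provided": the cluster supplies Spark at runtime through spark-submit
libraryDependencies += "org.apache.spark" %% "spark-core" % "1.0.2" % "provided"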
package com.luogankun.spark.base

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._

/**
 * Count the number of occurrences of each word.
 */
object WordCount {
  def main(args: Array[String]) {
    if (args.length < 1) {
      System.err.println("Usage: <file>")
      System.exit(1)
    }

    val conf = new SparkConf()
    val sc = new SparkContext(conf)

    // Read the input file, split each line on tabs, and count each word
    val line = sc.textFile(args(0))
    line.flatMap(_.split("\t")).map((_, 1)).reduceByKey(_ + _).collect.foreach(println)

    sc.stop()
  }
}
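Note that the SparkConf above is created empty, so the app name and master URL must be supplied externally (here via the spark-submit flags in the script below). For quick testing inside Eclipse, a minimal sketch is to set them in code instead; the local[*] master, the WordCountLocal object name, and the input path are illustrative assumptions:

package com.luogankun.spark.base

import org.apache.spark.{SparkConf, SparkContext}

// Sketch for running the job inside the IDE, without spark-submit.
// The master, app name, and input path below are assumptions.
object WordCountLocal {
  def main(args: Array[String]) {
    val conf = new SparkConf()
      .setAppName("WordCount")   // otherwise supplied via --name
      .setMaster("local[*]")     // local threads instead of the cluster
    val sc = new SparkContext(conf)

    sc.textFile("hello.txt")     // hypothetical local input file
      .flatMap(_.split("\t"))
      .map((_, 1))
      .reduceByKey(_ + _)
      .collect()
      .foreach(println)

    sc.stop()
  }
}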
Export the project as a jar file.
Script for submitting the job to the cluster: WordCount.sh
#!/bin/bash
set -x

cd $SPARK_HOME/bin

spark-submit \
--name WordCount \
--class com.luogankun.spark.base.WordCount \
--master spark://hadoop000:7077 \
--executor-memory 1G \
--total-executor-cores 1 \
/home/spark/lib/spark.jar \
hdfs://hadoop000:8020/hello.txt
Run the WordCount.sh script to submit the job to the cluster.
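As an illustration (the file contents below are invented, not taken from the original post): if hello.txt contained tab-separated words such as

hello	world	hello
spark	world

the job would print pairs like

(hello,2)
(world,2)
(spark,1)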