Using a Kafka Data Source to Create a Spark Streaming DStream

1) Requirement: use Spark Streaming to read data from Kafka, perform a simple computation on the data, and print the result to the console.

2) Import the dependency

<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-streaming-kafka-0-10_2.12</artifactId>
    <version>2.4.5</version>
</dependency>
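
Note that spark-streaming-kafka-0-10 expects Spark Streaming itself to be provided by the project, so if the pom does not already declare it, a matching spark-streaming dependency is also needed. A minimal sketch, assuming the same Spark 2.4.5 / Scala 2.12 versions as above:

<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-streaming_2.12</artifactId>
    <version>2.4.5</version>
</dependency>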

3) Write the code

import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}

object DirectAPI {

  def main(args: Array[String]): Unit = {

    //1. Create the SparkConf
    val sparkConf: SparkConf = new SparkConf().setAppName("DirectAPI").setMaster("local[*]")

    //2. Create the StreamingContext with a 3-second batch interval
    val ssc = new StreamingContext(sparkConf, Seconds(3))

    //3. Define the Kafka parameters
    val kafkaPara: Map[String, Object] = Map[String, Object](
      ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> "linux1:9092,linux2:9092,linux3:9092",
      ConsumerConfig.GROUP_ID_CONFIG -> "atguigu",
      "key.deserializer" -> "org.apache.kafka.common.serialization.StringDeserializer",
      "value.deserializer" -> "org.apache.kafka.common.serialization.StringDeserializer"
    )

    //4. Read from Kafka and create the DStream
    val kafkaDStream: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](Set("atguigu"), kafkaPara))

    //5. Extract the value of each message
    val valueDStream: DStream[String] = kafkaDStream.map(record => record.value())

    //6. Compute the WordCount
    valueDStream.flatMap(_.split(" "))
      .map((_, 1))
      .reduceByKey(_ + _)
      .print()

    //7. Start the job
    ssc.start()
    ssc.awaitTermination()
  }
}
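
To see output from the job, something has to write lines into the atguigu topic. Below is a minimal producer sketch using the plain Kafka client API (kafka-clients is pulled in transitively by spark-streaming-kafka-0-10); the broker list and topic name reuse the values from the example above, and the object name TestProducer is only for illustration.

import java.util.Properties
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}

object TestProducer {

  def main(args: Array[String]): Unit = {

    // Producer configuration: reuse the broker list from the streaming example
    val props = new Properties()
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "linux1:9092,linux2:9092,linux3:9092")
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")

    val producer = new KafkaProducer[String, String](props)

    // Send a few space-separated lines so the WordCount has something to count
    val lines = Seq("hello spark", "hello kafka", "hello spark streaming")
    lines.foreach(line => producer.send(new ProducerRecord[String, String]("atguigu", line)))

    producer.close()
  }
}

With the streaming job running, each 3-second batch prints the word counts for whatever lines arrived during that interval; if all three lines above land in the same batch, the console shows something like (hello,3), (spark,2), (kafka,1), (streaming,1).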
