spark-streaming-kafka怎样通过KafkaUtils.createDirectStream的方式处理数据,相信很多没有经验的人对此束手无策,为此本文总结了问题出现的原因和解决方法,通过这篇文章希望你能解决这个问题。
package hgs.spark.streaming

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.kafka.KafkaCluster
import scala.collection.immutable.Map
import java.util.NoSuchElementException
import org.apache.spark.SparkException
import kafka.common.TopicAndPartition
import kafka.message.MessageAndMetadata
// NOTE(review): stray IDE auto-import, unused — kept to avoid touching file deps.
import org.codehaus.jackson.map.deser.std.PrimitiveArrayDeserializers.StringDeser
import kafka.serializer.StringDecoder
import org.apache.spark.streaming.kafka.DirectKafkaInputDStream
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.kafka.HasOffsetRanges
import org.apache.spark.HashPartitioner

/**
 * Spark Streaming job that consumes Kafka via the direct (receiver-less) API,
 * manages consumer offsets manually in ZooKeeper through [[KafkaCluster]],
 * and prints per-word records of each 1-second batch.
 *
 * Offset lifecycle:
 *   - [[getOffsets]] resolves the starting offset of every partition (stored
 *     group offsets, or earliest leader offsets for a fresh group);
 *   - [[upateOffsets]] writes each batch's until-offsets back to ZooKeeper
 *     from inside `foreachRDD`.
 */
object SparkStreamingKafkaDirectWordCount {

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("KafkaWordCount").setMaster("local[5]")
    // Throttle ingestion: at most 1 record per Kafka partition per second.
    conf.set("spark.streaming.kafka.maxRatePerPartition", "1")
    val sc  = new SparkContext(conf)
    val ssc = new StreamingContext(sc, Seconds(1))
    // Checkpoint dir is required by the (currently commented-out) stateful variant.
    ssc.checkpoint("d:\\checkpoint")

    val kafkaParams = Map[String, String](
      "metadata.broker.list" -> "bigdata01:9092,bigdata02:9092,bigdata03:9092",
      "group.id"             -> "group_hgs",
      "zookeeper.connect"    -> "bigdata01:2181,bigdata02:2181,bigdata03:2181")

    val kc     = new KafkaCluster(kafkaParams)
    val topics = Set[String]("test")

    // Message handler: every record becomes ("topic partition", payload).
    // This fixes the (K, V) element type of the resulting DStream.
    val mmdFunct = (mmd: MessageAndMetadata[String, String]) =>
      (mmd.topic + " " + mmd.partition, mmd.message())

    val rds = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder, (String, String)](
      ssc, kafkaParams, getOffsets(topics, kc, kafkaParams), mmdFunct)

    // State-update function for updateStateByKey; only referenced by the
    // commented-out stateful word count below. Two equivalent formulations
    // were left by the author:
    //   iter.flatMap(it => Some(it._2.sum + it._3.getOrElse(0)).map((it._1, _)))           // variant 1
    //   iter.flatMap { case (x, y, z) => Some(y.sum + z.getOrElse(0)).map((x, _)) }        // variant 2
    val updateFunc = (iter: Iterator[(String, Seq[Int], Option[Int])]) =>
      iter.flatMap { case (key, counts, prev) => Some(key, counts.sum.toInt + prev.getOrElse(0)) }

    val words = rds.flatMap { case (_, message) => message.split(" ") }.map((_, 1))
    // Stateful variant (disabled). NOTE(review): `words` is already (word, 1),
    // so the extra `.map((_,1))` here looks like a leftover — confirm before enabling.
    //val wordscount = words.map((_,1)).updateStateByKey(updateFunc, new HashPartitioner(sc.defaultMinPartitions), true)
    //println(getOffsets(topics, kc, kafkaParams))

    // After each non-empty batch, persist its offsets back to ZooKeeper.
    rds.foreachRDD { rdd =>
      if (!rdd.isEmpty()) {
        upateOffsets(topics, kc, rdd, kafkaParams)
      }
    }

    words.print()
    ssc.start()
    ssc.awaitTermination()
  }

  /**
   * Resolve the starting offset for every partition of `topics`.
   *
   * If the consumer group already has offsets stored in ZooKeeper they are
   * returned; otherwise consumption starts from the earliest leader offsets.
   *
   * @throws SparkException if the topics/partitions cannot be resolved.
   */
  def getOffsets(topics: Set[String], kc: KafkaCluster, kafkaParams: Map[String, String]): Map[TopicAndPartition, Long] = {
    val topicAndPartitionsOrNull = kc.getPartitions(topics)
    if (topicAndPartitionsOrNull.isLeft) {
      throw new SparkException(s"Partitions for topics $topics not found")
    } else {
      val topicAndPartitions = topicAndPartitionsOrNull.right.get
      val group              = kafkaParams("group.id")
      val offsetOrNull       = kc.getConsumerOffsets(group, topicAndPartitions)
      if (offsetOrNull.isLeft) {
        // Fresh consumer group: no stored offsets, so start from the earliest
        // available offset of each partition.
        println(s"Consumer group '$group' has no stored offsets; starting from the earliest offsets.")
        val earliestOffsets = kc.getEarliestLeaderOffsets(topicAndPartitions)
        if (earliestOffsets.isLeft)
          throw new SparkException("Earliest leader offsets for the given topics and partitions not found")
        else
          earliestOffsets.right.get.map { case (tp, leaderOffset) => (tp, leaderOffset.offset) }
      } else {
        // Existing group: resume from the recorded offsets.
        offsetOrNull.right.get
      }
    }
  }

  /**
   * Persist the until-offset of every partition consumed by `directRDD`
   * back to ZooKeeper, so the next run resumes where this batch ended.
   *
   * Failures are logged per partition and do not abort the batch.
   * (Method name keeps the original spelling for interface compatibility.)
   */
  def upateOffsets(topics: Set[String], kc: KafkaCluster, directRDD: RDD[(String, String)], kafkaParams: Map[String, String]): Unit = {
    // A direct-stream RDD carries its offset ranges; the cast is the
    // documented way to reach them.
    val offsetRanges = directRDD.asInstanceOf[HasOffsetRanges].offsetRanges
    for (range <- offsetRanges) {
      val topicAndPartition = TopicAndPartition(range.topic, range.partition)
      val result = kc.setConsumerOffsets(kafkaParams("group.id"), Map(topicAndPartition -> range.untilOffset))
      if (result.isLeft) {
        println(s"Error when updating offset of $topicAndPartition")
      }
    }
  }
}
看完上述内容,你们掌握spark-streaming-kafka怎样通过KafkaUtils.createDirectStream的方式处理数据的方法了吗?如果还想学到更多技能或想了解更多相关内容,欢迎关注编程网行业资讯频道,感谢各位的阅读!