
I am trying once more to read data from a Kafka 0.9.0.0 topic with Spark Streaming 1.6.1, in a class written in Scala 2.10.5. It is a simple program that I built with sbt 0.13.12. When I run it, the job fails while handling the values of the [String, String] stream with java.lang.ClassCastException: [B cannot be cast to java.lang.String

(run-main-0) org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 1.0 failed 1 times, most recent failure: Lost task 0.0 in stage 1.0 (TID 1, localhost): java.lang.ClassCastException: [B cannot be cast to java.lang.String
[error]   at org.kafka.receiver.AvroCons$$anonfun$1.apply(AvroConsumer.scala:54)
[error]   at org.kafka.receiver.AvroCons$$anonfun$1.apply(AvroConsumer.scala:54)
[error]   at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
[error]   at org.apache.spark.util.Utils$.getIteratorSize(Utils.scala:1597)
[error]   at org.apache.spark.rdd.RDD$$anonfun$count$1.apply(RDD.scala:1157)
[error]   at org.apache.spark.rdd.RDD$$anonfun$count$1.apply(RDD.scala:1157)
[error]   at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1858)
[error]   at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1858)
[error]   at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:66)
[error]   at org.apache.spark.scheduler.Task.run(Task.scala:89)
[error]   at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:214)
[error]   at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
[error]   at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
[error]   at java.lang.Thread.run(Thread.java:745)
[error]
[error] Driver stacktrace: org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 1.0 failed 1 times, most recent failure: Lost task 0.0 in stage 1.0 (TID 1, localhost): java.lang.ClassCastException: [B cannot be cast to java.lang.String

Here is the Scala program:

package org.kafka.receiver

case class mobileData(action: String, tenantid: Int, lat: Float, lon: Float, memberid: Int,
  event_name: String, productUpccd: Int, device_type: String, device_os_ver: Float, item_name: String)

import java.util.HashMap

import org.apache.avro.SchemaBuilder
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.serializer.KryoSerializer
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.StreamingContext._
import org.apache.spark.SparkContext._
import org.apache.spark.streaming.dstream.ReceiverInputDStream
import org.apache.spark.streaming.kafka.KafkaUtils
import kafka.serializer.DefaultDecoder
import org.apache.spark.sql.SQLContext
import com.sun.istack.internal.logging.Logger

object AvroCons {
  val eventSchema = SchemaBuilder.record("eventRecord").fields
    .name("action").`type`().stringType().noDefault()
    .name("tenantid").`type`().intType().noDefault()
    .name("lat").`type`().doubleType().noDefault()
    .name("lon").`type`().doubleType().noDefault()
    .name("memberid").`type`().intType().noDefault()
    .name("event_name").`type`().stringType().noDefault()
    .name("productUpccd").`type`().intType().noDefault()
    .name("device_type").`type`().stringType().noDefault()
    .name("device_os_ver").`type`().stringType().noDefault()
    .name("item_name").`type`().stringType().noDefault().endRecord

  def main(args: Array[String]): Unit = {

    val sparkConf = new SparkConf().setAppName("Avro Consumer")
      .set("spark.driver.allowMultipleContexts", "true").setMaster("local[2]")
    sparkConf.set("spark.cores.max", "2")
    sparkConf.set("spark.serializer", classOf[KryoSerializer].getName)
    sparkConf.set("spark.sql.tungsten.enabled", "true")
    sparkConf.set("spark.eventLog.enabled", "true")
    sparkConf.set("spark.app.id", "KafkaConsumer")
    sparkConf.set("spark.io.compression.codec", "snappy")
    sparkConf.set("spark.rdd.compress", "true")
    sparkConf.set("spark.streaming.backpressure.enabled", "true")
    sparkConf.set("spark.sql.avro.compression.codec", "snappy")
    sparkConf.set("spark.sql.avro.mergeSchema", "true")
    sparkConf.set("spark.sql.avro.binaryAsString", "true")

    val sc = new SparkContext(sparkConf)
    sc.hadoopConfiguration.set("avro.enable.summary-metadata", "false")
    val ssc = new StreamingContext(sc, Seconds(2))

    val kafkaConf = Map[String, String](
      "metadata.broker.list" -> "############:9092",
      "zookeeper.connect" -> "#############",
      "group.id" -> "KafkaConsumer",
      "zookeeper.connection.timeout.ms" -> "1000000")
    val topicMaps = Map("fishbowl" -> 1)

    val messages = KafkaUtils.createStream[String, String, DefaultDecoder, DefaultDecoder](
      ssc, kafkaConf, topicMaps, StorageLevel.MEMORY_ONLY_SER)

    messages.print()
    val lines = messages.map(x => x._2)
    lines.foreachRDD((rdd, time) => {
      val count = rdd.count()
      if (count > 0)
        rdd.foreach(record => { println(record) })
    })

    ssc.start()
    ssc.awaitTermination()
  }
}

Here is my build.sbt:

name := "AvroConsumer" 
version := "1.0" 
scalaVersion := "2.10.6" 
jarName in assembly := "AvroConsumer.jar" 

libraryDependencies += "org.apache.spark" % "spark-core_2.10" % "1.6.1" % "provided"  

libraryDependencies += "org.apache.spark" % "spark-sql_2.10" % "1.6.1" % "provided" 

libraryDependencies += "org.apache.spark" % "spark-streaming_2.10" % "1.6.1" 

libraryDependencies += "org.apache.spark" % "spark-streaming-kafka-assembly_2.10" % "1.6.1" 

libraryDependencies += "org.apache.spark" % "spark-streaming-kafka_2.10" % "1.6.1" 

libraryDependencies += "org.codehaus.jackson" % "jackson-mapper-asl" % "1.9.13" 

libraryDependencies += "org.openrdf.sesame" % "sesame-rio-api" % "2.7.2" 

libraryDependencies += "com.databricks" % "spark-csv_2.10" % "0.1" 

libraryDependencies += "org.apache.avro" % "avro" % "1.8.1" 

libraryDependencies += "log4j" % "log4j" % "1.2.17" 

libraryDependencies += "org.apache.avro" % "avro-tools" % "1.7.4" 

assemblyMergeStrategy in assembly := {
  case PathList("META-INF", xs @ _*) => MergeStrategy.discard
  case x => MergeStrategy.first
}

I am preparing this code to create a DataFrame from the Kafka topic, which is why I set all of those properties in SparkConf (a sketch of that DataFrame step follows the schema below). Here is the schema of my input data:

{ 
    "action": "AppEvent", 
    "tenantid": 299, 
    "lat": 0.0, 
    "lon": 0.0, 
    "memberid": 16445, 
    "event_name": "CATEGORY_CLICK", 
    "productUpccd": 0, 
    "device_type": "iPhone", 
    "device_os_ver": "10.1", 
    "item_name": "CHICKEN" 
} 
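
For reference, once the values really arrive as JSON strings matching this schema, a DataFrame could be built from them roughly as follows. This is only a minimal sketch: it assumes the StringDecoder fix from the answer further down (so that lines is genuinely a DStream[String]) and Spark 1.6's SQLContext API.

    lines.foreachRDD { rdd =>
      if (!rdd.isEmpty()) {
        // Reuse (or lazily create) a single SQLContext, as recommended for streaming jobs.
        val sqlContext = org.apache.spark.sql.SQLContext.getOrCreate(rdd.sparkContext)
        // read.json infers the schema (action, tenantid, lat, ...) from each JSON record.
        val df = sqlContext.read.json(rdd)
        df.show()
      }
    }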

Here is my Kafka producer class:

// Imports implied by the snippet (Kafka client, Avro, and Twitter bijection-avro):
import java.util.Properties;

import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericData.Record;
import org.apache.avro.generic.GenericRecord;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

import com.twitter.bijection.Injection;
import com.twitter.bijection.avro.GenericAvroCodecs;

public class KafkaAvroProducer {

    /* case class
    TopicData("action":"AppEvent","tenantid":1173,"lat":0.0,"lon":0.0,"memberid":55,
    "event_name":"CATEGORY_CLICK",
    "productUpccd":0,"device_type":"iPhone","device_os_ver":"10.1","item_name":"CHICKEN",*/

    public static final String EVENT_SCHEMA = "{" + "\"type\":\"record\","
        + "\"name\":\"eventrecord\"," + "\"fields\":["
        + " { \"name\":\"action\", \"type\":\"string\" },"
        + " { \"name\":\"tenantid\", \"type\":\"int\" },"
        + " { \"name\":\"lat\", \"type\":\"double\" },"
        + " { \"name\":\"lon\", \"type\":\"double\" },"
        + " { \"name\":\"memberid\", \"type\":\"int\" },"
        + " { \"name\":\"event_name\", \"type\":\"string\" },"
        + " { \"name\":\"productUpccd\", \"type\":\"int\" },"
        + " { \"name\":\"device_type\", \"type\":\"string\" },"
        + " { \"name\":\"device_os_ver\", \"type\":\"string\" },"
        + " { \"name\":\"item_name\", \"type\":\"string\" }" + "]}";

    public static void main(String[] args) throws InterruptedException {
        Properties props = new Properties();
        props.put("bootstrap.servers", "##########:9092");
        props.put("key.serializer",
            "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer",
            "org.apache.kafka.common.serialization.StringSerializer");
        props.put("producer.type", "async");

        Schema.Parser parser = new Schema.Parser();
        Schema schema = parser.parse(EVENT_SCHEMA);
        // bijection-avro codec: encodes each GenericRecord as a JSON String.
        Injection<GenericRecord, String> avroRecords = GenericAvroCodecs.toJson(schema);

        KafkaProducer<String, String> producer = new KafkaProducer<>(props);
        for (int i = 0; i < 300; i++) {
            GenericData.Record avroRecord = new GenericData.Record(schema);
            setEventValues(i, avroRecord);
            String messages = avroRecords.apply(avroRecord);
            ProducerRecord<String, String> producerRecord =
                new ProducerRecord<String, String>("fishbowl", String.valueOf(i), messages);
            System.out.println(producerRecord);
            producer.send(producerRecord);
        }
        producer.close();
    }

    private static void setEventValues(int i, Record avroRecord) {
        avroRecord.put("action", "AppEvent");
        avroRecord.put("tenantid", i);
        avroRecord.put("lat", i * 0.0);
        avroRecord.put("lon", 0.0);
        avroRecord.put("memberid", i * 55);
        avroRecord.put("event_name", "CATEGORY_CLICK");
        avroRecord.put("productUpccd", 0);
        avroRecord.put("device_type", "iPhone");
        avroRecord.put("device_os_ver", "10.1");
        avroRecord.put("item_name", "CHICKEN");
    }
}
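
Since this producer encodes each Avro GenericRecord as a JSON String (GenericAvroCodecs.toJson) and sends it with Kafka's StringSerializer, the bytes on the fishbowl topic are plain JSON text, not Avro binary. On the consumer side the same codec can turn the received strings back into GenericRecords. The following is only a rough sketch, assuming the StringDecoder fix from the answer below, the eventSchema defined in the consumer object above, and that the bijection-avro dependency used by the producer is also on the consumer classpath:

    import com.twitter.bijection.Injection
    import com.twitter.bijection.avro.GenericAvroCodecs
    import org.apache.avro.generic.GenericRecord

    // lines is the DStream[String] of JSON values read from the topic.
    lines.foreachRDD { rdd =>
      rdd.foreachPartition { records =>
        // Build the codec on the executor so it is not serialized with the closure;
        // it is the same JSON codec the producer used, built from eventSchema.
        val codec: Injection[GenericRecord, String] =
          GenericAvroCodecs.toJson[GenericRecord](AvroCons.eventSchema)
        records.foreach { json =>
          codec.invert(json) match {
            case scala.util.Success(rec) => println(rec.get("event_name"))
            case scala.util.Failure(err) => println("Could not decode record: " + err)
          }
        }
      }
    }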

What type of data are you inserting into Kafka? – maasg


I have also added my producer class for you; please take a look at the changes. –


Are you sure there are no other messages already sitting in Kafka when the program starts? The exception indicates an attempt to cast a byte[] to a String, yet the Kafka deserializer seems configured consistently with the producer. Try purging all messages from Kafka before testing. – maasg

Answer


The Kafka consumer should be configured to use the correct decoders:

Instead of:

KafkaUtils.createStream[String, String, DefaultDecoder, DefaultDecoder]

for String values it should be:

KafkaUtils.createStream[String, String, StringDecoder, StringDecoder]
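
With DefaultDecoder the values arrive as Array[Byte] ([B in the JVM's notation), which is exactly what the ClassCastException reports. Applied to the consumer above, the corrected call would look roughly like this; a minimal sketch reusing the ssc, kafkaConf and topicMaps already defined in the question (StringDecoder comes from kafka.serializer):

    import kafka.serializer.StringDecoder
    import org.apache.spark.streaming.kafka.KafkaUtils
    import org.apache.spark.storage.StorageLevel

    // The producer writes the values with StringSerializer (JSON text),
    // so both key and value must be decoded as String.
    val messages = KafkaUtils.createStream[String, String, StringDecoder, StringDecoder](
      ssc, kafkaConf, topicMaps, StorageLevel.MEMORY_ONLY_SER)

    // x._2 is now a real String, so the cast no longer fails.
    val lines = messages.map(_._2)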

Thanks for the help, maasg. –


@jackAKAkarthik For a new problem, please post a new question. Don't ask follow-up questions in the comments. – maasg


I have now posted a new question: http://stackoverflow.com/questions/41222594/inappropriate-output-while-creating-a-dataframe –
