This article gives a detailed walkthrough of how Spark writes data into Hive. It is shared here as a practical reference; hopefully you will take something useful away from it.
package hgs.spark.hive

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.SaveMode
import org.apache.spark.sql.types.StructField
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.types.IntegerType
import org.apache.spark.sql.types.StringType
import org.apache.spark.sql.Row

object WriteDatatoHive {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("WriteDatatoHive").setMaster("local")
    val context = new SparkContext(conf)
    // build an RDD[Row] from two (name, age) tuples, split into 2 partitions
    val rdd = context.parallelize(List(("wd", 22), ("cm", 25)), 2).map(x => Row(x._1, x._2))
    // getOrCreate() reuses the SparkContext created above and returns a Hive-enabled SparkSession
    val spark = SparkSession.builder()
      .appName("hiveApp")
      .config("spark.sql.warehouse.dir", "hdfs://bigdata00:9000/user/hive/warehouse/")
      .enableHiveSupport()
      .getOrCreate()
    import spark.implicits._
    val personSchema = StructType(
      List(
        // each StructField describes one column: name, data type, nullable
        StructField("name", StringType, true),
        StructField("age", IntegerType, true)
      )
    )
    val personDF = spark.createDataFrame(rdd, personSchema)
    personDF.createOrReplaceTempView("personm")
    // insertInto appends the temp view's rows into the existing Hive table test.person
    spark.table("personm").write.insertInto("test.person")
    // alternative: write through saveAsTable instead of insertInto
    // spark.sql("select * from personm").write.option("spark.sql.hive.convertMetastoreParquet", false)
    //   .mode(SaveMode.Append).saveAsTable("test.person")
    context.stop()
  }
}

case class person(name: String, age: Int)
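The file ends with a case class person that the main method never uses. For reference only, below is a minimal sketch of how the commented-out saveAsTable path could be written against that case class instead of a hand-built Row/StructType schema. The object name WriteDatatoHiveAppend is made up for this example, and the warehouse URI and target table test.person are assumed to match the environment above.

package hgs.spark.hive

import org.apache.spark.sql.{SaveMode, SparkSession}

// duplicates the case class from the original file so this sketch compiles on its own;
// drop this definition if both files live in the same package
case class person(name: String, age: Int)

object WriteDatatoHiveAppend {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("hiveAppendApp")
      .master("local")
      .config("spark.sql.warehouse.dir", "hdfs://bigdata00:9000/user/hive/warehouse/")
      .enableHiveSupport()
      .getOrCreate()

    // toDF() on a Seq of case class instances needs the SparkSession implicits in scope
    import spark.implicits._

    // build the DataFrame straight from the case class; column names come from the field names
    val personDF = Seq(person("wd", 22), person("cm", 25)).toDF()

    // Append creates test.person if it is missing and appends to it otherwise
    personDF.write
      .mode(SaveMode.Append)
      .saveAsTable("test.person")

    spark.stop()
  }
}

The difference between the two write paths is worth noting: saveAsTable with SaveMode.Append will create test.person when it does not exist, while insertInto requires the table to exist already and matches columns by position rather than by name.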
That wraps up this article on how Spark writes data into Hive. Hopefully the content above is of some help and lets you learn a bit more; if you found the article useful, please share it so more people can see it.