首页 > 其他 > 详细

spark操作hive方式(scala)

时间:2018-12-26 17:27:27      阅读:416      评论:0      收藏:0      [点我收藏+]

第一种方式:

/** Reads two sample rows from `diagbot.ord_lis_trend` via the HiveServer2 JDBC
  * driver and prints the first column of each row.
  *
  * Fix vs. original: the Connection, Statement and ResultSet were never closed,
  * leaking a server-side session and local handles on every call (and on any
  * exception). Each resource is now released in a `finally` block, innermost first.
  */
def operatorHive: Unit = {
    // Register the HiveServer2 JDBC driver with DriverManager.
    Class.forName("org.apache.hive.jdbc.HiveDriver")
    val url = "jdbc:hive2://192.168.2.xxx:10000"
    // NOTE(review): credentials are hard-coded; move to configuration before production use.
    val connection: Connection = DriverManager.getConnection(url, "root", "diagbotkwz@123")
    try {
      val createStatement: Statement = connection.createStatement()
      try {
        val query: ResultSet = createStatement.executeQuery("select * from diagbot.ord_lis_trend limit 2")
        try {
          while (query.next()) {
            println(query.getString(1))
          }
        } finally query.close()
      } finally createStatement.close()
    } finally connection.close()
  }

第二种方式:

/** Minimal Spark job that queries a Hive table through a Hive-enabled
  * SparkSession and prints its row count and second column name.
  *
  * Fixes vs. original:
  *  - `sql1.columns(1)` threw `ArrayIndexOutOfBoundsException` for tables with
  *    fewer than two columns; now accessed safely via `lift`.
  *  - Replaced the gibberish debug label `"sddhdj"` with a meaningful one.
  */
object SparkOperaterHive {
  // enableHiveSupport() wires the session to the Hive metastore so `sql` can
  // resolve Hive tables such as janggan.diagnosismedication.
  val sparkConf: SparkConf = new SparkConf().setAppName(SparkOperaterHive.getClass.getSimpleName)
  val sparkSession: SparkSession = SparkSession.builder().config(sparkConf).enableHiveSupport().getOrCreate()
  // Exposed for callers that need the lower-level entry points.
  val sc: SparkContext = sparkSession.sparkContext
  val sqlContext: SQLContext = sparkSession.sqlContext

  def main(args: Array[String]): Unit = {

    import sparkSession.implicits._

    // Query a Hive table through the Hive-enabled session.
    val sql1: DataFrame = sparkSession.sql("select * from janggan.diagnosismedication")

    // JDBC properties for the (currently disabled) MySQL export below.
    // NOTE(review): credentials are hard-coded; move to configuration before enabling.
    val properties: Properties = new Properties()
    properties.put("user", "root")
    properties.put("password", "diagbot@20180822")
    properties.put("driver", "com.mysql.jdbc.Driver")
    //    sql1.write.mode(SaveMode.Append).jdbc(url,"doc_info_hive",properties)

    println("总数为:" + sql1.count())
    // Safe lookup: tables with fewer than two columns simply print nothing
    // instead of throwing ArrayIndexOutOfBoundsException.
    sql1.columns.lift(1).foreach(col => println("second column: " + col))

    sparkSession.stop()
  }
}

 

spark操作hive方式(scala)

原文:https://www.cnblogs.com/kwzblog/p/10180174.html

(0)
(0)
   
举报
评论 一句话评论(0)
关于我们 - 联系我们 - 留言反馈 - 联系我们:wmxa8@hotmail.com
© 2014 bubuko.com 版权所有
打开技术之扣,分享程序人生!