// For details see: http://www.ibm.com/developerworks/cn/opensource/os-cn-spark-practice3/
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.types._
import org.apache.spark.sql.Row
import org.apache.spark.rdd.RDD
object PeopleDataStatisticSparkSQL {

  /** Entry point: loads a whitespace-separated people data file with three
    * columns (id, gender, height) and prints several statistics computed
    * both via SQL queries and via the DataFrame API.
    *
    * @param args optional; args(0) may supply the input file path.
    *             Defaults to the original hard-coded path when absent,
    *             so existing invocations keep working.
    */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("SparkSQL").setMaster("local")
    val sc = new SparkContext(conf)

    // Input path is now overridable from the command line (backward-compatible default).
    val inputPath = args.headOption.getOrElse("/Users/lihu/Desktop/crawle/xingbie.txt")
    val peopleDataRDD = sc.textFile(inputPath)

    val sqlCtx = new SQLContext(sc)
    // Needed for the $"..." column syntax used below.
    import sqlCtx.implicits._

    // Schema: three nullable string columns. (Fixed typo: shemaArray -> schemaArray.)
    val schemaArray = Array("id", "gender", "height")
    val schema = StructType(schemaArray.map(StructField(_, StringType, nullable = true)))

    // Each input line is expected to be "id gender height" separated by single spaces.
    val rowRDD: RDD[Row] = peopleDataRDD
      .map(_.split(" "))
      .map(fields => Row(fields(0), fields(1), fields(2)))

    val peopleDF = sqlCtx.createDataFrame(rowRDD, schema)
    peopleDF.registerTempTable("people")

    // BUG FIX: the original literals used Unicode curly quotes (‘M‘ / ‘F‘),
    // which Spark's SQL parser does not accept as string delimiters; SQL
    // string literals require plain ASCII single quotes.
    println(sqlCtx.sql("select id from people where height > 180 and gender = 'M'").count())
    println(sqlCtx.sql("select id from people where height > 170 and gender = 'F'").count())

    // Same kind of query expressed through the DataFrame API.
    println(peopleDF.filter(peopleDF("gender").equalTo("M")).filter(peopleDF("height") > 165).count())

    // Count of people per gender.
    peopleDF.groupBy(peopleDF("gender")).count().show()

    // Show two sample rows of males taller than 165.
    peopleDF.filter(peopleDF("gender").equalTo("M")).filter(peopleDF("height") > 165).show(2)

    // Top three tallest people (height is a string column, so this is a
    // lexicographic sort — NOTE(review): cast to a numeric type if true
    // numeric ordering is intended).
    peopleDF.sort($"height".desc).take(3).foreach { row =>
      println(row(0) + " " + row(1) + " " + row(2))
    }

    // Average, max and min height per gender.
    peopleDF.groupBy(peopleDF("gender")).agg(Map("height" -> "avg")).show()
    peopleDF.groupBy(peopleDF("gender")).agg(Map("height" -> "max")).show()
    peopleDF.groupBy(peopleDF("gender")).agg(Map("height" -> "min")).show()
  }
}
// Original article: http://www.cnblogs.com/sunyaxue/p/6373456.html