Commit f86f7176 authored by Dongjoon Hyun, committed by Reynold Xin

[MINOR][EXAMPLE] Use SparkSession instead of SQLContext in RDDRelation.scala

## What changes were proposed in this pull request?

Now that `SQLContext` is kept only for backward compatibility, the Spark 2.0 examples should use `SparkSession` instead.

## How was this patch tested?

This is just an example change. After building, run `bin/run-example org.apache.spark.examples.sql.RDDRelation`.

Author: Dongjoon Hyun <dongjoon@apache.org>

Closes #12808 from dongjoon-hyun/rddrelation.
parent 3d09ceee
examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala

@@ -19,7 +19,7 @@
 package org.apache.spark.examples.sql
 
 import org.apache.spark.{SparkConf, SparkContext}
-import org.apache.spark.sql.{SaveMode, SQLContext}
+import org.apache.spark.sql.{SaveMode, SparkSession}
 
 // One method for defining the schema of an RDD is to make a case class with the desired column
 // names and types.
@@ -29,10 +29,10 @@ object RDDRelation {
   def main(args: Array[String]) {
     val sparkConf = new SparkConf().setAppName("RDDRelation")
     val sc = new SparkContext(sparkConf)
-    val sqlContext = new SQLContext(sc)
+    val spark = new SparkSession(sc)
 
-    // Importing the SQL context gives access to all the SQL functions and implicit conversions.
-    import sqlContext.implicits._
+    // Importing the SparkSession gives access to all the SQL functions and implicit conversions.
+    import spark.implicits._
 
     val df = sc.parallelize((1 to 100).map(i => Record(i, s"val_$i"))).toDF()
     // Any RDD containing case classes can be registered as a table. The schema of the table is
@@ -41,15 +41,15 @@ object RDDRelation {
 
     // Once tables have been registered, you can run SQL queries over them.
     println("Result of SELECT *:")
-    sqlContext.sql("SELECT * FROM records").collect().foreach(println)
+    spark.sql("SELECT * FROM records").collect().foreach(println)
 
     // Aggregation queries are also supported.
-    val count = sqlContext.sql("SELECT COUNT(*) FROM records").collect().head.getLong(0)
+    val count = spark.sql("SELECT COUNT(*) FROM records").collect().head.getLong(0)
     println(s"COUNT(*): $count")
 
-    // The results of SQL queries are themselves RDDs and support all normal RDD functions.  The
+    // The results of SQL queries are themselves RDDs and support all normal RDD functions. The
     // items in the RDD are of type Row, which allows you to access each column by ordinal.
-    val rddFromSql = sqlContext.sql("SELECT key, value FROM records WHERE key < 10")
+    val rddFromSql = spark.sql("SELECT key, value FROM records WHERE key < 10")
 
     println("Result of RDD.map:")
     rddFromSql.rdd.map(row => s"Key: ${row(0)}, Value: ${row(1)}").collect().foreach(println)
@@ -61,14 +61,14 @@ object RDDRelation {
     df.write.mode(SaveMode.Overwrite).parquet("pair.parquet")
 
     // Read in parquet file. Parquet files are self-describing so the schema is preserved.
-    val parquetFile = sqlContext.read.parquet("pair.parquet")
+    val parquetFile = spark.read.parquet("pair.parquet")
 
     // Queries can be run using the DSL on parquet files just like the original RDD.
     parquetFile.where($"key" === 1).select($"value".as("a")).collect().foreach(println)
 
     // These files can also be registered as tables.
     parquetFile.registerTempTable("parquetFile")
-    sqlContext.sql("SELECT * FROM parquetFile").collect().foreach(println)
+    spark.sql("SELECT * FROM parquetFile").collect().foreach(println)
 
     sc.stop()
   }
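For context, the diff above constructs the `SparkSession` directly from an existing `SparkContext`. In Spark 2.0 the session is more commonly created through the builder API, which also manages the underlying `SparkContext` itself. Below is a minimal sketch of the same idea written that way; the object name, the `local[*]` master, and the use of `createOrReplaceTempView` are illustrative assumptions and are not part of this commit.

```scala
import org.apache.spark.sql.SparkSession

// Top-level case class so Spark can derive an encoder for it via reflection.
case class Record(key: Int, value: String)

// Hypothetical variant of the example using the Spark 2.0 builder entry point.
object RDDRelationWithBuilder {
  def main(args: Array[String]): Unit = {
    // The builder creates (or reuses) the underlying SparkContext,
    // so no explicit SparkConf or SparkContext is needed.
    val spark = SparkSession.builder()
      .appName("RDDRelation")
      .master("local[*]")
      .getOrCreate()

    import spark.implicits._

    // Build a DataFrame directly from a local collection instead of sc.parallelize(...).toDF().
    val df = (1 to 100).map(i => Record(i, s"val_$i")).toDF()
    df.createOrReplaceTempView("records")

    // The same kind of SQL query as in the example, issued through the session.
    val count = spark.sql("SELECT COUNT(*) FROM records").collect().head.getLong(0)
    println(s"COUNT(*): $count")

    spark.stop()
  }
}
```

With the builder, the example would no longer need `SparkConf` or an explicit `SparkContext`, and `createOrReplaceTempView` takes the place of the deprecated `registerTempTable`.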