Commit 86534d0f authored by Michael Armbrust

[SPARK-2631][SQL] Use SQLConf to configure in-memory columnar caching

Author: Michael Armbrust <michael@databricks.com>

Closes #1638 from marmbrus/cachedConfig and squashes the following commits:

2362082 [Michael Armbrust] Use SQLConf to configure in-memory columnar caching
parent 39b81931
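For illustration, a minimal sketch of how the compression setting can be toggled through SQLConf at runtime after this change, instead of being read once from SparkConf. The Record case class, the "logs" table, and the application name are invented for this sketch, and it assumes the Spark 1.0-era SQLConf.set(key, value) method that SQLContext mixes in.

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SQLContext

// Hypothetical record type and table name, used only for this sketch.
case class Record(id: Int, value: String)

object CachedConfigExample extends App {
  val sc = new SparkContext(
    new SparkConf().setAppName("cached-config-example").setMaster("local[*]"))
  val sqlContext = new SQLContext(sc)
  import sqlContext.createSchemaRDD

  // Register a small table so there is something to cache.
  sc.parallelize(Seq(Record(1, "a"), Record(2, "b"))).registerAsTable("logs")

  // SQLContext mixes in SQLConf, so the key can be set per session...
  sqlContext.set("spark.sql.inMemoryColumnarStorage.compressed", "true")
  // ...or, equivalently, through a SQL SET command:
  // sqlContext.sql("SET spark.sql.inMemoryColumnarStorage.compressed=true")

  // cacheTable now reads useCompression from SQLConf rather than from SparkConf.
  sqlContext.cacheTable("logs")

  sc.stop()
}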
@@ -22,6 +22,7 @@ import java.util.Properties
 import scala.collection.JavaConverters._

 object SQLConf {
+  val COMPRESS_CACHED = "spark.sql.inMemoryColumnarStorage.compressed"
   val AUTO_BROADCASTJOIN_THRESHOLD = "spark.sql.autoBroadcastJoinThreshold"
   val SHUFFLE_PARTITIONS = "spark.sql.shuffle.partitions"
   val DEFAULT_SIZE_IN_BYTES = "spark.sql.defaultSizeInBytes"
@@ -49,6 +50,9 @@ trait SQLConf {

   /** ************************ Spark SQL Params/Hints ******************* */
   // TODO: refactor so that these hints accessors don't pollute the name space of SQLContext?

+  /** When true, tables cached using the in-memory columnar caching will be compressed. */
+  private[spark] def useCompression: Boolean = get(COMPRESS_CACHED, "false").toBoolean
+
   /** Number of partitions to use for shuffle operators. */
   private[spark] def numShufflePartitions: Int = get(SHUFFLE_PARTITIONS, "200").toInt
@@ -192,8 +192,6 @@ class SQLContext(@transient val sparkContext: SparkContext)
         currentTable.logicalPlan

       case _ =>
-        val useCompression =
-          sparkContext.conf.getBoolean("spark.sql.inMemoryColumnarStorage.compressed", false)
         InMemoryRelation(useCompression, executePlan(currentTable).executedPlan)
     }