Skip to content

Commit

Permalink
Use SQLConf to configure in-memory columnar caching
Browse files Browse the repository at this point in the history
  • Loading branch information
marmbrus committed Jul 30, 2014
1 parent 2c35666 commit 2362082
Show file tree
Hide file tree
Showing 2 changed files with 4 additions and 2 deletions.
4 changes: 4 additions & 0 deletions sql/core/src/main/scala/org/apache/spark/sql/SQLConf.scala
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ import java.util.Properties
import scala.collection.JavaConverters._

object SQLConf {
val COMPRESS_CACHED = "spark.sql.inMemoryColumnarStorage.compressed"
val AUTO_BROADCASTJOIN_THRESHOLD = "spark.sql.autoBroadcastJoinThreshold"
val SHUFFLE_PARTITIONS = "spark.sql.shuffle.partitions"
val DEFAULT_SIZE_IN_BYTES = "spark.sql.defaultSizeInBytes"
Expand Down Expand Up @@ -49,6 +50,9 @@ trait SQLConf {
/** ************************ Spark SQL Params/Hints ******************* */
// TODO: refactor so that these hints accessors don't pollute the name space of SQLContext?

/** When set to true, tables cached via the in-memory columnar caching are stored compressed. */
private[spark] def useCompression: Boolean = {
  // Defaults to uncompressed storage unless explicitly enabled in the configuration.
  val configured = get(COMPRESS_CACHED, "false")
  configured.toBoolean
}

/** Number of partitions to use for shuffle operators (defaults to 200). */
private[spark] def numShufflePartitions: Int = {
  val configured = get(SHUFFLE_PARTITIONS, "200")
  configured.toInt
}

Expand Down
2 changes: 0 additions & 2 deletions sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
Original file line number Diff line number Diff line change
Expand Up @@ -192,8 +192,6 @@ class SQLContext(@transient val sparkContext: SparkContext)
currentTable.logicalPlan

case _ =>
val useCompression =
sparkContext.conf.getBoolean("spark.sql.inMemoryColumnarStorage.compressed", false)
InMemoryRelation(useCompression, executePlan(currentTable).executedPlan)
}

Expand Down

0 comments on commit 2362082

Please sign in to comment.