Skip to content

Commit

Permalink
[SPARK-3367] Remove spark.shuffle.spill.compress
Browse files Browse the repository at this point in the history
Replaced it with the existing spark.shuffle.compress.
  • Loading branch information
rxin committed Sep 3, 2014
1 parent c64cc43 commit 2029d60
Show file tree
Hide file tree
Showing 3 changed files with 3 additions and 13 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -97,12 +97,10 @@ private[spark] class BlockManager(

// Whether to compress broadcast variables that are stored
private val compressBroadcast = conf.getBoolean("spark.broadcast.compress", true)
// Whether to compress shuffle output that are stored
// Whether to compress shuffle temporary output that are stored
private val compressShuffle = conf.getBoolean("spark.shuffle.compress", true)
// Whether to compress RDD partitions that are stored serialized
private val compressRdds = conf.getBoolean("spark.rdd.compress", false)
// Whether to compress shuffle output temporarily spilled to disk
private val compressShuffleSpill = conf.getBoolean("spark.shuffle.spill.compress", true)

private val slaveActor = actorSystem.actorOf(
Props(new BlockManagerSlaveActor(this, mapOutputTracker)),
Expand Down Expand Up @@ -997,7 +995,7 @@ private[spark] class BlockManager(
case _: ShuffleBlockId => compressShuffle
case _: BroadcastBlockId => compressBroadcast
case _: RDDBlockId => compressRdds
case _: TempBlockId => compressShuffleSpill
case _: TempBlockId => compressShuffle
case _ => false
}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ class ExternalAppendOnlyMapSuite extends FunSuite with LocalSparkContext {
// for a bug we had with bytes written past the last object in a batch (SPARK-2792)
conf.set("spark.serializer.objectStreamReset", "1")
conf.set("spark.serializer", "org.apache.spark.serializer.JavaSerializer")
conf.set("spark.shuffle.spill.compress", codec.isDefined.toString)
conf.set("spark.shuffle.compress", codec.isDefined.toString)
codec.foreach { c => conf.set("spark.io.compression.codec", c) }
// Ensure that we actually have multiple batches per spill file
conf.set("spark.shuffle.spill.batchSize", "10")
Expand Down
8 changes: 0 additions & 8 deletions docs/configuration.md
Original file line number Diff line number Diff line change
Expand Up @@ -247,14 +247,6 @@ Apart from these, the following properties are also available, and may be useful
This spilling threshold is specified by <code>spark.shuffle.memoryFraction</code>.
</td>
</tr>
<tr>
<td><code>spark.shuffle.spill.compress</code></td>
<td>true</td>
<td>
Whether to compress data spilled during shuffles. Compression will use
<code>spark.io.compression.codec</code>.
</td>
</tr>
<tr>
<td><code>spark.shuffle.memoryFraction</code></td>
<td>0.2</td>
Expand Down

0 comments on commit 2029d60

Please sign in to comment.