Skip to content
Snippets Groups Projects
Commit 4576d80a authored by Reynold Xin's avatar Reynold Xin
Browse files

[SPARK-2469] Use Snappy (instead of LZF) for default shuffle compression codec

This reduces shuffle compression memory usage by 3x.

Author: Reynold Xin <rxin@apache.org>

Closes #1415 from rxin/snappy and squashes the following commits:

06c1a01 [Reynold Xin] SPARK-2469: Use Snappy (instead of LZF) for default shuffle compression codec.
parent c2048a51
No related branches found
No related tags found
No related merge requests found
......@@ -56,7 +56,7 @@ private[spark] object CompressionCodec {
ctor.newInstance(conf).asInstanceOf[CompressionCodec]
}
val DEFAULT_COMPRESSION_CODEC = classOf[LZFCompressionCodec].getName
val DEFAULT_COMPRESSION_CODEC = classOf[SnappyCompressionCodec].getName
}
......@@ -103,7 +103,7 @@ class LZFCompressionCodec(conf: SparkConf) extends CompressionCodec {
/**
* :: DeveloperApi ::
* Snappy implementation of [[org.apache.spark.io.CompressionCodec]].
* Block size can be configured by spark.io.compression.snappy.block.size.
* Block size can be configured by `spark.io.compression.snappy.block.size`.
*
* Note: The wire protocol for this codec is not guaranteed to be compatible across versions
* of Spark. This is intended for use as an internal compression utility within a single Spark
......
......@@ -46,7 +46,7 @@ class CompressionCodecSuite extends FunSuite {
test("default compression codec") {
val codec = CompressionCodec.createCodec(conf)
assert(codec.getClass === classOf[LZFCompressionCodec])
assert(codec.getClass === classOf[SnappyCompressionCodec])
testCodec(codec)
}
......
0% Loading… (or reload the page).
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment