From c3816de5040e3c48e58ed4762d2f4eb606812938 Mon Sep 17 00:00:00 2001
From: Patrick Wendell <pwendell@gmail.com>
Date: Mon, 13 Jan 2014 13:24:04 -0800
Subject: [PATCH] Changing option wording per discussion with Andrew

---
 core/src/main/scala/org/apache/spark/Aggregator.scala       | 2 +-
 .../main/scala/org/apache/spark/storage/BlockManager.scala  | 4 ++--
 .../spark/util/collection/ExternalAppendOnlyMap.scala       | 4 ++--
 docs/configuration.md                                       | 6 +++---
 4 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/core/src/main/scala/org/apache/spark/Aggregator.scala b/core/src/main/scala/org/apache/spark/Aggregator.scala
index ee4467085f..38073707cd 100644
--- a/core/src/main/scala/org/apache/spark/Aggregator.scala
+++ b/core/src/main/scala/org/apache/spark/Aggregator.scala
@@ -32,7 +32,7 @@ case class Aggregator[K, V, C] (
     mergeCombiners: (C, C) => C) {
 
   private val sparkConf = SparkEnv.get.conf
-  private val externalSorting = sparkConf.getBoolean("spark.shuffle.external", true)
+  private val externalSorting = sparkConf.getBoolean("spark.shuffle.spill", true)
 
   def combineValuesByKey(iter: Iterator[_ <: Product2[K, V]]) : Iterator[(K, C)] = {
     if (!externalSorting) {
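
For context, a minimal sketch of how the renamed option would be set from application code (not part of this patch; the app name is illustrative):

    import org.apache.spark.SparkConf

    // Disable map-side spilling so Aggregator keeps its combiners purely in memory.
    // Aggregator reads the flag back with conf.getBoolean("spark.shuffle.spill", true),
    // so omitting the setting leaves spilling enabled.
    val conf = new SparkConf()
      .setAppName("spill-config-example")
      .set("spark.shuffle.spill", "false")
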
diff --git a/core/src/main/scala/org/apache/spark/storage/BlockManager.scala b/core/src/main/scala/org/apache/spark/storage/BlockManager.scala
index 0e770ed152..6461deee32 100644
--- a/core/src/main/scala/org/apache/spark/storage/BlockManager.scala
+++ b/core/src/main/scala/org/apache/spark/storage/BlockManager.scala
@@ -81,7 +81,7 @@ private[spark] class BlockManager(
   // Whether to compress RDD partitions that are stored serialized
   val compressRdds = conf.getBoolean("spark.rdd.compress", false)
   // Whether to compress shuffle output temporarily spilled to disk
-  val compressExternalShuffle = conf.getBoolean("spark.shuffle.external.compress", false)
+  val compressShuffleSpill = conf.getBoolean("spark.shuffle.spill.compress", false)
 
   val heartBeatFrequency = BlockManager.getHeartBeatFrequency(conf)
 
@@ -792,7 +792,7 @@ private[spark] class BlockManager(
     case ShuffleBlockId(_, _, _) => compressShuffle
     case BroadcastBlockId(_) => compressBroadcast
     case RDDBlockId(_, _) => compressRdds
-    case TempBlockId(_) => compressExternalShuffle
+    case TempBlockId(_) => compressShuffleSpill
     case _ => false
   }
 
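
For reference, a minimal sketch of reading the renamed compression flag outside BlockManager (assumed standalone SparkConf usage, not part of this patch):

    import org.apache.spark.SparkConf

    // Opt in to compressing spilled shuffle data; BlockManager applies this
    // setting only to TempBlockId blocks, and the default remains false.
    val conf = new SparkConf().set("spark.shuffle.spill.compress", "true")
    val compressShuffleSpill = conf.getBoolean("spark.shuffle.spill.compress", false)
    println(s"compress spilled shuffle blocks: $compressShuffleSpill")
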
diff --git a/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala b/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala
index fd17413952..2eef6a7c10 100644
--- a/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala
+++ b/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala
@@ -87,9 +87,9 @@ private[spark] class ExternalAppendOnlyMap[K, V, C](
   // batches, with each batch using its own serialization stream. This cuts down on the size
   // of reference-tracking maps constructed when deserializing a stream.
   //
-  // NOTE: Setting this too low can cause excess copying when serializing, since some serailizers
+  // NOTE: Setting this too low can cause excess copying when serializing, since some serializers
   // grow internal data structures by growing + copying every time the number of objects doubles.
-  private val serializerBatchSize = sparkConf.getLong("spark.shuffle.external.batchSize", 10000)
+  private val serializerBatchSize = sparkConf.getLong("spark.shuffle.spill.batchSize", 10000)
 
   // How many times we have spilled so far
   private var spillCount = 0
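
Likewise, a minimal sketch of tuning the renamed batch-size key (the 20000 shown is only illustrative; the code's default stays at 10000 objects per serialization stream):

    import org.apache.spark.SparkConf

    // Larger batches mean fewer serialization streams per spill file but bigger
    // reference-tracking maps per stream; batches that are too small cause extra
    // copying in serializers that grow by doubling.
    val conf = new SparkConf().set("spark.shuffle.spill.batchSize", "20000")
    val serializerBatchSize = conf.getLong("spark.shuffle.spill.batchSize", 10000)
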
diff --git a/docs/configuration.md b/docs/configuration.md
index 350e3145c0..be06bd19be 100644
--- a/docs/configuration.md
+++ b/docs/configuration.md
@@ -116,7 +116,7 @@ Apart from these, the following properties are also available, and may be useful
   <td>0.3</td>
   <td>
     Fraction of Java heap to use for aggregation and cogroups during shuffles, if
-    <code>spark.shuffle.external</code> is true. At any given time, the collective size of
+    <code>spark.shuffle.spill</code> is true. At any given time, the collective size of
     all in-memory maps used for shuffles is bounded by this limit, beyond which the contents will
     begin to spill to disk. If spills are often, consider increasing this value at the expense of
     <code>spark.storage.memoryFraction</code>.
@@ -155,7 +155,7 @@ Apart from these, the following properties are also available, and may be useful
   </td>
 </tr>
 <tr>
-  <td>spark.shuffle.external.compress</td>
+  <td>spark.shuffle.spill.compress</td>
   <td>false</td>
   <td>
     Whether to compress data spilled during shuffles.
@@ -395,7 +395,7 @@ Apart from these, the following properties are also available, and may be useful
   </td>
 </tr>
 <tr>
-  <td>spark.shuffle.external</td>
+  <td>spark.shuffle.spill</td>
   <td>true</td>
   <td>
     If set to "true", limits the amount of memory used during reduces by spilling data out to disk. This spilling
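
Taken together, a minimal sketch of the documented settings under their new names (values shown are the defaults quoted in configuration.md; spark.shuffle.memoryFraction is assumed to be the fraction property described in the first doc hunk, its name sits outside the hunk):

    import org.apache.spark.SparkConf

    // Defaults for the renamed spill options plus the related memory fraction.
    val conf = new SparkConf()
      .set("spark.shuffle.spill", "true")           // spill reduce-side data to disk when over the limit
      .set("spark.shuffle.spill.compress", "false") // compress data spilled during shuffles
      .set("spark.shuffle.memoryFraction", "0.3")   // heap fraction for in-memory shuffle maps
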
-- 
GitLab