diff --git a/core/src/main/scala/org/apache/spark/Aggregator.scala b/core/src/main/scala/org/apache/spark/Aggregator.scala
index ee4467085f79396527f0a40b6e4bfa6d67fa5894..38073707cd3df63cc4d85ee0d189e276fa00977b 100644
--- a/core/src/main/scala/org/apache/spark/Aggregator.scala
+++ b/core/src/main/scala/org/apache/spark/Aggregator.scala
@@ -32,7 +32,7 @@ case class Aggregator[K, V, C] (
     mergeCombiners: (C, C) => C) {
 
   private val sparkConf = SparkEnv.get.conf
-  private val externalSorting = sparkConf.getBoolean("spark.shuffle.external", true)
+  private val externalSorting = sparkConf.getBoolean("spark.shuffle.spill", true)
 
   def combineValuesByKey(iter: Iterator[_ <: Product2[K, V]]) : Iterator[(K, C)] = {
     if (!externalSorting) {
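
For context: the renamed flag is read off the job's SparkConf exactly as the patched line shows, so callers toggle spilling like any other Spark property. A minimal sketch of both sides (the app name and master are placeholders, not part of this patch):

import org.apache.spark.{SparkConf, SparkContext}

// Driver side: set the renamed property (was spark.shuffle.external).
val conf = new SparkConf()
  .setAppName("spill-demo")   // placeholder name
  .setMaster("local[2]")      // placeholder master
  .set("spark.shuffle.spill", "true")
val sc = new SparkContext(conf)

// Aggregator side: the same read the patch performs, defaulting to true.
val externalSorting = conf.getBoolean("spark.shuffle.spill", true)
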
diff --git a/core/src/main/scala/org/apache/spark/storage/BlockManager.scala b/core/src/main/scala/org/apache/spark/storage/BlockManager.scala
index 0e770ed1521e94dae828802222b4eb4e6dd764c3..6461deee3203d89c9aa807b294b400940611a6d8 100644
--- a/core/src/main/scala/org/apache/spark/storage/BlockManager.scala
+++ b/core/src/main/scala/org/apache/spark/storage/BlockManager.scala
@@ -81,7 +81,7 @@ private[spark] class BlockManager(
   // Whether to compress RDD partitions that are stored serialized
   val compressRdds = conf.getBoolean("spark.rdd.compress", false)
   // Whether to compress shuffle output temporarily spilled to disk
-  val compressExternalShuffle = conf.getBoolean("spark.shuffle.external.compress", false)
+  val compressShuffleSpill = conf.getBoolean("spark.shuffle.spill.compress", false)
 
   val heartBeatFrequency = BlockManager.getHeartBeatFrequency(conf)
 
@@ -792,7 +792,7 @@ private[spark] class BlockManager(
     case ShuffleBlockId(_, _, _) => compressShuffle
     case BroadcastBlockId(_) => compressBroadcast
     case RDDBlockId(_, _) => compressRdds
-    case TempBlockId(_) => compressExternalShuffle
+    case TempBlockId(_) => compressShuffleSpill
     case _ => false
   }
 
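The dispatch just above picks a compression flag per block-id shape; the rename only touches the temp-block case. A hedged, self-contained sketch of that pattern (the BlockTag hierarchy here is illustrative, not Spark's BlockId classes):

import org.apache.spark.SparkConf

sealed trait BlockTag
case object Shuffle extends BlockTag
case object Broadcast extends BlockTag
case object Rdd extends BlockTag
case object TempSpill extends BlockTag

// Mirrors the match in BlockManager: each block kind has its own flag,
// and spilled temp blocks now read spark.shuffle.spill.compress.
def shouldCompress(tag: BlockTag, conf: SparkConf): Boolean = tag match {
  case Shuffle   => conf.getBoolean("spark.shuffle.compress", true)
  case Broadcast => conf.getBoolean("spark.broadcast.compress", true)
  case Rdd       => conf.getBoolean("spark.rdd.compress", false)
  case TempSpill => conf.getBoolean("spark.shuffle.spill.compress", false)
}
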
diff --git a/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala b/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala
index fd17413952cd612d05252b01c47cd24a9dda4b60..2eef6a7c1038d14508b8c951ae86d86efb2a4828 100644
--- a/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala
+++ b/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala
@@ -87,9 +87,9 @@ private[spark] class ExternalAppendOnlyMap[K, V, C](
   // batches, with each batch using its own serialization stream. This cuts down on the size
   // of reference-tracking maps constructed when deserializing a stream.
   //
-  // NOTE: Setting this too low can cause excess copying when serializing, since some serailizers
+  // NOTE: Setting this too low can cause excess copying when serializing, since some serializers
   // grow internal data structures by growing + copying every time the number of objects doubles.
-  private val serializerBatchSize = sparkConf.getLong("spark.shuffle.external.batchSize", 10000)
+  private val serializerBatchSize = sparkConf.getLong("spark.shuffle.spill.batchSize", 10000)
 
   // How many times we have spilled so far
   private var spillCount = 0
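
The batch-size comment above is the motivation behind the renamed key: spilled records are serialized in fixed-size batches so that deserialization never builds one giant reference-tracking map. A sketch of tuning it (5000 is illustrative, not a recommendation):

import org.apache.spark.SparkConf

// Halve the 10000-object default. Smaller batches mean smaller
// reference-tracking maps on read, at the cost of extra copying in
// serializers that grow internal buffers by doubling.
val conf = new SparkConf().set("spark.shuffle.spill.batchSize", "5000")
val batchSize = conf.getLong("spark.shuffle.spill.batchSize", 10000)
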
diff --git a/docs/configuration.md b/docs/configuration.md
index 350e3145c033e0d18e9df428bca96daf5f75a758..be06bd19be110bf81e2deba93276bc43e9584ad5 100644
--- a/docs/configuration.md
+++ b/docs/configuration.md
@@ -116,7 +116,7 @@ Apart from these, the following properties are also available, and may be useful
   <td>0.3</td>
   <td>
     Fraction of Java heap to use for aggregation and cogroups during shuffles, if
-    <code>spark.shuffle.external</code> is true. At any given time, the collective size of
+    <code>spark.shuffle.spill</code> is true. At any given time, the collective size of
     all in-memory maps used for shuffles is bounded by this limit, beyond which the contents will
    begin to spill to disk. If spills are frequent, consider increasing this value at the expense of
     <code>spark.storage.memoryFraction</code>.
@@ -155,7 +155,7 @@ Apart from these, the following properties are also available, and may be useful
   </td>
 </tr>
 <tr>
-  <td>spark.shuffle.external.compress</td>
+  <td>spark.shuffle.spill.compress</td>
   <td>false</td>
   <td>
     Whether to compress data spilled during shuffles.
@@ -395,7 +395,7 @@ Apart from these, the following properties are also available, and may be useful
   </td>
 </tr>
 <tr>
-  <td>spark.shuffle.external</td>
+  <td>spark.shuffle.spill</td>
   <td>true</td>
   <td>
     If set to "true", limits the amount of memory used during reduces by spilling data out to disk. This spilling
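
Taken together, the docs now describe three renamed keys. A hedged end-to-end sketch setting all of them to their documented defaults (values are illustrative; any omitted key falls back to the same default in code):

import org.apache.spark.SparkConf

val conf = new SparkConf()
  .set("spark.shuffle.spill", "true")            // enable spilling
  .set("spark.shuffle.spill.compress", "false")  // leave spills uncompressed
  .set("spark.shuffle.spill.batchSize", "10000") // objects per spill batch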