From fddcdf87c919cadc5069b9ee3fc45396a24e8f9c Mon Sep 17 00:00:00 2001
From: Mosharaf Chowdhury <mosharaf@mosharaf-ubuntu.(none)>
Date: Thu, 16 Dec 2010 11:58:00 -0800
Subject: [PATCH] Added a small description of how ParallelLFS works.

---
 src/scala/spark/BasicLocalFileShuffle.scala    | 2 +-
 src/scala/spark/ParallelLocalFileShuffle.scala | 8 +++++---
 2 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/src/scala/spark/BasicLocalFileShuffle.scala b/src/scala/spark/BasicLocalFileShuffle.scala
index aa83e5cf8c..57e8abf964 100644
--- a/src/scala/spark/BasicLocalFileShuffle.scala
+++ b/src/scala/spark/BasicLocalFileShuffle.scala
@@ -9,7 +9,7 @@ import scala.collection.mutable.{ArrayBuffer, HashMap}
 
 
 /**
- * A simple implementation of shuffle using local files served through HTTP.
+ * A basic implementation of shuffle using local files served through HTTP.
  *
  * TODO: Add support for compression when spark.compress is set to true.
  */
diff --git a/src/scala/spark/ParallelLocalFileShuffle.scala b/src/scala/spark/ParallelLocalFileShuffle.scala
index af461e48c0..7c957aba42 100644
--- a/src/scala/spark/ParallelLocalFileShuffle.scala
+++ b/src/scala/spark/ParallelLocalFileShuffle.scala
@@ -10,7 +10,9 @@ import scala.collection.mutable.{ArrayBuffer, HashMap}
 
 
 /**
- * A simple implementation of shuffle using local files served through HTTP.
+ * An implementation of shuffle using local files served through HTTP, where
+ * receivers open simultaneous connections to multiple servers; the maximum
+ * is set by the 'spark.parallelLocalFileShuffle.maxConnections' config option.
  *
  * TODO: Add support for compression when spark.compress is set to true.
  */
@@ -221,12 +223,12 @@ object ParallelLocalFileShuffle extends Logging {
     if (!initialized) {
       // Load config parameters
       MinKnockInterval_ = System.getProperty (
-          "spark.parallelLocalFileShuffle.minKnockInterval", "1000").toInt
+        "spark.parallelLocalFileShuffle.minKnockInterval", "1000").toInt
       MaxKnockInterval_ = System.getProperty (
         "spark.parallelLocalFileShuffle.maxKnockInterval", "5000").toInt
 
       MaxConnections_ = System.getProperty (
-          "spark.parallelLocalFileShuffle.maxConnections", "4").toInt
+        "spark.parallelLocalFileShuffle.maxConnections", "4").toInt
       
       // TODO: localDir should be created by some mechanism common to Spark
       // so that it can be shared among shuffle, broadcast, etc
-- 
GitLab