diff --git a/core/src/main/java/spark/network/netty/FileClient.java b/core/src/main/java/spark/network/netty/FileClient.java
index 517772202fe7a613ea473ae289fb2481e9eefa72..a4bb4bc701af98536c1840ab9d4c263fa893335e 100644
--- a/core/src/main/java/spark/network/netty/FileClient.java
+++ b/core/src/main/java/spark/network/netty/FileClient.java
@@ -30,7 +30,7 @@ class FileClient {
       .channel(OioSocketChannel.class)
       .option(ChannelOption.SO_KEEPALIVE, true)
       .option(ChannelOption.TCP_NODELAY, true)
-      .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, connectTimeout) // Disable connect timeout
+      .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, connectTimeout)
       .handler(new FileClientChannelInitializer(handler));
   }
 
diff --git a/core/src/main/scala/spark/network/netty/ShuffleCopier.scala b/core/src/main/scala/spark/network/netty/ShuffleCopier.scala
index afb2cdbb3a2d9770adf7a1ca87e69d03d4218f91..8d5194a7379eb52c95b9dde84d025a423d69119e 100644
--- a/core/src/main/scala/spark/network/netty/ShuffleCopier.scala
+++ b/core/src/main/scala/spark/network/netty/ShuffleCopier.scala
@@ -18,8 +18,9 @@ private[spark] class ShuffleCopier extends Logging {
       resultCollectCallback: (String, Long, ByteBuf) => Unit) {
 
     val handler = new ShuffleCopier.ShuffleClientHandler(resultCollectCallback)
-    val fc = new FileClient(handler,
-                            System.getProperty("spark.shuffle.netty.connect.timeout", "60000").toInt)
+    val connectTimeout = System.getProperty("spark.shuffle.netty.connect.timeout", "60000").toInt
+    val fc = new FileClient(handler, connectTimeout)
+
     try {
       fc.init()
       fc.connect(host, port)
@@ -29,8 +30,7 @@ private[spark] class ShuffleCopier extends Logging {
     } catch {
       // Handle any socket-related exceptions in FileClient
       case e: Exception => {
-        logError("Shuffle copy of block " + blockId + " from " + host + ":" + port + 
-          " failed", e)
+        logError("Shuffle copy of block " + blockId + " from " + host + ":" + port + " failed", e)
         handler.handleError(blockId)
       }
     }
diff --git a/core/src/main/scala/spark/storage/BlockFetcherIterator.scala b/core/src/main/scala/spark/storage/BlockFetcherIterator.scala
index bb78207c9f3c2d924a12e909c524b24870572c5a..bec876213e9f476e2ca189da2df49d2e8c393840 100644
--- a/core/src/main/scala/spark/storage/BlockFetcherIterator.scala
+++ b/core/src/main/scala/spark/storage/BlockFetcherIterator.scala
@@ -322,11 +322,7 @@ object BlockFetcherIterator {
     override def next(): (String, Option[Iterator[Any]]) = {
       resultsGotten += 1
       val result = results.take()
-      // if all the results has been retrieved, shutdown the copiers
-      // NO need to stop the copiers if we got all the blocks ?
-      // if (resultsGotten == _numBlocksToFetch && copiers != null) {
-      //   stopCopiers()
-      // }
+      // If all the results have been retrieved, the copiers will exit automatically
       (result.blockId, if (result.failed) None else Some(result.deserialize()))
     }
   }
diff --git a/core/src/main/scala/spark/storage/DiskStore.scala b/core/src/main/scala/spark/storage/DiskStore.scala
index 0af6e4a35917fd8551f15ab6c6d47c391824d4b2..15ab8401554bf42f5b067939ab636e3075d29d5d 100644
--- a/core/src/main/scala/spark/storage/DiskStore.scala
+++ b/core/src/main/scala/spark/storage/DiskStore.scala
@@ -212,10 +212,9 @@ private class DiskStore(blockManager: BlockManager, rootDirs: String)
     val file = getFile(blockId)
     if (!allowAppendExisting && file.exists()) {
       // NOTE(shivaram): Delete the file if it exists. This might happen if a ShuffleMap task
-      // was rescheduled on the same machine as the old task ?
+      // was rescheduled on the same machine as the old task.
       logWarning("File for block " + blockId + " already exists on disk: " + file + ". Deleting")
       file.delete()
-      // throw new Exception("File for block " + blockId + " already exists on disk: " + file)
     }
     file
   }
diff --git a/core/src/test/scala/spark/ShuffleSuite.scala b/core/src/test/scala/spark/ShuffleSuite.scala
index 33b02fff801445a678b5ab4dfde1014bc8d59f44..1916885a7383748c13a79e83466e0033e06b02f0 100644
--- a/core/src/test/scala/spark/ShuffleSuite.scala
+++ b/core/src/test/scala/spark/ShuffleSuite.scala
@@ -376,8 +376,7 @@ class ShuffleSuite extends FunSuite with ShouldMatchers with LocalSparkContext {
     val a = sc.parallelize(1 to 4, NUM_BLOCKS)
     val b = a.map(x => (x, x*2))
 
-    // NOTE: The default Java serializer doesn't create zero-sized blocks.
-    //       So, use Kryo
+    // NOTE: The default Java serializer should create zero-sized blocks
     val c = new ShuffledRDD(b, new HashPartitioner(10))
 
     val shuffleId = c.dependencies.head.asInstanceOf[ShuffleDependency[Int, Int]].shuffleId