diff --git a/core/src/main/scala/spark/CacheTracker.scala b/core/src/main/scala/spark/CacheTracker.scala
index c8c4063cadd61dd684e3e64141d8a2a85294d1b1..04c26b2e40faeaec2a30700b1e7f6284f8f2c768 100644
--- a/core/src/main/scala/spark/CacheTracker.scala
+++ b/core/src/main/scala/spark/CacheTracker.scala
@@ -204,17 +204,11 @@ private[spark] class CacheTracker(actorSystem: ActorSystem, isMaster: Boolean, b
       }
       try {
         // If we got here, we have to load the split
-        // Tell the master that we're doing so
-        //val host = System.getProperty("spark.hostname", Utils.localHostName)
-        //val future = trackerActor !! AddedToCache(rdd.id, split.index, host)
-        // TODO: fetch any remote copy of the split that may be available
-        // TODO: also register a listener for when it unloads
         val elements = new ArrayBuffer[Any]
         logInfo("Computing partition " + split)
         elements ++= rdd.compute(split, context)
         // Try to put this block in the blockManager
         blockManager.put(key, elements, storageLevel, true)
-        //future.apply() // Wait for the reply from the cache tracker
         return elements.iterator.asInstanceOf[Iterator[T]]
       } finally {
         loading.synchronized {