From d1eee44a0312e89dea5da803dc3f87e25a024c82 Mon Sep 17 00:00:00 2001
From: Tathagata Das <tathagata.das1565@gmail.com>
Date: Fri, 27 Jul 2012 18:33:32 +0000
Subject: [PATCH] Fixed dropped-entry reporting in BoundedMemoryCache to handle both RDD and broadcast dataset IDs.
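
reportEntryDropped now unpacks the dataset ID as a (keySpaceId, innerId) pair and
branches on the inner ID: RDD entries (Int IDs) are reported to the CacheTracker,
while broadcast entries (java.util.UUID IDs) are left alone for now. Below is a
minimal standalone sketch of that dispatch, not part of the patch itself; the
object and method names (DropDispatchSketch, describeDrop) and the sample IDs are
illustrative assumptions, only the pair shape and the Int / java.util.UUID match
mirror the patched code:

    import java.util.UUID

    // Hypothetical helper for illustration only, not part of this patch.
    object DropDispatchSketch {
      // Unpack the (keySpaceId, innerId) pair and branch on the inner ID's type,
      // mirroring the structure of the new reportEntryDropped logic.
      def describeDrop(datasetId: Any, partition: Int): String = {
        val (_, innerId) = datasetId.asInstanceOf[(Any, Any)]
        innerId match {
          case rddId: Int => s"notify CacheTracker: dropped RDD $rddId, partition $partition"
          case uuid: UUID => s"broadcast $uuid dropped; nothing to notify yet"
          case _          => "unknown dataset ID type; ignored"
        }
      }

      def main(args: Array[String]): Unit = {
        println(describeDrop((0, 3), 1))                 // RDD-style ID
        println(describeDrop((0, UUID.randomUUID()), 0)) // broadcast-style ID
      }
    }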

---
 core/src/main/scala/spark/BoundedMemoryCache.scala | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/core/src/main/scala/spark/BoundedMemoryCache.scala b/core/src/main/scala/spark/BoundedMemoryCache.scala
index 5ea5c303bd..6fe0b94297 100644
--- a/core/src/main/scala/spark/BoundedMemoryCache.scala
+++ b/core/src/main/scala/spark/BoundedMemoryCache.scala
@@ -91,10 +91,14 @@ class BoundedMemoryCache(maxBytes: Long) extends Cache with Logging {
   protected def reportEntryDropped(datasetId: Any, partition: Int, entry: Entry) {
     logInfo("Dropping key (%s, %d) of size %d to make space".format(datasetId, partition, entry.size))
     // TODO: remove BoundedMemoryCache
-    datasetId match {
-      case rddDatasetId: (Int, Int) =>
-        SparkEnv.get.cacheTracker.dropEntry(rddDatasetId._2, partition)
-      case _ =>
+    // datasetId is a (keySpaceId, innerId) pair; match on the inner ID type below
+    val (keySpaceId, innerDatasetId) = datasetId.asInstanceOf[(Any, Any)]
+    innerDatasetId match {
+      case rddId: Int =>
+        SparkEnv.get.cacheTracker.dropEntry(rddId, partition)
+      case broadcastUUID: java.util.UUID =>
+        // TODO: Maybe something should be done when a broadcast variable falls out of the cache
+      case _ =>
     }    
   }
 }
-- 
GitLab