diff --git a/src/scala/spark/HdfsFile.scala b/src/scala/spark/HdfsFile.scala
index 595386fceb07a179fc7149ac43793ef2cff75017..886272a8edd14131f096b3ef2a233a901876db1c 100644
--- a/src/scala/spark/HdfsFile.scala
+++ b/src/scala/spark/HdfsFile.scala
@@ -14,6 +14,8 @@ import org.apache.hadoop.mapred.Reporter
 @serializable class HdfsSplit(@transient s: InputSplit)
 extends Split { 
   val inputSplit = new SerializableWritable[InputSplit](s)
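+  // Stable, content-based string form; CachedRDD builds its cache key from split.toString (see RDD.scala below)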
+  override def toString = inputSplit.toString
 }
 
 class HdfsTextFile(sc: SparkContext, path: String)
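
The toString override matters because CachedRDD (second hunk, below) builds its
cache key from split.toString. A stand-alone sketch of the failure mode this
avoids; the class names here are hypothetical, not part of the patch:

    // Without an override, a split inherits java.lang.Object.toString, which
    // embeds an identity hash (e.g. "spark.HdfsSplit@1b6d3586"). That string
    // differs for every deserialized copy of the same split, so a cache key
    // derived from it would never match between tasks.
    class UnstableSplit

    // Mirroring Hadoop's FileSplit, whose toString is "path:start+length":
    // the same logical split always prints the same key fragment.
    class StableSplit(path: String, start: Long, length: Long) {
      override def toString = path + ":" + start + "+" + length
    }

    object ToStringDemo {
      def main(args: Array[String]): Unit = {
        println(new UnstableSplit)                     // e.g. UnstableSplit@1b6d3586, varies
        println(new StableSplit("part-00000", 0, 64))  // "part-00000:0+64", always identical
      }
    }
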
diff --git a/src/scala/spark/RDD.scala b/src/scala/spark/RDD.scala
index 4d0c8c671160763127f7fab9a2f52a6f2bb2d712..181f7e8b030fe08eed5757a51e48088cb0f813f1 100644
--- a/src/scala/spark/RDD.scala
+++ b/src/scala/spark/RDD.scala
@@ -198,6 +198,7 @@ extends RDD[T](prev.sparkContext) with Logging {
   
   override def iterator(split: Split): Iterator[T] = {
     val key = id + "::" + split.toString
+    logInfo("CachedRDD split key is " + key)
     val cache = CachedRDD.cache
     val loading = CachedRDD.loading
     val cachedVal = cache.get(key)
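
For context, the lookup this log line instruments follows a check-cache /
mark-loading pattern: a shared map keyed by "rddId::split", plus a "loading"
set so that only one thread computes a missing partition. A minimal,
self-contained sketch under assumed names; the real CachedRDD internals may
differ:

    import java.util.concurrent.ConcurrentHashMap
    import scala.collection.mutable.HashSet

    object CacheSketch {
      val cache = new ConcurrentHashMap[String, Array[AnyRef]]()
      val loading = new HashSet[String]

      // Return the cached partition for `key`, computing it at most once even
      // when several threads ask for the same split concurrently.
      def getOrCompute(key: String, compute: () => Array[AnyRef]): Array[AnyRef] = {
        val cached = cache.get(key)
        if (cached != null) return cached
        loading.synchronized {
          // Another thread is materializing this split: wait for it to finish.
          while (loading.contains(key)) loading.wait()
          val nowCached = cache.get(key)
          if (nowCached != null) return nowCached
          loading.add(key)  // claim the split for this thread
        }
        val result = compute()
        cache.put(key, result)
        loading.synchronized {
          loading.remove(key)
          loading.notifyAll()  // wake waiters; they will now hit the cache
        }
        result
      }
    }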