diff --git a/examples/src/main/scala/spark/examples/HBaseTest.scala b/examples/src/main/scala/spark/examples/HBaseTest.scala
index 37aedde30245700d035b340842e4d98f75801d39..d94b25828d0171a6d7a165f557bba65280e4fc44 100644
--- a/examples/src/main/scala/spark/examples/HBaseTest.scala
+++ b/examples/src/main/scala/spark/examples/HBaseTest.scala
@@ -12,6 +12,9 @@ object HBaseTest {
       System.getenv("SPARK_HOME"), Seq(System.getenv("SPARK_EXAMPLES_JAR")))
 
     val conf = HBaseConfiguration.create()
+
+    // Other options for configuring scan behavior are available. More information at
+    // http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/TableInputFormat.html
     conf.set(TableInputFormat.INPUT_TABLE, args(1))
 
     // Initialize hBase table if necessary
@@ -22,8 +25,8 @@ object HBaseTest {
     }
 
     val hBaseRDD = new NewHadoopRDD(sc, classOf[TableInputFormat], 
-                        classOf[org.apache.hadoop.hbase.io.ImmutableBytesWritable],
-                        classOf[org.apache.hadoop.hbase.client.Result], conf)
+      classOf[org.apache.hadoop.hbase.io.ImmutableBytesWritable],
+      classOf[org.apache.hadoop.hbase.client.Result], conf)
 
     hBaseRDD.count()