diff --git a/core/src/main/scala/spark/RDD.scala b/core/src/main/scala/spark/RDD.scala
index 4d984591bd633260c2824144f5813c69ff7c59ca..17869fb31b69055b12f93e979756438737410495 100644
--- a/core/src/main/scala/spark/RDD.scala
+++ b/core/src/main/scala/spark/RDD.scala
@@ -188,7 +188,7 @@ abstract class RDD[T: ClassManifest](@transient sc: SparkContext) extends Serial
     map(x => (x, null)).reduceByKey((x, y) => x, numSplits).map(_._1)
 
   /**
-   * Return a sampled subset of this RDD.  
+   * Return a sampled subset of this RDD.
    */
   def sample(withReplacement: Boolean, fraction: Double, seed: Int): RDD[T] =
     new SampledRDD(this, withReplacement, fraction, seed)
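
For reference, a quick sketch of how this `sample` signature is typically invoked; the RDD contents and seed below are hypothetical, and `sc` is assumed to be an existing SparkContext:

    val nums = sc.parallelize(1 to 100)
    // Keep roughly 10% of the elements, without replacement, using a fixed seed
    val sampled = nums.sample(withReplacement = false, fraction = 0.1, seed = 42)
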
@@ -305,7 +305,7 @@ abstract class RDD[T: ClassManifest](@transient sc: SparkContext) extends Serial
     val results = sc.runJob(this, (iter: Iterator[T]) => iter.toArray)
     Array.concat(results: _*)
   }
-  
+
   /**
    * Return an array that contains all of the elements in this RDD.
    */
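
The `runJob` call above is what backs `collect`: each partition is materialized as an array on the workers, and the per-partition arrays are concatenated on the driver. A minimal usage sketch, assuming a live `sc`:

    val data = sc.parallelize(Seq(1, 2, 3, 4), 2)
    // Pulls every element back to the driver; appropriate for small RDDs only
    val all: Array[Int] = data.collect()
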
@@ -471,7 +471,7 @@ abstract class RDD[T: ClassManifest](@transient sc: SparkContext) extends Serial
   }
 
   /**
-   * Save this RDD as a text file, using string representations of elements. 
+   * Save this RDD as a text file, using string representations of elements.
    */
   def saveAsTextFile(path: String) {
     this.map(x => (NullWritable.get(), new Text(x.toString)))
@@ -479,7 +479,7 @@ abstract class RDD[T: ClassManifest](@transient sc: SparkContext) extends Serial
   }
 
   /**
-   * Save this RDD as a SequenceFile of serialized objects. 
+   * Save this RDD as a SequenceFile of serialized objects.
    */
   def saveAsObjectFile(path: String) {
     this.mapPartitions(iter => iter.grouped(10).map(_.toArray))
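
Taken together, the two save methods above write either plain text (one `toString` per line, via NullWritable/Text) or a SequenceFile of serialized 10-element batches. A round-trip sketch; the paths are placeholders, and reading back assumes the matching `SparkContext.objectFile` reader:

    val nums = sc.parallelize(1 to 30)
    nums.saveAsTextFile("/tmp/nums-text")   // one text part-file per partition
    nums.saveAsObjectFile("/tmp/nums-obj")  // elements serialized in batches of 10
    val restored = sc.objectFile[Int]("/tmp/nums-obj")
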
diff --git a/core/src/main/scala/spark/SparkContext.scala b/core/src/main/scala/spark/SparkContext.scala
index 84fc541f82ef543f41d85cb8de91433f16bdd9a7..47e002201ba3d79986af6649f05e052ecc059745 100644
--- a/core/src/main/scala/spark/SparkContext.scala
+++ b/core/src/main/scala/spark/SparkContext.scala
@@ -54,15 +54,21 @@ import spark.storage.BlockManagerMaster
  * Main entry point for Spark functionality. A SparkContext represents the connection to a Spark
  * cluster, and can be used to create RDDs, accumulators and broadcast variables on that cluster.
  *
+ * @constructor Returns a new SparkContext.
  * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
  * @param jobName A name for your job, to display on the cluster web UI
- * @param sparkHome Location where Spark is instaled on cluster nodes
+ * @param sparkHome Location where Spark is installed on cluster nodes
  * @param jars Collection of JARs to send to the cluster. These can be paths on the local file
  *             system or HDFS, HTTP, HTTPS, or FTP URLs.
  */
 class SparkContext(master: String, jobName: String, val sparkHome: String, val jars: Seq[String])
   extends Logging {
 
+  /**
+   * Alternative constructor that uses default values for sparkHome and jars.
+   * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
+   * @param jobName A name for your job, to display on the cluster web UI
+   */
   def this(master: String, jobName: String) = this(master, jobName, null, Nil)
 
   // Ensure logging is initialized before we spawn any threads
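
For completeness, the two documented ways of constructing a SparkContext; the master URL, job name, sparkHome, and jar path below are placeholder values:

    // Two-argument form: sparkHome defaults to null and jars to Nil
    val sc = new SparkContext("local[4]", "My Job")

    // Full form, pointing at the Spark installation and shipping a jar to the cluster
    val sc2 = new SparkContext("spark://host:7077", "My Job",
      "/opt/spark", Seq("target/my-job.jar"))
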