diff --git a/mllib/src/main/scala/spark/mllib/regression/LogisticRegression.scala b/mllib/src/main/scala/spark/mllib/regression/LogisticRegression.scala
index 448ab9dce9017c298c78f014e2c3475563407e5d..e4db7bb9b736fad3287436cecce1403299659e3e 100644
--- a/mllib/src/main/scala/spark/mllib/regression/LogisticRegression.scala
+++ b/mllib/src/main/scala/spark/mllib/regression/LogisticRegression.scala
@@ -150,7 +150,7 @@ object LogisticRegression {
       System.exit(1)
     }
     val sc = new SparkContext(args(0), "LogisticRegression")
-    val data = MLUtils.loadData(sc, args(1))
+    val data = MLUtils.loadLabeledData(sc, args(1))
     val model = LogisticRegression.train(data, args(3).toInt, args(2).toDouble)
 
     sc.stop()
diff --git a/mllib/src/main/scala/spark/mllib/regression/LogisticRegressionGenerator.scala b/mllib/src/main/scala/spark/mllib/regression/LogisticRegressionGenerator.scala
index 9f6abab70b99e8cea51c865f282bed5adf48109c..6e7c023bac79bcaeddef7b7c06afe72901605065 100644
--- a/mllib/src/main/scala/spark/mllib/regression/LogisticRegressionGenerator.scala
+++ b/mllib/src/main/scala/spark/mllib/regression/LogisticRegressionGenerator.scala
@@ -35,7 +35,7 @@ object LogisticRegressionGenerator {
       (y, x)
     }
 
-    MLUtils.saveData(data, outputPath)
+    MLUtils.saveLabeledData(data, outputPath)
     sc.stop()
   }
 }
diff --git a/mllib/src/main/scala/spark/mllib/regression/RidgeRegression.scala b/mllib/src/main/scala/spark/mllib/regression/RidgeRegression.scala
index f66025bc0bb97f521a79009df4fba9f0734b9ab1..5f813df402afa4f4a8fe9ec265d52382c0136f7c 100644
--- a/mllib/src/main/scala/spark/mllib/regression/RidgeRegression.scala
+++ b/mllib/src/main/scala/spark/mllib/regression/RidgeRegression.scala
@@ -187,7 +187,7 @@ object RidgeRegression {
       System.exit(1)
     }
     val sc = new SparkContext(args(0), "RidgeRegression")
-    val data = MLUtils.loadData(sc, args(1))
+    val data = MLUtils.loadLabeledData(sc, args(1))
     val model = RidgeRegression.train(data, 0, 1000)
     sc.stop()
   }
diff --git a/mllib/src/main/scala/spark/mllib/regression/RidgeRegressionGenerator.scala b/mllib/src/main/scala/spark/mllib/regression/RidgeRegressionGenerator.scala
index c9ac4a8b07c733881aefc1a2a0d2b023d05e0fcb..b83f505d8e41a5896b64566f1a7f50c2f448f574 100644
--- a/mllib/src/main/scala/spark/mllib/regression/RidgeRegressionGenerator.scala
+++ b/mllib/src/main/scala/spark/mllib/regression/RidgeRegressionGenerator.scala
@@ -49,7 +49,7 @@ object RidgeRegressionGenerator {
       }
     }
 
-    MLUtils.saveData(data, outputPath)
+    MLUtils.saveLabeledData(data, outputPath)
     sc.stop()
   }
 }
diff --git a/mllib/src/main/scala/spark/mllib/util/MLUtils.scala b/mllib/src/main/scala/spark/mllib/util/MLUtils.scala
index 0a4a037c7139aa6bc03e398e4b2e562a687d5d42..08a031dded1783ff260f0c78aeafc7da96df8072 100644
--- a/mllib/src/main/scala/spark/mllib/util/MLUtils.scala
+++ b/mllib/src/main/scala/spark/mllib/util/MLUtils.scala
@@ -19,7 +19,7 @@ object MLUtils {
    * @return An RDD of tuples. For each tuple, the first element is the label, and the second
    *         element represents the feature values (an array of Double).
    */
-  def loadData(sc: SparkContext, dir: String): RDD[(Double, Array[Double])] = {
+  def loadLabeledData(sc: SparkContext, dir: String): RDD[(Double, Array[Double])] = {
     sc.textFile(dir).map { line =>
       val parts = line.split(",")
       val label = parts(0).toDouble
@@ -28,7 +28,7 @@ object MLUtils {
     }
   }
 
-  def saveData(data: RDD[(Double, Array[Double])], dir: String) {
+  def saveLabeledData(data: RDD[(Double, Array[Double])], dir: String) {
     val dataStr = data.map(x => x._1 + "," + x._2.mkString(" "))
     dataStr.saveAsTextFile(dir)
   }