diff --git a/examples/src/main/java/org/apache/spark/examples/JavaHdfsLR.java b/examples/src/main/java/org/apache/spark/examples/JavaHdfsLR.java
index 31a79ddd3fff1d64d6f92c46cb269a5afad69d7c..f64155ce3c0cf3c847414e6089a0b0da57f851bf 100644
--- a/examples/src/main/java/org/apache/spark/examples/JavaHdfsLR.java
+++ b/examples/src/main/java/org/apache/spark/examples/JavaHdfsLR.java
@@ -32,8 +32,7 @@ import java.util.regex.Pattern;
  * Logistic regression based classification.
  *
  * This is an example implementation for learning how to use Spark. For more conventional use,
- * please refer to either org.apache.spark.mllib.classification.LogisticRegressionWithSGD or
- * org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS based on your needs.
+ * please refer to org.apache.spark.ml.classification.LogisticRegression.
  */
 public final class JavaHdfsLR {
 
@@ -43,8 +42,7 @@ public final class JavaHdfsLR {
   static void showWarning() {
     String warning = "WARN: This is a naive implementation of Logistic Regression " +
             "and is given as an example!\n" +
-            "Please use either org.apache.spark.mllib.classification.LogisticRegressionWithSGD " +
-            "or org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS " +
+            "Please use org.apache.spark.ml.classification.LogisticRegression " +
             "for more conventional use.";
     System.err.println(warning);
   }
diff --git a/examples/src/main/python/als.py b/examples/src/main/python/als.py
index 205ca02962bee4282261ecfb141406429b39bf73..f07020b50397b0426e20b1fac16d22c76584ffdb 100755
--- a/examples/src/main/python/als.py
+++ b/examples/src/main/python/als.py
@@ -17,7 +17,7 @@
 
 """
 This is an example implementation of ALS for learning how to use Spark. Please refer to
-ALS in pyspark.mllib.recommendation for more conventional use.
+pyspark.ml.recommendation.ALS for more conventional use.
 
 This example requires numpy (http://www.numpy.org/)
 """
@@ -59,7 +59,7 @@ if __name__ == "__main__":
     """
 
     print("""WARN: This is a naive implementation of ALS and is given as an
-      example. Please use the ALS method found in pyspark.mllib.recommendation for more
+      example. Please use pyspark.ml.recommendation.ALS for more
       conventional use.""", file=sys.stderr)
 
     sc = SparkContext(appName="PythonALS")
diff --git a/examples/src/main/python/kmeans.py b/examples/src/main/python/kmeans.py
index 0ea7cfb7025a012d42b9cf55299549806b73521c..3426e491dc74adc6ace71f815fd3a6915d6f7432 100755
--- a/examples/src/main/python/kmeans.py
+++ b/examples/src/main/python/kmeans.py
@@ -17,8 +17,8 @@
 
 """
 The K-means algorithm written from scratch against PySpark. In practice,
-one may prefer to use the KMeans algorithm in MLlib, as shown in
-examples/src/main/python/mllib/kmeans.py.
+one may prefer to use the KMeans algorithm in ML, as shown in
+examples/src/main/python/ml/kmeans_example.py.
 
 This example requires NumPy (http://www.numpy.org/).
 """
@@ -52,8 +52,8 @@ if __name__ == "__main__":
         exit(-1)
 
     print("""WARN: This is a naive implementation of KMeans Clustering and is given
-       as an example! Please refer to examples/src/main/python/mllib/kmeans.py for an example on
-       how to use MLlib's KMeans implementation.""", file=sys.stderr)
+       as an example! Please refer to examples/src/main/python/ml/kmeans_example.py for an
+       example on how to use ML's KMeans implementation.""", file=sys.stderr)
 
     sc = SparkContext(appName="PythonKMeans")
     lines = sc.textFile(sys.argv[1])
diff --git a/examples/src/main/python/logistic_regression.py b/examples/src/main/python/logistic_regression.py
index b318b7d87bfdc6e0af5190a5f792d279b324080f..7d33be7e81d73e73b1ac45594b2a553be9ba3edf 100755
--- a/examples/src/main/python/logistic_regression.py
+++ b/examples/src/main/python/logistic_regression.py
@@ -20,7 +20,7 @@ A logistic regression implementation that uses NumPy (http://www.numpy.org)
 to act on batches of input data using efficient matrix operations.
 
 In practice, one may prefer to use the LogisticRegression algorithm in
-MLlib, as shown in examples/src/main/python/mllib/logistic_regression.py.
+ML, as shown in examples/src/main/python/ml/logistic_regression_with_elastic_net.py.
 """
 from __future__ import print_function
 
@@ -51,8 +51,9 @@ if __name__ == "__main__":
         exit(-1)
 
     print("""WARN: This is a naive implementation of Logistic Regression and is
-      given as an example! Please refer to examples/src/main/python/mllib/logistic_regression.py
-      to see how MLlib's implementation is used.""", file=sys.stderr)
+      given as an example!
+      Please refer to examples/src/main/python/ml/logistic_regression_with_elastic_net.py
+      to see how ML's implementation is used.""", file=sys.stderr)
 
     sc = SparkContext(appName="PythonLR")
     points = sc.textFile(sys.argv[1]).mapPartitions(readPointBatch).cache()
diff --git a/examples/src/main/scala/org/apache/spark/examples/LocalALS.scala b/examples/src/main/scala/org/apache/spark/examples/LocalALS.scala
index fa1010195551ac1aa872c9573079b0ecd691c462..97aefac025e5556d7f23ccf6da019d33907ecce4 100644
--- a/examples/src/main/scala/org/apache/spark/examples/LocalALS.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/LocalALS.scala
@@ -24,7 +24,7 @@ import org.apache.commons.math3.linear._
  * Alternating least squares matrix factorization.
  *
  * This is an example implementation for learning how to use Spark. For more conventional use,
- * please refer to org.apache.spark.mllib.recommendation.ALS
+ * please refer to org.apache.spark.ml.recommendation.ALS.
  */
 object LocalALS {
 
@@ -96,7 +96,7 @@ object LocalALS {
   def showWarning() {
     System.err.println(
       """WARN: This is a naive implementation of ALS and is given as an example!
-        |Please use the ALS method found in org.apache.spark.mllib.recommendation
+        |Please use org.apache.spark.ml.recommendation.ALS
         |for more conventional use.
       """.stripMargin)
   }
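
For context, the DataFrame-based API that these warnings now point to is used roughly as shown in the sketch below; this is a minimal sketch only, assuming a toy in-memory ratings set, and the userId/movieId/rating column names are illustrative rather than required:

    import org.apache.spark.ml.recommendation.ALS
    import org.apache.spark.sql.SparkSession

    object MLALSSketch {
      // Illustrative rating record; any DataFrame with integer user/item columns
      // and a numeric rating column works.
      case class Rating(userId: Int, movieId: Int, rating: Float)

      def main(args: Array[String]): Unit = {
        val spark = SparkSession.builder.appName("MLALSSketch").getOrCreate()
        import spark.implicits._

        // Tiny in-memory dataset standing in for real ratings data.
        val ratings = Seq(
          Rating(0, 0, 4.0f), Rating(0, 1, 2.0f),
          Rating(1, 0, 3.0f), Rating(1, 2, 5.0f)
        ).toDF()

        val als = new ALS()
          .setMaxIter(10)
          .setRegParam(0.1)
          .setUserCol("userId")
          .setItemCol("movieId")
          .setRatingCol("rating")

        val model = als.fit(ratings)

        // transform() adds a "prediction" column with the reconstructed ratings.
        model.transform(ratings).show()

        spark.stop()
      }
    }
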
diff --git a/examples/src/main/scala/org/apache/spark/examples/LocalFileLR.scala b/examples/src/main/scala/org/apache/spark/examples/LocalFileLR.scala
index bec89f7c3dff01e5ff87b519cc585b8e56a96bd1..3d02ce05619adc17027d353bb68cb54a4ad3928d 100644
--- a/examples/src/main/scala/org/apache/spark/examples/LocalFileLR.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/LocalFileLR.scala
@@ -26,8 +26,7 @@ import breeze.linalg.{DenseVector, Vector}
  * Logistic regression based classification.
  *
  * This is an example implementation for learning how to use Spark. For more conventional use,
- * please refer to either org.apache.spark.mllib.classification.LogisticRegressionWithSGD or
- * org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS based on your needs.
+ * please refer to org.apache.spark.ml.classification.LogisticRegression.
  */
 object LocalFileLR {
   val D = 10   // Number of dimensions
@@ -43,8 +42,7 @@ object LocalFileLR {
   def showWarning() {
     System.err.println(
       """WARN: This is a naive implementation of Logistic Regression and is given as an example!
-        |Please use either org.apache.spark.mllib.classification.LogisticRegressionWithSGD or
-        |org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS
+        |Please use org.apache.spark.ml.classification.LogisticRegression
         |for more conventional use.
       """.stripMargin)
   }
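
Likewise, conventional use of org.apache.spark.ml.classification.LogisticRegression, which the logistic-regression examples above now reference, looks roughly like the sketch below; the data/mllib/sample_libsvm_data.txt path refers to the sample data bundled with the Spark source tree and is an assumption here:

    import org.apache.spark.ml.classification.LogisticRegression
    import org.apache.spark.sql.SparkSession

    object MLLogisticRegressionSketch {
      def main(args: Array[String]): Unit = {
        val spark = SparkSession.builder.appName("MLLogisticRegressionSketch").getOrCreate()

        // Assumed path: the libsvm-format sample shipped in the Spark source tree.
        val training = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")

        val lr = new LogisticRegression()
          .setMaxIter(10)
          .setRegParam(0.3)
          .setElasticNetParam(0.8)

        val model = lr.fit(training)
        println(s"Coefficients: ${model.coefficients} Intercept: ${model.intercept}")

        spark.stop()
      }
    }
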
diff --git a/examples/src/main/scala/org/apache/spark/examples/LocalKMeans.scala b/examples/src/main/scala/org/apache/spark/examples/LocalKMeans.scala
index f8961847f3df20d307eda58041a1c763ebb28a35..fca585c2a362bc0e3d25eb5a2177c162ee28b43a 100644
--- a/examples/src/main/scala/org/apache/spark/examples/LocalKMeans.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/LocalKMeans.scala
@@ -29,7 +29,7 @@ import breeze.linalg.{squaredDistance, DenseVector, Vector}
  * K-means clustering.
  *
  * This is an example implementation for learning how to use Spark. For more conventional use,
- * please refer to org.apache.spark.mllib.clustering.KMeans
+ * please refer to org.apache.spark.ml.clustering.KMeans.
  */
 object LocalKMeans {
   val N = 1000
@@ -66,7 +66,7 @@ object LocalKMeans {
   def showWarning() {
     System.err.println(
       """WARN: This is a naive implementation of KMeans Clustering and is given as an example!
-        |Please use the KMeans method found in org.apache.spark.mllib.clustering
+        |Please use org.apache.spark.ml.clustering.KMeans
         |for more conventional use.
       """.stripMargin)
   }
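
And for the K-means examples, a minimal sketch of org.apache.spark.ml.clustering.KMeans; the sample_kmeans_data.txt path is again assumed to be the libsvm-format sample bundled with Spark:

    import org.apache.spark.ml.clustering.KMeans
    import org.apache.spark.sql.SparkSession

    object MLKMeansSketch {
      def main(args: Array[String]): Unit = {
        val spark = SparkSession.builder.appName("MLKMeansSketch").getOrCreate()

        // Assumed path: libsvm-format sample data bundled with the Spark source tree.
        val dataset = spark.read.format("libsvm").load("data/mllib/sample_kmeans_data.txt")

        val kmeans = new KMeans().setK(2).setSeed(1L)
        val model = kmeans.fit(dataset)

        // Print the learned cluster centers.
        model.clusterCenters.foreach(println)

        spark.stop()
      }
    }
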
diff --git a/examples/src/main/scala/org/apache/spark/examples/LocalLR.scala b/examples/src/main/scala/org/apache/spark/examples/LocalLR.scala
index 0baf6db607ad9db5b7ef4ab214b5c11f767af95d..13ccc2ae7c3d88b392d11cb87cf29969fb353fb9 100644
--- a/examples/src/main/scala/org/apache/spark/examples/LocalLR.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/LocalLR.scala
@@ -26,8 +26,7 @@ import breeze.linalg.{DenseVector, Vector}
  * Logistic regression based classification.
  *
  * This is an example implementation for learning how to use Spark. For more conventional use,
- * please refer to either org.apache.spark.mllib.classification.LogisticRegressionWithSGD or
- * org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS based on your needs.
+ * please refer to org.apache.spark.ml.classification.LogisticRegression.
  */
 object LocalLR {
   val N = 10000  // Number of data points
@@ -50,8 +49,7 @@ object LocalLR {
   def showWarning() {
     System.err.println(
       """WARN: This is a naive implementation of Logistic Regression and is given as an example!
-        |Please use either org.apache.spark.mllib.classification.LogisticRegressionWithSGD or
-        |org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS
+        |Please use org.apache.spark.ml.classification.LogisticRegression
         |for more conventional use.
       """.stripMargin)
   }
diff --git a/examples/src/main/scala/org/apache/spark/examples/SparkALS.scala b/examples/src/main/scala/org/apache/spark/examples/SparkALS.scala
index 4263680c6fde3808fc6781015411668f821feb25..b06c629802006505a16f8387376315937d3378c6 100644
--- a/examples/src/main/scala/org/apache/spark/examples/SparkALS.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/SparkALS.scala
@@ -26,7 +26,7 @@ import org.apache.spark._
  * Alternating least squares matrix factorization.
  *
  * This is an example implementation for learning how to use Spark. For more conventional use,
- * please refer to org.apache.spark.mllib.recommendation.ALS
+ * please refer to org.apache.spark.ml.recommendation.ALS.
  */
 object SparkALS {
 
@@ -81,7 +81,7 @@ object SparkALS {
   def showWarning() {
     System.err.println(
       """WARN: This is a naive implementation of ALS and is given as an example!
-        |Please use the ALS method found in org.apache.spark.mllib.recommendation
+        |Please use org.apache.spark.ml.recommendation.ALS
         |for more conventional use.
       """.stripMargin)
   }
diff --git a/examples/src/main/scala/org/apache/spark/examples/SparkHdfsLR.scala b/examples/src/main/scala/org/apache/spark/examples/SparkHdfsLR.scala
index 7463b868ff19b0e759a41e09d1fc7171cce0d0bf..c514eb0fa51ae3949e0639fe2909a442688a1180 100644
--- a/examples/src/main/scala/org/apache/spark/examples/SparkHdfsLR.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/SparkHdfsLR.scala
@@ -31,8 +31,7 @@ import org.apache.spark._
  * Logistic regression based classification.
  *
  * This is an example implementation for learning how to use Spark. For more conventional use,
- * please refer to either org.apache.spark.mllib.classification.LogisticRegressionWithSGD or
- * org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS based on your needs.
+ * please refer to org.apache.spark.ml.classification.LogisticRegression.
  */
 object SparkHdfsLR {
   val D = 10   // Number of dimensions
@@ -54,8 +53,7 @@ object SparkHdfsLR {
   def showWarning() {
     System.err.println(
       """WARN: This is a naive implementation of Logistic Regression and is given as an example!
-        |Please use either org.apache.spark.mllib.classification.LogisticRegressionWithSGD or
-        |org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS
+        |Please use org.apache.spark.ml.classification.LogisticRegression
         |for more conventional use.
       """.stripMargin)
   }
diff --git a/examples/src/main/scala/org/apache/spark/examples/SparkKMeans.scala b/examples/src/main/scala/org/apache/spark/examples/SparkKMeans.scala
index d9f94a42b1a0be6e80534d36848ea51b15d49a2a..676164806e19648ba9a1c15bbc5989aeefecc6e1 100644
--- a/examples/src/main/scala/org/apache/spark/examples/SparkKMeans.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/SparkKMeans.scala
@@ -26,7 +26,7 @@ import org.apache.spark.{SparkConf, SparkContext}
  * K-means clustering.
  *
  * This is an example implementation for learning how to use Spark. For more conventional use,
- * please refer to org.apache.spark.mllib.clustering.KMeans
+ * please refer to org.apache.spark.ml.clustering.KMeans.
  */
 object SparkKMeans {
 
@@ -52,7 +52,7 @@ object SparkKMeans {
   def showWarning() {
     System.err.println(
       """WARN: This is a naive implementation of KMeans Clustering and is given as an example!
-        |Please use the KMeans method found in org.apache.spark.mllib.clustering
+        |Please use org.apache.spark.ml.clustering.KMeans
         |for more conventional use.
       """.stripMargin)
   }
diff --git a/examples/src/main/scala/org/apache/spark/examples/SparkLR.scala b/examples/src/main/scala/org/apache/spark/examples/SparkLR.scala
index acd8656b65a69e4b9a16c0aee1d66272e09ab9a9..718f84f6450e058f6ec6428f0387566bdea59d33 100644
--- a/examples/src/main/scala/org/apache/spark/examples/SparkLR.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/SparkLR.scala
@@ -31,8 +31,7 @@ import org.apache.spark._
  * Usage: SparkLR [slices]
  *
  * This is an example implementation for learning how to use Spark. For more conventional use,
- * please refer to either org.apache.spark.mllib.classification.LogisticRegressionWithSGD or
- * org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS based on your needs.
+ * please refer to org.apache.spark.ml.classification.LogisticRegression.
  */
 object SparkLR {
   val N = 10000  // Number of data points
@@ -55,8 +54,7 @@ object SparkLR {
   def showWarning() {
     System.err.println(
       """WARN: This is a naive implementation of Logistic Regression and is given as an example!
-        |Please use either org.apache.spark.mllib.classification.LogisticRegressionWithSGD or
-        |org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS
+        |Please use org.apache.spark.ml.classification.LogisticRegression
         |for more conventional use.
       """.stripMargin)
   }