diff --git a/examples/src/main/python/ml/elementwise_product_example.py b/examples/src/main/python/ml/elementwise_product_example.py
index 598deae886ee138cb3ddac45e79b37e9bf319329..590053998bccc172d7cd7db037a09ba400b2716f 100644
--- a/examples/src/main/python/ml/elementwise_product_example.py
+++ b/examples/src/main/python/ml/elementwise_product_example.py
@@ -30,10 +30,14 @@ if __name__ == "__main__":
         .getOrCreate()
 
     # $example on$
+    # Create a DataFrame of dense vectors; sparse vectors are supported as well
     data = [(Vectors.dense([1.0, 2.0, 3.0]),), (Vectors.dense([4.0, 5.0, 6.0]),)]
     df = spark.createDataFrame(data, ["vector"])
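+    # ElementwiseProduct multiplies each input vector by scalingVec element-wise
+    # (the Hadamard product), e.g. [1.0, 2.0, 3.0] * [0.0, 1.0, 2.0] = [0.0, 2.0, 6.0]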
     transformer = ElementwiseProduct(scalingVec=Vectors.dense([0.0, 1.0, 2.0]),
                                      inputCol="vector", outputCol="transformedVector")
+    # Batch transform the vectors to create a new column:
     transformer.transform(df).show()
     # $example off$
 
diff --git a/examples/src/main/python/ml/polynomial_expansion_example.py b/examples/src/main/python/ml/polynomial_expansion_example.py
index 9475e33218cfd4e8e1a73791a024c5d44b11a535..b46c1ba2f4391d19f7b908b56ac64f2f20ca5dc1 100644
--- a/examples/src/main/python/ml/polynomial_expansion_example.py
+++ b/examples/src/main/python/ml/polynomial_expansion_example.py
@@ -35,7 +35,8 @@ if __name__ == "__main__":
                           (Vectors.dense([0.0, 0.0]),),
                           (Vectors.dense([0.6, -1.1]),)],
                          ["features"])
-    px = PolynomialExpansion(degree=2, inputCol="features", outputCol="polyFeatures")
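+    # With degree=3, each 2-dimensional input vector expands into 9 polynomial features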
+    px = PolynomialExpansion(degree=3, inputCol="features", outputCol="polyFeatures")
     polyDF = px.transform(df)
     for expanded in polyDF.select("polyFeatures").take(3):
         print(expanded)
diff --git a/examples/src/main/python/ml/quantile_discretizer_example.py b/examples/src/main/python/ml/quantile_discretizer_example.py
index 5444cacd957f307cb54752321081d9d6e22b5034..6f422f840ad282849e2d7970bafc77bdd6f14bd8 100644
--- a/examples/src/main/python/ml/quantile_discretizer_example.py
+++ b/examples/src/main/python/ml/quantile_discretizer_example.py
@@ -24,7 +24,8 @@ from pyspark.sql import SparkSession
 
 
 if __name__ == "__main__":
-    spark = SparkSession.builder.appName("PythonQuantileDiscretizerExample").getOrCreate()
+    spark = SparkSession.builder.appName("QuantileDiscretizerExample").getOrCreate()
 
     # $example on$
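+    # Each tuple is (id, continuous value); the values will be binned into quantile-based buckets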
     data = [(0, 18.0,), (1, 19.0,), (2, 8.0,), (3, 5.0,), (4, 2.2,)]
diff --git a/examples/src/main/python/ml/random_forest_classifier_example.py b/examples/src/main/python/ml/random_forest_classifier_example.py
index a7fc765318b991687dc0105c21adc219a448bfb5..eb9ded9af555ed82ca5b19269f8fa2c4db1f8a6b 100644
--- a/examples/src/main/python/ml/random_forest_classifier_example.py
+++ b/examples/src/main/python/ml/random_forest_classifier_example.py
@@ -50,7 +50,8 @@ if __name__ == "__main__":
     (trainingData, testData) = data.randomSplit([0.7, 0.3])
 
     # Train a RandomForest model.
-    rf = RandomForestClassifier(labelCol="indexedLabel", featuresCol="indexedFeatures")
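+    # numTrees sets the ensemble size; more trees lower variance at extra training cost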
+    rf = RandomForestClassifier(labelCol="indexedLabel", featuresCol="indexedFeatures", numTrees=10)
 
     # Chain indexers and forest in a Pipeline
     pipeline = Pipeline(stages=[labelIndexer, featureIndexer, rf])
diff --git a/examples/src/main/python/ml/simple_text_classification_pipeline.py b/examples/src/main/python/ml/simple_text_classification_pipeline.py
index 886f43c0b08e8e8089f62fefa149146d42deebae..b528b59be9621238b5fa57a50700222f95dd6c51 100644
--- a/examples/src/main/python/ml/simple_text_classification_pipeline.py
+++ b/examples/src/main/python/ml/simple_text_classification_pipeline.py
@@ -48,7 +48,8 @@ if __name__ == "__main__":
 
-    # Configure an ML pipeline, which consists of tree stages: tokenizer, hashingTF, and lr.
+    # Configure an ML pipeline, which consists of three stages: tokenizer, hashingTF, and lr.
     tokenizer = Tokenizer(inputCol="text", outputCol="words")
-    hashingTF = HashingTF(inputCol=tokenizer.getOutputCol(), outputCol="features")
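+    # numFeatures caps the hashed feature space; larger values mean fewer hash collisions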
+    hashingTF = HashingTF(numFeatures=1000, inputCol=tokenizer.getOutputCol(), outputCol="features")
     lr = LogisticRegression(maxIter=10, regParam=0.001)
     pipeline = Pipeline(stages=[tokenizer, hashingTF, lr])
 
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/DataFrameExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/DataFrameExample.scala
index 11faa6192b3fc26da36987a22a24563b70b36c9a..38c1c1c1865b096c2a2860859ff92198e9d442f9 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/DataFrameExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/DataFrameExample.scala
@@ -20,7 +20,6 @@ package org.apache.spark.examples.ml
 
 import java.io.File
 
-import com.google.common.io.Files
 import scopt.OptionParser
 
 import org.apache.spark.examples.mllib.AbstractParams
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/GaussianMixtureExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/GaussianMixtureExample.scala
index c484ee55569b99a4fdc5fd80709f3a17fae5c54c..2c2bf421bc5d3d492c86934aa52686ba474c1310 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/GaussianMixtureExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/GaussianMixtureExample.scala
@@ -21,8 +21,8 @@ package org.apache.spark.examples.ml
 
 // $example on$
 import org.apache.spark.ml.clustering.GaussianMixture
-import org.apache.spark.sql.SparkSession
 // $example off$
+import org.apache.spark.sql.SparkSession
 
 /**
  * An example demonstrating Gaussian Mixture Model (GMM).
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/NaiveBayesExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/NaiveBayesExample.scala
index a59ba182fc2083398609eb77fba70452b93b0d21..7089a4bc87aaa0788da33533ad2549da40193ad7 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/NaiveBayesExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/NaiveBayesExample.scala
@@ -35,7 +35,8 @@ object NaiveBayesExample {
     val data = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
 
     // Split the data into training and test sets (30% held out for testing)
-    val Array(trainingData, testData) = data.randomSplit(Array(0.7, 0.3))
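+    // A fixed seed makes the split, and hence the result, reproducible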
+    val Array(trainingData, testData) = data.randomSplit(Array(0.7, 0.3), seed = 1234L)
 
     // Train a NaiveBayes model.
     val model = new NaiveBayes()