diff --git a/core/src/test/scala/org/apache/spark/AccumulatorSuite.scala b/core/src/test/scala/org/apache/spark/AccumulatorSuite.scala
index 6cbd5ae5d428af01181e87c6c1061320a4bccd98..6d03ee091e4ed1467bd3d67ccda792c50176c4b8 100644
--- a/core/src/test/scala/org/apache/spark/AccumulatorSuite.scala
+++ b/core/src/test/scala/org/apache/spark/AccumulatorSuite.scala
@@ -100,7 +100,9 @@ class AccumulatorSuite extends SparkFunSuite with Matchers with LocalSparkContex
     val acc: Accumulator[Int] = sc.accumulator(0)
 
     val d = sc.parallelize(1 to 20)
-    an [Exception] should be thrownBy {d.foreach{x => acc.value = x}}
+    intercept[SparkException] {
+      d.foreach(x => acc.value = x)
+    }
   }
 
   test ("add value to collection accumulators") {
@@ -171,7 +173,7 @@ class AccumulatorSuite extends SparkFunSuite with Matchers with LocalSparkContex
       d.foreach {
         x => acc.localValue ++= x
       }
-      acc.value should be ( (0 to maxI).toSet)
+      acc.value should be ((0 to maxI).toSet)
       resetSparkContext()
     }
   }
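
A note on the test change above: ScalaTest's `an [Exception] should be thrownBy` only asserts that *something* was thrown, while `intercept` both narrows the expected type (here `SparkException`, which Spark wraps task failures in) and hands back the caught exception for further assertions. A minimal self-contained sketch, assuming a ScalaTest 3.0-era `FunSuite` and using a plain `IllegalArgumentException` in place of the task-failure `SparkException`:

```scala
import org.scalatest.FunSuite

class InterceptSketchSuite extends FunSuite {
  test("intercept returns the caught exception") {
    // intercept fails the test unless an IllegalArgumentException is thrown,
    // and returns it so its contents can be inspected afterwards.
    val e = intercept[IllegalArgumentException] {
      require(1 < 0, "boom")
    }
    assert(e.getMessage.contains("boom"))
  }
}
```
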
diff --git a/mllib/src/main/scala/org/apache/spark/ml/feature/Interaction.scala b/mllib/src/main/scala/org/apache/spark/ml/feature/Interaction.scala
index 96d0bdee9e2b969714d0f3c8dc5e41ca559cd0f2..902f84f862c176eda47199b4552b5d9899d4facb 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/feature/Interaction.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/feature/Interaction.scala
@@ -136,7 +136,7 @@ class Interaction @Since("1.6.0") (@Since("1.6.0") override val uid: String) ext
         case _: VectorUDT =>
           val attrs = AttributeGroup.fromStructField(f).attributes.getOrElse(
             throw new SparkException("Vector attributes must be defined for interaction."))
-          attrs.map(getNumFeatures).toArray
+          attrs.map(getNumFeatures)
       }
       new FeatureEncoder(numFeatures)
     }.toArray
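
For context on the `.toArray` removal: `AttributeGroup.attributes` is an `Option[Array[Attribute]]`, so `attrs` is already an `Array`, and mapping an `Array` in Scala yields another `Array`; the trailing `.toArray` only allocated a redundant copy. A small sketch of the general rule (standalone, illustrative names):

```scala
object MapToArraySketch extends App {
  // Mapping an Array already produces an Array of the result type ...
  val fromArray: Array[Int] = Array("a", "bb", "ccc").map(_.length)
  // ... whereas mapping a Seq yields a Seq, which is where an explicit
  // .toArray actually earns its keep.
  val fromSeq: Array[Int] = Seq("a", "bb", "ccc").map(_.length).toArray
  println(fromArray.sameElements(fromSeq)) // true
}
```
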
diff --git a/mllib/src/main/scala/org/apache/spark/ml/r/IsotonicRegressionWrapper.scala b/mllib/src/main/scala/org/apache/spark/ml/r/IsotonicRegressionWrapper.scala
index 1ea80cb46ab7b3e42b99016de605205cf43c0016..a7992debe684a580658f08fc2841e092e64a0712 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/r/IsotonicRegressionWrapper.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/r/IsotonicRegressionWrapper.scala
@@ -23,7 +23,7 @@ import org.json4s.JsonDSL._
 import org.json4s.jackson.JsonMethods._
 
 import org.apache.spark.ml.{Pipeline, PipelineModel}
-import org.apache.spark.ml.attribute.{AttributeGroup}
+import org.apache.spark.ml.attribute.AttributeGroup
 import org.apache.spark.ml.feature.RFormula
 import org.apache.spark.ml.regression.{IsotonicRegression, IsotonicRegressionModel}
 import org.apache.spark.ml.util._
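
The import fix above reflects Scala's import syntax: braces introduce an import selector, which is only needed when importing several members or renaming one; a single class imports without them. A quick illustration:

```scala
import scala.collection.mutable.ArrayBuffer        // single member: no braces
import scala.collection.mutable.{HashMap, HashSet} // multiple selectors
import java.util.{List => JList}                   // rename requires braces
```
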
diff --git a/mllib/src/main/scala/org/apache/spark/ml/util/stopwatches.scala b/mllib/src/main/scala/org/apache/spark/ml/util/stopwatches.scala
index e79b1f31643d0e4014256411c003aeb979a89e28..e539deca4b0367672344d5fa5e5442e905da5f19 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/util/stopwatches.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/util/stopwatches.scala
@@ -20,7 +20,7 @@ package org.apache.spark.ml.util
 import scala.collection.mutable
 
 import org.apache.spark.SparkContext
-import org.apache.spark.util.LongAccumulator;
+import org.apache.spark.util.LongAccumulator
 
 /**
  * Abstract class for stopwatches.
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/feature/ChiSqSelector.scala b/mllib/src/main/scala/org/apache/spark/mllib/feature/ChiSqSelector.scala
index 56fb2d33c2ca0416ea739791d9a0759cac09e0bd..33a1f18bccca597ea8a1ff5dc74b649da4eff38a 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/feature/ChiSqSelector.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/feature/ChiSqSelector.scala
@@ -164,7 +164,7 @@ object ChiSqSelectorModel extends Loader[ChiSqSelectorModel] {
         case Row(feature: Int) => (feature)
       }.collect()
 
-      return new ChiSqSelectorModel(features)
+      new ChiSqSelectorModel(features)
     }
   }
 }
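
The dropped `return` follows from Scala's expression-oriented semantics: a method's result is its last expression, so an explicit `return` at the tail is pure noise. Beyond style, `return` inside a closure compiles to throwing `scala.runtime.NonLocalReturnControl`, which is easy to misread or break (for example, if the closure later runs on another thread). A short sketch of both points:

```scala
object ReturnSketch extends App {
  // The last expression is the result; no `return` needed.
  def square(x: Int): Int = x * x

  // Inside the foreach closure, `return` is a non-local return: it exits
  // firstEven by throwing NonLocalReturnControl under the hood.
  def firstEven(xs: Seq[Int]): Option[Int] = {
    xs.foreach { x =>
      if (x % 2 == 0) return Some(x)
    }
    None
  }

  println(square(4))               // 16
  println(firstEven(Seq(1, 3, 4))) // Some(4)
}
```
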
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/random/RandomRDDs.scala b/mllib/src/main/scala/org/apache/spark/mllib/random/RandomRDDs.scala
index c2bc1f17ccd58ce5749653dc648aa182bbe1ea6d..6d60136ddc38f103c47f900f1333d9ab32a59713 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/random/RandomRDDs.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/random/RandomRDDs.scala
@@ -438,10 +438,10 @@ object RandomRDDs {
   @DeveloperApi
   @Since("1.6.0")
   def randomJavaRDD[T](
-    jsc: JavaSparkContext,
-    generator: RandomDataGenerator[T],
-    size: Long): JavaRDD[T] = {
-    randomJavaRDD(jsc, generator, size, 0);
+      jsc: JavaSparkContext,
+      generator: RandomDataGenerator[T],
+      size: Long): JavaRDD[T] = {
+    randomJavaRDD(jsc, generator, size, 0)
   }
 
   // TODO Generate RDD[Vector] from multivariate distributions.
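
The re-indentation matches the Databricks/Spark Scala style guide: when a parameter list wraps, continuation lines get 4 spaces of indentation so they stand apart from the 2-space method body. A minimal example of the convention (hypothetical method):

```scala
// Parameters indented 4 spaces; body indented 2, so the eye can tell
// where the signature ends and the implementation begins.
def clampedSum(
    values: Seq[Long],
    lowerBound: Long,
    upperBound: Long): Long = {
  math.min(upperBound, math.max(lowerBound, values.sum))
}
```
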
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
index c060091c7fc3847314eaaa10ea93024be5ccf03b..93bf74d06b71d9e0ddc4763ebe0cdbe0973375a7 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
@@ -26,7 +26,7 @@ import org.apache.spark.api.java.JavaRDD
 import org.apache.spark.internal.Logging
 import org.apache.spark.rdd.RDD
 import org.apache.spark.sql.execution.LogicalRDD
-import org.apache.spark.sql.execution.datasources.{DataSource, LogicalRelation}
+import org.apache.spark.sql.execution.datasources.DataSource
 import org.apache.spark.sql.execution.datasources.jdbc.{JDBCPartition, JDBCPartitioningInfo, JDBCRelation}
 import org.apache.spark.sql.execution.datasources.json.{InferSchema, JacksonParser, JSONOptions}
 import org.apache.spark.sql.types.StructType
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
index 6da99ce0dd68335e980c8871d9aa96008c59f172..e7dcf0f51f4a5da9143fa2382627098b4101183b 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
@@ -43,7 +43,7 @@ import org.apache.spark.sql.catalyst.plans.logical._
 import org.apache.spark.sql.catalyst.util.usePrettyExpression
 import org.apache.spark.sql.execution.{FileRelation, LogicalRDD, QueryExecution, SQLExecution}
 import org.apache.spark.sql.execution.command.{CreateViewCommand, ExplainCommand}
-import org.apache.spark.sql.execution.datasources.{CreateTable, LogicalRelation}
+import org.apache.spark.sql.execution.datasources.LogicalRelation
 import org.apache.spark.sql.execution.datasources.json.JacksonGenerator
 import org.apache.spark.sql.execution.python.EvaluatePython
 import org.apache.spark.sql.streaming.{DataStreamWriter, StreamingQuery}
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
index fbf22197a1a11fefc24935c62e0efb03770b2805..2edf2e19720530c4b5b590d65e9b117630e55d02 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
@@ -1093,7 +1093,7 @@ object SQLContext {
     }
     data.map{ element =>
       new GenericInternalRow(
-        methodsToConverts.map { case (e, convert) => convert(e.invoke(element)) }.toArray[Any]
+        methodsToConverts.map { case (e, convert) => convert(e.invoke(element)) }
       ): InternalRow
     }
   }
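
The `.toArray[Any]` here was redundant for the same reason as the `Interaction` change above: `java.lang.reflect.Method.invoke` returns `Object`, so mapping an `Array` of (method, converter) pairs already produces an `Array[Any]`. A stripped-down sketch of the pattern (names hypothetical, mirroring the snippet above):

```scala
import java.lang.reflect.Method

// Builds the row values for one bean: each (getter, converter) pair is
// applied in turn. The map over an Array yields Array[Any] directly.
def rowValues(
    bean: AnyRef,
    methodsToConverts: Array[(Method, Any => Any)]): Array[Any] = {
  methodsToConverts.map { case (m, convert) => convert(m.invoke(bean)) }
}
```
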
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala
index b783d699745b10d3dc3a66c35fbf2840251a9110..348530888de3acb849310dda1f7fb60db8f23ed7 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala
@@ -61,7 +61,7 @@ import org.apache.spark.util.Utils
  *              qualified. This option only works when reading from a [[FileFormat]].
  * @param userSpecifiedSchema An optional specification of the schema of the data. When present
  *                            we skip attempting to infer the schema.
- * @param partitionColumns A list of column names that the relation is partitioned by.  When this
+ * @param partitionColumns A list of column names that the relation is partitioned by. When this
  *                         list is empty, the relation is unpartitioned.
  * @param bucketSpec An optional specification for bucketing (hash-partitioning) of the data.
  */