diff --git a/core/src/main/scala/org/apache/spark/ui/JettyUtils.scala b/core/src/main/scala/org/apache/spark/ui/JettyUtils.scala
index d9fecc5e3011e448aaf0fcc9688f4845e1f6cc74..c3c59f857dc433f7ab7daf79b828697ac3213ce4 100644
--- a/core/src/main/scala/org/apache/spark/ui/JettyUtils.scala
+++ b/core/src/main/scala/org/apache/spark/ui/JettyUtils.scala
@@ -25,7 +25,7 @@ import scala.collection.mutable.ArrayBuffer
 import scala.language.implicitConversions
 import scala.xml.Node
 
-import org.eclipse.jetty.server.{AbstractConnector, Connector, Request, Server}
+import org.eclipse.jetty.server.{Connector, Request, Server}
 import org.eclipse.jetty.server.handler._
 import org.eclipse.jetty.server.nio.SelectChannelConnector
 import org.eclipse.jetty.server.ssl.SslSelectChannelConnector
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaCorrelationsExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaCorrelationsExample.java
index fd19b43504ac19ffbad277286b86d51f534e56c6..c0fa0b3cac1e971f9c1d2af90514200c444777e5 100644
--- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaCorrelationsExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaCorrelationsExample.java
@@ -58,7 +58,8 @@ public class JavaCorrelationsExample {
       )
     );
 
-    // calculate the correlation matrix using Pearson's method. Use "spearman" for Spearman's method.
+    // Calculate the correlation matrix using Pearson's method.
+    // Use "spearman" for Spearman's method.
     // If a method is not specified, Pearson's method will be used by default.
     Matrix correlMatrix = Statistics.corr(data.rdd(), "pearson");
     System.out.println(correlMatrix.toString());
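
Aside, not part of the patch: the rewrapped comment above documents the method parameter of Statistics.corr. As a hedged illustration of both methods, reusing the `data` JavaRDD<Vector> and the imports already present in JavaCorrelationsExample.java, the calls might look like:

    // Sketch only: the two supported correlation methods.
    Matrix pearson  = Statistics.corr(data.rdd());             // Pearson is the default
    Matrix spearman = Statistics.corr(data.rdd(), "spearman"); // rank-based alternative
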
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaStratifiedSamplingExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaStratifiedSamplingExample.java
index f5a451019bd21daf2febed947bd6108fee5bf094..c27fba278347b8ffd8dd649501e738e1ab798463 100644
--- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaStratifiedSamplingExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaStratifiedSamplingExample.java
@@ -27,7 +27,6 @@ import java.util.*;
 import scala.Tuple2;
 
 import org.apache.spark.api.java.JavaPairRDD;
-import org.apache.spark.api.java.function.VoidFunction;
 // $example off$
 
 public class JavaStratifiedSamplingExample {
diff --git a/graphx/src/main/scala/org/apache/spark/graphx/impl/GraphImpl.scala b/graphx/src/main/scala/org/apache/spark/graphx/impl/GraphImpl.scala
index 7903caa3129da5bab7cb536791ba76752927134c..e18831382d4d566ff5f2ec2d07ff7d22309c7fd9 100644
--- a/graphx/src/main/scala/org/apache/spark/graphx/impl/GraphImpl.scala
+++ b/graphx/src/main/scala/org/apache/spark/graphx/impl/GraphImpl.scala
@@ -20,11 +20,9 @@ package org.apache.spark.graphx.impl
 import scala.reflect.{classTag, ClassTag}
 
 import org.apache.spark.HashPartitioner
-import org.apache.spark.SparkContext._
 import org.apache.spark.graphx._
-import org.apache.spark.graphx.impl.GraphImpl._
 import org.apache.spark.graphx.util.BytecodeUtils
-import org.apache.spark.rdd.{RDD, ShuffledRDD}
+import org.apache.spark.rdd.RDD
 import org.apache.spark.storage.StorageLevel
 
 /**
diff --git a/sql/catalyst/src/main/java/org/apache/spark/sql/catalyst/expressions/XXH64.java b/sql/catalyst/src/main/java/org/apache/spark/sql/catalyst/expressions/XXH64.java
index 5f2de266b538f0ea4feb1746561307c4936829de..f37ef83ad92b44c7d1b86ac8b4aad4b4da917765 100644
--- a/sql/catalyst/src/main/java/org/apache/spark/sql/catalyst/expressions/XXH64.java
+++ b/sql/catalyst/src/main/java/org/apache/spark/sql/catalyst/expressions/XXH64.java
@@ -17,10 +17,8 @@
 package org.apache.spark.sql.catalyst.expressions;
 
 import org.apache.spark.unsafe.Platform;
-import org.apache.spark.util.SystemClock;
 
 // scalastyle: off
-
 /**
 * xxHash64. A high quality and fast 64 bit hash code by Yann Collet and Mathias Westerdahl. The
  * class below is modelled like its Murmur3_x86_32 cousin.
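
Aside, not part of the patch: the doc comment above describes XXH64 as a 64-bit analogue of Murmur3_x86_32. A minimal usage sketch, assuming the class exposes a static hashLong(long input, long seed) overload mirroring Murmur3_x86_32's seeded hashers (check the class body for the exact signatures):

    // Sketch: hash a primitive long with an explicit seed.
    long h = XXH64.hashLong(42L, 0L);  // assumed (input, seed) order, as in Murmur3_x86_32
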
diff --git a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedParquetRecordReader.java b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedParquetRecordReader.java
index c06342c3d40b72f3bfedb3c975a6abd4e7f7656c..5bfde55c3ba70e12e05d358fa9c5a00b402e83d7 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedParquetRecordReader.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedParquetRecordReader.java
@@ -24,8 +24,6 @@ import org.apache.hadoop.mapreduce.InputSplit;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.parquet.column.ColumnDescriptor;
 import org.apache.parquet.column.page.PageReadStore;
-import org.apache.parquet.schema.OriginalType;
-import org.apache.parquet.schema.PrimitiveType;
 import org.apache.parquet.schema.Type;
 
 import org.apache.spark.memory.MemoryMode;
diff --git a/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java b/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java
index 86db8df4c00fdc303b50e0f00c433a93e1dbcffd..a6c819373bfaca8af1be86bafc7b62afa4ba09be 100644
--- a/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java
+++ b/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java
@@ -169,12 +169,14 @@ public class JavaDatasetSuite implements Serializable {
   public void testGroupBy() {
     List<String> data = Arrays.asList("a", "foo", "bar");
     Dataset<String> ds = context.createDataset(data, Encoders.STRING());
-    KeyValueGroupedDataset<Integer, String> grouped = ds.groupByKey(new MapFunction<String, Integer>() {
-      @Override
-      public Integer call(String v) throws Exception {
-        return v.length();
-      }
-    }, Encoders.INT());
+    KeyValueGroupedDataset<Integer, String> grouped = ds.groupByKey(
+      new MapFunction<String, Integer>() {
+        @Override
+        public Integer call(String v) throws Exception {
+          return v.length();
+        }
+      },
+      Encoders.INT());
 
     Dataset<String> mapped = grouped.mapGroups(new MapGroupsFunction<Integer, String, String>() {
       @Override
@@ -217,12 +219,14 @@ public class JavaDatasetSuite implements Serializable {
 
     List<Integer> data2 = Arrays.asList(2, 6, 10);
     Dataset<Integer> ds2 = context.createDataset(data2, Encoders.INT());
-    KeyValueGroupedDataset<Integer, Integer> grouped2 = ds2.groupByKey(new MapFunction<Integer, Integer>() {
-      @Override
-      public Integer call(Integer v) throws Exception {
-        return v / 2;
-      }
-    }, Encoders.INT());
+    KeyValueGroupedDataset<Integer, Integer> grouped2 = ds2.groupByKey(
+      new MapFunction<Integer, Integer>() {
+        @Override
+        public Integer call(Integer v) throws Exception {
+          return v / 2;
+        }
+      },
+      Encoders.INT());
 
     Dataset<String> cogrouped = grouped.cogroup(
       grouped2,
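
A closing note on the two reshaped groupByKey calls: the overload used here takes a MapFunction<T, K> plus an Encoder<K> for the key. Where Java 8 is available, the same grouping can be written with a lambda; a sketch equivalent to the first call, reusing the test's `ds` (the cast only selects the MapFunction overload):

    // Sketch: lambda form of the anonymous MapFunction in testGroupBy().
    KeyValueGroupedDataset<Integer, String> grouped =
        ds.groupByKey((MapFunction<String, Integer>) String::length, Encoders.INT());

The patch keeps the anonymous-class form, presumably because Spark still targeted Java 7 at the time; the rewrap only brings the lines under the length limit without changing behavior.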