Commit d34d6503 authored by Dongjoon Hyun, committed by Reynold Xin

[SPARK-14868][BUILD] Enable NewLineAtEofChecker in checkstyle and fix lint-java errors

## What changes were proposed in this pull request?

Spark enforces the `NewLineAtEofChecker` rule for Scala code via Scalastyle, and most Java code already complies with it. This PR aims to enforce the equivalent Checkstyle rule, `NewlineAtEndOfFile`, explicitly. It also fixes the lint-java errors that have accumulated since SPARK-14465. The changes are:

- Adds a newline at the end of 19 files
- Fixes 25 lint-java errors (12 RedundantModifier, 6 ArrayTypeStyle, 2 LineLength, 2 UnusedImports, 2 RegexpSingleline, 1 ModifierOrder); see the sketch below
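
For illustration, here is a minimal, hypothetical sketch of what the two modifier-related checks flag; the class and method names below are invented for this example and are not taken from the PR:

```java
// RedundantModifierDemo.java -- a hypothetical example, not code from this PR.
public class RedundantModifierDemo {

  // ModifierOrder: Checkstyle wants the JLS-recommended order "abstract static",
  // so "public static abstract class ..." would be flagged.
  public abstract static class Base {
    public abstract int value();
  }

  // RedundantModifier: the enclosing class is final, so marking its methods
  // "final" is redundant; "public int value()" is sufficient.
  public static final class Impl extends Base {
    @Override
    public int value() {
      return 42;
    }
  }

  public static void main(String[] args) {
    System.out.println(new Impl().value()); // prints 42
  }
}
```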

## How was this patch tested?

After the Jenkins test succeeds, `dev/lint-java` should pass. (Currently, Jenkins does not run lint-java.)
```bash
$ dev/lint-java
Using `mvn` from path: /usr/local/bin/mvn
Checkstyle checks passed.
```

Author: Dongjoon Hyun <dongjoon@apache.org>

Closes #12632 from dongjoon-hyun/SPARK-14868.
parent d0ca5797
Showing with 36 additions and 35 deletions
```diff
@@ -20,4 +20,4 @@
  * these interfaces to pass functions to various Java API methods for Spark. Please visit Spark's
  * Java programming guide for more details.
  */
-package org.apache.spark.api.java.function;
\ No newline at end of file
+package org.apache.spark.api.java.function;
```
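
In this and the similar hunks below, `\ No newline at end of file` is the standard unified-diff marker: the removed and added lines are textually identical, and the only change is that the new version ends with a terminating newline, which is exactly what `NewlineAtEndOfFile` enforces.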
```diff
@@ -256,8 +256,8 @@ final class ShuffleExternalSorter extends MemoryConsumer {
     final long spillSize = freeMemory();
     inMemSorter.reset();
     // Reset the in-memory sorter's pointer array only after freeing up the memory pages holding the
-    // records. Otherwise, if the task is over allocated memory, then without freeing the memory pages,
-    // we might not be able to get memory for the pointer array.
+    // records. Otherwise, if the task is over allocated memory, then without freeing the memory
+    // pages, we might not be able to get memory for the pointer array.
     taskContext.taskMetrics().incMemoryBytesSpilled(spillSize);
     return spillSize;
   }
```
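The two `MemoryConsumer` hunks (here and in `UnsafeExternalSorter` below) only rewrap a comment line that exceeded the line-length limit; the wording is unchanged, which is how the two LineLength errors were fixed.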
```diff
@@ -22,7 +22,6 @@ import com.google.common.primitives.UnsignedLongs;
 import org.apache.spark.annotation.Private;
 import org.apache.spark.unsafe.types.ByteArray;
 import org.apache.spark.unsafe.types.UTF8String;
-import org.apache.spark.util.Utils;
 
 @Private
 public class PrefixComparators {
@@ -69,7 +68,7 @@ public class PrefixComparators {
    * Provides radix sort parameters. Comparators implementing this also are indicating that the
    * ordering they define is compatible with radix sort.
    */
-  public static abstract class RadixSortSupport extends PrefixComparator {
+  public abstract static class RadixSortSupport extends PrefixComparator {
     /** @return Whether the sort should be descending in binary sort order. */
     public abstract boolean sortDescending();
@@ -82,37 +81,37 @@ public class PrefixComparators {
   //
   public static final class UnsignedPrefixComparator extends RadixSortSupport {
-    @Override public final boolean sortDescending() { return false; }
-    @Override public final boolean sortSigned() { return false; }
+    @Override public boolean sortDescending() { return false; }
+    @Override public boolean sortSigned() { return false; }
     @Override
-    public final int compare(long aPrefix, long bPrefix) {
+    public int compare(long aPrefix, long bPrefix) {
       return UnsignedLongs.compare(aPrefix, bPrefix);
     }
   }
 
   public static final class UnsignedPrefixComparatorDesc extends RadixSortSupport {
-    @Override public final boolean sortDescending() { return true; }
-    @Override public final boolean sortSigned() { return false; }
+    @Override public boolean sortDescending() { return true; }
+    @Override public boolean sortSigned() { return false; }
     @Override
-    public final int compare(long bPrefix, long aPrefix) {
+    public int compare(long bPrefix, long aPrefix) {
       return UnsignedLongs.compare(aPrefix, bPrefix);
     }
   }
 
   public static final class SignedPrefixComparator extends RadixSortSupport {
-    @Override public final boolean sortDescending() { return false; }
-    @Override public final boolean sortSigned() { return true; }
+    @Override public boolean sortDescending() { return false; }
+    @Override public boolean sortSigned() { return true; }
     @Override
-    public final int compare(long a, long b) {
+    public int compare(long a, long b) {
       return (a < b) ? -1 : (a > b) ? 1 : 0;
     }
   }
 
   public static final class SignedPrefixComparatorDesc extends RadixSortSupport {
-    @Override public final boolean sortDescending() { return true; }
-    @Override public final boolean sortSigned() { return true; }
+    @Override public boolean sortDescending() { return true; }
+    @Override public boolean sortSigned() { return true; }
     @Override
-    public final int compare(long b, long a) {
+    public int compare(long b, long a) {
       return (a < b) ? -1 : (a > b) ? 1 : 0;
     }
   }
```
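Dropping `final` from these methods is behavior-preserving: each enclosing class is itself declared `final`, so its methods cannot be overridden anyway, which is precisely what the RedundantModifier rule flags.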
The two hunks below have equal before/after line counts and no visible textual change; they most likely correspond to the two RegexpSingleline (trailing whitespace) fixes, which are invisible once the diff markers are stripped:

```diff
@@ -16,7 +16,7 @@
  */
 package org.apache.spark.util.collection.unsafe.sort;
 
 import org.apache.spark.unsafe.Platform;
 import org.apache.spark.unsafe.array.LongArray;
@@ -227,7 +227,7 @@ public class RadixSort {
     }
     return counts;
   }
 
   /**
    * Specialization of sortAtByte() for key-prefix arrays.
    */
```
```diff
@@ -212,8 +212,8 @@ public final class UnsafeExternalSorter extends MemoryConsumer {
     // written to disk. This also counts the space needed to store the sorter's pointer array.
     inMemSorter.reset();
     // Reset the in-memory sorter's pointer array only after freeing up the memory pages holding the
-    // records. Otherwise, if the task is over allocated memory, then without freeing the memory pages,
-    // we might not be able to get memory for the pointer array.
+    // records. Otherwise, if the task is over allocated memory, then without freeing the memory
+    // pages, we might not be able to get memory for the pointer array.
     taskContext.taskMetrics().incMemoryBytesSpilled(spillSize);
     totalSpillBytes += spillSize;
```
```diff
@@ -20,4 +20,4 @@
  * This package consist of these annotations, which are used project wide and are reflected in
  * Scala and Java docs.
  */
-package org.apache.spark.annotation;
\ No newline at end of file
+package org.apache.spark.annotation;
```
```diff
@@ -18,4 +18,4 @@
 /**
  * Spark Java programming APIs.
  */
-package org.apache.spark.api.java;
\ No newline at end of file
+package org.apache.spark.api.java;
```
```diff
@@ -18,4 +18,4 @@
 /**
  * Spark's broadcast variables, used to broadcast immutable datasets to all nodes.
  */
-package org.apache.spark.broadcast;
\ No newline at end of file
+package org.apache.spark.broadcast;
```
```diff
@@ -18,4 +18,4 @@
 /**
  * Package for executor components used with various cluster managers.
  */
-package org.apache.spark.executor;
\ No newline at end of file
+package org.apache.spark.executor;
```
```diff
@@ -18,4 +18,4 @@
 /**
  * IO codecs used for compression.
  */
-package org.apache.spark.io;
\ No newline at end of file
+package org.apache.spark.io;
```
```diff
@@ -18,4 +18,4 @@
 /**
  * Provides implementation's of various RDDs.
  */
-package org.apache.spark.rdd;
\ No newline at end of file
+package org.apache.spark.rdd;
```
```diff
@@ -18,4 +18,4 @@
 /**
  * Spark's DAG scheduler.
  */
-package org.apache.spark.scheduler;
\ No newline at end of file
+package org.apache.spark.scheduler;
```
```diff
@@ -18,4 +18,4 @@
 /**
  * Spark utilities.
  */
-package org.apache.spark.util;
\ No newline at end of file
+package org.apache.spark.util;
```
```diff
@@ -18,4 +18,4 @@
 /**
  * Utilities for random number generation.
  */
-package org.apache.spark.util.random;
\ No newline at end of file
+package org.apache.spark.util.random;
```
```diff
@@ -64,6 +64,8 @@
         <property name="message" value="No trailing whitespace allowed."/>
     </module>
+    <module name="NewlineAtEndOfFile"/>
+
     <module name="TreeWalker">
         <module name="OuterTypeFilename"/>
         <module name="IllegalTokenText">
```
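Note that `NewlineAtEndOfFile` is registered as a direct child of the root `Checker` module rather than inside `TreeWalker`: it is a file-level check that inspects the raw file contents, not the parsed Java syntax tree, so it also covers non-Java files processed by the Checkstyle run.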
```diff
@@ -18,4 +18,4 @@
 /**
  * Spark streaming receiver for Flume.
  */
-package org.apache.spark.streaming.flume;
\ No newline at end of file
+package org.apache.spark.streaming.flume;
```
```diff
@@ -18,4 +18,4 @@
 /**
  * Kafka receiver for spark streaming.
  */
-package org.apache.spark.streaming.kafka;
\ No newline at end of file
+package org.apache.spark.streaming.kafka;
```
```diff
@@ -34,4 +34,4 @@ log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}:
 log4j.logger.org.spark_project.jetty=WARN
 log4j.logger.org.spark_project.jetty.util.component.AbstractLifeCycle=ERROR
 log4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=INFO
-log4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO
\ No newline at end of file
+log4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO
```
```diff
@@ -19,4 +19,4 @@
  * ALPHA COMPONENT
  * GraphX is a graph processing framework built on top of Spark.
  */
-package org.apache.spark.graphx;
\ No newline at end of file
+package org.apache.spark.graphx;
```
```diff
@@ -18,4 +18,4 @@
 /**
  * Collections of utilities used by graphx.
  */
-package org.apache.spark.graphx.util;
\ No newline at end of file
+package org.apache.spark.graphx.util;
```