Commit 7b92922f authored by Reynold Xin

Update MimaExcludes now Spark 1.6 is in Maven.

Author: Reynold Xin <rxin@databricks.com>

Closes #10561 from rxin/update-mima.
parent c82924d5
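
For context, the routine this commit performs is: once a Spark release is published to Maven, point MiMa's reference artifact at that release and reset the exclusion list for the next development line. Below is a minimal sketch of how the two pieces cooperate in the sbt build, assuming the sbt-mima-plugin 0.1.x-era keys (previousArtifact, as in the diff below, plus binaryIssueFilters); the "spark-core_2.10" module id is illustrative:

import sbt._
import com.typesafe.tools.mima.plugin.MimaKeys.{previousArtifact, binaryIssueFilters}
import com.typesafe.tools.mima.core._

// Compare the current build against the most recent release published to Maven.
previousArtifact := Some("org.apache.spark" % "spark-core_2.10" % "1.6.0")

// Suppress only incompatibilities that are known and intentional; anything
// else reported against the 1.6.0 artifacts fails the MiMa check.
binaryIssueFilters ++= Seq(
  ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.rpc.akka.AkkaRpcEnv")
)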
@@ -91,7 +91,7 @@ object MimaBuild {
   def mimaSettings(sparkHome: File, projectRef: ProjectRef) = {
     val organization = "org.apache.spark"
-    val previousSparkVersion = "1.5.0"
+    val previousSparkVersion = "1.6.0"
     val fullId = "spark-" + projectRef.project + "_2.10"
     mimaDefaultSettings ++
       Seq(previousArtifact := Some(organization % fullId % previousSparkVersion),
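(With fullId expanding to artifact ids such as "spark-core_2.10", the bump above makes MiMa compare each module against its 1.6.0 release, e.g. org.apache.spark:spark-core_2.10:1.6.0, instead of 1.5.0.)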
@@ -30,165 +30,29 @@ import com.typesafe.tools.mima.core.ProblemFilters._
  * It is also possible to exclude Spark classes and packages. This should be used sparingly:
  *
  * MimaBuild.excludeSparkClass("graphx.util.collection.GraphXPrimitiveKeyOpenHashMap")
+ *
+ * For a new Spark version, please update MimaBuild.scala to reflect the previous version.
  */
 object MimaExcludes {
   def excludes(version: String) = version match {
     case v if v.startsWith("2.0") =>
       Seq(
-        excludePackage("org.apache.spark.rpc"),
-        excludePackage("org.spark-project.jetty"),
-        excludePackage("org.apache.spark.unused"),
-        excludePackage("org.apache.spark.sql.catalyst"),
-        excludePackage("org.apache.spark.sql.execution"),
-        ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.mllib.feature.PCAModel.this"),
-        ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.status.api.v1.StageData.this")
+        // SPARK-7995 Remove AkkaRpcEnv
+        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.rpc.akka.AkkaFailure"),
+        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.rpc.akka.AkkaFailure$"),
+        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.rpc.akka.AkkaRpcEndpointRef$"),
+        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.rpc.akka.AkkaRpcEnvFactory"),
+        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.rpc.akka.AkkaRpcEnv"),
+        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.rpc.akka.AkkaMessage$"),
+        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.rpc.akka.AkkaRpcEndpointRef"),
+        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.rpc.akka.ErrorMonitor"),
+        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.rpc.akka.AkkaMessage")
       ) ++ Seq(
         ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.SparkContext.emptyRDD"),
         ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.broadcast.HttpBroadcastFactory")
       ) ++
       Seq(
-        // SPARK-12481
+        // SPARK-12481 Remove Hadoop 1.x
         ProblemFilters.exclude[IncompatibleTemplateDefProblem](
           "org.apache.spark.mapred.SparkHadoopMapRedUtil")
-      ) ++
-      // When 1.6 is officially released, update this exclusion list.
-      Seq(
-        MimaBuild.excludeSparkPackage("deploy"),
-        MimaBuild.excludeSparkPackage("network"),
-        MimaBuild.excludeSparkPackage("unsafe"),
-        // These are needed if checking against the sbt build, since they are part of
-        // the maven-generated artifacts in 1.3.
-        excludePackage("org.spark-project.jetty"),
-        MimaBuild.excludeSparkPackage("unused"),
-        // SQL execution is considered private.
-        excludePackage("org.apache.spark.sql.execution"),
-        // SQL columnar is considered private.
-        excludePackage("org.apache.spark.sql.columnar"),
-        // The shuffle package is considered private.
-        excludePackage("org.apache.spark.shuffle"),
-        // The collections utilities are considered private.
excludePackage("org.apache.spark.util.collection")
) ++
MimaBuild.excludeSparkClass("streaming.flume.FlumeTestUtils") ++
MimaBuild.excludeSparkClass("streaming.flume.PollingFlumeTestUtils") ++
Seq(
// MiMa does not deal properly with sealed traits
ProblemFilters.exclude[MissingMethodProblem](
"org.apache.spark.ml.classification.LogisticRegressionSummary.featuresCol")
) ++ Seq(
// SPARK-11530
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.mllib.feature.PCAModel.this")
) ++ Seq(
// SPARK-10381 Fix types / units in private AskPermissionToCommitOutput RPC message.
// This class is marked as `private` but MiMa still seems to be confused by the change.
ProblemFilters.exclude[MissingMethodProblem](
"org.apache.spark.scheduler.AskPermissionToCommitOutput.task"),
ProblemFilters.exclude[IncompatibleResultTypeProblem](
"org.apache.spark.scheduler.AskPermissionToCommitOutput.copy$default$2"),
ProblemFilters.exclude[IncompatibleMethTypeProblem](
"org.apache.spark.scheduler.AskPermissionToCommitOutput.copy"),
ProblemFilters.exclude[MissingMethodProblem](
"org.apache.spark.scheduler.AskPermissionToCommitOutput.taskAttempt"),
ProblemFilters.exclude[IncompatibleResultTypeProblem](
"org.apache.spark.scheduler.AskPermissionToCommitOutput.copy$default$3"),
ProblemFilters.exclude[IncompatibleMethTypeProblem](
"org.apache.spark.scheduler.AskPermissionToCommitOutput.this"),
ProblemFilters.exclude[IncompatibleMethTypeProblem](
"org.apache.spark.scheduler.AskPermissionToCommitOutput.apply")
) ++ Seq(
ProblemFilters.exclude[MissingClassProblem](
"org.apache.spark.shuffle.FileShuffleBlockResolver$ShuffleFileGroup")
) ++ Seq(
ProblemFilters.exclude[MissingMethodProblem](
"org.apache.spark.ml.regression.LeastSquaresAggregator.add"),
ProblemFilters.exclude[MissingMethodProblem](
"org.apache.spark.ml.regression.LeastSquaresCostFun.this"),
ProblemFilters.exclude[MissingMethodProblem](
"org.apache.spark.sql.SQLContext.clearLastInstantiatedContext"),
ProblemFilters.exclude[MissingMethodProblem](
"org.apache.spark.sql.SQLContext.setLastInstantiatedContext"),
ProblemFilters.exclude[MissingClassProblem](
"org.apache.spark.sql.SQLContext$SQLSession"),
ProblemFilters.exclude[MissingMethodProblem](
"org.apache.spark.sql.SQLContext.detachSession"),
ProblemFilters.exclude[MissingMethodProblem](
"org.apache.spark.sql.SQLContext.tlSession"),
ProblemFilters.exclude[MissingMethodProblem](
"org.apache.spark.sql.SQLContext.defaultSession"),
ProblemFilters.exclude[MissingMethodProblem](
"org.apache.spark.sql.SQLContext.currentSession"),
ProblemFilters.exclude[MissingMethodProblem](
"org.apache.spark.sql.SQLContext.openSession"),
ProblemFilters.exclude[MissingMethodProblem](
"org.apache.spark.sql.SQLContext.setSession"),
ProblemFilters.exclude[MissingMethodProblem](
"org.apache.spark.sql.SQLContext.createSession")
) ++ Seq(
ProblemFilters.exclude[MissingMethodProblem](
"org.apache.spark.SparkContext.preferredNodeLocationData_="),
ProblemFilters.exclude[MissingClassProblem](
"org.apache.spark.rdd.MapPartitionsWithPreparationRDD"),
ProblemFilters.exclude[MissingClassProblem](
"org.apache.spark.rdd.MapPartitionsWithPreparationRDD$"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.SparkSQLParser")
) ++ Seq(
// SPARK-11485
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.DataFrameHolder.df"),
// SPARK-11541 mark various JDBC dialects as private
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.jdbc.NoopDialect.productElement"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.jdbc.NoopDialect.productArity"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.jdbc.NoopDialect.canEqual"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.jdbc.NoopDialect.productIterator"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.jdbc.NoopDialect.productPrefix"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.jdbc.NoopDialect.toString"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.jdbc.NoopDialect.hashCode"),
ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.sql.jdbc.PostgresDialect$"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.jdbc.PostgresDialect.productElement"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.jdbc.PostgresDialect.productArity"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.jdbc.PostgresDialect.canEqual"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.jdbc.PostgresDialect.productIterator"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.jdbc.PostgresDialect.productPrefix"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.jdbc.PostgresDialect.toString"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.jdbc.PostgresDialect.hashCode"),
ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.sql.jdbc.NoopDialect$")
) ++ Seq (
ProblemFilters.exclude[MissingMethodProblem](
"org.apache.spark.status.api.v1.ApplicationInfo.this"),
ProblemFilters.exclude[MissingMethodProblem](
"org.apache.spark.status.api.v1.StageData.this")
) ++ Seq(
// SPARK-11766 add toJson to Vector
ProblemFilters.exclude[MissingMethodProblem](
"org.apache.spark.mllib.linalg.Vector.toJson")
) ++ Seq(
// SPARK-9065 Support message handler in Kafka Python API
ProblemFilters.exclude[MissingMethodProblem](
"org.apache.spark.streaming.kafka.KafkaUtilsPythonHelper.createDirectStream"),
ProblemFilters.exclude[MissingMethodProblem](
"org.apache.spark.streaming.kafka.KafkaUtilsPythonHelper.createRDD")
) ++ Seq(
// SPARK-4557 Changed foreachRDD to use VoidFunction
ProblemFilters.exclude[MissingMethodProblem](
"org.apache.spark.streaming.api.java.JavaDStreamLike.foreachRDD")
) ++ Seq(
// SPARK-11996 Make the executor thread dump work again
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.executor.ExecutorEndpoint"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.executor.ExecutorEndpoint$"),
ProblemFilters.exclude[MissingClassProblem](
"org.apache.spark.storage.BlockManagerMessages$GetRpcHostPortForExecutor"),
ProblemFilters.exclude[MissingClassProblem](
"org.apache.spark.storage.BlockManagerMessages$GetRpcHostPortForExecutor$")
) ++ Seq(
// SPARK-3580 Add getNumPartitions method to JavaRDD
ProblemFilters.exclude[MissingMethodProblem](
"org.apache.spark.api.java.JavaRDDLike.getNumPartitions")
) ++
// SPARK-11314: YARN backend moved to yarn sub-module and MiMA complains even though it's a
// private class.
MimaBuild.excludeSparkClass("scheduler.cluster.YarnSchedulerBackend$YarnSchedulerEndpoint")
)
case v if v.startsWith("1.6") =>
Seq(
MimaBuild.excludeSparkPackage("deploy"),
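
For reference, the MimaBuild.excludeSparkClass and MimaBuild.excludeSparkPackage helpers used throughout the exclusion list reduce to plain ProblemFilters. A hypothetical sketch of such helpers follows (the real ones live in MimaBuild.scala and cover more problem types):

import com.typesafe.tools.mima.core._
import com.typesafe.tools.mima.core.ProblemFilters._

// Hypothetical helper: hide a Spark class (and its companion object, whose
// bytecode name carries a trailing "$") from the binary-compatibility check.
def excludeSparkClass(className: String): Seq[ProblemFilter] = Seq(
  ProblemFilters.exclude[MissingClassProblem]("org.apache.spark." + className),
  ProblemFilters.exclude[MissingClassProblem]("org.apache.spark." + className + "$")
)

// Hypothetical helper: hide an entire Spark package.
def excludeSparkPackage(packageName: String): ProblemFilter =
  excludePackage("org.apache.spark." + packageName)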