diff --git a/project/MimaExcludes.scala b/project/MimaExcludes.scala
index a9973bc24cf6e205c197d692654cf2cae346f4e8..2a4a874fef8bbf616319a26b281dd69a39d1e309 100644
--- a/project/MimaExcludes.scala
+++ b/project/MimaExcludes.scala
@@ -314,6 +314,7 @@ object MimaExcludes {
         ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.DataFrame"),
         ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.DataFrame$"),
+        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.LegacyFunctions"),
         ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.mllib.evaluation.MultilabelMetrics.this"),
         ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.classification.LogisticRegressionSummary.predictions"),
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
similarity index 100%
rename from sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
rename to sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/functions.scala b/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
index 737e125f6cf03a67c920b89af97f6553ad2cef56..326c1e5a7cc035d9e5e2f1b121fd599385443eeb 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
@@ -33,25 +33,6 @@ import org.apache.spark.sql.expressions.UserDefinedFunction
 import org.apache.spark.sql.types._
 import org.apache.spark.util.Utils
 
-/**
- * Ensures that java functions signatures for methods that now return a [[TypedColumn]] still have
- * legacy equivalents in bytecode. This compatibility is done by forcing the compiler to generate
- * "bridge" methods due to the use of covariant return types.
- *
- * {{{
- *   // In LegacyFunctions:
- *   public abstract org.apache.spark.sql.Column avg(java.lang.String);
- *
- *   // In functions:
- *   public static org.apache.spark.sql.TypedColumn<java.lang.Object, java.lang.Object> avg(...);
- * }}}
- *
- * This allows us to use the same functions both in typed [[Dataset]] operations and untyped
- * [[DataFrame]] operations when the return type for a given function is statically known.
- */
-private[sql] abstract class LegacyFunctions {
-  def count(columnName: String): Column
-}
 
 /**
  * :: Experimental ::
@@ -72,7 +53,7 @@ private[sql] abstract class LegacyFunctions {
  */
 @Experimental
 // scalastyle:off
-object functions extends LegacyFunctions {
+object functions {
 // scalastyle:on
 
   private def withExpr(expr: Expression): Column = Column(expr)
@@ -287,7 +268,7 @@ object functions extends LegacyFunctions {
    * @since 1.3.0
    */
   def count(columnName: String): TypedColumn[Any, Long] =
-    count(Column(columnName)).as(ExpressionEncoder[Long])
+    count(Column(columnName)).as(ExpressionEncoder[Long]())
 
   /**
    * Aggregate function: returns the number of distinct items in a group.
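
For context on what is being removed: the deleted `LegacyFunctions` scaladoc describes the bridge-method trick that kept old Java signatures alive in bytecode. Below is a minimal, standalone sketch of that mechanism, not Spark code; the names `BridgeMethodSketch`, `LegacyApi`, `api`, and the simplified `Column`/`TypedColumn` classes are invented for illustration. The point it demonstrates is a general scalac/JVM behavior: overriding a method with a covariant (narrower) return type makes the compiler emit an extra "bridge" method with the old, wider signature, so callers compiled against the old signature still link.

```scala
// Minimal sketch (assumed names, not Spark APIs) of the covariant-return-type
// bridge-method mechanism the removed LegacyFunctions class relied on.
object BridgeMethodSketch {

  class Column(val name: String)                        // stand-in for the "wide" return type
  class TypedColumn(name: String) extends Column(name)  // narrower subtype

  abstract class LegacyApi {
    def count(columnName: String): Column               // old, wide signature
  }

  object api extends LegacyApi {
    // Covariant override: the bytecode for `api` ends up with two `count` methods,
    //   count(String): TypedColumn   (the real implementation)
    //   count(String): Column        (compiler-generated bridge for old callers)
    override def count(columnName: String): TypedColumn =
      new TypedColumn(columnName)
  }

  def main(args: Array[String]): Unit = {
    // Both erased signatures are visible via reflection; the bridge is synthetic.
    api.getClass.getDeclaredMethods
      .filter(_.getName == "count")
      .foreach(m => println(s"${m.getName}: ${m.getReturnType.getSimpleName} (bridge=${m.isBridge})"))
  }
}
```

With this patch the trick is no longer needed, so the `LegacyFunctions` parent is dropped and the binary incompatibility is whitelisted in `MimaExcludes.scala` instead.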