diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
index e6046055bf0f61d59f0dd5132e8d323e991a7c9c..9237f2f3ddb69efa71460ef84989890f72bd0760 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
@@ -482,7 +482,7 @@ class Analyzer(
       val newOrdering = resolveSortOrders(ordering, grandchild, throws = true)
       // Construct a set that contains all of the attributes that we need to evaluate the
       // ordering.
-      val requiredAttributes = AttributeSet(newOrdering.filter(_.resolved))
+      val requiredAttributes = AttributeSet(newOrdering).filter(_.resolved)
       // Figure out which ones are missing from the projection, so that we can add them and
       // remove them after the sort.
       val missingInProject = requiredAttributes -- child.output
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisSuite.scala
index ec05cfa63c5bffd67463adfce45066fcf57d1624..24af8483a7ecc7404dc077644d0445098f97276c 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisSuite.scala
@@ -143,4 +143,14 @@ class AnalysisSuite extends AnalysisTest {
     plan = testRelation.select(CreateStructUnsafe(Seq(a, (a + 1).as("a+1"))).as("col"))
     checkAnalysis(plan, plan)
   }
+
+  test("SPARK-10534: resolve attribute references in order by clause") {
+    val a = testRelation2.output(0)
+    val c = testRelation2.output(2)
+
+    val plan = testRelation2.select('c).orderBy(Floor('a).asc)
+    val expected = testRelation2.select(c, a).orderBy(Floor(a.cast(DoubleType)).asc).select(c)
+
+    checkAnalysis(plan, expected)
+  }
 }
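
For reference, a minimal, self-contained sketch of why moving `filter(_.resolved)` outside the `AttributeSet` constructor matters. It does not use the real Catalyst classes; `Attr` and `SortKey` are hypothetical stand-ins for `AttributeReference` and `SortOrder`, and the `resolved` flag models a sort expression such as `Floor('a)` that is not yet fully resolved (the cast to `DoubleType` seen in the expected test plan has not been inserted) even though the attribute it references already resolved against the grandchild:

```scala
// Hypothetical stand-ins for Catalyst's AttributeReference and SortOrder,
// used only to illustrate the before/after behaviour of the fix above.
case class Attr(name: String, resolved: Boolean)
// `resolved` is the resolution state of the whole sort expression; it can be
// false even when every attribute it references is resolved (e.g. Floor('a)
// before the implicit cast to DoubleType is inserted, as in the test above).
case class SortKey(refs: Seq[Attr], resolved: Boolean)

object RequiredAttributesSketch extends App {
  val a = Attr("a", resolved = true)
  val ordering = Seq(SortKey(refs = Seq(a), resolved = false)) // ORDER BY floor(a)

  // Old behaviour: drop unresolved sort keys first, so `a` never reaches
  // requiredAttributes and is never added to the projection below the Sort.
  val oldRequired: Set[Attr] = ordering.filter(_.resolved).flatMap(_.refs).toSet

  // New behaviour: collect every attribute referenced by the ordering, then
  // keep only the resolved ones, so `a` is correctly reported as required.
  val newRequired: Set[Attr] = ordering.flatMap(_.refs).toSet.filter(_.resolved)

  println(s"old: $oldRequired") // Set()
  println(s"new: $newRequired") // Set(Attr(a,true))
}
```

In the old form, a query like `SELECT c FROM t ORDER BY floor(a)` never had `a` added to the projection below the Sort, which is exactly the case the new SPARK-10534 test in `AnalysisSuite` exercises.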