Commit bbc24754 authored by Tathagata Das, committed by Yin Huai

[SPARK-16748][SQL] SparkExceptions during planning should not wrapped in TreeNodeException

## What changes were proposed in this pull request?
We do not want SparkExceptions from job failures in the planning phase to be wrapped in a TreeNodeException. Hence, do not wrap SparkException in TreeNodeException.
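
To illustrate the resulting behavior of `attachTree` (a minimal sketch, not part of this patch; the object name is made up and it assumes spark-catalyst is on the classpath): a `SparkException` thrown by the wrapped block now propagates unchanged, while any other non-fatal exception is still wrapped in a `TreeNodeException`.

```scala
import org.apache.spark.SparkException
import org.apache.spark.sql.catalyst.errors.{attachTree, TreeNodeException}
import org.apache.spark.sql.catalyst.expressions.Literal

// Hypothetical demo object, not part of the patch.
object AttachTreeBehaviorSketch {
  def main(args: Array[String]): Unit = {
    val tree = Literal(1) // any TreeNode works as the node being annotated

    // A SparkException (e.g. a job failure during planning) escapes unwrapped.
    try attachTree(tree, "planning") { throw new SparkException("job aborted") }
    catch { case e: SparkException => println(s"unwrapped: ${e.getMessage}") }

    // Any other non-fatal exception is still wrapped in a TreeNodeException.
    try attachTree(tree, "planning") { throw new IllegalStateException("boom") }
    catch { case e: TreeNodeException[_] => println(s"wrapped: ${e.getMessage}") }
  }
}
```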

## How was this patch tested?
New unit test

Author: Tathagata Das <tathagata.das1565@gmail.com>

Closes #14395 from tdas/SPARK-16748.
parent 2182e432
@@ -17,7 +17,10 @@
 package org.apache.spark.sql.catalyst
 
+import scala.util.control.NonFatal
+
 import org.apache.spark.sql.catalyst.trees.TreeNode
+import org.apache.spark.SparkException
 
 /**
  * Functions for attaching and retrieving trees that are associated with errors.
@@ -47,7 +50,10 @@ package object errors {
    */
   def attachTree[TreeType <: TreeNode[_], A](tree: TreeType, msg: String = "")(f: => A): A = {
     try f catch {
-      case e: Exception => throw new TreeNodeException(tree, msg, e)
+      // SPARK-16748: We do not want SparkExceptions from job failures in the planning phase
+      // to create TreeNodeException. Hence, wrap exception only if it is not SparkException.
+      case NonFatal(e) if !e.isInstanceOf[SparkException] =>
+        throw new TreeNodeException(tree, msg, e)
     }
   }
 }
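
Note that the new clause matches with Scala's `NonFatal` extractor rather than on `Exception`. The small sketch below (not from the patch; names are made up) shows which throwables `NonFatal` matches, and therefore which ones remain candidates for wrapping.

```scala
import scala.util.control.NonFatal

// Hypothetical helper, only to illustrate the NonFatal extractor used in the new catch clause.
object NonFatalSketch {
  def classify(t: Throwable): String = t match {
    case NonFatal(_) => "non-fatal: wrapped in TreeNodeException unless it is a SparkException"
    case _           => "fatal: never caught here, always propagates as-is"
  }

  def main(args: Array[String]): Unit = {
    println(classify(new RuntimeException("boom")))  // non-fatal
    println(classify(new OutOfMemoryError("heap")))  // fatal (VirtualMachineError)
    println(classify(new InterruptedException()))    // fatal (NonFatal excludes interrupts)
  }
}
```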
@@ -20,7 +20,7 @@ package org.apache.spark.sql
 import java.math.MathContext
 import java.sql.Timestamp
 
-import org.apache.spark.AccumulatorSuite
+import org.apache.spark.{AccumulatorSuite, SparkException}
 import org.apache.spark.sql.catalyst.analysis.UnresolvedException
 import org.apache.spark.sql.catalyst.expressions.SortOrder
 import org.apache.spark.sql.catalyst.plans.logical.Aggregate
@@ -1339,6 +1339,14 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
     checkAggregation("SELECT key + 1 + 1, COUNT(*) FROM testData GROUP BY key + 1", false)
   }
 
+  testQuietly(
+    "SPARK-16748: SparkExceptions during planning should not wrapped in TreeNodeException") {
+    intercept[SparkException] {
+      val df = spark.range(0, 5).map(x => (1 / x).toString).toDF("a").orderBy("a")
+      df.queryExecution.toRdd // force physical planning, but not execution of the plan
+    }
+  }
+
   test("Test to check we can use Long.MinValue") {
     checkAnswer(
       sql(s"SELECT ${Long.MinValue} FROM testData ORDER BY key LIMIT 1"), Row(Long.MinValue)
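
For context, the test's last line is enough to trigger the failure because producing the physical RDD for a global sort already runs a sampling job to compute range-partition bounds, so the division by zero surfaces during planning rather than during an action. A minimal standalone sketch of that distinction (not part of the patch; it assumes a local Spark build and the object name is made up):

```scala
import org.apache.spark.SparkException
import org.apache.spark.sql.SparkSession

// Hypothetical standalone reproduction, not part of the patch.
object PlanningVsExecutionSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[2]")
      .appName("SPARK-16748-sketch")
      .getOrCreate()
    import spark.implicits._

    val df = spark.range(0, 5).map(x => (1 / x).toString).toDF("a").orderBy("a")

    try {
      // Building the RDD for the global sort samples the data for range partitioning,
      // so the 1/0 error already fails here, and it arrives as a plain SparkException.
      df.queryExecution.toRdd
    } catch {
      case e: SparkException => println(s"failed during planning: ${e.getMessage}")
    }

    // Executing the plan would additionally require an action, e.g. df.collect().
    spark.stop()
  }
}
```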