From bbc247548ac6faeca15afc05c266cee37ef13416 Mon Sep 17 00:00:00 2001
From: Tathagata Das <tathagata.das1565@gmail.com>
Date: Fri, 29 Jul 2016 19:59:35 -0700
Subject: [PATCH] [SPARK-16748][SQL] SparkExceptions during planning should not
 be wrapped in TreeNodeException

## What changes were proposed in this pull request?
We do not want SparkExceptions from job failures in the planning phase to be wrapped in TreeNodeException. Hence, attachTree now rethrows SparkException as-is and wraps only other non-fatal exceptions.
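
To make the control flow concrete, here is a minimal standalone sketch of the selective-wrapping pattern (the two exception classes and the attachContext helper are simplified stand-ins for the real catalyst types, not the actual implementation):

```scala
import scala.util.control.NonFatal

// Stand-ins so the sketch compiles on its own; the real classes live in
// org.apache.spark and org.apache.spark.sql.catalyst.errors.
class SparkException(msg: String) extends Exception(msg)
class TreeNodeException(msg: String, cause: Throwable) extends Exception(msg, cause)

object SelectiveWrapSketch extends App {
  // Simplified model of attachTree after this change: a SparkException
  // escapes unchanged; any other non-fatal exception is wrapped so the
  // failing context stays attached to the error.
  def attachContext[A](msg: String)(f: => A): A = {
    try f catch {
      case NonFatal(e) if !e.isInstanceOf[SparkException] =>
        throw new TreeNodeException(msg, e)
    }
  }

  // The SparkException reaches the caller directly, unwrapped.
  try attachContext("planning") { throw new SparkException("job aborted") }
  catch { case e: SparkException => println(s"caught directly: ${e.getMessage}") }
}
```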

## How was this patch tested?
A new unit test in SQLQuerySuite that forces physical planning of a query whose planning-time job fails, and verifies the failure surfaces as a SparkException rather than a TreeNodeException.
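
For illustration, a usage sketch of the scenario the test exercises (assuming a spark-shell style environment, i.e. a SparkSession in scope as spark with its implicits imported): the global sort inserts a range-partitioned exchange, and the sampling job it runs while queryExecution.toRdd prepares the physical plan is what fails, so the error now surfaces as a plain SparkException:

```scala
import org.apache.spark.SparkException

// 1 / x throws ArithmeticException at x == 0; the task failure happens in
// the sampling job that the range partitioner runs while the physical plan
// is being prepared.
val df = spark.range(0, 5).map(x => (1 / x).toString).toDF("a").orderBy("a")

try {
  df.queryExecution.toRdd // forces physical planning, but does not run the plan
} catch {
  // Before this patch the same failure arrived as a TreeNodeException with
  // the SparkException buried in getCause.
  case e: SparkException => println(s"planning failed: ${e.getMessage}")
}
```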

Author: Tathagata Das <tathagata.das1565@gmail.com>

Closes #14395 from tdas/SPARK-16748.
---
 .../org/apache/spark/sql/catalyst/errors/package.scala |  8 +++++++-
 .../scala/org/apache/spark/sql/SQLQuerySuite.scala     | 10 +++++++++-
 2 files changed, 16 insertions(+), 2 deletions(-)

diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/errors/package.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/errors/package.scala
index 0420b4b538..0d45f371fa 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/errors/package.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/errors/package.scala
@@ -17,7 +17,10 @@
 
 package org.apache.spark.sql.catalyst
 
+import scala.util.control.NonFatal
+
+import org.apache.spark.SparkException
 import org.apache.spark.sql.catalyst.trees.TreeNode
 
 /**
  * Functions for attaching and retrieving trees that are associated with errors.
@@ -47,7 +50,10 @@ package object errors {
    */
   def attachTree[TreeType <: TreeNode[_], A](tree: TreeType, msg: String = "")(f: => A): A = {
     try f catch {
-      case e: Exception => throw new TreeNodeException(tree, msg, e)
+      // SPARK-16748: SparkExceptions from job failures in the planning phase should not be
+      // wrapped in TreeNodeException, so wrap the exception only if it is not a SparkException.
+      case NonFatal(e) if !e.isInstanceOf[SparkException] =>
+        throw new TreeNodeException(tree, msg, e)
     }
   }
 }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
index d89bda1e48..6e485a8f5b 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
@@ -20,7 +20,7 @@ package org.apache.spark.sql
 import java.math.MathContext
 import java.sql.Timestamp
 
-import org.apache.spark.AccumulatorSuite
+import org.apache.spark.{AccumulatorSuite, SparkException}
 import org.apache.spark.sql.catalyst.analysis.UnresolvedException
 import org.apache.spark.sql.catalyst.expressions.SortOrder
 import org.apache.spark.sql.catalyst.plans.logical.Aggregate
@@ -1339,6 +1339,14 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
     checkAggregation("SELECT key + 1 + 1, COUNT(*) FROM testData GROUP BY key + 1", false)
   }
 
+  testQuietly(
+    "SPARK-16748: SparkExceptions during planning should not wrapped in TreeNodeException") {
+    intercept[SparkException] {
+      val df = spark.range(0, 5).map(x => (1 / x).toString).toDF("a").orderBy("a")
+      df.queryExecution.toRdd // force physical planning, but not execution of the plan
+    }
+  }
+
   test("Test to check we can use Long.MinValue") {
     checkAnswer(
       sql(s"SELECT ${Long.MinValue} FROM testData ORDER BY key LIMIT 1"), Row(Long.MinValue)
-- 
GitLab