From aea7a99761b00b514df2b58ca836ba0de6742a3d Mon Sep 17 00:00:00 2001
From: carlmartin <carlmartinmax@gmail.com>
Date: Sun, 30 Nov 2014 16:19:41 -0800
Subject: [PATCH] [SPARK-4623] Add an error message when spark-sql is used in
 yarn-cluster mode

If spark-sql is used in yarn-cluster mode, print an error message, just as
the Spark shell does in yarn-cluster mode.

Author: carlmartin <carlmartinmax@gmail.com>
Author: huangzhaowei <carlmartinmax@gmail.com>

Closes #3479 from SaintBacchus/sparkSqlShell and squashes the following commits:

35829a9 [carlmartin] improve the description of comment
e6c1eb7 [carlmartin] add a comment in bin/spark-sql to remind user who wants to change the class
f1c5c8d [carlmartin] Merge branch 'master' into sparkSqlShell
8e112c5 [huangzhaowei] singular form
ec957bc [carlmartin] Add the some error infomation if using spark-sql in yarn-cluster mode
7bcecc2 [carlmartin] Merge branch 'master' of https://github.com/apache/spark into codereview
4fad75a [carlmartin] Add the Error infomation using spark-sql in yarn-cluster mode
---
 bin/spark-sql                                             | 2 ++
 .../main/scala/org/apache/spark/deploy/SparkSubmit.scala  | 9 +++++++++
 2 files changed, 11 insertions(+)

diff --git a/bin/spark-sql b/bin/spark-sql
index 63d00437d5..3b6cc420fe 100755
--- a/bin/spark-sql
+++ b/bin/spark-sql
@@ -23,6 +23,8 @@
 # Enter posix mode for bash
 set -o posix
 
+# NOTE: This exact class name is matched downstream by SparkSubmit.
+# Any changes need to be reflected there.
 CLASS="org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver"
 
 # Figure out where Spark is installed
diff --git a/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala b/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala
index 00f291823e..0c7d247519 100644
--- a/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala
@@ -142,6 +142,8 @@ object SparkSubmit {
         printErrorAndExit("Cluster deploy mode is currently not supported for python applications.")
       case (_, CLUSTER) if isShell(args.primaryResource) =>
         printErrorAndExit("Cluster deploy mode is not applicable to Spark shells.")
+      case (_, CLUSTER) if isSqlShell(args.mainClass) =>
+        printErrorAndExit("Cluster deploy mode is not applicable to Spark SQL shell.")
       case _ =>
     }
 
@@ -393,6 +395,13 @@ object SparkSubmit {
     primaryResource == SPARK_SHELL || primaryResource == PYSPARK_SHELL
   }
 
+  /**
+   * Return whether the given main class represents a sql shell.
+   */
+  private[spark] def isSqlShell(mainClass: String): Boolean = {
+    mainClass == "org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver"
+  }
+
   /**
    * Return whether the given primary resource requires running python.
    */
--
GitLab
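
The guard added by this patch is a plain string comparison on the fully qualified main class name, which is why bin/spark-sql gains the NOTE about keeping that class name in sync with SparkSubmit. Below is a minimal, self-contained Scala sketch (not part of the patch; the object and variable names are invented for illustration) showing how such a check would reject the SQL CLI when the deploy mode is "cluster":

    // Sketch only: mirrors the idea of SparkSubmit.isSqlShell with stand-in names.
    object SqlShellGuardSketch {
      // The fully qualified class name that the spark-sql script submits.
      private val SparkSqlCliDriver =
        "org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver"

      // Same string comparison the patch introduces as isSqlShell.
      def isSqlShell(mainClass: String): Boolean =
        mainClass == SparkSqlCliDriver

      def main(args: Array[String]): Unit = {
        val deployMode = "cluster"          // hypothetical submission in cluster mode
        val mainClass  = SparkSqlCliDriver  // what the spark-sql script would submit
        if (deployMode == "cluster" && isSqlShell(mainClass)) {
          // The real code calls printErrorAndExit; this sketch only prints and exits.
          Console.err.println("Cluster deploy mode is not applicable to Spark SQL shell.")
          sys.exit(1)
        }
      }
    }

Because the check matches the class name literally, renaming the class in bin/spark-sql without updating the comparison in SparkSubmit.scala would silently disable the error message.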