From ee682fe293b47988056b540ee46ca49861309982 Mon Sep 17 00:00:00 2001
From: Andrew Or <andrew@databricks.com>
Date: Wed, 25 May 2016 15:08:39 -0700
Subject: [PATCH] [SPARK-15534][SPARK-15535][SQL] Truncate table fixes

## What changes were proposed in this pull request?

Two changes, both sketched in the example below:
- When `TRUNCATE TABLE` fails (e.g. the target table does not exist or is a temporary table), it currently just logs an error and returns nothing. Instead, we should throw exceptions.
- Remove `TRUNCATE TABLE ... COLUMNS`, which was never supported by either Spark or Hive.

## How was this patch tested?

Jenkins.

Author: Andrew Or <andrew@databricks.com>

Closes #13302 from andrewor14/truncate-table.
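A minimal sketch of the two behavior changes, assuming a local build with this patch applied. The object name, master setting, and table names are illustrative, not part of the patch:

```scala
import org.apache.spark.sql.{AnalysisException, SparkSession}
import org.apache.spark.sql.catalyst.parser.ParseException

// Illustrative only: object name, master, and table names are assumptions.
object TruncateTableSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("truncate-table-sketch")
      .getOrCreate()

    // Change 1: TRUNCATE TABLE on a nonexistent table used to log an error
    // and return an empty result; with this patch it throws AnalysisException.
    try {
      spark.sql("TRUNCATE TABLE table_that_does_not_exist")
    } catch {
      case e: AnalysisException => println(s"analysis error: ${e.getMessage}")
    }

    // Change 2: the COLUMNS clause is removed from the grammar, so it now
    // fails at parse time (ParseException) instead of in the AST builder.
    try {
      spark.sql("TRUNCATE TABLE table_that_does_not_exist COLUMNS (col1)")
    } catch {
      case e: ParseException => println(s"parse error: ${e.getMessage}")
    }

    spark.stop()
  }
}
```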
COLUMNS", ctx) - } TruncateTableCommand( visitTableIdentifier(ctx.tableIdentifier), - Option(ctx.partitionSpec).map(visitNonOptionalPartitionSpec) - ) + Option(ctx.partitionSpec).map(visitNonOptionalPartitionSpec)) } /** diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala index 13e63a1bef..bef4c9222c 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala @@ -278,7 +278,7 @@ case class LoadDataCommand( * * The syntax of this command is: * {{{ - * TRUNCATE TABLE tablename [PARTITION (partcol1=val1, partcol2=val2 ...)] + * TRUNCATE TABLE tablename [PARTITION (partcol1=val1, partcol2=val2 ...)] * }}} */ case class TruncateTableCommand( @@ -288,9 +288,10 @@ case class TruncateTableCommand( override def run(sparkSession: SparkSession): Seq[Row] = { val catalog = sparkSession.sessionState.catalog if (!catalog.tableExists(tableName)) { - logError(s"table '$tableName' in TRUNCATE TABLE does not exist.") + throw new AnalysisException(s"Table '$tableName' in TRUNCATE TABLE does not exist.") } else if (catalog.isTemporaryTable(tableName)) { - logError(s"table '$tableName' in TRUNCATE TABLE is a temporary table.") + throw new AnalysisException( + s"Operation not allowed: TRUNCATE TABLE on temporary tables: '$tableName'") } else { val locations = if (partitionSpec.isDefined) { catalog.listPartitions(tableName, partitionSpec).map(_.storage.locationUri) diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveCommandSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveCommandSuite.scala index df62ba08b8..6f374d713b 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveCommandSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveCommandSuite.scala @@ -289,10 +289,6 @@ class HiveCommandSuite extends QueryTest with SQLTestUtils with TestHiveSingleto val testResults = sql("SELECT * FROM non_part_table").collect() - intercept[ParseException] { - sql("TRUNCATE TABLE non_part_table COLUMNS (employeeID)") - } - sql("TRUNCATE TABLE non_part_table") checkAnswer(sql("SELECT * FROM non_part_table"), Seq.empty[Row]) @@ -320,10 +316,6 @@ class HiveCommandSuite extends QueryTest with SQLTestUtils with TestHiveSingleto sql("SELECT employeeID, employeeName FROM part_table WHERE c = '2' AND d = '2'"), testResults) - intercept[ParseException] { - sql("TRUNCATE TABLE part_table PARTITION(c='1', d='1') COLUMNS (employeeID)") - } - sql("TRUNCATE TABLE part_table PARTITION(c='1', d='1')") checkAnswer( sql("SELECT employeeID, employeeName FROM part_table WHERE c = '1' AND d = '1'"), @@ -332,10 +324,6 @@ class HiveCommandSuite extends QueryTest with SQLTestUtils with TestHiveSingleto sql("SELECT employeeID, employeeName FROM part_table WHERE c = '1' AND d = '2'"), testResults) - intercept[ParseException] { - sql("TRUNCATE TABLE part_table PARTITION(c='1') COLUMNS (employeeID)") - } - sql("TRUNCATE TABLE part_table PARTITION(c='1')") checkAnswer( sql("SELECT employeeID, employeeName FROM part_table WHERE c = '1'"), -- GitLab