From 9b1b3ae771babf127f64898d5dc110721597a760 Mon Sep 17 00:00:00 2001
From: Dongjoon Hyun <dongjoon@apache.org>
Date: Wed, 29 Jun 2016 15:00:41 -0700
Subject: [PATCH] [SPARK-16006][SQL] Attempting to write empty DataFrame with no
 fields throws non-intuitive exception

## What changes were proposed in this pull request?

This PR allows `emptyDataFrame.write` to succeed when the user did not specify any partition columns. Previously, the "Cannot use all columns for partition columns" check fired vacuously: an empty DataFrame has zero schema fields, which trivially equals the zero partition columns specified.
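For illustration, a minimal standalone sketch of why the old comparison fired (paraphrasing the check in `PartitioningUtils` shown in the diff below; the local variable names here are hypothetical):

```scala
// Hypothetical sketch of the old, vacuous comparison: the user specified
// zero partition columns and spark.emptyDataFrame has zero schema fields,
// so 0 == 0 triggered the error even though nothing was partitioned.
val partitionColumns = Seq.empty[String]  // none specified by the user
val schemaFields = Array.empty[String]    // emptyDataFrame has no fields
if (partitionColumns.size == schemaFields.length) {
  // old behavior: AnalysisException("Cannot use all columns for partition columns")
}
```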

**Before**
```scala
scala> spark.emptyDataFrame.write.parquet("/tmp/t1")
org.apache.spark.sql.AnalysisException: Cannot use all columns for partition columns;
scala> spark.emptyDataFrame.write.csv("/tmp/t1")
org.apache.spark.sql.AnalysisException: Cannot use all columns for partition columns;
```

**After**

No exception occurs, and the created directory contains only one file, `_SUCCESS`, as expected.
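A sketch of the same session after the fix (the output comments are assumptions based on the result described above; `/tmp/t2` is substituted for the second write so it does not fail with a path-already-exists error):

```scala
scala> spark.emptyDataFrame.write.parquet("/tmp/t1")
// completes without an exception; /tmp/t1 contains only _SUCCESS

scala> spark.emptyDataFrame.write.csv("/tmp/t2")
// completes without an exception; /tmp/t2 contains only _SUCCESS
```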

## How was this patch tested?

Pass the Jenkins tests, including the updated test case in `DataFrameReaderWriterSuite`.

Author: Dongjoon Hyun <dongjoon@apache.org>

Closes #13730 from dongjoon-hyun/SPARK-16006.
---
 .../spark/sql/execution/datasources/PartitioningUtils.scala    | 2 +-
 .../org/apache/spark/sql/test/DataFrameReaderWriterSuite.scala | 3 ++-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningUtils.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningUtils.scala
index 388df7002d..c3561099d6 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningUtils.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningUtils.scala
@@ -351,7 +351,7 @@ private[sql] object PartitioningUtils {
       }
     }
 
-    if (partitionColumns.size == schema.fields.size) {
+    if (partitionColumns.nonEmpty && partitionColumns.size == schema.fields.length) {
       throw new AnalysisException(s"Cannot use all columns for partition columns")
     }
   }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/test/DataFrameReaderWriterSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/test/DataFrameReaderWriterSuite.scala
index 58b1d56358..d454100ccb 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/test/DataFrameReaderWriterSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/test/DataFrameReaderWriterSuite.scala
@@ -246,8 +246,9 @@ class DataFrameReaderWriterSuite extends QueryTest with SharedSQLContext with Be
         spark.range(10).write.format("parquet").mode("overwrite").partitionBy("id").save(path)
       }
       intercept[AnalysisException] {
-        spark.range(10).write.format("orc").mode("overwrite").partitionBy("id").save(path)
+        spark.range(10).write.format("csv").mode("overwrite").partitionBy("id").save(path)
       }
+      spark.emptyDataFrame.write.format("parquet").mode("overwrite").save(path)
     }
   }
 
-- 
GitLab