diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningUtils.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningUtils.scala
index 388df7002dc36cff332255449cc8616216c50e47..c3561099d6842419ae2b58b230277f7a7996fc16 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningUtils.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningUtils.scala
@@ -351,7 +351,7 @@ private[sql] object PartitioningUtils {
       }
     }
 
-    if (partitionColumns.size == schema.fields.size) {
+    if (partitionColumns.nonEmpty && partitionColumns.size == schema.fields.length) {
       throw new AnalysisException(s"Cannot use all columns for partition columns")
     }
   }
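
The guard matters because an empty DataFrame trips the old check: `spark.emptyDataFrame` has zero schema fields, and an unpartitioned write supplies zero partition columns, so `partitionColumns.size == schema.fields.size` compared 0 == 0 and the write was rejected with "Cannot use all columns for partition columns". The added `nonEmpty` test confines the error to writes that really partition by every column. Below is a minimal sketch of the predicate outside Spark; `checkPartitionColumns` and its parameters are illustrative names, not the patched method's signature:

```scala
import scala.util.Try

// Hypothetical stand-in for the patched check in PartitioningUtils.
def checkPartitionColumns(partitionColumns: Seq[String], schemaFields: Seq[String]): Unit = {
  // The old condition, `partitionColumns.size == schemaFields.size`, also fired
  // for an empty schema (0 == 0); the nonEmpty guard limits it to real misuse.
  if (partitionColumns.nonEmpty && partitionColumns.size == schemaFields.length) {
    throw new IllegalArgumentException("Cannot use all columns for partition columns")
  }
}

println(Try(checkPartitionColumns(Seq.empty, Seq.empty)).isSuccess)       // true; threw before the patch
println(Try(checkPartitionColumns(Seq("id"), Seq("id"))).isSuccess)       // false: every column is a partition column
println(Try(checkPartitionColumns(Seq("id"), Seq("id", "v"))).isSuccess)  // true: a data column remains
```
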
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/test/DataFrameReaderWriterSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/test/DataFrameReaderWriterSuite.scala
index 58b1d56358147d58a38ff0e858a8fc18e15fa9da..d454100ccb8f6275524dc7d9e83b1e5febef1a44 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/test/DataFrameReaderWriterSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/test/DataFrameReaderWriterSuite.scala
@@ -246,8 +246,9 @@ class DataFrameReaderWriterSuite extends QueryTest with SharedSQLContext with Be
         spark.range(10).write.format("parquet").mode("overwrite").partitionBy("id").save(path)
       }
       intercept[AnalysisException] {
-        spark.range(10).write.format("orc").mode("overwrite").partitionBy("id").save(path)
+        spark.range(10).write.format("csv").mode("overwrite").partitionBy("id").save(path)
       }
+      spark.emptyDataFrame.write.format("parquet").mode("overwrite").save(path)
     }
   }
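
The test keeps asserting that partitioning by every column fails; presumably `orc` was swapped for `csv` so the intercepted `AnalysisException` comes from the all-columns check rather than from the ORC source, which required Hive support at the time (a plausible reading, not stated in the diff). The appended line is the regression check itself: an unpartitioned write of `spark.emptyDataFrame` must now succeed. A self-contained sketch of that check, assuming a local master and a scratch directory:

```scala
import java.nio.file.Files
import org.apache.spark.sql.SparkSession

// Assumed session settings and output path, for illustration only.
val spark = SparkSession.builder().master("local[1]").appName("empty-df-write").getOrCreate()
val path = Files.createTempDirectory("empty-df-write").resolve("out").toString

// Before the patch this threw "Cannot use all columns for partition columns"
// even though no partitionBy was requested; with the nonEmpty guard it succeeds.
spark.emptyDataFrame.write.format("parquet").mode("overwrite").save(path)

spark.stop()
```
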