From befab9c1c6b59ad90f63a7d10e12b186be897f15 Mon Sep 17 00:00:00 2001
From: Sean Owen <sowen@cloudera.com>
Date: Tue, 30 Aug 2016 11:19:45 +0100
Subject: [PATCH] [SPARK-17264][SQL] DataStreamWriter should document that it
 only supports Parquet for now

## What changes were proposed in this pull request?

Clarify in the API docs that DataStreamWriter currently supports only Parquet as an output format.
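
For context, a minimal Scala sketch of the sink being documented (not part of this change); the input/output paths and checkpoint location below are illustrative placeholders, not values taken from the patch:

```scala
import org.apache.spark.sql.SparkSession

val spark = SparkSession.builder().appName("parquet-sink-sketch").getOrCreate()

// Any streaming Dataset works here; a text-file source is used only as a stand-in.
val lines = spark.readStream.text("/tmp/stream-in")            // hypothetical input directory

val query = lines.writeStream
  .format("parquet")                                           // the only built-in file format documented for now
  .option("checkpointLocation", "/tmp/stream-checkpoint")      // required for the file sink
  .start("/tmp/stream-out")                                    // hypothetical output directory

query.awaitTermination()
```

The file sink runs in append mode (the default), and the checkpoint location must be set or the query will fail to start.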

## How was this patch tested?

(Doc build -- no functional changes to test)

Author: Sean Owen <sowen@cloudera.com>

Closes #14860 from srowen/SPARK-17264.
---
 python/pyspark/sql/streaming.py                                 | 2 +-
 .../scala/org/apache/spark/sql/streaming/DataStreamWriter.scala | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/python/pyspark/sql/streaming.py b/python/pyspark/sql/streaming.py
index a0ba5825f3..67375f6b5f 100644
--- a/python/pyspark/sql/streaming.py
+++ b/python/pyspark/sql/streaming.py
@@ -591,7 +591,7 @@ class DataStreamWriter(object):
 
         .. note:: Experimental.
 
-        :param source: string, name of the data source, e.g. 'json', 'parquet'.
+        :param source: string, name of the data source, which for now can be 'parquet'.
 
         >>> writer = sdf.writeStream.format('json')
         """
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamWriter.scala b/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamWriter.scala
index d38e3e5812..f70c7d08a6 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamWriter.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamWriter.scala
@@ -122,7 +122,7 @@ final class DataStreamWriter[T] private[sql](ds: Dataset[T]) {
 
   /**
    * :: Experimental ::
-   * Specifies the underlying output data source. Built-in options include "parquet", "json", etc.
+   * Specifies the underlying output data source. Built-in options include "parquet" for now.
    *
    * @since 2.0.0
    */
-- 
GitLab