diff --git a/python/pyspark/sql/context.py b/python/pyspark/sql/context.py
index 8a1a874884e2a8df2ad05e45ee89021147e432bb..b5dde13ed7cc2461f6cb35b0ee9f92da405917cb 100644
--- a/python/pyspark/sql/context.py
+++ b/python/pyspark/sql/context.py
@@ -26,7 +26,7 @@
 from pyspark import since
 from pyspark.rdd import ignore_unicode_prefix
 from pyspark.sql.session import _monkey_patch_RDD, SparkSession
 from pyspark.sql.dataframe import DataFrame
-from pyspark.sql.readwriter import DataFrameReader
+from pyspark.sql.readwriter import DataFrameReader, DataStreamReader
 from pyspark.sql.types import Row, StringType
 from pyspark.sql.utils import install_exception_handler
@@ -438,8 +438,12 @@ class SQLContext(object):
         .. note:: Experimental.
 
         :return: :class:`DataStreamReader`
+
+        >>> text_sdf = sqlContext.readStream.text(os.path.join(tempfile.mkdtemp(), 'data'))
+        >>> text_sdf.isStreaming
+        True
         """
-        return DataStreamReader(self._wrapped)
+        return DataStreamReader(self)
 
     @property
     @since(2.0)
@@ -515,6 +519,7 @@ class UDFRegistration(object):
 def _test():
     import os
     import doctest
+    import tempfile
     from pyspark.context import SparkContext
     from pyspark.sql import Row, SQLContext
     import pyspark.sql.context
@@ -523,6 +528,8 @@ def _test():
     globs = pyspark.sql.context.__dict__.copy()
     sc = SparkContext('local[4]', 'PythonTest')
+    globs['tempfile'] = tempfile
+    globs['os'] = os
     globs['sc'] = sc
     globs['sqlContext'] = SQLContext(sc)
     globs['rdd'] = rdd = sc.parallelize(
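
For reference, below is a minimal standalone sketch (not part of the patch) of the behavior the new doctest exercises. It assumes a local PySpark 2.x installation; the app name 'readStreamExample' is illustrative.

    # Standalone sketch: mirrors the doctest added to SQLContext.readStream
    # above, outside the doctest harness.
    import os
    import tempfile

    from pyspark import SparkContext
    from pyspark.sql import SQLContext

    sc = SparkContext('local[4]', 'readStreamExample')  # app name is illustrative
    sqlContext = SQLContext(sc)

    # With the patch, readStream constructs the reader from the SQLContext
    # itself (return DataStreamReader(self)) rather than self._wrapped, so
    # the property can be used directly from a SQLContext.
    text_sdf = sqlContext.readStream.text(os.path.join(tempfile.mkdtemp(), 'data'))
    assert text_sdf.isStreaming  # a streaming DataFrame, matching the doctest

    sc.stop()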