diff --git a/examples/src/main/python/streaming/queue_stream.py b/examples/src/main/python/streaming/queue_stream.py
new file mode 100644
index 0000000000000000000000000000000000000000..dcd6a0fc6ff91ec3f609665e68922a6b58d7f1a8
--- /dev/null
+++ b/examples/src/main/python/streaming/queue_stream.py
@@ -0,0 +1,50 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+ Create a queue of RDDs that will be mapped/reduced one at a time at
+ 1 second intervals.
+
+ To run this example use:
+    `$ bin/spark-submit examples/src/main/python/streaming/queue_stream.py`
+"""
+import sys
+import time
+
+from pyspark import SparkContext
+from pyspark.streaming import StreamingContext
+
+if __name__ == "__main__":
+
+    sc = SparkContext(appName="PythonStreamingQueueStream")
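+    # Create a StreamingContext with a 1 second batch interval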
+    ssc = StreamingContext(sc, 1)
+
+    # Create the queue of RDDs that will be pushed into
+    # a QueueInputDStream
+    rddQueue = []
+    for i in xrange(5):
+        rddQueue.append(ssc.sparkContext.parallelize([j for j in xrange(1, 1001)], 10))
+
+    # Create the QueueInputDStream and use it to do some processing
+    inputStream = ssc.queueStream(rddQueue)
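+    # Count how many numbers in each batch share the same last digit:
+    # map each value to (value % 10, 1), then sum the ones per key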
+    mappedStream = inputStream.map(lambda x: (x % 10, 1))
+    reducedStream = mappedStream.reduceByKey(lambda a, b: a + b)
+    reducedStream.pprint()
+
+    ssc.start()
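+    # Let the stream run long enough to drain all five queued RDDs
+    # (one batch per second) before stopping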
+    time.sleep(6)
+    ssc.stop(stopSparkContext=True, stopGraceFully=True)
diff --git a/python/pyspark/streaming/dstream.py b/python/pyspark/streaming/dstream.py
index ff097985fae3edb749ad61a3250ebcbf5b77d1e2..8dcb9645cdc6b157ca89981f3921de879d7d9b23 100644
--- a/python/pyspark/streaming/dstream.py
+++ b/python/pyspark/streaming/dstream.py
@@ -176,7 +176,7 @@ class DStream(object):
                 print(record)
             if len(taken) > num:
                 print("...")
-            print()
+            print("")
 
         self.foreachRDD(takeAndPrint)