From b3c22912284c2a010a4af3c43dc5e6fd53c68f8c Mon Sep 17 00:00:00 2001
From: Gaetan Semet <gaetan@xeberon.net>
Date: Mon, 12 Sep 2016 12:21:33 +0100
Subject: [PATCH] [SPARK-16992][PYSPARK] use map comprehension in doc

Code is equivalent, but a list comprehension is usually faster than map().

Author: Gaetan Semet <gaetan@xeberon.net>

Closes #14863 from Stibbons/map_comprehension.
---
 examples/src/main/python/ml/quantile_discretizer_example.py | 2 +-
 examples/src/main/python/ml/vector_slicer_example.py        | 4 ++--
 examples/src/main/python/sql/hive.py                        | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/examples/src/main/python/ml/quantile_discretizer_example.py b/examples/src/main/python/ml/quantile_discretizer_example.py
index 788a0baffe..0fc1d1949a 100644
--- a/examples/src/main/python/ml/quantile_discretizer_example.py
+++ b/examples/src/main/python/ml/quantile_discretizer_example.py
@@ -29,7 +29,7 @@ if __name__ == "__main__":
         .getOrCreate()
 
     # $example on$
-    data = [(0, 18.0,), (1, 19.0,), (2, 8.0,), (3, 5.0,), (4, 2.2,)]
+    data = [(0, 18.0), (1, 19.0), (2, 8.0), (3, 5.0), (4, 2.2)]
     df = spark.createDataFrame(data, ["id", "hour"])
     # $example off$
 
diff --git a/examples/src/main/python/ml/vector_slicer_example.py b/examples/src/main/python/ml/vector_slicer_example.py
index d2f46b190f..68c8cfe27e 100644
--- a/examples/src/main/python/ml/vector_slicer_example.py
+++ b/examples/src/main/python/ml/vector_slicer_example.py
@@ -32,8 +32,8 @@ if __name__ == "__main__":
 
     # $example on$
     df = spark.createDataFrame([
-        Row(userFeatures=Vectors.sparse(3, {0: -2.0, 1: 2.3}),),
-        Row(userFeatures=Vectors.dense([-2.0, 2.3, 0.0]),)])
+        Row(userFeatures=Vectors.sparse(3, {0: -2.0, 1: 2.3})),
+        Row(userFeatures=Vectors.dense([-2.0, 2.3, 0.0]))])
 
     slicer = VectorSlicer(inputCol="userFeatures", outputCol="features", indices=[1])
 
diff --git a/examples/src/main/python/sql/hive.py b/examples/src/main/python/sql/hive.py
index 9b2a2c4e6a..98b48908b5 100644
--- a/examples/src/main/python/sql/hive.py
+++ b/examples/src/main/python/sql/hive.py
@@ -79,7 +79,7 @@ if __name__ == "__main__":
 
     # You can also use DataFrames to create temporary views within a SparkSession.
     Record = Row("key", "value")
-    recordsDF = spark.createDataFrame(map(lambda i: Record(i, "val_" + str(i)), range(1, 101)))
+    recordsDF = spark.createDataFrame([Record(i, "val_" + str(i)) for i in range(1, 101)])
    recordsDF.createOrReplaceTempView("records")
 
     # Queries can then join DataFrame data with data stored in Hive.
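
A note on the performance claim in the commit message: it is easy to measure. The sketch below is not part of the patch; it assumes only the standard-library timeit module and substitutes plain tuples for PySpark's Row so it runs without Spark. It times the two equivalent spellings changed in hive.py:

    import timeit

    # Build the same 100 records both ways. map() with a lambda pays a
    # Python-level function call per element; the list comprehension
    # evaluates its expression inline, which is why it is usually faster.
    map_stmt = 'list(map(lambda i: (i, "val_" + str(i)), range(1, 101)))'
    comp_stmt = '[(i, "val_" + str(i)) for i in range(1, 101)]'

    print("map + lambda:      ", timeit.timeit(map_stmt, number=10000))
    print("list comprehension:", timeit.timeit(comp_stmt, number=10000))

Beyond speed, in Python 3 map() returns a lazy iterator rather than a list, so the comprehension also hands createDataFrame an explicit list instead of relying on how it consumes a one-shot iterator.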