From e02ac303c6356cdf7fffec7361311d828a723afe Mon Sep 17 00:00:00 2001
From: zero323 <zero323@users.noreply.github.com>
Date: Mon, 13 Feb 2017 15:23:56 -0800
Subject: [PATCH] [SPARK-19429][PYTHON][SQL] Support slice arguments in
 Column.__getitem__

## What changes were proposed in this pull request?

- Add support for `slice` arguments in `Column.__getitem__`.
- Remove obsolete `__getslice__` bindings.

## How was this patch tested?

Existing unit tests, additional tests covering `[]` with `slice`.

Author: zero323 <zero323@users.noreply.github.com>

Closes #16771 from zero323/SPARK-19429.
---
 python/pyspark/sql/column.py | 11 ++++++++---
 python/pyspark/sql/tests.py  |  8 ++++++++
 2 files changed, 16 insertions(+), 3 deletions(-)

diff --git a/python/pyspark/sql/column.py b/python/pyspark/sql/column.py
index ec059d6258..73c8672eff 100644
--- a/python/pyspark/sql/column.py
+++ b/python/pyspark/sql/column.py
@@ -180,7 +180,6 @@ class Column(object):
 
     # container operators
     __contains__ = _bin_op("contains")
-    __getitem__ = _bin_op("apply")
 
     # bitwise operators
     bitwiseOR = _bin_op("bitwiseOR")
@@ -236,6 +235,14 @@ class Column(object):
             raise AttributeError(item)
         return self.getField(item)
 
+    def __getitem__(self, k):
+        if isinstance(k, slice):
+            if k.step is not None:
+                raise ValueError("slice with step is not supported.")
+            return self.substr(k.start, k.stop)
+        else:
+            return _bin_op("apply")(self, k)
+
     def __iter__(self):
         raise TypeError("Column is not iterable")
 
@@ -267,8 +274,6 @@ class Column(object):
             raise TypeError("Unexpected type: %s" % type(startPos))
         return Column(jc)
 
-    __getslice__ = substr
-
     @ignore_unicode_prefix
     @since(1.5)
     def isin(self, *cols):
diff --git a/python/pyspark/sql/tests.py b/python/pyspark/sql/tests.py
index ab9d3f6c94..d9d03337ff 100644
--- a/python/pyspark/sql/tests.py
+++ b/python/pyspark/sql/tests.py
@@ -874,6 +874,14 @@ class SQLTests(ReusedPySparkTestCase):
         self.assertTrue(all(isinstance(c, Column) for c in css))
         self.assertTrue(isinstance(ci.cast(LongType()), Column))
 
+    def test_column_getitem(self):
+        from pyspark.sql.functions import col
+
+        self.assertIsInstance(col("foo")[1:3], Column)
+        self.assertIsInstance(col("foo")[0], Column)
+        self.assertIsInstance(col("foo")["bar"], Column)
+        self.assertRaises(ValueError, lambda: col("foo")[0:10:2])
+
     def test_column_select(self):
         df = self.df
         self.assertEqual(self.testData, df.select("*").collect())
--
GitLab
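
A minimal usage sketch to go with the patch (not part of the commit): it exercises each branch of the new `Column.__getitem__`. The local `SparkSession` setup and the column names `s`, `arr`, and `m` are illustrative assumptions; only the `[]` behavior itself comes from the change above.

```python
# Sketch of the behavior added by SPARK-19429; assumes a local Spark build
# that includes this patch. Session setup and sample data are hypothetical,
# chosen only to hit each branch of the new __getitem__.
from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[1]").appName("spark-19429-demo").getOrCreate()

df = spark.createDataFrame(
    [("abcdef", [10, 20, 30], {"k": "v"})],
    ["s", "arr", "m"],
)

df.select(
    df.s[1:3],   # slice -> df.s.substr(1, 3): startPos=1, length=3, yielding "abc"
    df.arr[0],   # int key -> the JVM "apply" op: first array element, 10
    df.m["k"],   # string key -> "apply" again: map lookup, "v"
).show()

# A stepped slice hits the new guard and raises
# ValueError: slice with step is not supported.
# df.s[1:6:2]
```

Note the design choice the patch inherits from `substr`: `df.s[1:3]` passes `start` and `stop` straight through as `(startPos, length)`, so slices follow SQL substring semantics (1-based start, second value treated as a length) rather than Python's half-open indexing, and a slice step is rejected outright rather than silently ignored.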