From fdde7d0aa0ef69d0e9a88cf712601bba1d5b0706 Mon Sep 17 00:00:00 2001
From: "Joseph K. Bradley" <joseph@databricks.com>
Date: Tue, 5 Jul 2016 17:00:24 -0700
Subject: [PATCH] [SPARK-16348][ML][MLLIB][PYTHON] Use full classpaths for pyspark ML JVM calls

## What changes were proposed in this pull request?

Issue: Omitting the full classpath can cause problems when calling JVM methods or classes from pyspark.

This PR: Changed all uses of `jvm.X` in pyspark.ml and pyspark.mllib to use the full classpath for `X`.
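
For example (an editor's sketch, not part of the original change; it assumes a live `SparkContext` and mirrors the serialization round-trip used in the tests below), the difference between the fragile and the robust call looks like this:

```python
# Editor's sketch (not from the patch): assumes a normal pyspark session.
from pyspark import SparkContext
from pyspark.mllib.linalg import Vectors
from pyspark.serializers import PickleSerializer

sc = SparkContext.getOrCreate()
ser = PickleSerializer()
v = Vectors.dense([1.0, 2.0])

# Fragile: a bare name like `sc._jvm.SerDe` resolves only when the Py4J
# gateway happens to have java_import-ed that class; otherwise the attribute
# is an unresolved placeholder and the call fails at runtime.
# jvec = sc._jvm.SerDe.loads(bytearray(ser.dumps(v)))

# Robust: the fully qualified classpath always names the intended class.
SerDe = sc._jvm.org.apache.spark.mllib.api.python.SerDe
jvec = SerDe.loads(bytearray(ser.dumps(v)))
nv = ser.loads(bytes(SerDe.dumps(jvec)))
assert v == nv
```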

## How was this patch tested?

Existing unit tests. Manual testing in an environment where this was an issue.

Author: Joseph K. Bradley <joseph@databricks.com>

Closes #14023 from jkbradley/SPARK-16348.
---
 python/pyspark/ml/common.py            | 10 +++++-----
 python/pyspark/ml/tests.py             |  8 ++++----
 python/pyspark/mllib/clustering.py     |  5 +++--
 python/pyspark/mllib/common.py         | 10 +++++-----
 python/pyspark/mllib/feature.py        |  2 +-
 python/pyspark/mllib/fpm.py            |  2 +-
 python/pyspark/mllib/recommendation.py |  2 +-
 python/pyspark/mllib/tests.py          | 15 ++++++++-------
 8 files changed, 28 insertions(+), 26 deletions(-)

diff --git a/python/pyspark/ml/common.py b/python/pyspark/ml/common.py
index 256e91e141..7d449aaccb 100644
--- a/python/pyspark/ml/common.py
+++ b/python/pyspark/ml/common.py
@@ -63,7 +63,7 @@ def _to_java_object_rdd(rdd):
     RDD is serialized in batch or not.
     """
     rdd = rdd._reserialize(AutoBatchedSerializer(PickleSerializer()))
-    return rdd.ctx._jvm.MLSerDe.pythonToJava(rdd._jrdd, True)
+    return rdd.ctx._jvm.org.apache.spark.ml.python.MLSerDe.pythonToJava(rdd._jrdd, True)
 
 
 def _py2java(sc, obj):
@@ -82,7 +82,7 @@ def _py2java(sc, obj):
         pass
     else:
         data = bytearray(PickleSerializer().dumps(obj))
-        obj = sc._jvm.MLSerDe.loads(data)
+        obj = sc._jvm.org.apache.spark.ml.python.MLSerDe.loads(data)
     return obj
 
 
@@ -95,17 +95,17 @@ def _java2py(sc, r, encoding="bytes"):
             clsName = 'JavaRDD'
 
         if clsName == 'JavaRDD':
-            jrdd = sc._jvm.MLSerDe.javaToPython(r)
+            jrdd = sc._jvm.org.apache.spark.ml.python.MLSerDe.javaToPython(r)
             return RDD(jrdd, sc)
 
         if clsName == 'Dataset':
             return DataFrame(r, SQLContext.getOrCreate(sc))
 
         if clsName in _picklable_classes:
-            r = sc._jvm.MLSerDe.dumps(r)
+            r = sc._jvm.org.apache.spark.ml.python.MLSerDe.dumps(r)
         elif isinstance(r, (JavaArray, JavaList)):
             try:
-                r = sc._jvm.MLSerDe.dumps(r)
+                r = sc._jvm.org.apache.spark.ml.python.MLSerDe.dumps(r)
             except Py4JJavaError:
                 pass  # not pickable
 
diff --git a/python/pyspark/ml/tests.py b/python/pyspark/ml/tests.py
index 981ed9dda0..24efce812b 100755
--- a/python/pyspark/ml/tests.py
+++ b/python/pyspark/ml/tests.py
@@ -1195,12 +1195,12 @@ class VectorTests(MLlibTestCase):
 
     def _test_serialize(self, v):
         self.assertEqual(v, ser.loads(ser.dumps(v)))
-        jvec = self.sc._jvm.MLSerDe.loads(bytearray(ser.dumps(v)))
-        nv = ser.loads(bytes(self.sc._jvm.MLSerDe.dumps(jvec)))
+        jvec = self.sc._jvm.org.apache.spark.ml.python.MLSerDe.loads(bytearray(ser.dumps(v)))
+        nv = ser.loads(bytes(self.sc._jvm.org.apache.spark.ml.python.MLSerDe.dumps(jvec)))
         self.assertEqual(v, nv)
         vs = [v] * 100
-        jvecs = self.sc._jvm.MLSerDe.loads(bytearray(ser.dumps(vs)))
-        nvs = ser.loads(bytes(self.sc._jvm.MLSerDe.dumps(jvecs)))
+        jvecs = self.sc._jvm.org.apache.spark.ml.python.MLSerDe.loads(bytearray(ser.dumps(vs)))
+        nvs = ser.loads(bytes(self.sc._jvm.org.apache.spark.ml.python.MLSerDe.dumps(jvecs)))
         self.assertEqual(vs, nvs)
 
     def test_serialize(self):
diff --git a/python/pyspark/mllib/clustering.py b/python/pyspark/mllib/clustering.py
index 95f7278dc6..93a0b64569 100644
--- a/python/pyspark/mllib/clustering.py
+++ b/python/pyspark/mllib/clustering.py
@@ -507,7 +507,7 @@ class GaussianMixtureModel(JavaModelWrapper, JavaSaveable, JavaLoader):
           Path to where the model is stored.
         """
         model = cls._load_java(sc, path)
-        wrapper = sc._jvm.GaussianMixtureModelWrapper(model)
+        wrapper = sc._jvm.org.apache.spark.mllib.api.python.GaussianMixtureModelWrapper(model)
         return cls(wrapper)
 
 
@@ -638,7 +638,8 @@ class PowerIterationClusteringModel(JavaModelWrapper, JavaSaveable, JavaLoader):
         Load a model from the given path.
         """
         model = cls._load_java(sc, path)
-        wrapper = sc._jvm.PowerIterationClusteringModelWrapper(model)
+        wrapper =\
+            sc._jvm.org.apache.spark.mllib.api.python.PowerIterationClusteringModelWrapper(model)
         return PowerIterationClusteringModel(wrapper)
 
 
diff --git a/python/pyspark/mllib/common.py b/python/pyspark/mllib/common.py
index 31afdf576b..21f0e09ea7 100644
--- a/python/pyspark/mllib/common.py
+++ b/python/pyspark/mllib/common.py
@@ -66,7 +66,7 @@ def _to_java_object_rdd(rdd):
     RDD is serialized in batch or not.
     """
     rdd = rdd._reserialize(AutoBatchedSerializer(PickleSerializer()))
-    return rdd.ctx._jvm.SerDe.pythonToJava(rdd._jrdd, True)
+    return rdd.ctx._jvm.org.apache.spark.mllib.api.python.SerDe.pythonToJava(rdd._jrdd, True)
 
 
 def _py2java(sc, obj):
@@ -85,7 +85,7 @@ def _py2java(sc, obj):
         pass
     else:
         data = bytearray(PickleSerializer().dumps(obj))
-        obj = sc._jvm.SerDe.loads(data)
+        obj = sc._jvm.org.apache.spark.mllib.api.python.SerDe.loads(data)
     return obj
 
 
@@ -98,17 +98,17 @@ def _java2py(sc, r, encoding="bytes"):
             clsName = 'JavaRDD'
 
         if clsName == 'JavaRDD':
-            jrdd = sc._jvm.SerDe.javaToPython(r)
+            jrdd = sc._jvm.org.apache.spark.mllib.api.python.SerDe.javaToPython(r)
             return RDD(jrdd, sc)
 
         if clsName == 'Dataset':
             return DataFrame(r, SQLContext.getOrCreate(sc))
 
         if clsName in _picklable_classes:
-            r = sc._jvm.SerDe.dumps(r)
+            r = sc._jvm.org.apache.spark.mllib.api.python.SerDe.dumps(r)
         elif isinstance(r, (JavaArray, JavaList)):
             try:
-                r = sc._jvm.SerDe.dumps(r)
+                r = sc._jvm.org.apache.spark.mllib.api.python.SerDe.dumps(r)
             except Py4JJavaError:
                 pass  # not pickable
 
diff --git a/python/pyspark/mllib/feature.py b/python/pyspark/mllib/feature.py
index e31c75c1e8..aef91a8ddc 100644
--- a/python/pyspark/mllib/feature.py
+++ b/python/pyspark/mllib/feature.py
@@ -553,7 +553,7 @@ class Word2VecModel(JavaVectorTransformer, JavaSaveable, JavaLoader):
         """
         jmodel = sc._jvm.org.apache.spark.mllib.feature \
             .Word2VecModel.load(sc._jsc.sc(), path)
-        model = sc._jvm.Word2VecModelWrapper(jmodel)
+        model = sc._jvm.org.apache.spark.mllib.api.python.Word2VecModelWrapper(jmodel)
         return Word2VecModel(model)
 
 
""" model = cls._load_java(sc, path) - wrapper = sc._jvm.FPGrowthModelWrapper(model) + wrapper = sc._jvm.org.apache.spark.mllib.api.python.FPGrowthModelWrapper(model) return FPGrowthModel(wrapper) diff --git a/python/pyspark/mllib/recommendation.py b/python/pyspark/mllib/recommendation.py index 7e60255d43..732300ee9c 100644 --- a/python/pyspark/mllib/recommendation.py +++ b/python/pyspark/mllib/recommendation.py @@ -207,7 +207,7 @@ class MatrixFactorizationModel(JavaModelWrapper, JavaSaveable, JavaLoader): def load(cls, sc, path): """Load a model from the given path""" model = cls._load_java(sc, path) - wrapper = sc._jvm.MatrixFactorizationModelWrapper(model) + wrapper = sc._jvm.org.apache.spark.mllib.api.python.MatrixFactorizationModelWrapper(model) return MatrixFactorizationModel(wrapper) diff --git a/python/pyspark/mllib/tests.py b/python/pyspark/mllib/tests.py index 72fa8b5f3d..99bf50b5a1 100644 --- a/python/pyspark/mllib/tests.py +++ b/python/pyspark/mllib/tests.py @@ -150,12 +150,12 @@ class VectorTests(MLlibTestCase): def _test_serialize(self, v): self.assertEqual(v, ser.loads(ser.dumps(v))) - jvec = self.sc._jvm.SerDe.loads(bytearray(ser.dumps(v))) - nv = ser.loads(bytes(self.sc._jvm.SerDe.dumps(jvec))) + jvec = self.sc._jvm.org.apache.spark.mllib.api.python.SerDe.loads(bytearray(ser.dumps(v))) + nv = ser.loads(bytes(self.sc._jvm.org.apache.spark.mllib.api.python.SerDe.dumps(jvec))) self.assertEqual(v, nv) vs = [v] * 100 - jvecs = self.sc._jvm.SerDe.loads(bytearray(ser.dumps(vs))) - nvs = ser.loads(bytes(self.sc._jvm.SerDe.dumps(jvecs))) + jvecs = self.sc._jvm.org.apache.spark.mllib.api.python.SerDe.loads(bytearray(ser.dumps(vs))) + nvs = ser.loads(bytes(self.sc._jvm.org.apache.spark.mllib.api.python.SerDe.dumps(jvecs))) self.assertEqual(vs, nvs) def test_serialize(self): @@ -1650,8 +1650,8 @@ class ALSTests(MLlibTestCase): def test_als_ratings_serialize(self): r = Rating(7, 1123, 3.14) - jr = self.sc._jvm.SerDe.loads(bytearray(ser.dumps(r))) - nr = ser.loads(bytes(self.sc._jvm.SerDe.dumps(jr))) + jr = self.sc._jvm.org.apache.spark.mllib.api.python.SerDe.loads(bytearray(ser.dumps(r))) + nr = ser.loads(bytes(self.sc._jvm.org.apache.spark.mllib.api.python.SerDe.dumps(jr))) self.assertEqual(r.user, nr.user) self.assertEqual(r.product, nr.product) self.assertAlmostEqual(r.rating, nr.rating, 2) @@ -1659,7 +1659,8 @@ class ALSTests(MLlibTestCase): def test_als_ratings_id_long_error(self): r = Rating(1205640308657491975, 50233468418, 1.0) # rating user id exceeds max int value, should fail when pickled - self.assertRaises(Py4JJavaError, self.sc._jvm.SerDe.loads, bytearray(ser.dumps(r))) + self.assertRaises(Py4JJavaError, self.sc._jvm.org.apache.spark.mllib.api.python.SerDe.loads, + bytearray(ser.dumps(r))) class HashingTFTest(MLlibTestCase): -- GitLab