Commit 352102ed authored by Jakob Odersky, committed by Reynold Xin

[SPARK-13208][CORE] Replace use of Pairs with Tuple2s

Another trivial deprecation fix for Scala 2.11

Author: Jakob Odersky <jakob@odersky.com>

Closes #11089 from jodersky/SPARK-13208.
parent e3c75c63
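For background: `Pair` is an alias for `Tuple2` in `scala.Predef` that was deprecated in Scala 2.11 (and removed in later Scala versions), so every use of it emits a deprecation warning. Plain tuple syntax is the drop-in replacement. A minimal sketch of the substitution this commit applies throughout (names and values here are illustrative, not taken from the patch):

// Deprecated since Scala 2.11; each use warns at compile time:
//   val p: Pair[Int, String] = Pair(1, "one")

// Drop-in replacement: plain tuple syntax, which is sugar for Tuple2.
val p: (Int, String) = (1, "one")
val q: Tuple2[Int, String] = Tuple2(1, "one") // equivalent, rarely spelled out

// Tuples also destructure directly in patterns, replacing `case Pair(a, b)`:
val (a, b) = p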
@@ -230,7 +230,7 @@ class JavaDoubleRDD(val srdd: RDD[scala.Double])
    * If the RDD contains infinity, NaN throws an exception
    * If the elements in RDD do not vary (max == min) always returns a single bucket.
    */
-  def histogram(bucketCount: Int): Pair[Array[scala.Double], Array[Long]] = {
+  def histogram(bucketCount: Int): (Array[scala.Double], Array[Long]) = {
     val result = srdd.histogram(bucketCount)
     (result._1, result._2)
   }
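The signature change above is source-compatible for callers that already bind the result with tuple syntax. A hedged usage sketch, assuming an existing SparkContext `sc` (not part of the patch):

// Illustrative values only; requires a live SparkContext `sc`.
val rdd = sc.parallelize(Seq(1.0, 2.0, 3.0, 4.0, 5.0))

// histogram(bucketCount) returns a Tuple2: the bucket boundaries
// (bucketCount + 1 values, evenly spaced between min and max) and the
// element count per bucket.
val (buckets, counts) = rdd.histogram(4)
// buckets: Array(1.0, 2.0, 3.0, 4.0, 5.0)
// counts:  Array(1, 1, 1, 2)  -- the last bucket includes the max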
@@ -103,7 +103,7 @@ class DoubleRDDFunctions(self: RDD[Double]) extends Logging with Serializable {
    * If the RDD contains infinity, NaN throws an exception
    * If the elements in RDD do not vary (max == min) always returns a single bucket.
    */
-  def histogram(bucketCount: Int): Pair[Array[Double], Array[Long]] = self.withScope {
+  def histogram(bucketCount: Int): (Array[Double], Array[Long]) = self.withScope {
     // Scala's built-in range has issues. See #SI-8782
     def customRange(min: Double, max: Double, steps: Int): IndexedSeq[Double] = {
       val span = max - min
@@ -112,7 +112,7 @@ class DoubleRDDFunctions(self: RDD[Double]) extends Logging with Serializable {
     // Compute the minimum and the maximum
     val (max: Double, min: Double) = self.mapPartitions { items =>
       Iterator(items.foldRight(Double.NegativeInfinity,
-        Double.PositiveInfinity)((e: Double, x: Pair[Double, Double]) =>
+        Double.PositiveInfinity)((e: Double, x: (Double, Double)) =>
         (x._1.max(e), x._2.min(e))))
     }.reduce { (maxmin1, maxmin2) =>
       (maxmin1._1.max(maxmin2._1), maxmin1._2.min(maxmin2._2))
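Only the accumulator's type annotation changes in this hunk; the fold itself is a single-pass min/max. A standalone sketch of the same pattern on an ordinary collection:

// Thread a (max, min) tuple accumulator through one pass over the data.
// Note the explicit tuple parentheses around the seed: the two-argument
// call in the diff relies on argument auto-tupling, which newer compilers
// also warn about.
val items = List(3.0, 1.0, 4.0, 1.5)
val (max, min) = items.foldRight((Double.NegativeInfinity, Double.PositiveInfinity)) {
  (e: Double, x: (Double, Double)) => (x._1.max(e), x._2.min(e))
}
// max == 4.0, min == 1.0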
@@ -261,7 +261,7 @@ class ParquetSchemaInferenceSuite extends ParquetSchemaTest {
     int96AsTimestamp = true,
     writeLegacyParquetFormat = true)
-  testSchemaInference[Tuple1[Pair[Int, String]]](
+  testSchemaInference[Tuple1[(Int, String)]](
     "struct",
     """
       |message root {
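The test above relies on Spark SQL encoding Scala tuples as structs with positional field names, which is why a tuple nested inside `Tuple1` infers as a single struct column. A rough illustration, assuming a SparkSession `spark` (not part of the patch; the printed output is approximate):

import spark.implicits._

// Tuples encode as StructTypes with fields named _1, _2, ...
val ds = Seq(Tuple1((1, "a"))).toDS()
ds.printSchema()
// root
//  |-- _1: struct (nullable = true)
//  |    |-- _1: integer (nullable = false)
//  |    |-- _2: string (nullable = true)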
@@ -770,14 +770,14 @@ class HiveQuerySuite extends HiveComparisonTest with BeforeAndAfter {
   test("SPARK-2180: HAVING support in GROUP BY clauses (positive)") {
     val fixture = List(("foo", 2), ("bar", 1), ("foo", 4), ("bar", 3))
-      .zipWithIndex.map {case Pair(Pair(value, attr), key) => HavingRow(key, value, attr)}
+      .zipWithIndex.map {case ((value, attr), key) => HavingRow(key, value, attr)}
     TestHive.sparkContext.parallelize(fixture).toDF().registerTempTable("having_test")
     val results =
       sql("SELECT value, max(attr) AS attr FROM having_test GROUP BY value HAVING attr > 3")
       .collect()
-      .map(x => Pair(x.getString(0), x.getInt(1)))
+      .map(x => (x.getString(0), x.getInt(1)))
-    assert(results === Array(Pair("foo", 4)))
+    assert(results === Array(("foo", 4)))
     TestHive.reset()
   }
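The only change in this test is swapping the `Pair` extractor and constructors for tuple syntax; the nested destructuring works identically on plain collections. A standalone sketch (the `HavingRow` definition here mirrors the test fixture, for illustration):

// Mirrors the test's row type; redefined here so the sketch is self-contained.
case class HavingRow(key: Int, value: String, attr: Int)

// zipWithIndex yields ((value, attr), index); tuple patterns destructure
// the nesting directly, with no Pair extractor needed.
val fixture = List(("foo", 2), ("bar", 1), ("foo", 4), ("bar", 3))
  .zipWithIndex
  .map { case ((value, attr), key) => HavingRow(key, value, attr) }
// fixture.head == HavingRow(0, "foo", 2)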