Commit 6d887db7 authored by Cheng Lian, committed by Michael Armbrust

[SPARK-3515][SQL] Moves test suite setup code to beforeAll rather than in constructor

Please refer to the JIRA ticket for details.

**NOTE** We should check all test suites that do similar initialization-like side effects in their constructors. This PR only fixes `ParquetMetastoreSuite` because it breaks our Jenkins Maven build.

Author: Cheng Lian <lian.cs.zju@gmail.com>

Closes #2375 from liancheng/say-no-to-constructor and squashes the following commits:

0ceb75b [Cheng Lian] Moves test suite setup code to beforeAll rather than in constructor
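The underlying issue is general ScalaTest hygiene: statements in a suite's constructor body execute as soon as the class is instantiated, and a build tool may instantiate a suite (for discovery or reporting) without ever running its tests, so constructor-time side effects such as creating tables can fire at the wrong moment. Code in beforeAll, by contrast, runs only when the suite actually executes, once, before its first test. A minimal sketch of the before/after shape, assuming plain ScalaTest; createTestTables is a hypothetical helper, not part of this commit:

    import org.scalatest.{BeforeAndAfterAll, FunSuite}

    // Anti-pattern: runs during `new FragileSuite`, outside the test lifecycle.
    class FragileSuite extends FunSuite {
      createTestTables() // side effect at instantiation time

      test("scan succeeds") { /* ... */ }

      private def createTestTables(): Unit = { /* temp dirs, DDL, ... */ }
    }

    // Fix: the same work deferred to beforeAll(), with afterAll() to undo it.
    class RobustSuite extends FunSuite with BeforeAndAfterAll {
      override def beforeAll(): Unit = createTestTables()
      override def afterAll(): Unit = { /* drop tables, reset config */ }

      test("scan succeeds") { /* ... */ }

      private def createTestTables(): Unit = { /* temp dirs, DDL, ... */ }
    }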
Parent commit: 885d1621
@@ -20,14 +20,10 @@ package org.apache.spark.sql.parquet
 
 import java.io.File
 
-import org.apache.spark.sql.hive.execution.HiveTableScan
 import org.scalatest.BeforeAndAfterAll
 
-import scala.reflect.ClassTag
-
-import org.apache.spark.sql.{SQLConf, QueryTest}
-import org.apache.spark.sql.execution.{BroadcastHashJoin, ShuffledHashJoin}
-import org.apache.spark.sql.hive.test.TestHive
+import org.apache.spark.sql.QueryTest
+import org.apache.spark.sql.hive.execution.HiveTableScan
 import org.apache.spark.sql.hive.test.TestHive._
 
 case class ParquetData(intField: Int, stringField: String)
@@ -36,27 +32,19 @@ case class ParquetData(intField: Int, stringField: String)
  * Tests for our SerDe -> Native parquet scan conversion.
  */
 class ParquetMetastoreSuite extends QueryTest with BeforeAndAfterAll {
-
   override def beforeAll(): Unit = {
-    setConf("spark.sql.hive.convertMetastoreParquet", "true")
-  }
-
-  override def afterAll(): Unit = {
-    setConf("spark.sql.hive.convertMetastoreParquet", "false")
-  }
-
-  val partitionedTableDir = File.createTempFile("parquettests", "sparksql")
-  partitionedTableDir.delete()
-  partitionedTableDir.mkdir()
-
-  (1 to 10).foreach { p =>
-    val partDir = new File(partitionedTableDir, s"p=$p")
-    sparkContext.makeRDD(1 to 10)
-      .map(i => ParquetData(i, s"part-$p"))
-      .saveAsParquetFile(partDir.getCanonicalPath)
-  }
-
-  sql(s"""
+    val partitionedTableDir = File.createTempFile("parquettests", "sparksql")
+    partitionedTableDir.delete()
+    partitionedTableDir.mkdir()
+
+    (1 to 10).foreach { p =>
+      val partDir = new File(partitionedTableDir, s"p=$p")
+      sparkContext.makeRDD(1 to 10)
+        .map(i => ParquetData(i, s"part-$p"))
+        .saveAsParquetFile(partDir.getCanonicalPath)
+    }
+
+    sql(s"""
     create external table partitioned_parquet
     (
       intField INT,
@@ -70,7 +58,7 @@ class ParquetMetastoreSuite extends QueryTest with BeforeAndAfterAll {
      location '${partitionedTableDir.getCanonicalPath}'
    """)
 
-  sql(s"""
+    sql(s"""
    create external table normal_parquet
    (
      intField INT,
@@ -83,8 +71,15 @@ class ParquetMetastoreSuite extends QueryTest with BeforeAndAfterAll {
      location '${new File(partitionedTableDir, "p=1").getCanonicalPath}'
    """)
 
-  (1 to 10).foreach { p =>
-    sql(s"ALTER TABLE partitioned_parquet ADD PARTITION (p=$p)")
-  }
+    (1 to 10).foreach { p =>
+      sql(s"ALTER TABLE partitioned_parquet ADD PARTITION (p=$p)")
+    }
+
+    setConf("spark.sql.hive.convertMetastoreParquet", "true")
+  }
+
+  override def afterAll(): Unit = {
+    setConf("spark.sql.hive.convertMetastoreParquet", "false")
+  }
 
   test("project the partitioning column") {
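For context on the semantics this change relies on: mixing in BeforeAndAfterAll makes ScalaTest invoke beforeAll() once before any test in the suite executes and afterAll() once after all of them finish (afterAll runs even when tests fail). A small sketch of the resulting execution order, with illustrative println markers standing in for the suite's real setup and teardown:

    import org.scalatest.{BeforeAndAfterAll, FunSuite}

    class LifecycleDemoSuite extends FunSuite with BeforeAndAfterAll {
      override def beforeAll(): Unit = println("beforeAll: create tables, set conf")
      override def afterAll(): Unit = println("afterAll: restore conf")

      test("first") { println("running first") }
      test("second") { println("running second") }
    }

    // Running the suite prints, in order:
    //   beforeAll: create tables, set conf
    //   running first
    //   running second
    //   afterAll: restore conf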