diff --git a/R/pkg/inst/tests/testthat/test_Serde.R b/R/pkg/inst/tests/testthat/test_Serde.R
index 96fb6dda26450e246f7f470a013147d8fcaf84e6..b5f6f1b54fa85dd365d47e95840c9b71ff222973 100644
--- a/R/pkg/inst/tests/testthat/test_Serde.R
+++ b/R/pkg/inst/tests/testthat/test_Serde.R
@@ -17,7 +17,7 @@
 
 context("SerDe functionality")
 
-sparkSession <- sparkR.session()
+sparkSession <- sparkR.session(enableHiveSupport = FALSE)
 
 test_that("SerDe of primitive types", {
   x <- callJStatic("SparkRHandler", "echo", 1L)
@@ -75,3 +75,5 @@ test_that("SerDe of list of lists", {
   y <- callJStatic("SparkRHandler", "echo", x)
   expect_equal(x, y)
 })
+
+sparkR.session.stop()
diff --git a/R/pkg/inst/tests/testthat/test_binaryFile.R b/R/pkg/inst/tests/testthat/test_binaryFile.R
index b69f017de81d13bd8617748ea6f5ac61ef5fa74a..56ac8eb7280174a7453d4d752afeb8818876032e 100644
--- a/R/pkg/inst/tests/testthat/test_binaryFile.R
+++ b/R/pkg/inst/tests/testthat/test_binaryFile.R
@@ -18,7 +18,7 @@
 context("functions on binary files")
 
 # JavaSparkContext handle
-sparkSession <- sparkR.session()
+sparkSession <- sparkR.session(enableHiveSupport = FALSE)
 sc <- callJStatic("org.apache.spark.sql.api.r.SQLUtils", "getJavaSparkContext", sparkSession)
 
 mockFile <- c("Spark is pretty.", "Spark is awesome.")
@@ -88,3 +88,5 @@ test_that("saveAsObjectFile()/objectFile() works with multiple paths", {
   unlink(fileName1, recursive = TRUE)
   unlink(fileName2, recursive = TRUE)
 })
+
+sparkR.session.stop()
diff --git a/R/pkg/inst/tests/testthat/test_binary_function.R b/R/pkg/inst/tests/testthat/test_binary_function.R
index 6f51d20687277a840d2c96f54fe66268c8680d1b..ae7abe20ccbe589ccf1c7abcbba1470872158b7c 100644
--- a/R/pkg/inst/tests/testthat/test_binary_function.R
+++ b/R/pkg/inst/tests/testthat/test_binary_function.R
@@ -18,7 +18,7 @@
 context("binary functions")
 
 # JavaSparkContext handle
-sparkSession <- sparkR.session()
+sparkSession <- sparkR.session(enableHiveSupport = FALSE)
 sc <- callJStatic("org.apache.spark.sql.api.r.SQLUtils", "getJavaSparkContext", sparkSession)
 
 # Data
@@ -100,3 +100,5 @@ test_that("zipPartitions() on RDDs", {
 
   unlink(fileName)
 })
+
+sparkR.session.stop()
diff --git a/R/pkg/inst/tests/testthat/test_broadcast.R b/R/pkg/inst/tests/testthat/test_broadcast.R
index cf1d43277105eaaca293701693e3a9d20116d6be..c7fefb5cf9cbc6729214374590dc7dbd73716343 100644
--- a/R/pkg/inst/tests/testthat/test_broadcast.R
+++ b/R/pkg/inst/tests/testthat/test_broadcast.R
@@ -18,7 +18,7 @@
 context("broadcast variables")
 
 # JavaSparkContext handle
-sparkSession <- sparkR.session()
+sparkSession <- sparkR.session(enableHiveSupport = FALSE)
 sc <- callJStatic("org.apache.spark.sql.api.r.SQLUtils", "getJavaSparkContext", sparkSession)
 
 # Partitioned data
@@ -47,3 +47,5 @@ test_that("without using broadcast variable", {
   expected <- list(sum(randomMat) * 1, sum(randomMat) * 2)
   expect_equal(actual, expected)
 })
+
+sparkR.session.stop()
diff --git a/R/pkg/inst/tests/testthat/test_context.R b/R/pkg/inst/tests/testthat/test_context.R
index 2a1bd61b11118d2668d1da5a47f4a17c6ff7bf25..8bd134a58d68e1e842469b013c39b5f1b8bd708e 100644
--- a/R/pkg/inst/tests/testthat/test_context.R
+++ b/R/pkg/inst/tests/testthat/test_context.R
@@ -63,18 +63,14 @@ test_that("repeatedly starting and stopping SparkR", {
   }
 })
 
-# Does not work consistently even with Hive off
-# nolint start
-# test_that("repeatedly starting and stopping SparkR", {
-#   for (i in 1:4) {
-#     sparkR.session(enableHiveSupport = FALSE)
-#     df <- createDataFrame(data.frame(dummy=1:i))
-#     expect_equal(count(df), i)
-#     sparkR.session.stop()
-#     Sys.sleep(5) # Need more time to shutdown Hive metastore
-#   }
-# })
-# nolint end
+test_that("repeatedly starting and stopping SparkSession", {
+  for (i in 1:4) {
+    sparkR.session(enableHiveSupport = FALSE)
+    df <- createDataFrame(data.frame(dummy = 1:i))
+    expect_equal(count(df), i)
+    sparkR.session.stop()
+  }
+})
 
 test_that("rdd GC across sparkR.stop", {
   sc <- sparkR.sparkContext() # sc should get id 0
@@ -96,6 +92,7 @@ test_that("rdd GC across sparkR.stop", {
 
   count(rdd3)
   count(rdd4)
+  sparkR.session.stop()
 })
 
 test_that("job group functions can be called", {
@@ -164,7 +161,7 @@ test_that("sparkJars sparkPackages as comma-separated strings", {
 })
 
 test_that("spark.lapply should perform simple transforms", {
-  sc <- sparkR.sparkContext()
+  sparkR.sparkContext()
   doubled <- spark.lapply(1:10, function(x) { 2 * x })
   expect_equal(doubled, as.list(2 * 1:10))
   sparkR.session.stop()
diff --git a/R/pkg/inst/tests/testthat/test_includePackage.R b/R/pkg/inst/tests/testthat/test_includePackage.R
index d6a3766539c023cc9ee1758f456039c793f10ffa..ca2b90057278a1ec0c2994a50442492bbfbf8fe2 100644
--- a/R/pkg/inst/tests/testthat/test_includePackage.R
+++ b/R/pkg/inst/tests/testthat/test_includePackage.R
@@ -18,7 +18,7 @@
 context("include R packages")
 
 # JavaSparkContext handle
-sparkSession <- sparkR.session()
+sparkSession <- sparkR.session(enableHiveSupport = FALSE)
 sc <- callJStatic("org.apache.spark.sql.api.r.SQLUtils", "getJavaSparkContext", sparkSession)
 
 # Partitioned data
@@ -56,3 +56,5 @@ test_that("use include package", {
     actual <- collect(data)
   }
 })
+
+sparkR.session.stop()
diff --git a/R/pkg/inst/tests/testthat/test_mllib.R b/R/pkg/inst/tests/testthat/test_mllib.R
index 753da817609714c5b33a9db4d58479074821654d..ab390a86d1ccd2718df12f995e50f6121549df4d 100644
--- a/R/pkg/inst/tests/testthat/test_mllib.R
+++ b/R/pkg/inst/tests/testthat/test_mllib.R
@@ -20,7 +20,7 @@ library(testthat)
 context("MLlib functions")
 
 # Tests for MLlib functions in SparkR
-sparkSession <- sparkR.session()
+sparkSession <- sparkR.session(enableHiveSupport = FALSE)
 
 test_that("formula of spark.glm", {
   training <- suppressWarnings(createDataFrame(iris))
@@ -453,3 +453,5 @@ test_that("spark.survreg", {
     expect_equal(predict(model, rData)[[1]], 3.724591, tolerance = 1e-4)
   }
 })
+
+sparkR.session.stop()
diff --git a/R/pkg/inst/tests/testthat/test_parallelize_collect.R b/R/pkg/inst/tests/testthat/test_parallelize_collect.R
index f79a8a70aafb1c46031b50968607996956b58b56..959d7ab9e644d6e1ad04991e016d66bafc308dad 100644
--- a/R/pkg/inst/tests/testthat/test_parallelize_collect.R
+++ b/R/pkg/inst/tests/testthat/test_parallelize_collect.R
@@ -33,7 +33,7 @@ numPairs <- list(list(1, 1), list(1, 2), list(2, 2), list(2, 3))
 strPairs <- list(list(strList, strList), list(strList, strList))
 
 # JavaSparkContext handle
-sparkSession <- sparkR.session()
+sparkSession <- sparkR.session(enableHiveSupport = FALSE)
 jsc <- callJStatic("org.apache.spark.sql.api.r.SQLUtils", "getJavaSparkContext", sparkSession)
 
 # Tests
@@ -108,3 +108,5 @@ test_that("parallelize() and collect() work for lists of pairs (pairwise data)",
   expect_equal(collect(strPairsRDDD1), strPairs)
   expect_equal(collect(strPairsRDDD2), strPairs)
 })
+
+sparkR.session.stop()
diff --git a/R/pkg/inst/tests/testthat/test_rdd.R b/R/pkg/inst/tests/testthat/test_rdd.R
index 429311d2924f0d5b376dd30381af90ebdc4c814f..508a3a7dfd5f76be35ea58aaf2048e539e533422 100644
--- a/R/pkg/inst/tests/testthat/test_rdd.R
+++ b/R/pkg/inst/tests/testthat/test_rdd.R
@@ -18,7 +18,7 @@
 context("basic RDD functions")
 
 # JavaSparkContext handle
-sparkSession <- sparkR.session()
+sparkSession <- sparkR.session(enableHiveSupport = FALSE)
 sc <- callJStatic("org.apache.spark.sql.api.r.SQLUtils", "getJavaSparkContext", sparkSession)
 
 # Data
@@ -800,3 +800,5 @@ test_that("Test correct concurrency of RRDD.compute()", {
   count <- callJMethod(zrdd, "count")
   expect_equal(count, 1000)
 })
+
+sparkR.session.stop()
diff --git a/R/pkg/inst/tests/testthat/test_shuffle.R b/R/pkg/inst/tests/testthat/test_shuffle.R
index 7d4f342016441e571de635604dd888130f2da36a..2586056773f1cca81500de9fdcc9ddba9f743cdb 100644
--- a/R/pkg/inst/tests/testthat/test_shuffle.R
+++ b/R/pkg/inst/tests/testthat/test_shuffle.R
@@ -18,7 +18,7 @@
 context("partitionBy, groupByKey, reduceByKey etc.")
 
 # JavaSparkContext handle
-sparkSession <- sparkR.session()
+sparkSession <- sparkR.session(enableHiveSupport = FALSE)
 sc <- callJStatic("org.apache.spark.sql.api.r.SQLUtils", "getJavaSparkContext", sparkSession)
 
 # Data
@@ -220,3 +220,5 @@ test_that("test partitionBy with string keys", {
   expect_equal(sortKeyValueList(actual_first), sortKeyValueList(expected_first))
   expect_equal(sortKeyValueList(actual_second), sortKeyValueList(expected_second))
 })
+
+sparkR.session.stop()
diff --git a/R/pkg/inst/tests/testthat/test_sparkSQL.R b/R/pkg/inst/tests/testthat/test_sparkSQL.R
index f275284b09d88cea0b3030e36caeaa683dc4ebb7..3f3cb766b38f12e0f02fc732fb9cedf210037378 100644
--- a/R/pkg/inst/tests/testthat/test_sparkSQL.R
+++ b/R/pkg/inst/tests/testthat/test_sparkSQL.R
@@ -2489,3 +2489,5 @@ unlink(parquetPath)
 unlink(orcPath)
 unlink(jsonPath)
 unlink(jsonPathNa)
+
+sparkR.session.stop()
diff --git a/R/pkg/inst/tests/testthat/test_take.R b/R/pkg/inst/tests/testthat/test_take.R
index daf5e41abe13f638e116987eff59726dfa381448..07f00c9915d1207613e7dbc5ee132fc0aa79b58f 100644
--- a/R/pkg/inst/tests/testthat/test_take.R
+++ b/R/pkg/inst/tests/testthat/test_take.R
@@ -30,7 +30,7 @@ strList <- list("Dexter Morgan: Blood. Sometimes it sets my teeth on edge, ",
                 "raising me. But they're both dead now. I didn't kill them. Honest.")
 
 # JavaSparkContext handle
-sparkSession <- sparkR.session()
+sparkSession <- sparkR.session(enableHiveSupport = FALSE)
 sc <- callJStatic("org.apache.spark.sql.api.r.SQLUtils", "getJavaSparkContext", sparkSession)
 
 test_that("take() gives back the original elements in correct count and order", {
@@ -65,3 +65,5 @@ test_that("take() gives back the original elements in correct count and order",
   expect_equal(length(take(numListRDD, 0)), 0)
   expect_equal(length(take(numVectorRDD, 0)), 0)
 })
+
+sparkR.session.stop()
diff --git a/R/pkg/inst/tests/testthat/test_textFile.R b/R/pkg/inst/tests/testthat/test_textFile.R
index 7b2cc74753fe2b31317b0871d9f12c5ee5b2309f..b7dcbe472ac282d8813e764e5c8a4d8216cff2b9 100644
--- a/R/pkg/inst/tests/testthat/test_textFile.R
+++ b/R/pkg/inst/tests/testthat/test_textFile.R
@@ -18,7 +18,7 @@
 context("the textFile() function")
 
 # JavaSparkContext handle
-sparkSession <- sparkR.session()
+sparkSession <- sparkR.session(enableHiveSupport = FALSE)
 sc <- callJStatic("org.apache.spark.sql.api.r.SQLUtils", "getJavaSparkContext", sparkSession)
 
 mockFile <- c("Spark is pretty.", "Spark is awesome.")
@@ -160,3 +160,5 @@ test_that("Pipelined operations on RDDs created using textFile", {
 
   unlink(fileName)
 })
+
+sparkR.session.stop()
diff --git a/R/pkg/inst/tests/testthat/test_utils.R b/R/pkg/inst/tests/testthat/test_utils.R
index 21a119a06b937eb96e95d8051b98abb81eb79e90..58ff3debfa70781a271f3ef1c84de6152aab7ad5 100644
--- a/R/pkg/inst/tests/testthat/test_utils.R
+++ b/R/pkg/inst/tests/testthat/test_utils.R
@@ -18,7 +18,7 @@
 context("functions in utils.R")
 
 # JavaSparkContext handle
-sparkSession <- sparkR.session()
+sparkSession <- sparkR.session(enableHiveSupport = FALSE)
 sc <- callJStatic("org.apache.spark.sql.api.r.SQLUtils", "getJavaSparkContext", sparkSession)
 
 test_that("convertJListToRList() gives back (deserializes) the original JLists
@@ -182,3 +182,5 @@ test_that("overrideEnvs", {
   expect_equal(config[["param_only"]], "blah")
   expect_equal(config[["config_only"]], "ok")
 })
+
+sparkR.session.stop()