From 728af88cf6be4c25a732ab7e4fe66c1ed0041164 Mon Sep 17 00:00:00 2001
From: zsxwing <zsxwing@gmail.com>
Date: Wed, 13 May 2015 17:58:29 -0700
Subject: [PATCH] [HOTFIX] Use 'new Job' in fsBasedParquet.scala

Same issue as #6095

cc liancheng

Author: zsxwing <zsxwing@gmail.com>

Closes #6136 from zsxwing/hotfix and squashes the following commits:

4beea54 [zsxwing] Use 'new Job' in fsBasedParquet.scala
---
 .../scala/org/apache/spark/sql/parquet/fsBasedParquet.scala | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sql/core/src/main/scala/org/apache/spark/sql/parquet/fsBasedParquet.scala b/sql/core/src/main/scala/org/apache/spark/sql/parquet/fsBasedParquet.scala
index d810d6a028..c83a9c35db 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/parquet/fsBasedParquet.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/parquet/fsBasedParquet.scala
@@ -231,7 +231,7 @@ private[sql] class FSBasedParquetRelation(
       filters: Array[Filter],
       inputPaths: Array[String]): RDD[Row] = {

-    val job = Job.getInstance(SparkHadoopUtil.get.conf)
+    val job = new Job(SparkHadoopUtil.get.conf)
     val conf = ContextUtil.getConfiguration(job)

     ParquetInputFormat.setReadSupportClass(job, classOf[RowReadSupport])
--
GitLab
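
For context, a minimal standalone Scala sketch of the two Job-creation styles touched by this one-line change follows. The motivation suggested here (that Job.getInstance(Configuration) is missing from some older Hadoop 1.x releases, while the deprecated new Job(Configuration) constructor exists in both major Hadoop lines) is an assumption inferred from the hotfix, not stated in the commit message; SparkHadoopUtil.get.conf is replaced by a plain Hadoop Configuration to keep the sketch self-contained.

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.mapreduce.Job

object JobCreationSketch {
  def main(args: Array[String]): Unit = {
    // Stand-in for SparkHadoopUtil.get.conf in the patched code.
    val hadoopConf = new Configuration()

    // Hadoop 2.x factory method; assumed unavailable in some Hadoop 1.x
    // releases, which is the presumed reason the patch avoids it:
    // val job = Job.getInstance(hadoopConf)

    // Constructor form used by the patch: deprecated in Hadoop 2.x but
    // present in both major lines, so the same source compiles against either.
    val job = new Job(hadoopConf)

    // The patched method goes on to configure Parquet input on this Job;
    // here we only show that the Job wraps a live Configuration.
    val conf: Configuration = job.getConfiguration
    println(conf.get("mapreduce.job.name", "<unset>"))
  }
}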