diff --git a/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala b/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
index 9a50ef77efbd4645002893975bc4156e0ee11b42..1d1e2884414d8f734c38c31e263e96b5f15dc216 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
@@ -345,12 +345,9 @@ object SQLConf {
     defaultValue = Some(true),
     doc = "Enables using the custom ParquetUnsafeRowRecordReader.")
 
-  // Note: this can not be enabled all the time because the reader will not be returning UnsafeRows.
-  // Doing so is very expensive and we should remove this requirement instead of fixing it here.
-  // Initial testing seems to indicate only sort requires this.
   val PARQUET_VECTORIZED_READER_ENABLED = booleanConf(
     key = "spark.sql.parquet.enableVectorizedReader",
-    defaultValue = Some(false),
+    defaultValue = Some(true),
     doc = "Enables vectorized parquet decoding.")
 
   val ORC_FILTER_PUSHDOWN_ENABLED = booleanConf("spark.sql.orc.filterPushdown",
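
With the default flipped to `true`, supported Parquet scans use the vectorized decoder automatically. A minimal sketch of overriding the flag per session (e.g. to fall back to the row-based reader while debugging), assuming a Spark 2.x `SparkSession` named `spark`; the input path is hypothetical:

```scala
// Disable the vectorized Parquet reader for this session only;
// later Parquet scans fall back to the row-based record reader.
spark.conf.set("spark.sql.parquet.enableVectorizedReader", "false")

// Hypothetical input path, for illustration only.
val df = spark.read.parquet("/tmp/example.parquet")
df.show()
```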