diff --git a/sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetTableSupport.scala b/sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetTableSupport.scala
index 7bc249660053a74da8fd6379c0f285f6d16dfe5f..ef3687e692964dc48f13ee858c52dabe495866b5 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetTableSupport.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetTableSupport.scala
@@ -152,14 +152,15 @@ private[parquet] class RowWriteSupport extends WriteSupport[Row] with Logging {
   }
 
   override def write(record: Row): Unit = {
-    if (attributes.size > record.size) {
+    val attributesSize = attributes.size
+    if (attributesSize > record.size) {
       throw new IndexOutOfBoundsException(
-        s"Trying to write more fields than contained in row (${attributes.size}>${record.size})")
+        s"Trying to write more fields than contained in row (${attributesSize}>${record.size})")
     }
 
     var index = 0
     writer.startMessage()
-    while(index < attributes.size) {
+    while(index < attributesSize) {
       // null values indicate optional fields but we do not check currently
       if (record(index) != null) {
         writer.startField(attributes(index).name, index)
@@ -312,14 +313,15 @@ private[parquet] class RowWriteSupport extends WriteSupport[Row] with Logging {
 // Optimized for non-nested rows
 private[parquet] class MutableRowWriteSupport extends RowWriteSupport {
   override def write(record: Row): Unit = {
-    if (attributes.size > record.size) {
+    val attributesSize = attributes.size
+    if (attributesSize > record.size) {
       throw new IndexOutOfBoundsException(
-        s"Trying to write more fields than contained in row (${attributes.size}>${record.size})")
+        s"Trying to write more fields than contained in row (${attributesSize}>${record.size})")
     }
 
     var index = 0
     writer.startMessage()
-    while(index < attributes.size) {
+    while(index < attributesSize) {
       // null values indicate optional fields but we do not check currently
       if (record(index) != null && record(index) != Nil) {
         writer.startField(attributes(index).name, index)
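
Both hunks apply the same micro-optimization: `attributes.size` is loop-invariant, so it is read once into a local `val attributesSize` instead of being re-evaluated in the `while` condition on every iteration (and again in the error message). This matters because `write` runs once per record on a hot path, and depending on the concrete `Seq` backing `attributes`, `.size` may not be a constant-time field read. The following standalone sketch illustrates the pattern in isolation; it is not Spark code, and `Attribute`, `fakeWrite`, and `writeRow` are hypothetical stand-ins for the real writer machinery.

// Minimal sketch of the loop-invariant hoisting applied in the diff above.
// `Attribute` and `fakeWrite` are hypothetical stand-ins, not Spark APIs.
object HoistLoopInvariantSize {
  final case class Attribute(name: String)

  // Stand-in for writer.startField / writer.endField bookkeeping.
  def fakeWrite(name: String, value: Any): Unit =
    println(s"$name = $value")

  def writeRow(attributes: Seq[Attribute], record: Seq[Any]): Unit = {
    // Hoisted: the size is computed once, not once per loop iteration.
    val attributesSize = attributes.size
    if (attributesSize > record.size) {
      throw new IndexOutOfBoundsException(
        s"Trying to write more fields than contained in row ($attributesSize>${record.size})")
    }
    var index = 0
    while (index < attributesSize) { // compares against the cached size
      // null values indicate optional fields, mirroring the diff's check
      if (record(index) != null) {
        fakeWrite(attributes(index).name, record(index))
      }
      index += 1
    }
  }

  def main(args: Array[String]): Unit =
    writeRow(Seq(Attribute("id"), Attribute("name")), Seq(1, "spark"))
}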