From 3a652f691b220fada0286f8d0a562c5657973d4d Mon Sep 17 00:00:00 2001
From: Reynold Xin <rxin@databricks.com>
Date: Fri, 6 Nov 2015 14:47:41 -0800
Subject: [PATCH] [SPARK-11561][SQL] Rename text data source's column name to
 value.

Author: Reynold Xin <rxin@databricks.com>

Closes #9527 from rxin/SPARK-11561.
---
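For reviewers, a minimal usage sketch of the user-visible effect of this rename:
reading a file through the text data source now yields a single StringType
column named "value" instead of "text". The object name, app name, and input
path below are placeholders; only the column name and its type come from this
change.

    import org.apache.spark.{SparkConf, SparkContext}
    import org.apache.spark.sql.SQLContext
    import org.apache.spark.sql.types.{StringType, StructType}

    object TextValueColumnSketch {
      def main(args: Array[String]): Unit = {
        val sc = new SparkContext(
          new SparkConf().setAppName("text-value-sketch").setMaster("local[*]"))
        val sqlContext = new SQLContext(sc)

        // Each line of the input becomes one row in a single StringType
        // column, now named "value" (previously "text").
        val df = sqlContext.read.format("text").load("/tmp/example.txt")

        // Schema check mirroring TextSuite.verifyFrame after this change.
        assert(df.schema == new StructType().add("value", StringType))

        // Columns are referenced by the new name.
        df.select("value").show()

        sc.stop()
      }
    }
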
 .../sql/execution/datasources/text/DefaultSource.scala      | 8 +++-----
 .../spark/sql/execution/datasources/text/TextSuite.scala    | 2 +-
 2 files changed, 4 insertions(+), 6 deletions(-)

diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/text/DefaultSource.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/text/DefaultSource.scala
index 52c4421d7e..4b8b8e4e74 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/text/DefaultSource.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/text/DefaultSource.scala
@@ -30,14 +30,12 @@ import org.apache.spark.deploy.SparkHadoopUtil
 import org.apache.spark.mapred.SparkHadoopMapRedUtil
 import org.apache.spark.rdd.RDD
 import org.apache.spark.sql.catalyst.InternalRow
-import org.apache.spark.sql.catalyst.expressions.{UnsafeRow, GenericMutableRow}
+import org.apache.spark.sql.catalyst.expressions.UnsafeRow
 import org.apache.spark.sql.catalyst.expressions.codegen.{UnsafeRowWriter, BufferHolder}
-import org.apache.spark.sql.columnar.MutableUnsafeRow
 import org.apache.spark.sql.{AnalysisException, Row, SQLContext}
 import org.apache.spark.sql.execution.datasources.PartitionSpec
 import org.apache.spark.sql.sources._
 import org.apache.spark.sql.types.{StringType, StructType}
-import org.apache.spark.unsafe.types.UTF8String
 import org.apache.spark.util.SerializableConfiguration
 
 /**
@@ -78,7 +76,7 @@ private[sql] class TextRelation(
   extends HadoopFsRelation(maybePartitionSpec) {
 
-  /** Data schema is always a single column, named "text". */
-  override def dataSchema: StructType = new StructType().add("text", StringType)
+  /** Data schema is always a single column, named "value". */
+  override def dataSchema: StructType = new StructType().add("value", StringType)
 
   /** This is an internal data source that outputs internal row format. */
   override val needConversion: Boolean = false
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/text/TextSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/text/TextSuite.scala
index 0a2306c066..914e516613 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/text/TextSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/text/TextSuite.scala
@@ -65,7 +65,7 @@ class TextSuite extends QueryTest with SharedSQLContext {
   /** Verifies data and schema. */
   private def verifyFrame(df: DataFrame): Unit = {
     // schema
-    assert(df.schema == new StructType().add("text", StringType))
+    assert(df.schema == new StructType().add("value", StringType))
 
     // verify content
     val data = df.collect()
-- 
GitLab