diff --git a/sql/core/src/main/scala/org/apache/spark/sql/sources/ddl.scala b/sql/core/src/main/scala/org/apache/spark/sql/sources/ddl.scala
index ca510cb0b07e3325e5f23ecd6a2fd43ea0fc4f35..8a66ac31f2dfbdca4659df1d7405e3d4372f4faf 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/sources/ddl.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/sources/ddl.scala
@@ -100,9 +100,26 @@ private[sql] case class CreateTableUsing(
         }
     }
     val dataSource = clazz.newInstance().asInstanceOf[org.apache.spark.sql.sources.RelationProvider]
-    val relation = dataSource.createRelation(sqlContext, options)
+    val relation = dataSource.createRelation(sqlContext, new CaseInsensitiveMap(options))
 
     sqlContext.baseRelationToSchemaRDD(relation).registerTempTable(tableName)
     Seq.empty
   }
 }
+
+/**
+ * Builds a map in which keys are case insensitive
+ */
+protected class CaseInsensitiveMap(map: Map[String, String]) extends Map[String, String] {
+
+  val baseMap = map.map(kv => kv.copy(_1 = kv._1.toLowerCase))
+
+  override def get(k: String): Option[String] = baseMap.get(k.toLowerCase)
+
+  override def + [B1 >: String](kv: (String, B1)): Map[String, B1] =
+    baseMap + kv.copy(_1 = kv._1.toLowerCase)
+
+  override def iterator: Iterator[(String, String)] = baseMap.iterator
+
+  override def -(key: String): Map[String, String] = baseMap - key.toLowerCase()
+}
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/sources/interfaces.scala b/sql/core/src/main/scala/org/apache/spark/sql/sources/interfaces.scala
index 939b4e15163a65f98f1388f4d54d0ce4a7ecbd1d..02eff80456dbe7859b81c315c81165992b407c43 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/sources/interfaces.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/sources/interfaces.scala
@@ -36,7 +36,11 @@ import org.apache.spark.sql.catalyst.expressions.{Expression, Attribute}
  */
 @DeveloperApi
 trait RelationProvider {
-  /** Returns a new base relation with the given parameters. */
+  /**
+   * Returns a new base relation with the given parameters.
+   * Note: the parameters' keywords are case insensitive and this insensitivity is enforced
+   * by the Map that is passed to the function.
+   */
   def createRelation(sqlContext: SQLContext, parameters: Map[String, String]): BaseRelation
 }
 
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/TableScanSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/TableScanSuite.scala
index b254b0620c7794503697be7253b601a45314c36e..3cd7b0115d56729b5af6413aadbed0e776712a30 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/sources/TableScanSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/TableScanSuite.scala
@@ -25,7 +25,7 @@ class SimpleScanSource extends RelationProvider {
   override def createRelation(
       sqlContext: SQLContext,
       parameters: Map[String, String]): BaseRelation = {
-    SimpleScan(parameters("from").toInt, parameters("to").toInt)(sqlContext)
+    SimpleScan(parameters("from").toInt, parameters("TO").toInt)(sqlContext)
   }
 }
 
@@ -47,8 +47,8 @@ class TableScanSuite extends DataSourceTest {
         |CREATE TEMPORARY TABLE oneToTen
         |USING org.apache.spark.sql.sources.SimpleScanSource
         |OPTIONS (
-        |  from '1',
-        |  to '10'
+        |  From '1',
+        |  To '10'
         |)
       """.stripMargin)
   }
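
For readers who want to see the patch's behavior outside of Spark, below is a standalone sketch (not part of the diff) of the same lower-case-on-construction technique, plus a small demo of exactly what the updated test exercises: the DDL supplies `From`/`To` while `createRelation` looks up `from`/`TO`, and both resolve. The class body mirrors the `CaseInsensitiveMap` added to ddl.scala; the demo object and its name are illustrative only, and the code assumes a pre-2.13 Scala collections API (where `get`, `+`, `-`, and `iterator` are the abstract members of `Map`), as in the Scala versions Spark built against at the time.

```scala
// Standalone sketch: the CaseInsensitiveMap technique from this patch,
// with a demo of the lookups the test suite performs.
object CaseInsensitiveMapDemo extends App {

  class CaseInsensitiveMap(map: Map[String, String]) extends Map[String, String] {
    // All keys are lower-cased once, at construction time.
    private val baseMap = map.map(kv => kv.copy(_1 = kv._1.toLowerCase))

    // Lookups lower-case the probe key, so "TO", "To", and "to" all match.
    override def get(k: String): Option[String] = baseMap.get(k.toLowerCase)

    override def + [B1 >: String](kv: (String, B1)): Map[String, B1] =
      baseMap + kv.copy(_1 = kv._1.toLowerCase)

    override def iterator: Iterator[(String, String)] = baseMap.iterator

    override def -(key: String): Map[String, String] = baseMap - key.toLowerCase
  }

  // Mixed-case keys, exactly like the `From '1', To '10'` OPTIONS in the test.
  val options = new CaseInsensitiveMap(Map("From" -> "1", "TO" -> "10"))

  assert(options("from").toInt == 1)  // resolves against "From"
  assert(options("TO").toInt == 10)   // resolves despite differing case
  println(options.toList)             // List((from,1), (to,10)): keys normalized
}
```

One property worth noting about the patched class itself: `+` and `-` return a plain `Map` built from `baseMap`, so a map derived by adding or removing a key is no longer case insensitive. That is fine for this patch, which only needs read access inside `createRelation`, but data sources should not assume the insensitivity survives updates.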