diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala
index 4ea80fee23e1e6aac99caf4a26916eba502329d0..5b6aea81cb7d16ec97526f15b4226fd6c726a333 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala
@@ -219,7 +219,7 @@ class SqlParser extends StandardTokenParsers {
 
   protected lazy val relationFactor: Parser[LogicalPlan] =
     ident ~ (opt(AS) ~> opt(ident)) ^^ {
-      case ident ~ alias => UnresolvedRelation(alias, ident)
+      case tableName ~ alias => UnresolvedRelation(None, tableName, alias)
     } |
     "(" ~> query ~ ")" ~ opt(AS) ~ ident ^^ { case s ~ _ ~ _ ~ a => Subquery(a, s) }
 
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/dsl/package.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/dsl/package.scala
index 2c4bf1715b6468570e33be46b3dc386937ff4411..2d62e4cbbce01734e0b37580f4dedcb929c05e72 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/dsl/package.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/dsl/package.scala
@@ -70,7 +70,7 @@ package object dsl {
     def > (other: Expression) = GreaterThan(expr, other)
     def >= (other: Expression) = GreaterThanOrEqual(expr, other)
     def === (other: Expression) = Equals(expr, other)
-    def != (other: Expression) = Not(Equals(expr, other))
+    def !== (other: Expression) = Not(Equals(expr, other))
 
     def like(other: Expression) = Like(expr, other)
     def rlike(other: Expression) = RLike(expr, other)
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Row.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Row.scala
index 6f939e6c41f6b4cf867bdecbc70b00b5e8989863..9f4d84466e6accd88692bbc07d9a2ca422ecaf84 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Row.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Row.scala
@@ -19,6 +19,21 @@ package org.apache.spark.sql.catalyst.expressions
 
 import org.apache.spark.sql.catalyst.types.NativeType
 
+object Row {
+  /**
+   * This method can be used to extract fields from a [[Row]] object in a pattern match. Example:
+   * {{{
+   * import org.apache.spark.sql._
+   *
+   * val pairs = sql("SELECT key, value FROM src").rdd.map {
+   *   case Row(key: Int, value: String) =>
+   *     key -> value
+   * }
+   * }}}
+   */
+  def unapplySeq(row: Row): Some[Seq[Any]] = Some(row)
+}
+
 /**
  * Represents one row of output from a relational operator.  Allows both generic access by ordinal,
  * which will incur boxing overhead for primitives, as well as native primitive access.
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicOperators.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicOperators.scala
index b39c2b32cc42c7d9396f9246bd17c5ecbbbd77d3..cfc0b0c3a8d98276edea37700f1c79b3c08c9e3a 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicOperators.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicOperators.scala
@@ -162,6 +162,7 @@ case class LowerCaseSchema(child: LogicalPlan) extends UnaryNode {
         a.nullable)(
         a.exprId,
         a.qualifiers)
+    case other => other
   }
 
   def references = Set.empty
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/package.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/package.scala
index 9ec31689b5098d3aebb13808034e434a99266160..4589129cd1c909ecc3bc5361596d3572b9cb9f09 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/package.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/package.scala
@@ -32,18 +32,5 @@ package object sql {
 
   type Row = catalyst.expressions.Row
 
-  object Row {
-    /**
-     * This method can be used to extract fields from a [[Row]] object in a pattern match. Example:
-     * {{{
-     * import org.apache.spark.sql._
-     *
-     * val pairs = sql("SELECT key, value FROM src").rdd.map {
-     *   case Row(key: Int, value: String) =>
-     *     key -> value
-     * }
-     * }}}
-     */
-    def unapplySeq(row: Row): Some[Seq[Any]] = Some(row)
-  }
+  val Row = catalyst.expressions.Row
 }
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SchemaRDD.scala b/sql/core/src/main/scala/org/apache/spark/sql/SchemaRDD.scala
index a62cb8aa1321f3c37c80037d54614414b28966c2..fc9578144856943224d956e224c16f42a992ace6 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SchemaRDD.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SchemaRDD.scala
@@ -148,17 +148,17 @@ class SchemaRDD(
    *
    * @param otherPlan the [[SchemaRDD]] that should be joined with this one.
    * @param joinType One of `Inner`, `LeftOuter`, `RightOuter`, or `FullOuter`. Defaults to `Inner.`
-   * @param condition An optional condition for the join operation.  This is equivilent to the `ON`
-   *                  clause in standard SQL.  In the case of `Inner` joins, specifying a
-   *                  `condition` is equivilent to adding `where` clauses after the `join`.
+   * @param on       An optional condition for the join operation.  This is equivalent to the `ON`
+   *                 clause in standard SQL.  In the case of `Inner` joins, specifying a
+   *                 condition via `on` is equivalent to adding `where` clauses after the `join`.
    *
    * @group Query
    */
   def join(
       otherPlan: SchemaRDD,
       joinType: JoinType = Inner,
-      condition: Option[Expression] = None): SchemaRDD =
-    new SchemaRDD(sqlContext, Join(logicalPlan, otherPlan.logicalPlan, joinType, condition))
+      on: Option[Expression] = None): SchemaRDD =
+    new SchemaRDD(sqlContext, Join(logicalPlan, otherPlan.logicalPlan, joinType, on))
 
   /**
    * Sorts the results by the given expressions.
@@ -195,14 +195,14 @@ class SchemaRDD(
    * with the same name, for example, when peforming self-joins.
    *
    * {{{
-   *   val x = schemaRDD.where('a === 1).subquery('x)
-   *   val y = schemaRDD.where('a === 2).subquery('y)
+   *   val x = schemaRDD.where('a === 1).as('x)
+   *   val y = schemaRDD.where('a === 2).as('y)
    *   x.join(y).where("x.a".attr === "y.a".attr),
    * }}}
    *
    * @group Query
    */
-  def subquery(alias: Symbol) =
+  def as(alias: Symbol) =
     new SchemaRDD(sqlContext, Subquery(alias.name, logicalPlan))
 
   /**
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala
index acb1ee83a72f617a67c88dc7cd463085ccda2ada..daa423cb8ea1a6a9b332e34006361391ba83c6cf 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala
@@ -24,6 +24,7 @@ import org.apache.spark.sql.catalyst.analysis.MultiInstanceRelation
 import org.apache.spark.sql.catalyst.expressions.GenericRow
 import org.apache.spark.sql.catalyst.plans.{QueryPlan, logical}
 import org.apache.spark.sql.catalyst.plans.physical._
+import org.apache.spark.sql.columnar.InMemoryColumnarTableScan
 
 abstract class SparkPlan extends QueryPlan[SparkPlan] with Logging {
   self: Product =>
@@ -69,6 +70,8 @@ case class SparkLogicalPlan(alreadyPlanned: SparkPlan)
     SparkLogicalPlan(
       alreadyPlanned match {
         case ExistingRdd(output, rdd) => ExistingRdd(output.map(_.newInstance), rdd)
+        case InMemoryColumnarTableScan(output, child) =>
+          InMemoryColumnarTableScan(output.map(_.newInstance), child)
         case _ => sys.error("Multiple instance of the same relation detected.")
       }).asInstanceOf[this.type]
   }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala
index e5902c3cae3815a1076f0872dc271094d956bcca..7c6a642278226fd40222b32a79e886acd7b467ca 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala
@@ -58,4 +58,17 @@ class CachedTableSuite extends QueryTest {
       TestSQLContext.uncacheTable("testData")
     }
   }
+
+  test("SELECT Star Cached Table") {
+    TestSQLContext.sql("SELECT * FROM testData").registerAsTable("selectStar")
+    TestSQLContext.cacheTable("selectStar")
+    TestSQLContext.sql("SELECT * FROM selectStar")
+    TestSQLContext.uncacheTable("selectStar")
+  }
+
+  test("Self-join cached") {
+    TestSQLContext.cacheTable("testData")
+    TestSQLContext.sql("SELECT * FROM testData a JOIN testData b ON a.key = b.key")
+    TestSQLContext.uncacheTable("testData")
+  }
 }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DslQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DslQuerySuite.scala
index 2524a37cbac13673aa97698b985e4408ee30da6f..be0f4a4c73b365542ac33d323d03d5d5672529d3 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DslQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DslQuerySuite.scala
@@ -119,8 +119,8 @@ class DslQuerySuite extends QueryTest {
   }
 
   test("inner join, where, multiple matches") {
-    val x = testData2.where('a === 1).subquery('x)
-    val y = testData2.where('a === 1).subquery('y)
+    val x = testData2.where('a === 1).as('x)
+    val y = testData2.where('a === 1).as('y)
     checkAnswer(
       x.join(y).where("x.a".attr === "y.a".attr),
       (1,1,1,1) ::
@@ -131,8 +131,8 @@ class DslQuerySuite extends QueryTest {
   }
 
   test("inner join, no matches") {
-    val x = testData2.where('a === 1).subquery('x)
-    val y = testData2.where('a === 2).subquery('y)
+    val x = testData2.where('a === 1).as('x)
+    val y = testData2.where('a === 2).as('y)
     checkAnswer(
       x.join(y).where("x.a".attr === "y.a".attr),
       Nil)
@@ -140,8 +140,8 @@ class DslQuerySuite extends QueryTest {
 
   test("big inner join, 4 matches per row") {
     val bigData = testData.unionAll(testData).unionAll(testData).unionAll(testData)
-    val bigDataX = bigData.subquery('x)
-    val bigDataY = bigData.subquery('y)
+    val bigDataX = bigData.as('x)
+    val bigDataY = bigData.as('y)
 
     checkAnswer(
       bigDataX.join(bigDataY).where("x.key".attr === "y.key".attr),
@@ -181,8 +181,8 @@ class DslQuerySuite extends QueryTest {
   }
 
   test("full outer join") {
-    val left = upperCaseData.where('N <= 4).subquery('left)
-    val right = upperCaseData.where('N >= 3).subquery('right)
+    val left = upperCaseData.where('N <= 4).as('left)
+    val right = upperCaseData.where('N >= 3).as('right)
 
     checkAnswer(
       left.join(right, FullOuter, Some("left.N".attr === "right.N".attr)),
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/parquet/ParquetQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/parquet/ParquetQuerySuite.scala
index a62a3c4d02354d85721493d24eab9a8cc6e574bb..fc68d6c5620d3b1e5ba70882d97f4d04be8016a7 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/parquet/ParquetQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/parquet/ParquetQuerySuite.scala
@@ -56,8 +56,8 @@ class ParquetQuerySuite extends FunSuite with BeforeAndAfterAll {
   }
 
   test("self-join parquet files") {
-    val x = ParquetTestData.testData.subquery('x)
-    val y = ParquetTestData.testData.subquery('y)
+    val x = ParquetTestData.testData.as('x)
+    val y = ParquetTestData.testData.as('y)
     val query = x.join(y).where("x.myint".attr === "y.myint".attr)
 
     // Check to make sure that the attributes from either side of the join have unique expression