diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala
index 25e98c0bdd4310828c4993437415a50719ec2956..688555cf136e879e8c7fdecfc519fd8a2789da96 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala
@@ -364,7 +364,7 @@ private[sql] abstract class SparkStrategies extends QueryPlanner[SparkPlan] {
       case e @ EvaluatePython(udf, child, _) =>
         BatchPythonEvaluation(udf, e.output, planLater(child)) :: Nil
       case LogicalRDD(output, rdd) => PhysicalRDD(output, rdd, "ExistingRDD") :: Nil
-      case BroadcastHint(child) => apply(child)
+      case BroadcastHint(child) => planLater(child) :: Nil
       case _ => Nil
     }
   }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameJoinSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameJoinSuite.scala
index 56ad71ea4f487826ef32dca0f4839576a9686099..c70397f9853ae978d9ac88134456855cdedb056c 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameJoinSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameJoinSuite.scala
@@ -120,5 +120,12 @@ class DataFrameJoinSuite extends QueryTest with SharedSQLContext {
 
     // planner should not crash without a join
     broadcast(df1).queryExecution.executedPlan
+
+    // SPARK-12275: no physical plan for BroadcastHint in some cases
+    withTempPath { path =>
+      df1.write.parquet(path.getCanonicalPath)
+      val pf1 = sqlContext.read.parquet(path.getCanonicalPath)
+      assert(df1.join(broadcast(pf1)).count() === 4)
+    }
   }
 }
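
For reference, here is a minimal standalone sketch (not part of the patch) of the scenario the new test exercises: a broadcast hint applied to a DataFrame read back from Parquet, joined without a condition. The SparkContext setup, the temporary path, and the two-row df1 are illustrative assumptions; the Spark 1.6-era SQLContext, DataFrameWriter.parquet, DataFrameReader.parquet, and functions.broadcast calls mirror the ones used in the test above.

    import org.apache.spark.{SparkConf, SparkContext}
    import org.apache.spark.sql.SQLContext
    import org.apache.spark.sql.functions.broadcast

    // Standalone sketch under the assumptions noted above; not taken from the patch.
    object BroadcastHintExample {
      def main(args: Array[String]): Unit = {
        val sc = new SparkContext(
          new SparkConf().setAppName("broadcast-hint-example").setMaster("local[2]"))
        val sqlContext = new SQLContext(sc)
        import sqlContext.implicits._

        // Two-row DataFrame, so the unconditioned (cartesian) join below yields 2 * 2 = 4 rows.
        val df1 = Seq((1, "a"), (2, "b")).toDF("key", "value")

        // Illustrative temporary location for the Parquet copy of df1.
        val path = java.nio.file.Files.createTempDirectory("broadcast-hint").resolve("t").toString
        df1.write.parquet(path)
        val pf1 = sqlContext.read.parquet(path)

        // The hinted side is backed by a Parquet relation rather than a plain LocalRelation,
        // which is the kind of child the old apply(child) dispatch could fail to plan.
        println(df1.join(broadcast(pf1)).count()) // expected: 4
        sc.stop()
      }
    }

With the earlier `apply(child)` dispatch, the strategy re-applied only itself to the hint's child; a child handled by a different strategy (such as a Parquet-backed relation) then produced no physical plan. Switching to `planLater(child) :: Nil` defers the child to the full planner, which is what the regression test above verifies.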