diff --git a/docs/_layouts/global.html b/docs/_layouts/global.html
index 3a3b8dce37757e3c0b62c47e194e6dce5cf05b02..2f6bdcabe8c44dd20e939d559d7a99bcc57082fa 100755
--- a/docs/_layouts/global.html
+++ b/docs/_layouts/global.html
@@ -98,7 +98,7 @@
                             <ul class="dropdown-menu">
                                 <li><a href="configuration.html">Configuration</a></li>
                                 <li><a href="tuning.html">Tuning Guide</a></li>
-                                <li><a href="cdh-hdp.html">Running with CDH/HDP</a></li>
+                                <li><a href="hadoop-third-party-distributions.html">Running with CDH/HDP</a></li>
                                 <li><a href="hardware-provisioning.html">Hardware Provisioning</a></li>
                                 <li><a href="building-with-maven.html">Building Spark with Maven</a></li>
                                 <li><a href="contributing-to-spark.html">Contributing to Spark</a></li>
diff --git a/docs/cdh-hdp.md b/docs/hadoop-third-party-distributions.md
similarity index 93%
rename from docs/cdh-hdp.md
rename to docs/hadoop-third-party-distributions.md
index 679fb1100d115e1ac845621b67dd3d9b04c8e2a7..9f4f354525b7a876df18656bbb2318adad3a91e1 100644
--- a/docs/cdh-hdp.md
+++ b/docs/hadoop-third-party-distributions.md
@@ -54,9 +54,7 @@ Spark can run in a variety of deployment modes:
   cores dedicated to Spark on each node.
 * Run Spark alongside Hadoop using a cluster resource manager, such as YARN or Mesos.
 
-These options are identical for those using CDH and HDP. Note that if you have a YARN cluster,
-but still prefer to run Spark on a dedicated set of nodes rather than scheduling through YARN, 
-use `mr1` versions of HADOOP_HOME when compiling.
+These options are identical for those using CDH and HDP.
 
 # Inheriting Cluster Configuration
 If you plan to read and write from HDFS using Spark, there are two Hadoop configuration files that