diff --git a/core/src/main/scala/org/apache/spark/Accumulator.scala b/core/src/main/scala/org/apache/spark/Accumulator.scala
index 7bea636c94aa03b106533b5eabbef77220c2be9d..9d5fbefc824adb2f8fef41a2f08197330d515ac8 100644
--- a/core/src/main/scala/org/apache/spark/Accumulator.scala
+++ b/core/src/main/scala/org/apache/spark/Accumulator.scala
@@ -24,8 +24,7 @@ package org.apache.spark
  * They can be used to implement counters (as in MapReduce) or sums. Spark natively supports
  * accumulators of numeric value types, and programmers can add support for new types.
  *
- * An accumulator is created from an initial value `v` by calling
- * [[SparkContext#accumulator SparkContext.accumulator]].
+ * An accumulator is created from an initial value `v` by calling `SparkContext.accumulator`.
  * Tasks running on the cluster can then add to it using the `+=` operator.
  * However, they cannot read its value. Only the driver program can read the accumulator's value,
  * using its [[#value]] method.
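
A minimal usage sketch of the accumulator behaviour described above, assuming an existing SparkContext `sc`:

    // Created on the driver with an initial value of 0.
    val acc = sc.accumulator(0)
    // Tasks on the cluster can only add to it with `+=` ...
    sc.parallelize(1 to 100).foreach(x => acc += x)
    // ... and only the driver can read the result.
    println(acc.value)  // 5050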
diff --git a/core/src/main/scala/org/apache/spark/api/java/JavaPairRDD.scala b/core/src/main/scala/org/apache/spark/api/java/JavaPairRDD.scala
index 766aea213a972fa922f11087a1fd58a3e97a783f..9544475ff0428b428777b4896f416e8f15e69a5b 100644
--- a/core/src/main/scala/org/apache/spark/api/java/JavaPairRDD.scala
+++ b/core/src/main/scala/org/apache/spark/api/java/JavaPairRDD.scala
@@ -166,7 +166,7 @@ class JavaPairRDD[K, V](val rdd: RDD[(K, V)])
    * Return a subset of this RDD sampled by key (via stratified sampling) containing exactly
    * math.ceil(numItems * samplingRate) for each stratum (group of pairs with the same key).
    *
-   * This method differs from [[sampleByKey]] in that we make additional passes over the RDD to
+   * This method differs from `sampleByKey` in that we make additional passes over the RDD to
    * create a sample size that's exactly equal to the sum of math.ceil(numItems * samplingRate)
    * over all key values with a 99.99% confidence. When sampling without replacement, we need one
    * additional pass over the RDD to guarantee sample size; when sampling with replacement, we need
@@ -184,7 +184,7 @@ class JavaPairRDD[K, V](val rdd: RDD[(K, V)])
    * Return a subset of this RDD sampled by key (via stratified sampling) containing exactly
    * math.ceil(numItems * samplingRate) for each stratum (group of pairs with the same key).
    *
-   * This method differs from [[sampleByKey]] in that we make additional passes over the RDD to
+   * This method differs from `sampleByKey` in that we make additional passes over the RDD to
    * create a sample size that's exactly equal to the sum of math.ceil(numItems * samplingRate)
    * over all key values with a 99.99% confidence. When sampling without replacement, we need one
    * additional pass over the RDD to guarantee sample size; when sampling with replacement, we need
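
A Scala sketch contrasting the approximate and exact samplers referenced here, assuming an existing SparkContext `sc`:

    val pairs = sc.parallelize(Seq(("a", 1), ("a", 2), ("b", 3), ("b", 4)))
    val fractions = Map("a" -> 0.5, "b" -> 0.5)
    // Single pass, approximate per-stratum sample sizes.
    val approx = pairs.sampleByKey(withReplacement = false, fractions, seed = 42L)
    // Extra pass(es), exactly math.ceil(numItems * samplingRate) items per key.
    val exact = pairs.sampleByKeyExact(withReplacement = false, fractions, seed = 42L)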
diff --git a/core/src/main/scala/org/apache/spark/api/java/JavaRDDLike.scala b/core/src/main/scala/org/apache/spark/api/java/JavaRDDLike.scala
index eda16d957cc58bbbf1597439c0f247290824700f..91ae1002abd215f27c4f6d9a59fd52ca7bf256aa 100644
--- a/core/src/main/scala/org/apache/spark/api/java/JavaRDDLike.scala
+++ b/core/src/main/scala/org/apache/spark/api/java/JavaRDDLike.scala
@@ -393,7 +393,7 @@ trait JavaRDDLike[T, This <: JavaRDDLike[T, This]] extends Serializable {
   def treeReduce(f: JFunction2[T, T, T], depth: Int): T = rdd.treeReduce(f, depth)
 
   /**
-   * [[org.apache.spark.api.java.JavaRDDLike#treeReduce]] with suggested depth 2.
+   * `org.apache.spark.api.java.JavaRDDLike.treeReduce` with suggested depth 2.
    */
   def treeReduce(f: JFunction2[T, T, T]): T = treeReduce(f, 2)
 
@@ -440,7 +440,7 @@ trait JavaRDDLike[T, This <: JavaRDDLike[T, This]] extends Serializable {
   }
 
   /**
-   * [[org.apache.spark.api.java.JavaRDDLike#treeAggregate]] with suggested depth 2.
+   * `org.apache.spark.api.java.JavaRDDLike.treeAggregate` with suggested depth 2.
    */
   def treeAggregate[U](
       zeroValue: U,
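
A sketch of the tree-shaped variants and their suggested depth of 2, assuming an existing SparkContext `sc`:

    val nums = sc.parallelize(1 to 1000000, 200)
    // Equivalent to nums.treeReduce(_ + _, 2).
    val total = nums.treeReduce(_ + _)
    // treeAggregate combines partial results through a multi-level tree of the given depth.
    val total2 = nums.treeAggregate(0)(_ + _, _ + _, depth = 3)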
diff --git a/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala b/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
index ec12b9963e7c289d57ea95f44685acea5aed877d..d7bfdbad8442f1975b3e6663828ed19df2ff116b 100644
--- a/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
@@ -109,7 +109,7 @@ class PairRDDFunctions[K, V](self: RDD[(K, V)])
    * functions. This method is here for backward compatibility. It does not provide combiner
    * classtag information to the shuffle.
    *
-   * @see [[combineByKeyWithClassTag]]
+   * @see `combineByKeyWithClassTag`
    */
   def combineByKey[C](
       createCombiner: V => C,
@@ -127,7 +127,7 @@ class PairRDDFunctions[K, V](self: RDD[(K, V)])
    * This method is here for backward compatibility. It does not provide combiner
    * classtag information to the shuffle.
    *
-   * @see [[combineByKeyWithClassTag]]
+   * @see `combineByKeyWithClassTag`
    */
   def combineByKey[C](
       createCombiner: V => C,
@@ -608,7 +608,7 @@ class PairRDDFunctions[K, V](self: RDD[(K, V)])
    * existing partitioner/parallelism level. This method is here for backward compatibility. It
    * does not provide combiner classtag information to the shuffle.
    *
-   * @see [[combineByKeyWithClassTag]]
+   * @see `combineByKeyWithClassTag`
    */
   def combineByKey[C](
       createCombiner: V => C,
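
A per-key average built from the three user-supplied functions that both combineByKey and combineByKeyWithClassTag accept (the latter additionally passes the combiner ClassTag to the shuffle); `sc` is an assumed SparkContext:

    val scores = sc.parallelize(Seq(("a", 1.0), ("a", 3.0), ("b", 4.0)))
    val sumCount = scores.combineByKey(
      (v: Double) => (v, 1),                                             // createCombiner
      (acc: (Double, Int), v: Double) => (acc._1 + v, acc._2 + 1),       // mergeValue
      (a: (Double, Int), b: (Double, Int)) => (a._1 + b._1, a._2 + b._2) // mergeCombiners
    )
    val avgByKey = sumCount.mapValues { case (sum, count) => sum / count }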
diff --git a/core/src/main/scala/org/apache/spark/rdd/RDD.scala b/core/src/main/scala/org/apache/spark/rdd/RDD.scala
index 374abccf6ad55d0a294f22cba461f1822baff664..a7e01f397e0626fa8cd07cb3dd302a72738f6216 100644
--- a/core/src/main/scala/org/apache/spark/rdd/RDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/RDD.scala
@@ -1841,7 +1841,7 @@ abstract class RDD[T: ClassTag](
  * Defines implicit functions that provide extra functionalities on RDDs of specific types.
  *
  * For example, [[RDD.rddToPairRDDFunctions]] converts an RDD into a [[PairRDDFunctions]] for
- * key-value-pair RDDs, and enabling extra functionalities such as [[PairRDDFunctions.reduceByKey]].
+ * key-value-pair RDDs, enabling extra functionalities such as `PairRDDFunctions.reduceByKey`.
  */
 object RDD {
 
diff --git a/graphx/src/main/scala/org/apache/spark/graphx/Graph.scala b/graphx/src/main/scala/org/apache/spark/graphx/Graph.scala
index c55a5885ba80568b540dc178b62af4304eb89731..b3a3420b8494d873073b139e7396a9a768497e8a 100644
--- a/graphx/src/main/scala/org/apache/spark/graphx/Graph.scala
+++ b/graphx/src/main/scala/org/apache/spark/graphx/Graph.scala
@@ -331,7 +331,7 @@ abstract class Graph[VD: ClassTag, ED: ClassTag] protected () extends Serializab
 
   /**
    * Merges multiple edges between two vertices into a single edge. For correct results, the graph
-   * must have been partitioned using [[partitionBy]].
+   * must have been partitioned using `partitionBy`.
    *
    * @param merge the user-supplied commutative associative function to merge edge attributes
    *              for duplicate edges.
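
A sketch of the documented requirement that groupEdges be preceded by partitionBy so duplicate edges are co-located; `graph` is an assumed Graph with integer edge attributes:

    import org.apache.spark.graphx.PartitionStrategy
    // Without partitionBy, duplicate edges may sit in different partitions and are not merged.
    val deduped = graph
      .partitionBy(PartitionStrategy.EdgePartition2D)
      .groupEdges((a, b) => a + b)  // sum the attributes of duplicate edges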
diff --git a/graphx/src/main/scala/org/apache/spark/graphx/GraphOps.scala b/graphx/src/main/scala/org/apache/spark/graphx/GraphOps.scala
index 90907300be9758858118c9ea8b395d3fc9348140..475bccf9bfc76a7237fa974d9a884912e8a45487 100644
--- a/graphx/src/main/scala/org/apache/spark/graphx/GraphOps.scala
+++ b/graphx/src/main/scala/org/apache/spark/graphx/GraphOps.scala
@@ -428,7 +428,7 @@ class GraphOps[VD: ClassTag, ED: ClassTag](graph: Graph[VD, ED]) extends Seriali
    * Compute the connected component membership of each vertex and return a graph with the vertex
    * value containing the lowest vertex id in the connected component containing that vertex.
    *
-   * @see [[org.apache.spark.graphx.lib.ConnectedComponents$#run]]
+   * @see `org.apache.spark.graphx.lib.ConnectedComponents.run`
    */
   def connectedComponents(): Graph[VertexId, ED] = {
     ConnectedComponents.run(graph)
@@ -438,7 +438,7 @@ class GraphOps[VD: ClassTag, ED: ClassTag](graph: Graph[VD, ED]) extends Seriali
    * Compute the connected component membership of each vertex and return a graph with the vertex
    * value containing the lowest vertex id in the connected component containing that vertex.
    *
-   * @see [[org.apache.spark.graphx.lib.ConnectedComponents$#run]]
+   * @see `org.apache.spark.graphx.lib.ConnectedComponents.run`
    */
   def connectedComponents(maxIterations: Int): Graph[VertexId, ED] = {
     ConnectedComponents.run(graph, maxIterations)
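
A small GraphX sketch of connectedComponents, assuming an existing SparkContext `sc`:

    import org.apache.spark.graphx.{Edge, Graph}
    val edges = sc.parallelize(Seq(Edge(1L, 2L, 1), Edge(2L, 3L, 1), Edge(10L, 11L, 1)))
    val graph = Graph.fromEdges(edges, defaultValue = 0)
    // Each vertex value becomes the lowest vertex id in its connected component.
    val cc = graph.connectedComponents()
    cc.vertices.collect()  // e.g. (1L,1L), (2L,1L), (3L,1L), (10L,10L), (11L,10L)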
diff --git a/mllib/src/main/scala/org/apache/spark/ml/Pipeline.scala b/mllib/src/main/scala/org/apache/spark/ml/Pipeline.scala
index 08e9cb9ba86688539958b841a822dca2eb6a07dc..b76dc5f93193cb2c43b7f56c312c590d0140e793 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/Pipeline.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/Pipeline.scala
@@ -83,11 +83,11 @@ abstract class PipelineStage extends Params with Logging {
 
 /**
  * A simple pipeline, which acts as an estimator. A Pipeline consists of a sequence of stages, each
- * of which is either an [[Estimator]] or a [[Transformer]]. When [[Pipeline#fit]] is called, the
- * stages are executed in order. If a stage is an [[Estimator]], its [[Estimator#fit]] method will
+ * of which is either an [[Estimator]] or a [[Transformer]]. When `Pipeline.fit` is called, the
+ * stages are executed in order. If a stage is an [[Estimator]], its `Estimator.fit` method will
  * be called on the input dataset to fit a model. Then the model, which is a transformer, will be
  * used to transform the dataset as the input to the next stage. If a stage is a [[Transformer]],
- * its [[Transformer#transform]] method will be called to produce the dataset for the next stage.
+ * its `Transformer.transform` method will be called to produce the dataset for the next stage.
  * The fitted model from a [[Pipeline]] is a [[PipelineModel]], which consists of fitted models and
  * transformers, corresponding to the pipeline stages. If there are no stages, the pipeline acts as
  * an identity transformer.
@@ -121,9 +121,9 @@ class Pipeline @Since("1.4.0") (
 
   /**
    * Fits the pipeline to the input dataset with additional parameters. If a stage is an
-   * [[Estimator]], its [[Estimator#fit]] method will be called on the input dataset to fit a model.
+   * [[Estimator]], its `Estimator.fit` method will be called on the input dataset to fit a model.
    * Then the model, which is a transformer, will be used to transform the dataset as the input to
-   * the next stage. If a stage is a [[Transformer]], its [[Transformer#transform]] method will be
+   * the next stage. If a stage is a [[Transformer]], its `Transformer.transform` method will be
    * called to produce the dataset for the next stage. The fitted model from a [[Pipeline]] is an
    * [[PipelineModel]], which consists of fitted models and transformers, corresponding to the
    * pipeline stages. If there are no stages, the output model acts as an identity transformer.
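
A sketch of the fit/transform flow described above, assuming DataFrames `training` and `test` with `text` and `label` columns:

    import org.apache.spark.ml.Pipeline
    import org.apache.spark.ml.classification.LogisticRegression
    import org.apache.spark.ml.feature.{HashingTF, Tokenizer}

    val tokenizer = new Tokenizer().setInputCol("text").setOutputCol("words")
    val hashingTF = new HashingTF().setInputCol("words").setOutputCol("features")
    val lr = new LogisticRegression().setMaxIter(10)
    // Stages run in order: Transformers transform the dataset, Estimators are fit and their
    // models transform it; the fitted Pipeline is a PipelineModel.
    val model = new Pipeline().setStages(Array(tokenizer, hashingTF, lr)).fit(training)
    val predictions = model.transform(test)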
diff --git a/mllib/src/main/scala/org/apache/spark/ml/param/params.scala b/mllib/src/main/scala/org/apache/spark/ml/param/params.scala
index 9adb0fa618f29bc7decfd844622ad73b501374e8..ab0620ca7553ac1b64665c3bb6a19c9388600ec8 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/param/params.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/param/params.scala
@@ -728,7 +728,7 @@ trait Params extends Identifiable with Serializable {
   }
 
   /**
-   * [[extractParamMap]] with no extra values.
+   * `extractParamMap` with no extra values.
    */
   final def extractParamMap(): ParamMap = {
     extractParamMap(ParamMap.empty)
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/clustering/LDAModel.scala b/mllib/src/main/scala/org/apache/spark/mllib/clustering/LDAModel.scala
index ae336982092d6cc334d32740431623bd4a07fba6..7fd722a332923fcfdaa24b14a072ea96f0ee0c86 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/clustering/LDAModel.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/clustering/LDAModel.scala
@@ -237,7 +237,7 @@ class LocalLDAModel private[spark] (
     vocabSize)
 
   /**
-   * Java-friendly version of [[logLikelihood]]
+   * Java-friendly version of `logLikelihood`
    */
   @Since("1.5.0")
   def logLikelihood(documents: JavaPairRDD[java.lang.Long, Vector]): Double = {
@@ -259,7 +259,9 @@ class LocalLDAModel private[spark] (
     -logLikelihood(documents) / corpusTokenCount
   }
 
-  /** Java-friendly version of [[logPerplexity]] */
+  /**
+   * Java-friendly version of `logPerplexity`
+   */
   @Since("1.5.0")
   def logPerplexity(documents: JavaPairRDD[java.lang.Long, Vector]): Double = {
     logPerplexity(documents.rdd.asInstanceOf[RDD[(Long, Vector)]])
@@ -365,7 +367,9 @@ class LocalLDAModel private[spark] (
     }
   }
 
-  /** Get a method usable as a UDF for [[topicDistributions()]] */
+  /**
+   * Get a method usable as a UDF for `topicDistributions()`
+   */
   private[spark] def getTopicDistributionMethod(sc: SparkContext): Vector => Vector = {
     val expElogbeta = exp(LDAUtils.dirichletExpectation(topicsMatrix.asBreeze.toDenseMatrix.t).t)
     val expElogbetaBc = sc.broadcast(expElogbeta)
@@ -414,7 +418,7 @@ class LocalLDAModel private[spark] (
   }
 
   /**
-   * Java-friendly version of [[topicDistributions]]
+   * Java-friendly version of `topicDistributions`
    */
   @Since("1.4.1")
   def topicDistributions(
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/fpm/AssociationRules.scala b/mllib/src/main/scala/org/apache/spark/mllib/fpm/AssociationRules.scala
index dca031477d3b760698a6ee2a37edd9c821971972..85a90fa959f938347de85dca3a115468b232ac79 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/fpm/AssociationRules.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/fpm/AssociationRules.scala
@@ -80,7 +80,9 @@ class AssociationRules private[fpm] (
     }.filter(_.confidence >= minConfidence)
   }
 
-  /** Java-friendly version of [[run]]. */
+  /**
+   * Java-friendly version of `run`.
+   */
   @Since("1.5.0")
   def run[Item](freqItemsets: JavaRDD[FreqItemset[Item]]): JavaRDD[Rule[Item]] = {
     val tag = fakeClassTag[Item]
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/fpm/FPGrowth.scala b/mllib/src/main/scala/org/apache/spark/mllib/fpm/FPGrowth.scala
index e3cf0d4979ed43d56a4baa4b4218ec58e6312768..635da00b6990e197e9563709d91749252ef67cab 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/fpm/FPGrowth.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/fpm/FPGrowth.scala
@@ -218,7 +218,9 @@ class FPGrowth private (
     new FPGrowthModel(freqItemsets)
   }
 
-  /** Java-friendly version of [[run]]. */
+  /**
+   * Java-friendly version of `run`.
+   */
   @Since("1.3.0")
   def run[Item, Basket <: JavaIterable[Item]](data: JavaRDD[Basket]): FPGrowthModel[Item] = {
     implicit val tag = fakeClassTag[Item]
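
A Scala sketch of the run method wrapped here, assuming an existing SparkContext `sc`:

    import org.apache.spark.mllib.fpm.FPGrowth
    // Each basket is an array of item identifiers.
    val transactions = sc.parallelize(Seq(
      Array("a", "b", "c"),
      Array("a", "b"),
      Array("b", "c")))
    val model = new FPGrowth().setMinSupport(0.5).setNumPartitions(2).run(transactions)
    model.freqItemsets.collect().foreach { fi =>
      println(fi.items.mkString("[", ",", "]") + " -> " + fi.freq)
    }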
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/optimization/GradientDescent.scala b/mllib/src/main/scala/org/apache/spark/mllib/optimization/GradientDescent.scala
index 8979707666a2fc4314b0bbae7097f4abfea061ab..07a67a9e719db956505e89b9b1be09be92d72f18 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/optimization/GradientDescent.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/optimization/GradientDescent.scala
@@ -279,7 +279,7 @@ object GradientDescent extends Logging {
   }
 
   /**
-   * Alias of [[runMiniBatchSGD]] with convergenceTol set to default value of 0.001.
+   * Alias of `runMiniBatchSGD` with convergenceTol set to default value of 0.001.
    */
   def runMiniBatchSGD(
       data: RDD[(Double, Vector)],
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/random/RandomRDDs.scala b/mllib/src/main/scala/org/apache/spark/mllib/random/RandomRDDs.scala
index 85d4d7f37f2c006284e599c01cc97fc14b835007..258b1763bba86767e8057fa084eae158280bbb8b 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/random/RandomRDDs.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/random/RandomRDDs.scala
@@ -57,7 +57,7 @@ object RandomRDDs {
   }
 
   /**
-   * Java-friendly version of [[RandomRDDs#uniformRDD]].
+   * Java-friendly version of `RandomRDDs.uniformRDD`.
    */
   @Since("1.1.0")
   def uniformJavaRDD(
@@ -69,7 +69,7 @@ object RandomRDDs {
   }
 
   /**
-   * [[RandomRDDs#uniformJavaRDD]] with the default seed.
+   * `RandomRDDs.uniformJavaRDD` with the default seed.
    */
   @Since("1.1.0")
   def uniformJavaRDD(jsc: JavaSparkContext, size: Long, numPartitions: Int): JavaDoubleRDD = {
@@ -77,7 +77,7 @@ object RandomRDDs {
   }
 
   /**
-   * [[RandomRDDs#uniformJavaRDD]] with the default number of partitions and the default seed.
+   * `RandomRDDs.uniformJavaRDD` with the default number of partitions and the default seed.
    */
   @Since("1.1.0")
   def uniformJavaRDD(jsc: JavaSparkContext, size: Long): JavaDoubleRDD = {
@@ -107,7 +107,7 @@ object RandomRDDs {
   }
 
   /**
-   * Java-friendly version of [[RandomRDDs#normalRDD]].
+   * Java-friendly version of `RandomRDDs.normalRDD`.
    */
   @Since("1.1.0")
   def normalJavaRDD(
@@ -119,7 +119,7 @@ object RandomRDDs {
   }
 
   /**
-   * [[RandomRDDs#normalJavaRDD]] with the default seed.
+   * `RandomRDDs.normalJavaRDD` with the default seed.
    */
   @Since("1.1.0")
   def normalJavaRDD(jsc: JavaSparkContext, size: Long, numPartitions: Int): JavaDoubleRDD = {
@@ -127,7 +127,7 @@ object RandomRDDs {
   }
 
   /**
-   * [[RandomRDDs#normalJavaRDD]] with the default number of partitions and the default seed.
+   * `RandomRDDs.normalJavaRDD` with the default number of partitions and the default seed.
    */
   @Since("1.1.0")
   def normalJavaRDD(jsc: JavaSparkContext, size: Long): JavaDoubleRDD = {
@@ -157,7 +157,7 @@ object RandomRDDs {
   }
 
   /**
-   * Java-friendly version of [[RandomRDDs#poissonRDD]].
+   * Java-friendly version of `RandomRDDs.poissonRDD`.
    */
   @Since("1.1.0")
   def poissonJavaRDD(
@@ -170,7 +170,7 @@ object RandomRDDs {
   }
 
   /**
-   * [[RandomRDDs#poissonJavaRDD]] with the default seed.
+   * `RandomRDDs.poissonJavaRDD` with the default seed.
    */
   @Since("1.1.0")
   def poissonJavaRDD(
@@ -182,7 +182,7 @@ object RandomRDDs {
   }
 
   /**
-   * [[RandomRDDs#poissonJavaRDD]] with the default number of partitions and the default seed.
+   * `RandomRDDs.poissonJavaRDD` with the default number of partitions and the default seed.
    */
   @Since("1.1.0")
   def poissonJavaRDD(jsc: JavaSparkContext, mean: Double, size: Long): JavaDoubleRDD = {
@@ -212,7 +212,7 @@ object RandomRDDs {
   }
 
   /**
-   * Java-friendly version of [[RandomRDDs#exponentialRDD]].
+   * Java-friendly version of `RandomRDDs.exponentialRDD`.
    */
   @Since("1.3.0")
   def exponentialJavaRDD(
@@ -225,7 +225,7 @@ object RandomRDDs {
   }
 
   /**
-   * [[RandomRDDs#exponentialJavaRDD]] with the default seed.
+   * `RandomRDDs.exponentialJavaRDD` with the default seed.
    */
   @Since("1.3.0")
   def exponentialJavaRDD(
@@ -237,7 +237,7 @@ object RandomRDDs {
   }
 
   /**
-   * [[RandomRDDs#exponentialJavaRDD]] with the default number of partitions and the default seed.
+   * `RandomRDDs.exponentialJavaRDD` with the default number of partitions and the default seed.
    */
   @Since("1.3.0")
   def exponentialJavaRDD(jsc: JavaSparkContext, mean: Double, size: Long): JavaDoubleRDD = {
@@ -269,7 +269,7 @@ object RandomRDDs {
   }
 
   /**
-   * Java-friendly version of [[RandomRDDs#gammaRDD]].
+   * Java-friendly version of `RandomRDDs.gammaRDD`.
    */
   @Since("1.3.0")
   def gammaJavaRDD(
@@ -283,7 +283,7 @@ object RandomRDDs {
   }
 
   /**
-   * [[RandomRDDs#gammaJavaRDD]] with the default seed.
+   * `RandomRDDs.gammaJavaRDD` with the default seed.
    */
   @Since("1.3.0")
   def gammaJavaRDD(
@@ -296,7 +296,7 @@ object RandomRDDs {
   }
 
   /**
-   * [[RandomRDDs#gammaJavaRDD]] with the default number of partitions and the default seed.
+   * `RandomRDDs.gammaJavaRDD` with the default number of partitions and the default seed.
    */
   @Since("1.3.0")
   def gammaJavaRDD(
@@ -332,7 +332,7 @@ object RandomRDDs {
   }
 
   /**
-   * Java-friendly version of [[RandomRDDs#logNormalRDD]].
+   * Java-friendly version of `RandomRDDs.logNormalRDD`.
    */
   @Since("1.3.0")
   def logNormalJavaRDD(
@@ -346,7 +346,7 @@ object RandomRDDs {
   }
 
   /**
-   * [[RandomRDDs#logNormalJavaRDD]] with the default seed.
+   * `RandomRDDs.logNormalJavaRDD` with the default seed.
    */
   @Since("1.3.0")
   def logNormalJavaRDD(
@@ -359,7 +359,7 @@ object RandomRDDs {
   }
 
   /**
-   * [[RandomRDDs#logNormalJavaRDD]] with the default number of partitions and the default seed.
+   * `RandomRDDs.logNormalJavaRDD` with the default number of partitions and the default seed.
    */
   @Since("1.3.0")
   def logNormalJavaRDD(
@@ -419,7 +419,7 @@ object RandomRDDs {
 
   /**
    * :: DeveloperApi ::
-   * [[RandomRDDs#randomJavaRDD]] with the default seed.
+   * `RandomRDDs.randomJavaRDD` with the default seed.
    */
   @DeveloperApi
   @Since("1.6.0")
@@ -433,7 +433,7 @@ object RandomRDDs {
 
   /**
    * :: DeveloperApi ::
-   * [[RandomRDDs#randomJavaRDD]] with the default seed & numPartitions
+   * `RandomRDDs.randomJavaRDD` with the default seed & numPartitions
    */
   @DeveloperApi
   @Since("1.6.0")
@@ -469,7 +469,7 @@ object RandomRDDs {
   }
 
   /**
-   * Java-friendly version of [[RandomRDDs#uniformVectorRDD]].
+   * Java-friendly version of `RandomRDDs.uniformVectorRDD`.
    */
   @Since("1.1.0")
   def uniformJavaVectorRDD(
@@ -482,7 +482,7 @@ object RandomRDDs {
   }
 
   /**
-   * [[RandomRDDs#uniformJavaVectorRDD]] with the default seed.
+   * `RandomRDDs.uniformJavaVectorRDD` with the default seed.
    */
   @Since("1.1.0")
   def uniformJavaVectorRDD(
@@ -494,7 +494,7 @@ object RandomRDDs {
   }
 
   /**
-   * [[RandomRDDs#uniformJavaVectorRDD]] with the default number of partitions and the default seed.
+   * `RandomRDDs.uniformJavaVectorRDD` with the default number of partitions and the default seed.
    */
   @Since("1.1.0")
   def uniformJavaVectorRDD(
@@ -527,7 +527,7 @@ object RandomRDDs {
   }
 
   /**
-   * Java-friendly version of [[RandomRDDs#normalVectorRDD]].
+   * Java-friendly version of `RandomRDDs.normalVectorRDD`.
    */
   @Since("1.1.0")
   def normalJavaVectorRDD(
@@ -540,7 +540,7 @@ object RandomRDDs {
   }
 
   /**
-   * [[RandomRDDs#normalJavaVectorRDD]] with the default seed.
+   * `RandomRDDs.normalJavaVectorRDD` with the default seed.
    */
   @Since("1.1.0")
   def normalJavaVectorRDD(
@@ -552,7 +552,7 @@ object RandomRDDs {
   }
 
   /**
-   * [[RandomRDDs#normalJavaVectorRDD]] with the default number of partitions and the default seed.
+   * `RandomRDDs.normalJavaVectorRDD` with the default number of partitions and the default seed.
    */
   @Since("1.1.0")
   def normalJavaVectorRDD(
@@ -590,7 +590,7 @@ object RandomRDDs {
   }
 
   /**
-   * Java-friendly version of [[RandomRDDs#logNormalVectorRDD]].
+   * Java-friendly version of `RandomRDDs.logNormalVectorRDD`.
    */
   @Since("1.3.0")
   def logNormalJavaVectorRDD(
@@ -605,7 +605,7 @@ object RandomRDDs {
   }
 
   /**
-   * [[RandomRDDs#logNormalJavaVectorRDD]] with the default seed.
+   * `RandomRDDs.logNormalJavaVectorRDD` with the default seed.
    */
   @Since("1.3.0")
   def logNormalJavaVectorRDD(
@@ -619,7 +619,7 @@ object RandomRDDs {
   }
 
   /**
-   * [[RandomRDDs#logNormalJavaVectorRDD]] with the default number of partitions and
+   * `RandomRDDs.logNormalJavaVectorRDD` with the default number of partitions and
    * the default seed.
    */
   @Since("1.3.0")
@@ -657,7 +657,7 @@ object RandomRDDs {
   }
 
   /**
-   * Java-friendly version of [[RandomRDDs#poissonVectorRDD]].
+   * Java-friendly version of `RandomRDDs.poissonVectorRDD`.
    */
   @Since("1.1.0")
   def poissonJavaVectorRDD(
@@ -671,7 +671,7 @@ object RandomRDDs {
   }
 
   /**
-   * [[RandomRDDs#poissonJavaVectorRDD]] with the default seed.
+   * `RandomRDDs.poissonJavaVectorRDD` with the default seed.
    */
   @Since("1.1.0")
   def poissonJavaVectorRDD(
@@ -684,7 +684,7 @@ object RandomRDDs {
   }
 
   /**
-   * [[RandomRDDs#poissonJavaVectorRDD]] with the default number of partitions and the default seed.
+   * `RandomRDDs.poissonJavaVectorRDD` with the default number of partitions and the default seed.
    */
   @Since("1.1.0")
   def poissonJavaVectorRDD(
@@ -721,7 +721,7 @@ object RandomRDDs {
   }
 
   /**
-   * Java-friendly version of [[RandomRDDs#exponentialVectorRDD]].
+   * Java-friendly version of `RandomRDDs.exponentialVectorRDD`.
    */
   @Since("1.3.0")
   def exponentialJavaVectorRDD(
@@ -735,7 +735,7 @@ object RandomRDDs {
   }
 
   /**
-   * [[RandomRDDs#exponentialJavaVectorRDD]] with the default seed.
+   * `RandomRDDs.exponentialJavaVectorRDD` with the default seed.
    */
   @Since("1.3.0")
   def exponentialJavaVectorRDD(
@@ -748,7 +748,7 @@ object RandomRDDs {
   }
 
   /**
-   * [[RandomRDDs#exponentialJavaVectorRDD]] with the default number of partitions
+   * `RandomRDDs.exponentialJavaVectorRDD` with the default number of partitions
    * and the default seed.
    */
   @Since("1.3.0")
@@ -788,7 +788,7 @@ object RandomRDDs {
   }
 
   /**
-   * Java-friendly version of [[RandomRDDs#gammaVectorRDD]].
+   * Java-friendly version of `RandomRDDs.gammaVectorRDD`.
    */
   @Since("1.3.0")
   def gammaJavaVectorRDD(
@@ -803,7 +803,7 @@ object RandomRDDs {
   }
 
   /**
-   * [[RandomRDDs#gammaJavaVectorRDD]] with the default seed.
+   * `RandomRDDs.gammaJavaVectorRDD` with the default seed.
    */
   @Since("1.3.0")
   def gammaJavaVectorRDD(
@@ -817,7 +817,7 @@ object RandomRDDs {
   }
 
   /**
-   * [[RandomRDDs#gammaJavaVectorRDD]] with the default number of partitions and the default seed.
+   * `RandomRDDs.gammaJavaVectorRDD` with the default number of partitions and the default seed.
    */
   @Since("1.3.0")
   def gammaJavaVectorRDD(
@@ -857,7 +857,7 @@ object RandomRDDs {
 
   /**
    * :: DeveloperApi ::
-   * Java-friendly version of [[RandomRDDs#randomVectorRDD]].
+   * Java-friendly version of `RandomRDDs.randomVectorRDD`.
    */
   @DeveloperApi
   @Since("1.6.0")
@@ -873,7 +873,7 @@ object RandomRDDs {
 
   /**
    * :: DeveloperApi ::
-   * [[RandomRDDs#randomJavaVectorRDD]] with the default seed.
+   * `RandomRDDs.randomJavaVectorRDD` with the default seed.
    */
   @DeveloperApi
   @Since("1.6.0")
@@ -888,7 +888,7 @@ object RandomRDDs {
 
   /**
    * :: DeveloperApi ::
-   * [[RandomRDDs#randomJavaVectorRDD]] with the default number of partitions and the default seed.
+   * `RandomRDDs.randomJavaVectorRDD` with the default number of partitions and the default seed.
    */
   @DeveloperApi
   @Since("1.6.0")
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/tree/DecisionTree.scala b/mllib/src/main/scala/org/apache/spark/mllib/tree/DecisionTree.scala
index 499c80767aea7cddee1c562be8d9b75509dce0f7..e5aece779826dcd7d423baf1365aafaab30230f0 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/tree/DecisionTree.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/tree/DecisionTree.scala
@@ -83,8 +83,8 @@ object DecisionTree extends Serializable with Logging {
    *                 categorical), depth of the tree, quantile calculation strategy, etc.
    * @return DecisionTreeModel that can be used for prediction.
    *
-   * @note Using [[org.apache.spark.mllib.tree.DecisionTree$#trainClassifier]]
-   * and [[org.apache.spark.mllib.tree.DecisionTree$#trainRegressor]]
+   * @note Using `org.apache.spark.mllib.tree.DecisionTree.trainClassifier`
+   * and `org.apache.spark.mllib.tree.DecisionTree.trainRegressor`
    * is recommended to clearly separate classification and regression.
    */
   @Since("1.0.0")
@@ -105,8 +105,8 @@ object DecisionTree extends Serializable with Logging {
    *                 1 internal node + 2 leaf nodes).
    * @return DecisionTreeModel that can be used for prediction.
    *
-   * @note Using [[org.apache.spark.mllib.tree.DecisionTree$#trainClassifier]]
-   * and [[org.apache.spark.mllib.tree.DecisionTree$#trainRegressor]]
+   * @note Using `org.apache.spark.mllib.tree.DecisionTree.trainClassifier`
+   * and `org.apache.spark.mllib.tree.DecisionTree.trainRegressor`
    * is recommended to clearly separate classification and regression.
    */
   @Since("1.0.0")
@@ -133,8 +133,8 @@ object DecisionTree extends Serializable with Logging {
    * @param numClasses Number of classes for classification. Default value of 2.
    * @return DecisionTreeModel that can be used for prediction.
    *
-   * @note Using [[org.apache.spark.mllib.tree.DecisionTree$#trainClassifier]]
-   * and [[org.apache.spark.mllib.tree.DecisionTree$#trainRegressor]]
+   * @note Using `org.apache.spark.mllib.tree.DecisionTree.trainClassifier`
+   * and `org.apache.spark.mllib.tree.DecisionTree.trainRegressor`
    * is recommended to clearly separate classification and regression.
    */
   @Since("1.2.0")
@@ -167,8 +167,8 @@ object DecisionTree extends Serializable with Logging {
    *                                indexed from 0: {0, 1, ..., k-1}.
    * @return DecisionTreeModel that can be used for prediction.
    *
-   * @note Using [[org.apache.spark.mllib.tree.DecisionTree$#trainClassifier]]
-   * and [[org.apache.spark.mllib.tree.DecisionTree$#trainRegressor]]
+   * @note Using `org.apache.spark.mllib.tree.DecisionTree.trainClassifier`
+   * and `org.apache.spark.mllib.tree.DecisionTree.trainRegressor`
    * is recommended to clearly separate classification and regression.
    */
   @Since("1.0.0")
@@ -218,7 +218,7 @@ object DecisionTree extends Serializable with Logging {
   }
 
   /**
-   * Java-friendly API for [[org.apache.spark.mllib.tree.DecisionTree$#trainClassifier]]
+   * Java-friendly API for `org.apache.spark.mllib.tree.DecisionTree.trainClassifier`
    */
   @Since("1.1.0")
   def trainClassifier(
@@ -262,7 +262,7 @@ object DecisionTree extends Serializable with Logging {
   }
 
   /**
-   * Java-friendly API for [[org.apache.spark.mllib.tree.DecisionTree$#trainRegressor]]
+   * Java-friendly API for `org.apache.spark.mllib.tree.DecisionTree.trainRegressor`
    */
   @Since("1.1.0")
   def trainRegressor(
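
A sketch of the recommended trainClassifier entry point, assuming an existing SparkContext `sc` and an illustrative LIBSVM input path:

    import org.apache.spark.mllib.tree.DecisionTree
    import org.apache.spark.mllib.util.MLUtils

    val data = MLUtils.loadLibSVMFile(sc, "/tmp/sample_libsvm_data.txt")
    val model = DecisionTree.trainClassifier(
      data,
      numClasses = 2,
      categoricalFeaturesInfo = Map.empty[Int, Int],
      impurity = "gini",
      maxDepth = 5,
      maxBins = 32)
    val prediction = model.predict(data.first().features)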
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/tree/GradientBoostedTrees.scala b/mllib/src/main/scala/org/apache/spark/mllib/tree/GradientBoostedTrees.scala
index 3e85678906b335173ab54b59ae6a1d56fca4eb51..df2c1b02f4f40710b67142d9c0dcf108250ad6b8 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/tree/GradientBoostedTrees.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/tree/GradientBoostedTrees.scala
@@ -136,7 +136,7 @@ object GradientBoostedTrees extends Logging {
   }
 
   /**
-   * Java-friendly API for [[org.apache.spark.mllib.tree.GradientBoostedTrees$#train]]
+   * Java-friendly API for `org.apache.spark.mllib.tree.GradientBoostedTrees.train`
    */
   @Since("1.2.0")
   def train(
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/tree/RandomForest.scala b/mllib/src/main/scala/org/apache/spark/mllib/tree/RandomForest.scala
index 1f6cb086cefa98effb0359b8b361171a6e5ea9e8..d1331a57de27bdaaffd5440697d7e08355814066 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/tree/RandomForest.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/tree/RandomForest.scala
@@ -172,7 +172,7 @@ object RandomForest extends Serializable with Logging {
   }
 
   /**
-   * Java-friendly API for [[org.apache.spark.mllib.tree.RandomForest$#trainClassifier]]
+   * Java-friendly API for `org.apache.spark.mllib.tree.RandomForest.trainClassifier`
    */
   @Since("1.2.0")
   def trainClassifier(
@@ -259,7 +259,7 @@ object RandomForest extends Serializable with Logging {
   }
 
   /**
-   * Java-friendly API for [[org.apache.spark.mllib.tree.RandomForest$#trainRegressor]]
+   * Java-friendly API for `org.apache.spark.mllib.tree.RandomForest.trainRegressor`
    */
   @Since("1.2.0")
   def trainRegressor(
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/util/MLUtils.scala b/mllib/src/main/scala/org/apache/spark/mllib/util/MLUtils.scala
index 6bb3271aacb44d2476fba2c8b28a8b5085475ff4..de66c7ca1d13fc5ba4d6a23d9d992d51360494a7 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/util/MLUtils.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/util/MLUtils.scala
@@ -149,7 +149,7 @@ object MLUtils extends Logging {
    * Save labeled data in LIBSVM format.
    * @param data an RDD of LabeledPoint to be saved
    * @param dir directory to save the data
-   * @see [[org.apache.spark.mllib.util.MLUtils#loadLibSVMFile]]
+   * @see `org.apache.spark.mllib.util.MLUtils.loadLibSVMFile`
    */
   @Since("1.0.0")
   def saveAsLibSVMFile(data: RDD[LabeledPoint], dir: String) {
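
A round-trip sketch of the save and load helpers cross-referenced here; `labeled` is an assumed RDD[LabeledPoint], `sc` an existing SparkContext, and the path is illustrative:

    import org.apache.spark.mllib.util.MLUtils
    // Write in LIBSVM format, then read back as an RDD[LabeledPoint].
    MLUtils.saveAsLibSVMFile(labeled, "/tmp/libsvm-out")
    val reloaded = MLUtils.loadLibSVMFile(sc, "/tmp/libsvm-out")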
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLImplicits.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLImplicits.scala
index 2caf723669f63b48166a9464f5a9134f927d5c01..375df64d39734623f3957e138bd488089dc30481 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SQLImplicits.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLImplicits.scala
@@ -259,7 +259,7 @@ abstract class SQLImplicits extends LowPrioritySQLImplicits {
  * Conflicting implicits are placed here to disambiguate resolution.
  *
  * Reasons for including specific implicits:
- * newProductEncoder - to disambiguate for [[List]]s which are both [[Seq]] and [[Product]]
+ * newProductEncoder - to disambiguate for `List`s which are both `Seq` and `Product`
  */
 trait LowPrioritySQLImplicits {
   /** @since 1.6.0 */
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/functions.scala b/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
index 9a080fd3c97c13a86f105a745dbd334ffe58dabe..cabe1f456375cec59e25b868996366c2e89dcdcb 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
@@ -633,7 +633,7 @@ object functions {
   def skewness(columnName: String): Column = skewness(Column(columnName))
 
   /**
-   * Aggregate function: alias for [[stddev_samp]].
+   * Aggregate function: alias for `stddev_samp`.
    *
    * @group agg_funcs
    * @since 1.6.0
@@ -641,7 +641,7 @@ object functions {
   def stddev(e: Column): Column = withAggregateFunction { StddevSamp(e.expr) }
 
   /**
-   * Aggregate function: alias for [[stddev_samp]].
+   * Aggregate function: alias for `stddev_samp`.
    *
    * @group agg_funcs
    * @since 1.6.0
@@ -717,7 +717,7 @@ object functions {
   def sumDistinct(columnName: String): Column = sumDistinct(Column(columnName))
 
   /**
-   * Aggregate function: alias for [[var_samp]].
+   * Aggregate function: alias for `var_samp`.
    *
    * @group agg_funcs
    * @since 1.6.0
@@ -725,7 +725,7 @@ object functions {
   def variance(e: Column): Column = withAggregateFunction { VarianceSamp(e.expr) }
 
   /**
-   * Aggregate function: alias for [[var_samp]].
+   * Aggregate function: alias for `var_samp`.
    *
    * @group agg_funcs
    * @since 1.6.0