diff --git a/core/src/main/scala/org/apache/spark/TaskContextImpl.scala b/core/src/main/scala/org/apache/spark/TaskContextImpl.scala
index ea8dcdfd5d7d9ddd127c7412558d3465d163ed6f..f346cf8d65806bfc78ebea1fdce523d2613fb46f 100644
--- a/core/src/main/scala/org/apache/spark/TaskContextImpl.scala
+++ b/core/src/main/scala/org/apache/spark/TaskContextImpl.scala
@@ -38,7 +38,7 @@ import org.apache.spark.util._
  * callbacks are protected by locking on the context instance. For instance, this ensures
  * that you cannot add a completion listener in one thread while we are completing (and calling
  * the completion listeners) in another thread. Other state is immutable, however the exposed
- * [[TaskMetrics]] & [[MetricsSystem]] objects are not thread safe.
+ * `TaskMetrics` & `MetricsSystem` objects are not thread safe.
  */
 private[spark] class TaskContextImpl(
     val stageId: Int,
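
For context on the listener-registration guarantee described in this comment, a minimal sketch of the public TaskContext API that TaskContextImpl backs (TaskContextImpl itself is private[spark]; the object name, job, and data below are made up for illustration, assuming a Spark 2.x-era API):

    import org.apache.spark.{SparkConf, SparkContext, TaskContext}

    object TaskListenerSketch {
      def main(args: Array[String]): Unit = {
        val sc = new SparkContext(
          new SparkConf().setMaster("local[2]").setAppName("task-listener-sketch"))
        sc.parallelize(1 to 100, 4).foreachPartition { iter =>
          // Registration is synchronized on the context instance, so a listener cannot
          // be added while another thread is running the completion callbacks.
          TaskContext.get().addTaskCompletionListener { ctx: TaskContext =>
            println(s"partition ${ctx.partitionId()} of stage ${ctx.stageId()} completed")
          }
          iter.foreach(_ => ())
        }
        sc.stop()
      }
    }
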
diff --git a/mllib/src/main/scala/org/apache/spark/ml/fpm/FPGrowth.scala b/mllib/src/main/scala/org/apache/spark/ml/fpm/FPGrowth.scala
index e2bc270b38da75dc056bb5652ac9edfb7bc82d05..65cc80619569e877e2056523bb7cc5ab8fc1193b 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/fpm/FPGrowth.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/fpm/FPGrowth.scala
@@ -69,8 +69,8 @@ private[fpm] trait FPGrowthParams extends Params with HasPredictionCol {
   def getMinSupport: Double = $(minSupport)
 
   /**
-   * Number of partitions (>=1) used by parallel FP-growth. By default the param is not set, and
-   * partition number of the input dataset is used.
+   * Number of partitions (at least 1) used by parallel FP-growth. By default the param is not
+   * set, and the number of partitions of the input dataset is used.
    * @group expertParam
    */
   @Since("2.2.0")
diff --git a/sql/core/src/main/java/org/apache/spark/api/java/function/FlatMapGroupsWithStateFunction.java b/sql/core/src/main/java/org/apache/spark/api/java/function/FlatMapGroupsWithStateFunction.java
index 026b37cabbf1c75ebdb9abcf143aad344990ce01..802949c0ddb60e34f1f5664e31c1cb1134f8dc04 100644
--- a/sql/core/src/main/java/org/apache/spark/api/java/function/FlatMapGroupsWithStateFunction.java
+++ b/sql/core/src/main/java/org/apache/spark/api/java/function/FlatMapGroupsWithStateFunction.java
@@ -27,7 +27,7 @@ import org.apache.spark.sql.streaming.GroupState;
 /**
  * ::Experimental::
  * Base interface for a map function used in
- * {@link org.apache.spark.sql.KeyValueGroupedDataset#flatMapGroupsWithState(
+ * {@code org.apache.spark.sql.KeyValueGroupedDataset.flatMapGroupsWithState(
  * FlatMapGroupsWithStateFunction, org.apache.spark.sql.streaming.OutputMode,
  * org.apache.spark.sql.Encoder, org.apache.spark.sql.Encoder)}
  * @since 2.1.1
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/KeyValueGroupedDataset.scala b/sql/core/src/main/scala/org/apache/spark/sql/KeyValueGroupedDataset.scala
index 87c5621768872d905ae58f2d427783eb3e72fa46..022c2f5629e86bb4cb3c80a73cf7183019e0343b 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/KeyValueGroupedDataset.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/KeyValueGroupedDataset.scala
@@ -298,7 +298,7 @@ class KeyValueGroupedDataset[K, V] private[sql](
    * For a static batch Dataset, the function will be invoked once per group. For a streaming
    * Dataset, the function will be invoked for each group repeatedly in every trigger, and
    * updates to each group's state will be saved across invocations.
-   * See [[GroupState]] for more details.
+   * See `GroupState` for more details.
    *
    * @tparam S The type of the user-defined state. Must be encodable to Spark SQL types.
    * @tparam U The type of the output objects. Must be encodable to Spark SQL types.
@@ -328,7 +328,7 @@ class KeyValueGroupedDataset[K, V] private[sql](
    * For a static batch Dataset, the function will be invoked once per group. For a streaming
    * Dataset, the function will be invoked for each group repeatedly in every trigger, and
    * updates to each group's state will be saved across invocations.
-   * See [[GroupState]] for more details.
+   * See `GroupState` for more details.
    *
    * @tparam S The type of the user-defined state. Must be encodable to Spark SQL types.
    * @tparam U The type of the output objects. Must be encodable to Spark SQL types.
@@ -360,7 +360,7 @@ class KeyValueGroupedDataset[K, V] private[sql](
    * For a static batch Dataset, the function will be invoked once per group. For a streaming
    * Dataset, the function will be invoked for each group repeatedly in every trigger, and
    * updates to each group's state will be saved across invocations.
-   * See [[GroupState]] for more details.
+   * See `GroupState` for more details.
    *
    * @tparam S The type of the user-defined state. Must be encodable to Spark SQL types.
    * @tparam U The type of the output objects. Must be encodable to Spark SQL types.
@@ -400,7 +400,7 @@ class KeyValueGroupedDataset[K, V] private[sql](
    * For a static batch Dataset, the function will be invoked once per group. For a streaming
    * Dataset, the function will be invoked for each group repeatedly in every trigger, and
    * updates to each group's state will be saved across invocations.
-   * See [[GroupState]] for more details.
+   * See `GroupState` for more details.
    *
    * @tparam S The type of the user-defined state. Must be encodable to Spark SQL types.
    * @tparam U The type of the output objects. Must be encodable to Spark SQL types.
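
To accompany the GroupState references above, a minimal batch-mode sketch of mapGroupsWithState on a KeyValueGroupedDataset (the case classes, data, and session setup are made up for illustration; for a static batch Dataset the function runs once per group, starting from empty state):

    import org.apache.spark.sql.SparkSession

    case class Event(user: String, count: Int)
    case class UserTotal(user: String, total: Int)

    val spark = SparkSession.builder().master("local[2]").appName("map-groups-sketch").getOrCreate()
    import spark.implicits._

    val events = Seq(Event("a", 1), Event("a", 2), Event("b", 5)).toDS()

    val totals = events
      .groupByKey(_.user)
      .mapGroupsWithState[Int, UserTotal] { (user, group, state) =>
        // State starts empty for a batch query; update it and return one row per group.
        val sum = state.getOption.getOrElse(0) + group.map(_.count).sum
        state.update(sum)
        UserTotal(user, sum)
      }

    totals.show()
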
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/streaming/GroupState.scala b/sql/core/src/main/scala/org/apache/spark/sql/streaming/GroupState.scala
index 60a4d0d8f98a1af1250a646f57c273f1065716e9..15df906ca7b1369e8de1569489b600acb36d20e0 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/streaming/GroupState.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/streaming/GroupState.scala
@@ -18,18 +18,18 @@
 package org.apache.spark.sql.streaming
 
 import org.apache.spark.annotation.{Experimental, InterfaceStability}
-import org.apache.spark.sql.{Encoder, KeyValueGroupedDataset}
+import org.apache.spark.sql.KeyValueGroupedDataset
 import org.apache.spark.sql.catalyst.plans.logical.LogicalGroupState
 
 /**
  * :: Experimental ::
  *
  * Wrapper class for interacting with per-group state data in `mapGroupsWithState` and
- * `flatMapGroupsWithState` operations on [[KeyValueGroupedDataset]].
+ * `flatMapGroupsWithState` operations on `KeyValueGroupedDataset`.
  *
  * Detail description on `[map/flatMap]GroupsWithState` operation
  * --------------------------------------------------------------
- * Both, `mapGroupsWithState` and `flatMapGroupsWithState` in [[KeyValueGroupedDataset]]
+ * Both `mapGroupsWithState` and `flatMapGroupsWithState` in `KeyValueGroupedDataset`
  * will invoke the user-given function on each group (defined by the grouping function in
  * `Dataset.groupByKey()`) while maintaining user-defined per-group state between invocations.
  * For a static batch Dataset, the function will be invoked once per group. For a streaming
@@ -70,8 +70,8 @@ import org.apache.spark.sql.catalyst.plans.logical.LogicalGroupState
  *    `[map|flatMap]GroupsWithState`, but the exact timeout duration/timestamp is configurable per
  *    group by calling `setTimeout...()` in `GroupState`.
  *  - Timeouts can be either based on processing time (i.e.
- *    [[GroupStateTimeout.ProcessingTimeTimeout]]) or event time (i.e.
- *    [[GroupStateTimeout.EventTimeTimeout]]).
+ *    `GroupStateTimeout.ProcessingTimeTimeout`) or event time (i.e.
+ *    `GroupStateTimeout.EventTimeTimeout`).
  *  - With `ProcessingTimeTimeout`, the timeout duration can be set by calling
  *    `GroupState.setTimeoutDuration`. The timeout will occur when the clock has advanced by the set
  *    duration. Guarantees provided by this timeout with a duration of D ms are as follows:
@@ -177,7 +177,7 @@ import org.apache.spark.sql.catalyst.plans.logical.LogicalGroupState
  * }}}
  *
  * @tparam S User-defined type of the state to be stored for each group. Must be encodable into
- *           Spark SQL types (see [[Encoder]] for more details).
+ *           Spark SQL types (see `Encoder` for more details).
  * @since 2.2.0
  */
 @Experimental
@@ -224,7 +224,7 @@ trait GroupState[S] extends LogicalGroupState[S] {
   /**
    * Set the timeout duration for this key as a string. For example, "1 hour", "2 days", etc.
    *
-   * @note, ProcessingTimeTimeout must be enabled in `[map/flatmap]GroupsWithStates`.
+   * @note ProcessingTimeTimeout must be enabled in `[map/flatMap]GroupsWithState`.
    */
   @throws[IllegalArgumentException]("if 'duration' is not a valid duration")
   @throws[IllegalStateException]("when state is either not initialized, or already removed")
@@ -240,7 +240,7 @@ trait GroupState[S] extends LogicalGroupState[S] {
    * Set the timeout timestamp for this key as milliseconds in epoch time.
    * This timestamp cannot be older than the current watermark.
    *
-   * @note, EventTimeTimeout must be enabled in `[map/flatmap]GroupsWithStates`.
+   * @note EventTimeTimeout must be enabled in `[map/flatMap]GroupsWithState`.
    */
   def setTimeoutTimestamp(timestampMs: Long): Unit
 
@@ -254,7 +254,7 @@ trait GroupState[S] extends LogicalGroupState[S] {
    * The final timestamp (including the additional duration) cannot be older than the
    * current watermark.
    *
-   * @note, EventTimeTimeout must be enabled in `[map/flatmap]GroupsWithStates`.
+   * @note EventTimeTimeout must be enabled in `[map/flatMap]GroupsWithState`.
    */
   def setTimeoutTimestamp(timestampMs: Long, additionalDuration: String): Unit
 
@@ -265,7 +265,7 @@ trait GroupState[S] extends LogicalGroupState[S] {
    * Set the timeout timestamp for this key as a java.sql.Date.
    * This timestamp cannot be older than the current watermark.
    *
-   * @note, EventTimeTimeout must be enabled in `[map/flatmap]GroupsWithStates`.
+   * @note EventTimeTimeout must be enabled in `[map/flatMap]GroupsWithState`.
    */
   def setTimeoutTimestamp(timestamp: java.sql.Date): Unit
 
@@ -279,7 +279,7 @@ trait GroupState[S] extends LogicalGroupState[S] {
    * The final timestamp (including the additional duration) cannot be older than the
    * current watermark.
    *
-   * @note, EventTimeTimeout must be enabled in `[map/flatmap]GroupsWithStates`.
+   * @note EventTimeTimeout must be enabled in `[map/flatMap]GroupsWithState`.
    */
   def setTimeoutTimestamp(timestamp: java.sql.Date, additionalDuration: String): Unit
 }
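
Finally, a streaming sketch tying the timeout semantics above together, using flatMapGroupsWithState with GroupStateTimeout.ProcessingTimeTimeout (the socket source, host/port, case classes, and the 10-minute duration are made up for illustration):

    import org.apache.spark.sql.SparkSession
    import org.apache.spark.sql.streaming.{GroupStateTimeout, OutputMode}

    case class Click(user: String)
    case class SessionUpdate(user: String, clicks: Int, expired: Boolean)

    val spark = SparkSession.builder().master("local[2]").appName("group-state-sketch").getOrCreate()
    import spark.implicits._

    val clicks = spark.readStream
      .format("socket").option("host", "localhost").option("port", 9999).load()
      .as[String]
      .map(Click(_))

    val sessions = clicks
      .groupByKey(_.user)
      .flatMapGroupsWithState[Int, SessionUpdate](
          OutputMode.Update(), GroupStateTimeout.ProcessingTimeTimeout()) { (user, events, state) =>
        if (state.hasTimedOut) {
          // Invoked with an empty iterator once the configured timeout fires.
          val finalCount = state.getOption.getOrElse(0)
          state.remove()
          Iterator(SessionUpdate(user, finalCount, expired = true))
        } else {
          val count = state.getOption.getOrElse(0) + events.size
          state.update(count)
          state.setTimeoutDuration("10 minutes")  // requires ProcessingTimeTimeout above
          Iterator(SessionUpdate(user, count, expired = false))
        }
      }

    val query = sessions.writeStream.outputMode("update").format("console").start()
    query.awaitTermination()
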