From faabdfa2bd416ae514961535f1953e8e9e8b1f3f Mon Sep 17 00:00:00 2001
From: felixcheung <felixcheung_m@hotmail.com>
Date: Wed, 25 Nov 2015 10:36:35 -0800
Subject: [PATCH] [SPARK-11984][SQL][PYTHON] Fix typos in doc for pivot for
 scala and python

Author: felixcheung <felixcheung_m@hotmail.com>

Closes #9967 from felixcheung/pypivotdoc.
---
 python/pyspark/sql/group.py                                 | 6 +++---
 .../src/main/scala/org/apache/spark/sql/GroupedData.scala   | 6 +++---
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/python/pyspark/sql/group.py b/python/pyspark/sql/group.py
index d8ed7eb2dd..1911588309 100644
--- a/python/pyspark/sql/group.py
+++ b/python/pyspark/sql/group.py
@@ -169,11 +169,11 @@ class GroupedData(object):
 
     @since(1.6)
     def pivot(self, pivot_col, values=None):
-        """Pivots a column of the current DataFrame and preform the specified aggregation.
+        """Pivots a column of the current DataFrame and perform the specified aggregation.
 
         :param pivot_col: Column to pivot
-        :param values: Optional list of values of pivotColumn that will be translated to columns in
-            the output data frame. If values are not provided the method with do an immediate call
+        :param values: Optional list of values of pivot column that will be translated to columns in
+            the output DataFrame. If values are not provided, the method will do an immediate call
             to .distinct() on the pivot column.
 
         >>> df4.groupBy("year").pivot("course", ["dotNET", "Java"]).sum("earnings").collect()
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/GroupedData.scala b/sql/core/src/main/scala/org/apache/spark/sql/GroupedData.scala
index abd531c4ba..13341a88a6 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/GroupedData.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/GroupedData.scala
@@ -282,7 +282,7 @@ class GroupedData protected[sql](
   }
 
   /**
-   * Pivots a column of the current [[DataFrame]] and preform the specified aggregation.
+   * Pivots a column of the current [[DataFrame]] and performs the specified aggregation.
    * There are two versions of pivot function: one that requires the caller to specify the list
    * of distinct values to pivot on, and one that does not. The latter is more concise but less
    * efficient, because Spark needs to first compute the list of distinct values internally.
@@ -321,7 +321,7 @@ class GroupedData protected[sql](
   }
 
   /**
-   * Pivots a column of the current [[DataFrame]] and preform the specified aggregation.
+   * Pivots a column of the current [[DataFrame]] and performs the specified aggregation.
    * There are two versions of pivot function: one that requires the caller to specify the list
    * of distinct values to pivot on, and one that does not. The latter is more concise but less
    * efficient, because Spark needs to first compute the list of distinct values internally.
@@ -353,7 +353,7 @@ class GroupedData protected[sql](
   }
 
   /**
-   * Pivots a column of the current [[DataFrame]] and preform the specified aggregation.
+   * Pivots a column of the current [[DataFrame]] and performs the specified aggregation.
    * There are two versions of pivot function: one that requires the caller to specify the list
    * of distinct values to pivot on, and one that does not. The latter is more concise but less
    * efficient, because Spark needs to first compute the list of distinct values internally.
-- 
GitLab