diff --git a/python/pyspark/sql/types.py b/python/pyspark/sql/types.py
index f7cd4b80ca91d2502652a94b5026b427d422761a..30ab130f29481e8c72abd8e83a3fbf8fb1eb9f29 100644
--- a/python/pyspark/sql/types.py
+++ b/python/pyspark/sql/types.py
@@ -1359,7 +1359,13 @@ def _create_row(fields, values):
 class Row(tuple):
 
     """
-    A row in L{DataFrame}. The fields in it can be accessed like attributes.
+    A row in L{DataFrame}.
+    The fields in it can be accessed:
+
+    * like attributes (``row.key``)
+    * like dictionary values (``row[key]``)
+
+    ``key in row`` will search through the row's keys.
 
     Row can be used to create a row object by using named arguments;
     the fields will be sorted by names.
@@ -1371,6 +1377,10 @@ class Row(tuple):
     ('Alice', 11)
     >>> row.name, row.age
     ('Alice', 11)
+    >>> 'name' in row
+    True
+    >>> 'wrong_key' in row
+    False
 
     Row can also be used to create another Row-like class, which can
     then be used to create Row objects, such as
@@ -1378,6 +1388,10 @@ class Row(tuple):
     >>> Person = Row("name", "age")
     >>> Person
     <Row(name, age)>
+    >>> 'name' in Person
+    True
+    >>> 'wrong_key' in Person
+    False
     >>> Person("Alice", 11)
     Row(name='Alice', age=11)
     """
@@ -1431,6 +1445,12 @@ class Row(tuple):
         else:
             return dict(zip(self.__fields__, self))
 
+    def __contains__(self, item):
+        if hasattr(self, "__fields__"):
+            return item in self.__fields__  # named Row: search field names
+        else:
+            return super(Row, self).__contains__(item)  # plain tuple search
+
     # let the object act like a class
     def __call__(self, *args):
         """create new Row object"""