diff --git a/python/pyspark/sql.py b/python/pyspark/sql.py
index abb284d1e3dd9470277365ca07e7423aa0f55149..ae288471b0e51dee35a00c47a8879e3bbed74f77 100644
--- a/python/pyspark/sql.py
+++ b/python/pyspark/sql.py
@@ -1178,7 +1178,7 @@ def _create_cls(dataType):
 
         def asDict(self):
             """ Return as a dict """
-            return dict(zip(self.__FIELDS__, self))
+            return dict((n, getattr(self, n)) for n in self.__FIELDS__)
 
         def __repr__(self):
             # call collect __repr__ for nested objects
diff --git a/python/pyspark/tests.py b/python/pyspark/tests.py
index a01bd8d4157874fd36e8f2a73c6503f489076646..29bcd38908d1068b459e3792472ee5f2eb31ed94 100644
--- a/python/pyspark/tests.py
+++ b/python/pyspark/tests.py
@@ -803,7 +803,7 @@ class SQLTests(ReusedPySparkTestCase):
     @classmethod
     def tearDownClass(cls):
         ReusedPySparkTestCase.tearDownClass()
-        shutil.rmtree(cls.tempdir.name)
+        shutil.rmtree(cls.tempdir.name, ignore_errors=True)
 
     def setUp(self):
         self.sqlCtx = SQLContext(self.sc)
@@ -930,8 +930,9 @@ class SQLTests(ReusedPySparkTestCase):
         rdd = self.sc.parallelize([row])
         srdd = self.sqlCtx.inferSchema(rdd)
         srdd.registerTempTable("test")
-        row = self.sqlCtx.sql("select l[0].a AS la from test").first()
-        self.assertEqual(1, row.asDict()["la"])
+        row = self.sqlCtx.sql("select l, d from test").first()
+        self.assertEqual(1, row.asDict()["l"][0].a)
+        self.assertEqual(1.0, row.asDict()['d']['key'].c)
 
     def test_infer_schema_with_udt(self):
         from pyspark.tests import ExamplePoint, ExamplePointUDT
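
The asDict() change above matters because the Row classes generated by _create_cls store nested struct data as raw tuples and only convert them to Row objects lazily, inside each field's property getter; zipping __FIELDS__ with the raw tuple values bypasses those getters, while getattr() goes through them, which is what the updated test exercises via row.asDict()["l"][0].a and row.asDict()['d']['key'].c. Below is a minimal, self-contained sketch of that distinction. It is not PySpark code: NestedRow, OuterRow, asDict_old and asDict_new are illustrative names chosen here, standing in for the generated Row classes and the before/after bodies of asDict().

    # Sketch only: mimics the lazy-conversion pattern of the generated Row
    # classes to show why asDict() must use getattr() rather than zip().

    class NestedRow(tuple):
        """Stand-in for a Row generated for a nested struct with field 'a'."""
        __FIELDS__ = ["a"]
        a = property(lambda self: self[0])


    class OuterRow(tuple):
        """Stand-in for a Row with one field 'l' holding a list of structs."""
        __FIELDS__ = ["l"]

        # The property converts raw tuples into NestedRow objects on access,
        # mirroring the lazy conversion done by the generated getters.
        l = property(lambda self: [NestedRow(x) for x in self[0]])

        def asDict_old(self):
            # zip() pairs field names with the *raw* stored values,
            # so nested elements stay plain tuples.
            return dict(zip(self.__FIELDS__, self))

        def asDict_new(self):
            # getattr() goes through the property, so nested elements
            # come back as Row-like objects with named attributes.
            return dict((n, getattr(self, n)) for n in self.__FIELDS__)


    row = OuterRow(([(1,)],))          # one field: a list with one nested struct
    print(row.asDict_new()["l"][0].a)  # 1   -- attribute access works
    print(row.asDict_old()["l"][0])    # (1,) -- raw tuple, no `.a` attribute

The tearDownClass change (ignore_errors=True) is independent of this: it just keeps the test suite from failing if the temporary directory has already been removed.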