diff --git a/ec2/spark_ec2.py b/ec2/spark_ec2.py
index c6d5a1f0d0a81b64c3aeb4db23b45fafc64a4232..724811eaa1bddf8cb48fb8c0d7ec75738d67a48a 100755
--- a/ec2/spark_ec2.py
+++ b/ec2/spark_ec2.py
@@ -19,8 +19,9 @@
 # limitations under the License.
 #
 
-from __future__ import with_statement, print_function
+from __future__ import division, print_function, with_statement
 
+import codecs
 import hashlib
 import itertools
 import logging
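
The division import added here pairs with the floor-division fix in the
last hunk: once it is in effect, / is true division on Python 2 as well as
on Python 3, so integer division has to be spelled //. A minimal standalone
sketch (not part of the patch) of the behavior change:

    from __future__ import division

    print(7 / 2)   # 3.5 on both Python 2 and 3 once division is in effect
    print(7 // 2)  # 3, floor division, which plain / did for ints on Python 2
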
@@ -47,6 +48,8 @@ if sys.version < "3":
 else:
     from urllib.request import urlopen, Request
     from urllib.error import HTTPError
+    raw_input = input
+    xrange = range
 
 SPARK_EC2_VERSION = "1.3.1"
 SPARK_EC2_DIR = os.path.dirname(os.path.realpath(__file__))
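
The two aliases let the rest of the module keep calling the Python 2 names
unconditionally. A minimal sketch (not part of the patch) of the same shim
pattern, written with the version check inverted:

    import sys

    if sys.version >= "3":
        # Bind the 2.x names to their 3.x equivalents; on Python 2 the
        # builtins raw_input and xrange already exist, so nothing to do.
        raw_input = input
        xrange = range

    for i in xrange(3):  # now runs unchanged on both interpreters
        print(i)
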
@@ -423,13 +426,14 @@ def get_spark_ami(opts):
         b=opts.spark_ec2_git_branch)
 
     ami_path = "%s/%s/%s" % (ami_prefix, opts.region, instance_type)
+    reader = codecs.getreader("ascii")
     try:
-        ami = urlopen(ami_path).read().strip()
-        print("Spark AMI: " + ami)
+        ami = reader(urlopen(ami_path)).read().strip()
     except:
         print("Could not resolve AMI at: " + ami_path, file=stderr)
         sys.exit(1)
 
+    print("Spark AMI: " + ami)
     return ami
 
 
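
Two things change here. First, on Python 3 urlopen() returns a binary
stream, so ami would be bytes and "Spark AMI: " + ami would raise
TypeError; wrapping the response in an ASCII StreamReader makes read()
return text on both versions. Second, the success print moves out of the
try block, so the bare except only catches failures of the network call
itself. A minimal sketch (not part of the patch; the URL is a placeholder)
of the decoding wrapper:

    import codecs

    try:
        from urllib.request import urlopen  # Python 3
    except ImportError:
        from urllib2 import urlopen         # Python 2

    # codecs.getreader("ascii") returns a StreamReader class; instantiating
    # it around the response decodes the byte stream, so read() yields str
    # on Python 3 and unicode on Python 2, and string concatenation works.
    reader = codecs.getreader("ascii")
    body = reader(urlopen("http://example.com/")).read().strip()
    print("First characters: " + body[:15])
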
@@ -750,7 +754,7 @@ def setup_cluster(conn, master_nodes, slave_nodes, opts, deploy_ssh_key):
                'mapreduce', 'spark-standalone', 'tachyon']
 
     if opts.hadoop_major_version == "1":
-        modules = filter(lambda x: x != "mapreduce", modules)
+        modules = list(filter(lambda x: x != "mapreduce", modules))
 
     if opts.ganglia:
         modules.append('ganglia')
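
On Python 3, filter() returns a lazy iterator instead of a list, so without
the list() wrapper the modules.append('ganglia') call below would raise
AttributeError. A minimal sketch (not part of the patch) of the failure
mode and the fix:

    modules = ['spark', 'ephemeral-hdfs', 'persistent-hdfs',
               'mapreduce', 'spark-standalone', 'tachyon']

    # Without list(), this is a filter object on Python 3 and has no append.
    modules = list(filter(lambda x: x != "mapreduce", modules))
    modules.append('ganglia')  # safe: modules is a real list on 2 and 3
    print(modules)
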
@@ -1160,7 +1164,7 @@ def get_zones(conn, opts):
 
 # Gets the number of items in a partition
 def get_partition(total, num_partitions, current_partitions):
-    num_slaves_this_zone = total / num_partitions
+    num_slaves_this_zone = total // num_partitions
     if (total % num_partitions) - current_partitions > 0:
         num_slaves_this_zone += 1
     return num_slaves_this_zone
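
With division imported at the top of the module, total / num_partitions is
a float even on Python 2, so the per-zone slave count would come back as,
say, 4.0; // keeps it an integer on both versions. A quick standalone check
(not part of the patch):

    from __future__ import division

    def get_partition(total, num_partitions, current_partitions):
        num_slaves_this_zone = total // num_partitions  # integer count
        if (total % num_partitions) - current_partitions > 0:
            num_slaves_this_zone += 1
        return num_slaves_this_zone

    print(get_partition(10, 3, 0))  # 4, since 10 // 3 == 3 plus one spillover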