diff --git a/answer_classifier_cached_features/train_no_pretraining.py b/answer_classifier_cached_features/train_no_pretraining.py
index d6a5b8a6dc5d951ee1a6736974d3e49a46d58a5c..3b3899b9a69b366fbbf08766f750423858fc0ba5 100644
--- a/answer_classifier_cached_features/train_no_pretraining.py
+++ b/answer_classifier_cached_features/train_no_pretraining.py
@@ -3,6 +3,7 @@ import object_attribute_classifier_cached_features.inference as feature_graph
 import region_relevance_network.inference as relevance_graph
 import answer_classifier_cached_features.inference as answer_graph
 from tftools import var_collect, placeholder_management
+import tftools.train as multi_rate_train
 import tftools.data
 import losses
 import constants
@@ -455,6 +456,7 @@ def create_vqa_batch_generator():
     data_mgr = vqa_data.data(
         constants.vqa_train_resnet_feat_dir,
         constants.vqa_train_anno,
+        constants.vqa_train_subset_qids,
         constants.vocab_json,
         constants.vqa_answer_vocab_json,
         constants.image_size,
@@ -462,11 +464,13 @@ def create_vqa_batch_generator():
         constants.num_negative_answers,
         resnet_feat_dim=constants.resnet_feat_dim)
 
+    num_train_subset_questions = len(data_mgr.qids)
+
     index_generator = tftools.data.random(
         constants.answer_batch_size, 
-        constants.num_train_questions, 
+        num_train_subset_questions, 
         constants.answer_num_epochs, 
-        constants.answer_offset)
+        0)
     
     batch_generator = tftools.data.async_batch_generator(
         data_mgr, 
@@ -483,16 +487,20 @@ def create_vgenome_batch_generator():
         constants.object_labels_json,
         constants.attribute_labels_json,
         constants.regions_json,
+        constants.genome_train_subset_region_ids,
         constants.image_size,
         channels=3,
         resnet_feat_dim=constants.resnet_feat_dim,
         mean_image_filename=None)
 
+    num_train_subset_regions = len(data_mgr.region_ids)
+    # print num_train_subset_regions  # debug output removed from training path
+    
     index_generator = tftools.data.random(
         constants.num_regions_with_labels, 
-        constants.region_num_samples, 
+        num_train_subset_regions,
         constants.region_num_epochs, 
-        constants.region_offset)
+        0)
     
     batch_generator = tftools.data.async_batch_generator(
         data_mgr, 
@@ -515,7 +523,9 @@ class attach_optimizer():
         self.lr = lr
         with graph.tf_graph.as_default():
             all_trainable_vars = tf.trainable_variables()
-            self.not_to_train = []#+ graph.object_attribute_vars
+
+            self.not_to_train = graph.object_attribute_vars + graph.word_vec_vars
+
             vars_to_train = [
                 var for var in all_trainable_vars
                 if var not in self.not_to_train]
@@ -527,13 +537,28 @@ class attach_optimizer():
 
             all_vars = tf.all_variables()
             self.ops = dict()
+            
+            self.optimizer = multi_rate_train.MultiRateOptimizer(
+                tf.train.AdamOptimizer)
+
+            self.optimizer.add_variables(
+                self.graph.object_attribute_vars + self.graph.word_vec_vars,
+                learning_rate=1.0 * self.lr)
 
-            self.add_adam_optimizer(
-                graph.total_loss,
+            
+            self.optimizer.add_variables(
                 vars_to_train,
-                'optimizer')
+                learning_rate=self.lr)
+
+            self.train_op = self.optimizer.minimize(graph.total_loss)
+                
+            # self.add_adam_optimizer(
+            #     graph.total_loss,
+            #     vars_to_train,
+            #     'optimizer')
 
-            self.train_op = self.group_all_train_ops()
+            # self.train_op = self.group_all_train_ops()
+            
             all_vars_with_opt_vars = tf.all_variables()
             self.opt_vars = [var for var in all_vars_with_opt_vars if var not in all_vars]
 
diff --git a/constants_vision_gpu_2.py b/constants_vision_gpu_2.py
index aa195ba13d2ec704bef230a185a42b23a028c1f6..16ae816e0bf74a8782baea6a193fef115581582c 100644
--- a/constants_vision_gpu_2.py
+++ b/constants_vision_gpu_2.py
@@ -5,7 +5,7 @@ def mkdir_if_not_exists(dir_name):
     if not os.path.exists(dir_name):
         os.mkdir(dir_name)
         
-experiment_name = 'QA_explicit_dot_joint_training_pretrained_fix_pretrained'
+experiment_name = 'QA_genome_split'
 #experiment_name = 'object_attribute_classifier_large_images'
 # Global output directory (all subexperiments will be saved here)
 global_output_dir = '/data/tanmay/GenVQA_Exp_Results'
@@ -56,6 +56,18 @@ vocab_json = os.path.join(
     data_absolute_path,
     'restructured/vocab_subset.json')
 
+genome_train_subset_region_ids = os.path.join(
+    data_absolute_path,
+    'restructured/train_subset_region_ids.json')
+
+genome_train_held_out_region_ids = os.path.join(
+    data_absolute_path,
+    'restructured/train_held_out_region_ids.json')
+
+genome_test_region_ids = os.path.join(
+    data_absolute_path,
+    'restructured/test_region_ids.json')
+
 num_object_labels = 1000
 num_attribute_labels = 1000
 
@@ -63,12 +75,12 @@ num_attribute_labels = 1000
 # First 80% meant to be used for training
 # Next 10% is set aside for validation
 # Last 10% is to be used for testing
-num_total_regions = 1951768
-num_train_regions = 1561416 # First 80%
-num_val_regions = 195176 # Next 10%
-num_test_regions = num_total_regions \
-                   - num_train_regions \
-                   - num_val_regions 
+# num_total_regions = 1951768
+# num_train_regions = 1561416 # First 80%
+# num_val_regions = 195176 # Next 10%
+# num_test_regions = num_total_regions \
+#                    - num_train_regions \
+#                    - num_val_regions 
 
 # Pretrained resnet ckpt
 resnet_ckpt = '/home/nfs/tgupta6/data/Resnet/' + \
@@ -88,7 +100,7 @@ pretrained_vocab_word_vectors_npy = os.path.join(
 
 # Object Attribute Classifier Training Params
 region_batch_size = 200
-region_num_samples = num_train_regions
+# region_num_samples = num_train_regions
 region_num_epochs = 4
 region_offset = 0
 region_queue_size = 400
@@ -171,11 +183,11 @@ vqa_answer_vocab_json = os.path.join(
 answer_batch_size = 50
 answer_num_epochs = 10
 answer_offset = 0
-answer_obj_atr_loss_wt = 0.1
+answer_obj_atr_loss_wt = 0.0
 answer_regularization_coeff = 1e-5
 answer_queue_size = 500
 answer_embedding_dim = 600
-answer_lr = 1e-4
+answer_lr = 1e-3
 answer_log_every_n_iter = 500
 answer_output_dir = os.path.join(
     global_experiment_dir,
@@ -184,8 +196,8 @@ answer_output_dir = os.path.join(
 mkdir_if_not_exists(answer_output_dir)
 
 pretrained_model = '/home/nfs/tgupta6/projects/GenVQA/Exp_Results/' +\
-                   'pretrained_object_attribute_classifier/' +\
-                   'obj_atr_model_77500'
+                   'object_attribute_classifier_large_images_vqa_split/' +\
+                   'object_attribute_classifiers/model-80000'
 
 answer_model = os.path.join(
     answer_output_dir,
@@ -195,12 +207,12 @@ answer_model = os.path.join(
 num_regions_with_labels = 100
 
 # Answer fine tune params
-answer_fine_tune_from_iter = 18500
+answer_fine_tune_from_iter = 22500
 answer_fine_tune_from = answer_model + '-' + str(answer_fine_tune_from_iter)
 
 # Answer eval params
 answer_eval_on = 'val'
-answer_model_to_eval = answer_model + '-18500'
+answer_model_to_eval = answer_model + '-69500'
 
 answer_eval_data_json = os.path.join(
     answer_output_dir,