diff --git a/answer_classifier_cached_features/train.py b/answer_classifier_cached_features/train.py
index 1db0b33e29492b948d8fa9f1a57756d11daaf193..1cfc90e7fcbdf004b6790c5bdbc3e9d35a802845 100644
--- a/answer_classifier_cached_features/train.py
+++ b/answer_classifier_cached_features/train.py
@@ -554,7 +554,8 @@ class attach_optimizer():
 
             self.optimizer.add_variables(
                 self.graph.object_attribute_vars + self.graph.word_vec_vars,
-                learning_rate = 1.0*self.lr)
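+                # zero learning rate: object/attribute and word-vector variables stay fixed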
+                learning_rate = 0.0*self.lr)
 
             
             self.optimizer.add_variables(
diff --git a/constants_vision_gpu_1.py b/constants_vision_gpu_1.py
index 99378eb64e9666cef73dcd0eaf6d3d338f24b499..76e1bf8dfbc11f50b2211d7e2ba7c2d62d871a7f 100644
--- a/constants_vision_gpu_1.py
+++ b/constants_vision_gpu_1.py
@@ -5,7 +5,7 @@ def mkdir_if_not_exists(dir_name):
     if not os.path.exists(dir_name):
         os.mkdir(dir_name)
         
-experiment_name = 'QA_explicit_dot_joint_training_pretrained_fix_pretrained'
+experiment_name = 'QA_pretrain_genome_split'
 #experiment_name = 'object_attribute_classifier_large_images'
 # Global output directory (all subexperiments will be saved here)
 global_output_dir = '/data/tanmay/GenVQA_Exp_Results'
@@ -56,6 +56,19 @@ vocab_json = os.path.join(
     data_absolute_path,
     'restructured/vocab_subset.json')
 
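+# Region id json files defining the train subset, held-out, and test splits of the genome regions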
+genome_train_subset_region_ids = os.path.join(
+    data_absolute_path,
+    'restructured/train_subset_region_ids.json')
+
+genome_train_held_out_region_ids = os.path.join(
+    data_absolute_path,
+    'restructured/train_held_out_region_ids.json')
+
+genome_test_region_ids = os.path.join(
+    data_absolute_path,
+    'restructured/test_region_ids.json')
+
 num_object_labels = 1000
 num_attribute_labels = 1000
 
@@ -63,12 +76,12 @@ num_attribute_labels = 1000
 # First 80% meant to be used for training
 # Next 10% is set aside for validation
 # Last 10% is to be used for testing
-num_total_regions = 1951768
-num_train_regions = 1561416 # First 80%
-num_val_regions = 195176 # Next 10%
-num_test_regions = num_total_regions \
-                   - num_train_regions \
-                   - num_val_regions 
+# num_total_regions = 1951768
+# num_train_regions = 1561416 # First 80%
+# num_val_regions = 195176 # Next 10%
+# num_test_regions = num_total_regions \
+#                    - num_train_regions \
+#                    - num_val_regions 
 
 # Pretrained resnet ckpt
 resnet_ckpt = '/home/nfs/tgupta6/data/Resnet/' + \
@@ -88,7 +101,7 @@ pretrained_vocab_word_vectors_npy = os.path.join(
 
 # Object Attribute Classifier Training Params
 region_batch_size = 200
-region_num_samples = num_train_regions
+# region_num_samples = num_train_regions
 region_num_epochs = 4
 region_offset = 0
 region_queue_size = 400
@@ -171,11 +184,11 @@ vqa_answer_vocab_json = os.path.join(
 answer_batch_size = 50
 answer_num_epochs = 10
 answer_offset = 0
-answer_obj_atr_loss_wt = 0.1
+answer_obj_atr_loss_wt = 0.0  # weight on the auxiliary object/attribute loss (0 disables it)
 answer_regularization_coeff = 1e-5
 answer_queue_size = 500
 answer_embedding_dim = 600
-answer_lr = 1e-4
+answer_lr = 1e-3
 answer_log_every_n_iter = 500
 answer_output_dir = os.path.join(
     global_experiment_dir,
@@ -184,8 +197,8 @@ answer_output_dir = os.path.join(
 mkdir_if_not_exists(answer_output_dir)
 
 pretrained_model = '/home/nfs/tgupta6/projects/GenVQA/Exp_Results/' +\
-                   'pretrained_object_attribute_classifier/' +\
-                   'obj_atr_model_77500'
+                   'object_attribute_classifier_large_images_vqa_split/' +\
+                   'object_attribute_classifiers/model-80000'  # obj/atr classifier checkpoint from the vqa_split run
 
 answer_model = os.path.join(
     answer_output_dir,