diff --git a/answer_classifier_cached_features/fine_tune.py b/answer_classifier_cached_features/fine_tune.py
index b84469ff53c535b3e6644f2c56bda651d0b1f9a4..3a3ea8704efc3ed8bf8a6c2b6dac11475f12c7d3 100644
--- a/answer_classifier_cached_features/fine_tune.py
+++ b/answer_classifier_cached_features/fine_tune.py
@@ -87,6 +87,7 @@ if __name__=='__main__':
         constants.num_regions_with_labels,
         constants.num_object_labels,
         constants.num_attribute_labels,
+        constants.answer_obj_atr_loss_wt,
         resnet_feat_dim=constants.resnet_feat_dim,
         training=True)
 
diff --git a/answer_classifier_cached_features/train.py b/answer_classifier_cached_features/train.py
index bb213d9c96554d0e57e2424558aa7b24a71ccfbc..608516a0362715a7956ca627785c44cc3efc2b23 100644
--- a/answer_classifier_cached_features/train.py
+++ b/answer_classifier_cached_features/train.py
@@ -29,6 +29,7 @@ class graph_creator():
             num_regions_w_labels,
             num_object_labels,
             num_attribute_labels,
+            obj_atr_loss_wt,
             resnet_feat_dim=2048,
             training=True):
         self.im_h, self.im_w = image_size
@@ -40,6 +41,7 @@ class graph_creator():
         self.num_regions_w_labels = num_regions_w_labels
         self.num_object_labels = num_object_labels
         self.num_attribute_labels = num_attribute_labels
+        self.obj_atr_loss_wt = obj_atr_loss_wt
         self.resnet_feat_dim = resnet_feat_dim
         self.training = training
         self.tf_graph = tf.Graph()
@@ -369,7 +371,7 @@ class graph_creator():
 
         self.regularization_loss = self.regularization()
 
-        self.total_loss = 0.0*(self.object_loss + 1000.0*self.attribute_loss) + \
+        self.total_loss = self.obj_atr_loss_wt*(self.object_loss + 1000.0*self.attribute_loss) + \
                           self.regularization_loss + \
                           self.answer_loss
                           
@@ -743,6 +745,7 @@ if __name__=='__main__':
         constants.num_regions_with_labels,
         constants.num_object_labels,
         constants.num_attribute_labels,
+        constants.answer_obj_atr_loss_wt,
         resnet_feat_dim=constants.resnet_feat_dim,
         training=True)
 
diff --git a/constants_crunchy.py b/constants_crunchy.py
index 512af61bfee12afc479537dacc9a334e0c05ad7f..70b07fc70b8bdbb7a5fc3aa1677970b7c8c8192f 100644
--- a/constants_crunchy.py
+++ b/constants_crunchy.py
@@ -5,7 +5,7 @@ def mkdir_if_not_exists(dir_name):
     if not os.path.exists(dir_name):
         os.mkdir(dir_name)
         
-experiment_name = 'QA_explicit_dot_joint_training_pretrained'
+experiment_name = 'QA_explicit_dot_joint_training_pretrained_same_lr'
 #experiment_name = 'object_attribute_classifier_large_images'
 # Global output directory (all subexperiments will be saved here)
 global_output_dir = '/home/tanmay/Code/GenVQA/Exp_Results/VQA'
@@ -159,12 +159,13 @@ num_test_questions = 0
 
 # Answer classifier training params
 answer_batch_size = 50
-answer_num_epochs = 10
+answer_num_epochs = 4
 answer_offset = 0
+answer_obj_atr_loss_wt = 0.1
 answer_regularization_coeff = 1e-5
 answer_queue_size = 500
 answer_embedding_dim = 600
-answer_lr = 1e-4
+answer_lr = 1e-3
 answer_log_every_n_iter = 500
 answer_output_dir = os.path.join(
     global_experiment_dir,