diff --git a/answer_classifier_cached_features/inference.py b/answer_classifier_cached_features/inference.py
index 101114202a49effbb20246c7941f5bf8efd050f4..d599de1d27b7117c9a539083ac043c945fae7cb6 100644
--- a/answer_classifier_cached_features/inference.py
+++ b/answer_classifier_cached_features/inference.py
@@ -38,7 +38,7 @@ class AnswerInference():
                 else:
                     reuse_vars = True
                 
-                num_regions = self.object_feat[j].get_shape()[0].value
+                num_regions = 100
 
                 q_feat = tf.reshape(
                     self.question_vert_concat[j],
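Note on the inference.py hunk: in TF 1.x, get_shape()[0].value is None whenever the leading dimension of object_feat[j] is not statically known, which makes it unusable as a Python int while building the graph; pinning the count to a literal 100 (the same value as num_regions_with_labels in constants_crunchy.py below) sidesteps that. A minimal sketch of the static-vs-dynamic shape behaviour, under an assumed placeholder shape:

    import tensorflow as tf

    # Assumed shape: per-image region features with an unknown leading dimension.
    object_feat = tf.placeholder(tf.float32, shape=[None, 2048])

    static_regions = object_feat.get_shape()[0].value  # None -> cannot be used as num_regions
    dynamic_regions = tf.shape(object_feat)[0]         # runtime tensor, only known inside a session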
diff --git a/constants_crunchy.py b/constants_crunchy.py
index 6ae09552f7140415b5b9b7c92edfa61e6bd73424..9ae185b468f5308ced4363572e358a735a6ffd85 100644
--- a/constants_crunchy.py
+++ b/constants_crunchy.py
@@ -5,7 +5,7 @@ def mkdir_if_not_exists(dir_name):
     if not os.path.exists(dir_name):
         os.mkdir(dir_name)
         
-experiment_name = 'QA_classifier_joint_pretrain_wordvec_xform' #'QA_joint_pretrain_genome_split'
+experiment_name = 'QA_classifier_joint_pretrain_wordvec_xform_large_obj_atr_wt' #'QA_joint_pretrain_genome_split'
 
 # Global output directory (all subexperiments will be saved here)
 global_output_dir = '/home/tanmay/Code/GenVQA/Exp_Results/VQA'
@@ -195,7 +195,7 @@ answer_obj_atr_loss_wt = 0.1
 answer_regularization_coeff = 1e-5
 answer_queue_size = 500
 answer_embedding_dim = 600
-answer_lr = 1e-3
+answer_lr = 1.0*1e-3
 answer_log_every_n_iter = 500
 answer_output_dir = os.path.join(
     global_experiment_dir,
@@ -215,12 +215,12 @@ answer_model = os.path.join(
 num_regions_with_labels = 100
 
 # Answer fine tune params
-answer_fine_tune_from_iter = 13000
+answer_fine_tune_from_iter = 29500
 answer_fine_tune_from = answer_model + '-' + str(answer_fine_tune_from_iter)
 
 # Answer eval params
 answer_eval_on = 'val'
-answer_model_to_eval = answer_model + '-20000'
+answer_model_to_eval = answer_model + '-28000'
 
 vqa_results_dir = os.path.join(
     answer_output_dir,
@@ -238,8 +238,8 @@ answer_eval_results_json = os.path.join(
 
 # Select best model
 models_dir = answer_output_dir
-start_model = 40000
-step_size = 2000
+start_model = 20000
+step_size = 4000
 model_accuracies_txt = os.path.join(
     answer_output_dir,
     'model_accuracies.txt')
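On the model-selection constants: with start_model = 20000 and step_size = 4000 the sweep visits 20000, 24000, 28000, ..., which covers the '-28000' checkpoint now wired into answer_model_to_eval; checkpoint paths follow the answer_model + '-' + str(iter) convention used above. A hypothetical sweep built only from these constants (the actual selection script is not part of this diff, and the number of steps below is illustrative):

    import constants_crunchy as constants

    # Enumerate candidate checkpoints: start_model, start_model + step_size, ...
    iters = [constants.start_model + i * constants.step_size for i in range(6)]
    model_paths = [constants.answer_model + '-' + str(it) for it in iters]
    # Per-checkpoint accuracies would then be written to constants.model_accuracies_txt.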
diff --git a/object_attribute_classifier_cached_features/eval.py b/object_attribute_classifier_cached_features/eval.py
index a022e2aa7fa1a8d9dda00184c24a13b98347e775..17ccfe924645136d76688d266953641e5e145186 100644
--- a/object_attribute_classifier_cached_features/eval.py
+++ b/object_attribute_classifier_cached_features/eval.py
@@ -291,8 +291,9 @@ if __name__=='__main__':
     initializer = create_initializer(
         graph, 
         sess, 
+        constants.pretrained_model)
         #constants.answer_model_to_eval)
-        constants.region_model_to_eval)
+        #constants.region_model_to_eval)
 
     print 'Creating feed dict creator...'
     feed_dict_creator = train.create_feed_dict_creator(graph.plh)
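The eval.py hunk switches the restored checkpoint to constants.pretrained_model and keeps the earlier options as comments. Assuming create_initializer ultimately wraps a tf.train.Saver restore (its definition is not shown in this diff), the restore step would look roughly like:

    import tensorflow as tf

    def restore_checkpoint(sess, checkpoint_prefix):
        # Requires the model graph (and its variables) to be built already.
        # checkpoint_prefix is a path prefix such as constants.pretrained_model
        # or answer_model + '-28000', not a single file on disk.
        saver = tf.train.Saver()
        saver.restore(sess, checkpoint_prefix)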
diff --git a/visual_util/visualize_relevance.py b/visual_util/visualize_relevance.py
index be332af7d5f8d15591fc8231e71874cace060194..695b6d3558bec2a91f764ea0d20a8995e169f2c7 100644
--- a/visual_util/visualize_relevance.py
+++ b/visual_util/visualize_relevance.py
@@ -200,7 +200,7 @@ if __name__=='__main__':
         'mscoco_val2014_annotations_with_parsed_questions.json')
 
     exp_dir = '/home/tanmay/Code/GenVQA/Exp_Results/VQA/' + \
-              'QA_classifier_wordvec_xform/'
+              'QA_classifier_joint_pretrain_wordvec_xform/'
 
     eval_data_json = os.path.join(
         exp_dir,
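The visualization script pins exp_dir to the joint-pretrain experiment by literal string, independent of experiment_name in constants_crunchy.py (which now carries the _large_obj_atr_wt suffix). If the two are ever meant to track each other, the directory could be derived from the shared constants instead; this is a sketch of that option, not what the script currently does:

    import os
    import constants_crunchy as constants

    # Assumes the relevance results live under the same global output directory
    # as the rest of the experiment.
    exp_dir = os.path.join(constants.global_output_dir, constants.experiment_name)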