diff --git a/answer_classifier_cached_features/train.py b/answer_classifier_cached_features/train.py
index 7bd5ebee4b3abda1781d68488afb934c8db40340..8dc118c14f6a3c6b16ca4f8f0695ecc9c081223a 100644
--- a/answer_classifier_cached_features/train.py
+++ b/answer_classifier_cached_features/train.py
@@ -634,18 +634,18 @@ def create_vgenome_batch_generator():
         constants.object_labels_json,
         constants.attribute_labels_json,
         constants.regions_json,
-        constants.genome_train_subset_region_ids,
+        constants.genome_train_region_ids,
         constants.image_size,
         channels=3,
         resnet_feat_dim=constants.resnet_feat_dim,
         mean_image_filename=None)
 
-    num_train_subset_regions = len(data_mgr.region_ids)
-    print num_train_subset_regions
+    num_train_regions = len(data_mgr.region_ids)
+    print num_train_regions
     
     index_generator = tftools.data.random(
         constants.num_regions_with_labels, 
-        num_train_subset_regions,
+        num_train_regions,
         constants.region_num_epochs, 
         0)
     
diff --git a/constants_crunchy.py b/constants_crunchy.py
index 56f17a379cf963ccd68c979b7e12651709760395..229585440c853819b141a98b684bfc623149ccfd 100644
--- a/constants_crunchy.py
+++ b/constants_crunchy.py
@@ -5,7 +5,11 @@ def mkdir_if_not_exists(dir_name):
     if not os.path.exists(dir_name):
         os.mkdir(dir_name)
         
-experiment_name = 'obj_atr_through_none' #'QA_joint_pretrain_genome_split'
+experiment_name = 'obj_atr_through_none'
+
+##########################################################################
+#                    Machine Specific Paths                              #
+##########################################################################
 
 # Global output directory (all subexperiments will be saved here)
 global_output_dir = '/home/tanmay/Code/GenVQA/Exp_Results/VQA'
@@ -22,15 +26,56 @@ mkdir_if_not_exists(global_output_dir)
 mkdir_if_not_exists(global_experiment_dir)
 mkdir_if_not_exists(tb_log_dir)
 
-#height and width to which images are resized before feeding into networks
+# Genome Parent Directory
+data_absolute_path = '/home/ssd/VisualGenome'
+
+# Pretrained resnet ckpt
+resnet_ckpt = '/home/tanmay/Downloads/pretrained_networks/' + \
+              'Resnet/tensorflow-resnet-pretrained-20160509/' + \
+              'ResNet-L50.ckpt'
+
+# Pretrained word vectors
+word2vec_binary = '/home/tanmay/Code/word2vec/word2vec-api-master/' + \
+                  'GoogleNews-vectors-negative300.bin'
+
+# VQA parent directory
+vqa_basedir = '/home/ssd/VQA/'
+
+# Pretrained obj atr model to be restored
+pretrained_model = '/home/tanmay/Code/GenVQA/Exp_Results/VQA/' + \
+    'object_attribute_classifier_wordvec_xform/' + \
+    'object_attribute_classifiers/model-102000'
+
+##########################################################################
+#                         Model Parameters                               #
+##########################################################################
+
+# height and width to which images are resized before feeding into networks
 image_size = (224, 224) 
 
 # Token to be used if object or attribute variable is unknown
 unknown_token = 'UNK'
 
-# Genome Data paths
-data_absolute_path = '/home/ssd/VisualGenome'
+##########################################################################
+#                  Labels and Word2Vec Parameters                        #
+##########################################################################
+
+num_object_labels = 1000
+num_attribute_labels = 1000
+
+word_vector_size = 300
+resnet_feat_dim = 2048
 
+# Numpy matrix storing vocabulary word vectors
+pretrained_vocab_word_vectors_npy = os.path.join(
+    data_absolute_path,
+    'restructured/pretrained_vocab_word_vectors.npy')
+
+##########################################################################
+#                         Genome Parameters                              #
+##########################################################################
+
+# Genome paths
 image_dir = os.path.join(data_absolute_path, 'cropped_regions_large')
 genome_resnet_feat_dir = os.path.join(
     data_absolute_path,
@@ -56,55 +101,23 @@ vocab_json = os.path.join(
     data_absolute_path,
     'restructured/vocab_subset.json')
 
-genome_train_subset_region_ids = os.path.join(
+genome_train_region_ids = os.path.join(
     data_absolute_path,
-    'restructured/train_subset_region_ids.json')
+    'restructured/train_region_ids_simple.json')
 
-genome_train_held_out_region_ids = os.path.join(
+genome_val_region_ids = os.path.join(
     data_absolute_path,
-    'restructured/train_held_out_region_ids.json')
+    'restructured/val_region_ids_simple.json')
 
 genome_test_region_ids = os.path.join(
     data_absolute_path,
     'restructured/test_region_ids.json')
 
-num_object_labels = 1000
-num_attribute_labels = 1000
-
-# Regions data partition
-# First 80% meant to be used for training
-# Next 10% is set aside for validation
-# Last 10% is to be used for testing
-# num_total_regions = 1951768
-# num_train_regions = 1561416 # First 80%
-# num_val_regions = 195176 # Next 10%
-# num_test_regions = num_total_regions \
-#                    - num_train_regions \
-#                    - num_val_regions 
-
-# Pretrained resnet ckpt
-resnet_ckpt = '/home/tanmay/Downloads/pretrained_networks/' + \
-              'Resnet/tensorflow-resnet-pretrained-20160509/' + \
-              'ResNet-L50.ckpt'
-
-# Pretrained word vectors
-word2vec_binary = '/home/tanmay/Code/word2vec/word2vec-api-master/' + \
-                  'GoogleNews-vectors-negative300.bin'
-
-word_vector_size = 300
-resnet_feat_dim = 2048
-
-# Numpy matrix storing vocabulary word vectors
-pretrained_vocab_word_vectors_npy = os.path.join(
-    data_absolute_path,
-    'restructured/pretrained_vocab_word_vectors.npy')
+##########################################################################
+#               Genome Training/Fine-Tuning Parameters                   #
+##########################################################################
 
-# Object Attribute Classifier Training Params
-region_batch_size = 200
-region_num_epochs = 20
-region_queue_size = 400
-region_regularization_coeff = 1e-5
-region_lr = 1e-3
+# Logging parameters
 region_log_every_n_iter = 500
 region_output_dir = os.path.join(
     global_experiment_dir,
@@ -116,10 +129,21 @@ region_model = os.path.join(
     region_output_dir,
     'model')                    
 
+# Object Attribute Classifier Training Params
+region_batch_size = 200
+region_num_epochs = 20
+region_queue_size = 400
+region_regularization_coeff = 1e-5
+region_lr = 1e-3
+
 # Object Attribute Finetuning Params
 region_fine_tune_from_iter = 50500
 region_fine_tune_from = region_model + '-' + str(region_fine_tune_from_iter)
 
+##########################################################################
+#                     Genome Evaluation Parameters                       #
+##########################################################################
+
 # Object Attribute Model Selection
 region_start_model = 8000
 region_step_size = 8000
@@ -131,6 +155,7 @@ region_model_accuracies_txt = os.path.join(
 region_eval_on = 'train_held_out' # One of {'test','train_held_out','train_subset'}
 region_model_to_eval = region_model + '-' + '102000'
 
+# Path to results
 region_pred_vis_dirname = os.path.join(
     region_output_dir,
     'region_pred_vis_'+region_eval_on)
@@ -143,14 +168,16 @@ region_attribute_scores_dirname = os.path.join(
 
 mkdir_if_not_exists(region_attribute_scores_dirname)
 
+##########################################################################
+#                         VQA Parameters                                 #
+##########################################################################
+
 # Answer prediction
 num_region_proposals = 100
 num_mcq_candidates = 18
 num_negative_answers = num_mcq_candidates - 1
 
 # VQA data paths
-vqa_basedir = '/home/ssd/VQA/'
-
 vqa_train_image_dir = os.path.join(
     vqa_basedir,
     'train2014_cropped_large')
@@ -184,42 +211,41 @@ vqa_answer_vocab_json = os.path.join(
     vqa_basedir,
     'answer_vocab.json')
 
-# VQA dataset params
+# Dataset statistics
 # num_train_questions = 248349
 # num_train_held_out_questions = 12500
 # num_train_subset_questions = num_train_questions - num_train_held_out_questions
 # num_val_questions = 121512
-# num_val_subset_questions = 10000
 
-# num_test_questions = 0
+##########################################################################
+#                   VQA Training/Finetuning Parameters                   #
+##########################################################################
 
-# Answer classifier training params
-answer_batch_size = 25
-answer_num_epochs = 20
-answer_offset = 0
-answer_obj_atr_loss_wt = 1.0
-answer_ans_loss_wt = 0.0
-answer_mil_loss_wt = 0.0
-answer_regularization_coeff = 1e-5
-answer_queue_size = 500
-answer_embedding_dim = 600
-answer_lr = 1e-3
+# Logging parameters
 answer_log_every_n_iter = 500
-answer_train_from_scratch = True
 answer_output_dir = os.path.join(
     global_experiment_dir,
     'answer_classifiers')
     
 mkdir_if_not_exists(answer_output_dir)
 
-pretrained_model = '/home/tanmay/Code/GenVQA/Exp_Results/VQA/' + \
-    'object_attribute_classifier_wordvec_xform/' + \
-    'object_attribute_classifiers/model-102000'
-
 answer_model = os.path.join(
     answer_output_dir,
     'model')
 
+# Answer classifier training params
+answer_train_from_scratch = True
+
+answer_batch_size = 25
+answer_num_epochs = 20
+answer_queue_size = 500
+answer_regularization_coeff = 1e-5
+answer_lr = 1e-3
+
+answer_obj_atr_loss_wt = 1.0
+answer_ans_loss_wt = 0.0
+answer_mil_loss_wt = 0.0
+
 # Answer classifier additional joint training params
 num_regions_with_labels = 100
 
@@ -227,6 +253,18 @@ num_regions_with_labels = 100
 answer_fine_tune_from_iter = 29500
 answer_fine_tune_from = answer_model + '-' + str(answer_fine_tune_from_iter)
 
+##########################################################################
+#                        VQA Evaluation Parameters                       #
+##########################################################################
+
+# Select best model
+models_dir = answer_output_dir
+start_model = 20000
+step_size = 4000
+model_accuracies_txt = os.path.join(
+    answer_output_dir,
+    'model_accuracies.txt')
+
 # Answer eval params
 answer_eval_on = 'val'
 answer_model_to_eval = answer_model + '-98000'
@@ -245,14 +283,6 @@ answer_eval_results_json = os.path.join(
     vqa_results_dir,
     'eval_' + answer_eval_on + '_results.json')
 
-# Select best model
-models_dir = answer_output_dir
-start_model = 20000
-step_size = 4000
-model_accuracies_txt = os.path.join(
-    answer_output_dir,
-    'model_accuracies.txt')
-
 # Fine Grained Evaluation File paths
 raw_vqa_val_ques_json = os.path.join(
     vqa_basedir,
diff --git a/constants_vision_gpu_1.py b/constants_vision_gpu_1.py
index dd75d168d035dbdc01d161253f0a619cd04dcaf3..72f615ae5e5587de74f9960858b79818c848d566 100644
--- a/constants_vision_gpu_1.py
+++ b/constants_vision_gpu_1.py
@@ -5,8 +5,12 @@ def mkdir_if_not_exists(dir_name):
     if not os.path.exists(dir_name):
         os.mkdir(dir_name)
         
-experiment_name = 'obj_atr_through_ans_mil_wordvecl2'
-#experiment_name = 'object_attribute_classifier_large_images'
+experiment_name = 'obj_atr_through_none'
+
+##########################################################################
+#                    Machine Specific Paths                              #
+##########################################################################
+
 # Global output directory (all subexperiments will be saved here)
 global_output_dir = '/data/tanmay/GenVQA_Exp_Results'
 
@@ -22,15 +26,55 @@ mkdir_if_not_exists(global_output_dir)
 mkdir_if_not_exists(global_experiment_dir)
 mkdir_if_not_exists(tb_log_dir)
 
-#height and width to which images are resized before feeding into networks
+# Genome Parent Directory
+data_absolute_path = '/home/nfs/tgupta6/data/VisualGenome'
+
+# Pretrained resnet ckpt
+resnet_ckpt = '/home/nfs/tgupta6/data/Resnet/' + \
+              'ResNet-L50.ckpt'
+
+# Pretrained word vectors
+word2vec_binary = '/home/nfs/tgupta6/data/word_vectors/' + \
+                  'GoogleNews-vectors-negative300.bin'
+
+# VQA parent directory
+vqa_basedir = '/home/nfs/tgupta6/data/VQA/'
+
+# Pretrained obj atr model to be restored
+pretrained_model = '/home/nfs/tgupta6/projects/GenVQA/Exp_Results/' +\
+                   'object_attribute_classifier_large_images_vqa_split/' +\
+                   'object_attribute_classifiers/model-80000'
+
+##########################################################################
+#                         Model Parameters                               #
+##########################################################################
+
+# height and width to which images are resized before feeding into networks
 image_size = (224, 224) 
 
 # Token to be used if object or attribute variable is unknown
 unknown_token = 'UNK'
 
-# Genome Data paths
-data_absolute_path = '/home/nfs/tgupta6/data/VisualGenome'
+##########################################################################
+#                  Labels and Word2Vec Parameters                        #
+##########################################################################
+
+num_object_labels = 1000
+num_attribute_labels = 1000
 
+word_vector_size = 300
+resnet_feat_dim = 2048
+
+# Numpy matrix storing vocabulary word vectors
+pretrained_vocab_word_vectors_npy = os.path.join(
+    data_absolute_path,
+    'restructured/pretrained_vocab_word_vectors.npy')
+
+##########################################################################
+#                         Genome Parameters                              #
+##########################################################################
+
+# Genome paths
 image_dir = os.path.join(data_absolute_path, 'cropped_regions_large')
 genome_resnet_feat_dir = os.path.join(
     data_absolute_path,
@@ -56,55 +100,23 @@ vocab_json = os.path.join(
     data_absolute_path,
     'restructured/vocab_subset.json')
 
-genome_train_subset_region_ids = os.path.join(
+genome_train_region_ids = os.path.join(
     data_absolute_path,
-    'restructured/train_subset_region_ids.json')
+    'restructured/train_region_ids_simple.json')
 
-genome_train_held_out_region_ids = os.path.join(
+genome_val_region_ids = os.path.join(
     data_absolute_path,
-    'restructured/train_held_out_region_ids.json')
+    'restructured/val_region_ids_simple.json')
 
 genome_test_region_ids = os.path.join(
     data_absolute_path,
     'restructured/test_region_ids.json')
 
-num_object_labels = 1000
-num_attribute_labels = 1000
-
-# Regions data partition
-# First 80% meant to be used for training
-# Next 10% is set aside for validation
-# Last 10% is to be used for testing
-# num_total_regions = 1951768
-# num_train_regions = 1561416 # First 80%
-# num_val_regions = 195176 # Next 10%
-# num_test_regions = num_total_regions \
-#                    - num_train_regions \
-#                    - num_val_regions 
+##########################################################################
+#               Genome Training/Fine-Tuning Parameters                   #
+##########################################################################
 
-# Pretrained resnet ckpt
-resnet_ckpt = '/home/nfs/tgupta6/data/Resnet/' + \
-              'ResNet-L50.ckpt'
-
-# Pretrained word vectors
-word2vec_binary = '/home/nfs/tgupta6/data/word_vectors/' + \
-                  'GoogleNews-vectors-negative300.bin'
-
-word_vector_size = 300
-resnet_feat_dim = 2048
-
-# Numpy matrix storing vocabulary word vectors
-pretrained_vocab_word_vectors_npy = os.path.join(
-    data_absolute_path,
-    'restructured/pretrained_vocab_word_vectors.npy')
-
-# Object Attribute Classifier Training Params
-region_batch_size = 200
-region_num_epochs = 20
-region_offset = 0
-region_queue_size = 400
-region_regularization_coeff = 1e-5
-region_lr = 1e-3
+# Logging parameters
 region_log_every_n_iter = 500
 region_output_dir = os.path.join(
     global_experiment_dir,
@@ -116,13 +128,38 @@ region_model = os.path.join(
     region_output_dir,
     'model')                    
 
+# Object Attribute Classifier Training Params
+region_batch_size = 200
+region_num_epochs = 20
+region_queue_size = 400
+region_regularization_coeff = 1e-5
+region_lr = 1e-3
+
 # Object Attribute Finetuning Params
-region_fine_tune_from_iter = 3000
+region_fine_tune_from_iter = 50500
 region_fine_tune_from = region_model + '-' + str(region_fine_tune_from_iter)
 
+##########################################################################
+#                     Genome Evaluation Parameters                       #
+##########################################################################
+
+# Object Attribute Model Selection
+region_start_model = 8000
+region_step_size = 8000
+region_model_accuracies_txt = os.path.join(
+    region_output_dir,
+    'model_accuracies.txt')
+
 # Object Attribute Classifier Evaluation Params
-region_eval_on = 'train_held_out' # One of {'train_subset','train_held_out','test'}
-region_model_to_eval = region_model + '-' + '77500'
+region_eval_on = 'train_held_out' # One of {'test','train_held_out','train_subset'}
+region_model_to_eval = region_model + '-' + '102000'
+
+# Path to results
+region_pred_vis_dirname = os.path.join(
+    region_output_dir,
+    'region_pred_vis_'+region_eval_on)
+
+mkdir_if_not_exists(region_pred_vis_dirname)
 
 region_attribute_scores_dirname = os.path.join(
     region_output_dir,
@@ -130,14 +167,16 @@ region_attribute_scores_dirname = os.path.join(
 
 mkdir_if_not_exists(region_attribute_scores_dirname)
 
+##########################################################################
+#                         VQA Parameters                                 #
+##########################################################################
+
 # Answer prediction
 num_region_proposals = 100
 num_mcq_candidates = 18
 num_negative_answers = num_mcq_candidates - 1
 
 # VQA data paths
-vqa_basedir = '/home/nfs/tgupta6/data/VQA/'
-
 vqa_train_image_dir = os.path.join(
     vqa_basedir,
     'train2014_cropped_large')
@@ -152,7 +191,7 @@ vqa_train_subset_qids = os.path.join(
     'train_subset_qids.json')
 vqa_train_held_out_qids = os.path.join(
     vqa_basedir,
-    'train_held_out_qids.json') 
+    'train_held_out_qids.json')
 
 vqa_val_image_dir = os.path.join(
     vqa_basedir,
@@ -171,63 +210,83 @@ vqa_answer_vocab_json = os.path.join(
     vqa_basedir,
     'answer_vocab.json')
 
-# VQA dataset params
+# Dataset statistics
 # num_train_questions = 248349
-# num_val_subset_questions = 10000
+# num_train_held_out_questions = 12500
+# num_train_subset_questions = num_train_questions - num_train_held_out_questions
 # num_val_questions = 121512
-# num_val_rest_questions = num_val_questions - num_val_subset_questions
-# num_test_questions = 0
 
-# Answer classifier training params
-answer_batch_size = 25
-answer_num_epochs = 20
-answer_offset = 0
-answer_obj_atr_loss_wt = 1.0
-answer_ans_loss_wt = 0.1
-answer_mil_loss_wt = 0.2
-answer_regularization_coeff = 1e-5
-answer_queue_size = 500
-answer_embedding_dim = 600
-answer_lr = 1e-3
+##########################################################################
+#                   VQA Training/Finetuning Parameters                   #
+##########################################################################
+
+# Logging parameters
 answer_log_every_n_iter = 500
-answer_train_from_scratch = True
 answer_output_dir = os.path.join(
     global_experiment_dir,
     'answer_classifiers')
     
 mkdir_if_not_exists(answer_output_dir)
 
-pretrained_model = '/home/nfs/tgupta6/projects/GenVQA/Exp_Results/' +\
-                   'object_attribute_classifier_large_images_vqa_split/' +\
-                   'object_attribute_classifiers/model-80000'
-
 answer_model = os.path.join(
     answer_output_dir,
     'model')
 
+# Answer classifier training params
+answer_train_from_scratch = True
+
+answer_batch_size = 25
+answer_num_epochs = 20
+answer_queue_size = 500
+answer_regularization_coeff = 1e-5
+answer_lr = 1e-3
+
+answer_obj_atr_loss_wt = 1.0
+answer_ans_loss_wt = 0.0
+answer_mil_loss_wt = 0.0
+
 # Answer classifier additional joint training params
 num_regions_with_labels = 100
 
 # Answer fine tune params
-answer_fine_tune_from_iter = 22500
+answer_fine_tune_from_iter = 29500
 answer_fine_tune_from = answer_model + '-' + str(answer_fine_tune_from_iter)
 
+##########################################################################
+#                        VQA Evaluation Parameters                       #
+##########################################################################
+
+# Select best model
+models_dir = answer_output_dir
+start_model = 20000
+step_size = 4000
+model_accuracies_txt = os.path.join(
+    answer_output_dir,
+    'model_accuracies.txt')
+
 # Answer eval params
 answer_eval_on = 'val'
-answer_model_to_eval = answer_model + '-55000'
+answer_model_to_eval = answer_model + '-98000'
 
-answer_eval_data_json = os.path.join(
+vqa_results_dir = os.path.join(
     answer_output_dir,
+    'Results')
+
+mkdir_if_not_exists(vqa_results_dir)
+
+answer_eval_data_json = os.path.join(
+    vqa_results_dir,
     'eval_' + answer_eval_on + '_data.json')
 
 answer_eval_results_json = os.path.join(
-    answer_output_dir,
+    vqa_results_dir,
     'eval_' + answer_eval_on + '_results.json')
 
-# Select best model
-models_dir = answer_output_dir
-start_model = 1000
-step_size = 2000
-model_accuracies_txt = os.path.join(
-    answer_output_dir,
-    'model_accuracies.txt')
+# Fine Grained Evaluation File paths
+raw_vqa_val_ques_json = os.path.join(
+    vqa_basedir,
+    'MultipleChoice_mscoco_val2014_questions.json')
+
+raw_vqa_val_anno_json = os.path.join(
+    vqa_basedir,
+    'mscoco_val2014_annotations.json')
diff --git a/constants_vision_gpu_2.py b/constants_vision_gpu_2.py
index 16ae816e0bf74a8782baea6a193fef115581582c..72f615ae5e5587de74f9960858b79818c848d566 100644
--- a/constants_vision_gpu_2.py
+++ b/constants_vision_gpu_2.py
@@ -5,8 +5,12 @@ def mkdir_if_not_exists(dir_name):
     if not os.path.exists(dir_name):
         os.mkdir(dir_name)
         
-experiment_name = 'QA_genome_split'
-#experiment_name = 'object_attribute_classifier_large_images'
+experiment_name = 'obj_atr_through_none'
+
+##########################################################################
+#                    Machine Specific Paths                              #
+##########################################################################
+
 # Global output directory (all subexperiments will be saved here)
 global_output_dir = '/data/tanmay/GenVQA_Exp_Results'
 
@@ -22,15 +26,55 @@ mkdir_if_not_exists(global_output_dir)
 mkdir_if_not_exists(global_experiment_dir)
 mkdir_if_not_exists(tb_log_dir)
 
-#height and width to which images are resized before feeding into networks
+# Genome Parent Directory
+data_absolute_path = '/home/nfs/tgupta6/data/VisualGenome'
+
+# Pretrained resnet ckpt
+resnet_ckpt = '/home/nfs/tgupta6/data/Resnet/' + \
+              'ResNet-L50.ckpt'
+
+# Pretrained word vectors
+word2vec_binary = '/home/nfs/tgupta6/data/word_vectors/' + \
+                  'GoogleNews-vectors-negative300.bin'
+
+# VQA parent directory
+vqa_basedir = '/home/nfs/tgupta6/data/VQA/'
+
+# Pretrained obj atr model to be restored
+pretrained_model = '/home/nfs/tgupta6/projects/GenVQA/Exp_Results/' +\
+                   'object_attribute_classifier_large_images_vqa_split/' +\
+                   'object_attribute_classifiers/model-80000'
+
+##########################################################################
+#                         Model Parameters                               #
+##########################################################################
+
+# height and width to which images are resized before feeding into networks
 image_size = (224, 224) 
 
 # Token to be used if object or attribute variable is unknown
 unknown_token = 'UNK'
 
-# Genome Data paths
-data_absolute_path = '/home/nfs/tgupta6/data/VisualGenome'
+##########################################################################
+#                  Labels and Word2Vec Parameters                        #
+##########################################################################
 
+num_object_labels = 1000
+num_attribute_labels = 1000
+
+word_vector_size = 300
+resnet_feat_dim = 2048
+
+# Numpy matrix storing vocabulary word vectors
+pretrained_vocab_word_vectors_npy = os.path.join(
+    data_absolute_path,
+    'restructured/pretrained_vocab_word_vectors.npy')
+
+##########################################################################
+#                         Genome Parameters                              #
+##########################################################################
+
+# Genome paths
 image_dir = os.path.join(data_absolute_path, 'cropped_regions_large')
 genome_resnet_feat_dir = os.path.join(
     data_absolute_path,
@@ -56,56 +100,23 @@ vocab_json = os.path.join(
     data_absolute_path,
     'restructured/vocab_subset.json')
 
-genome_train_subset_region_ids = os.path.join(
+genome_train_region_ids = os.path.join(
     data_absolute_path,
-    'restructured/train_subset_region_ids.json')
+    'restructured/train_region_ids_simple.json')
 
-genome_train_held_out_region_ids = os.path.join(
+genome_val_region_ids = os.path.join(
     data_absolute_path,
-    'restructured/train_held_out_region_ids.json')
+    'restructured/val_region_ids_simple.json')
 
 genome_test_region_ids = os.path.join(
     data_absolute_path,
     'restructured/test_region_ids.json')
 
-num_object_labels = 1000
-num_attribute_labels = 1000
-
-# Regions data partition
-# First 80% meant to be used for training
-# Next 10% is set aside for validation
-# Last 10% is to be used for testing
-# num_total_regions = 1951768
-# num_train_regions = 1561416 # First 80%
-# num_val_regions = 195176 # Next 10%
-# num_test_regions = num_total_regions \
-#                    - num_train_regions \
-#                    - num_val_regions 
-
-# Pretrained resnet ckpt
-resnet_ckpt = '/home/nfs/tgupta6/data/Resnet/' + \
-              'ResNet-L50.ckpt'
-
-# Pretrained word vectors
-word2vec_binary = '/home/nfs/tgupta6/data/word_vectors/' + \
-                  'GoogleNews-vectors-negative300.bin'
+##########################################################################
+#               Genome Training/Fine-Tuning Parameters                   #
+##########################################################################
 
-word_vector_size = 300
-resnet_feat_dim = 2048
-
-# Numpy matrix storing vocabulary word vectors
-pretrained_vocab_word_vectors_npy = os.path.join(
-    data_absolute_path,
-    'restructured/pretrained_vocab_word_vectors.npy')
-
-# Object Attribute Classifier Training Params
-region_batch_size = 200
-# region_num_samples = num_train_regions
-region_num_epochs = 4
-region_offset = 0
-region_queue_size = 400
-region_regularization_coeff = 1e-4
-region_lr = 1e-3
+# Logging parameters
 region_log_every_n_iter = 500
 region_output_dir = os.path.join(
     global_experiment_dir,
@@ -117,13 +128,38 @@ region_model = os.path.join(
     region_output_dir,
     'model')                    
 
+# Object Attribute Classifier Training Params
+region_batch_size = 200
+region_num_epochs = 20
+region_queue_size = 400
+region_regularization_coeff = 1e-5
+region_lr = 1e-3
+
 # Object Attribute Finetuning Params
-region_fine_tune_from_iter = 3000
+region_fine_tune_from_iter = 50500
 region_fine_tune_from = region_model + '-' + str(region_fine_tune_from_iter)
 
+##########################################################################
+#                     Genome Evaluation Parameters                       #
+##########################################################################
+
+# Object Attribute Model Selection
+region_start_model = 8000
+region_step_size = 8000
+region_model_accuracies_txt = os.path.join(
+    region_output_dir,
+    'model_accuracies.txt')
+
 # Object Attribute Classifier Evaluation Params
-region_eval_on = 'val' # One of {'val','test','train'}
-region_model_to_eval = region_model + '-' + '77500'
+region_eval_on = 'train_held_out' # One of {'test','train_held_out','train_subset'}
+region_model_to_eval = region_model + '-' + '102000'
+
+# Path to results
+region_pred_vis_dirname = os.path.join(
+    region_output_dir,
+    'region_pred_vis_'+region_eval_on)
+
+mkdir_if_not_exists(region_pred_vis_dirname)
 
 region_attribute_scores_dirname = os.path.join(
     region_output_dir,
@@ -131,14 +167,16 @@ region_attribute_scores_dirname = os.path.join(
 
 mkdir_if_not_exists(region_attribute_scores_dirname)
 
+##########################################################################
+#                         VQA Parameters                                 #
+##########################################################################
+
 # Answer prediction
 num_region_proposals = 100
 num_mcq_candidates = 18
 num_negative_answers = num_mcq_candidates - 1
 
 # VQA data paths
-vqa_basedir = '/home/nfs/tgupta6/data/VQA/'
-
 vqa_train_image_dir = os.path.join(
     vqa_basedir,
     'train2014_cropped_large')
@@ -153,7 +191,7 @@ vqa_train_subset_qids = os.path.join(
     'train_subset_qids.json')
 vqa_train_held_out_qids = os.path.join(
     vqa_basedir,
-    'train_held_out_qids.json') 
+    'train_held_out_qids.json')
 
 vqa_val_image_dir = os.path.join(
     vqa_basedir,
@@ -172,22 +210,17 @@ vqa_answer_vocab_json = os.path.join(
     vqa_basedir,
     'answer_vocab.json')
 
-# VQA dataset params
+# Dataset statistics
 # num_train_questions = 248349
-# num_val_subset_questions = 10000
+# num_train_held_out_questions = 12500
+# num_train_subset_questions = num_train_questions - num_train_held_out_questions
 # num_val_questions = 121512
-# num_val_rest_questions = num_val_questions - num_val_subset_questions
-# num_test_questions = 0
 
-# Answer classifier training params
-answer_batch_size = 50
-answer_num_epochs = 10
-answer_offset = 0
-answer_obj_atr_loss_wt = 0.0
-answer_regularization_coeff = 1e-5
-answer_queue_size = 500
-answer_embedding_dim = 600
-answer_lr = 1e-3
+##########################################################################
+#                   VQA Training/Finetuning Parameters                   #
+##########################################################################
+
+# Logging parameters
 answer_log_every_n_iter = 500
 answer_output_dir = os.path.join(
     global_experiment_dir,
@@ -195,37 +228,65 @@ answer_output_dir = os.path.join(
     
 mkdir_if_not_exists(answer_output_dir)
 
-pretrained_model = '/home/nfs/tgupta6/projects/GenVQA/Exp_Results/' +\
-                   'object_attribute_classifier_large_images_vqa_split/' +\
-                   'object_attribute_classifiers/model-80000'
-
 answer_model = os.path.join(
     answer_output_dir,
     'model')
 
+# Answer classifier training params
+answer_train_from_scratch = True
+
+answer_batch_size = 25
+answer_num_epochs = 20
+answer_queue_size = 500
+answer_regularization_coeff = 1e-5
+answer_lr = 1e-3
+
+answer_obj_atr_loss_wt = 1.0
+answer_ans_loss_wt = 0.0
+answer_mil_loss_wt = 0.0
+
 # Answer classifier additional joint training params
 num_regions_with_labels = 100
 
 # Answer fine tune params
-answer_fine_tune_from_iter = 22500
+answer_fine_tune_from_iter = 29500
 answer_fine_tune_from = answer_model + '-' + str(answer_fine_tune_from_iter)
 
+##########################################################################
+#                        VQA Evaluation Parameters                       #
+##########################################################################
+
+# Select best model
+models_dir = answer_output_dir
+start_model = 20000
+step_size = 4000
+model_accuracies_txt = os.path.join(
+    answer_output_dir,
+    'model_accuracies.txt')
+
 # Answer eval params
 answer_eval_on = 'val'
-answer_model_to_eval = answer_model + '-69500'
+answer_model_to_eval = answer_model + '-98000'
 
-answer_eval_data_json = os.path.join(
+vqa_results_dir = os.path.join(
     answer_output_dir,
+    'Results')
+
+mkdir_if_not_exists(vqa_results_dir)
+
+answer_eval_data_json = os.path.join(
+    vqa_results_dir,
     'eval_' + answer_eval_on + '_data.json')
 
 answer_eval_results_json = os.path.join(
-    answer_output_dir,
+    vqa_results_dir,
     'eval_' + answer_eval_on + '_results.json')
 
-# Select best model
-models_dir = answer_output_dir
-start_model = 1000
-step_size = 2000
-model_accuracies_txt = os.path.join(
-    answer_output_dir,
-    'model_accuracies.txt')
+# Fine Grained Evaluation File paths
+raw_vqa_val_ques_json = os.path.join(
+    vqa_basedir,
+    'MultipleChoice_mscoco_val2014_questions.json')
+
+raw_vqa_val_anno_json = os.path.join(
+    vqa_basedir,
+    'mscoco_val2014_annotations.json')
diff --git a/split_genome.py b/split_genome.py
index d118ed2ea913f25ad3ac8ba002e4f27b7efc7a3e..76441a7b06f1ab79e43af914083e8c93078a44fb 100644
--- a/split_genome.py
+++ b/split_genome.py
@@ -228,6 +228,7 @@ if __name__=='__main__':
             i = image_assignment[str(region_data['image_id'])]
             sets[i].add(region_id)
 
+    # NOTE(review): removed stray pdb.set_trace() debugging breakpoint
     genome_safe_region_ids = list(sets[0]) + list(sets[1])
     genome_test_region_ids = list(sets[2]) + list(sets[3])
 
diff --git a/split_genome_simple.py b/split_genome_simple.py
new file mode 100644
index 0000000000000000000000000000000000000000..7bff86e3c795739d53134fe7a85b466414bcb98a
--- /dev/null
+++ b/split_genome_simple.py
@@ -0,0 +1,307 @@
+import ujson
+import hashlib
+import os
+import glob
+import numpy as np
+import constants
+import image_io
+import pdb
+
+def vqa_filename_to_image_id(
+        filename):
+    _, image_name = os.path.split(filename)
+    return str(int(os.path.splitext(image_name.split('_')[-1])[0]))
+
+def genome_filename_to_image_id(
+        filename):
+    _, image_name = os.path.split(filename)
+    return os.path.splitext(image_name)[0]
+
+def image_id_to_vqa_filename(
+        image_dir,
+        image_id,
+        mode):
+
+    filename = os.path.join(
+        image_dir,
+        'COCO_' + mode + '_' + image_id.zfill(12) + '.jpg')
+    
+    return filename
+
+def image_id_to_genome_filename(
+        image_dir,
+        image_id):
+    
+    filename = os.path.join(
+        image_dir,
+        image_id + '.jpg')
+
+    return filename
+
+def generate_md5hash(
+        image_dir,
+        hash_json,
+        filename_to_image_id):
+    
+    files = glob.glob(os.path.join(image_dir,'*.jpg'))
+    md5_hashes = dict()
+    count = 0
+    for file in files:
+        count+=1
+        # print count, len(files)
+        try:
+            im = image_io.imread(file)
+            md5_hashes[hashlib.md5(im).hexdigest()] = \
+                filename_to_image_id(file)
+        except Exception:
+            print 'Can not read {}'.format(file)
+            # skip unreadable images and continue hashing the rest
+    
+    with open(hash_json, 'w') as file:
+        ujson.dump(md5_hashes, file)
+
+
+if __name__=='__main__':
+    genome_image_dir = os.path.join(
+        constants.data_absolute_path,
+        'images')
+
+    vqa_train2014_image_dir = os.path.join(
+        constants.vqa_basedir,
+        'train2014')
+
+    vqa_val2014_image_dir = os.path.join(
+        constants.vqa_basedir,
+        'val2014')
+
+    vqa_test2015_image_dir = os.path.join(
+        constants.vqa_basedir,
+        'test2015')
+
+    vqa_train2014_held_out_qids_json = os.path.join(
+        constants.vqa_basedir,
+        'train_held_out_qids.json')
+
+    vqa_train2014_anno_json = os.path.join(
+        constants.vqa_basedir,
+        'mscoco_train2014_annotations_with_parsed_questions.json')
+    
+    genome_hash_json = os.path.join(
+        constants.data_absolute_path,
+        'genome_hash.json')
+
+    vqa_train2014_hash_json = os.path.join(
+        constants.vqa_basedir,
+        'train2014_hash.json')
+
+    vqa_val2014_hash_json = os.path.join(
+        constants.vqa_basedir,
+        'val2014_hash.json')
+
+    vqa_test2015_hash_json = os.path.join(
+        constants.vqa_basedir,
+        'test2015_hash.json')
+
+    genome_not_in_vqa_train2014_subset_region_ids_json = os.path.join(
+        constants.data_absolute_path,
+        'restructured/genome_not_in_vqa_train2014_subset_region_ids.json')
+
+    genome_train_region_ids_json = os.path.join(
+        constants.data_absolute_path,
+        'restructured/train_region_ids_simple.json')
+
+    genome_val_region_ids_json = os.path.join(
+        constants.data_absolute_path,
+        'restructured/val_region_ids_simple.json')
+
+    genome_test_region_ids_json = os.path.join(
+        constants.data_absolute_path,
+        'restructured/test_region_ids_simple.json')
+
+    # print 'Generating {}'.format(genome_hash_json)
+    # generate_md5hash(
+    #     genome_image_dir,
+    #     genome_hash_json,
+    #     genome_filename_to_image_id)
+
+    # print 'Generating {}'.format(vqa_train2014_hash_json)
+    # generate_md5hash(
+    #     vqa_train2014_image_dir,
+    #     vqa_train2014_hash_json,
+    #     vqa_filename_to_image_id)
+
+    # print 'Generating {}'.format(vqa_val2014_hash_json)
+    # generate_md5hash(
+    #     vqa_val2014_image_dir,
+    #     vqa_val2014_hash_json,
+    #     vqa_filename_to_image_id)
+
+    # print 'Generating {}'.format(vqa_test2015_hash_json)
+    # generate_md5hash(
+    #     vqa_test2015_image_dir,
+    #     vqa_test2015_hash_json,
+    #     vqa_filename_to_image_id)
+
+
+    print 'Reading {} ...'.format(vqa_train2014_anno_json)
+    with open(vqa_train2014_anno_json, 'r') as file:
+        vqa_train2014_anno = ujson.load(file)
+
+    print 'Reading {} ...'.format(vqa_train2014_held_out_qids_json)
+    with open(vqa_train2014_held_out_qids_json, 'r') as file:
+        vqa_train2014_held_out_qids = ujson.load(file)
+
+    
+    vqa_train2014_held_out_image_ids = set()
+    for qid in list(vqa_train2014_held_out_qids):
+        image_id = str(vqa_train2014_anno[qid]['image_id'])
+        # set.add is idempotent; no membership check needed
+        vqa_train2014_held_out_image_ids.add(image_id)
+        
+    # Hash jsons keyed by dataset name; 'genome' is looked up by key below
+    hashes_to_load = {
+        'genome': genome_hash_json,
+        'vqa_train': vqa_train2014_hash_json,
+        'vqa_val': vqa_val2014_hash_json,
+        'vqa_test': vqa_test2015_hash_json
+    }
+
+    hashes = dict()
+    for hash_name, hash_to_load in hashes_to_load.items():
+        with open(hash_to_load,'r') as file:
+            hashes[hash_name] = ujson.load(file)
+
+    image_assignment = dict()
+    for hash_key, image_id in hashes['genome'].items():
+        if hash_key in hashes['vqa_train']:
+            if hashes['vqa_train'][hash_key] in vqa_train2014_held_out_image_ids:
+                image_assignment[image_id] = 'in_vqa_train_held_out'
+            else:
+                image_assignment[image_id] = 'in_vqa_train_subset'
+
+        elif hash_key in hashes['vqa_val']:
+            image_assignment[image_id] = 'in_vqa_val'
+
+        elif hash_key in hashes['vqa_test']:
+            image_assignment[image_id] = 'in_vqa_test'
+
+        else:
+            image_assignment[image_id] = 'not_in_vqa'
+
+           
+    split_of_image_ids_based_on_vqa = {
+        'not_in_vqa': set(),
+        'in_vqa_train_subset': set(),
+        'in_vqa_train_held_out': set(),
+        'in_vqa_val': set(),
+        'in_vqa_test': set(),
+    }
+
+    for image_id, vqa_identity in image_assignment.items():
+        split_of_image_ids_based_on_vqa[vqa_identity].add(image_id)
+
+    for vqa_identity, set_of_image_ids in split_of_image_ids_based_on_vqa.items():
+        print '{}: {}'.format(vqa_identity,len(set_of_image_ids))
+    
+    list_of_genome_image_ids_not_in_vqa = list(split_of_image_ids_based_on_vqa['not_in_vqa'])
+    total_num_genome_image_ids = len(list_of_genome_image_ids_not_in_vqa)
+    split_fractions = [0.85, 0.9, 1.0]
+    split_numbers = [int(frac*total_num_genome_image_ids) for frac in split_fractions]
+    split_genome_image_ids_not_in_vqa = {
+        'train': set(list_of_genome_image_ids_not_in_vqa[0:split_numbers[0]]),
+        'val': set(list_of_genome_image_ids_not_in_vqa[split_numbers[0]:split_numbers[1]]),
+        'test': set(list_of_genome_image_ids_not_in_vqa[split_numbers[1]:]),
+    }
+
+    print 'Number of images in genome split'
+    for identity, image_ids in split_genome_image_ids_not_in_vqa.items():
+        print '{}: {}'.format(identity,len(image_ids))
+        
+    with open(constants.regions_json, 'r') as file:
+        regions_data = ujson.load(file)
+
+    split_genome_region_ids_not_in_vqa = {
+        'train': [],
+        'val': [],
+        'test': [],
+    }
+
+    for region_id, region_data in regions_data.items():
+        if str(region_data['image_id']) in split_genome_image_ids_not_in_vqa['train']:
+            split_genome_region_ids_not_in_vqa['train'].append(region_id)
+        elif str(region_data['image_id']) in split_genome_image_ids_not_in_vqa['val']:
+            split_genome_region_ids_not_in_vqa['val'].append(region_id)
+        elif str(region_data['image_id']) in split_genome_image_ids_not_in_vqa['test']:
+            split_genome_region_ids_not_in_vqa['test'].append(region_id)
+            
+            
+    print 'Number of regions in genome split'
+    for identity, region_ids in split_genome_region_ids_not_in_vqa.items():
+        print '{}: {}'.format(identity, len(region_ids))
+
+    with open(genome_train_region_ids_json,'w') as file:
+        ujson.dump(split_genome_region_ids_not_in_vqa['train'],file)
+
+    with open(genome_val_region_ids_json,'w') as file:
+        ujson.dump(split_genome_region_ids_not_in_vqa['val'],file)
+
+    with open(genome_test_region_ids_json,'w') as file:
+        ujson.dump(split_genome_region_ids_not_in_vqa['test'],file)
+        
+    # splits = [
+    #     'unknown',
+    #     'train2014_subset',
+    #     'val2014',
+    #     'test2015',
+    #     'train2014_held_out']
+
+    # sets = [set() for i in xrange(len(splits))]
+    
+    # for region_id, region_data in regions_data.items():
+    #     if str(region_data['image_id']) in image_assignment:
+    #         i = image_assignment[str(region_data['image_id'])]
+    #         sets[i].add(region_id)
+
+    # pdb.set_trace()
+    # genome_safe_region_ids = list(sets[0]) + list(sets[1])
+    # genome_test_region_ids = list(sets[2]) + list(sets[3])
+
+    # with open(genome_not_in_vqa_train2014_subset_region_ids_json, 'w') as file:
+    #     ujson.dump(genome_safe_region_ids, file)
+
+    # with open(genome_test_region_ids_json, 'w') as file:
+    #     ujson.dump(genome_test_region_ids, file)
+
+    # split_frac = 0.9
+    # num_valid_train_regions = len(genome_safe_region_ids)
+    # num_genome_train_subset_regions = int(num_valid_train_regions*split_frac)
+    # num_genome_train_held_out_regions = num_valid_train_regions - num_genome_train_subset_regions
+    # train_subset_ids = set(
+    #     np.random.choice(
+    #         num_valid_train_regions, 
+    #         num_genome_train_subset_regions,
+    #         replace=False))
+
+    # genome_train_subset_region_ids = set()
+    # genome_train_held_out_region_ids = set()
+    # for i in xrange(num_valid_train_regions):
+    #     if i in train_subset_ids:
+    #         genome_train_subset_region_ids.add(genome_safe_region_ids[i])
+    #     else:
+    #         genome_train_held_out_region_ids.add(genome_safe_region_ids[i])
+        
+    # print len(genome_train_subset_region_ids), len(genome_train_held_out_region_ids)
+    # with open(genome_train_subset_region_ids_json, 'w') as file:
+    #     ujson.dump(genome_train_subset_region_ids, file)
+
+    # with open(genome_train_held_out_region_ids_json, 'w') as file:
+    #     ujson.dump(genome_train_held_out_region_ids, file)
+        
+    # pdb.set_trace()
+
+    
+        
+        
+    
+    
+