diff --git a/answer_classifier_cached_features/eval.py b/answer_classifier_cached_features/eval.py
index 51d0080ed9c18921e0560fbc5198838321c71d48..046161ce4c9a4438a2eff0937081c8585a51cff7 100644
--- a/answer_classifier_cached_features/eval.py
+++ b/answer_classifier_cached_features/eval.py
@@ -51,6 +51,8 @@ def create_batch_generator(mode):
         qids_json,
         constants.vocab_json,
         constants.vqa_answer_vocab_json,
+        constants.object_labels_json,
+        constants.attribute_labels_json,
         constants.image_size,
         constants.num_region_proposals,
         constants.num_negative_answers,
@@ -267,13 +269,16 @@ if __name__=='__main__':
         0,
         0,
         constants.answer_obj_atr_loss_wt,
+        constants.answer_ans_loss_wt,
+        constants.answer_mil_loss_wt,
         resnet_feat_dim=constants.resnet_feat_dim,
         training=False)
 
     print 'Starting a session...'
     config = tf.ConfigProto()
     config.gpu_options.allow_growth = True
-    config.gpu_options.per_process_gpu_memory_fraction = 0.5
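+    # Allow this eval session to use up to 90% of the GPU's memory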
+    config.gpu_options.per_process_gpu_memory_fraction = 0.9
     sess = tf.Session(config=config, graph=graph.tf_graph)
 
     print 'Creating initializer...'
diff --git a/answer_classifier_cached_features/train.py b/answer_classifier_cached_features/train.py
index e2bd786192c4ccca3d472b9416cf6fbce8d098ad..a3705e6541a3e53b8c21199e95bf249fbdbb3004 100644
--- a/answer_classifier_cached_features/train.py
+++ b/answer_classifier_cached_features/train.py
@@ -15,7 +15,7 @@ import numpy as np
 import pdb
 from itertools import izip
 import tensorflow as tf
-
+import gc
 
 class graph_creator():
     def __init__(
@@ -484,23 +484,24 @@ class graph_creator():
                     ["accuracy_answer"],
                     self.moving_average_accuracy)
 
-            # object
-            self.object_accuracy = self.add_object_accuracy_computation(
-                self.object_scores_with_labels,
-                self.plh['object_labels'])
-
-            object_accuracy_summary = tf.scalar_summary(
-                "accuracy_object", 
-                self.object_accuracy)
-
-            # attributes
-            self.attribute_accuracy = self.add_attribute_accuracy_computation(
-                self.attribute_scores_with_labels,
-                self.plh['attribute_labels'])
-
-            attribute_accuracy_summary = tf.scalar_summary(
-                "accuracy_attribute", 
-                self.attribute_accuracy)
+            if self.training:
+                # object
+                self.object_accuracy = self.add_object_accuracy_computation(
+                    self.object_scores_with_labels,
+                    self.plh['object_labels'])
+
+                object_accuracy_summary = tf.scalar_summary(
+                    "accuracy_object", 
+                    self.object_accuracy)
+
+                # attributes
+                self.attribute_accuracy = self.add_attribute_accuracy_computation(
+                    self.attribute_scores_with_labels,
+                    self.plh['attribute_labels'])
+
+                attribute_accuracy_summary = tf.scalar_summary(
+                    "accuracy_attribute", 
+                    self.attribute_accuracy)
             
     def add_answer_accuracy_computation(self, scores):
         with tf.variable_scope('answer_accuracy'):
@@ -596,6 +597,7 @@ def create_scratch_initializer(graph, sess):
     return initializer()
 
 def create_vqa_batch_generator():
+    
     data_mgr = vqa_data.data(
         constants.vqa_train_resnet_feat_dir,
         constants.vqa_train_anno,
@@ -617,11 +619,15 @@ def create_vqa_batch_generator():
         num_train_subset_questions, 
         constants.answer_num_epochs, 
         0)
-    
-    batch_generator = tftools.data.async_batch_generator(
+
+    batch_generator = tftools.data.batch_generator(
         data_mgr, 
-        index_generator, 
-        constants.answer_queue_size)
+        index_generator)
+
+    # batch_generator = tftools.data.async_batch_generator(
+    #     data_mgr, 
+    #     index_generator, 
+    #     constants.answer_queue_size)
     
     return batch_generator
 
@@ -647,7 +653,7 @@ def create_vgenome_batch_generator():
         num_train_subset_regions,
         constants.region_num_epochs, 
         0)
-    
+
     batch_generator = tftools.data.async_batch_generator(
         data_mgr, 
         index_generator, 
@@ -895,11 +901,16 @@ def train(
                 zip(vars_to_eval_names, eval_vars)}
             logger.log(iter, False, eval_vars_dict)
             iter+=1
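+
+            # Force a garbage collection pass every 8000 iterations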
+            if iter%8000==0 and iter!=0:
+                gc.collect()
             
         logger.log(iter-1, True, eval_vars_dict)
 
 
 if __name__=='__main__':
+   
     print 'Creating batch generator...'
     batch_generator = create_batch_generator()
 
@@ -921,11 +932,13 @@ if __name__=='__main__':
         resnet_feat_dim=constants.resnet_feat_dim,
         training=True)
 
+
     print 'Attaching optimizer...'
     optimizer = attach_optimizer(
         graph, 
         constants.answer_lr)
 
+
     print 'Starting a session...'
     config = tf.ConfigProto()
     config.gpu_options.allow_growth = True
diff --git a/constants_vision_gpu_1.py b/constants_vision_gpu_1.py
index dd75d168d035dbdc01d161253f0a619cd04dcaf3..24f562690923d7a2e5d1da3f941f025e3fdf8c7e 100644
--- a/constants_vision_gpu_1.py
+++ b/constants_vision_gpu_1.py
@@ -120,9 +120,16 @@ region_model = os.path.join(
 region_fine_tune_from_iter = 3000
 region_fine_tune_from = region_model + '-' + str(region_fine_tune_from_iter)
 
+# Object Attribute Model Selection
+region_start_model = 8000
+region_step_size = 8000
+region_model_accuracies_txt = os.path.join(
+    region_output_dir,
+    'model_accuracies.txt')
+
 # Object Attribute Classifier Evaluation Params
-region_eval_on = 'train_held_out' # One of {'train_subset','train_held_out','test'}
-region_model_to_eval = region_model + '-' + '77500'
+region_eval_on = 'test' # One of {'train_subset','train_held_out','test'}
+region_model_to_eval = region_model + '-' + '72000'
 
 region_attribute_scores_dirname = os.path.join(
     region_output_dir,
@@ -214,7 +221,7 @@ answer_fine_tune_from = answer_model + '-' + str(answer_fine_tune_from_iter)
 
 # Answer eval params
 answer_eval_on = 'val'
-answer_model_to_eval = answer_model + '-55000'
+answer_model_to_eval = answer_model + '-72000'
 
 answer_eval_data_json = os.path.join(
     answer_output_dir,
diff --git a/data/cropped_regions_cached_features.py b/data/cropped_regions_cached_features.py
index c0f72b1a584243b3b20cfa6eb24099c15a5472b4..8adf16dcfb1d21447e0b8623c158f554b7804e2a 100644
--- a/data/cropped_regions_cached_features.py
+++ b/data/cropped_regions_cached_features.py
@@ -4,7 +4,7 @@ import ujson
 import os
 import pdb
 import time
-import threading
+#import threading
 
 import tftools.data 
 import image_io
@@ -90,8 +90,9 @@ class data():
             batch_list[worker_id] = batch
 
         except Exception, e:
-            print 'Error in thread {}: {}'.format(
-                threading.current_thread().name, str(e))
+            print 'Error while reading image: {}'.format(str(e))
+            # print 'Error in thread {}: {}'.format(
+            #     threading.current_thread().name, str(e))
 
 
     def get_parallel(self, samples):
diff --git a/data/vqa_cached_features.py b/data/vqa_cached_features.py
index ac04cefef38bd03abaa6f207d379c5621096a689..681356f54689c100f6214eae9ca3506c0a02497a 100644
--- a/data/vqa_cached_features.py
+++ b/data/vqa_cached_features.py
@@ -5,7 +5,7 @@ import re
 import pdb
 import time
 import nltk
-import threading
+#import threading
 
 import tftools.data 
 import image_io
@@ -210,8 +210,9 @@ class data():
             batch_list[worker_id] = region_image
 
         except Exception, e:
-            print 'Error in thread {}: {}'.format(
-                threading.current_thread().name, str(e))
+            print 'Error while reading image: {}'.format(str(e))
+            # print 'Error in thread {}: {}'.format(
+            #     threading.current_thread().name, str(e))
 
     def get_region_image(self, sample, region_number):
         question_id = self.sample_to_question_dict[sample]
diff --git a/object_attribute_classifier_cached_features/select_best_model.py b/object_attribute_classifier_cached_features/select_best_model.py
index 198ac93ed468b0413315875de210ea068d77f2ea..2aa5426371a4ac8bfcb75f2f6bfe9b30fe74ba57 100644
--- a/object_attribute_classifier_cached_features/select_best_model.py
+++ b/object_attribute_classifier_cached_features/select_best_model.py
@@ -316,7 +316,8 @@ def model_path_generator(models_dir, start_model, step_size):
 
 if __name__=='__main__':
     model_paths = model_path_generator(
-        constants.region_output_dir,
+        constants.models_dir,
+        #constants.region_output_dir,
         constants.region_start_model,
         constants.region_step_size)
 
diff --git a/split_genome.py b/split_genome.py
index d118ed2ea913f25ad3ac8ba002e4f27b7efc7a3e..d2fc0dd6c255e0ebff139758ce1d21b5aff91251 100644
--- a/split_genome.py
+++ b/split_genome.py
@@ -151,7 +151,8 @@ if __name__=='__main__':
     with open(vqa_train2014_held_out_qids_json, 'r') as file:
         vqa_train2014_held_out_qids = ujson.load(file)
 
-    
+
+    # Collect the image ids for the held-out vqa train2014 questions
     vqa_train2014_held_out_image_ids = set()
     for qid in list(vqa_train2014_held_out_qids):
         image_id = str(vqa_train2014_anno[qid]['image_id'])