diff --git a/answer_classifier_cached_features/eval_test.py b/answer_classifier_cached_features/eval_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..a8e31cd0d04f1e40401baeb49dba7910401ad810
--- /dev/null
+++ b/answer_classifier_cached_features/eval_test.py
@@ -0,0 +1,331 @@
+# from word2vec.word_vector_management import word_vector_manager
+# import object_attribute_classifier.inference as feature_graph 
+# import region_relevance_network.inference as relevance_graph
+# import answer_classifier.inference as answer_graph
+from tftools import var_collect, placeholder_management
+import tftools.data
+import losses
+import constants
+import tftools.var_collect as var_collect
+import data.vqa_cached_features_test as vqa_data
+import answer_classifier_cached_features.train as train
+
+import numpy as np
+import pdb
+import ujson
+import tensorflow as tf
+
+
def create_initializer(graph, sess, model):
    """Build an object whose initialize() runs the init op for all variables
    not restored from the checkpoint.

    Restoration of the saved weights from `model` happens immediately, at
    construction time; only the initialization of the remaining variables is
    deferred to initialize().
    """
    class initializer():
        def __init__(self):
            with graph.tf_graph.as_default():
                restored_vars = graph.vars_to_save
                tf.train.Saver(restored_vars).restore(sess, model)
                # Every variable not covered by the checkpoint must be
                # initialized from scratch.
                uninitialized = [
                    v for v in tf.all_variables()
                    if v not in restored_vars]
                var_collect.print_var_list(uninitialized, 'vars_to_init')
                self.init = tf.initialize_variables(uninitialized)

        def initialize(self):
            sess.run(self.init)

    return initializer()
+
def create_batch_generator(mode):
    """Create an asynchronous VQA batch generator for the requested split.

    Args:
        mode: one of 'val', 'test', 'testdev'.

    Returns:
        A generator yielding batches assembled from vqa_data.data.

    Raises:
        ValueError: if mode is not a recognized split name.
    """
    if mode == 'val':
        vqa_resnet_feat_dir = constants.vqa_val_resnet_feat_dir
        vqa_anno = constants.vqa_val_anno
        qids_json = constants.vqa_val_qids
    elif mode == 'test':
        vqa_resnet_feat_dir = constants.vqa_test_resnet_feat_dir
        vqa_anno = constants.vqa_test_anno
        qids_json = constants.vqa_test_qids
    elif mode == 'testdev':
        # test-dev shares its image features with the full test split.
        vqa_resnet_feat_dir = constants.vqa_test_resnet_feat_dir
        vqa_anno = constants.vqa_testdev_anno
        qids_json = constants.vqa_testdev_qids
    else:
        # BUG FIX: previously this only printed a warning and then crashed
        # below with an UnboundLocalError; fail fast with a clear error.
        raise ValueError(
            "mode needs to be one of {'val','test','testdev'}, found " + mode)

    data_mgr = vqa_data.data(
        vqa_resnet_feat_dir,
        vqa_anno,
        qids_json,
        constants.vocab_json,
        constants.vqa_answer_vocab_json,
        constants.object_labels_json,
        constants.attribute_labels_json,
        constants.image_size,
        constants.num_region_proposals,
        constants.num_negative_answers,
        resnet_feat_dim=constants.resnet_feat_dim)

    num_questions = len(data_mgr.qids)

    # Sequential (not shuffled) indices: evaluation covers each question once.
    index_generator = tftools.data.sequential(
        constants.answer_batch_size,
        num_questions,
        1,
        0)

    batch_generator = tftools.data.async_batch_generator(
        data_mgr,
        index_generator,
        constants.answer_queue_size)

    return batch_generator
+
+
def create_feed_dict_creator(plh, num_neg_answers):
    """Return a closure that converts a raw VQA batch into a feed dict
    through the placeholder manager `plh`.
    """
    def feed_dict_creator(batch):
        vqa_batch = batch
        batch_size = len(vqa_batch['question'])

        # Region features and the ground-truth answer.
        inputs = {
            'region_feats': np.concatenate(vqa_batch['region_feats'], axis=0),
            'positive_answer': vqa_batch['positive_answer'],
        }

        # Question words, bucketed into 4 parse bins per question.
        for i in range(4):
            bin_name = 'bin_' + str(i)
            inputs[bin_name] = [
                vqa_batch['question'][j][bin_name]
                for j in range(batch_size)]

        # One placeholder per sampled negative answer.
        for i in range(num_neg_answers):
            inputs['negative_answer_' + str(i)] = [
                vqa_batch['negative_answers'][j][i]
                for j in range(batch_size)]

        # Nouns/adjectives pooled from the question and the positive answer.
        inputs['positive_nouns'] = [
            q + a for q, a in zip(
                vqa_batch['question_nouns'],
                vqa_batch['positive_answer_nouns'])]
        inputs['positive_adjectives'] = [
            q + a for q, a in zip(
                vqa_batch['question_adjectives'],
                vqa_batch['positive_answer_adjectives'])]

        # Same pooling, repeated per negative answer.
        for i in range(num_neg_answers):
            ith_nouns = [
                vqa_batch['negative_answers_nouns'][j][i]
                for j in range(batch_size)]
            inputs['negative_nouns_' + str(i)] = [
                q + a for q, a in zip(
                    vqa_batch['question_nouns'], ith_nouns)]

            ith_adjectives = [
                vqa_batch['negative_answers_adjectives'][j][i]
                for j in range(batch_size)]
            inputs['negative_adjectives_' + str(i)] = [
                q + a for q, a in zip(
                    vqa_batch['question_adjectives'], ith_adjectives)]

        inputs['yes_no_feat'] = vqa_batch['yes_no_feat']

        # Evaluation mode: dropout disabled.
        inputs['keep_prob'] = 1.0

        return plh.get_feed_dict(inputs)

    return feed_dict_creator
+
+
class eval_mgr():
    """Accumulates per-question predictions and running accuracy during
    evaluation, and periodically dumps them to json files.
    """

    def __init__(self, eval_data_json, results_json):
        # Output paths: detailed per-question data and VQA-format results.
        self.eval_data_json = eval_data_json
        self.results_json = results_json
        self.eval_data = dict()
        self.correct = 0
        self.total = 0
        self.results = []
        # Guards against double-counting a question across batches.
        self.seen_qids = set()

    def eval(self, iter, eval_vars_dict, batch):
        """Record scores for one evaluated batch.

        Args:
            iter: current batch index; data is flushed every 100 iterations.
            eval_vars_dict: maps 'accuracy', 'answer_score_j' and
                'relevance_prob_j' to the fetched numpy values.
            batch: the raw (unencoded) batch that produced those values.
        """
        batch_size = len(batch['question_unencoded'])

        for j in range(batch_size):
            scores = eval_vars_dict['answer_score_' + str(j)]

            dict_entry = dict()
            dict_entry['question'] = batch['question_unencoded'][j]
            # Column 0 holds the positive answer's score.
            dict_entry['positive_answer'] = {
                batch['positive_answer_unencoded'][j]: str(scores[0, 0])}

            dict_entry['negative_answers'] = dict()
            for i, answer in enumerate(
                    batch['negative_answers_unencoded'][j]):
                dict_entry['negative_answers'][answer] = str(scores[0, i+1])

            dict_entry['relevance_scores'] = \
                eval_vars_dict['relevance_prob_' + str(j)].tolist()

            question_id = batch['question_id'][j]
            pred_answer, pred_score = self.get_pred_answer(
                [batch['positive_answer_unencoded'][j]] +
                batch['negative_answers_unencoded'][j],
                scores[0, :].tolist())

            result_entry = {
                'question_id': int(question_id),
                'answer': pred_answer
            }

            if question_id not in self.seen_qids:
                self.seen_qids.add(question_id)
                self.results.append(result_entry)
            else:
                print('Already evaluated on this sample')

            self.eval_data[str(question_id)] = dict_entry

        self.total += batch_size
        # 'accuracy' is a per-batch mean; convert it back to a correct count.
        self.correct += eval_vars_dict['accuracy']*batch_size

        self.print_accuracy()

        if iter % 100 == 0:
            self.write_data()

    def get_pred_answer(self, answers, scores):
        """Return the (answer, score) pair with the highest score."""
        pred_answer = ''
        pred_score = -1e5
        for answer, score in zip(answers, scores):
            if score > pred_score:
                pred_score = score
                pred_answer = answer

        return pred_answer, pred_score

    def is_correct(self, answer_scores):
        """Return True iff the positive answer (column 0) scores highest.

        BUG FIX: previously returned None (not False) in the incorrect case.
        """
        max_id = np.argmax(answer_scores, 1)
        return bool(max_id[0] == 0)

    def print_accuracy(self):
        print('Total: {}  Correct: {}  Accuracy: {}'.format(
            self.total,
            self.correct,
            self.correct/float(self.total)))

    def write_data(self):
        """Dump accumulated eval data and VQA-format results to disk."""
        with open(self.eval_data_json, 'w') as file:
            ujson.dump(self.eval_data, file, indent=4, sort_keys=True)

        with open(self.results_json, 'w') as file:
            ujson.dump(self.results, file, indent=4, sort_keys=True)
+        
+        
def eval(
        batch_generator, 
        sess, 
        initializer,
        vars_to_eval_dict,
        feed_dict_creator,
        evaluator):
    """Run the model over every batch and hand the results to evaluator.

    vars_to_eval_dict maps names to graph tensors; the fetched values are
    returned to evaluator.eval under the same names.
    """
    # Keep names and tensors in two aligned lists so sess.run output can be
    # zipped back to its names.
    vars_to_eval_names = list(vars_to_eval_dict.keys())
    vars_to_eval = [vars_to_eval_dict[name] for name in vars_to_eval_names]

    with sess.as_default():
        initializer.initialize()

        for iter, batch in enumerate(batch_generator):
            print('---')
            print('Iter: {}'.format(iter))
            feed_dict = feed_dict_creator(batch)
            fetched = sess.run(
                vars_to_eval,
                feed_dict=feed_dict)
            evaluator.eval(
                iter,
                dict(zip(vars_to_eval_names, fetched)),
                batch)

        evaluator.write_data()
+
+
if __name__ == '__main__':
    # Evaluation driver: builds the data pipeline and graph, restores a
    # trained model, and scores every question in the chosen split.
    print('Creating batch generator...')
    batch_generator = create_batch_generator(constants.answer_eval_on)

    print('Creating computation graph...')
    graph = train.graph_creator(
        constants.tb_log_dir,
        constants.answer_batch_size,
        constants.image_size,
        constants.num_negative_answers,
        constants.answer_regularization_coeff,
        constants.answer_batch_size*constants.num_region_proposals,
        0,
        0,
        0,
        constants.answer_obj_atr_loss_wt,
        constants.answer_ans_loss_wt,
        constants.answer_mil_loss_wt,
        resnet_feat_dim=constants.resnet_feat_dim,
        training=False)

    print('Starting a session...')
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = 0.9
    sess = tf.Session(config=config, graph=graph.tf_graph)

    print('Creating initializer...')
    initializer = create_initializer(
        graph,
        sess,
        constants.answer_model_to_eval)

    print('Creating feed dict creator...')
    feed_dict_creator = create_feed_dict_creator(
        graph.plh,
        constants.num_negative_answers)

    print('Creating dict of vars to be evaluated...')
    # Fetch the batch accuracy plus per-question answer scores and region
    # relevance probabilities.
    vars_to_eval_dict = {
        'accuracy': graph.answer_accuracy,
    }
    for j in range(constants.answer_batch_size):
        vars_to_eval_dict['answer_score_' + str(j)] = \
            graph.answer_inference.answer_score[j]
        vars_to_eval_dict['relevance_prob_' + str(j)] = \
            graph.relevance_inference.answer_region_prob[j]

    print('Creating evaluation manager...')
    evaluator = eval_mgr(
        constants.answer_eval_data_json,
        constants.answer_eval_results_json)

    # BUG FIX: this script evaluates; the old message said 'Start training...'.
    print('Starting evaluation...')
    eval(
        batch_generator,
        sess,
        initializer,
        vars_to_eval_dict,
        feed_dict_creator,
        evaluator)
+
+
+    
+
+            
+    
diff --git a/constants_crunchy.py b/constants_crunchy.py
index 7dee0b0f16bb83d23b83ed9926304dff450c8775..c24380a41322406369f12679d50adefee6f3a258 100644
--- a/constants_crunchy.py
+++ b/constants_crunchy.py
@@ -5,7 +5,7 @@ def mkdir_if_not_exists(dir_name):
     if not os.path.exists(dir_name):
         os.mkdir(dir_name)
         
-experiment_name = 'obj_atr_through_mil_det_scores'
+experiment_name = 'ans_through_obj_atr_det_debug_rel'
 
 ##########################################################################
 #                    Machine Specific Paths                              #
@@ -210,6 +210,22 @@ vqa_val_qids = os.path.join(
     vqa_basedir,
     'val_qids.json')
 
+vqa_test_resnet_feat_dir = os.path.join(
+    vqa_basedir,
+    'test2015_cropped_large_resnet_features')
+vqa_test_anno = os.path.join(
+    vqa_basedir,
+    'mscoco_test2015_annotations_with_parsed_questions.json')
+vqa_test_qids = os.path.join(
+    vqa_basedir,
+    'test_qids.json')
+vqa_testdev_anno = os.path.join(
+    vqa_basedir,
+    'mscoco_test-dev2015_annotations_with_parsed_questions.json')
+vqa_testdev_qids = os.path.join(
+    vqa_basedir,
+    'test-dev_qids.json')
+
 vqa_answer_vocab_json = os.path.join(
     vqa_basedir,
     'answer_vocab.json')
@@ -269,8 +285,8 @@ model_accuracies_txt = os.path.join(
     'model_accuracies.txt')
 
 # Answer eval params
-answer_eval_on = 'val'
-answer_model_to_eval = answer_model + '-1000'
+answer_eval_on = 'testdev'
+answer_model_to_eval = answer_model + '-43000'
 
 vqa_results_dir = os.path.join(
     answer_output_dir,
@@ -294,3 +310,11 @@ raw_vqa_val_ques_json = os.path.join(
 raw_vqa_val_anno_json = os.path.join(
     vqa_basedir,
     'mscoco_val2014_annotations.json')
+
+raw_vqa_test_ques_json = os.path.join(
+    vqa_basedir,
+    'MultipleChoice_mscoco_test2015_questions.json')
+
+raw_vqa_test_anno_json = os.path.join(
+    vqa_basedir,
+    'mscoco_test2015_annotations.json')
diff --git a/data/vqa_cached_features_test.py b/data/vqa_cached_features_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..93a4ac0686e230d6f99b6b544a65af7dbbc59bc0
--- /dev/null
+++ b/data/vqa_cached_features_test.py
@@ -0,0 +1,440 @@
+import numpy as np
+import ujson
+import os
+import re
+import pdb
+import time
+import nltk
+import threading
+
+import tftools.data 
+import image_io
+import constants
+
+import tensorflow as tf
+
+_unknown_token = constants.unknown_token
+
def unwrap_self_get_single(arg, **kwarg):
    # Module-level trampoline so data.get_single can be dispatched through
    # pickling-based parallelism (bound methods cannot be pickled directly).
    # `arg` is presumably (data_instance, sample, batch_list, worker_id) —
    # TODO confirm against the caller; none is visible in this file.
    return data.get_single(*arg, **kwarg)
+
class data():
    """Data manager for VQA evaluation from cached ResNet region features.

    Each sample index maps (via the qids json) to one question. The manager
    encodes the question, the positive (ground-truth) answer and the
    negative answers, pools their nouns/adjectives, loads the pre-extracted
    region features for the question's image, and collates samples into
    batches via get_parallel.
    """

    def __init__(self,
                 feat_dir,
                 anno_json,
                 qids_json,
                 vocab_json,
                 ans_vocab_json,
                 obj_labels_json,
                 atr_labels_json,
                 image_size,
                 num_region_proposals,
                 num_neg_answers,
                 channels=3,
                 mode='mcq',
                 resnet_feat_dim=2048,
                 mean_image_filename=None):
        self.feat_dir = feat_dir
        self.h = image_size[0]
        self.w = image_size[1]
        self.c = channels
        self.mode = mode
        # BUG FIX: was hard-coded to 2048, silently ignoring the parameter.
        self.resnet_feat_dim = resnet_feat_dim
        self.num_region_proposals = num_region_proposals
        self.num_neg_answers = num_neg_answers
        self.anno = self.read_json_file(anno_json)
        self.vocab = self.read_json_file(vocab_json)
        self.ans_vocab = self.read_json_file(ans_vocab_json)
        self.obj_labels = self.read_json_file(obj_labels_json)
        self.atr_labels = self.read_json_file(atr_labels_json)
        self.qids = self.read_json_file(qids_json)
        self.inv_vocab = self.invert_label_dict(self.vocab)
        self.inv_ans_vocab = self.invert_label_dict(self.ans_vocab)
        self.num_questions = len(self.anno)
        self.create_sample_to_question_dict()
        self.lemmatizer = nltk.stem.WordNetLemmatizer()

    def create_sample_to_question_dict(self):
        # Dense sample index -> question id (order given by the qids json).
        self.sample_to_question_dict = dict(enumerate(self.qids))

    def invert_label_dict(self, label_dict):
        """Invert a {label: id} mapping to {id: label}."""
        return {v: k for k, v in label_dict.items()}

    def read_json_file(self, filename):
        print('Reading {} ...'.format(filename))
        with open(filename, 'r') as file:
            return ujson.load(file)

    def get_single(self, sample, batch_list, worker_id):
        """Assemble all features for one sample into batch_list[worker_id]."""
        batch = dict()

        batch['region_feats'] = self.get_region_feats(sample)

        question, nouns, adjectives, question_id, question_unencoded = \
            self.get_question(sample)
        batch['question'] = question
        batch['question_nouns'] = nouns
        batch['question_adjectives'] = adjectives
        batch['question_id'] = question_id
        batch['question_unencoded'] = question_unencoded

        positive_answer, nouns, adjectives, positive_answer_unencoded = \
            self.get_positive_answer(sample, self.mode)
        batch['positive_answer'] = positive_answer
        batch['positive_answer_nouns'] = nouns
        batch['positive_answer_adjectives'] = adjectives
        batch['positive_answer_unencoded'] = positive_answer_unencoded

        negative_answers, nouns, adjectives, negative_answers_unencoded = \
            self.get_negative_answers(sample, self.mode)
        batch['negative_answers'] = negative_answers
        batch['negative_answers_nouns'] = nouns
        batch['negative_answers_adjectives'] = adjectives
        batch['negative_answers_unencoded'] = negative_answers_unencoded

        # Nouns/adjectives pooled over question + positive answer.
        batch['positive_nouns'] = \
            batch['question_nouns'] + batch['positive_answer_nouns']
        batch['positive_adjectives'] = \
            batch['question_adjectives'] + batch['positive_answer_adjectives']

        _, batch['positive_nouns_vec_enc'] = self.noun_to_obj_id(
            batch['positive_nouns'],
            positive_answer_unencoded)
        _, batch['positive_adjectives_vec_enc'] = self.adj_to_atr_id(
            batch['positive_adjectives'],
            positive_answer_unencoded)

        batch['yes_no_feat'] = self.is_yes_no(
            positive_answer_unencoded,
            negative_answers_unencoded)

        batch_list[worker_id] = batch

    def get_parallel(self, samples):
        """Fetch features for all samples and collate them into a dict of
        lists (one entry per sample).

        Despite the name, fetching is sequential; the threaded variant was
        intentionally disabled in the original.
        """
        batch_list = [None]*len(samples)
        for worker_id, sample in enumerate(samples):
            self.get_single(sample, batch_list, worker_id)

        batch = {key: [] for key in batch_list[0]}
        for single_batch in batch_list:
            for key, value in single_batch.items():
                batch[key].append(value)

        return batch

    def is_yes_no(
            self,
            positive_answer_unencoded,
            negative_answers_unencoded):
        """Return a (num_neg_answers+1, 2) indicator matrix: column 0 flags
        answers containing 'yes', column 1 answers containing 'no'. Row 0 is
        the positive answer. Note this is substring containment, so e.g.
        'nothing' also sets the 'no' flag.
        """
        feat = np.zeros([self.num_neg_answers+1, 2])

        if 'yes' in positive_answer_unencoded:
            feat[0, 0] = 1.0
        elif 'no' in positive_answer_unencoded:
            feat[0, 1] = 1.0

        for i in range(1, self.num_neg_answers+1):
            if 'yes' in negative_answers_unencoded[i-1]:
                feat[i, 0] = 1.0
            elif 'no' in negative_answers_unencoded[i-1]:
                feat[i, 1] = 1.0

        return feat

    def noun_to_obj_id(self, list_of_noun_ids, pos_ans):
        """Map encoded noun ids to object-label ids plus a one-hot-style
        vector over object labels; words with no object label map to -1.

        For negation-like positive answers ('no', '0', 'none', 'nil') the
        encoding is left all-zero.
        """
        obj_ids = [None]*len(list_of_noun_ids)
        vec_enc = np.zeros([1, constants.num_object_labels])
        if pos_ans.lower() in set(['no', '0', 'none', 'nil']):
            return obj_ids, vec_enc

        for i, id in enumerate(list_of_noun_ids):
            if id in self.inv_vocab and self.inv_vocab[id] in self.obj_labels:
                obj_ids[i] = int(self.obj_labels[self.inv_vocab[id]])
                vec_enc[0, obj_ids[i]] = 1.0
            else:
                obj_ids[i] = -1

        return obj_ids, vec_enc

    def adj_to_atr_id(self, list_of_adj_ids, pos_ans):
        """Same as noun_to_obj_id, but over attribute labels."""
        atr_ids = [None]*len(list_of_adj_ids)
        vec_enc = np.zeros([1, constants.num_attribute_labels])
        if pos_ans.lower() in set(['no', '0', 'none', 'nil']):
            return atr_ids, vec_enc

        for i, id in enumerate(list_of_adj_ids):
            if id in self.inv_vocab and self.inv_vocab[id] in self.atr_labels:
                atr_ids[i] = int(self.atr_labels[self.inv_vocab[id]])
                vec_enc[0, atr_ids[i]] = 1.0
            else:
                # CONSISTENCY FIX: previously an in-vocab word without an
                # attribute label left atr_ids[i] as None; now mirrors
                # noun_to_obj_id and marks it -1.
                atr_ids[i] = -1

        return atr_ids, vec_enc

    def get_region_feats(self, sample):
        """Load the cached region-feature array for this sample's image.

        The split prefix (e.g. 'val2014') is recovered from the feature
        directory name.
        """
        question_id = self.sample_to_question_dict[sample]
        image_id = self.anno[str(question_id)]['image_id']
        data_split = re.split(
            '_',
            os.path.split(self.feat_dir)[1])[0]

        feat_path = os.path.join(
            self.feat_dir,
            'COCO_' + data_split + '_' + str(image_id).zfill(12) + '.npy')
        return np.load(feat_path)

    def get_single_image(self, sample, region_number, batch_list, worker_id):
        """Thread worker: read one region crop (zeros on failure) into
        batch_list[worker_id]."""
        try:
            region_image, read_success = self.get_region_image(
                sample,
                region_number)

            if not read_success:
                region_image = np.zeros(
                    [self.h, self.w, self.c], np.float32)

            batch_list[worker_id] = region_image

        # SYNTAX FIX: 'except Exception, e' is Python-2-only syntax.
        except Exception as e:
            print('Error in thread {}: {}'.format(
                threading.current_thread().name, str(e)))

    def get_region_image(self, sample, region_number):
        """Read a single region crop from disk; returns (image, success).

        NOTE(review): self.image_dir is never set anywhere in this class, so
        this path looks like dead code inherited from the image-based
        pipeline — confirm before relying on it.
        """
        question_id = self.sample_to_question_dict[sample]
        image_id = self.anno[str(question_id)]['image_id']
        image_subdir = os.path.join(
            self.image_dir,
            'COCO_train2014_' + str(image_id).zfill(12))

        filename = os.path.join(image_subdir,
                                str(region_number+1) + '.jpg')
        read_success = True
        try:
            region_image = image_io.imread(filename)
            region_image = region_image.astype(np.float32)
        except Exception:
            # Unreadable crop: fall back to a zero image.
            read_success = False
            region_image = np.zeros([self.h, self.w, 3], dtype=np.float32)

        return region_image, read_success

    def get_question(self, sample):
        """Return (encoded parsed-question bins, encoded nouns, encoded
        adjectives, question id, raw question string)."""
        question_id = self.sample_to_question_dict[sample]
        anno = self.anno[question_id]
        question_nouns = self.encode_sentence(
            ' '.join(anno['question_nouns']))
        question_adjectives = self.encode_sentence(
            ' '.join(anno['question_adjectives']))

        encoded_parsed_question = {
            bin: self.encode_sentence(words)
            for bin, words in anno['parsed_question'].items()}

        return encoded_parsed_question, question_nouns, question_adjectives, \
            question_id, anno['question']

    def get_positive_answer(self, sample, mode='mcq'):
        """Return the encoded positive answer, its nouns/adjectives, and the
        raw answer string.

        In 'mcq' mode the multiple-choice answer is used; otherwise the most
        frequent of the annotators' answers.
        """
        question_id = self.sample_to_question_dict[sample]
        if mode == 'mcq':
            popular_answer = \
                self.anno[question_id]['multiple_choice_answer'].lower()
        else:
            # Majority vote over annotator answers.
            answer_counts = dict()
            for answer in self.anno[question_id]['answers']:
                answer_lower = answer['answer'].lower()
                # BUG FIX: the original tested the raw answer *dict* for
                # membership in a str-keyed counter (unhashable / always a
                # miss), so counts never accumulated. Count the lowercased
                # string instead.
                answer_counts[answer_lower] = \
                    answer_counts.get(answer_lower, 0) + 1

            popular_answer = ''
            current_count = 0
            for answer, count in answer_counts.items():
                if count > current_count:
                    popular_answer = answer
                    current_count = count

        nouns, adjectives = self.get_nouns_adjectives(popular_answer)
        answer = self.encode_sentence(popular_answer)
        return answer, nouns, adjectives, popular_answer

    def get_negative_answers(self, sample, mode='mcq'):
        """Return encoded negative answers with their nouns/adjectives and
        raw strings.

        'mcq' mode uses the remaining multiple choices; otherwise negatives
        are sampled uniformly without replacement from the answer vocab.
        """
        question_id = self.sample_to_question_dict[sample]
        # NOTE(review): this is a single string and the filters below use
        # substring containment ('ans in positive_answers'), not equality —
        # confirm that is intended.
        positive_answers = self.anno[question_id]['multiple_choice_answer']

        if mode == 'mcq':
            multiple_choices = self.anno[question_id]['multiple_choices']
            sampled_negative_answers = [
                ans.lower() for ans in multiple_choices
                if ans.lower() not in positive_answers]
        else:
            remaining_answers = [
                ans.lower() for ans in self.ans_vocab.keys()
                if ans.lower() not in positive_answers]
            # list() so the padding below can append uniformly (np.random.
            # choice returns an ndarray, which has no append).
            sampled_negative_answers = list(np.random.choice(
                remaining_answers,
                size=self.num_neg_answers,
                replace=False))

        # Pad with the unknown token when fewer negatives are available.
        remainder = self.num_neg_answers - len(sampled_negative_answers)
        for _ in range(remainder):
            sampled_negative_answers.append(constants.unknown_token)

        encoded_answers = []
        encoded_nouns = []
        encoded_adjectives = []
        for answer in sampled_negative_answers:
            nouns, adjectives = self.get_nouns_adjectives(answer)
            encoded_nouns.append(nouns)
            encoded_adjectives.append(adjectives)
            encoded_answers.append(self.encode_sentence(answer))

        return (encoded_answers, encoded_nouns, encoded_adjectives,
                sampled_negative_answers)

    def get_nouns_adjectives(self, sentence):
        """POS-tag a sentence and return encoded (nouns, adjectives), after
        lowercasing and lemmatization."""
        words = nltk.tokenize.word_tokenize(sentence)
        nouns = []
        adjectives = []
        for word, pos_tag in nltk.pos_tag(words):
            if pos_tag in ['NN', 'NNS', 'NNP', 'NNPS']:
                nouns.append(self.lemmatizer.lemmatize(word.lower()))
            elif pos_tag in ['JJ', 'JJR', 'JJS']:
                adjectives.append(self.lemmatizer.lemmatize(word.lower()))
        nouns = self.encode_sentence(' '.join(nouns))
        adjectives = self.encode_sentence(' '.join(adjectives))
        return nouns, adjectives

    def encode_sentence(self, sentence):
        """Encode a sentence as a list of int vocab ids; out-of-vocabulary
        words (and empty sentences) map to the unknown token."""
        # Split into runs of alphanumeric characters and drop the ''
        # fragments re.split produces at punctuation boundaries.
        words = [w for w in re.split(r'\W+', sentence.lower()) if w != '']

        # An empty sentence is represented by the unknown token.
        if not words:
            words = [constants.unknown_token]

        encoded_sentence = []
        for word in words:
            if word not in self.vocab:
                word = constants.unknown_token
            encoded_sentence.append(int(self.vocab[word]))

        return encoded_sentence
+
+
if __name__ == '__main__':
    # Smoke test: build a data manager for the training split and fetch a
    # small batch, then drop into the debugger for inspection.
    #
    # BUG FIX: the old call passed only 7 positional arguments to data(),
    # which requires 10 (it was missing the qids json, the object/attribute
    # label jsons, and num_neg_answers) and always raised a TypeError.
    data_mgr = data(
        constants.vqa_train_resnet_feat_dir,
        constants.vqa_train_anno,
        # NOTE(review): no train-split qids constant is visible in this
        # repo view — confirm the constant name.
        constants.vqa_train_qids,
        constants.vocab_json,
        constants.vqa_answer_vocab_json,
        constants.object_labels_json,
        constants.attribute_labels_json,
        constants.image_size,
        constants.num_region_proposals,
        constants.num_negative_answers)

    batch = data_mgr.get_parallel(range(10))
    pdb.set_trace()
+