diff --git a/.gitignore b/.gitignore
index 6fbbab773b43a701fa5fea0ef8d7ed55433779bd..f2d66e7cb279765722a42a99999a1ccd853af638 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,4 @@
 *~
+shapes_dataset/images_old
 shapes_dataset/images
 shapes_dataset/*.json
\ No newline at end of file
diff --git a/classifiers/answer_classifier/ans_data_io_helper.py b/classifiers/answer_classifier/ans_data_io_helper.py
index 50317d1ac7cea4161ae88d2e6bc57c2ada1c0372..a5f0c7c53e6fe32ca483f72bb340d31a3f70c263 100644
--- a/classifiers/answer_classifier/ans_data_io_helper.py
+++ b/classifiers/answer_classifier/ans_data_io_helper.py
@@ -2,6 +2,7 @@ import json
 import sys
 import os
 import time
+import random
 import matplotlib.pyplot as plt
 import matplotlib.image as mpimg
 import numpy as np
@@ -99,77 +100,100 @@ def save_regions(image_dir, out_dir, qa_dict, region_anno_dict, start_id,
         
             image_done[image_id-1] = True
 
+class batch_creator():
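+    """Serves QA example ids in [start_id, end_id]; ids can be re-shuffled
+    once per epoch so that mini-batches visit the data in random order."""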
+    def __init__(self, start_id, end_id):
+        self.start_id = start_id
+        self.end_id = end_id
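+        # Python 2 range() returns a list, so the ids can be shuffled in place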
+        self.id_list = range(start_id, end_id+1)
 
-def ans_mini_batch_loader(qa_dict, region_anno_dict, ans_dict, vocab, 
-                          image_dir, mean_image, start_index, batch_size, 
-                          img_height=100, img_width=100, channels = 3):
-    
-    ans_labels = np.zeros(shape=[batch_size, len(ans_dict)])
-    for i in xrange(start_index, start_index + batch_size):
-        answer = qa_dict[i].answer
-        ans_labels[i-start_index, ans_dict[answer]] = 1
-        
-    # number of regions in the batch
-    count = batch_size*num_proposals;
-    region_shape = np.array([img_height/3, img_width/3], np.int32)
-    region_images = np.zeros(shape=[count, region_shape[0], 
-                                    region_shape[1], channels])
-    region_score = np.zeros(shape=[1,count])
-    partition = np.zeros(shape=[count])
-    question_encodings = np.zeros(shape=[count, len(vocab)])
+    def shuffle_ids(self):
+        random.shuffle(self.id_list)
 
-    for i in xrange(start_index, start_index + batch_size):
-        
-        image_id = qa_dict[i].image_id
-        question = qa_dict[i].question
-        answer = qa_dict[i].answer
-        gt_regions_for_image = region_anno_dict[image_id]
-        start1 = time.time()
-        regions = region_proposer.rank_regions(None, question,
-                                               region_coords, region_coords_,
-                                               gt_regions_for_image,
-                                               False)
-
-        end1 = time.time()
-#        print('Ranking Region: ' + str(end1-start1))
-        question_encoding_tmp = np.zeros(shape=[1, len(vocab)])
-        
-        for word in question[0:-1].split():
+    def qa_index(self, start_index, batch_size):
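+        # Map a flat batch window back onto the (possibly shuffled) id list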
+        return self.id_list[start_index - self.start_id 
+                            : start_index - self.start_id + batch_size]
+
+    def ans_mini_batch_loader(self, qa_dict, region_anno_dict, ans_dict, vocab, 
+                              image_dir, mean_image, start_index, batch_size, 
+                              img_height=100, img_width=100, channels = 3):
+
+        q_ids = self.qa_index(start_index, batch_size)
+
+        ans_labels = np.zeros(shape=[batch_size, len(ans_dict)])
+        for i in xrange(batch_size):
+            q_id = q_ids[i]
+            answer = qa_dict[q_id].answer
+            ans_labels[i, ans_dict[answer]] = 1
+            
+        # number of regions in the batch
+        count = batch_size*num_proposals
+        region_shape = np.array([img_height/3, img_width/3], np.int32)
+        region_images = np.zeros(shape=[count, region_shape[0], 
+                                        region_shape[1], channels])
+        region_score = np.zeros(shape=[1,count])
+        partition = np.zeros(shape=[count])
+        question_encodings = np.zeros(shape=[count, len(vocab)])
             
+        for i in xrange(batch_size):
+            q_id = q_ids[i]
+            image_id = qa_dict[q_id].image_id
+            question = qa_dict[q_id].question
+            answer = qa_dict[q_id].answer
+            gt_regions_for_image = region_anno_dict[image_id]
+            regions = region_proposer.rank_regions(None, question,
+                                                   region_coords, 
+                                                   region_coords_,
+                                                   gt_regions_for_image,
+                                                   False)
+            
+            question_encoding_tmp = np.zeros(shape=[1, len(vocab)])
+            for word in question[0:-1].split():
                 if word not in vocab:
                     word = 'unk'
                 question_encoding_tmp[0, vocab[word]] += 1 
-        question_len = np.sum(question_encoding_tmp)
-        # print(question[0:-1].split())
-        # print(question_len)
-        # print(question_encoding_tmp)
-        # print(vocab)
-        assert (not question_len==0)
-
-        question_encoding_tmp /= question_len
+                    
+            question_len = np.sum(question_encoding_tmp)
+            assert question_len != 0
+            question_encoding_tmp /= question_len
         
-        for j in xrange(num_proposals):
-            counter = j + (i-start_index)*num_proposals
-            
-            proposal = regions[j]
-
-            start2 = time.time()
-            resized_region = mpimg.imread(os.path.join(image_dir,
-                                                       '{}_{}.png'
-                                                       .format(image_id,j)))
-            end2 = time.time()
-#            print('Reading Region: ' + str(end2-start2))
-            region_images[counter,:,:,:] = (resized_region / 254.0) \
-                                           - mean_image
-            region_score[0,counter] = proposal.score
-            partition[counter] = i-start_index
-            
-            question_encodings[counter,:] = question_encoding_tmp
-
-        score_start_id = (i-start_index)*num_proposals
-        region_score[0, score_start_id:score_start_id+num_proposals] /= \
-            np.sum(region_score[0,score_start_id:score_start_id+num_proposals])
-    return region_images, ans_labels, question_encodings, region_score, partition
+            for j in xrange(num_proposals):
+                counter = j + i*num_proposals
+                proposal = regions[j]
+                resized_region = mpimg.imread(os.path.join(image_dir, 
+                                             '{}_{}.png'.format(image_id,j)))
+                region_images[counter,:,:,:] = (resized_region / 254.0) \
+                                               - mean_image
+                region_score[0,counter] = proposal.score
+                partition[counter] = i
+                
+                question_encodings[counter,:] = question_encoding_tmp
+                
+            score_start_id = i*num_proposals
+            region_score[0, score_start_id:score_start_id+num_proposals] /= \
+                np.sum(region_score[0, score_start_id:
+                                    score_start_id+num_proposals])
+        return region_images, ans_labels, question_encodings, \
+            region_score, partition
+
+
+class html_ans_table_writer():
+    def __init__(self, filename):
+        self.filename = filename
+        self.html_file = open(self.filename, 'w')
+        self.html_file.write("""<!DOCTYPE html>\n<html>\n<body>\n<table border="1" style="width:100%"> \n""")
+    
+    def add_element(self, col_dict):
+        self.html_file.write('    <tr>\n')
+        for key in range(len(col_dict)):
+            self.html_file.write("""    <td>{}</td>\n""".format(col_dict[key]))
+        self.html_file.write('    </tr>\n')
+
+    def image_tag(self, image_path, height, width):
+        return """<img src="{}" alt="IMAGE NOT FOUND!" height={} width={}>""".format(image_path,height,width)
+        
+    def close_file(self):
+        self.html_file.write('</table>\n</body>\n</html>')
+        self.html_file.close()
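+
+# Minimal usage sketch (the output path here is hypothetical):
+#   writer = html_ans_table_writer('/tmp/answers.html')
+#   writer.add_element({0: 'Question', 1: 'Answer (GT)', 2: 'Answer (Pred)'})
+#   writer.add_element({0: 'is there a red square?', 1: 'yes', 2: 'yes'})
+#   writer.close_file()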
 
     
 if __name__=='__main__':
diff --git a/classifiers/answer_classifier/ans_data_io_helper.pyc b/classifiers/answer_classifier/ans_data_io_helper.pyc
index a82756a940860691e4e9f2f7fbb6be98c2d944f8..31700abb56a4f4321672b3149d430c2bb7291e78 100644
Binary files a/classifiers/answer_classifier/ans_data_io_helper.pyc and b/classifiers/answer_classifier/ans_data_io_helper.pyc differ
diff --git a/classifiers/answer_classifier/eval_ans_classifier.py b/classifiers/answer_classifier/eval_ans_classifier.py
index 35eb6578875181654e45d90bf3b95cf67062c824..d6b7eb35216a979615e97df910bbd23652cc5f4d 100644
--- a/classifiers/answer_classifier/eval_ans_classifier.py
+++ b/classifiers/answer_classifier/eval_ans_classifier.py
@@ -12,35 +12,33 @@ import plot_helper as plotter
 import ans_data_io_helper as ans_io_helper
 import region_ranker.perfect_ranker as region_proposer 
 import train_ans_classifier as ans_trainer
+from PIL import Image, ImageDraw
 
 def get_pred(y, qa_anno_dict, region_anno_dict, ans_vocab, vocab,
              image_dir, mean_image, start_index, val_set_size, batch_size,
-             placeholders, img_height=100, img_width=100):
+             placeholders, img_height, img_width, batch_creator):
 
     inv_ans_vocab = {v: k for k, v in ans_vocab.items()}
     pred_list = []
     correct = 0
     max_iter = int(math.ceil(val_set_size*1.0/batch_size))
-#    print ([val_set_size, batch_size])
-#    print('max_iter: ' + str(max_iter))
     batch_size_tmp = batch_size
     for i in xrange(max_iter):
         if i==(max_iter-1):
             batch_size_tmp = val_set_size - i*batch_size
+
         print('Iter: ' + str(i+1) + '/' + str(max_iter))
-#        print batch_size_tmp
+
         region_images, ans_labels, questions, \
-        region_score, partition= \
-            ans_io_helper.ans_mini_batch_loader(qa_anno_dict, 
-                                                region_anno_dict, 
-                                                ans_vocab, vocab, 
-                                                image_dir, mean_image, 
-                                                start_index+i*batch_size, 
-                                                batch_size_tmp, 
-                                                img_height, img_width, 3)
+        region_score, partition = batch_creator \
+            .ans_mini_batch_loader(qa_anno_dict, 
+                                   region_anno_dict, 
+                                   ans_vocab, vocab, 
+                                   image_dir, mean_image, 
+                                   start_index+i*batch_size, 
+                                   batch_size_tmp, 
+                                   img_height, img_width, 3)
             
-        # print [start_index+i*batch_size, 
-        #        start_index+i*batch_size + batch_size_tmp -1]
         if i==max_iter-1:
                                     
             residual_batch_size = batch_size - batch_size_tmp
@@ -63,10 +61,6 @@ def get_pred(y, qa_anno_dict, region_anno_dict, ans_vocab, vocab,
                                         axis=0)
             region_score = np.concatenate((region_score, residual_region_score),
                                           axis=1)
-            # print region_images.shape
-            # print questions.shape
-            # print ans_labels.shape
-            # print region_score.shape
 
         feed_dict = {
             placeholders[0] : region_images, 
@@ -82,8 +76,6 @@ def get_pred(y, qa_anno_dict, region_anno_dict, ans_vocab, vocab,
                 'question_id' : start_index+i*batch_size+j,
                 'answer' : inv_ans_vocab[ans_ids[j]]
             }]
-            # print qa_anno_dict[start_index+i*batch_size+j].question
-            # print inv_ans_vocab[ans_ids[j]]
 
     return pred_list
 
@@ -143,10 +135,15 @@ def eval(eval_params):
 
     placeholders = [image_regions, questions, keep_prob, y, region_score]
 
+    # Batch creator
+    test_batch_creator = ans_io_helper.batch_creator(test_start_id,
+                                                     test_start_id 
+                                                     + test_set_size - 1)
     # Get predictions
-    pred_dict =get_pred(y_avg, qa_anno_dict, region_anno_dict, ans_vocab, vocab,
+    pred_dict = get_pred(y_avg, qa_anno_dict, region_anno_dict, ans_vocab, vocab,
                         image_regions_dir, mean_image, test_start_id, 
-                        test_set_size, batch_size, placeholders, 75, 75)
+                        test_set_size, batch_size, placeholders, 75, 75,
+                        test_batch_creator)
 
     json_filename = os.path.join(outdir, 'predicted_ans_' + \
                                  eval_params['mode'] + '.json')
@@ -154,12 +151,89 @@ def eval(eval_params):
         json.dump(pred_dict, json_file)
 
     
+def create_html_file(outdir, test_anno_filename, regions_anno_filename,
+                     pred_json_filename, image_dir):
+    qa_dict = ans_io_helper.parse_qa_anno(test_anno_filename)
+    region_anno_dict = region_proposer.parse_region_anno(regions_anno_filename)
+    ans_vocab, inv_ans_vocab = ans_io_helper.create_ans_dict()
+
+    with open(pred_json_filename,'r') as json_file:
+        raw_data = json.load(json_file)
+    
+    # Create directory for storing images with region boxes
+    images_bbox_dir = os.path.join(outdir, 'images_bbox')
+    if not os.path.exists(images_bbox_dir):
+        os.mkdir(images_bbox_dir)
+    
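+    # Integer keys double as column order: add_element writes keys
+    # 0..len(col_dict)-1 from left to right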
+    col_dict = {
+        0 : 'Question_Id',
+        1 : 'Question',
+        2 : 'Answer (GT)',
+        3 : 'Answer (Pred)',
+        4 : 'Image',
+    }
+    html_correct_filename = os.path.join(outdir, 'correct_ans.html')
+    html_writer_correct = ans_io_helper \
+        .html_ans_table_writer(html_correct_filename)
+    html_writer_correct.add_element(col_dict)
+
+    html_incorrect_filename = os.path.join(outdir, 'incorrect_ans.html')
+    html_writer_incorrect = ans_io_helper \
+        .html_ans_table_writer(html_incorrect_filename)
+    html_writer_incorrect.add_element(col_dict)
+
+    region_coords, region_coords_ = region_proposer.get_region_coords(300,300)
+
+    for entry in raw_data:
+        q_id = entry['question_id']
+        pred_ans = entry['answer']
+        gt_ans = qa_dict[q_id].answer
+        question = qa_dict[q_id].question
+        img_id = qa_dict[q_id].image_id
+        image_filename = os.path.join(image_dir, str(img_id) + '.jpg')
+        image = Image.open(image_filename)
+        
+        regions = region_proposer.rank_regions(image, question, region_coords, 
+                                               region_coords_, 
+                                               region_anno_dict[img_id],
+                                               crop=False)
+        dr = ImageDraw.Draw(image)
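+        # Draw a red box around every region the ranker scored non-zero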
+        for i in xrange(ans_io_helper.num_proposals):
+            if regions[i].score != 0:
+                coord = regions[i].coord
+                x1 = coord[0]
+                y1 = coord[1]
+                x2 = coord[2]
+                y2 = coord[3]
+                dr.rectangle([(x1,y1),(x2,y2)], outline="red")
+        
+        image_bbox_filename = os.path.join(images_bbox_dir,str(q_id) + '.jpg')
+        image.save(image_bbox_filename)
+        image_bbox_filename_rel = 'images_bbox/' + str(q_id) + '.jpg' 
+        col_dict = {
+            0 : q_id,
+            1 : question,
+            2 : gt_ans,
+            3 : pred_ans,
+            4 : html_writer_correct.image_tag(image_bbox_filename_rel,50,50)
+        }
+        if pred_ans==gt_ans:
+            html_writer_correct.add_element(col_dict)
+        else:
+            html_writer_incorrect.add_element(col_dict)
+
+    html_writer_correct.close_file()
+    html_writer_incorrect.close_file()
+    
 
 if __name__=='__main__':
     ans_classifier_eval_params = {
         'train_json': '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/train_anno.json',
         'test_json': '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/test_anno.json',
         'regions_json': '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/regions_anno.json',
+        'image_dir': '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/images',
         'image_regions_dir': '/mnt/ramdisk/image_regions',
         'outdir': '/home/tanmay/Code/GenVQA/Exp_Results/Ans_Classifier',
         'model': '/home/tanmay/Code/GenVQA/Exp_Results/Ans_Classifier/ans_classifier_q_obj_atr-9',
@@ -169,4 +243,11 @@ if __name__=='__main__':
         'test_set_size': 160725-111352+1,
     }
 
-    eval(ans_classifier_eval_params)
+#    eval(ans_classifier_eval_params)
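+
+    # Build the correct/incorrect answer HTML pages from saved predictions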
+    outdir = ans_classifier_eval_params['outdir']
+    test_anno_filename = ans_classifier_eval_params['test_json']
+    regions_anno_filename = ans_classifier_eval_params['regions_json']
+    pred_json_filename = os.path.join(outdir, 'predicted_ans_q.json')
+    image_dir = ans_classifier_eval_params['image_dir']
+    create_html_file(outdir, test_anno_filename, regions_anno_filename,
+                     pred_json_filename, image_dir)
diff --git a/classifiers/answer_classifier/train_ans_classifier.py b/classifiers/answer_classifier/train_ans_classifier.py
index 524d294c4184be372c38958552e87f4883ce163a..764d4bc1074c19d5679b1127b06f05f0d7f50861 100644
--- a/classifiers/answer_classifier/train_ans_classifier.py
+++ b/classifiers/answer_classifier/train_ans_classifier.py
@@ -22,20 +22,17 @@ val_set_size_small = 100
 
 def evaluate(accuracy, qa_anno_dict, region_anno_dict, ans_vocab, vocab,
              image_dir, mean_image, start_index, val_set_size, batch_size,
-             placeholders, img_height=100, img_width=100):
+             placeholders, img_height, img_width, batch_creator):
     
     correct = 0
     max_iter = int(math.floor(val_set_size/batch_size))
     for i in xrange(max_iter):
         region_images, ans_labels, questions, \
-        region_score, partition= \
-            ans_io_helper.ans_mini_batch_loader(qa_anno_dict, 
-                                                region_anno_dict, 
-                                                ans_vocab, vocab, 
-                                                image_dir, mean_image, 
-                                                start_index+i*batch_size, 
-                                                batch_size, 
-                                                img_height, img_width, 3)
+        region_score, partition = batch_creator \
+            .ans_mini_batch_loader(qa_anno_dict, region_anno_dict, 
+                                   ans_vocab, vocab, image_dir, mean_image, 
+                                   start_index+i*batch_size, batch_size, 
+                                   img_height, img_width, 3)
             
         feed_dict = {
             placeholders[0] : region_images, 
@@ -224,32 +221,43 @@ def train(train_params):
 
     placeholders = [image_regions, questions, keep_prob, y, region_score]
 
+
+    # Start Training
+    max_epoch = train_params['max_epoch']
+    max_iter = 5000
+    val_acc_array_epoch = np.zeros([max_epoch])
+    train_acc_array_epoch = np.zeros([max_epoch])
+
+    # Batch creators
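+    # (training ids span 1..max_iter*batch_size; validation ids are separate)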
+    train_batch_creator = ans_io_helper.batch_creator(1, max_iter*batch_size)
+    val_batch_creator = ans_io_helper.batch_creator(val_start_id, val_start_id 
+                                                    + val_set_size - 1)
+    val_small_batch_creator = ans_io_helper.batch_creator(val_start_id, 
+                                                          val_start_id + 
+                                                          val_set_size_small-1)
+
+    # Check accuracy of restored model
     if train_params['fine_tune']==True:
         restored_accuracy = evaluate(accuracy, qa_anno_dict, 
                                      region_anno_dict, ans_vocab, 
                                      vocab, image_regions_dir, 
                                      mean_image, val_start_id, 
                                      val_set_size, batch_size,
-                                     placeholders, 75, 75)
+                                     placeholders, 75, 75,
+                                     val_batch_creator)
         print('Accuracy of restored model: ' + str(restored_accuracy))
-
-    # Start Training
-    max_epoch = train_params['max_epoch']
-    max_iter = 5000
-    val_acc_array_epoch = np.zeros([max_epoch])
-    train_acc_array_epoch = np.zeros([max_epoch])
+    
     for epoch in range(start_epoch, max_epoch):
-        iter_ids = range(max_iter)
-        random.shuffle(iter_ids)
-        for i in iter_ids: #range(max_iter):
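+        # Shuffle example ids once per epoch so batch composition varies
+        # across epochs (replaces the old shuffle of iteration order)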
+        train_batch_creator.shuffle_ids()
+        for i in range(max_iter):
         
             train_region_images, train_ans_labels, train_questions, \
-            train_region_score, train_partition= \
-            ans_io_helper.ans_mini_batch_loader(qa_anno_dict, region_anno_dict, 
-                                                ans_vocab, vocab, 
-                                                image_regions_dir, mean_image, 
-                                                1+i*batch_size, batch_size, 
-                                                75, 75, 3)
+            train_region_score, train_partition = train_batch_creator \
+                .ans_mini_batch_loader(qa_anno_dict, region_anno_dict, 
+                                       ans_vocab, vocab, 
+                                       image_regions_dir, mean_image, 
+                                       1+i*batch_size, batch_size, 
+                                       75, 75, 3)
                         
             feed_dict_train = {
                 image_regions : train_region_images, 
@@ -258,7 +266,7 @@ def train(train_params):
                 y: train_ans_labels,        
                 region_score: train_region_score,
             }
-
+            
             if pretrained_vars_low_lr:
                 _, _, current_train_batch_acc, y_pred_eval, loss_eval = \
                     sess.run([train_step_low_lr, train_step_high_lr,
@@ -280,10 +288,10 @@ def train(train_params):
                                         region_anno_dict, ans_vocab, vocab,
                                         image_regions_dir, mean_image, 
                                         val_start_id, val_set_size_small,
-                                        batch_size, placeholders, 75, 75)
+                                        batch_size, placeholders, 75, 75,
+                                        val_small_batch_creator)
                 
-                print('Iter: ' + str(i+1) + ' Val Sm Acc: ' + str(val_accuracy) 
-                      + ' Loss: ' + str(loss_eval))
+                print('Iter: ' + str(i+1) + ' Val Sm Acc: ' + str(val_accuracy))
 
         train_acc_array_epoch[epoch] = train_acc_array_epoch[epoch] / max_iter
         val_acc_array_epoch[epoch] = evaluate(accuracy, qa_anno_dict, 
@@ -291,7 +299,8 @@ def train(train_params):
                                               vocab, image_regions_dir, 
                                               mean_image, val_start_id, 
                                               val_set_size, batch_size,
-                                              placeholders, 75, 75)
+                                              placeholders, 75, 75,
+                                              val_batch_creator)
 
         print('Val Acc: ' + str(val_acc_array_epoch[epoch]) + 
               ' Train Acc: ' + str(train_acc_array_epoch[epoch]))
diff --git a/classifiers/answer_classifier/train_ans_classifier.pyc b/classifiers/answer_classifier/train_ans_classifier.pyc
index 7a476b58d08ede1221bca8d1d9e83afa1f34f095..ee2dbe19a14f6d26908de083a00f417fc54d44d6 100644
Binary files a/classifiers/answer_classifier/train_ans_classifier.pyc and b/classifiers/answer_classifier/train_ans_classifier.pyc differ
diff --git a/classifiers/object_classifiers/#obj_data_io_helper.py# b/classifiers/object_classifiers/#obj_data_io_helper.py#
new file mode 100644
index 0000000000000000000000000000000000000000..caab72a60bd70f4235d6d7e08b8679472ddc94e1
--- /dev/null
+++ b/classifiers/object_classifiers/#obj_data_io_helper.py#
@@ -0,0 +1,84 @@
+#Embedded file name: /home/tanmay/Code/GenVQA/GenVQA/classifiers/object_classifiers/obj_data_io_helper.py
+import json
+import sys
+import os
+import matplotlib.pyplot as plt
+import matplotlib.image as mpimg
+import numpy as np
+import tensorflow as tf
+from scipy import misc
+
+def obj_mini_batch_loader(json_filename, image_dir, mean_image, start_index, batch_size, img_height = 100, img_width = 100, channels = 3):
+    with open(json_filename, 'r') as json_file:
+        json_data = json.load(json_file)
+    obj_images = np.empty(shape=[9 * batch_size, img_height / 3,
+                                 img_width / 3, channels])
+    obj_labels = np.zeros(shape=[9 * batch_size, 4])
+    for i in range(start_index, start_index + batch_size):
+        image_name = os.path.join(image_dir, str(i) + '.jpg')
+        image = misc.imresize(mpimg.imread(image_name), (img_height, img_width), interp='nearest')
+        crop_shape = np.array([image.shape[0], image.shape[1]]) / 3
+        selected_anno = [ q for q in json_data if q['image_id'] == i ]
+        grid_config = selected_anno[0]['config']
+        counter = 0
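+        # grid_config appears to store per-cell entries at stride 2:
+        # grid_config[6*row + 2*col] is the object class id for cell (row, col)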
+        for grid_row in range(0, 3):
+            for grid_col in range(0, 3):
+                start_row = grid_row * crop_shape[0]
+                start_col = grid_col * crop_shape[1]
+                cropped_image = image[start_row:start_row + crop_shape[0], start_col:start_col + crop_shape[1], :]
+                if np.ndim(mean_image) == 0:
+                    obj_images[9 * (i - start_index) + counter, :, :, :] = cropped_image / 254.0
+                else:
+                    obj_images[9 * (i - start_index) + counter, :, :, :] = (cropped_image / 254.0) - mean_image
+                obj_labels[9 * (i - start_index) + counter, grid_config[6 * grid_row + 2 * grid_col]] = 1
+                counter = counter + 1
+
+    return (obj_images, obj_labels)
+
+
+def mean_image_batch(json_filename, image_dir, start_index, batch_size, img_height = 100, img_width = 100, channels = 3):
+    batch = obj_mini_batch_loader(json_filename, image_dir, np.empty([]), start_index, batch_size, img_height, img_width, channels)
+    mean_image = np.mean(batch[0], 0)
+    return mean_image
+
+
+def mean_image(json_filename, image_dir, num_images, batch_size, img_height = 100, img_width = 100, channels = 3):
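+    # Overall mean image computed as the average of per-batch means;
+    # any images beyond max_iter*batch_size are ignored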
+    max_iter = np.floor(num_images / batch_size)
+    mean_image = np.zeros([img_height / 3, img_width / 3, channels])
+    for i in range(max_iter.astype(np.int16)):
+        mean_image = mean_image + mean_image_batch(json_filename, image_dir, 1 + i * batch_size, batch_size, img_height, img_width, channels)
+
+    mean_image = mean_image / max_iter
+    return mean_image
+
+
+class html_obj_table_writer:
+
+    def __init__(self, filename):
+        self.filename = filename
+        self.html_file = open(self.filename, 'w')
+        self.html_file.write('<!DOCTYPE html>\n<html>\n<body>\n<table border="1" style="width:100%"> \n')
+
+    def add_element(self, col_dict):
+        self.html_file.write('    <tr>\n')
+        for key in range(len(col_dict)):
+            self.html_file.write('    <td>{}</td>\n'.format(col_dict[key]))
+
+        self.html_file.write('    </tr>\n')
+
+    def image_tag(self, image_path, height, width):
+        return '<img src="{}" alt="IMAGE NOT FOUND!" height={} width={}>'.format(image_path, height, width)
+
+    def close_file(self):
+        self.html_file.write('</table>\n</body>\n</html>')
+        self.html_file.close()
+
+
+if __name__ == '__main__':
+    html_writer = html_obj_table_writer('/home/tanmay/Code/GenVQA/Exp_Results/Shape_Classifier_v_1/trial.html')
+    col_dict = {0: 'sam',
+                1: html_writer.image_tag('something.png', 25, 25)}
+    html_writer.add_element(col_dict)
+    html_writer.close_file()
diff --git a/classifiers/region_ranker/perfect_ranker.py b/classifiers/region_ranker/perfect_ranker.py
index 9d44e14f6c3f988f2e4504ba9b0569554071339f..5ab52a0e7ce26c1a69ff199ed8fc813e09db43af 100644
--- a/classifiers/region_ranker/perfect_ranker.py
+++ b/classifiers/region_ranker/perfect_ranker.py
@@ -8,7 +8,6 @@ import matplotlib.image as mpimg
 from scipy import misc
 region = namedtuple('region','image score coord')
 
-
 def parse_region_anno(json_filename):
     with open(json_filename,'r') as json_file:
         raw_data = json.load(json_file)
@@ -19,7 +18,6 @@ def parse_region_anno(json_filename):
         
     return region_anno_dict
 
-
 def get_region_coords(img_height, img_width):
     region_coords_ = np.array([[   1,     1,   100,   100],
                              [  101,    1,  200,  100],
diff --git a/classifiers/region_ranker/perfect_ranker.pyc b/classifiers/region_ranker/perfect_ranker.pyc
index 6807d1410576a03a277f3b0cf87f780fe3fe750f..763268d210df514c8542e4b876ade8918f75b1ad 100644
Binary files a/classifiers/region_ranker/perfect_ranker.pyc and b/classifiers/region_ranker/perfect_ranker.pyc differ
diff --git a/classifiers/tf_graph_creation_helper.py b/classifiers/tf_graph_creation_helper.py
index db4917528627a299c27a2ef4fa297d7a359d5cf5..bab0ffde1222beb9bcf11af9534e2bb64308c0ce 100644
--- a/classifiers/tf_graph_creation_helper.py
+++ b/classifiers/tf_graph_creation_helper.py
@@ -282,8 +282,7 @@ def loss(y, y_pred):
     cross_entropy = -tf.reduce_sum(y * tf.log(y_pred_clipped), 
                                    name='cross_entropy')
     batch_size = tf.shape(y)
-    print 'Batch Size:' + str(tf.cast(batch_size[0],tf.float32))
-    return tf.truediv(cross_entropy, tf.cast(20,tf.float32))#batch_size[0],tf.float32))
+    return tf.truediv(cross_entropy, tf.cast(batch_size[0],tf.float32))
 
 
 if __name__ == '__main__':
diff --git a/classifiers/tf_graph_creation_helper.pyc b/classifiers/tf_graph_creation_helper.pyc
index 02d92a889c316357acd64391630440f3d31982be..367a32fb594471ea8c6b8dc46181d6581eb8ecc7 100644
Binary files a/classifiers/tf_graph_creation_helper.pyc and b/classifiers/tf_graph_creation_helper.pyc differ
diff --git a/classifiers/train_classifiers.py b/classifiers/train_classifiers.py
index 3bf5366d38b963e2b05f2dcabe0ab2ab75ec8797..fc3b0bb1c2830397552c05d3c325b52070db5465 100644
--- a/classifiers/train_classifiers.py
+++ b/classifiers/train_classifiers.py
@@ -73,8 +73,8 @@ ans_classifier_train_params = {
     'crop_n_save_regions': False,
     'max_epoch': 10,
     'batch_size': 20,
-    'fine_tune': False,
-    'start_model': 9,
+    'fine_tune': True,
+    'start_model': 3,
 }
 
 if __name__=='__main__':
diff --git a/object_classifiers/#eval_obj_classifier.py# b/object_classifiers/#eval_obj_classifier.py#
new file mode 100644
index 0000000000000000000000000000000000000000..28fa50eefc4a383fe4c3ff5c65c47c6f69069e74
--- /dev/null
+++ b/object_classifiers/#eval_obj_classifier.py#
@@ -0,0 +1,45 @@
+import sys
+import os
+import matplotlib.pyplot as plt
+import matplotlib.image as mpimg
+import numpy as np
+import tensorflow as tf
+import obj_data_io_helper as shape_data_loader 
+from train_obj_classifier import placeholder_inputs, comp_graph_v_1, evaluation
+
+sess=tf.InteractiveSession()
+
+x, y, keep_prob = placeholder_inputs()
+y_pred = comp_graph_v_1(x, y, keep_prob)
+
+accuracy = evaluation(y, y_pred)
+
+saver = tf.train.Saver()
+
+saver.restore(sess, '/home/tanmay/Code/GenVQA/Exp_Results/Shape_Classifier_v_1/obj_classifier_9.ckpt')
+
+mean_image = np.load('/home/tanmay/Code/GenVQA/Exp_Results/Shape_Classifier_v_1/mean_image.npy')
+
+# Test Data
+test_json_filename = '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/test_anno.json'
+image_dir = '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/images'
+
+# HTML file writer
+html_writer = shape_data_loader.html_obj_table_writer('/home/tanmay/Code/GenVQA/Exp_Results/Shape_Classifier_v_1/trial.html')
+batch_size = 100
+correct = 0
+for i in range(1): # use range(50) to cover all 5000 test examples
+    test_batch = shape_data_loader.obj_mini_batch_loader(test_json_filename, image_dir, mean_image, 10000+i*batch_size, batch_size, 75, 75)
+    feed_dict_test={x: test_batch[0], y: test_batch[1], keep_prob: 1.0}
+    result = sess.run([accuracy, y_pred], feed_dict=feed_dict_test)
+    correct = correct + result[0]*batch_size
+    print(correct)
+
+    for row in range(batch_size):
+        col_dict = {
+            0: test_batch[1][row,:],
+            1: result[1][row, :]}  # evaluated predictions, not the tf tensor
+        html_writer.add_element(col_dict)
+
+html_writer.close_file()
+print('Test Accuracy: {}'.format(correct/5000))