diff --git a/color_classifiers/__init__.py b/color_classifiers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/color_classifiers/atr_data_io_helper.py b/color_classifiers/atr_data_io_helper.py
new file mode 100644
index 0000000000000000000000000000000000000000..67337b845d916812c3217dfde2ce0c5e18955864
--- /dev/null
+++ b/color_classifiers/atr_data_io_helper.py
@@ -0,0 +1,90 @@
+import json
+import sys
+import os
+import matplotlib.pyplot as plt
+import matplotlib.image as mpimg
+import numpy as np
+import tensorflow as tf
+from scipy import misc
+
+def atr_mini_batch_loader(json_filename, image_dir, mean_image, start_index, batch_size, img_height=100, img_width=100, channels=3):
+
+    with open(json_filename, 'r') as json_file: 
+        json_data = json.load(json_file)
+
+    atr_images = np.empty(shape=[9*batch_size, img_height//3, img_width//3, channels])
+    atr_labels = np.zeros(shape=[9*batch_size, 4])
+
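+    # Each annotated image is resized and cut into a 3x3 grid, so every image
+    # contributes 9 crops and 9 one-hot attribute labels (hence 9*batch_size rows).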
+    for i in range(start_index, start_index + batch_size):
+        image_name = os.path.join(image_dir, str(i) + '.jpg')
+        image = misc.imresize(mpimg.imread(image_name),(img_height, img_width), interp='nearest')
+#        image.resize((img_height, img_width, 3))
+        crop_shape = np.array([image.shape[0], image.shape[1]])//3
+        selected_anno = [q for q in json_data if q['image_id']==i]
+        grid_config = selected_anno[0]['config']
+        
+        counter = 0
+        for grid_row in range(0,3):
+            for grid_col in range(0,3):
+                start_row = grid_row*crop_shape[0]
+                start_col = grid_col*crop_shape[1]
+#                print([start_row, start_col])
+                cropped_image = image[start_row:start_row+crop_shape[0], start_col:start_col+crop_shape[1], :]
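+                # Scale crops to roughly [0, 1]; when a mean image is provided it is
+                # subtracted before scaling.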
+                if np.ndim(mean_image)==0:
+                    atr_images[9*(i-start_index)+counter,:,:,:] = cropped_image/254.
+                else:
+                    atr_images[9*(i-start_index)+counter,:,:,:] = (cropped_image-mean_image)/254
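+                # grid_config appears to store two entries per grid cell (6 per row);
+                # the odd offset is taken as the attribute class id for this cell.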
+                atr_labels[9*(i-start_index)+counter, grid_config[6*grid_row+2*grid_col+1]] = 1
+                counter = counter + 1
+
+    # imgplot = plt.imshow(obj_images[0,:,:,:].astype(np.uint8))
+    # plt.show()
+    return (atr_images, atr_labels)
+
+def mean_image_batch(json_filename, image_dir, start_index, batch_size, img_height=100, img_width=100, channels=3):
+    batch = atr_mini_batch_loader(json_filename, image_dir, np.empty([]), start_index, batch_size, img_height, img_width, channels)
+    mean_image = np.mean(batch[0], 0)
+    return mean_image
+
+def mean_image(json_filename, image_dir, num_images, batch_size, img_height=100, img_width=100, channels=3):
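+    # Approximate the dataset mean by averaging the per-batch means of
+    # floor(num_images/batch_size) non-overlapping batches of normalized crops.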
+    max_iter = np.floor(num_images/batch_size)
+    mean_image = np.zeros([img_height//3, img_width//3, channels])
+    for i in range(max_iter.astype(np.int16)):
+        mean_image = mean_image + mean_image_batch(json_filename, image_dir, 1+i*batch_size, batch_size, img_height, img_width, channels)
+
+    mean_image = mean_image/max_iter
+    tmp_mean_image = mean_image*254
+    # imgplot = plt.imshow(tmp_mean_image.astype(np.uint8))
+    # plt.show()
+    return mean_image
+
+
+class html_atr_table_writer():
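+    # Minimal helper that streams an HTML results table: the constructor writes the
+    # table header, add_element emits one <tr> per call, close_file writes the footer.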
+    def __init__(self, filename):
+        self.filename = filename
+        self.html_file = open(self.filename, 'w')
+        self.html_file.write("""<!DOCTYPE html>\n<html>\n<body>\n<table border="1" style="width:100%"> \n""")
+    
+    def add_element(self, col_dict):
+        self.html_file.write('    <tr>\n')
+        for key in range(len(col_dict)):
+            self.html_file.write("""    <td>{}</td>\n""".format(col_dict[key]))
+        self.html_file.write('    </tr>\n')
+
+    def image_tag(self, image_path, height, width):
+        return """<img src="{}" alt="IMAGE NOT FOUND!" height={} width={}>""".format(image_path,height,width)
+        
+    def close_file(self):
+        self.html_file.write('</table>\n</body>\n</html>')
+        self.html_file.close()
+
+        
+    
+
+if __name__=="__main__":
+
+    html_writer = html_atr_table_writer('/home/tanmay/Code/GenVQA/Exp_Results/Atr_Classifier_v_1/trial.html')
+    col_dict={0: 'sam', 1: html_writer.image_tag('something.png',25,25)}
+    html_writer.add_element(col_dict)
+    html_writer.close_file()
+
diff --git a/color_classifiers/eval_atr_classifier.py b/color_classifiers/eval_atr_classifier.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f4120283972d3bd15336d73282440b8f19b8cef
--- /dev/null
+++ b/color_classifiers/eval_atr_classifier.py
@@ -0,0 +1,69 @@
+import sys
+import os
+import matplotlib.pyplot as plt
+import matplotlib.image as mpimg
+import numpy as np
+from scipy import misc
+import tensorflow as tf
+import atr_data_io_helper as atr_data_loader
+from train_atr_classifier import placeholder_inputs, comp_graph_v_2, evaluation
+
+sess=tf.InteractiveSession()
+
+x, y, keep_prob = placeholder_inputs()
+y_pred = comp_graph_v_2(x, y, keep_prob)
+
+accuracy = evaluation(y, y_pred)
+
+saver = tf.train.Saver()
+
+saver.restore(sess, '/home/tanmay/Code/GenVQA/Exp_Results/Atr_Classifier_v_1/obj_classifier_9.ckpt')
+
+mean_image = np.load('/home/tanmay/Code/GenVQA/Exp_Results/Atr_Classifier_v_1/mean_image.npy')
+
+# Test Data
+test_json_filename = '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/test_anno.json'
+image_dir = '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/images'
+
+# Base dir for html visualizer
+html_dir = '/home/tanmay/Code/GenVQA/Exp_Results/Atr_Classifier_v_1/html'
+if not os.path.exists(html_dir):
+    os.mkdir(html_dir)
+
+# HTML file writer
+html_writer = atr_data_loader.html_atr_table_writer(os.path.join(html_dir,'index.html'))
+col_dict={
+    0: 'Ground Truth',
+    1: 'Prediction',
+    2: 'Image'}
+html_writer.add_element(col_dict)
+
+shape_dict = {
+    0: 'blank',
+    1: 'rectangle',
+    2: 'triangle',
+    3: 'circle'}
+
+batch_size = 100
+correct = 0
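+# Evaluate 50 batches of 100 images starting at image id 10000; per-batch accuracy is
+# accumulated and misclassified crops are written to the HTML table below.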
+for i in range(50): 
+    test_batch = atr_data_loader.atr_mini_batch_loader(test_json_filename, image_dir, mean_image, 10000+i*batch_size, batch_size, 75, 75)
+    feed_dict_test={x: test_batch[0], y: test_batch[1], keep_prob: 1.0}
+    result = sess.run([accuracy, y_pred], feed_dict=feed_dict_test)
+    correct = correct + result[0]*batch_size
+    print(correct)
+
+    for row in range(batch_size*9):
+        gt_id = np.argmax(test_batch[1][row,:])
+        pred_id = np.argmax(result[1][row, :])
+        if gt_id != pred_id:
+            img_filename = os.path.join(html_dir,'{}_{}.png'.format(i,row))
+            misc.imsave(img_filename, test_batch[0][row,:,:,:])
+            col_dict = {
+                0: shape_dict[gt_id],
+                1: shape_dict[pred_id],
+                2: html_writer.image_tag('{}_{}.png'.format(i,row), 25, 25)}
+            html_writer.add_element(col_dict)
+
+html_writer.close_file()
+print('Test Accuracy: {}'.format(correct/5000))
diff --git a/color_classifiers/train_atr_classifier.py b/color_classifiers/train_atr_classifier.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d9c88f9eead9991f8194a7188cb8d54408f65db
--- /dev/null
+++ b/color_classifiers/train_atr_classifier.py
@@ -0,0 +1,197 @@
+import sys
+import os
+import matplotlib.pyplot as plt
+import matplotlib.image as mpimg
+import numpy as np
+import tensorflow as tf
+import atr_data_io_helper as atr_data_loader
+
+def plot_accuracy(xdata, ydata, xlim=None, ylim=None, savePath=None):
+    fig, ax = plt.subplots( nrows=1, ncols=1 )
+    ax.plot(xdata, ydata)
+    plt.xlabel('Iterations')
+    plt.ylabel('Accuracy')
+
+    if xlim is not None:
+        plt.xlim(xlim)
+
+    if ylim is not None:
+        plt.ylim(ylim)
+
+    if savePath is not None:
+        fig.savefig(savePath)
+
+
+    plt.close(fig)
+
+def plot_accuracies(xdata, ydata_train, ydata_val, xlim=None, ylim=None, savePath=None):
+    fig, ax = plt.subplots( nrows=1, ncols=1 )
+    ax.plot(xdata, ydata_train, xdata, ydata_val)
+    plt.xlabel('Epochs')
+    plt.ylabel('Accuracy')
+    plt.legend(['Train', 'Val'], loc='lower right')
+    if xlim is not None:
+        plt.xlim(xlim)
+
+    if ylim is not None:
+        plt.ylim(ylim)
+
+    if savePath is not None:
+        fig.savefig(savePath)
+
+
+    plt.close(fig)
+
+
+def weight_variable(shape):
+    initial = tf.truncated_normal(shape, stddev=0.1)
+    return tf.Variable(initial)
+
+def bias_variable(shape):
+    initial = tf.constant(0.1, shape=shape)
+    return tf.Variable(initial)
+
+def conv2d(x, W):
+    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
+
+def max_pool_2x2(x):
+    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
+
+def max_pool_4x4(x):
+    return tf.nn.max_pool(x, ksize=[1, 4, 4, 1], strides=[1, 4, 4, 1], padding='SAME')
+
+def placeholder_inputs():
+    # Specify placeholder_inputs
+    x = tf.placeholder(tf.float32, shape=[None, 25, 25, 3])
+    y = tf.placeholder(tf.float32, shape=[None, 4])
+    keep_prob = tf.placeholder(tf.float32)
+    return x, y, keep_prob
+
+def comp_graph_v_1(x, y, keep_prob):
+    # Specify computation graph
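+    # v_1: one 5x5 conv layer (10 filters) with 2x2 max-pooling (25x25 input -> 13x13),
+    # followed by dropout and a fully connected softmax layer over the 4 classes.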
+    W_conv1 = weight_variable([5, 5, 3, 10])
+    b_conv1 = bias_variable([10])
+    
+    h_conv1 = tf.nn.relu(conv2d(x, W_conv1) + b_conv1)
+    
+    h_pool1 = max_pool_2x2(h_conv1)
+    #print(tf.Tensor.get_shape(h_pool1))
+    
+    W_fc1 = weight_variable([13*13*10, 4])
+    b_fc1 = bias_variable([4])
+    
+    h_pool1_flat = tf.reshape(h_pool1, [-1, 13*13*10])
+    h_pool1_flat_drop = tf.nn.dropout(h_pool1_flat, keep_prob)
+    
+    y_pred = tf.nn.softmax(tf.matmul(h_pool1_flat_drop,W_fc1) + b_fc1)
+    
+    return y_pred
+
+def comp_graph_v_2(x, y, keep_prob):
+    # Specify computation graph
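+    # v_2: two conv + max-pool stages (10 and 20 filters) with dropout after each
+    # pooling step (25x25 -> 13x13 -> 7x7), then a fully connected softmax layer.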
+    W_conv1 = weight_variable([5, 5, 3, 10])
+    b_conv1 = bias_variable([10])
+    
+    h_conv1 = tf.nn.relu(conv2d(x, W_conv1) + b_conv1)
+    h_pool1 = max_pool_2x2(h_conv1)
+    h_conv1_drop = tf.nn.dropout(h_pool1, keep_prob)
+    
+    W_conv2 = weight_variable([5, 5, 10, 20])
+    b_conv2 = bias_variable([20])
+    
+    h_conv2 = tf.nn.relu(conv2d(h_conv1_drop, W_conv2) + b_conv2)
+    h_pool2 = max_pool_2x2(h_conv2)
+    h_conv2_drop = tf.nn.dropout(h_pool2, keep_prob)
+
+    W_fc1 = weight_variable([7*7*20, 4])
+    b_fc1 = bias_variable([4])
+    
+    h_pool2_flat = tf.reshape(h_conv2_drop, [-1, 7*7*20])
+    
+    y_pred = tf.nn.softmax(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
+    
+    return y_pred
+
+def evaluation(y, y_pred):
+    # Evaluation function
+    correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(y_pred,1))
+    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
+    #tf.scalar_summary("accuracy", accuracy)
+
+    return accuracy
+
+
+def train():
+    # Start session
+    sess = tf.InteractiveSession()
+
+    x, y, keep_prob = placeholder_inputs()
+    y_pred = comp_graph_v_2(x, y, keep_prob)
+
+    # Specify loss
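+    # Multi-class cross-entropy between the one-hot labels and the softmax output;
+    # tf.nn.softmax_cross_entropy_with_logits on the pre-softmax activations would be
+    # the numerically safer alternative.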
+    cross_entropy = -tf.reduce_sum(y*tf.log(y_pred))
+    
+    # Specify training method
+    train_step = tf.train.AdamOptimizer(0.001).minimize(cross_entropy)
+
+    # Evaluator
+    accuracy = evaluation(y, y_pred)
+    
+    # Merge summaries and write them to ~/Code/Tensorflow_Exp/logDir
+    merged = tf.merge_all_summaries()
+
+    # Output dir
+    outdir = '/home/tanmay/Code/GenVQA/Exp_Results/Atr_Classifier_v_1/'
+    if not os.path.exists(outdir):
+        os.mkdir(outdir)
+
+    # Training Data
+    img_width = 75
+    img_height = 75
+    train_json_filename = '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/train_anno.json'
+    image_dir = '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/images'
+    mean_image = atr_data_loader.mean_image(train_json_filename, image_dir, 1000, 100, img_height, img_width)
+    np.save(os.path.join(outdir, 'mean_image.npy'), mean_image)
+
+    # Val Data
+    val_batch = atr_data_loader.atr_mini_batch_loader(train_json_filename, image_dir, mean_image, 9501, 499, img_height, img_width)
+    feed_dict_val={x: val_batch[0], y: val_batch[1], keep_prob: 1.0}
+    
+    # Session Saver
+    saver = tf.train.Saver()
+
+    # Start Training
+    sess.run(tf.initialize_all_variables())
+    batch_size = 100
+    max_epoch = 10
+    max_iter = 95
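+    # 95 batches of 100 images sweep training image ids 1-9500 each epoch;
+    # ids 9501-9999 are held out above as the validation batch.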
+    val_acc_array_iter = np.empty([max_iter*max_epoch])
+    val_acc_array_epoch = np.zeros([max_epoch])
+    train_acc_array_epoch = np.zeros([max_epoch])
+    for epoch in range(max_epoch):
+        for i in range(max_iter):
+            train_batch = atr_data_loader.atr_mini_batch_loader(train_json_filename, image_dir, mean_image, 1+i*batch_size, batch_size, img_height, img_width)
+            feed_dict_train={x: train_batch[0], y: train_batch[1], keep_prob: 0.5}
+
+            _, current_train_batch_acc = sess.run([train_step, accuracy], feed_dict=feed_dict_train)
+
+            train_acc_array_epoch[epoch] =  train_acc_array_epoch[epoch] + current_train_batch_acc
+            val_acc_array_iter[i+epoch*max_iter] = accuracy.eval(feed_dict_val)
+            print('Step: {}  Val Accuracy: {}'.format(i+1+epoch*max_iter, val_acc_array_iter[i+epoch*max_iter]))
+            plot_accuracy(np.arange(0,i+1+epoch*max_iter)+1, val_acc_array_iter[0:i+1+epoch*max_iter], xlim=[1, max_epoch*max_iter], ylim=[0, 1.], savePath=os.path.join(outdir,'valAcc_vs_iter.pdf'))
+            
+            
+        train_acc_array_epoch[epoch] = train_acc_array_epoch[epoch] / max_iter 
+        val_acc_array_epoch[epoch] = val_acc_array_iter[i+epoch*max_iter]
+        
+        plot_accuracies(xdata=np.arange(0,epoch+1)+1, ydata_train=train_acc_array_epoch[0:epoch+1], ydata_val=val_acc_array_epoch[0:epoch+1], xlim=[1, max_epoch], ylim=[0, 1.], savePath=os.path.join(outdir,'acc_vs_epoch.pdf'))
+
+        save_path = saver.save(sess, os.path.join(outdir,'obj_classifier_{}.ckpt'.format(epoch)))
+            
+    
+    sess.close()
+
+if __name__=='__main__':
+    train()
+
diff --git a/object_classifiers/eval_obj_classifier.py b/object_classifiers/eval_obj_classifier.py
index d9dd4e1ee72649fbfc4dc7d56414be006e953ec8..9853116cdccde23b3e043b83a53044c64d7ae585 100644
--- a/object_classifiers/eval_obj_classifier.py
+++ b/object_classifiers/eval_obj_classifier.py
@@ -3,34 +3,67 @@ import os
 import matplotlib.pyplot as plt
 import matplotlib.image as mpimg
 import numpy as np
+from scipy import misc
 import tensorflow as tf
-import obj_data_io_helper as shape_data_loader
-from train_obj_classifier import placeholder_inputs, comp_graph, evaluation
+import obj_data_io_helper as shape_data_loader 
+from train_obj_classifier import placeholder_inputs, comp_graph_v_1, comp_graph_v_2, evaluation
 
 sess=tf.InteractiveSession()
 
 x, y, keep_prob = placeholder_inputs()
-y_pred = comp_graph(x, y, keep_prob)
+y_pred = comp_graph_v_2(x, y, keep_prob)
 
 accuracy = evaluation(y, y_pred)
 
 saver = tf.train.Saver()
 
-saver.restore(sess, '/home/tanmay/Code/GenVQA/Exp_Results/Shape_Classifier/obj_classifier_5.ckpt')
+saver.restore(sess, '/home/tanmay/Code/GenVQA/Exp_Results/Shape_Classifier_v_2/obj_classifier_1.ckpt')
 
-mean_image = np.load('/home/tanmay/Code/GenVQA/Exp_Results/Shape_Classifier/mean_image.npy')
+mean_image = np.load('/home/tanmay/Code/GenVQA/Exp_Results/Shape_Classifier_v_2/mean_image.npy')
 
 # Test Data
 test_json_filename = '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/test_anno.json'
 image_dir = '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/images'
 
+# Base dir for html visualizer
+html_dir = '/home/tanmay/Code/GenVQA/Exp_Results/Shape_Classifier_v_2/html'
+if not os.path.exists(html_dir):
+    os.mkdir(html_dir)
+
+# HTML file writer
+html_writer = shape_data_loader.html_obj_table_writer(os.path.join(html_dir,'index.html'))
+col_dict={
+    0: 'Ground Truth',
+    1: 'Prediction',
+    2: 'Image'}
+html_writer.add_element(col_dict)
+
+shape_dict = {
+    0: 'blank',
+    1: 'rectangle',
+    2: 'triangle',
+    3: 'circle'}
+
 batch_size = 100
 correct = 0
-for i in range(50):
-    test_batch = shape_data_loader.obj_mini_batch_loader(test_json_filename, image_dir, mean_image, 10000+i*batch_size, batch_size)
-
+for i in range(50): 
+    test_batch = shape_data_loader.obj_mini_batch_loader(test_json_filename, image_dir, mean_image, 10000+i*batch_size, batch_size, 75, 75)
     feed_dict_test={x: test_batch[0], y: test_batch[1], keep_prob: 1.0}
-    correct = correct + accuracy.eval(feed_dict_test)*batch_size
+    result = sess.run([accuracy, y_pred], feed_dict=feed_dict_test)
+    correct = correct + result[0]*batch_size
     print(correct)
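+    # Each batch of 100 images expands to 900 grid crops; every misclassified crop is
+    # saved to disk and logged in the HTML table with its true and predicted shape.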
 
+    for row in range(batch_size*9):
+        gt_id = np.argmax(test_batch[1][row,:])
+        pred_id = np.argmax(result[1][row, :])
+        if gt_id != pred_id:
+            img_filename = os.path.join(html_dir,'{}_{}.png'.format(i,row))
+            misc.imsave(img_filename, test_batch[0][row,:,:,:])
+            col_dict = {
+                0: shape_dict[gt_id],
+                1: shape_dict[pred_id],
+                2: html_writer.image_tag('{}_{}.png'.format(i,row), 25, 25)}
+            html_writer.add_element(col_dict)
+
+html_writer.close_file()
 print('Test Accuracy: {}'.format(correct/5000))
diff --git a/object_classifiers/obj_data_io_helper.py b/object_classifiers/obj_data_io_helper.py
index 3f5484ac87048bd67c3f72d06070c1c218aa0397..d0427abb6d1f3d56ec3d99e5a3b913adc7177959 100644
--- a/object_classifiers/obj_data_io_helper.py
+++ b/object_classifiers/obj_data_io_helper.py
@@ -5,18 +5,20 @@ import matplotlib.pyplot as plt
 import matplotlib.image as mpimg
 import numpy as np
 import tensorflow as tf
+from scipy import misc
 
 def obj_mini_batch_loader(json_filename, image_dir, mean_image, start_index, batch_size, img_height=100, img_width=100, channels=3):
 
     with open(json_filename, 'r') as json_file: 
         json_data = json.load(json_file)
 
-    obj_images = np.empty(shape=[9*batch_size, img_height, img_width, channels])
+    obj_images = np.empty(shape=[9*batch_size, img_height//3, img_width//3, channels])
     obj_labels = np.zeros(shape=[9*batch_size, 4])
 
     for i in range(start_index, start_index + batch_size):
         image_name = os.path.join(image_dir, str(i) + '.jpg')
-        image = mpimg.imread(image_name)
+        image = misc.imresize(mpimg.imread(image_name),(img_height, img_width), interp='nearest')
+#        image.resize((img_height, img_width, 3))
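+        # Resize the full image before cropping so the 3x3 grid cells match the
+        # fixed network input size.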
         crop_shape = np.array([image.shape[0], image.shape[1]])/3
         selected_anno = [q for q in json_data if q['image_id']==i]
         grid_config = selected_anno[0]['config']
@@ -46,26 +48,43 @@ def mean_image_batch(json_filename, image_dir, start_index, batch_size, img_heig
 
 def mean_image(json_filename, image_dir, num_images, batch_size, img_height=100, img_width=100, channels=3):
     max_iter = np.floor(num_images/batch_size)
-    mean_image = np.zeros([img_height, img_width, channels])
+    mean_image = np.zeros([img_height//3, img_width//3, channels])
     for i in range(max_iter.astype(np.int16)):
         mean_image = mean_image + mean_image_batch(json_filename, image_dir, 1+i*batch_size, batch_size, img_height, img_width, channels)
 
     mean_image = mean_image/max_iter
     tmp_mean_image = mean_image*254
-    imgplot = plt.imshow(tmp_mean_image.astype(np.uint8))
-    plt.show()
+    # imgplot = plt.imshow(tmp_mean_image.astype(np.uint8))
+    # plt.show()
     return mean_image
 
 
-if __name__=="__main__":
+class html_obj_table_writer():
+    def __init__(self, filename):
+        self.filename = filename
+        self.html_file = open(self.filename, 'w')
+        self.html_file.write("""<!DOCTYPE html>\n<html>\n<body>\n<table border="1" style="width:100%"> \n""")
+    
+    def add_element(self, col_dict):
+        self.html_file.write('    <tr>\n')
+        for key in range(len(col_dict)):
+            self.html_file.write("""    <td>{}</td>\n""".format(col_dict[key]))
+        self.html_file.write('    </tr>\n')
+
+    def image_tag(self, image_path, height, width):
+        return """<img src="{}" alt="IMAGE NOT FOUND!" height={} width={}>""".format(image_path,height,width)
+        
+    def close_file(self):
+        self.html_file.write('</table>\n</body>\n</html>')
+        self.html_file.close()
 
-    json_filename = '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/train_anno.json'
-    batch_size = 1
-    start_index = 0
+        
+    
 
-    image_dir = '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/images'
+if __name__=="__main__":
 
-    batch = obj_mini_batch_loader(json_filename, image_dir, start_index, batch_size)
-    print(batch[0].shape)
-    print(batch[1].shape)
+    html_writer = html_obj_table_writer('/home/tanmay/Code/GenVQA/Exp_Results/Shape_Classifier_v_1/trial.html')
+    col_dict={0: 'sam', 1: html_writer.image_tag('something.png',25,25)}
+    html_writer.add_element(col_dict)
+    html_writer.close_file()
 
diff --git a/object_classifiers/obj_data_io_helper.pyc b/object_classifiers/obj_data_io_helper.pyc
index 73a426be9496860d6a43819bf5c4c1506db53ffc..48169a55a825c93a57adb1cf4cc9cfeb258f4b15 100644
Binary files a/object_classifiers/obj_data_io_helper.pyc and b/object_classifiers/obj_data_io_helper.pyc differ
diff --git a/object_classifiers/train_obj_classifier.py b/object_classifiers/train_obj_classifier.py
index a54e8461f4005a1ad942515782f2dacf670a9ddf..19b48a25fa3dcf668d8c2350a0e20538d1eb934e 100644
--- a/object_classifiers/train_obj_classifier.py
+++ b/object_classifiers/train_obj_classifier.py
@@ -57,29 +57,58 @@ def conv2d(x, W):
 def max_pool_2x2(x):
     return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
 
+def max_pool_4x4(x):
+    return tf.nn.max_pool(x, ksize=[1, 4, 4, 1], strides=[1, 4, 4, 1], padding='SAME')
+
 def placeholder_inputs():
     # Specify placeholder_inputs
-    x = tf.placeholder(tf.float32, shape=[None, 100, 100, 3])
+    x = tf.placeholder(tf.float32, shape=[None, 25, 25, 3])
     y = tf.placeholder(tf.float32, shape=[None, 4])
     keep_prob = tf.placeholder(tf.float32)
     return x, y, keep_prob
 
-def comp_graph(x, y, keep_prob):
+def comp_graph_v_1(x, y, keep_prob):
     # Specify computation graph
     W_conv1 = weight_variable([5, 5, 3, 10])
     b_conv1 = bias_variable([10])
     
     h_conv1 = tf.nn.relu(conv2d(x, W_conv1) + b_conv1)
+    
     h_pool1 = max_pool_2x2(h_conv1)
     #print(tf.Tensor.get_shape(h_pool1))
     
-    W_fc1 = weight_variable([50*50*10, 4])
+    W_fc1 = weight_variable([13*13*10, 4])
     b_fc1 = bias_variable([4])
     
-    h_pool1_flat = tf.reshape(h_pool1, [-1, 50*50*10])
+    h_pool1_flat = tf.reshape(h_pool1, [-1, 13*13*10])
     h_pool1_flat_drop = tf.nn.dropout(h_pool1_flat, keep_prob)
     
-    y_pred = tf.nn.softmax(tf.matmul(h_pool1_flat,W_fc1) + b_fc1)
+    y_pred = tf.nn.softmax(tf.matmul(h_pool1_flat_drop,W_fc1) + b_fc1)
+    
+    return y_pred
+
+def comp_graph_v_2(x, y, keep_prob):
+    # Specify computation graph
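+    # v_2: two conv + max-pool stages (4 and 8 filters; 25x25 -> 13x13 -> 7x7),
+    # followed by dropout and a fully connected softmax layer over the 4 shape classes.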
+    W_conv1 = weight_variable([5, 5, 3, 4])
+    b_conv1 = bias_variable([4])
+    
+    h_conv1 = tf.nn.relu(conv2d(x, W_conv1) + b_conv1)
+    h_pool1 = max_pool_2x2(h_conv1)
+    h_conv1_drop = tf.nn.dropout(h_pool1, keep_prob)
+    
+    W_conv2 = weight_variable([3, 3, 4, 8])
+    b_conv2 = bias_variable([8])
+    
+    h_conv2 = tf.nn.relu(conv2d(h_conv1_drop, W_conv2) + b_conv2)
+    h_pool2 = max_pool_2x2(h_conv2)
+
+    W_fc1 = weight_variable([7*7*8, 4])
+    b_fc1 = bias_variable([4])
+    
+    h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*8])
+    h_pool2_flat_drop = tf.nn.dropout(h_pool2_flat, keep_prob)
+    
+    y_pred = tf.nn.softmax(tf.matmul(h_pool2_flat_drop,W_fc1) + b_fc1)
     
     return y_pred
 
@@ -97,7 +126,7 @@ def train():
     sess = tf.InteractiveSession()
 
     x, y, keep_prob = placeholder_inputs()
-    y_pred = comp_graph(x, y, keep_prob)
+    y_pred = comp_graph_v_2(x, y, keep_prob)
 
     # Specify loss
     cross_entropy = -tf.reduce_sum(y*tf.log(y_pred))
@@ -112,14 +141,21 @@ def train():
     merged = tf.merge_all_summaries()
     #writer = tf.train.SummaryWriter("/home/tanmay/Code/GenVQA/Exp_Results/Shape_Classifier", graph_def=tf.GraphDef())
 
+    # Output dir
+    outdir = '/home/tanmay/Code/GenVQA/Exp_Results/Shape_Classifier_v_2/'
+    if not os.path.exists(outdir):
+        os.mkdir(outdir)
+
     # Training Data
+    img_width = 75
+    img_height = 75
     train_json_filename = '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/train_anno.json'
     image_dir = '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/images'
-    mean_image = shape_data_loader.mean_image(train_json_filename, image_dir, 1000, 100)
-    np.save('/home/tanmay/Code/GenVQA/Exp_Results/Shape_Classifier/mean_image.npy', mean_image)
+    mean_image = shape_data_loader.mean_image(train_json_filename, image_dir, 1000, 100, img_height, img_width)
+    np.save(os.path.join(outdir, 'mean_image.npy'), mean_image)
 
     # Val Data
-    val_batch = shape_data_loader.obj_mini_batch_loader(train_json_filename, image_dir, mean_image, 9501, 499)
+    val_batch = shape_data_loader.obj_mini_batch_loader(train_json_filename, image_dir, mean_image, 9501, 499, img_height, img_width)
     feed_dict_val={x: val_batch[0], y: val_batch[1], keep_prob: 1.0}
     
     # Session Saver
@@ -127,15 +163,15 @@ def train():
 
     # Start Training
     sess.run(tf.initialize_all_variables())
-    batch_size = 100
-    max_epoch = 10
-    max_iter = 95
+    batch_size = 10
+    max_epoch = 2
+    max_iter = 950
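+    # 950 batches of 10 images sweep training image ids 1-9500 each epoch;
+    # ids 9501-9999 are held out above as the validation batch.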
     val_acc_array_iter = np.empty([max_iter*max_epoch])
     val_acc_array_epoch = np.zeros([max_epoch])
     train_acc_array_epoch = np.zeros([max_epoch])
     for epoch in range(max_epoch):
         for i in range(max_iter):
-            train_batch = shape_data_loader.obj_mini_batch_loader(train_json_filename, image_dir, mean_image, 1+i*batch_size, batch_size)
+            train_batch = shape_data_loader.obj_mini_batch_loader(train_json_filename, image_dir, mean_image, 1+i*batch_size, batch_size, img_height, img_width)
             feed_dict_train={x: train_batch[0], y: train_batch[1], keep_prob: 0.5}
 
             _, current_train_batch_acc = sess.run([train_step, accuracy], feed_dict=feed_dict_train)
@@ -143,15 +179,15 @@ def train():
             train_acc_array_epoch[epoch] =  train_acc_array_epoch[epoch] + current_train_batch_acc
             val_acc_array_iter[i+epoch*max_iter] = accuracy.eval(feed_dict_val)
             print('Step: {}  Val Accuracy: {}'.format(i+1+epoch*max_iter, val_acc_array_iter[i+epoch*max_iter]))
-            plot_accuracy(np.arange(0,i+1+epoch*max_iter)+1, val_acc_array_iter[0:i+1+epoch*max_iter], xlim=[1, max_epoch*max_iter], ylim=[0, 1.], savePath='/home/tanmay/Code/GenVQA/Exp_Results/Shape_Classifier/valAcc_vs_iter.pdf')
+            plot_accuracy(np.arange(0,i+1+epoch*max_iter)+1, val_acc_array_iter[0:i+1+epoch*max_iter], xlim=[1, max_epoch*max_iter], ylim=[0, 1.], savePath=os.path.join(outdir,'valAcc_vs_iter.pdf'))
             
             
         train_acc_array_epoch[epoch] = train_acc_array_epoch[epoch] / max_iter 
         val_acc_array_epoch[epoch] = val_acc_array_iter[i+epoch*max_iter]
         
-        plot_accuracies(xdata=np.arange(0,epoch+1)+1, ydata_train=train_acc_array_epoch[0:epoch+1], ydata_val=val_acc_array_epoch[0:epoch+1], xlim=[1, max_epoch], ylim=[0, 1.], savePath='/home/tanmay/Code/GenVQA/Exp_Results/Shape_Classifier/acc_vs_epoch.pdf')
+        plot_accuracies(xdata=np.arange(0,epoch+1)+1, ydata_train=train_acc_array_epoch[0:epoch+1], ydata_val=val_acc_array_epoch[0:epoch+1], xlim=[1, max_epoch], ylim=[0, 1.], savePath=os.path.join(outdir,'acc_vs_epoch.pdf'))
 
-        save_path = saver.save(sess, '/home/tanmay/Code/GenVQA/Exp_Results/Shape_Classifier/obj_classifier_{}.ckpt'.format(epoch))
+        save_path = saver.save(sess, os.path.join(outdir,'obj_classifier_{}.ckpt'.format(epoch)))
             
     
     sess.close()
diff --git a/object_classifiers/train_obj_classifier.pyc b/object_classifiers/train_obj_classifier.pyc
index cd5b5cafd0d303334de0fb93f98f0f61611f70e1..550d73d4e5c289d02061f24457f8bc52c638aaee 100644
Binary files a/object_classifiers/train_obj_classifier.pyc and b/object_classifiers/train_obj_classifier.pyc differ